283 files changed, 22825 insertions, 8726 deletions
@@ -1,3 +1,9 @@ +This file documents changes for QEMU releases 0.12 and earlier. +For changelog information for later releases, see +http://wiki.qemu.org/ChangeLog or look at the git history for +more detailed information. + + version 0.12.0: - Update to SeaBIOS 0.5.0 @@ -525,7 +531,7 @@ version 0.1.5: - ppc64 support + personality() patch (Rusty Russell) - first Alpha CPU patches (Falk Hueffner) - - removed bfd.h dependancy + - removed bfd.h dependency - fixed shrd, shld, idivl and divl on PowerPC. - fixed buggy glibc PowerPC rint() function (test-i386 passes now on PowerPC). diff --git a/MAINTAINERS b/MAINTAINERS index e6f853dfff..35d4496186 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -56,8 +56,8 @@ M: Paul Brook <paul@codesourcery.com> Guest CPU cores (TCG): ---------------------- Alpha -M: qemu-devel@nongnu.org -S: Orphan +M: Richard Henderson <rth@twiddle.net> +S: Maintained F: target-alpha/ ARM @@ -88,6 +88,8 @@ include $(SRC_PATH)/Makefile.objs endif $(common-obj-y): $(GENERATED_HEADERS) +subdir-libcacard: $(oslib-obj-y) $(trace-obj-y) qemu-malloc.o qemu-timer-common.o + $(filter %-softmmu,$(SUBDIR_RULES)): $(trace-obj-y) $(common-obj-y) subdir-libdis $(filter %-user,$(SUBDIR_RULES)): $(GENERATED_HEADERS) $(trace-obj-y) subdir-libdis-user subdir-libuser @@ -132,14 +134,14 @@ qemu-img-cmds.h: $(SRC_PATH)/qemu-img-cmds.hx check-qint.o check-qstring.o check-qdict.o check-qlist.o check-qfloat.o check-qjson.o: $(GENERATED_HEADERS) -CHECK_PROG_DEPS = qemu-malloc.o $(oslib-obj-y) $(trace-obj-y) +CHECK_PROG_DEPS = qemu-malloc.o $(oslib-obj-y) $(trace-obj-y) qemu-tool.o check-qint: check-qint.o qint.o $(CHECK_PROG_DEPS) check-qstring: check-qstring.o qstring.o $(CHECK_PROG_DEPS) check-qdict: check-qdict.o qdict.o qfloat.o qint.o qstring.o qbool.o qlist.o $(CHECK_PROG_DEPS) check-qlist: check-qlist.o qlist.o qint.o $(CHECK_PROG_DEPS) check-qfloat: check-qfloat.o qfloat.o $(CHECK_PROG_DEPS) -check-qjson: check-qjson.o qfloat.o qint.o qdict.o qstring.o qlist.o qbool.o qjson.o json-streamer.o json-lexer.o json-parser.o $(CHECK_PROG_DEPS) +check-qjson: check-qjson.o qfloat.o qint.o qdict.o qstring.o qlist.o qbool.o qjson.o json-streamer.o json-lexer.o json-parser.o error.o qerror.o qemu-error.o $(CHECK_PROG_DEPS) QEMULIBS=libhw32 libhw64 libuser libdis libdis-user @@ -183,6 +185,7 @@ ppc_rom.bin openbios-sparc32 openbios-sparc64 openbios-ppc \ pxe-e1000.rom pxe-eepro100.rom pxe-ne2k_pci.rom \ pxe-pcnet.rom pxe-rtl8139.rom pxe-virtio.rom \ bamboo.dtb petalogix-s3adsp1800.dtb petalogix-ml605.dtb \ +mpc8544ds.dtb \ multiboot.bin linuxboot.bin \ s390-zipl.rom \ spapr-rtas.bin slof.bin diff --git a/Makefile.objs b/Makefile.objs index 9d8851e5d4..52d8b23045 100644 --- a/Makefile.objs +++ b/Makefile.objs @@ -2,13 +2,13 @@ # QObject qobject-obj-y = qint.o qstring.o qdict.o qlist.o qfloat.o qbool.o qobject-obj-y += qjson.o json-lexer.o json-streamer.o json-parser.o -qobject-obj-y += qerror.o +qobject-obj-y += qerror.o error.o ####################################################################### # oslib-obj-y is code depending on the OS (win32 vs posix) oslib-obj-y = osdep.o -oslib-obj-$(CONFIG_WIN32) += oslib-win32.o -oslib-obj-$(CONFIG_POSIX) += oslib-posix.o +oslib-obj-$(CONFIG_WIN32) += oslib-win32.o qemu-thread-win32.o +oslib-obj-$(CONFIG_POSIX) += oslib-posix.o qemu-thread-posix.o ####################################################################### # block-obj-y is code used by both qemu system emulation and qemu-img @@ -45,12 +45,14 @@ net-nested-$(CONFIG_SLIRP) += slirp.o 
net-nested-$(CONFIG_VDE) += vde.o net-obj-y += $(addprefix net/, $(net-nested-y)) -ifeq ($(CONFIG_VIRTIO)$(CONFIG_VIRTFS),yy) +ifeq ($(CONFIG_VIRTIO)$(CONFIG_VIRTFS)$(CONFIG_PCI),yyy) # Lots of the fsdev/9pcode is pulled in by vl.c via qemu_fsdev_add. # only pull in the actual virtio-9p device if we also enabled virtio. CONFIG_REALLY_VIRTFS=y +fsdev-nested-y = qemu-fsdev.o +else +fsdev-nested-y = qemu-fsdev-dummy.o endif -fsdev-nested-$(CONFIG_VIRTFS) = qemu-fsdev.o fsdev-obj-$(CONFIG_VIRTFS) += $(addprefix fsdev/, $(fsdev-nested-y)) ###################################################################### @@ -143,8 +145,7 @@ common-obj-y += $(addprefix ui/, $(ui-obj-y)) common-obj-$(CONFIG_VNC) += $(addprefix ui/, $(vnc-obj-y)) common-obj-y += iov.o acl.o -common-obj-$(CONFIG_POSIX) += qemu-thread-posix.o compatfd.o -common-obj-$(CONFIG_WIN32) += qemu-thread-win32.o +common-obj-$(CONFIG_POSIX) += compatfd.o common-obj-y += notify.o event_notifier.o common-obj-y += qemu-timer.o qemu-timer-common.o @@ -171,6 +172,7 @@ user-obj-y += cutils.o cache-utils.o hw-obj-y = hw-obj-y += vl.o loader.o hw-obj-$(CONFIG_VIRTIO) += virtio.o virtio-console.o +hw-obj-$(CONFIG_VIRTIO_PCI) += virtio-pci.o hw-obj-y += fw_cfg.o hw-obj-$(CONFIG_PCI) += pci.o pci_bridge.o hw-obj-$(CONFIG_PCI) += msix.o msi.o @@ -194,6 +196,7 @@ hw-obj-$(CONFIG_PCSPK) += pcspk.o hw-obj-$(CONFIG_PCKBD) += pckbd.o hw-obj-$(CONFIG_USB_UHCI) += usb-uhci.o hw-obj-$(CONFIG_USB_OHCI) += usb-ohci.o +hw-obj-$(CONFIG_USB_EHCI) += usb-ehci.o hw-obj-$(CONFIG_FDC) += fdc.o hw-obj-$(CONFIG_ACPI) += acpi.o acpi_piix4.o hw-obj-$(CONFIG_APM) += pm_smbus.o apm.o @@ -285,12 +288,11 @@ sound-obj-$(CONFIG_HDA) += intel-hda.o hda-audio.o adlib.o fmopl.o: QEMU_CFLAGS += -DBUILD_Y8950=0 hw-obj-$(CONFIG_SOUND) += $(sound-obj-y) -9pfs-nested-$(CONFIG_REALLY_VIRTFS) = virtio-9p-debug.o +9pfs-nested-$(CONFIG_VIRTFS) = virtio-9p.o virtio-9p-debug.o 9pfs-nested-$(CONFIG_VIRTFS) += virtio-9p-local.o virtio-9p-xattr.o 9pfs-nested-$(CONFIG_VIRTFS) += virtio-9p-xattr-user.o virtio-9p-posix-acl.o -hw-obj-$(CONFIG_VIRTFS) += $(addprefix 9pfs/, $(9pfs-nested-y)) -$(addprefix 9pfs/, $(9pfs-nested-y)): CFLAGS += -I$(SRC_PATH)/hw/ +hw-obj-$(CONFIG_REALLY_VIRTFS) += $(addprefix 9pfs/, $(9pfs-nested-y)) ###################################################################### @@ -335,7 +337,7 @@ trace-dtrace.h: trace-dtrace.dtrace $(call quiet-command,dtrace -o $@ -h -s $<, " GEN trace-dtrace.h") # Normal practice is to name DTrace probe file with a '.d' extension -# but that gets picked up by QEMU's Makefile as an external dependancy +# but that gets picked up by QEMU's Makefile as an external dependency # rule file. 
So we use '.dtrace' instead trace-dtrace.dtrace: trace-dtrace.dtrace-timestamp trace-dtrace.dtrace-timestamp: $(SRC_PATH)/trace-events config-host.mak diff --git a/Makefile.target b/Makefile.target index 21f864afd2..b1a0f6d28b 100644 --- a/Makefile.target +++ b/Makefile.target @@ -71,8 +71,7 @@ all: $(PROGS) stap # cpu emulator library libobj-y = exec.o translate-all.o cpu-exec.o translate.o libobj-y += tcg/tcg.o -libobj-$(CONFIG_SOFTFLOAT) += fpu/softfloat.o -libobj-$(CONFIG_NOSOFTFLOAT) += fpu/softfloat-native.o +libobj-y += fpu/softfloat.o libobj-y += op_helper.o helper.o ifeq ($(TARGET_BASE_ARCH), i386) libobj-y += cpuid.o @@ -94,10 +93,10 @@ tcg/tcg.o: cpu.h # HELPER_CFLAGS is used for all the code compiled with static register # variables -%_helper.o cpu-exec.o: QEMU_CFLAGS += $(HELPER_CFLAGS) +%_helper.o cpu-exec.o user-exec.o: QEMU_CFLAGS += $(HELPER_CFLAGS) # Note: this is a workaround. The real fix is to avoid compiling -# cpu_signal_handler() in cpu-exec.c. +# cpu_signal_handler() in user-exec.c. signal.o: QEMU_CFLAGS += $(HELPER_CFLAGS) ######################################################### @@ -110,7 +109,7 @@ $(call set-vpath, $(SRC_PATH)/linux-user:$(SRC_PATH)/linux-user/$(TARGET_ABI_DIR QEMU_CFLAGS+=-I$(SRC_PATH)/linux-user/$(TARGET_ABI_DIR) -I$(SRC_PATH)/linux-user obj-y = main.o syscall.o strace.o mmap.o signal.o thunk.o \ elfload.o linuxload.o uaccess.o gdbstub.o cpu-uname.o \ - qemu-malloc.o $(oslib-obj-y) + qemu-malloc.o user-exec.o $(oslib-obj-y) obj-$(TARGET_HAS_BFLT) += flatload.o @@ -148,7 +147,7 @@ LDFLAGS+=-Wl,-segaddr,__STD_PROG_ZONE,0x1000 -image_base 0x0e000000 LIBS+=-lmx obj-y = main.o commpage.o machload.o mmap.o signal.o syscall.o thunk.o \ - gdbstub.o + gdbstub.o user-exec.o obj-i386-y += ioport-user.o @@ -170,7 +169,7 @@ $(call set-vpath, $(SRC_PATH)/bsd-user) QEMU_CFLAGS+=-I$(SRC_PATH)/bsd-user -I$(SRC_PATH)/bsd-user/$(TARGET_ARCH) obj-y = main.o bsdload.o elfload.o mmap.o signal.o strace.o syscall.o \ - gdbstub.o uaccess.o + gdbstub.o uaccess.o user-exec.o obj-i386-y += ioport-user.o @@ -191,10 +190,9 @@ obj-y = arch_init.o cpus.o monitor.o machine.o gdbstub.o balloon.o # need to fix this properly obj-$(CONFIG_NO_PCI) += pci-stub.o obj-$(CONFIG_VIRTIO) += virtio-blk.o virtio-balloon.o virtio-net.o virtio-serial-bus.o -obj-$(CONFIG_VIRTIO_PCI) += virtio-pci.o obj-y += vhost_net.o obj-$(CONFIG_VHOST_NET) += vhost.o -obj-$(CONFIG_REALLY_VIRTFS) += 9pfs/virtio-9p.o +obj-$(CONFIG_REALLY_VIRTFS) += 9pfs/virtio-9p-device.o obj-y += rwhandler.o obj-$(CONFIG_KVM) += kvm.o kvm-all.o obj-$(CONFIG_NO_KVM) += kvm-stub.o @@ -206,7 +204,19 @@ QEMU_CFLAGS += $(VNC_JPEG_CFLAGS) QEMU_CFLAGS += $(VNC_PNG_CFLAGS) # xen backend driver support -obj-$(CONFIG_XEN) += xen_machine_pv.o xen_domainbuild.o +obj-i386-$(CONFIG_XEN) += xen_machine_pv.o xen_domainbuild.o + +ifeq ($(TARGET_BASE_ARCH), i386) + CONFIG_NO_XEN = $(if $(subst n,,$(CONFIG_XEN)),n,y) +else + CONFIG_NO_XEN = y +endif +# xen support +CONFIG_NO_XEN_MAPCACHE = $(if $(subst n,,$(CONFIG_XEN_MAPCACHE)),n,y) +obj-i386-$(CONFIG_XEN) += xen-all.o +obj-$(CONFIG_NO_XEN) += xen-stub.o +obj-i386-$(CONFIG_XEN_MAPCACHE) += xen-mapcache.o +obj-$(CONFIG_NO_XEN_MAPCACHE) += xen-mapcache-stub.o # Inter-VM PCI shared memory CONFIG_IVSHMEM = @@ -220,7 +230,7 @@ obj-$(CONFIG_IVSHMEM) += ivshmem.o # Hardware support obj-i386-y += vga.o obj-i386-y += mc146818rtc.o i8259.o pc.o -obj-i386-y += cirrus_vga.o apic.o ioapic.o piix_pci.o +obj-i386-y += cirrus_vga.o sga.o apic.o ioapic.o piix_pci.o obj-i386-y += vmport.o obj-i386-y += 
device-hotplug.o pci-hotplug.o smbios.o wdt_ib700.o obj-i386-y += debugcon.o multiboot.o @@ -364,7 +374,8 @@ obj-m68k-y += m68k-semi.o dummy_m68k.o obj-s390x-y = s390-virtio-bus.o s390-virtio.o -obj-alpha-y = alpha_palcode.o +obj-alpha-y = i8259.o mc146818rtc.o +obj-alpha-y += vga.o cirrus_vga.o main.o: QEMU_CFLAGS+=$(GPROF_CFLAGS) @@ -401,8 +412,6 @@ hmp-commands.h: $(SRC_PATH)/hmp-commands.hx qmp-commands.h: $(SRC_PATH)/qmp-commands.hx $(call quiet-command,sh $(SRC_PATH)/scripts/hxtool -h < $< > $@," GEN $(TARGET_DIR)$@") -9pfs/virtio-9p.o: CFLAGS += -I$(SRC_PATH)/hw/ - clean: rm -f *.o *.a *~ $(PROGS) nwfpe/*.o fpu/*.o rm -f *.d */*.d tcg/*.o ide/*.o 9pfs/*.o diff --git a/QMP/qmp.py b/QMP/qmp.py index 14ce8b0d05..c7dbea076d 100644 --- a/QMP/qmp.py +++ b/QMP/qmp.py @@ -22,19 +22,24 @@ class QMPCapabilitiesError(QMPError): pass class QEMUMonitorProtocol: - def __init__(self, address): + def __init__(self, address, server=False): """ Create a QEMUMonitorProtocol class. @param address: QEMU address, can be either a unix socket path (string) or a tuple in the form ( address, port ) for a TCP connection - @note No connection is established, this is done by the connect() method + @param server: server mode listens on the socket (bool) + @raise socket.error on socket connection errors + @note No connection is established, this is done by the connect() or + accept() methods """ self.__events = [] self.__address = address self.__sock = self.__get_sock() - self.__sockfile = self.__sock.makefile() + if server: + self.__sock.bind(self.__address) + self.__sock.listen(1) def __get_sock(self): if isinstance(self.__address, tuple): @@ -43,7 +48,18 @@ class QEMUMonitorProtocol: family = socket.AF_UNIX return socket.socket(family, socket.SOCK_STREAM) - def __json_read(self): + def __negotiate_capabilities(self): + self.__sockfile = self.__sock.makefile() + greeting = self.__json_read() + if greeting is None or not greeting.has_key('QMP'): + raise QMPConnectError + # Greeting seems ok, negotiate capabilities + resp = self.cmd('qmp_capabilities') + if "return" in resp: + return greeting + raise QMPCapabilitiesError + + def __json_read(self, only_event=False): while True: data = self.__sockfile.readline() if not data: @@ -51,7 +67,8 @@ class QEMUMonitorProtocol: resp = json.loads(data) if 'event' in resp: self.__events.append(resp) - continue + if not only_event: + continue return resp error = socket.error @@ -66,14 +83,19 @@ class QEMUMonitorProtocol: @raise QMPCapabilitiesError if fails to negotiate capabilities """ self.__sock.connect(self.__address) - greeting = self.__json_read() - if greeting is None or not greeting.has_key('QMP'): - raise QMPConnectError - # Greeting seems ok, negotiate capabilities - resp = self.cmd('qmp_capabilities') - if "return" in resp: - return greeting - raise QMPCapabilitiesError + return self.__negotiate_capabilities() + + def accept(self): + """ + Await connection from QMP Monitor and perform capabilities negotiation. + + @return QMP greeting dict + @raise socket.error on socket connection errors + @raise QMPConnectError if the greeting is not received + @raise QMPCapabilitiesError if fails to negotiate capabilities + """ + self.__sock, _ = self.__sock.accept() + return self.__negotiate_capabilities() def cmd_obj(self, qmp_cmd): """ @@ -106,9 +128,11 @@ class QEMUMonitorProtocol: qmp_cmd['id'] = id return self.cmd_obj(qmp_cmd) - def get_events(self): + def get_events(self, wait=False): """ Get a list of available QMP events. 
+ + @param wait: block until an event is available (bool) """ self.__sock.setblocking(0) try: @@ -118,6 +142,8 @@ class QEMUMonitorProtocol: # No data available pass self.__sock.setblocking(1) + if not self.__events and wait: + self.__json_read(only_event=True) return self.__events def clear_events(self): diff --git a/alpha-dis.c b/alpha-dis.c index 8a2411e4d5..ae331b35b8 100644 --- a/alpha-dis.c +++ b/alpha-dis.c @@ -238,10 +238,6 @@ extern const unsigned alpha_num_operands; #define AXP_REG_SP 30 #define AXP_REG_ZERO 31 -#define bfd_mach_alpha_ev4 0x10 -#define bfd_mach_alpha_ev5 0x20 -#define bfd_mach_alpha_ev6 0x30 - enum bfd_reloc_code_real { BFD_RELOC_23_PCREL_S2, BFD_RELOC_ALPHA_HINT diff --git a/arch_init.c b/arch_init.c index 0c09f9118e..484b39d4dd 100644 --- a/arch_init.c +++ b/arch_init.c @@ -709,6 +709,11 @@ int audio_available(void) #endif } +int tcg_available(void) +{ + return 1; +} + int kvm_available(void) { #ifdef CONFIG_KVM diff --git a/arch_init.h b/arch_init.h index 86ebc149bc..2de9f0852d 100644 --- a/arch_init.h +++ b/arch_init.h @@ -27,6 +27,7 @@ void do_smbios_option(const char *optarg); void cpudef_init(void); int audio_available(void); void audio_init(qemu_irq *isa_pic, PCIBus *pci_bus); +int tcg_available(void); int kvm_available(void); int xen_available(void); diff --git a/audio/audio_pt_int.c b/audio/audio_pt_int.c index 908c569a92..9a9c306a9c 100644 --- a/audio/audio_pt_int.c +++ b/audio/audio_pt_int.c @@ -6,8 +6,6 @@ #include "audio_int.h" #include "audio_pt_int.h" -#include <signal.h> - static void GCC_FMT_ATTR(3, 4) logerr (struct audio_pt *pt, int err, const char *fmt, ...) { diff --git a/audio/mixeng_template.h b/audio/mixeng_template.h index a2d0ef84fd..e644c231ad 100644 --- a/audio/mixeng_template.h +++ b/audio/mixeng_template.h @@ -46,7 +46,7 @@ static mixeng_real inline glue (conv_, ET) (IN_T v) #endif #else /* !RECIPROCAL */ #ifdef SIGNED - return nv / (mixeng_real) (IN_MAX - IN_MIN); + return nv / (mixeng_real) ((mixeng_real) IN_MAX - IN_MIN); #else return (nv - HALF) / (mixeng_real) IN_MAX; #endif @@ -63,7 +63,7 @@ static IN_T inline glue (clip_, ET) (mixeng_real v) } #ifdef SIGNED - return ENDIAN_CONVERT ((IN_T) (v * (IN_MAX - IN_MIN))); + return ENDIAN_CONVERT ((IN_T) (v * ((mixeng_real) IN_MAX - IN_MIN))); #else return ENDIAN_CONVERT ((IN_T) ((v * IN_MAX) + HALF)); #endif diff --git a/audio/sdlaudio.c b/audio/sdlaudio.c index a847aa90f7..d24daa5ead 100644 --- a/audio/sdlaudio.c +++ b/audio/sdlaudio.c @@ -32,7 +32,6 @@ #elif defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__DragonFly__) #include <pthread.h> #endif -#include <signal.h> #endif #define AUDIO_CAP "sdl" @@ -439,13 +439,7 @@ static int bdrv_open_common(BlockDriverState *bs, const char *filename, bs->drv = drv; bs->opaque = qemu_mallocz(drv->instance_size); - /* - * Yes, BDRV_O_NOCACHE aka O_DIRECT means we have to present a - * write cache to the guest. We do need the fdatasync to flush - * out transactions for block allocations, and we maybe have a - * volatile write cache in our backing device to deal with. - */ - if (flags & (BDRV_O_CACHE_WB|BDRV_O_NOCACHE)) + if (flags & BDRV_O_CACHE_WB) bs->enable_write_cache = 1; /* @@ -455,7 +449,7 @@ static int bdrv_open_common(BlockDriverState *bs, const char *filename, open_flags = flags & ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING); /* - * Snapshots should be writeable. + * Snapshots should be writable. 
*/ if (bs->is_temporary) { open_flags |= BDRV_O_RDWR; @@ -747,7 +741,7 @@ DeviceState *bdrv_get_attached(BlockDriverState *bs) * Run consistency checks on an image * * Returns 0 if the check could be completed (it doesn't mean that the image is - * free of errors) or -errno when an internal error occured. The results of the + * free of errors) or -errno when an internal error occurred. The results of the * check are stored in res. */ int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res) @@ -1305,13 +1299,6 @@ void bdrv_set_geometry_hint(BlockDriverState *bs, bs->secs = secs; } -void bdrv_set_type_hint(BlockDriverState *bs, int type) -{ - bs->type = type; - bs->removable = ((type == BDRV_TYPE_CDROM || - type == BDRV_TYPE_FLOPPY)); -} - void bdrv_set_translation_hint(BlockDriverState *bs, int translation) { bs->translation = translation; @@ -1428,11 +1415,6 @@ void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads, } } -int bdrv_get_type_hint(BlockDriverState *bs) -{ - return bs->type; -} - int bdrv_get_translation_hint(BlockDriverState *bs) { return bs->translation; @@ -1704,9 +1686,8 @@ static void bdrv_print_dict(QObject *obj, void *opaque) bs_dict = qobject_to_qdict(obj); - monitor_printf(mon, "%s: type=%s removable=%d", + monitor_printf(mon, "%s: removable=%d", qdict_get_str(bs_dict, "device"), - qdict_get_str(bs_dict, "type"), qdict_get_bool(bs_dict, "removable")); if (qdict_get_bool(bs_dict, "removable")) { @@ -1747,23 +1728,10 @@ void bdrv_info(Monitor *mon, QObject **ret_data) QTAILQ_FOREACH(bs, &bdrv_states, list) { QObject *bs_obj; - const char *type = "unknown"; - - switch(bs->type) { - case BDRV_TYPE_HD: - type = "hd"; - break; - case BDRV_TYPE_CDROM: - type = "cdrom"; - break; - case BDRV_TYPE_FLOPPY: - type = "floppy"; - break; - } - bs_obj = qobject_from_jsonf("{ 'device': %s, 'type': %s, " + bs_obj = qobject_from_jsonf("{ 'device': %s, 'type': 'unknown', " "'removable': %i, 'locked': %i }", - bs->device_name, type, bs->removable, + bs->device_name, bs->removable, bs->locked); if (bs->drv) { @@ -2913,7 +2881,7 @@ int bdrv_img_create(const char *filename, const char *fmt, char *options, uint64_t img_size, int flags) { QEMUOptionParameter *param = NULL, *create_options = NULL; - QEMUOptionParameter *backing_fmt, *backing_file; + QEMUOptionParameter *backing_fmt, *backing_file, *size; BlockDriverState *bs = NULL; BlockDriver *drv, *proto_drv; BlockDriver *backing_drv = NULL; @@ -2996,7 +2964,8 @@ int bdrv_img_create(const char *filename, const char *fmt, // The size for the image must always be specified, with one exception: // If we are using a backing file, we can obtain the size from there - if (get_option_parameter(param, BLOCK_OPT_SIZE)->value.n == -1) { + size = get_option_parameter(param, BLOCK_OPT_SIZE); + if (size && size->value.n == -1) { if (backing_file && backing_file->value.s) { uint64_t size; char buf[32]; @@ -152,9 +152,6 @@ int bdrv_has_zero_init(BlockDriverState *bs); int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum); -#define BDRV_TYPE_HD 0 -#define BDRV_TYPE_CDROM 1 -#define BDRV_TYPE_FLOPPY 2 #define BIOS_ATA_TRANSLATION_AUTO 0 #define BIOS_ATA_TRANSLATION_NONE 1 #define BIOS_ATA_TRANSLATION_LBA 2 @@ -163,7 +160,6 @@ int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors, void bdrv_set_geometry_hint(BlockDriverState *bs, int cyls, int heads, int secs); -void bdrv_set_type_hint(BlockDriverState *bs, int type); void bdrv_set_translation_hint(BlockDriverState *bs, int 
translation); void bdrv_get_geometry_hint(BlockDriverState *bs, int *pcyls, int *pheads, int *psecs); @@ -177,7 +173,6 @@ typedef enum FDriveType { void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads, int *max_track, int *last_sect, FDriveType drive_in, FDriveType *drive); -int bdrv_get_type_hint(BlockDriverState *bs); int bdrv_get_translation_hint(BlockDriverState *bs); void bdrv_set_on_error(BlockDriverState *bs, BlockErrorAction on_read_error, BlockErrorAction on_write_error); diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c index 750abe37d4..c9e7bbd9d6 100644 --- a/block/qcow2-cluster.c +++ b/block/qcow2-cluster.c @@ -70,7 +70,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size) ret = qcow2_cache_flush(bs, s->refcount_block_cache); if (ret < 0) { - return ret; + goto fail; } BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE); diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c index 915d85acbf..ac95b88fe1 100644 --- a/block/qcow2-refcount.c +++ b/block/qcow2-refcount.c @@ -1063,7 +1063,7 @@ fail: * Checks an image for refcount consistency. * * Returns 0 if no errors are found, the number of errors in case the image is - * detected as corrupted, and -errno when an internal error occured. + * detected as corrupted, and -errno when an internal error occurred. */ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res) { @@ -1086,7 +1086,7 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res) ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters, s->l1_table_offset, s->l1_size, 1); if (ret < 0) { - return ret; + goto fail; } /* snapshots */ @@ -1095,7 +1095,7 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res) ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters, sn->l1_table_offset, sn->l1_size, 0); if (ret < 0) { - return ret; + goto fail; } } inc_refcounts(bs, res, refcount_table, nb_clusters, @@ -1159,8 +1159,11 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res) } } + ret = 0; + +fail: qemu_free(refcount_table); - return 0; + return ret; } diff --git a/block/qcow2.c b/block/qcow2.c index 75b8becc0a..8451ded9a3 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -229,7 +229,7 @@ static int qcow2_open(BlockDriverState *bs, int flags) } /* alloc L2 table/refcount block cache */ - writethrough = ((flags & BDRV_O_CACHE_MASK) == 0); + writethrough = ((flags & BDRV_O_CACHE_WB) == 0); s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE, writethrough); s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE, writethrough); @@ -1036,7 +1036,7 @@ static int qcow2_create(const char *filename, QEMUOptionParameter *options) const char *backing_fmt = NULL; uint64_t sectors = 0; int flags = 0; - size_t cluster_size = 65536; + size_t cluster_size = DEFAULT_CLUSTER_SIZE; int prealloc = 0; /* Read out options */ @@ -1343,7 +1343,8 @@ static QEMUOptionParameter qcow2_create_options[] = { { .name = BLOCK_OPT_CLUSTER_SIZE, .type = OPT_SIZE, - .help = "qcow2 cluster size" + .help = "qcow2 cluster size", + .value = { .n = DEFAULT_CLUSTER_SIZE }, }, { .name = BLOCK_OPT_PREALLOC, diff --git a/block/qcow2.h b/block/qcow2.h index a019831838..e1ae3e8c2b 100644 --- a/block/qcow2.h +++ b/block/qcow2.h @@ -54,6 +54,8 @@ /* Must be at least 4 to cover all cases of refcount table growth */ #define REFCOUNT_CACHE_SIZE 4 +#define DEFAULT_CLUSTER_SIZE 65536 + typedef struct QCowHeader { uint32_t magic; uint32_t version; diff --git a/block/qed.c 
b/block/qed.c index c8c5930448..39703793e9 100644 --- a/block/qed.c +++ b/block/qed.c @@ -12,6 +12,7 @@ * */ +#include "qemu-timer.h" #include "trace.h" #include "qed.h" #include "qerror.h" @@ -291,6 +292,88 @@ static CachedL2Table *qed_new_l2_table(BDRVQEDState *s) static void qed_aio_next_io(void *opaque, int ret); +static void qed_plug_allocating_write_reqs(BDRVQEDState *s) +{ + assert(!s->allocating_write_reqs_plugged); + + s->allocating_write_reqs_plugged = true; +} + +static void qed_unplug_allocating_write_reqs(BDRVQEDState *s) +{ + QEDAIOCB *acb; + + assert(s->allocating_write_reqs_plugged); + + s->allocating_write_reqs_plugged = false; + + acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs); + if (acb) { + qed_aio_next_io(acb, 0); + } +} + +static void qed_finish_clear_need_check(void *opaque, int ret) +{ + /* Do nothing */ +} + +static void qed_flush_after_clear_need_check(void *opaque, int ret) +{ + BDRVQEDState *s = opaque; + + bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s); + + /* No need to wait until flush completes */ + qed_unplug_allocating_write_reqs(s); +} + +static void qed_clear_need_check(void *opaque, int ret) +{ + BDRVQEDState *s = opaque; + + if (ret) { + qed_unplug_allocating_write_reqs(s); + return; + } + + s->header.features &= ~QED_F_NEED_CHECK; + qed_write_header(s, qed_flush_after_clear_need_check, s); +} + +static void qed_need_check_timer_cb(void *opaque) +{ + BDRVQEDState *s = opaque; + + /* The timer should only fire when allocating writes have drained */ + assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs)); + + trace_qed_need_check_timer_cb(s); + + qed_plug_allocating_write_reqs(s); + + /* Ensure writes are on disk before clearing flag */ + bdrv_aio_flush(s->bs, qed_clear_need_check, s); +} + +static void qed_start_need_check_timer(BDRVQEDState *s) +{ + trace_qed_start_need_check_timer(s); + + /* Use vm_clock so we don't alter the image file while suspended for + * migration. 
+ */ + qemu_mod_timer(s->need_check_timer, qemu_get_clock_ns(vm_clock) + + get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT); +} + +/* It's okay to call this multiple times or when no timer is started */ +static void qed_cancel_need_check_timer(BDRVQEDState *s) +{ + trace_qed_cancel_need_check_timer(s); + qemu_del_timer(s->need_check_timer); +} + static int bdrv_qed_open(BlockDriverState *bs, int flags) { BDRVQEDState *s = bs->opaque; @@ -406,7 +489,10 @@ static int bdrv_qed_open(BlockDriverState *bs, int flags) BdrvCheckResult result = {0}; ret = qed_check(s, &result, true); - if (!ret && !result.corruptions && !result.check_errors) { + if (ret) { + goto out; + } + if (!result.corruptions && !result.check_errors) { /* Ensure fixes reach storage before clearing check bit */ bdrv_flush(s->bs); @@ -416,6 +502,9 @@ static int bdrv_qed_open(BlockDriverState *bs, int flags) } } + s->need_check_timer = qemu_new_timer_ns(vm_clock, + qed_need_check_timer_cb, s); + out: if (ret) { qed_free_l2_cache(&s->l2_cache); @@ -428,6 +517,9 @@ static void bdrv_qed_close(BlockDriverState *bs) { BDRVQEDState *s = bs->opaque; + qed_cancel_need_check_timer(s); + qemu_free_timer(s->need_check_timer); + /* Ensure writes reach stable storage */ bdrv_flush(bs->file); @@ -809,6 +901,8 @@ static void qed_aio_complete(QEDAIOCB *acb, int ret) acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs); if (acb) { qed_aio_next_io(acb, 0); + } else if (s->header.features & QED_F_NEED_CHECK) { + qed_start_need_check_timer(s); } } } @@ -1014,11 +1108,17 @@ static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len) { BDRVQEDState *s = acb_to_s(acb); + /* Cancel timer when the first allocating request comes in */ + if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) { + qed_cancel_need_check_timer(s); + } + /* Freeze this request if another allocating write is in progress */ if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) { QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next); } - if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) { + if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) || + s->allocating_write_reqs_plugged) { return; /* wait for existing request to finish */ } @@ -1233,7 +1333,27 @@ static BlockDriverAIOCB *bdrv_qed_aio_flush(BlockDriverState *bs, static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset) { - return -ENOTSUP; + BDRVQEDState *s = bs->opaque; + uint64_t old_image_size; + int ret; + + if (!qed_is_image_size_valid(offset, s->header.cluster_size, + s->header.table_size)) { + return -EINVAL; + } + + /* Shrinking is currently not supported */ + if ((uint64_t)offset < s->header.image_size) { + return -ENOTSUP; + } + + old_image_size = s->header.image_size; + s->header.image_size = offset; + ret = qed_write_header_sync(s); + if (ret < 0) { + s->header.image_size = old_image_size; + } + return ret; } static int64_t bdrv_qed_getlength(BlockDriverState *bs) @@ -1344,7 +1464,8 @@ static QEMUOptionParameter qed_create_options[] = { }, { .name = BLOCK_OPT_CLUSTER_SIZE, .type = OPT_SIZE, - .help = "Cluster size (in bytes)" + .help = "Cluster size (in bytes)", + .value = { .n = QED_DEFAULT_CLUSTER_SIZE }, }, { .name = BLOCK_OPT_TABLE_SIZE, .type = OPT_SIZE, diff --git a/block/qed.h b/block/qed.h index 1d1421fee1..388fdb3760 100644 --- a/block/qed.h +++ b/block/qed.h @@ -78,6 +78,9 @@ enum { QED_MIN_TABLE_SIZE = 1, /* in clusters */ QED_MAX_TABLE_SIZE = 16, QED_DEFAULT_TABLE_SIZE = 4, + + /* Delay to flush and clean image after last allocating write completes */ + QED_NEED_CHECK_TIMEOUT = 5, /* in seconds */ 
}; typedef struct { @@ -157,6 +160,10 @@ typedef struct { /* Allocating write request queue */ QSIMPLEQ_HEAD(, QEDAIOCB) allocating_write_reqs; + bool allocating_write_reqs_plugged; + + /* Periodic flush and clear need check flag */ + QEMUTimer *need_check_timer; } BDRVQEDState; enum { diff --git a/block/raw-posix.c b/block/raw-posix.c index 6b72470599..4cd7d7afbb 100644 --- a/block/raw-posix.c +++ b/block/raw-posix.c @@ -43,7 +43,6 @@ #ifdef __sun__ #define _POSIX_PTHREAD_SEMANTICS 1 -#include <signal.h> #include <sys/dkio.h> #endif #ifdef __linux__ @@ -53,7 +52,6 @@ #include <linux/fd.h> #endif #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__) -#include <signal.h> #include <sys/disk.h> #include <sys/cdio.h> #endif @@ -64,6 +62,13 @@ #include <sys/dkio.h> #endif +#ifdef __NetBSD__ +#include <sys/ioctl.h> +#include <sys/disklabel.h> +#include <sys/dkio.h> +#include <sys/disk.h> +#endif + #ifdef __DragonFly__ #include <sys/ioctl.h> #include <sys/diskslice.h> @@ -136,12 +141,55 @@ static int64_t raw_getlength(BlockDriverState *bs); static int cdrom_reopen(BlockDriverState *bs); #endif +#if defined(__NetBSD__) +static int raw_normalize_devicepath(const char **filename) +{ + static char namebuf[PATH_MAX]; + const char *dp, *fname; + struct stat sb; + + fname = *filename; + dp = strrchr(fname, '/'); + if (lstat(fname, &sb) < 0) { + fprintf(stderr, "%s: stat failed: %s\n", + fname, strerror(errno)); + return -errno; + } + + if (!S_ISBLK(sb.st_mode)) { + return 0; + } + + if (dp == NULL) { + snprintf(namebuf, PATH_MAX, "r%s", fname); + } else { + snprintf(namebuf, PATH_MAX, "%.*s/r%s", + (int)(dp - fname), fname, dp + 1); + } + fprintf(stderr, "%s is a block device", fname); + *filename = namebuf; + fprintf(stderr, ", using %s\n", *filename); + + return 0; +} +#else +static int raw_normalize_devicepath(const char **filename) +{ + return 0; +} +#endif + static int raw_open_common(BlockDriverState *bs, const char *filename, int bdrv_flags, int open_flags) { BDRVRawState *s = bs->opaque; int fd, ret; + ret = raw_normalize_devicepath(&filename); + if (ret != 0) { + return ret; + } + s->open_flags = open_flags | O_BINARY; s->open_flags &= ~O_ACCMODE; if (bdrv_flags & BDRV_O_RDWR) { @@ -154,7 +202,7 @@ static int raw_open_common(BlockDriverState *bs, const char *filename, * and O_DIRECT for no caching. 
*/ if ((bdrv_flags & BDRV_O_NOCACHE)) s->open_flags |= O_DIRECT; - else if (!(bdrv_flags & BDRV_O_CACHE_WB)) + if (!(bdrv_flags & BDRV_O_CACHE_WB)) s->open_flags |= O_DSYNC; s->fd = -1; @@ -622,6 +670,31 @@ static int64_t raw_getlength(BlockDriverState *bs) } else return st.st_size; } +#elif defined(__NetBSD__) +static int64_t raw_getlength(BlockDriverState *bs) +{ + BDRVRawState *s = bs->opaque; + int fd = s->fd; + struct stat st; + + if (fstat(fd, &st)) + return -1; + if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) { + struct dkwedge_info dkw; + + if (ioctl(fd, DIOCGWEDGEINFO, &dkw) != -1) { + return dkw.dkw_size * 512; + } else { + struct disklabel dl; + + if (ioctl(fd, DIOCGDINFO, &dl)) + return -1; + return (uint64_t)dl.d_secsize * + dl.d_partitions[DISKPART(st.st_rdev)].p_size; + } + } else + return st.st_size; +} #elif defined(__sun__) static int64_t raw_getlength(BlockDriverState *bs) { diff --git a/block/raw-win32.c b/block/raw-win32.c index c204a80d79..56bd7195a1 100644 --- a/block/raw-win32.c +++ b/block/raw-win32.c @@ -88,9 +88,9 @@ static int raw_open(BlockDriverState *bs, const char *filename, int flags) } overlapped = FILE_ATTRIBUTE_NORMAL; - if ((flags & BDRV_O_NOCACHE)) - overlapped |= FILE_FLAG_NO_BUFFERING | FILE_FLAG_WRITE_THROUGH; - else if (!(flags & BDRV_O_CACHE_WB)) + if (flags & BDRV_O_NOCACHE) + overlapped |= FILE_FLAG_NO_BUFFERING; + if (!(flags & BDRV_O_CACHE_WB)) overlapped |= FILE_FLAG_WRITE_THROUGH; s->hfile = CreateFile(filename, access_flags, FILE_SHARE_READ, NULL, @@ -349,9 +349,9 @@ static int hdev_open(BlockDriverState *bs, const char *filename, int flags) create_flags = OPEN_EXISTING; overlapped = FILE_ATTRIBUTE_NORMAL; - if ((flags & BDRV_O_NOCACHE)) - overlapped |= FILE_FLAG_NO_BUFFERING | FILE_FLAG_WRITE_THROUGH; - else if (!(flags & BDRV_O_CACHE_WB)) + if (flags & BDRV_O_NOCACHE) + overlapped |= FILE_FLAG_NO_BUFFERING; + if (!(flags & BDRV_O_CACHE_WB)) overlapped |= FILE_FLAG_WRITE_THROUGH; s->hfile = CreateFile(filename, access_flags, FILE_SHARE_READ, NULL, diff --git a/block/rbd.c b/block/rbd.c index 249a590c98..bdc448aa9f 100644 --- a/block/rbd.c +++ b/block/rbd.c @@ -1,33 +1,39 @@ /* * QEMU Block driver for RADOS (Ceph) * - * Copyright (C) 2010 Christian Brunner <chb@muc.de> + * Copyright (C) 2010-2011 Christian Brunner <chb@muc.de>, + * Josh Durgin <josh.durgin@dreamhost.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ +#include <inttypes.h> + #include "qemu-common.h" #include "qemu-error.h" -#include "rbd_types.h" #include "block_int.h" -#include <rados/librados.h> +#include <rbd/librbd.h> /* * When specifying the image filename use: * - * rbd:poolname/devicename + * rbd:poolname/devicename[@snapshotname][:option1=value1[:option2=value2...]] * * poolname must be the name of an existing rados pool * * devicename is the basename for all objects used to * emulate the raw device. * + * Each option given is used to configure rados, and may be + * any Ceph option, or "conf". The "conf" option specifies + * a Ceph configuration file to read. + * * Metadata information (image size, ...) is stored in an * object with the name "devicename.rbd". 
* @@ -40,6 +46,13 @@ #define OBJ_MAX_SIZE (1UL << OBJ_DEFAULT_OBJ_ORDER) +#define RBD_MAX_CONF_NAME_SIZE 128 +#define RBD_MAX_CONF_VAL_SIZE 512 +#define RBD_MAX_CONF_SIZE 1024 +#define RBD_MAX_POOL_NAME_SIZE 128 +#define RBD_MAX_SNAP_NAME_SIZE 128 +#define RBD_MAX_SNAPS 100 + typedef struct RBDAIOCB { BlockDriverAIOCB common; QEMUBH *bh; @@ -48,7 +61,6 @@ typedef struct RBDAIOCB { char *bounce; int write; int64_t sector_num; - int aiocnt; int error; struct BDRVRBDState *s; int cancelled; @@ -59,7 +71,7 @@ typedef struct RADOSCB { RBDAIOCB *acb; struct BDRVRBDState *s; int done; - int64_t segsize; + int64_t size; char *buf; int ret; } RADOSCB; @@ -69,25 +81,22 @@ typedef struct RADOSCB { typedef struct BDRVRBDState { int fds[2]; - rados_pool_t pool; - rados_pool_t header_pool; - char name[RBD_MAX_OBJ_NAME_SIZE]; - char block_name[RBD_MAX_BLOCK_NAME_SIZE]; - uint64_t size; - uint64_t objsize; + rados_t cluster; + rados_ioctx_t io_ctx; + rbd_image_t image; + char name[RBD_MAX_IMAGE_NAME_SIZE]; int qemu_aio_count; + char *snap; int event_reader_pos; RADOSCB *event_rcb; } BDRVRBDState; -typedef struct rbd_obj_header_ondisk RbdHeader1; - static void rbd_aio_bh_cb(void *opaque); -static int rbd_next_tok(char *dst, int dst_len, - char *src, char delim, - const char *name, - char **p) +static int qemu_rbd_next_tok(char *dst, int dst_len, + char *src, char delim, + const char *name, + char **p) { int l; char *end; @@ -115,10 +124,11 @@ static int rbd_next_tok(char *dst, int dst_len, return 0; } -static int rbd_parsename(const char *filename, - char *pool, int pool_len, - char *snap, int snap_len, - char *name, int name_len) +static int qemu_rbd_parsename(const char *filename, + char *pool, int pool_len, + char *snap, int snap_len, + char *name, int name_len, + char *conf, int conf_len) { const char *start; char *p, *buf; @@ -130,138 +140,108 @@ static int rbd_parsename(const char *filename, buf = qemu_strdup(start); p = buf; + *snap = '\0'; + *conf = '\0'; - ret = rbd_next_tok(pool, pool_len, p, '/', "pool name", &p); + ret = qemu_rbd_next_tok(pool, pool_len, p, '/', "pool name", &p); if (ret < 0 || !p) { ret = -EINVAL; goto done; } - ret = rbd_next_tok(name, name_len, p, '@', "object name", &p); - if (ret < 0) { - goto done; + + if (strchr(p, '@')) { + ret = qemu_rbd_next_tok(name, name_len, p, '@', "object name", &p); + if (ret < 0) { + goto done; + } + ret = qemu_rbd_next_tok(snap, snap_len, p, ':', "snap name", &p); + } else { + ret = qemu_rbd_next_tok(name, name_len, p, ':', "object name", &p); } - if (!p) { - *snap = '\0'; + if (ret < 0 || !p) { goto done; } - ret = rbd_next_tok(snap, snap_len, p, '\0', "snap name", &p); + ret = qemu_rbd_next_tok(conf, conf_len, p, '\0', "configuration", &p); done: qemu_free(buf); return ret; } -static int create_tmap_op(uint8_t op, const char *name, char **tmap_desc) -{ - uint32_t len = strlen(name); - uint32_t len_le = cpu_to_le32(len); - /* total_len = encoding op + name + empty buffer */ - uint32_t total_len = 1 + (sizeof(uint32_t) + len) + sizeof(uint32_t); - uint8_t *desc = NULL; - - desc = qemu_malloc(total_len); - - *tmap_desc = (char *)desc; - - *desc = op; - desc++; - memcpy(desc, &len_le, sizeof(len_le)); - desc += sizeof(len_le); - memcpy(desc, name, len); - desc += len; - len = 0; /* no need for endian conversion for 0 */ - memcpy(desc, &len, sizeof(len)); - desc += sizeof(len); - - return (char *)desc - *tmap_desc; -} - -static void free_tmap_op(char *tmap_desc) +static int qemu_rbd_set_conf(rados_t cluster, const char *conf) { - 
qemu_free(tmap_desc); -} - -static int rbd_register_image(rados_pool_t pool, const char *name) -{ - char *tmap_desc; - const char *dir = RBD_DIRECTORY; - int ret; - - ret = create_tmap_op(CEPH_OSD_TMAP_SET, name, &tmap_desc); - if (ret < 0) { - return ret; - } - - ret = rados_tmap_update(pool, dir, tmap_desc, ret); - free_tmap_op(tmap_desc); - - return ret; -} + char *p, *buf; + char name[RBD_MAX_CONF_NAME_SIZE]; + char value[RBD_MAX_CONF_VAL_SIZE]; + int ret = 0; -static int touch_rbd_info(rados_pool_t pool, const char *info_oid) -{ - int r = rados_write(pool, info_oid, 0, NULL, 0); - if (r < 0) { - return r; - } - return 0; -} + buf = qemu_strdup(conf); + p = buf; -static int rbd_assign_bid(rados_pool_t pool, uint64_t *id) -{ - uint64_t out[1]; - const char *info_oid = RBD_INFO; + while (p) { + ret = qemu_rbd_next_tok(name, sizeof(name), p, + '=', "conf option name", &p); + if (ret < 0) { + break; + } - *id = 0; + if (!p) { + error_report("conf option %s has no value", name); + ret = -EINVAL; + break; + } - int r = touch_rbd_info(pool, info_oid); - if (r < 0) { - return r; - } + ret = qemu_rbd_next_tok(value, sizeof(value), p, + ':', "conf option value", &p); + if (ret < 0) { + break; + } - r = rados_exec(pool, info_oid, "rbd", "assign_bid", NULL, - 0, (char *)out, sizeof(out)); - if (r < 0) { - return r; + if (strcmp(name, "conf")) { + ret = rados_conf_set(cluster, name, value); + if (ret < 0) { + error_report("invalid conf option %s", name); + ret = -EINVAL; + break; + } + } else { + ret = rados_conf_read_file(cluster, value); + if (ret < 0) { + error_report("error reading conf file %s", value); + break; + } + } } - le64_to_cpus(out); - *id = out[0]; - - return 0; + qemu_free(buf); + return ret; } -static int rbd_create(const char *filename, QEMUOptionParameter *options) +static int qemu_rbd_create(const char *filename, QEMUOptionParameter *options) { int64_t bytes = 0; int64_t objsize; - uint64_t size; - time_t mtime; - uint8_t obj_order = RBD_DEFAULT_OBJ_ORDER; - char pool[RBD_MAX_SEG_NAME_SIZE]; - char n[RBD_MAX_SEG_NAME_SIZE]; - char name[RBD_MAX_OBJ_NAME_SIZE]; - char snap_buf[RBD_MAX_SEG_NAME_SIZE]; + int obj_order = 0; + char pool[RBD_MAX_POOL_NAME_SIZE]; + char name[RBD_MAX_IMAGE_NAME_SIZE]; + char snap_buf[RBD_MAX_SNAP_NAME_SIZE]; + char conf[RBD_MAX_CONF_SIZE]; char *snap = NULL; - RbdHeader1 header; - rados_pool_t p; - uint64_t bid; - uint32_t hi, lo; + rados_t cluster; + rados_ioctx_t io_ctx; int ret; - if (rbd_parsename(filename, - pool, sizeof(pool), - snap_buf, sizeof(snap_buf), - name, sizeof(name)) < 0) { + if (qemu_rbd_parsename(filename, pool, sizeof(pool), + snap_buf, sizeof(snap_buf), + name, sizeof(name), + conf, sizeof(conf)) < 0) { return -EINVAL; } if (snap_buf[0] != '\0') { snap = snap_buf; } - snprintf(n, sizeof(n), "%s%s", name, RBD_SUFFIX); - /* Read out options */ while (options && options->name) { if (!strcmp(options->name, BLOCK_OPT_SIZE)) { @@ -277,82 +257,64 @@ static int rbd_create(const char *filename, QEMUOptionParameter *options) error_report("obj size too small"); return -EINVAL; } - obj_order = ffs(objsize) - 1; + obj_order = ffs(objsize) - 1; } } options++; } - memset(&header, 0, sizeof(header)); - pstrcpy(header.text, sizeof(header.text), RBD_HEADER_TEXT); - pstrcpy(header.signature, sizeof(header.signature), RBD_HEADER_SIGNATURE); - pstrcpy(header.version, sizeof(header.version), RBD_HEADER_VERSION); - header.image_size = cpu_to_le64(bytes); - header.options.order = obj_order; - header.options.crypt_type = RBD_CRYPT_NONE; - 
header.options.comp_type = RBD_COMP_NONE; - header.snap_seq = 0; - header.snap_count = 0; - - if (rados_initialize(0, NULL) < 0) { + if (rados_create(&cluster, NULL) < 0) { error_report("error initializing"); return -EIO; } - if (rados_open_pool(pool, &p)) { - error_report("error opening pool %s", pool); - rados_deinitialize(); - return -EIO; + if (strstr(conf, "conf=") == NULL) { + if (rados_conf_read_file(cluster, NULL) < 0) { + error_report("error reading config file"); + rados_shutdown(cluster); + return -EIO; + } } - /* check for existing rbd header file */ - ret = rados_stat(p, n, &size, &mtime); - if (ret == 0) { - ret=-EEXIST; - goto done; + if (conf[0] != '\0' && + qemu_rbd_set_conf(cluster, conf) < 0) { + error_report("error setting config options"); + rados_shutdown(cluster); + return -EIO; } - ret = rbd_assign_bid(p, &bid); - if (ret < 0) { - error_report("failed assigning block id"); - rados_deinitialize(); + if (rados_connect(cluster) < 0) { + error_report("error connecting"); + rados_shutdown(cluster); return -EIO; } - hi = bid >> 32; - lo = bid & 0xFFFFFFFF; - snprintf(header.block_name, sizeof(header.block_name), "rb.%x.%x", hi, lo); - /* create header file */ - ret = rados_write(p, n, 0, (const char *)&header, sizeof(header)); - if (ret < 0) { - goto done; + if (rados_ioctx_create(cluster, pool, &io_ctx) < 0) { + error_report("error opening pool %s", pool); + rados_shutdown(cluster); + return -EIO; } - ret = rbd_register_image(p, name); -done: - rados_close_pool(p); - rados_deinitialize(); + ret = rbd_create(io_ctx, name, bytes, &obj_order); + rados_ioctx_destroy(io_ctx); + rados_shutdown(cluster); return ret; } /* - * This aio completion is being called from rbd_aio_event_reader() and - * runs in qemu context. It schedules a bh, but just in case the aio + * This aio completion is being called from qemu_rbd_aio_event_reader() + * and runs in qemu context. It schedules a bh, but just in case the aio * was not cancelled before. */ -static void rbd_complete_aio(RADOSCB *rcb) +static void qemu_rbd_complete_aio(RADOSCB *rcb) { RBDAIOCB *acb = rcb->acb; int64_t r; - acb->aiocnt--; - if (acb->cancelled) { - if (!acb->aiocnt) { - qemu_vfree(acb->bounce); - qemu_aio_release(acb); - } + qemu_vfree(acb->bounce); + qemu_aio_release(acb); goto done; } @@ -363,32 +325,25 @@ static void rbd_complete_aio(RADOSCB *rcb) acb->ret = r; acb->error = 1; } else if (!acb->error) { - acb->ret += rcb->segsize; + acb->ret = rcb->size; } } else { - if (r == -ENOENT) { - memset(rcb->buf, 0, rcb->segsize); - if (!acb->error) { - acb->ret += rcb->segsize; - } - } else if (r < 0) { - memset(rcb->buf, 0, rcb->segsize); + if (r < 0) { + memset(rcb->buf, 0, rcb->size); acb->ret = r; acb->error = 1; - } else if (r < rcb->segsize) { - memset(rcb->buf + r, 0, rcb->segsize - r); + } else if (r < rcb->size) { + memset(rcb->buf + r, 0, rcb->size - r); if (!acb->error) { - acb->ret += rcb->segsize; + acb->ret = rcb->size; } } else if (!acb->error) { - acb->ret += r; + acb->ret = r; } } /* Note that acb->bh can be NULL in case where the aio was cancelled */ - if (!acb->aiocnt) { - acb->bh = qemu_bh_new(rbd_aio_bh_cb, acb); - qemu_bh_schedule(acb->bh); - } + acb->bh = qemu_bh_new(rbd_aio_bh_cb, acb); + qemu_bh_schedule(acb->bh); done: qemu_free(rcb); } @@ -397,7 +352,7 @@ done: * aio fd read handler. It runs in the qemu context and calls the * completion handling of completed rados aio operations. 
*/ -static void rbd_aio_event_reader(void *opaque) +static void qemu_rbd_aio_event_reader(void *opaque) { BDRVRBDState *s = opaque; @@ -413,176 +368,87 @@ static void rbd_aio_event_reader(void *opaque) s->event_reader_pos += ret; if (s->event_reader_pos == sizeof(s->event_rcb)) { s->event_reader_pos = 0; - rbd_complete_aio(s->event_rcb); - s->qemu_aio_count --; + qemu_rbd_complete_aio(s->event_rcb); + s->qemu_aio_count--; } } } } while (ret < 0 && errno == EINTR); } -static int rbd_aio_flush_cb(void *opaque) +static int qemu_rbd_aio_flush_cb(void *opaque) { BDRVRBDState *s = opaque; return (s->qemu_aio_count > 0); } - -static int rbd_set_snapc(rados_pool_t pool, const char *snap, RbdHeader1 *header) -{ - uint32_t snap_count = le32_to_cpu(header->snap_count); - rados_snap_t *snaps = NULL; - rados_snap_t seq; - uint32_t i; - uint64_t snap_names_len = le64_to_cpu(header->snap_names_len); - int r; - rados_snap_t snapid = 0; - - if (snap_count) { - const char *header_snap = (const char *)&header->snaps[snap_count]; - const char *end = header_snap + snap_names_len; - snaps = qemu_malloc(sizeof(rados_snap_t) * header->snap_count); - - for (i=0; i < snap_count; i++) { - snaps[i] = le64_to_cpu(header->snaps[i].id); - - if (snap && strcmp(snap, header_snap) == 0) { - snapid = snaps[i]; - } - - header_snap += strlen(header_snap) + 1; - if (header_snap > end) { - error_report("bad header, snapshot list broken"); - } - } - } - - if (snap && !snapid) { - error_report("snapshot not found"); - qemu_free(snaps); - return -ENOENT; - } - seq = le32_to_cpu(header->snap_seq); - - r = rados_set_snap_context(pool, seq, snaps, snap_count); - - rados_set_snap(pool, snapid); - - qemu_free(snaps); - - return r; -} - -#define BUF_READ_START_LEN 4096 - -static int rbd_read_header(BDRVRBDState *s, char **hbuf) -{ - char *buf = NULL; - char n[RBD_MAX_SEG_NAME_SIZE]; - uint64_t len = BUF_READ_START_LEN; - int r; - - snprintf(n, sizeof(n), "%s%s", s->name, RBD_SUFFIX); - - buf = qemu_malloc(len); - - r = rados_read(s->header_pool, n, 0, buf, len); - if (r < 0) { - goto failed; - } - - if (r < len) { - goto done; - } - - qemu_free(buf); - buf = qemu_malloc(len); - - r = rados_stat(s->header_pool, n, &len, NULL); - if (r < 0) { - goto failed; - } - - r = rados_read(s->header_pool, n, 0, buf, len); - if (r < 0) { - goto failed; - } - -done: - *hbuf = buf; - return 0; - -failed: - qemu_free(buf); - return r; -} - -static int rbd_open(BlockDriverState *bs, const char *filename, int flags) +static int qemu_rbd_open(BlockDriverState *bs, const char *filename, int flags) { BDRVRBDState *s = bs->opaque; - RbdHeader1 *header; - char pool[RBD_MAX_SEG_NAME_SIZE]; - char snap_buf[RBD_MAX_SEG_NAME_SIZE]; - char *snap = NULL; - char *hbuf = NULL; + char pool[RBD_MAX_POOL_NAME_SIZE]; + char snap_buf[RBD_MAX_SNAP_NAME_SIZE]; + char conf[RBD_MAX_CONF_SIZE]; int r; - if (rbd_parsename(filename, pool, sizeof(pool), - snap_buf, sizeof(snap_buf), - s->name, sizeof(s->name)) < 0) { + if (qemu_rbd_parsename(filename, pool, sizeof(pool), + snap_buf, sizeof(snap_buf), + s->name, sizeof(s->name), + conf, sizeof(conf)) < 0) { return -EINVAL; } + s->snap = NULL; if (snap_buf[0] != '\0') { - snap = snap_buf; + s->snap = qemu_strdup(snap_buf); } - if ((r = rados_initialize(0, NULL)) < 0) { + r = rados_create(&s->cluster, NULL); + if (r < 0) { error_report("error initializing"); return r; } - if ((r = rados_open_pool(pool, &s->pool))) { - error_report("error opening pool %s", pool); - rados_deinitialize(); - return r; - } - - if ((r = 
rados_open_pool(pool, &s->header_pool))) { - error_report("error opening pool %s", pool); - rados_deinitialize(); - return r; + if (strstr(conf, "conf=") == NULL) { + r = rados_conf_read_file(s->cluster, NULL); + if (r < 0) { + error_report("error reading config file"); + rados_shutdown(s->cluster); + return r; + } } - if ((r = rbd_read_header(s, &hbuf)) < 0) { - error_report("error reading header from %s", s->name); - goto failed; + if (conf[0] != '\0') { + r = qemu_rbd_set_conf(s->cluster, conf); + if (r < 0) { + error_report("error setting config options"); + rados_shutdown(s->cluster); + return r; + } } - if (memcmp(hbuf + 64, RBD_HEADER_SIGNATURE, 4)) { - error_report("Invalid header signature"); - r = -EMEDIUMTYPE; - goto failed; + r = rados_connect(s->cluster); + if (r < 0) { + error_report("error connecting"); + rados_shutdown(s->cluster); + return r; } - if (memcmp(hbuf + 68, RBD_HEADER_VERSION, 8)) { - error_report("Unknown image version"); - r = -EMEDIUMTYPE; - goto failed; + r = rados_ioctx_create(s->cluster, pool, &s->io_ctx); + if (r < 0) { + error_report("error opening pool %s", pool); + rados_shutdown(s->cluster); + return r; } - header = (RbdHeader1 *) hbuf; - s->size = le64_to_cpu(header->image_size); - s->objsize = 1ULL << header->options.order; - memcpy(s->block_name, header->block_name, sizeof(header->block_name)); - - r = rbd_set_snapc(s->pool, snap, header); + r = rbd_open(s->io_ctx, s->name, &s->image, s->snap); if (r < 0) { - error_report("failed setting snap context: %s", strerror(-r)); - goto failed; + error_report("error reading header from %s", s->name); + rados_ioctx_destroy(s->io_ctx); + rados_shutdown(s->cluster); + return r; } - bs->read_only = (snap != NULL); + bs->read_only = (s->snap != NULL); s->event_reader_pos = 0; r = qemu_pipe(s->fds); @@ -592,23 +458,20 @@ static int rbd_open(BlockDriverState *bs, const char *filename, int flags) } fcntl(s->fds[0], F_SETFL, O_NONBLOCK); fcntl(s->fds[1], F_SETFL, O_NONBLOCK); - qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], rbd_aio_event_reader, NULL, - rbd_aio_flush_cb, NULL, s); + qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader, + NULL, qemu_rbd_aio_flush_cb, NULL, s); - qemu_free(hbuf); return 0; failed: - qemu_free(hbuf); - - rados_close_pool(s->header_pool); - rados_close_pool(s->pool); - rados_deinitialize(); + rbd_close(s->image); + rados_ioctx_destroy(s->io_ctx); + rados_shutdown(s->cluster); return r; } -static void rbd_close(BlockDriverState *bs) +static void qemu_rbd_close(BlockDriverState *bs) { BDRVRBDState *s = bs->opaque; @@ -617,16 +480,17 @@ static void rbd_close(BlockDriverState *bs) qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL , NULL, NULL, NULL, NULL); - rados_close_pool(s->header_pool); - rados_close_pool(s->pool); - rados_deinitialize(); + rbd_close(s->image); + rados_ioctx_destroy(s->io_ctx); + qemu_free(s->snap); + rados_shutdown(s->cluster); } /* * Cancel aio. Since we don't reference acb in a non qemu threads, * it is safe to access it here. 
*/ -static void rbd_aio_cancel(BlockDriverAIOCB *blockacb) +static void qemu_rbd_aio_cancel(BlockDriverAIOCB *blockacb) { RBDAIOCB *acb = (RBDAIOCB *) blockacb; acb->cancelled = 1; @@ -634,39 +498,28 @@ static void rbd_aio_cancel(BlockDriverAIOCB *blockacb) static AIOPool rbd_aio_pool = { .aiocb_size = sizeof(RBDAIOCB), - .cancel = rbd_aio_cancel, + .cancel = qemu_rbd_aio_cancel, }; -/* - * This is the callback function for rados_aio_read and _write - * - * Note: this function is being called from a non qemu thread so - * we need to be careful about what we do here. Generally we only - * write to the block notification pipe, and do the rest of the - * io completion handling from rbd_aio_event_reader() which - * runs in a qemu context. - */ -static void rbd_finish_aiocb(rados_completion_t c, RADOSCB *rcb) +static int qemu_rbd_send_pipe(BDRVRBDState *s, RADOSCB *rcb) { - int ret; - rcb->ret = rados_aio_get_return_value(c); - rados_aio_release(c); + int ret = 0; while (1) { fd_set wfd; - int fd = rcb->s->fds[RBD_FD_WRITE]; + int fd = s->fds[RBD_FD_WRITE]; - /* send the rcb pointer to the qemu thread that is responsible - for the aio completion. Must do it in a qemu thread context */ + /* send the op pointer to the qemu thread that is responsible + for the aio/op completion. Must do it in a qemu thread context */ ret = write(fd, (void *)&rcb, sizeof(rcb)); if (ret >= 0) { break; } if (errno == EINTR) { continue; - } + } if (errno != EAGAIN) { break; - } + } FD_ZERO(&wfd); FD_SET(fd, &wfd); @@ -675,13 +528,31 @@ static void rbd_finish_aiocb(rados_completion_t c, RADOSCB *rcb) } while (ret < 0 && errno == EINTR); } + return ret; +} + +/* + * This is the callback function for rbd_aio_read and _write + * + * Note: this function is being called from a non qemu thread so + * we need to be careful about what we do here. Generally we only + * write to the block notification pipe, and do the rest of the + * io completion handling from qemu_rbd_aio_event_reader() which + * runs in a qemu context. 
+ */ +static void rbd_finish_aiocb(rbd_completion_t c, RADOSCB *rcb) +{ + int ret; + rcb->ret = rbd_aio_get_return_value(c); + rbd_aio_release(c); + ret = qemu_rbd_send_pipe(rcb->s, rcb); if (ret < 0) { - error_report("failed writing to acb->s->fds\n"); + error_report("failed writing to acb->s->fds"); qemu_free(rcb); } } -/* Callback when all queued rados_aio requests are complete */ +/* Callback when all queued rbd_aio requests are complete */ static void rbd_aio_bh_cb(void *opaque) { @@ -707,19 +578,20 @@ static BlockDriverAIOCB *rbd_aio_rw_vector(BlockDriverState *bs, { RBDAIOCB *acb; RADOSCB *rcb; - rados_completion_t c; - char n[RBD_MAX_SEG_NAME_SIZE]; - int64_t segnr, segoffs, segsize, last_segnr; + rbd_completion_t c; int64_t off, size; char *buf; + int r; BDRVRBDState *s = bs->opaque; acb = qemu_aio_get(&rbd_aio_pool, bs, cb, opaque); + if (!acb) { + return NULL; + } acb->write = write; acb->qiov = qiov; acb->bounce = qemu_blockalign(bs, qiov->size); - acb->aiocnt = 0; acb->ret = 0; acb->error = 0; acb->s = s; @@ -734,95 +606,106 @@ static BlockDriverAIOCB *rbd_aio_rw_vector(BlockDriverState *bs, off = sector_num * BDRV_SECTOR_SIZE; size = nb_sectors * BDRV_SECTOR_SIZE; - segnr = off / s->objsize; - segoffs = off % s->objsize; - segsize = s->objsize - segoffs; - - last_segnr = ((off + size - 1) / s->objsize); - acb->aiocnt = (last_segnr - segnr) + 1; - s->qemu_aio_count += acb->aiocnt; /* All the RADOSCB */ + s->qemu_aio_count++; /* All the RADOSCB */ - while (size > 0) { - if (size < segsize) { - segsize = size; - } + rcb = qemu_malloc(sizeof(RADOSCB)); + rcb->done = 0; + rcb->acb = acb; + rcb->buf = buf; + rcb->s = acb->s; + rcb->size = size; + r = rbd_aio_create_completion(rcb, (rbd_callback_t) rbd_finish_aiocb, &c); + if (r < 0) { + goto failed; + } - snprintf(n, sizeof(n), "%s.%012" PRIx64, s->block_name, - segnr); - - rcb = qemu_malloc(sizeof(RADOSCB)); - rcb->done = 0; - rcb->acb = acb; - rcb->segsize = segsize; - rcb->buf = buf; - rcb->s = acb->s; - - if (write) { - rados_aio_create_completion(rcb, NULL, - (rados_callback_t) rbd_finish_aiocb, - &c); - rados_aio_write(s->pool, n, segoffs, buf, segsize, c); - } else { - rados_aio_create_completion(rcb, - (rados_callback_t) rbd_finish_aiocb, - NULL, &c); - rados_aio_read(s->pool, n, segoffs, buf, segsize, c); - } + if (write) { + r = rbd_aio_write(s->image, off, size, buf, c); + } else { + r = rbd_aio_read(s->image, off, size, buf, c); + } - buf += segsize; - size -= segsize; - segoffs = 0; - segsize = s->objsize; - segnr++; + if (r < 0) { + goto failed; } return &acb->common; + +failed: + qemu_free(rcb); + s->qemu_aio_count--; + qemu_aio_release(acb); + return NULL; } -static BlockDriverAIOCB *rbd_aio_readv(BlockDriverState * bs, - int64_t sector_num, QEMUIOVector * qiov, - int nb_sectors, - BlockDriverCompletionFunc * cb, - void *opaque) +static BlockDriverAIOCB *qemu_rbd_aio_readv(BlockDriverState *bs, + int64_t sector_num, + QEMUIOVector *qiov, + int nb_sectors, + BlockDriverCompletionFunc *cb, + void *opaque) { return rbd_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0); } -static BlockDriverAIOCB *rbd_aio_writev(BlockDriverState * bs, - int64_t sector_num, QEMUIOVector * qiov, - int nb_sectors, - BlockDriverCompletionFunc * cb, - void *opaque) +static BlockDriverAIOCB *qemu_rbd_aio_writev(BlockDriverState *bs, + int64_t sector_num, + QEMUIOVector *qiov, + int nb_sectors, + BlockDriverCompletionFunc *cb, + void *opaque) { return rbd_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1); } -static 
int rbd_getinfo(BlockDriverState * bs, BlockDriverInfo * bdi) +static int qemu_rbd_getinfo(BlockDriverState *bs, BlockDriverInfo *bdi) { BDRVRBDState *s = bs->opaque; - bdi->cluster_size = s->objsize; + rbd_image_info_t info; + int r; + + r = rbd_stat(s->image, &info, sizeof(info)); + if (r < 0) { + return r; + } + + bdi->cluster_size = info.obj_size; return 0; } -static int64_t rbd_getlength(BlockDriverState * bs) +static int64_t qemu_rbd_getlength(BlockDriverState *bs) +{ + BDRVRBDState *s = bs->opaque; + rbd_image_info_t info; + int r; + + r = rbd_stat(s->image, &info, sizeof(info)); + if (r < 0) { + return r; + } + + return info.size; +} + +static int qemu_rbd_truncate(BlockDriverState *bs, int64_t offset) { BDRVRBDState *s = bs->opaque; + int r; - return s->size; + r = rbd_resize(s->image, offset); + if (r < 0) { + return r; + } + + return 0; } -static int rbd_snap_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info) +static int qemu_rbd_snap_create(BlockDriverState *bs, + QEMUSnapshotInfo *sn_info) { BDRVRBDState *s = bs->opaque; - char inbuf[512], outbuf[128]; - uint64_t snap_id; int r; - char *p = inbuf; - char *end = inbuf + sizeof(inbuf); - char n[RBD_MAX_SEG_NAME_SIZE]; - char *hbuf = NULL; - RbdHeader1 *header; if (sn_info->name[0] == '\0') { return -EINVAL; /* we need a name for rbd snapshots */ @@ -841,185 +724,57 @@ static int rbd_snap_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info) return -ERANGE; } - r = rados_selfmanaged_snap_create(s->header_pool, &snap_id); - if (r < 0) { - error_report("failed to create snap id: %s", strerror(-r)); - return r; - } - - *(uint32_t *)p = strlen(sn_info->name); - cpu_to_le32s((uint32_t *)p); - p += sizeof(uint32_t); - strncpy(p, sn_info->name, end - p); - p += strlen(p); - if (p + sizeof(snap_id) > end) { - error_report("invalid input parameter"); - return -EINVAL; - } - - *(uint64_t *)p = snap_id; - cpu_to_le64s((uint64_t *)p); - - snprintf(n, sizeof(n), "%s%s", s->name, RBD_SUFFIX); - - r = rados_exec(s->header_pool, n, "rbd", "snap_add", inbuf, - sizeof(inbuf), outbuf, sizeof(outbuf)); - if (r < 0) { - error_report("rbd.snap_add execution failed failed: %s", strerror(-r)); - return r; - } - - sprintf(sn_info->id_str, "%s", sn_info->name); - - r = rbd_read_header(s, &hbuf); + r = rbd_snap_create(s->image, sn_info->name); if (r < 0) { - error_report("failed reading header: %s", strerror(-r)); + error_report("failed to create snap: %s", strerror(-r)); return r; } - header = (RbdHeader1 *) hbuf; - r = rbd_set_snapc(s->pool, sn_info->name, header); - if (r < 0) { - error_report("failed setting snap context: %s", strerror(-r)); - goto failed; - } - return 0; - -failed: - qemu_free(header); - return r; } -static int decode32(char **p, const char *end, uint32_t *v) -{ - if (*p + 4 > end) { - return -ERANGE; - } - - *v = *(uint32_t *)(*p); - le32_to_cpus(v); - *p += 4; - return 0; -} - -static int decode64(char **p, const char *end, uint64_t *v) -{ - if (*p + 8 > end) { - return -ERANGE; - } - - *v = *(uint64_t *)(*p); - le64_to_cpus(v); - *p += 8; - return 0; -} - -static int decode_str(char **p, const char *end, char **s) -{ - uint32_t len; - int r; - - if ((r = decode32(p, end, &len)) < 0) { - return r; - } - - *s = qemu_malloc(len + 1); - memcpy(*s, *p, len); - *p += len; - (*s)[len] = '\0'; - - return len; -} - -static int rbd_snap_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab) +static int qemu_rbd_snap_list(BlockDriverState *bs, + QEMUSnapshotInfo **psn_tab) { BDRVRBDState *s = bs->opaque; - char 
n[RBD_MAX_SEG_NAME_SIZE]; QEMUSnapshotInfo *sn_info, *sn_tab = NULL; - RbdHeader1 *header; - char *hbuf = NULL; - char *outbuf = NULL, *end, *buf; - uint64_t len; - uint64_t snap_seq; - uint32_t snap_count; - int r, i; - - /* read header to estimate how much space we need to read the snap - * list */ - if ((r = rbd_read_header(s, &hbuf)) < 0) { - goto done_err; - } - header = (RbdHeader1 *)hbuf; - len = le64_to_cpu(header->snap_names_len); - len += 1024; /* should have already been enough, but new snapshots might - already been created since we read the header. just allocate - a bit more, so that in most cases it'll suffice anyway */ - qemu_free(hbuf); - - snprintf(n, sizeof(n), "%s%s", s->name, RBD_SUFFIX); - while (1) { - qemu_free(outbuf); - outbuf = qemu_malloc(len); + int i, snap_count; + rbd_snap_info_t *snaps; + int max_snaps = RBD_MAX_SNAPS; - r = rados_exec(s->header_pool, n, "rbd", "snap_list", NULL, 0, - outbuf, len); - if (r < 0) { - error_report("rbd.snap_list execution failed failed: %s", strerror(-r)); - goto done_err; + do { + snaps = qemu_malloc(sizeof(*snaps) * max_snaps); + snap_count = rbd_snap_list(s->image, snaps, &max_snaps); + if (snap_count < 0) { + qemu_free(snaps); } - if (r != len) { - break; - } - - /* if we're here, we probably raced with some snaps creation */ - len *= 2; - } - buf = outbuf; - end = buf + len; + } while (snap_count == -ERANGE); - if ((r = decode64(&buf, end, &snap_seq)) < 0) { - goto done_err; - } - if ((r = decode32(&buf, end, &snap_count)) < 0) { - goto done_err; + if (snap_count <= 0) { + return snap_count; } sn_tab = qemu_mallocz(snap_count * sizeof(QEMUSnapshotInfo)); - for (i = 0; i < snap_count; i++) { - uint64_t id, image_size; - char *snap_name; - if ((r = decode64(&buf, end, &id)) < 0) { - goto done_err; - } - if ((r = decode64(&buf, end, &image_size)) < 0) { - goto done_err; - } - if ((r = decode_str(&buf, end, &snap_name)) < 0) { - goto done_err; - } + for (i = 0; i < snap_count; i++) { + const char *snap_name = snaps[i].name; sn_info = sn_tab + i; pstrcpy(sn_info->id_str, sizeof(sn_info->id_str), snap_name); pstrcpy(sn_info->name, sizeof(sn_info->name), snap_name); - qemu_free(snap_name); - sn_info->vm_state_size = image_size; + sn_info->vm_state_size = snaps[i].size; sn_info->date_sec = 0; sn_info->date_nsec = 0; sn_info->vm_clock_nsec = 0; } + rbd_snap_list_end(snaps); + *psn_tab = sn_tab; - qemu_free(outbuf); return snap_count; -done_err: - qemu_free(sn_tab); - qemu_free(outbuf); - return r; } -static QEMUOptionParameter rbd_create_options[] = { +static QEMUOptionParameter qemu_rbd_create_options[] = { { .name = BLOCK_OPT_SIZE, .type = OPT_SIZE, @@ -1036,19 +791,20 @@ static QEMUOptionParameter rbd_create_options[] = { static BlockDriver bdrv_rbd = { .format_name = "rbd", .instance_size = sizeof(BDRVRBDState), - .bdrv_file_open = rbd_open, - .bdrv_close = rbd_close, - .bdrv_create = rbd_create, - .bdrv_get_info = rbd_getinfo, - .create_options = rbd_create_options, - .bdrv_getlength = rbd_getlength, + .bdrv_file_open = qemu_rbd_open, + .bdrv_close = qemu_rbd_close, + .bdrv_create = qemu_rbd_create, + .bdrv_get_info = qemu_rbd_getinfo, + .create_options = qemu_rbd_create_options, + .bdrv_getlength = qemu_rbd_getlength, + .bdrv_truncate = qemu_rbd_truncate, .protocol_name = "rbd", - .bdrv_aio_readv = rbd_aio_readv, - .bdrv_aio_writev = rbd_aio_writev, + .bdrv_aio_readv = qemu_rbd_aio_readv, + .bdrv_aio_writev = qemu_rbd_aio_writev, - .bdrv_snapshot_create = rbd_snap_create, - .bdrv_snapshot_list = rbd_snap_list, + 
.bdrv_snapshot_create = qemu_rbd_snap_create, + .bdrv_snapshot_list = qemu_rbd_snap_list, }; static void bdrv_rbd_init(void) diff --git a/block/rbd_types.h b/block/rbd_types.h deleted file mode 100644 index f4cca9970c..0000000000 --- a/block/rbd_types.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Ceph - scalable distributed file system - * - * Copyright (C) 2004-2010 Sage Weil <sage@newdream.net> - * - * This is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License version 2.1, as published by the Free Software - * Foundation. See file COPYING.LIB. - * - */ - -#ifndef CEPH_RBD_TYPES_H -#define CEPH_RBD_TYPES_H - - -/* - * rbd image 'foo' consists of objects - * foo.rbd - image metadata - * foo.00000000 - * foo.00000001 - * ... - data - */ - -#define RBD_SUFFIX ".rbd" -#define RBD_DIRECTORY "rbd_directory" -#define RBD_INFO "rbd_info" - -#define RBD_DEFAULT_OBJ_ORDER 22 /* 4MB */ - -#define RBD_MAX_OBJ_NAME_SIZE 96 -#define RBD_MAX_BLOCK_NAME_SIZE 24 -#define RBD_MAX_SEG_NAME_SIZE 128 - -#define RBD_COMP_NONE 0 -#define RBD_CRYPT_NONE 0 - -#define RBD_HEADER_TEXT "<<< Rados Block Device Image >>>\n" -#define RBD_HEADER_SIGNATURE "RBD" -#define RBD_HEADER_VERSION "001.005" - -struct rbd_info { - uint64_t max_id; -} __attribute__ ((packed)); - -struct rbd_obj_snap_ondisk { - uint64_t id; - uint64_t image_size; -} __attribute__((packed)); - -struct rbd_obj_header_ondisk { - char text[40]; - char block_name[RBD_MAX_BLOCK_NAME_SIZE]; - char signature[4]; - char version[8]; - struct { - uint8_t order; - uint8_t crypt_type; - uint8_t comp_type; - uint8_t unused; - } __attribute__((packed)) options; - uint64_t image_size; - uint64_t snap_seq; - uint32_t snap_count; - uint32_t reserved; - uint64_t snap_names_len; - struct rbd_obj_snap_ondisk snaps[0]; -} __attribute__((packed)); - - -#endif diff --git a/block/sheepdog.c b/block/sheepdog.c index 98946d72b7..0392ca8c9c 100644 --- a/block/sheepdog.c +++ b/block/sheepdog.c @@ -196,7 +196,7 @@ static inline uint64_t fnv_64a_buf(void *buf, size_t len, uint64_t hval) return hval; } -static inline int is_data_obj_writeable(SheepdogInode *inode, unsigned int idx) +static inline int is_data_obj_writable(SheepdogInode *inode, unsigned int idx) { return inode->vdi_id == inode->data_vdi_id[idx]; } @@ -1577,7 +1577,7 @@ static void sd_readv_writev_bh_cb(void *p) create = 1; } else if (acb->aiocb_type == AIOCB_WRITE_UDATA - && !is_data_obj_writeable(inode, idx)) { + && !is_data_obj_writable(inode, idx)) { /* Copy-On-Write */ create = 1; old_oid = oid; diff --git a/block/vdi.c b/block/vdi.c index 701745bf8c..4c9e201c33 100644 --- a/block/vdi.c +++ b/block/vdi.c @@ -87,6 +87,7 @@ void uuid_unparse(const uuid_t uu, char *out); #define MiB (KiB * KiB) #define SECTOR_SIZE 512 +#define DEFAULT_CLUSTER_SIZE (1 * MiB) #if defined(CONFIG_VDI_DEBUG) #define logout(fmt, ...) 
\ @@ -803,7 +804,7 @@ static int vdi_create(const char *filename, QEMUOptionParameter *options) int result = 0; uint64_t bytes = 0; uint32_t blocks; - size_t block_size = 1 * MiB; + size_t block_size = DEFAULT_CLUSTER_SIZE; uint32_t image_type = VDI_TYPE_DYNAMIC; VdiHeader header; size_t i; @@ -921,7 +922,8 @@ static QEMUOptionParameter vdi_create_options[] = { { .name = BLOCK_OPT_CLUSTER_SIZE, .type = OPT_SIZE, - .help = "VDI cluster (block) size" + .help = "VDI cluster (block) size", + .value = { .n = DEFAULT_CLUSTER_SIZE }, }, #endif #if defined(CONFIG_VDI_STATIC_IMAGE) diff --git a/block/vmdk.c b/block/vmdk.c index 8fc9d67208..922b23d8f5 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -716,11 +716,11 @@ static int vmdk_create(const char *filename, QEMUOptionParameter *options) return -errno; magic = cpu_to_be32(VMDK4_MAGIC); memset(&header, 0, sizeof(header)); - header.version = cpu_to_le32(1); - header.flags = cpu_to_le32(3); /* ?? */ - header.capacity = cpu_to_le64(total_size); - header.granularity = cpu_to_le64(128); - header.num_gtes_per_gte = cpu_to_le32(512); + header.version = 1; + header.flags = 3; /* ?? */ + header.capacity = total_size; + header.granularity = 128; + header.num_gtes_per_gte = 512; grains = (total_size + header.granularity - 1) / header.granularity; gt_size = ((header.num_gtes_per_gte * sizeof(uint32_t)) + 511) >> 9; @@ -736,6 +736,12 @@ static int vmdk_create(const char *filename, QEMUOptionParameter *options) header.granularity - 1) / header.granularity) * header.granularity; + /* swap endianness for all header fields */ + header.version = cpu_to_le32(header.version); + header.flags = cpu_to_le32(header.flags); + header.capacity = cpu_to_le64(header.capacity); + header.granularity = cpu_to_le64(header.granularity); + header.num_gtes_per_gte = cpu_to_le32(header.num_gtes_per_gte); header.desc_offset = cpu_to_le64(header.desc_offset); header.desc_size = cpu_to_le64(header.desc_size); header.rgd_offset = cpu_to_le64(header.rgd_offset); @@ -759,7 +765,7 @@ static int vmdk_create(const char *filename, QEMUOptionParameter *options) goto exit; } - ret = ftruncate(fd, header.grain_offset << 9); + ret = ftruncate(fd, le64_to_cpu(header.grain_offset) << 9); if (ret < 0) { ret = -errno; goto exit; @@ -767,7 +773,7 @@ static int vmdk_create(const char *filename, QEMUOptionParameter *options) /* write grain directory */ lseek(fd, le64_to_cpu(header.rgd_offset) << 9, SEEK_SET); - for (i = 0, tmp = header.rgd_offset + gd_size; + for (i = 0, tmp = le64_to_cpu(header.rgd_offset) + gd_size; i < gt_count; i++, tmp += gt_size) { ret = qemu_write_full(fd, &tmp, sizeof(tmp)); if (ret != sizeof(tmp)) { @@ -778,7 +784,7 @@ static int vmdk_create(const char *filename, QEMUOptionParameter *options) /* write backup grain directory */ lseek(fd, le64_to_cpu(header.gd_offset) << 9, SEEK_SET); - for (i = 0, tmp = header.gd_offset + gd_size; + for (i = 0, tmp = le64_to_cpu(header.gd_offset) + gd_size; i < gt_count; i++, tmp += gt_size) { ret = qemu_write_full(fd, &tmp, sizeof(tmp)); if (ret != sizeof(tmp)) { diff --git a/block_int.h b/block_int.h index 545ad11ff3..fa913371e1 100644 --- a/block_int.h +++ b/block_int.h @@ -194,7 +194,6 @@ struct BlockDriverState { /* NOTE: the following infos are only hints for real hardware drivers. 
They are not used by the block driver */ int cyls, heads, secs, translation; - int type; BlockErrorAction on_read_error, on_write_error; char device_name[32]; unsigned long *dirty_bitmap; diff --git a/blockdev.c b/blockdev.c index 5429621f0c..1502575acb 100644 --- a/blockdev.c +++ b/blockdev.c @@ -326,7 +326,7 @@ DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi) if ((buf = qemu_opt_get(opts, "cache")) != NULL) { if (!strcmp(buf, "off") || !strcmp(buf, "none")) { - bdrv_flags |= BDRV_O_NOCACHE; + bdrv_flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB; } else if (!strcmp(buf, "writeback")) { bdrv_flags |= BDRV_O_CACHE_WB; } else if (!strcmp(buf, "unsafe")) { @@ -487,7 +487,8 @@ DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi) } break; case MEDIA_CDROM: - bdrv_set_type_hint(dinfo->bdrv, BDRV_TYPE_CDROM); + bdrv_set_removable(dinfo->bdrv, 1); + dinfo->media_cd = 1; break; } break; @@ -495,7 +496,7 @@ DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi) /* FIXME: This isn't really a floppy, but it's a reasonable approximation. */ case IF_FLOPPY: - bdrv_set_type_hint(dinfo->bdrv, BDRV_TYPE_FLOPPY); + bdrv_set_removable(dinfo->bdrv, 1); break; case IF_PFLASH: case IF_MTD: diff --git a/blockdev.h b/blockdev.h index 2c9e7804c9..3587786a64 100644 --- a/blockdev.h +++ b/blockdev.h @@ -33,6 +33,7 @@ struct DriveInfo { int bus; int unit; int auto_del; /* see blockdev_mark_auto_del() */ + int media_cd; QemuOpts *opts; char serial[BLOCK_SERIAL_STRLEN + 1]; QTAILQ_ENTRY(DriveInfo) next; diff --git a/bsd-user/main.c b/bsd-user/main.c index 6b12f8bba1..0c3fca15ca 100644 --- a/bsd-user/main.c +++ b/bsd-user/main.c @@ -237,7 +237,7 @@ void cpu_loop(CPUX86State *env) break; #ifndef TARGET_ABI32 case EXCP_SYSCALL: - /* syscall from syscall intruction */ + /* syscall from syscall instruction */ if (bsd_type == target_freebsd) env->regs[R_EAX] = do_freebsd_syscall(env, env->regs[R_EAX], diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h index e343894ab1..1ba2d083d1 100644 --- a/bsd-user/qemu.h +++ b/bsd-user/qemu.h @@ -323,7 +323,7 @@ abi_long copy_from_user(void *hptr, abi_ulong gaddr, size_t len); abi_long copy_to_user(abi_ulong gaddr, void *hptr, size_t len); /* Functions for accessing guest memory. The tget and tput functions - read/write single values, byteswapping as neccessary. The lock_user + read/write single values, byteswapping as necessary. The lock_user gets a pointer to a contiguous area of guest memory, but does not perform and byteswapping. lock_user may return either a pointer to the guest memory, or a temporary buffer. 
*/ diff --git a/bsd-user/syscall.c b/bsd-user/syscall.c index eb1cdf21ca..d4d039a2f6 100644 --- a/bsd-user/syscall.c +++ b/bsd-user/syscall.c @@ -31,7 +31,6 @@ #include <sys/syscall.h> #include <sys/param.h> #include <sys/sysctl.h> -#include <signal.h> #include <utime.h> #include "qemu.h" diff --git a/compatfd.c b/compatfd.c index bd377c411a..41586ceaea 100644 --- a/compatfd.c +++ b/compatfd.c @@ -29,7 +29,7 @@ static void *sigwait_compat(void *opaque) sigset_t all; sigfillset(&all); - sigprocmask(SIG_BLOCK, &all, NULL); + pthread_sigmask(SIG_BLOCK, &all, NULL); while (1) { int sig; @@ -127,6 +127,7 @@ vnc_jpeg="" vnc_png="" vnc_thread="no" xen="" +xen_ctrl_version="" linux_aio="" attr="" vhost_net="" @@ -228,7 +229,7 @@ sdl_config="${cross_prefix}${SDL_CONFIG-sdl-config}" # default flags for all hosts QEMU_CFLAGS="-fno-strict-aliasing $QEMU_CFLAGS" CFLAGS="-g $CFLAGS" -QEMU_CFLAGS="-Wall -Wundef -Wendif-labels -Wwrite-strings -Wmissing-prototypes $QEMU_CFLAGS" +QEMU_CFLAGS="-Wall -Wundef -Wwrite-strings -Wmissing-prototypes $QEMU_CFLAGS" QEMU_CFLAGS="-Wstrict-prototypes -Wredundant-decls $QEMU_CFLAGS" QEMU_CFLAGS="-D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE $QEMU_CFLAGS" QEMU_CFLAGS="-D_FORTIFY_SOURCE=2 $QEMU_CFLAGS" @@ -822,6 +823,75 @@ esac [ -z "$guest_base" ] && guest_base="$host_guest_base" + +default_target_list="" + +# these targets are portable +if [ "$softmmu" = "yes" ] ; then + default_target_list="\ +i386-softmmu \ +x86_64-softmmu \ +alpha-softmmu \ +arm-softmmu \ +cris-softmmu \ +lm32-softmmu \ +m68k-softmmu \ +microblaze-softmmu \ +microblazeel-softmmu \ +mips-softmmu \ +mipsel-softmmu \ +mips64-softmmu \ +mips64el-softmmu \ +ppc-softmmu \ +ppcemb-softmmu \ +ppc64-softmmu \ +sh4-softmmu \ +sh4eb-softmmu \ +sparc-softmmu \ +sparc64-softmmu \ +s390x-softmmu \ +" +fi +# the following are Linux specific +if [ "$linux_user" = "yes" ] ; then + default_target_list="${default_target_list}\ +i386-linux-user \ +x86_64-linux-user \ +alpha-linux-user \ +arm-linux-user \ +armeb-linux-user \ +cris-linux-user \ +m68k-linux-user \ +microblaze-linux-user \ +microblazeel-linux-user \ +mips-linux-user \ +mipsel-linux-user \ +ppc-linux-user \ +ppc64-linux-user \ +ppc64abi32-linux-user \ +sh4-linux-user \ +sh4eb-linux-user \ +sparc-linux-user \ +sparc64-linux-user \ +sparc32plus-linux-user \ +unicore32-linux-user \ +s390x-linux-user \ +" +fi +# the following are Darwin specific +if [ "$darwin_user" = "yes" ] ; then + default_target_list="$default_target_list i386-darwin-user ppc-darwin-user " +fi +# the following are BSD specific +if [ "$bsd_user" = "yes" ] ; then + default_target_list="${default_target_list}\ +i386-bsd-user \ +x86_64-bsd-user \ +sparc-bsd-user \ +sparc64-bsd-user \ +" +fi + if test x"$show_help" = x"yes" ; then cat << EOF @@ -834,7 +904,9 @@ echo " --help print this message" echo " --prefix=PREFIX install in PREFIX [$prefix]" echo " --interp-prefix=PREFIX where to find shared libraries, etc." 
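/*
 * Aside (illustrative sketch, not part of this patch): the compatfd.c
 * hunk above replaces sigprocmask() with pthread_sigmask() because
 * sigprocmask() has unspecified behaviour in multi-threaded processes,
 * while pthread_sigmask() acts on the calling thread only.  The usual
 * shape of such a signal-waiter thread, with names invented for the
 * sketch:
 */
#include <pthread.h>
#include <signal.h>

static void *signal_waiter(void *opaque)
{
    sigset_t all;
    int sig;

    (void)opaque;
    sigfillset(&all);
    /* Block every signal in this thread, then collect them synchronously. */
    pthread_sigmask(SIG_BLOCK, &all, NULL);
    while (sigwait(&all, &sig) == 0) {
        /* hand 'sig' on to the rest of the program, e.g. through a pipe */
    }
    return NULL;
}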
echo " use %M for cpu name [$interp_prefix]" -echo " --target-list=LIST set target list [$target_list]" +echo " --target-list=LIST set target list (default: build everything)" +echo "Available targets: $default_target_list" | \ + fold -s -w 53 | sed -e 's/^/ /' echo "" echo "Advanced options (experts only):" echo " --source-path=PATH path of source code [$source_path]" @@ -895,6 +967,7 @@ echo " --disable-check-utests disable check unit-tests" echo " --enable-check-utests enable check unit-tests" echo " --disable-bluez disable bluez stack connectivity" echo " --enable-bluez enable bluez stack connectivity" +echo " --disable-slirp disable SLIRP userspace network connectivity" echo " --disable-kvm disable KVM acceleration support" echo " --enable-kvm enable KVM acceleration support" echo " --disable-nptl disable usermode NPTL support" @@ -965,7 +1038,7 @@ fi gcc_flags="-Wold-style-declaration -Wold-style-definition -Wtype-limits" gcc_flags="-Wformat-security -Wformat-y2k -Winit-self -Wignored-qualifiers $gcc_flags" gcc_flags="-Wmissing-include-dirs -Wempty-body -Wnested-externs $gcc_flags" -gcc_flags="-fstack-protector-all $gcc_flags" +gcc_flags="-fstack-protector-all -Wendif-labels $gcc_flags" cat > $TMPC << EOF int main(void) { return 0; } EOF @@ -1004,70 +1077,8 @@ if test "$solaris" = "yes" ; then fi fi - if test -z "$target_list" ; then -# these targets are portable - if [ "$softmmu" = "yes" ] ; then - target_list="\ -i386-softmmu \ -x86_64-softmmu \ -arm-softmmu \ -cris-softmmu \ -lm32-softmmu \ -m68k-softmmu \ -microblaze-softmmu \ -microblazeel-softmmu \ -mips-softmmu \ -mipsel-softmmu \ -mips64-softmmu \ -mips64el-softmmu \ -ppc-softmmu \ -ppcemb-softmmu \ -ppc64-softmmu \ -sh4-softmmu \ -sh4eb-softmmu \ -sparc-softmmu \ -sparc64-softmmu \ -" - fi -# the following are Linux specific - if [ "$linux_user" = "yes" ] ; then - target_list="${target_list}\ -i386-linux-user \ -x86_64-linux-user \ -alpha-linux-user \ -arm-linux-user \ -armeb-linux-user \ -cris-linux-user \ -m68k-linux-user \ -microblaze-linux-user \ -microblazeel-linux-user \ -mips-linux-user \ -mipsel-linux-user \ -ppc-linux-user \ -ppc64-linux-user \ -ppc64abi32-linux-user \ -sh4-linux-user \ -sh4eb-linux-user \ -sparc-linux-user \ -sparc64-linux-user \ -sparc32plus-linux-user \ -unicore32-linux-user \ -" - fi -# the following are Darwin specific - if [ "$darwin_user" = "yes" ] ; then - target_list="$target_list i386-darwin-user ppc-darwin-user " - fi -# the following are BSD specific - if [ "$bsd_user" = "yes" ] ; then - target_list="${target_list}\ -i386-bsd-user \ -x86_64-bsd-user \ -sparc-bsd-user \ -sparc64-bsd-user \ -" - fi + target_list="$default_target_list" else target_list=`echo "$target_list" | sed -e 's/,/ /g'` fi @@ -1180,20 +1191,81 @@ fi if test "$xen" != "no" ; then xen_libs="-lxenstore -lxenctrl -lxenguest" + + # Xen unstable cat > $TMPC <<EOF #include <xenctrl.h> #include <xs.h> -int main(void) { xs_daemon_open(); xc_interface_open(); return 0; } +#include <stdint.h> +#include <xen/hvm/hvm_info_table.h> +#if !defined(HVM_MAX_VCPUS) +# error HVM_MAX_VCPUS not defined +#endif +int main(void) { + xc_interface *xc; + xs_daemon_open(); + xc = xc_interface_open(0, 0, 0); + xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0); + xc_gnttab_open(NULL, 0); + return 0; +} EOF if compile_prog "" "$xen_libs" ; then + xen_ctrl_version=410 xen=yes - libs_softmmu="$xen_libs $libs_softmmu" + + # Xen 4.0.0 + elif ( + cat > $TMPC <<EOF +#include <xenctrl.h> +#include <xs.h> +#include <stdint.h> +#include 
<xen/hvm/hvm_info_table.h> +#if !defined(HVM_MAX_VCPUS) +# error HVM_MAX_VCPUS not defined +#endif +int main(void) { + xs_daemon_open(); + xc_interface_open(); + xc_gnttab_open(); + xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0); + return 0; +} +EOF + compile_prog "" "$xen_libs" + ) ; then + xen_ctrl_version=400 + xen=yes + + # Xen 3.3.0, 3.4.0 + elif ( + cat > $TMPC <<EOF +#include <xenctrl.h> +#include <xs.h> +int main(void) { + xs_daemon_open(); + xc_interface_open(); + xc_gnttab_open(); + xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0); + return 0; +} +EOF + compile_prog "" "$xen_libs" + ) ; then + xen_ctrl_version=330 + xen=yes + + # Xen not found or unsupported else if test "$xen" = "yes" ; then feature_not_found "xen" fi xen=no fi + + if test "$xen" = yes; then + libs_softmmu="$xen_libs $libs_softmmu" + fi fi ########################################## @@ -1638,7 +1710,7 @@ fi if test "$curl" != "no" ; then cat > $TMPC << EOF #include <curl/curl.h> -int main(void) { return curl_easy_init(); } +int main(void) { curl_easy_init(); curl_multi_setopt(0, 0, 0); return 0; } EOF curl_cflags=`$curlconfig --cflags 2>/dev/null` curl_libs=`$curlconfig --libs 2>/dev/null` @@ -1772,6 +1844,21 @@ recent kvm-kmod from http://sourceforge.net/projects/kvm." fi ########################################## +# test for ppc kvm pvr setting + +if test "$kvm" = "yes" && test "$cpu" = "ppc" -o "$cpu" = "ppc64"; then + cat > $TMPC <<EOF + #include <asm/kvm.h> + int main(void) { struct kvm_sregs s; s.pvr = 0; return 0; } +EOF + if compile_prog "$kvm_cflags" "" ; then + kvm_ppc_pvr=yes + else + kvm_ppc_pvr=no + fi +fi + +########################################## # test for vhost net if test "$vhost_net" != "no"; then @@ -1831,41 +1918,24 @@ fi if test "$rbd" != "no" ; then cat > $TMPC <<EOF #include <stdio.h> -#include <rados/librados.h> -int main(void) { rados_initialize(0, NULL); return 0; } -EOF - rbd_libs="-lrados" - if compile_prog "" "$rbd_libs" ; then - librados_too_old=no - cat > $TMPC <<EOF -#include <stdio.h> -#include <rados/librados.h> -#ifndef CEPH_OSD_TMAP_SET -#error missing CEPH_OSD_TMAP_SET -#endif +#include <rbd/librbd.h> int main(void) { - int (*func)(const rados_pool_t pool, uint64_t *snapid) = rados_selfmanaged_snap_create; - rados_initialize(0, NULL); + rados_t cluster; + rados_create(&cluster, NULL); return 0; } EOF - if compile_prog "" "$rbd_libs" ; then - rbd=yes - libs_tools="$rbd_libs $libs_tools" - libs_softmmu="$rbd_libs $libs_softmmu" - else - rbd=no - librados_too_old=yes - fi + rbd_libs="-lrbd -lrados" + if compile_prog "" "$rbd_libs" ; then + rbd=yes + libs_tools="$rbd_libs $libs_tools" + libs_softmmu="$rbd_libs $libs_softmmu" else if test "$rbd" = "yes" ; then feature_not_found "rados block device" fi rbd=no fi - if test "$librados_too_old" = "yes" ; then - echo "-> Your librados version is too old - upgrade needed to have rbd support" - fi fi ########################################## @@ -2345,7 +2415,7 @@ int main(void) { spice_server_new(); return 0; } EOF spice_cflags=$($pkg_config --cflags spice-protocol spice-server 2>/dev/null) spice_libs=$($pkg_config --libs spice-protocol spice-server 2>/dev/null) - if $pkg_config --atleast-version=0.5.3 spice-server >/dev/null 2>&1 && \ + if $pkg_config --atleast-version=0.6.0 spice-server >/dev/null 2>&1 && \ compile_prog "$spice_cflags" "$spice_libs" ; then spice="yes" libs_softmmu="$libs_softmmu $spice_libs" @@ -2540,7 +2610,7 @@ if test \( "$cpu" = "i386" -o "$cpu" = "x86_64" \) -a \ "$softmmu" = yes ; then roms="optionrom" fi -if 
test "$cpu" = "ppc64" ; then +if test "$cpu" = "ppc64" -a "$targetos" != "Darwin" ; then roms="$roms spapr-rtas" fi @@ -2855,6 +2925,7 @@ if test "$bluez" = "yes" ; then fi if test "$xen" = "yes" ; then echo "CONFIG_XEN=y" >> $config_host_mak + echo "CONFIG_XEN_CTRL_INTERFACE_VERSION=$xen_ctrl_version" >> $config_host_mak fi if test "$io_thread" = "yes" ; then echo "CONFIG_IOTHREAD=y" >> $config_host_mak @@ -3235,7 +3306,11 @@ echo "TARGET_ABI_DIR=$TARGET_ABI_DIR" >> $config_target_mak case "$target_arch2" in i386|x86_64) if test "$xen" = "yes" -a "$target_softmmu" = "yes" ; then + target_phys_bits=64 echo "CONFIG_XEN=y" >> $config_target_mak + if test "$cpu" = "i386" -o "$cpu" = "x86_64"; then + echo "CONFIG_XEN_MAPCACHE=y" >> $config_target_mak + fi fi esac case "$target_arch2" in @@ -3257,6 +3332,9 @@ case "$target_arch2" in if test $vhost_net = "yes" ; then echo "CONFIG_VHOST_NET=y" >> $config_target_mak fi + if test "$kvm_ppc_pvr" = "yes" ; then + echo "CONFIG_KVM_PPC_PVR=y" >> $config_target_mak + fi fi esac if test "$target_bigendian" = "yes" ; then @@ -3291,8 +3369,6 @@ if test ! -z "$gdb_xml_files" ; then echo "TARGET_XML_FILES=$list" >> $config_target_mak fi -echo "CONFIG_SOFTFLOAT=y" >> $config_target_mak - if test "$target_user_only" = "yes" -a "$bflt" = "yes"; then echo "TARGET_HAS_BFLT=y" >> $config_target_mak fi @@ -3457,11 +3533,13 @@ done # for target in $targets # build tree in object directory in case the source is not in the current directory DIRS="tests tests/cris slirp audio block net pc-bios/optionrom" +DIRS="$DIRS pc-bios/spapr-rtas" DIRS="$DIRS roms/seabios roms/vgabios" DIRS="$DIRS fsdev ui" FILES="Makefile tests/Makefile" FILES="$FILES tests/cris/Makefile tests/cris/.gdbinit" FILES="$FILES pc-bios/optionrom/Makefile pc-bios/keymaps" +FILES="$FILES pc-bios/spapr-rtas/Makefile" FILES="$FILES roms/seabios/Makefile roms/vgabios/Makefile" for bios_file in $source_path/pc-bios/*.bin $source_path/pc-bios/*.rom $source_path/pc-bios/*.dtb $source_path/pc-bios/openbios-*; do FILES="$FILES pc-bios/`basename $bios_file`" @@ -180,7 +180,7 @@ void vga_hw_screen_dump(const char *filename) active_console = consoles[0]; /* There is currently no way of specifying which screen we want to dump, so always dump the first one. */ - if (consoles[0]->hw_screen_dump) + if (consoles[0] && consoles[0]->hw_screen_dump) consoles[0]->hw_screen_dump(consoles[0]->hw, filename); active_console = previous_active_console; } @@ -123,8 +123,7 @@ typedef union { endian ! */ typedef union { float64 d; -#if defined(HOST_WORDS_BIGENDIAN) \ - || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT)) +#if defined(HOST_WORDS_BIGENDIAN) struct { uint32_t upper; uint32_t lower; @@ -138,7 +137,6 @@ typedef union { uint64_t ll; } CPU_DoubleU; -#if defined(FLOATX80) typedef union { floatx80 d; struct { @@ -146,9 +144,7 @@ typedef union { uint16_t upper; } l; } CPU_LDoubleU; -#endif -#if defined(CONFIG_SOFTFLOAT) typedef union { float128 q; #if defined(HOST_WORDS_BIGENDIAN) @@ -175,7 +171,6 @@ typedef union { } ll; #endif } CPU_QuadU; -#endif /* CPU memory access without any memory or io remapping */ @@ -786,18 +781,54 @@ void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...) 
extern CPUState *first_cpu; extern CPUState *cpu_single_env; -#define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */ -#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */ -#define CPU_INTERRUPT_TIMER 0x08 /* internal timer exception pending */ -#define CPU_INTERRUPT_FIQ 0x10 /* Fast interrupt pending. */ -#define CPU_INTERRUPT_HALT 0x20 /* CPU halt wanted */ -#define CPU_INTERRUPT_SMI 0x40 /* (x86 only) SMI interrupt pending */ -#define CPU_INTERRUPT_DEBUG 0x80 /* Debug event occured. */ -#define CPU_INTERRUPT_VIRQ 0x100 /* virtual interrupt pending. */ -#define CPU_INTERRUPT_NMI 0x200 /* NMI pending. */ -#define CPU_INTERRUPT_INIT 0x400 /* INIT pending. */ -#define CPU_INTERRUPT_SIPI 0x800 /* SIPI pending. */ -#define CPU_INTERRUPT_MCE 0x1000 /* (x86 only) MCE pending. */ +/* Flags for use in ENV->INTERRUPT_PENDING. + + The numbers assigned here are non-sequential in order to preserve + binary compatibility with the vmstate dump. Bit 0 (0x0001) was + previously used for CPU_INTERRUPT_EXIT, and is cleared when loading + the vmstate dump. */ + +/* External hardware interrupt pending. This is typically used for + interrupts from devices. */ +#define CPU_INTERRUPT_HARD 0x0002 + +/* Exit the current TB. This is typically used when some system-level device + makes some change to the memory mapping. E.g. the a20 line change. */ +#define CPU_INTERRUPT_EXITTB 0x0004 + +/* Halt the CPU. */ +#define CPU_INTERRUPT_HALT 0x0020 + +/* Debug event pending. */ +#define CPU_INTERRUPT_DEBUG 0x0080 + +/* Several target-specific external hardware interrupts. Each target/cpu.h + should define proper names based on these defines. */ +#define CPU_INTERRUPT_TGT_EXT_0 0x0008 +#define CPU_INTERRUPT_TGT_EXT_1 0x0010 +#define CPU_INTERRUPT_TGT_EXT_2 0x0040 +#define CPU_INTERRUPT_TGT_EXT_3 0x0200 +#define CPU_INTERRUPT_TGT_EXT_4 0x1000 + +/* Several target-specific internal interrupts. These differ from the + preceeding target-specific interrupts in that they are intended to + originate from within the cpu itself, typically in response to some + instruction being executed. These, therefore, are not masked while + single-stepping within the debugger. */ +#define CPU_INTERRUPT_TGT_INT_0 0x0100 +#define CPU_INTERRUPT_TGT_INT_1 0x0400 +#define CPU_INTERRUPT_TGT_INT_2 0x0800 + +/* First unused bit: 0x2000. */ + +/* The set of all bits that should be masked when single-stepping. */ +#define CPU_INTERRUPT_SSTEP_MASK \ + (CPU_INTERRUPT_HARD \ + | CPU_INTERRUPT_TGT_EXT_0 \ + | CPU_INTERRUPT_TGT_EXT_1 \ + | CPU_INTERRUPT_TGT_EXT_2 \ + | CPU_INTERRUPT_TGT_EXT_3 \ + | CPU_INTERRUPT_TGT_EXT_4) #ifndef CONFIG_USER_ONLY typedef void (*CPUInterruptHandler)(CPUState *, int); diff --git a/cpu-common.h b/cpu-common.h index 6410cccda5..9f5917224a 100644 --- a/cpu-common.h +++ b/cpu-common.h @@ -61,12 +61,14 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name, ram_addr_t size, void *host); ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size); void qemu_ram_free(ram_addr_t addr); +void qemu_ram_free_from_ptr(ram_addr_t addr); void qemu_ram_remap(ram_addr_t addr, ram_addr_t length); /* This should only be used for ram local to a device. */ void *qemu_get_ram_ptr(ram_addr_t addr); /* Same but slower, to use for migration, where the order of * RAMBlocks must not change. */ void *qemu_safe_ram_ptr(ram_addr_t addr); +void qemu_put_ram_ptr(void *addr); /* This should not be used by devices. 
*/ int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr); ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr); diff --git a/cpu-exec.c b/cpu-exec.c index 395cd8cf90..e1de56b397 100644 --- a/cpu-exec.c +++ b/cpu-exec.c @@ -23,22 +23,6 @@ #include "kvm.h" #include "qemu-barrier.h" -#if !defined(CONFIG_SOFTMMU) -#undef EAX -#undef ECX -#undef EDX -#undef EBX -#undef ESP -#undef EBP -#undef ESI -#undef EDI -#undef EIP -#include <signal.h> -#ifdef __linux__ -#include <sys/ucontext.h> -#endif -#endif - #if defined(__sparc__) && !defined(CONFIG_SOLARIS) // Work around ugly bugs in glibc that mangle global register contents #undef env @@ -48,7 +32,6 @@ int tb_invalidated_flag; //#define CONFIG_DEBUG_EXEC -//#define DEBUG_SIGNAL int qemu_cpu_has_work(CPUState *env) { @@ -64,37 +47,17 @@ void cpu_loop_exit(void) /* exit the current TB from a signal handler. The host registers are restored in a state compatible with the CPU emulator */ +#if defined(CONFIG_SOFTMMU) void cpu_resume_from_signal(CPUState *env1, void *puc) { -#if !defined(CONFIG_SOFTMMU) -#ifdef __linux__ - struct ucontext *uc = puc; -#elif defined(__OpenBSD__) - struct sigcontext *uc = puc; -#endif -#endif - env = env1; /* XXX: restore cpu registers saved in host registers */ -#if !defined(CONFIG_SOFTMMU) - if (puc) { - /* XXX: use siglongjmp ? */ -#ifdef __linux__ -#ifdef __ia64 - sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL); -#else - sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL); -#endif -#elif defined(__OpenBSD__) - sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL); -#endif - } -#endif env->exception_index = -1; longjmp(env->jmp_env, 1); } +#endif /* Execute the code without caching the generated code. An interpreter could be used if available. */ @@ -360,10 +323,7 @@ int cpu_exec(CPUState *env1) if (unlikely(interrupt_request)) { if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) { /* Mask out external interrupts for this step. */ - interrupt_request &= ~(CPU_INTERRUPT_HARD | - CPU_INTERRUPT_FIQ | - CPU_INTERRUPT_SMI | - CPU_INTERRUPT_NMI); + interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK; } if (interrupt_request & CPU_INTERRUPT_DEBUG) { env->interrupt_request &= ~CPU_INTERRUPT_DEBUG; @@ -492,9 +452,6 @@ int cpu_exec(CPUState *env1) next_tb = 0; } } - } else if (interrupt_request & CPU_INTERRUPT_TIMER) { - //do_interrupt(0, 0, 0, 0, 0); - env->interrupt_request &= ~CPU_INTERRUPT_TIMER; } #elif defined(TARGET_ARM) if (interrupt_request & CPU_INTERRUPT_FIQ @@ -509,7 +466,7 @@ int cpu_exec(CPUState *env1) jump normally, then does the exception return when the CPU tries to execute code at the magic address. This will cause the magic PC value to be pushed to - the stack if an interrupt occured at the wrong time. + the stack if an interrupt occurred at the wrong time. We avoid this by disabling interrupts when pc contains a magic address. */ if (interrupt_request & CPU_INTERRUPT_HARD @@ -531,9 +488,36 @@ int cpu_exec(CPUState *env1) next_tb = 0; } #elif defined(TARGET_ALPHA) - if (interrupt_request & CPU_INTERRUPT_HARD) { - do_interrupt(env); - next_tb = 0; + { + int idx = -1; + /* ??? This hard-codes the OSF/1 interrupt levels. */ + switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) { + case 0 ... 
3: + if (interrupt_request & CPU_INTERRUPT_HARD) { + idx = EXCP_DEV_INTERRUPT; + } + /* FALLTHRU */ + case 4: + if (interrupt_request & CPU_INTERRUPT_TIMER) { + idx = EXCP_CLK_INTERRUPT; + } + /* FALLTHRU */ + case 5: + if (interrupt_request & CPU_INTERRUPT_SMP) { + idx = EXCP_SMP_INTERRUPT; + } + /* FALLTHRU */ + case 6: + if (interrupt_request & CPU_INTERRUPT_MCHK) { + idx = EXCP_MCHK; + } + } + if (idx >= 0) { + env->exception_index = idx; + env->error_code = 0; + do_interrupt(env); + next_tb = 0; + } } #elif defined(TARGET_CRIS) if (interrupt_request & CPU_INTERRUPT_HARD @@ -569,7 +553,7 @@ int cpu_exec(CPUState *env1) next_tb = 0; } #endif - /* Don't use the cached interupt_request value, + /* Don't use the cached interrupt_request value, do_interrupt may have updated the EXITTB flag. */ if (env->interrupt_request & CPU_INTERRUPT_EXITTB) { env->interrupt_request &= ~CPU_INTERRUPT_EXITTB; @@ -709,593 +693,3 @@ int cpu_exec(CPUState *env1) cpu_single_env = NULL; return ret; } - -/* must only be called from the generated code as an exception can be - generated */ -void tb_invalidate_page_range(target_ulong start, target_ulong end) -{ - /* XXX: cannot enable it yet because it yields to MMU exception - where NIP != read address on PowerPC */ -#if 0 - target_ulong phys_addr; - phys_addr = get_phys_addr_code(env, start); - tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0); -#endif -} - -#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY) - -void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector) -{ - CPUX86State *saved_env; - - saved_env = env; - env = s; - if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) { - selector &= 0xffff; - cpu_x86_load_seg_cache(env, seg_reg, selector, - (selector << 4), 0xffff, 0); - } else { - helper_load_seg(seg_reg, selector); - } - env = saved_env; -} - -void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32) -{ - CPUX86State *saved_env; - - saved_env = env; - env = s; - - helper_fsave(ptr, data32); - - env = saved_env; -} - -void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32) -{ - CPUX86State *saved_env; - - saved_env = env; - env = s; - - helper_frstor(ptr, data32); - - env = saved_env; -} - -#endif /* TARGET_I386 */ - -#if !defined(CONFIG_SOFTMMU) - -#if defined(TARGET_I386) -#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code) -#else -#define EXCEPTION_ACTION cpu_loop_exit() -#endif - -/* 'pc' is the host PC at which the exception was raised. 'address' is - the effective address of the memory exception. 'is_write' is 1 if a - write caused the exception and otherwise 0'. 
'old_set' is the - signal set which should be restored */ -static inline int handle_cpu_signal(unsigned long pc, unsigned long address, - int is_write, sigset_t *old_set, - void *puc) -{ - TranslationBlock *tb; - int ret; - - if (cpu_single_env) - env = cpu_single_env; /* XXX: find a correct solution for multithread */ -#if defined(DEBUG_SIGNAL) - qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", - pc, address, is_write, *(unsigned long *)old_set); -#endif - /* XXX: locking issue */ - if (is_write && page_unprotect(h2g(address), pc, puc)) { - return 1; - } - - /* see if it is an MMU fault */ - ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0); - if (ret < 0) - return 0; /* not an MMU fault */ - if (ret == 0) - return 1; /* the MMU fault was handled without causing real CPU fault */ - /* now we have a real cpu fault */ - tb = tb_find_pc(pc); - if (tb) { - /* the PC is inside the translated code. It means that we have - a virtual CPU fault */ - cpu_restore_state(tb, env, pc); - } - - /* we restore the process signal mask as the sigreturn should - do it (XXX: use sigsetjmp) */ - sigprocmask(SIG_SETMASK, old_set, NULL); - EXCEPTION_ACTION; - - /* never comes here */ - return 1; -} - -#if defined(__i386__) - -#if defined(__APPLE__) -# include <sys/ucontext.h> - -# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip)) -# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno) -# define ERROR_sig(context) ((context)->uc_mcontext->es.err) -# define MASK_sig(context) ((context)->uc_sigmask) -#elif defined (__NetBSD__) -# include <ucontext.h> - -# define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP]) -# define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO]) -# define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR]) -# define MASK_sig(context) ((context)->uc_sigmask) -#elif defined (__FreeBSD__) || defined(__DragonFly__) -# include <ucontext.h> - -# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_eip)) -# define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno) -# define ERROR_sig(context) ((context)->uc_mcontext.mc_err) -# define MASK_sig(context) ((context)->uc_sigmask) -#elif defined(__OpenBSD__) -# define EIP_sig(context) ((context)->sc_eip) -# define TRAP_sig(context) ((context)->sc_trapno) -# define ERROR_sig(context) ((context)->sc_err) -# define MASK_sig(context) ((context)->sc_mask) -#else -# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP]) -# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO]) -# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR]) -# define MASK_sig(context) ((context)->uc_sigmask) -#endif - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; -#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__) - ucontext_t *uc = puc; -#elif defined(__OpenBSD__) - struct sigcontext *uc = puc; -#else - struct ucontext *uc = puc; -#endif - unsigned long pc; - int trapno; - -#ifndef REG_EIP -/* for glibc 2.1 */ -#define REG_EIP EIP -#define REG_ERR ERR -#define REG_TRAPNO TRAPNO -#endif - pc = EIP_sig(uc); - trapno = TRAP_sig(uc); - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - trapno == 0xe ? 
- (ERROR_sig(uc) >> 1) & 1 : 0, - &MASK_sig(uc), puc); -} - -#elif defined(__x86_64__) - -#ifdef __NetBSD__ -#define PC_sig(context) _UC_MACHINE_PC(context) -#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO]) -#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR]) -#define MASK_sig(context) ((context)->uc_sigmask) -#elif defined(__OpenBSD__) -#define PC_sig(context) ((context)->sc_rip) -#define TRAP_sig(context) ((context)->sc_trapno) -#define ERROR_sig(context) ((context)->sc_err) -#define MASK_sig(context) ((context)->sc_mask) -#elif defined (__FreeBSD__) || defined(__DragonFly__) -#include <ucontext.h> - -#define PC_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_rip)) -#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno) -#define ERROR_sig(context) ((context)->uc_mcontext.mc_err) -#define MASK_sig(context) ((context)->uc_sigmask) -#else -#define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP]) -#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO]) -#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR]) -#define MASK_sig(context) ((context)->uc_sigmask) -#endif - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - unsigned long pc; -#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__) - ucontext_t *uc = puc; -#elif defined(__OpenBSD__) - struct sigcontext *uc = puc; -#else - struct ucontext *uc = puc; -#endif - - pc = PC_sig(uc); - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - TRAP_sig(uc) == 0xe ? - (ERROR_sig(uc) >> 1) & 1 : 0, - &MASK_sig(uc), puc); -} - -#elif defined(_ARCH_PPC) - -/*********************************************************************** - * signal context platform-specific definitions - * From Wine - */ -#ifdef linux -/* All Registers access - only for local access */ -# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name) -/* Gpr Registers access */ -# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context) -# define IAR_sig(context) REG_sig(nip, context) /* Program counter */ -# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */ -# define CTR_sig(context) REG_sig(ctr, context) /* Count register */ -# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */ -# define LR_sig(context) REG_sig(link, context) /* Link register */ -# define CR_sig(context) REG_sig(ccr, context) /* Condition register */ -/* Float Registers access */ -# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num]) -# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4))) -/* Exception Registers access */ -# define DAR_sig(context) REG_sig(dar, context) -# define DSISR_sig(context) REG_sig(dsisr, context) -# define TRAP_sig(context) REG_sig(trap, context) -#endif /* linux */ - -#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) -#include <ucontext.h> -# define IAR_sig(context) ((context)->uc_mcontext.mc_srr0) -# define MSR_sig(context) ((context)->uc_mcontext.mc_srr1) -# define CTR_sig(context) ((context)->uc_mcontext.mc_ctr) -# define XER_sig(context) ((context)->uc_mcontext.mc_xer) -# define LR_sig(context) ((context)->uc_mcontext.mc_lr) -# define CR_sig(context) ((context)->uc_mcontext.mc_cr) -/* Exception Registers access */ -# define DAR_sig(context) ((context)->uc_mcontext.mc_dar) -# define DSISR_sig(context) ((context)->uc_mcontext.mc_dsisr) -# 
define TRAP_sig(context) ((context)->uc_mcontext.mc_exc) -#endif /* __FreeBSD__|| __FreeBSD_kernel__ */ - -#ifdef __APPLE__ -# include <sys/ucontext.h> -typedef struct ucontext SIGCONTEXT; -/* All Registers access - only for local access */ -# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name) -# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name) -# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name) -# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name) -/* Gpr Registers access */ -# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context) -# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */ -# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */ -# define CTR_sig(context) REG_sig(ctr, context) -# define XER_sig(context) REG_sig(xer, context) /* Link register */ -# define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */ -# define CR_sig(context) REG_sig(cr, context) /* Condition register */ -/* Float Registers access */ -# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context) -# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context)) -/* Exception Registers access */ -# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */ -# define DSISR_sig(context) EXCEPREG_sig(dsisr, context) -# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */ -#endif /* __APPLE__ */ - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; -#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) - ucontext_t *uc = puc; -#else - struct ucontext *uc = puc; -#endif - unsigned long pc; - int is_write; - - pc = IAR_sig(uc); - is_write = 0; -#if 0 - /* ppc 4xx case */ - if (DSISR_sig(uc) & 0x00800000) - is_write = 1; -#else - if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000)) - is_write = 1; -#endif - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, &uc->uc_sigmask, puc); -} - -#elif defined(__alpha__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - struct ucontext *uc = puc; - uint32_t *pc = uc->uc_mcontext.sc_pc; - uint32_t insn = *pc; - int is_write = 0; - - /* XXX: need kernel patch to get write flag faster */ - switch (insn >> 26) { - case 0x0d: // stw - case 0x0e: // stb - case 0x0f: // stq_u - case 0x24: // stf - case 0x25: // stg - case 0x26: // sts - case 0x27: // stt - case 0x2c: // stl - case 0x2d: // stq - case 0x2e: // stl_c - case 0x2f: // stq_c - is_write = 1; - } - - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, &uc->uc_sigmask, puc); -} -#elif defined(__sparc__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - int is_write; - uint32_t insn; -#if !defined(__arch64__) || defined(CONFIG_SOLARIS) - uint32_t *regs = (uint32_t *)(info + 1); - void *sigmask = (regs + 20); - /* XXX: is there a standard glibc define ? 
*/ - unsigned long pc = regs[1]; -#else -#ifdef __linux__ - struct sigcontext *sc = puc; - unsigned long pc = sc->sigc_regs.tpc; - void *sigmask = (void *)sc->sigc_mask; -#elif defined(__OpenBSD__) - struct sigcontext *uc = puc; - unsigned long pc = uc->sc_pc; - void *sigmask = (void *)(long)uc->sc_mask; -#endif -#endif - - /* XXX: need kernel patch to get write flag faster */ - is_write = 0; - insn = *(uint32_t *)pc; - if ((insn >> 30) == 3) { - switch((insn >> 19) & 0x3f) { - case 0x05: // stb - case 0x15: // stba - case 0x06: // sth - case 0x16: // stha - case 0x04: // st - case 0x14: // sta - case 0x07: // std - case 0x17: // stda - case 0x0e: // stx - case 0x1e: // stxa - case 0x24: // stf - case 0x34: // stfa - case 0x27: // stdf - case 0x37: // stdfa - case 0x26: // stqf - case 0x36: // stqfa - case 0x25: // stfsr - case 0x3c: // casa - case 0x3e: // casxa - is_write = 1; - break; - } - } - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, sigmask, NULL); -} - -#elif defined(__arm__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - struct ucontext *uc = puc; - unsigned long pc; - int is_write; - -#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3)) - pc = uc->uc_mcontext.gregs[R15]; -#else - pc = uc->uc_mcontext.arm_pc; -#endif - /* XXX: compute is_write */ - is_write = 0; - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, - &uc->uc_sigmask, puc); -} - -#elif defined(__mc68000) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - struct ucontext *uc = puc; - unsigned long pc; - int is_write; - - pc = uc->uc_mcontext.gregs[16]; - /* XXX: compute is_write */ - is_write = 0; - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, - &uc->uc_sigmask, puc); -} - -#elif defined(__ia64) - -#ifndef __ISR_VALID - /* This ought to be in <bits/siginfo.h>... */ -# define __ISR_VALID 1 -#endif - -int cpu_signal_handler(int host_signum, void *pinfo, void *puc) -{ - siginfo_t *info = pinfo; - struct ucontext *uc = puc; - unsigned long ip; - int is_write = 0; - - ip = uc->uc_mcontext.sc_ip; - switch (host_signum) { - case SIGILL: - case SIGFPE: - case SIGSEGV: - case SIGBUS: - case SIGTRAP: - if (info->si_code && (info->si_segvflags & __ISR_VALID)) - /* ISR.W (write-access) is bit 33: */ - is_write = (info->si_isr >> 33) & 1; - break; - - default: - break; - } - return handle_cpu_signal(ip, (unsigned long)info->si_addr, - is_write, - (sigset_t *)&uc->uc_sigmask, puc); -} - -#elif defined(__s390__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - struct ucontext *uc = puc; - unsigned long pc; - uint16_t *pinsn; - int is_write = 0; - - pc = uc->uc_mcontext.psw.addr; - - /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead - of the normal 2 arguments. The 3rd argument contains the "int_code" - from the hardware which does in fact contain the is_write value. - The rt signal handler, as far as I can tell, does not give this value - at all. Not that we could get to it from here even if it were. */ - /* ??? This is not even close to complete, since it ignores all - of the read-modify-write instructions. 
*/ - pinsn = (uint16_t *)pc; - switch (pinsn[0] >> 8) { - case 0x50: /* ST */ - case 0x42: /* STC */ - case 0x40: /* STH */ - is_write = 1; - break; - case 0xc4: /* RIL format insns */ - switch (pinsn[0] & 0xf) { - case 0xf: /* STRL */ - case 0xb: /* STGRL */ - case 0x7: /* STHRL */ - is_write = 1; - } - break; - case 0xe3: /* RXY format insns */ - switch (pinsn[2] & 0xff) { - case 0x50: /* STY */ - case 0x24: /* STG */ - case 0x72: /* STCY */ - case 0x70: /* STHY */ - case 0x8e: /* STPQ */ - case 0x3f: /* STRVH */ - case 0x3e: /* STRV */ - case 0x2f: /* STRVG */ - is_write = 1; - } - break; - } - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, &uc->uc_sigmask, puc); -} - -#elif defined(__mips__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - struct ucontext *uc = puc; - greg_t pc = uc->uc_mcontext.pc; - int is_write; - - /* XXX: compute is_write */ - is_write = 0; - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, &uc->uc_sigmask, puc); -} - -#elif defined(__hppa__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - struct siginfo *info = pinfo; - struct ucontext *uc = puc; - unsigned long pc = uc->uc_mcontext.sc_iaoq[0]; - uint32_t insn = *(uint32_t *)pc; - int is_write = 0; - - /* XXX: need kernel patch to get write flag faster. */ - switch (insn >> 26) { - case 0x1a: /* STW */ - case 0x19: /* STH */ - case 0x18: /* STB */ - case 0x1b: /* STWM */ - is_write = 1; - break; - - case 0x09: /* CSTWX, FSTWX, FSTWS */ - case 0x0b: /* CSTDX, FSTDX, FSTDS */ - /* Distinguish from coprocessor load ... */ - is_write = (insn >> 9) & 1; - break; - - case 0x03: - switch ((insn >> 6) & 15) { - case 0xa: /* STWS */ - case 0x9: /* STHS */ - case 0x8: /* STBS */ - case 0xe: /* STWAS */ - case 0xc: /* STBYS */ - is_write = 1; - } - break; - } - - return handle_cpu_signal(pc, (unsigned long)info->si_addr, - is_write, &uc->uc_sigmask, puc); -} - -#else - -#error host CPU specific signal handler needed - -#endif - -#endif /* !defined(CONFIG_SOFTMMU) */ diff --git a/darwin-user/signal.c b/darwin-user/signal.c index 48620184ee..e2adca3918 100644 --- a/darwin-user/signal.c +++ b/darwin-user/signal.c @@ -21,7 +21,6 @@ #include <string.h> #include <stdarg.h> #include <unistd.h> -#include <signal.h> #include <errno.h> #include <sys/ucontext.h> @@ -32,8 +31,6 @@ #undef uc_link #endif -#include <signal.h> - #include "qemu.h" #include "qemu-common.h" diff --git a/darwin-user/syscall.c b/darwin-user/syscall.c index 060acc889d..f3cc1f83a6 100644 --- a/darwin-user/syscall.c +++ b/darwin-user/syscall.c @@ -977,7 +977,7 @@ long do_unix_syscall_indirect(void *cpu_env, int num) #elif TARGET_PPC { int i; - /* XXX: not really needed those regs are volatile accross calls */ + /* XXX: not really needed those regs are volatile across calls */ uint32_t **regs = ((CPUPPCState*)cpu_env)->gpr; for(i = 11; i > 3; i--) *regs[i] = *regs[i-1]; diff --git a/default-configs/alpha-softmmu.mak b/default-configs/alpha-softmmu.mak new file mode 100644 index 0000000000..abadcffec9 --- /dev/null +++ b/default-configs/alpha-softmmu.mak @@ -0,0 +1,9 @@ +# Default configuration for alpha-softmmu + +include pci.mak +CONFIG_SERIAL=y +CONFIG_I8254=y +CONFIG_VGA_PCI=y +CONFIG_IDE_CORE=y +CONFIG_IDE_QDEV=y +CONFIG_VMWARE_VGA=y diff --git a/default-configs/pci.mak b/default-configs/pci.mak index 0471efbcd8..22bd3502d4 100644 --- a/default-configs/pci.mak +++ b/default-configs/pci.mak @@ -3,6 +3,7 @@ CONFIG_VIRTIO_PCI=y 
CONFIG_VIRTIO=y CONFIG_USB_UHCI=y CONFIG_USB_OHCI=y +CONFIG_USB_EHCI=y CONFIG_NE2000_PCI=y CONFIG_EEPRO100_PCI=y CONFIG_PCNET_PCI=y diff --git a/default-configs/s390x-linux-user.mak b/default-configs/s390x-linux-user.mak new file mode 100644 index 0000000000..a243c99874 --- /dev/null +++ b/default-configs/s390x-linux-user.mak @@ -0,0 +1 @@ +# Default configuration for s390x-linux-user @@ -184,6 +184,9 @@ enum bfd_architecture #define bfd_mach_sh5 0x50 bfd_arch_alpha, /* Dec Alpha */ #define bfd_mach_alpha 1 +#define bfd_mach_alpha_ev4 0x10 +#define bfd_mach_alpha_ev5 0x20 +#define bfd_mach_alpha_ev6 0x30 bfd_arch_arm, /* Advanced Risc Machines ARM */ #define bfd_mach_arm_unknown 0 #define bfd_mach_arm_2 1 @@ -205,7 +205,7 @@ void target_disas(FILE *out, target_ulong code, target_ulong size, int flags) disasm_info.mach = bfd_mach_sh4; print_insn = print_insn_sh; #elif defined(TARGET_ALPHA) - disasm_info.mach = bfd_mach_alpha; + disasm_info.mach = bfd_mach_alpha_ev6; print_insn = print_insn_alpha; #elif defined(TARGET_CRIS) if (flags != 32) { diff --git a/docs/qdev-device-use.txt b/docs/qdev-device-use.txt index 4bb2be8850..057c322090 100644 --- a/docs/qdev-device-use.txt +++ b/docs/qdev-device-use.txt @@ -8,20 +8,23 @@ more buses for children. You can specify a device's parent bus with A device typically has a device address on its parent bus. For buses where this address can be configured, devices provide a bus-specific -property. These are - - bus property name value format - PCI addr %x.%x (dev.fn, .fn optional) - I2C address %u - SCSI scsi-id %u +property. Examples: + + bus property name value format + PCI addr %x.%x (dev.fn, .fn optional) + I2C address %u + SCSI scsi-id %u + IDE unit %u + HDA cad %u + virtio-serial-bus nr %u + ccid-bus slot %u + USB port %d(.%d)* (port.port...) Example: device i440FX-pcihost is on the root bus, and provides a PCI bus named pci.0. To put a FOO device into its slot 4, use -device FOO,bus=/i440FX-pcihost/pci.0,addr=4. The abbreviated form bus=pci.0 also works as long as the bus name is unique. -Note: the USB device address can't be controlled at this time. - === Block Devices === A QEMU block device (drive) has a host and a guest part. @@ -44,28 +47,43 @@ The new way keeps the parts separate: you create the host part with The various old ways to define drives all boil down to the common form - -drive if=TYPE,index=IDX,bus=BUS,unit=UNIT,HOST-OPTS... + -drive if=TYPE,bus=BUS,unit=UNIT,OPTS... TYPE, BUS and UNIT identify the controller device, which of its buses to use, and the drive's address on that bus. Details depend on TYPE. -IDX is an alternative way to specify BUS and UNIT. + +Instead of bus=BUS,unit=UNIT, you can also say index=IDX. In the new way, this becomes something like -drive if=none,id=DRIVE-ID,HOST-OPTS... -device DEVNAME,drive=DRIVE-ID,DEV-OPTS... -The -device argument differs in detail for each kind of drive: +The old OPTS get split into HOST-OPTS and DEV-OPTS as follows: -* if=ide +* file, format, snapshot, cache, aio, readonly, rerror, werror go into + HOST-OPTS. + +* cyls, head, secs and trans go into HOST-OPTS. Future work: they + should go into DEV-OPTS instead. + +* serial goes into DEV-OPTS, for devices supporting serial numbers. + For other devices, it goes nowhere. - -device ide-drive,drive=DRIVE-ID,bus=IDE-BUS,unit=UNIT +* media is special. In the old way, it selects disk vs. CD-ROM with + if=ide, if=scsi and if=xen. The new way uses DEVNAME for that. + Additionally, readonly=on goes into HOST-OPTS. 
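(Illustrative example, not part of the patch: under these rules the old

    -drive file=disk.qcow2,if=ide,index=0,media=disk,cache=writeback

could be written in the new style as

    -drive if=none,id=disk0,file=disk.qcow2,cache=writeback
    -device ide-hd,drive=disk0,bus=ide.0,unit=0

i.e. file and cache stay on -drive, while media and the bus address are
expressed on the -device side.)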
- where IDE-BUS identifies an IDE bus, normally either ide.0 or ide.1, - and UNIT is either 0 or 1. +* addr is special, see if=virtio below. - Bug: new way does not work for ide.1 unit 0 (in old terms: index=2) - unless you disable the default CD-ROM with -nodefaults. +The -device argument differs in detail for each type of drive: + +* if=ide + + -device DEVNAME,drive=DRIVE-ID,bus=IDE-BUS,unit=UNIT + + where DEVNAME is either ide-hd or ide-cd, IDE-BUS identifies an IDE + bus, normally either ide.0 or ide.1, and UNIT is either 0 or 1. * if=scsi @@ -77,27 +95,25 @@ The -device argument differs in detail for each kind of drive: As for all PCI devices, you can add bus=PCI-BUS,addr=DEVFN to control the PCI device address. - This SCSI controller a single SCSI bus, named ID.0. Put a disk on - it: + This SCSI controller provides a single SCSI bus, named ID.0. Put a + disk on it: - -device scsi-disk,drive=DRIVE-ID,bus=ID.0,scsi-id=SCSI-ID,removable=RMB + -device DEVNAME,drive=DRIVE-ID,bus=ID.0,scsi-id=UNIT - The (optional) removable parameter lets you override the SCSI INQUIRY - removable (RMB) bit for non CD-ROM devices. It is ignored for CD-ROM devices - which are always removable. RMB is "on" or "off". + where DEVNAME is either scsi-hd, scsi-cd or scsi-generic. * if=floppy - -global isa-fdc,driveA=DRIVE-ID,driveB=DRIVE-ID + -global isa-fdc.driveA=DRIVE-ID + -global isa-fdc.driveB=DRIVE-ID This is -global instead of -device, because the floppy controller is created automatically, and we want to configure that one, not create a second one (which isn't possible anyway). - Omitting a drive parameter makes that drive empty. - - Bug: driveA works only if you disable the default floppy drive with - -nodefaults. + Without any -global isa-fdc,... you get an empty driveA and no + driveB. You can use -nodefaults to suppress the default driveA, see + "Default Devices". * if=virtio @@ -105,11 +121,12 @@ The -device argument differs in detail for each kind of drive: This lets you control PCI device class and MSI-X vectors. - IOEVENTFD controls whether or not ioeventfd is used for virtqueue notify. It - can be set to on (default) or off. + IOEVENTFD controls whether or not ioeventfd is used for virtqueue + notify. It can be set to on (default) or off. As for all PCI devices, you can add bus=PCI-BUS,addr=DEVFN to - control the PCI device address. + control the PCI device address. This replaces option addr available + with -drive if=virtio. * if=pflash, if=mtd, if=sd, if=xen are not yet available with -device @@ -117,15 +134,20 @@ For USB devices, the old way is actually different: -usbdevice disk:format=FMT:FILENAME -Provides much less control than -drive's HOST-OPTS... The new way -fixes that: +Provides much less control than -drive's OPTS... The new way fixes +that: -device usb-storage,drive=DRIVE-ID,removable=RMB -The removable parameter gives control over the SCSI INQUIRY removable (RMB) -bit. USB thumbdrives usually set removable=on, while USB hard disks set -removable=off. See the if=scsi description above for details on the removable -parameter, which applies only to scsi-disk devices and not to scsi-generic. +The removable parameter gives control over the SCSI INQUIRY removable +(RMB) bit. USB thumbdrives usually set removable=on, while USB hard +disks set removable=off. + +Bug: usb-storage pretends to be a block device, but it's really a SCSI +controller that can serve only a single device, which it creates +automatically. 
The automatic creation guesses what kind of guest part +to create from the host part, like -drive if=scsi. Host and guest +part are not cleanly separated. === Character Devices === @@ -170,7 +192,9 @@ The appropriate DEVNAME depends on the machine type. For type "pc": -device usb-braille,chardev=braille,vendorid=VID,productid=PRID -chardev braille,id=braille -* -virtioconsole is still being worked on +* -virtioconsole becomes + -device virtio-serial-pci,class=C,vectors=V,ioeventfd=IOEVENTFD,max_ports=N + -device virtconsole,is_console=NUM,nr=NR,name=NAME LEGACY-CHARDEV translates to -chardev HOST-OPTS... as follows: @@ -219,38 +243,29 @@ LEGACY-CHARDEV to refer to a host part defined with -chardev. === Network Devices === -A QEMU network device (NIC) has a host and a guest part. +Host and guest part of network devices have always been separate. -The old ways to define NICs define host and guest part together. It -looks like this: +The old way to define the guest part looks like this: - -net nic,vlan=VLAN,macaddr=MACADDR,model=MODEL,name=ID,addr=STR,vectors=V + -net nic,netdev=NET-ID,macaddr=MACADDR,model=MODEL,name=ID,addr=STR,vectors=V Except for USB it looks like this: - -usbdevice net:vlan=VLAN,macaddr=MACADDR,name=ID,addr=STR,vectors=V + -usbdevice net:netdev=NET-ID,macaddr=MACADDR,name=ID -The new way keeps the parts separate: you create the host part with --netdev, and the guest device with -device, like this: +The new way is -device: - -netdev type=TYPE,id=NET-ID -device DEVNAME,netdev=NET-ID,mac=MACADDR,DEV-OPTS... -Unlike the old way, this creates just a network device, not a VLAN. -If you really want a VLAN, create it the usual way, then create the -guest device like this: - - -device DEVNAME,vlan=VLAN,mac=MACADDR,DEV-OPTS... - DEVNAME equals MODEL, except for virtio you have to name the virtio device appropriate for the bus (virtio-net-pci for PCI), and for USB -NIC you have to use usb-net. +you have to use usb-net. The old name=ID parameter becomes the usual id=ID with -device. For PCI devices, you can add bus=PCI-BUS,addr=DEVFN to control the PCI device address, as usual. The old -net nic provides parameter addr -for that, it is silently ignored when the NIC is not a PCI device. +for that, which is silently ignored when the NIC is not a PCI device. For virtio-net-pci, you can control whether or not ioeventfd is used for virtqueue notify by setting ioeventfd= to on or off (default). @@ -264,20 +279,25 @@ devices and ne2k_isa are. Some PCI devices aren't available with -net nic, e.g. i82558a. -Bug: usb-net does not work, yet. Patch posted. +To connect to a VLAN instead of an ordinary host part, replace +netdev=NET-ID by vlan=VLAN. === Graphics Devices === Host and guest part of graphics devices have always been separate. -The old way to define the guest graphics device is -vga VGA. +The old way to define the guest graphics device is -vga VGA. Not all +machines support all -vga options. -The new way is -device. Map from -vga argument to -device: +The new way is -device. The mapping from -vga argument to -device +depends on the machine type. For machine "pc", it's: std -device VGA cirrus -device cirrus-vga vmware -device vmware-svga - xenfb not yet available with -device + qxl -device qxl-vga + none -nodefaults + disables more than just VGA, see "Default Devices" As for all PCI devices, you can add bus=PCI-BUS,addr=DEVFN to control the PCI device address. @@ -285,13 +305,16 @@ the PCI device address. 
-device VGA supports properties bios-offset and bios-size, but they aren't used with machine type "pc". -Bug: -device cirrus-vga and -device vmware-svga require -nodefaults. +For machine "isapc", it's -Bug: the new way requires PCI; ISA VGA is not yet available with --device. + std -device isa-vga + cirrus not yet available with -device + none -nodefaults + disables more than just VGA, see "Default Devices" -Bug: the new way doesn't work for machine type "pc", because it -violates obscure device initialization ordering constraints. +Bug: the new way doesn't work for machine types "pc" and "isapc", +because it violates obscure device initialization ordering +constraints. === Audio Devices === @@ -308,6 +331,7 @@ Map from -soundhw sound card name to -device: cs4231a -device cs4231a,iobase=IOADDR,irq=IRQ,dma=DMA es1370 -device ES1370 gus -device gus,iobase=IOADDR,irq=IRQ,dma=DMA,freq=F + hda -device intel-hda,msi=MSI -device hda-duplex sb16 -device sb16,iobase=IOADDR,irq=IRQ,dma=DMA,dma16=DMA16,version=V adlib not yet available with -device pcspk not yet available with -device @@ -321,9 +345,10 @@ The old way to define a virtual USB device is -usbdevice DRIVER:OPTS... The new way is -device DEVNAME,DEV-OPTS... Details depend on DRIVER: +* ccid -device usb-ccid +* keyboard -device usb-kbd * mouse -device usb-mouse * tablet -device usb-tablet -* keyboard -device usb-kdb * wacom-tablet -device usb-wacom-tablet * host:... See "Host Device Assignment" * disk:... See "Block Devices" @@ -353,7 +378,7 @@ The new way is -device pci-assign,host=ADDR,iommu=IOMMU,id=ID -The old dma=none becomes iommu=0 with -device. +The old dma=none becomes iommu=off with -device. The old way to assign a host USB device is @@ -365,4 +390,27 @@ The new way is -device usb-host,hostbus=BUS,hostaddr=ADDR,vendorid=VID,productid=PRID -where left out or zero BUS, ADDR, VID, PRID serve as wildcard. +Omitted options match anything, just like the old way's wildcard. + +=== Default Devices === + +QEMU creates a number of devices by default, depending on the machine +type. + +-device DEVNAME... and global DEVNAME... suppress default devices for +some DEVNAMEs: + + default device suppressing DEVNAMEs + CD-ROM ide-cd, ide-drive, scsi-cd + isa-fdc's driveA isa-fdc + parallel isa-parallel + serial isa-serial + VGA VGA, cirrus-vga, vmware-svga + virtioconsole virtio-serial-pci, virtio-serial-s390, virtio-serial + +The default NIC is connected to a default part created along with it. +It is *not* suppressed by configuring a NIC with -device (you may call +that a bug). -net and -netdev suppress the default NIC. + +-nodefaults suppresses all the default devices mentioned above, plus a +few other things such as default SD-Card drive and default monitor. diff --git a/docs/usb2.txt b/docs/usb2.txt new file mode 100644 index 0000000000..b283c138e0 --- /dev/null +++ b/docs/usb2.txt @@ -0,0 +1,38 @@ + +USB 2.0 Quick Start +=================== + +The QEMU EHCI Adapter does *not* support companion controllers. That +implies there are two completely separate USB busses: One USB 1.1 bus +driven by the UHCI controller and one USB 2.0 bus driven by the EHCI +controller. Devices must be attached to the correct controller +manually. + +The '-usb' switch will make qemu create the UHCI controller as part of +the PIIX3 chipset. The USB 1.1 bus will carry the name "usb.0". + +You can use the standard -device switch to add a EHCI controller to +your virtual machine. 
It is strongly recommended to specify an ID for
+the controller so the USB 2.0 bus gets an individual name, for example
+'-device usb-ehci,id=ehci'. This will give you a USB 2.0 bus named
+"ehci.0".
+
+I strongly recommend also using -device to attach USB devices because
+you can specify the bus they should be attached to this way. Here is
+a complete example:
+
+    qemu -M pc ${otheroptions} \
+        -drive if=none,id=usbstick,file=/path/to/image \
+        -usb \
+        -device usb-ehci,id=ehci \
+        -device usb-tablet,bus=usb.0 \
+        -device usb-storage,bus=ehci.0,drive=usbstick
+
+This attaches a USB tablet to the UHCI adapter and a USB mass storage
+device to the EHCI adapter.
+
+enjoy,
+  Gerd
+
+--
+Gerd Hoffmann <kraxel@redhat.com>
diff --git a/error.c b/error.c
new file mode 100644
index 0000000000..867eec2c1a
--- /dev/null
+++ b/error.c
@@ -0,0 +1,140 @@
+/*
+ * QEMU Error Objects
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2. See
+ * the COPYING.LIB file in the top-level directory.
+ */
+#include "error.h"
+#include "error_int.h"
+#include "qemu-objects.h"
+#include "qerror.h"
+#include <assert.h>
+
+struct Error
+{
+    QDict *obj;
+    const char *fmt;
+    char *msg;
+};
+
+void error_set(Error **errp, const char *fmt, ...)
+{
+    Error *err;
+    va_list ap;
+
+    if (errp == NULL) {
+        return;
+    }
+
+    err = qemu_mallocz(sizeof(*err));
+
+    va_start(ap, fmt);
+    err->obj = qobject_to_qdict(qobject_from_jsonv(fmt, &ap));
+    va_end(ap);
+    err->fmt = fmt;
+
+    *errp = err;
+}
+
+bool error_is_set(Error **errp)
+{
+    return (errp && *errp);
+}
+
+const char *error_get_pretty(Error *err)
+{
+    if (err->msg == NULL) {
+        QString *str;
+        str = qerror_format(err->fmt, err->obj);
+        err->msg = qemu_strdup(qstring_get_str(str));
+        QDECREF(str);
+    }
+
+    return err->msg;
+}
+
+const char *error_get_field(Error *err, const char *field)
+{
+    if (strcmp(field, "class") == 0) {
+        return qdict_get_str(err->obj, field);
+    } else {
+        QDict *dict = qdict_get_qdict(err->obj, "data");
+        return qdict_get_str(dict, field);
+    }
+}
+
+QDict *error_get_data(Error *err)
+{
+    QDict *data = qdict_get_qdict(err->obj, "data");
+    QINCREF(data);
+    return data;
+}
+
+void error_set_field(Error *err, const char *field, const char *value)
+{
+    QDict *dict = qdict_get_qdict(err->obj, "data");
+    return qdict_put(dict, field, qstring_from_str(value));
+}
+
+void error_free(Error *err)
+{
+    if (err) {
+        QDECREF(err->obj);
+        qemu_free(err->msg);
+        qemu_free(err);
+    }
+}
+
+bool error_is_type(Error *err, const char *fmt)
+{
+    const char *error_class;
+    char *ptr;
+    char *end;
+
+    ptr = strstr(fmt, "'class': '");
+    assert(ptr != NULL);
+    ptr += strlen("'class': '");
+
+    end = strchr(ptr, '\'');
+    assert(end != NULL);
+
+    error_class = error_get_field(err, "class");
+    if (strlen(error_class) != end - ptr) {
+        return false;
+    }
+
+    return strncmp(ptr, error_class, end - ptr) == 0;
+}
+
+void error_propagate(Error **dst_err, Error *local_err)
+{
+    if (dst_err) {
+        *dst_err = local_err;
+    } else if (local_err) {
+        error_free(local_err);
+    }
+}
+
+QObject *error_get_qobject(Error *err)
+{
+    QINCREF(err->obj);
+    return QOBJECT(err->obj);
+}
+
+void error_set_qobject(Error **errp, QObject *obj)
+{
+    Error *err;
+    if (errp == NULL) {
+        return;
+    }
+    err = qemu_mallocz(sizeof(*err));
+    err->obj = qobject_to_qdict(obj);
+    qobject_incref(obj);
+
+    *errp = err;
+}
diff --git a/error.h b/error.h
new file mode 100644
index 0000000000..003c855e65
--- /dev/null
+++ b/error.h
@@ -0,0 +1,70 @@
+/*
+ * QEMU Error Objects
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2. See
+ * the COPYING.LIB file in the top-level directory.
+ */
+#ifndef ERROR_H
+#define ERROR_H
+
+#include <stdbool.h>
+
+/**
+ * A class representing internal errors within QEMU. An error has a string
+ * typename and optionally a set of named string parameters.
+ */
+typedef struct Error Error;
+
+/**
+ * Set an indirect pointer to an error given a printf-style format parameter.
+ * Currently, qerror.h defines these error formats. This function is not
+ * meant to be used outside of QEMU.
+ */
+void error_set(Error **err, const char *fmt, ...)
+    __attribute__((format(printf, 2, 3)));
+
+/**
+ * Returns true if an indirect pointer to an error is pointing to a valid
+ * error object.
+ */
+bool error_is_set(Error **err);
+
+/**
+ * Get a human readable representation of an error object.
+ */
+const char *error_get_pretty(Error *err);
+
+/**
+ * Get an individual named error field.
+ */
+const char *error_get_field(Error *err, const char *field);
+
+/**
+ * Set an individual named error field.
+ */
+void error_set_field(Error *err, const char *field, const char *value);
+
+/**
+ * Propagate an error to an indirect pointer to an error. This function will
+ * always transfer ownership of the error reference and handles the case where
+ * dst_err is NULL correctly.
+ */
+void error_propagate(Error **dst_err, Error *local_err);
+
+/**
+ * Free an error object.
+ */
+void error_free(Error *err);
+
+/**
+ * Determine if an error is of a specific type (based on the qerror format).
+ * Non-QEMU users should get the `class' field to identify the error type.
+ */
+bool error_is_type(Error *err, const char *fmt);
+
+#endif
diff --git a/error_int.h b/error_int.h
new file mode 100644
index 0000000000..5e3942405a
--- /dev/null
+++ b/error_int.h
@@ -0,0 +1,29 @@
+/*
+ * QEMU Error Objects
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2. See
+ * the COPYING.LIB file in the top-level directory.
+ */
+#ifndef QEMU_ERROR_INT_H
+#define QEMU_ERROR_INT_H
+
+#include "qemu-common.h"
+#include "qobject.h"
+#include "qdict.h"
+#include "error.h"
+
+/**
+ * Internal QEMU functions for working with Error.
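A minimal usage sketch of the public Error API declared in error.h (the
QERR_DEVICE_NOT_FOUND format and the device name are illustrative
assumptions, and <stdio.h> is assumed for the fprintf call; any format
defined in qerror.h is passed to error_set the same way):

    Error *err = NULL;

    error_set(&err, QERR_DEVICE_NOT_FOUND, "ide1-cd0");
    if (error_is_set(&err)) {
        /* turn the structured error into a human readable message */
        fprintf(stderr, "%s\n", error_get_pretty(err));
        error_free(err);
    }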
+ * + * These are used to convert QErrors to Errors + */ +QDict *error_get_data(Error *err); +QObject *error_get_qobject(Error *err); +void error_set_qobject(Error **errp, QObject *obj); + +#endif diff --git a/exec-all.h b/exec-all.h index 7c2d29ff98..2a13a9535e 100644 --- a/exec-all.h +++ b/exec-all.h @@ -43,7 +43,11 @@ typedef ram_addr_t tb_page_addr_t; typedef struct TranslationBlock TranslationBlock; /* XXX: make safe guess about sizes */ +#if (HOST_LONG_BITS == 32) && (TARGET_LONG_BITS == 64) +#define MAX_OP_PER_INSTR 128 +#else #define MAX_OP_PER_INSTR 96 +#endif #if HOST_LONG_BITS == 32 #define MAX_OPC_PARAM_PER_ARG 2 @@ -95,7 +99,6 @@ void QEMU_NORETURN cpu_loop_exit(void); int page_unprotect(target_ulong address, unsigned long pc, void *puc); void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, int is_cpu_write_access); -void tb_invalidate_page_range(target_ulong start, target_ulong end); void tlb_flush_page(CPUState *env, target_ulong addr); void tlb_flush(CPUState *env, int flush_global); #if !defined(CONFIG_USER_ONLY) @@ -322,7 +325,7 @@ static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong add } pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK; if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) { -#if defined(TARGET_SPARC) || defined(TARGET_MIPS) +#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC) do_unassigned_access(addr, 0, 1, 0, 4); #else cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr); @@ -32,10 +32,10 @@ #include "hw/qdev.h" #include "osdep.h" #include "kvm.h" +#include "hw/xen.h" #include "qemu-timer.h" #if defined(CONFIG_USER_ONLY) #include <qemu.h> -#include <signal.h> #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) #include <sys/param.h> #if __FreeBSD_version >= 700104 @@ -51,6 +51,8 @@ #include <libutil.h> #endif #endif +#else /* !CONFIG_USER_ONLY */ +#include "xen-mapcache.h" #endif //#define DEBUG_TB_INVALIDATE @@ -2085,7 +2087,7 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, /* we modify the TLB cache so that the dirty bit will be set again when accessing the range */ start1 = (unsigned long)qemu_safe_ram_ptr(start); - /* Chek that we don't span multiple blocks - this breaks the + /* Check that we don't span multiple blocks - this breaks the address comparisons below. */ if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1 != (end - 1) - start) { @@ -2916,6 +2918,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name, } } + new_block->offset = find_ram_offset(size); if (host) { new_block->host = host; new_block->flags |= RAM_PREALLOC_MASK; @@ -2933,18 +2936,28 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name, #endif } else { #if defined(TARGET_S390X) && defined(CONFIG_KVM) - /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */ - new_block->host = mmap((void*)0x1000000, size, + /* S390 KVM requires the topmost vma of the RAM to be smaller than + an system defined value, which is at least 256GB. Larger systems + have larger values. We put the guest between the end of data + segment (system break) and this value. We use 32GB as a base to + have enough room for the system break to grow. 
*/ + new_block->host = mmap((void*)0x800000000, size, PROT_EXEC|PROT_READ|PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, -1, 0); + MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0); + if (new_block->host == MAP_FAILED) { + fprintf(stderr, "Allocating RAM failed\n"); + abort(); + } #else - new_block->host = qemu_vmalloc(size); + if (xen_mapcache_enabled()) { + xen_ram_alloc(new_block->offset, size); + } else { + new_block->host = qemu_vmalloc(size); + } #endif qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE); } } - - new_block->offset = find_ram_offset(size); new_block->length = size; QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next); @@ -2965,6 +2978,19 @@ ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size) return qemu_ram_alloc_from_ptr(dev, name, size, NULL); } +void qemu_ram_free_from_ptr(ram_addr_t addr) +{ + RAMBlock *block; + + QLIST_FOREACH(block, &ram_list.blocks, next) { + if (addr == block->offset) { + QLIST_REMOVE(block, next); + qemu_free(block); + return; + } + } +} + void qemu_ram_free(ram_addr_t addr) { RAMBlock *block; @@ -2989,7 +3015,11 @@ void qemu_ram_free(ram_addr_t addr) #if defined(TARGET_S390X) && defined(CONFIG_KVM) munmap(block->host, block->length); #else - qemu_vfree(block->host); + if (xen_mapcache_enabled()) { + qemu_invalidate_entry(block->host); + } else { + qemu_vfree(block->host); + } #endif } qemu_free(block); @@ -3078,6 +3108,16 @@ void *qemu_get_ram_ptr(ram_addr_t addr) QLIST_REMOVE(block, next); QLIST_INSERT_HEAD(&ram_list.blocks, block, next); } + if (xen_mapcache_enabled()) { + /* We need to check if the requested address is in the RAM + * because we don't want to map the entire memory in QEMU. + */ + if (block->offset == 0) { + return qemu_map_cache(addr, 0, 1); + } else if (block->host == NULL) { + block->host = xen_map_block(block->offset, block->length); + } + } return block->host + (addr - block->offset); } } @@ -3097,6 +3137,16 @@ void *qemu_safe_ram_ptr(ram_addr_t addr) QLIST_FOREACH(block, &ram_list.blocks, next) { if (addr - block->offset < block->length) { + if (xen_mapcache_enabled()) { + /* We need to check if the requested address is in the RAM + * because we don't want to map the entire memory in QEMU. + */ + if (block->offset == 0) { + return qemu_map_cache(addr, 0, 1); + } else if (block->host == NULL) { + block->host = xen_map_block(block->offset, block->length); + } + } return block->host + (addr - block->offset); } } @@ -3107,17 +3157,48 @@ void *qemu_safe_ram_ptr(ram_addr_t addr) return NULL; } +void qemu_put_ram_ptr(void *addr) +{ + trace_qemu_put_ram_ptr(addr); + + if (xen_mapcache_enabled()) { + RAMBlock *block; + + QLIST_FOREACH(block, &ram_list.blocks, next) { + if (addr == block->host) { + break; + } + } + if (block && block->host) { + xen_unmap_block(block->host, block->length); + block->host = NULL; + } else { + qemu_map_cache_unlock(addr); + } + } +} + int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr) { RAMBlock *block; uint8_t *host = ptr; QLIST_FOREACH(block, &ram_list.blocks, next) { + /* This case append when the block is not mapped. 
*/ + if (block->host == NULL) { + continue; + } if (host - block->host < block->length) { *ram_addr = block->offset + (host - block->host); return 0; } } + + if (xen_mapcache_enabled()) { + *ram_addr = qemu_ram_addr_from_mapcache(ptr); + return 0; + } + return -1; } @@ -3139,7 +3220,7 @@ static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr) #ifdef DEBUG_UNASSIGNED printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); #endif -#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) +#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 0, 0, 0, 1); #endif return 0; @@ -3150,7 +3231,7 @@ static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr) #ifdef DEBUG_UNASSIGNED printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); #endif -#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) +#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 0, 0, 0, 2); #endif return 0; @@ -3161,7 +3242,7 @@ static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr) #ifdef DEBUG_UNASSIGNED printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); #endif -#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) +#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 0, 0, 0, 4); #endif return 0; @@ -3172,7 +3253,7 @@ static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_ #ifdef DEBUG_UNASSIGNED printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); #endif -#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) +#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 1, 0, 0, 1); #endif } @@ -3182,7 +3263,7 @@ static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_ #ifdef DEBUG_UNASSIGNED printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); #endif -#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) +#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 1, 0, 0, 2); #endif } @@ -3192,7 +3273,7 @@ static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_ #ifdef DEBUG_UNASSIGNED printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); #endif -#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) +#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 1, 0, 0, 4); #endif } @@ -3812,6 +3893,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, cpu_physical_memory_set_dirty_flags( addr1, (0xff & ~CODE_DIRTY_FLAG)); } + qemu_put_ram_ptr(ptr); } } else { if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && @@ -3839,9 +3921,9 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, } } else { /* RAM case */ - ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + - (addr & ~TARGET_PAGE_MASK); - memcpy(buf, ptr, l); + ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK); + memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l); + qemu_put_ram_ptr(ptr); } } len -= l; @@ -3882,6 +3964,7 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr, /* ROM/RAM case */ ptr = qemu_get_ram_ptr(addr1); memcpy(ptr, buf, l); + qemu_put_ram_ptr(ptr); } len -= l; buf += l; @@ -4023,6 +4106,15 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len, access_len -= l; } } + if (xen_mapcache_enabled()) { + uint8_t 
*buffer1 = buffer; + uint8_t *end_buffer = buffer + len; + + while (buffer1 < end_buffer) { + qemu_put_ram_ptr(buffer1); + buffer1 += TARGET_PAGE_SIZE; + } + } return; } if (is_write) { diff --git a/fpu/softfloat-native.c b/fpu/softfloat-native.c deleted file mode 100644 index 88486511ee..0000000000 --- a/fpu/softfloat-native.c +++ /dev/null @@ -1,540 +0,0 @@ -/* Native implementation of soft float functions. Only a single status - context is supported */ -#include "softfloat.h" -#include <math.h> -#if defined(CONFIG_SOLARIS) -#include <fenv.h> -#endif - -void set_float_rounding_mode(int val STATUS_PARAM) -{ - STATUS(float_rounding_mode) = val; -#if (defined(CONFIG_BSD) && !defined(__APPLE__) && !defined(__GLIBC__)) || \ - (defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10) - fpsetround(val); -#else - fesetround(val); -#endif -} - -#ifdef FLOATX80 -void set_floatx80_rounding_precision(int val STATUS_PARAM) -{ - STATUS(floatx80_rounding_precision) = val; -} -#endif - -#if defined(CONFIG_BSD) || \ - (defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10) -#define lrint(d) ((int32_t)rint(d)) -#define llrint(d) ((int64_t)rint(d)) -#define lrintf(f) ((int32_t)rint(f)) -#define llrintf(f) ((int64_t)rint(f)) -#define sqrtf(f) ((float)sqrt(f)) -#define remainderf(fa, fb) ((float)remainder(fa, fb)) -#define rintf(f) ((float)rint(f)) -#if !defined(__sparc__) && \ - (defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10) -extern long double rintl(long double); -extern long double scalbnl(long double, int); - -long long -llrintl(long double x) { - return ((long long) rintl(x)); -} - -long -lrintl(long double x) { - return ((long) rintl(x)); -} - -long double -ldexpl(long double x, int n) { - return (scalbnl(x, n)); -} -#endif -#endif - -#if defined(_ARCH_PPC) - -/* correct (but slow) PowerPC rint() (glibc version is incorrect) */ -static double qemu_rint(double x) -{ - double y = 4503599627370496.0; - if (fabs(x) >= y) - return x; - if (x < 0) - y = -y; - y = (x + y) - y; - if (y == 0.0) - y = copysign(y, x); - return y; -} - -#define rint qemu_rint -#endif - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE integer-to-floating-point conversion routines. -*----------------------------------------------------------------------------*/ -float32 int32_to_float32(int v STATUS_PARAM) -{ - return (float32)v; -} - -float32 uint32_to_float32(unsigned int v STATUS_PARAM) -{ - return (float32)v; -} - -float64 int32_to_float64(int v STATUS_PARAM) -{ - return (float64)v; -} - -float64 uint32_to_float64(unsigned int v STATUS_PARAM) -{ - return (float64)v; -} - -#ifdef FLOATX80 -floatx80 int32_to_floatx80(int v STATUS_PARAM) -{ - return (floatx80)v; -} -#endif -float32 int64_to_float32( int64_t v STATUS_PARAM) -{ - return (float32)v; -} -float32 uint64_to_float32( uint64_t v STATUS_PARAM) -{ - return (float32)v; -} -float64 int64_to_float64( int64_t v STATUS_PARAM) -{ - return (float64)v; -} -float64 uint64_to_float64( uint64_t v STATUS_PARAM) -{ - return (float64)v; -} -#ifdef FLOATX80 -floatx80 int64_to_floatx80( int64_t v STATUS_PARAM) -{ - return (floatx80)v; -} -#endif - -/* XXX: this code implements the x86 behaviour, not the IEEE one. 
*/ -#if HOST_LONG_BITS == 32 -static inline int long_to_int32(long a) -{ - return a; -} -#else -static inline int long_to_int32(long a) -{ - if (a != (int32_t)a) - a = 0x80000000; - return a; -} -#endif - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE single-precision conversion routines. -*----------------------------------------------------------------------------*/ -int float32_to_int32( float32 a STATUS_PARAM) -{ - return long_to_int32(lrintf(a)); -} -int float32_to_int32_round_to_zero( float32 a STATUS_PARAM) -{ - return (int)a; -} -int64_t float32_to_int64( float32 a STATUS_PARAM) -{ - return llrintf(a); -} - -int64_t float32_to_int64_round_to_zero( float32 a STATUS_PARAM) -{ - return (int64_t)a; -} - -float64 float32_to_float64( float32 a STATUS_PARAM) -{ - return a; -} -#ifdef FLOATX80 -floatx80 float32_to_floatx80( float32 a STATUS_PARAM) -{ - return a; -} -#endif - -unsigned int float32_to_uint32( float32 a STATUS_PARAM) -{ - int64_t v; - unsigned int res; - - v = llrintf(a); - if (v < 0) { - res = 0; - } else if (v > 0xffffffff) { - res = 0xffffffff; - } else { - res = v; - } - return res; -} -unsigned int float32_to_uint32_round_to_zero( float32 a STATUS_PARAM) -{ - int64_t v; - unsigned int res; - - v = (int64_t)a; - if (v < 0) { - res = 0; - } else if (v > 0xffffffff) { - res = 0xffffffff; - } else { - res = v; - } - return res; -} - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE single-precision operations. -*----------------------------------------------------------------------------*/ -float32 float32_round_to_int( float32 a STATUS_PARAM) -{ - return rintf(a); -} - -float32 float32_rem( float32 a, float32 b STATUS_PARAM) -{ - return remainderf(a, b); -} - -float32 float32_sqrt( float32 a STATUS_PARAM) -{ - return sqrtf(a); -} -int float32_compare( float32 a, float32 b STATUS_PARAM ) -{ - if (a < b) { - return float_relation_less; - } else if (a == b) { - return float_relation_equal; - } else if (a > b) { - return float_relation_greater; - } else { - return float_relation_unordered; - } -} -int float32_compare_quiet( float32 a, float32 b STATUS_PARAM ) -{ - if (isless(a, b)) { - return float_relation_less; - } else if (a == b) { - return float_relation_equal; - } else if (isgreater(a, b)) { - return float_relation_greater; - } else { - return float_relation_unordered; - } -} -int float32_is_signaling_nan( float32 a1) -{ - float32u u; - uint32_t a; - u.f = a1; - a = u.i; - return ( ( ( a>>22 ) & 0x1FF ) == 0x1FE ) && ( a & 0x003FFFFF ); -} - -int float32_is_quiet_nan( float32 a1 ) -{ - float32u u; - uint64_t a; - u.f = a1; - a = u.i; - return ( 0xFF800000 < ( a<<1 ) ); -} - -int float32_is_any_nan( float32 a1 ) -{ - float32u u; - uint32_t a; - u.f = a1; - a = u.i; - return (a & ~(1 << 31)) > 0x7f800000U; -} - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE double-precision conversion routines. 
-*----------------------------------------------------------------------------*/ -int float64_to_int32( float64 a STATUS_PARAM) -{ - return long_to_int32(lrint(a)); -} -int float64_to_int32_round_to_zero( float64 a STATUS_PARAM) -{ - return (int)a; -} -int64_t float64_to_int64( float64 a STATUS_PARAM) -{ - return llrint(a); -} -int64_t float64_to_int64_round_to_zero( float64 a STATUS_PARAM) -{ - return (int64_t)a; -} -float32 float64_to_float32( float64 a STATUS_PARAM) -{ - return a; -} -#ifdef FLOATX80 -floatx80 float64_to_floatx80( float64 a STATUS_PARAM) -{ - return a; -} -#endif -#ifdef FLOAT128 -float128 float64_to_float128( float64 a STATUS_PARAM) -{ - return a; -} -#endif - -unsigned int float64_to_uint32( float64 a STATUS_PARAM) -{ - int64_t v; - unsigned int res; - - v = llrint(a); - if (v < 0) { - res = 0; - } else if (v > 0xffffffff) { - res = 0xffffffff; - } else { - res = v; - } - return res; -} -unsigned int float64_to_uint32_round_to_zero( float64 a STATUS_PARAM) -{ - int64_t v; - unsigned int res; - - v = (int64_t)a; - if (v < 0) { - res = 0; - } else if (v > 0xffffffff) { - res = 0xffffffff; - } else { - res = v; - } - return res; -} -uint64_t float64_to_uint64 (float64 a STATUS_PARAM) -{ - int64_t v; - - v = llrint(a + (float64)INT64_MIN); - - return v - INT64_MIN; -} -uint64_t float64_to_uint64_round_to_zero (float64 a STATUS_PARAM) -{ - int64_t v; - - v = (int64_t)(a + (float64)INT64_MIN); - - return v - INT64_MIN; -} - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE double-precision operations. -*----------------------------------------------------------------------------*/ -#if defined(__sun__) && \ - (defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10) -static inline float64 trunc(float64 x) -{ - return x < 0 ? -floor(-x) : floor(x); -} -#endif -float64 float64_trunc_to_int( float64 a STATUS_PARAM ) -{ - return trunc(a); -} - -float64 float64_round_to_int( float64 a STATUS_PARAM ) -{ - return rint(a); -} - -float64 float64_rem( float64 a, float64 b STATUS_PARAM) -{ - return remainder(a, b); -} - -float64 float64_sqrt( float64 a STATUS_PARAM) -{ - return sqrt(a); -} -int float64_compare( float64 a, float64 b STATUS_PARAM ) -{ - if (a < b) { - return float_relation_less; - } else if (a == b) { - return float_relation_equal; - } else if (a > b) { - return float_relation_greater; - } else { - return float_relation_unordered; - } -} -int float64_compare_quiet( float64 a, float64 b STATUS_PARAM ) -{ - if (isless(a, b)) { - return float_relation_less; - } else if (a == b) { - return float_relation_equal; - } else if (isgreater(a, b)) { - return float_relation_greater; - } else { - return float_relation_unordered; - } -} -int float64_is_signaling_nan( float64 a1) -{ - float64u u; - uint64_t a; - u.f = a1; - a = u.i; - return - ( ( ( a>>51 ) & 0xFFF ) == 0xFFE ) - && ( a & LIT64( 0x0007FFFFFFFFFFFF ) ); - -} - -int float64_is_quiet_nan( float64 a1 ) -{ - float64u u; - uint64_t a; - u.f = a1; - a = u.i; - - return ( LIT64( 0xFFF0000000000000 ) < (uint64_t) ( a<<1 ) ); - -} - -int float64_is_any_nan( float64 a1 ) -{ - float64u u; - uint64_t a; - u.f = a1; - a = u.i; - - return (a & ~(1ULL << 63)) > LIT64 (0x7FF0000000000000 ); -} - -#ifdef FLOATX80 - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE extended double-precision conversion routines. 
-*----------------------------------------------------------------------------*/ -int floatx80_to_int32( floatx80 a STATUS_PARAM) -{ - return long_to_int32(lrintl(a)); -} -int floatx80_to_int32_round_to_zero( floatx80 a STATUS_PARAM) -{ - return (int)a; -} -int64_t floatx80_to_int64( floatx80 a STATUS_PARAM) -{ - return llrintl(a); -} -int64_t floatx80_to_int64_round_to_zero( floatx80 a STATUS_PARAM) -{ - return (int64_t)a; -} -float32 floatx80_to_float32( floatx80 a STATUS_PARAM) -{ - return a; -} -float64 floatx80_to_float64( floatx80 a STATUS_PARAM) -{ - return a; -} - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE extended double-precision operations. -*----------------------------------------------------------------------------*/ -floatx80 floatx80_round_to_int( floatx80 a STATUS_PARAM) -{ - return rintl(a); -} -floatx80 floatx80_rem( floatx80 a, floatx80 b STATUS_PARAM) -{ - return remainderl(a, b); -} -floatx80 floatx80_sqrt( floatx80 a STATUS_PARAM) -{ - return sqrtl(a); -} -int floatx80_compare( floatx80 a, floatx80 b STATUS_PARAM ) -{ - if (a < b) { - return float_relation_less; - } else if (a == b) { - return float_relation_equal; - } else if (a > b) { - return float_relation_greater; - } else { - return float_relation_unordered; - } -} -int floatx80_compare_quiet( floatx80 a, floatx80 b STATUS_PARAM ) -{ - if (isless(a, b)) { - return float_relation_less; - } else if (a == b) { - return float_relation_equal; - } else if (isgreater(a, b)) { - return float_relation_greater; - } else { - return float_relation_unordered; - } -} -int floatx80_is_signaling_nan( floatx80 a1) -{ - floatx80u u; - uint64_t aLow; - u.f = a1; - - aLow = u.i.low & ~ LIT64( 0x4000000000000000 ); - return - ( ( u.i.high & 0x7FFF ) == 0x7FFF ) - && (uint64_t) ( aLow<<1 ) - && ( u.i.low == aLow ); -} - -int floatx80_is_quiet_nan( floatx80 a1 ) -{ - floatx80u u; - u.f = a1; - return ( ( u.i.high & 0x7FFF ) == 0x7FFF ) && (uint64_t) ( u.i.low<<1 ); -} - -int floatx80_is_any_nan( floatx80 a1 ) -{ - floatx80u u; - u.f = a1; - return ((u.i.high & 0x7FFF) == 0x7FFF) && ( u.i.low<<1 ); -} - -#endif diff --git a/fpu/softfloat-native.h b/fpu/softfloat-native.h deleted file mode 100644 index 6afb74a152..0000000000 --- a/fpu/softfloat-native.h +++ /dev/null @@ -1,531 +0,0 @@ -/* Native implementation of soft float functions */ -#include <math.h> - -#if (defined(CONFIG_BSD) && !defined(__APPLE__) && !defined(__GLIBC__)) \ - || defined(CONFIG_SOLARIS) -#include <ieeefp.h> -#define fabsf(f) ((float)fabs(f)) -#else -#include <fenv.h> -#endif - -#if defined(__OpenBSD__) || defined(__NetBSD__) -#include <sys/param.h> -#endif - -/* - * Define some C99-7.12.3 classification macros and - * some C99-.12.4 for Solaris systems OS less than 10, - * or Solaris 10 systems running GCC 3.x or less. - * Solaris 10 with GCC4 does not need these macros as they - * are defined in <iso/math_c99.h> with a compiler directive - */ -#if defined(CONFIG_SOLARIS) && \ - ((CONFIG_SOLARIS_VERSION <= 9 ) || \ - ((CONFIG_SOLARIS_VERSION == 10) && (__GNUC__ < 4))) \ - || (defined(__OpenBSD__) && (OpenBSD < 200811)) -/* - * C99 7.12.3 classification macros - * and - * C99 7.12.14 comparison macros - * - * ... do not work on Solaris 10 using GNU CC 3.4.x. - * Try to workaround the missing / broken C99 math macros. 
- */ -#if defined(__OpenBSD__) -#define unordered(x, y) (isnan(x) || isnan(y)) -#endif - -#ifdef __NetBSD__ -#ifndef isgreater -#define isgreater(x, y) __builtin_isgreater(x, y) -#endif -#ifndef isgreaterequal -#define isgreaterequal(x, y) __builtin_isgreaterequal(x, y) -#endif -#ifndef isless -#define isless(x, y) __builtin_isless(x, y) -#endif -#ifndef islessequal -#define islessequal(x, y) __builtin_islessequal(x, y) -#endif -#ifndef isunordered -#define isunordered(x, y) __builtin_isunordered(x, y) -#endif -#endif - - -#define isnormal(x) (fpclass(x) >= FP_NZERO) -#define isgreater(x, y) ((!unordered(x, y)) && ((x) > (y))) -#define isgreaterequal(x, y) ((!unordered(x, y)) && ((x) >= (y))) -#define isless(x, y) ((!unordered(x, y)) && ((x) < (y))) -#define islessequal(x, y) ((!unordered(x, y)) && ((x) <= (y))) -#define isunordered(x,y) unordered(x, y) -#endif - -#if defined(__sun__) && !defined(CONFIG_NEEDS_LIBSUNMATH) - -#ifndef isnan -# define isnan(x) \ - (sizeof (x) == sizeof (long double) ? isnan_ld (x) \ - : sizeof (x) == sizeof (double) ? isnan_d (x) \ - : isnan_f (x)) -static inline int isnan_f (float x) { return x != x; } -static inline int isnan_d (double x) { return x != x; } -static inline int isnan_ld (long double x) { return x != x; } -#endif - -#ifndef isinf -# define isinf(x) \ - (sizeof (x) == sizeof (long double) ? isinf_ld (x) \ - : sizeof (x) == sizeof (double) ? isinf_d (x) \ - : isinf_f (x)) -static inline int isinf_f (float x) { return isnan (x - x); } -static inline int isinf_d (double x) { return isnan (x - x); } -static inline int isinf_ld (long double x) { return isnan (x - x); } -#endif -#endif - -typedef float float32; -typedef double float64; -#ifdef FLOATX80 -typedef long double floatx80; -#endif - -typedef union { - float32 f; - uint32_t i; -} float32u; -typedef union { - float64 f; - uint64_t i; -} float64u; -#ifdef FLOATX80 -typedef union { - floatx80 f; - struct { - uint64_t low; - uint16_t high; - } i; -} floatx80u; -#endif - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE floating-point rounding mode. -*----------------------------------------------------------------------------*/ -#if (defined(CONFIG_BSD) && !defined(__APPLE__) && !defined(__GLIBC__)) \ - || defined(CONFIG_SOLARIS) -#if defined(__OpenBSD__) -#define FE_RM FP_RM -#define FE_RP FP_RP -#define FE_RZ FP_RZ -#endif -enum { - float_round_nearest_even = FP_RN, - float_round_down = FP_RM, - float_round_up = FP_RP, - float_round_to_zero = FP_RZ -}; -#else -enum { - float_round_nearest_even = FE_TONEAREST, - float_round_down = FE_DOWNWARD, - float_round_up = FE_UPWARD, - float_round_to_zero = FE_TOWARDZERO -}; -#endif - -typedef struct float_status { - int float_rounding_mode; -#ifdef FLOATX80 - int floatx80_rounding_precision; -#endif -} float_status; - -void set_float_rounding_mode(int val STATUS_PARAM); -#ifdef FLOATX80 -void set_floatx80_rounding_precision(int val STATUS_PARAM); -#endif - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE integer-to-floating-point conversion routines. 
-*----------------------------------------------------------------------------*/ -float32 int32_to_float32( int STATUS_PARAM); -float32 uint32_to_float32( unsigned int STATUS_PARAM); -float64 int32_to_float64( int STATUS_PARAM); -float64 uint32_to_float64( unsigned int STATUS_PARAM); -#ifdef FLOATX80 -floatx80 int32_to_floatx80( int STATUS_PARAM); -#endif -#ifdef FLOAT128 -float128 int32_to_float128( int STATUS_PARAM); -#endif -float32 int64_to_float32( int64_t STATUS_PARAM); -float32 uint64_to_float32( uint64_t STATUS_PARAM); -float64 int64_to_float64( int64_t STATUS_PARAM); -float64 uint64_to_float64( uint64_t v STATUS_PARAM); -#ifdef FLOATX80 -floatx80 int64_to_floatx80( int64_t STATUS_PARAM); -#endif -#ifdef FLOAT128 -float128 int64_to_float128( int64_t STATUS_PARAM); -#endif - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE single-precision conversion constants. -*----------------------------------------------------------------------------*/ -#define float32_zero (0.0) -#define float32_one (1.0) -#define float32_ln2 (0.6931471) -#define float32_pi (3.1415926) -#define float32_half (0.5) - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE single-precision conversion routines. -*----------------------------------------------------------------------------*/ -int float32_to_int32( float32 STATUS_PARAM); -int float32_to_int32_round_to_zero( float32 STATUS_PARAM); -unsigned int float32_to_uint32( float32 a STATUS_PARAM); -unsigned int float32_to_uint32_round_to_zero( float32 a STATUS_PARAM); -int64_t float32_to_int64( float32 STATUS_PARAM); -int64_t float32_to_int64_round_to_zero( float32 STATUS_PARAM); -float64 float32_to_float64( float32 STATUS_PARAM); -#ifdef FLOATX80 -floatx80 float32_to_floatx80( float32 STATUS_PARAM); -#endif -#ifdef FLOAT128 -float128 float32_to_float128( float32 STATUS_PARAM); -#endif - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE single-precision operations. 
-*----------------------------------------------------------------------------*/ -float32 float32_round_to_int( float32 STATUS_PARAM); -INLINE float32 float32_add( float32 a, float32 b STATUS_PARAM) -{ - return a + b; -} -INLINE float32 float32_sub( float32 a, float32 b STATUS_PARAM) -{ - return a - b; -} -INLINE float32 float32_mul( float32 a, float32 b STATUS_PARAM) -{ - return a * b; -} -INLINE float32 float32_div( float32 a, float32 b STATUS_PARAM) -{ - return a / b; -} -float32 float32_rem( float32, float32 STATUS_PARAM); -float32 float32_sqrt( float32 STATUS_PARAM); -INLINE int float32_eq_quiet( float32 a, float32 b STATUS_PARAM) -{ - return a == b; -} -INLINE int float32_le( float32 a, float32 b STATUS_PARAM) -{ - return a <= b; -} -INLINE int float32_lt( float32 a, float32 b STATUS_PARAM) -{ - return a < b; -} -INLINE int float32_eq( float32 a, float32 b STATUS_PARAM) -{ - return a <= b && a >= b; -} -INLINE int float32_le_quiet( float32 a, float32 b STATUS_PARAM) -{ - return islessequal(a, b); -} -INLINE int float32_lt_quiet( float32 a, float32 b STATUS_PARAM) -{ - return isless(a, b); -} -INLINE int float32_unordered( float32 a, float32 b STATUS_PARAM) -{ - return isunordered(a, b); -} -INLINE int float32_unordered_quiet( float32 a, float32 b STATUS_PARAM) -{ - return isunordered(a, b); -} -int float32_compare( float32, float32 STATUS_PARAM ); -int float32_compare_quiet( float32, float32 STATUS_PARAM ); -int float32_is_signaling_nan( float32 ); -int float32_is_quiet_nan( float32 ); -int float32_is_any_nan( float32 ); - -INLINE float32 float32_abs(float32 a) -{ - return fabsf(a); -} - -INLINE float32 float32_chs(float32 a) -{ - return -a; -} - -INLINE float32 float32_is_infinity(float32 a) -{ - return fpclassify(a) == FP_INFINITE; -} - -INLINE float32 float32_is_neg(float32 a) -{ - float32u u; - u.f = a; - return u.i >> 31; -} - -INLINE float32 float32_is_zero(float32 a) -{ - return fpclassify(a) == FP_ZERO; -} - -INLINE float32 float32_scalbn(float32 a, int n STATUS_PARAM) -{ - return scalbnf(a, n); -} - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE double-precision conversion constants. -*----------------------------------------------------------------------------*/ -#define float64_zero (0.0) -#define float64_one (1.0) -#define float64_ln2 (0.693147180559945) -#define float64_pi (3.141592653589793) -#define float64_half (0.5) - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE double-precision conversion routines. -*----------------------------------------------------------------------------*/ -int float64_to_int32( float64 STATUS_PARAM ); -int float64_to_int32_round_to_zero( float64 STATUS_PARAM ); -unsigned int float64_to_uint32( float64 STATUS_PARAM ); -unsigned int float64_to_uint32_round_to_zero( float64 STATUS_PARAM ); -int64_t float64_to_int64( float64 STATUS_PARAM ); -int64_t float64_to_int64_round_to_zero( float64 STATUS_PARAM ); -uint64_t float64_to_uint64( float64 STATUS_PARAM ); -uint64_t float64_to_uint64_round_to_zero( float64 STATUS_PARAM ); -float32 float64_to_float32( float64 STATUS_PARAM ); -#ifdef FLOATX80 -floatx80 float64_to_floatx80( float64 STATUS_PARAM ); -#endif -#ifdef FLOAT128 -float128 float64_to_float128( float64 STATUS_PARAM ); -#endif - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE double-precision operations. 
-*----------------------------------------------------------------------------*/ -float64 float64_round_to_int( float64 STATUS_PARAM ); -float64 float64_trunc_to_int( float64 STATUS_PARAM ); -INLINE float64 float64_add( float64 a, float64 b STATUS_PARAM) -{ - return a + b; -} -INLINE float64 float64_sub( float64 a, float64 b STATUS_PARAM) -{ - return a - b; -} -INLINE float64 float64_mul( float64 a, float64 b STATUS_PARAM) -{ - return a * b; -} -INLINE float64 float64_div( float64 a, float64 b STATUS_PARAM) -{ - return a / b; -} -float64 float64_rem( float64, float64 STATUS_PARAM ); -float64 float64_sqrt( float64 STATUS_PARAM ); -INLINE int float64_eq_quiet( float64 a, float64 b STATUS_PARAM) -{ - return a == b; -} -INLINE int float64_le( float64 a, float64 b STATUS_PARAM) -{ - return a <= b; -} -INLINE int float64_lt( float64 a, float64 b STATUS_PARAM) -{ - return a < b; -} -INLINE int float64_eq( float64 a, float64 b STATUS_PARAM) -{ - return a <= b && a >= b; -} -INLINE int float64_le_quiet( float64 a, float64 b STATUS_PARAM) -{ - return islessequal(a, b); -} -INLINE int float64_lt_quiet( float64 a, float64 b STATUS_PARAM) -{ - return isless(a, b); - -} -INLINE int float64_unordered( float64 a, float64 b STATUS_PARAM) -{ - return isunordered(a, b); -} -INLINE int float64_unordered_quiet( float64 a, float64 b STATUS_PARAM) -{ - return isunordered(a, b); -} -int float64_compare( float64, float64 STATUS_PARAM ); -int float64_compare_quiet( float64, float64 STATUS_PARAM ); -int float64_is_signaling_nan( float64 ); -int float64_is_any_nan( float64 ); -int float64_is_quiet_nan( float64 ); - -INLINE float64 float64_abs(float64 a) -{ - return fabs(a); -} - -INLINE float64 float64_chs(float64 a) -{ - return -a; -} - -INLINE float64 float64_is_infinity(float64 a) -{ - return fpclassify(a) == FP_INFINITE; -} - -INLINE float64 float64_is_neg(float64 a) -{ - float64u u; - u.f = a; - return u.i >> 63; -} - -INLINE float64 float64_is_zero(float64 a) -{ - return fpclassify(a) == FP_ZERO; -} - -INLINE float64 float64_scalbn(float64 a, int n STATUS_PARAM) -{ - return scalbn(a, n); -} - -#ifdef FLOATX80 - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE extended double-precision conversion constants. -*----------------------------------------------------------------------------*/ -#define floatx80_zero (0.0L) -#define floatx80_one (1.0L) -#define floatx80_ln2 (0.69314718055994530943L) -#define floatx80_pi (3.14159265358979323851L) -#define floatx80_half (0.5L) - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE extended double-precision conversion routines. -*----------------------------------------------------------------------------*/ -int floatx80_to_int32( floatx80 STATUS_PARAM ); -int floatx80_to_int32_round_to_zero( floatx80 STATUS_PARAM ); -int64_t floatx80_to_int64( floatx80 STATUS_PARAM); -int64_t floatx80_to_int64_round_to_zero( floatx80 STATUS_PARAM); -float32 floatx80_to_float32( floatx80 STATUS_PARAM ); -float64 floatx80_to_float64( floatx80 STATUS_PARAM ); -#ifdef FLOAT128 -float128 floatx80_to_float128( floatx80 STATUS_PARAM ); -#endif - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE extended double-precision operations. 
-*----------------------------------------------------------------------------*/ -floatx80 floatx80_round_to_int( floatx80 STATUS_PARAM ); -INLINE floatx80 floatx80_add( floatx80 a, floatx80 b STATUS_PARAM) -{ - return a + b; -} -INLINE floatx80 floatx80_sub( floatx80 a, floatx80 b STATUS_PARAM) -{ - return a - b; -} -INLINE floatx80 floatx80_mul( floatx80 a, floatx80 b STATUS_PARAM) -{ - return a * b; -} -INLINE floatx80 floatx80_div( floatx80 a, floatx80 b STATUS_PARAM) -{ - return a / b; -} -floatx80 floatx80_rem( floatx80, floatx80 STATUS_PARAM ); -floatx80 floatx80_sqrt( floatx80 STATUS_PARAM ); -INLINE int floatx80_eq_quiet( floatx80 a, floatx80 b STATUS_PARAM) -{ - return a == b; -} -INLINE int floatx80_le( floatx80 a, floatx80 b STATUS_PARAM) -{ - return a <= b; -} -INLINE int floatx80_lt( floatx80 a, floatx80 b STATUS_PARAM) -{ - return a < b; -} -INLINE int floatx80_eq( floatx80 a, floatx80 b STATUS_PARAM) -{ - return a <= b && a >= b; -} -INLINE int floatx80_le_quiet( floatx80 a, floatx80 b STATUS_PARAM) -{ - return islessequal(a, b); -} -INLINE int floatx80_lt_quiet( floatx80 a, floatx80 b STATUS_PARAM) -{ - return isless(a, b); - -} -INLINE int floatx80_unordered( floatx80 a, floatx80 b STATUS_PARAM) -{ - return isunordered(a, b); -} -INLINE int floatx80_unordered_quiet( floatx80 a, floatx80 b STATUS_PARAM) -{ - return isunordered(a, b); -} -int floatx80_compare( floatx80, floatx80 STATUS_PARAM ); -int floatx80_compare_quiet( floatx80, floatx80 STATUS_PARAM ); -int floatx80_is_signaling_nan( floatx80 ); -int floatx80_is_quiet_nan( floatx80 ); -int floatx80_is_any_nan( floatx80 ); - -INLINE floatx80 floatx80_abs(floatx80 a) -{ - return fabsl(a); -} - -INLINE floatx80 floatx80_chs(floatx80 a) -{ - return -a; -} - -INLINE floatx80 floatx80_is_infinity(floatx80 a) -{ - return fpclassify(a) == FP_INFINITE; -} - -INLINE floatx80 floatx80_is_neg(floatx80 a) -{ - floatx80u u; - u.f = a; - return u.i.high >> 15; -} - -INLINE floatx80 floatx80_is_zero(floatx80 a) -{ - return fpclassify(a) == FP_ZERO; -} - -INLINE floatx80 floatx80_scalbn(floatx80 a, int n STATUS_PARAM) -{ - return scalbnl(a, n); -} - -#endif diff --git a/fpu/softfloat-specialize.h b/fpu/softfloat-specialize.h index 9d68aae9d5..c7d35a161d 100644 --- a/fpu/softfloat-specialize.h +++ b/fpu/softfloat-specialize.h @@ -523,8 +523,6 @@ static float64 propagateFloat64NaN( float64 a, float64 b STATUS_PARAM) } } -#ifdef FLOATX80 - /*---------------------------------------------------------------------------- | Returns 1 if the extended double-precision floating-point value `a' is a | quiet NaN; otherwise returns 0. This slightly differs from the same @@ -681,10 +679,6 @@ static floatx80 propagateFloatx80NaN( floatx80 a, floatx80 b STATUS_PARAM) } } -#endif - -#ifdef FLOAT128 - /*---------------------------------------------------------------------------- | Returns 1 if the quadruple-precision floating-point value `a' is a quiet | NaN; otherwise returns 0. 
@@ -820,4 +814,3 @@ static float128 propagateFloat128NaN( float128 a, float128 b STATUS_PARAM) } } -#endif diff --git a/fpu/softfloat.c b/fpu/softfloat.c index baba1dc44b..7951a0e869 100644 --- a/fpu/softfloat.c +++ b/fpu/softfloat.c @@ -64,12 +64,10 @@ void set_float_exception_flags(int val STATUS_PARAM) STATUS(float_exception_flags) = val; } -#ifdef FLOATX80 void set_floatx80_rounding_precision(int val STATUS_PARAM) { STATUS(floatx80_rounding_precision) = val; } -#endif /*---------------------------------------------------------------------------- | Returns the fraction bits of the half-precision floating-point value `a'. @@ -341,7 +339,10 @@ static float32 roundAndPackFloat32( flag zSign, int16 zExp, uint32_t zSig STATUS return packFloat32( zSign, 0xFF, - ( roundIncrement == 0 )); } if ( zExp < 0 ) { - if ( STATUS(flush_to_zero) ) return packFloat32( zSign, 0, 0 ); + if (STATUS(flush_to_zero)) { + float_raise(float_flag_output_denormal STATUS_VAR); + return packFloat32(zSign, 0, 0); + } isTiny = ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) || ( zExp < -1 ) @@ -520,7 +521,10 @@ static float64 roundAndPackFloat64( flag zSign, int16 zExp, uint64_t zSig STATUS return packFloat64( zSign, 0x7FF, - ( roundIncrement == 0 )); } if ( zExp < 0 ) { - if ( STATUS(flush_to_zero) ) return packFloat64( zSign, 0, 0 ); + if (STATUS(flush_to_zero)) { + float_raise(float_flag_output_denormal STATUS_VAR); + return packFloat64(zSign, 0, 0); + } isTiny = ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) || ( zExp < -1 ) @@ -558,8 +562,6 @@ static float64 } -#ifdef FLOATX80 - /*---------------------------------------------------------------------------- | Returns the fraction bits of the extended double-precision floating-point | value `a'. @@ -699,7 +701,10 @@ static floatx80 goto overflow; } if ( zExp <= 0 ) { - if ( STATUS(flush_to_zero) ) return packFloatx80( zSign, 0, 0 ); + if (STATUS(flush_to_zero)) { + float_raise(float_flag_output_denormal STATUS_VAR); + return packFloatx80(zSign, 0, 0); + } isTiny = ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) || ( zExp < 0 ) @@ -842,10 +847,6 @@ static floatx80 } -#endif - -#ifdef FLOAT128 - /*---------------------------------------------------------------------------- | Returns the least-significant 64 fraction bits of the quadruple-precision | floating-point value `a'. @@ -1030,7 +1031,10 @@ static float128 return packFloat128( zSign, 0x7FFF, 0, 0 ); } if ( zExp < 0 ) { - if ( STATUS(flush_to_zero) ) return packFloat128( zSign, 0, 0, 0 ); + if (STATUS(flush_to_zero)) { + float_raise(float_flag_output_denormal STATUS_VAR); + return packFloat128(zSign, 0, 0, 0); + } isTiny = ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) || ( zExp < -1 ) @@ -1106,8 +1110,6 @@ static float128 } -#endif - /*---------------------------------------------------------------------------- | Returns the result of converting the 32-bit two's complement integer `a' | to the single-precision floating-point format. The conversion is performed @@ -1147,8 +1149,6 @@ float64 int32_to_float64( int32 a STATUS_PARAM ) } -#ifdef FLOATX80 - /*---------------------------------------------------------------------------- | Returns the result of converting the 32-bit two's complement integer `a' | to the extended double-precision floating-point format. 
The conversion @@ -1172,10 +1172,6 @@ floatx80 int32_to_floatx80( int32 a STATUS_PARAM ) } -#endif - -#ifdef FLOAT128 - /*---------------------------------------------------------------------------- | Returns the result of converting the 32-bit two's complement integer `a' to | the quadruple-precision floating-point format. The conversion is performed @@ -1198,8 +1194,6 @@ float128 int32_to_float128( int32 a STATUS_PARAM ) } -#endif - /*---------------------------------------------------------------------------- | Returns the result of converting the 64-bit two's complement integer `a' | to the single-precision floating-point format. The conversion is performed @@ -1279,8 +1273,6 @@ float64 uint64_to_float64( uint64 a STATUS_PARAM ) } -#ifdef FLOATX80 - /*---------------------------------------------------------------------------- | Returns the result of converting the 64-bit two's complement integer `a' | to the extended double-precision floating-point format. The conversion @@ -1302,10 +1294,6 @@ floatx80 int64_to_floatx80( int64 a STATUS_PARAM ) } -#endif - -#ifdef FLOAT128 - /*---------------------------------------------------------------------------- | Returns the result of converting the 64-bit two's complement integer `a' to | the quadruple-precision floating-point format. The conversion is performed @@ -1339,8 +1327,6 @@ float128 int64_to_float128( int64 a STATUS_PARAM ) } -#endif - /*---------------------------------------------------------------------------- | Returns the result of converting the single-precision floating-point value | `a' to the 32-bit two's complement integer format. The conversion is @@ -1578,8 +1564,6 @@ float64 float32_to_float64( float32 a STATUS_PARAM ) } -#ifdef FLOATX80 - /*---------------------------------------------------------------------------- | Returns the result of converting the single-precision floating-point value | `a' to the extended double-precision floating-point format. The conversion @@ -1610,10 +1594,6 @@ floatx80 float32_to_floatx80( float32 a STATUS_PARAM ) } -#endif - -#ifdef FLOAT128 - /*---------------------------------------------------------------------------- | Returns the result of converting the single-precision floating-point value | `a' to the double-precision floating-point format. The conversion is @@ -1644,8 +1624,6 @@ float128 float32_to_float128( float32 a STATUS_PARAM ) } -#endif - /*---------------------------------------------------------------------------- | Rounds the single-precision floating-point value `a' to an integer, and | returns the result as a single-precision floating-point value. The @@ -1761,7 +1739,12 @@ static float32 addFloat32Sigs( float32 a, float32 b, flag zSign STATUS_PARAM) return a; } if ( aExp == 0 ) { - if ( STATUS(flush_to_zero) ) return packFloat32( zSign, 0, 0 ); + if (STATUS(flush_to_zero)) { + if (aSig | bSig) { + float_raise(float_flag_output_denormal STATUS_VAR); + } + return packFloat32(zSign, 0, 0); + } return packFloat32( zSign, 0, ( aSig + bSig )>>6 ); } zSig = 0x40000000 + aSig + bSig; @@ -2922,8 +2905,6 @@ float16 float32_to_float16(float32 a, flag ieee STATUS_PARAM) return packFloat16(aSign, aExp + 14, aSig >> 13); } -#ifdef FLOATX80 - /*---------------------------------------------------------------------------- | Returns the result of converting the double-precision floating-point value | `a' to the extended double-precision floating-point format. 
The conversion @@ -2955,10 +2936,6 @@ floatx80 float64_to_floatx80( float64 a STATUS_PARAM ) } -#endif - -#ifdef FLOAT128 - /*---------------------------------------------------------------------------- | Returns the result of converting the double-precision floating-point value | `a' to the quadruple-precision floating-point format. The conversion is @@ -2990,8 +2967,6 @@ float128 float64_to_float128( float64 a STATUS_PARAM ) } -#endif - /*---------------------------------------------------------------------------- | Rounds the double-precision floating-point value `a' to an integer, and | returns the result as a double-precision floating-point value. The @@ -3120,7 +3095,12 @@ static float64 addFloat64Sigs( float64 a, float64 b, flag zSign STATUS_PARAM ) return a; } if ( aExp == 0 ) { - if ( STATUS(flush_to_zero) ) return packFloat64( zSign, 0, 0 ); + if (STATUS(flush_to_zero)) { + if (aSig | bSig) { + float_raise(float_flag_output_denormal STATUS_VAR); + } + return packFloat64(zSign, 0, 0); + } return packFloat64( zSign, 0, ( aSig + bSig )>>9 ); } zSig = LIT64( 0x4000000000000000 ) + aSig + bSig; @@ -3794,8 +3774,6 @@ int float64_unordered_quiet( float64 a, float64 b STATUS_PARAM ) return 0; } -#ifdef FLOATX80 - /*---------------------------------------------------------------------------- | Returns the result of converting the extended double-precision floating- | point value `a' to the 32-bit two's complement integer format. The @@ -4008,8 +3986,6 @@ float64 floatx80_to_float64( floatx80 a STATUS_PARAM ) } -#ifdef FLOAT128 - /*---------------------------------------------------------------------------- | Returns the result of converting the extended double-precision floating- | point value `a' to the quadruple-precision floating-point format. The @@ -4034,8 +4010,6 @@ float128 floatx80_to_float128( floatx80 a STATUS_PARAM ) } -#endif - /*---------------------------------------------------------------------------- | Rounds the extended double-precision floating-point value `a' to an integer, | and returns the result as an extended quadruple-precision floating-point @@ -4827,10 +4801,6 @@ int floatx80_unordered_quiet( floatx80 a, floatx80 b STATUS_PARAM ) return 0; } -#endif - -#ifdef FLOAT128 - /*---------------------------------------------------------------------------- | Returns the result of converting the quadruple-precision floating-point | value `a' to the 32-bit two's complement integer format. The conversion @@ -5080,8 +5050,6 @@ float64 float128_to_float64( float128 a STATUS_PARAM ) } -#ifdef FLOATX80 - /*---------------------------------------------------------------------------- | Returns the result of converting the quadruple-precision floating-point | value `a' to the extended double-precision floating-point format. The @@ -5117,8 +5085,6 @@ floatx80 float128_to_floatx80( float128 a STATUS_PARAM ) } -#endif - /*---------------------------------------------------------------------------- | Rounds the quadruple-precision floating-point value `a' to an integer, and | returns the result as a quadruple-precision floating-point value. 
The @@ -5282,7 +5248,12 @@ static float128 addFloat128Sigs( float128 a, float128 b, flag zSign STATUS_PARAM } add128( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1 ); if ( aExp == 0 ) { - if ( STATUS(flush_to_zero) ) return packFloat128( zSign, 0, 0, 0 ); + if (STATUS(flush_to_zero)) { + if (zSig0 | zSig1) { + float_raise(float_flag_output_denormal STATUS_VAR); + } + return packFloat128(zSign, 0, 0, 0); + } return packFloat128( zSign, 0, zSig0, zSig1 ); } zSig2 = 0; @@ -5993,8 +5964,6 @@ int float128_unordered_quiet( float128 a, float128 b STATUS_PARAM ) return 0; } -#endif - /* misc functions */ float32 uint32_to_float32( unsigned int a STATUS_PARAM ) { @@ -6396,7 +6365,6 @@ float64 float64_scalbn( float64 a, int n STATUS_PARAM ) return normalizeRoundAndPackFloat64( aSign, aExp, aSig STATUS_VAR ); } -#ifdef FLOATX80 floatx80 floatx80_scalbn( floatx80 a, int n STATUS_PARAM ) { flag aSign; @@ -6427,9 +6395,7 @@ floatx80 floatx80_scalbn( floatx80 a, int n STATUS_PARAM ) return normalizeRoundAndPackFloatx80( STATUS(floatx80_rounding_precision), aSign, aExp, aSig, 0 STATUS_VAR ); } -#endif -#ifdef FLOAT128 float128 float128_scalbn( float128 a, int n STATUS_PARAM ) { flag aSign; @@ -6462,4 +6428,3 @@ float128 float128_scalbn( float128 a, int n STATUS_PARAM ) STATUS_VAR ); } -#endif diff --git a/fpu/softfloat.h b/fpu/softfloat.h index 5eff0858f1..bde250087b 100644 --- a/fpu/softfloat.h +++ b/fpu/softfloat.h @@ -74,24 +74,6 @@ typedef int64_t int64; #define SNAN_BIT_IS_ONE 0 #endif -/*---------------------------------------------------------------------------- -| The macro `FLOATX80' must be defined to enable the extended double-precision -| floating-point format `floatx80'. If this macro is not defined, the -| `floatx80' type will not be defined, and none of the functions that either -| input or output the `floatx80' type will be defined. The same applies to -| the `FLOAT128' macro and the quadruple-precision format `float128'. -*----------------------------------------------------------------------------*/ -#ifdef CONFIG_SOFTFLOAT -/* bit exact soft float support */ -#define FLOATX80 -#define FLOAT128 -#else -/* native float support */ -#if (defined(__i386__) || defined(__x86_64__)) && !defined(CONFIG_BSD) -#define FLOATX80 -#endif -#endif /* !CONFIG_SOFTFLOAT */ - #define STATUS_PARAM , float_status *status #define STATUS(field) status->field #define STATUS_VAR , status @@ -106,7 +88,6 @@ enum { float_relation_unordered = 2 }; -#ifdef CONFIG_SOFTFLOAT /*---------------------------------------------------------------------------- | Software IEC/IEEE floating-point types. *----------------------------------------------------------------------------*/ @@ -149,14 +130,11 @@ typedef uint64_t float64; #define const_float32(x) (x) #define const_float64(x) (x) #endif -#ifdef FLOATX80 typedef struct { uint64_t low; uint16_t high; } floatx80; #define make_floatx80(exp, mant) ((floatx80) { mant, exp }) -#endif -#ifdef FLOAT128 typedef struct { #ifdef HOST_WORDS_BIGENDIAN uint64_t high, low; @@ -164,7 +142,6 @@ typedef struct { uint64_t low, high; #endif } float128; -#endif /*---------------------------------------------------------------------------- | Software IEC/IEEE floating-point underflow tininess-detection mode. 
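Taken together, the roundAndPack*/addFloat*Sigs hunks above turn flush-to-zero from a silent truncation into an observable event: whenever a denormal result is squashed, the status now also carries the new float_flag_output_denormal bit (defined in the softfloat.h hunk below). A minimal usage sketch, assuming the post-patch softfloat API and include paths; the helper name check_output_flush() is invented for illustration:

#include <stdio.h>
#include <string.h>
#include "softfloat.h"      /* actual include path depends on the build */

/* Sketch only: force a denormal single-precision result with flush_to_zero
 * enabled and observe the new output_denormal exception bit. */
static void check_output_flush(void)
{
    float_status st;
    float32 one, tiny;

    memset(&st, 0, sizeof(st));   /* round-to-nearest-even, all flags clear */
    st.flush_to_zero = 1;         /* denormal results are flushed to zero   */

    one  = int32_to_float32(1, &st);
    tiny = float32_scalbn(one, -140, &st);   /* 2^-140 is below FLT_MIN, so
                                                the result is flushed to 0  */

    if (get_float_exception_flags(&st) & float_flag_output_denormal) {
        printf("denormal result was flushed (output_denormal set)\n");
    }
    (void)tiny;
}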
@@ -193,16 +170,15 @@ enum { float_flag_overflow = 8, float_flag_underflow = 16, float_flag_inexact = 32, - float_flag_input_denormal = 64 + float_flag_input_denormal = 64, + float_flag_output_denormal = 128 }; typedef struct float_status { signed char float_detect_tininess; signed char float_rounding_mode; signed char float_exception_flags; -#ifdef FLOATX80 signed char floatx80_rounding_precision; -#endif /* should denormalised results go to zero and set the inexact flag? */ flag flush_to_zero; /* should denormalised inputs go to zero and set the input_denormal flag? */ @@ -232,9 +208,7 @@ INLINE int get_float_exception_flags(float_status *status) { return STATUS(float_exception_flags); } -#ifdef FLOATX80 void set_floatx80_rounding_precision(int val STATUS_PARAM); -#endif /*---------------------------------------------------------------------------- | Routine to raise any or all of the software IEC/IEEE floating-point @@ -249,22 +223,14 @@ float32 int32_to_float32( int32 STATUS_PARAM ); float64 int32_to_float64( int32 STATUS_PARAM ); float32 uint32_to_float32( unsigned int STATUS_PARAM ); float64 uint32_to_float64( unsigned int STATUS_PARAM ); -#ifdef FLOATX80 floatx80 int32_to_floatx80( int32 STATUS_PARAM ); -#endif -#ifdef FLOAT128 float128 int32_to_float128( int32 STATUS_PARAM ); -#endif float32 int64_to_float32( int64 STATUS_PARAM ); float32 uint64_to_float32( uint64 STATUS_PARAM ); float64 int64_to_float64( int64 STATUS_PARAM ); float64 uint64_to_float64( uint64 STATUS_PARAM ); -#ifdef FLOATX80 floatx80 int64_to_floatx80( int64 STATUS_PARAM ); -#endif -#ifdef FLOAT128 float128 int64_to_float128( int64 STATUS_PARAM ); -#endif /*---------------------------------------------------------------------------- | Software half-precision conversion routines. @@ -302,12 +268,8 @@ uint32 float32_to_uint32_round_to_zero( float32 STATUS_PARAM ); int64 float32_to_int64( float32 STATUS_PARAM ); int64 float32_to_int64_round_to_zero( float32 STATUS_PARAM ); float64 float32_to_float64( float32 STATUS_PARAM ); -#ifdef FLOATX80 floatx80 float32_to_floatx80( float32 STATUS_PARAM ); -#endif -#ifdef FLOAT128 float128 float32_to_float128( float32 STATUS_PARAM ); -#endif /*---------------------------------------------------------------------------- | Software IEC/IEEE single-precision operations. @@ -419,12 +381,8 @@ int64 float64_to_int64_round_to_zero( float64 STATUS_PARAM ); uint64 float64_to_uint64 (float64 a STATUS_PARAM); uint64 float64_to_uint64_round_to_zero (float64 a STATUS_PARAM); float32 float64_to_float32( float64 STATUS_PARAM ); -#ifdef FLOATX80 floatx80 float64_to_floatx80( float64 STATUS_PARAM ); -#endif -#ifdef FLOAT128 float128 float64_to_float128( float64 STATUS_PARAM ); -#endif /*---------------------------------------------------------------------------- | Software IEC/IEEE double-precision operations. @@ -491,6 +449,11 @@ INLINE int float64_is_any_nan(float64 a) return ((float64_val(a) & ~(1ULL << 63)) > 0x7ff0000000000000ULL); } +INLINE int float64_is_zero_or_denormal(float64 a) +{ + return (float64_val(a) & 0x7ff0000000000000LL) == 0; +} + INLINE float64 float64_set_sign(float64 a, int sign) { return make_float64((float64_val(a) & 0x7fffffffffffffffULL) @@ -517,8 +480,6 @@ INLINE float64 float64_set_sign(float64 a, int sign) #define float64_default_nan make_float64(LIT64( 0xFFF8000000000000 )) #endif -#ifdef FLOATX80 - /*---------------------------------------------------------------------------- | Software IEC/IEEE extended double-precision conversion routines. 
*----------------------------------------------------------------------------*/ @@ -528,9 +489,7 @@ int64 floatx80_to_int64( floatx80 STATUS_PARAM ); int64 floatx80_to_int64_round_to_zero( floatx80 STATUS_PARAM ); float32 floatx80_to_float32( floatx80 STATUS_PARAM ); float64 floatx80_to_float64( floatx80 STATUS_PARAM ); -#ifdef FLOAT128 float128 floatx80_to_float128( floatx80 STATUS_PARAM ); -#endif /*---------------------------------------------------------------------------- | Software IEC/IEEE extended double-precision operations. @@ -584,6 +543,11 @@ INLINE int floatx80_is_zero(floatx80 a) return (a.high & 0x7fff) == 0 && a.low == 0; } +INLINE int floatx80_is_zero_or_denormal(floatx80 a) +{ + return (a.high & 0x7fff) == 0; +} + INLINE int floatx80_is_any_nan(floatx80 a) { return ((a.high & 0x7fff) == 0x7fff) && (a.low<<1); @@ -609,10 +573,6 @@ INLINE int floatx80_is_any_nan(floatx80 a) #define floatx80_default_nan_low LIT64( 0xC000000000000000 ) #endif -#endif - -#ifdef FLOAT128 - /*---------------------------------------------------------------------------- | Software IEC/IEEE quadruple-precision conversion routines. *----------------------------------------------------------------------------*/ @@ -622,9 +582,7 @@ int64 float128_to_int64( float128 STATUS_PARAM ); int64 float128_to_int64_round_to_zero( float128 STATUS_PARAM ); float32 float128_to_float32( float128 STATUS_PARAM ); float64 float128_to_float64( float128 STATUS_PARAM ); -#ifdef FLOATX80 floatx80 float128_to_floatx80( float128 STATUS_PARAM ); -#endif /*---------------------------------------------------------------------------- | Software IEC/IEEE quadruple-precision operations. @@ -678,6 +636,11 @@ INLINE int float128_is_zero(float128 a) return (a.high & 0x7fffffffffffffffLL) == 0 && a.low == 0; } +INLINE int float128_is_zero_or_denormal(float128 a) +{ + return (a.high & 0x7fff000000000000LL) == 0; +} + INLINE int float128_is_any_nan(float128 a) { return ((a.high >> 48) & 0x7fff) == 0x7fff && @@ -696,12 +659,4 @@ INLINE int float128_is_any_nan(float128 a) #define float128_default_nan_low LIT64( 0x0000000000000000 ) #endif -#endif - -#else /* CONFIG_SOFTFLOAT */ - -#include "softfloat-native.h" - -#endif /* !CONFIG_SOFTFLOAT */ - #endif /* !SOFTFLOAT_H */ diff --git a/fsdev/file-op-9p.h b/fsdev/file-op-9p.h index 126e60e276..af9daf797f 100644 --- a/fsdev/file-op-9p.h +++ b/fsdev/file-op-9p.h @@ -97,11 +97,4 @@ typedef struct FileOperations void *opaque; } FileOperations; -static inline const char *rpath(FsContext *ctx, const char *path) -{ - /* FIXME: so wrong... */ - static char buffer[4096]; - snprintf(buffer, sizeof(buffer), "%s/%s", ctx->fs_root, path); - return buffer; -} #endif diff --git a/fsdev/qemu-fsdev-dummy.c b/fsdev/qemu-fsdev-dummy.c new file mode 100644 index 0000000000..4e700dd4e4 --- /dev/null +++ b/fsdev/qemu-fsdev-dummy.c @@ -0,0 +1,28 @@ +/* + * Virtio 9p + * + * Copyright IBM, Corp. 2010 + * + * Authors: + * Gautham R Shenoy <ego@in.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ +#include <stdio.h> +#include <string.h> +#include "qemu-fsdev.h" +#include "qemu-config.h" + +int qemu_fsdev_add(QemuOpts *opts) +{ + return 0; +} + +static void fsdev_register_config(void) +{ + qemu_add_opts(&qemu_fsdev_opts); + qemu_add_opts(&qemu_virtfs_opts); +} +machine_init(fsdev_register_config); @@ -1105,10 +1105,6 @@ static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n) env->active_fpu.fcr31 = tmp & 0xFF83FFFF; /* set rounding mode */ RESTORE_ROUNDING_MODE; -#ifndef CONFIG_SOFTFLOAT - /* no floating point exception for native float */ - SET_FP_ENABLE(env->active_fpu.fcr31, 0); -#endif break; case 71: env->active_fpu.fcr0 = tmp; break; } @@ -1436,7 +1432,11 @@ static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n) /* XXX */ break; case S390_PC_REGNUM: GET_REGL(env->psw.addr); break; - case S390_CC_REGNUM: GET_REG32(env->cc); break; + case S390_CC_REGNUM: + env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, + env->cc_vr); + GET_REG32(env->cc_op); + break; } return 0; @@ -1462,7 +1462,7 @@ static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n) /* XXX */ break; case S390_PC_REGNUM: env->psw.addr = tmpl; break; - case S390_CC_REGNUM: env->cc = tmp32; r=4; break; + case S390_CC_REGNUM: env->cc_op = tmp32; r=4; break; } return r; diff --git a/hmp-commands.hx b/hmp-commands.hx index 834e6a8c87..6ad8806785 100644 --- a/hmp-commands.hx +++ b/hmp-commands.hx @@ -740,10 +740,11 @@ ETEXI #if defined(TARGET_I386) { .name = "nmi", - .args_type = "cpu_index:i", - .params = "cpu", - .help = "inject an NMI on the given CPU", - .mhandler.cmd = do_inject_nmi, + .args_type = "", + .params = "", + .help = "inject an NMI on all guest's CPUs", + .user_print = monitor_user_noop, + .mhandler.cmd_new = do_inject_nmi, }, #endif STEXI diff --git a/hppa-dis.c b/hppa-dis.c index 49f99c8d9e..a5760a9584 100644 --- a/hppa-dis.c +++ b/hppa-dis.c @@ -1645,7 +1645,7 @@ static const char *const fp_reg_names[] = typedef unsigned int CORE_ADDR; -/* Get at various relevent fields of an instruction word. */ +/* Get at various relevant fields of an instruction word. */ #define MASK_5 0x1f #define MASK_10 0x3ff diff --git a/hw/9pfs/virtio-9p-debug.c b/hw/9pfs/virtio-9p-debug.c index 6b18842fd4..4636ad51f0 100644 --- a/hw/9pfs/virtio-9p-debug.c +++ b/hw/9pfs/virtio-9p-debug.c @@ -10,8 +10,9 @@ * the COPYING file in the top-level directory. * */ -#include "virtio.h" -#include "pc.h" + +#include "hw/virtio.h" +#include "hw/pc.h" #include "virtio-9p.h" #include "virtio-9p-debug.h" diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c new file mode 100644 index 0000000000..f235236ea0 --- /dev/null +++ b/hw/9pfs/virtio-9p-device.c @@ -0,0 +1,173 @@ +/* + * Virtio 9p backend + * + * Copyright IBM, Corp. 2010 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ + +#include "hw/virtio.h" +#include "hw/pc.h" +#include "qemu_socket.h" +#include "hw/virtio-pci.h" +#include "virtio-9p.h" +#include "fsdev/qemu-fsdev.h" +#include "virtio-9p-xattr.h" + +static uint32_t virtio_9p_get_features(VirtIODevice *vdev, uint32_t features) +{ + features |= 1 << VIRTIO_9P_MOUNT_TAG; + return features; +} + +static V9fsState *to_virtio_9p(VirtIODevice *vdev) +{ + return (V9fsState *)vdev; +} + +static void virtio_9p_get_config(VirtIODevice *vdev, uint8_t *config) +{ + struct virtio_9p_config *cfg; + V9fsState *s = to_virtio_9p(vdev); + + cfg = qemu_mallocz(sizeof(struct virtio_9p_config) + + s->tag_len); + stw_raw(&cfg->tag_len, s->tag_len); + memcpy(cfg->tag, s->tag, s->tag_len); + memcpy(config, cfg, s->config_size); + qemu_free(cfg); +} + +VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf) + { + V9fsState *s; + int i, len; + struct stat stat; + FsTypeEntry *fse; + + + s = (V9fsState *)virtio_common_init("virtio-9p", + VIRTIO_ID_9P, + sizeof(struct virtio_9p_config)+ + MAX_TAG_LEN, + sizeof(V9fsState)); + + /* initialize pdu allocator */ + QLIST_INIT(&s->free_list); + for (i = 0; i < (MAX_REQ - 1); i++) { + QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next); + } + + s->vq = virtio_add_queue(&s->vdev, MAX_REQ, handle_9p_output); + + fse = get_fsdev_fsentry(conf->fsdev_id); + + if (!fse) { + /* We don't have a fsdev identified by fsdev_id */ + fprintf(stderr, "Virtio-9p device couldn't find fsdev with the " + "id = %s\n", conf->fsdev_id ? conf->fsdev_id : "NULL"); + exit(1); + } + + if (!fse->path || !conf->tag) { + /* we haven't specified a mount_tag or the path */ + fprintf(stderr, "fsdev with id %s needs path " + "and Virtio-9p device needs mount_tag arguments\n", + conf->fsdev_id); + exit(1); + } + + if (!strcmp(fse->security_model, "passthrough")) { + /* Files on the Fileserver set to client user credentials */ + s->ctx.fs_sm = SM_PASSTHROUGH; + s->ctx.xops = passthrough_xattr_ops; + } else if (!strcmp(fse->security_model, "mapped")) { + /* Files on the fileserver are set to QEMU credentials. + * Client user credentials are saved in extended attributes. + */ + s->ctx.fs_sm = SM_MAPPED; + s->ctx.xops = mapped_xattr_ops; + } else if (!strcmp(fse->security_model, "none")) { + /* + * Files on the fileserver are set to QEMU credentials. + */ + s->ctx.fs_sm = SM_NONE; + s->ctx.xops = none_xattr_ops; + } else { + fprintf(stderr, "Default to security_model=none. 
You may want" + " enable advanced security model using " + "security option:\n\t security_model=passthrough\n\t " + "security_model=mapped\n"); + s->ctx.fs_sm = SM_NONE; + s->ctx.xops = none_xattr_ops; + } + + if (lstat(fse->path, &stat)) { + fprintf(stderr, "share path %s does not exist\n", fse->path); + exit(1); + } else if (!S_ISDIR(stat.st_mode)) { + fprintf(stderr, "share path %s is not a directory\n", fse->path); + exit(1); + } + + s->ctx.fs_root = qemu_strdup(fse->path); + len = strlen(conf->tag); + if (len > MAX_TAG_LEN) { + len = MAX_TAG_LEN; + } + /* s->tag is non-NULL terminated string */ + s->tag = qemu_malloc(len); + memcpy(s->tag, conf->tag, len); + s->tag_len = len; + s->ctx.uid = -1; + + s->ops = fse->ops; + s->vdev.get_features = virtio_9p_get_features; + s->config_size = sizeof(struct virtio_9p_config) + + s->tag_len; + s->vdev.get_config = virtio_9p_get_config; + + return &s->vdev; +} + +static int virtio_9p_init_pci(PCIDevice *pci_dev) +{ + VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev); + VirtIODevice *vdev; + + vdev = virtio_9p_init(&pci_dev->qdev, &proxy->fsconf); + vdev->nvectors = proxy->nvectors; + virtio_init_pci(proxy, vdev); + /* make the actual value visible */ + proxy->nvectors = vdev->nvectors; + return 0; +} + +static PCIDeviceInfo virtio_9p_info = { + .qdev.name = "virtio-9p-pci", + .qdev.size = sizeof(VirtIOPCIProxy), + .init = virtio_9p_init_pci, + .vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET, + .device_id = 0x1009, + .revision = VIRTIO_PCI_ABI_VERSION, + .class_id = 0x2, + .qdev.props = (Property[]) { + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2), + DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features), + DEFINE_PROP_STRING("mount_tag", VirtIOPCIProxy, fsconf.tag), + DEFINE_PROP_STRING("fsdev", VirtIOPCIProxy, fsconf.fsdev_id), + DEFINE_PROP_END_OF_LIST(), + } +}; + +static void virtio_9p_register_devices(void) +{ + pci_qdev_register(&virtio_9p_info); +} + +device_init(virtio_9p_register_devices) diff --git a/hw/9pfs/virtio-9p-local.c b/hw/9pfs/virtio-9p-local.c index 0a015de9a5..77904c37bd 100644 --- a/hw/9pfs/virtio-9p-local.c +++ b/hw/9pfs/virtio-9p-local.c @@ -10,7 +10,8 @@ * the COPYING file in the top-level directory. 
* */ -#include "virtio.h" + +#include "hw/virtio.h" #include "virtio-9p.h" #include "virtio-9p-xattr.h" #include <arpa/inet.h> @@ -24,7 +25,8 @@ static int local_lstat(FsContext *fs_ctx, const char *path, struct stat *stbuf) { int err; - err = lstat(rpath(fs_ctx, path), stbuf); + char buffer[PATH_MAX]; + err = lstat(rpath(fs_ctx, path, buffer), stbuf); if (err) { return err; } @@ -34,19 +36,19 @@ static int local_lstat(FsContext *fs_ctx, const char *path, struct stat *stbuf) gid_t tmp_gid; mode_t tmp_mode; dev_t tmp_dev; - if (getxattr(rpath(fs_ctx, path), "user.virtfs.uid", &tmp_uid, + if (getxattr(rpath(fs_ctx, path, buffer), "user.virtfs.uid", &tmp_uid, sizeof(uid_t)) > 0) { stbuf->st_uid = tmp_uid; } - if (getxattr(rpath(fs_ctx, path), "user.virtfs.gid", &tmp_gid, + if (getxattr(rpath(fs_ctx, path, buffer), "user.virtfs.gid", &tmp_gid, sizeof(gid_t)) > 0) { stbuf->st_gid = tmp_gid; } - if (getxattr(rpath(fs_ctx, path), "user.virtfs.mode", &tmp_mode, - sizeof(mode_t)) > 0) { + if (getxattr(rpath(fs_ctx, path, buffer), "user.virtfs.mode", + &tmp_mode, sizeof(mode_t)) > 0) { stbuf->st_mode = tmp_mode; } - if (getxattr(rpath(fs_ctx, path), "user.virtfs.rdev", &tmp_dev, + if (getxattr(rpath(fs_ctx, path, buffer), "user.virtfs.rdev", &tmp_dev, sizeof(dev_t)) > 0) { stbuf->st_rdev = tmp_dev; } @@ -91,10 +93,12 @@ static int local_set_xattr(const char *path, FsCred *credp) static int local_post_create_passthrough(FsContext *fs_ctx, const char *path, FsCred *credp) { - if (chmod(rpath(fs_ctx, path), credp->fc_mode & 07777) < 0) { + char buffer[PATH_MAX]; + if (chmod(rpath(fs_ctx, path, buffer), credp->fc_mode & 07777) < 0) { return -1; } - if (lchown(rpath(fs_ctx, path), credp->fc_uid, credp->fc_gid) < 0) { + if (lchown(rpath(fs_ctx, path, buffer), credp->fc_uid, + credp->fc_gid) < 0) { /* * If we fail to change ownership and if we are * using security model none. 
Ignore the error @@ -110,9 +114,10 @@ static ssize_t local_readlink(FsContext *fs_ctx, const char *path, char *buf, size_t bufsz) { ssize_t tsize = -1; + char buffer[PATH_MAX]; if (fs_ctx->fs_sm == SM_MAPPED) { int fd; - fd = open(rpath(fs_ctx, path), O_RDONLY); + fd = open(rpath(fs_ctx, path, buffer), O_RDONLY); if (fd == -1) { return -1; } @@ -123,7 +128,7 @@ static ssize_t local_readlink(FsContext *fs_ctx, const char *path, return tsize; } else if ((fs_ctx->fs_sm == SM_PASSTHROUGH) || (fs_ctx->fs_sm == SM_NONE)) { - tsize = readlink(rpath(fs_ctx, path), buf, bufsz); + tsize = readlink(rpath(fs_ctx, path, buffer), buf, bufsz); } return tsize; } @@ -140,12 +145,14 @@ static int local_closedir(FsContext *ctx, DIR *dir) static int local_open(FsContext *ctx, const char *path, int flags) { - return open(rpath(ctx, path), flags); + char buffer[PATH_MAX]; + return open(rpath(ctx, path, buffer), flags); } static DIR *local_opendir(FsContext *ctx, const char *path) { - return opendir(rpath(ctx, path)); + char buffer[PATH_MAX]; + return opendir(rpath(ctx, path, buffer)); } static void local_rewinddir(FsContext *ctx, DIR *dir) @@ -200,11 +207,12 @@ static ssize_t local_pwritev(FsContext *ctx, int fd, const struct iovec *iov, static int local_chmod(FsContext *fs_ctx, const char *path, FsCred *credp) { + char buffer[PATH_MAX]; if (fs_ctx->fs_sm == SM_MAPPED) { - return local_set_xattr(rpath(fs_ctx, path), credp); + return local_set_xattr(rpath(fs_ctx, path, buffer), credp); } else if ((fs_ctx->fs_sm == SM_PASSTHROUGH) || (fs_ctx->fs_sm == SM_NONE)) { - return chmod(rpath(fs_ctx, path), credp->fc_mode); + return chmod(rpath(fs_ctx, path, buffer), credp->fc_mode); } return -1; } @@ -213,21 +221,24 @@ static int local_mknod(FsContext *fs_ctx, const char *path, FsCred *credp) { int err = -1; int serrno = 0; + char buffer[PATH_MAX]; /* Determine the security model */ if (fs_ctx->fs_sm == SM_MAPPED) { - err = mknod(rpath(fs_ctx, path), SM_LOCAL_MODE_BITS|S_IFREG, 0); + err = mknod(rpath(fs_ctx, path, buffer), + SM_LOCAL_MODE_BITS|S_IFREG, 0); if (err == -1) { return err; } - local_set_xattr(rpath(fs_ctx, path), credp); + local_set_xattr(rpath(fs_ctx, path, buffer), credp); if (err == -1) { serrno = errno; goto err_end; } } else if ((fs_ctx->fs_sm == SM_PASSTHROUGH) || (fs_ctx->fs_sm == SM_NONE)) { - err = mknod(rpath(fs_ctx, path), credp->fc_mode, credp->fc_rdev); + err = mknod(rpath(fs_ctx, path, buffer), credp->fc_mode, + credp->fc_rdev); if (err == -1) { return err; } @@ -240,7 +251,7 @@ static int local_mknod(FsContext *fs_ctx, const char *path, FsCred *credp) return err; err_end: - remove(rpath(fs_ctx, path)); + remove(rpath(fs_ctx, path, buffer)); errno = serrno; return err; } @@ -249,22 +260,23 @@ static int local_mkdir(FsContext *fs_ctx, const char *path, FsCred *credp) { int err = -1; int serrno = 0; + char buffer[PATH_MAX]; /* Determine the security model */ if (fs_ctx->fs_sm == SM_MAPPED) { - err = mkdir(rpath(fs_ctx, path), SM_LOCAL_DIR_MODE_BITS); + err = mkdir(rpath(fs_ctx, path, buffer), SM_LOCAL_DIR_MODE_BITS); if (err == -1) { return err; } credp->fc_mode = credp->fc_mode|S_IFDIR; - err = local_set_xattr(rpath(fs_ctx, path), credp); + err = local_set_xattr(rpath(fs_ctx, path, buffer), credp); if (err == -1) { serrno = errno; goto err_end; } } else if ((fs_ctx->fs_sm == SM_PASSTHROUGH) || (fs_ctx->fs_sm == SM_NONE)) { - err = mkdir(rpath(fs_ctx, path), credp->fc_mode); + err = mkdir(rpath(fs_ctx, path, buffer), credp->fc_mode); if (err == -1) { return err; } @@ -277,7 +289,7 @@ static int 
local_mkdir(FsContext *fs_ctx, const char *path, FsCred *credp) return err; err_end: - remove(rpath(fs_ctx, path)); + remove(rpath(fs_ctx, path, buffer)); errno = serrno; return err; } @@ -318,23 +330,24 @@ static int local_open2(FsContext *fs_ctx, const char *path, int flags, int fd = -1; int err = -1; int serrno = 0; + char buffer[PATH_MAX]; /* Determine the security model */ if (fs_ctx->fs_sm == SM_MAPPED) { - fd = open(rpath(fs_ctx, path), flags, SM_LOCAL_MODE_BITS); + fd = open(rpath(fs_ctx, path, buffer), flags, SM_LOCAL_MODE_BITS); if (fd == -1) { return fd; } credp->fc_mode = credp->fc_mode|S_IFREG; /* Set cleint credentials in xattr */ - err = local_set_xattr(rpath(fs_ctx, path), credp); + err = local_set_xattr(rpath(fs_ctx, path, buffer), credp); if (err == -1) { serrno = errno; goto err_end; } } else if ((fs_ctx->fs_sm == SM_PASSTHROUGH) || (fs_ctx->fs_sm == SM_NONE)) { - fd = open(rpath(fs_ctx, path), flags, credp->fc_mode); + fd = open(rpath(fs_ctx, path, buffer), flags, credp->fc_mode); if (fd == -1) { return fd; } @@ -348,7 +361,7 @@ static int local_open2(FsContext *fs_ctx, const char *path, int flags, err_end: close(fd); - remove(rpath(fs_ctx, path)); + remove(rpath(fs_ctx, path, buffer)); errno = serrno; return err; } @@ -359,12 +372,13 @@ static int local_symlink(FsContext *fs_ctx, const char *oldpath, { int err = -1; int serrno = 0; + char buffer[PATH_MAX]; /* Determine the security model */ if (fs_ctx->fs_sm == SM_MAPPED) { int fd; ssize_t oldpath_size, write_size; - fd = open(rpath(fs_ctx, newpath), O_CREAT|O_EXCL|O_RDWR, + fd = open(rpath(fs_ctx, newpath, buffer), O_CREAT|O_EXCL|O_RDWR, SM_LOCAL_MODE_BITS); if (fd == -1) { return fd; @@ -384,18 +398,19 @@ static int local_symlink(FsContext *fs_ctx, const char *oldpath, close(fd); /* Set cleint credentials in symlink's xattr */ credp->fc_mode = credp->fc_mode|S_IFLNK; - err = local_set_xattr(rpath(fs_ctx, newpath), credp); + err = local_set_xattr(rpath(fs_ctx, newpath, buffer), credp); if (err == -1) { serrno = errno; goto err_end; } } else if ((fs_ctx->fs_sm == SM_PASSTHROUGH) || (fs_ctx->fs_sm == SM_NONE)) { - err = symlink(oldpath, rpath(fs_ctx, newpath)); + err = symlink(oldpath, rpath(fs_ctx, newpath, buffer)); if (err) { return err; } - err = lchown(rpath(fs_ctx, newpath), credp->fc_uid, credp->fc_gid); + err = lchown(rpath(fs_ctx, newpath, buffer), credp->fc_uid, + credp->fc_gid); if (err == -1) { /* * If we fail to change ownership and if we are @@ -411,70 +426,45 @@ static int local_symlink(FsContext *fs_ctx, const char *oldpath, return err; err_end: - remove(rpath(fs_ctx, newpath)); + remove(rpath(fs_ctx, newpath, buffer)); errno = serrno; return err; } static int local_link(FsContext *ctx, const char *oldpath, const char *newpath) { - char *tmp = qemu_strdup(rpath(ctx, oldpath)); - int err, serrno = 0; - - if (tmp == NULL) { - return -ENOMEM; - } - - err = link(tmp, rpath(ctx, newpath)); - if (err == -1) { - serrno = errno; - } - - qemu_free(tmp); - - if (err == -1) { - errno = serrno; - } + char buffer[PATH_MAX], buffer1[PATH_MAX]; - return err; + return link(rpath(ctx, oldpath, buffer), rpath(ctx, newpath, buffer1)); } static int local_truncate(FsContext *ctx, const char *path, off_t size) { - return truncate(rpath(ctx, path), size); + char buffer[PATH_MAX]; + return truncate(rpath(ctx, path, buffer), size); } static int local_rename(FsContext *ctx, const char *oldpath, const char *newpath) { - char *tmp; - int err; - - tmp = qemu_strdup(rpath(ctx, oldpath)); - - err = rename(tmp, rpath(ctx, newpath)); - 
if (err == -1) { - int serrno = errno; - qemu_free(tmp); - errno = serrno; - } else { - qemu_free(tmp); - } - - return err; + char buffer[PATH_MAX], buffer1[PATH_MAX]; + return rename(rpath(ctx, oldpath, buffer), rpath(ctx, newpath, buffer1)); } static int local_chown(FsContext *fs_ctx, const char *path, FsCred *credp) { + char buffer[PATH_MAX]; if ((credp->fc_uid == -1 && credp->fc_gid == -1) || (fs_ctx->fs_sm == SM_PASSTHROUGH)) { - return lchown(rpath(fs_ctx, path), credp->fc_uid, credp->fc_gid); + return lchown(rpath(fs_ctx, path, buffer), credp->fc_uid, + credp->fc_gid); } else if (fs_ctx->fs_sm == SM_MAPPED) { - return local_set_xattr(rpath(fs_ctx, path), credp); + return local_set_xattr(rpath(fs_ctx, path, buffer), credp); } else if ((fs_ctx->fs_sm == SM_PASSTHROUGH) || (fs_ctx->fs_sm == SM_NONE)) { - return lchown(rpath(fs_ctx, path), credp->fc_uid, credp->fc_gid); + return lchown(rpath(fs_ctx, path, buffer), credp->fc_uid, + credp->fc_gid); } return -1; } @@ -482,12 +472,15 @@ static int local_chown(FsContext *fs_ctx, const char *path, FsCred *credp) static int local_utimensat(FsContext *s, const char *path, const struct timespec *buf) { - return qemu_utimensat(AT_FDCWD, rpath(s, path), buf, AT_SYMLINK_NOFOLLOW); + char buffer[PATH_MAX]; + return qemu_utimensat(AT_FDCWD, rpath(s, path, buffer), buf, + AT_SYMLINK_NOFOLLOW); } static int local_remove(FsContext *ctx, const char *path) { - return remove(rpath(ctx, path)); + char buffer[PATH_MAX]; + return remove(rpath(ctx, path, buffer)); } static int local_fsync(FsContext *ctx, int fd, int datasync) @@ -501,7 +494,8 @@ static int local_fsync(FsContext *ctx, int fd, int datasync) static int local_statfs(FsContext *s, const char *path, struct statfs *stbuf) { - return statfs(rpath(s, path), stbuf); + char buffer[PATH_MAX]; + return statfs(rpath(s, path, buffer), stbuf); } static ssize_t local_lgetxattr(FsContext *ctx, const char *path, diff --git a/hw/9pfs/virtio-9p-posix-acl.c b/hw/9pfs/virtio-9p-posix-acl.c index 575abe86b0..f5b392e180 100644 --- a/hw/9pfs/virtio-9p-posix-acl.c +++ b/hw/9pfs/virtio-9p-posix-acl.c @@ -13,7 +13,7 @@ #include <sys/types.h> #include <attr/xattr.h> -#include "virtio.h" +#include "hw/virtio.h" #include "virtio-9p.h" #include "fsdev/file-op-9p.h" #include "virtio-9p-xattr.h" @@ -26,7 +26,8 @@ static ssize_t mp_pacl_getxattr(FsContext *ctx, const char *path, const char *name, void *value, size_t size) { - return lgetxattr(rpath(ctx, path), MAP_ACL_ACCESS, value, size); + char buffer[PATH_MAX]; + return lgetxattr(rpath(ctx, path, buffer), MAP_ACL_ACCESS, value, size); } static ssize_t mp_pacl_listxattr(FsContext *ctx, const char *path, @@ -50,14 +51,17 @@ static ssize_t mp_pacl_listxattr(FsContext *ctx, const char *path, static int mp_pacl_setxattr(FsContext *ctx, const char *path, const char *name, void *value, size_t size, int flags) { - return lsetxattr(rpath(ctx, path), MAP_ACL_ACCESS, value, size, flags); + char buffer[PATH_MAX]; + return lsetxattr(rpath(ctx, path, buffer), MAP_ACL_ACCESS, value, + size, flags); } static int mp_pacl_removexattr(FsContext *ctx, const char *path, const char *name) { int ret; - ret = lremovexattr(rpath(ctx, path), MAP_ACL_ACCESS); + char buffer[PATH_MAX]; + ret = lremovexattr(rpath(ctx, path, buffer), MAP_ACL_ACCESS); if (ret == -1 && errno == ENODATA) { /* * We don't get ENODATA error when trying to remove a @@ -73,7 +77,8 @@ static int mp_pacl_removexattr(FsContext *ctx, static ssize_t mp_dacl_getxattr(FsContext *ctx, const char *path, const char *name, void *value, 
size_t size) { - return lgetxattr(rpath(ctx, path), MAP_ACL_DEFAULT, value, size); + char buffer[PATH_MAX]; + return lgetxattr(rpath(ctx, path, buffer), MAP_ACL_DEFAULT, value, size); } static ssize_t mp_dacl_listxattr(FsContext *ctx, const char *path, @@ -97,14 +102,17 @@ static ssize_t mp_dacl_listxattr(FsContext *ctx, const char *path, static int mp_dacl_setxattr(FsContext *ctx, const char *path, const char *name, void *value, size_t size, int flags) { - return lsetxattr(rpath(ctx, path), MAP_ACL_DEFAULT, value, size, flags); + char buffer[PATH_MAX]; + return lsetxattr(rpath(ctx, path, buffer), MAP_ACL_DEFAULT, value, + size, flags); } static int mp_dacl_removexattr(FsContext *ctx, const char *path, const char *name) { int ret; - ret = lremovexattr(rpath(ctx, path), MAP_ACL_DEFAULT); + char buffer[PATH_MAX]; + ret = lremovexattr(rpath(ctx, path, buffer), MAP_ACL_DEFAULT); if (ret == -1 && errno == ENODATA) { /* * We don't get ENODATA error when trying to remove a diff --git a/hw/9pfs/virtio-9p-xattr-user.c b/hw/9pfs/virtio-9p-xattr-user.c index bba13ce643..5044a3e5ab 100644 --- a/hw/9pfs/virtio-9p-xattr-user.c +++ b/hw/9pfs/virtio-9p-xattr-user.c @@ -12,7 +12,7 @@ */ #include <sys/types.h> -#include "virtio.h" +#include "hw/virtio.h" #include "virtio-9p.h" #include "fsdev/file-op-9p.h" #include "virtio-9p-xattr.h" @@ -21,6 +21,7 @@ static ssize_t mp_user_getxattr(FsContext *ctx, const char *path, const char *name, void *value, size_t size) { + char buffer[PATH_MAX]; if (strncmp(name, "user.virtfs.", 12) == 0) { /* * Don't allow fetch of user.virtfs namesapce @@ -29,7 +30,7 @@ static ssize_t mp_user_getxattr(FsContext *ctx, const char *path, errno = ENOATTR; return -1; } - return lgetxattr(rpath(ctx, path), name, value, size); + return lgetxattr(rpath(ctx, path, buffer), name, value, size); } static ssize_t mp_user_listxattr(FsContext *ctx, const char *path, @@ -67,6 +68,7 @@ static ssize_t mp_user_listxattr(FsContext *ctx, const char *path, static int mp_user_setxattr(FsContext *ctx, const char *path, const char *name, void *value, size_t size, int flags) { + char buffer[PATH_MAX]; if (strncmp(name, "user.virtfs.", 12) == 0) { /* * Don't allow fetch of user.virtfs namesapce @@ -75,12 +77,13 @@ static int mp_user_setxattr(FsContext *ctx, const char *path, const char *name, errno = EACCES; return -1; } - return lsetxattr(rpath(ctx, path), name, value, size, flags); + return lsetxattr(rpath(ctx, path, buffer), name, value, size, flags); } static int mp_user_removexattr(FsContext *ctx, const char *path, const char *name) { + char buffer[PATH_MAX]; if (strncmp(name, "user.virtfs.", 12) == 0) { /* * Don't allow fetch of user.virtfs namesapce @@ -89,7 +92,7 @@ static int mp_user_removexattr(FsContext *ctx, errno = EACCES; return -1; } - return lremovexattr(rpath(ctx, path), name); + return lremovexattr(rpath(ctx, path, buffer), name); } XattrOperations mapped_user_xattr = { diff --git a/hw/9pfs/virtio-9p-xattr.c b/hw/9pfs/virtio-9p-xattr.c index 03c3d3f6bb..bde0b7fb4f 100644 --- a/hw/9pfs/virtio-9p-xattr.c +++ b/hw/9pfs/virtio-9p-xattr.c @@ -11,7 +11,7 @@ * */ -#include "virtio.h" +#include "hw/virtio.h" #include "virtio-9p.h" #include "fsdev/file-op-9p.h" #include "virtio-9p-xattr.h" @@ -66,20 +66,21 @@ ssize_t v9fs_list_xattr(FsContext *ctx, const char *path, void *value, size_t vsize) { ssize_t size = 0; + char buffer[PATH_MAX]; void *ovalue = value; XattrOperations *xops; char *orig_value, *orig_value_start; ssize_t xattr_len, parsed_len = 0, attr_len; /* Get the actual len */ - xattr_len 
= llistxattr(rpath(ctx, path), value, 0); + xattr_len = llistxattr(rpath(ctx, path, buffer), value, 0); if (xattr_len <= 0) { return xattr_len; } /* Now fetch the xattr and find the actual size */ orig_value = qemu_malloc(xattr_len); - xattr_len = llistxattr(rpath(ctx, path), orig_value, xattr_len); + xattr_len = llistxattr(rpath(ctx, path, buffer), orig_value, xattr_len); /* store the orig pointer */ orig_value_start = orig_value; diff --git a/hw/9pfs/virtio-9p-xattr.h b/hw/9pfs/virtio-9p-xattr.h index 2bbae2dcb5..247e414ebd 100644 --- a/hw/9pfs/virtio-9p-xattr.h +++ b/hw/9pfs/virtio-9p-xattr.h @@ -54,20 +54,23 @@ ssize_t pt_listxattr(FsContext *ctx, const char *path, char *name, void *value, static inline ssize_t pt_getxattr(FsContext *ctx, const char *path, const char *name, void *value, size_t size) { - return lgetxattr(rpath(ctx, path), name, value, size); + char buffer[PATH_MAX]; + return lgetxattr(rpath(ctx, path, buffer), name, value, size); } static inline int pt_setxattr(FsContext *ctx, const char *path, const char *name, void *value, size_t size, int flags) { - return lsetxattr(rpath(ctx, path), name, value, size, flags); + char buffer[PATH_MAX]; + return lsetxattr(rpath(ctx, path, buffer), name, value, size, flags); } static inline int pt_removexattr(FsContext *ctx, const char *path, const char *name) { - return lremovexattr(rpath(ctx, path), name); + char buffer[PATH_MAX]; + return lremovexattr(rpath(ctx, path, buffer), name); } static inline ssize_t notsup_getxattr(FsContext *ctx, const char *path, diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c index b5fc52b3eb..4890df6f75 100644 --- a/hw/9pfs/virtio-9p.c +++ b/hw/9pfs/virtio-9p.c @@ -11,9 +11,10 @@ * */ -#include "virtio.h" -#include "pc.h" +#include "hw/virtio.h" +#include "hw/pc.h" #include "qemu_socket.h" +#include "hw/virtio-pci.h" #include "virtio-9p.h" #include "fsdev/qemu-fsdev.h" #include "virtio-9p-debug.h" @@ -194,7 +195,6 @@ static int v9fs_do_open2(V9fsState *s, char *fullname, uid_t uid, gid_t gid, cred.fc_uid = uid; cred.fc_gid = gid; cred.fc_mode = mode & 07777; - flags = flags; return s->ops->open2(&s->ctx, fullname, flags, &cred); } @@ -423,6 +423,22 @@ static void v9fs_string_copy(V9fsString *lhs, V9fsString *rhs) v9fs_string_sprintf(lhs, "%s", rhs->data); } +/* + * Return TRUE if s1 is an ancestor of s2. + * + * E.g. "a/b" is an ancestor of "a/b/c" but not of "a/bc/d". + * As a special case, We treat s1 as ancestor of s2 if they are same! 
+ */ +static int v9fs_path_is_ancestor(V9fsString *s1, V9fsString *s2) +{ + if (!strncmp(s1->data, s2->data, s1->size)) { + if (s2->data[s1->size] == '\0' || s2->data[s1->size] == '/') { + return 1; + } + } + return 0; +} + static size_t v9fs_string_size(V9fsString *str) { return str->size; @@ -2805,13 +2821,13 @@ static int v9fs_complete_rename(V9fsState *s, V9fsRenameState *vs) for (fidp = s->fid_list; fidp; fidp = fidp->next) { if (vs->fidp == fidp) { /* - * we replace name of this fid towards the end - * so that our below strcmp will work + * we replace name of this fid towards the end so + * that our below v9fs_path_is_ancestor check will + * work */ continue; } - if (!strncmp(vs->fidp->path.data, fidp->path.data, - strlen(vs->fidp->path.data))) { + if (v9fs_path_is_ancestor(&vs->fidp->path, &fidp->path)) { /* replace the name */ v9fs_fix_path(&fidp->path, &vs->name, strlen(vs->fidp->path.data)); @@ -3589,6 +3605,11 @@ static pdu_handler_t *pdu_handlers[] = { [P9_TREMOVE] = v9fs_remove, }; +static void v9fs_op_not_supp(V9fsState *s, V9fsPDU *pdu) +{ + complete_pdu(s, pdu, -EOPNOTSUPP); +} + static void submit_pdu(V9fsState *s, V9fsPDU *pdu) { pdu_handler_t *handler; @@ -3596,16 +3617,16 @@ static void submit_pdu(V9fsState *s, V9fsPDU *pdu) if (debug_9p_pdu) { pprint_pdu(pdu); } - - BUG_ON(pdu->id >= ARRAY_SIZE(pdu_handlers)); - - handler = pdu_handlers[pdu->id]; - BUG_ON(handler == NULL); - + if (pdu->id >= ARRAY_SIZE(pdu_handlers) || + (pdu_handlers[pdu->id] == NULL)) { + handler = v9fs_op_not_supp; + } else { + handler = pdu_handlers[pdu->id]; + } handler(s, pdu); } -static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq) +void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq) { V9fsState *s = (V9fsState *)vdev; V9fsPDU *pdu; @@ -3629,119 +3650,3 @@ static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq) free_pdu(s, pdu); } - -static uint32_t virtio_9p_get_features(VirtIODevice *vdev, uint32_t features) -{ - features |= 1 << VIRTIO_9P_MOUNT_TAG; - return features; -} - -static V9fsState *to_virtio_9p(VirtIODevice *vdev) -{ - return (V9fsState *)vdev; -} - -static void virtio_9p_get_config(VirtIODevice *vdev, uint8_t *config) -{ - struct virtio_9p_config *cfg; - V9fsState *s = to_virtio_9p(vdev); - - cfg = qemu_mallocz(sizeof(struct virtio_9p_config) + - s->tag_len); - stw_raw(&cfg->tag_len, s->tag_len); - memcpy(cfg->tag, s->tag, s->tag_len); - memcpy(config, cfg, s->config_size); - qemu_free(cfg); -} - -VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf) - { - V9fsState *s; - int i, len; - struct stat stat; - FsTypeEntry *fse; - - - s = (V9fsState *)virtio_common_init("virtio-9p", - VIRTIO_ID_9P, - sizeof(struct virtio_9p_config)+ - MAX_TAG_LEN, - sizeof(V9fsState)); - - /* initialize pdu allocator */ - QLIST_INIT(&s->free_list); - for (i = 0; i < (MAX_REQ - 1); i++) { - QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next); - } - - s->vq = virtio_add_queue(&s->vdev, MAX_REQ, handle_9p_output); - - fse = get_fsdev_fsentry(conf->fsdev_id); - - if (!fse) { - /* We don't have a fsdev identified by fsdev_id */ - fprintf(stderr, "Virtio-9p device couldn't find fsdev with the " - "id = %s\n", conf->fsdev_id ? 
conf->fsdev_id : "NULL"); - exit(1); - } - - if (!fse->path || !conf->tag) { - /* we haven't specified a mount_tag or the path */ - fprintf(stderr, "fsdev with id %s needs path " - "and Virtio-9p device needs mount_tag arguments\n", - conf->fsdev_id); - exit(1); - } - - if (!strcmp(fse->security_model, "passthrough")) { - /* Files on the Fileserver set to client user credentials */ - s->ctx.fs_sm = SM_PASSTHROUGH; - s->ctx.xops = passthrough_xattr_ops; - } else if (!strcmp(fse->security_model, "mapped")) { - /* Files on the fileserver are set to QEMU credentials. - * Client user credentials are saved in extended attributes. - */ - s->ctx.fs_sm = SM_MAPPED; - s->ctx.xops = mapped_xattr_ops; - } else if (!strcmp(fse->security_model, "none")) { - /* - * Files on the fileserver are set to QEMU credentials. - */ - s->ctx.fs_sm = SM_NONE; - s->ctx.xops = none_xattr_ops; - } else { - fprintf(stderr, "Default to security_model=none. You may want" - " enable advanced security model using " - "security option:\n\t security_model=passthrough \n\t " - "security_model=mapped\n"); - s->ctx.fs_sm = SM_NONE; - s->ctx.xops = none_xattr_ops; - } - - if (lstat(fse->path, &stat)) { - fprintf(stderr, "share path %s does not exist\n", fse->path); - exit(1); - } else if (!S_ISDIR(stat.st_mode)) { - fprintf(stderr, "share path %s is not a directory \n", fse->path); - exit(1); - } - - s->ctx.fs_root = qemu_strdup(fse->path); - len = strlen(conf->tag); - if (len > MAX_TAG_LEN) { - len = MAX_TAG_LEN; - } - /* s->tag is non-NULL terminated string */ - s->tag = qemu_malloc(len); - memcpy(s->tag, conf->tag, len); - s->tag_len = len; - s->ctx.uid = -1; - - s->ops = fse->ops; - s->vdev.get_features = virtio_9p_get_features; - s->config_size = sizeof(struct virtio_9p_config) + - s->tag_len; - s->vdev.get_config = virtio_9p_get_config; - - return &s->vdev; -} diff --git a/hw/9pfs/virtio-9p.h b/hw/9pfs/virtio-9p.h index 622928fce5..2bfbe622af 100644 --- a/hw/9pfs/virtio-9p.h +++ b/hw/9pfs/virtio-9p.h @@ -101,6 +101,11 @@ enum p9_proto_version { #define P9_NOTAG (u16)(~0) #define P9_NOFID (u32)(~0) #define P9_MAXWELEM 16 +static inline const char *rpath(FsContext *ctx, const char *path, char *buffer) +{ + snprintf(buffer, PATH_MAX, "%s/%s", ctx->fs_root, path); + return buffer; +} /* * ample room for Twrite/Rread header @@ -504,4 +509,6 @@ static inline size_t do_pdu_unpack(void *dst, struct iovec *sg, int sg_count, return pdu_packunpack(dst, sg, sg_count, offset, size, 0); } +extern void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq); + #endif @@ -1001,8 +1001,6 @@ static int write_audio (AC97LinkState *s, AC97BusMasterRegs *r, static void write_bup (AC97LinkState *s, int elapsed) { - int written = 0; - dolog ("write_bup\n"); if (!(s->bup_flag & BUP_SET)) { if (s->bup_flag & BUP_LAST) { @@ -1026,7 +1024,6 @@ static void write_bup (AC97LinkState *s, int elapsed) return; temp -= copied; elapsed -= copied; - written += copied; } } } @@ -1069,7 +1066,7 @@ static int read_audio (AC97LinkState *s, AC97BusMasterRegs *r, static void transfer_audio (AC97LinkState *s, int index, int elapsed) { AC97BusMasterRegs *r = &s->bm_regs[index]; - int written = 0, stop = 0; + int stop = 0; if (s->invalid_freq[index]) { AUD_log ("ac97", "attempt to use voice %d with invalid frequency %d\n", @@ -1114,7 +1111,6 @@ static void transfer_audio (AC97LinkState *s, int index, int elapsed) switch (index) { case PO_INDEX: temp = write_audio (s, r, elapsed, &stop); - written += temp; elapsed -= temp; r->picb -= (temp >> 1); break; diff --git 
a/hw/acpi_piix4.c b/hw/acpi_piix4.c index 232008dd93..350558b859 100644 --- a/hw/acpi_piix4.c +++ b/hw/acpi_piix4.c @@ -471,11 +471,13 @@ static void pciej_write(void *opaque, uint32_t addr, uint32_t val) BusState *bus = opaque; DeviceState *qdev, *next; PCIDevice *dev; + PCIDeviceInfo *info; int slot = ffs(val) - 1; QLIST_FOREACH_SAFE(qdev, &bus->children, sibling, next) { dev = DO_UPCAST(PCIDevice, qdev, qdev); - if (PCI_SLOT(dev->devfn) == slot) { + info = container_of(qdev->info, PCIDeviceInfo, qdev); + if (PCI_SLOT(dev->devfn) == slot && !info->no_hotplug) { qdev_free(qdev); } } diff --git a/hw/alpha_palcode.c b/hw/alpha_palcode.c deleted file mode 100644 index 033b54201c..0000000000 --- a/hw/alpha_palcode.c +++ /dev/null @@ -1,1048 +0,0 @@ -/* - * Alpha emulation - PALcode emulation for qemu. - * - * Copyright (c) 2007 Jocelyn Mayer - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see <http://www.gnu.org/licenses/>. - */ - -#include <stdint.h> -#include <stdlib.h> -#include <stdio.h> - -#include "cpu.h" -#include "exec-all.h" - -/* Shared handlers */ -static void pal_reset (CPUState *env); -/* Console handlers */ -static void pal_console_call (CPUState *env, uint32_t palcode); -/* OpenVMS handlers */ -static void pal_openvms_call (CPUState *env, uint32_t palcode); -/* UNIX / Linux handlers */ -static void pal_unix_call (CPUState *env, uint32_t palcode); - -pal_handler_t pal_handlers[] = { - /* Console handler */ - { - .reset = &pal_reset, - .call_pal = &pal_console_call, - }, - /* OpenVMS handler */ - { - .reset = &pal_reset, - .call_pal = &pal_openvms_call, - }, - /* UNIX / Linux handler */ - { - .reset = &pal_reset, - .call_pal = &pal_unix_call, - }, -}; - -#if 0 -/* One must explicitly check that the TB is valid and the FOE bit is reset */ -static void update_itb (void) -{ - /* This writes into a temp register, not the actual one */ - mtpr(TB_TAG); - mtpr(TB_CTL); - /* This commits the TB update */ - mtpr(ITB_PTE); -} - -static void update_dtb (void); -{ - mtpr(TB_CTL); - /* This write into a temp register, not the actual one */ - mtpr(TB_TAG); - /* This commits the TB update */ - mtpr(DTB_PTE); -} -#endif - -static void pal_reset (CPUState *env) -{ -} - -static void do_swappal (CPUState *env, uint64_t palid) -{ - pal_handler_t *pal_handler; - - switch (palid) { - case 0 ... 2: - pal_handler = &pal_handlers[palid]; - env->pal_handler = pal_handler; - env->ipr[IPR_PAL_BASE] = -1ULL; - (*pal_handler->reset)(env); - break; - case 3 ... 
255: - /* Unknown identifier */ - env->ir[0] = 1; - return; - default: - /* We were given the entry point address */ - env->pal_handler = NULL; - env->ipr[IPR_PAL_BASE] = palid; - env->pc = env->ipr[IPR_PAL_BASE]; - cpu_loop_exit(); - } -} - -static void pal_console_call (CPUState *env, uint32_t palcode) -{ - uint64_t palid; - - if (palcode < 0x00000080) { - /* Privileged palcodes */ - if (!(env->ps >> 3)) { - /* TODO: generate privilege exception */ - } - } - switch (palcode) { - case 0x00000000: - /* HALT */ - /* REQUIRED */ - break; - case 0x00000001: - /* CFLUSH */ - break; - case 0x00000002: - /* DRAINA */ - /* REQUIRED */ - /* Implemented as no-op */ - break; - case 0x00000009: - /* CSERVE */ - /* REQUIRED */ - break; - case 0x0000000A: - /* SWPPAL */ - /* REQUIRED */ - palid = env->ir[16]; - do_swappal(env, palid); - break; - case 0x00000080: - /* BPT */ - /* REQUIRED */ - break; - case 0x00000081: - /* BUGCHK */ - /* REQUIRED */ - break; - case 0x00000086: - /* IMB */ - /* REQUIRED */ - /* Implemented as no-op */ - break; - case 0x0000009E: - /* RDUNIQUE */ - /* REQUIRED */ - break; - case 0x0000009F: - /* WRUNIQUE */ - /* REQUIRED */ - break; - case 0x000000AA: - /* GENTRAP */ - /* REQUIRED */ - break; - default: - break; - } -} - -static void pal_openvms_call (CPUState *env, uint32_t palcode) -{ - uint64_t palid, val, oldval; - - if (palcode < 0x00000080) { - /* Privileged palcodes */ - if (!(env->ps >> 3)) { - /* TODO: generate privilege exception */ - } - } - switch (palcode) { - case 0x00000000: - /* HALT */ - /* REQUIRED */ - break; - case 0x00000001: - /* CFLUSH */ - break; - case 0x00000002: - /* DRAINA */ - /* REQUIRED */ - /* Implemented as no-op */ - break; - case 0x00000003: - /* LDQP */ - break; - case 0x00000004: - /* STQP */ - break; - case 0x00000005: - /* SWPCTX */ - break; - case 0x00000006: - /* MFPR_ASN */ - if (cpu_alpha_mfpr(env, IPR_ASN, &val) == 0) - env->ir[0] = val; - break; - case 0x00000007: - /* MTPR_ASTEN */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_ASTEN, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x00000008: - /* MTPR_ASTSR */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_ASTSR, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x00000009: - /* CSERVE */ - /* REQUIRED */ - break; - case 0x0000000A: - /* SWPPAL */ - /* REQUIRED */ - palid = env->ir[16]; - do_swappal(env, palid); - break; - case 0x0000000B: - /* MFPR_FEN */ - if (cpu_alpha_mfpr(env, IPR_FEN, &val) == 0) - env->ir[0] = val; - break; - case 0x0000000C: - /* MTPR_FEN */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_FEN, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x0000000D: - /* MTPR_IPIR */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_IPIR, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x0000000E: - /* MFPR_IPL */ - if (cpu_alpha_mfpr(env, IPR_IPL, &val) == 0) - env->ir[0] = val; - break; - case 0x0000000F: - /* MTPR_IPL */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_IPL, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x00000010: - /* MFPR_MCES */ - if (cpu_alpha_mfpr(env, IPR_MCES, &val) == 0) - env->ir[0] = val; - break; - case 0x00000011: - /* MTPR_MCES */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_MCES, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x00000012: - /* MFPR_PCBB */ - if (cpu_alpha_mfpr(env, IPR_PCBB, &val) == 0) - env->ir[0] = val; - break; - case 0x00000013: - /* MFPR_PRBR */ - if (cpu_alpha_mfpr(env, IPR_PRBR, &val) == 0) - env->ir[0] = val; - break; - case 0x00000014: 
- /* MTPR_PRBR */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_PRBR, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x00000015: - /* MFPR_PTBR */ - if (cpu_alpha_mfpr(env, IPR_PTBR, &val) == 0) - env->ir[0] = val; - break; - case 0x00000016: - /* MFPR_SCBB */ - if (cpu_alpha_mfpr(env, IPR_SCBB, &val) == 0) - env->ir[0] = val; - break; - case 0x00000017: - /* MTPR_SCBB */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_SCBB, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x00000018: - /* MTPR_SIRR */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_SIRR, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x00000019: - /* MFPR_SISR */ - if (cpu_alpha_mfpr(env, IPR_SISR, &val) == 0) - env->ir[0] = val; - break; - case 0x0000001A: - /* MFPR_TBCHK */ - if (cpu_alpha_mfpr(env, IPR_TBCHK, &val) == 0) - env->ir[0] = val; - break; - case 0x0000001B: - /* MTPR_TBIA */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_TBIA, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x0000001C: - /* MTPR_TBIAP */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_TBIAP, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x0000001D: - /* MTPR_TBIS */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_TBIS, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x0000001E: - /* MFPR_ESP */ - if (cpu_alpha_mfpr(env, IPR_ESP, &val) == 0) - env->ir[0] = val; - break; - case 0x0000001F: - /* MTPR_ESP */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_ESP, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x00000020: - /* MFPR_SSP */ - if (cpu_alpha_mfpr(env, IPR_SSP, &val) == 0) - env->ir[0] = val; - break; - case 0x00000021: - /* MTPR_SSP */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_SSP, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x00000022: - /* MFPR_USP */ - if (cpu_alpha_mfpr(env, IPR_USP, &val) == 0) - env->ir[0] = val; - break; - case 0x00000023: - /* MTPR_USP */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_USP, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x00000024: - /* MTPR_TBISD */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_TBISD, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x00000025: - /* MTPR_TBISI */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_TBISI, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x00000026: - /* MFPR_ASTEN */ - if (cpu_alpha_mfpr(env, IPR_ASTEN, &val) == 0) - env->ir[0] = val; - break; - case 0x00000027: - /* MFPR_ASTSR */ - if (cpu_alpha_mfpr(env, IPR_ASTSR, &val) == 0) - env->ir[0] = val; - break; - case 0x00000029: - /* MFPR_VPTB */ - if (cpu_alpha_mfpr(env, IPR_VPTB, &val) == 0) - env->ir[0] = val; - break; - case 0x0000002A: - /* MTPR_VPTB */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_VPTB, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x0000002B: - /* MTPR_PERFMON */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_PERFMON, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x0000002E: - /* MTPR_DATFX */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_DATFX, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x0000003E: - /* WTINT */ - break; - case 0x0000003F: - /* MFPR_WHAMI */ - if (cpu_alpha_mfpr(env, IPR_WHAMI, &val) == 0) - env->ir[0] = val; - break; - case 0x00000080: - /* BPT */ - /* REQUIRED */ - break; - case 0x00000081: - /* BUGCHK */ - /* REQUIRED */ - break; - case 0x00000082: - /* CHME */ - break; - case 0x00000083: - /* CHMK */ - break; - case 0x00000084: - /* CHMS */ - break; - case 0x00000085: - /* 
CHMU */ - break; - case 0x00000086: - /* IMB */ - /* REQUIRED */ - /* Implemented as no-op */ - break; - case 0x00000087: - /* INSQHIL */ - break; - case 0x00000088: - /* INSQTIL */ - break; - case 0x00000089: - /* INSQHIQ */ - break; - case 0x0000008A: - /* INSQTIQ */ - break; - case 0x0000008B: - /* INSQUEL */ - break; - case 0x0000008C: - /* INSQUEQ */ - break; - case 0x0000008D: - /* INSQUEL/D */ - break; - case 0x0000008E: - /* INSQUEQ/D */ - break; - case 0x0000008F: - /* PROBER */ - break; - case 0x00000090: - /* PROBEW */ - break; - case 0x00000091: - /* RD_PS */ - break; - case 0x00000092: - /* REI */ - break; - case 0x00000093: - /* REMQHIL */ - break; - case 0x00000094: - /* REMQTIL */ - break; - case 0x00000095: - /* REMQHIQ */ - break; - case 0x00000096: - /* REMQTIQ */ - break; - case 0x00000097: - /* REMQUEL */ - break; - case 0x00000098: - /* REMQUEQ */ - break; - case 0x00000099: - /* REMQUEL/D */ - break; - case 0x0000009A: - /* REMQUEQ/D */ - break; - case 0x0000009B: - /* SWASTEN */ - break; - case 0x0000009C: - /* WR_PS_SW */ - break; - case 0x0000009D: - /* RSCC */ - break; - case 0x0000009E: - /* READ_UNQ */ - /* REQUIRED */ - break; - case 0x0000009F: - /* WRITE_UNQ */ - /* REQUIRED */ - break; - case 0x000000A0: - /* AMOVRR */ - break; - case 0x000000A1: - /* AMOVRM */ - break; - case 0x000000A2: - /* INSQHILR */ - break; - case 0x000000A3: - /* INSQTILR */ - break; - case 0x000000A4: - /* INSQHIQR */ - break; - case 0x000000A5: - /* INSQTIQR */ - break; - case 0x000000A6: - /* REMQHILR */ - break; - case 0x000000A7: - /* REMQTILR */ - break; - case 0x000000A8: - /* REMQHIQR */ - break; - case 0x000000A9: - /* REMQTIQR */ - break; - case 0x000000AA: - /* GENTRAP */ - /* REQUIRED */ - break; - case 0x000000AE: - /* CLRFEN */ - break; - default: - break; - } -} - -static void pal_unix_call (CPUState *env, uint32_t palcode) -{ - uint64_t palid, val, oldval; - - if (palcode < 0x00000080) { - /* Privileged palcodes */ - if (!(env->ps >> 3)) { - /* TODO: generate privilege exception */ - } - } - switch (palcode) { - case 0x00000000: - /* HALT */ - /* REQUIRED */ - break; - case 0x00000001: - /* CFLUSH */ - break; - case 0x00000002: - /* DRAINA */ - /* REQUIRED */ - /* Implemented as no-op */ - break; - case 0x00000009: - /* CSERVE */ - /* REQUIRED */ - break; - case 0x0000000A: - /* SWPPAL */ - /* REQUIRED */ - palid = env->ir[16]; - do_swappal(env, palid); - break; - case 0x0000000D: - /* WRIPIR */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_IPIR, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x00000010: - /* RDMCES */ - if (cpu_alpha_mfpr(env, IPR_MCES, &val) == 0) - env->ir[0] = val; - break; - case 0x00000011: - /* WRMCES */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_MCES, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x0000002B: - /* WRFEN */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_PERFMON, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x0000002D: - /* WRVPTPTR */ - break; - case 0x00000030: - /* SWPCTX */ - break; - case 0x00000031: - /* WRVAL */ - break; - case 0x00000032: - /* RDVAL */ - break; - case 0x00000033: - /* TBI */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_TBIS, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x00000034: - /* WRENT */ - break; - case 0x00000035: - /* SWPIPL */ - break; - case 0x00000036: - /* RDPS */ - break; - case 0x00000037: - /* WRKGP */ - break; - case 0x00000038: - /* WRUSP */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_USP, val, &oldval) == 1) - 
env->ir[0] = val; - break; - case 0x00000039: - /* WRPERFMON */ - val = env->ir[16]; - if (cpu_alpha_mtpr(env, IPR_PERFMON, val, &oldval) == 1) - env->ir[0] = val; - break; - case 0x0000003A: - /* RDUSP */ - if (cpu_alpha_mfpr(env, IPR_USP, &val) == 0) - env->ir[0] = val; - break; - case 0x0000003C: - /* WHAMI */ - if (cpu_alpha_mfpr(env, IPR_WHAMI, &val) == 0) - env->ir[0] = val; - break; - case 0x0000003D: - /* RETSYS */ - break; - case 0x0000003E: - /* WTINT */ - break; - case 0x0000003F: - /* RTI */ - if (cpu_alpha_mfpr(env, IPR_WHAMI, &val) == 0) - env->ir[0] = val; - break; - case 0x00000080: - /* BPT */ - /* REQUIRED */ - break; - case 0x00000081: - /* BUGCHK */ - /* REQUIRED */ - break; - case 0x00000083: - /* CALLSYS */ - break; - case 0x00000086: - /* IMB */ - /* REQUIRED */ - /* Implemented as no-op */ - break; - case 0x00000092: - /* URTI */ - break; - case 0x0000009E: - /* RDUNIQUE */ - /* REQUIRED */ - break; - case 0x0000009F: - /* WRUNIQUE */ - /* REQUIRED */ - break; - case 0x000000AA: - /* GENTRAP */ - /* REQUIRED */ - break; - case 0x000000AE: - /* CLRFEN */ - break; - default: - break; - } -} - -void call_pal (CPUState *env) -{ - pal_handler_t *pal_handler = env->pal_handler; - - switch (env->exception_index) { - case EXCP_RESET: - (*pal_handler->reset)(env); - break; - case EXCP_MCHK: - (*pal_handler->machine_check)(env); - break; - case EXCP_ARITH: - (*pal_handler->arithmetic)(env); - break; - case EXCP_INTERRUPT: - (*pal_handler->interrupt)(env); - break; - case EXCP_DFAULT: - (*pal_handler->dfault)(env); - break; - case EXCP_DTB_MISS_PAL: - (*pal_handler->dtb_miss_pal)(env); - break; - case EXCP_DTB_MISS_NATIVE: - (*pal_handler->dtb_miss_native)(env); - break; - case EXCP_UNALIGN: - (*pal_handler->unalign)(env); - break; - case EXCP_ITB_MISS: - (*pal_handler->itb_miss)(env); - break; - case EXCP_ITB_ACV: - (*pal_handler->itb_acv)(env); - break; - case EXCP_OPCDEC: - (*pal_handler->opcdec)(env); - break; - case EXCP_FEN: - (*pal_handler->fen)(env); - break; - default: - if (env->exception_index >= EXCP_CALL_PAL && - env->exception_index < EXCP_CALL_PALP) { - /* Unprivileged PAL call */ - (*pal_handler->call_pal) - (env, (env->exception_index - EXCP_CALL_PAL) >> 6); - } else if (env->exception_index >= EXCP_CALL_PALP && - env->exception_index < EXCP_CALL_PALE) { - /* Privileged PAL call */ - (*pal_handler->call_pal) - (env, ((env->exception_index - EXCP_CALL_PALP) >> 6) + 0x80); - } else { - /* Should never happen */ - } - break; - } - env->ipr[IPR_EXC_ADDR] &= ~1; -} - -void pal_init (CPUState *env) -{ - do_swappal(env, 0); -} - -#if 0 -static uint64_t get_ptebase (CPUState *env, uint64_t vaddr) -{ - uint64_t virbnd, ptbr; - - if ((env->features & FEATURE_VIRBND)) { - cpu_alpha_mfpr(env, IPR_VIRBND, &virbnd); - if (vaddr >= virbnd) - cpu_alpha_mfpr(env, IPR_SYSPTBR, &ptbr); - else - cpu_alpha_mfpr(env, IPR_PTBR, &ptbr); - } else { - cpu_alpha_mfpr(env, IPR_PTBR, &ptbr); - } - - return ptbr; -} - -static int get_page_bits (CPUState *env) -{ - /* XXX */ - return 13; -} - -static int get_pte (uint64_t *pfnp, int *zbitsp, int *protp, - uint64_t ptebase, int page_bits, uint64_t level, - int mmu_idx, int rw) -{ - uint64_t pteaddr, pte, pfn; - uint8_t gh; - int ure, uwe, kre, kwe, foE, foR, foW, v, ret, ar, is_user; - - /* XXX: TOFIX */ - is_user = mmu_idx == MMU_USER_IDX; - pteaddr = (ptebase << page_bits) + (8 * level); - pte = ldq_raw(pteaddr); - /* Decode all interresting PTE fields */ - pfn = pte >> 32; - uwe = (pte >> 13) & 1; - kwe = (pte >> 12) & 1; - ure = (pte >> 
9) & 1; - kre = (pte >> 8) & 1; - gh = (pte >> 5) & 3; - foE = (pte >> 3) & 1; - foW = (pte >> 2) & 1; - foR = (pte >> 1) & 1; - v = pte & 1; - ret = 0; - if (!v) - ret = 0x1; - /* Check access rights */ - ar = 0; - if (is_user) { - if (ure) - ar |= PAGE_READ; - if (uwe) - ar |= PAGE_WRITE; - if (rw == 1 && !uwe) - ret |= 0x2; - if (rw != 1 && !ure) - ret |= 0x2; - } else { - if (kre) - ar |= PAGE_READ; - if (kwe) - ar |= PAGE_WRITE; - if (rw == 1 && !kwe) - ret |= 0x2; - if (rw != 1 && !kre) - ret |= 0x2; - } - if (rw == 0 && foR) - ret |= 0x4; - if (rw == 2 && foE) - ret |= 0x8; - if (rw == 1 && foW) - ret |= 0xC; - *pfnp = pfn; - if (zbitsp != NULL) - *zbitsp = page_bits + (3 * gh); - if (protp != NULL) - *protp = ar; - - return ret; -} - -static int paddr_from_pte (uint64_t *paddr, int *zbitsp, int *prot, - uint64_t ptebase, int page_bits, - uint64_t vaddr, int mmu_idx, int rw) -{ - uint64_t pfn, page_mask, lvl_mask, level1, level2, level3; - int lvl_bits, ret; - - page_mask = (1ULL << page_bits) - 1ULL; - lvl_bits = page_bits - 3; - lvl_mask = (1ULL << lvl_bits) - 1ULL; - level3 = (vaddr >> page_bits) & lvl_mask; - level2 = (vaddr >> (page_bits + lvl_bits)) & lvl_mask; - level1 = (vaddr >> (page_bits + (2 * lvl_bits))) & lvl_mask; - /* Level 1 PTE */ - ret = get_pte(&pfn, NULL, NULL, ptebase, page_bits, level1, 0, 0); - switch (ret) { - case 3: - /* Access violation */ - return 2; - case 2: - /* translation not valid */ - return 1; - default: - /* OK */ - break; - } - /* Level 2 PTE */ - ret = get_pte(&pfn, NULL, NULL, pfn, page_bits, level2, 0, 0); - switch (ret) { - case 3: - /* Access violation */ - return 2; - case 2: - /* translation not valid */ - return 1; - default: - /* OK */ - break; - } - /* Level 3 PTE */ - ret = get_pte(&pfn, zbitsp, prot, pfn, page_bits, level3, mmu_idx, rw); - if (ret & 0x1) { - /* Translation not valid */ - ret = 1; - } else if (ret & 2) { - /* Access violation */ - ret = 2; - } else { - switch (ret & 0xC) { - case 0: - /* OK */ - ret = 0; - break; - case 0x4: - /* Fault on read */ - ret = 3; - break; - case 0x8: - /* Fault on execute */ - ret = 4; - break; - case 0xC: - /* Fault on write */ - ret = 5; - break; - } - } - *paddr = (pfn << page_bits) | (vaddr & page_mask); - - return 0; -} - -static int virtual_to_physical (CPUState *env, uint64_t *physp, - int *zbitsp, int *protp, - uint64_t virtual, int mmu_idx, int rw) -{ - uint64_t sva, ptebase; - int seg, page_bits, ret; - - sva = ((int64_t)(virtual << (64 - VA_BITS))) >> (64 - VA_BITS); - if (sva != virtual) - seg = -1; - else - seg = sva >> (VA_BITS - 2); - virtual &= ~(0xFFFFFC0000000000ULL << (VA_BITS - 43)); - ptebase = get_ptebase(env, virtual); - page_bits = get_page_bits(env); - ret = 0; - switch (seg) { - case 0: - /* seg1: 3 levels of PTE */ - ret = paddr_from_pte(physp, zbitsp, protp, ptebase, page_bits, - virtual, mmu_idx, rw); - break; - case 1: - /* seg1: 2 levels of PTE */ - ret = paddr_from_pte(physp, zbitsp, protp, ptebase, page_bits, - virtual, mmu_idx, rw); - break; - case 2: - /* kernel segment */ - if (mmu_idx != 0) { - ret = 2; - } else { - *physp = virtual; - } - break; - case 3: - /* seg1: TB mapped */ - ret = paddr_from_pte(physp, zbitsp, protp, ptebase, page_bits, - virtual, mmu_idx, rw); - break; - default: - ret = 1; - break; - } - - return ret; -} - -/* XXX: code provision */ -int cpu_ppc_handle_mmu_fault (CPUState *env, uint32_t address, int rw, - int mmu_idx, int is_softmmu) -{ - uint64_t physical, page_size, end; - int prot, zbits, ret; - - ret = 
virtual_to_physical(env, &physical, &zbits, &prot, - address, mmu_idx, rw); - - switch (ret) { - case 0: - /* No fault */ - page_size = 1ULL << zbits; - address &= ~(page_size - 1); - /* FIXME: page_size should probably be passed to tlb_set_page, - and this loop removed. */ - for (end = physical + page_size; physical < end; physical += 0x1000) { - tlb_set_page(env, address, physical, prot, mmu_idx, - TARGET_PAGE_SIZE); - address += 0x1000; - } - ret = 0; - break; -#if 0 - case 1: - env->exception_index = EXCP_DFAULT; - env->ipr[IPR_EXC_ADDR] = address; - ret = 1; - break; - case 2: - env->exception_index = EXCP_ACCESS_VIOLATION; - env->ipr[IPR_EXC_ADDR] = address; - ret = 1; - break; - case 3: - env->exception_index = EXCP_FAULT_ON_READ; - env->ipr[IPR_EXC_ADDR] = address; - ret = 1; - break; - case 4: - env->exception_index = EXCP_FAULT_ON_EXECUTE; - env->ipr[IPR_EXC_ADDR] = address; - ret = 1; - case 5: - env->exception_index = EXCP_FAULT_ON_WRITE; - env->ipr[IPR_EXC_ADDR] = address; - ret = 1; -#endif - default: - /* Should never happen */ - env->exception_index = EXCP_MCHK; - env->ipr[IPR_EXC_ADDR] = address; - ret = 1; - break; - } - - return ret; -} -#endif diff --git a/hw/bitbang_i2c.c b/hw/bitbang_i2c.c index 4ee99a18b9..2937b5c4a1 100644 --- a/hw/bitbang_i2c.c +++ b/hw/bitbang_i2c.c @@ -38,7 +38,8 @@ typedef enum bitbang_i2c_state { RECEIVING_BIT2, RECEIVING_BIT1, RECEIVING_BIT0, - SENDING_ACK + SENDING_ACK, + SENT_NACK } bitbang_i2c_state; struct bitbang_i2c_interface { @@ -115,6 +116,7 @@ int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level) } switch (i2c->state) { case STOPPED: + case SENT_NACK: return bitbang_i2c_ret(i2c, 1); case SENDING_BIT7 ... SENDING_BIT0: @@ -155,6 +157,7 @@ int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level) i2c->state = RECEIVING_BIT7; if (data != 0) { DPRINTF("NACKED\n"); + i2c->state = SENT_NACK; i2c_nack(i2c->bus); } else { DPRINTF("ACKED\n"); diff --git a/hw/boards.h b/hw/boards.h index 6f0f0d7925..716fd7b1a6 100644 --- a/hw/boards.h +++ b/hw/boards.h @@ -27,6 +27,7 @@ typedef struct QEMUMachine { no_cdrom:1, no_sdcard:1; int is_default; + const char *default_machine_opts; GlobalProperty *compat_props; struct QEMUMachine *next; } QEMUMachine; diff --git a/hw/bt-hid.c b/hw/bt-hid.c index abdfd35e86..09120af074 100644 --- a/hw/bt-hid.c +++ b/hw/bt-hid.c @@ -323,7 +323,7 @@ static void bt_hid_control_transaction(struct bt_hid_device_s *s, break; } s->proto = parameter; - s->usbdev->info->handle_control(s->usbdev, SET_PROTOCOL, s->proto, 0, 0, + s->usbdev->info->handle_control(s->usbdev, NULL, SET_PROTOCOL, s->proto, 0, 0, NULL); ret = BT_HS_SUCCESSFUL; break; @@ -333,7 +333,7 @@ static void bt_hid_control_transaction(struct bt_hid_device_s *s, ret = BT_HS_ERR_INVALID_PARAMETER; break; } - s->usbdev->info->handle_control(s->usbdev, GET_IDLE, 0, 0, 1, + s->usbdev->info->handle_control(s->usbdev, NULL, GET_IDLE, 0, 0, 1, s->control->sdu_out(s->control, 1)); s->control->sdu_submit(s->control); break; @@ -346,7 +346,7 @@ static void bt_hid_control_transaction(struct bt_hid_device_s *s, /* We don't need to know about the Idle Rate here really, * so just pass it on to the device. */ - ret = s->usbdev->info->handle_control(s->usbdev, + ret = s->usbdev->info->handle_control(s->usbdev, NULL, SET_IDLE, data[1], 0, 0, NULL) ? BT_HS_SUCCESSFUL : BT_HS_ERR_INVALID_PARAMETER; /* XXX: Does this generate a handshake? 
*/ @@ -1441,7 +1441,7 @@ typedef struct { #define EVT_FLUSH_OCCURRED 0x11 typedef struct { uint16_t handle; -} __attribute__ ((packed)) evt_flush_occured; +} __attribute__ ((packed)) evt_flush_occurred; #define EVT_FLUSH_OCCURRED_SIZE 2 #define EVT_ROLE_CHANGE 0x12 diff --git a/hw/eepro100.c b/hw/eepro100.c index 84b98c0929..9b6f4a5cd8 100644 --- a/hw/eepro100.c +++ b/hw/eepro100.c @@ -1112,7 +1112,7 @@ static void eepro100_write_eeprom(eeprom_t * eeprom, uint8_t val) { TRACE(EEPROM, logout("val=0x%02x\n", val)); - /* mask unwriteable bits */ + /* mask unwritable bits */ #if 0 val = SET_MASKED(val, 0x31, eeprom->value); #endif diff --git a/hw/eeprom93xx.c b/hw/eeprom93xx.c index 660b28f225..7b21f98e22 100644 --- a/hw/eeprom93xx.c +++ b/hw/eeprom93xx.c @@ -75,7 +75,7 @@ struct _eeprom_t { uint8_t tick; uint8_t address; uint8_t command; - uint8_t writeable; + uint8_t writable; uint8_t eecs; uint8_t eesk; @@ -130,7 +130,7 @@ static const VMStateDescription vmstate_eeprom = { VMSTATE_UINT8(tick, eeprom_t), VMSTATE_UINT8(address, eeprom_t), VMSTATE_UINT8(command, eeprom_t), - VMSTATE_UINT8(writeable, eeprom_t), + VMSTATE_UINT8(writable, eeprom_t), VMSTATE_UINT8(eecs, eeprom_t), VMSTATE_UINT8(eesk, eeprom_t), @@ -165,7 +165,7 @@ void eeprom93xx_write(eeprom_t *eeprom, int eecs, int eesk, int eedi) address = 0x0; } else if (eeprom->eecs && ! eecs) { /* End chip select cycle. This triggers write / erase. */ - if (eeprom->writeable) { + if (eeprom->writable) { uint8_t subcommand = address >> (eeprom->addrbits - 2); if (command == 0 && subcommand == 2) { /* Erase all. */ @@ -232,7 +232,7 @@ void eeprom93xx_write(eeprom_t *eeprom, int eecs, int eesk, int eedi) switch (address >> (eeprom->addrbits - 2)) { case 0: logout("write disable command\n"); - eeprom->writeable = 0; + eeprom->writable = 0; break; case 1: logout("write all command\n"); @@ -242,7 +242,7 @@ void eeprom93xx_write(eeprom_t *eeprom, int eecs, int eesk, int eedi) break; case 3: logout("write enable command\n"); - eeprom->writeable = 1; + eeprom->writable = 1; break; } } else { @@ -61,10 +61,11 @@ struct ESPState { int32_t ti_size; uint32_t ti_rptr, ti_wptr; uint8_t ti_buf[TI_BUFSZ]; - uint32_t sense; + uint32_t status; uint32_t dma; SCSIBus bus; SCSIDevice *current_dev; + SCSIRequest *current_req; uint8_t cmdbuf[TI_BUFSZ]; uint32_t cmdlen; uint32_t do_cmd; @@ -187,6 +188,17 @@ static void esp_dma_enable(void *opaque, int irq, int level) } } +static void esp_request_cancelled(SCSIRequest *req) +{ + ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent); + + if (req == s->current_req) { + scsi_req_unref(s->current_req); + s->current_req = NULL; + s->current_dev = NULL; + } +} + static uint32_t get_cmd(ESPState *s, uint8_t *buf) { uint32_t dmalen; @@ -209,7 +221,7 @@ static uint32_t get_cmd(ESPState *s, uint8_t *buf) if (s->current_dev) { /* Started a new command before the old one finished. Cancel it. 
*/ - s->current_dev->info->cancel_io(s->current_dev, 0); + scsi_req_cancel(s->current_req); s->async_len = 0; } @@ -232,7 +244,8 @@ static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid) DPRINTF("do_busid_cmd: busid 0x%x\n", busid); lun = busid & 7; - datalen = s->current_dev->info->send_command(s->current_dev, 0, buf, lun); + s->current_req = scsi_req_new(s->current_dev, 0, lun); + datalen = scsi_req_enqueue(s->current_req, buf); s->ti_size = datalen; if (datalen != 0) { s->rregs[ESP_RSTAT] = STAT_TC; @@ -240,11 +253,10 @@ static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid) s->dma_counter = 0; if (datalen > 0) { s->rregs[ESP_RSTAT] |= STAT_DI; - s->current_dev->info->read_data(s->current_dev, 0); } else { s->rregs[ESP_RSTAT] |= STAT_DO; - s->current_dev->info->write_data(s->current_dev, 0); } + scsi_req_continue(s->current_req); } s->rregs[ESP_RINTR] = INTR_BS | INTR_FC; s->rregs[ESP_RSEQ] = SEQ_CD; @@ -306,8 +318,8 @@ static void handle_satn_stop(ESPState *s) static void write_response(ESPState *s) { - DPRINTF("Transfer status (sense=%d)\n", s->sense); - s->ti_buf[0] = s->sense; + DPRINTF("Transfer status (status=%d)\n", s->status); + s->ti_buf[0] = s->status; s->ti_buf[1] = 0; if (s->dma) { s->dma_memory_write(s->dma_opaque, s->ti_buf, 2); @@ -370,53 +382,56 @@ static void esp_do_dma(ESPState *s) else s->ti_size -= len; if (s->async_len == 0) { - if (to_device) { - // ti_size is negative - s->current_dev->info->write_data(s->current_dev, 0); - } else { - s->current_dev->info->read_data(s->current_dev, 0); - /* If there is still data to be read from the device then - complete the DMA operation immediately. Otherwise defer - until the scsi layer has completed. */ - if (s->dma_left == 0 && s->ti_size > 0) { - esp_dma_done(s); - } + scsi_req_continue(s->current_req); + /* If there is still data to be read from the device then + complete the DMA operation immediately. Otherwise defer + until the scsi layer has completed. */ + if (to_device || s->dma_left != 0 || s->ti_size == 0) { + return; } - } else { - /* Partially filled a scsi buffer. Complete immediately. */ - esp_dma_done(s); } + + /* Partially filled a scsi buffer. Complete immediately. 
*/ + esp_dma_done(s); } -static void esp_command_complete(SCSIBus *bus, int reason, uint32_t tag, - uint32_t arg) +static void esp_command_complete(SCSIRequest *req, uint32_t status) { - ESPState *s = DO_UPCAST(ESPState, busdev.qdev, bus->qbus.parent); + ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent); - if (reason == SCSI_REASON_DONE) { - DPRINTF("SCSI Command complete\n"); - if (s->ti_size != 0) - DPRINTF("SCSI command completed unexpectedly\n"); - s->ti_size = 0; - s->dma_left = 0; - s->async_len = 0; - if (arg) - DPRINTF("Command failed\n"); - s->sense = arg; - s->rregs[ESP_RSTAT] = STAT_ST; - esp_dma_done(s); + DPRINTF("SCSI Command complete\n"); + if (s->ti_size != 0) { + DPRINTF("SCSI command completed unexpectedly\n"); + } + s->ti_size = 0; + s->dma_left = 0; + s->async_len = 0; + if (status) { + DPRINTF("Command failed\n"); + } + s->status = status; + s->rregs[ESP_RSTAT] = STAT_ST; + esp_dma_done(s); + if (s->current_req) { + scsi_req_unref(s->current_req); + s->current_req = NULL; s->current_dev = NULL; - } else { - DPRINTF("transfer %d/%d\n", s->dma_left, s->ti_size); - s->async_len = arg; - s->async_buf = s->current_dev->info->get_buf(s->current_dev, 0); - if (s->dma_left) { - esp_do_dma(s); - } else if (s->dma_counter != 0 && s->ti_size <= 0) { - /* If this was the last part of a DMA transfer then the - completion interrupt is deferred to here. */ - esp_dma_done(s); - } + } +} + +static void esp_transfer_data(SCSIRequest *req, uint32_t len) +{ + ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent); + + DPRINTF("transfer %d/%d\n", s->dma_left, s->ti_size); + s->async_len = len; + s->async_buf = scsi_req_get_buf(req); + if (s->dma_left) { + esp_do_dma(s); + } else if (s->dma_counter != 0 && s->ti_size <= 0) { + /* If this was the last part of a DMA transfer then the + completion interrupt is deferred to here. */ + esp_dma_done(s); } } @@ -678,7 +693,7 @@ static const VMStateDescription vmstate_esp = { VMSTATE_UINT32(ti_rptr, ESPState), VMSTATE_UINT32(ti_wptr, ESPState), VMSTATE_BUFFER(ti_buf, ESPState), - VMSTATE_UINT32(sense, ESPState), + VMSTATE_UINT32(status, ESPState), VMSTATE_UINT32(dma, ESPState), VMSTATE_BUFFER(cmdbuf, ESPState), VMSTATE_UINT32(cmdlen, ESPState), @@ -714,6 +729,12 @@ void esp_init(target_phys_addr_t espaddr, int it_shift, *dma_enable = qdev_get_gpio_in(dev, 1); } +static const struct SCSIBusOps esp_scsi_ops = { + .transfer_data = esp_transfer_data, + .complete = esp_command_complete, + .cancel = esp_request_cancelled +}; + static int esp_init1(SysBusDevice *dev) { ESPState *s = FROM_SYSBUS(ESPState, dev); @@ -728,7 +749,7 @@ static int esp_init1(SysBusDevice *dev) qdev_init_gpio_in(&dev->qdev, esp_gpio_demux, 2); - scsi_bus_new(&s->bus, &dev->qdev, 0, ESP_MAX_DEVS, esp_command_complete); + scsi_bus_new(&s->bus, &dev->qdev, 0, ESP_MAX_DEVS, &esp_scsi_ops); return scsi_bus_legacy_handle_cmdline(&s->bus); } diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c index c6e0c7767e..1f008a3dda 100644 --- a/hw/ide/ahci.c +++ b/hw/ide/ahci.c @@ -884,8 +884,31 @@ static int handle_cmd(AHCIState *s, int port, int slot) } if (ide_state->drive_kind != IDE_CD) { - ide_set_sector(ide_state, (cmd_fis[6] << 16) | (cmd_fis[5] << 8) | - cmd_fis[4]); + /* + * We set the sector depending on the sector defined in the FIS. + * Unfortunately, the spec isn't exactly obvious on this one. + * + * Apparently LBA48 commands set fis bytes 10,9,8,6,5,4 to the + * 48 bit sector number. ATA_CMD_READ_DMA_EXT is an example for + * such a command. 
+ * + * Non-LBA48 commands however use 7[lower 4 bits],6,5,4 to define a + * 28-bit sector number. ATA_CMD_READ_DMA is an example for such + * a command. + * + * Since the spec doesn't explicitly state what each field should + * do, I simply assume non-used fields as reserved and OR everything + * together, independent of the command. + */ + ide_set_sector(ide_state, ((uint64_t)cmd_fis[10] << 40) + | ((uint64_t)cmd_fis[9] << 32) + /* This is used for LBA48 commands */ + | ((uint64_t)cmd_fis[8] << 24) + /* This is used for non-LBA48 commands */ + | ((uint64_t)(cmd_fis[7] & 0xf) << 24) + | ((uint64_t)cmd_fis[6] << 16) + | ((uint64_t)cmd_fis[5] << 8) + | cmd_fis[4]); } /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command @@ -1066,9 +1089,11 @@ static int ahci_dma_set_inactive(IDEDMA *dma) ad->dma_cb = NULL; - /* maybe we still have something to process, check later */ - ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad); - qemu_bh_schedule(ad->check_bh); + if (!ad->check_bh) { + /* maybe we still have something to process, check later */ + ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad); + qemu_bh_schedule(ad->check_bh); + } return 0; } diff --git a/hw/ide/core.c b/hw/ide/core.c index 90f553b69b..95beb175b3 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -430,7 +430,6 @@ void ide_dma_error(IDEState *s) s->error = ABRT_ERR; s->status = READY_STAT | ERR_STAT; ide_set_inactive(s); - s->bus->dma->ops->add_status(s->bus->dma, BM_STATUS_INT); ide_set_irq(s->bus); } @@ -500,8 +499,11 @@ handle_rw_error: n = s->nsector; s->io_buffer_index = 0; s->io_buffer_size = n * 512; - if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->is_read) == 0) + if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->is_read) == 0) { + /* The PRDs were too short. Reset the Active bit, but don't raise an + * interrupt. */ goto eot; + } #ifdef DEBUG_AIO printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, is_read=%d\n", @@ -523,7 +525,6 @@ handle_rw_error: return; eot: - s->bus->dma->ops->add_status(s->bus->dma, BM_STATUS_INT); ide_set_inactive(s); } @@ -1592,13 +1593,15 @@ void ide_bus_reset(IDEBus *bus) bus->dma->ops->reset(bus->dma); } -int ide_init_drive(IDEState *s, BlockDriverState *bs, +int ide_init_drive(IDEState *s, BlockDriverState *bs, IDEDriveKind kind, const char *version, const char *serial) { int cylinders, heads, secs; uint64_t nb_sectors; s->bs = bs; + s->drive_kind = kind; + bdrv_get_geometry(bs, &nb_sectors); bdrv_guess_geometry(bs, &cylinders, &heads, &secs); if (cylinders < 1 || cylinders > 16383) { @@ -1623,8 +1626,7 @@ int ide_init_drive(IDEState *s, BlockDriverState *bs, s->smart_autosave = 1; s->smart_errors = 0; s->smart_selftest_count = 0; - if (bdrv_get_type_hint(bs) == BDRV_TYPE_CDROM) { - s->drive_kind = IDE_CD; + if (kind == IDE_CD) { bdrv_set_change_cb(bs, cdrom_change_cb, s); bs->buffer_alignment = 2048; } else { @@ -1729,7 +1731,8 @@ void ide_init2_with_non_qdev_drives(IDEBus *bus, DriveInfo *hd0, dinfo = i == 0 ? hd0 : hd1; ide_init1(bus, i); if (dinfo) { - if (ide_init_drive(&bus->ifs[i], dinfo->bdrv, NULL, + if (ide_init_drive(&bus->ifs[i], dinfo->bdrv, + dinfo->media_cd ? IDE_CD : IDE_HD, NULL, *dinfo->serial ? 
dinfo->serial : NULL) < 0) { error_report("Can't set up IDE drive %s", dinfo->id); exit(1); diff --git a/hw/ide/ich.c b/hw/ide/ich.c index 976cc9244b..054e0734e4 100644 --- a/hw/ide/ich.c +++ b/hw/ide/ich.c @@ -90,12 +90,12 @@ static int pci_ich9_ahci_init(PCIDevice *dev) qemu_register_reset(ahci_reset, d); - /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */ - pci_register_bar_simple(&d->card, 5, 0x1000, 0, d->ahci.mem); - msi_init(dev, 0x50, 1, true, false); d->ahci.irq = d->card.irq[0]; + /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */ + pci_register_bar_simple(&d->card, 5, 0x1000, 0, d->ahci.mem); + return 0; } diff --git a/hw/ide/internal.h b/hw/ide/internal.h index aa198b6b12..c2b35ec5e6 100644 --- a/hw/ide/internal.h +++ b/hw/ide/internal.h @@ -558,7 +558,7 @@ uint32_t ide_data_readw(void *opaque, uint32_t addr); void ide_data_writel(void *opaque, uint32_t addr, uint32_t val); uint32_t ide_data_readl(void *opaque, uint32_t addr); -int ide_init_drive(IDEState *s, BlockDriverState *bs, +int ide_init_drive(IDEState *s, BlockDriverState *bs, IDEDriveKind kind, const char *version, const char *serial); void ide_init2(IDEBus *bus, qemu_irq irq); void ide_init2_with_non_qdev_drives(IDEBus *bus, DriveInfo *hd0, diff --git a/hw/ide/pci.c b/hw/ide/pci.c index 65cb56c38c..a4726adbea 100644 --- a/hw/ide/pci.c +++ b/hw/ide/pci.c @@ -296,12 +296,8 @@ void bmdma_cmd_writeb(void *opaque, uint32_t addr, uint32_t val) */ if (bm->bus->dma->aiocb) { qemu_aio_flush(); -#ifdef DEBUG_IDE - if (bm->bus->dma->aiocb) - printf("ide_dma_cancel: aiocb still pending"); - if (bm->status & BM_STATUS_DMAING) - printf("ide_dma_cancel: BM_STATUS_DMAING still pending"); -#endif + assert(bm->bus->dma->aiocb == NULL); + assert((bm->status & BM_STATUS_DMAING) == 0); } } else { bm->cur_addr = bm->addr; diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c index 2bb5c27341..3f9dc89c6d 100644 --- a/hw/ide/qdev.c +++ b/hw/ide/qdev.c @@ -98,7 +98,7 @@ IDEDevice *ide_create_drive(IDEBus *bus, int unit, DriveInfo *drive) { DeviceState *dev; - dev = qdev_create(&bus->qbus, "ide-drive"); + dev = qdev_create(&bus->qbus, drive->media_cd ? 
"ide-cd" : "ide-hd"); qdev_prop_set_uint32(dev, "unit", unit); qdev_prop_set_drive_nofail(dev, "drive", drive->bdrv); qdev_init_nofail(dev); @@ -118,7 +118,7 @@ typedef struct IDEDrive { IDEDevice dev; } IDEDrive; -static int ide_drive_initfn(IDEDevice *dev) +static int ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind) { IDEBus *bus = DO_UPCAST(IDEBus, qbus, dev->qdev.parent_bus); IDEState *s = bus->ifs + dev->unit; @@ -134,7 +134,7 @@ static int ide_drive_initfn(IDEDevice *dev) } } - if (ide_init_drive(s, dev->conf.bs, dev->version, serial) < 0) { + if (ide_init_drive(s, dev->conf.bs, kind, dev->version, serial) < 0) { return -1; } @@ -151,22 +151,69 @@ static int ide_drive_initfn(IDEDevice *dev) return 0; } -static IDEDeviceInfo ide_drive_info = { - .qdev.name = "ide-drive", - .qdev.fw_name = "drive", - .qdev.size = sizeof(IDEDrive), - .init = ide_drive_initfn, - .qdev.props = (Property[]) { - DEFINE_PROP_UINT32("unit", IDEDrive, dev.unit, -1), - DEFINE_BLOCK_PROPERTIES(IDEDrive, dev.conf), - DEFINE_PROP_STRING("ver", IDEDrive, dev.version), - DEFINE_PROP_STRING("serial", IDEDrive, dev.serial), - DEFINE_PROP_END_OF_LIST(), +static int ide_hd_initfn(IDEDevice *dev) +{ + return ide_dev_initfn(dev, IDE_HD); +} + +static int ide_cd_initfn(IDEDevice *dev) +{ + return ide_dev_initfn(dev, IDE_CD); +} + +static int ide_drive_initfn(IDEDevice *dev) +{ + DriveInfo *dinfo = drive_get_by_blockdev(dev->conf.bs); + + return ide_dev_initfn(dev, dinfo->media_cd ? IDE_CD : IDE_HD); +} + +#define DEFINE_IDE_DEV_PROPERTIES() \ + DEFINE_PROP_UINT32("unit", IDEDrive, dev.unit, -1), \ + DEFINE_BLOCK_PROPERTIES(IDEDrive, dev.conf), \ + DEFINE_PROP_STRING("ver", IDEDrive, dev.version), \ + DEFINE_PROP_STRING("serial", IDEDrive, dev.serial) + +static IDEDeviceInfo ide_dev_info[] = { + { + .qdev.name = "ide-hd", + .qdev.fw_name = "drive", + .qdev.desc = "virtual IDE disk", + .qdev.size = sizeof(IDEDrive), + .init = ide_hd_initfn, + .qdev.props = (Property[]) { + DEFINE_IDE_DEV_PROPERTIES(), + DEFINE_PROP_END_OF_LIST(), + } + },{ + .qdev.name = "ide-cd", + .qdev.fw_name = "drive", + .qdev.desc = "virtual IDE CD-ROM", + .qdev.size = sizeof(IDEDrive), + .init = ide_cd_initfn, + .qdev.props = (Property[]) { + DEFINE_IDE_DEV_PROPERTIES(), + DEFINE_PROP_END_OF_LIST(), + } + },{ + .qdev.name = "ide-drive", /* legacy -device ide-drive */ + .qdev.fw_name = "drive", + .qdev.desc = "virtual IDE disk or CD-ROM (legacy)", + .qdev.size = sizeof(IDEDrive), + .init = ide_drive_initfn, + .qdev.props = (Property[]) { + DEFINE_IDE_DEV_PROPERTIES(), + DEFINE_PROP_END_OF_LIST(), + } } }; -static void ide_drive_register(void) +static void ide_dev_register(void) { - ide_qdev_register(&ide_drive_info); + int i; + + for (i = 0; i < ARRAY_SIZE(ide_dev_info); i++) { + ide_qdev_register(&ide_dev_info[i]); + } } -device_init(ide_drive_register); +device_init(ide_dev_register); diff --git a/hw/lan9118.c b/hw/lan9118.c index 2dc8d18549..4c42fe94c2 100644 --- a/hw/lan9118.c +++ b/hw/lan9118.c @@ -721,7 +721,7 @@ static void do_phy_write(lan9118_state *s, int reg, uint32_t val) break; } s->phy_control = val & 0x7980; - /* Complete autonegotiation imediately. */ + /* Complete autonegotiation immediately. 
*/ if (val & 0x1000) { s->phy_status |= 0x0020; } diff --git a/hw/lsi53c895a.c b/hw/lsi53c895a.c index 6b9c904ab6..3b75467da4 100644 --- a/hw/lsi53c895a.c +++ b/hw/lsi53c895a.c @@ -174,6 +174,7 @@ do { fprintf(stderr, "lsi_scsi: error: " fmt , ## __VA_ARGS__);} while (0) #define LSI_TAG_VALID (1 << 16) typedef struct lsi_request { + SCSIRequest *req; uint32_t tag; uint32_t dma_len; uint8_t *dma_buf; @@ -189,7 +190,7 @@ typedef struct { uint32_t script_ram_base; int carry; /* ??? Should this be an a visible register somewhere? */ - int sense; + int status; /* Action to take at the end of a MSG IN phase. 0 = COMMAND, 1 = disconnect, 2 = DATA OUT, 3 = DATA IN. */ int msg_action; @@ -567,11 +568,9 @@ static void lsi_do_dma(LSIState *s, int out) s->csbc += count; s->dnad += count; s->dbc -= count; - - if (s->current->dma_buf == NULL) { - s->current->dma_buf = dev->info->get_buf(dev, s->current->tag); + if (s->current->dma_buf == NULL) { + s->current->dma_buf = scsi_req_get_buf(s->current->req); } - /* ??? Set SFBR to first data byte. */ if (out) { cpu_physical_memory_read(addr, s->current->dma_buf, count); @@ -581,13 +580,7 @@ static void lsi_do_dma(LSIState *s, int out) s->current->dma_len -= count; if (s->current->dma_len == 0) { s->current->dma_buf = NULL; - if (out) { - /* Write the data. */ - dev->info->write_data(dev, s->current->tag); - } else { - /* Request any remaining data. */ - dev->info->read_data(dev, s->current->tag); - } + scsi_req_continue(s->current->req); } else { s->current->dma_buf += count; lsi_resume_script(s); @@ -652,82 +645,123 @@ static void lsi_reselect(LSIState *s, lsi_request *p) } } -/* Record that data is available for a queued command. Returns zero if - the device was reselected, nonzero if the IO is deferred. */ -static int lsi_queue_tag(LSIState *s, uint32_t tag, uint32_t arg) +static lsi_request *lsi_find_by_tag(LSIState *s, uint32_t tag) { lsi_request *p; QTAILQ_FOREACH(p, &s->queue, next) { if (p->tag == tag) { - if (p->pending) { - BADF("Multiple IO pending for tag %d\n", tag); - } - p->pending = arg; - /* Reselect if waiting for it, or if reselection triggers an IRQ - and the bus is free. - Since no interrupt stacking is implemented in the emulation, it - is also required that there are no pending interrupts waiting - for service from the device driver. */ - if (s->waiting == 1 || - (lsi_irq_on_rsl(s) && !(s->scntl1 & LSI_SCNTL1_CON) && - !(s->istat0 & (LSI_ISTAT0_SIP | LSI_ISTAT0_DIP)))) { - /* Reselect device. */ - lsi_reselect(s, p); - return 0; - } else { - DPRINTF("Queueing IO tag=0x%x\n", tag); - p->pending = arg; - return 1; - } + return p; } } - BADF("IO with unknown tag %d\n", tag); - return 1; + + return NULL; +} + +static void lsi_request_cancelled(SCSIRequest *req) +{ + LSIState *s = DO_UPCAST(LSIState, dev.qdev, req->bus->qbus.parent); + lsi_request *p; + + if (s->current && req == s->current->req) { + scsi_req_unref(req); + qemu_free(s->current); + s->current = NULL; + return; + } + + p = lsi_find_by_tag(s, req->tag); + if (p) { + QTAILQ_REMOVE(&s->queue, p, next); + scsi_req_unref(req); + qemu_free(p); + } } -/* Callback to indicate that the SCSI layer has completed a transfer. */ -static void lsi_command_complete(SCSIBus *bus, int reason, uint32_t tag, - uint32_t arg) +/* Record that data is available for a queued command. Returns zero if + the device was reselected, nonzero if the IO is deferred. 
*/ +static int lsi_queue_tag(LSIState *s, uint32_t tag, uint32_t len) { - LSIState *s = DO_UPCAST(LSIState, dev.qdev, bus->qbus.parent); + lsi_request *p; + + p = lsi_find_by_tag(s, tag); + if (!p) { + BADF("IO with unknown tag %d\n", tag); + return 1; + } + + if (p->pending) { + BADF("Multiple IO pending for tag %d\n", tag); + } + p->pending = len; + /* Reselect if waiting for it, or if reselection triggers an IRQ + and the bus is free. + Since no interrupt stacking is implemented in the emulation, it + is also required that there are no pending interrupts waiting + for service from the device driver. */ + if (s->waiting == 1 || + (lsi_irq_on_rsl(s) && !(s->scntl1 & LSI_SCNTL1_CON) && + !(s->istat0 & (LSI_ISTAT0_SIP | LSI_ISTAT0_DIP)))) { + /* Reselect device. */ + lsi_reselect(s, p); + return 0; + } else { + DPRINTF("Queueing IO tag=0x%x\n", tag); + p->pending = len; + return 1; + } +} + + /* Callback to indicate that the SCSI layer has completed a command. */ +static void lsi_command_complete(SCSIRequest *req, uint32_t status) +{ + LSIState *s = DO_UPCAST(LSIState, dev.qdev, req->bus->qbus.parent); int out; out = (s->sstat1 & PHASE_MASK) == PHASE_DO; - if (reason == SCSI_REASON_DONE) { - DPRINTF("Command complete sense=%d\n", (int)arg); - s->sense = arg; - s->command_complete = 2; - if (s->waiting && s->dbc != 0) { - /* Raise phase mismatch for short transfers. */ - lsi_bad_phase(s, out, PHASE_ST); - } else { - lsi_set_phase(s, PHASE_ST); - } + DPRINTF("Command complete status=%d\n", (int)status); + s->status = status; + s->command_complete = 2; + if (s->waiting && s->dbc != 0) { + /* Raise phase mismatch for short transfers. */ + lsi_bad_phase(s, out, PHASE_ST); + } else { + lsi_set_phase(s, PHASE_ST); + } + if (s->current && req == s->current->req) { + scsi_req_unref(s->current->req); qemu_free(s->current); s->current = NULL; - - lsi_resume_script(s); - return; } + lsi_resume_script(s); +} + + /* Callback to indicate that the SCSI layer has completed a transfer. 
*/ +static void lsi_transfer_data(SCSIRequest *req, uint32_t len) +{ + LSIState *s = DO_UPCAST(LSIState, dev.qdev, req->bus->qbus.parent); + int out; - if (s->waiting == 1 || !s->current || tag != s->current->tag || + if (s->waiting == 1 || !s->current || req->tag != s->current->tag || (lsi_irq_on_rsl(s) && !(s->scntl1 & LSI_SCNTL1_CON))) { - if (lsi_queue_tag(s, tag, arg)) + if (lsi_queue_tag(s, req->tag, len)) { return; + } } + out = (s->sstat1 & PHASE_MASK) == PHASE_DO; + /* host adapter (re)connected */ - DPRINTF("Data ready tag=0x%x len=%d\n", tag, arg); - s->current->dma_len = arg; + DPRINTF("Data ready tag=0x%x len=%d\n", req->tag, len); + s->current->dma_len = len; s->command_complete = 1; - if (!s->waiting) - return; - if (s->waiting == 1 || s->dbc == 0) { - lsi_resume_script(s); - } else { - lsi_do_dma(s, out); + if (s->waiting) { + if (s->waiting == 1 || s->dbc == 0) { + lsi_resume_script(s); + } else { + lsi_do_dma(s, out); + } } } @@ -755,16 +789,17 @@ static void lsi_do_command(LSIState *s) assert(s->current == NULL); s->current = qemu_mallocz(sizeof(lsi_request)); s->current->tag = s->select_tag; + s->current->req = scsi_req_new(dev, s->current->tag, s->current_lun); - n = dev->info->send_command(dev, s->current->tag, buf, s->current_lun); - if (n > 0) { - lsi_set_phase(s, PHASE_DI); - dev->info->read_data(dev, s->current->tag); - } else if (n < 0) { - lsi_set_phase(s, PHASE_DO); - dev->info->write_data(dev, s->current->tag); + n = scsi_req_enqueue(s->current->req, buf); + if (n) { + if (n > 0) { + lsi_set_phase(s, PHASE_DI); + } else if (n < 0) { + lsi_set_phase(s, PHASE_DO); + } + scsi_req_continue(s->current->req); } - if (!s->command_complete) { if (n) { /* Command did not complete immediately so disconnect. */ @@ -783,14 +818,14 @@ static void lsi_do_command(LSIState *s) static void lsi_do_status(LSIState *s) { - uint8_t sense; - DPRINTF("Get status len=%d sense=%d\n", s->dbc, s->sense); + uint8_t status; + DPRINTF("Get status len=%d status=%d\n", s->dbc, s->status); if (s->dbc != 1) BADF("Bad Status move\n"); s->dbc = 1; - sense = s->sense; - s->sfbr = sense; - cpu_physical_memory_write(s->dnad, &sense, 1); + status = s->status; + s->sfbr = status; + cpu_physical_memory_write(s->dnad, &status, 1); lsi_set_phase(s, PHASE_MI); s->msg_action = 1; lsi_add_msg_byte(s, 0); /* COMMAND COMPLETE */ @@ -855,13 +890,15 @@ static void lsi_do_msgout(LSIState *s) int len; uint32_t current_tag; SCSIDevice *current_dev; - lsi_request *p, *p_next; + lsi_request *current_req, *p, *p_next; int id; if (s->current) { current_tag = s->current->tag; + current_req = s->current; } else { current_tag = s->select_tag; + current_req = lsi_find_by_tag(s, current_tag); } id = (current_tag >> 8) & 0xf; current_dev = s->bus.devs[id]; @@ -913,7 +950,9 @@ static void lsi_do_msgout(LSIState *s) case 0x0d: /* The ABORT TAG message clears the current I/O process only. */ DPRINTF("MSG: ABORT TAG tag=0x%x\n", current_tag); - current_dev->info->cancel_io(current_dev, current_tag); + if (current_req) { + scsi_req_cancel(current_req->req); + } lsi_disconnect(s); break; case 0x06: @@ -936,7 +975,9 @@ static void lsi_do_msgout(LSIState *s) } /* clear the current I/O process */ - current_dev->info->cancel_io(current_dev, current_tag); + if (s->current) { + scsi_req_cancel(s->current->req); + } /* As the current implemented devices scsi_disk and scsi_generic only support one LUN, we don't need to keep track of LUNs. 
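The ESP and LSI hunks above replace the old per-device send_command/read_data/write_data/cancel_io callbacks with reference-counted SCSIRequest objects driven through a per-bus SCSIBusOps table (transfer_data/complete/cancel). The standalone sketch below models that ownership pattern only; every toy_* name is invented for illustration and it uses nothing but libc, so it is not QEMU code, just the shape of the refactoring.

    /* Standalone model of the request-object pattern adopted above:
     * a bus-level ops table plus reference-counted requests. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct ToyReq {
        unsigned tag;
        int refcount;
    } ToyReq;

    typedef struct ToyBusOps {
        void (*transfer_data)(ToyReq *req, unsigned len);
        void (*complete)(ToyReq *req, unsigned status);
        void (*cancel)(ToyReq *req);
    } ToyBusOps;

    typedef struct ToyBus {
        const ToyBusOps *ops;   /* set once at bus creation, like scsi_bus_new() */
    } ToyBus;

    static ToyReq *toy_req_new(unsigned tag)
    {
        ToyReq *req = calloc(1, sizeof(*req));
        req->tag = tag;
        req->refcount = 1;      /* the HBA owns one reference */
        return req;
    }

    static void toy_req_unref(ToyReq *req)
    {
        if (--req->refcount == 0) {
            free(req);
        }
    }

    /* HBA-side callbacks, analogous to esp_transfer_data()/esp_command_complete() */
    static void hba_transfer_data(ToyReq *req, unsigned len)
    {
        printf("tag %u: %u bytes ready\n", req->tag, len);
    }

    static void hba_complete(ToyReq *req, unsigned status)
    {
        printf("tag %u: done, status %u\n", req->tag, status);
        toy_req_unref(req);     /* drop the HBA's reference, as the ESP/LSI code does */
    }

    static void hba_cancel(ToyReq *req)
    {
        printf("tag %u: cancelled\n", req->tag);
        toy_req_unref(req);
    }

    static const ToyBusOps hba_ops = {
        .transfer_data = hba_transfer_data,
        .complete      = hba_complete,
        .cancel        = hba_cancel,
    };

    int main(void)
    {
        ToyBus bus = { .ops = &hba_ops };
        ToyReq *req = toy_req_new(0x42);

        bus.ops->transfer_data(req, 512);   /* device layer signals data availability */
        bus.ops->complete(req, 0);          /* completion drops the last reference */
        return 0;
    }

The point of the pattern, as the hunks above show, is that cancellation and completion both funnel through the request's reference count instead of ad-hoc per-tag callbacks.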
@@ -948,8 +989,7 @@ static void lsi_do_msgout(LSIState *s) id = current_tag & 0x0000ff00; QTAILQ_FOREACH_SAFE(p, &s->queue, next, p_next) { if ((p->tag & 0x0000ff00) == id) { - current_dev->info->cancel_io(current_dev, p->tag); - QTAILQ_REMOVE(&s->queue, p, next); + scsi_req_cancel(p->req); } } @@ -2122,7 +2162,7 @@ static const VMStateDescription vmstate_lsi_scsi = { VMSTATE_PCI_DEVICE(dev, LSIState), VMSTATE_INT32(carry, LSIState), - VMSTATE_INT32(sense, LSIState), + VMSTATE_INT32(status, LSIState), VMSTATE_INT32(msg_action, LSIState), VMSTATE_INT32(msg_len, LSIState), VMSTATE_BUFFER(msg, LSIState), @@ -2205,6 +2245,12 @@ static int lsi_scsi_uninit(PCIDevice *d) return 0; } +static const struct SCSIBusOps lsi_scsi_ops = { + .transfer_data = lsi_transfer_data, + .complete = lsi_command_complete, + .cancel = lsi_request_cancelled +}; + static int lsi_scsi_init(PCIDevice *dev) { LSIState *s = DO_UPCAST(LSIState, dev, dev); @@ -2232,7 +2278,7 @@ static int lsi_scsi_init(PCIDevice *dev) PCI_BASE_ADDRESS_SPACE_MEMORY, lsi_ram_mapfunc); QTAILQ_INIT(&s->queue); - scsi_bus_new(&s->bus, &dev->qdev, 1, LSI_MAX_DEVS, lsi_command_complete); + scsi_bus_new(&s->bus, &dev->qdev, 1, LSI_MAX_DEVS, &lsi_scsi_ops); if (!dev->qdev.hotplugged) { return scsi_bus_legacy_handle_cmdline(&s->bus); } @@ -155,7 +155,7 @@ int msi_init(struct PCIDevice *dev, uint8_t offset, pci_set_word(dev->wmask + msi_data_off(dev, msi64bit), 0xffff); if (msi_per_vector_mask) { - /* Make mask bits 0 to nr_vectors - 1 writeable. */ + /* Make mask bits 0 to nr_vectors - 1 writable. */ pci_set_long(dev->wmask + msi_mask_off(dev, msi64bit), 0xffffffff >> (PCI_MSI_VECTORS_MAX - nr_vectors)); } @@ -76,7 +76,7 @@ static int msix_add_config(struct PCIDevice *pdev, unsigned short nentries, pci_set_long(config + PCI_MSIX_PBA, (bar_size + MSIX_PAGE_PENDING) | bar_nr); pdev->msix_cap = config_offset; - /* Make flags bit writeable. */ + /* Make flags bit writable. */ pdev->wmask[config_offset + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK | MSIX_MASKALL_MASK; return 0; diff --git a/hw/mst_fpga.c b/hw/mst_fpga.c index a04355cc7f..4e47574b63 100644 --- a/hw/mst_fpga.c +++ b/hw/mst_fpga.c @@ -154,7 +154,7 @@ mst_fpga_writeb(void *opaque, target_phys_addr_t addr, uint32_t value) case MST_MSCRD: s->mscrd = value; break; - case MST_INTMSKENA: /* Mask interupt */ + case MST_INTMSKENA: /* Mask interrupt */ s->intmskena = (value & 0xFEEFF); qemu_set_irq(s->parent, s->intsetclr & s->intmskena); break; diff --git a/hw/multiboot.c b/hw/multiboot.c index 394ed0136e..6e6cfb9531 100644 --- a/hw/multiboot.c +++ b/hw/multiboot.c @@ -307,7 +307,7 @@ int load_multiboot(void *fw_cfg, | MULTIBOOT_FLAGS_MMAP); stl_p(bootinfo + MBI_MEM_LOWER, 640); stl_p(bootinfo + MBI_MEM_UPPER, (ram_size / 1024) - 1024); - stl_p(bootinfo + MBI_BOOT_DEVICE, 0x8001ffff); /* XXX: use the -boot switch? */ + stl_p(bootinfo + MBI_BOOT_DEVICE, 0x8000ffff); /* XXX: use the -boot switch? 
*/ stl_p(bootinfo + MBI_MMAP_ADDR, ADDR_E820_MAP); mb_debug("multiboot: mh_entry_addr = %#x\n", mh_entry_addr); @@ -957,29 +957,18 @@ void pc_cpus_init(const char *cpu_model) } } -void pc_memory_init(ram_addr_t ram_size, - const char *kernel_filename, +void pc_memory_init(const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, - ram_addr_t *below_4g_mem_size_p, - ram_addr_t *above_4g_mem_size_p) + ram_addr_t below_4g_mem_size, + ram_addr_t above_4g_mem_size) { char *filename; int ret, linux_boot, i; ram_addr_t ram_addr, bios_offset, option_rom_offset; - ram_addr_t below_4g_mem_size, above_4g_mem_size = 0; int bios_size, isa_bios_size; void *fw_cfg; - if (ram_size >= 0xe0000000 ) { - above_4g_mem_size = ram_size - 0xe0000000; - below_4g_mem_size = 0xe0000000; - } else { - below_4g_mem_size = ram_size; - } - *above_4g_mem_size_p = above_4g_mem_size; - *below_4g_mem_size_p = below_4g_mem_size; - linux_boot = (kernel_filename != NULL); /* allocate RAM */ @@ -1081,6 +1070,15 @@ void pc_vga_init(PCIBus *pci_bus) isa_vga_init(); } } + + /* + * sga does not suppress normal vga output. So a machine can have both a + * vga card and sga manually enabled. Output will be seen on both. + * For nographic case, sga is enabled at all times + */ + if (display_type == DT_NOGRAPHIC) { + isa_create_simple("sga"); + } } static void cpu_request_exit(void *opaque, int irq, int level) @@ -1093,7 +1091,8 @@ static void cpu_request_exit(void *opaque, int irq, int level) } void pc_basic_device_init(qemu_irq *isa_irq, - ISADevice **rtc_state) + ISADevice **rtc_state, + bool no_vmport) { int i; DriveInfo *fd[MAX_FD]; @@ -1138,8 +1137,12 @@ void pc_basic_device_init(qemu_irq *isa_irq, a20_line = qemu_allocate_irqs(handle_a20_line_change, first_cpu, 2); i8042 = isa_create_simple("i8042"); i8042_setup_a20_line(i8042, &a20_line[0]); - vmport_init(); - vmmouse = isa_try_create("vmmouse"); + if (!no_vmport) { + vmport_init(); + vmmouse = isa_try_create("vmmouse"); + } else { + vmmouse = NULL; + } if (vmmouse) { qdev_prop_set_ptr(&vmmouse->qdev, "ps2_mouse", i8042); qdev_init_nofail(&vmmouse->qdev); @@ -129,16 +129,16 @@ void pc_cmos_set_s3_resume(void *opaque, int irq, int level); void pc_acpi_smi_interrupt(void *opaque, int irq, int level); void pc_cpus_init(const char *cpu_model); -void pc_memory_init(ram_addr_t ram_size, - const char *kernel_filename, +void pc_memory_init(const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, - ram_addr_t *below_4g_mem_size_p, - ram_addr_t *above_4g_mem_size_p); + ram_addr_t below_4g_mem_size, + ram_addr_t above_4g_mem_size); qemu_irq *pc_allocate_cpu_irq(void); void pc_vga_init(PCIBus *pci_bus); void pc_basic_device_init(qemu_irq *isa_irq, - ISADevice **rtc_state); + ISADevice **rtc_state, + bool no_vmport); void pc_init_ne2k_isa(NICInfo *nd); void pc_cmos_init(ram_addr_t ram_size, ram_addr_t above_4g_mem_size, const char *boot_device, @@ -176,6 +176,7 @@ struct PCII440FXState; typedef struct PCII440FXState PCII440FXState; PCIBus *i440fx_init(PCII440FXState **pi440fx_state, int *piix_devfn, qemu_irq *pic, ram_addr_t ram_size); +PCIBus *i440fx_xen_init(PCII440FXState **pi440fx_state, int *piix3_devfn, qemu_irq *pic, ram_addr_t ram_size); void i440fx_init_memory_mappings(PCII440FXState *d); /* piix4.c */ diff --git a/hw/pc_piix.c b/hw/pc_piix.c index 1aba09f8f6..90861257d8 100644 --- a/hw/pc_piix.c +++ b/hw/pc_piix.c @@ -38,6 +38,10 @@ #include "arch_init.h" #include "blockdev.h" #include "smbus.h" +#include "xen.h" +#ifdef 
CONFIG_XEN +# include <xen/hvm/hvm_info_table.h> +#endif #define MAX_IDE_BUS 2 @@ -92,12 +96,26 @@ static void pc_init1(ram_addr_t ram_size, kvmclock_create(); } + if (ram_size >= 0xe0000000 ) { + above_4g_mem_size = ram_size - 0xe0000000; + below_4g_mem_size = 0xe0000000; + } else { + above_4g_mem_size = 0; + below_4g_mem_size = ram_size; + } + /* allocate ram and load rom/bios */ - pc_memory_init(ram_size, kernel_filename, kernel_cmdline, initrd_filename, - &below_4g_mem_size, &above_4g_mem_size); + if (!xen_enabled()) { + pc_memory_init(kernel_filename, kernel_cmdline, initrd_filename, + below_4g_mem_size, above_4g_mem_size); + } - cpu_irq = pc_allocate_cpu_irq(); - i8259 = i8259_init(cpu_irq[0]); + if (!xen_enabled()) { + cpu_irq = pc_allocate_cpu_irq(); + i8259 = i8259_init(cpu_irq[0]); + } else { + i8259 = xen_interrupt_controller_init(); + } isa_irq_state = qemu_mallocz(sizeof(*isa_irq_state)); isa_irq_state->i8259 = i8259; if (pci_enabled) { @@ -106,7 +124,11 @@ static void pc_init1(ram_addr_t ram_size, isa_irq = qemu_allocate_irqs(isa_irq_handler, isa_irq_state, 24); if (pci_enabled) { - pci_bus = i440fx_init(&i440fx_state, &piix3_devfn, isa_irq, ram_size); + if (!xen_enabled()) { + pci_bus = i440fx_init(&i440fx_state, &piix3_devfn, isa_irq, ram_size); + } else { + pci_bus = i440fx_xen_init(&i440fx_state, &piix3_devfn, isa_irq, ram_size); + } } else { pci_bus = NULL; i440fx_state = NULL; @@ -119,7 +141,7 @@ static void pc_init1(ram_addr_t ram_size, pc_vga_init(pci_enabled? pci_bus: NULL); /* init basic PC hardware */ - pc_basic_device_init(isa_irq, &rtc_state); + pc_basic_device_init(isa_irq, &rtc_state, xen_enabled()); for(i = 0; i < nb_nics; i++) { NICInfo *nd = &nd_table[i]; @@ -157,7 +179,11 @@ static void pc_init1(ram_addr_t ram_size, if (pci_enabled && acpi_enabled) { i2c_bus *smbus; - cmos_s3 = qemu_allocate_irqs(pc_cmos_set_s3_resume, rtc_state, 1); + if (!xen_enabled()) { + cmos_s3 = qemu_allocate_irqs(pc_cmos_set_s3_resume, rtc_state, 1); + } else { + cmos_s3 = qemu_allocate_irqs(xen_cmos_set_s3_resume, rtc_state, 1); + } smi_irq = qemu_allocate_irqs(pc_acpi_smi_interrupt, first_cpu, 1); /* TODO: Populate SPD eeprom data. 
*/ smbus = piix4_pm_init(pci_bus, piix3_devfn + 3, 0xb100, @@ -213,6 +239,24 @@ static void pc_init_isa(ram_addr_t ram_size, initrd_filename, cpu_model, 0, 1); } +#ifdef CONFIG_XEN +static void pc_xen_hvm_init(ram_addr_t ram_size, + const char *boot_device, + const char *kernel_filename, + const char *kernel_cmdline, + const char *initrd_filename, + const char *cpu_model) +{ + if (xen_hvm_init() != 0) { + hw_error("xen hardware virtual machine initialisation failed"); + } + pc_init_pci_no_kvmclock(ram_size, boot_device, + kernel_filename, kernel_cmdline, + initrd_filename, cpu_model); + xen_vcpu_init(); +} +#endif + static QEMUMachine pc_machine = { .name = "pc-0.14", .alias = "pc", @@ -425,6 +469,16 @@ static QEMUMachine isapc_machine = { .max_cpus = 1, }; +#ifdef CONFIG_XEN +static QEMUMachine xenfv_machine = { + .name = "xenfv", + .desc = "Xen Fully-virtualized PC", + .init = pc_xen_hvm_init, + .max_cpus = HVM_MAX_VCPUS, + .default_machine_opts = "accel=xen", +}; +#endif + static void pc_machine_init(void) { qemu_register_machine(&pc_machine); @@ -433,6 +487,9 @@ static void pc_machine_init(void) qemu_register_machine(&pc_machine_v0_11); qemu_register_machine(&pc_machine_v0_10); qemu_register_machine(&isapc_machine); +#ifdef CONFIG_XEN + qemu_register_machine(&xenfv_machine); +#endif } machine_init(pc_machine_init); @@ -168,7 +168,7 @@ void pci_device_reset(PCIDevice *dev) dev->irq_state = 0; pci_update_irq_status(dev); pci_device_deassert_intx(dev); - /* Clear all writeable bits */ + /* Clear all writable bits */ pci_word_test_and_clear_mask(dev->config + PCI_COMMAND, pci_get_word(dev->wmask + PCI_COMMAND) | pci_get_word(dev->w1cmask + PCI_COMMAND)); @@ -891,7 +891,7 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num, wmask = ~(size - 1); addr = pci_bar(pci_dev, region_num); if (region_num == PCI_ROM_SLOT) { - /* ROM enable bit is writeable */ + /* ROM enable bit is writable */ wmask |= PCI_ROM_ADDRESS_ENABLE; } pci_set_long(pci_dev->config + addr, type); @@ -1940,6 +1940,8 @@ static int pci_add_option_rom(PCIDevice *pdev, bool is_default_rom) pci_patch_ids(pdev, ptr, size); } + qemu_put_ram_ptr(ptr); + pci_register_bar(pdev, PCI_ROM_SLOT, size, 0, pci_map_option_rom); @@ -1993,7 +1995,7 @@ void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size) if (!offset) return; pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT]; - /* Make capability writeable again */ + /* Make capability writable again */ memset(pdev->wmask + offset, 0xff, size); memset(pdev->w1cmask + offset, 0, size); /* Clear cmask as device-specific registers can't be checked */ @@ -132,7 +132,7 @@ struct PCIDevice { /* PCI config space */ uint8_t *config; - /* Used to enable config checks on load. Note that writeable bits are + /* Used to enable config checks on load. Note that writable bits are * never checked even if set in cmask. 
*/ uint8_t *cmask; diff --git a/hw/pci_ids.h b/hw/pci_ids.h index ea3418cef2..d9457ed3f4 100644 --- a/hw/pci_ids.h +++ b/hw/pci_ids.h @@ -100,6 +100,7 @@ #define PCI_VENDOR_ID_INTEL 0x8086 #define PCI_DEVICE_ID_INTEL_82441 0x1237 #define PCI_DEVICE_ID_INTEL_82801AA_5 0x2415 +#define PCI_DEVICE_ID_INTEL_82801D 0x24CD #define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab #define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000 #define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010 diff --git a/hw/pci_regs.h b/hw/pci_regs.h index 63e8956868..e8840964ac 100644 --- a/hw/pci_regs.h +++ b/hw/pci_regs.h @@ -223,7 +223,7 @@ #define PCI_PM_CAP_PME_CLOCK 0x0008 /* PME clock required */ #define PCI_PM_CAP_RESERVED 0x0010 /* Reserved field */ #define PCI_PM_CAP_DSI 0x0020 /* Device specific initialization */ -#define PCI_PM_CAP_AUX_POWER 0x01C0 /* Auxilliary power support mask */ +#define PCI_PM_CAP_AUX_POWER 0x01C0 /* Auxiliary power support mask */ #define PCI_PM_CAP_D1 0x0200 /* D1 power state support */ #define PCI_PM_CAP_D2 0x0400 /* D2 power state support */ #define PCI_PM_CAP_PME 0x0800 /* PME pin supported */ @@ -176,7 +176,7 @@ static void hotplug_event_notify(PCIDevice *dev) } /* - * A PCI Express Hot-Plug Event has occured, so update slot status register + * A PCI Express Hot-Plug Event has occurred, so update slot status register * and notify OS of the event if necessary. * * 6.7.3 PCI Express Hot-Plug Events @@ -40,7 +40,7 @@ typedef enum { * * Not all the bits of slot control register match with the ones of * slot status. Not some bits of slot status register is used to - * show status, not to report event occurence. + * show status, not to report event occurrence. * So such bits must be masked out when checking the software * notification condition. */ diff --git a/hw/pcie_aer.c b/hw/pcie_aer.c index b87201026b..be019c7c0a 100644 --- a/hw/pcie_aer.c +++ b/hw/pcie_aer.c @@ -617,7 +617,7 @@ static bool pcie_aer_inject_uncor_error(PCIEAERInject *inj, bool is_fatal) /* * non-Function specific error must be recorded in all functions. * It is the responsibility of the caller of this function. - * It is also caller's responsiblity to determine which function should + * It is also caller's responsibility to determine which function should * report the rerror. * * 6.2.4 Error Logging diff --git a/hw/pflash_cfi02.c b/hw/pflash_cfi02.c index 14bbc34e16..725cd1e78c 100644 --- a/hw/pflash_cfi02.c +++ b/hw/pflash_cfi02.c @@ -188,7 +188,7 @@ static uint32_t pflash_read (pflash_t *pfl, target_phys_addr_t offset, default: goto flash_read; } - DPRINTF("%s: ID " TARGET_FMT_pld " %x\n", __func__, boff, ret); + DPRINTF("%s: ID " TARGET_FMT_plx " %x\n", __func__, boff, ret); break; case 0xA0: case 0x10: @@ -367,7 +367,7 @@ static void pflash_write (pflash_t *pfl, target_phys_addr_t offset, case 4: switch (pfl->cmd) { case 0xA0: - /* Ignore writes while flash data write is occuring */ + /* Ignore writes while flash data write is occurring */ /* As we suppose write is immediate, this should never happen */ return; case 0x80: diff --git a/hw/piix_pci.c b/hw/piix_pci.c index b927f01bdc..e0da0bdf91 100644 --- a/hw/piix_pci.c +++ b/hw/piix_pci.c @@ -29,6 +29,7 @@ #include "isa.h" #include "sysbus.h" #include "range.h" +#include "xen.h" /* * I440FX chipset data sheet. 
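Several of the surrounding hunks (msi.c, msix.c, pci.c, pci.h) adjust comments about "writable" config-space bits; the mechanism they refer to is the wmask/w1cmask byte arrays that gate guest writes. The fragment below is a simplified, standalone model of that gating, not the QEMU implementation: writable bits take the written value, write-1-to-clear bits are cleared when 1 is written, and everything else stays read-only.

    /* Simplified model of PCI config-space write masking. */
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t config_write_byte(uint8_t cur, uint8_t val,
                                     uint8_t wmask, uint8_t w1cmask)
    {
        uint8_t next = (cur & ~wmask) | (val & wmask);  /* writable bits take the new value */
        next &= ~(val & w1cmask);                       /* writing 1 clears w1c bits */
        return next;
    }

    int main(void)
    {
        /* Example: bit 0 writable, bit 7 write-1-to-clear, the rest read-only. */
        uint8_t cur = 0x84, wmask = 0x01, w1cmask = 0x80;
        printf("0x%02x\n", config_write_byte(cur, 0x81, wmask, w1cmask)); /* -> 0x05 */
        return 0;
    }

With cur = 0x84, writing 0x81 sets the writable bit 0, clears the write-1-to-clear bit 7, and leaves the read-only bit 2 untouched, giving 0x05.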
@@ -172,6 +173,13 @@ static void i440fx_write_config(PCIDevice *dev, } } +static void i440fx_write_config_xen(PCIDevice *dev, + uint32_t address, uint32_t val, int len) +{ + xen_piix_pci_write_config_client(address, val, len); + i440fx_write_config(dev, address, val, len); +} + static int i440fx_load_old(QEMUFile* f, void *opaque, int version_id) { PCII440FXState *d = opaque; @@ -234,7 +242,10 @@ static int i440fx_initfn(PCIDevice *dev) return 0; } -PCIBus *i440fx_init(PCII440FXState **pi440fx_state, int *piix3_devfn, qemu_irq *pic, ram_addr_t ram_size) +static PCIBus *i440fx_common_init(const char *device_name, + PCII440FXState **pi440fx_state, + int *piix3_devfn, + qemu_irq *pic, ram_addr_t ram_size) { DeviceState *dev; PCIBus *b; @@ -248,13 +259,13 @@ PCIBus *i440fx_init(PCII440FXState **pi440fx_state, int *piix3_devfn, qemu_irq * s->bus = b; qdev_init_nofail(dev); - d = pci_create_simple(b, 0, "i440FX"); + d = pci_create_simple(b, 0, device_name); *pi440fx_state = DO_UPCAST(PCII440FXState, dev, d); piix3 = DO_UPCAST(PIIX3State, dev, pci_create_simple_multifunction(b, -1, true, "PIIX3")); piix3->pic = pic; - pci_bus_irqs(b, piix3_set_irq, pci_slot_get_pirq, piix3, PIIX_NUM_PIRQS); + (*pi440fx_state)->piix3 = piix3; *piix3_devfn = piix3->dev.devfn; @@ -267,12 +278,36 @@ PCIBus *i440fx_init(PCII440FXState **pi440fx_state, int *piix3_devfn, qemu_irq * return b; } +PCIBus *i440fx_init(PCII440FXState **pi440fx_state, int *piix3_devfn, + qemu_irq *pic, ram_addr_t ram_size) +{ + PCIBus *b; + + b = i440fx_common_init("i440FX", pi440fx_state, piix3_devfn, pic, ram_size); + pci_bus_irqs(b, piix3_set_irq, pci_slot_get_pirq, (*pi440fx_state)->piix3, + PIIX_NUM_PIRQS); + + return b; +} + +PCIBus *i440fx_xen_init(PCII440FXState **pi440fx_state, int *piix3_devfn, + qemu_irq *pic, ram_addr_t ram_size) +{ + PCIBus *b; + + b = i440fx_common_init("i440FX-xen", pi440fx_state, piix3_devfn, pic, ram_size); + pci_bus_irqs(b, xen_piix3_set_irq, xen_pci_slot_get_pirq, + (*pi440fx_state)->piix3, PIIX_NUM_PIRQS); + + return b; +} + /* PIIX3 PCI to ISA bridge */ static void piix3_set_irq_pic(PIIX3State *piix3, int pic_irq) { qemu_set_irq(piix3->pic[pic_irq], !!(piix3->pic_levels & - (((1UL << PIIX_NUM_PIRQS) - 1) << + (((1ULL << PIIX_NUM_PIRQS) - 1) << (pic_irq * PIIX_NUM_PIRQS)))); } @@ -422,6 +457,14 @@ static PCIDeviceInfo i440fx_info[] = { .revision = 0x02, .class_id = PCI_CLASS_BRIDGE_HOST, },{ + .qdev.name = "i440FX-xen", + .qdev.desc = "Host bridge", + .qdev.size = sizeof(PCII440FXState), + .qdev.vmsd = &vmstate_i440fx, + .qdev.no_user = 1, + .init = i440fx_initfn, + .config_write = i440fx_write_config_xen, + },{ .qdev.name = "PIIX3", .qdev.desc = "ISA bridge", .qdev.size = sizeof(PIIX3State), diff --git a/hw/pl031.c b/hw/pl031.c index 8c2f9d0bc7..017a313fda 100644 --- a/hw/pl031.c +++ b/hw/pl031.c @@ -161,7 +161,7 @@ static void pl031_write(void * opaque, target_phys_addr_t offset, pl031_update(s); break; case RTC_ICR: - /* The PL031 documentation (DDI0224B) states that the interupt is + /* The PL031 documentation (DDI0224B) states that the interrupt is cleared when bit 0 of the written value is set. However the arm926e documentation (DDI0287B) states that the interrupt is cleared when any value is written. 
*/ diff --git a/hw/pl061.c b/hw/pl061.c index 2e181f8c2f..372dfc2da2 100644 --- a/hw/pl061.c +++ b/hw/pl061.c @@ -98,7 +98,7 @@ static uint32_t pl061_read(void *opaque, target_phys_addr_t offset) return s->isense; case 0x408: /* Interrupt both edges */ return s->ibe; - case 0x40c: /* Interupt event */ + case 0x40c: /* Interrupt event */ return s->iev; case 0x410: /* Interrupt mask */ return s->im; @@ -156,7 +156,7 @@ static void pl061_write(void *opaque, target_phys_addr_t offset, case 0x408: /* Interrupt both edges */ s->ibe = value; break; - case 0x40c: /* Interupt event */ + case 0x40c: /* Interrupt event */ s->iev = value; break; case 0x410: /* Interrupt mask */ @@ -452,6 +452,10 @@ uint64_t cpu_ppc_load_tbl (CPUState *env) ppc_tb_t *tb_env = env->tb_env; uint64_t tb; + if (kvm_enabled()) { + return env->spr[SPR_TBL]; + } + tb = cpu_ppc_get_tb(tb_env, qemu_get_clock_ns(vm_clock), tb_env->tb_offset); LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); @@ -471,6 +475,10 @@ static inline uint32_t _cpu_ppc_load_tbu(CPUState *env) uint32_t cpu_ppc_load_tbu (CPUState *env) { + if (kvm_enabled()) { + return env->spr[SPR_TBU]; + } + return _cpu_ppc_load_tbu(env); } @@ -616,6 +624,10 @@ uint32_t cpu_ppc_load_decr (CPUState *env) { ppc_tb_t *tb_env = env->tb_env; + if (kvm_enabled()) { + return env->spr[SPR_DECR]; + } + return _cpu_ppc_load_decr(env, tb_env->decr_next); } diff --git a/hw/ppc4xx_devs.c b/hw/ppc4xx_devs.c index 7f9ed17138..68bdfaacc7 100644 --- a/hw/ppc4xx_devs.c +++ b/hw/ppc4xx_devs.c @@ -38,7 +38,7 @@ #endif /*****************************************************************************/ -/* Generic PowerPC 4xx processor instanciation */ +/* Generic PowerPC 4xx processor instantiation */ CPUState *ppc4xx_init (const char *cpu_model, clk_setup_t *cpu_clk, clk_setup_t *tb_clk, uint32_t sysclk) diff --git a/hw/ppce500.h b/hw/ppce500.h deleted file mode 100644 index 24d49bb871..0000000000 --- a/hw/ppce500.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * QEMU PowerPC E500 emulation shared definitions - * - * Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved. - * - * Author: Yu Liu, <yu.liu@freescale.com> - * - * This file is derived from hw/ppc440.h - * the copyright for that material belongs to the original owners. - * - * This is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#if !defined(PPC_E500_H) -#define PPC_E500_H - -PCIBus *ppce500_pci_init(qemu_irq *pic, target_phys_addr_t registers); - -#endif /* !defined(PPC_E500_H) */ diff --git a/hw/ppce500_mpc8544ds.c b/hw/ppce500_mpc8544ds.c index e111dda5f4..6b57fbf597 100644 --- a/hw/ppce500_mpc8544ds.c +++ b/hw/ppce500_mpc8544ds.c @@ -28,9 +28,10 @@ #include "kvm_ppc.h" #include "device_tree.h" #include "openpic.h" -#include "ppce500.h" +#include "ppc.h" #include "loader.h" #include "elf.h" +#include "sysbus.h" #define BINARY_DEVICE_TREE_FILE "mpc8544ds.dtb" #define UIMAGE_LOAD_BASE 0 @@ -50,6 +51,12 @@ #define MPC8544_PCI_IO 0xE1000000 #define MPC8544_PCI_IOLEN 0x10000 +struct boot_info +{ + uint32_t dt_base; + uint32_t entry; +}; + #ifdef CONFIG_FDT static int mpc8544_copy_soc_cell(void *fdt, const char *node, const char *prop) { @@ -82,7 +89,7 @@ static int mpc8544_load_device_tree(target_phys_addr_t addr, { int ret = -1; #ifdef CONFIG_FDT - uint32_t mem_reg_property[] = {0, ramsize}; + uint32_t mem_reg_property[] = {0, cpu_to_be32(ramsize)}; char *filename; int fdt_size; void *fdt; @@ -103,15 +110,19 @@ static int mpc8544_load_device_tree(target_phys_addr_t addr, if (ret < 0) fprintf(stderr, "couldn't set /memory/reg\n"); - ret = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-start", - initrd_base); - if (ret < 0) - fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n"); + if (initrd_size) { + ret = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-start", + initrd_base); + if (ret < 0) { + fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n"); + } - ret = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-end", - (initrd_base + initrd_size)); - if (ret < 0) - fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n"); + ret = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-end", + (initrd_base + initrd_size)); + if (ret < 0) { + fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n"); + } + } ret = qemu_devtree_setprop_string(fdt, "/chosen", "bootargs", kernel_cmdline); @@ -145,6 +156,13 @@ static int mpc8544_load_device_tree(target_phys_addr_t addr, mpc8544_copy_soc_cell(fdt, buf, "clock-frequency"); mpc8544_copy_soc_cell(fdt, buf, "timebase-frequency"); + } else { + const uint32_t freq = 400000000; + + qemu_devtree_setprop_cell(fdt, "/cpus/PowerPC,8544@0", + "clock-frequency", freq); + qemu_devtree_setprop_cell(fdt, "/cpus/PowerPC,8544@0", + "timebase-frequency", freq); } ret = rom_add_blob_fixed(BINARY_DEVICE_TREE_FILE, fdt, fdt_size, addr); @@ -156,6 +174,35 @@ out: return ret; } +/* Create -kernel TLB entries for BookE, linearly spanning 256MB. */ +static void mmubooke_create_initial_mapping(CPUState *env, + target_ulong va, + target_phys_addr_t pa) +{ + ppcemb_tlb_t *tlb = booke206_get_tlbe(env, 1, 0, 0); + + tlb->attr = 0; + tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); + tlb->size = 256 * 1024 * 1024; + tlb->EPN = va & TARGET_PAGE_MASK; + tlb->RPN = pa & TARGET_PAGE_MASK; + tlb->PID = 0; +} + +static void mpc8544ds_cpu_reset(void *opaque) +{ + CPUState *env = opaque; + struct boot_info *bi = env->load_info; + + cpu_reset(env); + + /* Set initial guest state. 
*/ + env->gpr[1] = (16<<20) - 8; + env->gpr[3] = bi->dt_base; + env->nip = bi->entry; + mmubooke_create_initial_mapping(env, 0, 0); +} + static void mpc8544ds_init(ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, @@ -175,15 +222,28 @@ static void mpc8544ds_init(ram_addr_t ram_size, target_long initrd_size=0; int i=0; unsigned int pci_irq_nrs[4] = {1, 2, 3, 4}; - qemu_irq *irqs, *mpic, *pci_irqs; + qemu_irq *irqs, *mpic; + DeviceState *dev; + struct boot_info *boot_info; /* Setup CPU */ - env = cpu_ppc_init("e500v2_v30"); + if (cpu_model == NULL) { + cpu_model = "e500v2_v30"; + } + + env = cpu_ppc_init(cpu_model); if (!env) { fprintf(stderr, "Unable to initialize CPU!\n"); exit(1); } + /* XXX register timer? */ + ppc_emb_timers_init(env, 400000000, PPC_INTERRUPT_DECR); + ppc_dcr_init(env, NULL, NULL); + + /* Register reset handler */ + qemu_register_reset(mpc8544ds_cpu_reset, env); + /* Fixup Memory size on a alignment boundary */ ram_size &= ~(RAM_SIZES_ALIGN - 1); @@ -211,12 +271,11 @@ static void mpc8544ds_init(ram_addr_t ram_size, } /* PCI */ - pci_irqs = qemu_malloc(sizeof(qemu_irq) * 4); - pci_irqs[0] = mpic[pci_irq_nrs[0]]; - pci_irqs[1] = mpic[pci_irq_nrs[1]]; - pci_irqs[2] = mpic[pci_irq_nrs[2]]; - pci_irqs[3] = mpic[pci_irq_nrs[3]]; - pci_bus = ppce500_pci_init(pci_irqs, MPC8544_PCI_REGS_BASE); + dev = sysbus_create_varargs("e500-pcihost", MPC8544_PCI_REGS_BASE, + mpic[pci_irq_nrs[0]], mpic[pci_irq_nrs[1]], + mpic[pci_irq_nrs[2]], mpic[pci_irq_nrs[3]], + NULL); + pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci.0"); if (!pci_bus) printf("couldn't create PCI controller!\n"); @@ -259,8 +318,13 @@ static void mpc8544ds_init(ram_addr_t ram_size, } } + boot_info = qemu_mallocz(sizeof(struct boot_info)); + /* If we're loading a kernel directly, we must load the device tree too. */ if (kernel_filename) { +#ifndef CONFIG_FDT + cpu_abort(env, "Compiled without FDT support - can't load kernel\n"); +#endif dt_base = (kernel_size + DTC_LOAD_PAD) & ~DTC_PAD_MASK; if (mpc8544_load_device_tree(dt_base, ram_size, initrd_base, initrd_size, kernel_cmdline) < 0) { @@ -268,17 +332,14 @@ static void mpc8544ds_init(ram_addr_t ram_size, exit(1); } - /* Set initial guest state. */ - env->gpr[1] = (16<<20) - 8; - env->gpr[3] = dt_base; - env->nip = entry; - /* XXX we currently depend on KVM to create some initial TLB entries. 
*/ + boot_info->entry = entry; + boot_info->dt_base = dt_base; } + env->load_info = boot_info; - if (kvm_enabled()) + if (kvm_enabled()) { kvmppc_init(); - - return; + } } static QEMUMachine mpc8544ds_machine = { diff --git a/hw/ppce500_pci.c b/hw/ppce500_pci.c index 83a20e4620..069af9691a 100644 --- a/hw/ppce500_pci.c +++ b/hw/ppce500_pci.c @@ -15,7 +15,6 @@ */ #include "hw.h" -#include "ppce500.h" #include "pci.h" #include "pci_host.h" #include "bswap.h" @@ -29,7 +28,8 @@ #define PCIE500_CFGADDR 0x0 #define PCIE500_CFGDATA 0x4 #define PCIE500_REG_BASE 0xC00 -#define PCIE500_REG_SIZE (0x1000 - PCIE500_REG_BASE) +#define PCIE500_ALL_SIZE 0x1000 +#define PCIE500_REG_SIZE (PCIE500_ALL_SIZE - PCIE500_REG_BASE) #define PPCE500_PCI_CONFIG_ADDR 0x0 #define PPCE500_PCI_CONFIG_DATA 0x4 @@ -73,11 +73,15 @@ struct pci_inbound { }; struct PPCE500PCIState { + PCIHostState pci_state; struct pci_outbound pob[PPCE500_PCI_NR_POBS]; struct pci_inbound pib[PPCE500_PCI_NR_PIBS]; uint32_t gasket_time; - PCIHostState pci_state; - PCIDevice *pci_dev; + qemu_irq irq[4]; + /* mmio maps */ + int cfgaddr; + int cfgdata; + int reg; }; typedef struct PPCE500PCIState PPCE500PCIState; @@ -250,7 +254,6 @@ static const VMStateDescription vmstate_ppce500_pci = { .minimum_version_id = 1, .minimum_version_id_old = 1, .fields = (VMStateField[]) { - VMSTATE_PCI_DEVICE_POINTER(pci_dev, PPCE500PCIState), VMSTATE_STRUCT_ARRAY(pob, PPCE500PCIState, PPCE500_PCI_NR_POBS, 1, vmstate_pci_outbound, struct pci_outbound), VMSTATE_STRUCT_ARRAY(pib, PPCE500PCIState, PPCE500_PCI_NR_PIBS, 1, @@ -260,60 +263,73 @@ static const VMStateDescription vmstate_ppce500_pci = { } }; -PCIBus *ppce500_pci_init(qemu_irq pci_irqs[4], target_phys_addr_t registers) +static void e500_pci_map(SysBusDevice *dev, target_phys_addr_t base) +{ + PCIHostState *h = FROM_SYSBUS(PCIHostState, sysbus_from_qdev(dev)); + PPCE500PCIState *s = DO_UPCAST(PPCE500PCIState, pci_state, h); + + cpu_register_physical_memory(base + PCIE500_CFGADDR, 4, s->cfgaddr); + cpu_register_physical_memory(base + PCIE500_CFGDATA, 4, s->cfgdata); + cpu_register_physical_memory(base + PCIE500_REG_BASE, PCIE500_REG_SIZE, + s->reg); +} + +static int e500_pcihost_initfn(SysBusDevice *dev) +{ + PCIHostState *h; + PPCE500PCIState *s; + PCIBus *b; + int i; + + h = FROM_SYSBUS(PCIHostState, sysbus_from_qdev(dev)); + s = DO_UPCAST(PPCE500PCIState, pci_state, h); + + for (i = 0; i < ARRAY_SIZE(s->irq); i++) { + sysbus_init_irq(dev, &s->irq[i]); + } + + b = pci_register_bus(&s->pci_state.busdev.qdev, NULL, mpc85xx_pci_set_irq, + mpc85xx_pci_map_irq, s->irq, PCI_DEVFN(0x11, 0), 4); + s->pci_state.bus = b; + + pci_create_simple(b, 0, "e500-host-bridge"); + + s->cfgaddr = pci_host_conf_register_mmio(&s->pci_state, DEVICE_BIG_ENDIAN); + s->cfgdata = pci_host_data_register_mmio(&s->pci_state, + DEVICE_LITTLE_ENDIAN); + s->reg = cpu_register_io_memory(e500_pci_reg_read, e500_pci_reg_write, s, + DEVICE_BIG_ENDIAN); + sysbus_init_mmio_cb(dev, PCIE500_ALL_SIZE, e500_pci_map); + + return 0; +} + +static int e500_host_bridge_initfn(PCIDevice *dev) +{ + pci_config_set_vendor_id(dev->config, PCI_VENDOR_ID_FREESCALE); + pci_config_set_device_id(dev->config, PCI_DEVICE_ID_MPC8533E); + pci_config_set_class(dev->config, PCI_CLASS_PROCESSOR_POWERPC); + + return 0; +} + +static PCIDeviceInfo e500_host_bridge_info = { + .qdev.name = "e500-host-bridge", + .qdev.desc = "Host bridge", + .qdev.size = sizeof(PCIDevice), + .init = e500_host_bridge_initfn, +}; + +static SysBusDeviceInfo e500_pcihost_info = { + .init = 
e500_pcihost_initfn, + .qdev.name = "e500-pcihost", + .qdev.size = sizeof(PPCE500PCIState), + .qdev.vmsd = &vmstate_ppce500_pci, +}; + +static void e500_pci_register(void) { - PPCE500PCIState *controller; - PCIDevice *d; - int index; - static int ppce500_pci_id; - - controller = qemu_mallocz(sizeof(PPCE500PCIState)); - - controller->pci_state.bus = pci_register_bus(NULL, "pci", - mpc85xx_pci_set_irq, - mpc85xx_pci_map_irq, - pci_irqs, PCI_DEVFN(0x11, 0), - 4); - d = pci_register_device(controller->pci_state.bus, - "host bridge", sizeof(PCIDevice), - 0, NULL, NULL); - - pci_config_set_vendor_id(d->config, PCI_VENDOR_ID_FREESCALE); - pci_config_set_device_id(d->config, PCI_DEVICE_ID_MPC8533E); - pci_config_set_class(d->config, PCI_CLASS_PROCESSOR_POWERPC); - - controller->pci_dev = d; - - /* CFGADDR */ - index = pci_host_conf_register_mmio(&controller->pci_state, - DEVICE_BIG_ENDIAN); - if (index < 0) - goto free; - cpu_register_physical_memory(registers + PCIE500_CFGADDR, 4, index); - - /* CFGDATA */ - index = pci_host_data_register_mmio(&controller->pci_state, - DEVICE_BIG_ENDIAN); - if (index < 0) - goto free; - cpu_register_physical_memory(registers + PCIE500_CFGDATA, 4, index); - - index = cpu_register_io_memory(e500_pci_reg_read, - e500_pci_reg_write, controller, - DEVICE_NATIVE_ENDIAN); - if (index < 0) - goto free; - cpu_register_physical_memory(registers + PCIE500_REG_BASE, - PCIE500_REG_SIZE, index); - - /* XXX load/save code not tested. */ - vmstate_register(&d->qdev, ppce500_pci_id++, &vmstate_ppce500_pci, - controller); - - return controller->pci_state.bus; - -free: - printf("%s error\n", __func__); - qemu_free(controller); - return NULL; + sysbus_register_withprop(&e500_pcihost_info); + pci_qdev_register(&e500_host_bridge_info); } +device_init(e500_pci_register); diff --git a/hw/qxl-render.c b/hw/qxl-render.c index 58965e0179..1316066599 100644 --- a/hw/qxl-render.c +++ b/hw/qxl-render.c @@ -185,7 +185,6 @@ void qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext) QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); QXLCursor *cursor; QEMUCursor *c; - int x = -1, y = -1; if (!qxl->ssd.ds->mouse_set || !qxl->ssd.ds->cursor_define) { return; @@ -198,8 +197,6 @@ void qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext) } switch (cmd->type) { case QXL_CURSOR_SET: - x = cmd->u.set.position.x; - y = cmd->u.set.position.y; cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id); if (cursor->chunk.data_size != cursor->data_size) { fprintf(stderr, "%s: multiple chunks\n", __FUNCTION__); @@ -209,18 +206,20 @@ void qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext) if (c == NULL) { c = cursor_builtin_left_ptr(); } - qemu_mutex_lock_iothread(); - qxl->ssd.ds->cursor_define(c); - qxl->ssd.ds->mouse_set(x, y, 1); - qemu_mutex_unlock_iothread(); - cursor_put(c); + qemu_mutex_lock(&qxl->ssd.lock); + if (qxl->ssd.cursor) { + cursor_put(qxl->ssd.cursor); + } + qxl->ssd.cursor = c; + qxl->ssd.mouse_x = cmd->u.set.position.x; + qxl->ssd.mouse_y = cmd->u.set.position.y; + qemu_mutex_unlock(&qxl->ssd.lock); break; case QXL_CURSOR_MOVE: - x = cmd->u.position.x; - y = cmd->u.position.y; - qemu_mutex_lock_iothread(); - qxl->ssd.ds->mouse_set(x, y, 1); - qemu_mutex_unlock_iothread(); + qemu_mutex_lock(&qxl->ssd.lock); + qxl->ssd.mouse_x = cmd->u.position.x; + qxl->ssd.mouse_y = cmd->u.position.y; + qemu_mutex_unlock(&qxl->ssd.lock); break; } } @@ -343,18 +343,24 @@ static int interface_get_command(QXLInstance *sin, struct QXLCommandExt *ext) SimpleSpiceUpdate *update; 
QXLCommandRing *ring; QXLCommand *cmd; - int notify; + int notify, ret; switch (qxl->mode) { case QXL_MODE_VGA: dprint(qxl, 2, "%s: vga\n", __FUNCTION__); - update = qemu_spice_create_update(&qxl->ssd); - if (update == NULL) { - return false; + ret = false; + qemu_mutex_lock(&qxl->ssd.lock); + if (qxl->ssd.update != NULL) { + update = qxl->ssd.update; + qxl->ssd.update = NULL; + *ext = update->ext; + ret = true; } - *ext = update->ext; - qxl_log_command(qxl, "vga", ext); - return true; + qemu_mutex_unlock(&qxl->ssd.lock); + if (ret) { + qxl_log_command(qxl, "vga", ext); + } + return ret; case QXL_MODE_COMPAT: case QXL_MODE_NATIVE: case QXL_MODE_UNDEFINED: @@ -662,10 +668,8 @@ static void qxl_hard_reset(PCIQXLDevice *d, int loadvm) dprint(d, 1, "%s: start%s\n", __FUNCTION__, loadvm ? " (loadvm)" : ""); - qemu_mutex_unlock_iothread(); d->ssd.worker->reset_cursor(d->ssd.worker); d->ssd.worker->reset_image_cache(d->ssd.worker); - qemu_mutex_lock_iothread(); qxl_reset_surfaces(d); qxl_reset_memslots(d); @@ -795,9 +799,7 @@ static void qxl_reset_surfaces(PCIQXLDevice *d) { dprint(d, 1, "%s:\n", __FUNCTION__); d->mode = QXL_MODE_UNDEFINED; - qemu_mutex_unlock_iothread(); d->ssd.worker->destroy_surfaces(d->ssd.worker); - qemu_mutex_lock_iothread(); memset(&d->guest_surfaces.cmds, 0, sizeof(d->guest_surfaces.cmds)); } @@ -866,9 +868,7 @@ static void qxl_destroy_primary(PCIQXLDevice *d) dprint(d, 1, "%s\n", __FUNCTION__); d->mode = QXL_MODE_UNDEFINED; - qemu_mutex_unlock_iothread(); d->ssd.worker->destroy_primary_surface(d->ssd.worker, 0); - qemu_mutex_lock_iothread(); } static void qxl_set_mode(PCIQXLDevice *d, int modenr, int loadvm) @@ -938,10 +938,8 @@ static void ioport_write(void *opaque, uint32_t addr, uint32_t val) case QXL_IO_UPDATE_AREA: { QXLRect update = d->ram->update_area; - qemu_mutex_unlock_iothread(); d->ssd.worker->update_area(d->ssd.worker, d->ram->update_surface, &update, NULL, 0, 0); - qemu_mutex_lock_iothread(); break; } case QXL_IO_NOTIFY_CMD: @@ -1303,6 +1301,9 @@ static int qxl_init_primary(PCIDevice *dev) vga->ds = graphic_console_init(qxl_hw_update, qxl_hw_invalidate, qxl_hw_screen_dump, qxl_hw_text_update, qxl); qxl->ssd.ds = vga->ds; + qemu_mutex_init(&qxl->ssd.lock); + qxl->ssd.mouse_x = -1; + qxl->ssd.mouse_y = -1; qxl->ssd.bufsize = (16 * 1024 * 1024); qxl->ssd.buf = qemu_malloc(qxl->ssd.bufsize); diff --git a/hw/realview.c b/hw/realview.c index 96fb9da241..82f3d82d44 100644 --- a/hw/realview.c +++ b/hw/realview.c @@ -17,7 +17,6 @@ #include "sysemu.h" #include "boards.h" #include "bitbang_i2c.h" -#include "sysbus.h" #include "blockdev.h" #define SMP_BOOT_ADDR 0xe0000000 diff --git a/hw/rtl8139.c b/hw/rtl8139.c index 34e3a9eec7..5214b8cb7c 100644 --- a/hw/rtl8139.c +++ b/hw/rtl8139.c @@ -1399,7 +1399,7 @@ static void rtl8139_ChipCmd_write(RTL8139State *s, uint32_t val) s->currCPlusTxDesc = 0; } - /* mask unwriteable bits */ + /* mask unwritable bits */ val = SET_MASKED(val, 0xe3, s->bChipCmdState); /* Deassert reset pin before next read */ @@ -1443,7 +1443,7 @@ static void rtl8139_CpCmd_write(RTL8139State *s, uint32_t val) s->cplus_enabled = 1; - /* mask unwriteable bits */ + /* mask unwritable bits */ val = SET_MASKED(val, 0xff84, s->CpCmd); s->CpCmd = val; @@ -1472,7 +1472,7 @@ static uint32_t rtl8139_IntrMitigate_read(RTL8139State *s) return ret; } -static int rtl8139_config_writeable(RTL8139State *s) +static int rtl8139_config_writable(RTL8139State *s) { if (s->Cfg9346 & Cfg9346_Unlock) { @@ -1490,10 +1490,10 @@ static void rtl8139_BasicModeCtrl_write(RTL8139State 
*s, uint32_t val) DPRINTF("BasicModeCtrl register write(w) val=0x%04x\n", val); - /* mask unwriteable bits */ + /* mask unwritable bits */ uint32_t mask = 0x4cff; - if (1 || !rtl8139_config_writeable(s)) + if (1 || !rtl8139_config_writable(s)) { /* Speed setting and autonegotiation enable bits are read-only */ mask |= 0x3000; @@ -1521,7 +1521,7 @@ static void rtl8139_BasicModeStatus_write(RTL8139State *s, uint32_t val) DPRINTF("BasicModeStatus register write(w) val=0x%04x\n", val); - /* mask unwriteable bits */ + /* mask unwritable bits */ val = SET_MASKED(val, 0xff3f, s->BasicModeStatus); s->BasicModeStatus = val; @@ -1542,7 +1542,7 @@ static void rtl8139_Cfg9346_write(RTL8139State *s, uint32_t val) DPRINTF("Cfg9346 write val=0x%02x\n", val); - /* mask unwriteable bits */ + /* mask unwritable bits */ val = SET_MASKED(val, 0x31, s->Cfg9346); uint32_t opmode = val & 0xc0; @@ -1594,10 +1594,11 @@ static void rtl8139_Config0_write(RTL8139State *s, uint32_t val) DPRINTF("Config0 write val=0x%02x\n", val); - if (!rtl8139_config_writeable(s)) + if (!rtl8139_config_writable(s)) { return; + } - /* mask unwriteable bits */ + /* mask unwritable bits */ val = SET_MASKED(val, 0xf8, s->Config0); s->Config0 = val; @@ -1618,10 +1619,11 @@ static void rtl8139_Config1_write(RTL8139State *s, uint32_t val) DPRINTF("Config1 write val=0x%02x\n", val); - if (!rtl8139_config_writeable(s)) + if (!rtl8139_config_writable(s)) { return; + } - /* mask unwriteable bits */ + /* mask unwritable bits */ val = SET_MASKED(val, 0xC, s->Config1); s->Config1 = val; @@ -1642,10 +1644,11 @@ static void rtl8139_Config3_write(RTL8139State *s, uint32_t val) DPRINTF("Config3 write val=0x%02x\n", val); - if (!rtl8139_config_writeable(s)) + if (!rtl8139_config_writable(s)) { return; + } - /* mask unwriteable bits */ + /* mask unwritable bits */ val = SET_MASKED(val, 0x8F, s->Config3); s->Config3 = val; @@ -1666,10 +1669,11 @@ static void rtl8139_Config4_write(RTL8139State *s, uint32_t val) DPRINTF("Config4 write val=0x%02x\n", val); - if (!rtl8139_config_writeable(s)) + if (!rtl8139_config_writable(s)) { return; + } - /* mask unwriteable bits */ + /* mask unwritable bits */ val = SET_MASKED(val, 0x0a, s->Config4); s->Config4 = val; @@ -1690,7 +1694,7 @@ static void rtl8139_Config5_write(RTL8139State *s, uint32_t val) DPRINTF("Config5 write val=0x%02x\n", val); - /* mask unwriteable bits */ + /* mask unwritable bits */ val = SET_MASKED(val, 0x80, s->Config5); s->Config5 = val; @@ -1743,7 +1747,7 @@ static void rtl8139_RxConfig_write(RTL8139State *s, uint32_t val) { DPRINTF("RxConfig write val=0x%08x\n", val); - /* mask unwriteable bits */ + /* mask unwritable bits */ val = SET_MASKED(val, 0xf0fc0040, s->RxConfig); s->RxConfig = val; @@ -2610,7 +2614,7 @@ static void rtl8139_IntrMask_write(RTL8139State *s, uint32_t val) { DPRINTF("IntrMask write(w) val=0x%04x\n", val); - /* mask unwriteable bits */ + /* mask unwritable bits */ val = SET_MASKED(val, 0x1e00, s->IntrMask); s->IntrMask = val; @@ -2642,7 +2646,7 @@ static void rtl8139_IntrStatus_write(RTL8139State *s, uint32_t val) #else uint16_t newStatus = s->IntrStatus & ~val; - /* mask unwriteable bits */ + /* mask unwritable bits */ newStatus = SET_MASKED(newStatus, 0x1e00, s->IntrStatus); /* writing 1 to interrupt status register bit clears it */ @@ -2686,7 +2690,7 @@ static void rtl8139_MultiIntr_write(RTL8139State *s, uint32_t val) { DPRINTF("MultiIntr write(w) val=0x%04x\n", val); - /* mask unwriteable bits */ + /* mask unwritable bits */ val = SET_MASKED(val, 0xf000, 
s->MultiIntr); s->MultiIntr = val; diff --git a/hw/s390-virtio-bus.c b/hw/s390-virtio-bus.c index bb49e393ec..d4a12f7531 100644 --- a/hw/s390-virtio-bus.c +++ b/hw/s390-virtio-bus.c @@ -60,6 +60,9 @@ static const VirtIOBindings virtio_s390_bindings; static ram_addr_t s390_virtio_device_num_vq(VirtIOS390Device *dev); +/* length of VirtIO device pages */ +const target_phys_addr_t virtio_size = S390_DEVICE_PAGES * TARGET_PAGE_SIZE; + VirtIOS390Bus *s390_virtio_bus_init(ram_addr_t *ram_size) { VirtIOS390Bus *bus; diff --git a/hw/s390-virtio-bus.h b/hw/s390-virtio-bus.h index edf6d04570..0c412d08e1 100644 --- a/hw/s390-virtio-bus.h +++ b/hw/s390-virtio-bus.h @@ -33,7 +33,7 @@ #define VIRTIO_VQCONFIG_LEN 24 #define VIRTIO_RING_LEN (TARGET_PAGE_SIZE * 3) -#define S390_DEVICE_PAGES 256 +#define S390_DEVICE_PAGES 512 typedef struct VirtIOS390Device { DeviceState qdev; diff --git a/hw/s390-virtio.c b/hw/s390-virtio.c index 698ff6f345..3eba7ea1e8 100644 --- a/hw/s390-virtio.c +++ b/hw/s390-virtio.c @@ -131,7 +131,7 @@ int s390_virtio_hypercall(CPUState *env, uint64_t mem, uint64_t hypercall) } /* PC hardware initialisation */ -static void s390_init(ram_addr_t ram_size, +static void s390_init(ram_addr_t my_ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, @@ -143,19 +143,29 @@ static void s390_init(ram_addr_t ram_size, ram_addr_t kernel_size = 0; ram_addr_t initrd_offset; ram_addr_t initrd_size = 0; + int shift = 0; uint8_t *storage_keys; int i; + /* s390x ram size detection needs a 16bit multiplier + an increment. So + guests > 64GB can be specified in 2MB steps etc. */ + while ((my_ram_size >> (20 + shift)) > 65535) { + shift++; + } + my_ram_size = my_ram_size >> (20 + shift) << (20 + shift); + + /* lets propagate the changed ram size into the global variable. */ + ram_size = my_ram_size; /* get a BUS */ - s390_bus = s390_virtio_bus_init(&ram_size); + s390_bus = s390_virtio_bus_init(&my_ram_size); /* allocate RAM */ - ram_addr = qemu_ram_alloc(NULL, "s390.ram", ram_size); - cpu_register_physical_memory(0, ram_size, ram_addr); + ram_addr = qemu_ram_alloc(NULL, "s390.ram", my_ram_size); + cpu_register_physical_memory(0, my_ram_size, ram_addr); /* allocate storage keys */ - storage_keys = qemu_mallocz(ram_size / TARGET_PAGE_SIZE); + storage_keys = qemu_mallocz(my_ram_size / TARGET_PAGE_SIZE); /* init CPUs */ if (cpu_model == NULL) { diff --git a/hw/scsi-bus.c b/hw/scsi-bus.c index ceeb4ecb91..ad6a730be0 100644 --- a/hw/scsi-bus.c +++ b/hw/scsi-bus.c @@ -4,6 +4,7 @@ #include "scsi-defs.h" #include "qdev.h" #include "blockdev.h" +#include "trace.h" static char *scsibus_get_fw_dev_path(DeviceState *dev); @@ -20,13 +21,13 @@ static int next_scsi_bus; /* Create a scsi bus, and attach devices to it. 
*/ void scsi_bus_new(SCSIBus *bus, DeviceState *host, int tcq, int ndev, - scsi_completionfn complete) + const SCSIBusOps *ops) { qbus_create_inplace(&bus->qbus, &scsi_bus_info, host, NULL); bus->busnr = next_scsi_bus++; bus->tcq = tcq; bus->ndev = ndev; - bus->complete = complete; + bus->ops = ops; bus->qbus.allow_hotplug = 1; } @@ -135,42 +136,60 @@ SCSIRequest *scsi_req_alloc(size_t size, SCSIDevice *d, uint32_t tag, uint32_t l SCSIRequest *req; req = qemu_mallocz(size); + req->refcount = 1; req->bus = scsi_bus_from_device(d); req->dev = d; req->tag = tag; req->lun = lun; req->status = -1; - req->enqueued = true; - QTAILQ_INSERT_TAIL(&d->requests, req, next); + trace_scsi_req_alloc(req->dev->id, req->lun, req->tag); return req; } -SCSIRequest *scsi_req_find(SCSIDevice *d, uint32_t tag) +SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun) { - SCSIRequest *req; + return d->info->alloc_req(d, tag, lun); +} - QTAILQ_FOREACH(req, &d->requests, next) { - if (req->tag == tag) { - return req; - } +uint8_t *scsi_req_get_buf(SCSIRequest *req) +{ + return req->dev->info->get_buf(req); +} + +int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len) +{ + if (req->dev->info->get_sense) { + return req->dev->info->get_sense(req, buf, len); + } else { + return 0; } - return NULL; +} + +int32_t scsi_req_enqueue(SCSIRequest *req, uint8_t *buf) +{ + int32_t rc; + + assert(!req->enqueued); + scsi_req_ref(req); + req->enqueued = true; + QTAILQ_INSERT_TAIL(&req->dev->requests, req, next); + + scsi_req_ref(req); + rc = req->dev->info->send_command(req, buf); + scsi_req_unref(req); + return rc; } static void scsi_req_dequeue(SCSIRequest *req) { + trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag); if (req->enqueued) { QTAILQ_REMOVE(&req->dev->requests, req, next); req->enqueued = false; + scsi_req_unref(req); } } -void scsi_req_free(SCSIRequest *req) -{ - scsi_req_dequeue(req); - qemu_free(req); -} - static int scsi_req_length(SCSIRequest *req, uint8_t *cmd) { switch (cmd[0] >> 5) { @@ -195,6 +214,7 @@ static int scsi_req_length(SCSIRequest *req, uint8_t *cmd) req->cmd.len = 12; break; default: + trace_scsi_req_parse_bad(req->dev->id, req->lun, req->tag, cmd[0]); return -1; } @@ -392,9 +412,104 @@ int scsi_req_parse(SCSIRequest *req, uint8_t *buf) memcpy(req->cmd.buf, buf, req->cmd.len); scsi_req_xfer_mode(req); req->cmd.lba = scsi_req_lba(req); + trace_scsi_req_parsed(req->dev->id, req->lun, req->tag, buf[0], + req->cmd.mode, req->cmd.xfer); + if (req->cmd.lba != -1) { + trace_scsi_req_parsed_lba(req->dev->id, req->lun, req->tag, buf[0], + req->cmd.lba); + } return 0; } +/* + * Predefined sense codes + */ + +/* No sense data available */ +const struct SCSISense sense_code_NO_SENSE = { + .key = NO_SENSE , .asc = 0x00 , .ascq = 0x00 +}; + +/* LUN not ready, Manual intervention required */ +const struct SCSISense sense_code_LUN_NOT_READY = { + .key = NOT_READY, .asc = 0x04, .ascq = 0x03 +}; + +/* LUN not ready, Medium not present */ +const struct SCSISense sense_code_NO_MEDIUM = { + .key = NOT_READY, .asc = 0x3a, .ascq = 0x00 +}; + +/* Hardware error, internal target failure */ +const struct SCSISense sense_code_TARGET_FAILURE = { + .key = HARDWARE_ERROR, .asc = 0x44, .ascq = 0x00 +}; + +/* Illegal request, invalid command operation code */ +const struct SCSISense sense_code_INVALID_OPCODE = { + .key = ILLEGAL_REQUEST, .asc = 0x20, .ascq = 0x00 +}; + +/* Illegal request, LBA out of range */ +const struct SCSISense sense_code_LBA_OUT_OF_RANGE = { + .key = ILLEGAL_REQUEST, .asc = 0x21, 
.ascq = 0x00 +}; + +/* Illegal request, Invalid field in CDB */ +const struct SCSISense sense_code_INVALID_FIELD = { + .key = ILLEGAL_REQUEST, .asc = 0x24, .ascq = 0x00 +}; + +/* Illegal request, LUN not supported */ +const struct SCSISense sense_code_LUN_NOT_SUPPORTED = { + .key = ILLEGAL_REQUEST, .asc = 0x25, .ascq = 0x00 +}; + +/* Command aborted, I/O process terminated */ +const struct SCSISense sense_code_IO_ERROR = { + .key = ABORTED_COMMAND, .asc = 0x00, .ascq = 0x06 +}; + +/* Command aborted, I_T Nexus loss occurred */ +const struct SCSISense sense_code_I_T_NEXUS_LOSS = { + .key = ABORTED_COMMAND, .asc = 0x29, .ascq = 0x07 +}; + +/* Command aborted, Logical Unit failure */ +const struct SCSISense sense_code_LUN_FAILURE = { + .key = ABORTED_COMMAND, .asc = 0x3e, .ascq = 0x01 +}; + +/* + * scsi_build_sense + * + * Build a sense buffer + */ +int scsi_build_sense(SCSISense sense, uint8_t *buf, int len, int fixed) +{ + if (!fixed && len < 8) { + return 0; + } + + memset(buf, 0, len); + if (fixed) { + /* Return fixed format sense buffer */ + buf[0] = 0xf0; + buf[2] = sense.key; + buf[7] = 7; + buf[12] = sense.asc; + buf[13] = sense.ascq; + return MIN(len, 18); + } else { + /* Return descriptor format sense buffer */ + buf[0] = 0x72; + buf[1] = sense.key; + buf[2] = sense.asc; + buf[3] = sense.ascq; + return 8; + } +} + static const char *scsi_command_name(uint8_t cmd) { static const char *names[] = { @@ -489,6 +604,43 @@ static const char *scsi_command_name(uint8_t cmd) return names[cmd]; } +SCSIRequest *scsi_req_ref(SCSIRequest *req) +{ + req->refcount++; + return req; +} + +void scsi_req_unref(SCSIRequest *req) +{ + if (--req->refcount == 0) { + if (req->dev->info->free_req) { + req->dev->info->free_req(req); + } + qemu_free(req); + } +} + +/* Tell the device that we finished processing this chunk of I/O. It + will start the next chunk or complete the command. */ +void scsi_req_continue(SCSIRequest *req) +{ + trace_scsi_req_continue(req->dev->id, req->lun, req->tag); + if (req->cmd.mode == SCSI_XFER_TO_DEV) { + req->dev->info->write_data(req); + } else { + req->dev->info->read_data(req); + } +} + +/* Called by the devices when data is ready for the HBA. The HBA should + start a DMA operation to read or fill the device's data buffer. + Once it completes, calling scsi_req_continue will restart I/O. 
*/ +void scsi_req_data(SCSIRequest *req, int len) +{ + trace_scsi_req_data(req->dev->id, req->lun, req->tag, len); + req->bus->ops->transfer_data(req, len); +} + void scsi_req_print(SCSIRequest *req) { FILE *fp = stderr; @@ -520,10 +672,42 @@ void scsi_req_print(SCSIRequest *req) void scsi_req_complete(SCSIRequest *req) { assert(req->status != -1); + scsi_req_ref(req); scsi_req_dequeue(req); - req->bus->complete(req->bus, SCSI_REASON_DONE, - req->tag, - req->status); + req->bus->ops->complete(req, req->status); + scsi_req_unref(req); +} + +void scsi_req_cancel(SCSIRequest *req) +{ + if (req->dev && req->dev->info->cancel_io) { + req->dev->info->cancel_io(req); + } + scsi_req_ref(req); + scsi_req_dequeue(req); + if (req->bus->ops->cancel) { + req->bus->ops->cancel(req); + } + scsi_req_unref(req); +} + +void scsi_req_abort(SCSIRequest *req, int status) +{ + req->status = status; + if (req->dev && req->dev->info->cancel_io) { + req->dev->info->cancel_io(req); + } + scsi_req_complete(req); +} + +void scsi_device_purge_requests(SCSIDevice *sdev) +{ + SCSIRequest *req; + + while (!QTAILQ_EMPTY(&sdev->requests)) { + req = QTAILQ_FIRST(&sdev->requests); + scsi_req_cancel(req); + } } static char *scsibus_get_fw_dev_path(DeviceState *dev) diff --git a/hw/scsi-disk.c b/hw/scsi-disk.c index b05e6547df..a8c7372d3e 100644 --- a/hw/scsi-disk.c +++ b/hw/scsi-disk.c @@ -49,14 +49,8 @@ do { fprintf(stderr, "scsi-disk: " fmt , ## __VA_ARGS__); } while (0) typedef struct SCSIDiskState SCSIDiskState; -typedef struct SCSISense { - uint8_t key; -} SCSISense; - typedef struct SCSIDiskReq { SCSIRequest req; - /* ??? We should probably keep track of whether the data transfer is - a read or a write. Currently we rely on the host getting it right. */ /* Both sector and sector_count are in terms of qemu 512 byte blocks. 
*/ uint64_t sector; uint32_t sector_count; @@ -65,6 +59,8 @@ typedef struct SCSIDiskReq { uint32_t status; } SCSIDiskReq; +typedef enum { SCSI_HD, SCSI_CD } SCSIDriveKind; + struct SCSIDiskState { SCSIDevice qdev; @@ -78,32 +74,30 @@ struct SCSIDiskState char *version; char *serial; SCSISense sense; + SCSIDriveKind drive_kind; }; static int scsi_handle_rw_error(SCSIDiskReq *r, int error, int type); static int scsi_disk_emulate_command(SCSIDiskReq *r, uint8_t *outbuf); -static SCSIDiskReq *scsi_new_request(SCSIDiskState *s, uint32_t tag, +static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun) { + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); SCSIRequest *req; SCSIDiskReq *r; req = scsi_req_alloc(sizeof(SCSIDiskReq), &s->qdev, tag, lun); r = DO_UPCAST(SCSIDiskReq, req, req); r->iov.iov_base = qemu_blockalign(s->bs, SCSI_DMA_BUF_SIZE); - return r; + return req; } -static void scsi_remove_request(SCSIDiskReq *r) +static void scsi_free_request(SCSIRequest *req) { - qemu_vfree(r->iov.iov_base); - scsi_req_free(&r->req); -} + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); -static SCSIDiskReq *scsi_find_request(SCSIDiskState *s, uint32_t tag) -{ - return DO_UPCAST(SCSIDiskReq, req, scsi_req_find(&s->qdev, tag)); + qemu_vfree(r->iov.iov_base); } static void scsi_disk_clear_sense(SCSIDiskState *s) @@ -111,42 +105,33 @@ static void scsi_disk_clear_sense(SCSIDiskState *s) memset(&s->sense, 0, sizeof(s->sense)); } -static void scsi_disk_set_sense(SCSIDiskState *s, uint8_t key) -{ - s->sense.key = key; -} - -static void scsi_req_set_status(SCSIDiskReq *r, int status, int sense_code) +static void scsi_req_set_status(SCSIDiskReq *r, int status, SCSISense sense) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); r->req.status = status; - scsi_disk_set_sense(s, sense_code); + s->sense = sense; } /* Helper function for command completion. */ -static void scsi_command_complete(SCSIDiskReq *r, int status, int sense) +static void scsi_command_complete(SCSIDiskReq *r, int status, SCSISense sense) { - DPRINTF("Command complete tag=0x%x status=%d sense=%d\n", - r->req.tag, status, sense); + DPRINTF("Command complete tag=0x%x status=%d sense=%d/%d/%d\n", + r->req.tag, status, sense.key, sense.asc, sense.ascq); scsi_req_set_status(r, status, sense); scsi_req_complete(&r->req); - scsi_remove_request(r); } /* Cancel a pending data transfer. */ -static void scsi_cancel_io(SCSIDevice *d, uint32_t tag) +static void scsi_cancel_io(SCSIRequest *req) { - SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); - SCSIDiskReq *r; - DPRINTF("Cancel tag=0x%x\n", tag); - r = scsi_find_request(s, tag); - if (r) { - if (r->req.aiocb) - bdrv_aio_cancel(r->req.aiocb); - r->req.aiocb = NULL; - scsi_remove_request(r); + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); + + DPRINTF("Cancel tag=0x%x\n", req->tag); + if (r->req.aiocb) { + bdrv_aio_cancel(r->req.aiocb); } + r->req.aiocb = NULL; } static void scsi_read_complete(void * opaque, int ret) @@ -167,30 +152,38 @@ static void scsi_read_complete(void * opaque, int ret) n = r->iov.iov_len / 512; r->sector += n; r->sector_count -= n; - r->req.bus->complete(r->req.bus, SCSI_REASON_DATA, r->req.tag, r->iov.iov_len); + scsi_req_data(&r->req, r->iov.iov_len); } -static void scsi_read_request(SCSIDiskReq *r) +/* Read more data from scsi device into buffer. 
*/ +static void scsi_read_data(SCSIRequest *req) { + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; if (r->sector_count == (uint32_t)-1) { DPRINTF("Read buf_len=%zd\n", r->iov.iov_len); r->sector_count = 0; - r->req.bus->complete(r->req.bus, SCSI_REASON_DATA, r->req.tag, r->iov.iov_len); + scsi_req_data(&r->req, r->iov.iov_len); return; } DPRINTF("Read sector_count=%d\n", r->sector_count); if (r->sector_count == 0) { - scsi_command_complete(r, GOOD, NO_SENSE); + scsi_command_complete(r, GOOD, SENSE_CODE(NO_SENSE)); return; } /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); + if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { + DPRINTF("Data transfer direction invalid\n"); + scsi_read_complete(r, -EINVAL); + return; + } + n = r->sector_count; if (n > SCSI_DMA_BUF_SIZE / 512) n = SCSI_DMA_BUF_SIZE / 512; @@ -204,23 +197,6 @@ static void scsi_read_request(SCSIDiskReq *r) } } -/* Read more data from scsi device into buffer. */ -static void scsi_read_data(SCSIDevice *d, uint32_t tag) -{ - SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); - SCSIDiskReq *r; - - r = scsi_find_request(s, tag); - if (!r) { - BADF("Bad read tag 0x%x\n", tag); - /* ??? This is the wrong error. */ - scsi_command_complete(r, CHECK_CONDITION, HARDWARE_ERROR); - return; - } - - scsi_read_request(r); -} - static int scsi_handle_rw_error(SCSIDiskReq *r, int error, int type) { int is_read = (type == SCSI_REQ_STATUS_RETRY_READ); @@ -242,13 +218,24 @@ static int scsi_handle_rw_error(SCSIDiskReq *r, int error, int type) vm_stop(VMSTOP_DISKFULL); } else { if (type == SCSI_REQ_STATUS_RETRY_READ) { - r->req.bus->complete(r->req.bus, SCSI_REASON_DATA, r->req.tag, 0); + scsi_req_data(&r->req, 0); + } + switch (error) { + case ENOMEM: + scsi_command_complete(r, CHECK_CONDITION, + SENSE_CODE(TARGET_FAILURE)); + break; + case EINVAL: + scsi_command_complete(r, CHECK_CONDITION, + SENSE_CODE(INVALID_FIELD)); + break; + default: + scsi_command_complete(r, CHECK_CONDITION, + SENSE_CODE(IO_ERROR)); + break; } - scsi_command_complete(r, CHECK_CONDITION, - HARDWARE_ERROR); bdrv_mon_event(s->bs, BDRV_ACTION_REPORT, is_read); } - return 1; } @@ -270,7 +257,7 @@ static void scsi_write_complete(void * opaque, int ret) r->sector += n; r->sector_count -= n; if (r->sector_count == 0) { - scsi_command_complete(r, GOOD, NO_SENSE); + scsi_command_complete(r, GOOD, SENSE_CODE(NO_SENSE)); } else { len = r->sector_count * 512; if (len > SCSI_DMA_BUF_SIZE) { @@ -278,25 +265,32 @@ static void scsi_write_complete(void * opaque, int ret) } r->iov.iov_len = len; DPRINTF("Write complete tag=0x%x more=%d\n", r->req.tag, len); - r->req.bus->complete(r->req.bus, SCSI_REASON_DATA, r->req.tag, len); + scsi_req_data(&r->req, len); } } -static void scsi_write_request(SCSIDiskReq *r) +static void scsi_write_data(SCSIRequest *req) { + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); + if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { + DPRINTF("Data transfer direction invalid\n"); + scsi_write_complete(r, -EINVAL); + return; + } + n = r->iov.iov_len / 512; if (n) { qemu_iovec_init_external(&r->qiov, &r->iov, 1); r->req.aiocb = bdrv_aio_writev(s->bs, r->sector, &r->qiov, n, scsi_write_complete, r); if (r->req.aiocb == NULL) { - scsi_write_complete(r, -EIO); + scsi_write_complete(r, -ENOMEM); } } else { /* Invoke 
completion routine to fetch data from host. */ @@ -304,26 +298,6 @@ static void scsi_write_request(SCSIDiskReq *r) } } -/* Write data to a scsi device. Returns nonzero on failure. - The transfer may complete asynchronously. */ -static int scsi_write_data(SCSIDevice *d, uint32_t tag) -{ - SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); - SCSIDiskReq *r; - - DPRINTF("Write data tag=0x%x\n", tag); - r = scsi_find_request(s, tag); - if (!r) { - BADF("Bad write tag 0x%x\n", tag); - scsi_command_complete(r, CHECK_CONDITION, HARDWARE_ERROR); - return 1; - } - - scsi_write_request(r); - - return 0; -} - static void scsi_dma_restart_bh(void *opaque) { SCSIDiskState *s = opaque; @@ -344,15 +318,15 @@ static void scsi_dma_restart_bh(void *opaque) switch (status & SCSI_REQ_STATUS_RETRY_TYPE_MASK) { case SCSI_REQ_STATUS_RETRY_READ: - scsi_read_request(r); + scsi_read_data(&r->req); break; case SCSI_REQ_STATUS_RETRY_WRITE: - scsi_write_request(r); + scsi_write_data(&r->req); break; case SCSI_REQ_STATUS_RETRY_FLUSH: ret = scsi_disk_emulate_command(r, r->iov.iov_base); if (ret == 0) { - scsi_command_complete(r, GOOD, NO_SENSE); + scsi_command_complete(r, GOOD, SENSE_CODE(NO_SENSE)); } } } @@ -373,19 +347,21 @@ static void scsi_dma_restart_cb(void *opaque, int running, int reason) } /* Return a pointer to the data buffer. */ -static uint8_t *scsi_get_buf(SCSIDevice *d, uint32_t tag) +static uint8_t *scsi_get_buf(SCSIRequest *req) { - SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); - SCSIDiskReq *r; + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); - r = scsi_find_request(s, tag); - if (!r) { - BADF("Bad buffer tag 0x%x\n", tag); - return NULL; - } return (uint8_t *)r->iov.iov_base; } +/* Copy sense information into the provided buffer */ +static int scsi_get_sense(SCSIRequest *req, uint8_t *outbuf, int len) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); + + return scsi_build_sense(s->sense, outbuf, len, len > 14); +} + static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); @@ -406,7 +382,7 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) return -1; } - if (bdrv_get_type_hint(s->bs) == BDRV_TYPE_CDROM) { + if (s->drive_kind == SCSI_CD) { outbuf[buflen++] = 5; } else { outbuf[buflen++] = 0; @@ -424,7 +400,7 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) outbuf[buflen++] = 0x00; // list of supported pages (this page) outbuf[buflen++] = 0x80; // unit serial number outbuf[buflen++] = 0x83; // device identification - if (bdrv_get_type_hint(s->bs) != BDRV_TYPE_CDROM) { + if (s->drive_kind == SCSI_HD) { outbuf[buflen++] = 0xb0; // block limits outbuf[buflen++] = 0xb2; // thin provisioning } @@ -477,7 +453,7 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) unsigned int opt_io_size = s->qdev.conf.opt_io_size / s->qdev.blocksize; - if (bdrv_get_type_hint(s->bs) == BDRV_TYPE_CDROM) { + if (s->drive_kind == SCSI_CD) { DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n", page_code); return -1; @@ -542,12 +518,12 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) memset(outbuf, 0, buflen); - if (req->lun || req->cmd.buf[1] >> 5) { + if (req->lun) { outbuf[0] = 0x7f; /* LUN not supported */ return buflen; } - if (bdrv_get_type_hint(s->bs) == BDRV_TYPE_CDROM) { + if (s->drive_kind == SCSI_CD) { outbuf[0] = 5; outbuf[1] = 0x80; memcpy(&outbuf[16], "QEMU CD-ROM ", 16); @@ -678,7 +654,7 @@ static int 
mode_sense_page(SCSIRequest *req, int page, uint8_t *p, return p[1] + 2; case 0x2a: /* CD Capabilities and Mechanical Status page. */ - if (bdrv_get_type_hint(bdrv) != BDRV_TYPE_CDROM) + if (s->drive_kind != SCSI_CD) return 0; p[0] = 0x2a; p[1] = 0x14; @@ -857,19 +833,8 @@ static int scsi_disk_emulate_command(SCSIDiskReq *r, uint8_t *outbuf) case REQUEST_SENSE: if (req->cmd.xfer < 4) goto illegal_request; - memset(outbuf, 0, 4); - buflen = 4; - if (s->sense.key == NOT_READY && req->cmd.xfer >= 18) { - memset(outbuf, 0, 18); - buflen = 18; - outbuf[7] = 10; - /* asc 0x3a, ascq 0: Medium not present */ - outbuf[12] = 0x3a; - outbuf[13] = 0; - } - outbuf[0] = 0xf0; - outbuf[1] = 0; - outbuf[2] = s->sense.key; + buflen = scsi_build_sense(s->sense, outbuf, req->cmd.xfer, + req->cmd.xfer > 13); scsi_disk_clear_sense(s); break; case INQUIRY: @@ -905,7 +870,7 @@ static int scsi_disk_emulate_command(SCSIDiskReq *r, uint8_t *outbuf) goto illegal_request; break; case START_STOP: - if (bdrv_get_type_hint(s->bs) == BDRV_TYPE_CDROM && (req->cmd.buf[4] & 2)) { + if (s->drive_kind == SCSI_CD && (req->cmd.buf[4] & 2)) { /* load/eject medium */ bdrv_eject(s->bs, !(req->cmd.buf[4] & 1)); } @@ -1007,17 +972,22 @@ static int scsi_disk_emulate_command(SCSIDiskReq *r, uint8_t *outbuf) } break; default: - goto illegal_request; + scsi_command_complete(r, CHECK_CONDITION, SENSE_CODE(INVALID_OPCODE)); + return -1; } - scsi_req_set_status(r, GOOD, NO_SENSE); + scsi_req_set_status(r, GOOD, SENSE_CODE(NO_SENSE)); return buflen; not_ready: - scsi_command_complete(r, CHECK_CONDITION, NOT_READY); + if (!bdrv_is_inserted(s->bs)) { + scsi_command_complete(r, CHECK_CONDITION, SENSE_CODE(NO_MEDIUM)); + } else { + scsi_command_complete(r, CHECK_CONDITION, SENSE_CODE(LUN_NOT_READY)); + } return -1; illegal_request: - scsi_command_complete(r, CHECK_CONDITION, ILLEGAL_REQUEST); + scsi_command_complete(r, CHECK_CONDITION, SENSE_CODE(INVALID_FIELD)); return -1; } @@ -1026,33 +996,23 @@ illegal_request: (eg. disk reads), negative for transfers to the device (eg. disk writes), and zero if the command does not transfer any data. */ -static int32_t scsi_send_command(SCSIDevice *d, uint32_t tag, - uint8_t *buf, int lun) +static int32_t scsi_send_command(SCSIRequest *req, uint8_t *buf) { - SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); - uint32_t len; - int is_write; + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); + int32_t len; uint8_t command; uint8_t *outbuf; - SCSIDiskReq *r; int rc; command = buf[0]; - r = scsi_find_request(s, tag); - if (r) { - BADF("Tag 0x%x already in use\n", tag); - scsi_cancel_io(d, tag); - } - /* ??? Tags are not unique for different luns. We only implement a - single lun, so this should not matter. */ - r = scsi_new_request(s, tag, lun); outbuf = (uint8_t *)r->iov.iov_base; - is_write = 0; DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]); if (scsi_req_parse(&r->req, buf) != 0) { BADF("Unsupported command length, command %x\n", command); - goto fail; + scsi_command_complete(r, CHECK_CONDITION, SENSE_CODE(INVALID_OPCODE)); + return 0; } #ifdef DEBUG_SCSI { @@ -1064,11 +1024,14 @@ static int32_t scsi_send_command(SCSIDevice *d, uint32_t tag, } #endif - if (lun || buf[1] >> 5) { + if (req->lun) { /* Only LUN 0 supported. */ - DPRINTF("Unimplemented LUN %d\n", lun ? 
lun : buf[1] >> 5); - if (command != REQUEST_SENSE && command != INQUIRY) - goto fail; + DPRINTF("Unimplemented LUN %d\n", req->lun); + if (command != REQUEST_SENSE && command != INQUIRY) { + scsi_command_complete(r, CHECK_CONDITION, + SENSE_CODE(LUN_NOT_SUPPORTED)); + return 0; + } } switch (command) { case TEST_UNIT_READY: @@ -1101,7 +1064,7 @@ static int32_t scsi_send_command(SCSIDevice *d, uint32_t tag, case READ_10: case READ_12: case READ_16: - len = r->req.cmd.xfer / d->blocksize; + len = r->req.cmd.xfer / s->qdev.blocksize; DPRINTF("Read (sector %" PRId64 ", count %d)\n", r->req.cmd.lba, len); if (r->req.cmd.lba > s->max_lba) goto illegal_lba; @@ -1115,7 +1078,7 @@ static int32_t scsi_send_command(SCSIDevice *d, uint32_t tag, case WRITE_VERIFY: case WRITE_VERIFY_12: case WRITE_VERIFY_16: - len = r->req.cmd.xfer / d->blocksize; + len = r->req.cmd.xfer / s->qdev.blocksize; DPRINTF("Write %s(sector %" PRId64 ", count %d)\n", (command & 0xe) == 0xe ? "And Verify " : "", r->req.cmd.lba, len); @@ -1123,7 +1086,6 @@ static int32_t scsi_send_command(SCSIDevice *d, uint32_t tag, goto illegal_lba; r->sector = r->req.cmd.lba * s->cluster_size; r->sector_count = len * s->cluster_size; - is_write = 1; break; case MODE_SELECT: DPRINTF("Mode Select(6) (len %lu)\n", (long)r->req.cmd.xfer); @@ -1150,7 +1112,7 @@ static int32_t scsi_send_command(SCSIDevice *d, uint32_t tag, } break; case WRITE_SAME_16: - len = r->req.cmd.xfer / d->blocksize; + len = r->req.cmd.xfer / s->qdev.blocksize; DPRINTF("WRITE SAME(16) (sector %" PRId64 ", count %d)\n", r->req.cmd.lba, len); @@ -1176,18 +1138,20 @@ static int32_t scsi_send_command(SCSIDevice *d, uint32_t tag, break; default: DPRINTF("Unknown SCSI command (%2.2x)\n", buf[0]); + scsi_command_complete(r, CHECK_CONDITION, SENSE_CODE(INVALID_OPCODE)); + return 0; fail: - scsi_command_complete(r, CHECK_CONDITION, ILLEGAL_REQUEST); + scsi_command_complete(r, CHECK_CONDITION, SENSE_CODE(INVALID_FIELD)); return 0; illegal_lba: - scsi_command_complete(r, CHECK_CONDITION, HARDWARE_ERROR); + scsi_command_complete(r, CHECK_CONDITION, SENSE_CODE(LBA_OUT_OF_RANGE)); return 0; } if (r->sector_count == 0 && r->iov.iov_len == 0) { - scsi_command_complete(r, GOOD, NO_SENSE); + scsi_command_complete(r, GOOD, SENSE_CODE(NO_SENSE)); } len = r->sector_count * 512 + r->iov.iov_len; - if (is_write) { + if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { return -len; } else { if (!r->sector_count) @@ -1196,25 +1160,12 @@ static int32_t scsi_send_command(SCSIDevice *d, uint32_t tag, } } -static void scsi_disk_purge_requests(SCSIDiskState *s) -{ - SCSIDiskReq *r; - - while (!QTAILQ_EMPTY(&s->qdev.requests)) { - r = DO_UPCAST(SCSIDiskReq, req, QTAILQ_FIRST(&s->qdev.requests)); - if (r->req.aiocb) { - bdrv_aio_cancel(r->req.aiocb); - } - scsi_remove_request(r); - } -} - static void scsi_disk_reset(DeviceState *dev) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); uint64_t nb_sectors; - scsi_disk_purge_requests(s); + scsi_device_purge_requests(&s->qdev); bdrv_get_geometry(s->bs, &nb_sectors); nb_sectors /= s->cluster_size; @@ -1228,14 +1179,13 @@ static void scsi_destroy(SCSIDevice *dev) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); - scsi_disk_purge_requests(s); + scsi_device_purge_requests(&s->qdev); blockdev_mark_auto_del(s->qdev.conf.bs); } -static int scsi_disk_initfn(SCSIDevice *dev) +static int scsi_initfn(SCSIDevice *dev, SCSIDriveKind kind) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); - int is_cd; DriveInfo *dinfo; if (!s->qdev.conf.bs) { @@ -1243,9 
+1193,9 @@ static int scsi_disk_initfn(SCSIDevice *dev) return -1; } s->bs = s->qdev.conf.bs; - is_cd = bdrv_get_type_hint(s->bs) == BDRV_TYPE_CDROM; + s->drive_kind = kind; - if (!is_cd && !bdrv_is_inserted(s->bs)) { + if (kind == SCSI_HD && !bdrv_is_inserted(s->bs)) { error_report("Device needs media, but drive is empty"); return -1; } @@ -1265,7 +1215,7 @@ static int scsi_disk_initfn(SCSIDevice *dev) return -1; } - if (is_cd) { + if (kind == SCSI_CD) { s->qdev.blocksize = 2048; } else { s->qdev.blocksize = s->qdev.conf.logical_block_size; @@ -1275,35 +1225,113 @@ static int scsi_disk_initfn(SCSIDevice *dev) s->qdev.type = TYPE_DISK; qemu_add_vm_change_state_handler(scsi_dma_restart_cb, s); - bdrv_set_removable(s->bs, is_cd); + bdrv_set_removable(s->bs, kind == SCSI_CD); add_boot_device_path(s->qdev.conf.bootindex, &dev->qdev, ",0"); return 0; } -static SCSIDeviceInfo scsi_disk_info = { - .qdev.name = "scsi-disk", - .qdev.fw_name = "disk", - .qdev.desc = "virtual scsi disk or cdrom", - .qdev.size = sizeof(SCSIDiskState), - .qdev.reset = scsi_disk_reset, - .init = scsi_disk_initfn, - .destroy = scsi_destroy, - .send_command = scsi_send_command, - .read_data = scsi_read_data, - .write_data = scsi_write_data, - .cancel_io = scsi_cancel_io, - .get_buf = scsi_get_buf, - .qdev.props = (Property[]) { - DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), - DEFINE_PROP_STRING("ver", SCSIDiskState, version), - DEFINE_PROP_STRING("serial", SCSIDiskState, serial), - DEFINE_PROP_BIT("removable", SCSIDiskState, removable, 0, false), - DEFINE_PROP_END_OF_LIST(), - }, +static int scsi_hd_initfn(SCSIDevice *dev) +{ + return scsi_initfn(dev, SCSI_HD); +} + +static int scsi_cd_initfn(SCSIDevice *dev) +{ + return scsi_initfn(dev, SCSI_CD); +} + +static int scsi_disk_initfn(SCSIDevice *dev) +{ + SCSIDriveKind kind; + DriveInfo *dinfo; + + if (!dev->conf.bs) { + kind = SCSI_HD; /* will die in scsi_initfn() */ + } else { + dinfo = drive_get_by_blockdev(dev->conf.bs); + kind = dinfo->media_cd ? 
SCSI_CD : SCSI_HD; + } + + return scsi_initfn(dev, kind); +} + +#define DEFINE_SCSI_DISK_PROPERTIES() \ + DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \ + DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ + DEFINE_PROP_STRING("serial", SCSIDiskState, serial) + +static SCSIDeviceInfo scsi_disk_info[] = { + { + .qdev.name = "scsi-hd", + .qdev.fw_name = "disk", + .qdev.desc = "virtual SCSI disk", + .qdev.size = sizeof(SCSIDiskState), + .qdev.reset = scsi_disk_reset, + .init = scsi_hd_initfn, + .destroy = scsi_destroy, + .alloc_req = scsi_new_request, + .free_req = scsi_free_request, + .send_command = scsi_send_command, + .read_data = scsi_read_data, + .write_data = scsi_write_data, + .cancel_io = scsi_cancel_io, + .get_buf = scsi_get_buf, + .get_sense = scsi_get_sense, + .qdev.props = (Property[]) { + DEFINE_SCSI_DISK_PROPERTIES(), + DEFINE_PROP_BIT("removable", SCSIDiskState, removable, 0, false), + DEFINE_PROP_END_OF_LIST(), + } + },{ + .qdev.name = "scsi-cd", + .qdev.fw_name = "disk", + .qdev.desc = "virtual SCSI CD-ROM", + .qdev.size = sizeof(SCSIDiskState), + .qdev.reset = scsi_disk_reset, + .init = scsi_cd_initfn, + .destroy = scsi_destroy, + .alloc_req = scsi_new_request, + .free_req = scsi_free_request, + .send_command = scsi_send_command, + .read_data = scsi_read_data, + .write_data = scsi_write_data, + .cancel_io = scsi_cancel_io, + .get_buf = scsi_get_buf, + .get_sense = scsi_get_sense, + .qdev.props = (Property[]) { + DEFINE_SCSI_DISK_PROPERTIES(), + DEFINE_PROP_END_OF_LIST(), + }, + },{ + .qdev.name = "scsi-disk", /* legacy -device scsi-disk */ + .qdev.fw_name = "disk", + .qdev.desc = "virtual SCSI disk or CD-ROM (legacy)", + .qdev.size = sizeof(SCSIDiskState), + .qdev.reset = scsi_disk_reset, + .init = scsi_disk_initfn, + .destroy = scsi_destroy, + .alloc_req = scsi_new_request, + .free_req = scsi_free_request, + .send_command = scsi_send_command, + .read_data = scsi_read_data, + .write_data = scsi_write_data, + .cancel_io = scsi_cancel_io, + .get_buf = scsi_get_buf, + .get_sense = scsi_get_sense, + .qdev.props = (Property[]) { + DEFINE_SCSI_DISK_PROPERTIES(), + DEFINE_PROP_BIT("removable", SCSIDiskState, removable, 0, false), + DEFINE_PROP_END_OF_LIST(), + } + } }; static void scsi_disk_register_devices(void) { - scsi_qdev_register(&scsi_disk_info); + int i; + + for (i = 0; i < ARRAY_SIZE(scsi_disk_info); i++) { + scsi_qdev_register(&scsi_disk_info[i]); + } } device_init(scsi_disk_register_devices) diff --git a/hw/scsi-generic.c b/hw/scsi-generic.c index 9be1cca4c3..8e59c7ee89 100644 --- a/hw/scsi-generic.c +++ b/hw/scsi-generic.c @@ -66,23 +66,49 @@ struct SCSIGenericState uint8_t senselen; }; -static SCSIGenericReq *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun) +static void scsi_set_sense(SCSIGenericState *s, SCSISense sense) { - SCSIRequest *req; + s->senselen = scsi_build_sense(sense, s->sensebuf, SCSI_SENSE_BUF_SIZE, 0); + s->driver_status = SG_ERR_DRIVER_SENSE; +} - req = scsi_req_alloc(sizeof(SCSIGenericReq), d, tag, lun); - return DO_UPCAST(SCSIGenericReq, req, req); +static void scsi_clear_sense(SCSIGenericState *s) +{ + memset(s->sensebuf, 0, SCSI_SENSE_BUF_SIZE); + s->senselen = 0; + s->driver_status = 0; } -static void scsi_remove_request(SCSIGenericReq *r) +static int scsi_get_sense(SCSIRequest *req, uint8_t *outbuf, int len) { - qemu_free(r->buf); - scsi_req_free(&r->req); + SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, req->dev); + int size = SCSI_SENSE_BUF_SIZE; + + if (!(s->driver_status & SG_ERR_DRIVER_SENSE)) { + size = 
scsi_build_sense(SENSE_CODE(NO_SENSE), s->sensebuf, + SCSI_SENSE_BUF_SIZE, 0); + } + if (size > len) { + size = len; + } + memcpy(outbuf, s->sensebuf, size); + + return size; } -static SCSIGenericReq *scsi_find_request(SCSIGenericState *s, uint32_t tag) +static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun) { - return DO_UPCAST(SCSIGenericReq, req, scsi_req_find(&s->qdev, tag)); + SCSIRequest *req; + + req = scsi_req_alloc(sizeof(SCSIGenericReq), d, tag, lun); + return req; +} + +static void scsi_free_request(SCSIRequest *req) +{ + SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req); + + qemu_free(r->buf); } /* Helper function for command completion. */ @@ -91,13 +117,30 @@ static void scsi_command_complete(void *opaque, int ret) SCSIGenericReq *r = (SCSIGenericReq *)opaque; SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, r->req.dev); + r->req.aiocb = NULL; s->driver_status = r->io_header.driver_status; if (s->driver_status & SG_ERR_DRIVER_SENSE) s->senselen = r->io_header.sb_len_wr; - if (ret != 0) - r->req.status = BUSY; - else { + if (ret != 0) { + switch (ret) { + case -EDOM: + r->req.status = TASK_SET_FULL; + break; + case -EINVAL: + r->req.status = CHECK_CONDITION; + scsi_set_sense(s, SENSE_CODE(INVALID_FIELD)); + break; + case -ENOMEM: + r->req.status = CHECK_CONDITION; + scsi_set_sense(s, SENSE_CODE(TARGET_FAILURE)); + break; + default: + r->req.status = CHECK_CONDITION; + scsi_set_sense(s, SENSE_CODE(IO_ERROR)); + break; + } + } else { if (s->driver_status & SG_ERR_DRIVER_TIMEOUT) { r->req.status = BUSY; BADF("Driver Timeout\n"); @@ -112,23 +155,18 @@ static void scsi_command_complete(void *opaque, int ret) r, r->req.tag, r->req.status); scsi_req_complete(&r->req); - scsi_remove_request(r); } /* Cancel a pending data transfer. */ -static void scsi_cancel_io(SCSIDevice *d, uint32_t tag) +static void scsi_cancel_io(SCSIRequest *req) { - DPRINTF("scsi_cancel_io 0x%x\n", tag); - SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, d); - SCSIGenericReq *r; - DPRINTF("Cancel tag=0x%x\n", tag); - r = scsi_find_request(s, tag); - if (r) { - if (r->req.aiocb) - bdrv_aio_cancel(r->req.aiocb); - r->req.aiocb = NULL; - scsi_remove_request(r); + SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req); + + DPRINTF("Cancel tag=0x%x\n", req->tag); + if (r->req.aiocb) { + bdrv_aio_cancel(r->req.aiocb); } + r->req.aiocb = NULL; } static int execute_command(BlockDriverState *bdrv, @@ -152,7 +190,7 @@ static int execute_command(BlockDriverState *bdrv, r->req.aiocb = bdrv_aio_ioctl(bdrv, SG_IO, &r->io_header, complete, r); if (r->req.aiocb == NULL) { BADF("execute_command: read failed !\n"); - return -1; + return -ENOMEM; } return 0; @@ -163,6 +201,7 @@ static void scsi_read_complete(void * opaque, int ret) SCSIGenericReq *r = (SCSIGenericReq *)opaque; int len; + r->req.aiocb = NULL; if (ret) { DPRINTF("IO error ret %d\n", ret); scsi_command_complete(r, ret); @@ -172,27 +211,21 @@ static void scsi_read_complete(void * opaque, int ret) DPRINTF("Data ready tag=0x%x len=%d\n", r->req.tag, len); r->len = -1; - r->req.bus->complete(r->req.bus, SCSI_REASON_DATA, r->req.tag, len); - if (len == 0) + if (len == 0) { scsi_command_complete(r, 0); + } else { + scsi_req_data(&r->req, len); + } } /* Read more data from scsi device into buffer. 
*/ -static void scsi_read_data(SCSIDevice *d, uint32_t tag) +static void scsi_read_data(SCSIRequest *req) { - SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, d); - SCSIGenericReq *r; + SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req); + SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, r->req.dev); int ret; - DPRINTF("scsi_read_data 0x%x\n", tag); - r = scsi_find_request(s, tag); - if (!r) { - BADF("Bad read tag 0x%x\n", tag); - /* ??? This is the wrong error. */ - scsi_command_complete(r, -EINVAL); - return; - } - + DPRINTF("scsi_read_data 0x%x\n", req->tag); if (r->len == -1) { scsi_command_complete(r, 0); return; @@ -210,13 +243,15 @@ static void scsi_read_data(SCSIDevice *d, uint32_t tag) DPRINTF("Sense: %d %d %d %d %d %d %d %d\n", r->buf[0], r->buf[1], r->buf[2], r->buf[3], r->buf[4], r->buf[5], r->buf[6], r->buf[7]); - r->req.bus->complete(r->req.bus, SCSI_REASON_DATA, r->req.tag, s->senselen); + scsi_req_data(&r->req, s->senselen); + /* Clear sensebuf after REQUEST_SENSE */ + scsi_clear_sense(s); return; } ret = execute_command(s->bs, r, SG_DXFER_FROM_DEV, scsi_read_complete); - if (ret == -1) { - scsi_command_complete(r, -EINVAL); + if (ret < 0) { + scsi_command_complete(r, ret); return; } } @@ -227,6 +262,7 @@ static void scsi_write_complete(void * opaque, int ret) SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, r->req.dev); DPRINTF("scsi_write_complete() ret = %d\n", ret); + r->req.aiocb = NULL; if (ret) { DPRINTF("IO error\n"); scsi_command_complete(r, ret); @@ -244,46 +280,30 @@ static void scsi_write_complete(void * opaque, int ret) /* Write data to a scsi device. Returns nonzero on failure. The transfer may complete asynchronously. */ -static int scsi_write_data(SCSIDevice *d, uint32_t tag) +static void scsi_write_data(SCSIRequest *req) { - SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, d); - SCSIGenericReq *r; + SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, req->dev); + SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req); int ret; - DPRINTF("scsi_write_data 0x%x\n", tag); - r = scsi_find_request(s, tag); - if (!r) { - BADF("Bad write tag 0x%x\n", tag); - /* ??? This is the wrong error. */ - scsi_command_complete(r, -EINVAL); - return 0; - } - + DPRINTF("scsi_write_data 0x%x\n", req->tag); if (r->len == 0) { r->len = r->buflen; - r->req.bus->complete(r->req.bus, SCSI_REASON_DATA, r->req.tag, r->len); - return 0; + scsi_req_data(&r->req, r->len); + return; } ret = execute_command(s->bs, r, SG_DXFER_TO_DEV, scsi_write_complete); - if (ret == -1) { - scsi_command_complete(r, -EINVAL); - return 1; + if (ret < 0) { + scsi_command_complete(r, ret); } - - return 0; } /* Return a pointer to the data buffer. */ -static uint8_t *scsi_get_buf(SCSIDevice *d, uint32_t tag) +static uint8_t *scsi_get_buf(SCSIRequest *req) { - SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, d); - SCSIGenericReq *r; - r = scsi_find_request(s, tag); - if (!r) { - BADF("Bad buffer tag 0x%x\n", tag); - return NULL; - } + SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req); + return r->buf; } @@ -311,42 +331,23 @@ static void scsi_req_fixup(SCSIRequest *req) (eg. disk reads), negative for transfers to the device (eg. disk writes), and zero if the command does not transfer any data. 
*/ -static int32_t scsi_send_command(SCSIDevice *d, uint32_t tag, - uint8_t *cmd, int lun) +static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd) { - SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, d); - SCSIGenericReq *r; - SCSIBus *bus; + SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, req->dev); + SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req); int ret; - if (cmd[0] != REQUEST_SENSE && - (lun != s->lun || (cmd[1] >> 5) != s->lun)) { - DPRINTF("Unimplemented LUN %d\n", lun ? lun : cmd[1] >> 5); - - s->sensebuf[0] = 0x70; - s->sensebuf[1] = 0x00; - s->sensebuf[2] = ILLEGAL_REQUEST; - s->sensebuf[3] = 0x00; - s->sensebuf[4] = 0x00; - s->sensebuf[5] = 0x00; - s->sensebuf[6] = 0x00; - s->senselen = 7; - s->driver_status = SG_ERR_DRIVER_SENSE; - bus = scsi_bus_from_device(d); - bus->complete(bus, SCSI_REASON_DONE, tag, CHECK_CONDITION); + if (cmd[0] != REQUEST_SENSE && req->lun != s->lun) { + DPRINTF("Unimplemented LUN %d\n", req->lun); + scsi_set_sense(s, SENSE_CODE(LUN_NOT_SUPPORTED)); + r->req.status = CHECK_CONDITION; + scsi_req_complete(&r->req); return 0; } - r = scsi_find_request(s, tag); - if (r) { - BADF("Tag 0x%x already in use %p\n", tag, r); - scsi_cancel_io(d, tag); - } - r = scsi_new_request(d, tag, lun); - if (-1 == scsi_req_parse(&r->req, cmd)) { BADF("Unsupported command length, command %x\n", cmd[0]); - scsi_remove_request(r); + scsi_command_complete(r, -EINVAL); return 0; } scsi_req_fixup(&r->req); @@ -370,8 +371,8 @@ static int32_t scsi_send_command(SCSIDevice *d, uint32_t tag, r->buflen = 0; r->buf = NULL; ret = execute_command(s->bs, r, SG_DXFER_NONE, scsi_command_complete); - if (ret == -1) { - scsi_command_complete(r, -EINVAL); + if (ret < 0) { + scsi_command_complete(r, ret); return 0; } return 0; @@ -389,9 +390,9 @@ static int32_t scsi_send_command(SCSIDevice *d, uint32_t tag, if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { r->len = 0; return -r->req.cmd.xfer; + } else { + return r->req.cmd.xfer; } - - return r->req.cmd.xfer; } static int get_blocksize(BlockDriverState *bdrv) @@ -455,31 +456,18 @@ static int get_stream_blocksize(BlockDriverState *bdrv) return (buf[9] << 16) | (buf[10] << 8) | buf[11]; } -static void scsi_generic_purge_requests(SCSIGenericState *s) -{ - SCSIGenericReq *r; - - while (!QTAILQ_EMPTY(&s->qdev.requests)) { - r = DO_UPCAST(SCSIGenericReq, req, QTAILQ_FIRST(&s->qdev.requests)); - if (r->req.aiocb) { - bdrv_aio_cancel(r->req.aiocb); - } - scsi_remove_request(r); - } -} - static void scsi_generic_reset(DeviceState *dev) { SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev.qdev, dev); - scsi_generic_purge_requests(s); + scsi_device_purge_requests(&s->qdev); } static void scsi_destroy(SCSIDevice *d) { SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, d); - scsi_generic_purge_requests(s); + scsi_device_purge_requests(&s->qdev); blockdev_mark_auto_del(s->qdev.conf.bs); } @@ -556,11 +544,14 @@ static SCSIDeviceInfo scsi_generic_info = { .qdev.reset = scsi_generic_reset, .init = scsi_generic_initfn, .destroy = scsi_destroy, + .alloc_req = scsi_new_request, + .free_req = scsi_free_request, .send_command = scsi_send_command, .read_data = scsi_read_data, .write_data = scsi_write_data, .cancel_io = scsi_cancel_io, .get_buf = scsi_get_buf, + .get_sense = scsi_get_sense, .qdev.props = (Property[]) { DEFINE_BLOCK_PROPERTIES(SCSIGenericState, qdev.conf), DEFINE_PROP_END_OF_LIST(), @@ -9,17 +9,11 @@ #define SCSI_CMD_BUF_SIZE 16 -/* scsi-disk.c */ -enum scsi_reason { - SCSI_REASON_DONE, /* Command complete. 
*/ - SCSI_REASON_DATA /* Transfer complete, more data required. */ -}; - typedef struct SCSIBus SCSIBus; +typedef struct SCSIBusOps SCSIBusOps; typedef struct SCSIDevice SCSIDevice; typedef struct SCSIDeviceInfo SCSIDeviceInfo; -typedef void (*scsi_completionfn)(SCSIBus *bus, int reason, uint32_t tag, - uint32_t arg); +typedef struct SCSIRequest SCSIRequest; enum SCSIXferMode { SCSI_XFER_NONE, /* TEST_UNIT_READY, ... */ @@ -27,9 +21,16 @@ enum SCSIXferMode { SCSI_XFER_TO_DEV, /* WRITE, MODE_SELECT, ... */ }; -typedef struct SCSIRequest { +typedef struct SCSISense { + uint8_t key; + uint8_t asc; + uint8_t ascq; +} SCSISense; + +struct SCSIRequest { SCSIBus *bus; SCSIDevice *dev; + uint32_t refcount; uint32_t tag; uint32_t lun; uint32_t status; @@ -43,7 +44,7 @@ typedef struct SCSIRequest { BlockDriverAIOCB *aiocb; bool enqueued; QTAILQ_ENTRY(SCSIRequest) next; -} SCSIRequest; +}; struct SCSIDevice { @@ -66,28 +67,34 @@ struct SCSIDeviceInfo { DeviceInfo qdev; scsi_qdev_initfn init; void (*destroy)(SCSIDevice *s); - int32_t (*send_command)(SCSIDevice *s, uint32_t tag, uint8_t *buf, - int lun); - void (*read_data)(SCSIDevice *s, uint32_t tag); - int (*write_data)(SCSIDevice *s, uint32_t tag); - void (*cancel_io)(SCSIDevice *s, uint32_t tag); - uint8_t *(*get_buf)(SCSIDevice *s, uint32_t tag); + SCSIRequest *(*alloc_req)(SCSIDevice *s, uint32_t tag, uint32_t lun); + void (*free_req)(SCSIRequest *req); + int32_t (*send_command)(SCSIRequest *req, uint8_t *buf); + void (*read_data)(SCSIRequest *req); + void (*write_data)(SCSIRequest *req); + void (*cancel_io)(SCSIRequest *req); + uint8_t *(*get_buf)(SCSIRequest *req); + int (*get_sense)(SCSIRequest *req, uint8_t *buf, int len); +}; + +struct SCSIBusOps { + void (*transfer_data)(SCSIRequest *req, uint32_t arg); + void (*complete)(SCSIRequest *req, uint32_t arg); + void (*cancel)(SCSIRequest *req); }; -typedef void (*SCSIAttachFn)(DeviceState *host, BlockDriverState *bdrv, - int unit); struct SCSIBus { BusState qbus; int busnr; int tcq, ndev; - scsi_completionfn complete; + const SCSIBusOps *ops; SCSIDevice *devs[MAX_SCSI_DEVS]; }; void scsi_bus_new(SCSIBus *bus, DeviceState *host, int tcq, int ndev, - scsi_completionfn complete); + const SCSIBusOps *ops); void scsi_qdev_register(SCSIDeviceInfo *info); static inline SCSIBus *scsi_bus_from_device(SCSIDevice *d) @@ -99,12 +106,54 @@ SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockDriverState *bdrv, int unit, bool removable); int scsi_bus_legacy_handle_cmdline(SCSIBus *bus); +/* + * Predefined sense codes + */ + +/* No sense data available */ +extern const struct SCSISense sense_code_NO_SENSE; +/* LUN not ready, Manual intervention required */ +extern const struct SCSISense sense_code_LUN_NOT_READY; +/* LUN not ready, Medium not present */ +extern const struct SCSISense sense_code_NO_MEDIUM; +/* Hardware error, internal target failure */ +extern const struct SCSISense sense_code_TARGET_FAILURE; +/* Illegal request, invalid command operation code */ +extern const struct SCSISense sense_code_INVALID_OPCODE; +/* Illegal request, LBA out of range */ +extern const struct SCSISense sense_code_LBA_OUT_OF_RANGE; +/* Illegal request, Invalid field in CDB */ +extern const struct SCSISense sense_code_INVALID_FIELD; +/* Illegal request, LUN not supported */ +extern const struct SCSISense sense_code_LUN_NOT_SUPPORTED; +/* Command aborted, I/O process terminated */ +extern const struct SCSISense sense_code_IO_ERROR; +/* Command aborted, I_T Nexus loss occurred */ +extern const struct SCSISense 
sense_code_I_T_NEXUS_LOSS; +/* Command aborted, Logical Unit failure */ +extern const struct SCSISense sense_code_LUN_FAILURE; + +#define SENSE_CODE(x) sense_code_ ## x + +int scsi_build_sense(SCSISense sense, uint8_t *buf, int len, int fixed); +int scsi_sense_valid(SCSISense sense); + SCSIRequest *scsi_req_alloc(size_t size, SCSIDevice *d, uint32_t tag, uint32_t lun); -SCSIRequest *scsi_req_find(SCSIDevice *d, uint32_t tag); +SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun); +int32_t scsi_req_enqueue(SCSIRequest *req, uint8_t *buf); void scsi_req_free(SCSIRequest *req); +SCSIRequest *scsi_req_ref(SCSIRequest *req); +void scsi_req_unref(SCSIRequest *req); int scsi_req_parse(SCSIRequest *req, uint8_t *buf); void scsi_req_print(SCSIRequest *req); +void scsi_req_continue(SCSIRequest *req); +void scsi_req_data(SCSIRequest *req, int len); void scsi_req_complete(SCSIRequest *req); +uint8_t *scsi_req_get_buf(SCSIRequest *req); +int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len); +void scsi_req_abort(SCSIRequest *req, int status); +void scsi_req_cancel(SCSIRequest *req); +void scsi_device_purge_requests(SCSIDevice *sdev); #endif @@ -1104,6 +1104,17 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, } break; + case 52: + case 53: + /* CMD52, CMD53: reserved for SDIO cards + * (see the SDIO Simplified Specification V2.0) + * Handle as illegal command but do not complain + * on stderr, as some OSes may use these in their + * probing for presence of an SDIO card. + */ + sd->card_status |= ILLEGAL_COMMAND; + return sd_r0; + /* Application specific commands (Class 8) */ case 55: /* CMD55: APP_CMD */ if (sd->rca != rca) diff --git a/hw/sga.c b/hw/sga.c new file mode 100644 index 0000000000..7ef750adf6 --- /dev/null +++ b/hw/sga.c @@ -0,0 +1,56 @@ +/* + * QEMU dummy ISA device for loading sgabios option rom. + * + * Copyright (c) 2011 Glauber Costa, Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ * + * sgabios code originally available at code.google.com/p/sgabios + * + */ +#include "pci.h" +#include "pc.h" +#include "loader.h" +#include "sysemu.h" + +#define SGABIOS_FILENAME "sgabios.bin" + +typedef struct ISAGAState { + ISADevice dev; +} ISASGAState; + +static int isa_cirrus_vga_initfn(ISADevice *dev) +{ + rom_add_vga(SGABIOS_FILENAME); + return 0; +} + +static ISADeviceInfo sga_info = { + .qdev.name = "sga", + .qdev.desc = "Serial Graphics Adapter", + .qdev.size = sizeof(ISASGAState), + .init = isa_cirrus_vga_initfn, +}; + +static void sga_register(void) +{ + isa_qdev_register(&sga_info); +} + +device_init(sga_register); diff --git a/hw/sh7750_regs.h b/hw/sh7750_regs.h index 5a23a2ca20..6ec13ab6fe 100644 --- a/hw/sh7750_regs.h +++ b/hw/sh7750_regs.h @@ -23,9 +23,9 @@ * All register has 2 addresses: in 0xff000000 - 0xffffffff (P4 address) and * in 0x1f000000 - 0x1fffffff (area 7 address) */ -#define SH7750_P4_BASE 0xff000000 /* Accessable only in - priveleged mode */ -#define SH7750_A7_BASE 0x1f000000 /* Accessable only using TLB */ +#define SH7750_P4_BASE 0xff000000 /* Accessible only in + privileged mode */ +#define SH7750_A7_BASE 0x1f000000 /* Accessible only using TLB */ #define SH7750_P4_REG32(ofs) (SH7750_P4_BASE + (ofs)) #define SH7750_A7_REG32(ofs) (SH7750_A7_BASE + (ofs)) diff --git a/hw/spapr.c b/hw/spapr.c index 1782cc0a94..109b77459a 100644 --- a/hw/spapr.c +++ b/hw/spapr.c @@ -51,7 +51,7 @@ #define TIMEBASE_FREQ 512000000ULL -#define MAX_CPUS 32 +#define MAX_CPUS 256 #define XICS_IRQS 1024 sPAPREnvironment *spapr; @@ -93,7 +93,7 @@ static void *spapr_create_fdt_skel(const char *cpu_model, /* Root node */ _FDT((fdt_begin_node(fdt, ""))); _FDT((fdt_property_string(fdt, "device_type", "chrp"))); - _FDT((fdt_property_string(fdt, "model", "qemu,emulated-pSeries-LPAR"))); + _FDT((fdt_property_string(fdt, "model", "IBM pSeries (emulated by qemu)"))); _FDT((fdt_property_cell(fdt, "#address-cells", 0x2))); _FDT((fdt_property_cell(fdt, "#size-cells", 0x2))); @@ -362,8 +362,9 @@ static void ppc_spapr_init(ram_addr_t ram_size, for (i = 0; i < MAX_SERIAL_PORTS; i++, irq++) { if (serial_hds[i]) { - spapr_vty_create(spapr->vio_bus, i, serial_hds[i], - xics_find_qirq(spapr->icp, irq), irq); + spapr_vty_create(spapr->vio_bus, SPAPR_VTY_BASE_ADDRESS + i, + serial_hds[i], xics_find_qirq(spapr->icp, irq), + irq); } } diff --git a/hw/spapr_hcall.c b/hw/spapr_hcall.c index f88e1d2083..43c441dc7d 100644 --- a/hw/spapr_hcall.c +++ b/hw/spapr_hcall.c @@ -100,22 +100,18 @@ static target_ulong h_enter(CPUState *env, sPAPREnvironment *spapr, target_ulong pte_index = args[1]; target_ulong pteh = args[2]; target_ulong ptel = args[3]; - target_ulong porder; - target_ulong i, pa; + target_ulong i; uint8_t *hpte; /* only handle 4k and 16M pages for now */ - porder = 12; if (pteh & HPTE_V_LARGE) { #if 0 /* We don't support 64k pages yet */ if ((ptel & 0xf000) == 0x1000) { /* 64k page */ - porder = 16; } else #endif if ((ptel & 0xff000) == 0) { /* 16M page */ - porder = 24; /* lowest AVA bit must be 0 for 16M pages */ if (pteh & 0x80) { return H_PARAMETER; @@ -125,7 +121,6 @@ static target_ulong h_enter(CPUState *env, sPAPREnvironment *spapr, } } - pa = ptel & HPTE_R_RPN; /* FIXME: bounds check the pa? 
*/ /* Check WIMG */ @@ -455,8 +450,8 @@ static target_ulong h_rtas(CPUState *env, sPAPREnvironment *spapr, nret, rtas_r3 + 12 + 4*nargs); } -spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1]; -spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE]; +static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1]; +static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1]; void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn) { diff --git a/hw/spapr_llan.c b/hw/spapr_llan.c index ff3a78f729..c18efc7ee6 100644 --- a/hw/spapr_llan.c +++ b/hw/spapr_llan.c @@ -185,9 +185,6 @@ static NetClientInfo net_spapr_vlan_info = { static int spapr_vlan_init(VIOsPAPRDevice *sdev) { VIOsPAPRVLANDevice *dev = (VIOsPAPRVLANDevice *)sdev; - VIOsPAPRBus *bus; - - bus = DO_UPCAST(VIOsPAPRBus, bus, sdev->qdev.parent_bus); qemu_macaddr_default_if_unset(&dev->nicconf.macaddr); diff --git a/hw/spapr_rtas.c b/hw/spapr_rtas.c index 16b65422b6..00c8ce5a15 100644 --- a/hw/spapr_rtas.c +++ b/hw/spapr_rtas.c @@ -44,7 +44,8 @@ static void rtas_display_character(sPAPREnvironment *spapr, uint32_t nret, target_ulong rets) { uint8_t c = rtas_ld(args, 0); - VIOsPAPRDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, 0); + VIOsPAPRDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, + SPAPR_VTY_BASE_ADDRESS); if (!sdev) { rtas_st(rets, 0, -1); diff --git a/hw/spapr_vio.h b/hw/spapr_vio.h index 841b04351a..603a8c43a3 100644 --- a/hw/spapr_vio.h +++ b/hw/spapr_vio.h @@ -32,6 +32,8 @@ enum VIOsPAPR_TCEAccess { SPAPR_TCE_RW = 3, }; +#define SPAPR_VTY_BASE_ADDRESS 0x30000000 + struct VIOsPAPRDevice; typedef struct VIOsPAPR_RTCE { diff --git a/hw/spapr_vscsi.c b/hw/spapr_vscsi.c index 992833450c..1c901ef6eb 100644 --- a/hw/spapr_vscsi.c +++ b/hw/spapr_vscsi.c @@ -74,7 +74,7 @@ typedef struct vscsi_req { union viosrp_iu iu; /* SCSI request tracking */ - SCSIDevice *sdev; + SCSIRequest *sreq; uint32_t qtag; /* qemu tag != srp tag */ int lun; int active; @@ -123,11 +123,16 @@ static struct vscsi_req *vscsi_get_req(VSCSIState *s) static void vscsi_put_req(VSCSIState *s, vscsi_req *req) { + if (req->sreq != NULL) { + scsi_req_unref(req->sreq); + } + req->sreq = NULL; req->active = 0; } -static vscsi_req *vscsi_find_req(VSCSIState *s, uint32_t tag) +static vscsi_req *vscsi_find_req(VSCSIState *s, SCSIRequest *req) { + uint32_t tag = req->tag; if (tag >= VSCSI_REQ_LIMIT || !s->reqs[tag].active) { return NULL; } @@ -442,10 +447,18 @@ static int vscsi_preprocess_desc(vscsi_req *req) static void vscsi_send_request_sense(VSCSIState *s, vscsi_req *req) { - SCSIDevice *sdev = req->sdev; uint8_t *cdb = req->iu.srp.cmd.cdb; int n; + n = scsi_req_get_sense(req->sreq, req->sense, sizeof(req->sense)); + if (n) { + req->senselen = n; + vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0); + vscsi_put_req(s, req); + return; + } + + dprintf("VSCSI: Got CHECK_CONDITION, requesting sense...\n"); cdb[0] = 3; cdb[1] = 0; cdb[2] = 0; @@ -453,66 +466,92 @@ static void vscsi_send_request_sense(VSCSIState *s, vscsi_req *req) cdb[4] = 96; cdb[5] = 0; req->sensing = 1; - n = sdev->info->send_command(sdev, req->qtag, cdb, req->lun); + n = scsi_req_enqueue(req->sreq, cdb); dprintf("VSCSI: Queued request sense tag 0x%x\n", req->qtag); if (n < 0) { fprintf(stderr, "VSCSI: REQUEST_SENSE wants write data !?!?!?\n"); - sdev->info->cancel_io(sdev, req->qtag); vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0); - vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0); - vscsi_put_req(s, req); + 
scsi_req_abort(req->sreq, CHECK_CONDITION); return; } else if (n == 0) { return; } - sdev->info->read_data(sdev, req->qtag); + scsi_req_continue(req->sreq); } /* Callback to indicate that the SCSI layer has completed a transfer. */ -static void vscsi_command_complete(SCSIBus *bus, int reason, uint32_t tag, - uint32_t arg) +static void vscsi_transfer_data(SCSIRequest *sreq, uint32_t len) { - VSCSIState *s = DO_UPCAST(VSCSIState, vdev.qdev, bus->qbus.parent); - vscsi_req *req = vscsi_find_req(s, tag); - SCSIDevice *sdev; + VSCSIState *s = DO_UPCAST(VSCSIState, vdev.qdev, sreq->bus->qbus.parent); + vscsi_req *req = vscsi_find_req(s, sreq); uint8_t *buf; - int32_t res_in = 0, res_out = 0; - int len, rc = 0; + int rc = 0; - dprintf("VSCSI: SCSI cmd complete, r=0x%x tag=0x%x arg=0x%x, req=%p\n", - reason, tag, arg, req); + dprintf("VSCSI: SCSI xfer complete tag=0x%x len=0x%x, req=%p\n", + sreq->tag, len, req); if (req == NULL) { - fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", tag); + fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag); return; } - sdev = req->sdev; if (req->sensing) { - if (reason == SCSI_REASON_DONE) { - dprintf("VSCSI: Sense done !\n"); - vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0); - vscsi_put_req(s, req); - } else { - uint8_t *buf = sdev->info->get_buf(sdev, tag); - - len = MIN(arg, SCSI_SENSE_BUF_SIZE); - dprintf("VSCSI: Sense data, %d bytes:\n", len); - dprintf(" %02x %02x %02x %02x %02x %02x %02x %02x\n", - buf[0], buf[1], buf[2], buf[3], - buf[4], buf[5], buf[6], buf[7]); - dprintf(" %02x %02x %02x %02x %02x %02x %02x %02x\n", - buf[8], buf[9], buf[10], buf[11], - buf[12], buf[13], buf[14], buf[15]); - memcpy(req->sense, buf, len); - req->senselen = len; - sdev->info->read_data(sdev, req->qtag); - } + uint8_t *buf = scsi_req_get_buf(sreq); + + len = MIN(len, SCSI_SENSE_BUF_SIZE); + dprintf("VSCSI: Sense data, %d bytes:\n", len); + dprintf(" %02x %02x %02x %02x %02x %02x %02x %02x\n", + buf[0], buf[1], buf[2], buf[3], + buf[4], buf[5], buf[6], buf[7]); + dprintf(" %02x %02x %02x %02x %02x %02x %02x %02x\n", + buf[8], buf[9], buf[10], buf[11], + buf[12], buf[13], buf[14], buf[15]); + memcpy(req->sense, buf, len); + req->senselen = len; + scsi_req_continue(req->sreq); + return; + } + + if (len) { + buf = scsi_req_get_buf(sreq); + rc = vscsi_srp_transfer_data(s, req, req->writing, buf, len); + } + if (rc < 0) { + fprintf(stderr, "VSCSI: RDMA error rc=%d!\n", rc); + vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0); + scsi_req_abort(req->sreq, CHECK_CONDITION); + return; + } + + /* Start next chunk */ + req->data_len -= rc; + scsi_req_continue(sreq); +} + +/* Callback to indicate that the SCSI layer has completed a transfer. 
*/ +static void vscsi_command_complete(SCSIRequest *sreq, uint32_t status) +{ + VSCSIState *s = DO_UPCAST(VSCSIState, vdev.qdev, sreq->bus->qbus.parent); + vscsi_req *req = vscsi_find_req(s, sreq); + int32_t res_in = 0, res_out = 0; + + dprintf("VSCSI: SCSI cmd complete, r=0x%x tag=0x%x status=0x%x, req=%p\n", + reason, sreq->tag, status, req); + if (req == NULL) { + fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag); + return; + } + + if (!req->sensing && status == CHECK_CONDITION) { + vscsi_send_request_sense(s, req); return; } - if (reason == SCSI_REASON_DONE) { - dprintf("VSCSI: Command complete err=%d\n", arg); - if (arg == 0) { + if (req->sensing) { + dprintf("VSCSI: Sense done !\n"); + status = CHECK_CONDITION; + } else { + dprintf("VSCSI: Command complete err=%d\n", status); + if (status == 0) { /* We handle overflows, not underflows for normal commands, * but hopefully nobody cares */ @@ -521,41 +560,18 @@ static void vscsi_command_complete(SCSIBus *bus, int reason, uint32_t tag, } else { res_in = req->data_len; } - vscsi_send_rsp(s, req, 0, res_in, res_out); - } else if (arg == CHECK_CONDITION) { - dprintf("VSCSI: Got CHECK_CONDITION, requesting sense...\n"); - vscsi_send_request_sense(s, req); - return; - } else { - vscsi_send_rsp(s, req, arg, 0, 0); } - vscsi_put_req(s, req); - return; } + vscsi_send_rsp(s, req, 0, res_in, res_out); + vscsi_put_req(s, req); +} - /* "arg" is how much we have read for reads and how much we want - * to write for writes (ie, how much is to be DMA'd) - */ - if (arg) { - buf = sdev->info->get_buf(sdev, tag); - rc = vscsi_srp_transfer_data(s, req, req->writing, buf, arg); - } - if (rc < 0) { - fprintf(stderr, "VSCSI: RDMA error rc=%d!\n", rc); - sdev->info->cancel_io(sdev, req->qtag); - vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0); - vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0); - vscsi_put_req(s, req); - return; - } +static void vscsi_request_cancelled(SCSIRequest *sreq) +{ + VSCSIState *s = DO_UPCAST(VSCSIState, vdev.qdev, sreq->bus->qbus.parent); + vscsi_req *req = vscsi_find_req(s, sreq); - /* Start next chunk */ - req->data_len -= rc; - if (req->writing) { - sdev->info->write_data(sdev, req->qtag); - } else { - sdev->info->read_data(sdev, req->qtag); - } + vscsi_put_req(s, req); } static void vscsi_process_login(VSCSIState *s, vscsi_req *req) @@ -642,9 +658,9 @@ static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req) } return 1; } - req->sdev = sdev; req->lun = lun; - n = sdev->info->send_command(sdev, req->qtag, srp->cmd.cdb, lun); + req->sreq = scsi_req_new(sdev, req->qtag, lun); + n = scsi_req_enqueue(req->sreq, srp->cmd.cdb); dprintf("VSCSI: Queued command tag 0x%x CMD 0x%x ID %d LUN %d ret: %d\n", req->qtag, srp->cmd.cdb[0], id, lun, n); @@ -657,15 +673,14 @@ static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req) /* Preprocess RDMA descriptors */ vscsi_preprocess_desc(req); - } - /* Get transfer direction and initiate transfer */ - if (n > 0) { - req->data_len = n; - sdev->info->read_data(sdev, req->qtag); - } else if (n < 0) { - req->data_len = -n; - sdev->info->write_data(sdev, req->qtag); + /* Get transfer direction and initiate transfer */ + if (n > 0) { + req->data_len = n; + } else if (n < 0) { + req->data_len = -n; + } + scsi_req_continue(req->sreq); } /* Don't touch req here, it may have been recycled already */ @@ -907,6 +922,12 @@ static int vscsi_do_crq(struct VIOsPAPRDevice *dev, uint8_t *crq_data) return 0; } +static const struct SCSIBusOps vscsi_scsi_ops = { + .transfer_data = vscsi_transfer_data, 
+ .complete = vscsi_command_complete, + .cancel = vscsi_request_cancelled +}; + static int spapr_vscsi_init(VIOsPAPRDevice *dev) { VSCSIState *s = DO_UPCAST(VSCSIState, vdev, dev); @@ -923,7 +944,7 @@ static int spapr_vscsi_init(VIOsPAPRDevice *dev) dev->crq.SendFunc = vscsi_do_crq; scsi_bus_new(&s->bus, &dev->qdev, 1, VSCSI_REQ_LIMIT, - vscsi_command_complete); + &vscsi_scsi_ops); if (!dev->qdev.hotplugged) { scsi_bus_legacy_handle_cmdline(&s->bus); } diff --git a/hw/ssd0303.c b/hw/ssd0303.c index 108c0683c8..b39e2596fb 100644 --- a/hw/ssd0303.c +++ b/hw/ssd0303.c @@ -93,7 +93,7 @@ static int ssd0303_send(i2c_slave *i2c, uint8_t data) DPRINTF("cmd 0x%02x\n", data); s->mode = SSD0303_IDLE; switch (data) { - case 0x00 ... 0x0f: /* Set lower colum address. */ + case 0x00 ... 0x0f: /* Set lower column address. */ s->col = (s->col & 0xf0) | (data & 0xf); break; case 0x10 ... 0x20: /* Set higher column address. */ diff --git a/hw/sun4m_iommu.c b/hw/sun4m_iommu.c index bba69eef92..7f5dad535c 100644 --- a/hw/sun4m_iommu.c +++ b/hw/sun4m_iommu.c @@ -118,7 +118,7 @@ #define IOPTE_PAGE 0xffffff00 /* Physical page number (PA[35:12]) */ #define IOPTE_CACHE 0x00000080 /* Cached (in vme IOCACHE or Viking/MXCC) */ -#define IOPTE_WRITE 0x00000004 /* Writeable */ +#define IOPTE_WRITE 0x00000004 /* Writable */ #define IOPTE_VALID 0x00000002 /* IOPTE is valid */ #define IOPTE_WAZ 0x00000001 /* Write as zeros */ diff --git a/hw/syborg_serial.c b/hw/syborg_serial.c index df2950fe88..2ef71758b5 100644 --- a/hw/syborg_serial.c +++ b/hw/syborg_serial.c @@ -126,7 +126,7 @@ static void do_dma_tx(SyborgSerialState *s, uint32_t count) s->dma_tx_ptr += count; } /* QEMU char backends do not have a nonblocking mode, so we transmit all - the data imediately and the interrupt status will be unchanged. */ + the data immediately and the interrupt status will be unchanged. */ } /* Initiate RX DMA, and transfer data from the FIFO. 
*/ diff --git a/hw/usb-bt.c b/hw/usb-bt.c index 22e6845049..baae4876ea 100644 --- a/hw/usb-bt.c +++ b/hw/usb-bt.c @@ -372,13 +372,13 @@ static void usb_bt_handle_reset(USBDevice *dev) s->altsetting = 0; } -static int usb_bt_handle_control(USBDevice *dev, int request, int value, - int index, int length, uint8_t *data) +static int usb_bt_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) { struct USBBtState *s = (struct USBBtState *) dev->opaque; int ret; - ret = usb_desc_handle_control(dev, request, value, index, length, data); + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); if (ret >= 0) { switch (request) { case DeviceRequest | USB_REQ_GET_CONFIGURATION: diff --git a/hw/usb-ccid.c b/hw/usb-ccid.c index 079b4a2555..5b6878bea4 100644 --- a/hw/usb-ccid.c +++ b/hw/usb-ccid.c @@ -602,8 +602,8 @@ static void ccid_handle_reset(USBDevice *dev) ccid_reset(s); } -static int ccid_handle_control(USBDevice *dev, int request, int value, - int index, int length, uint8_t *data) +static int ccid_handle_control(USBDevice *dev, USBPacket *p, int request, + int value, int index, int length, uint8_t *data) { USBCCIDState *s = DO_UPCAST(USBCCIDState, dev, dev); int ret = 0; diff --git a/hw/usb-desc.c b/hw/usb-desc.c index 62591f20aa..e4a4680fee 100644 --- a/hw/usb-desc.c +++ b/hw/usb-desc.c @@ -76,7 +76,7 @@ int usb_desc_config(const USBDescConfig *conf, uint8_t *dest, size_t len) { uint8_t bLength = 0x09; uint16_t wTotalLength = 0; - int i, rc, count; + int i, rc; if (len < bLength) { return -1; @@ -91,8 +91,19 @@ int usb_desc_config(const USBDescConfig *conf, uint8_t *dest, size_t len) dest[0x08] = conf->bMaxPower; wTotalLength += bLength; - count = conf->nif ? conf->nif : conf->bNumInterfaces; - for (i = 0; i < count; i++) { + /* handle grouped interfaces if any*/ + for (i = 0; i < conf->nif_groups; i++) { + rc = usb_desc_iface_group(&(conf->if_groups[i]), + dest + wTotalLength, + len - wTotalLength); + if (rc < 0) { + return rc; + } + wTotalLength += rc; + } + + /* handle normal (ungrouped / no IAD) interfaces if any */ + for (i = 0; i < conf->nif; i++) { rc = usb_desc_iface(conf->ifs + i, dest + wTotalLength, len - wTotalLength); if (rc < 0) { return rc; @@ -105,6 +116,41 @@ int usb_desc_config(const USBDescConfig *conf, uint8_t *dest, size_t len) return wTotalLength; } +int usb_desc_iface_group(const USBDescIfaceAssoc *iad, uint8_t *dest, + size_t len) +{ + int pos = 0; + int i = 0; + + /* handle interface association descriptor */ + uint8_t bLength = 0x08; + + if (len < bLength) { + return -1; + } + + dest[0x00] = bLength; + dest[0x01] = USB_DT_INTERFACE_ASSOC; + dest[0x02] = iad->bFirstInterface; + dest[0x03] = iad->bInterfaceCount; + dest[0x04] = iad->bFunctionClass; + dest[0x05] = iad->bFunctionSubClass; + dest[0x06] = iad->bFunctionProtocol; + dest[0x07] = iad->iFunction; + pos += bLength; + + /* handle associated interfaces in this group */ + for (i = 0; i < iad->nif; i++) { + int rc = usb_desc_iface(&(iad->ifs[i]), dest + pos, len - pos); + if (rc < 0) { + return rc; + } + pos += rc; + } + + return pos; +} + int usb_desc_iface(const USBDescIface *iface, uint8_t *dest, size_t len) { uint8_t bLength = 0x09; @@ -344,8 +390,8 @@ int usb_desc_get_descriptor(USBDevice *dev, int value, uint8_t *dest, size_t len return ret; } -int usb_desc_handle_control(USBDevice *dev, int request, int value, - int index, int length, uint8_t *data) +int usb_desc_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, 
int index, int length, uint8_t *data) { const USBDesc *desc = dev->info->usb_desc; int i, ret = -1; diff --git a/hw/usb-desc.h b/hw/usb-desc.h index ac734ab088..9d7ed599ce 100644 --- a/hw/usb-desc.h +++ b/hw/usb-desc.h @@ -30,6 +30,24 @@ struct USBDescConfig { uint8_t bmAttributes; uint8_t bMaxPower; + /* grouped interfaces */ + uint8_t nif_groups; + const USBDescIfaceAssoc *if_groups; + + /* "normal" interfaces */ + uint8_t nif; + const USBDescIface *ifs; +}; + +/* conceptually an Interface Association Descriptor, and releated interfaces */ +struct USBDescIfaceAssoc { + uint8_t bFirstInterface; + uint8_t bInterfaceCount; + uint8_t bFunctionClass; + uint8_t bFunctionSubClass; + uint8_t bFunctionProtocol; + uint8_t iFunction; + uint8_t nif; const USBDescIface *ifs; }; @@ -75,6 +93,8 @@ int usb_desc_device(const USBDescID *id, const USBDescDevice *dev, int usb_desc_device_qualifier(const USBDescDevice *dev, uint8_t *dest, size_t len); int usb_desc_config(const USBDescConfig *conf, uint8_t *dest, size_t len); +int usb_desc_iface_group(const USBDescIfaceAssoc *iad, uint8_t *dest, + size_t len); int usb_desc_iface(const USBDescIface *iface, uint8_t *dest, size_t len); int usb_desc_endpoint(const USBDescEndpoint *ep, uint8_t *dest, size_t len); int usb_desc_other(const USBDescOther *desc, uint8_t *dest, size_t len); @@ -86,7 +106,7 @@ void usb_desc_set_string(USBDevice *dev, uint8_t index, const char *str); const char *usb_desc_get_string(USBDevice *dev, uint8_t index); int usb_desc_string(USBDevice *dev, int index, uint8_t *dest, size_t len); int usb_desc_get_descriptor(USBDevice *dev, int value, uint8_t *dest, size_t len); -int usb_desc_handle_control(USBDevice *dev, int request, int value, - int index, int length, uint8_t *data); +int usb_desc_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data); #endif /* QEMU_HW_USB_DESC_H */ diff --git a/hw/usb-ehci.c b/hw/usb-ehci.c new file mode 100644 index 0000000000..f63519ecf9 --- /dev/null +++ b/hw/usb-ehci.c @@ -0,0 +1,2037 @@ +/* + * QEMU USB EHCI Emulation + * + * Copyright(c) 2008 Emutex Ltd. (address@hidden) + * + * EHCI project was started by Mark Burkley, with contributions by + * Niels de Vos. David S. Ahern continued working on it. Kevin Wolf, + * Jan Kiszka and Vincent Palatin contributed bugfixes. + * + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or(at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + * + * TODO: + * o Downstream port handoff + */ + +#include "hw.h" +#include "qemu-timer.h" +#include "usb.h" +#include "pci.h" +#include "monitor.h" + +#define EHCI_DEBUG 0 +#define STATE_DEBUG 0 /* state transitions */ + +#if EHCI_DEBUG || STATE_DEBUG +#define DPRINTF printf +#else +#define DPRINTF(...) +#endif + +#if STATE_DEBUG +#define DPRINTF_ST DPRINTF +#else +#define DPRINTF_ST(...) 
+#endif + +/* internal processing - reset HC to try and recover */ +#define USB_RET_PROCERR (-99) + +#define MMIO_SIZE 0x1000 + +/* Capability Registers Base Address - section 2.2 */ +#define CAPREGBASE 0x0000 +#define CAPLENGTH CAPREGBASE + 0x0000 // 1-byte, 0x0001 reserved +#define HCIVERSION CAPREGBASE + 0x0002 // 2-bytes, i/f version # +#define HCSPARAMS CAPREGBASE + 0x0004 // 4-bytes, structural params +#define HCCPARAMS CAPREGBASE + 0x0008 // 4-bytes, capability params +#define EECP HCCPARAMS + 1 +#define HCSPPORTROUTE1 CAPREGBASE + 0x000c +#define HCSPPORTROUTE2 CAPREGBASE + 0x0010 + +#define OPREGBASE 0x0020 // Operational Registers Base Address + +#define USBCMD OPREGBASE + 0x0000 +#define USBCMD_RUNSTOP (1 << 0) // run / Stop +#define USBCMD_HCRESET (1 << 1) // HC Reset +#define USBCMD_FLS (3 << 2) // Frame List Size +#define USBCMD_FLS_SH 2 // Frame List Size Shift +#define USBCMD_PSE (1 << 4) // Periodic Schedule Enable +#define USBCMD_ASE (1 << 5) // Asynch Schedule Enable +#define USBCMD_IAAD (1 << 6) // Int Asynch Advance Doorbell +#define USBCMD_LHCR (1 << 7) // Light Host Controller Reset +#define USBCMD_ASPMC (3 << 8) // Async Sched Park Mode Count +#define USBCMD_ASPME (1 << 11) // Async Sched Park Mode Enable +#define USBCMD_ITC (0x7f << 16) // Int Threshold Control +#define USBCMD_ITC_SH 16 // Int Threshold Control Shift + +#define USBSTS OPREGBASE + 0x0004 +#define USBSTS_RO_MASK 0x0000003f +#define USBSTS_INT (1 << 0) // USB Interrupt +#define USBSTS_ERRINT (1 << 1) // Error Interrupt +#define USBSTS_PCD (1 << 2) // Port Change Detect +#define USBSTS_FLR (1 << 3) // Frame List Rollover +#define USBSTS_HSE (1 << 4) // Host System Error +#define USBSTS_IAA (1 << 5) // Interrupt on Async Advance +#define USBSTS_HALT (1 << 12) // HC Halted +#define USBSTS_REC (1 << 13) // Reclamation +#define USBSTS_PSS (1 << 14) // Periodic Schedule Status +#define USBSTS_ASS (1 << 15) // Asynchronous Schedule Status + +/* + * Interrupt enable bits correspond to the interrupt active bits in USBSTS + * so no need to redefine here. 
+ */ +#define USBINTR OPREGBASE + 0x0008 +#define USBINTR_MASK 0x0000003f + +#define FRINDEX OPREGBASE + 0x000c +#define CTRLDSSEGMENT OPREGBASE + 0x0010 +#define PERIODICLISTBASE OPREGBASE + 0x0014 +#define ASYNCLISTADDR OPREGBASE + 0x0018 +#define ASYNCLISTADDR_MASK 0xffffffe0 + +#define CONFIGFLAG OPREGBASE + 0x0040 + +#define PORTSC (OPREGBASE + 0x0044) +#define PORTSC_BEGIN PORTSC +#define PORTSC_END (PORTSC + 4 * NB_PORTS) +/* + * Bits that are reserverd or are read-only are masked out of values + * written to us by software + */ +#define PORTSC_RO_MASK 0x007021c5 +#define PORTSC_RWC_MASK 0x0000002a +#define PORTSC_WKOC_E (1 << 22) // Wake on Over Current Enable +#define PORTSC_WKDS_E (1 << 21) // Wake on Disconnect Enable +#define PORTSC_WKCN_E (1 << 20) // Wake on Connect Enable +#define PORTSC_PTC (15 << 16) // Port Test Control +#define PORTSC_PTC_SH 16 // Port Test Control shift +#define PORTSC_PIC (3 << 14) // Port Indicator Control +#define PORTSC_PIC_SH 14 // Port Indicator Control Shift +#define PORTSC_POWNER (1 << 13) // Port Owner +#define PORTSC_PPOWER (1 << 12) // Port Power +#define PORTSC_LINESTAT (3 << 10) // Port Line Status +#define PORTSC_LINESTAT_SH 10 // Port Line Status Shift +#define PORTSC_PRESET (1 << 8) // Port Reset +#define PORTSC_SUSPEND (1 << 7) // Port Suspend +#define PORTSC_FPRES (1 << 6) // Force Port Resume +#define PORTSC_OCC (1 << 5) // Over Current Change +#define PORTSC_OCA (1 << 4) // Over Current Active +#define PORTSC_PEDC (1 << 3) // Port Enable/Disable Change +#define PORTSC_PED (1 << 2) // Port Enable/Disable +#define PORTSC_CSC (1 << 1) // Connect Status Change +#define PORTSC_CONNECT (1 << 0) // Current Connect Status + +#define FRAME_TIMER_FREQ 1000 +#define FRAME_TIMER_USEC (1000000 / FRAME_TIMER_FREQ) + +#define NB_MAXINTRATE 8 // Max rate at which controller issues ints +#define NB_PORTS 4 // Number of downstream ports +#define BUFF_SIZE 5*4096 // Max bytes to transfer per transaction +#define MAX_ITERATIONS 20 // Max number of QH before we break the loop +#define MAX_QH 100 // Max allowable queue heads in a chain + +/* Internal periodic / asynchronous schedule state machine states + */ +typedef enum { + EST_INACTIVE = 1000, + EST_ACTIVE, + EST_EXECUTING, + EST_SLEEPING, + /* The following states are internal to the state machine function + */ + EST_WAITLISTHEAD, + EST_FETCHENTRY, + EST_FETCHQH, + EST_FETCHITD, + EST_ADVANCEQUEUE, + EST_FETCHQTD, + EST_EXECUTE, + EST_WRITEBACK, + EST_HORIZONTALQH +} EHCI_STATES; + +/* macros for accessing fields within next link pointer entry */ +#define NLPTR_GET(x) ((x) & 0xffffffe0) +#define NLPTR_TYPE_GET(x) (((x) >> 1) & 3) +#define NLPTR_TBIT(x) ((x) & 1) // 1=invalid, 0=valid + +/* link pointer types */ +#define NLPTR_TYPE_ITD 0 // isoc xfer descriptor +#define NLPTR_TYPE_QH 1 // queue head +#define NLPTR_TYPE_STITD 2 // split xaction, isoc xfer descriptor +#define NLPTR_TYPE_FSTN 3 // frame span traversal node + + +/* EHCI spec version 1.0 Section 3.3 + */ +typedef struct EHCIitd { + uint32_t next; + + uint32_t transact[8]; +#define ITD_XACT_ACTIVE (1 << 31) +#define ITD_XACT_DBERROR (1 << 30) +#define ITD_XACT_BABBLE (1 << 29) +#define ITD_XACT_XACTERR (1 << 28) +#define ITD_XACT_LENGTH_MASK 0x0fff0000 +#define ITD_XACT_LENGTH_SH 16 +#define ITD_XACT_IOC (1 << 15) +#define ITD_XACT_PGSEL_MASK 0x00007000 +#define ITD_XACT_PGSEL_SH 12 +#define ITD_XACT_OFFSET_MASK 0x00000fff + + uint32_t bufptr[7]; +#define ITD_BUFPTR_MASK 0xfffff000 +#define ITD_BUFPTR_SH 12 +#define ITD_BUFPTR_EP_MASK 
0x00000f00 +#define ITD_BUFPTR_EP_SH 8 +#define ITD_BUFPTR_DEVADDR_MASK 0x0000007f +#define ITD_BUFPTR_DEVADDR_SH 0 +#define ITD_BUFPTR_DIRECTION (1 << 11) +#define ITD_BUFPTR_MAXPKT_MASK 0x000007ff +#define ITD_BUFPTR_MAXPKT_SH 0 +#define ITD_BUFPTR_MULT_MASK 0x00000003 +} EHCIitd; + +/* EHCI spec version 1.0 Section 3.4 + */ +typedef struct EHCIsitd { + uint32_t next; // Standard next link pointer + uint32_t epchar; +#define SITD_EPCHAR_IO (1 << 31) +#define SITD_EPCHAR_PORTNUM_MASK 0x7f000000 +#define SITD_EPCHAR_PORTNUM_SH 24 +#define SITD_EPCHAR_HUBADD_MASK 0x007f0000 +#define SITD_EPCHAR_HUBADDR_SH 16 +#define SITD_EPCHAR_EPNUM_MASK 0x00000f00 +#define SITD_EPCHAR_EPNUM_SH 8 +#define SITD_EPCHAR_DEVADDR_MASK 0x0000007f + + uint32_t uframe; +#define SITD_UFRAME_CMASK_MASK 0x0000ff00 +#define SITD_UFRAME_CMASK_SH 8 +#define SITD_UFRAME_SMASK_MASK 0x000000ff + + uint32_t results; +#define SITD_RESULTS_IOC (1 << 31) +#define SITD_RESULTS_PGSEL (1 << 30) +#define SITD_RESULTS_TBYTES_MASK 0x03ff0000 +#define SITD_RESULTS_TYBYTES_SH 16 +#define SITD_RESULTS_CPROGMASK_MASK 0x0000ff00 +#define SITD_RESULTS_CPROGMASK_SH 8 +#define SITD_RESULTS_ACTIVE (1 << 7) +#define SITD_RESULTS_ERR (1 << 6) +#define SITD_RESULTS_DBERR (1 << 5) +#define SITD_RESULTS_BABBLE (1 << 4) +#define SITD_RESULTS_XACTERR (1 << 3) +#define SITD_RESULTS_MISSEDUF (1 << 2) +#define SITD_RESULTS_SPLITXSTATE (1 << 1) + + uint32_t bufptr[2]; +#define SITD_BUFPTR_MASK 0xfffff000 +#define SITD_BUFPTR_CURROFF_MASK 0x00000fff +#define SITD_BUFPTR_TPOS_MASK 0x00000018 +#define SITD_BUFPTR_TPOS_SH 3 +#define SITD_BUFPTR_TCNT_MASK 0x00000007 + + uint32_t backptr; // Standard next link pointer +} EHCIsitd; + +/* EHCI spec version 1.0 Section 3.5 + */ +typedef struct EHCIqtd { + uint32_t next; // Standard next link pointer + uint32_t altnext; // Standard next link pointer + uint32_t token; +#define QTD_TOKEN_DTOGGLE (1 << 31) +#define QTD_TOKEN_TBYTES_MASK 0x7fff0000 +#define QTD_TOKEN_TBYTES_SH 16 +#define QTD_TOKEN_IOC (1 << 15) +#define QTD_TOKEN_CPAGE_MASK 0x00007000 +#define QTD_TOKEN_CPAGE_SH 12 +#define QTD_TOKEN_CERR_MASK 0x00000c00 +#define QTD_TOKEN_CERR_SH 10 +#define QTD_TOKEN_PID_MASK 0x00000300 +#define QTD_TOKEN_PID_SH 8 +#define QTD_TOKEN_ACTIVE (1 << 7) +#define QTD_TOKEN_HALT (1 << 6) +#define QTD_TOKEN_DBERR (1 << 5) +#define QTD_TOKEN_BABBLE (1 << 4) +#define QTD_TOKEN_XACTERR (1 << 3) +#define QTD_TOKEN_MISSEDUF (1 << 2) +#define QTD_TOKEN_SPLITXSTATE (1 << 1) +#define QTD_TOKEN_PING (1 << 0) + + uint32_t bufptr[5]; // Standard buffer pointer +#define QTD_BUFPTR_MASK 0xfffff000 +} EHCIqtd; + +/* EHCI spec version 1.0 Section 3.6 + */ +typedef struct EHCIqh { + uint32_t next; // Standard next link pointer + + /* endpoint characteristics */ + uint32_t epchar; +#define QH_EPCHAR_RL_MASK 0xf0000000 +#define QH_EPCHAR_RL_SH 28 +#define QH_EPCHAR_C (1 << 27) +#define QH_EPCHAR_MPLEN_MASK 0x07FF0000 +#define QH_EPCHAR_MPLEN_SH 16 +#define QH_EPCHAR_H (1 << 15) +#define QH_EPCHAR_DTC (1 << 14) +#define QH_EPCHAR_EPS_MASK 0x00003000 +#define QH_EPCHAR_EPS_SH 12 +#define EHCI_QH_EPS_FULL 0 +#define EHCI_QH_EPS_LOW 1 +#define EHCI_QH_EPS_HIGH 2 +#define EHCI_QH_EPS_RESERVED 3 + +#define QH_EPCHAR_EP_MASK 0x00000f00 +#define QH_EPCHAR_EP_SH 8 +#define QH_EPCHAR_I (1 << 7) +#define QH_EPCHAR_DEVADDR_MASK 0x0000007f +#define QH_EPCHAR_DEVADDR_SH 0 + + /* endpoint capabilities */ + uint32_t epcap; +#define QH_EPCAP_MULT_MASK 0xc0000000 +#define QH_EPCAP_MULT_SH 30 +#define QH_EPCAP_PORTNUM_MASK 0x3f800000 +#define 
QH_EPCAP_PORTNUM_SH 23 +#define QH_EPCAP_HUBADDR_MASK 0x007f0000 +#define QH_EPCAP_HUBADDR_SH 16 +#define QH_EPCAP_CMASK_MASK 0x0000ff00 +#define QH_EPCAP_CMASK_SH 8 +#define QH_EPCAP_SMASK_MASK 0x000000ff +#define QH_EPCAP_SMASK_SH 0 + + uint32_t current_qtd; // Standard next link pointer + uint32_t next_qtd; // Standard next link pointer + uint32_t altnext_qtd; +#define QH_ALTNEXT_NAKCNT_MASK 0x0000001e +#define QH_ALTNEXT_NAKCNT_SH 1 + + uint32_t token; // Same as QTD token + uint32_t bufptr[5]; // Standard buffer pointer +#define BUFPTR_CPROGMASK_MASK 0x000000ff +#define BUFPTR_FRAMETAG_MASK 0x0000001f +#define BUFPTR_SBYTES_MASK 0x00000fe0 +#define BUFPTR_SBYTES_SH 5 +} EHCIqh; + +/* EHCI spec version 1.0 Section 3.7 + */ +typedef struct EHCIfstn { + uint32_t next; // Standard next link pointer + uint32_t backptr; // Standard next link pointer +} EHCIfstn; + +typedef struct { + PCIDevice dev; + qemu_irq irq; + target_phys_addr_t mem_base; + int mem; + int num_ports; + /* + * EHCI spec version 1.0 Section 2.3 + * Host Controller Operational Registers + */ + union { + uint8_t mmio[MMIO_SIZE]; + struct { + uint8_t cap[OPREGBASE]; + uint32_t usbcmd; + uint32_t usbsts; + uint32_t usbintr; + uint32_t frindex; + uint32_t ctrldssegment; + uint32_t periodiclistbase; + uint32_t asynclistaddr; + uint32_t notused[9]; + uint32_t configflag; + uint32_t portsc[NB_PORTS]; + }; + }; + /* + * Internal states, shadow registers, etc + */ + uint32_t sofv; + QEMUTimer *frame_timer; + int attach_poll_counter; + int astate; // Current state in asynchronous schedule + int pstate; // Current state in periodic schedule + USBPort ports[NB_PORTS]; + uint8_t buffer[BUFF_SIZE]; + uint32_t usbsts_pending; + + /* cached data from guest - needs to be flushed + * when guest removes an entry (doorbell, handshake sequence) + */ + EHCIqh qh; // copy of current QH (being worked on) + uint32_t qhaddr; // address QH read from + + EHCIqtd qtd; // copy of current QTD (being worked on) + uint32_t qtdaddr; // address QTD read from + + uint32_t itdaddr; // current ITD + + uint32_t fetch_addr; // which address to look at next + + USBBus bus; + USBPacket usb_packet; + int async_complete; + uint32_t tbytes; + int pid; + int exec_status; + int isoch_pause; + uint32_t last_run_usec; + uint32_t frame_end_usec; +} EHCIState; + +#define SET_LAST_RUN_CLOCK(s) \ + (s)->last_run_usec = qemu_get_clock_ns(vm_clock) / 1000; + +/* nifty macros from Arnon's EHCI version */ +#define get_field(data, field) \ + (((data) & field##_MASK) >> field##_SH) + +#define set_field(data, newval, field) do { \ + uint32_t val = *data; \ + val &= ~ field##_MASK; \ + val |= ((newval) << field##_SH) & field##_MASK; \ + *data = val; \ + } while(0) + + +#if EHCI_DEBUG +static const char *addr2str(unsigned addr) +{ + const char *r = " unknown"; + const char *n[] = { + [ CAPLENGTH ] = " CAPLENGTH", + [ HCIVERSION ] = "HCIVERSION", + [ HCSPARAMS ] = " HCSPARAMS", + [ HCCPARAMS ] = " HCCPARAMS", + [ USBCMD ] = " COMMAND", + [ USBSTS ] = " STATUS", + [ USBINTR ] = " INTERRUPT", + [ FRINDEX ] = " FRAME IDX", + [ PERIODICLISTBASE ] = "P-LIST BASE", + [ ASYNCLISTADDR ] = "A-LIST ADDR", + [ PORTSC_BEGIN ... 
+ PORTSC_END ] = "PORT STATUS", + [ CONFIGFLAG ] = "CONFIG FLAG", + }; + + if (addr < ARRAY_SIZE(n) && n[addr] != NULL) { + return n[addr]; + } else { + return r; + } +} +#endif + + +static inline void ehci_set_interrupt(EHCIState *s, int intr) +{ + int level = 0; + + // TODO honour interrupt threshold requests + + s->usbsts |= intr; + + if ((s->usbsts & USBINTR_MASK) & s->usbintr) { + level = 1; + } + + qemu_set_irq(s->irq, level); +} + +static inline void ehci_record_interrupt(EHCIState *s, int intr) +{ + s->usbsts_pending |= intr; +} + +static inline void ehci_commit_interrupt(EHCIState *s) +{ + if (!s->usbsts_pending) { + return; + } + ehci_set_interrupt(s, s->usbsts_pending); + s->usbsts_pending = 0; +} + +/* Attach or detach a device on root hub */ + +static void ehci_attach(USBPort *port) +{ + EHCIState *s = port->opaque; + uint32_t *portsc = &s->portsc[port->index]; + + DPRINTF("ehci_attach invoked for index %d, portsc 0x%x, desc %s\n", + port->index, *portsc, port->dev->product_desc); + + *portsc |= PORTSC_CONNECT; + *portsc |= PORTSC_CSC; + + /* + * If a high speed device is attached then we own this port(indicated + * by zero in the PORTSC_POWNER bit field) so set the status bit + * and set an interrupt if enabled. + */ + if ( !(*portsc & PORTSC_POWNER)) { + ehci_set_interrupt(s, USBSTS_PCD); + } +} + +static void ehci_detach(USBPort *port) +{ + EHCIState *s = port->opaque; + uint32_t *portsc = &s->portsc[port->index]; + + DPRINTF("ehci_attach invoked for index %d, portsc 0x%x\n", + port->index, *portsc); + + *portsc &= ~PORTSC_CONNECT; + *portsc |= PORTSC_CSC; + + /* + * If a high speed device is attached then we own this port(indicated + * by zero in the PORTSC_POWNER bit field) so set the status bit + * and set an interrupt if enabled. 
+ */ + if ( !(*portsc & PORTSC_POWNER)) { + ehci_set_interrupt(s, USBSTS_PCD); + } +} + +/* 4.1 host controller initialization */ +static void ehci_reset(void *opaque) +{ + EHCIState *s = opaque; + uint8_t *pci_conf; + int i; + + pci_conf = s->dev.config; + + memset(&s->mmio[OPREGBASE], 0x00, MMIO_SIZE - OPREGBASE); + + s->usbcmd = NB_MAXINTRATE << USBCMD_ITC_SH; + s->usbsts = USBSTS_HALT; + + s->astate = EST_INACTIVE; + s->pstate = EST_INACTIVE; + s->async_complete = 0; + s->isoch_pause = -1; + s->attach_poll_counter = 0; + + for(i = 0; i < NB_PORTS; i++) { + s->portsc[i] = PORTSC_POWNER | PORTSC_PPOWER; + + if (s->ports[i].dev) { + usb_attach(&s->ports[i], s->ports[i].dev); + } + } +} + +static uint32_t ehci_mem_readb(void *ptr, target_phys_addr_t addr) +{ + EHCIState *s = ptr; + uint32_t val; + + val = s->mmio[addr]; + + return val; +} + +static uint32_t ehci_mem_readw(void *ptr, target_phys_addr_t addr) +{ + EHCIState *s = ptr; + uint32_t val; + + val = s->mmio[addr] | (s->mmio[addr+1] << 8); + + return val; +} + +static uint32_t ehci_mem_readl(void *ptr, target_phys_addr_t addr) +{ + EHCIState *s = ptr; + uint32_t val; + + val = s->mmio[addr] | (s->mmio[addr+1] << 8) | + (s->mmio[addr+2] << 16) | (s->mmio[addr+3] << 24); + + return val; +} + +static void ehci_mem_writeb(void *ptr, target_phys_addr_t addr, uint32_t val) +{ + fprintf(stderr, "EHCI doesn't handle byte writes to MMIO\n"); + exit(1); +} + +static void ehci_mem_writew(void *ptr, target_phys_addr_t addr, uint32_t val) +{ + fprintf(stderr, "EHCI doesn't handle 16-bit writes to MMIO\n"); + exit(1); +} + +static void handle_port_status_write(EHCIState *s, int port, uint32_t val) +{ + uint32_t *portsc = &s->portsc[port]; + int rwc; + USBDevice *dev = s->ports[port].dev; + + DPRINTF("port_status_write: " + "PORTSC (port %d) curr %08X new %08X rw-clear %08X rw %08X\n", + port, *portsc, val, (val & PORTSC_RWC_MASK), val & PORTSC_RO_MASK); + + rwc = val & PORTSC_RWC_MASK; + val &= PORTSC_RO_MASK; + + // handle_read_write_clear(&val, portsc, PORTSC_PEDC | PORTSC_CSC); + + *portsc &= ~rwc; + + if ((val & PORTSC_PRESET) && !(*portsc & PORTSC_PRESET)) { + DPRINTF("port_status_write: USBTRAN Port %d reset begin\n", port); + } + + if (!(val & PORTSC_PRESET) &&(*portsc & PORTSC_PRESET)) { + DPRINTF("port_status_write: USBTRAN Port %d reset done\n", port); + usb_attach(&s->ports[port], dev); + + // TODO how to handle reset of ports with no device + if (dev) { + usb_send_msg(dev, USB_MSG_RESET); + } + + if (s->ports[port].dev) { + DPRINTF("port_status_write: " + "Device was connected before reset, clearing CSC bit\n"); + *portsc &= ~PORTSC_CSC; + } + + /* Table 2.16 Set the enable bit(and enable bit change) to indicate + * to SW that this port has a high speed device attached + * + * TODO - when to disable? 
+ */ + val |= PORTSC_PED; + val |= PORTSC_PEDC; + } + + *portsc &= ~PORTSC_RO_MASK; + *portsc |= val; + DPRINTF("port_status_write: Port %d status set to 0x%08x\n", port, *portsc); +} + +static void ehci_mem_writel(void *ptr, target_phys_addr_t addr, uint32_t val) +{ + EHCIState *s = ptr; + int i; +#if EHCI_DEBUG + const char *str; +#endif + + /* Only aligned reads are allowed on OHCI */ + if (addr & 3) { + fprintf(stderr, "usb-ehci: Mis-aligned write to addr 0x" + TARGET_FMT_plx "\n", addr); + return; + } + + if (addr >= PORTSC && addr < PORTSC + 4 * NB_PORTS) { + handle_port_status_write(s, (addr-PORTSC)/4, val); + return; + } + + if (addr < OPREGBASE) { + fprintf(stderr, "usb-ehci: write attempt to read-only register" + TARGET_FMT_plx "\n", addr); + return; + } + + + /* Do any register specific pre-write processing here. */ +#if EHCI_DEBUG + str = addr2str((unsigned) addr); +#endif + switch(addr) { + case USBCMD: + DPRINTF("ehci_mem_writel: USBCMD val=0x%08X, current cmd=0x%08X\n", + val, s->usbcmd); + + if ((val & USBCMD_RUNSTOP) && !(s->usbcmd & USBCMD_RUNSTOP)) { + DPRINTF("ehci_mem_writel: %s run, clear halt\n", str); + qemu_mod_timer(s->frame_timer, qemu_get_clock_ns(vm_clock)); + SET_LAST_RUN_CLOCK(s); + s->usbsts &= ~USBSTS_HALT; + } + + if (!(val & USBCMD_RUNSTOP) && (s->usbcmd & USBCMD_RUNSTOP)) { + DPRINTF(" ** STOP **\n"); + qemu_del_timer(s->frame_timer); + // TODO - should finish out some stuff before setting halt + s->usbsts |= USBSTS_HALT; + } + + if (val & USBCMD_HCRESET) { + DPRINTF("ehci_mem_writel: %s run, resetting\n", str); + ehci_reset(s); + val &= ~USBCMD_HCRESET; + } + + /* not supporting dynamic frame list size at the moment */ + if ((val & USBCMD_FLS) && !(s->usbcmd & USBCMD_FLS)) { + fprintf(stderr, "attempt to set frame list size -- value %d\n", + val & USBCMD_FLS); + val &= ~USBCMD_FLS; + } +#if EHCI_DEBUG + if ((val & USBCMD_PSE) && !(s->usbcmd & USBCMD_PSE)) { + DPRINTF("periodic scheduling enabled\n"); + } + if (!(val & USBCMD_PSE) && (s->usbcmd & USBCMD_PSE)) { + DPRINTF("periodic scheduling disabled\n"); + } + if ((val & USBCMD_ASE) && !(s->usbcmd & USBCMD_ASE)) { + DPRINTF("asynchronous scheduling enabled\n"); + } + if (!(val & USBCMD_ASE) && (s->usbcmd & USBCMD_ASE)) { + DPRINTF("asynchronous scheduling disabled\n"); + } + if ((val & USBCMD_IAAD) && !(s->usbcmd & USBCMD_IAAD)) { + DPRINTF("doorbell request received\n"); + } + if ((val & USBCMD_LHCR) && !(s->usbcmd & USBCMD_LHCR)) { + DPRINTF("light host controller reset received\n"); + } + if ((val & USBCMD_ITC) != (s->usbcmd & USBCMD_ITC)) { + DPRINTF("interrupt threshold control set to %x\n", + (val & USBCMD_ITC)>>USBCMD_ITC_SH); + } +#endif + break; + + + case USBSTS: + val &= USBSTS_RO_MASK; // bits 6 thru 31 are RO + DPRINTF("ehci_mem_writel: %s RWC set to 0x%08X\n", str, val); + + val = (s->usbsts &= ~val); // bits 0 thru 5 are R/WC + + DPRINTF("ehci_mem_writel: %s updating interrupt condition\n", str); + ehci_set_interrupt(s, 0); + break; + + + case USBINTR: + val &= USBINTR_MASK; + DPRINTF("ehci_mem_writel: %s set to 0x%08X\n", str, val); + break; + + case FRINDEX: + s->sofv = val >> 3; + DPRINTF("ehci_mem_writel: %s set to 0x%08X\n", str, val); + break; + + case CONFIGFLAG: + DPRINTF("ehci_mem_writel: %s set to 0x%08X\n", str, val); + val &= 0x1; + if (val) { + for(i = 0; i < NB_PORTS; i++) + s->portsc[i] &= ~PORTSC_POWNER; + } + break; + + case PERIODICLISTBASE: + if ((s->usbcmd & USBCMD_PSE) && (s->usbcmd & USBCMD_RUNSTOP)) { + fprintf(stderr, + "ehci: PERIODIC list base register set while 
periodic schedule\n" + " is enabled and HC is enabled\n"); + } + DPRINTF("ehci_mem_writel: P-LIST BASE set to 0x%08X\n", val); + break; + + case ASYNCLISTADDR: + if ((s->usbcmd & USBCMD_ASE) && (s->usbcmd & USBCMD_RUNSTOP)) { + fprintf(stderr, + "ehci: ASYNC list address register set while async schedule\n" + " is enabled and HC is enabled\n"); + } + DPRINTF("ehci_mem_writel: A-LIST ADDR set to 0x%08X\n", val); + break; + } + + *(uint32_t *)(&s->mmio[addr]) = val; +} + + +// TODO : Put in common header file, duplication from usb-ohci.c + +/* Get an array of dwords from main memory */ +static inline int get_dwords(uint32_t addr, uint32_t *buf, int num) +{ + int i; + + for(i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { + cpu_physical_memory_rw(addr,(uint8_t *)buf, sizeof(*buf), 0); + *buf = le32_to_cpu(*buf); + } + + return 1; +} + +/* Put an array of dwords in to main memory */ +static inline int put_dwords(uint32_t addr, uint32_t *buf, int num) +{ + int i; + + for(i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { + uint32_t tmp = cpu_to_le32(*buf); + cpu_physical_memory_rw(addr,(uint8_t *)&tmp, sizeof(tmp), 1); + } + + return 1; +} + +// 4.10.2 + +static int ehci_qh_do_overlay(EHCIState *ehci, EHCIqh *qh, EHCIqtd *qtd) +{ + int i; + int dtoggle; + int ping; + int eps; + int reload; + + // remember values in fields to preserve in qh after overlay + + dtoggle = qh->token & QTD_TOKEN_DTOGGLE; + ping = qh->token & QTD_TOKEN_PING; + + DPRINTF("setting qh.current from %08X to 0x%08X\n", qh->current_qtd, + ehci->qtdaddr); + qh->current_qtd = ehci->qtdaddr; + qh->next_qtd = qtd->next; + qh->altnext_qtd = qtd->altnext; + qh->token = qtd->token; + + + eps = get_field(qh->epchar, QH_EPCHAR_EPS); + if (eps == EHCI_QH_EPS_HIGH) { + qh->token &= ~QTD_TOKEN_PING; + qh->token |= ping; + } + + reload = get_field(qh->epchar, QH_EPCHAR_RL); + set_field(&qh->altnext_qtd, reload, QH_ALTNEXT_NAKCNT); + + for (i = 0; i < 5; i++) { + qh->bufptr[i] = qtd->bufptr[i]; + } + + if (!(qh->epchar & QH_EPCHAR_DTC)) { + // preserve QH DT bit + qh->token &= ~QTD_TOKEN_DTOGGLE; + qh->token |= dtoggle; + } + + qh->bufptr[1] &= ~BUFPTR_CPROGMASK_MASK; + qh->bufptr[2] &= ~BUFPTR_FRAMETAG_MASK; + + put_dwords(NLPTR_GET(ehci->qhaddr), (uint32_t *) qh, sizeof(EHCIqh) >> 2); + + return 0; +} + +static int ehci_buffer_rw(uint8_t *buffer, EHCIqh *qh, int bytes, int rw) +{ + int bufpos = 0; + int cpage, offset; + uint32_t head; + uint32_t tail; + + + if (!bytes) { + return 0; + } + + cpage = get_field(qh->token, QTD_TOKEN_CPAGE); + if (cpage > 4) { + fprintf(stderr, "cpage out of range (%d)\n", cpage); + return USB_RET_PROCERR; + } + + offset = qh->bufptr[0] & ~QTD_BUFPTR_MASK; + DPRINTF("ehci_buffer_rw: %sing %d bytes %08x cpage %d offset %d\n", + rw ? "writ" : "read", bytes, qh->bufptr[0], cpage, offset); + + do { + /* start and end of this page */ + head = qh->bufptr[cpage] & QTD_BUFPTR_MASK; + tail = head + ~QTD_BUFPTR_MASK + 1; + /* add offset into page */ + head |= offset; + + if (bytes <= (tail - head)) { + tail = head + bytes; + } + + DPRINTF("DATA %s cpage:%d head:%08X tail:%08X target:%08X\n", + rw ? 
"WRITE" : "READ ", cpage, head, tail, bufpos); + + cpu_physical_memory_rw(head, &buffer[bufpos], tail - head, rw); + + bufpos += (tail - head); + bytes -= (tail - head); + + if (bytes > 0) { + cpage++; + offset = 0; + } + } while (bytes > 0); + + /* save cpage */ + set_field(&qh->token, cpage, QTD_TOKEN_CPAGE); + + /* save offset into cpage */ + offset = tail - head; + qh->bufptr[0] &= ~QTD_BUFPTR_MASK; + qh->bufptr[0] |= offset; + + return 0; +} + +static void ehci_async_complete_packet(USBDevice *dev, USBPacket *packet) +{ + EHCIState *ehci = container_of(packet, EHCIState, usb_packet); + + DPRINTF("Async packet complete\n"); + ehci->async_complete = 1; + ehci->exec_status = packet->len; +} + +static int ehci_execute_complete(EHCIState *ehci, EHCIqh *qh, int ret) +{ + int c_err, reload; + + if (ret == USB_RET_ASYNC && !ehci->async_complete) { + DPRINTF("not done yet\n"); + return ret; + } + + ehci->async_complete = 0; + + DPRINTF("execute_complete: qhaddr 0x%x, next %x, qtdaddr 0x%x, status %d\n", + ehci->qhaddr, qh->next, ehci->qtdaddr, ret); + + if (ret < 0) { +err: + /* TO-DO: put this is in a function that can be invoked below as well */ + c_err = get_field(qh->token, QTD_TOKEN_CERR); + c_err--; + set_field(&qh->token, c_err, QTD_TOKEN_CERR); + + switch(ret) { + case USB_RET_NODEV: + fprintf(stderr, "USB no device\n"); + break; + case USB_RET_STALL: + fprintf(stderr, "USB stall\n"); + qh->token |= QTD_TOKEN_HALT; + ehci_record_interrupt(ehci, USBSTS_ERRINT); + break; + case USB_RET_NAK: + /* 4.10.3 */ + reload = get_field(qh->epchar, QH_EPCHAR_RL); + if ((ehci->pid == USB_TOKEN_IN) && reload) { + int nakcnt = get_field(qh->altnext_qtd, QH_ALTNEXT_NAKCNT); + nakcnt--; + set_field(&qh->altnext_qtd, nakcnt, QH_ALTNEXT_NAKCNT); + } else if (!reload) { + return USB_RET_NAK; + } + break; + case USB_RET_BABBLE: + fprintf(stderr, "USB babble TODO\n"); + qh->token |= QTD_TOKEN_BABBLE; + ehci_record_interrupt(ehci, USBSTS_ERRINT); + break; + default: + fprintf(stderr, "USB invalid response %d to handle\n", ret); + /* TO-DO: transaction error */ + ret = USB_RET_PROCERR; + break; + } + } else { + // DPRINTF("Short packet condition\n"); + // TODO check 4.12 for splits + + if ((ret > ehci->tbytes) && (ehci->pid == USB_TOKEN_IN)) { + ret = USB_RET_BABBLE; + goto err; + } + + if (ehci->tbytes && ehci->pid == USB_TOKEN_IN) { + if (ehci_buffer_rw(ehci->buffer, qh, ret, 1) != 0) { + return USB_RET_PROCERR; + } + ehci->tbytes -= ret; + } else { + ehci->tbytes = 0; + } + + DPRINTF("updating tbytes to %d\n", ehci->tbytes); + set_field(&qh->token, ehci->tbytes, QTD_TOKEN_TBYTES); + } + + qh->token ^= QTD_TOKEN_DTOGGLE; + qh->token &= ~QTD_TOKEN_ACTIVE; + + if ((ret >= 0) && (qh->token & QTD_TOKEN_IOC)) { + ehci_record_interrupt(ehci, USBSTS_INT); + } + + return ret; +} + +// 4.10.3 + +static int ehci_execute(EHCIState *ehci, EHCIqh *qh) +{ + USBPort *port; + USBDevice *dev; + int ret; + int i; + int endp; + int devadr; + + if ( !(qh->token & QTD_TOKEN_ACTIVE)) { + fprintf(stderr, "Attempting to execute inactive QH\n"); + return USB_RET_PROCERR; + } + + ehci->tbytes = (qh->token & QTD_TOKEN_TBYTES_MASK) >> QTD_TOKEN_TBYTES_SH; + if (ehci->tbytes > BUFF_SIZE) { + fprintf(stderr, "Request for more bytes than allowed\n"); + return USB_RET_PROCERR; + } + + ehci->pid = (qh->token & QTD_TOKEN_PID_MASK) >> QTD_TOKEN_PID_SH; + switch(ehci->pid) { + case 0: ehci->pid = USB_TOKEN_OUT; break; + case 1: ehci->pid = USB_TOKEN_IN; break; + case 2: ehci->pid = USB_TOKEN_SETUP; break; + default: fprintf(stderr, "bad 
token\n"); break; + } + + if ((ehci->tbytes && ehci->pid != USB_TOKEN_IN) && + (ehci_buffer_rw(ehci->buffer, qh, ehci->tbytes, 0) != 0)) { + return USB_RET_PROCERR; + } + + endp = get_field(qh->epchar, QH_EPCHAR_EP); + devadr = get_field(qh->epchar, QH_EPCHAR_DEVADDR); + + ret = USB_RET_NODEV; + + // TO-DO: associating device with ehci port + for(i = 0; i < NB_PORTS; i++) { + port = &ehci->ports[i]; + dev = port->dev; + + // TODO sometime we will also need to check if we are the port owner + + if (!(ehci->portsc[i] &(PORTSC_CONNECT))) { + DPRINTF("Port %d, no exec, not connected(%08X)\n", + i, ehci->portsc[i]); + continue; + } + + ehci->usb_packet.pid = ehci->pid; + ehci->usb_packet.devaddr = devadr; + ehci->usb_packet.devep = endp; + ehci->usb_packet.data = ehci->buffer; + ehci->usb_packet.len = ehci->tbytes; + + ret = usb_handle_packet(dev, &ehci->usb_packet); + + DPRINTF("submit: qh %x next %x qtd %x pid %x len %d (total %d) endp %x ret %d\n", + ehci->qhaddr, qh->next, ehci->qtdaddr, ehci->pid, + ehci->usb_packet.len, ehci->tbytes, endp, ret); + + if (ret != USB_RET_NODEV) { + break; + } + } + + if (ret > BUFF_SIZE) { + fprintf(stderr, "ret from usb_handle_packet > BUFF_SIZE\n"); + return USB_RET_PROCERR; + } + + if (ret == USB_RET_ASYNC) { + ehci->async_complete = 0; + } + + return ret; +} + +/* 4.7.2 + */ + +static int ehci_process_itd(EHCIState *ehci, + EHCIitd *itd) +{ + USBPort *port; + USBDevice *dev; + int ret; + int i, j; + int ptr; + int pid; + int pg; + int len; + int dir; + int devadr; + int endp; + int maxpkt; + + dir =(itd->bufptr[1] & ITD_BUFPTR_DIRECTION); + devadr = get_field(itd->bufptr[0], ITD_BUFPTR_DEVADDR); + endp = get_field(itd->bufptr[0], ITD_BUFPTR_EP); + maxpkt = get_field(itd->bufptr[1], ITD_BUFPTR_MAXPKT); + + for(i = 0; i < 8; i++) { + if (itd->transact[i] & ITD_XACT_ACTIVE) { + DPRINTF("ISOCHRONOUS active for frame %d, interval %d\n", + ehci->frindex >> 3, i); + + pg = get_field(itd->transact[i], ITD_XACT_PGSEL); + ptr = (itd->bufptr[pg] & ITD_BUFPTR_MASK) | + (itd->transact[i] & ITD_XACT_OFFSET_MASK); + len = get_field(itd->transact[i], ITD_XACT_LENGTH); + + if (len > BUFF_SIZE) { + return USB_RET_PROCERR; + } + + DPRINTF("ISOCH: buffer %08X len %d\n", ptr, len); + + if (!dir) { + cpu_physical_memory_rw(ptr, &ehci->buffer[0], len, 0); + pid = USB_TOKEN_OUT; + } else + pid = USB_TOKEN_IN; + + ret = USB_RET_NODEV; + + for (j = 0; j < NB_PORTS; j++) { + port = &ehci->ports[j]; + dev = port->dev; + + // TODO sometime we will also need to check if we are the port owner + + if (!(ehci->portsc[j] &(PORTSC_CONNECT))) { + DPRINTF("Port %d, no exec, not connected(%08X)\n", + j, ehci->portsc[j]); + continue; + } + + ehci->usb_packet.pid = ehci->pid; + ehci->usb_packet.devaddr = devadr; + ehci->usb_packet.devep = endp; + ehci->usb_packet.data = ehci->buffer; + ehci->usb_packet.len = len; + + DPRINTF("calling usb_handle_packet\n"); + ret = usb_handle_packet(dev, &ehci->usb_packet); + + if (ret != USB_RET_NODEV) { + break; + } + } + + /* In isoch, there is no facility to indicate a NAK so let's + * instead just complete a zero-byte transaction. Setting + * DBERR seems too draconian. 
+ */ + + if (ret == USB_RET_NAK) { + if (ehci->isoch_pause > 0) { + DPRINTF("ISOCH: received a NAK but paused so returning\n"); + ehci->isoch_pause--; + return 0; + } else if (ehci->isoch_pause == -1) { + DPRINTF("ISOCH: recv NAK & isoch pause inactive, setting\n"); + // Pause frindex for up to 50 msec waiting for data from + // remote + ehci->isoch_pause = 50; + return 0; + } else { + DPRINTF("ISOCH: isoch pause timeout! return 0\n"); + ret = 0; + } + } else { + DPRINTF("ISOCH: received ACK, clearing pause\n"); + ehci->isoch_pause = -1; + } + + if (ret >= 0) { + itd->transact[i] &= ~ITD_XACT_ACTIVE; + + if (itd->transact[i] & ITD_XACT_IOC) { + ehci_record_interrupt(ehci, USBSTS_INT); + } + } + + if (ret >= 0 && dir) { + cpu_physical_memory_rw(ptr, &ehci->buffer[0], len, 1); + + if (ret != len) { + DPRINTF("ISOCH IN expected %d, got %d\n", + len, ret); + set_field(&itd->transact[i], ret, ITD_XACT_LENGTH); + } + } + } + } + return 0; +} + +/* This state is the entry point for asynchronous schedule + * processing. Entry here constitutes an EHCI start event state (4.8.5) + */ +static int ehci_state_waitlisthead(EHCIState *ehci, int async, int *state) +{ + EHCIqh *qh = &ehci->qh; + int i = 0; + int again = 0; + uint32_t entry = ehci->asynclistaddr; + + /* set reclamation flag at start event (4.8.6) */ + if (async) { + ehci->usbsts |= USBSTS_REC; + } + + /* Find the head of the list (4.9.1.1) */ + for(i = 0; i < MAX_QH; i++) { + get_dwords(NLPTR_GET(entry), (uint32_t *) qh, sizeof(EHCIqh) >> 2); + + if (qh->epchar & QH_EPCHAR_H) { + DPRINTF_ST("WAITLISTHEAD: QH %08X is the HEAD of the list\n", + entry); + if (async) { + entry |= (NLPTR_TYPE_QH << 1); + } + + ehci->fetch_addr = entry; + *state = EST_FETCHENTRY; + again = 1; + goto out; + } + + DPRINTF_ST("WAITLISTHEAD: QH %08X is NOT the HEAD of the list\n", + entry); + entry = qh->next; + if (entry == ehci->asynclistaddr) { + DPRINTF("WAITLISTHEAD: reached beginning of QH list\n"); + break; + } + } + + /* no head found for list. */ + + *state = EST_ACTIVE; + +out: + return again; +} + + +/* This state is the entry point for periodic schedule processing as + * well as being a continuation state for async processing. 
+ */ +static int ehci_state_fetchentry(EHCIState *ehci, int async, int *state) +{ + int again = 0; + uint32_t entry = ehci->fetch_addr; + +#if EHCI_DEBUG == 0 + if (qemu_get_clock_ns(vm_clock) / 1000 >= ehci->frame_end_usec) { + if (async) { + DPRINTF("FETCHENTRY: FRAME timer elapsed, exit state machine\n"); + goto out; + } else { + DPRINTF("FETCHENTRY: WARNING " + "- frame timer elapsed during periodic\n"); + } + } +#endif + if (entry < 0x1000) { + DPRINTF("fetchentry: entry invalid (0x%08x)\n", entry); + *state = EST_ACTIVE; + goto out; + } + + /* section 4.8, only QH in async schedule */ + if (async && (NLPTR_TYPE_GET(entry) != NLPTR_TYPE_QH)) { + fprintf(stderr, "non queue head request in async schedule\n"); + return -1; + } + + switch (NLPTR_TYPE_GET(entry)) { + case NLPTR_TYPE_QH: + DPRINTF_ST("FETCHENTRY: entry %X is a Queue Head\n", entry); + *state = EST_FETCHQH; + ehci->qhaddr = entry; + again = 1; + break; + + case NLPTR_TYPE_ITD: + DPRINTF_ST("FETCHENTRY: entry %X is an ITD\n", entry); + *state = EST_FETCHITD; + ehci->itdaddr = entry; + again = 1; + break; + + default: + // TODO: handle siTD and FSTN types + fprintf(stderr, "FETCHENTRY: entry at %X is of type %d " + "which is not supported yet\n", entry, NLPTR_TYPE_GET(entry)); + return -1; + } + +out: + return again; +} + +static int ehci_state_fetchqh(EHCIState *ehci, int async, int *state) +{ + EHCIqh *qh = &ehci->qh; + int reload; + int again = 0; + + get_dwords(NLPTR_GET(ehci->qhaddr), (uint32_t *) qh, sizeof(EHCIqh) >> 2); + + if (async && (qh->epchar & QH_EPCHAR_H)) { + + /* EHCI spec version 1.0 Section 4.8.3 & 4.10.1 */ + if (ehci->usbsts & USBSTS_REC) { + ehci->usbsts &= ~USBSTS_REC; + } else { + DPRINTF("FETCHQH: QH 0x%08x. H-bit set, reclamation status reset" + " - done processing\n", ehci->qhaddr); + *state = EST_ACTIVE; + goto out; + } + } + +#if EHCI_DEBUG + if (ehci->qhaddr != qh->next) { + DPRINTF("FETCHQH: QH 0x%08x (h %x halt %x active %x) next 0x%08x\n", + ehci->qhaddr, + qh->epchar & QH_EPCHAR_H, + qh->token & QTD_TOKEN_HALT, + qh->token & QTD_TOKEN_ACTIVE, + qh->next); + } +#endif + + reload = get_field(qh->epchar, QH_EPCHAR_RL); + if (reload) { + DPRINTF_ST("FETCHQH: reloading nakcnt to %d\n", reload); + set_field(&qh->altnext_qtd, reload, QH_ALTNEXT_NAKCNT); + } + + if (qh->token & QTD_TOKEN_HALT) { + DPRINTF_ST("FETCHQH: QH Halted, go horizontal\n"); + *state = EST_HORIZONTALQH; + again = 1; + + } else if ((qh->token & QTD_TOKEN_ACTIVE) && (qh->current_qtd > 0x1000)) { + DPRINTF_ST("FETCHQH: Active, !Halt, execute - fetch qTD\n"); + ehci->qtdaddr = qh->current_qtd; + *state = EST_FETCHQTD; + again = 1; + + } else { + /* EHCI spec version 1.0 Section 4.10.2 */ + DPRINTF_ST("FETCHQH: !Active, !Halt, advance queue\n"); + *state = EST_ADVANCEQUEUE; + again = 1; + } + +out: + return again; +} + +static int ehci_state_fetchitd(EHCIState *ehci, int async, int *state) +{ + EHCIitd itd; + + get_dwords(NLPTR_GET(ehci->itdaddr),(uint32_t *) &itd, + sizeof(EHCIitd) >> 2); + DPRINTF_ST("FETCHITD: Fetched ITD at address %08X " "(next is %08X)\n", + ehci->itdaddr, itd.next); + + if (ehci_process_itd(ehci, &itd) != 0) { + return -1; + } + + put_dwords(NLPTR_GET(ehci->itdaddr), (uint32_t *) &itd, + sizeof(EHCIitd) >> 2); + ehci->fetch_addr = itd.next; + *state = EST_FETCHENTRY; + + return 1; +} + +/* Section 4.10.2 - paragraph 3 */ +static int ehci_state_advqueue(EHCIState *ehci, int async, int *state) +{ +#if 0 + /* TO-DO: 4.10.2 - paragraph 2 + * if I-bit is set to 1 and QH is not active + * go to horizontal QH + */ + 
if (I-bit set) { + *state = EST_HORIZONTALQH; + goto out; + } +#endif + + /* + * want data and alt-next qTD is valid + */ + if (((ehci->qh.token & QTD_TOKEN_TBYTES_MASK) != 0) && + (ehci->qh.altnext_qtd > 0x1000) && + (NLPTR_TBIT(ehci->qh.altnext_qtd) == 0)) { + DPRINTF_ST("ADVQUEUE: goto alt next qTD. " + "curr 0x%08x next 0x%08x alt 0x%08x (next qh %x)\n", + ehci->qh.current_qtd, ehci->qh.altnext_qtd, + ehci->qh.next_qtd, ehci->qh.next); + ehci->qtdaddr = ehci->qh.altnext_qtd; + *state = EST_FETCHQTD; + + /* + * next qTD is valid + */ + } else if ((ehci->qh.next_qtd > 0x1000) && + (NLPTR_TBIT(ehci->qh.next_qtd) == 0)) { + DPRINTF_ST("ADVQUEUE: next qTD. " + "curr 0x%08x next 0x%08x alt 0x%08x (next qh %x)\n", + ehci->qh.current_qtd, ehci->qh.altnext_qtd, + ehci->qh.next_qtd, ehci->qh.next); + ehci->qtdaddr = ehci->qh.next_qtd; + *state = EST_FETCHQTD; + + /* + * no valid qTD, try next QH + */ + } else { + DPRINTF_ST("ADVQUEUE: go to horizontal QH\n"); + *state = EST_HORIZONTALQH; + } + + return 1; +} + +/* Section 4.10.2 - paragraph 4 */ +static int ehci_state_fetchqtd(EHCIState *ehci, int async, int *state) +{ + EHCIqtd *qtd = &ehci->qtd; + int again = 0; + + get_dwords(NLPTR_GET(ehci->qtdaddr),(uint32_t *) qtd, sizeof(EHCIqtd) >> 2); + + if (qtd->token & QTD_TOKEN_ACTIVE) { + *state = EST_EXECUTE; + again = 1; + } else { + *state = EST_HORIZONTALQH; + again = 1; + } + + return again; +} + +static int ehci_state_horizqh(EHCIState *ehci, int async, int *state) +{ + int again = 0; + + if (ehci->fetch_addr != ehci->qh.next) { + ehci->fetch_addr = ehci->qh.next; + *state = EST_FETCHENTRY; + again = 1; + } else { + *state = EST_ACTIVE; + } + + return again; +} + +static int ehci_state_execute(EHCIState *ehci, int async, int *state) +{ + EHCIqh *qh = &ehci->qh; + EHCIqtd *qtd = &ehci->qtd; + int again = 0; + int reload, nakcnt; + int smask; + + if (async) { + DPRINTF_ST(">>>>> ASYNC STATE MACHINE execute QH 0x%08x, QTD 0x%08x\n", + ehci->qhaddr, ehci->qtdaddr); + } else { + DPRINTF_ST(">>>>> PERIODIC STATE MACHINE execute\n"); + } + + if (ehci_qh_do_overlay(ehci, qh, qtd) != 0) { + return -1; + } + + smask = get_field(qh->epcap, QH_EPCAP_SMASK); + + if (!smask) { + reload = get_field(qh->epchar, QH_EPCHAR_RL); + nakcnt = get_field(qh->altnext_qtd, QH_ALTNEXT_NAKCNT); + if (reload && !nakcnt) { + DPRINTF_ST("EXECUTE: RL != 0 but NakCnt == 0 -- no execute\n"); + *state = EST_HORIZONTALQH; + again = 1; + goto out; + } + } + + // TODO verify enough time remains in the uframe as in 4.4.1.1 + // TODO write back ptr to async list when done or out of time + // TODO Windows does not seem to ever set the MULT field + + if (!async) { + int transactCtr = get_field(qh->epcap, QH_EPCAP_MULT); + if (!transactCtr) { + DPRINTF("ZERO transactctr for int qh, go HORIZ\n"); + *state = EST_HORIZONTALQH; + again = 1; + goto out; + } + } + + if (async) { + ehci->usbsts |= USBSTS_REC; + } + + ehci->exec_status = ehci_execute(ehci, qh); + if (ehci->exec_status == USB_RET_PROCERR) { + again = -1; + goto out; + } + *state = EST_EXECUTING; + + if (ehci->exec_status != USB_RET_ASYNC) { + again = 1; + } + +out: + return again; +} + +static int ehci_state_executing(EHCIState *ehci, int async, int *state) +{ + EHCIqh *qh = &ehci->qh; + int again = 0; + int reload, nakcnt; + + ehci->exec_status = ehci_execute_complete(ehci, qh, ehci->exec_status); + if (ehci->exec_status == USB_RET_ASYNC) { + goto out; + } + if (ehci->exec_status == USB_RET_PROCERR) { + again = -1; + goto out; + } + + // 4.10.3 + if (!async) { + int 
transactCtr = get_field(qh->epcap, QH_EPCAP_MULT); + transactCtr--; + set_field(&qh->epcap, transactCtr, QH_EPCAP_MULT); + // 4.10.3, bottom of page 82, should exit this state when transaction + // counter decrements to 0 + } + + + reload = get_field(qh->epchar, QH_EPCHAR_RL); + if (reload) { + nakcnt = get_field(qh->altnext_qtd, QH_ALTNEXT_NAKCNT); + if (ehci->exec_status == USB_RET_NAK) { + if (nakcnt) { + nakcnt--; + } + DPRINTF_ST("EXECUTING: Nak occured and RL != 0, dec NakCnt to %d\n", + nakcnt); + } else { + nakcnt = reload; + DPRINTF_ST("EXECUTING: Nak didn't occur, reloading to %d\n", + nakcnt); + } + set_field(&qh->altnext_qtd, nakcnt, QH_ALTNEXT_NAKCNT); + } + + /* + * Write the qh back to guest physical memory. This step isn't + * in the EHCI spec but we need to do it since we don't share + * physical memory with our guest VM. + */ + + DPRINTF("EXECUTING: write QH to VM memory: qhaddr 0x%x, next 0x%x\n", + ehci->qhaddr, qh->next); + put_dwords(NLPTR_GET(ehci->qhaddr), (uint32_t *) qh, sizeof(EHCIqh) >> 2); + + /* 4.10.5 */ + if ((ehci->exec_status == USB_RET_NAK) || (qh->token & QTD_TOKEN_ACTIVE)) { + *state = EST_HORIZONTALQH; + } else { + *state = EST_WRITEBACK; + } + + again = 1; + +out: + return again; +} + + +static int ehci_state_writeback(EHCIState *ehci, int async, int *state) +{ + EHCIqh *qh = &ehci->qh; + int again = 0; + + /* Write back the QTD from the QH area */ + DPRINTF_ST("WRITEBACK: write QTD to VM memory\n"); + put_dwords(NLPTR_GET(ehci->qtdaddr),(uint32_t *) &qh->next_qtd, + sizeof(EHCIqtd) >> 2); + + /* TODO confirm next state. For now, keep going if async + * but stop after one qtd if periodic + */ + //if (async) { + *state = EST_ADVANCEQUEUE; + again = 1; + //} else { + // *state = EST_ACTIVE; + //} + return again; +} + +/* + * This is the state machine that is common to both async and periodic + */ + +static int ehci_advance_state(EHCIState *ehci, + int async, + int state) +{ + int again; + int iter = 0; + + do { + if (state == EST_FETCHQH) { + iter++; + /* if we are roaming a lot of QH without executing a qTD + * something is wrong with the linked list. TO-DO: why is + * this hack needed? 
+ */ + if (iter > MAX_ITERATIONS) { + DPRINTF("\n*** advance_state: bailing on MAX ITERATIONS***\n"); + state = EST_ACTIVE; + break; + } + } + switch(state) { + case EST_WAITLISTHEAD: + again = ehci_state_waitlisthead(ehci, async, &state); + break; + + case EST_FETCHENTRY: + again = ehci_state_fetchentry(ehci, async, &state); + break; + + case EST_FETCHQH: + again = ehci_state_fetchqh(ehci, async, &state); + break; + + case EST_FETCHITD: + again = ehci_state_fetchitd(ehci, async, &state); + break; + + case EST_ADVANCEQUEUE: + again = ehci_state_advqueue(ehci, async, &state); + break; + + case EST_FETCHQTD: + again = ehci_state_fetchqtd(ehci, async, &state); + break; + + case EST_HORIZONTALQH: + again = ehci_state_horizqh(ehci, async, &state); + break; + + case EST_EXECUTE: + iter = 0; + again = ehci_state_execute(ehci, async, &state); + break; + + case EST_EXECUTING: + again = ehci_state_executing(ehci, async, &state); + break; + + case EST_WRITEBACK: + again = ehci_state_writeback(ehci, async, &state); + break; + + default: + fprintf(stderr, "Bad state!\n"); + again = -1; + break; + } + + if (again < 0) { + fprintf(stderr, "processing error - resetting ehci HC\n"); + ehci_reset(ehci); + again = 0; + } + } + while (again); + + ehci_commit_interrupt(ehci); + return state; +} + +static void ehci_advance_async_state(EHCIState *ehci) +{ + EHCIqh qh; + int state = ehci->astate; + + switch(state) { + case EST_INACTIVE: + if (!(ehci->usbcmd & USBCMD_ASE)) { + break; + } + ehci->usbsts |= USBSTS_ASS; + ehci->astate = EST_ACTIVE; + // No break, fall through to ACTIVE + + case EST_ACTIVE: + if ( !(ehci->usbcmd & USBCMD_ASE)) { + ehci->usbsts &= ~USBSTS_ASS; + ehci->astate = EST_INACTIVE; + break; + } + + /* If the doorbell is set, the guest wants to make a change to the + * schedule. The host controller needs to release cached data. + * (section 4.8.2) + */ + if (ehci->usbcmd & USBCMD_IAAD) { + DPRINTF("ASYNC: doorbell request acknowledged\n"); + ehci->usbcmd &= ~USBCMD_IAAD; + ehci_set_interrupt(ehci, USBSTS_IAA); + break; + } + + /* make sure guest has acknowledged */ + /* TO-DO: is this really needed? */ + if (ehci->usbsts & USBSTS_IAA) { + DPRINTF("IAA status bit still set.\n"); + break; + } + + DPRINTF_ST("ASYNC: waiting for listhead, starting at %08x\n", + ehci->asynclistaddr); + /* check that address register has been set */ + if (ehci->asynclistaddr == 0) { + break; + } + + state = EST_WAITLISTHEAD; + /* fall through */ + + case EST_FETCHENTRY: + /* fall through */ + + case EST_EXECUTING: + get_dwords(NLPTR_GET(ehci->qhaddr), (uint32_t *) &qh, + sizeof(EHCIqh) >> 2); + ehci->astate = ehci_advance_state(ehci, 1, state); + break; + + default: + /* this should only be due to a developer mistake */ + fprintf(stderr, "ehci: Bad asynchronous state %d. 
" + "Resetting to active\n", ehci->astate); + ehci->astate = EST_ACTIVE; + } +} + +static void ehci_advance_periodic_state(EHCIState *ehci) +{ + uint32_t entry; + uint32_t list; + + // 4.6 + + switch(ehci->pstate) { + case EST_INACTIVE: + if ( !(ehci->frindex & 7) && (ehci->usbcmd & USBCMD_PSE)) { + DPRINTF("PERIODIC going active\n"); + ehci->usbsts |= USBSTS_PSS; + ehci->pstate = EST_ACTIVE; + // No break, fall through to ACTIVE + } else + break; + + case EST_ACTIVE: + if ( !(ehci->frindex & 7) && !(ehci->usbcmd & USBCMD_PSE)) { + DPRINTF("PERIODIC going inactive\n"); + ehci->usbsts &= ~USBSTS_PSS; + ehci->pstate = EST_INACTIVE; + break; + } + + list = ehci->periodiclistbase & 0xfffff000; + /* check that register has been set */ + if (list == 0) { + break; + } + list |= ((ehci->frindex & 0x1ff8) >> 1); + + cpu_physical_memory_rw(list, (uint8_t *) &entry, sizeof entry, 0); + entry = le32_to_cpu(entry); + + DPRINTF("PERIODIC state adv fr=%d. [%08X] -> %08X\n", + ehci->frindex / 8, list, entry); + ehci->fetch_addr = entry; + ehci->pstate = ehci_advance_state(ehci, 0, EST_FETCHENTRY); + break; + + case EST_EXECUTING: + DPRINTF("PERIODIC state adv for executing\n"); + ehci->pstate = ehci_advance_state(ehci, 0, EST_EXECUTING); + break; + + default: + /* this should only be due to a developer mistake */ + fprintf(stderr, "ehci: Bad periodic state %d. " + "Resetting to active\n", ehci->pstate); + ehci->pstate = EST_ACTIVE; + } +} + +static void ehci_frame_timer(void *opaque) +{ + EHCIState *ehci = opaque; + int64_t expire_time, t_now; + int usec_elapsed; + int frames; + int usec_now; + int i; + int skipped_frames = 0; + + + t_now = qemu_get_clock_ns(vm_clock); + expire_time = t_now + (get_ticks_per_sec() / FRAME_TIMER_FREQ); + if (expire_time == t_now) { + expire_time++; + } + + usec_now = t_now / 1000; + usec_elapsed = usec_now - ehci->last_run_usec; + frames = usec_elapsed / FRAME_TIMER_USEC; + ehci->frame_end_usec = usec_now + FRAME_TIMER_USEC - 10; + + for (i = 0; i < frames; i++) { + if ( !(ehci->usbsts & USBSTS_HALT)) { + if (ehci->isoch_pause <= 0) { + ehci->frindex += 8; + } + + if (ehci->frindex > 0x00001fff) { + ehci->frindex = 0; + ehci_set_interrupt(ehci, USBSTS_FLR); + } + + ehci->sofv = (ehci->frindex - 1) >> 3; + ehci->sofv &= 0x000003ff; + } + + if (frames - i > 10) { + skipped_frames++; + } else { + // TODO could this cause periodic frames to get skipped if async + // active? 
+ if (ehci->astate != EST_EXECUTING) { + ehci_advance_periodic_state(ehci); + } + } + + ehci->last_run_usec += FRAME_TIMER_USEC; + } + +#if 0 + if (skipped_frames) { + DPRINTF("WARNING - EHCI skipped %d frames\n", skipped_frames); + } +#endif + + /* Async is not inside loop since it executes everything it can once + * called + */ + if (ehci->pstate != EST_EXECUTING) { + ehci_advance_async_state(ehci); + } + + qemu_mod_timer(ehci->frame_timer, expire_time); +} + +static CPUReadMemoryFunc *ehci_readfn[3]={ + ehci_mem_readb, + ehci_mem_readw, + ehci_mem_readl +}; + +static CPUWriteMemoryFunc *ehci_writefn[3]={ + ehci_mem_writeb, + ehci_mem_writew, + ehci_mem_writel +}; + +static void ehci_map(PCIDevice *pci_dev, int region_num, + pcibus_t addr, pcibus_t size, int type) +{ + EHCIState *s =(EHCIState *)pci_dev; + + DPRINTF("ehci_map: region %d, addr %08" PRIx64 ", size %" PRId64 ", s->mem %08X\n", + region_num, addr, size, s->mem); + s->mem_base = addr; + cpu_register_physical_memory(addr, size, s->mem); +} + +static int usb_ehci_initfn(PCIDevice *dev); + +static USBPortOps ehci_port_ops = { + .attach = ehci_attach, + .detach = ehci_detach, + .complete = ehci_async_complete_packet, +}; + +static PCIDeviceInfo ehci_info = { + .qdev.name = "usb-ehci", + .qdev.size = sizeof(EHCIState), + .init = usb_ehci_initfn, +}; + +static int usb_ehci_initfn(PCIDevice *dev) +{ + EHCIState *s = DO_UPCAST(EHCIState, dev, dev); + uint8_t *pci_conf = s->dev.config; + int i; + + pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL); + pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_INTEL_82801D); + pci_set_byte(&pci_conf[PCI_REVISION_ID], 0x10); + pci_set_byte(&pci_conf[PCI_CLASS_PROG], 0x20); + pci_config_set_class(pci_conf, PCI_CLASS_SERIAL_USB); + pci_set_byte(&pci_conf[PCI_HEADER_TYPE], PCI_HEADER_TYPE_NORMAL); + + /* capabilities pointer */ + pci_set_byte(&pci_conf[PCI_CAPABILITY_LIST], 0x00); + //pci_set_byte(&pci_conf[PCI_CAPABILITY_LIST], 0x50); + + pci_set_byte(&pci_conf[PCI_INTERRUPT_PIN], 4); // interrupt pin 3 + pci_set_byte(&pci_conf[PCI_MIN_GNT], 0); + pci_set_byte(&pci_conf[PCI_MAX_LAT], 0); + + // pci_conf[0x50] = 0x01; // power management caps + + pci_set_byte(&pci_conf[0x60], 0x20); // spec release number (2.1.4) + pci_set_byte(&pci_conf[0x61], 0x20); // frame length adjustment (2.1.5) + pci_set_word(&pci_conf[0x62], 0x00); // port wake up capability (2.1.6) + + pci_conf[0x64] = 0x00; + pci_conf[0x65] = 0x00; + pci_conf[0x66] = 0x00; + pci_conf[0x67] = 0x00; + pci_conf[0x68] = 0x01; + pci_conf[0x69] = 0x00; + pci_conf[0x6a] = 0x00; + pci_conf[0x6b] = 0x00; // USBLEGSUP + pci_conf[0x6c] = 0x00; + pci_conf[0x6d] = 0x00; + pci_conf[0x6e] = 0x00; + pci_conf[0x6f] = 0xc0; // USBLEFCTLSTS + + // 2.2 host controller interface version + s->mmio[0x00] = (uint8_t) OPREGBASE; + s->mmio[0x01] = 0x00; + s->mmio[0x02] = 0x00; + s->mmio[0x03] = 0x01; // HC version + s->mmio[0x04] = NB_PORTS; // Number of downstream ports + s->mmio[0x05] = 0x00; // No companion ports at present + s->mmio[0x06] = 0x00; + s->mmio[0x07] = 0x00; + s->mmio[0x08] = 0x80; // We can cache whole frame, not 64-bit capable + s->mmio[0x09] = 0x68; // EECP + s->mmio[0x0a] = 0x00; + s->mmio[0x0b] = 0x00; + + s->irq = s->dev.irq[3]; + + usb_bus_new(&s->bus, &s->dev.qdev); + for(i = 0; i < NB_PORTS; i++) { + usb_register_port(&s->bus, &s->ports[i], s, i, &ehci_port_ops, + USB_SPEED_MASK_HIGH); + usb_port_location(&s->ports[i], NULL, i+1); + s->ports[i].dev = 0; + } + + s->frame_timer = qemu_new_timer_ns(vm_clock, ehci_frame_timer, s); + + 
qemu_register_reset(ehci_reset, s); + + s->mem = cpu_register_io_memory(ehci_readfn, ehci_writefn, s, + DEVICE_LITTLE_ENDIAN); + + pci_register_bar(&s->dev, 0, MMIO_SIZE, PCI_BASE_ADDRESS_SPACE_MEMORY, + ehci_map); + + fprintf(stderr, "*** EHCI support is under development ***\n"); + + return 0; +} + +static void ehci_register(void) +{ + pci_qdev_register(&ehci_info); +} +device_init(ehci_register); + +/* + * vim: expandtab ts=4 + */ diff --git a/hw/usb-hid.c b/hw/usb-hid.c index 89c293c466..53b261c3b9 100644 --- a/hw/usb-hid.c +++ b/hw/usb-hid.c @@ -211,6 +211,7 @@ static const USBDescDevice desc_device_mouse = { .iConfiguration = STR_CONFIG_MOUSE, .bmAttributes = 0xa0, .bMaxPower = 50, + .nif = 1, .ifs = &desc_iface_mouse, }, }, @@ -227,6 +228,7 @@ static const USBDescDevice desc_device_tablet = { .iConfiguration = STR_CONFIG_TABLET, .bmAttributes = 0xa0, .bMaxPower = 50, + .nif = 1, .ifs = &desc_iface_tablet, }, }, @@ -243,6 +245,7 @@ static const USBDescDevice desc_device_keyboard = { .iConfiguration = STR_CONFIG_KEYBOARD, .bmAttributes = 0xa0, .bMaxPower = 50, + .nif = 1, .ifs = &desc_iface_keyboard, }, }, @@ -724,13 +727,13 @@ static void usb_hid_set_next_idle(USBHIDState *s, int64_t curtime) s->next_idle_clock = curtime + (get_ticks_per_sec() * s->idle * 4) / 1000; } -static int usb_hid_handle_control(USBDevice *dev, int request, int value, - int index, int length, uint8_t *data) +static int usb_hid_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) { USBHIDState *s = (USBHIDState *)dev; int ret; - ret = usb_desc_handle_control(dev, request, value, index, length, data); + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); if (ret >= 0) { return ret; } diff --git a/hw/usb-hub.c b/hw/usb-hub.c index 3dd31ba31f..6e2a35839d 100644 --- a/hw/usb-hub.c +++ b/hw/usb-hub.c @@ -119,6 +119,7 @@ static const USBDescDevice desc_device_hub = { .bNumInterfaces = 1, .bConfigurationValue = 1, .bmAttributes = 0xe0, + .nif = 1, .ifs = &desc_iface_hub, }, }, @@ -256,6 +257,19 @@ static void usb_hub_wakeup(USBDevice *dev) } } +static void usb_hub_complete(USBDevice *dev, USBPacket *packet) +{ + USBHubState *s = dev->port->opaque; + + /* + * Just pass it along upstream for now. + * + * If we ever implement usb 2.0 split transactions this will + * become a little more complicated ... 
+ */ + usb_packet_complete(&s->dev, packet); +} + static void usb_hub_handle_attach(USBDevice *dev) { USBHubState *s = DO_UPCAST(USBHubState, dev, dev); @@ -271,13 +285,13 @@ static void usb_hub_handle_reset(USBDevice *dev) /* XXX: do it */ } -static int usb_hub_handle_control(USBDevice *dev, int request, int value, - int index, int length, uint8_t *data) +static int usb_hub_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) { USBHubState *s = (USBHubState *)dev; int ret; - ret = usb_desc_handle_control(dev, request, value, index, length, data); + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); if (ret >= 0) { return ret; } @@ -481,7 +495,7 @@ static int usb_hub_broadcast_packet(USBHubState *s, USBPacket *p) port = &s->ports[i]; dev = port->port.dev; if (dev && (port->wPortStatus & PORT_STAT_ENABLE)) { - ret = dev->info->handle_packet(dev, p); + ret = usb_handle_packet(dev, p); if (ret != USB_RET_NODEV) { return ret; } @@ -524,6 +538,7 @@ static USBPortOps usb_hub_port_ops = { .attach = usb_hub_attach, .detach = usb_hub_detach, .wakeup = usb_hub_wakeup, + .complete = usb_hub_complete, }; static int usb_hub_initfn(USBDevice *dev) diff --git a/hw/usb-msd.c b/hw/usb-msd.c index 947fd3f83c..c59797b27e 100644 --- a/hw/usb-msd.c +++ b/hw/usb-msd.c @@ -48,6 +48,7 @@ typedef struct { uint32_t data_len; uint32_t residue; uint32_t tag; + SCSIRequest *req; SCSIBus bus; BlockConf conf; SCSIDevice *scsi_dev; @@ -119,6 +120,7 @@ static const USBDescDevice desc_device_full = { .bConfigurationValue = 1, .iConfiguration = STR_CONFIG_FULL, .bmAttributes = 0xc0, + .nif = 1, .ifs = &desc_iface_full, }, }, @@ -153,6 +155,7 @@ static const USBDescDevice desc_device_high = { .bConfigurationValue = 1, .iConfiguration = STR_CONFIG_HIGH, .bmAttributes = 0xc0, + .nif = 1, .ifs = &desc_iface_high, }, }, @@ -189,11 +192,7 @@ static void usb_msd_copy_data(MSDState *s) s->scsi_buf += len; s->data_len -= len; if (s->scsi_len == 0 || s->data_len == 0) { - if (s->mode == USB_MSDM_DATAIN) { - s->scsi_dev->info->read_data(s->scsi_dev, s->tag); - } else if (s->mode == USB_MSDM_DATAOUT) { - s->scsi_dev->info->write_data(s->scsi_dev, s->tag); - } + scsi_req_continue(s->req); } } @@ -211,54 +210,78 @@ static void usb_msd_send_status(MSDState *s, USBPacket *p) memcpy(p->data, &csw, len); } -static void usb_msd_command_complete(SCSIBus *bus, int reason, uint32_t tag, - uint32_t arg) +static void usb_msd_transfer_data(SCSIRequest *req, uint32_t len) { - MSDState *s = DO_UPCAST(MSDState, dev.qdev, bus->qbus.parent); + MSDState *s = DO_UPCAST(MSDState, dev.qdev, req->bus->qbus.parent); USBPacket *p = s->packet; - if (tag != s->tag) { - fprintf(stderr, "usb-msd: Unexpected SCSI Tag 0x%x\n", tag); - } - if (reason == SCSI_REASON_DONE) { - DPRINTF("Command complete %d\n", arg); - s->residue = s->data_len; - s->result = arg != 0; - if (s->packet) { - if (s->data_len == 0 && s->mode == USB_MSDM_DATAOUT) { - /* A deferred packet with no write data remaining must be - the status read packet. 
*/ - usb_msd_send_status(s, p); - s->mode = USB_MSDM_CBW; - } else { - if (s->data_len) { - s->data_len -= s->usb_len; - if (s->mode == USB_MSDM_DATAIN) - memset(s->usb_buf, 0, s->usb_len); - s->usb_len = 0; - } - if (s->data_len == 0) - s->mode = USB_MSDM_CSW; - } - s->packet = NULL; - usb_packet_complete(p); - } else if (s->data_len == 0) { - s->mode = USB_MSDM_CSW; - } - return; + if (req->tag != s->tag) { + fprintf(stderr, "usb-msd: Unexpected SCSI Tag 0x%x\n", req->tag); } - s->scsi_len = arg; - s->scsi_buf = s->scsi_dev->info->get_buf(s->scsi_dev, tag); + + assert((s->mode == USB_MSDM_DATAOUT) == (req->cmd.mode == SCSI_XFER_TO_DEV)); + s->scsi_len = len; + s->scsi_buf = scsi_req_get_buf(req); if (p) { usb_msd_copy_data(s); - if (s->usb_len == 0) { + if (s->packet && s->usb_len == 0) { /* Set s->packet to NULL before calling usb_packet_complete because another request may be issued before usb_packet_complete returns. */ DPRINTF("Packet complete %p\n", p); s->packet = NULL; - usb_packet_complete(p); + usb_packet_complete(&s->dev, p); + } + } +} + +static void usb_msd_command_complete(SCSIRequest *req, uint32_t status) +{ + MSDState *s = DO_UPCAST(MSDState, dev.qdev, req->bus->qbus.parent); + USBPacket *p = s->packet; + + if (req->tag != s->tag) { + fprintf(stderr, "usb-msd: Unexpected SCSI Tag 0x%x\n", req->tag); + } + DPRINTF("Command complete %d\n", status); + s->residue = s->data_len; + s->result = status != 0; + if (s->packet) { + if (s->data_len == 0 && s->mode == USB_MSDM_DATAOUT) { + /* A deferred packet with no write data remaining must be + the status read packet. */ + usb_msd_send_status(s, p); + s->mode = USB_MSDM_CBW; + } else { + if (s->data_len) { + s->data_len -= s->usb_len; + if (s->mode == USB_MSDM_DATAIN) { + memset(s->usb_buf, 0, s->usb_len); + } + s->usb_len = 0; + } + if (s->data_len == 0) { + s->mode = USB_MSDM_CSW; + } } + s->packet = NULL; + usb_packet_complete(&s->dev, p); + } else if (s->data_len == 0) { + s->mode = USB_MSDM_CSW; + } + scsi_req_unref(req); + s->req = NULL; +} + +static void usb_msd_request_cancelled(SCSIRequest *req) +{ + MSDState *s = DO_UPCAST(MSDState, dev.qdev, req->bus->qbus.parent); + + if (req == s->req) { + scsi_req_unref(s->req); + s->req = NULL; + s->packet = NULL; + s->scsi_len = 0; } } @@ -270,13 +293,13 @@ static void usb_msd_handle_reset(USBDevice *dev) s->mode = USB_MSDM_CBW; } -static int usb_msd_handle_control(USBDevice *dev, int request, int value, - int index, int length, uint8_t *data) +static int usb_msd_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) { MSDState *s = (MSDState *)dev; int ret; - ret = usb_desc_handle_control(dev, request, value, index, length, data); + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); if (ret >= 0) { return ret; } @@ -313,12 +336,10 @@ static int usb_msd_handle_control(USBDevice *dev, int request, int value, return ret; } -static void usb_msd_cancel_io(USBPacket *p, void *opaque) +static void usb_msd_cancel_io(USBDevice *dev, USBPacket *p) { - MSDState *s = opaque; - s->scsi_dev->info->cancel_io(s->scsi_dev, s->tag); - s->packet = NULL; - s->scsi_len = 0; + MSDState *s = DO_UPCAST(MSDState, dev, dev); + scsi_req_cancel(s->req); } static int usb_msd_handle_data(USBDevice *dev, USBPacket *p) @@ -364,15 +385,13 @@ static int usb_msd_handle_data(USBDevice *dev, USBPacket *p) DPRINTF("Command tag 0x%x flags %08x len %d data %d\n", s->tag, cbw.flags, cbw.cmd_len, s->data_len); s->residue = 0; - 
s->scsi_dev->info->send_command(s->scsi_dev, s->tag, cbw.cmd, 0); + s->scsi_len = 0; + s->req = scsi_req_new(s->scsi_dev, s->tag, 0); + scsi_req_enqueue(s->req, cbw.cmd); /* ??? Should check that USB and SCSI data transfer directions match. */ - if (s->residue == 0) { - if (s->mode == USB_MSDM_DATAIN) { - s->scsi_dev->info->read_data(s->scsi_dev, s->tag); - } else if (s->mode == USB_MSDM_DATAOUT) { - s->scsi_dev->info->write_data(s->scsi_dev, s->tag); - } + if (s->mode != USB_MSDM_CSW && s->residue == 0) { + scsi_req_continue(s->req); } ret = len; break; @@ -395,7 +414,6 @@ static int usb_msd_handle_data(USBDevice *dev, USBPacket *p) } if (s->usb_len) { DPRINTF("Deferring packet %p\n", p); - usb_defer_packet(p, usb_msd_cancel_io, s); s->packet = p; ret = USB_RET_ASYNC; } else { @@ -418,7 +436,6 @@ static int usb_msd_handle_data(USBDevice *dev, USBPacket *p) if (s->data_len != 0 || len < 13) goto fail; /* Waiting for SCSI write to complete. */ - usb_defer_packet(p, usb_msd_cancel_io, s); s->packet = p; ret = USB_RET_ASYNC; break; @@ -452,7 +469,6 @@ static int usb_msd_handle_data(USBDevice *dev, USBPacket *p) } if (s->usb_len) { DPRINTF("Deferring packet %p\n", p); - usb_defer_packet(p, usb_msd_cancel_io, s); s->packet = p; ret = USB_RET_ASYNC; } else { @@ -486,6 +502,12 @@ static void usb_msd_password_cb(void *opaque, int err) qdev_unplug(&s->dev.qdev); } +static const struct SCSIBusOps usb_msd_scsi_ops = { + .transfer_data = usb_msd_transfer_data, + .complete = usb_msd_command_complete, + .cancel = usb_msd_request_cancelled +}; + static int usb_msd_initfn(USBDevice *dev) { MSDState *s = DO_UPCAST(MSDState, dev, dev); @@ -515,7 +537,7 @@ static int usb_msd_initfn(USBDevice *dev) } usb_desc_init(dev); - scsi_bus_new(&s->bus, &s->dev.qdev, 0, 1, usb_msd_command_complete); + scsi_bus_new(&s->bus, &s->dev.qdev, 0, 1, &usb_msd_scsi_ops); s->scsi_dev = scsi_bus_legacy_add_drive(&s->bus, bs, 0, !!s->removable); if (!s->scsi_dev) { return -1; @@ -601,6 +623,7 @@ static struct USBDeviceInfo msd_info = { .usb_desc = &desc, .init = usb_msd_initfn, .handle_packet = usb_generic_handle_packet, + .cancel_packet = usb_msd_cancel_io, .handle_attach = usb_desc_attach, .handle_reset = usb_msd_handle_reset, .handle_control = usb_msd_handle_control, diff --git a/hw/usb-musb.c b/hw/usb-musb.c index 15bc549a85..6037193db8 100644 --- a/hw/usb-musb.c +++ b/hw/usb-musb.c @@ -261,13 +261,24 @@ static void musb_attach(USBPort *port); static void musb_detach(USBPort *port); +static void musb_schedule_cb(USBDevice *dev, USBPacket *p); static USBPortOps musb_port_ops = { .attach = musb_attach, .detach = musb_detach, + .complete = musb_schedule_cb, }; -typedef struct { +typedef struct MUSBPacket MUSBPacket; +typedef struct MUSBEndPoint MUSBEndPoint; + +struct MUSBPacket { + USBPacket p; + MUSBEndPoint *ep; + int dir; +}; + +struct MUSBEndPoint { uint16_t faddr[2]; uint8_t haddr[2]; uint8_t hport[2]; @@ -284,7 +295,7 @@ typedef struct { int fifolen[2]; int fifostart[2]; int fifoaddr[2]; - USBPacket packey[2]; + MUSBPacket packey[2]; int status[2]; int ext_size[2]; @@ -294,7 +305,7 @@ typedef struct { MUSBState *musb; USBCallback *delayed_cb[2]; QEMUTimer *intv_timer[2]; -} MUSBEndPoint; +}; struct MUSBState { qemu_irq *irqs; @@ -321,7 +332,9 @@ struct MUSBState { /* Duplicating the world since 2008!... probably we should have 32 * logical, single endpoints instead. 
*/ MUSBEndPoint ep[16]; -} *musb_init(qemu_irq *irqs) +}; + +struct MUSBState *musb_init(qemu_irq *irqs) { MUSBState *s = qemu_mallocz(sizeof(*s)); int i; @@ -484,25 +497,27 @@ static void musb_detach(USBPort *port) musb_session_update(s, 1, s->session); } -static inline void musb_cb_tick0(void *opaque) +static void musb_cb_tick0(void *opaque) { MUSBEndPoint *ep = (MUSBEndPoint *) opaque; - ep->delayed_cb[0](&ep->packey[0], opaque); + ep->delayed_cb[0](&ep->packey[0].p, opaque); } -static inline void musb_cb_tick1(void *opaque) +static void musb_cb_tick1(void *opaque) { MUSBEndPoint *ep = (MUSBEndPoint *) opaque; - ep->delayed_cb[1](&ep->packey[1], opaque); + ep->delayed_cb[1](&ep->packey[1].p, opaque); } #define musb_cb_tick (dir ? musb_cb_tick1 : musb_cb_tick0) -static inline void musb_schedule_cb(USBPacket *packey, void *opaque, int dir) +static void musb_schedule_cb(USBDevice *dev, USBPacket *packey) { - MUSBEndPoint *ep = (MUSBEndPoint *) opaque; + MUSBPacket *p = container_of(packey, MUSBPacket, p); + MUSBEndPoint *ep = p->ep; + int dir = p->dir; int timeout = 0; if (ep->status[dir] == USB_RET_NAK) @@ -510,25 +525,15 @@ static inline void musb_schedule_cb(USBPacket *packey, void *opaque, int dir) else if (ep->interrupt[dir]) timeout = 8; else - return musb_cb_tick(opaque); + return musb_cb_tick(ep); if (!ep->intv_timer[dir]) - ep->intv_timer[dir] = qemu_new_timer_ns(vm_clock, musb_cb_tick, opaque); + ep->intv_timer[dir] = qemu_new_timer_ns(vm_clock, musb_cb_tick, ep); qemu_mod_timer(ep->intv_timer[dir], qemu_get_clock_ns(vm_clock) + muldiv64(timeout, get_ticks_per_sec(), 8000)); } -static void musb_schedule0_cb(USBPacket *packey, void *opaque) -{ - return musb_schedule_cb(packey, opaque, 0); -} - -static void musb_schedule1_cb(USBPacket *packey, void *opaque) -{ - return musb_schedule_cb(packey, opaque, 1); -} - static int musb_timeout(int ttype, int speed, int val) { #if 1 @@ -567,7 +572,7 @@ static int musb_timeout(int ttype, int speed, int val) hw_error("bad interval\n"); } -static inline void musb_packet(MUSBState *s, MUSBEndPoint *ep, +static void musb_packet(MUSBState *s, MUSBEndPoint *ep, int epnum, int pid, int len, USBCallback cb, int dir) { int ret; @@ -585,19 +590,18 @@ static inline void musb_packet(MUSBState *s, MUSBEndPoint *ep, ep->type[idx] >> 6, ep->interval[idx]); ep->interrupt[dir] = ttype == USB_ENDPOINT_XFER_INT; ep->delayed_cb[dir] = cb; - cb = dir ? musb_schedule1_cb : musb_schedule0_cb; - ep->packey[dir].pid = pid; + ep->packey[dir].p.pid = pid; /* A wild guess on the FADDR semantics... 
*/ - ep->packey[dir].devaddr = ep->faddr[idx]; - ep->packey[dir].devep = ep->type[idx] & 0xf; - ep->packey[dir].data = (void *) ep->buf[idx]; - ep->packey[dir].len = len; - ep->packey[dir].complete_cb = cb; - ep->packey[dir].complete_opaque = ep; + ep->packey[dir].p.devaddr = ep->faddr[idx]; + ep->packey[dir].p.devep = ep->type[idx] & 0xf; + ep->packey[dir].p.data = (void *) ep->buf[idx]; + ep->packey[dir].p.len = len; + ep->packey[dir].ep = ep; + ep->packey[dir].dir = dir; if (s->port.dev) - ret = s->port.dev->info->handle_packet(s->port.dev, &ep->packey[dir]); + ret = usb_handle_packet(s->port.dev, &ep->packey[dir].p); else ret = USB_RET_NODEV; @@ -607,7 +611,7 @@ static inline void musb_packet(MUSBState *s, MUSBEndPoint *ep, } ep->status[dir] = ret; - usb_packet_complete(&ep->packey[dir]); + usb_packet_complete(s->port.dev, &ep->packey[dir].p); } static void musb_tx_packet_complete(USBPacket *packey, void *opaque) @@ -821,14 +825,14 @@ static void musb_rx_req(MUSBState *s, int epnum) /* If we already have a packet, which didn't fit into the * 64 bytes of the FIFO, only move the FIFO start and return. (Obsolete) */ - if (ep->packey[1].pid == USB_TOKEN_IN && ep->status[1] >= 0 && + if (ep->packey[1].p.pid == USB_TOKEN_IN && ep->status[1] >= 0 && (ep->fifostart[1]) + ep->rxcount < - ep->packey[1].len) { + ep->packey[1].p.len) { TRACE("0x%08x, %d", ep->fifostart[1], ep->rxcount ); ep->fifostart[1] += ep->rxcount; ep->fifolen[1] = 0; - ep->rxcount = MIN(ep->packey[0].len - (ep->fifostart[1]), + ep->rxcount = MIN(ep->packey[0].p.len - (ep->fifostart[1]), ep->maxp[1]); ep->csr[1] &= ~MGC_M_RXCSR_H_REQPKT; @@ -866,10 +870,11 @@ static void musb_rx_req(MUSBState *s, int epnum) #ifdef SETUPLEN_HACK /* Why should *we* do that instead of Linux? */ if (!epnum) { - if (ep->packey[0].devaddr == 2) + if (ep->packey[0].p.devaddr == 2) { total = MIN(s->setup_len, 8); - else + } else { total = MIN(s->setup_len, 64); + } s->setup_len -= total; } #endif diff --git a/hw/usb-net.c b/hw/usb-net.c index bf51bb3890..9be709f7cf 100644 --- a/hw/usb-net.c +++ b/hw/usb-net.c @@ -1042,13 +1042,13 @@ static void usb_net_handle_reset(USBDevice *dev) { } -static int usb_net_handle_control(USBDevice *dev, int request, int value, - int index, int length, uint8_t *data) +static int usb_net_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) { USBNetState *s = (USBNetState *) dev; int ret; - ret = usb_desc_handle_control(dev, request, value, index, length, data); + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); if (ret >= 0) { return ret; } diff --git a/hw/usb-ohci.c b/hw/usb-ohci.c index 7ff23228cc..a67556af41 100644 --- a/hw/usb-ohci.c +++ b/hw/usb-ohci.c @@ -575,9 +575,9 @@ static void ohci_copy_iso_td(OHCIState *ohci, static void ohci_process_lists(OHCIState *ohci, int completion); -static void ohci_async_complete_packet(USBPacket *packet, void *opaque) +static void ohci_async_complete_packet(USBDevice *dev, USBPacket *packet) { - OHCIState *ohci = opaque; + OHCIState *ohci = container_of(packet, OHCIState, usb_packet); #ifdef DEBUG_PACKET DPRINTF("Async packet complete\n"); #endif @@ -748,9 +748,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, ohci->usb_packet.devep = OHCI_BM(ed->flags, ED_EN); ohci->usb_packet.data = ohci->usb_buf; ohci->usb_packet.len = len; - ohci->usb_packet.complete_cb = ohci_async_complete_packet; - ohci->usb_packet.complete_opaque = ohci; - ret = dev->info->handle_packet(dev, 
&ohci->usb_packet); + ret = usb_handle_packet(dev, &ohci->usb_packet); if (ret != USB_RET_NODEV) break; } @@ -946,9 +944,7 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) ohci->usb_packet.devep = OHCI_BM(ed->flags, ED_EN); ohci->usb_packet.data = ohci->usb_buf; ohci->usb_packet.len = len; - ohci->usb_packet.complete_cb = ohci_async_complete_packet; - ohci->usb_packet.complete_opaque = ohci; - ret = dev->info->handle_packet(dev, &ohci->usb_packet); + ret = usb_handle_packet(dev, &ohci->usb_packet); if (ret != USB_RET_NODEV) break; } @@ -1665,6 +1661,7 @@ static CPUWriteMemoryFunc * const ohci_writefn[3]={ static USBPortOps ohci_port_ops = { .attach = ohci_attach, .detach = ohci_detach, + .complete = ohci_async_complete_packet, }; static void usb_ohci_init(OHCIState *ohci, DeviceState *dev, diff --git a/hw/usb-serial.c b/hw/usb-serial.c index 6763d52040..59cb0fb2f7 100644 --- a/hw/usb-serial.c +++ b/hw/usb-serial.c @@ -146,6 +146,7 @@ static const USBDescDevice desc_device = { .bConfigurationValue = 1, .bmAttributes = 0x80, .bMaxPower = 50, + .nif = 1, .ifs = &desc_iface0, }, }, @@ -218,14 +219,14 @@ static uint8_t usb_get_modem_lines(USBSerialState *s) return ret; } -static int usb_serial_handle_control(USBDevice *dev, int request, int value, - int index, int length, uint8_t *data) +static int usb_serial_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) { USBSerialState *s = (USBSerialState *)dev; int ret; DPRINTF("got control %x, value %x\n",request, value); - ret = usb_desc_handle_control(dev, request, value, index, length, data); + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); if (ret >= 0) { return ret; } diff --git a/hw/usb-uhci.c b/hw/usb-uhci.c index 536c24c825..872a995575 100644 --- a/hw/usb-uhci.c +++ b/hw/usb-uhci.c @@ -106,6 +106,8 @@ static void dump_data(const uint8_t *data, int len) static void dump_data(const uint8_t *data, int len) {} #endif +typedef struct UHCIState UHCIState; + /* * Pending async transaction. 
* 'packet' must be the first field because completion @@ -113,7 +115,8 @@ static void dump_data(const uint8_t *data, int len) {} */ typedef struct UHCIAsync { USBPacket packet; - struct UHCIAsync *next; + UHCIState *uhci; + QTAILQ_ENTRY(UHCIAsync) next; uint32_t td; uint32_t token; int8_t valid; @@ -127,7 +130,7 @@ typedef struct UHCIPort { uint16_t ctrl; } UHCIPort; -typedef struct UHCIState { +struct UHCIState { PCIDevice dev; USBBus bus; uint16_t cmd; /* cmd register */ @@ -145,10 +148,9 @@ typedef struct UHCIState { uint32_t pending_int_mask; /* Active packets */ - UHCIAsync *async_pending; - UHCIAsync *async_pool; + QTAILQ_HEAD(,UHCIAsync) async_pending; uint8_t num_ports_vmstate; -} UHCIState; +}; typedef struct UHCI_TD { uint32_t link; @@ -167,12 +169,12 @@ static UHCIAsync *uhci_async_alloc(UHCIState *s) UHCIAsync *async = qemu_malloc(sizeof(UHCIAsync)); memset(&async->packet, 0, sizeof(async->packet)); + async->uhci = s; async->valid = 0; async->td = 0; async->token = 0; async->done = 0; async->isoc = 0; - async->next = NULL; return async; } @@ -184,24 +186,12 @@ static void uhci_async_free(UHCIState *s, UHCIAsync *async) static void uhci_async_link(UHCIState *s, UHCIAsync *async) { - async->next = s->async_pending; - s->async_pending = async; + QTAILQ_INSERT_HEAD(&s->async_pending, async, next); } static void uhci_async_unlink(UHCIState *s, UHCIAsync *async) { - UHCIAsync *curr = s->async_pending; - UHCIAsync **prev = &s->async_pending; - - while (curr) { - if (curr == async) { - *prev = curr->next; - return; - } - - prev = &curr->next; - curr = curr->next; - } + QTAILQ_REMOVE(&s->async_pending, async, next); } static void uhci_async_cancel(UHCIState *s, UHCIAsync *async) @@ -220,11 +210,10 @@ static void uhci_async_cancel(UHCIState *s, UHCIAsync *async) */ static UHCIAsync *uhci_async_validate_begin(UHCIState *s) { - UHCIAsync *async = s->async_pending; + UHCIAsync *async; - while (async) { + QTAILQ_FOREACH(async, &s->async_pending, next) { async->valid--; - async = async->next; } return NULL; } @@ -234,47 +223,30 @@ static UHCIAsync *uhci_async_validate_begin(UHCIState *s) */ static void uhci_async_validate_end(UHCIState *s) { - UHCIAsync *curr = s->async_pending; - UHCIAsync **prev = &s->async_pending; - UHCIAsync *next; + UHCIAsync *curr, *n; - while (curr) { + QTAILQ_FOREACH_SAFE(curr, &s->async_pending, next, n) { if (curr->valid > 0) { - prev = &curr->next; - curr = curr->next; continue; } - - next = curr->next; - - /* Unlink */ - *prev = next; - + uhci_async_unlink(s, curr); uhci_async_cancel(s, curr); - - curr = next; } } static void uhci_async_cancel_all(UHCIState *s) { - UHCIAsync *curr = s->async_pending; - UHCIAsync *next; - - while (curr) { - next = curr->next; + UHCIAsync *curr, *n; + QTAILQ_FOREACH_SAFE(curr, &s->async_pending, next, n) { + uhci_async_unlink(s, curr); uhci_async_cancel(s, curr); - - curr = next; } - - s->async_pending = NULL; } static UHCIAsync *uhci_async_find_td(UHCIState *s, uint32_t addr, uint32_t token) { - UHCIAsync *async = s->async_pending; + UHCIAsync *async; UHCIAsync *match = NULL; int count = 0; @@ -291,7 +263,7 @@ static UHCIAsync *uhci_async_find_td(UHCIState *s, uint32_t addr, uint32_t token * If we ever do we'd want to optimize this algorithm. 
*/ - while (async) { + QTAILQ_FOREACH(async, &s->async_pending, next) { if (async->token == token) { /* Good match */ match = async; @@ -301,8 +273,6 @@ static UHCIAsync *uhci_async_find_td(UHCIState *s, uint32_t addr, uint32_t token break; } } - - async = async->next; count++; } @@ -662,7 +632,7 @@ static int uhci_broadcast_packet(UHCIState *s, USBPacket *p) USBDevice *dev = port->port.dev; if (dev && (port->ctrl & UHCI_PORT_EN)) - ret = dev->info->handle_packet(dev, p); + ret = usb_handle_packet(dev, p); } DPRINTF("uhci: packet exit. ret %d len %d\n", ret, p->len); @@ -672,7 +642,7 @@ static int uhci_broadcast_packet(UHCIState *s, USBPacket *p) return ret; } -static void uhci_async_complete(USBPacket * packet, void *opaque); +static void uhci_async_complete(USBDevice *dev, USBPacket *packet); static void uhci_process_frame(UHCIState *s); /* return -1 if fatal error (frame must be stopped) @@ -732,11 +702,15 @@ out: case USB_RET_STALL: td->ctrl |= TD_CTRL_STALL; td->ctrl &= ~TD_CTRL_ACTIVE; + s->status |= UHCI_STS_USBERR; + uhci_update_irq(s); return 1; case USB_RET_BABBLE: td->ctrl |= TD_CTRL_BABBLE | TD_CTRL_STALL; td->ctrl &= ~TD_CTRL_ACTIVE; + s->status |= UHCI_STS_USBERR; + uhci_update_irq(s); /* frame interrupted */ return -1; @@ -825,8 +799,6 @@ static int uhci_handle_td(UHCIState *s, uint32_t addr, UHCI_TD *td, uint32_t *in async->packet.devep = (td->token >> 15) & 0xf; async->packet.data = async->buffer; async->packet.len = max_len; - async->packet.complete_cb = uhci_async_complete; - async->packet.complete_opaque = s; switch(pid) { case USB_TOKEN_OUT: @@ -862,10 +834,10 @@ done: return len; } -static void uhci_async_complete(USBPacket *packet, void *opaque) +static void uhci_async_complete(USBDevice *dev, USBPacket *packet) { - UHCIState *s = opaque; - UHCIAsync *async = (UHCIAsync *) packet; + UHCIAsync *async = container_of(packet, UHCIAsync, packet); + UHCIState *s = async->uhci; DPRINTF("uhci: async complete. 
td 0x%x token 0x%x\n", async->td, async->token); @@ -1113,6 +1085,7 @@ static USBPortOps uhci_port_ops = { .attach = uhci_attach, .detach = uhci_detach, .wakeup = uhci_wakeup, + .complete = uhci_async_complete, }; static int usb_uhci_common_initfn(PCIDevice *dev) @@ -1136,6 +1109,7 @@ static int usb_uhci_common_initfn(PCIDevice *dev) s->expire_time = qemu_get_clock_ns(vm_clock) + (get_ticks_per_sec() / FRAME_TIMER_FREQ); s->num_ports_vmstate = NB_PORTS; + QTAILQ_INIT(&s->async_pending); qemu_register_reset(uhci_reset, s); diff --git a/hw/usb-wacom.c b/hw/usb-wacom.c index 16be7a20cf..9d348e170e 100644 --- a/hw/usb-wacom.c +++ b/hw/usb-wacom.c @@ -108,6 +108,7 @@ static const USBDescDevice desc_device_wacom = { .bConfigurationValue = 1, .bmAttributes = 0x80, .bMaxPower = 40, + .nif = 1, .ifs = &desc_iface_wacom, }, }, @@ -249,13 +250,13 @@ static void usb_wacom_handle_reset(USBDevice *dev) s->mode = WACOM_MODE_HID; } -static int usb_wacom_handle_control(USBDevice *dev, int request, int value, - int index, int length, uint8_t *data) +static int usb_wacom_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) { USBWacomState *s = (USBWacomState *) dev; int ret; - ret = usb_desc_handle_control(dev, request, value, index, length, data); + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); if (ret >= 0) { return ret; } @@ -63,9 +63,10 @@ void usb_wakeup(USBDevice *dev) protocol) */ -#define SETUP_STATE_IDLE 0 -#define SETUP_STATE_DATA 1 -#define SETUP_STATE_ACK 2 +#define SETUP_STATE_IDLE 0 +#define SETUP_STATE_SETUP 1 +#define SETUP_STATE_DATA 2 +#define SETUP_STATE_ACK 3 static int do_token_setup(USBDevice *s, USBPacket *p) { @@ -82,10 +83,14 @@ static int do_token_setup(USBDevice *s, USBPacket *p) request = (s->setup_buf[0] << 8) | s->setup_buf[1]; value = (s->setup_buf[3] << 8) | s->setup_buf[2]; index = (s->setup_buf[5] << 8) | s->setup_buf[4]; - + if (s->setup_buf[0] & USB_DIR_IN) { - ret = s->info->handle_control(s, request, value, index, + ret = s->info->handle_control(s, p, request, value, index, s->setup_len, s->data_buf); + if (ret == USB_RET_ASYNC) { + s->setup_state = SETUP_STATE_SETUP; + return USB_RET_ASYNC; + } if (ret < 0) return ret; @@ -93,6 +98,12 @@ static int do_token_setup(USBDevice *s, USBPacket *p) s->setup_len = ret; s->setup_state = SETUP_STATE_DATA; } else { + if (s->setup_len > sizeof(s->data_buf)) { + fprintf(stderr, + "usb_generic_handle_packet: ctrl buffer too small (%d > %zu)\n", + s->setup_len, sizeof(s->data_buf)); + return USB_RET_STALL; + } if (s->setup_len == 0) s->setup_state = SETUP_STATE_ACK; else @@ -117,9 +128,12 @@ static int do_token_in(USBDevice *s, USBPacket *p) switch(s->setup_state) { case SETUP_STATE_ACK: if (!(s->setup_buf[0] & USB_DIR_IN)) { - s->setup_state = SETUP_STATE_IDLE; - ret = s->info->handle_control(s, request, value, index, + ret = s->info->handle_control(s, p, request, value, index, s->setup_len, s->data_buf); + if (ret == USB_RET_ASYNC) { + return USB_RET_ASYNC; + } + s->setup_state = SETUP_STATE_IDLE; if (ret > 0) return 0; return ret; @@ -232,6 +246,36 @@ int usb_generic_handle_packet(USBDevice *s, USBPacket *p) } } +/* ctrl complete function for devices which use usb_generic_handle_packet and + may return USB_RET_ASYNC from their handle_control callback. Device code + which does this *must* call this function instead of the normal + usb_packet_complete to complete their async control packets. 
*/ +void usb_generic_async_ctrl_complete(USBDevice *s, USBPacket *p) +{ + if (p->len < 0) { + s->setup_state = SETUP_STATE_IDLE; + } + + switch (s->setup_state) { + case SETUP_STATE_SETUP: + if (p->len < s->setup_len) { + s->setup_len = p->len; + } + s->setup_state = SETUP_STATE_DATA; + p->len = 8; + break; + + case SETUP_STATE_ACK: + s->setup_state = SETUP_STATE_IDLE; + p->len = 0; + break; + + default: + break; + } + usb_packet_complete(s, p); +} + /* XXX: fix overflow */ int set_usb_string(uint8_t *buf, const char *str) { @@ -253,9 +297,54 @@ int set_usb_string(uint8_t *buf, const char *str) void usb_send_msg(USBDevice *dev, int msg) { USBPacket p; + int ret; + memset(&p, 0, sizeof(p)); p.pid = msg; - dev->info->handle_packet(dev, &p); - + ret = usb_handle_packet(dev, &p); /* This _must_ be synchronous */ + assert(ret != USB_RET_ASYNC); +} + +/* Hand over a packet to a device for processing. Return value + USB_RET_ASYNC indicates the processing isn't finished yet, the + driver will call usb_packet_complete() when done processing it. */ +int usb_handle_packet(USBDevice *dev, USBPacket *p) +{ + int ret; + + assert(p->owner == NULL); + ret = dev->info->handle_packet(dev, p); + if (ret == USB_RET_ASYNC) { + if (p->owner == NULL) { + p->owner = dev; + } else { + /* We'll end up here when usb_handle_packet is called + * recursively due to a hub being in the chain. Nothing + * to do. Leave p->owner pointing to the device, not the + * hub. */; + } + } + return ret; +} + +/* Notify the controller that an async packet is complete. This should only + be called for packets previously deferred by returning USB_RET_ASYNC from + handle_packet. */ +void usb_packet_complete(USBDevice *dev, USBPacket *p) +{ + /* Note: p->owner != dev is possible in case dev is a hub */ + assert(p->owner != NULL); + dev->port->ops->complete(dev, p); + p->owner = NULL; +} + +/* Cancel an active packet. The packet must have been deferred by + returning USB_RET_ASYNC from handle_packet, and not yet + completed. */ +void usb_cancel_packet(USBPacket * p) +{ + assert(p->owner != NULL); + p->owner->info->cancel_packet(p->owner, p); + p->owner = NULL; } @@ -124,6 +124,7 @@ #define USB_DT_ENDPOINT 0x05 #define USB_DT_DEVICE_QUALIFIER 0x06 #define USB_DT_OTHER_SPEED_CONFIG 0x07 +#define USB_DT_INTERFACE_ASSOC 0x0B #define USB_ENDPOINT_XFER_CONTROL 0 #define USB_ENDPOINT_XFER_ISOC 1 @@ -140,6 +141,7 @@ typedef struct USBDesc USBDesc; typedef struct USBDescID USBDescID; typedef struct USBDescDevice USBDescDevice; typedef struct USBDescConfig USBDescConfig; +typedef struct USBDescIfaceAssoc USBDescIfaceAssoc; typedef struct USBDescIface USBDescIface; typedef struct USBDescEndpoint USBDescEndpoint; typedef struct USBDescOther USBDescOther; @@ -167,7 +169,7 @@ struct USBDevice { int32_t state; uint8_t setup_buf[8]; - uint8_t data_buf[1024]; + uint8_t data_buf[4096]; int32_t remote_wakeup; int32_t setup_state; int32_t setup_len; @@ -192,6 +194,11 @@ struct USBDeviceInfo { int (*handle_packet)(USBDevice *dev, USBPacket *p); /* + * Called when a packet is canceled. + */ + void (*cancel_packet)(USBDevice *dev, USBPacket *p); + + /* * Called when device is destroyed. */ void (*handle_destroy)(USBDevice *dev); @@ -212,7 +219,7 @@ struct USBDeviceInfo { * * Returns length or one of the USB_RET_ codes. 
*/ - int (*handle_control)(USBDevice *dev, int request, int value, + int (*handle_control)(USBDevice *dev, USBPacket *p, int request, int value, int index, int length, uint8_t *data); /* @@ -235,6 +242,7 @@ typedef struct USBPortOps { void (*attach)(USBPort *port); void (*detach)(USBPort *port); void (*wakeup)(USBDevice *dev); + void (*complete)(USBDevice *dev, USBPacket *p); } USBPortOps; /* USB port on which a device can be connected */ @@ -259,40 +267,17 @@ struct USBPacket { uint8_t *data; int len; /* Internal use by the USB layer. */ - USBCallback *complete_cb; - void *complete_opaque; - USBCallback *cancel_cb; - void *cancel_opaque; + USBDevice *owner; }; -/* Defer completion of a USB packet. The hadle_packet routine should then - return USB_RET_ASYNC. Packets that complete immediately (before - handle_packet returns) should not call this method. */ -static inline void usb_defer_packet(USBPacket *p, USBCallback *cancel, - void * opaque) -{ - p->cancel_cb = cancel; - p->cancel_opaque = opaque; -} - -/* Notify the controller that an async packet is complete. This should only - be called for packets previously deferred with usb_defer_packet, and - should never be called from within handle_packet. */ -static inline void usb_packet_complete(USBPacket *p) -{ - p->complete_cb(p, p->complete_opaque); -} - -/* Cancel an active packet. The packed must have been deferred with - usb_defer_packet, and not yet completed. */ -static inline void usb_cancel_packet(USBPacket * p) -{ - p->cancel_cb(p, p->cancel_opaque); -} +int usb_handle_packet(USBDevice *dev, USBPacket *p); +void usb_packet_complete(USBDevice *dev, USBPacket *p); +void usb_cancel_packet(USBPacket * p); void usb_attach(USBPort *port, USBDevice *dev); void usb_wakeup(USBDevice *dev); int usb_generic_handle_packet(USBDevice *s, USBPacket *p); +void usb_generic_async_ctrl_complete(USBDevice *s, USBPacket *p); int set_usb_string(uint8_t *buf, const char *str); void usb_send_msg(USBDevice *dev, int msg); diff --git a/hw/vga-isa.c b/hw/vga-isa.c index fde0d56fd3..245841f18b 100644 --- a/hw/vga-isa.c +++ b/hw/vga-isa.c @@ -77,7 +77,6 @@ static ISADeviceInfo vga_info = { .qdev.size = sizeof(ISAVGAState), .qdev.vmsd = &vmstate_vga_common, .qdev.reset = vga_reset_isa, - .qdev.no_user = 1, .init = vga_initfn, }; diff --git a/hw/virtio-console.c b/hw/virtio-console.c index de539c4eac..b076331d37 100644 --- a/hw/virtio-console.c +++ b/hw/virtio-console.c @@ -74,25 +74,26 @@ static void chr_event(void *opaque, int event) } } -static int generic_port_init(VirtConsole *vcon, VirtIOSerialPort *port) +static int virtconsole_initfn(VirtIOSerialPort *port) { + VirtConsole *vcon = DO_UPCAST(VirtConsole, port, port); + VirtIOSerialPortInfo *info = DO_UPCAST(VirtIOSerialPortInfo, qdev, + vcon->port.dev.info); + + if (port->id == 0 && !info->is_console) { + error_report("Port number 0 on virtio-serial devices reserved for virtconsole devices for backward compatibility."); + return -1; + } + if (vcon->chr) { qemu_chr_add_handlers(vcon->chr, chr_can_read, chr_read, chr_event, vcon); - vcon->port.info->have_data = flush_buf; - vcon->port.info->guest_open = guest_open; - vcon->port.info->guest_close = guest_close; + info->have_data = flush_buf; + info->guest_open = guest_open; + info->guest_close = guest_close; } - return 0; -} -/* Virtio Console Ports */ -static int virtconsole_initfn(VirtIOSerialPort *port) -{ - VirtConsole *vcon = DO_UPCAST(VirtConsole, port, port); - - port->is_console = true; - return generic_port_init(vcon, port); + return 0; } static int 
virtconsole_exitfn(VirtIOSerialPort *port) @@ -113,10 +114,10 @@ static int virtconsole_exitfn(VirtIOSerialPort *port) static VirtIOSerialPortInfo virtconsole_info = { .qdev.name = "virtconsole", .qdev.size = sizeof(VirtConsole), + .is_console = true, .init = virtconsole_initfn, .exit = virtconsole_exitfn, .qdev.props = (Property[]) { - DEFINE_PROP_UINT8("is_console", VirtConsole, port.is_console, 1), DEFINE_PROP_UINT32("nr", VirtConsole, port.id, VIRTIO_CONSOLE_BAD_ID), DEFINE_PROP_CHR("chardev", VirtConsole, chr), DEFINE_PROP_STRING("name", VirtConsole, port.name), @@ -130,26 +131,10 @@ static void virtconsole_register(void) } device_init(virtconsole_register) -/* Generic Virtio Serial Ports */ -static int virtserialport_initfn(VirtIOSerialPort *port) -{ - VirtConsole *vcon = DO_UPCAST(VirtConsole, port, port); - - if (port->id == 0) { - /* - * Disallow a generic port at id 0, that's reserved for - * console ports. - */ - error_report("Port number 0 on virtio-serial devices reserved for virtconsole devices for backward compatibility."); - return -1; - } - return generic_port_init(vcon, port); -} - static VirtIOSerialPortInfo virtserialport_info = { .qdev.name = "virtserialport", .qdev.size = sizeof(VirtConsole), - .init = virtserialport_initfn, + .init = virtconsole_initfn, .exit = virtconsole_exitfn, .qdev.props = (Property[]) { DEFINE_PROP_UINT32("nr", VirtConsole, port.id, VIRTIO_CONSOLE_BAD_ID), diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c index b86c441623..b3e7ba5d12 100644 --- a/hw/virtio-pci.c +++ b/hw/virtio-pci.c @@ -26,6 +26,7 @@ #include "loader.h" #include "kvm.h" #include "blockdev.h" +#include "virtio-pci.h" /* from Linux's linux/virtio_pci.h */ @@ -74,9 +75,6 @@ VIRTIO_PCI_CONFIG_MSI : \ VIRTIO_PCI_CONFIG_NOMSI) -/* Virtio ABI version, if we increment this, we break the guest driver. */ -#define VIRTIO_PCI_ABI_VERSION 0 - /* How many bits to shift physical queue address written to QUEUE_PFN. * 12 is historical, and due to x86 page size. */ #define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12 @@ -95,27 +93,6 @@ */ #define wmb() do { } while (0) -/* PCI bindings. 
*/ - -typedef struct { - PCIDevice pci_dev; - VirtIODevice *vdev; - uint32_t flags; - uint32_t addr; - uint32_t class_code; - uint32_t nvectors; - BlockConf block; - NICConf nic; - uint32_t host_features; -#ifdef CONFIG_LINUX - V9fsConf fsconf; -#endif - virtio_serial_conf serial; - virtio_net_conf net; - bool ioeventfd_disabled; - bool ioeventfd_started; -} VirtIOPCIProxy; - /* virtio device */ static void virtio_pci_notify(void *opaque, uint16_t vector) @@ -671,7 +648,7 @@ static const VirtIOBindings virtio_pci_bindings = { .vmstate_change = virtio_pci_vmstate_change, }; -static void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev) +void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev) { uint8_t *config; uint32_t size; @@ -814,21 +791,6 @@ static int virtio_balloon_init_pci(PCIDevice *pci_dev) return 0; } -#ifdef CONFIG_VIRTFS -static int virtio_9p_init_pci(PCIDevice *pci_dev) -{ - VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev); - VirtIODevice *vdev; - - vdev = virtio_9p_init(&pci_dev->qdev, &proxy->fsconf); - vdev->nvectors = proxy->nvectors; - virtio_init_pci(proxy, vdev); - /* make the actual value visible */ - proxy->nvectors = vdev->nvectors; - return 0; -} -#endif - static PCIDeviceInfo virtio_info[] = { { .qdev.name = "virtio-blk-pci", @@ -913,24 +875,6 @@ static PCIDeviceInfo virtio_info[] = { }, .qdev.reset = virtio_pci_reset, },{ -#ifdef CONFIG_VIRTFS - .qdev.name = "virtio-9p-pci", - .qdev.alias = "virtio-9p", - .qdev.size = sizeof(VirtIOPCIProxy), - .init = virtio_9p_init_pci, - .vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET, - .device_id = 0x1009, - .revision = VIRTIO_PCI_ABI_VERSION, - .class_id = 0x2, - .qdev.props = (Property[]) { - DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2), - DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features), - DEFINE_PROP_STRING("mount_tag", VirtIOPCIProxy, fsconf.tag), - DEFINE_PROP_STRING("fsdev", VirtIOPCIProxy, fsconf.fsdev_id), - DEFINE_PROP_END_OF_LIST(), - }, - }, { -#endif /* end of list */ } }; diff --git a/hw/virtio-pci.h b/hw/virtio-pci.h new file mode 100644 index 0000000000..b5189172de --- /dev/null +++ b/hw/virtio-pci.h @@ -0,0 +1,45 @@ +/* + * Virtio PCI Bindings + * + * Copyright IBM, Corp. 2007 + * Copyright (c) 2009 CodeSourcery + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * Paul Brook <paul@codesourcery.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#ifndef QEMU_VIRTIO_PCI_H +#define QEMU_VIRTIO_PCI_H + +#include "virtio-net.h" +#include "virtio-serial.h" + +typedef struct { + PCIDevice pci_dev; + VirtIODevice *vdev; + uint32_t flags; + uint32_t addr; + uint32_t class_code; + uint32_t nvectors; + BlockConf block; + NICConf nic; + uint32_t host_features; +#ifdef CONFIG_LINUX + V9fsConf fsconf; +#endif + virtio_serial_conf serial; + virtio_net_conf net; + bool ioeventfd_disabled; + bool ioeventfd_started; +} VirtIOPCIProxy; + +void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev); + +/* Virtio ABI version, if we increment this, we break the guest driver. 
*/ +#define VIRTIO_PCI_ABI_VERSION 0 + +#endif diff --git a/hw/virtio-serial-bus.c b/hw/virtio-serial-bus.c index f10d48fdb0..9a12104982 100644 --- a/hw/virtio-serial-bus.c +++ b/hw/virtio-serial-bus.c @@ -39,7 +39,7 @@ struct VirtIOSerial { /* Arrays of ivqs and ovqs: one per port */ VirtQueue **ivqs, **ovqs; - VirtIOSerialBus *bus; + VirtIOSerialBus bus; DeviceState *qdev; @@ -129,9 +129,13 @@ static void discard_vq_data(VirtQueue *vq, VirtIODevice *vdev) static void do_flush_queued_data(VirtIOSerialPort *port, VirtQueue *vq, VirtIODevice *vdev) { + VirtIOSerialPortInfo *info; + assert(port); assert(virtio_queue_ready(vq)); + info = DO_UPCAST(VirtIOSerialPortInfo, qdev, port->dev.info); + while (!port->throttled) { unsigned int i; @@ -149,10 +153,10 @@ static void do_flush_queued_data(VirtIOSerialPort *port, VirtQueue *vq, ssize_t ret; buf_size = port->elem.out_sg[i].iov_len - port->iov_offset; - ret = port->info->have_data(port, - port->elem.out_sg[i].iov_base - + port->iov_offset, - buf_size); + ret = info->have_data(port, + port->elem.out_sg[i].iov_base + + port->iov_offset, + buf_size); if (ret < 0 && ret != -EAGAIN) { /* We don't handle any other type of errors here */ abort(); @@ -285,6 +289,13 @@ size_t virtio_serial_guest_ready(VirtIOSerialPort *port) return 0; } +static void flush_queued_data_bh(void *opaque) +{ + VirtIOSerialPort *port = opaque; + + flush_queued_data(port); +} + void virtio_serial_throttle_port(VirtIOSerialPort *port, bool throttle) { if (!port) { @@ -295,14 +306,14 @@ void virtio_serial_throttle_port(VirtIOSerialPort *port, bool throttle) if (throttle) { return; } - - flush_queued_data(port); + qemu_bh_schedule(port->bh); } /* Guest wants to notify us of some event */ static void handle_control_message(VirtIOSerial *vser, void *buf, size_t len) { struct VirtIOSerialPort *port; + struct VirtIOSerialPortInfo *info; struct virtio_console_control cpkt, *gcpkt; uint8_t *buffer; size_t buffer_len; @@ -321,11 +332,13 @@ static void handle_control_message(VirtIOSerial *vser, void *buf, size_t len) if (!port && cpkt.event != VIRTIO_CONSOLE_DEVICE_READY) return; + info = DO_UPCAST(VirtIOSerialPortInfo, qdev, port->dev.info); + switch(cpkt.event) { case VIRTIO_CONSOLE_DEVICE_READY: if (!cpkt.value) { error_report("virtio-serial-bus: Guest failure in adding device %s\n", - vser->bus->qbus.name); + vser->bus.qbus.name); break; } /* @@ -340,7 +353,7 @@ static void handle_control_message(VirtIOSerial *vser, void *buf, size_t len) case VIRTIO_CONSOLE_PORT_READY: if (!cpkt.value) { error_report("virtio-serial-bus: Guest failure in adding port %u for device %s\n", - port->id, vser->bus->qbus.name); + port->id, vser->bus.qbus.name); break; } /* @@ -350,7 +363,7 @@ static void handle_control_message(VirtIOSerial *vser, void *buf, size_t len) * this port is a console port so that the guest can hook it * up to hvc. */ - if (port->is_console) { + if (info->is_console) { send_control_event(port, VIRTIO_CONSOLE_CONSOLE_PORT, 1); } @@ -379,21 +392,21 @@ static void handle_control_message(VirtIOSerial *vser, void *buf, size_t len) * initialised. If some app is interested in knowing about * this event, let it know. 
*/ - if (port->info->guest_ready) { - port->info->guest_ready(port); + if (info->guest_ready) { + info->guest_ready(port); } break; case VIRTIO_CONSOLE_PORT_OPEN: port->guest_connected = cpkt.value; - if (cpkt.value && port->info->guest_open) { + if (cpkt.value && info->guest_open) { /* Send the guest opened notification if an app is interested */ - port->info->guest_open(port); + info->guest_open(port); } - if (!cpkt.value && port->info->guest_close) { + if (!cpkt.value && info->guest_close) { /* Send the guest closed notification if an app is interested */ - port->info->guest_close(port); + info->guest_close(port); } break; } @@ -442,11 +455,13 @@ static void handle_output(VirtIODevice *vdev, VirtQueue *vq) { VirtIOSerial *vser; VirtIOSerialPort *port; + VirtIOSerialPortInfo *info; vser = DO_UPCAST(VirtIOSerial, vdev, vdev); port = find_port_by_vq(vser, vq); + info = port ? DO_UPCAST(VirtIOSerialPortInfo, qdev, port->dev.info) : NULL; - if (!port || !port->host_connected || !port->info->have_data) { + if (!port || !port->host_connected || !info->have_data) { discard_vq_data(vq, vdev); return; } @@ -467,7 +482,7 @@ static uint32_t get_features(VirtIODevice *vdev, uint32_t features) vser = DO_UPCAST(VirtIOSerial, vdev, vdev); - if (vser->bus->max_nr_ports > 1) { + if (vser->bus.max_nr_ports > 1) { features |= (1 << VIRTIO_CONSOLE_F_MULTIPORT); } return features; @@ -644,16 +659,6 @@ static struct BusInfo virtser_bus_info = { .print_dev = virtser_bus_dev_print, }; -static VirtIOSerialBus *virtser_bus_new(DeviceState *dev) -{ - VirtIOSerialBus *bus; - - bus = FROM_QBUS(VirtIOSerialBus, qbus_create(&virtser_bus_info, dev, NULL)); - bus->qbus.allow_hotplug = 1; - - return bus; -} - static void virtser_bus_dev_print(Monitor *mon, DeviceState *qdev, int indent) { VirtIOSerialPort *port = DO_UPCAST(VirtIOSerialPort, dev, qdev); @@ -726,13 +731,14 @@ static int virtser_port_qdev_init(DeviceState *qdev, DeviceInfo *base) bool plugging_port0; port->vser = bus->vser; + port->bh = qemu_bh_new(flush_queued_data_bh, port); /* * Is the first console port we're seeing? If so, put it up at * location 0. This is done for backward compatibility (old * kernel, new qemu). 
*/ - plugging_port0 = port->is_console && !find_port_by_id(port->vser, 0); + plugging_port0 = info->is_console && !find_port_by_id(port->vser, 0); if (find_port_by_id(port->vser, port->id)) { error_report("virtio-serial-bus: A port already exists at id %u\n", @@ -759,7 +765,6 @@ static int virtser_port_qdev_init(DeviceState *qdev, DeviceInfo *base) return -1; } - port->info = info; ret = info->init(port); if (ret) { return ret; @@ -790,15 +795,18 @@ static int virtser_port_qdev_init(DeviceState *qdev, DeviceInfo *base) static int virtser_port_qdev_exit(DeviceState *qdev) { VirtIOSerialPort *port = DO_UPCAST(VirtIOSerialPort, dev, qdev); + VirtIOSerialPortInfo *info = DO_UPCAST(VirtIOSerialPortInfo, qdev, + port->dev.info); VirtIOSerial *vser = port->vser; + qemu_bh_delete(port->bh); remove_port(port->vser, port->id); QTAILQ_REMOVE(&vser->ports, port, next); - if (port->info->exit) - port->info->exit(port); - + if (info->exit) { + info->exit(port); + } return 0; } @@ -835,11 +843,12 @@ VirtIODevice *virtio_serial_init(DeviceState *dev, virtio_serial_conf *conf) vser = DO_UPCAST(VirtIOSerial, vdev, vdev); /* Spawn a new virtio-serial bus on which the ports will ride as devices */ - vser->bus = virtser_bus_new(dev); - vser->bus->vser = vser; + qbus_create_inplace(&vser->bus.qbus, &virtser_bus_info, dev, NULL); + vser->bus.qbus.allow_hotplug = 1; + vser->bus.vser = vser; QTAILQ_INIT(&vser->ports); - vser->bus->max_nr_ports = conf->max_virtserial_ports; + vser->bus.max_nr_ports = conf->max_virtserial_ports; vser->ivqs = qemu_malloc(conf->max_virtserial_ports * sizeof(VirtQueue *)); vser->ovqs = qemu_malloc(conf->max_virtserial_ports * sizeof(VirtQueue *)); @@ -859,7 +868,7 @@ VirtIODevice *virtio_serial_init(DeviceState *dev, virtio_serial_conf *conf) /* control queue: guest to host */ vser->c_ovq = virtio_add_queue(vdev, 32, control_out); - for (i = 1; i < vser->bus->max_nr_ports; i++) { + for (i = 1; i < vser->bus.max_nr_ports; i++) { /* Add a per-port queue for host to guest transfers */ vser->ivqs[i] = virtio_add_queue(vdev, 128, handle_input); /* Add a per-per queue for guest to host transfers */ diff --git a/hw/virtio-serial.h b/hw/virtio-serial.h index 5eb948e3fd..36e9d222e5 100644 --- a/hw/virtio-serial.h +++ b/hw/virtio-serial.h @@ -75,7 +75,6 @@ typedef struct VirtIOSerialPortInfo VirtIOSerialPortInfo; */ struct VirtIOSerialPort { DeviceState dev; - VirtIOSerialPortInfo *info; QTAILQ_ENTRY(VirtIOSerialPort) next; @@ -119,8 +118,10 @@ struct VirtIOSerialPort { uint32_t iov_idx; uint64_t iov_offset; - /* Identify if this is a port that binds with hvc in the guest */ - uint8_t is_console; + /* + * When unthrottling we use a bottom-half to call flush_queued_data. + */ + QEMUBH *bh; /* Is the corresponding guest device open? */ bool guest_connected; @@ -132,6 +133,10 @@ struct VirtIOSerialPort { struct VirtIOSerialPortInfo { DeviceInfo qdev; + + /* Is this a device that binds with hvc in the guest? */ + bool is_console; + /* * The per-port (or per-app) init function that's called when a * new device is found on the bus. 
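The virtio-serial hunks above move the is_console flag and the have_data/guest_open/guest_close hooks from each VirtIOSerialPort into the shared VirtIOSerialPortInfo, which code now reaches through DO_UPCAST on port->dev.info instead of a per-port info pointer. The following is only a rough sketch of what a port driver might look like after this rework; the "example-serialport" name, the example_* identifiers and the header choice are invented for illustration, while the DO_UPCAST lookup, the .is_console field, the property list and the device_init registration mirror the virtconsole/virtserialport code changed above.

#include "virtio-serial.h"

typedef struct ExamplePort {
    VirtIOSerialPort port;          /* generic port state, must be first */
} ExamplePort;

/* Guest-to-host data is delivered through the have_data hook, which now
 * lives in the shared VirtIOSerialPortInfo rather than in the port. */
static ssize_t example_have_data(VirtIOSerialPort *port,
                                 const uint8_t *buf, size_t len)
{
    return len;                     /* pretend every byte was consumed */
}

static int example_port_initfn(VirtIOSerialPort *port)
{
    /* port->info is gone; the info is looked up via the qdev DeviceInfo */
    VirtIOSerialPortInfo *info = DO_UPCAST(VirtIOSerialPortInfo, qdev,
                                           port->dev.info);

    info->have_data = example_have_data;
    return 0;
}

static VirtIOSerialPortInfo example_port_info = {
    .qdev.name  = "example-serialport",
    .qdev.size  = sizeof(ExamplePort),
    .is_console = false,            /* by convention id 0 is for consoles */
    .init       = example_port_initfn,
    .qdev.props = (Property[]) {
        DEFINE_PROP_UINT32("nr", ExamplePort, port.id, VIRTIO_CONSOLE_BAD_ID),
        DEFINE_PROP_END_OF_LIST(),
    },
};

static void example_port_register(void)
{
    virtio_serial_port_qdev_register(&example_port_info);
}
device_init(example_port_register)

Note that in-tree both virtconsole and virtserialport now share virtconsole_initfn, and that shared init function is where the "port 0 is reserved for console ports" check ends up after this change.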
@@ -8,6 +8,8 @@ */ #include <inttypes.h> +#include "qemu-common.h" + /* xen-machine.c */ enum xen_mode { XEN_EMULATE = 0, // xen emulation, using xenner (default) @@ -18,4 +20,43 @@ enum xen_mode { extern uint32_t xen_domid; extern enum xen_mode xen_mode; +extern int xen_allowed; + +static inline int xen_enabled(void) +{ +#ifdef CONFIG_XEN + return xen_allowed; +#else + return 0; +#endif +} + +static inline int xen_mapcache_enabled(void) +{ +#ifdef CONFIG_XEN_MAPCACHE + return xen_enabled(); +#else + return 0; +#endif +} + +int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num); +void xen_piix3_set_irq(void *opaque, int irq_num, int level); +void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len); +void xen_cmos_set_s3_resume(void *opaque, int irq, int level); + +qemu_irq *xen_interrupt_controller_init(void); + +int xen_init(void); +int xen_hvm_init(void); +void xen_vcpu_init(void); + +#if defined(NEED_CPU_H) && !defined(CONFIG_USER_ONLY) +void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size); +#endif + +#if defined(CONFIG_XEN) && CONFIG_XEN_CTRL_INTERFACE_VERSION < 400 +# define HVM_MAX_VCPUS 32 +#endif + #endif /* QEMU_HW_XEN_H */ diff --git a/hw/xen_backend.c b/hw/xen_backend.c index a2e408fa0e..d881fa2f70 100644 --- a/hw/xen_backend.c +++ b/hw/xen_backend.c @@ -43,7 +43,8 @@ /* ------------------------------------------------------------- */ /* public */ -int xen_xc; +XenXC xen_xc = XC_HANDLER_INITIAL_VALUE; +XenGnttab xen_xcg = XC_HANDLER_INITIAL_VALUE; struct xs_handle *xenstore = NULL; const char *xen_protocol; @@ -58,8 +59,9 @@ int xenstore_write_str(const char *base, const char *node, const char *val) char abspath[XEN_BUFSIZE]; snprintf(abspath, sizeof(abspath), "%s/%s", base, node); - if (!xs_write(xenstore, 0, abspath, val, strlen(val))) - return -1; + if (!xs_write(xenstore, 0, abspath, val, strlen(val))) { + return -1; + } return 0; } @@ -94,8 +96,9 @@ int xenstore_read_int(const char *base, const char *node, int *ival) int rc = -1; val = xenstore_read_str(base, node); - if (val && 1 == sscanf(val, "%d", ival)) - rc = 0; + if (val && 1 == sscanf(val, "%d", ival)) { + rc = 0; + } qemu_free(val); return rc; } @@ -134,16 +137,16 @@ int xenstore_read_fe_int(struct XenDevice *xendev, const char *node, int *ival) const char *xenbus_strstate(enum xenbus_state state) { - static const char *const name[] = { - [ XenbusStateUnknown ] = "Unknown", - [ XenbusStateInitialising ] = "Initialising", - [ XenbusStateInitWait ] = "InitWait", - [ XenbusStateInitialised ] = "Initialised", - [ XenbusStateConnected ] = "Connected", - [ XenbusStateClosing ] = "Closing", - [ XenbusStateClosed ] = "Closed", - }; - return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; + static const char *const name[] = { + [ XenbusStateUnknown ] = "Unknown", + [ XenbusStateInitialising ] = "Initialising", + [ XenbusStateInitWait ] = "InitWait", + [ XenbusStateInitialised ] = "Initialised", + [ XenbusStateConnected ] = "Connected", + [ XenbusStateClosing ] = "Closing", + [ XenbusStateClosed ] = "Closed", + }; + return (state < ARRAY_SIZE(name)) ? 
name[state] : "INVALID"; } int xen_be_set_state(struct XenDevice *xendev, enum xenbus_state state) @@ -151,10 +154,11 @@ int xen_be_set_state(struct XenDevice *xendev, enum xenbus_state state) int rc; rc = xenstore_write_be_int(xendev, "state", state); - if (rc < 0) - return rc; + if (rc < 0) { + return rc; + } xen_be_printf(xendev, 1, "backend state: %s -> %s\n", - xenbus_strstate(xendev->be_state), xenbus_strstate(state)); + xenbus_strstate(xendev->be_state), xenbus_strstate(state)); xendev->be_state = state; return 0; } @@ -166,13 +170,16 @@ struct XenDevice *xen_be_find_xendev(const char *type, int dom, int dev) struct XenDevice *xendev; QTAILQ_FOREACH(xendev, &xendevs, next) { - if (xendev->dom != dom) - continue; - if (xendev->dev != dev) - continue; - if (strcmp(xendev->type, type) != 0) - continue; - return xendev; + if (xendev->dom != dom) { + continue; + } + if (xendev->dev != dev) { + continue; + } + if (strcmp(xendev->type, type) != 0) { + continue; + } + return xendev; } return NULL; } @@ -187,8 +194,9 @@ static struct XenDevice *xen_be_get_xendev(const char *type, int dom, int dev, char *dom0; xendev = xen_be_find_xendev(type, dom, dev); - if (xendev) - return xendev; + if (xendev) { + return xendev; + } /* init new xendev */ xendev = qemu_mallocz(ops->size); @@ -199,38 +207,39 @@ static struct XenDevice *xen_be_get_xendev(const char *type, int dom, int dev, dom0 = xs_get_domain_path(xenstore, 0); snprintf(xendev->be, sizeof(xendev->be), "%s/backend/%s/%d/%d", - dom0, xendev->type, xendev->dom, xendev->dev); + dom0, xendev->type, xendev->dom, xendev->dev); snprintf(xendev->name, sizeof(xendev->name), "%s-%d", - xendev->type, xendev->dev); + xendev->type, xendev->dev); free(dom0); xendev->debug = debug; xendev->local_port = -1; - xendev->evtchndev = xc_evtchn_open(); - if (xendev->evtchndev < 0) { - xen_be_printf(NULL, 0, "can't open evtchn device\n"); - qemu_free(xendev); - return NULL; + xendev->evtchndev = xen_xc_evtchn_open(NULL, 0); + if (xendev->evtchndev == XC_HANDLER_INITIAL_VALUE) { + xen_be_printf(NULL, 0, "can't open evtchn device\n"); + qemu_free(xendev); + return NULL; } fcntl(xc_evtchn_fd(xendev->evtchndev), F_SETFD, FD_CLOEXEC); if (ops->flags & DEVOPS_FLAG_NEED_GNTDEV) { - xendev->gnttabdev = xc_gnttab_open(); - if (xendev->gnttabdev < 0) { - xen_be_printf(NULL, 0, "can't open gnttab device\n"); - xc_evtchn_close(xendev->evtchndev); - qemu_free(xendev); - return NULL; - } + xendev->gnttabdev = xen_xc_gnttab_open(NULL, 0); + if (xendev->gnttabdev == XC_HANDLER_INITIAL_VALUE) { + xen_be_printf(NULL, 0, "can't open gnttab device\n"); + xc_evtchn_close(xendev->evtchndev); + qemu_free(xendev); + return NULL; + } } else { - xendev->gnttabdev = -1; + xendev->gnttabdev = XC_HANDLER_INITIAL_VALUE; } QTAILQ_INSERT_TAIL(&xendevs, xendev, next); - if (xendev->ops->alloc) - xendev->ops->alloc(xendev); + if (xendev->ops->alloc) { + xendev->ops->alloc(xendev); + } return xendev; } @@ -251,28 +260,33 @@ static struct XenDevice *xen_be_del_xendev(int dom, int dev) xendev = xnext; xnext = xendev->next.tqe_next; - if (xendev->dom != dom) - continue; - if (xendev->dev != dev && dev != -1) - continue; - - if (xendev->ops->free) - xendev->ops->free(xendev); - - if (xendev->fe) { - char token[XEN_BUFSIZE]; - snprintf(token, sizeof(token), "fe:%p", xendev); - xs_unwatch(xenstore, xendev->fe, token); - qemu_free(xendev->fe); - } - - if (xendev->evtchndev >= 0) - xc_evtchn_close(xendev->evtchndev); - if (xendev->gnttabdev >= 0) - xc_gnttab_close(xendev->gnttabdev); - - 
QTAILQ_REMOVE(&xendevs, xendev, next); - qemu_free(xendev); + if (xendev->dom != dom) { + continue; + } + if (xendev->dev != dev && dev != -1) { + continue; + } + + if (xendev->ops->free) { + xendev->ops->free(xendev); + } + + if (xendev->fe) { + char token[XEN_BUFSIZE]; + snprintf(token, sizeof(token), "fe:%p", xendev); + xs_unwatch(xenstore, xendev->fe, token); + qemu_free(xendev->fe); + } + + if (xendev->evtchndev != XC_HANDLER_INITIAL_VALUE) { + xc_evtchn_close(xendev->evtchndev); + } + if (xendev->gnttabdev != XC_HANDLER_INITIAL_VALUE) { + xc_gnttab_close(xendev->gnttabdev); + } + + QTAILQ_REMOVE(&xendevs, xendev, next); + qemu_free(xendev); } return NULL; } @@ -285,14 +299,16 @@ static struct XenDevice *xen_be_del_xendev(int dom, int dev) static void xen_be_backend_changed(struct XenDevice *xendev, const char *node) { if (node == NULL || strcmp(node, "online") == 0) { - if (xenstore_read_be_int(xendev, "online", &xendev->online) == -1) - xendev->online = 0; + if (xenstore_read_be_int(xendev, "online", &xendev->online) == -1) { + xendev->online = 0; + } } if (node) { - xen_be_printf(xendev, 2, "backend update: %s\n", node); - if (xendev->ops->backend_changed) - xendev->ops->backend_changed(xendev, node); + xen_be_printf(xendev, 2, "backend update: %s\n", node); + if (xendev->ops->backend_changed) { + xendev->ops->backend_changed(xendev, node); + } } } @@ -301,25 +317,29 @@ static void xen_be_frontend_changed(struct XenDevice *xendev, const char *node) int fe_state; if (node == NULL || strcmp(node, "state") == 0) { - if (xenstore_read_fe_int(xendev, "state", &fe_state) == -1) - fe_state = XenbusStateUnknown; - if (xendev->fe_state != fe_state) - xen_be_printf(xendev, 1, "frontend state: %s -> %s\n", - xenbus_strstate(xendev->fe_state), - xenbus_strstate(fe_state)); - xendev->fe_state = fe_state; + if (xenstore_read_fe_int(xendev, "state", &fe_state) == -1) { + fe_state = XenbusStateUnknown; + } + if (xendev->fe_state != fe_state) { + xen_be_printf(xendev, 1, "frontend state: %s -> %s\n", + xenbus_strstate(xendev->fe_state), + xenbus_strstate(fe_state)); + } + xendev->fe_state = fe_state; } if (node == NULL || strcmp(node, "protocol") == 0) { - qemu_free(xendev->protocol); - xendev->protocol = xenstore_read_fe_str(xendev, "protocol"); - if (xendev->protocol) - xen_be_printf(xendev, 1, "frontend protocol: %s\n", xendev->protocol); + qemu_free(xendev->protocol); + xendev->protocol = xenstore_read_fe_str(xendev, "protocol"); + if (xendev->protocol) { + xen_be_printf(xendev, 1, "frontend protocol: %s\n", xendev->protocol); + } } if (node) { - xen_be_printf(xendev, 2, "frontend update: %s\n", node); - if (xendev->ops->frontend_changed) - xendev->ops->frontend_changed(xendev, node); + xen_be_printf(xendev, 2, "frontend update: %s\n", node); + if (xendev->ops->frontend_changed) { + xendev->ops->frontend_changed(xendev, node); + } } } @@ -340,28 +360,28 @@ static int xen_be_try_setup(struct XenDevice *xendev) int be_state; if (xenstore_read_be_int(xendev, "state", &be_state) == -1) { - xen_be_printf(xendev, 0, "reading backend state failed\n"); - return -1; + xen_be_printf(xendev, 0, "reading backend state failed\n"); + return -1; } if (be_state != XenbusStateInitialising) { - xen_be_printf(xendev, 0, "initial backend state is wrong (%s)\n", - xenbus_strstate(be_state)); - return -1; + xen_be_printf(xendev, 0, "initial backend state is wrong (%s)\n", + xenbus_strstate(be_state)); + return -1; } xendev->fe = xenstore_read_be_str(xendev, "frontend"); if (xendev->fe == NULL) { - 
xen_be_printf(xendev, 0, "reading frontend path failed\n"); - return -1; + xen_be_printf(xendev, 0, "reading frontend path failed\n"); + return -1; } /* setup frontend watch */ snprintf(token, sizeof(token), "fe:%p", xendev); if (!xs_watch(xenstore, xendev->fe, token)) { - xen_be_printf(xendev, 0, "watching frontend path (%s) failed\n", - xendev->fe); - return -1; + xen_be_printf(xendev, 0, "watching frontend path (%s) failed\n", + xendev->fe); + return -1; } xen_be_set_state(xendev, XenbusStateInitialising); @@ -383,15 +403,16 @@ static int xen_be_try_init(struct XenDevice *xendev) int rc = 0; if (!xendev->online) { - xen_be_printf(xendev, 1, "not online\n"); - return -1; + xen_be_printf(xendev, 1, "not online\n"); + return -1; } - if (xendev->ops->init) - rc = xendev->ops->init(xendev); + if (xendev->ops->init) { + rc = xendev->ops->init(xendev); + } if (rc != 0) { - xen_be_printf(xendev, 1, "init() failed\n"); - return rc; + xen_be_printf(xendev, 1, "init() failed\n"); + return rc; } xenstore_write_be_str(xendev, "hotplug-status", "connected"); @@ -411,20 +432,21 @@ static int xen_be_try_connect(struct XenDevice *xendev) int rc = 0; if (xendev->fe_state != XenbusStateInitialised && - xendev->fe_state != XenbusStateConnected) { - if (xendev->ops->flags & DEVOPS_FLAG_IGNORE_STATE) { - xen_be_printf(xendev, 2, "frontend not ready, ignoring\n"); - } else { - xen_be_printf(xendev, 2, "frontend not ready (yet)\n"); - return -1; - } + xendev->fe_state != XenbusStateConnected) { + if (xendev->ops->flags & DEVOPS_FLAG_IGNORE_STATE) { + xen_be_printf(xendev, 2, "frontend not ready, ignoring\n"); + } else { + xen_be_printf(xendev, 2, "frontend not ready (yet)\n"); + return -1; + } } - if (xendev->ops->connect) - rc = xendev->ops->connect(xendev); + if (xendev->ops->connect) { + rc = xendev->ops->connect(xendev); + } if (rc != 0) { - xen_be_printf(xendev, 0, "connect() failed\n"); - return rc; + xen_be_printf(xendev, 0, "connect() failed\n"); + return rc; } xen_be_set_state(xendev, XenbusStateConnected); @@ -440,10 +462,12 @@ static void xen_be_disconnect(struct XenDevice *xendev, enum xenbus_state state) { if (xendev->be_state != XenbusStateClosing && xendev->be_state != XenbusStateClosed && - xendev->ops->disconnect) - xendev->ops->disconnect(xendev); - if (xendev->be_state != state) + xendev->ops->disconnect) { + xendev->ops->disconnect(xendev); + } + if (xendev->be_state != state) { xen_be_set_state(xendev, state); + } } /* @@ -451,8 +475,9 @@ static void xen_be_disconnect(struct XenDevice *xendev, enum xenbus_state state) */ static int xen_be_try_reset(struct XenDevice *xendev) { - if (xendev->fe_state != XenbusStateInitialising) + if (xendev->fe_state != XenbusStateInitialising) { return -1; + } xen_be_printf(xendev, 1, "device reset (for re-connect)\n"); xen_be_set_state(xendev, XenbusStateInitialising); @@ -468,31 +493,32 @@ void xen_be_check_state(struct XenDevice *xendev) /* frontend may request shutdown from almost anywhere */ if (xendev->fe_state == XenbusStateClosing || - xendev->fe_state == XenbusStateClosed) { - xen_be_disconnect(xendev, xendev->fe_state); - return; + xendev->fe_state == XenbusStateClosed) { + xen_be_disconnect(xendev, xendev->fe_state); + return; } /* check for possible backend state transitions */ for (;;) { - switch (xendev->be_state) { - case XenbusStateUnknown: - rc = xen_be_try_setup(xendev); - break; - case XenbusStateInitialising: - rc = xen_be_try_init(xendev); - break; - case XenbusStateInitWait: - rc = xen_be_try_connect(xendev); - break; + switch 
(xendev->be_state) { + case XenbusStateUnknown: + rc = xen_be_try_setup(xendev); + break; + case XenbusStateInitialising: + rc = xen_be_try_init(xendev); + break; + case XenbusStateInitWait: + rc = xen_be_try_connect(xendev); + break; case XenbusStateClosed: rc = xen_be_try_reset(xendev); break; - default: - rc = -1; - } - if (rc != 0) - break; + default: + rc = -1; + } + if (rc != 0) { + break; + } } } @@ -511,26 +537,28 @@ static int xenstore_scan(const char *type, int dom, struct XenDevOps *ops) snprintf(path, sizeof(path), "%s/backend/%s/%d", dom0, type, dom); free(dom0); if (!xs_watch(xenstore, path, token)) { - xen_be_printf(NULL, 0, "xen be: watching backend path (%s) failed\n", path); - return -1; + xen_be_printf(NULL, 0, "xen be: watching backend path (%s) failed\n", path); + return -1; } /* look for backends */ dev = xs_directory(xenstore, 0, path, &cdev); - if (!dev) - return 0; + if (!dev) { + return 0; + } for (j = 0; j < cdev; j++) { - xendev = xen_be_get_xendev(type, dom, atoi(dev[j]), ops); - if (xendev == NULL) - continue; - xen_be_check_state(xendev); + xendev = xen_be_get_xendev(type, dom, atoi(dev[j]), ops); + if (xendev == NULL) { + continue; + } + xen_be_check_state(xendev); } free(dev); return 0; } static void xenstore_update_be(char *watch, char *type, int dom, - struct XenDevOps *ops) + struct XenDevOps *ops) { struct XenDevice *xendev; char path[XEN_BUFSIZE], *dom0; @@ -539,25 +567,28 @@ static void xenstore_update_be(char *watch, char *type, int dom, dom0 = xs_get_domain_path(xenstore, 0); len = snprintf(path, sizeof(path), "%s/backend/%s/%d", dom0, type, dom); free(dom0); - if (strncmp(path, watch, len) != 0) - return; + if (strncmp(path, watch, len) != 0) { + return; + } if (sscanf(watch+len, "/%u/%255s", &dev, path) != 2) { - strcpy(path, ""); - if (sscanf(watch+len, "/%u", &dev) != 1) - dev = -1; + strcpy(path, ""); + if (sscanf(watch+len, "/%u", &dev) != 1) { + dev = -1; + } + } + if (dev == -1) { + return; } - if (dev == -1) - return; if (0) { - /* FIXME: detect devices being deleted from xenstore ... */ - xen_be_del_xendev(dom, dev); + /* FIXME: detect devices being deleted from xenstore ... 
*/ + xen_be_del_xendev(dom, dev); } xendev = xen_be_get_xendev(type, dom, dev, ops); if (xendev != NULL) { - xen_be_backend_changed(xendev, path); - xen_be_check_state(xendev); + xen_be_backend_changed(xendev, path); + xen_be_check_state(xendev); } } @@ -567,10 +598,12 @@ static void xenstore_update_fe(char *watch, struct XenDevice *xendev) unsigned int len; len = strlen(xendev->fe); - if (strncmp(xendev->fe, watch, len) != 0) - return; - if (watch[len] != '/') - return; + if (strncmp(xendev->fe, watch, len) != 0) { + return; + } + if (watch[len] != '/') { + return; + } node = watch + len + 1; xen_be_frontend_changed(xendev, node); @@ -584,14 +617,17 @@ static void xenstore_update(void *unused) unsigned int dom, count; vec = xs_read_watch(xenstore, &count); - if (vec == NULL) - goto cleanup; + if (vec == NULL) { + goto cleanup; + } if (sscanf(vec[XS_WATCH_TOKEN], "be:%" PRIxPTR ":%d:%" PRIxPTR, - &type, &dom, &ops) == 3) - xenstore_update_be(vec[XS_WATCH_PATH], (void*)type, dom, (void*)ops); - if (sscanf(vec[XS_WATCH_TOKEN], "fe:%" PRIxPTR, &ptr) == 1) - xenstore_update_fe(vec[XS_WATCH_PATH], (void*)ptr); + &type, &dom, &ops) == 3) { + xenstore_update_be(vec[XS_WATCH_PATH], (void*)type, dom, (void*)ops); + } + if (sscanf(vec[XS_WATCH_TOKEN], "fe:%" PRIxPTR, &ptr) == 1) { + xenstore_update_fe(vec[XS_WATCH_PATH], (void*)ptr); + } cleanup: free(vec); @@ -604,14 +640,15 @@ static void xen_be_evtchn_event(void *opaque) port = xc_evtchn_pending(xendev->evtchndev); if (port != xendev->local_port) { - xen_be_printf(xendev, 0, "xc_evtchn_pending returned %d (expected %d)\n", - port, xendev->local_port); - return; + xen_be_printf(xendev, 0, "xc_evtchn_pending returned %d (expected %d)\n", + port, xendev->local_port); + return; } xc_evtchn_unmask(xendev->evtchndev, port); - if (xendev->ops->event) - xendev->ops->event(xendev); + if (xendev->ops->event) { + xendev->ops->event(xendev); + } } /* -------------------------------------------------------------------- */ @@ -620,17 +657,17 @@ int xen_be_init(void) { xenstore = xs_daemon_open(); if (!xenstore) { - xen_be_printf(NULL, 0, "can't connect to xenstored\n"); - return -1; + xen_be_printf(NULL, 0, "can't connect to xenstored\n"); + return -1; } - if (qemu_set_fd_handler(xs_fileno(xenstore), xenstore_update, NULL, NULL) < 0) - goto err; + if (qemu_set_fd_handler(xs_fileno(xenstore), xenstore_update, NULL, NULL) < 0) { + goto err; + } - xen_xc = xc_interface_open(); - if (xen_xc == -1) { - xen_be_printf(NULL, 0, "can't open xen interface\n"); - goto err; + if (xen_xc == XC_HANDLER_INITIAL_VALUE) { + /* Check if xen_init() has been called */ + goto err; } return 0; @@ -649,24 +686,26 @@ int xen_be_register(const char *type, struct XenDevOps *ops) int xen_be_bind_evtchn(struct XenDevice *xendev) { - if (xendev->local_port != -1) - return 0; + if (xendev->local_port != -1) { + return 0; + } xendev->local_port = xc_evtchn_bind_interdomain - (xendev->evtchndev, xendev->dom, xendev->remote_port); + (xendev->evtchndev, xendev->dom, xendev->remote_port); if (xendev->local_port == -1) { - xen_be_printf(xendev, 0, "xc_evtchn_bind_interdomain failed\n"); - return -1; + xen_be_printf(xendev, 0, "xc_evtchn_bind_interdomain failed\n"); + return -1; } xen_be_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port); qemu_set_fd_handler(xc_evtchn_fd(xendev->evtchndev), - xen_be_evtchn_event, NULL, xendev); + xen_be_evtchn_event, NULL, xendev); return 0; } void xen_be_unbind_evtchn(struct XenDevice *xendev) { - if (xendev->local_port == -1) - return; + if 
(xendev->local_port == -1) { + return; + } qemu_set_fd_handler(xc_evtchn_fd(xendev->evtchndev), NULL, NULL, NULL); xc_evtchn_unbind(xendev->evtchndev, xendev->local_port); xen_be_printf(xendev, 2, "unbind evtchn port %d\n", xendev->local_port); @@ -690,17 +729,21 @@ void xen_be_printf(struct XenDevice *xendev, int msg_level, const char *fmt, ... va_list args; if (xendev) { - if (msg_level > xendev->debug) + if (msg_level > xendev->debug) { return; + } qemu_log("xen be: %s: ", xendev->name); - if (msg_level == 0) + if (msg_level == 0) { fprintf(stderr, "xen be: %s: ", xendev->name); + } } else { - if (msg_level > debug) + if (msg_level > debug) { return; + } qemu_log("xen be core: "); - if (msg_level == 0) + if (msg_level == 0) { fprintf(stderr, "xen be core: "); + } } va_start(args, fmt); qemu_log_vprintf(fmt, args); diff --git a/hw/xen_backend.h b/hw/xen_backend.h index 1b428e3bf4..6401c85a7e 100644 --- a/hw/xen_backend.h +++ b/hw/xen_backend.h @@ -45,8 +45,8 @@ struct XenDevice { int remote_port; int local_port; - int evtchndev; - int gnttabdev; + XenEvtchn evtchndev; + XenGnttab gnttabdev; struct XenDevOps *ops; QTAILQ_ENTRY(XenDevice) next; @@ -55,7 +55,7 @@ struct XenDevice { /* ------------------------------------------------------------- */ /* variables */ -extern int xen_xc; +extern XenXC xen_xc; extern struct xs_handle *xenstore; extern const char *xen_protocol; diff --git a/hw/xen_common.h b/hw/xen_common.h index 8a55b44f0b..a1958a0af1 100644 --- a/hw/xen_common.h +++ b/hw/xen_common.h @@ -1,6 +1,8 @@ #ifndef QEMU_HW_XEN_COMMON_H #define QEMU_HW_XEN_COMMON_H 1 +#include "config-host.h" + #include <stddef.h> #include <inttypes.h> @@ -13,22 +15,98 @@ #include "qemu-queue.h" /* - * tweaks needed to build with different xen versions - * 0x00030205 -> 3.1.0 - * 0x00030207 -> 3.2.0 - * 0x00030208 -> unstable + * We don't support Xen prior to 3.3.0. 
*/ -#include <xen/xen-compat.h> -#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00030205 -# define evtchn_port_or_error_t int -#endif -#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00030207 -# define xc_map_foreign_pages xc_map_foreign_batch + +/* Xen before 4.0 */ +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 400 +static inline void *xc_map_foreign_bulk(int xc_handle, uint32_t dom, int prot, + xen_pfn_t *arr, int *err, + unsigned int num) +{ + return xc_map_foreign_batch(xc_handle, dom, prot, arr, num); +} #endif -#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00030208 -# define xen_mb() mb() -# define xen_rmb() rmb() -# define xen_wmb() wmb() + + +/* Xen before 4.1 */ +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 410 + +typedef int XenXC; +typedef int XenEvtchn; +typedef int XenGnttab; + +# define XC_INTERFACE_FMT "%i" +# define XC_HANDLER_INITIAL_VALUE -1 + +static inline XenEvtchn xen_xc_evtchn_open(void *logger, + unsigned int open_flags) +{ + return xc_evtchn_open(); +} + +static inline XenGnttab xen_xc_gnttab_open(void *logger, + unsigned int open_flags) +{ + return xc_gnttab_open(); +} + +static inline XenXC xen_xc_interface_open(void *logger, void *dombuild_logger, + unsigned int open_flags) +{ + return xc_interface_open(); +} + +static inline int xc_fd(int xen_xc) +{ + return xen_xc; +} + + +static inline int xc_domain_populate_physmap_exact + (XenXC xc_handle, uint32_t domid, unsigned long nr_extents, + unsigned int extent_order, unsigned int mem_flags, xen_pfn_t *extent_start) +{ + return xc_domain_memory_populate_physmap + (xc_handle, domid, nr_extents, extent_order, mem_flags, extent_start); +} + + +/* Xen 4.1 */ +#else + +typedef xc_interface *XenXC; +typedef xc_evtchn *XenEvtchn; +typedef xc_gnttab *XenGnttab; + +# define XC_INTERFACE_FMT "%p" +# define XC_HANDLER_INITIAL_VALUE NULL + +static inline XenEvtchn xen_xc_evtchn_open(void *logger, + unsigned int open_flags) +{ + return xc_evtchn_open(logger, open_flags); +} + +static inline XenGnttab xen_xc_gnttab_open(void *logger, + unsigned int open_flags) +{ + return xc_gnttab_open(logger, open_flags); +} + +static inline XenXC xen_xc_interface_open(void *logger, void *dombuild_logger, + unsigned int open_flags) +{ + return xc_interface_open(logger, dombuild_logger, open_flags); +} + +/* FIXME There is no way to have the xen fd */ +static inline int xc_fd(xc_interface *xen_xc) +{ + return -1; +} #endif +void destroy_hvm_domain(void); + #endif /* QEMU_HW_XEN_COMMON_H */ diff --git a/hw/xen_devconfig.c b/hw/xen_devconfig.c index 8d50216c04..3a9215566d 100644 --- a/hw/xen_devconfig.c +++ b/hw/xen_devconfig.c @@ -96,7 +96,7 @@ int xen_config_dev_blk(DriveInfo *disk) { char fe[256], be[256]; int vdev = 202 * 256 + 16 * disk->unit; - int cdrom = disk->bdrv->type == BDRV_TYPE_CDROM; + int cdrom = disk->media_cd; const char *devtype = cdrom ? "cdrom" : "disk"; const char *mode = cdrom ? 
"r" : "w"; diff --git a/hw/xen_disk.c b/hw/xen_disk.c index 558bf8ae25..0c298afa8d 100644 --- a/hw/xen_disk.c +++ b/hw/xen_disk.c @@ -120,17 +120,18 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev) struct ioreq *ioreq = NULL; if (QLIST_EMPTY(&blkdev->freelist)) { - if (blkdev->requests_total >= max_requests) - goto out; - /* allocate new struct */ - ioreq = qemu_mallocz(sizeof(*ioreq)); - ioreq->blkdev = blkdev; - blkdev->requests_total++; + if (blkdev->requests_total >= max_requests) { + goto out; + } + /* allocate new struct */ + ioreq = qemu_mallocz(sizeof(*ioreq)); + ioreq->blkdev = blkdev; + blkdev->requests_total++; qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST); } else { - /* get one from freelist */ - ioreq = QLIST_FIRST(&blkdev->freelist); - QLIST_REMOVE(ioreq, list); + /* get one from freelist */ + ioreq = QLIST_FIRST(&blkdev->freelist); + QLIST_REMOVE(ioreq, list); qemu_iovec_reset(&ioreq->v); } QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list); @@ -173,30 +174,32 @@ static int ioreq_parse(struct ioreq *ioreq) int i; xen_be_printf(&blkdev->xendev, 3, - "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n", - ioreq->req.operation, ioreq->req.nr_segments, - ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number); + "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n", + ioreq->req.operation, ioreq->req.nr_segments, + ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number); switch (ioreq->req.operation) { case BLKIF_OP_READ: - ioreq->prot = PROT_WRITE; /* to memory */ - break; + ioreq->prot = PROT_WRITE; /* to memory */ + break; case BLKIF_OP_WRITE_BARRIER: if (!ioreq->req.nr_segments) { ioreq->presync = 1; return 0; } - if (!syncwrite) - ioreq->presync = ioreq->postsync = 1; - /* fall through */ + if (!syncwrite) { + ioreq->presync = ioreq->postsync = 1; + } + /* fall through */ case BLKIF_OP_WRITE: - ioreq->prot = PROT_READ; /* from memory */ - if (syncwrite) - ioreq->postsync = 1; - break; + ioreq->prot = PROT_READ; /* from memory */ + if (syncwrite) { + ioreq->postsync = 1; + } + break; default: - xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n", - ioreq->req.operation); - goto err; + xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n", + ioreq->req.operation); + goto err; }; if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') { @@ -206,29 +209,29 @@ static int ioreq_parse(struct ioreq *ioreq) ioreq->start = ioreq->req.sector_number * blkdev->file_blk; for (i = 0; i < ioreq->req.nr_segments; i++) { - if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) { - xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n"); - goto err; - } - if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) { - xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n"); - goto err; - } - if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) { - xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n"); - goto err; - } - - ioreq->domids[i] = blkdev->xendev.dom; - ioreq->refs[i] = ioreq->req.seg[i].gref; - - mem = ioreq->req.seg[i].first_sect * blkdev->file_blk; - len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk; + if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) { + xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n"); + goto err; + } + if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) { + xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n"); + goto err; + } + if (ioreq->req.seg[i].last_sect * 
BLOCK_SIZE >= XC_PAGE_SIZE) { + xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n"); + goto err; + } + + ioreq->domids[i] = blkdev->xendev.dom; + ioreq->refs[i] = ioreq->req.seg[i].gref; + + mem = ioreq->req.seg[i].first_sect * blkdev->file_blk; + len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk; qemu_iovec_add(&ioreq->v, (void*)mem, len); } if (ioreq->start + ioreq->v.size > blkdev->file_size) { - xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n"); - goto err; + xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n"); + goto err; } return 0; @@ -239,66 +242,73 @@ err: static void ioreq_unmap(struct ioreq *ioreq) { - int gnt = ioreq->blkdev->xendev.gnttabdev; + XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev; int i; - if (ioreq->v.niov == 0) + if (ioreq->v.niov == 0) { return; + } if (batch_maps) { - if (!ioreq->pages) - return; - if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0) - xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n", - strerror(errno)); - ioreq->blkdev->cnt_map -= ioreq->v.niov; - ioreq->pages = NULL; + if (!ioreq->pages) { + return; + } + if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0) { + xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n", + strerror(errno)); + } + ioreq->blkdev->cnt_map -= ioreq->v.niov; + ioreq->pages = NULL; } else { - for (i = 0; i < ioreq->v.niov; i++) { - if (!ioreq->page[i]) - continue; - if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) - xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n", - strerror(errno)); - ioreq->blkdev->cnt_map--; - ioreq->page[i] = NULL; - } + for (i = 0; i < ioreq->v.niov; i++) { + if (!ioreq->page[i]) { + continue; + } + if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) { + xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n", + strerror(errno)); + } + ioreq->blkdev->cnt_map--; + ioreq->page[i] = NULL; + } } } static int ioreq_map(struct ioreq *ioreq) { - int gnt = ioreq->blkdev->xendev.gnttabdev; + XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev; int i; - if (ioreq->v.niov == 0) + if (ioreq->v.niov == 0) { return 0; + } if (batch_maps) { - ioreq->pages = xc_gnttab_map_grant_refs - (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot); - if (ioreq->pages == NULL) { - xen_be_printf(&ioreq->blkdev->xendev, 0, - "can't map %d grant refs (%s, %d maps)\n", - ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map); - return -1; - } - for (i = 0; i < ioreq->v.niov; i++) - ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE + - (uintptr_t)ioreq->v.iov[i].iov_base; - ioreq->blkdev->cnt_map += ioreq->v.niov; + ioreq->pages = xc_gnttab_map_grant_refs + (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot); + if (ioreq->pages == NULL) { + xen_be_printf(&ioreq->blkdev->xendev, 0, + "can't map %d grant refs (%s, %d maps)\n", + ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map); + return -1; + } + for (i = 0; i < ioreq->v.niov; i++) { + ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE + + (uintptr_t)ioreq->v.iov[i].iov_base; + } + ioreq->blkdev->cnt_map += ioreq->v.niov; } else { - for (i = 0; i < ioreq->v.niov; i++) { - ioreq->page[i] = xc_gnttab_map_grant_ref - (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot); - if (ioreq->page[i] == NULL) { - xen_be_printf(&ioreq->blkdev->xendev, 0, - "can't map grant ref %d (%s, %d maps)\n", - ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map); - 
ioreq_unmap(ioreq); - return -1; - } - ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base; - ioreq->blkdev->cnt_map++; - } + for (i = 0; i < ioreq->v.niov; i++) { + ioreq->page[i] = xc_gnttab_map_grant_ref + (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot); + if (ioreq->page[i] == NULL) { + xen_be_printf(&ioreq->blkdev->xendev, 0, + "can't map grant ref %d (%s, %d maps)\n", + ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map); + ioreq_unmap(ioreq); + return -1; + } + ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base; + ioreq->blkdev->cnt_map++; + } } return 0; } @@ -306,57 +316,59 @@ static int ioreq_map(struct ioreq *ioreq) static int ioreq_runio_qemu_sync(struct ioreq *ioreq) { struct XenBlkDev *blkdev = ioreq->blkdev; - int i, rc, len = 0; + int i, rc; off_t pos; - if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) - goto err_no_map; - if (ioreq->presync) - bdrv_flush(blkdev->bs); + if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) { + goto err_no_map; + } + if (ioreq->presync) { + bdrv_flush(blkdev->bs); + } switch (ioreq->req.operation) { case BLKIF_OP_READ: - pos = ioreq->start; - for (i = 0; i < ioreq->v.niov; i++) { - rc = bdrv_read(blkdev->bs, pos / BLOCK_SIZE, - ioreq->v.iov[i].iov_base, - ioreq->v.iov[i].iov_len / BLOCK_SIZE); - if (rc != 0) { - xen_be_printf(&blkdev->xendev, 0, "rd I/O error (%p, len %zd)\n", - ioreq->v.iov[i].iov_base, - ioreq->v.iov[i].iov_len); - goto err; - } - len += ioreq->v.iov[i].iov_len; - pos += ioreq->v.iov[i].iov_len; - } - break; + pos = ioreq->start; + for (i = 0; i < ioreq->v.niov; i++) { + rc = bdrv_read(blkdev->bs, pos / BLOCK_SIZE, + ioreq->v.iov[i].iov_base, + ioreq->v.iov[i].iov_len / BLOCK_SIZE); + if (rc != 0) { + xen_be_printf(&blkdev->xendev, 0, "rd I/O error (%p, len %zd)\n", + ioreq->v.iov[i].iov_base, + ioreq->v.iov[i].iov_len); + goto err; + } + pos += ioreq->v.iov[i].iov_len; + } + break; case BLKIF_OP_WRITE: case BLKIF_OP_WRITE_BARRIER: - if (!ioreq->req.nr_segments) + if (!ioreq->req.nr_segments) { break; - pos = ioreq->start; - for (i = 0; i < ioreq->v.niov; i++) { - rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE, - ioreq->v.iov[i].iov_base, - ioreq->v.iov[i].iov_len / BLOCK_SIZE); - if (rc != 0) { - xen_be_printf(&blkdev->xendev, 0, "wr I/O error (%p, len %zd)\n", - ioreq->v.iov[i].iov_base, - ioreq->v.iov[i].iov_len); - goto err; - } - len += ioreq->v.iov[i].iov_len; - pos += ioreq->v.iov[i].iov_len; - } - break; + } + pos = ioreq->start; + for (i = 0; i < ioreq->v.niov; i++) { + rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE, + ioreq->v.iov[i].iov_base, + ioreq->v.iov[i].iov_len / BLOCK_SIZE); + if (rc != 0) { + xen_be_printf(&blkdev->xendev, 0, "wr I/O error (%p, len %zd)\n", + ioreq->v.iov[i].iov_base, + ioreq->v.iov[i].iov_len); + goto err; + } + pos += ioreq->v.iov[i].iov_len; + } + break; default: - /* unknown operation (shouldn't happen -- parse catches this) */ - goto err; + /* unknown operation (shouldn't happen -- parse catches this) */ + goto err; } - if (ioreq->postsync) - bdrv_flush(blkdev->bs); + if (ioreq->postsync) { + bdrv_flush(blkdev->bs); + } ioreq->status = BLKIF_RSP_OKAY; ioreq_unmap(ioreq); @@ -382,8 +394,9 @@ static void qemu_aio_complete(void *opaque, int ret) } ioreq->aio_inflight--; - if (ioreq->aio_inflight > 0) + if (ioreq->aio_inflight > 0) { return; + } ioreq->status = ioreq->aio_errors ? 
BLKIF_RSP_ERROR : BLKIF_RSP_OKAY; ioreq_unmap(ioreq); @@ -395,12 +408,14 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq) { struct XenBlkDev *blkdev = ioreq->blkdev; - if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) - goto err_no_map; + if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) { + goto err_no_map; + } ioreq->aio_inflight++; - if (ioreq->presync) - bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */ + if (ioreq->presync) { + bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */ + } switch (ioreq->req.operation) { case BLKIF_OP_READ: @@ -408,23 +423,25 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq) bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE, &ioreq->v, ioreq->v.size / BLOCK_SIZE, qemu_aio_complete, ioreq); - break; + break; case BLKIF_OP_WRITE: case BLKIF_OP_WRITE_BARRIER: - if (!ioreq->req.nr_segments) + if (!ioreq->req.nr_segments) { break; + } ioreq->aio_inflight++; bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE, &ioreq->v, ioreq->v.size / BLOCK_SIZE, qemu_aio_complete, ioreq); - break; + break; default: - /* unknown operation (shouldn't happen -- parse catches this) */ - goto err; + /* unknown operation (shouldn't happen -- parse catches this) */ + goto err; } - if (ioreq->postsync) - bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */ + if (ioreq->postsync) { + bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */ + } qemu_aio_complete(ioreq, 0); return 0; @@ -452,36 +469,37 @@ static int blk_send_response_one(struct ioreq *ioreq) /* Place on the response ring for the relevant domain. */ switch (blkdev->protocol) { case BLKIF_PROTOCOL_NATIVE: - dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt); - break; + dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt); + break; case BLKIF_PROTOCOL_X86_32: dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part, blkdev->rings.x86_32_part.rsp_prod_pvt); - break; + break; case BLKIF_PROTOCOL_X86_64: dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part, blkdev->rings.x86_64_part.rsp_prod_pvt); - break; + break; default: - dst = NULL; + dst = NULL; } memcpy(dst, &resp, sizeof(resp)); blkdev->rings.common.rsp_prod_pvt++; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify); if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) { - /* - * Tail check for pending requests. Allows frontend to avoid - * notifications if requests are already in flight (lower - * overheads and promotes batching). - */ - RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests); + /* + * Tail check for pending requests. Allows frontend to avoid + * notifications if requests are already in flight (lower + * overheads and promotes batching). 
+ */ + RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests); } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) { - have_requests = 1; + have_requests = 1; } - if (have_requests) - blkdev->more_work++; + if (have_requests) { + blkdev->more_work++; + } return send_notify; } @@ -493,28 +511,29 @@ static void blk_send_response_all(struct XenBlkDev *blkdev) while (!QLIST_EMPTY(&blkdev->finished)) { ioreq = QLIST_FIRST(&blkdev->finished); - send_notify += blk_send_response_one(ioreq); - ioreq_release(ioreq); + send_notify += blk_send_response_one(ioreq); + ioreq_release(ioreq); + } + if (send_notify) { + xen_be_send_notify(&blkdev->xendev); } - if (send_notify) - xen_be_send_notify(&blkdev->xendev); } static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc) { switch (blkdev->protocol) { case BLKIF_PROTOCOL_NATIVE: - memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc), - sizeof(ioreq->req)); - break; + memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc), + sizeof(ioreq->req)); + break; case BLKIF_PROTOCOL_X86_32: blkif_get_x86_32_req(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc)); - break; + break; case BLKIF_PROTOCOL_X86_64: blkif_get_x86_64_req(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc)); - break; + break; } return 0; } @@ -530,12 +549,14 @@ static void blk_handle_requests(struct XenBlkDev *blkdev) rp = blkdev->rings.common.sring->req_prod; xen_rmb(); /* Ensure we see queued requests up to 'rp'. */ - if (use_aio) + if (use_aio) { blk_send_response_all(blkdev); + } while (rc != rp) { /* pull request from ring */ - if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) + if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) { break; + } ioreq = ioreq_start(blkdev); if (ioreq == NULL) { blkdev->more_work++; @@ -546,8 +567,9 @@ static void blk_handle_requests(struct XenBlkDev *blkdev) /* parse them */ if (ioreq_parse(ioreq) != 0) { - if (blk_send_response_one(ioreq)) + if (blk_send_response_one(ioreq)) { xen_be_send_notify(&blkdev->xendev); + } ioreq_release(ioreq); continue; } @@ -560,11 +582,13 @@ static void blk_handle_requests(struct XenBlkDev *blkdev) ioreq_runio_qemu_sync(ioreq); } } - if (!use_aio) + if (!use_aio) { blk_send_response_all(blkdev); + } - if (blkdev->more_work && blkdev->requests_inflight < max_requests) + if (blkdev->more_work && blkdev->requests_inflight < max_requests) { qemu_bh_schedule(blkdev->bh); + } } /* ------------------------------------------------------------- */ @@ -583,8 +607,9 @@ static void blk_alloc(struct XenDevice *xendev) QLIST_INIT(&blkdev->finished); QLIST_INIT(&blkdev->freelist); blkdev->bh = qemu_bh_new(blk_bh, blkdev); - if (xen_mode != XEN_EMULATE) + if (xen_mode != XEN_EMULATE) { batch_maps = 1; + } } static int blk_init(struct XenDevice *xendev) @@ -595,44 +620,50 @@ static int blk_init(struct XenDevice *xendev) /* read xenstore entries */ if (blkdev->params == NULL) { - blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params"); + blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params"); h = strchr(blkdev->params, ':'); - if (h != NULL) { - blkdev->fileproto = blkdev->params; - blkdev->filename = h+1; - *h = 0; - } else { - blkdev->fileproto = "<unset>"; - blkdev->filename = blkdev->params; - } - } - if (blkdev->mode == NULL) - blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode"); - if (blkdev->type == NULL) - blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type"); - if (blkdev->dev == NULL) - 
blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev"); - if (blkdev->devtype == NULL) - blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type"); + if (h != NULL) { + blkdev->fileproto = blkdev->params; + blkdev->filename = h+1; + *h = 0; + } else { + blkdev->fileproto = "<unset>"; + blkdev->filename = blkdev->params; + } + } + if (blkdev->mode == NULL) { + blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode"); + } + if (blkdev->type == NULL) { + blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type"); + } + if (blkdev->dev == NULL) { + blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev"); + } + if (blkdev->devtype == NULL) { + blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type"); + } /* do we have all we need? */ if (blkdev->params == NULL || - blkdev->mode == NULL || - blkdev->type == NULL || - blkdev->dev == NULL) - return -1; + blkdev->mode == NULL || + blkdev->type == NULL || + blkdev->dev == NULL) { + return -1; + } /* read-only ? */ if (strcmp(blkdev->mode, "w") == 0) { - qflags = BDRV_O_RDWR; + qflags = BDRV_O_RDWR; } else { - qflags = 0; - info |= VDISK_READONLY; + qflags = 0; + info |= VDISK_READONLY; } /* cdrom ? */ - if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) - info |= VDISK_CDROM; + if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) { + info |= VDISK_CDROM; + } /* init qemu block driver */ index = (blkdev->xendev.dev - 202 * 256) / 16; @@ -649,7 +680,7 @@ static int blk_init(struct XenDevice *xendev) } else { /* setup via qemu cmdline -> already setup for us */ xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n"); - blkdev->bs = blkdev->dinfo->bdrv; + blkdev->bs = blkdev->dinfo->bdrv; } blkdev->file_blk = BLOCK_SIZE; blkdev->file_size = bdrv_getlength(blkdev->bs); @@ -657,21 +688,21 @@ static int blk_init(struct XenDevice *xendev) xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n", (int)blkdev->file_size, strerror(-blkdev->file_size), blkdev->bs->drv ? blkdev->bs->drv->format_name : "-"); - blkdev->file_size = 0; + blkdev->file_size = 0; } have_barriers = blkdev->bs->drv && blkdev->bs->drv->bdrv_flush ? 
1 : 0; xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\"," - " size %" PRId64 " (%" PRId64 " MB)\n", - blkdev->type, blkdev->fileproto, blkdev->filename, - blkdev->file_size, blkdev->file_size >> 20); + " size %" PRId64 " (%" PRId64 " MB)\n", + blkdev->type, blkdev->fileproto, blkdev->filename, + blkdev->file_size, blkdev->file_size >> 20); /* fill info */ xenstore_write_be_int(&blkdev->xendev, "feature-barrier", have_barriers); xenstore_write_be_int(&blkdev->xendev, "info", info); xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk); xenstore_write_be_int(&blkdev->xendev, "sectors", - blkdev->file_size / blkdev->file_blk); + blkdev->file_size / blkdev->file_blk); return 0; } @@ -679,57 +710,62 @@ static int blk_connect(struct XenDevice *xendev) { struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev); - if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) - return -1; + if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) { + return -1; + } if (xenstore_read_fe_int(&blkdev->xendev, "event-channel", - &blkdev->xendev.remote_port) == -1) - return -1; + &blkdev->xendev.remote_port) == -1) { + return -1; + } blkdev->protocol = BLKIF_PROTOCOL_NATIVE; if (blkdev->xendev.protocol) { - if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) + if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) { blkdev->protocol = BLKIF_PROTOCOL_X86_32; - if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) + } + if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) { blkdev->protocol = BLKIF_PROTOCOL_X86_64; + } } blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev, - blkdev->xendev.dom, - blkdev->ring_ref, - PROT_READ | PROT_WRITE); - if (!blkdev->sring) - return -1; + blkdev->xendev.dom, + blkdev->ring_ref, + PROT_READ | PROT_WRITE); + if (!blkdev->sring) { + return -1; + } blkdev->cnt_map++; switch (blkdev->protocol) { case BLKIF_PROTOCOL_NATIVE: { - blkif_sring_t *sring_native = blkdev->sring; - BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE); - break; + blkif_sring_t *sring_native = blkdev->sring; + BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE); + break; } case BLKIF_PROTOCOL_X86_32: { - blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring; + blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring; BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE); - break; + break; } case BLKIF_PROTOCOL_X86_64: { - blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring; + blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring; BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE); - break; + break; } } xen_be_bind_evtchn(&blkdev->xendev); xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, " - "remote port %d, local port %d\n", - blkdev->xendev.protocol, blkdev->ring_ref, - blkdev->xendev.remote_port, blkdev->xendev.local_port); + "remote port %d, local port %d\n", + blkdev->xendev.protocol, blkdev->ring_ref, + blkdev->xendev.remote_port, blkdev->xendev.local_port); return 0; } @@ -743,14 +779,14 @@ static void blk_disconnect(struct XenDevice *xendev) bdrv_close(blkdev->bs); bdrv_delete(blkdev->bs); } - blkdev->bs = NULL; + blkdev->bs = NULL; } xen_be_unbind_evtchn(&blkdev->xendev); if (blkdev->sring) { - xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1); - blkdev->cnt_map--; - blkdev->sring = NULL; + xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1); + 
blkdev->cnt_map--; + blkdev->sring = NULL; } } @@ -760,10 +796,10 @@ static int blk_free(struct XenDevice *xendev) struct ioreq *ioreq; while (!QLIST_EMPTY(&blkdev->freelist)) { - ioreq = QLIST_FIRST(&blkdev->freelist); + ioreq = QLIST_FIRST(&blkdev->freelist); QLIST_REMOVE(ioreq, list); qemu_iovec_destroy(&ioreq->v); - qemu_free(ioreq); + qemu_free(ioreq); } qemu_free(blkdev->params); diff --git a/hw/xen_domainbuild.c b/hw/xen_domainbuild.c index 4093587df1..a6a12e5930 100644 --- a/hw/xen_domainbuild.c +++ b/hw/xen_domainbuild.c @@ -175,8 +175,9 @@ static int xen_domain_watcher(void) for (i = 3; i < n; i++) { if (i == fd[0]) continue; - if (i == xen_xc) + if (i == xc_fd(xen_xc)) { continue; + } close(i); } diff --git a/hw/xen_machine_pv.c b/hw/xen_machine_pv.c index 0d7f73ed82..7985d11d5a 100644 --- a/hw/xen_machine_pv.c +++ b/hw/xen_machine_pv.c @@ -113,6 +113,7 @@ static QEMUMachine xenpv_machine = { .desc = "Xen Para-virtualized PC", .init = xen_init_pv, .max_cpus = 1, + .default_machine_opts = "accel=xen", }; static void xenpv_machine_init(void) diff --git a/hw/xen_nic.c b/hw/xen_nic.c index 08055b83ff..ff86491cfa 100644 --- a/hw/xen_nic.c +++ b/hw/xen_nic.c @@ -74,20 +74,23 @@ static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, i resp->status = st; #if 0 - if (txp->flags & NETTXF_extra_info) - RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL; + if (txp->flags & NETTXF_extra_info) { + RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL; + } #endif netdev->tx_ring.rsp_prod_pvt = ++i; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify); - if (notify) - xen_be_send_notify(&netdev->xendev); + if (notify) { + xen_be_send_notify(&netdev->xendev); + } if (i == netdev->tx_ring.req_cons) { - int more_to_do; - RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do); - if (more_to_do) - netdev->tx_work++; + int more_to_do; + RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do); + if (more_to_do) { + netdev->tx_work++; + } } } @@ -101,10 +104,11 @@ static void net_tx_error(struct XenNetDev *netdev, netif_tx_request_t *txp, RING RING_IDX cons = netdev->tx_ring.req_cons; do { - make_tx_response(netif, txp, NETIF_RSP_ERROR); - if (cons >= end) - break; - txp = RING_GET_REQUEST(&netdev->tx_ring, cons++); + make_tx_response(netif, txp, NETIF_RSP_ERROR); + if (cons >= end) { + break; + } + txp = RING_GET_REQUEST(&netdev->tx_ring, cons++); } while (1); netdev->tx_ring.req_cons = cons; netif_schedule_work(netif); @@ -122,75 +126,78 @@ static void net_tx_packets(struct XenNetDev *netdev) void *tmpbuf = NULL; for (;;) { - rc = netdev->tx_ring.req_cons; - rp = netdev->tx_ring.sring->req_prod; - xen_rmb(); /* Ensure we see queued requests up to 'rp'. */ + rc = netdev->tx_ring.req_cons; + rp = netdev->tx_ring.sring->req_prod; + xen_rmb(); /* Ensure we see queued requests up to 'rp'. */ - while ((rc != rp)) { - if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc)) - break; - memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq)); - netdev->tx_ring.req_cons = ++rc; + while ((rc != rp)) { + if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc)) { + break; + } + memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq)); + netdev->tx_ring.req_cons = ++rc; #if 1 - /* should not happen in theory, we don't announce the * - * feature-{sg,gso,whatelse} flags in xenstore (yet?) 
*/ - if (txreq.flags & NETTXF_extra_info) { - xen_be_printf(&netdev->xendev, 0, "FIXME: extra info flag\n"); - net_tx_error(netdev, &txreq, rc); - continue; - } - if (txreq.flags & NETTXF_more_data) { - xen_be_printf(&netdev->xendev, 0, "FIXME: more data flag\n"); - net_tx_error(netdev, &txreq, rc); - continue; - } + /* should not happen in theory, we don't announce the * + * feature-{sg,gso,whatelse} flags in xenstore (yet?) */ + if (txreq.flags & NETTXF_extra_info) { + xen_be_printf(&netdev->xendev, 0, "FIXME: extra info flag\n"); + net_tx_error(netdev, &txreq, rc); + continue; + } + if (txreq.flags & NETTXF_more_data) { + xen_be_printf(&netdev->xendev, 0, "FIXME: more data flag\n"); + net_tx_error(netdev, &txreq, rc); + continue; + } #endif - if (txreq.size < 14) { - xen_be_printf(&netdev->xendev, 0, "bad packet size: %d\n", txreq.size); - net_tx_error(netdev, &txreq, rc); - continue; - } - - if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) { - xen_be_printf(&netdev->xendev, 0, "error: page crossing\n"); - net_tx_error(netdev, &txreq, rc); - continue; - } - - xen_be_printf(&netdev->xendev, 3, "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n", - txreq.gref, txreq.offset, txreq.size, txreq.flags, - (txreq.flags & NETTXF_csum_blank) ? " csum_blank" : "", - (txreq.flags & NETTXF_data_validated) ? " data_validated" : "", - (txreq.flags & NETTXF_more_data) ? " more_data" : "", - (txreq.flags & NETTXF_extra_info) ? " extra_info" : ""); - - page = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev, - netdev->xendev.dom, - txreq.gref, PROT_READ); - if (page == NULL) { - xen_be_printf(&netdev->xendev, 0, "error: tx gref dereference failed (%d)\n", + if (txreq.size < 14) { + xen_be_printf(&netdev->xendev, 0, "bad packet size: %d\n", txreq.size); + net_tx_error(netdev, &txreq, rc); + continue; + } + + if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) { + xen_be_printf(&netdev->xendev, 0, "error: page crossing\n"); + net_tx_error(netdev, &txreq, rc); + continue; + } + + xen_be_printf(&netdev->xendev, 3, "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n", + txreq.gref, txreq.offset, txreq.size, txreq.flags, + (txreq.flags & NETTXF_csum_blank) ? " csum_blank" : "", + (txreq.flags & NETTXF_data_validated) ? " data_validated" : "", + (txreq.flags & NETTXF_more_data) ? " more_data" : "", + (txreq.flags & NETTXF_extra_info) ? 
" extra_info" : ""); + + page = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev, + netdev->xendev.dom, + txreq.gref, PROT_READ); + if (page == NULL) { + xen_be_printf(&netdev->xendev, 0, "error: tx gref dereference failed (%d)\n", txreq.gref); - net_tx_error(netdev, &txreq, rc); - continue; - } - if (txreq.flags & NETTXF_csum_blank) { + net_tx_error(netdev, &txreq, rc); + continue; + } + if (txreq.flags & NETTXF_csum_blank) { /* have read-only mapping -> can't fill checksum in-place */ - if (!tmpbuf) + if (!tmpbuf) { tmpbuf = qemu_malloc(XC_PAGE_SIZE); + } memcpy(tmpbuf, page + txreq.offset, txreq.size); - net_checksum_calculate(tmpbuf, txreq.size); + net_checksum_calculate(tmpbuf, txreq.size); qemu_send_packet(&netdev->nic->nc, tmpbuf, txreq.size); } else { qemu_send_packet(&netdev->nic->nc, page + txreq.offset, txreq.size); } - xc_gnttab_munmap(netdev->xendev.gnttabdev, page, 1); - net_tx_response(netdev, &txreq, NETIF_RSP_OKAY); - } - if (!netdev->tx_work) - break; - netdev->tx_work = 0; + xc_gnttab_munmap(netdev->xendev.gnttabdev, page, 1); + net_tx_response(netdev, &txreq, NETIF_RSP_OKAY); + } + if (!netdev->tx_work) { + break; + } + netdev->tx_work = 0; } qemu_free(tmpbuf); } @@ -198,9 +205,9 @@ static void net_tx_packets(struct XenNetDev *netdev) /* ------------------------------------------------------------- */ static void net_rx_response(struct XenNetDev *netdev, - netif_rx_request_t *req, int8_t st, - uint16_t offset, uint16_t size, - uint16_t flags) + netif_rx_request_t *req, int8_t st, + uint16_t offset, uint16_t size, + uint16_t flags) { RING_IDX i = netdev->rx_ring.rsp_prod_pvt; netif_rx_response_t *resp; @@ -211,16 +218,18 @@ static void net_rx_response(struct XenNetDev *netdev, resp->flags = flags; resp->id = req->id; resp->status = (int16_t)size; - if (st < 0) - resp->status = (int16_t)st; + if (st < 0) { + resp->status = (int16_t)st; + } xen_be_printf(&netdev->xendev, 3, "rx response: idx %d, status %d, flags 0x%x\n", - i, resp->status, resp->flags); + i, resp->status, resp->flags); netdev->rx_ring.rsp_prod_pvt = ++i; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify); - if (notify) - xen_be_send_notify(&netdev->xendev); + if (notify) { + xen_be_send_notify(&netdev->xendev); + } } #define NET_IP_ALIGN 2 @@ -230,17 +239,18 @@ static int net_rx_ok(VLANClientState *nc) struct XenNetDev *netdev = DO_UPCAST(NICState, nc, nc)->opaque; RING_IDX rc, rp; - if (netdev->xendev.be_state != XenbusStateConnected) - return 0; + if (netdev->xendev.be_state != XenbusStateConnected) { + return 0; + } rc = netdev->rx_ring.req_cons; rp = netdev->rx_ring.sring->req_prod; xen_rmb(); if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) { - xen_be_printf(&netdev->xendev, 2, "%s: no rx buffers (%d/%d)\n", - __FUNCTION__, rc, rp); - return 0; + xen_be_printf(&netdev->xendev, 2, "%s: no rx buffers (%d/%d)\n", + __FUNCTION__, rc, rp); + return 0; } return 1; } @@ -252,34 +262,35 @@ static ssize_t net_rx_packet(VLANClientState *nc, const uint8_t *buf, size_t siz RING_IDX rc, rp; void *page; - if (netdev->xendev.be_state != XenbusStateConnected) - return -1; + if (netdev->xendev.be_state != XenbusStateConnected) { + return -1; + } rc = netdev->rx_ring.req_cons; rp = netdev->rx_ring.sring->req_prod; xen_rmb(); /* Ensure we see queued requests up to 'rp'. 
*/ if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) { - xen_be_printf(&netdev->xendev, 2, "no buffer, drop packet\n"); - return -1; + xen_be_printf(&netdev->xendev, 2, "no buffer, drop packet\n"); + return -1; } if (size > XC_PAGE_SIZE - NET_IP_ALIGN) { - xen_be_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)", - (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN); - return -1; + xen_be_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)", + (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN); + return -1; } memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq)); netdev->rx_ring.req_cons = ++rc; page = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev, - netdev->xendev.dom, - rxreq.gref, PROT_WRITE); + netdev->xendev.dom, + rxreq.gref, PROT_WRITE); if (page == NULL) { - xen_be_printf(&netdev->xendev, 0, "error: rx gref dereference failed (%d)\n", + xen_be_printf(&netdev->xendev, 0, "error: rx gref dereference failed (%d)\n", rxreq.gref); - net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0); - return -1; + net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0); + return -1; } memcpy(page + NET_IP_ALIGN, buf, size); xc_gnttab_munmap(netdev->xendev.gnttabdev, page, 1); @@ -302,15 +313,18 @@ static int net_init(struct XenDevice *xendev) struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev); /* read xenstore entries */ - if (netdev->mac == NULL) - netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac"); + if (netdev->mac == NULL) { + netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac"); + } /* do we have all we need? */ - if (netdev->mac == NULL) - return -1; + if (netdev->mac == NULL) { + return -1; + } - if (net_parse_macaddr(netdev->conf.macaddr.a, netdev->mac) < 0) + if (net_parse_macaddr(netdev->conf.macaddr.a, netdev->mac) < 0) { return -1; + } netdev->conf.vlan = qemu_find_vlan(netdev->xendev.dev, 1); netdev->conf.peer = NULL; @@ -334,41 +348,46 @@ static int net_connect(struct XenDevice *xendev) int rx_copy; if (xenstore_read_fe_int(&netdev->xendev, "tx-ring-ref", - &netdev->tx_ring_ref) == -1) - return -1; + &netdev->tx_ring_ref) == -1) { + return -1; + } if (xenstore_read_fe_int(&netdev->xendev, "rx-ring-ref", - &netdev->rx_ring_ref) == -1) - return 1; + &netdev->rx_ring_ref) == -1) { + return 1; + } if (xenstore_read_fe_int(&netdev->xendev, "event-channel", - &netdev->xendev.remote_port) == -1) - return -1; + &netdev->xendev.remote_port) == -1) { + return -1; + } - if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1) - rx_copy = 0; + if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1) { + rx_copy = 0; + } if (rx_copy == 0) { - xen_be_printf(&netdev->xendev, 0, "frontend doesn't support rx-copy.\n"); - return -1; + xen_be_printf(&netdev->xendev, 0, "frontend doesn't support rx-copy.\n"); + return -1; } netdev->txs = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev, - netdev->xendev.dom, - netdev->tx_ring_ref, - PROT_READ | PROT_WRITE); + netdev->xendev.dom, + netdev->tx_ring_ref, + PROT_READ | PROT_WRITE); netdev->rxs = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev, - netdev->xendev.dom, - netdev->rx_ring_ref, - PROT_READ | PROT_WRITE); - if (!netdev->txs || !netdev->rxs) - return -1; + netdev->xendev.dom, + netdev->rx_ring_ref, + PROT_READ | PROT_WRITE); + if (!netdev->txs || !netdev->rxs) { + return -1; + } BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE); BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE); 
xen_be_bind_evtchn(&netdev->xendev); xen_be_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, " - "remote port %d, local port %d\n", - netdev->tx_ring_ref, netdev->rx_ring_ref, - netdev->xendev.remote_port, netdev->xendev.local_port); + "remote port %d, local port %d\n", + netdev->tx_ring_ref, netdev->rx_ring_ref, + netdev->xendev.remote_port, netdev->xendev.local_port); net_tx_packets(netdev); return 0; @@ -381,12 +400,12 @@ static void net_disconnect(struct XenDevice *xendev) xen_be_unbind_evtchn(&netdev->xendev); if (netdev->txs) { - xc_gnttab_munmap(netdev->xendev.gnttabdev, netdev->txs, 1); - netdev->txs = NULL; + xc_gnttab_munmap(netdev->xendev.gnttabdev, netdev->txs, 1); + netdev->txs = NULL; } if (netdev->rxs) { - xc_gnttab_munmap(netdev->xendev.gnttabdev, netdev->rxs, 1); - netdev->rxs = NULL; + xc_gnttab_munmap(netdev->xendev.gnttabdev, netdev->rxs, 1); + netdev->rxs = NULL; } if (netdev->nic) { qemu_del_vlan_client(&netdev->nic->nc); diff --git a/hw/xilinx_axidma.c b/hw/xilinx_axidma.c index e32534feaf..571a5b0661 100644 --- a/hw/xilinx_axidma.c +++ b/hw/xilinx_axidma.c @@ -134,10 +134,10 @@ static inline int stream_idle(struct AXIStream *s) static void stream_reset(struct AXIStream *s) { s->regs[R_DMASR] = DMASR_HALTED; /* starts up halted. */ - s->regs[R_DMACR] = 1 << 16; /* Starts with one in compl threshhold. */ + s->regs[R_DMACR] = 1 << 16; /* Starts with one in compl threshold. */ } -/* Mapp an offset addr into a channel index. */ +/* Map an offset addr into a channel index. */ static inline int streamid_from_addr(target_phys_addr_t addr) { int sid; diff --git a/json-lexer.c b/json-lexer.c index 65c9720d65..c21338f66d 100644 --- a/json-lexer.c +++ b/json-lexer.c @@ -18,6 +18,8 @@ #include "qemu-common.h" #include "json-lexer.h" +#define MAX_TOKEN_SIZE (64ULL << 20) + /* * \"([^\\\"]|(\\\"\\'\\\\\\/\\b\\f\\n\\r\\t\\u[0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F]))*\" * '([^\\']|(\\\"\\'\\\\\\/\\b\\f\\n\\r\\t\\u[0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F]))*' @@ -103,7 +105,8 @@ static const uint8_t json_lexer[][256] = { ['u'] = IN_DQ_UCODE0, }, [IN_DQ_STRING] = { - [1 ... 0xFF] = IN_DQ_STRING, + [1 ... 0xBF] = IN_DQ_STRING, + [0xC2 ... 0xF4] = IN_DQ_STRING, ['\\'] = IN_DQ_STRING_ESCAPE, ['"'] = JSON_STRING, }, @@ -142,7 +145,8 @@ static const uint8_t json_lexer[][256] = { ['u'] = IN_SQ_UCODE0, }, [IN_SQ_STRING] = { - [1 ... 0xFF] = IN_SQ_STRING, + [1 ... 0xBF] = IN_SQ_STRING, + [0xC2 ... 0xF4] = IN_SQ_STRING, ['\\'] = IN_SQ_STRING_ESCAPE, ['\''] = JSON_STRING, }, @@ -272,7 +276,7 @@ void json_lexer_init(JSONLexer *lexer, JSONLexerEmitter func) lexer->x = lexer->y = 0; } -static int json_lexer_feed_char(JSONLexer *lexer, char ch) +static int json_lexer_feed_char(JSONLexer *lexer, char ch, bool flush) { int char_consumed, new_state; @@ -303,12 +307,41 @@ static int json_lexer_feed_char(JSONLexer *lexer, char ch) new_state = IN_START; break; case IN_ERROR: - return -EINVAL; + /* XXX: To avoid having previous bad input leaving the parser in an + * unresponsive state where we consume unpredictable amounts of + * subsequent "good" input, percolate this error state up to the + * tokenizer/parser by forcing a NULL object to be emitted, then + * reset state. + * + * Also note that this handling is required for reliable channel + * negotiation between QMP and the guest agent, since chr(0xFF) + * is placed at the beginning of certain events to ensure proper + * delivery when the channel is in an unknown state. 
chr(0xFF) is + * never a valid ASCII/UTF-8 sequence, so this should reliably + * induce an error/flush state. + */ + lexer->emit(lexer, lexer->token, JSON_ERROR, lexer->x, lexer->y); + QDECREF(lexer->token); + lexer->token = qstring_new(); + new_state = IN_START; + lexer->state = new_state; + return 0; default: break; } lexer->state = new_state; - } while (!char_consumed); + } while (!char_consumed && !flush); + + /* Do not let a single token grow to an arbitrarily large size, + * this is a security consideration. + */ + if (lexer->token->length > MAX_TOKEN_SIZE) { + lexer->emit(lexer, lexer->token, lexer->state, lexer->x, lexer->y); + QDECREF(lexer->token); + lexer->token = qstring_new(); + lexer->state = IN_START; + } + return 0; } @@ -319,7 +352,7 @@ int json_lexer_feed(JSONLexer *lexer, const char *buffer, size_t size) for (i = 0; i < size; i++) { int err; - err = json_lexer_feed_char(lexer, buffer[i]); + err = json_lexer_feed_char(lexer, buffer[i], false); if (err < 0) { return err; } @@ -330,7 +363,7 @@ int json_lexer_feed(JSONLexer *lexer, const char *buffer, size_t size) int json_lexer_flush(JSONLexer *lexer) { - return lexer->state == IN_START ? 0 : json_lexer_feed_char(lexer, 0); + return lexer->state == IN_START ? 0 : json_lexer_feed_char(lexer, 0, true); } void json_lexer_destroy(JSONLexer *lexer) diff --git a/json-lexer.h b/json-lexer.h index 3b50c4634b..10bc0a7798 100644 --- a/json-lexer.h +++ b/json-lexer.h @@ -25,6 +25,7 @@ typedef enum json_token_type { JSON_STRING, JSON_ESCAPE, JSON_SKIP, + JSON_ERROR, } JSONTokenType; typedef struct JSONLexer JSONLexer; diff --git a/json-parser.c b/json-parser.c index 6c06ef91a6..849e2156da 100644 --- a/json-parser.c +++ b/json-parser.c @@ -22,9 +22,11 @@ #include "qbool.h" #include "json-parser.h" #include "json-lexer.h" +#include "qerror.h" typedef struct JSONParserContext { + Error *err; } JSONParserContext; #define BUG_ON(cond) assert(!(cond)) @@ -95,11 +97,15 @@ static void GCC_FMT_ATTR(3, 4) parse_error(JSONParserContext *ctxt, QObject *token, const char *msg, ...) 
{ va_list ap; + char message[1024]; va_start(ap, msg); - fprintf(stderr, "parse error: "); - vfprintf(stderr, msg, ap); - fprintf(stderr, "\n"); + vsnprintf(message, sizeof(message), msg, ap); va_end(ap); + if (ctxt->err) { + error_free(ctxt->err); + ctxt->err = NULL; + } + error_set(&ctxt->err, QERR_JSON_PARSE_ERROR, message); } /** @@ -269,10 +275,15 @@ out: */ static int parse_pair(JSONParserContext *ctxt, QDict *dict, QList **tokens, va_list *ap) { - QObject *key, *token = NULL, *value, *peek; + QObject *key = NULL, *token = NULL, *value, *peek; QList *working = qlist_copy(*tokens); peek = qlist_peek(working); + if (peek == NULL) { + parse_error(ctxt, NULL, "premature EOI"); + goto out; + } + key = parse_value(ctxt, &working, ap); if (!key || qobject_type(key) != QTYPE_QSTRING) { parse_error(ctxt, peek, "key is not a string in object"); @@ -280,6 +291,11 @@ static int parse_pair(JSONParserContext *ctxt, QDict *dict, QList **tokens, va_l } token = qlist_pop(working); + if (token == NULL) { + parse_error(ctxt, NULL, "premature EOI"); + goto out; + } + if (!token_is_operator(token, ':')) { parse_error(ctxt, token, "missing : in object pair"); goto out; @@ -315,6 +331,10 @@ static QObject *parse_object(JSONParserContext *ctxt, QList **tokens, va_list *a QList *working = qlist_copy(*tokens); token = qlist_pop(working); + if (token == NULL) { + goto out; + } + if (!token_is_operator(token, '{')) { goto out; } @@ -324,12 +344,22 @@ static QObject *parse_object(JSONParserContext *ctxt, QList **tokens, va_list *a dict = qdict_new(); peek = qlist_peek(working); + if (peek == NULL) { + parse_error(ctxt, NULL, "premature EOI"); + goto out; + } + if (!token_is_operator(peek, '}')) { if (parse_pair(ctxt, dict, &working, ap) == -1) { goto out; } token = qlist_pop(working); + if (token == NULL) { + parse_error(ctxt, NULL, "premature EOI"); + goto out; + } + while (!token_is_operator(token, '}')) { if (!token_is_operator(token, ',')) { parse_error(ctxt, token, "expected separator in dict"); @@ -343,6 +373,10 @@ static QObject *parse_object(JSONParserContext *ctxt, QList **tokens, va_list *a } token = qlist_pop(working); + if (token == NULL) { + parse_error(ctxt, NULL, "premature EOI"); + goto out; + } } qobject_decref(token); token = NULL; @@ -371,6 +405,10 @@ static QObject *parse_array(JSONParserContext *ctxt, QList **tokens, va_list *ap QList *working = qlist_copy(*tokens); token = qlist_pop(working); + if (token == NULL) { + goto out; + } + if (!token_is_operator(token, '[')) { goto out; } @@ -380,6 +418,11 @@ static QObject *parse_array(JSONParserContext *ctxt, QList **tokens, va_list *ap list = qlist_new(); peek = qlist_peek(working); + if (peek == NULL) { + parse_error(ctxt, NULL, "premature EOI"); + goto out; + } + if (!token_is_operator(peek, ']')) { QObject *obj; @@ -392,6 +435,11 @@ static QObject *parse_array(JSONParserContext *ctxt, QList **tokens, va_list *ap qlist_append_obj(list, obj); token = qlist_pop(working); + if (token == NULL) { + parse_error(ctxt, NULL, "premature EOI"); + goto out; + } + while (!token_is_operator(token, ']')) { if (!token_is_operator(token, ',')) { parse_error(ctxt, token, "expected separator in list"); @@ -410,6 +458,10 @@ static QObject *parse_array(JSONParserContext *ctxt, QList **tokens, va_list *ap qlist_append_obj(list, obj); token = qlist_pop(working); + if (token == NULL) { + parse_error(ctxt, NULL, "premature EOI"); + goto out; + } } qobject_decref(token); @@ -438,6 +490,9 @@ static QObject *parse_keyword(JSONParserContext *ctxt, QList **tokens) QList 
*working = qlist_copy(*tokens); token = qlist_pop(working); + if (token == NULL) { + goto out; + } if (token_get_type(token) != JSON_KEYWORD) { goto out; @@ -475,6 +530,9 @@ static QObject *parse_escape(JSONParserContext *ctxt, QList **tokens, va_list *a } token = qlist_pop(working); + if (token == NULL) { + goto out; + } if (token_is_escape(token, "%p")) { obj = va_arg(*ap, QObject *); @@ -514,6 +572,10 @@ static QObject *parse_literal(JSONParserContext *ctxt, QList **tokens) QList *working = qlist_copy(*tokens); token = qlist_pop(working); + if (token == NULL) { + goto out; + } + switch (token_get_type(token)) { case JSON_STRING: obj = QOBJECT(qstring_from_escaped_str(ctxt, token)); @@ -565,13 +627,24 @@ static QObject *parse_value(JSONParserContext *ctxt, QList **tokens, va_list *ap QObject *json_parser_parse(QList *tokens, va_list *ap) { + return json_parser_parse_err(tokens, ap, NULL); +} + +QObject *json_parser_parse_err(QList *tokens, va_list *ap, Error **errp) +{ JSONParserContext ctxt = {}; - QList *working = qlist_copy(tokens); + QList *working; QObject *result; + if (!tokens) { + return NULL; + } + working = qlist_copy(tokens); result = parse_value(&ctxt, &working, ap); QDECREF(working); + error_propagate(errp, ctxt.err); + return result; } diff --git a/json-parser.h b/json-parser.h index 97f43f67d4..8f2b5ec4bc 100644 --- a/json-parser.h +++ b/json-parser.h @@ -16,7 +16,9 @@ #include "qemu-common.h" #include "qlist.h" +#include "error.h" QObject *json_parser_parse(QList *tokens, va_list *ap); +QObject *json_parser_parse_err(QList *tokens, va_list *ap, Error **errp); #endif diff --git a/json-streamer.c b/json-streamer.c index f7e7a68d40..c255c7818f 100644 --- a/json-streamer.c +++ b/json-streamer.c @@ -18,6 +18,9 @@ #include "json-lexer.h" #include "json-streamer.h" +#define MAX_TOKEN_SIZE (64ULL << 20) +#define MAX_NESTING (1ULL << 10) + static void json_message_process_token(JSONLexer *lexer, QString *token, JSONTokenType type, int x, int y) { JSONMessageParser *parser = container_of(lexer, JSONMessageParser, lexer); @@ -49,14 +52,44 @@ static void json_message_process_token(JSONLexer *lexer, QString *token, JSONTok qdict_put(dict, "x", qint_from_int(x)); qdict_put(dict, "y", qint_from_int(y)); + parser->token_size += token->length; + qlist_append(parser->tokens, dict); - if (parser->brace_count == 0 && - parser->bracket_count == 0) { - parser->emit(parser, parser->tokens); + if (type == JSON_ERROR) { + goto out_emit_bad; + } else if (parser->brace_count < 0 || + parser->bracket_count < 0 || + (parser->brace_count == 0 && + parser->bracket_count == 0)) { + goto out_emit; + } else if (parser->token_size > MAX_TOKEN_SIZE || + parser->bracket_count > MAX_NESTING || + parser->brace_count > MAX_NESTING) { + /* Security consideration, we limit total memory allocated per object + * and the maximum recursion depth that a message can force. 
+ */ + goto out_emit; + } + + return; + +out_emit_bad: + /* clear out token list and tell the parser to emit and error + * indication by passing it a NULL list + */ + QDECREF(parser->tokens); + parser->tokens = NULL; +out_emit: + /* send current list of tokens to parser and reset tokenizer */ + parser->brace_count = 0; + parser->bracket_count = 0; + parser->emit(parser, parser->tokens); + if (parser->tokens) { QDECREF(parser->tokens); - parser->tokens = qlist_new(); } + parser->tokens = qlist_new(); + parser->token_size = 0; } void json_message_parser_init(JSONMessageParser *parser, @@ -66,6 +99,7 @@ void json_message_parser_init(JSONMessageParser *parser, parser->brace_count = 0; parser->bracket_count = 0; parser->tokens = qlist_new(); + parser->token_size = 0; json_lexer_init(&parser->lexer, json_message_process_token); } diff --git a/json-streamer.h b/json-streamer.h index 09f3bd70e4..f09bc4daec 100644 --- a/json-streamer.h +++ b/json-streamer.h @@ -24,6 +24,7 @@ typedef struct JSONMessageParser int brace_count; int bracket_count; QList *tokens; + uint64_t token_size; } JSONMessageParser; void json_message_parser_init(JSONMessageParser *parser, @@ -603,6 +603,11 @@ static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size, if (err) { fprintf(stderr, "%s: error registering prefix slot: %s\n", __func__, strerror(-err)); +#ifdef TARGET_PPC + fprintf(stderr, "%s: This is probably because your kernel's " \ + "PAGE_SIZE is too big. Please try to use 4k " \ + "PAGE_SIZE!\n", __func__); +#endif abort(); } } diff --git a/libcacard/Makefile b/libcacard/Makefile index 4010029173..1d34df0004 100644 --- a/libcacard/Makefile +++ b/libcacard/Makefile @@ -4,14 +4,7 @@ $(call set-vpath, $(SRC_PATH):$(SRC_PATH)/libcacard) -ifeq ($(CONFIG_WIN32),y) -QEMU_THREAD=qemu-thread-win32.o -else -QEMU_THREAD=qemu-thread-posix.o -endif - - -QEMU_OBJS=$(addprefix ../, $(QEMU_THREAD) $(oslib-obj-y) $(trace-obj-y) qemu-malloc.o qemu-timer-common.o) +QEMU_OBJS=$(addprefix ../, $(oslib-obj-y) $(trace-obj-y) qemu-malloc.o qemu-timer-common.o) QEMU_CFLAGS+=-I../ diff --git a/libcacard/vcard_emul_nss.c b/libcacard/vcard_emul_nss.c index baada52a3c..f3db657d74 100644 --- a/libcacard/vcard_emul_nss.c +++ b/libcacard/vcard_emul_nss.c @@ -971,7 +971,7 @@ find_blank(const char *str) /* * We really want to use some existing argument parsing library here. That - * would give us a consistant look */ + * would give us a consistent look */ static VCardEmulOptions options; #define READER_STEP 4 diff --git a/libcacard/vscard_common.h b/libcacard/vscard_common.h index bebd52db17..609ae98bcf 100644 --- a/libcacard/vscard_common.h +++ b/libcacard/vscard_common.h @@ -153,7 +153,7 @@ typedef struct VSCMsgCardRemove { /* * VSCMsgAPDU Client <-> Host - * Main reason of existance. Transfer a single APDU in either direction. + * Main reason of existence. Transfer a single APDU in either direction. 
*/ typedef struct VSCMsgAPDU { uint8_t data[0]; diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 4c399f8e33..dcfeb7a286 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -867,6 +867,25 @@ static inline void init_thread(struct target_pt_regs *regs, #endif /* TARGET_ALPHA */ +#ifdef TARGET_S390X + +#define ELF_START_MMAP (0x20000000000ULL) + +#define elf_check_arch(x) ( (x) == ELF_ARCH ) + +#define ELF_CLASS ELFCLASS64 +#define ELF_DATA ELFDATA2MSB +#define ELF_ARCH EM_S390 + +static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) +{ + regs->psw.addr = infop->entry; + regs->psw.mask = PSW_MASK_64 | PSW_MASK_32; + regs->gprs[15] = infop->start_stack; +} + +#endif /* TARGET_S390X */ + #ifndef ELF_PLATFORM #define ELF_PLATFORM (NULL) #endif diff --git a/linux-user/main.c b/linux-user/main.c index a1e37e4948..04da0a4ca4 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -323,7 +323,7 @@ void cpu_loop(CPUX86State *env) break; #ifndef TARGET_ABI32 case EXCP_SYSCALL: - /* linux syscall from syscall intruction */ + /* linux syscall from syscall instruction */ env->regs[R_EAX] = do_syscall(env, env->regs[R_EAX], env->regs[R_EDI], @@ -455,24 +455,6 @@ void cpu_loop(CPUX86State *env) #ifdef TARGET_ARM -static void arm_cache_flush(abi_ulong start, abi_ulong last) -{ - abi_ulong addr, last1; - - if (last < start) - return; - addr = start; - for(;;) { - last1 = ((addr + TARGET_PAGE_SIZE) & TARGET_PAGE_MASK) - 1; - if (last1 > last) - last1 = last; - tb_invalidate_page_range(addr, last1 + 1); - if (last1 == last) - break; - addr = last1 + 1; - } -} - /* Handle a jump to the kernel code page. */ static int do_kernel_trap(CPUARMState *env) @@ -717,7 +699,7 @@ void cpu_loop(CPUARMState *env) } if (n == ARM_NR_cacheflush) { - arm_cache_flush(env->regs[0], env->regs[1]); + /* nop */ } else if (n == ARM_NR_semihosting || n == ARM_NR_thumb_semihosting) { env->regs[0] = do_arm_semihosting (env); @@ -733,7 +715,7 @@ void cpu_loop(CPUARMState *env) if ( n > ARM_NR_BASE) { switch (n) { case ARM_NR_cacheflush: - arm_cache_flush(env->regs[0], env->regs[1]); + /* nop */ break; case ARM_NR_set_tls: cpu_set_tls(env, env->regs[0]); @@ -2526,49 +2508,27 @@ void cpu_loop (CPUState *env) fprintf(stderr, "Machine check exception. Exit\n"); exit(1); break; - case EXCP_ARITH: - env->lock_addr = -1; - info.si_signo = TARGET_SIGFPE; - info.si_errno = 0; - info.si_code = TARGET_FPE_FLTINV; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, &info); - break; - case EXCP_HW_INTERRUPT: + case EXCP_SMP_INTERRUPT: + case EXCP_CLK_INTERRUPT: + case EXCP_DEV_INTERRUPT: fprintf(stderr, "External interrupt. Exit\n"); exit(1); break; - case EXCP_DFAULT: + case EXCP_MMFAULT: env->lock_addr = -1; info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; - info.si_code = (page_get_flags(env->ipr[IPR_EXC_ADDR]) & PAGE_VALID + info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID ? 
TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR); - info._sifields._sigfault._addr = env->ipr[IPR_EXC_ADDR]; + info._sifields._sigfault._addr = env->trap_arg0; queue_signal(env, info.si_signo, &info); break; - case EXCP_DTB_MISS_PAL: - fprintf(stderr, "MMU data TLB miss in PALcode\n"); - exit(1); - break; - case EXCP_ITB_MISS: - fprintf(stderr, "MMU instruction TLB miss\n"); - exit(1); - break; - case EXCP_ITB_ACV: - fprintf(stderr, "MMU instruction access violation\n"); - exit(1); - break; - case EXCP_DTB_MISS_NATIVE: - fprintf(stderr, "MMU data TLB miss\n"); - exit(1); - break; case EXCP_UNALIGN: env->lock_addr = -1; info.si_signo = TARGET_SIGBUS; info.si_errno = 0; info.si_code = TARGET_BUS_ADRALN; - info._sifields._sigfault._addr = env->ipr[IPR_EXC_ADDR]; + info._sifields._sigfault._addr = env->trap_arg0; queue_signal(env, info.si_signo, &info); break; case EXCP_OPCDEC: @@ -2580,12 +2540,20 @@ void cpu_loop (CPUState *env) info._sifields._sigfault._addr = env->pc; queue_signal(env, info.si_signo, &info); break; + case EXCP_ARITH: + env->lock_addr = -1; + info.si_signo = TARGET_SIGFPE; + info.si_errno = 0; + info.si_code = TARGET_FPE_FLTINV; + info._sifields._sigfault._addr = env->pc; + queue_signal(env, info.si_signo, &info); + break; case EXCP_FEN: /* No-op. Linux simply re-enables the FPU. */ break; - case EXCP_CALL_PAL ... (EXCP_CALL_PALP - 1): + case EXCP_CALL_PAL: env->lock_addr = -1; - switch ((trapnr >> 6) | 0x80) { + switch (env->error_code) { case 0x80: /* BPT */ info.si_signo = TARGET_SIGTRAP; @@ -2676,8 +2644,6 @@ void cpu_loop (CPUState *env) goto do_sigill; } break; - case EXCP_CALL_PALP ... (EXCP_CALL_PALE - 1): - goto do_sigill; case EXCP_DEBUG: info.si_signo = gdb_handlesig (env, TARGET_SIGTRAP); if (info.si_signo) { @@ -2701,6 +2667,80 @@ void cpu_loop (CPUState *env) } #endif /* TARGET_ALPHA */ +#ifdef TARGET_S390X +void cpu_loop(CPUS390XState *env) +{ + int trapnr; + target_siginfo_t info; + + while (1) { + trapnr = cpu_s390x_exec (env); + + switch (trapnr) { + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + case EXCP_DEBUG: + { + int sig; + + sig = gdb_handlesig (env, TARGET_SIGTRAP); + if (sig) { + info.si_signo = sig; + info.si_errno = 0; + info.si_code = TARGET_TRAP_BRKPT; + queue_signal(env, info.si_signo, &info); + } + } + break; + case EXCP_SVC: + { + int n = env->int_svc_code; + if (!n) { + /* syscalls > 255 */ + n = env->regs[1]; + } + env->psw.addr += env->int_svc_ilc; + env->regs[2] = do_syscall(env, n, + env->regs[2], + env->regs[3], + env->regs[4], + env->regs[5], + env->regs[6], + env->regs[7]); + } + break; + case EXCP_ADDR: + { + info.si_signo = SIGSEGV; + info.si_errno = 0; + /* XXX: check env->error_code */ + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = env->__excp_addr; + queue_signal(env, info.si_signo, &info); + } + break; + case EXCP_SPEC: + { + fprintf(stderr,"specification exception insn 0x%08x%04x\n", ldl(env->psw.addr), lduw(env->psw.addr + 4)); + info.si_signo = SIGILL; + info.si_errno = 0; + info.si_code = TARGET_ILL_ILLOPC; + info._sifields._sigfault._addr = env->__excp_addr; + queue_signal(env, info.si_signo, &info); + } + break; + default: + printf ("Unhandled trap: 0x%x\n", trapnr); + cpu_dump_state(env, stderr, fprintf, 0); + exit (1); + } + process_pending_signals (env); + } +} + +#endif /* TARGET_S390X */ + static void version(void) { printf("qemu-" TARGET_ARCH " version " QEMU_VERSION QEMU_PKGVERSION @@ -3450,6 +3490,15 @@ int main(int argc, char **argv, char **envp) 
env->regs[15] = regs->acr; env->pc = regs->erp; } +#elif defined(TARGET_S390X) + { + int i; + for (i = 0; i < 16; i++) { + env->regs[i] = regs->gprs[i]; + } + env->psw.mask = regs->psw.mask; + env->psw.addr = regs->psw.addr; + } #else #error unsupported target CPU #endif diff --git a/linux-user/mmap.c b/linux-user/mmap.c index 0cf22f8cb2..994c02bb77 100644 --- a/linux-user/mmap.c +++ b/linux-user/mmap.c @@ -354,7 +354,7 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size) } wrapped = 1; /* Don't actually use 0 when wrapping, instead indicate - that we'd truely like an allocation in low memory. */ + that we'd truly like an allocation in low memory. */ addr = (mmap_min_addr > TARGET_PAGE_SIZE ? TARGET_PAGE_ALIGN(mmap_min_addr) : TARGET_PAGE_SIZE); diff --git a/linux-user/qemu.h b/linux-user/qemu.h index f522f5e64a..237386caac 100644 --- a/linux-user/qemu.h +++ b/linux-user/qemu.h @@ -379,7 +379,7 @@ abi_long copy_from_user(void *hptr, abi_ulong gaddr, size_t len); abi_long copy_to_user(abi_ulong gaddr, void *hptr, size_t len); /* Functions for accessing guest memory. The tget and tput functions - read/write single values, byteswapping as neccessary. The lock_user + read/write single values, byteswapping as necessary. The lock_user gets a pointer to a contiguous area of guest memory, but does not perform and byteswapping. lock_user may return either a pointer to the guest memory, or a temporary buffer. */ diff --git a/linux-user/s390x/syscall.h b/linux-user/s390x/syscall.h new file mode 100644 index 0000000000..c2ea151ea5 --- /dev/null +++ b/linux-user/s390x/syscall.h @@ -0,0 +1,23 @@ +/* this typedef defines how a Program Status Word looks like */ +typedef struct { + abi_ulong mask; + abi_ulong addr; +} __attribute__ ((aligned(8))) target_psw_t; + +/* + * The pt_regs struct defines the way the registers are stored on + * the stack during a system call. + */ + +#define TARGET_NUM_GPRS 16 + +struct target_pt_regs { + abi_ulong args[1]; + target_psw_t psw; + abi_ulong gprs[TARGET_NUM_GPRS]; + abi_ulong orig_gpr2; + unsigned short ilc; + unsigned short trap; +}; + +#define UNAME_MACHINE "s390x" diff --git a/linux-user/s390x/syscall_nr.h b/linux-user/s390x/syscall_nr.h new file mode 100644 index 0000000000..7cc6db2e1b --- /dev/null +++ b/linux-user/s390x/syscall_nr.h @@ -0,0 +1,349 @@ +/* + * This file contains the system call numbers. 
+ */ + +#define TARGET_NR_exit 1 +#define TARGET_NR_fork 2 +#define TARGET_NR_read 3 +#define TARGET_NR_write 4 +#define TARGET_NR_open 5 +#define TARGET_NR_close 6 +#define TARGET_NR_restart_syscall 7 +#define TARGET_NR_creat 8 +#define TARGET_NR_link 9 +#define TARGET_NR_unlink 10 +#define TARGET_NR_execve 11 +#define TARGET_NR_chdir 12 +#define TARGET_NR_mknod 14 +#define TARGET_NR_chmod 15 +#define TARGET_NR_lseek 19 +#define TARGET_NR_getpid 20 +#define TARGET_NR_mount 21 +#define TARGET_NR_umount 22 +#define TARGET_NR_ptrace 26 +#define TARGET_NR_alarm 27 +#define TARGET_NR_pause 29 +#define TARGET_NR_utime 30 +#define TARGET_NR_access 33 +#define TARGET_NR_nice 34 +#define TARGET_NR_sync 36 +#define TARGET_NR_kill 37 +#define TARGET_NR_rename 38 +#define TARGET_NR_mkdir 39 +#define TARGET_NR_rmdir 40 +#define TARGET_NR_dup 41 +#define TARGET_NR_pipe 42 +#define TARGET_NR_times 43 +#define TARGET_NR_brk 45 +#define TARGET_NR_signal 48 +#define TARGET_NR_acct 51 +#define TARGET_NR_umount2 52 +#define TARGET_NR_ioctl 54 +#define TARGET_NR_fcntl 55 +#define TARGET_NR_setpgid 57 +#define TARGET_NR_umask 60 +#define TARGET_NR_chroot 61 +#define TARGET_NR_ustat 62 +#define TARGET_NR_dup2 63 +#define TARGET_NR_getppid 64 +#define TARGET_NR_getpgrp 65 +#define TARGET_NR_setsid 66 +#define TARGET_NR_sigaction 67 +#define TARGET_NR_sigsuspend 72 +#define TARGET_NR_sigpending 73 +#define TARGET_NR_sethostname 74 +#define TARGET_NR_setrlimit 75 +#define TARGET_NR_getrusage 77 +#define TARGET_NR_gettimeofday 78 +#define TARGET_NR_settimeofday 79 +#define TARGET_NR_symlink 83 +#define TARGET_NR_readlink 85 +#define TARGET_NR_uselib 86 +#define TARGET_NR_swapon 87 +#define TARGET_NR_reboot 88 +#define TARGET_NR_readdir 89 +#define TARGET_NR_mmap 90 +#define TARGET_NR_munmap 91 +#define TARGET_NR_truncate 92 +#define TARGET_NR_ftruncate 93 +#define TARGET_NR_fchmod 94 +#define TARGET_NR_getpriority 96 +#define TARGET_NR_setpriority 97 +#define TARGET_NR_statfs 99 +#define TARGET_NR_fstatfs 100 +#define TARGET_NR_socketcall 102 +#define TARGET_NR_syslog 103 +#define TARGET_NR_setitimer 104 +#define TARGET_NR_getitimer 105 +#define TARGET_NR_stat 106 +#define TARGET_NR_lstat 107 +#define TARGET_NR_fstat 108 +#define TARGET_NR_lookup_dcookie 110 +#define TARGET_NR_vhangup 111 +#define TARGET_NR_idle 112 +#define TARGET_NR_wait4 114 +#define TARGET_NR_swapoff 115 +#define TARGET_NR_sysinfo 116 +#define TARGET_NR_ipc 117 +#define TARGET_NR_fsync 118 +#define TARGET_NR_sigreturn 119 +#define TARGET_NR_clone 120 +#define TARGET_NR_setdomainname 121 +#define TARGET_NR_uname 122 +#define TARGET_NR_adjtimex 124 +#define TARGET_NR_mprotect 125 +#define TARGET_NR_sigprocmask 126 +#define TARGET_NR_create_module 127 +#define TARGET_NR_init_module 128 +#define TARGET_NR_delete_module 129 +#define TARGET_NR_get_kernel_syms 130 +#define TARGET_NR_quotactl 131 +#define TARGET_NR_getpgid 132 +#define TARGET_NR_fchdir 133 +#define TARGET_NR_bdflush 134 +#define TARGET_NR_sysfs 135 +#define TARGET_NR_personality 136 +#define TARGET_NR_afs_syscall 137 /* Syscall for Andrew File System */ +#define TARGET_NR_getdents 141 +#define TARGET_NR_flock 143 +#define TARGET_NR_msync 144 +#define TARGET_NR_readv 145 +#define TARGET_NR_writev 146 +#define TARGET_NR_getsid 147 +#define TARGET_NR_fdatasync 148 +#define TARGET_NR__sysctl 149 +#define TARGET_NR_mlock 150 +#define TARGET_NR_munlock 151 +#define TARGET_NR_mlockall 152 +#define TARGET_NR_munlockall 153 +#define TARGET_NR_sched_setparam 154 +#define TARGET_NR_sched_getparam 
155 +#define TARGET_NR_sched_setscheduler 156 +#define TARGET_NR_sched_getscheduler 157 +#define TARGET_NR_sched_yield 158 +#define TARGET_NR_sched_get_priority_max 159 +#define TARGET_NR_sched_get_priority_min 160 +#define TARGET_NR_sched_rr_get_interval 161 +#define TARGET_NR_nanosleep 162 +#define TARGET_NR_mremap 163 +#define TARGET_NR_query_module 167 +#define TARGET_NR_poll 168 +#define TARGET_NR_nfsservctl 169 +#define TARGET_NR_prctl 172 +#define TARGET_NR_rt_sigreturn 173 +#define TARGET_NR_rt_sigaction 174 +#define TARGET_NR_rt_sigprocmask 175 +#define TARGET_NR_rt_sigpending 176 +#define TARGET_NR_rt_sigtimedwait 177 +#define TARGET_NR_rt_sigqueueinfo 178 +#define TARGET_NR_rt_sigsuspend 179 +#define TARGET_NR_pread64 180 +#define TARGET_NR_pwrite64 181 +#define TARGET_NR_getcwd 183 +#define TARGET_NR_capget 184 +#define TARGET_NR_capset 185 +#define TARGET_NR_sigaltstack 186 +#define TARGET_NR_sendfile 187 +#define TARGET_NR_getpmsg 188 +#define TARGET_NR_putpmsg 189 +#define TARGET_NR_vfork 190 +#define TARGET_NR_pivot_root 217 +#define TARGET_NR_mincore 218 +#define TARGET_NR_madvise 219 +#define TARGET_NR_getdents64 220 +#define TARGET_NR_readahead 222 +#define TARGET_NR_setxattr 224 +#define TARGET_NR_lsetxattr 225 +#define TARGET_NR_fsetxattr 226 +#define TARGET_NR_getxattr 227 +#define TARGET_NR_lgetxattr 228 +#define TARGET_NR_fgetxattr 229 +#define TARGET_NR_listxattr 230 +#define TARGET_NR_llistxattr 231 +#define TARGET_NR_flistxattr 232 +#define TARGET_NR_removexattr 233 +#define TARGET_NR_lremovexattr 234 +#define TARGET_NR_fremovexattr 235 +#define TARGET_NR_gettid 236 +#define TARGET_NR_tkill 237 +#define TARGET_NR_futex 238 +#define TARGET_NR_sched_setaffinity 239 +#define TARGET_NR_sched_getaffinity 240 +#define TARGET_NR_tgkill 241 +/* Number 242 is reserved for tux */ +#define TARGET_NR_io_setup 243 +#define TARGET_NR_io_destroy 244 +#define TARGET_NR_io_getevents 245 +#define TARGET_NR_io_submit 246 +#define TARGET_NR_io_cancel 247 +#define TARGET_NR_exit_group 248 +#define TARGET_NR_epoll_create 249 +#define TARGET_NR_epoll_ctl 250 +#define TARGET_NR_epoll_wait 251 +#define TARGET_NR_set_tid_address 252 +#define TARGET_NR_fadvise64 253 +#define TARGET_NR_timer_create 254 +#define TARGET_NR_timer_settime (TARGET_NR_timer_create+1) +#define TARGET_NR_timer_gettime (TARGET_NR_timer_create+2) +#define TARGET_NR_timer_getoverrun (TARGET_NR_timer_create+3) +#define TARGET_NR_timer_delete (TARGET_NR_timer_create+4) +#define TARGET_NR_clock_settime (TARGET_NR_timer_create+5) +#define TARGET_NR_clock_gettime (TARGET_NR_timer_create+6) +#define TARGET_NR_clock_getres (TARGET_NR_timer_create+7) +#define TARGET_NR_clock_nanosleep (TARGET_NR_timer_create+8) +/* Number 263 is reserved for vserver */ +#define TARGET_NR_statfs64 265 +#define TARGET_NR_fstatfs64 266 +#define TARGET_NR_remap_file_pages 267 +/* Number 268 is reserved for new sys_mbind */ +/* Number 269 is reserved for new sys_get_mempolicy */ +/* Number 270 is reserved for new sys_set_mempolicy */ +#define TARGET_NR_mq_open 271 +#define TARGET_NR_mq_unlink 272 +#define TARGET_NR_mq_timedsend 273 +#define TARGET_NR_mq_timedreceive 274 +#define TARGET_NR_mq_notify 275 +#define TARGET_NR_mq_getsetattr 276 +#define TARGET_NR_kexec_load 277 +#define TARGET_NR_add_key 278 +#define TARGET_NR_request_key 279 +#define TARGET_NR_keyctl 280 +#define TARGET_NR_waitid 281 +#define TARGET_NR_ioprio_set 282 +#define TARGET_NR_ioprio_get 283 +#define TARGET_NR_inotify_init 284 +#define TARGET_NR_inotify_add_watch 285 +#define 
TARGET_NR_inotify_rm_watch 286 +/* Number 287 is reserved for new sys_migrate_pages */ +#define TARGET_NR_openat 288 +#define TARGET_NR_mkdirat 289 +#define TARGET_NR_mknodat 290 +#define TARGET_NR_fchownat 291 +#define TARGET_NR_futimesat 292 +#define TARGET_NR_unlinkat 294 +#define TARGET_NR_renameat 295 +#define TARGET_NR_linkat 296 +#define TARGET_NR_symlinkat 297 +#define TARGET_NR_readlinkat 298 +#define TARGET_NR_fchmodat 299 +#define TARGET_NR_faccessat 300 +#define TARGET_NR_pselect6 301 +#define TARGET_NR_ppoll 302 +#define TARGET_NR_unshare 303 +#define TARGET_NR_set_robust_list 304 +#define TARGET_NR_get_robust_list 305 +#define TARGET_NR_splice 306 +#define TARGET_NR_sync_file_range 307 +#define TARGET_NR_tee 308 +#define TARGET_NR_vmsplice 309 +/* Number 310 is reserved for new sys_move_pages */ +#define TARGET_NR_getcpu 311 +#define TARGET_NR_epoll_pwait 312 +#define TARGET_NR_utimes 313 +#define TARGET_NR_fallocate 314 +#define TARGET_NR_utimensat 315 +#define TARGET_NR_signalfd 316 +#define TARGET_NR_timerfd 317 +#define TARGET_NR_eventfd 318 +#define TARGET_NR_timerfd_create 319 +#define TARGET_NR_timerfd_settime 320 +#define TARGET_NR_timerfd_gettime 321 +#define TARGET_NR_signalfd4 322 +#define TARGET_NR_eventfd2 323 +#define TARGET_NR_inotify_init1 324 +#define TARGET_NR_pipe2 325 +#define TARGET_NR_dup3 326 +#define TARGET_NR_epoll_create1 327 +#undef NR_syscalls +#define NR_syscalls 328 + +/* + * There are some system calls that are not present on 64 bit, some + * have a different name although they do the same (e.g. TARGET_NR_chown32 + * is TARGET_NR_chown on 64 bit). + */ +#ifndef TARGET_S390X + +#define TARGET_NR_time 13 +#define TARGET_NR_lchown 16 +#define TARGET_NR_setuid 23 +#define TARGET_NR_getuid 24 +#define TARGET_NR_stime 25 +#define TARGET_NR_setgid 46 +#define TARGET_NR_getgid 47 +#define TARGET_NR_geteuid 49 +#define TARGET_NR_getegid 50 +#define TARGET_NR_setreuid 70 +#define TARGET_NR_setregid 71 +#define TARGET_NR_getrlimit 76 +#define TARGET_NR_getgroups 80 +#define TARGET_NR_setgroups 81 +#define TARGET_NR_fchown 95 +#define TARGET_NR_ioperm 101 +#define TARGET_NR_setfsuid 138 +#define TARGET_NR_setfsgid 139 +#define TARGET_NR__llseek 140 +#define TARGET_NR__newselect 142 +#define TARGET_NR_setresuid 164 +#define TARGET_NR_getresuid 165 +#define TARGET_NR_setresgid 170 +#define TARGET_NR_getresgid 171 +#define TARGET_NR_chown 182 +#define TARGET_NR_ugetrlimit 191 /* SuS compliant getrlimit */ +#define TARGET_NR_mmap2 192 +#define TARGET_NR_truncate64 193 +#define TARGET_NR_ftruncate64 194 +#define TARGET_NR_stat64 195 +#define TARGET_NR_lstat64 196 +#define TARGET_NR_fstat64 197 +#define TARGET_NR_lchown32 198 +#define TARGET_NR_getuid32 199 +#define TARGET_NR_getgid32 200 +#define TARGET_NR_geteuid32 201 +#define TARGET_NR_getegid32 202 +#define TARGET_NR_setreuid32 203 +#define TARGET_NR_setregid32 204 +#define TARGET_NR_getgroups32 205 +#define TARGET_NR_setgroups32 206 +#define TARGET_NR_fchown32 207 +#define TARGET_NR_setresuid32 208 +#define TARGET_NR_getresuid32 209 +#define TARGET_NR_setresgid32 210 +#define TARGET_NR_getresgid32 211 +#define TARGET_NR_chown32 212 +#define TARGET_NR_setuid32 213 +#define TARGET_NR_setgid32 214 +#define TARGET_NR_setfsuid32 215 +#define TARGET_NR_setfsgid32 216 +#define TARGET_NR_fcntl64 221 +#define TARGET_NR_sendfile64 223 +#define TARGET_NR_fadvise64_64 264 +#define TARGET_NR_fstatat64 293 + +#else + +#define TARGET_NR_select 142 +#define TARGET_NR_getrlimit 191 /* SuS compliant getrlimit */ +#define 
TARGET_NR_lchown 198 +#define TARGET_NR_getuid 199 +#define TARGET_NR_getgid 200 +#define TARGET_NR_geteuid 201 +#define TARGET_NR_getegid 202 +#define TARGET_NR_setreuid 203 +#define TARGET_NR_setregid 204 +#define TARGET_NR_getgroups 205 +#define TARGET_NR_setgroups 206 +#define TARGET_NR_fchown 207 +#define TARGET_NR_setresuid 208 +#define TARGET_NR_getresuid 209 +#define TARGET_NR_setresgid 210 +#define TARGET_NR_getresgid 211 +#define TARGET_NR_chown 212 +#define TARGET_NR_setuid 213 +#define TARGET_NR_setgid 214 +#define TARGET_NR_setfsuid 215 +#define TARGET_NR_setfsgid 216 +#define TARGET_NR_newfstatat 293 + +#endif + diff --git a/linux-user/s390x/target_signal.h b/linux-user/s390x/target_signal.h new file mode 100644 index 0000000000..b4816b040f --- /dev/null +++ b/linux-user/s390x/target_signal.h @@ -0,0 +1,26 @@ +#ifndef TARGET_SIGNAL_H +#define TARGET_SIGNAL_H + +#include "cpu.h" + +typedef struct target_sigaltstack { + abi_ulong ss_sp; + int ss_flags; + abi_ulong ss_size; +} target_stack_t; + +/* + * sigaltstack controls + */ +#define TARGET_SS_ONSTACK 1 +#define TARGET_SS_DISABLE 2 + +#define TARGET_MINSIGSTKSZ 2048 +#define TARGET_SIGSTKSZ 8192 + +static inline abi_ulong get_sp_from_cpustate(CPUS390XState *state) +{ + return state->regs[15]; +} + +#endif /* TARGET_SIGNAL_H */ diff --git a/linux-user/s390x/termbits.h b/linux-user/s390x/termbits.h new file mode 100644 index 0000000000..2a78a05594 --- /dev/null +++ b/linux-user/s390x/termbits.h @@ -0,0 +1,283 @@ +/* + * include/asm-s390/termbits.h + * + * S390 version + * + * Derived from "include/asm-i386/termbits.h" + */ + +#define TARGET_NCCS 19 +struct target_termios { + unsigned int c_iflag; /* input mode flags */ + unsigned int c_oflag; /* output mode flags */ + unsigned int c_cflag; /* control mode flags */ + unsigned int c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[TARGET_NCCS]; /* control characters */ +}; + +struct target_termios2 { + unsigned int c_iflag; /* input mode flags */ + unsigned int c_oflag; /* output mode flags */ + unsigned int c_cflag; /* control mode flags */ + unsigned int c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[TARGET_NCCS]; /* control characters */ + unsigned int c_ispeed; /* input speed */ + unsigned int c_ospeed; /* output speed */ +}; + +struct target_ktermios { + unsigned int c_iflag; /* input mode flags */ + unsigned int c_oflag; /* output mode flags */ + unsigned int c_cflag; /* control mode flags */ + unsigned int c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[TARGET_NCCS]; /* control characters */ + unsigned int c_ispeed; /* input speed */ + unsigned int c_ospeed; /* output speed */ +}; + +/* c_cc characters */ +#define TARGET_VINTR 0 +#define TARGET_VQUIT 1 +#define TARGET_VERASE 2 +#define TARGET_VKILL 3 +#define TARGET_VEOF 4 +#define TARGET_VTIME 5 +#define TARGET_VMIN 6 +#define TARGET_VSWTC 7 +#define TARGET_VSTART 8 +#define TARGET_VSTOP 9 +#define TARGET_VSUSP 10 +#define TARGET_VEOL 11 +#define TARGET_VREPRINT 12 +#define TARGET_VDISCARD 13 +#define TARGET_VWERASE 14 +#define TARGET_VLNEXT 15 +#define TARGET_VEOL2 16 + +/* c_iflag bits */ +#define TARGET_IGNBRK 0000001 +#define TARGET_BRKINT 0000002 +#define TARGET_IGNPAR 0000004 +#define TARGET_PARMRK 0000010 +#define TARGET_INPCK 0000020 +#define TARGET_ISTRIP 0000040 +#define TARGET_INLCR 0000100 +#define TARGET_IGNCR 0000200 +#define TARGET_ICRNL 0000400 +#define 
TARGET_IUCLC 0001000 +#define TARGET_IXON 0002000 +#define TARGET_IXANY 0004000 +#define TARGET_IXOFF 0010000 +#define TARGET_IMAXBEL 0020000 +#define TARGET_IUTF8 0040000 + +/* c_oflag bits */ +#define TARGET_OPOST 0000001 +#define TARGET_OLCUC 0000002 +#define TARGET_ONLCR 0000004 +#define TARGET_OCRNL 0000010 +#define TARGET_ONOCR 0000020 +#define TARGET_ONLRET 0000040 +#define TARGET_OFILL 0000100 +#define TARGET_OFDEL 0000200 +#define TARGET_NLDLY 0000400 +#define TARGET_NL0 0000000 +#define TARGET_NL1 0000400 +#define TARGET_CRDLY 0003000 +#define TARGET_CR0 0000000 +#define TARGET_CR1 0001000 +#define TARGET_CR2 0002000 +#define TARGET_CR3 0003000 +#define TARGET_TABDLY 0014000 +#define TARGET_TAB0 0000000 +#define TARGET_TAB1 0004000 +#define TARGET_TAB2 0010000 +#define TARGET_TAB3 0014000 +#define TARGET_XTABS 0014000 +#define TARGET_BSDLY 0020000 +#define TARGET_BS0 0000000 +#define TARGET_BS1 0020000 +#define TARGET_VTDLY 0040000 +#define TARGET_VT0 0000000 +#define TARGET_VT1 0040000 +#define TARGET_FFDLY 0100000 +#define TARGET_FF0 0000000 +#define TARGET_FF1 0100000 + +/* c_cflag bit meaning */ +#define TARGET_CBAUD 0010017 +#define TARGET_B0 0000000 /* hang up */ +#define TARGET_B50 0000001 +#define TARGET_B75 0000002 +#define TARGET_B110 0000003 +#define TARGET_B134 0000004 +#define TARGET_B150 0000005 +#define TARGET_B200 0000006 +#define TARGET_B300 0000007 +#define TARGET_B600 0000010 +#define TARGET_B1200 0000011 +#define TARGET_B1800 0000012 +#define TARGET_B2400 0000013 +#define TARGET_B4800 0000014 +#define TARGET_B9600 0000015 +#define TARGET_B19200 0000016 +#define TARGET_B38400 0000017 +#define TARGET_EXTA B19200 +#define TARGET_EXTB B38400 +#define TARGET_CSIZE 0000060 +#define TARGET_CS5 0000000 +#define TARGET_CS6 0000020 +#define TARGET_CS7 0000040 +#define TARGET_CS8 0000060 +#define TARGET_CSTOPB 0000100 +#define TARGET_CREAD 0000200 +#define TARGET_PARENB 0000400 +#define TARGET_PARODD 0001000 +#define TARGET_HUPCL 0002000 +#define TARGET_CLOCAL 0004000 +#define TARGET_CBAUDEX 0010000 +#define TARGET_BOTHER 0010000 +#define TARGET_B57600 0010001 +#define TARGET_B115200 0010002 +#define TARGET_B230400 0010003 +#define TARGET_B460800 0010004 +#define TARGET_B500000 0010005 +#define TARGET_B576000 0010006 +#define TARGET_B921600 0010007 +#define TARGET_B1000000 0010010 +#define TARGET_B1152000 0010011 +#define TARGET_B1500000 0010012 +#define TARGET_B2000000 0010013 +#define TARGET_B2500000 0010014 +#define TARGET_B3000000 0010015 +#define TARGET_B3500000 0010016 +#define TARGET_B4000000 0010017 +#define TARGET_CIBAUD 002003600000 /* input baud rate */ +#define TARGET_CMSPAR 010000000000 /* mark or space (stick) parity */ +#define TARGET_CRTSCTS 020000000000 /* flow control */ + +#define TARGET_IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ + +/* c_lflag bits */ +#define TARGET_ISIG 0000001 +#define TARGET_ICANON 0000002 +#define TARGET_XCASE 0000004 +#define TARGET_ECHO 0000010 +#define TARGET_ECHOE 0000020 +#define TARGET_ECHOK 0000040 +#define TARGET_ECHONL 0000100 +#define TARGET_NOFLSH 0000200 +#define TARGET_TOSTOP 0000400 +#define TARGET_ECHOCTL 0001000 +#define TARGET_ECHOPRT 0002000 +#define TARGET_ECHOKE 0004000 +#define TARGET_FLUSHO 0010000 +#define TARGET_PENDIN 0040000 +#define TARGET_IEXTEN 0100000 + +/* tcflow() and TCXONC use these */ +#define TARGET_TCOOFF 0 +#define TARGET_TCOON 1 +#define TARGET_TCIOFF 2 +#define TARGET_TCION 3 + +/* tcflush() and TCFLSH use these */ +#define TARGET_TCIFLUSH 0 +#define TARGET_TCOFLUSH 1 +#define 
TARGET_TCIOFLUSH 2 + +/* tcsetattr uses these */ +#define TARGET_TCSANOW 0 +#define TARGET_TCSADRAIN 1 +#define TARGET_TCSAFLUSH 2 + +/* + * include/asm-s390/ioctls.h + * + * S390 version + * + * Derived from "include/asm-i386/ioctls.h" + */ + +/* 0x54 is just a magic number to make these relatively unique ('T') */ + +#define TARGET_TCGETS 0x5401 +#define TARGET_TCSETS 0x5402 +#define TARGET_TCSETSW 0x5403 +#define TARGET_TCSETSF 0x5404 +#define TARGET_TCGETA 0x5405 +#define TARGET_TCSETA 0x5406 +#define TARGET_TCSETAW 0x5407 +#define TARGET_TCSETAF 0x5408 +#define TARGET_TCSBRK 0x5409 +#define TARGET_TCXONC 0x540A +#define TARGET_TCFLSH 0x540B +#define TARGET_TIOCEXCL 0x540C +#define TARGET_TIOCNXCL 0x540D +#define TARGET_TIOCSCTTY 0x540E +#define TARGET_TIOCGPGRP 0x540F +#define TARGET_TIOCSPGRP 0x5410 +#define TARGET_TIOCOUTQ 0x5411 +#define TARGET_TIOCSTI 0x5412 +#define TARGET_TIOCGWINSZ 0x5413 +#define TARGET_TIOCSWINSZ 0x5414 +#define TARGET_TIOCMGET 0x5415 +#define TARGET_TIOCMBIS 0x5416 +#define TARGET_TIOCMBIC 0x5417 +#define TARGET_TIOCMSET 0x5418 +#define TARGET_TIOCGSOFTCAR 0x5419 +#define TARGET_TIOCSSOFTCAR 0x541A +#define TARGET_FIONREAD 0x541B +#define TARGET_TIOCINQ FIONREAD +#define TARGET_TIOCLINUX 0x541C +#define TARGET_TIOCCONS 0x541D +#define TARGET_TIOCGSERIAL 0x541E +#define TARGET_TIOCSSERIAL 0x541F +#define TARGET_TIOCPKT 0x5420 +#define TARGET_FIONBIO 0x5421 +#define TARGET_TIOCNOTTY 0x5422 +#define TARGET_TIOCSETD 0x5423 +#define TARGET_TIOCGETD 0x5424 +#define TARGET_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ +#define TARGET_TIOCSBRK 0x5427 /* BSD compatibility */ +#define TARGET_TIOCCBRK 0x5428 /* BSD compatibility */ +#define TARGET_TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TARGET_TCGETS2 _IOR('T',0x2A, struct termios2) +#define TARGET_TCSETS2 _IOW('T',0x2B, struct termios2) +#define TARGET_TCSETSW2 _IOW('T',0x2C, struct termios2) +#define TARGET_TCSETSF2 _IOW('T',0x2D, struct termios2) +#define TARGET_TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ +#define TARGET_TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TARGET_TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ + +#define TARGET_FIONCLEX 0x5450 /* these numbers need to be adjusted. 
*/ +#define TARGET_FIOCLEX 0x5451 +#define TARGET_FIOASYNC 0x5452 +#define TARGET_TIOCSERCONFIG 0x5453 +#define TARGET_TIOCSERGWILD 0x5454 +#define TARGET_TIOCSERSWILD 0x5455 +#define TARGET_TIOCGLCKTRMIOS 0x5456 +#define TARGET_TIOCSLCKTRMIOS 0x5457 +#define TARGET_TIOCSERGSTRUCT 0x5458 /* For debugging only */ +#define TARGET_TIOCSERGETLSR 0x5459 /* Get line status register */ +#define TARGET_TIOCSERGETMULTI 0x545A /* Get multiport config */ +#define TARGET_TIOCSERSETMULTI 0x545B /* Set multiport config */ + +#define TARGET_TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ +#define TARGET_TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ +#define TARGET_FIOQSIZE 0x545E + +/* Used for packet mode */ +#define TARGET_TIOCPKT_DATA 0 +#define TARGET_TIOCPKT_FLUSHREAD 1 +#define TARGET_TIOCPKT_FLUSHWRITE 2 +#define TARGET_TIOCPKT_STOP 4 +#define TARGET_TIOCPKT_START 8 +#define TARGET_TIOCPKT_NOSTOP 16 +#define TARGET_TIOCPKT_DOSTOP 32 + +#define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */ + diff --git a/linux-user/signal.c b/linux-user/signal.c index ce033e90b4..11b25be7b8 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -21,7 +21,6 @@ #include <string.h> #include <stdarg.h> #include <unistd.h> -#include <signal.h> #include <errno.h> #include <assert.h> #include <sys/ucontext.h> @@ -391,7 +390,7 @@ static void QEMU_NORETURN force_sig(int target_sig) target_sig, strsignal(host_sig), "core dumped" ); } - /* The proper exit code for dieing from an uncaught signal is + /* The proper exit code for dying from an uncaught signal is * -<signal>. The kernel doesn't allow exit() or _exit() to pass * a negative value. To get the proper exit code we need to * actually die from an uncaught signal. Here the default signal @@ -3614,6 +3613,339 @@ long do_rt_sigreturn(CPUState *env) return -TARGET_ENOSYS; } +#elif defined(TARGET_S390X) + +#define __NUM_GPRS 16 +#define __NUM_FPRS 16 +#define __NUM_ACRS 16 + +#define S390_SYSCALL_SIZE 2 +#define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */ + +#define _SIGCONTEXT_NSIG 64 +#define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */ +#define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) +#define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) +#define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */ +#define S390_SYSCALL_OPCODE ((uint16_t)0x0a00) + +typedef struct { + target_psw_t psw; + target_ulong gprs[__NUM_GPRS]; + unsigned int acrs[__NUM_ACRS]; +} target_s390_regs_common; + +typedef struct { + unsigned int fpc; + double fprs[__NUM_FPRS]; +} target_s390_fp_regs; + +typedef struct { + target_s390_regs_common regs; + target_s390_fp_regs fpregs; +} target_sigregs; + +struct target_sigcontext { + target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS]; + target_sigregs *sregs; +}; + +typedef struct { + uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; + struct target_sigcontext sc; + target_sigregs sregs; + int signo; + uint8_t retcode[S390_SYSCALL_SIZE]; +} sigframe; + +struct target_ucontext { + target_ulong uc_flags; + struct target_ucontext *uc_link; + target_stack_t uc_stack; + target_sigregs uc_mcontext; + target_sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +typedef struct { + uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; + uint8_t retcode[S390_SYSCALL_SIZE]; + struct target_siginfo info; + struct target_ucontext uc; +} rt_sigframe; + +static inline abi_ulong +get_sigframe(struct target_sigaction *ka, CPUState *env, size_t 
frame_size) +{ + abi_ulong sp; + + /* Default to using normal stack */ + sp = env->regs[15]; + + /* This is the X/Open sanctioned signal stack switching. */ + if (ka->sa_flags & TARGET_SA_ONSTACK) { + if (!sas_ss_flags(sp)) { + sp = target_sigaltstack_used.ss_sp + + target_sigaltstack_used.ss_size; + } + } + + /* This is the legacy signal stack switching. */ + else if (/* FIXME !user_mode(regs) */ 0 && + !(ka->sa_flags & TARGET_SA_RESTORER) && + ka->sa_restorer) { + sp = (abi_ulong) ka->sa_restorer; + } + + return (sp - frame_size) & -8ul; +} + +static void save_sigregs(CPUState *env, target_sigregs *sregs) +{ + int i; + //save_access_regs(current->thread.acrs); FIXME + + /* Copy a 'clean' PSW mask to the user to avoid leaking + information about whether PER is currently on. */ + __put_user(env->psw.mask, &sregs->regs.psw.mask); + __put_user(env->psw.addr, &sregs->regs.psw.addr); + for (i = 0; i < 16; i++) { + __put_user(env->regs[i], &sregs->regs.gprs[i]); + } + for (i = 0; i < 16; i++) { + __put_user(env->aregs[i], &sregs->regs.acrs[i]); + } + /* + * We have to store the fp registers to current->thread.fp_regs + * to merge them with the emulated registers. + */ + //save_fp_regs(¤t->thread.fp_regs); FIXME + for (i = 0; i < 16; i++) { + __put_user(env->fregs[i].ll, &sregs->fpregs.fprs[i]); + } +} + +static void setup_frame(int sig, struct target_sigaction *ka, + target_sigset_t *set, CPUState *env) +{ + sigframe *frame; + abi_ulong frame_addr; + + frame_addr = get_sigframe(ka, env, sizeof(*frame)); + qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__, + (unsigned long long)frame_addr); + if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { + goto give_sigsegv; + } + + qemu_log("%s: 1\n", __FUNCTION__); + if (__put_user(set->sig[0], &frame->sc.oldmask[0])) { + goto give_sigsegv; + } + + save_sigregs(env, &frame->sregs); + + __put_user((abi_ulong)(unsigned long)&frame->sregs, + (abi_ulong *)&frame->sc.sregs); + + /* Set up to return from userspace. If provided, use a stub + already in userspace. */ + if (ka->sa_flags & TARGET_SA_RESTORER) { + env->regs[14] = (unsigned long) + ka->sa_restorer | PSW_ADDR_AMODE; + } else { + env->regs[14] = (unsigned long) + frame->retcode | PSW_ADDR_AMODE; + if (__put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, + (uint16_t *)(frame->retcode))) + goto give_sigsegv; + } + + /* Set up backchain. */ + if (__put_user(env->regs[15], (abi_ulong *) frame)) { + goto give_sigsegv; + } + + /* Set up registers for signal handler */ + env->regs[15] = (target_ulong)(unsigned long) frame; + env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; + + env->regs[2] = sig; //map_signal(sig); + env->regs[3] = (target_ulong)(unsigned long) &frame->sc; + + /* We forgot to include these in the sigcontext. + To avoid breaking binary compatibility, they are passed as args. */ + env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no; + env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr; + + /* Place signal number on stack to allow backtrace from handler. 
*/ + if (__put_user(env->regs[2], (int *) &frame->signo)) { + goto give_sigsegv; + } + unlock_user_struct(frame, frame_addr, 1); + return; + +give_sigsegv: + qemu_log("%s: give_sigsegv\n", __FUNCTION__); + unlock_user_struct(frame, frame_addr, 1); + force_sig(TARGET_SIGSEGV); +} + +static void setup_rt_frame(int sig, struct target_sigaction *ka, + target_siginfo_t *info, + target_sigset_t *set, CPUState *env) +{ + int i; + rt_sigframe *frame; + abi_ulong frame_addr; + + frame_addr = get_sigframe(ka, env, sizeof *frame); + qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__, + (unsigned long long)frame_addr); + if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { + goto give_sigsegv; + } + + qemu_log("%s: 1\n", __FUNCTION__); + if (copy_siginfo_to_user(&frame->info, info)) { + goto give_sigsegv; + } + + /* Create the ucontext. */ + __put_user(0, &frame->uc.uc_flags); + __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.uc_link); + __put_user(target_sigaltstack_used.ss_sp, &frame->uc.uc_stack.ss_sp); + __put_user(sas_ss_flags(get_sp_from_cpustate(env)), + &frame->uc.uc_stack.ss_flags); + __put_user(target_sigaltstack_used.ss_size, &frame->uc.uc_stack.ss_size); + save_sigregs(env, &frame->uc.uc_mcontext); + for (i = 0; i < TARGET_NSIG_WORDS; i++) { + __put_user((abi_ulong)set->sig[i], + (abi_ulong *)&frame->uc.uc_sigmask.sig[i]); + } + + /* Set up to return from userspace. If provided, use a stub + already in userspace. */ + if (ka->sa_flags & TARGET_SA_RESTORER) { + env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE; + } else { + env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE; + if (__put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, + (uint16_t *)(frame->retcode))) { + goto give_sigsegv; + } + } + + /* Set up backchain. 
*/ + if (__put_user(env->regs[15], (abi_ulong *) frame)) { + goto give_sigsegv; + } + + /* Set up registers for signal handler */ + env->regs[15] = (target_ulong)(unsigned long) frame; + env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; + + env->regs[2] = sig; //map_signal(sig); + env->regs[3] = (target_ulong)(unsigned long) &frame->info; + env->regs[4] = (target_ulong)(unsigned long) &frame->uc; + return; + +give_sigsegv: + qemu_log("%s: give_sigsegv\n", __FUNCTION__); + unlock_user_struct(frame, frame_addr, 1); + force_sig(TARGET_SIGSEGV); +} + +static int +restore_sigregs(CPUState *env, target_sigregs *sc) +{ + int err = 0; + int i; + + for (i = 0; i < 16; i++) { + err |= __get_user(env->regs[i], &sc->regs.gprs[i]); + } + + err |= __get_user(env->psw.mask, &sc->regs.psw.mask); + qemu_log("%s: sc->regs.psw.addr 0x%llx env->psw.addr 0x%llx\n", + __FUNCTION__, (unsigned long long)sc->regs.psw.addr, + (unsigned long long)env->psw.addr); + err |= __get_user(env->psw.addr, &sc->regs.psw.addr); + /* FIXME: 31-bit -> | PSW_ADDR_AMODE */ + + for (i = 0; i < 16; i++) { + err |= __get_user(env->aregs[i], &sc->regs.acrs[i]); + } + for (i = 0; i < 16; i++) { + err |= __get_user(env->fregs[i].ll, &sc->fpregs.fprs[i]); + } + + return err; +} + +long do_sigreturn(CPUState *env) +{ + sigframe *frame; + abi_ulong frame_addr = env->regs[15]; + qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__, + (unsigned long long)frame_addr); + target_sigset_t target_set; + sigset_t set; + + if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { + goto badframe; + } + if (__get_user(target_set.sig[0], &frame->sc.oldmask[0])) { + goto badframe; + } + + target_to_host_sigset_internal(&set, &target_set); + sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */ + + if (restore_sigregs(env, &frame->sregs)) { + goto badframe; + } + + unlock_user_struct(frame, frame_addr, 0); + return env->regs[2]; + +badframe: + unlock_user_struct(frame, frame_addr, 0); + force_sig(TARGET_SIGSEGV); + return 0; +} + +long do_rt_sigreturn(CPUState *env) +{ + rt_sigframe *frame; + abi_ulong frame_addr = env->regs[15]; + qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__, + (unsigned long long)frame_addr); + sigset_t set; + + if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { + goto badframe; + } + target_to_host_sigset(&set, &frame->uc.uc_sigmask); + + sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */ + + if (restore_sigregs(env, &frame->uc.uc_mcontext)) { + goto badframe; + } + + if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.uc_stack), 0, + get_sp_from_cpustate(env)) == -EFAULT) { + goto badframe; + } + unlock_user_struct(frame, frame_addr, 0); + return env->regs[2]; + +badframe: + unlock_user_struct(frame, frame_addr, 0); + force_sig(TARGET_SIGSEGV); + return 0; +} + #elif defined(TARGET_PPC) && !defined(TARGET_PPC64) /* FIXME: Many of the structures are defined for both PPC and PPC64, but diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 279cef3cd4..5cb27c7f9f 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -287,7 +287,7 @@ static int sys_uname(struct new_utsname *buf) * struct linux kernel uses). 
*/ - bzero(buf, sizeof (*buf)); + memset(buf, 0, sizeof(*buf)); COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname); COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename); COPY_UTSNAME_FIELD(buf->release, uts_buf.release); @@ -5548,7 +5548,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, ret = get_errno(settimeofday(&tv, NULL)); } break; -#ifdef TARGET_NR_select +#if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390) case TARGET_NR_select: { struct target_sel_arg_struct *sel; @@ -5659,7 +5659,9 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, #endif #ifdef TARGET_NR_mmap case TARGET_NR_mmap: -#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) +#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \ + defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ + || defined(TARGET_S390X) { abi_ulong *v; abi_ulong v1, v2, v3, v4, v5, v6; @@ -6155,6 +6157,8 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); #elif defined(TARGET_CRIS) ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5)); +#elif defined(TARGET_S390X) + ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); #else ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); #endif @@ -6363,8 +6367,12 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, } break; #endif /* TARGET_NR_getdents64 */ -#ifdef TARGET_NR__newselect +#if defined(TARGET_NR__newselect) || defined(TARGET_S390X) +#ifdef TARGET_S390X + case TARGET_NR_select: +#else case TARGET_NR__newselect: +#endif ret = do_select(arg1, arg2, arg3, arg4, arg5); break; #endif @@ -6681,7 +6689,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, case TARGET_NR_sigaltstack: #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ - defined(TARGET_M68K) + defined(TARGET_M68K) || defined(TARGET_S390X) ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env)); break; #else diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h index e05ddf9120..04c268de7c 100644 --- a/linux-user/syscall_defs.h +++ b/linux-user/syscall_defs.h @@ -58,7 +58,8 @@ #endif #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SH4) \ - || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_UNICORE32) + || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_UNICORE32) \ + || defined(TARGET_S390X) #define TARGET_IOC_SIZEBITS 14 #define TARGET_IOC_DIRBITS 2 @@ -321,7 +322,8 @@ int do_sigaction(int sig, const struct target_sigaction *act, #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \ || defined(TARGET_PPC) || defined(TARGET_MIPS) || defined(TARGET_SH4) \ || defined(TARGET_M68K) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) \ - || defined(TARGET_MICROBLAZE) || defined(TARGET_UNICORE32) + || defined(TARGET_MICROBLAZE) || defined(TARGET_UNICORE32) \ + || defined(TARGET_S390X) #if defined(TARGET_SPARC) #define TARGET_SA_NOCLDSTOP 8u @@ -1688,6 +1690,27 @@ struct target_stat { abi_long __unused[3]; }; +#elif defined(TARGET_S390X) +struct target_stat { + abi_ulong st_dev; + abi_ulong st_ino; + abi_ulong st_nlink; + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int __pad1; + abi_ulong 
st_rdev; + abi_ulong st_size; + abi_ulong target_st_atime; + abi_ulong target_st_atime_nsec; + abi_ulong target_st_mtime; + abi_ulong target_st_mtime_nsec; + abi_ulong target_st_ctime; + abi_ulong target_st_ctime_nsec; + abi_ulong st_blksize; + abi_long st_blocks; + abi_ulong __unused[3]; +}; #else #error unsupported CPU #endif @@ -1774,6 +1797,34 @@ struct target_statfs64 { abi_long f_frsize; abi_long f_spare[5]; }; +#elif defined(TARGET_S390X) +struct target_statfs { + int32_t f_type; + int32_t f_bsize; + abi_long f_blocks; + abi_long f_bfree; + abi_long f_bavail; + abi_long f_files; + abi_long f_ffree; + kernel_fsid_t f_fsid; + int32_t f_namelen; + int32_t f_frsize; + int32_t f_spare[5]; +}; + +struct target_statfs64 { + int32_t f_type; + int32_t f_bsize; + abi_long f_blocks; + abi_long f_bfree; + abi_long f_bavail; + abi_long f_files; + abi_long f_ffree; + kernel_fsid_t f_fsid; + int32_t f_namelen; + int32_t f_frsize; + int32_t f_spare[5]; +}; #else struct target_statfs { uint32_t f_type; @@ -2544,16 +2544,21 @@ static void do_wav_capture(Monitor *mon, const QDict *qdict) #endif #if defined(TARGET_I386) -static void do_inject_nmi(Monitor *mon, const QDict *qdict) +static int do_inject_nmi(Monitor *mon, const QDict *qdict, QObject **ret_data) { CPUState *env; - int cpu_index = qdict_get_int(qdict, "cpu_index"); - for (env = first_cpu; env != NULL; env = env->next_cpu) - if (env->cpu_index == cpu_index) { - cpu_interrupt(env, CPU_INTERRUPT_NMI); - break; - } + for (env = first_cpu; env != NULL; env = env->next_cpu) { + cpu_interrupt(env, CPU_INTERRUPT_NMI); + } + + return 0; +} +#else +static int do_inject_nmi(Monitor *mon, const QDict *qdict, QObject **ret_data) +{ + qerror_report(QERR_UNSUPPORTED); + return -1; } #endif @@ -3466,7 +3471,76 @@ static const MonitorDef monitor_defs[] = { { "sr13", offsetof(CPUState, sr[13]) }, { "sr14", offsetof(CPUState, sr[14]) }, { "sr15", offsetof(CPUState, sr[15]) }, - /* Too lazy to put BATs and SPRs ... */ + /* Too lazy to put BATs... 
*/ + { "pvr", offsetof(CPUState, spr[SPR_PVR]) }, + + { "srr0", offsetof(CPUState, spr[SPR_SRR0]) }, + { "srr1", offsetof(CPUState, spr[SPR_SRR1]) }, + { "sprg0", offsetof(CPUState, spr[SPR_SPRG0]) }, + { "sprg1", offsetof(CPUState, spr[SPR_SPRG1]) }, + { "sprg2", offsetof(CPUState, spr[SPR_SPRG2]) }, + { "sprg3", offsetof(CPUState, spr[SPR_SPRG3]) }, + { "sprg4", offsetof(CPUState, spr[SPR_SPRG4]) }, + { "sprg5", offsetof(CPUState, spr[SPR_SPRG5]) }, + { "sprg6", offsetof(CPUState, spr[SPR_SPRG6]) }, + { "sprg7", offsetof(CPUState, spr[SPR_SPRG7]) }, + { "pid", offsetof(CPUState, spr[SPR_BOOKE_PID]) }, + { "csrr0", offsetof(CPUState, spr[SPR_BOOKE_CSRR0]) }, + { "csrr1", offsetof(CPUState, spr[SPR_BOOKE_CSRR1]) }, + { "esr", offsetof(CPUState, spr[SPR_BOOKE_ESR]) }, + { "dear", offsetof(CPUState, spr[SPR_BOOKE_DEAR]) }, + { "mcsr", offsetof(CPUState, spr[SPR_BOOKE_MCSR]) }, + { "tsr", offsetof(CPUState, spr[SPR_BOOKE_TSR]) }, + { "tcr", offsetof(CPUState, spr[SPR_BOOKE_TCR]) }, + { "vrsave", offsetof(CPUState, spr[SPR_VRSAVE]) }, + { "pir", offsetof(CPUState, spr[SPR_BOOKE_PIR]) }, + { "mcsrr0", offsetof(CPUState, spr[SPR_BOOKE_MCSRR0]) }, + { "mcsrr1", offsetof(CPUState, spr[SPR_BOOKE_MCSRR1]) }, + { "decar", offsetof(CPUState, spr[SPR_BOOKE_DECAR]) }, + { "ivpr", offsetof(CPUState, spr[SPR_BOOKE_IVPR]) }, + { "epcr", offsetof(CPUState, spr[SPR_BOOKE_EPCR]) }, + { "sprg8", offsetof(CPUState, spr[SPR_BOOKE_SPRG8]) }, + { "ivor0", offsetof(CPUState, spr[SPR_BOOKE_IVOR0]) }, + { "ivor1", offsetof(CPUState, spr[SPR_BOOKE_IVOR1]) }, + { "ivor2", offsetof(CPUState, spr[SPR_BOOKE_IVOR2]) }, + { "ivor3", offsetof(CPUState, spr[SPR_BOOKE_IVOR3]) }, + { "ivor4", offsetof(CPUState, spr[SPR_BOOKE_IVOR4]) }, + { "ivor5", offsetof(CPUState, spr[SPR_BOOKE_IVOR5]) }, + { "ivor6", offsetof(CPUState, spr[SPR_BOOKE_IVOR6]) }, + { "ivor7", offsetof(CPUState, spr[SPR_BOOKE_IVOR7]) }, + { "ivor8", offsetof(CPUState, spr[SPR_BOOKE_IVOR8]) }, + { "ivor9", offsetof(CPUState, spr[SPR_BOOKE_IVOR9]) }, + { "ivor10", offsetof(CPUState, spr[SPR_BOOKE_IVOR10]) }, + { "ivor11", offsetof(CPUState, spr[SPR_BOOKE_IVOR11]) }, + { "ivor12", offsetof(CPUState, spr[SPR_BOOKE_IVOR12]) }, + { "ivor13", offsetof(CPUState, spr[SPR_BOOKE_IVOR13]) }, + { "ivor14", offsetof(CPUState, spr[SPR_BOOKE_IVOR14]) }, + { "ivor15", offsetof(CPUState, spr[SPR_BOOKE_IVOR15]) }, + { "ivor32", offsetof(CPUState, spr[SPR_BOOKE_IVOR32]) }, + { "ivor33", offsetof(CPUState, spr[SPR_BOOKE_IVOR33]) }, + { "ivor34", offsetof(CPUState, spr[SPR_BOOKE_IVOR34]) }, + { "ivor35", offsetof(CPUState, spr[SPR_BOOKE_IVOR35]) }, + { "ivor36", offsetof(CPUState, spr[SPR_BOOKE_IVOR36]) }, + { "ivor37", offsetof(CPUState, spr[SPR_BOOKE_IVOR37]) }, + { "mas0", offsetof(CPUState, spr[SPR_BOOKE_MAS0]) }, + { "mas1", offsetof(CPUState, spr[SPR_BOOKE_MAS1]) }, + { "mas2", offsetof(CPUState, spr[SPR_BOOKE_MAS2]) }, + { "mas3", offsetof(CPUState, spr[SPR_BOOKE_MAS3]) }, + { "mas4", offsetof(CPUState, spr[SPR_BOOKE_MAS4]) }, + { "mas6", offsetof(CPUState, spr[SPR_BOOKE_MAS6]) }, + { "mas7", offsetof(CPUState, spr[SPR_BOOKE_MAS7]) }, + { "mmucfg", offsetof(CPUState, spr[SPR_MMUCFG]) }, + { "tlb0cfg", offsetof(CPUState, spr[SPR_BOOKE_TLB0CFG]) }, + { "tlb1cfg", offsetof(CPUState, spr[SPR_BOOKE_TLB1CFG]) }, + { "epr", offsetof(CPUState, spr[SPR_BOOKE_EPR]) }, + { "eplc", offsetof(CPUState, spr[SPR_BOOKE_EPLC]) }, + { "epsc", offsetof(CPUState, spr[SPR_BOOKE_EPSC]) }, + { "svr", offsetof(CPUState, spr[SPR_E500_SVR]) }, + { "mcar", offsetof(CPUState, spr[SPR_Exxx_MCAR]) }, + { 
"pid1", offsetof(CPUState, spr[SPR_BOOKE_PID1]) }, + { "pid2", offsetof(CPUState, spr[SPR_BOOKE_PID2]) }, + { "hid0", offsetof(CPUState, spr[SPR_HID0]) }, + #elif defined(TARGET_SPARC) { "g0", offsetof(CPUState, gregs[0]) }, { "g1", offsetof(CPUState, gregs[1]) }, diff --git a/net/slirp.c b/net/slirp.c index e387a116ad..e057a14ce9 100644 --- a/net/slirp.c +++ b/net/slirp.c @@ -614,7 +614,7 @@ static int slirp_guestfwd(SlirpState *s, const char *config_str, } fwd = qemu_malloc(sizeof(struct GuestFwd)); - snprintf(buf, sizeof(buf), "guestfwd.tcp:%d", port); + snprintf(buf, sizeof(buf), "guestfwd.tcp.%d", port); fwd->hd = qemu_chr_open(buf, p, NULL); if (!fwd->hd) { error_report("could not open guest forwarding device '%s'", buf); @@ -27,7 +27,6 @@ #include "config-host.h" -#include <signal.h> #include <sys/ioctl.h> #include <sys/stat.h> #include <sys/wait.h> diff --git a/pc-bios/s390-zipl.rom b/pc-bios/s390-zipl.rom Binary files differindex f7af9b155d..3115128efe 100644 --- a/pc-bios/s390-zipl.rom +++ b/pc-bios/s390-zipl.rom @@ -39,13 +39,16 @@ #pragma GCC poison CPU_INTERRUPT_HARD #pragma GCC poison CPU_INTERRUPT_EXITTB -#pragma GCC poison CPU_INTERRUPT_TIMER -#pragma GCC poison CPU_INTERRUPT_FIQ #pragma GCC poison CPU_INTERRUPT_HALT -#pragma GCC poison CPU_INTERRUPT_SMI #pragma GCC poison CPU_INTERRUPT_DEBUG -#pragma GCC poison CPU_INTERRUPT_VIRQ -#pragma GCC poison CPU_INTERRUPT_NMI +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4 +#pragma GCC poison CPU_INTERRUPT_TGT_INT_0 +#pragma GCC poison CPU_INTERRUPT_TGT_INT_1 +#pragma GCC poison CPU_INTERRUPT_TGT_INT_2 #endif #endif diff --git a/posix-aio-compat.c b/posix-aio-compat.c index 6d4df9da30..c4116e30f2 100644 --- a/posix-aio-compat.c +++ b/posix-aio-compat.c @@ -17,7 +17,6 @@ #include <unistd.h> #include <errno.h> #include <time.h> -#include <signal.h> #include <string.h> #include <stdlib.h> #include <stdio.h> @@ -322,7 +321,9 @@ static void *aio_thread(void *unused) while (QTAILQ_EMPTY(&request_list) && !(ret == ETIMEDOUT)) { + idle_threads++; ret = cond_timedwait(&cond, &lock, &ts); + idle_threads--; } if (QTAILQ_EMPTY(&request_list)) @@ -331,7 +332,6 @@ static void *aio_thread(void *unused) aiocb = QTAILQ_FIRST(&request_list); QTAILQ_REMOVE(&request_list, aiocb, node); aiocb->active = 1; - idle_threads--; mutex_unlock(&lock); switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) { @@ -353,13 +353,11 @@ static void *aio_thread(void *unused) mutex_lock(&lock); aiocb->ret = ret; - idle_threads++; mutex_unlock(&lock); if (kill(pid, aiocb->ev_signo)) die("kill failed"); } - idle_threads--; cur_threads--; mutex_unlock(&lock); @@ -371,7 +369,6 @@ static void spawn_thread(void) sigset_t set, oldset; cur_threads++; - idle_threads++; /* block all signals */ if (sigfillset(&set)) die("sigfillset"); diff --git a/qemu-char.c b/qemu-char.c index 5e04a20b8c..fb13b28454 100644 --- a/qemu-char.c +++ b/qemu-char.c @@ -35,7 +35,6 @@ #include <unistd.h> #include <fcntl.h> -#include <signal.h> #include <time.h> #include <errno.h> #include <sys/time.h> diff --git a/qemu-common.h b/qemu-common.h index f9f705da85..39fabc9e0f 100644 --- a/qemu-common.h +++ b/qemu-common.h @@ -39,6 +39,7 @@ typedef struct Monitor Monitor; #include <sys/stat.h> #include <sys/time.h> #include <assert.h> +#include <signal.h> #ifdef _WIN32 #include "qemu-os-win32.h" @@ -201,11 +202,6 @@ const char *path(const char 
*pathname); #define qemu_isascii(c) isascii((unsigned char)(c)) #define qemu_toascii(c) toascii((unsigned char)(c)) -#ifdef _WIN32 -/* ffs() in oslib-win32.c for WIN32, strings.h for the rest of the world */ -int ffs(int i); -#endif - void *qemu_oom_check(void *ptr); void *qemu_malloc(size_t size); void *qemu_realloc(void *ptr, size_t size); @@ -346,7 +342,7 @@ void qemu_iovec_memset_skip(QEMUIOVector *qiov, int c, size_t count, void qemu_progress_init(int enabled, float min_skip); void qemu_progress_end(void); -void qemu_progress_print(float percent, int max); +void qemu_progress_print(float delta, int max); #define QEMU_FILE_TYPE_BIOS 0 #define QEMU_FILE_TYPE_KEYMAP 1 diff --git a/qemu-config.c b/qemu-config.c index 14d34194d0..c63741c6b1 100644 --- a/qemu-config.c +++ b/qemu-config.c @@ -306,7 +306,7 @@ static QemuOptsList qemu_trace_opts = { .name = "file", .type = QEMU_OPT_STRING, }, - { /* end if list */ } + { /* end of list */ } }, }; #endif @@ -385,6 +385,12 @@ QemuOptsList qemu_spice_opts = { .name = "disable-ticketing", .type = QEMU_OPT_BOOL, },{ + .name = "disable-copy-paste", + .type = QEMU_OPT_BOOL, + },{ + .name = "sasl", + .type = QEMU_OPT_BOOL, + },{ .name = "x509-dir", .type = QEMU_OPT_STRING, },{ @@ -430,7 +436,7 @@ QemuOptsList qemu_spice_opts = { .name = "playback-compression", .type = QEMU_OPT_BOOL, }, - { /* end if list */ } + { /* end of list */ } }, }; @@ -446,7 +452,20 @@ QemuOptsList qemu_option_rom_opts = { .name = "romfile", .type = QEMU_OPT_STRING, }, - { /* end if list */ } + { /* end of list */ } + }, +}; + +static QemuOptsList qemu_machine_opts = { + .name = "machine", + .head = QTAILQ_HEAD_INITIALIZER(qemu_machine_opts.head), + .desc = { + { + .name = "accel", + .type = QEMU_OPT_STRING, + .help = "accelerator list", + }, + { /* End of list */ } }, }; @@ -464,6 +483,7 @@ static QemuOptsList *vm_config_groups[32] = { &qemu_trace_opts, #endif &qemu_option_rom_opts, + &qemu_machine_opts, NULL, }; diff --git a/qemu-doc.texi b/qemu-doc.texi index 86e017ccfa..47e1991712 100644 --- a/qemu-doc.texi +++ b/qemu-doc.texi @@ -278,7 +278,11 @@ targets do not need a disk image. @c man begin OPTIONS -During the graphical emulation, you can use the following keys: +During the graphical emulation, you can use special key combinations to change +modes. The default key mappings are shown below, but if you use @code{-alt-grab} +then the modifier is Ctrl-Alt-Shift (instead of Ctrl-Alt) and if you use +@code{-ctrl-grab} then the modifier is the right Ctrl key (instead of Ctrl-Alt): + @table @key @item Ctrl-Alt-f @kindex Ctrl-Alt-f diff --git a/qemu-img.c b/qemu-img.c index e8251234b1..4f162d1abe 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -496,14 +496,37 @@ static int img_commit(int argc, char **argv) return 0; } +/* + * Checks whether the sector is not a zero sector. + * + * Attention! The len must be a multiple of 4 * sizeof(long) due to + * restriction of optimizations in this function. + */ static int is_not_zero(const uint8_t *sector, int len) { + /* + * Use long as the biggest available internal data type that fits into the + * CPU register and unroll the loop to smooth out the effect of memory + * latency. 
+ */ + int i; - len >>= 2; - for(i = 0;i < len; i++) { - if (((uint32_t *)sector)[i] != 0) + long d0, d1, d2, d3; + const long * const data = (const long *) sector; + + len /= sizeof(long); + + for(i = 0; i < len; i += 4) { + d0 = data[i + 0]; + d1 = data[i + 1]; + d2 = data[i + 2]; + d3 = data[i + 3]; + + if (d0 || d1 || d2 || d3) { return 1; + } } + return 0; } @@ -785,7 +808,7 @@ static int img_convert(int argc, char **argv) nb_sectors = total_sectors; local_progress = (float)100 / - (nb_sectors / MIN(nb_sectors, (cluster_sectors))); + (nb_sectors / MIN(nb_sectors, cluster_sectors)); for(;;) { int64_t bs_num; @@ -856,7 +879,7 @@ static int img_convert(int argc, char **argv) sector_num = 0; // total number of sectors converted so far nb_sectors = total_sectors - sector_num; local_progress = (float)100 / - (nb_sectors / MIN(nb_sectors, (IO_BUF_SIZE / 512))); + (nb_sectors / MIN(nb_sectors, IO_BUF_SIZE / 512)); for(;;) { nb_sectors = total_sectors - sector_num; @@ -1331,7 +1354,7 @@ static int img_rebase(int argc, char **argv) bdrv_get_geometry(bs, &num_sectors); local_progress = (float)100 / - (num_sectors / MIN(num_sectors, (IO_BUF_SIZE / 512))); + (num_sectors / MIN(num_sectors, IO_BUF_SIZE / 512)); for (sector = 0; sector < num_sectors; sector += n) { /* How many sectors can we handle with the next read? */ @@ -1655,7 +1655,7 @@ open_f(int argc, char **argv) flags |= BDRV_O_SNAPSHOT; break; case 'n': - flags |= BDRV_O_NOCACHE; + flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB; break; case 'r': readonly = 1; @@ -1751,7 +1751,7 @@ int main(int argc, char **argv) flags |= BDRV_O_SNAPSHOT; break; case 'n': - flags |= BDRV_O_NOCACHE; + flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB; break; case 'c': add_user_command(optarg); diff --git a/qemu-nbd.c b/qemu-nbd.c index e858033e06..110d78e6a4 100644 --- a/qemu-nbd.c +++ b/qemu-nbd.c @@ -238,7 +238,7 @@ int main(int argc, char **argv) flags |= BDRV_O_SNAPSHOT; break; case 'n': - flags |= BDRV_O_NOCACHE; + flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB; break; case 'b': bindto = optarg; diff --git a/qemu-options.hx b/qemu-options.hx index 489df10c46..f2ef9a1f08 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -662,7 +662,8 @@ DEF("alt-grab", 0, QEMU_OPTION_alt_grab, STEXI @item -alt-grab @findex -alt-grab -Use Ctrl-Alt-Shift to grab mouse (instead of Ctrl-Alt). +Use Ctrl-Alt-Shift to grab mouse (instead of Ctrl-Alt). Note that this also +affects the special keys (for fullscreen, monitor-mode switching, etc). ETEXI DEF("ctrl-grab", 0, QEMU_OPTION_ctrl_grab, @@ -671,7 +672,8 @@ DEF("ctrl-grab", 0, QEMU_OPTION_ctrl_grab, STEXI @item -ctrl-grab @findex -ctrl-grab -Use Right-Ctrl to grab mouse (instead of Ctrl-Alt). +Use Right-Ctrl to grab mouse (instead of Ctrl-Alt). Note that this also +affects the special keys (for fullscreen, monitor-mode switching, etc). ETEXI DEF("no-quit", 0, QEMU_OPTION_no_quit, @@ -712,9 +714,25 @@ Force using the specified IP version. @item password=<secret> Set the password you need to authenticate. +@item sasl +Require that the client use SASL to authenticate with the spice. +The exact choice of authentication method used is controlled from the +system / user's SASL configuration file for the 'qemu' service. This +is typically found in /etc/sasl2/qemu.conf. If running QEMU as an +unprivileged user, an environment variable SASL_CONF_PATH can be used +to make it search alternate locations for the service config. 
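As an illustration only (ports, paths and the exact option syntax are placeholders, not taken from this patch), a minimal sketch of an invocation that combines sasl with the recommended 'tls' and 'x509' settings might look like:

    # hypothetical example: SASL auth over a TLS-protected spice channel
    qemu-system-x86_64 ... -spice port=5900,tls-port=5901,x509-dir=/etc/pki/qemu,sasl
    # when running unprivileged, point SASL at an alternate service config
    SASL_CONF_PATH=$HOME/.sasl2 qemu-system-x86_64 ... -spice tls-port=5901,x509-dir=$HOME/pki,sasl
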
+While some SASL auth methods can also provide data encryption (eg GSSAPI), +it is recommended that SASL always be combined with the 'tls' and +'x509' settings to enable use of SSL and server certificates. This +ensures a data encryption preventing compromise of authentication +credentials. + @item disable-ticketing Allow client connects without authentication. +@item disable-copy-paste +Disable copy paste between the client and the guest. + @item tls-port=<nr> Set the TCP port spice is listening on for encrypted channels. @@ -1159,9 +1177,9 @@ Specify the guest-visible address of the host. Default is the 2nd IP in the guest network, i.e. x.x.x.2. @item restrict=y|yes|n|no -If this options is enabled, the guest will be isolated, i.e. it will not be +If this option is enabled, the guest will be isolated, i.e. it will not be able to contact the host and no guest IP packets will be routed over the host -to the outside. This option does not affect explicitly set forwarding rule. +to the outside. This option does not affect any explicitly set forwarding rules. @item hostname=@var{name} Specifies the client hostname reported by the builtin DHCP server. @@ -2030,6 +2048,16 @@ Enable KVM full virtualization support. This option is only available if KVM support is enabled when compiling. ETEXI +DEF("machine", HAS_ARG, QEMU_OPTION_machine, \ + "-machine accel=accel1[:accel2] use an accelerator (kvm,xen,tcg), default is tcg\n", QEMU_ARCH_ALL) +STEXI +@item -machine accel=@var{accels} +@findex -machine +This is use to enable an accelerator, in kvm,xen,tcg. +By default, it use only tcg. If there a more than one accelerator +specified, the next one is used if the first don't work. +ETEXI + DEF("xen-domid", HAS_ARG, QEMU_OPTION_xen_domid, "-xen-domid id specify xen guest domain id\n", QEMU_ARCH_ALL) DEF("xen-create", 0, QEMU_OPTION_xen_create, diff --git a/qemu-os-win32.h b/qemu-os-win32.h index ed2753d1b7..8a069d7fb6 100644 --- a/qemu-os-win32.h +++ b/qemu-os-win32.h @@ -29,6 +29,9 @@ #include <windows.h> #include <winsock2.h> +/* Declaration of ffs() is missing in MinGW's strings.h. */ +int ffs(int i); + /* Polling handling */ /* return TRUE if no sleep should be done afterwards */ diff --git a/qemu-progress.c b/qemu-progress.c index a4894c0dfc..5f1b8dfb97 100644 --- a/qemu-progress.c +++ b/qemu-progress.c @@ -26,7 +26,6 @@ #include "osdep.h" #include "sysemu.h" #include <stdio.h> -#include <signal.h> struct progress_state { float current; @@ -96,6 +95,13 @@ static void progress_dummy_init(void) state.end = progress_dummy_end; } +/* + * Initialize progress reporting. + * If @enabled is false, actual reporting is suppressed. The user can + * still trigger a report by sending a SIGUSR1. + * Reports are also suppressed unless we've had at least @min_skip + * percent progress since the last report. + */ void qemu_progress_init(int enabled, float min_skip) { state.min_skip = min_skip; @@ -111,14 +117,25 @@ void qemu_progress_end(void) state.end(); } -void qemu_progress_print(float percent, int max) +/* + * Report progress. + * @delta is how much progress we made. + * If @max is zero, @delta is an absolut value of the total job done. + * Else, @delta is a progress delta since the last call, as a fraction + * of @max. I.e. the delta is @delta * @max / 100. This allows + * relative accounting of functions which may be a different fraction of + * the full job, depending on the context they are called in. I.e. 
+ * a function might be considered 40% of the full job if used from + * bdrv_img_create() but only 20% if called from img_convert(). + */ +void qemu_progress_print(float delta, int max) { float current; if (max == 0) { - current = percent; + current = delta; } else { - current = state.current + percent / 100 * max; + current = state.current + delta / 100 * max; } if (current > 100) { current = 100; diff --git a/qemu-timer.c b/qemu-timer.c index 4141b6edbe..72066c7c50 100644 --- a/qemu-timer.c +++ b/qemu-timer.c @@ -39,15 +39,6 @@ #include <sys/param.h> #endif -#ifdef __linux__ -#include <sys/ioctl.h> -#include <linux/rtc.h> -/* For the benefit of older linux systems which don't supply it, - we use a local copy of hpet.h. */ -/* #include <linux/hpet.h> */ -#include "hpet.h" -#endif - #ifdef _WIN32 #include <windows.h> #include <mmsystem.h> @@ -234,12 +225,6 @@ static int dynticks_start_timer(struct qemu_alarm_timer *t); static void dynticks_stop_timer(struct qemu_alarm_timer *t); static void dynticks_rearm_timer(struct qemu_alarm_timer *t); -static int hpet_start_timer(struct qemu_alarm_timer *t); -static void hpet_stop_timer(struct qemu_alarm_timer *t); - -static int rtc_start_timer(struct qemu_alarm_timer *t); -static void rtc_stop_timer(struct qemu_alarm_timer *t); - #endif /* __linux__ */ #endif /* _WIN32 */ @@ -304,10 +289,6 @@ static struct qemu_alarm_timer alarm_timers[] = { #ifdef __linux__ {"dynticks", dynticks_start_timer, dynticks_stop_timer, dynticks_rearm_timer}, - /* HPET - if available - is preferred */ - {"hpet", hpet_start_timer, hpet_stop_timer, NULL}, - /* ...otherwise try RTC */ - {"rtc", rtc_start_timer, rtc_stop_timer, NULL}, #endif {"unix", unix_start_timer, unix_stop_timer, NULL}, #else @@ -822,107 +803,6 @@ static int64_t qemu_next_alarm_deadline(void) #if defined(__linux__) -#define RTC_FREQ 1024 - -static void enable_sigio_timer(int fd) -{ - struct sigaction act; - - /* timer signal */ - sigfillset(&act.sa_mask); - act.sa_flags = 0; - act.sa_handler = host_alarm_handler; - - sigaction(SIGIO, &act, NULL); - fcntl_setfl(fd, O_ASYNC); - fcntl(fd, F_SETOWN, getpid()); -} - -static int hpet_start_timer(struct qemu_alarm_timer *t) -{ - struct hpet_info info; - int r, fd; - - fd = qemu_open("/dev/hpet", O_RDONLY); - if (fd < 0) - return -1; - - /* Set frequency */ - r = ioctl(fd, HPET_IRQFREQ, RTC_FREQ); - if (r < 0) { - fprintf(stderr, "Could not configure '/dev/hpet' to have a 1024Hz timer. This is not a fatal\n" - "error, but for better emulation accuracy type:\n" - "'echo 1024 > /proc/sys/dev/hpet/max-user-freq' as root.\n"); - goto fail; - } - - /* Check capabilities */ - r = ioctl(fd, HPET_INFO, &info); - if (r < 0) - goto fail; - - /* Enable periodic mode */ - r = ioctl(fd, HPET_EPI, 0); - if (info.hi_flags && (r < 0)) - goto fail; - - /* Enable interrupt */ - r = ioctl(fd, HPET_IE_ON, 0); - if (r < 0) - goto fail; - - enable_sigio_timer(fd); - t->fd = fd; - - return 0; -fail: - close(fd); - return -1; -} - -static void hpet_stop_timer(struct qemu_alarm_timer *t) -{ - int fd = t->fd; - - close(fd); -} - -static int rtc_start_timer(struct qemu_alarm_timer *t) -{ - int rtc_fd; - unsigned long current_rtc_freq = 0; - - TFR(rtc_fd = qemu_open("/dev/rtc", O_RDONLY)); - if (rtc_fd < 0) - return -1; - ioctl(rtc_fd, RTC_IRQP_READ, ¤t_rtc_freq); - if (current_rtc_freq != RTC_FREQ && - ioctl(rtc_fd, RTC_IRQP_SET, RTC_FREQ) < 0) { - fprintf(stderr, "Could not configure '/dev/rtc' to have a 1024 Hz timer. 
This is not a fatal\n" - "error, but for better emulation accuracy either use a 2.6 host Linux kernel or\n" - "type 'echo 1024 > /proc/sys/dev/rtc/max-user-freq' as root.\n"); - goto fail; - } - if (ioctl(rtc_fd, RTC_PIE_ON, 0) < 0) { - fail: - close(rtc_fd); - return -1; - } - - enable_sigio_timer(rtc_fd); - - t->fd = rtc_fd; - - return 0; -} - -static void rtc_stop_timer(struct qemu_alarm_timer *t) -{ - int rtc_fd = t->fd; - - close(rtc_fd); -} - static int dynticks_start_timer(struct qemu_alarm_timer *t) { struct sigevent ev; diff --git a/qemu-tool.c b/qemu-tool.c index f4a6ad081c..41e5c4156a 100644 --- a/qemu-tool.c +++ b/qemu-tool.c @@ -19,6 +19,7 @@ #include <sys/time.h> QEMUClock *rt_clock; +QEMUClock *vm_clock; FILE *logfile; @@ -71,3 +72,27 @@ int qemu_set_fd_handler2(int fd, void qemu_notify_event(void) { } + +QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale, + QEMUTimerCB *cb, void *opaque) +{ + return qemu_malloc(1); +} + +void qemu_free_timer(QEMUTimer *ts) +{ + qemu_free(ts); +} + +void qemu_del_timer(QEMUTimer *ts) +{ +} + +void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time) +{ +} + +int64_t qemu_get_clock_ns(QEMUClock *clock) +{ + return 0; +} @@ -201,6 +201,10 @@ static const QErrorStringTable qerror_table[] = { .desc = "An undefined error has ocurred", }, { + .error_fmt = QERR_UNSUPPORTED, + .desc = "this feature or command is not currently supported", + }, + { .error_fmt = QERR_UNKNOWN_BLOCK_FORMAT_FEATURE, .desc = "'%(device)' uses a %(format) feature which is not " "supported by this qemu version: %(feature)", @@ -326,12 +330,14 @@ QError *qerror_from_info(const char *file, int linenr, const char *func, return qerr; } -static void parse_error(const QError *qerror, int c) +static void parse_error(const QErrorStringTable *entry, int c) { - qerror_abort(qerror, "expected '%c' in '%s'", c, qerror->entry->desc); + fprintf(stderr, "expected '%c' in '%s'", c, entry->desc); + abort(); } -static const char *append_field(QString *outstr, const QError *qerror, +static const char *append_field(QDict *error, QString *outstr, + const QErrorStringTable *entry, const char *start) { QObject *obj; @@ -340,23 +346,23 @@ static const char *append_field(QString *outstr, const QError *qerror, const char *end, *key; if (*start != '%') - parse_error(qerror, '%'); + parse_error(entry, '%'); start++; if (*start != '(') - parse_error(qerror, '('); + parse_error(entry, '('); start++; end = strchr(start, ')'); if (!end) - parse_error(qerror, ')'); + parse_error(entry, ')'); key_qs = qstring_from_substr(start, 0, end - start - 1); key = qstring_get_str(key_qs); - qdict = qobject_to_qdict(qdict_get(qerror->error, "data")); + qdict = qobject_to_qdict(qdict_get(error, "data")); obj = qdict_get(qdict, key); if (!obj) { - qerror_abort(qerror, "key '%s' not found in QDict", key); + abort(); } switch (qobject_type(obj)) { @@ -367,41 +373,60 @@ static const char *append_field(QString *outstr, const QError *qerror, qstring_append_int(outstr, qdict_get_int(qdict, key)); break; default: - qerror_abort(qerror, "invalid type '%c'", qobject_type(obj)); + abort(); } QDECREF(key_qs); return ++end; } -/** - * qerror_human(): Format QError data into human-readable string. - * - * Formats according to member 'desc' of the specified QError object. 
- */ -QString *qerror_human(const QError *qerror) +static QString *qerror_format_desc(QDict *error, + const QErrorStringTable *entry) { - const char *p; QString *qstring; + const char *p; - assert(qerror->entry != NULL); + assert(entry != NULL); qstring = qstring_new(); - for (p = qerror->entry->desc; *p != '\0';) { + for (p = entry->desc; *p != '\0';) { if (*p != '%') { qstring_append_chr(qstring, *p++); } else if (*(p + 1) == '%') { qstring_append_chr(qstring, '%'); p += 2; } else { - p = append_field(qstring, qerror, p); + p = append_field(error, qstring, entry, p); } } return qstring; } +QString *qerror_format(const char *fmt, QDict *error) +{ + const QErrorStringTable *entry = NULL; + int i; + + for (i = 0; qerror_table[i].error_fmt; i++) { + if (strcmp(qerror_table[i].error_fmt, fmt) == 0) { + entry = &qerror_table[i]; + break; + } + } + + return qerror_format_desc(error, entry); +} + +/** + * qerror_human(): Format QError data into human-readable string. + */ +QString *qerror_human(const QError *qerror) +{ + return qerror_format_desc(qerror->error, qerror->entry); +} + /** * qerror_print(): Print QError data * @@ -39,6 +39,7 @@ QString *qerror_human(const QError *qerror); void qerror_print(QError *qerror); void qerror_report_internal(const char *file, int linenr, const char *func, const char *fmt, ...) GCC_FMT_ATTR(4, 5); +QString *qerror_format(const char *fmt, QDict *error); #define qerror_report(fmt, ...) \ qerror_report_internal(__FILE__, __LINE__, __func__, fmt, ## __VA_ARGS__) QError *qobject_to_qerror(const QObject *obj); @@ -120,6 +121,9 @@ QError *qobject_to_qerror(const QObject *obj); #define QERR_JSON_PARSING \ "{ 'class': 'JSONParsing', 'data': {} }" +#define QERR_JSON_PARSE_ERROR \ + "{ 'class': 'JSONParseError', 'data': { 'message': %s } }" + #define QERR_KVM_MISSING_CAP \ "{ 'class': 'KVMMissingCap', 'data': { 'capability': %s, 'feature': %s } }" @@ -165,6 +169,9 @@ QError *qobject_to_qerror(const QObject *obj); #define QERR_UNDEFINED_ERROR \ "{ 'class': 'UndefinedError', 'data': {} }" +#define QERR_UNSUPPORTED \ + "{ 'class': 'Unsupported', 'data': {} }" + #define QERR_UNKNOWN_BLOCK_FORMAT_FEATURE \ "{ 'class': 'UnknownBlockFormatFeature', 'data': { 'device': %s, 'format': %s, 'feature': %s } }" diff --git a/qmp-commands.hx b/qmp-commands.hx index fbd98ee295..92c5c3a318 100644 --- a/qmp-commands.hx +++ b/qmp-commands.hx @@ -430,6 +430,33 @@ Example: EQMP { + .name = "inject-nmi", + .args_type = "", + .params = "", + .help = "", + .user_print = monitor_user_noop, + .mhandler.cmd_new = do_inject_nmi, + }, + +SQMP +inject-nmi +---------- + +Inject an NMI on guest's CPUs. + +Arguments: None. + +Example: + +-> { "execute": "inject-nmi" } +<- { "return": {} } + +Note: inject-nmi is only supported for x86 guest currently, it will + returns "Unsupported" error for non-x86 guest. 
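For a non-x86 guest, the command would instead fail with the Unsupported error introduced by this patch; a sketch of what that exchange might look like (the class name and description follow the qerror_table entry above, the exact reply shape may differ):

-> { "execute": "inject-nmi" }
<- { "error": { "class": "Unsupported", "desc": "this feature or command is not currently supported" } }
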
+ +EQMP + + { .name = "migrate", .args_type = "detach:-d,blk:-b,inc:-i,uri:s", .params = "[-d] [-b] [-i] uri", @@ -1039,7 +1066,8 @@ Each json-object contain the following: - "device": device name (json-string) - "type": device type (json-string) - - Possible values: "hd", "cdrom", "floppy", "unknown" + - deprecated, retained for backward compatibility + - Possible values: "unknown" - "removable": true if the device is removable, false otherwise (json-bool) - "locked": true if the device is locked, false otherwise (json-bool) - "inserted": only present if the device is inserted, it is a json-object @@ -1070,25 +1098,25 @@ Example: "encrypted":false, "file":"disks/test.img" }, - "type":"hd" + "type":"unknown" }, { "device":"ide1-cd0", "locked":false, "removable":true, - "type":"cdrom" + "type":"unknown" }, { "device":"floppy0", "locked":false, "removable":true, - "type": "floppy" + "type":"unknown" }, { "device":"sd0", "locked":false, "removable":true, - "type":"floppy" + "type":"unknown" } ] } @@ -23,7 +23,6 @@ */ #include <unistd.h> #include <fcntl.h> -#include <signal.h> #include <time.h> #include <errno.h> #include <sys/time.h> diff --git a/scripts/qemu-binfmt-conf.sh b/scripts/qemu-binfmt-conf.sh index c50beb7337..83a44d8e2a 100644 --- a/scripts/qemu-binfmt-conf.sh +++ b/scripts/qemu-binfmt-conf.sh @@ -1,5 +1,5 @@ #!/bin/sh -# enable automatic i386/ARM/M68K/MIPS/SPARC/PPC program execution by the kernel +# enable automatic i386/ARM/M68K/MIPS/SPARC/PPC/s390 program execution by the kernel # load the binfmt_misc module if [ ! -d /proc/sys/fs/binfmt_misc ]; then @@ -63,4 +63,6 @@ fi if [ $cpu != "sh" ] ; then echo ':sh4:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a\x00:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-sh4:' > /proc/sys/fs/binfmt_misc/register echo ':sh4eb:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-sh4eb:' > /proc/sys/fs/binfmt_misc/register +if [ $cpu != "s390x" ] ; then + echo ':s390x:M::\x7fELF\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x16:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-s390x:' > /proc/sys/fs/binfmt_misc/register fi diff --git a/spice-qemu-char.c b/spice-qemu-char.c index fa15a71e14..605c241239 100644 --- a/spice-qemu-char.c +++ b/spice-qemu-char.c @@ -36,14 +36,13 @@ static int vmc_write(SpiceCharDeviceInstance *sin, const uint8_t *buf, int len) while (len > 0) { last_out = MIN(len, VMC_MAX_HOST_WRITE); - qemu_chr_read(scd->chr, p, last_out); - if (last_out > 0) { - out += last_out; - len -= last_out; - p += last_out; - } else { + if (qemu_chr_can_read(scd->chr) < last_out) { break; } + qemu_chr_read(scd->chr, p, last_out); + out += last_out; + len -= last_out; + p += last_out; } dprintf(scd, 3, "%s: %lu/%zd\n", __func__, out, len + out); @@ -42,6 +42,8 @@ void qemu_system_shutdown_request(void); void qemu_system_powerdown_request(void); void qemu_system_debug_request(void); void qemu_system_vmstop_request(int reason); +int qemu_shutdown_requested_get(void); +int qemu_reset_requested_get(void); int qemu_shutdown_requested(void); int qemu_reset_requested(void); int qemu_powerdown_requested(void); diff --git a/target-alpha/cpu.h b/target-alpha/cpu.h index 686fb4a6a7..e98b32513c 100644 --- a/target-alpha/cpu.h +++ b/target-alpha/cpu.h @@ -192,171 +192,39 @@ enum { #define 
SWCR_MASK (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK | SWCR_STATUS_MASK) -/* Internal processor registers */ -/* XXX: TOFIX: most of those registers are implementation dependant */ -enum { -#if defined(CONFIG_USER_ONLY) - IPR_EXC_ADDR, - IPR_EXC_SUM, - IPR_EXC_MASK, -#else - /* Ebox IPRs */ - IPR_CC = 0xC0, /* 21264 */ - IPR_CC_CTL = 0xC1, /* 21264 */ -#define IPR_CC_CTL_ENA_SHIFT 32 -#define IPR_CC_CTL_COUNTER_MASK 0xfffffff0UL - IPR_VA = 0xC2, /* 21264 */ - IPR_VA_CTL = 0xC4, /* 21264 */ -#define IPR_VA_CTL_VA_48_SHIFT 1 -#define IPR_VA_CTL_VPTB_SHIFT 30 - IPR_VA_FORM = 0xC3, /* 21264 */ - /* Ibox IPRs */ - IPR_ITB_TAG = 0x00, /* 21264 */ - IPR_ITB_PTE = 0x01, /* 21264 */ - IPR_ITB_IAP = 0x02, - IPR_ITB_IA = 0x03, /* 21264 */ - IPR_ITB_IS = 0x04, /* 21264 */ - IPR_PMPC = 0x05, - IPR_EXC_ADDR = 0x06, /* 21264 */ - IPR_IVA_FORM = 0x07, /* 21264 */ - IPR_CM = 0x09, /* 21264 */ -#define IPR_CM_SHIFT 3 -#define IPR_CM_MASK (3ULL << IPR_CM_SHIFT) /* 21264 */ - IPR_IER = 0x0A, /* 21264 */ -#define IPR_IER_MASK 0x0000007fffffe000ULL - IPR_IER_CM = 0x0B, /* 21264: = CM | IER */ - IPR_SIRR = 0x0C, /* 21264 */ -#define IPR_SIRR_SHIFT 14 -#define IPR_SIRR_MASK 0x7fff - IPR_ISUM = 0x0D, /* 21264 */ - IPR_HW_INT_CLR = 0x0E, /* 21264 */ - IPR_EXC_SUM = 0x0F, - IPR_PAL_BASE = 0x10, - IPR_I_CTL = 0x11, -#define IPR_I_CTL_CHIP_ID_SHIFT 24 /* 21264 */ -#define IPR_I_CTL_BIST_FAIL (1 << 23) /* 21264 */ -#define IPR_I_CTL_IC_EN_SHIFT 2 /* 21264 */ -#define IPR_I_CTL_SDE1_SHIFT 7 /* 21264 */ -#define IPR_I_CTL_HWE_SHIFT 12 /* 21264 */ -#define IPR_I_CTL_VA_48_SHIFT 15 /* 21264 */ -#define IPR_I_CTL_SPE_SHIFT 3 /* 21264 */ -#define IPR_I_CTL_CALL_PAL_R23_SHIFT 20 /* 21264 */ - IPR_I_STAT = 0x16, /* 21264 */ - IPR_IC_FLUSH = 0x13, /* 21264 */ - IPR_IC_FLUSH_ASM = 0x12, /* 21264 */ - IPR_CLR_MAP = 0x15, - IPR_SLEEP = 0x17, - IPR_PCTX = 0x40, - IPR_PCTX_ASN = 0x01, /* field */ -#define IPR_PCTX_ASN_SHIFT 39 - IPR_PCTX_ASTER = 0x02, /* field */ -#define IPR_PCTX_ASTER_SHIFT 5 - IPR_PCTX_ASTRR = 0x04, /* field */ -#define IPR_PCTX_ASTRR_SHIFT 9 - IPR_PCTX_PPCE = 0x08, /* field */ -#define IPR_PCTX_PPCE_SHIFT 1 - IPR_PCTX_FPE = 0x10, /* field */ -#define IPR_PCTX_FPE_SHIFT 2 - IPR_PCTX_ALL = 0x5f, /* all fields */ - IPR_PCTR_CTL = 0x14, /* 21264 */ - /* Mbox IPRs */ - IPR_DTB_TAG0 = 0x20, /* 21264 */ - IPR_DTB_TAG1 = 0xA0, /* 21264 */ - IPR_DTB_PTE0 = 0x21, /* 21264 */ - IPR_DTB_PTE1 = 0xA1, /* 21264 */ - IPR_DTB_ALTMODE = 0xA6, - IPR_DTB_ALTMODE0 = 0x26, /* 21264 */ -#define IPR_DTB_ALTMODE_MASK 3 - IPR_DTB_IAP = 0xA2, - IPR_DTB_IA = 0xA3, /* 21264 */ - IPR_DTB_IS0 = 0x24, - IPR_DTB_IS1 = 0xA4, - IPR_DTB_ASN0 = 0x25, /* 21264 */ - IPR_DTB_ASN1 = 0xA5, /* 21264 */ -#define IPR_DTB_ASN_SHIFT 56 - IPR_MM_STAT = 0x27, /* 21264 */ - IPR_M_CTL = 0x28, /* 21264 */ -#define IPR_M_CTL_SPE_SHIFT 1 -#define IPR_M_CTL_SPE_MASK 7 - IPR_DC_CTL = 0x29, /* 21264 */ - IPR_DC_STAT = 0x2A, /* 21264 */ - /* Cbox IPRs */ - IPR_C_DATA = 0x2B, - IPR_C_SHIFT = 0x2C, - - IPR_ASN, - IPR_ASTEN, - IPR_ASTSR, - IPR_DATFX, - IPR_ESP, - IPR_FEN, - IPR_IPIR, - IPR_IPL, - IPR_KSP, - IPR_MCES, - IPR_PERFMON, - IPR_PCBB, - IPR_PRBR, - IPR_PTBR, - IPR_SCBB, - IPR_SISR, - IPR_SSP, - IPR_SYSPTBR, - IPR_TBCHK, - IPR_TBIA, - IPR_TBIAP, - IPR_TBIS, - IPR_TBISD, - IPR_TBISI, - IPR_USP, - IPR_VIRBND, - IPR_VPTB, - IPR_WHAMI, - IPR_ALT_MODE, -#endif - IPR_LAST, -}; +/* MMU modes definitions */ -typedef struct CPUAlphaState CPUAlphaState; +/* Alpha has 5 MMU modes: PALcode, kernel, executive, supervisor, and user. 
+ The Unix PALcode only exposes the kernel and user modes; presumably + executive and supervisor are used by VMS. -typedef struct pal_handler_t pal_handler_t; -struct pal_handler_t { - /* Reset */ - void (*reset)(CPUAlphaState *env); - /* Uncorrectable hardware error */ - void (*machine_check)(CPUAlphaState *env); - /* Arithmetic exception */ - void (*arithmetic)(CPUAlphaState *env); - /* Interrupt / correctable hardware error */ - void (*interrupt)(CPUAlphaState *env); - /* Data fault */ - void (*dfault)(CPUAlphaState *env); - /* DTB miss pal */ - void (*dtb_miss_pal)(CPUAlphaState *env); - /* DTB miss native */ - void (*dtb_miss_native)(CPUAlphaState *env); - /* Unaligned access */ - void (*unalign)(CPUAlphaState *env); - /* ITB miss */ - void (*itb_miss)(CPUAlphaState *env); - /* Instruction stream access violation */ - void (*itb_acv)(CPUAlphaState *env); - /* Reserved or privileged opcode */ - void (*opcdec)(CPUAlphaState *env); - /* Floating point exception */ - void (*fen)(CPUAlphaState *env); - /* Call pal instruction */ - void (*call_pal)(CPUAlphaState *env, uint32_t palcode); -}; + PALcode itself uses physical mode for code and kernel mode for data; + there are PALmode instructions that can access data via physical mode + or via an os-installed "alternate mode", which is one of the 4 above. -#define NB_MMU_MODES 4 + QEMU does not currently properly distinguish between code/data when + looking up addresses. To avoid having to address this issue, our + emulated PALcode will cheat and use the KSEG mapping for its code+data + rather than physical addresses. + + Moreover, we're only emulating Unix PALcode, and not attempting VMS. + + All of which allows us to drop all but kernel and user modes. + Elide the unused MMU modes to save space. */ + +#define NB_MMU_MODES 2 + +#define MMU_MODE0_SUFFIX _kernel +#define MMU_MODE1_SUFFIX _user +#define MMU_KERNEL_IDX 0 +#define MMU_USER_IDX 1 + +typedef struct CPUAlphaState CPUAlphaState; struct CPUAlphaState { uint64_t ir[31]; float64 fir[31]; uint64_t pc; - uint64_t ipr[IPR_LAST]; - uint64_t ps; uint64_t unique; uint64_t lock_addr; uint64_t lock_st_addr; @@ -371,10 +239,33 @@ struct CPUAlphaState { uint8_t fpcr_dnod; uint8_t fpcr_undz; - /* Used for HW_LD / HW_ST */ - uint8_t saved_mode; - /* For RC and RS */ + /* The Internal Processor Registers. Some of these we assume always + exist for use in user-mode. */ + uint8_t ps; uint8_t intr_flag; + uint8_t pal_mode; + uint8_t fen; + + uint32_t pcc_ofs; + + /* These pass data from the exception logic in the translator and + helpers to the OS entry point. This is used for both system + emulation and user-mode. */ + uint64_t trap_arg0; + uint64_t trap_arg1; + uint64_t trap_arg2; + +#if !defined(CONFIG_USER_ONLY) + /* The internal data required by our emulation of the Unix PALcode. 
*/ + uint64_t exc_addr; + uint64_t palbr; + uint64_t ptbr; + uint64_t vptptr; + uint64_t sysval; + uint64_t usp; + uint64_t shadow[8]; + uint64_t scratch[24]; +#endif #if TARGET_LONG_BITS > HOST_LONG_BITS /* temporary fixed-point registers @@ -386,14 +277,11 @@ struct CPUAlphaState { /* Those resources are used only in Qemu core */ CPU_COMMON - uint32_t hflags; - int error_code; uint32_t features; uint32_t amask; int implver; - pal_handler_t *pal_handler; }; #define cpu_init cpu_alpha_init @@ -401,17 +289,6 @@ struct CPUAlphaState { #define cpu_gen_code cpu_alpha_gen_code #define cpu_signal_handler cpu_alpha_signal_handler -/* MMU modes definitions */ -#define MMU_MODE0_SUFFIX _kernel -#define MMU_MODE1_SUFFIX _executive -#define MMU_MODE2_SUFFIX _supervisor -#define MMU_MODE3_SUFFIX _user -#define MMU_USER_IDX 3 -static inline int cpu_mmu_index (CPUState *env) -{ - return (env->ps >> 3) & 3; -} - #include "cpu-all.h" enum { @@ -422,36 +299,89 @@ enum { }; enum { - EXCP_RESET = 0x0000, - EXCP_MCHK = 0x0020, - EXCP_ARITH = 0x0060, - EXCP_HW_INTERRUPT = 0x00E0, - EXCP_DFAULT = 0x01E0, - EXCP_DTB_MISS_PAL = 0x09E0, - EXCP_ITB_MISS = 0x03E0, - EXCP_ITB_ACV = 0x07E0, - EXCP_DTB_MISS_NATIVE = 0x08E0, - EXCP_UNALIGN = 0x11E0, - EXCP_OPCDEC = 0x13E0, - EXCP_FEN = 0x17E0, - EXCP_CALL_PAL = 0x2000, - EXCP_CALL_PALP = 0x3000, - EXCP_CALL_PALE = 0x4000, - /* Pseudo exception for console */ - EXCP_CONSOLE_DISPATCH = 0x4001, - EXCP_CONSOLE_FIXUP = 0x4002, - EXCP_STL_C = 0x4003, - EXCP_STQ_C = 0x4004, + EXCP_RESET, + EXCP_MCHK, + EXCP_SMP_INTERRUPT, + EXCP_CLK_INTERRUPT, + EXCP_DEV_INTERRUPT, + EXCP_MMFAULT, + EXCP_UNALIGN, + EXCP_OPCDEC, + EXCP_ARITH, + EXCP_FEN, + EXCP_CALL_PAL, + /* For Usermode emulation. */ + EXCP_STL_C, + EXCP_STQ_C, }; -/* Arithmetic exception */ -#define EXC_M_IOV (1<<16) /* Integer Overflow */ -#define EXC_M_INE (1<<15) /* Inexact result */ -#define EXC_M_UNF (1<<14) /* Underflow */ -#define EXC_M_FOV (1<<13) /* Overflow */ -#define EXC_M_DZE (1<<12) /* Division by zero */ -#define EXC_M_INV (1<<11) /* Invalid operation */ -#define EXC_M_SWC (1<<10) /* Software completion */ +/* Alpha-specific interrupt pending bits. */ +#define CPU_INTERRUPT_TIMER CPU_INTERRUPT_TGT_EXT_0 +#define CPU_INTERRUPT_SMP CPU_INTERRUPT_TGT_EXT_1 +#define CPU_INTERRUPT_MCHK CPU_INTERRUPT_TGT_EXT_2 + +/* OSF/1 Page table bits. */ +enum { + PTE_VALID = 0x0001, + PTE_FOR = 0x0002, /* used for page protection (fault on read) */ + PTE_FOW = 0x0004, /* used for page protection (fault on write) */ + PTE_FOE = 0x0008, /* used for page protection (fault on exec) */ + PTE_ASM = 0x0010, + PTE_KRE = 0x0100, + PTE_URE = 0x0200, + PTE_KWE = 0x1000, + PTE_UWE = 0x2000 +}; + +/* Hardware interrupt (entInt) constants. */ +enum { + INT_K_IP, + INT_K_CLK, + INT_K_MCHK, + INT_K_DEV, + INT_K_PERF, +}; + +/* Memory management (entMM) constants. */ +enum { + MM_K_TNV, + MM_K_ACV, + MM_K_FOR, + MM_K_FOE, + MM_K_FOW +}; + +/* Arithmetic exception (entArith) constants. */ +enum { + EXC_M_SWC = 1, /* Software completion */ + EXC_M_INV = 2, /* Invalid operation */ + EXC_M_DZE = 4, /* Division by zero */ + EXC_M_FOV = 8, /* Overflow */ + EXC_M_UNF = 16, /* Underflow */ + EXC_M_INE = 32, /* Inexact result */ + EXC_M_IOV = 64 /* Integer Overflow */ +}; + +/* Processor status constants. */ +enum { + /* Low 3 bits are interrupt mask level. */ + PS_INT_MASK = 7, + + /* Bits 4 and 5 are the mmu mode. The VMS PALcode uses all 4 modes; + The Unix PALcode only uses bit 4. 
*/ + PS_USER_MODE = 8 +}; + +static inline int cpu_mmu_index(CPUState *env) +{ + if (env->pal_mode) { + return MMU_KERNEL_IDX; + } else if (env->ps & PS_USER_MODE) { + return MMU_USER_IDX; + } else { + return MMU_KERNEL_IDX; + } +} enum { IR_V0 = 0, @@ -504,19 +434,46 @@ void do_interrupt (CPUState *env); uint64_t cpu_alpha_load_fpcr (CPUState *env); void cpu_alpha_store_fpcr (CPUState *env, uint64_t val); -int cpu_alpha_mfpr (CPUState *env, int iprn, uint64_t *valp); -int cpu_alpha_mtpr (CPUState *env, int iprn, uint64_t val, uint64_t *oldvalp); -#if !defined (CONFIG_USER_ONLY) -void pal_init (CPUState *env); -void call_pal (CPUState *env); +#ifndef CONFIG_USER_ONLY +void swap_shadow_regs(CPUState *env); +extern QEMU_NORETURN void do_unassigned_access(target_phys_addr_t addr, + int, int, int, int); #endif +/* Bits in TB->FLAGS that control how translation is processed. */ +enum { + TB_FLAGS_PAL_MODE = 1, + TB_FLAGS_FEN = 2, + TB_FLAGS_USER_MODE = 8, + + TB_FLAGS_AMASK_SHIFT = 4, + TB_FLAGS_AMASK_BWX = AMASK_BWX << TB_FLAGS_AMASK_SHIFT, + TB_FLAGS_AMASK_FIX = AMASK_FIX << TB_FLAGS_AMASK_SHIFT, + TB_FLAGS_AMASK_CIX = AMASK_CIX << TB_FLAGS_AMASK_SHIFT, + TB_FLAGS_AMASK_MVI = AMASK_MVI << TB_FLAGS_AMASK_SHIFT, + TB_FLAGS_AMASK_TRAP = AMASK_TRAP << TB_FLAGS_AMASK_SHIFT, + TB_FLAGS_AMASK_PREFETCH = AMASK_PREFETCH << TB_FLAGS_AMASK_SHIFT, +}; + static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc, - target_ulong *cs_base, int *flags) + target_ulong *cs_base, int *pflags) { + int flags = 0; + *pc = env->pc; *cs_base = 0; - *flags = env->ps; + + if (env->pal_mode) { + flags = TB_FLAGS_PAL_MODE; + } else { + flags = env->ps & PS_USER_MODE; + } + if (env->fen) { + flags |= TB_FLAGS_FEN; + } + flags |= env->amask << TB_FLAGS_AMASK_SHIFT; + + *pflags = flags; } #if defined(CONFIG_USER_ONLY) diff --git a/target-alpha/exec.h b/target-alpha/exec.h index 6ae96d148b..7a325e7a75 100644 --- a/target-alpha/exec.h +++ b/target-alpha/exec.h @@ -39,7 +39,17 @@ register struct CPUAlphaState *env asm(AREG0); static inline int cpu_has_work(CPUState *env) { - return (env->interrupt_request & CPU_INTERRUPT_HARD); + /* Here we are checking to see if the CPU should wake up from HALT. + We will have gotten into this state only for WTINT from PALmode. */ + /* ??? I'm not sure how the IPL state works with WTINT to keep a CPU + asleep even if (some) interrupts have been asserted. For now, + assume that if a CPU really wants to stay asleep, it will mask + interrupts at the chipset level, which will prevent these bits + from being set in the first place. 
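   If it ever became necessary to honour the IPL here, a minimal sketch
   (assuming the chipset model exported the highest pending level as, say,
   a hypothetical pending_ipl value) might look like:

       return (env->interrupt_request & CPU_INTERRUPT_HARD)
           && pending_ipl > (env->ps & PS_INT_MASK);   // pending_ipl: hypothetical

   This patch deliberately does not do that and instead relies on the
   chipset-level masking described above.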
*/ + return env->interrupt_request & (CPU_INTERRUPT_HARD + | CPU_INTERRUPT_TIMER + | CPU_INTERRUPT_SMP + | CPU_INTERRUPT_MCHK); } static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb) diff --git a/target-alpha/helper.c b/target-alpha/helper.c index 3ba4478c8e..32c2cf9db3 100644 --- a/target-alpha/helper.c +++ b/target-alpha/helper.c @@ -160,382 +160,299 @@ void cpu_alpha_store_fpcr (CPUState *env, uint64_t val) } #if defined(CONFIG_USER_ONLY) - int cpu_alpha_handle_mmu_fault (CPUState *env, target_ulong address, int rw, int mmu_idx, int is_softmmu) { - if (rw == 2) - env->exception_index = EXCP_ITB_MISS; - else - env->exception_index = EXCP_DFAULT; - env->ipr[IPR_EXC_ADDR] = address; - + env->exception_index = EXCP_MMFAULT; + env->trap_arg0 = address; return 1; } - -void do_interrupt (CPUState *env) +#else +void swap_shadow_regs(CPUState *env) { - env->exception_index = -1; + uint64_t i0, i1, i2, i3, i4, i5, i6, i7; + + i0 = env->ir[8]; + i1 = env->ir[9]; + i2 = env->ir[10]; + i3 = env->ir[11]; + i4 = env->ir[12]; + i5 = env->ir[13]; + i6 = env->ir[14]; + i7 = env->ir[25]; + + env->ir[8] = env->shadow[0]; + env->ir[9] = env->shadow[1]; + env->ir[10] = env->shadow[2]; + env->ir[11] = env->shadow[3]; + env->ir[12] = env->shadow[4]; + env->ir[13] = env->shadow[5]; + env->ir[14] = env->shadow[6]; + env->ir[25] = env->shadow[7]; + + env->shadow[0] = i0; + env->shadow[1] = i1; + env->shadow[2] = i2; + env->shadow[3] = i3; + env->shadow[4] = i4; + env->shadow[5] = i5; + env->shadow[6] = i6; + env->shadow[7] = i7; } -#else +/* Returns the OSF/1 entMM failure indication, or -1 on success. */ +static int get_physical_address(CPUState *env, target_ulong addr, + int prot_need, int mmu_idx, + target_ulong *pphys, int *pprot) +{ + target_long saddr = addr; + target_ulong phys = 0; + target_ulong L1pte, L2pte, L3pte; + target_ulong pt, index; + int prot = 0; + int ret = MM_K_ACV; + + /* Ensure that the virtual address is properly sign-extended from + the last implemented virtual address bit. */ + if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) { + goto exit; + } + + /* Translate the superpage. */ + /* ??? When we do more than emulate Unix PALcode, we'll need to + determine which KSEG is actually active. */ + if (saddr < 0 && ((saddr >> 41) & 3) == 2) { + /* User-space cannot access KSEG addresses. */ + if (mmu_idx != MMU_KERNEL_IDX) { + goto exit; + } + + /* For the benefit of the Typhoon chipset, move bit 40 to bit 43. + We would not do this if the 48-bit KSEG is enabled. */ + phys = saddr & ((1ull << 40) - 1); + phys |= (saddr & (1ull << 40)) << 3; + + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + ret = -1; + goto exit; + } + + /* Interpret the page table exactly like PALcode does. */ -target_phys_addr_t cpu_get_phys_page_debug (CPUState *env, target_ulong addr) + pt = env->ptbr; + + /* L1 page table read. */ + index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff; + L1pte = ldq_phys(pt + index*8); + + if (unlikely((L1pte & PTE_VALID) == 0)) { + ret = MM_K_TNV; + goto exit; + } + if (unlikely((L1pte & PTE_KRE) == 0)) { + goto exit; + } + pt = L1pte >> 32 << TARGET_PAGE_BITS; + + /* L2 page table read. */ + index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff; + L2pte = ldq_phys(pt + index*8); + + if (unlikely((L2pte & PTE_VALID) == 0)) { + ret = MM_K_TNV; + goto exit; + } + if (unlikely((L2pte & PTE_KRE) == 0)) { + goto exit; + } + pt = L2pte >> 32 << TARGET_PAGE_BITS; + + /* L3 page table read. 
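   For reference, with the 8K pages Alpha uses here (TARGET_PAGE_BITS = 13)
   each level indexes with 10 bits, so a mapped virtual address decomposes
   as:

       addr[42:33]  L1 index       (addr >> 33) & 0x3ff
       addr[32:23]  L2 index       (addr >> 23) & 0x3ff
       addr[22:13]  L3 index       (addr >> 13) & 0x3ff
       addr[12:0]   byte offset within the 8K page

   i.e. 13 + 3*10 = 43 bits of mappable virtual address, which is
   presumably what the TARGET_VIRT_ADDR_SPACE_BITS sign-extension check at
   the top of this function corresponds to.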
*/ + index = (addr >> TARGET_PAGE_BITS) & 0x3ff; + L3pte = ldq_phys(pt + index*8); + + phys = L3pte >> 32 << TARGET_PAGE_BITS; + if (unlikely((L3pte & PTE_VALID) == 0)) { + ret = MM_K_TNV; + goto exit; + } + +#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4 +# error page bits out of date +#endif + + /* Check access violations. */ + if (L3pte & (PTE_KRE << mmu_idx)) { + prot |= PAGE_READ | PAGE_EXEC; + } + if (L3pte & (PTE_KWE << mmu_idx)) { + prot |= PAGE_WRITE; + } + if (unlikely((prot & prot_need) == 0 && prot_need)) { + goto exit; + } + + /* Check fault-on-operation violations. */ + prot &= ~(L3pte >> 1); + ret = -1; + if (unlikely((prot & prot_need) == 0)) { + ret = (prot_need & PAGE_EXEC ? MM_K_FOE : + prot_need & PAGE_WRITE ? MM_K_FOW : + prot_need & PAGE_READ ? MM_K_FOR : -1); + } + + exit: + *pphys = phys; + *pprot = prot; + return ret; +} + +target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr) { - return -1; + target_ulong phys; + int prot, fail; + + fail = get_physical_address(env, addr, 0, 0, &phys, &prot); + return (fail >= 0 ? -1 : phys); } -int cpu_alpha_handle_mmu_fault (CPUState *env, target_ulong address, int rw, - int mmu_idx, int is_softmmu) +int cpu_alpha_handle_mmu_fault(CPUState *env, target_ulong addr, int rw, + int mmu_idx, int is_softmmu) { - uint32_t opc; - - if (rw == 2) { - /* Instruction translation buffer miss */ - env->exception_index = EXCP_ITB_MISS; - } else { - if (env->ipr[IPR_EXC_ADDR] & 1) - env->exception_index = EXCP_DTB_MISS_PAL; - else - env->exception_index = EXCP_DTB_MISS_NATIVE; - opc = (ldl_code(env->pc) >> 21) << 4; - if (rw) { - opc |= 0x9; - } else { - opc |= 0x4; - } - env->ipr[IPR_MM_STAT] = opc; + target_ulong phys; + int prot, fail; + + fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot); + if (unlikely(fail >= 0)) { + env->exception_index = EXCP_MMFAULT; + env->trap_arg0 = addr; + env->trap_arg1 = fail; + env->trap_arg2 = (rw == 2 ? 
-1 : rw); + return 1; } - return 1; + tlb_set_page(env, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK, + prot, mmu_idx, TARGET_PAGE_SIZE); + return 0; } +#endif /* USER_ONLY */ -int cpu_alpha_mfpr (CPUState *env, int iprn, uint64_t *valp) +void do_interrupt (CPUState *env) { - uint64_t hwpcb; - int ret = 0; - - hwpcb = env->ipr[IPR_PCBB]; - switch (iprn) { - case IPR_ASN: - if (env->features & FEATURE_ASN) - *valp = env->ipr[IPR_ASN]; - else - *valp = 0; - break; - case IPR_ASTEN: - *valp = ((int64_t)(env->ipr[IPR_ASTEN] << 60)) >> 60; - break; - case IPR_ASTSR: - *valp = ((int64_t)(env->ipr[IPR_ASTSR] << 60)) >> 60; - break; - case IPR_DATFX: - /* Write only */ - ret = -1; - break; - case IPR_ESP: - if (env->features & FEATURE_SPS) - *valp = env->ipr[IPR_ESP]; - else - *valp = ldq_raw(hwpcb + 8); - break; - case IPR_FEN: - *valp = ((int64_t)(env->ipr[IPR_FEN] << 63)) >> 63; - break; - case IPR_IPIR: - /* Write-only */ - ret = -1; - break; - case IPR_IPL: - *valp = ((int64_t)(env->ipr[IPR_IPL] << 59)) >> 59; - break; - case IPR_KSP: - if (!(env->ipr[IPR_EXC_ADDR] & 1)) { - ret = -1; - } else { - if (env->features & FEATURE_SPS) - *valp = env->ipr[IPR_KSP]; - else - *valp = ldq_raw(hwpcb + 0); - } - break; - case IPR_MCES: - *valp = ((int64_t)(env->ipr[IPR_MCES] << 59)) >> 59; - break; - case IPR_PERFMON: - /* Implementation specific */ - *valp = 0; - break; - case IPR_PCBB: - *valp = ((int64_t)env->ipr[IPR_PCBB] << 16) >> 16; - break; - case IPR_PRBR: - *valp = env->ipr[IPR_PRBR]; - break; - case IPR_PTBR: - *valp = env->ipr[IPR_PTBR]; - break; - case IPR_SCBB: - *valp = (int64_t)((int32_t)env->ipr[IPR_SCBB]); - break; - case IPR_SIRR: - /* Write-only */ - ret = -1; - break; - case IPR_SISR: - *valp = (int64_t)((int16_t)env->ipr[IPR_SISR]); - case IPR_SSP: - if (env->features & FEATURE_SPS) - *valp = env->ipr[IPR_SSP]; - else - *valp = ldq_raw(hwpcb + 16); - break; - case IPR_SYSPTBR: - if (env->features & FEATURE_VIRBND) - *valp = env->ipr[IPR_SYSPTBR]; - else - ret = -1; - break; - case IPR_TBCHK: - if ((env->features & FEATURE_TBCHK)) { - /* XXX: TODO */ - *valp = 0; - ret = -1; - } else { - ret = -1; + int i = env->exception_index; + + if (qemu_loglevel_mask(CPU_LOG_INT)) { + static int count; + const char *name = "<unknown>"; + + switch (i) { + case EXCP_RESET: + name = "reset"; + break; + case EXCP_MCHK: + name = "mchk"; + break; + case EXCP_SMP_INTERRUPT: + name = "smp_interrupt"; + break; + case EXCP_CLK_INTERRUPT: + name = "clk_interrupt"; + break; + case EXCP_DEV_INTERRUPT: + name = "dev_interrupt"; + break; + case EXCP_MMFAULT: + name = "mmfault"; + break; + case EXCP_UNALIGN: + name = "unalign"; + break; + case EXCP_OPCDEC: + name = "opcdec"; + break; + case EXCP_ARITH: + name = "arith"; + break; + case EXCP_FEN: + name = "fen"; + break; + case EXCP_CALL_PAL: + name = "call_pal"; + break; + case EXCP_STL_C: + name = "stl_c"; + break; + case EXCP_STQ_C: + name = "stq_c"; + break; } - break; - case IPR_TBIA: - /* Write-only */ - ret = -1; - break; - case IPR_TBIAP: - /* Write-only */ - ret = -1; - break; - case IPR_TBIS: - /* Write-only */ - ret = -1; - break; - case IPR_TBISD: - /* Write-only */ - ret = -1; - break; - case IPR_TBISI: - /* Write-only */ - ret = -1; - break; - case IPR_USP: - if (env->features & FEATURE_SPS) - *valp = env->ipr[IPR_USP]; - else - *valp = ldq_raw(hwpcb + 24); - break; - case IPR_VIRBND: - if (env->features & FEATURE_VIRBND) - *valp = env->ipr[IPR_VIRBND]; - else - ret = -1; - break; - case IPR_VPTB: - *valp = env->ipr[IPR_VPTB]; - break; - case 
IPR_WHAMI: - *valp = env->ipr[IPR_WHAMI]; - break; - default: - /* Invalid */ - ret = -1; - break; + qemu_log("INT %6d: %s(%#x) pc=%016" PRIx64 " sp=%016" PRIx64 "\n", + ++count, name, env->error_code, env->pc, env->ir[IR_SP]); } - return ret; -} + env->exception_index = -1; -int cpu_alpha_mtpr (CPUState *env, int iprn, uint64_t val, uint64_t *oldvalp) -{ - uint64_t hwpcb, tmp64; - uint8_t tmp8; - int ret = 0; - - hwpcb = env->ipr[IPR_PCBB]; - switch (iprn) { - case IPR_ASN: - /* Read-only */ - ret = -1; +#if !defined(CONFIG_USER_ONLY) + switch (i) { + case EXCP_RESET: + i = 0x0000; break; - case IPR_ASTEN: - tmp8 = ((int8_t)(env->ipr[IPR_ASTEN] << 4)) >> 4; - *oldvalp = tmp8; - tmp8 &= val & 0xF; - tmp8 |= (val >> 4) & 0xF; - env->ipr[IPR_ASTEN] &= ~0xF; - env->ipr[IPR_ASTEN] |= tmp8; - ret = 1; + case EXCP_MCHK: + i = 0x0080; break; - case IPR_ASTSR: - tmp8 = ((int8_t)(env->ipr[IPR_ASTSR] << 4)) >> 4; - *oldvalp = tmp8; - tmp8 &= val & 0xF; - tmp8 |= (val >> 4) & 0xF; - env->ipr[IPR_ASTSR] &= ~0xF; - env->ipr[IPR_ASTSR] |= tmp8; - ret = 1; - case IPR_DATFX: - env->ipr[IPR_DATFX] &= ~0x1; - env->ipr[IPR_DATFX] |= val & 1; - tmp64 = ldq_raw(hwpcb + 56); - tmp64 &= ~0x8000000000000000ULL; - tmp64 |= (val & 1) << 63; - stq_raw(hwpcb + 56, tmp64); + case EXCP_SMP_INTERRUPT: + i = 0x0100; break; - case IPR_ESP: - if (env->features & FEATURE_SPS) - env->ipr[IPR_ESP] = val; - else - stq_raw(hwpcb + 8, val); + case EXCP_CLK_INTERRUPT: + i = 0x0180; break; - case IPR_FEN: - env->ipr[IPR_FEN] = val & 1; - tmp64 = ldq_raw(hwpcb + 56); - tmp64 &= ~1; - tmp64 |= val & 1; - stq_raw(hwpcb + 56, tmp64); + case EXCP_DEV_INTERRUPT: + i = 0x0200; break; - case IPR_IPIR: - /* XXX: TODO: Send IRQ to CPU #ir[16] */ + case EXCP_MMFAULT: + i = 0x0280; break; - case IPR_IPL: - *oldvalp = ((int64_t)(env->ipr[IPR_IPL] << 59)) >> 59; - env->ipr[IPR_IPL] &= ~0x1F; - env->ipr[IPR_IPL] |= val & 0x1F; - /* XXX: may issue an interrupt or ASR _now_ */ - ret = 1; + case EXCP_UNALIGN: + i = 0x0300; break; - case IPR_KSP: - if (!(env->ipr[IPR_EXC_ADDR] & 1)) { - ret = -1; - } else { - if (env->features & FEATURE_SPS) - env->ipr[IPR_KSP] = val; - else - stq_raw(hwpcb + 0, val); - } + case EXCP_OPCDEC: + i = 0x0380; break; - case IPR_MCES: - env->ipr[IPR_MCES] &= ~((val & 0x7) | 0x18); - env->ipr[IPR_MCES] |= val & 0x18; + case EXCP_ARITH: + i = 0x0400; break; - case IPR_PERFMON: - /* Implementation specific */ - *oldvalp = 0; - ret = 1; + case EXCP_FEN: + i = 0x0480; break; - case IPR_PCBB: - /* Read-only */ - ret = -1; - break; - case IPR_PRBR: - env->ipr[IPR_PRBR] = val; - break; - case IPR_PTBR: - /* Read-only */ - ret = -1; - break; - case IPR_SCBB: - env->ipr[IPR_SCBB] = (uint32_t)val; - break; - case IPR_SIRR: - if (val & 0xF) { - env->ipr[IPR_SISR] |= 1 << (val & 0xF); - /* XXX: request a software interrupt _now_ */ + case EXCP_CALL_PAL: + i = env->error_code; + /* There are 64 entry points for both privileged and unprivileged, + with bit 0x80 indicating unprivileged. Each entry point gets + 64 bytes to do its job. 
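   For example, callsys is CALL_PAL 0x83: error_code is 0x83, bit 0x80 is
   set, and the dispatch below lands at palbr + 0x2000 + 3*64, i.e.
   palbr + 0x20c0.  A privileged call that is not handled inline by the
   translator, such as halt (0x00), ends up at palbr + 0x1000 + 0*64, i.e.
   palbr + 0x1000.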
*/ + if (i & 0x80) { + i = 0x2000 + (i - 0x80) * 64; + } else { + i = 0x1000 + i * 64; } break; - case IPR_SISR: - /* Read-only */ - ret = -1; - break; - case IPR_SSP: - if (env->features & FEATURE_SPS) - env->ipr[IPR_SSP] = val; - else - stq_raw(hwpcb + 16, val); - break; - case IPR_SYSPTBR: - if (env->features & FEATURE_VIRBND) - env->ipr[IPR_SYSPTBR] = val; - else - ret = -1; - break; - case IPR_TBCHK: - /* Read-only */ - ret = -1; - break; - case IPR_TBIA: - tlb_flush(env, 1); - break; - case IPR_TBIAP: - tlb_flush(env, 1); - break; - case IPR_TBIS: - tlb_flush_page(env, val); - break; - case IPR_TBISD: - tlb_flush_page(env, val); - break; - case IPR_TBISI: - tlb_flush_page(env, val); - break; - case IPR_USP: - if (env->features & FEATURE_SPS) - env->ipr[IPR_USP] = val; - else - stq_raw(hwpcb + 24, val); - break; - case IPR_VIRBND: - if (env->features & FEATURE_VIRBND) - env->ipr[IPR_VIRBND] = val; - else - ret = -1; - break; - case IPR_VPTB: - env->ipr[IPR_VPTB] = val; - break; - case IPR_WHAMI: - /* Read-only */ - ret = -1; - break; default: - /* Invalid */ - ret = -1; - break; + cpu_abort(env, "Unhandled CPU exception"); } - return ret; -} + /* Remember where the exception happened. Emulate real hardware in + that the low bit of the PC indicates PALmode. */ + env->exc_addr = env->pc | env->pal_mode; -void do_interrupt (CPUState *env) -{ - int excp; + /* Continue execution at the PALcode entry point. */ + env->pc = env->palbr + i; - env->ipr[IPR_EXC_ADDR] = env->pc | 1; - excp = env->exception_index; - env->exception_index = -1; - env->error_code = 0; - /* XXX: disable interrupts and memory mapping */ - if (env->ipr[IPR_PAL_BASE] != -1ULL) { - /* We use native PALcode */ - env->pc = env->ipr[IPR_PAL_BASE] + excp; - } else { - /* We use emulated PALcode */ - call_pal(env); - /* Emulate REI */ - env->pc = env->ipr[IPR_EXC_ADDR] & ~7; - env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1; - /* XXX: re-enable interrupts and memory mapping */ + /* Switch to PALmode. 
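   Entering PALmode also swaps in the shadow copies of $8-$14 and $25 (see
   swap_shadow_regs above), so the emulated PALcode gets a private set of
   scratch registers without having to spill guest state; the inverse swap
   happens in helper_hw_ret when the HW_RET target address has bit 0 clear.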
*/ + if (!env->pal_mode) { + env->pal_mode = 1; + swap_shadow_regs(env); } +#endif /* !USER_ONLY */ } -#endif void cpu_dump_state (CPUState *env, FILE *f, fprintf_function cpu_fprintf, int flags) @@ -548,7 +465,7 @@ void cpu_dump_state (CPUState *env, FILE *f, fprintf_function cpu_fprintf, }; int i; - cpu_fprintf(f, " PC " TARGET_FMT_lx " PS " TARGET_FMT_lx "\n", + cpu_fprintf(f, " PC " TARGET_FMT_lx " PS %02x\n", env->pc, env->ps); for (i = 0; i < 31; i++) { cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i, diff --git a/target-alpha/helper.h b/target-alpha/helper.h index ccf6a2aae9..2dec57e44b 100644 --- a/target-alpha/helper.h +++ b/target-alpha/helper.h @@ -100,27 +100,19 @@ DEF_HELPER_1(ieee_input_cmp, i64, i64) DEF_HELPER_1(ieee_input_s, i64, i64) #if !defined (CONFIG_USER_ONLY) -DEF_HELPER_0(hw_rei, void) DEF_HELPER_1(hw_ret, void, i64) -DEF_HELPER_2(mfpr, i64, int, i64) -DEF_HELPER_2(mtpr, void, int, i64) -DEF_HELPER_0(set_alt_mode, void) -DEF_HELPER_0(restore_mode, void) - -DEF_HELPER_1(ld_virt_to_phys, i64, i64) -DEF_HELPER_1(st_virt_to_phys, i64, i64) -DEF_HELPER_2(ldl_raw, void, i64, i64) -DEF_HELPER_2(ldq_raw, void, i64, i64) -DEF_HELPER_2(ldl_l_raw, void, i64, i64) -DEF_HELPER_2(ldq_l_raw, void, i64, i64) -DEF_HELPER_2(ldl_kernel, void, i64, i64) -DEF_HELPER_2(ldq_kernel, void, i64, i64) -DEF_HELPER_2(ldl_data, void, i64, i64) -DEF_HELPER_2(ldq_data, void, i64, i64) -DEF_HELPER_2(stl_raw, void, i64, i64) -DEF_HELPER_2(stq_raw, void, i64, i64) -DEF_HELPER_2(stl_c_raw, i64, i64, i64) -DEF_HELPER_2(stq_c_raw, i64, i64, i64) + +DEF_HELPER_1(ldl_phys, i64, i64) +DEF_HELPER_1(ldq_phys, i64, i64) +DEF_HELPER_1(ldl_l_phys, i64, i64) +DEF_HELPER_1(ldq_l_phys, i64, i64) +DEF_HELPER_2(stl_phys, void, i64, i64) +DEF_HELPER_2(stq_phys, void, i64, i64) +DEF_HELPER_2(stl_c_phys, i64, i64, i64) +DEF_HELPER_2(stq_c_phys, i64, i64, i64) + +DEF_HELPER_FLAGS_0(tbia, TCG_CALL_CONST, void) +DEF_HELPER_FLAGS_1(tbis, TCG_CALL_CONST, void, i64) #endif #include "def-helper.h" diff --git a/target-alpha/machine.c b/target-alpha/machine.c new file mode 100644 index 0000000000..76d70d9b35 --- /dev/null +++ b/target-alpha/machine.c @@ -0,0 +1,87 @@ +#include "hw/hw.h" +#include "hw/boards.h" + +static int get_fpcr(QEMUFile *f, void *opaque, size_t size) +{ + CPUAlphaState *env = opaque; + cpu_alpha_store_fpcr(env, qemu_get_be64(f)); + return 0; +} + +static void put_fpcr(QEMUFile *f, void *opaque, size_t size) +{ + CPUAlphaState *env = opaque; + qemu_put_be64(f, cpu_alpha_load_fpcr(env)); +} + +static const VMStateInfo vmstate_fpcr = { + .name = "fpcr", + .get = get_fpcr, + .put = put_fpcr, +}; + +static VMStateField vmstate_cpu_fields[] = { + VMSTATE_UINTTL_ARRAY(ir, CPUState, 31), + VMSTATE_UINTTL_ARRAY(fir, CPUState, 31), + /* Save the architecture value of the fpcr, not the internally + expanded version. Since this architecture value does not + exist in memory to be stored, this requires a but of hoop + jumping. We want OFFSET=0 so that we effectively pass ENV + to the helper functions, and we need to fill in the name by + hand since there's no field of that name. */ + { + .name = "fpcr", + .version_id = 0, + .size = sizeof(uint64_t), + .info = &vmstate_fpcr, + .flags = VMS_SINGLE, + .offset = 0 + }, + VMSTATE_UINTTL(pc, CPUState), + VMSTATE_UINTTL(unique, CPUState), + VMSTATE_UINTTL(lock_addr, CPUState), + VMSTATE_UINTTL(lock_value, CPUState), + /* Note that lock_st_addr is not saved; it is a temporary + used during the execution of the st[lq]_c insns. 
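   The hand-written fpcr entry above works because vmstate hands each
   callback the base pointer plus field->offset; with .offset = 0 the
   get/put routines receive the CPUAlphaState pointer itself and can route
   the value through cpu_alpha_load_fpcr / cpu_alpha_store_fpcr instead of
   copying the internally expanded fpcr_* bytes.  Roughly what
   vmstate_save_state does per field:

       field->info->put(f, (char *)opaque + field->offset, field->size);
       // with offset 0 the callback sees env directly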
*/ + + VMSTATE_UINT8(ps, CPUState), + VMSTATE_UINT8(intr_flag, CPUState), + VMSTATE_UINT8(pal_mode, CPUState), + VMSTATE_UINT8(fen, CPUState), + + VMSTATE_UINT32(pcc_ofs, CPUState), + + VMSTATE_UINTTL(trap_arg0, CPUState), + VMSTATE_UINTTL(trap_arg1, CPUState), + VMSTATE_UINTTL(trap_arg2, CPUState), + + VMSTATE_UINTTL(exc_addr, CPUState), + VMSTATE_UINTTL(palbr, CPUState), + VMSTATE_UINTTL(ptbr, CPUState), + VMSTATE_UINTTL(vptptr, CPUState), + VMSTATE_UINTTL(sysval, CPUState), + VMSTATE_UINTTL(usp, CPUState), + + VMSTATE_UINTTL_ARRAY(shadow, CPUState, 8), + VMSTATE_UINTTL_ARRAY(scratch, CPUState, 24), + + VMSTATE_END_OF_LIST() +}; + +static const VMStateDescription vmstate_cpu = { + .name = "cpu", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = vmstate_cpu_fields, +}; + +void cpu_save(QEMUFile *f, void *opaque) +{ + vmstate_save_state(f, &vmstate_cpu, opaque); +} + +int cpu_load(QEMUFile *f, void *opaque, int version_id) +{ + return vmstate_load_state(f, &vmstate_cpu, opaque, version_id); +} diff --git a/target-alpha/op_helper.c b/target-alpha/op_helper.c index 4ccb10b0f4..d33271958f 100644 --- a/target-alpha/op_helper.c +++ b/target-alpha/op_helper.c @@ -25,17 +25,57 @@ /*****************************************************************************/ /* Exceptions processing helpers */ -void QEMU_NORETURN helper_excp (int excp, int error) + +/* This should only be called from translate, via gen_excp. + We expect that ENV->PC has already been updated. */ +void QEMU_NORETURN helper_excp(int excp, int error) { env->exception_index = excp; env->error_code = error; cpu_loop_exit(); } +static void do_restore_state(void *retaddr) +{ + unsigned long pc = (unsigned long)retaddr; + + if (pc) { + TranslationBlock *tb = tb_find_pc(pc); + if (tb) { + cpu_restore_state(tb, env, pc); + } + } +} + +/* This may be called from any of the helpers to set up EXCEPTION_INDEX. */ +static void QEMU_NORETURN dynamic_excp(int excp, int error) +{ + env->exception_index = excp; + env->error_code = error; + do_restore_state(GETPC()); + cpu_loop_exit(); +} + +static void QEMU_NORETURN arith_excp(int exc, uint64_t mask) +{ + env->trap_arg0 = exc; + env->trap_arg1 = mask; + dynamic_excp(EXCP_ARITH, 0); +} + uint64_t helper_load_pcc (void) { - /* ??? This isn't a timer for which we have any rate info. */ +#ifndef CONFIG_USER_ONLY + /* In system mode we have access to a decent high-resolution clock. + In order to make OS-level time accounting work with the RPCC, + present it with a well-timed clock fixed at 250MHz. */ + return (((uint64_t)env->pcc_ofs << 32) + | (uint32_t)(qemu_get_clock_ns(vm_clock) >> 2)); +#else + /* In user-mode, vm_clock doesn't exist. Just pass through the host cpu + clock ticks. Also, don't bother taking PCC_OFS into account. 
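   The layout above mirrors the architected RPCC result: the low 32 bits
   are the free-running counter and the high 32 bits carry the per-process
   offset (pcc_ofs).  qemu_get_clock_ns(vm_clock) >> 2 is one tick per 4ns,
   i.e. the advertised 250MHz, so the 32-bit counter wraps roughly every
   2^32 * 4ns, about 17 seconds.  For illustration:

       uint64_t v = helper_load_pcc();
       uint32_t count  = (uint32_t)v;   // 250MHz cycle counter
       uint32_t offset = v >> 32;       // per-process offset, pcc_ofs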
*/ return (uint32_t)cpu_get_real_ticks(); +#endif } uint64_t helper_load_fpcr (void) @@ -53,7 +93,7 @@ uint64_t helper_addqv (uint64_t op1, uint64_t op2) uint64_t tmp = op1; op1 += op2; if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) { - helper_excp(EXCP_ARITH, EXC_M_IOV); + arith_excp(EXC_M_IOV, 0); } return op1; } @@ -63,7 +103,7 @@ uint64_t helper_addlv (uint64_t op1, uint64_t op2) uint64_t tmp = op1; op1 = (uint32_t)(op1 + op2); if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) { - helper_excp(EXCP_ARITH, EXC_M_IOV); + arith_excp(EXC_M_IOV, 0); } return op1; } @@ -73,7 +113,7 @@ uint64_t helper_subqv (uint64_t op1, uint64_t op2) uint64_t res; res = op1 - op2; if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) { - helper_excp(EXCP_ARITH, EXC_M_IOV); + arith_excp(EXC_M_IOV, 0); } return res; } @@ -83,7 +123,7 @@ uint64_t helper_sublv (uint64_t op1, uint64_t op2) uint32_t res; res = op1 - op2; if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) { - helper_excp(EXCP_ARITH, EXC_M_IOV); + arith_excp(EXC_M_IOV, 0); } return res; } @@ -93,7 +133,7 @@ uint64_t helper_mullv (uint64_t op1, uint64_t op2) int64_t res = (int64_t)op1 * (int64_t)op2; if (unlikely((int32_t)res != res)) { - helper_excp(EXCP_ARITH, EXC_M_IOV); + arith_excp(EXC_M_IOV, 0); } return (int64_t)((int32_t)res); } @@ -105,7 +145,7 @@ uint64_t helper_mulqv (uint64_t op1, uint64_t op2) muls64(&tl, &th, op1, op2); /* If th != 0 && th != -1, then we had an overflow */ if (unlikely((th + 1) > 1)) { - helper_excp(EXCP_ARITH, EXC_M_IOV); + arith_excp(EXC_M_IOV, 0); } return tl; } @@ -373,8 +413,6 @@ void helper_fp_exc_raise(uint32_t exc, uint32_t regno) if (exc) { uint32_t hw_exc = 0; - env->ipr[IPR_EXC_MASK] |= 1ull << regno; - if (exc & float_flag_invalid) { hw_exc |= EXC_M_INV; } @@ -390,7 +428,8 @@ void helper_fp_exc_raise(uint32_t exc, uint32_t regno) if (exc & float_flag_inexact) { hw_exc |= EXC_M_INE; } - helper_excp(EXCP_ARITH, hw_exc); + + arith_excp(hw_exc, 1ull << regno); } } @@ -420,7 +459,7 @@ uint64_t helper_ieee_input(uint64_t val) if (env->fpcr_dnz) { val &= 1ull << 63; } else { - helper_excp(EXCP_ARITH, EXC_M_UNF); + arith_excp(EXC_M_UNF, 0); } } } else if (exp == 0x7ff) { @@ -428,7 +467,7 @@ uint64_t helper_ieee_input(uint64_t val) /* ??? I'm not sure these exception bit flags are correct. I do know that the Linux kernel, at least, doesn't rely on them and just emulates the insn to figure out what exception to use. */ - helper_excp(EXCP_ARITH, frac ? EXC_M_INV : EXC_M_FOV); + arith_excp(frac ? EXC_M_INV : EXC_M_FOV, 0); } return val; } @@ -445,12 +484,12 @@ uint64_t helper_ieee_input_cmp(uint64_t val) if (env->fpcr_dnz) { val &= 1ull << 63; } else { - helper_excp(EXCP_ARITH, EXC_M_UNF); + arith_excp(EXC_M_UNF, 0); } } } else if (exp == 0x7ff && frac) { /* NaN. 
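   A NaN operand is never silently accepted on the compare path; it raises
   the invalid-operation trap just below.  In the DNZ case above, keeping
   only bit 63 flushes a denormal input to a correctly signed zero, e.g.
   the smallest subnormal 0x0000000000000001 becomes +0.0 and
   0x8000000000000001 becomes -0.0.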
*/ - helper_excp(EXCP_ARITH, EXC_M_INV); + arith_excp(EXC_M_INV, 0); } return val; } @@ -513,7 +552,7 @@ static inline float32 f_to_float32(uint64_t a) if (unlikely(!exp && mant_sig)) { /* Reserved operands / Dirty zero */ - helper_excp(EXCP_OPCDEC, 0); + dynamic_excp(EXCP_OPCDEC, 0); } if (exp < 3) { @@ -643,7 +682,7 @@ static inline float64 g_to_float64(uint64_t a) if (!exp && mant_sig) { /* Reserved operands / Dirty zero */ - helper_excp(EXCP_OPCDEC, 0); + dynamic_excp(EXCP_OPCDEC, 0); } if (exp < 3) { @@ -1156,187 +1195,122 @@ uint64_t helper_cvtqg (uint64_t a) /* PALcode support special instructions */ #if !defined (CONFIG_USER_ONLY) -void helper_hw_rei (void) -{ - env->pc = env->ipr[IPR_EXC_ADDR] & ~3; - env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1; - env->intr_flag = 0; - env->lock_addr = -1; - /* XXX: re-enable interrupts and memory mapping */ -} - void helper_hw_ret (uint64_t a) { env->pc = a & ~3; - env->ipr[IPR_EXC_ADDR] = a & 1; env->intr_flag = 0; env->lock_addr = -1; - /* XXX: re-enable interrupts and memory mapping */ -} - -uint64_t helper_mfpr (int iprn, uint64_t val) -{ - uint64_t tmp; - - if (cpu_alpha_mfpr(env, iprn, &tmp) == 0) - val = tmp; - - return val; -} - -void helper_mtpr (int iprn, uint64_t val) -{ - cpu_alpha_mtpr(env, iprn, val, NULL); + if ((a & 1) == 0) { + env->pal_mode = 0; + swap_shadow_regs(env); + } } -void helper_set_alt_mode (void) +void helper_tbia(void) { - env->saved_mode = env->ps & 0xC; - env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC); + tlb_flush(env, 1); } -void helper_restore_mode (void) +void helper_tbis(uint64_t p) { - env->ps = (env->ps & ~0xC) | env->saved_mode; + tlb_flush_page(env, p); } - #endif /*****************************************************************************/ /* Softmmu support */ #if !defined (CONFIG_USER_ONLY) - -/* XXX: the two following helpers are pure hacks. - * Hopefully, we emulate the PALcode, then we should never see - * HW_LD / HW_ST instructions. 
- */ -uint64_t helper_ld_virt_to_phys (uint64_t virtaddr) -{ - uint64_t tlb_addr, physaddr; - int index, mmu_idx; - void *retaddr; - - mmu_idx = cpu_mmu_index(env); - index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - redo: - tlb_addr = env->tlb_table[mmu_idx][index].addr_read; - if ((virtaddr & TARGET_PAGE_MASK) == - (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { - physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend; - } else { - /* the page is not in the TLB : fill it */ - retaddr = GETPC(); - tlb_fill(virtaddr, 0, mmu_idx, retaddr); - goto redo; - } - return physaddr; -} - -uint64_t helper_st_virt_to_phys (uint64_t virtaddr) +uint64_t helper_ldl_phys(uint64_t p) { - uint64_t tlb_addr, physaddr; - int index, mmu_idx; - void *retaddr; - - mmu_idx = cpu_mmu_index(env); - index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - redo: - tlb_addr = env->tlb_table[mmu_idx][index].addr_write; - if ((virtaddr & TARGET_PAGE_MASK) == - (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { - physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend; - } else { - /* the page is not in the TLB : fill it */ - retaddr = GETPC(); - tlb_fill(virtaddr, 1, mmu_idx, retaddr); - goto redo; - } - return physaddr; + return (int32_t)ldl_phys(p); } -void helper_ldl_raw(uint64_t t0, uint64_t t1) +uint64_t helper_ldq_phys(uint64_t p) { - ldl_raw(t1, t0); + return ldq_phys(p); } -void helper_ldq_raw(uint64_t t0, uint64_t t1) +uint64_t helper_ldl_l_phys(uint64_t p) { - ldq_raw(t1, t0); + env->lock_addr = p; + return env->lock_value = (int32_t)ldl_phys(p); } -void helper_ldl_l_raw(uint64_t t0, uint64_t t1) +uint64_t helper_ldq_l_phys(uint64_t p) { - env->lock = t1; - ldl_raw(t1, t0); + env->lock_addr = p; + return env->lock_value = ldl_phys(p); } -void helper_ldq_l_raw(uint64_t t0, uint64_t t1) +void helper_stl_phys(uint64_t p, uint64_t v) { - env->lock = t1; - ldl_raw(t1, t0); + stl_phys(p, v); } -void helper_ldl_kernel(uint64_t t0, uint64_t t1) +void helper_stq_phys(uint64_t p, uint64_t v) { - ldl_kernel(t1, t0); + stq_phys(p, v); } -void helper_ldq_kernel(uint64_t t0, uint64_t t1) +uint64_t helper_stl_c_phys(uint64_t p, uint64_t v) { - ldq_kernel(t1, t0); -} + uint64_t ret = 0; -void helper_ldl_data(uint64_t t0, uint64_t t1) -{ - ldl_data(t1, t0); -} + if (p == env->lock_addr) { + int32_t old = ldl_phys(p); + if (old == (int32_t)env->lock_value) { + stl_phys(p, v); + ret = 1; + } + } + env->lock_addr = -1; -void helper_ldq_data(uint64_t t0, uint64_t t1) -{ - ldq_data(t1, t0); + return ret; } -void helper_stl_raw(uint64_t t0, uint64_t t1) +uint64_t helper_stq_c_phys(uint64_t p, uint64_t v) { - stl_raw(t1, t0); -} + uint64_t ret = 0; -void helper_stq_raw(uint64_t t0, uint64_t t1) -{ - stq_raw(t1, t0); + if (p == env->lock_addr) { + uint64_t old = ldq_phys(p); + if (old == env->lock_value) { + stq_phys(p, v); + ret = 1; + } + } + env->lock_addr = -1; + + return ret; } -uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1) +static void QEMU_NORETURN do_unaligned_access(target_ulong addr, int is_write, + int is_user, void *retaddr) { - uint64_t ret; + uint64_t pc; + uint32_t insn; - if (t1 == env->lock) { - stl_raw(t1, t0); - ret = 0; - } else - ret = 1; + do_restore_state(retaddr); - env->lock = 1; + pc = env->pc; + insn = ldl_code(pc); - return ret; + env->trap_arg0 = addr; + env->trap_arg1 = insn >> 26; /* opcode */ + env->trap_arg2 = (insn >> 21) & 31; /* dest regno */ + helper_excp(EXCP_UNALIGN, 0); } -uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1) +void QEMU_NORETURN 
do_unassigned_access(target_phys_addr_t addr, int is_write, + int is_exec, int unused, int size) { - uint64_t ret; - - if (t1 == env->lock) { - stq_raw(t1, t0); - ret = 0; - } else - ret = 1; - - env->lock = 1; - - return ret; + env->trap_arg0 = addr; + env->trap_arg1 = is_write; + dynamic_excp(EXCP_MCHK, 0); } #define MMUSUFFIX _mmu +#define ALIGNED_ONLY #define SHIFT 0 #include "softmmu_template.h" @@ -1356,9 +1330,7 @@ uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1) /* XXX: fix it to restore all registers */ void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr) { - TranslationBlock *tb; CPUState *saved_env; - unsigned long pc; int ret; /* XXX: hack to restore env in all cases, even if not called from @@ -1366,21 +1338,11 @@ void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr) saved_env = env; env = cpu_single_env; ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1); - if (!likely(ret == 0)) { - if (likely(retaddr)) { - /* now we have a real cpu fault */ - pc = (unsigned long)retaddr; - tb = tb_find_pc(pc); - if (likely(tb)) { - /* the PC is inside the translated code. It means that we have - a virtual CPU fault */ - cpu_restore_state(tb, env, pc); - } - } + if (unlikely(ret != 0)) { + do_restore_state(retaddr); /* Exception index and error code are already set */ cpu_loop_exit(); } env = saved_env; } - #endif diff --git a/target-alpha/translate.c b/target-alpha/translate.c index 456ba51ac6..ad6c2ca448 100644 --- a/target-alpha/translate.c +++ b/target-alpha/translate.c @@ -47,10 +47,6 @@ struct DisasContext { CPUAlphaState *env; uint64_t pc; int mem_idx; -#if !defined (CONFIG_USER_ONLY) - int pal_mode; -#endif - uint32_t amask; /* Current rounding mode for this TB. */ int tb_rm; @@ -89,8 +85,10 @@ static TCGv cpu_pc; static TCGv cpu_lock_addr; static TCGv cpu_lock_st_addr; static TCGv cpu_lock_value; -#ifdef CONFIG_USER_ONLY -static TCGv cpu_uniq; +static TCGv cpu_unique; +#ifndef CONFIG_USER_ONLY +static TCGv cpu_sysval; +static TCGv cpu_usp; #endif /* register names */ @@ -135,9 +133,13 @@ static void alpha_translate_init(void) offsetof(CPUState, lock_value), "lock_value"); -#ifdef CONFIG_USER_ONLY - cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, unique), "uniq"); + cpu_unique = tcg_global_mem_new_i64(TCG_AREG0, + offsetof(CPUState, unique), "unique"); +#ifndef CONFIG_USER_ONLY + cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0, + offsetof(CPUState, sysval), "sysval"); + cpu_usp = tcg_global_mem_new_i64(TCG_AREG0, + offsetof(CPUState, usp), "usp"); #endif /* register helpers */ @@ -147,17 +149,21 @@ static void alpha_translate_init(void) done_init = 1; } -static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code) +static void gen_excp_1(int exception, int error_code) { TCGv_i32 tmp1, tmp2; - tcg_gen_movi_i64(cpu_pc, ctx->pc); tmp1 = tcg_const_i32(exception); tmp2 = tcg_const_i32(error_code); gen_helper_excp(tmp1, tmp2); tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp1); +} +static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code) +{ + tcg_gen_movi_i64(cpu_pc, ctx->pc); + gen_excp_1(exception, error_code); return EXIT_NORETURN; } @@ -322,7 +328,7 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb, #if defined(CONFIG_USER_ONLY) addr = cpu_lock_st_addr; #else - addr = tcg_local_new(); + addr = tcg_temp_local_new(); #endif if (rb != 31) { @@ -345,7 +351,7 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb, lab_fail = 
gen_new_label(); lab_done = gen_new_label(); - tcg_gen_brcond(TCG_COND_NE, addr, cpu_lock_addr, lab_fail); + tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail); val = tcg_temp_new(); if (quad) { @@ -353,7 +359,7 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb, } else { tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx); } - tcg_gen_brcond(TCG_COND_NE, val, cpu_lock_value, lab_fail); + tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail); if (quad) { tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx); @@ -1464,6 +1470,194 @@ static void gen_rx(int ra, int set) tcg_temp_free_i32(tmp); } +static ExitStatus gen_call_pal(DisasContext *ctx, int palcode) +{ + /* We're emulating OSF/1 PALcode. Many of these are trivial access + to internal cpu registers. */ + + /* Unprivileged PAL call */ + if (palcode >= 0x80 && palcode < 0xC0) { + switch (palcode) { + case 0x86: + /* IMB */ + /* No-op inside QEMU. */ + break; + case 0x9E: + /* RDUNIQUE */ + tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique); + break; + case 0x9F: + /* WRUNIQUE */ + tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]); + break; + default: + return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf); + } + return NO_EXIT; + } + +#ifndef CONFIG_USER_ONLY + /* Privileged PAL code */ + if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) { + switch (palcode) { + case 0x01: + /* CFLUSH */ + /* No-op inside QEMU. */ + break; + case 0x02: + /* DRAINA */ + /* No-op inside QEMU. */ + break; + case 0x2D: + /* WRVPTPTR */ + tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUState, vptptr)); + break; + case 0x31: + /* WRVAL */ + tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]); + break; + case 0x32: + /* RDVAL */ + tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval); + break; + + case 0x35: { + /* SWPIPL */ + TCGv tmp; + + /* Note that we already know we're in kernel mode, so we know + that PS only contains the 3 IPL bits. */ + tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps)); + + /* But make sure and store only the 3 IPL bits from the user. 
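   The net effect is the usual OSF/1 swpipl contract: v0 returns the old
   IPL and a0 supplies the new one.  A guest kernel typically brackets a
   critical section with it, roughly:

       old = swpipl(7);   // IPL 7 blocks all maskable interrupts
       // ... critical section ...
       swpipl(old);       // restore the previous level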
*/ + tmp = tcg_temp_new(); + tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK); + tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUState, ps)); + tcg_temp_free(tmp); + break; + } + + case 0x36: + /* RDPS */ + tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps)); + break; + case 0x38: + /* WRUSP */ + tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]); + break; + case 0x3A: + /* RDUSP */ + tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp); + break; + case 0x3C: + /* WHAMI */ + tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env, + offsetof(CPUState, cpu_index)); + break; + + default: + return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f); + } + return NO_EXIT; + } +#endif + + return gen_invalid(ctx); +} + +#ifndef CONFIG_USER_ONLY + +#define PR_BYTE 0x100000 +#define PR_LONG 0x200000 + +static int cpu_pr_data(int pr) +{ + switch (pr) { + case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE; + case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE; + case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG; + case 3: return offsetof(CPUAlphaState, trap_arg0); + case 4: return offsetof(CPUAlphaState, trap_arg1); + case 5: return offsetof(CPUAlphaState, trap_arg2); + case 6: return offsetof(CPUAlphaState, exc_addr); + case 7: return offsetof(CPUAlphaState, palbr); + case 8: return offsetof(CPUAlphaState, ptbr); + case 9: return offsetof(CPUAlphaState, vptptr); + case 10: return offsetof(CPUAlphaState, unique); + case 11: return offsetof(CPUAlphaState, sysval); + case 12: return offsetof(CPUAlphaState, usp); + + case 32 ... 39: + return offsetof(CPUAlphaState, shadow[pr - 32]); + case 40 ... 63: + return offsetof(CPUAlphaState, scratch[pr - 40]); + } + return 0; +} + +static void gen_mfpr(int ra, int regno) +{ + int data = cpu_pr_data(regno); + + /* In our emulated PALcode, these processor registers have no + side effects from reading. */ + if (ra == 31) { + return; + } + + /* The basic registers are data only, and unknown registers + are read-zero, write-ignore. */ + if (data == 0) { + tcg_gen_movi_i64(cpu_ir[ra], 0); + } else if (data & PR_BYTE) { + tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE); + } else if (data & PR_LONG) { + tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG); + } else { + tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data); + } +} + +static void gen_mtpr(int rb, int regno) +{ + TCGv tmp; + + if (rb == 31) { + tmp = tcg_const_i64(0); + } else { + tmp = cpu_ir[rb]; + } + + /* These two register numbers perform a TLB cache flush. Thankfully we + can only do this inside PALmode, which means that the current basic + block cannot be affected by the change in mappings. */ + if (regno == 255) { + /* TBIA */ + gen_helper_tbia(); + } else if (regno == 254) { + /* TBIS */ + gen_helper_tbis(tmp); + } else { + /* The basic registers are data only, and unknown registers + are read-zero, write-ignore. 
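   cpu_pr_data folds the access width into the returned offset, so the
   emitters below can pick the right load/store size without a second
   table.  For instance:

       cpu_pr_data(2)  == offsetof(CPUAlphaState, pcc_ofs) | PR_LONG
       cpu_pr_data(32) == offsetof(CPUAlphaState, shadow[0])   // plain 64-bit

   and an unknown register number returns 0, which is what gives the
   read-zero, write-ignore behaviour described above.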
*/ + int data = cpu_pr_data(regno); + if (data != 0) { + if (data & PR_BYTE) { + tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE); + } else if (data & PR_LONG) { + tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG); + } else { + tcg_gen_st_i64(tmp, cpu_env, data); + } + } + } + + if (rb == 31) { + tcg_temp_free(tmp); + } +} +#endif /* !USER_ONLY*/ + static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) { uint32_t palcode; @@ -1499,32 +1693,8 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) switch (opc) { case 0x00: /* CALL_PAL */ -#ifdef CONFIG_USER_ONLY - if (palcode == 0x9E) { - /* RDUNIQUE */ - tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq); - break; - } else if (palcode == 0x9F) { - /* WRUNIQUE */ - tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]); - break; - } -#endif - if (palcode >= 0x80 && palcode < 0xC0) { - /* Unprivileged PAL call */ - ret = gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0); - break; - } -#ifndef CONFIG_USER_ONLY - if (palcode < 0x40) { - /* Privileged PAL code */ - if (ctx->mem_idx & 1) - goto invalid_opc; - ret = gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0); - } -#endif - /* Invalid PAL call */ - goto invalid_opc; + ret = gen_call_pal(ctx, palcode); + break; case 0x01: /* OPC01 */ goto invalid_opc; @@ -1566,20 +1736,22 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0x0A: /* LDBU */ - if (!(ctx->amask & AMASK_BWX)) - goto invalid_opc; - gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) { + gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0); + break; + } + goto invalid_opc; case 0x0B: /* LDQ_U */ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1); break; case 0x0C: /* LDWU */ - if (!(ctx->amask & AMASK_BWX)) - goto invalid_opc; - gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) { + gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0); + break; + } + goto invalid_opc; case 0x0D: /* STW */ gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0); @@ -1983,20 +2155,12 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) case 0x61: /* AMASK */ if (likely(rc != 31)) { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], lit); - else - tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); - switch (ctx->env->implver) { - case IMPLVER_2106x: - /* EV4, EV45, LCA, LCA45 & EV5 */ - break; - case IMPLVER_21164: - case IMPLVER_21264: - case IMPLVER_21364: - tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc], - ~(uint64_t)ctx->amask); - break; + uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT; + + if (islit) { + tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask); + } else { + tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask); } } break; @@ -2210,8 +2374,9 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) switch (fpfn) { /* fn11 & 0x3F */ case 0x04: /* ITOFS */ - if (!(ctx->amask & AMASK_FIX)) + if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) { goto invalid_opc; + } if (likely(rc != 31)) { if (ra != 31) { TCGv_i32 tmp = tcg_temp_new_i32(); @@ -2224,20 +2389,23 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0x0A: /* SQRTF */ - if (!(ctx->amask & AMASK_FIX)) - goto invalid_opc; - gen_fsqrtf(rb, rc); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) { + gen_fsqrtf(rb, rc); + break; + } + goto invalid_opc; case 0x0B: /* SQRTS */ - if (!(ctx->amask & AMASK_FIX)) - goto invalid_opc; - gen_fsqrts(ctx, rb, rc, fn11); - 
break; + if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) { + gen_fsqrts(ctx, rb, rc, fn11); + break; + } + goto invalid_opc; case 0x14: /* ITOFF */ - if (!(ctx->amask & AMASK_FIX)) + if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) { goto invalid_opc; + } if (likely(rc != 31)) { if (ra != 31) { TCGv_i32 tmp = tcg_temp_new_i32(); @@ -2250,8 +2418,9 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0x24: /* ITOFT */ - if (!(ctx->amask & AMASK_FIX)) + if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) { goto invalid_opc; + } if (likely(rc != 31)) { if (ra != 31) tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]); @@ -2261,16 +2430,18 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0x2A: /* SQRTG */ - if (!(ctx->amask & AMASK_FIX)) - goto invalid_opc; - gen_fsqrtg(rb, rc); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) { + gen_fsqrtg(rb, rc); + break; + } + goto invalid_opc; case 0x02B: /* SQRTT */ - if (!(ctx->amask & AMASK_FIX)) - goto invalid_opc; - gen_fsqrtt(ctx, rb, rc, fn11); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) { + gen_fsqrtt(ctx, rb, rc, fn11); + break; + } + goto invalid_opc; default: goto invalid_opc; } @@ -2571,18 +2742,13 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0x19: /* HW_MFPR (PALcode) */ -#if defined (CONFIG_USER_ONLY) - goto invalid_opc; -#else - if (!ctx->pal_mode) - goto invalid_opc; - if (ra != 31) { - TCGv tmp = tcg_const_i32(insn & 0xFF); - gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]); - tcg_temp_free(tmp); +#ifndef CONFIG_USER_ONLY + if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { + gen_mfpr(ra, insn & 0xffff); + break; } - break; #endif + goto invalid_opc; case 0x1A: /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch prediction stack action, which of course we don't implement. 
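   The four variants are selected by instruction bits 15:14 (0 = JMP,
   1 = JSR, 2 = RET, 3 = JSR_COROUTINE); architecturally they all do the
   same thing, roughly ra gets the PC of the next insn and PC = rb & ~3,
   and the encoding only differs in the hint it gives a hardware
   return-address predictor.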
*/ @@ -2598,13 +2764,15 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0x1B: /* HW_LD (PALcode) */ -#if defined (CONFIG_USER_ONLY) - goto invalid_opc; -#else - if (!ctx->pal_mode) - goto invalid_opc; - if (ra != 31) { - TCGv addr = tcg_temp_new(); +#ifndef CONFIG_USER_ONLY + if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { + TCGv addr; + + if (ra == 31) { + break; + } + + addr = tcg_temp_new(); if (rb != 31) tcg_gen_addi_i64(addr, cpu_ir[rb], disp12); else @@ -2612,27 +2780,26 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) switch ((insn >> 12) & 0xF) { case 0x0: /* Longword physical access (hw_ldl/p) */ - gen_helper_ldl_raw(cpu_ir[ra], addr); + gen_helper_ldl_phys(cpu_ir[ra], addr); break; case 0x1: /* Quadword physical access (hw_ldq/p) */ - gen_helper_ldq_raw(cpu_ir[ra], addr); + gen_helper_ldq_phys(cpu_ir[ra], addr); break; case 0x2: /* Longword physical access with lock (hw_ldl_l/p) */ - gen_helper_ldl_l_raw(cpu_ir[ra], addr); + gen_helper_ldl_l_phys(cpu_ir[ra], addr); break; case 0x3: /* Quadword physical access with lock (hw_ldq_l/p) */ - gen_helper_ldq_l_raw(cpu_ir[ra], addr); + gen_helper_ldq_l_phys(cpu_ir[ra], addr); break; case 0x4: /* Longword virtual PTE fetch (hw_ldl/v) */ - tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0); - break; + goto invalid_opc; case 0x5: /* Quadword virtual PTE fetch (hw_ldq/v) */ - tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0); + goto invalid_opc; break; case 0x6: /* Incpu_ir[ra]id */ @@ -2642,63 +2809,47 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) goto invalid_opc; case 0x8: /* Longword virtual access (hw_ldl) */ - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_ldl_raw(cpu_ir[ra], addr); - break; + goto invalid_opc; case 0x9: /* Quadword virtual access (hw_ldq) */ - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_ldq_raw(cpu_ir[ra], addr); - break; + goto invalid_opc; case 0xA: /* Longword virtual access with protection check (hw_ldl/w) */ - tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0); + tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX); break; case 0xB: /* Quadword virtual access with protection check (hw_ldq/w) */ - tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0); + tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX); break; case 0xC: /* Longword virtual access with alt access mode (hw_ldl/a)*/ - gen_helper_set_alt_mode(); - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_ldl_raw(cpu_ir[ra], addr); - gen_helper_restore_mode(); - break; + goto invalid_opc; case 0xD: /* Quadword virtual access with alt access mode (hw_ldq/a) */ - gen_helper_set_alt_mode(); - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_ldq_raw(cpu_ir[ra], addr); - gen_helper_restore_mode(); - break; + goto invalid_opc; case 0xE: /* Longword virtual access with alternate access mode and - * protection checks (hw_ldl/wa) - */ - gen_helper_set_alt_mode(); - gen_helper_ldl_data(cpu_ir[ra], addr); - gen_helper_restore_mode(); + protection checks (hw_ldl/wa) */ + tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX); break; case 0xF: /* Quadword virtual access with alternate access mode and - * protection checks (hw_ldq/wa) - */ - gen_helper_set_alt_mode(); - gen_helper_ldq_data(cpu_ir[ra], addr); - gen_helper_restore_mode(); + protection checks (hw_ldq/wa) */ + tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX); break; } tcg_temp_free(addr); + break; } - break; #endif + goto invalid_opc; case 0x1C: switch (fn7) { case 0x00: /* SEXTB */ - if (!(ctx->amask & AMASK_BWX)) + if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) { 
goto invalid_opc; + } if (likely(rc != 31)) { if (islit) tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit)); @@ -2708,138 +2859,164 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0x01: /* SEXTW */ - if (!(ctx->amask & AMASK_BWX)) - goto invalid_opc; - if (likely(rc != 31)) { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit)); - else - tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]); + if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) { + if (likely(rc != 31)) { + if (islit) { + tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit)); + } else { + tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]); + } + } + break; } - break; + goto invalid_opc; case 0x30: /* CTPOP */ - if (!(ctx->amask & AMASK_CIX)) - goto invalid_opc; - if (likely(rc != 31)) { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit)); - else - gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]); + if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) { + if (likely(rc != 31)) { + if (islit) { + tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit)); + } else { + gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]); + } + } + break; } - break; + goto invalid_opc; case 0x31: /* PERR */ - if (!(ctx->amask & AMASK_MVI)) - goto invalid_opc; - gen_perr(ra, rb, rc, islit, lit); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { + gen_perr(ra, rb, rc, islit, lit); + break; + } + goto invalid_opc; case 0x32: /* CTLZ */ - if (!(ctx->amask & AMASK_CIX)) - goto invalid_opc; - if (likely(rc != 31)) { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], clz64(lit)); - else - gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]); + if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) { + if (likely(rc != 31)) { + if (islit) { + tcg_gen_movi_i64(cpu_ir[rc], clz64(lit)); + } else { + gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]); + } + } + break; } - break; + goto invalid_opc; case 0x33: /* CTTZ */ - if (!(ctx->amask & AMASK_CIX)) - goto invalid_opc; - if (likely(rc != 31)) { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit)); - else - gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]); + if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) { + if (likely(rc != 31)) { + if (islit) { + tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit)); + } else { + gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]); + } + } + break; } - break; + goto invalid_opc; case 0x34: /* UNPKBW */ - if (!(ctx->amask & AMASK_MVI)) - goto invalid_opc; - if (real_islit || ra != 31) - goto invalid_opc; - gen_unpkbw (rb, rc); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { + if (real_islit || ra != 31) { + goto invalid_opc; + } + gen_unpkbw(rb, rc); + break; + } + goto invalid_opc; case 0x35: /* UNPKBL */ - if (!(ctx->amask & AMASK_MVI)) - goto invalid_opc; - if (real_islit || ra != 31) - goto invalid_opc; - gen_unpkbl (rb, rc); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { + if (real_islit || ra != 31) { + goto invalid_opc; + } + gen_unpkbl(rb, rc); + break; + } + goto invalid_opc; case 0x36: /* PKWB */ - if (!(ctx->amask & AMASK_MVI)) - goto invalid_opc; - if (real_islit || ra != 31) - goto invalid_opc; - gen_pkwb (rb, rc); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { + if (real_islit || ra != 31) { + goto invalid_opc; + } + gen_pkwb(rb, rc); + break; + } + goto invalid_opc; case 0x37: /* PKLB */ - if (!(ctx->amask & AMASK_MVI)) - goto invalid_opc; - if (real_islit || ra != 31) - goto invalid_opc; - gen_pklb (rb, rc); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { + if (real_islit || ra != 31) { + goto invalid_opc; + } + gen_pklb(rb, rc); + break; + } + goto invalid_opc; case 0x38: /* MINSB8 */ - if (!(ctx->amask & AMASK_MVI)) - 
goto invalid_opc; - gen_minsb8 (ra, rb, rc, islit, lit); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { + gen_minsb8(ra, rb, rc, islit, lit); + break; + } + goto invalid_opc; case 0x39: /* MINSW4 */ - if (!(ctx->amask & AMASK_MVI)) - goto invalid_opc; - gen_minsw4 (ra, rb, rc, islit, lit); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { + gen_minsw4(ra, rb, rc, islit, lit); + break; + } + goto invalid_opc; case 0x3A: /* MINUB8 */ - if (!(ctx->amask & AMASK_MVI)) - goto invalid_opc; - gen_minub8 (ra, rb, rc, islit, lit); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { + gen_minub8(ra, rb, rc, islit, lit); + break; + } + goto invalid_opc; case 0x3B: /* MINUW4 */ - if (!(ctx->amask & AMASK_MVI)) - goto invalid_opc; - gen_minuw4 (ra, rb, rc, islit, lit); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { + gen_minuw4(ra, rb, rc, islit, lit); + break; + } + goto invalid_opc; case 0x3C: /* MAXUB8 */ - if (!(ctx->amask & AMASK_MVI)) - goto invalid_opc; - gen_maxub8 (ra, rb, rc, islit, lit); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { + gen_maxub8(ra, rb, rc, islit, lit); + break; + } + goto invalid_opc; case 0x3D: /* MAXUW4 */ - if (!(ctx->amask & AMASK_MVI)) - goto invalid_opc; - gen_maxuw4 (ra, rb, rc, islit, lit); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { + gen_maxuw4(ra, rb, rc, islit, lit); + break; + } + goto invalid_opc; case 0x3E: /* MAXSB8 */ - if (!(ctx->amask & AMASK_MVI)) - goto invalid_opc; - gen_maxsb8 (ra, rb, rc, islit, lit); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { + gen_maxsb8(ra, rb, rc, islit, lit); + break; + } + goto invalid_opc; case 0x3F: /* MAXSW4 */ - if (!(ctx->amask & AMASK_MVI)) - goto invalid_opc; - gen_maxsw4 (ra, rb, rc, islit, lit); - break; + if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { + gen_maxsw4(ra, rb, rc, islit, lit); + break; + } + goto invalid_opc; case 0x70: /* FTOIT */ - if (!(ctx->amask & AMASK_FIX)) + if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) { goto invalid_opc; + } if (likely(rc != 31)) { if (ra != 31) tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]); @@ -2849,8 +3026,9 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0x78: /* FTOIS */ - if (!(ctx->amask & AMASK_FIX)) + if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) { goto invalid_opc; + } if (rc != 31) { TCGv_i32 tmp1 = tcg_temp_new_i32(); if (ra != 31) @@ -2870,57 +3048,37 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0x1D: /* HW_MTPR (PALcode) */ -#if defined (CONFIG_USER_ONLY) - goto invalid_opc; -#else - if (!ctx->pal_mode) - goto invalid_opc; - else { - TCGv tmp1 = tcg_const_i32(insn & 0xFF); - if (ra != 31) - gen_helper_mtpr(tmp1, cpu_ir[ra]); - else { - TCGv tmp2 = tcg_const_i64(0); - gen_helper_mtpr(tmp1, tmp2); - tcg_temp_free(tmp2); - } - tcg_temp_free(tmp1); - ret = EXIT_PC_STALE; +#ifndef CONFIG_USER_ONLY + if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { + gen_mtpr(rb, insn & 0xffff); + break; } - break; #endif - case 0x1E: - /* HW_REI (PALcode) */ -#if defined (CONFIG_USER_ONLY) goto invalid_opc; -#else - if (!ctx->pal_mode) - goto invalid_opc; - if (rb == 31) { - /* "Old" alpha */ - gen_helper_hw_rei(); - } else { - TCGv tmp; - - if (ra != 31) { - tmp = tcg_temp_new(); - tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51)); - } else - tmp = tcg_const_i64(((int64_t)insn << 51) >> 51); - gen_helper_hw_ret(tmp); - tcg_temp_free(tmp); + case 0x1E: + /* HW_RET (PALcode) */ +#ifndef CONFIG_USER_ONLY + if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { + if (rb == 31) { + /* 
Pre-EV6 CPUs interpreted this as HW_REI, loading the return + address from EXC_ADDR. This turns out to be useful for our + emulation PALcode, so continue to accept it. */ + TCGv tmp = tcg_temp_new(); + tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUState, exc_addr)); + gen_helper_hw_ret(tmp); + tcg_temp_free(tmp); + } else { + gen_helper_hw_ret(cpu_ir[rb]); + } + ret = EXIT_PC_UPDATED; + break; } - ret = EXIT_PC_UPDATED; - break; #endif + goto invalid_opc; case 0x1F: /* HW_ST (PALcode) */ -#if defined (CONFIG_USER_ONLY) - goto invalid_opc; -#else - if (!ctx->pal_mode) - goto invalid_opc; - else { +#ifndef CONFIG_USER_ONLY + if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { TCGv addr, val; addr = tcg_temp_new(); if (rb != 31) @@ -2936,30 +3094,26 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) switch ((insn >> 12) & 0xF) { case 0x0: /* Longword physical access */ - gen_helper_stl_raw(val, addr); + gen_helper_stl_phys(addr, val); break; case 0x1: /* Quadword physical access */ - gen_helper_stq_raw(val, addr); + gen_helper_stq_phys(addr, val); break; case 0x2: /* Longword physical access with lock */ - gen_helper_stl_c_raw(val, val, addr); + gen_helper_stl_c_phys(val, addr, val); break; case 0x3: /* Quadword physical access with lock */ - gen_helper_stq_c_raw(val, val, addr); + gen_helper_stq_c_phys(val, addr, val); break; case 0x4: /* Longword virtual access */ - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_stl_raw(val, addr); - break; + goto invalid_opc; case 0x5: /* Quadword virtual access */ - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_stq_raw(val, addr); - break; + goto invalid_opc; case 0x6: /* Invalid */ goto invalid_opc; @@ -2980,18 +3134,10 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) goto invalid_opc; case 0xC: /* Longword virtual access with alternate access mode */ - gen_helper_set_alt_mode(); - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_stl_raw(val, addr); - gen_helper_restore_mode(); - break; + goto invalid_opc; case 0xD: /* Quadword virtual access with alternate access mode */ - gen_helper_set_alt_mode(); - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_stl_raw(val, addr); - gen_helper_restore_mode(); - break; + goto invalid_opc; case 0xE: /* Invalid */ goto invalid_opc; @@ -3002,9 +3148,10 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) if (ra == 31) tcg_temp_free(val); tcg_temp_free(addr); + break; } - break; #endif + goto invalid_opc; case 0x20: /* LDF */ gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0); @@ -3155,13 +3302,7 @@ static inline void gen_intermediate_code_internal(CPUState *env, ctx.tb = tb; ctx.env = env; ctx.pc = pc_start; - ctx.amask = env->amask; -#if defined (CONFIG_USER_ONLY) - ctx.mem_idx = 0; -#else - ctx.mem_idx = ((env->ps >> 3) & 3); - ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1; -#endif + ctx.mem_idx = cpu_mmu_index(env); /* ??? Every TB begins with unset rounding mode, to be initialized on the first fp insn of the TB. Alternately we could define a proper @@ -3211,18 +3352,15 @@ static inline void gen_intermediate_code_internal(CPUState *env, ctx.pc += 4; ret = translate_one(ctxp, insn); - if (ret == NO_EXIT) { - /* If we reach a page boundary, are single stepping, - or exhaust instruction count, stop generation. 
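   With the 8K Alpha pages and fixed 4-byte instructions, the page-boundary
   test alone caps a TB at TARGET_PAGE_SIZE / 4 = 2048 insns.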
*/ - if (env->singlestep_enabled) { - gen_excp(&ctx, EXCP_DEBUG, 0); - ret = EXIT_PC_UPDATED; - } else if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0 - || gen_opc_ptr >= gen_opc_end - || num_insns >= max_insns - || singlestep) { - ret = EXIT_PC_STALE; - } + /* If we reach a page boundary, are single stepping, + or exhaust instruction count, stop generation. */ + if (ret == NO_EXIT + && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0 + || gen_opc_ptr >= gen_opc_end + || num_insns >= max_insns + || singlestep + || env->singlestep_enabled)) { + ret = EXIT_PC_STALE; } } while (ret == NO_EXIT); @@ -3238,7 +3376,11 @@ static inline void gen_intermediate_code_internal(CPUState *env, tcg_gen_movi_i64(cpu_pc, ctx.pc); /* FALLTHRU */ case EXIT_PC_UPDATED: - tcg_gen_exit_tb(0); + if (env->singlestep_enabled) { + gen_excp_1(EXCP_DEBUG, 0); + } else { + tcg_gen_exit_tb(0); + } break; default: abort(); @@ -3325,43 +3467,13 @@ CPUAlphaState * cpu_alpha_init (const char *cpu_model) env->implver = implver; env->amask = amask; - env->ps = 0x1F00; #if defined (CONFIG_USER_ONLY) - env->ps |= 1 << 3; + env->ps = PS_USER_MODE; cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD | FPCR_UNFD | FPCR_INED | FPCR_DNOD)); -#else - pal_init(env); #endif env->lock_addr = -1; - - /* Initialize IPR */ -#if defined (CONFIG_USER_ONLY) - env->ipr[IPR_EXC_ADDR] = 0; - env->ipr[IPR_EXC_SUM] = 0; - env->ipr[IPR_EXC_MASK] = 0; -#else - { - // uint64_t hwpcb; - // hwpcb = env->ipr[IPR_PCBB]; - env->ipr[IPR_ASN] = 0; - env->ipr[IPR_ASTEN] = 0; - env->ipr[IPR_ASTSR] = 0; - env->ipr[IPR_DATFX] = 0; - /* XXX: fix this */ - // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8); - // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0); - // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16); - // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24); - env->ipr[IPR_FEN] = 0; - env->ipr[IPR_IPL] = 31; - env->ipr[IPR_MCES] = 0; - env->ipr[IPR_PERFMON] = 0; /* Implementation specific */ - // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32); - env->ipr[IPR_SISR] = 0; - env->ipr[IPR_VIRBND] = -1ULL; - } -#endif + env->fen = 1; qemu_init_vcpu(env); return env; diff --git a/target-arm/cpu.h b/target-arm/cpu.h index d5af64465f..01f5b57fbc 100644 --- a/target-arm/cpu.h +++ b/target-arm/cpu.h @@ -55,6 +55,10 @@ #define ARMV7M_EXCP_PENDSV 14 #define ARMV7M_EXCP_SYSTICK 15 +/* ARM-specific interrupt pending bits. */ +#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1 + + typedef void ARMWriteCPFunc(void *opaque, int cp_info, int srcreg, int operand, uint32_t value); typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info, diff --git a/target-arm/exec.h b/target-arm/exec.h index 44e1b55aa2..db6608ec8b 100644 --- a/target-arm/exec.h +++ b/target-arm/exec.h @@ -21,8 +21,6 @@ register struct CPUARMState *env asm(AREG0); -#define M0 env->iwmmxt.val - #include "cpu.h" #include "exec-all.h" diff --git a/target-arm/helper.c b/target-arm/helper.c index 62ae72ec27..12084167d6 100644 --- a/target-arm/helper.c +++ b/target-arm/helper.c @@ -848,6 +848,7 @@ void do_interrupt(CPUARMState *env) return; } } + env->cp15.c5_insn = 2; /* Fall through to prefetch abort. 
*/ case EXCP_PREFETCH_ABORT: new_mode = ARM_CPU_MODE_ABT; @@ -2355,7 +2356,7 @@ static inline int vfp_exceptbits_from_host(int host_bits) target_bits |= 2; if (host_bits & float_flag_overflow) target_bits |= 4; - if (host_bits & float_flag_underflow) + if (host_bits & (float_flag_underflow | float_flag_output_denormal)) target_bits |= 8; if (host_bits & float_flag_inexact) target_bits |= 0x10; @@ -2526,99 +2527,39 @@ DO_VFP_cmp(s, float32) DO_VFP_cmp(d, float64) #undef DO_VFP_cmp -/* Integer to float conversion. */ -float32 VFP_HELPER(uito, s)(uint32_t x, CPUState *env) -{ - return uint32_to_float32(x, &env->vfp.fp_status); -} - -float64 VFP_HELPER(uito, d)(uint32_t x, CPUState *env) -{ - return uint32_to_float64(x, &env->vfp.fp_status); -} - -float32 VFP_HELPER(sito, s)(uint32_t x, CPUState *env) -{ - return int32_to_float32(x, &env->vfp.fp_status); -} +/* Integer to float and float to integer conversions */ -float64 VFP_HELPER(sito, d)(uint32_t x, CPUState *env) -{ - return int32_to_float64(x, &env->vfp.fp_status); -} - -/* Float to integer conversion. */ -uint32_t VFP_HELPER(toui, s)(float32 x, CPUState *env) -{ - if (float32_is_any_nan(x)) { - float_raise(float_flag_invalid, &env->vfp.fp_status); - return 0; - } - return float32_to_uint32(x, &env->vfp.fp_status); -} - -uint32_t VFP_HELPER(toui, d)(float64 x, CPUState *env) -{ - if (float64_is_any_nan(x)) { - float_raise(float_flag_invalid, &env->vfp.fp_status); - return 0; - } - return float64_to_uint32(x, &env->vfp.fp_status); -} - -uint32_t VFP_HELPER(tosi, s)(float32 x, CPUState *env) -{ - if (float32_is_any_nan(x)) { - float_raise(float_flag_invalid, &env->vfp.fp_status); - return 0; - } - return float32_to_int32(x, &env->vfp.fp_status); -} - -uint32_t VFP_HELPER(tosi, d)(float64 x, CPUState *env) -{ - if (float64_is_any_nan(x)) { - float_raise(float_flag_invalid, &env->vfp.fp_status); - return 0; - } - return float64_to_int32(x, &env->vfp.fp_status); +#define CONV_ITOF(name, fsz, sign) \ + float##fsz HELPER(name)(uint32_t x, void *fpstp) \ +{ \ + float_status *fpst = fpstp; \ + return sign##int32_to_##float##fsz(x, fpst); \ } -uint32_t VFP_HELPER(touiz, s)(float32 x, CPUState *env) -{ - if (float32_is_any_nan(x)) { - float_raise(float_flag_invalid, &env->vfp.fp_status); - return 0; - } - return float32_to_uint32_round_to_zero(x, &env->vfp.fp_status); +#define CONV_FTOI(name, fsz, sign, round) \ +uint32_t HELPER(name)(float##fsz x, void *fpstp) \ +{ \ + float_status *fpst = fpstp; \ + if (float##fsz##_is_any_nan(x)) { \ + float_raise(float_flag_invalid, fpst); \ + return 0; \ + } \ + return float##fsz##_to_##sign##int32##round(x, fpst); \ } -uint32_t VFP_HELPER(touiz, d)(float64 x, CPUState *env) -{ - if (float64_is_any_nan(x)) { - float_raise(float_flag_invalid, &env->vfp.fp_status); - return 0; - } - return float64_to_uint32_round_to_zero(x, &env->vfp.fp_status); -} +#define FLOAT_CONVS(name, p, fsz, sign) \ +CONV_ITOF(vfp_##name##to##p, fsz, sign) \ +CONV_FTOI(vfp_to##name##p, fsz, sign, ) \ +CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero) -uint32_t VFP_HELPER(tosiz, s)(float32 x, CPUState *env) -{ - if (float32_is_any_nan(x)) { - float_raise(float_flag_invalid, &env->vfp.fp_status); - return 0; - } - return float32_to_int32_round_to_zero(x, &env->vfp.fp_status); -} +FLOAT_CONVS(si, s, 32, ) +FLOAT_CONVS(si, d, 64, ) +FLOAT_CONVS(ui, s, 32, u) +FLOAT_CONVS(ui, d, 64, u) -uint32_t VFP_HELPER(tosiz, d)(float64 x, CPUState *env) -{ - if (float64_is_any_nan(x)) { - float_raise(float_flag_invalid, &env->vfp.fp_status); - 
return 0; - } - return float64_to_int32_round_to_zero(x, &env->vfp.fp_status); -} +#undef CONV_ITOF +#undef CONV_FTOI +#undef FLOAT_CONVS /* floating point conversion */ float64 VFP_HELPER(fcvtd, s)(float32 x, CPUState *env) @@ -2641,23 +2582,25 @@ float32 VFP_HELPER(fcvts, d)(float64 x, CPUState *env) /* VFP3 fixed point conversion. */ #define VFP_CONV_FIX(name, p, fsz, itype, sign) \ -float##fsz VFP_HELPER(name##to, p)(uint##fsz##_t x, uint32_t shift, \ - CPUState *env) \ +float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t x, uint32_t shift, \ + void *fpstp) \ { \ + float_status *fpst = fpstp; \ float##fsz tmp; \ - tmp = sign##int32_to_##float##fsz ((itype##_t)x, &env->vfp.fp_status); \ - return float##fsz##_scalbn(tmp, -(int)shift, &env->vfp.fp_status); \ + tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \ + return float##fsz##_scalbn(tmp, -(int)shift, fpst); \ } \ -uint##fsz##_t VFP_HELPER(to##name, p)(float##fsz x, uint32_t shift, \ - CPUState *env) \ +uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \ + void *fpstp) \ { \ + float_status *fpst = fpstp; \ float##fsz tmp; \ if (float##fsz##_is_any_nan(x)) { \ - float_raise(float_flag_invalid, &env->vfp.fp_status); \ + float_raise(float_flag_invalid, fpst); \ return 0; \ } \ - tmp = float##fsz##_scalbn(x, shift, &env->vfp.fp_status); \ - return float##fsz##_to_##itype##_round_to_zero(tmp, &env->vfp.fp_status); \ + tmp = float##fsz##_scalbn(x, shift, fpst); \ + return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \ } VFP_CONV_FIX(sh, d, 64, int16, ) @@ -2720,6 +2663,9 @@ float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env) float_status *s = &env->vfp.standard_fp_status; if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { + if (!(float32_is_zero(a) || float32_is_zero(b))) { + float_raise(float_flag_input_denormal, s); + } return float32_two; } return float32_sub(float32_two, float32_mul(a, b, s), s); @@ -2731,6 +2677,9 @@ float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env) float32 product; if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { + if (!(float32_is_zero(a) || float32_is_zero(b))) { + float_raise(float_flag_input_denormal, s); + } return float32_one_point_five; } product = float32_mul(a, b, s); @@ -2749,7 +2698,11 @@ float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env) */ static float64 recip_estimate(float64 a, CPUState *env) { - float_status *s = &env->vfp.standard_fp_status; + /* These calculations mustn't set any fp exception flags, + * so we use a local copy of the fp_status. 
+ */ + float_status dummy_status = env->vfp.standard_fp_status; + float_status *s = &dummy_status; /* q = (int)(a * 512.0) */ float64 q = float64_mul(float64_512, a, s); int64_t q_int = float64_to_int64_round_to_zero(q, s); @@ -2787,6 +2740,9 @@ float32 HELPER(recpe_f32)(float32 a, CPUState *env) } else if (float32_is_infinity(a)) { return float32_set_sign(float32_zero, float32_is_neg(a)); } else if (float32_is_zero_or_denormal(a)) { + if (!float32_is_zero(a)) { + float_raise(float_flag_input_denormal, s); + } float_raise(float_flag_divbyzero, s); return float32_set_sign(float32_infinity, float32_is_neg(a)); } else if (a_exp >= 253) { @@ -2812,7 +2768,11 @@ float32 HELPER(recpe_f32)(float32 a, CPUState *env) */ static float64 recip_sqrt_estimate(float64 a, CPUState *env) { - float_status *s = &env->vfp.standard_fp_status; + /* These calculations mustn't set any fp exception flags, + * so we use a local copy of the fp_status. + */ + float_status dummy_status = env->vfp.standard_fp_status; + float_status *s = &dummy_status; float64 q; int64_t q_int; @@ -2874,6 +2834,9 @@ float32 HELPER(rsqrte_f32)(float32 a, CPUState *env) } return float32_default_nan; } else if (float32_is_zero_or_denormal(a)) { + if (!float32_is_zero(a)) { + float_raise(float_flag_input_denormal, s); + } float_raise(float_flag_divbyzero, s); return float32_set_sign(float32_infinity, float32_is_neg(a)); } else if (float32_is_neg(a)) { diff --git a/target-arm/helper.h b/target-arm/helper.h index ae701e8451..7d5533f613 100644 --- a/target-arm/helper.h +++ b/target-arm/helper.h @@ -96,36 +96,36 @@ DEF_HELPER_3(vfp_cmped, void, f64, f64, env) DEF_HELPER_2(vfp_fcvtds, f64, f32, env) DEF_HELPER_2(vfp_fcvtsd, f32, f64, env) -DEF_HELPER_2(vfp_uitos, f32, i32, env) -DEF_HELPER_2(vfp_uitod, f64, i32, env) -DEF_HELPER_2(vfp_sitos, f32, i32, env) -DEF_HELPER_2(vfp_sitod, f64, i32, env) - -DEF_HELPER_2(vfp_touis, i32, f32, env) -DEF_HELPER_2(vfp_touid, i32, f64, env) -DEF_HELPER_2(vfp_touizs, i32, f32, env) -DEF_HELPER_2(vfp_touizd, i32, f64, env) -DEF_HELPER_2(vfp_tosis, i32, f32, env) -DEF_HELPER_2(vfp_tosid, i32, f64, env) -DEF_HELPER_2(vfp_tosizs, i32, f32, env) -DEF_HELPER_2(vfp_tosizd, i32, f64, env) - -DEF_HELPER_3(vfp_toshs, i32, f32, i32, env) -DEF_HELPER_3(vfp_tosls, i32, f32, i32, env) -DEF_HELPER_3(vfp_touhs, i32, f32, i32, env) -DEF_HELPER_3(vfp_touls, i32, f32, i32, env) -DEF_HELPER_3(vfp_toshd, i64, f64, i32, env) -DEF_HELPER_3(vfp_tosld, i64, f64, i32, env) -DEF_HELPER_3(vfp_touhd, i64, f64, i32, env) -DEF_HELPER_3(vfp_tould, i64, f64, i32, env) -DEF_HELPER_3(vfp_shtos, f32, i32, i32, env) -DEF_HELPER_3(vfp_sltos, f32, i32, i32, env) -DEF_HELPER_3(vfp_uhtos, f32, i32, i32, env) -DEF_HELPER_3(vfp_ultos, f32, i32, i32, env) -DEF_HELPER_3(vfp_shtod, f64, i64, i32, env) -DEF_HELPER_3(vfp_sltod, f64, i64, i32, env) -DEF_HELPER_3(vfp_uhtod, f64, i64, i32, env) -DEF_HELPER_3(vfp_ultod, f64, i64, i32, env) +DEF_HELPER_2(vfp_uitos, f32, i32, ptr) +DEF_HELPER_2(vfp_uitod, f64, i32, ptr) +DEF_HELPER_2(vfp_sitos, f32, i32, ptr) +DEF_HELPER_2(vfp_sitod, f64, i32, ptr) + +DEF_HELPER_2(vfp_touis, i32, f32, ptr) +DEF_HELPER_2(vfp_touid, i32, f64, ptr) +DEF_HELPER_2(vfp_touizs, i32, f32, ptr) +DEF_HELPER_2(vfp_touizd, i32, f64, ptr) +DEF_HELPER_2(vfp_tosis, i32, f32, ptr) +DEF_HELPER_2(vfp_tosid, i32, f64, ptr) +DEF_HELPER_2(vfp_tosizs, i32, f32, ptr) +DEF_HELPER_2(vfp_tosizd, i32, f64, ptr) + +DEF_HELPER_3(vfp_toshs, i32, f32, i32, ptr) +DEF_HELPER_3(vfp_tosls, i32, f32, i32, ptr) +DEF_HELPER_3(vfp_touhs, i32, f32, i32, ptr) 
+DEF_HELPER_3(vfp_touls, i32, f32, i32, ptr) +DEF_HELPER_3(vfp_toshd, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_tosld, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_touhd, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_tould, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_shtos, f32, i32, i32, ptr) +DEF_HELPER_3(vfp_sltos, f32, i32, i32, ptr) +DEF_HELPER_3(vfp_uhtos, f32, i32, i32, ptr) +DEF_HELPER_3(vfp_ultos, f32, i32, i32, ptr) +DEF_HELPER_3(vfp_shtod, f64, i64, i32, ptr) +DEF_HELPER_3(vfp_sltod, f64, i64, i32, ptr) +DEF_HELPER_3(vfp_uhtod, f64, i64, i32, ptr) +DEF_HELPER_3(vfp_ultod, f64, i64, i32, ptr) DEF_HELPER_2(vfp_fcvt_f16_to_f32, f32, i32, env) DEF_HELPER_2(vfp_fcvt_f32_to_f16, i32, f32, env) diff --git a/target-arm/neon_helper.c b/target-arm/neon_helper.c index f5b173aa71..9165519236 100644 --- a/target-arm/neon_helper.c +++ b/target-arm/neon_helper.c @@ -1802,41 +1802,37 @@ uint32_t HELPER(neon_mul_f32)(uint32_t a, uint32_t b) return float32_val(float32_mul(make_float32(a), make_float32(b), NFS)); } -/* Floating point comparisons produce an integer result. */ -#define NEON_VOP_FCMP(name, ok) \ -uint32_t HELPER(neon_##name)(uint32_t a, uint32_t b) \ -{ \ - switch (float32_compare_quiet(make_float32(a), make_float32(b), NFS)) { \ - ok return ~0; \ - default: return 0; \ - } \ +/* Floating point comparisons produce an integer result. + * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do. + * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires. + */ +uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b) +{ + return -float32_eq_quiet(make_float32(a), make_float32(b), NFS); +} + +uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b) +{ + return -float32_le(make_float32(b), make_float32(a), NFS); } -NEON_VOP_FCMP(ceq_f32, case float_relation_equal:) -NEON_VOP_FCMP(cge_f32, case float_relation_equal: case float_relation_greater:) -NEON_VOP_FCMP(cgt_f32, case float_relation_greater:) +uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b) +{ + return -float32_lt(make_float32(b), make_float32(a), NFS); +} uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b) { float32 f0 = float32_abs(make_float32(a)); float32 f1 = float32_abs(make_float32(b)); - switch (float32_compare_quiet(f0, f1, NFS)) { - case float_relation_equal: - case float_relation_greater: - return ~0; - default: - return 0; - } + return -float32_le(f1, f0, NFS); } uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b) { float32 f0 = float32_abs(make_float32(a)); float32 f1 = float32_abs(make_float32(b)); - if (float32_compare_quiet(f0, f1, NFS) == float_relation_greater) { - return ~0; - } - return 0; + return -float32_lt(f1, f0, NFS); } #define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1)) diff --git a/target-arm/translate.c b/target-arm/translate.c index a1af436e34..f5507ec3b6 100644 --- a/target-arm/translate.c +++ b/target-arm/translate.c @@ -909,6 +909,26 @@ VFP_OP2(div) #undef VFP_OP2 +static inline void gen_vfp_F1_mul(int dp) +{ + /* Like gen_vfp_mul() but put result in F1 */ + if (dp) { + gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, cpu_env); + } else { + gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, cpu_env); + } +} + +static inline void gen_vfp_F1_neg(int dp) +{ + /* Like gen_vfp_neg() but put result in F1 */ + if (dp) { + gen_helper_vfp_negd(cpu_F1d, cpu_F0d); + } else { + gen_helper_vfp_negs(cpu_F1s, cpu_F0s); + } +} + static inline void gen_vfp_abs(int dp) { if (dp) @@ -957,63 +977,73 @@ static inline void gen_vfp_F1_ld0(int dp) tcg_gen_movi_i32(cpu_F1s, 0); } -static inline void 
gen_vfp_uito(int dp) -{ - if (dp) - gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env); - else - gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env); -} - -static inline void gen_vfp_sito(int dp) -{ - if (dp) - gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env); - else - gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env); +#define VFP_GEN_ITOF(name) \ +static inline void gen_vfp_##name(int dp, int neon) \ +{ \ + TCGv_ptr statusptr = tcg_temp_new_ptr(); \ + int offset; \ + if (neon) { \ + offset = offsetof(CPUState, vfp.standard_fp_status); \ + } else { \ + offset = offsetof(CPUState, vfp.fp_status); \ + } \ + tcg_gen_addi_ptr(statusptr, cpu_env, offset); \ + if (dp) { \ + gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \ + } else { \ + gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \ + } \ + tcg_temp_free_ptr(statusptr); \ } -static inline void gen_vfp_toui(int dp) -{ - if (dp) - gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env); - else - gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env); -} +VFP_GEN_ITOF(uito) +VFP_GEN_ITOF(sito) +#undef VFP_GEN_ITOF -static inline void gen_vfp_touiz(int dp) -{ - if (dp) - gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env); - else - gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env); -} - -static inline void gen_vfp_tosi(int dp) -{ - if (dp) - gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env); - else - gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env); +#define VFP_GEN_FTOI(name) \ +static inline void gen_vfp_##name(int dp, int neon) \ +{ \ + TCGv_ptr statusptr = tcg_temp_new_ptr(); \ + int offset; \ + if (neon) { \ + offset = offsetof(CPUState, vfp.standard_fp_status); \ + } else { \ + offset = offsetof(CPUState, vfp.fp_status); \ + } \ + tcg_gen_addi_ptr(statusptr, cpu_env, offset); \ + if (dp) { \ + gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \ + } else { \ + gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \ + } \ + tcg_temp_free_ptr(statusptr); \ } -static inline void gen_vfp_tosiz(int dp) -{ - if (dp) - gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env); - else - gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env); -} +VFP_GEN_FTOI(toui) +VFP_GEN_FTOI(touiz) +VFP_GEN_FTOI(tosi) +VFP_GEN_FTOI(tosiz) +#undef VFP_GEN_FTOI #define VFP_GEN_FIX(name) \ -static inline void gen_vfp_##name(int dp, int shift) \ +static inline void gen_vfp_##name(int dp, int shift, int neon) \ { \ TCGv tmp_shift = tcg_const_i32(shift); \ - if (dp) \ - gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\ - else \ - gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\ + TCGv_ptr statusptr = tcg_temp_new_ptr(); \ + int offset; \ + if (neon) { \ + offset = offsetof(CPUState, vfp.standard_fp_status); \ + } else { \ + offset = offsetof(CPUState, vfp.fp_status); \ + } \ + tcg_gen_addi_ptr(statusptr, cpu_env, offset); \ + if (dp) { \ + gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \ + } else { \ + gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \ + } \ tcg_temp_free_i32(tmp_shift); \ + tcg_temp_free_ptr(statusptr); \ } VFP_GEN_FIX(tosh) VFP_GEN_FIX(tosl) @@ -1331,7 +1361,7 @@ static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest) return 0; } -/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured +/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred (ie. an undefined instruction). 
*/ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn) { @@ -2335,7 +2365,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn) return 0; } -/* Disassemble an XScale DSP instruction. Returns nonzero if an error occured +/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred (ie. an undefined instruction). */ static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn) { @@ -2681,7 +2711,7 @@ static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size) return tmp; } -/* Disassemble a VFP instruction. Returns nonzero if an error occured +/* Disassemble a VFP instruction. Returns nonzero if an error occurred (ie. an undefined instruction). */ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn) { @@ -3021,27 +3051,34 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn) for (;;) { /* Perform the calculation. */ switch (op) { - case 0: /* mac: fd + (fn * fm) */ - gen_vfp_mul(dp); - gen_mov_F1_vreg(dp, rd); + case 0: /* VMLA: fd + (fn * fm) */ + /* Note that order of inputs to the add matters for NaNs */ + gen_vfp_F1_mul(dp); + gen_mov_F0_vreg(dp, rd); gen_vfp_add(dp); break; - case 1: /* nmac: fd - (fn * fm) */ + case 1: /* VMLS: fd + -(fn * fm) */ gen_vfp_mul(dp); - gen_vfp_neg(dp); - gen_mov_F1_vreg(dp, rd); + gen_vfp_F1_neg(dp); + gen_mov_F0_vreg(dp, rd); gen_vfp_add(dp); break; - case 2: /* msc: -fd + (fn * fm) */ - gen_vfp_mul(dp); - gen_mov_F1_vreg(dp, rd); - gen_vfp_sub(dp); + case 2: /* VNMLS: -fd + (fn * fm) */ + /* Note that it isn't valid to replace (-A + B) with (B - A) + * or similar plausible looking simplifications + * because this will give wrong results for NaNs. + */ + gen_vfp_F1_mul(dp); + gen_mov_F0_vreg(dp, rd); + gen_vfp_neg(dp); + gen_vfp_add(dp); break; - case 3: /* nmsc: -fd - (fn * fm) */ + case 3: /* VNMLA: -fd + -(fn * fm) */ gen_vfp_mul(dp); + gen_vfp_F1_neg(dp); + gen_mov_F0_vreg(dp, rd); gen_vfp_neg(dp); - gen_mov_F1_vreg(dp, rd); - gen_vfp_sub(dp); + gen_vfp_add(dp); break; case 4: /* mul: fn * fm */ gen_vfp_mul(dp); @@ -3156,62 +3193,62 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn) gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env); break; case 16: /* fuito */ - gen_vfp_uito(dp); + gen_vfp_uito(dp, 0); break; case 17: /* fsito */ - gen_vfp_sito(dp); + gen_vfp_sito(dp, 0); break; case 20: /* fshto */ if (!arm_feature(env, ARM_FEATURE_VFP3)) return 1; - gen_vfp_shto(dp, 16 - rm); + gen_vfp_shto(dp, 16 - rm, 0); break; case 21: /* fslto */ if (!arm_feature(env, ARM_FEATURE_VFP3)) return 1; - gen_vfp_slto(dp, 32 - rm); + gen_vfp_slto(dp, 32 - rm, 0); break; case 22: /* fuhto */ if (!arm_feature(env, ARM_FEATURE_VFP3)) return 1; - gen_vfp_uhto(dp, 16 - rm); + gen_vfp_uhto(dp, 16 - rm, 0); break; case 23: /* fulto */ if (!arm_feature(env, ARM_FEATURE_VFP3)) return 1; - gen_vfp_ulto(dp, 32 - rm); + gen_vfp_ulto(dp, 32 - rm, 0); break; case 24: /* ftoui */ - gen_vfp_toui(dp); + gen_vfp_toui(dp, 0); break; case 25: /* ftouiz */ - gen_vfp_touiz(dp); + gen_vfp_touiz(dp, 0); break; case 26: /* ftosi */ - gen_vfp_tosi(dp); + gen_vfp_tosi(dp, 0); break; case 27: /* ftosiz */ - gen_vfp_tosiz(dp); + gen_vfp_tosiz(dp, 0); break; case 28: /* ftosh */ if (!arm_feature(env, ARM_FEATURE_VFP3)) return 1; - gen_vfp_tosh(dp, 16 - rm); + gen_vfp_tosh(dp, 16 - rm, 0); break; case 29: /* ftosl */ if (!arm_feature(env, ARM_FEATURE_VFP3)) return 1; - gen_vfp_tosl(dp, 32 - rm); + gen_vfp_tosl(dp, 32 - rm, 0); break; case 
30: /* ftouh */ if (!arm_feature(env, ARM_FEATURE_VFP3)) return 1; - gen_vfp_touh(dp, 16 - rm); + gen_vfp_touh(dp, 16 - rm, 0); break; case 31: /* ftoul */ if (!arm_feature(env, ARM_FEATURE_VFP3)) return 1; - gen_vfp_toul(dp, 32 - rm); + gen_vfp_toul(dp, 32 - rm, 0); break; default: /* undefined */ printf ("rn:%d\n", rn); @@ -5224,14 +5261,14 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass)); if (!(op & 1)) { if (u) - gen_vfp_ulto(0, shift); + gen_vfp_ulto(0, shift, 1); else - gen_vfp_slto(0, shift); + gen_vfp_slto(0, shift, 1); } else { if (u) - gen_vfp_toul(0, shift); + gen_vfp_toul(0, shift, 1); else - gen_vfp_tosl(0, shift); + gen_vfp_tosl(0, shift, 1); } tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass)); } @@ -6044,16 +6081,16 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env); break; case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */ - gen_vfp_sito(0); + gen_vfp_sito(0, 1); break; case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */ - gen_vfp_uito(0); + gen_vfp_uito(0, 1); break; case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */ - gen_vfp_tosiz(0); + gen_vfp_tosiz(0, 1); break; case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */ - gen_vfp_touiz(0); + gen_vfp_touiz(0, 1); break; default: /* Reserved op values were caught by the @@ -7348,7 +7385,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s) } else if ((insn & 0x000003e0) == 0x00000060) { tmp = load_reg(s, rm); shift = (insn >> 10) & 3; - /* ??? In many cases it's not neccessary to do a + /* ??? In many cases it's not necessary to do a rotate, a shift is sufficient. */ if (shift != 0) tcg_gen_rotri_i32(tmp, tmp, shift * 8); @@ -8139,7 +8176,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1) case 1: /* Sign/zero extend. */ tmp = load_reg(s, rm); shift = (insn >> 4) & 3; - /* ??? In many cases it's not neccessary to do a + /* ??? In many cases it's not necessary to do a rotate, a shift is sufficient. */ if (shift != 0) tcg_gen_rotri_i32(tmp, tmp, shift * 8); diff --git a/target-cris/cpu.h b/target-cris/cpu.h index d9087759d3..2bc35e4975 100644 --- a/target-cris/cpu.h +++ b/target-cris/cpu.h @@ -36,6 +36,9 @@ #define EXCP_IRQ 4 #define EXCP_BREAK 5 +/* CRIS-specific interrupt pending bits. */ +#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 + /* Register aliases. R0 - R15 */ #define R_FP 8 #define R_SP 14 @@ -101,7 +104,7 @@ typedef struct CPUCRISState { /* P0 - P15 are referred to as special registers in the docs. */ uint32_t pregs[16]; - /* Pseudo register for the PC. Not directly accessable on CRIS. */ + /* Pseudo register for the PC. Not directly accessible on CRIS. */ uint32_t pc; /* Pseudo register for the kernel stack. */ diff --git a/target-cris/translate_v10.c b/target-cris/translate_v10.c index 41db158965..5b14157d1a 100644 --- a/target-cris/translate_v10.c +++ b/target-cris/translate_v10.c @@ -956,7 +956,7 @@ static int dec10_bdap_m(DisasContext *dc, int size) return insn_len; } #endif - /* Now the rest of the modes are truely indirect. */ + /* Now the rest of the modes are truly indirect. 
*/ insn_len += dec10_prep_move_m(dc, 1, size, cpu_PR[PR_PREFIX]); tcg_gen_add_tl(cpu_PR[PR_PREFIX], cpu_PR[PR_PREFIX], cpu_R[rd]); cris_set_prefix(dc); diff --git a/target-i386/cpu.h b/target-i386/cpu.h index c7047d5912..fe658862c2 100644 --- a/target-i386/cpu.h +++ b/target-i386/cpu.h @@ -466,6 +466,15 @@ #define EXCP_SYSCALL 0x100 /* only happens in user only emulation for syscall instruction */ +/* i386-specific interrupt pending bits. */ +#define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2 +#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 +#define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4 +#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0 +#define CPU_INTERRUPT_INIT CPU_INTERRUPT_TGT_INT_1 +#define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_2 + + enum { CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */ @@ -523,16 +532,6 @@ enum { CC_OP_NB, }; -#ifdef FLOATX80 -#define USE_X86LDOUBLE -#endif - -#ifdef USE_X86LDOUBLE -typedef floatx80 CPU86_LDouble; -#else -typedef float64 CPU86_LDouble; -#endif - typedef struct SegmentCache { uint32_t selector; target_ulong base; @@ -585,11 +584,7 @@ typedef union { #define MMX_Q(n) q typedef union { -#ifdef USE_X86LDOUBLE - CPU86_LDouble d __attribute__((aligned(16))); -#else - CPU86_LDouble d; -#endif + floatx80 d __attribute__((aligned(16))); MMXReg mmx; } FPReg; @@ -645,7 +640,7 @@ typedef struct CPUX86State { /* emulator internal variables */ float_status fp_status; - CPU86_LDouble ft0; + floatx80 ft0; float_status mmx_status; /* for 3DNow! float ops */ float_status sse_status; @@ -856,8 +851,8 @@ static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl) /* op_helper.c */ /* used for debug or cpu save/restore */ -void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f); -CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper); +void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f); +floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper); /* cpu-exec.c */ /* the following helpers are only usable in user mode simulation as diff --git a/target-i386/exec.h b/target-i386/exec.h index ee36a7181a..9bd080e3a8 100644 --- a/target-i386/exec.h +++ b/target-i386/exec.h @@ -98,67 +98,6 @@ static inline void svm_check_intercept(uint32_t type) #endif /* !defined(CONFIG_USER_ONLY) */ -#ifdef USE_X86LDOUBLE -/* use long double functions */ -#define floatx_to_int32 floatx80_to_int32 -#define floatx_to_int64 floatx80_to_int64 -#define floatx_to_int32_round_to_zero floatx80_to_int32_round_to_zero -#define floatx_to_int64_round_to_zero floatx80_to_int64_round_to_zero -#define int32_to_floatx int32_to_floatx80 -#define int64_to_floatx int64_to_floatx80 -#define float32_to_floatx float32_to_floatx80 -#define float64_to_floatx float64_to_floatx80 -#define floatx_to_float32 floatx80_to_float32 -#define floatx_to_float64 floatx80_to_float64 -#define floatx_add floatx80_add -#define floatx_div floatx80_div -#define floatx_mul floatx80_mul -#define floatx_sub floatx80_sub -#define floatx_sqrt floatx80_sqrt -#define floatx_abs floatx80_abs -#define floatx_chs floatx80_chs -#define floatx_scalbn floatx80_scalbn -#define floatx_round_to_int floatx80_round_to_int -#define floatx_compare floatx80_compare -#define floatx_compare_quiet floatx80_compare_quiet -#define floatx_is_any_nan floatx80_is_any_nan -#define floatx_is_neg floatx80_is_neg -#define floatx_is_zero floatx80_is_zero -#define floatx_zero floatx80_zero -#define floatx_one floatx80_one -#define floatx_ln2 floatx80_ln2 -#define 
floatx_pi floatx80_pi -#else -#define floatx_to_int32 float64_to_int32 -#define floatx_to_int64 float64_to_int64 -#define floatx_to_int32_round_to_zero float64_to_int32_round_to_zero -#define floatx_to_int64_round_to_zero float64_to_int64_round_to_zero -#define int32_to_floatx int32_to_float64 -#define int64_to_floatx int64_to_float64 -#define float32_to_floatx float32_to_float64 -#define float64_to_floatx(x, e) (x) -#define floatx_to_float32 float64_to_float32 -#define floatx_to_float64(x, e) (x) -#define floatx_add float64_add -#define floatx_div float64_div -#define floatx_mul float64_mul -#define floatx_sub float64_sub -#define floatx_sqrt float64_sqrt -#define floatx_abs float64_abs -#define floatx_chs float64_chs -#define floatx_scalbn float64_scalbn -#define floatx_round_to_int float64_round_to_int -#define floatx_compare float64_compare -#define floatx_compare_quiet float64_compare_quiet -#define floatx_is_any_nan float64_is_any_nan -#define floatx_is_neg float64_is_neg -#define floatx_is_zero float64_is_zero -#define floatx_zero float64_zero -#define floatx_one float64_one -#define floatx_ln2 float64_ln2 -#define floatx_pi float64_pi -#endif - #define RC_MASK 0xc00 #define RC_NEAR 0x000 #define RC_DOWN 0x400 @@ -167,11 +106,6 @@ static inline void svm_check_intercept(uint32_t type) #define MAXTAN 9223372036854775808.0 -#ifdef USE_X86LDOUBLE - -/* only for x86 */ -typedef CPU_LDoubleU CPU86_LDoubleU; - /* the following deal with x86 long double-precision numbers */ #define MAXEXPD 0x7fff #define EXPBIAS 16383 @@ -180,23 +114,6 @@ typedef CPU_LDoubleU CPU86_LDoubleU; #define MANTD(fp) (fp.l.lower) #define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS -#else - -typedef CPU_DoubleU CPU86_LDoubleU; - -/* the following deal with IEEE double-precision numbers */ -#define MAXEXPD 0x7ff -#define EXPBIAS 1023 -#define EXPD(fp) (((fp.l.upper) >> 20) & 0x7FF) -#define SIGND(fp) ((fp.l.upper) & 0x80000000) -#ifdef __arm__ -#define MANTD(fp) (fp.l.lower | ((uint64_t)(fp.l.upper & ((1 << 20) - 1)) << 32)) -#else -#define MANTD(fp) (fp.ll & ((1LL << 52) - 1)) -#endif -#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7ff << 20)) | (EXPBIAS << 20) -#endif - static inline void fpush(void) { env->fpstt = (env->fpstt - 1) & 7; @@ -209,65 +126,24 @@ static inline void fpop(void) env->fpstt = (env->fpstt + 1) & 7; } -#ifndef USE_X86LDOUBLE -static inline CPU86_LDouble helper_fldt(target_ulong ptr) -{ - CPU86_LDoubleU temp; - int upper, e; - uint64_t ll; - - /* mantissa */ - upper = lduw(ptr + 8); - /* XXX: handle overflow ? 
*/ - e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */ - e |= (upper >> 4) & 0x800; /* sign */ - ll = (ldq(ptr) >> 11) & ((1LL << 52) - 1); -#ifdef __arm__ - temp.l.upper = (e << 20) | (ll >> 32); - temp.l.lower = ll; -#else - temp.ll = ll | ((uint64_t)e << 52); -#endif - return temp.d; -} - -static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr) +static inline floatx80 helper_fldt(target_ulong ptr) { - CPU86_LDoubleU temp; - int e; - - temp.d = f; - /* mantissa */ - stq(ptr, (MANTD(temp) << 11) | (1LL << 63)); - /* exponent + sign */ - e = EXPD(temp) - EXPBIAS + 16383; - e |= SIGND(temp) >> 16; - stw(ptr + 8, e); -} -#else - -/* we use memory access macros */ - -static inline CPU86_LDouble helper_fldt(target_ulong ptr) -{ - CPU86_LDoubleU temp; + CPU_LDoubleU temp; temp.l.lower = ldq(ptr); temp.l.upper = lduw(ptr + 8); return temp.d; } -static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr) +static inline void helper_fstt(floatx80 f, target_ulong ptr) { - CPU86_LDoubleU temp; + CPU_LDoubleU temp; temp.d = f; stq(ptr, temp.l.lower); stw(ptr + 8, temp.l.upper); } -#endif /* USE_X86LDOUBLE */ - #define FPUS_IE (1 << 0) #define FPUS_DE (1 << 1) #define FPUS_ZE (1 << 2) diff --git a/target-i386/helper.c b/target-i386/helper.c index 89df997436..509d68ca0f 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -21,7 +21,6 @@ #include <stdio.h> #include <string.h> #include <inttypes.h> -#include <signal.h> #include "cpu.h" #include "exec-all.h" @@ -403,15 +402,10 @@ void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf, fptag, env->mxcsr); for(i=0;i<8;i++) { -#if defined(USE_X86LDOUBLE) CPU_LDoubleU u; u.d = env->fpregs[i].d; cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x", i, u.l.lower, u.l.upper); -#else - cpu_fprintf(f, "FPR%d=%016" PRIx64, - i, env->fpregs[i].mmx.q); -#endif if ((i & 1) == 1) cpu_fprintf(f, "\n"); else diff --git a/target-i386/machine.c b/target-i386/machine.c index d78eceb779..bbeae8852c 100644 --- a/target-i386/machine.c +++ b/target-i386/machine.c @@ -84,7 +84,6 @@ static void put_fpreg_error(QEMUFile *f, void *opaque, size_t size) exit(0); } -#ifdef USE_X86LDOUBLE /* XXX: add that in a FPU generic layer */ union x86_longdouble { uint64_t mant; @@ -202,102 +201,6 @@ static bool fpregs_is_1_no_mmx(void *opaque, int version_id) VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_mmx, vmstate_fpreg_1_mmx, FPReg), \ VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_no_mmx, vmstate_fpreg_1_no_mmx, FPReg) -#else -static int get_fpreg(QEMUFile *f, void *opaque, size_t size) -{ - FPReg *fp_reg = opaque; - - qemu_get_be64s(f, &fp_reg->mmx.MMX_Q(0)); - return 0; -} - -static void put_fpreg(QEMUFile *f, void *opaque, size_t size) -{ - FPReg *fp_reg = opaque; - /* if we use doubles for float emulation, we save the doubles to - avoid losing information in case of MMX usage. It can give - problems if the image is restored on a CPU where long - doubles are used instead. 
*/ - qemu_put_be64s(f, &fp_reg->mmx.MMX_Q(0)); -} - -const VMStateInfo vmstate_fpreg = { - .name = "fpreg", - .get = get_fpreg, - .put = put_fpreg, -}; - -static int get_fpreg_0_mmx(QEMUFile *f, void *opaque, size_t size) -{ - FPReg *fp_reg = opaque; - uint64_t mant; - uint16_t exp; - - qemu_get_be64s(f, &mant); - qemu_get_be16s(f, &exp); - fp_reg->mmx.MMX_Q(0) = mant; - return 0; -} - -const VMStateInfo vmstate_fpreg_0_mmx = { - .name = "fpreg_0_mmx", - .get = get_fpreg_0_mmx, - .put = put_fpreg_error, -}; - -static int get_fpreg_0_no_mmx(QEMUFile *f, void *opaque, size_t size) -{ - FPReg *fp_reg = opaque; - uint64_t mant; - uint16_t exp; - - qemu_get_be64s(f, &mant); - qemu_get_be16s(f, &exp); - - fp_reg->d = cpu_set_fp80(mant, exp); - return 0; -} - -const VMStateInfo vmstate_fpreg_0_no_mmx = { - .name = "fpreg_0_no_mmx", - .get = get_fpreg_0_no_mmx, - .put = put_fpreg_error, -}; - -static bool fpregs_is_1(void *opaque, int version_id) -{ - CPUState *env = opaque; - - return env->fpregs_format_vmstate == 1; -} - -static bool fpregs_is_0_mmx(void *opaque, int version_id) -{ - CPUState *env = opaque; - int guess_mmx; - - guess_mmx = ((env->fptag_vmstate == 0xff) && - (env->fpus_vmstate & 0x3800) == 0); - return guess_mmx && env->fpregs_format_vmstate == 0; -} - -static bool fpregs_is_0_no_mmx(void *opaque, int version_id) -{ - CPUState *env = opaque; - int guess_mmx; - - guess_mmx = ((env->fptag_vmstate == 0xff) && - (env->fpus_vmstate & 0x3800) == 0); - return !guess_mmx && env->fpregs_format_vmstate == 0; -} - -#define VMSTATE_FP_REGS(_field, _state, _n) \ - VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1, vmstate_fpreg, FPReg), \ - VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_0_mmx, vmstate_fpreg_0_mmx, FPReg), \ - VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_0_no_mmx, vmstate_fpreg_0_no_mmx, FPReg) - -#endif /* USE_X86LDOUBLE */ - static bool version_is_5(void *opaque, int version_id) { return version_id == 5; @@ -344,11 +247,7 @@ static void cpu_pre_save(void *opaque) env->fptag_vmstate |= ((!env->fptags[i]) << i); } -#ifdef USE_X86LDOUBLE env->fpregs_format_vmstate = 0; -#else - env->fpregs_format_vmstate = 1; -#endif } static int cpu_post_load(void *opaque, int version_id) diff --git a/target-i386/op_helper.c b/target-i386/op_helper.c index 3c539f37cf..cec0c7686f 100644 --- a/target-i386/op_helper.c +++ b/target-i386/op_helper.c @@ -95,26 +95,9 @@ static const uint8_t rclb_table[32] = { 6, 7, 8, 0, 1, 2, 3, 4, }; -#if defined(CONFIG_SOFTFLOAT) -# define floatx_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL ) -# define floatx_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL ) -# define floatx_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL ) -#else -# define floatx_lg2 (0.30102999566398119523L) -# define floatx_l2e (1.44269504088896340739L) -# define floatx_l2t (3.32192809488736234781L) -#endif - -static const CPU86_LDouble f15rk[7] = -{ - floatx_zero, - floatx_one, - floatx_pi, - floatx_lg2, - floatx_ln2, - floatx_l2e, - floatx_l2t, -}; +#define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL ) +#define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL ) +#define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL ) /* broken thread support */ @@ -3442,18 +3425,18 @@ void helper_verw(target_ulong selector1) /* x87 FPU helpers */ -static inline double CPU86_LDouble_to_double(CPU86_LDouble a) +static inline double floatx80_to_double(floatx80 a) { union { float64 f64; double d; } u; - u.f64 = floatx_to_float64(a, &env->fp_status); + u.f64 = 
floatx80_to_float64(a, &env->fp_status); return u.d; } -static inline CPU86_LDouble double_to_CPU86_LDouble(double a) +static inline floatx80 double_to_floatx80(double a) { union { float64 f64; @@ -3461,7 +3444,7 @@ static inline CPU86_LDouble double_to_CPU86_LDouble(double a) } u; u.d = a; - return float64_to_floatx(u.f64, &env->fp_status); + return float64_to_floatx80(u.f64, &env->fp_status); } static void fpu_set_exception(int mask) @@ -3471,12 +3454,12 @@ static void fpu_set_exception(int mask) env->fpus |= FPUS_SE | FPUS_B; } -static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b) +static inline floatx80 helper_fdiv(floatx80 a, floatx80 b) { - if (floatx_is_zero(b)) { + if (floatx80_is_zero(b)) { fpu_set_exception(FPUS_ZE); } - return floatx_div(a, b, &env->fp_status); + return floatx80_div(a, b, &env->fp_status); } static void fpu_raise_exception(void) @@ -3498,7 +3481,7 @@ void helper_flds_FT0(uint32_t val) uint32_t i; } u; u.i = val; - FT0 = float32_to_floatx(u.f, &env->fp_status); + FT0 = float32_to_floatx80(u.f, &env->fp_status); } void helper_fldl_FT0(uint64_t val) @@ -3508,12 +3491,12 @@ void helper_fldl_FT0(uint64_t val) uint64_t i; } u; u.i = val; - FT0 = float64_to_floatx(u.f, &env->fp_status); + FT0 = float64_to_floatx80(u.f, &env->fp_status); } void helper_fildl_FT0(int32_t val) { - FT0 = int32_to_floatx(val, &env->fp_status); + FT0 = int32_to_floatx80(val, &env->fp_status); } void helper_flds_ST0(uint32_t val) @@ -3525,7 +3508,7 @@ void helper_flds_ST0(uint32_t val) } u; new_fpstt = (env->fpstt - 1) & 7; u.i = val; - env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status); + env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status); env->fpstt = new_fpstt; env->fptags[new_fpstt] = 0; /* validate stack entry */ } @@ -3539,7 +3522,7 @@ void helper_fldl_ST0(uint64_t val) } u; new_fpstt = (env->fpstt - 1) & 7; u.i = val; - env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status); + env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status); env->fpstt = new_fpstt; env->fptags[new_fpstt] = 0; /* validate stack entry */ } @@ -3548,7 +3531,7 @@ void helper_fildl_ST0(int32_t val) { int new_fpstt; new_fpstt = (env->fpstt - 1) & 7; - env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status); + env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status); env->fpstt = new_fpstt; env->fptags[new_fpstt] = 0; /* validate stack entry */ } @@ -3557,7 +3540,7 @@ void helper_fildll_ST0(int64_t val) { int new_fpstt; new_fpstt = (env->fpstt - 1) & 7; - env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status); + env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status); env->fpstt = new_fpstt; env->fptags[new_fpstt] = 0; /* validate stack entry */ } @@ -3568,7 +3551,7 @@ uint32_t helper_fsts_ST0(void) float32 f; uint32_t i; } u; - u.f = floatx_to_float32(ST0, &env->fp_status); + u.f = floatx80_to_float32(ST0, &env->fp_status); return u.i; } @@ -3578,14 +3561,14 @@ uint64_t helper_fstl_ST0(void) float64 f; uint64_t i; } u; - u.f = floatx_to_float64(ST0, &env->fp_status); + u.f = floatx80_to_float64(ST0, &env->fp_status); return u.i; } int32_t helper_fist_ST0(void) { int32_t val; - val = floatx_to_int32(ST0, &env->fp_status); + val = floatx80_to_int32(ST0, &env->fp_status); if (val != (int16_t)val) val = -32768; return val; @@ -3594,21 +3577,21 @@ int32_t helper_fist_ST0(void) int32_t helper_fistl_ST0(void) { int32_t val; - val = floatx_to_int32(ST0, &env->fp_status); + val = floatx80_to_int32(ST0, 
&env->fp_status); return val; } int64_t helper_fistll_ST0(void) { int64_t val; - val = floatx_to_int64(ST0, &env->fp_status); + val = floatx80_to_int64(ST0, &env->fp_status); return val; } int32_t helper_fistt_ST0(void) { int32_t val; - val = floatx_to_int32_round_to_zero(ST0, &env->fp_status); + val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status); if (val != (int16_t)val) val = -32768; return val; @@ -3617,14 +3600,14 @@ int32_t helper_fistt_ST0(void) int32_t helper_fisttl_ST0(void) { int32_t val; - val = floatx_to_int32_round_to_zero(ST0, &env->fp_status); + val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status); return val; } int64_t helper_fisttll_ST0(void) { int64_t val; - val = floatx_to_int64_round_to_zero(ST0, &env->fp_status); + val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status); return val; } @@ -3693,7 +3676,7 @@ void helper_fmov_STN_ST0(int st_index) void helper_fxchg_ST0_STN(int st_index) { - CPU86_LDouble tmp; + floatx80 tmp; tmp = ST(st_index); ST(st_index) = ST0; ST0 = tmp; @@ -3707,7 +3690,7 @@ void helper_fcom_ST0_FT0(void) { int ret; - ret = floatx_compare(ST0, FT0, &env->fp_status); + ret = floatx80_compare(ST0, FT0, &env->fp_status); env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1]; } @@ -3715,7 +3698,7 @@ void helper_fucom_ST0_FT0(void) { int ret; - ret = floatx_compare_quiet(ST0, FT0, &env->fp_status); + ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status); env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1]; } @@ -3726,7 +3709,7 @@ void helper_fcomi_ST0_FT0(void) int eflags; int ret; - ret = floatx_compare(ST0, FT0, &env->fp_status); + ret = floatx80_compare(ST0, FT0, &env->fp_status); eflags = helper_cc_compute_all(CC_OP); eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; CC_SRC = eflags; @@ -3737,7 +3720,7 @@ void helper_fucomi_ST0_FT0(void) int eflags; int ret; - ret = floatx_compare_quiet(ST0, FT0, &env->fp_status); + ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status); eflags = helper_cc_compute_all(CC_OP); eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; CC_SRC = eflags; @@ -3745,22 +3728,22 @@ void helper_fucomi_ST0_FT0(void) void helper_fadd_ST0_FT0(void) { - ST0 = floatx_add(ST0, FT0, &env->fp_status); + ST0 = floatx80_add(ST0, FT0, &env->fp_status); } void helper_fmul_ST0_FT0(void) { - ST0 = floatx_mul(ST0, FT0, &env->fp_status); + ST0 = floatx80_mul(ST0, FT0, &env->fp_status); } void helper_fsub_ST0_FT0(void) { - ST0 = floatx_sub(ST0, FT0, &env->fp_status); + ST0 = floatx80_sub(ST0, FT0, &env->fp_status); } void helper_fsubr_ST0_FT0(void) { - ST0 = floatx_sub(FT0, ST0, &env->fp_status); + ST0 = floatx80_sub(FT0, ST0, &env->fp_status); } void helper_fdiv_ST0_FT0(void) @@ -3777,34 +3760,34 @@ void helper_fdivr_ST0_FT0(void) void helper_fadd_STN_ST0(int st_index) { - ST(st_index) = floatx_add(ST(st_index), ST0, &env->fp_status); + ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status); } void helper_fmul_STN_ST0(int st_index) { - ST(st_index) = floatx_mul(ST(st_index), ST0, &env->fp_status); + ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status); } void helper_fsub_STN_ST0(int st_index) { - ST(st_index) = floatx_sub(ST(st_index), ST0, &env->fp_status); + ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status); } void helper_fsubr_STN_ST0(int st_index) { - ST(st_index) = floatx_sub(ST0, ST(st_index), &env->fp_status); + ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status); } void helper_fdiv_STN_ST0(int st_index) { - CPU86_LDouble *p; + floatx80 *p; 
p = &ST(st_index); *p = helper_fdiv(*p, ST0); } void helper_fdivr_STN_ST0(int st_index) { - CPU86_LDouble *p; + floatx80 *p; p = &ST(st_index); *p = helper_fdiv(ST0, *p); } @@ -3812,52 +3795,52 @@ void helper_fdivr_STN_ST0(int st_index) /* misc FPU operations */ void helper_fchs_ST0(void) { - ST0 = floatx_chs(ST0); + ST0 = floatx80_chs(ST0); } void helper_fabs_ST0(void) { - ST0 = floatx_abs(ST0); + ST0 = floatx80_abs(ST0); } void helper_fld1_ST0(void) { - ST0 = f15rk[1]; + ST0 = floatx80_one; } void helper_fldl2t_ST0(void) { - ST0 = f15rk[6]; + ST0 = floatx80_l2t; } void helper_fldl2e_ST0(void) { - ST0 = f15rk[5]; + ST0 = floatx80_l2e; } void helper_fldpi_ST0(void) { - ST0 = f15rk[2]; + ST0 = floatx80_pi; } void helper_fldlg2_ST0(void) { - ST0 = f15rk[3]; + ST0 = floatx80_lg2; } void helper_fldln2_ST0(void) { - ST0 = f15rk[4]; + ST0 = floatx80_ln2; } void helper_fldz_ST0(void) { - ST0 = f15rk[0]; + ST0 = floatx80_zero; } void helper_fldz_FT0(void) { - FT0 = f15rk[0]; + FT0 = floatx80_zero; } uint32_t helper_fnstsw(void) @@ -3891,7 +3874,6 @@ static void update_fp_status(void) break; } set_float_rounding_mode(rnd_type, &env->fp_status); -#ifdef FLOATX80 switch((env->fpuc >> 8) & 3) { case 0: rnd_type = 32; @@ -3905,7 +3887,6 @@ static void update_fp_status(void) break; } set_floatx80_rounding_precision(rnd_type, &env->fp_status); -#endif } void helper_fldcw(uint32_t val) @@ -3944,7 +3925,7 @@ void helper_fninit(void) void helper_fbld_ST0(target_ulong ptr) { - CPU86_LDouble tmp; + floatx80 tmp; uint64_t val; unsigned int v; int i; @@ -3954,9 +3935,9 @@ void helper_fbld_ST0(target_ulong ptr) v = ldub(ptr + i); val = (val * 100) + ((v >> 4) * 10) + (v & 0xf); } - tmp = int64_to_floatx(val, &env->fp_status); + tmp = int64_to_floatx80(val, &env->fp_status); if (ldub(ptr + 9) & 0x80) { - floatx_chs(tmp); + floatx80_chs(tmp); } fpush(); ST0 = tmp; @@ -3968,7 +3949,7 @@ void helper_fbst_ST0(target_ulong ptr) target_ulong mem_ref, mem_end; int64_t val; - val = floatx_to_int64(ST0, &env->fp_status); + val = floatx80_to_int64(ST0, &env->fp_status); mem_ref = ptr; mem_end = mem_ref + 9; if (val < 0) { @@ -3992,19 +3973,19 @@ void helper_fbst_ST0(target_ulong ptr) void helper_f2xm1(void) { - double val = CPU86_LDouble_to_double(ST0); + double val = floatx80_to_double(ST0); val = pow(2.0, val) - 1.0; - ST0 = double_to_CPU86_LDouble(val); + ST0 = double_to_floatx80(val); } void helper_fyl2x(void) { - double fptemp = CPU86_LDouble_to_double(ST0); + double fptemp = floatx80_to_double(ST0); if (fptemp>0.0){ fptemp = log(fptemp)/log(2.0); /* log2(ST) */ - fptemp *= CPU86_LDouble_to_double(ST1); - ST1 = double_to_CPU86_LDouble(fptemp); + fptemp *= floatx80_to_double(ST1); + ST1 = double_to_floatx80(fptemp); fpop(); } else { env->fpus &= (~0x4700); @@ -4014,15 +3995,15 @@ void helper_fyl2x(void) void helper_fptan(void) { - double fptemp = CPU86_LDouble_to_double(ST0); + double fptemp = floatx80_to_double(ST0); if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) { env->fpus |= 0x400; } else { fptemp = tan(fptemp); - ST0 = double_to_CPU86_LDouble(fptemp); + ST0 = double_to_floatx80(fptemp); fpush(); - ST0 = floatx_one; + ST0 = floatx80_one; env->fpus &= (~0x400); /* C2 <-- 0 */ /* the above code is for |arg| < 2**52 only */ } @@ -4032,21 +4013,21 @@ void helper_fpatan(void) { double fptemp, fpsrcop; - fpsrcop = CPU86_LDouble_to_double(ST1); - fptemp = CPU86_LDouble_to_double(ST0); - ST1 = double_to_CPU86_LDouble(atan2(fpsrcop, fptemp)); + fpsrcop = floatx80_to_double(ST1); + fptemp = floatx80_to_double(ST0); + ST1 = 
double_to_floatx80(atan2(fpsrcop, fptemp)); fpop(); } void helper_fxtract(void) { - CPU86_LDoubleU temp; + CPU_LDoubleU temp; temp.d = ST0; - if (floatx_is_zero(ST0)) { + if (floatx80_is_zero(ST0)) { /* Easy way to generate -inf and raising division by 0 exception */ - ST0 = floatx_div(floatx_chs(floatx_one), floatx_zero, &env->fp_status); + ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, &env->fp_status); fpush(); ST0 = temp.d; } else { @@ -4054,7 +4035,7 @@ void helper_fxtract(void) expdif = EXPD(temp) - EXPBIAS; /*DP exponent bias*/ - ST0 = int32_to_floatx(expdif, &env->fp_status); + ST0 = int32_to_floatx80(expdif, &env->fp_status); fpush(); BIASEXPONENT(temp); ST0 = temp.d; @@ -4064,15 +4045,15 @@ void helper_fxtract(void) void helper_fprem1(void) { double st0, st1, dblq, fpsrcop, fptemp; - CPU86_LDoubleU fpsrcop1, fptemp1; + CPU_LDoubleU fpsrcop1, fptemp1; int expdif; signed long long int q; - st0 = CPU86_LDouble_to_double(ST0); - st1 = CPU86_LDouble_to_double(ST1); + st0 = floatx80_to_double(ST0); + st1 = floatx80_to_double(ST1); if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) { - ST0 = double_to_CPU86_LDouble(0.0 / 0.0); /* NaN */ + ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */ env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ return; } @@ -4116,21 +4097,21 @@ void helper_fprem1(void) -(floor(fabs(fpsrcop))) : floor(fpsrcop); st0 -= (st1 * fpsrcop * fptemp); } - ST0 = double_to_CPU86_LDouble(st0); + ST0 = double_to_floatx80(st0); } void helper_fprem(void) { double st0, st1, dblq, fpsrcop, fptemp; - CPU86_LDoubleU fpsrcop1, fptemp1; + CPU_LDoubleU fpsrcop1, fptemp1; int expdif; signed long long int q; - st0 = CPU86_LDouble_to_double(ST0); - st1 = CPU86_LDouble_to_double(ST1); + st0 = floatx80_to_double(ST0); + st1 = floatx80_to_double(ST1); if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) { - ST0 = double_to_CPU86_LDouble(0.0 / 0.0); /* NaN */ + ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */ env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ return; } @@ -4175,17 +4156,17 @@ void helper_fprem(void) -(floor(fabs(fpsrcop))) : floor(fpsrcop); st0 -= (st1 * fpsrcop * fptemp); } - ST0 = double_to_CPU86_LDouble(st0); + ST0 = double_to_floatx80(st0); } void helper_fyl2xp1(void) { - double fptemp = CPU86_LDouble_to_double(ST0); + double fptemp = floatx80_to_double(ST0); if ((fptemp+1.0)>0.0) { fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */ - fptemp *= CPU86_LDouble_to_double(ST1); - ST1 = double_to_CPU86_LDouble(fptemp); + fptemp *= floatx80_to_double(ST1); + ST1 = double_to_floatx80(fptemp); fpop(); } else { env->fpus &= (~0x4700); @@ -4195,23 +4176,23 @@ void helper_fyl2xp1(void) void helper_fsqrt(void) { - if (floatx_is_neg(ST0)) { + if (floatx80_is_neg(ST0)) { env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ env->fpus |= 0x400; } - ST0 = floatx_sqrt(ST0, &env->fp_status); + ST0 = floatx80_sqrt(ST0, &env->fp_status); } void helper_fsincos(void) { - double fptemp = CPU86_LDouble_to_double(ST0); + double fptemp = floatx80_to_double(ST0); if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) { env->fpus |= 0x400; } else { - ST0 = double_to_CPU86_LDouble(sin(fptemp)); + ST0 = double_to_floatx80(sin(fptemp)); fpush(); - ST0 = double_to_CPU86_LDouble(cos(fptemp)); + ST0 = double_to_floatx80(cos(fptemp)); env->fpus &= (~0x400); /* C2 <-- 0 */ /* the above code is for |arg| < 2**63 only */ } @@ -4219,27 +4200,27 @@ void helper_fsincos(void) void helper_frndint(void) { - ST0 = floatx_round_to_int(ST0, &env->fp_status); + ST0 = floatx80_round_to_int(ST0, 
&env->fp_status); } void helper_fscale(void) { - if (floatx_is_any_nan(ST1)) { + if (floatx80_is_any_nan(ST1)) { ST0 = ST1; } else { - int n = floatx_to_int32_round_to_zero(ST1, &env->fp_status); - ST0 = floatx_scalbn(ST0, n, &env->fp_status); + int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status); + ST0 = floatx80_scalbn(ST0, n, &env->fp_status); } } void helper_fsin(void) { - double fptemp = CPU86_LDouble_to_double(ST0); + double fptemp = floatx80_to_double(ST0); if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) { env->fpus |= 0x400; } else { - ST0 = double_to_CPU86_LDouble(sin(fptemp)); + ST0 = double_to_floatx80(sin(fptemp)); env->fpus &= (~0x400); /* C2 <-- 0 */ /* the above code is for |arg| < 2**53 only */ } @@ -4247,12 +4228,12 @@ void helper_fsin(void) void helper_fcos(void) { - double fptemp = CPU86_LDouble_to_double(ST0); + double fptemp = floatx80_to_double(ST0); if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) { env->fpus |= 0x400; } else { - ST0 = double_to_CPU86_LDouble(cos(fptemp)); + ST0 = double_to_floatx80(cos(fptemp)); env->fpus &= (~0x400); /* C2 <-- 0 */ /* the above code is for |arg5 < 2**63 only */ } @@ -4260,7 +4241,7 @@ void helper_fcos(void) void helper_fxam_ST0(void) { - CPU86_LDoubleU temp; + CPU_LDoubleU temp; int expdif; temp.d = ST0; @@ -4272,11 +4253,7 @@ void helper_fxam_ST0(void) /* XXX: test fptags too */ expdif = EXPD(temp); if (expdif == MAXEXPD) { -#ifdef USE_X86LDOUBLE if (MANTD(temp) == 0x8000000000000000ULL) -#else - if (MANTD(temp) == 0) -#endif env->fpus |= 0x500 /*Infinity*/; else env->fpus |= 0x100 /*NaN*/; @@ -4294,7 +4271,7 @@ void helper_fstenv(target_ulong ptr, int data32) { int fpus, fptag, exp, i; uint64_t mant; - CPU86_LDoubleU tmp; + CPU_LDoubleU tmp; fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; fptag = 0; @@ -4310,9 +4287,7 @@ void helper_fstenv(target_ulong ptr, int data32) /* zero */ fptag |= 1; } else if (exp == 0 || exp == MAXEXPD -#ifdef USE_X86LDOUBLE || (mant & (1LL << 63)) == 0 -#endif ) { /* NaNs, infinity, denormal */ fptag |= 2; @@ -4364,7 +4339,7 @@ void helper_fldenv(target_ulong ptr, int data32) void helper_fsave(target_ulong ptr, int data32) { - CPU86_LDouble tmp; + floatx80 tmp; int i; helper_fstenv(ptr, data32); @@ -4392,7 +4367,7 @@ void helper_fsave(target_ulong ptr, int data32) void helper_frstor(target_ulong ptr, int data32) { - CPU86_LDouble tmp; + floatx80 tmp; int i; helper_fldenv(ptr, data32); @@ -4408,7 +4383,7 @@ void helper_frstor(target_ulong ptr, int data32) void helper_fxsave(target_ulong ptr, int data64) { int fpus, fptag, i, nb_xmm_regs; - CPU86_LDouble tmp; + floatx80 tmp; target_ulong addr; /* The operand must be 16 byte aligned */ @@ -4469,7 +4444,7 @@ void helper_fxsave(target_ulong ptr, int data64) void helper_fxrstor(target_ulong ptr, int data64) { int i, fpus, fptag, nb_xmm_regs; - CPU86_LDouble tmp; + floatx80 tmp; target_ulong addr; /* The operand must be 16 byte aligned */ @@ -4516,61 +4491,23 @@ void helper_fxrstor(target_ulong ptr, int data64) } } -#ifndef USE_X86LDOUBLE - -void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f) -{ - CPU86_LDoubleU temp; - int e; - - temp.d = f; - /* mantissa */ - *pmant = (MANTD(temp) << 11) | (1LL << 63); - /* exponent + sign */ - e = EXPD(temp) - EXPBIAS + 16383; - e |= SIGND(temp) >> 16; - *pexp = e; -} - -CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper) -{ - CPU86_LDoubleU temp; - int e; - uint64_t ll; - - /* XXX: handle overflow ? 
*/ - e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */ - e |= (upper >> 4) & 0x800; /* sign */ - ll = (mant >> 11) & ((1LL << 52) - 1); -#ifdef __arm__ - temp.l.upper = (e << 20) | (ll >> 32); - temp.l.lower = ll; -#else - temp.ll = ll | ((uint64_t)e << 52); -#endif - return temp.d; -} - -#else - -void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f) +void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f) { - CPU86_LDoubleU temp; + CPU_LDoubleU temp; temp.d = f; *pmant = temp.l.lower; *pexp = temp.l.upper; } -CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper) +floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper) { - CPU86_LDoubleU temp; + CPU_LDoubleU temp; temp.l.upper = upper; temp.l.lower = mant; return temp.d; } -#endif #ifdef TARGET_X86_64 diff --git a/target-i386/translate.c b/target-i386/translate.c index 199302e517..10bd72a0e2 100644 --- a/target-i386/translate.c +++ b/target-i386/translate.c @@ -7538,7 +7538,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) break; case 5: /* lfence */ case 6: /* mfence */ - if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE)) + if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2)) goto illegal_op; break; case 7: /* sfence / clflush */ diff --git a/target-lm32/translate.c b/target-lm32/translate.c index bcd52fe73d..eb2115814c 100644 --- a/target-lm32/translate.c +++ b/target-lm32/translate.c @@ -584,9 +584,6 @@ static void dec_orhi(DisasContext *dc) static void dec_scall(DisasContext *dc) { - TCGv t0; - int l1; - if (dc->imm5 == 7) { LOG_DIS("scall\n"); } else if (dc->imm5 == 2) { @@ -595,9 +592,6 @@ static void dec_scall(DisasContext *dc) cpu_abort(dc->env, "invalid opcode\n"); } - t0 = tcg_temp_new(); - l1 = gen_new_label(); - if (dc->imm5 == 7) { tcg_gen_movi_tl(cpu_pc, dc->pc); t_gen_raise_exception(dc, EXCP_SYSTEMCALL); diff --git a/target-m68k/helper.c b/target-m68k/helper.c index 514b03904f..faa8c42ae8 100644 --- a/target-m68k/helper.c +++ b/target-m68k/helper.c @@ -714,7 +714,7 @@ void HELPER(macsats)(CPUState *env, uint32_t acc) if (env->macsr & MACSR_V) { env->macsr |= MACSR_PAV0 << acc; if (env->macsr & MACSR_OMC) { - /* The result is saturated to 32 bits, despite overflow occuring + /* The result is saturated to 32 bits, despite overflow occurring at 48 bits. Seems weird, but that's what the hardware docs say. */ result = (result >> 63) ^ 0x7fffffff; diff --git a/target-m68k/translate.c b/target-m68k/translate.c index 9e5578d455..26f0ee42e9 100644 --- a/target-m68k/translate.c +++ b/target-m68k/translate.c @@ -171,9 +171,6 @@ typedef void (*disas_proc)(DisasContext *, uint16_t); static void disas_##name (DisasContext *s, uint16_t insn) #endif -/* FIXME: Remove this. */ -#define gen_im32(val) tcg_const_i32(val) - /* Generate a load from the specified address. Narrow values are sign extended to full register width. 
*/ static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign) @@ -339,7 +336,7 @@ static TCGv gen_lea_indexed(DisasContext *s, int opsize, TCGv base) if ((ext & 0x80) == 0) { /* base not suppressed */ if (IS_NULL_QREG(base)) { - base = gen_im32(offset + bd); + base = tcg_const_i32(offset + bd); bd = 0; } if (!IS_NULL_QREG(add)) { @@ -355,7 +352,7 @@ static TCGv gen_lea_indexed(DisasContext *s, int opsize, TCGv base) add = tmp; } } else { - add = gen_im32(bd); + add = tcg_const_i32(bd); } if ((ext & 3) != 0) { /* memory indirect */ @@ -536,15 +533,15 @@ static TCGv gen_lea(DisasContext *s, uint16_t insn, int opsize) case 0: /* Absolute short. */ offset = ldsw_code(s->pc); s->pc += 2; - return gen_im32(offset); + return tcg_const_i32(offset); case 1: /* Absolute long. */ offset = read_im32(s); - return gen_im32(offset); + return tcg_const_i32(offset); case 2: /* pc displacement */ offset = s->pc; offset += ldsw_code(s->pc); s->pc += 2; - return gen_im32(offset); + return tcg_const_i32(offset); case 3: /* pc index+displacement. */ return gen_lea_indexed(s, opsize, NULL_QREG); case 4: /* Immediate. */ @@ -1209,16 +1206,16 @@ DISAS_INSN(arith_im) break; case 2: /* subi */ tcg_gen_mov_i32(dest, src1); - gen_helper_xflag_lt(QREG_CC_X, dest, gen_im32(im)); + gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im)); tcg_gen_subi_i32(dest, dest, im); - gen_update_cc_add(dest, gen_im32(im)); + gen_update_cc_add(dest, tcg_const_i32(im)); s->cc_op = CC_OP_SUB; break; case 3: /* addi */ tcg_gen_mov_i32(dest, src1); tcg_gen_addi_i32(dest, dest, im); - gen_update_cc_add(dest, gen_im32(im)); - gen_helper_xflag_lt(QREG_CC_X, dest, gen_im32(im)); + gen_update_cc_add(dest, tcg_const_i32(im)); + gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im)); s->cc_op = CC_OP_ADD; break; case 5: /* eori */ @@ -1228,7 +1225,7 @@ DISAS_INSN(arith_im) case 6: /* cmpi */ tcg_gen_mov_i32(dest, src1); tcg_gen_subi_i32(dest, dest, im); - gen_update_cc_add(dest, gen_im32(im)); + gen_update_cc_add(dest, tcg_const_i32(im)); s->cc_op = CC_OP_SUB; break; default: @@ -1324,8 +1321,8 @@ DISAS_INSN(clr) default: abort(); } - DEST_EA(insn, opsize, gen_im32(0), NULL); - gen_logic_cc(s, gen_im32(0)); + DEST_EA(insn, opsize, tcg_const_i32(0), NULL); + gen_logic_cc(s, tcg_const_i32(0)); } static TCGv gen_get_ccr(DisasContext *s) @@ -1589,7 +1586,7 @@ DISAS_INSN(jump) } if ((insn & 0x40) == 0) { /* jsr */ - gen_push(s, gen_im32(s->pc)); + gen_push(s, tcg_const_i32(s->pc)); } gen_jmp(s, tmp); } @@ -1617,7 +1614,7 @@ DISAS_INSN(addsubq) tcg_gen_addi_i32(dest, dest, val); } } else { - src2 = gen_im32(val); + src2 = tcg_const_i32(val); if (insn & 0x0100) { gen_helper_xflag_lt(QREG_CC_X, dest, src2); tcg_gen_subi_i32(dest, dest, val); @@ -1666,7 +1663,7 @@ DISAS_INSN(branch) } if (op == 1) { /* bsr */ - gen_push(s, gen_im32(s->pc)); + gen_push(s, tcg_const_i32(s->pc)); } gen_flush_cc_op(s); if (op > 1) { @@ -1757,7 +1754,7 @@ DISAS_INSN(mov3q) val = (insn >> 9) & 7; if (val == 0) val = -1; - src = gen_im32(val); + src = tcg_const_i32(val); gen_logic_cc(s, src); DEST_EA(insn, OS_LONG, src, NULL); } @@ -1883,7 +1880,7 @@ DISAS_INSN(shift_im) tmp = (insn >> 9) & 7; if (tmp == 0) tmp = 8; - shift = gen_im32(tmp); + shift = tcg_const_i32(tmp); /* No need to flush flags becuse we know we will set C flag. */ if (insn & 0x100) { gen_helper_shl_cc(reg, cpu_env, reg, shift); @@ -2191,7 +2188,7 @@ DISAS_INSN(fpu) switch ((ext >> 10) & 7) { case 4: /* FPCR */ /* Not implemented. Always return zero. 
*/ - tmp32 = gen_im32(0); + tmp32 = tcg_const_i32(0); break; case 1: /* FPIAR */ case 2: /* FPSR */ @@ -2592,7 +2589,7 @@ DISAS_INSN(mac) /* Skip the accumulate if the value is already saturated. */ l1 = gen_new_label(); tmp = tcg_temp_new(); - gen_op_and32(tmp, QREG_MACSR, gen_im32(MACSR_PAV0 << acc)); + gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc)); gen_op_jmp_nz32(tmp, l1); } #endif @@ -2626,7 +2623,7 @@ DISAS_INSN(mac) /* Skip the accumulate if the value is already saturated. */ l1 = gen_new_label(); tmp = tcg_temp_new(); - gen_op_and32(tmp, QREG_MACSR, gen_im32(MACSR_PAV0 << acc)); + gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc)); gen_op_jmp_nz32(tmp, l1); } #endif diff --git a/target-microblaze/cpu.h b/target-microblaze/cpu.h index 536222e4b8..78fe14ff35 100644 --- a/target-microblaze/cpu.h +++ b/target-microblaze/cpu.h @@ -41,6 +41,9 @@ struct CPUMBState; #define EXCP_HW_BREAK 5 #define EXCP_HW_EXCP 6 +/* MicroBlaze-specific interrupt pending bits. */ +#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 + /* Register aliases. R0 - R15 */ #define R_SP 1 #define SR_PC 0 diff --git a/target-microblaze/helper.c b/target-microblaze/helper.c index 5230b52c18..a623c7b04a 100644 --- a/target-microblaze/helper.c +++ b/target-microblaze/helper.c @@ -117,7 +117,7 @@ void do_interrupt(CPUState *env) { uint32_t t; - /* IMM flag cannot propagate accross a branch and into the dslot. */ + /* IMM flag cannot propagate across a branch and into the dslot. */ assert(!((env->iflags & D_FLAG) && (env->iflags & IMM_FLAG))); assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG))); /* assert(env->sregs[SR_MSR] & (MSR_EE)); Only for HW exceptions. */ diff --git a/target-mips/exec.h b/target-mips/exec.h index b3c5a13f56..607edf12ca 100644 --- a/target-mips/exec.h +++ b/target-mips/exec.h @@ -29,10 +29,6 @@ static inline int cpu_has_work(CPUState *env) has_work = 1; } - if (env->interrupt_request & CPU_INTERRUPT_TIMER) { - has_work = 1; - } - return has_work; } diff --git a/target-mips/helper.c b/target-mips/helper.c index bdc1e53669..0f057c2171 100644 --- a/target-mips/helper.c +++ b/target-mips/helper.c @@ -272,8 +272,8 @@ int cpu_mips_handle_mmu_fault (CPUState *env, target_ulong address, int rw, #if !defined(CONFIG_USER_ONLY) target_phys_addr_t physical; int prot; -#endif int access_type; +#endif int ret = 0; #if 0 @@ -285,21 +285,19 @@ int cpu_mips_handle_mmu_fault (CPUState *env, target_ulong address, int rw, rw &= 1; /* data access */ +#if !defined(CONFIG_USER_ONLY) /* XXX: put correct access by using cpu_restore_state() correctly */ access_type = ACCESS_INT; -#if defined(CONFIG_USER_ONLY) - ret = TLBRET_NOMATCH; -#else ret = get_physical_address(env, &physical, &prot, address, rw, access_type); qemu_log("%s address=" TARGET_FMT_lx " ret %d physical " TARGET_FMT_plx " prot %d\n", __func__, address, ret, physical, prot); if (ret == TLBRET_MATCH) { - tlb_set_page(env, address & TARGET_PAGE_MASK, - physical & TARGET_PAGE_MASK, prot | PAGE_EXEC, - mmu_idx, TARGET_PAGE_SIZE); - ret = 0; + tlb_set_page(env, address & TARGET_PAGE_MASK, + physical & TARGET_PAGE_MASK, prot | PAGE_EXEC, + mmu_idx, TARGET_PAGE_SIZE); + ret = 0; } else if (ret < 0) #endif { diff --git a/target-mips/translate_init.c b/target-mips/translate_init.c index 590e092a1d..d55c522bf3 100644 --- a/target-mips/translate_init.c +++ b/target-mips/translate_init.c @@ -38,7 +38,7 @@ ((1 << CP0C2_M)) /* No config4, no DSP ASE, no large physaddr (PABITS), - no external interrupt controller, no vectored 
interupts, + no external interrupt controller, no vectored interrupts, no 1kb pages, no SmartMIPS ASE, no trace logic */ #define MIPS_CONFIG3 \ ((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \ @@ -477,7 +477,7 @@ static const mips_def_t mips_defs[] = .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU), .SYNCI_Step = 16, .CCRes = 2, - .CP0_Status_rw_bitmask = 0xF5D0FF1F, /*bit5:7 not writeable*/ + .CP0_Status_rw_bitmask = 0xF5D0FF1F, /*bit5:7 not writable*/ .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV), .SEGBITS = 40, .PABITS = 40, diff --git a/target-ppc/STATUS b/target-ppc/STATUS index 32e7ffa493..c8e9018bfc 100644 --- a/target-ppc/STATUS +++ b/target-ppc/STATUS @@ -11,7 +11,7 @@ INSN: instruction set. SPR: special purpose registers set OK => all SPR registered (but some may be fake) KO => some SPR are missing or should be removed - ? => uncheked + ? => unchecked MSR: MSR bits definitions OK => all MSR bits properly defined KO => MSR definition is incorrect diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h index 04b12590fa..8e4582f6ab 100644 --- a/target-ppc/cpu.h +++ b/target-ppc/cpu.h @@ -108,8 +108,8 @@ enum powerpc_mmu_t { POWERPC_MMU_MPC8xx = 0x00000007, /* BookE MMU model */ POWERPC_MMU_BOOKE = 0x00000008, - /* BookE FSL MMU model */ - POWERPC_MMU_BOOKE_FSL = 0x00000009, + /* BookE 2.06 MMU model */ + POWERPC_MMU_BOOKE206 = 0x00000009, /* PowerPC 601 MMU model (specific BATs format) */ POWERPC_MMU_601 = 0x0000000A, #if defined(TARGET_PPC64) @@ -225,7 +225,7 @@ enum { /* 970FX specific exceptions */ POWERPC_EXCP_SOFTP = 88, /* Soft patch exception */ POWERPC_EXCP_MAINT = 89, /* Maintenance exception */ - /* Freescale embeded cores specific exceptions */ + /* Freescale embedded cores specific exceptions */ POWERPC_EXCP_MEXTBR = 90, /* Maskable external breakpoint */ POWERPC_EXCP_NMEXTBR = 91, /* Non maskable external breakpoint */ POWERPC_EXCP_ITLBE = 92, /* Instruction TLB error */ @@ -420,6 +420,7 @@ struct ppc_slb_t { #define MSR_CM 31 /* Computation mode for BookE hflags */ #define MSR_ICM 30 /* Interrupt computation mode for BookE */ #define MSR_THV 29 /* hypervisor state for 32 bits PowerPC hflags */ +#define MSR_GS 28 /* guest state for BookE */ #define MSR_UCLE 26 /* User-mode cache lock enable for BookE */ #define MSR_VR 25 /* altivec available x hflags */ #define MSR_SPE 25 /* SPE enable for BookE x hflags */ @@ -457,6 +458,7 @@ struct ppc_slb_t { #define msr_cm ((env->msr >> MSR_CM) & 1) #define msr_icm ((env->msr >> MSR_ICM) & 1) #define msr_thv ((env->msr >> MSR_THV) & 1) +#define msr_gs ((env->msr >> MSR_GS) & 1) #define msr_ucle ((env->msr >> MSR_UCLE) & 1) #define msr_vr ((env->msr >> MSR_VR) & 1) #define msr_spe ((env->msr >> MSR_SPE) & 1) @@ -606,6 +608,224 @@ enum { #define vscr_sat (((env->vscr) >> VSCR_SAT) & 0x1) /*****************************************************************************/ +/* BookE e500 MMU registers */ + +#define MAS0_NV_SHIFT 0 +#define MAS0_NV_MASK (0xfff << MAS0_NV_SHIFT) + +#define MAS0_WQ_SHIFT 12 +#define MAS0_WQ_MASK (3 << MAS0_WQ_SHIFT) +/* Write TLB entry regardless of reservation */ +#define MAS0_WQ_ALWAYS (0 << MAS0_WQ_SHIFT) +/* Write TLB entry only already in use */ +#define MAS0_WQ_COND (1 << MAS0_WQ_SHIFT) +/* Clear TLB entry */ +#define MAS0_WQ_CLR_RSRV (2 << MAS0_WQ_SHIFT) + +#define MAS0_HES_SHIFT 14 +#define MAS0_HES (1 << MAS0_HES_SHIFT) + +#define MAS0_ESEL_SHIFT 16 +#define MAS0_ESEL_MASK (0xfff << MAS0_ESEL_SHIFT) + +#define MAS0_TLBSEL_SHIFT 28 +#define MAS0_TLBSEL_MASK (3 << MAS0_TLBSEL_SHIFT) +#define 
MAS0_TLBSEL_TLB0 (0 << MAS0_TLBSEL_SHIFT) +#define MAS0_TLBSEL_TLB1 (1 << MAS0_TLBSEL_SHIFT) +#define MAS0_TLBSEL_TLB2 (2 << MAS0_TLBSEL_SHIFT) +#define MAS0_TLBSEL_TLB3 (3 << MAS0_TLBSEL_SHIFT) + +#define MAS0_ATSEL_SHIFT 31 +#define MAS0_ATSEL (1 << MAS0_ATSEL_SHIFT) +#define MAS0_ATSEL_TLB 0 +#define MAS0_ATSEL_LRAT MAS0_ATSEL + +#define MAS1_TSIZE_SHIFT 8 +#define MAS1_TSIZE_MASK (0xf << MAS1_TSIZE_SHIFT) + +#define MAS1_TS_SHIFT 12 +#define MAS1_TS (1 << MAS1_TS_SHIFT) + +#define MAS1_IND_SHIFT 13 +#define MAS1_IND (1 << MAS1_IND_SHIFT) + +#define MAS1_TID_SHIFT 16 +#define MAS1_TID_MASK (0x3fff << MAS1_TID_SHIFT) + +#define MAS1_IPROT_SHIFT 30 +#define MAS1_IPROT (1 << MAS1_IPROT_SHIFT) + +#define MAS1_VALID_SHIFT 31 +#define MAS1_VALID 0x80000000 + +#define MAS2_EPN_SHIFT 12 +#define MAS2_EPN_MASK (0xfffff << MAS2_EPN_SHIFT) + +#define MAS2_ACM_SHIFT 6 +#define MAS2_ACM (1 << MAS2_ACM_SHIFT) + +#define MAS2_VLE_SHIFT 5 +#define MAS2_VLE (1 << MAS2_VLE_SHIFT) + +#define MAS2_W_SHIFT 4 +#define MAS2_W (1 << MAS2_W_SHIFT) + +#define MAS2_I_SHIFT 3 +#define MAS2_I (1 << MAS2_I_SHIFT) + +#define MAS2_M_SHIFT 2 +#define MAS2_M (1 << MAS2_M_SHIFT) + +#define MAS2_G_SHIFT 1 +#define MAS2_G (1 << MAS2_G_SHIFT) + +#define MAS2_E_SHIFT 0 +#define MAS2_E (1 << MAS2_E_SHIFT) + +#define MAS3_RPN_SHIFT 12 +#define MAS3_RPN_MASK (0xfffff << MAS3_RPN_SHIFT) + +#define MAS3_U0 0x00000200 +#define MAS3_U1 0x00000100 +#define MAS3_U2 0x00000080 +#define MAS3_U3 0x00000040 +#define MAS3_UX 0x00000020 +#define MAS3_SX 0x00000010 +#define MAS3_UW 0x00000008 +#define MAS3_SW 0x00000004 +#define MAS3_UR 0x00000002 +#define MAS3_SR 0x00000001 +#define MAS3_SPSIZE_SHIFT 1 +#define MAS3_SPSIZE_MASK (0x3e << MAS3_SPSIZE_SHIFT) + +#define MAS4_TLBSELD_SHIFT MAS0_TLBSEL_SHIFT +#define MAS4_TLBSELD_MASK MAS0_TLBSEL_MASK +#define MAS4_TIDSELD_MASK 0x00030000 +#define MAS4_TIDSELD_PID0 0x00000000 +#define MAS4_TIDSELD_PID1 0x00010000 +#define MAS4_TIDSELD_PID2 0x00020000 +#define MAS4_TIDSELD_PIDZ 0x00030000 +#define MAS4_INDD 0x00008000 /* Default IND */ +#define MAS4_TSIZED_SHIFT MAS1_TSIZE_SHIFT +#define MAS4_TSIZED_MASK MAS1_TSIZE_MASK +#define MAS4_ACMD 0x00000040 +#define MAS4_VLED 0x00000020 +#define MAS4_WD 0x00000010 +#define MAS4_ID 0x00000008 +#define MAS4_MD 0x00000004 +#define MAS4_GD 0x00000002 +#define MAS4_ED 0x00000001 +#define MAS4_WIMGED_MASK 0x0000001f /* Default WIMGE */ +#define MAS4_WIMGED_SHIFT 0 + +#define MAS5_SGS 0x80000000 +#define MAS5_SLPID_MASK 0x00000fff + +#define MAS6_SPID0 0x3fff0000 +#define MAS6_SPID1 0x00007ffe +#define MAS6_ISIZE(x) MAS1_TSIZE(x) +#define MAS6_SAS 0x00000001 +#define MAS6_SPID MAS6_SPID0 +#define MAS6_SIND 0x00000002 /* Indirect page */ +#define MAS6_SIND_SHIFT 1 +#define MAS6_SPID_MASK 0x3fff0000 +#define MAS6_SPID_SHIFT 16 +#define MAS6_ISIZE_MASK 0x00000f80 +#define MAS6_ISIZE_SHIFT 7 + +#define MAS7_RPN 0xffffffff + +#define MAS8_TGS 0x80000000 +#define MAS8_VF 0x40000000 +#define MAS8_TLBPID 0x00000fff + +/* Bit definitions for MMUCFG */ +#define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */ +#define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */ +#define MMUCFG_MAVN_V2 0x00000001 /* v2.0 */ +#define MMUCFG_NTLBS 0x0000000c /* Number of TLBs */ +#define MMUCFG_PIDSIZE 0x000007c0 /* PID Reg Size */ +#define MMUCFG_TWC 0x00008000 /* TLB Write Conditional (v2.0) */ +#define MMUCFG_LRAT 0x00010000 /* LRAT Supported (v2.0) */ +#define MMUCFG_RASIZE 0x00fe0000 /* Real Addr Size */ +#define MMUCFG_LPIDSIZE 0x0f000000 /* LPID Reg Size */ + +/* Bit definitions 
for MMUCSR0 */ +#define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */ +#define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */ +#define MMUCSR0_TLB2FI 0x00000040 /* TLB2 Flash invalidate */ +#define MMUCSR0_TLB3FI 0x00000020 /* TLB3 Flash invalidate */ +#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \ + MMUCSR0_TLB2FI | MMUCSR0_TLB3FI) +#define MMUCSR0_TLB0PS 0x00000780 /* TLB0 Page Size */ +#define MMUCSR0_TLB1PS 0x00007800 /* TLB1 Page Size */ +#define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */ +#define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */ + +/* TLBnCFG encoding */ +#define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */ +#define TLBnCFG_HES 0x00002000 /* HW select supported */ +#define TLBnCFG_AVAIL 0x00004000 /* variable page size */ +#define TLBnCFG_IPROT 0x00008000 /* IPROT supported */ +#define TLBnCFG_GTWE 0x00010000 /* Guest can write */ +#define TLBnCFG_IND 0x00020000 /* IND entries supported */ +#define TLBnCFG_PT 0x00040000 /* Can load from page table */ +#define TLBnCFG_MINSIZE 0x00f00000 /* Minimum Page Size (v1.0) */ +#define TLBnCFG_MINSIZE_SHIFT 20 +#define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */ +#define TLBnCFG_MAXSIZE_SHIFT 16 +#define TLBnCFG_ASSOC 0xff000000 /* Associativity */ +#define TLBnCFG_ASSOC_SHIFT 24 + +/* TLBnPS encoding */ +#define TLBnPS_4K 0x00000004 +#define TLBnPS_8K 0x00000008 +#define TLBnPS_16K 0x00000010 +#define TLBnPS_32K 0x00000020 +#define TLBnPS_64K 0x00000040 +#define TLBnPS_128K 0x00000080 +#define TLBnPS_256K 0x00000100 +#define TLBnPS_512K 0x00000200 +#define TLBnPS_1M 0x00000400 +#define TLBnPS_2M 0x00000800 +#define TLBnPS_4M 0x00001000 +#define TLBnPS_8M 0x00002000 +#define TLBnPS_16M 0x00004000 +#define TLBnPS_32M 0x00008000 +#define TLBnPS_64M 0x00010000 +#define TLBnPS_128M 0x00020000 +#define TLBnPS_256M 0x00040000 +#define TLBnPS_512M 0x00080000 +#define TLBnPS_1G 0x00100000 +#define TLBnPS_2G 0x00200000 +#define TLBnPS_4G 0x00400000 +#define TLBnPS_8G 0x00800000 +#define TLBnPS_16G 0x01000000 +#define TLBnPS_32G 0x02000000 +#define TLBnPS_64G 0x04000000 +#define TLBnPS_128G 0x08000000 +#define TLBnPS_256G 0x10000000 + +/* tlbilx action encoding */ +#define TLBILX_T_ALL 0 +#define TLBILX_T_TID 1 +#define TLBILX_T_FULLMATCH 3 +#define TLBILX_T_CLASS0 4 +#define TLBILX_T_CLASS1 5 +#define TLBILX_T_CLASS2 6 +#define TLBILX_T_CLASS3 7 + +/* BookE 2.06 helper defines */ + +#define BOOKE206_FLUSH_TLB0 (1 << 0) +#define BOOKE206_FLUSH_TLB1 (1 << 1) +#define BOOKE206_FLUSH_TLB2 (1 << 2) +#define BOOKE206_FLUSH_TLB3 (1 << 3) + +/* number of possible TLBs */ +#define BOOKE206_MAX_TLBN 4 + +/*****************************************************************************/ /* The whole PowerPC CPU context */ #define NB_MMU_MODES 3 @@ -676,7 +896,7 @@ struct CPUPPCState { int nb_BATs; target_ulong DBAT[2][8]; target_ulong IBAT[2][8]; - /* PowerPC TLB registers (for 4xx and 60x software driven TLBs) */ + /* PowerPC TLB registers (for 4xx, e500 and 60x software driven TLBs) */ int nb_tlb; /* Total number of TLB */ int tlb_per_way; /* Speed-up helper: used to avoid divisions at run time */ int nb_ways; /* Number of ways in the TLB set */ @@ -720,6 +940,7 @@ struct CPUPPCState { int bfd_mach; uint32_t flags; uint64_t insns_flags; + uint64_t insns_flags2; #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) target_phys_addr_t vpa; @@ -853,6 +1074,10 @@ void store_40x_dbcr0 (CPUPPCState *env, uint32_t val); void store_40x_sler (CPUPPCState *env, uint32_t val); void store_booke_tcr (CPUPPCState 
*env, target_ulong val); void store_booke_tsr (CPUPPCState *env, target_ulong val); +void booke206_flush_tlb(CPUState *env, int flags, const int check_iprot); +int ppcemb_tlb_check(CPUState *env, ppcemb_tlb_t *tlb, + target_phys_addr_t *raddrp, target_ulong address, + uint32_t pid, int ext, int i); void ppc_tlb_invalidate_all (CPUPPCState *env); void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr); #if defined(TARGET_PPC64) @@ -1016,6 +1241,7 @@ static inline void cpu_clone_regs(CPUState *env, target_ulong newsp) #define SPR_HSPRG1 (0x131) #define SPR_HDSISR (0x132) #define SPR_HDAR (0x133) +#define SPR_BOOKE_EPCR (0x133) #define SPR_SPURR (0x134) #define SPR_BOOKE_DBCR0 (0x134) #define SPR_IBCR (0x135) @@ -1543,6 +1769,11 @@ enum { PPC_DCRUX = 0x4000000000000000ULL, /* popcntw and popcntd instructions */ PPC_POPCNTWD = 0x8000000000000000ULL, + + /* extended type values */ + + /* BookE 2.06 PowerPC specification */ + PPC2_BOOKE206 = 0x0000000000000001ULL, }; /*****************************************************************************/ @@ -1695,6 +1926,77 @@ static inline void cpu_set_tls(CPUState *env, target_ulong newtls) #endif } +#if !defined(CONFIG_USER_ONLY) +static inline int booke206_tlbe_id(CPUState *env, ppcemb_tlb_t *tlbe) +{ + uintptr_t tlbel = (uintptr_t)tlbe; + uintptr_t tlbl = (uintptr_t)env->tlb; + + return (tlbel - tlbl) / sizeof(env->tlb[0]); +} + +static inline int booke206_tlb_size(CPUState *env, int tlbn) +{ + uint32_t tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn]; + int r = tlbncfg & TLBnCFG_N_ENTRY; + return r; +} + +static inline int booke206_tlb_ways(CPUState *env, int tlbn) +{ + uint32_t tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn]; + int r = tlbncfg >> TLBnCFG_ASSOC_SHIFT; + return r; +} + +static inline int booke206_tlbe_to_tlbn(CPUState *env, ppcemb_tlb_t *tlbe) +{ + int id = booke206_tlbe_id(env, tlbe); + int end = 0; + int i; + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + end += booke206_tlb_size(env, i); + if (id < end) { + return i; + } + } + + cpu_abort(env, "Unknown TLBe: %d\n", id); + return 0; +} + +static inline int booke206_tlbe_to_way(CPUState *env, ppcemb_tlb_t *tlb) +{ + int tlbn = booke206_tlbe_to_tlbn(env, tlb); + int tlbid = booke206_tlbe_id(env, tlb); + return tlbid & (booke206_tlb_ways(env, tlbn) - 1); +} + +static inline ppcemb_tlb_t *booke206_get_tlbe(CPUState *env, const int tlbn, + target_ulong ea, int way) +{ + int r; + uint32_t ways = booke206_tlb_ways(env, tlbn); + int ways_bits = ffs(ways) - 1; + int tlb_bits = ffs(booke206_tlb_size(env, tlbn)) - 1; + int i; + + way &= ways - 1; + ea >>= MAS2_EPN_SHIFT; + ea &= (1 << (tlb_bits - ways_bits)) - 1; + r = (ea << ways_bits) | way; + + /* bump up to tlbn index */ + for (i = 0; i < tlbn; i++) { + r += booke206_tlb_size(env, i); + } + + return &env->tlb[r].tlbe; +} + +#endif + extern void (*cpu_ppc_hypercall)(CPUState *); #endif /* !defined (__CPU_PPC_H__) */ diff --git a/target-ppc/helper.c b/target-ppc/helper.c index 5e4030bb53..cf2a368b57 100644 --- a/target-ppc/helper.c +++ b/target-ppc/helper.c @@ -21,7 +21,6 @@ #include <stdio.h> #include <string.h> #include <inttypes.h> -#include <signal.h> #include "cpu.h" #include "exec-all.h" @@ -606,7 +605,7 @@ static inline int _find_pte(CPUState *env, mmu_ctx_t *ctx, int is_64b, int h, r = pte64_check(ctx, pte0, pte1, h, rw, type); LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " " TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n", - pteg_base + (i * 16), pte0, pte1, (int)(pte0 & 1), h, + pteg_off + (i * 16), pte0, pte1, 
(int)(pte0 & 1), h, (int)((pte0 >> 1) & 1), ctx->ptem); } else #endif @@ -621,7 +620,7 @@ static inline int _find_pte(CPUState *env, mmu_ctx_t *ctx, int is_64b, int h, r = pte32_check(ctx, pte0, pte1, h, rw, type); LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " " TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n", - pteg_base + (i * 8), pte0, pte1, (int)(pte0 >> 31), h, + pteg_off + (i * 8), pte0, pte1, (int)(pte0 >> 31), h, (int)((pte0 >> 6) & 1), ctx->ptem); } switch (r) { @@ -918,8 +917,7 @@ static inline int get_segment(CPUState *env, mmu_ctx_t *ctx, if (eaddr != 0xEFFFFFFF) LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx - " hash=" TARGET_FMT_plx " pg_addr=" - TARGET_FMT_plx "\n", env->htab_base, + " hash=" TARGET_FMT_plx "\n", env->htab_base, env->htab_mask, vsid, ctx->ptem, ctx->hash[1]); ret2 = find_pte(env, ctx, 1, rw, type, target_page_bits); @@ -993,10 +991,10 @@ static inline int get_segment(CPUState *env, mmu_ctx_t *ctx, } /* Generic TLB check function for embedded PowerPC implementations */ -static inline int ppcemb_tlb_check(CPUState *env, ppcemb_tlb_t *tlb, - target_phys_addr_t *raddrp, - target_ulong address, uint32_t pid, int ext, - int i) +int ppcemb_tlb_check(CPUState *env, ppcemb_tlb_t *tlb, + target_phys_addr_t *raddrp, + target_ulong address, uint32_t pid, int ext, + int i) { target_ulong mask; @@ -1006,8 +1004,8 @@ static inline int ppcemb_tlb_check(CPUState *env, ppcemb_tlb_t *tlb, } mask = ~(tlb->size - 1); LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx - " " TARGET_FMT_lx " %u\n", __func__, i, address, pid, tlb->EPN, - mask, (uint32_t)tlb->PID); + " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN, + mask, (uint32_t)tlb->PID, tlb->prot); /* Check PID */ if (tlb->PID != 0 && tlb->PID != pid) return -1; @@ -1153,48 +1151,164 @@ void store_40x_sler (CPUPPCState *env, uint32_t val) env->spr[SPR_405_SLER] = val; } +static inline int mmubooke_check_tlb (CPUState *env, ppcemb_tlb_t *tlb, + target_phys_addr_t *raddr, int *prot, + target_ulong address, int rw, + int access_type, int i) +{ + int ret, _prot; + + if (ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID], + !env->nb_pids, i) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID1] && + ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID1], 0, i) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID2] && + ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID2], 0, i) >= 0) { + goto found_tlb; + } + + LOG_SWTLB("%s: TLB entry not found\n", __func__); + return -1; + +found_tlb: + + if (msr_pr != 0) { + _prot = tlb->prot & 0xF; + } else { + _prot = (tlb->prot >> 4) & 0xF; + } + + /* Check the address space */ + if (access_type == ACCESS_CODE) { + if (msr_ir != (tlb->attr & 1)) { + LOG_SWTLB("%s: AS doesn't match\n", __func__); + return -1; + } + + *prot = _prot; + if (_prot & PAGE_EXEC) { + LOG_SWTLB("%s: good TLB!\n", __func__); + return 0; + } + + LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, _prot); + ret = -3; + } else { + if (msr_dr != (tlb->attr & 1)) { + LOG_SWTLB("%s: AS doesn't match\n", __func__); + return -1; + } + + *prot = _prot; + if ((!rw && _prot & PAGE_READ) || (rw && (_prot & PAGE_WRITE))) { + LOG_SWTLB("%s: found TLB!\n", __func__); + return 0; + } + + LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, _prot); + ret = -2; + } + + return ret; +} + static int mmubooke_get_physical_address (CPUState *env, mmu_ctx_t *ctx, 
target_ulong address, int rw, int access_type) { ppcemb_tlb_t *tlb; target_phys_addr_t raddr; - int i, prot, ret; + int i, ret; ret = -1; raddr = (target_phys_addr_t)-1ULL; for (i = 0; i < env->nb_tlb; i++) { tlb = &env->tlb[i].tlbe; - if (ppcemb_tlb_check(env, tlb, &raddr, address, - env->spr[SPR_BOOKE_PID], 1, i) < 0) - continue; - if (msr_pr != 0) - prot = tlb->prot & 0xF; - else - prot = (tlb->prot >> 4) & 0xF; - /* Check the address space */ - if (access_type == ACCESS_CODE) { - if (msr_ir != (tlb->attr & 1)) - continue; - ctx->prot = prot; - if (prot & PAGE_EXEC) { - ret = 0; - break; + ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw, + access_type, i); + if (!ret) { + break; + } + } + + if (ret >= 0) { + ctx->raddr = raddr; + LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, ctx->raddr, ctx->prot, + ret); + } else { + LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, raddr, ctx->prot, ret); + } + + return ret; +} + +void booke206_flush_tlb(CPUState *env, int flags, const int check_iprot) +{ + int tlb_size; + int i, j; + ppc_tlb_t *tlb = env->tlb; + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + if (flags & (1 << i)) { + tlb_size = booke206_tlb_size(env, i); + for (j = 0; j < tlb_size; j++) { + if (!check_iprot || !(tlb[j].tlbe.attr & MAS1_IPROT)) { + tlb[j].tlbe.prot = 0; + } } - ret = -3; - } else { - if (msr_dr != (tlb->attr & 1)) - continue; - ctx->prot = prot; - if ((!rw && prot & PAGE_READ) || (rw && (prot & PAGE_WRITE))) { - ret = 0; - break; + } + tlb += booke206_tlb_size(env, i); + } + + tlb_flush(env, 1); +} + +static int mmubooke206_get_physical_address(CPUState *env, mmu_ctx_t *ctx, + target_ulong address, int rw, + int access_type) +{ + ppcemb_tlb_t *tlb; + target_phys_addr_t raddr; + int i, j, ret; + + ret = -1; + raddr = (target_phys_addr_t)-1ULL; + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + int ways = booke206_tlb_ways(env, i); + + for (j = 0; j < ways; j++) { + tlb = booke206_get_tlbe(env, i, address, j); + ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw, + access_type, j); + if (ret != -1) { + goto found_tlb; } - ret = -2; } } - if (ret >= 0) + +found_tlb: + + if (ret >= 0) { ctx->raddr = raddr; + LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, ctx->raddr, ctx->prot, + ret); + } else { + LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, raddr, ctx->prot, ret); + } return ret; } @@ -1254,9 +1368,8 @@ static inline int check_physical(CPUState *env, mmu_ctx_t *ctx, /* XXX: TODO */ cpu_abort(env, "MPC8xx MMU model is not implemented\n"); break; - case POWERPC_MMU_BOOKE_FSL: - /* XXX: TODO */ - cpu_abort(env, "BookE FSL MMU model not implemented\n"); + case POWERPC_MMU_BOOKE206: + cpu_abort(env, "BookE 2.06 MMU doesn't have physical real mode\n"); break; default: cpu_abort(env, "Unknown or invalid MMU model\n"); @@ -1281,6 +1394,9 @@ int get_physical_address (CPUState *env, mmu_ctx_t *ctx, target_ulong eaddr, IS and DS bits only affect the address space. */ ret = mmubooke_get_physical_address(env, ctx, eaddr, rw, access_type); + } else if (env->mmu_model == POWERPC_MMU_BOOKE206) { + ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw, + access_type); } else { /* No address translation. 
*/ ret = check_physical(env, ctx, eaddr, rw); @@ -1314,14 +1430,14 @@ int get_physical_address (CPUState *env, mmu_ctx_t *ctx, target_ulong eaddr, ret = mmubooke_get_physical_address(env, ctx, eaddr, rw, access_type); break; + case POWERPC_MMU_BOOKE206: + ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw, + access_type); + break; case POWERPC_MMU_MPC8xx: /* XXX: TODO */ cpu_abort(env, "MPC8xx MMU model is not implemented\n"); break; - case POWERPC_MMU_BOOKE_FSL: - /* XXX: TODO */ - cpu_abort(env, "BookE FSL MMU model not implemented\n"); - return -1; case POWERPC_MMU_REAL: cpu_abort(env, "PowerPC in real mode do not do any translation\n"); return -1; @@ -1348,6 +1464,46 @@ target_phys_addr_t cpu_get_phys_page_debug (CPUState *env, target_ulong addr) return ctx.raddr & TARGET_PAGE_MASK; } +static void booke206_update_mas_tlb_miss(CPUState *env, target_ulong address, + int rw) +{ + env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; + env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; + env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; + env->spr[SPR_BOOKE_MAS3] = 0; + env->spr[SPR_BOOKE_MAS6] = 0; + env->spr[SPR_BOOKE_MAS7] = 0; + + /* AS */ + if (((rw == 2) && msr_ir) || ((rw != 2) && msr_dr)) { + env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; + env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; + } + + env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; + env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; + + switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) { + case MAS4_TIDSELD_PID0: + env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID] << MAS1_TID_SHIFT; + break; + case MAS4_TIDSELD_PID1: + env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID1] << MAS1_TID_SHIFT; + break; + case MAS4_TIDSELD_PID2: + env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID2] << MAS1_TID_SHIFT; + break; + } + + env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; + + /* next victim logic */ + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; + env->last_way++; + env->last_way &= booke206_tlb_ways(env, 0) - 1; + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; +} + /* Perform address translation */ int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw, int mmu_idx, int is_softmmu) @@ -1403,15 +1559,14 @@ int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw, env->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x40000000; break; + case POWERPC_MMU_BOOKE206: + booke206_update_mas_tlb_miss(env, address, rw); + /* fall through */ case POWERPC_MMU_BOOKE: env->exception_index = POWERPC_EXCP_ITLB; env->error_code = 0; env->spr[SPR_BOOKE_DEAR] = address; return -1; - case POWERPC_MMU_BOOKE_FSL: - /* XXX: TODO */ - cpu_abort(env, "BookE FSL MMU model is not implemented\n"); - return -1; case POWERPC_MMU_MPC8xx: /* XXX: TODO */ cpu_abort(env, "MPC8xx MMU model is not implemented\n"); @@ -1432,7 +1587,8 @@ int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw, break; case -3: /* No execute protection violation */ - if (env->mmu_model == POWERPC_MMU_BOOKE) { + if ((env->mmu_model == POWERPC_MMU_BOOKE) || + (env->mmu_model == POWERPC_MMU_BOOKE206)) { env->spr[SPR_BOOKE_ESR] = 0x00000000; } env->exception_index = POWERPC_EXCP_ISI; @@ -1522,16 +1678,15 @@ int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw, /* XXX: TODO */ cpu_abort(env, "MPC8xx MMU model is not implemented\n"); break; + case POWERPC_MMU_BOOKE206: + booke206_update_mas_tlb_miss(env, address, rw); + /* 
fall through */ case POWERPC_MMU_BOOKE: env->exception_index = POWERPC_EXCP_DTLB; env->error_code = 0; env->spr[SPR_BOOKE_DEAR] = address; env->spr[SPR_BOOKE_ESR] = rw ? 1 << ESR_ST : 0; return -1; - case POWERPC_MMU_BOOKE_FSL: - /* XXX: TODO */ - cpu_abort(env, "BookE FSL MMU model is not implemented\n"); - return -1; case POWERPC_MMU_REAL: cpu_abort(env, "PowerPC in real mode should never raise " "any MMU exceptions\n"); @@ -1551,7 +1706,8 @@ int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw, if (rw) { env->spr[SPR_40x_ESR] |= 0x00800000; } - } else if (env->mmu_model == POWERPC_MMU_BOOKE) { + } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || + (env->mmu_model == POWERPC_MMU_BOOKE206)) { env->spr[SPR_BOOKE_DEAR] = address; env->spr[SPR_BOOKE_ESR] = rw ? 1 << ESR_ST : 0; } else { @@ -1822,10 +1978,8 @@ void ppc_tlb_invalidate_all (CPUPPCState *env) case POWERPC_MMU_BOOKE: tlb_flush(env, 1); break; - case POWERPC_MMU_BOOKE_FSL: - /* XXX: TODO */ - if (!kvm_enabled()) - cpu_abort(env, "BookE MMU model is not implemented\n"); + case POWERPC_MMU_BOOKE206: + booke206_flush_tlb(env, -1, 0); break; case POWERPC_MMU_32B: case POWERPC_MMU_601: @@ -1869,9 +2023,9 @@ void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr) /* XXX: TODO */ cpu_abort(env, "BookE MMU model is not implemented\n"); break; - case POWERPC_MMU_BOOKE_FSL: + case POWERPC_MMU_BOOKE206: /* XXX: TODO */ - cpu_abort(env, "BookE FSL MMU model is not implemented\n"); + cpu_abort(env, "BookE 2.06 MMU model is not implemented\n"); break; case POWERPC_MMU_32B: case POWERPC_MMU_601: @@ -1984,7 +2138,7 @@ void ppc_store_sr (CPUPPCState *env, int srnum, target_ulong value) /* VSID = VSID */ rs |= (value & 0xfffffff) << 12; /* flags = flags */ - rs |= ((value >> 27) & 0xf) << 9; + rs |= ((value >> 27) & 0xf) << 8; ppc_store_slb(env, rb, rs); } else @@ -2589,7 +2743,8 @@ static inline void powerpc_excp(CPUState *env, int excp_model, int excp) env->exception_index = POWERPC_EXCP_NONE; env->error_code = 0; - if (env->mmu_model == POWERPC_MMU_BOOKE) { + if ((env->mmu_model == POWERPC_MMU_BOOKE) || + (env->mmu_model == POWERPC_MMU_BOOKE206)) { /* XXX: The BookE changes address space when switching modes, we should probably implement that as different MMU indexes, but for the moment we do it the slow way and flush all. 
*/ diff --git a/target-ppc/helper.h b/target-ppc/helper.h index 7c02be9cfd..470e42f676 100644 --- a/target-ppc/helper.h +++ b/target-ppc/helper.h @@ -51,9 +51,7 @@ DEF_HELPER_FLAGS_1(cntlzw32, TCG_CALL_CONST | TCG_CALL_PURE, i32, i32) DEF_HELPER_FLAGS_2(brinc, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl, tl) DEF_HELPER_0(float_check_status, void) -#ifdef CONFIG_SOFTFLOAT DEF_HELPER_0(reset_fpstatus, void) -#endif DEF_HELPER_2(compute_fprf, i32, i64, i32) DEF_HELPER_2(store_fpscr, void, i64, i32) DEF_HELPER_1(fpscr_clrbit, void, i32) @@ -334,6 +332,12 @@ DEF_HELPER_1(4xx_tlbsx, tl, tl) DEF_HELPER_2(440_tlbre, tl, i32, tl) DEF_HELPER_3(440_tlbwe, void, i32, tl, tl) DEF_HELPER_1(440_tlbsx, tl, tl) +DEF_HELPER_0(booke206_tlbre, void) +DEF_HELPER_0(booke206_tlbwe, void) +DEF_HELPER_1(booke206_tlbsx, void, tl) +DEF_HELPER_1(booke206_tlbivax, void, tl) +DEF_HELPER_1(booke206_tlbflush, void, i32) +DEF_HELPER_2(booke_setpid, void, i32, tl) DEF_HELPER_1(6xx_tlbd, void, tl) DEF_HELPER_1(6xx_tlbi, void, tl) DEF_HELPER_1(74xx_tlbd, void, tl) diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c index 2cfb24bb1d..e7b1b10c69 100644 --- a/target-ppc/kvm.c +++ b/target-ppc/kvm.c @@ -2,6 +2,7 @@ * PowerPC implementation of KVM hooks * * Copyright IBM Corp. 2007 + * Copyright (C) 2011 Freescale Semiconductor, Inc. * * Authors: * Jerone Young <jyoung5@us.ibm.com> @@ -43,6 +44,8 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = { static int cap_interrupt_unset = false; static int cap_interrupt_level = false; +static int cap_segstate; +static int cap_booke_sregs; /* XXX We have a race condition where we actually have a level triggered * interrupt, but the infrastructure can't expose that yet, so the guest @@ -68,6 +71,12 @@ int kvm_arch_init(KVMState *s) #ifdef KVM_CAP_PPC_IRQ_LEVEL cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL); #endif +#ifdef KVM_CAP_PPC_SEGSTATE + cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE); +#endif +#ifdef KVM_CAP_PPC_BOOKE_SREGS + cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS); +#endif if (!cap_interrupt_level) { fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the " @@ -77,13 +86,50 @@ int kvm_arch_init(KVMState *s) return 0; } -int kvm_arch_init_vcpu(CPUState *cenv) +static int kvm_arch_sync_sregs(CPUState *cenv) { - int ret = 0; struct kvm_sregs sregs; + int ret; + + if (cenv->excp_model == POWERPC_EXCP_BOOKE) { + /* What we're really trying to say is "if we're on BookE, we use + the native PVR for now". This is the only sane way to check + it though, so we potentially confuse users that they can run + BookE guests on BookS. 
Let's hope nobody dares enough :) */ + return 0; + } else { + if (!cap_segstate) { + fprintf(stderr, "kvm error: missing PVR setting capability\n"); + return -ENOSYS; + } + } + +#if !defined(CONFIG_KVM_PPC_PVR) + if (1) { + fprintf(stderr, "kvm error: missing PVR setting capability\n"); + return -ENOSYS; + } +#endif + + ret = kvm_vcpu_ioctl(cenv, KVM_GET_SREGS, &sregs); + if (ret) { + return ret; + } +#ifdef CONFIG_KVM_PPC_PVR sregs.pvr = cenv->spr[SPR_PVR]; - ret = kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs); +#endif + return kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs); +} + +int kvm_arch_init_vcpu(CPUState *cenv) +{ + int ret; + + ret = kvm_arch_sync_sregs(cenv); + if (ret) { + return ret; + } idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_env, cenv); @@ -122,6 +168,8 @@ int kvm_arch_put_registers(CPUState *env, int level) regs.sprg6 = env->spr[SPR_SPRG6]; regs.sprg7 = env->spr[SPR_SPRG7]; + regs.pid = env->spr[SPR_BOOKE_PID]; + for (i = 0;i < 32; i++) regs.gpr[i] = env->gpr[i]; @@ -136,15 +184,18 @@ int kvm_arch_get_registers(CPUState *env) { struct kvm_regs regs; struct kvm_sregs sregs; + uint32_t cr; int i, ret; ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, ®s); if (ret < 0) return ret; - ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs); - if (ret < 0) - return ret; + cr = regs.cr; + for (i = 7; i >= 0; i--) { + env->crf[i] = cr & 15; + cr >>= 4; + } env->ctr = regs.ctr; env->lr = regs.lr; @@ -164,11 +215,124 @@ int kvm_arch_get_registers(CPUState *env) env->spr[SPR_SPRG6] = regs.sprg6; env->spr[SPR_SPRG7] = regs.sprg7; + env->spr[SPR_BOOKE_PID] = regs.pid; + for (i = 0;i < 32; i++) env->gpr[i] = regs.gpr[i]; + if (cap_booke_sregs) { + ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs); + if (ret < 0) { + return ret; + } + +#ifdef KVM_CAP_PPC_BOOKE_SREGS + if (sregs.u.e.features & KVM_SREGS_E_BASE) { + env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0; + env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1; + env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr; + env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear; + env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr; + env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr; + env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr; + env->spr[SPR_DECR] = sregs.u.e.dec; + env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff; + env->spr[SPR_TBU] = sregs.u.e.tb >> 32; + env->spr[SPR_VRSAVE] = sregs.u.e.vrsave; + } + + if (sregs.u.e.features & KVM_SREGS_E_ARCH206) { + env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir; + env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0; + env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1; + env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar; + env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr; + } + + if (sregs.u.e.features & KVM_SREGS_E_64) { + env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr; + } + + if (sregs.u.e.features & KVM_SREGS_E_SPRG8) { + env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8; + } + + if (sregs.u.e.features & KVM_SREGS_E_IVOR) { + env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0]; + env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1]; + env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2]; + env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3]; + env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4]; + env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5]; + env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6]; + env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7]; + env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8]; + env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9]; + env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10]; + env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11]; + env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12]; + 
env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13]; + env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14]; + env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15]; + + if (sregs.u.e.features & KVM_SREGS_E_SPE) { + env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0]; + env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1]; + env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2]; + } + + if (sregs.u.e.features & KVM_SREGS_E_PM) { + env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3]; + } + + if (sregs.u.e.features & KVM_SREGS_E_PC) { + env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4]; + env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5]; + } + } + + if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) { + env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0; + env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1; + env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2; + env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff; + env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4; + env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6; + env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32; + env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg; + env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0]; + env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1]; + } + + if (sregs.u.e.features & KVM_SREGS_EXP) { + env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr; + } + + if (sregs.u.e.features & KVM_SREGS_E_PD) { + env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc; + env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc; + } + + if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { + env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr; + env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar; + env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0; + + if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) { + env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1; + env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2; + } + } +#endif + } + + if (cap_segstate) { + ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs); + if (ret < 0) { + return ret; + } + #ifdef KVM_CAP_PPC_SEGSTATE - if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_SEGSTATE)) { ppc_store_sdr1(env, sregs.u.s.sdr1); /* Sync SLB */ @@ -191,8 +355,8 @@ int kvm_arch_get_registers(CPUState *env) env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff; env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32; } - } #endif + } return 0; } diff --git a/target-ppc/op_helper.c b/target-ppc/op_helper.c index d5db484b4a..15d9222c72 100644 --- a/target-ppc/op_helper.c +++ b/target-ppc/op_helper.c @@ -362,7 +362,6 @@ void helper_icbi(target_ulong addr) * do the load "by hand". */ ldl(addr); - tb_invalidate_page_range(addr, addr + env->icache_line_size); } // XXX: to be tested @@ -972,7 +971,6 @@ void helper_store_fpscr (uint64_t arg, uint32_t mask) void helper_float_check_status (void) { -#ifdef CONFIG_SOFTFLOAT if (env->exception_index == POWERPC_EXCP_PROGRAM && (env->error_code & POWERPC_EXCP_FP)) { /* Differred floating-point exception after target FPR update */ @@ -990,22 +988,12 @@ void helper_float_check_status (void) float_inexact_excp(); } } -#else - if (env->exception_index == POWERPC_EXCP_PROGRAM && - (env->error_code & POWERPC_EXCP_FP)) { - /* Differred floating-point exception after target FPR update */ - if (msr_fe0 != 0 || msr_fe1 != 0) - helper_raise_exception_err(env->exception_index, env->error_code); - } -#endif } -#ifdef CONFIG_SOFTFLOAT void helper_reset_fpstatus (void) { set_float_exception_flags(0, &env->fp_status); } -#endif /* fadd - fadd. 
*/ uint64_t helper_fadd (uint64_t arg1, uint64_t arg2) @@ -4206,4 +4194,300 @@ target_ulong helper_440_tlbsx (target_ulong address) return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF); } +/* PowerPC BookE 2.06 TLB management */ + +static ppcemb_tlb_t *booke206_cur_tlb(CPUState *env) +{ + uint32_t tlbncfg = 0; + int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT; + int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK); + int tlb; + + tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT; + tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb]; + + if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) { + cpu_abort(env, "we don't support HES yet\n"); + } + + return booke206_get_tlbe(env, tlb, ea, esel); +} + +static inline target_phys_addr_t booke206_tlb_to_page_size(int size) +{ + return (1 << (size << 1)) << 10; +} + +static inline target_phys_addr_t booke206_page_size_to_tlb(uint64_t size) +{ + return (ffs(size >> 10) - 1) >> 1; +} + +void helper_booke_setpid(uint32_t pidn, target_ulong pid) +{ + env->spr[pidn] = pid; + /* changing PIDs mean we're in a different address space now */ + tlb_flush(env, 1); +} + +void helper_booke206_tlbwe(void) +{ + uint32_t tlbncfg, tlbn; + ppcemb_tlb_t *tlb; + target_phys_addr_t rpn; + int tlbe_size; + + switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) { + case MAS0_WQ_ALWAYS: + /* good to go, write that entry */ + break; + case MAS0_WQ_COND: + /* XXX check if reserved */ + if (0) { + return; + } + break; + case MAS0_WQ_CLR_RSRV: + /* XXX clear entry */ + return; + default: + /* no idea what to do */ + return; + } + + if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) && + !msr_gs) { + /* XXX we don't support direct LRAT setting yet */ + fprintf(stderr, "cpu: don't support LRAT setting yet\n"); + return; + } + + tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT; + tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn]; + + tlb = booke206_cur_tlb(env); + + if (msr_gs) { + cpu_abort(env, "missing HV implementation\n"); + } else { + rpn = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) | + (env->spr[SPR_BOOKE_MAS3] & 0xfffff000); + } + tlb->RPN = rpn; + + tlb->PID = (env->spr[SPR_BOOKE_MAS1] & MAS1_TID_MASK) >> MAS1_TID_SHIFT; + if (tlbncfg & TLBnCFG_AVAIL) { + tlbe_size = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) + >> MAS1_TSIZE_SHIFT; + } else { + tlbe_size = (tlbncfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT; + } + + tlb->size = booke206_tlb_to_page_size(tlbe_size); + tlb->EPN = (uint32_t)(env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK); + tlb->attr = env->spr[SPR_BOOKE_MAS2] & (MAS2_ACM | MAS2_VLE | MAS2_W | + MAS2_I | MAS2_M | MAS2_G | MAS2_E) + << 1; + + if (tlbncfg & TLBnCFG_IPROT) { + tlb->attr |= env->spr[SPR_BOOKE_MAS1] & MAS1_IPROT; + } + tlb->attr |= (env->spr[SPR_BOOKE_MAS3] & + ((MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3)) << 8); + if (env->spr[SPR_BOOKE_MAS1] & MAS1_TS) { + tlb->attr |= 1; + } + + tlb->prot = 0; + + if (env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) { + tlb->prot |= PAGE_VALID; + } + if (env->spr[SPR_BOOKE_MAS3] & MAS3_UX) { + tlb->prot |= PAGE_EXEC; + } + if (env->spr[SPR_BOOKE_MAS3] & MAS3_SX) { + tlb->prot |= PAGE_EXEC << 4; + } + if (env->spr[SPR_BOOKE_MAS3] & MAS3_UW) { + tlb->prot |= PAGE_WRITE; + } + if (env->spr[SPR_BOOKE_MAS3] & MAS3_SW) { + tlb->prot |= PAGE_WRITE << 4; + } + if (env->spr[SPR_BOOKE_MAS3] & MAS3_UR) { + tlb->prot |= PAGE_READ; + } + if (env->spr[SPR_BOOKE_MAS3] & MAS3_SR) { + tlb->prot |= PAGE_READ << 4; + } + + if (tlb->size == 
TARGET_PAGE_SIZE) { + tlb_flush_page(env, tlb->EPN); + } else { + tlb_flush(env, 1); + } +} + +static inline void booke206_tlb_to_mas(CPUState *env, ppcemb_tlb_t *tlb) +{ + int tlbn = booke206_tlbe_to_tlbn(env, tlb); + int way = booke206_tlbe_to_way(env, tlb); + + env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT; + env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT; + + env->spr[SPR_BOOKE_MAS1] = MAS1_VALID; + env->spr[SPR_BOOKE_MAS2] = 0; + + env->spr[SPR_BOOKE_MAS7] = (uint64_t)tlb->RPN >> 32; + env->spr[SPR_BOOKE_MAS3] = tlb->RPN; + env->spr[SPR_BOOKE_MAS1] |= tlb->PID << MAS1_TID_SHIFT; + env->spr[SPR_BOOKE_MAS1] |= booke206_page_size_to_tlb(tlb->size) + << MAS1_TSIZE_SHIFT; + env->spr[SPR_BOOKE_MAS1] |= tlb->attr & MAS1_IPROT; + if (tlb->attr & 1) { + env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; + } + + env->spr[SPR_BOOKE_MAS2] = tlb->EPN; + env->spr[SPR_BOOKE_MAS2] |= (tlb->attr >> 1) & + (MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E); + + if (tlb->prot & PAGE_EXEC) { + env->spr[SPR_BOOKE_MAS3] |= MAS3_UX; + } + if (tlb->prot & (PAGE_EXEC << 4)) { + env->spr[SPR_BOOKE_MAS3] |= MAS3_SX; + } + if (tlb->prot & PAGE_WRITE) { + env->spr[SPR_BOOKE_MAS3] |= MAS3_UW; + } + if (tlb->prot & (PAGE_WRITE << 4)) { + env->spr[SPR_BOOKE_MAS3] |= MAS3_SW; + } + if (tlb->prot & PAGE_READ) { + env->spr[SPR_BOOKE_MAS3] |= MAS3_UR; + } + if (tlb->prot & (PAGE_READ << 4)) { + env->spr[SPR_BOOKE_MAS3] |= MAS3_SR; + } + + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; +} + +void helper_booke206_tlbre(void) +{ + ppcemb_tlb_t *tlb = NULL; + + tlb = booke206_cur_tlb(env); + booke206_tlb_to_mas(env, tlb); +} + +void helper_booke206_tlbsx(target_ulong address) +{ + ppcemb_tlb_t *tlb = NULL; + int i, j; + target_phys_addr_t raddr; + uint32_t spid, sas; + + spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT; + sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS; + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + int ways = booke206_tlb_ways(env, i); + + for (j = 0; j < ways; j++) { + tlb = booke206_get_tlbe(env, i, address, j); + + if (ppcemb_tlb_check(env, tlb, &raddr, address, spid, 0, j)) { + continue; + } + + if (sas != (tlb->attr & MAS6_SAS)) { + continue; + } + + booke206_tlb_to_mas(env, tlb); + return; + } + } + + /* no entry found, fill with defaults */ + env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; + env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; + env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; + env->spr[SPR_BOOKE_MAS3] = 0; + env->spr[SPR_BOOKE_MAS7] = 0; + + if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) { + env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; + } + + env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16) + << MAS1_TID_SHIFT; + + /* next victim logic */ + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; + env->last_way++; + env->last_way &= booke206_tlb_ways(env, 0) - 1; + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; +} + +static inline void booke206_invalidate_ea_tlb(CPUState *env, int tlbn, + uint32_t ea) +{ + int i; + int ways = booke206_tlb_ways(env, tlbn); + + for (i = 0; i < ways; i++) { + ppcemb_tlb_t *tlb = booke206_get_tlbe(env, tlbn, ea, i); + target_phys_addr_t masked_ea = ea & ~(tlb->size - 1); + if ((tlb->EPN == (masked_ea >> MAS2_EPN_SHIFT)) && + !(tlb->attr & MAS1_IPROT)) { + tlb->prot = 0; + } + } +} + +void helper_booke206_tlbivax(target_ulong address) +{ + if (address & 0x4) { + /* flush all entries */ + if (address & 0x8) { + /* flush all of 
TLB1 */ + booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1); + } else { + /* flush all of TLB0 */ + booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0); + } + return; + } + + if (address & 0x8) { + /* flush TLB1 entries */ + booke206_invalidate_ea_tlb(env, 1, address); + tlb_flush(env, 1); + } else { + /* flush TLB0 entries */ + booke206_invalidate_ea_tlb(env, 0, address); + tlb_flush_page(env, address & MAS2_EPN_MASK); + } +} + +void helper_booke206_tlbflush(uint32_t type) +{ + int flags = 0; + + if (type & 2) { + flags |= BOOKE206_FLUSH_TLB1; + } + + if (type & 4) { + flags |= BOOKE206_FLUSH_TLB0; + } + + booke206_flush_tlb(env, flags, 1); +} + #endif /* !CONFIG_USER_ONLY */ diff --git a/target-ppc/translate.c b/target-ppc/translate.c index a943dbcf8e..59aef855d4 100644 --- a/target-ppc/translate.c +++ b/target-ppc/translate.c @@ -2,6 +2,7 @@ * PowerPC emulation for qemu: main translation routines. * * Copyright (c) 2003-2007 Jocelyn Mayer + * Copyright (C) 2011 Freescale Semiconductor, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -200,6 +201,8 @@ struct opc_handler_t { uint32_t inval; /* instruction type */ uint64_t type; + /* extended instruction type */ + uint64_t type2; /* handler */ void (*handler)(DisasContext *ctx); #if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) @@ -212,9 +215,7 @@ struct opc_handler_t { static inline void gen_reset_fpstatus(void) { -#ifdef CONFIG_SOFTFLOAT gen_helper_reset_fpstatus(); -#endif } static inline void gen_compute_fprf(TCGv_i64 arg, int set_fprf, int set_rc) @@ -313,10 +314,16 @@ static inline void gen_sync_exception(DisasContext *ctx) } #define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \ -GEN_OPCODE(name, opc1, opc2, opc3, inval, type) +GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE) + +#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \ +GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2) #define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \ -GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type) +GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE) + +#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \ +GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2) typedef struct opcode_t { unsigned char opc1, opc2, opc3; @@ -456,7 +463,7 @@ static inline target_ulong MASK(uint32_t start, uint32_t end) /* PowerPC instructions table */ #if defined(DO_PPC_STATISTICS) -#define GEN_OPCODE(name, op1, op2, op3, invl, _typ) \ +#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \ { \ .opc1 = op1, \ .opc2 = op2, \ @@ -465,12 +472,13 @@ static inline target_ulong MASK(uint32_t start, uint32_t end) .handler = { \ .inval = invl, \ .type = _typ, \ + .type2 = _typ2, \ .handler = &gen_##name, \ .oname = stringify(name), \ }, \ .oname = stringify(name), \ } -#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ) \ +#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \ { \ .opc1 = op1, \ .opc2 = op2, \ @@ -479,13 +487,14 @@ static inline target_ulong MASK(uint32_t start, uint32_t end) .handler = { \ .inval = invl, \ .type = _typ, \ + .type2 = _typ2, \ .handler = &gen_##name, \ .oname = onam, \ }, \ .oname = onam, \ } #else -#define GEN_OPCODE(name, op1, op2, op3, invl, _typ) \ +#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \ { \ .opc1 = op1, \ .opc2 = op2, \ @@ -494,11 +503,12 @@ static inline target_ulong MASK(uint32_t start, uint32_t end) .handler = { \ 
.inval = invl, \ .type = _typ, \ + .type2 = _typ2, \ .handler = &gen_##name, \ }, \ .oname = stringify(name), \ } -#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ) \ +#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \ { \ .opc1 = op1, \ .opc2 = op2, \ @@ -507,6 +517,7 @@ static inline target_ulong MASK(uint32_t start, uint32_t end) .handler = { \ .inval = invl, \ .type = _typ, \ + .type2 = _typ2, \ .handler = &gen_##name, \ }, \ .oname = onam, \ @@ -533,6 +544,7 @@ static void gen_invalid(DisasContext *ctx) static opc_handler_t invalid_handler = { .inval = 0xFFFFFFFF, .type = PPC_NONE, + .type2 = PPC_NONE, .handler = gen_invalid, }; @@ -5974,6 +5986,80 @@ static void gen_tlbwe_440(DisasContext *ctx) #endif } +/* TLB management - PowerPC BookE 2.06 implementation */ + +/* tlbre */ +static void gen_tlbre_booke206(DisasContext *ctx) +{ +#if defined(CONFIG_USER_ONLY) + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); +#else + if (unlikely(!ctx->mem_idx)) { + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); + return; + } + + gen_helper_booke206_tlbre(); +#endif +} + +/* tlbsx - tlbsx. */ +static void gen_tlbsx_booke206(DisasContext *ctx) +{ +#if defined(CONFIG_USER_ONLY) + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); +#else + TCGv t0; + if (unlikely(!ctx->mem_idx)) { + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); + return; + } + + if (rA(ctx->opcode)) { + t0 = tcg_temp_new(); + tcg_gen_mov_tl(t0, cpu_gpr[rD(ctx->opcode)]); + } else { + t0 = tcg_const_tl(0); + } + + tcg_gen_add_tl(t0, t0, cpu_gpr[rB(ctx->opcode)]); + gen_helper_booke206_tlbsx(t0); +#endif +} + +/* tlbwe */ +static void gen_tlbwe_booke206(DisasContext *ctx) +{ +#if defined(CONFIG_USER_ONLY) + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); +#else + if (unlikely(!ctx->mem_idx)) { + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); + return; + } + gen_helper_booke206_tlbwe(); +#endif +} + +static void gen_tlbivax_booke206(DisasContext *ctx) +{ +#if defined(CONFIG_USER_ONLY) + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); +#else + TCGv t0; + if (unlikely(!ctx->mem_idx)) { + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); + return; + } + + t0 = tcg_temp_new(); + gen_addr_reg_index(ctx, t0); + + gen_helper_booke206_tlbivax(t0); +#endif +} + + /* wrtee */ static void gen_wrtee(DisasContext *ctx) { @@ -8420,7 +8506,7 @@ GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT), GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON), GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON), GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP), -GEN_HANDLER(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE), +GEN_HANDLER_E(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE, PPC2_BOOKE206), GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI), GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI), GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB), @@ -8429,12 +8515,23 @@ GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB), GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE), GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE), GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE), +GEN_HANDLER2_E(tlbre_booke206, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, + PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER2_E(tlbsx_booke206, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, + PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER2_E(tlbwe_booke206, "tlbwe", 0x1F, 0x12, 
0x1E, 0x00000001, + PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001, + PPC_NONE, PPC2_BOOKE206), GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE), GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE), GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC), -GEN_HANDLER(mbar, 0x1F, 0x16, 0x1a, 0x001FF801, PPC_BOOKE), -GEN_HANDLER(msync, 0x1F, 0x16, 0x12, 0x03FFF801, PPC_BOOKE), -GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001, PPC_BOOKE), +GEN_HANDLER_E(mbar, 0x1F, 0x16, 0x1a, 0x001FF801, + PPC_BOOKE, PPC2_BOOKE206), +GEN_HANDLER_E(msync, 0x1F, 0x16, 0x12, 0x03FFF801, + PPC_BOOKE, PPC2_BOOKE206), +GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001, + PPC_BOOKE, PPC2_BOOKE206), GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC), GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC), GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC), @@ -9124,9 +9221,84 @@ void cpu_dump_state (CPUState *env, FILE *f, fprintf_function cpu_fprintf, } cpu_fprintf(f, "FPSCR %08x\n", env->fpscr); #if !defined(CONFIG_USER_ONLY) - cpu_fprintf(f, "SRR0 " TARGET_FMT_lx " SRR1 " TARGET_FMT_lx " SDR1 " - TARGET_FMT_lx "\n", env->spr[SPR_SRR0], env->spr[SPR_SRR1], - env->spr[SPR_SDR1]); + cpu_fprintf(f, " SRR0 " TARGET_FMT_lx " SRR1 " TARGET_FMT_lx + " PVR " TARGET_FMT_lx " VRSAVE " TARGET_FMT_lx "\n", + env->spr[SPR_SRR0], env->spr[SPR_SRR1], + env->spr[SPR_PVR], env->spr[SPR_VRSAVE]); + + cpu_fprintf(f, "SPRG0 " TARGET_FMT_lx " SPRG1 " TARGET_FMT_lx + " SPRG2 " TARGET_FMT_lx " SPRG3 " TARGET_FMT_lx "\n", + env->spr[SPR_SPRG0], env->spr[SPR_SPRG1], + env->spr[SPR_SPRG2], env->spr[SPR_SPRG3]); + + cpu_fprintf(f, "SPRG4 " TARGET_FMT_lx " SPRG5 " TARGET_FMT_lx + " SPRG6 " TARGET_FMT_lx " SPRG7 " TARGET_FMT_lx "\n", + env->spr[SPR_SPRG4], env->spr[SPR_SPRG5], + env->spr[SPR_SPRG6], env->spr[SPR_SPRG7]); + + if (env->excp_model == POWERPC_EXCP_BOOKE) { + cpu_fprintf(f, "CSRR0 " TARGET_FMT_lx " CSRR1 " TARGET_FMT_lx + " MCSRR0 " TARGET_FMT_lx " MCSRR1 " TARGET_FMT_lx "\n", + env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1], + env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]); + + cpu_fprintf(f, " TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx + " ESR " TARGET_FMT_lx " DEAR " TARGET_FMT_lx "\n", + env->spr[SPR_BOOKE_TCR], env->spr[SPR_BOOKE_TSR], + env->spr[SPR_BOOKE_ESR], env->spr[SPR_BOOKE_DEAR]); + + cpu_fprintf(f, " PIR " TARGET_FMT_lx " DECAR " TARGET_FMT_lx + " IVPR " TARGET_FMT_lx " EPCR " TARGET_FMT_lx "\n", + env->spr[SPR_BOOKE_PIR], env->spr[SPR_BOOKE_DECAR], + env->spr[SPR_BOOKE_IVPR], env->spr[SPR_BOOKE_EPCR]); + + cpu_fprintf(f, " MCSR " TARGET_FMT_lx " SPRG8 " TARGET_FMT_lx + " EPR " TARGET_FMT_lx "\n", + env->spr[SPR_BOOKE_MCSR], env->spr[SPR_BOOKE_SPRG8], + env->spr[SPR_BOOKE_EPR]); + + /* FSL-specific */ + cpu_fprintf(f, " MCAR " TARGET_FMT_lx " PID1 " TARGET_FMT_lx + " PID2 " TARGET_FMT_lx " SVR " TARGET_FMT_lx "\n", + env->spr[SPR_Exxx_MCAR], env->spr[SPR_BOOKE_PID1], + env->spr[SPR_BOOKE_PID2], env->spr[SPR_E500_SVR]); + + /* + * IVORs are left out as they are large and do not change often -- + * they can be read with "p $ivor0", "p $ivor1", etc. 
+ */ + } + + switch (env->mmu_model) { + case POWERPC_MMU_32B: + case POWERPC_MMU_601: + case POWERPC_MMU_SOFT_6xx: + case POWERPC_MMU_SOFT_74xx: +#if defined(TARGET_PPC64) + case POWERPC_MMU_620: + case POWERPC_MMU_64B: +#endif + cpu_fprintf(f, " SDR1 " TARGET_FMT_lx "\n", env->spr[SPR_SDR1]); + break; + case POWERPC_MMU_BOOKE206: + cpu_fprintf(f, " MAS0 " TARGET_FMT_lx " MAS1 " TARGET_FMT_lx + " MAS2 " TARGET_FMT_lx " MAS3 " TARGET_FMT_lx "\n", + env->spr[SPR_BOOKE_MAS0], env->spr[SPR_BOOKE_MAS1], + env->spr[SPR_BOOKE_MAS2], env->spr[SPR_BOOKE_MAS3]); + + cpu_fprintf(f, " MAS4 " TARGET_FMT_lx " MAS6 " TARGET_FMT_lx + " MAS7 " TARGET_FMT_lx " PID " TARGET_FMT_lx "\n", + env->spr[SPR_BOOKE_MAS4], env->spr[SPR_BOOKE_MAS6], + env->spr[SPR_BOOKE_MAS7], env->spr[SPR_BOOKE_PID]); + + cpu_fprintf(f, "MMUCFG " TARGET_FMT_lx " TLB0CFG " TARGET_FMT_lx + " TLB1CFG " TARGET_FMT_lx "\n", + env->spr[SPR_MMUCFG], env->spr[SPR_BOOKE_TLB0CFG], + env->spr[SPR_BOOKE_TLB1CFG]); + break; + default: + break; + } #endif #undef RGPL diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c index e2a83c5a38..fc50ae3cd2 100644 --- a/target-ppc/translate_init.c +++ b/target-ppc/translate_init.c @@ -37,6 +37,7 @@ struct ppc_def_t { uint32_t pvr; uint32_t svr; uint64_t insns_flags; + uint64_t insns_flags2; uint64_t msr_mask; powerpc_mmu_t mmu_model; powerpc_excp_t excp_model; @@ -72,7 +73,7 @@ static void spr_read_generic (void *opaque, int gprn, int sprn) gen_load_spr(cpu_gpr[gprn], sprn); #ifdef PPC_DUMP_SPR_ACCESSES { - TCGv t0 = tcg_const_i32(sprn); + TCGv_i32 t0 = tcg_const_i32(sprn); gen_helper_load_dump_spr(t0); tcg_temp_free_i32(t0); } @@ -84,7 +85,7 @@ static void spr_write_generic (void *opaque, int sprn, int gprn) gen_store_spr(sprn, cpu_gpr[gprn]); #ifdef PPC_DUMP_SPR_ACCESSES { - TCGv t0 = tcg_const_i32(sprn); + TCGv_i32 t0 = tcg_const_i32(sprn); gen_helper_store_dump_spr(t0); tcg_temp_free_i32(t0); } @@ -1354,6 +1355,31 @@ static void gen_74xx_soft_tlb (CPUPPCState *env, int nb_tlbs, int nb_ways) #endif } +#if !defined(CONFIG_USER_ONLY) +static void spr_write_e500_l1csr0 (void *opaque, int sprn, int gprn) +{ + TCGv t0 = tcg_temp_new(); + + tcg_gen_andi_tl(t0, cpu_gpr[gprn], ~256); + gen_store_spr(sprn, t0); + tcg_temp_free(t0); +} + +static void spr_write_booke206_mmucsr0 (void *opaque, int sprn, int gprn) +{ + TCGv_i32 t0 = tcg_const_i32(sprn); + gen_helper_booke206_tlbflush(t0); + tcg_temp_free_i32(t0); +} + +static void spr_write_booke_pid (void *opaque, int sprn, int gprn) +{ + TCGv_i32 t0 = tcg_const_i32(sprn); + gen_helper_booke_setpid(t0, cpu_gpr[gprn]); + tcg_temp_free_i32(t0); +} +#endif + static void gen_spr_usprgh (CPUPPCState *env) { spr_register(env, SPR_USPRG4, "USPRG4", @@ -1493,7 +1519,7 @@ static void gen_spr_BookE (CPUPPCState *env, uint64_t ivor_mask) } spr_register(env, SPR_BOOKE_PID, "PID", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_booke_pid, 0x00000000); spr_register(env, SPR_BOOKE_TCR, "TCR", SPR_NOACCESS, SPR_NOACCESS, @@ -1535,8 +1561,19 @@ static void gen_spr_BookE (CPUPPCState *env, uint64_t ivor_mask) 0x00000000); } -/* FSL storage control registers */ -static void gen_spr_BookE_FSL (CPUPPCState *env, uint32_t mas_mask) +static inline uint32_t gen_tlbncfg(uint32_t assoc, uint32_t minsize, + uint32_t maxsize, uint32_t flags, + uint32_t nentries) +{ + return (assoc << TLBnCFG_ASSOC_SHIFT) | + (minsize << TLBnCFG_MINSIZE_SHIFT) | + (maxsize << TLBnCFG_MAXSIZE_SHIFT) | + flags | nentries; +} + +/* BookE 2.06 
storage control registers */ +static void gen_spr_BookE206(CPUPPCState *env, uint32_t mas_mask, + uint32_t *tlbncfg) { #if !defined(CONFIG_USER_ONLY) const char *mas_names[8] = { @@ -1562,14 +1599,14 @@ static void gen_spr_BookE_FSL (CPUPPCState *env, uint32_t mas_mask) /* XXX : not implemented */ spr_register(env, SPR_BOOKE_PID1, "PID1", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_booke_pid, 0x00000000); } if (env->nb_pids > 2) { /* XXX : not implemented */ spr_register(env, SPR_BOOKE_PID2, "PID2", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_booke_pid, 0x00000000); } /* XXX : not implemented */ @@ -1577,45 +1614,38 @@ static void gen_spr_BookE_FSL (CPUPPCState *env, uint32_t mas_mask) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); /* TOFIX */ - /* XXX : not implemented */ - spr_register(env, SPR_MMUCSR0, "MMUCSR0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); /* TOFIX */ switch (env->nb_ways) { case 4: - /* XXX : not implemented */ spr_register(env, SPR_BOOKE_TLB3CFG, "TLB3CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, - 0x00000000); /* TOFIX */ + tlbncfg[3]); /* Fallthru */ case 3: - /* XXX : not implemented */ spr_register(env, SPR_BOOKE_TLB2CFG, "TLB2CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, - 0x00000000); /* TOFIX */ + tlbncfg[2]); /* Fallthru */ case 2: - /* XXX : not implemented */ spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, - 0x00000000); /* TOFIX */ + tlbncfg[1]); /* Fallthru */ case 1: - /* XXX : not implemented */ spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, - 0x00000000); /* TOFIX */ + tlbncfg[0]); /* Fallthru */ case 0: default: break; } #endif + + gen_spr_usprgh(env); } /* SPR specific to PowerPC 440 implementation */ @@ -3201,6 +3231,7 @@ static int check_pow_hid0_74xx (CPUPPCState *env) PPC_CACHE_DCBZ | \ PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_4xx_COMMON | PPC_40x_EXCP) +#define POWERPC_INSNS2_401 (PPC_NONE) #define POWERPC_MSRM_401 (0x00000000000FD201ULL) #define POWERPC_MMU_401 (POWERPC_MMU_REAL) #define POWERPC_EXCP_401 (POWERPC_EXCP_40x) @@ -3230,6 +3261,7 @@ static void init_proc_401 (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | \ PPC_4xx_COMMON | PPC_40x_EXCP) +#define POWERPC_INSNS2_401x2 (PPC_NONE) #define POWERPC_MSRM_401x2 (0x00000000001FD231ULL) #define POWERPC_MMU_401x2 (POWERPC_MMU_SOFT_4xx_Z) #define POWERPC_EXCP_401x2 (POWERPC_EXCP_40x) @@ -3266,6 +3298,7 @@ static void init_proc_401x2 (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | \ PPC_4xx_COMMON | PPC_40x_EXCP) +#define POWERPC_INSNS2_401x3 (PPC_NONE) #define POWERPC_MSRM_401x3 (0x00000000001FD631ULL) #define POWERPC_MMU_401x3 (POWERPC_MMU_SOFT_4xx_Z) #define POWERPC_EXCP_401x3 (POWERPC_EXCP_40x) @@ -3298,6 +3331,7 @@ static void init_proc_401x3 (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | \ PPC_4xx_COMMON | PPC_40x_EXCP) +#define POWERPC_INSNS2_IOP480 (PPC_NONE) #define POWERPC_MSRM_IOP480 (0x00000000001FD231ULL) #define POWERPC_MMU_IOP480 (POWERPC_MMU_SOFT_4xx_Z) #define POWERPC_EXCP_IOP480 (POWERPC_EXCP_40x) @@ -3333,6 +3367,7 @@ static void init_proc_IOP480 (CPUPPCState *env) PPC_CACHE_DCBZ | \ 
PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_4xx_COMMON | PPC_40x_EXCP) +#define POWERPC_INSNS2_403 (PPC_NONE) #define POWERPC_MSRM_403 (0x000000000007D00DULL) #define POWERPC_MMU_403 (POWERPC_MMU_REAL) #define POWERPC_EXCP_403 (POWERPC_EXCP_40x) @@ -3363,6 +3398,7 @@ static void init_proc_403 (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | \ PPC_4xx_COMMON | PPC_40x_EXCP) +#define POWERPC_INSNS2_403GCX (PPC_NONE) #define POWERPC_MSRM_403GCX (0x000000000007D00DULL) #define POWERPC_MMU_403GCX (POWERPC_MMU_SOFT_4xx_Z) #define POWERPC_EXCP_403GCX (POWERPC_EXCP_40x) @@ -3411,6 +3447,7 @@ static void init_proc_403GCX (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | \ PPC_4xx_COMMON | PPC_405_MAC | PPC_40x_EXCP) +#define POWERPC_INSNS2_405 (PPC_NONE) #define POWERPC_MSRM_405 (0x000000000006E630ULL) #define POWERPC_MMU_405 (POWERPC_MMU_SOFT_4xx) #define POWERPC_EXCP_405 (POWERPC_EXCP_40x) @@ -3458,6 +3495,7 @@ static void init_proc_405 (CPUPPCState *env) PPC_MEM_TLBSYNC | PPC_MFTB | \ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | \ PPC_440_SPEC) +#define POWERPC_INSNS2_440EP (PPC_NONE) #define POWERPC_MSRM_440EP (0x000000000006D630ULL) #define POWERPC_MMU_440EP (POWERPC_MMU_BOOKE) #define POWERPC_EXCP_440EP (POWERPC_EXCP_BOOKE) @@ -3538,6 +3576,7 @@ static void init_proc_440EP (CPUPPCState *env) PPC_MEM_TLBSYNC | PPC_TLBIVA | PPC_MFTB | \ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | \ PPC_440_SPEC) +#define POWERPC_INSNS2_440GP (PPC_NONE) #define POWERPC_MSRM_440GP (0x000000000006FF30ULL) #define POWERPC_MMU_440GP (POWERPC_MMU_BOOKE) #define POWERPC_EXCP_440GP (POWERPC_EXCP_BOOKE) @@ -3600,6 +3639,7 @@ static void init_proc_440GP (CPUPPCState *env) PPC_MEM_TLBSYNC | PPC_MFTB | \ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | \ PPC_440_SPEC) +#define POWERPC_INSNS2_440x4 (PPC_NONE) #define POWERPC_MSRM_440x4 (0x000000000006FF30ULL) #define POWERPC_MMU_440x4 (POWERPC_MMU_BOOKE) #define POWERPC_EXCP_440x4 (POWERPC_EXCP_BOOKE) @@ -3662,6 +3702,7 @@ static void init_proc_440x4 (CPUPPCState *env) PPC_MEM_TLBSYNC | PPC_MFTB | \ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | \ PPC_440_SPEC) +#define POWERPC_INSNS2_440x5 (PPC_NONE) #define POWERPC_MSRM_440x5 (0x000000000006FF30ULL) #define POWERPC_MMU_440x5 (POWERPC_MMU_BOOKE) #define POWERPC_EXCP_440x5 (POWERPC_EXCP_BOOKE) @@ -3742,6 +3783,7 @@ static void init_proc_440x5 (CPUPPCState *env) PPC_MEM_TLBSYNC | PPC_TLBIVA | \ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | \ PPC_440_SPEC) +#define POWERPC_INSNS2_460 (PPC_NONE) #define POWERPC_MSRM_460 (0x000000000006FF30ULL) #define POWERPC_MMU_460 (POWERPC_MMU_BOOKE) #define POWERPC_EXCP_460 (POWERPC_EXCP_BOOKE) @@ -3831,6 +3873,7 @@ static void init_proc_460 (CPUPPCState *env) PPC_MEM_TLBSYNC | PPC_TLBIVA | \ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | \ PPC_440_SPEC) +#define POWERPC_INSNS2_460F (PPC_NONE) #define POWERPC_MSRM_460 (0x000000000006FF30ULL) #define POWERPC_MMU_460F (POWERPC_MMU_BOOKE) #define POWERPC_EXCP_460F (POWERPC_EXCP_BOOKE) @@ -3913,6 +3956,7 @@ static void init_proc_460F (CPUPPCState *env) PPC_MEM_EIEIO | PPC_MEM_SYNC | \ PPC_CACHE_ICBI | PPC_FLOAT | PPC_FLOAT_STFIWX | \ PPC_MFTB) +#define POWERPC_INSNS2_MPC5xx (PPC_NONE) #define POWERPC_MSRM_MPC5xx (0x000000000001FF43ULL) #define POWERPC_MMU_MPC5xx (POWERPC_MMU_REAL) #define POWERPC_EXCP_MPC5xx (POWERPC_EXCP_603) @@ -3939,6 +3983,7 @@ static void init_proc_MPC5xx (CPUPPCState *env) #define POWERPC_INSNS_MPC8xx (PPC_INSNS_BASE | PPC_STRING | \ PPC_MEM_EIEIO 
| PPC_MEM_SYNC | \ PPC_CACHE_ICBI | PPC_MFTB) +#define POWERPC_INSNS2_MPC8xx (PPC_NONE) #define POWERPC_MSRM_MPC8xx (0x000000000001F673ULL) #define POWERPC_MMU_MPC8xx (POWERPC_MMU_MPC8xx) #define POWERPC_EXCP_MPC8xx (POWERPC_EXCP_603) @@ -3970,6 +4015,7 @@ static void init_proc_MPC8xx (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_G2 (PPC_NONE) #define POWERPC_MSRM_G2 (0x000000000006FFF2ULL) #define POWERPC_MMU_G2 (POWERPC_MMU_SOFT_6xx) //#define POWERPC_EXCP_G2 (POWERPC_EXCP_G2) @@ -4027,6 +4073,7 @@ static void init_proc_G2 (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_G2LE (PPC_NONE) #define POWERPC_MSRM_G2LE (0x000000000007FFF3ULL) #define POWERPC_MMU_G2LE (POWERPC_MMU_SOFT_6xx) #define POWERPC_EXCP_G2LE (POWERPC_EXCP_G2) @@ -4093,8 +4140,9 @@ static void init_proc_G2LE (CPUPPCState *env) PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \ PPC_MEM_TLBSYNC | PPC_TLBIVAX | \ PPC_BOOKE) +#define POWERPC_INSNS2_e200 (PPC_NONE) #define POWERPC_MSRM_e200 (0x000000000606FF30ULL) -#define POWERPC_MMU_e200 (POWERPC_MMU_BOOKE_FSL) +#define POWERPC_MMU_e200 (POWERPC_MMU_BOOKE206) #define POWERPC_EXCP_e200 (POWERPC_EXCP_BOOKE) #define POWERPC_INPUT_e200 (PPC_FLAGS_INPUT_BookE) #define POWERPC_BFDM_e200 (bfd_mach_ppc_860) @@ -4115,7 +4163,7 @@ static void init_proc_e200 (CPUPPCState *env) &spr_read_spefscr, &spr_write_spefscr, 0x00000000); /* Memory management */ - gen_spr_BookE_FSL(env, 0x0000005D); + gen_spr_BookE206(env, 0x0000005D, NULL); /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, @@ -4186,6 +4234,11 @@ static void init_proc_e200 (CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MMUCSR0, "MMUCSR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); /* TOFIX */ spr_register(env, SPR_BOOKE_DSRR0, "DSRR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -4213,6 +4266,7 @@ static void init_proc_e200 (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_e300 (PPC_NONE) #define POWERPC_MSRM_e300 (0x000000000007FFF3ULL) #define POWERPC_MMU_e300 (POWERPC_MMU_SOFT_6xx) #define POWERPC_EXCP_e300 (POWERPC_EXCP_603) @@ -4262,10 +4316,10 @@ static void init_proc_e300 (CPUPPCState *env) PPC_WRTEE | PPC_RFDI | \ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | \ PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \ - PPC_MEM_TLBSYNC | PPC_TLBIVAX | \ - PPC_BOOKE) + PPC_MEM_TLBSYNC | PPC_TLBIVAX) +#define POWERPC_INSNS2_e500v1 (PPC2_BOOKE206) #define POWERPC_MSRM_e500v1 (0x000000000606FF30ULL) -#define POWERPC_MMU_e500v1 (POWERPC_MMU_BOOKE_FSL) +#define POWERPC_MMU_e500v1 (POWERPC_MMU_BOOKE206) #define POWERPC_EXCP_e500v1 (POWERPC_EXCP_BOOKE) #define POWERPC_INPUT_e500v1 (PPC_FLAGS_INPUT_BookE) #define POWERPC_BFDM_e500v1 (bfd_mach_ppc_860) @@ -4273,7 +4327,7 @@ static void init_proc_e300 (CPUPPCState *env) POWERPC_FLAG_UBLE | POWERPC_FLAG_DE | \ POWERPC_FLAG_BUS_CLK) #define check_pow_e500v1 check_pow_hid0 -#define init_proc_e500v1 init_proc_e500 +#define init_proc_e500v1 init_proc_e500v1 /* e500v2 core */ #define POWERPC_INSNS_e500v2 (PPC_INSNS_BASE | PPC_ISEL | \ @@ -4281,10 +4335,10 @@ static void init_proc_e300 (CPUPPCState *env) 
PPC_WRTEE | PPC_RFDI | \ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | \ PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \ - PPC_MEM_TLBSYNC | PPC_TLBIVAX | \ - PPC_BOOKE) + PPC_MEM_TLBSYNC | PPC_TLBIVAX) +#define POWERPC_INSNS2_e500v2 (PPC2_BOOKE206) #define POWERPC_MSRM_e500v2 (0x000000000606FF30ULL) -#define POWERPC_MMU_e500v2 (POWERPC_MMU_BOOKE_FSL) +#define POWERPC_MMU_e500v2 (POWERPC_MMU_BOOKE206) #define POWERPC_EXCP_e500v2 (POWERPC_EXCP_BOOKE) #define POWERPC_INPUT_e500v2 (PPC_FLAGS_INPUT_BookE) #define POWERPC_BFDM_e500v2 (bfd_mach_ppc_860) @@ -4292,13 +4346,23 @@ static void init_proc_e300 (CPUPPCState *env) POWERPC_FLAG_UBLE | POWERPC_FLAG_DE | \ POWERPC_FLAG_BUS_CLK) #define check_pow_e500v2 check_pow_hid0 -#define init_proc_e500v2 init_proc_e500 +#define init_proc_e500v2 init_proc_e500v2 -static void init_proc_e500 (CPUPPCState *env) +static void init_proc_e500 (CPUPPCState *env, int version) { + uint32_t tlbncfg[2]; +#if !defined(CONFIG_USER_ONLY) + int i; +#endif + /* Time base */ gen_tbl(env); - gen_spr_BookE(env, 0x0000000F0000FD7FULL); + /* + * XXX The e500 doesn't implement IVOR7 and IVOR9, but doesn't + * complain when accessing them. + * gen_spr_BookE(env, 0x0000000F0000FD7FULL); + */ + gen_spr_BookE(env, 0x0000000F0000FFFFULL); /* Processor identification */ spr_register(env, SPR_BOOKE_PIR, "PIR", SPR_NOACCESS, SPR_NOACCESS, @@ -4312,8 +4376,24 @@ static void init_proc_e500 (CPUPPCState *env) /* Memory management */ #if !defined(CONFIG_USER_ONLY) env->nb_pids = 3; + env->nb_ways = 2; + env->id_tlbs = 0; + switch (version) { + case 1: + /* e500v1 */ + tlbncfg[0] = gen_tlbncfg(2, 1, 1, 0, 256); + tlbncfg[1] = gen_tlbncfg(16, 1, 9, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16); + break; + case 2: + /* e500v2 */ + tlbncfg[0] = gen_tlbncfg(4, 1, 1, 0, 512); + tlbncfg[1] = gen_tlbncfg(16, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16); + break; + default: + cpu_abort(env, "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]); + } #endif - gen_spr_BookE_FSL(env, 0x0000005F); + gen_spr_BookE206(env, 0x000000DF, tlbncfg); /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, @@ -4362,23 +4442,13 @@ static void init_proc_e500 (CPUPPCState *env) /* XXX : not implemented */ spr_register(env, SPR_Exxx_L1CSR0, "L1CSR0", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_e500_l1csr0, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_Exxx_L1CSR1, "L1CSR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - /* XXX : not implemented */ - spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -4387,11 +4457,18 @@ static void init_proc_e500 (CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); + spr_register(env, SPR_MMUCSR0, "MMUCSR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_booke206_mmucsr0, + 0x00000000); + #if !defined(CONFIG_USER_ONLY) - env->nb_tlb = 64; - env->nb_ways = 1; - env->id_tlbs = 0; + env->nb_tlb = 0; + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + env->nb_tlb += booke206_tlb_size(env, i); + } #endif + init_excp_e200(env); env->dcache_line_size = 32; 
env->icache_line_size = 32; @@ -4399,6 +4476,16 @@ static void init_proc_e500 (CPUPPCState *env) ppce500_irq_init(env); } +static void init_proc_e500v1(CPUPPCState *env) +{ + init_proc_e500(env, 1); +} + +static void init_proc_e500v2(CPUPPCState *env) +{ + init_proc_e500(env, 2); +} + /* Non-embedded PowerPC */ /* POWER : same as 601, without mfmsr, mfsr */ @@ -4414,6 +4501,7 @@ static void init_proc_e500 (CPUPPCState *env) PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \ PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_601 (PPC_NONE) #define POWERPC_MSRM_601 (0x000000000000FD70ULL) #define POWERPC_MSRR_601 (0x0000000000001040ULL) //#define POWERPC_MMU_601 (POWERPC_MMU_601) @@ -4466,6 +4554,7 @@ static void init_proc_601 (CPUPPCState *env) PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | \ PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_601v (PPC_NONE) #define POWERPC_MSRM_601v (0x000000000000FD70ULL) #define POWERPC_MSRR_601v (0x0000000000001040ULL) #define POWERPC_MMU_601v (POWERPC_MMU_601) @@ -4493,6 +4582,7 @@ static void init_proc_601v (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_6xx_TLB | PPC_MEM_TLBSYNC | \ PPC_SEGMENT | PPC_602_SPEC) +#define POWERPC_INSNS2_602 (PPC_NONE) #define POWERPC_MSRM_602 (0x0000000000C7FF73ULL) /* XXX: 602 MMU is quite specific. Should add a special case */ #define POWERPC_MMU_602 (POWERPC_MMU_SOFT_6xx) @@ -4538,6 +4628,7 @@ static void init_proc_602 (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_603 (PPC_NONE) #define POWERPC_MSRM_603 (0x000000000007FF73ULL) #define POWERPC_MMU_603 (POWERPC_MMU_SOFT_6xx) //#define POWERPC_EXCP_603 (POWERPC_EXCP_603) @@ -4582,6 +4673,7 @@ static void init_proc_603 (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_603E (PPC_NONE) #define POWERPC_MSRM_603E (0x000000000007FF73ULL) #define POWERPC_MMU_603E (POWERPC_MMU_SOFT_6xx) //#define POWERPC_EXCP_603E (POWERPC_EXCP_603E) @@ -4631,6 +4723,7 @@ static void init_proc_603E (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_604 (PPC_NONE) #define POWERPC_MSRM_604 (0x000000000005FF77ULL) #define POWERPC_MMU_604 (POWERPC_MMU_32B) //#define POWERPC_EXCP_604 (POWERPC_EXCP_604) @@ -4669,6 +4762,7 @@ static void init_proc_604 (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_604E (PPC_NONE) #define POWERPC_MSRM_604E (0x000000000005FF77ULL) #define POWERPC_MMU_604E (POWERPC_MMU_32B) #define POWERPC_EXCP_604E (POWERPC_EXCP_604) @@ -4727,6 +4821,7 @@ static void init_proc_604E (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_740 (PPC_NONE) #define POWERPC_MSRM_740 (0x000000000005FF77ULL) #define POWERPC_MMU_740 (POWERPC_MMU_32B) #define POWERPC_EXCP_740 (POWERPC_EXCP_7x0) @@ -4772,6 +4867,7 @@ static void init_proc_740 (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_750 (PPC_NONE) #define POWERPC_MSRM_750 (0x000000000005FF77ULL) #define POWERPC_MMU_750 (POWERPC_MMU_32B) #define POWERPC_EXCP_750 (POWERPC_EXCP_7x0) 
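The e500 init code a few hunks above packs each TLB array's geometry into its TLBnCFG SPR value through gen_tlbncfg(). The standalone C sketch below (not part of the patch) reproduces that packing for the e500v2 geometry passed in init_proc_e500; the shift and flag constants are assumptions based on the BookE TLBnCFG field layout (associativity in the top byte, MINSIZE/MAXSIZE nibbles, entry count in the low bits) and are not copied from QEMU's headers.

    /* Illustration only: packs TLB geometry the way gen_tlbncfg() does.
     * The constants below are assumed from the BookE TLBnCFG layout,
     * not taken from QEMU's cpu.h. */
    #include <stdint.h>
    #include <stdio.h>

    #define TLBnCFG_ASSOC_SHIFT   24        /* associativity, top byte      */
    #define TLBnCFG_MINSIZE_SHIFT 20        /* log2-coded minimum page size */
    #define TLBnCFG_MAXSIZE_SHIFT 16        /* log2-coded maximum page size */
    #define TLBnCFG_AVAIL         (1u << 14)
    #define TLBnCFG_IPROT         (1u << 15)

    static uint32_t gen_tlbncfg(uint32_t assoc, uint32_t minsize,
                                uint32_t maxsize, uint32_t flags,
                                uint32_t nentries)
    {
        return (assoc << TLBnCFG_ASSOC_SHIFT) |
               (minsize << TLBnCFG_MINSIZE_SHIFT) |
               (maxsize << TLBnCFG_MAXSIZE_SHIFT) |
               flags | nentries;
    }

    int main(void)
    {
        /* e500v2: TLB0 = 512 entries, 4-way; TLB1 = 16 entries, fully
         * associative, protected and variable-size (AVAIL | IPROT). */
        printf("TLB0CFG = 0x%08x\n", gen_tlbncfg(4, 1, 1, 0, 512));
        printf("TLB1CFG = 0x%08x\n",
               gen_tlbncfg(16, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16));
        return 0;
    }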
@@ -4863,6 +4959,7 @@ static void init_proc_750 (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_750cl (PPC_NONE) #define POWERPC_MSRM_750cl (0x000000000005FF77ULL) #define POWERPC_MMU_750cl (POWERPC_MMU_32B) #define POWERPC_EXCP_750cl (POWERPC_EXCP_7x0) @@ -5001,6 +5098,7 @@ static void init_proc_750cl (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_750cx (PPC_NONE) #define POWERPC_MSRM_750cx (0x000000000005FF77ULL) #define POWERPC_MMU_750cx (POWERPC_MMU_32B) #define POWERPC_EXCP_750cx (POWERPC_EXCP_7x0) @@ -5058,6 +5156,7 @@ static void init_proc_750cx (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_750fx (PPC_NONE) #define POWERPC_MSRM_750fx (0x000000000005FF77ULL) #define POWERPC_MMU_750fx (POWERPC_MMU_32B) #define POWERPC_EXCP_750fx (POWERPC_EXCP_7x0) @@ -5120,6 +5219,7 @@ static void init_proc_750fx (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_750gx (PPC_NONE) #define POWERPC_MSRM_750gx (0x000000000005FF77ULL) #define POWERPC_MMU_750gx (POWERPC_MMU_32B) #define POWERPC_EXCP_750gx (POWERPC_EXCP_7x0) @@ -5182,6 +5282,7 @@ static void init_proc_750gx (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_745 (PPC_NONE) #define POWERPC_MSRM_745 (0x000000000005FF77ULL) #define POWERPC_MMU_745 (POWERPC_MMU_SOFT_6xx) #define POWERPC_EXCP_745 (POWERPC_EXCP_7x5) @@ -5235,6 +5336,7 @@ static void init_proc_745 (CPUPPCState *env) PPC_MEM_SYNC | PPC_MEM_EIEIO | \ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | \ PPC_SEGMENT | PPC_EXTERN) +#define POWERPC_INSNS2_755 (PPC_NONE) #define POWERPC_MSRM_755 (0x000000000005FF77ULL) #define POWERPC_MMU_755 (POWERPC_MMU_SOFT_6xx) #define POWERPC_EXCP_755 (POWERPC_EXCP_7x5) @@ -5303,6 +5405,7 @@ static void init_proc_755 (CPUPPCState *env) PPC_MEM_TLBIA | \ PPC_SEGMENT | PPC_EXTERN | \ PPC_ALTIVEC) +#define POWERPC_INSNS2_7400 (PPC_NONE) #define POWERPC_MSRM_7400 (0x000000000205FF77ULL) #define POWERPC_MMU_7400 (POWERPC_MMU_32B) #define POWERPC_EXCP_7400 (POWERPC_EXCP_74xx) @@ -5355,6 +5458,7 @@ static void init_proc_7400 (CPUPPCState *env) PPC_MEM_TLBIA | \ PPC_SEGMENT | PPC_EXTERN | \ PPC_ALTIVEC) +#define POWERPC_INSNS2_7410 (PPC_NONE) #define POWERPC_MSRM_7410 (0x000000000205FF77ULL) #define POWERPC_MMU_7410 (POWERPC_MMU_32B) #define POWERPC_EXCP_7410 (POWERPC_EXCP_74xx) @@ -5413,6 +5517,7 @@ static void init_proc_7410 (CPUPPCState *env) PPC_MEM_TLBIA | PPC_74xx_TLB | \ PPC_SEGMENT | PPC_EXTERN | \ PPC_ALTIVEC) +#define POWERPC_INSNS2_7440 (PPC_NONE) #define POWERPC_MSRM_7440 (0x000000000205FF77ULL) #define POWERPC_MMU_7440 (POWERPC_MMU_SOFT_74xx) #define POWERPC_EXCP_7440 (POWERPC_EXCP_74xx) @@ -5498,6 +5603,7 @@ static void init_proc_7440 (CPUPPCState *env) PPC_MEM_TLBIA | PPC_74xx_TLB | \ PPC_SEGMENT | PPC_EXTERN | \ PPC_ALTIVEC) +#define POWERPC_INSNS2_7450 (PPC_NONE) #define POWERPC_MSRM_7450 (0x000000000205FF77ULL) #define POWERPC_MMU_7450 (POWERPC_MMU_SOFT_74xx) #define POWERPC_EXCP_7450 (POWERPC_EXCP_74xx) @@ -5609,6 +5715,7 @@ static void init_proc_7450 (CPUPPCState *env) PPC_MEM_TLBIA | PPC_74xx_TLB | \ PPC_SEGMENT | PPC_EXTERN | \ PPC_ALTIVEC) +#define POWERPC_INSNS2_7445 (PPC_NONE) #define 
POWERPC_MSRM_7445 (0x000000000205FF77ULL) #define POWERPC_MMU_7445 (POWERPC_MMU_SOFT_74xx) #define POWERPC_EXCP_7445 (POWERPC_EXCP_74xx) @@ -5723,6 +5830,7 @@ static void init_proc_7445 (CPUPPCState *env) PPC_MEM_TLBIA | PPC_74xx_TLB | \ PPC_SEGMENT | PPC_EXTERN | \ PPC_ALTIVEC) +#define POWERPC_INSNS2_7455 (PPC_NONE) #define POWERPC_MSRM_7455 (0x000000000205FF77ULL) #define POWERPC_MMU_7455 (POWERPC_MMU_SOFT_74xx) #define POWERPC_EXCP_7455 (POWERPC_EXCP_74xx) @@ -5839,6 +5947,7 @@ static void init_proc_7455 (CPUPPCState *env) PPC_MEM_TLBIA | PPC_74xx_TLB | \ PPC_SEGMENT | PPC_EXTERN | \ PPC_ALTIVEC) +#define POWERPC_INSNS2_7457 (PPC_NONE) #define POWERPC_MSRM_7457 (0x000000000205FF77ULL) #define POWERPC_MMU_7457 (POWERPC_MMU_SOFT_74xx) #define POWERPC_EXCP_7457 (POWERPC_EXCP_74xx) @@ -5978,6 +6087,7 @@ static void init_proc_7457 (CPUPPCState *env) PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \ PPC_64B | PPC_ALTIVEC | \ PPC_SEGMENT_64B | PPC_SLBI) +#define POWERPC_INSNS2_970 (PPC_NONE) #define POWERPC_MSRM_970 (0x900000000204FF36ULL) #define POWERPC_MMU_970 (POWERPC_MMU_64B) //#define POWERPC_EXCP_970 (POWERPC_EXCP_970) @@ -6073,6 +6183,7 @@ static void init_proc_970 (CPUPPCState *env) PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \ PPC_64B | PPC_ALTIVEC | \ PPC_SEGMENT_64B | PPC_SLBI) +#define POWERPC_INSNS2_970FX (PPC_NONE) #define POWERPC_MSRM_970FX (0x800000000204FF36ULL) #define POWERPC_MMU_970FX (POWERPC_MMU_64B) #define POWERPC_EXCP_970FX (POWERPC_EXCP_970) @@ -6174,6 +6285,7 @@ static void init_proc_970FX (CPUPPCState *env) PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \ PPC_64B | PPC_ALTIVEC | \ PPC_SEGMENT_64B | PPC_SLBI) +#define POWERPC_INSNS2_970GX (PPC_NONE) #define POWERPC_MSRM_970GX (0x800000000204FF36ULL) #define POWERPC_MMU_970GX (POWERPC_MMU_64B) #define POWERPC_EXCP_970GX (POWERPC_EXCP_970) @@ -6263,6 +6375,7 @@ static void init_proc_970GX (CPUPPCState *env) PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \ PPC_64B | PPC_ALTIVEC | \ PPC_SEGMENT_64B | PPC_SLBI) +#define POWERPC_INSNS2_970MP (PPC_NONE) #define POWERPC_MSRM_970MP (0x900000000204FF36ULL) #define POWERPC_MMU_970MP (POWERPC_MMU_64B) #define POWERPC_EXCP_970MP (POWERPC_EXCP_970) @@ -6354,6 +6467,7 @@ static void init_proc_970MP (CPUPPCState *env) PPC_64B | PPC_ALTIVEC | \ PPC_SEGMENT_64B | PPC_SLBI | \ PPC_POPCNTB | PPC_POPCNTWD) +#define POWERPC_INSNS2_POWER7 (PPC_NONE) #define POWERPC_MSRM_POWER7 (0x800000000204FF36ULL) #define POWERPC_MMU_POWER7 (POWERPC_MMU_2_06) #define POWERPC_EXCP_POWER7 (POWERPC_EXCP_POWER7) @@ -6424,6 +6538,7 @@ static void init_proc_POWER7 (CPUPPCState *env) PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | \ PPC_SEGMENT | PPC_EXTERN | \ PPC_64B | PPC_SLBI) +#define POWERPC_INSNS2_620 (PPC_NONE) #define POWERPC_MSRM_620 (0x800000000005FF77ULL) //#define POWERPC_MMU_620 (POWERPC_MMU_620) #define POWERPC_EXCP_620 (POWERPC_EXCP_970) @@ -6459,6 +6574,7 @@ static void init_proc_620 (CPUPPCState *env) /* Default 32 bits PowerPC target will be 604 */ #define CPU_POWERPC_PPC32 CPU_POWERPC_604 #define POWERPC_INSNS_PPC32 POWERPC_INSNS_604 +#define POWERPC_INSNS2_PPC32 POWERPC_INSNS2_604 #define POWERPC_MSRM_PPC32 POWERPC_MSRM_604 #define POWERPC_MMU_PPC32 POWERPC_MMU_604 #define POWERPC_EXCP_PPC32 POWERPC_EXCP_604 @@ -6471,6 +6587,7 @@ static void init_proc_620 (CPUPPCState *env) /* Default 64 bits PowerPC target will be 970 FX */ #define CPU_POWERPC_PPC64 CPU_POWERPC_970FX #define POWERPC_INSNS_PPC64 POWERPC_INSNS_970FX +#define POWERPC_INSNS2_PPC64 POWERPC_INSNS2_970FX #define POWERPC_MSRM_PPC64 POWERPC_MSRM_970FX #define POWERPC_MMU_PPC64 
POWERPC_MMU_970FX #define POWERPC_EXCP_PPC64 POWERPC_EXCP_970FX @@ -6482,27 +6599,29 @@ static void init_proc_620 (CPUPPCState *env) /* Default PowerPC target will be PowerPC 32 */ #if defined (TARGET_PPC64) && 0 // XXX: TODO -#define CPU_POWERPC_DEFAULT CPU_POWERPC_PPC64 -#define POWERPC_INSNS_DEFAULT POWERPC_INSNS_PPC64 -#define POWERPC_MSRM_DEFAULT POWERPC_MSRM_PPC64 -#define POWERPC_MMU_DEFAULT POWERPC_MMU_PPC64 -#define POWERPC_EXCP_DEFAULT POWERPC_EXCP_PPC64 -#define POWERPC_INPUT_DEFAULT POWERPC_INPUT_PPC64 -#define POWERPC_BFDM_DEFAULT POWERPC_BFDM_PPC64 -#define POWERPC_FLAG_DEFAULT POWERPC_FLAG_PPC64 -#define check_pow_DEFAULT check_pow_PPC64 -#define init_proc_DEFAULT init_proc_PPC64 +#define CPU_POWERPC_DEFAULT CPU_POWERPC_PPC64 +#define POWERPC_INSNS_DEFAULT POWERPC_INSNS_PPC64 +#define POWERPC_INSNS2_DEFAULT POWERPC_INSNS_PPC64 +#define POWERPC_MSRM_DEFAULT POWERPC_MSRM_PPC64 +#define POWERPC_MMU_DEFAULT POWERPC_MMU_PPC64 +#define POWERPC_EXCP_DEFAULT POWERPC_EXCP_PPC64 +#define POWERPC_INPUT_DEFAULT POWERPC_INPUT_PPC64 +#define POWERPC_BFDM_DEFAULT POWERPC_BFDM_PPC64 +#define POWERPC_FLAG_DEFAULT POWERPC_FLAG_PPC64 +#define check_pow_DEFAULT check_pow_PPC64 +#define init_proc_DEFAULT init_proc_PPC64 #else -#define CPU_POWERPC_DEFAULT CPU_POWERPC_PPC32 -#define POWERPC_INSNS_DEFAULT POWERPC_INSNS_PPC32 -#define POWERPC_MSRM_DEFAULT POWERPC_MSRM_PPC32 -#define POWERPC_MMU_DEFAULT POWERPC_MMU_PPC32 -#define POWERPC_EXCP_DEFAULT POWERPC_EXCP_PPC32 -#define POWERPC_INPUT_DEFAULT POWERPC_INPUT_PPC32 -#define POWERPC_BFDM_DEFAULT POWERPC_BFDM_PPC32 -#define POWERPC_FLAG_DEFAULT POWERPC_FLAG_PPC32 -#define check_pow_DEFAULT check_pow_PPC32 -#define init_proc_DEFAULT init_proc_PPC32 +#define CPU_POWERPC_DEFAULT CPU_POWERPC_PPC32 +#define POWERPC_INSNS_DEFAULT POWERPC_INSNS_PPC32 +#define POWERPC_INSNS2_DEFAULT POWERPC_INSNS_PPC32 +#define POWERPC_MSRM_DEFAULT POWERPC_MSRM_PPC32 +#define POWERPC_MMU_DEFAULT POWERPC_MMU_PPC32 +#define POWERPC_EXCP_DEFAULT POWERPC_EXCP_PPC32 +#define POWERPC_INPUT_DEFAULT POWERPC_INPUT_PPC32 +#define POWERPC_BFDM_DEFAULT POWERPC_BFDM_PPC32 +#define POWERPC_FLAG_DEFAULT POWERPC_FLAG_PPC32 +#define check_pow_DEFAULT check_pow_PPC32 +#define init_proc_DEFAULT init_proc_PPC32 #endif /*****************************************************************************/ @@ -7351,18 +7470,19 @@ enum { /* PowerPC CPU definitions */ #define POWERPC_DEF_SVR(_name, _pvr, _svr, _type) \ { \ - .name = _name, \ - .pvr = _pvr, \ - .svr = _svr, \ - .insns_flags = glue(POWERPC_INSNS_,_type), \ - .msr_mask = glue(POWERPC_MSRM_,_type), \ - .mmu_model = glue(POWERPC_MMU_,_type), \ - .excp_model = glue(POWERPC_EXCP_,_type), \ - .bus_model = glue(POWERPC_INPUT_,_type), \ - .bfd_mach = glue(POWERPC_BFDM_,_type), \ - .flags = glue(POWERPC_FLAG_,_type), \ - .init_proc = &glue(init_proc_,_type), \ - .check_pow = &glue(check_pow_,_type), \ + .name = _name, \ + .pvr = _pvr, \ + .svr = _svr, \ + .insns_flags = glue(POWERPC_INSNS_,_type), \ + .insns_flags2 = glue(POWERPC_INSNS2_,_type), \ + .msr_mask = glue(POWERPC_MSRM_,_type), \ + .mmu_model = glue(POWERPC_MMU_,_type), \ + .excp_model = glue(POWERPC_EXCP_,_type), \ + .bus_model = glue(POWERPC_INPUT_,_type), \ + .bfd_mach = glue(POWERPC_BFDM_,_type), \ + .flags = glue(POWERPC_FLAG_,_type), \ + .init_proc = &glue(init_proc_,_type), \ + .check_pow = &glue(check_pow_,_type), \ } #define POWERPC_DEF(_name, _pvr, _type) \ POWERPC_DEF_SVR(_name, _pvr, POWERPC_SVR_NONE, _type) @@ -9049,7 +9169,7 @@ static const ppc_def_t ppc_defs[] = { }; 
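With the second flag word in place, each CPU definition now carries both insns_flags and insns_flags2, and create_ppc_opcodes (changed further below in this patch) registers an opcode when either mask matches. The small sketch below shows that selection logic in isolation; the flag bit values are made up for illustration and do not match QEMU's real PPC_* / PPC2_* constants.

    /* Sketch of the two-mask opcode selection; bit assignments are
     * hypothetical and exist only to make the example self-contained. */
    #include <stdint.h>
    #include <stdio.h>

    #define PPC_NONE      0ULL
    #define PPC_BOOKE     (1ULL << 0)   /* hypothetical bit */
    #define PPC2_BOOKE206 (1ULL << 0)   /* hypothetical bit */

    typedef struct {
        const char *name;
        uint64_t type;    /* legacy instruction-family mask   */
        uint64_t type2;   /* extended instruction-family mask */
    } opc_handler_t;

    static int opcode_wanted(const opc_handler_t *opc,
                             uint64_t insns_flags, uint64_t insns_flags2)
    {
        /* Same condition as the create_ppc_opcodes change in this patch. */
        return ((opc->type & insns_flags) != 0) ||
               ((opc->type2 & insns_flags2) != 0);
    }

    int main(void)
    {
        /* rfci is tagged (PPC_BOOKE, PPC2_BOOKE206) in the opcode table
         * above, so it is still registered for an e500v2 core whose
         * primary mask no longer contains PPC_BOOKE but whose second
         * mask has PPC2_BOOKE206 set. */
        opc_handler_t rfci = { "rfci", PPC_BOOKE, PPC2_BOOKE206 };
        printf("%d\n", opcode_wanted(&rfci, PPC_NONE, PPC2_BOOKE206)); /* 1 */
        return 0;
    }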
/*****************************************************************************/ -/* Generic CPU instanciation routine */ +/* Generic CPU instantiation routine */ static void init_ppc_proc (CPUPPCState *env, const ppc_def_t *def) { #if !defined(CONFIG_USER_ONLY) @@ -9437,7 +9557,8 @@ static int create_ppc_opcodes (CPUPPCState *env, const ppc_def_t *def) fill_new_table(env->opcodes, 0x40); for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) { - if ((opc->handler.type & def->insns_flags) != 0) { + if (((opc->handler.type & def->insns_flags) != 0) || + ((opc->handler.type2 & def->insns_flags2) != 0)) { if (register_insn(env->opcodes, opc) < 0) { printf("*** ERROR initializing PowerPC instruction " "0x%02x 0x%02x 0x%02x\n", opc->opc1, opc->opc2, @@ -9650,6 +9771,7 @@ int cpu_ppc_register_internal (CPUPPCState *env, const ppc_def_t *def) env->excp_model = def->excp_model; env->bus_model = def->bus_model; env->insns_flags = def->insns_flags; + env->insns_flags2 = def->insns_flags2; env->flags = def->flags; env->bfd_mach = def->bfd_mach; env->check_pow = def->check_pow; @@ -9699,8 +9821,8 @@ int cpu_ppc_register_internal (CPUPPCState *env, const ppc_def_t *def) case POWERPC_MMU_BOOKE: mmu_model = "PowerPC BookE"; break; - case POWERPC_MMU_BOOKE_FSL: - mmu_model = "PowerPC BookE FSL"; + case POWERPC_MMU_BOOKE206: + mmu_model = "PowerPC BookE 2.06"; break; case POWERPC_MMU_601: mmu_model = "PowerPC 601"; diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h index a84b3ee184..4e5c3917d3 100644 --- a/target-s390x/cpu.h +++ b/target-s390x/cpu.h @@ -67,7 +67,6 @@ typedef struct CPUS390XState { PSW psw; - uint32_t cc; uint32_t cc_op; uint64_t cc_src; uint64_t cc_dst; @@ -87,9 +86,12 @@ typedef struct CPUS390XState { int pending_int; ExtQueue ext_queue[MAX_EXT_QUEUE]; + int ext_index; + + CPU_COMMON + /* reset does memset(0) up to here */ - int ext_index; int cpu_num; uint8_t *storage_keys; @@ -98,8 +100,6 @@ typedef struct CPUS390XState { QEMUTimer *tod_timer; QEMUTimer *cpu_timer; - - CPU_COMMON } CPUS390XState; #if defined(CONFIG_USER_ONLY) @@ -287,12 +287,32 @@ int cpu_s390x_handle_mmu_fault (CPUS390XState *env, target_ulong address, int rw #ifndef CONFIG_USER_ONLY int s390_virtio_hypercall(CPUState *env, uint64_t mem, uint64_t hypercall); +#ifdef CONFIG_KVM void kvm_s390_interrupt(CPUState *env, int type, uint32_t code); void kvm_s390_virtio_irq(CPUState *env, int config_change, uint64_t token); void kvm_s390_interrupt_internal(CPUState *env, int type, uint32_t parm, uint64_t parm64, int vm); +#else +static inline void kvm_s390_interrupt(CPUState *env, int type, uint32_t code) +{ +} + +static inline void kvm_s390_virtio_irq(CPUState *env, int config_change, + uint64_t token) +{ +} + +static inline void kvm_s390_interrupt_internal(CPUState *env, int type, + uint32_t parm, uint64_t parm64, + int vm) +{ +} +#endif CPUState *s390_cpu_addr2state(uint16_t cpu_addr); +/* from s390-virtio-bus */ +extern const target_phys_addr_t virtio_size; + #ifndef KVM_S390_SIGP_STOP #define KVM_S390_SIGP_STOP 0 #define KVM_S390_PROGRAM_INT 0 diff --git a/target-s390x/helper.c b/target-s390x/helper.c index 629dfd9708..745d8c52bb 100644 --- a/target-s390x/helper.c +++ b/target-s390x/helper.c @@ -2,6 +2,7 @@ * S/390 helpers * * Copyright (c) 2009 Ulrich Hecht + * Copyright (c) 2011 Alexander Graf * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -25,27 +26,102 @@ #include "exec-all.h" #include "gdbstub.h" #include "qemu-common.h" 
+#include "qemu-timer.h" -#include <linux/kvm.h> -#include "kvm.h" +//#define DEBUG_S390 +//#define DEBUG_S390_PTE +//#define DEBUG_S390_STDOUT + +#ifdef DEBUG_S390 +#ifdef DEBUG_S390_STDOUT +#define DPRINTF(fmt, ...) \ + do { fprintf(stderr, fmt, ## __VA_ARGS__); \ + qemu_log(fmt, ##__VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) \ + do { qemu_log(fmt, ## __VA_ARGS__); } while (0) +#endif +#else +#define DPRINTF(fmt, ...) \ + do { } while (0) +#endif + +#ifdef DEBUG_S390_PTE +#define PTE_DPRINTF DPRINTF +#else +#define PTE_DPRINTF(fmt, ...) \ + do { } while (0) +#endif + +#ifndef CONFIG_USER_ONLY +static void s390x_tod_timer(void *opaque) +{ + CPUState *env = opaque; + + env->pending_int |= INTERRUPT_TOD; + cpu_interrupt(env, CPU_INTERRUPT_HARD); +} + +static void s390x_cpu_timer(void *opaque) +{ + CPUState *env = opaque; + + env->pending_int |= INTERRUPT_CPUTIMER; + cpu_interrupt(env, CPU_INTERRUPT_HARD); +} +#endif CPUS390XState *cpu_s390x_init(const char *cpu_model) { CPUS390XState *env; +#if !defined (CONFIG_USER_ONLY) + struct tm tm; +#endif static int inited = 0; + static int cpu_num = 0; env = qemu_mallocz(sizeof(CPUS390XState)); cpu_exec_init(env); if (!inited) { inited = 1; + s390x_translate_init(); } +#if !defined(CONFIG_USER_ONLY) + qemu_get_timedate(&tm, 0); + env->tod_offset = TOD_UNIX_EPOCH + + (time2tod(mktimegm(&tm)) * 1000000000ULL); + env->tod_basetime = 0; + env->tod_timer = qemu_new_timer_ns(vm_clock, s390x_tod_timer, env); + env->cpu_timer = qemu_new_timer_ns(vm_clock, s390x_cpu_timer, env); +#endif env->cpu_model_str = cpu_model; + env->cpu_num = cpu_num++; + env->ext_index = -1; cpu_reset(env); qemu_init_vcpu(env); return env; } +#if defined(CONFIG_USER_ONLY) + +void do_interrupt (CPUState *env) +{ + env->exception_index = -1; +} + +int cpu_s390x_handle_mmu_fault (CPUState *env, target_ulong address, int rw, + int mmu_idx, int is_softmmu) +{ + /* fprintf(stderr,"%s: address 0x%lx rw %d mmu_idx %d is_softmmu %d\n", + __FUNCTION__, address, rw, mmu_idx, is_softmmu); */ + env->exception_index = EXCP_ADDR; + env->__excp_addr = address; /* FIXME: find out how this works on a real machine */ + return 1; +} + +#endif /* CONFIG_USER_ONLY */ + void cpu_reset(CPUS390XState *env) { if (qemu_loglevel_mask(CPU_LOG_RESET)) { @@ -58,31 +134,495 @@ void cpu_reset(CPUS390XState *env) tlb_flush(env, 1); } -target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr) +#ifndef CONFIG_USER_ONLY + +/* Ensure to exit the TB after this call! 
*/ +static void trigger_pgm_exception(CPUState *env, uint32_t code, uint32_t ilc) +{ + env->exception_index = EXCP_PGM; + env->int_pgm_code = code; + env->int_pgm_ilc = ilc; +} + +static int trans_bits(CPUState *env, uint64_t mode) +{ + int bits = 0; + + switch (mode) { + case PSW_ASC_PRIMARY: + bits = 1; + break; + case PSW_ASC_SECONDARY: + bits = 2; + break; + case PSW_ASC_HOME: + bits = 3; + break; + default: + cpu_abort(env, "unknown asc mode\n"); + break; + } + + return bits; +} + +static void trigger_prot_fault(CPUState *env, target_ulong vaddr, uint64_t mode) +{ + int ilc = ILC_LATER_INC_2; + int bits = trans_bits(env, mode) | 4; + + DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __FUNCTION__, vaddr, bits); + + stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits); + trigger_pgm_exception(env, PGM_PROTECTION, ilc); +} + +static void trigger_page_fault(CPUState *env, target_ulong vaddr, uint32_t type, + uint64_t asc, int rw) +{ + int ilc = ILC_LATER; + int bits = trans_bits(env, asc); + + if (rw == 2) { + /* code has is undefined ilc */ + ilc = 2; + } + + DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __FUNCTION__, vaddr, bits); + + stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits); + trigger_pgm_exception(env, type, ilc); +} + +static int mmu_translate_asce(CPUState *env, target_ulong vaddr, uint64_t asc, + uint64_t asce, int level, target_ulong *raddr, + int *flags, int rw) { + uint64_t offs = 0; + uint64_t origin; + uint64_t new_asce; + + PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __FUNCTION__, asce); + + if (((level != _ASCE_TYPE_SEGMENT) && (asce & _REGION_ENTRY_INV)) || + ((level == _ASCE_TYPE_SEGMENT) && (asce & _SEGMENT_ENTRY_INV))) { + /* XXX different regions have different faults */ + DPRINTF("%s: invalid region\n", __FUNCTION__); + trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw); + return -1; + } + + if ((level <= _ASCE_TYPE_MASK) && ((asce & _ASCE_TYPE_MASK) != level)) { + trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw); + return -1; + } + + if (asce & _ASCE_REAL_SPACE) { + /* direct mapping */ + + *raddr = vaddr; + return 0; + } + + origin = asce & _ASCE_ORIGIN; + + switch (level) { + case _ASCE_TYPE_REGION1 + 4: + offs = (vaddr >> 50) & 0x3ff8; + break; + case _ASCE_TYPE_REGION1: + offs = (vaddr >> 39) & 0x3ff8; + break; + case _ASCE_TYPE_REGION2: + offs = (vaddr >> 28) & 0x3ff8; + break; + case _ASCE_TYPE_REGION3: + offs = (vaddr >> 17) & 0x3ff8; + break; + case _ASCE_TYPE_SEGMENT: + offs = (vaddr >> 9) & 0x07f8; + origin = asce & _SEGMENT_ENTRY_ORIGIN; + break; + } + + /* XXX region protection flags */ + /* *flags &= ~PAGE_WRITE */ + + new_asce = ldq_phys(origin + offs); + PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n", + __FUNCTION__, origin, offs, new_asce); + + if (level != _ASCE_TYPE_SEGMENT) { + /* yet another region */ + return mmu_translate_asce(env, vaddr, asc, new_asce, level - 4, raddr, + flags, rw); + } + + /* PTE */ + if (new_asce & _PAGE_INVALID) { + DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __FUNCTION__, new_asce); + trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw); + return -1; + } + + if (new_asce & _PAGE_RO) { + *flags &= ~PAGE_WRITE; + } + + *raddr = new_asce & _ASCE_ORIGIN; + + PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __FUNCTION__, new_asce); + return 0; } -#ifndef CONFIG_USER_ONLY +static int mmu_translate_asc(CPUState *env, target_ulong vaddr, uint64_t asc, + target_ulong *raddr, int *flags, int rw) +{ + uint64_t asce = 0; + int level, new_level; + int r; -int 
cpu_s390x_handle_mmu_fault (CPUState *env, target_ulong address, int rw, + switch (asc) { + case PSW_ASC_PRIMARY: + PTE_DPRINTF("%s: asc=primary\n", __FUNCTION__); + asce = env->cregs[1]; + break; + case PSW_ASC_SECONDARY: + PTE_DPRINTF("%s: asc=secondary\n", __FUNCTION__); + asce = env->cregs[7]; + break; + case PSW_ASC_HOME: + PTE_DPRINTF("%s: asc=home\n", __FUNCTION__); + asce = env->cregs[13]; + break; + } + + switch (asce & _ASCE_TYPE_MASK) { + case _ASCE_TYPE_REGION1: + break; + case _ASCE_TYPE_REGION2: + if (vaddr & 0xffe0000000000000ULL) { + DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64 + " 0xffe0000000000000ULL\n", __FUNCTION__, + vaddr); + trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw); + return -1; + } + break; + case _ASCE_TYPE_REGION3: + if (vaddr & 0xfffffc0000000000ULL) { + DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64 + " 0xfffffc0000000000ULL\n", __FUNCTION__, + vaddr); + trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw); + return -1; + } + break; + case _ASCE_TYPE_SEGMENT: + if (vaddr & 0xffffffff80000000ULL) { + DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64 + " 0xffffffff80000000ULL\n", __FUNCTION__, + vaddr); + trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw); + return -1; + } + break; + } + + /* fake level above current */ + level = asce & _ASCE_TYPE_MASK; + new_level = level + 4; + asce = (asce & ~_ASCE_TYPE_MASK) | (new_level & _ASCE_TYPE_MASK); + + r = mmu_translate_asce(env, vaddr, asc, asce, new_level, raddr, flags, rw); + + if ((rw == 1) && !(*flags & PAGE_WRITE)) { + trigger_prot_fault(env, vaddr, asc); + return -1; + } + + return r; +} + +int mmu_translate(CPUState *env, target_ulong vaddr, int rw, uint64_t asc, + target_ulong *raddr, int *flags) +{ + int r = -1; + + *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + vaddr &= TARGET_PAGE_MASK; + + if (!(env->psw.mask & PSW_MASK_DAT)) { + *raddr = vaddr; + r = 0; + goto out; + } + + switch (asc) { + case PSW_ASC_PRIMARY: + case PSW_ASC_HOME: + r = mmu_translate_asc(env, vaddr, asc, raddr, flags, rw); + break; + case PSW_ASC_SECONDARY: + /* + * Instruction: Primary + * Data: Secondary + */ + if (rw == 2) { + r = mmu_translate_asc(env, vaddr, PSW_ASC_PRIMARY, raddr, flags, + rw); + *flags &= ~(PAGE_READ | PAGE_WRITE); + } else { + r = mmu_translate_asc(env, vaddr, PSW_ASC_SECONDARY, raddr, flags, + rw); + *flags &= ~(PAGE_EXEC); + } + break; + case PSW_ASC_ACCREG: + default: + hw_error("guest switched to unknown asc mode\n"); + break; + } + +out: + /* Convert real address -> absolute address */ + if (*raddr < 0x2000) { + *raddr = *raddr + env->psa; + } + + return r; +} + +int cpu_s390x_handle_mmu_fault (CPUState *env, target_ulong _vaddr, int rw, int mmu_idx, int is_softmmu) { - target_ulong phys; + uint64_t asc = env->psw.mask & PSW_MASK_ASC; + target_ulong vaddr, raddr; int prot; - /* XXX: implement mmu */ + DPRINTF("%s: address 0x%" PRIx64 " rw %d mmu_idx %d is_softmmu %d\n", + __FUNCTION__, _vaddr, rw, mmu_idx, is_softmmu); + + _vaddr &= TARGET_PAGE_MASK; + vaddr = _vaddr; + + /* 31-Bit mode */ + if (!(env->psw.mask & PSW_MASK_64)) { + vaddr &= 0x7fffffff; + } + + if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot)) { + /* Translation ended in exception */ + return 1; + } + + /* check out of RAM access */ + if (raddr > (ram_size + virtio_size)) { + DPRINTF("%s: aaddr %" PRIx64 " > ram_size %" PRIx64 "\n", __FUNCTION__, + (uint64_t)aaddr, (uint64_t)ram_size); + trigger_pgm_exception(env, PGM_ADDRESSING, ILC_LATER); + return 1; + } - phys = address; - prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + 
DPRINTF("%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n", __FUNCTION__, + (uint64_t)vaddr, (uint64_t)raddr, prot); - tlb_set_page(env, address & TARGET_PAGE_MASK, - phys & TARGET_PAGE_MASK, prot, + tlb_set_page(env, _vaddr, raddr, prot, mmu_idx, TARGET_PAGE_SIZE); + return 0; } -#endif /* CONFIG_USER_ONLY */ + +target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong vaddr) +{ + target_ulong raddr; + int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + int old_exc = env->exception_index; + uint64_t asc = env->psw.mask & PSW_MASK_ASC; + + /* 31-Bit mode */ + if (!(env->psw.mask & PSW_MASK_64)) { + vaddr &= 0x7fffffff; + } + + mmu_translate(env, vaddr, 2, asc, &raddr, &prot); + env->exception_index = old_exc; + + return raddr; +} + +void load_psw(CPUState *env, uint64_t mask, uint64_t addr) +{ + if (mask & PSW_MASK_WAIT) { + env->halted = 1; + env->exception_index = EXCP_HLT; + if (!(mask & (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK))) { + /* XXX disabled wait state - CPU is dead */ + } + } + + env->psw.addr = addr; + env->psw.mask = mask; + env->cc_op = (mask >> 13) & 3; +} + +static uint64_t get_psw_mask(CPUState *env) +{ + uint64_t r = env->psw.mask; + + env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr); + + r &= ~(3ULL << 13); + assert(!(env->cc_op & ~3)); + r |= env->cc_op << 13; + + return r; +} + +static void do_svc_interrupt(CPUState *env) +{ + uint64_t mask, addr; + LowCore *lowcore; + target_phys_addr_t len = TARGET_PAGE_SIZE; + + lowcore = cpu_physical_memory_map(env->psa, &len, 1); + + lowcore->svc_code = cpu_to_be16(env->int_svc_code); + lowcore->svc_ilc = cpu_to_be16(env->int_svc_ilc); + lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env)); + lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + (env->int_svc_ilc)); + mask = be64_to_cpu(lowcore->svc_new_psw.mask); + addr = be64_to_cpu(lowcore->svc_new_psw.addr); + + cpu_physical_memory_unmap(lowcore, len, 1, len); + + load_psw(env, mask, addr); +} + +static void do_program_interrupt(CPUState *env) +{ + uint64_t mask, addr; + LowCore *lowcore; + target_phys_addr_t len = TARGET_PAGE_SIZE; + int ilc = env->int_pgm_ilc; + + switch (ilc) { + case ILC_LATER: + ilc = get_ilc(ldub_code(env->psw.addr)); + break; + case ILC_LATER_INC: + ilc = get_ilc(ldub_code(env->psw.addr)); + env->psw.addr += ilc * 2; + break; + case ILC_LATER_INC_2: + ilc = get_ilc(ldub_code(env->psw.addr)) * 2; + env->psw.addr += ilc; + break; + } + + qemu_log("%s: code=0x%x ilc=%d\n", __FUNCTION__, env->int_pgm_code, ilc); + + lowcore = cpu_physical_memory_map(env->psa, &len, 1); + + lowcore->pgm_ilc = cpu_to_be16(ilc); + lowcore->pgm_code = cpu_to_be16(env->int_pgm_code); + lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env)); + lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr); + mask = be64_to_cpu(lowcore->program_new_psw.mask); + addr = be64_to_cpu(lowcore->program_new_psw.addr); + + cpu_physical_memory_unmap(lowcore, len, 1, len); + + DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __FUNCTION__, + env->int_pgm_code, ilc, env->psw.mask, + env->psw.addr); + + load_psw(env, mask, addr); +} + +#define VIRTIO_SUBCODE_64 0x0D00 + +static void do_ext_interrupt(CPUState *env) +{ + uint64_t mask, addr; + LowCore *lowcore; + target_phys_addr_t len = TARGET_PAGE_SIZE; + ExtQueue *q; + + if (!(env->psw.mask & PSW_MASK_EXT)) { + cpu_abort(env, "Ext int w/o ext mask\n"); + } + + if (env->ext_index < 0 || env->ext_index > MAX_EXT_QUEUE) { + cpu_abort(env, "Ext queue overrun: %d\n", env->ext_index); + } + + q = 
&env->ext_queue[env->ext_index]; + lowcore = cpu_physical_memory_map(env->psa, &len, 1); + + lowcore->ext_int_code = cpu_to_be16(q->code); + lowcore->ext_params = cpu_to_be32(q->param); + lowcore->ext_params2 = cpu_to_be64(q->param64); + lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env)); + lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr); + lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64); + mask = be64_to_cpu(lowcore->external_new_psw.mask); + addr = be64_to_cpu(lowcore->external_new_psw.addr); + + cpu_physical_memory_unmap(lowcore, len, 1, len); + + env->ext_index--; + if (env->ext_index == -1) { + env->pending_int &= ~INTERRUPT_EXT; + } + + DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __FUNCTION__, + env->psw.mask, env->psw.addr); + + load_psw(env, mask, addr); +} void do_interrupt (CPUState *env) { + qemu_log("%s: %d at pc=%" PRIx64 "\n", __FUNCTION__, env->exception_index, + env->psw.addr); + + /* handle external interrupts */ + if ((env->psw.mask & PSW_MASK_EXT) && + env->exception_index == -1) { + if (env->pending_int & INTERRUPT_EXT) { + /* code is already in env */ + env->exception_index = EXCP_EXT; + } else if (env->pending_int & INTERRUPT_TOD) { + cpu_inject_ext(env, 0x1004, 0, 0); + env->exception_index = EXCP_EXT; + env->pending_int &= ~INTERRUPT_EXT; + env->pending_int &= ~INTERRUPT_TOD; + } else if (env->pending_int & INTERRUPT_CPUTIMER) { + cpu_inject_ext(env, 0x1005, 0, 0); + env->exception_index = EXCP_EXT; + env->pending_int &= ~INTERRUPT_EXT; + env->pending_int &= ~INTERRUPT_TOD; + } + } + + switch (env->exception_index) { + case EXCP_PGM: + do_program_interrupt(env); + break; + case EXCP_SVC: + do_svc_interrupt(env); + break; + case EXCP_EXT: + do_ext_interrupt(env); + break; + } + env->exception_index = -1; + + if (!env->pending_int) { + env->interrupt_request &= ~CPU_INTERRUPT_HARD; + } } + +#endif /* CONFIG_USER_ONLY */ diff --git a/target-s390x/helpers.h b/target-s390x/helpers.h new file mode 100644 index 0000000000..6ca48ebe00 --- /dev/null +++ b/target-s390x/helpers.h @@ -0,0 +1,151 @@ +#include "def-helper.h" + +DEF_HELPER_1(exception, void, i32) +DEF_HELPER_3(nc, i32, i32, i64, i64) +DEF_HELPER_3(oc, i32, i32, i64, i64) +DEF_HELPER_3(xc, i32, i32, i64, i64) +DEF_HELPER_3(mvc, void, i32, i64, i64) +DEF_HELPER_3(clc, i32, i32, i64, i64) +DEF_HELPER_2(mvcl, i32, i32, i32) +DEF_HELPER_FLAGS_1(set_cc_comp_s32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32) +DEF_HELPER_FLAGS_1(set_cc_comp_s64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64) +DEF_HELPER_FLAGS_2(set_cc_icm, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32) +DEF_HELPER_3(clm, i32, i32, i32, i64) +DEF_HELPER_3(stcm, void, i32, i32, i64) +DEF_HELPER_2(mlg, void, i32, i64) +DEF_HELPER_2(dlg, void, i32, i64) +DEF_HELPER_FLAGS_3(set_cc_add64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64, s64, s64) +DEF_HELPER_FLAGS_3(set_cc_addu64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64, i64) +DEF_HELPER_FLAGS_3(set_cc_add32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32, s32, s32) +DEF_HELPER_FLAGS_3(set_cc_addu32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32, i32) +DEF_HELPER_FLAGS_3(set_cc_sub64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64, s64, s64) +DEF_HELPER_FLAGS_3(set_cc_subu64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64, i64) +DEF_HELPER_FLAGS_3(set_cc_sub32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32, s32, s32) +DEF_HELPER_FLAGS_3(set_cc_subu32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32, i32) +DEF_HELPER_3(srst, i32, i32, i32, i32) +DEF_HELPER_3(clst, i32, i32, i32, i32) +DEF_HELPER_3(mvpg, void, i64, i64, i64) 
+DEF_HELPER_3(mvst, void, i32, i32, i32) +DEF_HELPER_3(csg, i32, i32, i64, i32) +DEF_HELPER_3(cdsg, i32, i32, i64, i32) +DEF_HELPER_3(cs, i32, i32, i64, i32) +DEF_HELPER_4(ex, i32, i32, i64, i64, i64) +DEF_HELPER_FLAGS_1(abs_i32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32) +DEF_HELPER_FLAGS_1(nabs_i32, TCG_CALL_PURE|TCG_CALL_CONST, s32, s32) +DEF_HELPER_FLAGS_1(abs_i64, TCG_CALL_PURE|TCG_CALL_CONST, i64, s64) +DEF_HELPER_FLAGS_1(nabs_i64, TCG_CALL_PURE|TCG_CALL_CONST, s64, s64) +DEF_HELPER_3(stcmh, void, i32, i64, i32) +DEF_HELPER_3(icmh, i32, i32, i64, i32) +DEF_HELPER_2(ipm, void, i32, i32) +DEF_HELPER_FLAGS_3(addc_u32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32, i32) +DEF_HELPER_FLAGS_3(set_cc_addc_u64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64, i64) +DEF_HELPER_3(stam, void, i32, i64, i32) +DEF_HELPER_3(lam, void, i32, i64, i32) +DEF_HELPER_3(mvcle, i32, i32, i64, i32) +DEF_HELPER_3(clcle, i32, i32, i64, i32) +DEF_HELPER_3(slb, i32, i32, i32, i32) +DEF_HELPER_4(slbg, i32, i32, i32, i64, i64) +DEF_HELPER_2(cefbr, void, i32, s32) +DEF_HELPER_2(cdfbr, void, i32, s32) +DEF_HELPER_2(cxfbr, void, i32, s32) +DEF_HELPER_2(cegbr, void, i32, s64) +DEF_HELPER_2(cdgbr, void, i32, s64) +DEF_HELPER_2(cxgbr, void, i32, s64) +DEF_HELPER_2(adbr, i32, i32, i32) +DEF_HELPER_2(aebr, i32, i32, i32) +DEF_HELPER_2(sebr, i32, i32, i32) +DEF_HELPER_2(sdbr, i32, i32, i32) +DEF_HELPER_2(debr, void, i32, i32) +DEF_HELPER_2(dxbr, void, i32, i32) +DEF_HELPER_2(mdbr, void, i32, i32) +DEF_HELPER_2(mxbr, void, i32, i32) +DEF_HELPER_2(ldebr, void, i32, i32) +DEF_HELPER_2(ldxbr, void, i32, i32) +DEF_HELPER_2(lxdbr, void, i32, i32) +DEF_HELPER_2(ledbr, void, i32, i32) +DEF_HELPER_2(lexbr, void, i32, i32) +DEF_HELPER_2(lpebr, i32, i32, i32) +DEF_HELPER_2(lpdbr, i32, i32, i32) +DEF_HELPER_2(lpxbr, i32, i32, i32) +DEF_HELPER_2(ltebr, i32, i32, i32) +DEF_HELPER_2(ltdbr, i32, i32, i32) +DEF_HELPER_2(ltxbr, i32, i32, i32) +DEF_HELPER_2(lcebr, i32, i32, i32) +DEF_HELPER_2(lcdbr, i32, i32, i32) +DEF_HELPER_2(lcxbr, i32, i32, i32) +DEF_HELPER_2(aeb, void, i32, i32) +DEF_HELPER_2(deb, void, i32, i32) +DEF_HELPER_2(meeb, void, i32, i32) +DEF_HELPER_2(cdb, i32, i32, i64) +DEF_HELPER_2(adb, i32, i32, i64) +DEF_HELPER_2(seb, void, i32, i32) +DEF_HELPER_2(sdb, i32, i32, i64) +DEF_HELPER_2(mdb, void, i32, i64) +DEF_HELPER_2(ddb, void, i32, i64) +DEF_HELPER_FLAGS_2(cebr, TCG_CALL_PURE, i32, i32, i32) +DEF_HELPER_FLAGS_2(cdbr, TCG_CALL_PURE, i32, i32, i32) +DEF_HELPER_FLAGS_2(cxbr, TCG_CALL_PURE, i32, i32, i32) +DEF_HELPER_3(cgebr, i32, i32, i32, i32) +DEF_HELPER_3(cgdbr, i32, i32, i32, i32) +DEF_HELPER_3(cgxbr, i32, i32, i32, i32) +DEF_HELPER_1(lzer, void, i32) +DEF_HELPER_1(lzdr, void, i32) +DEF_HELPER_1(lzxr, void, i32) +DEF_HELPER_3(cfebr, i32, i32, i32, i32) +DEF_HELPER_3(cfdbr, i32, i32, i32, i32) +DEF_HELPER_3(cfxbr, i32, i32, i32, i32) +DEF_HELPER_2(axbr, i32, i32, i32) +DEF_HELPER_2(sxbr, i32, i32, i32) +DEF_HELPER_2(meebr, void, i32, i32) +DEF_HELPER_2(ddbr, void, i32, i32) +DEF_HELPER_3(madb, void, i32, i64, i32) +DEF_HELPER_3(maebr, void, i32, i32, i32) +DEF_HELPER_3(madbr, void, i32, i32, i32) +DEF_HELPER_3(msdbr, void, i32, i32, i32) +DEF_HELPER_2(lxdb, void, i32, i64) +DEF_HELPER_FLAGS_2(tceb, TCG_CALL_PURE, i32, i32, i64) +DEF_HELPER_FLAGS_2(tcdb, TCG_CALL_PURE, i32, i32, i64) +DEF_HELPER_FLAGS_2(tcxb, TCG_CALL_PURE, i32, i32, i64) +DEF_HELPER_2(flogr, i32, i32, i64) +DEF_HELPER_2(sqdbr, void, i32, i32) +DEF_HELPER_FLAGS_1(cvd, TCG_CALL_PURE|TCG_CALL_CONST, i64, s32) +DEF_HELPER_3(unpk, void, i32, i64, i64) 
+DEF_HELPER_3(tr, void, i32, i64, i64) + +DEF_HELPER_2(servc, i32, i32, i64) +DEF_HELPER_3(diag, i64, i32, i64, i64) +DEF_HELPER_2(load_psw, void, i64, i64) +DEF_HELPER_1(program_interrupt, void, i32) +DEF_HELPER_FLAGS_1(stidp, TCG_CALL_CONST, void, i64) +DEF_HELPER_FLAGS_1(spx, TCG_CALL_CONST, void, i64) +DEF_HELPER_FLAGS_1(sck, TCG_CALL_CONST, i32, i64) +DEF_HELPER_1(stck, i32, i64) +DEF_HELPER_1(stcke, i32, i64) +DEF_HELPER_FLAGS_1(sckc, TCG_CALL_CONST, void, i64) +DEF_HELPER_FLAGS_1(stckc, TCG_CALL_CONST, void, i64) +DEF_HELPER_FLAGS_1(spt, TCG_CALL_CONST, void, i64) +DEF_HELPER_FLAGS_1(stpt, TCG_CALL_CONST, void, i64) +DEF_HELPER_3(stsi, i32, i64, i32, i32) +DEF_HELPER_3(lctl, void, i32, i64, i32) +DEF_HELPER_3(lctlg, void, i32, i64, i32) +DEF_HELPER_3(stctl, void, i32, i64, i32) +DEF_HELPER_3(stctg, void, i32, i64, i32) +DEF_HELPER_FLAGS_2(tprot, TCG_CALL_CONST, i32, i64, i64) +DEF_HELPER_FLAGS_1(iske, TCG_CALL_PURE|TCG_CALL_CONST, i64, i64) +DEF_HELPER_FLAGS_2(sske, TCG_CALL_CONST, void, i32, i64) +DEF_HELPER_FLAGS_2(rrbe, TCG_CALL_CONST, i32, i32, i64) +DEF_HELPER_2(csp, i32, i32, i32) +DEF_HELPER_3(mvcs, i32, i64, i64, i64) +DEF_HELPER_3(mvcp, i32, i64, i64, i64) +DEF_HELPER_3(sigp, i32, i64, i32, i64) +DEF_HELPER_1(sacf, void, i64) +DEF_HELPER_FLAGS_2(ipte, TCG_CALL_CONST, void, i64, i64) +DEF_HELPER_FLAGS_0(ptlb, TCG_CALL_CONST, void) +DEF_HELPER_2(lra, i32, i64, i32) +DEF_HELPER_2(stura, void, i64, i32) +DEF_HELPER_2(cksm, void, i32, i32) + +DEF_HELPER_FLAGS_4(calc_cc, TCG_CALL_PURE|TCG_CALL_CONST, + i32, i32, i64, i64, i64) + +#include "def-helper.h" diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c index 2643460722..4beb794cca 100644 --- a/target-s390x/kvm.c +++ b/target-s390x/kvm.c @@ -49,13 +49,6 @@ #define DIAG_KVM_HYPERCALL 0x500 #define DIAG_KVM_BREAKPOINT 0x501 -#define SCP_LENGTH 0x00 -#define SCP_FUNCTION_CODE 0x02 -#define SCP_CONTROL_MASK 0x03 -#define SCP_RESPONSE_CODE 0x06 -#define SCP_MEM_CODE 0x08 -#define SCP_INCREMENT 0x0a - #define ICPT_INSTRUCTION 0x04 #define ICPT_WAITPSW 0x1c #define ICPT_SOFT_INTERCEPT 0x24 @@ -179,7 +172,7 @@ void kvm_arch_post_run(CPUState *env, struct kvm_run *run) int kvm_arch_process_async_events(CPUState *env) { - return 0; + return env->halted; } void kvm_s390_interrupt_internal(CPUState *env, int type, uint32_t parm, @@ -228,9 +221,9 @@ static void enter_pgmcheck(CPUState *env, uint16_t code) kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code); } -static void setcc(CPUState *env, uint64_t cc) +static inline void setcc(CPUState *env, uint64_t cc) { - env->kvm_run->psw_mask &= ~(3ul << 44); + env->kvm_run->psw_mask &= ~(3ull << 44); env->kvm_run->psw_mask |= (cc & 3) << 44; env->psw.mask &= ~(3ul << 44); @@ -248,35 +241,11 @@ static int kvm_sclp_service_call(CPUState *env, struct kvm_run *run, sccb = env->regs[ipbh0 & 0xf]; code = env->regs[(ipbh0 & 0xf0) >> 4]; - dprintf("sclp(0x%x, 0x%lx)\n", sccb, code); - - if (sccb & ~0x7ffffff8ul) { - fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb); - r = -1; - goto out; - } - - switch(code) { - case SCLP_CMDW_READ_SCP_INFO: - case SCLP_CMDW_READ_SCP_INFO_FORCED: - stw_phys(sccb + SCP_MEM_CODE, ram_size >> 20); - stb_phys(sccb + SCP_INCREMENT, 1); - stw_phys(sccb + SCP_RESPONSE_CODE, 0x10); - setcc(env, 0); - - kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE, - sccb & ~3, 0, 1); - break; - default: - dprintf("KVM: invalid sclp call 0x%x / 0x%lx\n", sccb, code); - r = -1; - break; - } - -out: - if (r < 0) { + r = sclp_service_call(env, sccb, code); + if (r) { setcc(env, 3); } 
+ return 0; } @@ -408,7 +377,7 @@ static int handle_sigp(CPUState *env, struct kvm_run *run, uint8_t ipa1) r = s390_cpu_initial_reset(target_env); break; default: - fprintf(stderr, "KVM: unknown SIGP: 0x%x\n", ipa1); + fprintf(stderr, "KVM: unknown SIGP: 0x%x\n", order_code); break; } @@ -449,7 +418,8 @@ static int handle_intercept(CPUState *env) int icpt_code = run->s390_sieic.icptcode; int r = 0; - dprintf("intercept: 0x%x (at 0x%lx)\n", icpt_code, env->kvm_run->psw_addr); + dprintf("intercept: 0x%x (at 0x%lx)\n", icpt_code, + (long)env->kvm_run->psw_addr); switch (icpt_code) { case ICPT_INSTRUCTION: r = handle_instruction(env, run); diff --git a/target-s390x/op_helper.c b/target-s390x/op_helper.c index be455b9de2..db03a7971f 100644 --- a/target-s390x/op_helper.c +++ b/target-s390x/op_helper.c @@ -1,6 +1,7 @@ /* * S/390 helper routines * + * Copyright (c) 2009 Ulrich Hecht * Copyright (c) 2009 Alexander Graf * * This library is free software; you can redistribute it and/or @@ -18,6 +19,11 @@ */ #include "exec.h" +#include "host-utils.h" +#include "helpers.h" +#include <string.h> +#include "kvm.h" +#include "qemu-timer.h" /*****************************************************************************/ /* Softmmu support */ @@ -64,10 +70,2917 @@ void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr) cpu_restore_state(tb, env, pc); } } - /* XXX */ - /* helper_raise_exception_err(env->exception_index, env->error_code); */ + cpu_loop_exit(); } env = saved_env; } #endif + +/* #define DEBUG_HELPER */ +#ifdef DEBUG_HELPER +#define HELPER_LOG(x...) qemu_log(x) +#else +#define HELPER_LOG(x...) +#endif + +/* raise an exception */ +void HELPER(exception)(uint32_t excp) +{ + HELPER_LOG("%s: exception %d\n", __FUNCTION__, excp); + env->exception_index = excp; + cpu_loop_exit(); +} + +#ifndef CONFIG_USER_ONLY +static void mvc_fast_memset(CPUState *env, uint32_t l, uint64_t dest, + uint8_t byte) +{ + target_phys_addr_t dest_phys; + target_phys_addr_t len = l; + void *dest_p; + uint64_t asc = env->psw.mask & PSW_MASK_ASC; + int flags; + + if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) { + stb(dest, byte); + cpu_abort(env, "should never reach here"); + } + dest_phys |= dest & ~TARGET_PAGE_MASK; + + dest_p = cpu_physical_memory_map(dest_phys, &len, 1); + + memset(dest_p, byte, len); + + cpu_physical_memory_unmap(dest_p, 1, len, len); +} + +static void mvc_fast_memmove(CPUState *env, uint32_t l, uint64_t dest, + uint64_t src) +{ + target_phys_addr_t dest_phys; + target_phys_addr_t src_phys; + target_phys_addr_t len = l; + void *dest_p; + void *src_p; + uint64_t asc = env->psw.mask & PSW_MASK_ASC; + int flags; + + if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) { + stb(dest, 0); + cpu_abort(env, "should never reach here"); + } + dest_phys |= dest & ~TARGET_PAGE_MASK; + + if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) { + ldub(src); + cpu_abort(env, "should never reach here"); + } + src_phys |= src & ~TARGET_PAGE_MASK; + + dest_p = cpu_physical_memory_map(dest_phys, &len, 1); + src_p = cpu_physical_memory_map(src_phys, &len, 0); + + memmove(dest_p, src_p, len); + + cpu_physical_memory_unmap(dest_p, 1, len, len); + cpu_physical_memory_unmap(src_p, 0, len, len); +} +#endif + +/* and on array */ +uint32_t HELPER(nc)(uint32_t l, uint64_t dest, uint64_t src) +{ + int i; + unsigned char x; + uint32_t cc = 0; + + HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", + __FUNCTION__, l, dest, src); + for (i = 0; i <= l; i++) { + x = ldub(dest + i) & ldub(src + i); + 
if (x) { + cc = 1; + } + stb(dest + i, x); + } + return cc; +} + +/* xor on array */ +uint32_t HELPER(xc)(uint32_t l, uint64_t dest, uint64_t src) +{ + int i; + unsigned char x; + uint32_t cc = 0; + + HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", + __FUNCTION__, l, dest, src); + +#ifndef CONFIG_USER_ONLY + /* xor with itself is the same as memset(0) */ + if ((l > 32) && (src == dest) && + (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) { + mvc_fast_memset(env, l + 1, dest, 0); + return 0; + } +#else + if (src == dest) { + memset(g2h(dest), 0, l + 1); + return 0; + } +#endif + + for (i = 0; i <= l; i++) { + x = ldub(dest + i) ^ ldub(src + i); + if (x) { + cc = 1; + } + stb(dest + i, x); + } + return cc; +} + +/* or on array */ +uint32_t HELPER(oc)(uint32_t l, uint64_t dest, uint64_t src) +{ + int i; + unsigned char x; + uint32_t cc = 0; + + HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", + __FUNCTION__, l, dest, src); + for (i = 0; i <= l; i++) { + x = ldub(dest + i) | ldub(src + i); + if (x) { + cc = 1; + } + stb(dest + i, x); + } + return cc; +} + +/* memmove */ +void HELPER(mvc)(uint32_t l, uint64_t dest, uint64_t src) +{ + int i = 0; + int x = 0; + uint32_t l_64 = (l + 1) / 8; + + HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", + __FUNCTION__, l, dest, src); + +#ifndef CONFIG_USER_ONLY + if ((l > 32) && + (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) && + (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) { + if (dest == (src + 1)) { + mvc_fast_memset(env, l + 1, dest, ldub(src)); + return; + } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) { + mvc_fast_memmove(env, l + 1, dest, src); + return; + } + } +#else + if (dest == (src + 1)) { + memset(g2h(dest), ldub(src), l + 1); + return; + } else { + memmove(g2h(dest), g2h(src), l + 1); + return; + } +#endif + + /* handle the parts that fit into 8-byte loads/stores */ + if (dest != (src + 1)) { + for (i = 0; i < l_64; i++) { + stq(dest + x, ldq(src + x)); + x += 8; + } + } + + /* slow version crossing pages with byte accesses */ + for (i = x; i <= l; i++) { + stb(dest + i, ldub(src + i)); + } +} + +/* compare unsigned byte arrays */ +uint32_t HELPER(clc)(uint32_t l, uint64_t s1, uint64_t s2) +{ + int i; + unsigned char x,y; + uint32_t cc; + HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n", + __FUNCTION__, l, s1, s2); + for (i = 0; i <= l; i++) { + x = ldub(s1 + i); + y = ldub(s2 + i); + HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y); + if (x < y) { + cc = 1; + goto done; + } else if (x > y) { + cc = 2; + goto done; + } + } + cc = 0; +done: + HELPER_LOG("\n"); + return cc; +} + +/* compare logical under mask */ +uint32_t HELPER(clm)(uint32_t r1, uint32_t mask, uint64_t addr) +{ + uint8_t r,d; + uint32_t cc; + HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __FUNCTION__, r1, + mask, addr); + cc = 0; + while (mask) { + if (mask & 8) { + d = ldub(addr); + r = (r1 & 0xff000000UL) >> 24; + HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d, + addr); + if (r < d) { + cc = 1; + break; + } else if (r > d) { + cc = 2; + break; + } + addr++; + } + mask = (mask << 1) & 0xf; + r1 <<= 8; + } + HELPER_LOG("\n"); + return cc; +} + +/* store character under mask */ +void HELPER(stcm)(uint32_t r1, uint32_t mask, uint64_t addr) +{ + uint8_t r; + HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__, r1, mask, + addr); + while (mask) { + if (mask & 8) { + r = (r1 & 0xff000000UL) >> 24; + stb(addr, r); + HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask, 
r, addr); + addr++; + } + mask = (mask << 1) & 0xf; + r1 <<= 8; + } + HELPER_LOG("\n"); +} + +/* 64/64 -> 128 unsigned multiplication */ +void HELPER(mlg)(uint32_t r1, uint64_t v2) +{ +#if HOST_LONG_BITS == 64 && defined(__GNUC__) + /* assuming 64-bit hosts have __uint128_t */ + __uint128_t res = (__uint128_t)env->regs[r1 + 1]; + res *= (__uint128_t)v2; + env->regs[r1] = (uint64_t)(res >> 64); + env->regs[r1 + 1] = (uint64_t)res; +#else + mulu64(&env->regs[r1 + 1], &env->regs[r1], env->regs[r1 + 1], v2); +#endif +} + +/* 128 -> 64/64 unsigned division */ +void HELPER(dlg)(uint32_t r1, uint64_t v2) +{ + uint64_t divisor = v2; + + if (!env->regs[r1]) { + /* 64 -> 64/64 case */ + env->regs[r1] = env->regs[r1+1] % divisor; + env->regs[r1+1] = env->regs[r1+1] / divisor; + return; + } else { + +#if HOST_LONG_BITS == 64 && defined(__GNUC__) + /* assuming 64-bit hosts have __uint128_t */ + __uint128_t dividend = (((__uint128_t)env->regs[r1]) << 64) | + (env->regs[r1+1]); + __uint128_t quotient = dividend / divisor; + env->regs[r1+1] = quotient; + __uint128_t remainder = dividend % divisor; + env->regs[r1] = remainder; +#else + /* 32-bit hosts would need special wrapper functionality - just abort if + we encounter such a case; it's very unlikely anyways. */ + cpu_abort(env, "128 -> 64/64 division not implemented\n"); +#endif + } +} + +static inline uint64_t get_address(int x2, int b2, int d2) +{ + uint64_t r = d2; + + if (x2) { + r += env->regs[x2]; + } + + if (b2) { + r += env->regs[b2]; + } + + /* 31-Bit mode */ + if (!(env->psw.mask & PSW_MASK_64)) { + r &= 0x7fffffff; + } + + return r; +} + +static inline uint64_t get_address_31fix(int reg) +{ + uint64_t r = env->regs[reg]; + + /* 31-Bit mode */ + if (!(env->psw.mask & PSW_MASK_64)) { + r &= 0x7fffffff; + } + + return r; +} + +/* search string (c is byte to search, r2 is string, r1 end of string) */ +uint32_t HELPER(srst)(uint32_t c, uint32_t r1, uint32_t r2) +{ + uint64_t i; + uint32_t cc = 2; + uint64_t str = get_address_31fix(r2); + uint64_t end = get_address_31fix(r1); + + HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __FUNCTION__, + c, env->regs[r1], env->regs[r2]); + + for (i = str; i != end; i++) { + if (ldub(i) == c) { + env->regs[r1] = i; + cc = 1; + break; + } + } + + return cc; +} + +/* unsigned string compare (c is string terminator) */ +uint32_t HELPER(clst)(uint32_t c, uint32_t r1, uint32_t r2) +{ + uint64_t s1 = get_address_31fix(r1); + uint64_t s2 = get_address_31fix(r2); + uint8_t v1, v2; + uint32_t cc; + c = c & 0xff; +#ifdef CONFIG_USER_ONLY + if (!c) { + HELPER_LOG("%s: comparing '%s' and '%s'\n", + __FUNCTION__, (char*)g2h(s1), (char*)g2h(s2)); + } +#endif + for (;;) { + v1 = ldub(s1); + v2 = ldub(s2); + if ((v1 == c || v2 == c) || (v1 != v2)) { + break; + } + s1++; + s2++; + } + + if (v1 == v2) { + cc = 0; + } else { + cc = (v1 < v2) ? 1 : 2; + /* FIXME: 31-bit mode! 
*/ + env->regs[r1] = s1; + env->regs[r2] = s2; + } + return cc; +} + +/* move page */ +void HELPER(mvpg)(uint64_t r0, uint64_t r1, uint64_t r2) +{ + /* XXX missing r0 handling */ +#ifdef CONFIG_USER_ONLY + int i; + + for (i = 0; i < TARGET_PAGE_SIZE; i++) { + stb(r1 + i, ldub(r2 + i)); + } +#else + mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2); +#endif +} + +/* string copy (c is string terminator) */ +void HELPER(mvst)(uint32_t c, uint32_t r1, uint32_t r2) +{ + uint64_t dest = get_address_31fix(r1); + uint64_t src = get_address_31fix(r2); + uint8_t v; + c = c & 0xff; +#ifdef CONFIG_USER_ONLY + if (!c) { + HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__, (char*)g2h(src), + dest); + } +#endif + for (;;) { + v = ldub(src); + stb(dest, v); + if (v == c) { + break; + } + src++; + dest++; + } + env->regs[r1] = dest; /* FIXME: 31-bit mode! */ +} + +/* compare and swap 64-bit */ +uint32_t HELPER(csg)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + /* FIXME: locking? */ + uint32_t cc; + uint64_t v2 = ldq(a2); + if (env->regs[r1] == v2) { + cc = 0; + stq(a2, env->regs[r3]); + } else { + cc = 1; + env->regs[r1] = v2; + } + return cc; +} + +/* compare double and swap 64-bit */ +uint32_t HELPER(cdsg)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + /* FIXME: locking? */ + uint32_t cc; + uint64_t v2_hi = ldq(a2); + uint64_t v2_lo = ldq(a2 + 8); + uint64_t v1_hi = env->regs[r1]; + uint64_t v1_lo = env->regs[r1 + 1]; + + if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) { + cc = 0; + stq(a2, env->regs[r3]); + stq(a2 + 8, env->regs[r3 + 1]); + } else { + cc = 1; + env->regs[r1] = v2_hi; + env->regs[r1 + 1] = v2_lo; + } + + return cc; +} + +/* compare and swap 32-bit */ +uint32_t HELPER(cs)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + /* FIXME: locking? */ + uint32_t cc; + HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__, r1, a2, r3); + uint32_t v2 = ldl(a2); + if (((uint32_t)env->regs[r1]) == v2) { + cc = 0; + stl(a2, (uint32_t)env->regs[r3]); + } else { + cc = 1; + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | v2; + } + return cc; +} + +static uint32_t helper_icm(uint32_t r1, uint64_t address, uint32_t mask) +{ + int pos = 24; /* top of the lower half of r1 */ + uint64_t rmask = 0xff000000ULL; + uint8_t val = 0; + int ccd = 0; + uint32_t cc = 0; + + while (mask) { + if (mask & 8) { + env->regs[r1] &= ~rmask; + val = ldub(address); + if ((val & 0x80) && !ccd) { + cc = 1; + } + ccd = 1; + if (val && cc == 0) { + cc = 2; + } + env->regs[r1] |= (uint64_t)val << pos; + address++; + } + mask = (mask << 1) & 0xf; + pos -= 8; + rmask >>= 8; + } + + return cc; +} + +/* execute instruction + this instruction executes an insn modified with the contents of r1 + it does not change the executed instruction in memory + it does not change the program counter + in other words: tricky... 
+ currently implemented by interpreting the cases it is most commonly used in + */ +uint32_t HELPER(ex)(uint32_t cc, uint64_t v1, uint64_t addr, uint64_t ret) +{ + uint16_t insn = lduw_code(addr); + HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__, v1, addr, + insn); + if ((insn & 0xf0ff) == 0xd000) { + uint32_t l, insn2, b1, b2, d1, d2; + l = v1 & 0xff; + insn2 = ldl_code(addr + 2); + b1 = (insn2 >> 28) & 0xf; + b2 = (insn2 >> 12) & 0xf; + d1 = (insn2 >> 16) & 0xfff; + d2 = insn2 & 0xfff; + switch (insn & 0xf00) { + case 0x200: + helper_mvc(l, get_address(0, b1, d1), get_address(0, b2, d2)); + break; + case 0x500: + cc = helper_clc(l, get_address(0, b1, d1), get_address(0, b2, d2)); + break; + case 0x700: + cc = helper_xc(l, get_address(0, b1, d1), get_address(0, b2, d2)); + break; + default: + goto abort; + break; + } + } else if ((insn & 0xff00) == 0x0a00) { + /* supervisor call */ + HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__, (insn|v1) & 0xff); + env->psw.addr = ret - 4; + env->int_svc_code = (insn|v1) & 0xff; + env->int_svc_ilc = 4; + helper_exception(EXCP_SVC); + } else if ((insn & 0xff00) == 0xbf00) { + uint32_t insn2, r1, r3, b2, d2; + insn2 = ldl_code(addr + 2); + r1 = (insn2 >> 20) & 0xf; + r3 = (insn2 >> 16) & 0xf; + b2 = (insn2 >> 12) & 0xf; + d2 = insn2 & 0xfff; + cc = helper_icm(r1, get_address(0, b2, d2), r3); + } else { +abort: + cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n", + insn); + } + return cc; +} + +/* absolute value 32-bit */ +uint32_t HELPER(abs_i32)(int32_t val) +{ + if (val < 0) { + return -val; + } else { + return val; + } +} + +/* negative absolute value 32-bit */ +int32_t HELPER(nabs_i32)(int32_t val) +{ + if (val < 0) { + return val; + } else { + return -val; + } +} + +/* absolute value 64-bit */ +uint64_t HELPER(abs_i64)(int64_t val) +{ + HELPER_LOG("%s: val 0x%" PRIx64 "\n", __FUNCTION__, val); + + if (val < 0) { + return -val; + } else { + return val; + } +} + +/* negative absolute value 64-bit */ +int64_t HELPER(nabs_i64)(int64_t val) +{ + if (val < 0) { + return val; + } else { + return -val; + } +} + +/* add with carry 32-bit unsigned */ +uint32_t HELPER(addc_u32)(uint32_t cc, uint32_t v1, uint32_t v2) +{ + uint32_t res; + + res = v1 + v2; + if (cc & 2) { + res++; + } + + return res; +} + +/* store character under mask high operates on the upper half of r1 */ +void HELPER(stcmh)(uint32_t r1, uint64_t address, uint32_t mask) +{ + int pos = 56; /* top of the upper half of r1 */ + + while (mask) { + if (mask & 8) { + stb(address, (env->regs[r1] >> pos) & 0xff); + address++; + } + mask = (mask << 1) & 0xf; + pos -= 8; + } +} + +/* insert character under mask high; same as icm, but operates on the + upper half of r1 */ +uint32_t HELPER(icmh)(uint32_t r1, uint64_t address, uint32_t mask) +{ + int pos = 56; /* top of the upper half of r1 */ + uint64_t rmask = 0xff00000000000000ULL; + uint8_t val = 0; + int ccd = 0; + uint32_t cc = 0; + + while (mask) { + if (mask & 8) { + env->regs[r1] &= ~rmask; + val = ldub(address); + if ((val & 0x80) && !ccd) { + cc = 1; + } + ccd = 1; + if (val && cc == 0) { + cc = 2; + } + env->regs[r1] |= (uint64_t)val << pos; + address++; + } + mask = (mask << 1) & 0xf; + pos -= 8; + rmask >>= 8; + } + + return cc; +} + +/* insert psw mask and condition code into r1 */ +void HELPER(ipm)(uint32_t cc, uint32_t r1) +{ + uint64_t r = env->regs[r1]; + + r &= 0xffffffff00ffffffULL; + r |= (cc << 28) | ( (env->psw.mask >> 40) & 0xf ); + env->regs[r1] = r; + HELPER_LOG("%s: cc %d psw.mask 0x%lx 
r1 0x%lx\n", __FUNCTION__, + cc, env->psw.mask, r); +} + +/* load access registers r1 to r3 from memory at a2 */ +void HELPER(lam)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + int i; + + for (i = r1;; i = (i + 1) % 16) { + env->aregs[i] = ldl(a2); + a2 += 4; + + if (i == r3) { + break; + } + } +} + +/* store access registers r1 to r3 in memory at a2 */ +void HELPER(stam)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + int i; + + for (i = r1;; i = (i + 1) % 16) { + stl(a2, env->aregs[i]); + a2 += 4; + + if (i == r3) { + break; + } + } +} + +/* move long */ +uint32_t HELPER(mvcl)(uint32_t r1, uint32_t r2) +{ + uint64_t destlen = env->regs[r1 + 1] & 0xffffff; + uint64_t dest = get_address_31fix(r1); + uint64_t srclen = env->regs[r2 + 1] & 0xffffff; + uint64_t src = get_address_31fix(r2); + uint8_t pad = src >> 24; + uint8_t v; + uint32_t cc; + + if (destlen == srclen) { + cc = 0; + } else if (destlen < srclen) { + cc = 1; + } else { + cc = 2; + } + + if (srclen > destlen) { + srclen = destlen; + } + + for (; destlen && srclen; src++, dest++, destlen--, srclen--) { + v = ldub(src); + stb(dest, v); + } + + for (; destlen; dest++, destlen--) { + stb(dest, pad); + } + + env->regs[r1 + 1] = destlen; + /* can't use srclen here, we trunc'ed it */ + env->regs[r2 + 1] -= src - env->regs[r2]; + env->regs[r1] = dest; + env->regs[r2] = src; + + return cc; +} + +/* move long extended another memcopy insn with more bells and whistles */ +uint32_t HELPER(mvcle)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + uint64_t destlen = env->regs[r1 + 1]; + uint64_t dest = env->regs[r1]; + uint64_t srclen = env->regs[r3 + 1]; + uint64_t src = env->regs[r3]; + uint8_t pad = a2 & 0xff; + uint8_t v; + uint32_t cc; + + if (!(env->psw.mask & PSW_MASK_64)) { + destlen = (uint32_t)destlen; + srclen = (uint32_t)srclen; + dest &= 0x7fffffff; + src &= 0x7fffffff; + } + + if (destlen == srclen) { + cc = 0; + } else if (destlen < srclen) { + cc = 1; + } else { + cc = 2; + } + + if (srclen > destlen) { + srclen = destlen; + } + + for (; destlen && srclen; src++, dest++, destlen--, srclen--) { + v = ldub(src); + stb(dest, v); + } + + for (; destlen; dest++, destlen--) { + stb(dest, pad); + } + + env->regs[r1 + 1] = destlen; + /* can't use srclen here, we trunc'ed it */ + /* FIXME: 31-bit mode! */ + env->regs[r3 + 1] -= src - env->regs[r3]; + env->regs[r1] = dest; + env->regs[r3] = src; + + return cc; +} + +/* compare logical long extended memcompare insn with padding */ +uint32_t HELPER(clcle)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + uint64_t destlen = env->regs[r1 + 1]; + uint64_t dest = get_address_31fix(r1); + uint64_t srclen = env->regs[r3 + 1]; + uint64_t src = get_address_31fix(r3); + uint8_t pad = a2 & 0xff; + uint8_t v1 = 0,v2 = 0; + uint32_t cc = 0; + + if (!(destlen || srclen)) { + return cc; + } + + if (srclen > destlen) { + srclen = destlen; + } + + for (; destlen || srclen; src++, dest++, destlen--, srclen--) { + v1 = srclen ? ldub(src) : pad; + v2 = destlen ? ldub(dest) : pad; + if (v1 != v2) { + cc = (v1 < v2) ? 1 : 2; + break; + } + } + + env->regs[r1 + 1] = destlen; + /* can't use srclen here, we trunc'ed it */ + env->regs[r3 + 1] -= src - env->regs[r3]; + env->regs[r1] = dest; + env->regs[r3] = src; + + return cc; +} + +/* subtract unsigned v2 from v1 with borrow */ +uint32_t HELPER(slb)(uint32_t cc, uint32_t r1, uint32_t v2) +{ + uint32_t v1 = env->regs[r1]; + uint32_t res = v1 + (~v2) + (cc >> 1); + + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res; + if (cc & 2) { + /* borrow */ + return v1 ? 
1 : 0; + } else { + return v1 ? 3 : 2; + } +} + +/* subtract unsigned v2 from v1 with borrow */ +uint32_t HELPER(slbg)(uint32_t cc, uint32_t r1, uint64_t v1, uint64_t v2) +{ + uint64_t res = v1 + (~v2) + (cc >> 1); + + env->regs[r1] = res; + if (cc & 2) { + /* borrow */ + return v1 ? 1 : 0; + } else { + return v1 ? 3 : 2; + } +} + +static inline int float_comp_to_cc(int float_compare) +{ + switch (float_compare) { + case float_relation_equal: + return 0; + case float_relation_less: + return 1; + case float_relation_greater: + return 2; + case float_relation_unordered: + return 3; + default: + cpu_abort(env, "unknown return value for float compare\n"); + } +} + +/* condition codes for binary FP ops */ +static uint32_t set_cc_f32(float32 v1, float32 v2) +{ + return float_comp_to_cc(float32_compare_quiet(v1, v2, &env->fpu_status)); +} + +static uint32_t set_cc_f64(float64 v1, float64 v2) +{ + return float_comp_to_cc(float64_compare_quiet(v1, v2, &env->fpu_status)); +} + +/* condition codes for unary FP ops */ +static uint32_t set_cc_nz_f32(float32 v) +{ + if (float32_is_any_nan(v)) { + return 3; + } else if (float32_is_zero(v)) { + return 0; + } else if (float32_is_neg(v)) { + return 1; + } else { + return 2; + } +} + +static uint32_t set_cc_nz_f64(float64 v) +{ + if (float64_is_any_nan(v)) { + return 3; + } else if (float64_is_zero(v)) { + return 0; + } else if (float64_is_neg(v)) { + return 1; + } else { + return 2; + } +} + +static uint32_t set_cc_nz_f128(float128 v) +{ + if (float128_is_any_nan(v)) { + return 3; + } else if (float128_is_zero(v)) { + return 0; + } else if (float128_is_neg(v)) { + return 1; + } else { + return 2; + } +} + +/* convert 32-bit int to 64-bit float */ +void HELPER(cdfbr)(uint32_t f1, int32_t v2) +{ + HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__, v2, f1); + env->fregs[f1].d = int32_to_float64(v2, &env->fpu_status); +} + +/* convert 32-bit int to 128-bit float */ +void HELPER(cxfbr)(uint32_t f1, int32_t v2) +{ + CPU_QuadU v1; + v1.q = int32_to_float128(v2, &env->fpu_status); + env->fregs[f1].ll = v1.ll.upper; + env->fregs[f1 + 2].ll = v1.ll.lower; +} + +/* convert 64-bit int to 32-bit float */ +void HELPER(cegbr)(uint32_t f1, int64_t v2) +{ + HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1); + env->fregs[f1].l.upper = int64_to_float32(v2, &env->fpu_status); +} + +/* convert 64-bit int to 64-bit float */ +void HELPER(cdgbr)(uint32_t f1, int64_t v2) +{ + HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1); + env->fregs[f1].d = int64_to_float64(v2, &env->fpu_status); +} + +/* convert 64-bit int to 128-bit float */ +void HELPER(cxgbr)(uint32_t f1, int64_t v2) +{ + CPU_QuadU x1; + x1.q = int64_to_float128(v2, &env->fpu_status); + HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__, v2, + x1.ll.upper, x1.ll.lower); + env->fregs[f1].ll = x1.ll.upper; + env->fregs[f1 + 2].ll = x1.ll.lower; +} + +/* convert 32-bit int to 32-bit float */ +void HELPER(cefbr)(uint32_t f1, int32_t v2) +{ + env->fregs[f1].l.upper = int32_to_float32(v2, &env->fpu_status); + HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__, v2, + env->fregs[f1].l.upper, f1); +} + +/* 32-bit FP addition RR */ +uint32_t HELPER(aebr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper, + env->fregs[f2].l.upper, + &env->fpu_status); + HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__, + env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1); + + return set_cc_nz_f32(env->fregs[f1].l.upper); +} + +/* 
64-bit FP addition RR */ +uint32_t HELPER(adbr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].d = float64_add(env->fregs[f1].d, env->fregs[f2].d, + &env->fpu_status); + HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__, + env->fregs[f2].d, env->fregs[f1].d, f1); + + return set_cc_nz_f64(env->fregs[f1].d); +} + +/* 32-bit FP subtraction RR */ +uint32_t HELPER(sebr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].l.upper = float32_sub(env->fregs[f1].l.upper, + env->fregs[f2].l.upper, + &env->fpu_status); + HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__, + env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1); + + return set_cc_nz_f32(env->fregs[f1].l.upper); +} + +/* 64-bit FP subtraction RR */ +uint32_t HELPER(sdbr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].d = float64_sub(env->fregs[f1].d, env->fregs[f2].d, + &env->fpu_status); + HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n", + __FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1); + + return set_cc_nz_f64(env->fregs[f1].d); +} + +/* 32-bit FP division RR */ +void HELPER(debr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].l.upper = float32_div(env->fregs[f1].l.upper, + env->fregs[f2].l.upper, + &env->fpu_status); +} + +/* 128-bit FP division RR */ +void HELPER(dxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU v1; + v1.ll.upper = env->fregs[f1].ll; + v1.ll.lower = env->fregs[f1 + 2].ll; + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + CPU_QuadU res; + res.q = float128_div(v1.q, v2.q, &env->fpu_status); + env->fregs[f1].ll = res.ll.upper; + env->fregs[f1 + 2].ll = res.ll.lower; +} + +/* 64-bit FP multiplication RR */ +void HELPER(mdbr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].d = float64_mul(env->fregs[f1].d, env->fregs[f2].d, + &env->fpu_status); +} + +/* 128-bit FP multiplication RR */ +void HELPER(mxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU v1; + v1.ll.upper = env->fregs[f1].ll; + v1.ll.lower = env->fregs[f1 + 2].ll; + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + CPU_QuadU res; + res.q = float128_mul(v1.q, v2.q, &env->fpu_status); + env->fregs[f1].ll = res.ll.upper; + env->fregs[f1 + 2].ll = res.ll.lower; +} + +/* convert 32-bit float to 64-bit float */ +void HELPER(ldebr)(uint32_t r1, uint32_t r2) +{ + env->fregs[r1].d = float32_to_float64(env->fregs[r2].l.upper, + &env->fpu_status); +} + +/* convert 128-bit float to 64-bit float */ +void HELPER(ldxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU x2; + x2.ll.upper = env->fregs[f2].ll; + x2.ll.lower = env->fregs[f2 + 2].ll; + env->fregs[f1].d = float128_to_float64(x2.q, &env->fpu_status); + HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__, env->fregs[f1].d); +} + +/* convert 64-bit float to 128-bit float */ +void HELPER(lxdbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU res; + res.q = float64_to_float128(env->fregs[f2].d, &env->fpu_status); + env->fregs[f1].ll = res.ll.upper; + env->fregs[f1 + 2].ll = res.ll.lower; +} + +/* convert 64-bit float to 32-bit float */ +void HELPER(ledbr)(uint32_t f1, uint32_t f2) +{ + float64 d2 = env->fregs[f2].d; + env->fregs[f1].l.upper = float64_to_float32(d2, &env->fpu_status); +} + +/* convert 128-bit float to 32-bit float */ +void HELPER(lexbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU x2; + x2.ll.upper = env->fregs[f2].ll; + x2.ll.lower = env->fregs[f2 + 2].ll; + env->fregs[f1].l.upper = float128_to_float32(x2.q, &env->fpu_status); + HELPER_LOG("%s: to 0x%d\n", __FUNCTION__, env->fregs[f1].l.upper); +} + +/* 
absolute value of 32-bit float */ +uint32_t HELPER(lpebr)(uint32_t f1, uint32_t f2) +{ + float32 v1; + float32 v2 = env->fregs[f2].d; + v1 = float32_abs(v2); + env->fregs[f1].d = v1; + return set_cc_nz_f32(v1); +} + +/* absolute value of 64-bit float */ +uint32_t HELPER(lpdbr)(uint32_t f1, uint32_t f2) +{ + float64 v1; + float64 v2 = env->fregs[f2].d; + v1 = float64_abs(v2); + env->fregs[f1].d = v1; + return set_cc_nz_f64(v1); +} + +/* absolute value of 128-bit float */ +uint32_t HELPER(lpxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU v1; + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + v1.q = float128_abs(v2.q); + env->fregs[f1].ll = v1.ll.upper; + env->fregs[f1 + 2].ll = v1.ll.lower; + return set_cc_nz_f128(v1.q); +} + +/* load and test 64-bit float */ +uint32_t HELPER(ltdbr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].d = env->fregs[f2].d; + return set_cc_nz_f64(env->fregs[f1].d); +} + +/* load and test 32-bit float */ +uint32_t HELPER(ltebr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].l.upper = env->fregs[f2].l.upper; + return set_cc_nz_f32(env->fregs[f1].l.upper); +} + +/* load and test 128-bit float */ +uint32_t HELPER(ltxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU x; + x.ll.upper = env->fregs[f2].ll; + x.ll.lower = env->fregs[f2 + 2].ll; + env->fregs[f1].ll = x.ll.upper; + env->fregs[f1 + 2].ll = x.ll.lower; + return set_cc_nz_f128(x.q); +} + +/* load complement of 32-bit float */ +uint32_t HELPER(lcebr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].l.upper = float32_chs(env->fregs[f2].l.upper); + + return set_cc_nz_f32(env->fregs[f1].l.upper); +} + +/* load complement of 64-bit float */ +uint32_t HELPER(lcdbr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].d = float64_chs(env->fregs[f2].d); + + return set_cc_nz_f64(env->fregs[f1].d); +} + +/* load complement of 128-bit float */ +uint32_t HELPER(lcxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU x1, x2; + x2.ll.upper = env->fregs[f2].ll; + x2.ll.lower = env->fregs[f2 + 2].ll; + x1.q = float128_chs(x2.q); + env->fregs[f1].ll = x1.ll.upper; + env->fregs[f1 + 2].ll = x1.ll.lower; + return set_cc_nz_f128(x1.q); +} + +/* 32-bit FP addition RM */ +void HELPER(aeb)(uint32_t f1, uint32_t val) +{ + float32 v1 = env->fregs[f1].l.upper; + CPU_FloatU v2; + v2.l = val; + HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__, + v1, f1, v2.f); + env->fregs[f1].l.upper = float32_add(v1, v2.f, &env->fpu_status); +} + +/* 32-bit FP division RM */ +void HELPER(deb)(uint32_t f1, uint32_t val) +{ + float32 v1 = env->fregs[f1].l.upper; + CPU_FloatU v2; + v2.l = val; + HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__, + v1, f1, v2.f); + env->fregs[f1].l.upper = float32_div(v1, v2.f, &env->fpu_status); +} + +/* 32-bit FP multiplication RM */ +void HELPER(meeb)(uint32_t f1, uint32_t val) +{ + float32 v1 = env->fregs[f1].l.upper; + CPU_FloatU v2; + v2.l = val; + HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__, + v1, f1, v2.f); + env->fregs[f1].l.upper = float32_mul(v1, v2.f, &env->fpu_status); +} + +/* 32-bit FP compare RR */ +uint32_t HELPER(cebr)(uint32_t f1, uint32_t f2) +{ + float32 v1 = env->fregs[f1].l.upper; + float32 v2 = env->fregs[f2].l.upper;; + HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__, + v1, f1, v2); + return set_cc_f32(v1, v2); +} + +/* 64-bit FP compare RR */ +uint32_t HELPER(cdbr)(uint32_t f1, uint32_t f2) +{ + float64 v1 = env->fregs[f1].d; + float64 v2 = env->fregs[f2].d;; + HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", 
__FUNCTION__, + v1, f1, v2); + return set_cc_f64(v1, v2); +} + +/* 128-bit FP compare RR */ +uint32_t HELPER(cxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU v1; + v1.ll.upper = env->fregs[f1].ll; + v1.ll.lower = env->fregs[f1 + 2].ll; + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + + return float_comp_to_cc(float128_compare_quiet(v1.q, v2.q, + &env->fpu_status)); +} + +/* 64-bit FP compare RM */ +uint32_t HELPER(cdb)(uint32_t f1, uint64_t a2) +{ + float64 v1 = env->fregs[f1].d; + CPU_DoubleU v2; + v2.ll = ldq(a2); + HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__, v1, + f1, v2.d); + return set_cc_f64(v1, v2.d); +} + +/* 64-bit FP addition RM */ +uint32_t HELPER(adb)(uint32_t f1, uint64_t a2) +{ + float64 v1 = env->fregs[f1].d; + CPU_DoubleU v2; + v2.ll = ldq(a2); + HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__, + v1, f1, v2.d); + env->fregs[f1].d = v1 = float64_add(v1, v2.d, &env->fpu_status); + return set_cc_nz_f64(v1); +} + +/* 32-bit FP subtraction RM */ +void HELPER(seb)(uint32_t f1, uint32_t val) +{ + float32 v1 = env->fregs[f1].l.upper; + CPU_FloatU v2; + v2.l = val; + env->fregs[f1].l.upper = float32_sub(v1, v2.f, &env->fpu_status); +} + +/* 64-bit FP subtraction RM */ +uint32_t HELPER(sdb)(uint32_t f1, uint64_t a2) +{ + float64 v1 = env->fregs[f1].d; + CPU_DoubleU v2; + v2.ll = ldq(a2); + env->fregs[f1].d = v1 = float64_sub(v1, v2.d, &env->fpu_status); + return set_cc_nz_f64(v1); +} + +/* 64-bit FP multiplication RM */ +void HELPER(mdb)(uint32_t f1, uint64_t a2) +{ + float64 v1 = env->fregs[f1].d; + CPU_DoubleU v2; + v2.ll = ldq(a2); + HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__, + v1, f1, v2.d); + env->fregs[f1].d = float64_mul(v1, v2.d, &env->fpu_status); +} + +/* 64-bit FP division RM */ +void HELPER(ddb)(uint32_t f1, uint64_t a2) +{ + float64 v1 = env->fregs[f1].d; + CPU_DoubleU v2; + v2.ll = ldq(a2); + HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__, + v1, f1, v2.d); + env->fregs[f1].d = float64_div(v1, v2.d, &env->fpu_status); +} + +static void set_round_mode(int m3) +{ + switch (m3) { + case 0: + /* current mode */ + break; + case 1: + /* biased round no nearest */ + case 4: + /* round to nearest */ + set_float_rounding_mode(float_round_nearest_even, &env->fpu_status); + break; + case 5: + /* round to zero */ + set_float_rounding_mode(float_round_to_zero, &env->fpu_status); + break; + case 6: + /* round to +inf */ + set_float_rounding_mode(float_round_up, &env->fpu_status); + break; + case 7: + /* round to -inf */ + set_float_rounding_mode(float_round_down, &env->fpu_status); + break; + } +} + +/* convert 32-bit float to 64-bit int */ +uint32_t HELPER(cgebr)(uint32_t r1, uint32_t f2, uint32_t m3) +{ + float32 v2 = env->fregs[f2].l.upper; + set_round_mode(m3); + env->regs[r1] = float32_to_int64(v2, &env->fpu_status); + return set_cc_nz_f32(v2); +} + +/* convert 64-bit float to 64-bit int */ +uint32_t HELPER(cgdbr)(uint32_t r1, uint32_t f2, uint32_t m3) +{ + float64 v2 = env->fregs[f2].d; + set_round_mode(m3); + env->regs[r1] = float64_to_int64(v2, &env->fpu_status); + return set_cc_nz_f64(v2); +} + +/* convert 128-bit float to 64-bit int */ +uint32_t HELPER(cgxbr)(uint32_t r1, uint32_t f2, uint32_t m3) +{ + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + set_round_mode(m3); + env->regs[r1] = float128_to_int64(v2.q, &env->fpu_status); + if (float128_is_any_nan(v2.q)) { + return 3; + } else if 
(float128_is_zero(v2.q)) { + return 0; + } else if (float128_is_neg(v2.q)) { + return 1; + } else { + return 2; + } +} + +/* convert 32-bit float to 32-bit int */ +uint32_t HELPER(cfebr)(uint32_t r1, uint32_t f2, uint32_t m3) +{ + float32 v2 = env->fregs[f2].l.upper; + set_round_mode(m3); + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | + float32_to_int32(v2, &env->fpu_status); + return set_cc_nz_f32(v2); +} + +/* convert 64-bit float to 32-bit int */ +uint32_t HELPER(cfdbr)(uint32_t r1, uint32_t f2, uint32_t m3) +{ + float64 v2 = env->fregs[f2].d; + set_round_mode(m3); + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | + float64_to_int32(v2, &env->fpu_status); + return set_cc_nz_f64(v2); +} + +/* convert 128-bit float to 32-bit int */ +uint32_t HELPER(cfxbr)(uint32_t r1, uint32_t f2, uint32_t m3) +{ + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | + float128_to_int32(v2.q, &env->fpu_status); + return set_cc_nz_f128(v2.q); +} + +/* load 32-bit FP zero */ +void HELPER(lzer)(uint32_t f1) +{ + env->fregs[f1].l.upper = float32_zero; +} + +/* load 64-bit FP zero */ +void HELPER(lzdr)(uint32_t f1) +{ + env->fregs[f1].d = float64_zero; +} + +/* load 128-bit FP zero */ +void HELPER(lzxr)(uint32_t f1) +{ + CPU_QuadU x; + x.q = float64_to_float128(float64_zero, &env->fpu_status); + env->fregs[f1].ll = x.ll.upper; + env->fregs[f1 + 1].ll = x.ll.lower; +} + +/* 128-bit FP subtraction RR */ +uint32_t HELPER(sxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU v1; + v1.ll.upper = env->fregs[f1].ll; + v1.ll.lower = env->fregs[f1 + 2].ll; + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + CPU_QuadU res; + res.q = float128_sub(v1.q, v2.q, &env->fpu_status); + env->fregs[f1].ll = res.ll.upper; + env->fregs[f1 + 2].ll = res.ll.lower; + return set_cc_nz_f128(res.q); +} + +/* 128-bit FP addition RR */ +uint32_t HELPER(axbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU v1; + v1.ll.upper = env->fregs[f1].ll; + v1.ll.lower = env->fregs[f1 + 2].ll; + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + CPU_QuadU res; + res.q = float128_add(v1.q, v2.q, &env->fpu_status); + env->fregs[f1].ll = res.ll.upper; + env->fregs[f1 + 2].ll = res.ll.lower; + return set_cc_nz_f128(res.q); +} + +/* 32-bit FP multiplication RR */ +void HELPER(meebr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].l.upper = float32_mul(env->fregs[f1].l.upper, + env->fregs[f2].l.upper, + &env->fpu_status); +} + +/* 64-bit FP division RR */ +void HELPER(ddbr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].d = float64_div(env->fregs[f1].d, env->fregs[f2].d, + &env->fpu_status); +} + +/* 64-bit FP multiply and add RM */ +void HELPER(madb)(uint32_t f1, uint64_t a2, uint32_t f3) +{ + HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__, f1, a2, f3); + CPU_DoubleU v2; + v2.ll = ldq(a2); + env->fregs[f1].d = float64_add(env->fregs[f1].d, + float64_mul(v2.d, env->fregs[f3].d, + &env->fpu_status), + &env->fpu_status); +} + +/* 64-bit FP multiply and add RR */ +void HELPER(madbr)(uint32_t f1, uint32_t f3, uint32_t f2) +{ + HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3); + env->fregs[f1].d = float64_add(float64_mul(env->fregs[f2].d, + env->fregs[f3].d, + &env->fpu_status), + env->fregs[f1].d, &env->fpu_status); +} + +/* 64-bit FP multiply and subtract RR */ +void HELPER(msdbr)(uint32_t f1, uint32_t f3, uint32_t f2) +{ + HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", 
__FUNCTION__, f1, f2, f3); + env->fregs[f1].d = float64_sub(float64_mul(env->fregs[f2].d, + env->fregs[f3].d, + &env->fpu_status), + env->fregs[f1].d, &env->fpu_status); +} + +/* 32-bit FP multiply and add RR */ +void HELPER(maebr)(uint32_t f1, uint32_t f3, uint32_t f2) +{ + env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper, + float32_mul(env->fregs[f2].l.upper, + env->fregs[f3].l.upper, + &env->fpu_status), + &env->fpu_status); +} + +/* convert 64-bit float to 128-bit float */ +void HELPER(lxdb)(uint32_t f1, uint64_t a2) +{ + CPU_DoubleU v2; + v2.ll = ldq(a2); + CPU_QuadU v1; + v1.q = float64_to_float128(v2.d, &env->fpu_status); + env->fregs[f1].ll = v1.ll.upper; + env->fregs[f1 + 2].ll = v1.ll.lower; +} + +/* test data class 32-bit */ +uint32_t HELPER(tceb)(uint32_t f1, uint64_t m2) +{ + float32 v1 = env->fregs[f1].l.upper; + int neg = float32_is_neg(v1); + uint32_t cc = 0; + + HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, (long)v1, m2, neg); + if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) || + (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) || + (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) || + (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) { + cc = 1; + } else if (m2 & (1 << (9-neg))) { + /* assume normalized number */ + cc = 1; + } + + /* FIXME: denormalized? */ + return cc; +} + +/* test data class 64-bit */ +uint32_t HELPER(tcdb)(uint32_t f1, uint64_t m2) +{ + float64 v1 = env->fregs[f1].d; + int neg = float64_is_neg(v1); + uint32_t cc = 0; + + HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg); + if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) || + (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) || + (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) || + (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) { + cc = 1; + } else if (m2 & (1 << (9-neg))) { + /* assume normalized number */ + cc = 1; + } + /* FIXME: denormalized? */ + return cc; +} + +/* test data class 128-bit */ +uint32_t HELPER(tcxb)(uint32_t f1, uint64_t m2) +{ + CPU_QuadU v1; + uint32_t cc = 0; + v1.ll.upper = env->fregs[f1].ll; + v1.ll.lower = env->fregs[f1 + 2].ll; + + int neg = float128_is_neg(v1.q); + if ((float128_is_zero(v1.q) && (m2 & (1 << (11-neg)))) || + (float128_is_infinity(v1.q) && (m2 & (1 << (5-neg)))) || + (float128_is_any_nan(v1.q) && (m2 & (1 << (3-neg)))) || + (float128_is_signaling_nan(v1.q) && (m2 & (1 << (1-neg))))) { + cc = 1; + } else if (m2 & (1 << (9-neg))) { + /* assume normalized number */ + cc = 1; + } + /* FIXME: denormalized? 
*/ + return cc; +} + +/* find leftmost one */ +uint32_t HELPER(flogr)(uint32_t r1, uint64_t v2) +{ + uint64_t res = 0; + uint64_t ov2 = v2; + + while (!(v2 & 0x8000000000000000ULL) && v2) { + v2 <<= 1; + res++; + } + + if (!v2) { + env->regs[r1] = 64; + env->regs[r1 + 1] = 0; + return 0; + } else { + env->regs[r1] = res; + env->regs[r1 + 1] = ov2 & ~(0x8000000000000000ULL >> res); + return 2; + } +} + +/* square root 64-bit RR */ +void HELPER(sqdbr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].d = float64_sqrt(env->fregs[f2].d, &env->fpu_status); +} + +/* checksum */ +void HELPER(cksm)(uint32_t r1, uint32_t r2) +{ + uint64_t src = get_address_31fix(r2); + uint64_t src_len = env->regs[(r2 + 1) & 15]; + uint64_t cksm = (uint32_t)env->regs[r1]; + + while (src_len >= 4) { + cksm += ldl(src); + + /* move to next word */ + src_len -= 4; + src += 4; + } + + switch (src_len) { + case 0: + break; + case 1: + cksm += ldub(src) << 24; + break; + case 2: + cksm += lduw(src) << 16; + break; + case 3: + cksm += lduw(src) << 16; + cksm += ldub(src + 2) << 8; + break; + } + + /* indicate we've processed everything */ + env->regs[r2] = src + src_len; + env->regs[(r2 + 1) & 15] = 0; + + /* store result */ + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | + ((uint32_t)cksm + (cksm >> 32)); +} + +static inline uint32_t cc_calc_ltgt_32(CPUState *env, int32_t src, + int32_t dst) +{ + if (src == dst) { + return 0; + } else if (src < dst) { + return 1; + } else { + return 2; + } +} + +static inline uint32_t cc_calc_ltgt0_32(CPUState *env, int32_t dst) +{ + return cc_calc_ltgt_32(env, dst, 0); +} + +static inline uint32_t cc_calc_ltgt_64(CPUState *env, int64_t src, + int64_t dst) +{ + if (src == dst) { + return 0; + } else if (src < dst) { + return 1; + } else { + return 2; + } +} + +static inline uint32_t cc_calc_ltgt0_64(CPUState *env, int64_t dst) +{ + return cc_calc_ltgt_64(env, dst, 0); +} + +static inline uint32_t cc_calc_ltugtu_32(CPUState *env, uint32_t src, + uint32_t dst) +{ + if (src == dst) { + return 0; + } else if (src < dst) { + return 1; + } else { + return 2; + } +} + +static inline uint32_t cc_calc_ltugtu_64(CPUState *env, uint64_t src, + uint64_t dst) +{ + if (src == dst) { + return 0; + } else if (src < dst) { + return 1; + } else { + return 2; + } +} + +static inline uint32_t cc_calc_tm_32(CPUState *env, uint32_t val, uint32_t mask) +{ + HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__, val, mask); + uint16_t r = val & mask; + if (r == 0 || mask == 0) { + return 0; + } else if (r == mask) { + return 3; + } else { + return 1; + } +} + +/* set condition code for test under mask */ +static inline uint32_t cc_calc_tm_64(CPUState *env, uint64_t val, uint32_t mask) +{ + uint16_t r = val & mask; + HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__, val, mask, r); + if (r == 0 || mask == 0) { + return 0; + } else if (r == mask) { + return 3; + } else { + while (!(mask & 0x8000)) { + mask <<= 1; + val <<= 1; + } + if (val & 0x8000) { + return 2; + } else { + return 1; + } + } +} + +static inline uint32_t cc_calc_nz(CPUState *env, uint64_t dst) +{ + return !!dst; +} + +static inline uint32_t cc_calc_add_64(CPUState *env, int64_t a1, int64_t a2, + int64_t ar) +{ + if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) { + return 3; /* overflow */ + } else { + if (ar < 0) { + return 1; + } else if (ar > 0) { + return 2; + } else { + return 0; + } + } +} + +static inline uint32_t cc_calc_addu_64(CPUState *env, uint64_t a1, uint64_t a2, + uint64_t ar) +{ + if (ar == 0) { + 
if (a1) { + return 2; + } else { + return 0; + } + } else { + if (ar < a1 || ar < a2) { + return 3; + } else { + return 1; + } + } +} + +static inline uint32_t cc_calc_sub_64(CPUState *env, int64_t a1, int64_t a2, + int64_t ar) +{ + if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) { + return 3; /* overflow */ + } else { + if (ar < 0) { + return 1; + } else if (ar > 0) { + return 2; + } else { + return 0; + } + } +} + +static inline uint32_t cc_calc_subu_64(CPUState *env, uint64_t a1, uint64_t a2, + uint64_t ar) +{ + if (ar == 0) { + return 2; + } else { + if (a2 > a1) { + return 1; + } else { + return 3; + } + } +} + +static inline uint32_t cc_calc_abs_64(CPUState *env, int64_t dst) +{ + if ((uint64_t)dst == 0x8000000000000000ULL) { + return 3; + } else if (dst) { + return 1; + } else { + return 0; + } +} + +static inline uint32_t cc_calc_nabs_64(CPUState *env, int64_t dst) +{ + return !!dst; +} + +static inline uint32_t cc_calc_comp_64(CPUState *env, int64_t dst) +{ + if ((uint64_t)dst == 0x8000000000000000ULL) { + return 3; + } else if (dst < 0) { + return 1; + } else if (dst > 0) { + return 2; + } else { + return 0; + } +} + + +static inline uint32_t cc_calc_add_32(CPUState *env, int32_t a1, int32_t a2, + int32_t ar) +{ + if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) { + return 3; /* overflow */ + } else { + if (ar < 0) { + return 1; + } else if (ar > 0) { + return 2; + } else { + return 0; + } + } +} + +static inline uint32_t cc_calc_addu_32(CPUState *env, uint32_t a1, uint32_t a2, + uint32_t ar) +{ + if (ar == 0) { + if (a1) { + return 2; + } else { + return 0; + } + } else { + if (ar < a1 || ar < a2) { + return 3; + } else { + return 1; + } + } +} + +static inline uint32_t cc_calc_sub_32(CPUState *env, int32_t a1, int32_t a2, + int32_t ar) +{ + if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) { + return 3; /* overflow */ + } else { + if (ar < 0) { + return 1; + } else if (ar > 0) { + return 2; + } else { + return 0; + } + } +} + +static inline uint32_t cc_calc_subu_32(CPUState *env, uint32_t a1, uint32_t a2, + uint32_t ar) +{ + if (ar == 0) { + return 2; + } else { + if (a2 > a1) { + return 1; + } else { + return 3; + } + } +} + +static inline uint32_t cc_calc_abs_32(CPUState *env, int32_t dst) +{ + if ((uint32_t)dst == 0x80000000UL) { + return 3; + } else if (dst) { + return 1; + } else { + return 0; + } +} + +static inline uint32_t cc_calc_nabs_32(CPUState *env, int32_t dst) +{ + return !!dst; +} + +static inline uint32_t cc_calc_comp_32(CPUState *env, int32_t dst) +{ + if ((uint32_t)dst == 0x80000000UL) { + return 3; + } else if (dst < 0) { + return 1; + } else if (dst > 0) { + return 2; + } else { + return 0; + } +} + +/* calculate condition code for insert character under mask insn */ +static inline uint32_t cc_calc_icm_32(CPUState *env, uint32_t mask, uint32_t val) +{ + HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__, mask, val); + uint32_t cc; + + if (mask == 0xf) { + if (!val) { + return 0; + } else if (val & 0x80000000) { + return 1; + } else { + return 2; + } + } + + if (!val || !mask) { + cc = 0; + } else { + while (mask != 1) { + mask >>= 1; + val >>= 8; + } + if (val & 0x80) { + cc = 1; + } else { + cc = 2; + } + } + return cc; +} + +static inline uint32_t cc_calc_slag(CPUState *env, uint64_t src, uint64_t shift) +{ + uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift); + uint64_t match, r; + + /* check if the sign bit stays the same */ + if (src & (1ULL << 63)) { + match = mask; + } else { + match = 0; + } + + 
if ((src & mask) != match) { + /* overflow */ + return 3; + } + + r = ((src << shift) & ((1ULL << 63) - 1)) | (src & (1ULL << 63)); + + if ((int64_t)r == 0) { + return 0; + } else if ((int64_t)r < 0) { + return 1; + } + + return 2; +} + + +static inline uint32_t do_calc_cc(CPUState *env, uint32_t cc_op, uint64_t src, + uint64_t dst, uint64_t vr) +{ + uint32_t r = 0; + + switch (cc_op) { + case CC_OP_CONST0: + case CC_OP_CONST1: + case CC_OP_CONST2: + case CC_OP_CONST3: + /* cc_op value _is_ cc */ + r = cc_op; + break; + case CC_OP_LTGT0_32: + r = cc_calc_ltgt0_32(env, dst); + break; + case CC_OP_LTGT0_64: + r = cc_calc_ltgt0_64(env, dst); + break; + case CC_OP_LTGT_32: + r = cc_calc_ltgt_32(env, src, dst); + break; + case CC_OP_LTGT_64: + r = cc_calc_ltgt_64(env, src, dst); + break; + case CC_OP_LTUGTU_32: + r = cc_calc_ltugtu_32(env, src, dst); + break; + case CC_OP_LTUGTU_64: + r = cc_calc_ltugtu_64(env, src, dst); + break; + case CC_OP_TM_32: + r = cc_calc_tm_32(env, src, dst); + break; + case CC_OP_TM_64: + r = cc_calc_tm_64(env, src, dst); + break; + case CC_OP_NZ: + r = cc_calc_nz(env, dst); + break; + case CC_OP_ADD_64: + r = cc_calc_add_64(env, src, dst, vr); + break; + case CC_OP_ADDU_64: + r = cc_calc_addu_64(env, src, dst, vr); + break; + case CC_OP_SUB_64: + r = cc_calc_sub_64(env, src, dst, vr); + break; + case CC_OP_SUBU_64: + r = cc_calc_subu_64(env, src, dst, vr); + break; + case CC_OP_ABS_64: + r = cc_calc_abs_64(env, dst); + break; + case CC_OP_NABS_64: + r = cc_calc_nabs_64(env, dst); + break; + case CC_OP_COMP_64: + r = cc_calc_comp_64(env, dst); + break; + + case CC_OP_ADD_32: + r = cc_calc_add_32(env, src, dst, vr); + break; + case CC_OP_ADDU_32: + r = cc_calc_addu_32(env, src, dst, vr); + break; + case CC_OP_SUB_32: + r = cc_calc_sub_32(env, src, dst, vr); + break; + case CC_OP_SUBU_32: + r = cc_calc_subu_32(env, src, dst, vr); + break; + case CC_OP_ABS_32: + r = cc_calc_abs_64(env, dst); + break; + case CC_OP_NABS_32: + r = cc_calc_nabs_64(env, dst); + break; + case CC_OP_COMP_32: + r = cc_calc_comp_32(env, dst); + break; + + case CC_OP_ICM: + r = cc_calc_icm_32(env, src, dst); + break; + case CC_OP_SLAG: + r = cc_calc_slag(env, src, dst); + break; + + case CC_OP_LTGT_F32: + r = set_cc_f32(src, dst); + break; + case CC_OP_LTGT_F64: + r = set_cc_f64(src, dst); + break; + case CC_OP_NZ_F32: + r = set_cc_nz_f32(dst); + break; + case CC_OP_NZ_F64: + r = set_cc_nz_f64(dst); + break; + + default: + cpu_abort(env, "Unknown CC operation: %s\n", cc_name(cc_op)); + } + + HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__, + cc_name(cc_op), src, dst, vr, r); + return r; +} + +uint32_t calc_cc(CPUState *env, uint32_t cc_op, uint64_t src, uint64_t dst, + uint64_t vr) +{ + return do_calc_cc(env, cc_op, src, dst, vr); +} + +uint32_t HELPER(calc_cc)(uint32_t cc_op, uint64_t src, uint64_t dst, + uint64_t vr) +{ + return do_calc_cc(env, cc_op, src, dst, vr); +} + +uint64_t HELPER(cvd)(int32_t bin) +{ + /* positive 0 */ + uint64_t dec = 0x0c; + int shift = 4; + + if (bin < 0) { + bin = -bin; + dec = 0x0d; + } + + for (shift = 4; (shift < 64) && bin; shift += 4) { + int current_number = bin % 10; + + dec |= (current_number) << shift; + bin /= 10; + } + + return dec; +} + +void HELPER(unpk)(uint32_t len, uint64_t dest, uint64_t src) +{ + int len_dest = len >> 4; + int len_src = len & 0xf; + uint8_t b; + int second_nibble = 0; + + dest += len_dest; + src += len_src; + + /* last byte is special, it only flips the nibbles */ + b = ldub(src); + stb(dest, (b << 4) | (b >> 
4)); + src--; + len_src--; + + /* now pad every nibble with 0xf0 */ + + while (len_dest > 0) { + uint8_t cur_byte = 0; + + if (len_src > 0) { + cur_byte = ldub(src); + } + + len_dest--; + dest--; + + /* only advance one nibble at a time */ + if (second_nibble) { + cur_byte >>= 4; + len_src--; + src--; + } + second_nibble = !second_nibble; + + /* digit */ + cur_byte = (cur_byte & 0xf); + /* zone bits */ + cur_byte |= 0xf0; + + stb(dest, cur_byte); + } +} + +void HELPER(tr)(uint32_t len, uint64_t array, uint64_t trans) +{ + int i; + + for (i = 0; i <= len; i++) { + uint8_t byte = ldub(array + i); + uint8_t new_byte = ldub(trans + byte); + stb(array + i, new_byte); + } +} + +#ifndef CONFIG_USER_ONLY + +void HELPER(load_psw)(uint64_t mask, uint64_t addr) +{ + load_psw(env, mask, addr); + cpu_loop_exit(); +} + +static void program_interrupt(CPUState *env, uint32_t code, int ilc) +{ + qemu_log("program interrupt at %#" PRIx64 "\n", env->psw.addr); + + if (kvm_enabled()) { + kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code); + } else { + env->int_pgm_code = code; + env->int_pgm_ilc = ilc; + env->exception_index = EXCP_PGM; + cpu_loop_exit(); + } +} + +static void ext_interrupt(CPUState *env, int type, uint32_t param, + uint64_t param64) +{ + cpu_inject_ext(env, type, param, param64); +} + +int sclp_service_call(CPUState *env, uint32_t sccb, uint64_t code) +{ + int r = 0; + int shift = 0; + +#ifdef DEBUG_HELPER + printf("sclp(0x%x, 0x%" PRIx64 ")\n", sccb, code); +#endif + + if (sccb & ~0x7ffffff8ul) { + fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb); + r = -1; + goto out; + } + + switch(code) { + case SCLP_CMDW_READ_SCP_INFO: + case SCLP_CMDW_READ_SCP_INFO_FORCED: + while ((ram_size >> (20 + shift)) > 65535) { + shift++; + } + stw_phys(sccb + SCP_MEM_CODE, ram_size >> (20 + shift)); + stb_phys(sccb + SCP_INCREMENT, 1 << shift); + stw_phys(sccb + SCP_RESPONSE_CODE, 0x10); + + if (kvm_enabled()) { +#ifdef CONFIG_KVM + kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE, + sccb & ~3, 0, 1); +#endif + } else { + env->psw.addr += 4; + ext_interrupt(env, EXT_SERVICE, sccb & ~3, 0); + } + break; + default: +#ifdef DEBUG_HELPER + printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64 "x\n", sccb, code); +#endif + r = -1; + break; + } + +out: + return r; +} + +/* SCLP service call */ +uint32_t HELPER(servc)(uint32_t r1, uint64_t r2) +{ + if (sclp_service_call(env, r1, r2)) { + return 3; + } + + return 0; +} + +/* DIAG */ +uint64_t HELPER(diag)(uint32_t num, uint64_t mem, uint64_t code) +{ + uint64_t r; + + switch (num) { + case 0x500: + /* KVM hypercall */ + r = s390_virtio_hypercall(env, mem, code); + break; + case 0x44: + /* yield */ + r = 0; + break; + case 0x308: + /* ipl */ + r = 0; + break; + default: + r = -1; + break; + } + + if (r) { + program_interrupt(env, PGM_OPERATION, ILC_LATER_INC); + } + + return r; +} + +/* Store CPU ID */ +void HELPER(stidp)(uint64_t a1) +{ + stq(a1, env->cpu_num); +} + +/* Set Prefix */ +void HELPER(spx)(uint64_t a1) +{ + uint32_t prefix; + + prefix = ldl(a1); + env->psa = prefix & 0xfffff000; + qemu_log("prefix: %#x\n", prefix); + tlb_flush_page(env, 0); + tlb_flush_page(env, TARGET_PAGE_SIZE); +} + +/* Set Clock */ +uint32_t HELPER(sck)(uint64_t a1) +{ + /* XXX not implemented - is it necessary? 
*/ + + return 0; +} + +static inline uint64_t clock_value(CPUState *env) +{ + uint64_t time; + + time = env->tod_offset + + time2tod(qemu_get_clock_ns(vm_clock) - env->tod_basetime); + + return time; +} + +/* Store Clock */ +uint32_t HELPER(stck)(uint64_t a1) +{ + stq(a1, clock_value(env)); + + return 0; +} + +/* Store Clock Extended */ +uint32_t HELPER(stcke)(uint64_t a1) +{ + stb(a1, 0); + /* basically the same value as stck */ + stq(a1 + 1, clock_value(env) | env->cpu_num); + /* more fine grained than stck */ + stq(a1 + 9, 0); + /* XXX programmable fields */ + stw(a1 + 17, 0); + + + return 0; +} + +/* Set Clock Comparator */ +void HELPER(sckc)(uint64_t a1) +{ + uint64_t time = ldq(a1); + + if (time == -1ULL) { + return; + } + + /* difference between now and then */ + time -= clock_value(env); + /* nanoseconds */ + time = (time * 125) >> 9; + + qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time); +} + +/* Store Clock Comparator */ +void HELPER(stckc)(uint64_t a1) +{ + /* XXX implement */ + stq(a1, 0); +} + +/* Set CPU Timer */ +void HELPER(spt)(uint64_t a1) +{ + uint64_t time = ldq(a1); + + if (time == -1ULL) { + return; + } + + /* nanoseconds */ + time = (time * 125) >> 9; + + qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time); +} + +/* Store CPU Timer */ +void HELPER(stpt)(uint64_t a1) +{ + /* XXX implement */ + stq(a1, 0); +} + +/* Store System Information */ +uint32_t HELPER(stsi)(uint64_t a0, uint32_t r0, uint32_t r1) +{ + int cc = 0; + int sel1, sel2; + + if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 && + ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) { + /* valid function code, invalid reserved bits */ + program_interrupt(env, PGM_SPECIFICATION, 2); + } + + sel1 = r0 & STSI_R0_SEL1_MASK; + sel2 = r1 & STSI_R1_SEL2_MASK; + + /* XXX: spec exception if sysib is not 4k-aligned */ + + switch (r0 & STSI_LEVEL_MASK) { + case STSI_LEVEL_1: + if ((sel1 == 1) && (sel2 == 1)) { + /* Basic Machine Configuration */ + struct sysib_111 sysib; + + memset(&sysib, 0, sizeof(sysib)); + ebcdic_put(sysib.manuf, "QEMU ", 16); + /* same as machine type number in STORE CPU ID */ + ebcdic_put(sysib.type, "QEMU", 4); + /* same as model number in STORE CPU ID */ + ebcdic_put(sysib.model, "QEMU ", 16); + ebcdic_put(sysib.sequence, "QEMU ", 16); + ebcdic_put(sysib.plant, "QEMU", 4); + cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1); + } else if ((sel1 == 2) && (sel2 == 1)) { + /* Basic Machine CPU */ + struct sysib_121 sysib; + + memset(&sysib, 0, sizeof(sysib)); + /* XXX make different for different CPUs? */ + ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16); + ebcdic_put(sysib.plant, "QEMU", 4); + stw_p(&sysib.cpu_addr, env->cpu_num); + cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1); + } else if ((sel1 == 2) && (sel2 == 2)) { + /* Basic Machine CPUs */ + struct sysib_122 sysib; + + memset(&sysib, 0, sizeof(sysib)); + stl_p(&sysib.capability, 0x443afc29); + /* XXX change when SMP comes */ + stw_p(&sysib.total_cpus, 1); + stw_p(&sysib.active_cpus, 1); + stw_p(&sysib.standby_cpus, 0); + stw_p(&sysib.reserved_cpus, 0); + cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1); + } else { + cc = 3; + } + break; + case STSI_LEVEL_2: + { + if ((sel1 == 2) && (sel2 == 1)) { + /* LPAR CPU */ + struct sysib_221 sysib; + + memset(&sysib, 0, sizeof(sysib)); + /* XXX make different for different CPUs? 
*/ + ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16); + ebcdic_put(sysib.plant, "QEMU", 4); + stw_p(&sysib.cpu_addr, env->cpu_num); + stw_p(&sysib.cpu_id, 0); + cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1); + } else if ((sel1 == 2) && (sel2 == 2)) { + /* LPAR CPUs */ + struct sysib_222 sysib; + + memset(&sysib, 0, sizeof(sysib)); + stw_p(&sysib.lpar_num, 0); + sysib.lcpuc = 0; + /* XXX change when SMP comes */ + stw_p(&sysib.total_cpus, 1); + stw_p(&sysib.conf_cpus, 1); + stw_p(&sysib.standby_cpus, 0); + stw_p(&sysib.reserved_cpus, 0); + ebcdic_put(sysib.name, "QEMU ", 8); + stl_p(&sysib.caf, 1000); + stw_p(&sysib.dedicated_cpus, 0); + stw_p(&sysib.shared_cpus, 0); + cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1); + } else { + cc = 3; + } + break; + } + case STSI_LEVEL_3: + { + if ((sel1 == 2) && (sel2 == 2)) { + /* VM CPUs */ + struct sysib_322 sysib; + + memset(&sysib, 0, sizeof(sysib)); + sysib.count = 1; + /* XXX change when SMP comes */ + stw_p(&sysib.vm[0].total_cpus, 1); + stw_p(&sysib.vm[0].conf_cpus, 1); + stw_p(&sysib.vm[0].standby_cpus, 0); + stw_p(&sysib.vm[0].reserved_cpus, 0); + ebcdic_put(sysib.vm[0].name, "KVMguest", 8); + stl_p(&sysib.vm[0].caf, 1000); + ebcdic_put(sysib.vm[0].cpi, "KVM/Linux ", 16); + cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1); + } else { + cc = 3; + } + break; + } + case STSI_LEVEL_CURRENT: + env->regs[0] = STSI_LEVEL_3; + break; + default: + cc = 3; + break; + } + + return cc; +} + +void HELPER(lctlg)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + int i; + uint64_t src = a2; + + for (i = r1;; i = (i + 1) % 16) { + env->cregs[i] = ldq(src); + HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n", + i, src, env->cregs[i]); + src += sizeof(uint64_t); + + if (i == r3) { + break; + } + } + + tlb_flush(env, 1); +} + +void HELPER(lctl)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + int i; + uint64_t src = a2; + + for (i = r1;; i = (i + 1) % 16) { + env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | ldl(src); + src += sizeof(uint32_t); + + if (i == r3) { + break; + } + } + + tlb_flush(env, 1); +} + +void HELPER(stctg)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + int i; + uint64_t dest = a2; + + for (i = r1;; i = (i + 1) % 16) { + stq(dest, env->cregs[i]); + dest += sizeof(uint64_t); + + if (i == r3) { + break; + } + } +} + +void HELPER(stctl)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + int i; + uint64_t dest = a2; + + for (i = r1;; i = (i + 1) % 16) { + stl(dest, env->cregs[i]); + dest += sizeof(uint32_t); + + if (i == r3) { + break; + } + } +} + +uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2) +{ + /* XXX implement */ + + return 0; +} + +/* insert storage key extended */ +uint64_t HELPER(iske)(uint64_t r2) +{ + uint64_t addr = get_address(0, 0, r2); + + if (addr > ram_size) { + return 0; + } + + /* XXX maybe use qemu's internal keys? 
*/ + return env->storage_keys[addr / TARGET_PAGE_SIZE]; +} + +/* set storage key extended */ +void HELPER(sske)(uint32_t r1, uint64_t r2) +{ + uint64_t addr = get_address(0, 0, r2); + + if (addr > ram_size) { + return; + } + + env->storage_keys[addr / TARGET_PAGE_SIZE] = r1; +} + +/* reset reference bit extended */ +uint32_t HELPER(rrbe)(uint32_t r1, uint64_t r2) +{ + if (r2 > ram_size) { + return 0; + } + + /* XXX implement */ +#if 0 + env->storage_keys[r2 / TARGET_PAGE_SIZE] &= ~SK_REFERENCED; +#endif + + /* + * cc + * + * 0 Reference bit zero; change bit zero + * 1 Reference bit zero; change bit one + * 2 Reference bit one; change bit zero + * 3 Reference bit one; change bit one + */ + return 0; +} + +/* compare and swap and purge */ +uint32_t HELPER(csp)(uint32_t r1, uint32_t r2) +{ + uint32_t cc; + uint32_t o1 = env->regs[r1]; + uint64_t a2 = get_address_31fix(r2) & ~3ULL; + uint32_t o2 = ldl(a2); + + if (o1 == o2) { + stl(a2, env->regs[(r1 + 1) & 15]); + if (env->regs[r2] & 0x3) { + /* flush TLB / ALB */ + tlb_flush(env, 1); + } + cc = 0; + } else { + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2; + cc = 1; + } + + return cc; +} + +static uint32_t mvc_asc(int64_t l, uint64_t a1, uint64_t mode1, uint64_t a2, + uint64_t mode2) +{ + target_ulong src, dest; + int flags, cc = 0, i; + + if (!l) { + return 0; + } else if (l > 256) { + /* max 256 */ + l = 256; + cc = 3; + } + + if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) { + cpu_loop_exit(); + } + dest |= a1 & ~TARGET_PAGE_MASK; + + if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) { + cpu_loop_exit(); + } + src |= a2 & ~TARGET_PAGE_MASK; + + /* XXX replace w/ memcpy */ + for (i = 0; i < l; i++) { + /* XXX be more clever */ + if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) || + (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) { + mvc_asc(l - i, a1 + i, mode1, a2 + i, mode2); + break; + } + stb_phys(dest + i, ldub_phys(src + i)); + } + + return cc; +} + +uint32_t HELPER(mvcs)(uint64_t l, uint64_t a1, uint64_t a2) +{ + HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n", + __FUNCTION__, l, a1, a2); + + return mvc_asc(l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY); +} + +uint32_t HELPER(mvcp)(uint64_t l, uint64_t a1, uint64_t a2) +{ + HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n", + __FUNCTION__, l, a1, a2); + + return mvc_asc(l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY); +} + +uint32_t HELPER(sigp)(uint64_t order_code, uint32_t r1, uint64_t cpu_addr) +{ + int cc = 0; + + HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n", + __FUNCTION__, order_code, r1, cpu_addr); + + /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register" + as parameter (input). Status (output) is always R1. 
*/ + + switch (order_code) { + case SIGP_SET_ARCH: + /* switch arch */ + break; + case SIGP_SENSE: + /* enumerate CPU status */ + if (cpu_addr) { + /* XXX implement when SMP comes */ + return 3; + } + env->regs[r1] &= 0xffffffff00000000ULL; + cc = 1; + break; + default: + /* unknown sigp */ + fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code); + cc = 3; + } + + return cc; +} + +void HELPER(sacf)(uint64_t a1) +{ + HELPER_LOG("%s: %16" PRIx64 "\n", __FUNCTION__, a1); + + switch (a1 & 0xf00) { + case 0x000: + env->psw.mask &= ~PSW_MASK_ASC; + env->psw.mask |= PSW_ASC_PRIMARY; + break; + case 0x100: + env->psw.mask &= ~PSW_MASK_ASC; + env->psw.mask |= PSW_ASC_SECONDARY; + break; + case 0x300: + env->psw.mask &= ~PSW_MASK_ASC; + env->psw.mask |= PSW_ASC_HOME; + break; + default: + qemu_log("unknown sacf mode: %" PRIx64 "\n", a1); + program_interrupt(env, PGM_SPECIFICATION, 2); + break; + } +} + +/* invalidate pte */ +void HELPER(ipte)(uint64_t pte_addr, uint64_t vaddr) +{ + uint64_t page = vaddr & TARGET_PAGE_MASK; + uint64_t pte = 0; + + /* XXX broadcast to other CPUs */ + + /* XXX Linux is nice enough to give us the exact pte address. + According to spec we'd have to find it out ourselves */ + /* XXX Linux is fine with overwriting the pte, the spec requires + us to only set the invalid bit */ + stq_phys(pte_addr, pte | _PAGE_INVALID); + + /* XXX we exploit the fact that Linux passes the exact virtual + address here - it's not obliged to! */ + tlb_flush_page(env, page); +} + +/* flush local tlb */ +void HELPER(ptlb)(void) +{ + tlb_flush(env, 1); +} + +/* store using real address */ +void HELPER(stura)(uint64_t addr, uint32_t v1) +{ + stw_phys(get_address(0, 0, addr), v1); +} + +/* load real address */ +uint32_t HELPER(lra)(uint64_t addr, uint32_t r1) +{ + uint32_t cc = 0; + int old_exc = env->exception_index; + uint64_t asc = env->psw.mask & PSW_MASK_ASC; + uint64_t ret; + int flags; + + /* XXX incomplete - has more corner cases */ + if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) { + program_interrupt(env, PGM_SPECIAL_OP, 2); + } + + env->exception_index = old_exc; + if (mmu_translate(env, addr, 0, asc, &ret, &flags)) { + cc = 3; + } + if (env->exception_index == EXCP_PGM) { + ret = env->int_pgm_code | 0x80000000; + } else { + ret |= addr & ~TARGET_PAGE_MASK; + } + env->exception_index = old_exc; + + if (!(env->psw.mask & PSW_MASK_64)) { + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (ret & 0xffffffffULL); + } else { + env->regs[r1] = ret; + } + + return cc; +} + +#endif diff --git a/target-s390x/translate.c b/target-s390x/translate.c index 4d45e32616..eda4624d11 100644 --- a/target-s390x/translate.c +++ b/target-s390x/translate.c @@ -2,6 +2,7 @@ * S/390 translation * * Copyright (c) 2009 Ulrich Hecht + * Copyright (c) 2010 Alexander Graf * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -16,6 +17,22 @@ * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ +#include <stdarg.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <inttypes.h> + +/* #define DEBUG_ILLEGAL_INSTRUCTIONS */ +/* #define DEBUG_INLINE_BRANCHES */ +#define S390X_DEBUG_DISAS +/* #define S390X_DEBUG_DISAS_VERBOSE */ + +#ifdef S390X_DEBUG_DISAS_VERBOSE +# define LOG_DISAS(...) qemu_log(__VA_ARGS__) +#else +# define LOG_DISAS(...) 
do { } while (0) +#endif #include "cpu.h" #include "exec-all.h" @@ -23,18 +40,60 @@ #include "tcg-op.h" #include "qemu-log.h" +/* global register indexes */ +static TCGv_ptr cpu_env; + +#include "gen-icount.h" +#include "helpers.h" +#define GEN_HELPER 1 +#include "helpers.h" + +typedef struct DisasContext DisasContext; +struct DisasContext { + uint64_t pc; + int is_jmp; + enum cc_op cc_op; + struct TranslationBlock *tb; +}; + +#define DISAS_EXCP 4 + +static void gen_op_calc_cc(DisasContext *s); + +#ifdef DEBUG_INLINE_BRANCHES +static uint64_t inline_branch_hit[CC_OP_MAX]; +static uint64_t inline_branch_miss[CC_OP_MAX]; +#endif + +static inline void debug_insn(uint64_t insn) +{ + LOG_DISAS("insn: 0x%" PRIx64 "\n", insn); +} + +static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc) +{ + if (!(s->tb->flags & FLAG_MASK_64)) { + if (s->tb->flags & FLAG_MASK_32) { + return pc | 0x80000000; + } + } + return pc; +} + void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf, int flags) { int i; + for (i = 0; i < 16; i++) { - cpu_fprintf(f, "R%02d=%016lx", i, env->regs[i]); + cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]); if ((i % 4) == 3) { cpu_fprintf(f, "\n"); } else { cpu_fprintf(f, " "); } } + for (i = 0; i < 16; i++) { cpu_fprintf(f, "F%02d=%016" PRIx64, i, *(uint64_t *)&env->fregs[i]); if ((i % 4) == 3) { @@ -43,18 +102,5145 @@ void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf, cpu_fprintf(f, " "); } } - cpu_fprintf(f, "PSW=mask %016lx addr %016lx cc %02x\n", env->psw.mask, env->psw.addr, env->cc); + + cpu_fprintf(f, "\n"); + +#ifndef CONFIG_USER_ONLY + for (i = 0; i < 16; i++) { + cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]); + if ((i % 4) == 3) { + cpu_fprintf(f, "\n"); + } else { + cpu_fprintf(f, " "); + } + } +#endif + + cpu_fprintf(f, "\n"); + + if (env->cc_op > 3) { + cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n", + env->psw.mask, env->psw.addr, cc_name(env->cc_op)); + } else { + cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n", + env->psw.mask, env->psw.addr, env->cc_op); + } + +#ifdef DEBUG_INLINE_BRANCHES + for (i = 0; i < CC_OP_MAX; i++) { + cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i), + inline_branch_miss[i], inline_branch_hit[i]); + } +#endif +} + +static TCGv_i64 psw_addr; +static TCGv_i64 psw_mask; + +static TCGv_i32 cc_op; +static TCGv_i64 cc_src; +static TCGv_i64 cc_dst; +static TCGv_i64 cc_vr; + +static char cpu_reg_names[10*3 + 6*4]; +static TCGv_i64 regs[16]; + +static uint8_t gen_opc_cc_op[OPC_BUF_SIZE]; + +void s390x_translate_init(void) +{ + int i; + size_t cpu_reg_names_size = sizeof(cpu_reg_names); + char *p; + + cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); + psw_addr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, psw.addr), + "psw_addr"); + psw_mask = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, psw.mask), + "psw_mask"); + + cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, cc_op), + "cc_op"); + cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, cc_src), + "cc_src"); + cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, cc_dst), + "cc_dst"); + cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, cc_vr), + "cc_vr"); + + p = cpu_reg_names; + for (i = 0; i < 16; i++) { + snprintf(p, cpu_reg_names_size, "r%d", i); + regs[i] = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUState, regs[i]), p); + p += (i < 10) ? 3 : 4; + cpu_reg_names_size -= (i < 10) ? 
3 : 4; + } +} + +static inline TCGv_i64 load_reg(int reg) +{ + TCGv_i64 r = tcg_temp_new_i64(); + tcg_gen_mov_i64(r, regs[reg]); + return r; +} + +static inline TCGv_i64 load_freg(int reg) +{ + TCGv_i64 r = tcg_temp_new_i64(); + tcg_gen_ld_i64(r, cpu_env, offsetof(CPUState, fregs[reg].d)); + return r; +} + +static inline TCGv_i32 load_freg32(int reg) +{ + TCGv_i32 r = tcg_temp_new_i32(); + tcg_gen_ld_i32(r, cpu_env, offsetof(CPUState, fregs[reg].l.upper)); + return r; +} + +static inline TCGv_i32 load_reg32(int reg) +{ + TCGv_i32 r = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(r, regs[reg]); + return r; +} + +static inline TCGv_i64 load_reg32_i64(int reg) +{ + TCGv_i64 r = tcg_temp_new_i64(); + tcg_gen_ext32s_i64(r, regs[reg]); + return r; +} + +static inline void store_reg(int reg, TCGv_i64 v) +{ + tcg_gen_mov_i64(regs[reg], v); +} + +static inline void store_freg(int reg, TCGv_i64 v) +{ + tcg_gen_st_i64(v, cpu_env, offsetof(CPUState, fregs[reg].d)); +} + +static inline void store_reg32(int reg, TCGv_i32 v) +{ +#if HOST_LONG_BITS == 32 + tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v); +#else + TCGv_i64 tmp = tcg_temp_new_i64(); + tcg_gen_extu_i32_i64(tmp, v); + /* 32 bit register writes keep the upper half */ + tcg_gen_deposit_i64(regs[reg], regs[reg], tmp, 0, 32); + tcg_temp_free_i64(tmp); +#endif +} + +static inline void store_reg32_i64(int reg, TCGv_i64 v) +{ + /* 32 bit register writes keep the upper half */ +#if HOST_LONG_BITS == 32 + tcg_gen_mov_i32(TCGV_LOW(regs[reg]), TCGV_LOW(v)); +#else + tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32); +#endif +} + +static inline void store_reg16(int reg, TCGv_i32 v) +{ + TCGv_i64 tmp = tcg_temp_new_i64(); + tcg_gen_extu_i32_i64(tmp, v); + /* 16 bit register writes keep the upper bytes */ + tcg_gen_deposit_i64(regs[reg], regs[reg], tmp, 0, 16); + tcg_temp_free_i64(tmp); +} + +static inline void store_reg8(int reg, TCGv_i64 v) +{ + /* 8 bit register writes keep the upper bytes */ + tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 8); +} + +static inline void store_freg32(int reg, TCGv_i32 v) +{ + tcg_gen_st_i32(v, cpu_env, offsetof(CPUState, fregs[reg].l.upper)); +} + +static inline void update_psw_addr(DisasContext *s) +{ + /* psw.addr */ + tcg_gen_movi_i64(psw_addr, s->pc); +} + +static inline void potential_page_fault(DisasContext *s) +{ +#ifndef CONFIG_USER_ONLY + update_psw_addr(s); + gen_op_calc_cc(s); +#endif +} + +static inline uint64_t ld_code2(uint64_t pc) +{ + return (uint64_t)lduw_code(pc); +} + +static inline uint64_t ld_code4(uint64_t pc) +{ + return (uint64_t)ldl_code(pc); +} + +static inline uint64_t ld_code6(uint64_t pc) +{ + uint64_t opc; + opc = (uint64_t)lduw_code(pc) << 32; + opc |= (uint64_t)(uint32_t)ldl_code(pc+2); + return opc; +} + +static inline int get_mem_index(DisasContext *s) +{ + switch (s->tb->flags & FLAG_MASK_ASC) { + case PSW_ASC_PRIMARY >> 32: + return 0; + case PSW_ASC_SECONDARY >> 32: + return 1; + case PSW_ASC_HOME >> 32: + return 2; + default: + tcg_abort(); + break; + } +} + +static inline void gen_debug(DisasContext *s) +{ + TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG); + update_psw_addr(s); + gen_op_calc_cc(s); + gen_helper_exception(tmp); + tcg_temp_free_i32(tmp); + s->is_jmp = DISAS_EXCP; +} + +#ifdef CONFIG_USER_ONLY + +static void gen_illegal_opcode(DisasContext *s, int ilc) +{ + TCGv_i32 tmp = tcg_const_i32(EXCP_SPEC); + update_psw_addr(s); + gen_op_calc_cc(s); + gen_helper_exception(tmp); + tcg_temp_free_i32(tmp); + s->is_jmp = DISAS_EXCP; +} + +#else /* CONFIG_USER_ONLY */ + +static void 
debug_print_inst(DisasContext *s, int ilc) +{ +#ifdef DEBUG_ILLEGAL_INSTRUCTIONS + uint64_t inst = 0; + + switch (ilc & 3) { + case 1: + inst = ld_code2(s->pc); + break; + case 2: + inst = ld_code4(s->pc); + break; + case 3: + inst = ld_code6(s->pc); + break; + } + + fprintf(stderr, "Illegal instruction [%d at %016" PRIx64 "]: 0x%016" + PRIx64 "\n", ilc, s->pc, inst); +#endif +} + +static void gen_program_exception(DisasContext *s, int ilc, int code) +{ + TCGv_i32 tmp; + + debug_print_inst(s, ilc); + + /* remember what pgm exception this was */ + tmp = tcg_const_i32(code); + tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, int_pgm_code)); + tcg_temp_free_i32(tmp); + + tmp = tcg_const_i32(ilc); + tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, int_pgm_ilc)); + tcg_temp_free_i32(tmp); + + /* advance past instruction */ + s->pc += (ilc * 2); + update_psw_addr(s); + + /* save off cc */ + gen_op_calc_cc(s); + + /* trigger exception */ + tmp = tcg_const_i32(EXCP_PGM); + gen_helper_exception(tmp); + tcg_temp_free_i32(tmp); + + /* end TB here */ + s->is_jmp = DISAS_EXCP; +} + + +static void gen_illegal_opcode(DisasContext *s, int ilc) +{ + gen_program_exception(s, ilc, PGM_SPECIFICATION); +} + +static void gen_privileged_exception(DisasContext *s, int ilc) +{ + gen_program_exception(s, ilc, PGM_PRIVILEGED); +} + +static void check_privileged(DisasContext *s, int ilc) +{ + if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) { + gen_privileged_exception(s, ilc); + } +} + +#endif /* CONFIG_USER_ONLY */ + +static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2) +{ + TCGv_i64 tmp; + + /* 31-bitify the immediate part; register contents are dealt with below */ + if (!(s->tb->flags & FLAG_MASK_64)) { + d2 &= 0x7fffffffUL; + } + + if (x2) { + if (d2) { + tmp = tcg_const_i64(d2); + tcg_gen_add_i64(tmp, tmp, regs[x2]); + } else { + tmp = load_reg(x2); + } + if (b2) { + tcg_gen_add_i64(tmp, tmp, regs[b2]); + } + } else if (b2) { + if (d2) { + tmp = tcg_const_i64(d2); + tcg_gen_add_i64(tmp, tmp, regs[b2]); + } else { + tmp = load_reg(b2); + } + } else { + tmp = tcg_const_i64(d2); + } + + /* 31-bit mode mask if there are values loaded from registers */ + if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) { + tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL); + } + + return tmp; +} + +static void gen_op_movi_cc(DisasContext *s, uint32_t val) +{ + s->cc_op = CC_OP_CONST0 + val; +} + +static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst) +{ + tcg_gen_discard_i64(cc_src); + tcg_gen_mov_i64(cc_dst, dst); + tcg_gen_discard_i64(cc_vr); + s->cc_op = op; +} + +static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst) +{ + tcg_gen_discard_i64(cc_src); + tcg_gen_extu_i32_i64(cc_dst, dst); + tcg_gen_discard_i64(cc_vr); + s->cc_op = op; +} + +static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src, + TCGv_i64 dst) +{ + tcg_gen_mov_i64(cc_src, src); + tcg_gen_mov_i64(cc_dst, dst); + tcg_gen_discard_i64(cc_vr); + s->cc_op = op; +} + +static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src, + TCGv_i32 dst) +{ + tcg_gen_extu_i32_i64(cc_src, src); + tcg_gen_extu_i32_i64(cc_dst, dst); + tcg_gen_discard_i64(cc_vr); + s->cc_op = op; +} + +static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src, + TCGv_i64 dst, TCGv_i64 vr) +{ + tcg_gen_mov_i64(cc_src, src); + tcg_gen_mov_i64(cc_dst, dst); + tcg_gen_mov_i64(cc_vr, vr); + s->cc_op = op; +} + +static void gen_op_update3_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src, +
TCGv_i32 dst, TCGv_i32 vr) +{ + tcg_gen_extu_i32_i64(cc_src, src); + tcg_gen_extu_i32_i64(cc_dst, dst); + tcg_gen_extu_i32_i64(cc_vr, vr); + s->cc_op = op; +} + +static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val) +{ + gen_op_update1_cc_i32(s, CC_OP_NZ, val); +} + +static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val) +{ + gen_op_update1_cc_i64(s, CC_OP_NZ, val); +} + +static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2, + enum cc_op cond) +{ + gen_op_update2_cc_i32(s, cond, v1, v2); +} + +static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2, + enum cc_op cond) +{ + gen_op_update2_cc_i64(s, cond, v1, v2); +} + +static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2) +{ + cmp_32(s, v1, v2, CC_OP_LTGT_32); +} + +static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2) +{ + cmp_32(s, v1, v2, CC_OP_LTUGTU_32); +} + +static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2) +{ + /* XXX optimize for the constant? put it in s? */ + TCGv_i32 tmp = tcg_const_i32(v2); + cmp_32(s, v1, tmp, CC_OP_LTGT_32); + tcg_temp_free_i32(tmp); +} + +static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2) +{ + TCGv_i32 tmp = tcg_const_i32(v2); + cmp_32(s, v1, tmp, CC_OP_LTUGTU_32); + tcg_temp_free_i32(tmp); +} + +static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2) +{ + cmp_64(s, v1, v2, CC_OP_LTGT_64); +} + +static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2) +{ + cmp_64(s, v1, v2, CC_OP_LTUGTU_64); +} + +static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2) +{ + TCGv_i64 tmp = tcg_const_i64(v2); + cmp_s64(s, v1, tmp); + tcg_temp_free_i64(tmp); +} + +static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2) +{ + TCGv_i64 tmp = tcg_const_i64(v2); + cmp_u64(s, v1, tmp); + tcg_temp_free_i64(tmp); +} + +static inline void set_cc_s32(DisasContext *s, TCGv_i32 val) +{ + gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val); +} + +static inline void set_cc_s64(DisasContext *s, TCGv_i64 val) +{ + gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val); +} + +static void set_cc_add64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2, TCGv_i64 vr) +{ + gen_op_update3_cc_i64(s, CC_OP_ADD_64, v1, v2, vr); +} + +static void set_cc_addu64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2, + TCGv_i64 vr) +{ + gen_op_update3_cc_i64(s, CC_OP_ADDU_64, v1, v2, vr); +} + +static void set_cc_sub64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2, TCGv_i64 vr) +{ + gen_op_update3_cc_i64(s, CC_OP_SUB_64, v1, v2, vr); +} + +static void set_cc_subu64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2, + TCGv_i64 vr) +{ + gen_op_update3_cc_i64(s, CC_OP_SUBU_64, v1, v2, vr); +} + +static void set_cc_abs64(DisasContext *s, TCGv_i64 v1) +{ + gen_op_update1_cc_i64(s, CC_OP_ABS_64, v1); +} + +static void set_cc_nabs64(DisasContext *s, TCGv_i64 v1) +{ + gen_op_update1_cc_i64(s, CC_OP_NABS_64, v1); +} + +static void set_cc_add32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2, TCGv_i32 vr) +{ + gen_op_update3_cc_i32(s, CC_OP_ADD_32, v1, v2, vr); +} + +static void set_cc_addu32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2, + TCGv_i32 vr) +{ + gen_op_update3_cc_i32(s, CC_OP_ADDU_32, v1, v2, vr); +} + +static void set_cc_sub32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2, TCGv_i32 vr) +{ + gen_op_update3_cc_i32(s, CC_OP_SUB_32, v1, v2, vr); +} + +static void set_cc_subu32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2, + TCGv_i32 vr) +{ + gen_op_update3_cc_i32(s, CC_OP_SUBU_32, v1, v2, vr); +} + +static void set_cc_abs32(DisasContext *s, TCGv_i32 
v1) +{ + gen_op_update1_cc_i32(s, CC_OP_ABS_32, v1); +} + +static void set_cc_nabs32(DisasContext *s, TCGv_i32 v1) +{ + gen_op_update1_cc_i32(s, CC_OP_NABS_32, v1); +} + +static void set_cc_comp32(DisasContext *s, TCGv_i32 v1) +{ + gen_op_update1_cc_i32(s, CC_OP_COMP_32, v1); +} + +static void set_cc_comp64(DisasContext *s, TCGv_i64 v1) +{ + gen_op_update1_cc_i64(s, CC_OP_COMP_64, v1); +} + +static void set_cc_icm(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2) +{ + gen_op_update2_cc_i32(s, CC_OP_ICM, v1, v2); +} + +static void set_cc_cmp_f32_i64(DisasContext *s, TCGv_i32 v1, TCGv_i64 v2) +{ + tcg_gen_extu_i32_i64(cc_src, v1); + tcg_gen_mov_i64(cc_dst, v2); + tcg_gen_discard_i64(cc_vr); + s->cc_op = CC_OP_LTGT_F32; +} + +static void set_cc_nz_f32(DisasContext *s, TCGv_i32 v1) +{ + gen_op_update1_cc_i32(s, CC_OP_NZ_F32, v1); +} + +static inline void set_cc_nz_f64(DisasContext *s, TCGv_i64 v1) +{ + gen_op_update1_cc_i64(s, CC_OP_NZ_F64, v1); +} + +/* CC value is in env->cc_op */ +static inline void set_cc_static(DisasContext *s) +{ + tcg_gen_discard_i64(cc_src); + tcg_gen_discard_i64(cc_dst); + tcg_gen_discard_i64(cc_vr); + s->cc_op = CC_OP_STATIC; +} + +static inline void gen_op_set_cc_op(DisasContext *s) +{ + if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) { + tcg_gen_movi_i32(cc_op, s->cc_op); + } +} + +static inline void gen_update_cc_op(DisasContext *s) +{ + gen_op_set_cc_op(s); +} + +/* calculates cc into cc_op */ +static void gen_op_calc_cc(DisasContext *s) +{ + TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op); + TCGv_i64 dummy = tcg_const_i64(0); + + switch (s->cc_op) { + case CC_OP_CONST0: + case CC_OP_CONST1: + case CC_OP_CONST2: + case CC_OP_CONST3: + /* s->cc_op is the cc value */ + tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0); + break; + case CC_OP_STATIC: + /* env->cc_op already is the cc value */ + break; + case CC_OP_NZ: + case CC_OP_ABS_64: + case CC_OP_NABS_64: + case CC_OP_ABS_32: + case CC_OP_NABS_32: + case CC_OP_LTGT0_32: + case CC_OP_LTGT0_64: + case CC_OP_COMP_32: + case CC_OP_COMP_64: + case CC_OP_NZ_F32: + case CC_OP_NZ_F64: + /* 1 argument */ + gen_helper_calc_cc(cc_op, local_cc_op, dummy, cc_dst, dummy); + break; + case CC_OP_ICM: + case CC_OP_LTGT_32: + case CC_OP_LTGT_64: + case CC_OP_LTUGTU_32: + case CC_OP_LTUGTU_64: + case CC_OP_TM_32: + case CC_OP_TM_64: + case CC_OP_LTGT_F32: + case CC_OP_LTGT_F64: + case CC_OP_SLAG: + /* 2 arguments */ + gen_helper_calc_cc(cc_op, local_cc_op, cc_src, cc_dst, dummy); + break; + case CC_OP_ADD_64: + case CC_OP_ADDU_64: + case CC_OP_SUB_64: + case CC_OP_SUBU_64: + case CC_OP_ADD_32: + case CC_OP_ADDU_32: + case CC_OP_SUB_32: + case CC_OP_SUBU_32: + /* 3 arguments */ + gen_helper_calc_cc(cc_op, local_cc_op, cc_src, cc_dst, cc_vr); + break; + case CC_OP_DYNAMIC: + /* unknown operation - assume 3 arguments and cc_op in env */ + gen_helper_calc_cc(cc_op, cc_op, cc_src, cc_dst, cc_vr); + break; + default: + tcg_abort(); + } + + tcg_temp_free_i32(local_cc_op); + + /* We now have cc in cc_op as constant */ + set_cc_static(s); +} + +static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2) +{ + debug_insn(insn); + + *r1 = (insn >> 4) & 0xf; + *r2 = insn & 0xf; +} + +static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1, + int *x2, int *b2, int *d2) +{ + debug_insn(insn); + + *r1 = (insn >> 20) & 0xf; + *x2 = (insn >> 16) & 0xf; + *b2 = (insn >> 12) & 0xf; + *d2 = insn & 0xfff; + + return get_address(s, *x2, *b2, *d2); +} + +static inline void decode_rs(DisasContext *s, uint64_t insn, 
int *r1, int *r3, + int *b2, int *d2) +{ + debug_insn(insn); + + *r1 = (insn >> 20) & 0xf; + /* aka m3 */ + *r3 = (insn >> 16) & 0xf; + *b2 = (insn >> 12) & 0xf; + *d2 = insn & 0xfff; +} + +static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2, + int *b1, int *d1) +{ + debug_insn(insn); + + *i2 = (insn >> 16) & 0xff; + *b1 = (insn >> 12) & 0xf; + *d1 = insn & 0xfff; + + return get_address(s, 0, *b1, *d1); +} + +static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc) +{ + TranslationBlock *tb; + + gen_update_cc_op(s); + + tb = s->tb; + /* NOTE: we handle the case where the TB spans two pages here */ + if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) || + (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) { + /* jump to same page: we can use a direct jump */ + tcg_gen_goto_tb(tb_num); + tcg_gen_movi_i64(psw_addr, pc); + tcg_gen_exit_tb((long)tb + tb_num); + } else { + /* jump to another page: currently not optimized */ + tcg_gen_movi_i64(psw_addr, pc); + tcg_gen_exit_tb(0); + } +} + +static inline void account_noninline_branch(DisasContext *s, int cc_op) +{ +#ifdef DEBUG_INLINE_BRANCHES + inline_branch_miss[cc_op]++; +#endif +} + +static inline void account_inline_branch(DisasContext *s) +{ +#ifdef DEBUG_INLINE_BRANCHES + inline_branch_hit[s->cc_op]++; +#endif +} + +static void gen_jcc(DisasContext *s, uint32_t mask, int skip) +{ + TCGv_i32 tmp, tmp2, r; + TCGv_i64 tmp64; + int old_cc_op; + + switch (s->cc_op) { + case CC_OP_LTGT0_32: + tmp = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp, cc_dst); + switch (mask) { + case 0x8 | 0x4: /* dst <= 0 */ + tcg_gen_brcondi_i32(TCG_COND_GT, tmp, 0, skip); + break; + case 0x8 | 0x2: /* dst >= 0 */ + tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, skip); + break; + case 0x8: /* dst == 0 */ + tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, skip); + break; + case 0x7: /* dst != 0 */ + case 0x6: /* dst != 0 */ + tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, skip); + break; + case 0x4: /* dst < 0 */ + tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, skip); + break; + case 0x2: /* dst > 0 */ + tcg_gen_brcondi_i32(TCG_COND_LE, tmp, 0, skip); + break; + default: + tcg_temp_free_i32(tmp); + goto do_dynamic; + } + account_inline_branch(s); + tcg_temp_free_i32(tmp); + break; + case CC_OP_LTGT0_64: + switch (mask) { + case 0x8 | 0x4: /* dst <= 0 */ + tcg_gen_brcondi_i64(TCG_COND_GT, cc_dst, 0, skip); + break; + case 0x8 | 0x2: /* dst >= 0 */ + tcg_gen_brcondi_i64(TCG_COND_LT, cc_dst, 0, skip); + break; + case 0x8: /* dst == 0 */ + tcg_gen_brcondi_i64(TCG_COND_NE, cc_dst, 0, skip); + break; + case 0x7: /* dst != 0 */ + case 0x6: /* dst != 0 */ + tcg_gen_brcondi_i64(TCG_COND_EQ, cc_dst, 0, skip); + break; + case 0x4: /* dst < 0 */ + tcg_gen_brcondi_i64(TCG_COND_GE, cc_dst, 0, skip); + break; + case 0x2: /* dst > 0 */ + tcg_gen_brcondi_i64(TCG_COND_LE, cc_dst, 0, skip); + break; + default: + goto do_dynamic; + } + account_inline_branch(s); + break; + case CC_OP_LTGT_32: + tmp = tcg_temp_new_i32(); + tmp2 = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp, cc_src); + tcg_gen_trunc_i64_i32(tmp2, cc_dst); + switch (mask) { + case 0x8 | 0x4: /* src <= dst */ + tcg_gen_brcond_i32(TCG_COND_GT, tmp, tmp2, skip); + break; + case 0x8 | 0x2: /* src >= dst */ + tcg_gen_brcond_i32(TCG_COND_LT, tmp, tmp2, skip); + break; + case 0x8: /* src == dst */ + tcg_gen_brcond_i32(TCG_COND_NE, tmp, tmp2, skip); + break; + case 0x7: /* src != dst */ + case 0x6: /* src != dst */ + tcg_gen_brcond_i32(TCG_COND_EQ, tmp, tmp2, skip); + break; + case 0x4: /* src 
< dst */ + tcg_gen_brcond_i32(TCG_COND_GE, tmp, tmp2, skip); + break; + case 0x2: /* src > dst */ + tcg_gen_brcond_i32(TCG_COND_LE, tmp, tmp2, skip); + break; + default: + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(tmp2); + goto do_dynamic; + } + account_inline_branch(s); + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(tmp2); + break; + case CC_OP_LTGT_64: + switch (mask) { + case 0x8 | 0x4: /* src <= dst */ + tcg_gen_brcond_i64(TCG_COND_GT, cc_src, cc_dst, skip); + break; + case 0x8 | 0x2: /* src >= dst */ + tcg_gen_brcond_i64(TCG_COND_LT, cc_src, cc_dst, skip); + break; + case 0x8: /* src == dst */ + tcg_gen_brcond_i64(TCG_COND_NE, cc_src, cc_dst, skip); + break; + case 0x7: /* src != dst */ + case 0x6: /* src != dst */ + tcg_gen_brcond_i64(TCG_COND_EQ, cc_src, cc_dst, skip); + break; + case 0x4: /* src < dst */ + tcg_gen_brcond_i64(TCG_COND_GE, cc_src, cc_dst, skip); + break; + case 0x2: /* src > dst */ + tcg_gen_brcond_i64(TCG_COND_LE, cc_src, cc_dst, skip); + break; + default: + goto do_dynamic; + } + account_inline_branch(s); + break; + case CC_OP_LTUGTU_32: + tmp = tcg_temp_new_i32(); + tmp2 = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp, cc_src); + tcg_gen_trunc_i64_i32(tmp2, cc_dst); + switch (mask) { + case 0x8 | 0x4: /* src <= dst */ + tcg_gen_brcond_i32(TCG_COND_GTU, tmp, tmp2, skip); + break; + case 0x8 | 0x2: /* src >= dst */ + tcg_gen_brcond_i32(TCG_COND_LTU, tmp, tmp2, skip); + break; + case 0x8: /* src == dst */ + tcg_gen_brcond_i32(TCG_COND_NE, tmp, tmp2, skip); + break; + case 0x7: /* src != dst */ + case 0x6: /* src != dst */ + tcg_gen_brcond_i32(TCG_COND_EQ, tmp, tmp2, skip); + break; + case 0x4: /* src < dst */ + tcg_gen_brcond_i32(TCG_COND_GEU, tmp, tmp2, skip); + break; + case 0x2: /* src > dst */ + tcg_gen_brcond_i32(TCG_COND_LEU, tmp, tmp2, skip); + break; + default: + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(tmp2); + goto do_dynamic; + } + account_inline_branch(s); + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(tmp2); + break; + case CC_OP_LTUGTU_64: + switch (mask) { + case 0x8 | 0x4: /* src <= dst */ + tcg_gen_brcond_i64(TCG_COND_GTU, cc_src, cc_dst, skip); + break; + case 0x8 | 0x2: /* src >= dst */ + tcg_gen_brcond_i64(TCG_COND_LTU, cc_src, cc_dst, skip); + break; + case 0x8: /* src == dst */ + tcg_gen_brcond_i64(TCG_COND_NE, cc_src, cc_dst, skip); + break; + case 0x7: /* src != dst */ + case 0x6: /* src != dst */ + tcg_gen_brcond_i64(TCG_COND_EQ, cc_src, cc_dst, skip); + break; + case 0x4: /* src < dst */ + tcg_gen_brcond_i64(TCG_COND_GEU, cc_src, cc_dst, skip); + break; + case 0x2: /* src > dst */ + tcg_gen_brcond_i64(TCG_COND_LEU, cc_src, cc_dst, skip); + break; + default: + goto do_dynamic; + } + account_inline_branch(s); + break; + case CC_OP_NZ: + switch (mask) { + /* dst == 0 || dst != 0 */ + case 0x8 | 0x4: + case 0x8 | 0x4 | 0x2: + case 0x8 | 0x4 | 0x2 | 0x1: + case 0x8 | 0x4 | 0x1: + break; + /* dst == 0 */ + case 0x8: + case 0x8 | 0x2: + case 0x8 | 0x2 | 0x1: + case 0x8 | 0x1: + tcg_gen_brcondi_i64(TCG_COND_NE, cc_dst, 0, skip); + break; + /* dst != 0 */ + case 0x4: + case 0x4 | 0x2: + case 0x4 | 0x2 | 0x1: + case 0x4 | 0x1: + tcg_gen_brcondi_i64(TCG_COND_EQ, cc_dst, 0, skip); + break; + default: + goto do_dynamic; + } + account_inline_branch(s); + break; + case CC_OP_TM_32: + tmp = tcg_temp_new_i32(); + tmp2 = tcg_temp_new_i32(); + + tcg_gen_trunc_i64_i32(tmp, cc_src); + tcg_gen_trunc_i64_i32(tmp2, cc_dst); + tcg_gen_and_i32(tmp, tmp, tmp2); + switch (mask) { + case 0x8: /* val & mask == 0 */ + tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, skip); + 
break; + case 0x4 | 0x2 | 0x1: /* val & mask != 0 */ + tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, skip); + break; + default: + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(tmp2); + goto do_dynamic; + } + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(tmp2); + account_inline_branch(s); + break; + case CC_OP_TM_64: + tmp64 = tcg_temp_new_i64(); + + tcg_gen_and_i64(tmp64, cc_src, cc_dst); + switch (mask) { + case 0x8: /* val & mask == 0 */ + tcg_gen_brcondi_i64(TCG_COND_NE, tmp64, 0, skip); + break; + case 0x4 | 0x2 | 0x1: /* val & mask != 0 */ + tcg_gen_brcondi_i64(TCG_COND_EQ, tmp64, 0, skip); + break; + default: + tcg_temp_free_i64(tmp64); + goto do_dynamic; + } + tcg_temp_free_i64(tmp64); + account_inline_branch(s); + break; + case CC_OP_ICM: + switch (mask) { + case 0x8: /* val == 0 */ + tcg_gen_brcondi_i64(TCG_COND_NE, cc_dst, 0, skip); + break; + case 0x4 | 0x2 | 0x1: /* val != 0 */ + case 0x4 | 0x2: /* val != 0 */ + tcg_gen_brcondi_i64(TCG_COND_EQ, cc_dst, 0, skip); + break; + default: + goto do_dynamic; + } + account_inline_branch(s); + break; + case CC_OP_STATIC: + old_cc_op = s->cc_op; + goto do_dynamic_nocccalc; + case CC_OP_DYNAMIC: + default: +do_dynamic: + old_cc_op = s->cc_op; + /* calculate cc value */ + gen_op_calc_cc(s); + +do_dynamic_nocccalc: + /* jump based on cc */ + account_noninline_branch(s, old_cc_op); + + switch (mask) { + case 0x8 | 0x4 | 0x2 | 0x1: + /* always true */ + break; + case 0x8 | 0x4 | 0x2: /* cc != 3 */ + tcg_gen_brcondi_i32(TCG_COND_EQ, cc_op, 3, skip); + break; + case 0x8 | 0x4 | 0x1: /* cc != 2 */ + tcg_gen_brcondi_i32(TCG_COND_EQ, cc_op, 2, skip); + break; + case 0x8 | 0x2 | 0x1: /* cc != 1 */ + tcg_gen_brcondi_i32(TCG_COND_EQ, cc_op, 1, skip); + break; + case 0x8 | 0x2: /* cc == 0 || cc == 2 */ + tmp = tcg_temp_new_i32(); + tcg_gen_andi_i32(tmp, cc_op, 1); + tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, skip); + tcg_temp_free_i32(tmp); + break; + case 0x8 | 0x4: /* cc < 2 */ + tcg_gen_brcondi_i32(TCG_COND_GEU, cc_op, 2, skip); + break; + case 0x8: /* cc == 0 */ + tcg_gen_brcondi_i32(TCG_COND_NE, cc_op, 0, skip); + break; + case 0x4 | 0x2 | 0x1: /* cc != 0 */ + tcg_gen_brcondi_i32(TCG_COND_EQ, cc_op, 0, skip); + break; + case 0x4 | 0x1: /* cc == 1 || cc == 3 */ + tmp = tcg_temp_new_i32(); + tcg_gen_andi_i32(tmp, cc_op, 1); + tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, skip); + tcg_temp_free_i32(tmp); + break; + case 0x4: /* cc == 1 */ + tcg_gen_brcondi_i32(TCG_COND_NE, cc_op, 1, skip); + break; + case 0x2 | 0x1: /* cc > 1 */ + tcg_gen_brcondi_i32(TCG_COND_LEU, cc_op, 1, skip); + break; + case 0x2: /* cc == 2 */ + tcg_gen_brcondi_i32(TCG_COND_NE, cc_op, 2, skip); + break; + case 0x1: /* cc == 3 */ + tcg_gen_brcondi_i32(TCG_COND_NE, cc_op, 3, skip); + break; + default: /* cc is masked by something else */ + tmp = tcg_const_i32(3); + /* 3 - cc */ + tcg_gen_sub_i32(tmp, tmp, cc_op); + tmp2 = tcg_const_i32(1); + /* 1 << (3 - cc) */ + tcg_gen_shl_i32(tmp2, tmp2, tmp); + r = tcg_const_i32(mask); + /* mask & (1 << (3 - cc)) */ + tcg_gen_and_i32(r, r, tmp2); + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(tmp2); + + tcg_gen_brcondi_i32(TCG_COND_EQ, r, 0, skip); + tcg_temp_free_i32(r); + break; + } + break; + } +} + +static void gen_bcr(DisasContext *s, uint32_t mask, TCGv_i64 target, + uint64_t offset) +{ + int skip; + + if (mask == 0xf) { + /* unconditional */ + tcg_gen_mov_i64(psw_addr, target); + tcg_gen_exit_tb(0); + } else if (mask == 0) { + /* ignore cc and never match */ + gen_goto_tb(s, 0, offset + 2); + } else { + TCGv_i64 new_addr = tcg_temp_local_new_i64(); +
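+ /* conditional case: gen_jcc() branches to the "skip" label when the cc does not satisfy the mask, so the fall-through path takes the branch by loading psw_addr and leaving the TB */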
+ tcg_gen_mov_i64(new_addr, target); + skip = gen_new_label(); + gen_jcc(s, mask, skip); + tcg_gen_mov_i64(psw_addr, new_addr); + tcg_temp_free_i64(new_addr); + tcg_gen_exit_tb(0); + gen_set_label(skip); + tcg_temp_free_i64(new_addr); + gen_goto_tb(s, 1, offset + 2); + } +} + +static void gen_brc(uint32_t mask, DisasContext *s, int32_t offset) +{ + int skip; + + if (mask == 0xf) { + /* unconditional */ + gen_goto_tb(s, 0, s->pc + offset); + } else if (mask == 0) { + /* ignore cc and never match */ + gen_goto_tb(s, 0, s->pc + 4); + } else { + skip = gen_new_label(); + gen_jcc(s, mask, skip); + gen_goto_tb(s, 0, s->pc + offset); + gen_set_label(skip); + gen_goto_tb(s, 1, s->pc + 4); + } + s->is_jmp = DISAS_TB_JUMP; +} + +static void gen_op_mvc(DisasContext *s, int l, TCGv_i64 s1, TCGv_i64 s2) +{ + TCGv_i64 tmp, tmp2; + int i; + int l_memset = gen_new_label(); + int l_out = gen_new_label(); + TCGv_i64 dest = tcg_temp_local_new_i64(); + TCGv_i64 src = tcg_temp_local_new_i64(); + TCGv_i32 vl; + + /* Find out if we should use the inline version of mvc */ + switch (l) { + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 11: + case 15: + /* use inline */ + break; + default: + /* Fall back to helper */ + vl = tcg_const_i32(l); + potential_page_fault(s); + gen_helper_mvc(vl, s1, s2); + tcg_temp_free_i32(vl); + return; + } + + tcg_gen_mov_i64(dest, s1); + tcg_gen_mov_i64(src, s2); + + if (!(s->tb->flags & FLAG_MASK_64)) { + /* XXX what if we overflow while moving? */ + tcg_gen_andi_i64(dest, dest, 0x7fffffffUL); + tcg_gen_andi_i64(src, src, 0x7fffffffUL); + } + + tmp = tcg_temp_new_i64(); + tcg_gen_addi_i64(tmp, src, 1); + tcg_gen_brcond_i64(TCG_COND_EQ, dest, tmp, l_memset); + tcg_temp_free_i64(tmp); + + switch (l) { + case 0: + tmp = tcg_temp_new_i64(); + + tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s)); + tcg_gen_qemu_st8(tmp, dest, get_mem_index(s)); + + tcg_temp_free_i64(tmp); + break; + case 1: + tmp = tcg_temp_new_i64(); + + tcg_gen_qemu_ld16u(tmp, src, get_mem_index(s)); + tcg_gen_qemu_st16(tmp, dest, get_mem_index(s)); + + tcg_temp_free_i64(tmp); + break; + case 3: + tmp = tcg_temp_new_i64(); + + tcg_gen_qemu_ld32u(tmp, src, get_mem_index(s)); + tcg_gen_qemu_st32(tmp, dest, get_mem_index(s)); + + tcg_temp_free_i64(tmp); + break; + case 4: + tmp = tcg_temp_new_i64(); + tmp2 = tcg_temp_new_i64(); + + tcg_gen_qemu_ld32u(tmp, src, get_mem_index(s)); + tcg_gen_addi_i64(src, src, 4); + tcg_gen_qemu_ld8u(tmp2, src, get_mem_index(s)); + tcg_gen_qemu_st32(tmp, dest, get_mem_index(s)); + tcg_gen_addi_i64(dest, dest, 4); + tcg_gen_qemu_st8(tmp2, dest, get_mem_index(s)); + + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 7: + tmp = tcg_temp_new_i64(); + + tcg_gen_qemu_ld64(tmp, src, get_mem_index(s)); + tcg_gen_qemu_st64(tmp, dest, get_mem_index(s)); + + tcg_temp_free_i64(tmp); + break; + default: + /* The inline version can become too big for too uneven numbers, only + use it on known good lengths */ + tmp = tcg_temp_new_i64(); + tmp2 = tcg_const_i64(8); + for (i = 0; (i + 7) <= l; i += 8) { + tcg_gen_qemu_ld64(tmp, src, get_mem_index(s)); + tcg_gen_qemu_st64(tmp, dest, get_mem_index(s)); + + tcg_gen_add_i64(src, src, tmp2); + tcg_gen_add_i64(dest, dest, tmp2); + } + + tcg_temp_free_i64(tmp2); + tmp2 = tcg_const_i64(1); + + for (; i <= l; i++) { + tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s)); + tcg_gen_qemu_st8(tmp, dest, get_mem_index(s)); + + tcg_gen_add_i64(src, src, tmp2); + tcg_gen_add_i64(dest, dest, tmp2); + } + + tcg_temp_free_i64(tmp2); + 
tcg_temp_free_i64(tmp); + break; + } + + tcg_gen_br(l_out); + + gen_set_label(l_memset); + /* memset case (dest == (src + 1)) */ + + tmp = tcg_temp_new_i64(); + tmp2 = tcg_temp_new_i64(); + /* fill tmp with the byte */ + tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s)); + tcg_gen_shli_i64(tmp2, tmp, 8); + tcg_gen_or_i64(tmp, tmp, tmp2); + tcg_gen_shli_i64(tmp2, tmp, 16); + tcg_gen_or_i64(tmp, tmp, tmp2); + tcg_gen_shli_i64(tmp2, tmp, 32); + tcg_gen_or_i64(tmp, tmp, tmp2); + tcg_temp_free_i64(tmp2); + + tmp2 = tcg_const_i64(8); + + for (i = 0; (i + 7) <= l; i += 8) { + tcg_gen_qemu_st64(tmp, dest, get_mem_index(s)); + tcg_gen_addi_i64(dest, dest, 8); + } + + tcg_temp_free_i64(tmp2); + tmp2 = tcg_const_i64(1); + + for (; i <= l; i++) { + tcg_gen_qemu_st8(tmp, dest, get_mem_index(s)); + tcg_gen_addi_i64(dest, dest, 1); + } + + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp); + + gen_set_label(l_out); + + tcg_temp_free(dest); + tcg_temp_free(src); +} + +static void gen_op_clc(DisasContext *s, int l, TCGv_i64 s1, TCGv_i64 s2) +{ + TCGv_i64 tmp; + TCGv_i64 tmp2; + TCGv_i32 vl; + + /* check for simple 32bit or 64bit match */ + switch (l) { + case 0: + tmp = tcg_temp_new_i64(); + tmp2 = tcg_temp_new_i64(); + + tcg_gen_qemu_ld8u(tmp, s1, get_mem_index(s)); + tcg_gen_qemu_ld8u(tmp2, s2, get_mem_index(s)); + cmp_u64(s, tmp, tmp2); + + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + return; + case 1: + tmp = tcg_temp_new_i64(); + tmp2 = tcg_temp_new_i64(); + + tcg_gen_qemu_ld16u(tmp, s1, get_mem_index(s)); + tcg_gen_qemu_ld16u(tmp2, s2, get_mem_index(s)); + cmp_u64(s, tmp, tmp2); + + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + return; + case 3: + tmp = tcg_temp_new_i64(); + tmp2 = tcg_temp_new_i64(); + + tcg_gen_qemu_ld32u(tmp, s1, get_mem_index(s)); + tcg_gen_qemu_ld32u(tmp2, s2, get_mem_index(s)); + cmp_u64(s, tmp, tmp2); + + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + return; + case 7: + tmp = tcg_temp_new_i64(); + tmp2 = tcg_temp_new_i64(); + + tcg_gen_qemu_ld64(tmp, s1, get_mem_index(s)); + tcg_gen_qemu_ld64(tmp2, s2, get_mem_index(s)); + cmp_u64(s, tmp, tmp2); + + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + return; + } + + potential_page_fault(s); + vl = tcg_const_i32(l); + gen_helper_clc(cc_op, vl, s1, s2); + tcg_temp_free_i32(vl); + set_cc_static(s); +} + +static void disas_e3(DisasContext* s, int op, int r1, int x2, int b2, int d2) +{ + TCGv_i64 addr, tmp, tmp2, tmp3, tmp4; + TCGv_i32 tmp32_1, tmp32_2, tmp32_3; + + LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n", + op, r1, x2, b2, d2); + addr = get_address(s, x2, b2, d2); + switch (op) { + case 0x2: /* LTG R1,D2(X2,B2) [RXY] */ + case 0x4: /* lg r1,d2(x2,b2) */ + tcg_gen_qemu_ld64(regs[r1], addr, get_mem_index(s)); + if (op == 0x2) { + set_cc_s64(s, regs[r1]); + } + break; + case 0x12: /* LT R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tmp32_1 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_1, tmp2); + store_reg32(r1, tmp32_1); + set_cc_s32(s, tmp32_1); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + break; + case 0xc: /* MSG R1,D2(X2,B2) [RXY] */ + case 0x1c: /* MSGF R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + if (op == 0xc) { + tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s)); + } else { + tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s)); + } + tcg_gen_mul_i64(regs[r1], regs[r1], tmp2); + tcg_temp_free_i64(tmp2); + break; + case 0xd: /* DSG R1,D2(X2,B2) [RXY] */ + case 0x1d: /* DSGF R1,D2(X2,B2) [RXY] */ + tmp2 = 
tcg_temp_new_i64(); + if (op == 0x1d) { + tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s)); + } else { + tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s)); + } + tmp4 = load_reg(r1 + 1); + tmp3 = tcg_temp_new_i64(); + tcg_gen_div_i64(tmp3, tmp4, tmp2); + store_reg(r1 + 1, tmp3); + tcg_gen_rem_i64(tmp3, tmp4, tmp2); + store_reg(r1, tmp3); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + tcg_temp_free_i64(tmp4); + break; + case 0x8: /* AG R1,D2(X2,B2) [RXY] */ + case 0xa: /* ALG R1,D2(X2,B2) [RXY] */ + case 0x18: /* AGF R1,D2(X2,B2) [RXY] */ + case 0x1a: /* ALGF R1,D2(X2,B2) [RXY] */ + if (op == 0x1a) { + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s)); + } else if (op == 0x18) { + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s)); + } else { + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s)); + } + tmp4 = load_reg(r1); + tmp3 = tcg_temp_new_i64(); + tcg_gen_add_i64(tmp3, tmp4, tmp2); + store_reg(r1, tmp3); + switch (op) { + case 0x8: + case 0x18: + set_cc_add64(s, tmp4, tmp2, tmp3); + break; + case 0xa: + case 0x1a: + set_cc_addu64(s, tmp4, tmp2, tmp3); + break; + default: + tcg_abort(); + } + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + tcg_temp_free_i64(tmp4); + break; + case 0x9: /* SG R1,D2(X2,B2) [RXY] */ + case 0xb: /* SLG R1,D2(X2,B2) [RXY] */ + case 0x19: /* SGF R1,D2(X2,B2) [RXY] */ + case 0x1b: /* SLGF R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + if (op == 0x19) { + tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s)); + } else if (op == 0x1b) { + tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s)); + } else { + tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s)); + } + tmp4 = load_reg(r1); + tmp3 = tcg_temp_new_i64(); + tcg_gen_sub_i64(tmp3, tmp4, tmp2); + store_reg(r1, tmp3); + switch (op) { + case 0x9: + case 0x19: + set_cc_sub64(s, tmp4, tmp2, tmp3); + break; + case 0xb: + case 0x1b: + set_cc_subu64(s, tmp4, tmp2, tmp3); + break; + default: + tcg_abort(); + } + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + tcg_temp_free_i64(tmp4); + break; + case 0xf: /* LRVG R1,D2(X2,B2) [RXE] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s)); + tcg_gen_bswap64_i64(tmp2, tmp2); + store_reg(r1, tmp2); + tcg_temp_free_i64(tmp2); + break; + case 0x14: /* LGF R1,D2(X2,B2) [RXY] */ + case 0x16: /* LLGF R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s)); + if (op == 0x14) { + tcg_gen_ext32s_i64(tmp2, tmp2); + } + store_reg(r1, tmp2); + tcg_temp_free_i64(tmp2); + break; + case 0x15: /* LGH R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16s(tmp2, addr, get_mem_index(s)); + store_reg(r1, tmp2); + tcg_temp_free_i64(tmp2); + break; + case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s)); + tcg_gen_andi_i64(tmp2, tmp2, 0x7fffffffULL); + store_reg(r1, tmp2); + tcg_temp_free_i64(tmp2); + break; + case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tmp32_1 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_1, tmp2); + tcg_temp_free_i64(tmp2); + tcg_gen_bswap32_i32(tmp32_1, tmp32_1); + store_reg32(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tmp32_1 = tcg_temp_new_i32(); + tcg_gen_qemu_ld16u(tmp2, addr, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_1, tmp2); + 
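+ /* LRVH: byte-swap the loaded halfword and write only the low 16 bits of r1 */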
tcg_temp_free_i64(tmp2); + tcg_gen_bswap16_i32(tmp32_1, tmp32_1); + store_reg16(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x20: /* CG R1,D2(X2,B2) [RXY] */ + case 0x21: /* CLG R1,D2(X2,B2) */ + case 0x30: /* CGF R1,D2(X2,B2) [RXY] */ + case 0x31: /* CLGF R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + switch (op) { + case 0x20: + case 0x21: + tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s)); + break; + case 0x30: + tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s)); + break; + case 0x31: + tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s)); + break; + default: + tcg_abort(); + } + switch (op) { + case 0x20: + case 0x30: + cmp_s64(s, regs[r1], tmp2); + break; + case 0x21: + case 0x31: + cmp_u64(s, regs[r1], tmp2); + break; + default: + tcg_abort(); + } + tcg_temp_free_i64(tmp2); + break; + case 0x24: /* stg r1, d2(x2,b2) */ + tcg_gen_qemu_st64(regs[r1], addr, get_mem_index(s)); + break; + case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */ + tmp32_1 = load_reg32(r1); + tmp2 = tcg_temp_new_i64(); + tcg_gen_bswap32_i32(tmp32_1, tmp32_1); + tcg_gen_extu_i32_i64(tmp2, tmp32_1); + tcg_temp_free_i32(tmp32_1); + tcg_gen_qemu_st32(tmp2, addr, get_mem_index(s)); + tcg_temp_free_i64(tmp2); + break; + case 0x50: /* STY R1,D2(X2,B2) [RXY] */ + tmp32_1 = load_reg32(r1); + tmp2 = tcg_temp_new_i64(); + tcg_gen_extu_i32_i64(tmp2, tmp32_1); + tcg_temp_free_i32(tmp32_1); + tcg_gen_qemu_st32(tmp2, addr, get_mem_index(s)); + tcg_temp_free_i64(tmp2); + break; + case 0x57: /* XY R1,D2(X2,B2) [RXY] */ + tmp32_1 = load_reg32(r1); + tmp32_2 = tcg_temp_new_i32(); + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_2, tmp2); + tcg_temp_free_i64(tmp2); + tcg_gen_xor_i32(tmp32_2, tmp32_1, tmp32_2); + store_reg32(r1, tmp32_2); + set_cc_nz_u32(s, tmp32_2); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x58: /* LY R1,D2(X2,B2) [RXY] */ + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp3, addr, get_mem_index(s)); + store_reg32_i64(r1, tmp3); + tcg_temp_free_i64(tmp3); + break; + case 0x5a: /* AY R1,D2(X2,B2) [RXY] */ + case 0x5b: /* SY R1,D2(X2,B2) [RXY] */ + tmp32_1 = load_reg32(r1); + tmp32_2 = tcg_temp_new_i32(); + tmp32_3 = tcg_temp_new_i32(); + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_2, tmp2); + tcg_temp_free_i64(tmp2); + switch (op) { + case 0x5a: + tcg_gen_add_i32(tmp32_3, tmp32_1, tmp32_2); + break; + case 0x5b: + tcg_gen_sub_i32(tmp32_3, tmp32_1, tmp32_2); + break; + default: + tcg_abort(); + } + store_reg32(r1, tmp32_3); + switch (op) { + case 0x5a: + set_cc_add32(s, tmp32_1, tmp32_2, tmp32_3); + break; + case 0x5b: + set_cc_sub32(s, tmp32_1, tmp32_2, tmp32_3); + break; + default: + tcg_abort(); + } + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + case 0x71: /* LAY R1,D2(X2,B2) [RXY] */ + store_reg(r1, addr); + break; + case 0x72: /* STCY R1,D2(X2,B2) [RXY] */ + tmp32_1 = load_reg32(r1); + tmp2 = tcg_temp_new_i64(); + tcg_gen_ext_i32_i64(tmp2, tmp32_1); + tcg_gen_qemu_st8(tmp2, addr, get_mem_index(s)); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i64(tmp2); + break; + case 0x73: /* ICY R1,D2(X2,B2) [RXY] */ + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld8u(tmp3, addr, get_mem_index(s)); + store_reg8(r1, tmp3); + tcg_temp_free_i64(tmp3); + break; + case 0x76: /* LB R1,D2(X2,B2) [RXY] */ + case 0x77: /* LGB R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld8s(tmp2, addr, 
get_mem_index(s)); + switch (op) { + case 0x76: + tcg_gen_ext8s_i64(tmp2, tmp2); + store_reg32_i64(r1, tmp2); + break; + case 0x77: + tcg_gen_ext8s_i64(tmp2, tmp2); + store_reg(r1, tmp2); + break; + default: + tcg_abort(); + } + tcg_temp_free_i64(tmp2); + break; + case 0x78: /* LHY R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16s(tmp2, addr, get_mem_index(s)); + store_reg32_i64(r1, tmp2); + tcg_temp_free_i64(tmp2); + break; + case 0x80: /* NG R1,D2(X2,B2) [RXY] */ + case 0x81: /* OG R1,D2(X2,B2) [RXY] */ + case 0x82: /* XG R1,D2(X2,B2) [RXY] */ + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp3, addr, get_mem_index(s)); + switch (op) { + case 0x80: + tcg_gen_and_i64(regs[r1], regs[r1], tmp3); + break; + case 0x81: + tcg_gen_or_i64(regs[r1], regs[r1], tmp3); + break; + case 0x82: + tcg_gen_xor_i64(regs[r1], regs[r1], tmp3); + break; + default: + tcg_abort(); + } + set_cc_nz_u64(s, regs[r1]); + tcg_temp_free_i64(tmp3); + break; + case 0x86: /* MLG R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tmp32_1 = tcg_const_i32(r1); + tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s)); + gen_helper_mlg(tmp32_1, tmp2); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + break; + case 0x87: /* DLG R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tmp32_1 = tcg_const_i32(r1); + tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s)); + gen_helper_dlg(tmp32_1, tmp2); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + break; + case 0x88: /* ALCG R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s)); + /* XXX possible optimization point */ + gen_op_calc_cc(s); + tcg_gen_extu_i32_i64(tmp3, cc_op); + tcg_gen_shri_i64(tmp3, tmp3, 1); + tcg_gen_andi_i64(tmp3, tmp3, 1); + tcg_gen_add_i64(tmp3, tmp2, tmp3); + tcg_gen_add_i64(tmp3, regs[r1], tmp3); + store_reg(r1, tmp3); + set_cc_addu64(s, regs[r1], tmp2, tmp3); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x89: /* SLBG R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tmp32_1 = tcg_const_i32(r1); + tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s)); + /* XXX possible optimization point */ + gen_op_calc_cc(s); + gen_helper_slbg(cc_op, cc_op, tmp32_1, regs[r1], tmp2); + set_cc_static(s); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + break; + case 0x90: /* LLGC R1,D2(X2,B2) [RXY] */ + tcg_gen_qemu_ld8u(regs[r1], addr, get_mem_index(s)); + break; + case 0x91: /* LLGH R1,D2(X2,B2) [RXY] */ + tcg_gen_qemu_ld16u(regs[r1], addr, get_mem_index(s)); + break; + case 0x94: /* LLC R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld8u(tmp2, addr, get_mem_index(s)); + store_reg32_i64(r1, tmp2); + tcg_temp_free_i64(tmp2); + break; + case 0x95: /* LLH R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16u(tmp2, addr, get_mem_index(s)); + store_reg32_i64(r1, tmp2); + tcg_temp_free_i64(tmp2); + break; + case 0x96: /* ML R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tmp3 = load_reg((r1 + 1) & 15); + tcg_gen_ext32u_i64(tmp3, tmp3); + tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s)); + tcg_gen_mul_i64(tmp2, tmp2, tmp3); + store_reg32_i64((r1 + 1) & 15, tmp2); + tcg_gen_shri_i64(tmp2, tmp2, 32); + store_reg32_i64(r1, tmp2); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x97: /* DL R1,D2(X2,B2) [RXY] */ + /* reg(r1) = reg(r1, r1+1) % ld32(addr) */ + /* reg(r1+1) = reg(r1, r1+1) / ld32(addr) */ + tmp = load_reg(r1); + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, 
addr, get_mem_index(s)); + tmp3 = load_reg((r1 + 1) & 15); + tcg_gen_ext32u_i64(tmp2, tmp2); + tcg_gen_ext32u_i64(tmp3, tmp3); + tcg_gen_shli_i64(tmp, tmp, 32); + tcg_gen_or_i64(tmp, tmp, tmp3); + + tcg_gen_rem_i64(tmp3, tmp, tmp2); + tcg_gen_div_i64(tmp, tmp, tmp2); + store_reg32_i64((r1 + 1) & 15, tmp); + store_reg32_i64(r1, tmp3); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x98: /* ALC R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tmp32_1 = load_reg32(r1); + tmp32_2 = tcg_temp_new_i32(); + tmp32_3 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_2, tmp2); + /* XXX possible optimization point */ + gen_op_calc_cc(s); + gen_helper_addc_u32(tmp32_3, cc_op, tmp32_1, tmp32_2); + set_cc_addu32(s, tmp32_1, tmp32_2, tmp32_3); + store_reg32(r1, tmp32_3); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + case 0x99: /* SLB R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_2, tmp2); + /* XXX possible optimization point */ + gen_op_calc_cc(s); + gen_helper_slb(cc_op, cc_op, tmp32_1, tmp32_2); + set_cc_static(s); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + default: + LOG_DISAS("illegal e3 operation 0x%x\n", op); + gen_illegal_opcode(s, 3); + break; + } + tcg_temp_free_i64(addr); +} + +#ifndef CONFIG_USER_ONLY +static void disas_e5(DisasContext* s, uint64_t insn) +{ + TCGv_i64 tmp, tmp2; + int op = (insn >> 32) & 0xff; + + tmp = get_address(s, 0, (insn >> 28) & 0xf, (insn >> 16) & 0xfff); + tmp2 = get_address(s, 0, (insn >> 12) & 0xf, insn & 0xfff); + + LOG_DISAS("disas_e5: insn %" PRIx64 "\n", insn); + switch (op) { + case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */ + /* Test Protection */ + potential_page_fault(s); + gen_helper_tprot(cc_op, tmp, tmp2); + set_cc_static(s); + break; + default: + LOG_DISAS("illegal e5 operation 0x%x\n", op); + gen_illegal_opcode(s, 3); + break; + } + + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); +} +#endif + +static void disas_eb(DisasContext *s, int op, int r1, int r3, int b2, int d2) +{ + TCGv_i64 tmp, tmp2, tmp3, tmp4; + TCGv_i32 tmp32_1, tmp32_2; + int i, stm_len; + int ilc = 3; + + LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n", + op, r1, r3, b2, d2); + switch (op) { + case 0xc: /* SRLG R1,R3,D2(B2) [RSY] */ + case 0xd: /* SLLG R1,R3,D2(B2) [RSY] */ + case 0xa: /* SRAG R1,R3,D2(B2) [RSY] */ + case 0xb: /* SLAG R1,R3,D2(B2) [RSY] */ + case 0x1c: /* RLLG R1,R3,D2(B2) [RSY] */ + if (b2) { + tmp = get_address(s, 0, b2, d2); + tcg_gen_andi_i64(tmp, tmp, 0x3f); + } else { + tmp = tcg_const_i64(d2 & 0x3f); + } + switch (op) { + case 0xc: + tcg_gen_shr_i64(regs[r1], regs[r3], tmp); + break; + case 0xd: + tcg_gen_shl_i64(regs[r1], regs[r3], tmp); + break; + case 0xa: + tcg_gen_sar_i64(regs[r1], regs[r3], tmp); + break; + case 0xb: + tmp2 = tcg_temp_new_i64(); + tmp3 = tcg_temp_new_i64(); + gen_op_update2_cc_i64(s, CC_OP_SLAG, regs[r3], tmp); + tcg_gen_shl_i64(tmp2, regs[r3], tmp); + /* override sign bit with source sign */ + tcg_gen_andi_i64(tmp2, tmp2, ~0x8000000000000000ULL); + tcg_gen_andi_i64(tmp3, regs[r3], 0x8000000000000000ULL); + tcg_gen_or_i64(regs[r1], tmp2, tmp3); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x1c: + 
tcg_gen_rotl_i64(regs[r1], regs[r3], tmp); + break; + default: + tcg_abort(); + break; + } + if (op == 0xa) { + set_cc_s64(s, regs[r1]); + } + tcg_temp_free_i64(tmp); + break; + case 0x1d: /* RLL R1,R3,D2(B2) [RSY] */ + if (b2) { + tmp = get_address(s, 0, b2, d2); + tcg_gen_andi_i64(tmp, tmp, 0x3f); + } else { + tmp = tcg_const_i64(d2 & 0x3f); + } + tmp32_1 = tcg_temp_new_i32(); + tmp32_2 = load_reg32(r3); + tcg_gen_trunc_i64_i32(tmp32_1, tmp); + switch (op) { + case 0x1d: + tcg_gen_rotl_i32(tmp32_1, tmp32_2, tmp32_1); + break; + default: + tcg_abort(); + break; + } + store_reg32(r1, tmp32_1); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x4: /* LMG R1,R3,D2(B2) [RSE] */ + case 0x24: /* STMG R1,R3,D2(B2) [RSE] */ + stm_len = 8; + goto do_mh; + case 0x26: /* STMH R1,R3,D2(B2) [RSE] */ + case 0x96: /* LMH R1,R3,D2(B2) [RSE] */ + stm_len = 4; +do_mh: + /* Apparently, unrolling lmg/stmg of any size gains performance - + even for very long ones... */ + tmp = get_address(s, 0, b2, d2); + tmp3 = tcg_const_i64(stm_len); + tmp4 = tcg_const_i64(op == 0x26 ? 32 : 4); + for (i = r1;; i = (i + 1) % 16) { + switch (op) { + case 0x4: + tcg_gen_qemu_ld64(regs[i], tmp, get_mem_index(s)); + break; + case 0x96: + tmp2 = tcg_temp_new_i64(); +#if HOST_LONG_BITS == 32 + tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s)); + tcg_gen_trunc_i64_i32(TCGV_HIGH(regs[i]), tmp2); +#else + tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s)); + tcg_gen_shl_i64(tmp2, tmp2, tmp4); + tcg_gen_ext32u_i64(regs[i], regs[i]); + tcg_gen_or_i64(regs[i], regs[i], tmp2); +#endif + tcg_temp_free_i64(tmp2); + break; + case 0x24: + tcg_gen_qemu_st64(regs[i], tmp, get_mem_index(s)); + break; + case 0x26: + tmp2 = tcg_temp_new_i64(); + tcg_gen_shr_i64(tmp2, regs[i], tmp4); + tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s)); + tcg_temp_free_i64(tmp2); + break; + default: + tcg_abort(); + } + if (i == r3) { + break; + } + tcg_gen_add_i64(tmp, tmp, tmp3); + } + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp3); + tcg_temp_free_i64(tmp4); + break; + case 0x2c: /* STCMH R1,M3,D2(B2) [RSY] */ + tmp = get_address(s, 0, b2, d2); + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r3); + potential_page_fault(s); + gen_helper_stcmh(tmp32_1, tmp, tmp32_2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; +#ifndef CONFIG_USER_ONLY + case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */ + /* Load Control */ + check_privileged(s, ilc); + tmp = get_address(s, 0, b2, d2); + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r3); + potential_page_fault(s); + gen_helper_lctlg(tmp32_1, tmp, tmp32_2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */ + /* Store Control */ + check_privileged(s, ilc); + tmp = get_address(s, 0, b2, d2); + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r3); + potential_page_fault(s); + gen_helper_stctg(tmp32_1, tmp, tmp32_2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; +#endif + case 0x30: /* CSG R1,R3,D2(B2) [RSY] */ + tmp = get_address(s, 0, b2, d2); + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r3); + potential_page_fault(s); + /* XXX rewrite in tcg */ + gen_helper_csg(cc_op, tmp32_1, tmp, tmp32_2); + set_cc_static(s); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x3e: /* CDSG R1,R3,D2(B2) [RSY] */ + tmp = get_address(s, 0, 
b2, d2); + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r3); + potential_page_fault(s); + /* XXX rewrite in tcg */ + gen_helper_cdsg(cc_op, tmp32_1, tmp, tmp32_2); + set_cc_static(s); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x51: /* TMY D1(B1),I2 [SIY] */ + tmp = get_address(s, 0, b2, d2); /* SIY -> this is the destination */ + tmp2 = tcg_const_i64((r1 << 4) | r3); + tcg_gen_qemu_ld8u(tmp, tmp, get_mem_index(s)); + /* yes, this is a 32 bit operation with 64 bit tcg registers, because + that incurs less conversions */ + cmp_64(s, tmp, tmp2, CC_OP_TM_32); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x52: /* MVIY D1(B1),I2 [SIY] */ + tmp = get_address(s, 0, b2, d2); /* SIY -> this is the destination */ + tmp2 = tcg_const_i64((r1 << 4) | r3); + tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s)); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x55: /* CLIY D1(B1),I2 [SIY] */ + tmp3 = get_address(s, 0, b2, d2); /* SIY -> this is the 1st operand */ + tmp = tcg_temp_new_i64(); + tmp32_1 = tcg_temp_new_i32(); + tcg_gen_qemu_ld8u(tmp, tmp3, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_1, tmp); + cmp_u32c(s, tmp32_1, (r1 << 4) | r3); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp3); + tcg_temp_free_i32(tmp32_1); + break; + case 0x80: /* ICMH R1,M3,D2(B2) [RSY] */ + tmp = get_address(s, 0, b2, d2); + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r3); + potential_page_fault(s); + /* XXX split CC calculation out */ + gen_helper_icmh(cc_op, tmp32_1, tmp, tmp32_2); + set_cc_static(s); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + default: + LOG_DISAS("illegal eb operation 0x%x\n", op); + gen_illegal_opcode(s, ilc); + break; + } +} + +static void disas_ed(DisasContext *s, int op, int r1, int x2, int b2, int d2, + int r1b) +{ + TCGv_i32 tmp_r1, tmp32; + TCGv_i64 addr, tmp; + addr = get_address(s, x2, b2, d2); + tmp_r1 = tcg_const_i32(r1); + switch (op) { + case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */ + potential_page_fault(s); + gen_helper_lxdb(tmp_r1, addr); + break; + case 0x9: /* CEB R1,D2(X2,B2) [RXE] */ + tmp = tcg_temp_new_i64(); + tmp32 = load_freg32(r1); + tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s)); + set_cc_cmp_f32_i64(s, tmp32, tmp); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32); + break; + case 0xa: /* AEB R1,D2(X2,B2) [RXE] */ + tmp = tcg_temp_new_i64(); + tmp32 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32, tmp); + gen_helper_aeb(tmp_r1, tmp32); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32); + + tmp32 = load_freg32(r1); + set_cc_nz_f32(s, tmp32); + tcg_temp_free_i32(tmp32); + break; + case 0xb: /* SEB R1,D2(X2,B2) [RXE] */ + tmp = tcg_temp_new_i64(); + tmp32 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32, tmp); + gen_helper_seb(tmp_r1, tmp32); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32); + + tmp32 = load_freg32(r1); + set_cc_nz_f32(s, tmp32); + tcg_temp_free_i32(tmp32); + break; + case 0xd: /* DEB R1,D2(X2,B2) [RXE] */ + tmp = tcg_temp_new_i64(); + tmp32 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32, tmp); + gen_helper_deb(tmp_r1, tmp32); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32); + break; + case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */ + potential_page_fault(s); + gen_helper_tceb(cc_op, tmp_r1, addr); + set_cc_static(s); 
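/*
 * Illustrative sketch (not part of the patch): AEB/SEB above fetch a 32-bit
 * word, hand it to a helper together with the register number, and derive
 * the condition code from the result left in the float register (CEB instead
 * compares directly).  Ignoring IEEE exceptions and NaNs, which the real
 * helpers handle via softfloat, the data flow is roughly:
 */
#include <stdint.h>
#include <string.h>

static float short_bfp_add(float f1, uint32_t mem_word)
{
    float f2;
    memcpy(&f2, &mem_word, sizeof(f2));   /* reinterpret the loaded word as a short BFP value */
    return f1 + f2;                       /* the real helper uses softfloat, not host FP */
}

/* cc classification of the result, as applied after AEB/SEB above
 * (NaN, which architecturally gets its own cc value, is not modelled) */
static int cc_from_f32(float r)
{
    if (r == 0.0f) {
        return 0;
    }
    return (r < 0.0f) ? 1 : 2;
}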
+ break; + case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */ + potential_page_fault(s); + gen_helper_tcdb(cc_op, tmp_r1, addr); + set_cc_static(s); + break; + case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */ + potential_page_fault(s); + gen_helper_tcxb(cc_op, tmp_r1, addr); + set_cc_static(s); + break; + case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */ + tmp = tcg_temp_new_i64(); + tmp32 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32, tmp); + gen_helper_meeb(tmp_r1, tmp32); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32); + break; + case 0x19: /* CDB R1,D2(X2,B2) [RXE] */ + potential_page_fault(s); + gen_helper_cdb(cc_op, tmp_r1, addr); + set_cc_static(s); + break; + case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */ + potential_page_fault(s); + gen_helper_adb(cc_op, tmp_r1, addr); + set_cc_static(s); + break; + case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */ + potential_page_fault(s); + gen_helper_sdb(cc_op, tmp_r1, addr); + set_cc_static(s); + break; + case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */ + potential_page_fault(s); + gen_helper_mdb(tmp_r1, addr); + break; + case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */ + potential_page_fault(s); + gen_helper_ddb(tmp_r1, addr); + break; + case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */ + /* for RXF insns, r1 is R3 and r1b is R1 */ + tmp32 = tcg_const_i32(r1b); + potential_page_fault(s); + gen_helper_madb(tmp32, addr, tmp_r1); + tcg_temp_free_i32(tmp32); + break; + default: + LOG_DISAS("illegal ed operation 0x%x\n", op); + gen_illegal_opcode(s, 3); + return; + } + tcg_temp_free_i32(tmp_r1); + tcg_temp_free_i64(addr); +} + +static void disas_a5(DisasContext *s, int op, int r1, int i2) +{ + TCGv_i64 tmp, tmp2; + TCGv_i32 tmp32; + LOG_DISAS("disas_a5: op 0x%x r1 %d i2 0x%x\n", op, r1, i2); + switch (op) { + case 0x0: /* IIHH R1,I2 [RI] */ + tmp = tcg_const_i64(i2); + tcg_gen_deposit_i64(regs[r1], regs[r1], tmp, 48, 16); + tcg_temp_free_i64(tmp); + break; + case 0x1: /* IIHL R1,I2 [RI] */ + tmp = tcg_const_i64(i2); + tcg_gen_deposit_i64(regs[r1], regs[r1], tmp, 32, 16); + tcg_temp_free_i64(tmp); + break; + case 0x2: /* IILH R1,I2 [RI] */ + tmp = tcg_const_i64(i2); + tcg_gen_deposit_i64(regs[r1], regs[r1], tmp, 16, 16); + tcg_temp_free_i64(tmp); + break; + case 0x3: /* IILL R1,I2 [RI] */ + tmp = tcg_const_i64(i2); + tcg_gen_deposit_i64(regs[r1], regs[r1], tmp, 0, 16); + tcg_temp_free_i64(tmp); + break; + case 0x4: /* NIHH R1,I2 [RI] */ + case 0x8: /* OIHH R1,I2 [RI] */ + tmp = load_reg(r1); + tmp32 = tcg_temp_new_i32(); + switch (op) { + case 0x4: + tmp2 = tcg_const_i64((((uint64_t)i2) << 48) + | 0x0000ffffffffffffULL); + tcg_gen_and_i64(tmp, tmp, tmp2); + break; + case 0x8: + tmp2 = tcg_const_i64(((uint64_t)i2) << 48); + tcg_gen_or_i64(tmp, tmp, tmp2); + break; + default: + tcg_abort(); + } + store_reg(r1, tmp); + tcg_gen_shri_i64(tmp2, tmp, 48); + tcg_gen_trunc_i64_i32(tmp32, tmp2); + set_cc_nz_u32(s, tmp32); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32); + tcg_temp_free_i64(tmp); + break; + case 0x5: /* NIHL R1,I2 [RI] */ + case 0x9: /* OIHL R1,I2 [RI] */ + tmp = load_reg(r1); + tmp32 = tcg_temp_new_i32(); + switch (op) { + case 0x5: + tmp2 = tcg_const_i64((((uint64_t)i2) << 32) + | 0xffff0000ffffffffULL); + tcg_gen_and_i64(tmp, tmp, tmp2); + break; + case 0x9: + tmp2 = tcg_const_i64(((uint64_t)i2) << 32); + tcg_gen_or_i64(tmp, tmp, tmp2); + break; + default: + tcg_abort(); + } + store_reg(r1, tmp); + tcg_gen_shri_i64(tmp2, tmp, 32); + tcg_gen_trunc_i64_i32(tmp32, tmp2); + tcg_gen_andi_i32(tmp32, tmp32, 0xffff); + set_cc_nz_u32(s, tmp32); + 
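/*
 * Illustrative sketch (not part of the patch): every A5 opcode above touches
 * exactly one 16-bit slice of the 64-bit register -- insert (IIxx), AND
 * (NIxx) or OR (OIxx) -- at bit offset 48, 32, 16 or 0, and for the AND/OR
 * forms the cc is then taken from the affected halfword only.  A compact C
 * model of the insert and AND forms, with made-up names:
 */
#include <stdint.h>

static uint64_t insert_halfword(uint64_t reg, uint16_t imm, int shift /* 48, 32, 16 or 0 */)
{
    uint64_t mask = 0xffffULL << shift;
    return (reg & ~mask) | ((uint64_t)imm << shift);       /* IIHH/IIHL/IILH/IILL */
}

static uint64_t and_halfword(uint64_t reg, uint16_t imm, int shift, int *cc)
{
    /* NIxx: bits outside the selected halfword stay untouched, exactly like
     * the (imm << shift) | ~mask constants built above */
    uint64_t res = reg & (((uint64_t)imm << shift) | ~(0xffffULL << shift));
    *cc = ((res >> shift) & 0xffff) != 0;                  /* cc from the affected halfword */
    return res;
}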
tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32); + tcg_temp_free_i64(tmp); + break; + case 0x6: /* NILH R1,I2 [RI] */ + case 0xa: /* OILH R1,I2 [RI] */ + tmp = load_reg(r1); + tmp32 = tcg_temp_new_i32(); + switch (op) { + case 0x6: + tmp2 = tcg_const_i64((((uint64_t)i2) << 16) + | 0xffffffff0000ffffULL); + tcg_gen_and_i64(tmp, tmp, tmp2); + break; + case 0xa: + tmp2 = tcg_const_i64(((uint64_t)i2) << 16); + tcg_gen_or_i64(tmp, tmp, tmp2); + break; + default: + tcg_abort(); + } + store_reg(r1, tmp); + tcg_gen_shri_i64(tmp, tmp, 16); + tcg_gen_trunc_i64_i32(tmp32, tmp); + tcg_gen_andi_i32(tmp32, tmp32, 0xffff); + set_cc_nz_u32(s, tmp32); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32); + tcg_temp_free_i64(tmp); + break; + case 0x7: /* NILL R1,I2 [RI] */ + case 0xb: /* OILL R1,I2 [RI] */ + tmp = load_reg(r1); + tmp32 = tcg_temp_new_i32(); + switch (op) { + case 0x7: + tmp2 = tcg_const_i64(i2 | 0xffffffffffff0000ULL); + tcg_gen_and_i64(tmp, tmp, tmp2); + break; + case 0xb: + tmp2 = tcg_const_i64(i2); + tcg_gen_or_i64(tmp, tmp, tmp2); + break; + default: + tcg_abort(); + } + store_reg(r1, tmp); + tcg_gen_trunc_i64_i32(tmp32, tmp); + tcg_gen_andi_i32(tmp32, tmp32, 0xffff); + set_cc_nz_u32(s, tmp32); /* signedness should not matter here */ + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32); + tcg_temp_free_i64(tmp); + break; + case 0xc: /* LLIHH R1,I2 [RI] */ + tmp = tcg_const_i64( ((uint64_t)i2) << 48 ); + store_reg(r1, tmp); + tcg_temp_free_i64(tmp); + break; + case 0xd: /* LLIHL R1,I2 [RI] */ + tmp = tcg_const_i64( ((uint64_t)i2) << 32 ); + store_reg(r1, tmp); + tcg_temp_free_i64(tmp); + break; + case 0xe: /* LLILH R1,I2 [RI] */ + tmp = tcg_const_i64( ((uint64_t)i2) << 16 ); + store_reg(r1, tmp); + tcg_temp_free_i64(tmp); + break; + case 0xf: /* LLILL R1,I2 [RI] */ + tmp = tcg_const_i64(i2); + store_reg(r1, tmp); + tcg_temp_free_i64(tmp); + break; + default: + LOG_DISAS("illegal a5 operation 0x%x\n", op); + gen_illegal_opcode(s, 2); + return; + } +} + +static void disas_a7(DisasContext *s, int op, int r1, int i2) +{ + TCGv_i64 tmp, tmp2; + TCGv_i32 tmp32_1, tmp32_2, tmp32_3; + int l1; + + LOG_DISAS("disas_a7: op 0x%x r1 %d i2 0x%x\n", op, r1, i2); + switch (op) { + case 0x0: /* TMLH or TMH R1,I2 [RI] */ + case 0x1: /* TMLL or TML R1,I2 [RI] */ + case 0x2: /* TMHH R1,I2 [RI] */ + case 0x3: /* TMHL R1,I2 [RI] */ + tmp = load_reg(r1); + tmp2 = tcg_const_i64((uint16_t)i2); + switch (op) { + case 0x0: + tcg_gen_shri_i64(tmp, tmp, 16); + break; + case 0x1: + break; + case 0x2: + tcg_gen_shri_i64(tmp, tmp, 48); + break; + case 0x3: + tcg_gen_shri_i64(tmp, tmp, 32); + break; + } + tcg_gen_andi_i64(tmp, tmp, 0xffff); + cmp_64(s, tmp, tmp2, CC_OP_TM_64); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x4: /* brc m1, i2 */ + gen_brc(r1, s, i2 * 2LL); + return; + case 0x5: /* BRAS R1,I2 [RI] */ + tmp = tcg_const_i64(pc_to_link_info(s, s->pc + 4)); + store_reg(r1, tmp); + tcg_temp_free_i64(tmp); + gen_goto_tb(s, 0, s->pc + i2 * 2LL); + s->is_jmp = DISAS_TB_JUMP; + break; + case 0x6: /* BRCT R1,I2 [RI] */ + tmp32_1 = load_reg32(r1); + tcg_gen_subi_i32(tmp32_1, tmp32_1, 1); + store_reg32(r1, tmp32_1); + gen_update_cc_op(s); + l1 = gen_new_label(); + tcg_gen_brcondi_i32(TCG_COND_EQ, tmp32_1, 0, l1); + gen_goto_tb(s, 0, s->pc + (i2 * 2LL)); + gen_set_label(l1); + gen_goto_tb(s, 1, s->pc + 4); + s->is_jmp = DISAS_TB_JUMP; + tcg_temp_free_i32(tmp32_1); + break; + case 0x7: /* BRCTG R1,I2 [RI] */ + tmp = load_reg(r1); + tcg_gen_subi_i64(tmp, tmp, 1); + store_reg(r1, tmp); + 
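/*
 * Illustrative sketch (not part of the patch): BRCT/BRCTG here decrement the
 * count register and branch to pc + 2*I2 only while the result is non-zero;
 * the brcond on EQ 0 below picks the fall-through path.  Equivalent C, with
 * hypothetical names:
 */
#include <stdint.h>

static uint64_t branch_on_count(uint64_t *count, uint64_t pc,
                                int64_t i2 /* signed offset in halfwords */,
                                int ilen   /* 4 for the RI form */)
{
    *count -= 1;                 /* BRCTG decrements all 64 bits, BRCT only the low 32 */
    if (*count != 0) {
        return pc + 2 * i2;      /* taken: relative target */
    }
    return pc + ilen;            /* count exhausted: continue sequentially */
}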
gen_update_cc_op(s); + l1 = gen_new_label(); + tcg_gen_brcondi_i64(TCG_COND_EQ, tmp, 0, l1); + gen_goto_tb(s, 0, s->pc + (i2 * 2LL)); + gen_set_label(l1); + gen_goto_tb(s, 1, s->pc + 4); + s->is_jmp = DISAS_TB_JUMP; + tcg_temp_free_i64(tmp); + break; + case 0x8: /* lhi r1, i2 */ + tmp32_1 = tcg_const_i32(i2); + store_reg32(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x9: /* lghi r1, i2 */ + tmp = tcg_const_i64(i2); + store_reg(r1, tmp); + tcg_temp_free_i64(tmp); + break; + case 0xa: /* AHI R1,I2 [RI] */ + tmp32_1 = load_reg32(r1); + tmp32_2 = tcg_temp_new_i32(); + tmp32_3 = tcg_const_i32(i2); + + if (i2 < 0) { + tcg_gen_subi_i32(tmp32_2, tmp32_1, -i2); + } else { + tcg_gen_add_i32(tmp32_2, tmp32_1, tmp32_3); + } + + store_reg32(r1, tmp32_2); + set_cc_add32(s, tmp32_1, tmp32_3, tmp32_2); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + case 0xb: /* aghi r1, i2 */ + tmp = load_reg(r1); + tmp2 = tcg_const_i64(i2); + + if (i2 < 0) { + tcg_gen_subi_i64(regs[r1], tmp, -i2); + } else { + tcg_gen_add_i64(regs[r1], tmp, tmp2); + } + set_cc_add64(s, tmp, tmp2, regs[r1]); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0xc: /* MHI R1,I2 [RI] */ + tmp32_1 = load_reg32(r1); + tcg_gen_muli_i32(tmp32_1, tmp32_1, i2); + store_reg32(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0xd: /* MGHI R1,I2 [RI] */ + tmp = load_reg(r1); + tcg_gen_muli_i64(tmp, tmp, i2); + store_reg(r1, tmp); + tcg_temp_free_i64(tmp); + break; + case 0xe: /* CHI R1,I2 [RI] */ + tmp32_1 = load_reg32(r1); + cmp_s32c(s, tmp32_1, i2); + tcg_temp_free_i32(tmp32_1); + break; + case 0xf: /* CGHI R1,I2 [RI] */ + tmp = load_reg(r1); + cmp_s64c(s, tmp, i2); + tcg_temp_free_i64(tmp); + break; + default: + LOG_DISAS("illegal a7 operation 0x%x\n", op); + gen_illegal_opcode(s, 2); + return; + } +} + +static void disas_b2(DisasContext *s, int op, uint32_t insn) +{ + TCGv_i64 tmp, tmp2, tmp3; + TCGv_i32 tmp32_1, tmp32_2, tmp32_3; + int r1, r2; + int ilc = 2; +#ifndef CONFIG_USER_ONLY + int r3, d2, b2; +#endif + + r1 = (insn >> 4) & 0xf; + r2 = insn & 0xf; + + LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2); + + switch (op) { + case 0x22: /* IPM R1 [RRE] */ + tmp32_1 = tcg_const_i32(r1); + gen_op_calc_cc(s); + gen_helper_ipm(cc_op, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x41: /* CKSM R1,R2 [RRE] */ + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r2); + potential_page_fault(s); + gen_helper_cksm(tmp32_1, tmp32_2); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + gen_op_movi_cc(s, 0); + break; + case 0x4e: /* SAR R1,R2 [RRE] */ + tmp32_1 = load_reg32(r2); + tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUState, aregs[r1])); + tcg_temp_free_i32(tmp32_1); + break; + case 0x4f: /* EAR R1,R2 [RRE] */ + tmp32_1 = tcg_temp_new_i32(); + tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUState, aregs[r2])); + store_reg32(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x52: /* MSR R1,R2 [RRE] */ + tmp32_1 = load_reg32(r1); + tmp32_2 = load_reg32(r2); + tcg_gen_mul_i32(tmp32_1, tmp32_1, tmp32_2); + store_reg32(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x54: /* MVPG R1,R2 [RRE] */ + tmp = load_reg(0); + tmp2 = load_reg(r1); + tmp3 = load_reg(r2); + potential_page_fault(s); + gen_helper_mvpg(tmp, tmp2, tmp3); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + /* XXX check CCO bit and set CC accordingly */ + gen_op_movi_cc(s, 0); + 
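/*
 * Illustrative sketch (not part of the patch): the AHI/AGHI cases in disas_a7
 * above defer the condition code to set_cc_add32()/set_cc_add64(), which see
 * both operands and the result.  For a signed add the cc can be derived as
 * below (0 zero, 1 negative, 2 positive, 3 overflow -- the overflow encoding
 * is the usual s390 convention, not something visible in this hunk):
 */
#include <stdint.h>

static int cc_signed_add32(int32_t a, int32_t b, int32_t *result)
{
    int32_t r;
    if (__builtin_add_overflow(a, b, &r)) {   /* GCC/Clang builtin */
        *result = r;                          /* wrapped result is still written back */
        return 3;                             /* overflow */
    }
    *result = r;
    return (r == 0) ? 0 : (r < 0 ? 1 : 2);
}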
break; + case 0x55: /* MVST R1,R2 [RRE] */ + tmp32_1 = load_reg32(0); + tmp32_2 = tcg_const_i32(r1); + tmp32_3 = tcg_const_i32(r2); + potential_page_fault(s); + gen_helper_mvst(tmp32_1, tmp32_2, tmp32_3); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + gen_op_movi_cc(s, 1); + break; + case 0x5d: /* CLST R1,R2 [RRE] */ + tmp32_1 = load_reg32(0); + tmp32_2 = tcg_const_i32(r1); + tmp32_3 = tcg_const_i32(r2); + potential_page_fault(s); + gen_helper_clst(cc_op, tmp32_1, tmp32_2, tmp32_3); + set_cc_static(s); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + case 0x5e: /* SRST R1,R2 [RRE] */ + tmp32_1 = load_reg32(0); + tmp32_2 = tcg_const_i32(r1); + tmp32_3 = tcg_const_i32(r2); + potential_page_fault(s); + gen_helper_srst(cc_op, tmp32_1, tmp32_2, tmp32_3); + set_cc_static(s); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + +#ifndef CONFIG_USER_ONLY + case 0x02: /* STIDP D2(B2) [S] */ + /* Store CPU ID */ + check_privileged(s, ilc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + potential_page_fault(s); + gen_helper_stidp(tmp); + tcg_temp_free_i64(tmp); + break; + case 0x04: /* SCK D2(B2) [S] */ + /* Set Clock */ + check_privileged(s, ilc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + potential_page_fault(s); + gen_helper_sck(cc_op, tmp); + set_cc_static(s); + tcg_temp_free_i64(tmp); + break; + case 0x05: /* STCK D2(B2) [S] */ + /* Store Clock */ + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + potential_page_fault(s); + gen_helper_stck(cc_op, tmp); + set_cc_static(s); + tcg_temp_free_i64(tmp); + break; + case 0x06: /* SCKC D2(B2) [S] */ + /* Set Clock Comparator */ + check_privileged(s, ilc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + potential_page_fault(s); + gen_helper_sckc(tmp); + tcg_temp_free_i64(tmp); + break; + case 0x07: /* STCKC D2(B2) [S] */ + /* Store Clock Comparator */ + check_privileged(s, ilc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + potential_page_fault(s); + gen_helper_stckc(tmp); + tcg_temp_free_i64(tmp); + break; + case 0x08: /* SPT D2(B2) [S] */ + /* Set CPU Timer */ + check_privileged(s, ilc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + potential_page_fault(s); + gen_helper_spt(tmp); + tcg_temp_free_i64(tmp); + break; + case 0x09: /* STPT D2(B2) [S] */ + /* Store CPU Timer */ + check_privileged(s, ilc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + potential_page_fault(s); + gen_helper_stpt(tmp); + tcg_temp_free_i64(tmp); + break; + case 0x0a: /* SPKA D2(B2) [S] */ + /* Set PSW Key from Address */ + check_privileged(s, ilc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp2 = tcg_temp_new_i64(); + tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY); + tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4); + tcg_gen_or_i64(psw_mask, tmp2, tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp); + break; + case 0x0d: /* PTLB [S] */ + /* Purge TLB */ + check_privileged(s, ilc); + gen_helper_ptlb(); + break; + case 0x10: /* SPX D2(B2) [S] */ + /* Set Prefix Register */ + check_privileged(s, ilc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + potential_page_fault(s); + gen_helper_spx(tmp); + tcg_temp_free_i64(tmp); + break; + case 0x11: /* 
STPX D2(B2) [S] */ + /* Store Prefix */ + check_privileged(s, ilc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp2 = tcg_temp_new_i64(); + tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUState, psa)); + tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s)); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x12: /* STAP D2(B2) [S] */ + /* Store CPU Address */ + check_privileged(s, ilc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp2 = tcg_temp_new_i64(); + tmp32_1 = tcg_temp_new_i32(); + tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUState, cpu_num)); + tcg_gen_extu_i32_i64(tmp2, tmp32_1); + tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s)); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + break; + case 0x21: /* IPTE R1,R2 [RRE] */ + /* Invalidate PTE */ + check_privileged(s, ilc); + r1 = (insn >> 4) & 0xf; + r2 = insn & 0xf; + tmp = load_reg(r1); + tmp2 = load_reg(r2); + gen_helper_ipte(tmp, tmp2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x29: /* ISKE R1,R2 [RRE] */ + /* Insert Storage Key Extended */ + check_privileged(s, ilc); + r1 = (insn >> 4) & 0xf; + r2 = insn & 0xf; + tmp = load_reg(r2); + tmp2 = tcg_temp_new_i64(); + gen_helper_iske(tmp2, tmp); + store_reg(r1, tmp2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x2a: /* RRBE R1,R2 [RRE] */ + /* Set Storage Key Extended */ + check_privileged(s, ilc); + r1 = (insn >> 4) & 0xf; + r2 = insn & 0xf; + tmp32_1 = load_reg32(r1); + tmp = load_reg(r2); + gen_helper_rrbe(cc_op, tmp32_1, tmp); + set_cc_static(s); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i64(tmp); + break; + case 0x2b: /* SSKE R1,R2 [RRE] */ + /* Set Storage Key Extended */ + check_privileged(s, ilc); + r1 = (insn >> 4) & 0xf; + r2 = insn & 0xf; + tmp32_1 = load_reg32(r1); + tmp = load_reg(r2); + gen_helper_sske(tmp32_1, tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i64(tmp); + break; + case 0x34: /* STCH ? */ + /* Store Subchannel */ + check_privileged(s, ilc); + gen_op_movi_cc(s, 3); + break; + case 0x46: /* STURA R1,R2 [RRE] */ + /* Store Using Real Address */ + check_privileged(s, ilc); + r1 = (insn >> 4) & 0xf; + r2 = insn & 0xf; + tmp32_1 = load_reg32(r1); + tmp = load_reg(r2); + potential_page_fault(s); + gen_helper_stura(tmp, tmp32_1); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i64(tmp); + break; + case 0x50: /* CSP R1,R2 [RRE] */ + /* Compare And Swap And Purge */ + check_privileged(s, ilc); + r1 = (insn >> 4) & 0xf; + r2 = insn & 0xf; + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r2); + gen_helper_csp(cc_op, tmp32_1, tmp32_2); + set_cc_static(s); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x5f: /* CHSC ? 
*/ + /* Channel Subsystem Call */ + check_privileged(s, ilc); + gen_op_movi_cc(s, 3); + break; + case 0x78: /* STCKE D2(B2) [S] */ + /* Store Clock Extended */ + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + potential_page_fault(s); + gen_helper_stcke(cc_op, tmp); + set_cc_static(s); + tcg_temp_free_i64(tmp); + break; + case 0x79: /* SACF D2(B2) [S] */ + /* Store Clock Extended */ + check_privileged(s, ilc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + potential_page_fault(s); + gen_helper_sacf(tmp); + tcg_temp_free_i64(tmp); + /* addressing mode has changed, so end the block */ + s->pc += ilc * 2; + update_psw_addr(s); + s->is_jmp = DISAS_EXCP; + break; + case 0x7d: /* STSI D2,(B2) [S] */ + check_privileged(s, ilc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp32_1 = load_reg32(0); + tmp32_2 = load_reg32(1); + potential_page_fault(s); + gen_helper_stsi(cc_op, tmp, tmp32_1, tmp32_2); + set_cc_static(s); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x9d: /* LFPC D2(B2) [S] */ + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp2 = tcg_temp_new_i64(); + tmp32_1 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_1, tmp2); + tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUState, fpc)); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + break; + case 0xb1: /* STFL D2(B2) [S] */ + /* Store Facility List (CPU features) at 200 */ + check_privileged(s, ilc); + tmp2 = tcg_const_i64(0xc0000000); + tmp = tcg_const_i64(200); + tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s)); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp); + break; + case 0xb2: /* LPSWE D2(B2) [S] */ + /* Load PSW Extended */ + check_privileged(s, ilc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp2 = tcg_temp_new_i64(); + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s)); + tcg_gen_addi_i64(tmp, tmp, 8); + tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s)); + gen_helper_load_psw(tmp2, tmp3); + /* we need to keep cc_op intact */ + s->is_jmp = DISAS_JUMP; + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x20: /* SERVC R1,R2 [RRE] */ + /* SCLP Service call (PV hypercall) */ + check_privileged(s, ilc); + potential_page_fault(s); + tmp32_1 = load_reg32(r2); + tmp = load_reg(r1); + gen_helper_servc(cc_op, tmp32_1, tmp); + set_cc_static(s); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i64(tmp); + break; +#endif + default: + LOG_DISAS("illegal b2 operation 0x%x\n", op); + gen_illegal_opcode(s, ilc); + break; + } +} + +static void disas_b3(DisasContext *s, int op, int m3, int r1, int r2) +{ + TCGv_i64 tmp; + TCGv_i32 tmp32_1, tmp32_2, tmp32_3; + LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2); +#define FP_HELPER(i) \ + tmp32_1 = tcg_const_i32(r1); \ + tmp32_2 = tcg_const_i32(r2); \ + gen_helper_ ## i (tmp32_1, tmp32_2); \ + tcg_temp_free_i32(tmp32_1); \ + tcg_temp_free_i32(tmp32_2); + +#define FP_HELPER_CC(i) \ + tmp32_1 = tcg_const_i32(r1); \ + tmp32_2 = tcg_const_i32(r2); \ + gen_helper_ ## i (cc_op, tmp32_1, tmp32_2); \ + set_cc_static(s); \ + tcg_temp_free_i32(tmp32_1); \ + tcg_temp_free_i32(tmp32_2); + + switch (op) { + case 0x0: /* LPEBR R1,R2 [RRE] */ + FP_HELPER_CC(lpebr); + break; + case 0x2: /* LTEBR R1,R2 [RRE] */ + FP_HELPER_CC(ltebr); 
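/*
 * Illustrative sketch (not part of the patch): FP_HELPER/FP_HELPER_CC above
 * rely on ## token pasting, so FP_HELPER_CC(ltebr) expands into a call to
 * gen_helper_ltebr() plus the shared constant setup/teardown.  The same
 * pattern in miniature, with made-up helpers:
 */
#include <stdio.h>

static void do_add(int a, int b) { printf("add %d %d\n", a, b); }
static void do_sub(int a, int b) { printf("sub %d %d\n", a, b); }

#define CALL_OP(name, a, b) do_ ## name((a), (b))  /* pastes "do_" with the operation name */

static void demo_call_op(void)
{
    CALL_OP(add, 1, 2);   /* expands to do_add(1, 2) */
    CALL_OP(sub, 3, 4);   /* expands to do_sub(3, 4) */
}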
+ break; + case 0x3: /* LCEBR R1,R2 [RRE] */ + FP_HELPER_CC(lcebr); + break; + case 0x4: /* LDEBR R1,R2 [RRE] */ + FP_HELPER(ldebr); + break; + case 0x5: /* LXDBR R1,R2 [RRE] */ + FP_HELPER(lxdbr); + break; + case 0x9: /* CEBR R1,R2 [RRE] */ + FP_HELPER_CC(cebr); + break; + case 0xa: /* AEBR R1,R2 [RRE] */ + FP_HELPER_CC(aebr); + break; + case 0xb: /* SEBR R1,R2 [RRE] */ + FP_HELPER_CC(sebr); + break; + case 0xd: /* DEBR R1,R2 [RRE] */ + FP_HELPER(debr); + break; + case 0x10: /* LPDBR R1,R2 [RRE] */ + FP_HELPER_CC(lpdbr); + break; + case 0x12: /* LTDBR R1,R2 [RRE] */ + FP_HELPER_CC(ltdbr); + break; + case 0x13: /* LCDBR R1,R2 [RRE] */ + FP_HELPER_CC(lcdbr); + break; + case 0x15: /* SQBDR R1,R2 [RRE] */ + FP_HELPER(sqdbr); + break; + case 0x17: /* MEEBR R1,R2 [RRE] */ + FP_HELPER(meebr); + break; + case 0x19: /* CDBR R1,R2 [RRE] */ + FP_HELPER_CC(cdbr); + break; + case 0x1a: /* ADBR R1,R2 [RRE] */ + FP_HELPER_CC(adbr); + break; + case 0x1b: /* SDBR R1,R2 [RRE] */ + FP_HELPER_CC(sdbr); + break; + case 0x1c: /* MDBR R1,R2 [RRE] */ + FP_HELPER(mdbr); + break; + case 0x1d: /* DDBR R1,R2 [RRE] */ + FP_HELPER(ddbr); + break; + case 0xe: /* MAEBR R1,R3,R2 [RRF] */ + case 0x1e: /* MADBR R1,R3,R2 [RRF] */ + case 0x1f: /* MSDBR R1,R3,R2 [RRF] */ + /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */ + tmp32_1 = tcg_const_i32(m3); + tmp32_2 = tcg_const_i32(r2); + tmp32_3 = tcg_const_i32(r1); + switch (op) { + case 0xe: + gen_helper_maebr(tmp32_1, tmp32_3, tmp32_2); + break; + case 0x1e: + gen_helper_madbr(tmp32_1, tmp32_3, tmp32_2); + break; + case 0x1f: + gen_helper_msdbr(tmp32_1, tmp32_3, tmp32_2); + break; + default: + tcg_abort(); + } + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + case 0x40: /* LPXBR R1,R2 [RRE] */ + FP_HELPER_CC(lpxbr); + break; + case 0x42: /* LTXBR R1,R2 [RRE] */ + FP_HELPER_CC(ltxbr); + break; + case 0x43: /* LCXBR R1,R2 [RRE] */ + FP_HELPER_CC(lcxbr); + break; + case 0x44: /* LEDBR R1,R2 [RRE] */ + FP_HELPER(ledbr); + break; + case 0x45: /* LDXBR R1,R2 [RRE] */ + FP_HELPER(ldxbr); + break; + case 0x46: /* LEXBR R1,R2 [RRE] */ + FP_HELPER(lexbr); + break; + case 0x49: /* CXBR R1,R2 [RRE] */ + FP_HELPER_CC(cxbr); + break; + case 0x4a: /* AXBR R1,R2 [RRE] */ + FP_HELPER_CC(axbr); + break; + case 0x4b: /* SXBR R1,R2 [RRE] */ + FP_HELPER_CC(sxbr); + break; + case 0x4c: /* MXBR R1,R2 [RRE] */ + FP_HELPER(mxbr); + break; + case 0x4d: /* DXBR R1,R2 [RRE] */ + FP_HELPER(dxbr); + break; + case 0x65: /* LXR R1,R2 [RRE] */ + tmp = load_freg(r2); + store_freg(r1, tmp); + tcg_temp_free_i64(tmp); + tmp = load_freg(r2 + 2); + store_freg(r1 + 2, tmp); + tcg_temp_free_i64(tmp); + break; + case 0x74: /* LZER R1 [RRE] */ + tmp32_1 = tcg_const_i32(r1); + gen_helper_lzer(tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x75: /* LZDR R1 [RRE] */ + tmp32_1 = tcg_const_i32(r1); + gen_helper_lzdr(tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x76: /* LZXR R1 [RRE] */ + tmp32_1 = tcg_const_i32(r1); + gen_helper_lzxr(tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x84: /* SFPC R1 [RRE] */ + tmp32_1 = load_reg32(r1); + tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUState, fpc)); + tcg_temp_free_i32(tmp32_1); + break; + case 0x8c: /* EFPC R1 [RRE] */ + tmp32_1 = tcg_temp_new_i32(); + tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUState, fpc)); + store_reg32(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x94: /* CEFBR R1,R2 [RRE] */ + case 0x95: /* CDFBR R1,R2 [RRE] */ + case 0x96: /* CXFBR R1,R2 [RRE] */ + tmp32_1 = 
tcg_const_i32(r1); + tmp32_2 = load_reg32(r2); + switch (op) { + case 0x94: + gen_helper_cefbr(tmp32_1, tmp32_2); + break; + case 0x95: + gen_helper_cdfbr(tmp32_1, tmp32_2); + break; + case 0x96: + gen_helper_cxfbr(tmp32_1, tmp32_2); + break; + default: + tcg_abort(); + } + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x98: /* CFEBR R1,R2 [RRE] */ + case 0x99: /* CFDBR R1,R2 [RRE] */ + case 0x9a: /* CFXBR R1,R2 [RRE] */ + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r2); + tmp32_3 = tcg_const_i32(m3); + switch (op) { + case 0x98: + gen_helper_cfebr(cc_op, tmp32_1, tmp32_2, tmp32_3); + break; + case 0x99: + gen_helper_cfdbr(cc_op, tmp32_1, tmp32_2, tmp32_3); + break; + case 0x9a: + gen_helper_cfxbr(cc_op, tmp32_1, tmp32_2, tmp32_3); + break; + default: + tcg_abort(); + } + set_cc_static(s); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + case 0xa4: /* CEGBR R1,R2 [RRE] */ + case 0xa5: /* CDGBR R1,R2 [RRE] */ + tmp32_1 = tcg_const_i32(r1); + tmp = load_reg(r2); + switch (op) { + case 0xa4: + gen_helper_cegbr(tmp32_1, tmp); + break; + case 0xa5: + gen_helper_cdgbr(tmp32_1, tmp); + break; + default: + tcg_abort(); + } + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i64(tmp); + break; + case 0xa6: /* CXGBR R1,R2 [RRE] */ + tmp32_1 = tcg_const_i32(r1); + tmp = load_reg(r2); + gen_helper_cxgbr(tmp32_1, tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i64(tmp); + break; + case 0xa8: /* CGEBR R1,R2 [RRE] */ + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r2); + tmp32_3 = tcg_const_i32(m3); + gen_helper_cgebr(cc_op, tmp32_1, tmp32_2, tmp32_3); + set_cc_static(s); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + case 0xa9: /* CGDBR R1,R2 [RRE] */ + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r2); + tmp32_3 = tcg_const_i32(m3); + gen_helper_cgdbr(cc_op, tmp32_1, tmp32_2, tmp32_3); + set_cc_static(s); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + case 0xaa: /* CGXBR R1,R2 [RRE] */ + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r2); + tmp32_3 = tcg_const_i32(m3); + gen_helper_cgxbr(cc_op, tmp32_1, tmp32_2, tmp32_3); + set_cc_static(s); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + default: + LOG_DISAS("illegal b3 operation 0x%x\n", op); + gen_illegal_opcode(s, 2); + break; + } + +#undef FP_HELPER_CC +#undef FP_HELPER +} + +static void disas_b9(DisasContext *s, int op, int r1, int r2) +{ + TCGv_i64 tmp, tmp2, tmp3; + TCGv_i32 tmp32_1, tmp32_2, tmp32_3; + + LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2); + switch (op) { + case 0x0: /* LPGR R1,R2 [RRE] */ + case 0x1: /* LNGR R1,R2 [RRE] */ + case 0x2: /* LTGR R1,R2 [RRE] */ + case 0x3: /* LCGR R1,R2 [RRE] */ + case 0x10: /* LPGFR R1,R2 [RRE] */ + case 0x11: /* LNFGR R1,R2 [RRE] */ + case 0x12: /* LTGFR R1,R2 [RRE] */ + case 0x13: /* LCGFR R1,R2 [RRE] */ + if (op & 0x10) { + tmp = load_reg32_i64(r2); + } else { + tmp = load_reg(r2); + } + switch (op & 0xf) { + case 0x0: /* LP?GR */ + set_cc_abs64(s, tmp); + gen_helper_abs_i64(tmp, tmp); + store_reg(r1, tmp); + break; + case 0x1: /* LN?GR */ + set_cc_nabs64(s, tmp); + gen_helper_nabs_i64(tmp, tmp); + store_reg(r1, tmp); + break; + case 0x2: /* LT?GR */ + if (r1 != r2) { + store_reg(r1, tmp); + } + set_cc_s64(s, tmp); + break; + case 0x3: /* LC?GR */ + tcg_gen_neg_i64(regs[r1], tmp); + set_cc_comp64(s, regs[r1]); + 
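/*
 * Illustrative sketch (not part of the patch): in the block above the low
 * nibble of the opcode picks load-positive, load-negative, load-and-test or
 * load-complement, while bit 0x10 only switches the source to the 32-bit
 * (...GFR) form of r2.  In C, leaving out cc handling and the INT64_MIN
 * corner case:
 */
#include <stdint.h>
#include <stdlib.h>

static int64_t load_family(int op_low_nibble, int64_t src)
{
    switch (op_low_nibble) {
    case 0x0: return src < 0 ? -src : src;   /* LPGR/LPGFR: load positive   */
    case 0x1: return src < 0 ? src : -src;   /* LNGR/LNGFR: load negative   */
    case 0x2: return src;                    /* LTGR/LTGFR: load and test   */
    case 0x3: return -src;                   /* LCGR/LCGFR: load complement */
    default:  abort();
    }
}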
break; + } + tcg_temp_free_i64(tmp); + break; + case 0x4: /* LGR R1,R2 [RRE] */ + store_reg(r1, regs[r2]); + break; + case 0x6: /* LGBR R1,R2 [RRE] */ + tmp2 = load_reg(r2); + tcg_gen_ext8s_i64(tmp2, tmp2); + store_reg(r1, tmp2); + tcg_temp_free_i64(tmp2); + break; + case 0x8: /* AGR R1,R2 [RRE] */ + case 0xa: /* ALGR R1,R2 [RRE] */ + tmp = load_reg(r1); + tmp2 = load_reg(r2); + tmp3 = tcg_temp_new_i64(); + tcg_gen_add_i64(tmp3, tmp, tmp2); + store_reg(r1, tmp3); + switch (op) { + case 0x8: + set_cc_add64(s, tmp, tmp2, tmp3); + break; + case 0xa: + set_cc_addu64(s, tmp, tmp2, tmp3); + break; + default: + tcg_abort(); + } + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x9: /* SGR R1,R2 [RRE] */ + case 0xb: /* SLGR R1,R2 [RRE] */ + case 0x1b: /* SLGFR R1,R2 [RRE] */ + case 0x19: /* SGFR R1,R2 [RRE] */ + tmp = load_reg(r1); + switch (op) { + case 0x1b: + tmp32_1 = load_reg32(r2); + tmp2 = tcg_temp_new_i64(); + tcg_gen_extu_i32_i64(tmp2, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x19: + tmp32_1 = load_reg32(r2); + tmp2 = tcg_temp_new_i64(); + tcg_gen_ext_i32_i64(tmp2, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + default: + tmp2 = load_reg(r2); + break; + } + tmp3 = tcg_temp_new_i64(); + tcg_gen_sub_i64(tmp3, tmp, tmp2); + store_reg(r1, tmp3); + switch (op) { + case 0x9: + case 0x19: + set_cc_sub64(s, tmp, tmp2, tmp3); + break; + case 0xb: + case 0x1b: + set_cc_subu64(s, tmp, tmp2, tmp3); + break; + default: + tcg_abort(); + } + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0xc: /* MSGR R1,R2 [RRE] */ + case 0x1c: /* MSGFR R1,R2 [RRE] */ + tmp = load_reg(r1); + tmp2 = load_reg(r2); + if (op == 0x1c) { + tcg_gen_ext32s_i64(tmp2, tmp2); + } + tcg_gen_mul_i64(tmp, tmp, tmp2); + store_reg(r1, tmp); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0xd: /* DSGR R1,R2 [RRE] */ + case 0x1d: /* DSGFR R1,R2 [RRE] */ + tmp = load_reg(r1 + 1); + if (op == 0xd) { + tmp2 = load_reg(r2); + } else { + tmp32_1 = load_reg32(r2); + tmp2 = tcg_temp_new_i64(); + tcg_gen_ext_i32_i64(tmp2, tmp32_1); + tcg_temp_free_i32(tmp32_1); + } + tmp3 = tcg_temp_new_i64(); + tcg_gen_div_i64(tmp3, tmp, tmp2); + store_reg(r1 + 1, tmp3); + tcg_gen_rem_i64(tmp3, tmp, tmp2); + store_reg(r1, tmp3); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x14: /* LGFR R1,R2 [RRE] */ + tmp32_1 = load_reg32(r2); + tmp = tcg_temp_new_i64(); + tcg_gen_ext_i32_i64(tmp, tmp32_1); + store_reg(r1, tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i64(tmp); + break; + case 0x16: /* LLGFR R1,R2 [RRE] */ + tmp32_1 = load_reg32(r2); + tmp = tcg_temp_new_i64(); + tcg_gen_extu_i32_i64(tmp, tmp32_1); + store_reg(r1, tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i64(tmp); + break; + case 0x17: /* LLGTR R1,R2 [RRE] */ + tmp32_1 = load_reg32(r2); + tmp = tcg_temp_new_i64(); + tcg_gen_andi_i32(tmp32_1, tmp32_1, 0x7fffffffUL); + tcg_gen_extu_i32_i64(tmp, tmp32_1); + store_reg(r1, tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i64(tmp); + break; + case 0x18: /* AGFR R1,R2 [RRE] */ + case 0x1a: /* ALGFR R1,R2 [RRE] */ + tmp32_1 = load_reg32(r2); + tmp2 = tcg_temp_new_i64(); + if (op == 0x18) { + tcg_gen_ext_i32_i64(tmp2, tmp32_1); + } else { + tcg_gen_extu_i32_i64(tmp2, tmp32_1); + } + tcg_temp_free_i32(tmp32_1); + tmp = load_reg(r1); + tmp3 = tcg_temp_new_i64(); + tcg_gen_add_i64(tmp3, tmp, tmp2); + store_reg(r1, tmp3); + if (op == 0x18) { + set_cc_add64(s, tmp, tmp2, 
tmp3); + } else { + set_cc_addu64(s, tmp, tmp2, tmp3); + } + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x0f: /* LRVGR R1,R2 [RRE] */ + tcg_gen_bswap64_i64(regs[r1], regs[r2]); + break; + case 0x1f: /* LRVR R1,R2 [RRE] */ + tmp32_1 = load_reg32(r2); + tcg_gen_bswap32_i32(tmp32_1, tmp32_1); + store_reg32(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x20: /* CGR R1,R2 [RRE] */ + case 0x30: /* CGFR R1,R2 [RRE] */ + tmp2 = load_reg(r2); + if (op == 0x30) { + tcg_gen_ext32s_i64(tmp2, tmp2); + } + tmp = load_reg(r1); + cmp_s64(s, tmp, tmp2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x21: /* CLGR R1,R2 [RRE] */ + case 0x31: /* CLGFR R1,R2 [RRE] */ + tmp2 = load_reg(r2); + if (op == 0x31) { + tcg_gen_ext32u_i64(tmp2, tmp2); + } + tmp = load_reg(r1); + cmp_u64(s, tmp, tmp2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x26: /* LBR R1,R2 [RRE] */ + tmp32_1 = load_reg32(r2); + tcg_gen_ext8s_i32(tmp32_1, tmp32_1); + store_reg32(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x27: /* LHR R1,R2 [RRE] */ + tmp32_1 = load_reg32(r2); + tcg_gen_ext16s_i32(tmp32_1, tmp32_1); + store_reg32(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x80: /* NGR R1,R2 [RRE] */ + case 0x81: /* OGR R1,R2 [RRE] */ + case 0x82: /* XGR R1,R2 [RRE] */ + tmp = load_reg(r1); + tmp2 = load_reg(r2); + switch (op) { + case 0x80: + tcg_gen_and_i64(tmp, tmp, tmp2); + break; + case 0x81: + tcg_gen_or_i64(tmp, tmp, tmp2); + break; + case 0x82: + tcg_gen_xor_i64(tmp, tmp, tmp2); + break; + default: + tcg_abort(); + } + store_reg(r1, tmp); + set_cc_nz_u64(s, tmp); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x83: /* FLOGR R1,R2 [RRE] */ + tmp = load_reg(r2); + tmp32_1 = tcg_const_i32(r1); + gen_helper_flogr(cc_op, tmp32_1, tmp); + set_cc_static(s); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + break; + case 0x84: /* LLGCR R1,R2 [RRE] */ + tmp = load_reg(r2); + tcg_gen_andi_i64(tmp, tmp, 0xff); + store_reg(r1, tmp); + tcg_temp_free_i64(tmp); + break; + case 0x85: /* LLGHR R1,R2 [RRE] */ + tmp = load_reg(r2); + tcg_gen_andi_i64(tmp, tmp, 0xffff); + store_reg(r1, tmp); + tcg_temp_free_i64(tmp); + break; + case 0x87: /* DLGR R1,R2 [RRE] */ + tmp32_1 = tcg_const_i32(r1); + tmp = load_reg(r2); + gen_helper_dlg(tmp32_1, tmp); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + break; + case 0x88: /* ALCGR R1,R2 [RRE] */ + tmp = load_reg(r1); + tmp2 = load_reg(r2); + tmp3 = tcg_temp_new_i64(); + gen_op_calc_cc(s); + tcg_gen_extu_i32_i64(tmp3, cc_op); + tcg_gen_shri_i64(tmp3, tmp3, 1); + tcg_gen_andi_i64(tmp3, tmp3, 1); + tcg_gen_add_i64(tmp3, tmp2, tmp3); + tcg_gen_add_i64(tmp3, tmp, tmp3); + store_reg(r1, tmp3); + set_cc_addu64(s, tmp, tmp2, tmp3); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x89: /* SLBGR R1,R2 [RRE] */ + tmp = load_reg(r1); + tmp2 = load_reg(r2); + tmp32_1 = tcg_const_i32(r1); + gen_op_calc_cc(s); + gen_helper_slbg(cc_op, cc_op, tmp32_1, tmp, tmp2); + set_cc_static(s); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + break; + case 0x94: /* LLCR R1,R2 [RRE] */ + tmp32_1 = load_reg32(r2); + tcg_gen_andi_i32(tmp32_1, tmp32_1, 0xff); + store_reg32(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x95: /* LLHR R1,R2 [RRE] */ + tmp32_1 = load_reg32(r2); + tcg_gen_andi_i32(tmp32_1, tmp32_1, 0xffff); + store_reg32(r1, tmp32_1); + 
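/*
 * Illustrative sketch (not part of the patch): ALCGR above (and ALCG/ALC in
 * the memory forms) recovers the incoming carry as (cc >> 1) & 1 and folds it
 * into an unsigned add, after which set_cc_addu64() records the new carry in
 * that same cc bit.  One step of that carry chain in C:
 */
#include <stdint.h>

static uint64_t add_logical_with_carry(uint64_t a, uint64_t b, int cc_in, int *carry_out)
{
    uint64_t carry_in = (uint64_t)((cc_in >> 1) & 1);  /* carry lives in bit 1 of the cc */
    uint64_t sum = a + b + carry_in;
    /* carry-out: the addition wrapped; the second term covers b == ~0 with carry_in == 1 */
    *carry_out = (sum < a) || (carry_in && (b + carry_in) == 0);
    return sum;
}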
tcg_temp_free_i32(tmp32_1); + break; + case 0x96: /* MLR R1,R2 [RRE] */ + /* reg(r1, r1+1) = reg(r1+1) * reg(r2) */ + tmp2 = load_reg(r2); + tmp3 = load_reg((r1 + 1) & 15); + tcg_gen_ext32u_i64(tmp2, tmp2); + tcg_gen_ext32u_i64(tmp3, tmp3); + tcg_gen_mul_i64(tmp2, tmp2, tmp3); + store_reg32_i64((r1 + 1) & 15, tmp2); + tcg_gen_shri_i64(tmp2, tmp2, 32); + store_reg32_i64(r1, tmp2); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x97: /* DLR R1,R2 [RRE] */ + /* reg(r1) = reg(r1, r1+1) % reg(r2) */ + /* reg(r1+1) = reg(r1, r1+1) / reg(r2) */ + tmp = load_reg(r1); + tmp2 = load_reg(r2); + tmp3 = load_reg((r1 + 1) & 15); + tcg_gen_ext32u_i64(tmp2, tmp2); + tcg_gen_ext32u_i64(tmp3, tmp3); + tcg_gen_shli_i64(tmp, tmp, 32); + tcg_gen_or_i64(tmp, tmp, tmp3); + + tcg_gen_rem_i64(tmp3, tmp, tmp2); + tcg_gen_div_i64(tmp, tmp, tmp2); + store_reg32_i64((r1 + 1) & 15, tmp); + store_reg32_i64(r1, tmp3); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x98: /* ALCR R1,R2 [RRE] */ + tmp32_1 = load_reg32(r1); + tmp32_2 = load_reg32(r2); + tmp32_3 = tcg_temp_new_i32(); + /* XXX possible optimization point */ + gen_op_calc_cc(s); + gen_helper_addc_u32(tmp32_3, cc_op, tmp32_1, tmp32_2); + set_cc_addu32(s, tmp32_1, tmp32_2, tmp32_3); + store_reg32(r1, tmp32_3); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + case 0x99: /* SLBR R1,R2 [RRE] */ + tmp32_1 = load_reg32(r2); + tmp32_2 = tcg_const_i32(r1); + gen_op_calc_cc(s); + gen_helper_slb(cc_op, cc_op, tmp32_2, tmp32_1); + set_cc_static(s); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + default: + LOG_DISAS("illegal b9 operation 0x%x\n", op); + gen_illegal_opcode(s, 2); + break; + } +} + +static void disas_c0(DisasContext *s, int op, int r1, int i2) +{ + TCGv_i64 tmp; + TCGv_i32 tmp32_1, tmp32_2; + uint64_t target = s->pc + i2 * 2LL; + int l1; + + LOG_DISAS("disas_c0: op 0x%x r1 %d i2 %d\n", op, r1, i2); + + switch (op) { + case 0: /* larl r1, i2 */ + tmp = tcg_const_i64(target); + store_reg(r1, tmp); + tcg_temp_free_i64(tmp); + break; + case 0x1: /* LGFI R1,I2 [RIL] */ + tmp = tcg_const_i64((int64_t)i2); + store_reg(r1, tmp); + tcg_temp_free_i64(tmp); + break; + case 0x4: /* BRCL M1,I2 [RIL] */ + /* m1 & (1 << (3 - cc)) */ + tmp32_1 = tcg_const_i32(3); + tmp32_2 = tcg_const_i32(1); + gen_op_calc_cc(s); + tcg_gen_sub_i32(tmp32_1, tmp32_1, cc_op); + tcg_gen_shl_i32(tmp32_2, tmp32_2, tmp32_1); + tcg_temp_free_i32(tmp32_1); + tmp32_1 = tcg_const_i32(r1); /* m1 == r1 */ + tcg_gen_and_i32(tmp32_1, tmp32_1, tmp32_2); + l1 = gen_new_label(); + tcg_gen_brcondi_i32(TCG_COND_EQ, tmp32_1, 0, l1); + gen_goto_tb(s, 0, target); + gen_set_label(l1); + gen_goto_tb(s, 1, s->pc + 6); + s->is_jmp = DISAS_TB_JUMP; + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x5: /* brasl r1, i2 */ + tmp = tcg_const_i64(pc_to_link_info(s, s->pc + 6)); + store_reg(r1, tmp); + tcg_temp_free_i64(tmp); + gen_goto_tb(s, 0, target); + s->is_jmp = DISAS_TB_JUMP; + break; + case 0x7: /* XILF R1,I2 [RIL] */ + case 0xb: /* NILF R1,I2 [RIL] */ + case 0xd: /* OILF R1,I2 [RIL] */ + tmp32_1 = load_reg32(r1); + switch (op) { + case 0x7: + tcg_gen_xori_i32(tmp32_1, tmp32_1, (uint32_t)i2); + break; + case 0xb: + tcg_gen_andi_i32(tmp32_1, tmp32_1, (uint32_t)i2); + break; + case 0xd: + tcg_gen_ori_i32(tmp32_1, tmp32_1, (uint32_t)i2); + break; + default: + tcg_abort(); + } + store_reg32(r1, tmp32_1); + set_cc_nz_u32(s, tmp32_1); + 
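/*
 * Illustrative sketch (not part of the patch): DLR above (and DL in the E3
 * block) treats r1:r1+1 as one 64-bit unsigned dividend with the high word in
 * the even register, divides by the 32-bit operand, and writes the remainder
 * to r1 and the quotient to r1+1, as the case's comments spell out.  In C:
 */
#include <stdint.h>

static void divide_logical_pair(uint32_t *r_even /* r1 */, uint32_t *r_odd /* r1+1 */,
                                uint32_t divisor)
{
    uint64_t dividend = ((uint64_t)*r_even << 32) | *r_odd;
    /* divisor == 0 and quotients that do not fit in 32 bits trap on real
     * hardware; neither case is handled in this sketch */
    *r_odd  = (uint32_t)(dividend / divisor);   /* quotient  -> odd register  */
    *r_even = (uint32_t)(dividend % divisor);   /* remainder -> even register */
}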
tcg_temp_free_i32(tmp32_1); + break; + case 0x9: /* IILF R1,I2 [RIL] */ + tmp32_1 = tcg_const_i32((uint32_t)i2); + store_reg32(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0xa: /* NIHF R1,I2 [RIL] */ + tmp = load_reg(r1); + tmp32_1 = tcg_temp_new_i32(); + tcg_gen_andi_i64(tmp, tmp, (((uint64_t)((uint32_t)i2)) << 32) + | 0xffffffffULL); + store_reg(r1, tmp); + tcg_gen_shri_i64(tmp, tmp, 32); + tcg_gen_trunc_i64_i32(tmp32_1, tmp); + set_cc_nz_u32(s, tmp32_1); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + break; + case 0xe: /* LLIHF R1,I2 [RIL] */ + tmp = tcg_const_i64(((uint64_t)(uint32_t)i2) << 32); + store_reg(r1, tmp); + tcg_temp_free_i64(tmp); + break; + case 0xf: /* LLILF R1,I2 [RIL] */ + tmp = tcg_const_i64((uint32_t)i2); + store_reg(r1, tmp); + tcg_temp_free_i64(tmp); + break; + default: + LOG_DISAS("illegal c0 operation 0x%x\n", op); + gen_illegal_opcode(s, 3); + break; + } +} + +static void disas_c2(DisasContext *s, int op, int r1, int i2) +{ + TCGv_i64 tmp, tmp2, tmp3; + TCGv_i32 tmp32_1, tmp32_2, tmp32_3; + + switch (op) { + case 0x4: /* SLGFI R1,I2 [RIL] */ + case 0xa: /* ALGFI R1,I2 [RIL] */ + tmp = load_reg(r1); + tmp2 = tcg_const_i64((uint64_t)(uint32_t)i2); + tmp3 = tcg_temp_new_i64(); + switch (op) { + case 0x4: + tcg_gen_sub_i64(tmp3, tmp, tmp2); + set_cc_subu64(s, tmp, tmp2, tmp3); + break; + case 0xa: + tcg_gen_add_i64(tmp3, tmp, tmp2); + set_cc_addu64(s, tmp, tmp2, tmp3); + break; + default: + tcg_abort(); + } + store_reg(r1, tmp3); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x5: /* SLFI R1,I2 [RIL] */ + case 0xb: /* ALFI R1,I2 [RIL] */ + tmp32_1 = load_reg32(r1); + tmp32_2 = tcg_const_i32(i2); + tmp32_3 = tcg_temp_new_i32(); + switch (op) { + case 0x5: + tcg_gen_sub_i32(tmp32_3, tmp32_1, tmp32_2); + set_cc_subu32(s, tmp32_1, tmp32_2, tmp32_3); + break; + case 0xb: + tcg_gen_add_i32(tmp32_3, tmp32_1, tmp32_2); + set_cc_addu32(s, tmp32_1, tmp32_2, tmp32_3); + break; + default: + tcg_abort(); + } + store_reg32(r1, tmp32_3); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + case 0xc: /* CGFI R1,I2 [RIL] */ + tmp = load_reg(r1); + cmp_s64c(s, tmp, (int64_t)i2); + tcg_temp_free_i64(tmp); + break; + case 0xe: /* CLGFI R1,I2 [RIL] */ + tmp = load_reg(r1); + cmp_u64c(s, tmp, (uint64_t)(uint32_t)i2); + tcg_temp_free_i64(tmp); + break; + case 0xd: /* CFI R1,I2 [RIL] */ + tmp32_1 = load_reg32(r1); + cmp_s32c(s, tmp32_1, i2); + tcg_temp_free_i32(tmp32_1); + break; + case 0xf: /* CLFI R1,I2 [RIL] */ + tmp32_1 = load_reg32(r1); + cmp_u32c(s, tmp32_1, i2); + tcg_temp_free_i32(tmp32_1); + break; + default: + LOG_DISAS("illegal c2 operation 0x%x\n", op); + gen_illegal_opcode(s, 3); + break; + } +} + +static void gen_and_or_xor_i32(int opc, TCGv_i32 tmp, TCGv_i32 tmp2) +{ + switch (opc & 0xf) { + case 0x4: + tcg_gen_and_i32(tmp, tmp, tmp2); + break; + case 0x6: + tcg_gen_or_i32(tmp, tmp, tmp2); + break; + case 0x7: + tcg_gen_xor_i32(tmp, tmp, tmp2); + break; + default: + tcg_abort(); + } +} + +static void disas_s390_insn(DisasContext *s) +{ + TCGv_i64 tmp, tmp2, tmp3, tmp4; + TCGv_i32 tmp32_1, tmp32_2, tmp32_3, tmp32_4; + unsigned char opc; + uint64_t insn; + int op, r1, r2, r3, d1, d2, x2, b1, b2, i, i2, r1b; + TCGv_i32 vl; + int ilc; + int l1; + + opc = ldub_code(s->pc); + LOG_DISAS("opc 0x%x\n", opc); + + ilc = get_ilc(opc); + + switch (opc) { +#ifndef CONFIG_USER_ONLY + case 0x01: /* SAM */ + insn = ld_code2(s->pc); + /* set addressing mode, but we only do 
64bit anyways */ + break; +#endif + case 0x6: /* BCTR R1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp32_1 = load_reg32(r1); + tcg_gen_subi_i32(tmp32_1, tmp32_1, 1); + store_reg32(r1, tmp32_1); + + if (r2) { + gen_update_cc_op(s); + l1 = gen_new_label(); + tcg_gen_brcondi_i32(TCG_COND_NE, tmp32_1, 0, l1); + + /* not taking the branch, jump to after the instruction */ + gen_goto_tb(s, 0, s->pc + 2); + gen_set_label(l1); + + /* take the branch, move R2 into psw.addr */ + tmp32_1 = load_reg32(r2); + tmp = tcg_temp_new_i64(); + tcg_gen_extu_i32_i64(tmp, tmp32_1); + tcg_gen_mov_i64(psw_addr, tmp); + s->is_jmp = DISAS_JUMP; + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i64(tmp); + } + break; + case 0x7: /* BCR M1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + if (r2) { + tmp = load_reg(r2); + gen_bcr(s, r1, tmp, s->pc); + tcg_temp_free_i64(tmp); + s->is_jmp = DISAS_TB_JUMP; + } else { + /* XXX: "serialization and checkpoint-synchronization function"? */ + } + break; + case 0xa: /* SVC I [RR] */ + insn = ld_code2(s->pc); + debug_insn(insn); + i = insn & 0xff; + update_psw_addr(s); + gen_op_calc_cc(s); + tmp32_1 = tcg_const_i32(i); + tmp32_2 = tcg_const_i32(ilc * 2); + tmp32_3 = tcg_const_i32(EXCP_SVC); + tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUState, int_svc_code)); + tcg_gen_st_i32(tmp32_2, cpu_env, offsetof(CPUState, int_svc_ilc)); + gen_helper_exception(tmp32_3); + s->is_jmp = DISAS_EXCP; + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + case 0xd: /* BASR R1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp = tcg_const_i64(pc_to_link_info(s, s->pc + 2)); + store_reg(r1, tmp); + if (r2) { + tmp2 = load_reg(r2); + tcg_gen_mov_i64(psw_addr, tmp2); + tcg_temp_free_i64(tmp2); + s->is_jmp = DISAS_JUMP; + } + tcg_temp_free_i64(tmp); + break; + case 0xe: /* MVCL R1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r2); + potential_page_fault(s); + gen_helper_mvcl(cc_op, tmp32_1, tmp32_2); + set_cc_static(s); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x10: /* LPR R1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp32_1 = load_reg32(r2); + set_cc_abs32(s, tmp32_1); + gen_helper_abs_i32(tmp32_1, tmp32_1); + store_reg32(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x11: /* LNR R1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp32_1 = load_reg32(r2); + set_cc_nabs32(s, tmp32_1); + gen_helper_nabs_i32(tmp32_1, tmp32_1); + store_reg32(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x12: /* LTR R1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp32_1 = load_reg32(r2); + if (r1 != r2) { + store_reg32(r1, tmp32_1); + } + set_cc_s32(s, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x13: /* LCR R1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp32_1 = load_reg32(r2); + tcg_gen_neg_i32(tmp32_1, tmp32_1); + store_reg32(r1, tmp32_1); + set_cc_comp32(s, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x14: /* NR R1,R2 [RR] */ + case 0x16: /* OR R1,R2 [RR] */ + case 0x17: /* XR R1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp32_2 = load_reg32(r2); + tmp32_1 = load_reg32(r1); + gen_and_or_xor_i32(opc, tmp32_1, tmp32_2); + store_reg32(r1, tmp32_1); + set_cc_nz_u32(s, tmp32_1); + tcg_temp_free_i32(tmp32_1); 
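/*
 * Illustrative sketch (not part of the patch): BCR above (and BC/BRC/BRCL in
 * the other decode paths) all decide the branch from the 4-bit mask M1 and
 * the current condition code; the BRCL case in disas_c0 writes the test out
 * explicitly as m1 & (1 << (3 - cc)).  In C:
 */
static int branch_taken(unsigned m1 /* 4-bit mask */, unsigned cc /* 0..3 */)
{
    /* mask bit 8 selects cc 0, bit 4 cc 1, bit 2 cc 2, bit 1 cc 3;
     * m1 == 0xf is therefore an unconditional branch, m1 == 0 never branches */
    return (m1 & (1u << (3 - cc))) != 0;
}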
+ tcg_temp_free_i32(tmp32_2); + break; + case 0x18: /* LR R1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp32_1 = load_reg32(r2); + store_reg32(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x15: /* CLR R1,R2 [RR] */ + case 0x19: /* CR R1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp32_1 = load_reg32(r1); + tmp32_2 = load_reg32(r2); + if (opc == 0x15) { + cmp_u32(s, tmp32_1, tmp32_2); + } else { + cmp_s32(s, tmp32_1, tmp32_2); + } + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x1a: /* AR R1,R2 [RR] */ + case 0x1e: /* ALR R1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp32_1 = load_reg32(r1); + tmp32_2 = load_reg32(r2); + tmp32_3 = tcg_temp_new_i32(); + tcg_gen_add_i32(tmp32_3, tmp32_1, tmp32_2); + store_reg32(r1, tmp32_3); + if (opc == 0x1a) { + set_cc_add32(s, tmp32_1, tmp32_2, tmp32_3); + } else { + set_cc_addu32(s, tmp32_1, tmp32_2, tmp32_3); + } + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + case 0x1b: /* SR R1,R2 [RR] */ + case 0x1f: /* SLR R1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp32_1 = load_reg32(r1); + tmp32_2 = load_reg32(r2); + tmp32_3 = tcg_temp_new_i32(); + tcg_gen_sub_i32(tmp32_3, tmp32_1, tmp32_2); + store_reg32(r1, tmp32_3); + if (opc == 0x1b) { + set_cc_sub32(s, tmp32_1, tmp32_2, tmp32_3); + } else { + set_cc_subu32(s, tmp32_1, tmp32_2, tmp32_3); + } + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + case 0x1c: /* MR R1,R2 [RR] */ + /* reg(r1, r1+1) = reg(r1+1) * reg(r2) */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp2 = load_reg(r2); + tmp3 = load_reg((r1 + 1) & 15); + tcg_gen_ext32s_i64(tmp2, tmp2); + tcg_gen_ext32s_i64(tmp3, tmp3); + tcg_gen_mul_i64(tmp2, tmp2, tmp3); + store_reg32_i64((r1 + 1) & 15, tmp2); + tcg_gen_shri_i64(tmp2, tmp2, 32); + store_reg32_i64(r1, tmp2); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x1d: /* DR R1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp32_1 = load_reg32(r1); + tmp32_2 = load_reg32(r1 + 1); + tmp32_3 = load_reg32(r2); + + tmp = tcg_temp_new_i64(); /* dividend */ + tmp2 = tcg_temp_new_i64(); /* divisor */ + tmp3 = tcg_temp_new_i64(); + + /* dividend is r(r1 << 32) | r(r1 + 1) */ + tcg_gen_extu_i32_i64(tmp, tmp32_1); + tcg_gen_extu_i32_i64(tmp2, tmp32_2); + tcg_gen_shli_i64(tmp, tmp, 32); + tcg_gen_or_i64(tmp, tmp, tmp2); + + /* divisor is r(r2) */ + tcg_gen_ext_i32_i64(tmp2, tmp32_3); + + tcg_gen_div_i64(tmp3, tmp, tmp2); + tcg_gen_rem_i64(tmp, tmp, tmp2); + + tcg_gen_trunc_i64_i32(tmp32_1, tmp); + tcg_gen_trunc_i64_i32(tmp32_2, tmp3); + + store_reg32(r1, tmp32_1); /* remainder */ + store_reg32(r1 + 1, tmp32_2); /* quotient */ + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x28: /* LDR R1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp = load_freg(r2); + store_freg(r1, tmp); + tcg_temp_free_i64(tmp); + break; + case 0x38: /* LER R1,R2 [RR] */ + insn = ld_code2(s->pc); + decode_rr(s, insn, &r1, &r2); + tmp32_1 = load_freg32(r2); + store_freg32(r1, tmp32_1); + tcg_temp_free_i32(tmp32_1); + break; + case 0x40: /* STH R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = load_reg(r1); + 
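/*
 * Illustrative sketch, not part of the original patch: the DR case above
 * treats the even/odd register pair r1,r1+1 as a single 64-bit signed
 * dividend (r1 holds the high word, r1+1 the low word), divides it by the
 * 32-bit value in r2, and writes the remainder back to r1 and the quotient
 * to r1+1.  A plain C model of that arithmetic, assuming a non-zero divisor
 * and no quotient overflow (the real instruction raises a fixed-point
 * divide exception in those cases):
 */
#include <stdint.h>

static void dr_model(uint32_t gpr[16], int r1, int r2)
{
    int64_t dividend = (int64_t)(((uint64_t)gpr[r1] << 32) | gpr[r1 + 1]);
    int32_t divisor  = (int32_t)gpr[r2];

    gpr[r1]     = (uint32_t)(dividend % divisor);  /* remainder */
    gpr[r1 + 1] = (uint32_t)(dividend / divisor);  /* quotient  */
}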
tcg_gen_qemu_st16(tmp2, tmp, get_mem_index(s)); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x41: /* la */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + store_reg(r1, tmp); /* FIXME: 31/24-bit addressing */ + tcg_temp_free_i64(tmp); + break; + case 0x42: /* STC R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = load_reg(r1); + tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s)); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x43: /* IC R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s)); + store_reg8(r1, tmp2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x44: /* EX R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = load_reg(r1); + tmp3 = tcg_const_i64(s->pc + 4); + update_psw_addr(s); + gen_op_calc_cc(s); + gen_helper_ex(cc_op, cc_op, tmp2, tmp, tmp3); + set_cc_static(s); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x46: /* BCT R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tcg_temp_free_i64(tmp); + + tmp32_1 = load_reg32(r1); + tcg_gen_subi_i32(tmp32_1, tmp32_1, 1); + store_reg32(r1, tmp32_1); + + gen_update_cc_op(s); + l1 = gen_new_label(); + tcg_gen_brcondi_i32(TCG_COND_NE, tmp32_1, 0, l1); + + /* not taking the branch, jump to after the instruction */ + gen_goto_tb(s, 0, s->pc + 4); + gen_set_label(l1); + + /* take the branch, move R2 into psw.addr */ + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tcg_gen_mov_i64(psw_addr, tmp); + s->is_jmp = DISAS_JUMP; + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i64(tmp); + break; + case 0x47: /* BC M1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + gen_bcr(s, r1, tmp, s->pc + 4); + tcg_temp_free_i64(tmp); + s->is_jmp = DISAS_TB_JUMP; + break; + case 0x48: /* LH R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16s(tmp2, tmp, get_mem_index(s)); + store_reg32_i64(r1, tmp2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x49: /* CH R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp32_1 = load_reg32(r1); + tmp32_2 = tcg_temp_new_i32(); + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16s(tmp2, tmp, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_2, tmp2); + cmp_s32(s, tmp32_1, tmp32_2); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x4a: /* AH R1,D2(X2,B2) [RX] */ + case 0x4b: /* SH R1,D2(X2,B2) [RX] */ + case 0x4c: /* MH R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = tcg_temp_new_i64(); + tmp32_1 = load_reg32(r1); + tmp32_2 = tcg_temp_new_i32(); + tmp32_3 = tcg_temp_new_i32(); + + tcg_gen_qemu_ld16s(tmp2, tmp, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_2, tmp2); + switch (opc) { + case 0x4a: + tcg_gen_add_i32(tmp32_3, tmp32_1, tmp32_2); + set_cc_add32(s, tmp32_1, tmp32_2, tmp32_3); + break; + case 0x4b: + tcg_gen_sub_i32(tmp32_3, tmp32_1, tmp32_2); + set_cc_sub32(s, tmp32_1, tmp32_2, tmp32_3); + break; + case 0x4c: + tcg_gen_mul_i32(tmp32_3, tmp32_1, tmp32_2); + break; + default: + 
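/*
 * Illustrative sketch, not part of the original patch: AH, SH and MH above
 * fetch a halfword second operand from memory, sign-extend it to 32 bits
 * (the tcg_gen_qemu_ld16s above) and then add, subtract or multiply it
 * into r1.  A host-side model of the operand handling for the big-endian
 * guest, assuming only standard C:
 */
#include <stdint.h>

static int32_t halfword_operand(const uint8_t *mem)
{
    /* big-endian 16-bit value, sign-extended to 32 bits */
    return (int16_t)((mem[0] << 8) | mem[1]);
}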
tcg_abort(); + } + store_reg32(r1, tmp32_3); + + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x4d: /* BAS R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = tcg_const_i64(pc_to_link_info(s, s->pc + 4)); + store_reg(r1, tmp2); + tcg_gen_mov_i64(psw_addr, tmp); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + s->is_jmp = DISAS_JUMP; + break; + case 0x4e: /* CVD R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = tcg_temp_new_i64(); + tmp32_1 = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp32_1, regs[r1]); + gen_helper_cvd(tmp2, tmp32_1); + tcg_gen_qemu_st64(tmp2, tmp, get_mem_index(s)); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + break; + case 0x50: /* st r1, d2(x2, b2) */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = load_reg(r1); + tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s)); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x55: /* CL R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = tcg_temp_new_i64(); + tmp32_1 = tcg_temp_new_i32(); + tmp32_2 = load_reg32(r1); + tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_1, tmp2); + cmp_u32(s, tmp32_2, tmp32_1); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x54: /* N R1,D2(X2,B2) [RX] */ + case 0x56: /* O R1,D2(X2,B2) [RX] */ + case 0x57: /* X R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = tcg_temp_new_i64(); + tmp32_1 = load_reg32(r1); + tmp32_2 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_2, tmp2); + gen_and_or_xor_i32(opc, tmp32_1, tmp32_2); + store_reg32(r1, tmp32_1); + set_cc_nz_u32(s, tmp32_1); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x58: /* l r1, d2(x2, b2) */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = tcg_temp_new_i64(); + tmp32_1 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_1, tmp2); + store_reg32(r1, tmp32_1); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + break; + case 0x59: /* C R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = tcg_temp_new_i64(); + tmp32_1 = tcg_temp_new_i32(); + tmp32_2 = load_reg32(r1); + tcg_gen_qemu_ld32s(tmp2, tmp, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_1, tmp2); + cmp_s32(s, tmp32_2, tmp32_1); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x5a: /* A R1,D2(X2,B2) [RX] */ + case 0x5b: /* S R1,D2(X2,B2) [RX] */ + case 0x5e: /* AL R1,D2(X2,B2) [RX] */ + case 0x5f: /* SL R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp32_1 = load_reg32(r1); + tmp32_2 = tcg_temp_new_i32(); + tmp32_3 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32s(tmp, tmp, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_2, tmp); + switch (opc) { + case 0x5a: + case 0x5e: + tcg_gen_add_i32(tmp32_3, tmp32_1, tmp32_2); + break; + case 0x5b: + 
case 0x5f: + tcg_gen_sub_i32(tmp32_3, tmp32_1, tmp32_2); + break; + default: + tcg_abort(); + } + store_reg32(r1, tmp32_3); + switch (opc) { + case 0x5a: + set_cc_add32(s, tmp32_1, tmp32_2, tmp32_3); + break; + case 0x5e: + set_cc_addu32(s, tmp32_1, tmp32_2, tmp32_3); + break; + case 0x5b: + set_cc_sub32(s, tmp32_1, tmp32_2, tmp32_3); + break; + case 0x5f: + set_cc_subu32(s, tmp32_1, tmp32_2, tmp32_3); + break; + default: + tcg_abort(); + } + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + break; + case 0x5c: /* M R1,D2(X2,B2) [RX] */ + /* reg(r1, r1+1) = reg(r1+1) * *(s32*)addr */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32s(tmp2, tmp, get_mem_index(s)); + tmp3 = load_reg((r1 + 1) & 15); + tcg_gen_ext32s_i64(tmp2, tmp2); + tcg_gen_ext32s_i64(tmp3, tmp3); + tcg_gen_mul_i64(tmp2, tmp2, tmp3); + store_reg32_i64((r1 + 1) & 15, tmp2); + tcg_gen_shri_i64(tmp2, tmp2, 32); + store_reg32_i64(r1, tmp2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x5d: /* D R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp3 = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp32_1 = load_reg32(r1); + tmp32_2 = load_reg32(r1 + 1); + + tmp = tcg_temp_new_i64(); + tmp2 = tcg_temp_new_i64(); + + /* dividend is r(r1 << 32) | r(r1 + 1) */ + tcg_gen_extu_i32_i64(tmp, tmp32_1); + tcg_gen_extu_i32_i64(tmp2, tmp32_2); + tcg_gen_shli_i64(tmp, tmp, 32); + tcg_gen_or_i64(tmp, tmp, tmp2); + + /* divisor is in memory */ + tcg_gen_qemu_ld32s(tmp2, tmp3, get_mem_index(s)); + + /* XXX divisor == 0 -> FixP divide exception */ + + tcg_gen_div_i64(tmp3, tmp, tmp2); + tcg_gen_rem_i64(tmp, tmp, tmp2); + + tcg_gen_trunc_i64_i32(tmp32_1, tmp); + tcg_gen_trunc_i64_i32(tmp32_2, tmp3); + + store_reg32(r1, tmp32_1); /* remainder */ + store_reg32(r1 + 1, tmp32_2); /* quotient */ + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x60: /* STD R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = load_freg(r1); + tcg_gen_qemu_st64(tmp2, tmp, get_mem_index(s)); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x68: /* LD R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s)); + store_freg(r1, tmp2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x70: /* STE R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = tcg_temp_new_i64(); + tmp32_1 = load_freg32(r1); + tcg_gen_extu_i32_i64(tmp2, tmp32_1); + tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s)); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + break; + case 0x71: /* MS R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = tcg_temp_new_i64(); + tmp32_1 = load_reg32(r1); + tmp32_2 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32s(tmp2, tmp, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_2, tmp2); + tcg_gen_mul_i32(tmp32_1, tmp32_1, tmp32_2); + store_reg32(r1, tmp32_1); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x78: /* LE R1,D2(X2,B2) [RX] */ + insn = ld_code4(s->pc); + tmp = 
decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp2 = tcg_temp_new_i64(); + tmp32_1 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_1, tmp2); + store_freg32(r1, tmp32_1); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + break; +#ifndef CONFIG_USER_ONLY + case 0x80: /* SSM D2(B2) [S] */ + /* Set System Mask */ + check_privileged(s, ilc); + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp2 = tcg_temp_new_i64(); + tmp3 = tcg_temp_new_i64(); + tcg_gen_andi_i64(tmp3, psw_mask, ~0xff00000000000000ULL); + tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s)); + tcg_gen_shli_i64(tmp2, tmp2, 56); + tcg_gen_or_i64(psw_mask, tmp3, tmp2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; + case 0x82: /* LPSW D2(B2) [S] */ + /* Load PSW */ + check_privileged(s, ilc); + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp2 = tcg_temp_new_i64(); + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s)); + tcg_gen_addi_i64(tmp, tmp, 4); + tcg_gen_qemu_ld32u(tmp3, tmp, get_mem_index(s)); + gen_helper_load_psw(tmp2, tmp3); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + /* we need to keep cc_op intact */ + s->is_jmp = DISAS_JUMP; + break; + case 0x83: /* DIAG R1,R3,D2 [RS] */ + /* Diagnose call (KVM hypercall) */ + check_privileged(s, ilc); + potential_page_fault(s); + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp32_1 = tcg_const_i32(insn & 0xfff); + tmp2 = load_reg(2); + tmp3 = load_reg(1); + gen_helper_diag(tmp2, tmp32_1, tmp2, tmp3); + store_reg(2, tmp2); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; +#endif + case 0x88: /* SRL R1,D2(B2) [RS] */ + case 0x89: /* SLL R1,D2(B2) [RS] */ + case 0x8a: /* SRA R1,D2(B2) [RS] */ + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp32_1 = load_reg32(r1); + tmp32_2 = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp32_2, tmp); + tcg_gen_andi_i32(tmp32_2, tmp32_2, 0x3f); + switch (opc) { + case 0x88: + tcg_gen_shr_i32(tmp32_1, tmp32_1, tmp32_2); + break; + case 0x89: + tcg_gen_shl_i32(tmp32_1, tmp32_1, tmp32_2); + break; + case 0x8a: + tcg_gen_sar_i32(tmp32_1, tmp32_1, tmp32_2); + set_cc_s32(s, tmp32_1); + break; + default: + tcg_abort(); + } + store_reg32(r1, tmp32_1); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x8c: /* SRDL R1,D2(B2) [RS] */ + case 0x8d: /* SLDL R1,D2(B2) [RS] */ + case 0x8e: /* SRDA R1,D2(B2) [RS] */ + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); /* shift */ + tmp2 = tcg_temp_new_i64(); + tmp32_1 = load_reg32(r1); + tmp32_2 = load_reg32(r1 + 1); + tcg_gen_concat_i32_i64(tmp2, tmp32_2, tmp32_1); /* operand */ + switch (opc) { + case 0x8c: + tcg_gen_shr_i64(tmp2, tmp2, tmp); + break; + case 0x8d: + tcg_gen_shl_i64(tmp2, tmp2, tmp); + break; + case 0x8e: + tcg_gen_sar_i64(tmp2, tmp2, tmp); + set_cc_s64(s, tmp2); + break; + } + tcg_gen_shri_i64(tmp, tmp2, 32); + tcg_gen_trunc_i64_i32(tmp32_1, tmp); + store_reg32(r1, tmp32_1); + tcg_gen_trunc_i64_i32(tmp32_2, tmp2); + store_reg32(r1 + 1, tmp32_2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x98: /* LM R1,R3,D2(B2) [RS] */ + case 0x90: /* STM R1,R3,D2(B2) [RS] 
*/ + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + + tmp = get_address(s, 0, b2, d2); + tmp2 = tcg_temp_new_i64(); + tmp3 = tcg_const_i64(4); + tmp4 = tcg_const_i64(0xffffffff00000000ULL); + for (i = r1;; i = (i + 1) % 16) { + if (opc == 0x98) { + tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s)); + tcg_gen_and_i64(regs[i], regs[i], tmp4); + tcg_gen_or_i64(regs[i], regs[i], tmp2); + } else { + tcg_gen_qemu_st32(regs[i], tmp, get_mem_index(s)); + } + if (i == r3) { + break; + } + tcg_gen_add_i64(tmp, tmp, tmp3); + } + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + tcg_temp_free_i64(tmp4); + break; + case 0x91: /* TM D1(B1),I2 [SI] */ + insn = ld_code4(s->pc); + tmp = decode_si(s, insn, &i2, &b1, &d1); + tmp2 = tcg_const_i64(i2); + tcg_gen_qemu_ld8u(tmp, tmp, get_mem_index(s)); + cmp_64(s, tmp, tmp2, CC_OP_TM_32); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x92: /* MVI D1(B1),I2 [SI] */ + insn = ld_code4(s->pc); + tmp = decode_si(s, insn, &i2, &b1, &d1); + tmp2 = tcg_const_i64(i2); + tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s)); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x94: /* NI D1(B1),I2 [SI] */ + case 0x96: /* OI D1(B1),I2 [SI] */ + case 0x97: /* XI D1(B1),I2 [SI] */ + insn = ld_code4(s->pc); + tmp = decode_si(s, insn, &i2, &b1, &d1); + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s)); + switch (opc) { + case 0x94: + tcg_gen_andi_i64(tmp2, tmp2, i2); + break; + case 0x96: + tcg_gen_ori_i64(tmp2, tmp2, i2); + break; + case 0x97: + tcg_gen_xori_i64(tmp2, tmp2, i2); + break; + default: + tcg_abort(); + } + tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s)); + set_cc_nz_u64(s, tmp2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x95: /* CLI D1(B1),I2 [SI] */ + insn = ld_code4(s->pc); + tmp = decode_si(s, insn, &i2, &b1, &d1); + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s)); + cmp_u64c(s, tmp2, i2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0x9a: /* LAM R1,R3,D2(B2) [RS] */ + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r3); + potential_page_fault(s); + gen_helper_lam(tmp32_1, tmp, tmp32_2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0x9b: /* STAM R1,R3,D2(B2) [RS] */ + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r3); + potential_page_fault(s); + gen_helper_stam(tmp32_1, tmp, tmp32_2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0xa5: + insn = ld_code4(s->pc); + r1 = (insn >> 20) & 0xf; + op = (insn >> 16) & 0xf; + i2 = insn & 0xffff; + disas_a5(s, op, r1, i2); + break; + case 0xa7: + insn = ld_code4(s->pc); + r1 = (insn >> 20) & 0xf; + op = (insn >> 16) & 0xf; + i2 = (short)insn; + disas_a7(s, op, r1, i2); + break; + case 0xa8: /* MVCLE R1,R3,D2(B2) [RS] */ + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r3); + potential_page_fault(s); + gen_helper_mvcle(cc_op, tmp32_1, tmp, tmp32_2); + set_cc_static(s); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0xa9: /* CLCLE R1,R3,D2(B2) [RS] 
*/ + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r3); + potential_page_fault(s); + gen_helper_clcle(cc_op, tmp32_1, tmp, tmp32_2); + set_cc_static(s); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; +#ifndef CONFIG_USER_ONLY + case 0xac: /* STNSM D1(B1),I2 [SI] */ + case 0xad: /* STOSM D1(B1),I2 [SI] */ + check_privileged(s, ilc); + insn = ld_code4(s->pc); + tmp = decode_si(s, insn, &i2, &b1, &d1); + tmp2 = tcg_temp_new_i64(); + tcg_gen_shri_i64(tmp2, psw_mask, 56); + tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s)); + if (opc == 0xac) { + tcg_gen_andi_i64(psw_mask, psw_mask, + ((uint64_t)i2 << 56) | 0x00ffffffffffffffULL); + } else { + tcg_gen_ori_i64(psw_mask, psw_mask, (uint64_t)i2 << 56); + } + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + case 0xae: /* SIGP R1,R3,D2(B2) [RS] */ + check_privileged(s, ilc); + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp2 = load_reg(r3); + tmp32_1 = tcg_const_i32(r1); + potential_page_fault(s); + gen_helper_sigp(cc_op, tmp, tmp32_1, tmp2); + set_cc_static(s); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + break; + case 0xb1: /* LRA R1,D2(X2, B2) [RX] */ + check_privileged(s, ilc); + insn = ld_code4(s->pc); + tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2); + tmp32_1 = tcg_const_i32(r1); + potential_page_fault(s); + gen_helper_lra(cc_op, tmp, tmp32_1); + set_cc_static(s); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + break; +#endif + case 0xb2: + insn = ld_code4(s->pc); + op = (insn >> 16) & 0xff; + switch (op) { + case 0x9c: /* STFPC D2(B2) [S] */ + d2 = insn & 0xfff; + b2 = (insn >> 12) & 0xf; + tmp32_1 = tcg_temp_new_i32(); + tmp = tcg_temp_new_i64(); + tmp2 = get_address(s, 0, b2, d2); + tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUState, fpc)); + tcg_gen_extu_i32_i64(tmp, tmp32_1); + tcg_gen_qemu_st32(tmp, tmp2, get_mem_index(s)); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; + default: + disas_b2(s, op, insn); + break; + } + break; + case 0xb3: + insn = ld_code4(s->pc); + op = (insn >> 16) & 0xff; + r3 = (insn >> 12) & 0xf; /* aka m3 */ + r1 = (insn >> 4) & 0xf; + r2 = insn & 0xf; + disas_b3(s, op, r3, r1, r2); + break; +#ifndef CONFIG_USER_ONLY + case 0xb6: /* STCTL R1,R3,D2(B2) [RS] */ + /* Store Control */ + check_privileged(s, ilc); + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r3); + potential_page_fault(s); + gen_helper_stctl(tmp32_1, tmp, tmp32_2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0xb7: /* LCTL R1,R3,D2(B2) [RS] */ + /* Load Control */ + check_privileged(s, ilc); + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r3); + potential_page_fault(s); + gen_helper_lctl(tmp32_1, tmp, tmp32_2); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; +#endif + case 0xb9: + insn = ld_code4(s->pc); + r1 = (insn >> 4) & 0xf; + r2 = insn & 0xf; + op = (insn >> 16) & 0xff; + disas_b9(s, op, r1, r2); + break; + case 0xba: /* CS R1,R3,D2(B2) [RS] */ + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, 
&d2); + tmp = get_address(s, 0, b2, d2); + tmp32_1 = tcg_const_i32(r1); + tmp32_2 = tcg_const_i32(r3); + potential_page_fault(s); + gen_helper_cs(cc_op, tmp32_1, tmp, tmp32_2); + set_cc_static(s); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0xbd: /* CLM R1,M3,D2(B2) [RS] */ + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp32_1 = load_reg32(r1); + tmp32_2 = tcg_const_i32(r3); + potential_page_fault(s); + gen_helper_clm(cc_op, tmp32_1, tmp32_2, tmp); + set_cc_static(s); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0xbe: /* STCM R1,M3,D2(B2) [RS] */ + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + tmp = get_address(s, 0, b2, d2); + tmp32_1 = load_reg32(r1); + tmp32_2 = tcg_const_i32(r3); + potential_page_fault(s); + gen_helper_stcm(tmp32_1, tmp32_2, tmp); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + break; + case 0xbf: /* ICM R1,M3,D2(B2) [RS] */ + insn = ld_code4(s->pc); + decode_rs(s, insn, &r1, &r3, &b2, &d2); + if (r3 == 15) { + /* effectively a 32-bit load */ + tmp = get_address(s, 0, b2, d2); + tmp32_1 = tcg_temp_new_i32(); + tmp32_2 = tcg_const_i32(r3); + tcg_gen_qemu_ld32u(tmp, tmp, get_mem_index(s)); + store_reg32_i64(r1, tmp); + tcg_gen_trunc_i64_i32(tmp32_1, tmp); + set_cc_icm(s, tmp32_2, tmp32_1); + tcg_temp_free_i64(tmp); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + } else if (r3) { + uint32_t mask = 0x00ffffffUL; + uint32_t shift = 24; + int m3 = r3; + tmp = get_address(s, 0, b2, d2); + tmp2 = tcg_temp_new_i64(); + tmp32_1 = load_reg32(r1); + tmp32_2 = tcg_temp_new_i32(); + tmp32_3 = tcg_const_i32(r3); + tmp32_4 = tcg_const_i32(0); + while (m3) { + if (m3 & 8) { + tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s)); + tcg_gen_trunc_i64_i32(tmp32_2, tmp2); + if (shift) { + tcg_gen_shli_i32(tmp32_2, tmp32_2, shift); + } + tcg_gen_andi_i32(tmp32_1, tmp32_1, mask); + tcg_gen_or_i32(tmp32_1, tmp32_1, tmp32_2); + tcg_gen_or_i32(tmp32_4, tmp32_4, tmp32_2); + tcg_gen_addi_i64(tmp, tmp, 1); + } + m3 = (m3 << 1) & 0xf; + mask = (mask >> 8) | 0xff000000UL; + shift -= 8; + } + store_reg32(r1, tmp32_1); + set_cc_icm(s, tmp32_3, tmp32_4); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i32(tmp32_1); + tcg_temp_free_i32(tmp32_2); + tcg_temp_free_i32(tmp32_3); + tcg_temp_free_i32(tmp32_4); + } else { + /* i.e. 
env->cc = 0 */ + gen_op_movi_cc(s, 0); + } + break; + case 0xc0: + case 0xc2: + insn = ld_code6(s->pc); + r1 = (insn >> 36) & 0xf; + op = (insn >> 32) & 0xf; + i2 = (int)insn; + switch (opc) { + case 0xc0: + disas_c0(s, op, r1, i2); + break; + case 0xc2: + disas_c2(s, op, r1, i2); + break; + default: + tcg_abort(); + } + break; + case 0xd2: /* MVC D1(L,B1),D2(B2) [SS] */ + case 0xd4: /* NC D1(L,B1),D2(B2) [SS] */ + case 0xd5: /* CLC D1(L,B1),D2(B2) [SS] */ + case 0xd6: /* OC D1(L,B1),D2(B2) [SS] */ + case 0xd7: /* XC D1(L,B1),D2(B2) [SS] */ + case 0xdc: /* TR D1(L,B1),D2(B2) [SS] */ + case 0xf3: /* UNPK D1(L1,B1),D2(L2,B2) [SS] */ + insn = ld_code6(s->pc); + vl = tcg_const_i32((insn >> 32) & 0xff); + b1 = (insn >> 28) & 0xf; + b2 = (insn >> 12) & 0xf; + d1 = (insn >> 16) & 0xfff; + d2 = insn & 0xfff; + tmp = get_address(s, 0, b1, d1); + tmp2 = get_address(s, 0, b2, d2); + switch (opc) { + case 0xd2: + gen_op_mvc(s, (insn >> 32) & 0xff, tmp, tmp2); + break; + case 0xd4: + potential_page_fault(s); + gen_helper_nc(cc_op, vl, tmp, tmp2); + set_cc_static(s); + break; + case 0xd5: + gen_op_clc(s, (insn >> 32) & 0xff, tmp, tmp2); + break; + case 0xd6: + potential_page_fault(s); + gen_helper_oc(cc_op, vl, tmp, tmp2); + set_cc_static(s); + break; + case 0xd7: + potential_page_fault(s); + gen_helper_xc(cc_op, vl, tmp, tmp2); + set_cc_static(s); + break; + case 0xdc: + potential_page_fault(s); + gen_helper_tr(vl, tmp, tmp2); + set_cc_static(s); + break; + case 0xf3: + potential_page_fault(s); + gen_helper_unpk(vl, tmp, tmp2); + break; + default: + tcg_abort(); + } + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + break; +#ifndef CONFIG_USER_ONLY + case 0xda: /* MVCP D1(R1,B1),D2(B2),R3 [SS] */ + case 0xdb: /* MVCS D1(R1,B1),D2(B2),R3 [SS] */ + check_privileged(s, ilc); + potential_page_fault(s); + insn = ld_code6(s->pc); + r1 = (insn >> 36) & 0xf; + r3 = (insn >> 32) & 0xf; + b1 = (insn >> 28) & 0xf; + d1 = (insn >> 16) & 0xfff; + b2 = (insn >> 12) & 0xf; + d2 = insn & 0xfff; + tmp = load_reg(r1); + /* XXX key in r3 */ + tmp2 = get_address(s, 0, b1, d1); + tmp3 = get_address(s, 0, b2, d2); + if (opc == 0xda) { + gen_helper_mvcp(cc_op, tmp, tmp2, tmp3); + } else { + gen_helper_mvcs(cc_op, tmp, tmp2, tmp3); + } + set_cc_static(s); + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(tmp2); + tcg_temp_free_i64(tmp3); + break; +#endif + case 0xe3: + insn = ld_code6(s->pc); + debug_insn(insn); + op = insn & 0xff; + r1 = (insn >> 36) & 0xf; + x2 = (insn >> 32) & 0xf; + b2 = (insn >> 28) & 0xf; + d2 = ((int)((((insn >> 16) & 0xfff) + | ((insn << 4) & 0xff000)) << 12)) >> 12; + disas_e3(s, op, r1, x2, b2, d2 ); + break; +#ifndef CONFIG_USER_ONLY + case 0xe5: + /* Test Protection */ + check_privileged(s, ilc); + insn = ld_code6(s->pc); + debug_insn(insn); + disas_e5(s, insn); + break; +#endif + case 0xeb: + insn = ld_code6(s->pc); + debug_insn(insn); + op = insn & 0xff; + r1 = (insn >> 36) & 0xf; + r3 = (insn >> 32) & 0xf; + b2 = (insn >> 28) & 0xf; + d2 = ((int)((((insn >> 16) & 0xfff) + | ((insn << 4) & 0xff000)) << 12)) >> 12; + disas_eb(s, op, r1, r3, b2, d2); + break; + case 0xed: + insn = ld_code6(s->pc); + debug_insn(insn); + op = insn & 0xff; + r1 = (insn >> 36) & 0xf; + x2 = (insn >> 32) & 0xf; + b2 = (insn >> 28) & 0xf; + d2 = (short)((insn >> 16) & 0xfff); + r1b = (insn >> 12) & 0xf; + disas_ed(s, op, r1, x2, b2, d2, r1b); + break; + default: + LOG_DISAS("unimplemented opcode 0x%x\n", opc); + gen_illegal_opcode(s, ilc); + break; + } + + /* Instruction length is encoded in the opcode */ + s->pc += 
(ilc * 2); +} + +static inline void gen_intermediate_code_internal(CPUState *env, + TranslationBlock *tb, + int search_pc) +{ + DisasContext dc; + target_ulong pc_start; + uint64_t next_page_start; + uint16_t *gen_opc_end; + int j, lj = -1; + int num_insns, max_insns; + CPUBreakpoint *bp; + + pc_start = tb->pc; + + /* 31-bit mode */ + if (!(tb->flags & FLAG_MASK_64)) { + pc_start &= 0x7fffffff; + } + + dc.pc = pc_start; + dc.is_jmp = DISAS_NEXT; + dc.tb = tb; + dc.cc_op = CC_OP_DYNAMIC; + + gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; + + next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + + num_insns = 0; + max_insns = tb->cflags & CF_COUNT_MASK; + if (max_insns == 0) { + max_insns = CF_COUNT_MASK; + } + + gen_icount_start(); + + do { + if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { + QTAILQ_FOREACH(bp, &env->breakpoints, entry) { + if (bp->pc == dc.pc) { + gen_debug(&dc); + break; + } + } + } + if (search_pc) { + j = gen_opc_ptr - gen_opc_buf; + if (lj < j) { + lj++; + while (lj < j) { + gen_opc_instr_start[lj++] = 0; + } + } + gen_opc_pc[lj] = dc.pc; + gen_opc_cc_op[lj] = dc.cc_op; + gen_opc_instr_start[lj] = 1; + gen_opc_icount[lj] = num_insns; + } + if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) { + gen_io_start(); + } +#if defined(S390X_DEBUG_DISAS_VERBOSE) + LOG_DISAS("pc " TARGET_FMT_lx "\n", + dc.pc); +#endif + disas_s390_insn(&dc); + + num_insns++; + if (env->singlestep_enabled) { + gen_debug(&dc); + } + } while (!dc.is_jmp && gen_opc_ptr < gen_opc_end && dc.pc < next_page_start + && num_insns < max_insns && !env->singlestep_enabled + && !singlestep); + + if (!dc.is_jmp) { + update_psw_addr(&dc); + } + + if (singlestep && dc.cc_op != CC_OP_DYNAMIC) { + gen_op_calc_cc(&dc); + } else { + /* next TB starts off with CC_OP_DYNAMIC, so make sure the cc op type + is in env */ + gen_op_set_cc_op(&dc); + } + + if (tb->cflags & CF_LAST_IO) { + gen_io_end(); + } + /* Generate the return instruction */ + if (dc.is_jmp != DISAS_TB_JUMP) { + tcg_gen_exit_tb(0); + } + gen_icount_end(tb, num_insns); + *gen_opc_ptr = INDEX_op_end; + if (search_pc) { + j = gen_opc_ptr - gen_opc_buf; + lj++; + while (lj <= j) { + gen_opc_instr_start[lj++] = 0; + } + } else { + tb->size = dc.pc - pc_start; + tb->icount = num_insns; + } +#if defined(S390X_DEBUG_DISAS) + log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0); + if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { + qemu_log("IN: %s\n", lookup_symbol(pc_start)); + log_target_disas(pc_start, dc.pc - pc_start, 1); + qemu_log("\n"); + } +#endif } void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb) { + gen_intermediate_code_internal(env, tb, 0); } void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb) { + gen_intermediate_code_internal(env, tb, 1); } void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos) { + int cc_op; env->psw.addr = gen_opc_pc[pc_pos]; + cc_op = gen_opc_cc_op[pc_pos]; + if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) { + env->cc_op = cc_op; + } } diff --git a/target-sparc/helper.c b/target-sparc/helper.c index b2d4d70a11..e9b42d03a9 100644 --- a/target-sparc/helper.c +++ b/target-sparc/helper.c @@ -21,7 +21,6 @@ #include <stdio.h> #include <string.h> #include <inttypes.h> -#include <signal.h> #include "cpu.h" #include "exec-all.h" diff --git a/target-sparc/helper.h b/target-sparc/helper.h index 12e8557133..023f4d6023 100644 --- a/target-sparc/helper.h +++ b/target-sparc/helper.h @@ -35,7 +35,6 @@ DEF_HELPER_2(check_align, void, tl, i32) DEF_HELPER_0(debug, 
void) DEF_HELPER_0(save, void) DEF_HELPER_0(restore, void) -DEF_HELPER_1(flush, void, tl) DEF_HELPER_2(udiv, tl, tl, tl) DEF_HELPER_2(udiv_cc, tl, tl, tl) DEF_HELPER_2(sdiv, tl, tl, tl) diff --git a/target-sparc/op_helper.c b/target-sparc/op_helper.c index ffffb8c0bd..b38691e19d 100644 --- a/target-sparc/op_helper.c +++ b/target-sparc/op_helper.c @@ -4092,12 +4092,6 @@ void helper_write_softint(uint64_t value) } #endif -void helper_flush(target_ulong addr) -{ - addr &= ~7; - tb_invalidate_page_range(addr, addr + 8); -} - #ifdef TARGET_SPARC64 #ifdef DEBUG_PCALL static const char * const excp_names[0x80] = { diff --git a/target-sparc/translate.c b/target-sparc/translate.c index 3c958b26d6..0cc47e9ff3 100644 --- a/target-sparc/translate.c +++ b/target-sparc/translate.c @@ -1893,7 +1893,7 @@ static void disas_sparc_insn(DisasContext * dc) int cc; target = GET_FIELD_SP(insn, 0, 18); - target = sign_extend(target, 18); + target = sign_extend(target, 19); target <<= 2; cc = GET_FIELD_SP(insn, 20, 21); if (cc == 0) @@ -3505,16 +3505,28 @@ static void disas_sparc_insn(DisasContext * dc) tcg_gen_mov_tl(cpu_tbr, cpu_tmp0); break; case 6: // pstate - save_state(dc, cpu_cond); - gen_helper_wrpstate(cpu_tmp0); - dc->npc = DYNAMIC_PC; + { + TCGv r_tmp = tcg_temp_local_new(); + + tcg_gen_mov_tl(r_tmp, cpu_tmp0); + save_state(dc, cpu_cond); + gen_helper_wrpstate(r_tmp); + tcg_temp_free(r_tmp); + dc->npc = DYNAMIC_PC; + } break; case 7: // tl - save_state(dc, cpu_cond); - tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0); - tcg_gen_st_i32(cpu_tmp32, cpu_env, - offsetof(CPUSPARCState, tl)); - dc->npc = DYNAMIC_PC; + { + TCGv r_tmp = tcg_temp_local_new(); + + tcg_gen_mov_tl(r_tmp, cpu_tmp0); + save_state(dc, cpu_cond); + tcg_gen_trunc_tl_i32(cpu_tmp32, r_tmp); + tcg_temp_free(r_tmp); + tcg_gen_st_i32(cpu_tmp32, cpu_env, + offsetof(CPUSPARCState, tl)); + dc->npc = DYNAMIC_PC; + } break; case 8: // pil gen_helper_wrpil(cpu_tmp0); @@ -4214,7 +4226,7 @@ static void disas_sparc_insn(DisasContext * dc) case 0x3b: /* flush */ if (!((dc)->def->features & CPU_FEATURE_FLUSH)) goto unimp_flush; - gen_helper_flush(cpu_dst); + /* nop */ break; case 0x3c: /* save */ save_state(dc, cpu_cond); diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h index 0028bfa562..8cb7d88eb6 100644 --- a/tcg/mips/tcg-target.h +++ b/tcg/mips/tcg-target.h @@ -102,7 +102,11 @@ enum { /* guest base is supported */ #define TCG_TARGET_HAS_GUEST_BASE +#ifdef __OpenBSD__ +#include <machine/sysarch.h> +#else #include <sys/cachectl.h> +#endif static inline void flush_icache_range(unsigned long start, unsigned long stop) { diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h index 207a89fd97..ebf5e1389d 100644 --- a/tcg/tcg-op.h +++ b/tcg/tcg-op.h @@ -1063,66 +1063,66 @@ static inline void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg) tcg_gen_op2i_i64(INDEX_op_movi_i64, ret, arg); } -static inline void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_i64 arg2, +static inline void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(INDEX_op_ld8u_i64, ret, arg2, offset); } -static inline void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_i64 arg2, +static inline void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(INDEX_op_ld8s_i64, ret, arg2, offset); } -static inline void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_i64 arg2, +static inline void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(INDEX_op_ld16u_i64, ret, arg2, offset); } -static inline void 
tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_i64 arg2, +static inline void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(INDEX_op_ld16s_i64, ret, arg2, offset); } -static inline void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_i64 arg2, +static inline void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(INDEX_op_ld32u_i64, ret, arg2, offset); } -static inline void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_i64 arg2, +static inline void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(INDEX_op_ld32s_i64, ret, arg2, offset); } -static inline void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_i64 arg2, tcg_target_long offset) +static inline void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(INDEX_op_ld_i64, ret, arg2, offset); } -static inline void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_i64 arg2, +static inline void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(INDEX_op_st8_i64, arg1, arg2, offset); } -static inline void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_i64 arg2, +static inline void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(INDEX_op_st16_i64, arg1, arg2, offset); } -static inline void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_i64 arg2, +static inline void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(INDEX_op_st32_i64, arg1, arg2, offset); } -static inline void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_i64 arg2, tcg_target_long offset) +static inline void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(INDEX_op_st_i64, arg1, arg2, offset); } @@ -2304,8 +2304,8 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index) #endif } -#define tcg_gen_ld_ptr tcg_gen_ld_i32 -#define tcg_gen_discard_ptr tcg_gen_discard_i32 +#define tcg_gen_ld_ptr(R, A, O) tcg_gen_ld_i32(TCGV_PTR_TO_NAT(R), (A), (O)) +#define tcg_gen_discard_ptr(A) tcg_gen_discard_i32(TCGV_PTR_TO_NAT(A)) #else /* TCG_TARGET_REG_BITS == 32 */ @@ -2372,8 +2372,8 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index) tcg_gen_qemu_ldst_op_i64(INDEX_op_qemu_st64, arg, addr, mem_index); } -#define tcg_gen_ld_ptr tcg_gen_ld_i64 -#define tcg_gen_discard_ptr tcg_gen_discard_i64 +#define tcg_gen_ld_ptr(R, A, O) tcg_gen_ld_i64(TCGV_PTR_TO_NAT(R), (A), (O)) +#define tcg_gen_discard_ptr(A) tcg_gen_discard_i64(TCGV_PTR_TO_NAT(A)) #endif /* TCG_TARGET_REG_BITS != 32 */ @@ -2523,11 +2523,17 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index) #endif #if TCG_TARGET_REG_BITS == 32 -#define tcg_gen_add_ptr tcg_gen_add_i32 -#define tcg_gen_addi_ptr tcg_gen_addi_i32 -#define tcg_gen_ext_i32_ptr tcg_gen_mov_i32 +#define tcg_gen_add_ptr(R, A, B) tcg_gen_add_i32(TCGV_PTR_TO_NAT(R), \ + TCGV_PTR_TO_NAT(A), \ + TCGV_PTR_TO_NAT(B)) +#define tcg_gen_addi_ptr(R, A, B) tcg_gen_addi_i32(TCGV_PTR_TO_NAT(R), \ + TCGV_PTR_TO_NAT(A), (B)) +#define tcg_gen_ext_i32_ptr(R, A) tcg_gen_mov_i32(TCGV_PTR_TO_NAT(R), (A)) #else /* TCG_TARGET_REG_BITS == 32 */ -#define tcg_gen_add_ptr tcg_gen_add_i64 -#define tcg_gen_addi_ptr tcg_gen_addi_i64 -#define tcg_gen_ext_i32_ptr tcg_gen_ext_i32_i64 +#define tcg_gen_add_ptr(R, A, B) tcg_gen_add_i64(TCGV_PTR_TO_NAT(R), \ + TCGV_PTR_TO_NAT(A), \ + TCGV_PTR_TO_NAT(B)) +#define tcg_gen_addi_ptr(R, A, B) tcg_gen_addi_i64(TCGV_PTR_TO_NAT(R), \ + 
TCGV_PTR_TO_NAT(A), (B)) +#define tcg_gen_ext_i32_ptr(R, A) tcg_gen_ext_i32_i64(TCGV_PTR_TO_NAT(R), (A)) #endif /* TCG_TARGET_REG_BITS != 32 */ @@ -585,7 +585,7 @@ void tcg_register_helper(void *func, const char *name) void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags, int sizemask, TCGArg ret, int nargs, TCGArg *args) { -#ifdef TCG_TARGET_I386 +#if defined(TCG_TARGET_I386) && TCG_TARGET_REG_BITS < 64 int call_type; #endif int i; @@ -612,7 +612,7 @@ void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags, *gen_opc_ptr++ = INDEX_op_call; nparam = gen_opparam_ptr++; -#ifdef TCG_TARGET_I386 +#if defined(TCG_TARGET_I386) && TCG_TARGET_REG_BITS < 64 call_type = (flags & TCG_CALL_TYPE_MASK); #endif if (ret != TCG_CALL_DUMMY_ARG) { @@ -1193,7 +1193,7 @@ static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps) } } -/* Liveness analysis : update the opc_dead_iargs array to tell if a +/* Liveness analysis : update the opc_dead_args array to tell if a given input arguments is dead. Instructions updating dead temporaries are removed. */ static void tcg_liveness_analysis(TCGContext *s) @@ -1203,13 +1203,13 @@ static void tcg_liveness_analysis(TCGContext *s) TCGArg *args; const TCGOpDef *def; uint8_t *dead_temps; - unsigned int dead_iargs; + unsigned int dead_args; gen_opc_ptr++; /* skip end */ nb_ops = gen_opc_ptr - gen_opc_buf; - s->op_dead_iargs = tcg_malloc(nb_ops * sizeof(uint16_t)); + s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t)); dead_temps = tcg_malloc(s->nb_temps); memset(dead_temps, 1, s->nb_temps); @@ -1245,8 +1245,12 @@ static void tcg_liveness_analysis(TCGContext *s) do_not_remove_call: /* output args are dead */ + dead_args = 0; for(i = 0; i < nb_oargs; i++) { arg = args[i]; + if (dead_temps[arg]) { + dead_args |= (1 << i); + } dead_temps[arg] = 1; } @@ -1256,17 +1260,16 @@ static void tcg_liveness_analysis(TCGContext *s) } /* input args are live */ - dead_iargs = 0; - for(i = 0; i < nb_iargs; i++) { - arg = args[i + nb_oargs]; + for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) { + arg = args[i]; if (arg != TCG_CALL_DUMMY_ARG) { if (dead_temps[arg]) { - dead_iargs |= (1 << i); + dead_args |= (1 << i); } dead_temps[arg] = 0; } } - s->op_dead_iargs[op_index] = dead_iargs; + s->op_dead_args[op_index] = dead_args; } args--; } @@ -1313,8 +1316,12 @@ static void tcg_liveness_analysis(TCGContext *s) do_not_remove: /* output args are dead */ + dead_args = 0; for(i = 0; i < nb_oargs; i++) { arg = args[i]; + if (dead_temps[arg]) { + dead_args |= (1 << i); + } dead_temps[arg] = 1; } @@ -1327,15 +1334,14 @@ static void tcg_liveness_analysis(TCGContext *s) } /* input args are live */ - dead_iargs = 0; - for(i = 0; i < nb_iargs; i++) { - arg = args[i + nb_oargs]; + for(i = nb_oargs; i < nb_oargs + nb_iargs; i++) { + arg = args[i]; if (dead_temps[arg]) { - dead_iargs |= (1 << i); + dead_args |= (1 << i); } dead_temps[arg] = 0; } - s->op_dead_iargs[op_index] = dead_iargs; + s->op_dead_args[op_index] = dead_args; } break; } @@ -1352,8 +1358,8 @@ static void tcg_liveness_analysis(TCGContext *s) int nb_ops; nb_ops = gen_opc_ptr - gen_opc_buf; - s->op_dead_iargs = tcg_malloc(nb_ops * sizeof(uint16_t)); - memset(s->op_dead_iargs, 0, nb_ops * sizeof(uint16_t)); + s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t)); + memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t)); } #endif @@ -1557,7 +1563,7 @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs) save_globals(s, allocated_regs); } -#define IS_DEAD_IARG(n) ((dead_iargs >> (n)) & 
1) +#define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1) static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args) { @@ -1582,7 +1588,7 @@ static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args) static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def, const TCGArg *args, - unsigned int dead_iargs) + unsigned int dead_args) { TCGTemp *ts, *ots; int reg; @@ -1592,9 +1598,9 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def, ts = &s->temps[args[1]]; arg_ct = &def->args_ct[0]; - /* XXX: always mark arg dead if IS_DEAD_IARG(0) */ + /* XXX: always mark arg dead if IS_DEAD_ARG(1) */ if (ts->val_type == TEMP_VAL_REG) { - if (IS_DEAD_IARG(0) && !ts->fixed_reg && !ots->fixed_reg) { + if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) { /* the mov can be suppressed */ if (ots->val_type == TEMP_VAL_REG) s->reg_to_temp[ots->reg] = -1; @@ -1642,7 +1648,7 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def, static void tcg_reg_alloc_op(TCGContext *s, const TCGOpDef *def, TCGOpcode opc, const TCGArg *args, - unsigned int dead_iargs) + unsigned int dead_args) { TCGRegSet allocated_regs; int i, k, nb_iargs, nb_oargs, reg; @@ -1701,8 +1707,9 @@ static void tcg_reg_alloc_op(TCGContext *s, /* if the input is aliased to an output and if it is not dead after the instruction, we must allocate a new register and move it */ - if (!IS_DEAD_IARG(i - nb_oargs)) + if (!IS_DEAD_ARG(i)) { goto allocate_in_reg; + } } } reg = ts->reg; @@ -1725,9 +1732,9 @@ static void tcg_reg_alloc_op(TCGContext *s, tcg_reg_alloc_bb_end(s, allocated_regs); } else { /* mark dead temporaries and free the associated registers */ - for(i = 0; i < nb_iargs; i++) { - arg = args[nb_oargs + i]; - if (IS_DEAD_IARG(i)) { + for(i = nb_oargs; i < nb_oargs + nb_iargs; i++) { + arg = args[i]; + if (IS_DEAD_ARG(i)) { ts = &s->temps[arg]; if (!ts->fixed_reg) { if (ts->val_type == TEMP_VAL_REG) @@ -1775,12 +1782,16 @@ static void tcg_reg_alloc_op(TCGContext *s, if (!ts->fixed_reg) { if (ts->val_type == TEMP_VAL_REG) s->reg_to_temp[ts->reg] = -1; - ts->val_type = TEMP_VAL_REG; - ts->reg = reg; - /* temp value is modified, so the value kept in memory is - potentially not the same */ - ts->mem_coherent = 0; - s->reg_to_temp[reg] = arg; + if (IS_DEAD_ARG(i)) { + ts->val_type = TEMP_VAL_DEAD; + } else { + ts->val_type = TEMP_VAL_REG; + ts->reg = reg; + /* temp value is modified, so the value kept in memory is + potentially not the same */ + ts->mem_coherent = 0; + s->reg_to_temp[reg] = arg; + } } oarg_end: new_args[i] = reg; @@ -1808,7 +1819,7 @@ static void tcg_reg_alloc_op(TCGContext *s, static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def, TCGOpcode opc, const TCGArg *args, - unsigned int dead_iargs) + unsigned int dead_args) { int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params; TCGArg arg, func_arg; @@ -1930,9 +1941,9 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def, /* mark dead temporaries and free the associated registers */ - for(i = 0; i < nb_iargs; i++) { - arg = args[nb_oargs + i]; - if (IS_DEAD_IARG(i)) { + for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) { + arg = args[i]; + if (IS_DEAD_ARG(i)) { ts = &s->temps[arg]; if (!ts->fixed_reg) { if (ts->val_type == TEMP_VAL_REG) @@ -1974,10 +1985,14 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def, } else { if (ts->val_type == TEMP_VAL_REG) s->reg_to_temp[ts->reg] = -1; - ts->val_type = TEMP_VAL_REG; - ts->reg = reg; - ts->mem_coherent = 0; - s->reg_to_temp[reg] = arg; + if (IS_DEAD_ARG(i)) 
{ + ts->val_type = TEMP_VAL_DEAD; + } else { + ts->val_type = TEMP_VAL_REG; + ts->reg = reg; + ts->mem_coherent = 0; + s->reg_to_temp[reg] = arg; + } } } @@ -2007,7 +2022,7 @@ static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf, TCGOpcode opc; int op_index; const TCGOpDef *def; - unsigned int dead_iargs; + unsigned int dead_args; const TCGArg *args; #ifdef DEBUG_DISAS @@ -2058,8 +2073,8 @@ static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf, #if TCG_TARGET_REG_BITS == 64 case INDEX_op_mov_i64: #endif - dead_iargs = s->op_dead_iargs[op_index]; - tcg_reg_alloc_mov(s, def, args, dead_iargs); + dead_args = s->op_dead_args[op_index]; + tcg_reg_alloc_mov(s, def, args, dead_args); break; case INDEX_op_movi_i32: #if TCG_TARGET_REG_BITS == 64 @@ -2095,8 +2110,8 @@ static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf, tcg_out_label(s, args[0], (long)s->code_ptr); break; case INDEX_op_call: - dead_iargs = s->op_dead_iargs[op_index]; - args += tcg_reg_alloc_call(s, def, opc, args, dead_iargs); + dead_args = s->op_dead_args[op_index]; + args += tcg_reg_alloc_call(s, def, opc, args, dead_args); goto next; case INDEX_op_end: goto the_end; @@ -2104,8 +2119,8 @@ static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf, /* Note: in order to speed up the code, it would be much faster to have specialized register allocator functions for some common argument patterns */ - dead_iargs = s->op_dead_iargs[op_index]; - tcg_reg_alloc_op(s, def, opc, args, dead_iargs); + dead_args = s->op_dead_args[op_index]; + tcg_reg_alloc_op(s, def, opc, args, dead_args); break; } args += def->nb_args; @@ -129,7 +129,7 @@ typedef tcg_target_ulong TCGArg; We use plain int by default to avoid this runtime overhead. Users of tcg_gen_* don't need to know about any of this, and should treat TCGv as an opaque type. - In additon we do typechecking for different types of variables. TCGv_i32 + In addition we do typechecking for different types of variables. TCGv_i32 and TCGv_i64 are 32/64-bit variables respectively. TCGv and TCGv_ptr are aliases for target_ulong and host pointer sized values respectively. */ @@ -150,12 +150,19 @@ typedef struct int i64; } TCGv_i64; +typedef struct { + int iptr; +} TCGv_ptr; + #define MAKE_TCGV_I32(i) __extension__ \ ({ TCGv_i32 make_tcgv_tmp = {i}; make_tcgv_tmp;}) #define MAKE_TCGV_I64(i) __extension__ \ ({ TCGv_i64 make_tcgv_tmp = {i}; make_tcgv_tmp;}) +#define MAKE_TCGV_PTR(i) __extension__ \ + ({ TCGv_ptr make_tcgv_tmp = {i}; make_tcgv_tmp; }) #define GET_TCGV_I32(t) ((t).i32) #define GET_TCGV_I64(t) ((t).i64) +#define GET_TCGV_PTR(t) ((t).iptr) #if TCG_TARGET_REG_BITS == 32 #define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t)) #define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1) @@ -165,10 +172,17 @@ typedef struct typedef int TCGv_i32; typedef int TCGv_i64; +#if TCG_TARGET_REG_BITS == 32 +#define TCGv_ptr TCGv_i32 +#else +#define TCGv_ptr TCGv_i64 +#endif #define MAKE_TCGV_I32(x) (x) #define MAKE_TCGV_I64(x) (x) +#define MAKE_TCGV_PTR(x) (x) #define GET_TCGV_I32(t) (t) #define GET_TCGV_I64(t) (t) +#define GET_TCGV_PTR(t) (t) #if TCG_TARGET_REG_BITS == 32 #define TCGV_LOW(t) (t) @@ -252,9 +266,9 @@ typedef struct TCGTemp { unsigned int fixed_reg:1; unsigned int mem_coherent:1; unsigned int mem_allocated:1; - unsigned int temp_local:1; /* If true, the temp is saved accross + unsigned int temp_local:1; /* If true, the temp is saved across basic blocks. Otherwise, it is not - preserved accross basic blocks. 
*/ + preserved across basic blocks. */ unsigned int temp_allocated:1; /* never used for code gen */ /* index of next free temp of same base type, -1 if end */ int next_free_temp; @@ -286,8 +300,8 @@ struct TCGContext { uint16_t *tb_jmp_offset; /* != NULL if USE_DIRECT_JUMP */ /* liveness analysis */ - uint16_t *op_dead_iargs; /* for each operation, each bit tells if the - corresponding input argument is dead */ + uint16_t *op_dead_args; /* for each operation, each bit tells if the + corresponding argument is dead */ /* tells in which temporary a given register is. It does not take into account fixed registers */ @@ -459,25 +473,27 @@ do {\ void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs); #if TCG_TARGET_REG_BITS == 32 -#define tcg_const_ptr tcg_const_i32 -#define tcg_add_ptr tcg_add_i32 -#define tcg_sub_ptr tcg_sub_i32 -#define TCGv_ptr TCGv_i32 -#define GET_TCGV_PTR GET_TCGV_I32 -#define tcg_global_reg_new_ptr tcg_global_reg_new_i32 -#define tcg_global_mem_new_ptr tcg_global_mem_new_i32 -#define tcg_temp_new_ptr tcg_temp_new_i32 -#define tcg_temp_free_ptr tcg_temp_free_i32 +#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I32(n)) +#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I32(GET_TCGV_PTR(n)) + +#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32(V)) +#define tcg_global_reg_new_ptr(R, N) \ + TCGV_NAT_TO_PTR(tcg_global_reg_new_i32((R), (N))) +#define tcg_global_mem_new_ptr(R, O, N) \ + TCGV_NAT_TO_PTR(tcg_global_mem_new_i32((R), (O), (N))) +#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i32()) +#define tcg_temp_free_ptr(T) tcg_temp_free_i32(TCGV_PTR_TO_NAT(T)) #else -#define tcg_const_ptr tcg_const_i64 -#define tcg_add_ptr tcg_add_i64 -#define tcg_sub_ptr tcg_sub_i64 -#define TCGv_ptr TCGv_i64 -#define GET_TCGV_PTR GET_TCGV_I64 -#define tcg_global_reg_new_ptr tcg_global_reg_new_i64 -#define tcg_global_mem_new_ptr tcg_global_mem_new_i64 -#define tcg_temp_new_ptr tcg_temp_new_i64 -#define tcg_temp_free_ptr tcg_temp_free_i64 +#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I64(n)) +#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I64(GET_TCGV_PTR(n)) + +#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64(V)) +#define tcg_global_reg_new_ptr(R, N) \ + TCGV_NAT_TO_PTR(tcg_global_reg_new_i64((R), (N))) +#define tcg_global_mem_new_ptr(R, O, N) \ + TCGV_NAT_TO_PTR(tcg_global_mem_new_i64((R), (O), (N))) +#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i64()) +#define tcg_temp_free_ptr(T) tcg_temp_free_i64(TCGV_PTR_TO_NAT(T)) #endif void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags, diff --git a/tests/test-i386.c b/tests/test-i386.c index 8f481c7f7a..56ff1103fa 100644 --- a/tests/test-i386.c +++ b/tests/test-i386.c @@ -2281,7 +2281,7 @@ void test_sse_comi(double a1, double b1) } /* Force %xmm0 usage to avoid the case where both register index are 0 - to test intruction decoding more extensively */ + to test instruction decoding more extensively */ #define CVT_OP_XMM2MMX(op)\ {\ asm volatile (#op " %1, %0" : "=y" (r.q[0]) : "x" (a.dq) \ diff --git a/tests/test-mmap.c b/tests/test-mmap.c index c578e2572a..c67174a260 100644 --- a/tests/test-mmap.c +++ b/tests/test-mmap.c @@ -322,7 +322,7 @@ void check_file_unfixed_eof_mmaps(void) fail_unless (p1[(test_fsize & pagemask) / sizeof *p1 - 1] == ((test_fsize - sizeof *p1) / sizeof *p1)); - /* Verify that the end of page is accessable and zeroed. */ + /* Verify that the end of page is accessible and zeroed. 
*/ cp = (void *) p1; fail_unless (cp[pagesize - 4] == 0); munmap (p1, pagesize); @@ -365,7 +365,7 @@ void check_file_fixed_eof_mmaps(void) fail_unless (p1[(test_fsize & pagemask) / sizeof *p1 - 1] == ((test_fsize - sizeof *p1) / sizeof *p1)); - /* Verify that the end of page is accessable and zeroed. */ + /* Verify that the end of page is accessible and zeroed. */ cp = (void *)p1; fail_unless (cp[pagesize - 4] == 0); munmap (p1, pagesize); diff --git a/trace-events b/trace-events index 4f965e2ebd..e0e9574d21 100644 --- a/trace-events +++ b/trace-events @@ -205,6 +205,15 @@ disable usb_set_config(int addr, int config, int ret) "dev %d, config %d, ret %d disable usb_clear_device_feature(int addr, int feature, int ret) "dev %d, feature %d, ret %d" disable usb_set_device_feature(int addr, int feature, int ret) "dev %d, feature %d, ret %d" +# hw/scsi-bus.c +disable scsi_req_alloc(int target, int lun, int tag) "target %d lun %d tag %d" +disable scsi_req_data(int target, int lun, int tag, int len) "target %d lun %d tag %d len %d" +disable scsi_req_dequeue(int target, int lun, int tag) "target %d lun %d tag %d" +disable scsi_req_continue(int target, int lun, int tag) "target %d lun %d tag %d" +disable scsi_req_parsed(int target, int lun, int tag, int cmd, int mode, int xfer) "target %d lun %d tag %d command %d dir %d length %d" +disable scsi_req_parsed_lba(int target, int lun, int tag, int cmd, uint64_t lba) "target %d lun %d tag %d command %d lba %"PRIu64"" +disable scsi_req_parse_bad(int target, int lun, int tag, int cmd) "target %d lun %d tag %d command %d" + # vl.c disable vm_state_notify(int running, int reason) "running %d reason %d" @@ -220,6 +229,9 @@ disable qed_write_table(void *s, uint64_t offset, void *table, unsigned int inde disable qed_write_table_cb(void *s, void *table, int flush, int ret) "s %p table %p flush %d ret %d" # block/qed.c +disable qed_need_check_timer_cb(void *s) "s %p" +disable qed_start_need_check_timer(void *s) "s %p" +disable qed_cancel_need_check_timer(void *s) "s %p" disable qed_aio_complete(void *s, void *acb, int ret) "s %p acb %p ret %d" disable qed_aio_setup(void *s, void *acb, int64_t sector_num, int nb_sectors, void *opaque, int is_write) "s %p acb %p sector_num %"PRId64" nb_sectors %d opaque %p is_write %d" disable qed_aio_next_io(void *s, void *acb, int ret, uint64_t cur_pos) "s %p acb %p ret %d cur_pos %"PRIu64"" @@ -361,3 +373,16 @@ disable milkymist_uart_pulse_irq_tx(void) "Pulse IRQ TX" # hw/milkymist-vgafb.c disable milkymist_vgafb_memory_read(uint32_t addr, uint32_t value) "addr %08x value %08x" disable milkymist_vgafb_memory_write(uint32_t addr, uint32_t value) "addr %08x value %08x" + +# xen-all.c +disable xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: %#lx, size %#lx" + +# xen-mapcache.c +disable qemu_map_cache(uint64_t phys_addr) "want %#"PRIx64"" +disable qemu_remap_bucket(uint64_t index) "index %#"PRIx64"" +disable qemu_map_cache_return(void* ptr) "%p" +disable xen_map_block(uint64_t phys_addr, uint64_t size) "%#"PRIx64", size %#"PRIx64"" +disable xen_unmap_block(void* addr, unsigned long size) "%p, size %#lx" + +# exec.c +disable qemu_put_ram_ptr(void* addr) "%p" diff --git a/ui/curses.c b/ui/curses.c index 82bc614040..d29b6cf874 100644 --- a/ui/curses.c +++ b/ui/curses.c @@ -24,7 +24,6 @@ #include <curses.h> #ifndef _WIN32 -#include <signal.h> #include <sys/ioctl.h> #include <termios.h> #endif diff --git a/ui/qemu-spice.h b/ui/qemu-spice.h index 916e5dce21..3c6f1fe419 100644 --- a/ui/qemu-spice.h +++ b/ui/qemu-spice.h 
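Illustrative note, not part of the patch: each trace-events line added above declares one trace point in the usual one-line format (a leading "disable" keyword for events that start out off, the event name with typed arguments, and a printf-style format string), and the build generates a matching trace_<name>() function with those argument types for the emitting code to call. A hypothetical call site for the scsi_req_dequeue event added above, with placeholder variable names:

#include "trace.h"   /* header generated from trace-events */

/* Hypothetical caller; target_id, lun and tag are placeholders. */
static void example_dequeue(int target_id, int lun, int tag)
{
    trace_scsi_req_dequeue(target_id, lun, tag);
}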
@@ -47,8 +47,16 @@ CharDriverState *qemu_chr_open_spice(QemuOpts *opts); #else /* CONFIG_SPICE */ #define using_spice 0 -#define qemu_spice_set_passwd(_p, _f1, _f2) (-1) -#define qemu_spice_set_pw_expire(_e) (-1) +static inline int qemu_spice_set_passwd(const char *passwd, + bool fail_if_connected, + bool disconnect_if_connected) +{ + return -1; +} +static inline int qemu_spice_set_pw_expire(time_t expires) +{ + return -1; +} static inline int qemu_spice_migrate_info(const char *h, int p, int t, const char *s) { return -1; } @@ -28,10 +28,6 @@ #include <SDL.h> #include <SDL_syswm.h> -#ifndef _WIN32 -#include <signal.h> -#endif - #include "qemu-common.h" #include "console.h" #include "sysemu.h" @@ -831,6 +827,18 @@ void sdl_display_init(DisplayState *ds, int full_screen, int no_frame) if (!full_screen) { setenv("SDL_VIDEO_ALLOW_SCREENSAVER", "1", 0); } +#ifdef __linux__ + /* on Linux, SDL may use fbcon|directfb|svgalib when run without + * accessible $DISPLAY to open X11 window. This is often the case + * when qemu is run using sudo. But in this case, and when actually + * run in X11 environment, SDL fights with X11 for the video card, + * making current display unavailable, often until reboot. + * So make x11 the default SDL video driver if this variable is unset. + * This is a bit hackish but saves us from bigger problem. + * Maybe it's a good idea to fix this in SDL instead. + */ + setenv("SDL_VIDEODRIVER", "x11", 0); +#endif /* Enable normal up/down events for Caps-Lock and Num-Lock keys. * This requires SDL >= 1.2.14. */ diff --git a/ui/spice-core.c b/ui/spice-core.c index ef56ed61a9..dd9905be36 100644 --- a/ui/spice-core.c +++ b/ui/spice-core.c @@ -299,8 +299,6 @@ static int parse_name(const char *string, const char *optname, exit(1); } -#if SPICE_SERVER_VERSION >= 0x000600 /* 0.6.0 */ - static const char *stream_video_names[] = { [ SPICE_STREAM_VIDEO_OFF ] = "off", [ SPICE_STREAM_VIDEO_ALL ] = "all", @@ -309,8 +307,6 @@ static const char *stream_video_names[] = { #define parse_stream_video(_name) \ name2enum(_name, stream_video_names, ARRAY_SIZE(stream_video_names)) -#endif /* >= 0.6.0 */ - static const char *compression_names[] = { [ SPICE_IMAGE_COMPRESS_OFF ] = "off", [ SPICE_IMAGE_COMPRESS_AUTO_GLZ ] = "auto_glz", @@ -549,11 +545,29 @@ void qemu_spice_init(void) if (password) { spice_server_set_ticket(spice_server, password, 0, 0, 0); } + if (qemu_opt_get_bool(opts, "sasl", 0)) { +#if SPICE_SERVER_VERSION >= 0x000900 /* 0.9.0 */ + if (spice_server_set_sasl_appname(spice_server, "qemu") == -1 || + spice_server_set_sasl(spice_server, 1) == -1) { + fprintf(stderr, "spice: failed to enable sasl\n"); + exit(1); + } +#else + fprintf(stderr, "spice: sasl is not available (spice >= 0.9 required)\n"); + exit(1); +#endif + } if (qemu_opt_get_bool(opts, "disable-ticketing", 0)) { auth = "none"; spice_server_set_noauth(spice_server); } +#if SPICE_SERVER_VERSION >= 0x000801 + if (qemu_opt_get_bool(opts, "disable-copy-paste", 0)) { + spice_server_set_agent_copypaste(spice_server, false); + } +#endif + compression = SPICE_IMAGE_COMPRESS_AUTO_GLZ; str = qemu_opt_get(opts, "image-compression"); if (str) { @@ -575,8 +589,6 @@ void qemu_spice_init(void) } spice_server_set_zlib_glz_compression(spice_server, wan_compr); -#if SPICE_SERVER_VERSION >= 0x000600 /* 0.6.0 */ - str = qemu_opt_get(opts, "streaming-video"); if (str) { int streaming_video = parse_stream_video(str); @@ -588,8 +600,6 @@ void qemu_spice_init(void) spice_server_set_playback_compression (spice_server, qemu_opt_get_bool(opts, 
"playback-compression", 1)); -#endif /* >= 0.6.0 */ - qemu_opt_foreach(opts, add_channel, NULL, 0); spice_server_init(spice_server, &core_interface); diff --git a/ui/spice-display.c b/ui/spice-display.c index 020b423bd6..15f0704eaf 100644 --- a/ui/spice-display.c +++ b/ui/spice-display.c @@ -62,14 +62,7 @@ void qemu_spice_rect_union(QXLRect *dest, const QXLRect *r) dest->right = MAX(dest->right, r->right); } -/* - * Called from spice server thread context (via interface_get_command). - * - * We must aquire the global qemu mutex here to make sure the - * DisplayState (+DisplaySurface) we are accessing doesn't change - * underneath us. - */ -SimpleSpiceUpdate *qemu_spice_create_update(SimpleSpiceDisplay *ssd) +static SimpleSpiceUpdate *qemu_spice_create_update(SimpleSpiceDisplay *ssd) { SimpleSpiceUpdate *update; QXLDrawable *drawable; @@ -78,9 +71,7 @@ SimpleSpiceUpdate *qemu_spice_create_update(SimpleSpiceDisplay *ssd) uint8_t *src, *dst; int by, bw, bh; - qemu_mutex_lock_iothread(); if (qemu_spice_rect_is_empty(&ssd->dirty)) { - qemu_mutex_unlock_iothread(); return NULL; }; @@ -141,7 +132,6 @@ SimpleSpiceUpdate *qemu_spice_create_update(SimpleSpiceDisplay *ssd) cmd->data = (intptr_t)drawable; memset(&ssd->dirty, 0, sizeof(ssd->dirty)); - qemu_mutex_unlock_iothread(); return update; } @@ -186,18 +176,14 @@ void qemu_spice_create_host_primary(SimpleSpiceDisplay *ssd) surface.mem = (intptr_t)ssd->buf; surface.group_id = MEMSLOT_GROUP_HOST; - qemu_mutex_unlock_iothread(); ssd->worker->create_primary_surface(ssd->worker, 0, &surface); - qemu_mutex_lock_iothread(); } void qemu_spice_destroy_host_primary(SimpleSpiceDisplay *ssd) { dprint(1, "%s:\n", __FUNCTION__); - qemu_mutex_unlock_iothread(); ssd->worker->destroy_primary_surface(ssd->worker, 0); - qemu_mutex_lock_iothread(); } void qemu_spice_vm_change_state_handler(void *opaque, int running, int reason) @@ -207,9 +193,7 @@ void qemu_spice_vm_change_state_handler(void *opaque, int running, int reason) if (running) { ssd->worker->start(ssd->worker); } else { - qemu_mutex_unlock_iothread(); ssd->worker->stop(ssd->worker); - qemu_mutex_lock_iothread(); } ssd->running = running; } @@ -241,6 +225,12 @@ void qemu_spice_display_resize(SimpleSpiceDisplay *ssd) qemu_pf_conv_put(ssd->conv); ssd->conv = NULL; + qemu_mutex_lock(&ssd->lock); + if (ssd->update != NULL) { + qemu_spice_destroy_update(ssd, ssd->update); + ssd->update = NULL; + } + qemu_mutex_unlock(&ssd->lock); qemu_spice_destroy_host_primary(ssd); qemu_spice_create_host_primary(ssd); @@ -252,6 +242,24 @@ void qemu_spice_display_refresh(SimpleSpiceDisplay *ssd) { dprint(3, "%s:\n", __FUNCTION__); vga_hw_update(); + + qemu_mutex_lock(&ssd->lock); + if (ssd->update == NULL) { + ssd->update = qemu_spice_create_update(ssd); + ssd->notify++; + } + if (ssd->cursor) { + ssd->ds->cursor_define(ssd->cursor); + cursor_put(ssd->cursor); + ssd->cursor = NULL; + } + if (ssd->mouse_x != -1 && ssd->mouse_y != -1) { + ssd->ds->mouse_set(ssd->mouse_x, ssd->mouse_y, 1); + ssd->mouse_x = -1; + ssd->mouse_y = -1; + } + qemu_mutex_unlock(&ssd->lock); + if (ssd->notify) { ssd->notify = 0; ssd->worker->wakeup(ssd->worker); @@ -298,14 +306,20 @@ static int interface_get_command(QXLInstance *sin, struct QXLCommandExt *ext) { SimpleSpiceDisplay *ssd = container_of(sin, SimpleSpiceDisplay, qxl); SimpleSpiceUpdate *update; + int ret = false; dprint(3, "%s:\n", __FUNCTION__); - update = qemu_spice_create_update(ssd); - if (update == NULL) { - return false; + + qemu_mutex_lock(&ssd->lock); + if (ssd->update != NULL) { + 
update = ssd->update; + ssd->update = NULL; + *ext = update->ext; + ret = true; } - *ext = update->ext; - return true; + qemu_mutex_unlock(&ssd->lock); + + return ret; } static int interface_req_cmd_notification(QXLInstance *sin) @@ -398,6 +412,9 @@ void qemu_spice_display_init(DisplayState *ds) { assert(sdpy.ds == NULL); sdpy.ds = ds; + qemu_mutex_init(&sdpy.lock); + sdpy.mouse_x = -1; + sdpy.mouse_y = -1; sdpy.bufsize = (16 * 1024 * 1024); sdpy.buf = qemu_malloc(sdpy.bufsize); register_displaychangelistener(ds, &display_listener); diff --git a/ui/spice-display.h b/ui/spice-display.h index aef0464f86..2f95f68aad 100644 --- a/ui/spice-display.h +++ b/ui/spice-display.h @@ -19,6 +19,8 @@ #include <spice/enums.h> #include <spice/qxl_dev.h> +#include "qemu-thread.h" +#include "console.h" #include "pflib.h" #define NUM_MEMSLOTS 8 @@ -31,7 +33,10 @@ #define NUM_SURFACES 1024 -typedef struct SimpleSpiceDisplay { +typedef struct SimpleSpiceDisplay SimpleSpiceDisplay; +typedef struct SimpleSpiceUpdate SimpleSpiceUpdate; + +struct SimpleSpiceDisplay { DisplayState *ds; void *buf; int bufsize; @@ -43,19 +48,28 @@ typedef struct SimpleSpiceDisplay { QXLRect dirty; int notify; int running; -} SimpleSpiceDisplay; -typedef struct SimpleSpiceUpdate { + /* + * All struct members below this comment can be accessed from + * both spice server and qemu (iothread) context and any access + * to them must be protected by the lock. + */ + QemuMutex lock; + SimpleSpiceUpdate *update; + QEMUCursor *cursor; + int mouse_x, mouse_y; +}; + +struct SimpleSpiceUpdate { QXLDrawable drawable; QXLImage image; QXLCommandExt ext; uint8_t *bitmap; -} SimpleSpiceUpdate; +}; int qemu_spice_rect_is_empty(const QXLRect* r); void qemu_spice_rect_union(QXLRect *dest, const QXLRect *r); -SimpleSpiceUpdate *qemu_spice_create_update(SimpleSpiceDisplay *sdpy); void qemu_spice_destroy_update(SimpleSpiceDisplay *sdpy, SimpleSpiceUpdate *update); void qemu_spice_create_host_memslot(SimpleSpiceDisplay *ssd); void qemu_spice_create_host_primary(SimpleSpiceDisplay *ssd); @@ -39,7 +39,6 @@ #else #include <bus/usb/usb.h> #endif -#include <signal.h> /* This value has maximum potential at 16. 
* You should also set hw.usb.debug to gain @@ -126,6 +125,7 @@ static void usb_host_handle_reset(USBDevice *dev) * and return appropriate response */ static int usb_host_handle_control(USBDevice *dev, + USBPacket *p, int request, int value, int index, diff --git a/usb-linux.c b/usb-linux.c index 1f33c2c230..fcfa09e4b8 100644 --- a/usb-linux.c +++ b/usb-linux.c @@ -37,7 +37,6 @@ #include <dirent.h> #include <sys/ioctl.h> -#include <signal.h> #include <linux/usbdevice_fs.h> #include <linux/version.h> @@ -54,15 +53,7 @@ struct usb_ctrltransfer { void *data; }; -struct usb_ctrlrequest { - uint8_t bRequestType; - uint8_t bRequest; - uint16_t wValue; - uint16_t wIndex; - uint16_t wLength; -}; - -typedef int USBScanFunc(void *opaque, int bus_num, int addr, int devpath, +typedef int USBScanFunc(void *opaque, int bus_num, int addr, char *port, int class_id, int vendor_id, int product_id, const char *product_name, int speed); @@ -78,7 +69,8 @@ typedef int USBScanFunc(void *opaque, int bus_num, int addr, int devpath, #define USBPROCBUS_PATH "/proc/bus/usb" #define PRODUCT_NAME_SZ 32 -#define MAX_ENDPOINTS 16 +#define MAX_ENDPOINTS 15 +#define MAX_PORTLEN 16 #define USBDEVBUS_PATH "/dev/bus/usb" #define USBSYSBUS_PATH "/sys/bus/usb" @@ -92,34 +84,29 @@ static char *usb_host_device_path; static int usb_fs_type; /* endpoint association data */ +#define ISO_FRAME_DESC_PER_URB 32 +#define ISO_URB_COUNT 3 +#define INVALID_EP_TYPE 255 + +/* devio.c limits single requests to 16k */ +#define MAX_USBFS_BUFFER_SIZE 16384 + +typedef struct AsyncURB AsyncURB; + struct endp_data { uint8_t type; uint8_t halted; -}; - -enum { - CTRL_STATE_IDLE = 0, - CTRL_STATE_SETUP, - CTRL_STATE_DATA, - CTRL_STATE_ACK -}; - -/* - * Control transfer state. - * Note that 'buffer' _must_ follow 'req' field because - * we need contiguous buffer when we submit control URB. 
- */ -struct ctrl_struct { - uint16_t len; - uint16_t offset; - uint8_t state; - struct usb_ctrlrequest req; - uint8_t buffer[8192]; + uint8_t iso_started; + AsyncURB *iso_urb; + int iso_urb_idx; + int iso_buffer_used; + int max_packet_size; }; struct USBAutoFilter { uint32_t bus_num; uint32_t addr; + char *port; uint32_t vendor_id; uint32_t product_id; }; @@ -135,13 +122,13 @@ typedef struct USBHostDevice { int closing; Notifier exit; - struct ctrl_struct ctrl; struct endp_data endp_table[MAX_ENDPOINTS]; + QLIST_HEAD(, AsyncURB) aurbs; /* Host side address */ int bus_num; int addr; - int devpath; + char port[MAX_PORTLEN]; struct USBAutoFilter match; QTAILQ_ENTRY(USBHostDevice) next; @@ -160,6 +147,11 @@ static int is_isoc(USBHostDevice *s, int ep) return s->endp_table[ep - 1].type == USBDEVFS_URB_TYPE_ISO; } +static int is_valid(USBHostDevice *s, int ep) +{ + return s->endp_table[ep - 1].type != INVALID_EP_TYPE; +} + static int is_halted(USBHostDevice *s, int ep) { return s->endp_table[ep - 1].halted; @@ -175,50 +167,106 @@ static void set_halt(USBHostDevice *s, int ep) s->endp_table[ep - 1].halted = 1; } +static int is_iso_started(USBHostDevice *s, int ep) +{ + return s->endp_table[ep - 1].iso_started; +} + +static void clear_iso_started(USBHostDevice *s, int ep) +{ + s->endp_table[ep - 1].iso_started = 0; +} + +static void set_iso_started(USBHostDevice *s, int ep) +{ + s->endp_table[ep - 1].iso_started = 1; +} + +static void set_iso_urb(USBHostDevice *s, int ep, AsyncURB *iso_urb) +{ + s->endp_table[ep - 1].iso_urb = iso_urb; +} + +static AsyncURB *get_iso_urb(USBHostDevice *s, int ep) +{ + return s->endp_table[ep - 1].iso_urb; +} + +static void set_iso_urb_idx(USBHostDevice *s, int ep, int i) +{ + s->endp_table[ep - 1].iso_urb_idx = i; +} + +static int get_iso_urb_idx(USBHostDevice *s, int ep) +{ + return s->endp_table[ep - 1].iso_urb_idx; +} + +static void set_iso_buffer_used(USBHostDevice *s, int ep, int i) +{ + s->endp_table[ep - 1].iso_buffer_used = i; +} + +static int get_iso_buffer_used(USBHostDevice *s, int ep) +{ + return s->endp_table[ep - 1].iso_buffer_used; +} + +static void set_max_packet_size(USBHostDevice *s, int ep, uint8_t *descriptor) +{ + int raw = descriptor[4] + (descriptor[5] << 8); + int size, microframes; + + size = raw & 0x7ff; + switch ((raw >> 11) & 3) { + case 1: microframes = 2; break; + case 2: microframes = 3; break; + default: microframes = 1; break; + } + DPRINTF("husb: max packet size: 0x%x -> %d x %d\n", + raw, microframes, size); + s->endp_table[ep - 1].max_packet_size = size * microframes; +} + +static int get_max_packet_size(USBHostDevice *s, int ep) +{ + return s->endp_table[ep - 1].max_packet_size; +} + /* * Async URB state. - * We always allocate one isoc descriptor even for bulk transfers + * We always allocate iso packet descriptors even for bulk transfers * to simplify allocation and casts. 
*/ -typedef struct AsyncURB +struct AsyncURB { struct usbdevfs_urb urb; - struct usbdevfs_iso_packet_desc isocpd; + struct usbdevfs_iso_packet_desc isocpd[ISO_FRAME_DESC_PER_URB]; + USBHostDevice *hdev; + QLIST_ENTRY(AsyncURB) next; + /* For regular async urbs */ USBPacket *packet; - USBHostDevice *hdev; -} AsyncURB; + int more; /* large transfer, more urbs follow */ + + /* For buffered iso handling */ + int iso_frame_idx; /* -1 means in flight */ +}; -static AsyncURB *async_alloc(void) +static AsyncURB *async_alloc(USBHostDevice *s) { - return (AsyncURB *) qemu_mallocz(sizeof(AsyncURB)); + AsyncURB *aurb = qemu_mallocz(sizeof(AsyncURB)); + aurb->hdev = s; + QLIST_INSERT_HEAD(&s->aurbs, aurb, next); + return aurb; } static void async_free(AsyncURB *aurb) { + QLIST_REMOVE(aurb, next); qemu_free(aurb); } -static void async_complete_ctrl(USBHostDevice *s, USBPacket *p) -{ - switch(s->ctrl.state) { - case CTRL_STATE_SETUP: - if (p->len < s->ctrl.len) - s->ctrl.len = p->len; - s->ctrl.state = CTRL_STATE_DATA; - p->len = 8; - break; - - case CTRL_STATE_ACK: - s->ctrl.state = CTRL_STATE_IDLE; - p->len = 0; - break; - - default: - break; - } -} - static void async_complete(void *opaque) { USBHostDevice *s = opaque; @@ -244,18 +292,25 @@ static void async_complete(void *opaque) return; } - p = aurb->packet; - DPRINTF("husb: async completed. aurb %p status %d alen %d\n", aurb, aurb->urb.status, aurb->urb.actual_length); + /* If this is a buffered iso urb mark it as complete and don't do + anything else (it is handled further in usb_host_handle_iso_data) */ + if (aurb->iso_frame_idx == -1) { + if (aurb->urb.status == -EPIPE) { + set_halt(s, aurb->urb.endpoint & 0xf); + } + aurb->iso_frame_idx = 0; + continue; + } + + p = aurb->packet; + if (p) { switch (aurb->urb.status) { case 0: - p->len = aurb->urb.actual_length; - if (aurb->urb.type == USBDEVFS_URB_TYPE_CONTROL) { - async_complete_ctrl(s, p); - } + p->len += aurb->urb.actual_length; break; case -EPIPE: @@ -268,26 +323,36 @@ static void async_complete(void *opaque) break; } - usb_packet_complete(p); + if (aurb->urb.type == USBDEVFS_URB_TYPE_CONTROL) { + usb_generic_async_ctrl_complete(&s->dev, p); + } else if (!aurb->more) { + usb_packet_complete(&s->dev, p); + } } async_free(aurb); } } -static void async_cancel(USBPacket *unused, void *opaque) +static void usb_host_async_cancel(USBDevice *dev, USBPacket *p) { - AsyncURB *aurb = opaque; - USBHostDevice *s = aurb->hdev; + USBHostDevice *s = DO_UPCAST(USBHostDevice, dev, dev); + AsyncURB *aurb; + + QLIST_FOREACH(aurb, &s->aurbs, next) { + if (p != aurb->packet) { + continue; + } - DPRINTF("husb: async cancel. aurb %p\n", aurb); + DPRINTF("husb: async cancel: packet %p, aurb %p\n", p, aurb); - /* Mark it as dead (see async_complete above) */ - aurb->packet = NULL; + /* Mark it as dead (see async_complete above) */ + aurb->packet = NULL; - int r = ioctl(s->fd, USBDEVFS_DISCARDURB, aurb); - if (r < 0) { - DPRINTF("husb: async. discard urb failed errno %d\n", errno); + int r = ioctl(s->fd, USBDEVFS_DISCARDURB, aurb); + if (r < 0) { + DPRINTF("husb: async. discard urb failed errno %d\n", errno); + } } } @@ -415,69 +480,261 @@ static void usb_host_handle_destroy(USBDevice *dev) static int usb_linux_update_endp_table(USBHostDevice *s); -static int usb_host_handle_data(USBHostDevice *s, USBPacket *p) +/* iso data is special, we need to keep enough urbs in flight to make sure + that the controller never runs out of them, otherwise the device will + likely suffer a buffer underrun / overrun. 
*/ +static AsyncURB *usb_host_alloc_iso(USBHostDevice *s, uint8_t ep, int in) { - struct usbdevfs_urb *urb; AsyncURB *aurb; - int ret; + int i, j, len = get_max_packet_size(s, ep); + + aurb = qemu_mallocz(ISO_URB_COUNT * sizeof(*aurb)); + for (i = 0; i < ISO_URB_COUNT; i++) { + aurb[i].urb.endpoint = ep; + aurb[i].urb.buffer_length = ISO_FRAME_DESC_PER_URB * len; + aurb[i].urb.buffer = qemu_malloc(aurb[i].urb.buffer_length); + aurb[i].urb.type = USBDEVFS_URB_TYPE_ISO; + aurb[i].urb.flags = USBDEVFS_URB_ISO_ASAP; + aurb[i].urb.number_of_packets = ISO_FRAME_DESC_PER_URB; + for (j = 0 ; j < ISO_FRAME_DESC_PER_URB; j++) + aurb[i].urb.iso_frame_desc[j].length = len; + if (in) { + aurb[i].urb.endpoint |= 0x80; + /* Mark as fully consumed (idle) */ + aurb[i].iso_frame_idx = ISO_FRAME_DESC_PER_URB; + } + } + set_iso_urb(s, ep, aurb); - aurb = async_alloc(); - aurb->hdev = s; - aurb->packet = p; + return aurb; +} - urb = &aurb->urb; +static void usb_host_stop_n_free_iso(USBHostDevice *s, uint8_t ep) +{ + AsyncURB *aurb; + int i, ret, killed = 0, free = 1; + + aurb = get_iso_urb(s, ep); + if (!aurb) { + return; + } + + for (i = 0; i < ISO_URB_COUNT; i++) { + /* in flight? */ + if (aurb[i].iso_frame_idx == -1) { + ret = ioctl(s->fd, USBDEVFS_DISCARDURB, &aurb[i]); + if (ret < 0) { + printf("husb: discard isoc in urb failed errno %d\n", errno); + free = 0; + continue; + } + killed++; + } + } + + /* Make sure any urbs we've killed are reaped before we free them */ + if (killed) { + async_complete(s); + } + + for (i = 0; i < ISO_URB_COUNT; i++) { + qemu_free(aurb[i].urb.buffer); + } + + if (free) + qemu_free(aurb); + else + printf("husb: leaking iso urbs because of discard failure\n"); + set_iso_urb(s, ep, NULL); + set_iso_urb_idx(s, ep, 0); + clear_iso_started(s, ep); +} + +static int urb_status_to_usb_ret(int status) +{ + switch (status) { + case -EPIPE: + return USB_RET_STALL; + default: + return USB_RET_NAK; + } +} + +static int usb_host_handle_iso_data(USBHostDevice *s, USBPacket *p, int in) +{ + AsyncURB *aurb; + int i, j, ret, max_packet_size, offset, len = 0; + + max_packet_size = get_max_packet_size(s, p->devep); + if (max_packet_size == 0) + return USB_RET_NAK; + + aurb = get_iso_urb(s, p->devep); + if (!aurb) { + aurb = usb_host_alloc_iso(s, p->devep, in); + } + + i = get_iso_urb_idx(s, p->devep); + j = aurb[i].iso_frame_idx; + if (j >= 0 && j < ISO_FRAME_DESC_PER_URB) { + if (in) { + /* Check urb status */ + if (aurb[i].urb.status) { + len = urb_status_to_usb_ret(aurb[i].urb.status); + /* Move to the next urb */ + aurb[i].iso_frame_idx = ISO_FRAME_DESC_PER_URB - 1; + /* Check frame status */ + } else if (aurb[i].urb.iso_frame_desc[j].status) { + len = urb_status_to_usb_ret( + aurb[i].urb.iso_frame_desc[j].status); + /* Check the frame fits */ + } else if (aurb[i].urb.iso_frame_desc[j].actual_length > p->len) { + printf("husb: received iso data is larger then packet\n"); + len = USB_RET_NAK; + /* All good copy data over */ + } else { + len = aurb[i].urb.iso_frame_desc[j].actual_length; + memcpy(p->data, + aurb[i].urb.buffer + + j * aurb[i].urb.iso_frame_desc[0].length, + len); + } + } else { + len = p->len; + offset = (j == 0) ? 
0 : get_iso_buffer_used(s, p->devep); + + /* Check the frame fits */ + if (len > max_packet_size) { + printf("husb: send iso data is larger then max packet size\n"); + return USB_RET_NAK; + } + + /* All good copy data over */ + memcpy(aurb[i].urb.buffer + offset, p->data, len); + aurb[i].urb.iso_frame_desc[j].length = len; + offset += len; + set_iso_buffer_used(s, p->devep, offset); + + /* Start the stream once we have buffered enough data */ + if (!is_iso_started(s, p->devep) && i == 1 && j == 8) { + set_iso_started(s, p->devep); + } + } + aurb[i].iso_frame_idx++; + if (aurb[i].iso_frame_idx == ISO_FRAME_DESC_PER_URB) { + i = (i + 1) % ISO_URB_COUNT; + set_iso_urb_idx(s, p->devep, i); + } + } else { + if (in) { + set_iso_started(s, p->devep); + } else { + DPRINTF("hubs: iso out error no free buffer, dropping packet\n"); + } + } + + if (is_iso_started(s, p->devep)) { + /* (Re)-submit all fully consumed / filled urbs */ + for (i = 0; i < ISO_URB_COUNT; i++) { + if (aurb[i].iso_frame_idx == ISO_FRAME_DESC_PER_URB) { + ret = ioctl(s->fd, USBDEVFS_SUBMITURB, &aurb[i]); + if (ret < 0) { + printf("husb error submitting iso urb %d: %d\n", i, errno); + if (!in || len == 0) { + switch(errno) { + case ETIMEDOUT: + len = USB_RET_NAK; + break; + case EPIPE: + default: + len = USB_RET_STALL; + } + } + break; + } + aurb[i].iso_frame_idx = -1; + } + } + } + + return len; +} + +static int usb_host_handle_data(USBDevice *dev, USBPacket *p) +{ + USBHostDevice *s = DO_UPCAST(USBHostDevice, dev, dev); + struct usbdevfs_urb *urb; + AsyncURB *aurb; + int ret, rem; + uint8_t *pbuf; + uint8_t ep; + + if (!is_valid(s, p->devep)) { + return USB_RET_NAK; + } if (p->pid == USB_TOKEN_IN) { - urb->endpoint = p->devep | 0x80; + ep = p->devep | 0x80; } else { - urb->endpoint = p->devep; + ep = p->devep; } if (is_halted(s, p->devep)) { - ret = ioctl(s->fd, USBDEVFS_CLEAR_HALT, &urb->endpoint); + ret = ioctl(s->fd, USBDEVFS_CLEAR_HALT, &ep); if (ret < 0) { DPRINTF("husb: failed to clear halt. ep 0x%x errno %d\n", - urb->endpoint, errno); + ep, errno); return USB_RET_NAK; } clear_halt(s, p->devep); } - urb->buffer = p->data; - urb->buffer_length = p->len; - if (is_isoc(s, p->devep)) { - /* Setup ISOC transfer */ - urb->type = USBDEVFS_URB_TYPE_ISO; - urb->flags = USBDEVFS_URB_ISO_ASAP; - urb->number_of_packets = 1; - urb->iso_frame_desc[0].length = p->len; - } else { - /* Setup bulk transfer */ - urb->type = USBDEVFS_URB_TYPE_BULK; + return usb_host_handle_iso_data(s, p, p->pid == USB_TOKEN_IN); } - urb->usercontext = s; + rem = p->len; + pbuf = p->data; + p->len = 0; + while (rem) { + aurb = async_alloc(s); + aurb->packet = p; - ret = ioctl(s->fd, USBDEVFS_SUBMITURB, urb); + urb = &aurb->urb; + urb->endpoint = ep; + urb->type = USBDEVFS_URB_TYPE_BULK; + urb->usercontext = s; + urb->buffer = pbuf; - DPRINTF("husb: data submit. ep 0x%x len %u aurb %p\n", - urb->endpoint, p->len, aurb); + if (rem > MAX_USBFS_BUFFER_SIZE) { + urb->buffer_length = MAX_USBFS_BUFFER_SIZE; + aurb->more = 1; + } else { + urb->buffer_length = rem; + aurb->more = 0; + } + pbuf += urb->buffer_length; + rem -= urb->buffer_length; - if (ret < 0) { - DPRINTF("husb: submit failed. 
errno %d\n", errno); - async_free(aurb); + ret = ioctl(s->fd, USBDEVFS_SUBMITURB, urb); - switch(errno) { - case ETIMEDOUT: - return USB_RET_NAK; - case EPIPE: - default: - return USB_RET_STALL; + DPRINTF("husb: data submit: ep 0x%x, len %u, more %d, packet %p, aurb %p\n", + urb->endpoint, urb->buffer_length, aurb->more, p, aurb); + + if (ret < 0) { + DPRINTF("husb: submit failed. errno %d\n", errno); + async_free(aurb); + + switch(errno) { + case ETIMEDOUT: + return USB_RET_NAK; + case EPIPE: + default: + return USB_RET_STALL; + } } } - usb_defer_packet(p, async_cancel, aurb); return USB_RET_ASYNC; } @@ -515,7 +772,13 @@ static int usb_host_set_config(USBHostDevice *s, int config) static int usb_host_set_interface(USBHostDevice *s, int iface, int alt) { struct usbdevfs_setinterface si; - int ret; + int i, ret; + + for (i = 1; i <= MAX_ENDPOINTS; i++) { + if (is_isoc(s, i)) { + usb_host_stop_n_free_iso(s, i); + } + } si.interface = iface; si.altsetting = alt; @@ -531,50 +794,43 @@ static int usb_host_set_interface(USBHostDevice *s, int iface, int alt) return 0; } -static int usb_host_handle_control(USBHostDevice *s, USBPacket *p) +static int usb_host_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) { + USBHostDevice *s = DO_UPCAST(USBHostDevice, dev, dev); struct usbdevfs_urb *urb; AsyncURB *aurb; - int ret, value, index; - int buffer_len; + int ret; /* * Process certain standard device requests. * These are infrequent and are processed synchronously. */ - value = le16_to_cpu(s->ctrl.req.wValue); - index = le16_to_cpu(s->ctrl.req.wIndex); + /* Note request is (bRequestType << 8) | bRequest */ DPRINTF("husb: ctrl type 0x%x req 0x%x val 0x%x index %u len %u\n", - s->ctrl.req.bRequestType, s->ctrl.req.bRequest, value, index, - s->ctrl.len); + request >> 8, request & 0xff, value, index, length); - if (s->ctrl.req.bRequestType == 0) { - switch (s->ctrl.req.bRequest) { - case USB_REQ_SET_ADDRESS: - return usb_host_set_address(s, value); + switch (request) { + case DeviceOutRequest | USB_REQ_SET_ADDRESS: + return usb_host_set_address(s, value); - case USB_REQ_SET_CONFIGURATION: - return usb_host_set_config(s, value & 0xff); - } - } + case DeviceOutRequest | USB_REQ_SET_CONFIGURATION: + return usb_host_set_config(s, value & 0xff); - if (s->ctrl.req.bRequestType == 1 && - s->ctrl.req.bRequest == USB_REQ_SET_INTERFACE) { + case InterfaceOutRequest | USB_REQ_SET_INTERFACE: return usb_host_set_interface(s, index, value); } /* The rest are asynchronous */ - buffer_len = 8 + s->ctrl.len; - if (buffer_len > sizeof(s->ctrl.buffer)) { - fprintf(stderr, "husb: ctrl buffer too small (%u > %zu)\n", - buffer_len, sizeof(s->ctrl.buffer)); + if (length > sizeof(dev->data_buf)) { + fprintf(stderr, "husb: ctrl buffer too small (%d > %zu)\n", + length, sizeof(dev->data_buf)); return USB_RET_STALL; } - aurb = async_alloc(); - aurb->hdev = s; + aurb = async_alloc(s); aurb->packet = p; /* @@ -588,8 +844,8 @@ static int usb_host_handle_control(USBHostDevice *s, USBPacket *p) urb->type = USBDEVFS_URB_TYPE_CONTROL; urb->endpoint = p->devep; - urb->buffer = &s->ctrl.req; - urb->buffer_length = buffer_len; + urb->buffer = &dev->setup_buf; + urb->buffer_length = length + 8; urb->usercontext = s; @@ -610,174 +866,9 @@ static int usb_host_handle_control(USBHostDevice *s, USBPacket *p) } } - usb_defer_packet(p, async_cancel, aurb); return USB_RET_ASYNC; } -static int do_token_setup(USBDevice *dev, USBPacket *p) -{ - USBHostDevice *s = (USBHostDevice *) dev; - int ret 
= 0; - - if (p->len != 8) { - return USB_RET_STALL; - } - - memcpy(&s->ctrl.req, p->data, 8); - s->ctrl.len = le16_to_cpu(s->ctrl.req.wLength); - s->ctrl.offset = 0; - s->ctrl.state = CTRL_STATE_SETUP; - - if (s->ctrl.req.bRequestType & USB_DIR_IN) { - ret = usb_host_handle_control(s, p); - if (ret < 0) { - return ret; - } - - if (ret < s->ctrl.len) { - s->ctrl.len = ret; - } - s->ctrl.state = CTRL_STATE_DATA; - } else { - if (s->ctrl.len == 0) { - s->ctrl.state = CTRL_STATE_ACK; - } else { - s->ctrl.state = CTRL_STATE_DATA; - } - } - - return ret; -} - -static int do_token_in(USBDevice *dev, USBPacket *p) -{ - USBHostDevice *s = (USBHostDevice *) dev; - int ret = 0; - - if (p->devep != 0) { - return usb_host_handle_data(s, p); - } - - switch(s->ctrl.state) { - case CTRL_STATE_ACK: - if (!(s->ctrl.req.bRequestType & USB_DIR_IN)) { - ret = usb_host_handle_control(s, p); - if (ret == USB_RET_ASYNC) { - return USB_RET_ASYNC; - } - s->ctrl.state = CTRL_STATE_IDLE; - return ret > 0 ? 0 : ret; - } - - return 0; - - case CTRL_STATE_DATA: - if (s->ctrl.req.bRequestType & USB_DIR_IN) { - int len = s->ctrl.len - s->ctrl.offset; - if (len > p->len) { - len = p->len; - } - memcpy(p->data, s->ctrl.buffer + s->ctrl.offset, len); - s->ctrl.offset += len; - if (s->ctrl.offset >= s->ctrl.len) { - s->ctrl.state = CTRL_STATE_ACK; - } - return len; - } - - s->ctrl.state = CTRL_STATE_IDLE; - return USB_RET_STALL; - - default: - return USB_RET_STALL; - } -} - -static int do_token_out(USBDevice *dev, USBPacket *p) -{ - USBHostDevice *s = (USBHostDevice *) dev; - - if (p->devep != 0) { - return usb_host_handle_data(s, p); - } - - switch(s->ctrl.state) { - case CTRL_STATE_ACK: - if (s->ctrl.req.bRequestType & USB_DIR_IN) { - s->ctrl.state = CTRL_STATE_IDLE; - /* transfer OK */ - } else { - /* ignore additional output */ - } - return 0; - - case CTRL_STATE_DATA: - if (!(s->ctrl.req.bRequestType & USB_DIR_IN)) { - int len = s->ctrl.len - s->ctrl.offset; - if (len > p->len) { - len = p->len; - } - memcpy(s->ctrl.buffer + s->ctrl.offset, p->data, len); - s->ctrl.offset += len; - if (s->ctrl.offset >= s->ctrl.len) { - s->ctrl.state = CTRL_STATE_ACK; - } - return len; - } - - s->ctrl.state = CTRL_STATE_IDLE; - return USB_RET_STALL; - - default: - return USB_RET_STALL; - } -} - -/* - * Packet handler. - * Called by the HC (host controller). - * - * Returns length of the transaction or one of the USB_RET_XXX codes. 
- */ -static int usb_host_handle_packet(USBDevice *s, USBPacket *p) -{ - switch(p->pid) { - case USB_MSG_ATTACH: - s->state = USB_STATE_ATTACHED; - return 0; - - case USB_MSG_DETACH: - s->state = USB_STATE_NOTATTACHED; - return 0; - - case USB_MSG_RESET: - s->remote_wakeup = 0; - s->addr = 0; - s->state = USB_STATE_DEFAULT; - s->info->handle_reset(s); - return 0; - } - - /* Rest of the PIDs must match our address */ - if (s->state < USB_STATE_DEFAULT || p->devaddr != s->addr) { - return USB_RET_NODEV; - } - - switch (p->pid) { - case USB_TOKEN_SETUP: - return do_token_setup(s, p); - - case USB_TOKEN_IN: - return do_token_in(s, p); - - case USB_TOKEN_OUT: - return do_token_out(s, p); - - default: - return USB_RET_STALL; - } -} - static int usb_linux_get_configuration(USBHostDevice *s) { uint8_t configuration; @@ -788,7 +879,7 @@ static int usb_linux_get_configuration(USBHostDevice *s) char device_name[32], line[1024]; int configuration; - sprintf(device_name, "%d-%d", s->bus_num, s->devpath); + sprintf(device_name, "%d-%s", s->bus_num, s->port); if (!usb_host_read_file(line, sizeof(line), "bConfigurationValue", device_name)) { @@ -823,13 +914,56 @@ usbdevfs: return configuration; } +static uint8_t usb_linux_get_alt_setting(USBHostDevice *s, + uint8_t configuration, uint8_t interface) +{ + uint8_t alt_setting; + struct usb_ctrltransfer ct; + int ret; + + if (usb_fs_type == USB_FS_SYS) { + char device_name[64], line[1024]; + int alt_setting; + + sprintf(device_name, "%d-%s:%d.%d", s->bus_num, s->port, + (int)configuration, (int)interface); + + if (!usb_host_read_file(line, sizeof(line), "bAlternateSetting", + device_name)) { + goto usbdevfs; + } + if (sscanf(line, "%d", &alt_setting) != 1) { + goto usbdevfs; + } + return alt_setting; + } + +usbdevfs: + ct.bRequestType = USB_DIR_IN | USB_RECIP_INTERFACE; + ct.bRequest = USB_REQ_GET_INTERFACE; + ct.wValue = 0; + ct.wIndex = interface; + ct.wLength = 1; + ct.data = &alt_setting; + ct.timeout = 50; + ret = ioctl(s->fd, USBDEVFS_CONTROL, &ct); + if (ret < 0) { + /* Assume alt 0 on error */ + return 0; + } + + return alt_setting; +} + /* returns 1 on problem encountered or 0 for success */ static int usb_linux_update_endp_table(USBHostDevice *s) { uint8_t *descriptors; uint8_t devep, type, configuration, alt_interface; - struct usb_ctrltransfer ct; - int interface, ret, length, i; + int interface, length, i; + + for (i = 0; i < MAX_ENDPOINTS; i++) + s->endp_table[i].type = INVALID_EP_TYPE; i = usb_linux_get_configuration(s); if (i < 0) @@ -858,19 +992,7 @@ static int usb_linux_update_endp_table(USBHostDevice *s) } interface = descriptors[i + 2]; - - ct.bRequestType = USB_DIR_IN | USB_RECIP_INTERFACE; - ct.bRequest = USB_REQ_GET_INTERFACE; - ct.wValue = 0; - ct.wIndex = interface; - ct.wLength = 1; - ct.data = &alt_interface; - ct.timeout = 50; - - ret = ioctl(s->fd, USBDEVFS_CONTROL, &ct); - if (ret < 0) { - alt_interface = interface; - } + alt_interface = usb_linux_get_alt_setting(s, configuration, interface); /* the current interface descriptor is the active interface * and has endpoints */ @@ -899,6 +1021,7 @@ static int usb_linux_update_endp_table(USBHostDevice *s) break; case 0x01: type = USBDEVFS_URB_TYPE_ISO; + set_max_packet_size(s, (devep & 0xf), descriptors + i); break; case 0x02: type = USBDEVFS_URB_TYPE_BULK; @@ -920,7 +1043,7 @@ static int usb_linux_update_endp_table(USBHostDevice *s) } static int usb_host_open(USBHostDevice *dev, int bus_num, - int addr, int devpath, const char *prod_name) + int addr, char *port, const char 
*prod_name) { int fd = -1, ret; struct usbdevfs_connectinfo ci; @@ -946,7 +1069,7 @@ static int usb_host_open(USBHostDevice *dev, int bus_num, dev->bus_num = bus_num; dev->addr = addr; - dev->devpath = devpath; + strcpy(dev->port, port); dev->fd = fd; /* read the device description */ @@ -1021,12 +1144,19 @@ fail: static int usb_host_close(USBHostDevice *dev) { + int i; + if (dev->fd == -1) { return -1; } qemu_set_fd_handler(dev->fd, NULL, NULL, NULL); dev->closing = 1; + for (i = 1; i <= MAX_ENDPOINTS; i++) { + if (is_isoc(dev, i)) { + usb_host_stop_n_free_iso(dev, i); + } + } async_complete(dev); dev->closing = 0; usb_device_detach(&dev->dev); @@ -1063,7 +1193,10 @@ static struct USBDeviceInfo usb_host_dev_info = { .qdev.name = "usb-host", .qdev.size = sizeof(USBHostDevice), .init = usb_host_initfn, - .handle_packet = usb_host_handle_packet, + .handle_packet = usb_generic_handle_packet, + .cancel_packet = usb_host_async_cancel, + .handle_data = usb_host_handle_data, + .handle_control = usb_host_handle_control, .handle_reset = usb_host_handle_reset, .handle_destroy = usb_host_handle_destroy, .usbdevice_name = "host", @@ -1071,6 +1204,7 @@ static struct USBDeviceInfo usb_host_dev_info = { .qdev.props = (Property[]) { DEFINE_PROP_UINT32("hostbus", USBHostDevice, match.bus_num, 0), DEFINE_PROP_UINT32("hostaddr", USBHostDevice, match.addr, 0), + DEFINE_PROP_STRING("hostport", USBHostDevice, match.port), DEFINE_PROP_HEX32("vendorid", USBHostDevice, match.vendor_id, 0), DEFINE_PROP_HEX32("productid", USBHostDevice, match.product_id, 0), DEFINE_PROP_END_OF_LIST(), @@ -1311,8 +1445,9 @@ static int usb_host_scan_sys(void *opaque, USBScanFunc *func) { DIR *dir = NULL; char line[1024]; - int bus_num, addr, devpath, speed, class_id, product_id, vendor_id; + int bus_num, addr, speed, class_id, product_id, vendor_id; int ret = 0; + char port[MAX_PORTLEN]; char product_name[512]; struct dirent *de; @@ -1324,12 +1459,8 @@ static int usb_host_scan_sys(void *opaque, USBScanFunc *func) while ((de = readdir(dir))) { if (de->d_name[0] != '.' 
&& !strchr(de->d_name, ':')) { - char *tmpstr = de->d_name; - if (!strncmp(de->d_name, "usb", 3)) { - tmpstr += 3; - } - if (sscanf(tmpstr, "%d-%d", &bus_num, &devpath) < 1) { - goto the_end; + if (sscanf(de->d_name, "%d-%7[0-9.]", &bus_num, port) < 2) { + continue; } if (!usb_host_read_file(line, sizeof(line), "devnum", de->d_name)) { @@ -1381,7 +1512,7 @@ static int usb_host_scan_sys(void *opaque, USBScanFunc *func) speed = USB_SPEED_FULL; } - ret = func(opaque, bus_num, addr, devpath, class_id, vendor_id, + ret = func(opaque, bus_num, addr, port, class_id, vendor_id, product_id, product_name, speed); if (ret) { goto the_end; @@ -1472,7 +1603,7 @@ static int usb_host_scan(void *opaque, USBScanFunc *func) static QEMUTimer *usb_auto_timer; -static int usb_host_auto_scan(void *opaque, int bus_num, int addr, int devpath, +static int usb_host_auto_scan(void *opaque, int bus_num, int addr, char *port, int class_id, int vendor_id, int product_id, const char *product_name, int speed) { @@ -1492,6 +1623,9 @@ static int usb_host_auto_scan(void *opaque, int bus_num, int addr, int devpath, if (f->addr > 0 && f->addr != addr) { continue; } + if (f->port != NULL && (port == NULL || strcmp(f->port, port) != 0)) { + continue; + } if (f->vendor_id > 0 && f->vendor_id != vendor_id) { continue; @@ -1508,7 +1642,7 @@ static int usb_host_auto_scan(void *opaque, int bus_num, int addr, int devpath, } DPRINTF("husb: auto open: bus_num %d addr %d\n", bus_num, addr); - usb_host_open(s, bus_num, addr, devpath, product_name); + usb_host_open(s, bus_num, addr, port, product_name); } return 0; @@ -1630,8 +1764,8 @@ static const char *usb_class_str(uint8_t class) return p->class_name; } -static void usb_info_device(Monitor *mon, int bus_num, int addr, int class_id, - int vendor_id, int product_id, +static void usb_info_device(Monitor *mon, int bus_num, int addr, char *port, + int class_id, int vendor_id, int product_id, const char *product_name, int speed) { @@ -1652,8 +1786,8 @@ static void usb_info_device(Monitor *mon, int bus_num, int addr, int class_id, break; } - monitor_printf(mon, " Device %d.%d, speed %s Mb/s\n", - bus_num, addr, speed_str); + monitor_printf(mon, " Bus %d, Addr %d, Port %s, Speed %s Mb/s\n", + bus_num, addr, port, speed_str); class_str = usb_class_str(class_id); if (class_str) { monitor_printf(mon, " %s:", class_str); @@ -1668,14 +1802,14 @@ static void usb_info_device(Monitor *mon, int bus_num, int addr, int class_id, } static int usb_host_info_device(void *opaque, int bus_num, int addr, - int devpath, int class_id, + char *path, int class_id, int vendor_id, int product_id, const char *product_name, int speed) { Monitor *mon = opaque; - usb_info_device(mon, bus_num, addr, class_id, vendor_id, product_id, + usb_info_device(mon, bus_num, addr, path, class_id, vendor_id, product_id, product_name, speed); return 0; } @@ -1717,7 +1851,7 @@ void usb_host_info(Monitor *mon) dec2str(f->addr, addr, sizeof(addr)); hex2str(f->vendor_id, vid, sizeof(vid)); hex2str(f->product_id, pid, sizeof(pid)); - monitor_printf(mon, " Device %s.%s ID %s:%s\n", - bus, addr, vid, pid); + monitor_printf(mon, " Bus %s, Addr %s, Port %s, ID %s:%s\n", + bus, addr, f->port ? 
f->port : "*", vid, pid); } } diff --git a/user-exec.c b/user-exec.c new file mode 100644 index 0000000000..d4a6abb8f5 --- /dev/null +++ b/user-exec.c @@ -0,0 +1,673 @@ +/* + * User emulator execution + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#include "config.h" +#include "exec.h" +#include "disas.h" +#include "tcg.h" + +#undef EAX +#undef ECX +#undef EDX +#undef EBX +#undef ESP +#undef EBP +#undef ESI +#undef EDI +#undef EIP +#include <signal.h> +#ifdef __linux__ +#include <sys/ucontext.h> +#endif + +//#define DEBUG_SIGNAL + +#if defined(TARGET_I386) +#define EXCEPTION_ACTION \ + raise_exception_err(env->exception_index, env->error_code) +#else +#define EXCEPTION_ACTION \ + cpu_loop_exit() +#endif + +/* exit the current TB from a signal handler. The host registers are + restored in a state compatible with the CPU emulator + */ +void cpu_resume_from_signal(CPUState *env1, void *puc) +{ +#ifdef __linux__ + struct ucontext *uc = puc; +#elif defined(__OpenBSD__) + struct sigcontext *uc = puc; +#endif + + env = env1; + + /* XXX: restore cpu registers saved in host registers */ + + if (puc) { + /* XXX: use siglongjmp ? */ +#ifdef __linux__ +#ifdef __ia64 + sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL); +#else + sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL); +#endif +#elif defined(__OpenBSD__) + sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL); +#endif + } + env->exception_index = -1; + longjmp(env->jmp_env, 1); +} + +/* 'pc' is the host PC at which the exception was raised. 'address' is + the effective address of the memory exception. 'is_write' is 1 if a + write caused the exception and otherwise 0'. 'old_set' is the + signal set which should be restored */ +static inline int handle_cpu_signal(unsigned long pc, unsigned long address, + int is_write, sigset_t *old_set, + void *puc) +{ + TranslationBlock *tb; + int ret; + + if (cpu_single_env) { + env = cpu_single_env; /* XXX: find a correct solution for multithread */ + } +#if defined(DEBUG_SIGNAL) + qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", + pc, address, is_write, *(unsigned long *)old_set); +#endif + /* XXX: locking issue */ + if (is_write && page_unprotect(h2g(address), pc, puc)) { + return 1; + } + + /* see if it is an MMU fault */ + ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0); + if (ret < 0) { + return 0; /* not an MMU fault */ + } + if (ret == 0) { + return 1; /* the MMU fault was handled without causing real CPU fault */ + } + /* now we have a real cpu fault */ + tb = tb_find_pc(pc); + if (tb) { + /* the PC is inside the translated code. 
It means that we have + a virtual CPU fault */ + cpu_restore_state(tb, env, pc); + } + + /* we restore the process signal mask as the sigreturn should + do it (XXX: use sigsetjmp) */ + sigprocmask(SIG_SETMASK, old_set, NULL); + EXCEPTION_ACTION; + + /* never comes here */ + return 1; +} + +#if defined(__i386__) + +#if defined(__APPLE__) +#include <sys/ucontext.h> + +#define EIP_sig(context) (*((unsigned long *)&(context)->uc_mcontext->ss.eip)) +#define TRAP_sig(context) ((context)->uc_mcontext->es.trapno) +#define ERROR_sig(context) ((context)->uc_mcontext->es.err) +#define MASK_sig(context) ((context)->uc_sigmask) +#elif defined(__NetBSD__) +#include <ucontext.h> + +#define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP]) +#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO]) +#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR]) +#define MASK_sig(context) ((context)->uc_sigmask) +#elif defined(__FreeBSD__) || defined(__DragonFly__) +#include <ucontext.h> + +#define EIP_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_eip)) +#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno) +#define ERROR_sig(context) ((context)->uc_mcontext.mc_err) +#define MASK_sig(context) ((context)->uc_sigmask) +#elif defined(__OpenBSD__) +#define EIP_sig(context) ((context)->sc_eip) +#define TRAP_sig(context) ((context)->sc_trapno) +#define ERROR_sig(context) ((context)->sc_err) +#define MASK_sig(context) ((context)->sc_mask) +#else +#define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP]) +#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO]) +#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR]) +#define MASK_sig(context) ((context)->uc_sigmask) +#endif + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; +#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__) + ucontext_t *uc = puc; +#elif defined(__OpenBSD__) + struct sigcontext *uc = puc; +#else + struct ucontext *uc = puc; +#endif + unsigned long pc; + int trapno; + +#ifndef REG_EIP +/* for glibc 2.1 */ +#define REG_EIP EIP +#define REG_ERR ERR +#define REG_TRAPNO TRAPNO +#endif + pc = EIP_sig(uc); + trapno = TRAP_sig(uc); + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + trapno == 0xe ? 
+ (ERROR_sig(uc) >> 1) & 1 : 0, + &MASK_sig(uc), puc); +} + +#elif defined(__x86_64__) + +#ifdef __NetBSD__ +#define PC_sig(context) _UC_MACHINE_PC(context) +#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO]) +#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR]) +#define MASK_sig(context) ((context)->uc_sigmask) +#elif defined(__OpenBSD__) +#define PC_sig(context) ((context)->sc_rip) +#define TRAP_sig(context) ((context)->sc_trapno) +#define ERROR_sig(context) ((context)->sc_err) +#define MASK_sig(context) ((context)->sc_mask) +#elif defined(__FreeBSD__) || defined(__DragonFly__) +#include <ucontext.h> + +#define PC_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_rip)) +#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno) +#define ERROR_sig(context) ((context)->uc_mcontext.mc_err) +#define MASK_sig(context) ((context)->uc_sigmask) +#else +#define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP]) +#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO]) +#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR]) +#define MASK_sig(context) ((context)->uc_sigmask) +#endif + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; + unsigned long pc; +#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__) + ucontext_t *uc = puc; +#elif defined(__OpenBSD__) + struct sigcontext *uc = puc; +#else + struct ucontext *uc = puc; +#endif + + pc = PC_sig(uc); + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + TRAP_sig(uc) == 0xe ? + (ERROR_sig(uc) >> 1) & 1 : 0, + &MASK_sig(uc), puc); +} + +#elif defined(_ARCH_PPC) + +/*********************************************************************** + * signal context platform-specific definitions + * From Wine + */ +#ifdef linux +/* All Registers access - only for local access */ +#define REG_sig(reg_name, context) \ + ((context)->uc_mcontext.regs->reg_name) +/* Gpr Registers access */ +#define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context) +/* Program counter */ +#define IAR_sig(context) REG_sig(nip, context) +/* Machine State Register (Supervisor) */ +#define MSR_sig(context) REG_sig(msr, context) +/* Count register */ +#define CTR_sig(context) REG_sig(ctr, context) +/* User's integer exception register */ +#define XER_sig(context) REG_sig(xer, context) +/* Link register */ +#define LR_sig(context) REG_sig(link, context) +/* Condition register */ +#define CR_sig(context) REG_sig(ccr, context) + +/* Float Registers access */ +#define FLOAT_sig(reg_num, context) \ + (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num]) +#define FPSCR_sig(context) \ + (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4))) +/* Exception Registers access */ +#define DAR_sig(context) REG_sig(dar, context) +#define DSISR_sig(context) REG_sig(dsisr, context) +#define TRAP_sig(context) REG_sig(trap, context) +#endif /* linux */ + +#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) +#include <ucontext.h> +#define IAR_sig(context) ((context)->uc_mcontext.mc_srr0) +#define MSR_sig(context) ((context)->uc_mcontext.mc_srr1) +#define CTR_sig(context) ((context)->uc_mcontext.mc_ctr) +#define XER_sig(context) ((context)->uc_mcontext.mc_xer) +#define LR_sig(context) ((context)->uc_mcontext.mc_lr) +#define CR_sig(context) ((context)->uc_mcontext.mc_cr) +/* Exception Registers access */ +#define DAR_sig(context) ((context)->uc_mcontext.mc_dar) +#define DSISR_sig(context) 
((context)->uc_mcontext.mc_dsisr) +#define TRAP_sig(context) ((context)->uc_mcontext.mc_exc) +#endif /* __FreeBSD__|| __FreeBSD_kernel__ */ + +#ifdef __APPLE__ +#include <sys/ucontext.h> +typedef struct ucontext SIGCONTEXT; +/* All Registers access - only for local access */ +#define REG_sig(reg_name, context) \ + ((context)->uc_mcontext->ss.reg_name) +#define FLOATREG_sig(reg_name, context) \ + ((context)->uc_mcontext->fs.reg_name) +#define EXCEPREG_sig(reg_name, context) \ + ((context)->uc_mcontext->es.reg_name) +#define VECREG_sig(reg_name, context) \ + ((context)->uc_mcontext->vs.reg_name) +/* Gpr Registers access */ +#define GPR_sig(reg_num, context) REG_sig(r##reg_num, context) +/* Program counter */ +#define IAR_sig(context) REG_sig(srr0, context) +/* Machine State Register (Supervisor) */ +#define MSR_sig(context) REG_sig(srr1, context) +#define CTR_sig(context) REG_sig(ctr, context) +/* Link register */ +#define XER_sig(context) REG_sig(xer, context) +/* User's integer exception register */ +#define LR_sig(context) REG_sig(lr, context) +/* Condition register */ +#define CR_sig(context) REG_sig(cr, context) +/* Float Registers access */ +#define FLOAT_sig(reg_num, context) \ + FLOATREG_sig(fpregs[reg_num], context) +#define FPSCR_sig(context) \ + ((double)FLOATREG_sig(fpscr, context)) +/* Exception Registers access */ +/* Fault registers for coredump */ +#define DAR_sig(context) EXCEPREG_sig(dar, context) +#define DSISR_sig(context) EXCEPREG_sig(dsisr, context) +/* number of powerpc exception taken */ +#define TRAP_sig(context) EXCEPREG_sig(exception, context) +#endif /* __APPLE__ */ + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; +#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) + ucontext_t *uc = puc; +#else + struct ucontext *uc = puc; +#endif + unsigned long pc; + int is_write; + + pc = IAR_sig(uc); + is_write = 0; +#if 0 + /* ppc 4xx case */ + if (DSISR_sig(uc) & 0x00800000) { + is_write = 1; + } +#else + if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000)) { + is_write = 1; + } +#endif + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, &uc->uc_sigmask, puc); +} + +#elif defined(__alpha__) + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; + struct ucontext *uc = puc; + uint32_t *pc = uc->uc_mcontext.sc_pc; + uint32_t insn = *pc; + int is_write = 0; + + /* XXX: need kernel patch to get write flag faster */ + switch (insn >> 26) { + case 0x0d: /* stw */ + case 0x0e: /* stb */ + case 0x0f: /* stq_u */ + case 0x24: /* stf */ + case 0x25: /* stg */ + case 0x26: /* sts */ + case 0x27: /* stt */ + case 0x2c: /* stl */ + case 0x2d: /* stq */ + case 0x2e: /* stl_c */ + case 0x2f: /* stq_c */ + is_write = 1; + } + + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, &uc->uc_sigmask, puc); +} +#elif defined(__sparc__) + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; + int is_write; + uint32_t insn; +#if !defined(__arch64__) || defined(CONFIG_SOLARIS) + uint32_t *regs = (uint32_t *)(info + 1); + void *sigmask = (regs + 20); + /* XXX: is there a standard glibc define ? 
*/ + unsigned long pc = regs[1]; +#else +#ifdef __linux__ + struct sigcontext *sc = puc; + unsigned long pc = sc->sigc_regs.tpc; + void *sigmask = (void *)sc->sigc_mask; +#elif defined(__OpenBSD__) + struct sigcontext *uc = puc; + unsigned long pc = uc->sc_pc; + void *sigmask = (void *)(long)uc->sc_mask; +#endif +#endif + + /* XXX: need kernel patch to get write flag faster */ + is_write = 0; + insn = *(uint32_t *)pc; + if ((insn >> 30) == 3) { + switch ((insn >> 19) & 0x3f) { + case 0x05: /* stb */ + case 0x15: /* stba */ + case 0x06: /* sth */ + case 0x16: /* stha */ + case 0x04: /* st */ + case 0x14: /* sta */ + case 0x07: /* std */ + case 0x17: /* stda */ + case 0x0e: /* stx */ + case 0x1e: /* stxa */ + case 0x24: /* stf */ + case 0x34: /* stfa */ + case 0x27: /* stdf */ + case 0x37: /* stdfa */ + case 0x26: /* stqf */ + case 0x36: /* stqfa */ + case 0x25: /* stfsr */ + case 0x3c: /* casa */ + case 0x3e: /* casxa */ + is_write = 1; + break; + } + } + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, sigmask, NULL); +} + +#elif defined(__arm__) + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; + struct ucontext *uc = puc; + unsigned long pc; + int is_write; + +#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3)) + pc = uc->uc_mcontext.gregs[R15]; +#else + pc = uc->uc_mcontext.arm_pc; +#endif + /* XXX: compute is_write */ + is_write = 0; + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, + &uc->uc_sigmask, puc); +} + +#elif defined(__mc68000) + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; + struct ucontext *uc = puc; + unsigned long pc; + int is_write; + + pc = uc->uc_mcontext.gregs[16]; + /* XXX: compute is_write */ + is_write = 0; + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, + &uc->uc_sigmask, puc); +} + +#elif defined(__ia64) + +#ifndef __ISR_VALID + /* This ought to be in <bits/siginfo.h>... */ +# define __ISR_VALID 1 +#endif + +int cpu_signal_handler(int host_signum, void *pinfo, void *puc) +{ + siginfo_t *info = pinfo; + struct ucontext *uc = puc; + unsigned long ip; + int is_write = 0; + + ip = uc->uc_mcontext.sc_ip; + switch (host_signum) { + case SIGILL: + case SIGFPE: + case SIGSEGV: + case SIGBUS: + case SIGTRAP: + if (info->si_code && (info->si_segvflags & __ISR_VALID)) { + /* ISR.W (write-access) is bit 33: */ + is_write = (info->si_isr >> 33) & 1; + } + break; + + default: + break; + } + return handle_cpu_signal(ip, (unsigned long)info->si_addr, + is_write, + (sigset_t *)&uc->uc_sigmask, puc); +} + +#elif defined(__s390__) + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; + struct ucontext *uc = puc; + unsigned long pc; + uint16_t *pinsn; + int is_write = 0; + + pc = uc->uc_mcontext.psw.addr; + + /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead + of the normal 2 arguments. The 3rd argument contains the "int_code" + from the hardware which does in fact contain the is_write value. + The rt signal handler, as far as I can tell, does not give this value + at all. Not that we could get to it from here even if it were. */ + /* ??? This is not even close to complete, since it ignores all + of the read-modify-write instructions. 
*/ + pinsn = (uint16_t *)pc; + switch (pinsn[0] >> 8) { + case 0x50: /* ST */ + case 0x42: /* STC */ + case 0x40: /* STH */ + is_write = 1; + break; + case 0xc4: /* RIL format insns */ + switch (pinsn[0] & 0xf) { + case 0xf: /* STRL */ + case 0xb: /* STGRL */ + case 0x7: /* STHRL */ + is_write = 1; + } + break; + case 0xe3: /* RXY format insns */ + switch (pinsn[2] & 0xff) { + case 0x50: /* STY */ + case 0x24: /* STG */ + case 0x72: /* STCY */ + case 0x70: /* STHY */ + case 0x8e: /* STPQ */ + case 0x3f: /* STRVH */ + case 0x3e: /* STRV */ + case 0x2f: /* STRVG */ + is_write = 1; + } + break; + } + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, &uc->uc_sigmask, puc); +} + +#elif defined(__mips__) + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + siginfo_t *info = pinfo; + struct ucontext *uc = puc; + greg_t pc = uc->uc_mcontext.pc; + int is_write; + + /* XXX: compute is_write */ + is_write = 0; + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, &uc->uc_sigmask, puc); +} + +#elif defined(__hppa__) + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + struct siginfo *info = pinfo; + struct ucontext *uc = puc; + unsigned long pc = uc->uc_mcontext.sc_iaoq[0]; + uint32_t insn = *(uint32_t *)pc; + int is_write = 0; + + /* XXX: need kernel patch to get write flag faster. */ + switch (insn >> 26) { + case 0x1a: /* STW */ + case 0x19: /* STH */ + case 0x18: /* STB */ + case 0x1b: /* STWM */ + is_write = 1; + break; + + case 0x09: /* CSTWX, FSTWX, FSTWS */ + case 0x0b: /* CSTDX, FSTDX, FSTDS */ + /* Distinguish from coprocessor load ... */ + is_write = (insn >> 9) & 1; + break; + + case 0x03: + switch ((insn >> 6) & 15) { + case 0xa: /* STWS */ + case 0x9: /* STHS */ + case 0x8: /* STBS */ + case 0xe: /* STWAS */ + case 0xc: /* STBYS */ + is_write = 1; + } + break; + } + + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, &uc->uc_sigmask, puc); +} + +#else + +#error host CPU specific signal handler needed + +#endif + +#if defined(TARGET_I386) + +void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector) +{ + CPUX86State *saved_env; + + saved_env = env; + env = s; + if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) { + selector &= 0xffff; + cpu_x86_load_seg_cache(env, seg_reg, selector, + (selector << 4), 0xffff, 0); + } else { + helper_load_seg(seg_reg, selector); + } + env = saved_env; +} + +void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32) +{ + CPUX86State *saved_env; + + saved_env = env; + env = s; + + helper_fsave(ptr, data32); + + env = saved_env; +} + +void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32) +{ + CPUX86State *saved_env; + + saved_env = env; + env = s; + + helper_frstor(ptr, data32); + + env = saved_env; +} + +#endif /* TARGET_I386 */ @@ -257,7 +257,9 @@ static NotifierList exit_notifiers = static NotifierList machine_init_done_notifiers = NOTIFIER_LIST_INITIALIZER(machine_init_done_notifiers); +static int tcg_allowed = 1; int kvm_allowed = 0; +int xen_allowed = 0; uint32_t xen_domid; enum xen_mode xen_mode = XEN_EMULATE; @@ -277,13 +279,18 @@ static struct { { .driver = "isa-serial", .flag = &default_serial }, { .driver = "isa-parallel", .flag = &default_parallel }, { .driver = "isa-fdc", .flag = &default_floppy }, + { .driver = "ide-cd", .flag = &default_cdrom }, + { .driver = "ide-hd", .flag = &default_cdrom }, { .driver = "ide-drive", .flag = &default_cdrom }, + { .driver = "scsi-cd", .flag = &default_cdrom }, { .driver = 
"virtio-serial-pci", .flag = &default_virtcon }, { .driver = "virtio-serial-s390", .flag = &default_virtcon }, { .driver = "virtio-serial", .flag = &default_virtcon }, { .driver = "VGA", .flag = &default_vga }, { .driver = "cirrus-vga", .flag = &default_vga }, { .driver = "vmware-svga", .flag = &default_vga }, + { .driver = "isa-vga", .flag = &default_vga }, + { .driver = "qxl-vga", .flag = &default_vga }, }; static int default_driver_check(QemuOpts *opts, void *opaque) @@ -1159,6 +1166,16 @@ static int powerdown_requested; static int debug_requested; static int vmstop_requested; +int qemu_shutdown_requested_get(void) +{ + return shutdown_requested; +} + +int qemu_reset_requested_get(void) +{ + return reset_requested; +} + int qemu_shutdown_requested(void) { int r = shutdown_requested; @@ -1876,6 +1893,84 @@ static int debugcon_parse(const char *devname) return 0; } +static int tcg_init(void) +{ + return 0; +} + +static struct { + const char *opt_name; + const char *name; + int (*available)(void); + int (*init)(void); + int *allowed; +} accel_list[] = { + { "tcg", "tcg", tcg_available, tcg_init, &tcg_allowed }, + { "xen", "Xen", xen_available, xen_init, &xen_allowed }, + { "kvm", "KVM", kvm_available, kvm_init, &kvm_allowed }, +}; + +static int configure_accelerator(void) +{ + const char *p = NULL; + char buf[10]; + int i, ret; + bool accel_initalised = 0; + bool init_failed = 0; + + QemuOptsList *list = qemu_find_opts("machine"); + if (!QTAILQ_EMPTY(&list->head)) { + p = qemu_opt_get(QTAILQ_FIRST(&list->head), "accel"); + } + + if (p == NULL) { + /* Use the default "accelerator", tcg */ + p = "tcg"; + } + + while (!accel_initalised && *p != '\0') { + if (*p == ':') { + p++; + } + p = get_opt_name(buf, sizeof (buf), p, ':'); + for (i = 0; i < ARRAY_SIZE(accel_list); i++) { + if (strcmp(accel_list[i].opt_name, buf) == 0) { + *(accel_list[i].allowed) = 1; + ret = accel_list[i].init(); + if (ret < 0) { + init_failed = 1; + if (!accel_list[i].available()) { + printf("%s not supported for this target\n", + accel_list[i].name); + } else { + fprintf(stderr, "failed to initialize %s: %s\n", + accel_list[i].name, + strerror(-ret)); + } + *(accel_list[i].allowed) = 0; + } else { + accel_initalised = 1; + } + break; + } + } + if (i == ARRAY_SIZE(accel_list)) { + fprintf(stderr, "\"%s\" accelerator does not exist.\n", buf); + } + } + + if (!accel_initalised) { + fprintf(stderr, "No accelerator found!\n"); + exit(1); + } + + if (init_failed) { + fprintf(stderr, "Back to %s accelerator.\n", accel_list[i].name); + } + + return !accel_initalised; +} + void qemu_add_exit_notifier(Notifier *notify) { notifier_list_add(&exit_notifiers, notify); @@ -2576,7 +2671,18 @@ int main(int argc, char **argv, char **envp) do_smbios_option(optarg); break; case QEMU_OPTION_enable_kvm: - kvm_allowed = 1; + olist = qemu_find_opts("machine"); + qemu_opts_reset(olist); + qemu_opts_parse(olist, "accel=kvm", 0); + break; + case QEMU_OPTION_machine: + olist = qemu_find_opts("machine"); + qemu_opts_reset(olist); + opts = qemu_opts_parse(olist, optarg, 0); + if (!opts) { + fprintf(stderr, "parse error: %s\n", optarg); + exit(1); + } break; case QEMU_OPTION_usb: usb_enabled = 1; @@ -2826,6 +2932,28 @@ int main(int argc, char **argv, char **envp) exit(1); } + /* + * Get the default machine options from the machine if it is not already + * specified either by the configuration file or by the command line. 
+ */ + if (machine->default_machine_opts) { + QemuOptsList *list = qemu_find_opts("machine"); + const char *p = NULL; + + if (!QTAILQ_EMPTY(&list->head)) { + p = qemu_opt_get(QTAILQ_FIRST(&list->head), "accel"); + } + if (p == NULL) { + opts = qemu_opts_parse(qemu_find_opts("machine"), + machine->default_machine_opts, 0); + if (!opts) { + fprintf(stderr, "parse error for machine %s: %s\n", + machine->name, machine->default_machine_opts); + exit(1); + } + } + } + qemu_opts_foreach(qemu_find_opts("device"), default_driver_check, NULL, 0); qemu_opts_foreach(qemu_find_opts("global"), default_driver_check, NULL, 0); @@ -2896,17 +3024,7 @@ int main(int argc, char **argv, char **envp) exit(1); } - if (kvm_allowed) { - int ret = kvm_init(); - if (ret < 0) { - if (!kvm_available()) { - printf("KVM not supported for this target\n"); - } else { - fprintf(stderr, "failed to initialize KVM: %s\n", strerror(-ret)); - } - exit(1); - } - } + configure_accelerator(); if (qemu_init_main_loop()) { fprintf(stderr, "qemu_init_main_loop failed\n"); diff --git a/xen-all.c b/xen-all.c new file mode 100644 index 0000000000..0eac202d4e --- /dev/null +++ b/xen-all.c @@ -0,0 +1,605 @@ +/* + * Copyright (C) 2010 Citrix Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#include <sys/mman.h> + +#include "hw/pci.h" +#include "hw/pc.h" +#include "hw/xen_common.h" +#include "hw/xen_backend.h" + +#include "xen-mapcache.h" +#include "trace.h" + +#include <xen/hvm/ioreq.h> +#include <xen/hvm/params.h> + +//#define DEBUG_XEN + +#ifdef DEBUG_XEN +#define DPRINTF(fmt, ...) \ + do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) \ + do { } while (0) +#endif + +/* Compatibility with older version */ +#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a +static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i) +{ + return shared_page->vcpu_iodata[i].vp_eport; +} +static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu) +{ + return &shared_page->vcpu_iodata[vcpu].vp_ioreq; +} +# define FMT_ioreq_size PRIx64 +#else +static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i) +{ + return shared_page->vcpu_ioreq[i].vp_eport; +} +static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu) +{ + return &shared_page->vcpu_ioreq[vcpu]; +} +# define FMT_ioreq_size "u" +#endif + +#define BUFFER_IO_MAX_DELAY 100 + +typedef struct XenIOState { + shared_iopage_t *shared_page; + buffered_iopage_t *buffered_io_page; + QEMUTimer *buffered_io_timer; + /* the evtchn port for polling the notification, */ + evtchn_port_t *ioreq_local_port; + /* the evtchn fd for polling */ + XenEvtchn xce_handle; + /* which vcpu we are serving */ + int send_vcpu; + + struct xs_handle *xenstore; + + Notifier exit; +} XenIOState; + +/* Xen specific function for piix pci */ + +int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num) +{ + return irq_num + ((pci_dev->devfn >> 3) << 2); +} + +void xen_piix3_set_irq(void *opaque, int irq_num, int level) +{ + xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2, + irq_num & 3, level); +} + +void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len) +{ + int i; + + /* Scan for updates to PCI link routes (0x60-0x63). 
*/ + for (i = 0; i < len; i++) { + uint8_t v = (val >> (8 * i)) & 0xff; + if (v & 0x80) { + v = 0; + } + v &= 0xf; + if (((address + i) >= 0x60) && ((address + i) <= 0x63)) { + xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v); + } + } +} + +void xen_cmos_set_s3_resume(void *opaque, int irq, int level) +{ + pc_cmos_set_s3_resume(opaque, irq, level); + if (level) { + xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3); + } +} + +/* Xen Interrupt Controller */ + +static void xen_set_irq(void *opaque, int irq, int level) +{ + xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level); +} + +qemu_irq *xen_interrupt_controller_init(void) +{ + return qemu_allocate_irqs(xen_set_irq, NULL, 16); +} + +/* Memory Ops */ + +static void xen_ram_init(ram_addr_t ram_size) +{ + RAMBlock *new_block; + ram_addr_t below_4g_mem_size, above_4g_mem_size = 0; + + new_block = qemu_mallocz(sizeof (*new_block)); + pstrcpy(new_block->idstr, sizeof (new_block->idstr), "xen.ram"); + new_block->host = NULL; + new_block->offset = 0; + new_block->length = ram_size; + + QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next); + + ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty, + new_block->length >> TARGET_PAGE_BITS); + memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS), + 0xff, new_block->length >> TARGET_PAGE_BITS); + + if (ram_size >= 0xe0000000 ) { + above_4g_mem_size = ram_size - 0xe0000000; + below_4g_mem_size = 0xe0000000; + } else { + below_4g_mem_size = ram_size; + } + + cpu_register_physical_memory(0, below_4g_mem_size, new_block->offset); +#if TARGET_PHYS_ADDR_BITS > 32 + if (above_4g_mem_size > 0) { + cpu_register_physical_memory(0x100000000ULL, above_4g_mem_size, + new_block->offset + below_4g_mem_size); + } +#endif +} + +void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size) +{ + unsigned long nr_pfn; + xen_pfn_t *pfn_list; + int i; + + trace_xen_ram_alloc(ram_addr, size); + + nr_pfn = size >> TARGET_PAGE_BITS; + pfn_list = qemu_malloc(sizeof (*pfn_list) * nr_pfn); + + for (i = 0; i < nr_pfn; i++) { + pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i; + } + + if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) { + hw_error("xen: failed to populate ram at %lx", ram_addr); + } + + qemu_free(pfn_list); +} + + +/* VCPU Operations, MMIO, IO ring ... 
*/ + +static void xen_reset_vcpu(void *opaque) +{ + CPUState *env = opaque; + + env->halted = 1; +} + +void xen_vcpu_init(void) +{ + CPUState *first_cpu; + + if ((first_cpu = qemu_get_cpu(0))) { + qemu_register_reset(xen_reset_vcpu, first_cpu); + xen_reset_vcpu(first_cpu); + } +} + +/* get the ioreq packets from share mem */ +static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu) +{ + ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu); + + if (req->state != STATE_IOREQ_READY) { + DPRINTF("I/O request not ready: " + "%x, ptr: %x, port: %"PRIx64", " + "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n", + req->state, req->data_is_ptr, req->addr, + req->data, req->count, req->size); + return NULL; + } + + xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */ + + req->state = STATE_IOREQ_INPROCESS; + return req; +} + +/* use poll to get the port notification */ +/* ioreq_vec--out,the */ +/* retval--the number of ioreq packet */ +static ioreq_t *cpu_get_ioreq(XenIOState *state) +{ + int i; + evtchn_port_t port; + + port = xc_evtchn_pending(state->xce_handle); + if (port != -1) { + for (i = 0; i < smp_cpus; i++) { + if (state->ioreq_local_port[i] == port) { + break; + } + } + + if (i == smp_cpus) { + hw_error("Fatal error while trying to get io event!\n"); + } + + /* unmask the wanted port again */ + xc_evtchn_unmask(state->xce_handle, port); + + /* get the io packet from shared memory */ + state->send_vcpu = i; + return cpu_get_ioreq_from_shared_memory(state, i); + } + + /* read error or read nothing */ + return NULL; +} + +static uint32_t do_inp(pio_addr_t addr, unsigned long size) +{ + switch (size) { + case 1: + return cpu_inb(addr); + case 2: + return cpu_inw(addr); + case 4: + return cpu_inl(addr); + default: + hw_error("inp: bad size: %04"FMT_pioaddr" %lx", addr, size); + } +} + +static void do_outp(pio_addr_t addr, + unsigned long size, uint32_t val) +{ + switch (size) { + case 1: + return cpu_outb(addr, val); + case 2: + return cpu_outw(addr, val); + case 4: + return cpu_outl(addr, val); + default: + hw_error("outp: bad size: %04"FMT_pioaddr" %lx", addr, size); + } +} + +static void cpu_ioreq_pio(ioreq_t *req) +{ + int i, sign; + + sign = req->df ? -1 : 1; + + if (req->dir == IOREQ_READ) { + if (!req->data_is_ptr) { + req->data = do_inp(req->addr, req->size); + } else { + uint32_t tmp; + + for (i = 0; i < req->count; i++) { + tmp = do_inp(req->addr, req->size); + cpu_physical_memory_write(req->data + (sign * i * req->size), + (uint8_t *) &tmp, req->size); + } + } + } else if (req->dir == IOREQ_WRITE) { + if (!req->data_is_ptr) { + do_outp(req->addr, req->size, req->data); + } else { + for (i = 0; i < req->count; i++) { + uint32_t tmp = 0; + + cpu_physical_memory_read(req->data + (sign * i * req->size), + (uint8_t*) &tmp, req->size); + do_outp(req->addr, req->size, tmp); + } + } + } +} + +static void cpu_ioreq_move(ioreq_t *req) +{ + int i, sign; + + sign = req->df ? 
-1 : 1; + + if (!req->data_is_ptr) { + if (req->dir == IOREQ_READ) { + for (i = 0; i < req->count; i++) { + cpu_physical_memory_read(req->addr + (sign * i * req->size), + (uint8_t *) &req->data, req->size); + } + } else if (req->dir == IOREQ_WRITE) { + for (i = 0; i < req->count; i++) { + cpu_physical_memory_write(req->addr + (sign * i * req->size), + (uint8_t *) &req->data, req->size); + } + } + } else { + target_ulong tmp; + + if (req->dir == IOREQ_READ) { + for (i = 0; i < req->count; i++) { + cpu_physical_memory_read(req->addr + (sign * i * req->size), + (uint8_t*) &tmp, req->size); + cpu_physical_memory_write(req->data + (sign * i * req->size), + (uint8_t*) &tmp, req->size); + } + } else if (req->dir == IOREQ_WRITE) { + for (i = 0; i < req->count; i++) { + cpu_physical_memory_read(req->data + (sign * i * req->size), + (uint8_t*) &tmp, req->size); + cpu_physical_memory_write(req->addr + (sign * i * req->size), + (uint8_t*) &tmp, req->size); + } + } + } +} + +static void handle_ioreq(ioreq_t *req) +{ + if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) && + (req->size < sizeof (target_ulong))) { + req->data &= ((target_ulong) 1 << (8 * req->size)) - 1; + } + + switch (req->type) { + case IOREQ_TYPE_PIO: + cpu_ioreq_pio(req); + break; + case IOREQ_TYPE_COPY: + cpu_ioreq_move(req); + break; + case IOREQ_TYPE_TIMEOFFSET: + break; + case IOREQ_TYPE_INVALIDATE: + qemu_invalidate_map_cache(); + break; + default: + hw_error("Invalid ioreq type 0x%x\n", req->type); + } +} + +static void handle_buffered_iopage(XenIOState *state) +{ + buf_ioreq_t *buf_req = NULL; + ioreq_t req; + int qw; + + if (!state->buffered_io_page) { + return; + } + + while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) { + buf_req = &state->buffered_io_page->buf_ioreq[ + state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM]; + req.size = 1UL << buf_req->size; + req.count = 1; + req.addr = buf_req->addr; + req.data = buf_req->data; + req.state = STATE_IOREQ_READY; + req.dir = buf_req->dir; + req.df = 1; + req.type = buf_req->type; + req.data_is_ptr = 0; + qw = (req.size == 8); + if (qw) { + buf_req = &state->buffered_io_page->buf_ioreq[ + (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM]; + req.data |= ((uint64_t)buf_req->data) << 32; + } + + handle_ioreq(&req); + + xen_mb(); + state->buffered_io_page->read_pointer += qw ? 2 : 1; + } +} + +static void handle_buffered_io(void *opaque) +{ + XenIOState *state = opaque; + + handle_buffered_iopage(state); + qemu_mod_timer(state->buffered_io_timer, + BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock)); +} + +static void cpu_handle_ioreq(void *opaque) +{ + XenIOState *state = opaque; + ioreq_t *req = cpu_get_ioreq(state); + + handle_buffered_iopage(state); + if (req) { + handle_ioreq(req); + + if (req->state != STATE_IOREQ_INPROCESS) { + fprintf(stderr, "Badness in I/O request ... not in service?!: " + "%x, ptr: %x, port: %"PRIx64", " + "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n", + req->state, req->data_is_ptr, req->addr, + req->data, req->count, req->size); + destroy_hvm_domain(); + return; + } + + xen_wmb(); /* Update ioreq contents /then/ update state. */ + + /* + * We do this before we send the response so that the tools + * have the opportunity to pick up on the reset before the + * guest resumes and does a hlt with interrupts disabled which + * causes Xen to powerdown the domain. 
+ */ + if (vm_running) { + if (qemu_shutdown_requested_get()) { + destroy_hvm_domain(); + } + if (qemu_reset_requested_get()) { + qemu_system_reset(); + } + } + + req->state = STATE_IORESP_READY; + xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]); + } +} + +static void xenstore_record_dm_state(XenIOState *s, const char *state) +{ + char path[50]; + + snprintf(path, sizeof (path), "/local/domain/0/device-model/%u/state", xen_domid); + if (!xs_write(s->xenstore, XBT_NULL, path, state, strlen(state))) { + fprintf(stderr, "error recording dm state\n"); + exit(1); + } +} + +static void xen_main_loop_prepare(XenIOState *state) +{ + int evtchn_fd = -1; + + if (state->xce_handle != XC_HANDLER_INITIAL_VALUE) { + evtchn_fd = xc_evtchn_fd(state->xce_handle); + } + + state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io, + state); + qemu_mod_timer(state->buffered_io_timer, qemu_get_clock_ms(rt_clock)); + + if (evtchn_fd != -1) { + qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state); + } + + /* record state running */ + xenstore_record_dm_state(state, "running"); +} + + +/* Initialise Xen */ + +static void xen_vm_change_state_handler(void *opaque, int running, int reason) +{ + XenIOState *state = opaque; + if (running) { + xen_main_loop_prepare(state); + } +} + +static void xen_exit_notifier(Notifier *n) +{ + XenIOState *state = container_of(n, XenIOState, exit); + + xc_evtchn_close(state->xce_handle); + xs_daemon_close(state->xenstore); +} + +int xen_init(void) +{ + xen_xc = xen_xc_interface_open(0, 0, 0); + if (xen_xc == XC_HANDLER_INITIAL_VALUE) { + xen_be_printf(NULL, 0, "can't open xen interface\n"); + return -1; + } + + return 0; +} + +int xen_hvm_init(void) +{ + int i, rc; + unsigned long ioreq_pfn; + XenIOState *state; + + state = qemu_mallocz(sizeof (XenIOState)); + + state->xce_handle = xen_xc_evtchn_open(NULL, 0); + if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) { + perror("xen: event channel open"); + return -errno; + } + + state->xenstore = xs_daemon_open(); + if (state->xenstore == NULL) { + perror("xen: xenstore open"); + return -errno; + } + + state->exit.notify = xen_exit_notifier; + qemu_add_exit_notifier(&state->exit); + + xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn); + DPRINTF("shared page at pfn %lx\n", ioreq_pfn); + state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE, + PROT_READ|PROT_WRITE, ioreq_pfn); + if (state->shared_page == NULL) { + hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT, + errno, xen_xc); + } + + xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn); + DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn); + state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE, + PROT_READ|PROT_WRITE, ioreq_pfn); + if (state->buffered_io_page == NULL) { + hw_error("map buffered IO page returned error %d", errno); + } + + state->ioreq_local_port = qemu_mallocz(smp_cpus * sizeof (evtchn_port_t)); + + /* FIXME: how about if we overflow the page here? 
*/ + for (i = 0; i < smp_cpus; i++) { + rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid, + xen_vcpu_eport(state->shared_page, i)); + if (rc == -1) { + fprintf(stderr, "bind interdomain ioctl error %d\n", errno); + return -1; + } + state->ioreq_local_port[i] = rc; + } + + /* Init RAM management */ + qemu_map_cache_init(); + xen_ram_init(ram_size); + + qemu_add_vm_change_state_handler(xen_vm_change_state_handler, state); + + return 0; +} + +void destroy_hvm_domain(void) +{ + XenXC xc_handle; + int sts; + + xc_handle = xen_xc_interface_open(0, 0, 0); + if (xc_handle == XC_HANDLER_INITIAL_VALUE) { + fprintf(stderr, "Cannot acquire xenctrl handle\n"); + } else { + sts = xc_domain_shutdown(xc_handle, xen_domid, SHUTDOWN_poweroff); + if (sts != 0) { + fprintf(stderr, "? xc_domain_shutdown failed to issue poweroff, " + "sts %d, %s\n", sts, strerror(errno)); + } else { + fprintf(stderr, "Issued domain %d poweroff\n", xen_domid); + } + xc_interface_close(xc_handle); + } +} diff --git a/xen-mapcache-stub.c b/xen-mapcache-stub.c new file mode 100644 index 0000000000..7c14b3d141 --- /dev/null +++ b/xen-mapcache-stub.c @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2011 Citrix Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#include "config.h" + +#include "exec-all.h" +#include "qemu-common.h" +#include "cpu-common.h" +#include "xen-mapcache.h" + +void qemu_map_cache_init(void) +{ +} + +uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, uint8_t lock) +{ + return qemu_get_ram_ptr(phys_addr); +} + +void qemu_map_cache_unlock(void *buffer) +{ +} + +ram_addr_t qemu_ram_addr_from_mapcache(void *ptr) +{ + return -1; +} + +void qemu_invalidate_map_cache(void) +{ +} + +void qemu_invalidate_entry(uint8_t *buffer) +{ +} +uint8_t *xen_map_block(target_phys_addr_t phys_addr, target_phys_addr_t size) +{ + return NULL; +} diff --git a/xen-mapcache.c b/xen-mapcache.c new file mode 100644 index 0000000000..349cc6221d --- /dev/null +++ b/xen-mapcache.c @@ -0,0 +1,375 @@ +/* + * Copyright (C) 2011 Citrix Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#include "config.h" + +#include <sys/resource.h> + +#include "hw/xen_backend.h" +#include "blockdev.h" +#include "bitmap.h" + +#include <xen/hvm/params.h> +#include <sys/mman.h> + +#include "xen-mapcache.h" +#include "trace.h" + + +//#define MAPCACHE_DEBUG + +#ifdef MAPCACHE_DEBUG +# define DPRINTF(fmt, ...) do { \ + fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \ +} while (0) +#else +# define DPRINTF(fmt, ...) 
do { } while (0) +#endif + +#if defined(__i386__) +# define MCACHE_BUCKET_SHIFT 16 +# define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */ +#elif defined(__x86_64__) +# define MCACHE_BUCKET_SHIFT 20 +# define MCACHE_MAX_SIZE (1UL<<35) /* 32GB Cap */ +#endif +#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT) + +typedef struct MapCacheEntry { + target_phys_addr_t paddr_index; + uint8_t *vaddr_base; + DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT); + uint8_t lock; + struct MapCacheEntry *next; +} MapCacheEntry; + +typedef struct MapCacheRev { + uint8_t *vaddr_req; + target_phys_addr_t paddr_index; + QTAILQ_ENTRY(MapCacheRev) next; +} MapCacheRev; + +typedef struct MapCache { + MapCacheEntry *entry; + unsigned long nr_buckets; + QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries; + + /* For most cases (>99.9%), the page address is the same. */ + target_phys_addr_t last_address_index; + uint8_t *last_address_vaddr; + unsigned long max_mcache_size; + unsigned int mcache_bucket_shift; +} MapCache; + +static MapCache *mapcache; + +void qemu_map_cache_init(void) +{ + unsigned long size; + struct rlimit rlimit_as; + + mapcache = qemu_mallocz(sizeof (MapCache)); + + QTAILQ_INIT(&mapcache->locked_entries); + mapcache->last_address_index = -1; + + getrlimit(RLIMIT_AS, &rlimit_as); + if (rlimit_as.rlim_max < MCACHE_MAX_SIZE) { + rlimit_as.rlim_cur = rlimit_as.rlim_max; + } else { + rlimit_as.rlim_cur = MCACHE_MAX_SIZE; + } + + setrlimit(RLIMIT_AS, &rlimit_as); + mapcache->max_mcache_size = rlimit_as.rlim_cur; + + mapcache->nr_buckets = + (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) + + (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >> + (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)); + + size = mapcache->nr_buckets * sizeof (MapCacheEntry); + size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1); + DPRINTF("qemu_map_cache_init, nr_buckets = %lx size %lu\n", mapcache->nr_buckets, size); + mapcache->entry = qemu_mallocz(size); +} + +static void qemu_remap_bucket(MapCacheEntry *entry, + target_phys_addr_t size, + target_phys_addr_t address_index) +{ + uint8_t *vaddr_base; + xen_pfn_t *pfns; + int *err; + unsigned int i; + target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT; + + trace_qemu_remap_bucket(address_index); + + pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t)); + err = qemu_mallocz(nb_pfn * sizeof (int)); + + if (entry->vaddr_base != NULL) { + if (munmap(entry->vaddr_base, size) != 0) { + perror("unmap fails"); + exit(-1); + } + } + + for (i = 0; i < nb_pfn; i++) { + pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i; + } + + vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE, + pfns, err, nb_pfn); + if (vaddr_base == NULL) { + perror("xc_map_foreign_bulk"); + exit(-1); + } + + entry->vaddr_base = vaddr_base; + entry->paddr_index = address_index; + + bitmap_zero(entry->valid_mapping, nb_pfn); + for (i = 0; i < nb_pfn; i++) { + if (!err[i]) { + bitmap_set(entry->valid_mapping, i, 1); + } + } + + qemu_free(pfns); + qemu_free(err); +} + +uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, uint8_t lock) +{ + MapCacheEntry *entry, *pentry = NULL; + target_phys_addr_t address_index = phys_addr >> MCACHE_BUCKET_SHIFT; + target_phys_addr_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1); + + trace_qemu_map_cache(phys_addr); + + if (address_index == mapcache->last_address_index && !lock) { + trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset); + return mapcache->last_address_vaddr + 
address_offset; + } + + entry = &mapcache->entry[address_index % mapcache->nr_buckets]; + + while (entry && entry->lock && entry->paddr_index != address_index && entry->vaddr_base) { + pentry = entry; + entry = entry->next; + } + if (!entry) { + entry = qemu_mallocz(sizeof (MapCacheEntry)); + pentry->next = entry; + qemu_remap_bucket(entry, size ? : MCACHE_BUCKET_SIZE, address_index); + } else if (!entry->lock) { + if (!entry->vaddr_base || entry->paddr_index != address_index || + !test_bit(address_offset >> XC_PAGE_SHIFT, entry->valid_mapping)) { + qemu_remap_bucket(entry, size ? : MCACHE_BUCKET_SIZE, address_index); + } + } + + if (!test_bit(address_offset >> XC_PAGE_SHIFT, entry->valid_mapping)) { + mapcache->last_address_index = -1; + trace_qemu_map_cache_return(NULL); + return NULL; + } + + mapcache->last_address_index = address_index; + mapcache->last_address_vaddr = entry->vaddr_base; + if (lock) { + MapCacheRev *reventry = qemu_mallocz(sizeof(MapCacheRev)); + entry->lock++; + reventry->vaddr_req = mapcache->last_address_vaddr + address_offset; + reventry->paddr_index = mapcache->last_address_index; + QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next); + } + + trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset); + return mapcache->last_address_vaddr + address_offset; +} + +void qemu_map_cache_unlock(void *buffer) +{ + MapCacheEntry *entry = NULL, *pentry = NULL; + MapCacheRev *reventry; + target_phys_addr_t paddr_index; + int found = 0; + + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + if (reventry->vaddr_req == buffer) { + paddr_index = reventry->paddr_index; + found = 1; + break; + } + } + if (!found) { + return; + } + QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next); + qemu_free(reventry); + + entry = &mapcache->entry[paddr_index % mapcache->nr_buckets]; + while (entry && entry->paddr_index != paddr_index) { + pentry = entry; + entry = entry->next; + } + if (!entry) { + return; + } + if (entry->lock > 0) { + entry->lock--; + } +} + +ram_addr_t qemu_ram_addr_from_mapcache(void *ptr) +{ + MapCacheRev *reventry; + target_phys_addr_t paddr_index; + int found = 0; + + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + if (reventry->vaddr_req == ptr) { + paddr_index = reventry->paddr_index; + found = 1; + break; + } + } + if (!found) { + fprintf(stderr, "qemu_ram_addr_from_mapcache, could not find %p\n", ptr); + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, + reventry->vaddr_req); + } + abort(); + return 0; + } + + return paddr_index << MCACHE_BUCKET_SHIFT; +} + +void qemu_invalidate_entry(uint8_t *buffer) +{ + MapCacheEntry *entry = NULL, *pentry = NULL; + MapCacheRev *reventry; + target_phys_addr_t paddr_index; + int found = 0; + + if (mapcache->last_address_vaddr == buffer) { + mapcache->last_address_index = -1; + } + + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + if (reventry->vaddr_req == buffer) { + paddr_index = reventry->paddr_index; + found = 1; + break; + } + } + if (!found) { + DPRINTF("qemu_invalidate_entry, could not find %p\n", buffer); + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req); + } + return; + } + QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next); + qemu_free(reventry); + + entry = &mapcache->entry[paddr_index % mapcache->nr_buckets]; + while (entry && entry->paddr_index != 
paddr_index) { + pentry = entry; + entry = entry->next; + } + if (!entry) { + DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer); + return; + } + entry->lock--; + if (entry->lock > 0 || pentry == NULL) { + return; + } + + pentry->next = entry->next; + if (munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE) != 0) { + perror("unmap fails"); + exit(-1); + } + qemu_free(entry); +} + +void qemu_invalidate_map_cache(void) +{ + unsigned long i; + MapCacheRev *reventry; + + /* Flush pending AIO before destroying the mapcache */ + qemu_aio_flush(); + + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + DPRINTF("There should be no locked mappings at this time, " + "but "TARGET_FMT_plx" -> %p is present\n", + reventry->paddr_index, reventry->vaddr_req); + } + + mapcache_lock(); + + for (i = 0; i < mapcache->nr_buckets; i++) { + MapCacheEntry *entry = &mapcache->entry[i]; + + if (entry->vaddr_base == NULL) { + continue; + } + + if (munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE) != 0) { + perror("unmap fails"); + exit(-1); + } + + entry->paddr_index = 0; + entry->vaddr_base = NULL; + } + + mapcache->last_address_index = -1; + mapcache->last_address_vaddr = NULL; + + mapcache_unlock(); +} + +uint8_t *xen_map_block(target_phys_addr_t phys_addr, target_phys_addr_t size) +{ + uint8_t *vaddr_base; + xen_pfn_t *pfns; + int *err; + unsigned int i; + target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT; + + trace_xen_map_block(phys_addr, size); + phys_addr >>= XC_PAGE_SHIFT; + + pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t)); + err = qemu_mallocz(nb_pfn * sizeof (int)); + + for (i = 0; i < nb_pfn; i++) { + pfns[i] = phys_addr + i; + } + + vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE, + pfns, err, nb_pfn); + if (vaddr_base == NULL) { + perror("xc_map_foreign_bulk"); + exit(-1); + } + + qemu_free(pfns); + qemu_free(err); + + return vaddr_base; +} diff --git a/xen-mapcache.h b/xen-mapcache.h new file mode 100644 index 0000000000..339444c94e --- /dev/null +++ b/xen-mapcache.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2011 Citrix Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef XEN_MAPCACHE_H +#define XEN_MAPCACHE_H + +#include <sys/mman.h> +#include "trace.h" + +void qemu_map_cache_init(void); +uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, uint8_t lock); +void qemu_map_cache_unlock(void *phys_addr); +ram_addr_t qemu_ram_addr_from_mapcache(void *ptr); +void qemu_invalidate_entry(uint8_t *buffer); +void qemu_invalidate_map_cache(void); + +uint8_t *xen_map_block(target_phys_addr_t phys_addr, target_phys_addr_t size); + +static inline void xen_unmap_block(void *addr, ram_addr_t size) +{ + trace_xen_unmap_block(addr, size); + + if (munmap(addr, size) != 0) { + hw_error("xen_unmap_block: %s", strerror(errno)); + } +} + + +#define mapcache_lock() ((void)0) +#define mapcache_unlock() ((void)0) + +#endif /* !XEN_MAPCACHE_H */ diff --git a/xen-stub.c b/xen-stub.c new file mode 100644 index 0000000000..a4f35a19fb --- /dev/null +++ b/xen-stub.c @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2010 Citrix Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu-common.h" +#include "hw/xen.h" + +int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num) +{ + return -1; +} + +void xen_piix3_set_irq(void *opaque, int irq_num, int level) +{ +} + +void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len) +{ +} + +void xen_cmos_set_s3_resume(void *opaque, int irq, int level) +{ +} + +void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size) +{ +} + +qemu_irq *xen_interrupt_controller_init(void) +{ + return NULL; +} + +int xen_init(void) +{ + return -ENOSYS; +}
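The new configure_accelerator() in the vl.c hunk above treats the -machine accel= property as a colon-separated priority list and walks down it until one accelerator initialises. The standalone sketch below mirrors only that parse-and-fall-back loop; try_accel() and pick_accelerator() are hypothetical stand-ins for the real available()/init() hooks in accel_list[], not code from the patch.

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    /* Hypothetical stand-in for the per-accelerator hooks; here only
     * "tcg" is assumed to initialise successfully on this host. */
    static bool try_accel(const char *name)
    {
        return strcmp(name, "tcg") == 0;
    }

    /* Walk a colon-separated priority list such as "kvm:tcg" and stop at
     * the first accelerator that initialises. */
    static bool pick_accelerator(const char *list)
    {
        char buf[16];

        while (*list != '\0') {
            size_t len = strcspn(list, ":");
            size_t n = len < sizeof(buf) - 1 ? len : sizeof(buf) - 1;

            memcpy(buf, list, n);
            buf[n] = '\0';

            if (try_accel(buf)) {
                printf("using accelerator: %s\n", buf);
                return true;
            }
            printf("%s not available, trying next\n", buf);

            list += len;
            if (*list == ':') {
                list++;
            }
        }
        return false;
    }

    int main(void)
    {
        /* "-machine accel=kvm:tcg": try KVM first, fall back to TCG. */
        return pick_accelerator("kvm:tcg") ? 0 : 1;
    }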
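xen_piix_pci_write_config_client() in xen-all.c above watches guest writes to the PIIX3 PIRQ route registers at config offsets 0x60-0x63 and forwards each routed byte to xc_hvm_set_pci_link_route(). A minimal worked example of that byte scan, with a hypothetical set_link_route() in place of the libxenctrl call:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-in for xc_hvm_set_pci_link_route(). */
    static void set_link_route(int link, int irq)
    {
        printf("PCI link %d routed to IRQ %d\n", link, irq);
    }

    int main(void)
    {
        /* A guest writes 0x80800a0b to config offset 0x60 (PIRQA-PIRQD):
         * PIRQA = 0x0b (IRQ 11), PIRQB = 0x0a (IRQ 10); PIRQC and PIRQD
         * have bit 7 set, i.e. the links are disabled and routed to 0. */
        uint32_t address = 0x60;
        uint32_t val = 0x80800a0b;
        int len = 4;

        for (int i = 0; i < len; i++) {
            uint8_t v = (val >> (8 * i)) & 0xff;

            if (v & 0x80) {      /* bit 7: link disabled */
                v = 0;
            }
            v &= 0xf;            /* low nibble: ISA IRQ number */
            if (address + i >= 0x60 && address + i <= 0x63) {
                set_link_route(address + i - 0x60, v);
            }
        }
        return 0;
    }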
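In handle_buffered_iopage() above, a buffered slot carries at most 32 bits of data, so a 64-bit access is spread over two consecutive slots and the read pointer advances by two. A small self-contained sketch of that reassembly, using a simplified buf_slot structure rather than the real buf_ioreq_t:

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified stand-in for buf_ioreq_t: size is log2 of the byte count. */
    struct buf_slot {
        uint8_t  size;
        uint32_t data;
    };

    int main(void)
    {
        struct buf_slot ring[2] = {
            { .size = 3, .data = 0x89abcdef },   /* low 32 bits  */
            { .size = 0, .data = 0x01234567 },   /* high 32 bits */
        };
        unsigned read_pointer = 0;

        uint64_t size = 1UL << ring[read_pointer].size;
        uint64_t data = ring[read_pointer].data;
        int qw = (size == 8);

        if (qw) {
            /* The second slot supplies the upper half of the value. */
            data |= (uint64_t)ring[read_pointer + 1].data << 32;
        }
        read_pointer += qw ? 2 : 1;

        printf("size %llu, data %#llx, read_pointer %u\n",
               (unsigned long long)size, (unsigned long long)data,
               read_pointer);
        return 0;
    }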
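qemu_map_cache() in xen-mapcache.c above derives everything from the guest-physical address: the bucket index (paddr_index), the offset inside the bucket, and the page bit tested in valid_mapping. The sketch below works through that arithmetic for the __x86_64__ constants (1 MiB buckets), assuming the usual 4 KiB Xen page, i.e. XC_PAGE_SHIFT of 12:

    #include <stdio.h>
    #include <stdint.h>

    #define MCACHE_BUCKET_SHIFT 20                      /* __x86_64__ case */
    #define MCACHE_BUCKET_SIZE  (1UL << MCACHE_BUCKET_SHIFT)
    #define XC_PAGE_SHIFT       12                      /* assumed 4 KiB pages */

    int main(void)
    {
        uint64_t phys_addr = 0x12345678;

        /* Which 1 MiB bucket the address falls into. */
        uint64_t bucket_index  = phys_addr >> MCACHE_BUCKET_SHIFT;      /* 0x123 */
        /* Byte offset inside that bucket. */
        uint64_t bucket_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);  /* 0x45678 */
        /* Bit tested in the bucket's valid_mapping bitmap. */
        uint64_t page_in_bucket = bucket_offset >> XC_PAGE_SHIFT;       /* 0x45 */

        printf("bucket %#llx, offset %#llx, page-in-bucket %#llx\n",
               (unsigned long long)bucket_index,
               (unsigned long long)bucket_offset,
               (unsigned long long)page_in_bucket);
        return 0;
    }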