440 files changed, 40613 insertions, 21244 deletions
diff --git a/.gitignore b/.gitignore index 9859c7d746..824c0d24df 100644 --- a/.gitignore +++ b/.gitignore @@ -41,12 +41,14 @@ qemu-io qemu-ga qemu-bridge-helper qemu-monitor.texi +vscclient QMP/qmp-commands.txt test-coroutine test-qmp-input-visitor test-qmp-output-visitor test-string-input-visitor test-string-output-visitor +test-visitor-serialization fsdev/virtfs-proxy-helper.1 fsdev/virtfs-proxy-helper.pod .gdbinit @@ -69,6 +71,10 @@ fsdev/virtfs-proxy-helper.pod *.vr *.d *.o +*.lo +*.la +*.pc +.libs *.swp *.orig .pc diff --git a/MAINTAINERS b/MAINTAINERS index b45f0750b2..30ed56dd77 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -207,6 +207,12 @@ M: qemu-devel@nongnu.org S: Orphan F: hw/gumstix.c +i.MX31 +M: Peter Chubb <peter.chubb@nicta.com.au> +S: Odd fixes +F: hw/imx* +F: hw/kzm.c + Integrator CP M: Paul Brook <paul@codesourcery.com> M: Peter Maydell <peter.maydell@linaro.org> @@ -311,6 +317,11 @@ M: Edgar E. Iglesias <edgar.iglesias@gmail.com> S: Maintained F: hw/petalogix_s3adsp1800.c +petalogix_ml605 +M: Peter Crosthwaite <peter.crosthwaite@petalogix.com> +S: Maintained +F: hw/petalogix_ml605_mmu.c + MIPS Machines ------------- Jazz @@ -477,6 +488,17 @@ S: Supported F: hw/virtio-serial* F: hw/virtio-console* +Xilinx EDK +M: Peter Crosthwaite <peter.crosthwaite@petalogix.com> +M: Edgar E. Iglesias <edgar.iglesias@gmail.com> +S: Maintained +F: hw/xilinx_axi* +F: hw/xilinx_uartlite.c +F: hw/xilinx_intc.c +F: hw/xilinx_ethlite.c +F: hw/xilinx_timer.c +F: hw/xilinx.h + Subsystems ---------- Audio @@ -495,6 +517,12 @@ M: Anthony Liguori <aliguori@us.ibm.com> S: Maintained F: qemu-char.c +Device Tree +M: Peter Crosthwaite <peter.crosthwaite@petalogix.com> +M: Alexander Graf <agraf@suse.de> +S: Maintained +F: device-tree.[ch] + GDB stub M: qemu-devel@nongnu.org S: Odd Fixes @@ -33,7 +33,7 @@ configure: ; .PHONY: all clean cscope distclean dvi html info install install-doc \ pdf recurse-all speed tar tarbin test build-all -$(call set-vpath, $(SRC_PATH):$(SRC_PATH)/hw) +$(call set-vpath, $(SRC_PATH)) LIBS+=-lz $(LIBS_TOOLS) @@ -91,19 +91,18 @@ qemu-options.def: $(SRC_PATH)/qemu-options.hx SUBDIR_RULES=$(patsubst %,subdir-%, $(TARGET_DIRS)) -subdir-%: $(GENERATED_HEADERS) +subdir-%: $(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $* V="$(V)" TARGET_DIR="$*/" all,) ifneq ($(wildcard config-host.mak),) include $(SRC_PATH)/Makefile.objs endif -$(universal-obj-y) $(common-obj-y): $(GENERATED_HEADERS) subdir-libcacard: $(oslib-obj-y) $(trace-obj-y) qemu-timer-common.o -$(filter %-softmmu,$(SUBDIR_RULES)): $(universal-obj-y) $(trace-obj-y) $(common-obj-y) subdir-libdis +$(filter %-softmmu,$(SUBDIR_RULES)): $(universal-obj-y) $(trace-obj-y) $(common-obj-y) $(extra-obj-y) subdir-libdis -$(filter %-user,$(SUBDIR_RULES)): $(GENERATED_HEADERS) $(universal-obj-y) $(trace-obj-y) subdir-libdis-user subdir-libuser +$(filter %-user,$(SUBDIR_RULES)): $(universal-obj-y) $(trace-obj-y) subdir-libdis-user subdir-libuser ROMSUBDIR_RULES=$(patsubst %,romsubdir-%, $(ROMS)) romsubdir-%: @@ -121,7 +120,7 @@ QEMU_CFLAGS += -I$(SRC_PATH)/include ui/cocoa.o: ui/cocoa.m -ui/sdl.o audio/sdlaudio.o ui/sdl_zoom.o baum.o: QEMU_CFLAGS += $(SDL_CFLAGS) +ui/sdl.o audio/sdlaudio.o ui/sdl_zoom.o hw/baum.o: QEMU_CFLAGS += $(SDL_CFLAGS) ui/vnc.o: QEMU_CFLAGS += $(VNC_TLS_CFLAGS) @@ -142,16 +141,19 @@ libcacard.la: install-libcacard: @echo "libtool is missing, please install and rerun configure"; exit 1 else -libcacard.la: $(GENERATED_HEADERS) $(oslib-obj-y) qemu-timer-common.o $(addsuffix .lo, $(basename $(trace-obj-y))) 
+libcacard.la: $(oslib-obj-y) qemu-timer-common.o $(addsuffix .lo, $(basename $(trace-obj-y))) $(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C libcacard V="$(V)" TARGET_DIR="$*/" libcacard.la,) install-libcacard: libcacard.la $(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C libcacard V="$(V)" TARGET_DIR="$*/" install-libcacard,) endif + +vscclient$(EXESUF): $(libcacard-y) $(oslib-obj-y) $(trace-obj-y) qemu-timer-common.o libcacard/vscclient.o + $(call quiet-command,$(CC) $(LDFLAGS) -o $@ $^ $(libcacard_libs) $(LIBS)," LINK $@") + ###################################################################### qemu-img.o: qemu-img-cmds.h -qemu-img.o qemu-tool.o qemu-nbd.o qemu-io.o cmd.o qemu-ga.o: $(GENERATED_HEADERS) tools-obj-y = $(oslib-obj-y) $(trace-obj-y) qemu-tool.o qemu-timer.o \ qemu-timer-common.o main-loop.o notify.o \ @@ -163,7 +165,6 @@ qemu-nbd$(EXESUF): qemu-nbd.o $(tools-obj-y) $(block-obj-y) qemu-io$(EXESUF): qemu-io.o cmd.o $(tools-obj-y) $(block-obj-y) qemu-bridge-helper$(EXESUF): qemu-bridge-helper.o -qemu-bridge-helper.o: $(GENERATED_HEADERS) fsdev/virtfs-proxy-helper$(EXESUF): fsdev/virtfs-proxy-helper.o fsdev/virtio-9p-marshal.o oslib-posix.o $(trace-obj-y) fsdev/virtfs-proxy-helper$(EXESUF): LIBS += -lcap @@ -171,10 +172,8 @@ fsdev/virtfs-proxy-helper$(EXESUF): LIBS += -lcap qemu-img-cmds.h: $(SRC_PATH)/qemu-img-cmds.hx $(call quiet-command,sh $(SRC_PATH)/scripts/hxtool -h < $< > $@," GEN $@") -$(qapi-obj-y): $(GENERATED_HEADERS) -qapi-dir := $(BUILD_DIR)/qapi-generated qemu-ga$(EXESUF): LIBS = $(LIBS_QGA) -qemu-ga$(EXESUF): QEMU_CFLAGS += -I $(qapi-dir) +qemu-ga$(EXESUF): QEMU_CFLAGS += -I qapi-generated gen-out-type = $(subst .,-,$(suffix $@)) @@ -182,15 +181,15 @@ ifneq ($(wildcard config-host.mak),) include $(SRC_PATH)/tests/Makefile endif -$(qapi-dir)/qga-qapi-types.c $(qapi-dir)/qga-qapi-types.h :\ +qapi-generated/qga-qapi-types.c qapi-generated/qga-qapi-types.h :\ $(SRC_PATH)/qapi-schema-guest.json $(SRC_PATH)/scripts/qapi-types.py - $(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-types.py $(gen-out-type) -o "$(qapi-dir)" -p "qga-" < $<, " GEN $@") -$(qapi-dir)/qga-qapi-visit.c $(qapi-dir)/qga-qapi-visit.h :\ + $(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-types.py $(gen-out-type) -o qapi-generated -p "qga-" < $<, " GEN $@") +qapi-generated/qga-qapi-visit.c qapi-generated/qga-qapi-visit.h :\ $(SRC_PATH)/qapi-schema-guest.json $(SRC_PATH)/scripts/qapi-visit.py - $(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-visit.py $(gen-out-type) -o "$(qapi-dir)" -p "qga-" < $<, " GEN $@") -$(qapi-dir)/qga-qmp-commands.h $(qapi-dir)/qga-qmp-marshal.c :\ + $(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-visit.py $(gen-out-type) -o qapi-generated -p "qga-" < $<, " GEN $@") +qapi-generated/qga-qmp-commands.h qapi-generated/qga-qmp-marshal.c :\ $(SRC_PATH)/qapi-schema-guest.json $(SRC_PATH)/scripts/qapi-commands.py - $(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-commands.py $(gen-out-type) -o "$(qapi-dir)" -p "qga-" < $<, " GEN $@") + $(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-commands.py $(gen-out-type) -o qapi-generated -p "qga-" < $<, " GEN $@") qapi-types.c qapi-types.h :\ $(SRC_PATH)/qapi-schema.json $(SRC_PATH)/scripts/qapi-types.py @@ -202,10 +201,10 @@ qmp-commands.h qmp-marshal.c :\ $(SRC_PATH)/qapi-schema.json $(SRC_PATH)/scripts/qapi-commands.py $(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-commands.py $(gen-out-type) -m -o "." 
< $<, " GEN $@") -QGALIB_OBJ=$(addprefix $(qapi-dir)/, qga-qapi-types.o qga-qapi-visit.o qga-qmp-marshal.o) -QGALIB_GEN=$(addprefix $(qapi-dir)/, qga-qapi-types.h qga-qapi-visit.h qga-qmp-commands.h) -$(QGALIB_OBJ): $(QGALIB_GEN) $(GENERATED_HEADERS) -$(qga-obj-y) qemu-ga.o: $(QGALIB_GEN) $(GENERATED_HEADERS) +QGALIB_OBJ=$(addprefix qapi-generated/, qga-qapi-types.o qga-qapi-visit.o qga-qmp-marshal.o) +QGALIB_GEN=$(addprefix qapi-generated/, qga-qapi-types.h qga-qapi-visit.h qga-qmp-commands.h) +$(QGALIB_OBJ): $(QGALIB_GEN) +$(qga-obj-y) qemu-ga.o: $(QGALIB_GEN) qemu-ga$(EXESUF): qemu-ga.o $(qga-obj-y) $(tools-obj-y) $(qapi-obj-y) $(qobject-obj-y) $(version-obj-y) $(QGALIB_OBJ) @@ -219,6 +218,7 @@ clean: rm -Rf .libs rm -f slirp/*.o slirp/*.d audio/*.o audio/*.d block/*.o block/*.d net/*.o net/*.d fsdev/*.o fsdev/*.d ui/*.o ui/*.d qapi/*.o qapi/*.d qga/*.o qga/*.d rm -f qom/*.o qom/*.d + rm -f usb/*.o usb/*.d hw/*.o hw/*.d rm -f qemu-img-cmds.h rm -f trace/*.o trace/*.d rm -f trace-dtrace.dtrace trace-dtrace.dtrace-timestamp @@ -226,7 +226,7 @@ clean: rm -f trace-dtrace.h trace-dtrace.h-timestamp rm -f $(foreach f,$(GENERATED_HEADERS),$(f) $(f)-timestamp) rm -f $(foreach f,$(GENERATED_SOURCES),$(f) $(f)-timestamp) - rm -rf $(qapi-dir) + rm -rf qapi-generated $(MAKE) -C tests/tcg clean for d in $(ALL_SUBDIRS) $(QEMULIBS) libcacard; do \ if test -d $$d; then $(MAKE) -C $$d $@ || exit 1; fi; \ @@ -250,7 +250,8 @@ distclean: clean KEYMAPS=da en-gb et fr fr-ch is lt modifiers no pt-br sv \ ar de en-us fi fr-be hr it lv nl pl ru th \ -common de-ch es fo fr-ca hu ja mk nl-be pt sl tr +common de-ch es fo fr-ca hu ja mk nl-be pt sl tr \ +bepo ifdef INSTALL_BLOBS BLOBS=bios.bin sgabios.bin vgabios.bin vgabios-cirrus.bin \ @@ -260,7 +261,6 @@ pxe-e1000.rom pxe-eepro100.rom pxe-ne2k_pci.rom \ pxe-pcnet.rom pxe-rtl8139.rom pxe-virtio.rom \ qemu-icon.bmp \ bamboo.dtb petalogix-s3adsp1800.dtb petalogix-ml605.dtb \ -mpc8544ds.dtb \ multiboot.bin linuxboot.bin kvmvapic.bin \ s390-zipl.rom \ spapr-rtas.bin slof.bin \ @@ -272,6 +272,7 @@ endif install-doc: $(DOCS) $(INSTALL_DIR) "$(DESTDIR)$(qemu_docdir)" $(INSTALL_DATA) qemu-doc.html qemu-tech.html "$(DESTDIR)$(qemu_docdir)" + $(INSTALL_DATA) QMP/qmp-commands.txt "$(DESTDIR)$(qemu_docdir)" ifdef CONFIG_POSIX $(INSTALL_DIR) "$(DESTDIR)$(mandir)/man1" $(INSTALL_DATA) qemu.1 qemu-img.1 "$(DESTDIR)$(mandir)/man1" @@ -400,5 +401,10 @@ tar: cd /tmp && tar zcvf ~/$(FILE).tar.gz $(FILE) --exclude CVS --exclude .git --exclude .svn rm -rf /tmp/$(FILE) +# Add a dependency on the generated files, so that they are always +# rebuilt before other object files +Makefile: $(GENERATED_HEADERS) + # Include automatically generated dependency files --include $(wildcard *.d audio/*.d slirp/*.d block/*.d net/*.d ui/*.d qapi/*.d qga/*.d) +# All subdir dependencies come automatically from our recursive subdir rules +-include $(wildcard *.d) diff --git a/Makefile.dis b/Makefile.dis index 3e1fcaf4b7..09060f0a1a 100644 --- a/Makefile.dis +++ b/Makefile.dis @@ -20,4 +20,4 @@ clean: rm -f *.o *.d *.a *~ # Include automatically generated dependency files --include $(wildcard *.d */*.d) +-include $(wildcard *.d) diff --git a/Makefile.hw b/Makefile.hw index 33f1ab0183..28fe100fbe 100644 --- a/Makefile.hw +++ b/Makefile.hw @@ -7,7 +7,7 @@ include $(SRC_PATH)/rules.mak .PHONY: all -$(call set-vpath, $(SRC_PATH):$(SRC_PATH)/hw) +$(call set-vpath, $(SRC_PATH)) QEMU_CFLAGS+=-I.. 
QEMU_CFLAGS += -I$(SRC_PATH)/include @@ -19,7 +19,8 @@ all: $(hw-obj-y) @true clean: - rm -f *.o */*.o *.d */*.d *.a */*.a *~ */*~ + rm -f $(addsuffix *.o, $(sort $(dir $(hw-obj-y)))) + rm -f $(addsuffix *.d, $(sort $(dir $(hw-obj-y)))) # Include automatically generated dependency files --include $(wildcard *.d */*.d) +-include $(patsubst %.o, %.d, $(hw-obj-y)) diff --git a/Makefile.objs b/Makefile.objs index f173946fd2..5ebbcfa171 100644 --- a/Makefile.objs +++ b/Makefile.objs @@ -1,6 +1,7 @@ ####################################################################### # Target-independent parts used in system and user emulation universal-obj-y = +universal-obj-y += qemu-log.o ####################################################################### # QObject @@ -12,9 +13,7 @@ universal-obj-y += $(qobject-obj-y) ####################################################################### # QOM -include $(SRC_PATH)/qom/Makefile -qom-obj-y = $(addprefix qom/, $(qom-y)) -qom-obj-twice-y = $(addprefix qom/, $(qom-twice-y)) +qom-obj-y = qom/ universal-obj-y += $(qom-obj-y) @@ -47,45 +46,13 @@ block-obj-y += nbd.o block.o aio.o aes.o qemu-config.o qemu-progress.o qemu-sock block-obj-y += $(coroutine-obj-y) $(qobject-obj-y) $(version-obj-y) block-obj-$(CONFIG_POSIX) += posix-aio-compat.o block-obj-$(CONFIG_LINUX_AIO) += linux-aio.o - -block-nested-y += raw.o cow.o qcow.o vdi.o vmdk.o cloop.o dmg.o bochs.o vpc.o vvfat.o -block-nested-y += qcow2.o qcow2-refcount.o qcow2-cluster.o qcow2-snapshot.o qcow2-cache.o -block-nested-y += qed.o qed-gencb.o qed-l2-cache.o qed-table.o qed-cluster.o -block-nested-y += qed-check.o -block-nested-y += parallels.o nbd.o blkdebug.o sheepdog.o blkverify.o -block-nested-y += stream.o -block-nested-$(CONFIG_WIN32) += raw-win32.o -block-nested-$(CONFIG_POSIX) += raw-posix.o -block-nested-$(CONFIG_LIBISCSI) += iscsi.o -block-nested-$(CONFIG_CURL) += curl.o -block-nested-$(CONFIG_RBD) += rbd.o - -block-obj-y += $(addprefix block/, $(block-nested-y)) - -net-obj-y = net.o -net-nested-y = queue.o checksum.o util.o -net-nested-y += socket.o -net-nested-y += dump.o -net-nested-$(CONFIG_POSIX) += tap.o -net-nested-$(CONFIG_LINUX) += tap-linux.o -net-nested-$(CONFIG_WIN32) += tap-win32.o -net-nested-$(CONFIG_BSD) += tap-bsd.o -net-nested-$(CONFIG_SOLARIS) += tap-solaris.o -net-nested-$(CONFIG_AIX) += tap-aix.o -net-nested-$(CONFIG_HAIKU) += tap-haiku.o -net-nested-$(CONFIG_SLIRP) += slirp.o -net-nested-$(CONFIG_VDE) += vde.o -net-obj-y += $(addprefix net/, $(net-nested-y)) +block-obj-y += block/ ifeq ($(CONFIG_VIRTIO)$(CONFIG_VIRTFS)$(CONFIG_PCI),yyy) # Lots of the fsdev/9pcode is pulled in by vl.c via qemu_fsdev_add. # only pull in the actual virtio-9p device if we also enabled virtio. CONFIG_REALLY_VIRTFS=y -fsdev-nested-y = qemu-fsdev.o virtio-9p-marshal.o -else -fsdev-nested-y = qemu-fsdev-dummy.o endif -fsdev-obj-$(CONFIG_VIRTFS) += $(addprefix fsdev/, $(fsdev-nested-y)) ###################################################################### # Target independent part of system emulation. The long term path is to @@ -93,104 +60,40 @@ fsdev-obj-$(CONFIG_VIRTFS) += $(addprefix fsdev/, $(fsdev-nested-y)) # single QEMU executable should support all CPUs and machines. 
common-obj-y = $(block-obj-y) blockdev.o -common-obj-y += $(net-obj-y) -common-obj-y += $(qom-obj-twice-y) -common-obj-$(CONFIG_LINUX) += $(fsdev-obj-$(CONFIG_LINUX)) +common-obj-y += net.o net/ +common-obj-y += qom/ common-obj-y += readline.o console.o cursor.o common-obj-y += $(oslib-obj-y) common-obj-$(CONFIG_WIN32) += os-win32.o common-obj-$(CONFIG_POSIX) += os-posix.o +common-obj-$(CONFIG_LINUX) += fsdev/ +extra-obj-$(CONFIG_LINUX) += fsdev/ + common-obj-y += tcg-runtime.o host-utils.o main-loop.o -common-obj-y += irq.o input.o -common-obj-$(CONFIG_PTIMER) += ptimer.o -common-obj-$(CONFIG_MAX7310) += max7310.o -common-obj-$(CONFIG_WM8750) += wm8750.o -common-obj-$(CONFIG_TWL92230) += twl92230.o -common-obj-$(CONFIG_TSC2005) += tsc2005.o -common-obj-$(CONFIG_LM832X) += lm832x.o -common-obj-$(CONFIG_TMP105) += tmp105.o -common-obj-$(CONFIG_STELLARIS_INPUT) += stellaris_input.o -common-obj-$(CONFIG_SSD0303) += ssd0303.o -common-obj-$(CONFIG_SSD0323) += ssd0323.o -common-obj-$(CONFIG_ADS7846) += ads7846.o -common-obj-$(CONFIG_MAX111X) += max111x.o -common-obj-$(CONFIG_DS1338) += ds1338.o -common-obj-y += i2c.o smbus.o smbus_eeprom.o -common-obj-y += eeprom93xx.o -common-obj-y += scsi-disk.o cdrom.o -common-obj-y += scsi-generic.o scsi-bus.o -common-obj-y += hid.o -common-obj-y += usb/core.o usb/bus.o usb/desc.o usb/dev-hub.o -common-obj-y += usb/host-$(HOST_USB).o -common-obj-y += usb/dev-hid.o usb/dev-storage.o usb/dev-wacom.o -common-obj-y += usb/dev-serial.o usb/dev-network.o usb/dev-audio.o -common-obj-$(CONFIG_SSI) += ssi.o -common-obj-$(CONFIG_SSI_SD) += ssi-sd.o -common-obj-$(CONFIG_SD) += sd.o -common-obj-y += bt.o bt-host.o bt-vhci.o bt-l2cap.o bt-sdp.o bt-hci.o bt-hid.o -common-obj-y += bt-hci-csr.o usb/dev-bluetooth.o +common-obj-y += input.o common-obj-y += buffered_file.o migration.o migration-tcp.o common-obj-y += qemu-char.o #aio.o -common-obj-y += msmouse.o ps2.o -common-obj-y += qdev.o qdev-properties.o qdev-monitor.o common-obj-y += block-migration.o iohandler.o common-obj-y += pflib.o common-obj-y += bitmap.o bitops.o -common-obj-$(CONFIG_BRLAPI) += baum.o common-obj-$(CONFIG_POSIX) += migration-exec.o migration-unix.o migration-fd.o common-obj-$(CONFIG_WIN32) += version.o -common-obj-$(CONFIG_SPICE) += ui/spice-core.o ui/spice-input.o ui/spice-display.o spice-qemu-char.o - -audio-obj-y = audio.o noaudio.o wavaudio.o mixeng.o -audio-obj-$(CONFIG_SDL) += sdlaudio.o -audio-obj-$(CONFIG_OSS) += ossaudio.o -audio-obj-$(CONFIG_SPICE) += spiceaudio.o -audio-obj-$(CONFIG_COREAUDIO) += coreaudio.o -audio-obj-$(CONFIG_ALSA) += alsaaudio.o -audio-obj-$(CONFIG_DSOUND) += dsoundaudio.o -audio-obj-$(CONFIG_FMOD) += fmodaudio.o -audio-obj-$(CONFIG_ESD) += esdaudio.o -audio-obj-$(CONFIG_PA) += paaudio.o -audio-obj-$(CONFIG_WINWAVE) += winwaveaudio.o -audio-obj-$(CONFIG_AUDIO_PT_INT) += audio_pt_int.o -audio-obj-$(CONFIG_AUDIO_WIN_INT) += audio_win_int.o -audio-obj-y += wavcapture.o -common-obj-y += $(addprefix audio/, $(audio-obj-y)) - -ui-obj-y += keymaps.o -ui-obj-$(CONFIG_SDL) += sdl.o sdl_zoom.o x_keymap.o -ui-obj-$(CONFIG_COCOA) += cocoa.o -ui-obj-$(CONFIG_CURSES) += curses.o -vnc-obj-y += vnc.o d3des.o -vnc-obj-y += vnc-enc-zlib.o vnc-enc-hextile.o -vnc-obj-y += vnc-enc-tight.o vnc-palette.o -vnc-obj-y += vnc-enc-zrle.o -vnc-obj-$(CONFIG_VNC_TLS) += vnc-tls.o vnc-auth-vencrypt.o -vnc-obj-$(CONFIG_VNC_SASL) += vnc-auth-sasl.o -ifdef CONFIG_VNC_THREAD -vnc-obj-y += vnc-jobs-async.o -else -vnc-obj-y += vnc-jobs-sync.o -endif -common-obj-y += $(addprefix ui/, $(ui-obj-y)) 
-common-obj-$(CONFIG_VNC) += $(addprefix ui/, $(vnc-obj-y)) +common-obj-$(CONFIG_SPICE) += spice-qemu-char.o + +common-obj-y += audio/ +common-obj-y += hw/ +common-obj-y += ui/ +common-obj-y += bt-host.o bt-vhci.o common-obj-y += iov.o acl.o common-obj-$(CONFIG_POSIX) += compatfd.o common-obj-y += notify.o event_notifier.o common-obj-y += qemu-timer.o qemu-timer-common.o -slirp-obj-y = cksum.o if.o ip_icmp.o ip_input.o ip_output.o -slirp-obj-y += slirp.o mbuf.o misc.o sbuf.o socket.o tcp_input.o tcp_output.o -slirp-obj-y += tcp_subr.o tcp_timer.o udp.o bootp.o tftp.o arp_table.o -common-obj-$(CONFIG_SLIRP) += $(addprefix slirp/, $(slirp-obj-y)) - -# xen backend driver support -common-obj-$(CONFIG_XEN_BACKEND) += xen_backend.o xen_devconfig.o -common-obj-$(CONFIG_XEN_BACKEND) += xen_console.o xenfb.o xen_disk.o xen_nic.o +common-obj-$(CONFIG_SLIRP) += slirp/ ###################################################################### # libuser @@ -202,152 +105,12 @@ user-obj-y += cutils.o iov.o cache-utils.o user-obj-y += module.o user-obj-y += qemu-user.o user-obj-y += $(trace-obj-y) -user-obj-y += $(qom-obj-twice-y) +user-obj-y += qom/ ###################################################################### # libhw -hw-obj-y = -hw-obj-y += vl.o loader.o -hw-obj-$(CONFIG_VIRTIO) += virtio-console.o -hw-obj-y += usb/libhw.o -hw-obj-$(CONFIG_VIRTIO_PCI) += virtio-pci.o -hw-obj-y += fw_cfg.o -hw-obj-$(CONFIG_PCI) += pci.o pci_bridge.o pci_bridge_dev.o -hw-obj-$(CONFIG_PCI) += msix.o msi.o -hw-obj-$(CONFIG_PCI) += shpc.o -hw-obj-$(CONFIG_PCI) += slotid_cap.o -hw-obj-$(CONFIG_PCI) += pci_host.o pcie_host.o -hw-obj-$(CONFIG_PCI) += ioh3420.o xio3130_upstream.o xio3130_downstream.o -hw-obj-y += watchdog.o -hw-obj-$(CONFIG_ISA_MMIO) += isa_mmio.o -hw-obj-$(CONFIG_ECC) += ecc.o -hw-obj-$(CONFIG_NAND) += nand.o -hw-obj-$(CONFIG_PFLASH_CFI01) += pflash_cfi01.o -hw-obj-$(CONFIG_PFLASH_CFI02) += pflash_cfi02.o - -hw-obj-$(CONFIG_M48T59) += m48t59.o -hw-obj-$(CONFIG_ESCC) += escc.o -hw-obj-$(CONFIG_EMPTY_SLOT) += empty_slot.o - -hw-obj-$(CONFIG_SERIAL) += serial.o -hw-obj-$(CONFIG_PARALLEL) += parallel.o -hw-obj-$(CONFIG_I8254) += i8254_common.o i8254.o -hw-obj-$(CONFIG_PCSPK) += pcspk.o -hw-obj-$(CONFIG_PCKBD) += pckbd.o -hw-obj-$(CONFIG_USB_UHCI) += usb/hcd-uhci.o -hw-obj-$(CONFIG_USB_OHCI) += usb/hcd-ohci.o -hw-obj-$(CONFIG_USB_EHCI) += usb/hcd-ehci.o -hw-obj-$(CONFIG_USB_XHCI) += usb/hcd-xhci.o -hw-obj-$(CONFIG_FDC) += fdc.o -hw-obj-$(CONFIG_ACPI) += acpi.o acpi_piix4.o -hw-obj-$(CONFIG_APM) += pm_smbus.o apm.o -hw-obj-$(CONFIG_DMA) += dma.o -hw-obj-$(CONFIG_I82374) += i82374.o -hw-obj-$(CONFIG_HPET) += hpet.o -hw-obj-$(CONFIG_APPLESMC) += applesmc.o -hw-obj-$(CONFIG_SMARTCARD) += usb/dev-smartcard-reader.o ccid-card-passthru.o -hw-obj-$(CONFIG_SMARTCARD_NSS) += ccid-card-emulated.o -hw-obj-$(CONFIG_USB_REDIR) += usb/redirect.o -hw-obj-$(CONFIG_I8259) += i8259_common.o i8259.o - -# PPC devices -hw-obj-$(CONFIG_PREP_PCI) += prep_pci.o -hw-obj-$(CONFIG_I82378) += i82378.o -# Mac shared devices -hw-obj-$(CONFIG_MACIO) += macio.o -hw-obj-$(CONFIG_CUDA) += cuda.o -hw-obj-$(CONFIG_ADB) += adb.o -hw-obj-$(CONFIG_MAC_NVRAM) += mac_nvram.o -hw-obj-$(CONFIG_MAC_DBDMA) += mac_dbdma.o -# OldWorld PowerMac -hw-obj-$(CONFIG_HEATHROW_PIC) += heathrow_pic.o -hw-obj-$(CONFIG_GRACKLE_PCI) += grackle_pci.o -# NewWorld PowerMac -hw-obj-$(CONFIG_UNIN_PCI) += unin_pci.o -hw-obj-$(CONFIG_DEC_PCI) += dec_pci.o -# PowerPC E500 boards -hw-obj-$(CONFIG_PPCE500_PCI) += ppce500_pci.o - -# MIPS devices -hw-obj-$(CONFIG_PIIX4) += piix4.o 
-hw-obj-$(CONFIG_G364FB) += g364fb.o -hw-obj-$(CONFIG_JAZZ_LED) += jazz_led.o - -# PCI watchdog devices -hw-obj-$(CONFIG_PCI) += wdt_i6300esb.o - -hw-obj-$(CONFIG_PCI) += pcie.o pcie_aer.o pcie_port.o - -# PCI network cards -hw-obj-$(CONFIG_NE2000_PCI) += ne2000.o -hw-obj-$(CONFIG_EEPRO100_PCI) += eepro100.o -hw-obj-$(CONFIG_PCNET_PCI) += pcnet-pci.o -hw-obj-$(CONFIG_PCNET_COMMON) += pcnet.o -hw-obj-$(CONFIG_E1000_PCI) += e1000.o -hw-obj-$(CONFIG_RTL8139_PCI) += rtl8139.o - -hw-obj-$(CONFIG_SMC91C111) += smc91c111.o -hw-obj-$(CONFIG_LAN9118) += lan9118.o -hw-obj-$(CONFIG_NE2000_ISA) += ne2000-isa.o -hw-obj-$(CONFIG_OPENCORES_ETH) += opencores_eth.o - -# IDE -hw-obj-$(CONFIG_IDE_CORE) += ide/core.o ide/atapi.o -hw-obj-$(CONFIG_IDE_QDEV) += ide/qdev.o -hw-obj-$(CONFIG_IDE_PCI) += ide/pci.o -hw-obj-$(CONFIG_IDE_ISA) += ide/isa.o -hw-obj-$(CONFIG_IDE_PIIX) += ide/piix.o -hw-obj-$(CONFIG_IDE_CMD646) += ide/cmd646.o -hw-obj-$(CONFIG_IDE_MACIO) += ide/macio.o -hw-obj-$(CONFIG_IDE_VIA) += ide/via.o -hw-obj-$(CONFIG_AHCI) += ide/ahci.o -hw-obj-$(CONFIG_AHCI) += ide/ich.o - -# SCSI layer -hw-obj-$(CONFIG_LSI_SCSI_PCI) += lsi53c895a.o -hw-obj-$(CONFIG_ESP) += esp.o - -hw-obj-y += dma-helpers.o sysbus.o isa-bus.o -hw-obj-y += qdev-addr.o - -# VGA -hw-obj-$(CONFIG_VGA_PCI) += vga-pci.o -hw-obj-$(CONFIG_VGA_ISA) += vga-isa.o -hw-obj-$(CONFIG_VGA_ISA_MM) += vga-isa-mm.o -hw-obj-$(CONFIG_VMWARE_VGA) += vmware_vga.o -hw-obj-$(CONFIG_VMMOUSE) += vmmouse.o -hw-obj-$(CONFIG_VGA_CIRRUS) += cirrus_vga.o - -hw-obj-$(CONFIG_RC4030) += rc4030.o -hw-obj-$(CONFIG_DP8393X) += dp8393x.o -hw-obj-$(CONFIG_DS1225Y) += ds1225y.o -hw-obj-$(CONFIG_MIPSNET) += mipsnet.o - -hw-obj-y += qtest.o - -# Sound -sound-obj-y = -sound-obj-$(CONFIG_SB16) += sb16.o -sound-obj-$(CONFIG_ES1370) += es1370.o -sound-obj-$(CONFIG_AC97) += ac97.o -sound-obj-$(CONFIG_ADLIB) += fmopl.o adlib.o -sound-obj-$(CONFIG_GUS) += gus.o gusemu_hal.o gusemu_mixer.o -sound-obj-$(CONFIG_CS4231A) += cs4231a.o -sound-obj-$(CONFIG_HDA) += intel-hda.o hda-audio.o - -adlib.o fmopl.o: QEMU_CFLAGS += -DBUILD_Y8950=0 -hw-obj-$(CONFIG_SOUND) += $(sound-obj-y) - -9pfs-nested-$(CONFIG_VIRTFS) = virtio-9p.o -9pfs-nested-$(CONFIG_VIRTFS) += virtio-9p-local.o virtio-9p-xattr.o -9pfs-nested-$(CONFIG_VIRTFS) += virtio-9p-xattr-user.o virtio-9p-posix-acl.o -9pfs-nested-$(CONFIG_VIRTFS) += virtio-9p-coth.o cofs.o codir.o cofile.o -9pfs-nested-$(CONFIG_VIRTFS) += coxattr.o virtio-9p-synth.o -9pfs-nested-$(CONFIG_OPEN_BY_HANDLE) += virtio-9p-handle.o -9pfs-nested-$(CONFIG_VIRTFS) += virtio-9p-proxy.o - -hw-obj-$(CONFIG_REALLY_VIRTFS) += $(addprefix 9pfs/, $(9pfs-nested-y)) +hw-obj-y = vl.o dma-helpers.o qtest.o hw/ ###################################################################### # libdis @@ -425,31 +188,29 @@ ifneq ($(TRACE_BACKEND),dtrace) trace-obj-y = trace.o endif -trace-nested-$(CONFIG_TRACE_DEFAULT) += default.o - -trace-nested-$(CONFIG_TRACE_SIMPLE) += simple.o +trace-obj-$(CONFIG_TRACE_DEFAULT) += trace/default.o +trace-obj-$(CONFIG_TRACE_SIMPLE) += trace/simple.o trace-obj-$(CONFIG_TRACE_SIMPLE) += qemu-timer-common.o - -trace-nested-$(CONFIG_TRACE_STDERR) += stderr.o - -trace-nested-y += control.o - -trace-obj-y += $(addprefix trace/, $(trace-nested-y)) +trace-obj-$(CONFIG_TRACE_STDERR) += trace/stderr.o +trace-obj-y += trace/control.o $(trace-obj-y): $(GENERATED_HEADERS) ###################################################################### # smartcard -libcacard-y = cac.o event.o vcard.o vreader.o vcard_emul_nss.o vcard_emul_type.o card_7816.o +libcacard-y += 
libcacard/cac.o libcacard/event.o +libcacard-y += libcacard/vcard.o libcacard/vreader.o +libcacard-y += libcacard/vcard_emul_nss.o +libcacard-y += libcacard/vcard_emul_type.o +libcacard-y += libcacard/card_7816.o + +common-obj-$(CONFIG_SMARTCARD_NSS) += $(libcacard-y) ###################################################################### # qapi -qapi-nested-y = qapi-visit-core.o qapi-dealloc-visitor.o qmp-input-visitor.o -qapi-nested-y += qmp-output-visitor.o qmp-registry.o qmp-dispatch.o -qapi-nested-y += string-input-visitor.o string-output-visitor.o -qapi-obj-y = $(addprefix qapi/, $(qapi-nested-y)) +qapi-obj-y = qapi/ common-obj-y += qmp-marshal.o qapi-visit.o qapi-types.o common-obj-y += qmp.o hmp.o @@ -459,11 +220,7 @@ universal-obj-y += $(qapi-obj-y) ###################################################################### # guest agent -qga-nested-y = commands.o guest-agent-command-state.o -qga-nested-$(CONFIG_POSIX) += commands-posix.o channel-posix.o -qga-nested-$(CONFIG_WIN32) += commands-win32.o channel-win32.o service-win32.o -qga-obj-y = $(addprefix qga/, $(qga-nested-y)) -qga-obj-y += qemu-ga.o module.o +qga-obj-y = qga/ qemu-ga.o module.o qga-obj-$(CONFIG_WIN32) += oslib-win32.o qga-obj-$(CONFIG_POSIX) += oslib-posix.o qemu-sockets.o qemu-option.o @@ -473,3 +230,13 @@ vl.o: QEMU_CFLAGS+=$(SDL_CFLAGS) QEMU_CFLAGS+=$(GLIB_CFLAGS) +nested-vars += \ + hw-obj-y \ + qga-obj-y \ + block-obj-y \ + qom-obj-y \ + qapi-obj-y \ + user-obj-y \ + common-obj-y \ + extra-obj-y +dummy := $(call unnest-vars) diff --git a/Makefile.target b/Makefile.target index c94abfd3a4..74f7a4a170 100644 --- a/Makefile.target +++ b/Makefile.target @@ -1,10 +1,5 @@ # -*- Mode: makefile -*- -GENERATED_HEADERS = config-target.h -CONFIG_NO_PCI = $(if $(subst n,,$(CONFIG_PCI)),n,y) -CONFIG_NO_KVM = $(if $(subst n,,$(CONFIG_KVM)),n,y) -CONFIG_NO_XEN = $(if $(subst n,,$(CONFIG_XEN)),n,y) - include ../config-host.mak include config-devices.mak include config-target.mak @@ -13,14 +8,11 @@ ifneq ($(HWDIR),) include $(HWDIR)/config.mak endif -TARGET_PATH=$(SRC_PATH)/target-$(TARGET_BASE_ARCH) -$(call set-vpath, $(SRC_PATH):$(TARGET_PATH):$(SRC_PATH)/hw) +$(call set-vpath, $(SRC_PATH)) ifdef CONFIG_LINUX QEMU_CFLAGS += -I../linux-headers endif -QEMU_CFLAGS += -I.. -I$(TARGET_PATH) -DNEED_CPU_H - -include $(SRC_PATH)/Makefile.objs +QEMU_CFLAGS += -I.. 
-I$(SRC_PATH)/target-$(TARGET_BASE_ARCH) -DNEED_CPU_H QEMU_CFLAGS+=-I$(SRC_PATH)/include @@ -77,78 +69,34 @@ all: $(PROGS) stap ######################################################### # cpu emulator library -libobj-y = exec.o translate-all.o cpu-exec.o translate.o -libobj-y += tcg/tcg.o tcg/optimize.o -libobj-$(CONFIG_TCG_INTERPRETER) += tci.o -libobj-y += fpu/softfloat.o -ifneq ($(TARGET_BASE_ARCH), sparc) -ifneq ($(TARGET_BASE_ARCH), alpha) -libobj-y += op_helper.o -endif -endif -libobj-y += helper.o -ifneq ($(TARGET_BASE_ARCH), ppc) -libobj-y += cpu.o -endif -libobj-$(TARGET_SPARC64) += vis_helper.o -libobj-$(CONFIG_NEED_MMU) += mmu.o -libobj-$(TARGET_ARM) += neon_helper.o iwmmxt_helper.o -ifeq ($(TARGET_BASE_ARCH), sparc) -libobj-y += fop_helper.o cc_helper.o win_helper.o mmu_helper.o ldst_helper.o -endif -libobj-$(TARGET_SPARC) += int32_helper.o -libobj-$(TARGET_SPARC64) += int64_helper.o -libobj-$(TARGET_ALPHA) += int_helper.o fpu_helper.o sys_helper.o mem_helper.o - -libobj-y += disas.o -libobj-$(CONFIG_TCI_DIS) += tci-dis.o +obj-y = exec.o translate-all.o cpu-exec.o +obj-y += tcg/tcg.o tcg/optimize.o +obj-$(CONFIG_TCG_INTERPRETER) += tci.o +obj-y += fpu/softfloat.o +obj-y += disas.o +obj-$(CONFIG_TCI_DIS) += tci-dis.o +obj-y += target-$(TARGET_BASE_ARCH)/ +obj-$(CONFIG_GDBSTUB_XML) += gdbstub-xml.o tci-dis.o: QEMU_CFLAGS += -I$(SRC_PATH)/tcg -I$(SRC_PATH)/tcg/tci -$(libobj-y): $(GENERATED_HEADERS) - # HELPER_CFLAGS is used for all the legacy code compiled with static register # variables -ifneq ($(TARGET_BASE_ARCH), sparc) -op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) -endif user-exec.o: QEMU_CFLAGS += $(HELPER_CFLAGS) # Note: this is a workaround. The real fix is to avoid compiling # cpu_signal_handler() in user-exec.c. -signal.o: QEMU_CFLAGS += $(HELPER_CFLAGS) +%/signal.o: QEMU_CFLAGS += $(HELPER_CFLAGS) ######################################################### # Linux user emulator target ifdef CONFIG_LINUX_USER -$(call set-vpath, $(SRC_PATH)/linux-user:$(SRC_PATH)/linux-user/$(TARGET_ABI_DIR)) - QEMU_CFLAGS+=-I$(SRC_PATH)/linux-user/$(TARGET_ABI_DIR) -I$(SRC_PATH)/linux-user -obj-y = main.o syscall.o strace.o mmap.o signal.o thunk.o \ - elfload.o linuxload.o uaccess.o gdbstub.o cpu-uname.o \ - user-exec.o $(oslib-obj-y) - -obj-$(TARGET_HAS_BFLT) += flatload.o - -obj-$(TARGET_I386) += vm86.o - -obj-i386-y += ioport-user.o -nwfpe-obj-y = fpa11.o fpa11_cpdo.o fpa11_cpdt.o fpa11_cprt.o fpopcode.o -nwfpe-obj-y += single_cpdo.o double_cpdo.o extended_cpdo.o -obj-arm-y += $(addprefix nwfpe/, $(nwfpe-obj-y)) -obj-arm-y += arm-semi.o - -obj-m68k-y += m68k-sim.o m68k-semi.o - -$(obj-y) $(obj-$(TARGET_BASE_ARCH)-y): $(GENERATED_HEADERS) - -obj-y += $(addprefix ../, $(universal-obj-y)) -obj-y += $(addprefix ../libuser/, $(user-obj-y)) -obj-y += $(addprefix ../libdis-user/, $(libdis-y)) -obj-y += $(libobj-y) +obj-y += linux-user/ +obj-y += gdbstub.o thunk.o user-exec.o $(oslib-obj-y) endif #CONFIG_LINUX_USER @@ -157,274 +105,81 @@ endif #CONFIG_LINUX_USER ifdef CONFIG_BSD_USER -$(call set-vpath, $(SRC_PATH)/bsd-user) - QEMU_CFLAGS+=-I$(SRC_PATH)/bsd-user -I$(SRC_PATH)/bsd-user/$(TARGET_ARCH) -obj-y = main.o bsdload.o elfload.o mmap.o signal.o strace.o syscall.o \ - gdbstub.o uaccess.o user-exec.o - -obj-i386-y += ioport-user.o - -$(obj-y) $(obj-$(TARGET_BASE_ARCH)-y): $(GENERATED_HEADERS) - -obj-y += $(addprefix ../, $(universal-obj-y)) -obj-y += $(addprefix ../libuser/, $(user-obj-y)) -obj-y += $(addprefix ../libdis-user/, $(libdis-y)) -obj-y += $(libobj-y) +obj-y += bsd-user/ +obj-y += 
gdbstub.o user-exec.o $(oslib-obj-y) endif #CONFIG_BSD_USER ######################################################### # System emulator target ifdef CONFIG_SOFTMMU +CONFIG_NO_PCI = $(if $(subst n,,$(CONFIG_PCI)),n,y) +CONFIG_NO_KVM = $(if $(subst n,,$(CONFIG_KVM)),n,y) +CONFIG_NO_XEN = $(if $(subst n,,$(CONFIG_XEN)),n,y) +CONFIG_NO_GET_MEMORY_MAPPING = $(if $(subst n,,$(CONFIG_HAVE_GET_MEMORY_MAPPING)),n,y) +CONFIG_NO_CORE_DUMP = $(if $(subst n,,$(CONFIG_HAVE_CORE_DUMP)),n,y) -obj-y = arch_init.o cpus.o monitor.o machine.o gdbstub.o balloon.o ioport.o -# virtio has to be here due to weird dependency between PCI and virtio-net. -# need to fix this properly -obj-$(CONFIG_NO_PCI) += pci-stub.o -obj-$(CONFIG_VIRTIO) += virtio.o virtio-blk.o virtio-balloon.o virtio-net.o virtio-serial-bus.o -obj-$(CONFIG_VIRTIO) += virtio-scsi.o -obj-y += vhost_net.o -obj-$(CONFIG_VHOST_NET) += vhost.o -obj-$(CONFIG_REALLY_VIRTFS) += 9pfs/virtio-9p-device.o -obj-$(CONFIG_KVM) += kvm.o kvm-all.o +obj-y += arch_init.o cpus.o monitor.o gdbstub.o balloon.o ioport.o +obj-y += hw/ +obj-$(CONFIG_KVM) += kvm-all.o obj-$(CONFIG_NO_KVM) += kvm-stub.o -obj-$(CONFIG_VGA) += vga.o obj-y += memory.o savevm.o cputlb.o -obj-y += memory_mapping.o -obj-$(CONFIG_HAVE_GET_MEMORY_MAPPING) += arch_memory_mapping.o -obj-$(CONFIG_HAVE_CORE_DUMP) += arch_dump.o +obj-$(CONFIG_HAVE_GET_MEMORY_MAPPING) += memory_mapping.o +obj-$(CONFIG_HAVE_CORE_DUMP) += dump.o +obj-$(CONFIG_NO_GET_MEMORY_MAPPING) += memory_mapping-stub.o +obj-$(CONFIG_NO_CORE_DUMP) += dump-stub.o LIBS+=-lz -obj-i386-$(CONFIG_KVM) += hyperv.o - QEMU_CFLAGS += $(VNC_TLS_CFLAGS) QEMU_CFLAGS += $(VNC_SASL_CFLAGS) QEMU_CFLAGS += $(VNC_JPEG_CFLAGS) QEMU_CFLAGS += $(VNC_PNG_CFLAGS) # xen support -obj-$(CONFIG_XEN) += xen-all.o xen_machine_pv.o xen_domainbuild.o xen-mapcache.o +obj-$(CONFIG_XEN) += xen-all.o xen-mapcache.o obj-$(CONFIG_NO_XEN) += xen-stub.o -obj-i386-$(CONFIG_XEN) += xen_platform.o xen_apic.o - -# Inter-VM PCI shared memory -CONFIG_IVSHMEM = -ifeq ($(CONFIG_KVM), y) - ifeq ($(CONFIG_PCI), y) - CONFIG_IVSHMEM = y - endif -endif -obj-$(CONFIG_IVSHMEM) += ivshmem.o - -# Generic hotplugging -obj-y += device-hotplug.o - # Hardware support -obj-i386-y += mc146818rtc.o pc.o -obj-i386-y += apic_common.o apic.o kvmvapic.o -obj-i386-y += sga.o ioapic_common.o ioapic.o piix_pci.o -obj-i386-y += vmport.o -obj-i386-y += pci-hotplug.o smbios.o wdt_ib700.o -obj-i386-y += debugcon.o multiboot.o -obj-i386-y += pc_piix.o -obj-i386-y += pc_sysfw.o -obj-i386-$(CONFIG_KVM) += kvm/clock.o kvm/apic.o kvm/i8259.o kvm/ioapic.o kvm/i8254.o -obj-i386-$(CONFIG_SPICE) += qxl.o qxl-logger.o qxl-render.o - -# shared objects -obj-ppc-y = ppc.o ppc_booke.o -# PREP target -obj-ppc-y += mc146818rtc.o -obj-ppc-y += ppc_prep.o -# OldWorld PowerMac -obj-ppc-y += ppc_oldworld.o -# NewWorld PowerMac -obj-ppc-y += ppc_newworld.o -# IBM pSeries (sPAPR) -obj-ppc-$(CONFIG_PSERIES) += spapr.o spapr_hcall.o spapr_rtas.o spapr_vio.o -obj-ppc-$(CONFIG_PSERIES) += xics.o spapr_vty.o spapr_llan.o spapr_vscsi.o -obj-ppc-$(CONFIG_PSERIES) += spapr_pci.o device-hotplug.o pci-hotplug.o -# PowerPC 4xx boards -obj-ppc-y += ppc4xx_devs.o ppc4xx_pci.o ppc405_uc.o ppc405_boards.o -obj-ppc-y += ppc440_bamboo.o -# PowerPC E500 boards -obj-ppc-y += ppce500_mpc8544ds.o mpc8544_guts.o ppce500_spin.o -# PowerPC 440 Xilinx ML507 reference board. 
-obj-ppc-y += virtex_ml507.o -obj-ppc-$(CONFIG_KVM) += kvm_ppc.o -obj-ppc-$(CONFIG_FDT) += device_tree.o -# PowerPC OpenPIC -obj-ppc-y += openpic.o - -# Xilinx PPC peripherals -obj-ppc-y += xilinx_intc.o -obj-ppc-y += xilinx_timer.o -obj-ppc-y += xilinx_uartlite.o -obj-ppc-y += xilinx_ethlite.o - -# LM32 boards -obj-lm32-y += lm32_boards.o -obj-lm32-y += milkymist.o - -# LM32 peripherals -obj-lm32-y += lm32_pic.o -obj-lm32-y += lm32_juart.o -obj-lm32-y += lm32_timer.o -obj-lm32-y += lm32_uart.o -obj-lm32-y += lm32_sys.o -obj-lm32-y += milkymist-ac97.o -obj-lm32-y += milkymist-hpdmc.o -obj-lm32-y += milkymist-memcard.o -obj-lm32-y += milkymist-minimac2.o -obj-lm32-y += milkymist-pfpu.o -obj-lm32-y += milkymist-softusb.o -obj-lm32-y += milkymist-sysctl.o -obj-lm32-$(CONFIG_OPENGL) += milkymist-tmu2.o -obj-lm32-y += milkymist-uart.o -obj-lm32-y += milkymist-vgafb.o -obj-lm32-y += framebuffer.o - -obj-mips-y = mips_r4k.o mips_jazz.o mips_malta.o mips_mipssim.o -obj-mips-y += mips_addr.o mips_timer.o mips_int.o -obj-mips-y += gt64xxx.o mc146818rtc.o -obj-mips-$(CONFIG_FULONG) += bonito.o vt82c686.o mips_fulong2e.o - -obj-microblaze-y = petalogix_s3adsp1800_mmu.o -obj-microblaze-y += petalogix_ml605_mmu.o -obj-microblaze-y += microblaze_boot.o - -obj-microblaze-y += microblaze_pic_cpu.o -obj-microblaze-y += xilinx_intc.o -obj-microblaze-y += xilinx_timer.o -obj-microblaze-y += xilinx_uartlite.o -obj-microblaze-y += xilinx_ethlite.o -obj-microblaze-y += xilinx_axidma.o -obj-microblaze-y += xilinx_axienet.o - -obj-microblaze-$(CONFIG_FDT) += device_tree.o - -# Boards -obj-cris-y = cris_pic_cpu.o -obj-cris-y += cris-boot.o -obj-cris-y += axis_dev88.o - -# IO blocks -obj-cris-y += etraxfs_dma.o -obj-cris-y += etraxfs_pic.o -obj-cris-y += etraxfs_eth.o -obj-cris-y += etraxfs_timer.o -obj-cris-y += etraxfs_ser.o - ifeq ($(TARGET_ARCH), sparc64) -obj-sparc-y = sun4u.o apb_pci.o -obj-sparc-y += mc146818rtc.o +obj-y += hw/sparc64/ else -obj-sparc-y = sun4m.o lance.o tcx.o sun4m_iommu.o slavio_intctl.o -obj-sparc-y += slavio_timer.o slavio_misc.o sparc32_dma.o -obj-sparc-y += cs4231.o eccmemctl.o sbi.o sun4c_intctl.o leon3.o - -# GRLIB -obj-sparc-y += grlib_gptimer.o grlib_irqmp.o grlib_apbuart.o +obj-y += hw/$(TARGET_BASE_ARCH)/ endif -obj-arm-y = integratorcp.o versatilepb.o arm_pic.o arm_timer.o -obj-arm-y += arm_boot.o pl011.o pl031.o pl050.o pl080.o pl110.o pl181.o pl190.o -obj-arm-y += versatile_pci.o -obj-arm-y += versatile_i2c.o -obj-arm-y += cadence_uart.o -obj-arm-y += cadence_ttc.o -obj-arm-y += cadence_gem.o -obj-arm-y += xilinx_zynq.o zynq_slcr.o -obj-arm-y += arm_gic.o -obj-arm-y += realview_gic.o realview.o arm_sysctl.o arm11mpcore.o a9mpcore.o -obj-arm-y += exynos4210_gic.o exynos4210_combiner.o exynos4210.o -obj-arm-y += exynos4_boards.o exynos4210_uart.o exynos4210_pwm.o -obj-arm-y += exynos4210_pmu.o exynos4210_mct.o exynos4210_fimd.o -obj-arm-y += arm_l2x0.o -obj-arm-y += arm_mptimer.o a15mpcore.o -obj-arm-y += armv7m.o armv7m_nvic.o stellaris.o pl022.o stellaris_enet.o -obj-arm-y += highbank.o -obj-arm-y += pl061.o -obj-arm-y += xgmac.o -obj-arm-y += arm-semi.o -obj-arm-y += pxa2xx.o pxa2xx_pic.o pxa2xx_gpio.o pxa2xx_timer.o pxa2xx_dma.o -obj-arm-y += pxa2xx_lcd.o pxa2xx_mmci.o pxa2xx_pcmcia.o pxa2xx_keypad.o -obj-arm-y += gumstix.o -obj-arm-y += zaurus.o ide/microdrive.o spitz.o tosa.o tc6393xb.o -obj-arm-y += omap1.o omap_lcdc.o omap_dma.o omap_clk.o omap_mmc.o omap_i2c.o \ - omap_gpio.o omap_intc.o omap_uart.o -obj-arm-y += omap2.o omap_dss.o soc_dma.o omap_gptimer.o 
omap_synctimer.o \ - omap_gpmc.o omap_sdrc.o omap_spi.o omap_tap.o omap_l4.o -obj-arm-y += omap_sx1.o palm.o tsc210x.o -obj-arm-y += nseries.o blizzard.o onenand.o cbus.o tusb6010.o usb/hcd-musb.o -obj-arm-y += mst_fpga.o mainstone.o -obj-arm-y += z2.o -obj-arm-y += musicpal.o bitbang_i2c.o marvell_88w8618_audio.o -obj-arm-y += framebuffer.o -obj-arm-y += vexpress.o -obj-arm-y += strongarm.o -obj-arm-y += collie.o -obj-arm-y += pl041.o lm4549.o -obj-arm-$(CONFIG_FDT) += device_tree.o - -obj-sh4-y = shix.o r2d.o sh7750.o sh7750_regnames.o tc58128.o -obj-sh4-y += sh_timer.o sh_serial.o sh_intc.o sh_pci.o sm501.o -obj-sh4-y += ide/mmio.o - -obj-m68k-y = an5206.o mcf5206.o mcf_uart.o mcf_intc.o mcf5208.o mcf_fec.o -obj-m68k-y += m68k-semi.o dummy_m68k.o - -obj-s390x-y = s390-virtio-bus.o s390-virtio.o - -obj-alpha-y = mc146818rtc.o -obj-alpha-y += alpha_pci.o alpha_dp264.o alpha_typhoon.o - -obj-xtensa-y += xtensa_pic.o -obj-xtensa-y += xtensa_sim.o -obj-xtensa-y += xtensa_lx60.o -obj-xtensa-y += xtensa-semi.o -obj-xtensa-y += core-dc232b.o -obj-xtensa-y += core-dc233c.o -obj-xtensa-y += core-fsf.o - main.o: QEMU_CFLAGS+=$(GPROF_CFLAGS) -monitor.o: hmp-commands.h qmp-commands-old.h - -$(obj-y) $(obj-$(TARGET_BASE_ARCH)-y): $(GENERATED_HEADERS) - -obj-y += $(addprefix ../, $(universal-obj-y)) -obj-y += $(addprefix ../, $(common-obj-y)) -obj-y += $(addprefix ../libdis/, $(libdis-y)) -obj-y += $(libobj-y) -obj-y += $(addprefix $(HWDIR)/, $(hw-obj-y)) -obj-y += $(addprefix ../, $(trace-obj-y)) +GENERATED_HEADERS += hmp-commands.h qmp-commands-old.h endif # CONFIG_SOFTMMU -obj-y += dump.o +nested-vars += obj-y -ifndef CONFIG_LINUX_USER -ifndef CONFIG_BSD_USER -# libcacard needs qemu-thread support, and besides is only needed by devices -# so not requires with linux-user / bsd-user targets -obj-$(CONFIG_SMARTCARD_NSS) += $(addprefix ../libcacard/, $(libcacard-y)) -endif # CONFIG_BSD_USER -endif # CONFIG_LINUX_USER +# This resolves all nested paths, so it must come last +include $(SRC_PATH)/Makefile.objs -obj-$(CONFIG_GDBSTUB_XML) += gdbstub-xml.o +all-obj-y = $(obj-y) +all-obj-y += $(addprefix ../, $(universal-obj-y)) + +ifdef CONFIG_SOFTMMU +all-obj-y += $(addprefix ../, $(common-obj-y)) +all-obj-y += $(addprefix ../libdis/, $(libdis-y)) +all-obj-y += $(addprefix $(HWDIR)/, $(hw-obj-y)) +all-obj-y += $(addprefix ../, $(trace-obj-y)) +else +all-obj-y += $(addprefix ../libuser/, $(user-obj-y)) +all-obj-y += $(addprefix ../libdis-user/, $(libdis-y)) +endif #CONFIG_LINUX_USER ifdef QEMU_PROGW # The linker builds a windows executable. Make also a console executable. -$(QEMU_PROGW): $(obj-y) $(obj-$(TARGET_BASE_ARCH)-y) +$(QEMU_PROGW): $(all-obj-y) $(call LINK,$^) $(QEMU_PROG): $(QEMU_PROGW) $(call quiet-command,$(OBJCOPY) --subsystem console $(QEMU_PROGW) $(QEMU_PROG)," GEN $(TARGET_DIR)$(QEMU_PROG)") else -$(QEMU_PROG): $(obj-y) $(obj-$(TARGET_BASE_ARCH)-y) +$(QEMU_PROG): $(all-obj-y) $(call LINK,$^) endif @@ -438,8 +193,8 @@ qmp-commands-old.h: $(SRC_PATH)/qmp-commands.hx $(call quiet-command,sh $(SRC_PATH)/scripts/hxtool -h < $< > $@," GEN $(TARGET_DIR)$@") clean: - rm -f *.o *.a *~ $(PROGS) nwfpe/*.o fpu/*.o - rm -f *.d */*.d tcg/*.o ide/*.o 9pfs/*.o kvm/*.o + rm -f *.a *~ $(PROGS) + rm -f $(shell find . 
-name '*.[od]') rm -f hmp-commands.h qmp-commands-old.h gdbstub-xml.c ifdef CONFIG_TRACE_SYSTEMTAP rm -f *.stp @@ -457,5 +212,8 @@ ifdef CONFIG_TRACE_SYSTEMTAP $(INSTALL_DATA) $(QEMU_PROG).stp "$(DESTDIR)$(qemu_datadir)/../systemtap/tapset" endif +GENERATED_HEADERS += config-target.h +Makefile: $(GENERATED_HEADERS) + # Include automatically generated dependency files --include $(wildcard *.d */*.d) +-include $(wildcard *.d fpu/*.d tcg/*.d) diff --git a/Makefile.user b/Makefile.user index b717820407..1783b2a257 100644 --- a/Makefile.user +++ b/Makefile.user @@ -10,6 +10,7 @@ $(call set-vpath, $(SRC_PATH)) QEMU_CFLAGS+=-I.. QEMU_CFLAGS += -I$(SRC_PATH)/include +QEMU_CFLAGS += -DCONFIG_USER_ONLY include $(SRC_PATH)/Makefile.objs @@ -23,4 +24,4 @@ clean: done # Include automatically generated dependency files --include $(wildcard *.d */*.d) +-include $(wildcard *.d) diff --git a/QMP/qmp-events.txt b/QMP/qmp-events.txt index 9286af569d..9ba7079589 100644 --- a/QMP/qmp-events.txt +++ b/QMP/qmp-events.txt @@ -335,3 +335,21 @@ Example: "len": 10737418240, "offset": 134217728, "speed": 0 }, "timestamp": { "seconds": 1267061043, "microseconds": 959568 } } + + +BALLOON_CHANGE +---------- + +Emitted when the guest changes the actual BALLOON level. This +value is equivalent to the 'actual' field return by the +'query-balloon' command + +Data: + +- "actual": actual level of the guest memory balloon in bytes (json-number) + +Example: + +{ "event": "BALLOON_CHANGE", + "data": { "actual": 944766976 }, + "timestamp": { "seconds": 1267020223, "microseconds": 435656 } } diff --git a/arch_init.c b/arch_init.c index a9e8b7442b..5b0f5626a9 100644 --- a/arch_init.c +++ b/arch_init.c @@ -44,6 +44,14 @@ #include "exec-memory.h" #include "hw/pcspk.h" +#ifdef DEBUG_ARCH_INIT +#define DPRINTF(fmt, ...) \ + do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) 
\ + do { } while (0) +#endif + #ifdef TARGET_SPARC int graphic_width = 1024; int graphic_height = 768; @@ -161,6 +169,18 @@ static int is_dup_page(uint8_t *page) return 1; } +static void save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset, + int cont, int flag) +{ + qemu_put_be64(f, offset | cont | flag); + if (!cont) { + qemu_put_byte(f, strlen(block->idstr)); + qemu_put_buffer(f, (uint8_t *)block->idstr, + strlen(block->idstr)); + } + +} + static RAMBlock *last_block; static ram_addr_t last_offset; @@ -187,21 +207,11 @@ static int ram_save_block(QEMUFile *f) p = memory_region_get_ram_ptr(mr) + offset; if (is_dup_page(p)) { - qemu_put_be64(f, offset | cont | RAM_SAVE_FLAG_COMPRESS); - if (!cont) { - qemu_put_byte(f, strlen(block->idstr)); - qemu_put_buffer(f, (uint8_t *)block->idstr, - strlen(block->idstr)); - } + save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_COMPRESS); qemu_put_byte(f, *p); bytes_sent = 1; } else { - qemu_put_be64(f, offset | cont | RAM_SAVE_FLAG_PAGE); - if (!cont) { - qemu_put_byte(f, strlen(block->idstr)); - qemu_put_buffer(f, (uint8_t *)block->idstr, - strlen(block->idstr)); - } + save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE); qemu_put_buffer(f, p, TARGET_PAGE_SIZE); bytes_sent = TARGET_PAGE_SIZE; } @@ -228,20 +238,7 @@ static uint64_t bytes_transferred; static ram_addr_t ram_save_remaining(void) { - RAMBlock *block; - ram_addr_t count = 0; - - QLIST_FOREACH(block, &ram_list.blocks, next) { - ram_addr_t addr; - for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) { - if (memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE, - DIRTY_MEMORY_MIGRATION)) { - count++; - } - } - } - - return count; + return ram_list.dirty_pages; } uint64_t ram_bytes_remaining(void) @@ -294,16 +291,23 @@ static void sort_ram_list(void) g_free(blocks); } +static void migration_end(void) +{ + memory_global_dirty_log_stop(); +} + +#define MAX_WAIT 50 /* ms, half buffered_file limit */ + int ram_save_live(QEMUFile *f, int stage, void *opaque) { ram_addr_t addr; uint64_t bytes_transferred_last; double bwidth = 0; - uint64_t expected_time = 0; int ret; + int i; if (stage < 0) { - memory_global_dirty_log_stop(); + migration_end(); return 0; } @@ -340,6 +344,7 @@ int ram_save_live(QEMUFile *f, int stage, void *opaque) bytes_transferred_last = bytes_transferred; bwidth = qemu_get_clock_ns(rt_clock); + i = 0; while ((ret = qemu_file_rate_limit(f)) == 0) { int bytes_sent; @@ -348,6 +353,20 @@ int ram_save_live(QEMUFile *f, int stage, void *opaque) if (bytes_sent == 0) { /* no more blocks */ break; } + /* we want to check in the 1st loop, just in case it was the 1st time + and we had to sync the dirty bitmap. 
+ qemu_get_clock_ns() is a bit expensive, so we only check each some + iterations + */ + if ((i & 63) == 0) { + uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000; + if (t1 > MAX_WAIT) { + DPRINTF("big wait: " PRIu64 " milliseconds, %d iterations\n", + t1, i); + break; + } + } + i++; } if (ret < 0) { @@ -376,9 +395,16 @@ int ram_save_live(QEMUFile *f, int stage, void *opaque) qemu_put_be64(f, RAM_SAVE_FLAG_EOS); - expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth; + if (stage == 2) { + uint64_t expected_time; + expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth; - return (stage == 2) && (expected_time <= migrate_max_downtime()); + DPRINTF("ram_save_live: expected(" PRIu64 ") <= max(" PRIu64 ")?\n", + expected_time, migrate_max_downtime()); + + return expected_time <= migrate_max_downtime(); + } + return 0; } static inline void *host_from_stream_offset(QEMUFile *f, @@ -414,8 +440,11 @@ static inline void *host_from_stream_offset(QEMUFile *f, int ram_load(QEMUFile *f, void *opaque, int version_id) { ram_addr_t addr; - int flags; + int flags, ret = 0; int error; + static uint64_t seq_iter; + + seq_iter++; if (version_id < 4 || version_id > 4) { return -EINVAL; @@ -445,8 +474,10 @@ int ram_load(QEMUFile *f, void *opaque, int version_id) QLIST_FOREACH(block, &ram_list.blocks, next) { if (!strncmp(id, block->idstr, sizeof(id))) { - if (block->length != length) - return -EINVAL; + if (block->length != length) { + ret = -EINVAL; + goto done; + } break; } } @@ -454,7 +485,8 @@ int ram_load(QEMUFile *f, void *opaque, int version_id) if (!block) { fprintf(stderr, "Unknown ramblock \"%s\", cannot " "accept migration\n", id); - return -EINVAL; + ret = -EINVAL; + goto done; } total_ram_bytes -= length; @@ -483,16 +515,23 @@ int ram_load(QEMUFile *f, void *opaque, int version_id) void *host; host = host_from_stream_offset(f, addr, flags); + if (!host) { + return -EINVAL; + } qemu_get_buffer(f, host, TARGET_PAGE_SIZE); } error = qemu_file_get_error(f); if (error) { - return error; + ret = error; + goto done; } } while (!(flags & RAM_SAVE_FLAG_EOS)); - return 0; +done: + DPRINTF("Completed load of VM with exit code %d seq iteration " PRIu64 "\n", + ret, seq_iter); + return ret; } #ifdef HAS_AUDIO diff --git a/audio/Makefile.objs b/audio/Makefile.objs new file mode 100644 index 0000000000..0f2932d1b3 --- /dev/null +++ b/audio/Makefile.objs @@ -0,0 +1,14 @@ +common-obj-y = audio.o noaudio.o wavaudio.o mixeng.o +common-obj-$(CONFIG_SDL) += sdlaudio.o +common-obj-$(CONFIG_OSS) += ossaudio.o +common-obj-$(CONFIG_SPICE) += spiceaudio.o +common-obj-$(CONFIG_COREAUDIO) += coreaudio.o +common-obj-$(CONFIG_ALSA) += alsaaudio.o +common-obj-$(CONFIG_DSOUND) += dsoundaudio.o +common-obj-$(CONFIG_FMOD) += fmodaudio.o +common-obj-$(CONFIG_ESD) += esdaudio.o +common-obj-$(CONFIG_PA) += paaudio.o +common-obj-$(CONFIG_WINWAVE) += winwaveaudio.o +common-obj-$(CONFIG_AUDIO_PT_INT) += audio_pt_int.o +common-obj-$(CONFIG_AUDIO_WIN_INT) += audio_win_int.o +common-obj-y += wavcapture.o diff --git a/audio/winwaveaudio.c b/audio/winwaveaudio.c index 87e7493270..663abb9b50 100644 --- a/audio/winwaveaudio.c +++ b/audio/winwaveaudio.c @@ -72,7 +72,7 @@ static void winwave_log_mmresult (MMRESULT mr) break; case MMSYSERR_NOMEM: - str = "Unable to allocate or locl memory"; + str = "Unable to allocate or lock memory"; break; case WAVERR_SYNC: @@ -30,6 +30,7 @@ #include "balloon.h" #include "trace.h" #include "qmp-commands.h" +#include "qjson.h" static QEMUBalloonEvent *balloon_event_fn; static 
QEMUBalloonStatus *balloon_stat_fn; @@ -80,6 +81,19 @@ static int qemu_balloon_status(BalloonInfo *info) return 1; } +void qemu_balloon_changed(int64_t actual) +{ + QObject *data; + + data = qobject_from_jsonf("{ 'actual': %" PRId64 " }", + actual); + + monitor_protocol_event(QEVENT_BALLOON_CHANGE, data); + + qobject_decref(data); +} + + BalloonInfo *qmp_query_balloon(Error **errp) { BalloonInfo *info; @@ -24,4 +24,6 @@ int qemu_add_balloon_handler(QEMUBalloonEvent *event_func, QEMUBalloonStatus *stat_func, void *opaque); void qemu_remove_balloon_handler(void *opaque); +void qemu_balloon_changed(int64_t actual); + #endif @@ -269,4 +269,94 @@ static inline unsigned long hweight_long(unsigned long w) return count; } +/** + * extract32: + * @value: the value to extract the bit field from + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * + * Extract from the 32 bit input @value the bit field specified by the + * @start and @length parameters, and return it. The bit field must + * lie entirely within the 32 bit word. It is valid to request that + * all 32 bits are returned (ie @length 32 and @start 0). + * + * Returns: the value of the bit field extracted from the input value. + */ +static inline uint32_t extract32(uint32_t value, int start, int length) +{ + assert(start >= 0 && length > 0 && length <= 32 - start); + return (value >> start) & (~0U >> (32 - length)); +} + +/** + * extract64: + * @value: the value to extract the bit field from + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * + * Extract from the 64 bit input @value the bit field specified by the + * @start and @length parameters, and return it. The bit field must + * lie entirely within the 64 bit word. It is valid to request that + * all 64 bits are returned (ie @length 64 and @start 0). + * + * Returns: the value of the bit field extracted from the input value. + */ +static inline uint64_t extract64(uint64_t value, int start, int length) +{ + assert(start >= 0 && length > 0 && length <= 64 - start); + return (value >> start) & (~0ULL >> (64 - length)); +} + +/** + * deposit32: + * @value: initial value to insert bit field into + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * @fieldval: the value to insert into the bit field + * + * Deposit @fieldval into the 32 bit @value at the bit field specified + * by the @start and @length parameters, and return the modified + * @value. Bits of @value outside the bit field are not modified. + * Bits of @fieldval above the least significant @length bits are + * ignored. The bit field must lie entirely within the 32 bit word. + * It is valid to request that all 64 bits are modified (ie @length + * 64 and @start 0). + * + * Returns: the modified @value. + */ +static inline uint32_t deposit32(uint32_t value, int start, int length, + uint32_t fieldval) +{ + uint32_t mask; + assert(start >= 0 && length > 0 && length <= 32 - start); + mask = (~0U >> (32 - length)) << start; + return (value & ~mask) | ((fieldval << start) & mask); +} + +/** + * deposit32: + * @value: initial value to insert bit field into + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * @fieldval: the value to insert into the bit field + * + * Deposit @fieldval into the 64 bit @value at the bit field specified + * by the @start and @length parameters, and return the modified + * @value. 
Bits of @value outside the bit field are not modified. + * Bits of @fieldval above the least significant @length bits are + * ignored. The bit field must lie entirely within the 32 bit word. + * It is valid to request that all 64 bits are modified (ie @length + * 64 and @start 0). + * + * Returns: the modified @value. + */ +static inline uint64_t deposit64(uint64_t value, int start, int length, + uint64_t fieldval) +{ + uint64_t mask; + assert(start >= 0 && length > 0 && length <= 64 - start); + mask = (~0ULL >> (64 - length)) << start; + return (value & ~mask) | ((fieldval << start) & mask); +} + #endif diff --git a/block-migration.c b/block-migration.c index fd2ffff0d5..b95b4e1389 100644 --- a/block-migration.c +++ b/block-migration.c @@ -700,13 +700,13 @@ static int block_load(QEMUFile *f, void *opaque, int version_id) return 0; } -static void block_set_params(int blk_enable, int shared_base, void *opaque) +static void block_set_params(const MigrationParams *params, void *opaque) { - block_mig_state.blk_enable = blk_enable; - block_mig_state.shared_base = shared_base; + block_mig_state.blk_enable = params->blk; + block_mig_state.shared_base = params->shared; /* shared base means that blk_enable = 1 */ - block_mig_state.blk_enable |= shared_base; + block_mig_state.blk_enable |= params->shared; } void blk_mig_init(void) @@ -649,12 +649,13 @@ static int bdrv_open_common(BlockDriverState *bs, const char *filename, bs->opaque = g_malloc0(drv->instance_size); bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB); + open_flags = flags | BDRV_O_CACHE_WB; /* * Clear flags that are internal to the block layer before opening the * image. */ - open_flags = flags & ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING); + open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING); /* * Snapshots should be writable. 
@@ -970,98 +971,130 @@ static void bdrv_rebind(BlockDriverState *bs) } } +static void bdrv_move_feature_fields(BlockDriverState *bs_dest, + BlockDriverState *bs_src) +{ + /* move some fields that need to stay attached to the device */ + bs_dest->open_flags = bs_src->open_flags; + + /* dev info */ + bs_dest->dev_ops = bs_src->dev_ops; + bs_dest->dev_opaque = bs_src->dev_opaque; + bs_dest->dev = bs_src->dev; + bs_dest->buffer_alignment = bs_src->buffer_alignment; + bs_dest->copy_on_read = bs_src->copy_on_read; + + bs_dest->enable_write_cache = bs_src->enable_write_cache; + + /* i/o timing parameters */ + bs_dest->slice_time = bs_src->slice_time; + bs_dest->slice_start = bs_src->slice_start; + bs_dest->slice_end = bs_src->slice_end; + bs_dest->io_limits = bs_src->io_limits; + bs_dest->io_base = bs_src->io_base; + bs_dest->throttled_reqs = bs_src->throttled_reqs; + bs_dest->block_timer = bs_src->block_timer; + bs_dest->io_limits_enabled = bs_src->io_limits_enabled; + + /* geometry */ + bs_dest->cyls = bs_src->cyls; + bs_dest->heads = bs_src->heads; + bs_dest->secs = bs_src->secs; + bs_dest->translation = bs_src->translation; + + /* r/w error */ + bs_dest->on_read_error = bs_src->on_read_error; + bs_dest->on_write_error = bs_src->on_write_error; + + /* i/o status */ + bs_dest->iostatus_enabled = bs_src->iostatus_enabled; + bs_dest->iostatus = bs_src->iostatus; + + /* dirty bitmap */ + bs_dest->dirty_count = bs_src->dirty_count; + bs_dest->dirty_bitmap = bs_src->dirty_bitmap; + + /* job */ + bs_dest->in_use = bs_src->in_use; + bs_dest->job = bs_src->job; + + /* keep the same entry in bdrv_states */ + pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name), + bs_src->device_name); + bs_dest->list = bs_src->list; +} + /* - * Add new bs contents at the top of an image chain while the chain is - * live, while keeping required fields on the top layer. + * Swap bs contents for two image chains while they are live, + * while keeping required fields on the BlockDriverState that is + * actually attached to a device. * * This will modify the BlockDriverState fields, and swap contents - * between bs_new and bs_top. Both bs_new and bs_top are modified. + * between bs_new and bs_old. Both bs_new and bs_old are modified. * * bs_new is required to be anonymous. * * This function does not create any image files. 
*/ -void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top) +void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old) { BlockDriverState tmp; - /* bs_new must be anonymous */ + /* bs_new must be anonymous and shouldn't have anything fancy enabled */ assert(bs_new->device_name[0] == '\0'); + assert(bs_new->dirty_bitmap == NULL); + assert(bs_new->job == NULL); + assert(bs_new->dev == NULL); + assert(bs_new->in_use == 0); + assert(bs_new->io_limits_enabled == false); + assert(bs_new->block_timer == NULL); tmp = *bs_new; + *bs_new = *bs_old; + *bs_old = tmp; - /* there are some fields that need to stay on the top layer: */ - tmp.open_flags = bs_top->open_flags; + /* there are some fields that should not be swapped, move them back */ + bdrv_move_feature_fields(&tmp, bs_old); + bdrv_move_feature_fields(bs_old, bs_new); + bdrv_move_feature_fields(bs_new, &tmp); - /* dev info */ - tmp.dev_ops = bs_top->dev_ops; - tmp.dev_opaque = bs_top->dev_opaque; - tmp.dev = bs_top->dev; - tmp.buffer_alignment = bs_top->buffer_alignment; - tmp.copy_on_read = bs_top->copy_on_read; - - /* i/o timing parameters */ - tmp.slice_time = bs_top->slice_time; - tmp.slice_start = bs_top->slice_start; - tmp.slice_end = bs_top->slice_end; - tmp.io_limits = bs_top->io_limits; - tmp.io_base = bs_top->io_base; - tmp.throttled_reqs = bs_top->throttled_reqs; - tmp.block_timer = bs_top->block_timer; - tmp.io_limits_enabled = bs_top->io_limits_enabled; - - /* geometry */ - tmp.cyls = bs_top->cyls; - tmp.heads = bs_top->heads; - tmp.secs = bs_top->secs; - tmp.translation = bs_top->translation; + /* bs_new shouldn't be in bdrv_states even after the swap! */ + assert(bs_new->device_name[0] == '\0'); - /* r/w error */ - tmp.on_read_error = bs_top->on_read_error; - tmp.on_write_error = bs_top->on_write_error; + /* Check a few fields that should remain attached to the device */ + assert(bs_new->dev == NULL); + assert(bs_new->job == NULL); + assert(bs_new->in_use == 0); + assert(bs_new->io_limits_enabled == false); + assert(bs_new->block_timer == NULL); - /* i/o status */ - tmp.iostatus_enabled = bs_top->iostatus_enabled; - tmp.iostatus = bs_top->iostatus; + bdrv_rebind(bs_new); + bdrv_rebind(bs_old); +} - /* keep the same entry in bdrv_states */ - pstrcpy(tmp.device_name, sizeof(tmp.device_name), bs_top->device_name); - tmp.list = bs_top->list; +/* + * Add new bs contents at the top of an image chain while the chain is + * live, while keeping required fields on the top layer. + * + * This will modify the BlockDriverState fields, and swap contents + * between bs_new and bs_top. Both bs_new and bs_top are modified. + * + * bs_new is required to be anonymous. + * + * This function does not create any image files. + */ +void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top) +{ + bdrv_swap(bs_new, bs_top); /* The contents of 'tmp' will become bs_top, as we are * swapping bs_new and bs_top contents. */ - tmp.backing_hd = bs_new; - pstrcpy(tmp.backing_file, sizeof(tmp.backing_file), bs_top->filename); - bdrv_get_format(bs_top, tmp.backing_format, sizeof(tmp.backing_format)); - - /* swap contents of the fixed new bs and the current top */ - *bs_new = *bs_top; - *bs_top = tmp; - - /* device_name[] was carried over from the old bs_top. 
bs_new - * shouldn't be in bdrv_states, so we need to make device_name[] - * reflect the anonymity of bs_new - */ - bs_new->device_name[0] = '\0'; - - /* clear the copied fields in the new backing file */ - bdrv_detach_dev(bs_new, bs_new->dev); - - qemu_co_queue_init(&bs_new->throttled_reqs); - memset(&bs_new->io_base, 0, sizeof(bs_new->io_base)); - memset(&bs_new->io_limits, 0, sizeof(bs_new->io_limits)); - bdrv_iostatus_disable(bs_new); - - /* we don't use bdrv_io_limits_disable() for this, because we don't want - * to affect or delete the block_timer, as it has been moved to bs_top */ - bs_new->io_limits_enabled = false; - bs_new->block_timer = NULL; - bs_new->slice_time = 0; - bs_new->slice_start = 0; - bs_new->slice_end = 0; - - bdrv_rebind(bs_new); - bdrv_rebind(bs_top); + bs_top->backing_hd = bs_new; + bs_top->open_flags &= ~BDRV_O_NO_BACKING; + pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file), + bs_new->filename); + pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format), + bs_new->drv ? bs_new->drv->format_name : ""); } void bdrv_delete(BlockDriverState *bs) @@ -1222,14 +1255,14 @@ bool bdrv_dev_is_medium_locked(BlockDriverState *bs) * free of errors) or -errno when an internal error occurred. The results of the * check are stored in res. */ -int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res) +int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix) { if (bs->drv->bdrv_check == NULL) { return -ENOTSUP; } memset(res, 0, sizeof(*res)); - return bs->drv->bdrv_check(bs, res); + return bs->drv->bdrv_check(bs, res, fix); } #define COMMIT_BUF_SECTORS 2048 @@ -1606,6 +1639,20 @@ int bdrv_read(BlockDriverState *bs, int64_t sector_num, return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false); } +/* Just like bdrv_read(), but with I/O throttling temporarily disabled */ +int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors) +{ + bool enabled; + int ret; + + enabled = bs->io_limits_enabled; + bs->io_limits_enabled = false; + ret = bdrv_read(bs, 0, buf, 1); + bs->io_limits_enabled = enabled; + return ret; +} + #define BITS_PER_LONG (sizeof(unsigned long) * 8) static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num, @@ -1758,8 +1805,8 @@ int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset, return ret; } - /* No flush needed for cache modes that use O_DSYNC */ - if ((bs->open_flags & BDRV_O_CACHE_WB) != 0) { + /* No flush needed for cache modes that already do it */ + if (bs->enable_write_cache) { bdrv_flush(bs); } @@ -1808,6 +1855,9 @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs, ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num, cluster_nb_sectors); } else { + /* This does not change the data on the disk, it is not necessary + * to flush even in cache=writethrough mode. 
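+ * The bounce write only duplicates data that is already readable from
+ * the backing file, so nothing is lost if it never reaches the disk.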
+ */ ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors, &bounce_qiov); } @@ -1977,6 +2027,10 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs, ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov); } + if (ret == 0 && !bs->enable_write_cache) { + ret = bdrv_co_flush(bs); + } + if (bs->dirty_bitmap) { set_dirty_bitmap(bs, sector_num, nb_sectors, 1); } @@ -2096,11 +2150,10 @@ static int guess_disk_lchs(BlockDriverState *bs, int *pcylinders, int *pheads, int *psectors) { uint8_t buf[BDRV_SECTOR_SIZE]; - int ret, i, heads, sectors, cylinders; + int i, heads, sectors, cylinders; struct partition *p; uint32_t nr_sects; uint64_t nb_sectors; - bool enabled; bdrv_get_geometry(bs, &nb_sectors); @@ -2109,12 +2162,9 @@ static int guess_disk_lchs(BlockDriverState *bs, * but also in async I/O mode. So the I/O throttling function has to * be disabled temporarily here, not permanently. */ - enabled = bs->io_limits_enabled; - bs->io_limits_enabled = false; - ret = bdrv_read(bs, 0, buf, 1); - bs->io_limits_enabled = enabled; - if (ret < 0) + if (bdrv_read_unthrottled(bs, 0, buf, 1) < 0) { return -1; + } /* test msdos magic */ if (buf[510] != 0x55 || buf[511] != 0xaa) return -1; @@ -2297,46 +2347,40 @@ void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads, uint64_t nb_sectors, size; int i, first_match, match; - bdrv_get_geometry_hint(bs, nb_heads, max_track, last_sect); - if (*nb_heads != 0 && *max_track != 0 && *last_sect != 0) { - /* User defined disk */ - *rate = FDRIVE_RATE_500K; - } else { - bdrv_get_geometry(bs, &nb_sectors); - match = -1; - first_match = -1; - for (i = 0; ; i++) { - parse = &fd_formats[i]; - if (parse->drive == FDRIVE_DRV_NONE) { + bdrv_get_geometry(bs, &nb_sectors); + match = -1; + first_match = -1; + for (i = 0; ; i++) { + parse = &fd_formats[i]; + if (parse->drive == FDRIVE_DRV_NONE) { + break; + } + if (drive_in == parse->drive || + drive_in == FDRIVE_DRV_NONE) { + size = (parse->max_head + 1) * parse->max_track * + parse->last_sect; + if (nb_sectors == size) { + match = i; break; } - if (drive_in == parse->drive || - drive_in == FDRIVE_DRV_NONE) { - size = (parse->max_head + 1) * parse->max_track * - parse->last_sect; - if (nb_sectors == size) { - match = i; - break; - } - if (first_match == -1) { - first_match = i; - } - } - } - if (match == -1) { if (first_match == -1) { - match = 1; - } else { - match = first_match; + first_match = i; } - parse = &fd_formats[match]; } - *nb_heads = parse->max_head + 1; - *max_track = parse->max_track; - *last_sect = parse->last_sect; - *drive = parse->drive; - *rate = parse->rate; } + if (match == -1) { + if (first_match == -1) { + match = 1; + } else { + match = first_match; + } + parse = &fd_formats[match]; + } + *nb_heads = parse->max_head + 1; + *max_track = parse->max_track; + *last_sect = parse->last_sect; + *drive = parse->drive; + *rate = parse->rate; } int bdrv_get_translation_hint(BlockDriverState *bs) @@ -2371,6 +2415,11 @@ int bdrv_enable_write_cache(BlockDriverState *bs) return bs->enable_write_cache; } +void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce) +{ + bs->enable_write_cache = wce; +} + int bdrv_is_encrypted(BlockDriverState *bs) { if (bs->backing_hd && bs->backing_hd->encrypted) @@ -2413,13 +2462,9 @@ int bdrv_set_key(BlockDriverState *bs, const char *key) return ret; } -void bdrv_get_format(BlockDriverState *bs, char *buf, int buf_size) +const char *bdrv_get_format_name(BlockDriverState *bs) { - if (!bs->drv) { - buf[0] = '\0'; - } else { - 
pstrcpy(buf, buf_size, bs->drv->format_name); - } + return bs->drv ? bs->drv->format_name : NULL; } void bdrv_iterate_format(void (*it)(void *opaque, const char *name), @@ -2466,6 +2511,11 @@ const char *bdrv_get_device_name(BlockDriverState *bs) return bs->device_name; } +int bdrv_get_flags(BlockDriverState *bs) +{ + return bs->open_flags; +} + void bdrv_flush_all(void) { BlockDriverState *bs; @@ -2569,6 +2619,55 @@ int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors, return data.ret; } +/* + * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] + * + * Return true if the given sector is allocated in any image between + * BASE and TOP (inclusive). BASE can be NULL to check if the given + * sector is allocated in any image of the chain. Return false otherwise. + * + * 'pnum' is set to the number of sectors (including and immediately following + * the specified sector) that are known to be in the same + * allocated/unallocated state. + * + */ +int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top, + BlockDriverState *base, + int64_t sector_num, + int nb_sectors, int *pnum) +{ + BlockDriverState *intermediate; + int ret, n = nb_sectors; + + intermediate = top; + while (intermediate && intermediate != base) { + int pnum_inter; + ret = bdrv_co_is_allocated(intermediate, sector_num, nb_sectors, + &pnum_inter); + if (ret < 0) { + return ret; + } else if (ret) { + *pnum = pnum_inter; + return 1; + } + + /* + * [sector_num, nb_sectors] is unallocated on top but intermediate + * might have + * + * [sector_num+x, nr_sectors] allocated. + */ + if (n > pnum_inter) { + n = pnum_inter; + } + + intermediate = intermediate->backing_hd; + } + + *pnum = n; + return 0; +} + BlockInfoList *qmp_query_block(Error **errp) { BlockInfoList *head = NULL, *cur_item = NULL; @@ -122,6 +122,7 @@ int bdrv_create(BlockDriver *drv, const char* filename, int bdrv_create_file(const char* filename, QEMUOptionParameter *options); BlockDriverState *bdrv_new(const char *device_name); void bdrv_make_anon(BlockDriverState *bs); +void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old); void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top); void bdrv_delete(BlockDriverState *bs); int bdrv_parse_cache_flags(const char *mode, int *flags); @@ -141,6 +142,8 @@ bool bdrv_dev_is_tray_open(BlockDriverState *bs); bool bdrv_dev_is_medium_locked(BlockDriverState *bs); int bdrv_read(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sectors); +int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors); int bdrv_write(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors); int bdrv_pread(BlockDriverState *bs, int64_t offset, @@ -165,6 +168,10 @@ int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, int64_t sector_num, int nb_sectors); int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum); +int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top, + BlockDriverState *base, + int64_t sector_num, + int nb_sectors, int *pnum); BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs, const char *backing_file); int bdrv_truncate(BlockDriverState *bs, int64_t offset); @@ -183,10 +190,17 @@ typedef struct BdrvCheckResult { int corruptions; int leaks; int check_errors; + int corruptions_fixed; + int leaks_fixed; BlockFragInfo bfi; } BdrvCheckResult; -int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res); +typedef enum 
{ + BDRV_FIX_LEAKS = 1, + BDRV_FIX_ERRORS = 2, +} BdrvCheckMode; + +int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix); /* async block I/O */ typedef void BlockDriverDirtyHandler(BlockDriverState *bs, int64_t sector, @@ -280,11 +294,12 @@ BlockErrorAction bdrv_get_on_error(BlockDriverState *bs, int is_read); int bdrv_is_read_only(BlockDriverState *bs); int bdrv_is_sg(BlockDriverState *bs); int bdrv_enable_write_cache(BlockDriverState *bs); +void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce); int bdrv_is_inserted(BlockDriverState *bs); int bdrv_media_changed(BlockDriverState *bs); void bdrv_lock_medium(BlockDriverState *bs, bool locked); void bdrv_eject(BlockDriverState *bs, bool eject_flag); -void bdrv_get_format(BlockDriverState *bs, char *buf, int buf_size); +const char *bdrv_get_format_name(BlockDriverState *bs); BlockDriverState *bdrv_find(const char *name); BlockDriverState *bdrv_next(BlockDriverState *bs); void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), @@ -296,6 +311,7 @@ int bdrv_query_missing_keys(void); void bdrv_iterate_format(void (*it)(void *opaque, const char *name), void *opaque); const char *bdrv_get_device_name(BlockDriverState *bs); +int bdrv_get_flags(BlockDriverState *bs); int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors); int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi); @@ -382,9 +398,7 @@ typedef enum { BLKDBG_L2_ALLOC_COW_READ, BLKDBG_L2_ALLOC_WRITE, - BLKDBG_READ, BLKDBG_READ_AIO, - BLKDBG_READ_BACKING, BLKDBG_READ_BACKING_AIO, BLKDBG_READ_COMPRESSED, diff --git a/block/Makefile.objs b/block/Makefile.objs new file mode 100644 index 0000000000..b5754d39bf --- /dev/null +++ b/block/Makefile.objs @@ -0,0 +1,11 @@ +block-obj-y += raw.o cow.o qcow.o vdi.o vmdk.o cloop.o dmg.o bochs.o vpc.o vvfat.o +block-obj-y += qcow2.o qcow2-refcount.o qcow2-cluster.o qcow2-snapshot.o qcow2-cache.o +block-obj-y += qed.o qed-gencb.o qed-l2-cache.o qed-table.o qed-cluster.o +block-obj-y += qed-check.o +block-obj-y += parallels.o nbd.o blkdebug.o sheepdog.o blkverify.o +block-obj-y += stream.o +block-obj-$(CONFIG_WIN32) += raw-win32.o +block-obj-$(CONFIG_POSIX) += raw-posix.o +block-obj-$(CONFIG_LIBISCSI) += iscsi.o +block-obj-$(CONFIG_CURL) += curl.o +block-obj-$(CONFIG_RBD) += rbd.o diff --git a/block/blkdebug.c b/block/blkdebug.c index e56e37da51..59dcea0650 100644 --- a/block/blkdebug.c +++ b/block/blkdebug.c @@ -26,24 +26,10 @@ #include "block_int.h" #include "module.h" -typedef struct BlkdebugVars { - int state; - - /* If inject_errno != 0, an error is injected for requests */ - int inject_errno; - - /* Decides if all future requests fail (false) or only the next one and - * after the next request inject_errno is reset to 0 (true) */ - bool inject_once; - - /* Decides if aio_readv/writev fails right away (true) or returns an error - * return value only in the callback (false) */ - bool inject_immediately; -} BlkdebugVars; - typedef struct BDRVBlkdebugState { - BlkdebugVars vars; - QLIST_HEAD(list, BlkdebugRule) rules[BLKDBG_EVENT_MAX]; + int state; + QLIST_HEAD(, BlkdebugRule) rules[BLKDBG_EVENT_MAX]; + QSIMPLEQ_HEAD(, BlkdebugRule) active_rules; } BDRVBlkdebugState; typedef struct BlkdebugAIOCB { @@ -73,12 +59,14 @@ typedef struct BlkdebugRule { int error; int immediately; int once; + int64_t sector; } inject; struct { int new_state; } set_state; } options; QLIST_ENTRY(BlkdebugRule) next; + QSIMPLEQ_ENTRY(BlkdebugRule) active_next; } BlkdebugRule; static 
QemuOptsList inject_error_opts = { @@ -98,6 +86,10 @@ static QemuOptsList inject_error_opts = { .type = QEMU_OPT_NUMBER, }, { + .name = "sector", + .type = QEMU_OPT_NUMBER, + }, + { .name = "once", .type = QEMU_OPT_BOOL, }, @@ -147,9 +139,7 @@ static const char *event_names[BLKDBG_EVENT_MAX] = { [BLKDBG_L2_ALLOC_COW_READ] = "l2_alloc.cow_read", [BLKDBG_L2_ALLOC_WRITE] = "l2_alloc.write", - [BLKDBG_READ] = "read", [BLKDBG_READ_AIO] = "read_aio", - [BLKDBG_READ_BACKING] = "read_backing", [BLKDBG_READ_BACKING_AIO] = "read_backing_aio", [BLKDBG_READ_COMPRESSED] = "read_compressed", @@ -228,6 +218,7 @@ static int add_rule(QemuOpts *opts, void *opaque) rule->options.inject.once = qemu_opt_get_bool(opts, "once", 0); rule->options.inject.immediately = qemu_opt_get_bool(opts, "immediately", 0); + rule->options.inject.sector = qemu_opt_get_number(opts, "sector", -1); break; case ACTION_SET_STATE: @@ -302,7 +293,7 @@ static int blkdebug_open(BlockDriverState *bs, const char *filename, int flags) filename = c + 1; /* Set initial state */ - s->vars.state = 1; + s->state = 1; /* Open the backing file */ ret = bdrv_file_open(&bs->file, filename, flags); @@ -328,18 +319,18 @@ static void blkdebug_aio_cancel(BlockDriverAIOCB *blockacb) } static BlockDriverAIOCB *inject_error(BlockDriverState *bs, - BlockDriverCompletionFunc *cb, void *opaque) + BlockDriverCompletionFunc *cb, void *opaque, BlkdebugRule *rule) { BDRVBlkdebugState *s = bs->opaque; - int error = s->vars.inject_errno; + int error = rule->options.inject.error; struct BlkdebugAIOCB *acb; QEMUBH *bh; - if (s->vars.inject_once) { - s->vars.inject_errno = 0; + if (rule->options.inject.once) { + QSIMPLEQ_INIT(&s->active_rules); } - if (s->vars.inject_immediately) { + if (rule->options.inject.immediately) { return NULL; } @@ -358,14 +349,21 @@ static BlockDriverAIOCB *blkdebug_aio_readv(BlockDriverState *bs, BlockDriverCompletionFunc *cb, void *opaque) { BDRVBlkdebugState *s = bs->opaque; + BlkdebugRule *rule = NULL; - if (s->vars.inject_errno) { - return inject_error(bs, cb, opaque); + QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) { + if (rule->options.inject.sector == -1 || + (rule->options.inject.sector >= sector_num && + rule->options.inject.sector < sector_num + nb_sectors)) { + break; + } + } + + if (rule && rule->options.inject.error) { + return inject_error(bs, cb, opaque, rule); } - BlockDriverAIOCB *acb = - bdrv_aio_readv(bs->file, sector_num, qiov, nb_sectors, cb, opaque); - return acb; + return bdrv_aio_readv(bs->file, sector_num, qiov, nb_sectors, cb, opaque); } static BlockDriverAIOCB *blkdebug_aio_writev(BlockDriverState *bs, @@ -373,14 +371,21 @@ static BlockDriverAIOCB *blkdebug_aio_writev(BlockDriverState *bs, BlockDriverCompletionFunc *cb, void *opaque) { BDRVBlkdebugState *s = bs->opaque; + BlkdebugRule *rule = NULL; + + QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) { + if (rule->options.inject.sector == -1 || + (rule->options.inject.sector >= sector_num && + rule->options.inject.sector < sector_num + nb_sectors)) { + break; + } + } - if (s->vars.inject_errno) { - return inject_error(bs, cb, opaque); + if (rule && rule->options.inject.error) { + return inject_error(bs, cb, opaque, rule); } - BlockDriverAIOCB *acb = - bdrv_aio_writev(bs->file, sector_num, qiov, nb_sectors, cb, opaque); - return acb; + return bdrv_aio_writev(bs->file, sector_num, qiov, nb_sectors, cb, opaque); } static void blkdebug_close(BlockDriverState *bs) @@ -397,44 +402,53 @@ static void blkdebug_close(BlockDriverState *bs) } } -static void 
process_rule(BlockDriverState *bs, struct BlkdebugRule *rule, - BlkdebugVars *old_vars) +static bool process_rule(BlockDriverState *bs, struct BlkdebugRule *rule, + int old_state, bool injected) { BDRVBlkdebugState *s = bs->opaque; - BlkdebugVars *vars = &s->vars; /* Only process rules for the current state */ - if (rule->state && rule->state != old_vars->state) { - return; + if (rule->state && rule->state != old_state) { + return injected; } /* Take the action */ switch (rule->action) { case ACTION_INJECT_ERROR: - vars->inject_errno = rule->options.inject.error; - vars->inject_once = rule->options.inject.once; - vars->inject_immediately = rule->options.inject.immediately; + if (!injected) { + QSIMPLEQ_INIT(&s->active_rules); + injected = true; + } + QSIMPLEQ_INSERT_HEAD(&s->active_rules, rule, active_next); break; case ACTION_SET_STATE: - vars->state = rule->options.set_state.new_state; + s->state = rule->options.set_state.new_state; break; } + return injected; } static void blkdebug_debug_event(BlockDriverState *bs, BlkDebugEvent event) { BDRVBlkdebugState *s = bs->opaque; struct BlkdebugRule *rule; - BlkdebugVars old_vars = s->vars; + int old_state = s->state; + bool injected; assert((int)event >= 0 && event < BLKDBG_EVENT_MAX); + injected = false; QLIST_FOREACH(rule, &s->rules[event], next) { - process_rule(bs, rule, &old_vars); + injected = process_rule(bs, rule, old_state, injected); } } +static int64_t blkdebug_getlength(BlockDriverState *bs) +{ + return bdrv_getlength(bs->file); +} + static BlockDriver bdrv_blkdebug = { .format_name = "blkdebug", .protocol_name = "blkdebug", @@ -443,6 +457,7 @@ static BlockDriver bdrv_blkdebug = { .bdrv_file_open = blkdebug_open, .bdrv_close = blkdebug_close, + .bdrv_getlength = blkdebug_getlength, .bdrv_aio_readv = blkdebug_aio_readv, .bdrv_aio_writev = blkdebug_aio_writev, diff --git a/block/iscsi.c b/block/iscsi.c index ecb7a2211e..993a86d829 100644 --- a/block/iscsi.c +++ b/block/iscsi.c @@ -35,6 +35,10 @@ #include <iscsi/iscsi.h> #include <iscsi/scsi-lowlevel.h> +#ifdef __linux__ +#include <scsi/sg.h> +#include <hw/scsi-defs.h> +#endif typedef struct IscsiLun { struct iscsi_context *iscsi; @@ -56,6 +60,9 @@ typedef struct IscsiAIOCB { int canceled; size_t read_size; size_t read_offset; +#ifdef __linux__ + sg_io_hdr_t *ioh; +#endif } IscsiAIOCB; struct IscsiTask { @@ -514,6 +521,136 @@ iscsi_aio_discard(BlockDriverState *bs, return &acb->common; } +#ifdef __linux__ +static void +iscsi_aio_ioctl_cb(struct iscsi_context *iscsi, int status, + void *command_data, void *opaque) +{ + IscsiAIOCB *acb = opaque; + + if (acb->canceled != 0) { + qemu_aio_release(acb); + scsi_free_scsi_task(acb->task); + acb->task = NULL; + return; + } + + acb->status = 0; + if (status < 0) { + error_report("Failed to ioctl(SG_IO) to iSCSI lun. %s", + iscsi_get_error(iscsi)); + acb->status = -EIO; + } + + acb->ioh->driver_status = 0; + acb->ioh->host_status = 0; + acb->ioh->resid = 0; + +#define SG_ERR_DRIVER_SENSE 0x08 + + if (status == SCSI_STATUS_CHECK_CONDITION && acb->task->datain.size >= 2) { + int ss; + + acb->ioh->driver_status |= SG_ERR_DRIVER_SENSE; + + acb->ioh->sb_len_wr = acb->task->datain.size - 2; + ss = (acb->ioh->mx_sb_len >= acb->ioh->sb_len_wr) ? 
+ acb->ioh->mx_sb_len : acb->ioh->sb_len_wr; + memcpy(acb->ioh->sbp, &acb->task->datain.data[2], ss); + } + + iscsi_schedule_bh(iscsi_readv_writev_bh_cb, acb); + scsi_free_scsi_task(acb->task); + acb->task = NULL; +} + +static BlockDriverAIOCB *iscsi_aio_ioctl(BlockDriverState *bs, + unsigned long int req, void *buf, + BlockDriverCompletionFunc *cb, void *opaque) +{ + IscsiLun *iscsilun = bs->opaque; + struct iscsi_context *iscsi = iscsilun->iscsi; + struct iscsi_data data; + IscsiAIOCB *acb; + + assert(req == SG_IO); + + acb = qemu_aio_get(&iscsi_aio_pool, bs, cb, opaque); + + acb->iscsilun = iscsilun; + acb->canceled = 0; + acb->buf = NULL; + acb->ioh = buf; + + acb->task = malloc(sizeof(struct scsi_task)); + if (acb->task == NULL) { + error_report("iSCSI: Failed to allocate task for scsi command. %s", + iscsi_get_error(iscsi)); + qemu_aio_release(acb); + return NULL; + } + memset(acb->task, 0, sizeof(struct scsi_task)); + + switch (acb->ioh->dxfer_direction) { + case SG_DXFER_TO_DEV: + acb->task->xfer_dir = SCSI_XFER_WRITE; + break; + case SG_DXFER_FROM_DEV: + acb->task->xfer_dir = SCSI_XFER_READ; + break; + default: + acb->task->xfer_dir = SCSI_XFER_NONE; + break; + } + + acb->task->cdb_size = acb->ioh->cmd_len; + memcpy(&acb->task->cdb[0], acb->ioh->cmdp, acb->ioh->cmd_len); + acb->task->expxferlen = acb->ioh->dxfer_len; + + if (acb->task->xfer_dir == SCSI_XFER_WRITE) { + data.data = acb->ioh->dxferp; + data.size = acb->ioh->dxfer_len; + } + if (iscsi_scsi_command_async(iscsi, iscsilun->lun, acb->task, + iscsi_aio_ioctl_cb, + (acb->task->xfer_dir == SCSI_XFER_WRITE) ? + &data : NULL, + acb) != 0) { + scsi_free_scsi_task(acb->task); + qemu_aio_release(acb); + return NULL; + } + + /* tell libiscsi to read straight into the buffer we got from ioctl */ + if (acb->task->xfer_dir == SCSI_XFER_READ) { + scsi_task_add_data_in_buffer(acb->task, + acb->ioh->dxfer_len, + acb->ioh->dxferp); + } + + iscsi_set_events(iscsilun); + + return &acb->common; +} + +static int iscsi_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) +{ + IscsiLun *iscsilun = bs->opaque; + + switch (req) { + case SG_GET_VERSION_NUM: + *(int *)buf = 30000; + break; + case SG_GET_SCSI_ID: + ((struct sg_scsi_id *)buf)->scsi_type = iscsilun->type; + break; + default: + return -1; + } + return 0; +} +#endif + static int64_t iscsi_getlength(BlockDriverState *bs) { @@ -884,6 +1021,16 @@ static int iscsi_open(BlockDriverState *bs, const char *filename, int flags) if (iscsi_url != NULL) { iscsi_destroy_url(iscsi_url); } + + /* Medium changer or tape. We dont have any emulation for this so this must + * be sg ioctl compatible. We force it to be sg, otherwise qemu will try + * to read from the device to guess the image format. 
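+ * With bs->sg set, image format probing is skipped (the raw format is
+ * used) and requests are expected to go through the SG_IO ioctl path.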
+ */ + if (iscsilun->type == TYPE_MEDIUM_CHANGER || + iscsilun->type == TYPE_TAPE) { + bs->sg = 1; + } + return 0; failed: @@ -925,6 +1072,11 @@ static BlockDriver bdrv_iscsi = { .bdrv_aio_flush = iscsi_aio_flush, .bdrv_aio_discard = iscsi_aio_discard, + +#ifdef __linux__ + .bdrv_ioctl = iscsi_ioctl, + .bdrv_aio_ioctl = iscsi_aio_ioctl, +#endif }; static void iscsi_block_init(void) diff --git a/block/qcow2-cache.c b/block/qcow2-cache.c index 710d4b1828..2d4322a8dd 100644 --- a/block/qcow2-cache.c +++ b/block/qcow2-cache.c @@ -40,11 +40,9 @@ struct Qcow2Cache { struct Qcow2Cache* depends; int size; bool depends_on_flush; - bool writethrough; }; -Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables, - bool writethrough) +Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables) { BDRVQcowState *s = bs->opaque; Qcow2Cache *c; @@ -53,7 +51,6 @@ Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables, c = g_malloc0(sizeof(*c)); c->size = num_tables; c->entries = g_malloc0(sizeof(*c->entries) * num_tables); - c->writethrough = writethrough; for (i = 0; i < c->size; i++) { c->entries[i].table = qemu_blockalign(bs, s->cluster_size); @@ -307,12 +304,7 @@ found: *table = NULL; assert(c->entries[i].ref >= 0); - - if (c->writethrough) { - return qcow2_cache_entry_flush(bs, c, i); - } else { - return 0; - } + return 0; } void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table) @@ -329,16 +321,3 @@ void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table) found: c->entries[i].dirty = true; } - -bool qcow2_cache_set_writethrough(BlockDriverState *bs, Qcow2Cache *c, - bool enable) -{ - bool old = c->writethrough; - - if (!old && enable) { - qcow2_cache_flush(bs, c); - } - - c->writethrough = enable; - return old; -} diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c index 4b3345b11b..d7e0e19d9c 100644 --- a/block/qcow2-cluster.c +++ b/block/qcow2-cluster.c @@ -471,6 +471,8 @@ int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset, QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO); *cluster_offset &= L2E_OFFSET_MASK; break; + default: + abort(); } qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); @@ -538,7 +540,6 @@ static int get_cluster_table(BlockDriverState *bs, uint64_t offset, if (l2_offset) { qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t)); } - l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK; } /* find the cluster offset for the given disk offset */ @@ -641,11 +642,10 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m) } if (m->nb_available & (s->cluster_sectors - 1)) { - uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1); cow = true; qemu_co_mutex_unlock(&s->lock); - ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9), - m->nb_available - end, s->cluster_sectors); + ret = copy_sectors(bs, start_sect, cluster_offset, m->nb_available, + align_offset(m->nb_available, s->cluster_sectors)); qemu_co_mutex_lock(&s->lock); if (ret < 0) goto err; @@ -947,8 +947,16 @@ again: /* save info needed for meta data update */ if (nb_clusters > 0) { + /* + * requested_sectors: Number of sectors from the start of the first + * newly allocated cluster to the end of the (possibly shortened + * before) write request. + * + * avail_sectors: Number of sectors from the start of the first + * newly allocated to the end of the last newly allocated cluster. 
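+ *
+ * Both values are expressed in 512-byte sectors.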
+ */ int requested_sectors = n_end - keep_clusters * s->cluster_sectors; - int avail_sectors = (keep_clusters + nb_clusters) + int avail_sectors = nb_clusters << (s->cluster_bits - BDRV_SECTOR_BITS); *m = (QCowL2Meta) { diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c index 812c93c5c7..5e3f9153fb 100644 --- a/block/qcow2-refcount.c +++ b/block/qcow2-refcount.c @@ -367,7 +367,7 @@ static int alloc_refcount_block(BlockDriverState *bs, } for(i = 0; i < table_size; i++) { - cpu_to_be64s(&new_table[i]); + be64_to_cpus(&new_table[i]); } /* Hook up the new refcount table in the qcow2 header */ @@ -627,10 +627,11 @@ int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size) BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES); assert(size > 0 && size <= s->cluster_size); if (s->free_byte_offset == 0) { - s->free_byte_offset = qcow2_alloc_clusters(bs, s->cluster_size); - if (s->free_byte_offset < 0) { - return s->free_byte_offset; + offset = qcow2_alloc_clusters(bs, s->cluster_size); + if (offset < 0) { + return offset; } + s->free_byte_offset = offset; } redo: free_in_cluster = s->cluster_size - @@ -726,13 +727,6 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs, int64_t old_offset, old_l2_offset; int i, j, l1_modified = 0, nb_csectors, refcount; int ret; - bool old_l2_writethrough, old_refcount_writethrough; - - /* Switch caches to writeback mode during update */ - old_l2_writethrough = - qcow2_cache_set_writethrough(bs, s->l2_table_cache, false); - old_refcount_writethrough = - qcow2_cache_set_writethrough(bs, s->refcount_block_cache, false); l2_table = NULL; l1_table = NULL; @@ -856,11 +850,6 @@ fail: qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); } - /* Enable writethrough cache mode again */ - qcow2_cache_set_writethrough(bs, s->l2_table_cache, old_l2_writethrough); - qcow2_cache_set_writethrough(bs, s->refcount_block_cache, - old_refcount_writethrough); - /* Update L1 only if it isn't deleted anyway (addend = -1) */ if (addend >= 0 && l1_modified) { for(i = 0; i < l1_size; i++) @@ -1122,11 +1111,12 @@ fail: * Returns 0 if no errors are found, the number of errors in case the image is * detected as corrupted, and -errno when an internal error occurred. 
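+ *
+ * If 'fix' includes BDRV_FIX_LEAKS and/or BDRV_FIX_ERRORS, refcount
+ * mismatches of the corresponding type are repaired where possible and
+ * counted in res->leaks_fixed / res->corruptions_fixed instead.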
*/ -int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res) +int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, + BdrvCheckMode fix) { BDRVQcowState *s = bs->opaque; - int64_t size; - int nb_clusters, refcount1, refcount2, i; + int64_t size, i; + int nb_clusters, refcount1, refcount2; QCowSnapshot *sn; uint16_t *refcount_table; int ret; @@ -1170,14 +1160,15 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res) /* Refcount blocks are cluster aligned */ if (offset & (s->cluster_size - 1)) { - fprintf(stderr, "ERROR refcount block %d is not " + fprintf(stderr, "ERROR refcount block %" PRId64 " is not " "cluster aligned; refcount table entry corrupted\n", i); res->corruptions++; continue; } if (cluster >= nb_clusters) { - fprintf(stderr, "ERROR refcount block %d is outside image\n", i); + fprintf(stderr, "ERROR refcount block %" PRId64 + " is outside image\n", i); res->corruptions++; continue; } @@ -1186,7 +1177,8 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res) inc_refcounts(bs, res, refcount_table, nb_clusters, offset, s->cluster_size); if (refcount_table[cluster] != 1) { - fprintf(stderr, "ERROR refcount block %d refcount=%d\n", + fprintf(stderr, "ERROR refcount block %" PRId64 + " refcount=%d\n", i, refcount_table[cluster]); res->corruptions++; } @@ -1197,7 +1189,7 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res) for(i = 0; i < nb_clusters; i++) { refcount1 = get_refcount(bs, i); if (refcount1 < 0) { - fprintf(stderr, "Can't get refcount for cluster %d: %s\n", + fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n", i, strerror(-refcount1)); res->check_errors++; continue; @@ -1205,9 +1197,31 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res) refcount2 = refcount_table[i]; if (refcount1 != refcount2) { - fprintf(stderr, "%s cluster %d refcount=%d reference=%d\n", - refcount1 < refcount2 ? "ERROR" : "Leaked", + + /* Check if we're allowed to fix the mismatch */ + int *num_fixed = NULL; + if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) { + num_fixed = &res->leaks_fixed; + } else if (refcount1 < refcount2 && (fix & BDRV_FIX_ERRORS)) { + num_fixed = &res->corruptions_fixed; + } + + fprintf(stderr, "%s cluster %" PRId64 " refcount=%d reference=%d\n", + num_fixed != NULL ? "Repairing" : + refcount1 < refcount2 ? 
"ERROR" : + "Leaked", i, refcount1, refcount2); + + if (num_fixed) { + ret = update_refcount(bs, i << s->cluster_bits, 1, + refcount2 - refcount1); + if (ret >= 0) { + (*num_fixed)++; + continue; + } + } + + /* And if we couldn't, print an error */ if (refcount1 < refcount2) { res->corruptions++; } else { diff --git a/block/qcow2-snapshot.c b/block/qcow2-snapshot.c index 4561a2abf9..4e7c93b8b3 100644 --- a/block/qcow2-snapshot.c +++ b/block/qcow2-snapshot.c @@ -405,7 +405,7 @@ int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info) #ifdef DEBUG_ALLOC { BdrvCheckResult result = {0}; - qcow2_check_refcounts(bs, &result); + qcow2_check_refcounts(bs, &result, 0); } #endif return 0; @@ -522,7 +522,7 @@ int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id) #ifdef DEBUG_ALLOC { BdrvCheckResult result = {0}; - qcow2_check_refcounts(bs, &result); + qcow2_check_refcounts(bs, &result, 0); } #endif return 0; @@ -582,7 +582,7 @@ int qcow2_snapshot_delete(BlockDriverState *bs, const char *snapshot_id) #ifdef DEBUG_ALLOC { BdrvCheckResult result = {0}; - qcow2_check_refcounts(bs, &result); + qcow2_check_refcounts(bs, &result, 0); } #endif return 0; diff --git a/block/qcow2.c b/block/qcow2.c index 1b5b36cfc1..870148ddf8 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -220,7 +220,6 @@ static int qcow2_open(BlockDriverState *bs, int flags) int len, i, ret = 0; QCowHeader header; uint64_t ext_end; - bool writethrough; ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); if (ret < 0) { @@ -298,14 +297,6 @@ static int qcow2_open(BlockDriverState *bs, int flags) goto fail; } - if (!bs->read_only && s->autoclear_features != 0) { - s->autoclear_features = 0; - ret = qcow2_update_header(bs); - if (ret < 0) { - goto fail; - } - } - /* Check support for various header values */ if (header.refcount_order != 4) { report_unsupported(bs, "%d bit reference counts", @@ -367,10 +358,8 @@ static int qcow2_open(BlockDriverState *bs, int flags) } /* alloc L2 table/refcount block cache */ - writethrough = ((flags & BDRV_O_CACHE_WB) == 0); - s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE, writethrough); - s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE, - writethrough); + s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE); + s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE); s->cluster_cache = g_malloc(s->cluster_size); /* one more sector for decompressed data alignment */ @@ -411,13 +400,22 @@ static int qcow2_open(BlockDriverState *bs, int flags) goto fail; } + /* Clear unknown autoclear feature bits */ + if (!bs->read_only && s->autoclear_features != 0) { + s->autoclear_features = 0; + ret = qcow2_update_header(bs); + if (ret < 0) { + goto fail; + } + } + /* Initialise locks */ qemu_co_mutex_init(&s->lock); #ifdef DEBUG_ALLOC { BdrvCheckResult result = {0}; - qcow2_check_refcounts(bs, &result); + qcow2_check_refcounts(bs, &result, 0); } #endif return ret; @@ -1467,9 +1465,10 @@ static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) } -static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result) +static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result, + BdrvCheckMode fix) { - return qcow2_check_refcounts(bs, result); + return qcow2_check_refcounts(bs, result, fix); } #if 0 diff --git a/block/qcow2.h b/block/qcow2.h index 93567f6451..455b6d7cfe 100644 --- a/block/qcow2.h +++ b/block/qcow2.h @@ -261,7 +261,8 @@ void qcow2_free_any_clusters(BlockDriverState *bs, int 
qcow2_update_snapshot_refcount(BlockDriverState *bs, int64_t l1_table_offset, int l1_size, int addend); -int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res); +int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, + BdrvCheckMode fix); /* qcow2-cluster.c functions */ int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size); @@ -296,11 +297,8 @@ void qcow2_free_snapshots(BlockDriverState *bs); int qcow2_read_snapshots(BlockDriverState *bs); /* qcow2-cache.c functions */ -Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables, - bool writethrough); +Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables); int qcow2_cache_destroy(BlockDriverState* bs, Qcow2Cache *c); -bool qcow2_cache_set_writethrough(BlockDriverState *bs, Qcow2Cache *c, - bool enable); void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table); int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c); diff --git a/block/qed-check.c b/block/qed-check.c index 94327ff5b3..5edf60775b 100644 --- a/block/qed-check.c +++ b/block/qed-check.c @@ -87,6 +87,7 @@ static unsigned int qed_check_l2_table(QEDCheck *check, QEDTable *table) if (!qed_check_cluster_offset(s, offset)) { if (check->fix) { table->offsets[i] = 0; + check->result->corruptions_fixed++; } else { check->result->corruptions++; } @@ -127,6 +128,7 @@ static int qed_check_l1_table(QEDCheck *check, QEDTable *table) /* Clear invalid offset */ if (check->fix) { table->offsets[i] = 0; + check->result->corruptions_fixed++; } else { check->result->corruptions++; } diff --git a/block/qed.c b/block/qed.c index 847417a3d6..5f3eefa3af 100644 --- a/block/qed.c +++ b/block/qed.c @@ -748,7 +748,7 @@ static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos, /* If the read straddles the end of the backing file, shorten it */ size = MIN((uint64_t)backing_length - pos, qiov->size); - BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING); + BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO); bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE, qiov, size / BDRV_SECTOR_SIZE, cb, opaque); } @@ -1517,11 +1517,12 @@ static void bdrv_qed_invalidate_cache(BlockDriverState *bs) bdrv_qed_open(bs, bs->open_flags); } -static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result) +static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result, + BdrvCheckMode fix) { BDRVQEDState *s = bs->opaque; - return qed_check(s, result, false); + return qed_check(s, result, !!fix); } static QEMUOptionParameter qed_create_options[] = { diff --git a/block/raw-posix.c b/block/raw-posix.c index 03fcfcc031..0dce089be5 100644 --- a/block/raw-posix.c +++ b/block/raw-posix.c @@ -52,6 +52,10 @@ #include <sys/param.h> #include <linux/cdrom.h> #include <linux/fd.h> +#include <linux/fs.h> +#endif +#ifdef CONFIG_FIEMAP +#include <linux/fiemap.h> #endif #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__) #include <sys/disk.h> @@ -583,6 +587,106 @@ static int raw_create(const char *filename, QEMUOptionParameter *options) return result; } +/* + * Returns true iff the specified sector is present in the disk image. Drivers + * not implementing the functionality are assumed to not support backing files, + * hence all their sectors are reported as allocated. + * + * If 'sector_num' is beyond the end of the disk image the return value is 0 + * and 'pnum' is set to 0. 
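+ *
+ * This implementation queries the filesystem with FIEMAP or
+ * SEEK_HOLE/SEEK_DATA where available; otherwise everything is reported
+ * as allocated.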
+ * + * 'pnum' is set to the number of sectors (including and immediately following + * the specified sector) that are known to be in the same + * allocated/unallocated state. + * + * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes + * beyond the end of the disk image it will be clamped. + */ +static int coroutine_fn raw_co_is_allocated(BlockDriverState *bs, + int64_t sector_num, + int nb_sectors, int *pnum) +{ + off_t start, data, hole; + int ret; + + ret = fd_open(bs); + if (ret < 0) { + return ret; + } + + start = sector_num * BDRV_SECTOR_SIZE; + +#ifdef CONFIG_FIEMAP + + BDRVRawState *s = bs->opaque; + struct { + struct fiemap fm; + struct fiemap_extent fe; + } f; + + f.fm.fm_start = start; + f.fm.fm_length = (int64_t)nb_sectors * BDRV_SECTOR_SIZE; + f.fm.fm_flags = 0; + f.fm.fm_extent_count = 1; + f.fm.fm_reserved = 0; + if (ioctl(s->fd, FS_IOC_FIEMAP, &f) == -1) { + /* Assume everything is allocated. */ + *pnum = nb_sectors; + return 1; + } + + if (f.fm.fm_mapped_extents == 0) { + /* No extents found, data is beyond f.fm.fm_start + f.fm.fm_length. + * f.fm.fm_start + f.fm.fm_length must be clamped to the file size! + */ + off_t length = lseek(s->fd, 0, SEEK_END); + hole = f.fm.fm_start; + data = MIN(f.fm.fm_start + f.fm.fm_length, length); + } else { + data = f.fe.fe_logical; + hole = f.fe.fe_logical + f.fe.fe_length; + } + +#elif defined SEEK_HOLE && defined SEEK_DATA + + BDRVRawState *s = bs->opaque; + + hole = lseek(s->fd, start, SEEK_HOLE); + if (hole == -1) { + /* -ENXIO indicates that sector_num was past the end of the file. + * There is a virtual hole there. */ + assert(errno != -ENXIO); + + /* Most likely EINVAL. Assume everything is allocated. */ + *pnum = nb_sectors; + return 1; + } + + if (hole > start) { + data = start; + } else { + /* On a hole. We need another syscall to find its end. */ + data = lseek(s->fd, start, SEEK_DATA); + if (data == -1) { + data = lseek(s->fd, 0, SEEK_END); + } + } +#else + *pnum = nb_sectors; + return 1; +#endif + + if (data <= start) { + /* On a data extent, compute sectors to the end of the extent. */ + *pnum = MIN(nb_sectors, (hole - start) / BDRV_SECTOR_SIZE); + return 1; + } else { + /* On a hole, compute sectors to the beginning of the next extent. 
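+ * (A hole in a regular file reads back as zeroes, hence the 0 return.)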
*/ + *pnum = MIN(nb_sectors, (data - start) / BDRV_SECTOR_SIZE); + return 0; + } +} + #ifdef CONFIG_XFS static int xfs_discard(BDRVRawState *s, int64_t sector_num, int nb_sectors) { @@ -634,6 +738,7 @@ static BlockDriver bdrv_file = { .bdrv_close = raw_close, .bdrv_create = raw_create, .bdrv_co_discard = raw_co_discard, + .bdrv_co_is_allocated = raw_co_is_allocated, .bdrv_aio_readv = raw_aio_readv, .bdrv_aio_writev = raw_aio_writev, diff --git a/block/raw.c b/block/raw.c index 7086e314a6..ff34ea41e7 100644 --- a/block/raw.c +++ b/block/raw.c @@ -12,12 +12,14 @@ static int raw_open(BlockDriverState *bs, int flags) static int coroutine_fn raw_co_readv(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) { + BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); return bdrv_co_readv(bs->file, sector_num, nb_sectors, qiov); } static int coroutine_fn raw_co_writev(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) { + BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); return bdrv_co_writev(bs->file, sector_num, nb_sectors, qiov); } @@ -25,6 +27,13 @@ static void raw_close(BlockDriverState *bs) { } +static int coroutine_fn raw_co_is_allocated(BlockDriverState *bs, + int64_t sector_num, + int nb_sectors, int *pnum) +{ + return bdrv_co_is_allocated(bs->file, sector_num, nb_sectors, pnum); +} + static int64_t raw_getlength(BlockDriverState *bs) { return bdrv_getlength(bs->file); @@ -108,6 +117,7 @@ static BlockDriver bdrv_raw = { .bdrv_co_readv = raw_co_readv, .bdrv_co_writev = raw_co_writev, + .bdrv_co_is_allocated = raw_co_is_allocated, .bdrv_co_discard = raw_co_discard, .bdrv_probe = raw_probe, diff --git a/block/rbd.c b/block/rbd.c index 49a4787b55..5a0f79fc8f 100644 --- a/block/rbd.c +++ b/block/rbd.c @@ -476,6 +476,25 @@ static int qemu_rbd_open(BlockDriverState *bs, const char *filename, int flags) s->snap = g_strdup(snap_buf); } + /* + * Fallback to more conservative semantics if setting cache + * options fails. Ignore errors from setting rbd_cache because the + * only possible error is that the option does not exist, and + * librbd defaults to no caching. If write through caching cannot + * be set up, fall back to no caching. 
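+ * Setting rbd_cache_max_dirty to 0 keeps the cache enabled but makes
+ * librbd flush each write immediately, approximating writethrough.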
+ */ + if (flags & BDRV_O_NOCACHE) { + rados_conf_set(s->cluster, "rbd_cache", "false"); + } else { + rados_conf_set(s->cluster, "rbd_cache", "true"); + if (!(flags & BDRV_O_CACHE_WB)) { + r = rados_conf_set(s->cluster, "rbd_cache_max_dirty", "0"); + if (r < 0) { + rados_conf_set(s->cluster, "rbd_cache", "false"); + } + } + } + if (strstr(conf, "conf=") == NULL) { /* try default location, but ignore failure */ rados_conf_read_file(s->cluster, NULL); diff --git a/block/sheepdog.c b/block/sheepdog.c index 2c7aecec4f..809df39d9e 100644 --- a/block/sheepdog.c +++ b/block/sheepdog.c @@ -259,8 +259,7 @@ typedef struct AIOReq { uint8_t flags; uint32_t id; - QLIST_ENTRY(AIOReq) outstanding_aio_siblings; - QLIST_ENTRY(AIOReq) aioreq_siblings; + QLIST_ENTRY(AIOReq) aio_siblings; } AIOReq; enum AIOCBState { @@ -283,8 +282,7 @@ struct SheepdogAIOCB { void (*aio_done_func)(SheepdogAIOCB *); int canceled; - - QLIST_HEAD(aioreq_head, AIOReq) aioreq_head; + int nr_pending; }; typedef struct BDRVSheepdogState { @@ -307,7 +305,8 @@ typedef struct BDRVSheepdogState { Coroutine *co_recv; uint32_t aioreq_seq_num; - QLIST_HEAD(outstanding_aio_head, AIOReq) outstanding_aio_head; + QLIST_HEAD(inflight_aio_head, AIOReq) inflight_aio_head; + QLIST_HEAD(pending_aio_head, AIOReq) pending_aio_head; } BDRVSheepdogState; static const char * sd_strerror(int err) @@ -358,7 +357,7 @@ static const char * sd_strerror(int err) * Sheepdog I/O handling: * * 1. In sd_co_rw_vector, we send the I/O requests to the server and - * link the requests to the outstanding_list in the + * link the requests to the inflight_list in the * BDRVSheepdogState. The function exits without waiting for * receiving the response. * @@ -386,21 +385,18 @@ static inline AIOReq *alloc_aio_req(BDRVSheepdogState *s, SheepdogAIOCB *acb, aio_req->flags = flags; aio_req->id = s->aioreq_seq_num++; - QLIST_INSERT_HEAD(&s->outstanding_aio_head, aio_req, - outstanding_aio_siblings); - QLIST_INSERT_HEAD(&acb->aioreq_head, aio_req, aioreq_siblings); - + acb->nr_pending++; return aio_req; } -static inline int free_aio_req(BDRVSheepdogState *s, AIOReq *aio_req) +static inline void free_aio_req(BDRVSheepdogState *s, AIOReq *aio_req) { SheepdogAIOCB *acb = aio_req->aiocb; - QLIST_REMOVE(aio_req, outstanding_aio_siblings); - QLIST_REMOVE(aio_req, aioreq_siblings); + + QLIST_REMOVE(aio_req, aio_siblings); g_free(aio_req); - return !QLIST_EMPTY(&acb->aioreq_head); + acb->nr_pending--; } static void coroutine_fn sd_finish_aiocb(SheepdogAIOCB *acb) @@ -446,7 +442,7 @@ static SheepdogAIOCB *sd_aio_setup(BlockDriverState *bs, QEMUIOVector *qiov, acb->canceled = 0; acb->coroutine = qemu_coroutine_self(); acb->ret = 0; - QLIST_INIT(&acb->aioreq_head); + acb->nr_pending = 0; return acb; } @@ -522,8 +518,8 @@ static int send_req(int sockfd, SheepdogReq *hdr, void *data, return ret; } -static int send_co_req(int sockfd, SheepdogReq *hdr, void *data, - unsigned int *wlen) +static coroutine_fn int send_co_req(int sockfd, SheepdogReq *hdr, void *data, + unsigned int *wlen) { int ret; @@ -540,11 +536,19 @@ static int send_co_req(int sockfd, SheepdogReq *hdr, void *data, return ret; } + +static coroutine_fn int do_co_req(int sockfd, SheepdogReq *hdr, void *data, + unsigned int *wlen, unsigned int *rlen); + static int do_req(int sockfd, SheepdogReq *hdr, void *data, unsigned int *wlen, unsigned int *rlen) { int ret; + if (qemu_in_coroutine()) { + return do_co_req(sockfd, hdr, data, wlen, rlen); + } + socket_set_block(sockfd); ret = send_req(sockfd, hdr, data, wlen); if (ret < 0) { @@ 
-576,10 +580,21 @@ out: return ret; } -static int do_co_req(int sockfd, SheepdogReq *hdr, void *data, - unsigned int *wlen, unsigned int *rlen) +static void restart_co_req(void *opaque) +{ + Coroutine *co = opaque; + + qemu_coroutine_enter(co, NULL); +} + +static coroutine_fn int do_co_req(int sockfd, SheepdogReq *hdr, void *data, + unsigned int *wlen, unsigned int *rlen) { int ret; + Coroutine *co; + + co = qemu_coroutine_self(); + qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, NULL, co); socket_set_block(sockfd); ret = send_co_req(sockfd, hdr, data, wlen); @@ -587,6 +602,8 @@ static int do_co_req(int sockfd, SheepdogReq *hdr, void *data, goto out; } + qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, NULL, co); + ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr)); if (ret < sizeof(*hdr)) { error_report("failed to get a rsp, %s", strerror(errno)); @@ -608,6 +625,7 @@ static int do_co_req(int sockfd, SheepdogReq *hdr, void *data, } ret = 0; out: + qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL, NULL); socket_set_nonblock(sockfd); return ret; } @@ -616,32 +634,41 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req, struct iovec *iov, int niov, int create, enum AIOCBState aiocb_type); + +static AIOReq *find_pending_req(BDRVSheepdogState *s, uint64_t oid) +{ + AIOReq *aio_req; + + QLIST_FOREACH(aio_req, &s->pending_aio_head, aio_siblings) { + if (aio_req->oid == oid) { + return aio_req; + } + } + + return NULL; +} + /* * This function searchs pending requests to the object `oid', and * sends them. */ -static void coroutine_fn send_pending_req(BDRVSheepdogState *s, uint64_t oid, uint32_t id) +static void coroutine_fn send_pending_req(BDRVSheepdogState *s, uint64_t oid) { - AIOReq *aio_req, *next; + AIOReq *aio_req; SheepdogAIOCB *acb; int ret; - QLIST_FOREACH_SAFE(aio_req, &s->outstanding_aio_head, - outstanding_aio_siblings, next) { - if (id == aio_req->id) { - continue; - } - if (aio_req->oid != oid) { - continue; - } - + while ((aio_req = find_pending_req(s, oid)) != NULL) { acb = aio_req->aiocb; + /* move aio_req from pending list to inflight one */ + QLIST_REMOVE(aio_req, aio_siblings); + QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings); ret = add_aio_request(s, aio_req, acb->qiov->iov, acb->qiov->niov, 0, acb->aiocb_type); if (ret < 0) { error_report("add_aio_request is failed"); free_aio_req(s, aio_req); - if (QLIST_EMPTY(&acb->aioreq_head)) { + if (!acb->nr_pending) { sd_finish_aiocb(acb); } } @@ -662,10 +689,9 @@ static void coroutine_fn aio_read_response(void *opaque) int ret; AIOReq *aio_req = NULL; SheepdogAIOCB *acb; - int rest; unsigned long idx; - if (QLIST_EMPTY(&s->outstanding_aio_head)) { + if (QLIST_EMPTY(&s->inflight_aio_head)) { goto out; } @@ -676,8 +702,8 @@ static void coroutine_fn aio_read_response(void *opaque) goto out; } - /* find the right aio_req from the outstanding_aio list */ - QLIST_FOREACH(aio_req, &s->outstanding_aio_head, outstanding_aio_siblings) { + /* find the right aio_req from the inflight aio list */ + QLIST_FOREACH(aio_req, &s->inflight_aio_head, aio_siblings) { if (aio_req->id == rsp.id) { break; } @@ -715,7 +741,7 @@ static void coroutine_fn aio_read_response(void *opaque) * create requests are not allowed, so we search the * pending requests here. 
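+ * send_pending_req() moves each matching request from the pending list
+ * to the inflight list before resending it.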
*/ - send_pending_req(s, vid_to_data_oid(s->inode.vdi_id, idx), rsp.id); + send_pending_req(s, vid_to_data_oid(s->inode.vdi_id, idx)); } break; case AIOCB_READ_UDATA: @@ -733,8 +759,8 @@ static void coroutine_fn aio_read_response(void *opaque) error_report("%s", sd_strerror(rsp.result)); } - rest = free_aio_req(s, aio_req); - if (!rest) { + free_aio_req(s, aio_req); + if (!acb->nr_pending) { /* * We've finished all requests which belong to the AIOCB, so * we can switch back to sd_co_readv/writev now. @@ -767,7 +793,8 @@ static int aio_flush_request(void *opaque) { BDRVSheepdogState *s = opaque; - return !QLIST_EMPTY(&s->outstanding_aio_head); + return !QLIST_EMPTY(&s->inflight_aio_head) || + !QLIST_EMPTY(&s->pending_aio_head); } static int set_nodelay(int fd) @@ -1084,7 +1111,8 @@ static int sd_open(BlockDriverState *bs, const char *filename, int flags) strstart(filename, "sheepdog:", (const char **)&filename); - QLIST_INIT(&s->outstanding_aio_head); + QLIST_INIT(&s->inflight_aio_head); + QLIST_INIT(&s->pending_aio_head); s->fd = -1; memset(vdi, 0, sizeof(vdi)); @@ -1446,6 +1474,7 @@ static void coroutine_fn sd_write_done(SheepdogAIOCB *acb) iov.iov_len = sizeof(s->inode); aio_req = alloc_aio_req(s, acb, vid_to_vdi_oid(s->inode.vdi_id), data_len, offset, 0, 0, offset); + QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings); ret = add_aio_request(s, aio_req, &iov, 1, 0, AIOCB_WRITE_UDATA); if (ret) { free_aio_req(s, aio_req); @@ -1514,7 +1543,7 @@ out: * Send I/O requests to the server. * * This function sends requests to the server, links the requests to - * the outstanding_list in BDRVSheepdogState, and exits without + * the inflight_list in BDRVSheepdogState, and exits without * waiting the response. The responses are received in the * `aio_read_response' function which is called from the main loop as * a fd handler. @@ -1546,6 +1575,12 @@ static int coroutine_fn sd_co_rw_vector(void *p) } } + /* + * Make sure we don't free the aiocb before we are done with all requests. + * This additional reference is dropped at the end of this function. 
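+ * Otherwise the completion of an earlier request could see nr_pending
+ * drop to zero and finish the aiocb while later requests are still
+ * being submitted.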
+ */ + acb->nr_pending++; + while (done != total) { uint8_t flags = 0; uint64_t old_oid = 0; @@ -1570,22 +1605,18 @@ static int coroutine_fn sd_co_rw_vector(void *p) } if (create) { - dprintf("update ino (%" PRIu32") %" PRIu64 " %" PRIu64 - " %" PRIu64 "\n", inode->vdi_id, oid, + dprintf("update ino (%" PRIu32 ") %" PRIu64 " %" PRIu64 " %ld\n", + inode->vdi_id, oid, vid_to_data_oid(inode->data_vdi_id[idx], idx), idx); oid = vid_to_data_oid(inode->vdi_id, idx); - dprintf("new oid %lx\n", oid); + dprintf("new oid %" PRIx64 "\n", oid); } aio_req = alloc_aio_req(s, acb, oid, len, offset, flags, old_oid, done); if (create) { AIOReq *areq; - QLIST_FOREACH(areq, &s->outstanding_aio_head, - outstanding_aio_siblings) { - if (areq == aio_req) { - continue; - } + QLIST_FOREACH(areq, &s->inflight_aio_head, aio_siblings) { if (areq->oid == oid) { /* * Sheepdog cannot handle simultaneous create @@ -1595,11 +1626,14 @@ static int coroutine_fn sd_co_rw_vector(void *p) */ aio_req->flags = 0; aio_req->base_oid = 0; + QLIST_INSERT_HEAD(&s->pending_aio_head, aio_req, + aio_siblings); goto done; } } } + QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings); ret = add_aio_request(s, aio_req, acb->qiov->iov, acb->qiov->niov, create, acb->aiocb_type); if (ret < 0) { @@ -1614,7 +1648,7 @@ static int coroutine_fn sd_co_rw_vector(void *p) done += len; } out: - if (QLIST_EMPTY(&acb->aioreq_head)) { + if (!--acb->nr_pending) { return acb->ret; } return 1; @@ -1627,7 +1661,6 @@ static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num, int ret; if (bs->growable && sector_num + nb_sectors > bs->total_sectors) { - /* TODO: shouldn't block here */ ret = sd_truncate(bs, (sector_num + nb_sectors) * SECTOR_SIZE); if (ret < 0) { return ret; @@ -1695,7 +1728,7 @@ static int coroutine_fn sd_co_flush_to_disk(BlockDriverState *bs) hdr.opcode = SD_OP_FLUSH_VDI; hdr.oid = vid_to_vdi_oid(inode->vdi_id); - ret = do_co_req(s->flush_fd, (SheepdogReq *)&hdr, NULL, &wlen, &rlen); + ret = do_req(s->flush_fd, (SheepdogReq *)&hdr, NULL, &wlen, &rlen); if (ret) { error_report("failed to send a request to the sheep"); return ret; @@ -1725,7 +1758,7 @@ static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info) SheepdogInode *inode; unsigned int datalen; - dprintf("sn_info: name %s id_str %s s: name %s vm_state_size %d " + dprintf("sn_info: name %s id_str %s s: name %s vm_state_size %" PRId64 " " "is_snapshot %d\n", sn_info->name, sn_info->id_str, s->name, sn_info->vm_state_size, s->is_snapshot); diff --git a/block/stream.c b/block/stream.c index 8e5832273b..37c46525d2 100644 --- a/block/stream.c +++ b/block/stream.c @@ -13,6 +13,7 @@ #include "trace.h" #include "block_int.h" +#include "qemu/ratelimit.h" enum { /* @@ -25,34 +26,6 @@ enum { #define SLICE_TIME 100000000ULL /* ns */ -typedef struct { - int64_t next_slice_time; - uint64_t slice_quota; - uint64_t dispatched; -} RateLimit; - -static int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n) -{ - int64_t now = qemu_get_clock_ns(rt_clock); - - if (limit->next_slice_time < now) { - limit->next_slice_time = now + SLICE_TIME; - limit->dispatched = 0; - } - if (limit->dispatched == 0 || limit->dispatched + n <= limit->slice_quota) { - limit->dispatched += n; - return 0; - } else { - limit->dispatched = n; - return limit->next_slice_time - now; - } -} - -static void ratelimit_set_speed(RateLimit *limit, uint64_t speed) -{ - limit->slice_quota = speed / (1000000000ULL / SLICE_TIME); -} - typedef struct StreamBlockJob { BlockJob common; 
RateLimit limit; @@ -98,67 +71,6 @@ static void close_unused_images(BlockDriverState *top, BlockDriverState *base, top->backing_hd = base; } -/* - * Given an image chain: [BASE] -> [INTER1] -> [INTER2] -> [TOP] - * - * Return true if the given sector is allocated in top. - * Return false if the given sector is allocated in intermediate images. - * Return true otherwise. - * - * 'pnum' is set to the number of sectors (including and immediately following - * the specified sector) that are known to be in the same - * allocated/unallocated state. - * - */ -static int coroutine_fn is_allocated_base(BlockDriverState *top, - BlockDriverState *base, - int64_t sector_num, - int nb_sectors, int *pnum) -{ - BlockDriverState *intermediate; - int ret, n; - - ret = bdrv_co_is_allocated(top, sector_num, nb_sectors, &n); - if (ret) { - *pnum = n; - return ret; - } - - /* - * Is the unallocated chunk [sector_num, n] also - * unallocated between base and top? - */ - intermediate = top->backing_hd; - - while (intermediate != base) { - int pnum_inter; - - ret = bdrv_co_is_allocated(intermediate, sector_num, nb_sectors, - &pnum_inter); - if (ret < 0) { - return ret; - } else if (ret) { - *pnum = pnum_inter; - return 0; - } - - /* - * [sector_num, nb_sectors] is unallocated on top but intermediate - * might have - * - * [sector_num+x, nr_sectors] allocated. - */ - if (n > pnum_inter) { - n = pnum_inter; - } - - intermediate = intermediate->backing_hd; - } - - *pnum = n; - return 1; -} - static void coroutine_fn stream_run(void *opaque) { StreamBlockJob *s = opaque; @@ -189,6 +101,7 @@ static void coroutine_fn stream_run(void *opaque) for (sector_num = 0; sector_num < end; sector_num += n) { uint64_t delay_ns = 0; + bool copy; wait: /* Note that even when no rate limit is applied we need to yield @@ -199,10 +112,20 @@ wait: break; } - ret = is_allocated_base(bs, base, sector_num, - STREAM_BUFFER_SIZE / BDRV_SECTOR_SIZE, &n); + ret = bdrv_co_is_allocated(bs, sector_num, + STREAM_BUFFER_SIZE / BDRV_SECTOR_SIZE, &n); + if (ret == 1) { + /* Allocated in the top, no need to copy. */ + copy = false; + } else { + /* Copy if allocated in the intermediate images. Limit to the + * known-unallocated area [sector_num, sector_num+n). */ + ret = bdrv_co_is_allocated_above(bs->backing_hd, base, + sector_num, n, &n); + copy = (ret == 1); + } trace_stream_one_iteration(s, sector_num, n, ret); - if (ret == 0) { + if (ret >= 0 && copy) { if (s->common.speed) { delay_ns = ratelimit_calculate_delay(&s->limit, n); if (delay_ns > 0) { @@ -248,7 +171,7 @@ static void stream_set_speed(BlockJob *job, int64_t speed, Error **errp) error_set(errp, QERR_INVALID_PARAMETER, "speed"); return; } - ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE); + ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME); } static BlockJobType stream_job_type = { diff --git a/block/vdi.c b/block/vdi.c index 119d3c74da..57325d65c4 100644 --- a/block/vdi.c +++ b/block/vdi.c @@ -277,7 +277,8 @@ static void vdi_header_print(VdiHeader *header) } #endif -static int vdi_check(BlockDriverState *bs, BdrvCheckResult *res) +static int vdi_check(BlockDriverState *bs, BdrvCheckResult *res, + BdrvCheckMode fix) { /* TODO: additional checks possible. 
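+ * Note that repair (fix != 0) is not implemented yet and is rejected
+ * with -ENOTSUP below.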
*/ BDRVVdiState *s = (BDRVVdiState *)bs->opaque; @@ -286,6 +287,10 @@ static int vdi_check(BlockDriverState *bs, BdrvCheckResult *res) uint32_t *bmap; logout("\n"); + if (fix) { + return -ENOTSUP; + } + bmap = g_malloc(s->header.blocks_in_image * sizeof(uint32_t)); memset(bmap, 0xff, s->header.blocks_in_image * sizeof(uint32_t)); diff --git a/block_int.h b/block_int.h index 3d4abc6575..1fb5352d0e 100644 --- a/block_int.h +++ b/block_int.h @@ -241,7 +241,8 @@ struct BlockDriver { * Returns 0 for completed check, -errno for internal errors. * The check results are stored in result. */ - int (*bdrv_check)(BlockDriverState* bs, BdrvCheckResult *result); + int (*bdrv_check)(BlockDriverState* bs, BdrvCheckResult *result, + BdrvCheckMode fix); void (*bdrv_debug_event)(BlockDriverState *bs, BlkDebugEvent event); diff --git a/blockdev.c b/blockdev.c index 622ecba04e..a85a429aef 100644 --- a/blockdev.c +++ b/blockdev.c @@ -278,7 +278,6 @@ DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi) { const char *buf; const char *file = NULL; - char devname[128]; const char *serial; const char *mediastr = ""; BlockInterfaceType type; @@ -318,7 +317,6 @@ DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi) serial = qemu_opt_get(opts, "serial"); if ((buf = qemu_opt_get(opts, "if")) != NULL) { - pstrcpy(devname, sizeof(devname), buf); for (type = 0; type < IF_COUNT && strcmp(buf, if_name[type]); type++) ; if (type == IF_COUNT) { @@ -327,7 +325,6 @@ DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi) } } else { type = default_to_scsi ? IF_SCSI : IF_IDE; - pstrcpy(devname, sizeof(devname), if_name[type]); } max_devs = if_max_devs[type]; @@ -523,10 +520,10 @@ DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi) mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd"; if (max_devs) snprintf(dinfo->id, 32, "%s%i%s%i", - devname, bus_id, mediastr, unit_id); + if_name[type], bus_id, mediastr, unit_id); else snprintf(dinfo->id, 32, "%s%s%i", - devname, mediastr, unit_id); + if_name[type], mediastr, unit_id); } dinfo->bdrv = bdrv_new(dinfo->id); dinfo->devaddr = devaddr; @@ -612,6 +609,10 @@ DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi) bdrv_flags |= ro ? 0 : BDRV_O_RDWR; + if (ro && copy_on_read) { + error_report("warning: disabling copy_on_read on readonly drive"); + } + ret = bdrv_open(dinfo->bdrv, file, bdrv_flags, drv); if (ret < 0) { error_report("could not open disk image %s: %s", diff --git a/bsd-user/Makefile.objs b/bsd-user/Makefile.objs new file mode 100644 index 0000000000..5e77f57782 --- /dev/null +++ b/bsd-user/Makefile.objs @@ -0,0 +1,2 @@ +obj-y = main.o bsdload.o elfload.o mmap.o signal.o strace.o syscall.o \ + uaccess.o @@ -42,6 +42,7 @@ compile_prog() { # symbolically link $1 to $2. Portable version of "ln -sf". symlink() { rm -rf "$2" + mkdir -p "$(dirname "$2")" ln -s "$1" "$2" } @@ -133,9 +134,9 @@ vnc_tls="" vnc_sasl="" vnc_jpeg="" vnc_png="" -vnc_thread="no" xen="" xen_ctrl_version="" +xen_pci_passthrough="" linux_aio="" cap_ng="" attr="" @@ -255,7 +256,6 @@ QEMU_CFLAGS="-fno-strict-aliasing $QEMU_CFLAGS" QEMU_CFLAGS="-Wall -Wundef -Wwrite-strings -Wmissing-prototypes $QEMU_CFLAGS" QEMU_CFLAGS="-Wstrict-prototypes -Wredundant-decls $QEMU_CFLAGS" QEMU_CFLAGS="-D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE $QEMU_CFLAGS" -QEMU_CFLAGS="-D_FORTIFY_SOURCE=2 $QEMU_CFLAGS" QEMU_INCLUDES="-I. 
-I\$(SRC_PATH) -I\$(SRC_PATH)/fpu" if test "$debug_info" = "yes"; then CFLAGS="-g $CFLAGS" @@ -666,10 +666,6 @@ for opt do ;; --enable-vnc-png) vnc_png="yes" ;; - --disable-vnc-thread) vnc_thread="no" - ;; - --enable-vnc-thread) vnc_thread="yes" - ;; --disable-slirp) slirp="no" ;; --disable-uuid) uuid="no" @@ -684,6 +680,10 @@ for opt do ;; --enable-xen) xen="yes" ;; + --disable-xen-pci-passthrough) xen_pci_passthrough="no" + ;; + --enable-xen-pci-passthrough) xen_pci_passthrough="yes" + ;; --disable-brlapi) brlapi="no" ;; --enable-brlapi) brlapi="yes" @@ -1031,6 +1031,8 @@ echo " (affects only QEMU, not qemu-img)" echo " --enable-mixemu enable mixer emulation" echo " --disable-xen disable xen backend driver support" echo " --enable-xen enable xen backend driver support" +echo " --disable-xen-pci-passthrough" +echo " --enable-xen-pci-passthrough" echo " --disable-brlapi disable BrlAPI" echo " --enable-brlapi enable BrlAPI" echo " --disable-vnc-tls disable TLS encryption for VNC server" @@ -1041,8 +1043,6 @@ echo " --disable-vnc-jpeg disable JPEG lossy compression for VNC server" echo " --enable-vnc-jpeg enable JPEG lossy compression for VNC server" echo " --disable-vnc-png disable PNG compression for VNC server (default)" echo " --enable-vnc-png enable PNG compression for VNC server" -echo " --disable-vnc-thread disable threaded VNC server" -echo " --enable-vnc-thread enable threaded VNC server" echo " --disable-curses disable curses output" echo " --enable-curses enable curses output" echo " --disable-curl disable curl connectivity" @@ -1381,7 +1381,7 @@ EOF elif ( cat > $TMPC <<EOF #include <xenctrl.h> -#include <xs.h> +#include <xenstore.h> #include <stdint.h> #include <xen/hvm/hvm_info_table.h> #if !defined(HVM_MAX_VCPUS) @@ -1507,6 +1507,25 @@ EOF fi fi +if test "$xen_pci_passthrough" != "no"; then + if test "$xen" = "yes" && test "$linux" = "yes" && + test "$xen_ctrl_version" -ge 340; then + xen_pci_passthrough=yes + else + if test "$xen_pci_passthrough" = "yes"; then + echo "ERROR" + echo "ERROR: User requested feature Xen PCI Passthrough" + echo "ERROR: but this feature require /sys from Linux" + if test "$xen_ctrl_version" -lt 340; then + echo "ERROR: This feature does not work with Xen 3.3" + fi + echo "ERROR" + exit 1; + fi + xen_pci_passthrough=no + fi +fi + ########################################## # pkg-config probe @@ -2808,10 +2827,14 @@ fi ########################################## # check if we have open_by_handle_at -open_by_hande_at=no +open_by_handle_at=no cat > $TMPC << EOF #include <fcntl.h> +#if !defined(AT_EMPTY_PATH) +# error missing definition +#else int main(void) { struct file_handle fh; return open_by_handle_at(0, &fh, 0); } +#endif EOF if compile_prog "" "" ; then open_by_handle_at=yes @@ -2851,7 +2874,7 @@ fi # After here, no more $cc or $ld runs if test "$debug" = "no" ; then - CFLAGS="-O2 $CFLAGS" + CFLAGS="-O2 -D_FORTIFY_SOURCE=2 $CFLAGS" fi # Consult white-list to determine whether to enable werror @@ -2915,7 +2938,8 @@ if test "$softmmu" = yes ; then tools="$tools fsdev/virtfs-proxy-helper\$(EXESUF)" else if test "$virtfs" = yes; then - feature_not_found "virtfs" + echo "VirtFS is supported only on Linux and requires libcap-devel and libattr-devel" + exit 1 fi virtfs=no fi @@ -2927,6 +2951,9 @@ if test "$softmmu" = yes ; then fi fi fi +if test "$smartcard_nss" = "yes" ; then + tools="vscclient\$(EXESUF) $tools" +fi # Mac OS X ships with a broken assembler roms= @@ -2990,7 +3017,6 @@ if test "$vnc" = "yes" ; then echo "VNC SASL support $vnc_sasl" echo 
"VNC JPEG support $vnc_jpeg" echo "VNC PNG support $vnc_png" - echo "VNC thread $vnc_thread" fi if test -n "$sparc_cpu"; then echo "Target Sparc Arch $sparc_cpu" @@ -3166,9 +3192,6 @@ if test "$vnc_png" = "yes" ; then echo "CONFIG_VNC_PNG=y" >> $config_host_mak echo "VNC_PNG_CFLAGS=$vnc_png_cflags" >> $config_host_mak fi -if test "$vnc_thread" = "yes" ; then - echo "CONFIG_VNC_THREAD=y" >> $config_host_mak -fi if test "$fnmatch" = "yes" ; then echo "CONFIG_FNMATCH=y" >> $config_host_mak fi @@ -3451,7 +3474,6 @@ if test -f ${config_host_ld}~ ; then fi for d in libdis libdis-user; do - mkdir -p $d symlink "$source_path/Makefile.dis" "$d/Makefile" echo > $d/config.mak done @@ -3514,18 +3536,6 @@ case "$target" in esac mkdir -p $target_dir -mkdir -p $target_dir/fpu -mkdir -p $target_dir/tcg -mkdir -p $target_dir/ide -mkdir -p $target_dir/usb -mkdir -p $target_dir/9pfs -mkdir -p $target_dir/kvm -if test "$target" = "arm-linux-user" -o "$target" = "armeb-linux-user" -o "$target" = "arm-bsd-user" -o "$target" = "armeb-bsd-user" ; then - mkdir -p $target_dir/nwfpe -fi -symlink "$source_path/Makefile.target" "$target_dir/Makefile" - - echo "# Automatically generated by configure - do not modify" > $config_target_mak bflt="no" @@ -3676,9 +3686,16 @@ case "$target_arch2" in exit 1 ;; esac +# TARGET_BASE_ARCH needs to be defined after TARGET_ARCH +if [ "$TARGET_BASE_ARCH" = "" ]; then + TARGET_BASE_ARCH=$TARGET_ARCH +fi + +symlink "$source_path/Makefile.target" "$target_dir/Makefile" + case "$target_arch2" in - alpha | sparc*) + alpha | sparc* | xtensa* | ppc*) echo "CONFIG_TCG_PASS_AREG0=y" >> $config_target_mak ;; esac @@ -3691,10 +3708,6 @@ echo "TARGET_ARCH=$TARGET_ARCH" >> $config_target_mak target_arch_name="`echo $TARGET_ARCH | LC_ALL=C tr '[a-z]' '[A-Z]'`" echo "TARGET_$target_arch_name=y" >> $config_target_mak echo "TARGET_ARCH2=$target_arch2" >> $config_target_mak -# TARGET_BASE_ARCH needs to be defined after TARGET_ARCH -if [ "$TARGET_BASE_ARCH" = "" ]; then - TARGET_BASE_ARCH=$TARGET_ARCH -fi echo "TARGET_BASE_ARCH=$TARGET_BASE_ARCH" >> $config_target_mak if [ "$TARGET_ABI_DIR" = "" ]; then TARGET_ABI_DIR=$TARGET_ARCH @@ -3705,6 +3718,9 @@ case "$target_arch2" in if test "$xen" = "yes" -a "$target_softmmu" = "yes" ; then target_phys_bits=64 echo "CONFIG_XEN=y" >> $config_target_mak + if test "$xen_pci_passthrough" = yes; then + echo "CONFIG_XEN_PCI_PASSTHROUGH=y" >> "$config_target_mak" + fi else echo "CONFIG_NO_XEN=y" >> $config_target_mak fi @@ -3901,12 +3917,6 @@ if test "$target_softmmu" = "yes" ; then esac fi -if test "$target_softmmu" = "yes" -a \( \ - "$TARGET_ARCH" = "microblaze" -o \ - "$TARGET_ARCH" = "cris" \) ; then - echo "CONFIG_NEED_MMU=y" >> $config_target_mak -fi - if test "$gprof" = "yes" ; then echo "TARGET_GPROF=yes" >> $config_target_mak if test "$target_linux_user" = "yes" ; then @@ -3948,15 +3958,13 @@ done # for target in $targets # build tree in object directory in case the source is not in the current directory DIRS="tests tests/tcg tests/tcg/cris tests/tcg/lm32" -DIRS="$DIRS slirp audio block net pc-bios/optionrom" -DIRS="$DIRS pc-bios/spapr-rtas" +DIRS="$DIRS pc-bios/optionrom pc-bios/spapr-rtas" DIRS="$DIRS roms/seabios roms/vgabios" -DIRS="$DIRS fsdev ui usb" -DIRS="$DIRS qapi qapi-generated" -DIRS="$DIRS qga trace qom" +DIRS="$DIRS qapi-generated" +DIRS="$DIRS libcacard libcacard/libcacard libcacard/trace" FILES="Makefile tests/tcg/Makefile qdict-test-data.txt" FILES="$FILES tests/tcg/cris/Makefile tests/tcg/cris/.gdbinit" -FILES="$FILES 
tests/tcg/lm32/Makefile" +FILES="$FILES tests/tcg/lm32/Makefile libcacard/Makefile" FILES="$FILES pc-bios/optionrom/Makefile pc-bios/keymaps" FILES="$FILES pc-bios/spapr-rtas/Makefile" FILES="$FILES roms/seabios/Makefile roms/vgabios/Makefile" @@ -3991,24 +3999,11 @@ done for hwlib in 32 64; do d=libhw$hwlib - mkdir -p $d - mkdir -p $d/ide - mkdir -p $d/usb symlink "$source_path/Makefile.hw" "$d/Makefile" - mkdir -p $d/9pfs echo "QEMU_CFLAGS+=-DTARGET_PHYS_ADDR_BITS=$hwlib" > $d/config.mak done -if [ "$source_path" != `pwd` ]; then - # out of tree build - mkdir -p libcacard - symlink "$source_path/libcacard/Makefile" libcacard/Makefile -fi - d=libuser -mkdir -p $d -mkdir -p $d/trace -mkdir -p $d/qom symlink "$source_path/Makefile.user" "$d/Makefile" if test "$docs" = "yes" ; then @@ -22,8 +22,6 @@ #include "qemu-common.h" #include "qemu-tls.h" #include "cpu-common.h" -#include "memory_mapping.h" -#include "dump.h" /* some important defines: * @@ -293,6 +291,15 @@ extern unsigned long reserved_va; #define stfl_kernel(p, v) stfl_raw(p, v) #define stfq_kernel(p, vt) stfq_raw(p, v) +#ifdef CONFIG_TCG_PASS_AREG0 +#define cpu_ldub_data(env, addr) ldub_raw(addr) +#define cpu_lduw_data(env, addr) lduw_raw(addr) +#define cpu_ldl_data(env, addr) ldl_raw(addr) + +#define cpu_stb_data(env, addr, data) stb_raw(addr, data) +#define cpu_stw_data(env, addr, data) stw_raw(addr, data) +#define cpu_stl_data(env, addr, data) stl_raw(addr, data) +#endif #endif /* defined(CONFIG_USER_ONLY) */ /* page related stuff */ @@ -448,30 +455,6 @@ void cpu_single_step(CPUArchState *env, int enabled); int cpu_is_stopped(CPUArchState *env); void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data); -#define CPU_LOG_TB_OUT_ASM (1 << 0) -#define CPU_LOG_TB_IN_ASM (1 << 1) -#define CPU_LOG_TB_OP (1 << 2) -#define CPU_LOG_TB_OP_OPT (1 << 3) -#define CPU_LOG_INT (1 << 4) -#define CPU_LOG_EXEC (1 << 5) -#define CPU_LOG_PCALL (1 << 6) -#define CPU_LOG_IOPORT (1 << 7) -#define CPU_LOG_TB_CPU (1 << 8) -#define CPU_LOG_RESET (1 << 9) - -/* define log items */ -typedef struct CPULogItem { - int mask; - const char *name; - const char *help; -} CPULogItem; - -extern const CPULogItem cpu_log_items[]; - -void cpu_set_log(int log_flags); -void cpu_set_log_filename(const char *filename); -int cpu_str_to_log_mask(const char *str); - #if !defined(CONFIG_USER_ONLY) /* Return the physical page corresponding to a virtual one. 
Use it @@ -503,6 +486,7 @@ typedef struct RAMBlock { typedef struct RAMList { uint8_t *phys_dirty; QLIST_HEAD(, RAMBlock) blocks; + uint64_t dirty_pages; } RAMList; extern RAMList ram_list; @@ -525,72 +509,4 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf); int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr, uint8_t *buf, int len, int is_write); -#if defined(CONFIG_HAVE_GET_MEMORY_MAPPING) -int cpu_get_memory_mapping(MemoryMappingList *list, CPUArchState *env); -bool cpu_paging_enabled(CPUArchState *env); -#else -static inline int cpu_get_memory_mapping(MemoryMappingList *list, - CPUArchState *env) -{ - return -1; -} - -static inline bool cpu_paging_enabled(CPUArchState *env) -{ - return true; -} -#endif - -typedef int (*write_core_dump_function)(void *buf, size_t size, void *opaque); -#if defined(CONFIG_HAVE_CORE_DUMP) -int cpu_write_elf64_note(write_core_dump_function f, CPUArchState *env, - int cpuid, void *opaque); -int cpu_write_elf32_note(write_core_dump_function f, CPUArchState *env, - int cpuid, void *opaque); -int cpu_write_elf64_qemunote(write_core_dump_function f, CPUArchState *env, - void *opaque); -int cpu_write_elf32_qemunote(write_core_dump_function f, CPUArchState *env, - void *opaque); -int cpu_get_dump_info(ArchDumpInfo *info); -size_t cpu_get_note_size(int class, int machine, int nr_cpus); -#else -static inline int cpu_write_elf64_note(write_core_dump_function f, - CPUArchState *env, int cpuid, - void *opaque) -{ - return -1; -} - -static inline int cpu_write_elf32_note(write_core_dump_function f, - CPUArchState *env, int cpuid, - void *opaque) -{ - return -1; -} - -static inline int cpu_write_elf64_qemunote(write_core_dump_function f, - CPUArchState *env, - void *opaque) -{ - return -1; -} - -static inline int cpu_write_elf32_qemunote(write_core_dump_function f, - CPUArchState *env, - void *opaque) -{ - return -1; -} - -static inline int cpu_get_dump_info(ArchDumpInfo *info) -{ - return -1; -} - -static inline int cpu_get_note_size(int class, int machine, int nr_cpus) -{ - return -1; -} -#endif - #endif /* CPU_ALL_H */ diff --git a/cpu-common.h b/cpu-common.h index 1fe3280701..85548de5ea 100644 --- a/cpu-common.h +++ b/cpu-common.h @@ -3,9 +3,7 @@ /* CPU interfaces that are target independent. */ -#ifdef TARGET_PHYS_ADDR_BITS #include "targphys.h" -#endif #ifndef NEED_CPU_H #include "poison.h" @@ -71,9 +69,7 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len, void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)); void cpu_unregister_map_client(void *cookie); -#ifndef CONFIG_USER_ONLY bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr); -#endif /* Coalesced MMIO regions are areas where write operations can be reordered. * This usually implies that write operations are side-effect free. 
This allows diff --git a/cpu-exec.c b/cpu-exec.c index 83cac932ed..08c35f72d4 100644 --- a/cpu-exec.c +++ b/cpu-exec.c @@ -289,7 +289,8 @@ int cpu_exec(CPUArchState *env) #endif #if defined(TARGET_I386) if (interrupt_request & CPU_INTERRUPT_INIT) { - svm_check_intercept(env, SVM_EXIT_INIT); + cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, + 0); do_cpu_init(x86_env_get_cpu(env)); env->exception_index = EXCP_HALTED; cpu_loop_exit(env); @@ -298,7 +299,8 @@ int cpu_exec(CPUArchState *env) } else if (env->hflags2 & HF2_GIF_MASK) { if ((interrupt_request & CPU_INTERRUPT_SMI) && !(env->hflags & HF_SMM_MASK)) { - svm_check_intercept(env, SVM_EXIT_SMI); + cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, + 0); env->interrupt_request &= ~CPU_INTERRUPT_SMI; do_smm_enter(env); next_tb = 0; @@ -319,7 +321,8 @@ int cpu_exec(CPUArchState *env) (env->eflags & IF_MASK && !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { int intno; - svm_check_intercept(env, SVM_EXIT_INTR); + cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, + 0); env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ); intno = cpu_get_pic_interrupt(env); qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno); @@ -333,7 +336,8 @@ int cpu_exec(CPUArchState *env) !(env->hflags & HF_INHIBIT_IRQ_MASK)) { int intno; /* FIXME: this should respect TPR */ - svm_check_intercept(env, SVM_EXIT_VINTR); + cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, + 0); intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector)); qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno); do_interrupt_x86_hardirq(env, intno, 1); @@ -467,11 +471,18 @@ int cpu_exec(CPUArchState *env) do_interrupt(env); next_tb = 0; } - if (interrupt_request & CPU_INTERRUPT_NMI - && (env->pregs[PR_CCS] & M_FLAG)) { - env->exception_index = EXCP_NMI; - do_interrupt(env); - next_tb = 0; + if (interrupt_request & CPU_INTERRUPT_NMI) { + unsigned int m_flag_archval; + if (env->pregs[PR_VR] < 32) { + m_flag_archval = M_FLAG_V10; + } else { + m_flag_archval = M_FLAG_V32; + } + if ((env->pregs[PR_CCS] & m_flag_archval)) { + env->exception_index = EXCP_NMI; + do_interrupt(env); + next_tb = 0; + } } #elif defined(TARGET_M68K) if (interrupt_request & CPU_INTERRUPT_HARD diff --git a/default-configs/microblaze-softmmu.mak b/default-configs/microblaze-softmmu.mak index 613edab742..64c9485de4 100644 --- a/default-configs/microblaze-softmmu.mak +++ b/default-configs/microblaze-softmmu.mak @@ -3,3 +3,5 @@ CONFIG_PTIMER=y CONFIG_PFLASH_CFI01=y CONFIG_SERIAL=y +CONFIG_XILINX=y +CONFIG_XILINX_AXI=y diff --git a/default-configs/microblazeel-softmmu.mak b/default-configs/microblazeel-softmmu.mak index 4b40fb21a5..a9622760c9 100644 --- a/default-configs/microblazeel-softmmu.mak +++ b/default-configs/microblazeel-softmmu.mak @@ -3,3 +3,5 @@ CONFIG_PTIMER=y CONFIG_PFLASH_CFI01=y CONFIG_SERIAL=y +CONFIG_XILINX=y +CONFIG_XILINX_AXI=y diff --git a/default-configs/pci.mak b/default-configs/pci.mak index 9d3e1dbda1..4b49c0012b 100644 --- a/default-configs/pci.mak +++ b/default-configs/pci.mak @@ -10,6 +10,7 @@ CONFIG_EEPRO100_PCI=y CONFIG_PCNET_PCI=y CONFIG_PCNET_COMMON=y CONFIG_LSI_SCSI_PCI=y +CONFIG_MEGASAS_SCSI_PCI=y CONFIG_RTL8139_PCI=y CONFIG_E1000_PCI=y CONFIG_IDE_CORE=y diff --git a/default-configs/ppc-softmmu.mak b/default-configs/ppc-softmmu.mak index 1a768fc519..d0fde7b93d 100644 --- a/default-configs/ppc-softmmu.mak +++ b/default-configs/ppc-softmmu.mak @@ -36,3 +36,4 @@ CONFIG_PFLASH_CFI01=y CONFIG_PFLASH_CFI02=y CONFIG_PTIMER=y 
CONFIG_I8259=y +CONFIG_XILINX=y diff --git a/default-configs/ppc64-softmmu.mak b/default-configs/ppc64-softmmu.mak index f490368c64..e4265b4978 100644 --- a/default-configs/ppc64-softmmu.mak +++ b/default-configs/ppc64-softmmu.mak @@ -33,3 +33,4 @@ CONFIG_PFLASH_CFI01=y CONFIG_PFLASH_CFI02=y CONFIG_PTIMER=y CONFIG_I8259=y +CONFIG_XILINX=y diff --git a/default-configs/ppcemb-softmmu.mak b/default-configs/ppcemb-softmmu.mak index 829f462105..aaa9cdc1f7 100644 --- a/default-configs/ppcemb-softmmu.mak +++ b/default-configs/ppcemb-softmmu.mak @@ -33,3 +33,4 @@ CONFIG_PFLASH_CFI01=y CONFIG_PFLASH_CFI02=y CONFIG_PTIMER=y CONFIG_I8259=y +CONFIG_XILINX=y diff --git a/device_tree.c b/device_tree.c index 86a694c955..b366fddeaf 100644 --- a/device_tree.c +++ b/device_tree.c @@ -22,9 +22,48 @@ #include "qemu-common.h" #include "device_tree.h" #include "hw/loader.h" +#include "qemu-option.h" +#include "qemu-config.h" #include <libfdt.h> +#define FDT_MAX_SIZE 0x10000 + +void *create_device_tree(int *sizep) +{ + void *fdt; + int ret; + + *sizep = FDT_MAX_SIZE; + fdt = g_malloc0(FDT_MAX_SIZE); + ret = fdt_create(fdt, FDT_MAX_SIZE); + if (ret < 0) { + goto fail; + } + ret = fdt_begin_node(fdt, ""); + if (ret < 0) { + goto fail; + } + ret = fdt_end_node(fdt); + if (ret < 0) { + goto fail; + } + ret = fdt_finish(fdt); + if (ret < 0) { + goto fail; + } + ret = fdt_open_into(fdt, fdt, *sizep); + if (ret) { + fprintf(stderr, "Unable to copy device tree in memory\n"); + exit(1); + } + + return fdt; +fail: + fprintf(stderr, "%s Couldn't create dt: %s\n", __func__, fdt_strerror(ret)); + exit(1); +} + void *load_device_tree(const char *filename_path, int *sizep) { int dt_size; @@ -88,7 +127,7 @@ static int findnode_nofail(void *fdt, const char *node_path) } int qemu_devtree_setprop(void *fdt, const char *node_path, - const char *property, void *val_array, int size) + const char *property, const void *val_array, int size) { int r; @@ -117,6 +156,13 @@ int qemu_devtree_setprop_cell(void *fdt, const char *node_path, return r; } +int qemu_devtree_setprop_u64(void *fdt, const char *node_path, + const char *property, uint64_t val) +{ + val = cpu_to_be64(val); + return qemu_devtree_setprop(fdt, node_path, property, &val, sizeof(val)); +} + int qemu_devtree_setprop_string(void *fdt, const char *node_path, const char *property, const char *string) { @@ -132,6 +178,59 @@ int qemu_devtree_setprop_string(void *fdt, const char *node_path, return r; } +uint32_t qemu_devtree_get_phandle(void *fdt, const char *path) +{ + uint32_t r; + + r = fdt_get_phandle(fdt, findnode_nofail(fdt, path)); + if (r <= 0) { + fprintf(stderr, "%s: Couldn't get phandle for %s: %s\n", __func__, + path, fdt_strerror(r)); + exit(1); + } + + return r; +} + +int qemu_devtree_setprop_phandle(void *fdt, const char *node_path, + const char *property, + const char *target_node_path) +{ + uint32_t phandle = qemu_devtree_get_phandle(fdt, target_node_path); + return qemu_devtree_setprop_cell(fdt, node_path, property, phandle); +} + +uint32_t qemu_devtree_alloc_phandle(void *fdt) +{ + static int phandle = 0x0; + + /* + * We need to find out if the user gave us special instruction at + * which phandle id to start allocting phandles. 
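To show how the new device-tree helpers fit together, here is a small hypothetical board-model fragment (the node paths, property names and addresses below are made up for illustration and do not come from the patch): allocate a phandle, create a node, and fill in 32-bit, 64-bit and phandle-valued properties:

    static void example_add_intc_node(void *fdt)
    {
        uint32_t phandle = qemu_devtree_alloc_phandle(fdt);

        qemu_devtree_add_subnode(fdt, "/intc@41200000");
        qemu_devtree_setprop_cell(fdt, "/intc@41200000", "phandle", phandle);
        qemu_devtree_setprop_cell(fdt, "/intc@41200000", "#interrupt-cells", 1);
        qemu_devtree_setprop_u64(fdt, "/intc@41200000", "example-64bit-prop",
                                 0x100000000ULL);

        /* Point another (hypothetical, already created) node at this one
         * by phandle; the lookup reads the "phandle" property set above. */
        qemu_devtree_setprop_phandle(fdt, "/ethernet@81000000",
                                     "interrupt-parent", "/intc@41200000");
    }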
+ */ + if (!phandle) { + QemuOpts *machine_opts; + machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0); + if (machine_opts) { + const char *phandle_start; + phandle_start = qemu_opt_get(machine_opts, "phandle_start"); + if (phandle_start) { + phandle = strtoul(phandle_start, NULL, 0); + } + } + } + + if (!phandle) { + /* + * None or invalid phandle given on the command line, so fall back to + * default starting point. + */ + phandle = 0x8000; + } + + return phandle++; +} + int qemu_devtree_nop_node(void *fdt, const char *node_path) { int r; @@ -151,6 +250,7 @@ int qemu_devtree_add_subnode(void *fdt, const char *name) char *dupname = g_strdup(name); char *basename = strrchr(dupname, '/'); int retval; + int parent = 0; if (!basename) { g_free(dupname); @@ -160,7 +260,11 @@ int qemu_devtree_add_subnode(void *fdt, const char *name) basename[0] = '\0'; basename++; - retval = fdt_add_subnode(fdt, findnode_nofail(fdt, dupname), basename); + if (dupname[0]) { + parent = findnode_nofail(fdt, dupname); + } + + retval = fdt_add_subnode(fdt, parent, basename); if (retval < 0) { fprintf(stderr, "FDT: Failed to create subnode %s: %s\n", name, fdt_strerror(retval)); diff --git a/device_tree.h b/device_tree.h index 4378685b7a..2244270b2d 100644 --- a/device_tree.h +++ b/device_tree.h @@ -14,15 +14,35 @@ #ifndef __DEVICE_TREE_H__ #define __DEVICE_TREE_H__ +void *create_device_tree(int *sizep); void *load_device_tree(const char *filename_path, int *sizep); int qemu_devtree_setprop(void *fdt, const char *node_path, - const char *property, void *val_array, int size); + const char *property, const void *val_array, int size); int qemu_devtree_setprop_cell(void *fdt, const char *node_path, const char *property, uint32_t val); +int qemu_devtree_setprop_u64(void *fdt, const char *node_path, + const char *property, uint64_t val); int qemu_devtree_setprop_string(void *fdt, const char *node_path, const char *property, const char *string); +int qemu_devtree_setprop_phandle(void *fdt, const char *node_path, + const char *property, + const char *target_node_path); +uint32_t qemu_devtree_get_phandle(void *fdt, const char *path); +uint32_t qemu_devtree_alloc_phandle(void *fdt); int qemu_devtree_nop_node(void *fdt, const char *node_path); int qemu_devtree_add_subnode(void *fdt, const char *name); +#define qemu_devtree_setprop_cells(fdt, node_path, property, ...) \ + do { \ + uint32_t qdt_tmp[] = { __VA_ARGS__ }; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(qdt_tmp); i++) { \ + qdt_tmp[i] = cpu_to_be32(qdt_tmp[i]); \ + } \ + qemu_devtree_setprop(fdt, node_path, property, qdt_tmp, \ + sizeof(qdt_tmp)); \ + } while (0) + #endif /* __DEVICE_TREE_H__ */ diff --git a/dma-helpers.c b/dma-helpers.c index 7971a89c14..35cb500581 100644 --- a/dma-helpers.c +++ b/dma-helpers.c @@ -9,13 +9,45 @@ #include "dma.h" #include "trace.h" +#include "range.h" +#include "qemu-thread.h" -void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint) +/* #define DEBUG_IOMMU */ + +static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len) +{ +#define FILLBUF_SIZE 512 + uint8_t fillbuf[FILLBUF_SIZE]; + int l; + + memset(fillbuf, c, FILLBUF_SIZE); + while (len > 0) { + l = len < FILLBUF_SIZE ? 
len : FILLBUF_SIZE; + cpu_physical_memory_rw(addr, fillbuf, l, true); + len -= len; + addr += len; + } +} + +int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len) +{ + dma_barrier(dma, DMA_DIRECTION_FROM_DEVICE); + + if (dma_has_iommu(dma)) { + return iommu_dma_memory_set(dma, addr, c, len); + } + do_dma_memory_set(addr, c, len); + + return 0; +} + +void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma) { qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry)); qsg->nsg = 0; qsg->nalloc = alloc_hint; qsg->size = 0; + qsg->dma = dma; } void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len) @@ -74,10 +106,9 @@ static void dma_bdrv_unmap(DMAAIOCB *dbs) int i; for (i = 0; i < dbs->iov.niov; ++i) { - cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base, - dbs->iov.iov[i].iov_len, - dbs->dir != DMA_DIRECTION_TO_DEVICE, - dbs->iov.iov[i].iov_len); + dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base, + dbs->iov.iov[i].iov_len, dbs->dir, + dbs->iov.iov[i].iov_len); } qemu_iovec_reset(&dbs->iov); } @@ -106,7 +137,7 @@ static void dma_complete(DMAAIOCB *dbs, int ret) static void dma_bdrv_cb(void *opaque, int ret) { DMAAIOCB *dbs = (DMAAIOCB *)opaque; - target_phys_addr_t cur_addr, cur_len; + dma_addr_t cur_addr, cur_len; void *mem; trace_dma_bdrv_cb(dbs, ret); @@ -123,8 +154,7 @@ static void dma_bdrv_cb(void *opaque, int ret) while (dbs->sg_cur_index < dbs->sg->nsg) { cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte; cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte; - mem = cpu_physical_memory_map(cur_addr, &cur_len, - dbs->dir != DMA_DIRECTION_TO_DEVICE); + mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir); if (!mem) break; qemu_iovec_add(&dbs->iov, mem, cur_len); @@ -209,7 +239,8 @@ BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs, } -static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, bool to_dev) +static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, + DMADirection dir) { uint64_t resid; int sg_cur_index; @@ -220,7 +251,7 @@ static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, bool to_de while (len > 0) { ScatterGatherEntry entry = sg->sg[sg_cur_index++]; int32_t xfer = MIN(len, entry.len); - cpu_physical_memory_rw(entry.base, ptr, xfer, !to_dev); + dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir); ptr += xfer; len -= xfer; resid -= xfer; @@ -231,12 +262,12 @@ static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, bool to_de uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg) { - return dma_buf_rw(ptr, len, sg, 0); + return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE); } uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg) { - return dma_buf_rw(ptr, len, sg, 1); + return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE); } void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, @@ -244,3 +275,160 @@ void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, { bdrv_acct_start(bs, cookie, sg->size, type); } + +bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len, + DMADirection dir) +{ + target_phys_addr_t paddr, plen; + +#ifdef DEBUG_IOMMU + fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT + " len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir); +#endif + + while (len) { + if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) { + return false; + } + + /* The translation might be valid for larger regions. 
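One thing worth flagging in do_dma_memory_set() above: the loop advances with "len -= len; addr += len;", which zeroes len after the first chunk, so only the first FILLBUF_SIZE bytes are actually written. Presumably the intent is to step by the chunk size l; a corrected sketch of the loop would be:

    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        cpu_physical_memory_rw(addr, fillbuf, l, true);
        len  -= l;   /* consume what was just written */
        addr += l;
    }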
*/ + if (plen > len) { + plen = len; + } + + len -= plen; + addr += plen; + } + + return true; +} + +int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr, + void *buf, dma_addr_t len, DMADirection dir) +{ + target_phys_addr_t paddr, plen; + int err; + +#ifdef DEBUG_IOMMU + fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x" + DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir); +#endif + + while (len) { + err = dma->translate(dma, addr, &paddr, &plen, dir); + if (err) { + /* + * In case of failure on reads from the guest, we clean the + * destination buffer so that a device that doesn't test + * for errors will not expose qemu internal memory. + */ + memset(buf, 0, len); + return -1; + } + + /* The translation might be valid for larger regions. */ + if (plen > len) { + plen = len; + } + + cpu_physical_memory_rw(paddr, buf, plen, + dir == DMA_DIRECTION_FROM_DEVICE); + + len -= plen; + addr += plen; + buf += plen; + } + + return 0; +} + +int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, + dma_addr_t len) +{ + target_phys_addr_t paddr, plen; + int err; + +#ifdef DEBUG_IOMMU + fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT + " len=0x" DMA_ADDR_FMT "\n", dma, addr, len); +#endif + + while (len) { + err = dma->translate(dma, addr, &paddr, &plen, + DMA_DIRECTION_FROM_DEVICE); + if (err) { + return err; + } + + /* The translation might be valid for larger regions. */ + if (plen > len) { + plen = len; + } + + do_dma_memory_set(paddr, c, plen); + + len -= plen; + addr += plen; + } + + return 0; +} + +void dma_context_init(DMAContext *dma, DMATranslateFunc translate, + DMAMapFunc map, DMAUnmapFunc unmap) +{ +#ifdef DEBUG_IOMMU + fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n", + dma, translate, map, unmap); +#endif + dma->translate = translate; + dma->map = map; + dma->unmap = unmap; +} + +void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len, + DMADirection dir) +{ + int err; + target_phys_addr_t paddr, plen; + void *buf; + + if (dma->map) { + return dma->map(dma, addr, len, dir); + } + + plen = *len; + err = dma->translate(dma, addr, &paddr, &plen, dir); + if (err) { + return NULL; + } + + /* + * If this is true, the virtual region is contiguous, + * but the translated physical region isn't. We just + * clamp *len, much like cpu_physical_memory_map() does. + */ + if (plen < *len) { + *len = plen; + } + + buf = cpu_physical_memory_map(paddr, &plen, + dir == DMA_DIRECTION_FROM_DEVICE); + *len = plen; + + return buf; +} + +void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len, + DMADirection dir, dma_addr_t access_len) +{ + if (dma->unmap) { + dma->unmap(dma, buffer, len, dir, access_len); + return; + } + + cpu_physical_memory_unmap(buffer, len, + dir == DMA_DIRECTION_FROM_DEVICE, + access_len); + +} @@ -13,7 +13,9 @@ #include <stdio.h> #include "hw/hw.h" #include "block.h" +#include "kvm.h" +typedef struct DMAContext DMAContext; typedef struct ScatterGatherEntry ScatterGatherEntry; typedef enum { @@ -26,19 +28,229 @@ struct QEMUSGList { int nsg; int nalloc; size_t size; + DMAContext *dma; }; #if defined(TARGET_PHYS_ADDR_BITS) -typedef target_phys_addr_t dma_addr_t; -#define DMA_ADDR_FMT TARGET_FMT_plx +/* + * When an IOMMU is present, bus addresses become distinct from + * CPU/memory physical addresses and may be a different size. 
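To make the DMAContext indirection concrete, here is an illustrative sketch (not from the patch) of a bus exposing a fixed-offset DMA window through a translate callback, and a device issuing a write through it. The window base and size are invented values; map/unmap are left NULL so the generic translate-plus-bounce paths above are used:

    #define EXAMPLE_WINDOW_BASE  0x80000000ULL   /* made-up values */
    #define EXAMPLE_WINDOW_SIZE  0x10000000ULL

    static int example_translate(DMAContext *dma, dma_addr_t addr,
                                 target_phys_addr_t *paddr,
                                 target_phys_addr_t *len, DMADirection dir)
    {
        if (addr >= EXAMPLE_WINDOW_SIZE) {
            return -1;                           /* outside the DMA window */
        }
        *paddr = EXAMPLE_WINDOW_BASE + addr;     /* fixed-offset remapping */
        *len = EXAMPLE_WINDOW_SIZE - addr;       /* contiguous to window end */
        return 0;
    }

    static DMAContext example_dma;

    static void example_bus_init(void)
    {
        dma_context_init(&example_dma, example_translate, NULL, NULL);
    }

    static void example_device_write(dma_addr_t addr, const void *buf,
                                     dma_addr_t len)
    {
        /* Device-to-memory transfer; each chunk goes through
         * example_translate() before touching guest memory. */
        iommu_dma_memory_rw(&example_dma, addr, (void *)buf, len,
                            DMA_DIRECTION_FROM_DEVICE);
    }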
Because + * the IOVA size depends more on the bus than on the platform, we more + * or less have to treat these as 64-bit always to cover all (or at + * least most) cases. + */ +typedef uint64_t dma_addr_t; + +#define DMA_ADDR_BITS 64 +#define DMA_ADDR_FMT "%" PRIx64 + +typedef int DMATranslateFunc(DMAContext *dma, + dma_addr_t addr, + target_phys_addr_t *paddr, + target_phys_addr_t *len, + DMADirection dir); +typedef void* DMAMapFunc(DMAContext *dma, + dma_addr_t addr, + dma_addr_t *len, + DMADirection dir); +typedef void DMAUnmapFunc(DMAContext *dma, + void *buffer, + dma_addr_t len, + DMADirection dir, + dma_addr_t access_len); + +struct DMAContext { + DMATranslateFunc *translate; + DMAMapFunc *map; + DMAUnmapFunc *unmap; +}; + +static inline void dma_barrier(DMAContext *dma, DMADirection dir) +{ + /* + * This is called before DMA read and write operations + * unless the _relaxed form is used and is responsible + * for providing some sane ordering of accesses vs + * concurrently running VCPUs. + * + * Users of map(), unmap() or lower level st/ld_* + * operations are responsible for providing their own + * ordering via barriers. + * + * This primitive implementation does a simple smp_mb() + * before each operation which provides pretty much full + * ordering. + * + * A smarter implementation can be devised if needed to + * use lighter barriers based on the direction of the + * transfer, the DMA context, etc... + */ + if (kvm_enabled()) { + smp_mb(); + } +} + +static inline bool dma_has_iommu(DMAContext *dma) +{ + return !!dma; +} + +/* Checks that the given range of addresses is valid for DMA. This is + * useful for certain cases, but usually you should just use + * dma_memory_{read,write}() and check for errors */ +bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len, + DMADirection dir); +static inline bool dma_memory_valid(DMAContext *dma, + dma_addr_t addr, dma_addr_t len, + DMADirection dir) +{ + if (!dma_has_iommu(dma)) { + return true; + } else { + return iommu_dma_memory_valid(dma, addr, len, dir); + } +} + +int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr, + void *buf, dma_addr_t len, DMADirection dir); +static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr, + void *buf, dma_addr_t len, + DMADirection dir) +{ + if (!dma_has_iommu(dma)) { + /* Fast-path for no IOMMU */ + cpu_physical_memory_rw(addr, buf, len, + dir == DMA_DIRECTION_FROM_DEVICE); + return 0; + } else { + return iommu_dma_memory_rw(dma, addr, buf, len, dir); + } +} + +static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr, + void *buf, dma_addr_t len) +{ + return dma_memory_rw_relaxed(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE); +} + +static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr, + const void *buf, dma_addr_t len) +{ + return dma_memory_rw_relaxed(dma, addr, (void *)buf, len, + DMA_DIRECTION_FROM_DEVICE); +} + +static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr, + void *buf, dma_addr_t len, + DMADirection dir) +{ + dma_barrier(dma, dir); + + return dma_memory_rw_relaxed(dma, addr, buf, len, dir); +} + +static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr, + void *buf, dma_addr_t len) +{ + return dma_memory_rw(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE); +} + +static inline int dma_memory_write(DMAContext *dma, dma_addr_t addr, + const void *buf, dma_addr_t len) +{ + return dma_memory_rw(dma, addr, (void *)buf, len, + DMA_DIRECTION_FROM_DEVICE); +} + +int 
iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, + dma_addr_t len); + +int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len); + +void *iommu_dma_memory_map(DMAContext *dma, + dma_addr_t addr, dma_addr_t *len, + DMADirection dir); +static inline void *dma_memory_map(DMAContext *dma, + dma_addr_t addr, dma_addr_t *len, + DMADirection dir) +{ + if (!dma_has_iommu(dma)) { + target_phys_addr_t xlen = *len; + void *p; + + p = cpu_physical_memory_map(addr, &xlen, + dir == DMA_DIRECTION_FROM_DEVICE); + *len = xlen; + return p; + } else { + return iommu_dma_memory_map(dma, addr, len, dir); + } +} + +void iommu_dma_memory_unmap(DMAContext *dma, + void *buffer, dma_addr_t len, + DMADirection dir, dma_addr_t access_len); +static inline void dma_memory_unmap(DMAContext *dma, + void *buffer, dma_addr_t len, + DMADirection dir, dma_addr_t access_len) +{ + if (!dma_has_iommu(dma)) { + return cpu_physical_memory_unmap(buffer, (target_phys_addr_t)len, + dir == DMA_DIRECTION_FROM_DEVICE, + access_len); + } else { + iommu_dma_memory_unmap(dma, buffer, len, dir, access_len); + } +} + +#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \ + static inline uint##_bits##_t ld##_lname##_##_end##_dma(DMAContext *dma, \ + dma_addr_t addr) \ + { \ + uint##_bits##_t val; \ + dma_memory_read(dma, addr, &val, (_bits) / 8); \ + return _end##_bits##_to_cpu(val); \ + } \ + static inline void st##_sname##_##_end##_dma(DMAContext *dma, \ + dma_addr_t addr, \ + uint##_bits##_t val) \ + { \ + val = cpu_to_##_end##_bits(val); \ + dma_memory_write(dma, addr, &val, (_bits) / 8); \ + } + +static inline uint8_t ldub_dma(DMAContext *dma, dma_addr_t addr) +{ + uint8_t val; + + dma_memory_read(dma, addr, &val, 1); + return val; +} + +static inline void stb_dma(DMAContext *dma, dma_addr_t addr, uint8_t val) +{ + dma_memory_write(dma, addr, &val, 1); +} + +DEFINE_LDST_DMA(uw, w, 16, le); +DEFINE_LDST_DMA(l, l, 32, le); +DEFINE_LDST_DMA(q, q, 64, le); +DEFINE_LDST_DMA(uw, w, 16, be); +DEFINE_LDST_DMA(l, l, 32, be); +DEFINE_LDST_DMA(q, q, 64, be); + +#undef DEFINE_LDST_DMA + +void dma_context_init(DMAContext *dma, DMATranslateFunc translate, + DMAMapFunc map, DMAUnmapFunc unmap); struct ScatterGatherEntry { dma_addr_t base; dma_addr_t len; }; -void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint); +void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma); void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len); void qemu_sglist_destroy(QEMUSGList *qsg); #endif diff --git a/docs/specs/ppc-spapr-hcalls.txt b/docs/specs/ppc-spapr-hcalls.txt new file mode 100644 index 0000000000..52ba8d42ab --- /dev/null +++ b/docs/specs/ppc-spapr-hcalls.txt @@ -0,0 +1,78 @@ +When used with the "pseries" machine type, QEMU-system-ppc64 implements +a set of hypervisor calls using a subset of the server "PAPR" specification +(IBM internal at this point), which is also what IBM's proprietary hypervisor +adheres too. + +The subset is selected based on the requirements of Linux as a guest. + +In addition to those calls, we have added our own private hypervisor +calls which are mostly used as a private interface between the firmware +running in the guest and QEMU. + +All those hypercalls start at hcall number 0xf000 which correspond +to a implementation specific range in PAPR. + +- H_RTAS (0xf000) + +RTAS is a set of runtime services generally provided by the firmware +inside the guest to the operating system. 
It predates the existence +of hypervisors (it was originally an extension to Open Firmware) and +is still used by PAPR to provide various services that aren't performance +sensitive. + +We currently implement the RTAS services in QEMU itself. The actual RTAS +"firmware" blob in the guest is a small stub of a few instructions which +calls our private H_RTAS hypervisor call to pass the RTAS calls to QEMU. + +Arguments: + + r3 : H_RTAS (0xf000) + r4 : Guest physical address of RTAS parameter block + +Returns: + + H_SUCCESS : Successully called the RTAS function (RTAS result + will have been stored in the parameter block) + H_PARAMETER : Unknown token + +- H_LOGICAL_MEMOP (0xf001) + +When the guest runs in "real mode" (in powerpc lingua this means +with MMU disabled, ie guest effective == guest physical), it only +has access to a subset of memory and no IOs. + +PAPR provides a set of hypervisor calls to perform cachable or +non-cachable accesses to any guest physical addresses that the +guest can use in order to access IO devices while in real mode. + +This is typically used by the firmware running in the guest. + +However, doing a hypercall for each access is extremely inefficient +(even more so when running KVM) when accessing the frame buffer. In +that case, things like scrolling become unusably slow. + +This hypercall allows the guest to request a "memory op" to be applied +to memory. The supported memory ops at this point are to copy a range +of memory (supports overlap of source and destination) and XOR which +is used by our SLOF firmware to invert the screen. + +Arguments: + + r3: H_LOGICAL_MEMOP (0xf001) + r4: Guest physical address of destination + r5: Guest physical address of source + r6: Individual element size + 0 = 1 byte + 1 = 2 bytes + 2 = 4 bytes + 3 = 8 bytes + r7: Number of elements + r8: Operation + 0 = copy + 1 = xor + +Returns: + + H_SUCCESS : Success + H_PARAMETER : Invalid argument + diff --git a/dump-stub.c b/dump-stub.c new file mode 100644 index 0000000000..56d4564f0f --- /dev/null +++ b/dump-stub.c @@ -0,0 +1,64 @@ +/* + * QEMU dump + * + * Copyright Fujitsu, Corp. 2011, 2012 + * + * Authors: + * Wen Congyang <wency@cn.fujitsu.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu-common.h" +#include "dump.h" +#include "qerror.h" +#include "qmp-commands.h" + +/* we need this function in hmp.c */ +void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin, + int64_t begin, bool has_length, int64_t length, + Error **errp) +{ + error_set(errp, QERR_UNSUPPORTED); +} + +int cpu_write_elf64_note(write_core_dump_function f, + CPUArchState *env, int cpuid, + void *opaque) +{ + return -1; +} + +int cpu_write_elf32_note(write_core_dump_function f, + CPUArchState *env, int cpuid, + void *opaque) +{ + return -1; +} + +int cpu_write_elf64_qemunote(write_core_dump_function f, + CPUArchState *env, + void *opaque) +{ + return -1; +} + +int cpu_write_elf32_qemunote(write_core_dump_function f, + CPUArchState *env, + void *opaque) +{ + return -1; +} + +int cpu_get_dump_info(ArchDumpInfo *info) +{ + return -1; +} + +ssize_t cpu_get_note_size(int class, int machine, int nr_cpus) +{ + return -1; +} + @@ -6,16 +6,13 @@ * Authors: * Wen Congyang <wency@cn.fujitsu.com> * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
+ * See the COPYING file in the top-level directory. * */ #include "qemu-common.h" -#include <unistd.h> #include "elf.h" -#include <sys/procfs.h> -#include <glib.h> #include "cpu.h" #include "cpu-all.h" #include "targphys.h" @@ -23,13 +20,11 @@ #include "kvm.h" #include "dump.h" #include "sysemu.h" -#include "bswap.h" #include "memory_mapping.h" #include "error.h" #include "qmp-commands.h" #include "gdbstub.h" -#if defined(CONFIG_HAVE_CORE_DUMP) static uint16_t cpu_convert_to_target16(uint16_t val, int endian) { if (endian == ELFDATA2LSB) { @@ -750,6 +745,13 @@ static int dump_init(DumpState *s, int fd, bool paging, bool has_filter, goto cleanup; } + s->note_size = cpu_get_note_size(s->dump_info.d_class, + s->dump_info.d_machine, nr_cpus); + if (ret < 0) { + error_set(errp, QERR_UNSUPPORTED); + goto cleanup; + } + /* get memory mapping */ memory_mapping_list_init(&s->list); if (paging) { @@ -784,8 +786,6 @@ static int dump_init(DumpState *s, int fd, bool paging, bool has_filter, } } - s->note_size = cpu_get_note_size(s->dump_info.d_class, - s->dump_info.d_machine, nr_cpus); if (s->dump_info.d_class == ELFCLASS64) { if (s->have_section) { s->memory_offset = sizeof(Elf64_Ehdr) + @@ -871,13 +871,3 @@ void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin, g_free(s); } - -#else -/* we need this function in hmp.c */ -void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin, - int64_t begin, bool has_length, int64_t length, - Error **errp) -{ - error_set(errp, QERR_UNSUPPORTED); -} -#endif @@ -6,8 +6,8 @@ * Authors: * Wen Congyang <wency@cn.fujitsu.com> * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
* */ @@ -20,4 +20,16 @@ typedef struct ArchDumpInfo { int d_class; /* ELFCLASS32 or ELFCLASS64 */ } ArchDumpInfo; +typedef int (*write_core_dump_function)(void *buf, size_t size, void *opaque); +int cpu_write_elf64_note(write_core_dump_function f, CPUArchState *env, + int cpuid, void *opaque); +int cpu_write_elf32_note(write_core_dump_function f, CPUArchState *env, + int cpuid, void *opaque); +int cpu_write_elf64_qemunote(write_core_dump_function f, CPUArchState *env, + void *opaque); +int cpu_write_elf32_qemunote(write_core_dump_function f, CPUArchState *env, + void *opaque); +int cpu_get_dump_info(ArchDumpInfo *info); +ssize_t cpu_get_note_size(int class, int machine, int nr_cpus); + #endif diff --git a/exec-obsolete.h b/exec-obsolete.h index 792c831718..c09925610d 100644 --- a/exec-obsolete.h +++ b/exec-obsolete.h @@ -45,15 +45,15 @@ int cpu_physical_memory_set_dirty_tracking(int enable); #define CODE_DIRTY_FLAG 0x02 #define MIGRATION_DIRTY_FLAG 0x08 -/* read dirty bit (return 0 or 1) */ -static inline int cpu_physical_memory_is_dirty(ram_addr_t addr) +static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr) { - return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff; + return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS]; } -static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr) +/* read dirty bit (return 0 or 1) */ +static inline int cpu_physical_memory_is_dirty(ram_addr_t addr) { - return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS]; + return cpu_physical_memory_get_dirty_flags(addr) == 0xff; } static inline int cpu_physical_memory_get_dirty(ram_addr_t start, @@ -61,41 +61,55 @@ static inline int cpu_physical_memory_get_dirty(ram_addr_t start, int dirty_flags) { int ret = 0; - uint8_t *p; ram_addr_t addr, end; end = TARGET_PAGE_ALIGN(start + length); start &= TARGET_PAGE_MASK; - p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS); for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) { - ret |= *p++ & dirty_flags; + ret |= cpu_physical_memory_get_dirty_flags(addr) & dirty_flags; } return ret; } +static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr, + int dirty_flags) +{ + if ((dirty_flags & MIGRATION_DIRTY_FLAG) && + !cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE, + MIGRATION_DIRTY_FLAG)) { + ram_list.dirty_pages++; + } + return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags; +} + static inline void cpu_physical_memory_set_dirty(ram_addr_t addr) { - ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff; + cpu_physical_memory_set_dirty_flags(addr, 0xff); } -static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr, - int dirty_flags) +static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr, + int dirty_flags) { - return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags; + int mask = ~dirty_flags; + + if ((dirty_flags & MIGRATION_DIRTY_FLAG) && + cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE, + MIGRATION_DIRTY_FLAG)) { + ram_list.dirty_pages--; + } + return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask; } static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start, ram_addr_t length, int dirty_flags) { - uint8_t *p; ram_addr_t addr, end; end = TARGET_PAGE_ALIGN(start + length); start &= TARGET_PAGE_MASK; - p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS); for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) { - *p++ |= dirty_flags; + cpu_physical_memory_set_dirty_flags(addr, dirty_flags); } } @@ -103,16 +117,12 @@ static inline void 
cpu_physical_memory_mask_dirty_range(ram_addr_t start, ram_addr_t length, int dirty_flags) { - int mask; - uint8_t *p; ram_addr_t addr, end; end = TARGET_PAGE_ALIGN(start + length); start &= TARGET_PAGE_MASK; - mask = ~dirty_flags; - p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS); for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) { - *p++ &= mask; + cpu_physical_memory_clear_dirty_flags(addr, dirty_flags); } } @@ -216,16 +216,6 @@ static void memory_map_init(void); static MemoryRegion io_mem_watch; #endif -/* log support */ -#ifdef WIN32 -static const char *logfilename = "qemu.log"; -#else -static const char *logfilename = "/tmp/qemu.log"; -#endif -FILE *logfile; -int loglevel; -static int log_append = 0; - /* statistics */ static int tb_flush_count; static int tb_phys_invalidate_count; @@ -1076,11 +1066,11 @@ TranslationBlock *tb_gen_code(CPUArchState *env, } /* - * invalidate all TBs which intersect with the target physical pages - * starting in range [start;end[. NOTE: start and end may refer to - * different physical pages. 'is_cpu_write_access' should be true if called - * from a real cpu write access: the virtual CPU will exit the current - * TB if code is modified inside this TB. + * Invalidate all TBs which intersect with the target physical address range + * [start;end[. NOTE: start and end may refer to *different* physical pages. + * 'is_cpu_write_access' should be true if called from a real cpu write + * access: the virtual CPU will exit the current TB if code is modified inside + * this TB. */ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end, int is_cpu_write_access) @@ -1092,11 +1082,13 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end, } } -/* invalidate all TBs which intersect with the target physical page - starting in range [start;end[. NOTE: start and end must refer to - the same physical page. 'is_cpu_write_access' should be true if called - from a real cpu write access: the virtual CPU will exit the current - TB if code is modified inside this TB. */ +/* + * Invalidate all TBs which intersect with the target physical address range + * [start;end[. NOTE: start and end must refer to the *same* physical page. + * 'is_cpu_write_access' should be true if called from a real cpu write + * access: the virtual CPU will exit the current TB if code is modified inside + * this TB. + */ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, int is_cpu_write_access) { @@ -1492,7 +1484,8 @@ void tb_invalidate_phys_addr(target_phys_addr_t addr) static void breakpoint_invalidate(CPUArchState *env, target_ulong pc) { - tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc)); + tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) | + (pc & ~TARGET_PAGE_MASK)); } #endif #endif /* TARGET_HAS_ICE */ @@ -1670,46 +1663,6 @@ void cpu_single_step(CPUArchState *env, int enabled) #endif } -/* enable or disable low levels log */ -void cpu_set_log(int log_flags) -{ - loglevel = log_flags; - if (loglevel && !logfile) { - logfile = fopen(logfilename, log_append ? "a" : "w"); - if (!logfile) { - perror(logfilename); - _exit(1); - } -#if !defined(CONFIG_SOFTMMU) - /* must avoid mmap() usage of glibc by setting a buffer "by hand" */ - { - static char logfile_buf[4096]; - setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf)); - } -#elif defined(_WIN32) - /* Win32 doesn't support line-buffering, so use unbuffered output. 
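As a quick illustration (not part of the patch) of the invariant the reworked exec-obsolete.h helpers maintain: every set or clear of MIGRATION_DIRTY_FLAG now goes through cpu_physical_memory_set_dirty_flags() / _clear_dirty_flags(), which keep ram_list.dirty_pages equal to the number of pages carrying that flag:

    /* Illustrative only: mark one page migration-dirty, then clean it again;
     * the helpers bump and drop ram_list.dirty_pages on the two transitions. */
    static void example_dirty_roundtrip(ram_addr_t page_addr)
    {
        cpu_physical_memory_set_dirty_flags(page_addr, MIGRATION_DIRTY_FLAG);
        assert(cpu_physical_memory_get_dirty(page_addr, TARGET_PAGE_SIZE,
                                             MIGRATION_DIRTY_FLAG));
        cpu_physical_memory_clear_dirty_flags(page_addr, MIGRATION_DIRTY_FLAG);
    }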
*/ - setvbuf(logfile, NULL, _IONBF, 0); -#else - setvbuf(logfile, NULL, _IOLBF, 0); -#endif - log_append = 1; - } - if (!loglevel && logfile) { - fclose(logfile); - logfile = NULL; - } -} - -void cpu_set_log_filename(const char *filename) -{ - logfilename = strdup(filename); - if (logfile) { - fclose(logfile); - logfile = NULL; - } - cpu_set_log(loglevel); -} - static void cpu_unlink_tb(CPUArchState *env) { /* FIXME: TB unchaining isn't SMP safe. For now just ignore the @@ -1781,78 +1734,6 @@ void cpu_exit(CPUArchState *env) cpu_unlink_tb(env); } -const CPULogItem cpu_log_items[] = { - { CPU_LOG_TB_OUT_ASM, "out_asm", - "show generated host assembly code for each compiled TB" }, - { CPU_LOG_TB_IN_ASM, "in_asm", - "show target assembly code for each compiled TB" }, - { CPU_LOG_TB_OP, "op", - "show micro ops for each compiled TB" }, - { CPU_LOG_TB_OP_OPT, "op_opt", - "show micro ops " -#ifdef TARGET_I386 - "before eflags optimization and " -#endif - "after liveness analysis" }, - { CPU_LOG_INT, "int", - "show interrupts/exceptions in short format" }, - { CPU_LOG_EXEC, "exec", - "show trace before each executed TB (lots of logs)" }, - { CPU_LOG_TB_CPU, "cpu", - "show CPU state before block translation" }, -#ifdef TARGET_I386 - { CPU_LOG_PCALL, "pcall", - "show protected mode far calls/returns/exceptions" }, - { CPU_LOG_RESET, "cpu_reset", - "show CPU state before CPU resets" }, -#endif -#ifdef DEBUG_IOPORT - { CPU_LOG_IOPORT, "ioport", - "show all i/o ports accesses" }, -#endif - { 0, NULL, NULL }, -}; - -static int cmp1(const char *s1, int n, const char *s2) -{ - if (strlen(s2) != n) - return 0; - return memcmp(s1, s2, n) == 0; -} - -/* takes a comma separated list of log masks. Return 0 if error. */ -int cpu_str_to_log_mask(const char *str) -{ - const CPULogItem *item; - int mask; - const char *p, *p1; - - p = str; - mask = 0; - for(;;) { - p1 = strchr(p, ','); - if (!p1) - p1 = p + strlen(p); - if(cmp1(p,p1-p,"all")) { - for(item = cpu_log_items; item->mask != 0; item++) { - mask |= item->mask; - } - } else { - for(item = cpu_log_items; item->mask != 0; item++) { - if (cmp1(p, p1 - p, item->name)) - goto found; - } - return 0; - } - found: - mask |= item->mask; - if (*p1 != ',') - break; - p = p1 + 1; - } - return mask; -} - void cpu_abort(CPUArchState *env, const char *fmt, ...) { va_list ap; @@ -1943,11 +1824,29 @@ void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr) TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); } +static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end, + uintptr_t length) +{ + uintptr_t start1; + + /* we modify the TLB cache so that the dirty bit will be set again + when accessing the range */ + start1 = (uintptr_t)qemu_safe_ram_ptr(start); + /* Check that we don't span multiple blocks - this breaks the + address comparisons below. */ + if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1 + != (end - 1) - start) { + abort(); + } + cpu_tlb_reset_dirty_all(start1, length); + +} + /* Note: start and end must be within the same ram block. 
*/ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, int dirty_flags) { - uintptr_t length, start1; + uintptr_t length; start &= TARGET_PAGE_MASK; end = TARGET_PAGE_ALIGN(end); @@ -1957,16 +1856,9 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, return; cpu_physical_memory_mask_dirty_range(start, length, dirty_flags); - /* we modify the TLB cache so that the dirty bit will be set again - when accessing the range */ - start1 = (uintptr_t)qemu_safe_ram_ptr(start); - /* Check that we don't span multiple blocks - this breaks the - address comparisons below. */ - if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1 - != (end - 1) - start) { - abort(); + if (tcg_enabled()) { + tlb_reset_dirty_range_all(start, end, length); } - cpu_tlb_reset_dirty_all(start1, length); } int cpu_physical_memory_set_dirty_tracking(int enable) @@ -2600,8 +2492,8 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev) assert(new_block); assert(!new_block->idstr[0]); - if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) { - char *id = dev->parent_bus->info->get_dev_path(dev); + if (dev) { + char *id = qdev_get_dev_path(dev); if (id) { snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); g_free(id); @@ -2673,8 +2565,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, ram_list.phys_dirty = g_realloc(ram_list.phys_dirty, last_ram_offset() >> TARGET_PAGE_BITS); - memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS), - 0xff, size >> TARGET_PAGE_BITS); + cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff); if (kvm_enabled()) kvm_setup_guest_memory(new_block->host, size); diff --git a/fsdev/Makefile.objs b/fsdev/Makefile.objs new file mode 100644 index 0000000000..cb1e2500b9 --- /dev/null +++ b/fsdev/Makefile.objs @@ -0,0 +1,9 @@ +ifeq ($(CONFIG_REALLY_VIRTFS),y) +common-obj-y = qemu-fsdev.o virtio-9p-marshal.o + +# Toplevel always builds this; targets without virtio will put it in +# common-obj-y +extra-obj-y = qemu-fsdev-dummy.o +else +common-obj-y = qemu-fsdev-dummy.o +endif @@ -18,6 +18,7 @@ #include "qemu-option.h" #include "qemu-timer.h" #include "qmp-commands.h" +#include "monitor.h" static void hmp_handle_error(Monitor *mon, Error **errp) { @@ -144,6 +145,8 @@ void hmp_info_migrate(Monitor *mon) info->ram->remaining >> 10); monitor_printf(mon, "total ram: %" PRIu64 " kbytes\n", info->ram->total >> 10); + monitor_printf(mon, "total time: %" PRIu64 " milliseconds\n", + info->ram->total_time); } if (info->has_disk) { diff --git a/hw/9pfs/Makefile.objs b/hw/9pfs/Makefile.objs new file mode 100644 index 0000000000..972df24050 --- /dev/null +++ b/hw/9pfs/Makefile.objs @@ -0,0 +1,9 @@ +hw-obj-y = virtio-9p.o +hw-obj-y += virtio-9p-local.o virtio-9p-xattr.o +hw-obj-y += virtio-9p-xattr-user.o virtio-9p-posix-acl.o +hw-obj-y += virtio-9p-coth.o cofs.o codir.o cofile.o +hw-obj-y += coxattr.o virtio-9p-synth.o +hw-obj-$(CONFIG_OPEN_BY_HANDLE) += virtio-9p-handle.o +hw-obj-y += virtio-9p-proxy.o + +obj-y += virtio-9p-device.o diff --git a/hw/Makefile.objs b/hw/Makefile.objs new file mode 100644 index 0000000000..9a350deafb --- /dev/null +++ b/hw/Makefile.objs @@ -0,0 +1,173 @@ +hw-obj-y = usb/ ide/ +hw-obj-y += loader.o +hw-obj-$(CONFIG_VIRTIO) += virtio-console.o +hw-obj-$(CONFIG_VIRTIO_PCI) += virtio-pci.o +hw-obj-y += fw_cfg.o +hw-obj-$(CONFIG_PCI) += pci.o pci_bridge.o pci_bridge_dev.o +hw-obj-$(CONFIG_PCI) += msix.o msi.o +hw-obj-$(CONFIG_PCI) += shpc.o +hw-obj-$(CONFIG_PCI) += 
slotid_cap.o +hw-obj-$(CONFIG_PCI) += pci_host.o pcie_host.o +hw-obj-$(CONFIG_PCI) += ioh3420.o xio3130_upstream.o xio3130_downstream.o +hw-obj-y += watchdog.o +hw-obj-$(CONFIG_ISA_MMIO) += isa_mmio.o +hw-obj-$(CONFIG_ECC) += ecc.o +hw-obj-$(CONFIG_NAND) += nand.o +hw-obj-$(CONFIG_PFLASH_CFI01) += pflash_cfi01.o +hw-obj-$(CONFIG_PFLASH_CFI02) += pflash_cfi02.o + +hw-obj-$(CONFIG_M48T59) += m48t59.o +hw-obj-$(CONFIG_ESCC) += escc.o +hw-obj-$(CONFIG_EMPTY_SLOT) += empty_slot.o + +hw-obj-$(CONFIG_SERIAL) += serial.o +hw-obj-$(CONFIG_PARALLEL) += parallel.o +hw-obj-$(CONFIG_I8254) += i8254_common.o i8254.o +hw-obj-$(CONFIG_PCSPK) += pcspk.o +hw-obj-$(CONFIG_PCKBD) += pckbd.o +hw-obj-$(CONFIG_FDC) += fdc.o +hw-obj-$(CONFIG_ACPI) += acpi.o acpi_piix4.o +hw-obj-$(CONFIG_APM) += pm_smbus.o apm.o +hw-obj-$(CONFIG_DMA) += dma.o +hw-obj-$(CONFIG_I82374) += i82374.o +hw-obj-$(CONFIG_HPET) += hpet.o +hw-obj-$(CONFIG_APPLESMC) += applesmc.o +hw-obj-$(CONFIG_SMARTCARD) += ccid-card-passthru.o +hw-obj-$(CONFIG_SMARTCARD_NSS) += ccid-card-emulated.o +hw-obj-$(CONFIG_I8259) += i8259_common.o i8259.o + +# PPC devices +hw-obj-$(CONFIG_PREP_PCI) += prep_pci.o +hw-obj-$(CONFIG_I82378) += i82378.o +# Mac shared devices +hw-obj-$(CONFIG_MACIO) += macio.o +hw-obj-$(CONFIG_CUDA) += cuda.o +hw-obj-$(CONFIG_ADB) += adb.o +hw-obj-$(CONFIG_MAC_NVRAM) += mac_nvram.o +hw-obj-$(CONFIG_MAC_DBDMA) += mac_dbdma.o +# OldWorld PowerMac +hw-obj-$(CONFIG_HEATHROW_PIC) += heathrow_pic.o +hw-obj-$(CONFIG_GRACKLE_PCI) += grackle_pci.o +# NewWorld PowerMac +hw-obj-$(CONFIG_UNIN_PCI) += unin_pci.o +hw-obj-$(CONFIG_DEC_PCI) += dec_pci.o +# PowerPC E500 boards +hw-obj-$(CONFIG_PPCE500_PCI) += ppce500_pci.o + +# MIPS devices +hw-obj-$(CONFIG_PIIX4) += piix4.o +hw-obj-$(CONFIG_G364FB) += g364fb.o +hw-obj-$(CONFIG_JAZZ_LED) += jazz_led.o + +# Xilinx devices +hw-obj-$(CONFIG_XILINX) += xilinx_intc.o +hw-obj-$(CONFIG_XILINX) += xilinx_timer.o +hw-obj-$(CONFIG_XILINX) += xilinx_uartlite.o +hw-obj-$(CONFIG_XILINX_AXI) += xilinx_axidma.o +hw-obj-$(CONFIG_XILINX_AXI) += xilinx_axienet.o + +# PCI watchdog devices +hw-obj-$(CONFIG_PCI) += wdt_i6300esb.o + +hw-obj-$(CONFIG_PCI) += pcie.o pcie_aer.o pcie_port.o + +# PCI network cards +hw-obj-$(CONFIG_NE2000_PCI) += ne2000.o +hw-obj-$(CONFIG_EEPRO100_PCI) += eepro100.o +hw-obj-$(CONFIG_PCNET_PCI) += pcnet-pci.o +hw-obj-$(CONFIG_PCNET_COMMON) += pcnet.o +hw-obj-$(CONFIG_E1000_PCI) += e1000.o +hw-obj-$(CONFIG_RTL8139_PCI) += rtl8139.o + +hw-obj-$(CONFIG_SMC91C111) += smc91c111.o +hw-obj-$(CONFIG_LAN9118) += lan9118.o +hw-obj-$(CONFIG_NE2000_ISA) += ne2000-isa.o +hw-obj-$(CONFIG_OPENCORES_ETH) += opencores_eth.o + +# SCSI layer +hw-obj-$(CONFIG_LSI_SCSI_PCI) += lsi53c895a.o +hw-obj-$(CONFIG_MEGASAS_SCSI_PCI) += megasas.o +hw-obj-$(CONFIG_ESP) += esp.o + +hw-obj-y += sysbus.o isa-bus.o +hw-obj-y += qdev-addr.o + +# VGA +hw-obj-$(CONFIG_VGA_PCI) += vga-pci.o +hw-obj-$(CONFIG_VGA_ISA) += vga-isa.o +hw-obj-$(CONFIG_VGA_ISA_MM) += vga-isa-mm.o +hw-obj-$(CONFIG_VMWARE_VGA) += vmware_vga.o +hw-obj-$(CONFIG_VMMOUSE) += vmmouse.o +hw-obj-$(CONFIG_VGA_CIRRUS) += cirrus_vga.o + +hw-obj-$(CONFIG_RC4030) += rc4030.o +hw-obj-$(CONFIG_DP8393X) += dp8393x.o +hw-obj-$(CONFIG_DS1225Y) += ds1225y.o +hw-obj-$(CONFIG_MIPSNET) += mipsnet.o + +# Sound +sound-obj-y = +sound-obj-$(CONFIG_SB16) += sb16.o +sound-obj-$(CONFIG_ES1370) += es1370.o +sound-obj-$(CONFIG_AC97) += ac97.o +sound-obj-$(CONFIG_ADLIB) += fmopl.o adlib.o +sound-obj-$(CONFIG_GUS) += gus.o gusemu_hal.o gusemu_mixer.o +sound-obj-$(CONFIG_CS4231A) += cs4231a.o 
+sound-obj-$(CONFIG_HDA) += intel-hda.o hda-audio.o + +$(obj)/adlib.o $(obj)/fmopl.o: QEMU_CFLAGS += -DBUILD_Y8950=0 + +hw-obj-$(CONFIG_SOUND) += $(sound-obj-y) + +hw-obj-$(CONFIG_REALLY_VIRTFS) += 9pfs/ + +common-obj-y += usb/ +common-obj-y += irq.o +common-obj-$(CONFIG_PTIMER) += ptimer.o +common-obj-$(CONFIG_MAX7310) += max7310.o +common-obj-$(CONFIG_WM8750) += wm8750.o +common-obj-$(CONFIG_TWL92230) += twl92230.o +common-obj-$(CONFIG_TSC2005) += tsc2005.o +common-obj-$(CONFIG_LM832X) += lm832x.o +common-obj-$(CONFIG_TMP105) += tmp105.o +common-obj-$(CONFIG_STELLARIS_INPUT) += stellaris_input.o +common-obj-$(CONFIG_SSD0303) += ssd0303.o +common-obj-$(CONFIG_SSD0323) += ssd0323.o +common-obj-$(CONFIG_ADS7846) += ads7846.o +common-obj-$(CONFIG_MAX111X) += max111x.o +common-obj-$(CONFIG_DS1338) += ds1338.o +common-obj-y += i2c.o smbus.o smbus_eeprom.o +common-obj-y += eeprom93xx.o +common-obj-y += scsi-disk.o cdrom.o +common-obj-y += scsi-generic.o scsi-bus.o +common-obj-y += hid.o +common-obj-$(CONFIG_SSI) += ssi.o +common-obj-$(CONFIG_SSI_SD) += ssi-sd.o +common-obj-$(CONFIG_SD) += sd.o +common-obj-y += bt.o bt-l2cap.o bt-sdp.o bt-hci.o bt-hid.o +common-obj-y += bt-hci-csr.o +common-obj-y += msmouse.o ps2.o +common-obj-y += qdev.o qdev-properties.o qdev-monitor.o +common-obj-$(CONFIG_BRLAPI) += baum.o + +# xen backend driver support +common-obj-$(CONFIG_XEN_BACKEND) += xen_backend.o xen_devconfig.o +common-obj-$(CONFIG_XEN_BACKEND) += xen_console.o xenfb.o xen_disk.o xen_nic.o + +# Per-target files +# virtio has to be here due to weird dependency between PCI and virtio-net. +# need to fix this properly +obj-$(CONFIG_VIRTIO) += virtio.o virtio-blk.o virtio-balloon.o virtio-net.o +obj-$(CONFIG_VIRTIO) += virtio-serial-bus.o virtio-scsi.o +obj-$(CONFIG_SOFTMMU) += vhost_net.o +obj-$(CONFIG_VHOST_NET) += vhost.o +obj-$(CONFIG_REALLY_VIRTFS) += 9pfs/ +obj-$(CONFIG_NO_PCI) += pci-stub.o +obj-$(CONFIG_VGA) += vga.o +obj-$(CONFIG_SOFTMMU) += device-hotplug.o +obj-$(CONFIG_XEN) += xen_domainbuild.o xen_machine_pv.o + +# Inter-VM PCI shared memory +ifeq ($(CONFIG_PCI), y) +obj-$(CONFIG_KVM) += ivshmem.o +endif diff --git a/hw/a15mpcore.c b/hw/a15mpcore.c index 5a7b365548..fc0a02ae86 100644 --- a/hw/a15mpcore.c +++ b/hw/a15mpcore.c @@ -44,6 +44,7 @@ static int a15mp_priv_init(SysBusDevice *dev) s->gic = qdev_create(NULL, "arm_gic"); qdev_prop_set_uint32(s->gic, "num-cpu", s->num_cpu); qdev_prop_set_uint32(s->gic, "num-irq", s->num_irq); + qdev_prop_set_uint32(s->gic, "revision", 2); qdev_init_nofail(s->gic); busdev = sysbus_from_qdev(s->gic); diff --git a/hw/a9mpcore.c b/hw/a9mpcore.c index c2ff74d4b6..ebd5b29173 100644 --- a/hw/a9mpcore.c +++ b/hw/a9mpcore.c @@ -75,7 +75,7 @@ static void a9_scu_write(void *opaque, target_phys_addr_t offset, break; default: fprintf(stderr, "Invalid size %u in write to a9 scu register %x\n", - size, offset); + size, (unsigned)offset); return; } @@ -370,7 +370,7 @@ void acpi_pm1_cnt_init(ACPIREGS *ar) qemu_register_wakeup_notifier(&ar->wakeup); } -void acpi_pm1_cnt_write(ACPIREGS *ar, uint16_t val) +void acpi_pm1_cnt_write(ACPIREGS *ar, uint16_t val, char s4) { ar->pm1.cnt.cnt = val & ~(ACPI_BITMASK_SLEEP_ENABLE); @@ -385,6 +385,9 @@ void acpi_pm1_cnt_write(ACPIREGS *ar, uint16_t val) qemu_system_suspend_request(); break; default: + if (sus_typ == s4) { /* S4 request */ + qemu_system_shutdown_request(); + } break; } } @@ -139,7 +139,7 @@ void acpi_pm1_evt_reset(ACPIREGS *ar); /* PM1a_CNT: piix and ich9 don't implement PM1b CNT. 
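 *
 * Worked example (assumption, not part of this patch): the extra "s4"
 * argument added to acpi_pm1_cnt_write() below carries the board's
 * configurable S4 SLP_TYP value, so a write whose sleep type matches it
 * is treated as an S4 request and shuts the machine down. With the piix4
 * defaults (disable_s3 = 0, disable_s4 = 0, s4_val = 2) the
 * "etc/system-states" fw_cfg blob added later in this patch works out to:
 *
 *   suspend[3] = 1 | (1 << 7) = 0x81   (S3 enabled, SLP_TYP 1)
 *   suspend[4] = 2 | (1 << 7) = 0x82   (S4 enabled, SLP_TYP 2)
 *
 * i.e. bit 7 presumably flags the state as enabled and the low bits give
 * the sleep type the guest firmware should use.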
*/ void acpi_pm1_cnt_init(ACPIREGS *ar); -void acpi_pm1_cnt_write(ACPIREGS *ar, uint16_t val); +void acpi_pm1_cnt_write(ACPIREGS *ar, uint16_t val, char s4); void acpi_pm1_cnt_update(ACPIREGS *ar, bool sci_enable, bool sci_disable); void acpi_pm1_cnt_reset(ACPIREGS *ar); diff --git a/hw/acpi_piix4.c b/hw/acpi_piix4.c index 0345490ee0..0aace60f24 100644 --- a/hw/acpi_piix4.c +++ b/hw/acpi_piix4.c @@ -27,6 +27,7 @@ #include "sysemu.h" #include "range.h" #include "ioport.h" +#include "fw_cfg.h" //#define DEBUG @@ -71,6 +72,10 @@ typedef struct PIIX4PMState { struct pci_status pci0_status; uint32_t pci0_hotplug_enable; uint32_t pci0_slot_device_present; + + uint8_t disable_s3; + uint8_t disable_s4; + uint8_t s4_val; } PIIX4PMState; static void piix4_acpi_system_hot_add_init(PCIBus *bus, PIIX4PMState *s); @@ -123,7 +128,7 @@ static void pm_ioport_write(IORange *ioport, uint64_t addr, unsigned width, pm_update_sci(s); break; case 0x04: - acpi_pm1_cnt_write(&s->ar, val); + acpi_pm1_cnt_write(&s->ar, val, s->s4_val); break; default: break; @@ -284,7 +289,7 @@ static const VMStateDescription vmstate_acpi = { static void acpi_piix_eject_slot(PIIX4PMState *s, unsigned slots) { - DeviceState *qdev, *next; + BusChild *kid, *next; BusState *bus = qdev_get_parent_bus(&s->dev.qdev); int slot = ffs(slots) - 1; bool slot_free = true; @@ -292,7 +297,8 @@ static void acpi_piix_eject_slot(PIIX4PMState *s, unsigned slots) /* Mark request as complete */ s->pci0_status.down &= ~(1U << slot); - QTAILQ_FOREACH_SAFE(qdev, &bus->children, sibling, next) { + QTAILQ_FOREACH_SAFE(kid, &bus->children, sibling, next) { + DeviceState *qdev = kid->child; PCIDevice *dev = PCI_DEVICE(qdev); PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev); if (PCI_SLOT(dev->devfn) == slot) { @@ -313,7 +319,7 @@ static void piix4_update_hotplug(PIIX4PMState *s) { PCIDevice *dev = &s->dev; BusState *bus = qdev_get_parent_bus(&dev->qdev); - DeviceState *qdev, *next; + BusChild *kid, *next; /* Execute any pending removes during reset */ while (s->pci0_status.down) { @@ -323,7 +329,8 @@ static void piix4_update_hotplug(PIIX4PMState *s) s->pci0_hotplug_enable = ~0; s->pci0_slot_device_present = 0; - QTAILQ_FOREACH_SAFE(qdev, &bus->children, sibling, next) { + QTAILQ_FOREACH_SAFE(kid, &bus->children, sibling, next) { + DeviceState *qdev = kid->child; PCIDevice *pdev = PCI_DEVICE(qdev); PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pdev); int slot = PCI_SLOT(pdev->devfn); @@ -422,7 +429,7 @@ static int piix4_pm_initfn(PCIDevice *dev) i2c_bus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base, qemu_irq sci_irq, qemu_irq smi_irq, - int kvm_enabled) + int kvm_enabled, void *fw_cfg) { PCIDevice *dev; PIIX4PMState *s; @@ -438,11 +445,22 @@ i2c_bus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base, qdev_init_nofail(&dev->qdev); + if (fw_cfg) { + uint8_t suspend[6] = {128, 0, 0, 129, 128, 128}; + suspend[3] = 1 | ((!s->disable_s3) << 7); + suspend[4] = s->s4_val | ((!s->disable_s4) << 7); + + fw_cfg_add_file(fw_cfg, "etc/system-states", g_memdup(suspend, 6), 6); + } + return s->smb.smbus; } static Property piix4_pm_properties[] = { DEFINE_PROP_UINT32("smb_io_base", PIIX4PMState, smb_io_base, 0), + DEFINE_PROP_UINT8("disable_s3", PIIX4PMState, disable_s3, 0), + DEFINE_PROP_UINT8("disable_s4", PIIX4PMState, disable_s4, 0), + DEFINE_PROP_UINT8("s4_val", PIIX4PMState, s4_val, 2), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/alpha/Makefile.objs b/hw/alpha/Makefile.objs new file mode 100644 index 0000000000..af1c07fa7c --- /dev/null +++ 
b/hw/alpha/Makefile.objs @@ -0,0 +1,4 @@ +obj-y = mc146818rtc.o +obj-y += alpha_pci.o alpha_dp264.o alpha_typhoon.o + +obj-y := $(addprefix ../,$(obj-y)) diff --git a/hw/apic-msidef.h b/hw/apic-msidef.h new file mode 100644 index 0000000000..6e2eb71f2f --- /dev/null +++ b/hw/apic-msidef.h @@ -0,0 +1,30 @@ +#ifndef HW_APIC_MSIDEF_H +#define HW_APIC_MSIDEF_H + +/* + * Intel APIC constants: from include/asm/msidef.h + */ + +/* + * Shifts for MSI data + */ + +#define MSI_DATA_VECTOR_SHIFT 0 +#define MSI_DATA_VECTOR_MASK 0x000000ff + +#define MSI_DATA_DELIVERY_MODE_SHIFT 8 +#define MSI_DATA_LEVEL_SHIFT 14 +#define MSI_DATA_TRIGGER_SHIFT 15 + +/* + * Shift/mask fields for msi address + */ + +#define MSI_ADDR_DEST_MODE_SHIFT 2 + +#define MSI_ADDR_REDIRECTION_SHIFT 3 + +#define MSI_ADDR_DEST_ID_SHIFT 12 +#define MSI_ADDR_DEST_ID_MASK 0x00ffff0 + +#endif /* HW_APIC_MSIDEF_H */ @@ -23,19 +23,10 @@ #include "host-utils.h" #include "trace.h" #include "pc.h" +#include "apic-msidef.h" #define MAX_APIC_WORDS 8 -/* Intel APIC constants: from include/asm/msidef.h */ -#define MSI_DATA_VECTOR_SHIFT 0 -#define MSI_DATA_VECTOR_MASK 0x000000ff -#define MSI_DATA_DELIVERY_MODE_SHIFT 8 -#define MSI_DATA_TRIGGER_SHIFT 15 -#define MSI_DATA_LEVEL_SHIFT 14 -#define MSI_ADDR_DEST_MODE_SHIFT 2 -#define MSI_ADDR_DEST_ID_SHIFT 12 -#define MSI_ADDR_DEST_ID_MASK 0x00ffff0 - #define SYNC_FROM_VAPIC 0x1 #define SYNC_TO_VAPIC 0x2 #define SYNC_ISR_IRR_TO_VAPIC 0x4 diff --git a/hw/arm-misc.h b/hw/arm-misc.h index 2f46e214cf..1f96229d3c 100644 --- a/hw/arm-misc.h +++ b/hw/arm-misc.h @@ -16,7 +16,7 @@ /* The CPU is also modeled as an interrupt controller. */ #define ARM_PIC_CPU_IRQ 0 #define ARM_PIC_CPU_FIQ 1 -qemu_irq *arm_pic_init_cpu(CPUARMState *env); +qemu_irq *arm_pic_init_cpu(ARMCPU *cpu); /* armv7m.c */ qemu_irq *armv7m_init(MemoryRegion *address_space_mem, @@ -45,21 +45,21 @@ struct arm_boot_info { /* multicore boards that use the default secondary core boot functions * can ignore these two function calls. If the default functions won't * work, then write_secondary_boot() should write a suitable blob of - * code mimicing the secondary CPU startup process used by the board's + * code mimicking the secondary CPU startup process used by the board's * boot loader/boot ROM code, and secondary_cpu_reset_hook() should - * perform any necessary CPU reset handling and set the PC for thei + * perform any necessary CPU reset handling and set the PC for the * secondary CPUs to point at this boot blob. */ - void (*write_secondary_boot)(CPUARMState *env, + void (*write_secondary_boot)(ARMCPU *cpu, const struct arm_boot_info *info); - void (*secondary_cpu_reset_hook)(CPUARMState *env, + void (*secondary_cpu_reset_hook)(ARMCPU *cpu, const struct arm_boot_info *info); /* Used internally by arm_boot.c */ int is_linux; target_phys_addr_t initrd_size; target_phys_addr_t entry; }; -void arm_load_kernel(CPUARMState *env, struct arm_boot_info *info); +void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info); /* Multiplication factor to convert from system clock ticks to qemu timer ticks. 
*/ diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs new file mode 100644 index 0000000000..236786eb5a --- /dev/null +++ b/hw/arm/Makefile.objs @@ -0,0 +1,43 @@ +obj-y = integratorcp.o versatilepb.o arm_pic.o arm_timer.o +obj-y += arm_boot.o pl011.o pl031.o pl050.o pl080.o pl110.o pl181.o pl190.o +obj-y += versatile_pci.o +obj-y += versatile_i2c.o +obj-y += cadence_uart.o +obj-y += cadence_ttc.o +obj-y += cadence_gem.o +obj-y += xilinx_zynq.o zynq_slcr.o +obj-y += arm_gic.o arm_gic_common.o +obj-y += realview_gic.o realview.o arm_sysctl.o arm11mpcore.o a9mpcore.o +obj-y += exynos4210_gic.o exynos4210_combiner.o exynos4210.o +obj-y += exynos4_boards.o exynos4210_uart.o exynos4210_pwm.o +obj-y += exynos4210_pmu.o exynos4210_mct.o exynos4210_fimd.o +obj-y += exynos4210_rtc.o +obj-y += arm_l2x0.o +obj-y += arm_mptimer.o a15mpcore.o +obj-y += armv7m.o armv7m_nvic.o stellaris.o pl022.o stellaris_enet.o +obj-y += highbank.o +obj-y += pl061.o +obj-y += xgmac.o +obj-y += pxa2xx.o pxa2xx_pic.o pxa2xx_gpio.o pxa2xx_timer.o pxa2xx_dma.o +obj-y += pxa2xx_lcd.o pxa2xx_mmci.o pxa2xx_pcmcia.o pxa2xx_keypad.o +obj-y += gumstix.o +obj-y += zaurus.o ide/microdrive.o spitz.o tosa.o tc6393xb.o +obj-y += omap1.o omap_lcdc.o omap_dma.o omap_clk.o omap_mmc.o omap_i2c.o \ + omap_gpio.o omap_intc.o omap_uart.o +obj-y += omap2.o omap_dss.o soc_dma.o omap_gptimer.o omap_synctimer.o \ + omap_gpmc.o omap_sdrc.o omap_spi.o omap_tap.o omap_l4.o +obj-y += omap_sx1.o palm.o tsc210x.o +obj-y += nseries.o blizzard.o onenand.o cbus.o tusb6010.o usb/hcd-musb.o +obj-y += mst_fpga.o mainstone.o +obj-y += z2.o +obj-y += musicpal.o bitbang_i2c.o marvell_88w8618_audio.o +obj-y += framebuffer.o +obj-y += vexpress.o +obj-y += strongarm.o +obj-y += collie.o +obj-y += imx_serial.o imx_ccm.o imx_timer.o imx_avic.o +obj-y += kzm.o +obj-y += pl041.o lm4549.o +obj-$(CONFIG_FDT) += ../device_tree.o + +obj-y := $(addprefix ../,$(obj-y)) diff --git a/hw/arm11mpcore.c b/hw/arm11mpcore.c index c528d7aa01..1bff3d3282 100644 --- a/hw/arm11mpcore.c +++ b/hw/arm11mpcore.c @@ -123,6 +123,8 @@ static int mpcore_priv_init(SysBusDevice *dev) s->gic = qdev_create(NULL, "arm_gic"); qdev_prop_set_uint32(s->gic, "num-cpu", s->num_cpu); qdev_prop_set_uint32(s->gic, "num-irq", s->num_irq); + /* Request the legacy 11MPCore GIC behaviour: */ + qdev_prop_set_uint32(s->gic, "revision", 0); qdev_init_nofail(s->gic); /* Pass through outbound IRQ lines from the GIC */ diff --git a/hw/arm_boot.c b/hw/arm_boot.c index eb2d1760b0..a1e6ddbc1c 100644 --- a/hw/arm_boot.c +++ b/hw/arm_boot.c @@ -59,7 +59,7 @@ static uint32_t smpboot[] = { 0 /* bootreg: Boot register address is held here */ }; -static void default_write_secondary(CPUARMState *env, +static void default_write_secondary(ARMCPU *cpu, const struct arm_boot_info *info) { int n; @@ -72,9 +72,11 @@ static void default_write_secondary(CPUARMState *env, info->smp_loader_start); } -static void default_reset_secondary(CPUARMState *env, +static void default_reset_secondary(ARMCPU *cpu, const struct arm_boot_info *info) { + CPUARMState *env = &cpu->env; + stl_phys_notdirty(info->smp_bootreg_addr, 0); env->regs[15] = info->smp_loader_start; } @@ -240,10 +242,12 @@ static int load_dtb(target_phys_addr_t addr, const struct arm_boot_info *binfo) fprintf(stderr, "couldn't set /memory/reg\n"); } - rc = qemu_devtree_setprop_string(fdt, "/chosen", "bootargs", - binfo->kernel_cmdline); - if (rc < 0) { - fprintf(stderr, "couldn't set /chosen/bootargs\n"); + if (binfo->kernel_cmdline && *binfo->kernel_cmdline) { + rc = 
qemu_devtree_setprop_string(fdt, "/chosen", "bootargs", + binfo->kernel_cmdline); + if (rc < 0) { + fprintf(stderr, "couldn't set /chosen/bootargs\n"); + } } if (binfo->initrd_size) { @@ -295,15 +299,15 @@ static void do_cpu_reset(void *opaque) } } } else { - info->secondary_cpu_reset_hook(env, info); + info->secondary_cpu_reset_hook(cpu, info); } } } } -void arm_load_kernel(CPUARMState *env, struct arm_boot_info *info) +void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info) { - ARMCPU *cpu; + CPUARMState *env = &cpu->env; int kernel_size; int initrd_size; int n; @@ -402,7 +406,7 @@ void arm_load_kernel(CPUARMState *env, struct arm_boot_info *info) rom_add_blob_fixed("bootloader", bootloader, sizeof(bootloader), info->loader_start); if (info->nb_cpus > 1) { - info->write_secondary_boot(env, info); + info->write_secondary_boot(cpu, info); } } info->is_linux = is_linux; diff --git a/hw/arm_gic.c b/hw/arm_gic.c index 72298b4b41..186ac66f00 100644 --- a/hw/arm_gic.c +++ b/hw/arm_gic.c @@ -19,135 +19,34 @@ */ #include "sysbus.h" - -/* Maximum number of possible interrupts, determined by the GIC architecture */ -#define GIC_MAXIRQ 1020 -/* First 32 are private to each CPU (SGIs and PPIs). */ -#define GIC_INTERNAL 32 -/* Maximum number of possible CPU interfaces, determined by GIC architecture */ -#ifdef NVIC -#define NCPU 1 -#else -#define NCPU 8 -#endif +#include "arm_gic_internal.h" //#define DEBUG_GIC #ifdef DEBUG_GIC #define DPRINTF(fmt, ...) \ -do { printf("arm_gic: " fmt , ## __VA_ARGS__); } while (0) +do { fprintf(stderr, "arm_gic: " fmt , ## __VA_ARGS__); } while (0) #else #define DPRINTF(fmt, ...) do {} while(0) #endif -#ifdef NVIC -static const uint8_t gic_id[] = -{ 0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1 }; -/* The NVIC has 16 internal vectors. However these are not exposed - through the normal GIC interface. */ -#define GIC_BASE_IRQ 32 -#else -static const uint8_t gic_id[] = -{ 0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 }; -#define GIC_BASE_IRQ 0 -#endif - -#define FROM_SYSBUSGIC(type, dev) \ - DO_UPCAST(type, gic, FROM_SYSBUS(gic_state, dev)) +static const uint8_t gic_id[] = { + 0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 +}; -typedef struct gic_irq_state -{ - /* The enable bits are only banked for per-cpu interrupts. */ - unsigned enabled:NCPU; - unsigned pending:NCPU; - unsigned active:NCPU; - unsigned level:NCPU; - unsigned model:1; /* 0 = N:N, 1 = 1:N */ - unsigned trigger:1; /* nonzero = edge triggered. 
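 *
 * Illustrative aside (not part of this patch): each of the NCPU-wide
 * bitfields above is a per-CPU bitmap, one bit per CPU interface.
 * Marking IRQ 42 pending for CPU 1 therefore amounts to:
 *
 *   int cm = 1 << 1;                    (the CPU mask for CPU 1)
 *   s->irq_state[42].pending |= cm;     (what GIC_SET_PENDING(42, cm) does)
 *
 * which is exactly what the GIC_* helper macros, moved by this patch
 * into arm_gic_internal.h, expand to.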
*/ -} gic_irq_state; - -#define ALL_CPU_MASK ((unsigned)(((1 << NCPU) - 1))) -#if NCPU > 1 #define NUM_CPU(s) ((s)->num_cpu) -#else -#define NUM_CPU(s) 1 -#endif - -#define GIC_SET_ENABLED(irq, cm) s->irq_state[irq].enabled |= (cm) -#define GIC_CLEAR_ENABLED(irq, cm) s->irq_state[irq].enabled &= ~(cm) -#define GIC_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0) -#define GIC_SET_PENDING(irq, cm) s->irq_state[irq].pending |= (cm) -#define GIC_CLEAR_PENDING(irq, cm) s->irq_state[irq].pending &= ~(cm) -#define GIC_TEST_PENDING(irq, cm) ((s->irq_state[irq].pending & (cm)) != 0) -#define GIC_SET_ACTIVE(irq, cm) s->irq_state[irq].active |= (cm) -#define GIC_CLEAR_ACTIVE(irq, cm) s->irq_state[irq].active &= ~(cm) -#define GIC_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0) -#define GIC_SET_MODEL(irq) s->irq_state[irq].model = 1 -#define GIC_CLEAR_MODEL(irq) s->irq_state[irq].model = 0 -#define GIC_TEST_MODEL(irq) s->irq_state[irq].model -#define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level = (cm) -#define GIC_CLEAR_LEVEL(irq, cm) s->irq_state[irq].level &= ~(cm) -#define GIC_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0) -#define GIC_SET_TRIGGER(irq) s->irq_state[irq].trigger = 1 -#define GIC_CLEAR_TRIGGER(irq) s->irq_state[irq].trigger = 0 -#define GIC_TEST_TRIGGER(irq) s->irq_state[irq].trigger -#define GIC_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ? \ - s->priority1[irq][cpu] : \ - s->priority2[(irq) - GIC_INTERNAL]) -#ifdef NVIC -#define GIC_TARGET(irq) 1 -#else -#define GIC_TARGET(irq) s->irq_target[irq] -#endif - -typedef struct gic_state -{ - SysBusDevice busdev; - qemu_irq parent_irq[NCPU]; - int enabled; - int cpu_enabled[NCPU]; - - gic_irq_state irq_state[GIC_MAXIRQ]; -#ifndef NVIC - int irq_target[GIC_MAXIRQ]; -#endif - int priority1[GIC_INTERNAL][NCPU]; - int priority2[GIC_MAXIRQ - GIC_INTERNAL]; - int last_active[GIC_MAXIRQ][NCPU]; - - int priority_mask[NCPU]; - int running_irq[NCPU]; - int running_priority[NCPU]; - int current_pending[NCPU]; - -#if NCPU > 1 - uint32_t num_cpu; -#endif - - MemoryRegion iomem; /* Distributor */ -#ifndef NVIC - /* This is just so we can have an opaque pointer which identifies - * both this GIC and which CPU interface we should be accessing. - */ - struct gic_state *backref[NCPU]; - MemoryRegion cpuiomem[NCPU+1]; /* CPU interfaces */ -#endif - uint32_t num_irq; -} gic_state; static inline int gic_get_current_cpu(gic_state *s) { -#if NCPU > 1 if (s->num_cpu > 1) { return cpu_single_env->cpu_index; } -#endif return 0; } /* TODO: Many places that call this routine could be optimized. */ /* Update interrupt status after enabled or pending bits have been changed. */ -static void gic_update(gic_state *s) +void gic_update(gic_state *s) { int best_irq; int best_prio; @@ -185,8 +84,7 @@ static void gic_update(gic_state *s) } } -#ifdef NVIC -static void gic_set_pending_private(gic_state *s, int cpu, int irq) +void gic_set_pending_private(gic_state *s, int cpu, int irq) { int cm = 1 << cpu; @@ -197,7 +95,6 @@ static void gic_set_pending_private(gic_state *s, int cpu, int irq) GIC_SET_PENDING(irq, cm); gic_update(s); } -#endif /* Process a change in an external IRQ input. 
*/ static void gic_set_irq(void *opaque, int irq, int level) @@ -251,7 +148,7 @@ static void gic_set_running_irq(gic_state *s, int cpu, int irq) gic_update(s); } -static uint32_t gic_acknowledge_irq(gic_state *s, int cpu) +uint32_t gic_acknowledge_irq(gic_state *s, int cpu) { int new_irq; int cm = 1 << cpu; @@ -270,7 +167,7 @@ static uint32_t gic_acknowledge_irq(gic_state *s, int cpu) return new_irq; } -static void gic_complete_irq(gic_state * s, int cpu, int irq) +void gic_complete_irq(gic_state *s, int cpu, int irq) { int update = 0; int cm = 1 << cpu; @@ -328,7 +225,6 @@ static uint32_t gic_dist_readb(void *opaque, target_phys_addr_t offset) cpu = gic_get_current_cpu(s); cm = 1 << cpu; if (offset < 0x100) { -#ifndef NVIC if (offset == 0) return s->enabled; if (offset == 4) @@ -339,7 +235,6 @@ static uint32_t gic_dist_readb(void *opaque, target_phys_addr_t offset) /* Interrupt Security , RAZ/WI */ return 0; } -#endif goto bad_reg; } else if (offset < 0x200) { /* Interrupt Set/Clear Enable. */ @@ -390,16 +285,21 @@ static uint32_t gic_dist_readb(void *opaque, target_phys_addr_t offset) if (irq >= s->num_irq) goto bad_reg; res = GIC_GET_PRIORITY(irq, cpu); -#ifndef NVIC } else if (offset < 0xc00) { /* Interrupt CPU Target. */ - irq = (offset - 0x800) + GIC_BASE_IRQ; - if (irq >= s->num_irq) - goto bad_reg; - if (irq >= 29 && irq <= 31) { - res = cm; + if (s->num_cpu == 1 && s->revision != REV_11MPCORE) { + /* For uniprocessor GICs these RAZ/WI */ + res = 0; } else { - res = GIC_TARGET(irq); + irq = (offset - 0x800) + GIC_BASE_IRQ; + if (irq >= s->num_irq) { + goto bad_reg; + } + if (irq >= 29 && irq <= 31) { + res = cm; + } else { + res = GIC_TARGET(irq); + } } } else if (offset < 0xf00) { /* Interrupt Configuration. */ @@ -413,7 +313,6 @@ static uint32_t gic_dist_readb(void *opaque, target_phys_addr_t offset) if (GIC_TEST_TRIGGER(irq + i)) res |= (2 << (i * 2)); } -#endif } else if (offset < 0xfe0) { goto bad_reg; } else /* offset >= 0xfe0 */ { @@ -440,13 +339,6 @@ static uint32_t gic_dist_readw(void *opaque, target_phys_addr_t offset) static uint32_t gic_dist_readl(void *opaque, target_phys_addr_t offset) { uint32_t val; -#ifdef NVIC - gic_state *s = (gic_state *)opaque; - uint32_t addr; - addr = offset; - if (addr < 0x100 || addr > 0xd00) - return nvic_readl(s, addr); -#endif val = gic_dist_readw(opaque, offset); val |= gic_dist_readw(opaque, offset + 2) << 16; return val; @@ -462,9 +354,6 @@ static void gic_dist_writeb(void *opaque, target_phys_addr_t offset, cpu = gic_get_current_cpu(s); if (offset < 0x100) { -#ifdef NVIC - goto bad_reg; -#else if (offset == 0) { s->enabled = (value & 1); DPRINTF("Distribution %sabled\n", s->enabled ? "En" : "Dis"); @@ -475,7 +364,6 @@ static void gic_dist_writeb(void *opaque, target_phys_addr_t offset, } else { goto bad_reg; } -#endif } else if (offset < 0x180) { /* Interrupt Set Enable. */ irq = (offset - 0x100) * 8 + GIC_BASE_IRQ; @@ -557,17 +445,22 @@ static void gic_dist_writeb(void *opaque, target_phys_addr_t offset, } else { s->priority2[irq - GIC_INTERNAL] = value; } -#ifndef NVIC } else if (offset < 0xc00) { - /* Interrupt CPU Target. */ - irq = (offset - 0x800) + GIC_BASE_IRQ; - if (irq >= s->num_irq) - goto bad_reg; - if (irq < 29) - value = 0; - else if (irq < GIC_INTERNAL) - value = ALL_CPU_MASK; - s->irq_target[irq] = value & ALL_CPU_MASK; + /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the + * annoying exception of the 11MPCore's GIC. 
+ */ + if (s->num_cpu != 1 || s->revision == REV_11MPCORE) { + irq = (offset - 0x800) + GIC_BASE_IRQ; + if (irq >= s->num_irq) { + goto bad_reg; + } + if (irq < 29) { + value = 0; + } else if (irq < GIC_INTERNAL) { + value = ALL_CPU_MASK; + } + s->irq_target[irq] = value & ALL_CPU_MASK; + } } else if (offset < 0xf00) { /* Interrupt Configuration. */ irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ; @@ -587,7 +480,6 @@ static void gic_dist_writeb(void *opaque, target_phys_addr_t offset, GIC_CLEAR_TRIGGER(irq + i); } } -#endif } else { /* 0xf00 is only handled for 32-bit writes. */ goto bad_reg; @@ -609,14 +501,6 @@ static void gic_dist_writel(void *opaque, target_phys_addr_t offset, uint32_t value) { gic_state *s = (gic_state *)opaque; -#ifdef NVIC - uint32_t addr; - addr = offset; - if (addr < 0x100 || (addr > 0xd00 && addr != 0xf00)) { - nvic_writel(s, addr, value); - return; - } -#endif if (offset == 0xf00) { int cpu; int irq; @@ -655,7 +539,6 @@ static const MemoryRegionOps gic_dist_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -#ifndef NVIC static uint32_t gic_cpu_read(gic_state *s, int cpu, int offset) { switch (offset) { @@ -747,141 +630,12 @@ static const MemoryRegionOps gic_cpu_ops = { .write = gic_do_cpu_write, .endianness = DEVICE_NATIVE_ENDIAN, }; -#endif - -static void gic_reset(DeviceState *dev) -{ - gic_state *s = FROM_SYSBUS(gic_state, sysbus_from_qdev(dev)); - int i; - memset(s->irq_state, 0, GIC_MAXIRQ * sizeof(gic_irq_state)); - for (i = 0 ; i < NUM_CPU(s); i++) { - s->priority_mask[i] = 0xf0; - s->current_pending[i] = 1023; - s->running_irq[i] = 1023; - s->running_priority[i] = 0x100; -#ifdef NVIC - /* The NVIC doesn't have per-cpu interfaces, so enable by default. */ - s->cpu_enabled[i] = 1; -#else - s->cpu_enabled[i] = 0; -#endif - } - for (i = 0; i < 16; i++) { - GIC_SET_ENABLED(i, ALL_CPU_MASK); - GIC_SET_TRIGGER(i); - } -#ifdef NVIC - /* The NVIC is always enabled. 
*/ - s->enabled = 1; -#else - s->enabled = 0; -#endif -} -static void gic_save(QEMUFile *f, void *opaque) -{ - gic_state *s = (gic_state *)opaque; - int i; - int j; - - qemu_put_be32(f, s->enabled); - for (i = 0; i < NUM_CPU(s); i++) { - qemu_put_be32(f, s->cpu_enabled[i]); - for (j = 0; j < GIC_INTERNAL; j++) - qemu_put_be32(f, s->priority1[j][i]); - for (j = 0; j < s->num_irq; j++) - qemu_put_be32(f, s->last_active[j][i]); - qemu_put_be32(f, s->priority_mask[i]); - qemu_put_be32(f, s->running_irq[i]); - qemu_put_be32(f, s->running_priority[i]); - qemu_put_be32(f, s->current_pending[i]); - } - for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) { - qemu_put_be32(f, s->priority2[i]); - } - for (i = 0; i < s->num_irq; i++) { -#ifndef NVIC - qemu_put_be32(f, s->irq_target[i]); -#endif - qemu_put_byte(f, s->irq_state[i].enabled); - qemu_put_byte(f, s->irq_state[i].pending); - qemu_put_byte(f, s->irq_state[i].active); - qemu_put_byte(f, s->irq_state[i].level); - qemu_put_byte(f, s->irq_state[i].model); - qemu_put_byte(f, s->irq_state[i].trigger); - } -} - -static int gic_load(QEMUFile *f, void *opaque, int version_id) -{ - gic_state *s = (gic_state *)opaque; - int i; - int j; - - if (version_id != 2) - return -EINVAL; - - s->enabled = qemu_get_be32(f); - for (i = 0; i < NUM_CPU(s); i++) { - s->cpu_enabled[i] = qemu_get_be32(f); - for (j = 0; j < GIC_INTERNAL; j++) - s->priority1[j][i] = qemu_get_be32(f); - for (j = 0; j < s->num_irq; j++) - s->last_active[j][i] = qemu_get_be32(f); - s->priority_mask[i] = qemu_get_be32(f); - s->running_irq[i] = qemu_get_be32(f); - s->running_priority[i] = qemu_get_be32(f); - s->current_pending[i] = qemu_get_be32(f); - } - for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) { - s->priority2[i] = qemu_get_be32(f); - } - for (i = 0; i < s->num_irq; i++) { -#ifndef NVIC - s->irq_target[i] = qemu_get_be32(f); -#endif - s->irq_state[i].enabled = qemu_get_byte(f); - s->irq_state[i].pending = qemu_get_byte(f); - s->irq_state[i].active = qemu_get_byte(f); - s->irq_state[i].level = qemu_get_byte(f); - s->irq_state[i].model = qemu_get_byte(f); - s->irq_state[i].trigger = qemu_get_byte(f); - } - - return 0; -} - -#if NCPU > 1 -static void gic_init(gic_state *s, int num_cpu, int num_irq) -#else -static void gic_init(gic_state *s, int num_irq) -#endif +void gic_init_irqs_and_distributor(gic_state *s, int num_irq) { int i; -#if NCPU > 1 - s->num_cpu = num_cpu; - if (s->num_cpu > NCPU) { - hw_error("requested %u CPUs exceeds GIC maximum %d\n", - num_cpu, NCPU); - } -#endif - s->num_irq = num_irq + GIC_BASE_IRQ; - if (s->num_irq > GIC_MAXIRQ) { - hw_error("requested %u interrupt lines exceeds GIC maximum %d\n", - num_irq, GIC_MAXIRQ); - } - /* ITLinesNumber is represented as (N / 32) - 1 (see - * gic_dist_readb) so this is an implementation imposed - * restriction, not an architectural one: - */ - if (s->num_irq < 32 || (s->num_irq % 32)) { - hw_error("%d interrupt lines unsupported: not divisible by 32\n", - num_irq); - } - i = s->num_irq - GIC_INTERNAL; -#ifndef NVIC /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU. * GPIO array layout is thus: * [0..N-1] SPIs @@ -889,14 +643,27 @@ static void gic_init(gic_state *s, int num_irq) * [N+32..N+63] PPIs for CPU 1 * ... 
*/ - i += (GIC_INTERNAL * num_cpu); -#endif + if (s->revision != REV_NVIC) { + i += (GIC_INTERNAL * s->num_cpu); + } qdev_init_gpio_in(&s->busdev.qdev, gic_set_irq, i); for (i = 0; i < NUM_CPU(s); i++) { sysbus_init_irq(&s->busdev, &s->parent_irq[i]); } memory_region_init_io(&s->iomem, &gic_dist_ops, s, "gic_dist", 0x1000); -#ifndef NVIC +} + +static int arm_gic_init(SysBusDevice *dev) +{ + /* Device instance init function for the GIC sysbus device */ + int i; + gic_state *s = FROM_SYSBUS(gic_state, dev); + ARMGICClass *agc = ARM_GIC_GET_CLASS(s); + + agc->parent_init(dev); + + gic_init_irqs_and_distributor(s, s->num_irq); + /* Memory regions for the CPU interfaces (NVIC doesn't have these): * a region for "CPU interface for this core", then a region for * "CPU interface for core 0", "for core 1", ... @@ -912,19 +679,6 @@ static void gic_init(gic_state *s, int num_irq) memory_region_init_io(&s->cpuiomem[i+1], &gic_cpu_ops, &s->backref[i], "gic_cpu", 0x100); } -#endif - - register_savevm(NULL, "arm_gic", -1, 2, gic_save, gic_load, s); -} - -#ifndef NVIC - -static int arm_gic_init(SysBusDevice *dev) -{ - /* Device instance init function for the GIC sysbus device */ - int i; - gic_state *s = FROM_SYSBUS(gic_state, dev); - gic_init(s, s->num_cpu, s->num_irq); /* Distributor */ sysbus_init_mmio(dev, &s->iomem); /* cpu interfaces (one for "current cpu" plus one per cpu) */ @@ -934,25 +688,19 @@ static int arm_gic_init(SysBusDevice *dev) return 0; } -static Property arm_gic_properties[] = { - DEFINE_PROP_UINT32("num-cpu", gic_state, num_cpu, 1), - DEFINE_PROP_UINT32("num-irq", gic_state, num_irq, 32), - DEFINE_PROP_END_OF_LIST(), -}; - static void arm_gic_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass); + ARMGICClass *agc = ARM_GIC_CLASS(klass); + agc->parent_init = sbc->init; sbc->init = arm_gic_init; - dc->props = arm_gic_properties; - dc->reset = gic_reset; dc->no_user = 1; } static TypeInfo arm_gic_info = { - .name = "arm_gic", - .parent = TYPE_SYS_BUS_DEVICE, + .name = TYPE_ARM_GIC, + .parent = TYPE_ARM_GIC_COMMON, .instance_size = sizeof(gic_state), .class_init = arm_gic_class_init, }; @@ -963,5 +711,3 @@ static void arm_gic_register_types(void) } type_init(arm_gic_register_types) - -#endif diff --git a/hw/arm_gic_common.c b/hw/arm_gic_common.c new file mode 100644 index 0000000000..360e7823f7 --- /dev/null +++ b/hw/arm_gic_common.c @@ -0,0 +1,184 @@ +/* + * ARM GIC support - common bits of emulated and KVM kernel model + * + * Copyright (c) 2012 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "arm_gic_internal.h" + +static void gic_save(QEMUFile *f, void *opaque) +{ + gic_state *s = (gic_state *)opaque; + int i; + int j; + + qemu_put_be32(f, s->enabled); + for (i = 0; i < s->num_cpu; i++) { + qemu_put_be32(f, s->cpu_enabled[i]); + for (j = 0; j < GIC_INTERNAL; j++) { + qemu_put_be32(f, s->priority1[j][i]); + } + for (j = 0; j < s->num_irq; j++) { + qemu_put_be32(f, s->last_active[j][i]); + } + qemu_put_be32(f, s->priority_mask[i]); + qemu_put_be32(f, s->running_irq[i]); + qemu_put_be32(f, s->running_priority[i]); + qemu_put_be32(f, s->current_pending[i]); + } + for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) { + qemu_put_be32(f, s->priority2[i]); + } + for (i = 0; i < s->num_irq; i++) { + qemu_put_be32(f, s->irq_target[i]); + qemu_put_byte(f, s->irq_state[i].enabled); + qemu_put_byte(f, s->irq_state[i].pending); + qemu_put_byte(f, s->irq_state[i].active); + qemu_put_byte(f, s->irq_state[i].level); + qemu_put_byte(f, s->irq_state[i].model); + qemu_put_byte(f, s->irq_state[i].trigger); + } +} + +static int gic_load(QEMUFile *f, void *opaque, int version_id) +{ + gic_state *s = (gic_state *)opaque; + int i; + int j; + + if (version_id != 3) { + return -EINVAL; + } + + s->enabled = qemu_get_be32(f); + for (i = 0; i < s->num_cpu; i++) { + s->cpu_enabled[i] = qemu_get_be32(f); + for (j = 0; j < GIC_INTERNAL; j++) { + s->priority1[j][i] = qemu_get_be32(f); + } + for (j = 0; j < s->num_irq; j++) { + s->last_active[j][i] = qemu_get_be32(f); + } + s->priority_mask[i] = qemu_get_be32(f); + s->running_irq[i] = qemu_get_be32(f); + s->running_priority[i] = qemu_get_be32(f); + s->current_pending[i] = qemu_get_be32(f); + } + for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) { + s->priority2[i] = qemu_get_be32(f); + } + for (i = 0; i < s->num_irq; i++) { + s->irq_target[i] = qemu_get_be32(f); + s->irq_state[i].enabled = qemu_get_byte(f); + s->irq_state[i].pending = qemu_get_byte(f); + s->irq_state[i].active = qemu_get_byte(f); + s->irq_state[i].level = qemu_get_byte(f); + s->irq_state[i].model = qemu_get_byte(f); + s->irq_state[i].trigger = qemu_get_byte(f); + } + + return 0; +} + +static int arm_gic_common_init(SysBusDevice *dev) +{ + gic_state *s = FROM_SYSBUS(gic_state, dev); + int num_irq = s->num_irq; + + if (s->num_cpu > NCPU) { + hw_error("requested %u CPUs exceeds GIC maximum %d\n", + s->num_cpu, NCPU); + } + s->num_irq += GIC_BASE_IRQ; + if (s->num_irq > GIC_MAXIRQ) { + hw_error("requested %u interrupt lines exceeds GIC maximum %d\n", + num_irq, GIC_MAXIRQ); + } + /* ITLinesNumber is represented as (N / 32) - 1 (see + * gic_dist_readb) so this is an implementation imposed + * restriction, not an architectural one: + */ + if (s->num_irq < 32 || (s->num_irq % 32)) { + hw_error("%d interrupt lines unsupported: not divisible by 32\n", + num_irq); + } + + register_savevm(NULL, "arm_gic", -1, 3, gic_save, gic_load, s); + return 0; +} + +static void arm_gic_common_reset(DeviceState *dev) +{ + gic_state *s = FROM_SYSBUS(gic_state, sysbus_from_qdev(dev)); + int i; + memset(s->irq_state, 0, GIC_MAXIRQ * sizeof(gic_irq_state)); + for (i = 0 ; i < s->num_cpu; i++) { + s->priority_mask[i] = 0xf0; + s->current_pending[i] = 1023; + s->running_irq[i] = 1023; + s->running_priority[i] = 0x100; + s->cpu_enabled[i] = 0; + } + for (i = 0; i < 16; i++) { + GIC_SET_ENABLED(i, ALL_CPU_MASK); + GIC_SET_TRIGGER(i); + } + if (s->num_cpu == 1) { + /* For uniprocessor GICs all interrupts always target the sole CPU */ + for (i = 0; i < GIC_MAXIRQ; i++) { + s->irq_target[i] = 1; + } + } + s->enabled = 
0; +} + +static Property arm_gic_common_properties[] = { + DEFINE_PROP_UINT32("num-cpu", gic_state, num_cpu, 1), + DEFINE_PROP_UINT32("num-irq", gic_state, num_irq, 32), + /* Revision can be 1 or 2 for GIC architecture specification + * versions 1 or 2, or 0 to indicate the legacy 11MPCore GIC. + * (Internally, 0xffffffff also indicates "not a GIC but an NVIC".) + */ + DEFINE_PROP_UINT32("revision", gic_state, revision, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void arm_gic_common_class_init(ObjectClass *klass, void *data) +{ + SysBusDeviceClass *sc = SYS_BUS_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + dc->reset = arm_gic_common_reset; + dc->props = arm_gic_common_properties; + dc->no_user = 1; + sc->init = arm_gic_common_init; +} + +static TypeInfo arm_gic_common_type = { + .name = TYPE_ARM_GIC_COMMON, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(gic_state), + .class_size = sizeof(ARMGICCommonClass), + .class_init = arm_gic_common_class_init, + .abstract = true, +}; + +static void register_types(void) +{ + type_register_static(&arm_gic_common_type); +} + +type_init(register_types) diff --git a/hw/arm_gic_internal.h b/hw/arm_gic_internal.h new file mode 100644 index 0000000000..db4fad564f --- /dev/null +++ b/hw/arm_gic_internal.h @@ -0,0 +1,136 @@ +/* + * ARM GIC support - internal interfaces + * + * Copyright (c) 2012 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef QEMU_ARM_GIC_INTERNAL_H +#define QEMU_ARM_GIC_INTERNAL_H + +#include "sysbus.h" + +/* Maximum number of possible interrupts, determined by the GIC architecture */ +#define GIC_MAXIRQ 1020 +/* First 32 are private to each CPU (SGIs and PPIs). */ +#define GIC_INTERNAL 32 +/* Maximum number of possible CPU interfaces, determined by GIC architecture */ +#define NCPU 8 + +#define ALL_CPU_MASK ((unsigned)(((1 << NCPU) - 1))) + +/* The NVIC has 16 internal vectors. However these are not exposed + through the normal GIC interface. */ +#define GIC_BASE_IRQ ((s->revision == REV_NVIC) ? 
32 : 0) + +#define GIC_SET_ENABLED(irq, cm) s->irq_state[irq].enabled |= (cm) +#define GIC_CLEAR_ENABLED(irq, cm) s->irq_state[irq].enabled &= ~(cm) +#define GIC_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0) +#define GIC_SET_PENDING(irq, cm) s->irq_state[irq].pending |= (cm) +#define GIC_CLEAR_PENDING(irq, cm) s->irq_state[irq].pending &= ~(cm) +#define GIC_TEST_PENDING(irq, cm) ((s->irq_state[irq].pending & (cm)) != 0) +#define GIC_SET_ACTIVE(irq, cm) s->irq_state[irq].active |= (cm) +#define GIC_CLEAR_ACTIVE(irq, cm) s->irq_state[irq].active &= ~(cm) +#define GIC_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0) +#define GIC_SET_MODEL(irq) s->irq_state[irq].model = 1 +#define GIC_CLEAR_MODEL(irq) s->irq_state[irq].model = 0 +#define GIC_TEST_MODEL(irq) s->irq_state[irq].model +#define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level = (cm) +#define GIC_CLEAR_LEVEL(irq, cm) s->irq_state[irq].level &= ~(cm) +#define GIC_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0) +#define GIC_SET_TRIGGER(irq) s->irq_state[irq].trigger = 1 +#define GIC_CLEAR_TRIGGER(irq) s->irq_state[irq].trigger = 0 +#define GIC_TEST_TRIGGER(irq) s->irq_state[irq].trigger +#define GIC_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ? \ + s->priority1[irq][cpu] : \ + s->priority2[(irq) - GIC_INTERNAL]) +#define GIC_TARGET(irq) s->irq_target[irq] + +typedef struct gic_irq_state { + /* The enable bits are only banked for per-cpu interrupts. */ + unsigned enabled:NCPU; + unsigned pending:NCPU; + unsigned active:NCPU; + unsigned level:NCPU; + unsigned model:1; /* 0 = N:N, 1 = 1:N */ + unsigned trigger:1; /* nonzero = edge triggered. */ +} gic_irq_state; + +typedef struct gic_state { + SysBusDevice busdev; + qemu_irq parent_irq[NCPU]; + int enabled; + int cpu_enabled[NCPU]; + + gic_irq_state irq_state[GIC_MAXIRQ]; + int irq_target[GIC_MAXIRQ]; + int priority1[GIC_INTERNAL][NCPU]; + int priority2[GIC_MAXIRQ - GIC_INTERNAL]; + int last_active[GIC_MAXIRQ][NCPU]; + + int priority_mask[NCPU]; + int running_irq[NCPU]; + int running_priority[NCPU]; + int current_pending[NCPU]; + + uint32_t num_cpu; + + MemoryRegion iomem; /* Distributor */ + /* This is just so we can have an opaque pointer which identifies + * both this GIC and which CPU interface we should be accessing. 
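 *
 * Reconstruction (the handler bodies are not shown in this hunk): each
 * per-CPU MMIO region is registered with &s->backref[i] as its opaque
 * pointer, so a CPU-interface read handler can recover both the GIC
 * state and the CPU index with pointer arithmetic along these lines:
 *
 *   gic_state **backref = opaque;
 *   gic_state *s = *backref;
 *   int cpu = backref - s->backref;
 *   return gic_cpu_read(s, cpu, addr);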
+ */ + struct gic_state *backref[NCPU]; + MemoryRegion cpuiomem[NCPU+1]; /* CPU interfaces */ + uint32_t num_irq; + uint32_t revision; +} gic_state; + +/* The special cases for the revision property: */ +#define REV_11MPCORE 0 +#define REV_NVIC 0xffffffff + +void gic_set_pending_private(gic_state *s, int cpu, int irq); +uint32_t gic_acknowledge_irq(gic_state *s, int cpu); +void gic_complete_irq(gic_state *s, int cpu, int irq); +void gic_update(gic_state *s); +void gic_init_irqs_and_distributor(gic_state *s, int num_irq); + +#define TYPE_ARM_GIC_COMMON "arm_gic_common" +#define ARM_GIC_COMMON(obj) \ + OBJECT_CHECK(gic_state, (obj), TYPE_ARM_GIC_COMMON) +#define ARM_GIC_COMMON_CLASS(klass) \ + OBJECT_CLASS_CHECK(ARMGICCommonClass, (klass), TYPE_ARM_GIC_COMMON) +#define ARM_GIC_COMMON_GET_CLASS(obj) \ + OBJECT_GET_CLASS(ARMGICCommonClass, (obj), TYPE_ARM_GIC_COMMON) + +typedef struct ARMGICCommonClass { + SysBusDeviceClass parent_class; +} ARMGICCommonClass; + +#define TYPE_ARM_GIC "arm_gic" +#define ARM_GIC(obj) \ + OBJECT_CHECK(gic_state, (obj), TYPE_ARM_GIC) +#define ARM_GIC_CLASS(klass) \ + OBJECT_CLASS_CHECK(ARMGICClass, (klass), TYPE_ARM_GIC) +#define ARM_GIC_GET_CLASS(obj) \ + OBJECT_GET_CLASS(ARMGICClass, (obj), TYPE_ARM_GIC) + +typedef struct ARMGICClass { + ARMGICCommonClass parent_class; + int (*parent_init)(SysBusDevice *dev); +} ARMGICClass; + +#endif /* !QEMU_ARM_GIC_INTERNAL_H */ diff --git a/hw/arm_l2x0.c b/hw/arm_l2x0.c index 09f290c85f..de6a0863d8 100644 --- a/hw/arm_l2x0.c +++ b/hw/arm_l2x0.c @@ -161,7 +161,7 @@ static int l2x0_priv_init(SysBusDevice *dev) } static Property l2x0_properties[] = { - DEFINE_PROP_UINT32("type", l2x0_state, cache_type, 0x1c100100), + DEFINE_PROP_UINT32("cache-type", l2x0_state, cache_type, 0x1c100100), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/arm_pic.c b/hw/arm_pic.c index 109496528c..ffb4d4171a 100644 --- a/hw/arm_pic.c +++ b/hw/arm_pic.c @@ -13,7 +13,9 @@ /* Input 0 is IRQ and input 1 is FIQ. 
*/ static void arm_pic_cpu_handler(void *opaque, int irq, int level) { - CPUARMState *env = (CPUARMState *)opaque; + ARMCPU *cpu = opaque; + CPUARMState *env = &cpu->env; + switch (irq) { case ARM_PIC_CPU_IRQ: if (level) @@ -32,7 +34,7 @@ static void arm_pic_cpu_handler(void *opaque, int irq, int level) } } -qemu_irq *arm_pic_init_cpu(CPUARMState *env) +qemu_irq *arm_pic_init_cpu(ARMCPU *cpu) { - return qemu_allocate_irqs(arm_pic_cpu_handler, env, 2); + return qemu_allocate_irqs(arm_pic_cpu_handler, cpu, 2); } diff --git a/hw/armv7m.c b/hw/armv7m.c index 418139aa08..8cec78db96 100644 --- a/hw/armv7m.c +++ b/hw/armv7m.c @@ -215,7 +215,7 @@ qemu_irq *armv7m_init(MemoryRegion *address_space_mem, nvic = qdev_create(NULL, "armv7m_nvic"); env->nvic = nvic; qdev_init_nofail(nvic); - cpu_pic = arm_pic_init_cpu(env); + cpu_pic = arm_pic_init_cpu(cpu); sysbus_connect_irq(sysbus_from_qdev(nvic), 0, cpu_pic[ARM_PIC_CPU_IRQ]); for (i = 0; i < 64; i++) { pic[i] = qdev_get_gpio_in(nvic, i); diff --git a/hw/armv7m_nvic.c b/hw/armv7m_nvic.c index 986a6bbd0c..4867c1d5fa 100644 --- a/hw/armv7m_nvic.c +++ b/hw/armv7m_nvic.c @@ -14,13 +14,7 @@ #include "qemu-timer.h" #include "arm-misc.h" #include "exec-memory.h" - -#define NVIC 1 - -static uint32_t nvic_readl(void *opaque, uint32_t offset); -static void nvic_writel(void *opaque, uint32_t offset, uint32_t value); - -#include "arm_gic.c" +#include "arm_gic_internal.h" typedef struct { gic_state gic; @@ -30,9 +24,38 @@ typedef struct { int64_t tick; QEMUTimer *timer; } systick; + MemoryRegion sysregmem; + MemoryRegion gic_iomem_alias; + MemoryRegion container; uint32_t num_irq; } nvic_state; +#define TYPE_NVIC "armv7m_nvic" +/** + * NVICClass: + * @parent_reset: the parent class' reset handler. + * + * A model of the v7M NVIC and System Controller + */ +typedef struct NVICClass { + /*< private >*/ + ARMGICClass parent_class; + /*< public >*/ + int (*parent_init)(SysBusDevice *dev); + void (*parent_reset)(DeviceState *dev); +} NVICClass; + +#define NVIC_CLASS(klass) \ + OBJECT_CLASS_CHECK(NVICClass, (klass), TYPE_NVIC) +#define NVIC_GET_CLASS(obj) \ + OBJECT_GET_CLASS(NVICClass, (obj), TYPE_NVIC) +#define NVIC(obj) \ + OBJECT_CHECK(nvic_state, (obj), TYPE_NVIC) + +static const uint8_t nvic_id[] = { + 0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1 +}; + /* qemu timers run at 1GHz. We want something closer to 1MHz. */ #define SYSTICK_SCALE 1000ULL @@ -358,12 +381,54 @@ static void nvic_writel(void *opaque, uint32_t offset, uint32_t value) case 0xd38: /* Bus Fault Address. */ case 0xd3c: /* Aux Fault Status. */ goto bad_reg; + case 0xf00: /* Software Triggered Interrupt Register */ + if ((value & 0x1ff) < s->num_irq) { + gic_set_pending_private(&s->gic, 0, value & 0x1ff); + } + break; default: bad_reg: hw_error("NVIC: Bad write offset 0x%x\n", offset); } } +static uint64_t nvic_sysreg_read(void *opaque, target_phys_addr_t addr, + unsigned size) +{ + /* At the moment we only support the ID registers for byte/word access. + * This is not strictly correct as a few of the other registers also + * allow byte access. 
+ */ + uint32_t offset = addr; + if (offset >= 0xfe0) { + if (offset & 3) { + return 0; + } + return nvic_id[(offset - 0xfe0) >> 2]; + } + if (size == 4) { + return nvic_readl(opaque, offset); + } + hw_error("NVIC: Bad read of size %d at offset 0x%x\n", size, offset); +} + +static void nvic_sysreg_write(void *opaque, target_phys_addr_t addr, + uint64_t value, unsigned size) +{ + uint32_t offset = addr; + if (size == 4) { + nvic_writel(opaque, offset, value); + return; + } + hw_error("NVIC: Bad write of size %d at offset 0x%x\n", size, offset); +} + +static const MemoryRegionOps nvic_sysreg_ops = { + .read = nvic_sysreg_read, + .write = nvic_sysreg_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + static const VMStateDescription vmstate_nvic = { .name = "armv7m_nvic", .version_id = 1, @@ -380,20 +445,55 @@ static const VMStateDescription vmstate_nvic = { static void armv7m_nvic_reset(DeviceState *dev) { - nvic_state *s = FROM_SYSBUSGIC(nvic_state, sysbus_from_qdev(dev)); - gic_reset(&s->gic.busdev.qdev); + nvic_state *s = NVIC(dev); + NVICClass *nc = NVIC_GET_CLASS(s); + nc->parent_reset(dev); + /* Common GIC reset resets to disabled; the NVIC doesn't have + * per-CPU interfaces so mark our non-existent CPU interface + * as enabled by default. + */ + s->gic.cpu_enabled[0] = 1; + /* The NVIC as a whole is always enabled. */ + s->gic.enabled = 1; systick_reset(s); } static int armv7m_nvic_init(SysBusDevice *dev) { - nvic_state *s= FROM_SYSBUSGIC(nvic_state, dev); + nvic_state *s = NVIC(dev); + NVICClass *nc = NVIC_GET_CLASS(s); - /* note that for the M profile gic_init() takes the number of external - * interrupt lines only. - */ - gic_init(&s->gic, s->num_irq); - memory_region_add_subregion(get_system_memory(), 0xe000e000, &s->gic.iomem); + /* The NVIC always has only one CPU */ + s->gic.num_cpu = 1; + /* Tell the common code we're an NVIC */ + s->gic.revision = 0xffffffff; + s->gic.num_irq = s->num_irq; + nc->parent_init(dev); + gic_init_irqs_and_distributor(&s->gic, s->num_irq); + /* The NVIC and system controller register area looks like this: + * 0..0xff : system control registers, including systick + * 0x100..0xcff : GIC-like registers + * 0xd00..0xfff : system control registers + * We use overlaying to put the GIC like registers + * over the top of the system control register region. + */ + memory_region_init(&s->container, "nvic", 0x1000); + /* The system register region goes at the bottom of the priority + * stack as it covers the whole page. + */ + memory_region_init_io(&s->sysregmem, &nvic_sysreg_ops, s, + "nvic_sysregs", 0x1000); + memory_region_add_subregion(&s->container, 0, &s->sysregmem); + /* Alias the GIC region so we can get only the section of it + * we need, and layer it on top of the system register region. + */ + memory_region_init_alias(&s->gic_iomem_alias, "nvic-gic", &s->gic.iomem, + 0x100, 0xc00); + memory_region_add_subregion_overlap(&s->container, 0x100, &s->gic.iomem, 1); + /* Map the whole thing into system memory at the location required + * by the v7M architecture. 
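 *
 * Illustrative aside (assumption, not part of this patch): with
 * memory_region_add_subregion_overlap() the subregion registered with
 * the higher priority is the one the guest sees wherever regions
 * overlap, so a window added at priority 1 shadows a full-page region
 * left at the default priority 0 only over the overlapping range, e.g.:
 *
 *   memory_region_add_subregion(&container, 0, &whole_page);            (prio 0)
 *   memory_region_add_subregion_overlap(&container, 0x100, &window, 1); (prio 1 wins)
 *
 * where container, whole_page and window are hypothetical regions; here
 * that is how the GIC-style registers sit on top of the system register
 * page for 0x100..0xcff while the system registers stay visible outside it.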
+ */ + memory_region_add_subregion(get_system_memory(), 0xe000e000, &s->container); s->systick.timer = qemu_new_timer_ns(vm_clock, systick_timer_tick, s); return 0; } @@ -409,9 +509,12 @@ static Property armv7m_nvic_properties[] = { static void armv7m_nvic_class_init(ObjectClass *klass, void *data) { + NVICClass *nc = NVIC_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass); + nc->parent_reset = dc->reset; + nc->parent_init = sdc->init; sdc->init = armv7m_nvic_init; dc->vmsd = &vmstate_nvic; dc->reset = armv7m_nvic_reset; @@ -419,10 +522,11 @@ static void armv7m_nvic_class_init(ObjectClass *klass, void *data) } static TypeInfo armv7m_nvic_info = { - .name = "armv7m_nvic", - .parent = TYPE_SYS_BUS_DEVICE, + .name = TYPE_NVIC, + .parent = TYPE_ARM_GIC_COMMON, .instance_size = sizeof(nvic_state), .class_init = armv7m_nvic_class_init, + .class_size = sizeof(NVICClass), }; static void armv7m_nvic_register_types(void) diff --git a/hw/boards.h b/hw/boards.h index 667177d76d..59c01d0367 100644 --- a/hw/boards.h +++ b/hw/boards.h @@ -29,6 +29,7 @@ typedef struct QEMUMachine { const char *default_machine_opts; GlobalProperty *compat_props; struct QEMUMachine *next; + const char *hw_version; } QEMUMachine; int qemu_register_machine(QEMUMachine *m); diff --git a/hw/bt-sdp.c b/hw/bt-sdp.c index 3e390ab5b9..c0431d1a40 100644 --- a/hw/bt-sdp.c +++ b/hw/bt-sdp.c @@ -834,7 +834,7 @@ SERVICE(hid, ATTRIBUTE(DOC_URL, URL("http://bellard.org/qemu/user-doc.html")) ATTRIBUTE(SVCNAME_PRIMARY, STRING("QEMU Bluetooth HID")) ATTRIBUTE(SVCDESC_PRIMARY, STRING("QEMU Keyboard/Mouse")) - ATTRIBUTE(SVCPROV_PRIMARY, STRING("QEMU " QEMU_VERSION)) + ATTRIBUTE(SVCPROV_PRIMARY, STRING("QEMU")) /* Profile specific */ ATTRIBUTE(DEVICE_RELEASE_NUMBER, UINT16(0x0091)) /* Deprecated, remove */ @@ -908,7 +908,7 @@ SERVICE(sdp, LIST(UUID128(SDP_SERVER_PROFILE_ID) UINT16(0x0100)) )) ATTRIBUTE(DOC_URL, URL("http://bellard.org/qemu/user-doc.html")) - ATTRIBUTE(SVCPROV_PRIMARY, STRING("QEMU " QEMU_VERSION)) + ATTRIBUTE(SVCPROV_PRIMARY, STRING("QEMU")) /* Profile specific */ ATTRIBUTE(VERSION_NUM_LIST, LIST(UINT16(0x0100))) @@ -931,7 +931,7 @@ SERVICE(pnp, LIST(UUID128(PNP_INFO_PROFILE_ID) UINT16(0x0100)) )) ATTRIBUTE(DOC_URL, URL("http://bellard.org/qemu/user-doc.html")) - ATTRIBUTE(SVCPROV_PRIMARY, STRING("QEMU " QEMU_VERSION)) + ATTRIBUTE(SVCPROV_PRIMARY, STRING("QEMU")) /* Profile specific */ ATTRIBUTE(SPECIFICATION_ID, UINT16(0x0100)) diff --git a/hw/cadence_gem.c b/hw/cadence_gem.c index e2140aea2b..87143caf2d 100644 --- a/hw/cadence_gem.c +++ b/hw/cadence_gem.c @@ -339,8 +339,8 @@ typedef struct { uint8_t phy_loop; /* Are we in phy loopback? 
*/ /* The current DMA descriptor pointers */ - target_phys_addr_t rx_desc_addr; - target_phys_addr_t tx_desc_addr; + uint32_t rx_desc_addr; + uint32_t tx_desc_addr; } GemState; @@ -664,7 +664,7 @@ static ssize_t gem_receive(VLANClientState *nc, const uint8_t *buf, size_t size) */ memcpy(rxbuf, buf, size); - memset(rxbuf + size, 0, sizeof(rxbuf - size)); + memset(rxbuf + size, 0, sizeof(rxbuf) - size); rxbuf_ptr = rxbuf; crc_val = cpu_to_le32(crc32(0, rxbuf, MAX(size, 60))); if (size < 60) { diff --git a/hw/cadence_ttc.c b/hw/cadence_ttc.c index 2b5477b688..dd02f86eb9 100644 --- a/hw/cadence_ttc.c +++ b/hw/cadence_ttc.c @@ -405,7 +405,7 @@ static int cadence_ttc_init(SysBusDevice *dev) int i; for (i = 0; i < 3; ++i) { - cadence_timer_init(2500000, &s->timer[i]); + cadence_timer_init(133000000, &s->timer[i]); sysbus_init_irq(dev, &s->timer[i].irq); } diff --git a/hw/cirrus_vga.c b/hw/cirrus_vga.c index afedaa43d3..623dd688d9 100644 --- a/hw/cirrus_vga.c +++ b/hw/cirrus_vga.c @@ -43,6 +43,8 @@ //#define DEBUG_CIRRUS //#define DEBUG_BITBLT +#define VGA_RAM_SIZE (8192 * 1024) + /*************************************** * * definitions @@ -2891,7 +2893,8 @@ static int vga_initfn(ISADevice *dev) ISACirrusVGAState *d = DO_UPCAST(ISACirrusVGAState, dev, dev); VGACommonState *s = &d->cirrus_vga.vga; - vga_common_init(s, VGA_RAM_SIZE); + s->vram_size_mb = VGA_RAM_SIZE >> 20; + vga_common_init(s); cirrus_init_common(&d->cirrus_vga, CIRRUS_ID_CLGD5430, 0, isa_address_space(dev)); s->ds = graphic_console_init(s->update, s->invalidate, @@ -2933,7 +2936,8 @@ static int pci_cirrus_vga_initfn(PCIDevice *dev) int16_t device_id = pc->device_id; /* setup VGA */ - vga_common_init(&s->vga, VGA_RAM_SIZE); + s->vga.vram_size_mb = VGA_RAM_SIZE >> 20; + vga_common_init(&s->vga); cirrus_init_common(s, device_id, 1, pci_address_space(dev)); s->vga.ds = graphic_console_init(s->vga.update, s->vga.invalidate, s->vga.screen_dump, s->vga.text_update, diff --git a/hw/collie.c b/hw/collie.c index 42f4310816..56f89a9f2e 100644 --- a/hw/collie.c +++ b/hw/collie.c @@ -54,7 +54,7 @@ static void collie_init(ram_addr_t ram_size, collie_binfo.kernel_cmdline = kernel_cmdline; collie_binfo.initrd_filename = initrd_filename; collie_binfo.board_id = 0x208; - arm_load_kernel(s->env, &collie_binfo); + arm_load_kernel(s->cpu, &collie_binfo); } static QEMUMachine collie_machine = { diff --git a/hw/cris/Makefile.objs b/hw/cris/Makefile.objs new file mode 100644 index 0000000000..aa9298a0ed --- /dev/null +++ b/hw/cris/Makefile.objs @@ -0,0 +1,13 @@ +# Boards +obj-y = cris_pic_cpu.o +obj-y += cris-boot.o +obj-y += axis_dev88.o + +# IO blocks +obj-y += etraxfs_dma.o +obj-y += etraxfs_pic.o +obj-y += etraxfs_eth.o +obj-y += etraxfs_timer.o +obj-y += etraxfs_ser.o + +obj-y := $(addprefix ../,$(obj-y)) diff --git a/hw/exynos4210.c b/hw/exynos4210.c index afc4bdc7e0..7c58c906de 100644 --- a/hw/exynos4210.c +++ b/hw/exynos4210.c @@ -33,6 +33,9 @@ /* PWM */ #define EXYNOS4210_PWM_BASE_ADDR 0x139D0000 +/* RTC */ +#define EXYNOS4210_RTC_BASE_ADDR 0x10070000 + /* MCT */ #define EXYNOS4210_MCT_BASE_ADDR 0x10050000 @@ -65,7 +68,7 @@ static uint8_t chipid_and_omr[] = { 0x11, 0x02, 0x21, 0x43, 0x09, 0x00, 0x00, 0x00 }; -void exynos4210_write_secondary(CPUARMState *env, +void exynos4210_write_secondary(ARMCPU *cpu, const struct arm_boot_info *info) { int n; @@ -97,23 +100,24 @@ void exynos4210_write_secondary(CPUARMState *env, Exynos4210State *exynos4210_init(MemoryRegion *system_mem, unsigned long ram_size) { - qemu_irq cpu_irq[4]; - int n; + qemu_irq 
cpu_irq[EXYNOS4210_NCPUS]; + int i, n; Exynos4210State *s = g_new(Exynos4210State, 1); qemu_irq *irqp; - qemu_irq gate_irq[EXYNOS4210_IRQ_GATE_NINPUTS]; + qemu_irq gate_irq[EXYNOS4210_NCPUS][EXYNOS4210_IRQ_GATE_NINPUTS]; unsigned long mem_size; DeviceState *dev; SysBusDevice *busdev; for (n = 0; n < EXYNOS4210_NCPUS; n++) { - s->env[n] = cpu_init("cortex-a9"); - if (!s->env[n]) { + s->cpu[n] = cpu_arm_init("cortex-a9"); + if (!s->cpu[n]) { fprintf(stderr, "Unable to find CPU %d definition\n", n); exit(1); } + /* Create PIC controller for each processor instance */ - irqp = arm_pic_init_cpu(s->env[n]); + irqp = arm_pic_init_cpu(s->cpu[n]); /* * Get GICs gpio_in cpu_irq to connect a combiner to them later. @@ -127,16 +131,18 @@ Exynos4210State *exynos4210_init(MemoryRegion *system_mem, s->irq_table = exynos4210_init_irq(&s->irqs); /* IRQ Gate */ - dev = qdev_create(NULL, "exynos4210.irq_gate"); - qdev_init_nofail(dev); - /* Get IRQ Gate input in gate_irq */ - for (n = 0; n < EXYNOS4210_IRQ_GATE_NINPUTS; n++) { - gate_irq[n] = qdev_get_gpio_in(dev, n); - } - busdev = sysbus_from_qdev(dev); - /* Connect IRQ Gate output to cpu_irq */ - for (n = 0; n < EXYNOS4210_NCPUS; n++) { - sysbus_connect_irq(busdev, n, cpu_irq[n]); + for (i = 0; i < EXYNOS4210_NCPUS; i++) { + dev = qdev_create(NULL, "exynos4210.irq_gate"); + qdev_prop_set_uint32(dev, "n_in", EXYNOS4210_IRQ_GATE_NINPUTS); + qdev_init_nofail(dev); + /* Get IRQ Gate input in gate_irq */ + for (n = 0; n < EXYNOS4210_IRQ_GATE_NINPUTS; n++) { + gate_irq[i][n] = qdev_get_gpio_in(dev, n); + } + busdev = sysbus_from_qdev(dev); + + /* Connect IRQ Gate output to cpu_irq */ + sysbus_connect_irq(busdev, 0, cpu_irq[i]); } /* Private memory region and Internal GIC */ @@ -146,7 +152,7 @@ Exynos4210State *exynos4210_init(MemoryRegion *system_mem, busdev = sysbus_from_qdev(dev); sysbus_mmio_map(busdev, 0, EXYNOS4210_SMP_PRIVATE_BASE_ADDR); for (n = 0; n < EXYNOS4210_NCPUS; n++) { - sysbus_connect_irq(busdev, n, gate_irq[n * 2]); + sysbus_connect_irq(busdev, n, gate_irq[n][0]); } for (n = 0; n < EXYNOS4210_INT_GIC_NIRQ; n++) { s->irqs.int_gic_irq[n] = qdev_get_gpio_in(dev, n); @@ -165,7 +171,7 @@ Exynos4210State *exynos4210_init(MemoryRegion *system_mem, /* Map Distributer interface */ sysbus_mmio_map(busdev, 1, EXYNOS4210_EXT_GIC_DIST_BASE_ADDR); for (n = 0; n < EXYNOS4210_NCPUS; n++) { - sysbus_connect_irq(busdev, n, gate_irq[n * 2 + 1]); + sysbus_connect_irq(busdev, n, gate_irq[n][1]); } for (n = 0; n < EXYNOS4210_EXT_GIC_NIRQ; n++) { s->irqs.ext_gic_irq[n] = qdev_get_gpio_in(dev, n); @@ -213,7 +219,7 @@ Exynos4210State *exynos4210_init(MemoryRegion *system_mem, /* mirror of iROM */ memory_region_init_alias(&s->irom_alias_mem, "exynos4210.irom_alias", &s->irom_mem, - EXYNOS4210_IROM_BASE_ADDR, + 0, EXYNOS4210_IROM_SIZE); memory_region_set_readonly(&s->irom_alias_mem, true); memory_region_add_subregion(system_mem, EXYNOS4210_IROM_MIRROR_BASE_ADDR, @@ -255,6 +261,11 @@ Exynos4210State *exynos4210_init(MemoryRegion *system_mem, s->irq_table[exynos4210_get_irq(22, 3)], s->irq_table[exynos4210_get_irq(22, 4)], NULL); + /* RTC */ + sysbus_create_varargs("exynos4210.rtc", EXYNOS4210_RTC_BASE_ADDR, + s->irq_table[exynos4210_get_irq(23, 0)], + s->irq_table[exynos4210_get_irq(23, 1)], + NULL); /* Multi Core Timer */ dev = qdev_create(NULL, "exynos4210.mct"); diff --git a/hw/exynos4210.h b/hw/exynos4210.h index f7c7027302..9b1ae4c8b1 100644 --- a/hw/exynos4210.h +++ b/hw/exynos4210.h @@ -56,7 +56,7 @@ /* * exynos4210 IRQ subsystem stub definitions. 
*/ -#define EXYNOS4210_IRQ_GATE_NINPUTS 8 +#define EXYNOS4210_IRQ_GATE_NINPUTS 2 /* Internal and External GIC */ #define EXYNOS4210_MAX_INT_COMBINER_OUT_IRQ 64 #define EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ 16 @@ -83,7 +83,7 @@ typedef struct Exynos4210Irq { } Exynos4210Irq; typedef struct Exynos4210State { - CPUARMState * env[EXYNOS4210_NCPUS]; + ARMCPU *cpu[EXYNOS4210_NCPUS]; Exynos4210Irq irqs; qemu_irq *irq_table; @@ -97,7 +97,7 @@ typedef struct Exynos4210State { MemoryRegion bootreg_mem; } Exynos4210State; -void exynos4210_write_secondary(CPUARMState *env, +void exynos4210_write_secondary(ARMCPU *cpu, const struct arm_boot_info *info); Exynos4210State *exynos4210_init(MemoryRegion *system_mem, diff --git a/hw/exynos4210_gic.c b/hw/exynos4210_gic.c index e1b215eff0..7d03dd9ae3 100644 --- a/hw/exynos4210_gic.c +++ b/hw/exynos4210_gic.c @@ -362,61 +362,64 @@ static void exynos4210_gic_register_types(void) type_init(exynos4210_gic_register_types) -/* - * IRQGate struct. - * IRQ Gate represents OR gate between GICs to pass IRQ to PIC. +/* IRQ OR Gate struct. + * + * This device models an OR gate. There are n_in input qdev gpio lines and one + * output sysbus IRQ line. The output IRQ level is the logical OR of all the + * gpio inputs. */ typedef struct { SysBusDevice busdev; - qemu_irq pic_irq[EXYNOS4210_NCPUS]; /* output IRQs to PICs */ - uint32_t gpio_level[EXYNOS4210_IRQ_GATE_NINPUTS]; /* Input levels */ + uint32_t n_in; /* number of inputs */ + uint32_t *level; /* input levels */ + qemu_irq out; /* output IRQ */ } Exynos4210IRQGateState; +static Property exynos4210_irq_gate_properties[] = { + DEFINE_PROP_UINT32("n_in", Exynos4210IRQGateState, n_in, 1), + DEFINE_PROP_END_OF_LIST(), +}; + static const VMStateDescription vmstate_exynos4210_irq_gate = { .name = "exynos4210.irq_gate", - .version_id = 1, - .minimum_version_id = 1, - .minimum_version_id_old = 1, + .version_id = 2, + .minimum_version_id = 2, + .minimum_version_id_old = 2, .fields = (VMStateField[]) { - VMSTATE_UINT32_ARRAY(gpio_level, Exynos4210IRQGateState, - EXYNOS4210_IRQ_GATE_NINPUTS), + VMSTATE_VBUFFER_UINT32(level, Exynos4210IRQGateState, 1, NULL, 0, n_in), VMSTATE_END_OF_LIST() } }; -/* Process a change in an external IRQ input. */ +/* Process a change in IRQ input.
*/ static void exynos4210_irq_gate_handler(void *opaque, int irq, int level) { - Exynos4210IRQGateState *s = - (Exynos4210IRQGateState *)opaque; - uint32_t odd, even; - - if (irq & 1) { - odd = irq; - even = irq & ~1; - } else { - even = irq; - odd = irq | 1; - } + Exynos4210IRQGateState *s = (Exynos4210IRQGateState *)opaque; + uint32_t i; - assert(irq < EXYNOS4210_IRQ_GATE_NINPUTS); - s->gpio_level[irq] = level; + assert(irq < s->n_in); - if (s->gpio_level[odd] >= 1 || s->gpio_level[even] >= 1) { - qemu_irq_raise(s->pic_irq[even >> 1]); - } else { - qemu_irq_lower(s->pic_irq[even >> 1]); + s->level[irq] = level; + + for (i = 0; i < s->n_in; i++) { + if (s->level[i] >= 1) { + qemu_irq_raise(s->out); + return; + } } + qemu_irq_lower(s->out); + return; } static void exynos4210_irq_gate_reset(DeviceState *d) { - Exynos4210IRQGateState *s = (Exynos4210IRQGateState *)d; + Exynos4210IRQGateState *s = + DO_UPCAST(Exynos4210IRQGateState, busdev.qdev, d); - memset(&s->gpio_level, 0, sizeof(s->gpio_level)); + memset(s->level, 0, s->n_in * sizeof(*s->level)); } /* @@ -424,19 +427,15 @@ static void exynos4210_irq_gate_reset(DeviceState *d) */ static int exynos4210_irq_gate_init(SysBusDevice *dev) { - unsigned int i; - Exynos4210IRQGateState *s = - FROM_SYSBUS(Exynos4210IRQGateState, dev); + Exynos4210IRQGateState *s = FROM_SYSBUS(Exynos4210IRQGateState, dev); /* Allocate general purpose input signals and connect a handler to each of * them */ - qdev_init_gpio_in(&s->busdev.qdev, exynos4210_irq_gate_handler, - EXYNOS4210_IRQ_GATE_NINPUTS); + qdev_init_gpio_in(&s->busdev.qdev, exynos4210_irq_gate_handler, s->n_in); - /* Connect SysBusDev irqs to device specific irqs */ - for (i = 0; i < EXYNOS4210_NCPUS; i++) { - sysbus_init_irq(dev, &s->pic_irq[i]); - } + s->level = g_malloc0(s->n_in * sizeof(*s->level)); + + sysbus_init_irq(dev, &s->out); return 0; } @@ -449,6 +448,7 @@ static void exynos4210_irq_gate_class_init(ObjectClass *klass, void *data) k->init = exynos4210_irq_gate_init; dc->reset = exynos4210_irq_gate_reset; dc->vmsd = &vmstate_exynos4210_irq_gate; + dc->props = exynos4210_irq_gate_properties; } static TypeInfo exynos4210_irq_gate_info = { diff --git a/hw/exynos4210_mct.c b/hw/exynos4210_mct.c index 7474fcf802..7a22b1f900 100644 --- a/hw/exynos4210_mct.c +++ b/hw/exynos4210_mct.c @@ -376,10 +376,6 @@ static uint64_t exynos4210_gfrc_get_count(Exynos4210MCTGT *s) { uint64_t count = 0; count = ptimer_get_count(s->ptimer_frc); - if (!count) { - /* Timer event was generated and s->reg.cnt holds adequate value */ - return s->reg.cnt; - } count = s->count - count; return s->reg.cnt + count; } diff --git a/hw/exynos4210_pwm.c b/hw/exynos4210_pwm.c index 6243e59c48..0c228280a9 100644 --- a/hw/exynos4210_pwm.c +++ b/hw/exynos4210_pwm.c @@ -200,7 +200,7 @@ static void exynos4210_pwm_tick(void *opaque) ptimer_run(p->timer[id].ptimer, 1); } else { /* stop timer, set status to STOP, see Basic Timer Operation */ - p->reg_tcon = ~TCON_TIMER_START(id); + p->reg_tcon &= ~TCON_TIMER_START(id); ptimer_stop(p->timer[id].ptimer); } } diff --git a/hw/exynos4210_rtc.c b/hw/exynos4210_rtc.c new file mode 100644 index 0000000000..f78102049b --- /dev/null +++ b/hw/exynos4210_rtc.c @@ -0,0 +1,595 @@ +/* + * Samsung exynos4210 Real Time Clock + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. 
+ * Ogurtsov Oleg <o.ogurtsov@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + * + */ + +/* Description: + * Register RTCCON: + * CLKSEL Bit[1] not used + * CLKOUTEN Bit[9] not used + */ + +#include "sysbus.h" +#include "qemu-timer.h" +#include "qemu-common.h" +#include "ptimer.h" + +#include "hw.h" +#include "qemu-timer.h" +#include "sysemu.h" + +#include "exynos4210.h" + +#define DEBUG_RTC 0 + +#if DEBUG_RTC +#define DPRINTF(fmt, ...) \ + do { fprintf(stdout, "RTC: [%24s:%5d] " fmt, __func__, __LINE__, \ + ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) do {} while (0) +#endif + +#define EXYNOS4210_RTC_REG_MEM_SIZE 0x0100 + +#define INTP 0x0030 +#define RTCCON 0x0040 +#define TICCNT 0x0044 +#define RTCALM 0x0050 +#define ALMSEC 0x0054 +#define ALMMIN 0x0058 +#define ALMHOUR 0x005C +#define ALMDAY 0x0060 +#define ALMMON 0x0064 +#define ALMYEAR 0x0068 +#define BCDSEC 0x0070 +#define BCDMIN 0x0074 +#define BCDHOUR 0x0078 +#define BCDDAY 0x007C +#define BCDDAYWEEK 0x0080 +#define BCDMON 0x0084 +#define BCDYEAR 0x0088 +#define CURTICNT 0x0090 + +#define TICK_TIMER_ENABLE 0x0100 +#define TICNT_THRESHHOLD 2 + + +#define RTC_ENABLE 0x0001 + +#define INTP_TICK_ENABLE 0x0001 +#define INTP_ALM_ENABLE 0x0002 + +#define ALARM_INT_ENABLE 0x0040 + +#define RTC_BASE_FREQ 32768 + +typedef struct Exynos4210RTCState { + SysBusDevice busdev; + MemoryRegion iomem; + + /* registers */ + uint32_t reg_intp; + uint32_t reg_rtccon; + uint32_t reg_ticcnt; + uint32_t reg_rtcalm; + uint32_t reg_almsec; + uint32_t reg_almmin; + uint32_t reg_almhour; + uint32_t reg_almday; + uint32_t reg_almmon; + uint32_t reg_almyear; + uint32_t reg_curticcnt; + + ptimer_state *ptimer; /* tick timer */ + ptimer_state *ptimer_1Hz; /* clock timer */ + uint32_t freq; + + qemu_irq tick_irq; /* Time Tick Generator irq */ + qemu_irq alm_irq; /* alarm irq */ + + struct tm current_tm; /* current time */ +} Exynos4210RTCState; + +#define TICCKSEL(value) ((value & (0x0F << 4)) >> 4) + +/*** VMState ***/ +static const VMStateDescription vmstate_exynos4210_rtc_state = { + .name = "exynos4210.rtc", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(reg_intp, Exynos4210RTCState), + VMSTATE_UINT32(reg_rtccon, Exynos4210RTCState), + VMSTATE_UINT32(reg_ticcnt, Exynos4210RTCState), + VMSTATE_UINT32(reg_rtcalm, Exynos4210RTCState), + VMSTATE_UINT32(reg_almsec, Exynos4210RTCState), + VMSTATE_UINT32(reg_almmin, Exynos4210RTCState), + VMSTATE_UINT32(reg_almhour, Exynos4210RTCState), + VMSTATE_UINT32(reg_almday, Exynos4210RTCState), + VMSTATE_UINT32(reg_almmon, Exynos4210RTCState), + VMSTATE_UINT32(reg_almyear, Exynos4210RTCState), + VMSTATE_UINT32(reg_curticcnt, Exynos4210RTCState), + VMSTATE_PTIMER(ptimer, Exynos4210RTCState), + VMSTATE_PTIMER(ptimer_1Hz, Exynos4210RTCState), + VMSTATE_UINT32(freq, Exynos4210RTCState), + VMSTATE_INT32(current_tm.tm_sec, 
Exynos4210RTCState), + VMSTATE_INT32(current_tm.tm_min, Exynos4210RTCState), + VMSTATE_INT32(current_tm.tm_hour, Exynos4210RTCState), + VMSTATE_INT32(current_tm.tm_wday, Exynos4210RTCState), + VMSTATE_INT32(current_tm.tm_mday, Exynos4210RTCState), + VMSTATE_INT32(current_tm.tm_mon, Exynos4210RTCState), + VMSTATE_INT32(current_tm.tm_year, Exynos4210RTCState), + VMSTATE_END_OF_LIST() + } +}; + +#define BCD3DIGITS(x) \ + ((uint32_t)to_bcd((uint8_t)x) + \ + ((uint32_t)to_bcd((uint8_t)((x % 1000) / 100)) << 8)) + +static void check_alarm_raise(Exynos4210RTCState *s) +{ + unsigned int alarm_raise = 0; + struct tm stm = s->current_tm; + + if ((s->reg_rtcalm & 0x01) && + (to_bcd((uint8_t)stm.tm_sec) == (uint8_t)s->reg_almsec)) { + alarm_raise = 1; + } + if ((s->reg_rtcalm & 0x02) && + (to_bcd((uint8_t)stm.tm_min) == (uint8_t)s->reg_almmin)) { + alarm_raise = 1; + } + if ((s->reg_rtcalm & 0x04) && + (to_bcd((uint8_t)stm.tm_hour) == (uint8_t)s->reg_almhour)) { + alarm_raise = 1; + } + if ((s->reg_rtcalm & 0x08) && + (to_bcd((uint8_t)stm.tm_mday) == (uint8_t)s->reg_almday)) { + alarm_raise = 1; + } + if ((s->reg_rtcalm & 0x10) && + (to_bcd((uint8_t)stm.tm_mon) == (uint8_t)s->reg_almmon)) { + alarm_raise = 1; + } + if ((s->reg_rtcalm & 0x20) && + (BCD3DIGITS(stm.tm_year) == s->reg_almyear)) { + alarm_raise = 1; + } + + if (alarm_raise) { + DPRINTF("ALARM IRQ\n"); + /* set irq status */ + s->reg_intp |= INTP_ALM_ENABLE; + qemu_irq_raise(s->alm_irq); + } +} + +/* + * RTC update frequency + * Parameters: + * reg_value - current RTCCON register or its new value + */ +static void exynos4210_rtc_update_freq(Exynos4210RTCState *s, + uint32_t reg_value) +{ + uint32_t freq; + + freq = s->freq; + /* set frequency for time generator */ + s->freq = RTC_BASE_FREQ / (1 << TICCKSEL(reg_value)); + + if (freq != s->freq) { + ptimer_set_freq(s->ptimer, s->freq); + DPRINTF("freq=%dHz\n", s->freq); + } +} + +/* month is between 0 and 11.
*/ +static int get_days_in_month(int month, int year) +{ + static const int days_tab[12] = { + 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 + }; + int d; + if ((unsigned)month >= 12) { + return 31; + } + d = days_tab[month]; + if (month == 1) { + if ((year % 4) == 0 && ((year % 100) != 0 || (year % 400) == 0)) { + d++; + } + } + return d; +} + +/* update 'tm' to the next second */ +static void rtc_next_second(struct tm *tm) +{ + int days_in_month; + + tm->tm_sec++; + if ((unsigned)tm->tm_sec >= 60) { + tm->tm_sec = 0; + tm->tm_min++; + if ((unsigned)tm->tm_min >= 60) { + tm->tm_min = 0; + tm->tm_hour++; + if ((unsigned)tm->tm_hour >= 24) { + tm->tm_hour = 0; + /* next day */ + tm->tm_wday++; + if ((unsigned)tm->tm_wday >= 7) { + tm->tm_wday = 0; + } + days_in_month = get_days_in_month(tm->tm_mon, + tm->tm_year + 1900); + tm->tm_mday++; + if (tm->tm_mday < 1) { + tm->tm_mday = 1; + } else if (tm->tm_mday > days_in_month) { + tm->tm_mday = 1; + tm->tm_mon++; + if (tm->tm_mon >= 12) { + tm->tm_mon = 0; + tm->tm_year++; + } + } + } + } + } +} + +/* + * tick handler + */ +static void exynos4210_rtc_tick(void *opaque) +{ + Exynos4210RTCState *s = (Exynos4210RTCState *)opaque; + + DPRINTF("TICK IRQ\n"); + /* set irq status */ + s->reg_intp |= INTP_TICK_ENABLE; + /* raise IRQ */ + qemu_irq_raise(s->tick_irq); + + /* restart timer */ + ptimer_set_count(s->ptimer, s->reg_ticcnt); + ptimer_run(s->ptimer, 1); +} + +/* + * 1Hz clock handler + */ +static void exynos4210_rtc_1Hz_tick(void *opaque) +{ + Exynos4210RTCState *s = (Exynos4210RTCState *)opaque; + + rtc_next_second(&s->current_tm); + /* DPRINTF("1Hz tick\n"); */ + + /* raise IRQ */ + if (s->reg_rtcalm & ALARM_INT_ENABLE) { + check_alarm_raise(s); + } + + ptimer_set_count(s->ptimer_1Hz, RTC_BASE_FREQ); + ptimer_run(s->ptimer_1Hz, 1); +} + +/* + * RTC Read + */ +static uint64_t exynos4210_rtc_read(void *opaque, target_phys_addr_t offset, + unsigned size) +{ + uint32_t value = 0; + Exynos4210RTCState *s = (Exynos4210RTCState *)opaque; + + switch (offset) { + case INTP: + value = s->reg_intp; + break; + case RTCCON: + value = s->reg_rtccon; + break; + case TICCNT: + value = s->reg_ticcnt; + break; + case RTCALM: + value = s->reg_rtcalm; + break; + case ALMSEC: + value = s->reg_almsec; + break; + case ALMMIN: + value = s->reg_almmin; + break; + case ALMHOUR: + value = s->reg_almhour; + break; + case ALMDAY: + value = s->reg_almday; + break; + case ALMMON: + value = s->reg_almmon; + break; + case ALMYEAR: + value = s->reg_almyear; + break; + + case BCDSEC: + value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_sec); + break; + case BCDMIN: + value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_min); + break; + case BCDHOUR: + value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_hour); + break; + case BCDDAYWEEK: + value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_wday); + break; + case BCDDAY: + value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_mday); + break; + case BCDMON: + value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_mon + 1); + break; + case BCDYEAR: + value = BCD3DIGITS(s->current_tm.tm_year); + break; + + case CURTICNT: + s->reg_curticcnt = ptimer_get_count(s->ptimer); + value = s->reg_curticcnt; + break; + + default: + fprintf(stderr, + "[exynos4210.rtc: bad read offset " TARGET_FMT_plx "]\n", + offset); + break; + } + return value; +} + +/* + * RTC Write + */ +static void exynos4210_rtc_write(void *opaque, target_phys_addr_t offset, + uint64_t value, unsigned size) +{ + Exynos4210RTCState *s = (Exynos4210RTCState *)opaque; + + switch 
(offset) { + case INTP: + if (value & INTP_ALM_ENABLE) { + qemu_irq_lower(s->alm_irq); + s->reg_intp &= (~INTP_ALM_ENABLE); + } + if (value & INTP_TICK_ENABLE) { + qemu_irq_lower(s->tick_irq); + s->reg_intp &= (~INTP_TICK_ENABLE); + } + break; + case RTCCON: + if (value & RTC_ENABLE) { + exynos4210_rtc_update_freq(s, value); + } + if ((value & RTC_ENABLE) > (s->reg_rtccon & RTC_ENABLE)) { + /* clock timer */ + ptimer_set_count(s->ptimer_1Hz, RTC_BASE_FREQ); + ptimer_run(s->ptimer_1Hz, 1); + DPRINTF("run clock timer\n"); + } + if ((value & RTC_ENABLE) < (s->reg_rtccon & RTC_ENABLE)) { + /* tick timer */ + ptimer_stop(s->ptimer); + /* clock timer */ + ptimer_stop(s->ptimer_1Hz); + DPRINTF("stop all timers\n"); + } + if (value & RTC_ENABLE) { + if ((value & TICK_TIMER_ENABLE) > + (s->reg_rtccon & TICK_TIMER_ENABLE) && + (s->reg_ticcnt)) { + ptimer_set_count(s->ptimer, s->reg_ticcnt); + ptimer_run(s->ptimer, 1); + DPRINTF("run tick timer\n"); + } + if ((value & TICK_TIMER_ENABLE) < + (s->reg_rtccon & TICK_TIMER_ENABLE)) { + ptimer_stop(s->ptimer); + } + } + s->reg_rtccon = value; + break; + case TICCNT: + if (value > TICNT_THRESHHOLD) { + s->reg_ticcnt = value; + } else { + fprintf(stderr, + "[exynos4210.rtc: bad TICNT value %u ]\n", + (uint32_t)value); + } + break; + + case RTCALM: + s->reg_rtcalm = value; + break; + case ALMSEC: + s->reg_almsec = (value & 0x7f); + break; + case ALMMIN: + s->reg_almmin = (value & 0x7f); + break; + case ALMHOUR: + s->reg_almhour = (value & 0x3f); + break; + case ALMDAY: + s->reg_almday = (value & 0x3f); + break; + case ALMMON: + s->reg_almmon = (value & 0x1f); + break; + case ALMYEAR: + s->reg_almyear = (value & 0x0fff); + break; + + case BCDSEC: + if (s->reg_rtccon & RTC_ENABLE) { + s->current_tm.tm_sec = (int)from_bcd((uint8_t)value); + } + break; + case BCDMIN: + if (s->reg_rtccon & RTC_ENABLE) { + s->current_tm.tm_min = (int)from_bcd((uint8_t)value); + } + break; + case BCDHOUR: + if (s->reg_rtccon & RTC_ENABLE) { + s->current_tm.tm_hour = (int)from_bcd((uint8_t)value); + } + break; + case BCDDAYWEEK: + if (s->reg_rtccon & RTC_ENABLE) { + s->current_tm.tm_wday = (int)from_bcd((uint8_t)value); + } + break; + case BCDDAY: + if (s->reg_rtccon & RTC_ENABLE) { + s->current_tm.tm_mday = (int)from_bcd((uint8_t)value); + } + break; + case BCDMON: + if (s->reg_rtccon & RTC_ENABLE) { + s->current_tm.tm_mon = (int)from_bcd((uint8_t)value) - 1; + } + break; + case BCDYEAR: + if (s->reg_rtccon & RTC_ENABLE) { + /* 3 digits */ + s->current_tm.tm_year = (int)from_bcd((uint8_t)value) + + (int)from_bcd((uint8_t)((value >> 8) & 0x0f)) * 100; + } + break; + + default: + fprintf(stderr, + "[exynos4210.rtc: bad write offset " TARGET_FMT_plx "]\n", + offset); + break; + + } +} + +/* + * Set default values to timer fields and registers + */ +static void exynos4210_rtc_reset(DeviceState *d) +{ + Exynos4210RTCState *s = (Exynos4210RTCState *)d; + + struct tm tm; + + qemu_get_timedate(&tm, 0); + s->current_tm = tm; + + DPRINTF("Get time from host: %d-%d-%d %2d:%02d:%02d\n", + s->current_tm.tm_year, s->current_tm.tm_mon, s->current_tm.tm_mday, + s->current_tm.tm_hour, s->current_tm.tm_min, s->current_tm.tm_sec); + + s->reg_intp = 0; + s->reg_rtccon = 0; + s->reg_ticcnt = 0; + s->reg_rtcalm = 0; + s->reg_almsec = 0; + s->reg_almmin = 0; + s->reg_almhour = 0; + s->reg_almday = 0; + s->reg_almmon = 0; + s->reg_almyear = 0; + + s->reg_curticcnt = 0; + + exynos4210_rtc_update_freq(s, s->reg_rtccon); + ptimer_stop(s->ptimer); + ptimer_stop(s->ptimer_1Hz); +} + +static const 
MemoryRegionOps exynos4210_rtc_ops = { + .read = exynos4210_rtc_read, + .write = exynos4210_rtc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +/* + * RTC timer initialization + */ +static int exynos4210_rtc_init(SysBusDevice *dev) +{ + Exynos4210RTCState *s = FROM_SYSBUS(Exynos4210RTCState, dev); + QEMUBH *bh; + + bh = qemu_bh_new(exynos4210_rtc_tick, s); + s->ptimer = ptimer_init(bh); + ptimer_set_freq(s->ptimer, RTC_BASE_FREQ); + exynos4210_rtc_update_freq(s, 0); + + bh = qemu_bh_new(exynos4210_rtc_1Hz_tick, s); + s->ptimer_1Hz = ptimer_init(bh); + ptimer_set_freq(s->ptimer_1Hz, RTC_BASE_FREQ); + + sysbus_init_irq(dev, &s->alm_irq); + sysbus_init_irq(dev, &s->tick_irq); + + memory_region_init_io(&s->iomem, &exynos4210_rtc_ops, s, "exynos4210-rtc", + EXYNOS4210_RTC_REG_MEM_SIZE); + sysbus_init_mmio(dev, &s->iomem); + + return 0; +} + +static void exynos4210_rtc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + + k->init = exynos4210_rtc_init; + dc->reset = exynos4210_rtc_reset; + dc->vmsd = &vmstate_exynos4210_rtc_state; +} + +static const TypeInfo exynos4210_rtc_info = { + .name = "exynos4210.rtc", + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Exynos4210RTCState), + .class_init = exynos4210_rtc_class_init, +}; + +static void exynos4210_rtc_register_types(void) +{ + type_register_static(&exynos4210_rtc_info); +} + +type_init(exynos4210_rtc_register_types) diff --git a/hw/exynos4_boards.c b/hw/exynos4_boards.c index ea32c51dcc..e5c2a5f388 100644 --- a/hw/exynos4_boards.c +++ b/hw/exynos4_boards.c @@ -138,7 +138,7 @@ static void nuri_init(ram_addr_t ram_size, exynos4_boards_init_common(kernel_filename, kernel_cmdline, initrd_filename, EXYNOS4_BOARD_NURI); - arm_load_kernel(first_cpu, &exynos4_board_binfo); + arm_load_kernel(arm_env_get_cpu(first_cpu), &exynos4_board_binfo); } static void smdkc210_init(ram_addr_t ram_size, @@ -151,7 +151,7 @@ static void smdkc210_init(ram_addr_t ram_size, lan9215_init(SMDK_LAN9118_BASE_ADDR, qemu_irq_invert(s->irq_table[exynos4210_get_irq(37, 1)])); - arm_load_kernel(first_cpu, &exynos4_board_binfo); + arm_load_kernel(arm_env_get_cpu(first_cpu), &exynos4_board_binfo); } static QEMUMachine exynos4_machines[EXYNOS4_NUM_OF_BOARDS] = { @@ -36,6 +36,7 @@ #include "qdev-addr.h" #include "blockdev.h" #include "sysemu.h" +#include "qemu-log.h" /********************************************************/ /* debug Floppy devices */ @@ -48,9 +49,6 @@ #define FLOPPY_DPRINTF(fmt, ...) #endif -#define FLOPPY_ERROR(fmt, ...) 
\ - do { printf("FLOPPY ERROR: %s: " fmt, __func__ , ## __VA_ARGS__); } while (0) - /********************************************************/ /* Floppy drive emulation */ @@ -147,18 +145,28 @@ static int fd_seek(FDrive *drv, uint8_t head, uint8_t track, uint8_t sect, if (sector != fd_sector(drv)) { #if 0 if (!enable_seek) { - FLOPPY_ERROR("no implicit seek %d %02x %02x (max=%d %02x %02x)\n", - head, track, sect, 1, drv->max_track, drv->last_sect); + FLOPPY_DPRINTF("error: no implicit seek %d %02x %02x" + " (max=%d %02x %02x)\n", + head, track, sect, 1, drv->max_track, + drv->last_sect); return 4; } #endif drv->head = head; - if (drv->track != track) + if (drv->track != track) { + if (drv->bs != NULL && bdrv_is_inserted(drv->bs)) { + drv->media_changed = 0; + } ret = 1; + } drv->track = track; drv->sect = sect; } + if (drv->bs == NULL || !bdrv_is_inserted(drv->bs)) { + ret = 2; + } + return ret; } @@ -166,9 +174,7 @@ static int fd_seek(FDrive *drv, uint8_t head, uint8_t track, uint8_t sect, static void fd_recalibrate(FDrive *drv) { FLOPPY_DPRINTF("recalibrate\n"); - drv->head = 0; - drv->track = 0; - drv->sect = 1; + fd_seek(drv, 0, 0, 1, 1); } /* Revalidate a disk drive after a disk change */ @@ -185,9 +191,6 @@ static void fd_revalidate(FDrive *drv) &last_sect, drv->drive, &drive, &rate); if (!bdrv_is_inserted(drv->bs)) { FLOPPY_DPRINTF("No disk in drive\n"); - } else if (nb_heads != 0 && max_track != 0 && last_sect != 0) { - FLOPPY_DPRINTF("User defined disk (%d %d %d)\n", - nb_heads - 1, max_track, last_sect); } else { FLOPPY_DPRINTF("Floppy disk (%d h %d t %d s) %s\n", nb_heads, max_track, last_sect, ro ? "ro" : "rw"); @@ -301,6 +304,9 @@ enum { }; enum { + FD_SR0_DS0 = 0x01, + FD_SR0_DS1 = 0x02, + FD_SR0_HEAD = 0x04, FD_SR0_EQPMT = 0x10, FD_SR0_SEEK = 0x20, FD_SR0_ABNTERM = 0x40, @@ -707,14 +713,6 @@ static void fdctrl_raise_irq(FDCtrl *fdctrl, uint8_t status0) qemu_set_irq(fdctrl->irq, 1); fdctrl->sra |= FD_SRA_INTPEND; } - if (status0 & FD_SR0_SEEK) { - FDrive *cur_drv; - /* A seek clears the disk change line (if a disk is inserted) */ - cur_drv = get_cur_drv(fdctrl); - if (cur_drv->bs != NULL && bdrv_is_inserted(cur_drv->bs)) { - cur_drv->media_changed = 0; - } - } fdctrl->reset_sensei = 0; fdctrl->status0 = status0; @@ -974,25 +972,30 @@ static void fdctrl_reset_fifo(FDCtrl *fdctrl) } /* Set FIFO status for the host to read */ -static void fdctrl_set_fifo(FDCtrl *fdctrl, int fifo_len, int do_irq) +static void fdctrl_set_fifo(FDCtrl *fdctrl, int fifo_len, uint8_t status0) { fdctrl->data_dir = FD_DIR_READ; fdctrl->data_len = fifo_len; fdctrl->data_pos = 0; fdctrl->msr |= FD_MSR_CMDBUSY | FD_MSR_RQM | FD_MSR_DIO; - if (do_irq) - fdctrl_raise_irq(fdctrl, 0x00); + if (status0) { + fdctrl_raise_irq(fdctrl, status0); + } } /* Set an error: unimplemented/unknown command */ static void fdctrl_unimplemented(FDCtrl *fdctrl, int direction) { - FLOPPY_ERROR("unimplemented command 0x%02x\n", fdctrl->fifo[0]); + qemu_log_mask(LOG_UNIMP, "fdc: unimplemented command 0x%02x\n", + fdctrl->fifo[0]); fdctrl->fifo[0] = FD_SR0_INVCMD; fdctrl_set_fifo(fdctrl, 1, 0); } -/* Seek to next sector */ +/* Seek to next sector + * returns 0 when end of track reached (for DBL_SIDES on head 1) + * otherwise returns 1 + */ static int fdctrl_seek_to_next_sect(FDCtrl *fdctrl, FDrive *cur_drv) { FLOPPY_DPRINTF("seek to next sector (%d %02x %02x => %d)\n", @@ -1000,30 +1003,39 @@ static int fdctrl_seek_to_next_sect(FDCtrl *fdctrl, FDrive *cur_drv) fd_sector(cur_drv)); /* XXX: cur_drv->sect >= cur_drv->last_sect should 
be an error in fact */ - if (cur_drv->sect >= cur_drv->last_sect || - cur_drv->sect == fdctrl->eot) { - cur_drv->sect = 1; + uint8_t new_head = cur_drv->head; + uint8_t new_track = cur_drv->track; + uint8_t new_sect = cur_drv->sect; + + int ret = 1; + + if (new_sect >= cur_drv->last_sect || + new_sect == fdctrl->eot) { + new_sect = 1; if (FD_MULTI_TRACK(fdctrl->data_state)) { - if (cur_drv->head == 0 && + if (new_head == 0 && (cur_drv->flags & FDISK_DBL_SIDES) != 0) { - cur_drv->head = 1; + new_head = 1; } else { - cur_drv->head = 0; - cur_drv->track++; - if ((cur_drv->flags & FDISK_DBL_SIDES) == 0) - return 0; + new_head = 0; + new_track++; + if ((cur_drv->flags & FDISK_DBL_SIDES) == 0) { + ret = 0; + } } } else { - cur_drv->track++; - return 0; + new_track++; + ret = 0; + } + if (ret == 1) { + FLOPPY_DPRINTF("seek to next track (%d %02x %02x => %d)\n", + new_head, new_track, new_sect, fd_sector(cur_drv)); } - FLOPPY_DPRINTF("seek to next track (%d %02x %02x => %d)\n", - cur_drv->head, cur_drv->track, - cur_drv->sect, fd_sector(cur_drv)); } else { - cur_drv->sect++; + new_sect++; } - return 1; + fd_seek(cur_drv, new_head, new_track, new_sect, 1); + return ret; } /* Callback for transfer end (stop or abort) */ @@ -1033,10 +1045,12 @@ static void fdctrl_stop_transfer(FDCtrl *fdctrl, uint8_t status0, FDrive *cur_drv; cur_drv = get_cur_drv(fdctrl); + fdctrl->status0 = status0 | FD_SR0_SEEK | (cur_drv->head << 2) | + GET_CUR_DRV(fdctrl); + FLOPPY_DPRINTF("transfer status: %02x %02x %02x (%02x)\n", - status0, status1, status2, - status0 | (cur_drv->head << 2) | GET_CUR_DRV(fdctrl)); - fdctrl->fifo[0] = status0 | (cur_drv->head << 2) | GET_CUR_DRV(fdctrl); + status0, status1, status2, fdctrl->status0); + fdctrl->fifo[0] = fdctrl->status0; fdctrl->fifo[1] = status1; fdctrl->fifo[2] = status2; fdctrl->fifo[3] = cur_drv->track; @@ -1049,7 +1063,7 @@ static void fdctrl_stop_transfer(FDCtrl *fdctrl, uint8_t status0, } fdctrl->msr |= FD_MSR_RQM | FD_MSR_DIO; fdctrl->msr &= ~FD_MSR_NONDMA; - fdctrl_set_fifo(fdctrl, 7, 1); + fdctrl_set_fifo(fdctrl, 7, fdctrl->status0); } /* Prepare a data transfer (either DMA or FIFO) */ @@ -1155,7 +1169,8 @@ static void fdctrl_start_transfer(FDCtrl *fdctrl, int direction) DMA_schedule(fdctrl->dma_chann); return; } else { - FLOPPY_ERROR("dma_mode=%d direction=%d\n", dma_mode, direction); + FLOPPY_DPRINTF("bad dma_mode=%d direction=%d\n", dma_mode, + direction); } } FLOPPY_DPRINTF("start non-DMA transfer\n"); @@ -1163,7 +1178,7 @@ static void fdctrl_start_transfer(FDCtrl *fdctrl, int direction) if (direction != FD_DIR_WRITE) fdctrl->msr |= FD_MSR_DIO; /* IO based transfer: calculate len */ - fdctrl_raise_irq(fdctrl, 0x00); + fdctrl_raise_irq(fdctrl, FD_SR0_SEEK); return; } @@ -1171,7 +1186,7 @@ static void fdctrl_start_transfer(FDCtrl *fdctrl, int direction) /* Prepare a transfer of deleted data */ static void fdctrl_start_transfer_del(FDCtrl *fdctrl, int direction) { - FLOPPY_ERROR("fdctrl_start_transfer_del() unimplemented\n"); + qemu_log_mask(LOG_UNIMP, "fdctrl_start_transfer_del() unimplemented\n"); /* We don't handle deleted data, * so we don't return *ANYTHING* @@ -1250,7 +1265,8 @@ static int fdctrl_transfer_handler (void *opaque, int nchan, fdctrl->data_pos, len); if (bdrv_write(cur_drv->bs, fd_sector(cur_drv), fdctrl->fifo, 1) < 0) { - FLOPPY_ERROR("writing sector %d\n", fd_sector(cur_drv)); + FLOPPY_DPRINTF("error writing sector %d\n", + fd_sector(cur_drv)); fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK, 0x00, 0x00); goto transfer_error; } @@ 
-1309,7 +1325,7 @@ static uint32_t fdctrl_read_data(FDCtrl *fdctrl) cur_drv = get_cur_drv(fdctrl); fdctrl->dsr &= ~FD_DSR_PWRDOWN; if (!(fdctrl->msr & FD_MSR_RQM) || !(fdctrl->msr & FD_MSR_DIO)) { - FLOPPY_ERROR("controller not ready for reading\n"); + FLOPPY_DPRINTF("error: controller not ready for reading\n"); return 0; } pos = fdctrl->data_pos; @@ -1393,7 +1409,7 @@ static void fdctrl_format_sector(FDCtrl *fdctrl) memset(fdctrl->fifo, 0, FD_SECTOR_LEN); if (cur_drv->bs == NULL || bdrv_write(cur_drv->bs, fd_sector(cur_drv), fdctrl->fifo, 1) < 0) { - FLOPPY_ERROR("formatting sector %d\n", fd_sector(cur_drv)); + FLOPPY_DPRINTF("error formatting sector %d\n", fd_sector(cur_drv)); fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK, 0x00, 0x00); } else { if (cur_drv->sect == cur_drv->last_sect) { @@ -1591,16 +1607,18 @@ static void fdctrl_handle_sense_interrupt_status(FDCtrl *fdctrl, int direction) { FDrive *cur_drv = get_cur_drv(fdctrl); - if(fdctrl->reset_sensei > 0) { + if (fdctrl->reset_sensei > 0) { fdctrl->fifo[0] = FD_SR0_RDYCHG + FD_RESET_SENSEI_COUNT - fdctrl->reset_sensei; fdctrl->reset_sensei--; + } else if (!(fdctrl->sra & FD_SRA_INTPEND)) { + fdctrl->fifo[0] = FD_SR0_INVCMD; + fdctrl_set_fifo(fdctrl, 1, 0); + return; } else { - /* XXX: status0 handling is broken for read/write - commands, so we do this hack. It should be suppressed - ASAP */ fdctrl->fifo[0] = - FD_SR0_SEEK | (cur_drv->head << 2) | GET_CUR_DRV(fdctrl); + (fdctrl->status0 & ~(FD_SR0_HEAD | FD_SR0_DS1 | FD_SR0_DS0)) + | GET_CUR_DRV(fdctrl); } fdctrl->fifo[1] = cur_drv->track; @@ -1619,11 +1637,7 @@ static void fdctrl_handle_seek(FDCtrl *fdctrl, int direction) /* The seek command just sends step pulses to the drive and doesn't care if * there is a medium inserted of if it's banging the head against the drive. 
*/ - if (fdctrl->fifo[2] > cur_drv->max_track) { - cur_drv->track = cur_drv->max_track; - } else { - cur_drv->track = fdctrl->fifo[2]; - } + fd_seek(cur_drv, cur_drv->head, fdctrl->fifo[2], cur_drv->sect, 1); /* Raise Interrupt */ fdctrl_raise_irq(fdctrl, FD_SR0_SEEK); } @@ -1688,9 +1702,10 @@ static void fdctrl_handle_relative_seek_out(FDCtrl *fdctrl, int direction) SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK); cur_drv = get_cur_drv(fdctrl); if (fdctrl->fifo[2] + cur_drv->track >= cur_drv->max_track) { - cur_drv->track = cur_drv->max_track - 1; + fd_seek(cur_drv, cur_drv->head, cur_drv->max_track - 1, + cur_drv->sect, 1); } else { - cur_drv->track += fdctrl->fifo[2]; + fd_seek(cur_drv, cur_drv->head, fdctrl->fifo[2], cur_drv->sect, 1); } fdctrl_reset_fifo(fdctrl); /* Raise Interrupt */ @@ -1704,9 +1719,9 @@ static void fdctrl_handle_relative_seek_in(FDCtrl *fdctrl, int direction) SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK); cur_drv = get_cur_drv(fdctrl); if (fdctrl->fifo[2] > cur_drv->track) { - cur_drv->track = 0; + fd_seek(cur_drv, cur_drv->head, 0, cur_drv->sect, 1); } else { - cur_drv->track -= fdctrl->fifo[2]; + fd_seek(cur_drv, cur_drv->head, fdctrl->fifo[2], cur_drv->sect, 1); } fdctrl_reset_fifo(fdctrl); /* Raise Interrupt */ @@ -1768,7 +1783,7 @@ static void fdctrl_write_data(FDCtrl *fdctrl, uint32_t value) return; } if (!(fdctrl->msr & FD_MSR_RQM) || (fdctrl->msr & FD_MSR_DIO)) { - FLOPPY_ERROR("controller not ready for writing\n"); + FLOPPY_DPRINTF("error: controller not ready for writing\n"); return; } fdctrl->dsr &= ~FD_DSR_PWRDOWN; @@ -1782,7 +1797,8 @@ static void fdctrl_write_data(FDCtrl *fdctrl, uint32_t value) fdctrl->data_pos == fdctrl->data_len) { cur_drv = get_cur_drv(fdctrl); if (bdrv_write(cur_drv->bs, fd_sector(cur_drv), fdctrl->fifo, 1) < 0) { - FLOPPY_ERROR("writing sector %d\n", fd_sector(cur_drv)); + FLOPPY_DPRINTF("error writing sector %d\n", + fd_sector(cur_drv)); return; } if (!fdctrl_seek_to_next_sect(fdctrl, cur_drv)) { @@ -1888,6 +1904,26 @@ static int fdctrl_connect_drives(FDCtrl *fdctrl) return 0; } +ISADevice *fdctrl_init_isa(ISABus *bus, DriveInfo **fds) +{ + ISADevice *dev; + + dev = isa_try_create(bus, "isa-fdc"); + if (!dev) { + return NULL; + } + + if (fds[0]) { + qdev_prop_set_drive_nofail(&dev->qdev, "driveA", fds[0]->bdrv); + } + if (fds[1]) { + qdev_prop_set_drive_nofail(&dev->qdev, "driveB", fds[1]->bdrv); + } + qdev_init_nofail(&dev->qdev); + + return dev; +} + void fdctrl_init_sysbus(qemu_irq irq, int dma_chann, target_phys_addr_t mmio_base, DriveInfo **fds) { @@ -1,32 +1,12 @@ #ifndef HW_FDC_H #define HW_FDC_H -#include "isa.h" -#include "blockdev.h" +#include "qemu-common.h" /* fdc.c */ #define MAX_FD 2 -static inline ISADevice *fdctrl_init_isa(ISABus *bus, DriveInfo **fds) -{ - ISADevice *dev; - - dev = isa_try_create(bus, "isa-fdc"); - if (!dev) { - return NULL; - } - - if (fds[0]) { - qdev_prop_set_drive_nofail(&dev->qdev, "driveA", fds[0]->bdrv); - } - if (fds[1]) { - qdev_prop_set_drive_nofail(&dev->qdev, "driveB", fds[1]->bdrv); - } - qdev_init_nofail(&dev->qdev); - - return dev; -} - +ISADevice *fdctrl_init_isa(ISABus *bus, DriveInfo **fds); void fdctrl_init_sysbus(qemu_irq irq, int dma_chann, target_phys_addr_t mmio_base, DriveInfo **fds); void sun4m_fdctrl_init(qemu_irq irq, target_phys_addr_t io_base, diff --git a/hw/highbank.c b/hw/highbank.c index 4d6d728a28..4bdea5df7d 100644 --- a/hw/highbank.c +++ b/hw/highbank.c @@ -36,7 +36,7 @@ /* Board init. 
*/ -static void hb_write_secondary(CPUARMState *env, const struct arm_boot_info *info) +static void hb_write_secondary(ARMCPU *cpu, const struct arm_boot_info *info) { int n; uint32_t smpboot[] = { @@ -60,8 +60,10 @@ static void hb_write_secondary(CPUARMState *env, const struct arm_boot_info *inf rom_add_blob_fixed("smpboot", smpboot, sizeof(smpboot), SMP_BOOT_ADDR); } -static void hb_reset_secondary(CPUARMState *env, const struct arm_boot_info *info) +static void hb_reset_secondary(ARMCPU *cpu, const struct arm_boot_info *info) { + CPUARMState *env = &cpu->env; + switch (info->nb_cpus) { case 4: stl_phys_notdirty(SMP_BOOT_REG + 0x30, 0); @@ -190,7 +192,6 @@ static void highbank_init(ram_addr_t ram_size, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { - CPUARMState *env = NULL; DeviceState *dev; SysBusDevice *busdev; qemu_irq *irqp; @@ -213,10 +214,10 @@ static void highbank_init(ram_addr_t ram_size, fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } - env = &cpu->env; + /* This will become a QOM property eventually */ cpu->reset_cbar = GIC_BASE_ADDR; - irqp = arm_pic_init_cpu(env); + irqp = arm_pic_init_cpu(cpu); cpu_irq[n] = irqp[ARM_PIC_CPU_IRQ]; } @@ -316,7 +317,7 @@ static void highbank_init(ram_addr_t ram_size, highbank_binfo.loader_start = 0; highbank_binfo.write_secondary_boot = hb_write_secondary; highbank_binfo.secondary_cpu_reset_hook = hb_reset_secondary; - arm_load_kernel(first_cpu, &highbank_binfo); + arm_load_kernel(arm_env_get_cpu(first_cpu), &highbank_binfo); } static QEMUMachine highbank_machine = { @@ -17,13 +17,18 @@ struct i2c_bus uint8_t saved_address; }; -static struct BusInfo i2c_bus_info = { - .name = "I2C", - .size = sizeof(i2c_bus), - .props = (Property[]) { - DEFINE_PROP_UINT8("address", struct I2CSlave, address, 0), - DEFINE_PROP_END_OF_LIST(), - } +static Property i2c_props[] = { + DEFINE_PROP_UINT8("address", struct I2CSlave, address, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +#define TYPE_I2C_BUS "i2c-bus" +#define I2C_BUS(obj) OBJECT_CHECK(i2c_bus, (obj), TYPE_I2C_BUS) + +static const TypeInfo i2c_bus_info = { + .name = TYPE_I2C_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(i2c_bus), }; static void i2c_bus_pre_save(void *opaque) @@ -61,7 +66,7 @@ i2c_bus *i2c_init_bus(DeviceState *parent, const char *name) { i2c_bus *bus; - bus = FROM_QBUS(i2c_bus, qbus_create(&i2c_bus_info, parent, name)); + bus = FROM_QBUS(i2c_bus, qbus_create(TYPE_I2C_BUS, parent, name)); vmstate_register(NULL, -1, &vmstate_i2c_bus, bus); return bus; } @@ -81,11 +86,12 @@ int i2c_bus_busy(i2c_bus *bus) /* TODO: Make this handle multiple masters. 
*/ int i2c_start_transfer(i2c_bus *bus, uint8_t address, int recv) { - DeviceState *qdev; + BusChild *kid; I2CSlave *slave = NULL; I2CSlaveClass *sc; - QTAILQ_FOREACH(qdev, &bus->qbus.children, sibling) { + QTAILQ_FOREACH(kid, &bus->qbus.children, sibling) { + DeviceState *qdev = kid->child; I2CSlave *candidate = I2C_SLAVE_FROM_QDEV(qdev); if (candidate->address == address) { slave = candidate; @@ -218,7 +224,8 @@ static void i2c_slave_class_init(ObjectClass *klass, void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->init = i2c_slave_qdev_init; - k->bus_info = &i2c_bus_info; + k->bus_type = TYPE_I2C_BUS; + k->props = i2c_props; } static TypeInfo i2c_slave_type_info = { @@ -232,6 +239,7 @@ static TypeInfo i2c_slave_type_info = { static void i2c_slave_register_types(void) { + type_register_static(&i2c_bus_info); type_register_static(&i2c_slave_type_info); } diff --git a/hw/i386/Makefile.objs b/hw/i386/Makefile.objs new file mode 100644 index 0000000000..8c764bbfef --- /dev/null +++ b/hw/i386/Makefile.objs @@ -0,0 +1,15 @@ +obj-y += mc146818rtc.o pc.o +obj-y += apic_common.o apic.o kvmvapic.o +obj-y += sga.o ioapic_common.o ioapic.o piix_pci.o +obj-y += vmport.o +obj-y += pci-hotplug.o smbios.o wdt_ib700.o +obj-y += debugcon.o multiboot.o +obj-y += pc_piix.o +obj-y += pc_sysfw.o +obj-$(CONFIG_XEN) += xen_platform.o xen_apic.o +obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen-host-pci-device.o +obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen_pt.o xen_pt_config_init.o xen_pt_msi.o +obj-y += kvm/ +obj-$(CONFIG_SPICE) += qxl.o qxl-logger.o qxl-render.o + +obj-y := $(addprefix ../,$(obj-y)) diff --git a/hw/ide/Makefile.objs b/hw/ide/Makefile.objs new file mode 100644 index 0000000000..cf718dd016 --- /dev/null +++ b/hw/ide/Makefile.objs @@ -0,0 +1,10 @@ +hw-obj-$(CONFIG_IDE_CORE) += core.o atapi.o +hw-obj-$(CONFIG_IDE_QDEV) += qdev.o +hw-obj-$(CONFIG_IDE_PCI) += pci.o +hw-obj-$(CONFIG_IDE_ISA) += isa.o +hw-obj-$(CONFIG_IDE_PIIX) += piix.o +hw-obj-$(CONFIG_IDE_CMD646) += cmd646.o +hw-obj-$(CONFIG_IDE_MACIO) += macio.o +hw-obj-$(CONFIG_IDE_VIA) += via.o +hw-obj-$(CONFIG_AHCI) += ahci.o +hw-obj-$(CONFIG_AHCI) += ich.o diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c index 2d7d03d772..efea93f0b4 100644 --- a/hw/ide/ahci.c +++ b/hw/ide/ahci.c @@ -339,7 +339,7 @@ static void ahci_mem_write(void *opaque, target_phys_addr_t addr, case HOST_CTL: /* R/W */ if (val & HOST_CTL_RESET) { DPRINTF(-1, "HBA Reset\n"); - ahci_reset(container_of(s, AHCIPCIState, ahci)); + ahci_reset(s); } else { s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN; ahci_check_irq(s); @@ -588,7 +588,7 @@ static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis) AHCIPortRegs *pr = &ad->port_regs; uint8_t *d2h_fis; int i; - target_phys_addr_t cmd_len = 0x80; + dma_addr_t cmd_len = 0x80; int cmd_mapped = 0; if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) { @@ -598,7 +598,8 @@ static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis) if (!cmd_fis) { /* map cmd_fis */ uint64_t tbl_addr = le64_to_cpu(ad->cur_cmd->tbl_addr); - cmd_fis = cpu_physical_memory_map(tbl_addr, &cmd_len, 0); + cmd_fis = dma_memory_map(ad->hba->dma, tbl_addr, &cmd_len, + DMA_DIRECTION_TO_DEVICE); cmd_mapped = 1; } @@ -630,7 +631,8 @@ static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis) ahci_trigger_irq(ad->hba, ad, PORT_IRQ_D2H_REG_FIS); if (cmd_mapped) { - cpu_physical_memory_unmap(cmd_fis, cmd_len, 0, cmd_len); + dma_memory_unmap(ad->hba->dma, cmd_fis, cmd_len, + DMA_DIRECTION_TO_DEVICE, cmd_len); } } @@ -640,8 +642,8 @@ static int 
ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist) uint32_t opts = le32_to_cpu(cmd->opts); uint64_t prdt_addr = le64_to_cpu(cmd->tbl_addr) + 0x80; int sglist_alloc_hint = opts >> AHCI_CMD_HDR_PRDT_LEN; - target_phys_addr_t prdt_len = (sglist_alloc_hint * sizeof(AHCI_SG)); - target_phys_addr_t real_prdt_len = prdt_len; + dma_addr_t prdt_len = (sglist_alloc_hint * sizeof(AHCI_SG)); + dma_addr_t real_prdt_len = prdt_len; uint8_t *prdt; int i; int r = 0; @@ -652,7 +654,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist) } /* map PRDT */ - if (!(prdt = cpu_physical_memory_map(prdt_addr, &prdt_len, 0))){ + if (!(prdt = dma_memory_map(ad->hba->dma, prdt_addr, &prdt_len, + DMA_DIRECTION_TO_DEVICE))){ DPRINTF(ad->port_no, "map failed\n"); return -1; } @@ -667,7 +670,7 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist) if (sglist_alloc_hint > 0) { AHCI_SG *tbl = (AHCI_SG *)prdt; - qemu_sglist_init(sglist, sglist_alloc_hint); + qemu_sglist_init(sglist, sglist_alloc_hint, ad->hba->dma); for (i = 0; i < sglist_alloc_hint; i++) { /* flags_size is zero-based */ qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr), @@ -676,7 +679,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist) } out: - cpu_physical_memory_unmap(prdt, prdt_len, 0, prdt_len); + dma_memory_unmap(ad->hba->dma, prdt, prdt_len, + DMA_DIRECTION_TO_DEVICE, prdt_len); return r; } @@ -786,7 +790,7 @@ static int handle_cmd(AHCIState *s, int port, int slot) uint64_t tbl_addr; AHCICmdHdr *cmd; uint8_t *cmd_fis; - target_phys_addr_t cmd_len; + dma_addr_t cmd_len; if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) { /* Engine currently busy, try again later */ @@ -808,7 +812,8 @@ static int handle_cmd(AHCIState *s, int port, int slot) tbl_addr = le64_to_cpu(cmd->tbl_addr); cmd_len = 0x80; - cmd_fis = cpu_physical_memory_map(tbl_addr, &cmd_len, 1); + cmd_fis = dma_memory_map(s->dma, tbl_addr, &cmd_len, + DMA_DIRECTION_FROM_DEVICE); if (!cmd_fis) { DPRINTF(port, "error: guest passed us an invalid cmd fis\n"); @@ -934,7 +939,8 @@ static int handle_cmd(AHCIState *s, int port, int slot) } out: - cpu_physical_memory_unmap(cmd_fis, cmd_len, 1, cmd_len); + dma_memory_unmap(s->dma, cmd_fis, cmd_len, DMA_DIRECTION_FROM_DEVICE, + cmd_len); if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) { /* async command, complete later */ @@ -1114,11 +1120,12 @@ static const IDEDMAOps ahci_dma_ops = { .reset = ahci_dma_reset, }; -void ahci_init(AHCIState *s, DeviceState *qdev, int ports) +void ahci_init(AHCIState *s, DeviceState *qdev, DMAContext *dma, int ports) { qemu_irq *irqs; int i; + s->dma = dma; s->ports = ports; s->dev = g_malloc0(sizeof(AHCIDevice) * ports); ahci_reg_init(s); @@ -1149,21 +1156,20 @@ void ahci_uninit(AHCIState *s) g_free(s->dev); } -void ahci_reset(void *opaque) +void ahci_reset(AHCIState *s) { - struct AHCIPCIState *d = opaque; AHCIPortRegs *pr; int i; - d->ahci.control_regs.irqstatus = 0; - d->ahci.control_regs.ghc = 0; + s->control_regs.irqstatus = 0; + s->control_regs.ghc = 0; - for (i = 0; i < d->ahci.ports; i++) { - pr = &d->ahci.dev[i].port_regs; + for (i = 0; i < s->ports; i++) { + pr = &s->dev[i].port_regs; pr->irq_stat = 0; pr->irq_mask = 0; pr->scr_ctl = 0; - ahci_reset_port(&d->ahci, i); + ahci_reset_port(s, i); } } @@ -1178,15 +1184,20 @@ static const VMStateDescription vmstate_sysbus_ahci = { .unmigratable = 1, }; +static void sysbus_ahci_reset(DeviceState *dev) +{ + SysbusAHCIState *s = DO_UPCAST(SysbusAHCIState, busdev.qdev, dev); + + 
ahci_reset(&s->ahci); +} + static int sysbus_ahci_init(SysBusDevice *dev) { SysbusAHCIState *s = FROM_SYSBUS(SysbusAHCIState, dev); - ahci_init(&s->ahci, &dev->qdev, s->num_ports); + ahci_init(&s->ahci, &dev->qdev, NULL, s->num_ports); sysbus_init_mmio(dev, &s->ahci.mem); sysbus_init_irq(dev, &s->ahci.irq); - - qemu_register_reset(ahci_reset, &s->ahci); return 0; } @@ -1203,6 +1214,7 @@ static void sysbus_ahci_class_init(ObjectClass *klass, void *data) sbc->init = sysbus_ahci_init; dc->vmsd = &vmstate_sysbus_ahci; dc->props = sysbus_ahci_properties; + dc->reset = sysbus_ahci_reset; } static TypeInfo sysbus_ahci_info = { diff --git a/hw/ide/ahci.h b/hw/ide/ahci.h index b223d2c055..1200a56ada 100644 --- a/hw/ide/ahci.h +++ b/hw/ide/ahci.h @@ -299,6 +299,7 @@ typedef struct AHCIState { uint32_t idp_index; /* Current IDP index */ int ports; qemu_irq irq; + DMAContext *dma; } AHCIState; typedef struct AHCIPCIState { @@ -329,9 +330,9 @@ typedef struct NCQFrame { uint8_t reserved10; } QEMU_PACKED NCQFrame; -void ahci_init(AHCIState *s, DeviceState *qdev, int ports); +void ahci_init(AHCIState *s, DeviceState *qdev, DMAContext *dma, int ports); void ahci_uninit(AHCIState *s); -void ahci_reset(void *opaque); +void ahci_reset(AHCIState *s); #endif /* HW_IDE_AHCI_H */ diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c index 5919cf52d8..f7f714c726 100644 --- a/hw/ide/atapi.c +++ b/hw/ide/atapi.c @@ -956,6 +956,36 @@ static void cmd_read_cdvd_capacity(IDEState *s, uint8_t* buf) ide_atapi_cmd_reply(s, 8, 8); } +static void cmd_read_disc_information(IDEState *s, uint8_t* buf) +{ + uint8_t type = buf[1] & 7; + uint32_t max_len = ube16_to_cpu(buf + 7); + + /* Types 1/2 are only defined for Blu-Ray. */ + if (type != 0) { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + return; + } + + memset(buf, 0, 34); + buf[1] = 32; + buf[2] = 0xe; /* last session complete, disc finalized */ + buf[3] = 1; /* first track on disc */ + buf[4] = 1; /* # of sessions */ + buf[5] = 1; /* first track of last session */ + buf[6] = 1; /* last track of last session */ + buf[7] = 0x20; /* unrestricted use */ + buf[8] = 0x00; /* CD-ROM or DVD-ROM */ + /* 9-10-11: most significant byte corresponding bytes 4-5-6 */ + /* 12-23: not meaningful for CD-ROM or DVD-ROM */ + /* 24-31: disc bar code */ + /* 32: disc application code */ + /* 33: number of OPC tables */ + + ide_atapi_cmd_reply(s, 34, max_len); +} + static void cmd_read_dvd_structure(IDEState *s, uint8_t* buf) { int max_len; @@ -1045,6 +1075,7 @@ static const struct { [ 0x43 ] = { cmd_read_toc_pma_atip, CHECK_READY }, [ 0x46 ] = { cmd_get_configuration, ALLOW_UA }, [ 0x4a ] = { cmd_get_event_status_notification, ALLOW_UA }, + [ 0x51 ] = { cmd_read_disc_information, CHECK_READY }, [ 0x5a ] = { cmd_mode_sense, /* (10) */ 0 }, [ 0xa8 ] = { cmd_read, /* (12) */ CHECK_READY }, [ 0xad ] = { cmd_read_dvd_structure, CHECK_READY }, diff --git a/hw/ide/core.c b/hw/ide/core.c index 9785d5f713..71d4d7732a 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -1047,6 +1047,7 @@ static bool ide_cmd_permitted(IDEState *s, uint32_t cmd) void ide_exec_cmd(IDEBus *bus, uint32_t val) { + uint16_t *identify_data; IDEState *s; int n; int lba48 = 0; @@ -1231,10 +1232,21 @@ void ide_exec_cmd(IDEBus *bus, uint32_t val) goto abort_cmd; /* XXX: valid for CDROM ? 
*/ switch(s->feature) { - case 0xcc: /* reverting to power-on defaults enable */ - case 0x66: /* reverting to power-on defaults disable */ case 0x02: /* write cache enable */ + bdrv_set_enable_write_cache(s->bs, true); + identify_data = (uint16_t *)s->identify_data; + put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1); + s->status = READY_STAT | SEEK_STAT; + ide_set_irq(s->bus); + break; case 0x82: /* write cache disable */ + bdrv_set_enable_write_cache(s->bs, false); + identify_data = (uint16_t *)s->identify_data; + put_le16(identify_data + 85, (1 << 14) | 1); + ide_flush_cache(s); + break; + case 0xcc: /* reverting to power-on defaults enable */ + case 0x66: /* reverting to power-on defaults disable */ case 0xaa: /* read look-ahead enable */ case 0x55: /* read look-ahead disable */ case 0x05: /* set advanced power management mode */ @@ -1250,7 +1262,7 @@ void ide_exec_cmd(IDEBus *bus, uint32_t val) break; case 0x03: { /* set transfer mode */ uint8_t val = s->nsector & 0x07; - uint16_t *identify_data = (uint16_t *)s->identify_data; + identify_data = (uint16_t *)s->identify_data; switch (s->nsector >> 3) { case 0x00: /* pio default */ @@ -1983,7 +1995,7 @@ int ide_init_drive(IDEState *s, BlockDriverState *bs, IDEDriveKind kind, if (version) { pstrcpy(s->version, sizeof(s->version), version); } else { - pstrcpy(s->version, sizeof(s->version), QEMU_VERSION); + pstrcpy(s->version, sizeof(s->version), qemu_get_version()); } ide_reset(s); @@ -2146,6 +2158,9 @@ static int ide_drive_post_load(void *opaque, int version_id) s->cdrom_changed = 1; } } + if (s->identify_set) { + bdrv_set_enable_write_cache(s->bs, !!(s->identify_data[85] & (1 << 5))); + } return 0; } diff --git a/hw/ide/ich.c b/hw/ide/ich.c index 560ae37618..319bc2bb10 100644 --- a/hw/ide/ich.c +++ b/hw/ide/ich.c @@ -84,6 +84,13 @@ static const VMStateDescription vmstate_ahci = { .unmigratable = 1, }; +static void pci_ich9_reset(DeviceState *dev) +{ + struct AHCIPCIState *d = DO_UPCAST(struct AHCIPCIState, card.qdev, dev); + + ahci_reset(&d->ahci); +} + static int pci_ich9_ahci_init(PCIDevice *dev) { struct AHCIPCIState *d; @@ -91,7 +98,7 @@ static int pci_ich9_ahci_init(PCIDevice *dev) uint8_t *sata_cap; d = DO_UPCAST(struct AHCIPCIState, card, dev); - ahci_init(&d->ahci, &dev->qdev, 6); + ahci_init(&d->ahci, &dev->qdev, pci_dma_context(dev), 6); pci_config_set_prog_interface(d->card.config, AHCI_PROGMODE_MAJOR_REV_1); @@ -102,8 +109,6 @@ static int pci_ich9_ahci_init(PCIDevice *dev) /* XXX Software should program this register */ d->card.config[0x90] = 1 << 6; /* Address Map Register - AHCI mode */ - qemu_register_reset(ahci_reset, d); - msi_init(dev, 0x50, 1, true, false); d->ahci.irq = d->card.irq[0]; @@ -133,19 +138,11 @@ static int pci_ich9_uninit(PCIDevice *dev) d = DO_UPCAST(struct AHCIPCIState, card, dev); msi_uninit(dev); - qemu_unregister_reset(ahci_reset, d); ahci_uninit(&d->ahci); return 0; } -static void pci_ich9_write_config(PCIDevice *pci, uint32_t addr, - uint32_t val, int len) -{ - pci_default_write_config(pci, addr, val, len); - msi_write_config(pci, addr, val, len); -} - static void ich_ahci_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -153,12 +150,12 @@ static void ich_ahci_class_init(ObjectClass *klass, void *data) k->init = pci_ich9_ahci_init; k->exit = pci_ich9_uninit; - k->config_write = pci_ich9_write_config; k->vendor_id = PCI_VENDOR_ID_INTEL; k->device_id = PCI_DEVICE_ID_INTEL_82801IR; k->revision = 0x02; k->class_id = PCI_CLASS_STORAGE_SATA; dc->vmsd = 
&vmstate_ahci; + dc->reset = pci_ich9_reset; } static TypeInfo ich_ahci_info = { diff --git a/hw/ide/internal.h b/hw/ide/internal.h index f8a027d0e4..1a02f57bf5 100644 --- a/hw/ide/internal.h +++ b/hw/ide/internal.h @@ -25,6 +25,9 @@ typedef struct IDEState IDEState; typedef struct IDEDMA IDEDMA; typedef struct IDEDMAOps IDEDMAOps; +#define TYPE_IDE_BUS "IDE" +#define IDE_BUS(obj) OBJECT_CHECK(IDEBus, (obj), TYPE_IDE_BUS) + /* Bits of HD_STATUS */ #define ERR_STAT 0x01 #define INDEX_STAT 0x02 diff --git a/hw/ide/macio.c b/hw/ide/macio.c index 7b38d9e683..848cb31429 100644 --- a/hw/ide/macio.c +++ b/hw/ide/macio.c @@ -76,7 +76,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret) s->io_buffer_size = io->len; - qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1); + qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1, NULL); qemu_sglist_add(&s->sg, io->addr, io->len); io->addr += io->len; io->len = 0; @@ -133,7 +133,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret) s->io_buffer_index = 0; s->io_buffer_size = io->len; - qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1); + qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1, NULL); qemu_sglist_add(&s->sg, io->addr, io->len); io->addr += io->len; io->len = 0; diff --git a/hw/ide/piix.c b/hw/ide/piix.c index bcaa400e2d..f5a74c293a 100644 --- a/hw/ide/piix.c +++ b/hw/ide/piix.c @@ -22,11 +22,12 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ + #include <hw/hw.h> #include <hw/pc.h> #include <hw/pci.h> #include <hw/isa.h> -#include "block.h" +#include "blockdev.h" #include "sysemu.h" #include "dma.h" diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c index a46578d685..c122395401 100644 --- a/hw/ide/qdev.c +++ b/hw/ide/qdev.c @@ -27,19 +27,28 @@ static char *idebus_get_fw_dev_path(DeviceState *dev); -static struct BusInfo ide_bus_info = { - .name = "IDE", - .size = sizeof(IDEBus), - .get_fw_dev_path = idebus_get_fw_dev_path, - .props = (Property[]) { - DEFINE_PROP_UINT32("unit", IDEDevice, unit, -1), - DEFINE_PROP_END_OF_LIST(), - }, +static Property ide_props[] = { + DEFINE_PROP_UINT32("unit", IDEDevice, unit, -1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ide_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->get_fw_dev_path = idebus_get_fw_dev_path; +} + +static const TypeInfo ide_bus_info = { + .name = TYPE_IDE_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(IDEBus), + .class_init = ide_bus_class_init, }; void ide_bus_new(IDEBus *idebus, DeviceState *dev, int bus_id) { - qbus_create_inplace(&idebus->qbus, &ide_bus_info, dev, NULL); + qbus_create_inplace(&idebus->qbus, TYPE_IDE_BUS, dev, NULL); idebus->bus_id = bus_id; } @@ -248,7 +257,8 @@ static void ide_device_class_init(ObjectClass *klass, void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->init = ide_qdev_init; - k->bus_info = &ide_bus_info; + k->bus_type = TYPE_IDE_BUS; + k->props = ide_props; } static TypeInfo ide_device_type_info = { @@ -262,6 +272,7 @@ static TypeInfo ide_device_type_info = { static void ide_register_types(void) { + type_register_static(&ide_bus_info); type_register_static(&ide_hd_info); type_register_static(&ide_cd_info); type_register_static(&ide_drive_info); diff --git a/hw/imx.h b/hw/imx.h new file mode 100644 index 0000000000..ccf586fefe --- /dev/null +++ b/hw/imx.h @@ -0,0 +1,34 @@ +/* + * i.MX31 emulation + * + * Copyright (C) 2012 Peter Chubb + * NICTA + * + * This code is released under the GPL, version 2.0 or later + * See the 
file `../COPYING' for details. + */ + +#ifndef IMX_H +#define IMX_H + +void imx_serial_create(int uart, const target_phys_addr_t addr, qemu_irq irq); + +typedef enum { + NOCLK, + MCU, + HSP, + IPG, + CLK_32k +} IMXClk; + +uint32_t imx_clock_frequency(DeviceState *s, IMXClk clock); + +void imx_timerp_create(const target_phys_addr_t addr, + qemu_irq irq, + DeviceState *ccm); +void imx_timerg_create(const target_phys_addr_t addr, + qemu_irq irq, + DeviceState *ccm); + + +#endif /* IMX_H */ diff --git a/hw/imx_avic.c b/hw/imx_avic.c new file mode 100644 index 0000000000..25f47f331b --- /dev/null +++ b/hw/imx_avic.c @@ -0,0 +1,408 @@ +/* + * i.MX31 Vectored Interrupt Controller + * + * Note this is NOT the PL192 provided by ARM, but + * a custom implementation by Freescale. + * + * Copyright (c) 2008 OKL + * Copyright (c) 2011 NICTA Pty Ltd + * Originally Written by Hans Jiang + * + * This code is licenced under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + * + * TODO: implement vectors. + */ + +#include "hw.h" +#include "sysbus.h" +#include "host-utils.h" + +#define DEBUG_INT 1 +#undef DEBUG_INT /* comment out for debugging */ + +#ifdef DEBUG_INT +#define DPRINTF(fmt, args...) \ +do { printf("imx_avic: " fmt , ##args); } while (0) +#else +#define DPRINTF(fmt, args...) do {} while (0) +#endif + +/* + * Define to 1 for messages about attempts to + * access unimplemented registers or similar. + */ +#define DEBUG_IMPLEMENTATION 1 +#if DEBUG_IMPLEMENTATION +# define IPRINTF(fmt, args...) \ + do { fprintf(stderr, "imx_avic: " fmt, ##args); } while (0) +#else +# define IPRINTF(fmt, args...) do {} while (0) +#endif + +#define IMX_AVIC_NUM_IRQS 64 + +/* Interrupt Control Bits */ +#define ABFLAG (1<<25) +#define ABFEN (1<<24) +#define NIDIS (1<<22) /* Normal Interrupt disable */ +#define FIDIS (1<<21) /* Fast interrupt disable */ +#define NIAD (1<<20) /* Normal Interrupt Arbiter Rise ARM level */ +#define FIAD (1<<19) /* Fast Interrupt Arbiter Rise ARM level */ +#define NM (1<<18) /* Normal interrupt mode */ + + +#define PRIO_PER_WORD (sizeof(uint32_t) * 8 / 4) +#define PRIO_WORDS (IMX_AVIC_NUM_IRQS/PRIO_PER_WORD) + +typedef struct { + SysBusDevice busdev; + MemoryRegion iomem; + uint64_t pending; + uint64_t enabled; + uint64_t is_fiq; + uint32_t intcntl; + uint32_t intmask; + qemu_irq irq; + qemu_irq fiq; + uint32_t prio[PRIO_WORDS]; /* Priorities are 4-bits each */ +} IMXAVICState; + +static const VMStateDescription vmstate_imx_avic = { + .name = "imx-avic", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(pending, IMXAVICState), + VMSTATE_UINT64(enabled, IMXAVICState), + VMSTATE_UINT64(is_fiq, IMXAVICState), + VMSTATE_UINT32(intcntl, IMXAVICState), + VMSTATE_UINT32(intmask, IMXAVICState), + VMSTATE_UINT32_ARRAY(prio, IMXAVICState, PRIO_WORDS), + VMSTATE_END_OF_LIST() + }, +}; + + + +static inline int imx_avic_prio(IMXAVICState *s, int irq) +{ + uint32_t word = irq / PRIO_PER_WORD; + uint32_t part = 4 * (irq % PRIO_PER_WORD); + return 0xf & (s->prio[word] >> part); +} + +static inline void imx_avic_set_prio(IMXAVICState *s, int irq, int prio) +{ + uint32_t word = irq / PRIO_PER_WORD; + uint32_t part = 4 * (irq % PRIO_PER_WORD); + uint32_t mask = ~(0xf << part); + s->prio[word] &= mask; + s->prio[word] |= prio << part; +} + +/* Update interrupts. 
*/ +static void imx_avic_update(IMXAVICState *s) +{ + int i; + uint64_t new = s->pending & s->enabled; + uint64_t flags; + + flags = new & s->is_fiq; + qemu_set_irq(s->fiq, !!flags); + + flags = new & ~s->is_fiq; + if (!flags || (s->intmask == 0x1f)) { + qemu_set_irq(s->irq, !!flags); + return; + } + + /* + * Take interrupt if there's a pending interrupt with + * priority higher than the value of intmask + */ + for (i = 0; i < IMX_AVIC_NUM_IRQS; i++) { + if (flags & (1UL << i)) { + if (imx_avic_prio(s, i) > s->intmask) { + qemu_set_irq(s->irq, 1); + return; + } + } + } + qemu_set_irq(s->irq, 0); +} + +static void imx_avic_set_irq(void *opaque, int irq, int level) +{ + IMXAVICState *s = (IMXAVICState *)opaque; + + if (level) { + DPRINTF("Raising IRQ %d, prio %d\n", + irq, imx_avic_prio(s, irq)); + s->pending |= (1ULL << irq); + } else { + DPRINTF("Clearing IRQ %d, prio %d\n", + irq, imx_avic_prio(s, irq)); + s->pending &= ~(1ULL << irq); + } + + imx_avic_update(s); +} + + +static uint64_t imx_avic_read(void *opaque, + target_phys_addr_t offset, unsigned size) +{ + IMXAVICState *s = (IMXAVICState *)opaque; + + + DPRINTF("read(offset = 0x%x)\n", offset >> 2); + switch (offset >> 2) { + case 0: /* INTCNTL */ + return s->intcntl; + + case 1: /* Normal Interrupt Mask Register, NIMASK */ + return s->intmask; + + case 2: /* Interrupt Enable Number Register, INTENNUM */ + case 3: /* Interrupt Disable Number Register, INTDISNUM */ + return 0; + + case 4: /* Interrupt Enabled Number Register High */ + return s->enabled >> 32; + + case 5: /* Interrupt Enabled Number Register Low */ + return s->enabled & 0xffffffffULL; + + case 6: /* Interrupt Type Register High */ + return s->is_fiq >> 32; + + case 7: /* Interrupt Type Register Low */ + return s->is_fiq & 0xffffffffULL; + + case 8: /* Normal Interrupt Priority Register 7 */ + case 9: /* Normal Interrupt Priority Register 6 */ + case 10:/* Normal Interrupt Priority Register 5 */ + case 11:/* Normal Interrupt Priority Register 4 */ + case 12:/* Normal Interrupt Priority Register 3 */ + case 13:/* Normal Interrupt Priority Register 2 */ + case 14:/* Normal Interrupt Priority Register 1 */ + case 15:/* Normal Interrupt Priority Register 0 */ + return s->prio[15-(offset>>2)]; + + case 16: /* Normal interrupt vector and status register */ + { + /* + * This returns the highest priority + * outstanding interrupt. Where there is more than + * one pending IRQ with the same priority, + * take the highest numbered one. 
+ */ + uint64_t flags = s->pending & s->enabled & ~s->is_fiq; + int i; + int prio = -1; + int irq = -1; + for (i = 63; i >= 0; --i) { + if (flags & (1ULL<<i)) { + int irq_prio = imx_avic_prio(s, i); + if (irq_prio > prio) { + irq = i; + prio = irq_prio; + } + } + } + if (irq >= 0) { + imx_avic_set_irq(s, irq, 0); + return irq << 16 | prio; + } + return 0xffffffffULL; + } + case 17:/* Fast Interrupt vector and status register */ + { + uint64_t flags = s->pending & s->enabled & s->is_fiq; + int i = ctz64(flags); + if (i < 64) { + imx_avic_set_irq(opaque, i, 0); + return i; + } + return 0xffffffffULL; + } + case 18:/* Interrupt source register high */ + return s->pending >> 32; + + case 19:/* Interrupt source register low */ + return s->pending & 0xffffffffULL; + + case 20:/* Interrupt Force Register high */ + case 21:/* Interrupt Force Register low */ + return 0; + + case 22:/* Normal Interrupt Pending Register High */ + return (s->pending & s->enabled & ~s->is_fiq) >> 32; + + case 23:/* Normal Interrupt Pending Register Low */ + return (s->pending & s->enabled & ~s->is_fiq) & 0xffffffffULL; + + case 24: /* Fast Interrupt Pending Register High */ + return (s->pending & s->enabled & s->is_fiq) >> 32; + + case 25: /* Fast Interrupt Pending Register Low */ + return (s->pending & s->enabled & s->is_fiq) & 0xffffffffULL; + + case 0x40: /* AVIC vector 0, use for WFI WAR */ + return 0x4; + + default: + IPRINTF("imx_avic_read: Bad offset 0x%x\n", (int)offset); + return 0; + } +} + +static void imx_avic_write(void *opaque, target_phys_addr_t offset, + uint64_t val, unsigned size) +{ + IMXAVICState *s = (IMXAVICState *)opaque; + + /* Vector Registers not yet supported */ + if (offset >= 0x100 && offset <= 0x2fc) { + IPRINTF("imx_avic_write to vector register %d ignored\n", + (offset - 0x100) >> 2); + return; + } + + DPRINTF("imx_avic_write(0x%x) = %x\n", + (unsigned int)offset>>2, (unsigned int)val); + switch (offset >> 2) { + case 0: /* Interrupt Control Register, INTCNTL */ + s->intcntl = val & (ABFEN | NIDIS | FIDIS | NIAD | FIAD | NM); + if (s->intcntl & ABFEN) { + s->intcntl &= ~(val & ABFLAG); + } + break; + + case 1: /* Normal Interrupt Mask Register, NIMASK */ + s->intmask = val & 0x1f; + break; + + case 2: /* Interrupt Enable Number Register, INTENNUM */ + DPRINTF("enable(%d)\n", (int)val); + val &= 0x3f; + s->enabled |= (1ULL << val); + break; + + case 3: /* Interrupt Disable Number Register, INTDISNUM */ + DPRINTF("disable(%d)\n", (int)val); + val &= 0x3f; + s->enabled &= ~(1ULL << val); + break; + + case 4: /* Interrupt Enable Number Register High */ + s->enabled = (s->enabled & 0xffffffffULL) | (val << 32); + break; + + case 5: /* Interrupt Enable Number Register Low */ + s->enabled = (s->enabled & 0xffffffff00000000ULL) | val; + break; + + case 6: /* Interrupt Type Register High */ + s->is_fiq = (s->is_fiq & 0xffffffffULL) | (val << 32); + break; + + case 7: /* Interrupt Type Register Low */ + s->is_fiq = (s->is_fiq & 0xffffffff00000000ULL) | val; + break; + + case 8: /* Normal Interrupt Priority Register 7 */ + case 9: /* Normal Interrupt Priority Register 6 */ + case 10:/* Normal Interrupt Priority Register 5 */ + case 11:/* Normal Interrupt Priority Register 4 */ + case 12:/* Normal Interrupt Priority Register 3 */ + case 13:/* Normal Interrupt Priority Register 2 */ + case 14:/* Normal Interrupt Priority Register 1 */ + case 15:/* Normal Interrupt Priority Register 0 */ + s->prio[15-(offset>>2)] = val; + break; + + /* Read-only registers, writes ignored */ + case 16:/* Normal 
Interrupt Vector and Status register */ + case 17:/* Fast Interrupt vector and status register */ + case 18:/* Interrupt source register high */ + case 19:/* Interrupt source register low */ + return; + + case 20:/* Interrupt Force Register high */ + s->pending = (s->pending & 0xffffffffULL) | (val << 32); + break; + + case 21:/* Interrupt Force Register low */ + s->pending = (s->pending & 0xffffffff00000000ULL) | val; + break; + + case 22:/* Normal Interrupt Pending Register High */ + case 23:/* Normal Interrupt Pending Register Low */ + case 24: /* Fast Interrupt Pending Register High */ + case 25: /* Fast Interrupt Pending Register Low */ + return; + + default: + IPRINTF("imx_avic_write: Bad offset %x\n", (int)offset); + } + imx_avic_update(s); +} + +static const MemoryRegionOps imx_avic_ops = { + .read = imx_avic_read, + .write = imx_avic_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void imx_avic_reset(DeviceState *dev) +{ + IMXAVICState *s = container_of(dev, IMXAVICState, busdev.qdev); + s->pending = 0; + s->enabled = 0; + s->is_fiq = 0; + s->intmask = 0x1f; + s->intcntl = 0; + memset(s->prio, 0, sizeof s->prio); +} + +static int imx_avic_init(SysBusDevice *dev) +{ + IMXAVICState *s = FROM_SYSBUS(IMXAVICState, dev);; + + memory_region_init_io(&s->iomem, &imx_avic_ops, s, "imx_avic", 0x1000); + sysbus_init_mmio(dev, &s->iomem); + + qdev_init_gpio_in(&dev->qdev, imx_avic_set_irq, IMX_AVIC_NUM_IRQS); + sysbus_init_irq(dev, &s->irq); + sysbus_init_irq(dev, &s->fiq); + + return 0; +} + + +static void imx_avic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + k->init = imx_avic_init; + dc->vmsd = &vmstate_imx_avic; + dc->reset = imx_avic_reset; + dc->desc = "i.MX Advanced Vector Interrupt Controller"; +} + +static const TypeInfo imx_avic_info = { + .name = "imx_avic", + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXAVICState), + .class_init = imx_avic_class_init, +}; + +static void imx_avic_register_types(void) +{ + type_register_static(&imx_avic_info); +} + +type_init(imx_avic_register_types) diff --git a/hw/imx_ccm.c b/hw/imx_ccm.c new file mode 100644 index 0000000000..10952c6ea1 --- /dev/null +++ b/hw/imx_ccm.c @@ -0,0 +1,321 @@ +/* + * IMX31 Clock Control Module + * + * Copyright (C) 2012 NICTA + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * To get the timer frequencies right, we need to emulate at least part of + * the CCM. + */ + +#include "hw.h" +#include "sysbus.h" +#include "sysemu.h" +#include "imx.h" + +#define CKIH_FREQ 26000000 /* 26MHz crystal input */ +#define CKIL_FREQ 32768 /* nominal 32khz clock */ + + +//#define DEBUG_CCM 1 +#ifdef DEBUG_CCM +#define DPRINTF(fmt, args...) \ +do { printf("imx_ccm: " fmt , ##args); } while (0) +#else +#define DPRINTF(fmt, args...) 
do {} while (0) +#endif + +static int imx_ccm_post_load(void *opaque, int version_id); + +typedef struct { + SysBusDevice busdev; + MemoryRegion iomem; + + uint32_t ccmr; + uint32_t pdr0; + uint32_t pdr1; + uint32_t mpctl; + uint32_t spctl; + uint32_t cgr[3]; + uint32_t pmcr0; + uint32_t pmcr1; + + /* Frequencies precalculated on register changes */ + uint32_t pll_refclk_freq; + uint32_t mcu_clk_freq; + uint32_t hsp_clk_freq; + uint32_t ipg_clk_freq; +} IMXCCMState; + +static const VMStateDescription vmstate_imx_ccm = { + .name = "imx-ccm", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ccmr, IMXCCMState), + VMSTATE_UINT32(pdr0, IMXCCMState), + VMSTATE_UINT32(pdr1, IMXCCMState), + VMSTATE_UINT32(mpctl, IMXCCMState), + VMSTATE_UINT32(spctl, IMXCCMState), + VMSTATE_UINT32_ARRAY(cgr, IMXCCMState, 3), + VMSTATE_UINT32(pmcr0, IMXCCMState), + VMSTATE_UINT32(pmcr1, IMXCCMState), + VMSTATE_UINT32(pll_refclk_freq, IMXCCMState), + }, + .post_load = imx_ccm_post_load, +}; + +/* CCMR */ +#define CCMR_FPME (1<<0) +#define CCMR_MPE (1<<3) +#define CCMR_MDS (1<<7) +#define CCMR_FPMF (1<<26) +#define CCMR_PRCS (3<<1) + +/* PDR0 */ +#define PDR0_MCU_PODF_SHIFT (0) +#define PDR0_MCU_PODF_MASK (0x7) +#define PDR0_MAX_PODF_SHIFT (3) +#define PDR0_MAX_PODF_MASK (0x7) +#define PDR0_IPG_PODF_SHIFT (6) +#define PDR0_IPG_PODF_MASK (0x3) +#define PDR0_NFC_PODF_SHIFT (8) +#define PDR0_NFC_PODF_MASK (0x7) +#define PDR0_HSP_PODF_SHIFT (11) +#define PDR0_HSP_PODF_MASK (0x7) +#define PDR0_PER_PODF_SHIFT (16) +#define PDR0_PER_PODF_MASK (0x1f) +#define PDR0_CSI_PODF_SHIFT (23) +#define PDR0_CSI_PODF_MASK (0x1ff) + +#define EXTRACT(value, name) (((value) >> PDR0_##name##_PODF_SHIFT) \ + & PDR0_##name##_PODF_MASK) +#define INSERT(value, name) (((value) & PDR0_##name##_PODF_MASK) << \ + PDR0_##name##_PODF_SHIFT) +/* PLL control registers */ +#define PD(v) (((v) >> 26) & 0xf) +#define MFD(v) (((v) >> 16) & 0x3ff) +#define MFI(v) (((v) >> 10) & 0xf); +#define MFN(v) ((v) & 0x3ff) + +#define PLL_PD(x) (((x) & 0xf) << 26) +#define PLL_MFD(x) (((x) & 0x3ff) << 16) +#define PLL_MFI(x) (((x) & 0xf) << 10) +#define PLL_MFN(x) (((x) & 0x3ff) << 0) + +uint32_t imx_clock_frequency(DeviceState *dev, IMXClk clock) +{ + IMXCCMState *s = container_of(dev, IMXCCMState, busdev.qdev); + + switch (clock) { + case NOCLK: + return 0; + case MCU: + return s->mcu_clk_freq; + case HSP: + return s->hsp_clk_freq; + case IPG: + return s->ipg_clk_freq; + case CLK_32k: + return CKIL_FREQ; + } + return 0; +} + +/* + * Calculate PLL output frequency + */ +static uint32_t calc_pll(uint32_t pllreg, uint32_t base_freq) +{ + int32_t mfn = MFN(pllreg); /* Numerator */ + uint32_t mfi = MFI(pllreg); /* Integer part */ + uint32_t mfd = 1 + MFD(pllreg); /* Denominator */ + uint32_t pd = 1 + PD(pllreg); /* Pre-divider */ + + if (mfi < 5) { + mfi = 5; + } + /* mfn is 10-bit signed twos-complement */ + mfn <<= 32 - 10; + mfn >>= 32 - 10; + + return ((2 * (base_freq >> 10) * (mfi * mfd + mfn)) / + (mfd * pd)) << 10; +} + +static void update_clocks(IMXCCMState *s) +{ + /* + * If we ever emulate more clocks, this should switch to a data-driven + * approach + */ + + if ((s->ccmr & CCMR_PRCS) == 1) { + s->pll_refclk_freq = CKIL_FREQ * 1024; + } else { + s->pll_refclk_freq = CKIH_FREQ; + } + + /* ipg_clk_arm aka MCU clock */ + if ((s->ccmr & CCMR_MDS) || !(s->ccmr & CCMR_MPE)) { + s->mcu_clk_freq = s->pll_refclk_freq; + } else { + s->mcu_clk_freq = calc_pll(s->mpctl, s->pll_refclk_freq); + } + + 
/* High-speed clock */ + s->hsp_clk_freq = s->mcu_clk_freq / (1 + EXTRACT(s->pdr0, HSP)); + s->ipg_clk_freq = s->hsp_clk_freq / (1 + EXTRACT(s->pdr0, IPG)); + + DPRINTF("Clocks: mcu %uMHz, HSP %uMHz, IPG %uHz\n", + s->mcu_clk_freq / 1000000, + s->hsp_clk_freq / 1000000, + s->ipg_clk_freq); +} + +static void imx_ccm_reset(DeviceState *dev) +{ + IMXCCMState *s = container_of(dev, IMXCCMState, busdev.qdev); + + s->ccmr = 0x074b0b7b; + s->pdr0 = 0xff870b48; + s->pdr1 = 0x49fcfe7f; + s->mpctl = PLL_PD(1) | PLL_MFD(0) | PLL_MFI(6) | PLL_MFN(0); + s->cgr[0] = s->cgr[1] = s->cgr[2] = 0xffffffff; + s->spctl = PLL_PD(1) | PLL_MFD(4) | PLL_MFI(0xc) | PLL_MFN(1); + s->pmcr0 = 0x80209828; + + update_clocks(s); +} + +static uint64_t imx_ccm_read(void *opaque, target_phys_addr_t offset, + unsigned size) +{ + IMXCCMState *s = (IMXCCMState *)opaque; + + DPRINTF("read(offset=%x)", offset >> 2); + switch (offset >> 2) { + case 0: /* CCMR */ + DPRINTF(" ccmr = 0x%x\n", s->ccmr); + return s->ccmr; + case 1: + DPRINTF(" pdr0 = 0x%x\n", s->pdr0); + return s->pdr0; + case 2: + DPRINTF(" pdr1 = 0x%x\n", s->pdr1); + return s->pdr1; + case 4: + DPRINTF(" mpctl = 0x%x\n", s->mpctl); + return s->mpctl; + case 6: + DPRINTF(" spctl = 0x%x\n", s->spctl); + return s->spctl; + case 8: + DPRINTF(" cgr0 = 0x%x\n", s->cgr[0]); + return s->cgr[0]; + case 9: + DPRINTF(" cgr1 = 0x%x\n", s->cgr[1]); + return s->cgr[1]; + case 10: + DPRINTF(" cgr2 = 0x%x\n", s->cgr[2]); + return s->cgr[2]; + case 18: /* LTR1 */ + return 0x00004040; + case 23: + DPRINTF(" pcmr0 = 0x%x\n", s->pmcr0); + return s->pmcr0; + } + DPRINTF(" return 0\n"); + return 0; +} + +static void imx_ccm_write(void *opaque, target_phys_addr_t offset, + uint64_t value, unsigned size) +{ + IMXCCMState *s = (IMXCCMState *)opaque; + + DPRINTF("write(offset=%x, value = %x)\n", + offset >> 2, (unsigned int)value); + switch (offset >> 2) { + case 0: + s->ccmr = CCMR_FPMF | (value & 0x3b6fdfff); + break; + case 1: + s->pdr0 = value & 0xff9f3fff; + break; + case 2: + s->pdr1 = value; + break; + case 4: + s->mpctl = value & 0xbfff3fff; + break; + case 6: + s->spctl = value & 0xbfff3fff; + break; + case 8: + s->cgr[0] = value; + return; + case 9: + s->cgr[1] = value; + return; + case 10: + s->cgr[2] = value; + return; + + default: + return; + } + update_clocks(s); +} + +static const struct MemoryRegionOps imx_ccm_ops = { + .read = imx_ccm_read, + .write = imx_ccm_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static int imx_ccm_init(SysBusDevice *dev) +{ + IMXCCMState *s = FROM_SYSBUS(typeof(*s), dev); + + memory_region_init_io(&s->iomem, &imx_ccm_ops, s, "imx_ccm", 0x1000); + sysbus_init_mmio(dev, &s->iomem); + + return 0; +} + +static int imx_ccm_post_load(void *opaque, int version_id) +{ + IMXCCMState *s = (IMXCCMState *)opaque; + + update_clocks(s); + return 0; +} + +static void imx_ccm_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass); + + sbc->init = imx_ccm_init; + dc->reset = imx_ccm_reset; + dc->vmsd = &vmstate_imx_ccm; + dc->desc = "i.MX Clock Control Module"; +} + +static TypeInfo imx_ccm_info = { + .name = "imx_ccm", + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXCCMState), + .class_init = imx_ccm_class_init, +}; + +static void imx_ccm_register_types(void) +{ + type_register_static(&imx_ccm_info); +} + +type_init(imx_ccm_register_types) diff --git a/hw/imx_serial.c b/hw/imx_serial.c new file mode 100644 index 0000000000..d4eae430f5 --- /dev/null +++ 
b/hw/imx_serial.c @@ -0,0 +1,467 @@ +/* + * IMX31 UARTS + * + * Copyright (c) 2008 OKL + * Originally Written by Hans Jiang + * Copyright (c) 2011 NICTA Pty Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * This is a `bare-bones' implementation of the IMX series serial ports. + * TODO: + * -- implement FIFOs. The real hardware has 32 word transmit + * and receive FIFOs; we currently use a 1-char buffer + * -- implement DMA + * -- implement BAUD-rate and modem lines, for when the backend + * is a real serial device. + */ + +#include "hw.h" +#include "sysbus.h" +#include "sysemu.h" +#include "qemu-char.h" +#include "imx.h" + +//#define DEBUG_SERIAL 1 +#ifdef DEBUG_SERIAL +#define DPRINTF(fmt, args...) \ +do { printf("imx_serial: " fmt , ##args); } while (0) +#else +#define DPRINTF(fmt, args...) do {} while (0) +#endif + +/* + * Define to 1 for messages about attempts to + * access unimplemented registers or similar. + */ +//#define DEBUG_IMPLEMENTATION 1 +#ifdef DEBUG_IMPLEMENTATION +# define IPRINTF(fmt, args...) \ + do { fprintf(stderr, "imx_serial: " fmt, ##args); } while (0) +#else +# define IPRINTF(fmt, args...) do {} while (0) +#endif + +typedef struct { + SysBusDevice busdev; + MemoryRegion iomem; + int32_t readbuff; + + uint32_t usr1; + uint32_t usr2; + uint32_t ucr1; + uint32_t ucr2; + uint32_t uts1; + + /* + * The registers below are implemented just so that the + * guest OS sees what it has written + */ + uint32_t onems; + uint32_t ufcr; + uint32_t ubmr; + uint32_t ubrc; + uint32_t ucr3; + + qemu_irq irq; + CharDriverState *chr; +} IMXSerialState; + +static const VMStateDescription vmstate_imx_serial = { + .name = "imx-serial", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_INT32(readbuff, IMXSerialState), + VMSTATE_UINT32(usr1, IMXSerialState), + VMSTATE_UINT32(usr2, IMXSerialState), + VMSTATE_UINT32(ucr1, IMXSerialState), + VMSTATE_UINT32(uts1, IMXSerialState), + VMSTATE_UINT32(onems, IMXSerialState), + VMSTATE_UINT32(ufcr, IMXSerialState), + VMSTATE_UINT32(ubmr, IMXSerialState), + VMSTATE_UINT32(ubrc, IMXSerialState), + VMSTATE_UINT32(ucr3, IMXSerialState), + VMSTATE_END_OF_LIST() + }, +}; + + +#define URXD_CHARRDY (1<<15) /* character read is valid */ +#define URXD_ERR (1<<14) /* Character has error */ +#define URXD_BRK (1<<11) /* Break received */ + +#define USR1_PARTYER (1<<15) /* Parity Error */ +#define USR1_RTSS (1<<14) /* RTS pin status */ +#define USR1_TRDY (1<<13) /* Tx ready */ +#define USR1_RTSD (1<<12) /* RTS delta: pin changed state */ +#define USR1_ESCF (1<<11) /* Escape sequence interrupt */ +#define USR1_FRAMERR (1<<10) /* Framing error */ +#define USR1_RRDY (1<<9) /* receiver ready */ +#define USR1_AGTIM (1<<8) /* Aging timer interrupt */ +#define USR1_DTRD (1<<7) /* DTR changed */ +#define USR1_RXDS (1<<6) /* Receiver is idle */ +#define USR1_AIRINT (1<<5) /* Aysnch IR interrupt */ +#define USR1_AWAKE (1<<4) /* Falling edge detected on RXd pin */ + +#define USR2_ADET (1<<15) /* Autobaud complete */ +#define USR2_TXFE (1<<14) /* Transmit FIFO empty */ +#define USR2_DTRF (1<<13) /* DTR/DSR transition */ +#define USR2_IDLE (1<<12) /* UART has been idle for too long */ +#define USR2_ACST (1<<11) /* Autobaud counter stopped */ +#define USR2_RIDELT (1<<10) /* Ring Indicator delta */ +#define USR2_RIIN (1<<9) /* Ring Indicator Input */ +#define USR2_IRINT (1<<8) /* Serial Infrared Interrupt */ 
+#define USR2_WAKE (1<<7) /* Start bit detected */ +#define USR2_DCDDELT (1<<6) /* Data Carrier Detect delta */ +#define USR2_DCDIN (1<<5) /* Data Carrier Detect Input */ +#define USR2_RTSF (1<<4) /* RTS transition */ +#define USR2_TXDC (1<<3) /* Transmission complete */ +#define USR2_BRCD (1<<2) /* Break condition detected */ +#define USR2_ORE (1<<1) /* Overrun error */ +#define USR2_RDR (1<<0) /* Receive data ready */ + +#define UCR1_TRDYEN (1<<13) /* Tx Ready Interrupt Enable */ +#define UCR1_RRDYEN (1<<9) /* Rx Ready Interrupt Enable */ +#define UCR1_TXMPTYEN (1<<6) /* Tx Empty Interrupt Enable */ +#define UCR1_UARTEN (1<<0) /* UART Enable */ + +#define UCR2_TXEN (1<<2) /* Transmitter enable */ +#define UCR2_RXEN (1<<1) /* Receiver enable */ +#define UCR2_SRST (1<<0) /* Reset complete */ + +#define UTS1_TXEMPTY (1<<6) +#define UTS1_RXEMPTY (1<<5) +#define UTS1_TXFULL (1<<4) +#define UTS1_RXFULL (1<<3) + +static void imx_update(IMXSerialState *s) +{ + uint32_t flags; + + flags = (s->usr1 & s->ucr1) & (USR1_TRDY|USR1_RRDY); + if (!(s->ucr1 & UCR1_TXMPTYEN)) { + flags &= ~USR1_TRDY; + } + + qemu_set_irq(s->irq, !!flags); +} + +static void imx_serial_reset(IMXSerialState *s) +{ + + s->usr1 = USR1_TRDY | USR1_RXDS; + /* + * Fake attachment of a terminal: assert RTS. + */ + s->usr1 |= USR1_RTSS; + s->usr2 = USR2_TXFE | USR2_TXDC | USR2_DCDIN; + s->uts1 = UTS1_RXEMPTY | UTS1_TXEMPTY; + s->ucr1 = 0; + s->ucr2 = UCR2_SRST; + s->ucr3 = 0x700; + s->ubmr = 0; + s->ubrc = 4; + s->readbuff = URXD_ERR; +} + +static void imx_serial_reset_at_boot(DeviceState *dev) +{ + IMXSerialState *s = container_of(dev, IMXSerialState, busdev.qdev); + + imx_serial_reset(s); + + /* + * enable the uart on boot, so messages from the linux decompresser + * are visible. On real hardware this is done by the boot rom + * before anything else is loaded. + */ + s->ucr1 = UCR1_UARTEN; + s->ucr2 = UCR2_TXEN; + +} + +static uint64_t imx_serial_read(void *opaque, target_phys_addr_t offset, + unsigned size) +{ + IMXSerialState *s = (IMXSerialState *)opaque; + uint32_t c; + + DPRINTF("read(offset=%x)\n", offset >> 2); + switch (offset >> 2) { + case 0x0: /* URXD */ + c = s->readbuff; + if (!(s->uts1 & UTS1_RXEMPTY)) { + /* Character is valid */ + c |= URXD_CHARRDY; + s->usr1 &= ~USR1_RRDY; + s->usr2 &= ~USR2_RDR; + s->uts1 |= UTS1_RXEMPTY; + imx_update(s); + qemu_chr_accept_input(s->chr); + } + return c; + + case 0x20: /* UCR1 */ + return s->ucr1; + + case 0x21: /* UCR2 */ + return s->ucr2; + + case 0x25: /* USR1 */ + return s->usr1; + + case 0x26: /* USR2 */ + return s->usr2; + + case 0x2A: /* BRM Modulator */ + return s->ubmr; + + case 0x2B: /* Baud Rate Count */ + return s->ubrc; + + case 0x2d: /* Test register */ + return s->uts1; + + case 0x24: /* UFCR */ + return s->ufcr; + + case 0x2c: + return s->onems; + + case 0x22: /* UCR3 */ + return s->ucr3; + + case 0x23: /* UCR4 */ + case 0x29: /* BRM Incremental */ + return 0x0; /* TODO */ + + default: + IPRINTF("imx_serial_read: bad offset: 0x%x\n", (int)offset); + return 0; + } +} + +static void imx_serial_write(void *opaque, target_phys_addr_t offset, + uint64_t value, unsigned size) +{ + IMXSerialState *s = (IMXSerialState *)opaque; + unsigned char ch; + + DPRINTF("write(offset=%x, value = %x) to %s\n", + offset >> 2, + (unsigned int)value, s->chr ? 
s->chr->label : "NODEV"); + + switch (offset >> 2) { + case 0x10: /* UTXD */ + ch = value; + if (s->ucr2 & UCR2_TXEN) { + if (s->chr) { + qemu_chr_fe_write(s->chr, &ch, 1); + } + s->usr1 &= ~USR1_TRDY; + imx_update(s); + s->usr1 |= USR1_TRDY; + imx_update(s); + } + break; + + case 0x20: /* UCR1 */ + s->ucr1 = value & 0xffff; + DPRINTF("write(ucr1=%x)\n", (unsigned int)value); + imx_update(s); + break; + + case 0x21: /* UCR2 */ + /* + * Only a few bits in control register 2 are implemented as yet. + * If it's intended to use a real serial device as a back-end, this + * register will have to be implemented more fully. + */ + if (!(value & UCR2_SRST)) { + imx_serial_reset(s); + imx_update(s); + value |= UCR2_SRST; + } + if (value & UCR2_RXEN) { + if (!(s->ucr2 & UCR2_RXEN)) { + qemu_chr_accept_input(s->chr); + } + } + s->ucr2 = value & 0xffff; + break; + + case 0x25: /* USR1 */ + value &= USR1_AWAKE | USR1_AIRINT | USR1_DTRD | USR1_AGTIM | + USR1_FRAMERR | USR1_ESCF | USR1_RTSD | USR1_PARTYER; + s->usr1 &= ~value; + break; + + case 0x26: /* USR2 */ + /* + * Writing 1 to some bits clears them; all other + * values are ignored + */ + value &= USR2_ADET | USR2_DTRF | USR2_IDLE | USR2_ACST | + USR2_RIDELT | USR2_IRINT | USR2_WAKE | + USR2_DCDDELT | USR2_RTSF | USR2_BRCD | USR2_ORE; + s->usr2 &= ~value; + break; + + /* + * Linux expects to see what it writes to these registers + * We don't currently alter the baud rate + */ + case 0x29: /* UBIR */ + s->ubrc = value & 0xffff; + break; + + case 0x2a: /* UBMR */ + s->ubmr = value & 0xffff; + break; + + case 0x2c: /* One ms reg */ + s->onems = value & 0xffff; + break; + + case 0x24: /* FIFO control register */ + s->ufcr = value & 0xffff; + break; + + case 0x22: /* UCR3 */ + s->ucr3 = value & 0xffff; + break; + + case 0x2d: /* UTS1 */ + case 0x23: /* UCR4 */ + IPRINTF("Unimplemented Register %x written to\n", offset >> 2); + /* TODO */ + break; + + default: + IPRINTF("imx_serial_write: Bad offset 0x%x\n", (int)offset); + } +} + +static int imx_can_receive(void *opaque) +{ + IMXSerialState *s = (IMXSerialState *)opaque; + return !(s->usr1 & USR1_RRDY); +} + +static void imx_put_data(void *opaque, uint32_t value) +{ + IMXSerialState *s = (IMXSerialState *)opaque; + DPRINTF("received char\n"); + s->usr1 |= USR1_RRDY; + s->usr2 |= USR2_RDR; + s->uts1 &= ~UTS1_RXEMPTY; + s->readbuff = value; + imx_update(s); +} + +static void imx_receive(void *opaque, const uint8_t *buf, int size) +{ + imx_put_data(opaque, *buf); +} + +static void imx_event(void *opaque, int event) +{ + if (event == CHR_EVENT_BREAK) { + imx_put_data(opaque, URXD_BRK); + } +} + + +static const struct MemoryRegionOps imx_serial_ops = { + .read = imx_serial_read, + .write = imx_serial_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static int imx_serial_init(SysBusDevice *dev) +{ + IMXSerialState *s = FROM_SYSBUS(IMXSerialState, dev); + + + memory_region_init_io(&s->iomem, &imx_serial_ops, s, "imx-serial", 0x1000); + sysbus_init_mmio(dev, &s->iomem); + sysbus_init_irq(dev, &s->irq); + + if (s->chr) { + qemu_chr_add_handlers(s->chr, imx_can_receive, imx_receive, + imx_event, s); + } else { + DPRINTF("No char dev for uart at 0x%lx\n", + (unsigned long)s->iomem.ram_addr); + } + + return 0; +} + +void imx_serial_create(int uart, const target_phys_addr_t addr, qemu_irq irq) +{ + DeviceState *dev; + SysBusDevice *bus; + CharDriverState *chr; + const char chr_name[] = "serial"; + char label[ARRAY_SIZE(chr_name) + 1]; + + dev = qdev_create(NULL, "imx-serial"); + + if (uart >= MAX_SERIAL_PORTS) { + 
hw_error("Cannot assign uart %d: QEMU supports only %d ports\n", + uart, MAX_SERIAL_PORTS); + } + chr = serial_hds[uart]; + if (!chr) { + snprintf(label, ARRAY_SIZE(label), "%s%d", chr_name, uart); + chr = qemu_chr_new(label, "null", NULL); + if (!(chr)) { + hw_error("Can't assign serial port to imx-uart%d.\n", uart); + } + } + + qdev_prop_set_chr(dev, "chardev", chr); + bus = sysbus_from_qdev(dev); + qdev_init_nofail(dev); + if (addr != (target_phys_addr_t)-1) { + sysbus_mmio_map(bus, 0, addr); + } + sysbus_connect_irq(bus, 0, irq); + +} + + +static Property imx32_serial_properties[] = { + DEFINE_PROP_CHR("chardev", IMXSerialState, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void imx_serial_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + + k->init = imx_serial_init; + dc->vmsd = &vmstate_imx_serial; + dc->reset = imx_serial_reset_at_boot; + dc->desc = "i.MX series UART"; + dc->props = imx32_serial_properties; +} + +static TypeInfo imx_serial_info = { + .name = "imx-serial", + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXSerialState), + .class_init = imx_serial_class_init, +}; + +static void imx_serial_register_types(void) +{ + type_register_static(&imx_serial_info); +} + +type_init(imx_serial_register_types) diff --git a/hw/imx_timer.c b/hw/imx_timer.c new file mode 100644 index 0000000000..16215ccf04 --- /dev/null +++ b/hw/imx_timer.c @@ -0,0 +1,689 @@ +/* + * IMX31 Timer + * + * Copyright (c) 2008 OK Labs + * Copyright (c) 2011 NICTA Pty Ltd + * Originally Written by Hans Jiang + * Updated by Peter Chubb + * + * This code is licenced under GPL version 2 or later. See + * the COPYING file in the top-level directory. + * + */ + +#include "hw.h" +#include "qemu-timer.h" +#include "ptimer.h" +#include "sysbus.h" +#include "imx.h" + +//#define DEBUG_TIMER 1 +#ifdef DEBUG_TIMER +# define DPRINTF(fmt, args...) \ + do { printf("imx_timer: " fmt , ##args); } while (0) +#else +# define DPRINTF(fmt, args...) do {} while (0) +#endif + +/* + * Define to 1 for messages about attempts to + * access unimplemented registers or similar. + */ +#define DEBUG_IMPLEMENTATION 1 +#if DEBUG_IMPLEMENTATION +# define IPRINTF(fmt, args...) \ + do { fprintf(stderr, "imx_timer: " fmt, ##args); } while (0) +#else +# define IPRINTF(fmt, args...) do {} while (0) +#endif + +/* + * GPT : General purpose timer + * + * This timer counts up continuously while it is enabled, resetting itself + * to 0 when it reaches TIMER_MAX (in freerun mode) or when it + * reaches the value of ocr1 (in periodic mode). WE simulate this using a + * QEMU ptimer counting down from ocr1 and reloading from ocr1 in + * periodic mode, or counting from ocr1 to zero, then TIMER_MAX - ocr1. + * waiting_rov is set when counting from TIMER_MAX. + * + * In the real hardware, there are three comparison registers that can + * trigger interrupts, and compare channel 1 can be used to + * force-reset the timer. However, this is a `bare-bones' + * implementation: only what Linux 3.x uses has been implemented + * (free-running timer from 0 to OCR1 or TIMER_MAX) . + */ + + +#define TIMER_MAX 0XFFFFFFFFUL + +/* Control register. 
Not all of these bits have any effect (yet) */ +#define GPT_CR_EN (1 << 0) /* GPT Enable */ +#define GPT_CR_ENMOD (1 << 1) /* GPT Enable Mode */ +#define GPT_CR_DBGEN (1 << 2) /* GPT Debug mode enable */ +#define GPT_CR_WAITEN (1 << 3) /* GPT Wait Mode Enable */ +#define GPT_CR_DOZEN (1 << 4) /* GPT Doze mode enable */ +#define GPT_CR_STOPEN (1 << 5) /* GPT Stop Mode Enable */ +#define GPT_CR_CLKSRC_SHIFT (6) +#define GPT_CR_CLKSRC_MASK (0x7) + +#define GPT_CR_FRR (1 << 9) /* Freerun or Restart */ +#define GPT_CR_SWR (1 << 15) /* Software Reset */ +#define GPT_CR_IM1 (3 << 16) /* Input capture channel 1 mode (2 bits) */ +#define GPT_CR_IM2 (3 << 18) /* Input capture channel 2 mode (2 bits) */ +#define GPT_CR_OM1 (7 << 20) /* Output Compare Channel 1 Mode (3 bits) */ +#define GPT_CR_OM2 (7 << 23) /* Output Compare Channel 2 Mode (3 bits) */ +#define GPT_CR_OM3 (7 << 26) /* Output Compare Channel 3 Mode (3 bits) */ +#define GPT_CR_FO1 (1 << 29) /* Force Output Compare Channel 1 */ +#define GPT_CR_FO2 (1 << 30) /* Force Output Compare Channel 2 */ +#define GPT_CR_FO3 (1 << 31) /* Force Output Compare Channel 3 */ + +#define GPT_SR_OF1 (1 << 0) +#define GPT_SR_ROV (1 << 5) + +#define GPT_IR_OF1IE (1 << 0) +#define GPT_IR_ROVIE (1 << 5) + +typedef struct { + SysBusDevice busdev; + ptimer_state *timer; + MemoryRegion iomem; + DeviceState *ccm; + + uint32_t cr; + uint32_t pr; + uint32_t sr; + uint32_t ir; + uint32_t ocr1; + uint32_t cnt; + + uint32_t waiting_rov; + qemu_irq irq; +} IMXTimerGState; + +static const VMStateDescription vmstate_imx_timerg = { + .name = "imx-timerg", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cr, IMXTimerGState), + VMSTATE_UINT32(pr, IMXTimerGState), + VMSTATE_UINT32(sr, IMXTimerGState), + VMSTATE_UINT32(ir, IMXTimerGState), + VMSTATE_UINT32(ocr1, IMXTimerGState), + VMSTATE_UINT32(cnt, IMXTimerGState), + VMSTATE_UINT32(waiting_rov, IMXTimerGState), + VMSTATE_PTIMER(timer, IMXTimerGState), + VMSTATE_END_OF_LIST() + } +}; + +static const IMXClk imx_timerg_clocks[] = { + NOCLK, /* 000 No clock source */ + IPG, /* 001 ipg_clk, 532MHz*/ + IPG, /* 010 ipg_clk_highfreq */ + NOCLK, /* 011 not defined */ + CLK_32k, /* 100 ipg_clk_32k */ + NOCLK, /* 101 not defined */ + NOCLK, /* 110 not defined */ + NOCLK, /* 111 not defined */ +}; + + +static void imx_timerg_set_freq(IMXTimerGState *s) +{ + int clksrc; + uint32_t freq; + + clksrc = (s->cr >> GPT_CR_CLKSRC_SHIFT) & GPT_CR_CLKSRC_MASK; + freq = imx_clock_frequency(s->ccm, imx_timerg_clocks[clksrc]) / (1 + s->pr); + + DPRINTF("Setting gtimer clksrc %d to frequency %d\n", clksrc, freq); + if (freq) { + ptimer_set_freq(s->timer, freq); + } +} + +static void imx_timerg_update(IMXTimerGState *s) +{ + uint32_t flags = s->sr & s->ir & (GPT_SR_OF1 | GPT_SR_ROV); + + DPRINTF("g-timer SR: %s %s IR=%s %s, %s\n", + s->sr & GPT_SR_OF1 ? "OF1" : "", + s->sr & GPT_SR_ROV ? "ROV" : "", + s->ir & GPT_SR_OF1 ? "OF1" : "", + s->ir & GPT_SR_ROV ? "ROV" : "", + s->cr & GPT_CR_EN ? "CR_EN" : "Not Enabled"); + + + qemu_set_irq(s->irq, (s->cr & GPT_CR_EN) && flags); +} + +static uint32_t imx_timerg_update_counts(IMXTimerGState *s) +{ + uint64_t target = s->waiting_rov ? 
TIMER_MAX : s->ocr1; + uint64_t cnt = ptimer_get_count(s->timer); + s->cnt = target - cnt; + return s->cnt; +} + +static void imx_timerg_reload(IMXTimerGState *s, uint32_t timeout) +{ + uint64_t diff_cnt; + + if (!(s->cr & GPT_CR_FRR)) { + IPRINTF("IMX_timerg_reload --- called in reset-mode\n"); + return; + } + + /* + * For small timeouts, qemu sometimes runs too slow. + * Better deliver a late interrupt than none. + * + * In Reset mode (FRR bit clear) + * the ptimer reloads itself from OCR1; + * in free-running mode we need to fake + * running from 0 to ocr1 to TIMER_MAX + */ + if (timeout > s->cnt) { + diff_cnt = timeout - s->cnt; + } else { + diff_cnt = 0; + } + ptimer_set_count(s->timer, diff_cnt); +} + +static uint64_t imx_timerg_read(void *opaque, target_phys_addr_t offset, + unsigned size) +{ + IMXTimerGState *s = (IMXTimerGState *)opaque; + + DPRINTF("g-read(offset=%x)", offset >> 2); + switch (offset >> 2) { + case 0: /* Control Register */ + DPRINTF(" cr = %x\n", s->cr); + return s->cr; + + case 1: /* prescaler */ + DPRINTF(" pr = %x\n", s->pr); + return s->pr; + + case 2: /* Status Register */ + DPRINTF(" sr = %x\n", s->sr); + return s->sr; + + case 3: /* Interrupt Register */ + DPRINTF(" ir = %x\n", s->ir); + return s->ir; + + case 4: /* Output Compare Register 1 */ + DPRINTF(" ocr1 = %x\n", s->ocr1); + return s->ocr1; + + + case 9: /* cnt */ + imx_timerg_update_counts(s); + DPRINTF(" cnt = %x\n", s->cnt); + return s->cnt; + } + + IPRINTF("imx_timerg_read: Bad offset %x\n", + (int)offset >> 2); + return 0; +} + +static void imx_timerg_reset(DeviceState *dev) +{ + IMXTimerGState *s = container_of(dev, IMXTimerGState, busdev.qdev); + + /* + * Soft reset doesn't touch some bits; hard reset clears them + */ + s->cr &= ~(GPT_CR_EN|GPT_CR_DOZEN|GPT_CR_WAITEN|GPT_CR_DBGEN); + s->sr = 0; + s->pr = 0; + s->ir = 0; + s->cnt = 0; + s->ocr1 = TIMER_MAX; + ptimer_stop(s->timer); + ptimer_set_limit(s->timer, TIMER_MAX, 1); + imx_timerg_set_freq(s); +} + +static void imx_timerg_write(void *opaque, target_phys_addr_t offset, + uint64_t value, unsigned size) +{ + IMXTimerGState *s = (IMXTimerGState *)opaque; + DPRINTF("g-write(offset=%x, value = 0x%x)\n", (unsigned int)offset >> 2, + (unsigned int)value); + + switch (offset >> 2) { + case 0: { + uint32_t oldcr = s->cr; + /* CR */ + if (value & GPT_CR_SWR) { /* force reset */ + value &= ~GPT_CR_SWR; + imx_timerg_reset(&s->busdev.qdev); + imx_timerg_update(s); + } + + s->cr = value & ~0x7c00; + imx_timerg_set_freq(s); + if ((oldcr ^ value) & GPT_CR_EN) { + if (value & GPT_CR_EN) { + if (value & GPT_CR_ENMOD) { + ptimer_set_count(s->timer, s->ocr1); + s->cnt = 0; + } + ptimer_run(s->timer, + (value & GPT_CR_FRR) && (s->ocr1 != TIMER_MAX)); + } else { + ptimer_stop(s->timer); + }; + } + return; + } + + case 1: /* Prescaler */ + s->pr = value & 0xfff; + imx_timerg_set_freq(s); + return; + + case 2: /* SR */ + /* + * No point in implementing the status register bits to do with + * external interrupt sources. 
+ */ + value &= GPT_SR_OF1 | GPT_SR_ROV; + s->sr &= ~value; + imx_timerg_update(s); + return; + + case 3: /* IR -- interrupt register */ + s->ir = value & 0x3f; + imx_timerg_update(s); + return; + + case 4: /* OCR1 -- output compare register */ + /* In non-freerun mode, reset count when this register is written */ + if (!(s->cr & GPT_CR_FRR)) { + s->waiting_rov = 0; + ptimer_set_limit(s->timer, value, 1); + } else { + imx_timerg_update_counts(s); + if (value > s->cnt) { + s->waiting_rov = 0; + imx_timerg_reload(s, value); + } else { + s->waiting_rov = 1; + imx_timerg_reload(s, TIMER_MAX - s->cnt); + } + } + s->ocr1 = value; + return; + + default: + IPRINTF("imx_timerg_write: Bad offset %x\n", + (int)offset >> 2); + } +} + +static void imx_timerg_timeout(void *opaque) +{ + IMXTimerGState *s = (IMXTimerGState *)opaque; + + DPRINTF("imx_timerg_timeout, waiting rov=%d\n", s->waiting_rov); + if (s->cr & GPT_CR_FRR) { + /* + * Free running timer from 0 -> TIMERMAX + * Generates interrupt at TIMER_MAX and at cnt==ocr1 + * If ocr1 == TIMER_MAX, then no need to reload timer. + */ + if (s->ocr1 == TIMER_MAX) { + DPRINTF("s->ocr1 == TIMER_MAX, FRR\n"); + s->sr |= GPT_SR_OF1 | GPT_SR_ROV; + imx_timerg_update(s); + return; + } + + if (s->waiting_rov) { + /* + * We were waiting for cnt==TIMER_MAX + */ + s->sr |= GPT_SR_ROV; + s->waiting_rov = 0; + s->cnt = 0; + imx_timerg_reload(s, s->ocr1); + } else { + /* Must have got a cnt==ocr1 timeout. */ + s->sr |= GPT_SR_OF1; + s->cnt = s->ocr1; + s->waiting_rov = 1; + imx_timerg_reload(s, TIMER_MAX); + } + imx_timerg_update(s); + return; + } + + s->sr |= GPT_SR_OF1; + imx_timerg_update(s); +} + +static const MemoryRegionOps imx_timerg_ops = { + .read = imx_timerg_read, + .write = imx_timerg_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + + +static int imx_timerg_init(SysBusDevice *dev) +{ + IMXTimerGState *s = FROM_SYSBUS(IMXTimerGState, dev); + QEMUBH *bh; + + sysbus_init_irq(dev, &s->irq); + memory_region_init_io(&s->iomem, &imx_timerg_ops, + s, "imxg-timer", + 0x00001000); + sysbus_init_mmio(dev, &s->iomem); + + bh = qemu_bh_new(imx_timerg_timeout, s); + s->timer = ptimer_init(bh); + + /* Hard reset resets extra bits in CR */ + s->cr = 0; + return 0; +} + + + +/* + * EPIT: Enhanced periodic interrupt timer + */ + +#define CR_EN (1 << 0) +#define CR_ENMOD (1 << 1) +#define CR_OCIEN (1 << 2) +#define CR_RLD (1 << 3) +#define CR_PRESCALE_SHIFT (4) +#define CR_PRESCALE_MASK (0xfff) +#define CR_SWR (1 << 16) +#define CR_IOVW (1 << 17) +#define CR_DBGEN (1 << 18) +#define CR_EPIT (1 << 19) +#define CR_DOZEN (1 << 20) +#define CR_STOPEN (1 << 21) +#define CR_CLKSRC_SHIFT (24) +#define CR_CLKSRC_MASK (0x3 << CR_CLKSRC_SHIFT) + + +/* + * Exact clock frequencies vary from board to board. + * These are typical. 
+ */ +static const IMXClk imx_timerp_clocks[] = { + 0, /* disabled */ + IPG, /* ipg_clk, ~532MHz */ + IPG, /* ipg_clk_highfreq */ + CLK_32k, /* ipg_clk_32k -- ~32kHz */ +}; + +typedef struct { + SysBusDevice busdev; + ptimer_state *timer; + MemoryRegion iomem; + DeviceState *ccm; + + uint32_t cr; + uint32_t lr; + uint32_t cmp; + + uint32_t freq; + int int_level; + qemu_irq irq; +} IMXTimerPState; + +/* + * Update interrupt status + */ +static void imx_timerp_update(IMXTimerPState *s) +{ + if (s->int_level && (s->cr & CR_OCIEN)) { + qemu_irq_raise(s->irq); + } else { + qemu_irq_lower(s->irq); + } +} + +static void imx_timerp_reset(DeviceState *dev) +{ + IMXTimerPState *s = container_of(dev, IMXTimerPState, busdev.qdev); + + s->cr = 0; + s->lr = TIMER_MAX; + s->int_level = 0; + s->cmp = 0; + ptimer_stop(s->timer); + ptimer_set_count(s->timer, TIMER_MAX); +} + +static uint64_t imx_timerp_read(void *opaque, target_phys_addr_t offset, + unsigned size) +{ + IMXTimerPState *s = (IMXTimerPState *)opaque; + + DPRINTF("p-read(offset=%x)", offset >> 2); + switch (offset >> 2) { + case 0: /* Control Register */ + DPRINTF("cr %x\n", s->cr); + return s->cr; + + case 1: /* Status Register */ + DPRINTF("int_level %x\n", s->int_level); + return s->int_level; + + case 2: /* LR - ticks*/ + DPRINTF("lr %x\n", s->lr); + return s->lr; + + case 3: /* CMP */ + DPRINTF("cmp %x\n", s->cmp); + return s->cmp; + + case 4: /* CNT */ + return ptimer_get_count(s->timer); + } + IPRINTF("imx_timerp_read: Bad offset %x\n", + (int)offset >> 2); + return 0; +} + +static void set_timerp_freq(IMXTimerPState *s) +{ + int clksrc; + unsigned prescaler; + uint32_t freq; + + clksrc = (s->cr & CR_CLKSRC_MASK) >> CR_CLKSRC_SHIFT; + prescaler = 1 + ((s->cr >> CR_PRESCALE_SHIFT) & CR_PRESCALE_MASK); + freq = imx_clock_frequency(s->ccm, imx_timerp_clocks[clksrc]) / prescaler; + + s->freq = freq; + DPRINTF("Setting ptimer frequency to %u\n", freq); + + if (freq) { + ptimer_set_freq(s->timer, freq); + } +} + +static void imx_timerp_write(void *opaque, target_phys_addr_t offset, + uint64_t value, unsigned size) +{ + IMXTimerPState *s = (IMXTimerPState *)opaque; + DPRINTF("p-write(offset=%x, value = %x)\n", (unsigned int)offset >> 2, + (unsigned int)value); + + switch (offset >> 2) { + case 0: /* CR */ + if (value & CR_SWR) { + imx_timerp_reset(&s->busdev.qdev); + value &= ~CR_SWR; + } + s->cr = value & 0x03ffffff; + set_timerp_freq(s); + + if (s->freq && (s->cr & CR_EN)) { + if (!(s->cr & CR_ENMOD)) { + ptimer_set_count(s->timer, s->lr); + } + ptimer_run(s->timer, 0); + } else { + ptimer_stop(s->timer); + } + break; + + case 1: /* SR - ACK*/ + s->int_level = 0; + imx_timerp_update(s); + break; + + case 2: /* LR - set ticks */ + s->lr = value; + ptimer_set_limit(s->timer, value, !!(s->cr & CR_IOVW)); + break; + + case 3: /* CMP */ + s->cmp = value; + if (value) { + IPRINTF( + "Values for EPIT comparison other than zero not supported\n" + ); + } + break; + + default: + IPRINTF("imx_timerp_write: Bad offset %x\n", + (int)offset >> 2); + } +} + +static void imx_timerp_tick(void *opaque) +{ + IMXTimerPState *s = (IMXTimerPState *)opaque; + + DPRINTF("imxp tick\n"); + if (!(s->cr & CR_RLD)) { + ptimer_set_count(s->timer, TIMER_MAX); + } + s->int_level = 1; + imx_timerp_update(s); +} + +void imx_timerp_create(const target_phys_addr_t addr, + qemu_irq irq, + DeviceState *ccm) +{ + IMXTimerPState *pp; + DeviceState *dev; + + dev = sysbus_create_simple("imx_timerp", addr, irq); + pp = container_of(dev, IMXTimerPState, busdev.qdev); + pp->ccm = ccm; 
+} + +static const MemoryRegionOps imx_timerp_ops = { + .read = imx_timerp_read, + .write = imx_timerp_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_imx_timerp = { + .name = "imx-timerp", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cr, IMXTimerPState), + VMSTATE_UINT32(lr, IMXTimerPState), + VMSTATE_UINT32(cmp, IMXTimerPState), + VMSTATE_UINT32(freq, IMXTimerPState), + VMSTATE_INT32(int_level, IMXTimerPState), + VMSTATE_PTIMER(timer, IMXTimerPState), + VMSTATE_END_OF_LIST() + } +}; + +static int imx_timerp_init(SysBusDevice *dev) +{ + IMXTimerPState *s = FROM_SYSBUS(IMXTimerPState, dev); + QEMUBH *bh; + + DPRINTF("imx_timerp_init\n"); + + sysbus_init_irq(dev, &s->irq); + memory_region_init_io(&s->iomem, &imx_timerp_ops, + s, "imxp-timer", + 0x00001000); + sysbus_init_mmio(dev, &s->iomem); + + bh = qemu_bh_new(imx_timerp_tick, s); + s->timer = ptimer_init(bh); + + return 0; +} + + +void imx_timerg_create(const target_phys_addr_t addr, + qemu_irq irq, + DeviceState *ccm) +{ + IMXTimerGState *pp; + DeviceState *dev; + + dev = sysbus_create_simple("imx_timerg", addr, irq); + pp = container_of(dev, IMXTimerGState, busdev.qdev); + pp->ccm = ccm; +} + +static void imx_timerg_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + k->init = imx_timerg_init; + dc->vmsd = &vmstate_imx_timerg; + dc->reset = imx_timerg_reset; + dc->desc = "i.MX general timer"; +} + +static void imx_timerp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + k->init = imx_timerp_init; + dc->vmsd = &vmstate_imx_timerp; + dc->reset = imx_timerp_reset; + dc->desc = "i.MX periodic timer"; +} + +static const TypeInfo imx_timerp_info = { + .name = "imx_timerp", + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXTimerPState), + .class_init = imx_timerp_class_init, +}; + +static const TypeInfo imx_timerg_info = { + .name = "imx_timerg", + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXTimerGState), + .class_init = imx_timerg_class_init, +}; + +static void imx_timer_register_types(void) +{ + type_register_static(&imx_timerp_info); + type_register_static(&imx_timerg_info); +} + +type_init(imx_timer_register_types) diff --git a/hw/integratorcp.c b/hw/integratorcp.c index 9bdb9e62d6..deacbf4d0d 100644 --- a/hw/integratorcp.c +++ b/hw/integratorcp.c @@ -443,7 +443,7 @@ static void integratorcp_init(ram_addr_t ram_size, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { - CPUARMState *env; + ARMCPU *cpu; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); MemoryRegion *ram_alias = g_new(MemoryRegion, 1); @@ -452,13 +452,15 @@ static void integratorcp_init(ram_addr_t ram_size, DeviceState *dev; int i; - if (!cpu_model) + if (!cpu_model) { cpu_model = "arm926"; - env = cpu_init(cpu_model); - if (!env) { + } + cpu = cpu_arm_init(cpu_model); + if (!cpu) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } + memory_region_init_ram(ram, "integrator.ram", ram_size); vmstate_register_ram_global(ram); /* ??? On a real system the first 1Mb is mapped as SSRAM or boot flash. 
*/ @@ -474,7 +476,7 @@ static void integratorcp_init(ram_addr_t ram_size, qdev_init_nofail(dev); sysbus_mmio_map((SysBusDevice *)dev, 0, 0x10000000); - cpu_pic = arm_pic_init_cpu(env); + cpu_pic = arm_pic_init_cpu(cpu); dev = sysbus_create_varargs("integrator_pic", 0x14000000, cpu_pic[ARM_PIC_CPU_IRQ], cpu_pic[ARM_PIC_CPU_FIQ], NULL); @@ -500,7 +502,7 @@ static void integratorcp_init(ram_addr_t ram_size, integrator_binfo.kernel_filename = kernel_filename; integrator_binfo.kernel_cmdline = kernel_cmdline; integrator_binfo.initrd_filename = initrd_filename; - arm_load_kernel(env, &integrator_binfo); + arm_load_kernel(cpu, &integrator_binfo); } static QEMUMachine integratorcp_machine = { diff --git a/hw/intel-hda.c b/hw/intel-hda.c index bb11af286a..31fe1c54f6 100644 --- a/hw/intel-hda.c +++ b/hw/intel-hda.c @@ -29,20 +29,22 @@ /* --------------------------------------------------------------------- */ /* hda bus */ -static struct BusInfo hda_codec_bus_info = { - .name = "HDA", - .size = sizeof(HDACodecBus), - .props = (Property[]) { - DEFINE_PROP_UINT32("cad", HDACodecDevice, cad, -1), - DEFINE_PROP_END_OF_LIST() - } +static Property hda_props[] = { + DEFINE_PROP_UINT32("cad", HDACodecDevice, cad, -1), + DEFINE_PROP_END_OF_LIST() +}; + +static const TypeInfo hda_codec_bus_info = { + .name = TYPE_HDA_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(HDACodecBus), }; void hda_codec_bus_init(DeviceState *dev, HDACodecBus *bus, hda_codec_response_func response, hda_codec_xfer_func xfer) { - qbus_create_inplace(&bus->qbus, &hda_codec_bus_info, dev, NULL); + qbus_create_inplace(&bus->qbus, TYPE_HDA_BUS, dev, NULL); bus->response = response; bus->xfer = xfer; } @@ -76,10 +78,11 @@ static int hda_codec_dev_exit(DeviceState *qdev) HDACodecDevice *hda_codec_find(HDACodecBus *bus, uint32_t cad) { - DeviceState *qdev; + BusChild *kid; HDACodecDevice *cdev; - QTAILQ_FOREACH(qdev, &bus->qbus.children, sibling) { + QTAILQ_FOREACH(kid, &bus->qbus.children, sibling) { + DeviceState *qdev = kid->child; cdev = DO_UPCAST(HDACodecDevice, qdev, qdev); if (cdev->cad == cad) { return cdev; @@ -481,10 +484,11 @@ static void intel_hda_parse_bdl(IntelHDAState *d, IntelHDAStream *st) static void intel_hda_notify_codecs(IntelHDAState *d, uint32_t stream, bool running, bool output) { - DeviceState *qdev; + BusChild *kid; HDACodecDevice *cdev; - QTAILQ_FOREACH(qdev, &d->codecs.qbus.children, sibling) { + QTAILQ_FOREACH(kid, &d->codecs.qbus.children, sibling) { + DeviceState *qdev = kid->child; HDACodecDeviceClass *cdc; cdev = DO_UPCAST(HDACodecDevice, qdev, qdev); @@ -1103,15 +1107,16 @@ static const MemoryRegionOps intel_hda_mmio_ops = { static void intel_hda_reset(DeviceState *dev) { + BusChild *kid; IntelHDAState *d = DO_UPCAST(IntelHDAState, pci.qdev, dev); - DeviceState *qdev; HDACodecDevice *cdev; intel_hda_regs_reset(d); d->wall_base_ns = qemu_get_clock_ns(vm_clock); /* reset codecs */ - QTAILQ_FOREACH(qdev, &d->codecs.qbus.children, sibling) { + QTAILQ_FOREACH(kid, &d->codecs.qbus.children, sibling) { + DeviceState *qdev = kid->child; cdev = DO_UPCAST(HDACodecDevice, qdev, qdev); device_reset(DEVICE(cdev)); d->state_sts |= (1 << cdev->cad); @@ -1153,17 +1158,6 @@ static int intel_hda_exit(PCIDevice *pci) return 0; } -static void intel_hda_write_config(PCIDevice *pci, uint32_t addr, - uint32_t val, int len) -{ - IntelHDAState *d = DO_UPCAST(IntelHDAState, pci, pci); - - pci_default_write_config(pci, addr, val, len); - if (d->msi) { - msi_write_config(pci, addr, val, len); - } -} - static int 
intel_hda_post_load(void *opaque, int version) { IntelHDAState* d = opaque; @@ -1252,7 +1246,6 @@ static void intel_hda_class_init(ObjectClass *klass, void *data) k->init = intel_hda_init; k->exit = intel_hda_exit; - k->config_write = intel_hda_write_config; k->vendor_id = PCI_VENDOR_ID_INTEL; k->device_id = 0x2668; k->revision = 1; @@ -1275,7 +1268,8 @@ static void hda_codec_device_class_init(ObjectClass *klass, void *data) DeviceClass *k = DEVICE_CLASS(klass); k->init = hda_codec_dev_init; k->exit = hda_codec_dev_exit; - k->bus_info = &hda_codec_bus_info; + k->bus_type = TYPE_HDA_BUS; + k->props = hda_props; } static TypeInfo hda_codec_device_type_info = { @@ -1289,6 +1283,7 @@ static TypeInfo hda_codec_device_type_info = { static void intel_hda_register_types(void) { + type_register_static(&hda_codec_bus_info); type_register_static(&intel_hda_info); type_register_static(&hda_codec_device_type_info); } diff --git a/hw/intel-hda.h b/hw/intel-hda.h index a1cca5b1bb..22e0968d50 100644 --- a/hw/intel-hda.h +++ b/hw/intel-hda.h @@ -14,6 +14,9 @@ #define HDA_CODEC_DEVICE_GET_CLASS(obj) \ OBJECT_GET_CLASS(HDACodecDeviceClass, (obj), TYPE_HDA_CODEC_DEVICE) +#define TYPE_HDA_BUS "HDA" +#define HDA_BUS(obj) OBJECT_CHECK(HDACodecBus, (obj), TYPE_HDA_BUS) + typedef struct HDACodecBus HDACodecBus; typedef struct HDACodecDevice HDACodecDevice; diff --git a/hw/ioh3420.c b/hw/ioh3420.c index 1632d31c19..0a2601cac4 100644 --- a/hw/ioh3420.c +++ b/hw/ioh3420.c @@ -71,7 +71,6 @@ static void ioh3420_write_config(PCIDevice *d, pci_get_long(d->config + d->exp.aer_cap + PCI_ERR_ROOT_COMMAND); pci_bridge_write_config(d, address, val, len); - msi_write_config(d, address, val, len); ioh3420_aer_vector_update(d); pcie_cap_slot_write_config(d, address, val, len); pcie_aer_write_config(d, address, val, len); @@ -81,7 +80,7 @@ static void ioh3420_write_config(PCIDevice *d, static void ioh3420_reset(DeviceState *qdev) { PCIDevice *d = PCI_DEVICE(qdev); - msi_reset(d); + ioh3420_aer_vector_update(d); pcie_cap_root_reset(d); pcie_cap_deverr_reset(d); diff --git a/hw/isa-bus.c b/hw/isa-bus.c index 5a43f03a7c..f9b237387a 100644 --- a/hw/isa-bus.c +++ b/hw/isa-bus.c @@ -28,11 +28,19 @@ target_phys_addr_t isa_mem_base = 0; static void isabus_dev_print(Monitor *mon, DeviceState *dev, int indent); static char *isabus_get_fw_dev_path(DeviceState *dev); -static struct BusInfo isa_bus_info = { - .name = "ISA", - .size = sizeof(ISABus), - .print_dev = isabus_dev_print, - .get_fw_dev_path = isabus_get_fw_dev_path, +static void isa_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->print_dev = isabus_dev_print; + k->get_fw_dev_path = isabus_get_fw_dev_path; +} + +static const TypeInfo isa_bus_info = { + .name = TYPE_ISA_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(ISABus), + .class_init = isa_bus_class_init, }; ISABus *isa_bus_new(DeviceState *dev, MemoryRegion *address_space_io) @@ -46,7 +54,7 @@ ISABus *isa_bus_new(DeviceState *dev, MemoryRegion *address_space_io) qdev_init_nofail(dev); } - isabus = FROM_QBUS(ISABus, qbus_create(&isa_bus_info, dev, NULL)); + isabus = FROM_QBUS(ISABus, qbus_create(TYPE_ISA_BUS, dev, NULL)); isabus->address_space_io = address_space_io; return isabus; } @@ -198,7 +206,7 @@ static void isa_device_class_init(ObjectClass *klass, void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->init = isa_qdev_init; - k->bus_info = &isa_bus_info; + k->bus_type = TYPE_ISA_BUS; } static TypeInfo isa_device_type_info = { @@ -212,6 +220,7 @@ static TypeInfo 
isa_device_type_info = { static void isabus_register_types(void) { + type_register_static(&isa_bus_info); type_register_static(&isabus_bridge_info); type_register_static(&isa_device_type_info); } @@ -9,8 +9,6 @@ #define ISA_NUM_IRQS 16 -typedef struct ISADevice ISADevice; - #define TYPE_ISA_DEVICE "isa-device" #define ISA_DEVICE(obj) \ OBJECT_CHECK(ISADevice, (obj), TYPE_ISA_DEVICE) @@ -19,6 +17,9 @@ typedef struct ISADevice ISADevice; #define ISA_DEVICE_GET_CLASS(obj) \ OBJECT_GET_CLASS(ISADeviceClass, (obj), TYPE_ISA_DEVICE) +#define TYPE_ISA_BUS "ISA" +#define ISA_BUS(obj) OBJECT_CHECK(ISABus, (obj), TYPE_ISA_BUS) + typedef struct ISADeviceClass { DeviceClass parent_class; int (*init)(ISADevice *dev); diff --git a/hw/ivshmem.c b/hw/ivshmem.c index d48e5f9906..05559b639c 100644 --- a/hw/ivshmem.c +++ b/hw/ivshmem.c @@ -530,7 +530,6 @@ static void ivshmem_reset(DeviceState *d) IVShmemState *s = DO_UPCAST(IVShmemState, dev.qdev, d); s->intrstatus = 0; - msix_reset(&s->dev); ivshmem_use_msix(s); return; } diff --git a/hw/kvm/Makefile.objs b/hw/kvm/Makefile.objs new file mode 100644 index 0000000000..226497a58f --- /dev/null +++ b/hw/kvm/Makefile.objs @@ -0,0 +1 @@ +obj-$(CONFIG_KVM) += clock.o apic.o i8259.o ioapic.o i8254.o diff --git a/hw/kvm/apic.c b/hw/kvm/apic.c index 8ba4079025..80e3e48333 100644 --- a/hw/kvm/apic.c +++ b/hw/kvm/apic.c @@ -30,7 +30,7 @@ void kvm_put_apic_state(DeviceState *d, struct kvm_lapic_state *kapic) APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d); int i; - memset(kapic, 0, sizeof(kapic)); + memset(kapic, 0, sizeof(*kapic)); kvm_apic_set_reg(kapic, 0x2, s->id << 24); kvm_apic_set_reg(kapic, 0x8, s->tpr); kvm_apic_set_reg(kapic, 0xd, s->log_dest << 24); diff --git a/hw/kvm/i8254.c b/hw/kvm/i8254.c index bb5fe07d1e..c5d3711a04 100644 --- a/hw/kvm/i8254.c +++ b/hw/kvm/i8254.c @@ -23,31 +23,63 @@ * THE SOFTWARE. */ #include "qemu-timer.h" +#include "sysemu.h" #include "hw/i8254.h" #include "hw/i8254_internal.h" #include "kvm.h" #define KVM_PIT_REINJECT_BIT 0 +#define CALIBRATION_ROUNDS 3 + typedef struct KVMPITState { PITCommonState pit; LostTickPolicy lost_tick_policy; + bool state_valid; } KVMPITState; -static void kvm_pit_get(PITCommonState *s) +static int64_t abs64(int64_t v) { + return v < 0 ? -v : v; +} + +static void kvm_pit_get(PITCommonState *pit) +{ + KVMPITState *s = DO_UPCAST(KVMPITState, pit, pit); struct kvm_pit_state2 kpit; struct kvm_pit_channel_state *kchan; struct PITChannelState *sc; + int64_t offset, clock_offset; + struct timespec ts; int i, ret; + if (s->state_valid) { + return; + } + + /* + * Measure the delta between CLOCK_MONOTONIC, the base used for + * kvm_pit_channel_state::count_load_time, and vm_clock. Take the + * minimum of several samples to filter out scheduling noise. 
+ */ + clock_offset = INT64_MAX; + for (i = 0; i < CALIBRATION_ROUNDS; i++) { + offset = qemu_get_clock_ns(vm_clock); + clock_gettime(CLOCK_MONOTONIC, &ts); + offset -= ts.tv_nsec; + offset -= (int64_t)ts.tv_sec * 1000000000; + if (abs64(offset) < abs64(clock_offset)) { + clock_offset = offset; + } + } + if (kvm_has_pit_state2()) { ret = kvm_vm_ioctl(kvm_state, KVM_GET_PIT2, &kpit); if (ret < 0) { fprintf(stderr, "KVM_GET_PIT2 failed: %s\n", strerror(ret)); abort(); } - s->channels[0].irq_disabled = kpit.flags & KVM_PIT_FLAGS_HPET_LEGACY; + pit->channels[0].irq_disabled = kpit.flags & KVM_PIT_FLAGS_HPET_LEGACY; } else { /* * kvm_pit_state2 is superset of kvm_pit_state struct, @@ -61,7 +93,7 @@ static void kvm_pit_get(PITCommonState *s) } for (i = 0; i < 3; i++) { kchan = &kpit.channels[i]; - sc = &s->channels[i]; + sc = &pit->channels[i]; sc->count = kchan->count; sc->latched_count = kchan->latched_count; sc->count_latched = kchan->count_latched; @@ -74,10 +106,10 @@ static void kvm_pit_get(PITCommonState *s) sc->mode = kchan->mode; sc->bcd = kchan->bcd; sc->gate = kchan->gate; - sc->count_load_time = kchan->count_load_time; + sc->count_load_time = kchan->count_load_time + clock_offset; } - sc = &s->channels[0]; + sc = &pit->channels[0]; sc->next_transition_time = pit_get_next_transition_time(sc, sc->count_load_time); } @@ -173,6 +205,19 @@ static void kvm_pit_irq_control(void *opaque, int n, int enable) kvm_pit_put(pit); } +static void kvm_pit_vm_state_change(void *opaque, int running, + RunState state) +{ + KVMPITState *s = opaque; + + if (running) { + s->state_valid = false; + } else { + kvm_pit_get(&s->pit); + s->state_valid = true; + } +} + static int kvm_pit_initfn(PITCommonState *pit) { KVMPITState *s = DO_UPCAST(KVMPITState, pit, pit); @@ -215,6 +260,8 @@ static int kvm_pit_initfn(PITCommonState *pit) qdev_init_gpio_in(&pit->dev.qdev, kvm_pit_irq_control, 1); + qemu_add_vm_change_state_handler(kvm_pit_vm_state_change, s); + return 0; } diff --git a/hw/kzm.c b/hw/kzm.c new file mode 100644 index 0000000000..08aaf43231 --- /dev/null +++ b/hw/kzm.c @@ -0,0 +1,154 @@ +/* + * KZM Board System emulation. + * + * Copyright (c) 2008 OKL and 2011 NICTA + * Written by Hans at OK-Labs + * Updated by Peter Chubb. + * + * This code is licenced under the GPL, version 2 or later. + * See the file `COPYING' in the top level directory. 
+ * + * It (partially) emulates a Kyoto Microcomputer + * KZM-ARM11-01 evaluation board, with a Freescale + * i.MX31 SoC + */ + +#include "sysbus.h" +#include "exec-memory.h" +#include "hw.h" +#include "arm-misc.h" +#include "devices.h" +#include "net.h" +#include "sysemu.h" +#include "boards.h" +#include "pc.h" /* for the FPGA UART that emulates a 16550 */ +#include "imx.h" + + /* Memory map for Kzm Emulation Baseboard: + * 0x00000000-0x00003fff 16k secure ROM IGNORED + * 0x00004000-0x00407fff Reserved IGNORED + * 0x00404000-0x00407fff ROM IGNORED + * 0x00408000-0x0fffffff Reserved IGNORED + * 0x10000000-0x1fffbfff RAM aliasing IGNORED + * 0x1fffc000-0x1fffffff RAM EMULATED + * 0x20000000-0x2fffffff Reserved IGNORED + * 0x30000000-0x7fffffff I.MX31 Internal Register Space + * 0x43f00000 IO_AREA0 + * 0x43f90000 UART1 EMULATED + * 0x43f94000 UART2 EMULATED + * 0x68000000 AVIC EMULATED + * 0x53f80000 CCM EMULATED + * 0x53f94000 PIT 1 EMULATED + * 0x53f98000 PIT 2 EMULATED + * 0x53f90000 GPT EMULATED + * 0x80000000-0x87ffffff RAM EMULATED + * 0x88000000-0x8fffffff RAM Aliasing EMULATED + * 0xa0000000-0xafffffff NAND Flash IGNORED + * 0xb0000000-0xb3ffffff Unavailable IGNORED + * 0xb4000000-0xb4000fff 8-bit free space IGNORED + * 0xb4001000-0xb400100f Board control IGNORED + * 0xb4001003 DIP switch + * 0xb4001010-0xb400101f 7-segment LED IGNORED + * 0xb4001020-0xb400102f LED IGNORED + * 0xb4001030-0xb400103f LED IGNORED + * 0xb4001040-0xb400104f FPGA, UART EMULATED + * 0xb4001050-0xb400105f FPGA, UART EMULATED + * 0xb4001060-0xb40fffff FPGA IGNORED + * 0xb6000000-0xb61fffff LAN controller EMULATED + * 0xb6200000-0xb62fffff FPGA NAND Controller IGNORED + * 0xb6300000-0xb7ffffff Free IGNORED + * 0xb8000000-0xb8004fff Memory control registers IGNORED + * 0xc0000000-0xc3ffffff PCMCIA/CF IGNORED + * 0xc4000000-0xffffffff Reserved IGNORED + */ + +#define KZM_RAMADDRESS (0x80000000) +#define KZM_FPGA (0xb4001040) + +static struct arm_boot_info kzm_binfo = { + .loader_start = KZM_RAMADDRESS, + .board_id = 1722, +}; + +static void kzm_init(ram_addr_t ram_size, + const char *boot_device, + const char *kernel_filename, const char *kernel_cmdline, + const char *initrd_filename, const char *cpu_model) +{ + ARMCPU *cpu; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *ram = g_new(MemoryRegion, 1); + MemoryRegion *sram = g_new(MemoryRegion, 1); + MemoryRegion *ram_alias = g_new(MemoryRegion, 1); + qemu_irq *cpu_pic; + DeviceState *dev; + DeviceState *ccm; + + if (!cpu_model) { + cpu_model = "arm1136"; + } + + cpu = cpu_arm_init(cpu_model); + if (!cpu) { + fprintf(stderr, "Unable to find CPU definition\n"); + exit(1); + } + + /* On a real system, the first 16k is a `secure boot rom' */ + + memory_region_init_ram(ram, "kzm.ram", ram_size); + vmstate_register_ram_global(ram); + memory_region_add_subregion(address_space_mem, KZM_RAMADDRESS, ram); + + memory_region_init_alias(ram_alias, "ram.alias", ram, 0, ram_size); + memory_region_add_subregion(address_space_mem, 0x88000000, ram_alias); + + memory_region_init_ram(sram, "kzm.sram", 0x4000); + memory_region_add_subregion(address_space_mem, 0x1FFFC000, sram); + + cpu_pic = arm_pic_init_cpu(cpu); + dev = sysbus_create_varargs("imx_avic", 0x68000000, + cpu_pic[ARM_PIC_CPU_IRQ], + cpu_pic[ARM_PIC_CPU_FIQ], NULL); + + + imx_serial_create(0, 0x43f90000, qdev_get_gpio_in(dev, 45)); + imx_serial_create(1, 0x43f94000, qdev_get_gpio_in(dev, 32)); + + ccm = sysbus_create_simple("imx_ccm", 0x53f80000, NULL); + + imx_timerp_create(0x53f94000, 
qdev_get_gpio_in(dev, 28), ccm); + imx_timerp_create(0x53f98000, qdev_get_gpio_in(dev, 27), ccm); + imx_timerg_create(0x53f90000, qdev_get_gpio_in(dev, 29), ccm); + + if (nd_table[0].vlan) { + lan9118_init(&nd_table[0], 0xb6000000, qdev_get_gpio_in(dev, 52)); + } + + if (serial_hds[2]) { /* touchscreen */ + serial_mm_init(address_space_mem, KZM_FPGA+0x10, 0, + qdev_get_gpio_in(dev, 52), + 14745600, serial_hds[2], + DEVICE_NATIVE_ENDIAN); + } + + kzm_binfo.ram_size = ram_size; + kzm_binfo.kernel_filename = kernel_filename; + kzm_binfo.kernel_cmdline = kernel_cmdline; + kzm_binfo.initrd_filename = initrd_filename; + kzm_binfo.nb_cpus = 1; + arm_load_kernel(cpu, &kzm_binfo); +} + +static QEMUMachine kzm_machine = { + .name = "kzm", + .desc = "ARM KZM Emulation Baseboard (ARM1136)", + .init = kzm_init, +}; + +static void kzm_machine_init(void) +{ + qemu_register_machine(&kzm_machine); +} + +machine_init(kzm_machine_init) diff --git a/hw/lm32/Makefile.objs b/hw/lm32/Makefile.objs new file mode 100644 index 0000000000..4e1843c11d --- /dev/null +++ b/hw/lm32/Makefile.objs @@ -0,0 +1,23 @@ +# LM32 boards +obj-y += lm32_boards.o +obj-y += milkymist.o + +# LM32 peripherals +obj-y += lm32_pic.o +obj-y += lm32_juart.o +obj-y += lm32_timer.o +obj-y += lm32_uart.o +obj-y += lm32_sys.o +obj-y += milkymist-ac97.o +obj-y += milkymist-hpdmc.o +obj-y += milkymist-memcard.o +obj-y += milkymist-minimac2.o +obj-y += milkymist-pfpu.o +obj-y += milkymist-softusb.o +obj-y += milkymist-sysctl.o +obj-$(CONFIG_OPENGL) += milkymist-tmu2.o +obj-y += milkymist-uart.o +obj-y += milkymist-vgafb.o +obj-y += framebuffer.o + +obj-y := $(addprefix ../,$(obj-y)) diff --git a/hw/loader.c b/hw/loader.c index 7d64113e7f..33acc2fdab 100644 --- a/hw/loader.c +++ b/hw/loader.c @@ -377,9 +377,9 @@ static void zfree(void *x, void *addr) #define DEFLATED 8 -/* This is the maximum in uboot, so if a uImage overflows this, it would +/* This is the usual maximum in uboot, so if a uImage overflows this, it would * overflow on real hardware too. 
*/ -#define UBOOT_MAX_GUNZIP_BYTES 0x800000 +#define UBOOT_MAX_GUNZIP_BYTES (64 << 20) static ssize_t gunzip(void *dst, size_t dstlen, uint8_t *src, size_t srclen) diff --git a/hw/lsi53c895a.c b/hw/lsi53c895a.c index f022a02447..2fe141d24e 100644 --- a/hw/lsi53c895a.c +++ b/hw/lsi53c895a.c @@ -1677,9 +1677,10 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val) } if (val & LSI_SCNTL1_RST) { if (!(s->sstat0 & LSI_SSTAT0_RST)) { - DeviceState *dev; + BusChild *kid; - QTAILQ_FOREACH(dev, &s->bus.qbus.children, sibling) { + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + DeviceState *dev = kid->child; device_reset(dev); } s->sstat0 |= LSI_SSTAT0_RST; diff --git a/hw/m48t59.c b/hw/m48t59.c index 0c50f450ad..dd6cb37ba6 100644 --- a/hw/m48t59.c +++ b/hw/m48t59.c @@ -65,7 +65,7 @@ struct M48t59State { /* NVRAM storage */ uint8_t *buffer; /* Model parameters */ - uint32_t type; /* 2 = m48t02, 8 = m48t08, 59 = m48t59 */ + uint32_t model; /* 2 = m48t02, 8 = m48t08, 59 = m48t59 */ /* NVRAM storage */ uint16_t addr; uint8_t lock; @@ -197,10 +197,11 @@ void m48t59_write (void *opaque, uint32_t addr, uint32_t val) NVRAM_PRINTF("%s: 0x%08x => 0x%08x\n", __func__, addr, val); /* check for NVRAM access */ - if ((NVRAM->type == 2 && addr < 0x7f8) || - (NVRAM->type == 8 && addr < 0x1ff8) || - (NVRAM->type == 59 && addr < 0x1ff0)) + if ((NVRAM->model == 2 && addr < 0x7f8) || + (NVRAM->model == 8 && addr < 0x1ff8) || + (NVRAM->model == 59 && addr < 0x1ff0)) { goto do_write; + } /* TOD access */ switch (addr) { @@ -334,10 +335,11 @@ void m48t59_write (void *opaque, uint32_t addr, uint32_t val) tmp = from_bcd(val); if (tmp >= 0 && tmp <= 99) { get_time(NVRAM, &tm); - if (NVRAM->type == 8) + if (NVRAM->model == 8) { tm.tm_year = from_bcd(val) + 68; // Base year is 1968 - else + } else { tm.tm_year = from_bcd(val); + } set_time(NVRAM, &tm); } break; @@ -362,10 +364,11 @@ uint32_t m48t59_read (void *opaque, uint32_t addr) uint32_t retval = 0xFF; /* check for NVRAM access */ - if ((NVRAM->type == 2 && addr < 0x078f) || - (NVRAM->type == 8 && addr < 0x1ff8) || - (NVRAM->type == 59 && addr < 0x1ff0)) + if ((NVRAM->model == 2 && addr < 0x078f) || + (NVRAM->model == 8 && addr < 0x1ff8) || + (NVRAM->model == 59 && addr < 0x1ff0)) { goto do_read; + } /* TOD access */ switch (addr) { @@ -439,10 +442,11 @@ uint32_t m48t59_read (void *opaque, uint32_t addr) case 0x07FF: /* year */ get_time(NVRAM, &tm); - if (NVRAM->type == 8) + if (NVRAM->model == 8) { retval = to_bcd(tm.tm_year - 68); // Base year is 1968 - else + } else { retval = to_bcd(tm.tm_year); + } break; default: /* Check lock registers state */ @@ -633,7 +637,7 @@ static const MemoryRegionOps m48t59_io_ops = { /* Initialisation routine */ M48t59State *m48t59_init(qemu_irq IRQ, target_phys_addr_t mem_base, - uint32_t io_base, uint16_t size, int type) + uint32_t io_base, uint16_t size, int model) { DeviceState *dev; SysBusDevice *s; @@ -641,7 +645,7 @@ M48t59State *m48t59_init(qemu_irq IRQ, target_phys_addr_t mem_base, M48t59State *state; dev = qdev_create(NULL, "m48t59"); - qdev_prop_set_uint32(dev, "type", type); + qdev_prop_set_uint32(dev, "model", model); qdev_prop_set_uint32(dev, "size", size); qdev_prop_set_uint32(dev, "io_base", io_base); qdev_init_nofail(dev); @@ -661,14 +665,14 @@ M48t59State *m48t59_init(qemu_irq IRQ, target_phys_addr_t mem_base, } M48t59State *m48t59_init_isa(ISABus *bus, uint32_t io_base, uint16_t size, - int type) + int model) { M48t59ISAState *d; ISADevice *dev; M48t59State *s; dev = isa_create(bus, "m48t59_isa"); 
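/* (Editor's note, not part of the patch: the qdev property is renamed from
 * "type" to "model" here and throughout this file, so callers now select the
 * NVRAM variant, 2/8/59 per the struct comment above, via the "model"
 * property; the M48t59State field is renamed to match.) */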
- qdev_prop_set_uint32(&dev->qdev, "type", type); + qdev_prop_set_uint32(&dev->qdev, "model", model); qdev_prop_set_uint32(&dev->qdev, "size", size); qdev_prop_set_uint32(&dev->qdev, "io_base", io_base); qdev_init_nofail(&dev->qdev); @@ -686,7 +690,7 @@ M48t59State *m48t59_init_isa(ISABus *bus, uint32_t io_base, uint16_t size, static void m48t59_init_common(M48t59State *s) { s->buffer = g_malloc0(s->size); - if (s->type == 59) { + if (s->model == 59) { s->alrm_timer = qemu_new_timer_ns(rtc_clock, &alarm_cb, s); s->wd_timer = qemu_new_timer_ns(vm_clock, &watchdog_cb, s); } @@ -722,7 +726,7 @@ static int m48t59_init1(SysBusDevice *dev) static Property m48t59_isa_properties[] = { DEFINE_PROP_UINT32("size", M48t59ISAState, state.size, -1), - DEFINE_PROP_UINT32("type", M48t59ISAState, state.type, -1), + DEFINE_PROP_UINT32("model", M48t59ISAState, state.model, -1), DEFINE_PROP_HEX32( "io_base", M48t59ISAState, state.io_base, 0), DEFINE_PROP_END_OF_LIST(), }; @@ -746,7 +750,7 @@ static TypeInfo m48t59_isa_info = { static Property m48t59_properties[] = { DEFINE_PROP_UINT32("size", M48t59SysBusState, state.size, -1), - DEFINE_PROP_UINT32("type", M48t59SysBusState, state.type, -1), + DEFINE_PROP_UINT32("model", M48t59SysBusState, state.model, -1), DEFINE_PROP_HEX32( "io_base", M48t59SysBusState, state.io_base, 0), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/m68k/Makefile.objs b/hw/m68k/Makefile.objs new file mode 100644 index 0000000000..93b6d25baf --- /dev/null +++ b/hw/m68k/Makefile.objs @@ -0,0 +1,4 @@ +obj-y = an5206.o mcf5206.o mcf_uart.o mcf_intc.o mcf5208.o mcf_fec.o +obj-y += dummy_m68k.o + +obj-y := $(addprefix ../,$(obj-y)) diff --git a/hw/mainstone.c b/hw/mainstone.c index 00a8adc850..97687b6eeb 100644 --- a/hw/mainstone.c +++ b/hw/mainstone.c @@ -102,7 +102,7 @@ static void mainstone_common_init(MemoryRegion *address_space_mem, { uint32_t sector_len = 256 * 1024; target_phys_addr_t mainstone_flash_base[] = { MST_FLASH_0, MST_FLASH_1 }; - PXA2xxState *cpu; + PXA2xxState *mpu; DeviceState *mst_irq; DriveInfo *dinfo; int i; @@ -113,7 +113,7 @@ static void mainstone_common_init(MemoryRegion *address_space_mem, cpu_model = "pxa270-c5"; /* Setup CPU & memory */ - cpu = pxa270_init(address_space_mem, mainstone_binfo.ram_size, cpu_model); + mpu = pxa270_init(address_space_mem, mainstone_binfo.ram_size, cpu_model); memory_region_init_ram(rom, "mainstone.rom", MAINSTONE_ROM); vmstate_register_ram_global(rom); memory_region_set_readonly(rom, true); @@ -145,19 +145,19 @@ static void mainstone_common_init(MemoryRegion *address_space_mem, } mst_irq = sysbus_create_simple("mainstone-fpga", MST_FPGA_PHYS, - qdev_get_gpio_in(cpu->gpio, 0)); + qdev_get_gpio_in(mpu->gpio, 0)); /* setup keypad */ printf("map addr %p\n", &map); - pxa27x_register_keypad(cpu->kp, map, 0xe0); + pxa27x_register_keypad(mpu->kp, map, 0xe0); /* MMC/SD host */ - pxa2xx_mmci_handlers(cpu->mmc, NULL, qdev_get_gpio_in(mst_irq, MMC_IRQ)); + pxa2xx_mmci_handlers(mpu->mmc, NULL, qdev_get_gpio_in(mst_irq, MMC_IRQ)); - pxa2xx_pcmcia_set_irq_cb(cpu->pcmcia[0], + pxa2xx_pcmcia_set_irq_cb(mpu->pcmcia[0], qdev_get_gpio_in(mst_irq, S0_IRQ), qdev_get_gpio_in(mst_irq, S0_CD_IRQ)); - pxa2xx_pcmcia_set_irq_cb(cpu->pcmcia[1], + pxa2xx_pcmcia_set_irq_cb(mpu->pcmcia[1], qdev_get_gpio_in(mst_irq, S1_IRQ), qdev_get_gpio_in(mst_irq, S1_CD_IRQ)); @@ -168,7 +168,7 @@ static void mainstone_common_init(MemoryRegion *address_space_mem, mainstone_binfo.kernel_cmdline = kernel_cmdline; mainstone_binfo.initrd_filename = initrd_filename; mainstone_binfo.board_id 
= arm_id; - arm_load_kernel(&cpu->cpu->env, &mainstone_binfo); + arm_load_kernel(mpu->cpu, &mainstone_binfo); } static void mainstone_init(ram_addr_t ram_size, diff --git a/hw/mc146818rtc.c b/hw/mc146818rtc.c index 9c64e0ae25..3777f858a1 100644 --- a/hw/mc146818rtc.c +++ b/hw/mc146818rtc.c @@ -599,13 +599,6 @@ static const MemoryRegionOps cmos_ops = { .old_portio = cmos_portio }; -// FIXME add int32 visitor -static void visit_type_int32(Visitor *v, int *value, const char *name, Error **errp) -{ - int64_t val = *value; - visit_type_int(v, &val, name, errp); -} - static void rtc_get_date(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { diff --git a/hw/megasas.c b/hw/megasas.c new file mode 100644 index 0000000000..b48836fff1 --- /dev/null +++ b/hw/megasas.c @@ -0,0 +1,2198 @@ +/* + * QEMU MegaRAID SAS 8708EM2 Host Bus Adapter emulation + * Based on the linux driver code at drivers/scsi/megaraid + * + * Copyright (c) 2009-2012 Hannes Reinecke, SUSE Labs + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "hw.h" +#include "pci.h" +#include "dma.h" +#include "msix.h" +#include "iov.h" +#include "scsi.h" +#include "scsi-defs.h" +#include "block_int.h" +#include "trace.h" + +#include "mfi.h" + +#define MEGASAS_VERSION "1.70" +#define MEGASAS_MAX_FRAMES 2048 /* Firmware limit at 65535 */ +#define MEGASAS_DEFAULT_FRAMES 1000 /* Windows requires this */ +#define MEGASAS_MAX_SGE 128 /* Firmware limit */ +#define MEGASAS_DEFAULT_SGE 80 +#define MEGASAS_MAX_SECTORS 0xFFFF /* No real limit */ +#define MEGASAS_MAX_ARRAYS 128 + +#define MEGASAS_FLAG_USE_JBOD 0 +#define MEGASAS_MASK_USE_JBOD (1 << MEGASAS_FLAG_USE_JBOD) +#define MEGASAS_FLAG_USE_MSIX 1 +#define MEGASAS_MASK_USE_MSIX (1 << MEGASAS_FLAG_USE_MSIX) +#define MEGASAS_FLAG_USE_QUEUE64 2 +#define MEGASAS_MASK_USE_QUEUE64 (1 << MEGASAS_FLAG_USE_QUEUE64) + +const char *mfi_frame_desc[] = { + "MFI init", "LD Read", "LD Write", "LD SCSI", "PD SCSI", + "MFI Doorbell", "MFI Abort", "MFI SMP", "MFI Stop"}; + +typedef struct MegasasCmd { + uint32_t index; + uint16_t flags; + uint16_t count; + uint64_t context; + + target_phys_addr_t pa; + target_phys_addr_t pa_size; + union mfi_frame *frame; + SCSIRequest *req; + QEMUSGList qsg; + void *iov_buf; + size_t iov_size; + size_t iov_offset; + struct MegasasState *state; +} MegasasCmd; + +typedef struct MegasasState { + PCIDevice dev; + MemoryRegion mmio_io; + MemoryRegion port_io; + MemoryRegion queue_io; + uint32_t frame_hi; + + int fw_state; + uint32_t fw_sge; + uint32_t fw_cmds; + uint32_t flags; + int fw_luns; + int intr_mask; + int doorbell; + int busy; + + MegasasCmd *event_cmd; + int event_locale; + int event_class; + int event_count; + int shutdown_event; + int boot_event; + + uint64_t reply_queue_pa; + void *reply_queue; + int reply_queue_len; + int reply_queue_head; + int reply_queue_tail; + uint64_t consumer_pa; + uint64_t producer_pa; + + 
MegasasCmd frames[MEGASAS_MAX_FRAMES]; + + SCSIBus bus; +} MegasasState; + +#define MEGASAS_INTR_DISABLED_MASK 0xFFFFFFFF + +static bool megasas_intr_enabled(MegasasState *s) +{ + if ((s->intr_mask & MEGASAS_INTR_DISABLED_MASK) != + MEGASAS_INTR_DISABLED_MASK) { + return true; + } + return false; +} + +static bool megasas_use_queue64(MegasasState *s) +{ + return s->flags & MEGASAS_MASK_USE_QUEUE64; +} + +static bool megasas_use_msix(MegasasState *s) +{ + return s->flags & MEGASAS_MASK_USE_MSIX; +} + +static bool megasas_is_jbod(MegasasState *s) +{ + return s->flags & MEGASAS_MASK_USE_JBOD; +} + +static void megasas_frame_set_cmd_status(unsigned long frame, uint8_t v) +{ + stb_phys(frame + offsetof(struct mfi_frame_header, cmd_status), v); +} + +static void megasas_frame_set_scsi_status(unsigned long frame, uint8_t v) +{ + stb_phys(frame + offsetof(struct mfi_frame_header, scsi_status), v); +} + +/* + * Context is considered opaque, but the HBA firmware is running + * in little endian mode. So convert it to little endian, too. + */ +static uint64_t megasas_frame_get_context(unsigned long frame) +{ + return ldq_le_phys(frame + offsetof(struct mfi_frame_header, context)); +} + +static bool megasas_frame_is_ieee_sgl(MegasasCmd *cmd) +{ + return cmd->flags & MFI_FRAME_IEEE_SGL; +} + +static bool megasas_frame_is_sgl64(MegasasCmd *cmd) +{ + return cmd->flags & MFI_FRAME_SGL64; +} + +static bool megasas_frame_is_sense64(MegasasCmd *cmd) +{ + return cmd->flags & MFI_FRAME_SENSE64; +} + +static uint64_t megasas_sgl_get_addr(MegasasCmd *cmd, + union mfi_sgl *sgl) +{ + uint64_t addr; + + if (megasas_frame_is_ieee_sgl(cmd)) { + addr = le64_to_cpu(sgl->sg_skinny->addr); + } else if (megasas_frame_is_sgl64(cmd)) { + addr = le64_to_cpu(sgl->sg64->addr); + } else { + addr = le32_to_cpu(sgl->sg32->addr); + } + return addr; +} + +static uint32_t megasas_sgl_get_len(MegasasCmd *cmd, + union mfi_sgl *sgl) +{ + uint32_t len; + + if (megasas_frame_is_ieee_sgl(cmd)) { + len = le32_to_cpu(sgl->sg_skinny->len); + } else if (megasas_frame_is_sgl64(cmd)) { + len = le32_to_cpu(sgl->sg64->len); + } else { + len = le32_to_cpu(sgl->sg32->len); + } + return len; +} + +static union mfi_sgl *megasas_sgl_next(MegasasCmd *cmd, + union mfi_sgl *sgl) +{ + uint8_t *next = (uint8_t *)sgl; + + if (megasas_frame_is_ieee_sgl(cmd)) { + next += sizeof(struct mfi_sg_skinny); + } else if (megasas_frame_is_sgl64(cmd)) { + next += sizeof(struct mfi_sg64); + } else { + next += sizeof(struct mfi_sg32); + } + + if (next >= (uint8_t *)cmd->frame + cmd->pa_size) { + return NULL; + } + return (union mfi_sgl *)next; +} + +static void megasas_soft_reset(MegasasState *s); + +static int megasas_map_sgl(MegasasState *s, MegasasCmd *cmd, union mfi_sgl *sgl) +{ + int i; + int iov_count = 0; + size_t iov_size = 0; + + cmd->flags = le16_to_cpu(cmd->frame->header.flags); + iov_count = cmd->frame->header.sge_count; + if (iov_count > MEGASAS_MAX_SGE) { + trace_megasas_iovec_sgl_overflow(cmd->index, iov_count, + MEGASAS_MAX_SGE); + return iov_count; + } + qemu_sglist_init(&cmd->qsg, iov_count, pci_dma_context(&s->dev)); + for (i = 0; i < iov_count; i++) { + dma_addr_t iov_pa, iov_size_p; + + if (!sgl) { + trace_megasas_iovec_sgl_underflow(cmd->index, i); + goto unmap; + } + iov_pa = megasas_sgl_get_addr(cmd, sgl); + iov_size_p = megasas_sgl_get_len(cmd, sgl); + if (!iov_pa || !iov_size_p) { + trace_megasas_iovec_sgl_invalid(cmd->index, i, + iov_pa, iov_size_p); + goto unmap; + } + qemu_sglist_add(&cmd->qsg, iov_pa, iov_size_p); + sgl = 
megasas_sgl_next(cmd, sgl); + iov_size += (size_t)iov_size_p; + } + if (cmd->iov_size > iov_size) { + trace_megasas_iovec_overflow(cmd->index, iov_size, cmd->iov_size); + } else if (cmd->iov_size < iov_size) { + trace_megasas_iovec_underflow(cmd->iov_size, iov_size, cmd->iov_size); + } + cmd->iov_offset = 0; + return 0; +unmap: + qemu_sglist_destroy(&cmd->qsg); + return iov_count - i; +} + +static void megasas_unmap_sgl(MegasasCmd *cmd) +{ + qemu_sglist_destroy(&cmd->qsg); + cmd->iov_offset = 0; +} + +/* + * passthrough sense and io sense are at the same offset + */ +static int megasas_build_sense(MegasasCmd *cmd, uint8_t *sense_ptr, + uint8_t sense_len) +{ + uint32_t pa_hi = 0, pa_lo; + target_phys_addr_t pa; + + if (sense_len > cmd->frame->header.sense_len) { + sense_len = cmd->frame->header.sense_len; + } + if (sense_len) { + pa_lo = le32_to_cpu(cmd->frame->pass.sense_addr_lo); + if (megasas_frame_is_sense64(cmd)) { + pa_hi = le32_to_cpu(cmd->frame->pass.sense_addr_hi); + } + pa = ((uint64_t) pa_hi << 32) | pa_lo; + cpu_physical_memory_write(pa, sense_ptr, sense_len); + cmd->frame->header.sense_len = sense_len; + } + return sense_len; +} + +static void megasas_write_sense(MegasasCmd *cmd, SCSISense sense) +{ + uint8_t sense_buf[SCSI_SENSE_BUF_SIZE]; + uint8_t sense_len = 18; + + memset(sense_buf, 0, sense_len); + sense_buf[0] = 0xf0; + sense_buf[2] = sense.key; + sense_buf[7] = 10; + sense_buf[12] = sense.asc; + sense_buf[13] = sense.ascq; + megasas_build_sense(cmd, sense_buf, sense_len); +} + +static void megasas_copy_sense(MegasasCmd *cmd) +{ + uint8_t sense_buf[SCSI_SENSE_BUF_SIZE]; + uint8_t sense_len; + + sense_len = scsi_req_get_sense(cmd->req, sense_buf, + SCSI_SENSE_BUF_SIZE); + megasas_build_sense(cmd, sense_buf, sense_len); +} + +/* + * Format an INQUIRY CDB + */ +static int megasas_setup_inquiry(uint8_t *cdb, int pg, int len) +{ + memset(cdb, 0, 6); + cdb[0] = INQUIRY; + if (pg > 0) { + cdb[1] = 0x1; + cdb[2] = pg; + } + cdb[3] = (len >> 8) & 0xff; + cdb[4] = (len & 0xff); + return len; +} + +/* + * Encode lba and len into a READ_16/WRITE_16 CDB + */ +static void megasas_encode_lba(uint8_t *cdb, uint64_t lba, + uint32_t len, bool is_write) +{ + memset(cdb, 0x0, 16); + if (is_write) { + cdb[0] = WRITE_16; + } else { + cdb[0] = READ_16; + } + cdb[2] = (lba >> 56) & 0xff; + cdb[3] = (lba >> 48) & 0xff; + cdb[4] = (lba >> 40) & 0xff; + cdb[5] = (lba >> 32) & 0xff; + cdb[6] = (lba >> 24) & 0xff; + cdb[7] = (lba >> 16) & 0xff; + cdb[8] = (lba >> 8) & 0xff; + cdb[9] = (lba) & 0xff; + cdb[10] = (len >> 24) & 0xff; + cdb[11] = (len >> 16) & 0xff; + cdb[12] = (len >> 8) & 0xff; + cdb[13] = (len) & 0xff; +} + +/* + * Utility functions + */ +static uint64_t megasas_fw_time(void) +{ + struct tm curtime; + uint64_t bcd_time; + + qemu_get_timedate(&curtime, 0); + bcd_time = ((uint64_t)curtime.tm_sec & 0xff) << 48 | + ((uint64_t)curtime.tm_min & 0xff) << 40 | + ((uint64_t)curtime.tm_hour & 0xff) << 32 | + ((uint64_t)curtime.tm_mday & 0xff) << 24 | + ((uint64_t)curtime.tm_mon & 0xff) << 16 | + ((uint64_t)(curtime.tm_year + 1900) & 0xffff); + + return bcd_time; +} + +static uint64_t megasas_gen_sas_addr(uint64_t id) +{ + uint64_t addr; + + addr = 0x5001a4aULL << 36; + addr |= id & 0xfffffffff; + + return addr; +} + +/* + * Frame handling + */ +static int megasas_next_index(MegasasState *s, int index, int limit) +{ + index++; + if (index == limit) { + index = 0; + } + return index; +} + +static MegasasCmd *megasas_lookup_frame(MegasasState *s, + target_phys_addr_t frame) +{ + MegasasCmd *cmd = 
NULL; + int num = 0, index; + + index = s->reply_queue_head; + + while (num < s->fw_cmds) { + if (s->frames[index].pa && s->frames[index].pa == frame) { + cmd = &s->frames[index]; + break; + } + index = megasas_next_index(s, index, s->fw_cmds); + num++; + } + + return cmd; +} + +static MegasasCmd *megasas_next_frame(MegasasState *s, + target_phys_addr_t frame) +{ + MegasasCmd *cmd = NULL; + int num = 0, index; + + cmd = megasas_lookup_frame(s, frame); + if (cmd) { + trace_megasas_qf_found(cmd->index, cmd->pa); + return cmd; + } + index = s->reply_queue_head; + num = 0; + while (num < s->fw_cmds) { + if (!s->frames[index].pa) { + cmd = &s->frames[index]; + break; + } + index = megasas_next_index(s, index, s->fw_cmds); + num++; + } + if (!cmd) { + trace_megasas_qf_failed(frame); + } + trace_megasas_qf_new(index, cmd); + return cmd; +} + +static MegasasCmd *megasas_enqueue_frame(MegasasState *s, + target_phys_addr_t frame, uint64_t context, int count) +{ + MegasasCmd *cmd = NULL; + int frame_size = MFI_FRAME_SIZE * 16; + target_phys_addr_t frame_size_p = frame_size; + + cmd = megasas_next_frame(s, frame); + /* All frames busy */ + if (!cmd) { + return NULL; + } + if (!cmd->pa) { + cmd->pa = frame; + /* Map all possible frames */ + cmd->frame = cpu_physical_memory_map(frame, &frame_size_p, 0); + if (frame_size_p != frame_size) { + trace_megasas_qf_map_failed(cmd->index, (unsigned long)frame); + if (cmd->frame) { + cpu_physical_memory_unmap(cmd->frame, frame_size_p, 0, 0); + cmd->frame = NULL; + cmd->pa = 0; + } + s->event_count++; + return NULL; + } + cmd->pa_size = frame_size_p; + cmd->context = context; + if (!megasas_use_queue64(s)) { + cmd->context &= (uint64_t)0xFFFFFFFF; + } + } + cmd->count = count; + s->busy++; + + trace_megasas_qf_enqueue(cmd->index, cmd->count, cmd->context, + s->reply_queue_head, s->busy); + + return cmd; +} + +static void megasas_complete_frame(MegasasState *s, uint64_t context) +{ + int tail, queue_offset; + + /* Decrement busy count */ + s->busy--; + + if (s->reply_queue_pa) { + /* + * Put command on the reply queue. + * Context is opaque, but emulation is running in + * little endian. So convert it. 
+ */ + tail = s->reply_queue_head; + if (megasas_use_queue64(s)) { + queue_offset = tail * sizeof(uint64_t); + stq_le_phys(s->reply_queue_pa + queue_offset, context); + } else { + queue_offset = tail * sizeof(uint32_t); + stl_le_phys(s->reply_queue_pa + queue_offset, context); + } + s->reply_queue_head = megasas_next_index(s, tail, s->fw_cmds); + trace_megasas_qf_complete(context, tail, queue_offset, + s->busy, s->doorbell); + } + + if (megasas_intr_enabled(s)) { + /* Notify HBA */ + s->doorbell++; + if (s->doorbell == 1) { + if (msix_enabled(&s->dev)) { + trace_megasas_msix_raise(0); + msix_notify(&s->dev, 0); + } else { + trace_megasas_irq_raise(); + qemu_irq_raise(s->dev.irq[0]); + } + } + } else { + trace_megasas_qf_complete_noirq(context); + } +} + +static void megasas_reset_frames(MegasasState *s) +{ + int i; + MegasasCmd *cmd; + + for (i = 0; i < s->fw_cmds; i++) { + cmd = &s->frames[i]; + if (cmd->pa) { + cpu_physical_memory_unmap(cmd->frame, cmd->pa_size, 0, 0); + cmd->frame = NULL; + cmd->pa = 0; + } + } +} + +static void megasas_abort_command(MegasasCmd *cmd) +{ + if (cmd->req) { + scsi_req_abort(cmd->req, ABORTED_COMMAND); + cmd->req = NULL; + } +} + +static int megasas_init_firmware(MegasasState *s, MegasasCmd *cmd) +{ + uint32_t pa_hi, pa_lo; + target_phys_addr_t iq_pa, initq_size; + struct mfi_init_qinfo *initq; + uint32_t flags; + int ret = MFI_STAT_OK; + + pa_lo = le32_to_cpu(cmd->frame->init.qinfo_new_addr_lo); + pa_hi = le32_to_cpu(cmd->frame->init.qinfo_new_addr_hi); + iq_pa = (((uint64_t) pa_hi << 32) | pa_lo); + trace_megasas_init_firmware((uint64_t)iq_pa); + initq_size = sizeof(*initq); + initq = cpu_physical_memory_map(iq_pa, &initq_size, 0); + if (!initq || initq_size != sizeof(*initq)) { + trace_megasas_initq_map_failed(cmd->index); + s->event_count++; + ret = MFI_STAT_MEMORY_NOT_AVAILABLE; + goto out; + } + s->reply_queue_len = le32_to_cpu(initq->rq_entries) & 0xFFFF; + if (s->reply_queue_len > s->fw_cmds) { + trace_megasas_initq_mismatch(s->reply_queue_len, s->fw_cmds); + s->event_count++; + ret = MFI_STAT_INVALID_PARAMETER; + goto out; + } + pa_lo = le32_to_cpu(initq->rq_addr_lo); + pa_hi = le32_to_cpu(initq->rq_addr_hi); + s->reply_queue_pa = ((uint64_t) pa_hi << 32) | pa_lo; + pa_lo = le32_to_cpu(initq->ci_addr_lo); + pa_hi = le32_to_cpu(initq->ci_addr_hi); + s->consumer_pa = ((uint64_t) pa_hi << 32) | pa_lo; + pa_lo = le32_to_cpu(initq->pi_addr_lo); + pa_hi = le32_to_cpu(initq->pi_addr_hi); + s->producer_pa = ((uint64_t) pa_hi << 32) | pa_lo; + s->reply_queue_head = ldl_le_phys(s->producer_pa); + s->reply_queue_tail = ldl_le_phys(s->consumer_pa); + flags = le32_to_cpu(initq->flags); + if (flags & MFI_QUEUE_FLAG_CONTEXT64) { + s->flags |= MEGASAS_MASK_USE_QUEUE64; + } + trace_megasas_init_queue((unsigned long)s->reply_queue_pa, + s->reply_queue_len, s->reply_queue_head, + s->reply_queue_tail, flags); + megasas_reset_frames(s); + s->fw_state = MFI_FWSTATE_OPERATIONAL; +out: + if (initq) { + cpu_physical_memory_unmap(initq, initq_size, 0, 0); + } + return ret; +} + +static int megasas_map_dcmd(MegasasState *s, MegasasCmd *cmd) +{ + dma_addr_t iov_pa, iov_size; + + cmd->flags = le16_to_cpu(cmd->frame->header.flags); + if (!cmd->frame->header.sge_count) { + trace_megasas_dcmd_zero_sge(cmd->index); + cmd->iov_size = 0; + return 0; + } else if (cmd->frame->header.sge_count > 1) { + trace_megasas_dcmd_invalid_sge(cmd->index, + cmd->frame->header.sge_count); + cmd->iov_size = 0; + return -1; + } + iov_pa = megasas_sgl_get_addr(cmd, &cmd->frame->dcmd.sgl); + iov_size 
= megasas_sgl_get_len(cmd, &cmd->frame->dcmd.sgl); + qemu_sglist_init(&cmd->qsg, 1, pci_dma_context(&s->dev)); + qemu_sglist_add(&cmd->qsg, iov_pa, iov_size); + cmd->iov_size = iov_size; + return cmd->iov_size; +} + +static void megasas_finish_dcmd(MegasasCmd *cmd, uint32_t iov_size) +{ + trace_megasas_finish_dcmd(cmd->index, iov_size); + + if (cmd->frame->header.sge_count) { + qemu_sglist_destroy(&cmd->qsg); + } + if (iov_size > cmd->iov_size) { + if (megasas_frame_is_ieee_sgl(cmd)) { + cmd->frame->dcmd.sgl.sg_skinny->len = cpu_to_le32(iov_size); + } else if (megasas_frame_is_sgl64(cmd)) { + cmd->frame->dcmd.sgl.sg64->len = cpu_to_le32(iov_size); + } else { + cmd->frame->dcmd.sgl.sg32->len = cpu_to_le32(iov_size); + } + } + cmd->iov_size = 0; + return; +} + +static int megasas_ctrl_get_info(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_ctrl_info info; + size_t dcmd_size = sizeof(info); + BusChild *kid; + int num_ld_disks = 0; + + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + num_ld_disks++; + } + + memset(&info, 0x0, cmd->iov_size); + if (cmd->iov_size < dcmd_size) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + dcmd_size); + return MFI_STAT_INVALID_PARAMETER; + } + + info.pci.vendor = cpu_to_le16(PCI_VENDOR_ID_LSI_LOGIC); + info.pci.device = cpu_to_le16(PCI_DEVICE_ID_LSI_SAS1078); + info.pci.subvendor = cpu_to_le16(PCI_VENDOR_ID_LSI_LOGIC); + info.pci.subdevice = cpu_to_le16(0x1013); + + info.host.type = MFI_INFO_HOST_PCIX; + info.device.type = MFI_INFO_DEV_SAS3G; + info.device.port_count = 2; + info.device.port_addr[0] = cpu_to_le64(megasas_gen_sas_addr((uint64_t)s)); + + memcpy(info.product_name, "MegaRAID SAS 8708EM2", 20); + snprintf(info.serial_number, 32, "QEMU%08lx", + (unsigned long)s & 0xFFFFFFFF); + snprintf(info.package_version, 0x60, "%s-QEMU", QEMU_VERSION); + memcpy(info.image_component[0].name, "APP", 3); + memcpy(info.image_component[0].version, MEGASAS_VERSION "-QEMU", 9); + memcpy(info.image_component[0].build_date, __DATE__, 11); + memcpy(info.image_component[0].build_time, __TIME__, 8); + info.image_component_count = 1; + if (s->dev.has_rom) { + uint8_t biosver[32]; + uint8_t *ptr; + + ptr = memory_region_get_ram_ptr(&s->dev.rom); + memcpy(biosver, ptr + 0x41, 31); + qemu_put_ram_ptr(ptr); + memcpy(info.image_component[1].name, "BIOS", 4); + memcpy(info.image_component[1].version, biosver, + strlen((const char *)biosver)); + info.image_component_count++; + } + info.current_fw_time = cpu_to_le32(megasas_fw_time()); + info.max_arms = 32; + info.max_spans = 8; + info.max_arrays = MEGASAS_MAX_ARRAYS; + info.max_lds = s->fw_luns; + info.max_cmds = cpu_to_le16(s->fw_cmds); + info.max_sg_elements = cpu_to_le16(s->fw_sge); + info.max_request_size = cpu_to_le32(MEGASAS_MAX_SECTORS); + info.lds_present = cpu_to_le16(num_ld_disks); + info.pd_present = cpu_to_le16(num_ld_disks); + info.pd_disks_present = cpu_to_le16(num_ld_disks); + info.hw_present = cpu_to_le32(MFI_INFO_HW_NVRAM | + MFI_INFO_HW_MEM | + MFI_INFO_HW_FLASH); + info.memory_size = cpu_to_le16(512); + info.nvram_size = cpu_to_le16(32); + info.flash_size = cpu_to_le16(16); + info.raid_levels = cpu_to_le32(MFI_INFO_RAID_0); + info.adapter_ops = cpu_to_le32(MFI_INFO_AOPS_RBLD_RATE | + MFI_INFO_AOPS_SELF_DIAGNOSTIC | + MFI_INFO_AOPS_MIXED_ARRAY); + info.ld_ops = cpu_to_le32(MFI_INFO_LDOPS_DISK_CACHE_POLICY | + MFI_INFO_LDOPS_ACCESS_POLICY | + MFI_INFO_LDOPS_IO_POLICY | + MFI_INFO_LDOPS_WRITE_POLICY | + MFI_INFO_LDOPS_READ_POLICY); + info.max_strips_per_io = cpu_to_le16(s->fw_sge); + 
info.stripe_sz_ops.min = 3; + info.stripe_sz_ops.max = ffs(MEGASAS_MAX_SECTORS + 1) - 1; + info.properties.pred_fail_poll_interval = cpu_to_le16(300); + info.properties.intr_throttle_cnt = cpu_to_le16(16); + info.properties.intr_throttle_timeout = cpu_to_le16(50); + info.properties.rebuild_rate = 30; + info.properties.patrol_read_rate = 30; + info.properties.bgi_rate = 30; + info.properties.cc_rate = 30; + info.properties.recon_rate = 30; + info.properties.cache_flush_interval = 4; + info.properties.spinup_drv_cnt = 2; + info.properties.spinup_delay = 6; + info.properties.ecc_bucket_size = 15; + info.properties.ecc_bucket_leak_rate = cpu_to_le16(1440); + info.properties.expose_encl_devices = 1; + info.properties.OnOffProperties = cpu_to_le32(MFI_CTRL_PROP_EnableJBOD); + info.pd_ops = cpu_to_le32(MFI_INFO_PDOPS_FORCE_ONLINE | + MFI_INFO_PDOPS_FORCE_OFFLINE); + info.pd_mix_support = cpu_to_le32(MFI_INFO_PDMIX_SAS | + MFI_INFO_PDMIX_SATA | + MFI_INFO_PDMIX_LD); + + cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + return MFI_STAT_OK; +} + +static int megasas_mfc_get_defaults(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_defaults info; + size_t dcmd_size = sizeof(struct mfi_defaults); + + memset(&info, 0x0, dcmd_size); + if (cmd->iov_size < dcmd_size) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + dcmd_size); + return MFI_STAT_INVALID_PARAMETER; + } + + info.sas_addr = cpu_to_le64(megasas_gen_sas_addr((uint64_t)s)); + info.stripe_size = 3; + info.flush_time = 4; + info.background_rate = 30; + info.allow_mix_in_enclosure = 1; + info.allow_mix_in_ld = 1; + info.direct_pd_mapping = 1; + /* Enable for BIOS support */ + info.bios_enumerate_lds = 1; + info.disable_ctrl_r = 1; + info.expose_enclosure_devices = 1; + info.disable_preboot_cli = 1; + info.cluster_disable = 1; + + cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + return MFI_STAT_OK; +} + +static int megasas_dcmd_get_bios_info(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_bios_data info; + size_t dcmd_size = sizeof(info); + + memset(&info, 0x0, dcmd_size); + if (cmd->iov_size < dcmd_size) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + dcmd_size); + return MFI_STAT_INVALID_PARAMETER; + } + info.continue_on_error = 1; + info.verbose = 1; + if (megasas_is_jbod(s)) { + info.expose_all_drives = 1; + } + + cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + return MFI_STAT_OK; +} + +static int megasas_dcmd_get_fw_time(MegasasState *s, MegasasCmd *cmd) +{ + uint64_t fw_time; + size_t dcmd_size = sizeof(fw_time); + + fw_time = cpu_to_le64(megasas_fw_time()); + + cmd->iov_size -= dma_buf_read((uint8_t *)&fw_time, dcmd_size, &cmd->qsg); + return MFI_STAT_OK; +} + +static int megasas_dcmd_set_fw_time(MegasasState *s, MegasasCmd *cmd) +{ + uint64_t fw_time; + + /* This is a dummy; setting of firmware time is not allowed */ + memcpy(&fw_time, cmd->frame->dcmd.mbox, sizeof(fw_time)); + + trace_megasas_dcmd_set_fw_time(cmd->index, fw_time); + fw_time = cpu_to_le64(megasas_fw_time()); + return MFI_STAT_OK; +} + +static int megasas_event_info(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_evt_log_state info; + size_t dcmd_size = sizeof(info); + + memset(&info, 0, dcmd_size); + + info.newest_seq_num = cpu_to_le32(s->event_count); + info.shutdown_seq_num = cpu_to_le32(s->shutdown_event); + info.boot_seq_num = cpu_to_le32(s->boot_event); + + cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + return MFI_STAT_OK; +} + 
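The DCMD read handlers in this new file all follow the same reply convention: build the MFI structure locally, check that the guest buffer mapped by megasas_map_dcmd() is at least as large as the structure, copy it to the guest through the command's scatter-gather list with dma_buf_read(), and adjust cmd->iov_size as the other handlers do. A minimal sketch of that pattern follows; the handler name and the mfi_example_data structure are hypothetical, not part of the patch:

static int megasas_dcmd_example(MegasasState *s, MegasasCmd *cmd)
{
    struct mfi_example_data info;        /* hypothetical reply structure */
    size_t dcmd_size = sizeof(info);

    memset(&info, 0, dcmd_size);
    if (cmd->iov_size < dcmd_size) {     /* guest buffer too small */
        return MFI_STAT_INVALID_PARAMETER;
    }
    /* ... fill in fields, little-endian like the real firmware ... */

    /* copy out via the SG list mapped by megasas_map_dcmd() */
    cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg);
    return MFI_STAT_OK;
}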
+static int megasas_event_wait(MegasasState *s, MegasasCmd *cmd) +{ + union mfi_evt event; + + if (cmd->iov_size < sizeof(struct mfi_evt_detail)) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + sizeof(struct mfi_evt_detail)); + return MFI_STAT_INVALID_PARAMETER; + } + s->event_count = cpu_to_le32(cmd->frame->dcmd.mbox[0]); + event.word = cpu_to_le32(cmd->frame->dcmd.mbox[4]); + s->event_locale = event.members.locale; + s->event_class = event.members.class; + s->event_cmd = cmd; + /* Decrease busy count; event frame doesn't count here */ + s->busy--; + cmd->iov_size = sizeof(struct mfi_evt_detail); + return MFI_STAT_INVALID_STATUS; +} + +static int megasas_dcmd_pd_get_list(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_pd_list info; + size_t dcmd_size = sizeof(info); + BusChild *kid; + uint32_t offset, dcmd_limit, num_pd_disks = 0, max_pd_disks; + uint16_t sdev_id; + + memset(&info, 0, dcmd_size); + offset = 8; + dcmd_limit = offset + sizeof(struct mfi_pd_address); + if (cmd->iov_size < dcmd_limit) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + dcmd_limit); + return MFI_STAT_INVALID_PARAMETER; + } + + max_pd_disks = (cmd->iov_size - offset) / sizeof(struct mfi_pd_address); + if (max_pd_disks > s->fw_luns) { + max_pd_disks = s->fw_luns; + } + + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + SCSIDevice *sdev = DO_UPCAST(SCSIDevice, qdev, kid->child); + + sdev_id = ((sdev->id & 0xFF) >> 8) | (sdev->lun & 0xFF); + info.addr[num_pd_disks].device_id = cpu_to_le16(sdev_id); + info.addr[num_pd_disks].encl_device_id = 0xFFFF; + info.addr[num_pd_disks].encl_index = 0; + info.addr[num_pd_disks].slot_number = (sdev->id & 0xFF); + info.addr[num_pd_disks].scsi_dev_type = sdev->type; + info.addr[num_pd_disks].connect_port_bitmap = 0x1; + info.addr[num_pd_disks].sas_addr[0] = + cpu_to_le64(megasas_gen_sas_addr((uint64_t)sdev)); + num_pd_disks++; + offset += sizeof(struct mfi_pd_address); + } + trace_megasas_dcmd_pd_get_list(cmd->index, num_pd_disks, + max_pd_disks, offset); + + info.size = cpu_to_le32(offset); + info.count = cpu_to_le32(num_pd_disks); + + cmd->iov_size -= dma_buf_read((uint8_t *)&info, offset, &cmd->qsg); + return MFI_STAT_OK; +} + +static int megasas_dcmd_pd_list_query(MegasasState *s, MegasasCmd *cmd) +{ + uint16_t flags; + + /* mbox0 contains flags */ + flags = le16_to_cpu(cmd->frame->dcmd.mbox[0]); + trace_megasas_dcmd_pd_list_query(cmd->index, flags); + if (flags == MR_PD_QUERY_TYPE_ALL || + megasas_is_jbod(s)) { + return megasas_dcmd_pd_get_list(s, cmd); + } + + return MFI_STAT_OK; +} + +static int megasas_pd_get_info_submit(SCSIDevice *sdev, int lun, + MegasasCmd *cmd) +{ + struct mfi_pd_info *info = cmd->iov_buf; + size_t dcmd_size = sizeof(struct mfi_pd_info); + BlockConf *conf = &sdev->conf; + uint64_t pd_size; + uint16_t sdev_id = ((sdev->id & 0xFF) >> 8) | (lun & 0xFF); + uint8_t cmdbuf[6]; + SCSIRequest *req; + size_t len, resid; + + if (!cmd->iov_buf) { + cmd->iov_buf = g_malloc(dcmd_size); + memset(cmd->iov_buf, 0, dcmd_size); + info = cmd->iov_buf; + info->inquiry_data[0] = 0x7f; /* Force PQual 0x3, PType 0x1f */ + info->vpd_page83[0] = 0x7f; + megasas_setup_inquiry(cmdbuf, 0, sizeof(info->inquiry_data)); + req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, cmd); + if (!req) { + trace_megasas_dcmd_req_alloc_failed(cmd->index, + "PD get info std inquiry"); + g_free(cmd->iov_buf); + cmd->iov_buf = NULL; + return MFI_STAT_FLASH_ALLOC_FAIL; + } + trace_megasas_dcmd_internal_submit(cmd->index, + "PD get info std inquiry", 
lun); + len = scsi_req_enqueue(req); + if (len > 0) { + cmd->iov_size = len; + scsi_req_continue(req); + } + return MFI_STAT_INVALID_STATUS; + } else if (info->inquiry_data[0] != 0x7f && info->vpd_page83[0] == 0x7f) { + megasas_setup_inquiry(cmdbuf, 0x83, sizeof(info->vpd_page83)); + req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, cmd); + if (!req) { + trace_megasas_dcmd_req_alloc_failed(cmd->index, + "PD get info vpd inquiry"); + return MFI_STAT_FLASH_ALLOC_FAIL; + } + trace_megasas_dcmd_internal_submit(cmd->index, + "PD get info vpd inquiry", lun); + len = scsi_req_enqueue(req); + if (len > 0) { + cmd->iov_size = len; + scsi_req_continue(req); + } + return MFI_STAT_INVALID_STATUS; + } + /* Finished, set FW state */ + if ((info->inquiry_data[0] >> 5) == 0) { + if (megasas_is_jbod(cmd->state)) { + info->fw_state = cpu_to_le16(MFI_PD_STATE_SYSTEM); + } else { + info->fw_state = cpu_to_le16(MFI_PD_STATE_ONLINE); + } + } else { + info->fw_state = cpu_to_le16(MFI_PD_STATE_OFFLINE); + } + + info->ref.v.device_id = cpu_to_le16(sdev_id); + info->state.ddf.pd_type = cpu_to_le16(MFI_PD_DDF_TYPE_IN_VD| + MFI_PD_DDF_TYPE_INTF_SAS); + bdrv_get_geometry(conf->bs, &pd_size); + info->raw_size = cpu_to_le64(pd_size); + info->non_coerced_size = cpu_to_le64(pd_size); + info->coerced_size = cpu_to_le64(pd_size); + info->encl_device_id = 0xFFFF; + info->slot_number = (sdev->id & 0xFF); + info->path_info.count = 1; + info->path_info.sas_addr[0] = + cpu_to_le64(megasas_gen_sas_addr((uint64_t)sdev)); + info->connected_port_bitmap = 0x1; + info->device_speed = 1; + info->link_speed = 1; + resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg); + g_free(cmd->iov_buf); + cmd->iov_size = dcmd_size - resid; + cmd->iov_buf = NULL; + return MFI_STAT_OK; +} + +static int megasas_dcmd_pd_get_info(MegasasState *s, MegasasCmd *cmd) +{ + size_t dcmd_size = sizeof(struct mfi_pd_info); + uint16_t pd_id; + SCSIDevice *sdev = NULL; + int retval = MFI_STAT_DEVICE_NOT_FOUND; + + if (cmd->iov_size < dcmd_size) { + return MFI_STAT_INVALID_PARAMETER; + } + + /* mbox0 has the ID */ + pd_id = le16_to_cpu(cmd->frame->dcmd.mbox[0]); + sdev = scsi_device_find(&s->bus, 0, pd_id, 0); + trace_megasas_dcmd_pd_get_info(cmd->index, pd_id); + + if (sdev) { + /* Submit inquiry */ + retval = megasas_pd_get_info_submit(sdev, pd_id, cmd); + } + + return retval; +} + +static int megasas_dcmd_ld_get_list(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_ld_list info; + size_t dcmd_size = sizeof(info), resid; + uint32_t num_ld_disks = 0, max_ld_disks = s->fw_luns; + uint64_t ld_size; + BusChild *kid; + + memset(&info, 0, dcmd_size); + if (cmd->iov_size < dcmd_size) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + dcmd_size); + return MFI_STAT_INVALID_PARAMETER; + } + + if (megasas_is_jbod(s)) { + max_ld_disks = 0; + } + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + SCSIDevice *sdev = DO_UPCAST(SCSIDevice, qdev, kid->child); + BlockConf *conf = &sdev->conf; + + if (num_ld_disks >= max_ld_disks) { + break; + } + /* Logical device size is in blocks */ + bdrv_get_geometry(conf->bs, &ld_size); + info.ld_list[num_ld_disks].ld.v.target_id = sdev->id; + info.ld_list[num_ld_disks].state = MFI_LD_STATE_OPTIMAL; + info.ld_list[num_ld_disks].size = cpu_to_le64(ld_size); + num_ld_disks++; + } + info.ld_count = cpu_to_le32(num_ld_disks); + trace_megasas_dcmd_ld_get_list(cmd->index, num_ld_disks, max_ld_disks); + + resid = dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + cmd->iov_size = dcmd_size - resid; + return 
MFI_STAT_OK; +} + +static int megasas_ld_get_info_submit(SCSIDevice *sdev, int lun, + MegasasCmd *cmd) +{ + struct mfi_ld_info *info = cmd->iov_buf; + size_t dcmd_size = sizeof(struct mfi_ld_info); + uint8_t cdb[6]; + SCSIRequest *req; + ssize_t len, resid; + BlockConf *conf = &sdev->conf; + uint16_t sdev_id = ((sdev->id & 0xFF) >> 8) | (lun & 0xFF); + uint64_t ld_size; + + if (!cmd->iov_buf) { + cmd->iov_buf = g_malloc(dcmd_size); + memset(cmd->iov_buf, 0x0, dcmd_size); + info = cmd->iov_buf; + megasas_setup_inquiry(cdb, 0x83, sizeof(info->vpd_page83)); + req = scsi_req_new(sdev, cmd->index, lun, cdb, cmd); + if (!req) { + trace_megasas_dcmd_req_alloc_failed(cmd->index, + "LD get info vpd inquiry"); + g_free(cmd->iov_buf); + cmd->iov_buf = NULL; + return MFI_STAT_FLASH_ALLOC_FAIL; + } + trace_megasas_dcmd_internal_submit(cmd->index, + "LD get info vpd inquiry", lun); + len = scsi_req_enqueue(req); + if (len > 0) { + cmd->iov_size = len; + scsi_req_continue(req); + } + return MFI_STAT_INVALID_STATUS; + } + + info->ld_config.params.state = MFI_LD_STATE_OPTIMAL; + info->ld_config.properties.ld.v.target_id = lun; + info->ld_config.params.stripe_size = 3; + info->ld_config.params.num_drives = 1; + info->ld_config.params.is_consistent = 1; + /* Logical device size is in blocks */ + bdrv_get_geometry(conf->bs, &ld_size); + info->size = cpu_to_le64(ld_size); + memset(info->ld_config.span, 0, sizeof(info->ld_config.span)); + info->ld_config.span[0].start_block = 0; + info->ld_config.span[0].num_blocks = info->size; + info->ld_config.span[0].array_ref = cpu_to_le16(sdev_id); + + resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg); + g_free(cmd->iov_buf); + cmd->iov_size = dcmd_size - resid; + cmd->iov_buf = NULL; + return MFI_STAT_OK; +} + +static int megasas_dcmd_ld_get_info(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_ld_info info; + size_t dcmd_size = sizeof(info); + uint16_t ld_id; + uint32_t max_ld_disks = s->fw_luns; + SCSIDevice *sdev = NULL; + int retval = MFI_STAT_DEVICE_NOT_FOUND; + + if (cmd->iov_size < dcmd_size) { + return MFI_STAT_INVALID_PARAMETER; + } + + /* mbox0 has the ID */ + ld_id = le16_to_cpu(cmd->frame->dcmd.mbox[0]); + trace_megasas_dcmd_ld_get_info(cmd->index, ld_id); + + if (megasas_is_jbod(s)) { + return MFI_STAT_DEVICE_NOT_FOUND; + } + + if (ld_id < max_ld_disks) { + sdev = scsi_device_find(&s->bus, 0, ld_id, 0); + } + + if (sdev) { + retval = megasas_ld_get_info_submit(sdev, ld_id, cmd); + } + + return retval; +} + +static int megasas_dcmd_cfg_read(MegasasState *s, MegasasCmd *cmd) +{ + uint8_t data[4096]; + struct mfi_config_data *info; + int num_pd_disks = 0, array_offset, ld_offset; + BusChild *kid; + + if (cmd->iov_size > 4096) { + return MFI_STAT_INVALID_PARAMETER; + } + + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + num_pd_disks++; + } + info = (struct mfi_config_data *)&data; + /* + * Array mapping: + * - One array per SCSI device + * - One logical drive per SCSI device + * spanning the entire device + */ + info->array_count = num_pd_disks; + info->array_size = sizeof(struct mfi_array) * num_pd_disks; + info->log_drv_count = num_pd_disks; + info->log_drv_size = sizeof(struct mfi_ld_config) * num_pd_disks; + info->spares_count = 0; + info->spares_size = sizeof(struct mfi_spare); + info->size = sizeof(struct mfi_config_data) + info->array_size + + info->log_drv_size; + if (info->size > 4096) { + return MFI_STAT_INVALID_PARAMETER; + } + + array_offset = sizeof(struct mfi_config_data); + ld_offset = array_offset + sizeof(struct mfi_array) * 
num_pd_disks; + + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + SCSIDevice *sdev = DO_UPCAST(SCSIDevice, qdev, kid->child); + BlockConf *conf = &sdev->conf; + uint16_t sdev_id = ((sdev->id & 0xFF) >> 8) | (sdev->lun & 0xFF); + struct mfi_array *array; + struct mfi_ld_config *ld; + uint64_t pd_size; + int i; + + array = (struct mfi_array *)(data + array_offset); + bdrv_get_geometry(conf->bs, &pd_size); + array->size = cpu_to_le64(pd_size); + array->num_drives = 1; + array->array_ref = cpu_to_le16(sdev_id); + array->pd[0].ref.v.device_id = cpu_to_le16(sdev_id); + array->pd[0].ref.v.seq_num = 0; + array->pd[0].fw_state = MFI_PD_STATE_ONLINE; + array->pd[0].encl.pd = 0xFF; + array->pd[0].encl.slot = (sdev->id & 0xFF); + for (i = 1; i < MFI_MAX_ROW_SIZE; i++) { + array->pd[i].ref.v.device_id = 0xFFFF; + array->pd[i].ref.v.seq_num = 0; + array->pd[i].fw_state = MFI_PD_STATE_UNCONFIGURED_GOOD; + array->pd[i].encl.pd = 0xFF; + array->pd[i].encl.slot = 0xFF; + } + array_offset += sizeof(struct mfi_array); + ld = (struct mfi_ld_config *)(data + ld_offset); + memset(ld, 0, sizeof(struct mfi_ld_config)); + ld->properties.ld.v.target_id = (sdev->id & 0xFF); + ld->properties.default_cache_policy = MR_LD_CACHE_READ_AHEAD | + MR_LD_CACHE_READ_ADAPTIVE; + ld->properties.current_cache_policy = MR_LD_CACHE_READ_AHEAD | + MR_LD_CACHE_READ_ADAPTIVE; + ld->params.state = MFI_LD_STATE_OPTIMAL; + ld->params.stripe_size = 3; + ld->params.num_drives = 1; + ld->params.span_depth = 1; + ld->params.is_consistent = 1; + ld->span[0].start_block = 0; + ld->span[0].num_blocks = cpu_to_le64(pd_size); + ld->span[0].array_ref = cpu_to_le16(sdev_id); + ld_offset += sizeof(struct mfi_ld_config); + } + + cmd->iov_size -= dma_buf_read((uint8_t *)data, info->size, &cmd->qsg); + return MFI_STAT_OK; +} + +static int megasas_dcmd_get_properties(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_ctrl_props info; + size_t dcmd_size = sizeof(info); + + memset(&info, 0x0, dcmd_size); + if (cmd->iov_size < dcmd_size) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + dcmd_size); + return MFI_STAT_INVALID_PARAMETER; + } + info.pred_fail_poll_interval = cpu_to_le16(300); + info.intr_throttle_cnt = cpu_to_le16(16); + info.intr_throttle_timeout = cpu_to_le16(50); + info.rebuild_rate = 30; + info.patrol_read_rate = 30; + info.bgi_rate = 30; + info.cc_rate = 30; + info.recon_rate = 30; + info.cache_flush_interval = 4; + info.spinup_drv_cnt = 2; + info.spinup_delay = 6; + info.ecc_bucket_size = 15; + info.ecc_bucket_leak_rate = cpu_to_le16(1440); + info.expose_encl_devices = 1; + + cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + return MFI_STAT_OK; +} + +static int megasas_cache_flush(MegasasState *s, MegasasCmd *cmd) +{ + qemu_aio_flush(); + return MFI_STAT_OK; +} + +static int megasas_ctrl_shutdown(MegasasState *s, MegasasCmd *cmd) +{ + s->fw_state = MFI_FWSTATE_READY; + return MFI_STAT_OK; +} + +static int megasas_cluster_reset_ld(MegasasState *s, MegasasCmd *cmd) +{ + return MFI_STAT_INVALID_DCMD; +} + +static int megasas_dcmd_set_properties(MegasasState *s, MegasasCmd *cmd) +{ + uint8_t *dummy = g_malloc(cmd->iov_size); + + dma_buf_write(dummy, cmd->iov_size, &cmd->qsg); + + trace_megasas_dcmd_dump_frame(0, + dummy[0x00], dummy[0x01], dummy[0x02], dummy[0x03], + dummy[0x04], dummy[0x05], dummy[0x06], dummy[0x07]); + trace_megasas_dcmd_dump_frame(1, + dummy[0x08], dummy[0x09], dummy[0x0a], dummy[0x0b], + dummy[0x0c], dummy[0x0d], dummy[0x0e], dummy[0x0f]); + 
trace_megasas_dcmd_dump_frame(2, + dummy[0x10], dummy[0x11], dummy[0x12], dummy[0x13], + dummy[0x14], dummy[0x15], dummy[0x16], dummy[0x17]); + trace_megasas_dcmd_dump_frame(3, + dummy[0x18], dummy[0x19], dummy[0x1a], dummy[0x1b], + dummy[0x1c], dummy[0x1d], dummy[0x1e], dummy[0x1f]); + trace_megasas_dcmd_dump_frame(4, + dummy[0x20], dummy[0x21], dummy[0x22], dummy[0x23], + dummy[0x24], dummy[0x25], dummy[0x26], dummy[0x27]); + trace_megasas_dcmd_dump_frame(5, + dummy[0x28], dummy[0x29], dummy[0x2a], dummy[0x2b], + dummy[0x2c], dummy[0x2d], dummy[0x2e], dummy[0x2f]); + trace_megasas_dcmd_dump_frame(6, + dummy[0x30], dummy[0x31], dummy[0x32], dummy[0x33], + dummy[0x34], dummy[0x35], dummy[0x36], dummy[0x37]); + trace_megasas_dcmd_dump_frame(7, + dummy[0x38], dummy[0x39], dummy[0x3a], dummy[0x3b], + dummy[0x3c], dummy[0x3d], dummy[0x3e], dummy[0x3f]); + g_free(dummy); + return MFI_STAT_OK; +} + +static int megasas_dcmd_dummy(MegasasState *s, MegasasCmd *cmd) +{ + trace_megasas_dcmd_dummy(cmd->index, cmd->iov_size); + return MFI_STAT_OK; +} + +static const struct dcmd_cmd_tbl_t { + int opcode; + const char *desc; + int (*func)(MegasasState *s, MegasasCmd *cmd); +} dcmd_cmd_tbl[] = { + { MFI_DCMD_CTRL_MFI_HOST_MEM_ALLOC, "CTRL_HOST_MEM_ALLOC", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_GET_INFO, "CTRL_GET_INFO", + megasas_ctrl_get_info }, + { MFI_DCMD_CTRL_GET_PROPERTIES, "CTRL_GET_PROPERTIES", + megasas_dcmd_get_properties }, + { MFI_DCMD_CTRL_SET_PROPERTIES, "CTRL_SET_PROPERTIES", + megasas_dcmd_set_properties }, + { MFI_DCMD_CTRL_ALARM_GET, "CTRL_ALARM_GET", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_ALARM_ENABLE, "CTRL_ALARM_ENABLE", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_ALARM_DISABLE, "CTRL_ALARM_DISABLE", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_ALARM_SILENCE, "CTRL_ALARM_SILENCE", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_ALARM_TEST, "CTRL_ALARM_TEST", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_EVENT_GETINFO, "CTRL_EVENT_GETINFO", + megasas_event_info }, + { MFI_DCMD_CTRL_EVENT_GET, "CTRL_EVENT_GET", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_EVENT_WAIT, "CTRL_EVENT_WAIT", + megasas_event_wait }, + { MFI_DCMD_CTRL_SHUTDOWN, "CTRL_SHUTDOWN", + megasas_ctrl_shutdown }, + { MFI_DCMD_HIBERNATE_STANDBY, "CTRL_STANDBY", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_GET_TIME, "CTRL_GET_TIME", + megasas_dcmd_get_fw_time }, + { MFI_DCMD_CTRL_SET_TIME, "CTRL_SET_TIME", + megasas_dcmd_set_fw_time }, + { MFI_DCMD_CTRL_BIOS_DATA_GET, "CTRL_BIOS_DATA_GET", + megasas_dcmd_get_bios_info }, + { MFI_DCMD_CTRL_FACTORY_DEFAULTS, "CTRL_FACTORY_DEFAULTS", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_MFC_DEFAULTS_GET, "CTRL_MFC_DEFAULTS_GET", + megasas_mfc_get_defaults }, + { MFI_DCMD_CTRL_MFC_DEFAULTS_SET, "CTRL_MFC_DEFAULTS_SET", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_CACHE_FLUSH, "CTRL_CACHE_FLUSH", + megasas_cache_flush }, + { MFI_DCMD_PD_GET_LIST, "PD_GET_LIST", + megasas_dcmd_pd_get_list }, + { MFI_DCMD_PD_LIST_QUERY, "PD_LIST_QUERY", + megasas_dcmd_pd_list_query }, + { MFI_DCMD_PD_GET_INFO, "PD_GET_INFO", + megasas_dcmd_pd_get_info }, + { MFI_DCMD_PD_STATE_SET, "PD_STATE_SET", + megasas_dcmd_dummy }, + { MFI_DCMD_PD_REBUILD, "PD_REBUILD", + megasas_dcmd_dummy }, + { MFI_DCMD_PD_BLINK, "PD_BLINK", + megasas_dcmd_dummy }, + { MFI_DCMD_PD_UNBLINK, "PD_UNBLINK", + megasas_dcmd_dummy }, + { MFI_DCMD_LD_GET_LIST, "LD_GET_LIST", + megasas_dcmd_ld_get_list}, + { MFI_DCMD_LD_GET_INFO, "LD_GET_INFO", + megasas_dcmd_ld_get_info }, + { MFI_DCMD_LD_GET_PROP, "LD_GET_PROP", + megasas_dcmd_dummy }, + { 
MFI_DCMD_LD_SET_PROP, "LD_SET_PROP", + megasas_dcmd_dummy }, + { MFI_DCMD_LD_DELETE, "LD_DELETE", + megasas_dcmd_dummy }, + { MFI_DCMD_CFG_READ, "CFG_READ", + megasas_dcmd_cfg_read }, + { MFI_DCMD_CFG_ADD, "CFG_ADD", + megasas_dcmd_dummy }, + { MFI_DCMD_CFG_CLEAR, "CFG_CLEAR", + megasas_dcmd_dummy }, + { MFI_DCMD_CFG_FOREIGN_READ, "CFG_FOREIGN_READ", + megasas_dcmd_dummy }, + { MFI_DCMD_CFG_FOREIGN_IMPORT, "CFG_FOREIGN_IMPORT", + megasas_dcmd_dummy }, + { MFI_DCMD_BBU_STATUS, "BBU_STATUS", + megasas_dcmd_dummy }, + { MFI_DCMD_BBU_CAPACITY_INFO, "BBU_CAPACITY_INFO", + megasas_dcmd_dummy }, + { MFI_DCMD_BBU_DESIGN_INFO, "BBU_DESIGN_INFO", + megasas_dcmd_dummy }, + { MFI_DCMD_BBU_PROP_GET, "BBU_PROP_GET", + megasas_dcmd_dummy }, + { MFI_DCMD_CLUSTER, "CLUSTER", + megasas_dcmd_dummy }, + { MFI_DCMD_CLUSTER_RESET_ALL, "CLUSTER_RESET_ALL", + megasas_dcmd_dummy }, + { MFI_DCMD_CLUSTER_RESET_LD, "CLUSTER_RESET_LD", + megasas_cluster_reset_ld }, + { -1, NULL, NULL } +}; + +static int megasas_handle_dcmd(MegasasState *s, MegasasCmd *cmd) +{ + int opcode, len; + int retval = 0; + const struct dcmd_cmd_tbl_t *cmdptr = dcmd_cmd_tbl; + + opcode = le32_to_cpu(cmd->frame->dcmd.opcode); + trace_megasas_handle_dcmd(cmd->index, opcode); + len = megasas_map_dcmd(s, cmd); + if (len < 0) { + return MFI_STAT_MEMORY_NOT_AVAILABLE; + } + while (cmdptr->opcode != -1 && cmdptr->opcode != opcode) { + cmdptr++; + } + if (cmdptr->opcode == -1) { + trace_megasas_dcmd_unhandled(cmd->index, opcode, len); + retval = megasas_dcmd_dummy(s, cmd); + } else { + trace_megasas_dcmd_enter(cmd->index, cmdptr->desc, len); + retval = cmdptr->func(s, cmd); + } + if (retval != MFI_STAT_INVALID_STATUS) { + megasas_finish_dcmd(cmd, len); + } + return retval; +} + +static int megasas_finish_internal_dcmd(MegasasCmd *cmd, + SCSIRequest *req) +{ + int opcode; + int retval = MFI_STAT_OK; + int lun = req->lun; + + opcode = le32_to_cpu(cmd->frame->dcmd.opcode); + scsi_req_unref(req); + trace_megasas_dcmd_internal_finish(cmd->index, opcode, lun); + switch (opcode) { + case MFI_DCMD_PD_GET_INFO: + retval = megasas_pd_get_info_submit(req->dev, lun, cmd); + break; + case MFI_DCMD_LD_GET_INFO: + retval = megasas_ld_get_info_submit(req->dev, lun, cmd); + break; + default: + trace_megasas_dcmd_internal_invalid(cmd->index, opcode); + retval = MFI_STAT_INVALID_DCMD; + break; + } + if (retval != MFI_STAT_INVALID_STATUS) { + megasas_finish_dcmd(cmd, cmd->iov_size); + } + return retval; +} + +static int megasas_enqueue_req(MegasasCmd *cmd, bool is_write) +{ + int len; + + len = scsi_req_enqueue(cmd->req); + if (len < 0) { + len = -len; + } + if (len > 0) { + if (len > cmd->iov_size) { + if (is_write) { + trace_megasas_iov_write_overflow(cmd->index, len, + cmd->iov_size); + } else { + trace_megasas_iov_read_overflow(cmd->index, len, + cmd->iov_size); + } + } + if (len < cmd->iov_size) { + if (is_write) { + trace_megasas_iov_write_underflow(cmd->index, len, + cmd->iov_size); + } else { + trace_megasas_iov_read_underflow(cmd->index, len, + cmd->iov_size); + } + cmd->iov_size = len; + } + scsi_req_continue(cmd->req); + } + return len; +} + +static int megasas_handle_scsi(MegasasState *s, MegasasCmd *cmd, + bool is_logical) +{ + uint8_t *cdb; + int len; + bool is_write; + struct SCSIDevice *sdev = NULL; + + cdb = cmd->frame->pass.cdb; + + if (cmd->frame->header.target_id < s->fw_luns) { + sdev = scsi_device_find(&s->bus, 0, cmd->frame->header.target_id, + cmd->frame->header.lun_id); + } + cmd->iov_size = le32_to_cpu(cmd->frame->header.data_len); + 
trace_megasas_handle_scsi(mfi_frame_desc[cmd->frame->header.frame_cmd], + is_logical, cmd->frame->header.target_id, + cmd->frame->header.lun_id, sdev, cmd->iov_size); + + if (!sdev || (megasas_is_jbod(s) && is_logical)) { + trace_megasas_scsi_target_not_present( + mfi_frame_desc[cmd->frame->header.frame_cmd], is_logical, + cmd->frame->header.target_id, cmd->frame->header.lun_id); + return MFI_STAT_DEVICE_NOT_FOUND; + } + + if (cmd->frame->header.cdb_len > 16) { + trace_megasas_scsi_invalid_cdb_len( + mfi_frame_desc[cmd->frame->header.frame_cmd], is_logical, + cmd->frame->header.target_id, cmd->frame->header.lun_id, + cmd->frame->header.cdb_len); + megasas_write_sense(cmd, SENSE_CODE(INVALID_OPCODE)); + cmd->frame->header.scsi_status = CHECK_CONDITION; + s->event_count++; + return MFI_STAT_SCSI_DONE_WITH_ERROR; + } + + if (megasas_map_sgl(s, cmd, &cmd->frame->pass.sgl)) { + megasas_write_sense(cmd, SENSE_CODE(TARGET_FAILURE)); + cmd->frame->header.scsi_status = CHECK_CONDITION; + s->event_count++; + return MFI_STAT_SCSI_DONE_WITH_ERROR; + } + + cmd->req = scsi_req_new(sdev, cmd->index, + cmd->frame->header.lun_id, cdb, cmd); + if (!cmd->req) { + trace_megasas_scsi_req_alloc_failed( + mfi_frame_desc[cmd->frame->header.frame_cmd], + cmd->frame->header.target_id, cmd->frame->header.lun_id); + megasas_write_sense(cmd, SENSE_CODE(NO_SENSE)); + cmd->frame->header.scsi_status = BUSY; + s->event_count++; + return MFI_STAT_SCSI_DONE_WITH_ERROR; + } + + is_write = (cmd->req->cmd.mode == SCSI_XFER_TO_DEV); + len = megasas_enqueue_req(cmd, is_write); + if (len > 0) { + if (is_write) { + trace_megasas_scsi_write_start(cmd->index, len); + } else { + trace_megasas_scsi_read_start(cmd->index, len); + } + } else { + trace_megasas_scsi_nodata(cmd->index); + } + return MFI_STAT_INVALID_STATUS; +} + +static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd) +{ + uint32_t lba_count, lba_start_hi, lba_start_lo; + uint64_t lba_start; + bool is_write = (cmd->frame->header.frame_cmd == MFI_CMD_LD_WRITE); + uint8_t cdb[16]; + int len; + struct SCSIDevice *sdev = NULL; + + lba_count = le32_to_cpu(cmd->frame->io.header.data_len); + lba_start_lo = le32_to_cpu(cmd->frame->io.lba_lo); + lba_start_hi = le32_to_cpu(cmd->frame->io.lba_hi); + lba_start = ((uint64_t)lba_start_hi << 32) | lba_start_lo; + + if (cmd->frame->header.target_id < s->fw_luns) { + sdev = scsi_device_find(&s->bus, 0, cmd->frame->header.target_id, + cmd->frame->header.lun_id); + } + + trace_megasas_handle_io(cmd->index, + mfi_frame_desc[cmd->frame->header.frame_cmd], + cmd->frame->header.target_id, + cmd->frame->header.lun_id, + (unsigned long)lba_start, (unsigned long)lba_count); + if (!sdev) { + trace_megasas_io_target_not_present(cmd->index, + mfi_frame_desc[cmd->frame->header.frame_cmd], + cmd->frame->header.target_id, cmd->frame->header.lun_id); + return MFI_STAT_DEVICE_NOT_FOUND; + } + + if (cmd->frame->header.cdb_len > 16) { + trace_megasas_scsi_invalid_cdb_len( + mfi_frame_desc[cmd->frame->header.frame_cmd], 1, + cmd->frame->header.target_id, cmd->frame->header.lun_id, + cmd->frame->header.cdb_len); + megasas_write_sense(cmd, SENSE_CODE(INVALID_OPCODE)); + cmd->frame->header.scsi_status = CHECK_CONDITION; + s->event_count++; + return MFI_STAT_SCSI_DONE_WITH_ERROR; + } + + cmd->iov_size = lba_count * sdev->blocksize; + if (megasas_map_sgl(s, cmd, &cmd->frame->io.sgl)) { + megasas_write_sense(cmd, SENSE_CODE(TARGET_FAILURE)); + cmd->frame->header.scsi_status = CHECK_CONDITION; + s->event_count++; + return MFI_STAT_SCSI_DONE_WITH_ERROR; + } + 
+ megasas_encode_lba(cdb, lba_start, lba_count, is_write); + cmd->req = scsi_req_new(sdev, cmd->index, + cmd->frame->header.lun_id, cdb, cmd); + if (!cmd->req) { + trace_megasas_scsi_req_alloc_failed( + mfi_frame_desc[cmd->frame->header.frame_cmd], + cmd->frame->header.target_id, cmd->frame->header.lun_id); + megasas_write_sense(cmd, SENSE_CODE(NO_SENSE)); + cmd->frame->header.scsi_status = BUSY; + s->event_count++; + return MFI_STAT_SCSI_DONE_WITH_ERROR; + } + len = megasas_enqueue_req(cmd, is_write); + if (len > 0) { + if (is_write) { + trace_megasas_io_write_start(cmd->index, lba_start, lba_count, len); + } else { + trace_megasas_io_read_start(cmd->index, lba_start, lba_count, len); + } + } + return MFI_STAT_INVALID_STATUS; +} + +static int megasas_finish_internal_command(MegasasCmd *cmd, + SCSIRequest *req, size_t resid) +{ + int retval = MFI_STAT_INVALID_CMD; + + if (cmd->frame->header.frame_cmd == MFI_CMD_DCMD) { + cmd->iov_size -= resid; + retval = megasas_finish_internal_dcmd(cmd, req); + } + return retval; +} + +static QEMUSGList *megasas_get_sg_list(SCSIRequest *req) +{ + MegasasCmd *cmd = req->hba_private; + + if (cmd->frame->header.frame_cmd == MFI_CMD_DCMD) { + return NULL; + } else { + return &cmd->qsg; + } +} + +static void megasas_xfer_complete(SCSIRequest *req, uint32_t len) +{ + MegasasCmd *cmd = req->hba_private; + uint8_t *buf; + uint32_t opcode; + + trace_megasas_io_complete(cmd->index, len); + + if (cmd->frame->header.frame_cmd != MFI_CMD_DCMD) { + scsi_req_continue(req); + return; + } + + buf = scsi_req_get_buf(req); + opcode = le32_to_cpu(cmd->frame->dcmd.opcode); + if (opcode == MFI_DCMD_PD_GET_INFO && cmd->iov_buf) { + struct mfi_pd_info *info = cmd->iov_buf; + + if (info->inquiry_data[0] == 0x7f) { + memset(info->inquiry_data, 0, sizeof(info->inquiry_data)); + memcpy(info->inquiry_data, buf, len); + } else if (info->vpd_page83[0] == 0x7f) { + memset(info->vpd_page83, 0, sizeof(info->vpd_page83)); + memcpy(info->vpd_page83, buf, len); + } + scsi_req_continue(req); + } else if (opcode == MFI_DCMD_LD_GET_INFO) { + struct mfi_ld_info *info = cmd->iov_buf; + + if (cmd->iov_buf) { + memcpy(info->vpd_page83, buf, sizeof(info->vpd_page83)); + scsi_req_continue(req); + } + } +} + +static void megasas_command_complete(SCSIRequest *req, uint32_t status, + size_t resid) +{ + MegasasCmd *cmd = req->hba_private; + uint8_t cmd_status = MFI_STAT_OK; + + trace_megasas_command_complete(cmd->index, status, resid); + + if (cmd->req != req) { + /* + * Internal command complete + */ + cmd_status = megasas_finish_internal_command(cmd, req, resid); + if (cmd_status == MFI_STAT_INVALID_STATUS) { + return; + } + } else { + req->status = status; + trace_megasas_scsi_complete(cmd->index, req->status, + cmd->iov_size, req->cmd.xfer); + if (req->status != GOOD) { + cmd_status = MFI_STAT_SCSI_DONE_WITH_ERROR; + } + if (req->status == CHECK_CONDITION) { + megasas_copy_sense(cmd); + } + + megasas_unmap_sgl(cmd); + cmd->frame->header.scsi_status = req->status; + scsi_req_unref(cmd->req); + cmd->req = NULL; + } + cmd->frame->header.cmd_status = cmd_status; + megasas_complete_frame(cmd->state, cmd->context); +} + +static void megasas_command_cancel(SCSIRequest *req) +{ + MegasasCmd *cmd = req->hba_private; + + if (cmd) { + megasas_abort_command(cmd); + } else { + scsi_req_unref(req); + } +} + +static int megasas_handle_abort(MegasasState *s, MegasasCmd *cmd) +{ + uint64_t abort_ctx = le64_to_cpu(cmd->frame->abort.abort_context); + target_phys_addr_t abort_addr, addr_hi, addr_lo; + MegasasCmd 
*abort_cmd; + + addr_hi = le32_to_cpu(cmd->frame->abort.abort_mfi_addr_hi); + addr_lo = le32_to_cpu(cmd->frame->abort.abort_mfi_addr_lo); + abort_addr = ((uint64_t)addr_hi << 32) | addr_lo; + + abort_cmd = megasas_lookup_frame(s, abort_addr); + if (!abort_cmd) { + trace_megasas_abort_no_cmd(cmd->index, abort_ctx); + s->event_count++; + return MFI_STAT_OK; + } + if (!megasas_use_queue64(s)) { + abort_ctx &= (uint64_t)0xFFFFFFFF; + } + if (abort_cmd->context != abort_ctx) { + trace_megasas_abort_invalid_context(cmd->index, abort_cmd->index, + abort_cmd->context); + s->event_count++; + return MFI_STAT_ABORT_NOT_POSSIBLE; + } + trace_megasas_abort_frame(cmd->index, abort_cmd->index); + megasas_abort_command(abort_cmd); + if (!s->event_cmd || abort_cmd != s->event_cmd) { + s->event_cmd = NULL; + } + s->event_count++; + return MFI_STAT_OK; +} + +static void megasas_handle_frame(MegasasState *s, uint64_t frame_addr, + uint32_t frame_count) +{ + uint8_t frame_status = MFI_STAT_INVALID_CMD; + uint64_t frame_context; + MegasasCmd *cmd; + + /* + * Always read 64bit context, top bits will be + * masked out if required in megasas_enqueue_frame() + */ + frame_context = megasas_frame_get_context(frame_addr); + + cmd = megasas_enqueue_frame(s, frame_addr, frame_context, frame_count); + if (!cmd) { + /* reply queue full */ + trace_megasas_frame_busy(frame_addr); + megasas_frame_set_scsi_status(frame_addr, BUSY); + megasas_frame_set_cmd_status(frame_addr, MFI_STAT_SCSI_DONE_WITH_ERROR); + megasas_complete_frame(s, frame_context); + s->event_count++; + return; + } + switch (cmd->frame->header.frame_cmd) { + case MFI_CMD_INIT: + frame_status = megasas_init_firmware(s, cmd); + break; + case MFI_CMD_DCMD: + frame_status = megasas_handle_dcmd(s, cmd); + break; + case MFI_CMD_ABORT: + frame_status = megasas_handle_abort(s, cmd); + break; + case MFI_CMD_PD_SCSI_IO: + frame_status = megasas_handle_scsi(s, cmd, 0); + break; + case MFI_CMD_LD_SCSI_IO: + frame_status = megasas_handle_scsi(s, cmd, 1); + break; + case MFI_CMD_LD_READ: + case MFI_CMD_LD_WRITE: + frame_status = megasas_handle_io(s, cmd); + break; + default: + trace_megasas_unhandled_frame_cmd(cmd->index, + cmd->frame->header.frame_cmd); + s->event_count++; + break; + } + if (frame_status != MFI_STAT_INVALID_STATUS) { + if (cmd->frame) { + cmd->frame->header.cmd_status = frame_status; + } else { + megasas_frame_set_cmd_status(frame_addr, frame_status); + } + megasas_complete_frame(s, cmd->context); + } +} + +static uint64_t megasas_mmio_read(void *opaque, target_phys_addr_t addr, + unsigned size) +{ + MegasasState *s = opaque; + uint32_t retval = 0; + + switch (addr) { + case MFI_IDB: + retval = 0; + break; + case MFI_OMSG0: + case MFI_OSP0: + retval = (megasas_use_msix(s) ? 
MFI_FWSTATE_MSIX_SUPPORTED : 0) | + (s->fw_state & MFI_FWSTATE_MASK) | + ((s->fw_sge & 0xff) << 16) | + (s->fw_cmds & 0xFFFF); + break; + case MFI_OSTS: + if (megasas_intr_enabled(s) && s->doorbell) { + retval = MFI_1078_RM | 1; + } + break; + case MFI_OMSK: + retval = s->intr_mask; + break; + case MFI_ODCR0: + retval = s->doorbell; + break; + default: + trace_megasas_mmio_invalid_readl(addr); + break; + } + trace_megasas_mmio_readl(addr, retval); + return retval; +} + +static void megasas_mmio_write(void *opaque, target_phys_addr_t addr, + uint64_t val, unsigned size) +{ + MegasasState *s = opaque; + uint64_t frame_addr; + uint32_t frame_count; + int i; + + trace_megasas_mmio_writel(addr, val); + switch (addr) { + case MFI_IDB: + if (val & MFI_FWINIT_ABORT) { + /* Abort all pending cmds */ + for (i = 0; i < s->fw_cmds; i++) { + megasas_abort_command(&s->frames[i]); + } + } + if (val & MFI_FWINIT_READY) { + /* move to FW READY */ + megasas_soft_reset(s); + } + if (val & MFI_FWINIT_MFIMODE) { + /* discard MFIs */ + } + break; + case MFI_OMSK: + s->intr_mask = val; + if (!megasas_intr_enabled(s) && !msix_enabled(&s->dev)) { + trace_megasas_irq_lower(); + qemu_irq_lower(s->dev.irq[0]); + } + if (megasas_intr_enabled(s)) { + trace_megasas_intr_enabled(); + } else { + trace_megasas_intr_disabled(); + } + break; + case MFI_ODCR0: + s->doorbell = 0; + if (s->producer_pa && megasas_intr_enabled(s)) { + /* Update reply queue pointer */ + trace_megasas_qf_update(s->reply_queue_head, s->busy); + stl_le_phys(s->producer_pa, s->reply_queue_head); + if (!msix_enabled(&s->dev)) { + trace_megasas_irq_lower(); + qemu_irq_lower(s->dev.irq[0]); + } + } + break; + case MFI_IQPH: + /* Received high 32 bits of a 64 bit MFI frame address */ + s->frame_hi = val; + break; + case MFI_IQPL: + /* Received low 32 bits of a 64 bit MFI frame address */ + case MFI_IQP: + /* Received 32 bit MFI frame address */ + frame_addr = (val & ~0x1F); + /* Add possible 64 bit offset */ + frame_addr |= ((uint64_t)s->frame_hi << 32); + s->frame_hi = 0; + frame_count = (val >> 1) & 0xF; + megasas_handle_frame(s, frame_addr, frame_count); + break; + default: + trace_megasas_mmio_invalid_writel(addr, val); + break; + } +} + +static const MemoryRegionOps megasas_mmio_ops = { + .read = megasas_mmio_read, + .write = megasas_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + } +}; + +static uint64_t megasas_port_read(void *opaque, target_phys_addr_t addr, + unsigned size) +{ + return megasas_mmio_read(opaque, addr & 0xff, size); +} + +static void megasas_port_write(void *opaque, target_phys_addr_t addr, + uint64_t val, unsigned size) +{ + megasas_mmio_write(opaque, addr & 0xff, val, size); +} + +static const MemoryRegionOps megasas_port_ops = { + .read = megasas_port_read, + .write = megasas_port_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static uint64_t megasas_queue_read(void *opaque, target_phys_addr_t addr, + unsigned size) +{ + return 0; +} + +static const MemoryRegionOps megasas_queue_ops = { + .read = megasas_queue_read, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + } +}; + +static void megasas_soft_reset(MegasasState *s) +{ + int i; + MegasasCmd *cmd; + + trace_megasas_reset(); + for (i = 0; i < s->fw_cmds; i++) { + cmd = &s->frames[i]; + megasas_abort_command(cmd); + } + megasas_reset_frames(s); + s->reply_queue_len = s->fw_cmds; + 
s->reply_queue_pa = 0; + s->consumer_pa = 0; + s->producer_pa = 0; + s->fw_state = MFI_FWSTATE_READY; + s->doorbell = 0; + s->intr_mask = MEGASAS_INTR_DISABLED_MASK; + s->frame_hi = 0; + s->flags &= ~MEGASAS_MASK_USE_QUEUE64; + s->event_count++; + s->boot_event = s->event_count; +} + +static void megasas_scsi_reset(DeviceState *dev) +{ + MegasasState *s = DO_UPCAST(MegasasState, dev.qdev, dev); + + megasas_soft_reset(s); +} + +static const VMStateDescription vmstate_megasas = { + .name = "megasas", + .version_id = 0, + .minimum_version_id = 0, + .minimum_version_id_old = 0, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, MegasasState), + + VMSTATE_INT32(fw_state, MegasasState), + VMSTATE_INT32(intr_mask, MegasasState), + VMSTATE_INT32(doorbell, MegasasState), + VMSTATE_UINT64(reply_queue_pa, MegasasState), + VMSTATE_UINT64(consumer_pa, MegasasState), + VMSTATE_UINT64(producer_pa, MegasasState), + VMSTATE_END_OF_LIST() + } +}; + +static int megasas_scsi_uninit(PCIDevice *d) +{ + MegasasState *s = DO_UPCAST(MegasasState, dev, d); + +#ifdef USE_MSIX + msix_uninit(&s->dev, &s->mmio_io); +#endif + memory_region_destroy(&s->mmio_io); + memory_region_destroy(&s->port_io); + memory_region_destroy(&s->queue_io); + return 0; +} + +static const struct SCSIBusInfo megasas_scsi_info = { + .tcq = true, + .max_target = MFI_MAX_LD, + .max_lun = 255, + + .transfer_data = megasas_xfer_complete, + .get_sg_list = megasas_get_sg_list, + .complete = megasas_command_complete, + .cancel = megasas_command_cancel, +}; + +static int megasas_scsi_init(PCIDevice *dev) +{ + MegasasState *s = DO_UPCAST(MegasasState, dev, dev); + uint8_t *pci_conf; + int i, bar_type; + + pci_conf = s->dev.config; + + /* PCI latency timer = 0 */ + pci_conf[PCI_LATENCY_TIMER] = 0; + /* Interrupt pin 1 */ + pci_conf[PCI_INTERRUPT_PIN] = 0x01; + + memory_region_init_io(&s->mmio_io, &megasas_mmio_ops, s, + "megasas-mmio", 0x4000); + memory_region_init_io(&s->port_io, &megasas_port_ops, s, + "megasas-io", 256); + memory_region_init_io(&s->queue_io, &megasas_queue_ops, s, + "megasas-queue", 0x40000); + +#ifdef USE_MSIX + /* MSI-X support is currently broken */ + if (megasas_use_msix(s) && + msix_init(&s->dev, 15, &s->mmio_io, 0, 0x2000)) { + s->flags &= ~MEGASAS_MASK_USE_MSIX; + } +#else + s->flags &= ~MEGASAS_MASK_USE_MSIX; +#endif + + bar_type = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64; + pci_register_bar(&s->dev, 0, bar_type, &s->mmio_io); + pci_register_bar(&s->dev, 2, PCI_BASE_ADDRESS_SPACE_IO, &s->port_io); + pci_register_bar(&s->dev, 3, bar_type, &s->queue_io); + + if (megasas_use_msix(s)) { + msix_vector_use(&s->dev, 0); + } + + if (s->fw_sge >= MEGASAS_MAX_SGE - MFI_PASS_FRAME_SIZE) { + s->fw_sge = MEGASAS_MAX_SGE - MFI_PASS_FRAME_SIZE; + } else if (s->fw_sge >= 128 - MFI_PASS_FRAME_SIZE) { + s->fw_sge = 128 - MFI_PASS_FRAME_SIZE; + } else { + s->fw_sge = 64 - MFI_PASS_FRAME_SIZE; + } + if (s->fw_cmds > MEGASAS_MAX_FRAMES) { + s->fw_cmds = MEGASAS_MAX_FRAMES; + } + trace_megasas_init(s->fw_sge, s->fw_cmds, + megasas_use_msix(s) ? "MSI-X" : "INTx", + megasas_is_jbod(s) ? "jbod" : "raid"); + s->fw_luns = (MFI_MAX_LD > MAX_SCSI_DEVS) ? 
+ MAX_SCSI_DEVS : MFI_MAX_LD; + s->producer_pa = 0; + s->consumer_pa = 0; + for (i = 0; i < s->fw_cmds; i++) { + s->frames[i].index = i; + s->frames[i].context = -1; + s->frames[i].pa = 0; + s->frames[i].state = s; + } + + scsi_bus_new(&s->bus, &dev->qdev, &megasas_scsi_info); + scsi_bus_legacy_handle_cmdline(&s->bus); + return 0; +} + +static Property megasas_properties[] = { + DEFINE_PROP_UINT32("max_sge", MegasasState, fw_sge, + MEGASAS_DEFAULT_SGE), + DEFINE_PROP_UINT32("max_cmds", MegasasState, fw_cmds, + MEGASAS_DEFAULT_FRAMES), +#ifdef USE_MSIX + DEFINE_PROP_BIT("use_msix", MegasasState, flags, + MEGASAS_FLAG_USE_MSIX, false), +#endif + DEFINE_PROP_BIT("use_jbod", MegasasState, flags, + MEGASAS_FLAG_USE_JBOD, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void megasas_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); + + pc->init = megasas_scsi_init; + pc->exit = megasas_scsi_uninit; + pc->vendor_id = PCI_VENDOR_ID_LSI_LOGIC; + pc->device_id = PCI_DEVICE_ID_LSI_SAS1078; + pc->subsystem_vendor_id = PCI_VENDOR_ID_LSI_LOGIC; + pc->subsystem_id = 0x1013; + pc->class_id = PCI_CLASS_STORAGE_RAID; + dc->props = megasas_properties; + dc->reset = megasas_scsi_reset; + dc->vmsd = &vmstate_megasas; + dc->desc = "LSI MegaRAID SAS 1078"; +} + +static const TypeInfo megasas_info = { + .name = "megasas", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(MegasasState), + .class_init = megasas_class_init, +}; + +static void megasas_register_types(void) +{ + type_register_static(&megasas_info); +} + +type_init(megasas_register_types) diff --git a/hw/mfi.h b/hw/mfi.h new file mode 100644 index 0000000000..8a821623e0 --- /dev/null +++ b/hw/mfi.h @@ -0,0 +1,1248 @@ +/* + * NetBSD header file, copied from + * http://gitorious.org/freebsd/freebsd/blobs/HEAD/sys/dev/mfi/mfireg.h + */ +/*- + * Copyright (c) 2006 IronPort Systems + * Copyright (c) 2007 LSI Corp. + * Copyright (c) 2007 Rajesh Prabhakaran. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef MFI_REG_H +#define MFI_REG_H + +/* + * MegaRAID SAS MFI firmware definitions + */ + +/* + * Start with the register set. All registers are 32 bits wide. + * The usual Intel IOP style setup. 
+ */ +#define MFI_IMSG0 0x10 /* Inbound message 0 */ +#define MFI_IMSG1 0x14 /* Inbound message 1 */ +#define MFI_OMSG0 0x18 /* Outbound message 0 */ +#define MFI_OMSG1 0x1c /* Outbound message 1 */ +#define MFI_IDB 0x20 /* Inbound doorbell */ +#define MFI_ISTS 0x24 /* Inbound interrupt status */ +#define MFI_IMSK 0x28 /* Inbound interrupt mask */ +#define MFI_ODB 0x2c /* Outbound doorbell */ +#define MFI_OSTS 0x30 /* Outbound interrupt status */ +#define MFI_OMSK 0x34 /* Outbound interrupt mask */ +#define MFI_IQP 0x40 /* Inbound queue port */ +#define MFI_OQP 0x44 /* Outbound queue port */ + +/* + * 1078 specific related register + */ +#define MFI_ODR0 0x9c /* outbound doorbell register0 */ +#define MFI_ODCR0 0xa0 /* outbound doorbell clear register0 */ +#define MFI_OSP0 0xb0 /* outbound scratch pad0 */ +#define MFI_IQPL 0xc0 /* Inbound queue port (low bytes) */ +#define MFI_IQPH 0xc4 /* Inbound queue port (high bytes) */ +#define MFI_DIAG 0xf8 /* Host diag */ +#define MFI_SEQ 0xfc /* Sequencer offset */ +#define MFI_1078_EIM 0x80000004 /* 1078 enable intrrupt mask */ +#define MFI_RMI 0x2 /* reply message interrupt */ +#define MFI_1078_RM 0x80000000 /* reply 1078 message interrupt */ +#define MFI_ODC 0x4 /* outbound doorbell change interrupt */ + +/* + * gen2 specific changes + */ +#define MFI_GEN2_EIM 0x00000005 /* gen2 enable interrupt mask */ +#define MFI_GEN2_RM 0x00000001 /* reply gen2 message interrupt */ + +/* + * skinny specific changes + */ +#define MFI_SKINNY_IDB 0x00 /* Inbound doorbell is at 0x00 for skinny */ +#define MFI_SKINNY_RM 0x00000001 /* reply skinny message interrupt */ + +/* Bits for MFI_OSTS */ +#define MFI_OSTS_INTR_VALID 0x00000002 + +/* + * Firmware state values. Found in OMSG0 during initialization. + */ +#define MFI_FWSTATE_MASK 0xf0000000 +#define MFI_FWSTATE_UNDEFINED 0x00000000 +#define MFI_FWSTATE_BB_INIT 0x10000000 +#define MFI_FWSTATE_FW_INIT 0x40000000 +#define MFI_FWSTATE_WAIT_HANDSHAKE 0x60000000 +#define MFI_FWSTATE_FW_INIT_2 0x70000000 +#define MFI_FWSTATE_DEVICE_SCAN 0x80000000 +#define MFI_FWSTATE_BOOT_MSG_PENDING 0x90000000 +#define MFI_FWSTATE_FLUSH_CACHE 0xa0000000 +#define MFI_FWSTATE_READY 0xb0000000 +#define MFI_FWSTATE_OPERATIONAL 0xc0000000 +#define MFI_FWSTATE_FAULT 0xf0000000 +#define MFI_FWSTATE_MAXSGL_MASK 0x00ff0000 +#define MFI_FWSTATE_MAXCMD_MASK 0x0000ffff +#define MFI_FWSTATE_MSIX_SUPPORTED 0x04000000 +#define MFI_FWSTATE_HOSTMEMREQD_MASK 0x08000000 + +/* + * Control bits to drive the card to ready state. These go into the IDB + * register. 
+ */ +#define MFI_FWINIT_ABORT 0x00000001 /* Abort all pending commands */ +#define MFI_FWINIT_READY 0x00000002 /* Move from operational to ready */ +#define MFI_FWINIT_MFIMODE 0x00000004 /* unknown */ +#define MFI_FWINIT_CLEAR_HANDSHAKE 0x00000008 /* Respond to WAIT_HANDSHAKE */ +#define MFI_FWINIT_HOTPLUG 0x00000010 +#define MFI_FWINIT_STOP_ADP 0x00000020 /* Move to operational, stop */ +#define MFI_FWINIT_ADP_RESET 0x00000040 /* Reset ADP */ + +/* MFI Commands */ +typedef enum { + MFI_CMD_INIT = 0x00, + MFI_CMD_LD_READ, + MFI_CMD_LD_WRITE, + MFI_CMD_LD_SCSI_IO, + MFI_CMD_PD_SCSI_IO, + MFI_CMD_DCMD, + MFI_CMD_ABORT, + MFI_CMD_SMP, + MFI_CMD_STP +} mfi_cmd_t; + +/* Direct commands */ +typedef enum { + MFI_DCMD_CTRL_MFI_HOST_MEM_ALLOC = 0x0100e100, + MFI_DCMD_CTRL_GET_INFO = 0x01010000, + MFI_DCMD_CTRL_GET_PROPERTIES = 0x01020100, + MFI_DCMD_CTRL_SET_PROPERTIES = 0x01020200, + MFI_DCMD_CTRL_ALARM = 0x01030000, + MFI_DCMD_CTRL_ALARM_GET = 0x01030100, + MFI_DCMD_CTRL_ALARM_ENABLE = 0x01030200, + MFI_DCMD_CTRL_ALARM_DISABLE = 0x01030300, + MFI_DCMD_CTRL_ALARM_SILENCE = 0x01030400, + MFI_DCMD_CTRL_ALARM_TEST = 0x01030500, + MFI_DCMD_CTRL_EVENT_GETINFO = 0x01040100, + MFI_DCMD_CTRL_EVENT_CLEAR = 0x01040200, + MFI_DCMD_CTRL_EVENT_GET = 0x01040300, + MFI_DCMD_CTRL_EVENT_COUNT = 0x01040400, + MFI_DCMD_CTRL_EVENT_WAIT = 0x01040500, + MFI_DCMD_CTRL_SHUTDOWN = 0x01050000, + MFI_DCMD_HIBERNATE_STANDBY = 0x01060000, + MFI_DCMD_CTRL_GET_TIME = 0x01080101, + MFI_DCMD_CTRL_SET_TIME = 0x01080102, + MFI_DCMD_CTRL_BIOS_DATA_GET = 0x010c0100, + MFI_DCMD_CTRL_BIOS_DATA_SET = 0x010c0200, + MFI_DCMD_CTRL_FACTORY_DEFAULTS = 0x010d0000, + MFI_DCMD_CTRL_MFC_DEFAULTS_GET = 0x010e0201, + MFI_DCMD_CTRL_MFC_DEFAULTS_SET = 0x010e0202, + MFI_DCMD_CTRL_CACHE_FLUSH = 0x01101000, + MFI_DCMD_PD_GET_LIST = 0x02010000, + MFI_DCMD_PD_LIST_QUERY = 0x02010100, + MFI_DCMD_PD_GET_INFO = 0x02020000, + MFI_DCMD_PD_STATE_SET = 0x02030100, + MFI_DCMD_PD_REBUILD = 0x02040100, + MFI_DCMD_PD_BLINK = 0x02070100, + MFI_DCMD_PD_UNBLINK = 0x02070200, + MFI_DCMD_LD_GET_LIST = 0x03010000, + MFI_DCMD_LD_GET_INFO = 0x03020000, + MFI_DCMD_LD_GET_PROP = 0x03030000, + MFI_DCMD_LD_SET_PROP = 0x03040000, + MFI_DCMD_LD_DELETE = 0x03090000, + MFI_DCMD_CFG_READ = 0x04010000, + MFI_DCMD_CFG_ADD = 0x04020000, + MFI_DCMD_CFG_CLEAR = 0x04030000, + MFI_DCMD_CFG_FOREIGN_READ = 0x04060100, + MFI_DCMD_CFG_FOREIGN_IMPORT = 0x04060400, + MFI_DCMD_BBU_STATUS = 0x05010000, + MFI_DCMD_BBU_CAPACITY_INFO = 0x05020000, + MFI_DCMD_BBU_DESIGN_INFO = 0x05030000, + MFI_DCMD_BBU_PROP_GET = 0x05050100, + MFI_DCMD_CLUSTER = 0x08000000, + MFI_DCMD_CLUSTER_RESET_ALL = 0x08010100, + MFI_DCMD_CLUSTER_RESET_LD = 0x08010200 +} mfi_dcmd_t; + +/* Modifiers for MFI_DCMD_CTRL_FLUSHCACHE */ +#define MFI_FLUSHCACHE_CTRL 0x01 +#define MFI_FLUSHCACHE_DISK 0x02 + +/* Modifiers for MFI_DCMD_CTRL_SHUTDOWN */ +#define MFI_SHUTDOWN_SPINDOWN 0x01 + +/* + * MFI Frame flags + */ +typedef enum { + MFI_FRAME_DONT_POST_IN_REPLY_QUEUE = 0x0001, + MFI_FRAME_SGL64 = 0x0002, + MFI_FRAME_SENSE64 = 0x0004, + MFI_FRAME_DIR_WRITE = 0x0008, + MFI_FRAME_DIR_READ = 0x0010, + MFI_FRAME_IEEE_SGL = 0x0020, +} mfi_frame_flags; + +/* MFI Status codes */ +typedef enum { + MFI_STAT_OK = 0x00, + MFI_STAT_INVALID_CMD, + MFI_STAT_INVALID_DCMD, + MFI_STAT_INVALID_PARAMETER, + MFI_STAT_INVALID_SEQUENCE_NUMBER, + MFI_STAT_ABORT_NOT_POSSIBLE, + MFI_STAT_APP_HOST_CODE_NOT_FOUND, + MFI_STAT_APP_IN_USE, + MFI_STAT_APP_NOT_INITIALIZED, + MFI_STAT_ARRAY_INDEX_INVALID, + MFI_STAT_ARRAY_ROW_NOT_EMPTY, + 
MFI_STAT_CONFIG_RESOURCE_CONFLICT, + MFI_STAT_DEVICE_NOT_FOUND, + MFI_STAT_DRIVE_TOO_SMALL, + MFI_STAT_FLASH_ALLOC_FAIL, + MFI_STAT_FLASH_BUSY, + MFI_STAT_FLASH_ERROR = 0x10, + MFI_STAT_FLASH_IMAGE_BAD, + MFI_STAT_FLASH_IMAGE_INCOMPLETE, + MFI_STAT_FLASH_NOT_OPEN, + MFI_STAT_FLASH_NOT_STARTED, + MFI_STAT_FLUSH_FAILED, + MFI_STAT_HOST_CODE_NOT_FOUNT, + MFI_STAT_LD_CC_IN_PROGRESS, + MFI_STAT_LD_INIT_IN_PROGRESS, + MFI_STAT_LD_LBA_OUT_OF_RANGE, + MFI_STAT_LD_MAX_CONFIGURED, + MFI_STAT_LD_NOT_OPTIMAL, + MFI_STAT_LD_RBLD_IN_PROGRESS, + MFI_STAT_LD_RECON_IN_PROGRESS, + MFI_STAT_LD_WRONG_RAID_LEVEL, + MFI_STAT_MAX_SPARES_EXCEEDED, + MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20, + MFI_STAT_MFC_HW_ERROR, + MFI_STAT_NO_HW_PRESENT, + MFI_STAT_NOT_FOUND, + MFI_STAT_NOT_IN_ENCL, + MFI_STAT_PD_CLEAR_IN_PROGRESS, + MFI_STAT_PD_TYPE_WRONG, + MFI_STAT_PR_DISABLED, + MFI_STAT_ROW_INDEX_INVALID, + MFI_STAT_SAS_CONFIG_INVALID_ACTION, + MFI_STAT_SAS_CONFIG_INVALID_DATA, + MFI_STAT_SAS_CONFIG_INVALID_PAGE, + MFI_STAT_SAS_CONFIG_INVALID_TYPE, + MFI_STAT_SCSI_DONE_WITH_ERROR, + MFI_STAT_SCSI_IO_FAILED, + MFI_STAT_SCSI_RESERVATION_CONFLICT, + MFI_STAT_SHUTDOWN_FAILED = 0x30, + MFI_STAT_TIME_NOT_SET, + MFI_STAT_WRONG_STATE, + MFI_STAT_LD_OFFLINE, + MFI_STAT_PEER_NOTIFICATION_REJECTED, + MFI_STAT_PEER_NOTIFICATION_FAILED, + MFI_STAT_RESERVATION_IN_PROGRESS, + MFI_STAT_I2C_ERRORS_DETECTED, + MFI_STAT_PCI_ERRORS_DETECTED, + MFI_STAT_DIAG_FAILED, + MFI_STAT_BOOT_MSG_PENDING, + MFI_STAT_FOREIGN_CONFIG_INCOMPLETE, + MFI_STAT_INVALID_SGL, + MFI_STAT_UNSUPPORTED_HW, + MFI_STAT_CC_SCHEDULE_DISABLED, + MFI_STAT_PD_COPYBACK_IN_PROGRESS, + MFI_STAT_MULTIPLE_PDS_IN_ARRAY = 0x40, + MFI_STAT_FW_DOWNLOAD_ERROR, + MFI_STAT_FEATURE_SECURITY_NOT_ENABLED, + MFI_STAT_LOCK_KEY_ALREADY_EXISTS, + MFI_STAT_LOCK_KEY_BACKUP_NOT_ALLOWED, + MFI_STAT_LOCK_KEY_VERIFY_NOT_ALLOWED, + MFI_STAT_LOCK_KEY_VERIFY_FAILED, + MFI_STAT_LOCK_KEY_REKEY_NOT_ALLOWED, + MFI_STAT_LOCK_KEY_INVALID, + MFI_STAT_LOCK_KEY_ESCROW_INVALID, + MFI_STAT_LOCK_KEY_BACKUP_REQUIRED, + MFI_STAT_SECURE_LD_EXISTS, + MFI_STAT_LD_SECURE_NOT_ALLOWED, + MFI_STAT_REPROVISION_NOT_ALLOWED, + MFI_STAT_PD_SECURITY_TYPE_WRONG, + MFI_STAT_LD_ENCRYPTION_TYPE_INVALID, + MFI_STAT_CONFIG_FDE_NON_FDE_MIX_NOT_ALLOWED = 0x50, + MFI_STAT_CONFIG_LD_ENCRYPTION_TYPE_MIX_NOT_ALLOWED, + MFI_STAT_SECRET_KEY_NOT_ALLOWED, + MFI_STAT_PD_HW_ERRORS_DETECTED, + MFI_STAT_LD_CACHE_PINNED, + MFI_STAT_POWER_STATE_SET_IN_PROGRESS, + MFI_STAT_POWER_STATE_SET_BUSY, + MFI_STAT_POWER_STATE_WRONG, + MFI_STAT_PR_NO_AVAILABLE_PD_FOUND, + MFI_STAT_CTRL_RESET_REQUIRED, + MFI_STAT_LOCK_KEY_EKM_NO_BOOT_AGENT, + MFI_STAT_SNAP_NO_SPACE, + MFI_STAT_SNAP_PARTIAL_FAILURE, + MFI_STAT_UPGRADE_KEY_INCOMPATIBLE, + MFI_STAT_PFK_INCOMPATIBLE, + MFI_STAT_PD_MAX_UNCONFIGURED, + MFI_STAT_IO_METRICS_DISABLED = 0x60, + MFI_STAT_AEC_NOT_STOPPED, + MFI_STAT_PI_TYPE_WRONG, + MFI_STAT_LD_PD_PI_INCOMPATIBLE, + MFI_STAT_PI_NOT_ENABLED, + MFI_STAT_LD_BLOCK_SIZE_MISMATCH, + MFI_STAT_INVALID_STATUS = 0xFF +} mfi_status_t; + +/* Event classes */ +typedef enum { + MFI_EVT_CLASS_DEBUG = -2, + MFI_EVT_CLASS_PROGRESS = -1, + MFI_EVT_CLASS_INFO = 0, + MFI_EVT_CLASS_WARNING = 1, + MFI_EVT_CLASS_CRITICAL = 2, + MFI_EVT_CLASS_FATAL = 3, + MFI_EVT_CLASS_DEAD = 4 +} mfi_evt_class_t; + +/* Event locales */ +typedef enum { + MFI_EVT_LOCALE_LD = 0x0001, + MFI_EVT_LOCALE_PD = 0x0002, + MFI_EVT_LOCALE_ENCL = 0x0004, + MFI_EVT_LOCALE_BBU = 0x0008, + MFI_EVT_LOCALE_SAS = 0x0010, + MFI_EVT_LOCALE_CTRL = 0x0020, + MFI_EVT_LOCALE_CONFIG = 0x0040, + MFI_EVT_LOCALE_CLUSTER = 
0x0080, + MFI_EVT_LOCALE_ALL = 0xffff +} mfi_evt_locale_t; + +/* Event args */ +typedef enum { + MR_EVT_ARGS_NONE = 0x00, + MR_EVT_ARGS_CDB_SENSE, + MR_EVT_ARGS_LD, + MR_EVT_ARGS_LD_COUNT, + MR_EVT_ARGS_LD_LBA, + MR_EVT_ARGS_LD_OWNER, + MR_EVT_ARGS_LD_LBA_PD_LBA, + MR_EVT_ARGS_LD_PROG, + MR_EVT_ARGS_LD_STATE, + MR_EVT_ARGS_LD_STRIP, + MR_EVT_ARGS_PD, + MR_EVT_ARGS_PD_ERR, + MR_EVT_ARGS_PD_LBA, + MR_EVT_ARGS_PD_LBA_LD, + MR_EVT_ARGS_PD_PROG, + MR_EVT_ARGS_PD_STATE, + MR_EVT_ARGS_PCI, + MR_EVT_ARGS_RATE, + MR_EVT_ARGS_STR, + MR_EVT_ARGS_TIME, + MR_EVT_ARGS_ECC, + MR_EVT_ARGS_LD_PROP, + MR_EVT_ARGS_PD_SPARE, + MR_EVT_ARGS_PD_INDEX, + MR_EVT_ARGS_DIAG_PASS, + MR_EVT_ARGS_DIAG_FAIL, + MR_EVT_ARGS_PD_LBA_LBA, + MR_EVT_ARGS_PORT_PHY, + MR_EVT_ARGS_PD_MISSING, + MR_EVT_ARGS_PD_ADDRESS, + MR_EVT_ARGS_BITMAP, + MR_EVT_ARGS_CONNECTOR, + MR_EVT_ARGS_PD_PD, + MR_EVT_ARGS_PD_FRU, + MR_EVT_ARGS_PD_PATHINFO, + MR_EVT_ARGS_PD_POWER_STATE, + MR_EVT_ARGS_GENERIC, +} mfi_evt_args; + +/* Event codes */ +#define MR_EVT_CFG_CLEARED 0x0004 +#define MR_EVT_CTRL_SHUTDOWN 0x002a +#define MR_EVT_LD_STATE_CHANGE 0x0051 +#define MR_EVT_PD_INSERTED 0x005b +#define MR_EVT_PD_REMOVED 0x0070 +#define MR_EVT_PD_STATE_CHANGED 0x0072 +#define MR_EVT_LD_CREATED 0x008a +#define MR_EVT_LD_DELETED 0x008b +#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db +#define MR_EVT_LD_OFFLINE 0x00fc +#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152 + +typedef enum { + MR_LD_CACHE_WRITE_BACK = 0x01, + MR_LD_CACHE_WRITE_ADAPTIVE = 0x02, + MR_LD_CACHE_READ_AHEAD = 0x04, + MR_LD_CACHE_READ_ADAPTIVE = 0x08, + MR_LD_CACHE_WRITE_CACHE_BAD_BBU = 0x10, + MR_LD_CACHE_ALLOW_WRITE_CACHE = 0x20, + MR_LD_CACHE_ALLOW_READ_CACHE = 0x40 +} mfi_ld_cache; + +typedef enum { + MR_PD_CACHE_UNCHANGED = 0, + MR_PD_CACHE_ENABLE = 1, + MR_PD_CACHE_DISABLE = 2 +} mfi_pd_cache; + +typedef enum { + MR_PD_QUERY_TYPE_ALL = 0, + MR_PD_QUERY_TYPE_STATE = 1, + MR_PD_QUERY_TYPE_POWER_STATE = 2, + MR_PD_QUERY_TYPE_MEDIA_TYPE = 3, + MR_PD_QUERY_TYPE_SPEED = 4, + MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, /*query for system drives */ +} mfi_pd_query_type; + +/* + * Other propertities and definitions + */ +#define MFI_MAX_PD_CHANNELS 2 +#define MFI_MAX_LD_CHANNELS 2 +#define MFI_MAX_CHANNELS (MFI_MAX_PD_CHANNELS + MFI_MAX_LD_CHANNELS) +#define MFI_MAX_CHANNEL_DEVS 128 +#define MFI_DEFAULT_ID -1 +#define MFI_MAX_LUN 8 +#define MFI_MAX_LD 64 + +#define MFI_FRAME_SIZE 64 +#define MFI_MBOX_SIZE 12 + +/* Firmware flashing can take 40s */ +#define MFI_POLL_TIMEOUT_SECS 50 + +/* Allow for speedier math calculations */ +#define MFI_SECTOR_LEN 512 + +/* Scatter Gather elements */ +struct mfi_sg32 { + uint32_t addr; + uint32_t len; +} __attribute__ ((packed)); + +struct mfi_sg64 { + uint64_t addr; + uint32_t len; +} __attribute__ ((packed)); + +struct mfi_sg_skinny { + uint64_t addr; + uint32_t len; + uint32_t flag; +} __attribute__ ((packed)); + +union mfi_sgl { + struct mfi_sg32 sg32[1]; + struct mfi_sg64 sg64[1]; + struct mfi_sg_skinny sg_skinny[1]; +} __attribute__ ((packed)); + +/* Message frames. 
All messages have a common header */ +struct mfi_frame_header { + uint8_t frame_cmd; + uint8_t sense_len; + uint8_t cmd_status; + uint8_t scsi_status; + uint8_t target_id; + uint8_t lun_id; + uint8_t cdb_len; + uint8_t sge_count; + uint64_t context; + uint16_t flags; + uint16_t timeout; + uint32_t data_len; +} __attribute__ ((packed)); + +struct mfi_init_frame { + struct mfi_frame_header header; + uint32_t qinfo_new_addr_lo; + uint32_t qinfo_new_addr_hi; + uint32_t qinfo_old_addr_lo; + uint32_t qinfo_old_addr_hi; + uint32_t reserved[6]; +}; + +#define MFI_IO_FRAME_SIZE 40 +struct mfi_io_frame { + struct mfi_frame_header header; + uint32_t sense_addr_lo; + uint32_t sense_addr_hi; + uint32_t lba_lo; + uint32_t lba_hi; + union mfi_sgl sgl; +} __attribute__ ((packed)); + +#define MFI_PASS_FRAME_SIZE 48 +struct mfi_pass_frame { + struct mfi_frame_header header; + uint32_t sense_addr_lo; + uint32_t sense_addr_hi; + uint8_t cdb[16]; + union mfi_sgl sgl; +} __attribute__ ((packed)); + +#define MFI_DCMD_FRAME_SIZE 40 +struct mfi_dcmd_frame { + struct mfi_frame_header header; + uint32_t opcode; + uint8_t mbox[MFI_MBOX_SIZE]; + union mfi_sgl sgl; +} __attribute__ ((packed)); + +struct mfi_abort_frame { + struct mfi_frame_header header; + uint64_t abort_context; + uint32_t abort_mfi_addr_lo; + uint32_t abort_mfi_addr_hi; + uint32_t reserved1[6]; +} __attribute__ ((packed)); + +struct mfi_smp_frame { + struct mfi_frame_header header; + uint64_t sas_addr; + union { + struct mfi_sg32 sg32[2]; + struct mfi_sg64 sg64[2]; + } sgl; +} __attribute__ ((packed)); + +struct mfi_stp_frame { + struct mfi_frame_header header; + uint16_t fis[10]; + uint32_t stp_flags; + union { + struct mfi_sg32 sg32[2]; + struct mfi_sg64 sg64[2]; + } sgl; +} __attribute__ ((packed)); + +union mfi_frame { + struct mfi_frame_header header; + struct mfi_init_frame init; + struct mfi_io_frame io; + struct mfi_pass_frame pass; + struct mfi_dcmd_frame dcmd; + struct mfi_abort_frame abort; + struct mfi_smp_frame smp; + struct mfi_stp_frame stp; + uint64_t raw[8]; + uint8_t bytes[MFI_FRAME_SIZE]; +}; + +#define MFI_SENSE_LEN 128 +struct mfi_sense { + uint8_t data[MFI_SENSE_LEN]; +}; + +#define MFI_QUEUE_FLAG_CONTEXT64 0x00000002 + +/* The queue init structure that is passed with the init message */ +struct mfi_init_qinfo { + uint32_t flags; + uint32_t rq_entries; + uint32_t rq_addr_lo; + uint32_t rq_addr_hi; + uint32_t pi_addr_lo; + uint32_t pi_addr_hi; + uint32_t ci_addr_lo; + uint32_t ci_addr_hi; +} __attribute__ ((packed)); + +/* Controller properties */ +struct mfi_ctrl_props { + uint16_t seq_num; + uint16_t pred_fail_poll_interval; + uint16_t intr_throttle_cnt; + uint16_t intr_throttle_timeout; + uint8_t rebuild_rate; + uint8_t patrol_read_rate; + uint8_t bgi_rate; + uint8_t cc_rate; + uint8_t recon_rate; + uint8_t cache_flush_interval; + uint8_t spinup_drv_cnt; + uint8_t spinup_delay; + uint8_t cluster_enable; + uint8_t coercion_mode; + uint8_t alarm_enable; + uint8_t disable_auto_rebuild; + uint8_t disable_battery_warn; + uint8_t ecc_bucket_size; + uint16_t ecc_bucket_leak_rate; + uint8_t restore_hotspare_on_insertion; + uint8_t expose_encl_devices; + uint8_t maintainPdFailHistory; + uint8_t disallowHostRequestReordering; + uint8_t abortCCOnError; + uint8_t loadBalanceMode; + uint8_t disableAutoDetectBackplane; + uint8_t snapVDSpace; + uint32_t OnOffProperties; +/* set TRUE to disable copyBack (0=copyback enabled) */ +#define MFI_CTRL_PROP_CopyBackDisabled (1 << 0) +#define MFI_CTRL_PROP_SMARTerEnabled (1 << 1) +#define 
MFI_CTRL_PROP_PRCorrectUnconfiguredAreas (1 << 2) +#define MFI_CTRL_PROP_UseFdeOnly (1 << 3) +#define MFI_CTRL_PROP_DisableNCQ (1 << 4) +#define MFI_CTRL_PROP_SSDSMARTerEnabled (1 << 5) +#define MFI_CTRL_PROP_SSDPatrolReadEnabled (1 << 6) +#define MFI_CTRL_PROP_EnableSpinDownUnconfigured (1 << 7) +#define MFI_CTRL_PROP_AutoEnhancedImport (1 << 8) +#define MFI_CTRL_PROP_EnableSecretKeyControl (1 << 9) +#define MFI_CTRL_PROP_DisableOnlineCtrlReset (1 << 10) +#define MFI_CTRL_PROP_AllowBootWithPinnedCache (1 << 11) +#define MFI_CTRL_PROP_DisableSpinDownHS (1 << 12) +#define MFI_CTRL_PROP_EnableJBOD (1 << 13) + + uint8_t autoSnapVDSpace; /* % of source LD to be + * reserved for auto snapshot + * in snapshot repository, for + * metadata and user data + * 1=5%, 2=10%, 3=15% and so on + */ + uint8_t viewSpace; /* snapshot writeable VIEWs + * capacity as a % of source LD + * capacity. 0=READ only + * 1=5%, 2=10%, 3=15% and so on + */ + uint16_t spinDownTime; /* # of idle minutes before device + * is spun down (0=use FW defaults) + */ + uint8_t reserved[24]; +} __attribute__ ((packed)); + +/* PCI information about the card. */ +struct mfi_info_pci { + uint16_t vendor; + uint16_t device; + uint16_t subvendor; + uint16_t subdevice; + uint8_t reserved[24]; +} __attribute__ ((packed)); + +/* Host (front end) interface information */ +struct mfi_info_host { + uint8_t type; +#define MFI_INFO_HOST_PCIX 0x01 +#define MFI_INFO_HOST_PCIE 0x02 +#define MFI_INFO_HOST_ISCSI 0x04 +#define MFI_INFO_HOST_SAS3G 0x08 + uint8_t reserved[6]; + uint8_t port_count; + uint64_t port_addr[8]; +} __attribute__ ((packed)); + +/* Device (back end) interface information */ +struct mfi_info_device { + uint8_t type; +#define MFI_INFO_DEV_SPI 0x01 +#define MFI_INFO_DEV_SAS3G 0x02 +#define MFI_INFO_DEV_SATA1 0x04 +#define MFI_INFO_DEV_SATA3G 0x08 + uint8_t reserved[6]; + uint8_t port_count; + uint64_t port_addr[8]; +} __attribute__ ((packed)); + +/* Firmware component information */ +struct mfi_info_component { + char name[8]; + char version[32]; + char build_date[16]; + char build_time[16]; +} __attribute__ ((packed)); + +/* Controller default settings */ +struct mfi_defaults { + uint64_t sas_addr; + uint8_t phy_polarity; + uint8_t background_rate; + uint8_t stripe_size; + uint8_t flush_time; + uint8_t write_back; + uint8_t read_ahead; + uint8_t cache_when_bbu_bad; + uint8_t cached_io; + uint8_t smart_mode; + uint8_t alarm_disable; + uint8_t coercion; + uint8_t zrc_config; + uint8_t dirty_led_shows_drive_activity; + uint8_t bios_continue_on_error; + uint8_t spindown_mode; + uint8_t allowed_device_types; + uint8_t allow_mix_in_enclosure; + uint8_t allow_mix_in_ld; + uint8_t allow_sata_in_cluster; + uint8_t max_chained_enclosures; + uint8_t disable_ctrl_r; + uint8_t enable_web_bios; + uint8_t phy_polarity_split; + uint8_t direct_pd_mapping; + uint8_t bios_enumerate_lds; + uint8_t restored_hot_spare_on_insertion; + uint8_t expose_enclosure_devices; + uint8_t maintain_pd_fail_history; + uint8_t disable_puncture; + uint8_t zero_based_enumeration; + uint8_t disable_preboot_cli; + uint8_t show_drive_led_on_activity; + uint8_t cluster_disable; + uint8_t sas_disable; + uint8_t auto_detect_backplane; + uint8_t fde_only; + uint8_t delay_during_post; + uint8_t resv[19]; +} __attribute__ ((packed)); + +/* Controller default settings */ +struct mfi_bios_data { + uint16_t boot_target_id; + uint8_t do_not_int_13; + uint8_t continue_on_error; + uint8_t verbose; + uint8_t geometry; + uint8_t expose_all_drives; + uint8_t reserved[56]; + uint8_t 
check_sum; +} __attribute__ ((packed)); + +/* SAS (?) controller info, returned from MFI_DCMD_CTRL_GETINFO. */ +struct mfi_ctrl_info { + struct mfi_info_pci pci; + struct mfi_info_host host; + struct mfi_info_device device; + + /* Firmware components that are present and active. */ + uint32_t image_check_word; + uint32_t image_component_count; + struct mfi_info_component image_component[8]; + + /* Firmware components that have been flashed but are inactive */ + uint32_t pending_image_component_count; + struct mfi_info_component pending_image_component[8]; + + uint8_t max_arms; + uint8_t max_spans; + uint8_t max_arrays; + uint8_t max_lds; + char product_name[80]; + char serial_number[32]; + uint32_t hw_present; +#define MFI_INFO_HW_BBU 0x01 +#define MFI_INFO_HW_ALARM 0x02 +#define MFI_INFO_HW_NVRAM 0x04 +#define MFI_INFO_HW_UART 0x08 +#define MFI_INFO_HW_MEM 0x10 +#define MFI_INFO_HW_FLASH 0x20 + uint32_t current_fw_time; + uint16_t max_cmds; + uint16_t max_sg_elements; + uint32_t max_request_size; + uint16_t lds_present; + uint16_t lds_degraded; + uint16_t lds_offline; + uint16_t pd_present; + uint16_t pd_disks_present; + uint16_t pd_disks_pred_failure; + uint16_t pd_disks_failed; + uint16_t nvram_size; + uint16_t memory_size; + uint16_t flash_size; + uint16_t ram_correctable_errors; + uint16_t ram_uncorrectable_errors; + uint8_t cluster_allowed; + uint8_t cluster_active; + uint16_t max_strips_per_io; + + uint32_t raid_levels; +#define MFI_INFO_RAID_0 0x01 +#define MFI_INFO_RAID_1 0x02 +#define MFI_INFO_RAID_5 0x04 +#define MFI_INFO_RAID_1E 0x08 +#define MFI_INFO_RAID_6 0x10 + + uint32_t adapter_ops; +#define MFI_INFO_AOPS_RBLD_RATE 0x0001 +#define MFI_INFO_AOPS_CC_RATE 0x0002 +#define MFI_INFO_AOPS_BGI_RATE 0x0004 +#define MFI_INFO_AOPS_RECON_RATE 0x0008 +#define MFI_INFO_AOPS_PATROL_RATE 0x0010 +#define MFI_INFO_AOPS_ALARM_CONTROL 0x0020 +#define MFI_INFO_AOPS_CLUSTER_SUPPORTED 0x0040 +#define MFI_INFO_AOPS_BBU 0x0080 +#define MFI_INFO_AOPS_SPANNING_ALLOWED 0x0100 +#define MFI_INFO_AOPS_DEDICATED_SPARES 0x0200 +#define MFI_INFO_AOPS_REVERTIBLE_SPARES 0x0400 +#define MFI_INFO_AOPS_FOREIGN_IMPORT 0x0800 +#define MFI_INFO_AOPS_SELF_DIAGNOSTIC 0x1000 +#define MFI_INFO_AOPS_MIXED_ARRAY 0x2000 +#define MFI_INFO_AOPS_GLOBAL_SPARES 0x4000 + + uint32_t ld_ops; +#define MFI_INFO_LDOPS_READ_POLICY 0x01 +#define MFI_INFO_LDOPS_WRITE_POLICY 0x02 +#define MFI_INFO_LDOPS_IO_POLICY 0x04 +#define MFI_INFO_LDOPS_ACCESS_POLICY 0x08 +#define MFI_INFO_LDOPS_DISK_CACHE_POLICY 0x10 + + struct { + uint8_t min; + uint8_t max; + uint8_t reserved[2]; + } __attribute__ ((packed)) stripe_sz_ops; + + uint32_t pd_ops; +#define MFI_INFO_PDOPS_FORCE_ONLINE 0x01 +#define MFI_INFO_PDOPS_FORCE_OFFLINE 0x02 +#define MFI_INFO_PDOPS_FORCE_REBUILD 0x04 + + uint32_t pd_mix_support; +#define MFI_INFO_PDMIX_SAS 0x01 +#define MFI_INFO_PDMIX_SATA 0x02 +#define MFI_INFO_PDMIX_ENCL 0x04 +#define MFI_INFO_PDMIX_LD 0x08 +#define MFI_INFO_PDMIX_SATA_CLUSTER 0x10 + + uint8_t ecc_bucket_count; + uint8_t reserved2[11]; + struct mfi_ctrl_props properties; + char package_version[0x60]; + uint8_t pad[0x800 - 0x6a0]; +} __attribute__ ((packed)); + +/* keep track of an event. */ +union mfi_evt { + struct { + uint16_t locale; + uint8_t reserved; + int8_t class; + } members; + uint32_t word; +} __attribute__ ((packed)); + +/* event log state. 
*/ +struct mfi_evt_log_state { + uint32_t newest_seq_num; + uint32_t oldest_seq_num; + uint32_t clear_seq_num; + uint32_t shutdown_seq_num; + uint32_t boot_seq_num; +} __attribute__ ((packed)); + +struct mfi_progress { + uint16_t progress; + uint16_t elapsed_seconds; +} __attribute__ ((packed)); + +struct mfi_evt_ld { + uint16_t target_id; + uint8_t ld_index; + uint8_t reserved; +} __attribute__ ((packed)); + +struct mfi_evt_pd { + uint16_t device_id; + uint8_t enclosure_index; + uint8_t slot_number; +} __attribute__ ((packed)); + +/* event detail, returned from MFI_DCMD_CTRL_EVENT_WAIT. */ +struct mfi_evt_detail { + uint32_t seq; + uint32_t time; + uint32_t code; + union mfi_evt class; + uint8_t arg_type; + uint8_t reserved1[15]; + + union { + struct { + struct mfi_evt_pd pd; + uint8_t cdb_len; + uint8_t sense_len; + uint8_t reserved[2]; + uint8_t cdb[16]; + uint8_t sense[64]; + } cdb_sense; + + struct mfi_evt_ld ld; + + struct { + struct mfi_evt_ld ld; + uint64_t count; + } ld_count; + + struct { + uint64_t lba; + struct mfi_evt_ld ld; + } ld_lba; + + struct { + struct mfi_evt_ld ld; + uint32_t pre_owner; + uint32_t new_owner; + } ld_owner; + + struct { + uint64_t ld_lba; + uint64_t pd_lba; + struct mfi_evt_ld ld; + struct mfi_evt_pd pd; + } ld_lba_pd_lba; + + struct { + struct mfi_evt_ld ld; + struct mfi_progress prog; + } ld_prog; + + struct { + struct mfi_evt_ld ld; + uint32_t prev_state; + uint32_t new_state; + } ld_state; + + struct { + uint64_t strip; + struct mfi_evt_ld ld; + } ld_strip; + + struct mfi_evt_pd pd; + + struct { + struct mfi_evt_pd pd; + uint32_t err; + } pd_err; + + struct { + uint64_t lba; + struct mfi_evt_pd pd; + } pd_lba; + + struct { + uint64_t lba; + struct mfi_evt_pd pd; + struct mfi_evt_ld ld; + } pd_lba_ld; + + struct { + struct mfi_evt_pd pd; + struct mfi_progress prog; + } pd_prog; + + struct { + struct mfi_evt_pd ld; + uint32_t prev_state; + uint32_t new_state; + } pd_state; + + struct { + uint16_t venderId; + uint16_t deviceId; + uint16_t subVenderId; + uint16_t subDeviceId; + } pci; + + uint32_t rate; + + char str[96]; + + struct { + uint32_t rtc; + uint16_t elapsedSeconds; + } time; + + struct { + uint32_t ecar; + uint32_t elog; + char str[64]; + } ecc; + + uint8_t b[96]; + uint16_t s[48]; + uint32_t w[24]; + uint64_t d[12]; + } args; + + char description[128]; +} __attribute__ ((packed)); + +struct mfi_evt_list { + uint32_t count; + uint32_t reserved; + struct mfi_evt_detail event[1]; +} __attribute__ ((packed)); + +union mfi_pd_ref { + struct { + uint16_t device_id; + uint16_t seq_num; + } v; + uint32_t ref; +} __attribute__ ((packed)); + +union mfi_pd_ddf_type { + struct { + uint16_t pd_type; +#define MFI_PD_DDF_TYPE_FORCED_PD_GUID (1 << 0) +#define MFI_PD_DDF_TYPE_IN_VD (1 << 1) +#define MFI_PD_DDF_TYPE_IS_GLOBAL_SPARE (1 << 2) +#define MFI_PD_DDF_TYPE_IS_SPARE (1 << 3) +#define MFI_PD_DDF_TYPE_IS_FOREIGN (1 << 4) +#define MFI_PD_DDF_TYPE_INTF_SPI (1 << 12) +#define MFI_PD_DDF_TYPE_INTF_SAS (1 << 13) +#define MFI_PD_DDF_TYPE_INTF_SATA1 (1 << 14) +#define MFI_PD_DDF_TYPE_INTF_SATA3G (1 << 15) + uint16_t reserved; + } ddf; + struct { + uint32_t reserved; + } non_disk; + uint32_t type; +} __attribute__ ((packed)); + +struct mfi_pd_progress { + uint32_t active; +#define PD_PROGRESS_ACTIVE_REBUILD (1 << 0) +#define PD_PROGRESS_ACTIVE_PATROL (1 << 1) +#define PD_PROGRESS_ACTIVE_CLEAR (1 << 2) + struct mfi_progress rbld; + struct mfi_progress patrol; + struct mfi_progress clear; + struct mfi_progress reserved[4]; +} __attribute__ ((packed)); + +struct 
mfi_pd_info { + union mfi_pd_ref ref; + uint8_t inquiry_data[96]; + uint8_t vpd_page83[64]; + uint8_t not_supported; + uint8_t scsi_dev_type; + uint8_t connected_port_bitmap; + uint8_t device_speed; + uint32_t media_err_count; + uint32_t other_err_count; + uint32_t pred_fail_count; + uint32_t last_pred_fail_event_seq_num; + uint16_t fw_state; + uint8_t disable_for_removal; + uint8_t link_speed; + union mfi_pd_ddf_type state; + struct { + uint8_t count; + uint8_t is_path_broken; + uint8_t reserved[6]; + uint64_t sas_addr[4]; + } path_info; + uint64_t raw_size; + uint64_t non_coerced_size; + uint64_t coerced_size; + uint16_t encl_device_id; + uint8_t encl_index; + uint8_t slot_number; + struct mfi_pd_progress prog_info; + uint8_t bad_block_table_full; + uint8_t unusable_in_current_config; + uint8_t vpd_page83_ext[64]; + uint8_t reserved[512-358]; +} __attribute__ ((packed)); + +struct mfi_pd_address { + uint16_t device_id; + uint16_t encl_device_id; + uint8_t encl_index; + uint8_t slot_number; + uint8_t scsi_dev_type; + uint8_t connect_port_bitmap; + uint64_t sas_addr[2]; +} __attribute__ ((packed)); + +#define MFI_MAX_SYS_PDS 240 +struct mfi_pd_list { + uint32_t size; + uint32_t count; + struct mfi_pd_address addr[MFI_MAX_SYS_PDS]; +} __attribute__ ((packed)); + +union mfi_ld_ref { + struct { + uint8_t target_id; + uint8_t reserved; + uint16_t seq; + } v; + uint32_t ref; +} __attribute__ ((packed)); + +struct mfi_ld_list { + uint32_t ld_count; + uint32_t reserved1; + struct { + union mfi_ld_ref ld; + uint8_t state; + uint8_t reserved2[3]; + uint64_t size; + } ld_list[MFI_MAX_LD]; +} __attribute__ ((packed)); + +enum mfi_ld_access { + MFI_LD_ACCESS_RW = 0, + MFI_LD_ACCSSS_RO = 2, + MFI_LD_ACCESS_BLOCKED = 3, +}; +#define MFI_LD_ACCESS_MASK 3 + +enum mfi_ld_state { + MFI_LD_STATE_OFFLINE = 0, + MFI_LD_STATE_PARTIALLY_DEGRADED = 1, + MFI_LD_STATE_DEGRADED = 2, + MFI_LD_STATE_OPTIMAL = 3 +}; + +enum mfi_syspd_state { + MFI_PD_STATE_UNCONFIGURED_GOOD = 0x00, + MFI_PD_STATE_UNCONFIGURED_BAD = 0x01, + MFI_PD_STATE_HOT_SPARE = 0x02, + MFI_PD_STATE_OFFLINE = 0x10, + MFI_PD_STATE_FAILED = 0x11, + MFI_PD_STATE_REBUILD = 0x14, + MFI_PD_STATE_ONLINE = 0x18, + MFI_PD_STATE_COPYBACK = 0x20, + MFI_PD_STATE_SYSTEM = 0x40 +}; + +struct mfi_ld_props { + union mfi_ld_ref ld; + char name[16]; + uint8_t default_cache_policy; + uint8_t access_policy; + uint8_t disk_cache_policy; + uint8_t current_cache_policy; + uint8_t no_bgi; + uint8_t reserved[7]; +} __attribute__ ((packed)); + +struct mfi_ld_params { + uint8_t primary_raid_level; + uint8_t raid_level_qualifier; + uint8_t secondary_raid_level; + uint8_t stripe_size; + uint8_t num_drives; + uint8_t span_depth; + uint8_t state; + uint8_t init_state; + uint8_t is_consistent; + uint8_t reserved[23]; +} __attribute__ ((packed)); + +struct mfi_ld_progress { + uint32_t active; +#define MFI_LD_PROGRESS_CC (1<<0) +#define MFI_LD_PROGRESS_BGI (1<<1) +#define MFI_LD_PROGRESS_FGI (1<<2) +#define MFI_LD_PORGRESS_RECON (1<<3) + struct mfi_progress cc; + struct mfi_progress bgi; + struct mfi_progress fgi; + struct mfi_progress recon; + struct mfi_progress reserved[4]; +} __attribute__ ((packed)); + +struct mfi_span { + uint64_t start_block; + uint64_t num_blocks; + uint16_t array_ref; + uint8_t reserved[6]; +} __attribute__ ((packed)); + +#define MFI_MAX_SPAN_DEPTH 8 +struct mfi_ld_config { + struct mfi_ld_props properties; + struct mfi_ld_params params; + struct mfi_span span[MFI_MAX_SPAN_DEPTH]; +} __attribute__ ((packed)); + +struct mfi_ld_info { + struct mfi_ld_config 
ld_config; + uint64_t size; + struct mfi_ld_progress progress; + uint16_t cluster_owner; + uint8_t reconstruct_active; + uint8_t reserved1[1]; + uint8_t vpd_page83[64]; + uint8_t reserved2[16]; +} __attribute__ ((packed)); + +union mfi_spare_type { + uint8_t flags; +#define MFI_SPARE_IS_DEDICATED (1 << 0) +#define MFI_SPARE_IS_REVERTABLE (1 << 1) +#define MFI_SPARE_IS_ENCL_AFFINITY (1 << 2) + uint8_t type; +} __attribute__ ((packed)); + +#define MFI_MAX_ARRAYS 16 +struct mfi_spare { + union mfi_pd_ref ref; + union mfi_spare_type spare_type; + uint8_t reserved[2]; + uint8_t array_count; + uint16_t array_refd[MFI_MAX_ARRAYS]; +} __attribute__ ((packed)); + +#define MFI_MAX_ROW_SIZE 32 +struct mfi_array { + uint64_t size; + uint8_t num_drives; + uint8_t reserved; + uint16_t array_ref; + uint8_t pad[20]; + struct { + union mfi_pd_ref ref; + uint16_t fw_state; /* enum mfi_syspd_state */ + struct { + uint8_t pd; + uint8_t slot; + } encl; + } pd[MFI_MAX_ROW_SIZE]; +} __attribute__ ((packed)); + +struct mfi_config_data { + uint32_t size; + uint16_t array_count; + uint16_t array_size; + uint16_t log_drv_count; + uint16_t log_drv_size; + uint16_t spares_count; + uint16_t spares_size; + uint8_t reserved[16]; + /* + struct mfi_array array[]; + struct mfi_ld_config ld[]; + struct mfi_spare spare[]; + */ +} __attribute__ ((packed)); + +#define MFI_SCSI_MAX_TARGETS 128 +#define MFI_SCSI_MAX_LUNS 8 +#define MFI_SCSI_INITIATOR_ID 255 +#define MFI_SCSI_MAX_CMDS 8 +#define MFI_SCSI_MAX_CDB_LEN 16 + +#endif /* MFI_REG_H */ diff --git a/hw/microblaze/Makefile.objs b/hw/microblaze/Makefile.objs new file mode 100644 index 0000000000..274d2c543e --- /dev/null +++ b/hw/microblaze/Makefile.objs @@ -0,0 +1,9 @@ +obj-y = petalogix_s3adsp1800_mmu.o +obj-y += petalogix_ml605_mmu.o +obj-y += microblaze_boot.o + +obj-y += microblaze_pic_cpu.o +obj-y += xilinx_ethlite.o +obj-$(CONFIG_FDT) += ../device_tree.o + +obj-y := $(addprefix ../,$(obj-y)) diff --git a/hw/mips/Makefile.objs b/hw/mips/Makefile.objs new file mode 100644 index 0000000000..29a5d0db04 --- /dev/null +++ b/hw/mips/Makefile.objs @@ -0,0 +1,6 @@ +obj-y = mips_r4k.o mips_jazz.o mips_malta.o mips_mipssim.o +obj-y += mips_addr.o mips_timer.o mips_int.o +obj-y += gt64xxx.o mc146818rtc.o +obj-$(CONFIG_FULONG) += bonito.o vt82c686.o mips_fulong2e.o + +obj-y := $(addprefix ../,$(obj-y)) diff --git a/hw/mips_malta.c b/hw/mips_malta.c index dfd7b6b113..351c88ebca 100644 --- a/hw/mips_malta.c +++ b/hw/mips_malta.c @@ -959,7 +959,7 @@ void mips_malta_init (ram_addr_t ram_size, pci_piix4_ide_init(pci_bus, hd, piix4_devfn + 1); pci_create_simple(pci_bus, piix4_devfn + 2, "piix4-usb-uhci"); smbus = piix4_pm_init(pci_bus, piix4_devfn + 3, 0x1100, - isa_get_irq(NULL, 9), NULL, 0); + isa_get_irq(NULL, 9), NULL, 0, NULL); /* TODO: Populate SPD eeprom data. 
*/ smbus_eeprom_init(smbus, 8, NULL, 0); pit = pit_init(isa_bus, 0x40, 0, NULL); @@ -175,7 +175,7 @@ void msi_uninit(struct PCIDevice *dev) uint16_t flags; uint8_t cap_size; - if (!(dev->cap_present & QEMU_PCI_CAP_MSI)) { + if (!msi_present(dev)) { return; } flags = pci_get_word(dev->config + msi_flags_off(dev)); @@ -191,6 +191,10 @@ void msi_reset(PCIDevice *dev) uint16_t flags; bool msi64bit; + if (!msi_present(dev)) { + return; + } + flags = pci_get_word(dev->config + msi_flags_off(dev)); flags &= ~(PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE); msi64bit = flags & PCI_MSI_FLAGS_64BIT; @@ -260,7 +264,7 @@ void msi_notify(PCIDevice *dev, unsigned int vector) stl_le_phys(address, data); } -/* call this function after updating configs by pci_default_write_config(). */ +/* Normally called by pci_default_write_config(). */ void msi_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len) { uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev)); @@ -272,7 +276,8 @@ void msi_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len) unsigned int vector; uint32_t pending; - if (!ranges_overlap(addr, len, dev->msi_cap, msi_cap_sizeof(flags))) { + if (!msi_present(dev) || + !ranges_overlap(addr, len, dev->msi_cap, msi_cap_sizeof(flags))) { return; } @@ -187,7 +187,7 @@ void msix_write_config(PCIDevice *dev, uint32_t addr, int vector; bool was_masked; - if (!range_covers_byte(addr, len, enable_pos)) { + if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) { return; } @@ -319,8 +319,9 @@ static void msix_free_irq_entries(PCIDevice *dev) /* Clean up resources for the device. */ int msix_uninit(PCIDevice *dev, MemoryRegion *bar) { - if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) + if (!msix_present(dev)) { return 0; + } pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH); dev->msix_cap = 0; msix_free_irq_entries(dev); @@ -339,7 +340,7 @@ void msix_save(PCIDevice *dev, QEMUFile *f) { unsigned n = dev->msix_entries_nr; - if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) { + if (!msix_present(dev)) { return; } @@ -353,7 +354,7 @@ void msix_load(PCIDevice *dev, QEMUFile *f) unsigned n = dev->msix_entries_nr; unsigned int vector; - if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) { + if (!msix_present(dev)) { return; } @@ -407,8 +408,9 @@ void msix_notify(PCIDevice *dev, unsigned vector) void msix_reset(PCIDevice *dev) { - if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) + if (!msix_present(dev)) { return; + } msix_free_irq_entries(dev); dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &= ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET]; @@ -447,8 +449,9 @@ void msix_vector_unuse(PCIDevice *dev, unsigned vector) void msix_unuse_all_vectors(PCIDevice *dev) { - if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) + if (!msix_present(dev)) { return; + } msix_free_irq_entries(dev); } diff --git a/hw/musicpal.c b/hw/musicpal.c index c9f845a3f2..f14f20d689 100644 --- a/hw/musicpal.c +++ b/hw/musicpal.c @@ -1513,7 +1513,7 @@ static void musicpal_init(ram_addr_t ram_size, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { - CPUARMState *env; + ARMCPU *cpu; qemu_irq *cpu_pic; qemu_irq pic[32]; DeviceState *dev; @@ -1533,12 +1533,12 @@ static void musicpal_init(ram_addr_t ram_size, if (!cpu_model) { cpu_model = "arm926"; } - env = cpu_init(cpu_model); - if (!env) { + cpu = cpu_arm_init(cpu_model); + if (!cpu) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } - cpu_pic = arm_pic_init_cpu(env); + cpu_pic = 
arm_pic_init_cpu(cpu); /* For now we use a fixed - the original - RAM size */ memory_region_init_ram(ram, "musicpal.ram", MP_RAM_DEFAULT_SIZE); @@ -1651,7 +1651,7 @@ static void musicpal_init(ram_addr_t ram_size, musicpal_binfo.kernel_filename = kernel_filename; musicpal_binfo.kernel_cmdline = kernel_cmdline; musicpal_binfo.initrd_filename = initrd_filename; - arm_load_kernel(env, &musicpal_binfo); + arm_load_kernel(cpu, &musicpal_binfo); } static QEMUMachine musicpal_machine = { diff --git a/hw/nseries.c b/hw/nseries.c index b8c6a29227..4df2670327 100644 --- a/hw/nseries.c +++ b/hw/nseries.c @@ -37,7 +37,7 @@ /* Nokia N8x0 support */ struct n800_s { - struct omap_mpu_state_s *cpu; + struct omap_mpu_state_s *mpu; struct rfbi_chip_s blizzard; struct { @@ -135,10 +135,10 @@ static void n800_mmc_cs_cb(void *opaque, int line, int level) static void n8x0_gpio_setup(struct n800_s *s) { - qemu_irq *mmc_cs = qemu_allocate_irqs(n800_mmc_cs_cb, s->cpu->mmc, 1); - qdev_connect_gpio_out(s->cpu->gpio, N8X0_MMC_CS_GPIO, mmc_cs[0]); + qemu_irq *mmc_cs = qemu_allocate_irqs(n800_mmc_cs_cb, s->mpu->mmc, 1); + qdev_connect_gpio_out(s->mpu->gpio, N8X0_MMC_CS_GPIO, mmc_cs[0]); - qemu_irq_lower(qdev_get_gpio_in(s->cpu->gpio, N800_BAT_COVER_GPIO)); + qemu_irq_lower(qdev_get_gpio_in(s->mpu->gpio, N800_BAT_COVER_GPIO)); } #define MAEMO_CAL_HEADER(...) \ @@ -179,8 +179,8 @@ static void n8x0_nand_setup(struct n800_s *s) } qdev_init_nofail(s->nand); sysbus_connect_irq(sysbus_from_qdev(s->nand), 0, - qdev_get_gpio_in(s->cpu->gpio, N8X0_ONENAND_GPIO)); - omap_gpmc_attach(s->cpu->gpmc, N8X0_ONENAND_CS, + qdev_get_gpio_in(s->mpu->gpio, N8X0_ONENAND_GPIO)); + omap_gpmc_attach(s->mpu->gpmc, N8X0_ONENAND_CS, sysbus_mmio_get_region(sysbus_from_qdev(s->nand), 0)); otp_region = onenand_raw_otp(s->nand); @@ -192,13 +192,13 @@ static void n8x0_nand_setup(struct n800_s *s) static void n8x0_i2c_setup(struct n800_s *s) { DeviceState *dev; - qemu_irq tmp_irq = qdev_get_gpio_in(s->cpu->gpio, N8X0_TMP105_GPIO); - i2c_bus *i2c = omap_i2c_bus(s->cpu->i2c[0]); + qemu_irq tmp_irq = qdev_get_gpio_in(s->mpu->gpio, N8X0_TMP105_GPIO); + i2c_bus *i2c = omap_i2c_bus(s->mpu->i2c[0]); /* Attach a menelaus PM chip */ dev = i2c_create_slave(i2c, "twl92230", N8X0_MENELAUS_ADDR); qdev_connect_gpio_out(dev, 3, - qdev_get_gpio_in(s->cpu->ih[0], + qdev_get_gpio_in(s->mpu->ih[0], OMAP_INT_24XX_SYS_NIRQ)); qemu_system_powerdown = qdev_get_gpio_in(dev, 3); @@ -263,8 +263,8 @@ static void n800_tsc_kbd_setup(struct n800_s *s) /* XXX: are the three pins inverted inside the chip between the * tsc and the cpu (N4111)? 
*/ qemu_irq penirq = NULL; /* NC */ - qemu_irq kbirq = qdev_get_gpio_in(s->cpu->gpio, N800_TSC_KP_IRQ_GPIO); - qemu_irq dav = qdev_get_gpio_in(s->cpu->gpio, N800_TSC_TS_GPIO); + qemu_irq kbirq = qdev_get_gpio_in(s->mpu->gpio, N800_TSC_KP_IRQ_GPIO); + qemu_irq dav = qdev_get_gpio_in(s->mpu->gpio, N800_TSC_TS_GPIO); s->ts.chip = tsc2301_init(penirq, kbirq, dav); s->ts.opaque = s->ts.chip->opaque; @@ -283,7 +283,7 @@ static void n800_tsc_kbd_setup(struct n800_s *s) static void n810_tsc_setup(struct n800_s *s) { - qemu_irq pintdav = qdev_get_gpio_in(s->cpu->gpio, N810_TSC_TS_GPIO); + qemu_irq pintdav = qdev_get_gpio_in(s->mpu->gpio, N810_TSC_TS_GPIO); s->ts.opaque = tsc2005_init(pintdav); s->ts.txrx = tsc2005_txrx; @@ -375,7 +375,7 @@ static int n810_keys[0x80] = { static void n810_kbd_setup(struct n800_s *s) { - qemu_irq kbd_irq = qdev_get_gpio_in(s->cpu->gpio, N810_KEYBOARD_GPIO); + qemu_irq kbd_irq = qdev_get_gpio_in(s->mpu->gpio, N810_KEYBOARD_GPIO); int i; for (i = 0; i < 0x80; i ++) @@ -388,7 +388,7 @@ static void n810_kbd_setup(struct n800_s *s) /* Attach the LM8322 keyboard to the I2C bus, * should happen in n8x0_i2c_setup and s->kbd be initialised here. */ - s->kbd = i2c_create_slave(omap_i2c_bus(s->cpu->i2c[0]), + s->kbd = i2c_create_slave(omap_i2c_bus(s->mpu->i2c[0]), "lm8323", N810_LM8323_ADDR); qdev_connect_gpio_out(s->kbd, 0, kbd_irq); } @@ -679,8 +679,8 @@ static void n8x0_spi_setup(struct n800_s *s) void *tsc = s->ts.opaque; void *mipid = mipid_init(); - omap_mcspi_attach(s->cpu->mcspi[0], s->ts.txrx, tsc, 0); - omap_mcspi_attach(s->cpu->mcspi[0], mipid_txrx, mipid, 1); + omap_mcspi_attach(s->mpu->mcspi[0], s->ts.txrx, tsc, 0); + omap_mcspi_attach(s->mpu->mcspi[0], mipid_txrx, mipid, 1); } /* This task is normally performed by the bootloader. 
If we're loading @@ -735,20 +735,20 @@ static void n8x0_dss_setup(struct n800_s *s) s->blizzard.write = s1d13745_write; s->blizzard.read = s1d13745_read; - omap_rfbi_attach(s->cpu->dss, 0, &s->blizzard); + omap_rfbi_attach(s->mpu->dss, 0, &s->blizzard); } static void n8x0_cbus_setup(struct n800_s *s) { - qemu_irq dat_out = qdev_get_gpio_in(s->cpu->gpio, N8X0_CBUS_DAT_GPIO); - qemu_irq retu_irq = qdev_get_gpio_in(s->cpu->gpio, N8X0_RETU_GPIO); - qemu_irq tahvo_irq = qdev_get_gpio_in(s->cpu->gpio, N8X0_TAHVO_GPIO); + qemu_irq dat_out = qdev_get_gpio_in(s->mpu->gpio, N8X0_CBUS_DAT_GPIO); + qemu_irq retu_irq = qdev_get_gpio_in(s->mpu->gpio, N8X0_RETU_GPIO); + qemu_irq tahvo_irq = qdev_get_gpio_in(s->mpu->gpio, N8X0_TAHVO_GPIO); CBus *cbus = cbus_init(dat_out); - qdev_connect_gpio_out(s->cpu->gpio, N8X0_CBUS_CLK_GPIO, cbus->clk); - qdev_connect_gpio_out(s->cpu->gpio, N8X0_CBUS_DAT_GPIO, cbus->dat); - qdev_connect_gpio_out(s->cpu->gpio, N8X0_CBUS_SEL_GPIO, cbus->sel); + qdev_connect_gpio_out(s->mpu->gpio, N8X0_CBUS_CLK_GPIO, cbus->clk); + qdev_connect_gpio_out(s->mpu->gpio, N8X0_CBUS_DAT_GPIO, cbus->dat); + qdev_connect_gpio_out(s->mpu->gpio, N8X0_CBUS_SEL_GPIO, cbus->sel); cbus_attach(cbus, s->retu = retu_init(retu_irq, 1)); cbus_attach(cbus, s->tahvo = tahvo_init(tahvo_irq, 1)); @@ -757,14 +757,14 @@ static void n8x0_cbus_setup(struct n800_s *s) static void n8x0_uart_setup(struct n800_s *s) { CharDriverState *radio = uart_hci_init( - qdev_get_gpio_in(s->cpu->gpio, N8X0_BT_HOST_WKUP_GPIO)); + qdev_get_gpio_in(s->mpu->gpio, N8X0_BT_HOST_WKUP_GPIO)); - qdev_connect_gpio_out(s->cpu->gpio, N8X0_BT_RESET_GPIO, + qdev_connect_gpio_out(s->mpu->gpio, N8X0_BT_RESET_GPIO, csrhci_pins_get(radio)[csrhci_pin_reset]); - qdev_connect_gpio_out(s->cpu->gpio, N8X0_BT_WKUP_GPIO, + qdev_connect_gpio_out(s->mpu->gpio, N8X0_BT_WKUP_GPIO, csrhci_pins_get(radio)[csrhci_pin_wakeup]); - omap_uart_attach(s->cpu->uart[BT_UART], radio); + omap_uart_attach(s->mpu->uart[BT_UART], radio); } static void n8x0_usb_setup(struct n800_s *s) @@ -774,13 +774,13 @@ static void n8x0_usb_setup(struct n800_s *s) dev = sysbus_from_qdev(s->usb); qdev_init_nofail(s->usb); sysbus_connect_irq(dev, 0, - qdev_get_gpio_in(s->cpu->gpio, N8X0_TUSB_INT_GPIO)); + qdev_get_gpio_in(s->mpu->gpio, N8X0_TUSB_INT_GPIO)); /* Using the NOR interface */ - omap_gpmc_attach(s->cpu->gpmc, N8X0_USB_ASYNC_CS, + omap_gpmc_attach(s->mpu->gpmc, N8X0_USB_ASYNC_CS, sysbus_mmio_get_region(dev, 0)); - omap_gpmc_attach(s->cpu->gpmc, N8X0_USB_SYNC_CS, + omap_gpmc_attach(s->mpu->gpmc, N8X0_USB_SYNC_CS, sysbus_mmio_get_region(dev, 1)); - qdev_connect_gpio_out(s->cpu->gpio, N8X0_TUSB_ENABLE_GPIO, + qdev_connect_gpio_out(s->mpu->gpio, N8X0_TUSB_ENABLE_GPIO, qdev_get_gpio_in(s->usb, 0)); /* tusb_pwr */ } @@ -1023,11 +1023,11 @@ static void n8x0_boot_init(void *opaque) n800_dss_init(&s->blizzard); /* CPU setup */ - s->cpu->cpu->env.GE = 0x5; + s->mpu->cpu->env.GE = 0x5; /* If the machine has a slided keyboard, open it */ if (s->kbd) - qemu_irq_raise(qdev_get_gpio_in(s->cpu->gpio, N810_SLIDE_GPIO)); + qemu_irq_raise(qdev_get_gpio_in(s->mpu->gpio, N810_SLIDE_GPIO)); } #define OMAP_TAG_NOKIA_BT 0x4e01 @@ -1247,7 +1247,8 @@ static int n8x0_atag_setup(void *p, int model) stw_raw(w ++, 24); /* u16 len */ strcpy((void *) w, "hw-build"); /* char component[12] */ w += 6; - strcpy((void *) w, "QEMU " QEMU_VERSION); /* char version[12] */ + strcpy((void *) w, "QEMU "); + pstrcat((void *) w, 12, qemu_get_version()); /* char version[12] */ w += 6; tag = (model == 810) ? 
"1.1.10-qemu" : "1.1.6-qemu"; @@ -1281,7 +1282,7 @@ static void n8x0_init(ram_addr_t ram_size, const char *boot_device, int sdram_size = binfo->ram_size; DisplayState *ds; - s->cpu = omap2420_mpu_init(sysmem, sdram_size, cpu_model); + s->mpu = omap2420_mpu_init(sysmem, sdram_size, cpu_model); /* Setup peripherals * @@ -1329,7 +1330,7 @@ static void n8x0_init(ram_addr_t ram_size, const char *boot_device, binfo->kernel_filename = kernel_filename; binfo->kernel_cmdline = kernel_cmdline; binfo->initrd_filename = initrd_filename; - arm_load_kernel(&s->cpu->cpu->env, binfo); + arm_load_kernel(s->mpu->cpu, binfo); qemu_register_reset(n8x0_boot_init, s); } @@ -1338,7 +1339,7 @@ static void n8x0_init(ram_addr_t ram_size, const char *boot_device, int rom_size; uint8_t nolo_tags[0x10000]; /* No, wait, better start at the ROM. */ - s->cpu->cpu->env.regs[15] = OMAP2_Q2_BASE + 0x400000; + s->mpu->cpu->env.regs[15] = OMAP2_Q2_BASE + 0x400000; /* This is intended for loading the `secondary.bin' program from * Nokia images (the NOLO bootloader). The entry point seems @@ -998,7 +998,6 @@ enum { #define OMAP_GPIOSW_OUTPUT 0x0002 # define TCMI_VERBOSE 1 -//# define MEM_VERBOSE 1 # ifdef TCMI_VERBOSE # define OMAP_8B_REG(paddr) \ @@ -1018,98 +1017,4 @@ enum { # define OMAP_MPUI_REG_MASK 0x000007ff -# ifdef MEM_VERBOSE -struct io_fn { - CPUReadMemoryFunc * const *mem_read; - CPUWriteMemoryFunc * const *mem_write; - void *opaque; - int in; -}; - -static uint32_t io_readb(void *opaque, target_phys_addr_t addr) -{ - struct io_fn *s = opaque; - uint32_t ret; - - s->in ++; - ret = s->mem_read[0](s->opaque, addr); - s->in --; - if (!s->in) - fprintf(stderr, "%08x ---> %02x\n", (uint32_t) addr, ret); - return ret; -} -static uint32_t io_readh(void *opaque, target_phys_addr_t addr) -{ - struct io_fn *s = opaque; - uint32_t ret; - - s->in ++; - ret = s->mem_read[1](s->opaque, addr); - s->in --; - if (!s->in) - fprintf(stderr, "%08x ---> %04x\n", (uint32_t) addr, ret); - return ret; -} -static uint32_t io_readw(void *opaque, target_phys_addr_t addr) -{ - struct io_fn *s = opaque; - uint32_t ret; - - s->in ++; - ret = s->mem_read[2](s->opaque, addr); - s->in --; - if (!s->in) - fprintf(stderr, "%08x ---> %08x\n", (uint32_t) addr, ret); - return ret; -} -static void io_writeb(void *opaque, target_phys_addr_t addr, uint32_t value) -{ - struct io_fn *s = opaque; - - if (!s->in) - fprintf(stderr, "%08x <--- %02x\n", (uint32_t) addr, value); - s->in ++; - s->mem_write[0](s->opaque, addr, value); - s->in --; -} -static void io_writeh(void *opaque, target_phys_addr_t addr, uint32_t value) -{ - struct io_fn *s = opaque; - - if (!s->in) - fprintf(stderr, "%08x <--- %04x\n", (uint32_t) addr, value); - s->in ++; - s->mem_write[1](s->opaque, addr, value); - s->in --; -} -static void io_writew(void *opaque, target_phys_addr_t addr, uint32_t value) -{ - struct io_fn *s = opaque; - - if (!s->in) - fprintf(stderr, "%08x <--- %08x\n", (uint32_t) addr, value); - s->in ++; - s->mem_write[2](s->opaque, addr, value); - s->in --; -} - -static CPUReadMemoryFunc * const io_readfn[] = { io_readb, io_readh, io_readw, }; -static CPUWriteMemoryFunc * const io_writefn[] = { io_writeb, io_writeh, io_writew, }; - -inline static int debug_register_io_memory(CPUReadMemoryFunc * const *mem_read, - CPUWriteMemoryFunc * const *mem_write, - void *opaque) -{ - struct io_fn *s = g_malloc(sizeof(struct io_fn)); - - s->mem_read = mem_read; - s->mem_write = mem_write; - s->opaque = opaque; - s->in = 0; - return cpu_register_io_memory(io_readfn, io_writefn, s, - 
DEVICE_NATIVE_ENDIAN); -} -# define cpu_register_io_memory debug_register_io_memory -# endif - #endif /* hw_omap_h */ diff --git a/hw/omap1.c b/hw/omap1.c index a997d300b5..ad60cc4919 100644 --- a/hw/omap1.c +++ b/hw/omap1.c @@ -3854,7 +3854,7 @@ struct omap_mpu_state_s *omap310_mpu_init(MemoryRegion *system_memory, omap_clkm_init(system_memory, 0xfffece00, 0xe1008000, s); - cpu_irq = arm_pic_init_cpu(&s->cpu->env); + cpu_irq = arm_pic_init_cpu(s->cpu); s->ih[0] = qdev_create(NULL, "omap-intc"); qdev_prop_set_uint32(s->ih[0], "size", 0x100); qdev_prop_set_ptr(s->ih[0], "clk", omap_findclk(s, "arminth_ck")); diff --git a/hw/omap2.c b/hw/omap2.c index 196c4b6cc8..4278dd19c4 100644 --- a/hw/omap2.c +++ b/hw/omap2.c @@ -2277,7 +2277,7 @@ struct omap_mpu_state_s *omap2420_mpu_init(MemoryRegion *sysmem, s->l4 = omap_l4_init(sysmem, OMAP2_L4_BASE, 54); /* Actually mapped at any 2K boundary in the ARM11 private-peripheral if */ - cpu_irq = arm_pic_init_cpu(&s->cpu->env); + cpu_irq = arm_pic_init_cpu(s->cpu); s->ih[0] = qdev_create(NULL, "omap2-intc"); qdev_prop_set_uint8(s->ih[0], "revision", 0x21); qdev_prop_set_ptr(s->ih[0], "fclk", omap_findclk(s, "mpu_intc_fclk")); diff --git a/hw/omap_sx1.c b/hw/omap_sx1.c index c7618c6cdf..abca341926 100644 --- a/hw/omap_sx1.c +++ b/hw/omap_sx1.c @@ -103,7 +103,7 @@ static void sx1_init(ram_addr_t ram_size, const char *initrd_filename, const char *cpu_model, const int version) { - struct omap_mpu_state_s *cpu; + struct omap_mpu_state_s *mpu; MemoryRegion *address_space = get_system_memory(); MemoryRegion *flash = g_new(MemoryRegion, 1); MemoryRegion *flash_1 = g_new(MemoryRegion, 1); @@ -121,7 +121,7 @@ static void sx1_init(ram_addr_t ram_size, flash_size = flash2_size; } - cpu = omap310_mpu_init(address_space, sx1_binfo.ram_size, cpu_model); + mpu = omap310_mpu_init(address_space, sx1_binfo.ram_size, cpu_model); /* External Flash (EMIFS) */ memory_region_init_ram(flash, "omap_sx1.flash0-0", flash_size); @@ -202,7 +202,7 @@ static void sx1_init(ram_addr_t ram_size, sx1_binfo.kernel_filename = kernel_filename; sx1_binfo.kernel_cmdline = kernel_cmdline; sx1_binfo.initrd_filename = initrd_filename; - arm_load_kernel(&cpu->cpu->env, &sx1_binfo); + arm_load_kernel(mpu->cpu, &sx1_binfo); } /* TODO: fix next line */ @@ -196,7 +196,7 @@ static void palmte_init(ram_addr_t ram_size, const char *initrd_filename, const char *cpu_model) { MemoryRegion *address_space_mem = get_system_memory(); - struct omap_mpu_state_s *cpu; + struct omap_mpu_state_s *mpu; int flash_size = 0x00800000; int sdram_size = palmte_binfo.ram_size; static uint32_t cs0val = 0xffffffff; @@ -208,7 +208,7 @@ static void palmte_init(ram_addr_t ram_size, MemoryRegion *flash = g_new(MemoryRegion, 1); MemoryRegion *cs = g_new(MemoryRegion, 4); - cpu = omap310_mpu_init(address_space_mem, sdram_size, cpu_model); + mpu = omap310_mpu_init(address_space_mem, sdram_size, cpu_model); /* External Flash (EMIFS) */ memory_region_init_ram(flash, "palmte.flash", flash_size); @@ -230,11 +230,11 @@ static void palmte_init(ram_addr_t ram_size, OMAP_CS3_SIZE); memory_region_add_subregion(address_space_mem, OMAP_CS3_BASE, &cs[3]); - palmte_microwire_setup(cpu); + palmte_microwire_setup(mpu); - qemu_add_kbd_event_handler(palmte_button_event, cpu); + qemu_add_kbd_event_handler(palmte_button_event, mpu); - palmte_gpio_setup(cpu); + palmte_gpio_setup(mpu); /* Setup initial (reset) machine state */ if (nb_option_roms) { @@ -265,7 +265,7 @@ static void palmte_init(ram_addr_t ram_size, palmte_binfo.kernel_filename = 
kernel_filename; palmte_binfo.kernel_cmdline = kernel_cmdline; palmte_binfo.initrd_filename = initrd_filename; - arm_load_kernel(&cpu->cpu->env, &palmte_binfo); + arm_load_kernel(mpu->cpu, &palmte_binfo); } /* FIXME: We shouldn't really be doing this here. The LCD controller @@ -970,7 +970,7 @@ void pc_cpus_init(const char *cpu_model) } } -void pc_memory_init(MemoryRegion *system_memory, +void *pc_memory_init(MemoryRegion *system_memory, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, @@ -1029,6 +1029,7 @@ void pc_memory_init(MemoryRegion *system_memory, for (i = 0; i < nb_option_roms; i++) { rom_add_option(option_rom[i].name, option_rom[i].bootindex); } + return fw_cfg; } qemu_irq *pc_allocate_cpu_irq(void) @@ -106,7 +106,7 @@ void pc_register_ferr_irq(qemu_irq irq); void pc_acpi_smi_interrupt(void *opaque, int irq, int level); void pc_cpus_init(const char *cpu_model); -void pc_memory_init(MemoryRegion *system_memory, +void *pc_memory_init(MemoryRegion *system_memory, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, @@ -142,7 +142,7 @@ int acpi_table_add(const char *table_desc); i2c_bus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base, qemu_irq sci_irq, qemu_irq smi_irq, - int kvm_enabled); + int kvm_enabled, void *fw_cfg); void piix4_smbus_register_device(SMBusDevice *dev, uint8_t addr); /* hpet.c */ diff --git a/hw/pc_piix.c b/hw/pc_piix.c index f49b0aaf89..0c0096fd7e 100644 --- a/hw/pc_piix.c +++ b/hw/pc_piix.c @@ -29,6 +29,7 @@ #include "apic.h" #include "pci.h" #include "pci_ids.h" +#include "usb.h" #include "net.h" #include "boards.h" #include "ide.h" @@ -146,6 +147,7 @@ static void pc_init1(MemoryRegion *system_memory, MemoryRegion *ram_memory; MemoryRegion *pci_memory; MemoryRegion *rom_memory; + void *fw_cfg = NULL; pc_cpus_init(cpu_model); @@ -172,7 +174,7 @@ static void pc_init1(MemoryRegion *system_memory, /* allocate ram and load rom/bios */ if (!xen_enabled()) { - pc_memory_init(system_memory, + fw_cfg = pc_memory_init(system_memory, kernel_filename, kernel_cmdline, initrd_filename, below_4g_mem_size, above_4g_mem_size, pci_enabled ? rom_memory : system_memory, &ram_memory); @@ -276,7 +278,7 @@ static void pc_init1(MemoryRegion *system_memory, /* TODO: Populate SPD eeprom data. 
*/ smbus = piix4_pm_init(pci_bus, piix3_devfn + 3, 0xb100, gsi[9], *smi_irq, - kvm_enabled()); + kvm_enabled(), fw_cfg); smbus_eeprom_init(smbus, 8, NULL, 0); } @@ -347,8 +349,8 @@ static void pc_xen_hvm_init(ram_addr_t ram_size, } #endif -static QEMUMachine pc_machine_v1_1 = { - .name = "pc-1.1", +static QEMUMachine pc_machine_v1_2 = { + .name = "pc-1.2", .alias = "pc", .desc = "Standard PC", .init = pc_init_pci, @@ -356,7 +358,38 @@ static QEMUMachine pc_machine_v1_1 = { .is_default = 1, }; +#define PC_COMPAT_1_1 \ + {\ + .driver = "VGA",\ + .property = "vgamem_mb",\ + .value = stringify(8),\ + },{\ + .driver = "vmware-svga",\ + .property = "vgamem_mb",\ + .value = stringify(8),\ + },{\ + .driver = "qxl-vga",\ + .property = "vgamem_mb",\ + .value = stringify(8),\ + },{\ + .driver = "qxl",\ + .property = "vgamem_mb",\ + .value = stringify(8),\ + } + +static QEMUMachine pc_machine_v1_1 = { + .name = "pc-1.1", + .desc = "Standard PC", + .init = pc_init_pci, + .max_cpus = 255, + .compat_props = (GlobalProperty[]) { + PC_COMPAT_1_1, + { /* end of list */ } + }, +}; + #define PC_COMPAT_1_0 \ + PC_COMPAT_1_1,\ {\ .driver = "pc-sysfw",\ .property = "rom_only",\ @@ -374,7 +407,7 @@ static QEMUMachine pc_machine_v1_1 = { .property = "vapic",\ .value = "off",\ },{\ - .driver = "USB",\ + .driver = TYPE_USB_DEVICE,\ .property = "full-path",\ .value = "no",\ } @@ -388,6 +421,7 @@ static QEMUMachine pc_machine_v1_0 = { PC_COMPAT_1_0, { /* end of list */ } }, + .hw_version = "1.0", }; #define PC_COMPAT_0_15 \ @@ -402,6 +436,7 @@ static QEMUMachine pc_machine_v0_15 = { PC_COMPAT_0_15, { /* end of list */ } }, + .hw_version = "0.15", }; #define PC_COMPAT_0_14 \ @@ -442,12 +477,13 @@ static QEMUMachine pc_machine_v0_14 = { }, { /* end of list */ } }, + .hw_version = "0.14", }; #define PC_COMPAT_0_13 \ PC_COMPAT_0_14,\ {\ - .driver = "PCI",\ + .driver = TYPE_PCI_DEVICE,\ .property = "command_serr_enable",\ .value = "off",\ },{\ @@ -478,6 +514,7 @@ static QEMUMachine pc_machine_v0_13 = { }, { /* end of list */ } }, + .hw_version = "0.13", }; #define PC_COMPAT_0_12 \ @@ -509,7 +546,8 @@ static QEMUMachine pc_machine_v0_12 = { .value = stringify(0), }, { /* end of list */ } - } + }, + .hw_version = "0.12", }; #define PC_COMPAT_0_11 \ @@ -519,7 +557,7 @@ static QEMUMachine pc_machine_v0_12 = { .property = "vectors",\ .value = stringify(0),\ },{\ - .driver = "PCI",\ + .driver = TYPE_PCI_DEVICE,\ .property = "rombar",\ .value = stringify(0),\ } @@ -541,7 +579,8 @@ static QEMUMachine pc_machine_v0_11 = { .value = "0.11", }, { /* end of list */ } - } + }, + .hw_version = "0.11", }; static QEMUMachine pc_machine_v0_10 = { @@ -574,6 +613,7 @@ static QEMUMachine pc_machine_v0_10 = { }, { /* end of list */ } }, + .hw_version = "0.10", }; static QEMUMachine isapc_machine = { @@ -603,6 +643,7 @@ static QEMUMachine xenfv_machine = { static void pc_machine_init(void) { + qemu_register_machine(&pc_machine_v1_2); qemu_register_machine(&pc_machine_v1_1); qemu_register_machine(&pc_machine_v1_0); qemu_register_machine(&pc_machine_v0_15); diff --git a/hw/pc_sysfw.c b/hw/pc_sysfw.c index f0d7c21b5c..b45f0acc7d 100644 --- a/hw/pc_sysfw.c +++ b/hw/pc_sysfw.c @@ -23,6 +23,7 @@ * THE SOFTWARE. 
*/ +#include "blockdev.h" #include "sysbus.h" #include "hw.h" #include "pc.h" diff --git a/hw/pci-hotplug.c b/hw/pci-hotplug.c index 61257f457b..e7fb780a08 100644 --- a/hw/pci-hotplug.c +++ b/hw/pci-hotplug.c @@ -80,11 +80,7 @@ static int scsi_hot_add(Monitor *mon, DeviceState *adapter, SCSIBus *scsibus; SCSIDevice *scsidev; - scsibus = DO_UPCAST(SCSIBus, qbus, QLIST_FIRST(&adapter->child_bus)); - if (!scsibus || strcmp(scsibus->qbus.info->name, "SCSI") != 0) { - error_report("Device is not a SCSI adapter"); - return -1; - } + scsibus = SCSI_BUS(QLIST_FIRST(&adapter->child_bus)); /* * drive_init() tries to find a default for dinfo->unit. Doesn't @@ -31,6 +31,8 @@ #include "loader.h" #include "range.h" #include "qmp-commands.h" +#include "msi.h" +#include "msix.h" //#define DEBUG_PCI #ifdef DEBUG_PCI @@ -44,23 +46,32 @@ static char *pcibus_get_dev_path(DeviceState *dev); static char *pcibus_get_fw_dev_path(DeviceState *dev); static int pcibus_reset(BusState *qbus); -struct BusInfo pci_bus_info = { - .name = "PCI", - .size = sizeof(PCIBus), - .print_dev = pcibus_dev_print, - .get_dev_path = pcibus_get_dev_path, - .get_fw_dev_path = pcibus_get_fw_dev_path, - .reset = pcibus_reset, - .props = (Property[]) { - DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1), - DEFINE_PROP_STRING("romfile", PCIDevice, romfile), - DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1), - DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present, - QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false), - DEFINE_PROP_BIT("command_serr_enable", PCIDevice, cap_present, - QEMU_PCI_CAP_SERR_BITNR, true), - DEFINE_PROP_END_OF_LIST() - } +static Property pci_props[] = { + DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1), + DEFINE_PROP_STRING("romfile", PCIDevice, romfile), + DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1), + DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present, + QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false), + DEFINE_PROP_BIT("command_serr_enable", PCIDevice, cap_present, + QEMU_PCI_CAP_SERR_BITNR, true), + DEFINE_PROP_END_OF_LIST() +}; + +static void pci_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->print_dev = pcibus_dev_print; + k->get_dev_path = pcibus_get_dev_path; + k->get_fw_dev_path = pcibus_get_fw_dev_path; + k->reset = pcibus_reset; +} + +static const TypeInfo pci_bus_info = { + .name = TYPE_PCI_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(PCIBus), + .class_init = pci_bus_class_init, }; static PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num); @@ -188,6 +199,9 @@ void pci_device_reset(PCIDevice *dev) } } pci_update_mappings(dev); + + msi_reset(dev); + msix_reset(dev); } /* @@ -265,7 +279,7 @@ void pci_bus_new_inplace(PCIBus *bus, DeviceState *parent, MemoryRegion *address_space_io, uint8_t devfn_min) { - qbus_create_inplace(&bus->qbus, &pci_bus_info, parent, name); + qbus_create_inplace(&bus->qbus, TYPE_PCI_BUS, parent, name); assert(PCI_FUNC(devfn_min) == 0); bus->devfn_min = devfn_min; bus->address_space_mem = address_space_mem; @@ -286,7 +300,7 @@ PCIBus *pci_bus_new(DeviceState *parent, const char *name, PCIBus *bus; bus = g_malloc0(sizeof(*bus)); - bus->qbus.qdev_allocated = 1; + bus->qbus.glib_allocated = true; pci_bus_new_inplace(bus, parent, name, address_space_mem, address_space_io, devfn_min); return bus; @@ -761,6 +775,9 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, PCIBus *bus, return NULL; } pci_dev->bus = bus; + if (bus->dma_context_fn) { + pci_dev->dma = bus->dma_context_fn(bus, bus->dma_context_opaque, devfn); + } 
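/* [Editor's illustrative sketch - not part of the patch above.]
 * The hunk above gives each PCI device its own DMA context at registration
 * time: if the bus has a dma_context_fn installed, do_pci_register_device()
 * stores its result in pci_dev->dma, and the pci_dma_*() helpers in pci.h
 * then route accesses through dma_memory_rw(pci_dma_context(dev), ...).
 * A platform model with an emulated IOMMU would plug in roughly as below.
 * MyIOMMUState, my_iommu_dma_context() and my_iommu_attach() are
 * hypothetical names used only for illustration; pci_setup_iommu() and
 * PCIDMAContextFunc are the interfaces added by this series.
 */
#include "pci.h"   /* pci_setup_iommu(), PCIDMAContextFunc, DMAContext */

typedef struct MyIOMMUState {
    DMAContext *ctx[256];        /* one translation context per devfn,
                                    assumed to be populated elsewhere */
} MyIOMMUState;

/* Matches PCIDMAContextFunc: called once per device when it is
 * registered on the bus, keyed by the device's devfn. */
static DMAContext *my_iommu_dma_context(PCIBus *bus, void *opaque, int devfn)
{
    MyIOMMUState *s = opaque;
    return s->ctx[devfn];
}

/* Called from chipset/machine init, after the PCI bus exists. */
static void my_iommu_attach(PCIBus *bus, MyIOMMUState *s)
{
    pci_setup_iommu(bus, my_iommu_dma_context, s);
}

/* Device models need no changes: pci_dma_read()/pci_dma_write() and the
 * ld*/st*_pci_dma() helpers now translate through the installed context. */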
pci_dev->devfn = devfn; pstrcpy(pci_dev->name, sizeof(pci_dev->name), name); pci_dev->irq_state = 0; @@ -1037,6 +1054,9 @@ void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val, int l) if (range_covers_byte(addr, l, PCI_COMMAND)) pci_update_irq_disabled(d, was_irq_disabled); + + msi_write_config(d, addr, val, l); + msix_write_config(d, addr, val, l); } /***********************************************************/ @@ -1127,7 +1147,9 @@ static const pci_class_desc pci_class_descriptions[] = }; static void pci_for_each_device_under_bus(PCIBus *bus, - void (*fn)(PCIBus *b, PCIDevice *d)) + void (*fn)(PCIBus *b, PCIDevice *d, + void *opaque), + void *opaque) { PCIDevice *d; int devfn; @@ -1135,18 +1157,19 @@ static void pci_for_each_device_under_bus(PCIBus *bus, for(devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { d = bus->devices[devfn]; if (d) { - fn(bus, d); + fn(bus, d, opaque); } } } void pci_for_each_device(PCIBus *bus, int bus_num, - void (*fn)(PCIBus *b, PCIDevice *d)) + void (*fn)(PCIBus *b, PCIDevice *d, void *opaque), + void *opaque) { bus = pci_find_bus_nr(bus, bus_num); if (bus) { - pci_for_each_device_under_bus(bus, fn); + pci_for_each_device_under_bus(bus, fn, opaque); } } @@ -1537,7 +1560,7 @@ PCIDevice *pci_create_multifunction(PCIBus *bus, int devfn, bool multifunction, DeviceState *dev; dev = qdev_create(&bus->qbus, name); - qdev_prop_set_uint32(dev, "addr", devfn); + qdev_prop_set_int32(dev, "addr", devfn); qdev_prop_set_bit(dev, "multifunction", multifunction); return PCI_DEVICE(dev); } @@ -2000,7 +2023,14 @@ static void pci_device_class_init(ObjectClass *klass, void *data) k->init = pci_qdev_init; k->unplug = pci_unplug_device; k->exit = pci_unregister_device; - k->bus_info = &pci_bus_info; + k->bus_type = TYPE_PCI_BUS; + k->props = pci_props; +} + +void pci_setup_iommu(PCIBus *bus, PCIDMAContextFunc fn, void *opaque) +{ + bus->dma_context_fn = fn; + bus->dma_context_opaque = opaque; } static TypeInfo pci_device_type_info = { @@ -2014,6 +2044,7 @@ static TypeInfo pci_device_type_info = { static void pci_register_types(void) { + type_register_static(&pci_bus_info); type_register_static(&pci_device_type_info); } @@ -179,6 +179,7 @@ typedef void (*MSIVectorReleaseNotifier)(PCIDevice *dev, unsigned int vector); struct PCIDevice { DeviceState qdev; + /* PCI config space */ uint8_t *config; @@ -197,9 +198,10 @@ struct PCIDevice { /* the following fields are read only */ PCIBus *bus; - uint32_t devfn; + int32_t devfn; char name[64]; PCIIORegion io_regions[PCI_NUM_REGIONS]; + DMAContext *dma; /* do not access the following fields */ PCIConfigReadFunc *config_read; @@ -312,7 +314,9 @@ PCIDevice *pci_nic_init(NICInfo *nd, const char *default_model, PCIDevice *pci_nic_init_nofail(NICInfo *nd, const char *default_model, const char *default_devaddr); int pci_bus_num(PCIBus *s); -void pci_for_each_device(PCIBus *bus, int bus_num, void (*fn)(PCIBus *bus, PCIDevice *d)); +void pci_for_each_device(PCIBus *bus, int bus_num, + void (*fn)(PCIBus *bus, PCIDevice *d, void *opaque), + void *opaque); PCIBus *pci_find_root_bus(int domain); int pci_find_domain(const PCIBus *bus); PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn); @@ -324,6 +328,10 @@ int pci_read_devaddr(Monitor *mon, const char *addr, int *domp, int *busp, void pci_device_deassert_intx(PCIDevice *dev); +typedef DMAContext *(*PCIDMAContextFunc)(PCIBus *, void *, int); + +void pci_setup_iommu(PCIBus *bus, PCIDMAContextFunc fn, void *opaque); + static inline void pci_set_byte(uint8_t 
*config, uint8_t val) { @@ -558,10 +566,15 @@ static inline uint32_t pci_config_size(const PCIDevice *d) } /* DMA access functions */ +static inline DMAContext *pci_dma_context(PCIDevice *dev) +{ + return dev->dma; +} + static inline int pci_dma_rw(PCIDevice *dev, dma_addr_t addr, void *buf, dma_addr_t len, DMADirection dir) { - cpu_physical_memory_rw(addr, buf, len, dir == DMA_DIRECTION_FROM_DEVICE); + dma_memory_rw(pci_dma_context(dev), addr, buf, len, dir); return 0; } @@ -581,12 +594,12 @@ static inline int pci_dma_write(PCIDevice *dev, dma_addr_t addr, static inline uint##_bits##_t ld##_l##_pci_dma(PCIDevice *dev, \ dma_addr_t addr) \ { \ - return ld##_l##_phys(addr); \ + return ld##_l##_dma(pci_dma_context(dev), addr); \ } \ static inline void st##_s##_pci_dma(PCIDevice *dev, \ - dma_addr_t addr, uint##_bits##_t val) \ + dma_addr_t addr, uint##_bits##_t val) \ { \ - st##_s##_phys(addr, val); \ + st##_s##_dma(pci_dma_context(dev), addr, val); \ } PCI_DMA_DEFINE_LDST(ub, b, 8); @@ -602,25 +615,22 @@ PCI_DMA_DEFINE_LDST(q_be, q_be, 64); static inline void *pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t *plen, DMADirection dir) { - target_phys_addr_t len = *plen; void *buf; - buf = cpu_physical_memory_map(addr, &len, dir == DMA_DIRECTION_FROM_DEVICE); - *plen = len; + buf = dma_memory_map(pci_dma_context(dev), addr, plen, dir); return buf; } static inline void pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len, DMADirection dir, dma_addr_t access_len) { - cpu_physical_memory_unmap(buffer, len, dir == DMA_DIRECTION_FROM_DEVICE, - access_len); + dma_memory_unmap(pci_dma_context(dev), buffer, len, dir, access_len); } static inline void pci_dma_sglist_init(QEMUSGList *qsg, PCIDevice *dev, int alloc_hint) { - qemu_sglist_init(qsg, alloc_hint); + qemu_sglist_init(qsg, alloc_hint, pci_dma_context(dev)); } extern const VMStateDescription vmstate_pci_device; diff --git a/hw/pci_bridge.c b/hw/pci_bridge.c index 866f0b6c52..0916276c4d 100644 --- a/hw/pci_bridge.c +++ b/hw/pci_bridge.c @@ -254,8 +254,9 @@ void pci_bridge_disable_base_limit(PCIDevice *dev) } /* reset bridge specific configuration registers */ -void pci_bridge_reset_reg(PCIDevice *dev) +void pci_bridge_reset(DeviceState *qdev) { + PCIDevice *dev = PCI_DEVICE(qdev); uint8_t *conf = dev->config; conf[PCI_PRIMARY_BUS] = 0; @@ -291,13 +292,6 @@ void pci_bridge_reset_reg(PCIDevice *dev) pci_set_word(conf + PCI_BRIDGE_CONTROL, 0); } -/* default reset function for PCI-to-PCI bridge */ -void pci_bridge_reset(DeviceState *qdev) -{ - PCIDevice *dev = PCI_DEVICE(qdev); - pci_bridge_reset_reg(dev); -} - /* default qdev initialization function for PCI-to-PCI bridge */ int pci_bridge_initfn(PCIDevice *dev) { @@ -324,7 +318,7 @@ int pci_bridge_initfn(PCIDevice *dev) br->bus_name = dev->qdev.id; } - qbus_create_inplace(&sec_bus->qbus, &pci_bus_info, &dev->qdev, + qbus_create_inplace(&sec_bus->qbus, TYPE_PCI_BUS, &dev->qdev, br->bus_name); sec_bus->parent_dev = dev; sec_bus->map_irq = br->map_irq; diff --git a/hw/pci_bridge_dev.c b/hw/pci_bridge_dev.c index eccaa5831e..1cc1d2049c 100644 --- a/hw/pci_bridge_dev.c +++ b/hw/pci_bridge_dev.c @@ -52,7 +52,7 @@ static int pci_bridge_dev_initfn(PCIDevice *dev) { PCIBridge *br = DO_UPCAST(PCIBridge, dev, dev); PCIBridgeDev *bridge_dev = DO_UPCAST(PCIBridgeDev, bridge, br); - int err; + int err, ret; pci_bridge_map_irq(br, NULL, pci_bridge_dev_map_irq_fn); err = pci_bridge_initfn(dev); if (err) { @@ -86,6 +86,8 @@ slotid_error: shpc_cleanup(dev, &bridge_dev->bar); shpc_error: 
memory_region_destroy(&bridge_dev->bar); + ret = pci_bridge_exitfn(dev); + assert(!ret); bridge_error: return err; } @@ -119,10 +121,8 @@ static void pci_bridge_dev_write_config(PCIDevice *d, static void qdev_pci_bridge_dev_reset(DeviceState *qdev) { PCIDevice *dev = DO_UPCAST(PCIDevice, qdev, qdev); + pci_bridge_reset(qdev); - if (msi_present(dev)) { - msi_reset(dev); - } shpc_reset(dev); } diff --git a/hw/pci_ids.h b/hw/pci_ids.h index e8235a7d05..b4801d2168 100644 --- a/hw/pci_ids.h +++ b/hw/pci_ids.h @@ -15,6 +15,7 @@ #define PCI_CLASS_STORAGE_SCSI 0x0100 #define PCI_CLASS_STORAGE_IDE 0x0101 +#define PCI_CLASS_STORAGE_RAID 0x0104 #define PCI_CLASS_STORAGE_SATA 0x0106 #define PCI_CLASS_STORAGE_OTHER 0x0180 @@ -47,6 +48,7 @@ #define PCI_VENDOR_ID_LSI_LOGIC 0x1000 #define PCI_DEVICE_ID_LSI_53C895A 0x0012 +#define PCI_DEVICE_ID_LSI_SAS1078 0x0060 #define PCI_VENDOR_ID_DEC 0x1011 #define PCI_DEVICE_ID_DEC_21154 0x0026 @@ -118,6 +120,7 @@ #define PCI_DEVICE_ID_INTEL_82801I_UHCI6 0x2939 #define PCI_DEVICE_ID_INTEL_82801I_EHCI1 0x293a #define PCI_DEVICE_ID_INTEL_82801I_EHCI2 0x293c +#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed #define PCI_VENDOR_ID_XEN 0x5853 #define PCI_DEVICE_ID_XEN_PLATFORM 0x0001 diff --git a/hw/pci_internals.h b/hw/pci_internals.h index 96690b72d3..e8bc9f61eb 100644 --- a/hw/pci_internals.h +++ b/hw/pci_internals.h @@ -12,10 +12,13 @@ * Use accessor function in pci.h, pci_bridge.h */ -extern struct BusInfo pci_bus_info; +#define TYPE_PCI_BUS "PCI" +#define PCI_BUS(obj) OBJECT_CHECK(PCIBus, (obj), TYPE_PCI_BUS) struct PCIBus { BusState qbus; + PCIDMAContextFunc dma_context_fn; + void *dma_context_opaque; uint8_t devfn_min; pci_set_irq_fn set_irq; pci_map_irq_fn map_irq; diff --git a/hw/petalogix_ml605_mmu.c b/hw/petalogix_ml605_mmu.c index bff63e389a..6a7d0c0bff 100644 --- a/hw/petalogix_ml605_mmu.c +++ b/hw/petalogix_ml605_mmu.c @@ -123,7 +123,7 @@ petalogix_ml605_init(ram_addr_t ram_size, irq[5], 115200, serial_hds[0], DEVICE_LITTLE_ENDIAN); /* 2 timers at irq 2 @ 100 Mhz. */ - xilinx_timer_create(TIMER_BASEADDR, irq[2], 2, 100 * 1000000); + xilinx_timer_create(TIMER_BASEADDR, irq[2], 0, 100 * 1000000); /* axi ethernet and dma initialization. TODO: Dynamically connect them. */ { diff --git a/hw/petalogix_s3adsp1800_mmu.c b/hw/petalogix_s3adsp1800_mmu.c index f41c559e61..2cf68828ed 100644 --- a/hw/petalogix_s3adsp1800_mmu.c +++ b/hw/petalogix_s3adsp1800_mmu.c @@ -104,9 +104,9 @@ petalogix_s3adsp1800_init(ram_addr_t ram_size, irq[i] = qdev_get_gpio_in(dev, i); } - sysbus_create_simple("xilinx,uartlite", UARTLITE_BASEADDR, irq[3]); + sysbus_create_simple("xlnx.xps-uartlite", UARTLITE_BASEADDR, irq[3]); /* 2 timers at irq 2 @ 62 Mhz. 
*/ - xilinx_timer_create(TIMER_BASEADDR, irq[0], 2, 62 * 1000000); + xilinx_timer_create(TIMER_BASEADDR, irq[0], 0, 62 * 1000000); xilinx_ethlite_create(&nd_table[0], ETHLITE_BASEADDR, irq[1], 0, 0); microblaze_load_kernel(cpu, ddr_base, ram_size, diff --git a/hw/ppc/Makefile.objs b/hw/ppc/Makefile.objs new file mode 100644 index 0000000000..aa4bbeb664 --- /dev/null +++ b/hw/ppc/Makefile.objs @@ -0,0 +1,28 @@ +# shared objects +obj-y = ppc.o ppc_booke.o +# PREP target +obj-y += mc146818rtc.o +obj-y += ppc_prep.o +# OldWorld PowerMac +obj-y += ppc_oldworld.o +# NewWorld PowerMac +obj-y += ppc_newworld.o +# IBM pSeries (sPAPR) +obj-$(CONFIG_PSERIES) += spapr.o spapr_hcall.o spapr_rtas.o spapr_vio.o +obj-$(CONFIG_PSERIES) += xics.o spapr_vty.o spapr_llan.o spapr_vscsi.o +obj-$(CONFIG_PSERIES) += spapr_pci.o pci-hotplug.o spapr_iommu.o +# PowerPC 4xx boards +obj-y += ppc4xx_devs.o ppc4xx_pci.o ppc405_uc.o ppc405_boards.o +obj-y += ppc440_bamboo.o +# PowerPC E500 boards +obj-$(CONFIG_FDT) += ppce500_mpc8544ds.o mpc8544_guts.o ppce500_spin.o +# PowerPC 440 Xilinx ML507 reference board. +obj-y += virtex_ml507.o +# PowerPC OpenPIC +obj-y += openpic.o +obj-$(CONFIG_FDT) += ../device_tree.o + +# Xilinx PPC peripherals +obj-y += xilinx_ethlite.o + +obj-y := $(addprefix ../,$(obj-y)) diff --git a/hw/ppce500_mpc8544ds.c b/hw/ppce500_mpc8544ds.c index 3eb8a23779..8b9fd83ce1 100644 --- a/hw/ppce500_mpc8544ds.c +++ b/hw/ppce500_mpc8544ds.c @@ -31,6 +31,7 @@ #include "elf.h" #include "sysbus.h" #include "exec-memory.h" +#include "host-utils.h" #define BINARY_DEVICE_TREE_FILE "mpc8544ds.dtb" #define UIMAGE_LOAD_BASE 0 @@ -41,57 +42,150 @@ #define RAM_SIZES_ALIGN (64UL << 20) -#define MPC8544_CCSRBAR_BASE 0xE0000000 -#define MPC8544_MPIC_REGS_BASE (MPC8544_CCSRBAR_BASE + 0x40000) -#define MPC8544_SERIAL0_REGS_BASE (MPC8544_CCSRBAR_BASE + 0x4500) -#define MPC8544_SERIAL1_REGS_BASE (MPC8544_CCSRBAR_BASE + 0x4600) -#define MPC8544_PCI_REGS_BASE (MPC8544_CCSRBAR_BASE + 0x8000) -#define MPC8544_PCI_REGS_SIZE 0x1000 -#define MPC8544_PCI_IO 0xE1000000 -#define MPC8544_PCI_IOLEN 0x10000 -#define MPC8544_UTIL_BASE (MPC8544_CCSRBAR_BASE + 0xe0000) -#define MPC8544_SPIN_BASE 0xEF000000 +#define MPC8544_CCSRBAR_BASE 0xE0000000ULL +#define MPC8544_CCSRBAR_SIZE 0x00100000ULL +#define MPC8544_MPIC_REGS_BASE (MPC8544_CCSRBAR_BASE + 0x40000ULL) +#define MPC8544_SERIAL0_REGS_BASE (MPC8544_CCSRBAR_BASE + 0x4500ULL) +#define MPC8544_SERIAL1_REGS_BASE (MPC8544_CCSRBAR_BASE + 0x4600ULL) +#define MPC8544_PCI_REGS_BASE (MPC8544_CCSRBAR_BASE + 0x8000ULL) +#define MPC8544_PCI_REGS_SIZE 0x1000ULL +#define MPC8544_PCI_IO 0xE1000000ULL +#define MPC8544_PCI_IOLEN 0x10000ULL +#define MPC8544_UTIL_BASE (MPC8544_CCSRBAR_BASE + 0xe0000ULL) +#define MPC8544_SPIN_BASE 0xEF000000ULL struct boot_info { uint32_t dt_base; + uint32_t dt_size; uint32_t entry; }; +static void pci_map_create(void *fdt, uint32_t *pci_map, uint32_t mpic) +{ + int i; + const uint32_t tmp[] = { + /* IDSEL 0x11 J17 Slot 1 */ + 0x8800, 0x0, 0x0, 0x1, mpic, 0x2, 0x1, 0x0, 0x0, + 0x8800, 0x0, 0x0, 0x2, mpic, 0x3, 0x1, 0x0, 0x0, + 0x8800, 0x0, 0x0, 0x3, mpic, 0x4, 0x1, 0x0, 0x0, + 0x8800, 0x0, 0x0, 0x4, mpic, 0x1, 0x1, 0x0, 0x0, + + /* IDSEL 0x12 J16 Slot 2 */ + 0x9000, 0x0, 0x0, 0x1, mpic, 0x3, 0x1, 0x0, 0x0, + 0x9000, 0x0, 0x0, 0x2, mpic, 0x4, 0x1, 0x0, 0x0, + 0x9000, 0x0, 0x0, 0x3, mpic, 0x2, 0x1, 0x0, 0x0, + 0x9000, 0x0, 0x0, 0x4, mpic, 0x1, 0x1, 0x0, 0x0, + }; + for (i = 0; i < ARRAY_SIZE(tmp); i++) { + pci_map[i] = cpu_to_be32(tmp[i]); + } +} + +static void 
dt_serial_create(void *fdt, unsigned long long offset, + const char *soc, const char *mpic, + const char *alias, int idx, bool defcon) +{ + char ser[128]; + + snprintf(ser, sizeof(ser), "%s/serial@%llx", soc, offset); + qemu_devtree_add_subnode(fdt, ser); + qemu_devtree_setprop_string(fdt, ser, "device_type", "serial"); + qemu_devtree_setprop_string(fdt, ser, "compatible", "ns16550"); + qemu_devtree_setprop_cells(fdt, ser, "reg", offset, 0x100); + qemu_devtree_setprop_cell(fdt, ser, "cell-index", idx); + qemu_devtree_setprop_cell(fdt, ser, "clock-frequency", 0); + qemu_devtree_setprop_cells(fdt, ser, "interrupts", 42, 2, 0, 0); + qemu_devtree_setprop_phandle(fdt, ser, "interrupt-parent", mpic); + qemu_devtree_setprop_string(fdt, "/aliases", alias, ser); + + if (defcon) { + qemu_devtree_setprop_string(fdt, "/chosen", "linux,stdout-path", ser); + } +} + static int mpc8544_load_device_tree(CPUPPCState *env, target_phys_addr_t addr, - uint32_t ramsize, + target_phys_addr_t ramsize, target_phys_addr_t initrd_base, target_phys_addr_t initrd_size, const char *kernel_cmdline) { int ret = -1; -#ifdef CONFIG_FDT - uint32_t mem_reg_property[] = {0, cpu_to_be32(ramsize)}; - char *filename; + uint64_t mem_reg_property[] = { 0, cpu_to_be64(ramsize) }; int fdt_size; void *fdt; uint8_t hypercall[16]; uint32_t clock_freq = 400000000; uint32_t tb_freq = 400000000; int i; + const char *compatible = "MPC8544DS\0MPC85xxDS"; + int compatible_len = sizeof("MPC8544DS\0MPC85xxDS"); + char compatible_sb[] = "fsl,mpc8544-immr\0simple-bus"; + char model[] = "MPC8544DS"; + char soc[128]; + char mpic[128]; + uint32_t mpic_ph; + char gutil[128]; + char pci[128]; + uint32_t pci_map[9 * 8]; + uint32_t pci_ranges[14] = + { + 0x2000000, 0x0, 0xc0000000, + 0x0, 0xc0000000, + 0x0, 0x20000000, + + 0x1000000, 0x0, 0x0, + 0x0, 0xe1000000, + 0x0, 0x10000, + }; + QemuOpts *machine_opts; + const char *dumpdtb = NULL; + const char *dtb_file = NULL; + + machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0); + if (machine_opts) { + const char *tmp; + dumpdtb = qemu_opt_get(machine_opts, "dumpdtb"); + dtb_file = qemu_opt_get(machine_opts, "dtb"); + tmp = qemu_opt_get(machine_opts, "dt_compatible"); + if (tmp) { + compatible = tmp; + compatible_len = strlen(compatible) + 1; + } + } - filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE); - if (!filename) { - goto out; + if (dtb_file) { + char *filename; + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_file); + if (!filename) { + goto out; + } + + fdt = load_device_tree(filename, &fdt_size); + if (!fdt) { + goto out; + } + goto done; } - fdt = load_device_tree(filename, &fdt_size); - g_free(filename); + + fdt = create_device_tree(&fdt_size); if (fdt == NULL) { goto out; } /* Manipulate device tree in memory. 
*/ - ret = qemu_devtree_setprop(fdt, "/memory", "reg", mem_reg_property, - sizeof(mem_reg_property)); - if (ret < 0) - fprintf(stderr, "couldn't set /memory/reg\n"); + qemu_devtree_setprop_string(fdt, "/", "model", model); + qemu_devtree_setprop(fdt, "/", "compatible", compatible, compatible_len); + qemu_devtree_setprop_cell(fdt, "/", "#address-cells", 2); + qemu_devtree_setprop_cell(fdt, "/", "#size-cells", 2); + + qemu_devtree_add_subnode(fdt, "/memory"); + qemu_devtree_setprop_string(fdt, "/memory", "device_type", "memory"); + qemu_devtree_setprop(fdt, "/memory", "reg", mem_reg_property, + sizeof(mem_reg_property)); + qemu_devtree_add_subnode(fdt, "/chosen"); if (initrd_size) { ret = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-start", initrd_base); @@ -117,6 +211,7 @@ static int mpc8544_load_device_tree(CPUPPCState *env, tb_freq = kvmppc_get_tbfreq(); /* indicate KVM hypercall interface */ + qemu_devtree_add_subnode(fdt, "/hypervisor"); qemu_devtree_setprop_string(fdt, "/hypervisor", "compatible", "linux,kvm"); kvmppc_get_hypercall(env, hypercall, sizeof(hypercall)); @@ -124,11 +219,16 @@ static int mpc8544_load_device_tree(CPUPPCState *env, hypercall, sizeof(hypercall)); } + /* Create CPU nodes */ + qemu_devtree_add_subnode(fdt, "/cpus"); + qemu_devtree_setprop_cell(fdt, "/cpus", "#address-cells", 1); + qemu_devtree_setprop_cell(fdt, "/cpus", "#size-cells", 0); + /* We need to generate the cpu nodes in reverse order, so Linux can pick the first node as boot node and be happy */ for (i = smp_cpus - 1; i >= 0; i--) { char cpu_name[128]; - uint64_t cpu_release_addr = cpu_to_be64(MPC8544_SPIN_BASE + (i * 0x20)); + uint64_t cpu_release_addr = MPC8544_SPIN_BASE + (i * 0x20); for (env = first_cpu; env != NULL; env = env->next_cpu) { if (env->cpu_index == i) { @@ -156,39 +256,133 @@ static int mpc8544_load_device_tree(CPUPPCState *env, if (env->cpu_index) { qemu_devtree_setprop_string(fdt, cpu_name, "status", "disabled"); qemu_devtree_setprop_string(fdt, cpu_name, "enable-method", "spin-table"); - qemu_devtree_setprop(fdt, cpu_name, "cpu-release-addr", - &cpu_release_addr, sizeof(cpu_release_addr)); + qemu_devtree_setprop_u64(fdt, cpu_name, "cpu-release-addr", + cpu_release_addr); } else { qemu_devtree_setprop_string(fdt, cpu_name, "status", "okay"); } } + qemu_devtree_add_subnode(fdt, "/aliases"); + /* XXX These should go into their respective devices' code */ + snprintf(soc, sizeof(soc), "/soc@%llx", MPC8544_CCSRBAR_BASE); + qemu_devtree_add_subnode(fdt, soc); + qemu_devtree_setprop_string(fdt, soc, "device_type", "soc"); + qemu_devtree_setprop(fdt, soc, "compatible", compatible_sb, + sizeof(compatible_sb)); + qemu_devtree_setprop_cell(fdt, soc, "#address-cells", 1); + qemu_devtree_setprop_cell(fdt, soc, "#size-cells", 1); + qemu_devtree_setprop_cells(fdt, soc, "ranges", 0x0, + MPC8544_CCSRBAR_BASE >> 32, MPC8544_CCSRBAR_BASE, + MPC8544_CCSRBAR_SIZE); + /* XXX should contain a reasonable value */ + qemu_devtree_setprop_cell(fdt, soc, "bus-frequency", 0); + + snprintf(mpic, sizeof(mpic), "%s/pic@%llx", soc, + MPC8544_MPIC_REGS_BASE - MPC8544_CCSRBAR_BASE); + qemu_devtree_add_subnode(fdt, mpic); + qemu_devtree_setprop_string(fdt, mpic, "device_type", "open-pic"); + qemu_devtree_setprop_string(fdt, mpic, "compatible", "fsl,mpic"); + qemu_devtree_setprop_cells(fdt, mpic, "reg", MPC8544_MPIC_REGS_BASE - + MPC8544_CCSRBAR_BASE, 0x40000); + qemu_devtree_setprop_cell(fdt, mpic, "#address-cells", 0); + qemu_devtree_setprop_cell(fdt, mpic, "#interrupt-cells", 4); + mpic_ph = 
qemu_devtree_alloc_phandle(fdt); + qemu_devtree_setprop_cell(fdt, mpic, "phandle", mpic_ph); + qemu_devtree_setprop_cell(fdt, mpic, "linux,phandle", mpic_ph); + qemu_devtree_setprop(fdt, mpic, "interrupt-controller", NULL, 0); + qemu_devtree_setprop(fdt, mpic, "big-endian", NULL, 0); + qemu_devtree_setprop(fdt, mpic, "single-cpu-affinity", NULL, 0); + qemu_devtree_setprop_cell(fdt, mpic, "last-interrupt-source", 255); + + /* + * We have to generate ser1 first, because Linux takes the first + * device it finds in the dt as serial output device. And we generate + * devices in reverse order to the dt. + */ + dt_serial_create(fdt, MPC8544_SERIAL1_REGS_BASE - MPC8544_CCSRBAR_BASE, + soc, mpic, "serial1", 1, false); + dt_serial_create(fdt, MPC8544_SERIAL0_REGS_BASE - MPC8544_CCSRBAR_BASE, + soc, mpic, "serial0", 0, true); + + snprintf(gutil, sizeof(gutil), "%s/global-utilities@%llx", soc, + MPC8544_UTIL_BASE - MPC8544_CCSRBAR_BASE); + qemu_devtree_add_subnode(fdt, gutil); + qemu_devtree_setprop_string(fdt, gutil, "compatible", "fsl,mpc8544-guts"); + qemu_devtree_setprop_cells(fdt, gutil, "reg", MPC8544_UTIL_BASE - + MPC8544_CCSRBAR_BASE, 0x1000); + qemu_devtree_setprop(fdt, gutil, "fsl,has-rstcr", NULL, 0); + + snprintf(pci, sizeof(pci), "/pci@%llx", MPC8544_PCI_REGS_BASE); + qemu_devtree_add_subnode(fdt, pci); + qemu_devtree_setprop_cell(fdt, pci, "cell-index", 0); + qemu_devtree_setprop_string(fdt, pci, "compatible", "fsl,mpc8540-pci"); + qemu_devtree_setprop_string(fdt, pci, "device_type", "pci"); + qemu_devtree_setprop_cells(fdt, pci, "interrupt-map-mask", 0xf800, 0x0, + 0x0, 0x7); + pci_map_create(fdt, pci_map, qemu_devtree_get_phandle(fdt, mpic)); + qemu_devtree_setprop(fdt, pci, "interrupt-map", pci_map, sizeof(pci_map)); + qemu_devtree_setprop_phandle(fdt, pci, "interrupt-parent", mpic); + qemu_devtree_setprop_cells(fdt, pci, "interrupts", 24, 2, 0, 0); + qemu_devtree_setprop_cells(fdt, pci, "bus-range", 0, 255); + for (i = 0; i < 14; i++) { + pci_ranges[i] = cpu_to_be32(pci_ranges[i]); + } + qemu_devtree_setprop(fdt, pci, "ranges", pci_ranges, sizeof(pci_ranges)); + qemu_devtree_setprop_cells(fdt, pci, "reg", MPC8544_PCI_REGS_BASE >> 32, + MPC8544_PCI_REGS_BASE, 0, 0x1000); + qemu_devtree_setprop_cell(fdt, pci, "clock-frequency", 66666666); + qemu_devtree_setprop_cell(fdt, pci, "#interrupt-cells", 1); + qemu_devtree_setprop_cell(fdt, pci, "#size-cells", 2); + qemu_devtree_setprop_cell(fdt, pci, "#address-cells", 3); + qemu_devtree_setprop_string(fdt, "/aliases", "pci0", pci); + +done: + if (dumpdtb) { + /* Dump the dtb to a file and quit */ + FILE *f = fopen(dumpdtb, "wb"); + size_t len; + len = fwrite(fdt, fdt_size, 1, f); + fclose(f); + if (len != fdt_size) { + exit(1); + } + exit(0); + } + ret = rom_add_blob_fixed(BINARY_DEVICE_TREE_FILE, fdt, fdt_size, addr); + if (ret < 0) { + goto out; + } g_free(fdt); + ret = fdt_size; out: -#endif return ret; } -/* Create -kernel TLB entries for BookE, linearly spanning 256MB. */ +/* Create -kernel TLB entries for BookE. 
*/ static inline target_phys_addr_t booke206_page_size_to_tlb(uint64_t size) { - return ffs(size >> 10) - 1; + return 63 - clz64(size >> 10); } -static void mmubooke_create_initial_mapping(CPUPPCState *env, - target_ulong va, - target_phys_addr_t pa) +static void mmubooke_create_initial_mapping(CPUPPCState *env) { + struct boot_info *bi = env->load_info; ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 0); - target_phys_addr_t size; - - size = (booke206_page_size_to_tlb(256 * 1024 * 1024) << MAS1_TSIZE_SHIFT); + target_phys_addr_t size, dt_end; + int ps; + + /* Our initial TLB entry needs to cover everything from 0 to + the device tree top */ + dt_end = bi->dt_base + bi->dt_size; + ps = booke206_page_size_to_tlb(dt_end) + 1; + size = (ps << MAS1_TSIZE_SHIFT); tlb->mas1 = MAS1_VALID | size; - tlb->mas2 = va & TARGET_PAGE_MASK; - tlb->mas7_3 = pa & TARGET_PAGE_MASK; + tlb->mas2 = 0; + tlb->mas7_3 = 0; tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX; env->tlb_dirty = true; @@ -220,7 +414,7 @@ static void mpc8544ds_cpu_reset(void *opaque) env->gpr[1] = (16<<20) - 8; env->gpr[3] = bi->dt_base; env->nip = bi->entry; - mmubooke_create_initial_mapping(env, 0, 0); + mmubooke_create_initial_mapping(env); } static void mpc8544ds_init(ram_addr_t ram_size, @@ -275,6 +469,7 @@ static void mpc8544ds_init(ram_addr_t ram_size, irqs[i][OPENPIC_OUTPUT_INT] = input[PPCE500_INPUT_INT]; irqs[i][OPENPIC_OUTPUT_CINT] = input[PPCE500_INPUT_CINT]; env->spr[SPR_BOOKE_PIR] = env->cpu_index = i; + env->mpic_cpu_base = MPC8544_MPIC_REGS_BASE + 0x20000; ppc_booke_timers_init(env, 400000000, PPC_TIMER_E500); @@ -379,13 +574,12 @@ static void mpc8544ds_init(ram_addr_t ram_size, /* If we're loading a kernel directly, we must load the device tree too. */ if (kernel_filename) { struct boot_info *boot_info; + int dt_size; -#ifndef CONFIG_FDT - cpu_abort(env, "Compiled without FDT support - can't load kernel\n"); -#endif - dt_base = (kernel_size + DTC_LOAD_PAD) & ~DTC_PAD_MASK; - if (mpc8544_load_device_tree(env, dt_base, ram_size, - initrd_base, initrd_size, kernel_cmdline) < 0) { + dt_base = (loadaddr + kernel_size + DTC_LOAD_PAD) & ~DTC_PAD_MASK; + dt_size = mpc8544_load_device_tree(env, dt_base, ram_size, initrd_base, + initrd_size, kernel_cmdline); + if (dt_size < 0) { fprintf(stderr, "couldn't load device tree\n"); exit(1); } @@ -393,6 +587,7 @@ static void mpc8544ds_init(ram_addr_t ram_size, boot_info = env->load_info; boot_info->entry = entry; boot_info->dt_base = dt_base; + boot_info->dt_size = dt_size; } if (kvm_enabled()) { @@ -65,7 +65,7 @@ # define PXA2XX_INTERNAL_SIZE 0x40000 /* pxa2xx_pic.c */ -DeviceState *pxa2xx_pic_init(target_phys_addr_t base, CPUARMState *env); +DeviceState *pxa2xx_pic_init(target_phys_addr_t base, ARMCPU *cpu); /* pxa2xx_gpio.c */ DeviceState *pxa2xx_gpio_init(target_phys_addr_t base, diff --git a/hw/pxa2xx.c b/hw/pxa2xx.c index 5f8f226a29..d5f1420ed9 100644 --- a/hw/pxa2xx.c +++ b/hw/pxa2xx.c @@ -224,210 +224,161 @@ static const VMStateDescription vmstate_pxa2xx_cm = { } }; -static uint32_t pxa2xx_clkpwr_read(void *opaque, int op2, int reg, int crm) +static int pxa2xx_clkcfg_read(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t *value) { - PXA2xxState *s = (PXA2xxState *) opaque; - - switch (reg) { - case 6: /* Clock Configuration register */ - return s->clkcfg; - - case 7: /* Power Mode register */ - return 0; + PXA2xxState *s = (PXA2xxState *)ri->opaque; + *value = s->clkcfg; + return 0; +} - default: - printf("%s: Bad register 0x%x\n", __FUNCTION__, reg); - 
break; +static int pxa2xx_clkcfg_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + PXA2xxState *s = (PXA2xxState *)ri->opaque; + s->clkcfg = value & 0xf; + if (value & 2) { + printf("%s: CPU frequency change attempt\n", __func__); } return 0; } -static void pxa2xx_clkpwr_write(void *opaque, int op2, int reg, int crm, - uint32_t value) +static int pxa2xx_pwrmode_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - PXA2xxState *s = (PXA2xxState *) opaque; + PXA2xxState *s = (PXA2xxState *)ri->opaque; static const char *pwrmode[8] = { "Normal", "Idle", "Deep-idle", "Standby", "Sleep", "reserved (!)", "reserved (!)", "Deep-sleep", }; - switch (reg) { - case 6: /* Clock Configuration register */ - s->clkcfg = value & 0xf; - if (value & 2) - printf("%s: CPU frequency change attempt\n", __FUNCTION__); + if (value & 8) { + printf("%s: CPU voltage change attempt\n", __func__); + } + switch (value & 7) { + case 0: + /* Do nothing */ break; - case 7: /* Power Mode register */ - if (value & 8) - printf("%s: CPU voltage change attempt\n", __FUNCTION__); - switch (value & 7) { - case 0: - /* Do nothing */ + case 1: + /* Idle */ + if (!(s->cm_regs[CCCR >> 2] & (1 << 31))) { /* CPDIS */ + cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_HALT); break; + } + /* Fall through. */ - case 1: - /* Idle */ - if (!(s->cm_regs[CCCR >> 2] & (1 << 31))) { /* CPDIS */ - cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_HALT); - break; - } - /* Fall through. */ - - case 2: - /* Deep-Idle */ - cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_HALT); - s->pm_regs[RCSR >> 2] |= 0x8; /* Set GPR */ - goto message; - - case 3: - s->cpu->env.uncached_cpsr = - ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I; - s->cpu->env.cp15.c1_sys = 0; - s->cpu->env.cp15.c1_coproc = 0; - s->cpu->env.cp15.c2_base0 = 0; - s->cpu->env.cp15.c3 = 0; - s->pm_regs[PSSR >> 2] |= 0x8; /* Set STS */ - s->pm_regs[RCSR >> 2] |= 0x8; /* Set GPR */ - - /* - * The scratch-pad register is almost universally used - * for storing the return address on suspend. For the - * lack of a resuming bootloader, perform a jump - * directly to that address. - */ - memset(s->cpu->env.regs, 0, 4 * 15); - s->cpu->env.regs[15] = s->pm_regs[PSPR >> 2]; + case 2: + /* Deep-Idle */ + cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_HALT); + s->pm_regs[RCSR >> 2] |= 0x8; /* Set GPR */ + goto message; + + case 3: + s->cpu->env.uncached_cpsr = + ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I; + s->cpu->env.cp15.c1_sys = 0; + s->cpu->env.cp15.c1_coproc = 0; + s->cpu->env.cp15.c2_base0 = 0; + s->cpu->env.cp15.c3 = 0; + s->pm_regs[PSSR >> 2] |= 0x8; /* Set STS */ + s->pm_regs[RCSR >> 2] |= 0x8; /* Set GPR */ + + /* + * The scratch-pad register is almost universally used + * for storing the return address on suspend. For the + * lack of a resuming bootloader, perform a jump + * directly to that address. 
+ */ + memset(s->cpu->env.regs, 0, 4 * 15); + s->cpu->env.regs[15] = s->pm_regs[PSPR >> 2]; #if 0 - buffer = 0xe59ff000; /* ldr pc, [pc, #0] */ - cpu_physical_memory_write(0, &buffer, 4); - buffer = s->pm_regs[PSPR >> 2]; - cpu_physical_memory_write(8, &buffer, 4); + buffer = 0xe59ff000; /* ldr pc, [pc, #0] */ + cpu_physical_memory_write(0, &buffer, 4); + buffer = s->pm_regs[PSPR >> 2]; + cpu_physical_memory_write(8, &buffer, 4); #endif - /* Suspend */ - cpu_interrupt(cpu_single_env, CPU_INTERRUPT_HALT); + /* Suspend */ + cpu_interrupt(cpu_single_env, CPU_INTERRUPT_HALT); - goto message; - - default: - message: - printf("%s: machine entered %s mode\n", __FUNCTION__, - pwrmode[value & 7]); - } - break; + goto message; default: - printf("%s: Bad register 0x%x\n", __FUNCTION__, reg); - break; + message: + printf("%s: machine entered %s mode\n", __func__, + pwrmode[value & 7]); } -} - -/* Performace Monitoring Registers */ -#define CPPMNC 0 /* Performance Monitor Control register */ -#define CPCCNT 1 /* Clock Counter register */ -#define CPINTEN 4 /* Interrupt Enable register */ -#define CPFLAG 5 /* Overflow Flag register */ -#define CPEVTSEL 8 /* Event Selection register */ -#define CPPMN0 0 /* Performance Count register 0 */ -#define CPPMN1 1 /* Performance Count register 1 */ -#define CPPMN2 2 /* Performance Count register 2 */ -#define CPPMN3 3 /* Performance Count register 3 */ + return 0; +} -static uint32_t pxa2xx_perf_read(void *opaque, int op2, int reg, int crm) +static int pxa2xx_cppmnc_read(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t *value) { - PXA2xxState *s = (PXA2xxState *) opaque; - - switch (reg) { - case CPPMNC: - return s->pmnc; - case CPCCNT: - if (s->pmnc & 1) - return qemu_get_clock_ns(vm_clock); - else - return 0; - case CPINTEN: - case CPFLAG: - case CPEVTSEL: - return 0; - - default: - printf("%s: Bad register 0x%x\n", __FUNCTION__, reg); - break; - } + PXA2xxState *s = (PXA2xxState *)ri->opaque; + *value = s->pmnc; return 0; } -static void pxa2xx_perf_write(void *opaque, int op2, int reg, int crm, - uint32_t value) +static int pxa2xx_cppmnc_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - PXA2xxState *s = (PXA2xxState *) opaque; - - switch (reg) { - case CPPMNC: - s->pmnc = value; - break; - - case CPCCNT: - case CPINTEN: - case CPFLAG: - case CPEVTSEL: - break; - - default: - printf("%s: Bad register 0x%x\n", __FUNCTION__, reg); - break; - } + PXA2xxState *s = (PXA2xxState *)ri->opaque; + s->pmnc = value; + return 0; } -static uint32_t pxa2xx_cp14_read(void *opaque, int op2, int reg, int crm) +static int pxa2xx_cpccnt_read(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t *value) { - switch (crm) { - case 0: - return pxa2xx_clkpwr_read(opaque, op2, reg, crm); - case 1: - return pxa2xx_perf_read(opaque, op2, reg, crm); - case 2: - switch (reg) { - case CPPMN0: - case CPPMN1: - case CPPMN2: - case CPPMN3: - return 0; - } - /* Fall through */ - default: - printf("%s: Bad register 0x%x\n", __FUNCTION__, reg); - break; + PXA2xxState *s = (PXA2xxState *)ri->opaque; + if (s->pmnc & 1) { + *value = qemu_get_clock_ns(vm_clock); + } else { + *value = 0; } return 0; } -static void pxa2xx_cp14_write(void *opaque, int op2, int reg, int crm, - uint32_t value) +static const ARMCPRegInfo pxa_cp_reginfo[] = { + /* cp14 crn==1: perf registers */ + { .name = "CPPMNC", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .readfn = pxa2xx_cppmnc_read, .writefn = pxa2xx_cppmnc_write }, + { .name = "CPCCNT", .cp = 14, .crn = 1, .crm = 
1, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .readfn = pxa2xx_cpccnt_read, .writefn = arm_cp_write_ignore }, + { .name = "CPINTEN", .cp = 14, .crn = 1, .crm = 4, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPFLAG", .cp = 14, .crn = 1, .crm = 5, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPEVTSEL", .cp = 14, .crn = 1, .crm = 8, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + /* cp14 crn==2: performance count registers */ + { .name = "CPPMN0", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPPMN1", .cp = 14, .crn = 2, .crm = 1, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPPMN2", .cp = 14, .crn = 2, .crm = 2, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPPMN3", .cp = 14, .crn = 2, .crm = 3, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + /* cp14 crn==6: CLKCFG */ + { .name = "CLKCFG", .cp = 14, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .readfn = pxa2xx_clkcfg_read, .writefn = pxa2xx_clkcfg_write }, + /* cp14 crn==7: PWRMODE */ + { .name = "PWRMODE", .cp = 14, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .readfn = arm_cp_read_zero, .writefn = pxa2xx_pwrmode_write }, + REGINFO_SENTINEL +}; + +static void pxa2xx_setup_cp14(PXA2xxState *s) { - switch (crm) { - case 0: - pxa2xx_clkpwr_write(opaque, op2, reg, crm, value); - break; - case 1: - pxa2xx_perf_write(opaque, op2, reg, crm, value); - break; - case 2: - switch (reg) { - case CPPMN0: - case CPPMN1: - case CPPMN2: - case CPPMN3: - return; - } - /* Fall through */ - default: - printf("%s: Bad register 0x%x\n", __FUNCTION__, reg); - break; - } + define_arm_cp_regs_with_opaque(s->cpu, pxa_cp_reginfo, s); } #define MDCNFG 0x00 /* SDRAM Configuration register */ @@ -2081,7 +2032,7 @@ PXA2xxState *pxa270_init(MemoryRegion *address_space, memory_region_add_subregion(address_space, PXA2XX_INTERNAL_BASE, &s->internal); - s->pic = pxa2xx_pic_init(0x40d00000, &s->cpu->env); + s->pic = pxa2xx_pic_init(0x40d00000, s->cpu); s->dma = pxa27x_dma_init(0x40000000, qdev_get_gpio_in(s->pic, PXA2XX_PIC_DMA)); @@ -2133,7 +2084,7 @@ PXA2xxState *pxa270_init(MemoryRegion *address_space, memory_region_add_subregion(address_space, s->cm_base, &s->cm_iomem); vmstate_register(NULL, 0, &vmstate_pxa2xx_cm, s); - cpu_arm_set_cp_io(&s->cpu->env, 14, pxa2xx_cp14_read, pxa2xx_cp14_write, s); + pxa2xx_setup_cp14(s); s->mm_base = 0x48000000; s->mm_regs[MDMRS >> 2] = 0x00020002; @@ -2213,7 +2164,7 @@ PXA2xxState *pxa255_init(MemoryRegion *address_space, unsigned int sdram_size) memory_region_add_subregion(address_space, PXA2XX_INTERNAL_BASE, &s->internal); - s->pic = pxa2xx_pic_init(0x40d00000, &s->cpu->env); + s->pic = pxa2xx_pic_init(0x40d00000, s->cpu); s->dma = pxa255_dma_init(0x40000000, qdev_get_gpio_in(s->pic, PXA2XX_PIC_DMA)); @@ -2264,7 +2215,7 @@ PXA2xxState *pxa255_init(MemoryRegion *address_space, unsigned int sdram_size) memory_region_add_subregion(address_space, s->cm_base, &s->cm_iomem); vmstate_register(NULL, 0, &vmstate_pxa2xx_cm, s); - cpu_arm_set_cp_io(&s->cpu->env, 14, pxa2xx_cp14_read, pxa2xx_cp14_write, s); + pxa2xx_setup_cp14(s); s->mm_base = 0x48000000; s->mm_regs[MDMRS >> 2] = 0x00020002; diff --git a/hw/pxa2xx_gpio.c b/hw/pxa2xx_gpio.c index 
09a408b781..3c90c9c4e0 100644 --- a/hw/pxa2xx_gpio.c +++ b/hw/pxa2xx_gpio.c @@ -20,7 +20,7 @@ struct PXA2xxGPIOInfo { qemu_irq irq0, irq1, irqX; int lines; int ncpu; - CPUARMState *cpu_env; + ARMCPU *cpu; /* XXX: GNU C vectors are more suitable */ uint32_t ilevel[PXA2XX_GPIO_BANKS]; @@ -118,8 +118,9 @@ static void pxa2xx_gpio_set(void *opaque, int line, int level) pxa2xx_gpio_irq_update(s); /* Wake-up GPIOs */ - if (s->cpu_env->halted && (mask & ~s->dir[bank] & pxa2xx_gpio_wake[bank])) - cpu_interrupt(s->cpu_env, CPU_INTERRUPT_EXITTB); + if (s->cpu->env.halted && (mask & ~s->dir[bank] & pxa2xx_gpio_wake[bank])) { + cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_EXITTB); + } } static void pxa2xx_gpio_handler_update(PXA2xxGPIOInfo *s) { @@ -275,7 +276,7 @@ static int pxa2xx_gpio_initfn(SysBusDevice *dev) s = FROM_SYSBUS(PXA2xxGPIOInfo, dev); - s->cpu_env = qemu_get_cpu(s->ncpu); + s->cpu = arm_env_get_cpu(qemu_get_cpu(s->ncpu)); qdev_init_gpio_in(&dev->qdev, pxa2xx_gpio_set, s->lines); qdev_init_gpio_out(&dev->qdev, s->handler, s->lines); diff --git a/hw/pxa2xx_pic.c b/hw/pxa2xx_pic.c index a806b80b0f..e1e8830ff0 100644 --- a/hw/pxa2xx_pic.c +++ b/hw/pxa2xx_pic.c @@ -34,7 +34,7 @@ typedef struct { SysBusDevice busdev; MemoryRegion iomem; - CPUARMState *cpu_env; + ARMCPU *cpu; uint32_t int_enabled[2]; uint32_t int_pending[2]; uint32_t is_fiq[2]; @@ -47,25 +47,28 @@ static void pxa2xx_pic_update(void *opaque) uint32_t mask[2]; PXA2xxPICState *s = (PXA2xxPICState *) opaque; - if (s->cpu_env->halted) { + if (s->cpu->env.halted) { mask[0] = s->int_pending[0] & (s->int_enabled[0] | s->int_idle); mask[1] = s->int_pending[1] & (s->int_enabled[1] | s->int_idle); - if (mask[0] || mask[1]) - cpu_interrupt(s->cpu_env, CPU_INTERRUPT_EXITTB); + if (mask[0] || mask[1]) { + cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_EXITTB); + } } mask[0] = s->int_pending[0] & s->int_enabled[0]; mask[1] = s->int_pending[1] & s->int_enabled[1]; - if ((mask[0] & s->is_fiq[0]) || (mask[1] & s->is_fiq[1])) - cpu_interrupt(s->cpu_env, CPU_INTERRUPT_FIQ); - else - cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_FIQ); + if ((mask[0] & s->is_fiq[0]) || (mask[1] & s->is_fiq[1])) { + cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_FIQ); + } else { + cpu_reset_interrupt(&s->cpu->env, CPU_INTERRUPT_FIQ); + } - if ((mask[0] & ~s->is_fiq[0]) || (mask[1] & ~s->is_fiq[1])) - cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD); - else - cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_HARD); + if ((mask[0] & ~s->is_fiq[0]) || (mask[1] & ~s->is_fiq[1])) { + cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_HARD); + } else { + cpu_reset_interrupt(&s->cpu->env, CPU_INTERRUPT_HARD); + } } /* Note: Here level means state of the signal on a pin, not @@ -206,33 +209,42 @@ static const int pxa2xx_cp_reg_map[0x10] = { [0xa] = ICPR2, }; -static uint32_t pxa2xx_pic_cp_read(void *opaque, int op2, int reg, int crm) +static int pxa2xx_pic_cp_read(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t *value) { - target_phys_addr_t offset; - - if (pxa2xx_cp_reg_map[reg] == -1) { - printf("%s: Bad register 0x%x\n", __FUNCTION__, reg); - return 0; - } - - offset = pxa2xx_cp_reg_map[reg]; - return pxa2xx_pic_mem_read(opaque, offset, 4); + int offset = pxa2xx_cp_reg_map[ri->crn]; + *value = pxa2xx_pic_mem_read(ri->opaque, offset, 4); + return 0; } -static void pxa2xx_pic_cp_write(void *opaque, int op2, int reg, int crm, - uint32_t value) +static int pxa2xx_pic_cp_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - target_phys_addr_t offset; - - if (pxa2xx_cp_reg_map[reg] == 
-1) { - printf("%s: Bad register 0x%x\n", __FUNCTION__, reg); - return; - } - - offset = pxa2xx_cp_reg_map[reg]; - pxa2xx_pic_mem_write(opaque, offset, value, 4); + int offset = pxa2xx_cp_reg_map[ri->crn]; + pxa2xx_pic_mem_write(ri->opaque, offset, value, 4); + return 0; } +#define REGINFO_FOR_PIC_CP(NAME, CRN) \ + { .name = NAME, .cp = 6, .crn = CRN, .crm = 0, .opc1 = 0, .opc2 = 0, \ + .access = PL1_RW, \ + .readfn = pxa2xx_pic_cp_read, .writefn = pxa2xx_pic_cp_write } + +static const ARMCPRegInfo pxa_pic_cp_reginfo[] = { + REGINFO_FOR_PIC_CP("ICIP", 0), + REGINFO_FOR_PIC_CP("ICMR", 1), + REGINFO_FOR_PIC_CP("ICLR", 2), + REGINFO_FOR_PIC_CP("ICFP", 3), + REGINFO_FOR_PIC_CP("ICPR", 4), + REGINFO_FOR_PIC_CP("ICHP", 5), + REGINFO_FOR_PIC_CP("ICIP2", 6), + REGINFO_FOR_PIC_CP("ICMR2", 7), + REGINFO_FOR_PIC_CP("ICLR2", 8), + REGINFO_FOR_PIC_CP("ICFP2", 9), + REGINFO_FOR_PIC_CP("ICPR2", 0xa), + REGINFO_SENTINEL +}; + static const MemoryRegionOps pxa2xx_pic_ops = { .read = pxa2xx_pic_mem_read, .write = pxa2xx_pic_mem_write, @@ -245,12 +257,13 @@ static int pxa2xx_pic_post_load(void *opaque, int version_id) return 0; } -DeviceState *pxa2xx_pic_init(target_phys_addr_t base, CPUARMState *env) +DeviceState *pxa2xx_pic_init(target_phys_addr_t base, ARMCPU *cpu) { + CPUARMState *env = &cpu->env; DeviceState *dev = qdev_create(NULL, "pxa2xx_pic"); PXA2xxPICState *s = FROM_SYSBUS(PXA2xxPICState, sysbus_from_qdev(dev)); - s->cpu_env = env; + s->cpu = cpu; s->int_pending[0] = 0; s->int_pending[1] = 0; @@ -270,7 +283,7 @@ DeviceState *pxa2xx_pic_init(target_phys_addr_t base, CPUARMState *env) sysbus_mmio_map(sysbus_from_qdev(dev), 0, base); /* Enable IC coprocessor access. */ - cpu_arm_set_cp_io(env, 6, pxa2xx_pic_cp_read, pxa2xx_pic_cp_write, s); + define_arm_cp_regs_with_opaque(arm_env_get_cpu(env), pxa_pic_cp_reginfo, s); return dev; } diff --git a/hw/qdev-addr.c b/hw/qdev-addr.c index 0bb16c7eb3..b711b6bf96 100644 --- a/hw/qdev-addr.c +++ b/hw/qdev-addr.c @@ -27,7 +27,7 @@ static void get_taddr(Object *obj, Visitor *v, void *opaque, int64_t value; value = *ptr; - visit_type_int(v, &value, name, errp); + visit_type_int64(v, &value, name, errp); } static void set_taddr(Object *obj, Visitor *v, void *opaque, @@ -44,7 +44,7 @@ static void set_taddr(Object *obj, Visitor *v, void *opaque, return; } - visit_type_int(v, &value, name, &local_err); + visit_type_int64(v, &value, name, &local_err); if (local_err) { error_propagate(errp, local_err); return; diff --git a/hw/qdev-dma.h b/hw/qdev-dma.h new file mode 100644 index 0000000000..6812735e3d --- /dev/null +++ b/hw/qdev-dma.h @@ -0,0 +1,10 @@ +/* + * Support for dma_addr_t typed properties + * + * Copyright (C) 2012 David Gibson, IBM Corporation. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#define DEFINE_PROP_DMAADDR(_n, _s, _f, _d) \ + DEFINE_PROP_HEX64(_n, _s, _f, _d) diff --git a/hw/qdev-monitor.c b/hw/qdev-monitor.c index b01ef0600e..7915b4500d 100644 --- a/hw/qdev-monitor.c +++ b/hw/qdev-monitor.c @@ -20,6 +20,7 @@ #include "qdev.h" #include "monitor.h" #include "qmp-commands.h" +#include "arch_init.h" /* * Aliases were a bad idea from the start. 
Let's keep them @@ -29,16 +30,18 @@ typedef struct QDevAlias { const char *typename; const char *alias; + uint32_t arch_mask; } QDevAlias; static const QDevAlias qdev_alias_table[] = { - { "virtio-blk-pci", "virtio-blk" }, - { "virtio-net-pci", "virtio-net" }, - { "virtio-serial-pci", "virtio-serial" }, - { "virtio-balloon-pci", "virtio-balloon" }, - { "virtio-blk-s390", "virtio-blk" }, - { "virtio-net-s390", "virtio-net" }, - { "virtio-serial-s390", "virtio-serial" }, + { "virtio-blk-pci", "virtio-blk", QEMU_ARCH_ALL & ~QEMU_ARCH_S390X }, + { "virtio-net-pci", "virtio-net", QEMU_ARCH_ALL & ~QEMU_ARCH_S390X }, + { "virtio-serial-pci", "virtio-serial", QEMU_ARCH_ALL & ~QEMU_ARCH_S390X }, + { "virtio-balloon-pci", "virtio-balloon", + QEMU_ARCH_ALL & ~QEMU_ARCH_S390X }, + { "virtio-blk-s390", "virtio-blk", QEMU_ARCH_S390X }, + { "virtio-net-s390", "virtio-net", QEMU_ARCH_S390X }, + { "virtio-serial-s390", "virtio-serial", QEMU_ARCH_S390X }, { "lsi53c895a", "lsi" }, { "ich9-ahci", "ahci" }, { } @@ -50,6 +53,11 @@ static const char *qdev_class_get_alias(DeviceClass *dc) int i; for (i = 0; qdev_alias_table[i].typename; i++) { + if (qdev_alias_table[i].arch_mask && + !(qdev_alias_table[i].arch_mask & arch_type)) { + continue; + } + if (strcmp(qdev_alias_table[i].typename, typename) == 0) { return qdev_alias_table[i].alias; } @@ -75,8 +83,8 @@ static void qdev_print_devinfo(ObjectClass *klass, void *opaque) } error_printf("name \"%s\"", object_class_get_name(klass)); - if (dc->bus_info) { - error_printf(", bus %s", dc->bus_info->name); + if (dc->bus_type) { + error_printf(", bus %s", dc->bus_type); } if (qdev_class_has_alias(dc)) { error_printf(", alias \"%s\"", qdev_class_get_alias(dc)); @@ -110,6 +118,11 @@ static const char *find_typename_by_alias(const char *alias) int i; for (i = 0; qdev_alias_table[i].alias; i++) { + if (qdev_alias_table[i].arch_mask && + !(qdev_alias_table[i].arch_mask & arch_type)) { + continue; + } + if (strcmp(qdev_alias_table[i].alias, alias) == 0) { return qdev_alias_table[i].typename; } @@ -123,7 +136,6 @@ int qdev_device_help(QemuOpts *opts) const char *driver; Property *prop; ObjectClass *klass; - DeviceClass *info; driver = qemu_opt_get(opts, "driver"); if (driver && !strcmp(driver, "?")) { @@ -149,30 +161,22 @@ int qdev_device_help(QemuOpts *opts) if (!klass) { return 0; } - info = DEVICE_CLASS(klass); - - for (prop = info->props; prop && prop->name; prop++) { - /* - * TODO Properties without a parser are just for dirty hacks. - * qdev_prop_ptr is the only such PropertyInfo. It's marked - * for removal. This conditional should be removed along with - * it. - */ - if (!prop->info->set) { - continue; /* no way to set it, don't show */ - } - error_printf("%s.%s=%s\n", driver, prop->name, - prop->info->legacy_name ?: prop->info->name); - } - if (info->bus_info) { - for (prop = info->bus_info->props; prop && prop->name; prop++) { + do { + for (prop = DEVICE_CLASS(klass)->props; prop && prop->name; prop++) { + /* + * TODO Properties without a parser are just for dirty hacks. + * qdev_prop_ptr is the only such PropertyInfo. It's marked + * for removal. This conditional should be removed along with + * it. 
+ */ if (!prop->info->set) { continue; /* no way to set it, don't show */ } error_printf("%s.%s=%s\n", driver, prop->name, prop->info->legacy_name ?: prop->info->name); } - } + klass = object_class_get_parent(klass); + } while (klass != object_class_by_name(TYPE_DEVICE)); return 1; } @@ -214,11 +218,12 @@ static void qbus_list_bus(DeviceState *dev) static void qbus_list_dev(BusState *bus) { - DeviceState *dev; + BusChild *kid; const char *sep = " "; error_printf("devices at \"%s\":", bus->name); - QTAILQ_FOREACH(dev, &bus->children, sibling) { + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; error_printf("%s\"%s\"", sep, object_get_typename(OBJECT(dev))); if (dev->id) error_printf("/\"%s\"", dev->id); @@ -241,7 +246,7 @@ static BusState *qbus_find_bus(DeviceState *dev, char *elem) static DeviceState *qbus_find_dev(BusState *bus, char *elem) { - DeviceState *dev; + BusChild *kid; /* * try to match in order: @@ -249,17 +254,20 @@ static DeviceState *qbus_find_dev(BusState *bus, char *elem) * (2) driver name * (3) driver alias, if present */ - QTAILQ_FOREACH(dev, &bus->children, sibling) { + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; if (dev->id && strcmp(dev->id, elem) == 0) { return dev; } } - QTAILQ_FOREACH(dev, &bus->children, sibling) { + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; if (strcmp(object_get_typename(OBJECT(dev)), elem) == 0) { return dev; } } - QTAILQ_FOREACH(dev, &bus->children, sibling) { + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; DeviceClass *dc = DEVICE_GET_CLASS(dev); if (qdev_class_has_alias(dc) && @@ -271,25 +279,27 @@ static DeviceState *qbus_find_dev(BusState *bus, char *elem) } static BusState *qbus_find_recursive(BusState *bus, const char *name, - const BusInfo *info) + const char *bus_typename) { - DeviceState *dev; + BusChild *kid; BusState *child, *ret; int match = 1; if (name && (strcmp(bus->name, name) != 0)) { match = 0; } - if (info && (bus->info != info)) { + if (bus_typename && + (strcmp(object_get_typename(OBJECT(bus)), bus_typename) != 0)) { match = 0; } if (match) { return bus; } - QTAILQ_FOREACH(dev, &bus->children, sibling) { + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; QLIST_FOREACH(child, &dev->child_bus, sibling) { - ret = qbus_find_recursive(child, name, info); + ret = qbus_find_recursive(child, name, bus_typename); if (ret) { return ret; } @@ -424,16 +434,16 @@ DeviceState *qdev_device_add(QemuOpts *opts) if (!bus) { return NULL; } - if (bus->info != k->bus_info) { + if (strcmp(object_get_typename(OBJECT(bus)), k->bus_type) != 0) { qerror_report(QERR_BAD_BUS_FOR_DEVICE, - driver, bus->info->name); + driver, object_get_typename(OBJECT(bus))); return NULL; } } else { - bus = qbus_find_recursive(sysbus_get_default(), NULL, k->bus_info); + bus = qbus_find_recursive(sysbus_get_default(), NULL, k->bus_type); if (!bus) { qerror_report(QERR_NO_BUS_FOR_DEVICE, - driver, k->bus_info->name); + driver, k->bus_type); return NULL; } } @@ -449,7 +459,6 @@ DeviceState *qdev_device_add(QemuOpts *opts) /* create device, set properties */ qdev = DEVICE(object_new(driver)); qdev_set_parent_bus(qdev, bus); - qdev_prop_set_globals(qdev); id = qemu_opts_id(opts); if (id) { @@ -482,7 +491,7 @@ DeviceState *qdev_device_add(QemuOpts *opts) static void qbus_print(Monitor *mon, BusState *bus, int indent); static void qdev_print_props(Monitor *mon, DeviceState *dev, Property *props, - const char 
*prefix, int indent) + int indent) { if (!props) return; @@ -501,14 +510,24 @@ static void qdev_print_props(Monitor *mon, DeviceState *dev, Property *props, error_free(err); continue; } - qdev_printf("%s-prop: %s = %s\n", prefix, props->name, + qdev_printf("%s = %s\n", props->name, value && *value ? value : "<null>"); g_free(value); } } +static void bus_print_dev(BusState *bus, Monitor *mon, DeviceState *dev, int indent) +{ + BusClass *bc = BUS_GET_CLASS(bus); + + if (bc->print_dev) { + bc->print_dev(mon, dev, indent); + } +} + static void qdev_print(Monitor *mon, DeviceState *dev, int indent) { + ObjectClass *class; BusState *child; qdev_printf("dev: %s, id \"%s\"\n", object_get_typename(OBJECT(dev)), dev->id ? dev->id : ""); @@ -519,10 +538,12 @@ static void qdev_print(Monitor *mon, DeviceState *dev, int indent) if (dev->num_gpio_out) { qdev_printf("gpio-out %d\n", dev->num_gpio_out); } - qdev_print_props(mon, dev, qdev_get_props(dev), "dev", indent); - qdev_print_props(mon, dev, dev->parent_bus->info->props, "bus", indent); - if (dev->parent_bus->info->print_dev) - dev->parent_bus->info->print_dev(mon, dev, indent); + class = object_get_class(OBJECT(dev)); + do { + qdev_print_props(mon, dev, DEVICE_CLASS(class)->props, indent); + class = object_class_get_parent(class); + } while (class != object_class_by_name(TYPE_DEVICE)); + bus_print_dev(dev->parent_bus, mon, dev, indent + 2); QLIST_FOREACH(child, &dev->child_bus, sibling) { qbus_print(mon, child, indent); } @@ -530,12 +551,13 @@ static void qdev_print(Monitor *mon, DeviceState *dev, int indent) static void qbus_print(Monitor *mon, BusState *bus, int indent) { - struct DeviceState *dev; + BusChild *kid; qdev_printf("bus: %s\n", bus->name); indent += 2; - qdev_printf("type %s\n", bus->info->name); - QTAILQ_FOREACH(dev, &bus->children, sibling) { + qdev_printf("type %s\n", object_get_typename(OBJECT(bus))); + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; qdev_print(mon, dev, indent); } } diff --git a/hw/qdev-properties.c b/hw/qdev-properties.c index b7b5597c62..0b894620c9 100644 --- a/hw/qdev-properties.c +++ b/hw/qdev-properties.c @@ -76,52 +76,35 @@ PropertyInfo qdev_prop_bit = { /* --- 8bit integer --- */ -static void get_int8(Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) +static void get_uint8(Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) { DeviceState *dev = DEVICE(obj); Property *prop = opaque; - int8_t *ptr = qdev_get_prop_ptr(dev, prop); - int64_t value; + uint8_t *ptr = qdev_get_prop_ptr(dev, prop); - value = *ptr; - visit_type_int(v, &value, name, errp); + visit_type_uint8(v, ptr, name, errp); } -static void set_int8(Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) +static void set_uint8(Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) { DeviceState *dev = DEVICE(obj); Property *prop = opaque; - int8_t *ptr = qdev_get_prop_ptr(dev, prop); - Error *local_err = NULL; - int64_t value; + uint8_t *ptr = qdev_get_prop_ptr(dev, prop); if (dev->state != DEV_STATE_CREATED) { error_set(errp, QERR_PERMISSION_DENIED); return; } - visit_type_int(v, &value, name, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - if (value >= prop->info->min && value <= prop->info->max) { - *ptr = value; - } else { - error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, - dev->id?:"", name, value, prop->info->min, - prop->info->max); - } + visit_type_uint8(v, ptr, name, errp); } PropertyInfo 
qdev_prop_uint8 = { .name = "uint8", - .get = get_int8, - .set = set_int8, - .min = 0, - .max = 255, + .get = get_uint8, + .set = set_uint8, }; /* --- 8bit hex value --- */ @@ -154,74 +137,78 @@ PropertyInfo qdev_prop_hex8 = { .legacy_name = "hex8", .parse = parse_hex8, .print = print_hex8, - .get = get_int8, - .set = set_int8, - .min = 0, - .max = 255, + .get = get_uint8, + .set = set_uint8, }; /* --- 16bit integer --- */ -static void get_int16(Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) +static void get_uint16(Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) { DeviceState *dev = DEVICE(obj); Property *prop = opaque; - int16_t *ptr = qdev_get_prop_ptr(dev, prop); - int64_t value; + uint16_t *ptr = qdev_get_prop_ptr(dev, prop); - value = *ptr; - visit_type_int(v, &value, name, errp); + visit_type_uint16(v, ptr, name, errp); } -static void set_int16(Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) +static void set_uint16(Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) { DeviceState *dev = DEVICE(obj); Property *prop = opaque; - int16_t *ptr = qdev_get_prop_ptr(dev, prop); - Error *local_err = NULL; - int64_t value; + uint16_t *ptr = qdev_get_prop_ptr(dev, prop); if (dev->state != DEV_STATE_CREATED) { error_set(errp, QERR_PERMISSION_DENIED); return; } - visit_type_int(v, &value, name, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - if (value >= prop->info->min && value <= prop->info->max) { - *ptr = value; - } else { - error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, - dev->id?:"", name, value, prop->info->min, - prop->info->max); - } + visit_type_uint16(v, ptr, name, errp); } PropertyInfo qdev_prop_uint16 = { .name = "uint16", - .get = get_int16, - .set = set_int16, - .min = 0, - .max = 65535, + .get = get_uint16, + .set = set_uint16, }; /* --- 32bit integer --- */ +static void get_uint32(Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + Property *prop = opaque; + uint32_t *ptr = qdev_get_prop_ptr(dev, prop); + + visit_type_uint32(v, ptr, name, errp); +} + +static void set_uint32(Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + Property *prop = opaque; + uint32_t *ptr = qdev_get_prop_ptr(dev, prop); + + if (dev->state != DEV_STATE_CREATED) { + error_set(errp, QERR_PERMISSION_DENIED); + return; + } + + visit_type_uint32(v, ptr, name, errp); +} + static void get_int32(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { DeviceState *dev = DEVICE(obj); Property *prop = opaque; int32_t *ptr = qdev_get_prop_ptr(dev, prop); - int64_t value; - value = *ptr; - visit_type_int(v, &value, name, errp); + visit_type_int32(v, ptr, name, errp); } static void set_int32(Object *obj, Visitor *v, void *opaque, @@ -230,42 +217,25 @@ static void set_int32(Object *obj, Visitor *v, void *opaque, DeviceState *dev = DEVICE(obj); Property *prop = opaque; int32_t *ptr = qdev_get_prop_ptr(dev, prop); - Error *local_err = NULL; - int64_t value; if (dev->state != DEV_STATE_CREATED) { error_set(errp, QERR_PERMISSION_DENIED); return; } - visit_type_int(v, &value, name, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - if (value >= prop->info->min && value <= prop->info->max) { - *ptr = value; - } else { - error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, - dev->id?:"", name, value, prop->info->min, - 
prop->info->max); - } + visit_type_int32(v, ptr, name, errp); } PropertyInfo qdev_prop_uint32 = { .name = "uint32", - .get = get_int32, - .set = set_int32, - .min = 0, - .max = 0xFFFFFFFFULL, + .get = get_uint32, + .set = set_uint32, }; PropertyInfo qdev_prop_int32 = { .name = "int32", .get = get_int32, .set = set_int32, - .min = -0x80000000LL, - .max = 0x7FFFFFFFLL, }; /* --- 32bit hex value --- */ @@ -298,43 +268,41 @@ PropertyInfo qdev_prop_hex32 = { .legacy_name = "hex32", .parse = parse_hex32, .print = print_hex32, - .get = get_int32, - .set = set_int32, - .min = 0, - .max = 0xFFFFFFFFULL, + .get = get_uint32, + .set = set_uint32, }; /* --- 64bit integer --- */ -static void get_int64(Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) +static void get_uint64(Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) { DeviceState *dev = DEVICE(obj); Property *prop = opaque; - int64_t *ptr = qdev_get_prop_ptr(dev, prop); + uint64_t *ptr = qdev_get_prop_ptr(dev, prop); - visit_type_int(v, ptr, name, errp); + visit_type_uint64(v, ptr, name, errp); } -static void set_int64(Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) +static void set_uint64(Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) { DeviceState *dev = DEVICE(obj); Property *prop = opaque; - int64_t *ptr = qdev_get_prop_ptr(dev, prop); + uint64_t *ptr = qdev_get_prop_ptr(dev, prop); if (dev->state != DEV_STATE_CREATED) { error_set(errp, QERR_PERMISSION_DENIED); return; } - visit_type_int(v, ptr, name, errp); + visit_type_uint64(v, ptr, name, errp); } PropertyInfo qdev_prop_uint64 = { .name = "uint64", - .get = get_int64, - .set = set_int64, + .get = get_uint64, + .set = set_uint64, }; /* --- 64bit hex value --- */ @@ -367,8 +335,8 @@ PropertyInfo qdev_prop_hex64 = { .legacy_name = "hex64", .parse = parse_hex64, .print = print_hex64, - .get = get_int64, - .set = set_int64, + .get = get_uint64, + .set = set_uint64, }; /* --- string --- */ @@ -645,7 +613,7 @@ static void get_vlan(Object *obj, Visitor *v, void *opaque, int64_t id; id = *ptr ? (*ptr)->id : -1; - visit_type_int(v, &id, name, errp); + visit_type_int64(v, &id, name, errp); } static void set_vlan(Object *obj, Visitor *v, void *opaque, @@ -663,7 +631,7 @@ static void set_vlan(Object *obj, Visitor *v, void *opaque, return; } - visit_type_int(v, &id, name, &local_err); + visit_type_int64(v, &id, name, &local_err); if (local_err) { error_propagate(errp, local_err); return; @@ -824,7 +792,7 @@ static void set_pci_devfn(Object *obj, Visitor *v, void *opaque, { DeviceState *dev = DEVICE(obj); Property *prop = opaque; - uint32_t *ptr = qdev_get_prop_ptr(dev, prop); + int32_t value, *ptr = qdev_get_prop_ptr(dev, prop); unsigned int slot, fn, n; Error *local_err = NULL; char *str; @@ -837,7 +805,17 @@ static void set_pci_devfn(Object *obj, Visitor *v, void *opaque, visit_type_str(v, &str, name, &local_err); if (local_err) { error_free(local_err); - return set_int32(obj, v, opaque, name, errp); + local_err = NULL; + visit_type_int32(v, &value, name, &local_err); + if (local_err) { + error_propagate(errp, local_err); + } else if (value < -1 || value > 255) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? 
name : "null", + "pci_devfn"); + } else { + *ptr = value; + } + return; } if (sscanf(str, "%x.%x%n", &slot, &fn, &n) != 2) { @@ -860,7 +838,7 @@ invalid: static int print_pci_devfn(DeviceState *dev, Property *prop, char *dest, size_t len) { - uint32_t *ptr = qdev_get_prop_ptr(dev, prop); + int32_t *ptr = qdev_get_prop_ptr(dev, prop); if (*ptr == -1) { return snprintf(dest, len, "<unset>"); @@ -875,11 +853,6 @@ PropertyInfo qdev_prop_pci_devfn = { .print = print_pci_devfn, .get = get_int32, .set = set_pci_devfn, - /* FIXME: this should be -1...255, but the address is stored - * into an uint32_t rather than int32_t. - */ - .min = 0, - .max = 0xFFFFFFFFULL, }; /* --- blocksize --- */ @@ -889,31 +862,31 @@ static void set_blocksize(Object *obj, Visitor *v, void *opaque, { DeviceState *dev = DEVICE(obj); Property *prop = opaque; - int16_t *ptr = qdev_get_prop_ptr(dev, prop); + uint16_t value, *ptr = qdev_get_prop_ptr(dev, prop); Error *local_err = NULL; - int64_t value; + const int64_t min = 512; + const int64_t max = 32768; if (dev->state != DEV_STATE_CREATED) { error_set(errp, QERR_PERMISSION_DENIED); return; } - visit_type_int(v, &value, name, &local_err); + visit_type_uint16(v, &value, name, &local_err); if (local_err) { error_propagate(errp, local_err); return; } - if (value < prop->info->min || value > prop->info->max) { + if (value < min || value > max) { error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, - dev->id?:"", name, value, prop->info->min, - prop->info->max); + dev->id?:"", name, (int64_t)value, min, max); return; } /* We rely on power-of-2 blocksizes for bitmasks */ if ((value & (value - 1)) != 0) { error_set(errp, QERR_PROPERTY_VALUE_NOT_POWER_OF_2, - dev->id?:"", name, value); + dev->id?:"", name, (int64_t)value); return; } @@ -922,10 +895,115 @@ static void set_blocksize(Object *obj, Visitor *v, void *opaque, PropertyInfo qdev_prop_blocksize = { .name = "blocksize", - .get = get_int16, + .get = get_uint16, .set = set_blocksize, - .min = 512, - .max = 65024, +}; + +/* --- pci host address --- */ + +static void get_pci_host_devaddr(Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + Property *prop = opaque; + PCIHostDeviceAddress *addr = qdev_get_prop_ptr(dev, prop); + char buffer[] = "xxxx:xx:xx.x"; + char *p = buffer; + int rc = 0; + + rc = snprintf(buffer, sizeof(buffer), "%04x:%02x:%02x.%d", + addr->domain, addr->bus, addr->slot, addr->function); + assert(rc == sizeof(buffer) - 1); + + visit_type_str(v, &p, name, errp); +} + +/* + * Parse [<domain>:]<bus>:<slot>.<func> + * if <domain> is not supplied, it's assumed to be 0. 
+ */ +static void set_pci_host_devaddr(Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + Property *prop = opaque; + PCIHostDeviceAddress *addr = qdev_get_prop_ptr(dev, prop); + Error *local_err = NULL; + char *str, *p; + char *e; + unsigned long val; + unsigned long dom = 0, bus = 0; + unsigned int slot = 0, func = 0; + + if (dev->state != DEV_STATE_CREATED) { + error_set(errp, QERR_PERMISSION_DENIED); + return; + } + + visit_type_str(v, &str, name, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + p = str; + val = strtoul(p, &e, 16); + if (e == p || *e != ':') { + goto inval; + } + bus = val; + + p = e + 1; + val = strtoul(p, &e, 16); + if (e == p) { + goto inval; + } + if (*e == ':') { + dom = bus; + bus = val; + p = e + 1; + val = strtoul(p, &e, 16); + if (e == p) { + goto inval; + } + } + slot = val; + + if (*e != '.') { + goto inval; + } + p = e + 1; + val = strtoul(p, &e, 10); + if (e == p) { + goto inval; + } + func = val; + + if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7) { + goto inval; + } + + if (*e) { + goto inval; + } + + addr->domain = dom; + addr->bus = bus; + addr->slot = slot; + addr->function = func; + + g_free(str); + return; + +inval: + error_set_from_qdev_prop_error(errp, EINVAL, dev, prop, str); + g_free(str); +} + +PropertyInfo qdev_prop_pci_host_devaddr = { + .name = "pci-host-devaddr", + .get = get_pci_host_devaddr, + .set = set_pci_host_devaddr, }; /* --- public helpers --- */ @@ -944,26 +1022,22 @@ static Property *qdev_prop_walk(Property *props, const char *name) static Property *qdev_prop_find(DeviceState *dev, const char *name) { + ObjectClass *class; Property *prop; /* device properties */ - prop = qdev_prop_walk(qdev_get_props(dev), name); - if (prop) - return prop; - - /* bus properties */ - prop = qdev_prop_walk(dev->parent_bus->info->props, name); - if (prop) - return prop; + class = object_get_class(OBJECT(dev)); + do { + prop = qdev_prop_walk(DEVICE_CLASS(class)->props, name); + if (prop) { + return prop; + } + class = object_class_get_parent(class); + } while (class != object_class_by_name(TYPE_DEVICE)); return NULL; } -int qdev_prop_exists(DeviceState *dev, const char *name) -{ - return qdev_prop_find(dev, name) ? 
true : false; -} - void error_set_from_qdev_prop_error(Error **errp, int ret, DeviceState *dev, Property *prop, const char *value) { @@ -1134,28 +1208,6 @@ void qdev_prop_set_ptr(DeviceState *dev, const char *name, void *value) *ptr = value; } -void qdev_prop_set_defaults(DeviceState *dev, Property *props) -{ - Object *obj = OBJECT(dev); - if (!props) - return; - for (; props->name; props++) { - Error *errp = NULL; - if (props->qtype == QTYPE_NONE) { - continue; - } - if (props->qtype == QTYPE_QBOOL) { - object_property_set_bool(obj, props->defval, props->name, &errp); - } else if (props->info->enum_table) { - object_property_set_str(obj, props->info->enum_table[props->defval], - props->name, &errp); - } else if (props->qtype == QTYPE_QINT) { - object_property_set_int(obj, props->defval, props->name, &errp); - } - assert_no_error(errp); - } -} - static QTAILQ_HEAD(, GlobalProperty) global_props = QTAILQ_HEAD_INITIALIZER(global_props); static void qdev_prop_register_global(GlobalProperty *prop) @@ -1174,17 +1226,20 @@ void qdev_prop_register_global_list(GlobalProperty *props) void qdev_prop_set_globals(DeviceState *dev) { - GlobalProperty *prop; - - QTAILQ_FOREACH(prop, &global_props, next) { - if (strcmp(object_get_typename(OBJECT(dev)), prop->driver) != 0 && - strcmp(qdev_get_bus_info(dev)->name, prop->driver) != 0) { - continue; + ObjectClass *class = object_get_class(OBJECT(dev)); + + do { + GlobalProperty *prop; + QTAILQ_FOREACH(prop, &global_props, next) { + if (strcmp(object_class_get_name(class), prop->driver) != 0) { + continue; + } + if (qdev_prop_parse(dev, prop->property, prop->value) != 0) { + exit(1); + } } - if (qdev_prop_parse(dev, prop->property, prop->value) != 0) { - exit(1); - } - } + class = object_class_get_parent(class); + } while (class); } static int qdev_add_one_global(QemuOpts *opts, void *opaque) @@ -34,10 +34,6 @@ int qdev_hotplug = 0; static bool qdev_hot_added = false; static bool qdev_hot_removed = false; -/* This is a nasty hack to allow passing a NULL bus to qdev_create. */ -static BusState *main_system_bus; -static void main_system_bus_create(void); - /* Register a new device type. 
*/ const VMStateDescription *qdev_get_vmsd(DeviceState *dev) { @@ -45,18 +41,6 @@ const VMStateDescription *qdev_get_vmsd(DeviceState *dev) return dc->vmsd; } -BusInfo *qdev_get_bus_info(DeviceState *dev) -{ - DeviceClass *dc = DEVICE_GET_CLASS(dev); - return dc->bus_info; -} - -Property *qdev_get_props(DeviceState *dev) -{ - DeviceClass *dc = DEVICE_GET_CLASS(dev); - return dc->props; -} - const char *qdev_fw_name(DeviceState *dev) { DeviceClass *dc = DEVICE_GET_CLASS(dev); @@ -76,22 +60,48 @@ bool qdev_exists(const char *name) static void qdev_property_add_legacy(DeviceState *dev, Property *prop, Error **errp); -void qdev_set_parent_bus(DeviceState *dev, BusState *bus) +static void bus_remove_child(BusState *bus, DeviceState *child) { - Property *prop; + BusChild *kid; + + QTAILQ_FOREACH(kid, &bus->children, sibling) { + if (kid->child == child) { + char name[32]; + + snprintf(name, sizeof(name), "child[%d]", kid->index); + QTAILQ_REMOVE(&bus->children, kid, sibling); + object_property_del(OBJECT(bus), name, NULL); + g_free(kid); + return; + } + } +} + +static void bus_add_child(BusState *bus, DeviceState *child) +{ + char name[32]; + BusChild *kid = g_malloc0(sizeof(*kid)); if (qdev_hotplug) { assert(bus->allow_hotplug); } - dev->parent_bus = bus; - QTAILQ_INSERT_HEAD(&bus->children, dev, sibling); + kid->index = bus->max_index++; + kid->child = child; - for (prop = qdev_get_bus_info(dev)->props; prop && prop->name; prop++) { - qdev_property_add_legacy(dev, prop, NULL); - qdev_property_add_static(dev, prop, NULL); - } - qdev_prop_set_defaults(dev, dev->parent_bus->info->props); + QTAILQ_INSERT_HEAD(&bus->children, kid, sibling); + + snprintf(name, sizeof(name), "child[%d]", kid->index); + object_property_add_link(OBJECT(bus), name, + object_get_typename(OBJECT(child)), + (Object **)&kid->child, + NULL); +} + +void qdev_set_parent_bus(DeviceState *dev, BusState *bus) +{ + dev->parent_bus = bus; + bus_add_child(bus, dev); } /* Create a new device. This only initializes the device state structure @@ -105,7 +115,7 @@ DeviceState *qdev_create(BusState *bus, const char *name) if (!dev) { if (bus) { hw_error("Unknown device '%s' for bus '%s'\n", name, - bus->info->name); + object_get_typename(OBJECT(bus))); } else { hw_error("Unknown device '%s' for default sysbus\n", name); } @@ -131,7 +141,6 @@ DeviceState *qdev_try_create(BusState *bus, const char *type) } qdev_set_parent_bus(dev, bus); - qdev_prop_set_globals(dev); return dev; } @@ -150,6 +159,7 @@ int qdev_init(DeviceState *dev) rc = dc->init(dev); if (rc < 0) { + object_unparent(OBJECT(dev)); qdev_free(dev); return rc; } @@ -209,18 +219,11 @@ static int qdev_reset_one(DeviceState *dev, void *opaque) return 0; } -BusState *sysbus_get_default(void) -{ - if (!main_system_bus) { - main_system_bus_create(); - } - return main_system_bus; -} - static int qbus_reset_one(BusState *bus, void *opaque) { - if (bus->info->reset) { - return bus->info->reset(bus); + BusClass *bc = BUS_GET_CLASS(bus); + if (bc->reset) { + return bc->reset(bus); } return 0; } @@ -255,9 +258,10 @@ int qdev_simple_unplug_cb(DeviceState *dev) way is somewhat unclean, and best avoided. 
*/ void qdev_init_nofail(DeviceState *dev) { + const char *typename = object_get_typename(OBJECT(dev)); + if (qdev_init(dev) < 0) { - error_report("Initialization of device %s failed", - object_get_typename(OBJECT(dev))); + error_report("Initialization of device %s failed", typename); exit(1); } } @@ -321,7 +325,7 @@ void qdev_set_nic_properties(DeviceState *dev, NICInfo *nd) if (nd->netdev) qdev_prop_set_netdev(dev, "netdev", nd->netdev); if (nd->nvectors != DEV_NVECTORS_UNSPECIFIED && - qdev_prop_exists(dev, "vectors")) { + object_property_find(OBJECT(dev), "vectors", NULL)) { qdev_prop_set_uint32(dev, "vectors", nd->nvectors); } nd->instantiated = 1; @@ -342,7 +346,7 @@ BusState *qdev_get_child_bus(DeviceState *dev, const char *name) int qbus_walk_children(BusState *bus, qdev_walkerfn *devfn, qbus_walkerfn *busfn, void *opaque) { - DeviceState *dev; + BusChild *kid; int err; if (busfn) { @@ -352,8 +356,8 @@ int qbus_walk_children(BusState *bus, qdev_walkerfn *devfn, } } - QTAILQ_FOREACH(dev, &bus->children, sibling) { - err = qdev_walk_children(dev, devfn, busfn, opaque); + QTAILQ_FOREACH(kid, &bus->children, sibling) { + err = qdev_walk_children(kid->child, devfn, busfn, opaque); if (err < 0) { return err; } @@ -387,12 +391,17 @@ int qdev_walk_children(DeviceState *dev, qdev_walkerfn *devfn, DeviceState *qdev_find_recursive(BusState *bus, const char *id) { - DeviceState *dev, *ret; + BusChild *kid; + DeviceState *ret; BusState *child; - QTAILQ_FOREACH(dev, &bus->children, sibling) { - if (dev->id && strcmp(dev->id, id) == 0) + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + + if (dev->id && strcmp(dev->id, id) == 0) { return dev; + } + QLIST_FOREACH(child, &dev->child_bus, sibling) { ret = qdev_find_recursive(child, id); if (ret) { @@ -403,84 +412,87 @@ DeviceState *qdev_find_recursive(BusState *bus, const char *id) return NULL; } -void qbus_create_inplace(BusState *bus, BusInfo *info, - DeviceState *parent, const char *name) +static void qbus_realize(BusState *bus) { + const char *typename = object_get_typename(OBJECT(bus)); char *buf; int i,len; - bus->info = info; - bus->parent = parent; - - if (name) { + if (bus->name) { /* use supplied name */ - bus->name = g_strdup(name); - } else if (parent && parent->id) { + } else if (bus->parent && bus->parent->id) { /* parent device has id -> use it for bus name */ - len = strlen(parent->id) + 16; + len = strlen(bus->parent->id) + 16; buf = g_malloc(len); - snprintf(buf, len, "%s.%d", parent->id, parent->num_child_bus); + snprintf(buf, len, "%s.%d", bus->parent->id, bus->parent->num_child_bus); bus->name = buf; } else { /* no id -> use lowercase bus type for bus name */ - len = strlen(info->name) + 16; + len = strlen(typename) + 16; buf = g_malloc(len); - len = snprintf(buf, len, "%s.%d", info->name, - parent ? parent->num_child_bus : 0); + len = snprintf(buf, len, "%s.%d", typename, + bus->parent ? 
bus->parent->num_child_bus : 0); for (i = 0; i < len; i++) buf[i] = qemu_tolower(buf[i]); bus->name = buf; } - QTAILQ_INIT(&bus->children); - if (parent) { - QLIST_INSERT_HEAD(&parent->child_bus, bus, sibling); - parent->num_child_bus++; - } else if (bus != main_system_bus) { + if (bus->parent) { + QLIST_INSERT_HEAD(&bus->parent->child_bus, bus, sibling); + bus->parent->num_child_bus++; + object_property_add_child(OBJECT(bus->parent), bus->name, OBJECT(bus), NULL); + } else if (bus != sysbus_get_default()) { /* TODO: once all bus devices are qdevified, only reset handler for main_system_bus should be registered here. */ qemu_register_reset(qbus_reset_all_fn, bus); } } -BusState *qbus_create(BusInfo *info, DeviceState *parent, const char *name) +void qbus_create_inplace(BusState *bus, const char *typename, + DeviceState *parent, const char *name) { - BusState *bus; + object_initialize(bus, typename); - bus = g_malloc0(info->size); - bus->qdev_allocated = 1; - qbus_create_inplace(bus, info, parent, name); - return bus; + bus->parent = parent; + bus->name = name ? g_strdup(name) : NULL; + qbus_realize(bus); } -static void main_system_bus_create(void) +BusState *qbus_create(const char *typename, DeviceState *parent, const char *name) { - /* assign main_system_bus before qbus_create_inplace() - * in order to make "if (bus != main_system_bus)" work */ - main_system_bus = g_malloc0(system_bus_info.size); - main_system_bus->qdev_allocated = 1; - qbus_create_inplace(main_system_bus, &system_bus_info, NULL, - "main-system-bus"); + BusState *bus; + + bus = BUS(object_new(typename)); + bus->qom_allocated = true; + + bus->parent = parent; + bus->name = name ? g_strdup(name) : NULL; + qbus_realize(bus); + + return bus; } void qbus_free(BusState *bus) { - DeviceState *dev; - - while ((dev = QTAILQ_FIRST(&bus->children)) != NULL) { - qdev_free(dev); - } - if (bus->parent) { - QLIST_REMOVE(bus, sibling); - bus->parent->num_child_bus--; + if (bus->qom_allocated) { + object_delete(OBJECT(bus)); } else { - assert(bus != main_system_bus); /* main_system_bus is never freed */ - qemu_unregister_reset(qbus_reset_all_fn, bus); + object_finalize(OBJECT(bus)); + if (bus->glib_allocated) { + g_free(bus); + } } - g_free((void*)bus->name); - if (bus->qdev_allocated) { - g_free(bus); +} + +static char *bus_get_fw_dev_path(BusState *bus, DeviceState *dev) +{ + BusClass *bc = BUS_GET_CLASS(bus); + + if (bc->get_fw_dev_path) { + return bc->get_fw_dev_path(dev); } + + return NULL; } static int qdev_get_fw_dev_path_helper(DeviceState *dev, char *p, int size) @@ -490,8 +502,8 @@ static int qdev_get_fw_dev_path_helper(DeviceState *dev, char *p, int size) if (dev && dev->parent_bus) { char *d; l = qdev_get_fw_dev_path_helper(dev->parent_bus->parent, p, size); - if (dev->parent_bus->info->get_fw_dev_path) { - d = dev->parent_bus->info->get_fw_dev_path(dev); + d = bus_get_fw_dev_path(dev->parent_bus, dev); + if (d) { l += snprintf(p + l, size - l, "%s", d); g_free(d); } else { @@ -515,9 +527,20 @@ char* qdev_get_fw_dev_path(DeviceState *dev) return strdup(path); } -static char *qdev_get_type(Object *obj, Error **errp) +char *qdev_get_dev_path(DeviceState *dev) { - return g_strdup(object_get_typename(obj)); + BusClass *bc; + + if (!dev || !dev->parent_bus) { + return NULL; + } + + bc = BUS_GET_CLASS(dev->parent_bus); + if (bc->get_dev_path) { + return bc->get_dev_path(dev); + } + + return NULL; } /** @@ -605,6 +628,9 @@ void qdev_property_add_legacy(DeviceState *dev, Property *prop, void qdev_property_add_static(DeviceState *dev, 
Property *prop, Error **errp) { + Error *local_err = NULL; + Object *obj = OBJECT(dev); + /* * TODO qdev_prop_ptr does not have getters or setters. It must * go now that it can be replaced with links. The test should be @@ -614,15 +640,34 @@ void qdev_property_add_static(DeviceState *dev, Property *prop, return; } - object_property_add(OBJECT(dev), prop->name, prop->info->name, + object_property_add(obj, prop->name, prop->info->name, prop->info->get, prop->info->set, prop->info->release, - prop, errp); + prop, &local_err); + + if (local_err) { + error_propagate(errp, local_err); + return; + } + if (prop->qtype == QTYPE_NONE) { + return; + } + + if (prop->qtype == QTYPE_QBOOL) { + object_property_set_bool(obj, prop->defval, prop->name, &local_err); + } else if (prop->info->enum_table) { + object_property_set_str(obj, prop->info->enum_table[prop->defval], + prop->name, &local_err); + } else if (prop->qtype == QTYPE_QINT) { + object_property_set_int(obj, prop->defval, prop->name, &local_err); + } + assert_no_error(local_err); } static void device_initfn(Object *obj) { DeviceState *dev = DEVICE(obj); + ObjectClass *class; Property *prop; if (qdev_hotplug) { @@ -633,13 +678,18 @@ static void device_initfn(Object *obj) dev->instance_id_alias = -1; dev->state = DEV_STATE_CREATED; - for (prop = qdev_get_props(dev); prop && prop->name; prop++) { - qdev_property_add_legacy(dev, prop, NULL); - qdev_property_add_static(dev, prop, NULL); - } + class = object_get_class(OBJECT(dev)); + do { + for (prop = DEVICE_CLASS(class)->props; prop && prop->name; prop++) { + qdev_property_add_legacy(dev, prop, NULL); + qdev_property_add_static(dev, prop, NULL); + } + class = object_class_get_parent(class); + } while (class != object_class_by_name(TYPE_DEVICE)); + qdev_prop_set_globals(dev); - object_property_add_str(OBJECT(dev), "type", qdev_get_type, NULL, NULL); - qdev_prop_set_defaults(dev, qdev_get_props(dev)); + object_property_add_link(OBJECT(dev), "parent_bus", TYPE_BUS, + (Object **)&dev->parent_bus, NULL); } /* Unlink device from bus and free the structure. */ @@ -664,7 +714,19 @@ static void device_finalize(Object *obj) qemu_opts_del(dev->opts); } } - QTAILQ_REMOVE(&dev->parent_bus->children, dev, sibling); + if (dev->parent_bus) { + bus_remove_child(dev->parent_bus, dev); + } +} + +static void device_class_base_init(ObjectClass *class, void *data) +{ + DeviceClass *klass = DEVICE_CLASS(class); + + /* We explicitly look up properties in the superclasses, + * so do not propagate them to the subclasses. 
+ */ + klass->props = NULL; } void device_reset(DeviceState *dev) @@ -693,12 +755,50 @@ static TypeInfo device_type_info = { .instance_size = sizeof(DeviceState), .instance_init = device_initfn, .instance_finalize = device_finalize, + .class_base_init = device_class_base_init, .abstract = true, .class_size = sizeof(DeviceClass), }; +static void qbus_initfn(Object *obj) +{ + BusState *bus = BUS(obj); + + QTAILQ_INIT(&bus->children); +} + +static void qbus_finalize(Object *obj) +{ + BusState *bus = BUS(obj); + BusChild *kid; + + while ((kid = QTAILQ_FIRST(&bus->children)) != NULL) { + DeviceState *dev = kid->child; + qdev_free(dev); + } + if (bus->parent) { + QLIST_REMOVE(bus, sibling); + bus->parent->num_child_bus--; + } else { + assert(bus != sysbus_get_default()); /* main_system_bus is never freed */ + qemu_unregister_reset(qbus_reset_all_fn, bus); + } + g_free((char *)bus->name); +} + +static const TypeInfo bus_info = { + .name = TYPE_BUS, + .parent = TYPE_OBJECT, + .instance_size = sizeof(BusState), + .abstract = true, + .class_size = sizeof(BusClass), + .instance_init = qbus_initfn, + .instance_finalize = qbus_finalize, +}; + static void qdev_register_types(void) { + type_register_static(&bus_info); type_register_static(&device_type_info); } @@ -17,7 +17,7 @@ typedef struct CompatProperty CompatProperty; typedef struct BusState BusState; -typedef struct BusInfo BusInfo; +typedef struct BusClass BusClass; enum DevState { DEV_STATE_CREATED = 1, @@ -55,7 +55,7 @@ typedef struct DeviceClass { qdev_initfn init; qdev_event unplug; qdev_event exit; - BusInfo *bus_info; + const char *bus_type; } DeviceClass; /* This structure should not be accessed directly. We declare it here @@ -74,38 +74,52 @@ struct DeviceState { qemu_irq *gpio_in; QLIST_HEAD(, BusState) child_bus; int num_child_bus; - QTAILQ_ENTRY(DeviceState) sibling; int instance_id_alias; int alias_required_for_version; }; -typedef void (*bus_dev_printfn)(Monitor *mon, DeviceState *dev, int indent); -typedef char *(*bus_get_dev_path)(DeviceState *dev); /* * This callback is used to create Open Firmware device path in accordance with * OF spec http://forthworks.com/standards/of1275.pdf. Indicidual bus bindings * can be found here http://playground.sun.com/1275/bindings/. */ -typedef char *(*bus_get_fw_dev_path)(DeviceState *dev); -typedef int (qbus_resetfn)(BusState *bus); -struct BusInfo { - const char *name; - size_t size; - bus_dev_printfn print_dev; - bus_get_dev_path get_dev_path; - bus_get_fw_dev_path get_fw_dev_path; - qbus_resetfn *reset; - Property *props; +#define TYPE_BUS "bus" +#define BUS(obj) OBJECT_CHECK(BusState, (obj), TYPE_BUS) +#define BUS_CLASS(klass) OBJECT_CLASS_CHECK(BusClass, (klass), TYPE_BUS) +#define BUS_GET_CLASS(obj) OBJECT_GET_CLASS(BusClass, (obj), TYPE_BUS) + +struct BusClass { + ObjectClass parent_class; + + /* FIXME first arg should be BusState */ + void (*print_dev)(Monitor *mon, DeviceState *dev, int indent); + char *(*get_dev_path)(DeviceState *dev); + char *(*get_fw_dev_path)(DeviceState *dev); + int (*reset)(BusState *bus); }; +typedef struct BusChild { + DeviceState *child; + int index; + QTAILQ_ENTRY(BusChild) sibling; +} BusChild; + +/** + * BusState: + * @qom_allocated: Indicates whether the object was allocated by QOM. + * @glib_allocated: Indicates whether the object was initialized in-place + * yet is expected to be freed with g_free(). 
+ */ struct BusState { + Object obj; DeviceState *parent; - BusInfo *info; const char *name; int allow_hotplug; - int qdev_allocated; - QTAILQ_HEAD(ChildrenHead, DeviceState) children; + bool qom_allocated; + bool glib_allocated; + int max_index; + QTAILQ_HEAD(ChildrenHead, BusChild) children; QLIST_ENTRY(BusState) sibling; }; @@ -122,8 +136,6 @@ struct PropertyInfo { const char *name; const char *legacy_name; const char **enum_table; - int64_t min; - int64_t max; int (*parse)(DeviceState *dev, Property *prop, const char *str); int (*print)(DeviceState *dev, Property *prop, char *dest, size_t len); ObjectPropertyAccessor *get; @@ -177,9 +189,9 @@ DeviceState *qdev_find_recursive(BusState *bus, const char *id); typedef int (qbus_walkerfn)(BusState *bus, void *opaque); typedef int (qdev_walkerfn)(DeviceState *dev, void *opaque); -void qbus_create_inplace(BusState *bus, BusInfo *info, +void qbus_create_inplace(BusState *bus, const char *typename, DeviceState *parent, const char *name); -BusState *qbus_create(BusInfo *info, DeviceState *parent, const char *name); +BusState *qbus_create(const char *typename, DeviceState *parent, const char *name); /* Returns > 0 if either devfn or busfn skip walk somewhere in cursion, * < 0 if either devfn or busfn terminate walk somewhere in cursion, * 0 otherwise. */ @@ -225,6 +237,7 @@ extern PropertyInfo qdev_prop_netdev; extern PropertyInfo qdev_prop_vlan; extern PropertyInfo qdev_prop_pci_devfn; extern PropertyInfo qdev_prop_blocksize; +extern PropertyInfo qdev_prop_pci_host_devaddr; #define DEFINE_PROP(_name, _state, _field, _prop, _type) { \ .name = (_name), \ @@ -267,7 +280,7 @@ extern PropertyInfo qdev_prop_blocksize; #define DEFINE_PROP_HEX64(_n, _s, _f, _d) \ DEFINE_PROP_DEFAULT(_n, _s, _f, _d, qdev_prop_hex64, uint64_t) #define DEFINE_PROP_PCI_DEVFN(_n, _s, _f, _d) \ - DEFINE_PROP_DEFAULT(_n, _s, _f, _d, qdev_prop_pci_devfn, uint32_t) + DEFINE_PROP_DEFAULT(_n, _s, _f, _d, qdev_prop_pci_devfn, int32_t) #define DEFINE_PROP_PTR(_n, _s, _f) \ DEFINE_PROP(_n, _s, _f, qdev_prop_ptr, void*) @@ -288,13 +301,14 @@ extern PropertyInfo qdev_prop_blocksize; LostTickPolicy) #define DEFINE_PROP_BLOCKSIZE(_n, _s, _f, _d) \ DEFINE_PROP_DEFAULT(_n, _s, _f, _d, qdev_prop_blocksize, uint16_t) +#define DEFINE_PROP_PCI_HOST_DEVADDR(_n, _s, _f) \ + DEFINE_PROP(_n, _s, _f, qdev_prop_pci_host_devaddr, PCIHostDeviceAddress) #define DEFINE_PROP_END_OF_LIST() \ {} /* Set properties between creation and init. */ void *qdev_get_prop_ptr(DeviceState *dev, Property *prop); -int qdev_prop_exists(DeviceState *dev, const char *name); int qdev_prop_parse(DeviceState *dev, const char *name, const char *value); void qdev_prop_set_bit(DeviceState *dev, const char *name, bool value); void qdev_prop_set_uint8(DeviceState *dev, const char *name, uint8_t value); @@ -312,7 +326,6 @@ void qdev_prop_set_macaddr(DeviceState *dev, const char *name, uint8_t *value); void qdev_prop_set_enum(DeviceState *dev, const char *name, int value); /* FIXME: Remove opaque pointer properties. */ void qdev_prop_set_ptr(DeviceState *dev, const char *name, void *value); -void qdev_prop_set_defaults(DeviceState *dev, Property *props); void qdev_prop_register_global_list(GlobalProperty *props); void qdev_prop_set_globals(DeviceState *dev); @@ -321,9 +334,6 @@ void error_set_from_qdev_prop_error(Error **errp, int ret, DeviceState *dev, char *qdev_get_fw_dev_path(DeviceState *dev); -/* This is a nasty hack to allow passing a NULL bus to qdev_create. 
*/ -extern struct BusInfo system_bus_info; - /** * @qdev_property_add_static - add a @Property to a device referencing a * field in a struct. @@ -349,10 +359,6 @@ const VMStateDescription *qdev_get_vmsd(DeviceState *dev); const char *qdev_fw_name(DeviceState *dev); -BusInfo *qdev_get_bus_info(DeviceState *dev); - -Property *qdev_get_props(DeviceState *dev); - Object *qdev_get_machine(void); /* FIXME: make this a link<> */ @@ -360,4 +366,6 @@ void qdev_set_parent_bus(DeviceState *dev, BusState *bus); extern int qdev_hotplug; +char *qdev_get_dev_path(DeviceState *dev); + #endif @@ -30,7 +30,7 @@ /* * NOTE: SPICE_RING_PROD_ITEM accesses memory on the pci bar and as * such can be changed by the guest, so to avoid a guest trigerrable - * abort we just set qxl_guest_bug and set the return to NULL. Still + * abort we just qxl_set_guest_bug and set the return to NULL. Still * it may happen as a result of emulator bug as well. */ #undef SPICE_RING_PROD_ITEM @@ -40,7 +40,7 @@ uint32_t prod = (r)->prod & SPICE_RING_INDEX_MASK(r); \ typeof(&(r)->items[prod]) m_item = &(r)->items[prod]; \ if (!((uint8_t*)m_item >= (uint8_t*)(start) && (uint8_t*)(m_item + 1) <= (uint8_t*)(end))) { \ - qxl_guest_bug(qxl, "SPICE_RING_PROD_ITEM indices mismatch " \ + qxl_set_guest_bug(qxl, "SPICE_RING_PROD_ITEM indices mismatch " \ "! %p <= %p < %p", (uint8_t *)start, \ (uint8_t *)m_item, (uint8_t *)end); \ ret = NULL; \ @@ -56,7 +56,7 @@ uint32_t cons = (r)->cons & SPICE_RING_INDEX_MASK(r); \ typeof(&(r)->items[cons]) m_item = &(r)->items[cons]; \ if (!((uint8_t*)m_item >= (uint8_t*)(start) && (uint8_t*)(m_item + 1) <= (uint8_t*)(end))) { \ - qxl_guest_bug(qxl, "SPICE_RING_CONS_ITEM indices mismatch " \ + qxl_set_guest_bug(qxl, "SPICE_RING_CONS_ITEM indices mismatch " \ "! %p <= %p < %p", (uint8_t *)start, \ (uint8_t *)m_item, (uint8_t *)end); \ ret = NULL; \ @@ -114,20 +114,16 @@ static QXLMode qxl_modes[] = { QXL_MODE_EX(1600, 1200), QXL_MODE_EX(1680, 1050), QXL_MODE_EX(1920, 1080), -#if VGA_RAM_SIZE >= (16 * 1024 * 1024) /* these modes need more than 8 MB video memory */ QXL_MODE_EX(1920, 1200), QXL_MODE_EX(1920, 1440), QXL_MODE_EX(2048, 1536), QXL_MODE_EX(2560, 1440), QXL_MODE_EX(2560, 1600), -#endif -#if VGA_RAM_SIZE >= (32 * 1024 * 1024) /* these modes need more than 16 MB video memory */ QXL_MODE_EX(2560, 2048), QXL_MODE_EX(2800, 2100), QXL_MODE_EX(3200, 2400), -#endif }; static PCIQXLDevice *qxl0; @@ -138,9 +134,10 @@ static void qxl_reset_memslots(PCIQXLDevice *d); static void qxl_reset_surfaces(PCIQXLDevice *d); static void qxl_ring_set_dirty(PCIQXLDevice *qxl); -void qxl_guest_bug(PCIQXLDevice *qxl, const char *msg, ...) +void qxl_set_guest_bug(PCIQXLDevice *qxl, const char *msg, ...) { qxl_send_events(qxl, QXL_INTERRUPT_ERROR); + qxl->guest_bug = 1; if (qxl->guestdebug) { va_list ap; va_start(ap, msg); @@ -151,6 +148,10 @@ void qxl_guest_bug(PCIQXLDevice *qxl, const char *msg, ...) 
} } +static void qxl_clear_guest_bug(PCIQXLDevice *qxl) +{ + qxl->guest_bug = 0; +} void qxl_spice_update_area(PCIQXLDevice *qxl, uint32_t surface_id, struct QXLRect *area, struct QXLRect *dirty_rects, @@ -279,6 +280,7 @@ static inline uint32_t msb_mask(uint32_t val) static ram_addr_t qxl_rom_size(void) { uint32_t rom_size = sizeof(QXLRom) + sizeof(QXLModes) + sizeof(qxl_modes); + rom_size = MAX(rom_size, TARGET_PAGE_SIZE); rom_size = msb_mask(rom_size * 2 - 1); return rom_size; @@ -291,8 +293,8 @@ static void init_qxl_rom(PCIQXLDevice *d) uint32_t ram_header_size; uint32_t surface0_area_size; uint32_t num_pages; - uint32_t fb, maxfb = 0; - int i; + uint32_t fb; + int i, n; memset(rom, 0, d->rom_size); @@ -307,26 +309,25 @@ static void init_qxl_rom(PCIQXLDevice *d) rom->slots_end = NUM_MEMSLOTS - 1; rom->n_surfaces = cpu_to_le32(NUM_SURFACES); - modes->n_modes = cpu_to_le32(ARRAY_SIZE(qxl_modes)); - for (i = 0; i < modes->n_modes; i++) { + for (i = 0, n = 0; i < ARRAY_SIZE(qxl_modes); i++) { fb = qxl_modes[i].y_res * qxl_modes[i].stride; - if (maxfb < fb) { - maxfb = fb; + if (fb > d->vgamem_size) { + continue; } - modes->modes[i].id = cpu_to_le32(i); - modes->modes[i].x_res = cpu_to_le32(qxl_modes[i].x_res); - modes->modes[i].y_res = cpu_to_le32(qxl_modes[i].y_res); - modes->modes[i].bits = cpu_to_le32(qxl_modes[i].bits); - modes->modes[i].stride = cpu_to_le32(qxl_modes[i].stride); - modes->modes[i].x_mili = cpu_to_le32(qxl_modes[i].x_mili); - modes->modes[i].y_mili = cpu_to_le32(qxl_modes[i].y_mili); - modes->modes[i].orientation = cpu_to_le32(qxl_modes[i].orientation); - } - if (maxfb < VGA_RAM_SIZE && d->id == 0) - maxfb = VGA_RAM_SIZE; + modes->modes[n].id = cpu_to_le32(i); + modes->modes[n].x_res = cpu_to_le32(qxl_modes[i].x_res); + modes->modes[n].y_res = cpu_to_le32(qxl_modes[i].y_res); + modes->modes[n].bits = cpu_to_le32(qxl_modes[i].bits); + modes->modes[n].stride = cpu_to_le32(qxl_modes[i].stride); + modes->modes[n].x_mili = cpu_to_le32(qxl_modes[i].x_mili); + modes->modes[n].y_mili = cpu_to_le32(qxl_modes[i].y_mili); + modes->modes[n].orientation = cpu_to_le32(qxl_modes[i].orientation); + n++; + } + modes->n_modes = cpu_to_le32(n); ram_header_size = ALIGN(sizeof(QXLRam), 4096); - surface0_area_size = ALIGN(maxfb, 4096); + surface0_area_size = ALIGN(d->vgamem_size, 4096); num_pages = d->vga.vram_size; num_pages -= ram_header_size; num_pages -= surface0_area_size; @@ -411,7 +412,8 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext) uint32_t id = le32_to_cpu(cmd->surface_id); if (id >= NUM_SURFACES) { - qxl_guest_bug(qxl, "QXL_CMD_SURFACE id %d >= %d", id, NUM_SURFACES); + qxl_set_guest_bug(qxl, "QXL_CMD_SURFACE id %d >= %d", id, + NUM_SURFACES); return 1; } qemu_mutex_lock(&qxl->track_lock); @@ -571,7 +573,7 @@ static int interface_get_command(QXLInstance *sin, struct QXLCommandExt *ext) case QXL_MODE_NATIVE: case QXL_MODE_UNDEFINED: ring = &qxl->ram->cmd_ring; - if (SPICE_RING_IS_EMPTY(ring)) { + if (qxl->guest_bug || SPICE_RING_IS_EMPTY(ring)) { return false; } SPICE_RING_CONS_ITEM(qxl, ring, cmd); @@ -931,6 +933,7 @@ static void qxl_enter_vga_mode(PCIQXLDevice *d) qemu_spice_create_host_primary(&d->ssd); d->mode = QXL_MODE_VGA; memset(&d->ssd.dirty, 0, sizeof(d->ssd.dirty)); + vga_dirty_log_start(&d->vga); } static void qxl_exit_vga_mode(PCIQXLDevice *d) @@ -939,6 +942,7 @@ static void qxl_exit_vga_mode(PCIQXLDevice *d) return; } trace_qxl_exit_vga_mode(d->id); + vga_dirty_log_stop(&d->vga); qxl_destroy_primary(d, QXL_SYNC); } @@ -977,6 +981,8 @@ 
static void qxl_soft_reset(PCIQXLDevice *d) { trace_qxl_soft_reset(d->id); qxl_check_state(d); + qxl_clear_guest_bug(d); + d->current_async = QXL_UNDEFINED_IO; if (d->id == 0) { qxl_enter_vga_mode(d); @@ -1061,12 +1067,12 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta, trace_qxl_memslot_add_guest(d->id, slot_id, guest_start, guest_end); if (slot_id >= NUM_MEMSLOTS) { - qxl_guest_bug(d, "%s: slot_id >= NUM_MEMSLOTS %d >= %d", __func__, + qxl_set_guest_bug(d, "%s: slot_id >= NUM_MEMSLOTS %d >= %d", __func__, slot_id, NUM_MEMSLOTS); return 1; } if (guest_start > guest_end) { - qxl_guest_bug(d, "%s: guest_start > guest_end 0x%" PRIx64 + qxl_set_guest_bug(d, "%s: guest_start > guest_end 0x%" PRIx64 " > 0x%" PRIx64, __func__, guest_start, guest_end); return 1; } @@ -1091,7 +1097,7 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta, break; } if (i == ARRAY_SIZE(regions)) { - qxl_guest_bug(d, "%s: finished loop without match", __func__); + qxl_set_guest_bug(d, "%s: finished loop without match", __func__); return 1; } @@ -1105,7 +1111,7 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta, break; default: /* should not happen */ - qxl_guest_bug(d, "%s: pci_region = %d", __func__, pci_region); + qxl_set_guest_bug(d, "%s: pci_region = %d", __func__, pci_region); return 1; } @@ -1156,21 +1162,24 @@ void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id) return (void *)(intptr_t)offset; case MEMSLOT_GROUP_GUEST: if (slot >= NUM_MEMSLOTS) { - qxl_guest_bug(qxl, "slot too large %d >= %d", slot, NUM_MEMSLOTS); + qxl_set_guest_bug(qxl, "slot too large %d >= %d", slot, + NUM_MEMSLOTS); return NULL; } if (!qxl->guest_slots[slot].active) { - qxl_guest_bug(qxl, "inactive slot %d\n", slot); + qxl_set_guest_bug(qxl, "inactive slot %d\n", slot); return NULL; } if (offset < qxl->guest_slots[slot].delta) { - qxl_guest_bug(qxl, "slot %d offset %"PRIu64" < delta %"PRIu64"\n", + qxl_set_guest_bug(qxl, + "slot %d offset %"PRIu64" < delta %"PRIu64"\n", slot, offset, qxl->guest_slots[slot].delta); return NULL; } offset -= qxl->guest_slots[slot].delta; if (offset > qxl->guest_slots[slot].size) { - qxl_guest_bug(qxl, "slot %d offset %"PRIu64" > size %"PRIu64"\n", + qxl_set_guest_bug(qxl, + "slot %d offset %"PRIu64" > size %"PRIu64"\n", slot, offset, qxl->guest_slots[slot].size); return NULL; } @@ -1190,9 +1199,19 @@ static void qxl_create_guest_primary(PCIQXLDevice *qxl, int loadvm, { QXLDevSurfaceCreate surface; QXLSurfaceCreate *sc = &qxl->guest_primary.surface; + int size; + int requested_height = le32_to_cpu(sc->height); + int requested_stride = le32_to_cpu(sc->stride); + + size = abs(requested_stride) * requested_height; + if (size > qxl->vgamem_size) { + qxl_set_guest_bug(qxl, "%s: requested primary larger than framebuffer" + " size", __func__); + return; + } if (qxl->mode == QXL_MODE_NATIVE) { - qxl_guest_bug(qxl, "%s: nop since already in QXL_MODE_NATIVE", + qxl_set_guest_bug(qxl, "%s: nop since already in QXL_MODE_NATIVE", __func__); } qxl_exit_vga_mode(qxl); @@ -1291,6 +1310,10 @@ static void ioport_write(void *opaque, target_phys_addr_t addr, qxl_async_io async = QXL_SYNC; uint32_t orig_io_port = io_port; + if (d->guest_bug && io_port != QXL_IO_RESET) { + return; + } + switch (io_port) { case QXL_IO_RESET: case QXL_IO_SET_MODE: @@ -1342,7 +1365,7 @@ async_common: async = QXL_ASYNC; qemu_mutex_lock(&d->async_lock); if (d->current_async != QXL_UNDEFINED_IO) { - qxl_guest_bug(d, "%d async started before last
(%d) complete", + qxl_set_guest_bug(d, "%d async started before last (%d) complete", io_port, d->current_async); qemu_mutex_unlock(&d->async_lock); return; @@ -1403,11 +1426,12 @@ async_common: break; case QXL_IO_MEMSLOT_ADD: if (val >= NUM_MEMSLOTS) { - qxl_guest_bug(d, "QXL_IO_MEMSLOT_ADD: val out of range"); + qxl_set_guest_bug(d, "QXL_IO_MEMSLOT_ADD: val out of range"); break; } if (d->guest_slots[val].active) { - qxl_guest_bug(d, "QXL_IO_MEMSLOT_ADD: memory slot already active"); + qxl_set_guest_bug(d, + "QXL_IO_MEMSLOT_ADD: memory slot already active"); break; } d->guest_slots[val].slot = d->ram->mem_slot; @@ -1415,14 +1439,14 @@ async_common: break; case QXL_IO_MEMSLOT_DEL: if (val >= NUM_MEMSLOTS) { - qxl_guest_bug(d, "QXL_IO_MEMSLOT_DEL: val out of range"); + qxl_set_guest_bug(d, "QXL_IO_MEMSLOT_DEL: val out of range"); break; } qxl_del_memslot(d, val); break; case QXL_IO_CREATE_PRIMARY: if (val != 0) { - qxl_guest_bug(d, "QXL_IO_CREATE_PRIMARY (async=%d): val != 0", + qxl_set_guest_bug(d, "QXL_IO_CREATE_PRIMARY (async=%d): val != 0", async); goto cancel_async; } @@ -1431,7 +1455,7 @@ async_common: break; case QXL_IO_DESTROY_PRIMARY: if (val != 0) { - qxl_guest_bug(d, "QXL_IO_DESTROY_PRIMARY (async=%d): val != 0", + qxl_set_guest_bug(d, "QXL_IO_DESTROY_PRIMARY (async=%d): val != 0", async); goto cancel_async; } @@ -1443,7 +1467,7 @@ async_common: break; case QXL_IO_DESTROY_SURFACE_WAIT: if (val >= NUM_SURFACES) { - qxl_guest_bug(d, "QXL_IO_DESTROY_SURFACE (async=%d):" + qxl_set_guest_bug(d, "QXL_IO_DESTROY_SURFACE (async=%d):" "%" PRIu64 " >= NUM_SURFACES", async, val); goto cancel_async; } @@ -1467,7 +1491,7 @@ async_common: qxl_spice_destroy_surfaces(d, async); break; default: - qxl_guest_bug(d, "%s: unexpected ioport=0x%x\n", __func__, io_port); + qxl_set_guest_bug(d, "%s: unexpected ioport=0x%x\n", __func__, io_port); } return; cancel_async: @@ -1694,14 +1718,20 @@ static DisplayChangeListener display_listener = { .dpy_refresh = display_refresh, }; -static void qxl_init_ramsize(PCIQXLDevice *qxl, uint32_t ram_min_mb) +static void qxl_init_ramsize(PCIQXLDevice *qxl) { - /* vga ram (bar 0) */ + /* vga mode framebuffer / primary surface (bar 0, first part) */ + if (qxl->vgamem_size_mb < 8) { + qxl->vgamem_size_mb = 8; + } + qxl->vgamem_size = qxl->vgamem_size_mb * 1024 * 1024; + + /* vga ram (bar 0, total) */ if (qxl->ram_size_mb != -1) { qxl->vga.vram_size = qxl->ram_size_mb * 1024 * 1024; } - if (qxl->vga.vram_size < ram_min_mb * 1024 * 1024) { - qxl->vga.vram_size = ram_min_mb * 1024 * 1024; + if (qxl->vga.vram_size < qxl->vgamem_size * 2) { + qxl->vga.vram_size = qxl->vgamem_size * 2; } /* vram32 (surfaces, 32bit, bar 1) */ @@ -1724,6 +1754,7 @@ static void qxl_init_ramsize(PCIQXLDevice *qxl, uint32_t ram_min_mb) qxl->vram32_size = 4096; qxl->vram_size = 4096; } + qxl->vgamem_size = msb_mask(qxl->vgamem_size * 2 - 1); qxl->vga.vram_size = msb_mask(qxl->vga.vram_size * 2 - 1); qxl->vram32_size = msb_mask(qxl->vram32_size * 2 - 1); qxl->vram_size = msb_mask(qxl->vram_size * 2 - 1); @@ -1742,6 +1773,7 @@ static int qxl_init_common(PCIQXLDevice *qxl) qemu_mutex_init(&qxl->track_lock); qemu_mutex_init(&qxl->async_lock); qxl->current_async = QXL_UNDEFINED_IO; + qxl->guest_bug = 0; switch (qxl->revision) { case 1: /* spice 0.4 -- qxl-1 */ @@ -1834,8 +1866,9 @@ static int qxl_init_primary(PCIDevice *dev) PortioList *qxl_vga_port_list = g_new(PortioList, 1); qxl->id = 0; - qxl_init_ramsize(qxl, 32); - vga_common_init(vga, qxl->vga.vram_size); + qxl_init_ramsize(qxl); + 
vga->vram_size_mb = qxl->vga.vram_size >> 20; + vga_common_init(vga); vga_init(vga, pci_address_space(dev), pci_address_space_io(dev), false); portio_list_init(qxl_vga_port_list, qxl_vga_portio_list, vga, "vga"); portio_list_add(qxl_vga_port_list, pci_address_space_io(dev), 0x3b0); @@ -1856,7 +1889,7 @@ static int qxl_init_secondary(PCIDevice *dev) PCIQXLDevice *qxl = DO_UPCAST(PCIQXLDevice, pci, dev); qxl->id = device_id++; - qxl_init_ramsize(qxl, 16); + qxl_init_ramsize(qxl); memory_region_init_ram(&qxl->vga.vram, "qxl.vgavram", qxl->vga.vram_size); vmstate_register_ram(&qxl->vga.vram, &qxl->pci.qdev); qxl->vga.vram_ptr = memory_region_get_ram_ptr(&qxl->vga.vram); @@ -2034,6 +2067,7 @@ static Property qxl_properties[] = { DEFINE_PROP_UINT32("ram_size_mb", PCIQXLDevice, ram_size_mb, -1), DEFINE_PROP_UINT32("vram_size_mb", PCIQXLDevice, vram32_size_mb, -1), DEFINE_PROP_UINT32("vram64_size_mb", PCIQXLDevice, vram_size_mb, -1), + DEFINE_PROP_UINT32("vgamem_mb", PCIQXLDevice, vgamem_size_mb, 16), DEFINE_PROP_END_OF_LIST(), }; @@ -31,6 +31,9 @@ typedef struct PCIQXLDevice { uint32_t debug; uint32_t guestdebug; uint32_t cmdlog; + + uint32_t guest_bug; + enum qxl_mode mode; uint32_t cmdflags; int generation; @@ -81,6 +84,7 @@ typedef struct PCIQXLDevice { QXLReleaseInfo *last_release; uint32_t last_release_offset; uint32_t oom_running; + uint32_t vgamem_size; /* rom pci bar */ QXLRom shadow_rom; @@ -102,6 +106,7 @@ typedef struct PCIQXLDevice { uint32_t ram_size_mb; uint32_t vram_size_mb; uint32_t vram32_size_mb; + uint32_t vgamem_size_mb; /* qxl_render_update state */ int render_update_cookie_num; @@ -127,7 +132,8 @@ typedef struct PCIQXLDevice { /* qxl.c */ void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL phys, int group_id); -void qxl_guest_bug(PCIQXLDevice *qxl, const char *msg, ...) GCC_FMT_ATTR(2, 3); +void qxl_set_guest_bug(PCIQXLDevice *qxl, const char *msg, ...) + GCC_FMT_ATTR(2, 3); void qxl_spice_update_area(PCIQXLDevice *qxl, uint32_t surface_id, struct QXLRect *area, struct QXLRect *dirty_rects, diff --git a/hw/realview.c b/hw/realview.c index ecf470179a..19db4d026b 100644 --- a/hw/realview.c +++ b/hw/realview.c @@ -50,7 +50,8 @@ static void realview_init(ram_addr_t ram_size, const char *initrd_filename, const char *cpu_model, enum realview_board_type board_type) { - CPUARMState *env = NULL; + ARMCPU *cpu = NULL; + CPUARMState *env; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *ram_lo = g_new(MemoryRegion, 1); MemoryRegion *ram_hi = g_new(MemoryRegion, 1); @@ -88,14 +89,15 @@ static void realview_init(ram_addr_t ram_size, break; } for (n = 0; n < smp_cpus; n++) { - env = cpu_init(cpu_model); - if (!env) { + cpu = cpu_arm_init(cpu_model); + if (!cpu) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } - irqp = arm_pic_init_cpu(env); + irqp = arm_pic_init_cpu(cpu); cpu_irq[n] = irqp[ARM_PIC_CPU_IRQ]; } + env = &cpu->env; if (arm_feature(env, ARM_FEATURE_V7)) { if (is_mpcore) { proc_id = 0x0c000000; @@ -325,7 +327,7 @@ static void realview_init(ram_addr_t ram_size, realview_binfo.nb_cpus = smp_cpus; realview_binfo.board_id = realview_board_id[board_type]; realview_binfo.loader_start = (board_type == BOARD_PB_A8 ? 
0x70000000 : 0); - arm_load_kernel(first_cpu, &realview_binfo); + arm_load_kernel(arm_env_get_cpu(first_cpu), &realview_binfo); } static void realview_eb_init(ram_addr_t ram_size, diff --git a/hw/rtl8139.c b/hw/rtl8139.c index 8128b64a0f..436b015c64 100644 --- a/hw/rtl8139.c +++ b/hw/rtl8139.c @@ -781,6 +781,13 @@ static inline dma_addr_t rtl8139_addr64(uint32_t low, uint32_t high) #endif } +/* Workaround for buggy guest driver such as linux who allocates rx + * rings after the receiver were enabled. */ +static bool rtl8139_cp_rx_valid(RTL8139State *s) +{ + return !(s->RxRingAddrLO == 0 && s->RxRingAddrHI == 0); +} + static int rtl8139_can_receive(VLANClientState *nc) { RTL8139State *s = DO_UPCAST(NICState, nc, nc)->opaque; @@ -791,18 +798,15 @@ static int rtl8139_can_receive(VLANClientState *nc) return 1; if (!rtl8139_receiver_enabled(s)) return 1; - /* network/host communication happens only in normal mode */ - if ((s->Cfg9346 & Chip9346_op_mask) != Cfg9346_Normal) - return 0; - if (rtl8139_cp_receiver_enabled(s)) { + if (rtl8139_cp_receiver_enabled(s) && rtl8139_cp_rx_valid(s)) { /* ??? Flow control not implemented in c+ mode. This is a hack to work around slirp deficiencies anyway. */ return 1; } else { avail = MOD2(s->RxBufferSize + s->RxBufPtr - s->RxBufAddr, s->RxBufferSize); - return (avail == 0 || avail >= 1514); + return (avail == 0 || avail >= 1514 || (s->IntrMask & RxOverflow)); } } @@ -836,12 +840,6 @@ static ssize_t rtl8139_do_receive(VLANClientState *nc, const uint8_t *buf, size_ return -1; } - /* check whether we are in normal mode */ - if ((s->Cfg9346 & Chip9346_op_mask) != Cfg9346_Normal) { - DPRINTF("not in normal op mode\n"); - return -1; - } - /* XXX: check this */ if (s->RxConfig & AcceptAllPhys) { /* promiscuous: receive all */ @@ -946,6 +944,10 @@ static ssize_t rtl8139_do_receive(VLANClientState *nc, const uint8_t *buf, size_ if (rtl8139_cp_receiver_enabled(s)) { + if (!rtl8139_cp_rx_valid(s)) { + return size; + } + DPRINTF("in C+ Rx mode ================\n"); /* begin C+ receiver mode */ diff --git a/hw/s390-virtio-bus.c b/hw/s390-virtio-bus.c index 1d38a8f5c5..4d49b96f94 100644 --- a/hw/s390-virtio-bus.c +++ b/hw/s390-virtio-bus.c @@ -45,9 +45,10 @@ #define VIRTIO_EXT_CODE 0x2603 -struct BusInfo s390_virtio_bus_info = { - .name = "s390-virtio", - .size = sizeof(VirtIOS390Bus), +static const TypeInfo s390_virtio_bus_info = { + .name = TYPE_S390_VIRTIO_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(VirtIOS390Bus), }; static const VirtIOBindings virtio_s390_bindings; @@ -92,7 +93,7 @@ VirtIOS390Bus *s390_virtio_bus_init(ram_addr_t *ram_size) /* Create bus on bridge device */ - _bus = qbus_create(&s390_virtio_bus_info, dev, "s390-virtio"); + _bus = qbus_create(TYPE_S390_VIRTIO_BUS, dev, "s390-virtio"); bus = DO_UPCAST(VirtIOS390Bus, bus, _bus); bus->dev_page = *ram_size; @@ -140,7 +141,8 @@ static int s390_virtio_device_init(VirtIOS390Device *dev, VirtIODevice *vdev) s390_virtio_device_sync(dev); s390_virtio_reset_idx(dev); if (dev->qdev.hotplugged) { - CPUS390XState *env = s390_cpu_addr2state(0); + S390CPU *cpu = s390_cpu_addr2state(0); + CPUS390XState *env = &cpu->env; s390_virtio_irq(env, VIRTIO_PARAM_DEV_ADD, dev->dev_offs); } @@ -313,20 +315,20 @@ VirtIOS390Device *s390_virtio_bus_find_vring(VirtIOS390Bus *bus, ram_addr_t mem, int *vq_num) { - VirtIOS390Device *_dev; - DeviceState *dev; + BusChild *kid; int i; - QTAILQ_FOREACH(dev, &bus->bus.children, sibling) { - _dev = (VirtIOS390Device *)dev; + QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { + 
VirtIOS390Device *dev = (VirtIOS390Device *)kid->child; + for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) { - if (!virtio_queue_get_addr(_dev->vdev, i)) + if (!virtio_queue_get_addr(dev->vdev, i)) break; - if (virtio_queue_get_addr(_dev->vdev, i) == mem) { + if (virtio_queue_get_addr(dev->vdev, i) == mem) { if (vq_num) { *vq_num = i; } - return _dev; + return dev; } } } @@ -337,13 +339,12 @@ VirtIOS390Device *s390_virtio_bus_find_vring(VirtIOS390Bus *bus, /* Find a device by device descriptor location */ VirtIOS390Device *s390_virtio_bus_find_mem(VirtIOS390Bus *bus, ram_addr_t mem) { - VirtIOS390Device *_dev; - DeviceState *dev; + BusChild *kid; - QTAILQ_FOREACH(dev, &bus->bus.children, sibling) { - _dev = (VirtIOS390Device *)dev; - if (_dev->dev_offs == mem) { - return _dev; + QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { + VirtIOS390Device *dev = (VirtIOS390Device *)kid->child; + if (dev->dev_offs == mem) { + return dev; } } @@ -354,7 +355,8 @@ static void virtio_s390_notify(void *opaque, uint16_t vector) { VirtIOS390Device *dev = (VirtIOS390Device*)opaque; uint64_t token = s390_virtio_device_vq_token(dev, vector); - CPUS390XState *env = s390_cpu_addr2state(0); + S390CPU *cpu = s390_cpu_addr2state(0); + CPUS390XState *env = &cpu->env; s390_virtio_irq(env, 0, token); } @@ -458,7 +460,7 @@ static void virtio_s390_device_class_init(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); dc->init = s390_virtio_busdev_init; - dc->bus_info = &s390_virtio_bus_info; + dc->bus_type = TYPE_S390_VIRTIO_BUS; dc->unplug = qdev_simple_unplug_cb; } @@ -519,6 +521,7 @@ static TypeInfo s390_virtio_bridge_info = { static void s390_virtio_register_types(void) { + type_register_static(&s390_virtio_bus_info); type_register_static(&virtio_s390_device_info); type_register_static(&s390_virtio_serial); type_register_static(&s390_virtio_blk); diff --git a/hw/s390-virtio-bus.h b/hw/s390-virtio-bus.h index 4b99d02298..4873134ae9 100644 --- a/hw/s390-virtio-bus.h +++ b/hw/s390-virtio-bus.h @@ -52,6 +52,10 @@ #define VIRTIO_S390_DEVICE_GET_CLASS(obj) \ OBJECT_GET_CLASS(VirtIOS390DeviceClass, (obj), TYPE_VIRTIO_S390_DEVICE) +#define TYPE_S390_VIRTIO_BUS "s390-virtio-bus" +#define S390_VIRTIO_BUS(obj) \ + OBJECT_CHECK(VirtIOS390Bus, (obj), TYPE_S390_VIRTIO_BUS) + typedef struct VirtIOS390Device VirtIOS390Device; typedef struct VirtIOS390DeviceClass { diff --git a/hw/s390-virtio.c b/hw/s390-virtio.c index c0e19fd66d..47eed35da3 100644 --- a/hw/s390-virtio.c +++ b/hw/s390-virtio.c @@ -61,9 +61,9 @@ #define MAX_BLK_DEVS 10 static VirtIOS390Bus *s390_bus; -static CPUS390XState **ipi_states; +static S390CPU **ipi_states; -CPUS390XState *s390_cpu_addr2state(uint16_t cpu_addr) +S390CPU *s390_cpu_addr2state(uint16_t cpu_addr) { if (cpu_addr >= smp_cpus) { return NULL; @@ -206,16 +206,18 @@ static void s390_init(ram_addr_t my_ram_size, cpu_model = "host"; } - ipi_states = g_malloc(sizeof(CPUS390XState *) * smp_cpus); + ipi_states = g_malloc(sizeof(S390CPU *) * smp_cpus); for (i = 0; i < smp_cpus; i++) { + S390CPU *cpu; CPUS390XState *tmp_env; - tmp_env = cpu_init(cpu_model); + cpu = cpu_s390x_init(cpu_model); + tmp_env = &cpu->env; if (!env) { env = tmp_env; } - ipi_states[i] = tmp_env; + ipi_states[i] = cpu; tmp_env->halted = 1; tmp_env->exception_index = EXCP_HLT; tmp_env->storage_keys = storage_keys; diff --git a/hw/s390x/Makefile.objs b/hw/s390x/Makefile.objs new file mode 100644 index 0000000000..dcdcac8a81 --- /dev/null +++ b/hw/s390x/Makefile.objs @@ -0,0 +1,3 @@ +obj-y = s390-virtio-bus.o s390-virtio.o + 
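/*
 * Illustrative sketch, not part of the patch: with the qdev/QOM conversion in
 * this series, bus child lists hold BusChild wrappers rather than DeviceState
 * pointers, so every loop over bus children now dereferences kid->child (see
 * the s390-virtio-bus.c hunks above and the scsi-bus.c hunks below).  The
 * hypothetical helper below follows the same idiom as scsi_device_find() and
 * assumes the usual QEMU qdev and SCSI headers are included.
 */
static SCSIDevice *find_scsi_device(SCSIBus *bus, int channel, int id)
{
    BusChild *kid;

    QTAILQ_FOREACH(kid, &bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;         /* the wrapped device */
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            return dev;
        }
    }
    return NULL;
}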
+obj-y := $(addprefix ../,$(obj-y)) diff --git a/hw/scsi-bus.c b/hw/scsi-bus.c index f10f3ec25c..5ad1013be1 100644 --- a/hw/scsi-bus.c +++ b/hw/scsi-bus.c @@ -12,17 +12,26 @@ static char *scsibus_get_fw_dev_path(DeviceState *dev); static int scsi_req_parse(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf); static void scsi_req_dequeue(SCSIRequest *req); -static struct BusInfo scsi_bus_info = { - .name = "SCSI", - .size = sizeof(SCSIBus), - .get_dev_path = scsibus_get_dev_path, - .get_fw_dev_path = scsibus_get_fw_dev_path, - .props = (Property[]) { - DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0), - DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1), - DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1), - DEFINE_PROP_END_OF_LIST(), - }, +static Property scsi_props[] = { + DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0), + DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1), + DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void scsi_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->get_dev_path = scsibus_get_dev_path; + k->get_fw_dev_path = scsibus_get_fw_dev_path; +} + +static const TypeInfo scsi_bus_info = { + .name = TYPE_SCSI_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(SCSIBus), + .class_init = scsi_bus_class_init, }; static int next_scsi_bus; @@ -65,7 +74,7 @@ static void scsi_device_unit_attention_reported(SCSIDevice *s) /* Create a scsi bus, and attach devices to it. */ void scsi_bus_new(SCSIBus *bus, DeviceState *host, const SCSIBusInfo *info) { - qbus_create_inplace(&bus->qbus, &scsi_bus_info, host, NULL); + qbus_create_inplace(&bus->qbus, TYPE_SCSI_BUS, host, NULL); bus->busnr = next_scsi_bus++; bus->info = info; bus->qbus.allow_hotplug = 1; @@ -205,7 +214,7 @@ SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockDriverState *bdrv, if (bootindex >= 0) { qdev_prop_set_int32(dev, "bootindex", bootindex); } - if (qdev_prop_exists(dev, "removable")) { + if (object_property_find(OBJECT(dev), "removable", NULL)) { qdev_prop_set_bit(dev, "removable", removable); } if (qdev_prop_set_drive(dev, "drive", bdrv) < 0) { @@ -306,7 +315,7 @@ static void store_lun(uint8_t *outbuf, int lun) static bool scsi_target_emulate_report_luns(SCSITargetReq *r) { - DeviceState *qdev; + BusChild *kid; int i, len, n; int channel, id; bool found_lun0; @@ -321,7 +330,8 @@ static bool scsi_target_emulate_report_luns(SCSITargetReq *r) id = r->req.dev->id; found_lun0 = false; n = 0; - QTAILQ_FOREACH(qdev, &r->req.bus->qbus.children, sibling) { + QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) { + DeviceState *qdev = kid->child; SCSIDevice *dev = SCSI_DEVICE(qdev); if (dev->channel == channel && dev->id == id) { @@ -343,7 +353,8 @@ static bool scsi_target_emulate_report_luns(SCSITargetReq *r) memset(r->buf, 0, len); stl_be_p(&r->buf, n); i = found_lun0 ? 8 : 16; - QTAILQ_FOREACH(qdev, &r->req.bus->qbus.children, sibling) { + QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) { + DeviceState *qdev = kid->child; SCSIDevice *dev = SCSI_DEVICE(qdev); if (dev->channel == channel && dev->id == id) { @@ -406,7 +417,7 @@ static bool scsi_target_emulate_inquiry(SCSITargetReq *r) r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ. 
*/ memcpy(&r->buf[8], "QEMU ", 8); memcpy(&r->buf[16], "QEMU TARGET ", 16); - strncpy((char *) &r->buf[32], QEMU_VERSION, 4); + pstrcpy((char *) &r->buf[32], 4, qemu_get_version()); } return true; } @@ -723,20 +734,16 @@ static int scsi_req_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf) switch (buf[0] >> 5) { case 0: cmd->xfer = buf[4]; - cmd->len = 6; break; case 1: case 2: cmd->xfer = lduw_be_p(&buf[7]); - cmd->len = 10; break; case 4: cmd->xfer = ldl_be_p(&buf[10]) & 0xffffffffULL; - cmd->len = 16; break; case 5: cmd->xfer = ldl_be_p(&buf[6]) & 0xffffffffULL; - cmd->len = 12; break; default: return -1; @@ -760,11 +767,9 @@ static int scsi_req_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf) case SYNCHRONIZE_CACHE_16: case LOCATE_16: case LOCK_UNLOCK_CACHE: - case LOAD_UNLOAD: case SET_CD_SPEED: case SET_LIMITS: case WRITE_LONG_10: - case MOVE_MEDIUM: case UPDATE_BLOCK: case RESERVE_TRACK: case SET_READ_AHEAD: @@ -874,7 +879,6 @@ static int scsi_req_stream_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *bu case READ_REVERSE: case RECOVER_BUFFERED_DATA: case WRITE_6: - cmd->len = 6; cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16); if (buf[1] & 0x01) { /* fixed */ cmd->xfer *= dev->blocksize; @@ -884,22 +888,34 @@ static int scsi_req_stream_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *bu case READ_REVERSE_16: case VERIFY_16: case WRITE_16: - cmd->len = 16; cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16); if (buf[1] & 0x01) { /* fixed */ cmd->xfer *= dev->blocksize; } break; case REWIND: - case START_STOP: - cmd->len = 6; + case LOAD_UNLOAD: cmd->xfer = 0; break; case SPACE_16: cmd->xfer = buf[13] | (buf[12] << 8); break; case READ_POSITION: - cmd->xfer = buf[8] | (buf[7] << 8); + switch (buf[1] & 0x1f) /* operation code */ { + case SHORT_FORM_BLOCK_ID: + case SHORT_FORM_VENDOR_SPECIFIC: + cmd->xfer = 20; + break; + case LONG_FORM: + cmd->xfer = 32; + break; + case EXTENDED_FORM: + cmd->xfer = buf[8] | (buf[7] << 8); + break; + default: + return -1; + } + break; case FORMAT_UNIT: cmd->xfer = buf[4] | (buf[3] << 8); @@ -911,6 +927,29 @@ static int scsi_req_stream_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *bu return 0; } +static int scsi_req_medium_changer_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf) +{ + switch (buf[0]) { + /* medium changer commands */ + case EXCHANGE_MEDIUM: + case INITIALIZE_ELEMENT_STATUS: + case INITIALIZE_ELEMENT_STATUS_WITH_RANGE: + case MOVE_MEDIUM: + case POSITION_TO_ELEMENT: + cmd->xfer = 0; + break; + case READ_ELEMENT_STATUS: + cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16); + break; + + /* generic commands */ + default: + return scsi_req_length(cmd, dev, buf); + } + return 0; +} + + static void scsi_cmd_xfer_mode(SCSICommand *cmd) { if (!cmd->xfer) { @@ -990,11 +1029,36 @@ int scsi_req_parse(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf) { int rc; - if (dev->type == TYPE_TAPE) { + switch (buf[0] >> 5) { + case 0: + cmd->len = 6; + break; + case 1: + case 2: + cmd->len = 10; + break; + case 4: + cmd->len = 16; + break; + case 5: + cmd->len = 12; + break; + default: + return -1; + } + + switch (dev->type) { + case TYPE_TAPE: rc = scsi_req_stream_length(cmd, dev, buf); - } else { + break; + case TYPE_MEDIUM_CHANGER: + rc = scsi_req_medium_changer_length(cmd, dev, buf); + break; + default: rc = scsi_req_length(cmd, dev, buf); + break; } + if (rc != 0) return rc; @@ -1172,7 +1236,8 @@ static const char *scsi_command_name(uint8_t cmd) [ REQUEST_SENSE ] = "REQUEST_SENSE", [ FORMAT_UNIT ] = "FORMAT_UNIT", [ 
READ_BLOCK_LIMITS ] = "READ_BLOCK_LIMITS", - [ REASSIGN_BLOCKS ] = "REASSIGN_BLOCKS", + [ REASSIGN_BLOCKS ] = "REASSIGN_BLOCKS/INITIALIZE ELEMENT STATUS", + /* LOAD_UNLOAD and INITIALIZE_ELEMENT_STATUS use the same operation code */ [ READ_6 ] = "READ_6", [ WRITE_6 ] = "WRITE_6", [ SET_CAPACITY ] = "SET_CAPACITY", @@ -1189,14 +1254,16 @@ static const char *scsi_command_name(uint8_t cmd) [ COPY ] = "COPY", [ ERASE ] = "ERASE", [ MODE_SENSE ] = "MODE_SENSE", - [ START_STOP ] = "START_STOP", + [ START_STOP ] = "START_STOP/LOAD_UNLOAD", + /* LOAD_UNLOAD and START_STOP use the same operation code */ [ RECEIVE_DIAGNOSTIC ] = "RECEIVE_DIAGNOSTIC", [ SEND_DIAGNOSTIC ] = "SEND_DIAGNOSTIC", [ ALLOW_MEDIUM_REMOVAL ] = "ALLOW_MEDIUM_REMOVAL", [ READ_CAPACITY_10 ] = "READ_CAPACITY_10", [ READ_10 ] = "READ_10", [ WRITE_10 ] = "WRITE_10", - [ SEEK_10 ] = "SEEK_10", + [ SEEK_10 ] = "SEEK_10/POSITION_TO_ELEMENT", + /* SEEK_10 and POSITION_TO_ELEMENT use the same operation code */ [ WRITE_VERIFY_10 ] = "WRITE_VERIFY_10", [ VERIFY_10 ] = "VERIFY_10", [ SEARCH_HIGH ] = "SEARCH_HIGH", @@ -1207,7 +1274,8 @@ static const char *scsi_command_name(uint8_t cmd) /* READ_POSITION and PRE_FETCH use the same operation code */ [ SYNCHRONIZE_CACHE ] = "SYNCHRONIZE_CACHE", [ LOCK_UNLOCK_CACHE ] = "LOCK_UNLOCK_CACHE", - [ READ_DEFECT_DATA ] = "READ_DEFECT_DATA", + [ READ_DEFECT_DATA ] = "READ_DEFECT_DATA/INITIALIZE_ELEMENT_STATUS_WITH_RANGE", + /* READ_DEFECT_DATA and INITIALIZE_ELEMENT_STATUS_WITH_RANGE use the same operation code */ [ MEDIUM_SCAN ] = "MEDIUM_SCAN", [ COMPARE ] = "COMPARE", [ COPY_VERIFY ] = "COPY_VERIFY", @@ -1252,6 +1320,7 @@ static const char *scsi_command_name(uint8_t cmd) [ REPORT_LUNS ] = "REPORT_LUNS", [ BLANK ] = "BLANK", [ MOVE_MEDIUM ] = "MOVE_MEDIUM", + [ EXCHANGE_MEDIUM ] = "EXCHANGE MEDIUM", [ LOAD_UNLOAD ] = "LOAD_UNLOAD", [ READ_12 ] = "READ_12", [ WRITE_12 ] = "WRITE_12", @@ -1378,7 +1447,7 @@ void scsi_req_complete(SCSIRequest *req, int status) assert(req->status == -1); req->status = status; - assert(req->sense_len < sizeof(req->sense)); + assert(req->sense_len <= sizeof(req->sense)); if (status == GOOD) { req->sense_len = 0; } @@ -1452,12 +1521,10 @@ static char *scsibus_get_dev_path(DeviceState *dev) { SCSIDevice *d = DO_UPCAST(SCSIDevice, qdev, dev); DeviceState *hba = dev->parent_bus->parent; - char *id = NULL; + char *id; char *path; - if (hba && hba->parent_bus && hba->parent_bus->info->get_dev_path) { - id = hba->parent_bus->info->get_dev_path(hba); - } + id = qdev_get_dev_path(hba); if (id) { path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun); } else { @@ -1480,10 +1547,11 @@ static char *scsibus_get_fw_dev_path(DeviceState *dev) SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun) { - DeviceState *qdev; + BusChild *kid; SCSIDevice *target_dev = NULL; - QTAILQ_FOREACH_REVERSE(qdev, &bus->qbus.children, ChildrenHead, sibling) { + QTAILQ_FOREACH_REVERSE(kid, &bus->qbus.children, ChildrenHead, sibling) { + DeviceState *qdev = kid->child; SCSIDevice *dev = SCSI_DEVICE(qdev); if (dev->channel == channel && dev->id == id) { @@ -1507,10 +1575,9 @@ static void put_scsi_requests(QEMUFile *f, void *pv, size_t size) QTAILQ_FOREACH(req, &s->requests, next) { assert(!req->io_canceled); assert(req->status == -1); - assert(req->retry); assert(req->enqueued); - qemu_put_sbyte(f, 1); + qemu_put_sbyte(f, req->retry ? 
1 : 2); qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf)); qemu_put_be32s(f, &req->tag); qemu_put_be32s(f, &req->lun); @@ -1528,8 +1595,9 @@ static int get_scsi_requests(QEMUFile *f, void *pv, size_t size) { SCSIDevice *s = pv; SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus); + int8_t sbyte; - while (qemu_get_sbyte(f)) { + while ((sbyte = qemu_get_sbyte(f)) > 0) { uint8_t buf[SCSI_CMD_BUF_SIZE]; uint32_t tag; uint32_t lun; @@ -1539,6 +1607,7 @@ static int get_scsi_requests(QEMUFile *f, void *pv, size_t size) qemu_get_be32s(f, &tag); qemu_get_be32s(f, &lun); req = scsi_req_new(s, tag, lun, buf, NULL); + req->retry = (sbyte == 1); if (bus->info->load_request) { req->hba_private = bus->info->load_request(f, req); } @@ -1547,7 +1616,6 @@ static int get_scsi_requests(QEMUFile *f, void *pv, size_t size) } /* Just restart it later. */ - req->retry = true; scsi_req_enqueue_internal(req); /* At this point, the request will be kept alive by the reference @@ -1595,10 +1663,11 @@ const VMStateDescription vmstate_scsi_device = { static void scsi_device_class_init(ObjectClass *klass, void *data) { DeviceClass *k = DEVICE_CLASS(klass); - k->bus_info = &scsi_bus_info; + k->bus_type = TYPE_SCSI_BUS; k->init = scsi_qdev_init; k->unplug = qdev_simple_unplug_cb; k->exit = scsi_qdev_exit; + k->props = scsi_props; } static TypeInfo scsi_device_type_info = { @@ -1612,6 +1681,7 @@ static TypeInfo scsi_device_type_info = { static void scsi_register_types(void) { + type_register_static(&scsi_bus_info); type_register_static(&scsi_device_type_info); } diff --git a/hw/scsi-defs.h b/hw/scsi-defs.h index 219c84dfb1..8a73f745ba 100644 --- a/hw/scsi-defs.h +++ b/hw/scsi-defs.h @@ -29,6 +29,7 @@ #define REQUEST_SENSE 0x03 #define FORMAT_UNIT 0x04 #define READ_BLOCK_LIMITS 0x05 +#define INITIALIZE_ELEMENT_STATUS 0x07 #define REASSIGN_BLOCKS 0x07 #define READ_6 0x08 #define WRITE_6 0x0a @@ -44,6 +45,7 @@ #define COPY 0x18 #define ERASE 0x19 #define MODE_SENSE 0x1a +#define LOAD_UNLOAD 0x1b #define START_STOP 0x1b #define RECEIVE_DIAGNOSTIC 0x1c #define SEND_DIAGNOSTIC 0x1d @@ -53,6 +55,7 @@ #define WRITE_10 0x2a #define SEEK_10 0x2b #define LOCATE_10 0x2b +#define POSITION_TO_ELEMENT 0x2b #define WRITE_VERIFY_10 0x2e #define VERIFY_10 0x2f #define SEARCH_HIGH 0x30 @@ -63,6 +66,7 @@ #define READ_POSITION 0x34 #define SYNCHRONIZE_CACHE 0x35 #define LOCK_UNLOCK_CACHE 0x36 +#define INITIALIZE_ELEMENT_STATUS_WITH_RANGE 0x37 #define READ_DEFECT_DATA 0x37 #define MEDIUM_SCAN 0x38 #define COMPARE 0x39 @@ -82,6 +86,7 @@ #define GET_EVENT_STATUS_NOTIFICATION 0x4a #define LOG_SELECT 0x4c #define LOG_SENSE 0x4d +#define READ_DISC_INFORMATION 0x51 #define RESERVE_TRACK 0x53 #define MODE_SELECT_10 0x55 #define RESERVE_10 0x56 @@ -116,7 +121,7 @@ #define MAINTENANCE_IN 0xa3 #define MAINTENANCE_OUT 0xa4 #define MOVE_MEDIUM 0xa5 -#define LOAD_UNLOAD 0xa6 +#define EXCHANGE_MEDIUM 0xa6 #define SET_READ_AHEAD 0xa7 #define READ_12 0xa8 #define WRITE_12 0xaa @@ -142,6 +147,14 @@ #define SAI_READ_CAPACITY_16 0x10 /* + * READ POSITION service action codes + */ +#define SHORT_FORM_BLOCK_ID 0x00 +#define SHORT_FORM_VENDOR_SPECIFIC 0x01 +#define LONG_FORM 0x06 +#define EXTENDED_FORM 0x08 + +/* * SAM Status codes */ diff --git a/hw/scsi-disk.c b/hw/scsi-disk.c index 045c764d9b..34336b1b58 100644 --- a/hw/scsi-disk.c +++ b/hw/scsi-disk.c @@ -34,7 +34,6 @@ do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0) #include "scsi-defs.h" #include "sysemu.h" #include "blockdev.h" -#include "block_int.h" #include "dma.h" #ifdef __linux @@ 
-68,6 +67,7 @@ struct SCSIDiskState bool media_changed; bool media_event; bool eject_request; + uint64_t wwn; QEMUBH *bh; char *version; char *serial; @@ -132,8 +132,14 @@ static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req) qemu_put_be64s(f, &r->sector); qemu_put_be32s(f, &r->sector_count); qemu_put_be32s(f, &r->buflen); - if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) { - qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); + if (r->buflen) { + if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { + qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); + } else if (!req->retry) { + uint32_t len = r->iov.iov_len; + qemu_put_be32s(f, &len); + qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); + } } } @@ -148,6 +154,12 @@ static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req) scsi_init_iovec(r, r->buflen); if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); + } else if (!r->req.retry) { + uint32_t len; + qemu_get_be32s(f, &len); + r->iov.iov_len = len; + assert(r->iov.iov_len <= r->buflen); + qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); } } @@ -511,6 +523,7 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); int buflen = 0; + int start; if (req->cmd.buf[1] & 0x1) { /* Vital product data */ @@ -519,14 +532,14 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) outbuf[buflen++] = s->qdev.type & 0x1f; outbuf[buflen++] = page_code ; // this page outbuf[buflen++] = 0x00; + outbuf[buflen++] = 0x00; + start = buflen; switch (page_code) { case 0x00: /* Supported page codes, mandatory */ { - int pages; DPRINTF("Inquiry EVPD[Supported pages] " "buffer size %zd\n", req->cmd.xfer); - pages = buflen++; outbuf[buflen++] = 0x00; // list of supported pages (this page) if (s->serial) { outbuf[buflen++] = 0x80; // unit serial number @@ -536,7 +549,6 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) outbuf[buflen++] = 0xb0; // block limits outbuf[buflen++] = 0xb2; // thin provisioning } - outbuf[pages] = buflen - pages - 1; // number of pages break; } case 0x80: /* Device serial number, optional */ @@ -555,7 +567,6 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) DPRINTF("Inquiry EVPD[Serial number] " "buffer size %zd\n", req->cmd.xfer); - outbuf[buflen++] = l; memcpy(outbuf+buflen, s->serial, l); buflen += l; break; @@ -573,14 +584,21 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) DPRINTF("Inquiry EVPD[Device identification] " "buffer size %zd\n", req->cmd.xfer); - outbuf[buflen++] = 4 + id_len; outbuf[buflen++] = 0x2; // ASCII outbuf[buflen++] = 0; // not officially assigned outbuf[buflen++] = 0; // reserved outbuf[buflen++] = id_len; // length of data following - memcpy(outbuf+buflen, str, id_len); buflen += id_len; + + if (s->wwn) { + outbuf[buflen++] = 0x1; // Binary + outbuf[buflen++] = 0x3; // NAA + outbuf[buflen++] = 0; // reserved + outbuf[buflen++] = 8; + stq_be_p(&outbuf[buflen], s->wwn); + buflen += 8; + } break; } case 0xb0: /* block limits */ @@ -598,8 +616,7 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) return -1; } /* required VPD size with unmap support */ - outbuf[3] = buflen = 0x3c; - + buflen = 0x40; memset(outbuf + 4, 0, buflen - 4); /* optimal transfer length granularity */ @@ -621,7 +638,7 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) } case 0xb2: /* thin provisioning */ { 
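/*
 * Illustrative sketch, not part of the patch: the scsi_disk_emulate_inquiry()
 * hunks above stop maintaining per-page VPD length bytes by hand; instead
 * they emit a fixed 4-byte header, remember where the payload starts, and
 * back-patch outbuf[start - 1] once the page is complete.  A minimal
 * standalone version of that pattern (build_vpd_page() is hypothetical):
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static int build_vpd_page(uint8_t *outbuf, uint8_t dev_type, uint8_t page_code,
                          const uint8_t *payload, int payload_len)
{
    int buflen = 0, start;

    outbuf[buflen++] = dev_type & 0x1f;   /* peripheral device type */
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;              /* page length, patched below */
    start = buflen;

    memcpy(outbuf + buflen, payload, payload_len);
    buflen += payload_len;

    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;   /* length covers the payload only */
    return buflen;
}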
- outbuf[3] = buflen = 8; + buflen = 8; outbuf[4] = 0; outbuf[5] = 0x60; /* write_same 10/16 supported */ outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1; @@ -632,6 +649,8 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) return -1; } /* done with EVPD */ + assert(buflen - start <= 255); + outbuf[start - 1] = buflen - start; return buflen; } @@ -705,6 +724,39 @@ static inline bool media_is_cd(SCSIDiskState *s) return nb_sectors <= CD_MAX_SECTORS; } +static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r, + uint8_t *outbuf) +{ + uint8_t type = r->req.cmd.buf[1] & 7; + + if (s->qdev.type != TYPE_ROM) { + return -1; + } + + /* Types 1/2 are only defined for Blu-Ray. */ + if (type != 0) { + scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); + return -1; + } + + memset(outbuf, 0, 34); + outbuf[1] = 32; + outbuf[2] = 0xe; /* last session complete, disc finalized */ + outbuf[3] = 1; /* first track on disc */ + outbuf[4] = 1; /* # of sessions */ + outbuf[5] = 1; /* first track of last session */ + outbuf[6] = 1; /* last track of last session */ + outbuf[7] = 0x20; /* unrestricted use */ + outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */ + /* 9-10-11: most significant byte corresponding bytes 4-5-6 */ + /* 12-23: not meaningful for CD-ROM or DVD-ROM */ + /* 24-31: disc bar code */ + /* 32: disc application code */ + /* 33: number of OPC tables */ + + return 34; +} + static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r, uint8_t *outbuf) { @@ -1344,6 +1396,12 @@ static int scsi_disk_emulate_command(SCSIDiskReq *r) goto illegal_request; } break; + case READ_DISC_INFORMATION: + buflen = scsi_read_disc_information(s, r, outbuf); + if (buflen < 0) { + goto illegal_request; + } + break; case READ_DVD_STRUCTURE: buflen = scsi_read_dvd_structure(s, r, outbuf); if (buflen < 0) { @@ -1471,6 +1529,7 @@ static int32_t scsi_send_command(SCSIRequest *req, uint8_t *buf) case ALLOW_MEDIUM_REMOVAL: case READ_CAPACITY_10: case READ_TOC: + case READ_DISC_INFORMATION: case READ_DVD_STRUCTURE: case GET_CONFIGURATION: case GET_EVENT_STATUS_NOTIFICATION: @@ -1704,7 +1763,7 @@ static int scsi_initfn(SCSIDevice *dev) } if (!s->version) { - s->version = g_strdup(QEMU_VERSION); + s->version = g_strdup(qemu_get_version()); } if (bdrv_is_sg(s->qdev.conf.bs)) { @@ -1877,7 +1936,7 @@ static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, * ones (such as WRITE SAME or EXTENDED COPY, etc.). So, without * O_DIRECT everything must go through SG_IO. 
*/ - if (!(s->qdev.conf.bs->open_flags & BDRV_O_NOCACHE)) { + if (bdrv_get_flags(s->qdev.conf.bs) & BDRV_O_NOCACHE) { break; } @@ -1914,6 +1973,7 @@ static Property scsi_hd_properties[] = { SCSI_DISK_F_REMOVABLE, false), DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, SCSI_DISK_F_DPOFUA, false), + DEFINE_PROP_HEX64("wwn", SCSIDiskState, wwn, 0), DEFINE_PROP_END_OF_LIST(), }; @@ -1958,6 +2018,7 @@ static TypeInfo scsi_hd_info = { static Property scsi_cd_properties[] = { DEFINE_SCSI_DISK_PROPERTIES(), + DEFINE_PROP_HEX64("wwn", SCSIDiskState, wwn, 0), DEFINE_PROP_END_OF_LIST(), }; @@ -2019,6 +2080,7 @@ static Property scsi_disk_properties[] = { SCSI_DISK_F_REMOVABLE, false), DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, SCSI_DISK_F_DPOFUA, false), + DEFINE_PROP_HEX64("wwn", SCSIDiskState, wwn, 0), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/scsi-generic.c b/hw/scsi-generic.c index d856d23b3b..8d5106061e 100644 --- a/hw/scsi-generic.c +++ b/hw/scsi-generic.c @@ -400,12 +400,6 @@ static int scsi_generic_initfn(SCSIDevice *s) return -1; } - /* check we are really using a /dev/sg* file */ - if (!bdrv_is_sg(s->conf.bs)) { - error_report("not /dev/sg*"); - return -1; - } - if (bdrv_get_on_error(s->conf.bs, 0) != BLOCK_ERR_STOP_ENOSPC) { error_report("Device doesn't support drive option werror"); return -1; @@ -416,8 +410,11 @@ static int scsi_generic_initfn(SCSIDevice *s) } /* check we are using a driver managing SG_IO (version 3 and after */ - if (bdrv_ioctl(s->conf.bs, SG_GET_VERSION_NUM, &sg_version) < 0 || - sg_version < 30000) { + if (bdrv_ioctl(s->conf.bs, SG_GET_VERSION_NUM, &sg_version) < 0) { + error_report("scsi generic interface not supported"); + return -1; + } + if (sg_version < 30000) { error_report("scsi generic interface too old"); return -1; } @@ -136,6 +136,9 @@ struct SCSIBusInfo { void *(*load_request)(QEMUFile *f, SCSIRequest *req); }; +#define TYPE_SCSI_BUS "SCSI" +#define SCSI_BUS(obj) OBJECT_CHECK(SCSIBus, (obj), TYPE_SCSI_BUS) + struct SCSIBus { BusState qbus; int busnr; diff --git a/hw/sh4/Makefile.objs b/hw/sh4/Makefile.objs new file mode 100644 index 0000000000..68c5921790 --- /dev/null +++ b/hw/sh4/Makefile.objs @@ -0,0 +1,5 @@ +obj-y = shix.o r2d.o sh7750.o sh7750_regnames.o tc58128.o +obj-y += sh_timer.o sh_serial.o sh_intc.o sh_pci.o sm501.o +obj-y += ide/mmio.o + +obj-y := $(addprefix ../,$(obj-y)) @@ -253,6 +253,7 @@ static void shpc_free_devices_in_slot(SHPCDevice *shpc, int slot) ++devfn) { PCIDevice *affected_dev = shpc->sec_bus->devices[devfn]; if (affected_dev) { + object_unparent(OBJECT(affected_dev)); qdev_free(&affected_dev->qdev); } } diff --git a/hw/spapr.c b/hw/spapr.c index d0bddbce95..81c9343ca5 100644 --- a/hw/spapr.c +++ b/hw/spapr.c @@ -146,6 +146,40 @@ static int spapr_set_associativity(void *fdt, sPAPREnvironment *spapr) return ret; } + +static size_t create_page_sizes_prop(CPUPPCState *env, uint32_t *prop, + size_t maxsize) +{ + size_t maxcells = maxsize / sizeof(uint32_t); + int i, j, count; + uint32_t *p = prop; + + for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { + struct ppc_one_seg_page_size *sps = &env->sps.sps[i]; + + if (!sps->page_shift) { + break; + } + for (count = 0; count < PPC_PAGE_SIZES_MAX_SZ; count++) { + if (sps->enc[count].page_shift == 0) { + break; + } + } + if ((p - prop) >= (maxcells - 3 - count * 2)) { + break; + } + *(p++) = cpu_to_be32(sps->page_shift); + *(p++) = cpu_to_be32(sps->slb_enc); + *(p++) = cpu_to_be32(count); + for (j = 0; j < count; j++) { + *(p++) = cpu_to_be32(sps->enc[j].page_shift); + *(p++) = 
cpu_to_be32(sps->enc[j].pte_enc); + } + } + + return (p - prop) * sizeof(uint32_t); +} + static void *spapr_create_fdt_skel(const char *cpu_model, target_phys_addr_t rma_size, target_phys_addr_t initrd_base, @@ -163,6 +197,7 @@ static void *spapr_create_fdt_skel(const char *cpu_model, uint32_t pft_size_prop[] = {0, cpu_to_be32(hash_shift)}; char hypertas_prop[] = "hcall-pft\0hcall-term\0hcall-dabr\0hcall-interrupt" "\0hcall-tce\0hcall-vio\0hcall-splpar\0hcall-bulk"; + char qemu_hypertas_prop[] = "hcall-memop1"; uint32_t interrupt_server_ranges_prop[] = {0, cpu_to_be32(smp_cpus)}; int i; char *modelname; @@ -298,6 +333,8 @@ static void *spapr_create_fdt_skel(const char *cpu_model, 0xffffffff, 0xffffffff}; uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq() : TIMEBASE_FREQ; uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000; + uint32_t page_sizes_prop[64]; + size_t page_sizes_prop_size; if ((index % smt) != 0) { continue; @@ -362,6 +399,13 @@ static void *spapr_create_fdt_skel(const char *cpu_model, _FDT((fdt_property_cell(fdt, "ibm,dfp", 1))); } + page_sizes_prop_size = create_page_sizes_prop(env, page_sizes_prop, + sizeof(page_sizes_prop)); + if (page_sizes_prop_size) { + _FDT((fdt_property(fdt, "ibm,segment-page-sizes", + page_sizes_prop, page_sizes_prop_size))); + } + _FDT((fdt_end_node(fdt))); } @@ -374,6 +418,8 @@ static void *spapr_create_fdt_skel(const char *cpu_model, _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas_prop, sizeof(hypertas_prop)))); + _FDT((fdt_property(fdt, "qemu,hypertas-functions", qemu_hypertas_prop, + sizeof(qemu_hypertas_prop)))); _FDT((fdt_property(fdt, "ibm,associativity-reference-points", refpoints, sizeof(refpoints)))); @@ -628,6 +674,9 @@ static void ppc_spapr_init(ram_addr_t ram_size, spapr->icp = xics_system_init(XICS_IRQS); spapr->next_irq = 16; + /* Set up IOMMU */ + spapr_iommu_init(); + /* Set up VIO bus */ spapr->vio_bus = spapr_vio_bus_init(); diff --git a/hw/spapr.h b/hw/spapr.h index 654a7a8a34..9153f29a60 100644 --- a/hw/spapr.h +++ b/hw/spapr.h @@ -1,6 +1,7 @@ #if !defined(__HW_SPAPR_H__) #define __HW_SPAPR_H__ +#include "dma.h" #include "hw/xics.h" struct VIOsPAPRBus; @@ -264,7 +265,8 @@ typedef struct sPAPREnvironment { */ #define KVMPPC_HCALL_BASE 0xf000 #define KVMPPC_H_RTAS (KVMPPC_HCALL_BASE + 0x0) -#define KVMPPC_HCALL_MAX KVMPPC_H_RTAS +#define KVMPPC_H_LOGICAL_MEMOP (KVMPPC_HCALL_BASE + 0x1) +#define KVMPPC_HCALL_MAX KVMPPC_H_LOGICAL_MEMOP extern sPAPREnvironment *spapr; @@ -319,4 +321,21 @@ target_ulong spapr_rtas_call(sPAPREnvironment *spapr, int spapr_rtas_device_tree_setup(void *fdt, target_phys_addr_t rtas_addr, target_phys_addr_t rtas_size); +#define SPAPR_TCE_PAGE_SHIFT 12 +#define SPAPR_TCE_PAGE_SIZE (1ULL << SPAPR_TCE_PAGE_SHIFT) +#define SPAPR_TCE_PAGE_MASK (SPAPR_TCE_PAGE_SIZE - 1) + +typedef struct sPAPRTCE { + uint64_t tce; +} sPAPRTCE; + +#define SPAPR_VIO_BASE_LIOBN 0x00000000 +#define SPAPR_PCI_BASE_LIOBN 0x80000000 + +void spapr_iommu_init(void); +DMAContext *spapr_tce_new_dma_context(uint32_t liobn, size_t window_size); +void spapr_tce_free(DMAContext *dma); +int spapr_dma_dt(void *fdt, int node_off, const char *propname, + DMAContext *dma); + #endif /* !defined (__HW_SPAPR_H__) */ diff --git a/hw/spapr_hcall.c b/hw/spapr_hcall.c index 94bb504ca6..a5990a9617 100644 --- a/hw/spapr_hcall.c +++ b/hw/spapr_hcall.c @@ -608,6 +608,73 @@ static target_ulong h_logical_store(CPUPPCState *env, sPAPREnvironment *spapr, return H_PARAMETER; } +static target_ulong h_logical_memop(CPUPPCState *env, 
sPAPREnvironment *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong dst = args[0]; /* Destination address */ + target_ulong src = args[1]; /* Source address */ + target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */ + target_ulong count = args[3]; /* Element count */ + target_ulong op = args[4]; /* 0 = copy, 1 = invert */ + uint64_t tmp; + unsigned int mask = (1 << esize) - 1; + int step = 1 << esize; + + if (count > 0x80000000) { + return H_PARAMETER; + } + + if ((dst & mask) || (src & mask) || (op > 1)) { + return H_PARAMETER; + } + + if (dst >= src && dst < (src + (count << esize))) { + dst = dst + ((count - 1) << esize); + src = src + ((count - 1) << esize); + step = -step; + } + + while (count--) { + switch (esize) { + case 0: + tmp = ldub_phys(src); + break; + case 1: + tmp = lduw_phys(src); + break; + case 2: + tmp = ldl_phys(src); + break; + case 3: + tmp = ldq_phys(src); + break; + default: + return H_PARAMETER; + } + if (op == 1) { + tmp = ~tmp; + } + switch (esize) { + case 0: + stb_phys(dst, tmp); + break; + case 1: + stw_phys(dst, tmp); + break; + case 2: + stl_phys(dst, tmp); + break; + case 3: + stq_phys(dst, tmp); + break; + } + dst = dst + step; + src = src + step; + } + + return H_SUCCESS; +} + static target_ulong h_logical_icbi(CPUPPCState *env, sPAPREnvironment *spapr, target_ulong opcode, target_ulong *args) { @@ -700,6 +767,7 @@ static void hypercall_register_types(void) spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store); spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi); spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf); + spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop); /* qemu/KVM-PPC specific hcalls */ spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas); diff --git a/hw/spapr_iommu.c b/hw/spapr_iommu.c new file mode 100644 index 0000000000..388ffa4b22 --- /dev/null +++ b/hw/spapr_iommu.c @@ -0,0 +1,246 @@ +/* + * QEMU sPAPR IOMMU (TCE) code + * + * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ +#include "hw.h" +#include "kvm.h" +#include "qdev.h" +#include "kvm_ppc.h" +#include "dma.h" + +#include "hw/spapr.h" + +#include <libfdt.h> + +/* #define DEBUG_TCE */ + +enum sPAPRTCEAccess { + SPAPR_TCE_FAULT = 0, + SPAPR_TCE_RO = 1, + SPAPR_TCE_WO = 2, + SPAPR_TCE_RW = 3, +}; + +typedef struct sPAPRTCETable sPAPRTCETable; + +struct sPAPRTCETable { + DMAContext dma; + uint32_t liobn; + uint32_t window_size; + sPAPRTCE *table; + int fd; + QLIST_ENTRY(sPAPRTCETable) list; +}; + + +QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables; + +static sPAPRTCETable *spapr_tce_find_by_liobn(uint32_t liobn) +{ + sPAPRTCETable *tcet; + + QLIST_FOREACH(tcet, &spapr_tce_tables, list) { + if (tcet->liobn == liobn) { + return tcet; + } + } + + return NULL; +} + +static int spapr_tce_translate(DMAContext *dma, + dma_addr_t addr, + target_phys_addr_t *paddr, + target_phys_addr_t *len, + DMADirection dir) +{ + sPAPRTCETable *tcet = DO_UPCAST(sPAPRTCETable, dma, dma); + enum sPAPRTCEAccess access = (dir == DMA_DIRECTION_FROM_DEVICE) + ? SPAPR_TCE_WO : SPAPR_TCE_RO; + uint64_t tce; + +#ifdef DEBUG_TCE + fprintf(stderr, "spapr_tce_translate liobn=0x%" PRIx32 " addr=0x" + DMA_ADDR_FMT "\n", tcet->liobn, addr); +#endif + + /* Check if we are in bound */ + if (addr >= tcet->window_size) { +#ifdef DEBUG_TCE + fprintf(stderr, "spapr_tce_translate out of bounds\n"); +#endif + return -EFAULT; + } + + tce = tcet->table[addr >> SPAPR_TCE_PAGE_SHIFT].tce; + + /* Check TCE */ + if (!(tce & access)) { + return -EPERM; + } + + /* How much til end of page ? */ + *len = ((~addr) & SPAPR_TCE_PAGE_MASK) + 1; + + /* Translate */ + *paddr = (tce & ~SPAPR_TCE_PAGE_MASK) | + (addr & SPAPR_TCE_PAGE_MASK); + +#ifdef DEBUG_TCE + fprintf(stderr, " -> *paddr=0x" TARGET_FMT_plx ", *len=0x" + TARGET_FMT_plx "\n", *paddr, *len); +#endif + + return 0; +} + +DMAContext *spapr_tce_new_dma_context(uint32_t liobn, size_t window_size) +{ + sPAPRTCETable *tcet; + + if (!window_size) { + return NULL; + } + + tcet = g_malloc0(sizeof(*tcet)); + dma_context_init(&tcet->dma, spapr_tce_translate, NULL, NULL); + + tcet->liobn = liobn; + tcet->window_size = window_size; + + if (kvm_enabled()) { + tcet->table = kvmppc_create_spapr_tce(liobn, + window_size, + &tcet->fd); + } + + if (!tcet->table) { + size_t table_size = (window_size >> SPAPR_TCE_PAGE_SHIFT) + * sizeof(sPAPRTCE); + tcet->table = g_malloc0(table_size); + } + +#ifdef DEBUG_TCE + fprintf(stderr, "spapr_iommu: New TCE table, liobn=0x%x, context @ %p, " + "table @ %p, fd=%d\n", liobn, &tcet->dma, tcet->table, tcet->fd); +#endif + + QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list); + + return &tcet->dma; +} + +void spapr_tce_free(DMAContext *dma) +{ + + if (dma) { + sPAPRTCETable *tcet = DO_UPCAST(sPAPRTCETable, dma, dma); + + QLIST_REMOVE(tcet, list); + + if (!kvm_enabled() || + (kvmppc_remove_spapr_tce(tcet->table, tcet->fd, + tcet->window_size) != 0)) { + g_free(tcet->table); + } + + g_free(tcet); + } +} + +static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba, + target_ulong tce) +{ + sPAPRTCE *tcep; + + if (ioba >= tcet->window_size) { + hcall_dprintf("spapr_vio_put_tce on out-of-boards IOBA 0x" + TARGET_FMT_lx "\n", ioba); + return H_PARAMETER; + } + + tcep = tcet->table + (ioba >> SPAPR_TCE_PAGE_SHIFT); + tcep->tce = tce; + + return H_SUCCESS; +} + +static target_ulong h_put_tce(CPUPPCState *env, sPAPREnvironment *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong liobn = args[0]; + target_ulong ioba = args[1]; + target_ulong tce = args[2]; 
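/*
 * Illustrative sketch, not part of the patch: spapr_tce_translate() above
 * resolves a device DMA address in three steps: bounds-check it against the
 * DMA window, look up the TCE for its 4 KiB page, then splice the TCE's real
 * page number with the in-page offset and report how many bytes remain in
 * that page.  Standalone version over a plain uint64_t table; names are
 * local stand-ins for the SPAPR_TCE_* constants.
 */
#include <errno.h>
#include <stdint.h>

#define TCE_PAGE_SHIFT 12                       /* SPAPR_TCE_PAGE_SHIFT */
#define TCE_PAGE_SIZE  (1ULL << TCE_PAGE_SHIFT)
#define TCE_PAGE_MASK  (TCE_PAGE_SIZE - 1)

static int tce_translate(const uint64_t *table, uint64_t window_size,
                         uint64_t addr, uint64_t access_bit,
                         uint64_t *paddr, uint64_t *len)
{
    uint64_t tce;

    if (addr >= window_size) {
        return -EFAULT;                         /* outside the DMA window */
    }
    tce = table[addr >> TCE_PAGE_SHIFT];
    if (!(tce & access_bit)) {
        return -EPERM;                          /* page not mapped for this direction */
    }
    *len = (~addr & TCE_PAGE_MASK) + 1;         /* bytes left in this page */
    *paddr = (tce & ~TCE_PAGE_MASK) | (addr & TCE_PAGE_MASK);
    return 0;
}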
+ sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn); + + if (liobn & 0xFFFFFFFF00000000ULL) { + hcall_dprintf("spapr_vio_put_tce on out-of-boundsw LIOBN " + TARGET_FMT_lx "\n", liobn); + return H_PARAMETER; + } + + ioba &= ~(SPAPR_TCE_PAGE_SIZE - 1); + + if (tcet) { + return put_tce_emu(tcet, ioba, tce); + } +#ifdef DEBUG_TCE + fprintf(stderr, "%s on liobn=" TARGET_FMT_lx /*%s*/ + " ioba 0x" TARGET_FMT_lx " TCE 0x" TARGET_FMT_lx "\n", + __func__, liobn, /*dev->qdev.id, */ioba, tce); +#endif + + return H_PARAMETER; +} + +void spapr_iommu_init(void) +{ + QLIST_INIT(&spapr_tce_tables); + + /* hcall-tce */ + spapr_register_hypercall(H_PUT_TCE, h_put_tce); +} + +int spapr_dma_dt(void *fdt, int node_off, const char *propname, + DMAContext *dma) +{ + if (dma) { + sPAPRTCETable *tcet = DO_UPCAST(sPAPRTCETable, dma, dma); + uint32_t dma_prop[] = {cpu_to_be32(tcet->liobn), + 0, 0, + 0, cpu_to_be32(tcet->window_size)}; + int ret; + + ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2); + if (ret < 0) { + return ret; + } + + ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2); + if (ret < 0) { + return ret; + } + + ret = fdt_setprop(fdt, node_off, propname, dma_prop, + sizeof(dma_prop)); + if (ret < 0) { + return ret; + } + } + + return 0; +} diff --git a/hw/spapr_llan.c b/hw/spapr_llan.c index 8313043652..d26fe9fea3 100644 --- a/hw/spapr_llan.c +++ b/hw/spapr_llan.c @@ -71,7 +71,7 @@ typedef uint64_t vlan_bd_t; #define VLAN_RXQ_BD_OFF 0 #define VLAN_FILTER_BD_OFF 8 #define VLAN_RX_BDS_OFF 16 -#define VLAN_MAX_BUFS ((SPAPR_VIO_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF) / 8) +#define VLAN_MAX_BUFS ((SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF) / 8) typedef struct VIOsPAPRVLANDevice { VIOsPAPRDevice sdev; @@ -95,7 +95,7 @@ static ssize_t spapr_vlan_receive(VLANClientState *nc, const uint8_t *buf, { VIOsPAPRDevice *sdev = DO_UPCAST(NICState, nc, nc)->opaque; VIOsPAPRVLANDevice *dev = (VIOsPAPRVLANDevice *)sdev; - vlan_bd_t rxq_bd = ldq_tce(sdev, dev->buf_list + VLAN_RXQ_BD_OFF); + vlan_bd_t rxq_bd = vio_ldq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF); vlan_bd_t bd; int buf_ptr = dev->use_buf_ptr; uint64_t handle; @@ -114,11 +114,11 @@ static ssize_t spapr_vlan_receive(VLANClientState *nc, const uint8_t *buf, do { buf_ptr += 8; - if (buf_ptr >= SPAPR_VIO_TCE_PAGE_SIZE) { + if (buf_ptr >= SPAPR_TCE_PAGE_SIZE) { buf_ptr = VLAN_RX_BDS_OFF; } - bd = ldq_tce(sdev, dev->buf_list + buf_ptr); + bd = vio_ldq(sdev, dev->buf_list + buf_ptr); dprintf("use_buf_ptr=%d bd=0x%016llx\n", buf_ptr, (unsigned long long)bd); } while ((!(bd & VLAN_BD_VALID) || (VLAN_BD_LEN(bd) < (size + 8))) @@ -132,12 +132,12 @@ static ssize_t spapr_vlan_receive(VLANClientState *nc, const uint8_t *buf, /* Remove the buffer from the pool */ dev->rx_bufs--; dev->use_buf_ptr = buf_ptr; - stq_tce(sdev, dev->buf_list + dev->use_buf_ptr, 0); + vio_stq(sdev, dev->buf_list + dev->use_buf_ptr, 0); dprintf("Found buffer: ptr=%d num=%d\n", dev->use_buf_ptr, dev->rx_bufs); /* Transfer the packet data */ - if (spapr_tce_dma_write(sdev, VLAN_BD_ADDR(bd) + 8, buf, size) < 0) { + if (spapr_vio_dma_write(sdev, VLAN_BD_ADDR(bd) + 8, buf, size) < 0) { return -1; } @@ -149,23 +149,23 @@ static ssize_t spapr_vlan_receive(VLANClientState *nc, const uint8_t *buf, control ^= VLAN_RXQC_TOGGLE; } - handle = ldq_tce(sdev, VLAN_BD_ADDR(bd)); - stq_tce(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 8, handle); - stw_tce(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 4, size); - sth_tce(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8); - stb_tce(sdev, 
VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control); + handle = vio_ldq(sdev, VLAN_BD_ADDR(bd)); + vio_stq(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 8, handle); + vio_stl(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 4, size); + vio_sth(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8); + vio_stb(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control); dprintf("wrote rxq entry (ptr=0x%llx): 0x%016llx 0x%016llx\n", (unsigned long long)dev->rxq_ptr, - (unsigned long long)ldq_tce(sdev, VLAN_BD_ADDR(rxq_bd) + + (unsigned long long)vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr), - (unsigned long long)ldq_tce(sdev, VLAN_BD_ADDR(rxq_bd) + + (unsigned long long)vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 8)); dev->rxq_ptr += 16; if (dev->rxq_ptr >= VLAN_BD_LEN(rxq_bd)) { dev->rxq_ptr = 0; - stq_tce(sdev, dev->buf_list + VLAN_RXQ_BD_OFF, rxq_bd ^ VLAN_BD_TOGGLE); + vio_stq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF, rxq_bd ^ VLAN_BD_TOGGLE); } if (sdev->signal_state & 1) { @@ -254,8 +254,10 @@ static int check_bd(VIOsPAPRVLANDevice *dev, vlan_bd_t bd, return -1; } - if (spapr_vio_check_tces(&dev->sdev, VLAN_BD_ADDR(bd), - VLAN_BD_LEN(bd), SPAPR_TCE_RW) != 0) { + if (!spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd), + VLAN_BD_LEN(bd), DMA_DIRECTION_FROM_DEVICE) + || !spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd), + VLAN_BD_LEN(bd), DMA_DIRECTION_TO_DEVICE)) { return -1; } @@ -285,14 +287,14 @@ static target_ulong h_register_logical_lan(CPUPPCState *env, return H_RESOURCE; } - if (check_bd(dev, VLAN_VALID_BD(buf_list, SPAPR_VIO_TCE_PAGE_SIZE), - SPAPR_VIO_TCE_PAGE_SIZE) < 0) { + if (check_bd(dev, VLAN_VALID_BD(buf_list, SPAPR_TCE_PAGE_SIZE), + SPAPR_TCE_PAGE_SIZE) < 0) { hcall_dprintf("Bad buf_list 0x" TARGET_FMT_lx "\n", buf_list); return H_PARAMETER; } - filter_list_bd = VLAN_VALID_BD(filter_list, SPAPR_VIO_TCE_PAGE_SIZE); - if (check_bd(dev, filter_list_bd, SPAPR_VIO_TCE_PAGE_SIZE) < 0) { + filter_list_bd = VLAN_VALID_BD(filter_list, SPAPR_TCE_PAGE_SIZE); + if (check_bd(dev, filter_list_bd, SPAPR_TCE_PAGE_SIZE) < 0) { hcall_dprintf("Bad filter_list 0x" TARGET_FMT_lx "\n", filter_list); return H_PARAMETER; } @@ -309,17 +311,17 @@ static target_ulong h_register_logical_lan(CPUPPCState *env, rec_queue &= ~VLAN_BD_TOGGLE; /* Initialize the buffer list */ - stq_tce(sdev, buf_list, rec_queue); - stq_tce(sdev, buf_list + 8, filter_list_bd); - spapr_tce_dma_zero(sdev, buf_list + VLAN_RX_BDS_OFF, - SPAPR_VIO_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF); + vio_stq(sdev, buf_list, rec_queue); + vio_stq(sdev, buf_list + 8, filter_list_bd); + spapr_vio_dma_set(sdev, buf_list + VLAN_RX_BDS_OFF, 0, + SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF); dev->add_buf_ptr = VLAN_RX_BDS_OFF - 8; dev->use_buf_ptr = VLAN_RX_BDS_OFF - 8; dev->rx_bufs = 0; dev->rxq_ptr = 0; /* Initialize the receive queue */ - spapr_tce_dma_zero(sdev, VLAN_BD_ADDR(rec_queue), VLAN_BD_LEN(rec_queue)); + spapr_vio_dma_set(sdev, VLAN_BD_ADDR(rec_queue), 0, VLAN_BD_LEN(rec_queue)); dev->isopen = 1; return H_SUCCESS; @@ -378,14 +380,14 @@ static target_ulong h_add_logical_lan_buffer(CPUPPCState *env, do { dev->add_buf_ptr += 8; - if (dev->add_buf_ptr >= SPAPR_VIO_TCE_PAGE_SIZE) { + if (dev->add_buf_ptr >= SPAPR_TCE_PAGE_SIZE) { dev->add_buf_ptr = VLAN_RX_BDS_OFF; } - bd = ldq_tce(sdev, dev->buf_list + dev->add_buf_ptr); + bd = vio_ldq(sdev, dev->buf_list + dev->add_buf_ptr); } while (bd & VLAN_BD_VALID); - stq_tce(sdev, dev->buf_list + dev->add_buf_ptr, buf); + vio_stq(sdev, dev->buf_list + dev->add_buf_ptr, buf); dev->rx_bufs++; @@ -451,7 +453,7 @@ static 
target_ulong h_send_logical_lan(CPUPPCState *env, sPAPREnvironment *spapr lbuf = alloca(total_len); p = lbuf; for (i = 0; i < nbufs; i++) { - ret = spapr_tce_dma_read(sdev, VLAN_BD_ADDR(bufs[i]), + ret = spapr_vio_dma_read(sdev, VLAN_BD_ADDR(bufs[i]), p, VLAN_BD_LEN(bufs[i])); if (ret < 0) { return ret; @@ -479,7 +481,7 @@ static target_ulong h_multicast_ctrl(CPUPPCState *env, sPAPREnvironment *spapr, } static Property spapr_vlan_properties[] = { - DEFINE_SPAPR_PROPERTIES(VIOsPAPRVLANDevice, sdev, 0x10000000), + DEFINE_SPAPR_PROPERTIES(VIOsPAPRVLANDevice, sdev), DEFINE_NIC_PROPERTIES(VIOsPAPRVLANDevice, nicconf), DEFINE_PROP_END_OF_LIST(), }; @@ -497,6 +499,7 @@ static void spapr_vlan_class_init(ObjectClass *klass, void *data) k->dt_compatible = "IBM,l-lan"; k->signal_mask = 0x1; dc->props = spapr_vlan_properties; + k->rtce_window_size = 0x10000000; } static TypeInfo spapr_vlan_info = { diff --git a/hw/spapr_pci.c b/hw/spapr_pci.c index 25b400aa47..47ba5ff770 100644 --- a/hw/spapr_pci.c +++ b/hw/spapr_pci.c @@ -35,17 +35,18 @@ static PCIDevice *find_dev(sPAPREnvironment *spapr, uint64_t buid, uint32_t config_addr) { - DeviceState *qdev; int devfn = (config_addr >> 8) & 0xFF; sPAPRPHBState *phb; QLIST_FOREACH(phb, &spapr->phbs, list) { + BusChild *kid; + if (phb->buid != buid) { continue; } - QTAILQ_FOREACH(qdev, &phb->host_state.bus->qbus.children, sibling) { - PCIDevice *dev = (PCIDevice *)qdev; + QTAILQ_FOREACH(kid, &phb->host_state.bus->qbus.children, sibling) { + PCIDevice *dev = (PCIDevice *)kid->child; if (dev->devfn == devfn) { return dev; } @@ -265,12 +266,21 @@ static const MemoryRegionOps spapr_io_ops = { /* * PHB PCI device */ +static DMAContext *spapr_pci_dma_context_fn(PCIBus *bus, void *opaque, + int devfn) +{ + sPAPRPHBState *phb = opaque; + + return phb->dma; +} + static int spapr_phb_init(SysBusDevice *s) { sPAPRPHBState *phb = FROM_SYSBUS(sPAPRPHBState, s); char *namebuf; int i; PCIBus *bus; + uint32_t liobn; phb->dtbusname = g_strdup_printf("pci@%" PRIx64, phb->buid); namebuf = alloca(strlen(phb->dtbusname) + 32); @@ -311,6 +321,10 @@ static int spapr_phb_init(SysBusDevice *s) PCI_DEVFN(0, 0), PCI_NUM_PINS); phb->host_state.bus = bus; + liobn = SPAPR_PCI_BASE_LIOBN | (pci_find_domain(bus) << 16); + phb->dma = spapr_tce_new_dma_context(liobn, 0x40000000); + pci_setup_iommu(bus, spapr_pci_dma_context_fn, phb); + QLIST_INSERT_HEAD(&spapr->phbs, phb, list); /* Initialize the LSI table */ @@ -471,6 +485,8 @@ int spapr_populate_pci_devices(sPAPRPHBState *phb, _FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map, sizeof(interrupt_map))); + spapr_dma_dt(fdt, bus_off, "ibm,dma-window", phb->dma); + return 0; } diff --git a/hw/spapr_pci.h b/hw/spapr_pci.h index f54c2e8108..d9e46e22e3 100644 --- a/hw/spapr_pci.h +++ b/hw/spapr_pci.h @@ -38,6 +38,7 @@ typedef struct sPAPRPHBState { MemoryRegion memspace, iospace; target_phys_addr_t mem_win_addr, mem_win_size, io_win_addr, io_win_size; MemoryRegion memwindow, iowindow; + DMAContext *dma; struct { uint32_t dt_irq; diff --git a/hw/spapr_vio.c b/hw/spapr_vio.c index 315ab8091c..05b55032a9 100644 --- a/hw/spapr_vio.c +++ b/hw/spapr_vio.c @@ -39,7 +39,6 @@ #endif /* CONFIG_FDT */ /* #define DEBUG_SPAPR */ -/* #define DEBUG_TCE */ #ifdef DEBUG_SPAPR #define dprintf(fmt, ...) 
\ @@ -49,22 +48,24 @@ do { } while (0) #endif -static struct BusInfo spapr_vio_bus_info = { - .name = "spapr-vio", - .size = sizeof(VIOsPAPRBus), - .props = (Property[]) { - DEFINE_PROP_UINT32("irq", VIOsPAPRDevice, vio_irq_num, 0), \ - DEFINE_PROP_END_OF_LIST(), - }, +static Property spapr_vio_props[] = { + DEFINE_PROP_UINT32("irq", VIOsPAPRDevice, vio_irq_num, 0), \ + DEFINE_PROP_END_OF_LIST(), +}; + +static const TypeInfo spapr_vio_bus_info = { + .name = TYPE_SPAPR_VIO_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(VIOsPAPRBus), }; VIOsPAPRDevice *spapr_vio_find_by_reg(VIOsPAPRBus *bus, uint32_t reg) { - DeviceState *qdev; + BusChild *kid; VIOsPAPRDevice *dev = NULL; - QTAILQ_FOREACH(qdev, &bus->bus.children, sibling) { - dev = (VIOsPAPRDevice *)qdev; + QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { + dev = (VIOsPAPRDevice *)kid->child; if (dev->reg == reg) { return dev; } @@ -141,26 +142,9 @@ static int vio_make_devnode(VIOsPAPRDevice *dev, } } - if (dev->rtce_window_size) { - uint32_t dma_prop[] = {cpu_to_be32(dev->reg), - 0, 0, - 0, cpu_to_be32(dev->rtce_window_size)}; - - ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2); - if (ret < 0) { - return ret; - } - - ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2); - if (ret < 0) { - return ret; - } - - ret = fdt_setprop(fdt, node_off, "ibm,my-dma-window", dma_prop, - sizeof(dma_prop)); - if (ret < 0) { - return ret; - } + ret = spapr_dma_dt(fdt, node_off, "ibm,my-dma-window", dev->dma); + if (ret < 0) { + return ret; } if (pc->devnode) { @@ -175,232 +159,6 @@ static int vio_make_devnode(VIOsPAPRDevice *dev, #endif /* CONFIG_FDT */ /* - * RTCE handling - */ - -static void rtce_init(VIOsPAPRDevice *dev) -{ - size_t size = (dev->rtce_window_size >> SPAPR_VIO_TCE_PAGE_SHIFT) - * sizeof(VIOsPAPR_RTCE); - - if (size) { - dev->rtce_table = kvmppc_create_spapr_tce(dev->reg, - dev->rtce_window_size, - &dev->kvmtce_fd); - - if (!dev->rtce_table) { - dev->rtce_table = g_malloc0(size); - } - } -} - -static target_ulong h_put_tce(CPUPPCState *env, sPAPREnvironment *spapr, - target_ulong opcode, target_ulong *args) -{ - target_ulong liobn = args[0]; - target_ulong ioba = args[1]; - target_ulong tce = args[2]; - VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, liobn); - VIOsPAPR_RTCE *rtce; - - if (!dev) { - hcall_dprintf("LIOBN 0x" TARGET_FMT_lx " does not exist\n", liobn); - return H_PARAMETER; - } - - ioba &= ~(SPAPR_VIO_TCE_PAGE_SIZE - 1); - -#ifdef DEBUG_TCE - fprintf(stderr, "spapr_vio_put_tce on %s ioba 0x" TARGET_FMT_lx - " TCE 0x" TARGET_FMT_lx "\n", dev->qdev.id, ioba, tce); -#endif - - if (ioba >= dev->rtce_window_size) { - hcall_dprintf("Out-of-bounds IOBA 0x" TARGET_FMT_lx "\n", ioba); - return H_PARAMETER; - } - - rtce = dev->rtce_table + (ioba >> SPAPR_VIO_TCE_PAGE_SHIFT); - rtce->tce = tce; - - return H_SUCCESS; -} - -int spapr_vio_check_tces(VIOsPAPRDevice *dev, target_ulong ioba, - target_ulong len, enum VIOsPAPR_TCEAccess access) -{ - int start, end, i; - - start = ioba >> SPAPR_VIO_TCE_PAGE_SHIFT; - end = (ioba + len - 1) >> SPAPR_VIO_TCE_PAGE_SHIFT; - - for (i = start; i <= end; i++) { - if ((dev->rtce_table[i].tce & access) != access) { -#ifdef DEBUG_TCE - fprintf(stderr, "FAIL on %d\n", i); -#endif - return -1; - } - } - - return 0; -} - -int spapr_tce_dma_write(VIOsPAPRDevice *dev, uint64_t taddr, const void *buf, - uint32_t size) -{ -#ifdef DEBUG_TCE - fprintf(stderr, "spapr_tce_dma_write taddr=0x%llx size=0x%x\n", - (unsigned long long)taddr, size); -#endif - - /* Check for 
bypass */ - if (dev->flags & VIO_PAPR_FLAG_DMA_BYPASS) { - cpu_physical_memory_write(taddr, buf, size); - return 0; - } - - while (size) { - uint64_t tce; - uint32_t lsize; - uint64_t txaddr; - - /* Check if we are in bound */ - if (taddr >= dev->rtce_window_size) { -#ifdef DEBUG_TCE - fprintf(stderr, "spapr_tce_dma_write out of bounds\n"); -#endif - return H_DEST_PARM; - } - tce = dev->rtce_table[taddr >> SPAPR_VIO_TCE_PAGE_SHIFT].tce; - - /* How much til end of page ? */ - lsize = MIN(size, ((~taddr) & SPAPR_VIO_TCE_PAGE_MASK) + 1); - - /* Check TCE */ - if (!(tce & 2)) { - return H_DEST_PARM; - } - - /* Translate */ - txaddr = (tce & ~SPAPR_VIO_TCE_PAGE_MASK) | - (taddr & SPAPR_VIO_TCE_PAGE_MASK); - -#ifdef DEBUG_TCE - fprintf(stderr, " -> write to txaddr=0x%llx, size=0x%x\n", - (unsigned long long)txaddr, lsize); -#endif - - /* Do it */ - cpu_physical_memory_write(txaddr, buf, lsize); - buf += lsize; - taddr += lsize; - size -= lsize; - } - return 0; -} - -int spapr_tce_dma_zero(VIOsPAPRDevice *dev, uint64_t taddr, uint32_t size) -{ - /* FIXME: allocating a temp buffer is nasty, but just stepping - * through writing zeroes is awkward. This will do for now. */ - uint8_t zeroes[size]; - -#ifdef DEBUG_TCE - fprintf(stderr, "spapr_tce_dma_zero taddr=0x%llx size=0x%x\n", - (unsigned long long)taddr, size); -#endif - - memset(zeroes, 0, size); - return spapr_tce_dma_write(dev, taddr, zeroes, size); -} - -void stb_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint8_t val) -{ - spapr_tce_dma_write(dev, taddr, &val, sizeof(val)); -} - -void sth_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint16_t val) -{ - val = tswap16(val); - spapr_tce_dma_write(dev, taddr, &val, sizeof(val)); -} - - -void stw_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint32_t val) -{ - val = tswap32(val); - spapr_tce_dma_write(dev, taddr, &val, sizeof(val)); -} - -void stq_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint64_t val) -{ - val = tswap64(val); - spapr_tce_dma_write(dev, taddr, &val, sizeof(val)); -} - -int spapr_tce_dma_read(VIOsPAPRDevice *dev, uint64_t taddr, void *buf, - uint32_t size) -{ -#ifdef DEBUG_TCE - fprintf(stderr, "spapr_tce_dma_write taddr=0x%llx size=0x%x\n", - (unsigned long long)taddr, size); -#endif - - /* Check for bypass */ - if (dev->flags & VIO_PAPR_FLAG_DMA_BYPASS) { - cpu_physical_memory_read(taddr, buf, size); - return 0; - } - - while (size) { - uint64_t tce; - uint32_t lsize; - uint64_t txaddr; - - /* Check if we are in bound */ - if (taddr >= dev->rtce_window_size) { -#ifdef DEBUG_TCE - fprintf(stderr, "spapr_tce_dma_read out of bounds\n"); -#endif - return H_DEST_PARM; - } - tce = dev->rtce_table[taddr >> SPAPR_VIO_TCE_PAGE_SHIFT].tce; - - /* How much til end of page ? 
*/ - lsize = MIN(size, ((~taddr) & SPAPR_VIO_TCE_PAGE_MASK) + 1); - - /* Check TCE */ - if (!(tce & 1)) { - return H_DEST_PARM; - } - - /* Translate */ - txaddr = (tce & ~SPAPR_VIO_TCE_PAGE_MASK) | - (taddr & SPAPR_VIO_TCE_PAGE_MASK); - -#ifdef DEBUG_TCE - fprintf(stderr, " -> write to txaddr=0x%llx, size=0x%x\n", - (unsigned long long)txaddr, lsize); -#endif - /* Do it */ - cpu_physical_memory_read(txaddr, buf, lsize); - buf += lsize; - taddr += lsize; - size -= lsize; - } - return H_SUCCESS; -} - -uint64_t ldq_tce(VIOsPAPRDevice *dev, uint64_t taddr) -{ - uint64_t val; - - spapr_tce_dma_read(dev, taddr, &val, sizeof(val)); - return tswap64(val); -} - -/* * CRQ handling */ static target_ulong h_reg_crq(CPUPPCState *env, sPAPREnvironment *spapr, @@ -524,7 +282,7 @@ int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq) } /* Maybe do a fast path for KVM just writing to the pages */ - rc = spapr_tce_dma_read(dev, dev->crq.qladdr + dev->crq.qnext, &byte, 1); + rc = spapr_vio_dma_read(dev, dev->crq.qladdr + dev->crq.qnext, &byte, 1); if (rc) { return rc; } @@ -532,7 +290,7 @@ int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq) return 1; } - rc = spapr_tce_dma_write(dev, dev->crq.qladdr + dev->crq.qnext + 8, + rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext + 8, &crq[8], 8); if (rc) { return rc; @@ -540,7 +298,7 @@ int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq) kvmppc_eieio(); - rc = spapr_tce_dma_write(dev, dev->crq.qladdr + dev->crq.qnext, crq, 8); + rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext, crq, 8); if (rc) { return rc; } @@ -558,13 +316,13 @@ int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq) static void spapr_vio_quiesce_one(VIOsPAPRDevice *dev) { - dev->flags &= ~VIO_PAPR_FLAG_DMA_BYPASS; + VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev); + uint32_t liobn = SPAPR_VIO_BASE_LIOBN | dev->reg; - if (dev->rtce_table) { - size_t size = (dev->rtce_window_size >> SPAPR_VIO_TCE_PAGE_SHIFT) - * sizeof(VIOsPAPR_RTCE); - memset(dev->rtce_table, 0, size); + if (dev->dma) { + spapr_tce_free(dev->dma); } + dev->dma = spapr_tce_new_dma_context(liobn, pc->rtce_window_size); dev->crq.qladdr = 0; dev->crq.qsize = 0; @@ -591,9 +349,13 @@ static void rtas_set_tce_bypass(sPAPREnvironment *spapr, uint32_t token, return; } if (enable) { - dev->flags |= VIO_PAPR_FLAG_DMA_BYPASS; + spapr_tce_free(dev->dma); + dev->dma = NULL; } else { - dev->flags &= ~VIO_PAPR_FLAG_DMA_BYPASS; + VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev); + uint32_t liobn = SPAPR_VIO_BASE_LIOBN | dev->reg; + + dev->dma = spapr_tce_new_dma_context(liobn, pc->rtce_window_size); } rtas_st(rets, 0, 0); @@ -604,7 +366,7 @@ static void rtas_quiesce(sPAPREnvironment *spapr, uint32_t token, uint32_t nret, target_ulong rets) { VIOsPAPRBus *bus = spapr->vio_bus; - DeviceState *qdev; + BusChild *kid; VIOsPAPRDevice *dev = NULL; if (nargs != 0) { @@ -612,8 +374,8 @@ static void rtas_quiesce(sPAPREnvironment *spapr, uint32_t token, return; } - QTAILQ_FOREACH(qdev, &bus->bus.children, sibling) { - dev = (VIOsPAPRDevice *)qdev; + QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { + dev = (VIOsPAPRDevice *)kid->child; spapr_vio_quiesce_one(dev); } @@ -623,7 +385,7 @@ static void rtas_quiesce(sPAPREnvironment *spapr, uint32_t token, static VIOsPAPRDevice *reg_conflict(VIOsPAPRDevice *dev) { VIOsPAPRBus *bus = DO_UPCAST(VIOsPAPRBus, bus, dev->qdev.parent_bus); - DeviceState *qdev; + BusChild *kid; VIOsPAPRDevice *other; /* @@ -631,8 +393,8 @@ static VIOsPAPRDevice 
*reg_conflict(VIOsPAPRDevice *dev) * using the requested address. We have to open code this because * the given dev might already be in the list. */ - QTAILQ_FOREACH(qdev, &bus->bus.children, sibling) { - other = DO_UPCAST(VIOsPAPRDevice, qdev, qdev); + QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { + other = DO_UPCAST(VIOsPAPRDevice, qdev, kid->child); if (other != dev && other->reg == dev->reg) { return other; @@ -660,6 +422,7 @@ static int spapr_vio_busdev_init(DeviceState *qdev) { VIOsPAPRDevice *dev = (VIOsPAPRDevice *)qdev; VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev); + uint32_t liobn; char *id; if (dev->reg != -1) { @@ -701,7 +464,8 @@ static int spapr_vio_busdev_init(DeviceState *qdev) return -1; } - rtce_init(dev); + liobn = SPAPR_VIO_BASE_LIOBN | dev->reg; + dev->dma = spapr_tce_new_dma_context(liobn, pc->rtce_window_size); return pc->init(dev); } @@ -742,16 +506,13 @@ VIOsPAPRBus *spapr_vio_bus_init(void) /* Create bus on bridge device */ - qbus = qbus_create(&spapr_vio_bus_info, dev, "spapr-vio"); + qbus = qbus_create(TYPE_SPAPR_VIO_BUS, dev, "spapr-vio"); bus = DO_UPCAST(VIOsPAPRBus, bus, qbus); bus->next_reg = 0x1000; /* hcall-vio */ spapr_register_hypercall(H_VIO_SIGNAL, h_vio_signal); - /* hcall-tce */ - spapr_register_hypercall(H_PUT_TCE, h_put_tce); - /* hcall-crq */ spapr_register_hypercall(H_REG_CRQ, h_reg_crq); spapr_register_hypercall(H_FREE_CRQ, h_free_crq); @@ -794,7 +555,8 @@ static void vio_spapr_device_class_init(ObjectClass *klass, void *data) DeviceClass *k = DEVICE_CLASS(klass); k->init = spapr_vio_busdev_init; k->reset = spapr_vio_busdev_reset; - k->bus_info = &spapr_vio_bus_info; + k->bus_type = TYPE_SPAPR_VIO_BUS; + k->props = spapr_vio_props; } static TypeInfo spapr_vio_type_info = { @@ -808,6 +570,7 @@ static TypeInfo spapr_vio_type_info = { static void spapr_vio_register_types(void) { + type_register_static(&spapr_vio_bus_info); type_register_static(&spapr_vio_bridge_info); type_register_static(&spapr_vio_type_info); } @@ -836,19 +599,20 @@ static int compare_reg(const void *p1, const void *p2) int spapr_populate_vdevice(VIOsPAPRBus *bus, void *fdt) { DeviceState *qdev, **qdevs; + BusChild *kid; int i, num, ret = 0; /* Count qdevs on the bus list */ num = 0; - QTAILQ_FOREACH(qdev, &bus->bus.children, sibling) { + QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { num++; } /* Copy out into an array of pointers */ qdevs = g_malloc(sizeof(qdev) * num); num = 0; - QTAILQ_FOREACH(qdev, &bus->bus.children, sibling) { - qdevs[num++] = qdev; + QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { + qdevs[num++] = kid->child; } /* Sort the array */ diff --git a/hw/spapr_vio.h b/hw/spapr_vio.h index 87816e456d..6f9a498ccd 100644 --- a/hw/spapr_vio.h +++ b/hw/spapr_vio.h @@ -21,16 +21,7 @@ * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
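The spapr_vio.c hunks above replace the per-device RTCE table and hand-rolled TCE translation with a DMAContext owned by each VIO device. A minimal sketch of the resulting lifecycle, using only names that appear in this series (illustration only, not an additional hunk):

    /* device init: every VIO device gets a TCE-translated DMA context, with a
     * LIOBN derived from its "reg" property and a window size taken from the
     * device class instead of the old per-device "dma-window" property */
    uint32_t liobn = SPAPR_VIO_BASE_LIOBN | dev->reg;
    dev->dma = spapr_tce_new_dma_context(liobn, pc->rtce_window_size);

    /* quiesce/reset: drop all existing mappings by recreating the context */
    if (dev->dma) {
        spapr_tce_free(dev->dma);
    }
    dev->dma = spapr_tce_new_dma_context(liobn, pc->rtce_window_size);

    /* ibm,set-tce-bypass: bypass is now expressed by having no context at all,
     * so the dma_memory_* helpers behind spapr_vio_dma_read()/write() access
     * guest memory untranslated */
    spapr_tce_free(dev->dma);
    dev->dma = NULL;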
*/ -#define SPAPR_VIO_TCE_PAGE_SHIFT 12 -#define SPAPR_VIO_TCE_PAGE_SIZE (1ULL << SPAPR_VIO_TCE_PAGE_SHIFT) -#define SPAPR_VIO_TCE_PAGE_MASK (SPAPR_VIO_TCE_PAGE_SIZE - 1) - -enum VIOsPAPR_TCEAccess { - SPAPR_TCE_FAULT = 0, - SPAPR_TCE_RO = 1, - SPAPR_TCE_WO = 2, - SPAPR_TCE_RW = 3, -}; +#include "dma.h" #define TYPE_VIO_SPAPR_DEVICE "vio-spapr-device" #define VIO_SPAPR_DEVICE(obj) \ @@ -40,11 +31,10 @@ enum VIOsPAPR_TCEAccess { #define VIO_SPAPR_DEVICE_GET_CLASS(obj) \ OBJECT_GET_CLASS(VIOsPAPRDeviceClass, (obj), TYPE_VIO_SPAPR_DEVICE) -struct VIOsPAPRDevice; +#define TYPE_SPAPR_VIO_BUS "spapr-vio-bus" +#define SPAPR_VIO_BUS(obj) OBJECT_CHECK(VIOsPAPRBus, (obj), TYPE_SPAPR_VIO_BUS) -typedef struct VIOsPAPR_RTCE { - uint64_t tce; -} VIOsPAPR_RTCE; +struct VIOsPAPRDevice; typedef struct VIOsPAPR_CRQ { uint64_t qladdr; @@ -61,6 +51,7 @@ typedef struct VIOsPAPRDeviceClass { const char *dt_name, *dt_type, *dt_compatible; target_ulong signal_mask; + uint32_t rtce_window_size; int (*init)(VIOsPAPRDevice *dev); void (*reset)(VIOsPAPRDevice *dev); int (*devnode)(VIOsPAPRDevice *dev, void *fdt, int node_off); @@ -70,20 +61,15 @@ struct VIOsPAPRDevice { DeviceState qdev; uint32_t reg; uint32_t flags; -#define VIO_PAPR_FLAG_DMA_BYPASS 0x1 qemu_irq qirq; uint32_t vio_irq_num; target_ulong signal_state; - uint32_t rtce_window_size; - VIOsPAPR_RTCE *rtce_table; - int kvmtce_fd; VIOsPAPR_CRQ crq; + DMAContext *dma; }; -#define DEFINE_SPAPR_PROPERTIES(type, field, default_dma_window) \ - DEFINE_PROP_UINT32("reg", type, field.reg, -1), \ - DEFINE_PROP_UINT32("dma-window", type, field.rtce_window_size, \ - default_dma_window) +#define DEFINE_SPAPR_PROPERTIES(type, field) \ + DEFINE_PROP_UINT32("reg", type, field.reg, -1) struct VIOsPAPRBus { BusState bus; @@ -99,20 +85,38 @@ extern int spapr_populate_chosen_stdout(void *fdt, VIOsPAPRBus *bus); extern int spapr_vio_signal(VIOsPAPRDevice *dev, target_ulong mode); -int spapr_vio_check_tces(VIOsPAPRDevice *dev, target_ulong ioba, - target_ulong len, - enum VIOsPAPR_TCEAccess access); - -int spapr_tce_dma_read(VIOsPAPRDevice *dev, uint64_t taddr, - void *buf, uint32_t size); -int spapr_tce_dma_write(VIOsPAPRDevice *dev, uint64_t taddr, - const void *buf, uint32_t size); -int spapr_tce_dma_zero(VIOsPAPRDevice *dev, uint64_t taddr, uint32_t size); -void stb_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint8_t val); -void sth_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint16_t val); -void stw_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint32_t val); -void stq_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint64_t val); -uint64_t ldq_tce(VIOsPAPRDevice *dev, uint64_t taddr); +static inline bool spapr_vio_dma_valid(VIOsPAPRDevice *dev, uint64_t taddr, + uint32_t size, DMADirection dir) +{ + return dma_memory_valid(dev->dma, taddr, size, dir); +} + +static inline int spapr_vio_dma_read(VIOsPAPRDevice *dev, uint64_t taddr, + void *buf, uint32_t size) +{ + return (dma_memory_read(dev->dma, taddr, buf, size) != 0) ? + H_DEST_PARM : H_SUCCESS; +} + +static inline int spapr_vio_dma_write(VIOsPAPRDevice *dev, uint64_t taddr, + const void *buf, uint32_t size) +{ + return (dma_memory_write(dev->dma, taddr, buf, size) != 0) ? + H_DEST_PARM : H_SUCCESS; +} + +static inline int spapr_vio_dma_set(VIOsPAPRDevice *dev, uint64_t taddr, + uint8_t c, uint32_t size) +{ + return (dma_memory_set(dev->dma, taddr, c, size) != 0) ? 
+ H_DEST_PARM : H_SUCCESS; +} + +#define vio_stb(_dev, _addr, _val) (stb_dma((_dev)->dma, (_addr), (_val))) +#define vio_sth(_dev, _addr, _val) (stw_be_dma((_dev)->dma, (_addr), (_val))) +#define vio_stl(_dev, _addr, _val) (stl_be_dma((_dev)->dma, (_addr), (_val))) +#define vio_stq(_dev, _addr, _val) (stq_be_dma((_dev)->dma, (_addr), (_val))) +#define vio_ldq(_dev, _addr) (ldq_be_dma((_dev)->dma, (_addr))) int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq); diff --git a/hw/spapr_vscsi.c b/hw/spapr_vscsi.c index 037867ab4f..3cf5844e0f 100644 --- a/hw/spapr_vscsi.c +++ b/hw/spapr_vscsi.c @@ -165,7 +165,7 @@ static int vscsi_send_iu(VSCSIState *s, vscsi_req *req, long rc, rc1; /* First copy the SRP */ - rc = spapr_tce_dma_write(&s->vdev, req->crq.s.IU_data_ptr, + rc = spapr_vio_dma_write(&s->vdev, req->crq.s.IU_data_ptr, &req->iu, length); if (rc) { fprintf(stderr, "vscsi_send_iu: DMA write failure !\n"); @@ -281,9 +281,9 @@ static int vscsi_srp_direct_data(VSCSIState *s, vscsi_req *req, llen = MIN(len, md->len); if (llen) { if (req->writing) { /* writing = to device = reading from memory */ - rc = spapr_tce_dma_read(&s->vdev, md->va, buf, llen); + rc = spapr_vio_dma_read(&s->vdev, md->va, buf, llen); } else { - rc = spapr_tce_dma_write(&s->vdev, md->va, buf, llen); + rc = spapr_vio_dma_write(&s->vdev, md->va, buf, llen); } } md->len -= llen; @@ -329,10 +329,11 @@ static int vscsi_srp_indirect_data(VSCSIState *s, vscsi_req *req, md = req->cur_desc = &req->ext_desc; dprintf("VSCSI: Reading desc from 0x%llx\n", (unsigned long long)td->va); - rc = spapr_tce_dma_read(&s->vdev, td->va, md, + rc = spapr_vio_dma_read(&s->vdev, td->va, md, sizeof(struct srp_direct_buf)); if (rc) { - dprintf("VSCSI: tce_dma_read -> %d reading ext_desc\n", rc); + dprintf("VSCSI: spapr_vio_dma_read -> %d reading ext_desc\n", + rc); break; } vscsi_swap_desc(md); @@ -345,12 +346,12 @@ static int vscsi_srp_indirect_data(VSCSIState *s, vscsi_req *req, /* Perform transfer */ llen = MIN(len, md->len); if (req->writing) { /* writing = to device = reading from memory */ - rc = spapr_tce_dma_read(&s->vdev, md->va, buf, llen); + rc = spapr_vio_dma_read(&s->vdev, md->va, buf, llen); } else { - rc = spapr_tce_dma_write(&s->vdev, md->va, buf, llen); + rc = spapr_vio_dma_write(&s->vdev, md->va, buf, llen); } if (rc) { - dprintf("VSCSI: tce_dma_r/w(%d) -> %d\n", req->writing, rc); + dprintf("VSCSI: spapr_vio_dma_r/w(%d) -> %d\n", req->writing, rc); break; } dprintf("VSCSI: data: %02x %02x %02x %02x...\n", @@ -728,7 +729,7 @@ static int vscsi_send_adapter_info(VSCSIState *s, vscsi_req *req) sinfo = &req->iu.mad.adapter_info; #if 0 /* What for ? 
*/ - rc = spapr_tce_dma_read(&s->vdev, be64_to_cpu(sinfo->buffer), + rc = spapr_vio_dma_read(&s->vdev, be64_to_cpu(sinfo->buffer), &info, be16_to_cpu(sinfo->common.length)); if (rc) { fprintf(stderr, "vscsi_send_adapter_info: DMA read failure !\n"); @@ -742,7 +743,7 @@ static int vscsi_send_adapter_info(VSCSIState *s, vscsi_req *req) info.os_type = cpu_to_be32(2); info.port_max_txu[0] = cpu_to_be32(VSCSI_MAX_SECTORS << 9); - rc = spapr_tce_dma_write(&s->vdev, be64_to_cpu(sinfo->buffer), + rc = spapr_vio_dma_write(&s->vdev, be64_to_cpu(sinfo->buffer), &info, be16_to_cpu(sinfo->common.length)); if (rc) { fprintf(stderr, "vscsi_send_adapter_info: DMA write failure !\n"); @@ -800,14 +801,16 @@ static void vscsi_got_payload(VSCSIState *s, vscsi_crq *crq) if (crq->s.IU_length > sizeof(union viosrp_iu)) { fprintf(stderr, "VSCSI: SRP IU too long (%d bytes) !\n", crq->s.IU_length); + vscsi_put_req(req); return; } /* XXX Handle failure differently ? */ - if (spapr_tce_dma_read(&s->vdev, crq->s.IU_data_ptr, &req->iu, + if (spapr_vio_dma_read(&s->vdev, crq->s.IU_data_ptr, &req->iu, crq->s.IU_length)) { fprintf(stderr, "vscsi_got_payload: DMA read failure !\n"); - g_free(req); + vscsi_put_req(req); + return; } memcpy(&req->crq, crq, sizeof(vscsi_crq)); @@ -945,7 +948,7 @@ static int spapr_vscsi_devnode(VIOsPAPRDevice *dev, void *fdt, int node_off) } static Property spapr_vscsi_properties[] = { - DEFINE_SPAPR_PROPERTIES(VSCSIState, vdev, 0x10000000), + DEFINE_SPAPR_PROPERTIES(VSCSIState, vdev), DEFINE_PROP_END_OF_LIST(), }; @@ -962,6 +965,7 @@ static void spapr_vscsi_class_init(ObjectClass *klass, void *data) k->dt_compatible = "IBM,v-scsi"; k->signal_mask = 0x00000001; dc->props = spapr_vscsi_properties; + k->rtce_window_size = 0x10000000; } static TypeInfo spapr_vscsi_info = { diff --git a/hw/spapr_vty.c b/hw/spapr_vty.c index c9674f36a6..99e52cc6b7 100644 --- a/hw/spapr_vty.c +++ b/hw/spapr_vty.c @@ -133,7 +133,7 @@ void spapr_vty_create(VIOsPAPRBus *bus, CharDriverState *chardev) } static Property spapr_vty_properties[] = { - DEFINE_SPAPR_PROPERTIES(VIOsPAPRVTYDevice, sdev, 0), + DEFINE_SPAPR_PROPERTIES(VIOsPAPRVTYDevice, sdev), DEFINE_PROP_CHR("chardev", VIOsPAPRVTYDevice, chardev), DEFINE_PROP_END_OF_LIST(), }; @@ -160,7 +160,7 @@ static TypeInfo spapr_vty_info = { VIOsPAPRDevice *spapr_vty_get_default(VIOsPAPRBus *bus) { VIOsPAPRDevice *sdev, *selected; - DeviceState *iter; + BusChild *kid; /* * To avoid the console bouncing around we want one VTY to be @@ -169,7 +169,9 @@ VIOsPAPRDevice *spapr_vty_get_default(VIOsPAPRBus *bus) */ selected = NULL; - QTAILQ_FOREACH(iter, &bus->bus.children, sibling) { + QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { + DeviceState *iter = kid->child; + /* Only look at VTY devices */ if (!object_dynamic_cast(OBJECT(iter), "spapr-vty")) { continue; diff --git a/hw/sparc/Makefile.objs b/hw/sparc/Makefile.objs new file mode 100644 index 0000000000..a39a511c52 --- /dev/null +++ b/hw/sparc/Makefile.objs @@ -0,0 +1,8 @@ +obj-y = sun4m.o lance.o tcx.o sun4m_iommu.o slavio_intctl.o +obj-y += slavio_timer.o slavio_misc.o sparc32_dma.o +obj-y += cs4231.o eccmemctl.o sbi.o sun4c_intctl.o leon3.o + +# GRLIB +obj-y += grlib_gptimer.o grlib_irqmp.o grlib_apbuart.o + +obj-y := $(addprefix ../,$(obj-y)) diff --git a/hw/sparc64/Makefile.objs b/hw/sparc64/Makefile.objs new file mode 100644 index 0000000000..8c65fc4215 --- /dev/null +++ b/hw/sparc64/Makefile.objs @@ -0,0 +1,4 @@ +obj-y = sun4u.o apb_pci.o +obj-y += mc146818rtc.o + +obj-y := $(addprefix ../,$(obj-y)) diff --git 
a/hw/spitz.c b/hw/spitz.c index 9042d44596..20e7835191 100644 --- a/hw/spitz.c +++ b/hw/spitz.c @@ -884,7 +884,7 @@ static void spitz_common_init(ram_addr_t ram_size, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model, enum spitz_model_e model, int arm_id) { - PXA2xxState *cpu; + PXA2xxState *mpu; DeviceState *scp0, *scp1 = NULL; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *rom = g_new(MemoryRegion, 1); @@ -893,9 +893,9 @@ static void spitz_common_init(ram_addr_t ram_size, cpu_model = (model == terrier) ? "pxa270-c5" : "pxa270-c0"; /* Setup CPU & memory */ - cpu = pxa270_init(address_space_mem, spitz_binfo.ram_size, cpu_model); + mpu = pxa270_init(address_space_mem, spitz_binfo.ram_size, cpu_model); - sl_flash_register(cpu, (model == spitz) ? FLASH_128M : FLASH_1024M); + sl_flash_register(mpu, (model == spitz) ? FLASH_128M : FLASH_1024M); memory_region_init_ram(rom, "spitz.rom", SPITZ_ROM); vmstate_register_ram_global(rom); @@ -903,36 +903,36 @@ static void spitz_common_init(ram_addr_t ram_size, memory_region_add_subregion(address_space_mem, 0, rom); /* Setup peripherals */ - spitz_keyboard_register(cpu); + spitz_keyboard_register(mpu); - spitz_ssp_attach(cpu); + spitz_ssp_attach(mpu); scp0 = sysbus_create_simple("scoop", 0x10800000, NULL); if (model != akita) { scp1 = sysbus_create_simple("scoop", 0x08800040, NULL); } - spitz_scoop_gpio_setup(cpu, scp0, scp1); + spitz_scoop_gpio_setup(mpu, scp0, scp1); - spitz_gpio_setup(cpu, (model == akita) ? 1 : 2); + spitz_gpio_setup(mpu, (model == akita) ? 1 : 2); - spitz_i2c_setup(cpu); + spitz_i2c_setup(mpu); if (model == akita) - spitz_akita_i2c_setup(cpu); + spitz_akita_i2c_setup(mpu); if (model == terrier) /* A 6.0 GB microdrive is permanently sitting in CF slot 1. */ - spitz_microdrive_attach(cpu, 1); + spitz_microdrive_attach(mpu, 1); else if (model != akita) /* A 4.0 GB microdrive is permanently sitting in CF slot 0. 
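One pattern worth calling out once: throughout this series (spapr_pci and spapr_vio above; SSI, spapr_vty and the USB buses below) child devices are no longer reached by walking DeviceState entries of the bus directly; the bus children list now holds BusChild wrappers. The recurring replacement, sketched here against a generic BusState *bus rather than any one of the converted buses:

    BusChild *kid;

    QTAILQ_FOREACH(kid, &bus->children, sibling) {
        DeviceState *qdev = kid->child;   /* the device now hangs off the wrapper */
        /* ... use qdev exactly as the old loop body did ... */
    }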
*/ - spitz_microdrive_attach(cpu, 0); + spitz_microdrive_attach(mpu, 0); spitz_binfo.kernel_filename = kernel_filename; spitz_binfo.kernel_cmdline = kernel_cmdline; spitz_binfo.initrd_filename = initrd_filename; spitz_binfo.board_id = arm_id; - arm_load_kernel(&cpu->cpu->env, &spitz_binfo); + arm_load_kernel(mpu->cpu, &spitz_binfo); sl_bootparam_write(SL_PXA_PARAM_BASE); } @@ -16,9 +16,13 @@ struct SSIBus { BusState qbus; }; -static struct BusInfo ssi_bus_info = { - .name = "SSI", - .size = sizeof(SSIBus), +#define TYPE_SSI_BUS "SSI" +#define SSI_BUS(obj) OBJECT_CHECK(SSIBus, (obj), TYPE_SSI_BUS) + +static const TypeInfo ssi_bus_info = { + .name = TYPE_SSI_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(SSIBus), }; static int ssi_slave_init(DeviceState *dev) @@ -26,10 +30,11 @@ static int ssi_slave_init(DeviceState *dev) SSISlave *s = SSI_SLAVE(dev); SSISlaveClass *ssc = SSI_SLAVE_GET_CLASS(s); SSIBus *bus; + BusChild *kid; bus = FROM_QBUS(SSIBus, qdev_get_parent_bus(dev)); - if (QTAILQ_FIRST(&bus->qbus.children) != dev - || QTAILQ_NEXT(dev, sibling) != NULL) { + kid = QTAILQ_FIRST(&bus->qbus.children); + if (kid->child != dev || QTAILQ_NEXT(kid, sibling) != NULL) { hw_error("Too many devices on SSI bus"); } @@ -40,7 +45,7 @@ static void ssi_slave_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->init = ssi_slave_init; - dc->bus_info = &ssi_bus_info; + dc->bus_type = TYPE_SSI_BUS; } static TypeInfo ssi_slave_info = { @@ -62,26 +67,28 @@ DeviceState *ssi_create_slave(SSIBus *bus, const char *name) SSIBus *ssi_create_bus(DeviceState *parent, const char *name) { BusState *bus; - bus = qbus_create(&ssi_bus_info, parent, name); + bus = qbus_create(TYPE_SSI_BUS, parent, name); return FROM_QBUS(SSIBus, bus); } uint32_t ssi_transfer(SSIBus *bus, uint32_t val) { - DeviceState *dev; + BusChild *kid; SSISlave *slave; SSISlaveClass *ssc; - dev = QTAILQ_FIRST(&bus->qbus.children); - if (!dev) { + + kid = QTAILQ_FIRST(&bus->qbus.children); + if (!kid) { return 0; } - slave = SSI_SLAVE(dev); + slave = SSI_SLAVE(kid->child); ssc = SSI_SLAVE_GET_CLASS(slave); return ssc->transfer(slave, val); } static void ssi_slave_register_types(void) { + type_register_static(&ssi_bus_info); type_register_static(&ssi_slave_info); } diff --git a/hw/strongarm.c b/hw/strongarm.c index 1b15f399f1..7150eeb2db 100644 --- a/hw/strongarm.c +++ b/hw/strongarm.c @@ -1563,9 +1563,9 @@ StrongARMState *sa1110_init(MemoryRegion *sysmem, exit(1); } - s->env = cpu_init(rev); + s->cpu = cpu_arm_init(rev); - if (!s->env) { + if (!s->cpu) { error_report("Unable to find CPU definition"); exit(1); } @@ -1574,7 +1574,7 @@ StrongARMState *sa1110_init(MemoryRegion *sysmem, vmstate_register_ram_global(&s->sdram); memory_region_add_subregion(sysmem, SA_SDCS0, &s->sdram); - pic = arm_pic_init_cpu(s->env); + pic = arm_pic_init_cpu(s->cpu); s->pic = sysbus_create_varargs("strongarm_pic", 0x90050000, pic[ARM_PIC_CPU_IRQ], pic[ARM_PIC_CPU_FIQ], NULL); diff --git a/hw/strongarm.h b/hw/strongarm.h index 02acac3db1..d30dd6ac5e 100644 --- a/hw/strongarm.h +++ b/hw/strongarm.h @@ -53,7 +53,7 @@ enum { }; typedef struct { - CPUARMState *env; + ARMCPU *cpu; MemoryRegion sdram; DeviceState *pic; DeviceState *gpio; diff --git a/hw/sysbus.c b/hw/sysbus.c index db4efccef4..9d8b1eaf7d 100644 --- a/hw/sysbus.c +++ b/hw/sysbus.c @@ -24,11 +24,19 @@ static void sysbus_dev_print(Monitor *mon, DeviceState *dev, int indent); static char *sysbus_get_fw_dev_path(DeviceState *dev); -struct BusInfo system_bus_info = { - .name = 
"System", - .size = sizeof(BusState), - .print_dev = sysbus_dev_print, - .get_fw_dev_path = sysbus_get_fw_dev_path, +static void system_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->print_dev = sysbus_dev_print; + k->get_fw_dev_path = sysbus_get_fw_dev_path; +} + +static const TypeInfo system_bus_info = { + .name = TYPE_SYSTEM_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(BusState), + .class_init = system_bus_class_init, }; void sysbus_connect_irq(SysBusDevice *dev, int n, qemu_irq irq) @@ -244,7 +252,7 @@ static void sysbus_device_class_init(ObjectClass *klass, void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->init = sysbus_device_init; - k->bus_info = &system_bus_info; + k->bus_type = TYPE_SYSTEM_BUS; } static TypeInfo sysbus_device_type_info = { @@ -256,8 +264,33 @@ static TypeInfo sysbus_device_type_info = { .class_init = sysbus_device_class_init, }; +/* This is a nasty hack to allow passing a NULL bus to qdev_create. */ +static BusState *main_system_bus; + +static void main_system_bus_create(void) +{ + /* assign main_system_bus before qbus_create_inplace() + * in order to make "if (bus != sysbus_get_default())" work */ + main_system_bus = g_malloc0(system_bus_info.instance_size); + qbus_create_inplace(main_system_bus, TYPE_SYSTEM_BUS, NULL, + "main-system-bus"); + main_system_bus->glib_allocated = true; + object_property_add_child(container_get(qdev_get_machine(), + "/unattached"), + "sysbus", OBJECT(main_system_bus), NULL); +} + +BusState *sysbus_get_default(void) +{ + if (!main_system_bus) { + main_system_bus_create(); + } + return main_system_bus; +} + static void sysbus_register_types(void) { + type_register_static(&system_bus_info); type_register_static(&sysbus_device_type_info); } diff --git a/hw/sysbus.h b/hw/sysbus.h index 22555cd443..acfbcfba52 100644 --- a/hw/sysbus.h +++ b/hw/sysbus.h @@ -10,6 +10,9 @@ #define QDEV_MAX_PIO 32 #define QDEV_MAX_IRQ 512 +#define TYPE_SYSTEM_BUS "System" +#define SYSTEM_BUS(obj) OBJECT_CHECK(IDEBus, (obj), TYPE_IDE_BUS) + typedef struct SysBusDevice SysBusDevice; #define TYPE_SYS_BUS_DEVICE "sys-bus-device" @@ -212,14 +212,14 @@ static void tosa_init(ram_addr_t ram_size, { MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *rom = g_new(MemoryRegion, 1); - PXA2xxState *cpu; + PXA2xxState *mpu; TC6393xbState *tmio; DeviceState *scp0, *scp1; if (!cpu_model) cpu_model = "pxa255"; - cpu = pxa255_init(address_space_mem, tosa_binfo.ram_size); + mpu = pxa255_init(address_space_mem, tosa_binfo.ram_size); memory_region_init_ram(rom, "tosa.rom", TOSA_ROM); vmstate_register_ram_global(rom); @@ -227,22 +227,22 @@ static void tosa_init(ram_addr_t ram_size, memory_region_add_subregion(address_space_mem, 0, rom); tmio = tc6393xb_init(address_space_mem, 0x10000000, - qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_TC6393XB_INT)); + qdev_get_gpio_in(mpu->gpio, TOSA_GPIO_TC6393XB_INT)); scp0 = sysbus_create_simple("scoop", 0x08800000, NULL); scp1 = sysbus_create_simple("scoop", 0x14800040, NULL); - tosa_gpio_setup(cpu, scp0, scp1, tmio); + tosa_gpio_setup(mpu, scp0, scp1, tmio); - tosa_microdrive_attach(cpu); + tosa_microdrive_attach(mpu); - tosa_tg_init(cpu); + tosa_tg_init(mpu); tosa_binfo.kernel_filename = kernel_filename; tosa_binfo.kernel_cmdline = kernel_cmdline; tosa_binfo.initrd_filename = initrd_filename; tosa_binfo.board_id = 0x208; - arm_load_kernel(&cpu->cpu->env, &tosa_binfo); + arm_load_kernel(mpu->cpu, &tosa_binfo); sl_bootparam_write(SL_PXA_PARAM_BASE); } @@ -145,6 +145,8 @@ #define 
USB_ENDPOINT_XFER_INT 3 #define USB_ENDPOINT_XFER_INVALID 255 +#define USB_INTERFACE_INVALID 255 + typedef struct USBBus USBBus; typedef struct USBBusOps USBBusOps; typedef struct USBPort USBPort; @@ -345,7 +347,7 @@ void usb_packet_check_state(USBPacket *p, USBPacketState expected); void usb_packet_setup(USBPacket *p, int pid, USBEndpoint *ep); void usb_packet_addbuf(USBPacket *p, void *ptr, size_t len); int usb_packet_map(USBPacket *p, QEMUSGList *sgl); -void usb_packet_unmap(USBPacket *p); +void usb_packet_unmap(USBPacket *p, QEMUSGList *sgl); void usb_packet_copy(USBPacket *p, void *ptr, size_t bytes); void usb_packet_skip(USBPacket *p, size_t bytes); void usb_packet_cleanup(USBPacket *p); @@ -363,6 +365,7 @@ void usb_packet_complete(USBDevice *dev, USBPacket *p); void usb_cancel_packet(USBPacket * p); void usb_ep_init(USBDevice *dev); +void usb_ep_reset(USBDevice *dev); void usb_ep_dump(USBDevice *dev); struct USBEndpoint *usb_ep_get(USBDevice *dev, int pid, int ep); uint8_t usb_ep_get_type(USBDevice *dev, int pid, int ep); @@ -421,6 +424,9 @@ void musb_set_size(MUSBState *s, int epnum, int size, int is_tx); /* usb-bus.c */ +#define TYPE_USB_BUS "usb-bus" +#define USB_BUS(obj) OBJECT_CHECK(USBBus, (obj), TYPE_USB_BUS) + struct USBBus { BusState qbus; USBBusOps *ops; diff --git a/hw/usb/Makefile.objs b/hw/usb/Makefile.objs new file mode 100644 index 0000000000..9c7ddf5cb2 --- /dev/null +++ b/hw/usb/Makefile.objs @@ -0,0 +1,13 @@ +hw-obj-$(CONFIG_USB_UHCI) += hcd-uhci.o +hw-obj-$(CONFIG_USB_OHCI) += hcd-ohci.o +hw-obj-$(CONFIG_USB_EHCI) += hcd-ehci.o +hw-obj-$(CONFIG_USB_XHCI) += hcd-xhci.o +hw-obj-y += libhw.o + +hw-obj-$(CONFIG_SMARTCARD) += dev-smartcard-reader.o +hw-obj-$(CONFIG_USB_REDIR) += redirect.o + +common-obj-y += core.o bus.o desc.o dev-hub.o +common-obj-y += host-$(HOST_USB).o dev-bluetooth.o +common-obj-y += dev-hid.o dev-storage.o dev-wacom.o +common-obj-y += dev-serial.o dev-network.o dev-audio.o diff --git a/hw/usb/bus.c b/hw/usb/bus.c index 2068640a58..b649360dd3 100644 --- a/hw/usb/bus.c +++ b/hw/usb/bus.c @@ -11,26 +11,49 @@ static char *usb_get_dev_path(DeviceState *dev); static char *usb_get_fw_dev_path(DeviceState *qdev); static int usb_qdev_exit(DeviceState *qdev); -static struct BusInfo usb_bus_info = { - .name = "USB", - .size = sizeof(USBBus), - .print_dev = usb_bus_dev_print, - .get_dev_path = usb_get_dev_path, - .get_fw_dev_path = usb_get_fw_dev_path, - .props = (Property[]) { - DEFINE_PROP_STRING("port", USBDevice, port_path), - DEFINE_PROP_BIT("full-path", USBDevice, flags, - USB_DEV_FLAG_FULL_PATH, true), - DEFINE_PROP_END_OF_LIST() - }, +static Property usb_props[] = { + DEFINE_PROP_STRING("port", USBDevice, port_path), + DEFINE_PROP_BIT("full-path", USBDevice, flags, + USB_DEV_FLAG_FULL_PATH, true), + DEFINE_PROP_END_OF_LIST() }; + +static void usb_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->print_dev = usb_bus_dev_print; + k->get_dev_path = usb_get_dev_path; + k->get_fw_dev_path = usb_get_fw_dev_path; +} + +static const TypeInfo usb_bus_info = { + .name = TYPE_USB_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(USBBus), + .class_init = usb_bus_class_init, +}; + static int next_usb_bus = 0; static QTAILQ_HEAD(, USBBus) busses = QTAILQ_HEAD_INITIALIZER(busses); +static int usb_device_post_load(void *opaque, int version_id) +{ + USBDevice *dev = opaque; + + if (dev->state == USB_STATE_NOTATTACHED) { + dev->attached = 0; + } else { + dev->attached = 1; + } + return 0; +} + const VMStateDescription 
vmstate_usb_device = { .name = "USBDevice", .version_id = 1, .minimum_version_id = 1, + .post_load = usb_device_post_load, .fields = (VMStateField []) { VMSTATE_UINT8(addr, USBDevice), VMSTATE_INT32(state, USBDevice), @@ -45,7 +68,7 @@ const VMStateDescription vmstate_usb_device = { void usb_bus_new(USBBus *bus, USBBusOps *ops, DeviceState *host) { - qbus_create_inplace(&bus->qbus, &usb_bus_info, host, NULL); + qbus_create_inplace(&bus->qbus, TYPE_USB_BUS, host, NULL); bus->ops = ops; bus->busnr = next_usb_bus++; bus->qbus.allow_hotplug = 1; /* Yes, we can */ @@ -465,9 +488,8 @@ static char *usb_get_dev_path(DeviceState *qdev) DeviceState *hcd = qdev->parent_bus->parent; char *id = NULL; - if ((dev->flags & (1 << USB_DEV_FLAG_FULL_PATH)) && - hcd && hcd->parent_bus && hcd->parent_bus->info->get_dev_path) { - id = hcd->parent_bus->info->get_dev_path(hcd); + if (dev->flags & (1 << USB_DEV_FLAG_FULL_PATH)) { + id = qdev_get_dev_path(hcd); } if (id) { char *ret = g_strdup_printf("%s/%s", id, dev->port->path); @@ -576,10 +598,11 @@ USBDevice *usbdevice_create(const char *cmdline) static void usb_device_class_init(ObjectClass *klass, void *data) { DeviceClass *k = DEVICE_CLASS(klass); - k->bus_info = &usb_bus_info; + k->bus_type = TYPE_USB_BUS; k->init = usb_qdev_init; k->unplug = qdev_simple_unplug_cb; k->exit = usb_qdev_exit; + k->props = usb_props; } static TypeInfo usb_device_type_info = { @@ -593,6 +616,7 @@ static TypeInfo usb_device_type_info = { static void usb_register_types(void) { + type_register_static(&usb_bus_info); type_register_static(&usb_device_type_info); } diff --git a/hw/usb/core.c b/hw/usb/core.c index 2641685a32..01a7622837 100644 --- a/hw/usb/core.c +++ b/hw/usb/core.c @@ -550,7 +550,7 @@ void usb_packet_cleanup(USBPacket *p) qemu_iovec_destroy(&p->iov); } -void usb_ep_init(USBDevice *dev) +void usb_ep_reset(USBDevice *dev) { int ep; @@ -559,7 +559,6 @@ void usb_ep_init(USBDevice *dev) dev->ep_ctl.ifnum = 0; dev->ep_ctl.dev = dev; dev->ep_ctl.pipeline = false; - QTAILQ_INIT(&dev->ep_ctl.queue); for (ep = 0; ep < USB_MAX_ENDPOINTS; ep++) { dev->ep_in[ep].nr = ep + 1; dev->ep_out[ep].nr = ep + 1; @@ -567,12 +566,22 @@ void usb_ep_init(USBDevice *dev) dev->ep_out[ep].pid = USB_TOKEN_OUT; dev->ep_in[ep].type = USB_ENDPOINT_XFER_INVALID; dev->ep_out[ep].type = USB_ENDPOINT_XFER_INVALID; - dev->ep_in[ep].ifnum = 0; - dev->ep_out[ep].ifnum = 0; + dev->ep_in[ep].ifnum = USB_INTERFACE_INVALID; + dev->ep_out[ep].ifnum = USB_INTERFACE_INVALID; dev->ep_in[ep].dev = dev; dev->ep_out[ep].dev = dev; dev->ep_in[ep].pipeline = false; dev->ep_out[ep].pipeline = false; + } +} + +void usb_ep_init(USBDevice *dev) +{ + int ep; + + usb_ep_reset(dev); + QTAILQ_INIT(&dev->ep_ctl.queue); + for (ep = 0; ep < USB_MAX_ENDPOINTS; ep++) { QTAILQ_INIT(&dev->ep_in[ep].queue); QTAILQ_INIT(&dev->ep_out[ep].queue); } diff --git a/hw/usb/desc.c b/hw/usb/desc.c index e8a3c6af3d..0a9d3c9f60 100644 --- a/hw/usb/desc.c +++ b/hw/usb/desc.c @@ -432,12 +432,13 @@ void usb_desc_create_serial(USBDevice *dev) const USBDesc *desc = usb_device_get_usb_desc(dev); int index = desc->id.iSerialNumber; char serial[64]; + char *path; int dst; assert(index != 0 && desc->str[index] != NULL); dst = snprintf(serial, sizeof(serial), "%s", desc->str[index]); - if (hcd && hcd->parent_bus && hcd->parent_bus->info->get_dev_path) { - char *path = hcd->parent_bus->info->get_dev_path(hcd); + path = qdev_get_dev_path(hcd); + if (path) { dst += snprintf(serial+dst, sizeof(serial)-dst, "-%s", path); } dst += snprintf(serial+dst, 
sizeof(serial)-dst, "-%s", dev->port->path); diff --git a/hw/usb/dev-bluetooth.c b/hw/usb/dev-bluetooth.c index 6b74eff4ad..55bc19184b 100644 --- a/hw/usb/dev-bluetooth.c +++ b/hw/usb/dev-bluetooth.c @@ -57,7 +57,7 @@ enum { }; static const USBDescStrings desc_strings = { - [STR_MANUFACTURER] = "QEMU " QEMU_VERSION, + [STR_MANUFACTURER] = "QEMU", [STR_SERIALNUMBER] = "1", }; diff --git a/hw/usb/dev-hid.c b/hw/usb/dev-hid.c index f29544d954..b3dcd23109 100644 --- a/hw/usb/dev-hid.c +++ b/hw/usb/dev-hid.c @@ -60,7 +60,7 @@ enum { }; static const USBDescStrings desc_strings = { - [STR_MANUFACTURER] = "QEMU " QEMU_VERSION, + [STR_MANUFACTURER] = "QEMU", [STR_PRODUCT_MOUSE] = "QEMU USB Mouse", [STR_PRODUCT_TABLET] = "QEMU USB Tablet", [STR_PRODUCT_KEYBOARD] = "QEMU USB Keyboard", diff --git a/hw/usb/dev-hub.c b/hw/usb/dev-hub.c index b5962da72a..8fd30df0e6 100644 --- a/hw/usb/dev-hub.c +++ b/hw/usb/dev-hub.c @@ -90,7 +90,7 @@ enum { }; static const USBDescStrings desc_strings = { - [STR_MANUFACTURER] = "QEMU " QEMU_VERSION, + [STR_MANUFACTURER] = "QEMU", [STR_PRODUCT] = "QEMU USB Hub", [STR_SERIALNUMBER] = "314159", }; diff --git a/hw/usb/dev-serial.c b/hw/usb/dev-serial.c index 56743ee020..8aa655286b 100644 --- a/hw/usb/dev-serial.c +++ b/hw/usb/dev-serial.c @@ -111,7 +111,7 @@ enum { }; static const USBDescStrings desc_strings = { - [STR_MANUFACTURER] = "QEMU " QEMU_VERSION, + [STR_MANUFACTURER] = "QEMU", [STR_PRODUCT_SERIAL] = "QEMU USB SERIAL", [STR_PRODUCT_BRAILLE] = "QEMU USB BRAILLE", [STR_SERIALNUMBER] = "1", diff --git a/hw/usb/dev-smartcard-reader.c b/hw/usb/dev-smartcard-reader.c index 3b7604e8b1..1ea079176a 100644 --- a/hw/usb/dev-smartcard-reader.c +++ b/hw/usb/dev-smartcard-reader.c @@ -81,7 +81,7 @@ do { \ #define CCID_CONTROL_GET_DATA_RATES 0x3 #define CCID_PRODUCT_DESCRIPTION "QEMU USB CCID" -#define CCID_VENDOR_DESCRIPTION "QEMU " QEMU_VERSION +#define CCID_VENDOR_DESCRIPTION "QEMU" #define CCID_INTERFACE_NAME "CCID Interface" #define CCID_SERIAL_NUMBER_STRING "1" /* @@ -401,7 +401,7 @@ enum { }; static const USBDescStrings desc_strings = { - [STR_MANUFACTURER] = "QEMU " QEMU_VERSION, + [STR_MANUFACTURER] = "QEMU", [STR_PRODUCT] = "QEMU USB CCID", [STR_SERIALNUMBER] = "1", [STR_INTERFACE] = "CCID Interface", @@ -1055,13 +1055,18 @@ static Answer *ccid_peek_next_answer(USBCCIDState *s) : &s->pending_answers[s->pending_answers_start % PENDING_ANSWERS_NUM]; } -static struct BusInfo ccid_bus_info = { - .name = "ccid-bus", - .size = sizeof(CCIDBus), - .props = (Property[]) { - DEFINE_PROP_UINT32("slot", struct CCIDCardState, slot, 0), - DEFINE_PROP_END_OF_LIST(), - } +static Property ccid_props[] = { + DEFINE_PROP_UINT32("slot", struct CCIDCardState, slot, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +#define TYPE_CCID_BUS "ccid-bus" +#define CCID_BUS(obj) OBJECT_CHECK(CCIDBus, (obj), TYPE_CCID_BUS) + +static const TypeInfo ccid_bus_info = { + .name = TYPE_CCID_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(CCIDBus), }; void ccid_card_send_apdu_to_guest(CCIDCardState *card, @@ -1191,7 +1196,7 @@ static int ccid_initfn(USBDevice *dev) usb_desc_create_serial(dev); usb_desc_init(dev); - qbus_create_inplace(&s->bus.qbus, &ccid_bus_info, &dev->qdev, NULL); + qbus_create_inplace(&s->bus.qbus, TYPE_CCID_BUS, &dev->qdev, NULL); s->intr = usb_ep_get(dev, USB_TOKEN_IN, CCID_INT_IN_EP); s->bus.qbus.allow_hotplug = 1; s->card = NULL; @@ -1342,9 +1347,10 @@ static TypeInfo ccid_info = { static void ccid_card_class_init(ObjectClass *klass, void *data) { DeviceClass *k = DEVICE_CLASS(klass); - 
k->bus_info = &ccid_bus_info; + k->bus_type = TYPE_CCID_BUS; k->init = ccid_card_init; k->exit = ccid_card_exit; + k->props = ccid_props; } static TypeInfo ccid_card_type_info = { @@ -1358,6 +1364,7 @@ static TypeInfo ccid_card_type_info = { static void ccid_register_types(void) { + type_register_static(&ccid_bus_info); type_register_static(&ccid_card_type_info); type_register_static(&ccid_info); usb_legacy_register(CCID_DEV_NAME, "ccid", NULL); diff --git a/hw/usb/dev-storage.c b/hw/usb/dev-storage.c index a96c0b9e5e..251e7de1cd 100644 --- a/hw/usb/dev-storage.c +++ b/hw/usb/dev-storage.c @@ -48,10 +48,9 @@ struct usb_msd_csw { typedef struct { USBDevice dev; enum USBMSDMode mode; + uint32_t scsi_off; uint32_t scsi_len; - uint8_t *scsi_buf; uint32_t data_len; - uint32_t residue; struct usb_msd_csw csw; SCSIRequest *req; SCSIBus bus; @@ -82,7 +81,7 @@ enum { }; static const USBDescStrings desc_strings = { - [STR_MANUFACTURER] = "QEMU " QEMU_VERSION, + [STR_MANUFACTURER] = "QEMU", [STR_PRODUCT] = "QEMU USB HARDDRIVE", [STR_SERIALNUMBER] = "1", [STR_CONFIG_FULL] = "Full speed config (usb 1.1)", @@ -179,9 +178,9 @@ static void usb_msd_copy_data(MSDState *s, USBPacket *p) len = p->iov.size - p->result; if (len > s->scsi_len) len = s->scsi_len; - usb_packet_copy(p, s->scsi_buf, len); + usb_packet_copy(p, scsi_req_get_buf(s->req) + s->scsi_off, len); s->scsi_len -= len; - s->scsi_buf += len; + s->scsi_off += len; s->data_len -= len; if (s->scsi_len == 0 || s->data_len == 0) { scsi_req_continue(s->req); @@ -201,6 +200,18 @@ static void usb_msd_send_status(MSDState *s, USBPacket *p) memset(&s->csw, 0, sizeof(s->csw)); } +static void usb_msd_packet_complete(MSDState *s) +{ + USBPacket *p = s->packet; + + /* Set s->packet to NULL before calling usb_packet_complete + because another request may be issued before + usb_packet_complete returns. */ + DPRINTF("Packet complete %p\n", p); + s->packet = NULL; + usb_packet_complete(&s->dev, p); +} + static void usb_msd_transfer_data(SCSIRequest *req, uint32_t len) { MSDState *s = DO_UPCAST(MSDState, dev.qdev, req->bus->qbus.parent); @@ -208,17 +219,12 @@ static void usb_msd_transfer_data(SCSIRequest *req, uint32_t len) assert((s->mode == USB_MSDM_DATAOUT) == (req->cmd.mode == SCSI_XFER_TO_DEV)); s->scsi_len = len; - s->scsi_buf = scsi_req_get_buf(req); + s->scsi_off = 0; if (p) { usb_msd_copy_data(s, p); p = s->packet; if (p && p->result == p->iov.size) { - /* Set s->packet to NULL before calling usb_packet_complete - because another request may be issued before - usb_packet_complete returns. 
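The ccid-bus hunk just above is one more instance of a conversion applied to every bus in this patch (spapr-vio, System, SSI, usb-bus, ccid-bus): the old BusInfo struct becomes a QOM TypeInfo derived from TYPE_BUS, callbacks move into a BusClass class_init, and bus-level properties move onto the device class. Condensed into one place, the recipe looks roughly like this (sketch only; "foo" is a placeholder, the real instances are in the hunks themselves):

    static const TypeInfo foo_bus_info = {
        .name          = TYPE_FOO_BUS,
        .parent        = TYPE_BUS,
        .instance_size = sizeof(FooBus),
        .class_init    = foo_bus_class_init,   /* only needed if the old BusInfo
                                                  carried print_dev/get_dev_path */
    };

    static void foo_device_class_init(ObjectClass *klass, void *data)
    {
        DeviceClass *k = DEVICE_CLASS(klass);

        k->bus_type = TYPE_FOO_BUS;    /* replaces k->bus_info = &foo_bus_info */
        k->props    = foo_props;       /* properties formerly in BusInfo.props */
    }

    /* the bus type must now be registered explicitly ... */
    type_register_static(&foo_bus_info);

    /* ... and buses are created by type name rather than by BusInfo pointer */
    qbus_create(TYPE_FOO_BUS, parent, name);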
*/ - DPRINTF("Packet complete %p\n", p); - s->packet = NULL; - usb_packet_complete(&s->dev, p); + usb_msd_packet_complete(s); } } } @@ -229,11 +235,10 @@ static void usb_msd_command_complete(SCSIRequest *req, uint32_t status, size_t r USBPacket *p = s->packet; DPRINTF("Command complete %d tag 0x%x\n", status, req->tag); - s->residue = s->data_len; s->csw.sig = cpu_to_le32(0x53425355); s->csw.tag = cpu_to_le32(req->tag); - s->csw.residue = cpu_to_le32(s->residue); + s->csw.residue = cpu_to_le32(s->data_len); s->csw.status = status != 0; if (s->packet) { @@ -252,8 +257,7 @@ static void usb_msd_command_complete(SCSIRequest *req, uint32_t status, size_t r s->mode = USB_MSDM_CSW; } } - s->packet = NULL; - usb_packet_complete(&s->dev, p); + usb_msd_packet_complete(s); } else if (s->data_len == 0) { s->mode = USB_MSDM_CSW; } @@ -283,10 +287,8 @@ static void usb_msd_handle_reset(USBDevice *dev) assert(s->req == NULL); if (s->packet) { - USBPacket *p = s->packet; - s->packet = NULL; - p->result = USB_RET_STALL; - usb_packet_complete(dev, p); + s->packet->result = USB_RET_STALL; + usb_msd_packet_complete(s); } s->mode = USB_MSDM_CBW; @@ -378,7 +380,7 @@ static int usb_msd_handle_data(USBDevice *dev, USBPacket *p) } DPRINTF("Command tag 0x%x flags %08x len %d data %d\n", tag, cbw.flags, cbw.cmd_len, s->data_len); - s->residue = 0; + assert(le32_to_cpu(s->csw.residue) == 0); s->scsi_len = 0; s->req = scsi_req_new(s->scsi_dev, tag, 0, cbw.cmd, NULL); scsi_req_enqueue(s->req); @@ -397,7 +399,7 @@ static int usb_msd_handle_data(USBDevice *dev, USBPacket *p) if (s->scsi_len) { usb_msd_copy_data(s, p); } - if (s->residue) { + if (le32_to_cpu(s->csw.residue)) { int len = p->iov.size - p->result; if (len) { usb_packet_skip(p, len); @@ -458,7 +460,7 @@ static int usb_msd_handle_data(USBDevice *dev, USBPacket *p) if (s->scsi_len) { usb_msd_copy_data(s, p); } - if (s->residue) { + if (le32_to_cpu(s->csw.residue)) { int len = p->iov.size - p->result; if (len) { usb_packet_skip(p, len); @@ -504,6 +506,17 @@ static void usb_msd_password_cb(void *opaque, int err) qdev_unplug(&s->dev.qdev, NULL); } +static void *usb_msd_load_request(QEMUFile *f, SCSIRequest *req) +{ + MSDState *s = DO_UPCAST(MSDState, dev.qdev, req->bus->qbus.parent); + + /* nothing to load, just store req in our state struct */ + assert(s->req == NULL); + scsi_req_ref(req); + s->req = req; + return NULL; +} + static const struct SCSIBusInfo usb_msd_scsi_info = { .tcq = false, .max_target = 0, @@ -511,7 +524,8 @@ static const struct SCSIBusInfo usb_msd_scsi_info = { .transfer_data = usb_msd_transfer_data, .complete = usb_msd_command_complete, - .cancel = usb_msd_request_cancelled + .cancel = usb_msd_request_cancelled, + .load_request = usb_msd_load_request, }; static int usb_msd_initfn(USBDevice *dev) @@ -631,11 +645,18 @@ static USBDevice *usb_msd_init(USBBus *bus, const char *filename) static const VMStateDescription vmstate_usb_msd = { .name = "usb-storage", - .unmigratable = 1, /* FIXME: handle transactions which are in flight */ .version_id = 1, .minimum_version_id = 1, .fields = (VMStateField []) { VMSTATE_USB_DEVICE(dev, MSDState), + VMSTATE_UINT32(mode, MSDState), + VMSTATE_UINT32(scsi_len, MSDState), + VMSTATE_UINT32(scsi_off, MSDState), + VMSTATE_UINT32(data_len, MSDState), + VMSTATE_UINT32(csw.sig, MSDState), + VMSTATE_UINT32(csw.tag, MSDState), + VMSTATE_UINT32(csw.residue, MSDState), + VMSTATE_UINT8(csw.status, MSDState), VMSTATE_END_OF_LIST() } }; diff --git a/hw/usb/dev-wacom.c b/hw/usb/dev-wacom.c index 3b51d458f4..ed9a5ee358 100644 
--- a/hw/usb/dev-wacom.c +++ b/hw/usb/dev-wacom.c @@ -62,7 +62,7 @@ enum { }; static const USBDescStrings desc_strings = { - [STR_MANUFACTURER] = "QEMU " QEMU_VERSION, + [STR_MANUFACTURER] = "QEMU", [STR_PRODUCT] = "Wacom PenPartner", [STR_SERIALNUMBER] = "1", }; diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c index e759c996ce..080f62c00d 100644 --- a/hw/usb/hcd-ehci.c +++ b/hw/usb/hcd-ehci.c @@ -334,6 +334,7 @@ typedef struct EHCIfstn { uint32_t backptr; // Standard next link pointer } EHCIfstn; +typedef struct EHCIPacket EHCIPacket; typedef struct EHCIQueue EHCIQueue; typedef struct EHCIState EHCIState; @@ -343,26 +344,37 @@ enum async_state { EHCI_ASYNC_FINISHED, }; +struct EHCIPacket { + EHCIQueue *queue; + QTAILQ_ENTRY(EHCIPacket) next; + + EHCIqtd qtd; /* copy of current QTD (being worked on) */ + uint32_t qtdaddr; /* address QTD read from */ + + USBPacket packet; + QEMUSGList sgl; + int pid; + uint32_t tbytes; + enum async_state async; + int usb_status; +}; + struct EHCIQueue { EHCIState *ehci; QTAILQ_ENTRY(EHCIQueue) next; uint32_t seen; uint64_t ts; + int async; + int revalidate; /* cached data from guest - needs to be flushed * when guest removes an entry (doorbell, handshake sequence) */ - EHCIqh qh; // copy of current QH (being worked on) - uint32_t qhaddr; // address QH read from - EHCIqtd qtd; // copy of current QTD (being worked on) - uint32_t qtdaddr; // address QTD read from - - USBPacket packet; - QEMUSGList sgl; - int pid; - uint32_t tbytes; - enum async_state async; - int usb_status; + EHCIqh qh; /* copy of current QH (being worked on) */ + uint32_t qhaddr; /* address QH read from */ + uint32_t qtdaddr; /* address QTD read from */ + USBDevice *dev; + QTAILQ_HEAD(, EHCIPacket) packets; }; typedef QTAILQ_HEAD(EHCIQueueHead, EHCIQueue) EHCIQueueHead; @@ -375,7 +387,6 @@ struct EHCIState { int companion_count; /* properties */ - uint32_t freq; uint32_t maxframes; /* @@ -403,22 +414,24 @@ struct EHCIState { * Internal states, shadow registers, etc */ QEMUTimer *frame_timer; - int attach_poll_counter; - int astate; // Current state in asynchronous schedule - int pstate; // Current state in periodic schedule + QEMUBH *async_bh; + uint32_t astate; /* Current state in asynchronous schedule */ + uint32_t pstate; /* Current state in periodic schedule */ USBPort ports[NB_PORTS]; USBPort *companion_ports[NB_PORTS]; uint32_t usbsts_pending; EHCIQueueHead aqueues; EHCIQueueHead pqueues; - uint32_t a_fetch_addr; // which address to look at next - uint32_t p_fetch_addr; // which address to look at next + /* which address to look at next */ + uint32_t a_fetch_addr; + uint32_t p_fetch_addr; USBPacket ipacket; QEMUSGList isgl; uint64_t last_run_ns; + uint32_t async_stepdown; }; #define SET_LAST_RUN_CLOCK(s) \ @@ -557,6 +570,7 @@ static inline void ehci_set_interrupt(EHCIState *s, int intr) level = 1; } + trace_usb_ehci_interrupt(level, s->usbsts, s->usbintr); qemu_set_irq(s->irq, level); } @@ -574,14 +588,37 @@ static inline void ehci_commit_interrupt(EHCIState *s) s->usbsts_pending = 0; } +static void ehci_update_halt(EHCIState *s) +{ + if (s->usbcmd & USBCMD_RUNSTOP) { + ehci_clear_usbsts(s, USBSTS_HALT); + } else { + if (s->astate == EST_INACTIVE && s->pstate == EST_INACTIVE) { + ehci_set_usbsts(s, USBSTS_HALT); + } + } +} + static void ehci_set_state(EHCIState *s, int async, int state) { if (async) { trace_usb_ehci_state("async", state2str(state)); s->astate = state; + if (s->astate == EST_INACTIVE) { + ehci_clear_usbsts(s, USBSTS_ASS); + ehci_update_halt(s); + } else { + 
ehci_set_usbsts(s, USBSTS_ASS); + } } else { trace_usb_ehci_state("periodic", state2str(state)); s->pstate = state; + if (s->pstate == EST_INACTIVE) { + ehci_clear_usbsts(s, USBSTS_PSS); + ehci_update_halt(s); + } else { + ehci_set_usbsts(s, USBSTS_PSS); + } } } @@ -655,27 +692,71 @@ static void ehci_trace_sitd(EHCIState *s, target_phys_addr_t addr, (bool)(sitd->results & SITD_RESULTS_ACTIVE)); } +static inline bool ehci_enabled(EHCIState *s) +{ + return s->usbcmd & USBCMD_RUNSTOP; +} + +static inline bool ehci_async_enabled(EHCIState *s) +{ + return ehci_enabled(s) && (s->usbcmd & USBCMD_ASE); +} + +static inline bool ehci_periodic_enabled(EHCIState *s) +{ + return ehci_enabled(s) && (s->usbcmd & USBCMD_PSE); +} + +/* packet management */ + +static EHCIPacket *ehci_alloc_packet(EHCIQueue *q) +{ + EHCIPacket *p; + + p = g_new0(EHCIPacket, 1); + p->queue = q; + usb_packet_init(&p->packet); + QTAILQ_INSERT_TAIL(&q->packets, p, next); + trace_usb_ehci_packet_action(p->queue, p, "alloc"); + return p; +} + +static void ehci_free_packet(EHCIPacket *p) +{ + trace_usb_ehci_packet_action(p->queue, p, "free"); + if (p->async == EHCI_ASYNC_INFLIGHT) { + usb_cancel_packet(&p->packet); + } + QTAILQ_REMOVE(&p->queue->packets, p, next); + usb_packet_cleanup(&p->packet); + g_free(p); +} + /* queue management */ -static EHCIQueue *ehci_alloc_queue(EHCIState *ehci, int async) +static EHCIQueue *ehci_alloc_queue(EHCIState *ehci, uint32_t addr, int async) { EHCIQueueHead *head = async ? &ehci->aqueues : &ehci->pqueues; EHCIQueue *q; q = g_malloc0(sizeof(*q)); q->ehci = ehci; - usb_packet_init(&q->packet); + q->qhaddr = addr; + q->async = async; + QTAILQ_INIT(&q->packets); QTAILQ_INSERT_HEAD(head, q, next); trace_usb_ehci_queue_action(q, "alloc"); return q; } -static void ehci_free_queue(EHCIQueue *q, int async) +static void ehci_free_queue(EHCIQueue *q) { - EHCIQueueHead *head = async ? &q->ehci->aqueues : &q->ehci->pqueues; + EHCIQueueHead *head = q->async ? &q->ehci->aqueues : &q->ehci->pqueues; + EHCIPacket *p; + trace_usb_ehci_queue_action(q, "free"); - if (q->async == EHCI_ASYNC_INFLIGHT) { - usb_cancel_packet(&q->packet); + while ((p = QTAILQ_FIRST(&q->packets)) != NULL) { + ehci_free_packet(p); } QTAILQ_REMOVE(head, q, next); g_free(q); @@ -695,9 +776,21 @@ static EHCIQueue *ehci_find_queue_by_qh(EHCIState *ehci, uint32_t addr, return NULL; } -static void ehci_queues_rip_unused(EHCIState *ehci, int async, int flush) +static void ehci_queues_tag_unused_async(EHCIState *ehci) +{ + EHCIQueue *q; + + QTAILQ_FOREACH(q, &ehci->aqueues, next) { + if (!q->seen) { + q->revalidate = 1; + } + } +} + +static void ehci_queues_rip_unused(EHCIState *ehci, int async) { EHCIQueueHead *head = async ? 
&ehci->aqueues : &ehci->pqueues; + uint64_t maxage = FRAME_TIMER_NS * ehci->maxframes * 4; EHCIQueue *q, *tmp; QTAILQ_FOREACH_SAFE(q, head, next, tmp) { @@ -706,11 +799,10 @@ static void ehci_queues_rip_unused(EHCIState *ehci, int async, int flush) q->ts = ehci->last_run_ns; continue; } - if (!flush && ehci->last_run_ns < q->ts + 250000000) { - /* allow 0.25 sec idle */ + if (ehci->last_run_ns < q->ts + maxage) { continue; } - ehci_free_queue(q, async); + ehci_free_queue(q); } } @@ -720,11 +812,10 @@ static void ehci_queues_rip_device(EHCIState *ehci, USBDevice *dev, int async) EHCIQueue *q, *tmp; QTAILQ_FOREACH_SAFE(q, head, next, tmp) { - if (!usb_packet_is_inflight(&q->packet) || - q->packet.ep->dev != dev) { + if (q->dev != dev) { continue; } - ehci_free_queue(q, async); + ehci_free_queue(q); } } @@ -734,7 +825,7 @@ static void ehci_queues_rip_all(EHCIState *ehci, int async) EHCIQueue *q, *tmp; QTAILQ_FOREACH_SAFE(q, head, next, tmp) { - ehci_free_queue(q, async); + ehci_free_queue(q); } } @@ -744,8 +835,9 @@ static void ehci_attach(USBPort *port) { EHCIState *s = port->opaque; uint32_t *portsc = &s->portsc[port->index]; + const char *owner = (*portsc & PORTSC_POWNER) ? "comp" : "ehci"; - trace_usb_ehci_port_attach(port->index, port->dev->product_desc); + trace_usb_ehci_port_attach(port->index, owner, port->dev->product_desc); if (*portsc & PORTSC_POWNER) { USBPort *companion = s->companion_ports[port->index]; @@ -764,8 +856,9 @@ static void ehci_detach(USBPort *port) { EHCIState *s = port->opaque; uint32_t *portsc = &s->portsc[port->index]; + const char *owner = (*portsc & PORTSC_POWNER) ? "comp" : "ehci"; - trace_usb_ehci_port_detach(port->index); + trace_usb_ehci_port_detach(port->index, owner); if (*portsc & PORTSC_POWNER) { USBPort *companion = s->companion_ports[port->index]; @@ -813,7 +906,10 @@ static void ehci_wakeup(USBPort *port) if (companion->ops->wakeup) { companion->ops->wakeup(companion); } + return; } + + qemu_bh_schedule(s->async_bh); } static int ehci_register_companion(USBBus *bus, USBPort *ports[], @@ -904,7 +1000,6 @@ static void ehci_reset(void *opaque) s->astate = EST_INACTIVE; s->pstate = EST_INACTIVE; - s->attach_poll_counter = 0; for(i = 0; i < NB_PORTS; i++) { if (s->companion_ports[i]) { @@ -920,6 +1015,7 @@ static void ehci_reset(void *opaque) ehci_queues_rip_all(s, 0); ehci_queues_rip_all(s, 1); qemu_del_timer(s->frame_timer); + qemu_bh_cancel(s->async_bh); } static uint32_t ehci_mem_readb(void *ptr, target_phys_addr_t addr) @@ -1064,22 +1160,20 @@ static void ehci_mem_writel(void *ptr, target_phys_addr_t addr, uint32_t val) /* Do any register specific pre-write processing here. 
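The hcd-ehci.c hunks around this point move per-transfer state (the qTD copy, USBPacket, scatter-gather list and async status) out of EHCIQueue into the new EHCIPacket, and each queue keeps a list of packets instead of at most one in-flight transfer. A rough sketch of the resulting ownership, using only names introduced in this patch:

    EHCIQueue  *q = ehci_alloc_queue(ehci, addr, async);  /* one per QH, keyed by qhaddr */
    EHCIPacket *p = ehci_alloc_packet(q);                  /* one per qTD being processed */

    /* p->qtd / p->qtdaddr hold the qTD being worked on (previously q->qtd);
     * after submission p->async goes EHCI_ASYNC_INFLIGHT -> EHCI_ASYNC_FINISHED */

    ehci_free_packet(p);   /* cancels the USBPacket if it is still in flight */
    ehci_free_queue(q);    /* frees every packet still attached to the queue */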
*/ switch(addr) { case USBCMD: - if ((val & USBCMD_RUNSTOP) && !(s->usbcmd & USBCMD_RUNSTOP)) { - qemu_mod_timer(s->frame_timer, qemu_get_clock_ns(vm_clock)); - SET_LAST_RUN_CLOCK(s); - ehci_clear_usbsts(s, USBSTS_HALT); - } - - if (!(val & USBCMD_RUNSTOP) && (s->usbcmd & USBCMD_RUNSTOP)) { - qemu_del_timer(s->frame_timer); - ehci_queues_rip_all(s, 0); - ehci_queues_rip_all(s, 1); - ehci_set_usbsts(s, USBSTS_HALT); - } - if (val & USBCMD_HCRESET) { ehci_reset(s); val = s->usbcmd; + break; + } + + if (((USBCMD_RUNSTOP | USBCMD_PSE | USBCMD_ASE) & val) != + ((USBCMD_RUNSTOP | USBCMD_PSE | USBCMD_ASE) & s->usbcmd)) { + if (s->pstate == EST_INACTIVE) { + SET_LAST_RUN_CLOCK(s); + } + ehci_update_halt(s); + s->async_stepdown = 0; + qemu_mod_timer(s->frame_timer, qemu_get_clock_ns(vm_clock)); } /* not supporting dynamic frame list size at the moment */ @@ -1114,7 +1208,7 @@ static void ehci_mem_writel(void *ptr, target_phys_addr_t addr, uint32_t val) break; case PERIODICLISTBASE: - if ((s->usbcmd & USBCMD_PSE) && (s->usbcmd & USBCMD_RUNSTOP)) { + if (ehci_periodic_enabled(s)) { fprintf(stderr, "ehci: PERIODIC list base register set while periodic schedule\n" " is enabled and HC is enabled\n"); @@ -1122,7 +1216,7 @@ static void ehci_mem_writel(void *ptr, target_phys_addr_t addr, uint32_t val) break; case ASYNCLISTADDR: - if ((s->usbcmd & USBCMD_ASE) && (s->usbcmd & USBCMD_RUNSTOP)) { + if (ehci_async_enabled(s)) { fprintf(stderr, "ehci: ASYNC list address register set while async schedule\n" " is enabled and HC is enabled\n"); @@ -1165,25 +1259,46 @@ static inline int put_dwords(EHCIState *ehci, uint32_t addr, return 1; } +/* + * Write the qh back to guest physical memory. This step isn't + * in the EHCI spec but we need to do it since we don't share + * physical memory with our guest VM. + * + * The first three dwords are read-only for the EHCI, so skip them + * when writing back the qh. 
+ */ +static void ehci_flush_qh(EHCIQueue *q) +{ + uint32_t *qh = (uint32_t *) &q->qh; + uint32_t dwords = sizeof(EHCIqh) >> 2; + uint32_t addr = NLPTR_GET(q->qhaddr); + + put_dwords(q->ehci, addr + 3 * sizeof(uint32_t), qh + 3, dwords - 3); +} + // 4.10.2 static int ehci_qh_do_overlay(EHCIQueue *q) { + EHCIPacket *p = QTAILQ_FIRST(&q->packets); int i; int dtoggle; int ping; int eps; int reload; + assert(p != NULL); + assert(p->qtdaddr == q->qtdaddr); + // remember values in fields to preserve in qh after overlay dtoggle = q->qh.token & QTD_TOKEN_DTOGGLE; ping = q->qh.token & QTD_TOKEN_PING; - q->qh.current_qtd = q->qtdaddr; - q->qh.next_qtd = q->qtd.next; - q->qh.altnext_qtd = q->qtd.altnext; - q->qh.token = q->qtd.token; + q->qh.current_qtd = p->qtdaddr; + q->qh.next_qtd = p->qtd.next; + q->qh.altnext_qtd = p->qtd.altnext; + q->qh.token = p->qtd.token; eps = get_field(q->qh.epchar, QH_EPCHAR_EPS); @@ -1196,7 +1311,7 @@ static int ehci_qh_do_overlay(EHCIQueue *q) set_field(&q->qh.altnext_qtd, reload, QH_ALTNEXT_NAKCNT); for (i = 0; i < 5; i++) { - q->qh.bufptr[i] = q->qtd.bufptr[i]; + q->qh.bufptr[i] = p->qtd.bufptr[i]; } if (!(q->qh.epchar & QH_EPCHAR_DTC)) { @@ -1208,21 +1323,20 @@ static int ehci_qh_do_overlay(EHCIQueue *q) q->qh.bufptr[1] &= ~BUFPTR_CPROGMASK_MASK; q->qh.bufptr[2] &= ~BUFPTR_FRAMETAG_MASK; - put_dwords(q->ehci, NLPTR_GET(q->qhaddr), (uint32_t *) &q->qh, - sizeof(EHCIqh) >> 2); + ehci_flush_qh(q); return 0; } -static int ehci_init_transfer(EHCIQueue *q) +static int ehci_init_transfer(EHCIPacket *p) { uint32_t cpage, offset, bytes, plen; dma_addr_t page; - cpage = get_field(q->qh.token, QTD_TOKEN_CPAGE); - bytes = get_field(q->qh.token, QTD_TOKEN_TBYTES); - offset = q->qh.bufptr[0] & ~QTD_BUFPTR_MASK; - pci_dma_sglist_init(&q->sgl, &q->ehci->dev, 5); + cpage = get_field(p->qtd.token, QTD_TOKEN_CPAGE); + bytes = get_field(p->qtd.token, QTD_TOKEN_TBYTES); + offset = p->qtd.bufptr[0] & ~QTD_BUFPTR_MASK; + pci_dma_sglist_init(&p->sgl, &p->queue->ehci->dev, 5); while (bytes > 0) { if (cpage > 4) { @@ -1230,7 +1344,7 @@ static int ehci_init_transfer(EHCIQueue *q) return USB_RET_PROCERR; } - page = q->qh.bufptr[cpage] & QTD_BUFPTR_MASK; + page = p->qtd.bufptr[cpage] & QTD_BUFPTR_MASK; page += offset; plen = bytes; if (plen > 4096 - offset) { @@ -1239,7 +1353,7 @@ static int ehci_init_transfer(EHCIQueue *q) cpage++; } - qemu_sglist_add(&q->sgl, page, plen); + qemu_sglist_add(&p->sgl, page, plen); bytes -= plen; } return 0; @@ -1249,8 +1363,6 @@ static void ehci_finish_transfer(EHCIQueue *q, int status) { uint32_t cpage, offset; - qemu_sglist_destroy(&q->sgl); - if (status > 0) { /* update cpage & offset */ cpage = get_field(q->qh.token, QTD_TOKEN_CPAGE); @@ -1268,7 +1380,7 @@ static void ehci_finish_transfer(EHCIQueue *q, int status) static void ehci_async_complete_packet(USBPort *port, USBPacket *packet) { - EHCIQueue *q; + EHCIPacket *p; EHCIState *s = port->opaque; uint32_t portsc = s->portsc[port->index]; @@ -1278,23 +1390,31 @@ static void ehci_async_complete_packet(USBPort *port, USBPacket *packet) return; } - q = container_of(packet, EHCIQueue, packet); - trace_usb_ehci_queue_action(q, "wakeup"); - assert(q->async == EHCI_ASYNC_INFLIGHT); - q->async = EHCI_ASYNC_FINISHED; - q->usb_status = packet->result; + p = container_of(packet, EHCIPacket, packet); + trace_usb_ehci_packet_action(p->queue, p, "wakeup"); + assert(p->async == EHCI_ASYNC_INFLIGHT); + p->async = EHCI_ASYNC_FINISHED; + p->usb_status = packet->result; + + if (p->queue->async) { + 
qemu_bh_schedule(p->queue->ehci->async_bh); + } } static void ehci_execute_complete(EHCIQueue *q) { - assert(q->async != EHCI_ASYNC_INFLIGHT); - q->async = EHCI_ASYNC_NONE; + EHCIPacket *p = QTAILQ_FIRST(&q->packets); + + assert(p != NULL); + assert(p->qtdaddr == q->qtdaddr); + assert(p->async != EHCI_ASYNC_INFLIGHT); + p->async = EHCI_ASYNC_NONE; DPRINTF("execute_complete: qhaddr 0x%x, next %x, qtdaddr 0x%x, status %d\n", q->qhaddr, q->qh.next, q->qtdaddr, q->usb_status); - if (q->usb_status < 0) { - switch(q->usb_status) { + if (p->usb_status < 0) { + switch (p->usb_status) { case USB_RET_IOERROR: case USB_RET_NODEV: q->qh.token |= (QTD_TOKEN_HALT | QTD_TOKEN_XACTERR); @@ -1314,28 +1434,29 @@ static void ehci_execute_complete(EHCIQueue *q) break; default: /* should not be triggerable */ - fprintf(stderr, "USB invalid response %d to handle\n", q->usb_status); + fprintf(stderr, "USB invalid response %d\n", p->usb_status); assert(0); break; } - } else if ((q->usb_status > q->tbytes) && (q->pid == USB_TOKEN_IN)) { - q->usb_status = USB_RET_BABBLE; + } else if ((p->usb_status > p->tbytes) && (p->pid == USB_TOKEN_IN)) { + p->usb_status = USB_RET_BABBLE; q->qh.token |= (QTD_TOKEN_HALT | QTD_TOKEN_BABBLE); ehci_record_interrupt(q->ehci, USBSTS_ERRINT); } else { // TODO check 4.12 for splits - if (q->tbytes && q->pid == USB_TOKEN_IN) { - q->tbytes -= q->usb_status; + if (p->tbytes && p->pid == USB_TOKEN_IN) { + p->tbytes -= p->usb_status; } else { - q->tbytes = 0; + p->tbytes = 0; } - DPRINTF("updating tbytes to %d\n", q->tbytes); - set_field(&q->qh.token, q->tbytes, QTD_TOKEN_TBYTES); + DPRINTF("updating tbytes to %d\n", p->tbytes); + set_field(&q->qh.token, p->tbytes, QTD_TOKEN_TBYTES); } - ehci_finish_transfer(q, q->usb_status); - usb_packet_unmap(&q->packet); + ehci_finish_transfer(q, p->usb_status); + usb_packet_unmap(&p->packet, &p->sgl); + qemu_sglist_destroy(&p->sgl); q->qh.token ^= QTD_TOKEN_DTOGGLE; q->qh.token &= ~QTD_TOKEN_ACTIVE; @@ -1347,48 +1468,51 @@ static void ehci_execute_complete(EHCIQueue *q) // 4.10.3 -static int ehci_execute(EHCIQueue *q) +static int ehci_execute(EHCIPacket *p, const char *action) { - USBDevice *dev; USBEndpoint *ep; int ret; int endp; - int devadr; - if ( !(q->qh.token & QTD_TOKEN_ACTIVE)) { - fprintf(stderr, "Attempting to execute inactive QH\n"); + if (!(p->qtd.token & QTD_TOKEN_ACTIVE)) { + fprintf(stderr, "Attempting to execute inactive qtd\n"); return USB_RET_PROCERR; } - q->tbytes = (q->qh.token & QTD_TOKEN_TBYTES_MASK) >> QTD_TOKEN_TBYTES_SH; - if (q->tbytes > BUFF_SIZE) { + p->tbytes = (p->qtd.token & QTD_TOKEN_TBYTES_MASK) >> QTD_TOKEN_TBYTES_SH; + if (p->tbytes > BUFF_SIZE) { fprintf(stderr, "Request for more bytes than allowed\n"); return USB_RET_PROCERR; } - q->pid = (q->qh.token & QTD_TOKEN_PID_MASK) >> QTD_TOKEN_PID_SH; - switch(q->pid) { - case 0: q->pid = USB_TOKEN_OUT; break; - case 1: q->pid = USB_TOKEN_IN; break; - case 2: q->pid = USB_TOKEN_SETUP; break; - default: fprintf(stderr, "bad token\n"); break; + p->pid = (p->qtd.token & QTD_TOKEN_PID_MASK) >> QTD_TOKEN_PID_SH; + switch (p->pid) { + case 0: + p->pid = USB_TOKEN_OUT; + break; + case 1: + p->pid = USB_TOKEN_IN; + break; + case 2: + p->pid = USB_TOKEN_SETUP; + break; + default: + fprintf(stderr, "bad token\n"); + break; } - if (ehci_init_transfer(q) != 0) { + if (ehci_init_transfer(p) != 0) { return USB_RET_PROCERR; } - endp = get_field(q->qh.epchar, QH_EPCHAR_EP); - devadr = get_field(q->qh.epchar, QH_EPCHAR_DEVADDR); - - /* TODO: associating device with ehci port */ - dev = 
ehci_find_device(q->ehci, devadr); - ep = usb_ep_get(dev, q->pid, endp); + endp = get_field(p->queue->qh.epchar, QH_EPCHAR_EP); + ep = usb_ep_get(p->queue->dev, p->pid, endp); - usb_packet_setup(&q->packet, q->pid, ep); - usb_packet_map(&q->packet, &q->sgl); + usb_packet_setup(&p->packet, p->pid, ep); + usb_packet_map(&p->packet, &p->sgl); - ret = usb_handle_packet(dev, &q->packet); + trace_usb_ehci_packet_action(p->queue, p, action); + ret = usb_handle_packet(p->queue->dev, &p->packet); DPRINTF("submit: qh %x next %x qtd %x pid %x len %zd " "(total %d) endp %x ret %d\n", q->qhaddr, q->qh.next, q->qtdaddr, q->pid, @@ -1456,7 +1580,7 @@ static int ehci_process_itd(EHCIState *ehci, usb_packet_map(&ehci->ipacket, &ehci->isgl); ret = usb_handle_packet(dev, &ehci->ipacket); assert(ret != USB_RET_ASYNC); - usb_packet_unmap(&ehci->ipacket); + usb_packet_unmap(&ehci->ipacket, &ehci->isgl); } else { DPRINTF("ISOCH: attempt to addess non-iso endpoint\n"); ret = USB_RET_NAK; @@ -1504,6 +1628,7 @@ static int ehci_process_itd(EHCIState *ehci, return 0; } + /* This state is the entry point for asynchronous schedule * processing. Entry here consitutes a EHCI start event state (4.8.5) */ @@ -1519,7 +1644,7 @@ static int ehci_state_waitlisthead(EHCIState *ehci, int async) ehci_set_usbsts(ehci, USBSTS_REC); } - ehci_queues_rip_unused(ehci, async, 0); + ehci_queues_rip_unused(ehci, async); /* Find the head of the list (4.9.1.1) */ for(i = 0; i < MAX_QH; i++) { @@ -1601,17 +1726,19 @@ out: static EHCIQueue *ehci_state_fetchqh(EHCIState *ehci, int async) { - uint32_t entry; + EHCIPacket *p; + uint32_t entry, devaddr; EHCIQueue *q; + EHCIqh qh; entry = ehci_get_fetch_addr(ehci, async); q = ehci_find_queue_by_qh(ehci, entry, async); if (NULL == q) { - q = ehci_alloc_queue(ehci, async); + q = ehci_alloc_queue(ehci, entry, async); } - q->qhaddr = entry; - q->seen++; + p = QTAILQ_FIRST(&q->packets); + q->seen++; if (q->seen > 1) { /* we are going in circles -- stop processing */ ehci_set_state(ehci, async, EST_ACTIVE); @@ -1620,17 +1747,41 @@ static EHCIQueue *ehci_state_fetchqh(EHCIState *ehci, int async) } get_dwords(ehci, NLPTR_GET(q->qhaddr), - (uint32_t *) &q->qh, sizeof(EHCIqh) >> 2); + (uint32_t *) &qh, sizeof(EHCIqh) >> 2); + if (q->revalidate && (q->qh.epchar != qh.epchar || + q->qh.epcap != qh.epcap || + q->qh.current_qtd != qh.current_qtd)) { + ehci_free_queue(q); + q = ehci_alloc_queue(ehci, entry, async); + q->seen++; + p = NULL; + } + q->qh = qh; + q->revalidate = 0; ehci_trace_qh(q, NLPTR_GET(q->qhaddr), &q->qh); - if (q->async == EHCI_ASYNC_INFLIGHT) { + devaddr = get_field(q->qh.epchar, QH_EPCHAR_DEVADDR); + if (q->dev != NULL && q->dev->addr != devaddr) { + if (!QTAILQ_EMPTY(&q->packets)) { + /* should not happen (guest bug) */ + while ((p = QTAILQ_FIRST(&q->packets)) != NULL) { + ehci_free_packet(p); + } + } + q->dev = NULL; + } + if (q->dev == NULL) { + q->dev = ehci_find_device(q->ehci, devaddr); + } + + if (p && p->async == EHCI_ASYNC_INFLIGHT) { /* I/O still in progress -- skip queue */ ehci_set_state(ehci, async, EST_HORIZONTALQH); goto out; } - if (q->async == EHCI_ASYNC_FINISHED) { + if (p && p->async == EHCI_ASYNC_FINISHED) { /* I/O finished -- continue processing queue */ - trace_usb_ehci_queue_action(q, "resume"); + trace_usb_ehci_packet_action(p->queue, p, "complete"); ehci_set_state(ehci, async, EST_EXECUTING); goto out; } @@ -1726,7 +1877,7 @@ static int ehci_state_fetchsitd(EHCIState *ehci, int async) } /* Section 4.10.2 - paragraph 3 */ -static int ehci_state_advqueue(EHCIQueue *q, 
int async) +static int ehci_state_advqueue(EHCIQueue *q) { #if 0 /* TO-DO: 4.10.2 - paragraph 2 @@ -1745,81 +1896,117 @@ static int ehci_state_advqueue(EHCIQueue *q, int async) if (((q->qh.token & QTD_TOKEN_TBYTES_MASK) != 0) && (NLPTR_TBIT(q->qh.altnext_qtd) == 0)) { q->qtdaddr = q->qh.altnext_qtd; - ehci_set_state(q->ehci, async, EST_FETCHQTD); + ehci_set_state(q->ehci, q->async, EST_FETCHQTD); /* * next qTD is valid */ } else if (NLPTR_TBIT(q->qh.next_qtd) == 0) { q->qtdaddr = q->qh.next_qtd; - ehci_set_state(q->ehci, async, EST_FETCHQTD); + ehci_set_state(q->ehci, q->async, EST_FETCHQTD); /* * no valid qTD, try next QH */ } else { - ehci_set_state(q->ehci, async, EST_HORIZONTALQH); + ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); } return 1; } /* Section 4.10.2 - paragraph 4 */ -static int ehci_state_fetchqtd(EHCIQueue *q, int async) +static int ehci_state_fetchqtd(EHCIQueue *q) { + EHCIqtd qtd; + EHCIPacket *p; int again = 0; - get_dwords(q->ehci, NLPTR_GET(q->qtdaddr), (uint32_t *) &q->qtd, + get_dwords(q->ehci, NLPTR_GET(q->qtdaddr), (uint32_t *) &qtd, sizeof(EHCIqtd) >> 2); - ehci_trace_qtd(q, NLPTR_GET(q->qtdaddr), &q->qtd); + ehci_trace_qtd(q, NLPTR_GET(q->qtdaddr), &qtd); - if (q->qtd.token & QTD_TOKEN_ACTIVE) { - ehci_set_state(q->ehci, async, EST_EXECUTE); + p = QTAILQ_FIRST(&q->packets); + while (p != NULL && p->qtdaddr != q->qtdaddr) { + /* should not happen (guest bug) */ + ehci_free_packet(p); + p = QTAILQ_FIRST(&q->packets); + } + if (p != NULL) { + ehci_qh_do_overlay(q); + ehci_flush_qh(q); + if (p->async == EHCI_ASYNC_INFLIGHT) { + ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); + } else { + ehci_set_state(q->ehci, q->async, EST_EXECUTING); + } + again = 1; + } else if (qtd.token & QTD_TOKEN_ACTIVE) { + p = ehci_alloc_packet(q); + p->qtdaddr = q->qtdaddr; + p->qtd = qtd; + ehci_set_state(q->ehci, q->async, EST_EXECUTE); again = 1; } else { - ehci_set_state(q->ehci, async, EST_HORIZONTALQH); + ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); again = 1; } return again; } -static int ehci_state_horizqh(EHCIQueue *q, int async) +static int ehci_state_horizqh(EHCIQueue *q) { int again = 0; - if (ehci_get_fetch_addr(q->ehci, async) != q->qh.next) { - ehci_set_fetch_addr(q->ehci, async, q->qh.next); - ehci_set_state(q->ehci, async, EST_FETCHENTRY); + if (ehci_get_fetch_addr(q->ehci, q->async) != q->qh.next) { + ehci_set_fetch_addr(q->ehci, q->async, q->qh.next); + ehci_set_state(q->ehci, q->async, EST_FETCHENTRY); again = 1; } else { - ehci_set_state(q->ehci, async, EST_ACTIVE); + ehci_set_state(q->ehci, q->async, EST_ACTIVE); } return again; } -/* - * Write the qh back to guest physical memory. This step isn't - * in the EHCI spec but we need to do it since we don't share - * physical memory with our guest VM. - * - * The first three dwords are read-only for the EHCI, so skip them - * when writing back the qh. 
- */ -static void ehci_flush_qh(EHCIQueue *q) +static void ehci_fill_queue(EHCIPacket *p) { - uint32_t *qh = (uint32_t *) &q->qh; - uint32_t dwords = sizeof(EHCIqh) >> 2; - uint32_t addr = NLPTR_GET(q->qhaddr); + EHCIQueue *q = p->queue; + EHCIqtd qtd = p->qtd; + uint32_t qtdaddr; - put_dwords(q->ehci, addr + 3 * sizeof(uint32_t), qh + 3, dwords - 3); + for (;;) { + if (NLPTR_TBIT(qtd.altnext) == 0) { + break; + } + if (NLPTR_TBIT(qtd.next) != 0) { + break; + } + qtdaddr = qtd.next; + get_dwords(q->ehci, NLPTR_GET(qtdaddr), + (uint32_t *) &qtd, sizeof(EHCIqtd) >> 2); + ehci_trace_qtd(q, NLPTR_GET(qtdaddr), &qtd); + if (!(qtd.token & QTD_TOKEN_ACTIVE)) { + break; + } + p = ehci_alloc_packet(q); + p->qtdaddr = qtdaddr; + p->qtd = qtd; + p->usb_status = ehci_execute(p, "queue"); + assert(p->usb_status == USB_RET_ASYNC); + p->async = EHCI_ASYNC_INFLIGHT; + } } -static int ehci_state_execute(EHCIQueue *q, int async) +static int ehci_state_execute(EHCIQueue *q) { + EHCIPacket *p = QTAILQ_FIRST(&q->packets); int again = 0; + assert(p != NULL); + assert(p->qtdaddr == q->qtdaddr); + if (ehci_qh_do_overlay(q) != 0) { return -1; } @@ -1828,55 +2015,60 @@ // TODO write back ptr to async list when done or out of time // TODO Windows does not seem to ever set the MULT field - if (!async) { + if (!q->async) { int transactCtr = get_field(q->qh.epcap, QH_EPCAP_MULT); if (!transactCtr) { - ehci_set_state(q->ehci, async, EST_HORIZONTALQH); + ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); again = 1; goto out; } } - if (async) { + if (q->async) { ehci_set_usbsts(q->ehci, USBSTS_REC); } - q->usb_status = ehci_execute(q); - if (q->usb_status == USB_RET_PROCERR) { + p->usb_status = ehci_execute(p, "process"); + if (p->usb_status == USB_RET_PROCERR) { again = -1; goto out; } - if (q->usb_status == USB_RET_ASYNC) { + if (p->usb_status == USB_RET_ASYNC) { ehci_flush_qh(q); - trace_usb_ehci_queue_action(q, "suspend"); - q->async = EHCI_ASYNC_INFLIGHT; - ehci_set_state(q->ehci, async, EST_HORIZONTALQH); + trace_usb_ehci_packet_action(p->queue, p, "async"); + p->async = EHCI_ASYNC_INFLIGHT; + ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); again = 1; + ehci_fill_queue(p); goto out; } - ehci_set_state(q->ehci, async, EST_EXECUTING); + ehci_set_state(q->ehci, q->async, EST_EXECUTING); again = 1; out: return again; } -static int ehci_state_executing(EHCIQueue *q, int async) +static int ehci_state_executing(EHCIQueue *q) { + EHCIPacket *p = QTAILQ_FIRST(&q->packets); int again = 0; + assert(p != NULL); + assert(p->qtdaddr == q->qtdaddr); + ehci_execute_complete(q); - if (q->usb_status == USB_RET_ASYNC) { + if (p->usb_status == USB_RET_ASYNC) { goto out; } - if (q->usb_status == USB_RET_PROCERR) { + if (p->usb_status == USB_RET_PROCERR) { again = -1; goto out; } // 4.10.3 - if (!async) { + if (!q->async) { int transactCtr = get_field(q->qh.epcap, QH_EPCAP_MULT); transactCtr--; set_field(&q->qh.epcap, transactCtr, QH_EPCAP_MULT); @@ -1885,10 +2077,10 @@ static int ehci_state_executing(EHCIQueue *q, int async) } /* 4.10.5 */ - if (q->usb_status == USB_RET_NAK) { - ehci_set_state(q->ehci, async, EST_HORIZONTALQH); + if (p->usb_status == USB_RET_NAK) { + ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); } else { - ehci_set_state(q->ehci, async, EST_WRITEBACK); + ehci_set_state(q->ehci, q->async, EST_WRITEBACK); } again = 1; @@ -1899,14 +2091,21 @@ out: } -static int ehci_state_writeback(EHCIQueue *q, int async) +static int ehci_state_writeback(EHCIQueue *q) { +
EHCIPacket *p = QTAILQ_FIRST(&q->packets); + uint32_t *qtd, addr; int again = 0; /* Write back the QTD from the QH area */ - ehci_trace_qtd(q, NLPTR_GET(q->qtdaddr), (EHCIqtd*) &q->qh.next_qtd); - put_dwords(q->ehci, NLPTR_GET(q->qtdaddr), (uint32_t *) &q->qh.next_qtd, - sizeof(EHCIqtd) >> 2); + assert(p != NULL); + assert(p->qtdaddr == q->qtdaddr); + + ehci_trace_qtd(q, NLPTR_GET(p->qtdaddr), (EHCIqtd *) &q->qh.next_qtd); + qtd = (uint32_t *) &q->qh.next_qtd; + addr = NLPTR_GET(p->qtdaddr); + put_dwords(q->ehci, addr + 2 * sizeof(uint32_t), qtd + 2, 2); + ehci_free_packet(p); /* * EHCI specs say go horizontal here. @@ -1917,10 +2116,10 @@ static int ehci_state_writeback(EHCIQueue *q, int async) * bit is clear. */ if (q->qh.token & QTD_TOKEN_HALT) { - ehci_set_state(q->ehci, async, EST_HORIZONTALQH); + ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); again = 1; } else { - ehci_set_state(q->ehci, async, EST_ADVANCEQUEUE); + ehci_set_state(q->ehci, q->async, EST_ADVANCEQUEUE); again = 1; } return again; @@ -1930,8 +2129,7 @@ static int ehci_state_writeback(EHCIQueue *q, int async) * This is the state machine that is common to both async and periodic */ -static void ehci_advance_state(EHCIState *ehci, - int async) +static void ehci_advance_state(EHCIState *ehci, int async) { EHCIQueue *q = NULL; int again; @@ -1948,7 +2146,12 @@ static void ehci_advance_state(EHCIState *ehci, case EST_FETCHQH: q = ehci_state_fetchqh(ehci, async); - again = q ? 1 : 0; + if (q != NULL) { + assert(q->async == async); + again = 1; + } else { + again = 0; + } break; case EST_FETCHITD: @@ -1960,29 +2163,35 @@ static void ehci_advance_state(EHCIState *ehci, break; case EST_ADVANCEQUEUE: - again = ehci_state_advqueue(q, async); + again = ehci_state_advqueue(q); break; case EST_FETCHQTD: - again = ehci_state_fetchqtd(q, async); + again = ehci_state_fetchqtd(q); break; case EST_HORIZONTALQH: - again = ehci_state_horizqh(q, async); + again = ehci_state_horizqh(q); break; case EST_EXECUTE: - again = ehci_state_execute(q, async); + again = ehci_state_execute(q); + if (async) { + ehci->async_stepdown = 0; + } break; case EST_EXECUTING: assert(q != NULL); - again = ehci_state_executing(q, async); + if (async) { + ehci->async_stepdown = 0; + } + again = ehci_state_executing(q); break; case EST_WRITEBACK: assert(q != NULL); - again = ehci_state_writeback(q, async); + again = ehci_state_writeback(q); break; default: @@ -2009,17 +2218,15 @@ static void ehci_advance_async_state(EHCIState *ehci) switch(ehci_get_state(ehci, async)) { case EST_INACTIVE: - if (!(ehci->usbcmd & USBCMD_ASE)) { + if (!ehci_async_enabled(ehci)) { break; } - ehci_set_usbsts(ehci, USBSTS_ASS); ehci_set_state(ehci, async, EST_ACTIVE); // No break, fall through to ACTIVE case EST_ACTIVE: - if ( !(ehci->usbcmd & USBCMD_ASE)) { + if (!ehci_async_enabled(ehci)) { ehci_queues_rip_all(ehci, async); - ehci_clear_usbsts(ehci, USBSTS_ASS); ehci_set_state(ehci, async, EST_INACTIVE); break; } @@ -2045,7 +2252,7 @@ static void ehci_advance_async_state(EHCIState *ehci) */ if (ehci->usbcmd & USBCMD_IAAD) { /* Remove all unseen qhs from the async qhs queue */ - ehci_queues_rip_unused(ehci, async, 1); + ehci_queues_tag_unused_async(ehci); DPRINTF("ASYNC: doorbell request acknowledged\n"); ehci->usbcmd &= ~USBCMD_IAAD; ehci_set_interrupt(ehci, USBSTS_IAA); @@ -2070,17 +2277,15 @@ static void ehci_advance_periodic_state(EHCIState *ehci) switch(ehci_get_state(ehci, async)) { case EST_INACTIVE: - if ( !(ehci->frindex & 7) && (ehci->usbcmd & USBCMD_PSE)) { - 
ehci_set_usbsts(ehci, USBSTS_PSS); + if (!(ehci->frindex & 7) && ehci_periodic_enabled(ehci)) { ehci_set_state(ehci, async, EST_ACTIVE); // No break, fall through to ACTIVE } else break; case EST_ACTIVE: - if ( !(ehci->frindex & 7) && !(ehci->usbcmd & USBCMD_PSE)) { + if (!(ehci->frindex & 7) && !ehci_periodic_enabled(ehci)) { ehci_queues_rip_all(ehci, async); - ehci_clear_usbsts(ehci, USBSTS_PSS); ehci_set_state(ehci, async, EST_INACTIVE); break; } @@ -2100,7 +2305,7 @@ static void ehci_advance_periodic_state(EHCIState *ehci) ehci_set_fetch_addr(ehci, async,entry); ehci_set_state(ehci, async, EST_FETCHENTRY); ehci_advance_state(ehci, async); - ehci_queues_rip_unused(ehci, async, 0); + ehci_queues_rip_unused(ehci, async); break; default: @@ -2111,58 +2316,86 @@ static void ehci_advance_periodic_state(EHCIState *ehci) } } +static void ehci_update_frindex(EHCIState *ehci, int frames) +{ + int i; + + if (!ehci_enabled(ehci)) { + return; + } + + for (i = 0; i < frames; i++) { + ehci->frindex += 8; + + if (ehci->frindex == 0x00002000) { + ehci_set_interrupt(ehci, USBSTS_FLR); + } + + if (ehci->frindex == 0x00004000) { + ehci_set_interrupt(ehci, USBSTS_FLR); + ehci->frindex = 0; + } + } +} + static void ehci_frame_timer(void *opaque) { EHCIState *ehci = opaque; + int schedules = 0; int64_t expire_time, t_now; uint64_t ns_elapsed; - int frames; + int frames, skipped_frames; int i; - int skipped_frames = 0; t_now = qemu_get_clock_ns(vm_clock); - expire_time = t_now + (get_ticks_per_sec() / ehci->freq); - ns_elapsed = t_now - ehci->last_run_ns; frames = ns_elapsed / FRAME_TIMER_NS; - for (i = 0; i < frames; i++) { - if ( !(ehci->usbsts & USBSTS_HALT)) { - ehci->frindex += 8; - - if (ehci->frindex == 0x00002000) { - ehci_set_interrupt(ehci, USBSTS_FLR); - } + if (ehci_periodic_enabled(ehci) || ehci->pstate != EST_INACTIVE) { + schedules++; + expire_time = t_now + (get_ticks_per_sec() / FRAME_TIMER_FREQ); - if (ehci->frindex == 0x00004000) { - ehci_set_interrupt(ehci, USBSTS_FLR); - ehci->frindex = 0; - } + if (frames > ehci->maxframes) { + skipped_frames = frames - ehci->maxframes; + ehci_update_frindex(ehci, skipped_frames); + ehci->last_run_ns += FRAME_TIMER_NS * skipped_frames; + frames -= skipped_frames; + DPRINTF("WARNING - EHCI skipped %d frames\n", skipped_frames); } - if (frames - i > ehci->maxframes) { - skipped_frames++; - } else { + for (i = 0; i < frames; i++) { + ehci_update_frindex(ehci, 1); ehci_advance_periodic_state(ehci); + ehci->last_run_ns += FRAME_TIMER_NS; } - - ehci->last_run_ns += FRAME_TIMER_NS; - } - -#if 0 - if (skipped_frames) { - DPRINTF("WARNING - EHCI skipped %d frames\n", skipped_frames); + } else { + if (ehci->async_stepdown < ehci->maxframes / 2) { + ehci->async_stepdown++; + } + expire_time = t_now + (get_ticks_per_sec() + * ehci->async_stepdown / FRAME_TIMER_FREQ); + ehci_update_frindex(ehci, frames); + ehci->last_run_ns += FRAME_TIMER_NS * frames; } -#endif /* Async is not inside loop since it executes everything it can once * called */ - ehci_advance_async_state(ehci); + if (ehci_async_enabled(ehci) || ehci->astate != EST_INACTIVE) { + schedules++; + qemu_bh_schedule(ehci->async_bh); + } - qemu_mod_timer(ehci->frame_timer, expire_time); + if (schedules) { + qemu_mod_timer(ehci->frame_timer, expire_time); + } } +static void ehci_async_bh(void *opaque) +{ + EHCIState *ehci = opaque; + ehci_advance_async_state(ehci); +} static const MemoryRegionOps ehci_mem_ops = { .old_mmio = { @@ -2186,13 +2419,61 @@ static USBBusOps ehci_bus_ops = { .register_companion = 
ehci_register_companion, }; +static int usb_ehci_post_load(void *opaque, int version_id) +{ + EHCIState *s = opaque; + int i; + + for (i = 0; i < NB_PORTS; i++) { + USBPort *companion = s->companion_ports[i]; + if (companion == NULL) { + continue; + } + if (s->portsc[i] & PORTSC_POWNER) { + companion->dev = s->ports[i].dev; + } else { + companion->dev = NULL; + } + } + + return 0; +} + static const VMStateDescription vmstate_ehci = { - .name = "ehci", - .unmigratable = 1, + .name = "ehci", + .version_id = 1, + .post_load = usb_ehci_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, EHCIState), + /* mmio registers */ + VMSTATE_UINT32(usbcmd, EHCIState), + VMSTATE_UINT32(usbsts, EHCIState), + VMSTATE_UINT32(usbintr, EHCIState), + VMSTATE_UINT32(frindex, EHCIState), + VMSTATE_UINT32(ctrldssegment, EHCIState), + VMSTATE_UINT32(periodiclistbase, EHCIState), + VMSTATE_UINT32(asynclistaddr, EHCIState), + VMSTATE_UINT32(configflag, EHCIState), + VMSTATE_UINT32(portsc[0], EHCIState), + VMSTATE_UINT32(portsc[1], EHCIState), + VMSTATE_UINT32(portsc[2], EHCIState), + VMSTATE_UINT32(portsc[3], EHCIState), + VMSTATE_UINT32(portsc[4], EHCIState), + VMSTATE_UINT32(portsc[5], EHCIState), + /* frame timer */ + VMSTATE_TIMER(frame_timer, EHCIState), + VMSTATE_UINT64(last_run_ns, EHCIState), + VMSTATE_UINT32(async_stepdown, EHCIState), + /* schedule state */ + VMSTATE_UINT32(astate, EHCIState), + VMSTATE_UINT32(pstate, EHCIState), + VMSTATE_UINT32(a_fetch_addr, EHCIState), + VMSTATE_UINT32(p_fetch_addr, EHCIState), + VMSTATE_END_OF_LIST() + } }; static Property ehci_properties[] = { - DEFINE_PROP_UINT32("freq", EHCIState, freq, FRAME_TIMER_FREQ), DEFINE_PROP_UINT32("maxframes", EHCIState, maxframes, 128), DEFINE_PROP_END_OF_LIST(), }; @@ -2298,8 +2579,10 @@ static int usb_ehci_initfn(PCIDevice *dev) } s->frame_timer = qemu_new_timer_ns(vm_clock, ehci_frame_timer, s); + s->async_bh = qemu_bh_new(ehci_async_bh, s); QTAILQ_INIT(&s->aqueues); QTAILQ_INIT(&s->pqueues); + usb_packet_init(&s->ipacket); qemu_register_reset(ehci_reset, s); diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c index 1a1cc88b1f..844e7ed166 100644 --- a/hw/usb/hcd-ohci.c +++ b/hw/usb/hcd-ohci.c @@ -31,7 +31,7 @@ #include "hw/usb.h" #include "hw/pci.h" #include "hw/sysbus.h" -#include "hw/qdev-addr.h" +#include "hw/qdev-dma.h" //#define DEBUG_OHCI /* Dump packet contents. */ @@ -62,6 +62,7 @@ typedef struct { USBBus bus; qemu_irq irq; MemoryRegion mem; + DMAContext *dma; int num_ports; const char *name; @@ -104,7 +105,7 @@ typedef struct { uint32_t htest; /* SM501 local memory offset */ - target_phys_addr_t localmem_base; + dma_addr_t localmem_base; /* Active packets. 
*/ uint32_t old_ctl; @@ -482,14 +483,14 @@ static void ohci_reset(void *opaque) /* Get an array of dwords from main memory */ static inline int get_dwords(OHCIState *ohci, - uint32_t addr, uint32_t *buf, int num) + dma_addr_t addr, uint32_t *buf, int num) { int i; addr += ohci->localmem_base; for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { - cpu_physical_memory_read(addr, buf, sizeof(*buf)); + dma_memory_read(ohci->dma, addr, buf, sizeof(*buf)); *buf = le32_to_cpu(*buf); } @@ -498,7 +499,7 @@ static inline int get_dwords(OHCIState *ohci, /* Put an array of dwords in to main memory */ static inline int put_dwords(OHCIState *ohci, - uint32_t addr, uint32_t *buf, int num) + dma_addr_t addr, uint32_t *buf, int num) { int i; @@ -506,7 +507,7 @@ static inline int put_dwords(OHCIState *ohci, for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { uint32_t tmp = cpu_to_le32(*buf); - cpu_physical_memory_write(addr, &tmp, sizeof(tmp)); + dma_memory_write(ohci->dma, addr, &tmp, sizeof(tmp)); } return 1; @@ -514,14 +515,14 @@ static inline int put_dwords(OHCIState *ohci, /* Get an array of words from main memory */ static inline int get_words(OHCIState *ohci, - uint32_t addr, uint16_t *buf, int num) + dma_addr_t addr, uint16_t *buf, int num) { int i; addr += ohci->localmem_base; for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { - cpu_physical_memory_read(addr, buf, sizeof(*buf)); + dma_memory_read(ohci->dma, addr, buf, sizeof(*buf)); *buf = le16_to_cpu(*buf); } @@ -530,7 +531,7 @@ static inline int get_words(OHCIState *ohci, /* Put an array of words in to main memory */ static inline int put_words(OHCIState *ohci, - uint32_t addr, uint16_t *buf, int num) + dma_addr_t addr, uint16_t *buf, int num) { int i; @@ -538,40 +539,40 @@ static inline int put_words(OHCIState *ohci, for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { uint16_t tmp = cpu_to_le16(*buf); - cpu_physical_memory_write(addr, &tmp, sizeof(tmp)); + dma_memory_write(ohci->dma, addr, &tmp, sizeof(tmp)); } return 1; } static inline int ohci_read_ed(OHCIState *ohci, - uint32_t addr, struct ohci_ed *ed) + dma_addr_t addr, struct ohci_ed *ed) { return get_dwords(ohci, addr, (uint32_t *)ed, sizeof(*ed) >> 2); } static inline int ohci_read_td(OHCIState *ohci, - uint32_t addr, struct ohci_td *td) + dma_addr_t addr, struct ohci_td *td) { return get_dwords(ohci, addr, (uint32_t *)td, sizeof(*td) >> 2); } static inline int ohci_read_iso_td(OHCIState *ohci, - uint32_t addr, struct ohci_iso_td *td) + dma_addr_t addr, struct ohci_iso_td *td) { return (get_dwords(ohci, addr, (uint32_t *)td, 4) && get_words(ohci, addr + 16, td->offset, 8)); } static inline int ohci_read_hcca(OHCIState *ohci, - uint32_t addr, struct ohci_hcca *hcca) + dma_addr_t addr, struct ohci_hcca *hcca) { - cpu_physical_memory_read(addr + ohci->localmem_base, hcca, sizeof(*hcca)); + dma_memory_read(ohci->dma, addr + ohci->localmem_base, hcca, sizeof(*hcca)); return 1; } static inline int ohci_put_ed(OHCIState *ohci, - uint32_t addr, struct ohci_ed *ed) + dma_addr_t addr, struct ohci_ed *ed) { /* ed->tail is under control of the HCD. 
* Since just ed->head is changed by HC, just write back this @@ -583,64 +584,63 @@ static inline int ohci_put_ed(OHCIState *ohci, } static inline int ohci_put_td(OHCIState *ohci, - uint32_t addr, struct ohci_td *td) + dma_addr_t addr, struct ohci_td *td) { return put_dwords(ohci, addr, (uint32_t *)td, sizeof(*td) >> 2); } static inline int ohci_put_iso_td(OHCIState *ohci, - uint32_t addr, struct ohci_iso_td *td) + dma_addr_t addr, struct ohci_iso_td *td) { return (put_dwords(ohci, addr, (uint32_t *)td, 4) && put_words(ohci, addr + 16, td->offset, 8)); } static inline int ohci_put_hcca(OHCIState *ohci, - uint32_t addr, struct ohci_hcca *hcca) + dma_addr_t addr, struct ohci_hcca *hcca) { - cpu_physical_memory_write(addr + ohci->localmem_base + HCCA_WRITEBACK_OFFSET, - (char *)hcca + HCCA_WRITEBACK_OFFSET, - HCCA_WRITEBACK_SIZE); + dma_memory_write(ohci->dma, + addr + ohci->localmem_base + HCCA_WRITEBACK_OFFSET, + (char *)hcca + HCCA_WRITEBACK_OFFSET, + HCCA_WRITEBACK_SIZE); return 1; } /* Read/Write the contents of a TD from/to main memory. */ static void ohci_copy_td(OHCIState *ohci, struct ohci_td *td, - uint8_t *buf, int len, int write) + uint8_t *buf, int len, DMADirection dir) { - uint32_t ptr; - uint32_t n; + dma_addr_t ptr, n; ptr = td->cbp; n = 0x1000 - (ptr & 0xfff); if (n > len) n = len; - cpu_physical_memory_rw(ptr + ohci->localmem_base, buf, n, write); + dma_memory_rw(ohci->dma, ptr + ohci->localmem_base, buf, n, dir); if (n == len) return; ptr = td->be & ~0xfffu; buf += n; - cpu_physical_memory_rw(ptr + ohci->localmem_base, buf, len - n, write); + dma_memory_rw(ohci->dma, ptr + ohci->localmem_base, buf, len - n, dir); } /* Read/Write the contents of an ISO TD from/to main memory. */ static void ohci_copy_iso_td(OHCIState *ohci, uint32_t start_addr, uint32_t end_addr, - uint8_t *buf, int len, int write) + uint8_t *buf, int len, DMADirection dir) { - uint32_t ptr; - uint32_t n; + dma_addr_t ptr, n; ptr = start_addr; n = 0x1000 - (ptr & 0xfff); if (n > len) n = len; - cpu_physical_memory_rw(ptr + ohci->localmem_base, buf, n, write); + dma_memory_rw(ohci->dma, ptr + ohci->localmem_base, buf, n, dir); if (n == len) return; ptr = end_addr & ~0xfffu; buf += n; - cpu_physical_memory_rw(ptr + ohci->localmem_base, buf, len - n, write); + dma_memory_rw(ohci->dma, ptr + ohci->localmem_base, buf, len - n, dir); } static void ohci_process_lists(OHCIState *ohci, int completion); @@ -803,7 +803,8 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, } if (len && dir != OHCI_TD_DIR_IN) { - ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, len, 0); + ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, len, + DMA_DIRECTION_TO_DEVICE); } if (completion) { @@ -827,7 +828,8 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, /* Writeback */ if (dir == OHCI_TD_DIR_IN && ret >= 0 && ret <= len) { /* IN transfer succeeded */ - ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, ret, 1); + ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, ret, + DMA_DIRECTION_FROM_DEVICE); OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC, OHCI_CC_NOERROR); OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE, ret); @@ -971,7 +973,8 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) pktlen = len; } if (!completion) { - ohci_copy_td(ohci, &td, ohci->usb_buf, pktlen, 0); + ohci_copy_td(ohci, &td, ohci->usb_buf, pktlen, + DMA_DIRECTION_TO_DEVICE); } } } @@ -1021,7 +1024,8 @@ static int ohci_service_td(OHCIState *ohci, 
struct ohci_ed *ed) } if (ret >= 0) { if (dir == OHCI_TD_DIR_IN) { - ohci_copy_td(ohci, &td, ohci->usb_buf, ret, 1); + ohci_copy_td(ohci, &td, ohci->usb_buf, ret, + DMA_DIRECTION_FROM_DEVICE); #ifdef DEBUG_PACKET DPRINTF(" data:"); for (i = 0; i < ret; i++) @@ -1748,11 +1752,14 @@ static USBBusOps ohci_bus_ops = { }; static int usb_ohci_init(OHCIState *ohci, DeviceState *dev, - int num_ports, uint32_t localmem_base, - char *masterbus, uint32_t firstport) + int num_ports, dma_addr_t localmem_base, + char *masterbus, uint32_t firstport, + DMAContext *dma) { int i; + ohci->dma = dma; + if (usb_frame_time == 0) { #ifdef OHCI_TIME_WARP usb_frame_time = get_ticks_per_sec(); @@ -1817,7 +1824,8 @@ static int usb_ohci_initfn_pci(struct PCIDevice *dev) ohci->pci_dev.config[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin A */ if (usb_ohci_init(&ohci->state, &dev->qdev, ohci->num_ports, 0, - ohci->masterbus, ohci->firstport) != 0) { + ohci->masterbus, ohci->firstport, + pci_dma_context(dev)) != 0) { return -1; } ohci->state.irq = ohci->pci_dev.irq[0]; @@ -1831,7 +1839,7 @@ typedef struct { SysBusDevice busdev; OHCIState ohci; uint32_t num_ports; - target_phys_addr_t dma_offset; + dma_addr_t dma_offset; } OHCISysBusState; static int ohci_init_pxa(SysBusDevice *dev) @@ -1839,7 +1847,8 @@ static int ohci_init_pxa(SysBusDevice *dev) OHCISysBusState *s = FROM_SYSBUS(OHCISysBusState, dev); /* Cannot fail as we pass NULL for masterbus */ - usb_ohci_init(&s->ohci, &dev->qdev, s->num_ports, s->dma_offset, NULL, 0); + usb_ohci_init(&s->ohci, &dev->qdev, s->num_ports, s->dma_offset, NULL, 0, + NULL); sysbus_init_irq(dev, &s->ohci.irq); sysbus_init_mmio(dev, &s->ohci.mem); @@ -1875,7 +1884,7 @@ static TypeInfo ohci_pci_info = { static Property ohci_sysbus_properties[] = { DEFINE_PROP_UINT32("num-ports", OHCISysBusState, num_ports, 3), - DEFINE_PROP_TADDR("dma-offset", OHCISysBusState, dma_offset, 3), + DEFINE_PROP_DMAADDR("dma-offset", OHCISysBusState, dma_offset, 3), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c index 9e211a0bb4..8f652d2f4a 100644 --- a/hw/usb/hcd-uhci.c +++ b/hw/usb/hcd-uhci.c @@ -131,10 +131,14 @@ struct UHCIState { uint8_t status2; /* bit 0 and 1 are used to generate UHCI_STS_USBINT */ int64_t expire_time; QEMUTimer *frame_timer; + QEMUBH *bh; + uint32_t frame_bytes; + uint32_t frame_bandwidth; UHCIPort ports[NB_PORTS]; /* Interrupts that should be raised at the end of the current frame. 
*/ uint32_t pending_int_mask; + int irq_pin; /* Active packets */ QTAILQ_HEAD(, UHCIQueue) queues; @@ -288,10 +292,10 @@ static void uhci_async_cancel_device(UHCIState *s, USBDevice *dev) static void uhci_async_cancel_all(UHCIState *s) { - UHCIQueue *queue; + UHCIQueue *queue, *nq; UHCIAsync *curr, *n; - QTAILQ_FOREACH(queue, &s->queues, next) { + QTAILQ_FOREACH_SAFE(queue, &s->queues, next, nq) { QTAILQ_FOREACH_SAFE(curr, &queue->asyncs, next, n) { uhci_async_unlink(curr); uhci_async_cancel(curr); @@ -337,7 +341,7 @@ static void uhci_update_irq(UHCIState *s) } else { level = 0; } - qemu_set_irq(s->dev.irq[3], level); + qemu_set_irq(s->dev.irq[s->irq_pin], level); } static void uhci_reset(void *opaque) @@ -369,16 +373,10 @@ static void uhci_reset(void *opaque) } uhci_async_cancel_all(s); + qemu_bh_cancel(s->bh); uhci_update_irq(s); } -static void uhci_pre_save(void *opaque) -{ - UHCIState *s = opaque; - - uhci_async_cancel_all(s); -} - static const VMStateDescription vmstate_uhci_port = { .name = "uhci port", .version_id = 1, @@ -395,7 +393,6 @@ static const VMStateDescription vmstate_uhci = { .version_id = 2, .minimum_version_id = 1, .minimum_version_id_old = 1, - .pre_save = uhci_pre_save, .fields = (VMStateField []) { VMSTATE_PCI_DEVICE(dev, UHCIState), VMSTATE_UINT8_EQUAL(num_ports_vmstate, UHCIState), @@ -874,7 +871,7 @@ static int uhci_handle_td(UHCIState *s, uint32_t addr, UHCI_TD *td, done: len = uhci_complete_td(s, td, async, int_mask); - usb_packet_unmap(&async->packet); + usb_packet_unmap(&async->packet, &async->sgl); uhci_async_free(async); return len; } @@ -905,7 +902,9 @@ static void uhci_async_complete(USBPort *port, USBPacket *packet) uhci_async_free(async); } else { async->done = 1; - uhci_process_frame(s); + if (s->frame_bytes < s->frame_bandwidth) { + qemu_bh_schedule(s->bh); + } } } @@ -985,7 +984,7 @@ static void uhci_fill_queue(UHCIState *s, UHCI_TD *td) static void uhci_process_frame(UHCIState *s) { uint32_t frame_addr, link, old_td_ctrl, val, int_mask; - uint32_t curr_qh, td_count = 0, bytes_count = 0; + uint32_t curr_qh, td_count = 0; int cnt, ret; UHCI_TD td; UHCI_QH qh; @@ -1002,6 +1001,12 @@ static void uhci_process_frame(UHCIState *s) qhdb_reset(&qhdb); for (cnt = FRAME_MAX_LOOPS; is_valid(link) && cnt; cnt--) { + if (s->frame_bytes >= s->frame_bandwidth) { + /* We've reached the usb 1.1 bandwidth, which is + 1280 bytes/frame, stop processing */ + trace_usb_uhci_frame_stop_bandwidth(); + break; + } if (is_qh(link)) { /* QH */ trace_usb_uhci_qh_load(link & ~0xf); @@ -1011,18 +1016,12 @@ static void uhci_process_frame(UHCIState *s) * We're going in circles. Which is not a bug because * HCD is allowed to do that as part of the BW management. * - * Stop processing here if - * (a) no transaction has been done since we've been - * here last time, or - * (b) we've reached the usb 1.1 bandwidth, which is - * 1280 bytes/frame. + * Stop processing here if no transaction has been done + * since we've been here last time. 
*/ if (td_count == 0) { trace_usb_uhci_frame_loop_stop_idle(); break; - } else if (bytes_count >= 1280) { - trace_usb_uhci_frame_loop_stop_bandwidth(); - break; } else { trace_usb_uhci_frame_loop_continue(); td_count = 0; @@ -1085,7 +1084,7 @@ static void uhci_process_frame(UHCIState *s) trace_usb_uhci_td_complete(curr_qh & ~0xf, link & ~0xf); link = td.link; td_count++; - bytes_count += (td.ctrl & 0x7ff) + 1; + s->frame_bytes += (td.ctrl & 0x7ff) + 1; if (curr_qh) { /* update QH element link */ @@ -1112,12 +1111,20 @@ out: s->pending_int_mask |= int_mask; } +static void uhci_bh(void *opaque) +{ + UHCIState *s = opaque; + uhci_process_frame(s); +} + static void uhci_frame_timer(void *opaque) { UHCIState *s = opaque; /* prepare the timer for the next frame */ s->expire_time += (get_ticks_per_sec() / FRAME_TIMER_FREQ); + s->frame_bytes = 0; + qemu_bh_cancel(s->bh); if (!(s->cmd & UHCI_CMD_RS)) { /* Full stop */ @@ -1178,15 +1185,31 @@ static USBBusOps uhci_bus_ops = { static int usb_uhci_common_initfn(PCIDevice *dev) { + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev); UHCIState *s = DO_UPCAST(UHCIState, dev, dev); uint8_t *pci_conf = s->dev.config; int i; pci_conf[PCI_CLASS_PROG] = 0x00; /* TODO: reset value should be 0. */ - pci_conf[PCI_INTERRUPT_PIN] = 4; /* interrupt pin D */ pci_conf[USB_SBRN] = USB_RELEASE_1; // release number + switch (pc->device_id) { + case PCI_DEVICE_ID_INTEL_82801I_UHCI1: + s->irq_pin = 0; /* A */ + break; + case PCI_DEVICE_ID_INTEL_82801I_UHCI2: + s->irq_pin = 1; /* B */ + break; + case PCI_DEVICE_ID_INTEL_82801I_UHCI3: + s->irq_pin = 2; /* C */ + break; + default: + s->irq_pin = 3; /* D */ + break; + } + pci_config_set_interrupt_pin(pci_conf, s->irq_pin + 1); + if (s->masterbus) { USBPort *ports[NB_PORTS]; for(i = 0; i < NB_PORTS; i++) { @@ -1204,6 +1227,7 @@ static int usb_uhci_common_initfn(PCIDevice *dev) USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL); } } + s->bh = qemu_bh_new(uhci_bh, s); s->frame_timer = qemu_new_timer_ns(vm_clock, uhci_frame_timer, s); s->num_ports_vmstate = NB_PORTS; QTAILQ_INIT(&s->queues); @@ -1244,6 +1268,7 @@ static int usb_uhci_exit(PCIDevice *dev) static Property uhci_properties[] = { DEFINE_PROP_STRING("masterbus", UHCIState, masterbus), DEFINE_PROP_UINT32("firstport", UHCIState, firstport, 0), + DEFINE_PROP_UINT32("bandwidth", UHCIState, frame_bandwidth, 1280), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c index 5cf1a64699..6c2ff024e0 100644 --- a/hw/usb/hcd-xhci.c +++ b/hw/usb/hcd-xhci.c @@ -23,6 +23,7 @@ #include "hw/usb.h" #include "hw/pci.h" #include "hw/msi.h" +#include "trace.h" //#define DEBUG_XHCI //#define DEBUG_DATA @@ -421,7 +422,6 @@ typedef struct XHCIEvRingSeg { uint32_t rsvd; } XHCIEvRingSeg; -#ifdef DEBUG_XHCI static const char *TRBType_names[] = { [TRB_RESERVED] = "TRB_RESERVED", [TR_NORMAL] = "TR_NORMAL", @@ -473,7 +473,6 @@ static const char *trb_name(XHCITRB *trb) return lookup_name(TRB_TYPE(*trb), TRBType_names, ARRAY_SIZE(TRBType_names)); } -#endif static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid, unsigned int epid); @@ -505,14 +504,13 @@ static void xhci_irq_update(XHCIState *xhci) level = 1; } - DPRINTF("xhci_irq_update(): %d\n", level); - if (xhci->msi && msi_enabled(&xhci->pci_dev)) { if (level) { - DPRINTF("xhci_irq_update(): MSI signal\n"); + trace_usb_xhci_irq_msi(0); msi_notify(&xhci->pci_dev, 0); } } else { + trace_usb_xhci_irq_intx(level); qemu_set_irq(xhci->irq, level); } } @@ -542,9 +540,8 @@ static void xhci_write_event(XHCIState *xhci, XHCIEvent 
*event) } ev_trb.control = cpu_to_le32(ev_trb.control); - DPRINTF("xhci_write_event(): [%d] %016"PRIx64" %08x %08x %s\n", - xhci->er_ep_idx, ev_trb.parameter, ev_trb.status, ev_trb.control, - trb_name(&ev_trb)); + trace_usb_xhci_queue_event(xhci->er_ep_idx, trb_name(&ev_trb), + ev_trb.parameter, ev_trb.status, ev_trb.control); addr = xhci->er_start + TRB_SIZE*xhci->er_ep_idx; pci_dma_write(&xhci->pci_dev, addr, &ev_trb, TRB_SIZE); @@ -704,10 +701,8 @@ static TRBType xhci_ring_fetch(XHCIState *xhci, XHCIRing *ring, XHCITRB *trb, le32_to_cpus(&trb->status); le32_to_cpus(&trb->control); - DPRINTF("xhci: TRB fetched [" DMA_ADDR_FMT "]: " - "%016" PRIx64 " %08x %08x %s\n", - ring->dequeue, trb->parameter, trb->status, trb->control, - trb_name(trb)); + trace_usb_xhci_fetch_trb(ring->dequeue, trb_name(trb), + trb->parameter, trb->status, trb->control); if ((trb->control & TRB_C) != ring->ccs) { return 0; @@ -746,10 +741,6 @@ static int xhci_ring_chain_length(XHCIState *xhci, const XHCIRing *ring) le32_to_cpus(&trb.status); le32_to_cpus(&trb.control); - DPRINTF("xhci: TRB peeked [" DMA_ADDR_FMT "]: " - "%016" PRIx64 " %08x %08x\n", - dequeue, trb.parameter, trb.status, trb.control); - if ((trb.control & TRB_C) != ccs) { return -length; } @@ -812,14 +803,13 @@ static void xhci_er_reset(XHCIState *xhci) static void xhci_run(XHCIState *xhci) { - DPRINTF("xhci_run()\n"); - + trace_usb_xhci_run(); xhci->usbsts &= ~USBSTS_HCH; } static void xhci_stop(XHCIState *xhci) { - DPRINTF("xhci_stop()\n"); + trace_usb_xhci_stop(); xhci->usbsts |= USBSTS_HCH; xhci->crcr_low &= ~CRCR_CRR; } @@ -852,11 +842,10 @@ static TRBCCode xhci_enable_ep(XHCIState *xhci, unsigned int slotid, dma_addr_t dequeue; int i; + trace_usb_xhci_ep_enable(slotid, epid); assert(slotid >= 1 && slotid <= MAXSLOTS); assert(epid >= 1 && epid <= 31); - DPRINTF("xhci_enable_ep(%d, %d)\n", slotid, epid); - slot = &xhci->slots[slotid-1]; if (slot->eps[epid-1]) { fprintf(stderr, "xhci: slot %d ep %d already enabled!\n", slotid, epid); @@ -971,11 +960,10 @@ static TRBCCode xhci_disable_ep(XHCIState *xhci, unsigned int slotid, XHCISlot *slot; XHCIEPContext *epctx; + trace_usb_xhci_ep_disable(slotid, epid); assert(slotid >= 1 && slotid <= MAXSLOTS); assert(epid >= 1 && epid <= 31); - DPRINTF("xhci_disable_ep(%d, %d)\n", slotid, epid); - slot = &xhci->slots[slotid-1]; if (!slot->eps[epid-1]) { @@ -1001,8 +989,7 @@ static TRBCCode xhci_stop_ep(XHCIState *xhci, unsigned int slotid, XHCISlot *slot; XHCIEPContext *epctx; - DPRINTF("xhci_stop_ep(%d, %d)\n", slotid, epid); - + trace_usb_xhci_ep_stop(slotid, epid); assert(slotid >= 1 && slotid <= MAXSLOTS); if (epid < 1 || epid > 31) { @@ -1036,10 +1023,9 @@ static TRBCCode xhci_reset_ep(XHCIState *xhci, unsigned int slotid, XHCIEPContext *epctx; USBDevice *dev; + trace_usb_xhci_ep_reset(slotid, epid); assert(slotid >= 1 && slotid <= MAXSLOTS); - DPRINTF("xhci_reset_ep(%d, %d)\n", slotid, epid); - if (epid < 1 || epid > 31) { fprintf(stderr, "xhci: bad ep %d\n", epid); return CC_TRB_ERROR; @@ -1416,12 +1402,14 @@ static int xhci_setup_packet(XHCITransfer *xfer, USBDevice *dev) static int xhci_complete_packet(XHCITransfer *xfer, int ret) { if (ret == USB_RET_ASYNC) { + trace_usb_xhci_xfer_async(xfer); xfer->running_async = 1; xfer->running_retry = 0; xfer->complete = 0; xfer->cancelled = 0; return 0; } else if (ret == USB_RET_NAK) { + trace_usb_xhci_xfer_nak(xfer); xfer->running_async = 0; xfer->running_retry = 1; xfer->complete = 0; @@ -1436,10 +1424,12 @@ static int xhci_complete_packet(XHCITransfer *xfer, 
int ret) if (ret >= 0) { xfer->status = CC_SUCCESS; xhci_xfer_data(xfer, xfer->data, ret, xfer->in_xfer, 0, 1); + trace_usb_xhci_xfer_success(xfer, ret); return 0; } /* error */ + trace_usb_xhci_xfer_error(xfer, ret); switch (ret) { case USB_RET_NODEV: xfer->status = CC_USB_TRANSACTION_ERROR; @@ -1475,11 +1465,12 @@ static int xhci_fire_ctl_transfer(XHCIState *xhci, XHCITransfer *xfer) USBDevice *dev; int ret; - DPRINTF("xhci_fire_ctl_transfer(slot=%d)\n", xfer->slotid); - trb_setup = &xfer->trbs[0]; trb_status = &xfer->trbs[xfer->trb_count-1]; + trace_usb_xhci_xfer_start(xfer, xfer->slotid, xfer->epid, + trb_setup->parameter >> 48); + /* at most one Event Data TRB allowed after STATUS */ if (TRB_TYPE(*trb_status) == TR_EVDATA && xfer->trb_count > 2) { trb_status--; @@ -1620,15 +1611,14 @@ static int xhci_fire_transfer(XHCIState *xhci, XHCITransfer *xfer, XHCIEPContext unsigned int length = 0; XHCITRB *trb; - DPRINTF("xhci_fire_transfer(slotid=%d,epid=%d)\n", xfer->slotid, xfer->epid); - for (i = 0; i < xfer->trb_count; i++) { trb = &xfer->trbs[i]; if (TRB_TYPE(*trb) == TR_NORMAL || TRB_TYPE(*trb) == TR_ISOCH) { length += trb->status & 0x1ffff; } } - DPRINTF("xhci: total TD length=%d\n", length); + + trace_usb_xhci_xfer_start(xfer, xfer->slotid, xfer->epid, length); if (!epctx->has_bg) { xfer->data_length = length; @@ -1664,9 +1654,9 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid, unsigned int epid int length; int i; + trace_usb_xhci_ep_kick(slotid, epid); assert(slotid >= 1 && slotid <= MAXSLOTS); assert(epid >= 1 && epid <= 31); - DPRINTF("xhci_kick_ep(%d, %d)\n", slotid, epid); if (!xhci->slots[slotid-1].enabled) { fprintf(stderr, "xhci: xhci_kick_ep for disabled slot %d\n", slotid); @@ -1684,15 +1674,13 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid, unsigned int epid XHCITransfer *xfer = epctx->retry; int result; - DPRINTF("xhci: retry nack'ed transfer ...\n"); + trace_usb_xhci_xfer_retry(xfer); assert(xfer->running_retry); xhci_setup_packet(xfer, xfer->packet.ep->dev); result = usb_handle_packet(xfer->packet.ep->dev, &xfer->packet); if (result == USB_RET_NAK) { - DPRINTF("xhci: ... xfer still nacked\n"); return; } - DPRINTF("xhci: ... 
result %d\n", result); xhci_complete_packet(xfer, result); assert(!xfer->running_retry); epctx->retry = NULL; @@ -1708,21 +1696,14 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid, unsigned int epid while (1) { XHCITransfer *xfer = &epctx->transfers[epctx->next_xfer]; if (xfer->running_async || xfer->running_retry || xfer->backgrounded) { - DPRINTF("xhci: ep is busy (#%d,%d,%d,%d)\n", - epctx->next_xfer, xfer->running_async, - xfer->running_retry, xfer->backgrounded); break; - } else { - DPRINTF("xhci: ep: using #%d\n", epctx->next_xfer); } length = xhci_ring_chain_length(xhci, &epctx->ring); if (length < 0) { - DPRINTF("xhci: incomplete TD (%d TRBs)\n", -length); break; } else if (length == 0) { break; } - DPRINTF("xhci: fetching %d-TRB TD\n", length); if (xfer->trbs && xfer->trb_alloced < length) { xfer->trb_count = 0; xfer->trb_alloced = 0; @@ -1757,7 +1738,6 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid, unsigned int epid } if (epctx->state == EP_HALTED) { - DPRINTF("xhci: ep halted, stopping schedule\n"); break; } if (xfer->running_retry) { @@ -1770,8 +1750,8 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid, unsigned int epid static TRBCCode xhci_enable_slot(XHCIState *xhci, unsigned int slotid) { + trace_usb_xhci_slot_enable(slotid); assert(slotid >= 1 && slotid <= MAXSLOTS); - DPRINTF("xhci_enable_slot(%d)\n", slotid); xhci->slots[slotid-1].enabled = 1; xhci->slots[slotid-1].port = 0; memset(xhci->slots[slotid-1].eps, 0, sizeof(XHCIEPContext*)*31); @@ -1783,8 +1763,8 @@ static TRBCCode xhci_disable_slot(XHCIState *xhci, unsigned int slotid) { int i; + trace_usb_xhci_slot_disable(slotid); assert(slotid >= 1 && slotid <= MAXSLOTS); - DPRINTF("xhci_disable_slot(%d)\n", slotid); for (i = 1; i <= 31; i++) { if (xhci->slots[slotid-1].eps[i-1]) { @@ -1810,8 +1790,8 @@ static TRBCCode xhci_address_slot(XHCIState *xhci, unsigned int slotid, int i; TRBCCode res; + trace_usb_xhci_slot_address(slotid); assert(slotid >= 1 && slotid <= MAXSLOTS); - DPRINTF("xhci_address_slot(%d)\n", slotid); dcbaap = xhci_addr64(xhci->dcbaap_low, xhci->dcbaap_high); pci_dma_read(&xhci->pci_dev, dcbaap + 8*slotid, &poctx, sizeof(poctx)); @@ -1897,8 +1877,8 @@ static TRBCCode xhci_configure_slot(XHCIState *xhci, unsigned int slotid, int i; TRBCCode res; + trace_usb_xhci_slot_configure(slotid); assert(slotid >= 1 && slotid <= MAXSLOTS); - DPRINTF("xhci_configure_slot(%d)\n", slotid); ictx = xhci_mask64(pictx); octx = xhci->slots[slotid-1].ctx; @@ -1985,8 +1965,8 @@ static TRBCCode xhci_evaluate_slot(XHCIState *xhci, unsigned int slotid, uint32_t islot_ctx[4]; uint32_t slot_ctx[4]; + trace_usb_xhci_slot_evaluate(slotid); assert(slotid >= 1 && slotid <= MAXSLOTS); - DPRINTF("xhci_evaluate_slot(%d)\n", slotid); ictx = xhci_mask64(pictx); octx = xhci->slots[slotid-1].ctx; @@ -2048,8 +2028,8 @@ static TRBCCode xhci_reset_slot(XHCIState *xhci, unsigned int slotid) dma_addr_t octx; int i; + trace_usb_xhci_slot_reset(slotid); assert(slotid >= 1 && slotid <= MAXSLOTS); - DPRINTF("xhci_reset_slot(%d)\n", slotid); octx = xhci->slots[slotid-1].ctx; @@ -2296,12 +2276,12 @@ static void xhci_update_port(XHCIState *xhci, XHCIPort *port, int is_detach) } } -static void xhci_reset(void *opaque) +static void xhci_reset(DeviceState *dev) { - XHCIState *xhci = opaque; + XHCIState *xhci = DO_UPCAST(XHCIState, pci_dev.qdev, dev); int i; - DPRINTF("xhci: full reset\n"); + trace_usb_xhci_reset(); if (!(xhci->usbsts & USBSTS_HCH)) { fprintf(stderr, "xhci: reset while running!\n"); } @@ 
-2342,77 +2322,98 @@ static void xhci_reset(void *opaque) static uint32_t xhci_cap_read(XHCIState *xhci, uint32_t reg) { - DPRINTF("xhci_cap_read(0x%x)\n", reg); + uint32_t ret; switch (reg) { case 0x00: /* HCIVERSION, CAPLENGTH */ - return 0x01000000 | LEN_CAP; + ret = 0x01000000 | LEN_CAP; + break; case 0x04: /* HCSPARAMS 1 */ - return (MAXPORTS<<24) | (MAXINTRS<<8) | MAXSLOTS; + ret = (MAXPORTS<<24) | (MAXINTRS<<8) | MAXSLOTS; + break; case 0x08: /* HCSPARAMS 2 */ - return 0x0000000f; + ret = 0x0000000f; + break; case 0x0c: /* HCSPARAMS 3 */ - return 0x00000000; + ret = 0x00000000; + break; case 0x10: /* HCCPARAMS */ -#if TARGET_PHYS_ADDR_BITS > 32 - return 0x00081001; -#else - return 0x00081000; -#endif + if (sizeof(dma_addr_t) == 4) { + ret = 0x00081000; + } else { + ret = 0x00081001; + } + break; case 0x14: /* DBOFF */ - return OFF_DOORBELL; + ret = OFF_DOORBELL; + break; case 0x18: /* RTSOFF */ - return OFF_RUNTIME; + ret = OFF_RUNTIME; + break; /* extended capabilities */ case 0x20: /* Supported Protocol:00 */ -#if USB3_PORTS > 0 - return 0x02000402; /* USB 2.0 */ -#else - return 0x02000002; /* USB 2.0 */ -#endif + ret = 0x02000402; /* USB 2.0 */ + break; case 0x24: /* Supported Protocol:04 */ - return 0x20425455; /* "USB " */ + ret = 0x20425455; /* "USB " */ + break; case 0x28: /* Supported Protocol:08 */ - return 0x00000001 | (USB2_PORTS<<8); + ret = 0x00000001 | (USB2_PORTS<<8); + break; case 0x2c: /* Supported Protocol:0c */ - return 0x00000000; /* reserved */ -#if USB3_PORTS > 0 + ret = 0x00000000; /* reserved */ + break; case 0x30: /* Supported Protocol:00 */ - return 0x03000002; /* USB 3.0 */ + ret = 0x03000002; /* USB 3.0 */ + break; case 0x34: /* Supported Protocol:04 */ - return 0x20425455; /* "USB " */ + ret = 0x20425455; /* "USB " */ + break; case 0x38: /* Supported Protocol:08 */ - return 0x00000000 | (USB2_PORTS+1) | (USB3_PORTS<<8); + ret = 0x00000000 | (USB2_PORTS+1) | (USB3_PORTS<<8); + break; case 0x3c: /* Supported Protocol:0c */ - return 0x00000000; /* reserved */ -#endif + ret = 0x00000000; /* reserved */ + break; default: fprintf(stderr, "xhci_cap_read: reg %d unimplemented\n", reg); + ret = 0; } - return 0; + + trace_usb_xhci_cap_read(reg, ret); + return ret; } static uint32_t xhci_port_read(XHCIState *xhci, uint32_t reg) { uint32_t port = reg >> 4; + uint32_t ret; + if (port >= MAXPORTS) { fprintf(stderr, "xhci_port_read: port %d out of bounds\n", port); - return 0; + ret = 0; + goto out; } switch (reg & 0xf) { case 0x00: /* PORTSC */ - return xhci->ports[port].portsc; + ret = xhci->ports[port].portsc; + break; case 0x04: /* PORTPMSC */ case 0x08: /* PORTLI */ - return 0; + ret = 0; + break; case 0x0c: /* reserved */ default: fprintf(stderr, "xhci_port_read (port %d): reg 0x%x unimplemented\n", port, reg); - return 0; + ret = 0; } + +out: + trace_usb_xhci_port_read(port, reg & 0x0f, ret); + return ret; } static void xhci_port_write(XHCIState *xhci, uint32_t reg, uint32_t val) @@ -2420,6 +2421,8 @@ static void xhci_port_write(XHCIState *xhci, uint32_t reg, uint32_t val) uint32_t port = reg >> 4; uint32_t portsc; + trace_usb_xhci_port_write(port, reg & 0x0f, val); + if (port >= MAXPORTS) { fprintf(stderr, "xhci_port_read: port %d out of bounds\n", port); return; @@ -2457,7 +2460,7 @@ static void xhci_port_write(XHCIState *xhci, uint32_t reg, uint32_t val) static uint32_t xhci_oper_read(XHCIState *xhci, uint32_t reg) { - DPRINTF("xhci_oper_read(0x%x)\n", reg); + uint32_t ret; if (reg >= 0x400) { return xhci_port_read(xhci, reg - 0x400); @@ -2465,38 +2468,50 @@ 
static uint32_t xhci_oper_read(XHCIState *xhci, uint32_t reg) switch (reg) { case 0x00: /* USBCMD */ - return xhci->usbcmd; + ret = xhci->usbcmd; + break; case 0x04: /* USBSTS */ - return xhci->usbsts; + ret = xhci->usbsts; + break; case 0x08: /* PAGESIZE */ - return 1; /* 4KiB */ + ret = 1; /* 4KiB */ + break; case 0x14: /* DNCTRL */ - return xhci->dnctrl; + ret = xhci->dnctrl; + break; case 0x18: /* CRCR low */ - return xhci->crcr_low & ~0xe; + ret = xhci->crcr_low & ~0xe; + break; case 0x1c: /* CRCR high */ - return xhci->crcr_high; + ret = xhci->crcr_high; + break; case 0x30: /* DCBAAP low */ - return xhci->dcbaap_low; + ret = xhci->dcbaap_low; + break; case 0x34: /* DCBAAP high */ - return xhci->dcbaap_high; + ret = xhci->dcbaap_high; + break; case 0x38: /* CONFIG */ - return xhci->config; + ret = xhci->config; + break; default: fprintf(stderr, "xhci_oper_read: reg 0x%x unimplemented\n", reg); + ret = 0; } - return 0; + + trace_usb_xhci_oper_read(reg, ret); + return ret; } static void xhci_oper_write(XHCIState *xhci, uint32_t reg, uint32_t val) { - DPRINTF("xhci_oper_write(0x%x, 0x%08x)\n", reg, val); - if (reg >= 0x400) { xhci_port_write(xhci, reg - 0x400, val); return; } + trace_usb_xhci_oper_write(reg, val); + switch (reg) { case 0x00: /* USBCMD */ if ((val & USBCMD_RS) && !(xhci->usbcmd & USBCMD_RS)) { @@ -2506,7 +2521,7 @@ static void xhci_oper_write(XHCIState *xhci, uint32_t reg, uint32_t val) } xhci->usbcmd = val & 0xc0f; if (val & USBCMD_HCRST) { - xhci_reset(xhci); + xhci_reset(&xhci->pci_dev.qdev); } xhci_irq_update(xhci); break; @@ -2552,35 +2567,46 @@ static void xhci_oper_write(XHCIState *xhci, uint32_t reg, uint32_t val) static uint32_t xhci_runtime_read(XHCIState *xhci, uint32_t reg) { - DPRINTF("xhci_runtime_read(0x%x)\n", reg); + uint32_t ret; switch (reg) { case 0x00: /* MFINDEX */ fprintf(stderr, "xhci_runtime_read: MFINDEX not yet implemented\n"); - return xhci->mfindex; + ret = xhci->mfindex; + break; case 0x20: /* IMAN */ - return xhci->iman; + ret = xhci->iman; + break; case 0x24: /* IMOD */ - return xhci->imod; + ret = xhci->imod; + break; case 0x28: /* ERSTSZ */ - return xhci->erstsz; + ret = xhci->erstsz; + break; case 0x30: /* ERSTBA low */ - return xhci->erstba_low; + ret = xhci->erstba_low; + break; case 0x34: /* ERSTBA high */ - return xhci->erstba_high; + ret = xhci->erstba_high; + break; case 0x38: /* ERDP low */ - return xhci->erdp_low; + ret = xhci->erdp_low; + break; case 0x3c: /* ERDP high */ - return xhci->erdp_high; + ret = xhci->erdp_high; + break; default: fprintf(stderr, "xhci_runtime_read: reg 0x%x unimplemented\n", reg); + ret = 0; } - return 0; + + trace_usb_xhci_runtime_read(reg, ret); + return ret; } static void xhci_runtime_write(XHCIState *xhci, uint32_t reg, uint32_t val) { - DPRINTF("xhci_runtime_write(0x%x, 0x%08x)\n", reg, val); + trace_usb_xhci_runtime_read(reg, val); switch (reg) { case 0x20: /* IMAN */ @@ -2623,14 +2649,14 @@ static void xhci_runtime_write(XHCIState *xhci, uint32_t reg, uint32_t val) static uint32_t xhci_doorbell_read(XHCIState *xhci, uint32_t reg) { - DPRINTF("xhci_doorbell_read(0x%x)\n", reg); /* doorbells always read as 0 */ + trace_usb_xhci_doorbell_read(reg, 0); return 0; } static void xhci_doorbell_write(XHCIState *xhci, uint32_t reg, uint32_t val) { - DPRINTF("xhci_doorbell_write(0x%x, 0x%08x)\n", reg, val); + trace_usb_xhci_doorbell_write(reg, val); if (!xhci_running(xhci)) { fprintf(stderr, "xhci: wrote doorbell while xHC stopped or paused\n"); @@ -2831,8 +2857,6 @@ static void usb_xhci_init(XHCIState 
*xhci, DeviceState *dev) for (i = 0; i < MAXSLOTS; i++) { xhci->slots[i].enabled = 0; } - - qemu_register_reset(xhci_reset, xhci); } static int usb_xhci_initfn(struct PCIDevice *dev) @@ -2895,6 +2919,7 @@ static void xhci_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_xhci; dc->props = xhci_properties; + dc->reset = xhci_reset; k->init = usb_xhci_initfn; k->vendor_id = PCI_VENDOR_ID_NEC; k->device_id = PCI_DEVICE_ID_NEC_UPD720200; diff --git a/hw/usb/host-linux.c b/hw/usb/host-linux.c index a95b0eda55..d55be878ad 100644 --- a/hw/usb/host-linux.c +++ b/hw/usb/host-linux.c @@ -111,6 +111,7 @@ typedef struct USBHostDevice { uint32_t iso_urb_count; uint32_t options; Notifier exit; + QEMUBH *bh; struct endp_data ep_in[USB_MAX_ENDPOINTS]; struct endp_data ep_out[USB_MAX_ENDPOINTS]; @@ -212,7 +213,7 @@ static int is_iso_started(USBHostDevice *s, int pid, int ep) static void clear_iso_started(USBHostDevice *s, int pid, int ep) { - trace_usb_host_ep_stop_iso(s->bus_num, s->addr, ep); + trace_usb_host_iso_stop(s->bus_num, s->addr, ep); get_endp(s, pid, ep)->iso_started = 0; } @@ -220,7 +221,7 @@ static void set_iso_started(USBHostDevice *s, int pid, int ep) { struct endp_data *e = get_endp(s, pid, ep); - trace_usb_host_ep_start_iso(s->bus_num, s->addr, ep); + trace_usb_host_iso_start(s->bus_num, s->addr, ep); if (!e->iso_started) { e->iso_started = 1; e->inflight = 0; @@ -318,7 +319,8 @@ static void async_complete(void *opaque) if (r < 0) { if (errno == EAGAIN) { if (urbs > 2) { - fprintf(stderr, "husb: %d iso urbs finished at once\n", urbs); + /* indicates possible latency issues */ + trace_usb_host_iso_many_urbs(s->bus_num, s->addr, urbs); } return; } @@ -351,7 +353,8 @@ static void async_complete(void *opaque) urbs++; inflight = change_iso_inflight(s, pid, ep, -1); if (inflight == 0 && is_iso_started(s, pid, ep)) { - fprintf(stderr, "husb: out of buffers for iso stream\n"); + /* can be latency issues, or simply end of stream */ + trace_usb_host_iso_out_of_bufs(s->bus_num, s->addr, ep); } continue; } @@ -1135,7 +1138,7 @@ static int usb_linux_update_endp_table(USBHostDevice *s) USBDescriptor *d; bool active = false; - usb_ep_init(&s->dev); + usb_ep_reset(&s->dev); for (i = 0;; i += d->bLength) { if (i+2 >= s->descr_len) { @@ -1238,7 +1241,7 @@ static int usb_linux_update_endp_table(USBHostDevice *s) return 0; error: - usb_ep_init(&s->dev); + usb_ep_reset(&s->dev); return 1; } @@ -1325,6 +1328,7 @@ static int usb_host_open(USBHostDevice *dev, int bus_num, goto fail; } + usb_ep_init(&dev->dev); ret = usb_linux_update_endp_table(dev); if (ret) { goto fail; @@ -1421,6 +1425,43 @@ static void usb_host_exit_notifier(struct Notifier *n, void *data) } } +/* + * This is *NOT* about restoring state. We have absolutely no idea + * what state the host device is in at the moment and whenever it is + * still present in the first place. Attemping to contine where we + * left off is impossible. + * + * What we are going to to to here is emulate a surprise removal of + * the usb device passed through, then kick host scan so the device + * will get re-attached (and re-initialized by the guest) in case it + * is still present. + * + * As the device removal will change the state of other devices (usb + * host controller, most likely interrupt controller too) we have to + * wait with it until *all* vmstate is loaded. Thus post_load just + * kicks a bottom half which then does the actual work. 
+ */ +static void usb_host_post_load_bh(void *opaque) +{ + USBHostDevice *dev = opaque; + + if (dev->fd != -1) { + usb_host_close(dev); + } + if (dev->dev.attached) { + usb_device_detach(&dev->dev); + } + usb_host_auto_check(NULL); +} + +static int usb_host_post_load(void *opaque, int version_id) +{ + USBHostDevice *dev = opaque; + + qemu_bh_schedule(dev->bh); + return 0; +} + static int usb_host_initfn(USBDevice *dev) { USBHostDevice *s = DO_UPCAST(USBHostDevice, dev, dev); @@ -1432,6 +1473,7 @@ static int usb_host_initfn(USBDevice *dev) QTAILQ_INSERT_TAIL(&hostdevs, s, next); s->exit.notify = usb_host_exit_notifier; qemu_add_exit_notifier(&s->exit); + s->bh = qemu_bh_new(usb_host_post_load_bh, s); usb_host_auto_check(NULL); if (s->match.bus_num != 0 && s->match.port != NULL) { @@ -1443,7 +1485,13 @@ static int usb_host_initfn(USBDevice *dev) static const VMStateDescription vmstate_usb_host = { .name = "usb-host", - .unmigratable = 1, + .version_id = 1, + .minimum_version_id = 1, + .post_load = usb_host_post_load, + .fields = (VMStateField[]) { + VMSTATE_USB_DEVICE(dev, USBHostDevice), + VMSTATE_END_OF_LIST() + } }; static Property usb_host_dev_properties[] = { @@ -1737,25 +1785,27 @@ static void usb_host_auto_check(void *unused) struct USBHostDevice *s; int unconnected = 0; - usb_host_scan(NULL, usb_host_auto_scan); + if (runstate_is_running()) { + usb_host_scan(NULL, usb_host_auto_scan); - QTAILQ_FOREACH(s, &hostdevs, next) { - if (s->fd == -1) { - unconnected++; - } - if (s->seen == 0) { - s->errcount = 0; + QTAILQ_FOREACH(s, &hostdevs, next) { + if (s->fd == -1) { + unconnected++; + } + if (s->seen == 0) { + s->errcount = 0; + } + s->seen = 0; } - s->seen = 0; - } - if (unconnected == 0) { - /* nothing to watch */ - if (usb_auto_timer) { - qemu_del_timer(usb_auto_timer); - trace_usb_host_auto_scan_disabled(); + if (unconnected == 0) { + /* nothing to watch */ + if (usb_auto_timer) { + qemu_del_timer(usb_auto_timer); + trace_usb_host_auto_scan_disabled(); + } + return; } - return; } if (!usb_auto_timer) { diff --git a/hw/usb/libhw.c b/hw/usb/libhw.c index 2462351389..c0de30ea88 100644 --- a/hw/usb/libhw.c +++ b/hw/usb/libhw.c @@ -26,15 +26,15 @@ int usb_packet_map(USBPacket *p, QEMUSGList *sgl) { - int is_write = (p->pid == USB_TOKEN_IN); - target_phys_addr_t len; + DMADirection dir = (p->pid == USB_TOKEN_IN) ? + DMA_DIRECTION_FROM_DEVICE : DMA_DIRECTION_TO_DEVICE; + dma_addr_t len; void *mem; int i; for (i = 0; i < sgl->nsg; i++) { len = sgl->sg[i].len; - mem = cpu_physical_memory_map(sgl->sg[i].base, &len, - is_write); + mem = dma_memory_map(sgl->dma, sgl->sg[i].base, &len, dir); if (!mem) { goto err; } @@ -46,18 +46,19 @@ int usb_packet_map(USBPacket *p, QEMUSGList *sgl) return 0; err: - usb_packet_unmap(p); + usb_packet_unmap(p, sgl); return -1; } -void usb_packet_unmap(USBPacket *p) +void usb_packet_unmap(USBPacket *p, QEMUSGList *sgl) { - int is_write = (p->pid == USB_TOKEN_IN); + DMADirection dir = (p->pid == USB_TOKEN_IN) ? 
+ DMA_DIRECTION_FROM_DEVICE : DMA_DIRECTION_TO_DEVICE; int i; for (i = 0; i < p->iov.niov; i++) { - cpu_physical_memory_unmap(p->iov.iov[i].iov_base, - p->iov.iov[i].iov_len, is_write, - p->iov.iov[i].iov_len); + dma_memory_unmap(sgl->dma, p->iov.iov[i].iov_base, + p->iov.iov[i].iov_len, dir, + p->iov.iov[i].iov_len); } } diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c index 51c27b4051..10b4fbb3a7 100644 --- a/hw/usb/redirect.c +++ b/hw/usb/redirect.c @@ -143,8 +143,6 @@ static void usbredir_interrupt_packet(void *priv, uint32_t id, static int usbredir_handle_status(USBRedirDevice *dev, int status, int actual_len); -#define VERSION "qemu usb-redir guest " QEMU_VERSION - /* * Logging stuff */ @@ -794,6 +792,10 @@ static void usbredir_open_close_bh(void *opaque) { USBRedirDevice *dev = opaque; uint32_t caps[USB_REDIR_CAPS_SIZE] = { 0, }; + char version[32]; + + strcpy(version, "qemu usb-redir guest "); + pstrcat(version, sizeof(version), qemu_get_version()); usbredir_device_disconnect(dev); @@ -828,7 +830,7 @@ static void usbredir_open_close_bh(void *opaque) usbredirparser_caps_set_cap(caps, usb_redir_cap_connect_device_version); usbredirparser_caps_set_cap(caps, usb_redir_cap_filter); - usbredirparser_init(dev->parser, VERSION, caps, USB_REDIR_CAPS_SIZE, 0); + usbredirparser_init(dev->parser, version, caps, USB_REDIR_CAPS_SIZE, 0); usbredirparser_do_write(dev->parser); } } @@ -1031,6 +1033,8 @@ static int usbredir_handle_status(USBRedirDevice *dev, case usb_redir_inval: WARNING("got invalid param error from usb-host?\n"); return USB_RET_NAK; + case usb_redir_babble: + return USB_RET_BABBLE; case usb_redir_ioerror: case usb_redir_timeout: default: diff --git a/hw/versatilepb.c b/hw/versatilepb.c index 7c79c54d08..4fd5d9b04b 100644 --- a/hw/versatilepb.c +++ b/hw/versatilepb.c @@ -173,7 +173,7 @@ static void versatile_init(ram_addr_t ram_size, const char *initrd_filename, const char *cpu_model, int board_id) { - CPUARMState *env; + ARMCPU *cpu; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); qemu_irq *cpu_pic; @@ -189,10 +189,11 @@ static void versatile_init(ram_addr_t ram_size, int done_smc = 0; DriveInfo *dinfo; - if (!cpu_model) + if (!cpu_model) { cpu_model = "arm926"; - env = cpu_init(cpu_model); - if (!env) { + } + cpu = cpu_arm_init(cpu_model); + if (!cpu) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } @@ -208,7 +209,7 @@ static void versatile_init(ram_addr_t ram_size, qdev_init_nofail(sysctl); sysbus_mmio_map(sysbus_from_qdev(sysctl), 0, 0x10000000); - cpu_pic = arm_pic_init_cpu(env); + cpu_pic = arm_pic_init_cpu(cpu); dev = sysbus_create_varargs("pl190", 0x10140000, cpu_pic[0], cpu_pic[1], NULL); for (n = 0; n < 32; n++) { @@ -338,7 +339,7 @@ static void versatile_init(ram_addr_t ram_size, versatile_binfo.kernel_cmdline = kernel_cmdline; versatile_binfo.initrd_filename = initrd_filename; versatile_binfo.board_id = board_id; - arm_load_kernel(env, &versatile_binfo); + arm_load_kernel(cpu, &versatile_binfo); } static void vpb_init(ram_addr_t ram_size, diff --git a/hw/vexpress.c b/hw/vexpress.c index 18d87ac378..8072c5ada9 100644 --- a/hw/vexpress.c +++ b/hw/vexpress.c @@ -159,7 +159,6 @@ static void a9_daughterboard_init(const VEDBoardInfo *daughterboard, const char *cpu_model, qemu_irq *pic, uint32_t *proc_id) { - CPUARMState *env = NULL; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); MemoryRegion *lowram = g_new(MemoryRegion, 1); @@ -177,12 +176,12 @@ static void 
a9_daughterboard_init(const VEDBoardInfo *daughterboard, *proc_id = 0x0c000191; for (n = 0; n < smp_cpus; n++) { - env = cpu_init(cpu_model); - if (!env) { + ARMCPU *cpu = cpu_arm_init(cpu_model); + if (!cpu) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } - irqp = arm_pic_init_cpu(env); + irqp = arm_pic_init_cpu(cpu); cpu_irq[n] = irqp[ARM_PIC_CPU_IRQ]; } @@ -259,7 +258,6 @@ static void a15_daughterboard_init(const VEDBoardInfo *daughterboard, qemu_irq *pic, uint32_t *proc_id) { int n; - CPUARMState *env = NULL; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); MemoryRegion *sram = g_new(MemoryRegion, 1); @@ -274,13 +272,15 @@ static void a15_daughterboard_init(const VEDBoardInfo *daughterboard, *proc_id = 0x14000217; for (n = 0; n < smp_cpus; n++) { + ARMCPU *cpu; qemu_irq *irqp; - env = cpu_init(cpu_model); - if (!env) { + + cpu = cpu_arm_init(cpu_model); + if (!cpu) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } - irqp = arm_pic_init_cpu(env); + irqp = arm_pic_init_cpu(cpu); cpu_irq[n] = irqp[ARM_PIC_CPU_IRQ]; } @@ -438,7 +438,7 @@ static void vexpress_common_init(const VEDBoardInfo *daughterboard, vexpress_binfo.smp_loader_start = map[VE_SRAM]; vexpress_binfo.smp_bootreg_addr = map[VE_SYSREGS] + 0x30; vexpress_binfo.gic_cpu_if_addr = daughterboard->gic_cpu_if_addr; - arm_load_kernel(first_cpu, &vexpress_binfo); + arm_load_kernel(arm_env_get_cpu(first_cpu), &vexpress_binfo); } static void vexpress_a9_init(ram_addr_t ram_size, diff --git a/hw/vga-isa-mm.c b/hw/vga-isa-mm.c index f8984c62cb..44ae7d92c8 100644 --- a/hw/vga-isa-mm.c +++ b/hw/vga-isa-mm.c @@ -28,6 +28,8 @@ #include "pixel_ops.h" #include "qemu-timer.h" +#define VGA_RAM_SIZE (8192 * 1024) + typedef struct ISAVGAMMState { VGACommonState vga; int it_shift; @@ -128,7 +130,8 @@ int isa_vga_mm_init(target_phys_addr_t vram_base, s = g_malloc0(sizeof(*s)); - vga_common_init(&s->vga, VGA_RAM_SIZE); + s->vga.vram_size_mb = VGA_RAM_SIZE >> 20; + vga_common_init(&s->vga); vga_mm_init(s, vram_base, ctrl_base, it_shift, address_space); s->vga.ds = graphic_console_init(s->vga.update, s->vga.invalidate, diff --git a/hw/vga-isa.c b/hw/vga-isa.c index 4bcc4db62f..d2904737bc 100644 --- a/hw/vga-isa.c +++ b/hw/vga-isa.c @@ -49,7 +49,7 @@ static int vga_initfn(ISADevice *dev) MemoryRegion *vga_io_memory; const MemoryRegionPortio *vga_ports, *vbe_ports; - vga_common_init(s, VGA_RAM_SIZE); + vga_common_init(s); s->legacy_address_space = isa_address_space(dev); vga_io_memory = vga_init_io(s, &vga_ports, &vbe_ports); isa_register_portio_list(dev, 0x3b0, vga_ports, s, "vga"); @@ -69,6 +69,11 @@ static int vga_initfn(ISADevice *dev) return 0; } +static Property vga_isa_properties[] = { + DEFINE_PROP_UINT32("vgamem_mb", ISAVGAState, state.vram_size_mb, 8), + DEFINE_PROP_END_OF_LIST(), +}; + static void vga_class_initfn(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -76,6 +81,7 @@ static void vga_class_initfn(ObjectClass *klass, void *data) ic->init = vga_initfn; dc->reset = vga_reset_isa; dc->vmsd = &vmstate_vga_common; + dc->props = vga_isa_properties; } static TypeInfo vga_info = { diff --git a/hw/vga-pci.c b/hw/vga-pci.c index 465b643d21..37dc019a61 100644 --- a/hw/vga-pci.c +++ b/hw/vga-pci.c @@ -53,7 +53,7 @@ static int pci_vga_initfn(PCIDevice *dev) VGACommonState *s = &d->vga; // vga + console init - vga_common_init(s, VGA_RAM_SIZE); + vga_common_init(s); vga_init(s, pci_address_space(dev), pci_address_space_io(dev), true); s->ds = 
graphic_console_init(s->update, s->invalidate, @@ -75,6 +75,11 @@ DeviceState *pci_vga_init(PCIBus *bus) return &pci_create_simple(bus, -1, "VGA")->qdev; } +static Property vga_pci_properties[] = { + DEFINE_PROP_UINT32("vgamem_mb", PCIVGAState, vga.vram_size_mb, 16), + DEFINE_PROP_END_OF_LIST(), +}; + static void vga_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -87,6 +92,7 @@ static void vga_class_init(ObjectClass *klass, void *data) k->device_id = PCI_DEVICE_ID_QEMU_VGA; k->class_id = PCI_CLASS_DISPLAY_VGA; dc->vmsd = &vmstate_vga_pci; + dc->props = vga_pci_properties; } static TypeInfo vga_info = { @@ -2225,7 +2225,7 @@ const VMStateDescription vmstate_vga_common = { } }; -void vga_common_init(VGACommonState *s, int vga_ram_size) +void vga_common_init(VGACommonState *s) { int i, j, v, b; @@ -2252,16 +2252,23 @@ void vga_common_init(VGACommonState *s, int vga_ram_size) expand4to8[i] = v; } + /* valid range: 1 MB -> 256 MB */ + s->vram_size = 1024 * 1024; + while (s->vram_size < (s->vram_size_mb << 20) && + s->vram_size < (256 << 20)) { + s->vram_size <<= 1; + } + s->vram_size_mb = s->vram_size >> 20; + #ifdef CONFIG_BOCHS_VBE s->is_vbe_vmstate = 1; #else s->is_vbe_vmstate = 0; #endif - memory_region_init_ram(&s->vram, "vga.vram", vga_ram_size); + memory_region_init_ram(&s->vram, "vga.vram", s->vram_size); vmstate_register_ram_global(&s->vram); xen_register_framebuffer(&s->vram); s->vram_ptr = memory_region_get_ram_ptr(&s->vram); - s->vram_size = vga_ram_size; s->get_bpp = vga_get_bpp; s->get_offsets = vga_get_offsets; s->get_resolution = vga_get_resolution; diff --git a/hw/vga_int.h b/hw/vga_int.h index d244d8ff99..3b38764a38 100644 --- a/hw/vga_int.h +++ b/hw/vga_int.h @@ -31,8 +31,8 @@ /* bochs VBE support */ #define CONFIG_BOCHS_VBE -#define VBE_DISPI_MAX_XRES 1600 -#define VBE_DISPI_MAX_YRES 1200 +#define VBE_DISPI_MAX_XRES 16000 +#define VBE_DISPI_MAX_YRES 12000 #define VBE_DISPI_MAX_BPP 32 #define VBE_DISPI_INDEX_ID 0x0 @@ -107,6 +107,7 @@ typedef struct VGACommonState { MemoryRegion vram; MemoryRegion vram_vbe; uint32_t vram_size; + uint32_t vram_size_mb; /* property */ uint32_t latch; MemoryRegion *chain4_alias; uint8_t sr_index; @@ -184,7 +185,7 @@ static inline int c6_to_8(int v) return (v << 2) | (b << 1) | b; } -void vga_common_init(VGACommonState *s, int vga_ram_size); +void vga_common_init(VGACommonState *s); void vga_init(VGACommonState *s, MemoryRegion *address_space, MemoryRegion *address_space_io, bool init_vga_ports); MemoryRegion *vga_init_io(VGACommonState *s, @@ -209,7 +210,6 @@ void vga_init_vbe(VGACommonState *s, MemoryRegion *address_space); extern const uint8_t sr_mask[8]; extern const uint8_t gr_mask[16]; -#define VGA_RAM_SIZE (8192 * 1024) #define VGABIOS_FILENAME "vgabios.bin" #define VGABIOS_CIRRUS_FILENAME "vgabios-cirrus.bin" diff --git a/hw/virtex_ml507.c b/hw/virtex_ml507.c index cace86b5e4..79bc0d10ee 100644 --- a/hw/virtex_ml507.c +++ b/hw/virtex_ml507.c @@ -229,7 +229,7 @@ static void virtex_init(ram_addr_t ram_size, serial_hds[0], DEVICE_LITTLE_ENDIAN); /* 2 timers at irq 2 @ 62 Mhz. 
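The vga.c hunk above drops the fixed VGA_RAM_SIZE and instead derives the VRAM size from the new per-device "vgamem_mb" property: vga_common_init() rounds the requested megabyte count up to a power of two and clamps it to the 1 MB to 256 MB range before allocating "vga.vram". A standalone sketch of that rounding rule (the helper name and the sample values are illustrative only, not part of the patch):

    #include <stdint.h>

    /* Mirror of the sizing loop in the new vga_common_init():
     * start at 1 MB and double until >= the requested size, capping at 256 MB. */
    static uint32_t vga_vram_bytes(uint32_t vgamem_mb)
    {
        uint32_t size = 1024 * 1024;

        while (size < (vgamem_mb << 20) && size < (256 << 20)) {
            size <<= 1;
        }
        return size;
    }

    /* vga_vram_bytes(9)   == 16 MB  (rounded up to the next power of two)
     * vga_vram_bytes(512) == 256 MB (clamped to the upper limit)          */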
*/ - xilinx_timer_create(0x83c00000, irq[3], 2, 62 * 1000000); + xilinx_timer_create(0x83c00000, irq[3], 0, 62 * 1000000); if (kernel_filename) { uint64_t entry, low, high; diff --git a/hw/virtio-balloon.c b/hw/virtio-balloon.c index 23effa40a0..dd1a6506cf 100644 --- a/hw/virtio-balloon.c +++ b/hw/virtio-balloon.c @@ -146,8 +146,13 @@ static void virtio_balloon_set_config(VirtIODevice *vdev, { VirtIOBalloon *dev = to_virtio_balloon(vdev); struct virtio_balloon_config config; + uint32_t oldactual = dev->actual; memcpy(&config, config_data, 8); dev->actual = le32_to_cpu(config.actual); + if (dev->actual != oldactual) { + qemu_balloon_changed(ram_size - + (dev->actual << VIRTIO_BALLOON_PFN_SHIFT)); + } } static uint32_t virtio_balloon_get_features(VirtIODevice *vdev, uint32_t f) diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c index d08c1590d2..9342eed070 100644 --- a/hw/virtio-pci.c +++ b/hw/virtio-pci.c @@ -278,7 +278,6 @@ void virtio_pci_reset(DeviceState *d) VirtIOPCIProxy *proxy = container_of(d, VirtIOPCIProxy, pci_dev.qdev); virtio_pci_stop_ioeventfd(proxy); virtio_reset(proxy->vdev); - msix_reset(&proxy->pci_dev); proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG; } @@ -521,8 +520,6 @@ static void virtio_write_config(PCIDevice *pci_dev, uint32_t address, virtio_set_status(proxy->vdev, proxy->vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK); } - - msix_write_config(pci_dev, address, val, len); } static unsigned virtio_pci_get_features(void *opaque) diff --git a/hw/virtio-scsi.c b/hw/virtio-scsi.c index 5e39ce93c4..0a5ac40e2f 100644 --- a/hw/virtio-scsi.c +++ b/hw/virtio-scsi.c @@ -275,7 +275,7 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req) { SCSIDevice *d = virtio_scsi_device_find(s, req->req.tmf->lun); SCSIRequest *r, *next; - DeviceState *qdev; + BusChild *kid; int target; /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". 
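In the virtio-balloon hunk above, a guest update of config.actual now immediately reports the new logical RAM size through qemu_balloon_changed(ram_size - (actual << VIRTIO_BALLOON_PFN_SHIFT)); "actual" counts inflated balloon pages of 4 KiB each (VIRTIO_BALLOON_PFN_SHIFT is 12). A small worked example with made-up numbers, not taken from the patch:

    #include <stdint.h>
    #include <stdio.h>

    #define VIRTIO_BALLOON_PFN_SHIFT 12   /* balloon pages are 4 KiB */

    int main(void)
    {
        uint64_t ram_size = 1ULL << 30;   /* 1 GiB of guest RAM          */
        uint32_t actual   = 65536;        /* 65536 pages inflated so far */

        uint64_t ballooned = (uint64_t)actual << VIRTIO_BALLOON_PFN_SHIFT;
        printf("ballooned: %llu MiB, reported: %llu MiB\n",
               (unsigned long long)(ballooned >> 20),
               (unsigned long long)((ram_size - ballooned) >> 20));
        /* prints: ballooned: 256 MiB, reported: 768 MiB */
        return 0;
    }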
*/ @@ -346,8 +346,8 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req) case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET: target = req->req.tmf->lun[1]; s->resetting++; - QTAILQ_FOREACH(qdev, &s->bus.qbus.children, sibling) { - d = DO_UPCAST(SCSIDevice, qdev, qdev); + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + d = DO_UPCAST(SCSIDevice, qdev, kid->child); if (d->channel == 0 && d->id == target) { qdev_reset_all(&d->qdev); } @@ -405,6 +405,10 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) } } +static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq) +{ +} + static void virtio_scsi_command_complete(SCSIRequest *r, uint32_t status, size_t resid) { @@ -609,7 +613,7 @@ VirtIODevice *virtio_scsi_init(DeviceState *dev, VirtIOSCSIConf *proxyconf) s->ctrl_vq = virtio_add_queue(&s->vdev, VIRTIO_SCSI_VQ_SIZE, virtio_scsi_handle_ctrl); s->event_vq = virtio_add_queue(&s->vdev, VIRTIO_SCSI_VQ_SIZE, - NULL); + virtio_scsi_handle_event); for (i = 0; i < s->conf->num_queues; i++) { s->cmd_vqs[i] = virtio_add_queue(&s->vdev, VIRTIO_SCSI_VQ_SIZE, virtio_scsi_handle_cmd); diff --git a/hw/virtio-serial-bus.c b/hw/virtio-serial-bus.c index 728f218dcf..82073f5dc2 100644 --- a/hw/virtio-serial-bus.c +++ b/hw/virtio-serial-bus.c @@ -728,15 +728,27 @@ static int virtio_serial_load(QEMUFile *f, void *opaque, int version_id) static void virtser_bus_dev_print(Monitor *mon, DeviceState *qdev, int indent); -static struct BusInfo virtser_bus_info = { - .name = "virtio-serial-bus", - .size = sizeof(VirtIOSerialBus), - .print_dev = virtser_bus_dev_print, - .props = (Property[]) { - DEFINE_PROP_UINT32("nr", VirtIOSerialPort, id, VIRTIO_CONSOLE_BAD_ID), - DEFINE_PROP_STRING("name", VirtIOSerialPort, name), - DEFINE_PROP_END_OF_LIST() - } +static Property virtser_props[] = { + DEFINE_PROP_UINT32("nr", VirtIOSerialPort, id, VIRTIO_CONSOLE_BAD_ID), + DEFINE_PROP_STRING("name", VirtIOSerialPort, name), + DEFINE_PROP_END_OF_LIST() +}; + +#define TYPE_VIRTIO_SERIAL_BUS "virtio-serial-bus" +#define VIRTIO_SERIAL_BUS(obj) \ + OBJECT_CHECK(VirtIOSerialBus, (obj), TYPE_VIRTIO_SERIAL_BUS) + +static void virtser_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + k->print_dev = virtser_bus_dev_print; +} + +static const TypeInfo virtser_bus_info = { + .name = TYPE_VIRTIO_SERIAL_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(VirtIOSerialBus), + .class_init = virtser_bus_class_init, }; static void virtser_bus_dev_print(Monitor *mon, DeviceState *qdev, int indent) @@ -904,7 +916,7 @@ VirtIODevice *virtio_serial_init(DeviceState *dev, virtio_serial_conf *conf) vser = DO_UPCAST(VirtIOSerial, vdev, vdev); /* Spawn a new virtio-serial bus on which the ports will ride as devices */ - qbus_create_inplace(&vser->bus.qbus, &virtser_bus_info, dev, NULL); + qbus_create_inplace(&vser->bus.qbus, TYPE_VIRTIO_SERIAL_BUS, dev, NULL); vser->bus.qbus.allow_hotplug = 1; vser->bus.vser = vser; QTAILQ_INIT(&vser->ports); @@ -980,9 +992,10 @@ static void virtio_serial_port_class_init(ObjectClass *klass, void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->init = virtser_port_qdev_init; - k->bus_info = &virtser_bus_info; + k->bus_type = TYPE_VIRTIO_SERIAL_BUS; k->exit = virtser_port_qdev_exit; k->unplug = qdev_simple_unplug_cb; + k->props = virtser_props; } static TypeInfo virtio_serial_port_type_info = { @@ -996,6 +1009,7 @@ static TypeInfo virtio_serial_port_type_info = { static void virtio_serial_register_types(void) { + 
type_register_static(&virtser_bus_info); type_register_static(&virtio_serial_port_type_info); } diff --git a/hw/vmware_vga.c b/hw/vmware_vga.c index 142d9f4ea0..476dc89a0c 100644 --- a/hw/vmware_vga.c +++ b/hw/vmware_vga.c @@ -1078,7 +1078,7 @@ static const VMStateDescription vmstate_vmware_vga = { } }; -static void vmsvga_init(struct vmsvga_state_s *s, int vga_ram_size, +static void vmsvga_init(struct vmsvga_state_s *s, MemoryRegion *address_space, MemoryRegion *io) { s->scratch_size = SVGA_SCRATCH_SIZE; @@ -1095,7 +1095,7 @@ static void vmsvga_init(struct vmsvga_state_s *s, int vga_ram_size, vmstate_register_ram_global(&s->fifo_ram); s->fifo_ptr = memory_region_get_ram_ptr(&s->fifo_ram); - vga_common_init(&s->vga, vga_ram_size); + vga_common_init(&s->vga); vga_init(&s->vga, address_space, io, true); vmstate_register(NULL, 0, &vmstate_vga_common, &s->vga); @@ -1184,7 +1184,7 @@ static int pci_vmsvga_initfn(PCIDevice *dev) "vmsvga-io", 0x10); pci_register_bar(&s->card, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io_bar); - vmsvga_init(&s->chip, VGA_RAM_SIZE, pci_address_space(dev), + vmsvga_init(&s->chip, pci_address_space(dev), pci_address_space_io(dev)); pci_register_bar(&s->card, 1, PCI_BASE_ADDRESS_MEM_PREFETCH, iomem); @@ -1199,6 +1199,12 @@ static int pci_vmsvga_initfn(PCIDevice *dev) return 0; } +static Property vga_vmware_properties[] = { + DEFINE_PROP_UINT32("vgamem_mb", struct pci_vmsvga_state_s, + chip.vga.vram_size_mb, 16), + DEFINE_PROP_END_OF_LIST(), +}; + static void vmsvga_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -1214,6 +1220,7 @@ static void vmsvga_class_init(ObjectClass *klass, void *data) k->subsystem_id = SVGA_PCI_DEVICE_ID; dc->reset = vmsvga_reset; dc->vmsd = &vmstate_vmware_vga; + dc->props = vga_vmware_properties; } static TypeInfo vmsvga_info = { diff --git a/hw/vt82c686.c b/hw/vt82c686.c index 6fb7950fa6..5d7c00cf4b 100644 --- a/hw/vt82c686.c +++ b/hw/vt82c686.c @@ -210,7 +210,7 @@ static void pm_ioport_writew(void *opaque, uint32_t addr, uint32_t val) pm_update_sci(s); break; case 0x04: - acpi_pm1_cnt_write(&s->ar, val); + acpi_pm1_cnt_write(&s->ar, val, 0); break; default: break; diff --git a/hw/xen-host-pci-device.c b/hw/xen-host-pci-device.c new file mode 100644 index 0000000000..e7ff680ef2 --- /dev/null +++ b/hw/xen-host-pci-device.c @@ -0,0 +1,396 @@ +/* + * Copyright (C) 2011 Citrix Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#include "qemu-common.h" +#include "xen-host-pci-device.h" + +#define XEN_HOST_PCI_MAX_EXT_CAP \ + ((PCIE_CONFIG_SPACE_SIZE - PCI_CONFIG_SPACE_SIZE) / (PCI_CAP_SIZEOF + 4)) + +#ifdef XEN_HOST_PCI_DEVICE_DEBUG +# define XEN_HOST_PCI_LOG(f, a...) fprintf(stderr, "%s: " f, __func__, ##a) +#else +# define XEN_HOST_PCI_LOG(f, a...) (void)0 +#endif + +/* + * from linux/ioport.h + * IO resources have these defined flags. 
+ */ +#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ + +#define IORESOURCE_TYPE_BITS 0x00000f00 /* Resource type */ +#define IORESOURCE_IO 0x00000100 +#define IORESOURCE_MEM 0x00000200 + +#define IORESOURCE_PREFETCH 0x00001000 /* No side effects */ +#define IORESOURCE_MEM_64 0x00100000 + +static int xen_host_pci_sysfs_path(const XenHostPCIDevice *d, + const char *name, char *buf, ssize_t size) +{ + int rc; + + rc = snprintf(buf, size, "/sys/bus/pci/devices/%04x:%02x:%02x.%d/%s", + d->domain, d->bus, d->dev, d->func, name); + + if (rc >= size || rc < 0) { + /* The ouput is truncated or an other error is encountered */ + return -ENODEV; + } + return 0; +} + + +/* This size should be enough to read the first 7 lines of a ressource file */ +#define XEN_HOST_PCI_RESSOURCE_BUFFER_SIZE 400 +static int xen_host_pci_get_resource(XenHostPCIDevice *d) +{ + int i, rc, fd; + char path[PATH_MAX]; + char buf[XEN_HOST_PCI_RESSOURCE_BUFFER_SIZE]; + unsigned long long start, end, flags, size; + char *endptr, *s; + uint8_t type; + + rc = xen_host_pci_sysfs_path(d, "resource", path, sizeof (path)); + if (rc) { + return rc; + } + fd = open(path, O_RDONLY); + if (fd == -1) { + XEN_HOST_PCI_LOG("Error: Can't open %s: %s\n", path, strerror(errno)); + return -errno; + } + + do { + rc = read(fd, &buf, sizeof (buf) - 1); + if (rc < 0 && errno != EINTR) { + rc = -errno; + goto out; + } + } while (rc < 0); + buf[rc] = 0; + rc = 0; + + s = buf; + for (i = 0; i < PCI_NUM_REGIONS; i++) { + type = 0; + + start = strtoll(s, &endptr, 16); + if (*endptr != ' ' || s == endptr) { + break; + } + s = endptr + 1; + end = strtoll(s, &endptr, 16); + if (*endptr != ' ' || s == endptr) { + break; + } + s = endptr + 1; + flags = strtoll(s, &endptr, 16); + if (*endptr != '\n' || s == endptr) { + break; + } + s = endptr + 1; + + if (start) { + size = end - start + 1; + } else { + size = 0; + } + + if (flags & IORESOURCE_IO) { + type |= XEN_HOST_PCI_REGION_TYPE_IO; + } + if (flags & IORESOURCE_MEM) { + type |= XEN_HOST_PCI_REGION_TYPE_MEM; + } + if (flags & IORESOURCE_PREFETCH) { + type |= XEN_HOST_PCI_REGION_TYPE_PREFETCH; + } + if (flags & IORESOURCE_MEM_64) { + type |= XEN_HOST_PCI_REGION_TYPE_MEM_64; + } + + if (i < PCI_ROM_SLOT) { + d->io_regions[i].base_addr = start; + d->io_regions[i].size = size; + d->io_regions[i].type = type; + d->io_regions[i].bus_flags = flags & IORESOURCE_BITS; + } else { + d->rom.base_addr = start; + d->rom.size = size; + d->rom.type = type; + d->rom.bus_flags = flags & IORESOURCE_BITS; + } + } + if (i != PCI_NUM_REGIONS) { + /* Invalid format or input to short */ + rc = -ENODEV; + } + +out: + close(fd); + return rc; +} + +/* This size should be enough to read a long from a file */ +#define XEN_HOST_PCI_GET_VALUE_BUFFER_SIZE 22 +static int xen_host_pci_get_value(XenHostPCIDevice *d, const char *name, + unsigned int *pvalue, int base) +{ + char path[PATH_MAX]; + char buf[XEN_HOST_PCI_GET_VALUE_BUFFER_SIZE]; + int fd, rc; + unsigned long value; + char *endptr; + + rc = xen_host_pci_sysfs_path(d, name, path, sizeof (path)); + if (rc) { + return rc; + } + fd = open(path, O_RDONLY); + if (fd == -1) { + XEN_HOST_PCI_LOG("Error: Can't open %s: %s\n", path, strerror(errno)); + return -errno; + } + do { + rc = read(fd, &buf, sizeof (buf) - 1); + if (rc < 0 && errno != EINTR) { + rc = -errno; + goto out; + } + } while (rc < 0); + buf[rc] = 0; + value = strtol(buf, &endptr, base); + if (endptr == buf || *endptr != '\n') { + rc = -1; + } else if ((value == LONG_MIN || value == LONG_MAX) && errno == ERANGE) 
{ + rc = -errno; + } else { + rc = 0; + *pvalue = value; + } +out: + close(fd); + return rc; +} + +static inline int xen_host_pci_get_hex_value(XenHostPCIDevice *d, + const char *name, + unsigned int *pvalue) +{ + return xen_host_pci_get_value(d, name, pvalue, 16); +} + +static inline int xen_host_pci_get_dec_value(XenHostPCIDevice *d, + const char *name, + unsigned int *pvalue) +{ + return xen_host_pci_get_value(d, name, pvalue, 10); +} + +static bool xen_host_pci_dev_is_virtfn(XenHostPCIDevice *d) +{ + char path[PATH_MAX]; + struct stat buf; + + if (xen_host_pci_sysfs_path(d, "physfn", path, sizeof (path))) { + return false; + } + return !stat(path, &buf); +} + +static int xen_host_pci_config_open(XenHostPCIDevice *d) +{ + char path[PATH_MAX]; + int rc; + + rc = xen_host_pci_sysfs_path(d, "config", path, sizeof (path)); + if (rc) { + return rc; + } + d->config_fd = open(path, O_RDWR); + if (d->config_fd < 0) { + return -errno; + } + return 0; +} + +static int xen_host_pci_config_read(XenHostPCIDevice *d, + int pos, void *buf, int len) +{ + int rc; + + do { + rc = pread(d->config_fd, buf, len, pos); + } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); + if (rc != len) { + return -errno; + } + return 0; +} + +static int xen_host_pci_config_write(XenHostPCIDevice *d, + int pos, const void *buf, int len) +{ + int rc; + + do { + rc = pwrite(d->config_fd, buf, len, pos); + } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); + if (rc != len) { + return -errno; + } + return 0; +} + + +int xen_host_pci_get_byte(XenHostPCIDevice *d, int pos, uint8_t *p) +{ + uint8_t buf; + int rc = xen_host_pci_config_read(d, pos, &buf, 1); + if (!rc) { + *p = buf; + } + return rc; +} + +int xen_host_pci_get_word(XenHostPCIDevice *d, int pos, uint16_t *p) +{ + uint16_t buf; + int rc = xen_host_pci_config_read(d, pos, &buf, 2); + if (!rc) { + *p = le16_to_cpu(buf); + } + return rc; +} + +int xen_host_pci_get_long(XenHostPCIDevice *d, int pos, uint32_t *p) +{ + uint32_t buf; + int rc = xen_host_pci_config_read(d, pos, &buf, 4); + if (!rc) { + *p = le32_to_cpu(buf); + } + return rc; +} + +int xen_host_pci_get_block(XenHostPCIDevice *d, int pos, uint8_t *buf, int len) +{ + return xen_host_pci_config_read(d, pos, buf, len); +} + +int xen_host_pci_set_byte(XenHostPCIDevice *d, int pos, uint8_t data) +{ + return xen_host_pci_config_write(d, pos, &data, 1); +} + +int xen_host_pci_set_word(XenHostPCIDevice *d, int pos, uint16_t data) +{ + data = cpu_to_le16(data); + return xen_host_pci_config_write(d, pos, &data, 2); +} + +int xen_host_pci_set_long(XenHostPCIDevice *d, int pos, uint32_t data) +{ + data = cpu_to_le32(data); + return xen_host_pci_config_write(d, pos, &data, 4); +} + +int xen_host_pci_set_block(XenHostPCIDevice *d, int pos, uint8_t *buf, int len) +{ + return xen_host_pci_config_write(d, pos, buf, len); +} + +int xen_host_pci_find_ext_cap_offset(XenHostPCIDevice *d, uint32_t cap) +{ + uint32_t header = 0; + int max_cap = XEN_HOST_PCI_MAX_EXT_CAP; + int pos = PCI_CONFIG_SPACE_SIZE; + + do { + if (xen_host_pci_get_long(d, pos, &header)) { + break; + } + /* + * If we have no capabilities, this is indicated by cap ID, + * cap version and next pointer all being 0. 
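The capability walk in xen_host_pci_find_ext_cap_offset() is bounded by XEN_HOST_PCI_MAX_EXT_CAP (defined near the top of this new file) so that a corrupt or looping next pointer cannot hang QEMU. Assuming the usual values of the constants involved, which are not restated in this patch (PCI_CONFIG_SPACE_SIZE = 256, PCIE_CONFIG_SPACE_SIZE = 4096, PCI_CAP_SIZEOF = 4), the bound works out to (4096 - 256) / (4 + 4) = 480 iterations:

    /* Illustrative only; the real macro lives in xen-host-pci-device.c and
     * the constant values below are assumptions, not quoted from the patch. */
    enum { ASSUMED_MAX_EXT_CAP = (4096 - 256) / (4 + 4) };   /* == 480 */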
+ */ + if (header == 0) { + break; + } + + if (PCI_EXT_CAP_ID(header) == cap) { + return pos; + } + + pos = PCI_EXT_CAP_NEXT(header); + if (pos < PCI_CONFIG_SPACE_SIZE) { + break; + } + + max_cap--; + } while (max_cap > 0); + + return -1; +} + +int xen_host_pci_device_get(XenHostPCIDevice *d, uint16_t domain, + uint8_t bus, uint8_t dev, uint8_t func) +{ + unsigned int v; + int rc = 0; + + d->config_fd = -1; + d->domain = domain; + d->bus = bus; + d->dev = dev; + d->func = func; + + rc = xen_host_pci_config_open(d); + if (rc) { + goto error; + } + rc = xen_host_pci_get_resource(d); + if (rc) { + goto error; + } + rc = xen_host_pci_get_hex_value(d, "vendor", &v); + if (rc) { + goto error; + } + d->vendor_id = v; + rc = xen_host_pci_get_hex_value(d, "device", &v); + if (rc) { + goto error; + } + d->device_id = v; + rc = xen_host_pci_get_dec_value(d, "irq", &v); + if (rc) { + goto error; + } + d->irq = v; + d->is_virtfn = xen_host_pci_dev_is_virtfn(d); + + return 0; +error: + if (d->config_fd >= 0) { + close(d->config_fd); + d->config_fd = -1; + } + return rc; +} + +void xen_host_pci_device_put(XenHostPCIDevice *d) +{ + if (d->config_fd >= 0) { + close(d->config_fd); + d->config_fd = -1; + } +} diff --git a/hw/xen-host-pci-device.h b/hw/xen-host-pci-device.h new file mode 100644 index 0000000000..0079daca51 --- /dev/null +++ b/hw/xen-host-pci-device.h @@ -0,0 +1,55 @@ +#ifndef XEN_HOST_PCI_DEVICE_H +#define XEN_HOST_PCI_DEVICE_H + +#include "pci.h" + +enum { + XEN_HOST_PCI_REGION_TYPE_IO = 1 << 1, + XEN_HOST_PCI_REGION_TYPE_MEM = 1 << 2, + XEN_HOST_PCI_REGION_TYPE_PREFETCH = 1 << 3, + XEN_HOST_PCI_REGION_TYPE_MEM_64 = 1 << 4, +}; + +typedef struct XenHostPCIIORegion { + pcibus_t base_addr; + pcibus_t size; + uint8_t type; + uint8_t bus_flags; /* Bus-specific bits */ +} XenHostPCIIORegion; + +typedef struct XenHostPCIDevice { + uint16_t domain; + uint8_t bus; + uint8_t dev; + uint8_t func; + + uint16_t vendor_id; + uint16_t device_id; + int irq; + + XenHostPCIIORegion io_regions[PCI_NUM_REGIONS - 1]; + XenHostPCIIORegion rom; + + bool is_virtfn; + + int config_fd; +} XenHostPCIDevice; + +int xen_host_pci_device_get(XenHostPCIDevice *d, uint16_t domain, + uint8_t bus, uint8_t dev, uint8_t func); +void xen_host_pci_device_put(XenHostPCIDevice *pci_dev); + +int xen_host_pci_get_byte(XenHostPCIDevice *d, int pos, uint8_t *p); +int xen_host_pci_get_word(XenHostPCIDevice *d, int pos, uint16_t *p); +int xen_host_pci_get_long(XenHostPCIDevice *d, int pos, uint32_t *p); +int xen_host_pci_get_block(XenHostPCIDevice *d, int pos, uint8_t *buf, + int len); +int xen_host_pci_set_byte(XenHostPCIDevice *d, int pos, uint8_t data); +int xen_host_pci_set_word(XenHostPCIDevice *d, int pos, uint16_t data); +int xen_host_pci_set_long(XenHostPCIDevice *d, int pos, uint32_t data); +int xen_host_pci_set_block(XenHostPCIDevice *d, int pos, uint8_t *buf, + int len); + +int xen_host_pci_find_ext_cap_offset(XenHostPCIDevice *s, uint32_t cap); + +#endif /* !XEN_HOST_PCI_DEVICE_H_ */ diff --git a/hw/xen_backend.c b/hw/xen_backend.c index 66cb144397..f83a1e1d09 100644 --- a/hw/xen_backend.c +++ b/hw/xen_backend.c @@ -34,15 +34,13 @@ #include <sys/mman.h> #include <sys/signal.h> -#include <xs.h> -#include <xenctrl.h> -#include <xen/grant_table.h> - #include "hw.h" #include "qemu-char.h" #include "qemu-log.h" #include "xen_backend.h" +#include <xen/grant_table.h> + /* ------------------------------------------------------------- */ /* public */ diff --git a/hw/xen_common.h b/hw/xen_common.h index fe7f227f92..727757afb4 100644 
--- a/hw/xen_common.h +++ b/hw/xen_common.h @@ -7,7 +7,11 @@ #include <inttypes.h> #include <xenctrl.h> -#include <xs.h> +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 420 +# include <xs.h> +#else +# include <xenstore.h> +#endif #include <xen/io/xenbus.h> #include "hw.h" @@ -150,4 +154,7 @@ static inline int xen_xc_hvm_inject_msi(XenXC xen_xc, domid_t dom, void destroy_hvm_domain(bool reboot); +/* shutdown/destroy current domain because of an error */ +void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2); + #endif /* QEMU_HW_XEN_COMMON_H */ diff --git a/hw/xen_console.c b/hw/xen_console.c index 3794b1972d..9426d7374f 100644 --- a/hw/xen_console.c +++ b/hw/xen_console.c @@ -28,14 +28,13 @@ #include <termios.h> #include <stdarg.h> #include <sys/mman.h> -#include <xs.h> -#include <xen/io/console.h> -#include <xenctrl.h> #include "hw.h" #include "qemu-char.h" #include "xen_backend.h" +#include <xen/io/console.h> + struct buffer { uint8_t *data; size_t consumed; diff --git a/hw/xen_devconfig.c b/hw/xen_devconfig.c index 41accbbfa9..0928613b55 100644 --- a/hw/xen_devconfig.c +++ b/hw/xen_devconfig.c @@ -1,6 +1,5 @@ #include "xen_backend.h" #include "blockdev.h" -#include "block_int.h" /* XXX */ /* ------------------------------------------------------------- */ @@ -94,16 +93,16 @@ static int xen_config_dev_all(char *fe, char *be) int xen_config_dev_blk(DriveInfo *disk) { - char fe[256], be[256]; + char fe[256], be[256], device_name[32]; int vdev = 202 * 256 + 16 * disk->unit; int cdrom = disk->media_cd; const char *devtype = cdrom ? "cdrom" : "disk"; const char *mode = cdrom ? "r" : "w"; + const char *filename = qemu_opt_get(disk->opts, "file"); - snprintf(disk->bdrv->device_name, sizeof(disk->bdrv->device_name), - "xvd%c", 'a' + disk->unit); + snprintf(device_name, sizeof(device_name), "xvd%c", 'a' + disk->unit); xen_be_printf(NULL, 1, "config disk %d [%s]: %s\n", - disk->unit, disk->bdrv->device_name, disk->bdrv->filename); + disk->unit, device_name, filename); xen_config_dev_dirs("vbd", "qdisk", vdev, fe, be, sizeof(fe)); /* frontend */ @@ -111,9 +110,9 @@ int xen_config_dev_blk(DriveInfo *disk) xenstore_write_str(fe, "device-type", devtype); /* backend */ - xenstore_write_str(be, "dev", disk->bdrv->device_name); + xenstore_write_str(be, "dev", device_name); xenstore_write_str(be, "type", "file"); - xenstore_write_str(be, "params", disk->bdrv->filename); + xenstore_write_str(be, "params", filename); xenstore_write_str(be, "mode", mode); /* common stuff */ diff --git a/hw/xen_disk.c b/hw/xen_disk.c index 07594bc0c8..e6bb2f20b9 100644 --- a/hw/xen_disk.c +++ b/hw/xen_disk.c @@ -35,15 +35,10 @@ #include <sys/mman.h> #include <sys/uio.h> -#include <xs.h> -#include <xenctrl.h> -#include <xen/io/xenbus.h> - #include "hw.h" -#include "block_int.h" #include "qemu-char.h" -#include "xen_blkif.h" #include "xen_backend.h" +#include "xen_blkif.h" #include "blockdev.h" /* ------------------------------------------------------------- */ @@ -537,6 +532,15 @@ static void blk_bh(void *opaque) blk_handle_requests(blkdev); } +/* + * We need to account for the grant allocations requiring contiguous + * chunks; the worst case number would be + * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1, + * but in order to keep things simple just use + * 2 * max_req * max_seg. 
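The MAX_GRANTS bound in the xen_disk.c comment above can be checked with one line of algebra: the exact worst case max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1 expands to 2 * max_req * max_seg - max_req - max_seg + 2, which never exceeds 2 * max_req * max_seg as long as max_req + max_seg >= 2, so the simpler product is always a safe over-estimate. For instance, if max_requests is 32 and BLKIF_MAX_SEGMENTS_PER_REQUEST is 11 (plausible values, though neither appears in this hunk), the exact worst case is 663 grants while the macro reserves 704.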
+ */ +#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg)) + static void blk_alloc(struct XenDevice *xendev) { struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev); @@ -548,6 +552,11 @@ static void blk_alloc(struct XenDevice *xendev) if (xen_mode != XEN_EMULATE) { batch_maps = 1; } + if (xc_gnttab_set_max_grants(xendev->gnttabdev, + MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) { + xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n", + strerror(errno)); + } } static int blk_init(struct XenDevice *xendev) @@ -636,7 +645,7 @@ static int blk_init(struct XenDevice *xendev) if (blkdev->file_size < 0) { xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n", (int)blkdev->file_size, strerror(-blkdev->file_size), - blkdev->bs->drv ? blkdev->bs->drv->format_name : "-"); + bdrv_get_format_name(blkdev->bs) ?: "-"); blkdev->file_size = 0; } diff --git a/hw/xen_machine_pv.c b/hw/xen_machine_pv.c index 7eee770eea..4b72aa7557 100644 --- a/hw/xen_machine_pv.c +++ b/hw/xen_machine_pv.c @@ -36,6 +36,7 @@ static void xen_init_pv(ram_addr_t ram_size, const char *initrd_filename, const char *cpu_model) { + X86CPU *cpu; CPUX86State *env; DriveInfo *dinfo; int i; @@ -48,7 +49,8 @@ static void xen_init_pv(ram_addr_t ram_size, cpu_model = "qemu32"; #endif } - env = cpu_init(cpu_model); + cpu = cpu_x86_init(cpu_model); + env = &cpu->env; env->halted = 1; /* Initialize backend core & drivers */ diff --git a/hw/xen_nic.c b/hw/xen_nic.c index 9a59bdad6e..98db9bb8f6 100644 --- a/hw/xen_nic.c +++ b/hw/xen_nic.c @@ -35,11 +35,6 @@ #include <sys/mman.h> #include <sys/wait.h> -#include <xs.h> -#include <xenctrl.h> -#include <xen/io/xenbus.h> -#include <xen/io/netif.h> - #include "hw.h" #include "net.h" #include "net/checksum.h" @@ -47,6 +42,8 @@ #include "qemu-char.h" #include "xen_backend.h" +#include <xen/io/netif.h> + /* ------------------------------------------------------------- */ struct XenNetDev { diff --git a/hw/xen_platform.c b/hw/xen_platform.c index 0214f370b2..c1fe984f07 100644 --- a/hw/xen_platform.c +++ b/hw/xen_platform.c @@ -83,7 +83,7 @@ static void log_writeb(PCIXenPlatformState *s, char val) #define UNPLUG_ALL_NICS 2 #define UNPLUG_AUX_IDE_DISKS 4 -static void unplug_nic(PCIBus *b, PCIDevice *d) +static void unplug_nic(PCIBus *b, PCIDevice *d, void *o) { if (pci_get_word(d->config + PCI_CLASS_DEVICE) == PCI_CLASS_NETWORK_ETHERNET) { @@ -96,10 +96,10 @@ static void unplug_nic(PCIBus *b, PCIDevice *d) static void pci_unplug_nics(PCIBus *bus) { - pci_for_each_device(bus, 0, unplug_nic); + pci_for_each_device(bus, 0, unplug_nic, NULL); } -static void unplug_disks(PCIBus *b, PCIDevice *d) +static void unplug_disks(PCIBus *b, PCIDevice *d, void *o) { if (pci_get_word(d->config + PCI_CLASS_DEVICE) == PCI_CLASS_STORAGE_IDE) { @@ -109,7 +109,7 @@ static void unplug_disks(PCIBus *b, PCIDevice *d) static void pci_unplug_disks(PCIBus *bus) { - pci_for_each_device(bus, 0, unplug_disks); + pci_for_each_device(bus, 0, unplug_disks, NULL); } static void platform_fixed_ioport_writew(void *opaque, uint32_t addr, uint32_t val) diff --git a/hw/xen_pt.c b/hw/xen_pt.c new file mode 100644 index 0000000000..3b6d1867ab --- /dev/null +++ b/hw/xen_pt.c @@ -0,0 +1,851 @@ +/* + * Copyright (c) 2007, Neocleus Corporation. + * Copyright (c) 2007, Intel Corporation. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + * Alex Novik <alex@neocleus.com> + * Allen Kay <allen.m.kay@intel.com> + * Guy Zana <guy@neocleus.com> + * + * This file implements direct PCI assignment to a HVM guest + */ + +/* + * Interrupt Disable policy: + * + * INTx interrupt: + * Initialize(register_real_device) + * Map INTx(xc_physdev_map_pirq): + * <fail> + * - Set real Interrupt Disable bit to '1'. + * - Set machine_irq and assigned_device->machine_irq to '0'. + * * Don't bind INTx. + * + * Bind INTx(xc_domain_bind_pt_pci_irq): + * <fail> + * - Set real Interrupt Disable bit to '1'. + * - Unmap INTx. + * - Decrement xen_pt_mapped_machine_irq[machine_irq] + * - Set assigned_device->machine_irq to '0'. + * + * Write to Interrupt Disable bit by guest software(xen_pt_cmd_reg_write) + * Write '0' + * - Set real bit to '0' if assigned_device->machine_irq isn't '0'. + * + * Write '1' + * - Set real bit to '1'. + * + * MSI interrupt: + * Initialize MSI register(xen_pt_msi_setup, xen_pt_msi_update) + * Bind MSI(xc_domain_update_msi_irq) + * <fail> + * - Unmap MSI. + * - Set dev->msi->pirq to '-1'. + * + * MSI-X interrupt: + * Initialize MSI-X register(xen_pt_msix_update_one) + * Bind MSI-X(xc_domain_update_msi_irq) + * <fail> + * - Unmap MSI-X. + * - Set entry->pirq to '-1'. + */ + +#include <sys/ioctl.h> + +#include "pci.h" +#include "xen.h" +#include "xen_backend.h" +#include "xen_pt.h" +#include "range.h" + +#define XEN_PT_NR_IRQS (256) +static uint8_t xen_pt_mapped_machine_irq[XEN_PT_NR_IRQS] = {0}; + +void xen_pt_log(const PCIDevice *d, const char *f, ...) +{ + va_list ap; + + va_start(ap, f); + if (d) { + fprintf(stderr, "[%02x:%02x.%d] ", pci_bus_num(d->bus), + PCI_SLOT(d->devfn), PCI_FUNC(d->devfn)); + } + vfprintf(stderr, f, ap); + va_end(ap); +} + +/* Config Space */ + +static int xen_pt_pci_config_access_check(PCIDevice *d, uint32_t addr, int len) +{ + /* check offset range */ + if (addr >= 0xFF) { + XEN_PT_ERR(d, "Failed to access register with offset exceeding 0xFF. " + "(addr: 0x%02x, len: %d)\n", addr, len); + return -1; + } + + /* check read size */ + if ((len != 1) && (len != 2) && (len != 4)) { + XEN_PT_ERR(d, "Failed to access register with invalid access length. " + "(addr: 0x%02x, len: %d)\n", addr, len); + return -1; + } + + /* check offset alignment */ + if (addr & (len - 1)) { + XEN_PT_ERR(d, "Failed to access register with invalid access size " + "alignment. 
(addr: 0x%02x, len: %d)\n", addr, len); + return -1; + } + + return 0; +} + +int xen_pt_bar_offset_to_index(uint32_t offset) +{ + int index = 0; + + /* check Exp ROM BAR */ + if (offset == PCI_ROM_ADDRESS) { + return PCI_ROM_SLOT; + } + + /* calculate BAR index */ + index = (offset - PCI_BASE_ADDRESS_0) >> 2; + if (index >= PCI_NUM_REGIONS) { + return -1; + } + + return index; +} + +static uint32_t xen_pt_pci_read_config(PCIDevice *d, uint32_t addr, int len) +{ + XenPCIPassthroughState *s = DO_UPCAST(XenPCIPassthroughState, dev, d); + uint32_t val = 0; + XenPTRegGroup *reg_grp_entry = NULL; + XenPTReg *reg_entry = NULL; + int rc = 0; + int emul_len = 0; + uint32_t find_addr = addr; + + if (xen_pt_pci_config_access_check(d, addr, len)) { + goto exit; + } + + /* find register group entry */ + reg_grp_entry = xen_pt_find_reg_grp(s, addr); + if (reg_grp_entry) { + /* check 0-Hardwired register group */ + if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) { + /* no need to emulate, just return 0 */ + val = 0; + goto exit; + } + } + + /* read I/O device register value */ + rc = xen_host_pci_get_block(&s->real_device, addr, (uint8_t *)&val, len); + if (rc < 0) { + XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc); + memset(&val, 0xff, len); + } + + /* just return the I/O device register value for + * passthrough type register group */ + if (reg_grp_entry == NULL) { + goto exit; + } + + /* adjust the read value to appropriate CFC-CFF window */ + val <<= (addr & 3) << 3; + emul_len = len; + + /* loop around the guest requested size */ + while (emul_len > 0) { + /* find register entry to be emulated */ + reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr); + if (reg_entry) { + XenPTRegInfo *reg = reg_entry->reg; + uint32_t real_offset = reg_grp_entry->base_offset + reg->offset; + uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3); + uint8_t *ptr_val = NULL; + + valid_mask <<= (find_addr - real_offset) << 3; + ptr_val = (uint8_t *)&val + (real_offset & 3); + + /* do emulation based on register size */ + switch (reg->size) { + case 1: + if (reg->u.b.read) { + rc = reg->u.b.read(s, reg_entry, ptr_val, valid_mask); + } + break; + case 2: + if (reg->u.w.read) { + rc = reg->u.w.read(s, reg_entry, + (uint16_t *)ptr_val, valid_mask); + } + break; + case 4: + if (reg->u.dw.read) { + rc = reg->u.dw.read(s, reg_entry, + (uint32_t *)ptr_val, valid_mask); + } + break; + } + + if (rc < 0) { + xen_shutdown_fatal_error("Internal error: Invalid read " + "emulation. 
(%s, rc: %d)\n", + __func__, rc); + return 0; + } + + /* calculate next address to find */ + emul_len -= reg->size; + if (emul_len > 0) { + find_addr = real_offset + reg->size; + } + } else { + /* nothing to do with passthrough type register, + * continue to find next byte */ + emul_len--; + find_addr++; + } + } + + /* need to shift back before returning them to pci bus emulator */ + val >>= ((addr & 3) << 3); + +exit: + XEN_PT_LOG_CONFIG(d, addr, val, len); + return val; +} + +static void xen_pt_pci_write_config(PCIDevice *d, uint32_t addr, + uint32_t val, int len) +{ + XenPCIPassthroughState *s = DO_UPCAST(XenPCIPassthroughState, dev, d); + int index = 0; + XenPTRegGroup *reg_grp_entry = NULL; + int rc = 0; + uint32_t read_val = 0; + int emul_len = 0; + XenPTReg *reg_entry = NULL; + uint32_t find_addr = addr; + XenPTRegInfo *reg = NULL; + + if (xen_pt_pci_config_access_check(d, addr, len)) { + return; + } + + XEN_PT_LOG_CONFIG(d, addr, val, len); + + /* check unused BAR register */ + index = xen_pt_bar_offset_to_index(addr); + if ((index >= 0) && (val > 0 && val < XEN_PT_BAR_ALLF) && + (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED)) { + XEN_PT_WARN(d, "Guest attempt to set address to unused Base Address " + "Register. (addr: 0x%02x, len: %d)\n", addr, len); + } + + /* find register group entry */ + reg_grp_entry = xen_pt_find_reg_grp(s, addr); + if (reg_grp_entry) { + /* check 0-Hardwired register group */ + if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) { + /* ignore silently */ + XEN_PT_WARN(d, "Access to 0-Hardwired register. " + "(addr: 0x%02x, len: %d)\n", addr, len); + return; + } + } + + rc = xen_host_pci_get_block(&s->real_device, addr, + (uint8_t *)&read_val, len); + if (rc < 0) { + XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc); + memset(&read_val, 0xff, len); + } + + /* pass directly to the real device for passthrough type register group */ + if (reg_grp_entry == NULL) { + goto out; + } + + memory_region_transaction_begin(); + pci_default_write_config(d, addr, val, len); + + /* adjust the read and write value to appropriate CFC-CFF window */ + read_val <<= (addr & 3) << 3; + val <<= (addr & 3) << 3; + emul_len = len; + + /* loop around the guest requested size */ + while (emul_len > 0) { + /* find register entry to be emulated */ + reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr); + if (reg_entry) { + reg = reg_entry->reg; + uint32_t real_offset = reg_grp_entry->base_offset + reg->offset; + uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3); + uint8_t *ptr_val = NULL; + + valid_mask <<= (find_addr - real_offset) << 3; + ptr_val = (uint8_t *)&val + (real_offset & 3); + + /* do emulation based on register size */ + switch (reg->size) { + case 1: + if (reg->u.b.write) { + rc = reg->u.b.write(s, reg_entry, ptr_val, + read_val >> ((real_offset & 3) << 3), + valid_mask); + } + break; + case 2: + if (reg->u.w.write) { + rc = reg->u.w.write(s, reg_entry, (uint16_t *)ptr_val, + (read_val >> ((real_offset & 3) << 3)), + valid_mask); + } + break; + case 4: + if (reg->u.dw.write) { + rc = reg->u.dw.write(s, reg_entry, (uint32_t *)ptr_val, + (read_val >> ((real_offset & 3) << 3)), + valid_mask); + } + break; + } + + if (rc < 0) { + xen_shutdown_fatal_error("Internal error: Invalid write" + " emulation. 
(%s, rc: %d)\n", + __func__, rc); + return; + } + + /* calculate next address to find */ + emul_len -= reg->size; + if (emul_len > 0) { + find_addr = real_offset + reg->size; + } + } else { + /* nothing to do with passthrough type register, + * continue to find next byte */ + emul_len--; + find_addr++; + } + } + + /* need to shift back before passing them to xen_host_pci_device */ + val >>= (addr & 3) << 3; + + memory_region_transaction_commit(); + +out: + if (!(reg && reg->no_wb)) { + /* unknown regs are passed through */ + rc = xen_host_pci_set_block(&s->real_device, addr, + (uint8_t *)&val, len); + + if (rc < 0) { + XEN_PT_ERR(d, "pci_write_block failed. return value: %d.\n", rc); + } + } +} + +/* register regions */ + +static uint64_t xen_pt_bar_read(void *o, target_phys_addr_t addr, + unsigned size) +{ + PCIDevice *d = o; + /* if this function is called, that probably means that there is a + * misconfiguration of the IOMMU. */ + XEN_PT_ERR(d, "Should not read BAR through QEMU. @0x"TARGET_FMT_plx"\n", + addr); + return 0; +} +static void xen_pt_bar_write(void *o, target_phys_addr_t addr, uint64_t val, + unsigned size) +{ + PCIDevice *d = o; + /* Same comment as xen_pt_bar_read function */ + XEN_PT_ERR(d, "Should not write BAR through QEMU. @0x"TARGET_FMT_plx"\n", + addr); +} + +static const MemoryRegionOps ops = { + .endianness = DEVICE_NATIVE_ENDIAN, + .read = xen_pt_bar_read, + .write = xen_pt_bar_write, +}; + +static int xen_pt_register_regions(XenPCIPassthroughState *s) +{ + int i = 0; + XenHostPCIDevice *d = &s->real_device; + + /* Register PIO/MMIO BARs */ + for (i = 0; i < PCI_ROM_SLOT; i++) { + XenHostPCIIORegion *r = &d->io_regions[i]; + uint8_t type; + + if (r->base_addr == 0 || r->size == 0) { + continue; + } + + s->bases[i].access.u = r->base_addr; + + if (r->type & XEN_HOST_PCI_REGION_TYPE_IO) { + type = PCI_BASE_ADDRESS_SPACE_IO; + } else { + type = PCI_BASE_ADDRESS_SPACE_MEMORY; + if (r->type & XEN_HOST_PCI_REGION_TYPE_PREFETCH) { + type |= PCI_BASE_ADDRESS_MEM_PREFETCH; + } + } + + memory_region_init_io(&s->bar[i], &ops, &s->dev, + "xen-pci-pt-bar", r->size); + pci_register_bar(&s->dev, i, type, &s->bar[i]); + + XEN_PT_LOG(&s->dev, "IO region %i registered (size=0x%08"PRIx64 + " base_addr=0x%08"PRIx64" type: %#x)\n", + i, r->size, r->base_addr, type); + } + + /* Register expansion ROM address */ + if (d->rom.base_addr && d->rom.size) { + uint32_t bar_data = 0; + + /* Re-set BAR reported by OS, otherwise ROM can't be read. 
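A detail worth spelling out from the xen_pt_pci_read_config()/xen_pt_pci_write_config() loops further up: a sub-dword guest access is first shifted into its position inside the aligned 32-bit configuration window with val <<= (addr & 3) << 3, the per-register hooks then patch bytes inside that window, and the result is shifted back before it is handed to the PCI core. A tiny worked example with hypothetical numbers (not taken from the patch):

    /* Guest performs a 16-bit config read at offset 0x06 (PCI_STATUS). */
    uint32_t addr = 0x06, len = 2;
    uint32_t val  = 0x0290;              /* raw value from the real device  */

    val <<= (addr & 3) << 3;             /* (6 & 3) = 2 bytes -> shift by 16,
                                            val is now 0x02900000            */

    /* valid_mask starts as 0xFFFFFFFF >> ((4 - len) << 3) == 0x0000FFFF and
     * is then shifted to the emulated register's own position.              */

    val >>= (addr & 3) << 3;             /* back to 0x0290 for the PCI core  */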
*/ + if (xen_host_pci_get_long(d, PCI_ROM_ADDRESS, &bar_data)) { + return 0; + } + if ((bar_data & PCI_ROM_ADDRESS_MASK) == 0) { + bar_data |= d->rom.base_addr & PCI_ROM_ADDRESS_MASK; + xen_host_pci_set_long(d, PCI_ROM_ADDRESS, bar_data); + } + + s->bases[PCI_ROM_SLOT].access.maddr = d->rom.base_addr; + + memory_region_init_rom_device(&s->rom, NULL, NULL, + "xen-pci-pt-rom", d->rom.size); + pci_register_bar(&s->dev, PCI_ROM_SLOT, PCI_BASE_ADDRESS_MEM_PREFETCH, + &s->rom); + + XEN_PT_LOG(&s->dev, "Expansion ROM registered (size=0x%08"PRIx64 + " base_addr=0x%08"PRIx64")\n", + d->rom.size, d->rom.base_addr); + } + + return 0; +} + +static void xen_pt_unregister_regions(XenPCIPassthroughState *s) +{ + XenHostPCIDevice *d = &s->real_device; + int i; + + for (i = 0; i < PCI_NUM_REGIONS - 1; i++) { + XenHostPCIIORegion *r = &d->io_regions[i]; + + if (r->base_addr == 0 || r->size == 0) { + continue; + } + + memory_region_destroy(&s->bar[i]); + } + if (d->rom.base_addr && d->rom.size) { + memory_region_destroy(&s->rom); + } +} + +/* region mapping */ + +static int xen_pt_bar_from_region(XenPCIPassthroughState *s, MemoryRegion *mr) +{ + int i = 0; + + for (i = 0; i < PCI_NUM_REGIONS - 1; i++) { + if (mr == &s->bar[i]) { + return i; + } + } + if (mr == &s->rom) { + return PCI_ROM_SLOT; + } + return -1; +} + +/* + * This function checks if an io_region overlaps an io_region from another + * device. The io_region to check is provided with (addr, size and type) + * A callback can be provided and will be called for every region that is + * overlapped. + * The return value indicates if the region is overlappsed */ +struct CheckBarArgs { + XenPCIPassthroughState *s; + pcibus_t addr; + pcibus_t size; + uint8_t type; + bool rc; +}; +static void xen_pt_check_bar_overlap(PCIBus *bus, PCIDevice *d, void *opaque) +{ + struct CheckBarArgs *arg = opaque; + XenPCIPassthroughState *s = arg->s; + uint8_t type = arg->type; + int i; + + if (d->devfn == s->dev.devfn) { + return; + } + + /* xxx: This ignores bridges. */ + for (i = 0; i < PCI_NUM_REGIONS; i++) { + const PCIIORegion *r = &d->io_regions[i]; + + if (!r->size) { + continue; + } + if ((type & PCI_BASE_ADDRESS_SPACE_IO) + != (r->type & PCI_BASE_ADDRESS_SPACE_IO)) { + continue; + } + + if (ranges_overlap(arg->addr, arg->size, r->addr, r->size)) { + XEN_PT_WARN(&s->dev, + "Overlapped to device [%02x:%02x.%d] Region: %i" + " (addr: %#"FMT_PCIBUS", len: %#"FMT_PCIBUS")\n", + pci_bus_num(bus), PCI_SLOT(d->devfn), + PCI_FUNC(d->devfn), i, r->addr, r->size); + arg->rc = true; + } + } +} + +static void xen_pt_region_update(XenPCIPassthroughState *s, + MemoryRegionSection *sec, bool adding) +{ + PCIDevice *d = &s->dev; + MemoryRegion *mr = sec->mr; + int bar = -1; + int rc; + int op = adding ? 
DPCI_ADD_MAPPING : DPCI_REMOVE_MAPPING; + struct CheckBarArgs args = { + .s = s, + .addr = sec->offset_within_address_space, + .size = sec->size, + .rc = false, + }; + + bar = xen_pt_bar_from_region(s, mr); + if (bar == -1 && (!s->msix || &s->msix->mmio != mr)) { + return; + } + + if (s->msix && &s->msix->mmio == mr) { + if (adding) { + s->msix->mmio_base_addr = sec->offset_within_address_space; + rc = xen_pt_msix_update_remap(s, s->msix->bar_index); + } + return; + } + + args.type = d->io_regions[bar].type; + pci_for_each_device(d->bus, pci_bus_num(d->bus), + xen_pt_check_bar_overlap, &args); + if (args.rc) { + XEN_PT_WARN(d, "Region: %d (addr: %#"FMT_PCIBUS + ", len: %#"FMT_PCIBUS") is overlapped.\n", + bar, sec->offset_within_address_space, sec->size); + } + + if (d->io_regions[bar].type & PCI_BASE_ADDRESS_SPACE_IO) { + uint32_t guest_port = sec->offset_within_address_space; + uint32_t machine_port = s->bases[bar].access.pio_base; + uint32_t size = sec->size; + rc = xc_domain_ioport_mapping(xen_xc, xen_domid, + guest_port, machine_port, size, + op); + if (rc) { + XEN_PT_ERR(d, "%s ioport mapping failed! (rc: %i)\n", + adding ? "create new" : "remove old", rc); + } + } else { + pcibus_t guest_addr = sec->offset_within_address_space; + pcibus_t machine_addr = s->bases[bar].access.maddr + + sec->offset_within_region; + pcibus_t size = sec->size; + rc = xc_domain_memory_mapping(xen_xc, xen_domid, + XEN_PFN(guest_addr + XC_PAGE_SIZE - 1), + XEN_PFN(machine_addr + XC_PAGE_SIZE - 1), + XEN_PFN(size + XC_PAGE_SIZE - 1), + op); + if (rc) { + XEN_PT_ERR(d, "%s mem mapping failed! (rc: %i)\n", + adding ? "create new" : "remove old", rc); + } + } +} + +static void xen_pt_begin(MemoryListener *l) +{ +} + +static void xen_pt_commit(MemoryListener *l) +{ +} + +static void xen_pt_region_add(MemoryListener *l, MemoryRegionSection *sec) +{ + XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState, + memory_listener); + + xen_pt_region_update(s, sec, true); +} + +static void xen_pt_region_del(MemoryListener *l, MemoryRegionSection *sec) +{ + XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState, + memory_listener); + + xen_pt_region_update(s, sec, false); +} + +static void xen_pt_region_nop(MemoryListener *l, MemoryRegionSection *s) +{ +} + +static void xen_pt_log_fns(MemoryListener *l, MemoryRegionSection *s) +{ +} + +static void xen_pt_log_global_fns(MemoryListener *l) +{ +} + +static void xen_pt_eventfd_fns(MemoryListener *l, MemoryRegionSection *s, + bool match_data, uint64_t data, int fd) +{ +} + +static const MemoryListener xen_pt_memory_listener = { + .begin = xen_pt_begin, + .commit = xen_pt_commit, + .region_add = xen_pt_region_add, + .region_nop = xen_pt_region_nop, + .region_del = xen_pt_region_del, + .log_start = xen_pt_log_fns, + .log_stop = xen_pt_log_fns, + .log_sync = xen_pt_log_fns, + .log_global_start = xen_pt_log_global_fns, + .log_global_stop = xen_pt_log_global_fns, + .eventfd_add = xen_pt_eventfd_fns, + .eventfd_del = xen_pt_eventfd_fns, + .priority = 10, +}; + +/* init */ + +static int xen_pt_initfn(PCIDevice *d) +{ + XenPCIPassthroughState *s = DO_UPCAST(XenPCIPassthroughState, dev, d); + int rc = 0; + uint8_t machine_irq = 0; + int pirq = XEN_PT_UNASSIGNED_PIRQ; + + /* register real device */ + XEN_PT_LOG(d, "Assigning real physical device %02x:%02x.%d" + " to devfn %#x\n", + s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function, + s->dev.devfn); + + rc = xen_host_pci_device_get(&s->real_device, + s->hostaddr.domain, s->hostaddr.bus, + s->hostaddr.slot, 
s->hostaddr.function); + if (rc) { + XEN_PT_ERR(d, "Failed to \"open\" the real pci device. rc: %i\n", rc); + return -1; + } + + s->is_virtfn = s->real_device.is_virtfn; + if (s->is_virtfn) { + XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n", + s->real_device.domain, bus, slot, func); + } + + /* Initialize virtualized PCI configuration (Extended 256 Bytes) */ + if (xen_host_pci_get_block(&s->real_device, 0, d->config, + PCI_CONFIG_SPACE_SIZE) == -1) { + xen_host_pci_device_put(&s->real_device); + return -1; + } + + s->memory_listener = xen_pt_memory_listener; + + /* Handle real device's MMIO/PIO BARs */ + xen_pt_register_regions(s); + + /* reinitialize each config register to be emulated */ + if (xen_pt_config_init(s)) { + XEN_PT_ERR(d, "PCI Config space initialisation failed.\n"); + xen_host_pci_device_put(&s->real_device); + return -1; + } + + /* Bind interrupt */ + if (!s->dev.config[PCI_INTERRUPT_PIN]) { + XEN_PT_LOG(d, "no pin interrupt\n"); + goto out; + } + + machine_irq = s->real_device.irq; + rc = xc_physdev_map_pirq(xen_xc, xen_domid, machine_irq, &pirq); + + if (rc < 0) { + XEN_PT_ERR(d, "Mapping machine irq %u to pirq %i failed, (rc: %d)\n", + machine_irq, pirq, rc); + + /* Disable PCI intx assertion (turn on bit10 of devctl) */ + xen_host_pci_set_word(&s->real_device, + PCI_COMMAND, + pci_get_word(s->dev.config + PCI_COMMAND) + | PCI_COMMAND_INTX_DISABLE); + machine_irq = 0; + s->machine_irq = 0; + } else { + machine_irq = pirq; + s->machine_irq = pirq; + xen_pt_mapped_machine_irq[machine_irq]++; + } + + /* bind machine_irq to device */ + if (machine_irq != 0) { + uint8_t e_intx = xen_pt_pci_intx(s); + + rc = xc_domain_bind_pt_pci_irq(xen_xc, xen_domid, machine_irq, + pci_bus_num(d->bus), + PCI_SLOT(d->devfn), + e_intx); + if (rc < 0) { + XEN_PT_ERR(d, "Binding of interrupt %i failed! (rc: %d)\n", + e_intx, rc); + + /* Disable PCI intx assertion (turn on bit10 of devctl) */ + xen_host_pci_set_word(&s->real_device, PCI_COMMAND, + *(uint16_t *)(&s->dev.config[PCI_COMMAND]) + | PCI_COMMAND_INTX_DISABLE); + xen_pt_mapped_machine_irq[machine_irq]--; + + if (xen_pt_mapped_machine_irq[machine_irq] == 0) { + if (xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq)) { + XEN_PT_ERR(d, "Unmapping of machine interrupt %i failed!" + " (rc: %d)\n", machine_irq, rc); + } + } + s->machine_irq = 0; + } + } + +out: + memory_listener_register(&s->memory_listener, NULL); + XEN_PT_LOG(d, "Real physical device %02x:%02x.%d registered successfuly!\n", + bus, slot, func); + + return 0; +} + +static int xen_pt_unregister_device(PCIDevice *d) +{ + XenPCIPassthroughState *s = DO_UPCAST(XenPCIPassthroughState, dev, d); + uint8_t machine_irq = s->machine_irq; + uint8_t intx = xen_pt_pci_intx(s); + int rc; + + if (machine_irq) { + rc = xc_domain_unbind_pt_irq(xen_xc, xen_domid, machine_irq, + PT_IRQ_TYPE_PCI, + pci_bus_num(d->bus), + PCI_SLOT(s->dev.devfn), + intx, + 0 /* isa_irq */); + if (rc < 0) { + XEN_PT_ERR(d, "unbinding of interrupt INT%c failed." + " (machine irq: %i, rc: %d)" + " But bravely continuing on..\n", + 'a' + intx, machine_irq, rc); + } + } + + if (s->msi) { + xen_pt_msi_disable(s); + } + if (s->msix) { + xen_pt_msix_disable(s); + } + + if (machine_irq) { + xen_pt_mapped_machine_irq[machine_irq]--; + + if (xen_pt_mapped_machine_irq[machine_irq] == 0) { + rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq); + + if (rc < 0) { + XEN_PT_ERR(d, "unmapping of interrupt %i failed. 
(rc: %d)" + " But bravely continuing on..\n", + machine_irq, rc); + } + } + } + + /* delete all emulated config registers */ + xen_pt_config_delete(s); + + xen_pt_unregister_regions(s); + memory_listener_unregister(&s->memory_listener); + + xen_host_pci_device_put(&s->real_device); + + return 0; +} + +static Property xen_pci_passthrough_properties[] = { + DEFINE_PROP_PCI_HOST_DEVADDR("hostaddr", XenPCIPassthroughState, hostaddr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->init = xen_pt_initfn; + k->exit = xen_pt_unregister_device; + k->config_read = xen_pt_pci_read_config; + k->config_write = xen_pt_pci_write_config; + dc->desc = "Assign an host PCI device with Xen"; + dc->props = xen_pci_passthrough_properties; +}; + +static TypeInfo xen_pci_passthrough_info = { + .name = "xen-pci-passthrough", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(XenPCIPassthroughState), + .class_init = xen_pci_passthrough_class_init, +}; + +static void xen_pci_passthrough_register_types(void) +{ + type_register_static(&xen_pci_passthrough_info); +} + +type_init(xen_pci_passthrough_register_types) diff --git a/hw/xen_pt.h b/hw/xen_pt.h new file mode 100644 index 0000000000..41904ece93 --- /dev/null +++ b/hw/xen_pt.h @@ -0,0 +1,301 @@ +#ifndef XEN_PT_H +#define XEN_PT_H + +#include "qemu-common.h" +#include "xen_common.h" +#include "pci.h" +#include "xen-host-pci-device.h" + +void xen_pt_log(const PCIDevice *d, const char *f, ...) GCC_FMT_ATTR(2, 3); + +#define XEN_PT_ERR(d, _f, _a...) xen_pt_log(d, "%s: Error: "_f, __func__, ##_a) + +#ifdef XEN_PT_LOGGING_ENABLED +# define XEN_PT_LOG(d, _f, _a...) xen_pt_log(d, "%s: " _f, __func__, ##_a) +# define XEN_PT_WARN(d, _f, _a...) \ + xen_pt_log(d, "%s: Warning: "_f, __func__, ##_a) +#else +# define XEN_PT_LOG(d, _f, _a...) +# define XEN_PT_WARN(d, _f, _a...) 
+#endif + +#ifdef XEN_PT_DEBUG_PCI_CONFIG_ACCESS +# define XEN_PT_LOG_CONFIG(d, addr, val, len) \ + xen_pt_log(d, "%s: address=0x%04x val=0x%08x len=%d\n", \ + __func__, addr, val, len) +#else +# define XEN_PT_LOG_CONFIG(d, addr, val, len) +#endif + + +/* Helper */ +#define XEN_PFN(x) ((x) >> XC_PAGE_SHIFT) + +typedef struct XenPTRegInfo XenPTRegInfo; +typedef struct XenPTReg XenPTReg; + +typedef struct XenPCIPassthroughState XenPCIPassthroughState; + +/* function type for config reg */ +typedef int (*xen_pt_conf_reg_init) + (XenPCIPassthroughState *, XenPTRegInfo *, uint32_t real_offset, + uint32_t *data); +typedef int (*xen_pt_conf_dword_write) + (XenPCIPassthroughState *, XenPTReg *cfg_entry, + uint32_t *val, uint32_t dev_value, uint32_t valid_mask); +typedef int (*xen_pt_conf_word_write) + (XenPCIPassthroughState *, XenPTReg *cfg_entry, + uint16_t *val, uint16_t dev_value, uint16_t valid_mask); +typedef int (*xen_pt_conf_byte_write) + (XenPCIPassthroughState *, XenPTReg *cfg_entry, + uint8_t *val, uint8_t dev_value, uint8_t valid_mask); +typedef int (*xen_pt_conf_dword_read) + (XenPCIPassthroughState *, XenPTReg *cfg_entry, + uint32_t *val, uint32_t valid_mask); +typedef int (*xen_pt_conf_word_read) + (XenPCIPassthroughState *, XenPTReg *cfg_entry, + uint16_t *val, uint16_t valid_mask); +typedef int (*xen_pt_conf_byte_read) + (XenPCIPassthroughState *, XenPTReg *cfg_entry, + uint8_t *val, uint8_t valid_mask); + +#define XEN_PT_BAR_ALLF 0xFFFFFFFF +#define XEN_PT_BAR_UNMAPPED (-1) + +#define PCI_CAP_MAX 48 + + +typedef enum { + XEN_PT_GRP_TYPE_HARDWIRED = 0, /* 0 Hardwired reg group */ + XEN_PT_GRP_TYPE_EMU, /* emul reg group */ +} XenPTRegisterGroupType; + +typedef enum { + XEN_PT_BAR_FLAG_MEM = 0, /* Memory type BAR */ + XEN_PT_BAR_FLAG_IO, /* I/O type BAR */ + XEN_PT_BAR_FLAG_UPPER, /* upper 64bit BAR */ + XEN_PT_BAR_FLAG_UNUSED, /* unused BAR */ +} XenPTBarFlag; + + +typedef struct XenPTRegion { + /* BAR flag */ + XenPTBarFlag bar_flag; + /* Translation of the emulated address */ + union { + uint64_t maddr; + uint64_t pio_base; + uint64_t u; + } access; +} XenPTRegion; + +/* XenPTRegInfo declaration + * - only for emulated register (either a part or whole bit). + * - for passthrough register that need special behavior (like interacting with + * other component), set emu_mask to all 0 and specify r/w func properly. + * - do NOT use ALL F for init_val, otherwise the tbl will not be registered. 
+ */ + +/* emulated register infomation */ +struct XenPTRegInfo { + uint32_t offset; + uint32_t size; + uint32_t init_val; + /* reg read only field mask (ON:RO/ROS, OFF:other) */ + uint32_t ro_mask; + /* reg emulate field mask (ON:emu, OFF:passthrough) */ + uint32_t emu_mask; + /* no write back allowed */ + uint32_t no_wb; + xen_pt_conf_reg_init init; + /* read/write function pointer + * for double_word/word/byte size */ + union { + struct { + xen_pt_conf_dword_write write; + xen_pt_conf_dword_read read; + } dw; + struct { + xen_pt_conf_word_write write; + xen_pt_conf_word_read read; + } w; + struct { + xen_pt_conf_byte_write write; + xen_pt_conf_byte_read read; + } b; + } u; +}; + +/* emulated register management */ +struct XenPTReg { + QLIST_ENTRY(XenPTReg) entries; + XenPTRegInfo *reg; + uint32_t data; /* emulated value */ +}; + +typedef struct XenPTRegGroupInfo XenPTRegGroupInfo; + +/* emul reg group size initialize method */ +typedef int (*xen_pt_reg_size_init_fn) + (XenPCIPassthroughState *, const XenPTRegGroupInfo *, + uint32_t base_offset, uint8_t *size); + +/* emulated register group infomation */ +struct XenPTRegGroupInfo { + uint8_t grp_id; + XenPTRegisterGroupType grp_type; + uint8_t grp_size; + xen_pt_reg_size_init_fn size_init; + XenPTRegInfo *emu_regs; +}; + +/* emul register group management table */ +typedef struct XenPTRegGroup { + QLIST_ENTRY(XenPTRegGroup) entries; + const XenPTRegGroupInfo *reg_grp; + uint32_t base_offset; + uint8_t size; + QLIST_HEAD(, XenPTReg) reg_tbl_list; +} XenPTRegGroup; + + +#define XEN_PT_UNASSIGNED_PIRQ (-1) +typedef struct XenPTMSI { + uint16_t flags; + uint32_t addr_lo; /* guest message address */ + uint32_t addr_hi; /* guest message upper address */ + uint16_t data; /* guest message data */ + uint32_t ctrl_offset; /* saved control offset */ + int pirq; /* guest pirq corresponding */ + bool initialized; /* when guest MSI is initialized */ + bool mapped; /* when pirq is mapped */ +} XenPTMSI; + +typedef struct XenPTMSIXEntry { + int pirq; + uint64_t addr; + uint32_t data; + uint32_t vector_ctrl; + bool updated; /* indicate whether MSI ADDR or DATA is updated */ +} XenPTMSIXEntry; +typedef struct XenPTMSIX { + uint32_t ctrl_offset; + bool enabled; + int total_entries; + int bar_index; + uint64_t table_base; + uint32_t table_offset_adjust; /* page align mmap */ + uint64_t mmio_base_addr; + MemoryRegion mmio; + void *phys_iomem_base; + XenPTMSIXEntry msix_entry[0]; +} XenPTMSIX; + +struct XenPCIPassthroughState { + PCIDevice dev; + + PCIHostDeviceAddress hostaddr; + bool is_virtfn; + XenHostPCIDevice real_device; + XenPTRegion bases[PCI_NUM_REGIONS]; /* Access regions */ + QLIST_HEAD(, XenPTRegGroup) reg_grps; + + uint32_t machine_irq; + + XenPTMSI *msi; + XenPTMSIX *msix; + + MemoryRegion bar[PCI_NUM_REGIONS - 1]; + MemoryRegion rom; + + MemoryListener memory_listener; +}; + +int xen_pt_config_init(XenPCIPassthroughState *s); +void xen_pt_config_delete(XenPCIPassthroughState *s); +XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address); +XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address); +int xen_pt_bar_offset_to_index(uint32_t offset); + +static inline pcibus_t xen_pt_get_emul_size(XenPTBarFlag flag, pcibus_t r_size) +{ + /* align resource size (memory type only) */ + if (flag == XEN_PT_BAR_FLAG_MEM) { + return (r_size + XC_PAGE_SIZE - 1) & XC_PAGE_MASK; + } else { + return r_size; + } +} + +/* INTx */ +/* The PCI Local Bus Specification, Rev. 
3.0, + * Section 6.2.4 Miscellaneous Registers, pp 223 + * outlines 5 valid values for the interrupt pin (intx). + * 0: For devices (or device functions) that don't use an interrupt in + * 1: INTA# + * 2: INTB# + * 3: INTC# + * 4: INTD# + * + * Xen uses the following 4 values for intx + * 0: INTA# + * 1: INTB# + * 2: INTC# + * 3: INTD# + * + * Observing that these list of values are not the same, xen_pt_pci_read_intx() + * uses the following mapping from hw to xen values. + * This seems to reflect the current usage within Xen. + * + * PCI hardware | Xen | Notes + * ----------------+-----+---------------------------------------------------- + * 0 | 0 | No interrupt + * 1 | 0 | INTA# + * 2 | 1 | INTB# + * 3 | 2 | INTC# + * 4 | 3 | INTD# + * any other value | 0 | This should never happen, log error message + */ + +static inline uint8_t xen_pt_pci_read_intx(XenPCIPassthroughState *s) +{ + uint8_t v = 0; + xen_host_pci_get_byte(&s->real_device, PCI_INTERRUPT_PIN, &v); + return v; +} + +static inline uint8_t xen_pt_pci_intx(XenPCIPassthroughState *s) +{ + uint8_t r_val = xen_pt_pci_read_intx(s); + + XEN_PT_LOG(&s->dev, "intx=%i\n", r_val); + if (r_val < 1 || r_val > 4) { + XEN_PT_LOG(&s->dev, "Interrupt pin read from hardware is out of range:" + " value=%i, acceptable range is 1 - 4\n", r_val); + r_val = 0; + } else { + r_val -= 1; + } + + return r_val; +} + +/* MSI/MSI-X */ +int xen_pt_msi_set_enable(XenPCIPassthroughState *s, bool en); +int xen_pt_msi_setup(XenPCIPassthroughState *s); +int xen_pt_msi_update(XenPCIPassthroughState *d); +void xen_pt_msi_disable(XenPCIPassthroughState *s); + +int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base); +void xen_pt_msix_delete(XenPCIPassthroughState *s); +int xen_pt_msix_update(XenPCIPassthroughState *s); +int xen_pt_msix_update_remap(XenPCIPassthroughState *s, int bar_index); +void xen_pt_msix_disable(XenPCIPassthroughState *s); + +static inline bool xen_pt_has_msix_mapping(XenPCIPassthroughState *s, int bar) +{ + return s->msix && s->msix->bar_index == bar; +} + + +#endif /* !XEN_PT_H */ diff --git a/hw/xen_pt_config_init.c b/hw/xen_pt_config_init.c new file mode 100644 index 0000000000..00eb3d997d --- /dev/null +++ b/hw/xen_pt_config_init.c @@ -0,0 +1,1869 @@ +/* + * Copyright (c) 2007, Neocleus Corporation. + * Copyright (c) 2007, Intel Corporation. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Alex Novik <alex@neocleus.com> + * Allen Kay <allen.m.kay@intel.com> + * Guy Zana <guy@neocleus.com> + * + * This file implements direct PCI assignment to a HVM guest + */ + +#include "qemu-timer.h" +#include "xen_backend.h" +#include "xen_pt.h" + +#define XEN_PT_MERGE_VALUE(value, data, val_mask) \ + (((value) & (val_mask)) | ((data) & ~(val_mask))) + +#define XEN_PT_INVALID_REG 0xFFFFFFFF /* invalid register value */ + +/* prototype */ + +static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg, + uint32_t real_offset, uint32_t *data); + + +/* helper */ + +/* A return value of 1 means the capability should NOT be exposed to guest. */ +static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id) +{ + switch (grp_id) { + case PCI_CAP_ID_EXP: + /* The PCI Express Capability Structure of the VF of Intel 82599 10GbE + * Controller looks trivial, e.g., the PCI Express Capabilities + * Register is 0. We should not try to expose it to guest. 
+ * + * The datasheet is available at + * http://download.intel.com/design/network/datashts/82599_datasheet.pdf + * + * See 'Table 9.7. VF PCIe Configuration Space' of the datasheet, the + * PCI Express Capability Structure of the VF of Intel 82599 10GbE + * Controller looks trivial, e.g., the PCI Express Capabilities + * Register is 0, so the Capability Version is 0 and + * xen_pt_pcie_size_init() would fail. + */ + if (d->vendor_id == PCI_VENDOR_ID_INTEL && + d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) { + return 1; + } + break; + } + return 0; +} + +/* find emulate register group entry */ +XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address) +{ + XenPTRegGroup *entry = NULL; + + /* find register group entry */ + QLIST_FOREACH(entry, &s->reg_grps, entries) { + /* check address */ + if ((entry->base_offset <= address) + && ((entry->base_offset + entry->size) > address)) { + return entry; + } + } + + /* group entry not found */ + return NULL; +} + +/* find emulate register entry */ +XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address) +{ + XenPTReg *reg_entry = NULL; + XenPTRegInfo *reg = NULL; + uint32_t real_offset = 0; + + /* find register entry */ + QLIST_FOREACH(reg_entry, ®_grp->reg_tbl_list, entries) { + reg = reg_entry->reg; + real_offset = reg_grp->base_offset + reg->offset; + /* check address */ + if ((real_offset <= address) + && ((real_offset + reg->size) > address)) { + return reg_entry; + } + } + + return NULL; +} + + +/**************** + * general register functions + */ + +/* register initialization function */ + +static int xen_pt_common_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + *data = reg->init_val; + return 0; +} + +/* Read register functions */ + +static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint8_t *value, uint8_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint8_t valid_emu_mask = 0; + + /* emulate byte register */ + valid_emu_mask = reg->emu_mask & valid_mask; + *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); + + return 0; +} +static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint16_t *value, uint16_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint16_t valid_emu_mask = 0; + + /* emulate word register */ + valid_emu_mask = reg->emu_mask & valid_mask; + *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); + + return 0; +} +static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint32_t *value, uint32_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint32_t valid_emu_mask = 0; + + /* emulate long register */ + valid_emu_mask = reg->emu_mask & valid_mask; + *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); + + return 0; +} + +/* Write register functions */ + +static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint8_t *val, uint8_t dev_value, + uint8_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint8_t writable_mask = 0; + uint8_t throughable_mask = 0; + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); + + /* create value for writing to I/O device register */ + throughable_mask = ~reg->emu_mask & valid_mask; + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + return 0; +} +static int 
xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint16_t *val, uint16_t dev_value, + uint16_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint16_t writable_mask = 0; + uint16_t throughable_mask = 0; + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); + + /* create value for writing to I/O device register */ + throughable_mask = ~reg->emu_mask & valid_mask; + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + return 0; +} +static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint32_t *val, uint32_t dev_value, + uint32_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint32_t writable_mask = 0; + uint32_t throughable_mask = 0; + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); + + /* create value for writing to I/O device register */ + throughable_mask = ~reg->emu_mask & valid_mask; + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + return 0; +} + + +/* XenPTRegInfo declaration + * - only for emulated register (either a part or whole bit). + * - for passthrough register that need special behavior (like interacting with + * other component), set emu_mask to all 0 and specify r/w func properly. + * - do NOT use ALL F for init_val, otherwise the tbl will not be registered. + */ + +/******************** + * Header Type0 + */ + +static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + *data = s->real_device.vendor_id; + return 0; +} +static int xen_pt_device_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + *data = s->real_device.device_id; + return 0; +} +static int xen_pt_status_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + XenPTRegGroup *reg_grp_entry = NULL; + XenPTReg *reg_entry = NULL; + uint32_t reg_field = 0; + + /* find Header register group */ + reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST); + if (reg_grp_entry) { + /* find Capabilities Pointer register */ + reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST); + if (reg_entry) { + /* check Capabilities Pointer register */ + if (reg_entry->data) { + reg_field |= PCI_STATUS_CAP_LIST; + } else { + reg_field &= ~PCI_STATUS_CAP_LIST; + } + } else { + xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*" + " for Capabilities Pointer register." + " (%s)\n", __func__); + return -1; + } + } else { + xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup" + " for Header. 
(%s)\n", __func__); + return -1; + } + + *data = reg_field; + return 0; +} +static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + /* read PCI_HEADER_TYPE */ + *data = reg->init_val | 0x80; + return 0; +} + +/* initialize Interrupt Pin register */ +static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + *data = xen_pt_pci_read_intx(s); + return 0; +} + +/* Command register */ +static int xen_pt_cmd_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint16_t *value, uint16_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint16_t valid_emu_mask = 0; + uint16_t emu_mask = reg->emu_mask; + + if (s->is_virtfn) { + emu_mask |= PCI_COMMAND_MEMORY; + } + + /* emulate word register */ + valid_emu_mask = emu_mask & valid_mask; + *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); + + return 0; +} +static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint16_t *val, uint16_t dev_value, + uint16_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint16_t writable_mask = 0; + uint16_t throughable_mask = 0; + uint16_t emu_mask = reg->emu_mask; + + if (s->is_virtfn) { + emu_mask |= PCI_COMMAND_MEMORY; + } + + /* modify emulate register */ + writable_mask = ~reg->ro_mask & valid_mask; + cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); + + /* create value for writing to I/O device register */ + throughable_mask = ~emu_mask & valid_mask; + + if (*val & PCI_COMMAND_INTX_DISABLE) { + throughable_mask |= PCI_COMMAND_INTX_DISABLE; + } else { + if (s->machine_irq) { + throughable_mask |= PCI_COMMAND_INTX_DISABLE; + } + } + + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + return 0; +} + +/* BAR */ +#define XEN_PT_BAR_MEM_RO_MASK 0x0000000F /* BAR ReadOnly mask(Memory) */ +#define XEN_PT_BAR_MEM_EMU_MASK 0xFFFFFFF0 /* BAR emul mask(Memory) */ +#define XEN_PT_BAR_IO_RO_MASK 0x00000003 /* BAR ReadOnly mask(I/O) */ +#define XEN_PT_BAR_IO_EMU_MASK 0xFFFFFFFC /* BAR emul mask(I/O) */ + +static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s, + XenPTRegInfo *reg) +{ + PCIDevice *d = &s->dev; + XenPTRegion *region = NULL; + PCIIORegion *r; + int index = 0; + + /* check 64bit BAR */ + index = xen_pt_bar_offset_to_index(reg->offset); + if ((0 < index) && (index < PCI_ROM_SLOT)) { + int type = s->real_device.io_regions[index - 1].type; + + if ((type & XEN_HOST_PCI_REGION_TYPE_MEM) + && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) { + region = &s->bases[index - 1]; + if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) { + return XEN_PT_BAR_FLAG_UPPER; + } + } + } + + /* check unused BAR */ + r = &d->io_regions[index]; + if (r->size == 0) { + return XEN_PT_BAR_FLAG_UNUSED; + } + + /* for ExpROM BAR */ + if (index == PCI_ROM_SLOT) { + return XEN_PT_BAR_FLAG_MEM; + } + + /* check BAR I/O indicator */ + if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) { + return XEN_PT_BAR_FLAG_IO; + } else { + return XEN_PT_BAR_FLAG_MEM; + } +} + +static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr) +{ + if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) { + return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK); + } else { + return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK); + } +} + +static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg, + uint32_t real_offset, uint32_t *data) +{ + uint32_t 
reg_field = 0; + int index; + + index = xen_pt_bar_offset_to_index(reg->offset); + if (index < 0 || index >= PCI_NUM_REGIONS) { + XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index); + return -1; + } + + /* set BAR flag */ + s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, reg); + if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) { + reg_field = XEN_PT_INVALID_REG; + } + + *data = reg_field; + return 0; +} +static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint32_t *value, uint32_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint32_t valid_emu_mask = 0; + uint32_t bar_emu_mask = 0; + int index; + + /* get BAR index */ + index = xen_pt_bar_offset_to_index(reg->offset); + if (index < 0 || index >= PCI_NUM_REGIONS) { + XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index); + return -1; + } + + /* use fixed-up value from kernel sysfs */ + *value = base_address_with_flags(&s->real_device.io_regions[index]); + + /* set emulate mask depend on BAR flag */ + switch (s->bases[index].bar_flag) { + case XEN_PT_BAR_FLAG_MEM: + bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK; + break; + case XEN_PT_BAR_FLAG_IO: + bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK; + break; + case XEN_PT_BAR_FLAG_UPPER: + bar_emu_mask = XEN_PT_BAR_ALLF; + break; + default: + break; + } + + /* emulate BAR */ + valid_emu_mask = bar_emu_mask & valid_mask; + *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); + + return 0; +} +static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint32_t *val, uint32_t dev_value, + uint32_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + XenPTRegion *base = NULL; + PCIDevice *d = &s->dev; + const PCIIORegion *r; + uint32_t writable_mask = 0; + uint32_t throughable_mask = 0; + uint32_t bar_emu_mask = 0; + uint32_t bar_ro_mask = 0; + uint32_t r_size = 0; + int index = 0; + + index = xen_pt_bar_offset_to_index(reg->offset); + if (index < 0 || index >= PCI_NUM_REGIONS) { + XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index); + return -1; + } + + r = &d->io_regions[index]; + base = &s->bases[index]; + r_size = xen_pt_get_emul_size(base->bar_flag, r->size); + + /* set emulate mask and read-only mask values depend on the BAR flag */ + switch (s->bases[index].bar_flag) { + case XEN_PT_BAR_FLAG_MEM: + bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK; + bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1); + break; + case XEN_PT_BAR_FLAG_IO: + bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK; + bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1); + break; + case XEN_PT_BAR_FLAG_UPPER: + bar_emu_mask = XEN_PT_BAR_ALLF; + bar_ro_mask = 0; /* all upper 32bit are R/W */ + break; + default: + break; + } + + /* modify emulate register */ + writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask; + cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); + + /* check whether we need to update the virtual region address or not */ + switch (s->bases[index].bar_flag) { + case XEN_PT_BAR_FLAG_MEM: + /* nothing to do */ + break; + case XEN_PT_BAR_FLAG_IO: + /* nothing to do */ + break; + case XEN_PT_BAR_FLAG_UPPER: + if (cfg_entry->data) { + if (cfg_entry->data != (XEN_PT_BAR_ALLF & ~bar_ro_mask)) { + XEN_PT_WARN(d, "Guest attempt to set high MMIO Base Address. " + "Ignore mapping. 
" + "(offset: 0x%02x, high address: 0x%08x)\n", + reg->offset, cfg_entry->data); + } + } + break; + default: + break; + } + + /* create value for writing to I/O device register */ + throughable_mask = ~bar_emu_mask & valid_mask; + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + return 0; +} + +/* write Exp ROM BAR */ +static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s, + XenPTReg *cfg_entry, uint32_t *val, + uint32_t dev_value, uint32_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + XenPTRegion *base = NULL; + PCIDevice *d = (PCIDevice *)&s->dev; + uint32_t writable_mask = 0; + uint32_t throughable_mask = 0; + pcibus_t r_size = 0; + uint32_t bar_emu_mask = 0; + uint32_t bar_ro_mask = 0; + + r_size = d->io_regions[PCI_ROM_SLOT].size; + base = &s->bases[PCI_ROM_SLOT]; + /* align memory type resource size */ + r_size = xen_pt_get_emul_size(base->bar_flag, r_size); + + /* set emulate mask and read-only mask */ + bar_emu_mask = reg->emu_mask; + bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE; + + /* modify emulate register */ + writable_mask = ~bar_ro_mask & valid_mask; + cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); + + /* create value for writing to I/O device register */ + throughable_mask = ~bar_emu_mask & valid_mask; + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + return 0; +} + +/* Header Type0 reg static infomation table */ +static XenPTRegInfo xen_pt_emu_reg_header0[] = { + /* Vendor ID reg */ + { + .offset = PCI_VENDOR_ID, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0xFFFF, + .emu_mask = 0xFFFF, + .init = xen_pt_vendor_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* Device ID reg */ + { + .offset = PCI_DEVICE_ID, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0xFFFF, + .emu_mask = 0xFFFF, + .init = xen_pt_device_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* Command reg */ + { + .offset = PCI_COMMAND, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0xF880, + .emu_mask = 0x0740, + .init = xen_pt_common_reg_init, + .u.w.read = xen_pt_cmd_reg_read, + .u.w.write = xen_pt_cmd_reg_write, + }, + /* Capabilities Pointer reg */ + { + .offset = PCI_CAPABILITY_LIST, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_ptr_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Status reg */ + /* use emulated Cap Ptr value to initialize, + * so need to be declared after Cap Ptr reg + */ + { + .offset = PCI_STATUS, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0x06FF, + .emu_mask = 0x0010, + .init = xen_pt_status_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* Cache Line Size reg */ + { + .offset = PCI_CACHE_LINE_SIZE, + .size = 1, + .init_val = 0x00, + .ro_mask = 0x00, + .emu_mask = 0xFF, + .init = xen_pt_common_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Latency Timer reg */ + { + .offset = PCI_LATENCY_TIMER, + .size = 1, + .init_val = 0x00, + .ro_mask = 0x00, + .emu_mask = 0xFF, + .init = xen_pt_common_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Header Type reg */ + { + .offset = PCI_HEADER_TYPE, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0x00, + .init = xen_pt_header_type_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = 
xen_pt_byte_reg_write, + }, + /* Interrupt Line reg */ + { + .offset = PCI_INTERRUPT_LINE, + .size = 1, + .init_val = 0x00, + .ro_mask = 0x00, + .emu_mask = 0xFF, + .init = xen_pt_common_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Interrupt Pin reg */ + { + .offset = PCI_INTERRUPT_PIN, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_irqpin_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* BAR 0 reg */ + /* mask of BAR need to be decided later, depends on IO/MEM type */ + { + .offset = PCI_BASE_ADDRESS_0, + .size = 4, + .init_val = 0x00000000, + .init = xen_pt_bar_reg_init, + .u.dw.read = xen_pt_bar_reg_read, + .u.dw.write = xen_pt_bar_reg_write, + }, + /* BAR 1 reg */ + { + .offset = PCI_BASE_ADDRESS_1, + .size = 4, + .init_val = 0x00000000, + .init = xen_pt_bar_reg_init, + .u.dw.read = xen_pt_bar_reg_read, + .u.dw.write = xen_pt_bar_reg_write, + }, + /* BAR 2 reg */ + { + .offset = PCI_BASE_ADDRESS_2, + .size = 4, + .init_val = 0x00000000, + .init = xen_pt_bar_reg_init, + .u.dw.read = xen_pt_bar_reg_read, + .u.dw.write = xen_pt_bar_reg_write, + }, + /* BAR 3 reg */ + { + .offset = PCI_BASE_ADDRESS_3, + .size = 4, + .init_val = 0x00000000, + .init = xen_pt_bar_reg_init, + .u.dw.read = xen_pt_bar_reg_read, + .u.dw.write = xen_pt_bar_reg_write, + }, + /* BAR 4 reg */ + { + .offset = PCI_BASE_ADDRESS_4, + .size = 4, + .init_val = 0x00000000, + .init = xen_pt_bar_reg_init, + .u.dw.read = xen_pt_bar_reg_read, + .u.dw.write = xen_pt_bar_reg_write, + }, + /* BAR 5 reg */ + { + .offset = PCI_BASE_ADDRESS_5, + .size = 4, + .init_val = 0x00000000, + .init = xen_pt_bar_reg_init, + .u.dw.read = xen_pt_bar_reg_read, + .u.dw.write = xen_pt_bar_reg_write, + }, + /* Expansion ROM BAR reg */ + { + .offset = PCI_ROM_ADDRESS, + .size = 4, + .init_val = 0x00000000, + .ro_mask = 0x000007FE, + .emu_mask = 0xFFFFF800, + .init = xen_pt_bar_reg_init, + .u.dw.read = xen_pt_long_reg_read, + .u.dw.write = xen_pt_exp_rom_bar_reg_write, + }, + { + .size = 0, + }, +}; + + +/********************************* + * Vital Product Data Capability + */ + +/* Vital Product Data Capability Structure reg static infomation table */ +static XenPTRegInfo xen_pt_emu_reg_vpd[] = { + { + .offset = PCI_CAP_LIST_NEXT, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_ptr_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + { + .size = 0, + }, +}; + + +/************************************** + * Vendor Specific Capability + */ + +/* Vendor Specific Capability Structure reg static infomation table */ +static XenPTRegInfo xen_pt_emu_reg_vendor[] = { + { + .offset = PCI_CAP_LIST_NEXT, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_ptr_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + { + .size = 0, + }, +}; + + +/***************************** + * PCI Express Capability + */ + +static inline uint8_t get_capability_version(XenPCIPassthroughState *s, + uint32_t offset) +{ + uint8_t flags = pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS); + return flags & PCI_EXP_FLAGS_VERS; +} + +static inline uint8_t get_device_type(XenPCIPassthroughState *s, + uint32_t offset) +{ + uint8_t flags = pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS); + return (flags & PCI_EXP_FLAGS_TYPE) >> 4; +} + +/* initialize Link Control register */ +static int 
xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); + uint8_t dev_type = get_device_type(s, real_offset - reg->offset); + + /* no need to initialize in case of Root Complex Integrated Endpoint + * with cap_ver 1.x + */ + if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) { + *data = XEN_PT_INVALID_REG; + } + + *data = reg->init_val; + return 0; +} +/* initialize Device Control 2 register */ +static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); + + /* no need to initialize in case of cap_ver 1.x */ + if (cap_ver == 1) { + *data = XEN_PT_INVALID_REG; + } + + *data = reg->init_val; + return 0; +} +/* initialize Link Control 2 register */ +static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); + uint32_t reg_field = 0; + + /* no need to initialize in case of cap_ver 1.x */ + if (cap_ver == 1) { + reg_field = XEN_PT_INVALID_REG; + } else { + /* set Supported Link Speed */ + uint8_t lnkcap = pci_get_byte(s->dev.config + real_offset - reg->offset + + PCI_EXP_LNKCAP); + reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap; + } + + *data = reg_field; + return 0; +} + +/* PCI Express Capability Structure reg static infomation table */ +static XenPTRegInfo xen_pt_emu_reg_pcie[] = { + /* Next Pointer reg */ + { + .offset = PCI_CAP_LIST_NEXT, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_ptr_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Device Capabilities reg */ + { + .offset = PCI_EXP_DEVCAP, + .size = 4, + .init_val = 0x00000000, + .ro_mask = 0x1FFCFFFF, + .emu_mask = 0x10000000, + .init = xen_pt_common_reg_init, + .u.dw.read = xen_pt_long_reg_read, + .u.dw.write = xen_pt_long_reg_write, + }, + /* Device Control reg */ + { + .offset = PCI_EXP_DEVCTL, + .size = 2, + .init_val = 0x2810, + .ro_mask = 0x8400, + .emu_mask = 0xFFFF, + .init = xen_pt_common_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* Link Control reg */ + { + .offset = PCI_EXP_LNKCTL, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0xFC34, + .emu_mask = 0xFFFF, + .init = xen_pt_linkctrl_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* Device Control 2 reg */ + { + .offset = 0x28, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0xFFE0, + .emu_mask = 0xFFFF, + .init = xen_pt_devctrl2_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* Link Control 2 reg */ + { + .offset = 0x30, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0xE040, + .emu_mask = 0xFFFF, + .init = xen_pt_linkctrl2_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + { + .size = 0, + }, +}; + + +/********************************* + * Power Management Capability + */ + +/* read Power Management Control/Status register */ +static int xen_pt_pmcsr_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint16_t *value, uint16_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint16_t valid_emu_mask = reg->emu_mask; + + valid_emu_mask |= PCI_PM_CTRL_STATE_MASK | PCI_PM_CTRL_NO_SOFT_RESET; + 
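+    /* The power-state field and NO_SOFT_RESET are added to the emulated
+     * set here, so the merge below takes these bits from cfg_entry->data
+     * (the emulated copy) instead of from the real device's PMCSR. */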
+ valid_emu_mask = valid_emu_mask & valid_mask; + *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); + + return 0; +} +/* write Power Management Control/Status register */ +static int xen_pt_pmcsr_reg_write(XenPCIPassthroughState *s, + XenPTReg *cfg_entry, uint16_t *val, + uint16_t dev_value, uint16_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint16_t emu_mask = reg->emu_mask; + uint16_t writable_mask = 0; + uint16_t throughable_mask = 0; + + emu_mask |= PCI_PM_CTRL_STATE_MASK | PCI_PM_CTRL_NO_SOFT_RESET; + + /* modify emulate register */ + writable_mask = emu_mask & ~reg->ro_mask & valid_mask; + cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); + + /* create value for writing to I/O device register */ + throughable_mask = ~emu_mask & valid_mask; + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + return 0; +} + +/* Power Management Capability reg static infomation table */ +static XenPTRegInfo xen_pt_emu_reg_pm[] = { + /* Next Pointer reg */ + { + .offset = PCI_CAP_LIST_NEXT, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_ptr_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Power Management Capabilities reg */ + { + .offset = PCI_CAP_FLAGS, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0xFFFF, + .emu_mask = 0xF9C8, + .init = xen_pt_common_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* PCI Power Management Control/Status reg */ + { + .offset = PCI_PM_CTRL, + .size = 2, + .init_val = 0x0008, + .ro_mask = 0xE1FC, + .emu_mask = 0x8100, + .init = xen_pt_common_reg_init, + .u.w.read = xen_pt_pmcsr_reg_read, + .u.w.write = xen_pt_pmcsr_reg_write, + }, + { + .size = 0, + }, +}; + + +/******************************** + * MSI Capability + */ + +/* Helper */ +static bool xen_pt_msgdata_check_type(uint32_t offset, uint16_t flags) +{ + /* check the offset whether matches the type or not */ + bool is_32 = (offset == PCI_MSI_DATA_32) && !(flags & PCI_MSI_FLAGS_64BIT); + bool is_64 = (offset == PCI_MSI_DATA_64) && (flags & PCI_MSI_FLAGS_64BIT); + return is_32 || is_64; +} + +/* Message Control register */ +static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + PCIDevice *d = &s->dev; + XenPTMSI *msi = s->msi; + uint16_t reg_field = 0; + + /* use I/O device register's value as initial value */ + reg_field = pci_get_word(d->config + real_offset); + + if (reg_field & PCI_MSI_FLAGS_ENABLE) { + XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n"); + xen_host_pci_set_word(&s->real_device, real_offset, + reg_field & ~PCI_MSI_FLAGS_ENABLE); + } + msi->flags |= reg_field; + msi->ctrl_offset = real_offset; + msi->initialized = false; + msi->mapped = false; + + *data = reg->init_val; + return 0; +} +static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s, + XenPTReg *cfg_entry, uint16_t *val, + uint16_t dev_value, uint16_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + XenPTMSI *msi = s->msi; + uint16_t writable_mask = 0; + uint16_t throughable_mask = 0; + uint16_t raw_val; + + /* Currently no support for multi-vector */ + if (*val & PCI_MSI_FLAGS_QSIZE) { + XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val); + } + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); + 
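+    /* fold the newly written control bits into the cached MSI flags; the
+     * enable bit is deliberately excluded here and is handled further down,
+     * once the physical MSI has been set up and bound. */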
msi->flags |= cfg_entry->data & ~PCI_MSI_FLAGS_ENABLE; + + /* create value for writing to I/O device register */ + raw_val = *val; + throughable_mask = ~reg->emu_mask & valid_mask; + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + /* update MSI */ + if (raw_val & PCI_MSI_FLAGS_ENABLE) { + /* setup MSI pirq for the first time */ + if (!msi->initialized) { + /* Init physical one */ + XEN_PT_LOG(&s->dev, "setup MSI\n"); + if (xen_pt_msi_setup(s)) { + /* We do not broadcast the error to the framework code, so + * that MSI errors are contained in MSI emulation code and + * QEMU can go on running. + * Guest MSI would be actually not working. + */ + *val &= ~PCI_MSI_FLAGS_ENABLE; + XEN_PT_WARN(&s->dev, "Can not map MSI.\n"); + return 0; + } + if (xen_pt_msi_update(s)) { + *val &= ~PCI_MSI_FLAGS_ENABLE; + XEN_PT_WARN(&s->dev, "Can not bind MSI\n"); + return 0; + } + msi->initialized = true; + msi->mapped = true; + } + msi->flags |= PCI_MSI_FLAGS_ENABLE; + } else { + msi->flags &= ~PCI_MSI_FLAGS_ENABLE; + } + + /* pass through MSI_ENABLE bit */ + *val &= ~PCI_MSI_FLAGS_ENABLE; + *val |= raw_val & PCI_MSI_FLAGS_ENABLE; + + return 0; +} + +/* initialize Message Upper Address register */ +static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + /* no need to initialize in case of 32 bit type */ + if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) { + *data = XEN_PT_INVALID_REG; + } else { + *data = reg->init_val; + } + + return 0; +} +/* this function will be called twice (for 32 bit and 64 bit type) */ +/* initialize Message Data register */ +static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + uint32_t flags = s->msi->flags; + uint32_t offset = reg->offset; + + /* check the offset whether matches the type or not */ + if (xen_pt_msgdata_check_type(offset, flags)) { + *data = reg->init_val; + } else { + *data = XEN_PT_INVALID_REG; + } + return 0; +} + +/* write Message Address register */ +static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s, + XenPTReg *cfg_entry, uint32_t *val, + uint32_t dev_value, uint32_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint32_t writable_mask = 0; + uint32_t throughable_mask = 0; + uint32_t old_addr = cfg_entry->data; + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); + s->msi->addr_lo = cfg_entry->data; + + /* create value for writing to I/O device register */ + throughable_mask = ~reg->emu_mask & valid_mask; + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + /* update MSI */ + if (cfg_entry->data != old_addr) { + if (s->msi->mapped) { + xen_pt_msi_update(s); + } + } + + return 0; +} +/* write Message Upper Address register */ +static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s, + XenPTReg *cfg_entry, uint32_t *val, + uint32_t dev_value, uint32_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint32_t writable_mask = 0; + uint32_t throughable_mask = 0; + uint32_t old_addr = cfg_entry->data; + + /* check whether the type is 64 bit or not */ + if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) { + XEN_PT_ERR(&s->dev, + "Can't write to the upper address without 64 bit support\n"); + return -1; + } + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + cfg_entry->data = XEN_PT_MERGE_VALUE(*val, 
cfg_entry->data, writable_mask); + /* update the msi_info too */ + s->msi->addr_hi = cfg_entry->data; + + /* create value for writing to I/O device register */ + throughable_mask = ~reg->emu_mask & valid_mask; + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + /* update MSI */ + if (cfg_entry->data != old_addr) { + if (s->msi->mapped) { + xen_pt_msi_update(s); + } + } + + return 0; +} + + +/* this function will be called twice (for 32 bit and 64 bit type) */ +/* write Message Data register */ +static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s, + XenPTReg *cfg_entry, uint16_t *val, + uint16_t dev_value, uint16_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + XenPTMSI *msi = s->msi; + uint16_t writable_mask = 0; + uint16_t throughable_mask = 0; + uint16_t old_data = cfg_entry->data; + uint32_t offset = reg->offset; + + /* check the offset whether matches the type or not */ + if (!xen_pt_msgdata_check_type(offset, msi->flags)) { + /* exit I/O emulator */ + XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n"); + return -1; + } + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); + /* update the msi_info too */ + msi->data = cfg_entry->data; + + /* create value for writing to I/O device register */ + throughable_mask = ~reg->emu_mask & valid_mask; + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + /* update MSI */ + if (cfg_entry->data != old_data) { + if (msi->mapped) { + xen_pt_msi_update(s); + } + } + + return 0; +} + +/* MSI Capability Structure reg static infomation table */ +static XenPTRegInfo xen_pt_emu_reg_msi[] = { + /* Next Pointer reg */ + { + .offset = PCI_CAP_LIST_NEXT, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_ptr_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Message Control reg */ + { + .offset = PCI_MSI_FLAGS, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0xFF8E, + .emu_mask = 0x007F, + .init = xen_pt_msgctrl_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_msgctrl_reg_write, + }, + /* Message Address reg */ + { + .offset = PCI_MSI_ADDRESS_LO, + .size = 4, + .init_val = 0x00000000, + .ro_mask = 0x00000003, + .emu_mask = 0xFFFFFFFF, + .no_wb = 1, + .init = xen_pt_common_reg_init, + .u.dw.read = xen_pt_long_reg_read, + .u.dw.write = xen_pt_msgaddr32_reg_write, + }, + /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */ + { + .offset = PCI_MSI_ADDRESS_HI, + .size = 4, + .init_val = 0x00000000, + .ro_mask = 0x00000000, + .emu_mask = 0xFFFFFFFF, + .no_wb = 1, + .init = xen_pt_msgaddr64_reg_init, + .u.dw.read = xen_pt_long_reg_read, + .u.dw.write = xen_pt_msgaddr64_reg_write, + }, + /* Message Data reg (16 bits of data for 32-bit devices) */ + { + .offset = PCI_MSI_DATA_32, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0x0000, + .emu_mask = 0xFFFF, + .no_wb = 1, + .init = xen_pt_msgdata_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_msgdata_reg_write, + }, + /* Message Data reg (16 bits of data for 64-bit devices) */ + { + .offset = PCI_MSI_DATA_64, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0x0000, + .emu_mask = 0xFFFF, + .no_wb = 1, + .init = xen_pt_msgdata_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_msgdata_reg_write, + }, + { + .size = 0, + }, +}; + + +/************************************** + * MSI-X Capability + */ 
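/*
 * Illustrative sketch (standalone, not part of this patch): the generic
 * byte/word/long handlers above, e.g. xen_pt_word_reg_write(), and the
 * MSI-X control handler below all split a guest config-space write with
 * XEN_PT_MERGE_VALUE().  Bits covered by emu_mask (and not by ro_mask)
 * update only the emulated copy kept in cfg_entry->data; the remaining
 * bits are merged with the device's current value and written through to
 * the real hardware.  MERGE is a local copy of XEN_PT_MERGE_VALUE, and
 * the masks and register values are invented for the example; only the
 * merge rule itself is taken from the handlers above.
 */
#include <stdint.h>
#include <stdio.h>

#define MERGE(value, data, val_mask) \
    (((value) & (val_mask)) | ((data) & ~(val_mask)))

int main(void)
{
    uint16_t emu_mask  = 0x00F0;  /* hypothetical: bits kept only in emulation */
    uint16_t ro_mask   = 0x000C;  /* hypothetical: read-only bits              */
    uint16_t valid     = 0xFFFF;  /* full 16-bit access                        */

    uint16_t emu_data  = 0x00A0;  /* current cfg_entry->data                   */
    uint16_t dev_value = 0x5678;  /* current value read from the device        */
    uint16_t guest_val = 0x1234;  /* value the guest writes                    */

    /* writable emulated bits land in the emulated copy only ...              */
    uint16_t writable  = emu_mask & ~ro_mask & valid;
    emu_data = MERGE(guest_val, emu_data, writable);           /* -> 0x0030 */

    /* ... and only the non-emulated bits reach the real device               */
    uint16_t through   = ~emu_mask & valid;
    uint16_t wire_val  = MERGE(guest_val, dev_value, through); /* -> 0x1274 */

    printf("emulated copy 0x%04x, written to device 0x%04x\n",
           emu_data, wire_val);
    return 0;
}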
+ +/* Message Control register for MSI-X */ +static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + PCIDevice *d = &s->dev; + uint16_t reg_field = 0; + + /* use I/O device register's value as initial value */ + reg_field = pci_get_word(d->config + real_offset); + + if (reg_field & PCI_MSIX_FLAGS_ENABLE) { + XEN_PT_LOG(d, "MSIX already enabled, disabling it first\n"); + xen_host_pci_set_word(&s->real_device, real_offset, + reg_field & ~PCI_MSIX_FLAGS_ENABLE); + } + + s->msix->ctrl_offset = real_offset; + + *data = reg->init_val; + return 0; +} +static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s, + XenPTReg *cfg_entry, uint16_t *val, + uint16_t dev_value, uint16_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint16_t writable_mask = 0; + uint16_t throughable_mask = 0; + int debug_msix_enabled_old; + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); + + /* create value for writing to I/O device register */ + throughable_mask = ~reg->emu_mask & valid_mask; + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + /* update MSI-X */ + if ((*val & PCI_MSIX_FLAGS_ENABLE) + && !(*val & PCI_MSIX_FLAGS_MASKALL)) { + xen_pt_msix_update(s); + } + + debug_msix_enabled_old = s->msix->enabled; + s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE); + if (s->msix->enabled != debug_msix_enabled_old) { + XEN_PT_LOG(&s->dev, "%s MSI-X\n", + s->msix->enabled ? "enable" : "disable"); + } + + return 0; +} + +/* MSI-X Capability Structure reg static infomation table */ +static XenPTRegInfo xen_pt_emu_reg_msix[] = { + /* Next Pointer reg */ + { + .offset = PCI_CAP_LIST_NEXT, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_ptr_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Message Control reg */ + { + .offset = PCI_MSI_FLAGS, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0x3FFF, + .emu_mask = 0x0000, + .init = xen_pt_msixctrl_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_msixctrl_reg_write, + }, + { + .size = 0, + }, +}; + + +/**************************** + * Capabilities + */ + +/* capability structure register group size functions */ + +static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s, + const XenPTRegGroupInfo *grp_reg, + uint32_t base_offset, uint8_t *size) +{ + *size = grp_reg->grp_size; + return 0; +} +/* get Vendor Specific Capability Structure register group size */ +static int xen_pt_vendor_size_init(XenPCIPassthroughState *s, + const XenPTRegGroupInfo *grp_reg, + uint32_t base_offset, uint8_t *size) +{ + *size = pci_get_byte(s->dev.config + base_offset + 0x02); + return 0; +} +/* get PCI Express Capability Structure register group size */ +static int xen_pt_pcie_size_init(XenPCIPassthroughState *s, + const XenPTRegGroupInfo *grp_reg, + uint32_t base_offset, uint8_t *size) +{ + PCIDevice *d = &s->dev; + uint8_t version = get_capability_version(s, base_offset); + uint8_t type = get_device_type(s, base_offset); + uint8_t pcie_size = 0; + + + /* calculate size depend on capability version and device/port type */ + /* in case of PCI Express Base Specification Rev 1.x */ + if (version == 1) { + /* The PCI Express Capabilities, Device Capabilities, and Device + * Status/Control registers are required for all PCI Express devices. 
+ * The Link Capabilities and Link Status/Control are required for all + * Endpoints that are not Root Complex Integrated Endpoints. Endpoints + * are not required to implement registers other than those listed + * above and terminate the capability structure. + */ + switch (type) { + case PCI_EXP_TYPE_ENDPOINT: + case PCI_EXP_TYPE_LEG_END: + pcie_size = 0x14; + break; + case PCI_EXP_TYPE_RC_END: + /* has no link */ + pcie_size = 0x0C; + break; + /* only EndPoint passthrough is supported */ + case PCI_EXP_TYPE_ROOT_PORT: + case PCI_EXP_TYPE_UPSTREAM: + case PCI_EXP_TYPE_DOWNSTREAM: + case PCI_EXP_TYPE_PCI_BRIDGE: + case PCI_EXP_TYPE_PCIE_BRIDGE: + case PCI_EXP_TYPE_RC_EC: + default: + XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type); + return -1; + } + } + /* in case of PCI Express Base Specification Rev 2.0 */ + else if (version == 2) { + switch (type) { + case PCI_EXP_TYPE_ENDPOINT: + case PCI_EXP_TYPE_LEG_END: + case PCI_EXP_TYPE_RC_END: + /* For Functions that do not implement the registers, + * these spaces must be hardwired to 0b. + */ + pcie_size = 0x3C; + break; + /* only EndPoint passthrough is supported */ + case PCI_EXP_TYPE_ROOT_PORT: + case PCI_EXP_TYPE_UPSTREAM: + case PCI_EXP_TYPE_DOWNSTREAM: + case PCI_EXP_TYPE_PCI_BRIDGE: + case PCI_EXP_TYPE_PCIE_BRIDGE: + case PCI_EXP_TYPE_RC_EC: + default: + XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type); + return -1; + } + } else { + XEN_PT_ERR(d, "Unsupported capability version %#x.\n", version); + return -1; + } + + *size = pcie_size; + return 0; +} +/* get MSI Capability Structure register group size */ +static int xen_pt_msi_size_init(XenPCIPassthroughState *s, + const XenPTRegGroupInfo *grp_reg, + uint32_t base_offset, uint8_t *size) +{ + PCIDevice *d = &s->dev; + uint16_t msg_ctrl = 0; + uint8_t msi_size = 0xa; + + msg_ctrl = pci_get_word(d->config + (base_offset + PCI_MSI_FLAGS)); + + /* check if 64-bit address is capable of per-vector masking */ + if (msg_ctrl & PCI_MSI_FLAGS_64BIT) { + msi_size += 4; + } + if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) { + msi_size += 10; + } + + s->msi = g_new0(XenPTMSI, 1); + s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ; + + *size = msi_size; + return 0; +} +/* get MSI-X Capability Structure register group size */ +static int xen_pt_msix_size_init(XenPCIPassthroughState *s, + const XenPTRegGroupInfo *grp_reg, + uint32_t base_offset, uint8_t *size) +{ + int rc = 0; + + rc = xen_pt_msix_init(s, base_offset); + + if (rc < 0) { + XEN_PT_ERR(&s->dev, "Internal error: Invalid xen_pt_msix_init.\n"); + return rc; + } + + *size = grp_reg->grp_size; + return 0; +} + + +static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = { + /* Header Type0 reg group */ + { + .grp_id = 0xFF, + .grp_type = XEN_PT_GRP_TYPE_EMU, + .grp_size = 0x40, + .size_init = xen_pt_reg_grp_size_init, + .emu_regs = xen_pt_emu_reg_header0, + }, + /* PCI PowerManagement Capability reg group */ + { + .grp_id = PCI_CAP_ID_PM, + .grp_type = XEN_PT_GRP_TYPE_EMU, + .grp_size = PCI_PM_SIZEOF, + .size_init = xen_pt_reg_grp_size_init, + .emu_regs = xen_pt_emu_reg_pm, + }, + /* AGP Capability Structure reg group */ + { + .grp_id = PCI_CAP_ID_AGP, + .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, + .grp_size = 0x30, + .size_init = xen_pt_reg_grp_size_init, + }, + /* Vital Product Data Capability Structure reg group */ + { + .grp_id = PCI_CAP_ID_VPD, + .grp_type = XEN_PT_GRP_TYPE_EMU, + .grp_size = 0x08, + .size_init = xen_pt_reg_grp_size_init, + .emu_regs = xen_pt_emu_reg_vpd, + }, + /* Slot Identification reg group */ + { + .grp_id = 
PCI_CAP_ID_SLOTID, + .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, + .grp_size = 0x04, + .size_init = xen_pt_reg_grp_size_init, + }, + /* MSI Capability Structure reg group */ + { + .grp_id = PCI_CAP_ID_MSI, + .grp_type = XEN_PT_GRP_TYPE_EMU, + .grp_size = 0xFF, + .size_init = xen_pt_msi_size_init, + .emu_regs = xen_pt_emu_reg_msi, + }, + /* PCI-X Capabilities List Item reg group */ + { + .grp_id = PCI_CAP_ID_PCIX, + .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, + .grp_size = 0x18, + .size_init = xen_pt_reg_grp_size_init, + }, + /* Vendor Specific Capability Structure reg group */ + { + .grp_id = PCI_CAP_ID_VNDR, + .grp_type = XEN_PT_GRP_TYPE_EMU, + .grp_size = 0xFF, + .size_init = xen_pt_vendor_size_init, + .emu_regs = xen_pt_emu_reg_vendor, + }, + /* SHPC Capability List Item reg group */ + { + .grp_id = PCI_CAP_ID_SHPC, + .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, + .grp_size = 0x08, + .size_init = xen_pt_reg_grp_size_init, + }, + /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */ + { + .grp_id = PCI_CAP_ID_SSVID, + .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, + .grp_size = 0x08, + .size_init = xen_pt_reg_grp_size_init, + }, + /* AGP 8x Capability Structure reg group */ + { + .grp_id = PCI_CAP_ID_AGP3, + .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, + .grp_size = 0x30, + .size_init = xen_pt_reg_grp_size_init, + }, + /* PCI Express Capability Structure reg group */ + { + .grp_id = PCI_CAP_ID_EXP, + .grp_type = XEN_PT_GRP_TYPE_EMU, + .grp_size = 0xFF, + .size_init = xen_pt_pcie_size_init, + .emu_regs = xen_pt_emu_reg_pcie, + }, + /* MSI-X Capability Structure reg group */ + { + .grp_id = PCI_CAP_ID_MSIX, + .grp_type = XEN_PT_GRP_TYPE_EMU, + .grp_size = 0x0C, + .size_init = xen_pt_msix_size_init, + .emu_regs = xen_pt_emu_reg_msix, + }, + { + .grp_size = 0, + }, +}; + +/* initialize Capabilities Pointer or Next Pointer register */ +static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + int i; + uint8_t *config = s->dev.config; + uint32_t reg_field = pci_get_byte(config + real_offset); + uint8_t cap_id = 0; + + /* find capability offset */ + while (reg_field) { + for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) { + if (xen_pt_hide_dev_cap(&s->real_device, + xen_pt_emu_reg_grps[i].grp_id)) { + continue; + } + + cap_id = pci_get_byte(config + reg_field + PCI_CAP_LIST_ID); + if (xen_pt_emu_reg_grps[i].grp_id == cap_id) { + if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) { + goto out; + } + /* ignore the 0 hardwired capability, find next one */ + break; + } + } + + /* next capability */ + reg_field = pci_get_byte(config + reg_field + PCI_CAP_LIST_NEXT); + } + +out: + *data = reg_field; + return 0; +} + + +/************* + * Main + */ + +static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap) +{ + uint8_t id; + unsigned max_cap = PCI_CAP_MAX; + uint8_t pos = PCI_CAPABILITY_LIST; + uint8_t status = 0; + + if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) { + return 0; + } + if ((status & PCI_STATUS_CAP_LIST) == 0) { + return 0; + } + + while (max_cap--) { + if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) { + break; + } + if (pos < PCI_CONFIG_HEADER_SIZE) { + break; + } + + pos &= ~3; + if (xen_host_pci_get_byte(&s->real_device, + pos + PCI_CAP_LIST_ID, &id)) { + break; + } + + if (id == 0xff) { + break; + } + if (id == cap) { + return pos; + } + + pos += PCI_CAP_LIST_NEXT; + } + return 0; +} + +static int xen_pt_config_reg_init(XenPCIPassthroughState *s, + XenPTRegGroup *reg_grp, 
XenPTRegInfo *reg) +{ + XenPTReg *reg_entry; + uint32_t data = 0; + int rc = 0; + + reg_entry = g_new0(XenPTReg, 1); + reg_entry->reg = reg; + + if (reg->init) { + /* initialize emulate register */ + rc = reg->init(s, reg_entry->reg, + reg_grp->base_offset + reg->offset, &data); + if (rc < 0) { + free(reg_entry); + return rc; + } + if (data == XEN_PT_INVALID_REG) { + /* free unused BAR register entry */ + free(reg_entry); + return 0; + } + /* set register value */ + reg_entry->data = data; + } + /* list add register entry */ + QLIST_INSERT_HEAD(®_grp->reg_tbl_list, reg_entry, entries); + + return 0; +} + +int xen_pt_config_init(XenPCIPassthroughState *s) +{ + int i, rc; + + QLIST_INIT(&s->reg_grps); + + for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) { + uint32_t reg_grp_offset = 0; + XenPTRegGroup *reg_grp_entry = NULL; + + if (xen_pt_emu_reg_grps[i].grp_id != 0xFF) { + if (xen_pt_hide_dev_cap(&s->real_device, + xen_pt_emu_reg_grps[i].grp_id)) { + continue; + } + + reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id); + + if (!reg_grp_offset) { + continue; + } + } + + reg_grp_entry = g_new0(XenPTRegGroup, 1); + QLIST_INIT(®_grp_entry->reg_tbl_list); + QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries); + + reg_grp_entry->base_offset = reg_grp_offset; + reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i; + if (xen_pt_emu_reg_grps[i].size_init) { + /* get register group size */ + rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp, + reg_grp_offset, + ®_grp_entry->size); + if (rc < 0) { + xen_pt_config_delete(s); + return rc; + } + } + + if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) { + if (xen_pt_emu_reg_grps[i].emu_regs) { + int j = 0; + XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs; + /* initialize capability register */ + for (j = 0; regs->size != 0; j++, regs++) { + /* initialize capability register */ + rc = xen_pt_config_reg_init(s, reg_grp_entry, regs); + if (rc < 0) { + xen_pt_config_delete(s); + return rc; + } + } + } + } + } + + return 0; +} + +/* delete all emulate register */ +void xen_pt_config_delete(XenPCIPassthroughState *s) +{ + struct XenPTRegGroup *reg_group, *next_grp; + struct XenPTReg *reg, *next_reg; + + /* free MSI/MSI-X info table */ + if (s->msix) { + xen_pt_msix_delete(s); + } + if (s->msi) { + g_free(s->msi); + } + + /* free all register group entry */ + QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) { + /* free all register entry */ + QLIST_FOREACH_SAFE(reg, ®_group->reg_tbl_list, entries, next_reg) { + QLIST_REMOVE(reg, entries); + g_free(reg); + } + + QLIST_REMOVE(reg_group, entries); + g_free(reg_group); + } +} diff --git a/hw/xen_pt_msi.c b/hw/xen_pt_msi.c new file mode 100644 index 0000000000..2299cc7772 --- /dev/null +++ b/hw/xen_pt_msi.c @@ -0,0 +1,620 @@ +/* + * Copyright (c) 2007, Intel Corporation. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + * Jiang Yunhong <yunhong.jiang@intel.com> + * + * This file implements direct PCI assignment to a HVM guest + */ + +#include <sys/mman.h> + +#include "xen_backend.h" +#include "xen_pt.h" +#include "apic-msidef.h" + + +#define XEN_PT_AUTO_ASSIGN -1 + +/* shift count for gflags */ +#define XEN_PT_GFLAGS_SHIFT_DEST_ID 0 +#define XEN_PT_GFLAGS_SHIFT_RH 8 +#define XEN_PT_GFLAGS_SHIFT_DM 9 +#define XEN_PT_GFLAGSSHIFT_DELIV_MODE 12 +#define XEN_PT_GFLAGSSHIFT_TRG_MODE 15 + + +/* + * Helpers + */ + +static inline uint8_t msi_vector(uint32_t data) +{ + return (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT; +} + +static inline uint8_t msi_dest_id(uint32_t addr) +{ + return (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT; +} + +static inline uint32_t msi_ext_dest_id(uint32_t addr_hi) +{ + return addr_hi & 0xffffff00; +} + +static uint32_t msi_gflags(uint32_t data, uint64_t addr) +{ + uint32_t result = 0; + int rh, dm, dest_id, deliv_mode, trig_mode; + + rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1; + dm = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1; + dest_id = msi_dest_id(addr); + deliv_mode = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7; + trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; + + result = dest_id | (rh << XEN_PT_GFLAGS_SHIFT_RH) + | (dm << XEN_PT_GFLAGS_SHIFT_DM) + | (deliv_mode << XEN_PT_GFLAGSSHIFT_DELIV_MODE) + | (trig_mode << XEN_PT_GFLAGSSHIFT_TRG_MODE); + + return result; +} + +static inline uint64_t msi_addr64(XenPTMSI *msi) +{ + return (uint64_t)msi->addr_hi << 32 | msi->addr_lo; +} + +static int msi_msix_enable(XenPCIPassthroughState *s, + uint32_t address, + uint16_t flag, + bool enable) +{ + uint16_t val = 0; + + if (!address) { + return -1; + } + + xen_host_pci_get_word(&s->real_device, address, &val); + if (enable) { + val |= flag; + } else { + val &= ~flag; + } + xen_host_pci_set_word(&s->real_device, address, val); + return 0; +} + +static int msi_msix_setup(XenPCIPassthroughState *s, + uint64_t addr, + uint32_t data, + int *ppirq, + bool is_msix, + int msix_entry, + bool is_not_mapped) +{ + uint8_t gvec = msi_vector(data); + int rc = 0; + + assert((!is_msix && msix_entry == 0) || is_msix); + + if (gvec == 0) { + /* if gvec is 0, the guest is asking for a particular pirq that + * is passed as dest_id */ + *ppirq = msi_ext_dest_id(addr >> 32) | msi_dest_id(addr); + if (!*ppirq) { + /* this probably identifies an misconfiguration of the guest, + * try the emulated path */ + *ppirq = XEN_PT_UNASSIGNED_PIRQ; + } else { + XEN_PT_LOG(&s->dev, "requested pirq %d for MSI%s" + " (vec: %#x, entry: %#x)\n", + *ppirq, is_msix ? "-X" : "", gvec, msix_entry); + } + } + + if (is_not_mapped) { + uint64_t table_base = 0; + + if (is_msix) { + table_base = s->msix->table_base; + } + + rc = xc_physdev_map_pirq_msi(xen_xc, xen_domid, XEN_PT_AUTO_ASSIGN, + ppirq, PCI_DEVFN(s->real_device.dev, + s->real_device.func), + s->real_device.bus, + msix_entry, table_base); + if (rc) { + XEN_PT_ERR(&s->dev, + "Mapping of MSI%s (rc: %i, vec: %#x, entry %#x)\n", + is_msix ? "-X" : "", rc, gvec, msix_entry); + return rc; + } + } + + return 0; +} +static int msi_msix_update(XenPCIPassthroughState *s, + uint64_t addr, + uint32_t data, + int pirq, + bool is_msix, + int msix_entry, + int *old_pirq) +{ + PCIDevice *d = &s->dev; + uint8_t gvec = msi_vector(data); + uint32_t gflags = msi_gflags(data, addr); + int rc = 0; + uint64_t table_addr = 0; + + XEN_PT_LOG(d, "Updating MSI%s with pirq %d gvec %#x gflags %#x" + " (entry: %#x)\n", + is_msix ? 
"-X" : "", pirq, gvec, gflags, msix_entry); + + if (is_msix) { + table_addr = s->msix->mmio_base_addr; + } + + rc = xc_domain_update_msi_irq(xen_xc, xen_domid, gvec, + pirq, gflags, table_addr); + + if (rc) { + XEN_PT_ERR(d, "Updating of MSI%s failed. (rc: %d)\n", + is_msix ? "-X" : "", rc); + + if (xc_physdev_unmap_pirq(xen_xc, xen_domid, *old_pirq)) { + XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed.\n", + is_msix ? "-X" : "", *old_pirq); + } + *old_pirq = XEN_PT_UNASSIGNED_PIRQ; + } + return rc; +} + +static int msi_msix_disable(XenPCIPassthroughState *s, + uint64_t addr, + uint32_t data, + int pirq, + bool is_msix, + bool is_binded) +{ + PCIDevice *d = &s->dev; + uint8_t gvec = msi_vector(data); + uint32_t gflags = msi_gflags(data, addr); + int rc = 0; + + if (pirq == XEN_PT_UNASSIGNED_PIRQ) { + return 0; + } + + if (is_binded) { + XEN_PT_LOG(d, "Unbind MSI%s with pirq %d, gvec %#x\n", + is_msix ? "-X" : "", pirq, gvec); + rc = xc_domain_unbind_msi_irq(xen_xc, xen_domid, gvec, pirq, gflags); + if (rc) { + XEN_PT_ERR(d, "Unbinding of MSI%s failed. (pirq: %d, gvec: %#x)\n", + is_msix ? "-X" : "", pirq, gvec); + return rc; + } + } + + XEN_PT_LOG(d, "Unmap MSI%s pirq %d\n", is_msix ? "-X" : "", pirq); + rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, pirq); + if (rc) { + XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (rc: %i)\n", + is_msix ? "-X" : "", pirq, rc); + return rc; + } + + return 0; +} + +/* + * MSI virtualization functions + */ + +int xen_pt_msi_set_enable(XenPCIPassthroughState *s, bool enable) +{ + XEN_PT_LOG(&s->dev, "%s MSI.\n", enable ? "enabling" : "disabling"); + + if (!s->msi) { + return -1; + } + + return msi_msix_enable(s, s->msi->ctrl_offset, PCI_MSI_FLAGS_ENABLE, + enable); +} + +/* setup physical msi, but don't enable it */ +int xen_pt_msi_setup(XenPCIPassthroughState *s) +{ + int pirq = XEN_PT_UNASSIGNED_PIRQ; + int rc = 0; + XenPTMSI *msi = s->msi; + + if (msi->initialized) { + XEN_PT_ERR(&s->dev, + "Setup physical MSI when it has been properly initialized.\n"); + return -1; + } + + rc = msi_msix_setup(s, msi_addr64(msi), msi->data, &pirq, false, 0, true); + if (rc) { + return rc; + } + + if (pirq < 0) { + XEN_PT_ERR(&s->dev, "Invalid pirq number: %d.\n", pirq); + return -1; + } + + msi->pirq = pirq; + XEN_PT_LOG(&s->dev, "MSI mapped with pirq %d.\n", pirq); + + return 0; +} + +int xen_pt_msi_update(XenPCIPassthroughState *s) +{ + XenPTMSI *msi = s->msi; + return msi_msix_update(s, msi_addr64(msi), msi->data, msi->pirq, + false, 0, &msi->pirq); +} + +void xen_pt_msi_disable(XenPCIPassthroughState *s) +{ + XenPTMSI *msi = s->msi; + + if (!msi) { + return; + } + + xen_pt_msi_set_enable(s, false); + + msi_msix_disable(s, msi_addr64(msi), msi->data, msi->pirq, false, + msi->initialized); + + /* clear msi info */ + msi->flags = 0; + msi->mapped = false; + msi->pirq = XEN_PT_UNASSIGNED_PIRQ; +} + +/* + * MSI-X virtualization functions + */ + +static int msix_set_enable(XenPCIPassthroughState *s, bool enabled) +{ + XEN_PT_LOG(&s->dev, "%s MSI-X.\n", enabled ? 
"enabling" : "disabling"); + + if (!s->msix) { + return -1; + } + + return msi_msix_enable(s, s->msix->ctrl_offset, PCI_MSIX_FLAGS_ENABLE, + enabled); +} + +static int xen_pt_msix_update_one(XenPCIPassthroughState *s, int entry_nr) +{ + XenPTMSIXEntry *entry = NULL; + int pirq; + int rc; + + if (entry_nr < 0 || entry_nr >= s->msix->total_entries) { + return -EINVAL; + } + + entry = &s->msix->msix_entry[entry_nr]; + + if (!entry->updated) { + return 0; + } + + pirq = entry->pirq; + + rc = msi_msix_setup(s, entry->data, entry->data, &pirq, true, entry_nr, + entry->pirq == XEN_PT_UNASSIGNED_PIRQ); + if (rc) { + return rc; + } + if (entry->pirq == XEN_PT_UNASSIGNED_PIRQ) { + entry->pirq = pirq; + } + + rc = msi_msix_update(s, entry->addr, entry->data, pirq, true, + entry_nr, &entry->pirq); + + if (!rc) { + entry->updated = false; + } + + return rc; +} + +int xen_pt_msix_update(XenPCIPassthroughState *s) +{ + XenPTMSIX *msix = s->msix; + int i; + + for (i = 0; i < msix->total_entries; i++) { + xen_pt_msix_update_one(s, i); + } + + return 0; +} + +void xen_pt_msix_disable(XenPCIPassthroughState *s) +{ + int i = 0; + + msix_set_enable(s, false); + + for (i = 0; i < s->msix->total_entries; i++) { + XenPTMSIXEntry *entry = &s->msix->msix_entry[i]; + + msi_msix_disable(s, entry->addr, entry->data, entry->pirq, true, true); + + /* clear MSI-X info */ + entry->pirq = XEN_PT_UNASSIGNED_PIRQ; + entry->updated = false; + } +} + +int xen_pt_msix_update_remap(XenPCIPassthroughState *s, int bar_index) +{ + XenPTMSIXEntry *entry; + int i, ret; + + if (!(s->msix && s->msix->bar_index == bar_index)) { + return 0; + } + + for (i = 0; i < s->msix->total_entries; i++) { + entry = &s->msix->msix_entry[i]; + if (entry->pirq != XEN_PT_UNASSIGNED_PIRQ) { + ret = xc_domain_unbind_pt_irq(xen_xc, xen_domid, entry->pirq, + PT_IRQ_TYPE_MSI, 0, 0, 0, 0); + if (ret) { + XEN_PT_ERR(&s->dev, "unbind MSI-X entry %d failed\n", + entry->pirq); + } + entry->updated = true; + } + } + return xen_pt_msix_update(s); +} + +static uint32_t get_entry_value(XenPTMSIXEntry *e, int offset) +{ + switch (offset) { + case PCI_MSIX_ENTRY_LOWER_ADDR: + return e->addr & UINT32_MAX; + case PCI_MSIX_ENTRY_UPPER_ADDR: + return e->addr >> 32; + case PCI_MSIX_ENTRY_DATA: + return e->data; + case PCI_MSIX_ENTRY_VECTOR_CTRL: + return e->vector_ctrl; + default: + return 0; + } +} + +static void set_entry_value(XenPTMSIXEntry *e, int offset, uint32_t val) +{ + switch (offset) { + case PCI_MSIX_ENTRY_LOWER_ADDR: + e->addr = (e->addr & ((uint64_t)UINT32_MAX << 32)) | val; + break; + case PCI_MSIX_ENTRY_UPPER_ADDR: + e->addr = (uint64_t)val << 32 | (e->addr & UINT32_MAX); + break; + case PCI_MSIX_ENTRY_DATA: + e->data = val; + break; + case PCI_MSIX_ENTRY_VECTOR_CTRL: + e->vector_ctrl = val; + break; + } +} + +static void pci_msix_write(void *opaque, target_phys_addr_t addr, + uint64_t val, unsigned size) +{ + XenPCIPassthroughState *s = opaque; + XenPTMSIX *msix = s->msix; + XenPTMSIXEntry *entry; + int entry_nr, offset; + + entry_nr = addr / PCI_MSIX_ENTRY_SIZE; + if (entry_nr < 0 || entry_nr >= msix->total_entries) { + XEN_PT_ERR(&s->dev, "asked MSI-X entry '%i' invalid!\n", entry_nr); + return; + } + entry = &msix->msix_entry[entry_nr]; + offset = addr % PCI_MSIX_ENTRY_SIZE; + + if (offset != PCI_MSIX_ENTRY_VECTOR_CTRL) { + const volatile uint32_t *vec_ctrl; + + if (get_entry_value(entry, offset) == val) { + return; + } + + /* + * If Xen intercepts the mask bit access, entry->vec_ctrl may not be + * up-to-date. Read from hardware directly. 
+ */ + vec_ctrl = s->msix->phys_iomem_base + entry_nr * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_VECTOR_CTRL; + + if (msix->enabled && !(*vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)) { + XEN_PT_ERR(&s->dev, "Can't update msix entry %d since MSI-X is" + " already enabled.\n", entry_nr); + return; + } + + entry->updated = true; + } + + set_entry_value(entry, offset, val); + + if (offset == PCI_MSIX_ENTRY_VECTOR_CTRL) { + if (msix->enabled && !(val & PCI_MSIX_ENTRY_CTRL_MASKBIT)) { + xen_pt_msix_update_one(s, entry_nr); + } + } +} + +static uint64_t pci_msix_read(void *opaque, target_phys_addr_t addr, + unsigned size) +{ + XenPCIPassthroughState *s = opaque; + XenPTMSIX *msix = s->msix; + int entry_nr, offset; + + entry_nr = addr / PCI_MSIX_ENTRY_SIZE; + if (entry_nr < 0) { + XEN_PT_ERR(&s->dev, "asked MSI-X entry '%i' invalid!\n", entry_nr); + return 0; + } + + offset = addr % PCI_MSIX_ENTRY_SIZE; + + if (addr < msix->total_entries * PCI_MSIX_ENTRY_SIZE) { + return get_entry_value(&msix->msix_entry[entry_nr], offset); + } else { + /* Pending Bit Array (PBA) */ + return *(uint32_t *)(msix->phys_iomem_base + addr); + } +} + +static const MemoryRegionOps pci_msix_ops = { + .read = pci_msix_read, + .write = pci_msix_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base) +{ + uint8_t id = 0; + uint16_t control = 0; + uint32_t table_off = 0; + int i, total_entries, bar_index; + XenHostPCIDevice *hd = &s->real_device; + PCIDevice *d = &s->dev; + int fd = -1; + XenPTMSIX *msix = NULL; + int rc = 0; + + rc = xen_host_pci_get_byte(hd, base + PCI_CAP_LIST_ID, &id); + if (rc) { + return rc; + } + + if (id != PCI_CAP_ID_MSIX) { + XEN_PT_ERR(d, "Invalid id %#x base %#x\n", id, base); + return -1; + } + + xen_host_pci_get_word(hd, base + PCI_MSIX_FLAGS, &control); + total_entries = control & PCI_MSIX_FLAGS_QSIZE; + total_entries += 1; + + s->msix = g_malloc0(sizeof (XenPTMSIX) + + total_entries * sizeof (XenPTMSIXEntry)); + msix = s->msix; + + msix->total_entries = total_entries; + for (i = 0; i < total_entries; i++) { + msix->msix_entry[i].pirq = XEN_PT_UNASSIGNED_PIRQ; + } + + memory_region_init_io(&msix->mmio, &pci_msix_ops, s, "xen-pci-pt-msix", + (total_entries * PCI_MSIX_ENTRY_SIZE + + XC_PAGE_SIZE - 1) + & XC_PAGE_MASK); + + xen_host_pci_get_long(hd, base + PCI_MSIX_TABLE, &table_off); + bar_index = msix->bar_index = table_off & PCI_MSIX_FLAGS_BIRMASK; + table_off = table_off & ~PCI_MSIX_FLAGS_BIRMASK; + msix->table_base = s->real_device.io_regions[bar_index].base_addr; + XEN_PT_LOG(d, "get MSI-X table BAR base 0x%"PRIx64"\n", msix->table_base); + + fd = open("/dev/mem", O_RDWR); + if (fd == -1) { + rc = -errno; + XEN_PT_ERR(d, "Can't open /dev/mem: %s\n", strerror(errno)); + goto error_out; + } + XEN_PT_LOG(d, "table_off = %#x, total_entries = %d\n", + table_off, total_entries); + msix->table_offset_adjust = table_off & 0x0fff; + msix->phys_iomem_base = + mmap(NULL, + total_entries * PCI_MSIX_ENTRY_SIZE + msix->table_offset_adjust, + PROT_READ, + MAP_SHARED | MAP_LOCKED, + fd, + msix->table_base + table_off - msix->table_offset_adjust); + close(fd); + if (msix->phys_iomem_base == MAP_FAILED) { + rc = -errno; + XEN_PT_ERR(d, "Can't map physical MSI-X table: %s\n", strerror(errno)); + goto error_out; + } + msix->phys_iomem_base = (char *)msix->phys_iomem_base + + msix->table_offset_adjust; + + XEN_PT_LOG(d, "mapping physical MSI-X table to %p\n", + 
msix->phys_iomem_base); + + memory_region_add_subregion_overlap(&s->bar[bar_index], table_off, + &msix->mmio, + 2); /* Priority: pci default + 1 */ + + return 0; + +error_out: + memory_region_destroy(&msix->mmio); + g_free(s->msix); + s->msix = NULL; + return rc; +} + +void xen_pt_msix_delete(XenPCIPassthroughState *s) +{ + XenPTMSIX *msix = s->msix; + + if (!msix) { + return; + } + + /* unmap the MSI-X memory mapped register area */ + if (msix->phys_iomem_base) { + XEN_PT_LOG(&s->dev, "unmapping physical MSI-X table from %p\n", + msix->phys_iomem_base); + munmap(msix->phys_iomem_base, msix->total_entries * PCI_MSIX_ENTRY_SIZE + + msix->table_offset_adjust); + } + + memory_region_del_subregion(&s->bar[msix->bar_index], &msix->mmio); + memory_region_destroy(&msix->mmio); + + g_free(s->msix); + s->msix = NULL; +} diff --git a/hw/xenfb.c b/hw/xenfb.c index 1bcf171b01..338800a4d9 100644 --- a/hw/xenfb.c +++ b/hw/xenfb.c @@ -35,19 +35,16 @@ #include <string.h> #include <time.h> -#include <xs.h> -#include <xenctrl.h> -#include <xen/event_channel.h> -#include <xen/io/xenbus.h> -#include <xen/io/fbif.h> -#include <xen/io/kbdif.h> -#include <xen/io/protocols.h> - #include "hw.h" #include "console.h" #include "qemu-char.h" #include "xen_backend.h" +#include <xen/event_channel.h> +#include <xen/io/fbif.h> +#include <xen/io/kbdif.h> +#include <xen/io/protocols.h> + #ifndef BTN_LEFT #define BTN_LEFT 0x110 /* from <linux/input.h> */ #endif diff --git a/hw/xilinx.h b/hw/xilinx.h index 35f35bd7fc..7df21eb958 100644 --- a/hw/xilinx.h +++ b/hw/xilinx.h @@ -6,7 +6,7 @@ xilinx_intc_create(target_phys_addr_t base, qemu_irq irq, int kind_of_intr) { DeviceState *dev; - dev = qdev_create(NULL, "xilinx,intc"); + dev = qdev_create(NULL, "xlnx.xps-intc"); qdev_prop_set_uint32(dev, "kind-of-intr", kind_of_intr); qdev_init_nofail(dev); sysbus_mmio_map(sysbus_from_qdev(dev), 0, base); @@ -16,12 +16,12 @@ xilinx_intc_create(target_phys_addr_t base, qemu_irq irq, int kind_of_intr) /* OPB Timer/Counter. 
*/ static inline DeviceState * -xilinx_timer_create(target_phys_addr_t base, qemu_irq irq, int nr, int freq) +xilinx_timer_create(target_phys_addr_t base, qemu_irq irq, int oto, int freq) { DeviceState *dev; - dev = qdev_create(NULL, "xilinx,timer"); - qdev_prop_set_uint32(dev, "nr-timers", nr); + dev = qdev_create(NULL, "xlnx,xps-timer"); + qdev_prop_set_uint32(dev, "one-timer-only", oto); qdev_prop_set_uint32(dev, "frequency", freq); qdev_init_nofail(dev); sysbus_mmio_map(sysbus_from_qdev(dev), 0, base); @@ -36,12 +36,12 @@ xilinx_ethlite_create(NICInfo *nd, target_phys_addr_t base, qemu_irq irq, { DeviceState *dev; - qemu_check_nic_model(nd, "xilinx-ethlite"); + qemu_check_nic_model(nd, "xlnx.xps-ethernetlite"); - dev = qdev_create(NULL, "xilinx,ethlite"); + dev = qdev_create(NULL, "xlnx.xps-ethernetlite"); qdev_set_nic_properties(dev, nd); - qdev_prop_set_uint32(dev, "txpingpong", txpingpong); - qdev_prop_set_uint32(dev, "rxpingpong", rxpingpong); + qdev_prop_set_uint32(dev, "tx-ping-pong", txpingpong); + qdev_prop_set_uint32(dev, "rx-ping-pong", rxpingpong); qdev_init_nofail(dev); sysbus_mmio_map(sysbus_from_qdev(dev), 0, base); sysbus_connect_irq(sysbus_from_qdev(dev), 0, irq); @@ -54,12 +54,12 @@ xilinx_axiethernet_create(void *dmach, int txmem, int rxmem) { DeviceState *dev; - qemu_check_nic_model(nd, "xilinx-axienet"); + qemu_check_nic_model(nd, "xlnx.axi-ethernet"); - dev = qdev_create(NULL, "xilinx,axienet"); + dev = qdev_create(NULL, "xlnx.axi-ethernet"); qdev_set_nic_properties(dev, nd); - qdev_prop_set_uint32(dev, "c_rxmem", rxmem); - qdev_prop_set_uint32(dev, "c_txmem", txmem); + qdev_prop_set_uint32(dev, "rxmem", rxmem); + qdev_prop_set_uint32(dev, "txmem", txmem); qdev_prop_set_ptr(dev, "dmach", dmach); qdev_init_nofail(dev); sysbus_mmio_map(sysbus_from_qdev(dev), 0, base); @@ -75,14 +75,14 @@ xilinx_axiethernetdma_create(void *dmach, { DeviceState *dev = NULL; - dev = qdev_create(NULL, "xilinx,axidma"); + dev = qdev_create(NULL, "xlnx.axi-dma"); qdev_prop_set_uint32(dev, "freqhz", freqhz); qdev_prop_set_ptr(dev, "dmach", dmach); qdev_init_nofail(dev); sysbus_mmio_map(sysbus_from_qdev(dev), 0, base); - sysbus_connect_irq(sysbus_from_qdev(dev), 0, irq2); - sysbus_connect_irq(sysbus_from_qdev(dev), 1, irq); + sysbus_connect_irq(sysbus_from_qdev(dev), 0, irq); + sysbus_connect_irq(sysbus_from_qdev(dev), 1, irq2); return dev; } diff --git a/hw/xilinx_axidma.c b/hw/xilinx_axidma.c index 85dfcbf2b9..f4bec37c7a 100644 --- a/hw/xilinx_axidma.c +++ b/hw/xilinx_axidma.c @@ -463,8 +463,8 @@ static int xilinx_axidma_init(SysBusDevice *dev) struct XilinxAXIDMA *s = FROM_SYSBUS(typeof(*s), dev); int i; - sysbus_init_irq(dev, &s->streams[1].irq); sysbus_init_irq(dev, &s->streams[0].irq); + sysbus_init_irq(dev, &s->streams[1].irq); if (!s->dmach) { hw_error("Unconnected DMA channel.\n"); @@ -473,7 +473,7 @@ static int xilinx_axidma_init(SysBusDevice *dev) xlx_dma_connect_dma(s->dmach, s, axidma_push); memory_region_init_io(&s->iomem, &axidma_ops, s, - "axidma", R_MAX * 4 * 2); + "xlnx.axi-dma", R_MAX * 4 * 2); sysbus_init_mmio(dev, &s->iomem); for (i = 0; i < 2; i++) { @@ -502,7 +502,7 @@ static void axidma_class_init(ObjectClass *klass, void *data) } static TypeInfo axidma_info = { - .name = "xilinx,axidma", + .name = "xlnx.axi-dma", .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(struct XilinxAXIDMA), .class_init = axidma_class_init, diff --git a/hw/xilinx_axienet.c b/hw/xilinx_axienet.c index 7526273b4d..2e8d8a59ba 100644 --- a/hw/xilinx_axienet.c +++ b/hw/xilinx_axienet.c @@ 
-872,8 +872,8 @@ static int xilinx_enet_init(SysBusDevice *dev) static Property xilinx_enet_properties[] = { DEFINE_PROP_UINT32("phyaddr", struct XilinxAXIEnet, c_phyaddr, 7), - DEFINE_PROP_UINT32("c_rxmem", struct XilinxAXIEnet, c_rxmem, 0x1000), - DEFINE_PROP_UINT32("c_txmem", struct XilinxAXIEnet, c_txmem, 0x1000), + DEFINE_PROP_UINT32("rxmem", struct XilinxAXIEnet, c_rxmem, 0x1000), + DEFINE_PROP_UINT32("txmem", struct XilinxAXIEnet, c_txmem, 0x1000), DEFINE_PROP_PTR("dmach", struct XilinxAXIEnet, dmach), DEFINE_NIC_PROPERTIES(struct XilinxAXIEnet, conf), DEFINE_PROP_END_OF_LIST(), @@ -889,7 +889,7 @@ static void xilinx_enet_class_init(ObjectClass *klass, void *data) } static TypeInfo xilinx_enet_info = { - .name = "xilinx,axienet", + .name = "xlnx.axi-ethernet", .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(struct XilinxAXIEnet), .class_init = xilinx_enet_class_init, diff --git a/hw/xilinx_ethlite.c b/hw/xilinx_ethlite.c index 857b33d172..affbb8bfff 100644 --- a/hw/xilinx_ethlite.c +++ b/hw/xilinx_ethlite.c @@ -216,7 +216,8 @@ static int xilinx_ethlite_init(SysBusDevice *dev) sysbus_init_irq(dev, &s->irq); s->rxbuf = 0; - memory_region_init_io(&s->mmio, ð_ops, s, "xilinx-ethlite", R_MAX * 4); + memory_region_init_io(&s->mmio, ð_ops, s, "xlnx.xps-ethernetlite", + R_MAX * 4); sysbus_init_mmio(dev, &s->mmio); qemu_macaddr_default_if_unset(&s->conf.macaddr); @@ -227,8 +228,8 @@ static int xilinx_ethlite_init(SysBusDevice *dev) } static Property xilinx_ethlite_properties[] = { - DEFINE_PROP_UINT32("txpingpong", struct xlx_ethlite, c_tx_pingpong, 1), - DEFINE_PROP_UINT32("rxpingpong", struct xlx_ethlite, c_rx_pingpong, 1), + DEFINE_PROP_UINT32("tx-ping-pong", struct xlx_ethlite, c_tx_pingpong, 1), + DEFINE_PROP_UINT32("rx-ping-pong", struct xlx_ethlite, c_rx_pingpong, 1), DEFINE_NIC_PROPERTIES(struct xlx_ethlite, conf), DEFINE_PROP_END_OF_LIST(), }; @@ -243,7 +244,7 @@ static void xilinx_ethlite_class_init(ObjectClass *klass, void *data) } static TypeInfo xilinx_ethlite_info = { - .name = "xilinx,ethlite", + .name = "xlnx.xps-ethernetlite", .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(struct xlx_ethlite), .class_init = xilinx_ethlite_class_init, diff --git a/hw/xilinx_intc.c b/hw/xilinx_intc.c index 553f8488f6..386fd30743 100644 --- a/hw/xilinx_intc.c +++ b/hw/xilinx_intc.c @@ -156,7 +156,7 @@ static int xilinx_intc_init(SysBusDevice *dev) qdev_init_gpio_in(&dev->qdev, irq_handler, 32); sysbus_init_irq(dev, &p->parent_irq); - memory_region_init_io(&p->mmio, &pic_ops, p, "xilinx-pic", R_MAX * 4); + memory_region_init_io(&p->mmio, &pic_ops, p, "xlnx.xps-intc", R_MAX * 4); sysbus_init_mmio(dev, &p->mmio); return 0; } @@ -176,7 +176,7 @@ static void xilinx_intc_class_init(ObjectClass *klass, void *data) } static TypeInfo xilinx_intc_info = { - .name = "xilinx,intc", + .name = "xlnx.xps-intc", .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(struct xlx_pic), .class_init = xilinx_intc_class_init, diff --git a/hw/xilinx_timer.c b/hw/xilinx_timer.c index 3ab2f2bb03..b562bd065e 100644 --- a/hw/xilinx_timer.c +++ b/hw/xilinx_timer.c @@ -23,7 +23,6 @@ */ #include "sysbus.h" -#include "qemu-timer.h" #include "ptimer.h" #define D(x) @@ -62,11 +61,16 @@ struct timerblock SysBusDevice busdev; MemoryRegion mmio; qemu_irq irq; - uint32_t nr_timers; + uint8_t one_timer_only; uint32_t freq_hz; struct xlx_timer *timers; }; +static inline unsigned int num_timers(struct timerblock *t) +{ + return 2 - t->one_timer_only; +} + static inline unsigned int timer_from_addr(target_phys_addr_t 
addr) { /* Timers get a 4x32bit control reg area each. */ @@ -78,7 +82,7 @@ static void timer_update_irq(struct timerblock *t) unsigned int i, irq = 0; uint32_t csr; - for (i = 0; i < t->nr_timers; i++) { + for (i = 0; i < num_timers(t); i++) { csr = t->timers[i].regs[R_TCSR]; irq |= (csr & TCSR_TINT) && (csr & TCSR_ENIT); } @@ -132,7 +136,7 @@ static void timer_enable(struct xlx_timer *xt) count = xt->regs[R_TLR]; else count = ~0 - xt->regs[R_TLR]; - ptimer_set_count(xt->ptimer, count); + ptimer_set_limit(xt->ptimer, count, 1); ptimer_run(xt->ptimer, 1); } @@ -202,8 +206,8 @@ static int xilinx_timer_init(SysBusDevice *dev) sysbus_init_irq(dev, &t->irq); /* Init all the ptimers. */ - t->timers = g_malloc0(sizeof t->timers[0] * t->nr_timers); - for (i = 0; i < t->nr_timers; i++) { + t->timers = g_malloc0(sizeof t->timers[0] * num_timers(t)); + for (i = 0; i < num_timers(t); i++) { struct xlx_timer *xt = &t->timers[i]; xt->parent = t; @@ -213,15 +217,15 @@ static int xilinx_timer_init(SysBusDevice *dev) ptimer_set_freq(xt->ptimer, t->freq_hz); } - memory_region_init_io(&t->mmio, &timer_ops, t, "xilinx-timer", - R_MAX * 4 * t->nr_timers); + memory_region_init_io(&t->mmio, &timer_ops, t, "xlnx,xps-timer", + R_MAX * 4 * num_timers(t)); sysbus_init_mmio(dev, &t->mmio); return 0; } static Property xilinx_timer_properties[] = { - DEFINE_PROP_UINT32("frequency", struct timerblock, freq_hz, 0), - DEFINE_PROP_UINT32("nr-timers", struct timerblock, nr_timers, 0), + DEFINE_PROP_UINT32("frequency", struct timerblock, freq_hz, 62 * 1000000), + DEFINE_PROP_UINT8("one-timer-only", struct timerblock, one_timer_only, 0), DEFINE_PROP_END_OF_LIST(), }; @@ -235,7 +239,7 @@ static void xilinx_timer_class_init(ObjectClass *klass, void *data) } static TypeInfo xilinx_timer_info = { - .name = "xilinx,timer", + .name = "xlnx,xps-timer", .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(struct timerblock), .class_init = xilinx_timer_class_init, diff --git a/hw/xilinx_uartlite.c b/hw/xilinx_uartlite.c index aa0170db49..d0f32db2c6 100644 --- a/hw/xilinx_uartlite.c +++ b/hw/xilinx_uartlite.c @@ -202,7 +202,8 @@ static int xilinx_uartlite_init(SysBusDevice *dev) sysbus_init_irq(dev, &s->irq); uart_update_status(s); - memory_region_init_io(&s->mmio, &uart_ops, s, "xilinx-uartlite", R_MAX * 4); + memory_region_init_io(&s->mmio, &uart_ops, s, "xlnx.xps-uartlite", + R_MAX * 4); sysbus_init_mmio(dev, &s->mmio); s->chr = qemu_char_get_next_serial(); @@ -219,7 +220,7 @@ static void xilinx_uartlite_class_init(ObjectClass *klass, void *data) } static TypeInfo xilinx_uartlite_info = { - .name = "xilinx,uartlite", + .name = "xlnx.xps-uartlite", .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof (struct xlx_uartlite), .class_init = xilinx_uartlite_class_init, diff --git a/hw/xilinx_zynq.c b/hw/xilinx_zynq.c index 7290c64a4c..7e6c27359e 100644 --- a/hw/xilinx_zynq.c +++ b/hw/xilinx_zynq.c @@ -50,7 +50,7 @@ static void zynq_init(ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { - CPUARMState *env = NULL; + ARMCPU *cpu; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *ext_ram = g_new(MemoryRegion, 1); MemoryRegion *ocm_ram = g_new(MemoryRegion, 1); @@ -66,12 +66,12 @@ static void zynq_init(ram_addr_t ram_size, const char *boot_device, cpu_model = "cortex-a9"; } - env = cpu_init(cpu_model); - if (!env) { + cpu = cpu_arm_init(cpu_model); + if (!cpu) { fprintf(stderr, "Unable to find CPU definition\n"); 
exit(1); } - irqp = arm_pic_init_cpu(env); + irqp = arm_pic_init_cpu(cpu); cpu_irq = irqp[ARM_PIC_CPU_IRQ]; /* max 2GB ram */ @@ -137,7 +137,7 @@ static void zynq_init(ram_addr_t ram_size, const char *boot_device, zynq_binfo.nb_cpus = 1; zynq_binfo.board_id = 0xd32; zynq_binfo.loader_start = 0; - arm_load_kernel(first_cpu, &zynq_binfo); + arm_load_kernel(arm_env_get_cpu(first_cpu), &zynq_binfo); } static QEMUMachine zynq_machine = { diff --git a/hw/xio3130_downstream.c b/hw/xio3130_downstream.c index 319624f212..56d1b353d0 100644 --- a/hw/xio3130_downstream.c +++ b/hw/xio3130_downstream.c @@ -41,14 +41,13 @@ static void xio3130_downstream_write_config(PCIDevice *d, uint32_t address, pci_bridge_write_config(d, address, val, len); pcie_cap_flr_write_config(d, address, val, len); pcie_cap_slot_write_config(d, address, val, len); - msi_write_config(d, address, val, len); pcie_aer_write_config(d, address, val, len); } static void xio3130_downstream_reset(DeviceState *qdev) { PCIDevice *d = PCI_DEVICE(qdev); - msi_reset(d); + pcie_cap_deverr_reset(d); pcie_cap_slot_reset(d); pcie_cap_ari_reset(d); diff --git a/hw/xio3130_upstream.c b/hw/xio3130_upstream.c index 34a99bba08..79725813a2 100644 --- a/hw/xio3130_upstream.c +++ b/hw/xio3130_upstream.c @@ -40,14 +40,13 @@ static void xio3130_upstream_write_config(PCIDevice *d, uint32_t address, { pci_bridge_write_config(d, address, val, len); pcie_cap_flr_write_config(d, address, val, len); - msi_write_config(d, address, val, len); pcie_aer_write_config(d, address, val, len); } static void xio3130_upstream_reset(DeviceState *qdev) { PCIDevice *d = PCI_DEVICE(qdev); - msi_reset(d); + pci_bridge_reset(qdev); pcie_cap_deverr_reset(d); } diff --git a/hw/xtensa/Makefile.objs b/hw/xtensa/Makefile.objs new file mode 100644 index 0000000000..79698e903d --- /dev/null +++ b/hw/xtensa/Makefile.objs @@ -0,0 +1,5 @@ +obj-y += xtensa_pic.o +obj-y += xtensa_sim.o +obj-y += xtensa_lx60.o + +obj-y := $(addprefix ../,$(obj-y)) diff --git a/hw/xtensa_lx60.c b/hw/xtensa_lx60.c index b153bfdddf..152eed95d8 100644 --- a/hw/xtensa_lx60.c +++ b/hw/xtensa_lx60.c @@ -34,6 +34,7 @@ #include "pc.h" #include "sysbus.h" #include "flash.h" +#include "blockdev.h" #include "xtensa_bootparam.h" typedef struct LxBoardDesc { @@ -301,7 +301,7 @@ static void z2_init(ram_addr_t ram_size, { MemoryRegion *address_space_mem = get_system_memory(); uint32_t sector_len = 0x10000; - PXA2xxState *cpu; + PXA2xxState *mpu; DriveInfo *dinfo; int be; void *z2_lcd; @@ -313,7 +313,7 @@ static void z2_init(ram_addr_t ram_size, } /* Setup CPU & memory */ - cpu = pxa270_init(address_space_mem, z2_binfo.ram_size, cpu_model); + mpu = pxa270_init(address_space_mem, z2_binfo.ram_size, cpu_model); #ifdef TARGET_WORDS_BIGENDIAN be = 1; @@ -337,25 +337,25 @@ static void z2_init(ram_addr_t ram_size, } /* setup keypad */ - pxa27x_register_keypad(cpu->kp, map, 0x100); + pxa27x_register_keypad(mpu->kp, map, 0x100); /* MMC/SD host */ - pxa2xx_mmci_handlers(cpu->mmc, + pxa2xx_mmci_handlers(mpu->mmc, NULL, - qdev_get_gpio_in(cpu->gpio, Z2_GPIO_SD_DETECT)); + qdev_get_gpio_in(mpu->gpio, Z2_GPIO_SD_DETECT)); type_register_static(&zipit_lcd_info); type_register_static(&aer915_info); - z2_lcd = ssi_create_slave(cpu->ssp[1], "zipit-lcd"); - bus = pxa2xx_i2c_bus(cpu->i2c[0]); + z2_lcd = ssi_create_slave(mpu->ssp[1], "zipit-lcd"); + bus = pxa2xx_i2c_bus(mpu->i2c[0]); i2c_create_slave(bus, "aer915", 0x55); wm = i2c_create_slave(bus, "wm8750", 0x1b); - cpu->i2s->opaque = wm; - cpu->i2s->codec_out = wm8750_dac_dat; - 
cpu->i2s->codec_in = wm8750_adc_dat; - wm8750_data_req_set(wm, cpu->i2s->data_req, cpu->i2s); + mpu->i2s->opaque = wm; + mpu->i2s->codec_out = wm8750_dac_dat; + mpu->i2s->codec_in = wm8750_adc_dat; + wm8750_data_req_set(wm, mpu->i2s->data_req, mpu->i2s); - qdev_connect_gpio_out(cpu->gpio, Z2_GPIO_LCD_CS, + qdev_connect_gpio_out(mpu->gpio, Z2_GPIO_LCD_CS, qemu_allocate_irqs(z2_lcd_cs, z2_lcd, 1)[0]); if (kernel_filename) { @@ -363,7 +363,7 @@ static void z2_init(ram_addr_t ram_size, z2_binfo.kernel_cmdline = kernel_cmdline; z2_binfo.initrd_filename = initrd_filename; z2_binfo.board_id = 0x6dd; - arm_load_kernel(&cpu->cpu->env, &z2_binfo); + arm_load_kernel(mpu->cpu, &z2_binfo); } } diff --git a/include/qemu/object.h b/include/qemu/object.h index d93b77293f..8b17776bb3 100644 --- a/include/qemu/object.h +++ b/include/qemu/object.h @@ -33,7 +33,7 @@ typedef struct TypeInfo TypeInfo; typedef struct InterfaceClass InterfaceClass; typedef struct InterfaceInfo InterfaceInfo; -#define TYPE_OBJECT NULL +#define TYPE_OBJECT "object" /** * SECTION:object.h @@ -291,10 +291,15 @@ struct Object * has occurred to allow a class to set its default virtual method pointers. * This is also the function to use to override virtual methods from a parent * class. + * @class_base_init: This function is called for all base classes after all + * parent class initialization has occurred, but before the class itself + * is initialized. This is the function to use to undo the effects of + * memcpy from the parent class to the descendents. * @class_finalize: This function is called during class destruction and is * meant to release and dynamic parameters allocated by @class_init. - * @class_data: Data to pass to the @class_init and @class_finalize functions. - * This can be useful when building dynamic classes. + * @class_data: Data to pass to the @class_init, @class_base_init and + * @class_finalize functions. This can be useful when building dynamic + * classes. * @interfaces: The list of interfaces associated with this type. This * should point to a static array that's terminated with a zero filled * element. @@ -312,6 +317,7 @@ struct TypeInfo size_t class_size; void (*class_init)(ObjectClass *klass, void *data); + void (*class_base_init)(ObjectClass *klass, void *data); void (*class_finalize)(ObjectClass *klass, void *data); void *class_data; @@ -521,8 +527,6 @@ const char *object_get_typename(Object *obj); */ Type type_register_static(const TypeInfo *info); -#define type_register_static_alias(info, name) do { } while (0) - /** * type_register: * @info: The #TypeInfo of the new type @@ -548,6 +552,14 @@ ObjectClass *object_class_dynamic_cast(ObjectClass *klass, const char *typename); /** + * object_class_get_parent: + * @klass: The class to obtain the parent for. + * + * Returns: The parent for @klass or %NULL if none. + */ +ObjectClass *object_class_get_parent(ObjectClass *klass); + +/** * object_class_get_name: * @klass: The class to obtain the QOM typename for. * @@ -623,6 +635,17 @@ void object_property_add(Object *obj, const char *name, const char *type, void object_property_del(Object *obj, const char *name, struct Error **errp); +/** + * object_property_find: + * @obj: the object + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Look up a property for an object and return its #ObjectProperty if found. 
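+ *
+ * A minimal usage sketch (the property name here is purely illustrative,
+ * and a NULL errp is assumed to be acceptable, as elsewhere in this API):
+ * look the property up before acting on it:
+ *
+ *   if (object_property_find(obj, "foo", NULL)) {
+ *       object_property_del(obj, "foo", NULL);
+ *   }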
+ */ +ObjectProperty *object_property_find(Object *obj, const char *name, + struct Error **errp); + void object_unparent(Object *obj); /** @@ -910,6 +933,20 @@ void object_property_add_str(Object *obj, const char *name, struct Error **errp); /** + * object_child_foreach: + * @obj: the object whose children will be navigated + * @fn: the iterator function to be called + * @opaque: an opaque value that will be passed to the iterator + * + * Call @fn passing each child of @obj and @opaque to it, until @fn returns + * non-zero. + * + * Returns: The last value returned by @fn, or 0 if there is no child. + */ +int object_child_foreach(Object *obj, int (*fn)(Object *child, void *opaque), + void *opaque); + +/** * container_get: * @root: root of the #path, e.g., object_get_root() * @path: path to the container diff --git a/include/qemu/ratelimit.h b/include/qemu/ratelimit.h new file mode 100644 index 0000000000..c6ac281141 --- /dev/null +++ b/include/qemu/ratelimit.h @@ -0,0 +1,48 @@ +/* + * Ratelimiting calculations + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QEMU_RATELIMIT_H +#define QEMU_RATELIMIT_H 1 + +typedef struct { + int64_t next_slice_time; + uint64_t slice_quota; + uint64_t slice_ns; + uint64_t dispatched; +} RateLimit; + +static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n) +{ + int64_t now = qemu_get_clock_ns(rt_clock); + + if (limit->next_slice_time < now) { + limit->next_slice_time = now + limit->slice_ns; + limit->dispatched = 0; + } + if (limit->dispatched == 0 || limit->dispatched + n <= limit->slice_quota) { + limit->dispatched += n; + return 0; + } else { + limit->dispatched = n; + return limit->next_slice_time - now; + } +} + +static inline void ratelimit_set_speed(RateLimit *limit, uint64_t speed, + uint64_t slice_ns) +{ + limit->slice_ns = slice_ns; + limit->slice_quota = ((double)speed * 1000000000ULL) / slice_ns; +} + +#endif @@ -22,6 +22,8 @@ #include "qemu-common.h" #include "qemu-barrier.h" +#include "qemu-option.h" +#include "qemu-config.h" #include "sysemu.h" #include "hw/hw.h" #include "hw/msi.h" diff --git a/libcacard/Makefile b/libcacard/Makefile index c6a896a739..63990b7003 100644 --- a/libcacard/Makefile +++ b/libcacard/Makefile @@ -2,29 +2,23 @@ -include $(SRC_PATH)/Makefile.objs -include $(SRC_PATH)/rules.mak -libcacard_srcpath=$(SRC_PATH)/libcacard libcacard_includedir=$(includedir)/cacard -$(call set-vpath, $(SRC_PATH):$(libcacard_srcpath)) - -# objects linked against normal qemu binaries, not compiled with libtool -QEMU_OBJS=$(addprefix ../,$(oslib-obj-y) qemu-timer-common.o $(trace-obj-y)) +$(call set-vpath, $(SRC_PATH)) # objects linked into a shared library, built with libtool with -fPIC if required -QEMU_OBJS_LIB=$(addsuffix .lo,$(basename $(QEMU_OBJS))) +QEMU_OBJS=$(oslib-obj-y) qemu-timer-common.o $(trace-obj-y) +QEMU_OBJS_LIB=$(patsubst %.o,%.lo,$(QEMU_OBJS)) QEMU_CFLAGS+=-I../ -libcacard.lib-y=$(addsuffix .lo,$(basename $(libcacard-y))) - -vscclient: $(libcacard-y) $(QEMU_OBJS) vscclient.o - $(call quiet-command,$(CC) -o $@ $^ $(libcacard_libs) $(LIBS)," LINK $@") +libcacard.lib-y=$(patsubst %.o,%.lo,$(libcacard-y)) clean: - rm -f *.o */*.o *.d */*.d *.a */*.a *~ */*~ vscclient *.lo .libs/* *.la *.pc - rm -Rf .libs + rm -f *.o */*.o *.d */*.d *.a */*.a *~ */*~ vscclient *.lo */*.lo .libs/* */.libs/* *.la */*.la 
*.pc + rm -Rf .libs */.libs -all: vscclient +all: libcacard.la libcacard.pc # Dummy command so that make thinks it has done something @true @@ -41,12 +35,14 @@ else libcacard.la: $(libcacard.lib-y) $(QEMU_OBJS_LIB) $(call quiet-command,$(LIBTOOL) --mode=link --quiet --tag=CC $(CC) -rpath $(libdir) -o $@ $^ $(libcacard_libs)," lt LINK $@") +libcacard_srcpath=$(SRC_PATH)/libcacard libcacard.pc: $(libcacard_srcpath)/libcacard.pc.in - sed -e 's|@LIBDIR@|$(libdir)|' \ + $(call quiet-command,sed -e 's|@LIBDIR@|$(libdir)|' \ -e 's|@INCLUDEDIR@|$(libcacard_includedir)|' \ -e 's|@VERSION@|$(shell cat $(SRC_PATH)/VERSION)|' \ -e 's|@PREFIX@|$(prefix)|' \ - < $(libcacard_srcpath)/libcacard.pc.in > libcacard.pc + < $(libcacard_srcpath)/libcacard.pc.in > libcacard.pc,\ + " GEN $@") .PHONY: install-libcacard diff --git a/linux-headers/asm-s390/kvm.h b/linux-headers/asm-s390/kvm.h index 96076676e2..bdcbe0f8dd 100644 --- a/linux-headers/asm-s390/kvm.h +++ b/linux-headers/asm-s390/kvm.h @@ -52,4 +52,9 @@ struct kvm_sync_regs { __u32 acrs[16]; /* access registers */ __u64 crs[16]; /* control registers */ }; + +#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) +#define KVM_REG_S390_EPOCHDIFF (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x2) +#define KVM_REG_S390_CPU_TIMER (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x3) +#define KVM_REG_S390_CLOCK_COMP (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x4) #endif diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index c4426ec73d..5a9d4e350d 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -616,6 +616,7 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_KVMCLOCK_CTRL 76 #define KVM_CAP_SIGNAL_MSI 77 #define KVM_CAP_PPC_GET_SMMU_INFO 78 +#define KVM_CAP_S390_COW 79 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/linux-user/Makefile.objs b/linux-user/Makefile.objs new file mode 100644 index 0000000000..5899d72d3e --- /dev/null +++ b/linux-user/Makefile.objs @@ -0,0 +1,7 @@ +obj-y = main.o syscall.o strace.o mmap.o signal.o \ + elfload.o linuxload.o uaccess.o cpu-uname.o + +obj-$(TARGET_HAS_BFLT) += flatload.o +obj-$(TARGET_I386) += vm86.o +obj-$(TARGET_ARM) += arm/nwfpe/ +obj-$(TARGET_M68K) += m68k-sim.o diff --git a/linux-user/arm/nwfpe/Makefile.objs b/linux-user/arm/nwfpe/Makefile.objs new file mode 100644 index 0000000000..51b0c32c2a --- /dev/null +++ b/linux-user/arm/nwfpe/Makefile.objs @@ -0,0 +1,2 @@ +obj-y = fpa11.o fpa11_cpdo.o fpa11_cpdt.o fpa11_cprt.o fpopcode.o +obj-y += single_cpdo.o double_cpdo.o extended_cpdo.o diff --git a/linux-user/cpu-uname.c b/linux-user/cpu-uname.c index ddc37be4f9..59cd6477d5 100644 --- a/linux-user/cpu-uname.c +++ b/linux-user/cpu-uname.c @@ -35,10 +35,7 @@ const char *cpu_to_uname_machine(void *cpu_env) * armv7l; to get a list of CPU arch names from the linux source, use: * grep arch_name: -A1 linux/arch/arm/mm/proc-*.S * see arch/arm/kernel/setup.c: setup_processor() - * - * to test by CPU id, compare cpu_env->cp15.c0_cpuid to ARM_CPUID_* - * defines and to test by CPU feature, use arm_feature(cpu_env, - * ARM_FEATURE_*) */ + */ /* in theory, endianness is configurable on some ARM CPUs, but this isn't * used in user mode emulation */ diff --git a/linux-user/main.c b/linux-user/main.c index 49108b81d3..d0e0e4fc6a 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -1306,8 +1306,9 @@ do { \ fprintf(stderr, fmt , ## __VA_ARGS__); \ cpu_dump_state(env, stderr, fprintf, 0); \ qemu_log(fmt, ## __VA_ARGS__); \ - if (logfile) \ + if (qemu_log_enabled()) { \ log_cpu_state(env, 0); \ + } \ } while (0) static int 
do_store_exclusive(CPUPPCState *env) diff --git a/linux-user/signal.c b/linux-user/signal.c index b1e139d6fd..43346dcbcc 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -4378,8 +4378,7 @@ static void setup_frame(int sig, struct target_sigaction *ka, sigsegv: unlock_user_struct(frame, frame_addr, 1); - if (logfile) - fprintf (logfile, "segfaulting from setup_frame\n"); + qemu_log("segfaulting from setup_frame\n"); force_sig(TARGET_SIGSEGV); } @@ -4447,8 +4446,7 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka, sigsegv: unlock_user_struct(rt_sf, rt_sf_addr, 1); - if (logfile) - fprintf (logfile, "segfaulting from setup_rt_frame\n"); + qemu_log("segfaulting from setup_rt_frame\n"); force_sig(TARGET_SIGSEGV); } @@ -4489,8 +4487,7 @@ long do_sigreturn(CPUPPCState *env) sigsegv: unlock_user_struct(sr, sr_addr, 1); unlock_user_struct(sc, sc_addr, 1); - if (logfile) - fprintf (logfile, "segfaulting from do_sigreturn\n"); + qemu_log("segfaulting from do_sigreturn\n"); force_sig(TARGET_SIGSEGV); return 0; } @@ -4552,8 +4549,7 @@ long do_rt_sigreturn(CPUPPCState *env) sigsegv: unlock_user_struct(rt_sf, rt_sf_addr, 1); - if (logfile) - fprintf (logfile, "segfaulting from do_rt_sigreturn\n"); + qemu_log("segfaulting from do_rt_sigreturn\n"); force_sig(TARGET_SIGSEGV); return 0; } diff --git a/memory_mapping-stub.c b/memory_mapping-stub.c new file mode 100644 index 0000000000..76be34d89f --- /dev/null +++ b/memory_mapping-stub.c @@ -0,0 +1,33 @@ +/* + * QEMU memory mapping + * + * Copyright Fujitsu, Corp. 2011, 2012 + * + * Authors: + * Wen Congyang <wency@cn.fujitsu.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "cpu.h" +#include "cpu-all.h" +#include "memory_mapping.h" + +int qemu_get_guest_memory_mapping(MemoryMappingList *list) +{ + return -2; +} + +int cpu_get_memory_mapping(MemoryMappingList *list, + CPUArchState *env) +{ + return -1; +} + +bool cpu_paging_enabled(CPUArchState *env) +{ + return true; +} + diff --git a/memory_mapping.c b/memory_mapping.c index 8810bb09e3..6f5a2e3f71 100644 --- a/memory_mapping.c +++ b/memory_mapping.c @@ -6,8 +6,8 @@ * Authors: * Wen Congyang <wency@cn.fujitsu.com> * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. * */ @@ -165,8 +165,6 @@ void memory_mapping_list_init(MemoryMappingList *list) QTAILQ_INIT(&list->head); } -#if defined(CONFIG_HAVE_GET_MEMORY_MAPPING) - static CPUArchState *find_paging_enabled_cpu(CPUArchState *start_cpu) { CPUArchState *env; @@ -210,7 +208,6 @@ int qemu_get_guest_memory_mapping(MemoryMappingList *list) return 0; } -#endif void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list) { diff --git a/memory_mapping.h b/memory_mapping.h index a1aa64f4ca..ef72b0abad 100644 --- a/memory_mapping.h +++ b/memory_mapping.h @@ -6,8 +6,8 @@ * Authors: * Wen Congyang <wency@cn.fujitsu.com> * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. * */ @@ -16,7 +16,6 @@ #include "qemu-queue.h" -#ifndef CONFIG_USER_ONLY /* The physical and virtual address in the memory mapping are contiguous. 
*/ typedef struct MemoryMapping { target_phys_addr_t phys_addr; @@ -31,6 +30,9 @@ typedef struct MemoryMappingList { QTAILQ_HEAD(, MemoryMapping) head; } MemoryMappingList; +int cpu_get_memory_mapping(MemoryMappingList *list, CPUArchState *env); +bool cpu_paging_enabled(CPUArchState *env); + /* * add or merge the memory region [phys_addr, phys_addr + length) into the * memory mapping's list. The region's virtual address starts with virt_addr, @@ -51,14 +53,7 @@ void memory_mapping_list_init(MemoryMappingList *list); * -1: failed * -2: unsupported */ -#if defined(CONFIG_HAVE_GET_MEMORY_MAPPING) int qemu_get_guest_memory_mapping(MemoryMappingList *list); -#else -static inline int qemu_get_guest_memory_mapping(MemoryMappingList *list) -{ - return -2; -} -#endif /* get guest's memory mapping without do paging(virtual address is 0). */ void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list); @@ -66,9 +61,4 @@ void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list); void memory_mapping_filter(MemoryMappingList *list, int64_t begin, int64_t length); -#else - -/* We use MemoryMappingList* in cpu-all.h */ -typedef struct MemoryMappingList MemoryMappingList; -#endif #endif diff --git a/migration.c b/migration.c index 3f485d33a5..8db1b433f0 100644 --- a/migration.c +++ b/migration.c @@ -131,6 +131,8 @@ MigrationInfo *qmp_query_migrate(Error **errp) info->ram->transferred = ram_bytes_transferred(); info->ram->remaining = ram_bytes_remaining(); info->ram->total = ram_bytes_total(); + info->ram->total_time = qemu_get_clock_ms(rt_clock) + - s->total_time; if (blk_mig_active()) { info->has_disk = true; @@ -143,6 +145,13 @@ MigrationInfo *qmp_query_migrate(Error **errp) case MIG_STATE_COMPLETED: info->has_status = true; info->status = g_strdup("completed"); + + info->has_ram = true; + info->ram = g_malloc0(sizeof(*info->ram)); + info->ram->transferred = ram_bytes_transferred(); + info->ram->remaining = 0; + info->ram->total = ram_bytes_total(); + info->ram->total_time = s->total_time; break; case MIG_STATE_ERROR: info->has_status = true; @@ -260,6 +269,7 @@ static void migrate_fd_put_ready(void *opaque) } else { migrate_fd_completed(s); } + s->total_time = qemu_get_clock_ms(rt_clock) - s->total_time; if (s->state != MIG_STATE_COMPLETED) { if (old_vm_running) { vm_start(); @@ -352,7 +362,7 @@ void migrate_fd_connect(MigrationState *s) migrate_fd_close); DPRINTF("beginning savevm\n"); - ret = qemu_savevm_state_begin(s->file, s->blk, s->shared); + ret = qemu_savevm_state_begin(s->file, &s->params); if (ret < 0) { DPRINTF("failed, %d\n", ret); migrate_fd_error(s); @@ -361,18 +371,18 @@ void migrate_fd_connect(MigrationState *s) migrate_fd_put_ready(s); } -static MigrationState *migrate_init(int blk, int inc) +static MigrationState *migrate_init(const MigrationParams *params) { MigrationState *s = migrate_get_current(); int64_t bandwidth_limit = s->bandwidth_limit; memset(s, 0, sizeof(*s)); s->bandwidth_limit = bandwidth_limit; - s->blk = blk; - s->shared = inc; + s->params = *params; s->bandwidth_limit = bandwidth_limit; s->state = MIG_STATE_SETUP; + s->total_time = qemu_get_clock_ms(rt_clock); return s; } @@ -394,9 +404,13 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk, Error **errp) { MigrationState *s = migrate_get_current(); + MigrationParams params; const char *p; int ret; + params.blk = blk; + params.shared = inc; + if (s->state == MIG_STATE_ACTIVE) { error_set(errp, QERR_MIGRATION_ACTIVE); return; @@ -411,7 +425,7 @@ void qmp_migrate(const char *uri, bool has_blk, bool 
blk, return; } - s = migrate_init(blk, inc); + s = migrate_init(¶ms); if (strstart(uri, "tcp:", &p)) { ret = tcp_start_outgoing_migration(s, p, errp); diff --git a/migration.h b/migration.h index 2e9ca2edf2..de13004573 100644 --- a/migration.h +++ b/migration.h @@ -19,6 +19,11 @@ #include "notify.h" #include "error.h" +struct MigrationParams { + bool blk; + bool shared; +}; + typedef struct MigrationState MigrationState; struct MigrationState @@ -31,8 +36,8 @@ struct MigrationState int (*close)(MigrationState *s); int (*write)(MigrationState *s, const void *buff, size_t size); void *opaque; - int blk; - int shared; + MigrationParams params; + int64_t total_time; }; void process_incoming_migration(QEMUFile *f); @@ -66,6 +66,7 @@ #include "memory.h" #include "qmp-commands.h" #include "hmp.h" +#include "qemu-thread.h" /* for pic/irq_info */ #if defined(TARGET_SPARC) @@ -145,6 +146,19 @@ typedef struct MonitorControl { int command_mode; } MonitorControl; +/* + * To prevent flooding clients, events can be throttled. The + * throttling is calculated globally, rather than per-Monitor + * instance. + */ +typedef struct MonitorEventState { + MonitorEvent event; /* Event being tracked */ + int64_t rate; /* Period over which to throttle. 0 to disable */ + int64_t last; /* Time at which event was last emitted */ + QEMUTimer *timer; /* Timer for handling delayed events */ + QObject *data; /* Event pending delayed dispatch */ +} MonitorEventState; + struct Monitor { CharDriverState *chr; int mux_out; @@ -443,9 +457,145 @@ static const char *monitor_event_names[] = { [QEVENT_DEVICE_TRAY_MOVED] = "DEVICE_TRAY_MOVED", [QEVENT_SUSPEND] = "SUSPEND", [QEVENT_WAKEUP] = "WAKEUP", + [QEVENT_BALLOON_CHANGE] = "BALLOON_CHANGE", }; QEMU_BUILD_BUG_ON(ARRAY_SIZE(monitor_event_names) != QEVENT_MAX) +MonitorEventState monitor_event_state[QEVENT_MAX]; +QemuMutex monitor_event_state_lock; + +/* + * Emits the event to every monitor instance + */ +static void +monitor_protocol_event_emit(MonitorEvent event, + QObject *data) +{ + Monitor *mon; + + trace_monitor_protocol_event_emit(event, data); + QLIST_FOREACH(mon, &mon_list, entry) { + if (monitor_ctrl_mode(mon) && qmp_cmd_mode(mon)) { + monitor_json_emitter(mon, data); + } + } +} + + +/* + * Queue a new event for emission to Monitor instances, + * applying any rate limiting if required. 
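+ *
+ * For example, with the 1000ms throttle configured below for
+ * QEVENT_RTC_CHANGE: the first event in a window is emitted immediately,
+ * any further events arriving within the window only replace the pending
+ * payload, and a single coalesced event goes out when the delayed dispatch
+ * timer fires, so clients see roughly one event per second per throttled
+ * event type.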
+ */ +static void +monitor_protocol_event_queue(MonitorEvent event, + QObject *data) +{ + MonitorEventState *evstate; + int64_t now = qemu_get_clock_ns(rt_clock); + assert(event < QEVENT_MAX); + + qemu_mutex_lock(&monitor_event_state_lock); + evstate = &(monitor_event_state[event]); + trace_monitor_protocol_event_queue(event, + data, + evstate->rate, + evstate->last, + now); + + /* Rate limit of 0 indicates no throttling */ + if (!evstate->rate) { + monitor_protocol_event_emit(event, data); + evstate->last = now; + } else { + int64_t delta = now - evstate->last; + if (evstate->data || + delta < evstate->rate) { + /* If there's an existing event pending, replace + * it with the new event, otherwise schedule a + * timer for delayed emission + */ + if (evstate->data) { + qobject_decref(evstate->data); + } else { + int64_t then = evstate->last + evstate->rate; + qemu_mod_timer_ns(evstate->timer, then); + } + evstate->data = data; + qobject_incref(evstate->data); + } else { + monitor_protocol_event_emit(event, data); + evstate->last = now; + } + } + qemu_mutex_unlock(&monitor_event_state_lock); +} + + +/* + * The callback invoked by QemuTimer when a delayed + * event is ready to be emitted + */ +static void monitor_protocol_event_handler(void *opaque) +{ + MonitorEventState *evstate = opaque; + int64_t now = qemu_get_clock_ns(rt_clock); + + qemu_mutex_lock(&monitor_event_state_lock); + + trace_monitor_protocol_event_handler(evstate->event, + evstate->data, + evstate->last, + now); + if (evstate->data) { + monitor_protocol_event_emit(evstate->event, evstate->data); + qobject_decref(evstate->data); + evstate->data = NULL; + } + evstate->last = now; + qemu_mutex_unlock(&monitor_event_state_lock); +} + + +/* + * @event: the event ID to be limited + * @rate: the rate limit in milliseconds + * + * Sets a rate limit on a particular event, so no + * more than 1 event will be emitted within @rate + * milliseconds + */ +static void +monitor_protocol_event_throttle(MonitorEvent event, + int64_t rate) +{ + MonitorEventState *evstate; + assert(event < QEVENT_MAX); + + evstate = &(monitor_event_state[event]); + + trace_monitor_protocol_event_throttle(event, rate); + evstate->event = event; + evstate->rate = rate * SCALE_MS; + evstate->timer = qemu_new_timer(rt_clock, + SCALE_MS, + monitor_protocol_event_handler, + evstate); + evstate->last = 0; + evstate->data = NULL; +} + + +/* Global, one-time initializer to configure the rate limiting + * and initialize state */ +static void monitor_protocol_event_init(void) +{ + qemu_mutex_init(&monitor_event_state_lock); + /* Limit RTC & BALLOON events to 1 per second */ + monitor_protocol_event_throttle(QEVENT_RTC_CHANGE, 1000); + monitor_protocol_event_throttle(QEVENT_BALLOON_CHANGE, 1000); + monitor_protocol_event_throttle(QEVENT_WATCHDOG, 1000); +} + /** * monitor_protocol_event(): Generate a Monitor event * @@ -455,7 +605,6 @@ void monitor_protocol_event(MonitorEvent event, QObject *data) { QDict *qmp; const char *event_name; - Monitor *mon; assert(event < QEVENT_MAX); @@ -470,11 +619,8 @@ void monitor_protocol_event(MonitorEvent event, QObject *data) qdict_put_obj(qmp, "data", data); } - QLIST_FOREACH(mon, &mon_list, entry) { - if (monitor_ctrl_mode(mon) && qmp_cmd_mode(mon)) { - monitor_json_emitter(mon, QOBJECT(qmp)); - } - } + trace_monitor_protocol_event(event, event_name, qmp); + monitor_protocol_event_queue(event, QOBJECT(qmp)); QDECREF(qmp); } @@ -4570,6 +4716,7 @@ void monitor_init(CharDriverState *chr, int flags) if (is_first_init) { key_timer = 
qemu_new_timer_ns(vm_clock, release_keys, NULL); + monitor_protocol_event_init(); is_first_init = 0; } @@ -41,6 +41,7 @@ typedef enum MonitorEvent { QEVENT_DEVICE_TRAY_MOVED, QEVENT_SUSPEND, QEVENT_WAKEUP, + QEVENT_BALLOON_CHANGE, /* Add to 'monitor_event_names' array in monitor.c when * defining new events here */ diff --git a/net/Makefile.objs b/net/Makefile.objs new file mode 100644 index 0000000000..72f50bc903 --- /dev/null +++ b/net/Makefile.objs @@ -0,0 +1,12 @@ +common-obj-y = queue.o checksum.o util.o +common-obj-y += socket.o +common-obj-y += dump.o +common-obj-$(CONFIG_POSIX) += tap.o +common-obj-$(CONFIG_LINUX) += tap-linux.o +common-obj-$(CONFIG_WIN32) += tap-win32.o +common-obj-$(CONFIG_BSD) += tap-bsd.o +common-obj-$(CONFIG_SOLARIS) += tap-solaris.o +common-obj-$(CONFIG_AIX) += tap-aix.o +common-obj-$(CONFIG_HAIKU) += tap-haiku.o +common-obj-$(CONFIG_SLIRP) += slirp.o +common-obj-$(CONFIG_VDE) += vde.o diff --git a/net/slirp.c b/net/slirp.c index 37b6ccfde9..b82eab0a07 100644 --- a/net/slirp.c +++ b/net/slirp.c @@ -26,6 +26,7 @@ #include "config-host.h" #ifndef _WIN32 +#include <pwd.h> #include <sys/wait.h> #endif #include "net.h" @@ -487,8 +488,27 @@ static int slirp_smb(SlirpState* s, const char *exported_dir, static int instance; char smb_conf[128]; char smb_cmdline[128]; + struct passwd *passwd; FILE *f; + passwd = getpwuid(geteuid()); + if (!passwd) { + error_report("failed to retrieve user name"); + return -1; + } + + if (access(CONFIG_SMBD_COMMAND, F_OK)) { + error_report("could not find '%s', please install it", + CONFIG_SMBD_COMMAND); + return -1; + } + + if (access(exported_dir, R_OK | X_OK)) { + error_report("error accessing shared directory '%s': %s", + exported_dir, strerror(errno)); + return -1; + } + snprintf(s->smb_dir, sizeof(s->smb_dir), "/tmp/qemu-smb.%ld-%d", (long)getpid(), instance++); if (mkdir(s->smb_dir, 0700) < 0) { @@ -517,14 +537,16 @@ static int slirp_smb(SlirpState* s, const char *exported_dir, "[qemu]\n" "path=%s\n" "read only=no\n" - "guest ok=yes\n", + "guest ok=yes\n" + "force user=%s\n", s->smb_dir, s->smb_dir, s->smb_dir, s->smb_dir, s->smb_dir, s->smb_dir, - exported_dir + exported_dir, + passwd->pw_name ); fclose(f); @@ -616,25 +638,35 @@ static int slirp_guestfwd(SlirpState *s, const char *config_str, fwd = g_malloc(sizeof(struct GuestFwd)); snprintf(buf, sizeof(buf), "guestfwd.tcp.%d", port); - fwd->hd = qemu_chr_new(buf, p, NULL); - if (!fwd->hd) { - error_report("could not open guest forwarding device '%s'", buf); - g_free(fwd); - return -1; - } - if (slirp_add_exec(s->slirp, 3, fwd->hd, &server, port) < 0) { - error_report("conflicting/invalid host:port in guest forwarding " - "rule '%s'", config_str); - g_free(fwd); - return -1; - } - fwd->server = server; - fwd->port = port; - fwd->slirp = s->slirp; + if ((strlen(p) > 4) && !strncmp(p, "cmd:", 4)) { + if (slirp_add_exec(s->slirp, 0, &p[4], &server, port) < 0) { + error_report("conflicting/invalid host:port in guest forwarding " + "rule '%s'", config_str); + g_free(fwd); + return -1; + } + } else { + fwd->hd = qemu_chr_new(buf, p, NULL); + if (!fwd->hd) { + error_report("could not open guest forwarding device '%s'", buf); + g_free(fwd); + return -1; + } - qemu_chr_add_handlers(fwd->hd, guestfwd_can_read, guestfwd_read, - NULL, fwd); + if (slirp_add_exec(s->slirp, 3, fwd->hd, &server, port) < 0) { + error_report("conflicting/invalid host:port in guest forwarding " + "rule '%s'", config_str); + g_free(fwd); + return -1; + } + fwd->server = server; + fwd->port = port; + fwd->slirp = 
s->slirp; + + qemu_chr_add_handlers(fwd->hd, guestfwd_can_read, guestfwd_read, + NULL, fwd); + } return 0; fail_syntax: diff --git a/net/tap-bsd.c b/net/tap-bsd.c index 4b6b3a41a0..937a94b11f 100644 --- a/net/tap-bsd.c +++ b/net/tap-bsd.c @@ -33,12 +33,6 @@ #include <net/if_tap.h> #endif -#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__) -#include <libutil.h> -#else -#include <util.h> -#endif - int tap_open(char *ifname, int ifname_size, int *vnet_hdr, int vnet_hdr_required) { int fd; @@ -34,6 +34,7 @@ #include <net/if.h> #include "net.h" +#include "monitor.h" #include "sysemu.h" #include "qemu-char.h" #include "qemu-common.h" diff --git a/os-win32.c b/os-win32.c index ad76370c7c..13892ba320 100644 --- a/os-win32.c +++ b/os-win32.c @@ -57,7 +57,13 @@ int setenv(const char *name, const char *value, int overwrite) static BOOL WINAPI qemu_ctrl_handler(DWORD type) { - exit(STATUS_CONTROL_C_EXIT); + qemu_system_shutdown_request(); + /* Windows 7 kills application when the function returns. + Sleep here to give QEMU a try for closing. + Sleep period is 10000ms because Windows kills the program + after 10 seconds anyway. */ + Sleep(10000); + return TRUE; } @@ -48,6 +48,8 @@ extern int madvise(caddr_t, size_t, int); #include "trace.h" #include "qemu_socket.h" +static const char *qemu_version = QEMU_VERSION; + int socket_set_cork(int fd, int v) { #if defined(SOL_TCP) && defined(TCP_CORK) @@ -242,3 +244,12 @@ ssize_t qemu_recv_full(int fd, void *buf, size_t count, int flags) return total; } +void qemu_set_version(const char *version) +{ + qemu_version = version; +} + +const char *qemu_get_version(void) +{ + return qemu_version; +} @@ -149,4 +149,7 @@ static inline void qemu_timersub(const struct timeval *val1, void qemu_set_cloexec(int fd); +void qemu_set_version(const char *); +const char *qemu_get_version(void); + #endif diff --git a/oslib-posix.c b/oslib-posix.c index b6a3c7fc55..6b7ba646c7 100644 --- a/oslib-posix.c +++ b/oslib-posix.c @@ -105,6 +105,8 @@ void *qemu_memalign(size_t alignment, size_t size) return ptr; } +/* conflicts with qemu_vmalloc in bsd-user/mmap.c */ +#if !defined(CONFIG_BSD_USER) /* alloc shared memory pages */ void *qemu_vmalloc(size_t size) { @@ -127,6 +129,7 @@ void *qemu_vmalloc(size_t size) trace_qemu_vmalloc(size, ptr); return ptr; } +#endif void qemu_vfree(void *ptr) { diff --git a/pc-bios/mpc8544ds.dtb b/pc-bios/mpc8544ds.dtb Binary files differdeleted file mode 100644 index c6d302153c..0000000000 --- a/pc-bios/mpc8544ds.dtb +++ /dev/null diff --git a/pc-bios/mpc8544ds.dts b/pc-bios/mpc8544ds.dts deleted file mode 100644 index 7eb31604fc..0000000000 --- a/pc-bios/mpc8544ds.dts +++ /dev/null @@ -1,119 +0,0 @@ -/* - * MPC8544 DS Device Tree Source - * - * Copyright 2007, 2008 Freescale Semiconductor Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -/dts-v1/; -/ { - model = "MPC8544DS"; - compatible = "MPC8544DS", "MPC85xxDS"; - #address-cells = <1>; - #size-cells = <1>; - - aliases { - serial0 = &serial0; - serial1 = &serial1; - pci0 = &pci0; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - }; - - memory { - device_type = "memory"; - reg = <0x0 0x0>; // Filled by U-Boot - }; - - soc8544@e0000000 { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "simple-bus"; - - ranges = <0x0 0xe0000000 0x100000>; - reg = <0xe0000000 0x1000>; // CCSRBAR 1M - bus-frequency = <0>; // Filled out by uboot. - - serial0: serial@4500 { - cell-index = <0>; - device_type = "serial"; - compatible = "ns16550"; - reg = <0x4500 0x100>; - clock-frequency = <0>; - interrupts = <42 2>; - interrupt-parent = <&mpic>; - }; - - serial1: serial@4600 { - cell-index = <1>; - device_type = "serial"; - compatible = "ns16550"; - reg = <0x4600 0x100>; - clock-frequency = <0>; - interrupts = <42 2>; - interrupt-parent = <&mpic>; - }; - - mpic: pic@40000 { - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <2>; - reg = <0x40000 0x40000>; - compatible = "chrp,open-pic"; - device_type = "open-pic"; - }; - - global-utilities@e0000 { //global utilities block - compatible = "fsl,mpc8544-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; - }; - }; - - pci0: pci@e0008000 { - cell-index = <0>; - compatible = "fsl,mpc8540-pci"; - device_type = "pci"; - interrupt-map-mask = <0xf800 0x0 0x0 0x7>; - interrupt-map = < - - /* IDSEL 0x11 J17 Slot 1 */ - 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1 - 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1 - 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1 - 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1 - - /* IDSEL 0x12 J16 Slot 2 */ - - 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1 - 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1 - 0x9000 0x0 0x0 0x3 &mpic 0x2 0x1 - 0x9000 0x0 0x0 0x4 &mpic 0x1 0x1>; - - interrupt-parent = <&mpic>; - interrupts = <24 2>; - bus-range = <0 255>; - ranges = <0x2000000 0x0 0xc0000000 0xc0000000 0x0 0x20000000 - 0x1000000 0x0 0x0 0xe1000000 0x0 0x10000>; - clock-frequency = <66666666>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <0xe0008000 0x1000>; - }; - - chosen { - linux,stdout-path = "/soc8544@e0000000/serial@4500"; - }; - - hypervisor { - }; -}; diff --git a/qapi-schema-guest.json b/qapi-schema-guest.json index d4055d262a..d955cf11fb 100644 --- a/qapi-schema-guest.json +++ b/qapi-schema-guest.json @@ -351,6 +351,26 @@ 'returns': 'int' } ## +# @guest-fstrim: +# +# Discard (or "trim") blocks which are not in use by the filesystem. +# +# @minimum: +# Minimum contiguous free range to discard, in bytes. Free ranges +# smaller than this may be ignored (this is a hint and the guest +# may not respect it). By increasing this value, the fstrim +# operation will complete more quickly for filesystems with badly +# fragmented free space, although not all blocks will be discarded. +# The default value is zero, meaning "discard every free block". +# +# Returns: Nothing. +# +# Since: 1.2 +## +{ 'command': 'guest-fstrim', + 'data': { '*minimum': 'int' } } + +## # @guest-suspend-disk # # Suspend guest to disk. diff --git a/qapi-schema.json b/qapi-schema.json index 3b6e3468b4..1ab5dbd5ff 100644 --- a/qapi-schema.json +++ b/qapi-schema.json @@ -260,10 +260,15 @@ # # @total: total amount of bytes involved in the migration process # +# @total_time: total amount of ms since migration started. If +# migration has ended, it returns the total migration +# time. (since 1.2) +# # Since: 0.14.0.
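The total_time member added to MigrationState in migration.h above is what feeds this new field; a rough sketch of the plumbing, where everything except total_time itself (start_time, the exact call site) is an assumption about code outside this excerpt:

    /* illustrative only: accumulate wall-clock ms on the MigrationState and
     * copy it into the generated MigrationStats when query-migrate runs */
    s->total_time = qemu_get_clock_ms(rt_clock) - start_time;
    info->ram->total_time = s->total_time;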
## { 'type': 'MigrationStats', - 'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' } } + 'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' , + 'total_time': 'int' } } ## # @MigrationInfo @@ -275,8 +280,9 @@ # 'cancelled'. If this field is not returned, no migration process # has been initiated # -# @ram: #optional @MigrationStats containing detailed migration status, -# only returned if status is 'active' +# @ram: #optional @MigrationStats containing detailed migration +# status, only returned if status is 'active' or +# 'completed'. 'completed' (since 1.2) # # @disk: #optional @MigrationStats containing detailed disk migration # status, only returned if status is 'active' and it is a block diff --git a/qapi/Makefile.objs b/qapi/Makefile.objs new file mode 100644 index 0000000000..d0b0c16b90 --- /dev/null +++ b/qapi/Makefile.objs @@ -0,0 +1,3 @@ +qapi-obj-y = qapi-visit-core.o qapi-dealloc-visitor.o qmp-input-visitor.o +qapi-obj-y += qmp-output-visitor.o qmp-registry.o qmp-dispatch.o +qapi-obj-y += string-input-visitor.o string-output-visitor.o diff --git a/qapi/qapi-types-core.h b/qapi/qapi-types-core.h index 27e6be0a87..f781fc3ab7 100644 --- a/qapi/qapi-types-core.h +++ b/qapi/qapi-types-core.h @@ -16,8 +16,6 @@ #include "qemu-common.h" #include "error.h" - -/* FIXME this is temporary until we remove middle mode */ -#include "monitor.h" +#include "qerror.h" #endif diff --git a/qapi/qapi-visit-core.c b/qapi/qapi-visit-core.c index a4e088c9fc..ffffbf79aa 100644 --- a/qapi/qapi-visit-core.c +++ b/qapi/qapi-visit-core.c @@ -97,6 +97,145 @@ void visit_type_int(Visitor *v, int64_t *obj, const char *name, Error **errp) } } +void visit_type_uint8(Visitor *v, uint8_t *obj, const char *name, Error **errp) +{ + int64_t value; + if (!error_is_set(errp)) { + if (v->type_uint8) { + v->type_uint8(v, obj, name, errp); + } else { + value = *obj; + v->type_int(v, &value, name, errp); + if (value < 0 || value > UINT8_MAX) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", + "uint8_t"); + return; + } + *obj = value; + } + } +} + +void visit_type_uint16(Visitor *v, uint16_t *obj, const char *name, Error **errp) +{ + int64_t value; + if (!error_is_set(errp)) { + if (v->type_uint16) { + v->type_uint16(v, obj, name, errp); + } else { + value = *obj; + v->type_int(v, &value, name, errp); + if (value < 0 || value > UINT16_MAX) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", + "uint16_t"); + return; + } + *obj = value; + } + } +} + +void visit_type_uint32(Visitor *v, uint32_t *obj, const char *name, Error **errp) +{ + int64_t value; + if (!error_is_set(errp)) { + if (v->type_uint32) { + v->type_uint32(v, obj, name, errp); + } else { + value = *obj; + v->type_int(v, &value, name, errp); + if (value < 0 || value > UINT32_MAX) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ?
name : "null", + "uint32_t"); + return; + } + *obj = value; + } + } +} + +void visit_type_uint64(Visitor *v, uint64_t *obj, const char *name, Error **errp) +{ + int64_t value; + if (!error_is_set(errp)) { + if (v->type_uint64) { + v->type_uint64(v, obj, name, errp); + } else { + value = *obj; + v->type_int(v, &value, name, errp); + *obj = value; + } + } +} + +void visit_type_int8(Visitor *v, int8_t *obj, const char *name, Error **errp) +{ + int64_t value; + if (!error_is_set(errp)) { + if (v->type_int8) { + v->type_int8(v, obj, name, errp); + } else { + value = *obj; + v->type_int(v, &value, name, errp); + if (value < INT8_MIN || value > INT8_MAX) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", + "int8_t"); + return; + } + *obj = value; + } + } +} + +void visit_type_int16(Visitor *v, int16_t *obj, const char *name, Error **errp) +{ + int64_t value; + if (!error_is_set(errp)) { + if (v->type_int16) { + v->type_int16(v, obj, name, errp); + } else { + value = *obj; + v->type_int(v, &value, name, errp); + if (value < INT16_MIN || value > INT16_MAX) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", + "int16_t"); + return; + } + *obj = value; + } + } +} + +void visit_type_int32(Visitor *v, int32_t *obj, const char *name, Error **errp) +{ + int64_t value; + if (!error_is_set(errp)) { + if (v->type_int32) { + v->type_int32(v, obj, name, errp); + } else { + value = *obj; + v->type_int(v, &value, name, errp); + if (value < INT32_MIN || value > INT32_MAX) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", + "int32_t"); + return; + } + *obj = value; + } + } +} + +void visit_type_int64(Visitor *v, int64_t *obj, const char *name, Error **errp) +{ + if (!error_is_set(errp)) { + if (v->type_int64) { + v->type_int64(v, obj, name, errp); + } else { + v->type_int(v, obj, name, errp); + } + } +} + void visit_type_bool(Visitor *v, bool *obj, const char *name, Error **errp) { if (!error_is_set(errp)) { diff --git a/qapi/qapi-visit-core.h b/qapi/qapi-visit-core.h index e850746b75..a19d70c104 100644 --- a/qapi/qapi-visit-core.h +++ b/qapi/qapi-visit-core.h @@ -52,6 +52,14 @@ struct Visitor void (*start_handle)(Visitor *v, void **obj, const char *kind, const char *name, Error **errp); void (*end_handle)(Visitor *v, Error **errp); + void (*type_uint8)(Visitor *v, uint8_t *obj, const char *name, Error **errp); + void (*type_uint16)(Visitor *v, uint16_t *obj, const char *name, Error **errp); + void (*type_uint32)(Visitor *v, uint32_t *obj, const char *name, Error **errp); + void (*type_uint64)(Visitor *v, uint64_t *obj, const char *name, Error **errp); + void (*type_int8)(Visitor *v, int8_t *obj, const char *name, Error **errp); + void (*type_int16)(Visitor *v, int16_t *obj, const char *name, Error **errp); + void (*type_int32)(Visitor *v, int32_t *obj, const char *name, Error **errp); + void (*type_int64)(Visitor *v, int64_t *obj, const char *name, Error **errp); }; void visit_start_handle(Visitor *v, void **obj, const char *kind, @@ -69,6 +77,14 @@ void visit_end_optional(Visitor *v, Error **errp); void visit_type_enum(Visitor *v, int *obj, const char *strings[], const char *kind, const char *name, Error **errp); void visit_type_int(Visitor *v, int64_t *obj, const char *name, Error **errp); +void visit_type_uint8(Visitor *v, uint8_t *obj, const char *name, Error **errp); +void visit_type_uint16(Visitor *v, uint16_t *obj, const char *name, Error **errp); +void visit_type_uint32(Visitor *v, uint32_t *obj, const char *name, Error **errp); +void 
visit_type_uint64(Visitor *v, uint64_t *obj, const char *name, Error **errp); +void visit_type_int8(Visitor *v, int8_t *obj, const char *name, Error **errp); +void visit_type_int16(Visitor *v, int16_t *obj, const char *name, Error **errp); +void visit_type_int32(Visitor *v, int32_t *obj, const char *name, Error **errp); +void visit_type_int64(Visitor *v, int64_t *obj, const char *name, Error **errp); void visit_type_bool(Visitor *v, bool *obj, const char *name, Error **errp); void visit_type_str(Visitor *v, char **obj, const char *name, Error **errp); void visit_type_number(Visitor *v, double *obj, const char *name, Error **errp); diff --git a/qapi/string-output-visitor.c b/qapi/string-output-visitor.c index 92b0305212..34e525eadd 100644 --- a/qapi/string-output-visitor.c +++ b/qapi/string-output-visitor.c @@ -52,7 +52,7 @@ static void print_type_number(Visitor *v, double *obj, const char *name, Error **errp) { StringOutputVisitor *sov = DO_UPCAST(StringOutputVisitor, visitor, v); - string_output_set(sov, g_strdup_printf("%g", *obj)); + string_output_set(sov, g_strdup_printf("%f", *obj)); } char *string_output_get_string(StringOutputVisitor *sov) diff --git a/qemu-char.c b/qemu-char.c index 0bd903f58c..c2aaaeeb8f 100644 --- a/qemu-char.c +++ b/qemu-char.c @@ -56,19 +56,19 @@ #include <sys/select.h> #ifdef CONFIG_BSD #include <sys/stat.h> -#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) -#include <libutil.h> -#include <dev/ppbus/ppi.h> -#include <dev/ppbus/ppbconf.h> #if defined(__GLIBC__) #include <pty.h> +#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__) +#include <libutil.h> +#else +#include <util.h> #endif +#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) +#include <dev/ppbus/ppi.h> +#include <dev/ppbus/ppbconf.h> #elif defined(__DragonFly__) -#include <libutil.h> #include <dev/misc/ppi/ppi.h> #include <bus/ppbus/ppbconf.h> -#else -#include <util.h> #endif #else #ifdef __linux__ diff --git a/qemu-common.h b/qemu-common.h index 71395771db..09676f529f 100644 --- a/qemu-common.h +++ b/qemu-common.h @@ -17,6 +17,7 @@ typedef struct DeviceState DeviceState; struct Monitor; typedef struct Monitor Monitor; +typedef struct MigrationParams MigrationParams; /* we put basic includes here to avoid repeating them in device drivers */ #include <stdlib.h> @@ -236,6 +237,7 @@ typedef struct VLANState VLANState; typedef struct VLANClientState VLANClientState; typedef struct i2c_bus i2c_bus; typedef struct ISABus ISABus; +typedef struct ISADevice ISADevice; typedef struct SMBusDevice SMBusDevice; typedef struct PCIHostState PCIHostState; typedef struct PCIExpressHost PCIExpressHost; @@ -271,6 +273,13 @@ typedef enum LostTickPolicy { LOST_TICK_MAX } LostTickPolicy; +typedef struct PCIHostDeviceAddress { + unsigned int domain; + unsigned int bus; + unsigned int slot; + unsigned int function; +} PCIHostDeviceAddress; + void tcg_exec_init(unsigned long tb_size); bool tcg_enabled(void); diff --git a/qemu-config.c b/qemu-config.c index bb3bff426a..5c3296b8c6 100644 --- a/qemu-config.c +++ b/qemu-config.c @@ -583,6 +583,18 @@ static QemuOptsList qemu_machine_opts = { .name = "dtb", .type = QEMU_OPT_STRING, .help = "Linux kernel device tree file", + }, { + .name = "dumpdtb", + .type = QEMU_OPT_STRING, + .help = "Dump current dtb to a file and quit", + }, { + .name = "phandle_start", + .type = QEMU_OPT_STRING, + .help = "The first phandle ID we may generate dynamically", + }, { + .name = "dt_compatible", + .type = QEMU_OPT_STRING, + .help = "Overrides the 
\"compatible\" property of the dt root node", }, { /* End of list */ } }, diff --git a/qemu-config.h b/qemu-config.h index e9f2ef4c7b..12ddf3ed97 100644 --- a/qemu-config.h +++ b/qemu-config.h @@ -19,7 +19,7 @@ int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname); int qemu_read_config_file(const char *filename); -/* Read default Qemu config files +/* Read default QEMU config files */ int qemu_read_default_config_files(bool userconfig); diff --git a/qemu-img-cmds.hx b/qemu-img-cmds.hx index 49dce7c928..39419a0314 100644 --- a/qemu-img-cmds.hx +++ b/qemu-img-cmds.hx @@ -10,9 +10,9 @@ STEXI ETEXI DEF("check", img_check, - "check [-f fmt] filename") + "check [-f fmt] [-r [leaks | all]] filename") STEXI -@item check [-f @var{fmt}] @var{filename} +@item check [-f @var{fmt}] [-r [leaks | all]] @var{filename} ETEXI DEF("create", img_create, diff --git a/qemu-img.c b/qemu-img.c index c8a70ffc93..80cfb9b167 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -85,6 +85,12 @@ static void help(void) " '-S' indicates the consecutive number of bytes that must contain only zeros\n" " for qemu-img to create a sparse image during conversion\n" "\n" + "Parameters to check subcommand:\n" + " '-r' tries to repair any inconsistencies that are found during the check.\n" + " '-r leaks' repairs only cluster leaks, whereas '-r all' fixes all\n" + " kinds of errors, with a higher risk of choosing the wrong fix or\n" + " hiding corruption that has already occured.\n" + "\n" "Parameters to snapshot subcommand:\n" " 'snapshot' is the name of the snapshot to create, apply or delete\n" " '-a' applies a snapshot (revert disk to saved state)\n" @@ -372,10 +378,12 @@ static int img_check(int argc, char **argv) const char *filename, *fmt; BlockDriverState *bs; BdrvCheckResult result; + int fix = 0; + int flags = BDRV_O_FLAGS; fmt = NULL; for(;;) { - c = getopt(argc, argv, "f:h"); + c = getopt(argc, argv, "f:hr:"); if (c == -1) { break; } @@ -387,6 +395,17 @@ static int img_check(int argc, char **argv) case 'f': fmt = optarg; break; + case 'r': + flags |= BDRV_O_RDWR; + + if (!strcmp(optarg, "leaks")) { + fix = BDRV_FIX_LEAKS; + } else if (!strcmp(optarg, "all")) { + fix = BDRV_FIX_LEAKS | BDRV_FIX_ERRORS; + } else { + help(); + } + break; } } if (optind >= argc) { @@ -394,11 +413,11 @@ static int img_check(int argc, char **argv) } filename = argv[optind++]; - bs = bdrv_new_open(filename, fmt, BDRV_O_FLAGS); + bs = bdrv_new_open(filename, fmt, flags); if (!bs) { return 1; } - ret = bdrv_check(bs, &result); + ret = bdrv_check(bs, &result, fix); if (ret == -ENOTSUP) { error_report("This image format does not support checks"); @@ -406,6 +425,16 @@ static int img_check(int argc, char **argv) return 1; } + if (result.corruptions_fixed || result.leaks_fixed) { + printf("The following inconsistencies were found and repaired:\n\n" + " %d leaked clusters\n" + " %d corruptions\n\n" + "Double checking the fixed image now...\n", + result.leaks_fixed, + result.corruptions_fixed); + ret = bdrv_check(bs, &result, 0); + } + if (!(result.corruptions || result.leaks || result.check_errors)) { printf("No errors were found on the image.\n"); } else { @@ -1078,7 +1107,7 @@ static int img_info(int argc, char **argv) int c; const char *filename, *fmt; BlockDriverState *bs; - char fmt_name[128], size_buf[128], dsize_buf[128]; + char size_buf[128], dsize_buf[128]; uint64_t total_sectors; int64_t allocated_size; char backing_filename[1024]; @@ -1110,7 +1139,6 @@ static int img_info(int argc, char **argv) if (!bs) { return 1; } - 
bdrv_get_format(bs, fmt_name, sizeof(fmt_name)); bdrv_get_geometry(bs, &total_sectors); get_human_readable_size(size_buf, sizeof(size_buf), total_sectors * 512); allocated_size = bdrv_get_allocated_file_size(bs); @@ -1124,7 +1152,7 @@ static int img_info(int argc, char **argv) "file format: %s\n" "virtual size: %s (%" PRId64 " bytes)\n" "disk size: %s\n", - filename, fmt_name, size_buf, + filename, bdrv_get_format_name(bs), size_buf, (total_sectors * 512), dsize_buf); if (bdrv_is_encrypted(bs)) { diff --git a/qemu-img.texi b/qemu-img.texi index 6fc3c28e0d..77c6d0b6b0 100644 --- a/qemu-img.texi +++ b/qemu-img.texi @@ -70,10 +70,15 @@ lists all snapshots in the given image Command description: @table @option -@item check [-f @var{fmt}] @var{filename} +@item check [-f @var{fmt}] [-r [leaks | all]] @var{filename} Perform a consistency check on the disk image @var{filename}. +If @code{-r} is specified, qemu-img tries to repair any inconsistencies found +during the check. @code{-r leaks} repairs only cluster leaks, whereas +@code{-r all} fixes all kinds of errors, with a higher risk of choosing the +wrong fix or hiding corruption that has already occured. + Only the formats @code{qcow2}, @code{qed} and @code{vdi} support consistency checks. @@ -232,6 +237,29 @@ to grow. @end table +@item qed +Image format with support for backing files and compact image files (when your +filesystem or transport medium does not support holes). Good performance due +to less metadata than the more featureful qcow2 format, especially with +cache=writethrough or cache=directsync. Consider using qcow2 which will soon +have a similar optimization and is most actively developed. + +Supported options: +@table @code +@item backing_file +File name of a base image (see @option{create} subcommand). +@item backing_fmt +Image file format of backing file (optional). Useful if the format cannot be +autodetected because it has no header, like some vhd/vpc files. +@item cluster_size +Changes the cluster size (must be power-of-2 between 4K and 64K). Smaller +cluster sizes can improve the image file size whereas larger cluster sizes +generally provide better performance. +@item table_size +Changes the number of clusters per L1/L2 table (must be power-of-2 between 1 +and 16). There is normally no need to change this value but this option can be +used for performance benchmarking. +@end table @item qcow Old QEMU image format. Left for compatibility. diff --git a/qemu-log.c b/qemu-log.c new file mode 100644 index 0000000000..1ec70e7e83 --- /dev/null +++ b/qemu-log.c @@ -0,0 +1,170 @@ +/* + * Logging support + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu-common.h" +#include "qemu-log.h" + +#ifdef WIN32 +static const char *logfilename = "qemu.log"; +#else +static const char *logfilename = "/tmp/qemu.log"; +#endif +FILE *qemu_logfile; +int qemu_loglevel; +static int log_append = 0; + +void qemu_log(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + if (qemu_logfile) { + vfprintf(qemu_logfile, fmt, ap); + } + va_end(ap); +} + +void qemu_log_mask(int mask, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + if ((qemu_loglevel & mask) && qemu_logfile) { + vfprintf(qemu_logfile, fmt, ap); + } + va_end(ap); +} + +/* enable or disable low levels log */ +void cpu_set_log(int log_flags) +{ + qemu_loglevel = log_flags; + if (qemu_loglevel && !qemu_logfile) { + qemu_logfile = fopen(logfilename, log_append ? "a" : "w"); + if (!qemu_logfile) { + perror(logfilename); + _exit(1); + } +#if !defined(CONFIG_SOFTMMU) + /* must avoid mmap() usage of glibc by setting a buffer "by hand" */ + { + static char logfile_buf[4096]; + setvbuf(qemu_logfile, logfile_buf, _IOLBF, sizeof(logfile_buf)); + } +#elif defined(_WIN32) + /* Win32 doesn't support line-buffering, so use unbuffered output. */ + setvbuf(qemu_logfile, NULL, _IONBF, 0); +#else + setvbuf(qemu_logfile, NULL, _IOLBF, 0); +#endif + log_append = 1; + } + if (!qemu_loglevel && qemu_logfile) { + fclose(qemu_logfile); + qemu_logfile = NULL; + } +} + +void cpu_set_log_filename(const char *filename) +{ + logfilename = strdup(filename); + if (qemu_logfile) { + fclose(qemu_logfile); + qemu_logfile = NULL; + } + cpu_set_log(qemu_loglevel); +} + +const CPULogItem cpu_log_items[] = { + { CPU_LOG_TB_OUT_ASM, "out_asm", + "show generated host assembly code for each compiled TB" }, + { CPU_LOG_TB_IN_ASM, "in_asm", + "show target assembly code for each compiled TB" }, + { CPU_LOG_TB_OP, "op", + "show micro ops for each compiled TB" }, + { CPU_LOG_TB_OP_OPT, "op_opt", + "show micro ops " +#ifdef TARGET_I386 + "before eflags optimization and " +#endif + "after liveness analysis" }, + { CPU_LOG_INT, "int", + "show interrupts/exceptions in short format" }, + { CPU_LOG_EXEC, "exec", + "show trace before each executed TB (lots of logs)" }, + { CPU_LOG_TB_CPU, "cpu", + "show CPU state before block translation" }, +#ifdef TARGET_I386 + { CPU_LOG_PCALL, "pcall", + "show protected mode far calls/returns/exceptions" }, + { CPU_LOG_RESET, "cpu_reset", + "show CPU state before CPU resets" }, +#endif +#ifdef DEBUG_IOPORT + { CPU_LOG_IOPORT, "ioport", + "show all i/o ports accesses" }, +#endif + { LOG_UNIMP, "unimp", + "log unimplemented functionality" }, + { 0, NULL, NULL }, +}; + +static int cmp1(const char *s1, int n, const char *s2) +{ + if (strlen(s2) != n) { + return 0; + } + return memcmp(s1, s2, n) == 0; +} + +/* takes a comma separated list of log masks. Return 0 if error. 
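An illustrative call of the parser defined just below, using entries from the cpu_log_items table above (a sketch, not taken from the patch):

    int mask = cpu_str_to_log_mask("in_asm,op");
    /* mask == (CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP); the special name "all"
     * ORs in every entry of cpu_log_items, and an unknown name returns 0 */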
*/ +int cpu_str_to_log_mask(const char *str) +{ + const CPULogItem *item; + int mask; + const char *p, *p1; + + p = str; + mask = 0; + for (;;) { + p1 = strchr(p, ','); + if (!p1) { + p1 = p + strlen(p); + } + if (cmp1(p,p1-p,"all")) { + for (item = cpu_log_items; item->mask != 0; item++) { + mask |= item->mask; + } + } else { + for (item = cpu_log_items; item->mask != 0; item++) { + if (cmp1(p, p1 - p, item->name)) { + goto found; + } + } + return 0; + } + found: + mask |= item->mask; + if (*p1 != ',') { + break; + } + p = p1 + 1; + } + return mask; +} diff --git a/qemu-log.h b/qemu-log.h index fccfb1100e..4cdc7c7a47 100644 --- a/qemu-log.h +++ b/qemu-log.h @@ -1,10 +1,14 @@ #ifndef QEMU_LOG_H #define QEMU_LOG_H -/* The deprecated global variables: */ -extern FILE *logfile; -extern int loglevel; +#include <stdarg.h> +#ifdef NEED_CPU_H +#include "disas.h" +#endif +/* Private global variables, don't use */ +extern FILE *qemu_logfile; +extern int qemu_loglevel; /* * The new API: @@ -15,79 +19,131 @@ extern int loglevel; /* Returns true if qemu_log() will really write somewhere */ -#define qemu_log_enabled() (logfile != NULL) +static inline bool qemu_log_enabled(void) +{ + return qemu_logfile != NULL; +} + +#define CPU_LOG_TB_OUT_ASM (1 << 0) +#define CPU_LOG_TB_IN_ASM (1 << 1) +#define CPU_LOG_TB_OP (1 << 2) +#define CPU_LOG_TB_OP_OPT (1 << 3) +#define CPU_LOG_INT (1 << 4) +#define CPU_LOG_EXEC (1 << 5) +#define CPU_LOG_PCALL (1 << 6) +#define CPU_LOG_IOPORT (1 << 7) +#define CPU_LOG_TB_CPU (1 << 8) +#define CPU_LOG_RESET (1 << 9) +#define LOG_UNIMP (1 << 10) /* Returns true if a bit is set in the current loglevel mask */ -#define qemu_loglevel_mask(b) ((loglevel & (b)) != 0) - +static inline bool qemu_loglevel_mask(int mask) +{ + return (qemu_loglevel & mask) != 0; +} /* Logging functions: */ /* main logging function */ -#define qemu_log(...) do { \ - if (logfile) \ - fprintf(logfile, ## __VA_ARGS__); \ - } while (0) +void GCC_FMT_ATTR(1, 2) qemu_log(const char *fmt, ...); /* vfprintf-like logging function */ -#define qemu_log_vprintf(fmt, va) do { \ - if (logfile) \ - vfprintf(logfile, fmt, va); \ - } while (0) +static inline void GCC_FMT_ATTR(1, 0) +qemu_log_vprintf(const char *fmt, va_list va) +{ + if (qemu_logfile) { + vfprintf(qemu_logfile, fmt, va); + } +} /* log only if a bit is set on the current loglevel mask */ -#define qemu_log_mask(b, ...) 
do { \ - if (loglevel & (b)) \ - fprintf(logfile, ## __VA_ARGS__); \ - } while (0) - - +void GCC_FMT_ATTR(2, 3) qemu_log_mask(int mask, const char *fmt, ...); /* Special cases: */ +#ifdef NEED_CPU_H /* cpu_dump_state() logging functions: */ -#define log_cpu_state(env, f) cpu_dump_state((env), logfile, fprintf, (f)); -#define log_cpu_state_mask(b, env, f) do { \ - if (loglevel & (b)) log_cpu_state((env), (f)); \ - } while (0) - -/* disas() and target_disas() to logfile: */ -#define log_target_disas(start, len, flags) \ - target_disas(logfile, (start), (len), (flags)) -#define log_disas(start, len) \ - disas(logfile, (start), (len)) - +static inline void log_cpu_state(CPUArchState *env1, int flags) +{ + if (qemu_log_enabled()) { + cpu_dump_state(env1, qemu_logfile, fprintf, flags); + } +} + +static inline void log_cpu_state_mask(int mask, CPUArchState *env1, int flags) +{ + if (qemu_loglevel & mask) { + log_cpu_state(env1, flags); + } +} + +/* disas() and target_disas() to qemu_logfile: */ +static inline void log_target_disas(target_ulong start, target_ulong len, + int flags) +{ + target_disas(qemu_logfile, start, len, flags); +} + +static inline void log_disas(void *code, unsigned long size) +{ + disas(qemu_logfile, code, size); +} + +#if defined(CONFIG_USER_ONLY) /* page_dump() output to the log file: */ -#define log_page_dump() page_dump(logfile) - +static inline void log_page_dump(void) +{ + page_dump(qemu_logfile); +} +#endif +#endif /* Maintenance: */ /* fflush() the log file */ -#define qemu_log_flush() fflush(logfile) +static inline void qemu_log_flush(void) +{ + fflush(qemu_logfile); +} /* Close the log file */ -#define qemu_log_close() do { \ - fclose(logfile); \ - logfile = NULL; \ - } while (0) +static inline void qemu_log_close(void) +{ + fclose(qemu_logfile); + qemu_logfile = NULL; +} /* Set up a new log file */ -#define qemu_log_set_file(f) do { \ - logfile = (f); \ - } while (0) +static inline void qemu_log_set_file(FILE *f) +{ + qemu_logfile = f; +} /* Set up a new log file, only if none is set */ -#define qemu_log_try_set_file(f) do { \ - if (!logfile) \ - logfile = (f); \ - } while (0) - +static inline void qemu_log_try_set_file(FILE *f) +{ + if (!qemu_logfile) { + qemu_logfile = f; + } +} + +/* define log items */ +typedef struct CPULogItem { + int mask; + const char *name; + const char *help; +} CPULogItem; + +extern const CPULogItem cpu_log_items[]; + +void cpu_set_log(int log_flags); +void cpu_set_log_filename(const char *filename); +int cpu_str_to_log_mask(const char *str); #endif diff --git a/qemu-options.hx b/qemu-options.hx index 8b662648ae..ecf7ca12d7 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -1421,8 +1421,28 @@ Then when you use on the host @code{telnet localhost 5555}, you connect to the guest telnet server. @item guestfwd=[tcp]:@var{server}:@var{port}-@var{dev} +@item guestfwd=[tcp]:@var{server}:@var{port}-@var{cmd:command} Forward guest TCP connections to the IP address @var{server} on port @var{port} -to the character device @var{dev}. This option can be given multiple times. +to the character device @var{dev} or to a program executed by @var{cmd:command} +which gets spawned for each connection. This option can be given multiple times. + +You can either use a chardev directly and have that one used throughout Qemu's +lifetime, like in the following example: + +@example +# open 10.10.1.1:4321 on bootup, connect 10.0.2.100:1234 to it whenever +# the guest accesses it +qemu -net user,guestfwd=tcp:10.0.2.100:1234-tcp:10.10.1.1:4321 [...] 
+@end example + +Or you can execute a command on every TCP connection established by the guest, +so that Qemu behaves similar to an inetd process for that virtual server: + +@example +# call "netcat 10.10.1.1 4321" on every TCP connection to 10.0.2.100:1234 +# and connect the TCP stream to its stdin/stdout +qemu -net 'user,guestfwd=tcp:10.0.2.100:1234-cmd:netcat 10.10.1.1 4321' +@end example @end table diff --git a/qemu-tool.c b/qemu-tool.c index 07fc4f201a..318c5fcbca 100644 --- a/qemu-tool.c +++ b/qemu-tool.c @@ -24,8 +24,6 @@ #include <sys/time.h> -FILE *logfile; - struct QEMUBH { QEMUBHFunc *cb; diff --git a/qga/Makefile.objs b/qga/Makefile.objs new file mode 100644 index 0000000000..6a4d843436 --- /dev/null +++ b/qga/Makefile.objs @@ -0,0 +1,3 @@ +qga-obj-y = commands.o guest-agent-command-state.o +qga-obj-$(CONFIG_POSIX) += commands-posix.o channel-posix.o +qga-obj-$(CONFIG_WIN32) += commands-win32.o channel-win32.o service-win32.o diff --git a/qga/commands-posix.c b/qga/commands-posix.c index 00d035da95..ce9042123c 100644 --- a/qga/commands-posix.c +++ b/qga/commands-posix.c @@ -38,9 +38,12 @@ extern char **environ; #include <sys/socket.h> #include <net/if.h> -#if defined(__linux__) && defined(FIFREEZE) +#ifdef FIFREEZE #define CONFIG_FSFREEZE #endif +#ifdef FITRIM +#define CONFIG_FSTRIM +#endif #endif void qmp_guest_shutdown(bool has_mode, const char *mode, Error **err) @@ -312,19 +315,18 @@ static void guest_file_init(void) /* linux-specific implementations. avoid this if at all possible. */ #if defined(__linux__) -#if defined(CONFIG_FSFREEZE) - -typedef struct GuestFsfreezeMount { +#if defined(CONFIG_FSFREEZE) || defined(CONFIG_FSTRIM) +typedef struct FsMount { char *dirname; char *devtype; - QTAILQ_ENTRY(GuestFsfreezeMount) next; -} GuestFsfreezeMount; + QTAILQ_ENTRY(FsMount) next; +} FsMount; -typedef QTAILQ_HEAD(, GuestFsfreezeMount) GuestFsfreezeMountList; +typedef QTAILQ_HEAD(, FsMount) FsMountList; -static void guest_fsfreeze_free_mount_list(GuestFsfreezeMountList *mounts) +static void free_fs_mount_list(FsMountList *mounts) { - GuestFsfreezeMount *mount, *temp; + FsMount *mount, *temp; if (!mounts) { return; @@ -341,10 +343,10 @@ static void guest_fsfreeze_free_mount_list(GuestFsfreezeMountList *mounts) /* * Walk the mount table and build a list of local file systems */ -static int guest_fsfreeze_build_mount_list(GuestFsfreezeMountList *mounts) +static int build_fs_mount_list(FsMountList *mounts) { struct mntent *ment; - GuestFsfreezeMount *mount; + FsMount *mount; char const *mtab = "/proc/self/mounts"; FILE *fp; @@ -367,7 +369,7 @@ static int guest_fsfreeze_build_mount_list(GuestFsfreezeMountList *mounts) continue; } - mount = g_malloc0(sizeof(GuestFsfreezeMount)); + mount = g_malloc0(sizeof(FsMount)); mount->dirname = g_strdup(ment->mnt_dir); mount->devtype = g_strdup(ment->mnt_type); @@ -378,6 +380,9 @@ static int guest_fsfreeze_build_mount_list(GuestFsfreezeMountList *mounts) return 0; } +#endif + +#if defined(CONFIG_FSFREEZE) /* * Return status of freeze/thaw @@ -398,15 +403,15 @@ GuestFsfreezeStatus qmp_guest_fsfreeze_status(Error **err) int64_t qmp_guest_fsfreeze_freeze(Error **err) { int ret = 0, i = 0; - GuestFsfreezeMountList mounts; - struct GuestFsfreezeMount *mount; + FsMountList mounts; + struct FsMount *mount; int fd; char err_msg[512]; slog("guest-fsfreeze called"); QTAILQ_INIT(&mounts); - ret = guest_fsfreeze_build_mount_list(&mounts); + ret = build_fs_mount_list(&mounts); if (ret < 0) { return ret; } @@ -447,11 +452,11 @@ int64_t 
qmp_guest_fsfreeze_freeze(Error **err) close(fd); } - guest_fsfreeze_free_mount_list(&mounts); + free_fs_mount_list(&mounts); return i; error: - guest_fsfreeze_free_mount_list(&mounts); + free_fs_mount_list(&mounts); qmp_guest_fsfreeze_thaw(NULL); return 0; } @@ -462,12 +467,12 @@ error: int64_t qmp_guest_fsfreeze_thaw(Error **err) { int ret; - GuestFsfreezeMountList mounts; - GuestFsfreezeMount *mount; + FsMountList mounts; + FsMount *mount; int fd, i = 0, logged; QTAILQ_INIT(&mounts); - ret = guest_fsfreeze_build_mount_list(&mounts); + ret = build_fs_mount_list(&mounts); if (ret) { error_set(err, QERR_QGA_COMMAND_FAILED, "failed to enumerate filesystems"); @@ -507,7 +512,7 @@ int64_t qmp_guest_fsfreeze_thaw(Error **err) } ga_unset_frozen(ga_state); - guest_fsfreeze_free_mount_list(&mounts); + free_fs_mount_list(&mounts); return i; } @@ -525,6 +530,65 @@ static void guest_fsfreeze_cleanup(void) } #endif /* CONFIG_FSFREEZE */ +#if defined(CONFIG_FSTRIM) +/* + * Walk list of mounted file systems in the guest, and trim them. + */ +void qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **err) +{ + int ret = 0; + FsMountList mounts; + struct FsMount *mount; + int fd; + char err_msg[512]; + struct fstrim_range r = { + .start = 0, + .len = -1, + .minlen = has_minimum ? minimum : 0, + }; + + slog("guest-fstrim called"); + + QTAILQ_INIT(&mounts); + ret = build_fs_mount_list(&mounts); + if (ret < 0) { + return; + } + + QTAILQ_FOREACH(mount, &mounts, next) { + fd = qemu_open(mount->dirname, O_RDONLY); + if (fd == -1) { + sprintf(err_msg, "failed to open %s, %s", mount->dirname, + strerror(errno)); + error_set(err, QERR_QGA_COMMAND_FAILED, err_msg); + goto error; + } + + /* We try to cull filesytems we know won't work in advance, but other + * filesytems may not implement fstrim for less obvious reasons. These + * will report EOPNOTSUPP; we simply ignore these errors. Any other + * error means an unexpected error, so return it in those cases. In + * some other cases ENOTTY will be reported (e.g. CD-ROMs). + */ + ret = ioctl(fd, FITRIM, &r); + if (ret == -1) { + if (errno != ENOTTY && errno != EOPNOTSUPP) { + sprintf(err_msg, "failed to trim %s, %s", + mount->dirname, strerror(errno)); + error_set(err, QERR_QGA_COMMAND_FAILED, err_msg); + close(fd); + goto error; + } + } + close(fd); + } + +error: + free_fs_mount_list(&mounts); +} +#endif /* CONFIG_FSTRIM */ + + #define LINUX_SYS_STATE_FILE "/sys/power/state" #define SUSPEND_SUPPORTED 0 #define SUSPEND_NOT_SUPPORTED 1 @@ -918,7 +982,15 @@ int64_t qmp_guest_fsfreeze_thaw(Error **err) return 0; } +#endif /* CONFIG_FSFREEZE */ +#if !defined(CONFIG_FSTRIM) +void qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **err) +{ + error_set(err, QERR_UNSUPPORTED); + + return; +} #endif /* register init/cleanup routines for stateful command groups */ diff --git a/qga/commands-win32.c b/qga/commands-win32.c index eb8d1405d3..54bc5462e2 100644 --- a/qga/commands-win32.c +++ b/qga/commands-win32.c @@ -173,6 +173,17 @@ int64_t qmp_guest_fsfreeze_thaw(Error **err) return 0; } +/* + * Walk list of mounted file systems in the guest, and discard unused + * areas. 
+ */ +void qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **err) +{ + error_set(err, QERR_UNSUPPORTED); + + return; +} + typedef enum { GUEST_SUSPEND_MODE_DISK, GUEST_SUSPEND_MODE_RAM diff --git a/qom/Makefile b/qom/Makefile deleted file mode 100644 index 34c6de558a..0000000000 --- a/qom/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -qom-y = object.o container.o qom-qobject.o -qom-twice-y = cpu.o diff --git a/qom/Makefile.objs b/qom/Makefile.objs new file mode 100644 index 0000000000..5ef060a401 --- /dev/null +++ b/qom/Makefile.objs @@ -0,0 +1,4 @@ +qom-obj-y = object.o container.o qom-qobject.o +qom-obj-twice-y = cpu.o +common-obj-y = $(qom-obj-twice-y) +user-obj-y = $(qom-obj-twice-y) diff --git a/qom/object.c b/qom/object.c index 6f839ad8c9..00bb3b029c 100644 --- a/qom/object.c +++ b/qom/object.c @@ -45,6 +45,7 @@ struct TypeImpl size_t instance_size; void (*class_init)(ObjectClass *klass, void *data); + void (*class_base_init)(ObjectClass *klass, void *data); void (*class_finalize)(ObjectClass *klass, void *data); void *class_data; @@ -94,7 +95,7 @@ static TypeImpl *type_table_lookup(const char *name) return g_hash_table_lookup(type_table_get(), name); } -TypeImpl *type_register(const TypeInfo *info) +static TypeImpl *type_register_internal(const TypeInfo *info) { TypeImpl *ti = g_malloc0(sizeof(*ti)); @@ -112,6 +113,7 @@ TypeImpl *type_register(const TypeInfo *info) ti->instance_size = info->instance_size; ti->class_init = info->class_init; + ti->class_base_init = info->class_base_init; ti->class_finalize = info->class_finalize; ti->class_data = info->class_data; @@ -135,6 +137,12 @@ TypeImpl *type_register(const TypeInfo *info) return ti; } +TypeImpl *type_register(const TypeInfo *info) +{ + assert(info->parent); + return type_register_internal(info); +} + TypeImpl *type_register_static(const TypeInfo *info) { return type_register(info); @@ -202,13 +210,13 @@ static void type_class_interface_init(TypeImpl *ti, InterfaceImpl *iface) char *name = g_strdup_printf("<%s::%s>", ti->name, iface->parent); info.name = name; - iface->type = type_register(&info); + iface->type = type_register_internal(&info); g_free(name); } static void type_initialize(TypeImpl *ti) { - size_t class_size = sizeof(ObjectClass); + TypeImpl *parent; int i; if (ti->class) { @@ -219,22 +227,23 @@ static void type_initialize(TypeImpl *ti) ti->instance_size = type_object_get_size(ti); ti->class = g_malloc0(ti->class_size); - ti->class->type = ti; - - if (type_has_parent(ti)) { - TypeImpl *parent = type_get_parent(ti); + parent = type_get_parent(ti); + if (parent) { type_initialize(parent); - class_size = parent->class_size; g_assert(parent->class_size <= ti->class_size); - - memcpy((void *)ti->class + sizeof(ObjectClass), - (void *)parent->class + sizeof(ObjectClass), - parent->class_size - sizeof(ObjectClass)); + memcpy(ti->class, parent->class, parent->class_size); } - memset((void *)ti->class + class_size, 0, ti->class_size - class_size); + ti->class->type = ti; + + while (parent) { + if (parent->class_base_init) { + parent->class_base_init(ti->class, ti->class_data); + } + parent = type_get_parent(parent); + } for (i = 0; i < ti->num_interfaces; i++) { type_class_interface_init(ti, &ti->interfaces[i]); @@ -296,6 +305,16 @@ void object_initialize(void *data, const char *typename) object_initialize_with_type(data, type); } +static inline bool object_property_is_child(ObjectProperty *prop) +{ + return strstart(prop->type, "child<", NULL); +} + +static inline bool object_property_is_link(ObjectProperty *prop) +{ + 
return strstart(prop->type, "link<", NULL); +} + static void object_property_del_all(Object *obj) { while (!QTAILQ_EMPTY(&obj->properties)) { @@ -318,7 +337,7 @@ static void object_property_del_child(Object *obj, Object *child, Error **errp) ObjectProperty *prop; QTAILQ_FOREACH(prop, &obj->properties, node) { - if (strstart(prop->type, "child<", NULL) && prop->opaque == child) { + if (object_property_is_child(prop) && prop->opaque == child) { object_property_del(obj, prop->name, errp); break; } @@ -458,19 +477,6 @@ Object *object_dynamic_cast(Object *obj, const char *typename) } -static void register_types(void) -{ - static TypeInfo interface_info = { - .name = TYPE_INTERFACE, - .instance_size = sizeof(Interface), - .abstract = true, - }; - - type_interface = type_register_static(&interface_info); -} - -type_init(register_types) - Object *object_dynamic_cast_assert(Object *obj, const char *typename) { Object *inst; @@ -545,6 +551,19 @@ ObjectClass *object_class_by_name(const char *typename) return type->class; } +ObjectClass *object_class_get_parent(ObjectClass *class) +{ + TypeImpl *type = type_get_parent(class->type); + + if (!type) { + return NULL; + } + + type_initialize(type); + + return type->class; +} + typedef struct OCFData { void (*fn)(ObjectClass *klass, void *opaque); @@ -584,6 +603,23 @@ void object_class_foreach(void (*fn)(ObjectClass *klass, void *opaque), g_hash_table_foreach(type_table_get(), object_class_foreach_tramp, &data); } +int object_child_foreach(Object *obj, int (*fn)(Object *child, void *opaque), + void *opaque) +{ + ObjectProperty *prop; + int ret = 0; + + QTAILQ_FOREACH(prop, &obj->properties, node) { + if (object_property_is_child(prop)) { + ret = fn(prop->opaque, opaque); + if (ret != 0) { + break; + } + } + } + return ret; +} + static void object_class_get_list_tramp(ObjectClass *klass, void *opaque) { GSList **list = opaque; @@ -636,7 +672,8 @@ void object_property_add(Object *obj, const char *name, const char *type, QTAILQ_INSERT_TAIL(&obj->properties, prop, node); } -static ObjectProperty *object_property_find(Object *obj, const char *name) +ObjectProperty *object_property_find(Object *obj, const char *name, + Error **errp) { ObjectProperty *prop; @@ -646,16 +683,22 @@ static ObjectProperty *object_property_find(Object *obj, const char *name) } } + error_set(errp, QERR_PROPERTY_NOT_FOUND, "", name); return NULL; } void object_property_del(Object *obj, const char *name, Error **errp) { - ObjectProperty *prop = object_property_find(obj, name); + ObjectProperty *prop = object_property_find(obj, name, errp); + if (prop == NULL) { + return; + } - QTAILQ_REMOVE(&obj->properties, prop, node); + if (prop->release) { + prop->release(obj, name, prop->opaque); + } - prop->release(obj, prop->name, prop->opaque); + QTAILQ_REMOVE(&obj->properties, prop, node); g_free(prop->name); g_free(prop->type); @@ -665,10 +708,8 @@ void object_property_del(Object *obj, const char *name, Error **errp) void object_property_get(Object *obj, Visitor *v, const char *name, Error **errp) { - ObjectProperty *prop = object_property_find(obj, name); - + ObjectProperty *prop = object_property_find(obj, name, errp); if (prop == NULL) { - error_set(errp, QERR_PROPERTY_NOT_FOUND, "", name); return; } @@ -682,10 +723,8 @@ void object_property_get(Object *obj, Visitor *v, const char *name, void object_property_set(Object *obj, Visitor *v, const char *name, Error **errp) { - ObjectProperty *prop = object_property_find(obj, name); - + ObjectProperty *prop = object_property_find(obj, name, errp); 
if (prop == NULL) { - error_set(errp, QERR_PROPERTY_NOT_FOUND, "", name); return; } @@ -838,10 +877,8 @@ char *object_property_print(Object *obj, const char *name, const char *object_property_get_type(Object *obj, const char *name, Error **errp) { - ObjectProperty *prop = object_property_find(obj, name); - + ObjectProperty *prop = object_property_find(obj, name, errp); if (prop == NULL) { - error_set(errp, QERR_PROPERTY_NOT_FOUND, "", name); return NULL; } @@ -995,7 +1032,7 @@ gchar *object_get_canonical_path(Object *obj) g_assert(obj->parent != NULL); QTAILQ_FOREACH(prop, &obj->parent->properties, node) { - if (!strstart(prop->type, "child<", NULL)) { + if (!object_property_is_child(prop)) { continue; } @@ -1024,14 +1061,14 @@ gchar *object_get_canonical_path(Object *obj) Object *object_resolve_path_component(Object *parent, gchar *part) { - ObjectProperty *prop = object_property_find(parent, part); + ObjectProperty *prop = object_property_find(parent, part, NULL); if (prop == NULL) { return NULL; } - if (strstart(prop->type, "link<", NULL)) { + if (object_property_is_link(prop)) { return *(Object **)prop->opaque; - } else if (strstart(prop->type, "child<", NULL)) { + } else if (object_property_is_child(prop)) { return prop->opaque; } else { return NULL; @@ -1074,7 +1111,7 @@ static Object *object_resolve_partial_path(Object *parent, QTAILQ_FOREACH(prop, &parent->properties, node) { Object *found; - if (!strstart(prop->type, "child<", NULL)) { + if (!object_property_is_child(prop)) { continue; } @@ -1194,3 +1231,34 @@ void object_property_add_str(Object *obj, const char *name, property_release_str, prop, errp); } + +static char *qdev_get_type(Object *obj, Error **errp) +{ + return g_strdup(object_get_typename(obj)); +} + +static void object_instance_init(Object *obj) +{ + object_property_add_str(obj, "type", qdev_get_type, NULL, NULL); +} + +static void register_types(void) +{ + static TypeInfo interface_info = { + .name = TYPE_INTERFACE, + .instance_size = sizeof(Interface), + .abstract = true, + }; + + static TypeInfo object_info = { + .name = TYPE_OBJECT, + .instance_size = sizeof(Object), + .instance_init = object_instance_init, + .abstract = true, + }; + + type_interface = type_register_internal(&interface_info); + type_register_internal(&object_info); +} + +type_init(register_types) diff --git a/readline.c b/readline.c index a6c0039ad2..540cd8a025 100644 --- a/readline.c +++ b/readline.c @@ -337,6 +337,9 @@ static void readline_completion(ReadLineState *rs) } readline_show_prompt(rs); } + for (i = 0; i < rs->nb_completions; i++) { + g_free(rs->completions[i]); + } } /* return true if command handled */ @@ -73,3 +73,44 @@ TRACETOOL=$(PYTHON) $(SRC_PATH)/scripts/tracetool.py # will delete the target of a rule if commands exit with a nonzero exit status .DELETE_ON_ERROR: + +# magic to descend into other directories + +obj := . 
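A sketch of how the descent machinery below is meant to be used, with made-up directory and object names and assuming common-obj-y is listed in nested-vars:

    # parent Makefile.objs
    common-obj-y += foo/
    # foo/Makefile.objs
    common-obj-y = bar.o
    # after $(call unnest-vars), common-obj-y contains foo/bar.o and the
    # bare "foo/" entry has been filtered out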
+old-nested-dirs := + +define push-var +$(eval save-$2-$1 = $(value $1)) +$(eval $1 :=) +endef + +define pop-var +$(eval subdir-$2-$1 := $(if $(filter $2,$(save-$2-$1)),$(addprefix $2,$($1)))) +$(eval $1 = $(value save-$2-$1) $$(subdir-$2-$1)) +$(eval save-$2-$1 :=) +endef + +define unnest-dir +$(foreach var,$(nested-vars),$(call push-var,$(var),$1/)) +$(eval obj := $(obj)/$1) +$(eval include $(SRC_PATH)/$1/Makefile.objs) +$(eval -include $(wildcard $1/*.d)) +$(eval obj := $(patsubst %/$1,%,$(obj))) +$(foreach var,$(nested-vars),$(call pop-var,$(var),$1/)) +endef + +define unnest-vars-1 +$(eval nested-dirs := $(filter-out \ + $(old-nested-dirs), \ + $(sort $(foreach var,$(nested-vars), $(filter %/, $($(var))))))) +$(if $(nested-dirs), + $(foreach dir,$(nested-dirs),$(call unnest-dir,$(patsubst %/,%,$(dir)))) + $(eval old-nested-dirs := $(old-nested-dirs) $(nested-dirs)) + $(call unnest-vars-1)) +endef + +define unnest-vars +$(call unnest-vars-1) +$(foreach var,$(nested-vars),$(eval $(var) := $(filter-out %/, $($(var))))) +$(shell mkdir -p $(sort $(foreach var,$(nested-vars),$(dir $($(var)))))) +endef @@ -85,6 +85,7 @@ #include "cpus.h" #include "memory.h" #include "qmp-commands.h" +#include "trace.h" #define SELF_ANNOUNCE_ROUNDS 5 @@ -400,7 +401,7 @@ static int block_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size) static int bdrv_fclose(void *opaque) { - return 0; + return bdrv_flush(opaque); } static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int is_writable) @@ -1248,8 +1249,8 @@ int register_savevm_live(DeviceState *dev, se->is_ram = 1; } - if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) { - char *id = dev->parent_bus->info->get_dev_path(dev); + if (dev) { + char *id = qdev_get_dev_path(dev); if (id) { pstrcpy(se->idstr, sizeof(se->idstr), id); pstrcat(se->idstr, sizeof(se->idstr), "/"); @@ -1292,8 +1293,8 @@ void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque) SaveStateEntry *se, *new_se; char id[256] = ""; - if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) { - char *path = dev->parent_bus->info->get_dev_path(dev); + if (dev) { + char *path = qdev_get_dev_path(dev); if (path) { pstrcpy(id, sizeof(id), path); pstrcat(id, sizeof(id), "/"); @@ -1334,8 +1335,8 @@ int vmstate_register_with_alias_id(DeviceState *dev, int instance_id, se->alias_id = alias_id; se->no_migrate = vmsd->unmigratable; - if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) { - char *id = dev->parent_bus->info->get_dev_path(dev); + if (dev) { + char *id = qdev_get_dev_path(dev); if (id) { pstrcpy(se->idstr, sizeof(se->idstr), id); pstrcat(se->idstr, sizeof(se->idstr), "/"); @@ -1561,7 +1562,8 @@ bool qemu_savevm_state_blocked(Error **errp) return false; } -int qemu_savevm_state_begin(QEMUFile *f, int blk_enable, int shared) +int qemu_savevm_state_begin(QEMUFile *f, + const MigrationParams *params) { SaveStateEntry *se; int ret; @@ -1569,8 +1571,8 @@ int qemu_savevm_state_begin(QEMUFile *f, int blk_enable, int shared) QTAILQ_FOREACH(se, &savevm_handlers, entry) { if(se->set_params == NULL) { continue; - } - se->set_params(blk_enable, shared, se->opaque); + } + se->set_params(params, se->opaque); } qemu_put_be32(f, QEMU_VM_FILE_MAGIC); @@ -1624,11 +1626,17 @@ int qemu_savevm_state_iterate(QEMUFile *f) if (se->save_live_state == NULL) continue; + if (qemu_file_rate_limit(f)) { + return 0; + } + trace_savevm_section_start(); /* Section type */ qemu_put_byte(f, QEMU_VM_SECTION_PART); qemu_put_be32(f, se->section_id); ret = 
se->save_live_state(f, QEMU_VM_SECTION_PART, se->opaque); + trace_savevm_section_end(se->section_id); + if (ret <= 0) { /* Do not proceed to the next vmstate before this one reported completion of the current stage. This serializes the migration @@ -1658,11 +1666,13 @@ int qemu_savevm_state_complete(QEMUFile *f) if (se->save_live_state == NULL) continue; + trace_savevm_section_start(); /* Section type */ qemu_put_byte(f, QEMU_VM_SECTION_END); qemu_put_be32(f, se->section_id); ret = se->save_live_state(f, QEMU_VM_SECTION_END, se->opaque); + trace_savevm_section_end(se->section_id); if (ret < 0) { return ret; } @@ -1674,6 +1684,7 @@ if (se->save_state == NULL && se->vmsd == NULL) continue; + trace_savevm_section_start(); /* Section type */ qemu_put_byte(f, QEMU_VM_SECTION_FULL); qemu_put_be32(f, se->section_id); @@ -1687,6 +1698,7 @@ qemu_put_be32(f, se->version_id); vmstate_save(f, se); + trace_savevm_section_end(se->section_id); } qemu_put_byte(f, QEMU_VM_EOF); @@ -1708,13 +1720,17 @@ void qemu_savevm_state_cancel(QEMUFile *f) static int qemu_savevm_state(QEMUFile *f) { int ret; + MigrationParams params = { + .blk = 0, + .shared = 0 + }; if (qemu_savevm_state_blocked(NULL)) { ret = -EINVAL; goto out; } - ret = qemu_savevm_state_begin(f, 0, 0); + ret = qemu_savevm_state_begin(f, &params); if (ret < 0) goto out; diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 8850a5f436..b98dc6cad1 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2849,6 +2849,11 @@ sub process { ERROR("lockdep_no_validate class is reserved for device->mutex.\n" . $herecurr); } } + +# QEMU specific tests + if ($rawline =~ /\b(?:Qemu|QEmu)\b/) { + WARN("use QEMU instead of Qemu or QEmu\n" .
$herecurr); + } } # If we have no input at all, then there is nothing to report on diff --git a/scripts/kvm/kvm_stat b/scripts/kvm/kvm_stat index 56d2bd7f21..e8d68f05ca 100755 --- a/scripts/kvm/kvm_stat +++ b/scripts/kvm/kvm_stat @@ -141,15 +141,39 @@ svm_exit_reasons = { 0x400: 'NPF', } +s390_exit_reasons = { + 0x000: 'UNKNOWN', + 0x001: 'EXCEPTION', + 0x002: 'IO', + 0x003: 'HYPERCALL', + 0x004: 'DEBUG', + 0x005: 'HLT', + 0x006: 'MMIO', + 0x007: 'IRQ_WINDOW_OPEN', + 0x008: 'SHUTDOWN', + 0x009: 'FAIL_ENTRY', + 0x010: 'INTR', + 0x011: 'SET_TPR', + 0x012: 'TPR_ACCESS', + 0x013: 'S390_SIEIC', + 0x014: 'S390_RESET', + 0x015: 'DCR', + 0x016: 'NMI', + 0x017: 'INTERNAL_ERROR', + 0x018: 'OSI', + 0x019: 'PAPR_HCALL', +} + vendor_exit_reasons = { 'vmx': vmx_exit_reasons, 'svm': svm_exit_reasons, + 'IBM/S390': s390_exit_reasons, } exit_reasons = None for line in file('/proc/cpuinfo').readlines(): - if line.startswith('flags'): + if line.startswith('flags') or line.startswith('vendor_id'): for flag in line.split(): if flag in vendor_exit_reasons: exit_reasons = vendor_exit_reasons[flag] diff --git a/slirp/Makefile.objs b/slirp/Makefile.objs new file mode 100644 index 0000000000..bb43d3c08c --- /dev/null +++ b/slirp/Makefile.objs @@ -0,0 +1,3 @@ +common-obj-y = cksum.o if.o ip_icmp.o ip_input.o ip_output.o +common-obj-y += slirp.o mbuf.o misc.o sbuf.o socket.o tcp_input.o tcp_output.o +common-obj-y += tcp_subr.o tcp_timer.o udp.o bootp.o tftp.o arp_table.o @@ -77,7 +77,8 @@ void do_info_snapshots(Monitor *mon); void qemu_announce_self(void); bool qemu_savevm_state_blocked(Error **errp); -int qemu_savevm_state_begin(QEMUFile *f, int blk_enable, int shared); +int qemu_savevm_state_begin(QEMUFile *f, + const MigrationParams *params); int qemu_savevm_state_iterate(QEMUFile *f); int qemu_savevm_state_complete(QEMUFile *f); void qemu_savevm_state_cancel(QEMUFile *f); diff --git a/target-alpha/Makefile.objs b/target-alpha/Makefile.objs new file mode 100644 index 0000000000..590304cc61 --- /dev/null +++ b/target-alpha/Makefile.objs @@ -0,0 +1,3 @@ +obj-$(CONFIG_SOFTMMU) += machine.o +obj-y += translate.o helper.o cpu.o +obj-y += int_helper.o fpu_helper.o sys_helper.o mem_helper.o diff --git a/target-arm/Makefile.objs b/target-arm/Makefile.objs new file mode 100644 index 0000000000..f447c4fdf2 --- /dev/null +++ b/target-arm/Makefile.objs @@ -0,0 +1,6 @@ +obj-y += arm-semi.o +obj-$(CONFIG_SOFTMMU) += machine.o +obj-y += translate.o op_helper.o helper.o cpu.o +obj-y += neon_helper.o iwmmxt_helper.o + +$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) diff --git a/arm-semi.c b/target-arm/arm-semi.c index 88ca9bb1b7..88ca9bb1b7 100644 --- a/arm-semi.c +++ b/target-arm/arm-semi.c diff --git a/target-arm/cpu-qom.h b/target-arm/cpu-qom.h index a61c68d21b..beabf9a0a9 100644 --- a/target-arm/cpu-qom.h +++ b/target-arm/cpu-qom.h @@ -58,6 +58,9 @@ typedef struct ARMCPU { CPUARMState env; + /* Coprocessor information */ + GHashTable *cp_regs; + /* The instance init functions for implementation-specific subclasses * set these fields to specify the implementation-dependent values of * various constant registers and reset values of non-constant @@ -94,6 +97,7 @@ typedef struct ARMCPU { */ uint32_t ccsidr[16]; uint32_t reset_cbar; + uint32_t reset_auxcr; } ARMCPU; static inline ARMCPU *arm_env_get_cpu(CPUARMState *env) @@ -104,5 +108,6 @@ static inline ARMCPU *arm_env_get_cpu(CPUARMState *env) #define ENV_GET_CPU(e) CPU(arm_env_get_cpu(e)) void arm_cpu_realize(ARMCPU *cpu); +void register_cp_regs_for_features(ARMCPU *cpu); 
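/*
 * Editor's note -- an illustrative sketch, not part of this patch: the
 * cp_regs hash table added above maps the ENCODE_CP_REG() key of a
 * coprocessor access to its ARMCPRegInfo, and is queried with
 * get_arm_cp_reginfo(); both helpers are introduced in target-arm/cpu.h
 * later in this diff. Assuming a hypothetical helper whose cpnum/crn/crm/
 * opc1/opc2 arguments were decoded from an MRC/MCR instruction, a 32-bit
 * register read could be resolved roughly like this (EXCP_* error
 * propagation from readfn is omitted for brevity):
 *
 *   static uint64_t example_cp_read(ARMCPU *cpu, CPUARMState *env,
 *                                   int cpnum, int crn, int crm,
 *                                   int opc1, int opc2, int isread)
 *   {
 *       uint32_t key = ENCODE_CP_REG(cpnum, 0, crn, crm, opc1, opc2);
 *       const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu, key);
 *       uint64_t value = 0;
 *
 *       if (!ri || !cp_access_ok(env, ri, isread)) {
 *           return 0;                         // would normally UNDEF
 *       }
 *       if (ri->readfn) {
 *           ri->readfn(env, ri, &value);      // registered read hook
 *       } else if (ri->fieldoffset) {
 *           value = CPREG_FIELD32(env, ri);   // direct CPUARMState field
 *       } else {
 *           value = ri->resetvalue;           // e.g. ARM_CP_CONST registers
 *       }
 *       return value;
 *   }
 */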
#endif diff --git a/target-arm/cpu.c b/target-arm/cpu.c index 7eb323ae4d..ae5795337f 100644 --- a/target-arm/cpu.c +++ b/target-arm/cpu.c @@ -23,6 +23,38 @@ #if !defined(CONFIG_USER_ONLY) #include "hw/loader.h" #endif +#include "sysemu.h" + +static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque) +{ + /* Reset a single ARMCPRegInfo register */ + ARMCPRegInfo *ri = value; + ARMCPU *cpu = opaque; + + if (ri->type & ARM_CP_SPECIAL) { + return; + } + + if (ri->resetfn) { + ri->resetfn(&cpu->env, ri); + return; + } + + /* A zero offset is never possible as it would be regs[0] + * so we use it to indicate that reset is being handled elsewhere. + * This is basically only used for fields in non-core coprocessors + * (like the pxa2xx ones). + */ + if (!ri->fieldoffset) { + return; + } + + if (ri->type & ARM_CP_64BIT) { + CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue; + } else { + CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue; + } +} /* CPUClass::reset() */ static void arm_cpu_reset(CPUState *s) @@ -39,30 +71,10 @@ static void arm_cpu_reset(CPUState *s) acc->parent_reset(s); memset(env, 0, offsetof(CPUARMState, breakpoints)); - env->cp15.c15_config_base_address = cpu->reset_cbar; - env->cp15.c0_cpuid = cpu->midr; + g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu); env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid; env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0; env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1; - env->cp15.c0_cachetype = cpu->ctr; - env->cp15.c1_sys = cpu->reset_sctlr; - env->cp15.c0_c1[0] = cpu->id_pfr0; - env->cp15.c0_c1[1] = cpu->id_pfr1; - env->cp15.c0_c1[2] = cpu->id_dfr0; - env->cp15.c0_c1[3] = cpu->id_afr0; - env->cp15.c0_c1[4] = cpu->id_mmfr0; - env->cp15.c0_c1[5] = cpu->id_mmfr1; - env->cp15.c0_c1[6] = cpu->id_mmfr2; - env->cp15.c0_c1[7] = cpu->id_mmfr3; - env->cp15.c0_c2[0] = cpu->id_isar0; - env->cp15.c0_c2[1] = cpu->id_isar1; - env->cp15.c0_c2[2] = cpu->id_isar2; - env->cp15.c0_c2[3] = cpu->id_isar3; - env->cp15.c0_c2[4] = cpu->id_isar4; - env->cp15.c0_c2[5] = cpu->id_isar5; - env->cp15.c15_i_min = 0xff0; - env->cp15.c0_clid = cpu->clidr; - memcpy(env->cp15.c0_ccsid, cpu->ccsidr, ARRAY_SIZE(cpu->ccsidr)); if (arm_feature(env, ARM_FEATURE_IWMMXT)) { env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q'; @@ -99,11 +111,6 @@ static void arm_cpu_reset(CPUState *s) } } env->vfp.xregs[ARM_VFP_FPEXC] = 0; - env->cp15.c2_base_mask = 0xffffc000u; - /* v7 performance monitor control register: same implementor - * field as main ID register, and we implement no event counters. 
- */ - env->cp15.c9_pmcr = (cpu->midr & 0xff000000); #endif set_flush_to_zero(1, &env->vfp.standard_fp_status); set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status); @@ -130,6 +137,14 @@ static void arm_cpu_initfn(Object *obj) ARMCPU *cpu = ARM_CPU(obj); cpu_exec_init(&cpu->env); + cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal, + g_free, g_free); +} + +static void arm_cpu_finalizefn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + g_hash_table_destroy(cpu->cp_regs); } void arm_cpu_realize(ARMCPU *cpu) @@ -145,6 +160,7 @@ void arm_cpu_realize(ARMCPU *cpu) if (arm_feature(env, ARM_FEATURE_V7)) { set_feature(env, ARM_FEATURE_VAPA); set_feature(env, ARM_FEATURE_THUMB2); + set_feature(env, ARM_FEATURE_MPIDR); if (!arm_feature(env, ARM_FEATURE_M)) { set_feature(env, ARM_FEATURE_V6K); } else { @@ -176,6 +192,8 @@ void arm_cpu_realize(ARMCPU *cpu) if (arm_feature(env, ARM_FEATURE_VFP3)) { set_feature(env, ARM_FEATURE_VFP); } + + register_cp_regs_for_features(cpu); } /* CPU models */ @@ -185,7 +203,9 @@ static void arm926_initfn(Object *obj) ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_VFP); - cpu->midr = ARM_CPUID_ARM926; + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN); + cpu->midr = 0x41069265; cpu->reset_fpsid = 0x41011090; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00090078; @@ -196,7 +216,8 @@ static void arm946_initfn(Object *obj) ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_MPU); - cpu->midr = ARM_CPUID_ARM946; + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + cpu->midr = 0x41059461; cpu->ctr = 0x0f004006; cpu->reset_sctlr = 0x00000078; } @@ -207,10 +228,23 @@ static void arm1026_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_VFP); set_feature(&cpu->env, ARM_FEATURE_AUXCR); - cpu->midr = ARM_CPUID_ARM1026; + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN); + cpu->midr = 0x4106a262; cpu->reset_fpsid = 0x410110a0; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00090078; + cpu->reset_auxcr = 1; + { + /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */ + ARMCPRegInfo ifar = { + .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c6_insn), + .resetvalue = 0 + }; + define_one_arm_cp_reg(cpu, &ifar); + } } static void arm1136_r2_initfn(Object *obj) @@ -225,7 +259,10 @@ static void arm1136_r2_initfn(Object *obj) */ set_feature(&cpu->env, ARM_FEATURE_V6); set_feature(&cpu->env, ARM_FEATURE_VFP); - cpu->midr = ARM_CPUID_ARM1136_R2; + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG); + set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); + cpu->midr = 0x4107b362; cpu->reset_fpsid = 0x410120b4; cpu->mvfr0 = 0x11111111; cpu->mvfr1 = 0x00000000; @@ -243,6 +280,7 @@ static void arm1136_r2_initfn(Object *obj) cpu->id_isar2 = 0x11231111; cpu->id_isar3 = 0x01102131; cpu->id_isar4 = 0x141; + cpu->reset_auxcr = 7; } static void arm1136_initfn(Object *obj) @@ -251,7 +289,10 @@ static void arm1136_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_V6K); set_feature(&cpu->env, ARM_FEATURE_V6); set_feature(&cpu->env, ARM_FEATURE_VFP); - cpu->midr = ARM_CPUID_ARM1136; + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, 
ARM_FEATURE_CACHE_DIRTY_REG); + set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); + cpu->midr = 0x4117b363; cpu->reset_fpsid = 0x410120b4; cpu->mvfr0 = 0x11111111; cpu->mvfr1 = 0x00000000; @@ -269,6 +310,7 @@ static void arm1136_initfn(Object *obj) cpu->id_isar2 = 0x11231111; cpu->id_isar3 = 0x01102131; cpu->id_isar4 = 0x141; + cpu->reset_auxcr = 7; } static void arm1176_initfn(Object *obj) @@ -277,7 +319,10 @@ static void arm1176_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_V6K); set_feature(&cpu->env, ARM_FEATURE_VFP); set_feature(&cpu->env, ARM_FEATURE_VAPA); - cpu->midr = ARM_CPUID_ARM1176; + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG); + set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); + cpu->midr = 0x410fb767; cpu->reset_fpsid = 0x410120b5; cpu->mvfr0 = 0x11111111; cpu->mvfr1 = 0x00000000; @@ -295,6 +340,7 @@ static void arm1176_initfn(Object *obj) cpu->id_isar2 = 0x11231121; cpu->id_isar3 = 0x01102131; cpu->id_isar4 = 0x01141; + cpu->reset_auxcr = 7; } static void arm11mpcore_initfn(Object *obj) @@ -303,11 +349,13 @@ static void arm11mpcore_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_V6K); set_feature(&cpu->env, ARM_FEATURE_VFP); set_feature(&cpu->env, ARM_FEATURE_VAPA); - cpu->midr = ARM_CPUID_ARM11MPCORE; + set_feature(&cpu->env, ARM_FEATURE_MPIDR); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + cpu->midr = 0x410fb022; cpu->reset_fpsid = 0x410120b4; cpu->mvfr0 = 0x11111111; cpu->mvfr1 = 0x00000000; - cpu->ctr = 0x1dd20d2; + cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */ cpu->id_pfr0 = 0x111; cpu->id_pfr1 = 0x1; cpu->id_dfr0 = 0; @@ -320,6 +368,7 @@ static void arm11mpcore_initfn(Object *obj) cpu->id_isar2 = 0x11221011; cpu->id_isar3 = 0x01102131; cpu->id_isar4 = 0x141; + cpu->reset_auxcr = 1; } static void cortex_m3_initfn(Object *obj) @@ -327,9 +376,17 @@ static void cortex_m3_initfn(Object *obj) ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V7); set_feature(&cpu->env, ARM_FEATURE_M); - cpu->midr = ARM_CPUID_CORTEXM3; + cpu->midr = 0x410fc231; } +static const ARMCPRegInfo cortexa8_cp_reginfo[] = { + { .name = "L2LOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + static void cortex_a8_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); @@ -337,7 +394,8 @@ static void cortex_a8_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_VFP3); set_feature(&cpu->env, ARM_FEATURE_NEON); set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); - cpu->midr = ARM_CPUID_CORTEXA8; + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + cpu->midr = 0x410fc080; cpu->reset_fpsid = 0x410330c0; cpu->mvfr0 = 0x11110222; cpu->mvfr1 = 0x00011100; @@ -360,8 +418,39 @@ static void cortex_a8_initfn(Object *obj) cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */ cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */ cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */ + cpu->reset_auxcr = 2; + define_arm_cp_regs(cpu, cortexa8_cp_reginfo); } +static const ARMCPRegInfo cortexa9_cp_reginfo[] = { + /* power_control should be set to maximum latency. 
Again, + * default to 0 and set by private hook + */ + { .name = "A9_PWRCTL", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.c15_power_control) }, + { .name = "A9_DIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.c15_diagnostic) }, + { .name = "A9_PWRDIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL1_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.c15_power_diagnostic) }, + { .name = "NEONBUSY", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST }, + /* TLB lockdown control */ + { .name = "TLB_LOCKR", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 2, + .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP }, + { .name = "TLB_LOCKW", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 4, + .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP }, + { .name = "TLB_VA", .cp = 15, .crn = 15, .crm = 5, .opc1 = 5, .opc2 = 2, + .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST }, + { .name = "TLB_PA", .cp = 15, .crn = 15, .crm = 6, .opc1 = 5, .opc2 = 2, + .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST }, + { .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2, + .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST }, + REGINFO_SENTINEL +}; + static void cortex_a9_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); @@ -375,7 +464,7 @@ static void cortex_a9_initfn(Object *obj) * and valid configurations; we don't model A9UP). */ set_feature(&cpu->env, ARM_FEATURE_V7MP); - cpu->midr = ARM_CPUID_CORTEXA9; + cpu->midr = 0x410fc090; cpu->reset_fpsid = 0x41033090; cpu->mvfr0 = 0x11110222; cpu->mvfr1 = 0x01111111; @@ -397,8 +486,40 @@ static void cortex_a9_initfn(Object *obj) cpu->clidr = (1 << 27) | (1 << 24) | 3; cpu->ccsidr[0] = 0xe00fe015; /* 16k L1 dcache. */ cpu->ccsidr[1] = 0x200fe015; /* 16k L1 icache. */ + { + ARMCPRegInfo cbar = { + .name = "CBAR", .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, + .opc2 = 0, .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, + .fieldoffset = offsetof(CPUARMState, cp15.c15_config_base_address) + }; + define_one_arm_cp_reg(cpu, &cbar); + define_arm_cp_regs(cpu, cortexa9_cp_reginfo); + } } +#ifndef CONFIG_USER_ONLY +static int a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t *value) +{ + /* Linux wants the number of processors from here. + * Might as well set the interrupt-controller bit too. 
+ */ + *value = ((smp_cpus - 1) << 24) | (1 << 23); + return 0; +} +#endif + +static const ARMCPRegInfo cortexa15_cp_reginfo[] = { +#ifndef CONFIG_USER_ONLY + { .name = "L2CTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2, + .access = PL1_RW, .resetvalue = 0, .readfn = a15_l2ctlr_read, + .writefn = arm_cp_write_ignore, }, +#endif + { .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + static void cortex_a15_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); @@ -410,7 +531,8 @@ static void cortex_a15_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_ARM_DIV); set_feature(&cpu->env, ARM_FEATURE_V7MP); set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); - cpu->midr = ARM_CPUID_CORTEXA15; + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + cpu->midr = 0x412fc0f1; cpu->reset_fpsid = 0x410430f0; cpu->mvfr0 = 0x10110222; cpu->mvfr1 = 0x11111111; @@ -433,6 +555,7 @@ static void cortex_a15_initfn(Object *obj) cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */ + define_arm_cp_regs(cpu, cortexa15_cp_reginfo); } static void ti925t_initfn(Object *obj) @@ -449,7 +572,8 @@ static void sa1100_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_STRONGARM); - cpu->midr = ARM_CPUID_SA1100; + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + cpu->midr = 0x4401A11B; cpu->reset_sctlr = 0x00000070; } @@ -457,7 +581,8 @@ static void sa1110_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_STRONGARM); - cpu->midr = ARM_CPUID_SA1110; + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + cpu->midr = 0x6901B119; cpu->reset_sctlr = 0x00000070; } @@ -466,7 +591,7 @@ static void pxa250_initfn(Object *obj) ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); - cpu->midr = ARM_CPUID_PXA250; + cpu->midr = 0x69052100; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } @@ -476,7 +601,7 @@ static void pxa255_initfn(Object *obj) ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); - cpu->midr = ARM_CPUID_PXA255; + cpu->midr = 0x69052d00; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } @@ -486,7 +611,7 @@ static void pxa260_initfn(Object *obj) ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); - cpu->midr = ARM_CPUID_PXA260; + cpu->midr = 0x69052903; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } @@ -496,7 +621,7 @@ static void pxa261_initfn(Object *obj) ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); - cpu->midr = ARM_CPUID_PXA261; + cpu->midr = 0x69052d05; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } @@ -506,7 +631,7 @@ static void pxa262_initfn(Object *obj) ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); - cpu->midr = ARM_CPUID_PXA262; + cpu->midr = 0x69052d06; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } @@ -517,7 +642,7 @@ static void pxa270a0_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = ARM_CPUID_PXA270_A0; + cpu->midr = 0x69054110; cpu->ctr = 0xd172172; cpu->reset_sctlr = 
0x00000078; } @@ -528,7 +653,7 @@ static void pxa270a1_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = ARM_CPUID_PXA270_A1; + cpu->midr = 0x69054111; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } @@ -539,7 +664,7 @@ static void pxa270b0_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = ARM_CPUID_PXA270_B0; + cpu->midr = 0x69054112; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } @@ -550,7 +675,7 @@ static void pxa270b1_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = ARM_CPUID_PXA270_B1; + cpu->midr = 0x69054113; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } @@ -561,7 +686,7 @@ static void pxa270c0_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = ARM_CPUID_PXA270_C0; + cpu->midr = 0x69054114; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } @@ -572,7 +697,7 @@ static void pxa270c5_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = ARM_CPUID_PXA270_C5; + cpu->midr = 0x69054117; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } @@ -587,7 +712,7 @@ static void arm_any_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); set_feature(&cpu->env, ARM_FEATURE_ARM_DIV); set_feature(&cpu->env, ARM_FEATURE_V7MP); - cpu->midr = ARM_CPUID_ANY; + cpu->midr = 0xffffffff; } typedef struct ARMCPUInfo { @@ -657,6 +782,7 @@ static const TypeInfo arm_cpu_type_info = { .parent = TYPE_CPU, .instance_size = sizeof(ARMCPU), .instance_init = arm_cpu_initfn, + .instance_finalize = arm_cpu_finalizefn, .abstract = true, .class_size = sizeof(ARMCPUClass), .class_init = arm_cpu_class_init, diff --git a/target-arm/cpu.h b/target-arm/cpu.h index d01285fd57..33afa185e9 100644 --- a/target-arm/cpu.h +++ b/target-arm/cpu.h @@ -107,12 +107,7 @@ typedef struct CPUARMState { /* System control coprocessor (cp15) */ struct { uint32_t c0_cpuid; - uint32_t c0_cachetype; - uint32_t c0_ccsid[16]; /* Cache size. */ - uint32_t c0_clid; /* Cache level. */ uint32_t c0_cssel; /* Cache size selection. */ - uint32_t c0_c1[8]; /* Feature registers. */ - uint32_t c0_c2[8]; /* Instruction set registers. */ uint32_t c1_sys; /* System control register. */ uint32_t c1_coproc; /* Coprocessor access register. */ uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */ @@ -228,12 +223,6 @@ typedef struct CPUARMState { /* Internal CPU feature flags. 
*/ uint32_t features; - /* Coprocessor IO used by peripherals */ - struct { - ARMReadCPFunc *cp_read; - ARMWriteCPFunc *cp_write; - void *opaque; - } cp[15]; void *nvic; const struct arm_boot_info *boot_info; } CPUARMState; @@ -392,6 +381,11 @@ enum arm_features { ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */ ARM_FEATURE_GENERIC_TIMER, ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */ + ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */ + ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */ + ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */ + ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */ + ARM_FEATURE_MPIDR, /* has cp15 MPIDR */ }; static inline int arm_feature(CPUARMState *env, int feature) @@ -406,45 +400,215 @@ void armv7m_nvic_set_pending(void *opaque, int irq); int armv7m_nvic_acknowledge_irq(void *opaque); void armv7m_nvic_complete_irq(void *opaque, int irq); -void cpu_arm_set_cp_io(CPUARMState *env, int cpnum, - ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write, - void *opaque); +/* Interface for defining coprocessor registers. + * Registers are defined in tables of arm_cp_reginfo structs + * which are passed to define_arm_cp_regs(). + */ + +/* When looking up a coprocessor register we look for it + * via an integer which encodes all of: + * coprocessor number + * Crn, Crm, opc1, opc2 fields + * 32 or 64 bit register (ie is it accessed via MRC/MCR + * or via MRRC/MCRR?) + * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field. + * (In this case crn and opc2 should be zero.) + */ +#define ENCODE_CP_REG(cp, is64, crn, crm, opc1, opc2) \ + (((cp) << 16) | ((is64) << 15) | ((crn) << 11) | \ + ((crm) << 7) | ((opc1) << 3) | (opc2)) + +#define DECODE_CPREG_CRN(enc) (((enc) >> 7) & 0xf) + +/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a + * special-behaviour cp reg and bits [15..8] indicate what behaviour + * it has. Otherwise it is a simple cp reg, where CONST indicates that + * TCG can assume the value to be constant (ie load at translate time) + * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END + * indicates that the TB should not be ended after a write to this register + * (the default is that the TB ends after cp writes). OVERRIDE permits + * a register definition to override a previous definition for the + * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the + * old must have the OVERRIDE bit set. + */ +#define ARM_CP_SPECIAL 1 +#define ARM_CP_CONST 2 +#define ARM_CP_64BIT 4 +#define ARM_CP_SUPPRESS_TB_END 8 +#define ARM_CP_OVERRIDE 16 +#define ARM_CP_NOP (ARM_CP_SPECIAL | (1 << 8)) +#define ARM_CP_WFI (ARM_CP_SPECIAL | (2 << 8)) +#define ARM_LAST_SPECIAL ARM_CP_WFI +/* Used only as a terminator for ARMCPRegInfo lists */ +#define ARM_CP_SENTINEL 0xffff +/* Mask of only the flag bits in a type field */ +#define ARM_CP_FLAG_MASK 0x1f + +/* Return true if cptype is a valid type field. This is used to try to + * catch errors where the sentinel has been accidentally left off the end + * of a list of registers. + */ +static inline bool cptype_valid(int cptype) +{ + return ((cptype & ~ARM_CP_FLAG_MASK) == 0) + || ((cptype & ARM_CP_SPECIAL) && + (cptype <= ARM_LAST_SPECIAL)); +} + +/* Access rights: + * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM + * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and + * PL2 (hyp). 
The other level which has Read and Write bits is Secure PL1 + * (ie any of the privileged modes in Secure state, or Monitor mode). + * If a register is accessible in one privilege level it's always accessible + * in higher privilege levels too. Since "Secure PL1" also follows this rule + * (ie anything visible in PL2 is visible in S-PL1, some things are only + * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the + * terminology a little and call this PL3. + * + * If access permissions for a register are more complex than can be + * described with these bits, then use a laxer set of restrictions, and + * do the more restrictive/complex check inside a helper function. + */ +#define PL3_R 0x80 +#define PL3_W 0x40 +#define PL2_R (0x20 | PL3_R) +#define PL2_W (0x10 | PL3_W) +#define PL1_R (0x08 | PL2_R) +#define PL1_W (0x04 | PL2_W) +#define PL0_R (0x02 | PL1_R) +#define PL0_W (0x01 | PL1_W) + +#define PL3_RW (PL3_R | PL3_W) +#define PL2_RW (PL2_R | PL2_W) +#define PL1_RW (PL1_R | PL1_W) +#define PL0_RW (PL0_R | PL0_W) + +static inline int arm_current_pl(CPUARMState *env) +{ + if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR) { + return 0; + } + /* We don't currently implement the Virtualization or TrustZone + * extensions, so PL2 and PL3 don't exist for us. + */ + return 1; +} + +typedef struct ARMCPRegInfo ARMCPRegInfo; + +/* Access functions for coprocessor registers. These should return + * 0 on success, or one of the EXCP_* constants if access should cause + * an exception (in which case *value is not written). + */ +typedef int CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque, + uint64_t *value); +typedef int CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque, + uint64_t value); +/* Hook function for register reset */ +typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque); + +#define CP_ANY 0xff + +/* Definition of an ARM coprocessor register */ +struct ARMCPRegInfo { + /* Name of register (useful mainly for debugging, need not be unique) */ + const char *name; + /* Location of register: coprocessor number and (crn,crm,opc1,opc2) + * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a + * 'wildcard' field -- any value of that field in the MRC/MCR insn + * will be decoded to this register. The register read and write + * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2 + * used by the program, so it is possible to register a wildcard and + * then behave differently on read/write if necessary. + * For 64 bit registers, only crm and opc1 are relevant; crn and opc2 + * must both be zero. + */ + uint8_t cp; + uint8_t crn; + uint8_t crm; + uint8_t opc1; + uint8_t opc2; + /* Register type: ARM_CP_* bits/values */ + int type; + /* Access rights: PL*_[RW] */ + int access; + /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when + * this register was defined: can be used to hand data through to the + * register read/write functions, since they are passed the ARMCPRegInfo*. + */ + void *opaque; + /* Value of this register, if it is ARM_CP_CONST. Otherwise, if + * fieldoffset is non-zero, the reset value of the register. + */ + uint64_t resetvalue; + /* Offset of the field in CPUARMState for this register. This is not + * needed if either: + * 1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs + * 2. both readfn and writefn are specified + */ + ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */ + /* Function for handling reads of this register. 
If NULL, then reads + * will be done by loading from the offset into CPUARMState specified + * by fieldoffset. + */ + CPReadFn *readfn; + /* Function for handling writes of this register. If NULL, then writes + * will be done by writing to the offset into CPUARMState specified + * by fieldoffset. + */ + CPWriteFn *writefn; + /* Function for resetting the register. If NULL, then reset will be done + * by writing resetvalue to the field specified in fieldoffset. If + * fieldoffset is 0 then no reset will be done. + */ + CPResetFn *resetfn; +}; + +/* Macros which are lvalues for the field in CPUARMState for the + * ARMCPRegInfo *ri. + */ +#define CPREG_FIELD32(env, ri) \ + (*(uint32_t *)((char *)(env) + (ri)->fieldoffset)) +#define CPREG_FIELD64(env, ri) \ + (*(uint64_t *)((char *)(env) + (ri)->fieldoffset)) + +#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL } + +void define_arm_cp_regs_with_opaque(ARMCPU *cpu, + const ARMCPRegInfo *regs, void *opaque); +void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, + const ARMCPRegInfo *regs, void *opaque); +static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs) +{ + define_arm_cp_regs_with_opaque(cpu, regs, 0); +} +static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs) +{ + define_one_arm_cp_reg_with_opaque(cpu, regs, 0); +} +const ARMCPRegInfo *get_arm_cp_reginfo(ARMCPU *cpu, uint32_t encoded_cp); + +/* CPWriteFn that can be used to implement writes-ignored behaviour */ +int arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value); +/* CPReadFn that can be used for read-as-zero behaviour */ +int arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value); + +static inline bool cp_access_ok(CPUARMState *env, + const ARMCPRegInfo *ri, int isread) +{ + return (ri->access >> ((arm_current_pl(env) * 2) + isread)) & 1; +} /* Does the core conform to the the "MicroController" profile. e.g. Cortex-M3. Note the M in older cores (eg. ARM7TDMI) stands for Multiply. These are conventional cores (ie. Application or Realtime profile). 
*/ #define IS_M(env) arm_feature(env, ARM_FEATURE_M) -#define ARM_CPUID(env) (env->cp15.c0_cpuid) -#define ARM_CPUID_ARM1026 0x4106a262 -#define ARM_CPUID_ARM926 0x41069265 -#define ARM_CPUID_ARM946 0x41059461 #define ARM_CPUID_TI915T 0x54029152 #define ARM_CPUID_TI925T 0x54029252 -#define ARM_CPUID_SA1100 0x4401A11B -#define ARM_CPUID_SA1110 0x6901B119 -#define ARM_CPUID_PXA250 0x69052100 -#define ARM_CPUID_PXA255 0x69052d00 -#define ARM_CPUID_PXA260 0x69052903 -#define ARM_CPUID_PXA261 0x69052d05 -#define ARM_CPUID_PXA262 0x69052d06 -#define ARM_CPUID_PXA270 0x69054110 -#define ARM_CPUID_PXA270_A0 0x69054110 -#define ARM_CPUID_PXA270_A1 0x69054111 -#define ARM_CPUID_PXA270_B0 0x69054112 -#define ARM_CPUID_PXA270_B1 0x69054113 -#define ARM_CPUID_PXA270_C0 0x69054114 -#define ARM_CPUID_PXA270_C5 0x69054117 -#define ARM_CPUID_ARM1136 0x4117b363 -#define ARM_CPUID_ARM1136_R2 0x4107b362 -#define ARM_CPUID_ARM1176 0x410fb767 -#define ARM_CPUID_ARM11MPCORE 0x410fb022 -#define ARM_CPUID_CORTEXA8 0x410fc080 -#define ARM_CPUID_CORTEXA9 0x410fc090 -#define ARM_CPUID_CORTEXA15 0x412fc0f1 -#define ARM_CPUID_CORTEXM3 0x410fc231 -#define ARM_CPUID_ANY 0xffffffff #if defined(CONFIG_USER_ONLY) #define TARGET_PAGE_BITS 12 @@ -472,7 +636,7 @@ static inline CPUARMState *cpu_init(const char *cpu_model) #define cpu_signal_handler cpu_arm_signal_handler #define cpu_list arm_cpu_list -#define CPU_SAVE_VERSION 6 +#define CPU_SAVE_VERSION 7 /* MMU modes definitions */ #define MMU_MODE0_SUFFIX _kernel diff --git a/target-arm/helper.c b/target-arm/helper.c index bbb1d05d10..23099236ad 100644 --- a/target-arm/helper.c +++ b/target-arm/helper.c @@ -4,6 +4,13 @@ #include "host-utils.h" #include "sysemu.h" +#ifndef CONFIG_USER_ONLY +static inline int get_phys_addr(CPUARMState *env, uint32_t address, + int access_type, int is_user, + uint32_t *phys_ptr, int *prot, + target_ulong *page_size); +#endif + static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) { int nregs; @@ -56,6 +63,1054 @@ static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) return 0; } +static int dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + env->cp15.c3 = value; + tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */ + return 0; +} + +static int fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + if (env->cp15.c13_fcse != value) { + /* Unlike real hardware the qemu TLB uses virtual addresses, + * not modified virtual addresses, so this causes a TLB flush. + */ + tlb_flush(env, 1); + env->cp15.c13_fcse = value; + } + return 0; +} +static int contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + if (env->cp15.c13_context != value && !arm_feature(env, ARM_FEATURE_MPU)) { + /* For VMSA (when not using the LPAE long descriptor page table + * format) this register includes the ASID, so do a TLB flush. + * For PMSA it is purely a process ID and no action is needed. 
+ */ + tlb_flush(env, 1); + } + env->cp15.c13_context = value; + return 0; +} + +static int tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate all (TLBIALL) */ + tlb_flush(env, 1); + return 0; +} + +static int tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ + tlb_flush_page(env, value & TARGET_PAGE_MASK); + return 0; +} + +static int tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate by ASID (TLBIASID) */ + tlb_flush(env, value == 0); + return 0; +} + +static int tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ + tlb_flush_page(env, value & TARGET_PAGE_MASK); + return 0; +} + +static const ARMCPRegInfo cp_reginfo[] = { + /* DBGDIDR: just RAZ. In particular this means the "debug architecture + * version" bits will read as a reserved value, which should cause + * Linux to not try to use the debug hardware. + */ + { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + /* MMU Domain access control / MPU write buffer control */ + { .name = "DACR", .cp = 15, + .crn = 3, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3), + .resetvalue = 0, .writefn = dacr_write }, + { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse), + .resetvalue = 0, .writefn = fcse_write }, + { .name = "CONTEXTIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse), + .resetvalue = 0, .writefn = contextidr_write }, + /* ??? This covers not just the impdef TLB lockdown registers but also + * some v7VMSA registers relating to TEX remap, so it is overly broad. + */ + { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, + /* MMU TLB control. Note that the wildcarding means we cover not just + * the unified TLB ops but also the dside/iside/inner-shareable variants. + */ + { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write, }, + { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write, }, + { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write, }, + { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write, }, + /* Cache maintenance ops; some of this space may be overridden later. */ + { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, + .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, + .type = ARM_CP_NOP | ARM_CP_OVERRIDE }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo not_v6_cp_reginfo[] = { + /* Not all pre-v6 cores implemented this WFI, so this is slightly + * over-broad. 
+ */ + { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2, + .access = PL1_W, .type = ARM_CP_WFI }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo not_v7_cp_reginfo[] = { + /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which + * is UNPREDICTABLE; we choose to NOP as most implementations do). + */ + { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, + .access = PL1_W, .type = ARM_CP_WFI }, + /* L1 cache lockdown. Not architectural in v6 and earlier but in practice + * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and + * OMAPCP will override this space. + */ + { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data), + .resetvalue = 0 }, + { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn), + .resetvalue = 0 }, + /* v6 doesn't have the cache ID registers but Linux reads them anyway */ + { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static int cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + if (env->cp15.c1_coproc != value) { + env->cp15.c1_coproc = value; + /* ??? Is this safe when called from within a TB? */ + tb_flush(env); + } + return 0; +} + +static const ARMCPRegInfo v6_cp_reginfo[] = { + /* prefetch by MVA in v6, NOP in v7 */ + { .name = "MVA_prefetch", + .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NOP }, + { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4, + .access = PL0_W, .type = ARM_CP_NOP }, + { .name = "ISB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4, + .access = PL0_W, .type = ARM_CP_NOP }, + { .name = "ISB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5, + .access = PL0_W, .type = ARM_CP_NOP }, + { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_insn), + .resetvalue = 0, }, + /* Watchpoint Fault Address Register : should actually only be present + * for 1136, 1176, 11MPCore. + */ + { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, }, + { .name = "CPACR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_coproc), + .resetvalue = 0, .writefn = cpacr_write }, + REGINFO_SENTINEL +}; + +static int pmreg_read(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t *value) +{ + /* Generic performance monitor register read function for where + * user access may be allowed by PMUSERENR. 
+ */ + if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) { + return EXCP_UDEF; + } + *value = CPREG_FIELD32(env, ri); + return 0; +} + +static int pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) { + return EXCP_UDEF; + } + /* only the DP, X, D and E bits are writable */ + env->cp15.c9_pmcr &= ~0x39; + env->cp15.c9_pmcr |= (value & 0x39); + return 0; +} + +static int pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) { + return EXCP_UDEF; + } + value &= (1 << 31); + env->cp15.c9_pmcnten |= value; + return 0; +} + +static int pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) { + return EXCP_UDEF; + } + value &= (1 << 31); + env->cp15.c9_pmcnten &= ~value; + return 0; +} + +static int pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) { + return EXCP_UDEF; + } + env->cp15.c9_pmovsr &= ~value; + return 0; +} + +static int pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) { + return EXCP_UDEF; + } + env->cp15.c9_pmxevtyper = value & 0xff; + return 0; +} + +static int pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.c9_pmuserenr = value & 1; + return 0; +} + +static int pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* We have no event counters so only the C bit can be changed */ + value &= (1 << 31); + env->cp15.c9_pminten |= value; + return 0; +} + +static int pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= (1 << 31); + env->cp15.c9_pminten &= ~value; + return 0; +} + +static int ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t *value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + *value = cpu->ccsidr[env->cp15.c0_cssel]; + return 0; +} + +static int csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.c0_cssel = value & 0xf; + return 0; +} + +static const ARMCPRegInfo v7_cp_reginfo[] = { + /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped + * debug components + */ + { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "DBGDRAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ + { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, + .access = PL1_W, .type = ARM_CP_NOP }, + /* Performance monitors are implementation defined in v7, + * but with an ARM recommended set of registers, which we + * follow (although we don't actually implement any counters) + * + * Performance registers fall into three categories: + * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) + * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) + * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) + * For the cases controlled by PMUSERENR we must set .access to PL0_RW + * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. 
+ */ + { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, + .access = PL0_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), + .readfn = pmreg_read, .writefn = pmcntenset_write }, + { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, + .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), + .readfn = pmreg_read, .writefn = pmcntenclr_write }, + { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, + .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), + .readfn = pmreg_read, .writefn = pmovsr_write }, + /* Unimplemented so WI. Strictly speaking write accesses in PL0 should + * respect PMUSERENR. + */ + { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, + .access = PL0_W, .type = ARM_CP_NOP }, + /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE. + * We choose to RAZ/WI. XXX should respect PMUSERENR. + */ + { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, + .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + /* Unimplemented, RAZ/WI. XXX PMUSERENR */ + { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, + .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, + .access = PL0_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper), + .readfn = pmreg_read, .writefn = pmxevtyper_write }, + /* Unimplemented, RAZ/WI. XXX PMUSERENR */ + { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, + .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, + .access = PL0_R | PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), + .resetvalue = 0, + .writefn = pmuserenr_write }, + { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), + .resetvalue = 0, + .writefn = pmintenset_write }, + { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), + .resetvalue = 0, + .writefn = pmintenclr_write }, + { .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr), + .resetvalue = 0, }, + { .name = "CCSIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, + .access = PL1_R, .readfn = ccsidr_read }, + { .name = "CSSELR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c0_cssel), + .writefn = csselr_write, .resetvalue = 0 }, + /* Auxiliary ID register: this actually has an IMPDEF value but for now + * just RAZ for all cores: + */ + { .name = "AIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 7, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static int teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + value &= 1; + env->teecr = value; + return 0; +} + +static int teehbr_read(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t *value) +{ + /* This is a helper function because the user access rights + * depend on the value of the TEECR. 
+ */ + if (arm_current_pl(env) == 0 && (env->teecr & 1)) { + return EXCP_UDEF; + } + *value = env->teehbr; + return 0; +} + +static int teehbr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + if (arm_current_pl(env) == 0 && (env->teecr & 1)) { + return EXCP_UDEF; + } + env->teehbr = value; + return 0; +} + +static const ARMCPRegInfo t2ee_cp_reginfo[] = { + { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), + .resetvalue = 0, + .writefn = teecr_write }, + { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, + .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), + .resetvalue = 0, + .readfn = teehbr_read, .writefn = teehbr_write }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo v6k_cp_reginfo[] = { + { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL0_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c13_tls1), + .resetvalue = 0 }, + { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, + .access = PL0_R|PL1_W, + .fieldoffset = offsetof(CPUARMState, cp15.c13_tls2), + .resetvalue = 0 }, + { .name = "TPIDRPRW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 4, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c13_tls3), + .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo generic_timer_cp_reginfo[] = { + /* Dummy implementation: RAZ/WI the whole crn=14 space */ + { .name = "GENERIC_TIMER", .cp = 15, .crn = 14, + .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static int par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + if (arm_feature(env, ARM_FEATURE_V7)) { + env->cp15.c7_par = value & 0xfffff6ff; + } else { + env->cp15.c7_par = value & 0xfffff1ff; + } + return 0; +} + +#ifndef CONFIG_USER_ONLY +/* get_phys_addr() isn't present for user-mode-only targets */ +static int ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + uint32_t phys_addr; + target_ulong page_size; + int prot; + int ret, is_user = ri->opc2 & 2; + int access_type = ri->opc2 & 1; + + if (ri->opc2 & 4) { + /* Other states are only available with TrustZone */ + return EXCP_UDEF; + } + ret = get_phys_addr(env, value, access_type, is_user, + &phys_addr, &prot, &page_size); + if (ret == 0) { + /* We do not set any attribute bits in the PAR */ + if (page_size == (1 << 24) + && arm_feature(env, ARM_FEATURE_V7)) { + env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1; + } else { + env->cp15.c7_par = phys_addr & 0xfffff000; + } + } else { + env->cp15.c7_par = ((ret & (10 << 1)) >> 5) | + ((ret & (12 << 1)) >> 6) | + ((ret & 0xf) << 1) | 1; + } + return 0; +} +#endif + +static const ARMCPRegInfo vapa_cp_reginfo[] = { + { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.c7_par), + .writefn = par_write }, +#ifndef CONFIG_USER_ONLY + { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, + .access = PL1_W, .writefn = ats_write }, +#endif + REGINFO_SENTINEL +}; + +/* Return basic MPU access permission bits. 
*/ +static uint32_t simple_mpu_ap_bits(uint32_t val) +{ + uint32_t ret; + uint32_t mask; + int i; + ret = 0; + mask = 3; + for (i = 0; i < 16; i += 2) { + ret |= (val >> i) & mask; + mask <<= 2; + } + return ret; +} + +/* Pad basic MPU access permission bits to extended format. */ +static uint32_t extended_mpu_ap_bits(uint32_t val) +{ + uint32_t ret; + uint32_t mask; + int i; + ret = 0; + mask = 3; + for (i = 0; i < 16; i += 2) { + ret |= (val & mask) << i; + mask <<= 2; + } + return ret; +} + +static int pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.c5_data = extended_mpu_ap_bits(value); + return 0; +} + +static int pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t *value) +{ + *value = simple_mpu_ap_bits(env->cp15.c5_data); + return 0; +} + +static int pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.c5_insn = extended_mpu_ap_bits(value); + return 0; +} + +static int pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t *value) +{ + *value = simple_mpu_ap_bits(env->cp15.c5_insn); + return 0; +} + +static int arm946_prbs_read(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t *value) +{ + if (ri->crm > 8) { + return EXCP_UDEF; + } + *value = env->cp15.c6_region[ri->crm]; + return 0; +} + +static int arm946_prbs_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + if (ri->crm > 8) { + return EXCP_UDEF; + } + env->cp15.c6_region[ri->crm] = value; + return 0; +} + +static const ARMCPRegInfo pmsav5_cp_reginfo[] = { + { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, + .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, + { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, + .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, + { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, }, + { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, }, + { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, + { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, + /* Protection region base and size registers */ + { .name = "946_PRBS", .cp = 15, .crn = 6, .crm = CP_ANY, .opc1 = 0, + .opc2 = CP_ANY, .access = PL1_RW, + .readfn = arm946_prbs_read, .writefn = arm946_prbs_write, }, + REGINFO_SENTINEL +}; + +static int vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= 7; + env->cp15.c2_control = value; + env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> value); + env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> value); + return 0; +} + +static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + env->cp15.c2_base_mask = 0xffffc000u; + env->cp15.c2_control = 0; + env->cp15.c2_mask = 0; +} + +static const ARMCPRegInfo vmsa_cp_reginfo[] = { + { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, 
.opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, }, + { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, }, + { .name = "TTBR0", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c2_base0), .resetvalue = 0, }, + { .name = "TTBR1", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c2_base0), .resetvalue = 0, }, + { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL1_RW, .writefn = vmsa_ttbcr_write, + .resetfn = vmsa_ttbcr_reset, + .fieldoffset = offsetof(CPUARMState, cp15.c2_control) }, + { .name = "DFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_data), + .resetvalue = 0, }, + REGINFO_SENTINEL +}; + +static int omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.c15_ticonfig = value & 0xe7; + /* The OS_TYPE bit in this register changes the reported CPUID! */ + env->cp15.c0_cpuid = (value & (1 << 5)) ? + ARM_CPUID_TI915T : ARM_CPUID_TI925T; + return 0; +} + +static int omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.c15_threadid = value & 0xffff; + return 0; +} + +static int omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Wait-for-interrupt (deprecated) */ + cpu_interrupt(env, CPU_INTERRUPT_HALT); + return 0; +} + +static int omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* On OMAP there are registers indicating the max/min index of dcache lines + * containing a dirty line; cache flush operations have to reset these. + */ + env->cp15.c15_i_max = 0x000; + env->cp15.c15_i_min = 0xff0; + return 0; +} + +static const ARMCPRegInfo omap_cp_reginfo[] = { + { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, + .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, }, + { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_NOP }, + { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, + .writefn = omap_ticonfig_write }, + { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, + { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .resetvalue = 0xff0, + .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, + { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, + .writefn = omap_threadid_write }, + { .name = "TI925T_STATUS", .cp = 15, .crn = 15, + .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, + .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, + /* TODO: Peripheral port remap register: + * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller + * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), + * when MMU is off. 
+ */ + { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, + .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, .type = ARM_CP_OVERRIDE, + .writefn = omap_cachemaint_write }, + { .name = "C9", .cp = 15, .crn = 9, + .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, + .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static int xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= 0x3fff; + if (env->cp15.c15_cpar != value) { + /* Changes cp0 to cp13 behavior, so needs a TB flush. */ + tb_flush(env); + env->cp15.c15_cpar = value; + } + return 0; +} + +static const ARMCPRegInfo xscale_cp_reginfo[] = { + { .name = "XSCALE_CPAR", + .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, + .writefn = xscale_cpar_write, }, + { .name = "XSCALE_AUXCR", + .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), + .resetvalue = 0, }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { + /* RAZ/WI the whole crn=15 space, when we don't have a more specific + * implementation of this implementation-defined space. + * Ideally this should eventually disappear in favour of actually + * implementing the correct behaviour for all cores. + */ + { .name = "C15_IMPDEF", .cp = 15, .crn = 15, + .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { + /* Cache status: RAZ because we have no cache so it's always clean */ + { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { + /* We never have a block transfer operation in progress */ + { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, + .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + /* The cache ops themselves: these all NOP for QEMU */ + { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, + .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, + .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, + .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, + .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, + .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, + .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { + /* The cache test-and-clean instructions always return (1 << 30) + * to indicate that there are no dirty cache lines.
+ */ + { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, + .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = (1 << 30) }, + { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, + .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = (1 << 30) }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo strongarm_cp_reginfo[] = { + /* Ignore ReadBuffer accesses */ + { .name = "C9_READBUFFER", .cp = 15, .crn = 9, + .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, + .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE, + .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static int mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t *value) +{ + uint32_t mpidr = env->cpu_index; + /* We don't support setting cluster ID ([8..11]) + * so these bits always RAZ. + */ + if (arm_feature(env, ARM_FEATURE_V7MP)) { + mpidr |= (1 << 31); + /* Cores which are uniprocessor (non-coherent) + * but still implement the MP extensions set + * bit 30. (For instance, A9UP.) However we do + * not currently model any of those cores. + */ + } + *value = mpidr; + return 0; +} + +static const ARMCPRegInfo mpidr_cp_reginfo[] = { + { .name = "MPIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, + .access = PL1_R, .readfn = mpidr_read }, + REGINFO_SENTINEL +}; + +static int sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + env->cp15.c1_sys = value; + /* ??? Lots of these bits are not implemented. */ + /* This may enable/disable the MMU, so do a TLB flush. */ + tlb_flush(env, 1); + return 0; +} + +void register_cp_regs_for_features(ARMCPU *cpu) +{ + /* Register all the coprocessor registers based on feature bits */ + CPUARMState *env = &cpu->env; + if (arm_feature(env, ARM_FEATURE_M)) { + /* M profile has no coprocessor registers */ + return; + } + + define_arm_cp_regs(cpu, cp_reginfo); + if (arm_feature(env, ARM_FEATURE_V6)) { + /* The ID registers all have impdef reset values */ + ARMCPRegInfo v6_idregs[] = { + { .name = "ID_PFR0", .cp = 15, .crn = 0, .crm = 1, + .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->id_pfr0 }, + { .name = "ID_PFR1", .cp = 15, .crn = 0, .crm = 1, + .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->id_pfr1 }, + { .name = "ID_DFR0", .cp = 15, .crn = 0, .crm = 1, + .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->id_dfr0 }, + { .name = "ID_AFR0", .cp = 15, .crn = 0, .crm = 1, + .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->id_afr0 }, + { .name = "ID_MMFR0", .cp = 15, .crn = 0, .crm = 1, + .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->id_mmfr0 }, + { .name = "ID_MMFR1", .cp = 15, .crn = 0, .crm = 1, + .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->id_mmfr1 }, + { .name = "ID_MMFR2", .cp = 15, .crn = 0, .crm = 1, + .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->id_mmfr2 }, + { .name = "ID_MMFR3", .cp = 15, .crn = 0, .crm = 1, + .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->id_mmfr3 }, + { .name = "ID_ISAR0", .cp = 15, .crn = 0, .crm = 2, + .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->id_isar0 }, + { .name = "ID_ISAR1", .cp = 15, .crn = 0, .crm = 2, + .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->id_isar1 }, + { .name = "ID_ISAR2", .cp = 15, .crn = 0, .crm = 2, + .opc1 = 
0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->id_isar2 }, + { .name = "ID_ISAR3", .cp = 15, .crn = 0, .crm = 2, + .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->id_isar3 }, + { .name = "ID_ISAR4", .cp = 15, .crn = 0, .crm = 2, + .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->id_isar4 }, + { .name = "ID_ISAR5", .cp = 15, .crn = 0, .crm = 2, + .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->id_isar5 }, + /* 6..7 are as yet unallocated and must RAZ */ + { .name = "ID_ISAR6", .cp = 15, .crn = 0, .crm = 2, + .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ID_ISAR7", .cp = 15, .crn = 0, .crm = 2, + .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = 0 }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, v6_idregs); + define_arm_cp_regs(cpu, v6_cp_reginfo); + } else { + define_arm_cp_regs(cpu, not_v6_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_V6K)) { + define_arm_cp_regs(cpu, v6k_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_V7)) { + /* v7 performance monitor control register: same implementor + * field as main ID register, and we implement no event counters. + */ + ARMCPRegInfo pmcr = { + .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, + .access = PL0_RW, .resetvalue = cpu->midr & 0xff000000, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), + .readfn = pmreg_read, .writefn = pmcr_write + }; + ARMCPRegInfo clidr = { + .name = "CLIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr + }; + define_one_arm_cp_reg(cpu, &pmcr); + define_one_arm_cp_reg(cpu, &clidr); + define_arm_cp_regs(cpu, v7_cp_reginfo); + } else { + define_arm_cp_regs(cpu, not_v7_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_MPU)) { + /* These are the MPU registers prior to PMSAv6. Any new + * PMSA core later than the ARM946 will require that we + * implement the PMSAv6 or PMSAv7 registers, which are + * completely different. 
+ */ + assert(!arm_feature(env, ARM_FEATURE_V6)); + define_arm_cp_regs(cpu, pmsav5_cp_reginfo); + } else { + define_arm_cp_regs(cpu, vmsa_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { + define_arm_cp_regs(cpu, t2ee_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { + define_arm_cp_regs(cpu, generic_timer_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_VAPA)) { + define_arm_cp_regs(cpu, vapa_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { + define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { + define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { + define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_OMAPCP)) { + define_arm_cp_regs(cpu, omap_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_STRONGARM)) { + define_arm_cp_regs(cpu, strongarm_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_XSCALE)) { + define_arm_cp_regs(cpu, xscale_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { + define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_MPIDR)) { + define_arm_cp_regs(cpu, mpidr_cp_reginfo); + } + /* Slightly awkwardly, the OMAP and StrongARM cores need all of + * cp15 crn=0 to be writes-ignored, whereas for other cores they should + * be read-only (ie write causes UNDEF exception). + */ + { + ARMCPRegInfo id_cp_reginfo[] = { + /* Note that the MIDR isn't a simple constant register because + * of the TI925 behaviour where writes to another register can + * cause the MIDR value to change. + */ + { .name = "MIDR", + .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_R, .resetvalue = cpu->midr, + .writefn = arm_cp_write_ignore, + .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid) }, + { .name = "CTR", + .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, + { .name = "TCMTR", + .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "TLBTR", + .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */ + { .name = "DUMMY", + .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "DUMMY", + .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "DUMMY", + .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "DUMMY", + .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "DUMMY", + .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + REGINFO_SENTINEL + }; + ARMCPRegInfo crn0_wi_reginfo = { + .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, + .type = ARM_CP_NOP | ARM_CP_OVERRIDE + }; + if (arm_feature(env, ARM_FEATURE_OMAPCP) || + arm_feature(env, ARM_FEATURE_STRONGARM)) { + ARMCPRegInfo *r; + /* Register the blanket "writes ignored" value first to cover the + * whole space. 
Then define the specific ID registers, but update + * their access field to allow write access, so that they ignore + * writes rather than causing them to UNDEF. + */ + define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); + for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { + r->access = PL1_RW; + define_one_arm_cp_reg(cpu, r); + } + } else { + /* Just register the standard ID registers (read-only, meaning + * that writes will UNDEF). + */ + define_arm_cp_regs(cpu, id_cp_reginfo); + } + } + + if (arm_feature(env, ARM_FEATURE_AUXCR)) { + ARMCPRegInfo auxcr = { + .name = "AUXCR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST, + .resetvalue = cpu->reset_auxcr + }; + define_one_arm_cp_reg(cpu, &auxcr); + } + + /* Generic registers whose values depend on the implementation */ + { + ARMCPRegInfo sctlr = { + .name = "SCTLR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys), + .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr + }; + if (arm_feature(env, ARM_FEATURE_XSCALE)) { + /* Normally we would always end the TB on an SCTLR write, but Linux + * arch/arm/mach-pxa/sleep.S expects two instructions following + * an MMU enable to execute from cache. Imitate this behaviour. + */ + sctlr.type |= ARM_CP_SUPPRESS_TB_END; + } + define_one_arm_cp_reg(cpu, &sctlr); + } +} + ARMCPU *cpu_arm_init(const char *cpu_model) { ARMCPU *cpu; @@ -137,6 +1192,107 @@ void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf) g_slist_free(list); } +void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, + const ARMCPRegInfo *r, void *opaque) +{ + /* Define implementations of coprocessor registers. + * We store these in a hashtable because typically + * there are less than 150 registers in a space which + * is 16*16*16*8*8 = 262144 in size. + * Wildcarding is supported for the crm, opc1 and opc2 fields. + * If a register is defined twice then the second definition is + * used, so this can be used to define some generic registers and + * then override them with implementation specific variations. + * At least one of the original and the second definition should + * include ARM_CP_OVERRIDE in its type bits -- this is just a guard + * against accidental use. + */ + int crm, opc1, opc2; + int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; + int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; + int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; + int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; + int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; + int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; + /* 64 bit registers have only CRm and Opc1 fields */ + assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); + /* Check that the register definition has enough info to handle + * reads and writes if they are permitted. + */ + if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { + if (r->access & PL3_R) { + assert(r->fieldoffset || r->readfn); + } + if (r->access & PL3_W) { + assert(r->fieldoffset || r->writefn); + } + } + /* Bad type field probably means missing sentinel at end of reg list */ + assert(cptype_valid(r->type)); + for (crm = crmmin; crm <= crmmax; crm++) { + for (opc1 = opc1min; opc1 <= opc1max; opc1++) { + for (opc2 = opc2min; opc2 <= opc2max; opc2++) { + uint32_t *key = g_new(uint32_t, 1); + ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); + int is64 = (r->type & ARM_CP_64BIT) ? 
1 : 0; + *key = ENCODE_CP_REG(r->cp, is64, r->crn, crm, opc1, opc2); + r2->opaque = opaque; + /* Make sure reginfo passed to helpers for wildcarded regs + * has the correct crm/opc1/opc2 for this reg, not CP_ANY: + */ + r2->crm = crm; + r2->opc1 = opc1; + r2->opc2 = opc2; + /* Overriding of an existing definition must be explicitly + * requested. + */ + if (!(r->type & ARM_CP_OVERRIDE)) { + ARMCPRegInfo *oldreg; + oldreg = g_hash_table_lookup(cpu->cp_regs, key); + if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { + fprintf(stderr, "Register redefined: cp=%d %d bit " + "crn=%d crm=%d opc1=%d opc2=%d, " + "was %s, now %s\n", r2->cp, 32 + 32 * is64, + r2->crn, r2->crm, r2->opc1, r2->opc2, + oldreg->name, r2->name); + assert(0); + } + } + g_hash_table_insert(cpu->cp_regs, key, r2); + } + } + } +} + +void define_arm_cp_regs_with_opaque(ARMCPU *cpu, + const ARMCPRegInfo *regs, void *opaque) +{ + /* Define a whole list of registers */ + const ARMCPRegInfo *r; + for (r = regs; r->type != ARM_CP_SENTINEL; r++) { + define_one_arm_cp_reg_with_opaque(cpu, r, opaque); + } +} + +const ARMCPRegInfo *get_arm_cp_reginfo(ARMCPU *cpu, uint32_t encoded_cp) +{ + return g_hash_table_lookup(cpu->cp_regs, &encoded_cp); +} + +int arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Helper coprocessor write function for write-ignore registers */ + return 0; +} + +int arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value) +{ + /* Helper coprocessor read function for read-as-zero registers */ + *value = 0; + return 0; +} + static int bad_mode_switch(CPUARMState *env, int mode) { /* Return true if it is not valid for us to switch to @@ -286,31 +1442,6 @@ int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw, } /* These should probably raise undefined insn exceptions. */ -void HELPER(set_cp)(CPUARMState *env, uint32_t insn, uint32_t val) -{ - int op1 = (insn >> 8) & 0xf; - cpu_abort(env, "cp%i insn %08x\n", op1, insn); - return; -} - -uint32_t HELPER(get_cp)(CPUARMState *env, uint32_t insn) -{ - int op1 = (insn >> 8) & 0xf; - cpu_abort(env, "cp%i insn %08x\n", op1, insn); - return 0; -} - -void HELPER(set_cp15)(CPUARMState *env, uint32_t insn, uint32_t val) -{ - cpu_abort(env, "cp15 insn %08x\n", insn); -} - -uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn) -{ - cpu_abort(env, "cp15 insn %08x\n", insn); -} - -/* These should probably raise undefined insn exceptions. */ void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) { cpu_abort(env, "v7m_mrs %d\n", reg); @@ -1036,872 +2167,6 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUARMState *env, target_ulong addr) return phys_addr; } -void HELPER(set_cp)(CPUARMState *env, uint32_t insn, uint32_t val) -{ - int cp_num = (insn >> 8) & 0xf; - int cp_info = (insn >> 5) & 7; - int src = (insn >> 16) & 0xf; - int operand = insn & 0xf; - - if (env->cp[cp_num].cp_write) - env->cp[cp_num].cp_write(env->cp[cp_num].opaque, - cp_info, src, operand, val); -} - -uint32_t HELPER(get_cp)(CPUARMState *env, uint32_t insn) -{ - int cp_num = (insn >> 8) & 0xf; - int cp_info = (insn >> 5) & 7; - int dest = (insn >> 16) & 0xf; - int operand = insn & 0xf; - - if (env->cp[cp_num].cp_read) - return env->cp[cp_num].cp_read(env->cp[cp_num].opaque, - cp_info, dest, operand); - return 0; -} - -/* Return basic MPU access permission bits.
*/ -static uint32_t simple_mpu_ap_bits(uint32_t val) -{ - uint32_t ret; - uint32_t mask; - int i; - ret = 0; - mask = 3; - for (i = 0; i < 16; i += 2) { - ret |= (val >> i) & mask; - mask <<= 2; - } - return ret; -} - -/* Pad basic MPU access permission bits to extended format. */ -static uint32_t extended_mpu_ap_bits(uint32_t val) -{ - uint32_t ret; - uint32_t mask; - int i; - ret = 0; - mask = 3; - for (i = 0; i < 16; i += 2) { - ret |= (val & mask) << i; - mask <<= 2; - } - return ret; -} - -void HELPER(set_cp15)(CPUARMState *env, uint32_t insn, uint32_t val) -{ - int op1; - int op2; - int crm; - - op1 = (insn >> 21) & 7; - op2 = (insn >> 5) & 7; - crm = insn & 0xf; - switch ((insn >> 16) & 0xf) { - case 0: - /* ID codes. */ - if (arm_feature(env, ARM_FEATURE_XSCALE)) - break; - if (arm_feature(env, ARM_FEATURE_OMAPCP)) - break; - if (arm_feature(env, ARM_FEATURE_V7) - && op1 == 2 && crm == 0 && op2 == 0) { - env->cp15.c0_cssel = val & 0xf; - break; - } - goto bad_reg; - case 1: /* System configuration. */ - if (arm_feature(env, ARM_FEATURE_V7) - && op1 == 0 && crm == 1 && op2 == 0) { - env->cp15.c1_scr = val; - break; - } - if (arm_feature(env, ARM_FEATURE_OMAPCP)) - op2 = 0; - switch (op2) { - case 0: - if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0) - env->cp15.c1_sys = val; - /* ??? Lots of these bits are not implemented. */ - /* This may enable/disable the MMU, so do a TLB flush. */ - tlb_flush(env, 1); - break; - case 1: /* Auxiliary control register. */ - if (arm_feature(env, ARM_FEATURE_XSCALE)) { - env->cp15.c1_xscaleauxcr = val; - break; - } - /* Not implemented. */ - break; - case 2: - if (arm_feature(env, ARM_FEATURE_XSCALE)) - goto bad_reg; - if (env->cp15.c1_coproc != val) { - env->cp15.c1_coproc = val; - /* ??? Is this safe when called from within a TB? */ - tb_flush(env); - } - break; - default: - goto bad_reg; - } - break; - case 2: /* MMU Page table control / MPU cache control. */ - if (arm_feature(env, ARM_FEATURE_MPU)) { - switch (op2) { - case 0: - env->cp15.c2_data = val; - break; - case 1: - env->cp15.c2_insn = val; - break; - default: - goto bad_reg; - } - } else { - switch (op2) { - case 0: - env->cp15.c2_base0 = val; - break; - case 1: - env->cp15.c2_base1 = val; - break; - case 2: - val &= 7; - env->cp15.c2_control = val; - env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val); - env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val); - break; - default: - goto bad_reg; - } - } - break; - case 3: /* MMU Domain access control / MPU write buffer control. */ - env->cp15.c3 = val; - tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */ - break; - case 4: /* Reserved. */ - goto bad_reg; - case 5: /* MMU Fault status / MPU access permission. */ - if (arm_feature(env, ARM_FEATURE_OMAPCP)) - op2 = 0; - switch (op2) { - case 0: - if (arm_feature(env, ARM_FEATURE_MPU)) - val = extended_mpu_ap_bits(val); - env->cp15.c5_data = val; - break; - case 1: - if (arm_feature(env, ARM_FEATURE_MPU)) - val = extended_mpu_ap_bits(val); - env->cp15.c5_insn = val; - break; - case 2: - if (!arm_feature(env, ARM_FEATURE_MPU)) - goto bad_reg; - env->cp15.c5_data = val; - break; - case 3: - if (!arm_feature(env, ARM_FEATURE_MPU)) - goto bad_reg; - env->cp15.c5_insn = val; - break; - default: - goto bad_reg; - } - break; - case 6: /* MMU Fault address / MPU base/size. 
*/ - if (arm_feature(env, ARM_FEATURE_MPU)) { - if (crm >= 8) - goto bad_reg; - env->cp15.c6_region[crm] = val; - } else { - if (arm_feature(env, ARM_FEATURE_OMAPCP)) - op2 = 0; - switch (op2) { - case 0: - env->cp15.c6_data = val; - break; - case 1: /* ??? This is WFAR on armv6 */ - case 2: - env->cp15.c6_insn = val; - break; - default: - goto bad_reg; - } - } - break; - case 7: /* Cache control. */ - env->cp15.c15_i_max = 0x000; - env->cp15.c15_i_min = 0xff0; - if (op1 != 0) { - goto bad_reg; - } - /* No cache, so nothing to do except VA->PA translations. */ - if (arm_feature(env, ARM_FEATURE_VAPA)) { - switch (crm) { - case 4: - if (arm_feature(env, ARM_FEATURE_V7)) { - env->cp15.c7_par = val & 0xfffff6ff; - } else { - env->cp15.c7_par = val & 0xfffff1ff; - } - break; - case 8: { - uint32_t phys_addr; - target_ulong page_size; - int prot; - int ret, is_user = op2 & 2; - int access_type = op2 & 1; - - if (op2 & 4) { - /* Other states are only available with TrustZone */ - goto bad_reg; - } - ret = get_phys_addr(env, val, access_type, is_user, - &phys_addr, &prot, &page_size); - if (ret == 0) { - /* We do not set any attribute bits in the PAR */ - if (page_size == (1 << 24) - && arm_feature(env, ARM_FEATURE_V7)) { - env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1; - } else { - env->cp15.c7_par = phys_addr & 0xfffff000; - } - } else { - env->cp15.c7_par = ((ret & (10 << 1)) >> 5) | - ((ret & (12 << 1)) >> 6) | - ((ret & 0xf) << 1) | 1; - } - break; - } - } - } - break; - case 8: /* MMU TLB control. */ - switch (op2) { - case 0: /* Invalidate all (TLBIALL) */ - tlb_flush(env, 1); - break; - case 1: /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ - tlb_flush_page(env, val & TARGET_PAGE_MASK); - break; - case 2: /* Invalidate by ASID (TLBIASID) */ - tlb_flush(env, val == 0); - break; - case 3: /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ - tlb_flush_page(env, val & TARGET_PAGE_MASK); - break; - default: - goto bad_reg; - } - break; - case 9: - if (arm_feature(env, ARM_FEATURE_OMAPCP)) - break; - if (arm_feature(env, ARM_FEATURE_STRONGARM)) - break; /* Ignore ReadBuffer access */ - switch (crm) { - case 0: /* Cache lockdown. */ - switch (op1) { - case 0: /* L1 cache. */ - switch (op2) { - case 0: - env->cp15.c9_data = val; - break; - case 1: - env->cp15.c9_insn = val; - break; - default: - goto bad_reg; - } - break; - case 1: /* L2 cache. */ - /* Ignore writes to L2 lockdown/auxiliary registers. */ - break; - default: - goto bad_reg; - } - break; - case 1: /* TCM memory region registers. */ - /* Not implemented. 
*/ - goto bad_reg; - case 12: /* Performance monitor control */ - /* Performance monitors are implementation defined in v7, - * but with an ARM recommended set of registers, which we - * follow (although we don't actually implement any counters) - */ - if (!arm_feature(env, ARM_FEATURE_V7)) { - goto bad_reg; - } - switch (op2) { - case 0: /* performance monitor control register */ - /* only the DP, X, D and E bits are writable */ - env->cp15.c9_pmcr &= ~0x39; - env->cp15.c9_pmcr |= (val & 0x39); - break; - case 1: /* Count enable set register */ - val &= (1 << 31); - env->cp15.c9_pmcnten |= val; - break; - case 2: /* Count enable clear */ - val &= (1 << 31); - env->cp15.c9_pmcnten &= ~val; - break; - case 3: /* Overflow flag status */ - env->cp15.c9_pmovsr &= ~val; - break; - case 4: /* Software increment */ - /* RAZ/WI since we don't implement the software-count event */ - break; - case 5: /* Event counter selection register */ - /* Since we don't implement any events, writing to this register - * is actually UNPREDICTABLE. So we choose to RAZ/WI. - */ - break; - default: - goto bad_reg; - } - break; - case 13: /* Performance counters */ - if (!arm_feature(env, ARM_FEATURE_V7)) { - goto bad_reg; - } - switch (op2) { - case 0: /* Cycle count register: not implemented, so RAZ/WI */ - break; - case 1: /* Event type select */ - env->cp15.c9_pmxevtyper = val & 0xff; - break; - case 2: /* Event count register */ - /* Unimplemented (we have no events), RAZ/WI */ - break; - default: - goto bad_reg; - } - break; - case 14: /* Performance monitor control */ - if (!arm_feature(env, ARM_FEATURE_V7)) { - goto bad_reg; - } - switch (op2) { - case 0: /* user enable */ - env->cp15.c9_pmuserenr = val & 1; - /* changes access rights for cp registers, so flush tbs */ - tb_flush(env); - break; - case 1: /* interrupt enable set */ - /* We have no event counters so only the C bit can be changed */ - val &= (1 << 31); - env->cp15.c9_pminten |= val; - break; - case 2: /* interrupt enable clear */ - val &= (1 << 31); - env->cp15.c9_pminten &= ~val; - break; - } - break; - default: - goto bad_reg; - } - break; - case 10: /* MMU TLB lockdown. */ - /* ??? TLB lockdown not implemented. */ - break; - case 12: /* Reserved. */ - goto bad_reg; - case 13: /* Process ID. */ - switch (op2) { - case 0: - /* Unlike real hardware the qemu TLB uses virtual addresses, - not modified virtual addresses, so this causes a TLB flush. - */ - if (env->cp15.c13_fcse != val) - tlb_flush(env, 1); - env->cp15.c13_fcse = val; - break; - case 1: - /* This changes the ASID, so do a TLB flush. */ - if (env->cp15.c13_context != val - && !arm_feature(env, ARM_FEATURE_MPU)) - tlb_flush(env, 0); - env->cp15.c13_context = val; - break; - default: - goto bad_reg; - } - break; - case 14: /* Generic timer */ - if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { - /* Dummy implementation: RAZ/WI for all */ - break; - } - goto bad_reg; - case 15: /* Implementation specific. */ - if (arm_feature(env, ARM_FEATURE_XSCALE)) { - if (op2 == 0 && crm == 1) { - if (env->cp15.c15_cpar != (val & 0x3fff)) { - /* Changes cp0 to cp13 behavior, so needs a TB flush. */ - tb_flush(env); - env->cp15.c15_cpar = val & 0x3fff; - } - break; - } - goto bad_reg; - } - if (arm_feature(env, ARM_FEATURE_OMAPCP)) { - switch (crm) { - case 0: - break; - case 1: /* Set TI925T configuration. */ - env->cp15.c15_ticonfig = val & 0xe7; - env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */ - ARM_CPUID_TI915T : ARM_CPUID_TI925T; - break; - case 2: /* Set I_max. 
*/ - env->cp15.c15_i_max = val; - break; - case 3: /* Set I_min. */ - env->cp15.c15_i_min = val; - break; - case 4: /* Set thread-ID. */ - env->cp15.c15_threadid = val & 0xffff; - break; - case 8: /* Wait-for-interrupt (deprecated). */ - cpu_interrupt(env, CPU_INTERRUPT_HALT); - break; - default: - goto bad_reg; - } - } - if (ARM_CPUID(env) == ARM_CPUID_CORTEXA9) { - switch (crm) { - case 0: - if ((op1 == 0) && (op2 == 0)) { - env->cp15.c15_power_control = val; - } else if ((op1 == 0) && (op2 == 1)) { - env->cp15.c15_diagnostic = val; - } else if ((op1 == 0) && (op2 == 2)) { - env->cp15.c15_power_diagnostic = val; - } - default: - break; - } - } - break; - } - return; -bad_reg: - /* ??? For debugging only. Should raise illegal instruction exception. */ - cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n", - (insn >> 16) & 0xf, crm, op1, op2); -} - -uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn) -{ - int op1; - int op2; - int crm; - - op1 = (insn >> 21) & 7; - op2 = (insn >> 5) & 7; - crm = insn & 0xf; - switch ((insn >> 16) & 0xf) { - case 0: /* ID codes. */ - switch (op1) { - case 0: - switch (crm) { - case 0: - switch (op2) { - case 0: /* Device ID. */ - return env->cp15.c0_cpuid; - case 1: /* Cache Type. */ - return env->cp15.c0_cachetype; - case 2: /* TCM status. */ - return 0; - case 3: /* TLB type register. */ - return 0; /* No lockable TLB entries. */ - case 5: /* MPIDR */ - /* The MPIDR was standardised in v7; prior to - * this it was implemented only in the 11MPCore. - * For all other pre-v7 cores it does not exist. - */ - if (arm_feature(env, ARM_FEATURE_V7) || - ARM_CPUID(env) == ARM_CPUID_ARM11MPCORE) { - int mpidr = env->cpu_index; - /* We don't support setting cluster ID ([8..11]) - * so these bits always RAZ. - */ - if (arm_feature(env, ARM_FEATURE_V7MP)) { - mpidr |= (1 << 31); - /* Cores which are uniprocessor (non-coherent) - * but still implement the MP extensions set - * bit 30. (For instance, A9UP.) However we do - * not currently model any of those cores. - */ - } - return mpidr; - } - /* otherwise fall through to the unimplemented-reg case */ - default: - goto bad_reg; - } - case 1: - if (!arm_feature(env, ARM_FEATURE_V6)) - goto bad_reg; - return env->cp15.c0_c1[op2]; - case 2: - if (!arm_feature(env, ARM_FEATURE_V6)) - goto bad_reg; - return env->cp15.c0_c2[op2]; - case 3: case 4: case 5: case 6: case 7: - return 0; - default: - goto bad_reg; - } - case 1: - /* These registers aren't documented on arm11 cores. However - Linux looks at them anyway. */ - if (!arm_feature(env, ARM_FEATURE_V6)) - goto bad_reg; - if (crm != 0) - goto bad_reg; - if (!arm_feature(env, ARM_FEATURE_V7)) - return 0; - - switch (op2) { - case 0: - return env->cp15.c0_ccsid[env->cp15.c0_cssel]; - case 1: - return env->cp15.c0_clid; - case 7: - return 0; - } - goto bad_reg; - case 2: - if (op2 != 0 || crm != 0) - goto bad_reg; - return env->cp15.c0_cssel; - default: - goto bad_reg; - } - case 1: /* System configuration. */ - if (arm_feature(env, ARM_FEATURE_V7) - && op1 == 0 && crm == 1 && op2 == 0) { - return env->cp15.c1_scr; - } - if (arm_feature(env, ARM_FEATURE_OMAPCP)) - op2 = 0; - switch (op2) { - case 0: /* Control register. */ - return env->cp15.c1_sys; - case 1: /* Auxiliary control register. 
*/ - if (arm_feature(env, ARM_FEATURE_XSCALE)) - return env->cp15.c1_xscaleauxcr; - if (!arm_feature(env, ARM_FEATURE_AUXCR)) - goto bad_reg; - switch (ARM_CPUID(env)) { - case ARM_CPUID_ARM1026: - return 1; - case ARM_CPUID_ARM1136: - case ARM_CPUID_ARM1136_R2: - case ARM_CPUID_ARM1176: - return 7; - case ARM_CPUID_ARM11MPCORE: - return 1; - case ARM_CPUID_CORTEXA8: - return 2; - case ARM_CPUID_CORTEXA9: - case ARM_CPUID_CORTEXA15: - return 0; - default: - goto bad_reg; - } - case 2: /* Coprocessor access register. */ - if (arm_feature(env, ARM_FEATURE_XSCALE)) - goto bad_reg; - return env->cp15.c1_coproc; - default: - goto bad_reg; - } - case 2: /* MMU Page table control / MPU cache control. */ - if (arm_feature(env, ARM_FEATURE_MPU)) { - switch (op2) { - case 0: - return env->cp15.c2_data; - break; - case 1: - return env->cp15.c2_insn; - break; - default: - goto bad_reg; - } - } else { - switch (op2) { - case 0: - return env->cp15.c2_base0; - case 1: - return env->cp15.c2_base1; - case 2: - return env->cp15.c2_control; - default: - goto bad_reg; - } - } - case 3: /* MMU Domain access control / MPU write buffer control. */ - return env->cp15.c3; - case 4: /* Reserved. */ - goto bad_reg; - case 5: /* MMU Fault status / MPU access permission. */ - if (arm_feature(env, ARM_FEATURE_OMAPCP)) - op2 = 0; - switch (op2) { - case 0: - if (arm_feature(env, ARM_FEATURE_MPU)) - return simple_mpu_ap_bits(env->cp15.c5_data); - return env->cp15.c5_data; - case 1: - if (arm_feature(env, ARM_FEATURE_MPU)) - return simple_mpu_ap_bits(env->cp15.c5_insn); - return env->cp15.c5_insn; - case 2: - if (!arm_feature(env, ARM_FEATURE_MPU)) - goto bad_reg; - return env->cp15.c5_data; - case 3: - if (!arm_feature(env, ARM_FEATURE_MPU)) - goto bad_reg; - return env->cp15.c5_insn; - default: - goto bad_reg; - } - case 6: /* MMU Fault address. */ - if (arm_feature(env, ARM_FEATURE_MPU)) { - if (crm >= 8) - goto bad_reg; - return env->cp15.c6_region[crm]; - } else { - if (arm_feature(env, ARM_FEATURE_OMAPCP)) - op2 = 0; - switch (op2) { - case 0: - return env->cp15.c6_data; - case 1: - if (arm_feature(env, ARM_FEATURE_V6)) { - /* Watchpoint Fault Adrress. */ - return 0; /* Not implemented. */ - } else { - /* Instruction Fault Adrress. */ - /* Arm9 doesn't have an IFAR, but implementing it anyway - shouldn't do any harm. */ - return env->cp15.c6_insn; - } - case 2: - if (arm_feature(env, ARM_FEATURE_V6)) { - /* Instruction Fault Adrress. */ - return env->cp15.c6_insn; - } else { - goto bad_reg; - } - default: - goto bad_reg; - } - } - case 7: /* Cache control. */ - if (crm == 4 && op1 == 0 && op2 == 0) { - return env->cp15.c7_par; - } - /* FIXME: Should only clear Z flag if destination is r15. */ - env->ZF = 0; - return 0; - case 8: /* MMU TLB control. */ - goto bad_reg; - case 9: - switch (crm) { - case 0: /* Cache lockdown */ - switch (op1) { - case 0: /* L1 cache. */ - if (arm_feature(env, ARM_FEATURE_OMAPCP)) { - return 0; - } - switch (op2) { - case 0: - return env->cp15.c9_data; - case 1: - return env->cp15.c9_insn; - default: - goto bad_reg; - } - case 1: /* L2 cache */ - /* L2 Lockdown and Auxiliary control. */ - switch (op2) { - case 0: - /* L2 cache lockdown (A8 only) */ - return 0; - case 2: - /* L2 cache auxiliary control (A8) or control (A15) */ - if (ARM_CPUID(env) == ARM_CPUID_CORTEXA15) { - /* Linux wants the number of processors from here. - * Might as well set the interrupt-controller bit too. 
- */ - return ((smp_cpus - 1) << 24) | (1 << 23); - } - return 0; - case 3: - /* L2 cache extended control (A15) */ - return 0; - default: - goto bad_reg; - } - default: - goto bad_reg; - } - break; - case 12: /* Performance monitor control */ - if (!arm_feature(env, ARM_FEATURE_V7)) { - goto bad_reg; - } - switch (op2) { - case 0: /* performance monitor control register */ - return env->cp15.c9_pmcr; - case 1: /* count enable set */ - case 2: /* count enable clear */ - return env->cp15.c9_pmcnten; - case 3: /* overflow flag status */ - return env->cp15.c9_pmovsr; - case 4: /* software increment */ - case 5: /* event counter selection register */ - return 0; /* Unimplemented, RAZ/WI */ - default: - goto bad_reg; - } - case 13: /* Performance counters */ - if (!arm_feature(env, ARM_FEATURE_V7)) { - goto bad_reg; - } - switch (op2) { - case 1: /* Event type select */ - return env->cp15.c9_pmxevtyper; - case 0: /* Cycle count register */ - case 2: /* Event count register */ - /* Unimplemented, so RAZ/WI */ - return 0; - default: - goto bad_reg; - } - case 14: /* Performance monitor control */ - if (!arm_feature(env, ARM_FEATURE_V7)) { - goto bad_reg; - } - switch (op2) { - case 0: /* user enable */ - return env->cp15.c9_pmuserenr; - case 1: /* interrupt enable set */ - case 2: /* interrupt enable clear */ - return env->cp15.c9_pminten; - default: - goto bad_reg; - } - default: - goto bad_reg; - } - break; - case 10: /* MMU TLB lockdown. */ - /* ??? TLB lockdown not implemented. */ - return 0; - case 11: /* TCM DMA control. */ - case 12: /* Reserved. */ - goto bad_reg; - case 13: /* Process ID. */ - switch (op2) { - case 0: - return env->cp15.c13_fcse; - case 1: - return env->cp15.c13_context; - default: - goto bad_reg; - } - case 14: /* Generic timer */ - if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { - /* Dummy implementation: RAZ/WI for all */ - return 0; - } - goto bad_reg; - case 15: /* Implementation specific. */ - if (arm_feature(env, ARM_FEATURE_XSCALE)) { - if (op2 == 0 && crm == 1) - return env->cp15.c15_cpar; - - goto bad_reg; - } - if (arm_feature(env, ARM_FEATURE_OMAPCP)) { - switch (crm) { - case 0: - return 0; - case 1: /* Read TI925T configuration. */ - return env->cp15.c15_ticonfig; - case 2: /* Read I_max. */ - return env->cp15.c15_i_max; - case 3: /* Read I_min. */ - return env->cp15.c15_i_min; - case 4: /* Read thread-ID. */ - return env->cp15.c15_threadid; - case 8: /* TI925T_status */ - return 0; - } - /* TODO: Peripheral port remap register: - * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt - * controller base address at $rn & ~0xfff and map size of - * 0x200 << ($rn & 0xfff), when MMU is off. */ - goto bad_reg; - } - if (ARM_CPUID(env) == ARM_CPUID_CORTEXA9) { - switch (crm) { - case 0: - if ((op1 == 4) && (op2 == 0)) { - /* The config_base_address should hold the value of - * the peripheral base. ARM should get this from a CPU - * object property, but that support isn't available in - * December 2011. Default to 0 for now and board models - * that care can set it by a private hook */ - return env->cp15.c15_config_base_address; - } else if ((op1 == 0) && (op2 == 0)) { - /* power_control should be set to maximum latency. 
Again, - default to 0 and set by private hook */ - return env->cp15.c15_power_control; - } else if ((op1 == 0) && (op2 == 1)) { - return env->cp15.c15_diagnostic; - } else if ((op1 == 0) && (op2 == 2)) { - return env->cp15.c15_power_diagnostic; - } - break; - case 1: /* NEON Busy */ - return 0; - case 5: /* tlb lockdown */ - case 6: - case 7: - if ((op1 == 5) && (op2 == 2)) { - return 0; - } - break; - default: - break; - } - goto bad_reg; - } - return 0; - } -bad_reg: - /* ??? For debugging only. Should raise illegal instruction exception. */ - cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n", - (insn >> 16) & 0xf, crm, op1, op2); - return 0; -} - void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val) { if ((env->uncached_cpsr & CPSR_M) == mode) { @@ -2024,20 +2289,6 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) } } -void cpu_arm_set_cp_io(CPUARMState *env, int cpnum, - ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write, - void *opaque) -{ - if (cpnum < 0 || cpnum > 14) { - cpu_abort(env, "Bad coprocessor number: %i\n", cpnum); - return; - } - - env->cp[cpnum].cp_read = cp_read; - env->cp[cpnum].cp_write = cp_write; - env->cp[cpnum].opaque = opaque; -} - #endif /* Note that signed overflow is undefined in C. The following routines are @@ -2868,12 +3119,3 @@ float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp) float_status *fpst = fpstp; return float64_muladd(a, b, c, 0, fpst); } - -void HELPER(set_teecr)(CPUARMState *env, uint32_t val) -{ - val &= 1; - if (env->teecr != val) { - env->teecr = val; - tb_flush(env); - } -} diff --git a/target-arm/helper.h b/target-arm/helper.h index 16dd5fcc89..21e9cfe05f 100644 --- a/target-arm/helper.h +++ b/target-arm/helper.h @@ -59,11 +59,10 @@ DEF_HELPER_0(cpsr_read, i32) DEF_HELPER_3(v7m_msr, void, env, i32, i32) DEF_HELPER_2(v7m_mrs, i32, env, i32) -DEF_HELPER_3(set_cp15, void, env, i32, i32) -DEF_HELPER_2(get_cp15, i32, env, i32) - -DEF_HELPER_3(set_cp, void, env, i32, i32) -DEF_HELPER_2(get_cp, i32, env, i32) +DEF_HELPER_3(set_cp_reg, void, env, ptr, i32) +DEF_HELPER_2(get_cp_reg, i32, env, ptr) +DEF_HELPER_3(set_cp_reg64, void, env, ptr, i64) +DEF_HELPER_2(get_cp_reg64, i64, env, ptr) DEF_HELPER_2(get_r13_banked, i32, env, i32) DEF_HELPER_3(set_r13_banked, void, env, i32, i32) @@ -459,8 +458,6 @@ DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32) DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32) DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32) -DEF_HELPER_2(set_teecr, void, env, i32) - DEF_HELPER_3(neon_unzip8, void, env, i32, i32) DEF_HELPER_3(neon_unzip16, void, env, i32, i32) DEF_HELPER_3(neon_qunzip8, void, env, i32, i32) diff --git a/target-arm/machine.c b/target-arm/machine.c index f66b8dfa1f..a2a75fbd19 100644 --- a/target-arm/machine.c +++ b/target-arm/machine.c @@ -21,7 +21,6 @@ void cpu_save(QEMUFile *f, void *opaque) qemu_put_be32(f, env->fiq_regs[i]); } qemu_put_be32(f, env->cp15.c0_cpuid); - qemu_put_be32(f, env->cp15.c0_cachetype); qemu_put_be32(f, env->cp15.c0_cssel); qemu_put_be32(f, env->cp15.c1_sys); qemu_put_be32(f, env->cp15.c1_coproc); @@ -139,7 +138,6 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id) env->fiq_regs[i] = qemu_get_be32(f); } env->cp15.c0_cpuid = qemu_get_be32(f); - env->cp15.c0_cachetype = qemu_get_be32(f); env->cp15.c0_cssel = qemu_get_be32(f); env->cp15.c1_sys = qemu_get_be32(f); env->cp15.c1_coproc = qemu_get_be32(f); diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c index b53369d7cb..490111c22f 
100644 --- a/target-arm/op_helper.c +++ b/target-arm/op_helper.c @@ -23,13 +23,11 @@ #define SIGNBIT (uint32_t)0x80000000 #define SIGNBIT64 ((uint64_t)1 << 63) -#if !defined(CONFIG_USER_ONLY) static void raise_exception(int tt) { env->exception_index = tt; cpu_loop_exit(env); } -#endif uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, uint32_t rn, uint32_t maxindex) @@ -287,6 +285,46 @@ void HELPER(set_user_reg)(uint32_t regno, uint32_t val) } } +void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value) +{ + const ARMCPRegInfo *ri = rip; + int excp = ri->writefn(env, ri, value); + if (excp) { + raise_exception(excp); + } +} + +uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip) +{ + const ARMCPRegInfo *ri = rip; + uint64_t value; + int excp = ri->readfn(env, ri, &value); + if (excp) { + raise_exception(excp); + } + return value; +} + +void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value) +{ + const ARMCPRegInfo *ri = rip; + int excp = ri->writefn(env, ri, value); + if (excp) { + raise_exception(excp); + } +} + +uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip) +{ + const ARMCPRegInfo *ri = rip; + uint64_t value; + int excp = ri->readfn(env, ri, &value); + if (excp) { + raise_exception(excp); + } + return value; +} + /* ??? Flag setting arithmetic is awkward because we need to do comparisons. The only way to do that in TCG is a conditional branch, which clobbers all our temporaries. For now implement these as helper functions. */ diff --git a/target-arm/translate.c b/target-arm/translate.c index 437d9dbf0e..a2a0ecddad 100644 --- a/target-arm/translate.c +++ b/target-arm/translate.c @@ -2439,226 +2439,6 @@ static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn) return 1; } -/* Disassemble system coprocessor instruction. Return nonzero if - instruction is not defined. */ -static int disas_cp_insn(CPUARMState *env, DisasContext *s, uint32_t insn) -{ - TCGv tmp, tmp2; - uint32_t rd = (insn >> 12) & 0xf; - uint32_t cp = (insn >> 8) & 0xf; - if (IS_USER(s)) { - return 1; - } - - if (insn & ARM_CP_RW_BIT) { - if (!env->cp[cp].cp_read) - return 1; - gen_set_pc_im(s->pc); - tmp = tcg_temp_new_i32(); - tmp2 = tcg_const_i32(insn); - gen_helper_get_cp(tmp, cpu_env, tmp2); - tcg_temp_free(tmp2); - store_reg(s, rd, tmp); - } else { - if (!env->cp[cp].cp_write) - return 1; - gen_set_pc_im(s->pc); - tmp = load_reg(s, rd); - tmp2 = tcg_const_i32(insn); - gen_helper_set_cp(cpu_env, tmp2, tmp); - tcg_temp_free(tmp2); - tcg_temp_free_i32(tmp); - } - return 0; -} - -static int cp15_user_ok(CPUARMState *env, uint32_t insn) -{ - int cpn = (insn >> 16) & 0xf; - int cpm = insn & 0xf; - int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38); - - if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) { - /* Performance monitor registers fall into three categories: - * (a) always UNDEF in usermode - * (b) UNDEF only if PMUSERENR.EN is 0 - * (c) always read OK and UNDEF on write (PMUSERENR only) - */ - if ((cpm == 12 && (op < 6)) || - (cpm == 13 && (op < 3))) { - return env->cp15.c9_pmuserenr; - } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) { - /* PMUSERENR, read only */ - return 1; - } - return 0; - } - - if (cpn == 13 && cpm == 0) { - /* TLS register. 
*/ - if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT))) - return 1; - } - return 0; -} - -static int cp15_tls_load_store(CPUARMState *env, DisasContext *s, uint32_t insn, uint32_t rd) -{ - TCGv tmp; - int cpn = (insn >> 16) & 0xf; - int cpm = insn & 0xf; - int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38); - - if (!arm_feature(env, ARM_FEATURE_V6K)) - return 0; - - if (!(cpn == 13 && cpm == 0)) - return 0; - - if (insn & ARM_CP_RW_BIT) { - switch (op) { - case 2: - tmp = load_cpu_field(cp15.c13_tls1); - break; - case 3: - tmp = load_cpu_field(cp15.c13_tls2); - break; - case 4: - tmp = load_cpu_field(cp15.c13_tls3); - break; - default: - return 0; - } - store_reg(s, rd, tmp); - - } else { - tmp = load_reg(s, rd); - switch (op) { - case 2: - store_cpu_field(tmp, cp15.c13_tls1); - break; - case 3: - store_cpu_field(tmp, cp15.c13_tls2); - break; - case 4: - store_cpu_field(tmp, cp15.c13_tls3); - break; - default: - tcg_temp_free_i32(tmp); - return 0; - } - } - return 1; -} - -/* Disassemble system coprocessor (cp15) instruction. Return nonzero if - instruction is not defined. */ -static int disas_cp15_insn(CPUARMState *env, DisasContext *s, uint32_t insn) -{ - uint32_t rd; - TCGv tmp, tmp2; - - /* M profile cores use memory mapped registers instead of cp15. */ - if (arm_feature(env, ARM_FEATURE_M)) - return 1; - - if ((insn & (1 << 25)) == 0) { - if (insn & (1 << 20)) { - /* mrrc */ - return 1; - } - /* mcrr. Used for block cache operations, so implement as no-op. */ - return 0; - } - if ((insn & (1 << 4)) == 0) { - /* cdp */ - return 1; - } - /* We special case a number of cp15 instructions which were used - * for things which are real instructions in ARMv7. This allows - * them to work in linux-user mode which doesn't provide functional - * get_cp15/set_cp15 helpers, and is more efficient anyway. - */ - switch ((insn & 0x0fff0fff)) { - case 0x0e070f90: - /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores). - * In v7, this must NOP. - */ - if (IS_USER(s)) { - return 1; - } - if (!arm_feature(env, ARM_FEATURE_V7)) { - /* Wait for interrupt. */ - gen_set_pc_im(s->pc); - s->is_jmp = DISAS_WFI; - } - return 0; - case 0x0e070f58: - /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI, - * so this is slightly over-broad. - */ - if (!IS_USER(s) && !arm_feature(env, ARM_FEATURE_V6)) { - /* Wait for interrupt. */ - gen_set_pc_im(s->pc); - s->is_jmp = DISAS_WFI; - return 0; - } - /* Otherwise continue to handle via helper function. - * In particular, on v7 and some v6 cores this is one of - * the VA-PA registers. - */ - break; - case 0x0e070f3d: - /* 0,c7,c13,1: prefetch-by-MVA in v6, NOP in v7 */ - if (arm_feature(env, ARM_FEATURE_V6)) { - return IS_USER(s) ? 1 : 0; - } - break; - case 0x0e070f95: /* 0,c7,c5,4 : ISB */ - case 0x0e070f9a: /* 0,c7,c10,4: DSB */ - case 0x0e070fba: /* 0,c7,c10,5: DMB */ - /* Barriers in both v6 and v7 */ - if (arm_feature(env, ARM_FEATURE_V6)) { - return 0; - } - break; - default: - break; - } - - if (IS_USER(s) && !cp15_user_ok(env, insn)) { - return 1; - } - - rd = (insn >> 12) & 0xf; - - if (cp15_tls_load_store(env, s, insn, rd)) - return 0; - - tmp2 = tcg_const_i32(insn); - if (insn & ARM_CP_RW_BIT) { - tmp = tcg_temp_new_i32(); - gen_helper_get_cp15(tmp, cpu_env, tmp2); - /* If the destination register is r15 then sets condition codes. 
*/ - if (rd != 15) - store_reg(s, rd, tmp); - else - tcg_temp_free_i32(tmp); - } else { - tmp = load_reg(s, rd); - gen_helper_set_cp15(cpu_env, tmp2, tmp); - tcg_temp_free_i32(tmp); - /* Normally we would always end the TB here, but Linux - * arch/arm/mach-pxa/sleep.S expects two instructions following - * an MMU enable to execute from cache. Imitate this behaviour. */ - if (!arm_feature(env, ARM_FEATURE_XSCALE) || - (insn & 0x0fff0fff) != 0x0e010f10) - gen_lookup_tb(s); - } - tcg_temp_free_i32(tmp2); - return 0; -} - #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n)) #define VFP_SREG(insn, bigbit, smallbit) \ ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1)) @@ -6388,104 +6168,18 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins return 0; } -static int disas_cp14_read(CPUARMState * env, DisasContext *s, uint32_t insn) -{ - int crn = (insn >> 16) & 0xf; - int crm = insn & 0xf; - int op1 = (insn >> 21) & 7; - int op2 = (insn >> 5) & 7; - int rt = (insn >> 12) & 0xf; - TCGv tmp; - - /* Minimal set of debug registers, since we don't support debug */ - if (op1 == 0 && crn == 0 && op2 == 0) { - switch (crm) { - case 0: - /* DBGDIDR: just RAZ. In particular this means the - * "debug architecture version" bits will read as - * a reserved value, which should cause Linux to - * not try to use the debug hardware. - */ - tmp = tcg_const_i32(0); - store_reg(s, rt, tmp); - return 0; - case 1: - case 2: - /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we - * don't implement memory mapped debug components - */ - if (ENABLE_ARCH_7) { - tmp = tcg_const_i32(0); - store_reg(s, rt, tmp); - return 0; - } - break; - default: - break; - } - } - - if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { - if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) { - /* TEECR */ - if (IS_USER(s)) - return 1; - tmp = load_cpu_field(teecr); - store_reg(s, rt, tmp); - return 0; - } - if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) { - /* TEEHBR */ - if (IS_USER(s) && (env->teecr & 1)) - return 1; - tmp = load_cpu_field(teehbr); - store_reg(s, rt, tmp); - return 0; - } - } - return 1; -} - -static int disas_cp14_write(CPUARMState * env, DisasContext *s, uint32_t insn) -{ - int crn = (insn >> 16) & 0xf; - int crm = insn & 0xf; - int op1 = (insn >> 21) & 7; - int op2 = (insn >> 5) & 7; - int rt = (insn >> 12) & 0xf; - TCGv tmp; - - if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { - if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) { - /* TEECR */ - if (IS_USER(s)) - return 1; - tmp = load_reg(s, rt); - gen_helper_set_teecr(cpu_env, tmp); - tcg_temp_free_i32(tmp); - return 0; - } - if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) { - /* TEEHBR */ - if (IS_USER(s) && (env->teecr & 1)) - return 1; - tmp = load_reg(s, rt); - store_cpu_field(tmp, teehbr); - return 0; - } - } - return 1; -} - static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn) { - int cpnum; + int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2; + const ARMCPRegInfo *ri; + ARMCPU *cpu = arm_env_get_cpu(env); cpnum = (insn >> 8) & 0xf; if (arm_feature(env, ARM_FEATURE_XSCALE) && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum))) return 1; + /* First check for coprocessor space used for actual instructions */ switch (cpnum) { case 0: case 1: @@ -6498,22 +6192,154 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn) case 10: case 11: return disas_vfp_insn (env, s, insn); - case 14: - /* Coprocessors 7-15 are architecturally reserved by ARM. 
- Unfortunately Intel decided to ignore this. */ - if (arm_feature(env, ARM_FEATURE_XSCALE)) - goto board; - if (insn & (1 << 20)) - return disas_cp14_read(env, s, insn); - else - return disas_cp14_write(env, s, insn); - case 15: - return disas_cp15_insn (env, s, insn); default: - board: - /* Unknown coprocessor. See if the board has hooked it. */ - return disas_cp_insn (env, s, insn); + break; } + + /* Otherwise treat as a generic register access */ + is64 = (insn & (1 << 25)) == 0; + if (!is64 && ((insn & (1 << 4)) == 0)) { + /* cdp */ + return 1; + } + + crm = insn & 0xf; + if (is64) { + crn = 0; + opc1 = (insn >> 4) & 0xf; + opc2 = 0; + rt2 = (insn >> 16) & 0xf; + } else { + crn = (insn >> 16) & 0xf; + opc1 = (insn >> 21) & 7; + opc2 = (insn >> 5) & 7; + rt2 = 0; + } + isread = (insn >> 20) & 1; + rt = (insn >> 12) & 0xf; + + ri = get_arm_cp_reginfo(cpu, + ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2)); + if (ri) { + /* Check access permissions */ + if (!cp_access_ok(env, ri, isread)) { + return 1; + } + + /* Handle special cases first */ + switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) { + case ARM_CP_NOP: + return 0; + case ARM_CP_WFI: + if (isread) { + return 1; + } + gen_set_pc_im(s->pc); + s->is_jmp = DISAS_WFI; + break; + default: + break; + } + + if (isread) { + /* Read */ + if (is64) { + TCGv_i64 tmp64; + TCGv_i32 tmp; + if (ri->type & ARM_CP_CONST) { + tmp64 = tcg_const_i64(ri->resetvalue); + } else if (ri->readfn) { + TCGv_ptr tmpptr; + gen_set_pc_im(s->pc); + tmp64 = tcg_temp_new_i64(); + tmpptr = tcg_const_ptr(ri); + gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr); + tcg_temp_free_ptr(tmpptr); + } else { + tmp64 = tcg_temp_new_i64(); + tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset); + } + tmp = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp, tmp64); + store_reg(s, rt, tmp); + tcg_gen_shri_i64(tmp64, tmp64, 32); + tcg_gen_trunc_i64_i32(tmp, tmp64); + store_reg(s, rt2, tmp); + } else { + TCGv tmp; + if (ri->type & ARM_CP_CONST) { + tmp = tcg_const_i32(ri->resetvalue); + } else if (ri->readfn) { + TCGv_ptr tmpptr; + gen_set_pc_im(s->pc); + tmp = tcg_temp_new_i32(); + tmpptr = tcg_const_ptr(ri); + gen_helper_get_cp_reg(tmp, cpu_env, tmpptr); + tcg_temp_free_ptr(tmpptr); + } else { + tmp = load_cpu_offset(ri->fieldoffset); + } + if (rt == 15) { + /* Destination register of r15 for 32 bit loads sets + * the condition codes from the high 4 bits of the value + */ + gen_set_nzcv(tmp); + tcg_temp_free_i32(tmp); + } else { + store_reg(s, rt, tmp); + } + } + } else { + /* Write */ + if (ri->type & ARM_CP_CONST) { + /* If not forbidden by access permissions, treat as WI */ + return 0; + } + + if (is64) { + TCGv tmplo, tmphi; + TCGv_i64 tmp64 = tcg_temp_new_i64(); + tmplo = load_reg(s, rt); + tmphi = load_reg(s, rt2); + tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi); + tcg_temp_free_i32(tmplo); + tcg_temp_free_i32(tmphi); + if (ri->writefn) { + TCGv_ptr tmpptr = tcg_const_ptr(ri); + gen_set_pc_im(s->pc); + gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64); + tcg_temp_free_ptr(tmpptr); + } else { + tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset); + } + tcg_temp_free_i64(tmp64); + } else { + if (ri->writefn) { + TCGv tmp; + TCGv_ptr tmpptr; + gen_set_pc_im(s->pc); + tmp = load_reg(s, rt); + tmpptr = tcg_const_ptr(ri); + gen_helper_set_cp_reg(cpu_env, tmpptr, tmp); + tcg_temp_free_ptr(tmpptr); + tcg_temp_free_i32(tmp); + } else { + TCGv tmp = load_reg(s, rt); + store_cpu_offset(tmp, ri->fieldoffset); + } + } + /* We default to ending the TB on a coprocessor register write, + * 
but allow this to be suppressed by the register definition + * (usually only necessary to work around guest bugs). + */ + if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) { + gen_lookup_tb(s); + } + } + return 0; + } + + return 1; } diff --git a/target-cris/Makefile.objs b/target-cris/Makefile.objs new file mode 100644 index 0000000000..4b09e8c6b5 --- /dev/null +++ b/target-cris/Makefile.objs @@ -0,0 +1,4 @@ +obj-y += translate.o op_helper.o helper.o cpu.o +obj-$(CONFIG_SOFTMMU) += mmu.o machine.o + +$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) diff --git a/target-cris/cpu.h b/target-cris/cpu.h index a7603678fc..4f4df6d9b5 100644 --- a/target-cris/cpu.h +++ b/target-cris/cpu.h @@ -64,17 +64,19 @@ #define PR_NRP 12 #define PR_CCS 13 #define PR_USP 14 +#define PRV10_BRP 14 #define PR_SPC 15 /* CPU flags. */ #define Q_FLAG 0x80000000 -#define M_FLAG 0x40000000 +#define M_FLAG_V32 0x40000000 #define PFIX_FLAG 0x800 /* CRISv10 Only. */ #define F_FLAG_V10 0x400 #define P_FLAG_V10 0x200 #define S_FLAG 0x200 #define R_FLAG 0x100 #define P_FLAG 0x80 +#define M_FLAG_V10 0x80 #define U_FLAG 0x40 #define I_FLAG 0x20 #define X_FLAG 0x10 diff --git a/target-cris/helper.c b/target-cris/helper.c index 8680f436a0..bfbc29ec6a 100644 --- a/target-cris/helper.c +++ b/target-cris/helper.c @@ -121,14 +121,14 @@ static void do_interruptv10(CPUCRISState *env) /* These exceptions are genereated by the core itself. ERP should point to the insn following the brk. */ ex_vec = env->trap_vector; - env->pregs[PR_ERP] = env->pc; + env->pregs[PRV10_BRP] = env->pc; break; case EXCP_NMI: /* NMI is hardwired to vector zero. */ ex_vec = 0; - env->pregs[PR_CCS] &= ~M_FLAG; - env->pregs[PR_NRP] = env->pc; + env->pregs[PR_CCS] &= ~M_FLAG_V10; + env->pregs[PRV10_BRP] = env->pc; break; case EXCP_BUSFAULT: @@ -185,7 +185,7 @@ void do_interrupt(CPUCRISState *env) case EXCP_NMI: /* NMI is hardwired to vector zero. */ ex_vec = 0; - env->pregs[PR_CCS] &= ~M_FLAG; + env->pregs[PR_CCS] &= ~M_FLAG_V32; env->pregs[PR_NRP] = env->pc; break; diff --git a/target-cris/op_helper.c b/target-cris/op_helper.c index b92c106e10..ac7c98c8ed 100644 --- a/target-cris/op_helper.c +++ b/target-cris/op_helper.c @@ -247,8 +247,8 @@ void helper_rfn(void) if (!rflag) env->pregs[PR_CCS] |= P_FLAG; - /* Always set the M flag. */ - env->pregs[PR_CCS] |= M_FLAG; + /* Always set the M flag. 
*/ + env->pregs[PR_CCS] |= M_FLAG_V32; } uint32_t helper_lz(uint32_t t0) diff --git a/target-cris/translate_v10.c b/target-cris/translate_v10.c index 4ada3ed09f..3629629d9d 100644 --- a/target-cris/translate_v10.c +++ b/target-cris/translate_v10.c @@ -1132,6 +1132,7 @@ static unsigned int dec10_ind(DisasContext *dc) LOG_DIS("break %d\n", dc->src); cris_evaluate_flags(dc); tcg_gen_movi_tl(env_pc, dc->pc + 2); + t_gen_mov_env_TN(trap_vector, tcg_const_tl(dc->src + 2)); t_gen_raise_exception(EXCP_BREAK); dc->is_jmp = DISAS_UPDATE; return insn_len; diff --git a/target-i386/Makefile.objs b/target-i386/Makefile.objs new file mode 100644 index 0000000000..683fd59af9 --- /dev/null +++ b/target-i386/Makefile.objs @@ -0,0 +1,16 @@ +obj-y += translate.o helper.o cpu.o +obj-y += excp_helper.o fpu_helper.o cc_helper.o int_helper.o svm_helper.o +obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o +obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o +obj-$(CONFIG_KVM) += kvm.o hyperv.o +obj-$(CONFIG_LINUX_USER) += ioport-user.o +obj-$(CONFIG_BSD_USER) += ioport-user.o + +$(obj)/fpu_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) +$(obj)/cc_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) +$(obj)/int_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) +$(obj)/svm_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) +$(obj)/smm_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) +$(obj)/misc_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) +$(obj)/mem_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) +$(obj)/seg_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) diff --git a/target-i386/arch_dump.c b/target-i386/arch_dump.c index 135d855c4a..4240278edd 100644 --- a/target-i386/arch_dump.c +++ b/target-i386/arch_dump.c @@ -6,8 +6,8 @@ * Authors: * Wen Congyang <wency@cn.fujitsu.com> * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. * */ @@ -415,7 +415,7 @@ int cpu_get_dump_info(ArchDumpInfo *info) return 0; } -size_t cpu_get_note_size(int class, int machine, int nr_cpus) +ssize_t cpu_get_note_size(int class, int machine, int nr_cpus) { int name_size = 5; /* "CORE" or "QEMU" */ size_t elf_note_size = 0; diff --git a/target-i386/arch_memory_mapping.c b/target-i386/arch_memory_mapping.c index bd50e1143a..8e5a56a3a8 100644 --- a/target-i386/arch_memory_mapping.c +++ b/target-i386/arch_memory_mapping.c @@ -6,13 +6,14 @@ * Authors: * Wen Congyang <wency@cn.fujitsu.com> * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. * */ #include "cpu.h" #include "cpu-all.h" +#include "memory_mapping.h" /* PAE Paging or IA-32e Paging */ static void walk_pte(MemoryMappingList *list, target_phys_addr_t pte_start_addr, diff --git a/target-i386/cc_helper.c b/target-i386/cc_helper.c new file mode 100644 index 0000000000..ff654bc5ed --- /dev/null +++ b/target-i386/cc_helper.c @@ -0,0 +1,387 @@ +/* + * x86 condition code helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
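
Everything in this new file serves QEMU's lazy condition codes: translated code does not compute EFLAGS after each instruction, it records the operands in CC_SRC/CC_DST plus the operation kind in CC_OP, and the flags are reconstructed only when something actually reads them. A minimal standalone sketch of that idea follows; the mini_* names are hypothetical and do not mirror the real helper signatures:

    #include <stdint.h>
    #include <stdio.h>

    enum { MINI_CC_OP_ADDB, MINI_CC_OP_LOGICB };

    static int mini_cc_op;
    static uint32_t mini_cc_src, mini_cc_dst;   /* recorded operand and result */

    static uint8_t mini_addb(uint8_t a, uint8_t b)
    {
        uint8_t r = a + b;
        mini_cc_src = a;            /* first operand */
        mini_cc_dst = r;            /* result; b is recoverable as dst - src */
        mini_cc_op  = MINI_CC_OP_ADDB;
        return r;
    }

    static int mini_compute_cf(void)    /* carry flag, derived on demand */
    {
        switch (mini_cc_op) {
        case MINI_CC_OP_ADDB:
            /* carry out iff the 8-bit result wrapped below the first operand */
            return (uint8_t)mini_cc_dst < (uint8_t)mini_cc_src;
        case MINI_CC_OP_LOGICB:
            return 0;               /* logic ops always clear CF */
        }
        return 0;
    }

    int main(void)
    {
        mini_addb(0xf0, 0x20);      /* 0xf0 + 0x20 wraps in 8 bits */
        printf("CF after addb = %d\n", mini_compute_cf());   /* prints 1 */
        return 0;
    }
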
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "cpu.h" +#include "dyngen-exec.h" +#include "helper.h" + +const uint8_t parity_table[256] = { + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, +}; + +#define SHIFT 0 +#include "cc_helper_template.h" +#undef SHIFT + +#define SHIFT 1 +#include "cc_helper_template.h" +#undef SHIFT + +#define SHIFT 2 +#include "cc_helper_template.h" +#undef SHIFT + +#ifdef TARGET_X86_64 + +#define SHIFT 3 +#include "cc_helper_template.h" +#undef SHIFT + +#endif + +static int compute_all_eflags(void) +{ + return CC_SRC; +} + +static int compute_c_eflags(void) +{ + return CC_SRC & CC_C; +} + +uint32_t helper_cc_compute_all(int op) +{ + switch (op) { + default: /* should never happen */ + return 0; + + case CC_OP_EFLAGS: + return compute_all_eflags(); + + case CC_OP_MULB: + return compute_all_mulb(); + case CC_OP_MULW: + return compute_all_mulw(); + case CC_OP_MULL: + return compute_all_mull(); + + case CC_OP_ADDB: + return compute_all_addb(); + case CC_OP_ADDW: + return compute_all_addw(); + case CC_OP_ADDL: + return compute_all_addl(); + + case CC_OP_ADCB: + return compute_all_adcb(); + case CC_OP_ADCW: + return compute_all_adcw(); + case CC_OP_ADCL: + return compute_all_adcl(); + + case CC_OP_SUBB: + return compute_all_subb(); + case CC_OP_SUBW: + return compute_all_subw(); + case CC_OP_SUBL: + return compute_all_subl(); + + case CC_OP_SBBB: + return compute_all_sbbb(); + case CC_OP_SBBW: + return compute_all_sbbw(); + case CC_OP_SBBL: + return compute_all_sbbl(); + + case CC_OP_LOGICB: + return compute_all_logicb(); + case CC_OP_LOGICW: + return compute_all_logicw(); + case CC_OP_LOGICL: + return compute_all_logicl(); + + case CC_OP_INCB: + return compute_all_incb(); + case CC_OP_INCW: + return compute_all_incw(); + case CC_OP_INCL: + return compute_all_incl(); + + case CC_OP_DECB: + return compute_all_decb(); + case CC_OP_DECW: + return compute_all_decw(); + case CC_OP_DECL: + return compute_all_decl(); + + case CC_OP_SHLB: + return 
compute_all_shlb(); + case CC_OP_SHLW: + return compute_all_shlw(); + case CC_OP_SHLL: + return compute_all_shll(); + + case CC_OP_SARB: + return compute_all_sarb(); + case CC_OP_SARW: + return compute_all_sarw(); + case CC_OP_SARL: + return compute_all_sarl(); + +#ifdef TARGET_X86_64 + case CC_OP_MULQ: + return compute_all_mulq(); + + case CC_OP_ADDQ: + return compute_all_addq(); + + case CC_OP_ADCQ: + return compute_all_adcq(); + + case CC_OP_SUBQ: + return compute_all_subq(); + + case CC_OP_SBBQ: + return compute_all_sbbq(); + + case CC_OP_LOGICQ: + return compute_all_logicq(); + + case CC_OP_INCQ: + return compute_all_incq(); + + case CC_OP_DECQ: + return compute_all_decq(); + + case CC_OP_SHLQ: + return compute_all_shlq(); + + case CC_OP_SARQ: + return compute_all_sarq(); +#endif + } +} + +uint32_t cpu_cc_compute_all(CPUX86State *env1, int op) +{ + CPUX86State *saved_env; + uint32_t ret; + + saved_env = env; + env = env1; + ret = helper_cc_compute_all(op); + env = saved_env; + return ret; +} + +uint32_t helper_cc_compute_c(int op) +{ + switch (op) { + default: /* should never happen */ + return 0; + + case CC_OP_EFLAGS: + return compute_c_eflags(); + + case CC_OP_MULB: + return compute_c_mull(); + case CC_OP_MULW: + return compute_c_mull(); + case CC_OP_MULL: + return compute_c_mull(); + + case CC_OP_ADDB: + return compute_c_addb(); + case CC_OP_ADDW: + return compute_c_addw(); + case CC_OP_ADDL: + return compute_c_addl(); + + case CC_OP_ADCB: + return compute_c_adcb(); + case CC_OP_ADCW: + return compute_c_adcw(); + case CC_OP_ADCL: + return compute_c_adcl(); + + case CC_OP_SUBB: + return compute_c_subb(); + case CC_OP_SUBW: + return compute_c_subw(); + case CC_OP_SUBL: + return compute_c_subl(); + + case CC_OP_SBBB: + return compute_c_sbbb(); + case CC_OP_SBBW: + return compute_c_sbbw(); + case CC_OP_SBBL: + return compute_c_sbbl(); + + case CC_OP_LOGICB: + return compute_c_logicb(); + case CC_OP_LOGICW: + return compute_c_logicw(); + case CC_OP_LOGICL: + return compute_c_logicl(); + + case CC_OP_INCB: + return compute_c_incl(); + case CC_OP_INCW: + return compute_c_incl(); + case CC_OP_INCL: + return compute_c_incl(); + + case CC_OP_DECB: + return compute_c_incl(); + case CC_OP_DECW: + return compute_c_incl(); + case CC_OP_DECL: + return compute_c_incl(); + + case CC_OP_SHLB: + return compute_c_shlb(); + case CC_OP_SHLW: + return compute_c_shlw(); + case CC_OP_SHLL: + return compute_c_shll(); + + case CC_OP_SARB: + return compute_c_sarl(); + case CC_OP_SARW: + return compute_c_sarl(); + case CC_OP_SARL: + return compute_c_sarl(); + +#ifdef TARGET_X86_64 + case CC_OP_MULQ: + return compute_c_mull(); + + case CC_OP_ADDQ: + return compute_c_addq(); + + case CC_OP_ADCQ: + return compute_c_adcq(); + + case CC_OP_SUBQ: + return compute_c_subq(); + + case CC_OP_SBBQ: + return compute_c_sbbq(); + + case CC_OP_LOGICQ: + return compute_c_logicq(); + + case CC_OP_INCQ: + return compute_c_incl(); + + case CC_OP_DECQ: + return compute_c_incl(); + + case CC_OP_SHLQ: + return compute_c_shlq(); + + case CC_OP_SARQ: + return compute_c_sarl(); +#endif + } +} + +void helper_write_eflags(target_ulong t0, uint32_t update_mask) +{ + cpu_load_eflags(env, t0, update_mask); +} + +target_ulong helper_read_eflags(void) +{ + uint32_t eflags; + + eflags = helper_cc_compute_all(CC_OP); + eflags |= (DF & DF_MASK); + eflags |= env->eflags & ~(VM_MASK | RF_MASK); + return eflags; +} + +void helper_clts(void) +{ + env->cr[0] &= ~CR0_TS_MASK; + env->hflags &= ~HF_TS_MASK; +} + +void helper_reset_rf(void) +{ + 
env->eflags &= ~RF_MASK; +} + +void helper_cli(void) +{ + env->eflags &= ~IF_MASK; +} + +void helper_sti(void) +{ + env->eflags |= IF_MASK; +} + +#if 0 +/* vm86plus instructions */ +void helper_cli_vm(void) +{ + env->eflags &= ~VIF_MASK; +} + +void helper_sti_vm(void) +{ + env->eflags |= VIF_MASK; + if (env->eflags & VIP_MASK) { + raise_exception(env, EXCP0D_GPF); + } +} +#endif + +void helper_set_inhibit_irq(void) +{ + env->hflags |= HF_INHIBIT_IRQ_MASK; +} + +void helper_reset_inhibit_irq(void) +{ + env->hflags &= ~HF_INHIBIT_IRQ_MASK; +} diff --git a/target-i386/helper_template.h b/target-i386/cc_helper_template.h index afc41fb980..ff22830f6b 100644 --- a/target-i386/helper_template.h +++ b/target-i386/cc_helper_template.h @@ -1,5 +1,5 @@ /* - * i386 helpers + * x86 condition code helpers * * Copyright (c) 2008 Fabrice Bellard * @@ -16,34 +16,25 @@ * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ + #define DATA_BITS (1 << (3 + SHIFT)) -#define SHIFT_MASK (DATA_BITS - 1) #define SIGN_MASK (((target_ulong)1) << (DATA_BITS - 1)) -#if DATA_BITS <= 32 -#define SHIFT1_MASK 0x1f -#else -#define SHIFT1_MASK 0x3f -#endif #if DATA_BITS == 8 #define SUFFIX b #define DATA_TYPE uint8_t -#define DATA_STYPE int8_t #define DATA_MASK 0xff #elif DATA_BITS == 16 #define SUFFIX w #define DATA_TYPE uint16_t -#define DATA_STYPE int16_t #define DATA_MASK 0xffff #elif DATA_BITS == 32 #define SUFFIX l #define DATA_TYPE uint32_t -#define DATA_STYPE int32_t #define DATA_MASK 0xffffffff #elif DATA_BITS == 64 #define SUFFIX q #define DATA_TYPE uint64_t -#define DATA_STYPE int64_t #define DATA_MASK 0xffffffffffffffffULL #else #error unhandled operand size @@ -55,6 +46,7 @@ static int glue(compute_all_add, SUFFIX)(void) { int cf, pf, af, zf, sf, of; target_long src1, src2; + src1 = CC_SRC; src2 = CC_DST - CC_SRC; cf = (DATA_TYPE)CC_DST < (DATA_TYPE)src1; @@ -70,6 +62,7 @@ static int glue(compute_c_add, SUFFIX)(void) { int cf; target_long src1; + src1 = CC_SRC; cf = (DATA_TYPE)CC_DST < (DATA_TYPE)src1; return cf; @@ -79,6 +72,7 @@ static int glue(compute_all_adc, SUFFIX)(void) { int cf, pf, af, zf, sf, of; target_long src1, src2; + src1 = CC_SRC; src2 = CC_DST - CC_SRC - 1; cf = (DATA_TYPE)CC_DST <= (DATA_TYPE)src1; @@ -94,6 +88,7 @@ static int glue(compute_c_adc, SUFFIX)(void) { int cf; target_long src1; + src1 = CC_SRC; cf = (DATA_TYPE)CC_DST <= (DATA_TYPE)src1; return cf; @@ -103,6 +98,7 @@ static int glue(compute_all_sub, SUFFIX)(void) { int cf, pf, af, zf, sf, of; target_long src1, src2; + src1 = CC_DST + CC_SRC; src2 = CC_SRC; cf = (DATA_TYPE)src1 < (DATA_TYPE)src2; @@ -118,6 +114,7 @@ static int glue(compute_c_sub, SUFFIX)(void) { int cf; target_long src1, src2; + src1 = CC_DST + CC_SRC; src2 = CC_SRC; cf = (DATA_TYPE)src1 < (DATA_TYPE)src2; @@ -128,6 +125,7 @@ static int glue(compute_all_sbb, SUFFIX)(void) { int cf, pf, af, zf, sf, of; target_long src1, src2; + src1 = CC_DST + CC_SRC + 1; src2 = CC_SRC; cf = (DATA_TYPE)src1 <= (DATA_TYPE)src2; @@ -143,6 +141,7 @@ static int glue(compute_c_sbb, SUFFIX)(void) { int cf; target_long src1, src2; + src1 = CC_DST + CC_SRC + 1; src2 = CC_SRC; cf = (DATA_TYPE)src1 <= (DATA_TYPE)src2; @@ -152,6 +151,7 @@ static int glue(compute_c_sbb, SUFFIX)(void) static int glue(compute_all_logic, SUFFIX)(void) { int cf, pf, af, zf, sf, of; + cf = 0; pf = parity_table[(uint8_t)CC_DST]; af = 0; @@ -170,6 +170,7 @@ static int glue(compute_all_inc, SUFFIX)(void) { int cf, pf, af, zf, sf, 
of; target_long src1, src2; + src1 = CC_DST - 1; src2 = 1; cf = CC_SRC; @@ -192,6 +193,7 @@ static int glue(compute_all_dec, SUFFIX)(void) { int cf, pf, af, zf, sf, of; target_long src1, src2; + src1 = CC_DST + 1; src2 = 1; cf = CC_SRC; @@ -206,6 +208,7 @@ static int glue(compute_all_dec, SUFFIX)(void) static int glue(compute_all_shl, SUFFIX)(void) { int cf, pf, af, zf, sf, of; + cf = (CC_SRC >> (DATA_BITS - 1)) & CC_C; pf = parity_table[(uint8_t)CC_DST]; af = 0; /* undefined */ @@ -231,6 +234,7 @@ static int glue(compute_c_sar, SUFFIX)(void) static int glue(compute_all_sar, SUFFIX)(void) { int cf, pf, af, zf, sf, of; + cf = CC_SRC & 1; pf = parity_table[(uint8_t)CC_DST]; af = 0; /* undefined */ @@ -245,6 +249,7 @@ static int glue(compute_all_sar, SUFFIX)(void) static int glue(compute_c_mul, SUFFIX)(void) { int cf; + cf = (CC_SRC != 0); return cf; } @@ -255,6 +260,7 @@ static int glue(compute_c_mul, SUFFIX)(void) static int glue(compute_all_mul, SUFFIX)(void) { int cf, pf, af, zf, sf, of; + cf = (CC_SRC != 0); pf = parity_table[(uint8_t)CC_DST]; af = 0; /* undefined */ @@ -264,71 +270,8 @@ static int glue(compute_all_mul, SUFFIX)(void) return cf | pf | af | zf | sf | of; } -/* shifts */ - -target_ulong glue(helper_rcl, SUFFIX)(target_ulong t0, target_ulong t1) -{ - int count, eflags; - target_ulong src; - target_long res; - - count = t1 & SHIFT1_MASK; -#if DATA_BITS == 16 - count = rclw_table[count]; -#elif DATA_BITS == 8 - count = rclb_table[count]; -#endif - if (count) { - eflags = helper_cc_compute_all(CC_OP); - t0 &= DATA_MASK; - src = t0; - res = (t0 << count) | ((target_ulong)(eflags & CC_C) << (count - 1)); - if (count > 1) - res |= t0 >> (DATA_BITS + 1 - count); - t0 = res; - env->cc_tmp = (eflags & ~(CC_C | CC_O)) | - (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) | - ((src >> (DATA_BITS - count)) & CC_C); - } else { - env->cc_tmp = -1; - } - return t0; -} - -target_ulong glue(helper_rcr, SUFFIX)(target_ulong t0, target_ulong t1) -{ - int count, eflags; - target_ulong src; - target_long res; - - count = t1 & SHIFT1_MASK; -#if DATA_BITS == 16 - count = rclw_table[count]; -#elif DATA_BITS == 8 - count = rclb_table[count]; -#endif - if (count) { - eflags = helper_cc_compute_all(CC_OP); - t0 &= DATA_MASK; - src = t0; - res = (t0 >> count) | ((target_ulong)(eflags & CC_C) << (DATA_BITS - count)); - if (count > 1) - res |= t0 << (DATA_BITS + 1 - count); - t0 = res; - env->cc_tmp = (eflags & ~(CC_C | CC_O)) | - (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) | - ((src >> (count - 1)) & CC_C); - } else { - env->cc_tmp = -1; - } - return t0; -} - #undef DATA_BITS -#undef SHIFT_MASK -#undef SHIFT1_MASK #undef SIGN_MASK #undef DATA_TYPE -#undef DATA_STYPE #undef DATA_MASK #undef SUFFIX diff --git a/target-i386/cpu.c b/target-i386/cpu.c index 388bc5c527..5521709240 100644 --- a/target-i386/cpu.c +++ b/target-i386/cpu.c @@ -305,7 +305,6 @@ static x86_def_t builtin_x86_defs[] = { .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, .xlevel = 0x8000000A, - .model_id = "QEMU Virtual CPU version " QEMU_VERSION, }, { .name = "phenom", @@ -388,7 +387,6 @@ static x86_def_t builtin_x86_defs[] = { .features = PPRO_FEATURES, .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT, .xlevel = 0x80000004, - .model_id = "QEMU Virtual CPU version " QEMU_VERSION, }, { .name = "kvm32", @@ -467,8 +465,6 @@ static x86_def_t builtin_x86_defs[] = { .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA, .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) | 
CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, .xlevel = 0x80000008, - /* XXX: put another string ? */ - .model_id = "QEMU Virtual CPU version " QEMU_VERSION, }, { .name = "n270", @@ -723,66 +719,32 @@ static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { X86CPU *cpu = X86_CPU(obj); - int64_t value; - value = cpu->env.cpuid_level; - /* TODO Use visit_type_uint32() once available */ - visit_type_int(v, &value, name, errp); + visit_type_uint32(v, &cpu->env.cpuid_level, name, errp); } static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { X86CPU *cpu = X86_CPU(obj); - const int64_t min = 0; - const int64_t max = UINT32_MAX; - int64_t value; - /* TODO Use visit_type_uint32() once available */ - visit_type_int(v, &value, name, errp); - if (error_is_set(errp)) { - return; - } - if (value < min || value > max) { - error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", - name ? name : "null", value, min, max); - return; - } - - cpu->env.cpuid_level = value; + visit_type_uint32(v, &cpu->env.cpuid_level, name, errp); } static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { X86CPU *cpu = X86_CPU(obj); - int64_t value; - value = cpu->env.cpuid_xlevel; - /* TODO Use visit_type_uint32() once available */ - visit_type_int(v, &value, name, errp); + visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp); } static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { X86CPU *cpu = X86_CPU(obj); - const int64_t min = 0; - const int64_t max = UINT32_MAX; - int64_t value; - /* TODO Use visit_type_uint32() once available */ - visit_type_int(v, &value, name, errp); - if (error_is_set(errp)) { - return; - } - if (value < min || value > max) { - error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", - name ? 
name : "null", value, min, max); - return; - } - - cpu->env.cpuid_xlevel = value; + visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp); } static char *x86_cpuid_get_vendor(Object *obj, Error **errp) @@ -1333,11 +1295,23 @@ void cpu_clear_apic_feature(CPUX86State *env) */ void x86_cpudef_setup(void) { - int i; + int i, j; + static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" }; for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) { builtin_x86_defs[i].next = x86_defs; builtin_x86_defs[i].flags = 1; + + /* Look for specific "cpudef" models that */ + /* have the QEMU version in .model_id */ + for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) { + if (strcmp(model_with_versions[j], builtin_x86_defs[i].name) == 0) { + pstrcpy(builtin_x86_defs[i].model_id, sizeof(builtin_x86_defs[i].model_id), "QEMU Virtual CPU version "); + pstrcat(builtin_x86_defs[i].model_id, sizeof(builtin_x86_defs[i].model_id), qemu_get_version()); + break; + } + } + x86_defs = &builtin_x86_defs[i]; } #if !defined(CONFIG_USER_ONLY) diff --git a/target-i386/cpu.h b/target-i386/cpu.h index bcf663eeb5..f257c972fb 100644 --- a/target-i386/cpu.h +++ b/target-i386/cpu.h @@ -1010,6 +1010,16 @@ static inline int cpu_mmu_index (CPUX86State *env) #define CC_DST (env->cc_dst) #define CC_OP (env->cc_op) +/* n must be a constant to be efficient */ +static inline target_long lshift(target_long x, int n) +{ + if (n >= 0) { + return x << n; + } else { + return x >> (-n); + } +} + /* float macros */ #define FT0 (env->ft0) #define ST0 (env->fpregs[env->fpstt].d) @@ -1071,19 +1081,57 @@ void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank, uint64_t status, uint64_t mcg_status, uint64_t addr, uint64_t misc, int flags); +/* excp_helper.c */ +void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index); +void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index, + int error_code); +void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int, + int error_code, int next_eip_addend); + +/* cc_helper.c */ +extern const uint8_t parity_table[256]; +uint32_t cpu_cc_compute_all(CPUX86State *env1, int op); + +static inline uint32_t cpu_compute_eflags(CPUX86State *env) +{ + return env->eflags | cpu_cc_compute_all(env, CC_OP) | (DF & DF_MASK); +} + +/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */ +static inline void cpu_load_eflags(CPUX86State *env, int eflags, + int update_mask) +{ + CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); + DF = 1 - (2 * ((eflags >> 10) & 1)); + env->eflags = (env->eflags & ~update_mask) | + (eflags & update_mask) | 0x2; +} + +/* load efer and update the corresponding hflags. XXX: do consistency + checks with cpuid bits? 
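
cpu_load_eflags() just above stores the direction flag as a signed stride rather than as the raw bit: DF becomes +1 or -1 so string operations can step ESI/EDI by DF times the element size, and cpu_compute_eflags() folds the bit back in with (DF & DF_MASK). A small self-contained round trip, assuming only that DF lives at bit 10 of EFLAGS (DF_BIT stands in for DF_MASK):

    #include <assert.h>
    #include <stdint.h>

    #define DF_BIT (1u << 10)       /* architectural DF position */

    int main(void)
    {
        for (int set = 0; set <= 1; set++) {
            uint32_t eflags = set ? DF_BIT : 0;

            /* load: the direction flag becomes a stride of +1 or -1 */
            int df = 1 - (2 * ((eflags >> 10) & 1));
            assert(df == (set ? -1 : 1));

            /* store: masking the stride with the bit recovers the flag */
            assert((uint32_t)(df & (int)DF_BIT) == (set ? DF_BIT : 0));
        }
        return 0;
    }
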
*/ +static inline void cpu_load_efer(CPUX86State *env, uint64_t val) +{ + env->efer = val; + env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK); + if (env->efer & MSR_EFER_LMA) { + env->hflags |= HF_LMA_MASK; + } + if (env->efer & MSR_EFER_SVME) { + env->hflags |= HF_SVME_MASK; + } +} + +/* svm_helper.c */ +void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, + uint64_t param); +void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1); + /* op_helper.c */ void do_interrupt(CPUX86State *env); void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw); -void QEMU_NORETURN raise_exception_env(int exception_index, CPUX86State *nenv); -void QEMU_NORETURN raise_exception_err_env(CPUX86State *nenv, int exception_index, - int error_code); void do_smm_enter(CPUX86State *env1); -void svm_check_intercept(CPUX86State *env1, uint32_t type); - -uint32_t cpu_cc_compute_all(CPUX86State *env1, int op); - void cpu_report_tpr_access(CPUX86State *env, TPRAccess access); #endif /* CPU_I386_H */ diff --git a/target-i386/excp_helper.c b/target-i386/excp_helper.c new file mode 100644 index 0000000000..aaa5ca2090 --- /dev/null +++ b/target-i386/excp_helper.c @@ -0,0 +1,129 @@ +/* + * x86 exception helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "cpu.h" +#include "qemu-log.h" +#include "sysemu.h" +#include "helper.h" + +#if 0 +#define raise_exception_err(env, a, b) \ + do { \ + qemu_log("raise_exception line=%d\n", __LINE__); \ + (raise_exception_err)(env, a, b); \ + } while (0) +#endif + +void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend) +{ + raise_interrupt(env, intno, 1, 0, next_eip_addend); +} + +void helper_raise_exception(CPUX86State *env, int exception_index) +{ + raise_exception(env, exception_index); +} + +/* + * Check nested exceptions and change to double or triple fault if + * needed. It should only be called, if this is not an interrupt. + * Returns the new exception number. 
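
For reference, the classification that check_exception() below applies is the architectural one: vector 0 (#DE) and vectors 10 to 13 (#TS, #NP, #SS, #GP) are contributory, vector 14 (#PF) forms its own class, and everything else is benign. A compact standalone restatement using raw vector numbers:

    #include <assert.h>
    #include <stdbool.h>

    static bool contributory(int v)          /* #DE and #TS/#NP/#SS/#GP */
    {
        return v == 0 || (v >= 10 && v <= 13);
    }

    static bool becomes_double_fault(int first, int second)
    {
        return (contributory(first) && contributory(second))
            || (first == 14 && (contributory(second) || second == 14));
    }

    int main(void)
    {
        assert(becomes_double_fault(13, 13));    /* #GP during #GP -> #DF */
        assert(becomes_double_fault(14, 14));    /* #PF during #PF -> #DF */
        assert(becomes_double_fault(14, 13));    /* #GP during #PF -> #DF */
        assert(!becomes_double_fault(13, 14));   /* #PF during #GP -> handled serially */
        assert(!becomes_double_fault(1, 13));    /* benign first exception -> serial */
        return 0;
    }
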
+ */ +static int check_exception(CPUX86State *env, int intno, int *error_code) +{ + int first_contributory = env->old_exception == 0 || + (env->old_exception >= 10 && + env->old_exception <= 13); + int second_contributory = intno == 0 || + (intno >= 10 && intno <= 13); + + qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n", + env->old_exception, intno); + +#if !defined(CONFIG_USER_ONLY) + if (env->old_exception == EXCP08_DBLE) { + if (env->hflags & HF_SVMI_MASK) { + cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0); /* does not return */ + } + + qemu_log_mask(CPU_LOG_RESET, "Triple fault\n"); + + qemu_system_reset_request(); + return EXCP_HLT; + } +#endif + + if ((first_contributory && second_contributory) + || (env->old_exception == EXCP0E_PAGE && + (second_contributory || (intno == EXCP0E_PAGE)))) { + intno = EXCP08_DBLE; + *error_code = 0; + } + + if (second_contributory || (intno == EXCP0E_PAGE) || + (intno == EXCP08_DBLE)) { + env->old_exception = intno; + } + + return intno; +} + +/* + * Signal an interruption. It is executed in the main CPU loop. + * is_int is TRUE if coming from the int instruction. next_eip is the + * EIP value AFTER the interrupt instruction. It is only relevant if + * is_int is TRUE. + */ +static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno, + int is_int, int error_code, + int next_eip_addend) +{ + if (!is_int) { + cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno, + error_code); + intno = check_exception(env, intno, &error_code); + } else { + cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0); + } + + env->exception_index = intno; + env->error_code = error_code; + env->exception_is_int = is_int; + env->exception_next_eip = env->eip + next_eip_addend; + cpu_loop_exit(env); +} + +/* shortcuts to generate exceptions */ + +void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int, + int error_code, int next_eip_addend) +{ + raise_interrupt2(env, intno, is_int, error_code, next_eip_addend); +} + +void raise_exception_err(CPUX86State *env, int exception_index, + int error_code) +{ + raise_interrupt2(env, exception_index, 0, error_code, 0); +} + +void raise_exception(CPUX86State *env, int exception_index) +{ + raise_interrupt2(env, exception_index, 0, 0, 0); +} diff --git a/target-i386/fpu_helper.c b/target-i386/fpu_helper.c new file mode 100644 index 0000000000..6065c2e72d --- /dev/null +++ b/target-i386/fpu_helper.c @@ -0,0 +1,1304 @@ +/* + * x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
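
The helpers in this file handle x87 values in their raw 80-bit form: a 64-bit mantissa with an explicit integer bit, plus a 16-bit word holding the sign and a 15-bit exponent biased by 16383, which is what the EXPD/SIGND/MANTD/BIASEXPONENT macros below take apart. A small sketch of that layout; the struct is a made-up stand-in for QEMU's CPU_LDoubleU:

    #include <assert.h>
    #include <stdint.h>

    struct x87_ext {          /* hypothetical stand-in for CPU_LDoubleU's l.lower/l.upper */
        uint64_t mant;        /* 64-bit mantissa with an explicit integer bit */
        uint16_t se;          /* sign in bit 15, 15-bit biased exponent below it */
    };

    int main(void)
    {
        struct x87_ext one      = { 1ULL << 63, 16383 };            /* +1.0 */
        struct x87_ext minustwo = { 1ULL << 63, 0x8000 | 16384 };   /* -2.0 */

        assert((one.se & 0x7fff) == 16383 && !(one.se & 0x8000));
        assert((minustwo.se & 0x7fff) == 16384 && (minustwo.se & 0x8000));
        return 0;
    }
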
+ */ + +#include <math.h> +#include "cpu.h" +#include "dyngen-exec.h" +#include "helper.h" + +#if !defined(CONFIG_USER_ONLY) +#include "softmmu_exec.h" +#endif /* !defined(CONFIG_USER_ONLY) */ + +#define FPU_RC_MASK 0xc00 +#define FPU_RC_NEAR 0x000 +#define FPU_RC_DOWN 0x400 +#define FPU_RC_UP 0x800 +#define FPU_RC_CHOP 0xc00 + +#define MAXTAN 9223372036854775808.0 + +/* the following deal with x86 long double-precision numbers */ +#define MAXEXPD 0x7fff +#define EXPBIAS 16383 +#define EXPD(fp) (fp.l.upper & 0x7fff) +#define SIGND(fp) ((fp.l.upper) & 0x8000) +#define MANTD(fp) (fp.l.lower) +#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS + +#define FPUS_IE (1 << 0) +#define FPUS_DE (1 << 1) +#define FPUS_ZE (1 << 2) +#define FPUS_OE (1 << 3) +#define FPUS_UE (1 << 4) +#define FPUS_PE (1 << 5) +#define FPUS_SF (1 << 6) +#define FPUS_SE (1 << 7) +#define FPUS_B (1 << 15) + +#define FPUC_EM 0x3f + +#define floatx80_lg2 make_floatx80(0x3ffd, 0x9a209a84fbcff799LL) +#define floatx80_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL) +#define floatx80_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL) + +static inline void fpush(void) +{ + env->fpstt = (env->fpstt - 1) & 7; + env->fptags[env->fpstt] = 0; /* validate stack entry */ +} + +static inline void fpop(void) +{ + env->fptags[env->fpstt] = 1; /* invalidate stack entry */ + env->fpstt = (env->fpstt + 1) & 7; +} + +static inline floatx80 helper_fldt(target_ulong ptr) +{ + CPU_LDoubleU temp; + + temp.l.lower = ldq(ptr); + temp.l.upper = lduw(ptr + 8); + return temp.d; +} + +static inline void helper_fstt(floatx80 f, target_ulong ptr) +{ + CPU_LDoubleU temp; + + temp.d = f; + stq(ptr, temp.l.lower); + stw(ptr + 8, temp.l.upper); +} + +/* x87 FPU helpers */ + +static inline double floatx80_to_double(floatx80 a) +{ + union { + float64 f64; + double d; + } u; + + u.f64 = floatx80_to_float64(a, &env->fp_status); + return u.d; +} + +static inline floatx80 double_to_floatx80(double a) +{ + union { + float64 f64; + double d; + } u; + + u.d = a; + return float64_to_floatx80(u.f64, &env->fp_status); +} + +static void fpu_set_exception(int mask) +{ + env->fpus |= mask; + if (env->fpus & (~env->fpuc & FPUC_EM)) { + env->fpus |= FPUS_SE | FPUS_B; + } +} + +static inline floatx80 helper_fdiv(floatx80 a, floatx80 b) +{ + if (floatx80_is_zero(b)) { + fpu_set_exception(FPUS_ZE); + } + return floatx80_div(a, b, &env->fp_status); +} + +static void fpu_raise_exception(void) +{ + if (env->cr[0] & CR0_NE_MASK) { + raise_exception(env, EXCP10_COPR); + } +#if !defined(CONFIG_USER_ONLY) + else { + cpu_set_ferr(env); + } +#endif +} + +void helper_flds_FT0(uint32_t val) +{ + union { + float32 f; + uint32_t i; + } u; + + u.i = val; + FT0 = float32_to_floatx80(u.f, &env->fp_status); +} + +void helper_fldl_FT0(uint64_t val) +{ + union { + float64 f; + uint64_t i; + } u; + + u.i = val; + FT0 = float64_to_floatx80(u.f, &env->fp_status); +} + +void helper_fildl_FT0(int32_t val) +{ + FT0 = int32_to_floatx80(val, &env->fp_status); +} + +void helper_flds_ST0(uint32_t val) +{ + int new_fpstt; + union { + float32 f; + uint32_t i; + } u; + + new_fpstt = (env->fpstt - 1) & 7; + u.i = val; + env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status); + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void helper_fldl_ST0(uint64_t val) +{ + int new_fpstt; + union { + float64 f; + uint64_t i; + } u; + + new_fpstt = (env->fpstt - 1) & 7; + u.i = val; + env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, 
&env->fp_status); + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void helper_fildl_ST0(int32_t val) +{ + int new_fpstt; + + new_fpstt = (env->fpstt - 1) & 7; + env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status); + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void helper_fildll_ST0(int64_t val) +{ + int new_fpstt; + + new_fpstt = (env->fpstt - 1) & 7; + env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status); + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +uint32_t helper_fsts_ST0(void) +{ + union { + float32 f; + uint32_t i; + } u; + + u.f = floatx80_to_float32(ST0, &env->fp_status); + return u.i; +} + +uint64_t helper_fstl_ST0(void) +{ + union { + float64 f; + uint64_t i; + } u; + + u.f = floatx80_to_float64(ST0, &env->fp_status); + return u.i; +} + +int32_t helper_fist_ST0(void) +{ + int32_t val; + + val = floatx80_to_int32(ST0, &env->fp_status); + if (val != (int16_t)val) { + val = -32768; + } + return val; +} + +int32_t helper_fistl_ST0(void) +{ + int32_t val; + + val = floatx80_to_int32(ST0, &env->fp_status); + return val; +} + +int64_t helper_fistll_ST0(void) +{ + int64_t val; + + val = floatx80_to_int64(ST0, &env->fp_status); + return val; +} + +int32_t helper_fistt_ST0(void) +{ + int32_t val; + + val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status); + if (val != (int16_t)val) { + val = -32768; + } + return val; +} + +int32_t helper_fisttl_ST0(void) +{ + int32_t val; + + val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status); + return val; +} + +int64_t helper_fisttll_ST0(void) +{ + int64_t val; + + val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status); + return val; +} + +void helper_fldt_ST0(target_ulong ptr) +{ + int new_fpstt; + + new_fpstt = (env->fpstt - 1) & 7; + env->fpregs[new_fpstt].d = helper_fldt(ptr); + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void helper_fstt_ST0(target_ulong ptr) +{ + helper_fstt(ST0, ptr); +} + +void helper_fpush(void) +{ + fpush(); +} + +void helper_fpop(void) +{ + fpop(); +} + +void helper_fdecstp(void) +{ + env->fpstt = (env->fpstt - 1) & 7; + env->fpus &= ~0x4700; +} + +void helper_fincstp(void) +{ + env->fpstt = (env->fpstt + 1) & 7; + env->fpus &= ~0x4700; +} + +/* FPU move */ + +void helper_ffree_STN(int st_index) +{ + env->fptags[(env->fpstt + st_index) & 7] = 1; +} + +void helper_fmov_ST0_FT0(void) +{ + ST0 = FT0; +} + +void helper_fmov_FT0_STN(int st_index) +{ + FT0 = ST(st_index); +} + +void helper_fmov_ST0_STN(int st_index) +{ + ST0 = ST(st_index); +} + +void helper_fmov_STN_ST0(int st_index) +{ + ST(st_index) = ST0; +} + +void helper_fxchg_ST0_STN(int st_index) +{ + floatx80 tmp; + + tmp = ST(st_index); + ST(st_index) = ST0; + ST0 = tmp; +} + +/* FPU operations */ + +static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500}; + +void helper_fcom_ST0_FT0(void) +{ + int ret; + + ret = floatx80_compare(ST0, FT0, &env->fp_status); + env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1]; +} + +void helper_fucom_ST0_FT0(void) +{ + int ret; + + ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status); + env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1]; +} + +static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C}; + +void helper_fcomi_ST0_FT0(void) +{ + int eflags; + int ret; + + ret = floatx80_compare(ST0, FT0, &env->fp_status); + eflags = helper_cc_compute_all(CC_OP); + eflags = (eflags & ~(CC_Z | CC_P 
| CC_C)) | fcomi_ccval[ret + 1]; + CC_SRC = eflags; +} + +void helper_fucomi_ST0_FT0(void) +{ + int eflags; + int ret; + + ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status); + eflags = helper_cc_compute_all(CC_OP); + eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; + CC_SRC = eflags; +} + +void helper_fadd_ST0_FT0(void) +{ + ST0 = floatx80_add(ST0, FT0, &env->fp_status); +} + +void helper_fmul_ST0_FT0(void) +{ + ST0 = floatx80_mul(ST0, FT0, &env->fp_status); +} + +void helper_fsub_ST0_FT0(void) +{ + ST0 = floatx80_sub(ST0, FT0, &env->fp_status); +} + +void helper_fsubr_ST0_FT0(void) +{ + ST0 = floatx80_sub(FT0, ST0, &env->fp_status); +} + +void helper_fdiv_ST0_FT0(void) +{ + ST0 = helper_fdiv(ST0, FT0); +} + +void helper_fdivr_ST0_FT0(void) +{ + ST0 = helper_fdiv(FT0, ST0); +} + +/* fp operations between STN and ST0 */ + +void helper_fadd_STN_ST0(int st_index) +{ + ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status); +} + +void helper_fmul_STN_ST0(int st_index) +{ + ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status); +} + +void helper_fsub_STN_ST0(int st_index) +{ + ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status); +} + +void helper_fsubr_STN_ST0(int st_index) +{ + ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status); +} + +void helper_fdiv_STN_ST0(int st_index) +{ + floatx80 *p; + + p = &ST(st_index); + *p = helper_fdiv(*p, ST0); +} + +void helper_fdivr_STN_ST0(int st_index) +{ + floatx80 *p; + + p = &ST(st_index); + *p = helper_fdiv(ST0, *p); +} + +/* misc FPU operations */ +void helper_fchs_ST0(void) +{ + ST0 = floatx80_chs(ST0); +} + +void helper_fabs_ST0(void) +{ + ST0 = floatx80_abs(ST0); +} + +void helper_fld1_ST0(void) +{ + ST0 = floatx80_one; +} + +void helper_fldl2t_ST0(void) +{ + ST0 = floatx80_l2t; +} + +void helper_fldl2e_ST0(void) +{ + ST0 = floatx80_l2e; +} + +void helper_fldpi_ST0(void) +{ + ST0 = floatx80_pi; +} + +void helper_fldlg2_ST0(void) +{ + ST0 = floatx80_lg2; +} + +void helper_fldln2_ST0(void) +{ + ST0 = floatx80_ln2; +} + +void helper_fldz_ST0(void) +{ + ST0 = floatx80_zero; +} + +void helper_fldz_FT0(void) +{ + FT0 = floatx80_zero; +} + +uint32_t helper_fnstsw(void) +{ + return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; +} + +uint32_t helper_fnstcw(void) +{ + return env->fpuc; +} + +static void update_fp_status(void) +{ + int rnd_type; + + /* set rounding mode */ + switch (env->fpuc & FPU_RC_MASK) { + default: + case FPU_RC_NEAR: + rnd_type = float_round_nearest_even; + break; + case FPU_RC_DOWN: + rnd_type = float_round_down; + break; + case FPU_RC_UP: + rnd_type = float_round_up; + break; + case FPU_RC_CHOP: + rnd_type = float_round_to_zero; + break; + } + set_float_rounding_mode(rnd_type, &env->fp_status); + switch ((env->fpuc >> 8) & 3) { + case 0: + rnd_type = 32; + break; + case 2: + rnd_type = 64; + break; + case 3: + default: + rnd_type = 80; + break; + } + set_floatx80_rounding_precision(rnd_type, &env->fp_status); +} + +void helper_fldcw(uint32_t val) +{ + env->fpuc = val; + update_fp_status(); +} + +void helper_fclex(void) +{ + env->fpus &= 0x7f00; +} + +void helper_fwait(void) +{ + if (env->fpus & FPUS_SE) { + fpu_raise_exception(); + } +} + +void helper_fninit(void) +{ + env->fpus = 0; + env->fpstt = 0; + env->fpuc = 0x37f; + env->fptags[0] = 1; + env->fptags[1] = 1; + env->fptags[2] = 1; + env->fptags[3] = 1; + env->fptags[4] = 1; + env->fptags[5] = 1; + env->fptags[6] = 1; + env->fptags[7] = 1; +} + +/* BCD ops */ + +void helper_fbld_ST0(target_ulong ptr) +{ + 
floatx80 tmp; + uint64_t val; + unsigned int v; + int i; + + val = 0; + for (i = 8; i >= 0; i--) { + v = ldub(ptr + i); + val = (val * 100) + ((v >> 4) * 10) + (v & 0xf); + } + tmp = int64_to_floatx80(val, &env->fp_status); + if (ldub(ptr + 9) & 0x80) { + floatx80_chs(tmp); + } + fpush(); + ST0 = tmp; +} + +void helper_fbst_ST0(target_ulong ptr) +{ + int v; + target_ulong mem_ref, mem_end; + int64_t val; + + val = floatx80_to_int64(ST0, &env->fp_status); + mem_ref = ptr; + mem_end = mem_ref + 9; + if (val < 0) { + stb(mem_end, 0x80); + val = -val; + } else { + stb(mem_end, 0x00); + } + while (mem_ref < mem_end) { + if (val == 0) { + break; + } + v = val % 100; + val = val / 100; + v = ((v / 10) << 4) | (v % 10); + stb(mem_ref++, v); + } + while (mem_ref < mem_end) { + stb(mem_ref++, 0); + } +} + +void helper_f2xm1(void) +{ + double val = floatx80_to_double(ST0); + + val = pow(2.0, val) - 1.0; + ST0 = double_to_floatx80(val); +} + +void helper_fyl2x(void) +{ + double fptemp = floatx80_to_double(ST0); + + if (fptemp > 0.0) { + fptemp = log(fptemp) / log(2.0); /* log2(ST) */ + fptemp *= floatx80_to_double(ST1); + ST1 = double_to_floatx80(fptemp); + fpop(); + } else { + env->fpus &= ~0x4700; + env->fpus |= 0x400; + } +} + +void helper_fptan(void) +{ + double fptemp = floatx80_to_double(ST0); + + if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) { + env->fpus |= 0x400; + } else { + fptemp = tan(fptemp); + ST0 = double_to_floatx80(fptemp); + fpush(); + ST0 = floatx80_one; + env->fpus &= ~0x400; /* C2 <-- 0 */ + /* the above code is for |arg| < 2**52 only */ + } +} + +void helper_fpatan(void) +{ + double fptemp, fpsrcop; + + fpsrcop = floatx80_to_double(ST1); + fptemp = floatx80_to_double(ST0); + ST1 = double_to_floatx80(atan2(fpsrcop, fptemp)); + fpop(); +} + +void helper_fxtract(void) +{ + CPU_LDoubleU temp; + + temp.d = ST0; + + if (floatx80_is_zero(ST0)) { + /* Easy way to generate -inf and raising division by 0 exception */ + ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, + &env->fp_status); + fpush(); + ST0 = temp.d; + } else { + int expdif; + + expdif = EXPD(temp) - EXPBIAS; + /* DP exponent bias */ + ST0 = int32_to_floatx80(expdif, &env->fp_status); + fpush(); + BIASEXPONENT(temp); + ST0 = temp.d; + } +} + +void helper_fprem1(void) +{ + double st0, st1, dblq, fpsrcop, fptemp; + CPU_LDoubleU fpsrcop1, fptemp1; + int expdif; + signed long long int q; + + st0 = floatx80_to_double(ST0); + st1 = floatx80_to_double(ST1); + + if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) { + ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */ + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + return; + } + + fpsrcop = st0; + fptemp = st1; + fpsrcop1.d = ST0; + fptemp1.d = ST1; + expdif = EXPD(fpsrcop1) - EXPD(fptemp1); + + if (expdif < 0) { + /* optimisation? 
taken from the AMD docs */ + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + /* ST0 is unchanged */ + return; + } + + if (expdif < 53) { + dblq = fpsrcop / fptemp; + /* round dblq towards nearest integer */ + dblq = rint(dblq); + st0 = fpsrcop - fptemp * dblq; + + /* convert dblq to q by truncating towards zero */ + if (dblq < 0.0) { + q = (signed long long int)(-dblq); + } else { + q = (signed long long int)dblq; + } + + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + /* (C0,C3,C1) <-- (q2,q1,q0) */ + env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */ + env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */ + env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */ + } else { + env->fpus |= 0x400; /* C2 <-- 1 */ + fptemp = pow(2.0, expdif - 50); + fpsrcop = (st0 / st1) / fptemp; + /* fpsrcop = integer obtained by chopping */ + fpsrcop = (fpsrcop < 0.0) ? + -(floor(fabs(fpsrcop))) : floor(fpsrcop); + st0 -= (st1 * fpsrcop * fptemp); + } + ST0 = double_to_floatx80(st0); +} + +void helper_fprem(void) +{ + double st0, st1, dblq, fpsrcop, fptemp; + CPU_LDoubleU fpsrcop1, fptemp1; + int expdif; + signed long long int q; + + st0 = floatx80_to_double(ST0); + st1 = floatx80_to_double(ST1); + + if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) { + ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */ + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + return; + } + + fpsrcop = st0; + fptemp = st1; + fpsrcop1.d = ST0; + fptemp1.d = ST1; + expdif = EXPD(fpsrcop1) - EXPD(fptemp1); + + if (expdif < 0) { + /* optimisation? taken from the AMD docs */ + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + /* ST0 is unchanged */ + return; + } + + if (expdif < 53) { + dblq = fpsrcop / fptemp; /* ST0 / ST1 */ + /* round dblq towards zero */ + dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq); + st0 = fpsrcop - fptemp * dblq; /* fpsrcop is ST0 */ + + /* convert dblq to q by truncating towards zero */ + if (dblq < 0.0) { + q = (signed long long int)(-dblq); + } else { + q = (signed long long int)dblq; + } + + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + /* (C0,C3,C1) <-- (q2,q1,q0) */ + env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */ + env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */ + env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */ + } else { + int N = 32 + (expdif % 32); /* as per AMD docs */ + + env->fpus |= 0x400; /* C2 <-- 1 */ + fptemp = pow(2.0, (double)(expdif - N)); + fpsrcop = (st0 / st1) / fptemp; + /* fpsrcop = integer obtained by chopping */ + fpsrcop = (fpsrcop < 0.0) ? 
+ -(floor(fabs(fpsrcop))) : floor(fpsrcop); + st0 -= (st1 * fpsrcop * fptemp); + } + ST0 = double_to_floatx80(st0); +} + +void helper_fyl2xp1(void) +{ + double fptemp = floatx80_to_double(ST0); + + if ((fptemp + 1.0) > 0.0) { + fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */ + fptemp *= floatx80_to_double(ST1); + ST1 = double_to_floatx80(fptemp); + fpop(); + } else { + env->fpus &= ~0x4700; + env->fpus |= 0x400; + } +} + +void helper_fsqrt(void) +{ + if (floatx80_is_neg(ST0)) { + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + env->fpus |= 0x400; + } + ST0 = floatx80_sqrt(ST0, &env->fp_status); +} + +void helper_fsincos(void) +{ + double fptemp = floatx80_to_double(ST0); + + if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) { + env->fpus |= 0x400; + } else { + ST0 = double_to_floatx80(sin(fptemp)); + fpush(); + ST0 = double_to_floatx80(cos(fptemp)); + env->fpus &= ~0x400; /* C2 <-- 0 */ + /* the above code is for |arg| < 2**63 only */ + } +} + +void helper_frndint(void) +{ + ST0 = floatx80_round_to_int(ST0, &env->fp_status); +} + +void helper_fscale(void) +{ + if (floatx80_is_any_nan(ST1)) { + ST0 = ST1; + } else { + int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status); + ST0 = floatx80_scalbn(ST0, n, &env->fp_status); + } +} + +void helper_fsin(void) +{ + double fptemp = floatx80_to_double(ST0); + + if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) { + env->fpus |= 0x400; + } else { + ST0 = double_to_floatx80(sin(fptemp)); + env->fpus &= ~0x400; /* C2 <-- 0 */ + /* the above code is for |arg| < 2**53 only */ + } +} + +void helper_fcos(void) +{ + double fptemp = floatx80_to_double(ST0); + + if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) { + env->fpus |= 0x400; + } else { + ST0 = double_to_floatx80(cos(fptemp)); + env->fpus &= ~0x400; /* C2 <-- 0 */ + /* the above code is for |arg| < 2**63 only */ + } +} + +void helper_fxam_ST0(void) +{ + CPU_LDoubleU temp; + int expdif; + + temp.d = ST0; + + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + if (SIGND(temp)) { + env->fpus |= 0x200; /* C1 <-- 1 */ + } + + /* XXX: test fptags too */ + expdif = EXPD(temp); + if (expdif == MAXEXPD) { + if (MANTD(temp) == 0x8000000000000000ULL) { + env->fpus |= 0x500; /* Infinity */ + } else { + env->fpus |= 0x100; /* NaN */ + } + } else if (expdif == 0) { + if (MANTD(temp) == 0) { + env->fpus |= 0x4000; /* Zero */ + } else { + env->fpus |= 0x4400; /* Denormal */ + } + } else { + env->fpus |= 0x400; + } +} + +void helper_fstenv(target_ulong ptr, int data32) +{ + int fpus, fptag, exp, i; + uint64_t mant; + CPU_LDoubleU tmp; + + fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; + fptag = 0; + for (i = 7; i >= 0; i--) { + fptag <<= 2; + if (env->fptags[i]) { + fptag |= 3; + } else { + tmp.d = env->fpregs[i].d; + exp = EXPD(tmp); + mant = MANTD(tmp); + if (exp == 0 && mant == 0) { + /* zero */ + fptag |= 1; + } else if (exp == 0 || exp == MAXEXPD + || (mant & (1LL << 63)) == 0) { + /* NaNs, infinity, denormal */ + fptag |= 2; + } + } + } + if (data32) { + /* 32 bit */ + stl(ptr, env->fpuc); + stl(ptr + 4, fpus); + stl(ptr + 8, fptag); + stl(ptr + 12, 0); /* fpip */ + stl(ptr + 16, 0); /* fpcs */ + stl(ptr + 20, 0); /* fpoo */ + stl(ptr + 24, 0); /* fpos */ + } else { + /* 16 bit */ + stw(ptr, env->fpuc); + stw(ptr + 2, fpus); + stw(ptr + 4, fptag); + stw(ptr + 6, 0); + stw(ptr + 8, 0); + stw(ptr + 10, 0); + stw(ptr + 12, 0); + } +} + +void helper_fldenv(target_ulong ptr, int data32) +{ + int i, fpus, fptag; + + if (data32) { + env->fpuc = lduw(ptr); + fpus = lduw(ptr + 4); + fptag = lduw(ptr 
+ 8); + } else { + env->fpuc = lduw(ptr); + fpus = lduw(ptr + 2); + fptag = lduw(ptr + 4); + } + env->fpstt = (fpus >> 11) & 7; + env->fpus = fpus & ~0x3800; + for (i = 0; i < 8; i++) { + env->fptags[i] = ((fptag & 3) == 3); + fptag >>= 2; + } +} + +void helper_fsave(target_ulong ptr, int data32) +{ + floatx80 tmp; + int i; + + helper_fstenv(ptr, data32); + + ptr += (14 << data32); + for (i = 0; i < 8; i++) { + tmp = ST(i); + helper_fstt(tmp, ptr); + ptr += 10; + } + + /* fninit */ + env->fpus = 0; + env->fpstt = 0; + env->fpuc = 0x37f; + env->fptags[0] = 1; + env->fptags[1] = 1; + env->fptags[2] = 1; + env->fptags[3] = 1; + env->fptags[4] = 1; + env->fptags[5] = 1; + env->fptags[6] = 1; + env->fptags[7] = 1; +} + +void helper_frstor(target_ulong ptr, int data32) +{ + floatx80 tmp; + int i; + + helper_fldenv(ptr, data32); + ptr += (14 << data32); + + for (i = 0; i < 8; i++) { + tmp = helper_fldt(ptr); + ST(i) = tmp; + ptr += 10; + } +} + +#if defined(CONFIG_USER_ONLY) +void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32) +{ + CPUX86State *saved_env; + + saved_env = env; + env = s; + + helper_fsave(ptr, data32); + + env = saved_env; +} + +void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32) +{ + CPUX86State *saved_env; + + saved_env = env; + env = s; + + helper_frstor(ptr, data32); + + env = saved_env; +} +#endif + +void helper_fxsave(target_ulong ptr, int data64) +{ + int fpus, fptag, i, nb_xmm_regs; + floatx80 tmp; + target_ulong addr; + + /* The operand must be 16 byte aligned */ + if (ptr & 0xf) { + raise_exception(env, EXCP0D_GPF); + } + + fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; + fptag = 0; + for (i = 0; i < 8; i++) { + fptag |= (env->fptags[i] << i); + } + stw(ptr, env->fpuc); + stw(ptr + 2, fpus); + stw(ptr + 4, fptag ^ 0xff); +#ifdef TARGET_X86_64 + if (data64) { + stq(ptr + 0x08, 0); /* rip */ + stq(ptr + 0x10, 0); /* rdp */ + } else +#endif + { + stl(ptr + 0x08, 0); /* eip */ + stl(ptr + 0x0c, 0); /* sel */ + stl(ptr + 0x10, 0); /* dp */ + stl(ptr + 0x14, 0); /* sel */ + } + + addr = ptr + 0x20; + for (i = 0; i < 8; i++) { + tmp = ST(i); + helper_fstt(tmp, addr); + addr += 16; + } + + if (env->cr[4] & CR4_OSFXSR_MASK) { + /* XXX: finish it */ + stl(ptr + 0x18, env->mxcsr); /* mxcsr */ + stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */ + if (env->hflags & HF_CS64_MASK) { + nb_xmm_regs = 16; + } else { + nb_xmm_regs = 8; + } + addr = ptr + 0xa0; + /* Fast FXSAVE leaves out the XMM registers */ + if (!(env->efer & MSR_EFER_FFXSR) + || (env->hflags & HF_CPL_MASK) + || !(env->hflags & HF_LMA_MASK)) { + for (i = 0; i < nb_xmm_regs; i++) { + stq(addr, env->xmm_regs[i].XMM_Q(0)); + stq(addr + 8, env->xmm_regs[i].XMM_Q(1)); + addr += 16; + } + } + } +} + +void helper_fxrstor(target_ulong ptr, int data64) +{ + int i, fpus, fptag, nb_xmm_regs; + floatx80 tmp; + target_ulong addr; + + /* The operand must be 16 byte aligned */ + if (ptr & 0xf) { + raise_exception(env, EXCP0D_GPF); + } + + env->fpuc = lduw(ptr); + fpus = lduw(ptr + 2); + fptag = lduw(ptr + 4); + env->fpstt = (fpus >> 11) & 7; + env->fpus = fpus & ~0x3800; + fptag ^= 0xff; + for (i = 0; i < 8; i++) { + env->fptags[i] = ((fptag >> i) & 1); + } + + addr = ptr + 0x20; + for (i = 0; i < 8; i++) { + tmp = helper_fldt(addr); + ST(i) = tmp; + addr += 16; + } + + if (env->cr[4] & CR4_OSFXSR_MASK) { + /* XXX: finish it */ + env->mxcsr = ldl(ptr + 0x18); + /* ldl(ptr + 0x1c); */ + if (env->hflags & HF_CS64_MASK) { + nb_xmm_regs = 16; + } else { + nb_xmm_regs = 8; + } + addr = ptr + 0xa0; + /* Fast 
FXRESTORE leaves out the XMM registers */ + if (!(env->efer & MSR_EFER_FFXSR) + || (env->hflags & HF_CPL_MASK) + || !(env->hflags & HF_LMA_MASK)) { + for (i = 0; i < nb_xmm_regs; i++) { + env->xmm_regs[i].XMM_Q(0) = ldq(addr); + env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8); + addr += 16; + } + } + } +} + +void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f) +{ + CPU_LDoubleU temp; + + temp.d = f; + *pmant = temp.l.lower; + *pexp = temp.l.upper; +} + +floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper) +{ + CPU_LDoubleU temp; + + temp.l.upper = upper; + temp.l.lower = mant; + return temp.d; +} + +/* MMX/SSE */ +/* XXX: optimize by storing fptt and fptags in the static cpu state */ + +#define SSE_DAZ 0x0040 +#define SSE_RC_MASK 0x6000 +#define SSE_RC_NEAR 0x0000 +#define SSE_RC_DOWN 0x2000 +#define SSE_RC_UP 0x4000 +#define SSE_RC_CHOP 0x6000 +#define SSE_FZ 0x8000 + +static void update_sse_status(void) +{ + int rnd_type; + + /* set rounding mode */ + switch (env->mxcsr & SSE_RC_MASK) { + default: + case SSE_RC_NEAR: + rnd_type = float_round_nearest_even; + break; + case SSE_RC_DOWN: + rnd_type = float_round_down; + break; + case SSE_RC_UP: + rnd_type = float_round_up; + break; + case SSE_RC_CHOP: + rnd_type = float_round_to_zero; + break; + } + set_float_rounding_mode(rnd_type, &env->sse_status); + + /* set denormals are zero */ + set_flush_inputs_to_zero((env->mxcsr & SSE_DAZ) ? 1 : 0, &env->sse_status); + + /* set flush to zero */ + set_flush_to_zero((env->mxcsr & SSE_FZ) ? 1 : 0, &env->fp_status); +} + +void helper_ldmxcsr(uint32_t val) +{ + env->mxcsr = val; + update_sse_status(); +} + +void helper_enter_mmx(void) +{ + env->fpstt = 0; + *(uint32_t *)(env->fptags) = 0; + *(uint32_t *)(env->fptags + 4) = 0; +} + +void helper_emms(void) +{ + /* set to empty state */ + *(uint32_t *)(env->fptags) = 0x01010101; + *(uint32_t *)(env->fptags + 4) = 0x01010101; +} + +/* XXX: suppress */ +void helper_movq(void *d, void *s) +{ + *(uint64_t *)d = *(uint64_t *)s; +} + +#define SHIFT 0 +#include "ops_sse.h" + +#define SHIFT 1 +#include "ops_sse.h" diff --git a/target-i386/helper.c b/target-i386/helper.c index 2cc80977e8..d3af6eaf71 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -951,7 +951,7 @@ static void breakpoint_handler(CPUX86State *env) if (env->watchpoint_hit->flags & BP_CPU) { env->watchpoint_hit = NULL; if (check_hw_breakpoints(env, 0)) - raise_exception_env(EXCP01_DB, env); + raise_exception(env, EXCP01_DB); else cpu_resume_from_signal(env, NULL); } @@ -960,7 +960,7 @@ static void breakpoint_handler(CPUX86State *env) if (bp->pc == env->eip) { if (bp->flags & BP_CPU) { check_hw_breakpoints(env, 1); - raise_exception_env(EXCP01_DB, env); + raise_exception(env, EXCP01_DB); } break; } diff --git a/target-i386/helper.h b/target-i386/helper.h index 761954e925..99ca18396d 100644 --- a/target-i386/helper.h +++ b/target-i386/helper.h @@ -63,8 +63,8 @@ DEF_HELPER_1(monitor, void, tl) DEF_HELPER_1(mwait, void, int) DEF_HELPER_0(debug, void) DEF_HELPER_0(reset_rf, void) -DEF_HELPER_2(raise_interrupt, void, int, int) -DEF_HELPER_1(raise_exception, void, int) +DEF_HELPER_3(raise_interrupt, void, env, int, int) +DEF_HELPER_2(raise_exception, void, env, int) DEF_HELPER_0(cli, void) DEF_HELPER_0(sti, void) DEF_HELPER_0(set_inhibit_irq, void) diff --git a/target-i386/int_helper.c b/target-i386/int_helper.c new file mode 100644 index 0000000000..e1f66f5ad1 --- /dev/null +++ b/target-i386/int_helper.c @@ -0,0 +1,500 @@ +/* + * x86 integer helpers + * + * Copyright (c) 2003 Fabrice 
Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "cpu.h" +#include "dyngen-exec.h" +#include "host-utils.h" +#include "helper.h" + +//#define DEBUG_MULDIV + +/* modulo 9 table */ +static const uint8_t rclb_table[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 0, 1, 2, 3, 4, 5, 6, + 7, 8, 0, 1, 2, 3, 4, 5, + 6, 7, 8, 0, 1, 2, 3, 4, +}; + +/* modulo 17 table */ +static const uint8_t rclw_table[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 0, 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, +}; + +/* division, flags are undefined */ + +void helper_divb_AL(target_ulong t0) +{ + unsigned int num, den, q, r; + + num = (EAX & 0xffff); + den = (t0 & 0xff); + if (den == 0) { + raise_exception(env, EXCP00_DIVZ); + } + q = (num / den); + if (q > 0xff) { + raise_exception(env, EXCP00_DIVZ); + } + q &= 0xff; + r = (num % den) & 0xff; + EAX = (EAX & ~0xffff) | (r << 8) | q; +} + +void helper_idivb_AL(target_ulong t0) +{ + int num, den, q, r; + + num = (int16_t)EAX; + den = (int8_t)t0; + if (den == 0) { + raise_exception(env, EXCP00_DIVZ); + } + q = (num / den); + if (q != (int8_t)q) { + raise_exception(env, EXCP00_DIVZ); + } + q &= 0xff; + r = (num % den) & 0xff; + EAX = (EAX & ~0xffff) | (r << 8) | q; +} + +void helper_divw_AX(target_ulong t0) +{ + unsigned int num, den, q, r; + + num = (EAX & 0xffff) | ((EDX & 0xffff) << 16); + den = (t0 & 0xffff); + if (den == 0) { + raise_exception(env, EXCP00_DIVZ); + } + q = (num / den); + if (q > 0xffff) { + raise_exception(env, EXCP00_DIVZ); + } + q &= 0xffff; + r = (num % den) & 0xffff; + EAX = (EAX & ~0xffff) | q; + EDX = (EDX & ~0xffff) | r; +} + +void helper_idivw_AX(target_ulong t0) +{ + int num, den, q, r; + + num = (EAX & 0xffff) | ((EDX & 0xffff) << 16); + den = (int16_t)t0; + if (den == 0) { + raise_exception(env, EXCP00_DIVZ); + } + q = (num / den); + if (q != (int16_t)q) { + raise_exception(env, EXCP00_DIVZ); + } + q &= 0xffff; + r = (num % den) & 0xffff; + EAX = (EAX & ~0xffff) | q; + EDX = (EDX & ~0xffff) | r; +} + +void helper_divl_EAX(target_ulong t0) +{ + unsigned int den, r; + uint64_t num, q; + + num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); + den = t0; + if (den == 0) { + raise_exception(env, EXCP00_DIVZ); + } + q = (num / den); + r = (num % den); + if (q > 0xffffffff) { + raise_exception(env, EXCP00_DIVZ); + } + EAX = (uint32_t)q; + EDX = (uint32_t)r; +} + +void helper_idivl_EAX(target_ulong t0) +{ + int den, r; + int64_t num, q; + + num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); + den = t0; + if (den == 0) { + raise_exception(env, EXCP00_DIVZ); + } + q = (num / den); + r = (num % den); + if (q != (int32_t)q) { + raise_exception(env, EXCP00_DIVZ); + } + EAX = (uint32_t)q; + EDX = (uint32_t)r; +} + +/* bcd */ + +/* XXX: exception */ +void helper_aam(int base) +{ + int al, ah; + + al = EAX & 0xff; + ah = al / base; + al = al % base; + EAX = (EAX & ~0xffff) | 
al | (ah << 8); + CC_DST = al; +} + +void helper_aad(int base) +{ + int al, ah; + + al = EAX & 0xff; + ah = (EAX >> 8) & 0xff; + al = ((ah * base) + al) & 0xff; + EAX = (EAX & ~0xffff) | al; + CC_DST = al; +} + +void helper_aaa(void) +{ + int icarry; + int al, ah, af; + int eflags; + + eflags = helper_cc_compute_all(CC_OP); + af = eflags & CC_A; + al = EAX & 0xff; + ah = (EAX >> 8) & 0xff; + + icarry = (al > 0xf9); + if (((al & 0x0f) > 9) || af) { + al = (al + 6) & 0x0f; + ah = (ah + 1 + icarry) & 0xff; + eflags |= CC_C | CC_A; + } else { + eflags &= ~(CC_C | CC_A); + al &= 0x0f; + } + EAX = (EAX & ~0xffff) | al | (ah << 8); + CC_SRC = eflags; +} + +void helper_aas(void) +{ + int icarry; + int al, ah, af; + int eflags; + + eflags = helper_cc_compute_all(CC_OP); + af = eflags & CC_A; + al = EAX & 0xff; + ah = (EAX >> 8) & 0xff; + + icarry = (al < 6); + if (((al & 0x0f) > 9) || af) { + al = (al - 6) & 0x0f; + ah = (ah - 1 - icarry) & 0xff; + eflags |= CC_C | CC_A; + } else { + eflags &= ~(CC_C | CC_A); + al &= 0x0f; + } + EAX = (EAX & ~0xffff) | al | (ah << 8); + CC_SRC = eflags; +} + +void helper_daa(void) +{ + int old_al, al, af, cf; + int eflags; + + eflags = helper_cc_compute_all(CC_OP); + cf = eflags & CC_C; + af = eflags & CC_A; + old_al = al = EAX & 0xff; + + eflags = 0; + if (((al & 0x0f) > 9) || af) { + al = (al + 6) & 0xff; + eflags |= CC_A; + } + if ((old_al > 0x99) || cf) { + al = (al + 0x60) & 0xff; + eflags |= CC_C; + } + EAX = (EAX & ~0xff) | al; + /* well, speed is not an issue here, so we compute the flags by hand */ + eflags |= (al == 0) << 6; /* zf */ + eflags |= parity_table[al]; /* pf */ + eflags |= (al & 0x80); /* sf */ + CC_SRC = eflags; +} + +void helper_das(void) +{ + int al, al1, af, cf; + int eflags; + + eflags = helper_cc_compute_all(CC_OP); + cf = eflags & CC_C; + af = eflags & CC_A; + al = EAX & 0xff; + + eflags = 0; + al1 = al; + if (((al & 0x0f) > 9) || af) { + eflags |= CC_A; + if (al < 6 || cf) { + eflags |= CC_C; + } + al = (al - 6) & 0xff; + } + if ((al1 > 0x99) || cf) { + al = (al - 0x60) & 0xff; + eflags |= CC_C; + } + EAX = (EAX & ~0xff) | al; + /* well, speed is not an issue here, so we compute the flags by hand */ + eflags |= (al == 0) << 6; /* zf */ + eflags |= parity_table[al]; /* pf */ + eflags |= (al & 0x80); /* sf */ + CC_SRC = eflags; +} + +#ifdef TARGET_X86_64 +static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b) +{ + *plow += a; + /* carry test */ + if (*plow < a) { + (*phigh)++; + } + *phigh += b; +} + +static void neg128(uint64_t *plow, uint64_t *phigh) +{ + *plow = ~*plow; + *phigh = ~*phigh; + add128(plow, phigh, 1, 0); +} + +/* return TRUE if overflow */ +static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b) +{ + uint64_t q, r, a1, a0; + int i, qb, ab; + + a0 = *plow; + a1 = *phigh; + if (a1 == 0) { + q = a0 / b; + r = a0 % b; + *plow = q; + *phigh = r; + } else { + if (a1 >= b) { + return 1; + } + /* XXX: use a better algorithm */ + for (i = 0; i < 64; i++) { + ab = a1 >> 63; + a1 = (a1 << 1) | (a0 >> 63); + if (ab || a1 >= b) { + a1 -= b; + qb = 1; + } else { + qb = 0; + } + a0 = (a0 << 1) | qb; + } +#if defined(DEBUG_MULDIV) + printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 + ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n", + *phigh, *plow, b, a0, a1); +#endif + *plow = a0; + *phigh = a1; + } + return 0; +} + +/* return TRUE if overflow */ +static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b) +{ + int sa, sb; + + sa = ((int64_t)*phigh < 0); + if (sa) { + neg128(plow, phigh); + } + sb = 
(b < 0); + if (sb) { + b = -b; + } + if (div64(plow, phigh, b) != 0) { + return 1; + } + if (sa ^ sb) { + if (*plow > (1ULL << 63)) { + return 1; + } + *plow = -*plow; + } else { + if (*plow >= (1ULL << 63)) { + return 1; + } + } + if (sa) { + *phigh = -*phigh; + } + return 0; +} + +void helper_mulq_EAX_T0(target_ulong t0) +{ + uint64_t r0, r1; + + mulu64(&r0, &r1, EAX, t0); + EAX = r0; + EDX = r1; + CC_DST = r0; + CC_SRC = r1; +} + +void helper_imulq_EAX_T0(target_ulong t0) +{ + uint64_t r0, r1; + + muls64(&r0, &r1, EAX, t0); + EAX = r0; + EDX = r1; + CC_DST = r0; + CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63)); +} + +target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1) +{ + uint64_t r0, r1; + + muls64(&r0, &r1, t0, t1); + CC_DST = r0; + CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63)); + return r0; +} + +void helper_divq_EAX(target_ulong t0) +{ + uint64_t r0, r1; + + if (t0 == 0) { + raise_exception(env, EXCP00_DIVZ); + } + r0 = EAX; + r1 = EDX; + if (div64(&r0, &r1, t0)) { + raise_exception(env, EXCP00_DIVZ); + } + EAX = r0; + EDX = r1; +} + +void helper_idivq_EAX(target_ulong t0) +{ + uint64_t r0, r1; + + if (t0 == 0) { + raise_exception(env, EXCP00_DIVZ); + } + r0 = EAX; + r1 = EDX; + if (idiv64(&r0, &r1, t0)) { + raise_exception(env, EXCP00_DIVZ); + } + EAX = r0; + EDX = r1; +} +#endif + +/* bit operations */ +target_ulong helper_bsf(target_ulong t0) +{ + int count; + target_ulong res; + + res = t0; + count = 0; + while ((res & 1) == 0) { + count++; + res >>= 1; + } + return count; +} + +target_ulong helper_lzcnt(target_ulong t0, int wordsize) +{ + int count; + target_ulong res, mask; + + if (wordsize > 0 && t0 == 0) { + return wordsize; + } + res = t0; + count = TARGET_LONG_BITS - 1; + mask = (target_ulong)1 << (TARGET_LONG_BITS - 1); + while ((res & mask) == 0) { + count--; + res <<= 1; + } + if (wordsize > 0) { + return wordsize - 1 - count; + } + return count; +} + +target_ulong helper_bsr(target_ulong t0) +{ + return helper_lzcnt(t0, 0); +} + +#define SHIFT 0 +#include "shift_helper_template.h" +#undef SHIFT + +#define SHIFT 1 +#include "shift_helper_template.h" +#undef SHIFT + +#define SHIFT 2 +#include "shift_helper_template.h" +#undef SHIFT + +#ifdef TARGET_X86_64 +#define SHIFT 3 +#include "shift_helper_template.h" +#undef SHIFT +#endif diff --git a/ioport-user.c b/target-i386/ioport-user.c index 03fac22d22..03fac22d22 100644 --- a/ioport-user.c +++ b/target-i386/ioport-user.c diff --git a/target-i386/mem_helper.c b/target-i386/mem_helper.c new file mode 100644 index 0000000000..91353c0788 --- /dev/null +++ b/target-i386/mem_helper.c @@ -0,0 +1,161 @@ +/* + * x86 memory access helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "cpu.h" +#include "dyngen-exec.h" +#include "helper.h" + +#if !defined(CONFIG_USER_ONLY) +#include "softmmu_exec.h" +#endif /* !defined(CONFIG_USER_ONLY) */ + +/* broken thread support */ + +static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED; + +void helper_lock(void) +{ + spin_lock(&global_cpu_lock); +} + +void helper_unlock(void) +{ + spin_unlock(&global_cpu_lock); +} + +void helper_cmpxchg8b(target_ulong a0) +{ + uint64_t d; + int eflags; + + eflags = helper_cc_compute_all(CC_OP); + d = ldq(a0); + if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) { + stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX); + eflags |= CC_Z; + } else { + /* always do the store */ + stq(a0, d); + EDX = (uint32_t)(d >> 32); + EAX = (uint32_t)d; + eflags &= ~CC_Z; + } + CC_SRC = eflags; +} + +#ifdef TARGET_X86_64 +void helper_cmpxchg16b(target_ulong a0) +{ + uint64_t d0, d1; + int eflags; + + if ((a0 & 0xf) != 0) { + raise_exception(env, EXCP0D_GPF); + } + eflags = helper_cc_compute_all(CC_OP); + d0 = ldq(a0); + d1 = ldq(a0 + 8); + if (d0 == EAX && d1 == EDX) { + stq(a0, EBX); + stq(a0 + 8, ECX); + eflags |= CC_Z; + } else { + /* always do the store */ + stq(a0, d0); + stq(a0 + 8, d1); + EDX = d1; + EAX = d0; + eflags &= ~CC_Z; + } + CC_SRC = eflags; +} +#endif + +void helper_boundw(target_ulong a0, int v) +{ + int low, high; + + low = ldsw(a0); + high = ldsw(a0 + 2); + v = (int16_t)v; + if (v < low || v > high) { + raise_exception(env, EXCP05_BOUND); + } +} + +void helper_boundl(target_ulong a0, int v) +{ + int low, high; + + low = ldl(a0); + high = ldl(a0 + 4); + if (v < low || v > high) { + raise_exception(env, EXCP05_BOUND); + } +} + +#if !defined(CONFIG_USER_ONLY) + +#define MMUSUFFIX _mmu + +#define SHIFT 0 +#include "softmmu_template.h" + +#define SHIFT 1 +#include "softmmu_template.h" + +#define SHIFT 2 +#include "softmmu_template.h" + +#define SHIFT 3 +#include "softmmu_template.h" + +#endif + +#if !defined(CONFIG_USER_ONLY) +/* try to fill the TLB and return an exception if error. If retaddr is + NULL, it means that the function was called in C code (i.e. not + from generated code or from helper.c) */ +/* XXX: fix it to restore all registers */ +void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx, + uintptr_t retaddr) +{ + TranslationBlock *tb; + int ret; + CPUX86State *saved_env; + + saved_env = env; + env = env1; + + ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx); + if (ret) { + if (retaddr) { + /* now we have a real cpu fault */ + tb = tb_find_pc(retaddr); + if (tb) { + /* the PC is inside the translated code. It means that we have + a virtual CPU fault */ + cpu_restore_state(tb, env, retaddr); + } + } + raise_exception_err(env, env->exception_index, env->error_code); + } + env = saved_env; +} +#endif diff --git a/target-i386/misc_helper.c b/target-i386/misc_helper.c new file mode 100644 index 0000000000..ce675b7218 --- /dev/null +++ b/target-i386/misc_helper.c @@ -0,0 +1,603 @@ +/* + * x86 misc helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "cpu.h" +#include "dyngen-exec.h" +#include "ioport.h" +#include "helper.h" + +#if !defined(CONFIG_USER_ONLY) +#include "softmmu_exec.h" +#endif /* !defined(CONFIG_USER_ONLY) */ + +/* check if Port I/O is allowed in TSS */ +static inline void check_io(int addr, int size) +{ + int io_offset, val, mask; + + /* TSS must be a valid 32 bit one */ + if (!(env->tr.flags & DESC_P_MASK) || + ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 || + env->tr.limit < 103) { + goto fail; + } + io_offset = lduw_kernel(env->tr.base + 0x66); + io_offset += (addr >> 3); + /* Note: the check needs two bytes */ + if ((io_offset + 1) > env->tr.limit) { + goto fail; + } + val = lduw_kernel(env->tr.base + io_offset); + val >>= (addr & 7); + mask = (1 << size) - 1; + /* all bits must be zero to allow the I/O */ + if ((val & mask) != 0) { + fail: + raise_exception_err(env, EXCP0D_GPF, 0); + } +} + +void helper_check_iob(uint32_t t0) +{ + check_io(t0, 1); +} + +void helper_check_iow(uint32_t t0) +{ + check_io(t0, 2); +} + +void helper_check_iol(uint32_t t0) +{ + check_io(t0, 4); +} + +void helper_outb(uint32_t port, uint32_t data) +{ + cpu_outb(port, data & 0xff); +} + +target_ulong helper_inb(uint32_t port) +{ + return cpu_inb(port); +} + +void helper_outw(uint32_t port, uint32_t data) +{ + cpu_outw(port, data & 0xffff); +} + +target_ulong helper_inw(uint32_t port) +{ + return cpu_inw(port); +} + +void helper_outl(uint32_t port, uint32_t data) +{ + cpu_outl(port, data); +} + +target_ulong helper_inl(uint32_t port) +{ + return cpu_inl(port); +} + +void helper_into(int next_eip_addend) +{ + int eflags; + + eflags = helper_cc_compute_all(CC_OP); + if (eflags & CC_O) { + raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend); + } +} + +void helper_single_step(void) +{ +#ifndef CONFIG_USER_ONLY + check_hw_breakpoints(env, 1); + env->dr[6] |= DR6_BS; +#endif + raise_exception(env, EXCP01_DB); +} + +void helper_cpuid(void) +{ + uint32_t eax, ebx, ecx, edx; + + cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0); + + cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx); + EAX = eax; + EBX = ebx; + ECX = ecx; + EDX = edx; +} + +#if defined(CONFIG_USER_ONLY) +target_ulong helper_read_crN(int reg) +{ + return 0; +} + +void helper_write_crN(int reg, target_ulong t0) +{ +} + +void helper_movl_drN_T0(int reg, target_ulong t0) +{ +} +#else +target_ulong helper_read_crN(int reg) +{ + target_ulong val; + + cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0); + switch (reg) { + default: + val = env->cr[reg]; + break; + case 8: + if (!(env->hflags2 & HF2_VINTR_MASK)) { + val = cpu_get_apic_tpr(env->apic_state); + } else { + val = env->v_tpr; + } + break; + } + return val; +} + +void helper_write_crN(int reg, target_ulong t0) +{ + cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0); + switch (reg) { + case 0: + cpu_x86_update_cr0(env, t0); + break; + case 3: + cpu_x86_update_cr3(env, t0); + break; + case 4: + cpu_x86_update_cr4(env, t0); + break; + case 8: + if (!(env->hflags2 & HF2_VINTR_MASK)) { + cpu_set_apic_tpr(env->apic_state, t0); + } + env->v_tpr = t0 & 0x0f; + break; + default: + env->cr[reg] = t0; + break; + } +} + +void helper_movl_drN_T0(int reg, target_ulong t0) +{ + int i; + + if (reg < 4) { + hw_breakpoint_remove(env, reg); + env->dr[reg] = t0; + 
hw_breakpoint_insert(env, reg); + } else if (reg == 7) { + for (i = 0; i < 4; i++) { + hw_breakpoint_remove(env, i); + } + env->dr[7] = t0; + for (i = 0; i < 4; i++) { + hw_breakpoint_insert(env, i); + } + } else { + env->dr[reg] = t0; + } +} +#endif + +void helper_lmsw(target_ulong t0) +{ + /* only 4 lower bits of CR0 are modified. PE cannot be set to zero + if already set to one. */ + t0 = (env->cr[0] & ~0xe) | (t0 & 0xf); + helper_write_crN(0, t0); +} + +void helper_invlpg(target_ulong addr) +{ + cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0); + tlb_flush_page(env, addr); +} + +void helper_rdtsc(void) +{ + uint64_t val; + + if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { + raise_exception(env, EXCP0D_GPF); + } + cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0); + + val = cpu_get_tsc(env) + env->tsc_offset; + EAX = (uint32_t)(val); + EDX = (uint32_t)(val >> 32); +} + +void helper_rdtscp(void) +{ + helper_rdtsc(); + ECX = (uint32_t)(env->tsc_aux); +} + +void helper_rdpmc(void) +{ + if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { + raise_exception(env, EXCP0D_GPF); + } + cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0); + + /* currently unimplemented */ + qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n"); + raise_exception_err(env, EXCP06_ILLOP, 0); +} + +#if defined(CONFIG_USER_ONLY) +void helper_wrmsr(void) +{ +} + +void helper_rdmsr(void) +{ +} +#else +void helper_wrmsr(void) +{ + uint64_t val; + + cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1); + + val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); + + switch ((uint32_t)ECX) { + case MSR_IA32_SYSENTER_CS: + env->sysenter_cs = val & 0xffff; + break; + case MSR_IA32_SYSENTER_ESP: + env->sysenter_esp = val; + break; + case MSR_IA32_SYSENTER_EIP: + env->sysenter_eip = val; + break; + case MSR_IA32_APICBASE: + cpu_set_apic_base(env->apic_state, val); + break; + case MSR_EFER: + { + uint64_t update_mask; + + update_mask = 0; + if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL) { + update_mask |= MSR_EFER_SCE; + } + if (env->cpuid_ext2_features & CPUID_EXT2_LM) { + update_mask |= MSR_EFER_LME; + } + if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) { + update_mask |= MSR_EFER_FFXSR; + } + if (env->cpuid_ext2_features & CPUID_EXT2_NX) { + update_mask |= MSR_EFER_NXE; + } + if (env->cpuid_ext3_features & CPUID_EXT3_SVM) { + update_mask |= MSR_EFER_SVME; + } + if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) { + update_mask |= MSR_EFER_FFXSR; + } + cpu_load_efer(env, (env->efer & ~update_mask) | + (val & update_mask)); + } + break; + case MSR_STAR: + env->star = val; + break; + case MSR_PAT: + env->pat = val; + break; + case MSR_VM_HSAVE_PA: + env->vm_hsave = val; + break; +#ifdef TARGET_X86_64 + case MSR_LSTAR: + env->lstar = val; + break; + case MSR_CSTAR: + env->cstar = val; + break; + case MSR_FMASK: + env->fmask = val; + break; + case MSR_FSBASE: + env->segs[R_FS].base = val; + break; + case MSR_GSBASE: + env->segs[R_GS].base = val; + break; + case MSR_KERNELGSBASE: + env->kernelgsbase = val; + break; +#endif + case MSR_MTRRphysBase(0): + case MSR_MTRRphysBase(1): + case MSR_MTRRphysBase(2): + case MSR_MTRRphysBase(3): + case MSR_MTRRphysBase(4): + case MSR_MTRRphysBase(5): + case MSR_MTRRphysBase(6): + case MSR_MTRRphysBase(7): + env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val; + break; + case MSR_MTRRphysMask(0): + case MSR_MTRRphysMask(1): + case MSR_MTRRphysMask(2): + case MSR_MTRRphysMask(3): + case MSR_MTRRphysMask(4): + case 
MSR_MTRRphysMask(5): + case MSR_MTRRphysMask(6): + case MSR_MTRRphysMask(7): + env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val; + break; + case MSR_MTRRfix64K_00000: + env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val; + break; + case MSR_MTRRfix16K_80000: + case MSR_MTRRfix16K_A0000: + env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val; + break; + case MSR_MTRRfix4K_C0000: + case MSR_MTRRfix4K_C8000: + case MSR_MTRRfix4K_D0000: + case MSR_MTRRfix4K_D8000: + case MSR_MTRRfix4K_E0000: + case MSR_MTRRfix4K_E8000: + case MSR_MTRRfix4K_F0000: + case MSR_MTRRfix4K_F8000: + env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val; + break; + case MSR_MTRRdefType: + env->mtrr_deftype = val; + break; + case MSR_MCG_STATUS: + env->mcg_status = val; + break; + case MSR_MCG_CTL: + if ((env->mcg_cap & MCG_CTL_P) + && (val == 0 || val == ~(uint64_t)0)) { + env->mcg_ctl = val; + } + break; + case MSR_TSC_AUX: + env->tsc_aux = val; + break; + case MSR_IA32_MISC_ENABLE: + env->msr_ia32_misc_enable = val; + break; + default: + if ((uint32_t)ECX >= MSR_MC0_CTL + && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) { + uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL; + if ((offset & 0x3) != 0 + || (val == 0 || val == ~(uint64_t)0)) { + env->mce_banks[offset] = val; + } + break; + } + /* XXX: exception? */ + break; + } +} + +void helper_rdmsr(void) +{ + uint64_t val; + + cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0); + + switch ((uint32_t)ECX) { + case MSR_IA32_SYSENTER_CS: + val = env->sysenter_cs; + break; + case MSR_IA32_SYSENTER_ESP: + val = env->sysenter_esp; + break; + case MSR_IA32_SYSENTER_EIP: + val = env->sysenter_eip; + break; + case MSR_IA32_APICBASE: + val = cpu_get_apic_base(env->apic_state); + break; + case MSR_EFER: + val = env->efer; + break; + case MSR_STAR: + val = env->star; + break; + case MSR_PAT: + val = env->pat; + break; + case MSR_VM_HSAVE_PA: + val = env->vm_hsave; + break; + case MSR_IA32_PERF_STATUS: + /* tsc_increment_by_tick */ + val = 1000ULL; + /* CPU multiplier */ + val |= (((uint64_t)4ULL) << 40); + break; +#ifdef TARGET_X86_64 + case MSR_LSTAR: + val = env->lstar; + break; + case MSR_CSTAR: + val = env->cstar; + break; + case MSR_FMASK: + val = env->fmask; + break; + case MSR_FSBASE: + val = env->segs[R_FS].base; + break; + case MSR_GSBASE: + val = env->segs[R_GS].base; + break; + case MSR_KERNELGSBASE: + val = env->kernelgsbase; + break; + case MSR_TSC_AUX: + val = env->tsc_aux; + break; +#endif + case MSR_MTRRphysBase(0): + case MSR_MTRRphysBase(1): + case MSR_MTRRphysBase(2): + case MSR_MTRRphysBase(3): + case MSR_MTRRphysBase(4): + case MSR_MTRRphysBase(5): + case MSR_MTRRphysBase(6): + case MSR_MTRRphysBase(7): + val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base; + break; + case MSR_MTRRphysMask(0): + case MSR_MTRRphysMask(1): + case MSR_MTRRphysMask(2): + case MSR_MTRRphysMask(3): + case MSR_MTRRphysMask(4): + case MSR_MTRRphysMask(5): + case MSR_MTRRphysMask(6): + case MSR_MTRRphysMask(7): + val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask; + break; + case MSR_MTRRfix64K_00000: + val = env->mtrr_fixed[0]; + break; + case MSR_MTRRfix16K_80000: + case MSR_MTRRfix16K_A0000: + val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1]; + break; + case MSR_MTRRfix4K_C0000: + case MSR_MTRRfix4K_C8000: + case MSR_MTRRfix4K_D0000: + case MSR_MTRRfix4K_D8000: + case MSR_MTRRfix4K_E0000: + case MSR_MTRRfix4K_E8000: + case MSR_MTRRfix4K_F0000: + case MSR_MTRRfix4K_F8000: + val 
= env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3]; + break; + case MSR_MTRRdefType: + val = env->mtrr_deftype; + break; + case MSR_MTRRcap: + if (env->cpuid_features & CPUID_MTRR) { + val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | + MSR_MTRRcap_WC_SUPPORTED; + } else { + /* XXX: exception? */ + val = 0; + } + break; + case MSR_MCG_CAP: + val = env->mcg_cap; + break; + case MSR_MCG_CTL: + if (env->mcg_cap & MCG_CTL_P) { + val = env->mcg_ctl; + } else { + val = 0; + } + break; + case MSR_MCG_STATUS: + val = env->mcg_status; + break; + case MSR_IA32_MISC_ENABLE: + val = env->msr_ia32_misc_enable; + break; + default: + if ((uint32_t)ECX >= MSR_MC0_CTL + && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) { + uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL; + val = env->mce_banks[offset]; + break; + } + /* XXX: exception? */ + val = 0; + break; + } + EAX = (uint32_t)(val); + EDX = (uint32_t)(val >> 32); +} +#endif + +static void do_hlt(void) +{ + env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */ + env->halted = 1; + env->exception_index = EXCP_HLT; + cpu_loop_exit(env); +} + +void helper_hlt(int next_eip_addend) +{ + cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0); + EIP += next_eip_addend; + + do_hlt(); +} + +void helper_monitor(target_ulong ptr) +{ + if ((uint32_t)ECX != 0) { + raise_exception(env, EXCP0D_GPF); + } + /* XXX: store address? */ + cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0); +} + +void helper_mwait(int next_eip_addend) +{ + if ((uint32_t)ECX != 0) { + raise_exception(env, EXCP0D_GPF); + } + cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0); + EIP += next_eip_addend; + + /* XXX: not complete but not completely erroneous */ + if (env->cpu_index != 0 || env->next_cpu != NULL) { + /* more than one CPU: do not sleep because another CPU may + wake this one */ + } else { + do_hlt(); + } +} + +void helper_debug(void) +{ + env->exception_index = EXCP_DEBUG; + cpu_loop_exit(env); +} diff --git a/target-i386/op_helper.c b/target-i386/op_helper.c deleted file mode 100644 index bc3b94e149..0000000000 --- a/target-i386/op_helper.c +++ /dev/null @@ -1,5922 +0,0 @@ -/* - * i386 helpers - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see <http://www.gnu.org/licenses/>. - */ - -#include <math.h> -#include "cpu.h" -#include "dyngen-exec.h" -#include "host-utils.h" -#include "ioport.h" -#include "qemu-log.h" -#include "cpu-defs.h" -#include "helper.h" - -#if !defined(CONFIG_USER_ONLY) -#include "softmmu_exec.h" -#endif /* !defined(CONFIG_USER_ONLY) */ - -//#define DEBUG_PCALL - -#ifdef DEBUG_PCALL -# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__) -# define LOG_PCALL_STATE(env) \ - log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP) -#else -# define LOG_PCALL(...) 
do { } while (0) -# define LOG_PCALL_STATE(env) do { } while (0) -#endif - -/* n must be a constant to be efficient */ -static inline target_long lshift(target_long x, int n) -{ - if (n >= 0) { - return x << n; - } else { - return x >> (-n); - } -} - -#define FPU_RC_MASK 0xc00 -#define FPU_RC_NEAR 0x000 -#define FPU_RC_DOWN 0x400 -#define FPU_RC_UP 0x800 -#define FPU_RC_CHOP 0xc00 - -#define MAXTAN 9223372036854775808.0 - -/* the following deal with x86 long double-precision numbers */ -#define MAXEXPD 0x7fff -#define EXPBIAS 16383 -#define EXPD(fp) (fp.l.upper & 0x7fff) -#define SIGND(fp) ((fp.l.upper) & 0x8000) -#define MANTD(fp) (fp.l.lower) -#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS - -static inline void fpush(void) -{ - env->fpstt = (env->fpstt - 1) & 7; - env->fptags[env->fpstt] = 0; /* validate stack entry */ -} - -static inline void fpop(void) -{ - env->fptags[env->fpstt] = 1; /* invvalidate stack entry */ - env->fpstt = (env->fpstt + 1) & 7; -} - -static inline floatx80 helper_fldt(target_ulong ptr) -{ - CPU_LDoubleU temp; - - temp.l.lower = ldq(ptr); - temp.l.upper = lduw(ptr + 8); - return temp.d; -} - -static inline void helper_fstt(floatx80 f, target_ulong ptr) -{ - CPU_LDoubleU temp; - - temp.d = f; - stq(ptr, temp.l.lower); - stw(ptr + 8, temp.l.upper); -} - -#define FPUS_IE (1 << 0) -#define FPUS_DE (1 << 1) -#define FPUS_ZE (1 << 2) -#define FPUS_OE (1 << 3) -#define FPUS_UE (1 << 4) -#define FPUS_PE (1 << 5) -#define FPUS_SF (1 << 6) -#define FPUS_SE (1 << 7) -#define FPUS_B (1 << 15) - -#define FPUC_EM 0x3f - -static inline uint32_t compute_eflags(void) -{ - return env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK); -} - -/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */ -static inline void load_eflags(int eflags, int update_mask) -{ - CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); - DF = 1 - (2 * ((eflags >> 10) & 1)); - env->eflags = (env->eflags & ~update_mask) | - (eflags & update_mask) | 0x2; -} - -/* load efer and update the corresponding hflags. XXX: do consistency - checks with cpuid bits ? 
*/ -static inline void cpu_load_efer(CPUX86State *env, uint64_t val) -{ - env->efer = val; - env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK); - if (env->efer & MSR_EFER_LMA) { - env->hflags |= HF_LMA_MASK; - } - if (env->efer & MSR_EFER_SVME) { - env->hflags |= HF_SVME_MASK; - } -} - -#if 0 -#define raise_exception_err(a, b)\ -do {\ - qemu_log("raise_exception line=%d\n", __LINE__);\ - (raise_exception_err)(a, b);\ -} while (0) -#endif - -static void QEMU_NORETURN raise_exception_err(int exception_index, - int error_code); - -static const uint8_t parity_table[256] = { - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, -}; - -/* modulo 17 table */ -static const uint8_t rclw_table[32] = { - 0, 1, 2, 3, 4, 5, 6, 7, - 8, 9,10,11,12,13,14,15, - 16, 0, 1, 2, 3, 4, 5, 6, - 7, 8, 9,10,11,12,13,14, -}; - -/* modulo 9 table */ -static const uint8_t rclb_table[32] = { - 0, 1, 2, 3, 4, 5, 6, 7, - 8, 0, 1, 2, 3, 4, 5, 6, - 7, 8, 0, 1, 2, 3, 4, 5, - 6, 7, 8, 0, 1, 2, 3, 4, -}; - -#define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL ) -#define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL ) -#define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL ) - -/* broken thread support */ - -static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED; - -void helper_lock(void) -{ - spin_lock(&global_cpu_lock); -} - -void helper_unlock(void) -{ - spin_unlock(&global_cpu_lock); -} - -void helper_write_eflags(target_ulong t0, uint32_t update_mask) -{ - load_eflags(t0, update_mask); -} - -target_ulong helper_read_eflags(void) -{ - uint32_t eflags; - eflags = helper_cc_compute_all(CC_OP); - eflags |= (DF & DF_MASK); - eflags |= env->eflags & ~(VM_MASK | RF_MASK); - return eflags; -} - -/* return non zero if error */ -static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr, - int selector) -{ - SegmentCache *dt; - int index; - target_ulong ptr; - - if (selector & 0x4) - dt = &env->ldt; - else - dt = &env->gdt; - index = selector & ~7; - if ((index + 7) > dt->limit) - return -1; - ptr = dt->base + index; - *e1_ptr = ldl_kernel(ptr); - *e2_ptr = ldl_kernel(ptr + 4); - return 0; -} - -static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) -{ - unsigned int limit; - limit = (e1 & 0xffff) | (e2 & 0x000f0000); - if (e2 & DESC_G_MASK) - limit = (limit << 12) | 0xfff; - return limit; -} - -static inline uint32_t get_seg_base(uint32_t e1, 
uint32_t e2) -{ - return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)); -} - -static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2) -{ - sc->base = get_seg_base(e1, e2); - sc->limit = get_seg_limit(e1, e2); - sc->flags = e2; -} - -/* init the segment cache in vm86 mode. */ -static inline void load_seg_vm(int seg, int selector) -{ - selector &= 0xffff; - cpu_x86_load_seg_cache(env, seg, selector, - (selector << 4), 0xffff, 0); -} - -static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, - uint32_t *esp_ptr, int dpl) -{ - int type, index, shift; - -#if 0 - { - int i; - printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit); - for(i=0;i<env->tr.limit;i++) { - printf("%02x ", env->tr.base[i]); - if ((i & 7) == 7) printf("\n"); - } - printf("\n"); - } -#endif - - if (!(env->tr.flags & DESC_P_MASK)) - cpu_abort(env, "invalid tss"); - type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; - if ((type & 7) != 1) - cpu_abort(env, "invalid tss type"); - shift = type >> 3; - index = (dpl * 4 + 2) << shift; - if (index + (4 << shift) - 1 > env->tr.limit) - raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc); - if (shift == 0) { - *esp_ptr = lduw_kernel(env->tr.base + index); - *ss_ptr = lduw_kernel(env->tr.base + index + 2); - } else { - *esp_ptr = ldl_kernel(env->tr.base + index); - *ss_ptr = lduw_kernel(env->tr.base + index + 4); - } -} - -/* XXX: merge with load_seg() */ -static void tss_load_seg(int seg_reg, int selector) -{ - uint32_t e1, e2; - int rpl, dpl, cpl; - - if ((selector & 0xfffc) != 0) { - if (load_segment(&e1, &e2, selector) != 0) - raise_exception_err(EXCP0A_TSS, selector & 0xfffc); - if (!(e2 & DESC_S_MASK)) - raise_exception_err(EXCP0A_TSS, selector & 0xfffc); - rpl = selector & 3; - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - cpl = env->hflags & HF_CPL_MASK; - if (seg_reg == R_CS) { - if (!(e2 & DESC_CS_MASK)) - raise_exception_err(EXCP0A_TSS, selector & 0xfffc); - /* XXX: is it correct ? 
*/ - if (dpl != rpl) - raise_exception_err(EXCP0A_TSS, selector & 0xfffc); - if ((e2 & DESC_C_MASK) && dpl > rpl) - raise_exception_err(EXCP0A_TSS, selector & 0xfffc); - } else if (seg_reg == R_SS) { - /* SS must be writable data */ - if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) - raise_exception_err(EXCP0A_TSS, selector & 0xfffc); - if (dpl != cpl || dpl != rpl) - raise_exception_err(EXCP0A_TSS, selector & 0xfffc); - } else { - /* not readable code */ - if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) - raise_exception_err(EXCP0A_TSS, selector & 0xfffc); - /* if data or non conforming code, checks the rights */ - if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) { - if (dpl < cpl || dpl < rpl) - raise_exception_err(EXCP0A_TSS, selector & 0xfffc); - } - } - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); - cpu_x86_load_seg_cache(env, seg_reg, selector, - get_seg_base(e1, e2), - get_seg_limit(e1, e2), - e2); - } else { - if (seg_reg == R_SS || seg_reg == R_CS) - raise_exception_err(EXCP0A_TSS, selector & 0xfffc); - } -} - -#define SWITCH_TSS_JMP 0 -#define SWITCH_TSS_IRET 1 -#define SWITCH_TSS_CALL 2 - -/* XXX: restore CPU state in registers (PowerPC case) */ -static void switch_tss(int tss_selector, - uint32_t e1, uint32_t e2, int source, - uint32_t next_eip) -{ - int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i; - target_ulong tss_base; - uint32_t new_regs[8], new_segs[6]; - uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap; - uint32_t old_eflags, eflags_mask; - SegmentCache *dt; - int index; - target_ulong ptr; - - type = (e2 >> DESC_TYPE_SHIFT) & 0xf; - LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source); - - /* if task gate, we read the TSS segment and we load it */ - if (type == 5) { - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc); - tss_selector = e1 >> 16; - if (tss_selector & 4) - raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); - if (load_segment(&e1, &e2, tss_selector) != 0) - raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc); - if (e2 & DESC_S_MASK) - raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc); - type = (e2 >> DESC_TYPE_SHIFT) & 0xf; - if ((type & 7) != 1) - raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc); - } - - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc); - - if (type & 8) - tss_limit_max = 103; - else - tss_limit_max = 43; - tss_limit = get_seg_limit(e1, e2); - tss_base = get_seg_base(e1, e2); - if ((tss_selector & 4) != 0 || - tss_limit < tss_limit_max) - raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); - old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; - if (old_type & 8) - old_tss_limit_max = 103; - else - old_tss_limit_max = 43; - - /* read all the registers from the new TSS */ - if (type & 8) { - /* 32 bit */ - new_cr3 = ldl_kernel(tss_base + 0x1c); - new_eip = ldl_kernel(tss_base + 0x20); - new_eflags = ldl_kernel(tss_base + 0x24); - for(i = 0; i < 8; i++) - new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4)); - for(i = 0; i < 6; i++) - new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4)); - new_ldt = lduw_kernel(tss_base + 0x60); - new_trap = ldl_kernel(tss_base + 0x64); - } else { - /* 16 bit */ - new_cr3 = 0; - new_eip = lduw_kernel(tss_base + 0x0e); - new_eflags = lduw_kernel(tss_base + 0x10); - for(i = 0; i < 8; i++) - new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000; - for(i = 0; i < 4; i++) - new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 
4)); - new_ldt = lduw_kernel(tss_base + 0x2a); - new_segs[R_FS] = 0; - new_segs[R_GS] = 0; - new_trap = 0; - } - /* XXX: avoid a compiler warning, see - http://support.amd.com/us/Processor_TechDocs/24593.pdf - chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */ - (void)new_trap; - - /* NOTE: we must avoid memory exceptions during the task switch, - so we make dummy accesses before */ - /* XXX: it can still fail in some cases, so a bigger hack is - necessary to valid the TLB after having done the accesses */ - - v1 = ldub_kernel(env->tr.base); - v2 = ldub_kernel(env->tr.base + old_tss_limit_max); - stb_kernel(env->tr.base, v1); - stb_kernel(env->tr.base + old_tss_limit_max, v2); - - /* clear busy bit (it is restartable) */ - if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { - target_ulong ptr; - uint32_t e2; - ptr = env->gdt.base + (env->tr.selector & ~7); - e2 = ldl_kernel(ptr + 4); - e2 &= ~DESC_TSS_BUSY_MASK; - stl_kernel(ptr + 4, e2); - } - old_eflags = compute_eflags(); - if (source == SWITCH_TSS_IRET) - old_eflags &= ~NT_MASK; - - /* save the current state in the old TSS */ - if (type & 8) { - /* 32 bit */ - stl_kernel(env->tr.base + 0x20, next_eip); - stl_kernel(env->tr.base + 0x24, old_eflags); - stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX); - stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX); - stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX); - stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX); - stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP); - stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP); - stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI); - stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI); - for(i = 0; i < 6; i++) - stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector); - } else { - /* 16 bit */ - stw_kernel(env->tr.base + 0x0e, next_eip); - stw_kernel(env->tr.base + 0x10, old_eflags); - stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX); - stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX); - stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX); - stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX); - stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP); - stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP); - stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI); - stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI); - for(i = 0; i < 4; i++) - stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector); - } - - /* now if an exception occurs, it will occurs in the next task - context */ - - if (source == SWITCH_TSS_CALL) { - stw_kernel(tss_base, env->tr.selector); - new_eflags |= NT_MASK; - } - - /* set busy bit */ - if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) { - target_ulong ptr; - uint32_t e2; - ptr = env->gdt.base + (tss_selector & ~7); - e2 = ldl_kernel(ptr + 4); - e2 |= DESC_TSS_BUSY_MASK; - stl_kernel(ptr + 4, e2); - } - - /* set the new CPU state */ - /* from this point, any exception which occurs can give problems */ - env->cr[0] |= CR0_TS_MASK; - env->hflags |= HF_TS_MASK; - env->tr.selector = tss_selector; - env->tr.base = tss_base; - env->tr.limit = tss_limit; - env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK; - - if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) { - cpu_x86_update_cr3(env, new_cr3); - } - - /* load all registers without an exception, then reload them with - possible exception */ - env->eip = new_eip; - eflags_mask = TF_MASK | AC_MASK | ID_MASK | - IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK; - if (!(type & 8)) - eflags_mask &= 0xffff; - load_eflags(new_eflags, eflags_mask); - /* XXX: what to do in 16 bit case ? 
*/ - EAX = new_regs[0]; - ECX = new_regs[1]; - EDX = new_regs[2]; - EBX = new_regs[3]; - ESP = new_regs[4]; - EBP = new_regs[5]; - ESI = new_regs[6]; - EDI = new_regs[7]; - if (new_eflags & VM_MASK) { - for(i = 0; i < 6; i++) - load_seg_vm(i, new_segs[i]); - /* in vm86, CPL is always 3 */ - cpu_x86_set_cpl(env, 3); - } else { - /* CPL is set the RPL of CS */ - cpu_x86_set_cpl(env, new_segs[R_CS] & 3); - /* first just selectors as the rest may trigger exceptions */ - for(i = 0; i < 6; i++) - cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0); - } - - env->ldt.selector = new_ldt & ~4; - env->ldt.base = 0; - env->ldt.limit = 0; - env->ldt.flags = 0; - - /* load the LDT */ - if (new_ldt & 4) - raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); - - if ((new_ldt & 0xfffc) != 0) { - dt = &env->gdt; - index = new_ldt & ~7; - if ((index + 7) > dt->limit) - raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); - ptr = dt->base + index; - e1 = ldl_kernel(ptr); - e2 = ldl_kernel(ptr + 4); - if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) - raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); - load_seg_cache_raw_dt(&env->ldt, e1, e2); - } - - /* load the segments */ - if (!(new_eflags & VM_MASK)) { - tss_load_seg(R_CS, new_segs[R_CS]); - tss_load_seg(R_SS, new_segs[R_SS]); - tss_load_seg(R_ES, new_segs[R_ES]); - tss_load_seg(R_DS, new_segs[R_DS]); - tss_load_seg(R_FS, new_segs[R_FS]); - tss_load_seg(R_GS, new_segs[R_GS]); - } - - /* check that EIP is in the CS segment limits */ - if (new_eip > env->segs[R_CS].limit) { - /* XXX: different exception if CALL ? */ - raise_exception_err(EXCP0D_GPF, 0); - } - -#ifndef CONFIG_USER_ONLY - /* reset local breakpoints */ - if (env->dr[7] & 0x55) { - for (i = 0; i < 4; i++) { - if (hw_breakpoint_enabled(env->dr[7], i) == 0x1) - hw_breakpoint_remove(env, i); - } - env->dr[7] &= ~0x55; - } -#endif -} - -/* check if Port I/O is allowed in TSS */ -static inline void check_io(int addr, int size) -{ - int io_offset, val, mask; - - /* TSS must be a valid 32 bit one */ - if (!(env->tr.flags & DESC_P_MASK) || - ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 || - env->tr.limit < 103) - goto fail; - io_offset = lduw_kernel(env->tr.base + 0x66); - io_offset += (addr >> 3); - /* Note: the check needs two bytes */ - if ((io_offset + 1) > env->tr.limit) - goto fail; - val = lduw_kernel(env->tr.base + io_offset); - val >>= (addr & 7); - mask = (1 << size) - 1; - /* all bits must be zero to allow the I/O */ - if ((val & mask) != 0) { - fail: - raise_exception_err(EXCP0D_GPF, 0); - } -} - -void helper_check_iob(uint32_t t0) -{ - check_io(t0, 1); -} - -void helper_check_iow(uint32_t t0) -{ - check_io(t0, 2); -} - -void helper_check_iol(uint32_t t0) -{ - check_io(t0, 4); -} - -void helper_outb(uint32_t port, uint32_t data) -{ - cpu_outb(port, data & 0xff); -} - -target_ulong helper_inb(uint32_t port) -{ - return cpu_inb(port); -} - -void helper_outw(uint32_t port, uint32_t data) -{ - cpu_outw(port, data & 0xffff); -} - -target_ulong helper_inw(uint32_t port) -{ - return cpu_inw(port); -} - -void helper_outl(uint32_t port, uint32_t data) -{ - cpu_outl(port, data); -} - -target_ulong helper_inl(uint32_t port) -{ - return cpu_inl(port); -} - -static inline unsigned int get_sp_mask(unsigned int e2) -{ - if (e2 & DESC_B_MASK) - return 0xffffffff; - else - return 0xffff; -} - -static int exeption_has_error_code(int intno) -{ - switch(intno) { - case 8: - case 10: - case 11: - case 12: - case 13: 
- case 14: - case 17: - return 1; - } - return 0; -} - -#ifdef TARGET_X86_64 -#define SET_ESP(val, sp_mask)\ -do {\ - if ((sp_mask) == 0xffff)\ - ESP = (ESP & ~0xffff) | ((val) & 0xffff);\ - else if ((sp_mask) == 0xffffffffLL)\ - ESP = (uint32_t)(val);\ - else\ - ESP = (val);\ -} while (0) -#else -#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask)) -#endif - -/* in 64-bit machines, this can overflow. So this segment addition macro - * can be used to trim the value to 32-bit whenever needed */ -#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask)))) - -/* XXX: add a is_user flag to have proper security support */ -#define PUSHW(ssp, sp, sp_mask, val)\ -{\ - sp -= 2;\ - stw_kernel((ssp) + (sp & (sp_mask)), (val));\ -} - -#define PUSHL(ssp, sp, sp_mask, val)\ -{\ - sp -= 4;\ - stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\ -} - -#define POPW(ssp, sp, sp_mask, val)\ -{\ - val = lduw_kernel((ssp) + (sp & (sp_mask)));\ - sp += 2;\ -} - -#define POPL(ssp, sp, sp_mask, val)\ -{\ - val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\ - sp += 4;\ -} - -/* protected mode interrupt */ -static void do_interrupt_protected(int intno, int is_int, int error_code, - unsigned int next_eip, int is_hw) -{ - SegmentCache *dt; - target_ulong ptr, ssp; - int type, dpl, selector, ss_dpl, cpl; - int has_error_code, new_stack, shift; - uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0; - uint32_t old_eip, sp_mask; - - has_error_code = 0; - if (!is_int && !is_hw) - has_error_code = exeption_has_error_code(intno); - if (is_int) - old_eip = next_eip; - else - old_eip = env->eip; - - dt = &env->idt; - if (intno * 8 + 7 > dt->limit) - raise_exception_err(EXCP0D_GPF, intno * 8 + 2); - ptr = dt->base + intno * 8; - e1 = ldl_kernel(ptr); - e2 = ldl_kernel(ptr + 4); - /* check gate type */ - type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; - switch(type) { - case 5: /* task gate */ - /* must do that check here to return the correct error code */ - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2); - switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip); - if (has_error_code) { - int type; - uint32_t mask; - /* push the error code */ - type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; - shift = type >> 3; - if (env->segs[R_SS].flags & DESC_B_MASK) - mask = 0xffffffff; - else - mask = 0xffff; - esp = (ESP - (2 << shift)) & mask; - ssp = env->segs[R_SS].base + esp; - if (shift) - stl_kernel(ssp, error_code); - else - stw_kernel(ssp, error_code); - SET_ESP(esp, mask); - } - return; - case 6: /* 286 interrupt gate */ - case 7: /* 286 trap gate */ - case 14: /* 386 interrupt gate */ - case 15: /* 386 trap gate */ - break; - default: - raise_exception_err(EXCP0D_GPF, intno * 8 + 2); - break; - } - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - cpl = env->hflags & HF_CPL_MASK; - /* check privilege if software int */ - if (is_int && dpl < cpl) - raise_exception_err(EXCP0D_GPF, intno * 8 + 2); - /* check valid bit */ - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2); - selector = e1 >> 16; - offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); - if ((selector & 0xfffc) == 0) - raise_exception_err(EXCP0D_GPF, 0); - - if (load_segment(&e1, &e2, selector) != 0) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - if (dpl > cpl) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - if (!(e2 & 
DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); - if (!(e2 & DESC_C_MASK) && dpl < cpl) { - /* to inner privilege */ - get_ss_esp_from_tss(&ss, &esp, dpl); - if ((ss & 0xfffc) == 0) - raise_exception_err(EXCP0A_TSS, ss & 0xfffc); - if ((ss & 3) != dpl) - raise_exception_err(EXCP0A_TSS, ss & 0xfffc); - if (load_segment(&ss_e1, &ss_e2, ss) != 0) - raise_exception_err(EXCP0A_TSS, ss & 0xfffc); - ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; - if (ss_dpl != dpl) - raise_exception_err(EXCP0A_TSS, ss & 0xfffc); - if (!(ss_e2 & DESC_S_MASK) || - (ss_e2 & DESC_CS_MASK) || - !(ss_e2 & DESC_W_MASK)) - raise_exception_err(EXCP0A_TSS, ss & 0xfffc); - if (!(ss_e2 & DESC_P_MASK)) - raise_exception_err(EXCP0A_TSS, ss & 0xfffc); - new_stack = 1; - sp_mask = get_sp_mask(ss_e2); - ssp = get_seg_base(ss_e1, ss_e2); - } else if ((e2 & DESC_C_MASK) || dpl == cpl) { - /* to same privilege */ - if (env->eflags & VM_MASK) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - new_stack = 0; - sp_mask = get_sp_mask(env->segs[R_SS].flags); - ssp = env->segs[R_SS].base; - esp = ESP; - dpl = cpl; - } else { - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - new_stack = 0; /* avoid warning */ - sp_mask = 0; /* avoid warning */ - ssp = 0; /* avoid warning */ - esp = 0; /* avoid warning */ - } - - shift = type >> 3; - -#if 0 - /* XXX: check that enough room is available */ - push_size = 6 + (new_stack << 2) + (has_error_code << 1); - if (env->eflags & VM_MASK) - push_size += 8; - push_size <<= shift; -#endif - if (shift == 1) { - if (new_stack) { - if (env->eflags & VM_MASK) { - PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector); - PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector); - PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector); - PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector); - } - PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector); - PUSHL(ssp, esp, sp_mask, ESP); - } - PUSHL(ssp, esp, sp_mask, compute_eflags()); - PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector); - PUSHL(ssp, esp, sp_mask, old_eip); - if (has_error_code) { - PUSHL(ssp, esp, sp_mask, error_code); - } - } else { - if (new_stack) { - if (env->eflags & VM_MASK) { - PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector); - PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector); - PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector); - PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector); - } - PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector); - PUSHW(ssp, esp, sp_mask, ESP); - } - PUSHW(ssp, esp, sp_mask, compute_eflags()); - PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector); - PUSHW(ssp, esp, sp_mask, old_eip); - if (has_error_code) { - PUSHW(ssp, esp, sp_mask, error_code); - } - } - - if (new_stack) { - if (env->eflags & VM_MASK) { - cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0); - cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0); - cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0); - cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0); - } - ss = (ss & ~3) | dpl; - cpu_x86_load_seg_cache(env, R_SS, ss, - ssp, get_seg_limit(ss_e1, ss_e2), ss_e2); - } - SET_ESP(esp, sp_mask); - - selector = (selector & ~3) | dpl; - cpu_x86_load_seg_cache(env, R_CS, selector, - get_seg_base(e1, e2), - get_seg_limit(e1, e2), - e2); - cpu_x86_set_cpl(env, dpl); - env->eip = offset; - - /* interrupt gate clear IF mask */ - if ((type & 1) == 0) { - env->eflags &= ~IF_MASK; - } - env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); -} - -#ifdef TARGET_X86_64 - -#define PUSHQ(sp, val)\ -{\ - sp -= 8;\ - stq_kernel(sp, (val));\ -} - -#define POPQ(sp, val)\ -{\ 
- val = ldq_kernel(sp);\ - sp += 8;\ -} - -static inline target_ulong get_rsp_from_tss(int level) -{ - int index; - -#if 0 - printf("TR: base=" TARGET_FMT_lx " limit=%x\n", - env->tr.base, env->tr.limit); -#endif - - if (!(env->tr.flags & DESC_P_MASK)) - cpu_abort(env, "invalid tss"); - index = 8 * level + 4; - if ((index + 7) > env->tr.limit) - raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc); - return ldq_kernel(env->tr.base + index); -} - -/* 64 bit interrupt */ -static void do_interrupt64(int intno, int is_int, int error_code, - target_ulong next_eip, int is_hw) -{ - SegmentCache *dt; - target_ulong ptr; - int type, dpl, selector, cpl, ist; - int has_error_code, new_stack; - uint32_t e1, e2, e3, ss; - target_ulong old_eip, esp, offset; - - has_error_code = 0; - if (!is_int && !is_hw) - has_error_code = exeption_has_error_code(intno); - if (is_int) - old_eip = next_eip; - else - old_eip = env->eip; - - dt = &env->idt; - if (intno * 16 + 15 > dt->limit) - raise_exception_err(EXCP0D_GPF, intno * 16 + 2); - ptr = dt->base + intno * 16; - e1 = ldl_kernel(ptr); - e2 = ldl_kernel(ptr + 4); - e3 = ldl_kernel(ptr + 8); - /* check gate type */ - type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; - switch(type) { - case 14: /* 386 interrupt gate */ - case 15: /* 386 trap gate */ - break; - default: - raise_exception_err(EXCP0D_GPF, intno * 16 + 2); - break; - } - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - cpl = env->hflags & HF_CPL_MASK; - /* check privilege if software int */ - if (is_int && dpl < cpl) - raise_exception_err(EXCP0D_GPF, intno * 16 + 2); - /* check valid bit */ - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2); - selector = e1 >> 16; - offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff); - ist = e2 & 7; - if ((selector & 0xfffc) == 0) - raise_exception_err(EXCP0D_GPF, 0); - - if (load_segment(&e1, &e2, selector) != 0) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - if (dpl > cpl) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); - if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) { - /* to inner privilege */ - if (ist != 0) - esp = get_rsp_from_tss(ist + 3); - else - esp = get_rsp_from_tss(dpl); - esp &= ~0xfLL; /* align stack */ - ss = 0; - new_stack = 1; - } else if ((e2 & DESC_C_MASK) || dpl == cpl) { - /* to same privilege */ - if (env->eflags & VM_MASK) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - new_stack = 0; - if (ist != 0) - esp = get_rsp_from_tss(ist + 3); - else - esp = ESP; - esp &= ~0xfLL; /* align stack */ - dpl = cpl; - } else { - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - new_stack = 0; /* avoid warning */ - esp = 0; /* avoid warning */ - } - - PUSHQ(esp, env->segs[R_SS].selector); - PUSHQ(esp, ESP); - PUSHQ(esp, compute_eflags()); - PUSHQ(esp, env->segs[R_CS].selector); - PUSHQ(esp, old_eip); - if (has_error_code) { - PUSHQ(esp, error_code); - } - - if (new_stack) { - ss = 0 | dpl; - cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); - } - ESP = esp; - - selector = (selector & ~3) | dpl; - cpu_x86_load_seg_cache(env, R_CS, selector, - get_seg_base(e1, e2), - get_seg_limit(e1, e2), - e2); - cpu_x86_set_cpl(env, dpl); - env->eip = offset; - - /* interrupt gate clear IF 
mask */ - if ((type & 1) == 0) { - env->eflags &= ~IF_MASK; - } - env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); -} -#endif - -#ifdef TARGET_X86_64 -#if defined(CONFIG_USER_ONLY) -void helper_syscall(int next_eip_addend) -{ - env->exception_index = EXCP_SYSCALL; - env->exception_next_eip = env->eip + next_eip_addend; - cpu_loop_exit(env); -} -#else -void helper_syscall(int next_eip_addend) -{ - int selector; - - if (!(env->efer & MSR_EFER_SCE)) { - raise_exception_err(EXCP06_ILLOP, 0); - } - selector = (env->star >> 32) & 0xffff; - if (env->hflags & HF_LMA_MASK) { - int code64; - - ECX = env->eip + next_eip_addend; - env->regs[11] = compute_eflags(); - - code64 = env->hflags & HF_CS64_MASK; - - cpu_x86_set_cpl(env, 0); - cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK); - cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_W_MASK | DESC_A_MASK); - env->eflags &= ~env->fmask; - load_eflags(env->eflags, 0); - if (code64) - env->eip = env->lstar; - else - env->eip = env->cstar; - } else { - ECX = (uint32_t)(env->eip + next_eip_addend); - - cpu_x86_set_cpl(env, 0); - cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); - cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_W_MASK | DESC_A_MASK); - env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK); - env->eip = (uint32_t)env->star; - } -} -#endif -#endif - -#ifdef TARGET_X86_64 -void helper_sysret(int dflag) -{ - int cpl, selector; - - if (!(env->efer & MSR_EFER_SCE)) { - raise_exception_err(EXCP06_ILLOP, 0); - } - cpl = env->hflags & HF_CPL_MASK; - if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) { - raise_exception_err(EXCP0D_GPF, 0); - } - selector = (env->star >> 48) & 0xffff; - if (env->hflags & HF_LMA_MASK) { - if (dflag == 2) { - cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, - 0, 0xffffffff, - DESC_G_MASK | DESC_P_MASK | - DESC_S_MASK | (3 << DESC_DPL_SHIFT) | - DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | - DESC_L_MASK); - env->eip = ECX; - } else { - cpu_x86_load_seg_cache(env, R_CS, selector | 3, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | (3 << DESC_DPL_SHIFT) | - DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); - env->eip = (uint32_t)ECX; - } - cpu_x86_load_seg_cache(env, R_SS, selector + 8, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | (3 << DESC_DPL_SHIFT) | - DESC_W_MASK | DESC_A_MASK); - load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK | - IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK); - cpu_x86_set_cpl(env, 3); - } else { - cpu_x86_load_seg_cache(env, R_CS, selector | 3, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | (3 << DESC_DPL_SHIFT) | - DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); - env->eip = (uint32_t)ECX; - cpu_x86_load_seg_cache(env, R_SS, selector + 8, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | (3 << DESC_DPL_SHIFT) | - DESC_W_MASK | DESC_A_MASK); - env->eflags |= IF_MASK; - cpu_x86_set_cpl(env, 3); - } -} -#endif - -/* real mode interrupt */ -static void do_interrupt_real(int intno, int is_int, int error_code, - unsigned int next_eip) -{ - 
SegmentCache *dt; - target_ulong ptr, ssp; - int selector; - uint32_t offset, esp; - uint32_t old_cs, old_eip; - - /* real mode (simpler !) */ - dt = &env->idt; - if (intno * 4 + 3 > dt->limit) - raise_exception_err(EXCP0D_GPF, intno * 8 + 2); - ptr = dt->base + intno * 4; - offset = lduw_kernel(ptr); - selector = lduw_kernel(ptr + 2); - esp = ESP; - ssp = env->segs[R_SS].base; - if (is_int) - old_eip = next_eip; - else - old_eip = env->eip; - old_cs = env->segs[R_CS].selector; - /* XXX: use SS segment size ? */ - PUSHW(ssp, esp, 0xffff, compute_eflags()); - PUSHW(ssp, esp, 0xffff, old_cs); - PUSHW(ssp, esp, 0xffff, old_eip); - - /* update processor state */ - ESP = (ESP & ~0xffff) | (esp & 0xffff); - env->eip = offset; - env->segs[R_CS].selector = selector; - env->segs[R_CS].base = (selector << 4); - env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); -} - -#if defined(CONFIG_USER_ONLY) -/* fake user mode interrupt */ -static void do_interrupt_user(int intno, int is_int, int error_code, - target_ulong next_eip) -{ - SegmentCache *dt; - target_ulong ptr; - int dpl, cpl, shift; - uint32_t e2; - - dt = &env->idt; - if (env->hflags & HF_LMA_MASK) { - shift = 4; - } else { - shift = 3; - } - ptr = dt->base + (intno << shift); - e2 = ldl_kernel(ptr + 4); - - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - cpl = env->hflags & HF_CPL_MASK; - /* check privilege if software int */ - if (is_int && dpl < cpl) - raise_exception_err(EXCP0D_GPF, (intno << shift) + 2); - - /* Since we emulate only user space, we cannot do more than - exiting the emulation with the suitable exception and error - code */ - if (is_int) - EIP = next_eip; -} - -#else - -static void handle_even_inj(int intno, int is_int, int error_code, - int is_hw, int rm) -{ - uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)); - if (!(event_inj & SVM_EVTINJ_VALID)) { - int type; - if (is_int) - type = SVM_EVTINJ_TYPE_SOFT; - else - type = SVM_EVTINJ_TYPE_EXEPT; - event_inj = intno | type | SVM_EVTINJ_VALID; - if (!rm && exeption_has_error_code(intno)) { - event_inj |= SVM_EVTINJ_VALID_ERR; - stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code); - } - stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj); - } -} -#endif - -/* - * Begin execution of an interruption. is_int is TRUE if coming from - * the int instruction. next_eip is the EIP value AFTER the interrupt - * instruction. It is only relevant if is_int is TRUE. 
- */ -static void do_interrupt_all(int intno, int is_int, int error_code, - target_ulong next_eip, int is_hw) -{ - if (qemu_loglevel_mask(CPU_LOG_INT)) { - if ((env->cr[0] & CR0_PE_MASK)) { - static int count; - qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, - count, intno, error_code, is_int, - env->hflags & HF_CPL_MASK, - env->segs[R_CS].selector, EIP, - (int)env->segs[R_CS].base + EIP, - env->segs[R_SS].selector, ESP); - if (intno == 0x0e) { - qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]); - } else { - qemu_log(" EAX=" TARGET_FMT_lx, EAX); - } - qemu_log("\n"); - log_cpu_state(env, X86_DUMP_CCOP); -#if 0 - { - int i; - target_ulong ptr; - qemu_log(" code="); - ptr = env->segs[R_CS].base + env->eip; - for(i = 0; i < 16; i++) { - qemu_log(" %02x", ldub(ptr + i)); - } - qemu_log("\n"); - } -#endif - count++; - } - } - if (env->cr[0] & CR0_PE_MASK) { -#if !defined(CONFIG_USER_ONLY) - if (env->hflags & HF_SVMI_MASK) - handle_even_inj(intno, is_int, error_code, is_hw, 0); -#endif -#ifdef TARGET_X86_64 - if (env->hflags & HF_LMA_MASK) { - do_interrupt64(intno, is_int, error_code, next_eip, is_hw); - } else -#endif - { - do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw); - } - } else { -#if !defined(CONFIG_USER_ONLY) - if (env->hflags & HF_SVMI_MASK) - handle_even_inj(intno, is_int, error_code, is_hw, 1); -#endif - do_interrupt_real(intno, is_int, error_code, next_eip); - } - -#if !defined(CONFIG_USER_ONLY) - if (env->hflags & HF_SVMI_MASK) { - uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)); - stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID); - } -#endif -} - -void do_interrupt(CPUX86State *env1) -{ - CPUX86State *saved_env; - - saved_env = env; - env = env1; -#if defined(CONFIG_USER_ONLY) - /* if user mode only, we simulate a fake exception - which will be handled outside the cpu execution - loop */ - do_interrupt_user(env->exception_index, - env->exception_is_int, - env->error_code, - env->exception_next_eip); - /* successfully delivered */ - env->old_exception = -1; -#else - /* simulate a real cpu exception. On i386, it can - trigger new exceptions, but we do not handle - double or triple faults yet. */ - do_interrupt_all(env->exception_index, - env->exception_is_int, - env->error_code, - env->exception_next_eip, 0); - /* successfully delivered */ - env->old_exception = -1; -#endif - env = saved_env; -} - -void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw) -{ - CPUX86State *saved_env; - - saved_env = env; - env = env1; - do_interrupt_all(intno, 0, 0, 0, is_hw); - env = saved_env; -} - -/* This should come from sysemu.h - if we could include it here... */ -void qemu_system_reset_request(void); - -/* - * Check nested exceptions and change to double or triple fault if - * needed. It should only be called, if this is not an interrupt. - * Returns the new exception number. 
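[Editor's sketch] do_interrupt_all() above selects one of three delivery paths, and the gate stride follows the CPU mode: 4-byte IVT entries in real mode, 8-byte gates in protected mode, 16-byte gates in long mode (which add the upper offset bits and the IST index). A hedged sketch of that entry-size selection, matching the intno * 4, intno << 3 and intno * 16 indexing in the removed helpers; cpu_mode and idt_entry_addr are invented names, not QEMU API.

    #include <stdint.h>

    /* Illustration only: IDT entry size/address per CPU mode. */
    enum cpu_mode { MODE_REAL, MODE_PROT32, MODE_LONG };

    static uint64_t idt_entry_addr(enum cpu_mode mode, uint64_t idt_base,
                                   int vector, unsigned *entry_size)
    {
        switch (mode) {
        case MODE_REAL:   *entry_size = 4;  break; /* IVT: offset16 + segment16      */
        case MODE_PROT32: *entry_size = 8;  break; /* 386 interrupt/trap gate        */
        case MODE_LONG:   *entry_size = 16; break; /* adds offset[63:32] + IST field */
        }
        return idt_base + (uint64_t)vector * *entry_size;
    }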
- */ -static int check_exception(int intno, int *error_code) -{ - int first_contributory = env->old_exception == 0 || - (env->old_exception >= 10 && - env->old_exception <= 13); - int second_contributory = intno == 0 || - (intno >= 10 && intno <= 13); - - qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n", - env->old_exception, intno); - -#if !defined(CONFIG_USER_ONLY) - if (env->old_exception == EXCP08_DBLE) { - if (env->hflags & HF_SVMI_MASK) - helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */ - - qemu_log_mask(CPU_LOG_RESET, "Triple fault\n"); - - qemu_system_reset_request(); - return EXCP_HLT; - } -#endif - - if ((first_contributory && second_contributory) - || (env->old_exception == EXCP0E_PAGE && - (second_contributory || (intno == EXCP0E_PAGE)))) { - intno = EXCP08_DBLE; - *error_code = 0; - } - - if (second_contributory || (intno == EXCP0E_PAGE) || - (intno == EXCP08_DBLE)) - env->old_exception = intno; - - return intno; -} - -/* - * Signal an interruption. It is executed in the main CPU loop. - * is_int is TRUE if coming from the int instruction. next_eip is the - * EIP value AFTER the interrupt instruction. It is only relevant if - * is_int is TRUE. - */ -static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code, - int next_eip_addend) -{ - if (!is_int) { - helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code); - intno = check_exception(intno, &error_code); - } else { - helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0); - } - - env->exception_index = intno; - env->error_code = error_code; - env->exception_is_int = is_int; - env->exception_next_eip = env->eip + next_eip_addend; - cpu_loop_exit(env); -} - -/* shortcuts to generate exceptions */ - -static void QEMU_NORETURN raise_exception_err(int exception_index, - int error_code) -{ - raise_interrupt(exception_index, 0, error_code, 0); -} - -void raise_exception_err_env(CPUX86State *nenv, int exception_index, - int error_code) -{ - env = nenv; - raise_interrupt(exception_index, 0, error_code, 0); -} - -static void QEMU_NORETURN raise_exception(int exception_index) -{ - raise_interrupt(exception_index, 0, 0, 0); -} - -void raise_exception_env(int exception_index, CPUX86State *nenv) -{ - env = nenv; - raise_exception(exception_index); -} -/* SMM support */ - -#if defined(CONFIG_USER_ONLY) - -void do_smm_enter(CPUX86State *env1) -{ -} - -void helper_rsm(void) -{ -} - -#else - -#ifdef TARGET_X86_64 -#define SMM_REVISION_ID 0x00020064 -#else -#define SMM_REVISION_ID 0x00020000 -#endif - -void do_smm_enter(CPUX86State *env1) -{ - target_ulong sm_state; - SegmentCache *dt; - int i, offset; - CPUX86State *saved_env; - - saved_env = env; - env = env1; - - qemu_log_mask(CPU_LOG_INT, "SMM: enter\n"); - log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP); - - env->hflags |= HF_SMM_MASK; - cpu_smm_update(env); - - sm_state = env->smbase + 0x8000; - -#ifdef TARGET_X86_64 - for(i = 0; i < 6; i++) { - dt = &env->segs[i]; - offset = 0x7e00 + i * 16; - stw_phys(sm_state + offset, dt->selector); - stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff); - stl_phys(sm_state + offset + 4, dt->limit); - stq_phys(sm_state + offset + 8, dt->base); - } - - stq_phys(sm_state + 0x7e68, env->gdt.base); - stl_phys(sm_state + 0x7e64, env->gdt.limit); - - stw_phys(sm_state + 0x7e70, env->ldt.selector); - stq_phys(sm_state + 0x7e78, env->ldt.base); - stl_phys(sm_state + 0x7e74, env->ldt.limit); - stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff); - - stq_phys(sm_state + 0x7e88, 
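[Editor's sketch] check_exception() above implements the fault-class escalation rules: two contributory faults (vectors 0 and 10-13), or a page fault followed by another page fault or a contributory fault, escalate to a double fault; a further fault while a double fault is pending is a triple fault and the helper requests a machine reset. A compact sketch of that classification; the function names are illustrative only.

    #include <stdbool.h>

    /* Illustration only: the escalation rule implemented by the removed
     * check_exception(). */
    #define EXCP_DOUBLE 8
    #define EXCP_PAGE   14

    static bool contributory(int v)
    {
        return v == 0 || (v >= 10 && v <= 13);   /* #DE, #TS, #NP, #SS, #GP */
    }

    static int escalate(int old_excp, int new_excp, bool *triple_fault)
    {
        *triple_fault = (old_excp == EXCP_DOUBLE);
        if (*triple_fault) {
            return EXCP_DOUBLE;            /* caller resets the machine instead */
        }
        if ((contributory(old_excp) && contributory(new_excp)) ||
            (old_excp == EXCP_PAGE &&
             (contributory(new_excp) || new_excp == EXCP_PAGE))) {
            return EXCP_DOUBLE;
        }
        return new_excp;
    }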
env->idt.base); - stl_phys(sm_state + 0x7e84, env->idt.limit); - - stw_phys(sm_state + 0x7e90, env->tr.selector); - stq_phys(sm_state + 0x7e98, env->tr.base); - stl_phys(sm_state + 0x7e94, env->tr.limit); - stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff); - - stq_phys(sm_state + 0x7ed0, env->efer); - - stq_phys(sm_state + 0x7ff8, EAX); - stq_phys(sm_state + 0x7ff0, ECX); - stq_phys(sm_state + 0x7fe8, EDX); - stq_phys(sm_state + 0x7fe0, EBX); - stq_phys(sm_state + 0x7fd8, ESP); - stq_phys(sm_state + 0x7fd0, EBP); - stq_phys(sm_state + 0x7fc8, ESI); - stq_phys(sm_state + 0x7fc0, EDI); - for(i = 8; i < 16; i++) - stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]); - stq_phys(sm_state + 0x7f78, env->eip); - stl_phys(sm_state + 0x7f70, compute_eflags()); - stl_phys(sm_state + 0x7f68, env->dr[6]); - stl_phys(sm_state + 0x7f60, env->dr[7]); - - stl_phys(sm_state + 0x7f48, env->cr[4]); - stl_phys(sm_state + 0x7f50, env->cr[3]); - stl_phys(sm_state + 0x7f58, env->cr[0]); - - stl_phys(sm_state + 0x7efc, SMM_REVISION_ID); - stl_phys(sm_state + 0x7f00, env->smbase); -#else - stl_phys(sm_state + 0x7ffc, env->cr[0]); - stl_phys(sm_state + 0x7ff8, env->cr[3]); - stl_phys(sm_state + 0x7ff4, compute_eflags()); - stl_phys(sm_state + 0x7ff0, env->eip); - stl_phys(sm_state + 0x7fec, EDI); - stl_phys(sm_state + 0x7fe8, ESI); - stl_phys(sm_state + 0x7fe4, EBP); - stl_phys(sm_state + 0x7fe0, ESP); - stl_phys(sm_state + 0x7fdc, EBX); - stl_phys(sm_state + 0x7fd8, EDX); - stl_phys(sm_state + 0x7fd4, ECX); - stl_phys(sm_state + 0x7fd0, EAX); - stl_phys(sm_state + 0x7fcc, env->dr[6]); - stl_phys(sm_state + 0x7fc8, env->dr[7]); - - stl_phys(sm_state + 0x7fc4, env->tr.selector); - stl_phys(sm_state + 0x7f64, env->tr.base); - stl_phys(sm_state + 0x7f60, env->tr.limit); - stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff); - - stl_phys(sm_state + 0x7fc0, env->ldt.selector); - stl_phys(sm_state + 0x7f80, env->ldt.base); - stl_phys(sm_state + 0x7f7c, env->ldt.limit); - stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff); - - stl_phys(sm_state + 0x7f74, env->gdt.base); - stl_phys(sm_state + 0x7f70, env->gdt.limit); - - stl_phys(sm_state + 0x7f58, env->idt.base); - stl_phys(sm_state + 0x7f54, env->idt.limit); - - for(i = 0; i < 6; i++) { - dt = &env->segs[i]; - if (i < 3) - offset = 0x7f84 + i * 12; - else - offset = 0x7f2c + (i - 3) * 12; - stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector); - stl_phys(sm_state + offset + 8, dt->base); - stl_phys(sm_state + offset + 4, dt->limit); - stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff); - } - stl_phys(sm_state + 0x7f14, env->cr[4]); - - stl_phys(sm_state + 0x7efc, SMM_REVISION_ID); - stl_phys(sm_state + 0x7ef8, env->smbase); -#endif - /* init SMM cpu state */ - -#ifdef TARGET_X86_64 - cpu_load_efer(env, 0); -#endif - load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); - env->eip = 0x00008000; - cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase, - 0xffffffff, 0); - cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0); - cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0); - cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0); - cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0); - cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0); - - cpu_x86_update_cr0(env, - env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK)); - cpu_x86_update_cr4(env, 0); - env->dr[7] = 0x00000400; - CC_OP = CC_OP_EFLAGS; - env = saved_env; -} - -void helper_rsm(void) -{ - target_ulong sm_state; - int 
i, offset; - uint32_t val; - - sm_state = env->smbase + 0x8000; -#ifdef TARGET_X86_64 - cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0)); - - for(i = 0; i < 6; i++) { - offset = 0x7e00 + i * 16; - cpu_x86_load_seg_cache(env, i, - lduw_phys(sm_state + offset), - ldq_phys(sm_state + offset + 8), - ldl_phys(sm_state + offset + 4), - (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8); - } - - env->gdt.base = ldq_phys(sm_state + 0x7e68); - env->gdt.limit = ldl_phys(sm_state + 0x7e64); - - env->ldt.selector = lduw_phys(sm_state + 0x7e70); - env->ldt.base = ldq_phys(sm_state + 0x7e78); - env->ldt.limit = ldl_phys(sm_state + 0x7e74); - env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8; - - env->idt.base = ldq_phys(sm_state + 0x7e88); - env->idt.limit = ldl_phys(sm_state + 0x7e84); - - env->tr.selector = lduw_phys(sm_state + 0x7e90); - env->tr.base = ldq_phys(sm_state + 0x7e98); - env->tr.limit = ldl_phys(sm_state + 0x7e94); - env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8; - - EAX = ldq_phys(sm_state + 0x7ff8); - ECX = ldq_phys(sm_state + 0x7ff0); - EDX = ldq_phys(sm_state + 0x7fe8); - EBX = ldq_phys(sm_state + 0x7fe0); - ESP = ldq_phys(sm_state + 0x7fd8); - EBP = ldq_phys(sm_state + 0x7fd0); - ESI = ldq_phys(sm_state + 0x7fc8); - EDI = ldq_phys(sm_state + 0x7fc0); - for(i = 8; i < 16; i++) - env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8); - env->eip = ldq_phys(sm_state + 0x7f78); - load_eflags(ldl_phys(sm_state + 0x7f70), - ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); - env->dr[6] = ldl_phys(sm_state + 0x7f68); - env->dr[7] = ldl_phys(sm_state + 0x7f60); - - cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48)); - cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50)); - cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58)); - - val = ldl_phys(sm_state + 0x7efc); /* revision ID */ - if (val & 0x20000) { - env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff; - } -#else - cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc)); - cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8)); - load_eflags(ldl_phys(sm_state + 0x7ff4), - ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); - env->eip = ldl_phys(sm_state + 0x7ff0); - EDI = ldl_phys(sm_state + 0x7fec); - ESI = ldl_phys(sm_state + 0x7fe8); - EBP = ldl_phys(sm_state + 0x7fe4); - ESP = ldl_phys(sm_state + 0x7fe0); - EBX = ldl_phys(sm_state + 0x7fdc); - EDX = ldl_phys(sm_state + 0x7fd8); - ECX = ldl_phys(sm_state + 0x7fd4); - EAX = ldl_phys(sm_state + 0x7fd0); - env->dr[6] = ldl_phys(sm_state + 0x7fcc); - env->dr[7] = ldl_phys(sm_state + 0x7fc8); - - env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff; - env->tr.base = ldl_phys(sm_state + 0x7f64); - env->tr.limit = ldl_phys(sm_state + 0x7f60); - env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8; - - env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff; - env->ldt.base = ldl_phys(sm_state + 0x7f80); - env->ldt.limit = ldl_phys(sm_state + 0x7f7c); - env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8; - - env->gdt.base = ldl_phys(sm_state + 0x7f74); - env->gdt.limit = ldl_phys(sm_state + 0x7f70); - - env->idt.base = ldl_phys(sm_state + 0x7f58); - env->idt.limit = ldl_phys(sm_state + 0x7f54); - - for(i = 0; i < 6; i++) { - if (i < 3) - offset = 0x7f84 + i * 12; - else - offset = 0x7f2c + (i - 3) * 12; - cpu_x86_load_seg_cache(env, i, - ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff, - ldl_phys(sm_state + offset + 8), - ldl_phys(sm_state + offset + 4), - (ldl_phys(sm_state + offset) & 0xf0ff) << 8); - } - cpu_x86_update_cr4(env, 
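[Editor's sketch] do_smm_enter()/helper_rsm() above save and restore the whole register state in the SMRAM state-save area at smbase + 0x8000, and RSM only relocates SMBASE when bit 17 of the saved revision-ID word (the 0x20000 test in the removed code) advertises that capability. A short sketch of that relocation check; the function name is invented for illustration.

    #include <stdint.h>

    /* Illustration only: the SMBASE relocation decision made by the removed
     * helper_rsm() when it reads the revision ID at offset 0x7efc. */
    #define SMM_REVID_SMBASE_RELOC (1u << 17)   /* the 0x20000 bit tested above */

    static uint32_t rsm_new_smbase(uint32_t cur_smbase, uint32_t rev_id,
                                   uint32_t saved_smbase_field)
    {
        if (rev_id & SMM_REVID_SMBASE_RELOC) {
            return saved_smbase_field & ~0x7fffu;   /* keep SMBASE 32 KiB aligned */
        }
        return cur_smbase;                          /* no relocation advertised */
    }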
ldl_phys(sm_state + 0x7f14)); - - val = ldl_phys(sm_state + 0x7efc); /* revision ID */ - if (val & 0x20000) { - env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff; - } -#endif - CC_OP = CC_OP_EFLAGS; - env->hflags &= ~HF_SMM_MASK; - cpu_smm_update(env); - - qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n"); - log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP); -} - -#endif /* !CONFIG_USER_ONLY */ - - -/* division, flags are undefined */ - -void helper_divb_AL(target_ulong t0) -{ - unsigned int num, den, q, r; - - num = (EAX & 0xffff); - den = (t0 & 0xff); - if (den == 0) { - raise_exception(EXCP00_DIVZ); - } - q = (num / den); - if (q > 0xff) - raise_exception(EXCP00_DIVZ); - q &= 0xff; - r = (num % den) & 0xff; - EAX = (EAX & ~0xffff) | (r << 8) | q; -} - -void helper_idivb_AL(target_ulong t0) -{ - int num, den, q, r; - - num = (int16_t)EAX; - den = (int8_t)t0; - if (den == 0) { - raise_exception(EXCP00_DIVZ); - } - q = (num / den); - if (q != (int8_t)q) - raise_exception(EXCP00_DIVZ); - q &= 0xff; - r = (num % den) & 0xff; - EAX = (EAX & ~0xffff) | (r << 8) | q; -} - -void helper_divw_AX(target_ulong t0) -{ - unsigned int num, den, q, r; - - num = (EAX & 0xffff) | ((EDX & 0xffff) << 16); - den = (t0 & 0xffff); - if (den == 0) { - raise_exception(EXCP00_DIVZ); - } - q = (num / den); - if (q > 0xffff) - raise_exception(EXCP00_DIVZ); - q &= 0xffff; - r = (num % den) & 0xffff; - EAX = (EAX & ~0xffff) | q; - EDX = (EDX & ~0xffff) | r; -} - -void helper_idivw_AX(target_ulong t0) -{ - int num, den, q, r; - - num = (EAX & 0xffff) | ((EDX & 0xffff) << 16); - den = (int16_t)t0; - if (den == 0) { - raise_exception(EXCP00_DIVZ); - } - q = (num / den); - if (q != (int16_t)q) - raise_exception(EXCP00_DIVZ); - q &= 0xffff; - r = (num % den) & 0xffff; - EAX = (EAX & ~0xffff) | q; - EDX = (EDX & ~0xffff) | r; -} - -void helper_divl_EAX(target_ulong t0) -{ - unsigned int den, r; - uint64_t num, q; - - num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); - den = t0; - if (den == 0) { - raise_exception(EXCP00_DIVZ); - } - q = (num / den); - r = (num % den); - if (q > 0xffffffff) - raise_exception(EXCP00_DIVZ); - EAX = (uint32_t)q; - EDX = (uint32_t)r; -} - -void helper_idivl_EAX(target_ulong t0) -{ - int den, r; - int64_t num, q; - - num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); - den = t0; - if (den == 0) { - raise_exception(EXCP00_DIVZ); - } - q = (num / den); - r = (num % den); - if (q != (int32_t)q) - raise_exception(EXCP00_DIVZ); - EAX = (uint32_t)q; - EDX = (uint32_t)r; -} - -/* bcd */ - -/* XXX: exception */ -void helper_aam(int base) -{ - int al, ah; - al = EAX & 0xff; - ah = al / base; - al = al % base; - EAX = (EAX & ~0xffff) | al | (ah << 8); - CC_DST = al; -} - -void helper_aad(int base) -{ - int al, ah; - al = EAX & 0xff; - ah = (EAX >> 8) & 0xff; - al = ((ah * base) + al) & 0xff; - EAX = (EAX & ~0xffff) | al; - CC_DST = al; -} - -void helper_aaa(void) -{ - int icarry; - int al, ah, af; - int eflags; - - eflags = helper_cc_compute_all(CC_OP); - af = eflags & CC_A; - al = EAX & 0xff; - ah = (EAX >> 8) & 0xff; - - icarry = (al > 0xf9); - if (((al & 0x0f) > 9 ) || af) { - al = (al + 6) & 0x0f; - ah = (ah + 1 + icarry) & 0xff; - eflags |= CC_C | CC_A; - } else { - eflags &= ~(CC_C | CC_A); - al &= 0x0f; - } - EAX = (EAX & ~0xffff) | al | (ah << 8); - CC_SRC = eflags; -} - -void helper_aas(void) -{ - int icarry; - int al, ah, af; - int eflags; - - eflags = helper_cc_compute_all(CC_OP); - af = eflags & CC_A; - al = EAX & 0xff; - ah = (EAX >> 8) & 0xff; - - icarry = (al < 6); - if 
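[Editor's sketch] The divide helpers above raise #DE (EXCP00_DIVZ) in two cases, not one: a zero divisor and a quotient that does not fit in the destination register. A standalone sketch of the 8-bit case handled by helper_divb_AL(); div8 is an invented name.

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustration only: DIV r/m8 as enforced by the removed helper_divb_AL().
     * Returns false when the #DE fault would be raised; otherwise writes the
     * new AX value (AH = remainder, AL = quotient). */
    static bool div8(uint16_t ax, uint8_t divisor, uint16_t *new_ax)
    {
        if (divisor == 0) {
            return false;                 /* #DE: divide by zero */
        }
        unsigned q = ax / divisor;
        unsigned r = ax % divisor;
        if (q > 0xff) {
            return false;                 /* #DE: quotient overflows AL */
        }
        *new_ax = (uint16_t)((r << 8) | q);
        return true;
    }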
(((al & 0x0f) > 9 ) || af) { - al = (al - 6) & 0x0f; - ah = (ah - 1 - icarry) & 0xff; - eflags |= CC_C | CC_A; - } else { - eflags &= ~(CC_C | CC_A); - al &= 0x0f; - } - EAX = (EAX & ~0xffff) | al | (ah << 8); - CC_SRC = eflags; -} - -void helper_daa(void) -{ - int old_al, al, af, cf; - int eflags; - - eflags = helper_cc_compute_all(CC_OP); - cf = eflags & CC_C; - af = eflags & CC_A; - old_al = al = EAX & 0xff; - - eflags = 0; - if (((al & 0x0f) > 9 ) || af) { - al = (al + 6) & 0xff; - eflags |= CC_A; - } - if ((old_al > 0x99) || cf) { - al = (al + 0x60) & 0xff; - eflags |= CC_C; - } - EAX = (EAX & ~0xff) | al; - /* well, speed is not an issue here, so we compute the flags by hand */ - eflags |= (al == 0) << 6; /* zf */ - eflags |= parity_table[al]; /* pf */ - eflags |= (al & 0x80); /* sf */ - CC_SRC = eflags; -} - -void helper_das(void) -{ - int al, al1, af, cf; - int eflags; - - eflags = helper_cc_compute_all(CC_OP); - cf = eflags & CC_C; - af = eflags & CC_A; - al = EAX & 0xff; - - eflags = 0; - al1 = al; - if (((al & 0x0f) > 9 ) || af) { - eflags |= CC_A; - if (al < 6 || cf) - eflags |= CC_C; - al = (al - 6) & 0xff; - } - if ((al1 > 0x99) || cf) { - al = (al - 0x60) & 0xff; - eflags |= CC_C; - } - EAX = (EAX & ~0xff) | al; - /* well, speed is not an issue here, so we compute the flags by hand */ - eflags |= (al == 0) << 6; /* zf */ - eflags |= parity_table[al]; /* pf */ - eflags |= (al & 0x80); /* sf */ - CC_SRC = eflags; -} - -void helper_into(int next_eip_addend) -{ - int eflags; - eflags = helper_cc_compute_all(CC_OP); - if (eflags & CC_O) { - raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend); - } -} - -void helper_cmpxchg8b(target_ulong a0) -{ - uint64_t d; - int eflags; - - eflags = helper_cc_compute_all(CC_OP); - d = ldq(a0); - if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) { - stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX); - eflags |= CC_Z; - } else { - /* always do the store */ - stq(a0, d); - EDX = (uint32_t)(d >> 32); - EAX = (uint32_t)d; - eflags &= ~CC_Z; - } - CC_SRC = eflags; -} - -#ifdef TARGET_X86_64 -void helper_cmpxchg16b(target_ulong a0) -{ - uint64_t d0, d1; - int eflags; - - if ((a0 & 0xf) != 0) - raise_exception(EXCP0D_GPF); - eflags = helper_cc_compute_all(CC_OP); - d0 = ldq(a0); - d1 = ldq(a0 + 8); - if (d0 == EAX && d1 == EDX) { - stq(a0, EBX); - stq(a0 + 8, ECX); - eflags |= CC_Z; - } else { - /* always do the store */ - stq(a0, d0); - stq(a0 + 8, d1); - EDX = d1; - EAX = d0; - eflags &= ~CC_Z; - } - CC_SRC = eflags; -} -#endif - -void helper_single_step(void) -{ -#ifndef CONFIG_USER_ONLY - check_hw_breakpoints(env, 1); - env->dr[6] |= DR6_BS; -#endif - raise_exception(EXCP01_DB); -} - -void helper_cpuid(void) -{ - uint32_t eax, ebx, ecx, edx; - - helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0); - - cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx); - EAX = eax; - EBX = ebx; - ECX = ecx; - EDX = edx; -} - -void helper_enter_level(int level, int data32, target_ulong t1) -{ - target_ulong ssp; - uint32_t esp_mask, esp, ebp; - - esp_mask = get_sp_mask(env->segs[R_SS].flags); - ssp = env->segs[R_SS].base; - ebp = EBP; - esp = ESP; - if (data32) { - /* 32 bit */ - esp -= 4; - while (--level) { - esp -= 4; - ebp -= 4; - stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask))); - } - esp -= 4; - stl(ssp + (esp & esp_mask), t1); - } else { - /* 16 bit */ - esp -= 2; - while (--level) { - esp -= 2; - ebp -= 2; - stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask))); - } - esp -= 2; - stw(ssp + (esp & esp_mask), t1); - } -} - -#ifdef 
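[Editor's sketch] helper_cmpxchg8b() above compares EDX:EAX against the 64-bit memory operand, stores ECX:EBX and sets ZF on a match, and otherwise loads the old value into EDX:EAX with ZF cleared; note that it writes memory in both cases, matching the locked instruction's unconditional write cycle. A self-contained sketch of those semantics with an invented function name.

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustration only: CMPXCHG8B semantics as in the removed helper.
     * The return value stands in for ZF. */
    static bool cmpxchg8b(uint64_t *mem, uint32_t *eax, uint32_t *edx,
                          uint32_t ebx, uint32_t ecx)
    {
        uint64_t expected = ((uint64_t)*edx << 32) | *eax;
        uint64_t old = *mem;

        if (old == expected) {
            *mem = ((uint64_t)ecx << 32) | ebx;
            return true;                      /* ZF = 1 */
        }
        *mem = old;                           /* always store, like the helper */
        *eax = (uint32_t)old;
        *edx = (uint32_t)(old >> 32);
        return false;                         /* ZF = 0 */
    }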
TARGET_X86_64 -void helper_enter64_level(int level, int data64, target_ulong t1) -{ - target_ulong esp, ebp; - ebp = EBP; - esp = ESP; - - if (data64) { - /* 64 bit */ - esp -= 8; - while (--level) { - esp -= 8; - ebp -= 8; - stq(esp, ldq(ebp)); - } - esp -= 8; - stq(esp, t1); - } else { - /* 16 bit */ - esp -= 2; - while (--level) { - esp -= 2; - ebp -= 2; - stw(esp, lduw(ebp)); - } - esp -= 2; - stw(esp, t1); - } -} -#endif - -void helper_lldt(int selector) -{ - SegmentCache *dt; - uint32_t e1, e2; - int index, entry_limit; - target_ulong ptr; - - selector &= 0xffff; - if ((selector & 0xfffc) == 0) { - /* XXX: NULL selector case: invalid LDT */ - env->ldt.base = 0; - env->ldt.limit = 0; - } else { - if (selector & 0x4) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - dt = &env->gdt; - index = selector & ~7; -#ifdef TARGET_X86_64 - if (env->hflags & HF_LMA_MASK) - entry_limit = 15; - else -#endif - entry_limit = 7; - if ((index + entry_limit) > dt->limit) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - ptr = dt->base + index; - e1 = ldl_kernel(ptr); - e2 = ldl_kernel(ptr + 4); - if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); -#ifdef TARGET_X86_64 - if (env->hflags & HF_LMA_MASK) { - uint32_t e3; - e3 = ldl_kernel(ptr + 8); - load_seg_cache_raw_dt(&env->ldt, e1, e2); - env->ldt.base |= (target_ulong)e3 << 32; - } else -#endif - { - load_seg_cache_raw_dt(&env->ldt, e1, e2); - } - } - env->ldt.selector = selector; -} - -void helper_ltr(int selector) -{ - SegmentCache *dt; - uint32_t e1, e2; - int index, type, entry_limit; - target_ulong ptr; - - selector &= 0xffff; - if ((selector & 0xfffc) == 0) { - /* NULL selector case: invalid TR */ - env->tr.base = 0; - env->tr.limit = 0; - env->tr.flags = 0; - } else { - if (selector & 0x4) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - dt = &env->gdt; - index = selector & ~7; -#ifdef TARGET_X86_64 - if (env->hflags & HF_LMA_MASK) - entry_limit = 15; - else -#endif - entry_limit = 7; - if ((index + entry_limit) > dt->limit) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - ptr = dt->base + index; - e1 = ldl_kernel(ptr); - e2 = ldl_kernel(ptr + 4); - type = (e2 >> DESC_TYPE_SHIFT) & 0xf; - if ((e2 & DESC_S_MASK) || - (type != 1 && type != 9)) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); -#ifdef TARGET_X86_64 - if (env->hflags & HF_LMA_MASK) { - uint32_t e3, e4; - e3 = ldl_kernel(ptr + 8); - e4 = ldl_kernel(ptr + 12); - if ((e4 >> DESC_TYPE_SHIFT) & 0xf) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - load_seg_cache_raw_dt(&env->tr, e1, e2); - env->tr.base |= (target_ulong)e3 << 32; - } else -#endif - { - load_seg_cache_raw_dt(&env->tr, e1, e2); - } - e2 |= DESC_TSS_BUSY_MASK; - stl_kernel(ptr + 4, e2); - } - env->tr.selector = selector; -} - -/* only works if protected mode and not VM86. 
seg_reg must be != R_CS */ -void helper_load_seg(int seg_reg, int selector) -{ - uint32_t e1, e2; - int cpl, dpl, rpl; - SegmentCache *dt; - int index; - target_ulong ptr; - - selector &= 0xffff; - cpl = env->hflags & HF_CPL_MASK; - if ((selector & 0xfffc) == 0) { - /* null selector case */ - if (seg_reg == R_SS -#ifdef TARGET_X86_64 - && (!(env->hflags & HF_CS64_MASK) || cpl == 3) -#endif - ) - raise_exception_err(EXCP0D_GPF, 0); - cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); - } else { - - if (selector & 0x4) - dt = &env->ldt; - else - dt = &env->gdt; - index = selector & ~7; - if ((index + 7) > dt->limit) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - ptr = dt->base + index; - e1 = ldl_kernel(ptr); - e2 = ldl_kernel(ptr + 4); - - if (!(e2 & DESC_S_MASK)) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - rpl = selector & 3; - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - if (seg_reg == R_SS) { - /* must be writable segment */ - if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - if (rpl != cpl || dpl != cpl) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - } else { - /* must be readable segment */ - if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - - if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { - /* if not conforming code, test rights */ - if (dpl < cpl || dpl < rpl) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - } - } - - if (!(e2 & DESC_P_MASK)) { - if (seg_reg == R_SS) - raise_exception_err(EXCP0C_STACK, selector & 0xfffc); - else - raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); - } - - /* set the access bit if not already set */ - if (!(e2 & DESC_A_MASK)) { - e2 |= DESC_A_MASK; - stl_kernel(ptr + 4, e2); - } - - cpu_x86_load_seg_cache(env, seg_reg, selector, - get_seg_base(e1, e2), - get_seg_limit(e1, e2), - e2); -#if 0 - qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", - selector, (unsigned long)sc->base, sc->limit, sc->flags); -#endif - } -} - -/* protected mode jump */ -void helper_ljmp_protected(int new_cs, target_ulong new_eip, - int next_eip_addend) -{ - int gate_cs, type; - uint32_t e1, e2, cpl, dpl, rpl, limit; - target_ulong next_eip; - - if ((new_cs & 0xfffc) == 0) - raise_exception_err(EXCP0D_GPF, 0); - if (load_segment(&e1, &e2, new_cs) != 0) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - cpl = env->hflags & HF_CPL_MASK; - if (e2 & DESC_S_MASK) { - if (!(e2 & DESC_CS_MASK)) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - if (e2 & DESC_C_MASK) { - /* conforming code segment */ - if (dpl > cpl) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - } else { - /* non conforming code segment */ - rpl = new_cs & 3; - if (rpl > cpl) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - if (dpl != cpl) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - } - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); - limit = get_seg_limit(e1, e2); - if (new_eip > limit && - !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, - get_seg_base(e1, e2), limit, e2); - EIP = new_eip; - } else { - /* jump to call or task gate */ - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - rpl = new_cs & 3; - cpl = env->hflags & HF_CPL_MASK; - type = (e2 >> DESC_TYPE_SHIFT) & 0xf; - switch(type) { - case 1: /* 286 TSS */ - case 9: /* 386 TSS */ - 
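[Editor's sketch] helper_load_seg() and the other segment helpers above keep passing the raw descriptor words e1/e2 to get_seg_base()/get_seg_limit(). For readers following the bit twiddling, here is a standalone sketch of how a legacy descriptor's scattered base and 20-bit limit are reassembled, including the 4 KiB scaling when the granularity bit (bit 23 of e2) is set; the function names are illustrative stand-ins.

    #include <stdint.h>

    /* Illustration only: decode base and limit from the two descriptor
     * words e1 (low) and e2 (high), as the removed code's helpers do. */
    static uint32_t seg_base(uint32_t e1, uint32_t e2)
    {
        return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
    }

    static uint32_t seg_limit(uint32_t e1, uint32_t e2)
    {
        uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
        if (e2 & (1u << 23)) {            /* G bit: limit counted in 4 KiB units */
            limit = (limit << 12) | 0xfff;
        }
        return limit;
    }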
case 5: /* task gate */ - if (dpl < cpl || dpl < rpl) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - next_eip = env->eip + next_eip_addend; - switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip); - CC_OP = CC_OP_EFLAGS; - break; - case 4: /* 286 call gate */ - case 12: /* 386 call gate */ - if ((dpl < cpl) || (dpl < rpl)) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); - gate_cs = e1 >> 16; - new_eip = (e1 & 0xffff); - if (type == 12) - new_eip |= (e2 & 0xffff0000); - if (load_segment(&e1, &e2, gate_cs) != 0) - raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - /* must be code segment */ - if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != - (DESC_S_MASK | DESC_CS_MASK))) - raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); - if (((e2 & DESC_C_MASK) && (dpl > cpl)) || - (!(e2 & DESC_C_MASK) && (dpl != cpl))) - raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); - limit = get_seg_limit(e1, e2); - if (new_eip > limit) - raise_exception_err(EXCP0D_GPF, 0); - cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, - get_seg_base(e1, e2), limit, e2); - EIP = new_eip; - break; - default: - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - break; - } - } -} - -/* real mode call */ -void helper_lcall_real(int new_cs, target_ulong new_eip1, - int shift, int next_eip) -{ - int new_eip; - uint32_t esp, esp_mask; - target_ulong ssp; - - new_eip = new_eip1; - esp = ESP; - esp_mask = get_sp_mask(env->segs[R_SS].flags); - ssp = env->segs[R_SS].base; - if (shift) { - PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector); - PUSHL(ssp, esp, esp_mask, next_eip); - } else { - PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector); - PUSHW(ssp, esp, esp_mask, next_eip); - } - - SET_ESP(esp, esp_mask); - env->eip = new_eip; - env->segs[R_CS].selector = new_cs; - env->segs[R_CS].base = (new_cs << 4); -} - -/* protected mode call */ -void helper_lcall_protected(int new_cs, target_ulong new_eip, - int shift, int next_eip_addend) -{ - int new_stack, i; - uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count; - uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask; - uint32_t val, limit, old_sp_mask; - target_ulong ssp, old_ssp, next_eip; - - next_eip = env->eip + next_eip_addend; - LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift); - LOG_PCALL_STATE(env); - if ((new_cs & 0xfffc) == 0) - raise_exception_err(EXCP0D_GPF, 0); - if (load_segment(&e1, &e2, new_cs) != 0) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - cpl = env->hflags & HF_CPL_MASK; - LOG_PCALL("desc=%08x:%08x\n", e1, e2); - if (e2 & DESC_S_MASK) { - if (!(e2 & DESC_CS_MASK)) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - if (e2 & DESC_C_MASK) { - /* conforming code segment */ - if (dpl > cpl) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - } else { - /* non conforming code segment */ - rpl = new_cs & 3; - if (rpl > cpl) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - if (dpl != cpl) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - } - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); - -#ifdef TARGET_X86_64 - /* XXX: check 16/32 bit cases in long mode */ - if (shift == 2) { - target_ulong rsp; - /* 64 bit case */ - rsp = ESP; - PUSHQ(rsp, env->segs[R_CS].selector); - PUSHQ(rsp, next_eip); - /* from this point, not 
restartable */ - ESP = rsp; - cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, - get_seg_base(e1, e2), - get_seg_limit(e1, e2), e2); - EIP = new_eip; - } else -#endif - { - sp = ESP; - sp_mask = get_sp_mask(env->segs[R_SS].flags); - ssp = env->segs[R_SS].base; - if (shift) { - PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector); - PUSHL(ssp, sp, sp_mask, next_eip); - } else { - PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector); - PUSHW(ssp, sp, sp_mask, next_eip); - } - - limit = get_seg_limit(e1, e2); - if (new_eip > limit) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - /* from this point, not restartable */ - SET_ESP(sp, sp_mask); - cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, - get_seg_base(e1, e2), limit, e2); - EIP = new_eip; - } - } else { - /* check gate type */ - type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - rpl = new_cs & 3; - switch(type) { - case 1: /* available 286 TSS */ - case 9: /* available 386 TSS */ - case 5: /* task gate */ - if (dpl < cpl || dpl < rpl) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip); - CC_OP = CC_OP_EFLAGS; - return; - case 4: /* 286 call gate */ - case 12: /* 386 call gate */ - break; - default: - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - break; - } - shift = type >> 3; - - if (dpl < cpl || dpl < rpl) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - /* check valid bit */ - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); - selector = e1 >> 16; - offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); - param_count = e2 & 0x1f; - if ((selector & 0xfffc) == 0) - raise_exception_err(EXCP0D_GPF, 0); - - if (load_segment(&e1, &e2, selector) != 0) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - if (dpl > cpl) - raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); - - if (!(e2 & DESC_C_MASK) && dpl < cpl) { - /* to inner privilege */ - get_ss_esp_from_tss(&ss, &sp, dpl); - LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n", - ss, sp, param_count, ESP); - if ((ss & 0xfffc) == 0) - raise_exception_err(EXCP0A_TSS, ss & 0xfffc); - if ((ss & 3) != dpl) - raise_exception_err(EXCP0A_TSS, ss & 0xfffc); - if (load_segment(&ss_e1, &ss_e2, ss) != 0) - raise_exception_err(EXCP0A_TSS, ss & 0xfffc); - ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; - if (ss_dpl != dpl) - raise_exception_err(EXCP0A_TSS, ss & 0xfffc); - if (!(ss_e2 & DESC_S_MASK) || - (ss_e2 & DESC_CS_MASK) || - !(ss_e2 & DESC_W_MASK)) - raise_exception_err(EXCP0A_TSS, ss & 0xfffc); - if (!(ss_e2 & DESC_P_MASK)) - raise_exception_err(EXCP0A_TSS, ss & 0xfffc); - - // push_size = ((param_count * 2) + 8) << shift; - - old_sp_mask = get_sp_mask(env->segs[R_SS].flags); - old_ssp = env->segs[R_SS].base; - - sp_mask = get_sp_mask(ss_e2); - ssp = get_seg_base(ss_e1, ss_e2); - if (shift) { - PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector); - PUSHL(ssp, sp, sp_mask, ESP); - for(i = param_count - 1; i >= 0; i--) { - val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask)); - PUSHL(ssp, sp, sp_mask, val); - } - } else { - PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector); - PUSHW(ssp, sp, sp_mask, ESP); - for(i = param_count - 1; i >= 0; i--) { - val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask)); - PUSHW(ssp, sp, 
sp_mask, val); - } - } - new_stack = 1; - } else { - /* to same privilege */ - sp = ESP; - sp_mask = get_sp_mask(env->segs[R_SS].flags); - ssp = env->segs[R_SS].base; - // push_size = (4 << shift); - new_stack = 0; - } - - if (shift) { - PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector); - PUSHL(ssp, sp, sp_mask, next_eip); - } else { - PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector); - PUSHW(ssp, sp, sp_mask, next_eip); - } - - /* from this point, not restartable */ - - if (new_stack) { - ss = (ss & ~3) | dpl; - cpu_x86_load_seg_cache(env, R_SS, ss, - ssp, - get_seg_limit(ss_e1, ss_e2), - ss_e2); - } - - selector = (selector & ~3) | dpl; - cpu_x86_load_seg_cache(env, R_CS, selector, - get_seg_base(e1, e2), - get_seg_limit(e1, e2), - e2); - cpu_x86_set_cpl(env, dpl); - SET_ESP(sp, sp_mask); - EIP = offset; - } -} - -/* real and vm86 mode iret */ -void helper_iret_real(int shift) -{ - uint32_t sp, new_cs, new_eip, new_eflags, sp_mask; - target_ulong ssp; - int eflags_mask; - - sp_mask = 0xffff; /* XXXX: use SS segment size ? */ - sp = ESP; - ssp = env->segs[R_SS].base; - if (shift == 1) { - /* 32 bits */ - POPL(ssp, sp, sp_mask, new_eip); - POPL(ssp, sp, sp_mask, new_cs); - new_cs &= 0xffff; - POPL(ssp, sp, sp_mask, new_eflags); - } else { - /* 16 bits */ - POPW(ssp, sp, sp_mask, new_eip); - POPW(ssp, sp, sp_mask, new_cs); - POPW(ssp, sp, sp_mask, new_eflags); - } - ESP = (ESP & ~sp_mask) | (sp & sp_mask); - env->segs[R_CS].selector = new_cs; - env->segs[R_CS].base = (new_cs << 4); - env->eip = new_eip; - if (env->eflags & VM_MASK) - eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK; - else - eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK; - if (shift == 0) - eflags_mask &= 0xffff; - load_eflags(new_eflags, eflags_mask); - env->hflags2 &= ~HF2_NMI_MASK; -} - -static inline void validate_seg(int seg_reg, int cpl) -{ - int dpl; - uint32_t e2; - - /* XXX: on x86_64, we do not want to nullify FS and GS because - they may still contain a valid base. 
I would be interested to - know how a real x86_64 CPU behaves */ - if ((seg_reg == R_FS || seg_reg == R_GS) && - (env->segs[seg_reg].selector & 0xfffc) == 0) - return; - - e2 = env->segs[seg_reg].flags; - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { - /* data or non conforming code segment */ - if (dpl < cpl) { - cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0); - } - } -} - -/* protected mode iret */ -static inline void helper_ret_protected(int shift, int is_iret, int addend) -{ - uint32_t new_cs, new_eflags, new_ss; - uint32_t new_es, new_ds, new_fs, new_gs; - uint32_t e1, e2, ss_e1, ss_e2; - int cpl, dpl, rpl, eflags_mask, iopl; - target_ulong ssp, sp, new_eip, new_esp, sp_mask; - -#ifdef TARGET_X86_64 - if (shift == 2) - sp_mask = -1; - else -#endif - sp_mask = get_sp_mask(env->segs[R_SS].flags); - sp = ESP; - ssp = env->segs[R_SS].base; - new_eflags = 0; /* avoid warning */ -#ifdef TARGET_X86_64 - if (shift == 2) { - POPQ(sp, new_eip); - POPQ(sp, new_cs); - new_cs &= 0xffff; - if (is_iret) { - POPQ(sp, new_eflags); - } - } else -#endif - if (shift == 1) { - /* 32 bits */ - POPL(ssp, sp, sp_mask, new_eip); - POPL(ssp, sp, sp_mask, new_cs); - new_cs &= 0xffff; - if (is_iret) { - POPL(ssp, sp, sp_mask, new_eflags); - if (new_eflags & VM_MASK) - goto return_to_vm86; - } - } else { - /* 16 bits */ - POPW(ssp, sp, sp_mask, new_eip); - POPW(ssp, sp, sp_mask, new_cs); - if (is_iret) - POPW(ssp, sp, sp_mask, new_eflags); - } - LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", - new_cs, new_eip, shift, addend); - LOG_PCALL_STATE(env); - if ((new_cs & 0xfffc) == 0) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - if (load_segment(&e1, &e2, new_cs) != 0) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - if (!(e2 & DESC_S_MASK) || - !(e2 & DESC_CS_MASK)) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - cpl = env->hflags & HF_CPL_MASK; - rpl = new_cs & 3; - if (rpl < cpl) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - if (e2 & DESC_C_MASK) { - if (dpl > rpl) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - } else { - if (dpl != rpl) - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); - } - if (!(e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); - - sp += addend; - if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || - ((env->hflags & HF_CS64_MASK) && !is_iret))) { - /* return to same privilege level */ - cpu_x86_load_seg_cache(env, R_CS, new_cs, - get_seg_base(e1, e2), - get_seg_limit(e1, e2), - e2); - } else { - /* return to different privilege level */ -#ifdef TARGET_X86_64 - if (shift == 2) { - POPQ(sp, new_esp); - POPQ(sp, new_ss); - new_ss &= 0xffff; - } else -#endif - if (shift == 1) { - /* 32 bits */ - POPL(ssp, sp, sp_mask, new_esp); - POPL(ssp, sp, sp_mask, new_ss); - new_ss &= 0xffff; - } else { - /* 16 bits */ - POPW(ssp, sp, sp_mask, new_esp); - POPW(ssp, sp, sp_mask, new_ss); - } - LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", - new_ss, new_esp); - if ((new_ss & 0xfffc) == 0) { -#ifdef TARGET_X86_64 - /* NULL ss is allowed in long mode if cpl != 3*/ - /* XXX: test CS64 ? */ - if ((env->hflags & HF_LMA_MASK) && rpl != 3) { - cpu_x86_load_seg_cache(env, R_SS, new_ss, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | (rpl << DESC_DPL_SHIFT) | - DESC_W_MASK | DESC_A_MASK); - ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? 
*/ - } else -#endif - { - raise_exception_err(EXCP0D_GPF, 0); - } - } else { - if ((new_ss & 3) != rpl) - raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); - if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) - raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); - if (!(ss_e2 & DESC_S_MASK) || - (ss_e2 & DESC_CS_MASK) || - !(ss_e2 & DESC_W_MASK)) - raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); - dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; - if (dpl != rpl) - raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); - if (!(ss_e2 & DESC_P_MASK)) - raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc); - cpu_x86_load_seg_cache(env, R_SS, new_ss, - get_seg_base(ss_e1, ss_e2), - get_seg_limit(ss_e1, ss_e2), - ss_e2); - } - - cpu_x86_load_seg_cache(env, R_CS, new_cs, - get_seg_base(e1, e2), - get_seg_limit(e1, e2), - e2); - cpu_x86_set_cpl(env, rpl); - sp = new_esp; -#ifdef TARGET_X86_64 - if (env->hflags & HF_CS64_MASK) - sp_mask = -1; - else -#endif - sp_mask = get_sp_mask(ss_e2); - - /* validate data segments */ - validate_seg(R_ES, rpl); - validate_seg(R_DS, rpl); - validate_seg(R_FS, rpl); - validate_seg(R_GS, rpl); - - sp += addend; - } - SET_ESP(sp, sp_mask); - env->eip = new_eip; - if (is_iret) { - /* NOTE: 'cpl' is the _old_ CPL */ - eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK; - if (cpl == 0) - eflags_mask |= IOPL_MASK; - iopl = (env->eflags >> IOPL_SHIFT) & 3; - if (cpl <= iopl) - eflags_mask |= IF_MASK; - if (shift == 0) - eflags_mask &= 0xffff; - load_eflags(new_eflags, eflags_mask); - } - return; - - return_to_vm86: - POPL(ssp, sp, sp_mask, new_esp); - POPL(ssp, sp, sp_mask, new_ss); - POPL(ssp, sp, sp_mask, new_es); - POPL(ssp, sp, sp_mask, new_ds); - POPL(ssp, sp, sp_mask, new_fs); - POPL(ssp, sp, sp_mask, new_gs); - - /* modify processor state */ - load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK | - IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK); - load_seg_vm(R_CS, new_cs & 0xffff); - cpu_x86_set_cpl(env, 3); - load_seg_vm(R_SS, new_ss & 0xffff); - load_seg_vm(R_ES, new_es & 0xffff); - load_seg_vm(R_DS, new_ds & 0xffff); - load_seg_vm(R_FS, new_fs & 0xffff); - load_seg_vm(R_GS, new_gs & 0xffff); - - env->eip = new_eip & 0xffff; - ESP = new_esp; -} - -void helper_iret_protected(int shift, int next_eip) -{ - int tss_selector, type; - uint32_t e1, e2; - - /* specific case for TSS */ - if (env->eflags & NT_MASK) { -#ifdef TARGET_X86_64 - if (env->hflags & HF_LMA_MASK) - raise_exception_err(EXCP0D_GPF, 0); -#endif - tss_selector = lduw_kernel(env->tr.base + 0); - if (tss_selector & 4) - raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); - if (load_segment(&e1, &e2, tss_selector) != 0) - raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); - type = (e2 >> DESC_TYPE_SHIFT) & 0x17; - /* NOTE: we check both segment and busy TSS */ - if (type != 3) - raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); - switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip); - } else { - helper_ret_protected(shift, 1, 0); - } - env->hflags2 &= ~HF2_NMI_MASK; -} - -void helper_lret_protected(int shift, int addend) -{ - helper_ret_protected(shift, 0, addend); -} - -void helper_sysenter(void) -{ - if (env->sysenter_cs == 0) { - raise_exception_err(EXCP0D_GPF, 0); - } - env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK); - cpu_x86_set_cpl(env, 0); - -#ifdef TARGET_X86_64 - if (env->hflags & HF_LMA_MASK) { - cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_CS_MASK | DESC_R_MASK | 
DESC_A_MASK | DESC_L_MASK); - } else -#endif - { - cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); - } - cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_W_MASK | DESC_A_MASK); - ESP = env->sysenter_esp; - EIP = env->sysenter_eip; -} - -void helper_sysexit(int dflag) -{ - int cpl; - - cpl = env->hflags & HF_CPL_MASK; - if (env->sysenter_cs == 0 || cpl != 0) { - raise_exception_err(EXCP0D_GPF, 0); - } - cpu_x86_set_cpl(env, 3); -#ifdef TARGET_X86_64 - if (dflag == 2) { - cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | (3 << DESC_DPL_SHIFT) | - DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK); - cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | (3 << DESC_DPL_SHIFT) | - DESC_W_MASK | DESC_A_MASK); - } else -#endif - { - cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | (3 << DESC_DPL_SHIFT) | - DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); - cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | (3 << DESC_DPL_SHIFT) | - DESC_W_MASK | DESC_A_MASK); - } - ESP = ECX; - EIP = EDX; -} - -#if defined(CONFIG_USER_ONLY) -target_ulong helper_read_crN(int reg) -{ - return 0; -} - -void helper_write_crN(int reg, target_ulong t0) -{ -} - -void helper_movl_drN_T0(int reg, target_ulong t0) -{ -} -#else -target_ulong helper_read_crN(int reg) -{ - target_ulong val; - - helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0); - switch(reg) { - default: - val = env->cr[reg]; - break; - case 8: - if (!(env->hflags2 & HF2_VINTR_MASK)) { - val = cpu_get_apic_tpr(env->apic_state); - } else { - val = env->v_tpr; - } - break; - } - return val; -} - -void helper_write_crN(int reg, target_ulong t0) -{ - helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0); - switch(reg) { - case 0: - cpu_x86_update_cr0(env, t0); - break; - case 3: - cpu_x86_update_cr3(env, t0); - break; - case 4: - cpu_x86_update_cr4(env, t0); - break; - case 8: - if (!(env->hflags2 & HF2_VINTR_MASK)) { - cpu_set_apic_tpr(env->apic_state, t0); - } - env->v_tpr = t0 & 0x0f; - break; - default: - env->cr[reg] = t0; - break; - } -} - -void helper_movl_drN_T0(int reg, target_ulong t0) -{ - int i; - - if (reg < 4) { - hw_breakpoint_remove(env, reg); - env->dr[reg] = t0; - hw_breakpoint_insert(env, reg); - } else if (reg == 7) { - for (i = 0; i < 4; i++) - hw_breakpoint_remove(env, i); - env->dr[7] = t0; - for (i = 0; i < 4; i++) - hw_breakpoint_insert(env, i); - } else - env->dr[reg] = t0; -} -#endif - -void helper_lmsw(target_ulong t0) -{ - /* only 4 lower bits of CR0 are modified. PE cannot be set to zero - if already set to one. 
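[Editor's sketch] helper_sysenter()/helper_sysexit() above derive every selector from IA32_SYSENTER_CS: SYSENTER loads CS = value and SS = value + 8 at CPL 0, while SYSEXIT returns at CPL 3 with CS/SS taken 16/24 bytes further on (32/40 for the 64-bit variant selected by dflag == 2). A compact sketch of that arithmetic with invented names.

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustration only: SYSENTER/SYSEXIT selector arithmetic as in the
     * removed helpers; "to64" corresponds to dflag == 2 above. */
    struct sep_selectors {
        uint16_t enter_cs, enter_ss;   /* SYSENTER, RPL 0 */
        uint16_t exit_cs, exit_ss;     /* SYSEXIT,  RPL 3 */
    };

    static struct sep_selectors sysenter_selectors(uint16_t sysenter_cs, bool to64)
    {
        struct sep_selectors s;
        s.enter_cs = sysenter_cs & 0xfffc;
        s.enter_ss = (sysenter_cs + 8) & 0xfffc;
        s.exit_cs  = ((sysenter_cs + (to64 ? 32 : 16)) & 0xfffc) | 3;
        s.exit_ss  = ((sysenter_cs + (to64 ? 40 : 24)) & 0xfffc) | 3;
        return s;
    }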
*/ - t0 = (env->cr[0] & ~0xe) | (t0 & 0xf); - helper_write_crN(0, t0); -} - -void helper_clts(void) -{ - env->cr[0] &= ~CR0_TS_MASK; - env->hflags &= ~HF_TS_MASK; -} - -void helper_invlpg(target_ulong addr) -{ - helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0); - tlb_flush_page(env, addr); -} - -void helper_rdtsc(void) -{ - uint64_t val; - - if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { - raise_exception(EXCP0D_GPF); - } - helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0); - - val = cpu_get_tsc(env) + env->tsc_offset; - EAX = (uint32_t)(val); - EDX = (uint32_t)(val >> 32); -} - -void helper_rdtscp(void) -{ - helper_rdtsc(); - ECX = (uint32_t)(env->tsc_aux); -} - -void helper_rdpmc(void) -{ - if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { - raise_exception(EXCP0D_GPF); - } - helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0); - - /* currently unimplemented */ - raise_exception_err(EXCP06_ILLOP, 0); -} - -#if defined(CONFIG_USER_ONLY) -void helper_wrmsr(void) -{ -} - -void helper_rdmsr(void) -{ -} -#else -void helper_wrmsr(void) -{ - uint64_t val; - - helper_svm_check_intercept_param(SVM_EXIT_MSR, 1); - - val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); - - switch((uint32_t)ECX) { - case MSR_IA32_SYSENTER_CS: - env->sysenter_cs = val & 0xffff; - break; - case MSR_IA32_SYSENTER_ESP: - env->sysenter_esp = val; - break; - case MSR_IA32_SYSENTER_EIP: - env->sysenter_eip = val; - break; - case MSR_IA32_APICBASE: - cpu_set_apic_base(env->apic_state, val); - break; - case MSR_EFER: - { - uint64_t update_mask; - update_mask = 0; - if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL) - update_mask |= MSR_EFER_SCE; - if (env->cpuid_ext2_features & CPUID_EXT2_LM) - update_mask |= MSR_EFER_LME; - if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) - update_mask |= MSR_EFER_FFXSR; - if (env->cpuid_ext2_features & CPUID_EXT2_NX) - update_mask |= MSR_EFER_NXE; - if (env->cpuid_ext3_features & CPUID_EXT3_SVM) - update_mask |= MSR_EFER_SVME; - if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) - update_mask |= MSR_EFER_FFXSR; - cpu_load_efer(env, (env->efer & ~update_mask) | - (val & update_mask)); - } - break; - case MSR_STAR: - env->star = val; - break; - case MSR_PAT: - env->pat = val; - break; - case MSR_VM_HSAVE_PA: - env->vm_hsave = val; - break; -#ifdef TARGET_X86_64 - case MSR_LSTAR: - env->lstar = val; - break; - case MSR_CSTAR: - env->cstar = val; - break; - case MSR_FMASK: - env->fmask = val; - break; - case MSR_FSBASE: - env->segs[R_FS].base = val; - break; - case MSR_GSBASE: - env->segs[R_GS].base = val; - break; - case MSR_KERNELGSBASE: - env->kernelgsbase = val; - break; -#endif - case MSR_MTRRphysBase(0): - case MSR_MTRRphysBase(1): - case MSR_MTRRphysBase(2): - case MSR_MTRRphysBase(3): - case MSR_MTRRphysBase(4): - case MSR_MTRRphysBase(5): - case MSR_MTRRphysBase(6): - case MSR_MTRRphysBase(7): - env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val; - break; - case MSR_MTRRphysMask(0): - case MSR_MTRRphysMask(1): - case MSR_MTRRphysMask(2): - case MSR_MTRRphysMask(3): - case MSR_MTRRphysMask(4): - case MSR_MTRRphysMask(5): - case MSR_MTRRphysMask(6): - case MSR_MTRRphysMask(7): - env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val; - break; - case MSR_MTRRfix64K_00000: - env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val; - break; - case MSR_MTRRfix16K_80000: - case MSR_MTRRfix16K_A0000: - env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val; - break; - case 
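[Editor's sketch] In helper_wrmsr() above, the MSR_EFER case masks the guest-written value so that only bits backed by an advertised CPUID feature (SCE, LME, FFXSR, NXE, SVME) can change. A small sketch of that filtering; the bit names follow the architectural EFER layout and the boolean parameters stand in for the cpuid_ext*_features tests in the removed code.

    #include <stdint.h>

    /* Illustration only: EFER write filtering as in the removed helper_wrmsr(). */
    #define EFER_SCE   (1u << 0)
    #define EFER_LME   (1u << 8)
    #define EFER_NXE   (1u << 11)
    #define EFER_SVME  (1u << 12)
    #define EFER_FFXSR (1u << 14)

    static uint64_t efer_write(uint64_t old_efer, uint64_t val,
                               int has_syscall, int has_lm, int has_nx,
                               int has_svm, int has_ffxsr)
    {
        uint64_t mask = 0;

        if (has_syscall) mask |= EFER_SCE;
        if (has_lm)      mask |= EFER_LME;
        if (has_nx)      mask |= EFER_NXE;
        if (has_svm)     mask |= EFER_SVME;
        if (has_ffxsr)   mask |= EFER_FFXSR;

        return (old_efer & ~mask) | (val & mask);
    }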
MSR_MTRRfix4K_C0000: - case MSR_MTRRfix4K_C8000: - case MSR_MTRRfix4K_D0000: - case MSR_MTRRfix4K_D8000: - case MSR_MTRRfix4K_E0000: - case MSR_MTRRfix4K_E8000: - case MSR_MTRRfix4K_F0000: - case MSR_MTRRfix4K_F8000: - env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val; - break; - case MSR_MTRRdefType: - env->mtrr_deftype = val; - break; - case MSR_MCG_STATUS: - env->mcg_status = val; - break; - case MSR_MCG_CTL: - if ((env->mcg_cap & MCG_CTL_P) - && (val == 0 || val == ~(uint64_t)0)) - env->mcg_ctl = val; - break; - case MSR_TSC_AUX: - env->tsc_aux = val; - break; - case MSR_IA32_MISC_ENABLE: - env->msr_ia32_misc_enable = val; - break; - default: - if ((uint32_t)ECX >= MSR_MC0_CTL - && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) { - uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL; - if ((offset & 0x3) != 0 - || (val == 0 || val == ~(uint64_t)0)) - env->mce_banks[offset] = val; - break; - } - /* XXX: exception ? */ - break; - } -} - -void helper_rdmsr(void) -{ - uint64_t val; - - helper_svm_check_intercept_param(SVM_EXIT_MSR, 0); - - switch((uint32_t)ECX) { - case MSR_IA32_SYSENTER_CS: - val = env->sysenter_cs; - break; - case MSR_IA32_SYSENTER_ESP: - val = env->sysenter_esp; - break; - case MSR_IA32_SYSENTER_EIP: - val = env->sysenter_eip; - break; - case MSR_IA32_APICBASE: - val = cpu_get_apic_base(env->apic_state); - break; - case MSR_EFER: - val = env->efer; - break; - case MSR_STAR: - val = env->star; - break; - case MSR_PAT: - val = env->pat; - break; - case MSR_VM_HSAVE_PA: - val = env->vm_hsave; - break; - case MSR_IA32_PERF_STATUS: - /* tsc_increment_by_tick */ - val = 1000ULL; - /* CPU multiplier */ - val |= (((uint64_t)4ULL) << 40); - break; -#ifdef TARGET_X86_64 - case MSR_LSTAR: - val = env->lstar; - break; - case MSR_CSTAR: - val = env->cstar; - break; - case MSR_FMASK: - val = env->fmask; - break; - case MSR_FSBASE: - val = env->segs[R_FS].base; - break; - case MSR_GSBASE: - val = env->segs[R_GS].base; - break; - case MSR_KERNELGSBASE: - val = env->kernelgsbase; - break; - case MSR_TSC_AUX: - val = env->tsc_aux; - break; -#endif - case MSR_MTRRphysBase(0): - case MSR_MTRRphysBase(1): - case MSR_MTRRphysBase(2): - case MSR_MTRRphysBase(3): - case MSR_MTRRphysBase(4): - case MSR_MTRRphysBase(5): - case MSR_MTRRphysBase(6): - case MSR_MTRRphysBase(7): - val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base; - break; - case MSR_MTRRphysMask(0): - case MSR_MTRRphysMask(1): - case MSR_MTRRphysMask(2): - case MSR_MTRRphysMask(3): - case MSR_MTRRphysMask(4): - case MSR_MTRRphysMask(5): - case MSR_MTRRphysMask(6): - case MSR_MTRRphysMask(7): - val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask; - break; - case MSR_MTRRfix64K_00000: - val = env->mtrr_fixed[0]; - break; - case MSR_MTRRfix16K_80000: - case MSR_MTRRfix16K_A0000: - val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1]; - break; - case MSR_MTRRfix4K_C0000: - case MSR_MTRRfix4K_C8000: - case MSR_MTRRfix4K_D0000: - case MSR_MTRRfix4K_D8000: - case MSR_MTRRfix4K_E0000: - case MSR_MTRRfix4K_E8000: - case MSR_MTRRfix4K_F0000: - case MSR_MTRRfix4K_F8000: - val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3]; - break; - case MSR_MTRRdefType: - val = env->mtrr_deftype; - break; - case MSR_MTRRcap: - if (env->cpuid_features & CPUID_MTRR) - val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED; - else - /* XXX: exception ? 
*/ - val = 0; - break; - case MSR_MCG_CAP: - val = env->mcg_cap; - break; - case MSR_MCG_CTL: - if (env->mcg_cap & MCG_CTL_P) - val = env->mcg_ctl; - else - val = 0; - break; - case MSR_MCG_STATUS: - val = env->mcg_status; - break; - case MSR_IA32_MISC_ENABLE: - val = env->msr_ia32_misc_enable; - break; - default: - if ((uint32_t)ECX >= MSR_MC0_CTL - && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) { - uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL; - val = env->mce_banks[offset]; - break; - } - /* XXX: exception ? */ - val = 0; - break; - } - EAX = (uint32_t)(val); - EDX = (uint32_t)(val >> 32); -} -#endif - -target_ulong helper_lsl(target_ulong selector1) -{ - unsigned int limit; - uint32_t e1, e2, eflags, selector; - int rpl, dpl, cpl, type; - - selector = selector1 & 0xffff; - eflags = helper_cc_compute_all(CC_OP); - if ((selector & 0xfffc) == 0) - goto fail; - if (load_segment(&e1, &e2, selector) != 0) - goto fail; - rpl = selector & 3; - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - cpl = env->hflags & HF_CPL_MASK; - if (e2 & DESC_S_MASK) { - if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) { - /* conforming */ - } else { - if (dpl < cpl || dpl < rpl) - goto fail; - } - } else { - type = (e2 >> DESC_TYPE_SHIFT) & 0xf; - switch(type) { - case 1: - case 2: - case 3: - case 9: - case 11: - break; - default: - goto fail; - } - if (dpl < cpl || dpl < rpl) { - fail: - CC_SRC = eflags & ~CC_Z; - return 0; - } - } - limit = get_seg_limit(e1, e2); - CC_SRC = eflags | CC_Z; - return limit; -} - -target_ulong helper_lar(target_ulong selector1) -{ - uint32_t e1, e2, eflags, selector; - int rpl, dpl, cpl, type; - - selector = selector1 & 0xffff; - eflags = helper_cc_compute_all(CC_OP); - if ((selector & 0xfffc) == 0) - goto fail; - if (load_segment(&e1, &e2, selector) != 0) - goto fail; - rpl = selector & 3; - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - cpl = env->hflags & HF_CPL_MASK; - if (e2 & DESC_S_MASK) { - if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) { - /* conforming */ - } else { - if (dpl < cpl || dpl < rpl) - goto fail; - } - } else { - type = (e2 >> DESC_TYPE_SHIFT) & 0xf; - switch(type) { - case 1: - case 2: - case 3: - case 4: - case 5: - case 9: - case 11: - case 12: - break; - default: - goto fail; - } - if (dpl < cpl || dpl < rpl) { - fail: - CC_SRC = eflags & ~CC_Z; - return 0; - } - } - CC_SRC = eflags | CC_Z; - return e2 & 0x00f0ff00; -} - -void helper_verr(target_ulong selector1) -{ - uint32_t e1, e2, eflags, selector; - int rpl, dpl, cpl; - - selector = selector1 & 0xffff; - eflags = helper_cc_compute_all(CC_OP); - if ((selector & 0xfffc) == 0) - goto fail; - if (load_segment(&e1, &e2, selector) != 0) - goto fail; - if (!(e2 & DESC_S_MASK)) - goto fail; - rpl = selector & 3; - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - cpl = env->hflags & HF_CPL_MASK; - if (e2 & DESC_CS_MASK) { - if (!(e2 & DESC_R_MASK)) - goto fail; - if (!(e2 & DESC_C_MASK)) { - if (dpl < cpl || dpl < rpl) - goto fail; - } - } else { - if (dpl < cpl || dpl < rpl) { - fail: - CC_SRC = eflags & ~CC_Z; - return; - } - } - CC_SRC = eflags | CC_Z; -} - -void helper_verw(target_ulong selector1) -{ - uint32_t e1, e2, eflags, selector; - int rpl, dpl, cpl; - - selector = selector1 & 0xffff; - eflags = helper_cc_compute_all(CC_OP); - if ((selector & 0xfffc) == 0) - goto fail; - if (load_segment(&e1, &e2, selector) != 0) - goto fail; - if (!(e2 & DESC_S_MASK)) - goto fail; - rpl = selector & 3; - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - cpl = env->hflags & HF_CPL_MASK; - if (e2 & DESC_CS_MASK) { - goto fail; - } else { - if (dpl < cpl 
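[Editor's sketch] helper_lsl()/helper_lar()/helper_verr()/helper_verw() above all report their outcome through ZF: on success the flag is set and the limit or access-rights value is produced, on any selector, privilege or type failure the flag is cleared and nothing else changes. A simplified sketch of the LAR case (system-descriptor type filtering omitted); the function name and bit extraction are illustrative only.

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustration only: the LAR success contract from the removed helper_lar().
     * The return value stands in for ZF. */
    static bool lar(uint32_t e2, int cpl, int rpl, uint32_t *rights)
    {
        int  dpl = (e2 >> 13) & 3;                       /* DPL field of e2 */
        bool conforming_code = (e2 & (1u << 12)) &&      /* S: code/data    */
                               (e2 & (1u << 11)) &&      /* code segment    */
                               (e2 & (1u << 10));        /* conforming      */

        if (!conforming_code && (dpl < cpl || dpl < rpl)) {
            return false;                                /* ZF = 0 */
        }
        *rights = e2 & 0x00f0ff00;                       /* same mask as above */
        return true;                                     /* ZF = 1 */
    }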
|| dpl < rpl) - goto fail; - if (!(e2 & DESC_W_MASK)) { - fail: - CC_SRC = eflags & ~CC_Z; - return; - } - } - CC_SRC = eflags | CC_Z; -} - -/* x87 FPU helpers */ - -static inline double floatx80_to_double(floatx80 a) -{ - union { - float64 f64; - double d; - } u; - - u.f64 = floatx80_to_float64(a, &env->fp_status); - return u.d; -} - -static inline floatx80 double_to_floatx80(double a) -{ - union { - float64 f64; - double d; - } u; - - u.d = a; - return float64_to_floatx80(u.f64, &env->fp_status); -} - -static void fpu_set_exception(int mask) -{ - env->fpus |= mask; - if (env->fpus & (~env->fpuc & FPUC_EM)) - env->fpus |= FPUS_SE | FPUS_B; -} - -static inline floatx80 helper_fdiv(floatx80 a, floatx80 b) -{ - if (floatx80_is_zero(b)) { - fpu_set_exception(FPUS_ZE); - } - return floatx80_div(a, b, &env->fp_status); -} - -static void fpu_raise_exception(void) -{ - if (env->cr[0] & CR0_NE_MASK) { - raise_exception(EXCP10_COPR); - } -#if !defined(CONFIG_USER_ONLY) - else { - cpu_set_ferr(env); - } -#endif -} - -void helper_flds_FT0(uint32_t val) -{ - union { - float32 f; - uint32_t i; - } u; - u.i = val; - FT0 = float32_to_floatx80(u.f, &env->fp_status); -} - -void helper_fldl_FT0(uint64_t val) -{ - union { - float64 f; - uint64_t i; - } u; - u.i = val; - FT0 = float64_to_floatx80(u.f, &env->fp_status); -} - -void helper_fildl_FT0(int32_t val) -{ - FT0 = int32_to_floatx80(val, &env->fp_status); -} - -void helper_flds_ST0(uint32_t val) -{ - int new_fpstt; - union { - float32 f; - uint32_t i; - } u; - new_fpstt = (env->fpstt - 1) & 7; - u.i = val; - env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status); - env->fpstt = new_fpstt; - env->fptags[new_fpstt] = 0; /* validate stack entry */ -} - -void helper_fldl_ST0(uint64_t val) -{ - int new_fpstt; - union { - float64 f; - uint64_t i; - } u; - new_fpstt = (env->fpstt - 1) & 7; - u.i = val; - env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status); - env->fpstt = new_fpstt; - env->fptags[new_fpstt] = 0; /* validate stack entry */ -} - -void helper_fildl_ST0(int32_t val) -{ - int new_fpstt; - new_fpstt = (env->fpstt - 1) & 7; - env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status); - env->fpstt = new_fpstt; - env->fptags[new_fpstt] = 0; /* validate stack entry */ -} - -void helper_fildll_ST0(int64_t val) -{ - int new_fpstt; - new_fpstt = (env->fpstt - 1) & 7; - env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status); - env->fpstt = new_fpstt; - env->fptags[new_fpstt] = 0; /* validate stack entry */ -} - -uint32_t helper_fsts_ST0(void) -{ - union { - float32 f; - uint32_t i; - } u; - u.f = floatx80_to_float32(ST0, &env->fp_status); - return u.i; -} - -uint64_t helper_fstl_ST0(void) -{ - union { - float64 f; - uint64_t i; - } u; - u.f = floatx80_to_float64(ST0, &env->fp_status); - return u.i; -} - -int32_t helper_fist_ST0(void) -{ - int32_t val; - val = floatx80_to_int32(ST0, &env->fp_status); - if (val != (int16_t)val) - val = -32768; - return val; -} - -int32_t helper_fistl_ST0(void) -{ - int32_t val; - val = floatx80_to_int32(ST0, &env->fp_status); - return val; -} - -int64_t helper_fistll_ST0(void) -{ - int64_t val; - val = floatx80_to_int64(ST0, &env->fp_status); - return val; -} - -int32_t helper_fistt_ST0(void) -{ - int32_t val; - val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status); - if (val != (int16_t)val) - val = -32768; - return val; -} - -int32_t helper_fisttl_ST0(void) -{ - int32_t val; - val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status); - return val; -} - -int64_t 
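/*
 * Editor's illustrative sketch, not part of this patch: the saturation
 * used by helper_fist_ST0()/helper_fistt_ST0() above.  A converted value
 * that does not fit in 16 bits is replaced by the x87 "integer
 * indefinite" encoding, -32768.  fist16_saturate() is a made-up name.
 */
#include <stdint.h>

static int32_t fist16_saturate(int32_t val)
{
    if (val != (int16_t)val) {   /* does not round-trip through int16_t */
        val = -32768;            /* 0x8000: x87 integer indefinite */
    }
    return val;
}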
helper_fisttll_ST0(void) -{ - int64_t val; - val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status); - return val; -} - -void helper_fldt_ST0(target_ulong ptr) -{ - int new_fpstt; - new_fpstt = (env->fpstt - 1) & 7; - env->fpregs[new_fpstt].d = helper_fldt(ptr); - env->fpstt = new_fpstt; - env->fptags[new_fpstt] = 0; /* validate stack entry */ -} - -void helper_fstt_ST0(target_ulong ptr) -{ - helper_fstt(ST0, ptr); -} - -void helper_fpush(void) -{ - fpush(); -} - -void helper_fpop(void) -{ - fpop(); -} - -void helper_fdecstp(void) -{ - env->fpstt = (env->fpstt - 1) & 7; - env->fpus &= (~0x4700); -} - -void helper_fincstp(void) -{ - env->fpstt = (env->fpstt + 1) & 7; - env->fpus &= (~0x4700); -} - -/* FPU move */ - -void helper_ffree_STN(int st_index) -{ - env->fptags[(env->fpstt + st_index) & 7] = 1; -} - -void helper_fmov_ST0_FT0(void) -{ - ST0 = FT0; -} - -void helper_fmov_FT0_STN(int st_index) -{ - FT0 = ST(st_index); -} - -void helper_fmov_ST0_STN(int st_index) -{ - ST0 = ST(st_index); -} - -void helper_fmov_STN_ST0(int st_index) -{ - ST(st_index) = ST0; -} - -void helper_fxchg_ST0_STN(int st_index) -{ - floatx80 tmp; - tmp = ST(st_index); - ST(st_index) = ST0; - ST0 = tmp; -} - -/* FPU operations */ - -static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500}; - -void helper_fcom_ST0_FT0(void) -{ - int ret; - - ret = floatx80_compare(ST0, FT0, &env->fp_status); - env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1]; -} - -void helper_fucom_ST0_FT0(void) -{ - int ret; - - ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status); - env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1]; -} - -static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C}; - -void helper_fcomi_ST0_FT0(void) -{ - int eflags; - int ret; - - ret = floatx80_compare(ST0, FT0, &env->fp_status); - eflags = helper_cc_compute_all(CC_OP); - eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; - CC_SRC = eflags; -} - -void helper_fucomi_ST0_FT0(void) -{ - int eflags; - int ret; - - ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status); - eflags = helper_cc_compute_all(CC_OP); - eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; - CC_SRC = eflags; -} - -void helper_fadd_ST0_FT0(void) -{ - ST0 = floatx80_add(ST0, FT0, &env->fp_status); -} - -void helper_fmul_ST0_FT0(void) -{ - ST0 = floatx80_mul(ST0, FT0, &env->fp_status); -} - -void helper_fsub_ST0_FT0(void) -{ - ST0 = floatx80_sub(ST0, FT0, &env->fp_status); -} - -void helper_fsubr_ST0_FT0(void) -{ - ST0 = floatx80_sub(FT0, ST0, &env->fp_status); -} - -void helper_fdiv_ST0_FT0(void) -{ - ST0 = helper_fdiv(ST0, FT0); -} - -void helper_fdivr_ST0_FT0(void) -{ - ST0 = helper_fdiv(FT0, ST0); -} - -/* fp operations between STN and ST0 */ - -void helper_fadd_STN_ST0(int st_index) -{ - ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status); -} - -void helper_fmul_STN_ST0(int st_index) -{ - ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status); -} - -void helper_fsub_STN_ST0(int st_index) -{ - ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status); -} - -void helper_fsubr_STN_ST0(int st_index) -{ - ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status); -} - -void helper_fdiv_STN_ST0(int st_index) -{ - floatx80 *p; - p = &ST(st_index); - *p = helper_fdiv(*p, ST0); -} - -void helper_fdivr_STN_ST0(int st_index) -{ - floatx80 *p; - p = &ST(st_index); - *p = helper_fdiv(ST0, *p); -} - -/* misc FPU operations */ -void helper_fchs_ST0(void) -{ - ST0 = floatx80_chs(ST0); -} - -void 
helper_fabs_ST0(void) -{ - ST0 = floatx80_abs(ST0); -} - -void helper_fld1_ST0(void) -{ - ST0 = floatx80_one; -} - -void helper_fldl2t_ST0(void) -{ - ST0 = floatx80_l2t; -} - -void helper_fldl2e_ST0(void) -{ - ST0 = floatx80_l2e; -} - -void helper_fldpi_ST0(void) -{ - ST0 = floatx80_pi; -} - -void helper_fldlg2_ST0(void) -{ - ST0 = floatx80_lg2; -} - -void helper_fldln2_ST0(void) -{ - ST0 = floatx80_ln2; -} - -void helper_fldz_ST0(void) -{ - ST0 = floatx80_zero; -} - -void helper_fldz_FT0(void) -{ - FT0 = floatx80_zero; -} - -uint32_t helper_fnstsw(void) -{ - return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; -} - -uint32_t helper_fnstcw(void) -{ - return env->fpuc; -} - -static void update_fp_status(void) -{ - int rnd_type; - - /* set rounding mode */ - switch(env->fpuc & FPU_RC_MASK) { - default: - case FPU_RC_NEAR: - rnd_type = float_round_nearest_even; - break; - case FPU_RC_DOWN: - rnd_type = float_round_down; - break; - case FPU_RC_UP: - rnd_type = float_round_up; - break; - case FPU_RC_CHOP: - rnd_type = float_round_to_zero; - break; - } - set_float_rounding_mode(rnd_type, &env->fp_status); - switch((env->fpuc >> 8) & 3) { - case 0: - rnd_type = 32; - break; - case 2: - rnd_type = 64; - break; - case 3: - default: - rnd_type = 80; - break; - } - set_floatx80_rounding_precision(rnd_type, &env->fp_status); -} - -void helper_fldcw(uint32_t val) -{ - env->fpuc = val; - update_fp_status(); -} - -void helper_fclex(void) -{ - env->fpus &= 0x7f00; -} - -void helper_fwait(void) -{ - if (env->fpus & FPUS_SE) - fpu_raise_exception(); -} - -void helper_fninit(void) -{ - env->fpus = 0; - env->fpstt = 0; - env->fpuc = 0x37f; - env->fptags[0] = 1; - env->fptags[1] = 1; - env->fptags[2] = 1; - env->fptags[3] = 1; - env->fptags[4] = 1; - env->fptags[5] = 1; - env->fptags[6] = 1; - env->fptags[7] = 1; -} - -/* BCD ops */ - -void helper_fbld_ST0(target_ulong ptr) -{ - floatx80 tmp; - uint64_t val; - unsigned int v; - int i; - - val = 0; - for(i = 8; i >= 0; i--) { - v = ldub(ptr + i); - val = (val * 100) + ((v >> 4) * 10) + (v & 0xf); - } - tmp = int64_to_floatx80(val, &env->fp_status); - if (ldub(ptr + 9) & 0x80) { - floatx80_chs(tmp); - } - fpush(); - ST0 = tmp; -} - -void helper_fbst_ST0(target_ulong ptr) -{ - int v; - target_ulong mem_ref, mem_end; - int64_t val; - - val = floatx80_to_int64(ST0, &env->fp_status); - mem_ref = ptr; - mem_end = mem_ref + 9; - if (val < 0) { - stb(mem_end, 0x80); - val = -val; - } else { - stb(mem_end, 0x00); - } - while (mem_ref < mem_end) { - if (val == 0) - break; - v = val % 100; - val = val / 100; - v = ((v / 10) << 4) | (v % 10); - stb(mem_ref++, v); - } - while (mem_ref < mem_end) { - stb(mem_ref++, 0); - } -} - -void helper_f2xm1(void) -{ - double val = floatx80_to_double(ST0); - val = pow(2.0, val) - 1.0; - ST0 = double_to_floatx80(val); -} - -void helper_fyl2x(void) -{ - double fptemp = floatx80_to_double(ST0); - - if (fptemp>0.0){ - fptemp = log(fptemp)/log(2.0); /* log2(ST) */ - fptemp *= floatx80_to_double(ST1); - ST1 = double_to_floatx80(fptemp); - fpop(); - } else { - env->fpus &= (~0x4700); - env->fpus |= 0x400; - } -} - -void helper_fptan(void) -{ - double fptemp = floatx80_to_double(ST0); - - if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) { - env->fpus |= 0x400; - } else { - fptemp = tan(fptemp); - ST0 = double_to_floatx80(fptemp); - fpush(); - ST0 = floatx80_one; - env->fpus &= (~0x400); /* C2 <-- 0 */ - /* the above code is for |arg| < 2**52 only */ - } -} - -void helper_fpatan(void) -{ - double fptemp, fpsrcop; - - fpsrcop = 
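/*
 * Editor's illustrative sketch, not part of this patch: the packed-BCD
 * encoding performed by helper_fbst_ST0() above.  Each output byte holds
 * two decimal digits (low digit in the low nibble); the tenth byte
 * carries the sign.  int64_to_packed_bcd() is a made-up helper name.
 */
#include <stdint.h>

static void int64_to_packed_bcd(int64_t val, uint8_t out[10])
{
    int i;

    out[9] = (val < 0) ? 0x80 : 0x00;   /* sign byte at offset 9 */
    if (val < 0) {
        val = -val;
    }
    for (i = 0; i < 9; i++) {
        int v = val % 100;
        val /= 100;
        out[i] = ((v / 10) << 4) | (v % 10);
    }
}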
floatx80_to_double(ST1); - fptemp = floatx80_to_double(ST0); - ST1 = double_to_floatx80(atan2(fpsrcop, fptemp)); - fpop(); -} - -void helper_fxtract(void) -{ - CPU_LDoubleU temp; - - temp.d = ST0; - - if (floatx80_is_zero(ST0)) { - /* Easy way to generate -inf and raising division by 0 exception */ - ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, &env->fp_status); - fpush(); - ST0 = temp.d; - } else { - int expdif; - - expdif = EXPD(temp) - EXPBIAS; - /*DP exponent bias*/ - ST0 = int32_to_floatx80(expdif, &env->fp_status); - fpush(); - BIASEXPONENT(temp); - ST0 = temp.d; - } -} - -void helper_fprem1(void) -{ - double st0, st1, dblq, fpsrcop, fptemp; - CPU_LDoubleU fpsrcop1, fptemp1; - int expdif; - signed long long int q; - - st0 = floatx80_to_double(ST0); - st1 = floatx80_to_double(ST1); - - if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) { - ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */ - env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ - return; - } - - fpsrcop = st0; - fptemp = st1; - fpsrcop1.d = ST0; - fptemp1.d = ST1; - expdif = EXPD(fpsrcop1) - EXPD(fptemp1); - - if (expdif < 0) { - /* optimisation? taken from the AMD docs */ - env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ - /* ST0 is unchanged */ - return; - } - - if (expdif < 53) { - dblq = fpsrcop / fptemp; - /* round dblq towards nearest integer */ - dblq = rint(dblq); - st0 = fpsrcop - fptemp * dblq; - - /* convert dblq to q by truncating towards zero */ - if (dblq < 0.0) - q = (signed long long int)(-dblq); - else - q = (signed long long int)dblq; - - env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ - /* (C0,C3,C1) <-- (q2,q1,q0) */ - env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */ - env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */ - env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */ - } else { - env->fpus |= 0x400; /* C2 <-- 1 */ - fptemp = pow(2.0, expdif - 50); - fpsrcop = (st0 / st1) / fptemp; - /* fpsrcop = integer obtained by chopping */ - fpsrcop = (fpsrcop < 0.0) ? - -(floor(fabs(fpsrcop))) : floor(fpsrcop); - st0 -= (st1 * fpsrcop * fptemp); - } - ST0 = double_to_floatx80(st0); -} - -void helper_fprem(void) -{ - double st0, st1, dblq, fpsrcop, fptemp; - CPU_LDoubleU fpsrcop1, fptemp1; - int expdif; - signed long long int q; - - st0 = floatx80_to_double(ST0); - st1 = floatx80_to_double(ST1); - - if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) { - ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */ - env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ - return; - } - - fpsrcop = st0; - fptemp = st1; - fpsrcop1.d = ST0; - fptemp1.d = ST1; - expdif = EXPD(fpsrcop1) - EXPD(fptemp1); - - if (expdif < 0) { - /* optimisation? taken from the AMD docs */ - env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ - /* ST0 is unchanged */ - return; - } - - if ( expdif < 53 ) { - dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/; - /* round dblq towards zero */ - dblq = (dblq < 0.0) ? 
ceil(dblq) : floor(dblq); - st0 = fpsrcop/*ST0*/ - fptemp * dblq; - - /* convert dblq to q by truncating towards zero */ - if (dblq < 0.0) - q = (signed long long int)(-dblq); - else - q = (signed long long int)dblq; - - env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ - /* (C0,C3,C1) <-- (q2,q1,q0) */ - env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */ - env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */ - env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */ - } else { - int N = 32 + (expdif % 32); /* as per AMD docs */ - env->fpus |= 0x400; /* C2 <-- 1 */ - fptemp = pow(2.0, (double)(expdif - N)); - fpsrcop = (st0 / st1) / fptemp; - /* fpsrcop = integer obtained by chopping */ - fpsrcop = (fpsrcop < 0.0) ? - -(floor(fabs(fpsrcop))) : floor(fpsrcop); - st0 -= (st1 * fpsrcop * fptemp); - } - ST0 = double_to_floatx80(st0); -} - -void helper_fyl2xp1(void) -{ - double fptemp = floatx80_to_double(ST0); - - if ((fptemp+1.0)>0.0) { - fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */ - fptemp *= floatx80_to_double(ST1); - ST1 = double_to_floatx80(fptemp); - fpop(); - } else { - env->fpus &= (~0x4700); - env->fpus |= 0x400; - } -} - -void helper_fsqrt(void) -{ - if (floatx80_is_neg(ST0)) { - env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ - env->fpus |= 0x400; - } - ST0 = floatx80_sqrt(ST0, &env->fp_status); -} - -void helper_fsincos(void) -{ - double fptemp = floatx80_to_double(ST0); - - if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) { - env->fpus |= 0x400; - } else { - ST0 = double_to_floatx80(sin(fptemp)); - fpush(); - ST0 = double_to_floatx80(cos(fptemp)); - env->fpus &= (~0x400); /* C2 <-- 0 */ - /* the above code is for |arg| < 2**63 only */ - } -} - -void helper_frndint(void) -{ - ST0 = floatx80_round_to_int(ST0, &env->fp_status); -} - -void helper_fscale(void) -{ - if (floatx80_is_any_nan(ST1)) { - ST0 = ST1; - } else { - int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status); - ST0 = floatx80_scalbn(ST0, n, &env->fp_status); - } -} - -void helper_fsin(void) -{ - double fptemp = floatx80_to_double(ST0); - - if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) { - env->fpus |= 0x400; - } else { - ST0 = double_to_floatx80(sin(fptemp)); - env->fpus &= (~0x400); /* C2 <-- 0 */ - /* the above code is for |arg| < 2**53 only */ - } -} - -void helper_fcos(void) -{ - double fptemp = floatx80_to_double(ST0); - - if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) { - env->fpus |= 0x400; - } else { - ST0 = double_to_floatx80(cos(fptemp)); - env->fpus &= (~0x400); /* C2 <-- 0 */ - /* the above code is for |arg5 < 2**63 only */ - } -} - -void helper_fxam_ST0(void) -{ - CPU_LDoubleU temp; - int expdif; - - temp.d = ST0; - - env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ - if (SIGND(temp)) - env->fpus |= 0x200; /* C1 <-- 1 */ - - /* XXX: test fptags too */ - expdif = EXPD(temp); - if (expdif == MAXEXPD) { - if (MANTD(temp) == 0x8000000000000000ULL) - env->fpus |= 0x500 /*Infinity*/; - else - env->fpus |= 0x100 /*NaN*/; - } else if (expdif == 0) { - if (MANTD(temp) == 0) - env->fpus |= 0x4000 /*Zero*/; - else - env->fpus |= 0x4400 /*Denormal*/; - } else { - env->fpus |= 0x400; - } -} - -void helper_fstenv(target_ulong ptr, int data32) -{ - int fpus, fptag, exp, i; - uint64_t mant; - CPU_LDoubleU tmp; - - fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; - fptag = 0; - for (i=7; i>=0; i--) { - fptag <<= 2; - if (env->fptags[i]) { - fptag |= 3; - } else { - tmp.d = env->fpregs[i].d; - exp = EXPD(tmp); - mant = MANTD(tmp); - if (exp == 0 && mant == 0) { - /* zero */ - fptag |= 1; - } else if 
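/*
 * Editor's illustrative sketch, not part of this patch: how
 * helper_fprem() and helper_fprem1() above fold the three low quotient
 * bits into the FPU status word (C0 <- q2, C3 <- q1, C1 <- q0).
 * fprem_set_cc() is a made-up name; fpus points at a 16-bit status word
 * laid out like env->fpus.
 */
#include <stdint.h>

static void fprem_set_cc(uint16_t *fpus, uint64_t q)
{
    *fpus &= ~0x4700;                /* clear C3, C2, C1, C0 */
    *fpus |= (q & 0x4) << (8 - 2);   /* C0 (bit 8)  <- q2 */
    *fpus |= (q & 0x2) << (14 - 1);  /* C3 (bit 14) <- q1 */
    *fpus |= (q & 0x1) << (9 - 0);   /* C1 (bit 9)  <- q0 */
}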
(exp == 0 || exp == MAXEXPD - || (mant & (1LL << 63)) == 0 - ) { - /* NaNs, infinity, denormal */ - fptag |= 2; - } - } - } - if (data32) { - /* 32 bit */ - stl(ptr, env->fpuc); - stl(ptr + 4, fpus); - stl(ptr + 8, fptag); - stl(ptr + 12, 0); /* fpip */ - stl(ptr + 16, 0); /* fpcs */ - stl(ptr + 20, 0); /* fpoo */ - stl(ptr + 24, 0); /* fpos */ - } else { - /* 16 bit */ - stw(ptr, env->fpuc); - stw(ptr + 2, fpus); - stw(ptr + 4, fptag); - stw(ptr + 6, 0); - stw(ptr + 8, 0); - stw(ptr + 10, 0); - stw(ptr + 12, 0); - } -} - -void helper_fldenv(target_ulong ptr, int data32) -{ - int i, fpus, fptag; - - if (data32) { - env->fpuc = lduw(ptr); - fpus = lduw(ptr + 4); - fptag = lduw(ptr + 8); - } - else { - env->fpuc = lduw(ptr); - fpus = lduw(ptr + 2); - fptag = lduw(ptr + 4); - } - env->fpstt = (fpus >> 11) & 7; - env->fpus = fpus & ~0x3800; - for(i = 0;i < 8; i++) { - env->fptags[i] = ((fptag & 3) == 3); - fptag >>= 2; - } -} - -void helper_fsave(target_ulong ptr, int data32) -{ - floatx80 tmp; - int i; - - helper_fstenv(ptr, data32); - - ptr += (14 << data32); - for(i = 0;i < 8; i++) { - tmp = ST(i); - helper_fstt(tmp, ptr); - ptr += 10; - } - - /* fninit */ - env->fpus = 0; - env->fpstt = 0; - env->fpuc = 0x37f; - env->fptags[0] = 1; - env->fptags[1] = 1; - env->fptags[2] = 1; - env->fptags[3] = 1; - env->fptags[4] = 1; - env->fptags[5] = 1; - env->fptags[6] = 1; - env->fptags[7] = 1; -} - -void helper_frstor(target_ulong ptr, int data32) -{ - floatx80 tmp; - int i; - - helper_fldenv(ptr, data32); - ptr += (14 << data32); - - for(i = 0;i < 8; i++) { - tmp = helper_fldt(ptr); - ST(i) = tmp; - ptr += 10; - } -} - - -#if defined(CONFIG_USER_ONLY) -void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector) -{ - CPUX86State *saved_env; - - saved_env = env; - env = s; - if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) { - selector &= 0xffff; - cpu_x86_load_seg_cache(env, seg_reg, selector, - (selector << 4), 0xffff, 0); - } else { - helper_load_seg(seg_reg, selector); - } - env = saved_env; -} - -void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32) -{ - CPUX86State *saved_env; - - saved_env = env; - env = s; - - helper_fsave(ptr, data32); - - env = saved_env; -} - -void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32) -{ - CPUX86State *saved_env; - - saved_env = env; - env = s; - - helper_frstor(ptr, data32); - - env = saved_env; -} -#endif - -void helper_fxsave(target_ulong ptr, int data64) -{ - int fpus, fptag, i, nb_xmm_regs; - floatx80 tmp; - target_ulong addr; - - /* The operand must be 16 byte aligned */ - if (ptr & 0xf) { - raise_exception(EXCP0D_GPF); - } - - fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; - fptag = 0; - for(i = 0; i < 8; i++) { - fptag |= (env->fptags[i] << i); - } - stw(ptr, env->fpuc); - stw(ptr + 2, fpus); - stw(ptr + 4, fptag ^ 0xff); -#ifdef TARGET_X86_64 - if (data64) { - stq(ptr + 0x08, 0); /* rip */ - stq(ptr + 0x10, 0); /* rdp */ - } else -#endif - { - stl(ptr + 0x08, 0); /* eip */ - stl(ptr + 0x0c, 0); /* sel */ - stl(ptr + 0x10, 0); /* dp */ - stl(ptr + 0x14, 0); /* sel */ - } - - addr = ptr + 0x20; - for(i = 0;i < 8; i++) { - tmp = ST(i); - helper_fstt(tmp, addr); - addr += 16; - } - - if (env->cr[4] & CR4_OSFXSR_MASK) { - /* XXX: finish it */ - stl(ptr + 0x18, env->mxcsr); /* mxcsr */ - stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */ - if (env->hflags & HF_CS64_MASK) - nb_xmm_regs = 16; - else - nb_xmm_regs = 8; - addr = ptr + 0xa0; - /* Fast FXSAVE leaves out the XMM registers */ - if (!(env->efer & 
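/*
 * Editor's illustrative sketch, not part of this patch: the abbreviated
 * tag byte written by helper_fxsave() above.  env->fptags[] stores 1 for
 * an empty register, while the FXSAVE image wants 1 for a valid
 * register, hence the final XOR with 0xff.  fxsave_tag_byte() is a
 * made-up helper name.
 */
#include <stdint.h>

static uint8_t fxsave_tag_byte(const uint8_t fptags[8])
{
    uint8_t tag = 0;
    int i;

    for (i = 0; i < 8; i++) {
        tag |= fptags[i] << i;   /* 1 = empty, as in env->fptags[] */
    }
    return tag ^ 0xff;           /* flip: FXSAVE uses 1 = valid */
}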
MSR_EFER_FFXSR) - || (env->hflags & HF_CPL_MASK) - || !(env->hflags & HF_LMA_MASK)) { - for(i = 0; i < nb_xmm_regs; i++) { - stq(addr, env->xmm_regs[i].XMM_Q(0)); - stq(addr + 8, env->xmm_regs[i].XMM_Q(1)); - addr += 16; - } - } - } -} - -void helper_fxrstor(target_ulong ptr, int data64) -{ - int i, fpus, fptag, nb_xmm_regs; - floatx80 tmp; - target_ulong addr; - - /* The operand must be 16 byte aligned */ - if (ptr & 0xf) { - raise_exception(EXCP0D_GPF); - } - - env->fpuc = lduw(ptr); - fpus = lduw(ptr + 2); - fptag = lduw(ptr + 4); - env->fpstt = (fpus >> 11) & 7; - env->fpus = fpus & ~0x3800; - fptag ^= 0xff; - for(i = 0;i < 8; i++) { - env->fptags[i] = ((fptag >> i) & 1); - } - - addr = ptr + 0x20; - for(i = 0;i < 8; i++) { - tmp = helper_fldt(addr); - ST(i) = tmp; - addr += 16; - } - - if (env->cr[4] & CR4_OSFXSR_MASK) { - /* XXX: finish it */ - env->mxcsr = ldl(ptr + 0x18); - //ldl(ptr + 0x1c); - if (env->hflags & HF_CS64_MASK) - nb_xmm_regs = 16; - else - nb_xmm_regs = 8; - addr = ptr + 0xa0; - /* Fast FXRESTORE leaves out the XMM registers */ - if (!(env->efer & MSR_EFER_FFXSR) - || (env->hflags & HF_CPL_MASK) - || !(env->hflags & HF_LMA_MASK)) { - for(i = 0; i < nb_xmm_regs; i++) { - env->xmm_regs[i].XMM_Q(0) = ldq(addr); - env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8); - addr += 16; - } - } - } -} - -void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f) -{ - CPU_LDoubleU temp; - - temp.d = f; - *pmant = temp.l.lower; - *pexp = temp.l.upper; -} - -floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper) -{ - CPU_LDoubleU temp; - - temp.l.upper = upper; - temp.l.lower = mant; - return temp.d; -} - -#ifdef TARGET_X86_64 - -//#define DEBUG_MULDIV - -static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b) -{ - *plow += a; - /* carry test */ - if (*plow < a) - (*phigh)++; - *phigh += b; -} - -static void neg128(uint64_t *plow, uint64_t *phigh) -{ - *plow = ~ *plow; - *phigh = ~ *phigh; - add128(plow, phigh, 1, 0); -} - -/* return TRUE if overflow */ -static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b) -{ - uint64_t q, r, a1, a0; - int i, qb, ab; - - a0 = *plow; - a1 = *phigh; - if (a1 == 0) { - q = a0 / b; - r = a0 % b; - *plow = q; - *phigh = r; - } else { - if (a1 >= b) - return 1; - /* XXX: use a better algorithm */ - for(i = 0; i < 64; i++) { - ab = a1 >> 63; - a1 = (a1 << 1) | (a0 >> 63); - if (ab || a1 >= b) { - a1 -= b; - qb = 1; - } else { - qb = 0; - } - a0 = (a0 << 1) | qb; - } -#if defined(DEBUG_MULDIV) - printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n", - *phigh, *plow, b, a0, a1); -#endif - *plow = a0; - *phigh = a1; - } - return 0; -} - -/* return TRUE if overflow */ -static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b) -{ - int sa, sb; - sa = ((int64_t)*phigh < 0); - if (sa) - neg128(plow, phigh); - sb = (b < 0); - if (sb) - b = -b; - if (div64(plow, phigh, b) != 0) - return 1; - if (sa ^ sb) { - if (*plow > (1ULL << 63)) - return 1; - *plow = - *plow; - } else { - if (*plow >= (1ULL << 63)) - return 1; - } - if (sa) - *phigh = - *phigh; - return 0; -} - -void helper_mulq_EAX_T0(target_ulong t0) -{ - uint64_t r0, r1; - - mulu64(&r0, &r1, EAX, t0); - EAX = r0; - EDX = r1; - CC_DST = r0; - CC_SRC = r1; -} - -void helper_imulq_EAX_T0(target_ulong t0) -{ - uint64_t r0, r1; - - muls64(&r0, &r1, EAX, t0); - EAX = r0; - EDX = r1; - CC_DST = r0; - CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63)); -} - -target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1) -{ - uint64_t r0, 
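/*
 * Editor's illustrative sketch, not part of this patch: the bit-by-bit
 * restoring division used by div64() above for a 128-bit dividend held
 * in two 64-bit halves.  udiv128_by_64() is a made-up name; like
 * div64() it returns nonzero when the quotient would not fit in 64 bits.
 */
#include <stdint.h>

static int udiv128_by_64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t a0 = *plow, a1 = *phigh;
    int i;

    if (a1 == 0) {                       /* fast path: plain 64/64 division */
        *plow = a0 / b;
        *phigh = a0 % b;
        return 0;
    }
    if (a1 >= b) {                       /* quotient overflows 64 bits */
        return 1;
    }
    for (i = 0; i < 64; i++) {
        int carry = a1 >> 63;
        a1 = (a1 << 1) | (a0 >> 63);     /* shift the next dividend bit in */
        if (carry || a1 >= b) {
            a1 -= b;
            a0 = (a0 << 1) | 1;          /* quotient bit is 1 */
        } else {
            a0 = a0 << 1;                /* quotient bit is 0 */
        }
    }
    *plow = a0;                          /* quotient */
    *phigh = a1;                         /* remainder */
    return 0;
}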
r1; - - muls64(&r0, &r1, t0, t1); - CC_DST = r0; - CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63)); - return r0; -} - -void helper_divq_EAX(target_ulong t0) -{ - uint64_t r0, r1; - if (t0 == 0) { - raise_exception(EXCP00_DIVZ); - } - r0 = EAX; - r1 = EDX; - if (div64(&r0, &r1, t0)) - raise_exception(EXCP00_DIVZ); - EAX = r0; - EDX = r1; -} - -void helper_idivq_EAX(target_ulong t0) -{ - uint64_t r0, r1; - if (t0 == 0) { - raise_exception(EXCP00_DIVZ); - } - r0 = EAX; - r1 = EDX; - if (idiv64(&r0, &r1, t0)) - raise_exception(EXCP00_DIVZ); - EAX = r0; - EDX = r1; -} -#endif - -static void do_hlt(void) -{ - env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */ - env->halted = 1; - env->exception_index = EXCP_HLT; - cpu_loop_exit(env); -} - -void helper_hlt(int next_eip_addend) -{ - helper_svm_check_intercept_param(SVM_EXIT_HLT, 0); - EIP += next_eip_addend; - - do_hlt(); -} - -void helper_monitor(target_ulong ptr) -{ - if ((uint32_t)ECX != 0) - raise_exception(EXCP0D_GPF); - /* XXX: store address ? */ - helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0); -} - -void helper_mwait(int next_eip_addend) -{ - if ((uint32_t)ECX != 0) - raise_exception(EXCP0D_GPF); - helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0); - EIP += next_eip_addend; - - /* XXX: not complete but not completely erroneous */ - if (env->cpu_index != 0 || env->next_cpu != NULL) { - /* more than one CPU: do not sleep because another CPU may - wake this one */ - } else { - do_hlt(); - } -} - -void helper_debug(void) -{ - env->exception_index = EXCP_DEBUG; - cpu_loop_exit(env); -} - -void helper_reset_rf(void) -{ - env->eflags &= ~RF_MASK; -} - -void helper_raise_interrupt(int intno, int next_eip_addend) -{ - raise_interrupt(intno, 1, 0, next_eip_addend); -} - -void helper_raise_exception(int exception_index) -{ - raise_exception(exception_index); -} - -void helper_cli(void) -{ - env->eflags &= ~IF_MASK; -} - -void helper_sti(void) -{ - env->eflags |= IF_MASK; -} - -#if 0 -/* vm86plus instructions */ -void helper_cli_vm(void) -{ - env->eflags &= ~VIF_MASK; -} - -void helper_sti_vm(void) -{ - env->eflags |= VIF_MASK; - if (env->eflags & VIP_MASK) { - raise_exception(EXCP0D_GPF); - } -} -#endif - -void helper_set_inhibit_irq(void) -{ - env->hflags |= HF_INHIBIT_IRQ_MASK; -} - -void helper_reset_inhibit_irq(void) -{ - env->hflags &= ~HF_INHIBIT_IRQ_MASK; -} - -void helper_boundw(target_ulong a0, int v) -{ - int low, high; - low = ldsw(a0); - high = ldsw(a0 + 2); - v = (int16_t)v; - if (v < low || v > high) { - raise_exception(EXCP05_BOUND); - } -} - -void helper_boundl(target_ulong a0, int v) -{ - int low, high; - low = ldl(a0); - high = ldl(a0 + 4); - if (v < low || v > high) { - raise_exception(EXCP05_BOUND); - } -} - -#if !defined(CONFIG_USER_ONLY) - -#define MMUSUFFIX _mmu - -#define SHIFT 0 -#include "softmmu_template.h" - -#define SHIFT 1 -#include "softmmu_template.h" - -#define SHIFT 2 -#include "softmmu_template.h" - -#define SHIFT 3 -#include "softmmu_template.h" - -#endif - -#if !defined(CONFIG_USER_ONLY) -/* try to fill the TLB and return an exception if error. If retaddr is - NULL, it means that the function was called in C code (i.e. 
not - from generated code or from helper.c) */ -/* XXX: fix it to restore all registers */ -void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx, - uintptr_t retaddr) -{ - TranslationBlock *tb; - int ret; - CPUX86State *saved_env; - - saved_env = env; - env = env1; - - ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx); - if (ret) { - if (retaddr) { - /* now we have a real cpu fault */ - tb = tb_find_pc(retaddr); - if (tb) { - /* the PC is inside the translated code. It means that we have - a virtual CPU fault */ - cpu_restore_state(tb, env, retaddr); - } - } - raise_exception_err(env->exception_index, env->error_code); - } - env = saved_env; -} -#endif - -/* Secure Virtual Machine helpers */ - -#if defined(CONFIG_USER_ONLY) - -void helper_vmrun(int aflag, int next_eip_addend) -{ -} -void helper_vmmcall(void) -{ -} -void helper_vmload(int aflag) -{ -} -void helper_vmsave(int aflag) -{ -} -void helper_stgi(void) -{ -} -void helper_clgi(void) -{ -} -void helper_skinit(void) -{ -} -void helper_invlpga(int aflag) -{ -} -void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1) -{ -} -void helper_svm_check_intercept_param(uint32_t type, uint64_t param) -{ -} - -void svm_check_intercept(CPUX86State *env1, uint32_t type) -{ -} - -void helper_svm_check_io(uint32_t port, uint32_t param, - uint32_t next_eip_addend) -{ -} -#else - -static inline void svm_save_seg(target_phys_addr_t addr, - const SegmentCache *sc) -{ - stw_phys(addr + offsetof(struct vmcb_seg, selector), - sc->selector); - stq_phys(addr + offsetof(struct vmcb_seg, base), - sc->base); - stl_phys(addr + offsetof(struct vmcb_seg, limit), - sc->limit); - stw_phys(addr + offsetof(struct vmcb_seg, attrib), - ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00)); -} - -static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc) -{ - unsigned int flags; - - sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector)); - sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base)); - sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit)); - flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib)); - sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12); -} - -static inline void svm_load_seg_cache(target_phys_addr_t addr, - CPUX86State *env, int seg_reg) -{ - SegmentCache sc1, *sc = &sc1; - svm_load_seg(addr, sc); - cpu_x86_load_seg_cache(env, seg_reg, sc->selector, - sc->base, sc->limit, sc->flags); -} - -void helper_vmrun(int aflag, int next_eip_addend) -{ - target_ulong addr; - uint32_t event_inj; - uint32_t int_ctl; - - helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0); - - if (aflag == 2) - addr = EAX; - else - addr = (uint32_t)EAX; - - qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! 
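/*
 * Editor's illustrative sketch, not part of this patch: the 12-bit VMCB
 * segment attribute packing done by svm_save_seg()/svm_load_seg() above.
 * QEMU keeps descriptor flags in their native positions (bits 8..15 and
 * 20..23 of the high descriptor word); the VMCB stores them contiguously
 * in a 16-bit attrib field.  Both helper names below are made up.
 */
#include <stdint.h>

static uint16_t vmcb_pack_attrib(uint32_t flags)
{
    return ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
}

static uint32_t vmcb_unpack_attrib(uint16_t attrib)
{
    return ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);
}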
" TARGET_FMT_lx "\n", addr); - - env->vm_vmcb = addr; - - /* save the current CPU state in the hsave page */ - stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base); - stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit); - - stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base); - stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit); - - stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]); - stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]); - stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]); - stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]); - stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]); - stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]); - - stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer); - stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags()); - - svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es), - &env->segs[R_ES]); - svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs), - &env->segs[R_CS]); - svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss), - &env->segs[R_SS]); - svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds), - &env->segs[R_DS]); - - stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), - EIP + next_eip_addend); - stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP); - stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX); - - /* load the interception bitmaps so we do not need to access the - vmcb in svm mode */ - env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)); - env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read)); - env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write)); - env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read)); - env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write)); - env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions)); - - /* enable intercepts */ - env->hflags |= HF_SVMI_MASK; - - env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset)); - - env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base)); - env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit)); - - env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base)); - env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit)); - - /* clear exit_info_2 so we behave like the real hardware */ - stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0); - - cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0))); - cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4))); - cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3))); - env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2)); - int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); - env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); - if (int_ctl & V_INTR_MASKING_MASK) { - env->v_tpr = int_ctl & V_TPR_MASK; - env->hflags2 |= HF2_VINTR_MASK; - if (env->eflags & IF_MASK) - env->hflags2 |= HF2_HIF_MASK; - 
} - - cpu_load_efer(env, - ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer))); - env->eflags = 0; - load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)), - ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); - CC_OP = CC_OP_EFLAGS; - - svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es), - env, R_ES); - svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs), - env, R_CS); - svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss), - env, R_SS); - svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds), - env, R_DS); - - EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip)); - env->eip = EIP; - ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp)); - EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax)); - env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7)); - env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6)); - cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl))); - - /* FIXME: guest state consistency checks */ - - switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) { - case TLB_CONTROL_DO_NOTHING: - break; - case TLB_CONTROL_FLUSH_ALL_ASID: - /* FIXME: this is not 100% correct but should work for now */ - tlb_flush(env, 1); - break; - } - - env->hflags2 |= HF2_GIF_MASK; - - if (int_ctl & V_IRQ_MASK) { - env->interrupt_request |= CPU_INTERRUPT_VIRQ; - } - - /* maybe we need to inject an event */ - event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)); - if (event_inj & SVM_EVTINJ_VALID) { - uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK; - uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR; - uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)); - - qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err); - /* FIXME: need to implement valid_err */ - switch (event_inj & SVM_EVTINJ_TYPE_MASK) { - case SVM_EVTINJ_TYPE_INTR: - env->exception_index = vector; - env->error_code = event_inj_err; - env->exception_is_int = 0; - env->exception_next_eip = -1; - qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR"); - /* XXX: is it always correct ? */ - do_interrupt_all(vector, 0, 0, 0, 1); - break; - case SVM_EVTINJ_TYPE_NMI: - env->exception_index = EXCP02_NMI; - env->error_code = event_inj_err; - env->exception_is_int = 0; - env->exception_next_eip = EIP; - qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI"); - cpu_loop_exit(env); - break; - case SVM_EVTINJ_TYPE_EXEPT: - env->exception_index = vector; - env->error_code = event_inj_err; - env->exception_is_int = 0; - env->exception_next_eip = -1; - qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT"); - cpu_loop_exit(env); - break; - case SVM_EVTINJ_TYPE_SOFT: - env->exception_index = vector; - env->error_code = event_inj_err; - env->exception_is_int = 1; - env->exception_next_eip = EIP; - qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT"); - cpu_loop_exit(env); - break; - } - qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code); - } -} - -void helper_vmmcall(void) -{ - helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0); - raise_exception(EXCP06_ILLOP); -} - -void helper_vmload(int aflag) -{ - target_ulong addr; - helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0); - - if (aflag == 2) - addr = EAX; - else - addr = (uint32_t)EAX; - - qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! 
" TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", - addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)), - env->segs[R_FS].base); - - svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs), - env, R_FS); - svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs), - env, R_GS); - svm_load_seg(addr + offsetof(struct vmcb, save.tr), - &env->tr); - svm_load_seg(addr + offsetof(struct vmcb, save.ldtr), - &env->ldt); - -#ifdef TARGET_X86_64 - env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base)); - env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar)); - env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar)); - env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask)); -#endif - env->star = ldq_phys(addr + offsetof(struct vmcb, save.star)); - env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs)); - env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp)); - env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip)); -} - -void helper_vmsave(int aflag) -{ - target_ulong addr; - helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0); - - if (aflag == 2) - addr = EAX; - else - addr = (uint32_t)EAX; - - qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", - addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)), - env->segs[R_FS].base); - - svm_save_seg(addr + offsetof(struct vmcb, save.fs), - &env->segs[R_FS]); - svm_save_seg(addr + offsetof(struct vmcb, save.gs), - &env->segs[R_GS]); - svm_save_seg(addr + offsetof(struct vmcb, save.tr), - &env->tr); - svm_save_seg(addr + offsetof(struct vmcb, save.ldtr), - &env->ldt); - -#ifdef TARGET_X86_64 - stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase); - stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar); - stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar); - stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask); -#endif - stq_phys(addr + offsetof(struct vmcb, save.star), env->star); - stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs); - stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp); - stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip); -} - -void helper_stgi(void) -{ - helper_svm_check_intercept_param(SVM_EXIT_STGI, 0); - env->hflags2 |= HF2_GIF_MASK; -} - -void helper_clgi(void) -{ - helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0); - env->hflags2 &= ~HF2_GIF_MASK; -} - -void helper_skinit(void) -{ - helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0); - /* XXX: not implemented */ - raise_exception(EXCP06_ILLOP); -} - -void helper_invlpga(int aflag) -{ - target_ulong addr; - helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0); - - if (aflag == 2) - addr = EAX; - else - addr = (uint32_t)EAX; - - /* XXX: could use the ASID to see if it is needed to do the - flush */ - tlb_flush_page(env, addr); -} - -void helper_svm_check_intercept_param(uint32_t type, uint64_t param) -{ - if (likely(!(env->hflags & HF_SVMI_MASK))) - return; - switch(type) { - case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8: - if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) { - helper_vmexit(type, param); - } - break; - case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8: - if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) { - helper_vmexit(type, param); - } - break; - case SVM_EXIT_READ_DR0 ... 
SVM_EXIT_READ_DR0 + 7: - if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) { - helper_vmexit(type, param); - } - break; - case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7: - if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) { - helper_vmexit(type, param); - } - break; - case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31: - if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) { - helper_vmexit(type, param); - } - break; - case SVM_EXIT_MSR: - if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) { - /* FIXME: this should be read in at vmrun (faster this way?) */ - uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa)); - uint32_t t0, t1; - switch((uint32_t)ECX) { - case 0 ... 0x1fff: - t0 = (ECX * 2) % 8; - t1 = (ECX * 2) / 8; - break; - case 0xc0000000 ... 0xc0001fff: - t0 = (8192 + ECX - 0xc0000000) * 2; - t1 = (t0 / 8); - t0 %= 8; - break; - case 0xc0010000 ... 0xc0011fff: - t0 = (16384 + ECX - 0xc0010000) * 2; - t1 = (t0 / 8); - t0 %= 8; - break; - default: - helper_vmexit(type, param); - t0 = 0; - t1 = 0; - break; - } - if (ldub_phys(addr + t1) & ((1 << param) << t0)) - helper_vmexit(type, param); - } - break; - default: - if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) { - helper_vmexit(type, param); - } - break; - } -} - -void svm_check_intercept(CPUX86State *env1, uint32_t type) -{ - CPUX86State *saved_env; - - saved_env = env; - env = env1; - helper_svm_check_intercept_param(type, 0); - env = saved_env; -} - -void helper_svm_check_io(uint32_t port, uint32_t param, - uint32_t next_eip_addend) -{ - if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) { - /* FIXME: this should be read in at vmrun (faster this way?) */ - uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa)); - uint16_t mask = (1 << ((param >> 4) & 7)) - 1; - if(lduw_phys(addr + port / 8) & (mask << (port & 7))) { - /* next EIP */ - stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), - env->eip + next_eip_addend); - helper_vmexit(SVM_EXIT_IOIO, param | (port << 16)); - } - } -} - -/* Note: currently only 32 bits of exit_code are used */ -void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1) -{ - uint32_t int_ctl; - - qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n", - exit_code, exit_info_1, - ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)), - EIP); - - if(env->hflags & HF_INHIBIT_IRQ_MASK) { - stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK); - env->hflags &= ~HF_INHIBIT_IRQ_MASK; - } else { - stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0); - } - - /* Save the VM state in the vmcb */ - svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es), - &env->segs[R_ES]); - svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs), - &env->segs[R_CS]); - svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss), - &env->segs[R_SS]); - svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds), - &env->segs[R_DS]); - - stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base); - stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit); - - stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base); - stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit); - - stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer); - 
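/*
 * Editor's illustrative sketch, not part of this patch: how the MSR
 * intercept check above maps an MSR number to a bit position in the MSR
 * permission map as this code reads it (two bits per MSR, read then
 * write, with three 0x2000-MSR ranges packed back to back).
 * msrpm_bit_offset() is a made-up name; it returns the absolute bit
 * offset, or -1 for MSRs outside the map (which are intercepted).
 */
#include <stdint.h>

static int64_t msrpm_bit_offset(uint32_t msr, int is_write)
{
    uint32_t index;

    if (msr <= 0x1fff) {
        index = msr;                          /* first vector */
    } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
        index = 0x2000 + (msr - 0xc0000000);  /* second vector */
    } else if (msr >= 0xc0010000 && msr <= 0xc0011fff) {
        index = 0x4000 + (msr - 0xc0010000);  /* third vector */
    } else {
        return -1;                            /* not covered by the map */
    }
    return (int64_t)index * 2 + (is_write ? 1 : 0);
}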
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]); - stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]); - stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]); - stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]); - - int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); - int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK); - int_ctl |= env->v_tpr & V_TPR_MASK; - if (env->interrupt_request & CPU_INTERRUPT_VIRQ) - int_ctl |= V_IRQ_MASK; - stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl); - - stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags()); - stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip); - stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP); - stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX); - stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]); - stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]); - stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK); - - /* Reload the host state from vm_hsave */ - env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); - env->hflags &= ~HF_SVMI_MASK; - env->intercept = 0; - env->intercept_exceptions = 0; - env->interrupt_request &= ~CPU_INTERRUPT_VIRQ; - env->tsc_offset = 0; - - env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base)); - env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit)); - - env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base)); - env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit)); - - cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK); - cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4))); - cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3))); - /* we need to set the efer after the crs so the hidden flags get - set properly */ - cpu_load_efer(env, - ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer))); - env->eflags = 0; - load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)), - ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); - CC_OP = CC_OP_EFLAGS; - - svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es), - env, R_ES); - svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs), - env, R_CS); - svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss), - env, R_SS); - svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds), - env, R_DS); - - EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip)); - ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp)); - EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax)); - - env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6)); - env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7)); - - /* other setups */ - cpu_x86_set_cpl(env, 0); - stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code); - stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1); - - stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info), - ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj))); - stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err), - ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err))); - stl_phys(env->vm_vmcb + 
offsetof(struct vmcb, control.event_inj), 0); - - env->hflags2 &= ~HF2_GIF_MASK; - /* FIXME: Resets the current ASID register to zero (host ASID). */ - - /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */ - - /* Clears the TSC_OFFSET inside the processor. */ - - /* If the host is in PAE mode, the processor reloads the host's PDPEs - from the page table indicated the host's CR3. If the PDPEs contain - illegal state, the processor causes a shutdown. */ - - /* Forces CR0.PE = 1, RFLAGS.VM = 0. */ - env->cr[0] |= CR0_PE_MASK; - env->eflags &= ~VM_MASK; - - /* Disables all breakpoints in the host DR7 register. */ - - /* Checks the reloaded host state for consistency. */ - - /* If the host's rIP reloaded by #VMEXIT is outside the limit of the - host's code segment or non-canonical (in the case of long mode), a - #GP fault is delivered inside the host.) */ - - /* remove any pending exception */ - env->exception_index = -1; - env->error_code = 0; - env->old_exception = -1; - - cpu_loop_exit(env); -} - -#endif - -/* MMX/SSE */ -/* XXX: optimize by storing fptt and fptags in the static cpu state */ - -#define SSE_DAZ 0x0040 -#define SSE_RC_MASK 0x6000 -#define SSE_RC_NEAR 0x0000 -#define SSE_RC_DOWN 0x2000 -#define SSE_RC_UP 0x4000 -#define SSE_RC_CHOP 0x6000 -#define SSE_FZ 0x8000 - -static void update_sse_status(void) -{ - int rnd_type; - - /* set rounding mode */ - switch(env->mxcsr & SSE_RC_MASK) { - default: - case SSE_RC_NEAR: - rnd_type = float_round_nearest_even; - break; - case SSE_RC_DOWN: - rnd_type = float_round_down; - break; - case SSE_RC_UP: - rnd_type = float_round_up; - break; - case SSE_RC_CHOP: - rnd_type = float_round_to_zero; - break; - } - set_float_rounding_mode(rnd_type, &env->sse_status); - - /* set denormals are zero */ - set_flush_inputs_to_zero((env->mxcsr & SSE_DAZ) ? 1 : 0, &env->sse_status); - - /* set flush to zero */ - set_flush_to_zero((env->mxcsr & SSE_FZ) ? 
1 : 0, &env->fp_status); -} - -void helper_ldmxcsr(uint32_t val) -{ - env->mxcsr = val; - update_sse_status(); -} - -void helper_enter_mmx(void) -{ - env->fpstt = 0; - *(uint32_t *)(env->fptags) = 0; - *(uint32_t *)(env->fptags + 4) = 0; -} - -void helper_emms(void) -{ - /* set to empty state */ - *(uint32_t *)(env->fptags) = 0x01010101; - *(uint32_t *)(env->fptags + 4) = 0x01010101; -} - -/* XXX: suppress */ -void helper_movq(void *d, void *s) -{ - *(uint64_t *)d = *(uint64_t *)s; -} - -#define SHIFT 0 -#include "ops_sse.h" - -#define SHIFT 1 -#include "ops_sse.h" - -#define SHIFT 0 -#include "helper_template.h" -#undef SHIFT - -#define SHIFT 1 -#include "helper_template.h" -#undef SHIFT - -#define SHIFT 2 -#include "helper_template.h" -#undef SHIFT - -#ifdef TARGET_X86_64 - -#define SHIFT 3 -#include "helper_template.h" -#undef SHIFT - -#endif - -/* bit operations */ -target_ulong helper_bsf(target_ulong t0) -{ - int count; - target_ulong res; - - res = t0; - count = 0; - while ((res & 1) == 0) { - count++; - res >>= 1; - } - return count; -} - -target_ulong helper_lzcnt(target_ulong t0, int wordsize) -{ - int count; - target_ulong res, mask; - - if (wordsize > 0 && t0 == 0) { - return wordsize; - } - res = t0; - count = TARGET_LONG_BITS - 1; - mask = (target_ulong)1 << (TARGET_LONG_BITS - 1); - while ((res & mask) == 0) { - count--; - res <<= 1; - } - if (wordsize > 0) { - return wordsize - 1 - count; - } - return count; -} - -target_ulong helper_bsr(target_ulong t0) -{ - return helper_lzcnt(t0, 0); -} - -static int compute_all_eflags(void) -{ - return CC_SRC; -} - -static int compute_c_eflags(void) -{ - return CC_SRC & CC_C; -} - -uint32_t helper_cc_compute_all(int op) -{ - switch (op) { - default: /* should never happen */ return 0; - - case CC_OP_EFLAGS: return compute_all_eflags(); - - case CC_OP_MULB: return compute_all_mulb(); - case CC_OP_MULW: return compute_all_mulw(); - case CC_OP_MULL: return compute_all_mull(); - - case CC_OP_ADDB: return compute_all_addb(); - case CC_OP_ADDW: return compute_all_addw(); - case CC_OP_ADDL: return compute_all_addl(); - - case CC_OP_ADCB: return compute_all_adcb(); - case CC_OP_ADCW: return compute_all_adcw(); - case CC_OP_ADCL: return compute_all_adcl(); - - case CC_OP_SUBB: return compute_all_subb(); - case CC_OP_SUBW: return compute_all_subw(); - case CC_OP_SUBL: return compute_all_subl(); - - case CC_OP_SBBB: return compute_all_sbbb(); - case CC_OP_SBBW: return compute_all_sbbw(); - case CC_OP_SBBL: return compute_all_sbbl(); - - case CC_OP_LOGICB: return compute_all_logicb(); - case CC_OP_LOGICW: return compute_all_logicw(); - case CC_OP_LOGICL: return compute_all_logicl(); - - case CC_OP_INCB: return compute_all_incb(); - case CC_OP_INCW: return compute_all_incw(); - case CC_OP_INCL: return compute_all_incl(); - - case CC_OP_DECB: return compute_all_decb(); - case CC_OP_DECW: return compute_all_decw(); - case CC_OP_DECL: return compute_all_decl(); - - case CC_OP_SHLB: return compute_all_shlb(); - case CC_OP_SHLW: return compute_all_shlw(); - case CC_OP_SHLL: return compute_all_shll(); - - case CC_OP_SARB: return compute_all_sarb(); - case CC_OP_SARW: return compute_all_sarw(); - case CC_OP_SARL: return compute_all_sarl(); - -#ifdef TARGET_X86_64 - case CC_OP_MULQ: return compute_all_mulq(); - - case CC_OP_ADDQ: return compute_all_addq(); - - case CC_OP_ADCQ: return compute_all_adcq(); - - case CC_OP_SUBQ: return compute_all_subq(); - - case CC_OP_SBBQ: return compute_all_sbbq(); - - case CC_OP_LOGICQ: return compute_all_logicq(); - - case 
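/*
 * Editor's illustrative sketch, not part of this patch: the LZCNT
 * semantics implemented by helper_lzcnt() above, specialised to a 32-bit
 * operand.  A zero input yields the operand width; otherwise the result
 * is the number of leading zero bits.  lzcnt32() is a made-up name.
 */
#include <stdint.h>

static int lzcnt32(uint32_t x)
{
    int n = 0;

    if (x == 0) {
        return 32;               /* defined result for a zero input */
    }
    while (!(x & 0x80000000u)) {
        n++;
        x <<= 1;
    }
    return n;
}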
CC_OP_INCQ: return compute_all_incq(); - - case CC_OP_DECQ: return compute_all_decq(); - - case CC_OP_SHLQ: return compute_all_shlq(); - - case CC_OP_SARQ: return compute_all_sarq(); -#endif - } -} - -uint32_t cpu_cc_compute_all(CPUX86State *env1, int op) -{ - CPUX86State *saved_env; - uint32_t ret; - - saved_env = env; - env = env1; - ret = helper_cc_compute_all(op); - env = saved_env; - return ret; -} - -uint32_t helper_cc_compute_c(int op) -{ - switch (op) { - default: /* should never happen */ return 0; - - case CC_OP_EFLAGS: return compute_c_eflags(); - - case CC_OP_MULB: return compute_c_mull(); - case CC_OP_MULW: return compute_c_mull(); - case CC_OP_MULL: return compute_c_mull(); - - case CC_OP_ADDB: return compute_c_addb(); - case CC_OP_ADDW: return compute_c_addw(); - case CC_OP_ADDL: return compute_c_addl(); - - case CC_OP_ADCB: return compute_c_adcb(); - case CC_OP_ADCW: return compute_c_adcw(); - case CC_OP_ADCL: return compute_c_adcl(); - - case CC_OP_SUBB: return compute_c_subb(); - case CC_OP_SUBW: return compute_c_subw(); - case CC_OP_SUBL: return compute_c_subl(); - - case CC_OP_SBBB: return compute_c_sbbb(); - case CC_OP_SBBW: return compute_c_sbbw(); - case CC_OP_SBBL: return compute_c_sbbl(); - - case CC_OP_LOGICB: return compute_c_logicb(); - case CC_OP_LOGICW: return compute_c_logicw(); - case CC_OP_LOGICL: return compute_c_logicl(); - - case CC_OP_INCB: return compute_c_incl(); - case CC_OP_INCW: return compute_c_incl(); - case CC_OP_INCL: return compute_c_incl(); - - case CC_OP_DECB: return compute_c_incl(); - case CC_OP_DECW: return compute_c_incl(); - case CC_OP_DECL: return compute_c_incl(); - - case CC_OP_SHLB: return compute_c_shlb(); - case CC_OP_SHLW: return compute_c_shlw(); - case CC_OP_SHLL: return compute_c_shll(); - - case CC_OP_SARB: return compute_c_sarl(); - case CC_OP_SARW: return compute_c_sarl(); - case CC_OP_SARL: return compute_c_sarl(); - -#ifdef TARGET_X86_64 - case CC_OP_MULQ: return compute_c_mull(); - - case CC_OP_ADDQ: return compute_c_addq(); - - case CC_OP_ADCQ: return compute_c_adcq(); - - case CC_OP_SUBQ: return compute_c_subq(); - - case CC_OP_SBBQ: return compute_c_sbbq(); - - case CC_OP_LOGICQ: return compute_c_logicq(); - - case CC_OP_INCQ: return compute_c_incl(); - - case CC_OP_DECQ: return compute_c_incl(); - - case CC_OP_SHLQ: return compute_c_shlq(); - - case CC_OP_SARQ: return compute_c_sarl(); -#endif - } -} diff --git a/target-i386/ops_sse.h b/target-i386/ops_sse.h index 0d33ca1985..d109512d5b 100644 --- a/target-i386/ops_sse.h +++ b/target-i386/ops_sse.h @@ -203,12 +203,15 @@ void glue(helper_psrldq, SUFFIX)(Reg *d, Reg *s) int shift, i; shift = s->L(0); - if (shift > 16) + if (shift > 16) { shift = 16; - for(i = 0; i < 16 - shift; i++) + } + for (i = 0; i < 16 - shift; i++) { d->B(i) = d->B(i + shift); - for(i = 16 - shift; i < 16; i++) + } + for (i = 16 - shift; i < 16; i++) { d->B(i) = 0; + } } void glue(helper_pslldq, SUFFIX)(Reg *d, Reg *s) @@ -216,112 +219,119 @@ void glue(helper_pslldq, SUFFIX)(Reg *d, Reg *s) int shift, i; shift = s->L(0); - if (shift > 16) + if (shift > 16) { shift = 16; - for(i = 15; i >= shift; i--) + } + for (i = 15; i >= shift; i--) { d->B(i) = d->B(i - shift); - for(i = 0; i < shift; i++) + } + for (i = 0; i < shift; i++) { d->B(i) = 0; + } } #endif -#define SSE_HELPER_B(name, F)\ -void glue(name, SUFFIX) (Reg *d, Reg *s)\ -{\ - d->B(0) = F(d->B(0), s->B(0));\ - d->B(1) = F(d->B(1), s->B(1));\ - d->B(2) = F(d->B(2), s->B(2));\ - d->B(3) = F(d->B(3), s->B(3));\ - d->B(4) = F(d->B(4), 
s->B(4));\ - d->B(5) = F(d->B(5), s->B(5));\ - d->B(6) = F(d->B(6), s->B(6));\ - d->B(7) = F(d->B(7), s->B(7));\ - XMM_ONLY(\ - d->B(8) = F(d->B(8), s->B(8));\ - d->B(9) = F(d->B(9), s->B(9));\ - d->B(10) = F(d->B(10), s->B(10));\ - d->B(11) = F(d->B(11), s->B(11));\ - d->B(12) = F(d->B(12), s->B(12));\ - d->B(13) = F(d->B(13), s->B(13));\ - d->B(14) = F(d->B(14), s->B(14));\ - d->B(15) = F(d->B(15), s->B(15));\ - )\ -} - -#define SSE_HELPER_W(name, F)\ -void glue(name, SUFFIX) (Reg *d, Reg *s)\ -{\ - d->W(0) = F(d->W(0), s->W(0));\ - d->W(1) = F(d->W(1), s->W(1));\ - d->W(2) = F(d->W(2), s->W(2));\ - d->W(3) = F(d->W(3), s->W(3));\ - XMM_ONLY(\ - d->W(4) = F(d->W(4), s->W(4));\ - d->W(5) = F(d->W(5), s->W(5));\ - d->W(6) = F(d->W(6), s->W(6));\ - d->W(7) = F(d->W(7), s->W(7));\ - )\ -} - -#define SSE_HELPER_L(name, F)\ -void glue(name, SUFFIX) (Reg *d, Reg *s)\ -{\ - d->L(0) = F(d->L(0), s->L(0));\ - d->L(1) = F(d->L(1), s->L(1));\ - XMM_ONLY(\ - d->L(2) = F(d->L(2), s->L(2));\ - d->L(3) = F(d->L(3), s->L(3));\ - )\ -} - -#define SSE_HELPER_Q(name, F)\ -void glue(name, SUFFIX) (Reg *d, Reg *s)\ -{\ - d->Q(0) = F(d->Q(0), s->Q(0));\ - XMM_ONLY(\ - d->Q(1) = F(d->Q(1), s->Q(1));\ - )\ -} +#define SSE_HELPER_B(name, F) \ + void glue(name, SUFFIX)(Reg *d, Reg *s) \ + { \ + d->B(0) = F(d->B(0), s->B(0)); \ + d->B(1) = F(d->B(1), s->B(1)); \ + d->B(2) = F(d->B(2), s->B(2)); \ + d->B(3) = F(d->B(3), s->B(3)); \ + d->B(4) = F(d->B(4), s->B(4)); \ + d->B(5) = F(d->B(5), s->B(5)); \ + d->B(6) = F(d->B(6), s->B(6)); \ + d->B(7) = F(d->B(7), s->B(7)); \ + XMM_ONLY( \ + d->B(8) = F(d->B(8), s->B(8)); \ + d->B(9) = F(d->B(9), s->B(9)); \ + d->B(10) = F(d->B(10), s->B(10)); \ + d->B(11) = F(d->B(11), s->B(11)); \ + d->B(12) = F(d->B(12), s->B(12)); \ + d->B(13) = F(d->B(13), s->B(13)); \ + d->B(14) = F(d->B(14), s->B(14)); \ + d->B(15) = F(d->B(15), s->B(15)); \ + ) \ + } + +#define SSE_HELPER_W(name, F) \ + void glue(name, SUFFIX)(Reg *d, Reg *s) \ + { \ + d->W(0) = F(d->W(0), s->W(0)); \ + d->W(1) = F(d->W(1), s->W(1)); \ + d->W(2) = F(d->W(2), s->W(2)); \ + d->W(3) = F(d->W(3), s->W(3)); \ + XMM_ONLY( \ + d->W(4) = F(d->W(4), s->W(4)); \ + d->W(5) = F(d->W(5), s->W(5)); \ + d->W(6) = F(d->W(6), s->W(6)); \ + d->W(7) = F(d->W(7), s->W(7)); \ + ) \ + } + +#define SSE_HELPER_L(name, F) \ + void glue(name, SUFFIX)(Reg *d, Reg *s) \ + { \ + d->L(0) = F(d->L(0), s->L(0)); \ + d->L(1) = F(d->L(1), s->L(1)); \ + XMM_ONLY( \ + d->L(2) = F(d->L(2), s->L(2)); \ + d->L(3) = F(d->L(3), s->L(3)); \ + ) \ + } + +#define SSE_HELPER_Q(name, F) \ + void glue(name, SUFFIX)(Reg *d, Reg *s) \ + { \ + d->Q(0) = F(d->Q(0), s->Q(0)); \ + XMM_ONLY( \ + d->Q(1) = F(d->Q(1), s->Q(1)); \ + ) \ + } #if SHIFT == 0 static inline int satub(int x) { - if (x < 0) + if (x < 0) { return 0; - else if (x > 255) + } else if (x > 255) { return 255; - else + } else { return x; + } } static inline int satuw(int x) { - if (x < 0) + if (x < 0) { return 0; - else if (x > 65535) + } else if (x > 65535) { return 65535; - else + } else { return x; + } } static inline int satsb(int x) { - if (x < -128) + if (x < -128) { return -128; - else if (x > 127) + } else if (x > 127) { return 127; - else + } else { return x; + } } static inline int satsw(int x) { - if (x < -32768) + if (x < -32768) { return -32768; - else if (x > 32767) + } else if (x > 32767) { return 32767; - else + } else { return x; + } } #define FADD(a, b) ((a) + (b)) @@ -340,22 +350,22 @@ static inline int satsw(int x) #define FMAXUB(a, b) ((a) > (b)) ? 
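/*
 * Editor's illustrative sketch, not part of this patch: how the satsw()
 * clamp reformatted above is combined with the SSE_HELPER_W pattern for
 * packed saturating signed-word addition.  paddsw1() is a made-up name
 * for a single-element version of that operation.
 */
#include <stdint.h>

static int satsw(int x)
{
    if (x < -32768) {
        return -32768;
    } else if (x > 32767) {
        return 32767;
    } else {
        return x;
    }
}

static uint16_t paddsw1(uint16_t a, uint16_t b)
{
    /* add as signed 16-bit values, then clamp back into 16 bits */
    return (uint16_t)satsw((int16_t)a + (int16_t)b);
}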
(a) : (b) #define FMAXSW(a, b) ((int16_t)(a) > (int16_t)(b)) ? (a) : (b) -#define FAND(a, b) (a) & (b) +#define FAND(a, b) ((a) & (b)) #define FANDN(a, b) ((~(a)) & (b)) -#define FOR(a, b) (a) | (b) -#define FXOR(a, b) (a) ^ (b) +#define FOR(a, b) ((a) | (b)) +#define FXOR(a, b) ((a) ^ (b)) -#define FCMPGTB(a, b) (int8_t)(a) > (int8_t)(b) ? -1 : 0 -#define FCMPGTW(a, b) (int16_t)(a) > (int16_t)(b) ? -1 : 0 -#define FCMPGTL(a, b) (int32_t)(a) > (int32_t)(b) ? -1 : 0 -#define FCMPEQ(a, b) (a) == (b) ? -1 : 0 +#define FCMPGTB(a, b) ((int8_t)(a) > (int8_t)(b) ? -1 : 0) +#define FCMPGTW(a, b) ((int16_t)(a) > (int16_t)(b) ? -1 : 0) +#define FCMPGTL(a, b) ((int32_t)(a) > (int32_t)(b) ? -1 : 0) +#define FCMPEQ(a, b) ((a) == (b) ? -1 : 0) -#define FMULLW(a, b) (a) * (b) -#define FMULHRW(a, b) ((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16 -#define FMULHUW(a, b) (a) * (b) >> 16 -#define FMULHW(a, b) (int16_t)(a) * (int16_t)(b) >> 16 +#define FMULLW(a, b) ((a) * (b)) +#define FMULHRW(a, b) (((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16) +#define FMULHUW(a, b) ((a) * (b) >> 16) +#define FMULHW(a, b) ((int16_t)(a) * (int16_t)(b) >> 16) -#define FAVG(a, b) ((a) + (b) + 1) >> 1 +#define FAVG(a, b) (((a) + (b) + 1) >> 1) #endif SSE_HELPER_B(helper_paddb, FADD) @@ -407,7 +417,7 @@ SSE_HELPER_W(helper_pmulhw, FMULHW) SSE_HELPER_B(helper_pavgb, FAVG) SSE_HELPER_W(helper_pavgw, FAVG) -void glue(helper_pmuludq, SUFFIX) (Reg *d, Reg *s) +void glue(helper_pmuludq, SUFFIX)(Reg *d, Reg *s) { d->Q(0) = (uint64_t)s->L(0) * (uint64_t)d->L(0); #if SHIFT == 1 @@ -415,26 +425,27 @@ void glue(helper_pmuludq, SUFFIX) (Reg *d, Reg *s) #endif } -void glue(helper_pmaddwd, SUFFIX) (Reg *d, Reg *s) +void glue(helper_pmaddwd, SUFFIX)(Reg *d, Reg *s) { int i; - for(i = 0; i < (2 << SHIFT); i++) { - d->L(i) = (int16_t)s->W(2*i) * (int16_t)d->W(2*i) + - (int16_t)s->W(2*i+1) * (int16_t)d->W(2*i+1); + for (i = 0; i < (2 << SHIFT); i++) { + d->L(i) = (int16_t)s->W(2 * i) * (int16_t)d->W(2 * i) + + (int16_t)s->W(2 * i + 1) * (int16_t)d->W(2 * i + 1); } } #if SHIFT == 0 static inline int abs1(int a) { - if (a < 0) + if (a < 0) { return -a; - else + } else { return a; + } } #endif -void glue(helper_psadbw, SUFFIX) (Reg *d, Reg *s) +void glue(helper_psadbw, SUFFIX)(Reg *d, Reg *s) { unsigned int val; @@ -462,16 +473,18 @@ void glue(helper_psadbw, SUFFIX) (Reg *d, Reg *s) #endif } -void glue(helper_maskmov, SUFFIX) (Reg *d, Reg *s, target_ulong a0) +void glue(helper_maskmov, SUFFIX)(Reg *d, Reg *s, target_ulong a0) { int i; - for(i = 0; i < (8 << SHIFT); i++) { - if (s->B(i) & 0x80) + + for (i = 0; i < (8 << SHIFT); i++) { + if (s->B(i) & 0x80) { stb(a0 + i, d->B(i)); + } } } -void glue(helper_movl_mm_T0, SUFFIX) (Reg *d, uint32_t val) +void glue(helper_movl_mm_T0, SUFFIX)(Reg *d, uint32_t val) { d->L(0) = val; d->L(1) = 0; @@ -481,7 +494,7 @@ void glue(helper_movl_mm_T0, SUFFIX) (Reg *d, uint32_t val) } #ifdef TARGET_X86_64 -void glue(helper_movq_mm_T0, SUFFIX) (Reg *d, uint64_t val) +void glue(helper_movq_mm_T0, SUFFIX)(Reg *d, uint64_t val) { d->Q(0) = val; #if SHIFT == 1 @@ -491,9 +504,10 @@ void glue(helper_movq_mm_T0, SUFFIX) (Reg *d, uint64_t val) #endif #if SHIFT == 0 -void glue(helper_pshufw, SUFFIX) (Reg *d, Reg *s, int order) +void glue(helper_pshufw, SUFFIX)(Reg *d, Reg *s, int order) { Reg r; + r.W(0) = s->W(order & 3); r.W(1) = s->W((order >> 2) & 3); r.W(2) = s->W((order >> 4) & 3); @@ -504,6 +518,7 @@ void glue(helper_pshufw, SUFFIX) (Reg *d, Reg *s, int order) void helper_shufps(Reg *d, Reg *s, int order) { Reg r; + r.L(0) = 
d->L(order & 3); r.L(1) = d->L((order >> 2) & 3); r.L(2) = s->L((order >> 4) & 3); @@ -514,14 +529,16 @@ void helper_shufps(Reg *d, Reg *s, int order) void helper_shufpd(Reg *d, Reg *s, int order) { Reg r; + r.Q(0) = d->Q(order & 1); r.Q(1) = s->Q((order >> 1) & 1); *d = r; } -void glue(helper_pshufd, SUFFIX) (Reg *d, Reg *s, int order) +void glue(helper_pshufd, SUFFIX)(Reg *d, Reg *s, int order) { Reg r; + r.L(0) = s->L(order & 3); r.L(1) = s->L((order >> 2) & 3); r.L(2) = s->L((order >> 4) & 3); @@ -529,9 +546,10 @@ void glue(helper_pshufd, SUFFIX) (Reg *d, Reg *s, int order) *d = r; } -void glue(helper_pshuflw, SUFFIX) (Reg *d, Reg *s, int order) +void glue(helper_pshuflw, SUFFIX)(Reg *d, Reg *s, int order) { Reg r; + r.W(0) = s->W(order & 3); r.W(1) = s->W((order >> 2) & 3); r.W(2) = s->W((order >> 4) & 3); @@ -540,9 +558,10 @@ void glue(helper_pshuflw, SUFFIX) (Reg *d, Reg *s, int order) *d = r; } -void glue(helper_pshufhw, SUFFIX) (Reg *d, Reg *s, int order) +void glue(helper_pshufhw, SUFFIX)(Reg *d, Reg *s, int order) { Reg r; + r.Q(0) = s->Q(0); r.W(4) = s->W(4 + (order & 3)); r.W(5) = s->W(4 + ((order >> 2) & 3)); @@ -556,29 +575,30 @@ void glue(helper_pshufhw, SUFFIX) (Reg *d, Reg *s, int order) /* FPU ops */ /* XXX: not accurate */ -#define SSE_HELPER_S(name, F)\ -void helper_ ## name ## ps (Reg *d, Reg *s)\ -{\ - d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0));\ - d->XMM_S(1) = F(32, d->XMM_S(1), s->XMM_S(1));\ - d->XMM_S(2) = F(32, d->XMM_S(2), s->XMM_S(2));\ - d->XMM_S(3) = F(32, d->XMM_S(3), s->XMM_S(3));\ -}\ -\ -void helper_ ## name ## ss (Reg *d, Reg *s)\ -{\ - d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0));\ -}\ -void helper_ ## name ## pd (Reg *d, Reg *s)\ -{\ - d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0));\ - d->XMM_D(1) = F(64, d->XMM_D(1), s->XMM_D(1));\ -}\ -\ -void helper_ ## name ## sd (Reg *d, Reg *s)\ -{\ - d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0));\ -} +#define SSE_HELPER_S(name, F) \ + void helper_ ## name ## ps(Reg *d, Reg *s) \ + { \ + d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \ + d->XMM_S(1) = F(32, d->XMM_S(1), s->XMM_S(1)); \ + d->XMM_S(2) = F(32, d->XMM_S(2), s->XMM_S(2)); \ + d->XMM_S(3) = F(32, d->XMM_S(3), s->XMM_S(3)); \ + } \ + \ + void helper_ ## name ## ss(Reg *d, Reg *s) \ + { \ + d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \ + } \ + \ + void helper_ ## name ## pd(Reg *d, Reg *s) \ + { \ + d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \ + d->XMM_D(1) = F(64, d->XMM_D(1), s->XMM_D(1)); \ + } \ + \ + void helper_ ## name ## sd(Reg *d, Reg *s) \ + { \ + d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \ + } #define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status) #define FPU_SUB(size, a, b) float ## size ## _sub(a, b, &env->sse_status) @@ -590,8 +610,10 @@ void helper_ ## name ## sd (Reg *d, Reg *s)\ * special cases right: for min and max Intel specifies that (-0,0), * (NaN, anything) and (anything, NaN) return the second argument. */ -#define FPU_MIN(size, a, b) float ## size ## _lt(a, b, &env->sse_status) ? (a) : (b) -#define FPU_MAX(size, a, b) float ## size ## _lt(b, a, &env->sse_status) ? (a) : (b) +#define FPU_MIN(size, a, b) \ + (float ## size ## _lt(a, b, &env->sse_status) ? (a) : (b)) +#define FPU_MAX(size, a, b) \ + (float ## size ## _lt(b, a, &env->sse_status) ? 
(a) : (b)) SSE_HELPER_S(add, FPU_ADD) SSE_HELPER_S(sub, FPU_SUB) @@ -606,6 +628,7 @@ SSE_HELPER_S(sqrt, FPU_SQRT) void helper_cvtps2pd(Reg *d, Reg *s) { float32 s0, s1; + s0 = s->XMM_S(0); s1 = s->XMM_S(1); d->XMM_D(0) = float32_to_float64(s0, &env->sse_status); @@ -641,6 +664,7 @@ void helper_cvtdq2ps(Reg *d, Reg *s) void helper_cvtdq2pd(Reg *d, Reg *s) { int32_t l0, l1; + l0 = (int32_t)s->XMM_L(0); l1 = (int32_t)s->XMM_L(1); d->XMM_D(0) = int32_to_float64(l0, &env->sse_status); @@ -864,6 +888,7 @@ void helper_insertq_i(XMMReg *d, int index, int length) void helper_haddps(XMMReg *d, XMMReg *s) { XMMReg r; + r.XMM_S(0) = float32_add(d->XMM_S(0), d->XMM_S(1), &env->sse_status); r.XMM_S(1) = float32_add(d->XMM_S(2), d->XMM_S(3), &env->sse_status); r.XMM_S(2) = float32_add(s->XMM_S(0), s->XMM_S(1), &env->sse_status); @@ -874,6 +899,7 @@ void helper_haddps(XMMReg *d, XMMReg *s) void helper_haddpd(XMMReg *d, XMMReg *s) { XMMReg r; + r.XMM_D(0) = float64_add(d->XMM_D(0), d->XMM_D(1), &env->sse_status); r.XMM_D(1) = float64_add(s->XMM_D(0), s->XMM_D(1), &env->sse_status); *d = r; @@ -882,6 +908,7 @@ void helper_haddpd(XMMReg *d, XMMReg *s) void helper_hsubps(XMMReg *d, XMMReg *s) { XMMReg r; + r.XMM_S(0) = float32_sub(d->XMM_S(0), d->XMM_S(1), &env->sse_status); r.XMM_S(1) = float32_sub(d->XMM_S(2), d->XMM_S(3), &env->sse_status); r.XMM_S(2) = float32_sub(s->XMM_S(0), s->XMM_S(1), &env->sse_status); @@ -892,6 +919,7 @@ void helper_hsubps(XMMReg *d, XMMReg *s) void helper_hsubpd(XMMReg *d, XMMReg *s) { XMMReg r; + r.XMM_D(0) = float64_sub(d->XMM_D(0), d->XMM_D(1), &env->sse_status); r.XMM_D(1) = float64_sub(s->XMM_D(0), s->XMM_D(1), &env->sse_status); *d = r; @@ -912,38 +940,47 @@ void helper_addsubpd(XMMReg *d, XMMReg *s) } /* XXX: unordered */ -#define SSE_HELPER_CMP(name, F)\ -void helper_ ## name ## ps (Reg *d, Reg *s)\ -{\ - d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0));\ - d->XMM_L(1) = F(32, d->XMM_S(1), s->XMM_S(1));\ - d->XMM_L(2) = F(32, d->XMM_S(2), s->XMM_S(2));\ - d->XMM_L(3) = F(32, d->XMM_S(3), s->XMM_S(3));\ -}\ -\ -void helper_ ## name ## ss (Reg *d, Reg *s)\ -{\ - d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0));\ -}\ -void helper_ ## name ## pd (Reg *d, Reg *s)\ -{\ - d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0));\ - d->XMM_Q(1) = F(64, d->XMM_D(1), s->XMM_D(1));\ -}\ -\ -void helper_ ## name ## sd (Reg *d, Reg *s)\ -{\ - d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0));\ -} - -#define FPU_CMPEQ(size, a, b) float ## size ## _eq_quiet(a, b, &env->sse_status) ? -1 : 0 -#define FPU_CMPLT(size, a, b) float ## size ## _lt(a, b, &env->sse_status) ? -1 : 0 -#define FPU_CMPLE(size, a, b) float ## size ## _le(a, b, &env->sse_status) ? -1 : 0 -#define FPU_CMPUNORD(size, a, b) float ## size ## _unordered_quiet(a, b, &env->sse_status) ? - 1 : 0 -#define FPU_CMPNEQ(size, a, b) float ## size ## _eq_quiet(a, b, &env->sse_status) ? 0 : -1 -#define FPU_CMPNLT(size, a, b) float ## size ## _lt(a, b, &env->sse_status) ? 0 : -1 -#define FPU_CMPNLE(size, a, b) float ## size ## _le(a, b, &env->sse_status) ? 0 : -1 -#define FPU_CMPORD(size, a, b) float ## size ## _unordered_quiet(a, b, &env->sse_status) ? 
0 : -1 +#define SSE_HELPER_CMP(name, F) \ + void helper_ ## name ## ps(Reg *d, Reg *s) \ + { \ + d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \ + d->XMM_L(1) = F(32, d->XMM_S(1), s->XMM_S(1)); \ + d->XMM_L(2) = F(32, d->XMM_S(2), s->XMM_S(2)); \ + d->XMM_L(3) = F(32, d->XMM_S(3), s->XMM_S(3)); \ + } \ + \ + void helper_ ## name ## ss(Reg *d, Reg *s) \ + { \ + d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \ + } \ + \ + void helper_ ## name ## pd(Reg *d, Reg *s) \ + { \ + d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \ + d->XMM_Q(1) = F(64, d->XMM_D(1), s->XMM_D(1)); \ + } \ + \ + void helper_ ## name ## sd(Reg *d, Reg *s) \ + { \ + d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \ + } + +#define FPU_CMPEQ(size, a, b) \ + (float ## size ## _eq_quiet(a, b, &env->sse_status) ? -1 : 0) +#define FPU_CMPLT(size, a, b) \ + (float ## size ## _lt(a, b, &env->sse_status) ? -1 : 0) +#define FPU_CMPLE(size, a, b) \ + (float ## size ## _le(a, b, &env->sse_status) ? -1 : 0) +#define FPU_CMPUNORD(size, a, b) \ + (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? -1 : 0) +#define FPU_CMPNEQ(size, a, b) \ + (float ## size ## _eq_quiet(a, b, &env->sse_status) ? 0 : -1) +#define FPU_CMPNLT(size, a, b) \ + (float ## size ## _lt(a, b, &env->sse_status) ? 0 : -1) +#define FPU_CMPNLE(size, a, b) \ + (float ## size ## _le(a, b, &env->sse_status) ? 0 : -1) +#define FPU_CMPORD(size, a, b) \ + (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? 0 : -1) SSE_HELPER_CMP(cmpeq, FPU_CMPEQ) SSE_HELPER_CMP(cmplt, FPU_CMPLT) @@ -1003,6 +1040,7 @@ void helper_comisd(Reg *d, Reg *s) uint32_t helper_movmskps(Reg *s) { int b0, b1, b2, b3; + b0 = s->XMM_L(0) >> 31; b1 = s->XMM_L(1) >> 31; b2 = s->XMM_L(2) >> 31; @@ -1013,6 +1051,7 @@ uint32_t helper_movmskps(Reg *s) uint32_t helper_movmskpd(Reg *s) { int b0, b1; + b0 = s->XMM_L(1) >> 31; b1 = s->XMM_L(3) >> 31; return b0 | (b1 << 1); @@ -1023,6 +1062,7 @@ uint32_t helper_movmskpd(Reg *s) uint32_t glue(helper_pmovmskb, SUFFIX)(Reg *s) { uint32_t val; + val = 0; val |= (s->B(0) >> 7); val |= (s->B(1) >> 6) & 0x02; @@ -1045,7 +1085,7 @@ uint32_t glue(helper_pmovmskb, SUFFIX)(Reg *s) return val; } -void glue(helper_packsswb, SUFFIX) (Reg *d, Reg *s) +void glue(helper_packsswb, SUFFIX)(Reg *d, Reg *s) { Reg r; @@ -1072,7 +1112,7 @@ void glue(helper_packsswb, SUFFIX) (Reg *d, Reg *s) *d = r; } -void glue(helper_packuswb, SUFFIX) (Reg *d, Reg *s) +void glue(helper_packuswb, SUFFIX)(Reg *d, Reg *s) { Reg r; @@ -1099,7 +1139,7 @@ void glue(helper_packuswb, SUFFIX) (Reg *d, Reg *s) *d = r; } -void glue(helper_packssdw, SUFFIX) (Reg *d, Reg *s) +void glue(helper_packssdw, SUFFIX)(Reg *d, Reg *s) { Reg r; @@ -1118,73 +1158,74 @@ void glue(helper_packssdw, SUFFIX) (Reg *d, Reg *s) *d = r; } -#define UNPCK_OP(base_name, base) \ - \ -void glue(helper_punpck ## base_name ## bw, SUFFIX) (Reg *d, Reg *s) \ -{ \ - Reg r; \ - \ - r.B(0) = d->B((base << (SHIFT + 2)) + 0); \ - r.B(1) = s->B((base << (SHIFT + 2)) + 0); \ - r.B(2) = d->B((base << (SHIFT + 2)) + 1); \ - r.B(3) = s->B((base << (SHIFT + 2)) + 1); \ - r.B(4) = d->B((base << (SHIFT + 2)) + 2); \ - r.B(5) = s->B((base << (SHIFT + 2)) + 2); \ - r.B(6) = d->B((base << (SHIFT + 2)) + 3); \ - r.B(7) = s->B((base << (SHIFT + 2)) + 3); \ -XMM_ONLY( \ - r.B(8) = d->B((base << (SHIFT + 2)) + 4); \ - r.B(9) = s->B((base << (SHIFT + 2)) + 4); \ - r.B(10) = d->B((base << (SHIFT + 2)) + 5); \ - r.B(11) = s->B((base << (SHIFT + 2)) + 5); \ - r.B(12) = d->B((base << (SHIFT + 2)) + 6); \ - r.B(13) = s->B((base << (SHIFT + 2)) + 6); \ - 
r.B(14) = d->B((base << (SHIFT + 2)) + 7); \ - r.B(15) = s->B((base << (SHIFT + 2)) + 7); \ -) \ - *d = r; \ -} \ - \ -void glue(helper_punpck ## base_name ## wd, SUFFIX) (Reg *d, Reg *s) \ -{ \ - Reg r; \ - \ - r.W(0) = d->W((base << (SHIFT + 1)) + 0); \ - r.W(1) = s->W((base << (SHIFT + 1)) + 0); \ - r.W(2) = d->W((base << (SHIFT + 1)) + 1); \ - r.W(3) = s->W((base << (SHIFT + 1)) + 1); \ -XMM_ONLY( \ - r.W(4) = d->W((base << (SHIFT + 1)) + 2); \ - r.W(5) = s->W((base << (SHIFT + 1)) + 2); \ - r.W(6) = d->W((base << (SHIFT + 1)) + 3); \ - r.W(7) = s->W((base << (SHIFT + 1)) + 3); \ -) \ - *d = r; \ -} \ - \ -void glue(helper_punpck ## base_name ## dq, SUFFIX) (Reg *d, Reg *s) \ -{ \ - Reg r; \ - \ - r.L(0) = d->L((base << SHIFT) + 0); \ - r.L(1) = s->L((base << SHIFT) + 0); \ -XMM_ONLY( \ - r.L(2) = d->L((base << SHIFT) + 1); \ - r.L(3) = s->L((base << SHIFT) + 1); \ -) \ - *d = r; \ -} \ - \ -XMM_ONLY( \ -void glue(helper_punpck ## base_name ## qdq, SUFFIX) (Reg *d, Reg *s) \ -{ \ - Reg r; \ - \ - r.Q(0) = d->Q(base); \ - r.Q(1) = s->Q(base); \ - *d = r; \ -} \ -) +#define UNPCK_OP(base_name, base) \ + \ + void glue(helper_punpck ## base_name ## bw, SUFFIX)(Reg *d, Reg *s) \ + { \ + Reg r; \ + \ + r.B(0) = d->B((base << (SHIFT + 2)) + 0); \ + r.B(1) = s->B((base << (SHIFT + 2)) + 0); \ + r.B(2) = d->B((base << (SHIFT + 2)) + 1); \ + r.B(3) = s->B((base << (SHIFT + 2)) + 1); \ + r.B(4) = d->B((base << (SHIFT + 2)) + 2); \ + r.B(5) = s->B((base << (SHIFT + 2)) + 2); \ + r.B(6) = d->B((base << (SHIFT + 2)) + 3); \ + r.B(7) = s->B((base << (SHIFT + 2)) + 3); \ + XMM_ONLY( \ + r.B(8) = d->B((base << (SHIFT + 2)) + 4); \ + r.B(9) = s->B((base << (SHIFT + 2)) + 4); \ + r.B(10) = d->B((base << (SHIFT + 2)) + 5); \ + r.B(11) = s->B((base << (SHIFT + 2)) + 5); \ + r.B(12) = d->B((base << (SHIFT + 2)) + 6); \ + r.B(13) = s->B((base << (SHIFT + 2)) + 6); \ + r.B(14) = d->B((base << (SHIFT + 2)) + 7); \ + r.B(15) = s->B((base << (SHIFT + 2)) + 7); \ + ) \ + *d = r; \ + } \ + \ + void glue(helper_punpck ## base_name ## wd, SUFFIX)(Reg *d, Reg *s) \ + { \ + Reg r; \ + \ + r.W(0) = d->W((base << (SHIFT + 1)) + 0); \ + r.W(1) = s->W((base << (SHIFT + 1)) + 0); \ + r.W(2) = d->W((base << (SHIFT + 1)) + 1); \ + r.W(3) = s->W((base << (SHIFT + 1)) + 1); \ + XMM_ONLY( \ + r.W(4) = d->W((base << (SHIFT + 1)) + 2); \ + r.W(5) = s->W((base << (SHIFT + 1)) + 2); \ + r.W(6) = d->W((base << (SHIFT + 1)) + 3); \ + r.W(7) = s->W((base << (SHIFT + 1)) + 3); \ + ) \ + *d = r; \ + } \ + \ + void glue(helper_punpck ## base_name ## dq, SUFFIX)(Reg *d, Reg *s) \ + { \ + Reg r; \ + \ + r.L(0) = d->L((base << SHIFT) + 0); \ + r.L(1) = s->L((base << SHIFT) + 0); \ + XMM_ONLY( \ + r.L(2) = d->L((base << SHIFT) + 1); \ + r.L(3) = s->L((base << SHIFT) + 1); \ + ) \ + *d = r; \ + } \ + \ + XMM_ONLY( \ + void glue(helper_punpck ## base_name ## qdq, SUFFIX)(Reg *d, \ + Reg *s) \ + { \ + Reg r; \ + \ + r.Q(0) = d->Q(base); \ + r.Q(1) = s->Q(base); \ + *d = r; \ + } \ + ) UNPCK_OP(l, 0) UNPCK_OP(h, 1) @@ -1211,13 +1252,16 @@ void helper_pf2id(MMXReg *d, MMXReg *s) void helper_pf2iw(MMXReg *d, MMXReg *s) { - d->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status)); - d->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status)); + d->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s->MMX_S(0), + &env->mmx_status)); + d->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s->MMX_S(1), + &env->mmx_status)); } void helper_pfacc(MMXReg *d, MMXReg *s) { MMXReg r; + r.MMX_S(0) = 
float32_add(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); *d = r; @@ -1231,36 +1275,46 @@ void helper_pfadd(MMXReg *d, MMXReg *s) void helper_pfcmpeq(MMXReg *d, MMXReg *s) { - d->MMX_L(0) = float32_eq_quiet(d->MMX_S(0), s->MMX_S(0), &env->mmx_status) ? -1 : 0; - d->MMX_L(1) = float32_eq_quiet(d->MMX_S(1), s->MMX_S(1), &env->mmx_status) ? -1 : 0; + d->MMX_L(0) = float32_eq_quiet(d->MMX_S(0), s->MMX_S(0), + &env->mmx_status) ? -1 : 0; + d->MMX_L(1) = float32_eq_quiet(d->MMX_S(1), s->MMX_S(1), + &env->mmx_status) ? -1 : 0; } void helper_pfcmpge(MMXReg *d, MMXReg *s) { - d->MMX_L(0) = float32_le(s->MMX_S(0), d->MMX_S(0), &env->mmx_status) ? -1 : 0; - d->MMX_L(1) = float32_le(s->MMX_S(1), d->MMX_S(1), &env->mmx_status) ? -1 : 0; + d->MMX_L(0) = float32_le(s->MMX_S(0), d->MMX_S(0), + &env->mmx_status) ? -1 : 0; + d->MMX_L(1) = float32_le(s->MMX_S(1), d->MMX_S(1), + &env->mmx_status) ? -1 : 0; } void helper_pfcmpgt(MMXReg *d, MMXReg *s) { - d->MMX_L(0) = float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status) ? -1 : 0; - d->MMX_L(1) = float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status) ? -1 : 0; + d->MMX_L(0) = float32_lt(s->MMX_S(0), d->MMX_S(0), + &env->mmx_status) ? -1 : 0; + d->MMX_L(1) = float32_lt(s->MMX_S(1), d->MMX_S(1), + &env->mmx_status) ? -1 : 0; } void helper_pfmax(MMXReg *d, MMXReg *s) { - if (float32_lt(d->MMX_S(0), s->MMX_S(0), &env->mmx_status)) + if (float32_lt(d->MMX_S(0), s->MMX_S(0), &env->mmx_status)) { d->MMX_S(0) = s->MMX_S(0); - if (float32_lt(d->MMX_S(1), s->MMX_S(1), &env->mmx_status)) + } + if (float32_lt(d->MMX_S(1), s->MMX_S(1), &env->mmx_status)) { d->MMX_S(1) = s->MMX_S(1); + } } void helper_pfmin(MMXReg *d, MMXReg *s) { - if (float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status)) + if (float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status)) { d->MMX_S(0) = s->MMX_S(0); - if (float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status)) + } + if (float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status)) { d->MMX_S(1) = s->MMX_S(1); + } } void helper_pfmul(MMXReg *d, MMXReg *s) @@ -1272,6 +1326,7 @@ void helper_pfmul(MMXReg *d, MMXReg *s) void helper_pfnacc(MMXReg *d, MMXReg *s) { MMXReg r; + r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); r.MMX_S(1) = float32_sub(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); *d = r; @@ -1280,6 +1335,7 @@ void helper_pfnacc(MMXReg *d, MMXReg *s) void helper_pfpnacc(MMXReg *d, MMXReg *s) { MMXReg r; + r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); *d = r; @@ -1316,6 +1372,7 @@ void helper_pfsubr(MMXReg *d, MMXReg *s) void helper_pswapd(MMXReg *d, MMXReg *s) { MMXReg r; + r.MMX_L(0) = s->MMX_L(1); r.MMX_L(1) = s->MMX_L(0); *d = r; @@ -1323,18 +1380,19 @@ void helper_pswapd(MMXReg *d, MMXReg *s) #endif /* SSSE3 op helpers */ -void glue(helper_pshufb, SUFFIX) (Reg *d, Reg *s) +void glue(helper_pshufb, SUFFIX)(Reg *d, Reg *s) { int i; Reg r; - for (i = 0; i < (8 << SHIFT); i++) + for (i = 0; i < (8 << SHIFT); i++) { r.B(i) = (s->B(i) & 0x80) ? 
0 : (d->B(s->B(i) & ((8 << SHIFT) - 1))); + } *d = r; } -void glue(helper_phaddw, SUFFIX) (Reg *d, Reg *s) +void glue(helper_phaddw, SUFFIX)(Reg *d, Reg *s) { d->W(0) = (int16_t)d->W(0) + (int16_t)d->W(1); d->W(1) = (int16_t)d->W(2) + (int16_t)d->W(3); @@ -1346,7 +1404,7 @@ void glue(helper_phaddw, SUFFIX) (Reg *d, Reg *s) XMM_ONLY(d->W(7) = (int16_t)s->W(6) + (int16_t)s->W(7)); } -void glue(helper_phaddd, SUFFIX) (Reg *d, Reg *s) +void glue(helper_phaddd, SUFFIX)(Reg *d, Reg *s) { d->L(0) = (int32_t)d->L(0) + (int32_t)d->L(1); XMM_ONLY(d->L(1) = (int32_t)d->L(2) + (int32_t)d->L(3)); @@ -1354,7 +1412,7 @@ void glue(helper_phaddd, SUFFIX) (Reg *d, Reg *s) XMM_ONLY(d->L(3) = (int32_t)s->L(2) + (int32_t)s->L(3)); } -void glue(helper_phaddsw, SUFFIX) (Reg *d, Reg *s) +void glue(helper_phaddsw, SUFFIX)(Reg *d, Reg *s) { d->W(0) = satsw((int16_t)d->W(0) + (int16_t)d->W(1)); d->W(1) = satsw((int16_t)d->W(2) + (int16_t)d->W(3)); @@ -1366,19 +1424,19 @@ void glue(helper_phaddsw, SUFFIX) (Reg *d, Reg *s) XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) + (int16_t)s->W(7))); } -void glue(helper_pmaddubsw, SUFFIX) (Reg *d, Reg *s) +void glue(helper_pmaddubsw, SUFFIX)(Reg *d, Reg *s) { - d->W(0) = satsw((int8_t)s->B( 0) * (uint8_t)d->B( 0) + - (int8_t)s->B( 1) * (uint8_t)d->B( 1)); - d->W(1) = satsw((int8_t)s->B( 2) * (uint8_t)d->B( 2) + - (int8_t)s->B( 3) * (uint8_t)d->B( 3)); - d->W(2) = satsw((int8_t)s->B( 4) * (uint8_t)d->B( 4) + - (int8_t)s->B( 5) * (uint8_t)d->B( 5)); - d->W(3) = satsw((int8_t)s->B( 6) * (uint8_t)d->B( 6) + - (int8_t)s->B( 7) * (uint8_t)d->B( 7)); + d->W(0) = satsw((int8_t)s->B(0) * (uint8_t)d->B(0) + + (int8_t)s->B(1) * (uint8_t)d->B(1)); + d->W(1) = satsw((int8_t)s->B(2) * (uint8_t)d->B(2) + + (int8_t)s->B(3) * (uint8_t)d->B(3)); + d->W(2) = satsw((int8_t)s->B(4) * (uint8_t)d->B(4) + + (int8_t)s->B(5) * (uint8_t)d->B(5)); + d->W(3) = satsw((int8_t)s->B(6) * (uint8_t)d->B(6) + + (int8_t)s->B(7) * (uint8_t)d->B(7)); #if SHIFT == 1 - d->W(4) = satsw((int8_t)s->B( 8) * (uint8_t)d->B( 8) + - (int8_t)s->B( 9) * (uint8_t)d->B( 9)); + d->W(4) = satsw((int8_t)s->B(8) * (uint8_t)d->B(8) + + (int8_t)s->B(9) * (uint8_t)d->B(9)); d->W(5) = satsw((int8_t)s->B(10) * (uint8_t)d->B(10) + (int8_t)s->B(11) * (uint8_t)d->B(11)); d->W(6) = satsw((int8_t)s->B(12) * (uint8_t)d->B(12) + @@ -1388,7 +1446,7 @@ void glue(helper_pmaddubsw, SUFFIX) (Reg *d, Reg *s) #endif } -void glue(helper_phsubw, SUFFIX) (Reg *d, Reg *s) +void glue(helper_phsubw, SUFFIX)(Reg *d, Reg *s) { d->W(0) = (int16_t)d->W(0) - (int16_t)d->W(1); d->W(1) = (int16_t)d->W(2) - (int16_t)d->W(3); @@ -1400,7 +1458,7 @@ void glue(helper_phsubw, SUFFIX) (Reg *d, Reg *s) XMM_ONLY(d->W(7) = (int16_t)s->W(6) - (int16_t)s->W(7)); } -void glue(helper_phsubd, SUFFIX) (Reg *d, Reg *s) +void glue(helper_phsubd, SUFFIX)(Reg *d, Reg *s) { d->L(0) = (int32_t)d->L(0) - (int32_t)d->L(1); XMM_ONLY(d->L(1) = (int32_t)d->L(2) - (int32_t)d->L(3)); @@ -1408,7 +1466,7 @@ void glue(helper_phsubd, SUFFIX) (Reg *d, Reg *s) XMM_ONLY(d->L(3) = (int32_t)s->L(2) - (int32_t)s->L(3)); } -void glue(helper_phsubsw, SUFFIX) (Reg *d, Reg *s) +void glue(helper_phsubsw, SUFFIX)(Reg *d, Reg *s) { d->W(0) = satsw((int16_t)d->W(0) - (int16_t)d->W(1)); d->W(1) = satsw((int16_t)d->W(2) - (int16_t)d->W(3)); @@ -1420,24 +1478,24 @@ void glue(helper_phsubsw, SUFFIX) (Reg *d, Reg *s) XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) - (int16_t)s->W(7))); } -#define FABSB(_, x) x > INT8_MAX ? -(int8_t ) x : x -#define FABSW(_, x) x > INT16_MAX ? 
-(int16_t) x : x -#define FABSL(_, x) x > INT32_MAX ? -(int32_t) x : x +#define FABSB(_, x) (x > INT8_MAX ? -(int8_t)x : x) +#define FABSW(_, x) (x > INT16_MAX ? -(int16_t)x : x) +#define FABSL(_, x) (x > INT32_MAX ? -(int32_t)x : x) SSE_HELPER_B(helper_pabsb, FABSB) SSE_HELPER_W(helper_pabsw, FABSW) SSE_HELPER_L(helper_pabsd, FABSL) -#define FMULHRSW(d, s) ((int16_t) d * (int16_t) s + 0x4000) >> 15 +#define FMULHRSW(d, s) (((int16_t) d * (int16_t)s + 0x4000) >> 15) SSE_HELPER_W(helper_pmulhrsw, FMULHRSW) -#define FSIGNB(d, s) s <= INT8_MAX ? s ? d : 0 : -(int8_t ) d -#define FSIGNW(d, s) s <= INT16_MAX ? s ? d : 0 : -(int16_t) d -#define FSIGNL(d, s) s <= INT32_MAX ? s ? d : 0 : -(int32_t) d +#define FSIGNB(d, s) (s <= INT8_MAX ? s ? d : 0 : -(int8_t)d) +#define FSIGNW(d, s) (s <= INT16_MAX ? s ? d : 0 : -(int16_t)d) +#define FSIGNL(d, s) (s <= INT32_MAX ? s ? d : 0 : -(int32_t)d) SSE_HELPER_B(helper_psignb, FSIGNB) SSE_HELPER_W(helper_psignw, FSIGNW) SSE_HELPER_L(helper_psignd, FSIGNL) -void glue(helper_palignr, SUFFIX) (Reg *d, Reg *s, int32_t shift) +void glue(helper_palignr, SUFFIX)(Reg *d, Reg *s, int32_t shift) { Reg r; @@ -1449,17 +1507,17 @@ void glue(helper_palignr, SUFFIX) (Reg *d, Reg *s, int32_t shift) shift <<= 3; #define SHR(v, i) (i < 64 && i > -64 ? i > 0 ? v >> (i) : (v << -(i)) : 0) #if SHIFT == 0 - r.Q(0) = SHR(s->Q(0), shift - 0) | - SHR(d->Q(0), shift - 64); + r.Q(0) = SHR(s->Q(0), shift - 0) | + SHR(d->Q(0), shift - 64); #else - r.Q(0) = SHR(s->Q(0), shift - 0) | - SHR(s->Q(1), shift - 64) | - SHR(d->Q(0), shift - 128) | - SHR(d->Q(1), shift - 192); - r.Q(1) = SHR(s->Q(0), shift + 64) | - SHR(s->Q(1), shift - 0) | - SHR(d->Q(0), shift - 64) | - SHR(d->Q(1), shift - 128); + r.Q(0) = SHR(s->Q(0), shift - 0) | + SHR(s->Q(1), shift - 64) | + SHR(d->Q(0), shift - 128) | + SHR(d->Q(1), shift - 192); + r.Q(1) = SHR(s->Q(0), shift + 64) | + SHR(s->Q(1), shift - 0) | + SHR(d->Q(0), shift - 64) | + SHR(d->Q(1), shift - 128); #endif #undef SHR } @@ -1467,72 +1525,78 @@ void glue(helper_palignr, SUFFIX) (Reg *d, Reg *s, int32_t shift) *d = r; } -#define XMM0 env->xmm_regs[0] +#define XMM0 (env->xmm_regs[0]) #if SHIFT == 1 -#define SSE_HELPER_V(name, elem, num, F)\ -void glue(name, SUFFIX) (Reg *d, Reg *s)\ -{\ - d->elem(0) = F(d->elem(0), s->elem(0), XMM0.elem(0));\ - d->elem(1) = F(d->elem(1), s->elem(1), XMM0.elem(1));\ - if (num > 2) {\ - d->elem(2) = F(d->elem(2), s->elem(2), XMM0.elem(2));\ - d->elem(3) = F(d->elem(3), s->elem(3), XMM0.elem(3));\ - if (num > 4) {\ - d->elem(4) = F(d->elem(4), s->elem(4), XMM0.elem(4));\ - d->elem(5) = F(d->elem(5), s->elem(5), XMM0.elem(5));\ - d->elem(6) = F(d->elem(6), s->elem(6), XMM0.elem(6));\ - d->elem(7) = F(d->elem(7), s->elem(7), XMM0.elem(7));\ - if (num > 8) {\ - d->elem(8) = F(d->elem(8), s->elem(8), XMM0.elem(8));\ - d->elem(9) = F(d->elem(9), s->elem(9), XMM0.elem(9));\ - d->elem(10) = F(d->elem(10), s->elem(10), XMM0.elem(10));\ - d->elem(11) = F(d->elem(11), s->elem(11), XMM0.elem(11));\ - d->elem(12) = F(d->elem(12), s->elem(12), XMM0.elem(12));\ - d->elem(13) = F(d->elem(13), s->elem(13), XMM0.elem(13));\ - d->elem(14) = F(d->elem(14), s->elem(14), XMM0.elem(14));\ - d->elem(15) = F(d->elem(15), s->elem(15), XMM0.elem(15));\ - }\ - }\ - }\ -} - -#define SSE_HELPER_I(name, elem, num, F)\ -void glue(name, SUFFIX) (Reg *d, Reg *s, uint32_t imm)\ -{\ - d->elem(0) = F(d->elem(0), s->elem(0), ((imm >> 0) & 1));\ - d->elem(1) = F(d->elem(1), s->elem(1), ((imm >> 1) & 1));\ - if (num > 2) {\ - d->elem(2) = F(d->elem(2), 
s->elem(2), ((imm >> 2) & 1));\ - d->elem(3) = F(d->elem(3), s->elem(3), ((imm >> 3) & 1));\ - if (num > 4) {\ - d->elem(4) = F(d->elem(4), s->elem(4), ((imm >> 4) & 1));\ - d->elem(5) = F(d->elem(5), s->elem(5), ((imm >> 5) & 1));\ - d->elem(6) = F(d->elem(6), s->elem(6), ((imm >> 6) & 1));\ - d->elem(7) = F(d->elem(7), s->elem(7), ((imm >> 7) & 1));\ - if (num > 8) {\ - d->elem(8) = F(d->elem(8), s->elem(8), ((imm >> 8) & 1));\ - d->elem(9) = F(d->elem(9), s->elem(9), ((imm >> 9) & 1));\ - d->elem(10) = F(d->elem(10), s->elem(10), ((imm >> 10) & 1));\ - d->elem(11) = F(d->elem(11), s->elem(11), ((imm >> 11) & 1));\ - d->elem(12) = F(d->elem(12), s->elem(12), ((imm >> 12) & 1));\ - d->elem(13) = F(d->elem(13), s->elem(13), ((imm >> 13) & 1));\ - d->elem(14) = F(d->elem(14), s->elem(14), ((imm >> 14) & 1));\ - d->elem(15) = F(d->elem(15), s->elem(15), ((imm >> 15) & 1));\ - }\ - }\ - }\ -} +#define SSE_HELPER_V(name, elem, num, F) \ + void glue(name, SUFFIX)(Reg *d, Reg *s) \ + { \ + d->elem(0) = F(d->elem(0), s->elem(0), XMM0.elem(0)); \ + d->elem(1) = F(d->elem(1), s->elem(1), XMM0.elem(1)); \ + if (num > 2) { \ + d->elem(2) = F(d->elem(2), s->elem(2), XMM0.elem(2)); \ + d->elem(3) = F(d->elem(3), s->elem(3), XMM0.elem(3)); \ + if (num > 4) { \ + d->elem(4) = F(d->elem(4), s->elem(4), XMM0.elem(4)); \ + d->elem(5) = F(d->elem(5), s->elem(5), XMM0.elem(5)); \ + d->elem(6) = F(d->elem(6), s->elem(6), XMM0.elem(6)); \ + d->elem(7) = F(d->elem(7), s->elem(7), XMM0.elem(7)); \ + if (num > 8) { \ + d->elem(8) = F(d->elem(8), s->elem(8), XMM0.elem(8)); \ + d->elem(9) = F(d->elem(9), s->elem(9), XMM0.elem(9)); \ + d->elem(10) = F(d->elem(10), s->elem(10), XMM0.elem(10)); \ + d->elem(11) = F(d->elem(11), s->elem(11), XMM0.elem(11)); \ + d->elem(12) = F(d->elem(12), s->elem(12), XMM0.elem(12)); \ + d->elem(13) = F(d->elem(13), s->elem(13), XMM0.elem(13)); \ + d->elem(14) = F(d->elem(14), s->elem(14), XMM0.elem(14)); \ + d->elem(15) = F(d->elem(15), s->elem(15), XMM0.elem(15)); \ + } \ + } \ + } \ + } + +#define SSE_HELPER_I(name, elem, num, F) \ + void glue(name, SUFFIX)(Reg *d, Reg *s, uint32_t imm) \ + { \ + d->elem(0) = F(d->elem(0), s->elem(0), ((imm >> 0) & 1)); \ + d->elem(1) = F(d->elem(1), s->elem(1), ((imm >> 1) & 1)); \ + if (num > 2) { \ + d->elem(2) = F(d->elem(2), s->elem(2), ((imm >> 2) & 1)); \ + d->elem(3) = F(d->elem(3), s->elem(3), ((imm >> 3) & 1)); \ + if (num > 4) { \ + d->elem(4) = F(d->elem(4), s->elem(4), ((imm >> 4) & 1)); \ + d->elem(5) = F(d->elem(5), s->elem(5), ((imm >> 5) & 1)); \ + d->elem(6) = F(d->elem(6), s->elem(6), ((imm >> 6) & 1)); \ + d->elem(7) = F(d->elem(7), s->elem(7), ((imm >> 7) & 1)); \ + if (num > 8) { \ + d->elem(8) = F(d->elem(8), s->elem(8), ((imm >> 8) & 1)); \ + d->elem(9) = F(d->elem(9), s->elem(9), ((imm >> 9) & 1)); \ + d->elem(10) = F(d->elem(10), s->elem(10), \ + ((imm >> 10) & 1)); \ + d->elem(11) = F(d->elem(11), s->elem(11), \ + ((imm >> 11) & 1)); \ + d->elem(12) = F(d->elem(12), s->elem(12), \ + ((imm >> 12) & 1)); \ + d->elem(13) = F(d->elem(13), s->elem(13), \ + ((imm >> 13) & 1)); \ + d->elem(14) = F(d->elem(14), s->elem(14), \ + ((imm >> 14) & 1)); \ + d->elem(15) = F(d->elem(15), s->elem(15), \ + ((imm >> 15) & 1)); \ + } \ + } \ + } \ + } /* SSE4.1 op helpers */ -#define FBLENDVB(d, s, m) (m & 0x80) ? s : d -#define FBLENDVPS(d, s, m) (m & 0x80000000) ? s : d -#define FBLENDVPD(d, s, m) (m & 0x8000000000000000LL) ? s : d +#define FBLENDVB(d, s, m) ((m & 0x80) ? s : d) +#define FBLENDVPS(d, s, m) ((m & 0x80000000) ? 
s : d) +#define FBLENDVPD(d, s, m) ((m & 0x8000000000000000LL) ? s : d) SSE_HELPER_V(helper_pblendvb, B, 16, FBLENDVB) SSE_HELPER_V(helper_blendvps, L, 4, FBLENDVPS) SSE_HELPER_V(helper_blendvpd, Q, 2, FBLENDVPD) -void glue(helper_ptest, SUFFIX) (Reg *d, Reg *s) +void glue(helper_ptest, SUFFIX)(Reg *d, Reg *s) { uint64_t zf = (s->Q(0) & d->Q(0)) | (s->Q(1) & d->Q(1)); uint64_t cf = (s->Q(0) & ~d->Q(0)) | (s->Q(1) & ~d->Q(1)); @@ -1540,22 +1604,22 @@ void glue(helper_ptest, SUFFIX) (Reg *d, Reg *s) CC_SRC = (zf ? 0 : CC_Z) | (cf ? 0 : CC_C); } -#define SSE_HELPER_F(name, elem, num, F)\ -void glue(name, SUFFIX) (Reg *d, Reg *s)\ -{\ - d->elem(0) = F(0);\ - d->elem(1) = F(1);\ - if (num > 2) {\ - d->elem(2) = F(2);\ - d->elem(3) = F(3);\ - if (num > 4) {\ - d->elem(4) = F(4);\ - d->elem(5) = F(5);\ - d->elem(6) = F(6);\ - d->elem(7) = F(7);\ - }\ - }\ -} +#define SSE_HELPER_F(name, elem, num, F) \ + void glue(name, SUFFIX)(Reg *d, Reg *s) \ + { \ + d->elem(0) = F(0); \ + d->elem(1) = F(1); \ + if (num > 2) { \ + d->elem(2) = F(2); \ + d->elem(3) = F(3); \ + if (num > 4) { \ + d->elem(4) = F(4); \ + d->elem(5) = F(5); \ + d->elem(6) = F(6); \ + d->elem(7) = F(7); \ + } \ + } \ + } SSE_HELPER_F(helper_pmovsxbw, W, 8, (int8_t) s->B) SSE_HELPER_F(helper_pmovsxbd, L, 4, (int8_t) s->B) @@ -1570,16 +1634,16 @@ SSE_HELPER_F(helper_pmovzxwd, L, 4, s->W) SSE_HELPER_F(helper_pmovzxwq, Q, 2, s->W) SSE_HELPER_F(helper_pmovzxdq, Q, 2, s->L) -void glue(helper_pmuldq, SUFFIX) (Reg *d, Reg *s) +void glue(helper_pmuldq, SUFFIX)(Reg *d, Reg *s) { - d->Q(0) = (int64_t) (int32_t) d->L(0) * (int32_t) s->L(0); - d->Q(1) = (int64_t) (int32_t) d->L(2) * (int32_t) s->L(2); + d->Q(0) = (int64_t)(int32_t) d->L(0) * (int32_t) s->L(0); + d->Q(1) = (int64_t)(int32_t) d->L(2) * (int32_t) s->L(2); } -#define FCMPEQQ(d, s) d == s ? -1 : 0 +#define FCMPEQQ(d, s) (d == s ? 
-1 : 0) SSE_HELPER_Q(helper_pcmpeqq, FCMPEQQ) -void glue(helper_packusdw, SUFFIX) (Reg *d, Reg *s) +void glue(helper_packusdw, SUFFIX)(Reg *d, Reg *s) { d->W(0) = satuw((int32_t) d->L(0)); d->W(1) = satuw((int32_t) d->L(1)); @@ -1591,10 +1655,10 @@ void glue(helper_packusdw, SUFFIX) (Reg *d, Reg *s) d->W(7) = satuw((int32_t) s->L(3)); } -#define FMINSB(d, s) MIN((int8_t) d, (int8_t) s) -#define FMINSD(d, s) MIN((int32_t) d, (int32_t) s) -#define FMAXSB(d, s) MAX((int8_t) d, (int8_t) s) -#define FMAXSD(d, s) MAX((int32_t) d, (int32_t) s) +#define FMINSB(d, s) MIN((int8_t)d, (int8_t)s) +#define FMINSD(d, s) MIN((int32_t)d, (int32_t)s) +#define FMAXSB(d, s) MAX((int8_t)d, (int8_t)s) +#define FMAXSD(d, s) MAX((int32_t)d, (int32_t)s) SSE_HELPER_B(helper_pminsb, FMINSB) SSE_HELPER_L(helper_pminsd, FMINSD) SSE_HELPER_W(helper_pminuw, MIN) @@ -1604,27 +1668,34 @@ SSE_HELPER_L(helper_pmaxsd, FMAXSD) SSE_HELPER_W(helper_pmaxuw, MAX) SSE_HELPER_L(helper_pmaxud, MAX) -#define FMULLD(d, s) (int32_t) d * (int32_t) s +#define FMULLD(d, s) ((int32_t)d * (int32_t)s) SSE_HELPER_L(helper_pmulld, FMULLD) -void glue(helper_phminposuw, SUFFIX) (Reg *d, Reg *s) +void glue(helper_phminposuw, SUFFIX)(Reg *d, Reg *s) { int idx = 0; - if (s->W(1) < s->W(idx)) + if (s->W(1) < s->W(idx)) { idx = 1; - if (s->W(2) < s->W(idx)) + } + if (s->W(2) < s->W(idx)) { idx = 2; - if (s->W(3) < s->W(idx)) + } + if (s->W(3) < s->W(idx)) { idx = 3; - if (s->W(4) < s->W(idx)) + } + if (s->W(4) < s->W(idx)) { idx = 4; - if (s->W(5) < s->W(idx)) + } + if (s->W(5) < s->W(idx)) { idx = 5; - if (s->W(6) < s->W(idx)) + } + if (s->W(6) < s->W(idx)) { idx = 6; - if (s->W(7) < s->W(idx)) + } + if (s->W(7) < s->W(idx)) { idx = 7; + } d->Q(1) = 0; d->L(1) = 0; @@ -1632,12 +1703,12 @@ void glue(helper_phminposuw, SUFFIX) (Reg *d, Reg *s) d->W(0) = s->W(idx); } -void glue(helper_roundps, SUFFIX) (Reg *d, Reg *s, uint32_t mode) +void glue(helper_roundps, SUFFIX)(Reg *d, Reg *s, uint32_t mode) { signed char prev_rounding_mode; prev_rounding_mode = env->sse_status.float_rounding_mode; - if (!(mode & (1 << 2))) + if (!(mode & (1 << 2))) { switch (mode & 3) { case 0: set_float_rounding_mode(float_round_nearest_even, &env->sse_status); @@ -1652,6 +1723,7 @@ void glue(helper_roundps, SUFFIX) (Reg *d, Reg *s, uint32_t mode) set_float_rounding_mode(float_round_to_zero, &env->sse_status); break; } + } d->XMM_S(0) = float32_round_to_int(s->XMM_S(0), &env->sse_status); d->XMM_S(1) = float32_round_to_int(s->XMM_S(1), &env->sse_status); @@ -1659,21 +1731,21 @@ void glue(helper_roundps, SUFFIX) (Reg *d, Reg *s, uint32_t mode) d->XMM_S(3) = float32_round_to_int(s->XMM_S(3), &env->sse_status); #if 0 /* TODO */ - if (mode & (1 << 3)) - set_float_exception_flags( - get_float_exception_flags(&env->sse_status) & - ~float_flag_inexact, - &env->sse_status); + if (mode & (1 << 3)) { + set_float_exception_flags(get_float_exception_flags(&env->sse_status) & + ~float_flag_inexact, + &env->sse_status); + } #endif env->sse_status.float_rounding_mode = prev_rounding_mode; } -void glue(helper_roundpd, SUFFIX) (Reg *d, Reg *s, uint32_t mode) +void glue(helper_roundpd, SUFFIX)(Reg *d, Reg *s, uint32_t mode) { signed char prev_rounding_mode; prev_rounding_mode = env->sse_status.float_rounding_mode; - if (!(mode & (1 << 2))) + if (!(mode & (1 << 2))) { switch (mode & 3) { case 0: set_float_rounding_mode(float_round_nearest_even, &env->sse_status); @@ -1688,26 +1760,27 @@ void glue(helper_roundpd, SUFFIX) (Reg *d, Reg *s, uint32_t mode) set_float_rounding_mode(float_round_to_zero, 
&env->sse_status); break; } + } d->XMM_D(0) = float64_round_to_int(s->XMM_D(0), &env->sse_status); d->XMM_D(1) = float64_round_to_int(s->XMM_D(1), &env->sse_status); #if 0 /* TODO */ - if (mode & (1 << 3)) - set_float_exception_flags( - get_float_exception_flags(&env->sse_status) & - ~float_flag_inexact, - &env->sse_status); + if (mode & (1 << 3)) { + set_float_exception_flags(get_float_exception_flags(&env->sse_status) & + ~float_flag_inexact, + &env->sse_status); + } #endif env->sse_status.float_rounding_mode = prev_rounding_mode; } -void glue(helper_roundss, SUFFIX) (Reg *d, Reg *s, uint32_t mode) +void glue(helper_roundss, SUFFIX)(Reg *d, Reg *s, uint32_t mode) { signed char prev_rounding_mode; prev_rounding_mode = env->sse_status.float_rounding_mode; - if (!(mode & (1 << 2))) + if (!(mode & (1 << 2))) { switch (mode & 3) { case 0: set_float_rounding_mode(float_round_nearest_even, &env->sse_status); @@ -1722,25 +1795,26 @@ void glue(helper_roundss, SUFFIX) (Reg *d, Reg *s, uint32_t mode) set_float_rounding_mode(float_round_to_zero, &env->sse_status); break; } + } d->XMM_S(0) = float32_round_to_int(s->XMM_S(0), &env->sse_status); #if 0 /* TODO */ - if (mode & (1 << 3)) - set_float_exception_flags( - get_float_exception_flags(&env->sse_status) & - ~float_flag_inexact, - &env->sse_status); + if (mode & (1 << 3)) { + set_float_exception_flags(get_float_exception_flags(&env->sse_status) & + ~float_flag_inexact, + &env->sse_status); + } #endif env->sse_status.float_rounding_mode = prev_rounding_mode; } -void glue(helper_roundsd, SUFFIX) (Reg *d, Reg *s, uint32_t mode) +void glue(helper_roundsd, SUFFIX)(Reg *d, Reg *s, uint32_t mode) { signed char prev_rounding_mode; prev_rounding_mode = env->sse_status.float_rounding_mode; - if (!(mode & (1 << 2))) + if (!(mode & (1 << 2))) { switch (mode & 3) { case 0: set_float_rounding_mode(float_round_nearest_even, &env->sse_status); @@ -1755,67 +1829,80 @@ void glue(helper_roundsd, SUFFIX) (Reg *d, Reg *s, uint32_t mode) set_float_rounding_mode(float_round_to_zero, &env->sse_status); break; } + } d->XMM_D(0) = float64_round_to_int(s->XMM_D(0), &env->sse_status); #if 0 /* TODO */ - if (mode & (1 << 3)) - set_float_exception_flags( - get_float_exception_flags(&env->sse_status) & - ~float_flag_inexact, - &env->sse_status); + if (mode & (1 << 3)) { + set_float_exception_flags(get_float_exception_flags(&env->sse_status) & + ~float_flag_inexact, + &env->sse_status); + } #endif env->sse_status.float_rounding_mode = prev_rounding_mode; } -#define FBLENDP(d, s, m) m ? s : d +#define FBLENDP(d, s, m) (m ? 
s : d) SSE_HELPER_I(helper_blendps, L, 4, FBLENDP) SSE_HELPER_I(helper_blendpd, Q, 2, FBLENDP) SSE_HELPER_I(helper_pblendw, W, 8, FBLENDP) -void glue(helper_dpps, SUFFIX) (Reg *d, Reg *s, uint32_t mask) +void glue(helper_dpps, SUFFIX)(Reg *d, Reg *s, uint32_t mask) { float32 iresult = float32_zero; - if (mask & (1 << 4)) + if (mask & (1 << 4)) { iresult = float32_add(iresult, - float32_mul(d->XMM_S(0), s->XMM_S(0), &env->sse_status), - &env->sse_status); - if (mask & (1 << 5)) + float32_mul(d->XMM_S(0), s->XMM_S(0), + &env->sse_status), + &env->sse_status); + } + if (mask & (1 << 5)) { iresult = float32_add(iresult, - float32_mul(d->XMM_S(1), s->XMM_S(1), &env->sse_status), - &env->sse_status); - if (mask & (1 << 6)) + float32_mul(d->XMM_S(1), s->XMM_S(1), + &env->sse_status), + &env->sse_status); + } + if (mask & (1 << 6)) { iresult = float32_add(iresult, - float32_mul(d->XMM_S(2), s->XMM_S(2), &env->sse_status), - &env->sse_status); - if (mask & (1 << 7)) + float32_mul(d->XMM_S(2), s->XMM_S(2), + &env->sse_status), + &env->sse_status); + } + if (mask & (1 << 7)) { iresult = float32_add(iresult, - float32_mul(d->XMM_S(3), s->XMM_S(3), &env->sse_status), - &env->sse_status); + float32_mul(d->XMM_S(3), s->XMM_S(3), + &env->sse_status), + &env->sse_status); + } d->XMM_S(0) = (mask & (1 << 0)) ? iresult : float32_zero; d->XMM_S(1) = (mask & (1 << 1)) ? iresult : float32_zero; d->XMM_S(2) = (mask & (1 << 2)) ? iresult : float32_zero; d->XMM_S(3) = (mask & (1 << 3)) ? iresult : float32_zero; } -void glue(helper_dppd, SUFFIX) (Reg *d, Reg *s, uint32_t mask) +void glue(helper_dppd, SUFFIX)(Reg *d, Reg *s, uint32_t mask) { float64 iresult = float64_zero; - if (mask & (1 << 4)) + if (mask & (1 << 4)) { iresult = float64_add(iresult, - float64_mul(d->XMM_D(0), s->XMM_D(0), &env->sse_status), - &env->sse_status); - if (mask & (1 << 5)) + float64_mul(d->XMM_D(0), s->XMM_D(0), + &env->sse_status), + &env->sse_status); + } + if (mask & (1 << 5)) { iresult = float64_add(iresult, - float64_mul(d->XMM_D(1), s->XMM_D(1), &env->sse_status), - &env->sse_status); + float64_mul(d->XMM_D(1), s->XMM_D(1), + &env->sse_status), + &env->sse_status); + } d->XMM_D(0) = (mask & (1 << 0)) ? iresult : float64_zero; d->XMM_D(1) = (mask & (1 << 1)) ? iresult : float64_zero; } -void glue(helper_mpsadbw, SUFFIX) (Reg *d, Reg *s, uint32_t offset) +void glue(helper_mpsadbw, SUFFIX)(Reg *d, Reg *s, uint32_t offset) { int s0 = (offset & 3) << 2; int d0 = (offset & 4) << 0; @@ -1835,7 +1922,7 @@ void glue(helper_mpsadbw, SUFFIX) (Reg *d, Reg *s, uint32_t offset) /* SSE4.2 op helpers */ /* it's unclear whether signed or unsigned */ -#define FCMPGTQ(d, s) d > s ? -1 : 0 +#define FCMPGTQ(d, s) (d > s ? 
-1 : 0) SSE_HELPER_Q(helper_pcmpgtq, FCMPGTQ) static inline int pcmp_elen(int reg, uint32_t ctrl) @@ -1843,18 +1930,21 @@ static inline int pcmp_elen(int reg, uint32_t ctrl) int val; /* Presence of REX.W is indicated by a bit higher than 7 set */ - if (ctrl >> 8) - val = abs1((int64_t) env->regs[reg]); - else - val = abs1((int32_t) env->regs[reg]); + if (ctrl >> 8) { + val = abs1((int64_t)env->regs[reg]); + } else { + val = abs1((int32_t)env->regs[reg]); + } if (ctrl & 1) { - if (val > 8) + if (val > 8) { return 8; - } else - if (val > 16) + } + } else { + if (val > 16) { return 16; - + } + } return val; } @@ -1863,11 +1953,14 @@ static inline int pcmp_ilen(Reg *r, uint8_t ctrl) int val = 0; if (ctrl & 1) { - while (val < 8 && r->W(val)) + while (val < 8 && r->W(val)) { val++; - } else - while (val < 16 && r->B(val)) + } + } else { + while (val < 16 && r->B(val)) { val++; + } + } return val; } @@ -1880,15 +1973,15 @@ static inline int pcmp_val(Reg *r, uint8_t ctrl, int i) case 1: return r->W(i); case 2: - return (int8_t) r->B(i); + return (int8_t)r->B(i); case 3: default: - return (int16_t) r->W(i); + return (int16_t)r->W(i); } } static inline unsigned pcmpxstrx(Reg *d, Reg *s, - int8_t ctrl, int valids, int validd) + int8_t ctrl, int valids, int validd) { unsigned int res = 0; int v; @@ -1905,17 +1998,19 @@ static inline unsigned pcmpxstrx(Reg *d, Reg *s, for (j = valids; j >= 0; j--) { res <<= 1; v = pcmp_val(s, ctrl, j); - for (i = validd; i >= 0; i--) + for (i = validd; i >= 0; i--) { res |= (v == pcmp_val(d, ctrl, i)); + } } break; case 1: for (j = valids; j >= 0; j--) { res <<= 1; v = pcmp_val(s, ctrl, j); - for (i = ((validd - 1) | 1); i >= 0; i -= 2) + for (i = ((validd - 1) | 1); i >= 0; i -= 2) { res |= (pcmp_val(d, ctrl, i - 0) <= v && pcmp_val(d, ctrl, i - 1) >= v); + } } break; case 2: @@ -1931,8 +2026,9 @@ static inline unsigned pcmpxstrx(Reg *d, Reg *s, for (j = valids - validd; j >= 0; j--) { res <<= 1; res |= 1; - for (i = MIN(upper - j, validd); i >= 0; i--) + for (i = MIN(upper - j, validd); i >= 0; i--) { res &= (pcmp_val(s, ctrl, i + j) == pcmp_val(d, ctrl, i)); + } } break; } @@ -1946,10 +2042,12 @@ static inline unsigned pcmpxstrx(Reg *d, Reg *s, break; } - if (res) - CC_SRC |= CC_C; - if (res & 1) - CC_SRC |= CC_O; + if (res) { + CC_SRC |= CC_C; + } + if (res & 1) { + CC_SRC |= CC_O; + } return res; } @@ -1958,11 +2056,12 @@ static inline int rffs1(unsigned int val) { int ret = 1, hi; - for (hi = sizeof(val) * 4; hi; hi /= 2) + for (hi = sizeof(val) * 4; hi; hi /= 2) { if (val >> hi) { val >>= hi; ret += hi; } + } return ret; } @@ -1971,77 +2070,82 @@ static inline int ffs1(unsigned int val) { int ret = 1, hi; - for (hi = sizeof(val) * 4; hi; hi /= 2) + for (hi = sizeof(val) * 4; hi; hi /= 2) { if (val << hi) { val <<= hi; ret += hi; } + } return ret; } -void glue(helper_pcmpestri, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl) +void glue(helper_pcmpestri, SUFFIX)(Reg *d, Reg *s, uint32_t ctrl) { unsigned int res = pcmpxstrx(d, s, ctrl, - pcmp_elen(R_EDX, ctrl), - pcmp_elen(R_EAX, ctrl)); + pcmp_elen(R_EDX, ctrl), + pcmp_elen(R_EAX, ctrl)); - if (res) + if (res) { env->regs[R_ECX] = ((ctrl & (1 << 6)) ? 
rffs1 : ffs1)(res) - 1; - else + } else { env->regs[R_ECX] = 16 >> (ctrl & (1 << 0)); + } } -void glue(helper_pcmpestrm, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl) +void glue(helper_pcmpestrm, SUFFIX)(Reg *d, Reg *s, uint32_t ctrl) { int i; unsigned int res = pcmpxstrx(d, s, ctrl, - pcmp_elen(R_EDX, ctrl), - pcmp_elen(R_EAX, ctrl)); + pcmp_elen(R_EDX, ctrl), + pcmp_elen(R_EAX, ctrl)); if ((ctrl >> 6) & 1) { - if (ctrl & 1) + if (ctrl & 1) { for (i = 0; i < 8; i++, res >>= 1) { d->W(i) = (res & 1) ? ~0 : 0; } - else + } else { for (i = 0; i < 16; i++, res >>= 1) { d->B(i) = (res & 1) ? ~0 : 0; } + } } else { d->Q(1) = 0; d->Q(0) = res; } } -void glue(helper_pcmpistri, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl) +void glue(helper_pcmpistri, SUFFIX)(Reg *d, Reg *s, uint32_t ctrl) { unsigned int res = pcmpxstrx(d, s, ctrl, - pcmp_ilen(s, ctrl), - pcmp_ilen(d, ctrl)); + pcmp_ilen(s, ctrl), + pcmp_ilen(d, ctrl)); - if (res) + if (res) { env->regs[R_ECX] = ((ctrl & (1 << 6)) ? rffs1 : ffs1)(res) - 1; - else + } else { env->regs[R_ECX] = 16 >> (ctrl & (1 << 0)); + } } -void glue(helper_pcmpistrm, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl) +void glue(helper_pcmpistrm, SUFFIX)(Reg *d, Reg *s, uint32_t ctrl) { int i; unsigned int res = pcmpxstrx(d, s, ctrl, - pcmp_ilen(s, ctrl), - pcmp_ilen(d, ctrl)); + pcmp_ilen(s, ctrl), + pcmp_ilen(d, ctrl)); if ((ctrl >> 6) & 1) { - if (ctrl & 1) + if (ctrl & 1) { for (i = 0; i < 8; i++, res >>= 1) { d->W(i) = (res & 1) ? ~0 : 0; } - else + } else { for (i = 0; i < 16; i++, res >>= 1) { d->B(i) = (res & 1) ? ~0 : 0; } + } } else { d->Q(1) = 0; d->Q(0) = res; @@ -2053,16 +2157,17 @@ void glue(helper_pcmpistrm, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl) target_ulong helper_crc32(uint32_t crc1, target_ulong msg, uint32_t len) { target_ulong crc = (msg & ((target_ulong) -1 >> - (TARGET_LONG_BITS - len))) ^ crc1; + (TARGET_LONG_BITS - len))) ^ crc1; - while (len--) + while (len--) { crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_BITREV : 0); + } return crc; } #define POPMASK(i) ((target_ulong) -1 / ((1LL << (1 << i)) + 1)) -#define POPCOUNT(n, i) (n & POPMASK(i)) + ((n >> (1 << i)) & POPMASK(i)) +#define POPCOUNT(n, i) ((n & POPMASK(i)) + ((n >> (1 << i)) & POPMASK(i))) target_ulong helper_popcnt(target_ulong n, uint32_t type) { CC_SRC = n ? 0 : CC_Z; @@ -2071,15 +2176,17 @@ target_ulong helper_popcnt(target_ulong n, uint32_t type) n = POPCOUNT(n, 1); n = POPCOUNT(n, 2); n = POPCOUNT(n, 3); - if (type == 1) + if (type == 1) { return n & 0xff; + } n = POPCOUNT(n, 4); #ifndef TARGET_X86_64 return n; #else - if (type == 2) + if (type == 2) { return n & 0xff; + } return POPCOUNT(n, 5); #endif diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c new file mode 100644 index 0000000000..a4b8b640a0 --- /dev/null +++ b/target-i386/seg_helper.c @@ -0,0 +1,2475 @@ +/* + * x86 segmentation related helpers: + * TSS, interrupts, system calls, jumps and call/task gates, descriptors + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "cpu.h" +#include "dyngen-exec.h" +#include "qemu-log.h" +#include "helper.h" + +#if !defined(CONFIG_USER_ONLY) +#include "softmmu_exec.h" +#endif /* !defined(CONFIG_USER_ONLY) */ + +//#define DEBUG_PCALL + +#ifdef DEBUG_PCALL +# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__) +# define LOG_PCALL_STATE(env) \ + log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP) +#else +# define LOG_PCALL(...) do { } while (0) +# define LOG_PCALL_STATE(env) do { } while (0) +#endif + +/* return non zero if error */ +static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr, + int selector) +{ + SegmentCache *dt; + int index; + target_ulong ptr; + + if (selector & 0x4) { + dt = &env->ldt; + } else { + dt = &env->gdt; + } + index = selector & ~7; + if ((index + 7) > dt->limit) { + return -1; + } + ptr = dt->base + index; + *e1_ptr = ldl_kernel(ptr); + *e2_ptr = ldl_kernel(ptr + 4); + return 0; +} + +static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) +{ + unsigned int limit; + + limit = (e1 & 0xffff) | (e2 & 0x000f0000); + if (e2 & DESC_G_MASK) { + limit = (limit << 12) | 0xfff; + } + return limit; +} + +static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2) +{ + return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000); +} + +static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, + uint32_t e2) +{ + sc->base = get_seg_base(e1, e2); + sc->limit = get_seg_limit(e1, e2); + sc->flags = e2; +} + +/* init the segment cache in vm86 mode. */ +static inline void load_seg_vm(int seg, int selector) +{ + selector &= 0xffff; + cpu_x86_load_seg_cache(env, seg, selector, + (selector << 4), 0xffff, 0); +} + +static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, + uint32_t *esp_ptr, int dpl) +{ + int type, index, shift; + +#if 0 + { + int i; + printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit); + for (i = 0; i < env->tr.limit; i++) { + printf("%02x ", env->tr.base[i]); + if ((i & 7) == 7) { + printf("\n"); + } + } + printf("\n"); + } +#endif + + if (!(env->tr.flags & DESC_P_MASK)) { + cpu_abort(env, "invalid tss"); + } + type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; + if ((type & 7) != 1) { + cpu_abort(env, "invalid tss type"); + } + shift = type >> 3; + index = (dpl * 4 + 2) << shift; + if (index + (4 << shift) - 1 > env->tr.limit) { + raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc); + } + if (shift == 0) { + *esp_ptr = lduw_kernel(env->tr.base + index); + *ss_ptr = lduw_kernel(env->tr.base + index + 2); + } else { + *esp_ptr = ldl_kernel(env->tr.base + index); + *ss_ptr = lduw_kernel(env->tr.base + index + 4); + } +} + +/* XXX: merge with load_seg() */ +static void tss_load_seg(int seg_reg, int selector) +{ + uint32_t e1, e2; + int rpl, dpl, cpl; + + if ((selector & 0xfffc) != 0) { + if (load_segment(&e1, &e2, selector) != 0) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + if (!(e2 & DESC_S_MASK)) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + rpl = selector & 3; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + if (seg_reg == R_CS) { + if (!(e2 & DESC_CS_MASK)) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + /* XXX: is it correct? 
*/ + if (dpl != rpl) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + if ((e2 & DESC_C_MASK) && dpl > rpl) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + } else if (seg_reg == R_SS) { + /* SS must be writable data */ + if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + if (dpl != cpl || dpl != rpl) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + } else { + /* not readable code */ + if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + /* if data or non conforming code, checks the rights */ + if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) { + if (dpl < cpl || dpl < rpl) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + } + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + } + cpu_x86_load_seg_cache(env, seg_reg, selector, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); + } else { + if (seg_reg == R_SS || seg_reg == R_CS) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + } +} + +#define SWITCH_TSS_JMP 0 +#define SWITCH_TSS_IRET 1 +#define SWITCH_TSS_CALL 2 + +/* XXX: restore CPU state in registers (PowerPC case) */ +static void switch_tss(int tss_selector, + uint32_t e1, uint32_t e2, int source, + uint32_t next_eip) +{ + int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i; + target_ulong tss_base; + uint32_t new_regs[8], new_segs[6]; + uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap; + uint32_t old_eflags, eflags_mask; + SegmentCache *dt; + int index; + target_ulong ptr; + + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, + source); + + /* if task gate, we read the TSS segment and we load it */ + if (type == 5) { + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc); + } + tss_selector = e1 >> 16; + if (tss_selector & 4) { + raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc); + } + if (load_segment(&e1, &e2, tss_selector) != 0) { + raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc); + } + if (e2 & DESC_S_MASK) { + raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc); + } + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + if ((type & 7) != 1) { + raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc); + } + } + + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc); + } + + if (type & 8) { + tss_limit_max = 103; + } else { + tss_limit_max = 43; + } + tss_limit = get_seg_limit(e1, e2); + tss_base = get_seg_base(e1, e2); + if ((tss_selector & 4) != 0 || + tss_limit < tss_limit_max) { + raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc); + } + old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; + if (old_type & 8) { + old_tss_limit_max = 103; + } else { + old_tss_limit_max = 43; + } + + /* read all the registers from the new TSS */ + if (type & 8) { + /* 32 bit */ + new_cr3 = ldl_kernel(tss_base + 0x1c); + new_eip = ldl_kernel(tss_base + 0x20); + new_eflags = ldl_kernel(tss_base + 0x24); + for (i = 0; i < 8; i++) { + new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4)); + } + for (i = 0; i < 6; i++) { + new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4)); + } + new_ldt = lduw_kernel(tss_base + 0x60); + new_trap = ldl_kernel(tss_base + 0x64); + } else { + /* 16 bit */ + new_cr3 = 0; + new_eip = lduw_kernel(tss_base + 0x0e); + new_eflags 
= lduw_kernel(tss_base + 0x10); + for (i = 0; i < 8; i++) { + new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000; + } + for (i = 0; i < 4; i++) { + new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4)); + } + new_ldt = lduw_kernel(tss_base + 0x2a); + new_segs[R_FS] = 0; + new_segs[R_GS] = 0; + new_trap = 0; + } + /* XXX: avoid a compiler warning, see + http://support.amd.com/us/Processor_TechDocs/24593.pdf + chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */ + (void)new_trap; + + /* NOTE: we must avoid memory exceptions during the task switch, + so we make dummy accesses before */ + /* XXX: it can still fail in some cases, so a bigger hack is + necessary to validate the TLB after having done the accesses */ + + v1 = ldub_kernel(env->tr.base); + v2 = ldub_kernel(env->tr.base + old_tss_limit_max); + stb_kernel(env->tr.base, v1); + stb_kernel(env->tr.base + old_tss_limit_max, v2); + + /* clear busy bit (it is restartable) */ + if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { + target_ulong ptr; + uint32_t e2; + + ptr = env->gdt.base + (env->tr.selector & ~7); + e2 = ldl_kernel(ptr + 4); + e2 &= ~DESC_TSS_BUSY_MASK; + stl_kernel(ptr + 4, e2); + } + old_eflags = cpu_compute_eflags(env); + if (source == SWITCH_TSS_IRET) { + old_eflags &= ~NT_MASK; + } + + /* save the current state in the old TSS */ + if (type & 8) { + /* 32 bit */ + stl_kernel(env->tr.base + 0x20, next_eip); + stl_kernel(env->tr.base + 0x24, old_eflags); + stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX); + stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX); + stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX); + stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX); + stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP); + stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP); + stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI); + stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI); + for (i = 0; i < 6; i++) { + stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector); + } + } else { + /* 16 bit */ + stw_kernel(env->tr.base + 0x0e, next_eip); + stw_kernel(env->tr.base + 0x10, old_eflags); + stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX); + stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX); + stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX); + stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX); + stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP); + stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP); + stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI); + stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI); + for (i = 0; i < 4; i++) { + stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector); + } + } + + /* now if an exception occurs, it will occur in the next task + context */ + + if (source == SWITCH_TSS_CALL) { + stw_kernel(tss_base, env->tr.selector); + new_eflags |= NT_MASK; + } + + /* set busy bit */ + if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) { + target_ulong ptr; + uint32_t e2; + + ptr = env->gdt.base + (tss_selector & ~7); + e2 = ldl_kernel(ptr + 4); + e2 |= DESC_TSS_BUSY_MASK; + stl_kernel(ptr + 4, e2); + } + + /* set the new CPU state */ + /* from this point, any exception which occurs can give problems */ + env->cr[0] |= CR0_TS_MASK; + env->hflags |= HF_TS_MASK; + env->tr.selector = tss_selector; + env->tr.base = tss_base; + env->tr.limit = tss_limit; + env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK; + + if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) { + cpu_x86_update_cr3(env, new_cr3); + } + + /* load all registers without an exception, then reload them with + possible exception */ + env->eip = new_eip; + 
eflags_mask = TF_MASK | AC_MASK | ID_MASK | + IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK; + if (!(type & 8)) { + eflags_mask &= 0xffff; + } + cpu_load_eflags(env, new_eflags, eflags_mask); + /* XXX: what to do in 16 bit case? */ + EAX = new_regs[0]; + ECX = new_regs[1]; + EDX = new_regs[2]; + EBX = new_regs[3]; + ESP = new_regs[4]; + EBP = new_regs[5]; + ESI = new_regs[6]; + EDI = new_regs[7]; + if (new_eflags & VM_MASK) { + for (i = 0; i < 6; i++) { + load_seg_vm(i, new_segs[i]); + } + /* in vm86, CPL is always 3 */ + cpu_x86_set_cpl(env, 3); + } else { + /* CPL is set the RPL of CS */ + cpu_x86_set_cpl(env, new_segs[R_CS] & 3); + /* first just selectors as the rest may trigger exceptions */ + for (i = 0; i < 6; i++) { + cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0); + } + } + + env->ldt.selector = new_ldt & ~4; + env->ldt.base = 0; + env->ldt.limit = 0; + env->ldt.flags = 0; + + /* load the LDT */ + if (new_ldt & 4) { + raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc); + } + + if ((new_ldt & 0xfffc) != 0) { + dt = &env->gdt; + index = new_ldt & ~7; + if ((index + 7) > dt->limit) { + raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc); + } + ptr = dt->base + index; + e1 = ldl_kernel(ptr); + e2 = ldl_kernel(ptr + 4); + if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { + raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc); + } + load_seg_cache_raw_dt(&env->ldt, e1, e2); + } + + /* load the segments */ + if (!(new_eflags & VM_MASK)) { + tss_load_seg(R_CS, new_segs[R_CS]); + tss_load_seg(R_SS, new_segs[R_SS]); + tss_load_seg(R_ES, new_segs[R_ES]); + tss_load_seg(R_DS, new_segs[R_DS]); + tss_load_seg(R_FS, new_segs[R_FS]); + tss_load_seg(R_GS, new_segs[R_GS]); + } + + /* check that EIP is in the CS segment limits */ + if (new_eip > env->segs[R_CS].limit) { + /* XXX: different exception if CALL? */ + raise_exception_err(env, EXCP0D_GPF, 0); + } + +#ifndef CONFIG_USER_ONLY + /* reset local breakpoints */ + if (env->dr[7] & 0x55) { + for (i = 0; i < 4; i++) { + if (hw_breakpoint_enabled(env->dr[7], i) == 0x1) { + hw_breakpoint_remove(env, i); + } + } + env->dr[7] &= ~0x55; + } +#endif +} + +static inline unsigned int get_sp_mask(unsigned int e2) +{ + if (e2 & DESC_B_MASK) { + return 0xffffffff; + } else { + return 0xffff; + } +} + +static int exception_has_error_code(int intno) +{ + switch (intno) { + case 8: + case 10: + case 11: + case 12: + case 13: + case 14: + case 17: + return 1; + } + return 0; +} + +#ifdef TARGET_X86_64 +#define SET_ESP(val, sp_mask) \ + do { \ + if ((sp_mask) == 0xffff) { \ + ESP = (ESP & ~0xffff) | ((val) & 0xffff); \ + } else if ((sp_mask) == 0xffffffffLL) { \ + ESP = (uint32_t)(val); \ + } else { \ + ESP = (val); \ + } \ + } while (0) +#else +#define SET_ESP(val, sp_mask) \ + do { \ + ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask)); \ + } while (0) +#endif + +/* in 64-bit machines, this can overflow. 
So this segment addition macro + * can be used to trim the value to 32-bit whenever needed */ +#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask)))) + +/* XXX: add a is_user flag to have proper security support */ +#define PUSHW(ssp, sp, sp_mask, val) \ + { \ + sp -= 2; \ + stw_kernel((ssp) + (sp & (sp_mask)), (val)); \ + } + +#define PUSHL(ssp, sp, sp_mask, val) \ + { \ + sp -= 4; \ + stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \ + } + +#define POPW(ssp, sp, sp_mask, val) \ + { \ + val = lduw_kernel((ssp) + (sp & (sp_mask))); \ + sp += 2; \ + } + +#define POPL(ssp, sp, sp_mask, val) \ + { \ + val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask)); \ + sp += 4; \ + } + +/* protected mode interrupt */ +static void do_interrupt_protected(int intno, int is_int, int error_code, + unsigned int next_eip, int is_hw) +{ + SegmentCache *dt; + target_ulong ptr, ssp; + int type, dpl, selector, ss_dpl, cpl; + int has_error_code, new_stack, shift; + uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0; + uint32_t old_eip, sp_mask; + + has_error_code = 0; + if (!is_int && !is_hw) { + has_error_code = exception_has_error_code(intno); + } + if (is_int) { + old_eip = next_eip; + } else { + old_eip = env->eip; + } + + dt = &env->idt; + if (intno * 8 + 7 > dt->limit) { + raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); + } + ptr = dt->base + intno * 8; + e1 = ldl_kernel(ptr); + e2 = ldl_kernel(ptr + 4); + /* check gate type */ + type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; + switch (type) { + case 5: /* task gate */ + /* must do that check here to return the correct error code */ + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); + } + switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip); + if (has_error_code) { + int type; + uint32_t mask; + + /* push the error code */ + type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; + shift = type >> 3; + if (env->segs[R_SS].flags & DESC_B_MASK) { + mask = 0xffffffff; + } else { + mask = 0xffff; + } + esp = (ESP - (2 << shift)) & mask; + ssp = env->segs[R_SS].base + esp; + if (shift) { + stl_kernel(ssp, error_code); + } else { + stw_kernel(ssp, error_code); + } + SET_ESP(esp, mask); + } + return; + case 6: /* 286 interrupt gate */ + case 7: /* 286 trap gate */ + case 14: /* 386 interrupt gate */ + case 15: /* 386 trap gate */ + break; + default: + raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); + break; + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + /* check privilege if software int */ + if (is_int && dpl < cpl) { + raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); + } + /* check valid bit */ + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); + } + selector = e1 >> 16; + offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); + if ((selector & 0xfffc) == 0) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + if (load_segment(&e1, &e2, selector) != 0) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (dpl > cpl) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + } + if (!(e2 & DESC_C_MASK) && dpl < cpl) { + /* to inner privilege */ + get_ss_esp_from_tss(&ss, &esp, dpl); + if ((ss & 0xfffc) == 0) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if 
((ss & 3) != dpl) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if (load_segment(&ss_e1, &ss_e2, ss) != 0) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; + if (ss_dpl != dpl) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if (!(ss_e2 & DESC_S_MASK) || + (ss_e2 & DESC_CS_MASK) || + !(ss_e2 & DESC_W_MASK)) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if (!(ss_e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + new_stack = 1; + sp_mask = get_sp_mask(ss_e2); + ssp = get_seg_base(ss_e1, ss_e2); + } else if ((e2 & DESC_C_MASK) || dpl == cpl) { + /* to same privilege */ + if (env->eflags & VM_MASK) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + new_stack = 0; + sp_mask = get_sp_mask(env->segs[R_SS].flags); + ssp = env->segs[R_SS].base; + esp = ESP; + dpl = cpl; + } else { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + new_stack = 0; /* avoid warning */ + sp_mask = 0; /* avoid warning */ + ssp = 0; /* avoid warning */ + esp = 0; /* avoid warning */ + } + + shift = type >> 3; + +#if 0 + /* XXX: check that enough room is available */ + push_size = 6 + (new_stack << 2) + (has_error_code << 1); + if (env->eflags & VM_MASK) { + push_size += 8; + } + push_size <<= shift; +#endif + if (shift == 1) { + if (new_stack) { + if (env->eflags & VM_MASK) { + PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector); + PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector); + PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector); + PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector); + } + PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector); + PUSHL(ssp, esp, sp_mask, ESP); + } + PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env)); + PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector); + PUSHL(ssp, esp, sp_mask, old_eip); + if (has_error_code) { + PUSHL(ssp, esp, sp_mask, error_code); + } + } else { + if (new_stack) { + if (env->eflags & VM_MASK) { + PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector); + PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector); + PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector); + PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector); + } + PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector); + PUSHW(ssp, esp, sp_mask, ESP); + } + PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env)); + PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector); + PUSHW(ssp, esp, sp_mask, old_eip); + if (has_error_code) { + PUSHW(ssp, esp, sp_mask, error_code); + } + } + + if (new_stack) { + if (env->eflags & VM_MASK) { + cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0); + cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0); + cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0); + cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0); + } + ss = (ss & ~3) | dpl; + cpu_x86_load_seg_cache(env, R_SS, ss, + ssp, get_seg_limit(ss_e1, ss_e2), ss_e2); + } + SET_ESP(esp, sp_mask); + + selector = (selector & ~3) | dpl; + cpu_x86_load_seg_cache(env, R_CS, selector, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); + cpu_x86_set_cpl(env, dpl); + env->eip = offset; + + /* interrupt gate clear IF mask */ + if ((type & 1) == 0) { + env->eflags &= ~IF_MASK; + } + env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); +} + +#ifdef TARGET_X86_64 + +#define PUSHQ(sp, val) \ + { \ + sp -= 8; \ + stq_kernel(sp, (val)); \ + } + +#define POPQ(sp, val) \ + { \ + val = ldq_kernel(sp); \ + sp += 8; \ + } + +static inline target_ulong get_rsp_from_tss(int level) +{ + int index; + +#if 0 + printf("TR: base=" 
TARGET_FMT_lx " limit=%x\n", + env->tr.base, env->tr.limit); +#endif + + if (!(env->tr.flags & DESC_P_MASK)) { + cpu_abort(env, "invalid tss"); + } + index = 8 * level + 4; + if ((index + 7) > env->tr.limit) { + raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc); + } + return ldq_kernel(env->tr.base + index); +} + +/* 64 bit interrupt */ +static void do_interrupt64(int intno, int is_int, int error_code, + target_ulong next_eip, int is_hw) +{ + SegmentCache *dt; + target_ulong ptr; + int type, dpl, selector, cpl, ist; + int has_error_code, new_stack; + uint32_t e1, e2, e3, ss; + target_ulong old_eip, esp, offset; + + has_error_code = 0; + if (!is_int && !is_hw) { + has_error_code = exception_has_error_code(intno); + } + if (is_int) { + old_eip = next_eip; + } else { + old_eip = env->eip; + } + + dt = &env->idt; + if (intno * 16 + 15 > dt->limit) { + raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); + } + ptr = dt->base + intno * 16; + e1 = ldl_kernel(ptr); + e2 = ldl_kernel(ptr + 4); + e3 = ldl_kernel(ptr + 8); + /* check gate type */ + type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; + switch (type) { + case 14: /* 386 interrupt gate */ + case 15: /* 386 trap gate */ + break; + default: + raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); + break; + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + /* check privilege if software int */ + if (is_int && dpl < cpl) { + raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); + } + /* check valid bit */ + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2); + } + selector = e1 >> 16; + offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff); + ist = e2 & 7; + if ((selector & 0xfffc) == 0) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + + if (load_segment(&e1, &e2, selector) != 0) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (dpl > cpl) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + } + if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) { + /* to inner privilege */ + if (ist != 0) { + esp = get_rsp_from_tss(ist + 3); + } else { + esp = get_rsp_from_tss(dpl); + } + esp &= ~0xfLL; /* align stack */ + ss = 0; + new_stack = 1; + } else if ((e2 & DESC_C_MASK) || dpl == cpl) { + /* to same privilege */ + if (env->eflags & VM_MASK) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + new_stack = 0; + if (ist != 0) { + esp = get_rsp_from_tss(ist + 3); + } else { + esp = ESP; + } + esp &= ~0xfLL; /* align stack */ + dpl = cpl; + } else { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + new_stack = 0; /* avoid warning */ + esp = 0; /* avoid warning */ + } + + PUSHQ(esp, env->segs[R_SS].selector); + PUSHQ(esp, ESP); + PUSHQ(esp, cpu_compute_eflags(env)); + PUSHQ(esp, env->segs[R_CS].selector); + PUSHQ(esp, old_eip); + if (has_error_code) { + PUSHQ(esp, error_code); + } + + if (new_stack) { + ss = 0 | dpl; + cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); + } + ESP = esp; + + selector = (selector & ~3) | dpl; + cpu_x86_load_seg_cache(env, R_CS, selector, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); + cpu_x86_set_cpl(env, dpl); + env->eip = 
offset; + + /* interrupt gate clear IF mask */ + if ((type & 1) == 0) { + env->eflags &= ~IF_MASK; + } + env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); +} +#endif + +#ifdef TARGET_X86_64 +#if defined(CONFIG_USER_ONLY) +void helper_syscall(int next_eip_addend) +{ + env->exception_index = EXCP_SYSCALL; + env->exception_next_eip = env->eip + next_eip_addend; + cpu_loop_exit(env); +} +#else +void helper_syscall(int next_eip_addend) +{ + int selector; + + if (!(env->efer & MSR_EFER_SCE)) { + raise_exception_err(env, EXCP06_ILLOP, 0); + } + selector = (env->star >> 32) & 0xffff; + if (env->hflags & HF_LMA_MASK) { + int code64; + + ECX = env->eip + next_eip_addend; + env->regs[11] = cpu_compute_eflags(env); + + code64 = env->hflags & HF_CS64_MASK; + + cpu_x86_set_cpl(env, 0); + cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | + DESC_L_MASK); + cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_W_MASK | DESC_A_MASK); + env->eflags &= ~env->fmask; + cpu_load_eflags(env, env->eflags, 0); + if (code64) { + env->eip = env->lstar; + } else { + env->eip = env->cstar; + } + } else { + ECX = (uint32_t)(env->eip + next_eip_addend); + + cpu_x86_set_cpl(env, 0); + cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_W_MASK | DESC_A_MASK); + env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK); + env->eip = (uint32_t)env->star; + } +} +#endif +#endif + +#ifdef TARGET_X86_64 +void helper_sysret(int dflag) +{ + int cpl, selector; + + if (!(env->efer & MSR_EFER_SCE)) { + raise_exception_err(env, EXCP06_ILLOP, 0); + } + cpl = env->hflags & HF_CPL_MASK; + if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + selector = (env->star >> 48) & 0xffff; + if (env->hflags & HF_LMA_MASK) { + if (dflag == 2) { + cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, + 0, 0xffffffff, + DESC_G_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | + DESC_L_MASK); + env->eip = ECX; + } else { + cpu_x86_load_seg_cache(env, R_CS, selector | 3, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); + env->eip = (uint32_t)ECX; + } + cpu_x86_load_seg_cache(env, R_SS, selector + 8, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_W_MASK | DESC_A_MASK); + cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK + | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | + NT_MASK); + cpu_x86_set_cpl(env, 3); + } else { + cpu_x86_load_seg_cache(env, R_CS, selector | 3, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); + env->eip = (uint32_t)ECX; + cpu_x86_load_seg_cache(env, R_SS, selector + 8, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_W_MASK | DESC_A_MASK); + env->eflags |= IF_MASK; + cpu_x86_set_cpl(env, 3); + } +} +#endif + +/* real mode interrupt */ +static void 
do_interrupt_real(int intno, int is_int, int error_code, + unsigned int next_eip) +{ + SegmentCache *dt; + target_ulong ptr, ssp; + int selector; + uint32_t offset, esp; + uint32_t old_cs, old_eip; + + /* real mode (simpler!) */ + dt = &env->idt; + if (intno * 4 + 3 > dt->limit) { + raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); + } + ptr = dt->base + intno * 4; + offset = lduw_kernel(ptr); + selector = lduw_kernel(ptr + 2); + esp = ESP; + ssp = env->segs[R_SS].base; + if (is_int) { + old_eip = next_eip; + } else { + old_eip = env->eip; + } + old_cs = env->segs[R_CS].selector; + /* XXX: use SS segment size? */ + PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env)); + PUSHW(ssp, esp, 0xffff, old_cs); + PUSHW(ssp, esp, 0xffff, old_eip); + + /* update processor state */ + ESP = (ESP & ~0xffff) | (esp & 0xffff); + env->eip = offset; + env->segs[R_CS].selector = selector; + env->segs[R_CS].base = (selector << 4); + env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); +} + +#if defined(CONFIG_USER_ONLY) +/* fake user mode interrupt */ +static void do_interrupt_user(int intno, int is_int, int error_code, + target_ulong next_eip) +{ + SegmentCache *dt; + target_ulong ptr; + int dpl, cpl, shift; + uint32_t e2; + + dt = &env->idt; + if (env->hflags & HF_LMA_MASK) { + shift = 4; + } else { + shift = 3; + } + ptr = dt->base + (intno << shift); + e2 = ldl_kernel(ptr + 4); + + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + /* check privilege if software int */ + if (is_int && dpl < cpl) { + raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2); + } + + /* Since we emulate only user space, we cannot do more than + exiting the emulation with the suitable exception and error + code */ + if (is_int) { + EIP = next_eip; + } +} + +#else + +static void handle_even_inj(int intno, int is_int, int error_code, + int is_hw, int rm) +{ + uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, + control.event_inj)); + + if (!(event_inj & SVM_EVTINJ_VALID)) { + int type; + + if (is_int) { + type = SVM_EVTINJ_TYPE_SOFT; + } else { + type = SVM_EVTINJ_TYPE_EXEPT; + } + event_inj = intno | type | SVM_EVTINJ_VALID; + if (!rm && exception_has_error_code(intno)) { + event_inj |= SVM_EVTINJ_VALID_ERR; + stl_phys(env->vm_vmcb + offsetof(struct vmcb, + control.event_inj_err), + error_code); + } + stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), + event_inj); + } +} +#endif + +/* + * Begin execution of an interruption. is_int is TRUE if coming from + * the int instruction. next_eip is the EIP value AFTER the interrupt + * instruction. It is only relevant if is_int is TRUE. 
+ */ +static void do_interrupt_all(int intno, int is_int, int error_code, + target_ulong next_eip, int is_hw) +{ + if (qemu_loglevel_mask(CPU_LOG_INT)) { + if ((env->cr[0] & CR0_PE_MASK)) { + static int count; + + qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx + " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, + count, intno, error_code, is_int, + env->hflags & HF_CPL_MASK, + env->segs[R_CS].selector, EIP, + (int)env->segs[R_CS].base + EIP, + env->segs[R_SS].selector, ESP); + if (intno == 0x0e) { + qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]); + } else { + qemu_log(" EAX=" TARGET_FMT_lx, EAX); + } + qemu_log("\n"); + log_cpu_state(env, X86_DUMP_CCOP); +#if 0 + { + int i; + target_ulong ptr; + + qemu_log(" code="); + ptr = env->segs[R_CS].base + env->eip; + for (i = 0; i < 16; i++) { + qemu_log(" %02x", ldub(ptr + i)); + } + qemu_log("\n"); + } +#endif + count++; + } + } + if (env->cr[0] & CR0_PE_MASK) { +#if !defined(CONFIG_USER_ONLY) + if (env->hflags & HF_SVMI_MASK) { + handle_even_inj(intno, is_int, error_code, is_hw, 0); + } +#endif +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + do_interrupt64(intno, is_int, error_code, next_eip, is_hw); + } else +#endif + { + do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw); + } + } else { +#if !defined(CONFIG_USER_ONLY) + if (env->hflags & HF_SVMI_MASK) { + handle_even_inj(intno, is_int, error_code, is_hw, 1); + } +#endif + do_interrupt_real(intno, is_int, error_code, next_eip); + } + +#if !defined(CONFIG_USER_ONLY) + if (env->hflags & HF_SVMI_MASK) { + uint32_t event_inj = ldl_phys(env->vm_vmcb + + offsetof(struct vmcb, + control.event_inj)); + + stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), + event_inj & ~SVM_EVTINJ_VALID); + } +#endif +} + +void do_interrupt(CPUX86State *env1) +{ + CPUX86State *saved_env; + + saved_env = env; + env = env1; +#if defined(CONFIG_USER_ONLY) + /* if user mode only, we simulate a fake exception + which will be handled outside the cpu execution + loop */ + do_interrupt_user(env->exception_index, + env->exception_is_int, + env->error_code, + env->exception_next_eip); + /* successfully delivered */ + env->old_exception = -1; +#else + /* simulate a real cpu exception. On i386, it can + trigger new exceptions, but we do not handle + double or triple faults yet. 
*/ + do_interrupt_all(env->exception_index, + env->exception_is_int, + env->error_code, + env->exception_next_eip, 0); + /* successfully delivered */ + env->old_exception = -1; +#endif + env = saved_env; +} + +void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw) +{ + CPUX86State *saved_env; + + saved_env = env; + env = env1; + do_interrupt_all(intno, 0, 0, 0, is_hw); + env = saved_env; +} + +void helper_enter_level(int level, int data32, target_ulong t1) +{ + target_ulong ssp; + uint32_t esp_mask, esp, ebp; + + esp_mask = get_sp_mask(env->segs[R_SS].flags); + ssp = env->segs[R_SS].base; + ebp = EBP; + esp = ESP; + if (data32) { + /* 32 bit */ + esp -= 4; + while (--level) { + esp -= 4; + ebp -= 4; + stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask))); + } + esp -= 4; + stl(ssp + (esp & esp_mask), t1); + } else { + /* 16 bit */ + esp -= 2; + while (--level) { + esp -= 2; + ebp -= 2; + stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask))); + } + esp -= 2; + stw(ssp + (esp & esp_mask), t1); + } +} + +#ifdef TARGET_X86_64 +void helper_enter64_level(int level, int data64, target_ulong t1) +{ + target_ulong esp, ebp; + + ebp = EBP; + esp = ESP; + + if (data64) { + /* 64 bit */ + esp -= 8; + while (--level) { + esp -= 8; + ebp -= 8; + stq(esp, ldq(ebp)); + } + esp -= 8; + stq(esp, t1); + } else { + /* 16 bit */ + esp -= 2; + while (--level) { + esp -= 2; + ebp -= 2; + stw(esp, lduw(ebp)); + } + esp -= 2; + stw(esp, t1); + } +} +#endif + +void helper_lldt(int selector) +{ + SegmentCache *dt; + uint32_t e1, e2; + int index, entry_limit; + target_ulong ptr; + + selector &= 0xffff; + if ((selector & 0xfffc) == 0) { + /* XXX: NULL selector case: invalid LDT */ + env->ldt.base = 0; + env->ldt.limit = 0; + } else { + if (selector & 0x4) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + dt = &env->gdt; + index = selector & ~7; +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + entry_limit = 15; + } else +#endif + { + entry_limit = 7; + } + if ((index + entry_limit) > dt->limit) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + ptr = dt->base + index; + e1 = ldl_kernel(ptr); + e2 = ldl_kernel(ptr + 4); + if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + } +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + uint32_t e3; + + e3 = ldl_kernel(ptr + 8); + load_seg_cache_raw_dt(&env->ldt, e1, e2); + env->ldt.base |= (target_ulong)e3 << 32; + } else +#endif + { + load_seg_cache_raw_dt(&env->ldt, e1, e2); + } + } + env->ldt.selector = selector; +} + +void helper_ltr(int selector) +{ + SegmentCache *dt; + uint32_t e1, e2; + int index, type, entry_limit; + target_ulong ptr; + + selector &= 0xffff; + if ((selector & 0xfffc) == 0) { + /* NULL selector case: invalid TR */ + env->tr.base = 0; + env->tr.limit = 0; + env->tr.flags = 0; + } else { + if (selector & 0x4) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + dt = &env->gdt; + index = selector & ~7; +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + entry_limit = 15; + } else +#endif + { + entry_limit = 7; + } + if ((index + entry_limit) > dt->limit) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + ptr = dt->base + index; + e1 = ldl_kernel(ptr); + e2 = ldl_kernel(ptr + 4); + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + if ((e2 & DESC_S_MASK) || + (type != 1 && type != 9)) { + 
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + } +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + uint32_t e3, e4; + + e3 = ldl_kernel(ptr + 8); + e4 = ldl_kernel(ptr + 12); + if ((e4 >> DESC_TYPE_SHIFT) & 0xf) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + load_seg_cache_raw_dt(&env->tr, e1, e2); + env->tr.base |= (target_ulong)e3 << 32; + } else +#endif + { + load_seg_cache_raw_dt(&env->tr, e1, e2); + } + e2 |= DESC_TSS_BUSY_MASK; + stl_kernel(ptr + 4, e2); + } + env->tr.selector = selector; +} + +/* only works if protected mode and not VM86. seg_reg must be != R_CS */ +void helper_load_seg(int seg_reg, int selector) +{ + uint32_t e1, e2; + int cpl, dpl, rpl; + SegmentCache *dt; + int index; + target_ulong ptr; + + selector &= 0xffff; + cpl = env->hflags & HF_CPL_MASK; + if ((selector & 0xfffc) == 0) { + /* null selector case */ + if (seg_reg == R_SS +#ifdef TARGET_X86_64 + && (!(env->hflags & HF_CS64_MASK) || cpl == 3) +#endif + ) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); + } else { + + if (selector & 0x4) { + dt = &env->ldt; + } else { + dt = &env->gdt; + } + index = selector & ~7; + if ((index + 7) > dt->limit) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + ptr = dt->base + index; + e1 = ldl_kernel(ptr); + e2 = ldl_kernel(ptr + 4); + + if (!(e2 & DESC_S_MASK)) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + rpl = selector & 3; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (seg_reg == R_SS) { + /* must be writable segment */ + if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (rpl != cpl || dpl != cpl) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + } else { + /* must be readable segment */ + if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + + if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { + /* if not conforming code, test rights */ + if (dpl < cpl || dpl < rpl) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + } + } + + if (!(e2 & DESC_P_MASK)) { + if (seg_reg == R_SS) { + raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc); + } else { + raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + } + } + + /* set the access bit if not already set */ + if (!(e2 & DESC_A_MASK)) { + e2 |= DESC_A_MASK; + stl_kernel(ptr + 4, e2); + } + + cpu_x86_load_seg_cache(env, seg_reg, selector, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); +#if 0 + qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", + selector, (unsigned long)sc->base, sc->limit, sc->flags); +#endif + } +} + +/* protected mode jump */ +void helper_ljmp_protected(int new_cs, target_ulong new_eip, + int next_eip_addend) +{ + int gate_cs, type; + uint32_t e1, e2, cpl, dpl, rpl, limit; + target_ulong next_eip; + + if ((new_cs & 0xfffc) == 0) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + if (load_segment(&e1, &e2, new_cs) != 0) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + cpl = env->hflags & HF_CPL_MASK; + if (e2 & DESC_S_MASK) { + if (!(e2 & DESC_CS_MASK)) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (e2 & DESC_C_MASK) { + /* conforming code segment */ + if (dpl > cpl) { + raise_exception_err(env, 
EXCP0D_GPF, new_cs & 0xfffc); + } + } else { + /* non conforming code segment */ + rpl = new_cs & 3; + if (rpl > cpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + if (dpl != cpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); + } + limit = get_seg_limit(e1, e2); + if (new_eip > limit && + !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, + get_seg_base(e1, e2), limit, e2); + EIP = new_eip; + } else { + /* jump to call or task gate */ + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + rpl = new_cs & 3; + cpl = env->hflags & HF_CPL_MASK; + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + switch (type) { + case 1: /* 286 TSS */ + case 9: /* 386 TSS */ + case 5: /* task gate */ + if (dpl < cpl || dpl < rpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + next_eip = env->eip + next_eip_addend; + switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip); + CC_OP = CC_OP_EFLAGS; + break; + case 4: /* 286 call gate */ + case 12: /* 386 call gate */ + if ((dpl < cpl) || (dpl < rpl)) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); + } + gate_cs = e1 >> 16; + new_eip = (e1 & 0xffff); + if (type == 12) { + new_eip |= (e2 & 0xffff0000); + } + if (load_segment(&e1, &e2, gate_cs) != 0) { + raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc); + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + /* must be code segment */ + if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != + (DESC_S_MASK | DESC_CS_MASK))) { + raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc); + } + if (((e2 & DESC_C_MASK) && (dpl > cpl)) || + (!(e2 & DESC_C_MASK) && (dpl != cpl))) { + raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc); + } + limit = get_seg_limit(e1, e2); + if (new_eip > limit) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, + get_seg_base(e1, e2), limit, e2); + EIP = new_eip; + break; + default: + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + break; + } + } +} + +/* real mode call */ +void helper_lcall_real(int new_cs, target_ulong new_eip1, + int shift, int next_eip) +{ + int new_eip; + uint32_t esp, esp_mask; + target_ulong ssp; + + new_eip = new_eip1; + esp = ESP; + esp_mask = get_sp_mask(env->segs[R_SS].flags); + ssp = env->segs[R_SS].base; + if (shift) { + PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector); + PUSHL(ssp, esp, esp_mask, next_eip); + } else { + PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector); + PUSHW(ssp, esp, esp_mask, next_eip); + } + + SET_ESP(esp, esp_mask); + env->eip = new_eip; + env->segs[R_CS].selector = new_cs; + env->segs[R_CS].base = (new_cs << 4); +} + +/* protected mode call */ +void helper_lcall_protected(int new_cs, target_ulong new_eip, + int shift, int next_eip_addend) +{ + int new_stack, i; + uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count; + uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask; + uint32_t val, limit, old_sp_mask; + target_ulong ssp, old_ssp, next_eip; + + next_eip = env->eip + next_eip_addend; + LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift); + LOG_PCALL_STATE(env); + if ((new_cs & 0xfffc) == 0) { + 
raise_exception_err(env, EXCP0D_GPF, 0); + } + if (load_segment(&e1, &e2, new_cs) != 0) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + cpl = env->hflags & HF_CPL_MASK; + LOG_PCALL("desc=%08x:%08x\n", e1, e2); + if (e2 & DESC_S_MASK) { + if (!(e2 & DESC_CS_MASK)) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (e2 & DESC_C_MASK) { + /* conforming code segment */ + if (dpl > cpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + } else { + /* non conforming code segment */ + rpl = new_cs & 3; + if (rpl > cpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + if (dpl != cpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); + } + +#ifdef TARGET_X86_64 + /* XXX: check 16/32 bit cases in long mode */ + if (shift == 2) { + target_ulong rsp; + + /* 64 bit case */ + rsp = ESP; + PUSHQ(rsp, env->segs[R_CS].selector); + PUSHQ(rsp, next_eip); + /* from this point, not restartable */ + ESP = rsp; + cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), e2); + EIP = new_eip; + } else +#endif + { + sp = ESP; + sp_mask = get_sp_mask(env->segs[R_SS].flags); + ssp = env->segs[R_SS].base; + if (shift) { + PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector); + PUSHL(ssp, sp, sp_mask, next_eip); + } else { + PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector); + PUSHW(ssp, sp, sp_mask, next_eip); + } + + limit = get_seg_limit(e1, e2); + if (new_eip > limit) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + /* from this point, not restartable */ + SET_ESP(sp, sp_mask); + cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, + get_seg_base(e1, e2), limit, e2); + EIP = new_eip; + } + } else { + /* check gate type */ + type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + rpl = new_cs & 3; + switch (type) { + case 1: /* available 286 TSS */ + case 9: /* available 386 TSS */ + case 5: /* task gate */ + if (dpl < cpl || dpl < rpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip); + CC_OP = CC_OP_EFLAGS; + return; + case 4: /* 286 call gate */ + case 12: /* 386 call gate */ + break; + default: + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + break; + } + shift = type >> 3; + + if (dpl < cpl || dpl < rpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + /* check valid bit */ + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); + } + selector = e1 >> 16; + offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); + param_count = e2 & 0x1f; + if ((selector & 0xfffc) == 0) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + + if (load_segment(&e1, &e2, selector) != 0) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (dpl > cpl) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + } + + if (!(e2 & DESC_C_MASK) && dpl < cpl) { + /* to inner privilege */ + get_ss_esp_from_tss(&ss, &sp, dpl); + LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx + "\n", + ss, sp, param_count, ESP); + if ((ss & 0xfffc) == 0) { + 
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if ((ss & 3) != dpl) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if (load_segment(&ss_e1, &ss_e2, ss) != 0) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; + if (ss_dpl != dpl) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if (!(ss_e2 & DESC_S_MASK) || + (ss_e2 & DESC_CS_MASK) || + !(ss_e2 & DESC_W_MASK)) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if (!(ss_e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + + /* push_size = ((param_count * 2) + 8) << shift; */ + + old_sp_mask = get_sp_mask(env->segs[R_SS].flags); + old_ssp = env->segs[R_SS].base; + + sp_mask = get_sp_mask(ss_e2); + ssp = get_seg_base(ss_e1, ss_e2); + if (shift) { + PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector); + PUSHL(ssp, sp, sp_mask, ESP); + for (i = param_count - 1; i >= 0; i--) { + val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask)); + PUSHL(ssp, sp, sp_mask, val); + } + } else { + PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector); + PUSHW(ssp, sp, sp_mask, ESP); + for (i = param_count - 1; i >= 0; i--) { + val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask)); + PUSHW(ssp, sp, sp_mask, val); + } + } + new_stack = 1; + } else { + /* to same privilege */ + sp = ESP; + sp_mask = get_sp_mask(env->segs[R_SS].flags); + ssp = env->segs[R_SS].base; + /* push_size = (4 << shift); */ + new_stack = 0; + } + + if (shift) { + PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector); + PUSHL(ssp, sp, sp_mask, next_eip); + } else { + PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector); + PUSHW(ssp, sp, sp_mask, next_eip); + } + + /* from this point, not restartable */ + + if (new_stack) { + ss = (ss & ~3) | dpl; + cpu_x86_load_seg_cache(env, R_SS, ss, + ssp, + get_seg_limit(ss_e1, ss_e2), + ss_e2); + } + + selector = (selector & ~3) | dpl; + cpu_x86_load_seg_cache(env, R_CS, selector, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); + cpu_x86_set_cpl(env, dpl); + SET_ESP(sp, sp_mask); + EIP = offset; + } +} + +/* real and vm86 mode iret */ +void helper_iret_real(int shift) +{ + uint32_t sp, new_cs, new_eip, new_eflags, sp_mask; + target_ulong ssp; + int eflags_mask; + + sp_mask = 0xffff; /* XXXX: use SS segment size? */ + sp = ESP; + ssp = env->segs[R_SS].base; + if (shift == 1) { + /* 32 bits */ + POPL(ssp, sp, sp_mask, new_eip); + POPL(ssp, sp, sp_mask, new_cs); + new_cs &= 0xffff; + POPL(ssp, sp, sp_mask, new_eflags); + } else { + /* 16 bits */ + POPW(ssp, sp, sp_mask, new_eip); + POPW(ssp, sp, sp_mask, new_cs); + POPW(ssp, sp, sp_mask, new_eflags); + } + ESP = (ESP & ~sp_mask) | (sp & sp_mask); + env->segs[R_CS].selector = new_cs; + env->segs[R_CS].base = (new_cs << 4); + env->eip = new_eip; + if (env->eflags & VM_MASK) { + eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | + NT_MASK; + } else { + eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | + RF_MASK | NT_MASK; + } + if (shift == 0) { + eflags_mask &= 0xffff; + } + cpu_load_eflags(env, new_eflags, eflags_mask); + env->hflags2 &= ~HF2_NMI_MASK; +} + +static inline void validate_seg(int seg_reg, int cpl) +{ + int dpl; + uint32_t e2; + + /* XXX: on x86_64, we do not want to nullify FS and GS because + they may still contain a valid base. 
I would be interested to + know how a real x86_64 CPU behaves */ + if ((seg_reg == R_FS || seg_reg == R_GS) && + (env->segs[seg_reg].selector & 0xfffc) == 0) { + return; + } + + e2 = env->segs[seg_reg].flags; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { + /* data or non conforming code segment */ + if (dpl < cpl) { + cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0); + } + } +} + +/* protected mode iret */ +static inline void helper_ret_protected(int shift, int is_iret, int addend) +{ + uint32_t new_cs, new_eflags, new_ss; + uint32_t new_es, new_ds, new_fs, new_gs; + uint32_t e1, e2, ss_e1, ss_e2; + int cpl, dpl, rpl, eflags_mask, iopl; + target_ulong ssp, sp, new_eip, new_esp, sp_mask; + +#ifdef TARGET_X86_64 + if (shift == 2) { + sp_mask = -1; + } else +#endif + { + sp_mask = get_sp_mask(env->segs[R_SS].flags); + } + sp = ESP; + ssp = env->segs[R_SS].base; + new_eflags = 0; /* avoid warning */ +#ifdef TARGET_X86_64 + if (shift == 2) { + POPQ(sp, new_eip); + POPQ(sp, new_cs); + new_cs &= 0xffff; + if (is_iret) { + POPQ(sp, new_eflags); + } + } else +#endif + { + if (shift == 1) { + /* 32 bits */ + POPL(ssp, sp, sp_mask, new_eip); + POPL(ssp, sp, sp_mask, new_cs); + new_cs &= 0xffff; + if (is_iret) { + POPL(ssp, sp, sp_mask, new_eflags); + if (new_eflags & VM_MASK) { + goto return_to_vm86; + } + } + } else { + /* 16 bits */ + POPW(ssp, sp, sp_mask, new_eip); + POPW(ssp, sp, sp_mask, new_cs); + if (is_iret) { + POPW(ssp, sp, sp_mask, new_eflags); + } + } + } + LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", + new_cs, new_eip, shift, addend); + LOG_PCALL_STATE(env); + if ((new_cs & 0xfffc) == 0) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + if (load_segment(&e1, &e2, new_cs) != 0) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + if (!(e2 & DESC_S_MASK) || + !(e2 & DESC_CS_MASK)) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + cpl = env->hflags & HF_CPL_MASK; + rpl = new_cs & 3; + if (rpl < cpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (e2 & DESC_C_MASK) { + if (dpl > rpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + } else { + if (dpl != rpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); + } + + sp += addend; + if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || + ((env->hflags & HF_CS64_MASK) && !is_iret))) { + /* return to same privilege level */ + cpu_x86_load_seg_cache(env, R_CS, new_cs, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); + } else { + /* return to different privilege level */ +#ifdef TARGET_X86_64 + if (shift == 2) { + POPQ(sp, new_esp); + POPQ(sp, new_ss); + new_ss &= 0xffff; + } else +#endif + { + if (shift == 1) { + /* 32 bits */ + POPL(ssp, sp, sp_mask, new_esp); + POPL(ssp, sp, sp_mask, new_ss); + new_ss &= 0xffff; + } else { + /* 16 bits */ + POPW(ssp, sp, sp_mask, new_esp); + POPW(ssp, sp, sp_mask, new_ss); + } + } + LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", + new_ss, new_esp); + if ((new_ss & 0xfffc) == 0) { +#ifdef TARGET_X86_64 + /* NULL ss is allowed in long mode if cpl != 3 */ + /* XXX: test CS64? 
*/ + if ((env->hflags & HF_LMA_MASK) && rpl != 3) { + cpu_x86_load_seg_cache(env, R_SS, new_ss, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (rpl << DESC_DPL_SHIFT) | + DESC_W_MASK | DESC_A_MASK); + ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */ + } else +#endif + { + raise_exception_err(env, EXCP0D_GPF, 0); + } + } else { + if ((new_ss & 3) != rpl) { + raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); + } + if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) { + raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); + } + if (!(ss_e2 & DESC_S_MASK) || + (ss_e2 & DESC_CS_MASK) || + !(ss_e2 & DESC_W_MASK)) { + raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); + } + dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; + if (dpl != rpl) { + raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); + } + if (!(ss_e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc); + } + cpu_x86_load_seg_cache(env, R_SS, new_ss, + get_seg_base(ss_e1, ss_e2), + get_seg_limit(ss_e1, ss_e2), + ss_e2); + } + + cpu_x86_load_seg_cache(env, R_CS, new_cs, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); + cpu_x86_set_cpl(env, rpl); + sp = new_esp; +#ifdef TARGET_X86_64 + if (env->hflags & HF_CS64_MASK) { + sp_mask = -1; + } else +#endif + { + sp_mask = get_sp_mask(ss_e2); + } + + /* validate data segments */ + validate_seg(R_ES, rpl); + validate_seg(R_DS, rpl); + validate_seg(R_FS, rpl); + validate_seg(R_GS, rpl); + + sp += addend; + } + SET_ESP(sp, sp_mask); + env->eip = new_eip; + if (is_iret) { + /* NOTE: 'cpl' is the _old_ CPL */ + eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK; + if (cpl == 0) { + eflags_mask |= IOPL_MASK; + } + iopl = (env->eflags >> IOPL_SHIFT) & 3; + if (cpl <= iopl) { + eflags_mask |= IF_MASK; + } + if (shift == 0) { + eflags_mask &= 0xffff; + } + cpu_load_eflags(env, new_eflags, eflags_mask); + } + return; + + return_to_vm86: + POPL(ssp, sp, sp_mask, new_esp); + POPL(ssp, sp, sp_mask, new_ss); + POPL(ssp, sp, sp_mask, new_es); + POPL(ssp, sp, sp_mask, new_ds); + POPL(ssp, sp, sp_mask, new_fs); + POPL(ssp, sp, sp_mask, new_gs); + + /* modify processor state */ + cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK | + IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | + VIP_MASK); + load_seg_vm(R_CS, new_cs & 0xffff); + cpu_x86_set_cpl(env, 3); + load_seg_vm(R_SS, new_ss & 0xffff); + load_seg_vm(R_ES, new_es & 0xffff); + load_seg_vm(R_DS, new_ds & 0xffff); + load_seg_vm(R_FS, new_fs & 0xffff); + load_seg_vm(R_GS, new_gs & 0xffff); + + env->eip = new_eip & 0xffff; + ESP = new_esp; +} + +void helper_iret_protected(int shift, int next_eip) +{ + int tss_selector, type; + uint32_t e1, e2; + + /* specific case for TSS */ + if (env->eflags & NT_MASK) { +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + raise_exception_err(env, EXCP0D_GPF, 0); + } +#endif + tss_selector = lduw_kernel(env->tr.base + 0); + if (tss_selector & 4) { + raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc); + } + if (load_segment(&e1, &e2, tss_selector) != 0) { + raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc); + } + type = (e2 >> DESC_TYPE_SHIFT) & 0x17; + /* NOTE: we check both segment and busy TSS */ + if (type != 3) { + raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc); + } + switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip); + } else { + helper_ret_protected(shift, 1, 0); + } + env->hflags2 &= ~HF2_NMI_MASK; +} + +void helper_lret_protected(int shift, int addend) +{ + 
helper_ret_protected(shift, 0, addend); +} + +void helper_sysenter(void) +{ + if (env->sysenter_cs == 0) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK); + cpu_x86_set_cpl(env, 0); + +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | + DESC_L_MASK); + } else +#endif + { + cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); + } + cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_W_MASK | DESC_A_MASK); + ESP = env->sysenter_esp; + EIP = env->sysenter_eip; +} + +void helper_sysexit(int dflag) +{ + int cpl; + + cpl = env->hflags & HF_CPL_MASK; + if (env->sysenter_cs == 0 || cpl != 0) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + cpu_x86_set_cpl(env, 3); +#ifdef TARGET_X86_64 + if (dflag == 2) { + cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | + 3, 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | + DESC_L_MASK); + cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | + 3, 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_W_MASK | DESC_A_MASK); + } else +#endif + { + cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | + 3, 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | + 3, 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_W_MASK | DESC_A_MASK); + } + ESP = ECX; + EIP = EDX; +} + +target_ulong helper_lsl(target_ulong selector1) +{ + unsigned int limit; + uint32_t e1, e2, eflags, selector; + int rpl, dpl, cpl, type; + + selector = selector1 & 0xffff; + eflags = helper_cc_compute_all(CC_OP); + if ((selector & 0xfffc) == 0) { + goto fail; + } + if (load_segment(&e1, &e2, selector) != 0) { + goto fail; + } + rpl = selector & 3; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + if (e2 & DESC_S_MASK) { + if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) { + /* conforming */ + } else { + if (dpl < cpl || dpl < rpl) { + goto fail; + } + } + } else { + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + switch (type) { + case 1: + case 2: + case 3: + case 9: + case 11: + break; + default: + goto fail; + } + if (dpl < cpl || dpl < rpl) { + fail: + CC_SRC = eflags & ~CC_Z; + return 0; + } + } + limit = get_seg_limit(e1, e2); + CC_SRC = eflags | CC_Z; + return limit; +} + +target_ulong helper_lar(target_ulong selector1) +{ + uint32_t e1, e2, eflags, selector; + int rpl, dpl, cpl, type; + + selector = selector1 & 0xffff; + eflags = helper_cc_compute_all(CC_OP); + if ((selector & 0xfffc) == 0) { + goto fail; + } + if (load_segment(&e1, &e2, selector) != 0) { + goto fail; + } + rpl = selector & 3; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + if (e2 & DESC_S_MASK) { + if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) { + /* conforming */ + } else { + if (dpl < cpl || dpl < rpl) { + 
goto fail; + } + } + } else { + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + switch (type) { + case 1: + case 2: + case 3: + case 4: + case 5: + case 9: + case 11: + case 12: + break; + default: + goto fail; + } + if (dpl < cpl || dpl < rpl) { + fail: + CC_SRC = eflags & ~CC_Z; + return 0; + } + } + CC_SRC = eflags | CC_Z; + return e2 & 0x00f0ff00; +} + +void helper_verr(target_ulong selector1) +{ + uint32_t e1, e2, eflags, selector; + int rpl, dpl, cpl; + + selector = selector1 & 0xffff; + eflags = helper_cc_compute_all(CC_OP); + if ((selector & 0xfffc) == 0) { + goto fail; + } + if (load_segment(&e1, &e2, selector) != 0) { + goto fail; + } + if (!(e2 & DESC_S_MASK)) { + goto fail; + } + rpl = selector & 3; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + if (e2 & DESC_CS_MASK) { + if (!(e2 & DESC_R_MASK)) { + goto fail; + } + if (!(e2 & DESC_C_MASK)) { + if (dpl < cpl || dpl < rpl) { + goto fail; + } + } + } else { + if (dpl < cpl || dpl < rpl) { + fail: + CC_SRC = eflags & ~CC_Z; + return; + } + } + CC_SRC = eflags | CC_Z; +} + +void helper_verw(target_ulong selector1) +{ + uint32_t e1, e2, eflags, selector; + int rpl, dpl, cpl; + + selector = selector1 & 0xffff; + eflags = helper_cc_compute_all(CC_OP); + if ((selector & 0xfffc) == 0) { + goto fail; + } + if (load_segment(&e1, &e2, selector) != 0) { + goto fail; + } + if (!(e2 & DESC_S_MASK)) { + goto fail; + } + rpl = selector & 3; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + if (e2 & DESC_CS_MASK) { + goto fail; + } else { + if (dpl < cpl || dpl < rpl) { + goto fail; + } + if (!(e2 & DESC_W_MASK)) { + fail: + CC_SRC = eflags & ~CC_Z; + return; + } + } + CC_SRC = eflags | CC_Z; +} + +#if defined(CONFIG_USER_ONLY) +void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector) +{ + CPUX86State *saved_env; + + saved_env = env; + env = s; + if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) { + selector &= 0xffff; + cpu_x86_load_seg_cache(env, seg_reg, selector, + (selector << 4), 0xffff, 0); + } else { + helper_load_seg(seg_reg, selector); + } + env = saved_env; +} +#endif diff --git a/target-i386/shift_helper_template.h b/target-i386/shift_helper_template.h new file mode 100644 index 0000000000..239ee0973c --- /dev/null +++ b/target-i386/shift_helper_template.h @@ -0,0 +1,110 @@ +/* + * x86 shift helpers + * + * Copyright (c) 2008 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#define DATA_BITS (1 << (3 + SHIFT)) +#define SHIFT_MASK (DATA_BITS - 1) +#if DATA_BITS <= 32 +#define SHIFT1_MASK 0x1f +#else +#define SHIFT1_MASK 0x3f +#endif + +#if DATA_BITS == 8 +#define SUFFIX b +#define DATA_MASK 0xff +#elif DATA_BITS == 16 +#define SUFFIX w +#define DATA_MASK 0xffff +#elif DATA_BITS == 32 +#define SUFFIX l +#define DATA_MASK 0xffffffff +#elif DATA_BITS == 64 +#define SUFFIX q +#define DATA_MASK 0xffffffffffffffffULL +#else +#error unhandled operand size +#endif + +target_ulong glue(helper_rcl, SUFFIX)(target_ulong t0, target_ulong t1) +{ + int count, eflags; + target_ulong src; + target_long res; + + count = t1 & SHIFT1_MASK; +#if DATA_BITS == 16 + count = rclw_table[count]; +#elif DATA_BITS == 8 + count = rclb_table[count]; +#endif + if (count) { + eflags = helper_cc_compute_all(CC_OP); + t0 &= DATA_MASK; + src = t0; + res = (t0 << count) | ((target_ulong)(eflags & CC_C) << (count - 1)); + if (count > 1) { + res |= t0 >> (DATA_BITS + 1 - count); + } + t0 = res; + env->cc_tmp = (eflags & ~(CC_C | CC_O)) | + (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) | + ((src >> (DATA_BITS - count)) & CC_C); + } else { + env->cc_tmp = -1; + } + return t0; +} + +target_ulong glue(helper_rcr, SUFFIX)(target_ulong t0, target_ulong t1) +{ + int count, eflags; + target_ulong src; + target_long res; + + count = t1 & SHIFT1_MASK; +#if DATA_BITS == 16 + count = rclw_table[count]; +#elif DATA_BITS == 8 + count = rclb_table[count]; +#endif + if (count) { + eflags = helper_cc_compute_all(CC_OP); + t0 &= DATA_MASK; + src = t0; + res = (t0 >> count) | + ((target_ulong)(eflags & CC_C) << (DATA_BITS - count)); + if (count > 1) { + res |= t0 << (DATA_BITS + 1 - count); + } + t0 = res; + env->cc_tmp = (eflags & ~(CC_C | CC_O)) | + (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) | + ((src >> (count - 1)) & CC_C); + } else { + env->cc_tmp = -1; + } + return t0; +} + +#undef DATA_BITS +#undef SHIFT_MASK +#undef SHIFT1_MASK +#undef DATA_TYPE +#undef DATA_MASK +#undef SUFFIX diff --git a/target-i386/smm_helper.c b/target-i386/smm_helper.c new file mode 100644 index 0000000000..bc1bfa2a59 --- /dev/null +++ b/target-i386/smm_helper.c @@ -0,0 +1,307 @@ +/* + * x86 SMM helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "cpu.h" +#include "dyngen-exec.h" +#include "helper.h" + +/* SMM support */ + +#if defined(CONFIG_USER_ONLY) + +void do_smm_enter(CPUX86State *env1) +{ +} + +void helper_rsm(void) +{ +} + +#else + +#ifdef TARGET_X86_64 +#define SMM_REVISION_ID 0x00020064 +#else +#define SMM_REVISION_ID 0x00020000 +#endif + +void do_smm_enter(CPUX86State *env1) +{ + target_ulong sm_state; + SegmentCache *dt; + int i, offset; + CPUX86State *saved_env; + + saved_env = env; + env = env1; + + qemu_log_mask(CPU_LOG_INT, "SMM: enter\n"); + log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP); + + env->hflags |= HF_SMM_MASK; + cpu_smm_update(env); + + sm_state = env->smbase + 0x8000; + +#ifdef TARGET_X86_64 + for (i = 0; i < 6; i++) { + dt = &env->segs[i]; + offset = 0x7e00 + i * 16; + stw_phys(sm_state + offset, dt->selector); + stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff); + stl_phys(sm_state + offset + 4, dt->limit); + stq_phys(sm_state + offset + 8, dt->base); + } + + stq_phys(sm_state + 0x7e68, env->gdt.base); + stl_phys(sm_state + 0x7e64, env->gdt.limit); + + stw_phys(sm_state + 0x7e70, env->ldt.selector); + stq_phys(sm_state + 0x7e78, env->ldt.base); + stl_phys(sm_state + 0x7e74, env->ldt.limit); + stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff); + + stq_phys(sm_state + 0x7e88, env->idt.base); + stl_phys(sm_state + 0x7e84, env->idt.limit); + + stw_phys(sm_state + 0x7e90, env->tr.selector); + stq_phys(sm_state + 0x7e98, env->tr.base); + stl_phys(sm_state + 0x7e94, env->tr.limit); + stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff); + + stq_phys(sm_state + 0x7ed0, env->efer); + + stq_phys(sm_state + 0x7ff8, EAX); + stq_phys(sm_state + 0x7ff0, ECX); + stq_phys(sm_state + 0x7fe8, EDX); + stq_phys(sm_state + 0x7fe0, EBX); + stq_phys(sm_state + 0x7fd8, ESP); + stq_phys(sm_state + 0x7fd0, EBP); + stq_phys(sm_state + 0x7fc8, ESI); + stq_phys(sm_state + 0x7fc0, EDI); + for (i = 8; i < 16; i++) { + stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]); + } + stq_phys(sm_state + 0x7f78, env->eip); + stl_phys(sm_state + 0x7f70, cpu_compute_eflags(env)); + stl_phys(sm_state + 0x7f68, env->dr[6]); + stl_phys(sm_state + 0x7f60, env->dr[7]); + + stl_phys(sm_state + 0x7f48, env->cr[4]); + stl_phys(sm_state + 0x7f50, env->cr[3]); + stl_phys(sm_state + 0x7f58, env->cr[0]); + + stl_phys(sm_state + 0x7efc, SMM_REVISION_ID); + stl_phys(sm_state + 0x7f00, env->smbase); +#else + stl_phys(sm_state + 0x7ffc, env->cr[0]); + stl_phys(sm_state + 0x7ff8, env->cr[3]); + stl_phys(sm_state + 0x7ff4, cpu_compute_eflags(env)); + stl_phys(sm_state + 0x7ff0, env->eip); + stl_phys(sm_state + 0x7fec, EDI); + stl_phys(sm_state + 0x7fe8, ESI); + stl_phys(sm_state + 0x7fe4, EBP); + stl_phys(sm_state + 0x7fe0, ESP); + stl_phys(sm_state + 0x7fdc, EBX); + stl_phys(sm_state + 0x7fd8, EDX); + stl_phys(sm_state + 0x7fd4, ECX); + stl_phys(sm_state + 0x7fd0, EAX); + stl_phys(sm_state + 0x7fcc, env->dr[6]); + stl_phys(sm_state + 0x7fc8, env->dr[7]); + + stl_phys(sm_state + 0x7fc4, env->tr.selector); + stl_phys(sm_state + 0x7f64, env->tr.base); + stl_phys(sm_state + 0x7f60, env->tr.limit); + stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff); + + stl_phys(sm_state + 0x7fc0, env->ldt.selector); + stl_phys(sm_state + 0x7f80, env->ldt.base); + stl_phys(sm_state + 0x7f7c, env->ldt.limit); + stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff); + + stl_phys(sm_state + 0x7f74, env->gdt.base); + stl_phys(sm_state + 0x7f70, env->gdt.limit); + + stl_phys(sm_state + 0x7f58, env->idt.base); + 
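+ /* Editor's note, not part of the original patch: sm_state equals
+ env->smbase + 0x8000, so offsets such as 0x7ffc in this block correspond
+ to SMBASE + 0xfffc in the legacy SMM state-save area, and the
+ "(flags >> 8) & 0xf0ff" stores pack the segment descriptor attribute bits
+ into the 16-bit access-rights format kept there. */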
stl_phys(sm_state + 0x7f54, env->idt.limit); + + for (i = 0; i < 6; i++) { + dt = &env->segs[i]; + if (i < 3) { + offset = 0x7f84 + i * 12; + } else { + offset = 0x7f2c + (i - 3) * 12; + } + stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector); + stl_phys(sm_state + offset + 8, dt->base); + stl_phys(sm_state + offset + 4, dt->limit); + stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff); + } + stl_phys(sm_state + 0x7f14, env->cr[4]); + + stl_phys(sm_state + 0x7efc, SMM_REVISION_ID); + stl_phys(sm_state + 0x7ef8, env->smbase); +#endif + /* init SMM cpu state */ + +#ifdef TARGET_X86_64 + cpu_load_efer(env, 0); +#endif + cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | + DF_MASK)); + env->eip = 0x00008000; + cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase, + 0xffffffff, 0); + cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0); + cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0); + cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0); + cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0); + cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0); + + cpu_x86_update_cr0(env, + env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | + CR0_PG_MASK)); + cpu_x86_update_cr4(env, 0); + env->dr[7] = 0x00000400; + CC_OP = CC_OP_EFLAGS; + env = saved_env; +} + +void helper_rsm(void) +{ + target_ulong sm_state; + int i, offset; + uint32_t val; + + sm_state = env->smbase + 0x8000; +#ifdef TARGET_X86_64 + cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0)); + + for (i = 0; i < 6; i++) { + offset = 0x7e00 + i * 16; + cpu_x86_load_seg_cache(env, i, + lduw_phys(sm_state + offset), + ldq_phys(sm_state + offset + 8), + ldl_phys(sm_state + offset + 4), + (lduw_phys(sm_state + offset + 2) & + 0xf0ff) << 8); + } + + env->gdt.base = ldq_phys(sm_state + 0x7e68); + env->gdt.limit = ldl_phys(sm_state + 0x7e64); + + env->ldt.selector = lduw_phys(sm_state + 0x7e70); + env->ldt.base = ldq_phys(sm_state + 0x7e78); + env->ldt.limit = ldl_phys(sm_state + 0x7e74); + env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8; + + env->idt.base = ldq_phys(sm_state + 0x7e88); + env->idt.limit = ldl_phys(sm_state + 0x7e84); + + env->tr.selector = lduw_phys(sm_state + 0x7e90); + env->tr.base = ldq_phys(sm_state + 0x7e98); + env->tr.limit = ldl_phys(sm_state + 0x7e94); + env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8; + + EAX = ldq_phys(sm_state + 0x7ff8); + ECX = ldq_phys(sm_state + 0x7ff0); + EDX = ldq_phys(sm_state + 0x7fe8); + EBX = ldq_phys(sm_state + 0x7fe0); + ESP = ldq_phys(sm_state + 0x7fd8); + EBP = ldq_phys(sm_state + 0x7fd0); + ESI = ldq_phys(sm_state + 0x7fc8); + EDI = ldq_phys(sm_state + 0x7fc0); + for (i = 8; i < 16; i++) { + env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8); + } + env->eip = ldq_phys(sm_state + 0x7f78); + cpu_load_eflags(env, ldl_phys(sm_state + 0x7f70), + ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); + env->dr[6] = ldl_phys(sm_state + 0x7f68); + env->dr[7] = ldl_phys(sm_state + 0x7f60); + + cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48)); + cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50)); + cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58)); + + val = ldl_phys(sm_state + 0x7efc); /* revision ID */ + if (val & 0x20000) { + env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff; + } +#else + cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc)); + cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8)); + cpu_load_eflags(env, ldl_phys(sm_state + 0x7ff4), + ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); + 
env->eip = ldl_phys(sm_state + 0x7ff0); + EDI = ldl_phys(sm_state + 0x7fec); + ESI = ldl_phys(sm_state + 0x7fe8); + EBP = ldl_phys(sm_state + 0x7fe4); + ESP = ldl_phys(sm_state + 0x7fe0); + EBX = ldl_phys(sm_state + 0x7fdc); + EDX = ldl_phys(sm_state + 0x7fd8); + ECX = ldl_phys(sm_state + 0x7fd4); + EAX = ldl_phys(sm_state + 0x7fd0); + env->dr[6] = ldl_phys(sm_state + 0x7fcc); + env->dr[7] = ldl_phys(sm_state + 0x7fc8); + + env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff; + env->tr.base = ldl_phys(sm_state + 0x7f64); + env->tr.limit = ldl_phys(sm_state + 0x7f60); + env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8; + + env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff; + env->ldt.base = ldl_phys(sm_state + 0x7f80); + env->ldt.limit = ldl_phys(sm_state + 0x7f7c); + env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8; + + env->gdt.base = ldl_phys(sm_state + 0x7f74); + env->gdt.limit = ldl_phys(sm_state + 0x7f70); + + env->idt.base = ldl_phys(sm_state + 0x7f58); + env->idt.limit = ldl_phys(sm_state + 0x7f54); + + for (i = 0; i < 6; i++) { + if (i < 3) { + offset = 0x7f84 + i * 12; + } else { + offset = 0x7f2c + (i - 3) * 12; + } + cpu_x86_load_seg_cache(env, i, + ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff, + ldl_phys(sm_state + offset + 8), + ldl_phys(sm_state + offset + 4), + (ldl_phys(sm_state + offset) & 0xf0ff) << 8); + } + cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14)); + + val = ldl_phys(sm_state + 0x7efc); /* revision ID */ + if (val & 0x20000) { + env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff; + } +#endif + CC_OP = CC_OP_EFLAGS; + env->hflags &= ~HF_SMM_MASK; + cpu_smm_update(env); + + qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n"); + log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP); +} + +#endif /* !CONFIG_USER_ONLY */ diff --git a/target-i386/svm_helper.c b/target-i386/svm_helper.c new file mode 100644 index 0000000000..64d842c82c --- /dev/null +++ b/target-i386/svm_helper.c @@ -0,0 +1,716 @@ +/* + * x86 SVM helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "cpu.h" +#include "dyngen-exec.h" +#include "helper.h" + +/* Secure Virtual Machine helpers */ + +#if defined(CONFIG_USER_ONLY) + +void helper_vmrun(int aflag, int next_eip_addend) +{ +} + +void helper_vmmcall(void) +{ +} + +void helper_vmload(int aflag) +{ +} + +void helper_vmsave(int aflag) +{ +} + +void helper_stgi(void) +{ +} + +void helper_clgi(void) +{ +} + +void helper_skinit(void) +{ +} + +void helper_invlpga(int aflag) +{ +} + +void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1) +{ +} + +void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1) +{ +} + +void helper_svm_check_intercept_param(uint32_t type, uint64_t param) +{ +} + +void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type, + uint64_t param) +{ +} + +void helper_svm_check_io(uint32_t port, uint32_t param, + uint32_t next_eip_addend) +{ +} +#else + +static inline void svm_save_seg(target_phys_addr_t addr, + const SegmentCache *sc) +{ + stw_phys(addr + offsetof(struct vmcb_seg, selector), + sc->selector); + stq_phys(addr + offsetof(struct vmcb_seg, base), + sc->base); + stl_phys(addr + offsetof(struct vmcb_seg, limit), + sc->limit); + stw_phys(addr + offsetof(struct vmcb_seg, attrib), + ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00)); +} + +static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc) +{ + unsigned int flags; + + sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector)); + sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base)); + sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit)); + flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib)); + sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12); +} + +static inline void svm_load_seg_cache(target_phys_addr_t addr, + CPUX86State *env, int seg_reg) +{ + SegmentCache sc1, *sc = &sc1; + + svm_load_seg(addr, sc); + cpu_x86_load_seg_cache(env, seg_reg, sc->selector, + sc->base, sc->limit, sc->flags); +} + +void helper_vmrun(int aflag, int next_eip_addend) +{ + target_ulong addr; + uint32_t event_inj; + uint32_t int_ctl; + + helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0); + + if (aflag == 2) { + addr = EAX; + } else { + addr = (uint32_t)EAX; + } + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! 
" TARGET_FMT_lx "\n", addr); + + env->vm_vmcb = addr; + + /* save the current CPU state in the hsave page */ + stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), + env->gdt.base); + stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), + env->gdt.limit); + + stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), + env->idt.base); + stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), + env->idt.limit); + + stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]); + stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]); + stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]); + stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]); + stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]); + stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]); + + stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer); + stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), + cpu_compute_eflags(env)); + + svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es), + &env->segs[R_ES]); + svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs), + &env->segs[R_CS]); + svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss), + &env->segs[R_SS]); + svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds), + &env->segs[R_DS]); + + stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), + EIP + next_eip_addend); + stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP); + stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX); + + /* load the interception bitmaps so we do not need to access the + vmcb in svm mode */ + env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + control.intercept)); + env->intercept_cr_read = lduw_phys(env->vm_vmcb + + offsetof(struct vmcb, + control.intercept_cr_read)); + env->intercept_cr_write = lduw_phys(env->vm_vmcb + + offsetof(struct vmcb, + control.intercept_cr_write)); + env->intercept_dr_read = lduw_phys(env->vm_vmcb + + offsetof(struct vmcb, + control.intercept_dr_read)); + env->intercept_dr_write = lduw_phys(env->vm_vmcb + + offsetof(struct vmcb, + control.intercept_dr_write)); + env->intercept_exceptions = ldl_phys(env->vm_vmcb + + offsetof(struct vmcb, + control.intercept_exceptions + )); + + /* enable intercepts */ + env->hflags |= HF_SVMI_MASK; + + env->tsc_offset = ldq_phys(env->vm_vmcb + + offsetof(struct vmcb, control.tsc_offset)); + + env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + save.gdtr.base)); + env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, + save.gdtr.limit)); + + env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + save.idtr.base)); + env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, + save.idtr.limit)); + + /* clear exit_info_2 so we behave like the real hardware */ + stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0); + + cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + save.cr0))); + cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + save.cr4))); + cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + save.cr3))); + env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2)); + int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); + env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); + if (int_ctl & V_INTR_MASKING_MASK) { + env->v_tpr = int_ctl & V_TPR_MASK; + env->hflags2 |= HF2_VINTR_MASK; + if 
(env->eflags & IF_MASK) { + env->hflags2 |= HF2_HIF_MASK; + } + } + + cpu_load_efer(env, + ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer))); + env->eflags = 0; + cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + save.rflags)), + ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); + CC_OP = CC_OP_EFLAGS; + + svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es), + env, R_ES); + svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs), + env, R_CS); + svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss), + env, R_SS); + svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds), + env, R_DS); + + EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip)); + env->eip = EIP; + ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp)); + EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax)); + env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7)); + env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6)); + cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, + save.cpl))); + + /* FIXME: guest state consistency checks */ + + switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) { + case TLB_CONTROL_DO_NOTHING: + break; + case TLB_CONTROL_FLUSH_ALL_ASID: + /* FIXME: this is not 100% correct but should work for now */ + tlb_flush(env, 1); + break; + } + + env->hflags2 |= HF2_GIF_MASK; + + if (int_ctl & V_IRQ_MASK) { + env->interrupt_request |= CPU_INTERRUPT_VIRQ; + } + + /* maybe we need to inject an event */ + event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, + control.event_inj)); + if (event_inj & SVM_EVTINJ_VALID) { + uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK; + uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR; + uint32_t event_inj_err = ldl_phys(env->vm_vmcb + + offsetof(struct vmcb, + control.event_inj_err)); + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err); + /* FIXME: need to implement valid_err */ + switch (event_inj & SVM_EVTINJ_TYPE_MASK) { + case SVM_EVTINJ_TYPE_INTR: + env->exception_index = vector; + env->error_code = event_inj_err; + env->exception_is_int = 0; + env->exception_next_eip = -1; + qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR"); + /* XXX: is it always correct? */ + do_interrupt_x86_hardirq(env, vector, 1); + break; + case SVM_EVTINJ_TYPE_NMI: + env->exception_index = EXCP02_NMI; + env->error_code = event_inj_err; + env->exception_is_int = 0; + env->exception_next_eip = EIP; + qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI"); + cpu_loop_exit(env); + break; + case SVM_EVTINJ_TYPE_EXEPT: + env->exception_index = vector; + env->error_code = event_inj_err; + env->exception_is_int = 0; + env->exception_next_eip = -1; + qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT"); + cpu_loop_exit(env); + break; + case SVM_EVTINJ_TYPE_SOFT: + env->exception_index = vector; + env->error_code = event_inj_err; + env->exception_is_int = 1; + env->exception_next_eip = EIP; + qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT"); + cpu_loop_exit(env); + break; + } + qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, + env->error_code); + } +} + +void helper_vmmcall(void) +{ + helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0); + raise_exception(env, EXCP06_ILLOP); +} + +void helper_vmload(int aflag) +{ + target_ulong addr; + + helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0); + + if (aflag == 2) { + addr = EAX; + } else { + addr = (uint32_t)EAX; + } + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! 
" TARGET_FMT_lx + "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", + addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)), + env->segs[R_FS].base); + + svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs), + env, R_FS); + svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs), + env, R_GS); + svm_load_seg(addr + offsetof(struct vmcb, save.tr), + &env->tr); + svm_load_seg(addr + offsetof(struct vmcb, save.ldtr), + &env->ldt); + +#ifdef TARGET_X86_64 + env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, + save.kernel_gs_base)); + env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar)); + env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar)); + env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask)); +#endif + env->star = ldq_phys(addr + offsetof(struct vmcb, save.star)); + env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs)); + env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, + save.sysenter_esp)); + env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, + save.sysenter_eip)); +} + +void helper_vmsave(int aflag) +{ + target_ulong addr; + + helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0); + + if (aflag == 2) { + addr = EAX; + } else { + addr = (uint32_t)EAX; + } + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx + "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", + addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)), + env->segs[R_FS].base); + + svm_save_seg(addr + offsetof(struct vmcb, save.fs), + &env->segs[R_FS]); + svm_save_seg(addr + offsetof(struct vmcb, save.gs), + &env->segs[R_GS]); + svm_save_seg(addr + offsetof(struct vmcb, save.tr), + &env->tr); + svm_save_seg(addr + offsetof(struct vmcb, save.ldtr), + &env->ldt); + +#ifdef TARGET_X86_64 + stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), + env->kernelgsbase); + stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar); + stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar); + stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask); +#endif + stq_phys(addr + offsetof(struct vmcb, save.star), env->star); + stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs); + stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), + env->sysenter_esp); + stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), + env->sysenter_eip); +} + +void helper_stgi(void) +{ + helper_svm_check_intercept_param(SVM_EXIT_STGI, 0); + env->hflags2 |= HF2_GIF_MASK; +} + +void helper_clgi(void) +{ + helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0); + env->hflags2 &= ~HF2_GIF_MASK; +} + +void helper_skinit(void) +{ + helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0); + /* XXX: not implemented */ + raise_exception(env, EXCP06_ILLOP); +} + +void helper_invlpga(int aflag) +{ + target_ulong addr; + + helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0); + + if (aflag == 2) { + addr = EAX; + } else { + addr = (uint32_t)EAX; + } + + /* XXX: could use the ASID to see if it is needed to do the + flush */ + tlb_flush_page(env, addr); +} + +void helper_svm_check_intercept_param(uint32_t type, uint64_t param) +{ + if (likely(!(env->hflags & HF_SVMI_MASK))) { + return; + } + switch (type) { + case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8: + if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) { + helper_vmexit(type, param); + } + break; + case SVM_EXIT_WRITE_CR0 ... 
SVM_EXIT_WRITE_CR0 + 8: + if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) { + helper_vmexit(type, param); + } + break; + case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7: + if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) { + helper_vmexit(type, param); + } + break; + case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7: + if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) { + helper_vmexit(type, param); + } + break; + case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31: + if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) { + helper_vmexit(type, param); + } + break; + case SVM_EXIT_MSR: + if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) { + /* FIXME: this should be read in at vmrun (faster this way?) */ + uint64_t addr = ldq_phys(env->vm_vmcb + + offsetof(struct vmcb, + control.msrpm_base_pa)); + uint32_t t0, t1; + + switch ((uint32_t)ECX) { + case 0 ... 0x1fff: + t0 = (ECX * 2) % 8; + t1 = (ECX * 2) / 8; + break; + case 0xc0000000 ... 0xc0001fff: + t0 = (8192 + ECX - 0xc0000000) * 2; + t1 = (t0 / 8); + t0 %= 8; + break; + case 0xc0010000 ... 0xc0011fff: + t0 = (16384 + ECX - 0xc0010000) * 2; + t1 = (t0 / 8); + t0 %= 8; + break; + default: + helper_vmexit(type, param); + t0 = 0; + t1 = 0; + break; + } + if (ldub_phys(addr + t1) & ((1 << param) << t0)) { + helper_vmexit(type, param); + } + } + break; + default: + if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) { + helper_vmexit(type, param); + } + break; + } +} + +void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, + uint64_t param) +{ + CPUX86State *saved_env; + + saved_env = env; + env = env1; + helper_svm_check_intercept_param(type, param); + env = saved_env; +} + +void helper_svm_check_io(uint32_t port, uint32_t param, + uint32_t next_eip_addend) +{ + if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) { + /* FIXME: this should be read in at vmrun (faster this way?) 
*/ + uint64_t addr = ldq_phys(env->vm_vmcb + + offsetof(struct vmcb, control.iopm_base_pa)); + uint16_t mask = (1 << ((param >> 4) & 7)) - 1; + + if (lduw_phys(addr + port / 8) & (mask << (port & 7))) { + /* next EIP */ + stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), + env->eip + next_eip_addend); + helper_vmexit(SVM_EXIT_IOIO, param | (port << 16)); + } + } +} + +/* Note: currently only 32 bits of exit_code are used */ +void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1) +{ + uint32_t int_ctl; + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" + PRIx64 ", " TARGET_FMT_lx ")!\n", + exit_code, exit_info_1, + ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + control.exit_info_2)), + EIP); + + if (env->hflags & HF_INHIBIT_IRQ_MASK) { + stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), + SVM_INTERRUPT_SHADOW_MASK); + env->hflags &= ~HF_INHIBIT_IRQ_MASK; + } else { + stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0); + } + + /* Save the VM state in the vmcb */ + svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es), + &env->segs[R_ES]); + svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs), + &env->segs[R_CS]); + svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss), + &env->segs[R_SS]); + svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds), + &env->segs[R_DS]); + + stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), + env->gdt.base); + stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), + env->gdt.limit); + + stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), + env->idt.base); + stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), + env->idt.limit); + + stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer); + stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]); + stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]); + stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]); + stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]); + + int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); + int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK); + int_ctl |= env->v_tpr & V_TPR_MASK; + if (env->interrupt_request & CPU_INTERRUPT_VIRQ) { + int_ctl |= V_IRQ_MASK; + } + stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl); + + stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), + cpu_compute_eflags(env)); + stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip); + stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP); + stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX); + stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]); + stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]); + stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), + env->hflags & HF_CPL_MASK); + + /* Reload the host state from vm_hsave */ + env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); + env->hflags &= ~HF_SVMI_MASK; + env->intercept = 0; + env->intercept_exceptions = 0; + env->interrupt_request &= ~CPU_INTERRUPT_VIRQ; + env->tsc_offset = 0; + + env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, + save.gdtr.base)); + env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, + save.gdtr.limit)); + + env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, + save.idtr.base)); + env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, + save.idtr.limit)); + + 
cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, + save.cr0)) | + CR0_PE_MASK); + cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, + save.cr4))); + cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, + save.cr3))); + /* we need to set the efer after the crs so the hidden flags get + set properly */ + cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, + save.efer))); + env->eflags = 0; + cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, + save.rflags)), + ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); + CC_OP = CC_OP_EFLAGS; + + svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es), + env, R_ES); + svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs), + env, R_CS); + svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss), + env, R_SS); + svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds), + env, R_DS); + + EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip)); + ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp)); + EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax)); + + env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6)); + env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7)); + + /* other setups */ + cpu_x86_set_cpl(env, 0); + stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), + exit_code); + stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), + exit_info_1); + + stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info), + ldl_phys(env->vm_vmcb + offsetof(struct vmcb, + control.event_inj))); + stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err), + ldl_phys(env->vm_vmcb + offsetof(struct vmcb, + control.event_inj_err))); + stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0); + + env->hflags2 &= ~HF2_GIF_MASK; + /* FIXME: Resets the current ASID register to zero (host ASID). */ + + /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */ + + /* Clears the TSC_OFFSET inside the processor. */ + + /* If the host is in PAE mode, the processor reloads the host's PDPEs + from the page table indicated the host's CR3. If the PDPEs contain + illegal state, the processor causes a shutdown. */ + + /* Forces CR0.PE = 1, RFLAGS.VM = 0. */ + env->cr[0] |= CR0_PE_MASK; + env->eflags &= ~VM_MASK; + + /* Disables all breakpoints in the host DR7 register. */ + + /* Checks the reloaded host state for consistency. */ + + /* If the host's rIP reloaded by #VMEXIT is outside the limit of the + host's code segment or non-canonical (in the case of long mode), a + #GP fault is delivered inside the host. */ + + /* remove any pending exception */ + env->exception_index = -1; + env->error_code = 0; + env->old_exception = -1; + + cpu_loop_exit(env); +} + +void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1) +{ + env = nenv; + helper_vmexit(exit_code, exit_info_1); +} + +#endif diff --git a/target-i386/translate.c b/target-i386/translate.c index c1ede1a756..1988dae290 100644 --- a/target-i386/translate.c +++ b/target-i386/translate.c @@ -38,18 +38,10 @@ #define PREFIX_ADR 0x10 #ifdef TARGET_X86_64 -#define X86_64_ONLY(x) x -#define X86_64_DEF(...) 
__VA_ARGS__ #define CODE64(s) ((s)->code64) #define REX_X(s) ((s)->rex_x) #define REX_B(s) ((s)->rex_b) -/* XXX: gcc generates push/pop in some opcodes, so we cannot use them */ -#if 1 -#define BUGGY_64(x) NULL -#endif #else -#define X86_64_ONLY(x) NULL -#define X86_64_DEF(...) #define CODE64(s) 0 #define REX_X(s) 0 #define REX_B(s) 0 @@ -271,11 +263,30 @@ static inline void gen_op_andl_A0_ffff(void) #define REG_LH_OFFSET 4 #endif +/* In instruction encodings for byte register accesses the + * register number usually indicates "low 8 bits of register N"; + * however there are some special cases where N 4..7 indicates + * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return + * true for this special case, false otherwise. + */ +static inline bool byte_reg_is_xH(int reg) +{ + if (reg < 4) { + return false; + } +#ifdef TARGET_X86_64 + if (reg >= 8 || x86_64_hregs) { + return false; + } +#endif + return true; +} + static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0) { switch(ot) { case OT_BYTE: - if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) { + if (!byte_reg_is_xH(reg)) { tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8); } else { tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8); @@ -330,19 +341,11 @@ static inline void gen_op_mov_reg_A0(int size, int reg) static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg) { - switch(ot) { - case OT_BYTE: - if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) { - goto std_case; - } else { - tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8); - tcg_gen_ext8u_tl(t0, t0); - } - break; - default: - std_case: + if (ot == OT_BYTE && byte_reg_is_xH(reg)) { + tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8); + tcg_gen_ext8u_tl(t0, t0); + } else { tcg_gen_mov_tl(t0, cpu_regs[reg]); - break; } } @@ -2659,7 +2662,7 @@ static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip) if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(cur_eip); - gen_helper_raise_exception(tcg_const_i32(trapno)); + gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno)); s->is_jmp = DISAS_TB_JUMP; } @@ -2671,7 +2674,7 @@ static void gen_interrupt(DisasContext *s, int intno, if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(cur_eip); - gen_helper_raise_interrupt(tcg_const_i32(intno), + gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno), tcg_const_i32(next_eip - cur_eip)); s->is_jmp = DISAS_TB_JUMP; } @@ -2786,6 +2789,14 @@ static inline void gen_op_movq_env_0(int d_offset) tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset); } +typedef void (*SSEFunc_i_p)(TCGv_i32 val, TCGv_ptr reg); +typedef void (*SSEFunc_l_p)(TCGv_i64 val, TCGv_ptr reg); +typedef void (*SSEFunc_0_pi)(TCGv_ptr reg, TCGv_i32 val); +typedef void (*SSEFunc_0_pl)(TCGv_ptr reg, TCGv_i64 val); +typedef void (*SSEFunc_0_pp)(TCGv_ptr reg_a, TCGv_ptr reg_b); +typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val); +typedef void (*SSEFunc_0_ppt)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv val); + #define SSE_SPECIAL ((void *)1) #define SSE_DUMMY ((void *)2) @@ -2793,7 +2804,7 @@ static inline void gen_op_movq_env_0(int d_offset) #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \ gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, } -static void *sse_op_table1[256][4] = { +static const SSEFunc_0_pp sse_op_table1[256][4] = { /* 3DNow! extensions */ [0x0e] = { SSE_DUMMY }, /* femms */ [0x0f] = { SSE_DUMMY }, /* pf... 
*/ @@ -2834,7 +2845,8 @@ static void *sse_op_table1[256][4] = { [0x5f] = SSE_FOP(max), [0xc2] = SSE_FOP(cmpeq), - [0xc6] = { gen_helper_shufps, gen_helper_shufpd }, + [0xc6] = { (SSEFunc_0_pp)gen_helper_shufps, + (SSEFunc_0_pp)gen_helper_shufpd }, /* XXX: casts */ [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */ [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */ @@ -2856,10 +2868,10 @@ static void *sse_op_table1[256][4] = { [0x6d] = { NULL, gen_helper_punpckhqdq_xmm }, [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */ [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movqdu */ - [0x70] = { gen_helper_pshufw_mmx, - gen_helper_pshufd_xmm, - gen_helper_pshufhw_xmm, - gen_helper_pshuflw_xmm }, + [0x70] = { (SSEFunc_0_pp)gen_helper_pshufw_mmx, + (SSEFunc_0_pp)gen_helper_pshufd_xmm, + (SSEFunc_0_pp)gen_helper_pshufhw_xmm, + (SSEFunc_0_pp)gen_helper_pshuflw_xmm }, /* XXX: casts */ [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */ [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */ [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */ @@ -2914,7 +2926,8 @@ static void *sse_op_table1[256][4] = { [0xf4] = MMX_OP2(pmuludq), [0xf5] = MMX_OP2(pmaddwd), [0xf6] = MMX_OP2(psadbw), - [0xf7] = MMX_OP2(maskmov), + [0xf7] = { (SSEFunc_0_pp)gen_helper_maskmov_mmx, + (SSEFunc_0_pp)gen_helper_maskmov_xmm }, /* XXX: casts */ [0xf8] = MMX_OP2(psubb), [0xf9] = MMX_OP2(psubw), [0xfa] = MMX_OP2(psubl), @@ -2924,7 +2937,7 @@ static void *sse_op_table1[256][4] = { [0xfe] = MMX_OP2(paddl), }; -static void *sse_op_table2[3 * 8][2] = { +static const SSEFunc_0_pp sse_op_table2[3 * 8][2] = { [0 + 2] = MMX_OP2(psrlw), [0 + 4] = MMX_OP2(psraw), [0 + 6] = MMX_OP2(psllw), @@ -2937,24 +2950,35 @@ static void *sse_op_table2[3 * 8][2] = { [16 + 7] = { NULL, gen_helper_pslldq_xmm }, }; -static void *sse_op_table3[4 * 3] = { +static const SSEFunc_0_pi sse_op_table3ai[] = { gen_helper_cvtsi2ss, - gen_helper_cvtsi2sd, - X86_64_ONLY(gen_helper_cvtsq2ss), - X86_64_ONLY(gen_helper_cvtsq2sd), + gen_helper_cvtsi2sd +}; + +#ifdef TARGET_X86_64 +static const SSEFunc_0_pl sse_op_table3aq[] = { + gen_helper_cvtsq2ss, + gen_helper_cvtsq2sd +}; +#endif +static const SSEFunc_i_p sse_op_table3bi[] = { gen_helper_cvttss2si, + gen_helper_cvtss2si, gen_helper_cvttsd2si, - X86_64_ONLY(gen_helper_cvttss2sq), - X86_64_ONLY(gen_helper_cvttsd2sq), + gen_helper_cvtsd2si +}; - gen_helper_cvtss2si, - gen_helper_cvtsd2si, - X86_64_ONLY(gen_helper_cvtss2sq), - X86_64_ONLY(gen_helper_cvtsd2sq), +#ifdef TARGET_X86_64 +static const SSEFunc_l_p sse_op_table3bq[] = { + gen_helper_cvttss2sq, + gen_helper_cvtss2sq, + gen_helper_cvttsd2sq, + gen_helper_cvtsd2sq }; +#endif -static void *sse_op_table4[8][4] = { +static const SSEFunc_0_pp sse_op_table4[8][4] = { SSE_FOP(cmpeq), SSE_FOP(cmplt), SSE_FOP(cmple), @@ -2965,7 +2989,7 @@ static void *sse_op_table4[8][4] = { SSE_FOP(cmpord), }; -static void *sse_op_table5[256] = { +static const SSEFunc_0_pp sse_op_table5[256] = { [0x0c] = gen_helper_pi2fw, [0x0d] = gen_helper_pi2fd, [0x1c] = gen_helper_pf2iw, @@ -2992,14 +3016,22 @@ static void *sse_op_table5[256] = { [0xbf] = gen_helper_pavgb_mmx /* pavgusb */ }; -struct sse_op_helper_s { - void *op[2]; uint32_t ext_mask; +struct SSEOpHelper_pp { + SSEFunc_0_pp op[2]; + uint32_t ext_mask; +}; + +struct SSEOpHelper_ppi { + SSEFunc_0_ppi op[2]; + uint32_t ext_mask; }; + #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 } #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 } #define SSE42_OP(x) { { 
NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 } #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 } -static struct sse_op_helper_s sse_op_table6[256] = { + +static const struct SSEOpHelper_pp sse_op_table6[256] = { [0x00] = SSSE3_OP(pshufb), [0x01] = SSSE3_OP(phaddw), [0x02] = SSSE3_OP(phaddd), @@ -3048,7 +3080,7 @@ static struct sse_op_helper_s sse_op_table6[256] = { [0x41] = SSE41_OP(phminposuw), }; -static struct sse_op_helper_s sse_op_table7[256] = { +static const struct SSEOpHelper_ppi sse_op_table7[256] = { [0x08] = SSE41_OP(roundps), [0x09] = SSE41_OP(roundpd), [0x0a] = SSE41_OP(roundss), @@ -3077,7 +3109,9 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) { int b1, op1_offset, op2_offset, is_xmm, val, ot; int modrm, mod, rm, reg, reg_addr, offset_addr; - void *sse_op2; + SSEFunc_0_pp sse_fn_pp; + SSEFunc_0_ppi sse_fn_ppi; + SSEFunc_0_ppt sse_fn_ppt; b &= 0xff; if (s->prefix & PREFIX_DATA) @@ -3088,9 +3122,10 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) b1 = 3; else b1 = 0; - sse_op2 = sse_op_table1[b][b1]; - if (!sse_op2) + sse_fn_pp = sse_op_table1[b][b1]; + if (!sse_fn_pp) { goto illegal_op; + } if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) { is_xmm = 1; } else { @@ -3137,7 +3172,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) if (is_xmm) reg |= rex_r; mod = (modrm >> 6) & 3; - if (sse_op2 == SSE_SPECIAL) { + if (sse_fn_pp == SSE_SPECIAL) { b |= (b1 << 8); switch(b) { case 0x0e7: /* movntq */ @@ -3474,9 +3509,10 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1))); op1_offset = offsetof(CPUX86State,mmx_t0); } - sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1]; - if (!sse_op2) + sse_fn_pp = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1]; + if (!sse_fn_pp) { goto illegal_op; + } if (is_xmm) { rm = (modrm & 7) | REX_B(s); op2_offset = offsetof(CPUX86State,xmm_regs[rm]); @@ -3486,7 +3522,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) } tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset); - ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1); + sse_fn_pp(cpu_ptr0, cpu_ptr1); break; case 0x050: /* movmskps */ rm = (modrm & 7) | REX_B(s); @@ -3534,12 +3570,17 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); op1_offset = offsetof(CPUX86State,xmm_regs[reg]); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); - sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)]; if (ot == OT_LONG) { + SSEFunc_0_pi sse_fn_pi = sse_op_table3ai[(b >> 8) & 1]; tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); - ((void (*)(TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_tmp2_i32); + sse_fn_pi(cpu_ptr0, cpu_tmp2_i32); } else { - ((void (*)(TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_T[0]); +#ifdef TARGET_X86_64 + SSEFunc_0_pl sse_fn_pl = sse_op_table3aq[(b >> 8) & 1]; + sse_fn_pl(cpu_ptr0, cpu_T[0]); +#else + goto illegal_op; +#endif } break; case 0x02c: /* cvttps2pi */ @@ -3591,14 +3632,20 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) rm = (modrm & 7) | REX_B(s); op2_offset = offsetof(CPUX86State,xmm_regs[rm]); } - sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 + - (b & 1) * 4]; tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset); if (ot == 
OT_LONG) { - ((void (*)(TCGv_i32, TCGv_ptr))sse_op2)(cpu_tmp2_i32, cpu_ptr0); + SSEFunc_i_p sse_fn_i_p = + sse_op_table3bi[((b >> 7) & 2) | (b & 1)]; + sse_fn_i_p(cpu_tmp2_i32, cpu_ptr0); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); } else { - ((void (*)(TCGv, TCGv_ptr))sse_op2)(cpu_T[0], cpu_ptr0); +#ifdef TARGET_X86_64 + SSEFunc_l_p sse_fn_l_p = + sse_op_table3bq[((b >> 7) & 2) | (b & 1)]; + sse_fn_l_p(cpu_T[0], cpu_ptr0); +#else + goto illegal_op; +#endif } gen_op_mov_reg_T0(ot, reg); break; @@ -3691,9 +3738,10 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) goto illegal_op; } - sse_op2 = sse_op_table6[b].op[b1]; - if (!sse_op2) + sse_fn_pp = sse_op_table6[b].op[b1]; + if (!sse_fn_pp) { goto illegal_op; + } if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask)) goto illegal_op; @@ -3742,12 +3790,13 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) gen_ldq_env_A0(s->mem_index, op2_offset); } } - if (sse_op2 == SSE_SPECIAL) + if (sse_fn_pp == SSE_SPECIAL) { goto illegal_op; + } tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1); + sse_fn_pp(cpu_ptr0, cpu_ptr1); if (b == 0x17) s->cc_op = CC_OP_EFLAGS; @@ -3793,13 +3842,14 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) goto illegal_op; } - sse_op2 = sse_op_table7[b].op[b1]; - if (!sse_op2) + sse_fn_ppi = sse_op_table7[b].op[b1]; + if (!sse_fn_ppi) { goto illegal_op; + } if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask)) goto illegal_op; - if (sse_op2 == SSE_SPECIAL) { + if (sse_fn_ppi == SSE_SPECIAL) { ot = (s->dflag == 2) ? OT_QUAD : OT_LONG; rm = (modrm & 7) | REX_B(s); if (mod != 3) @@ -3960,7 +4010,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val)); + sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val)); break; default: goto illegal_op; @@ -4015,29 +4065,33 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) goto illegal_op; val = ldub_code(s->pc++); - sse_op2 = sse_op_table5[val]; - if (!sse_op2) + sse_fn_pp = sse_op_table5[val]; + if (!sse_fn_pp) { goto illegal_op; + } tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1); + sse_fn_pp(cpu_ptr0, cpu_ptr1); break; case 0x70: /* pshufx insn */ case 0xc6: /* pshufx insn */ val = ldub_code(s->pc++); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val)); + /* XXX: introduce a new table? 
*/ + sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_pp; + sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val)); break; case 0xc2: /* compare insns */ val = ldub_code(s->pc++); if (val >= 8) goto illegal_op; - sse_op2 = sse_op_table4[val][b1]; + sse_fn_pp = sse_op_table4[val][b1]; + tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1); + sse_fn_pp(cpu_ptr0, cpu_ptr1); break; case 0xf7: /* maskmov : we must prepare A0 */ @@ -4057,12 +4111,14 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - ((void (*)(TCGv_ptr, TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_ptr1, cpu_A0); + /* XXX: introduce a new table? */ + sse_fn_ppt = (SSEFunc_0_ppt)sse_fn_pp; + sse_fn_ppt(cpu_ptr0, cpu_ptr1, cpu_A0); break; default: tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1); + sse_fn_pp(cpu_ptr0, cpu_ptr1); break; } if (b == 0x2e || b == 0x2f) { diff --git a/target-lm32/Makefile.objs b/target-lm32/Makefile.objs new file mode 100644 index 0000000000..2e0e093e1f --- /dev/null +++ b/target-lm32/Makefile.objs @@ -0,0 +1,4 @@ +obj-y += translate.o op_helper.o helper.o cpu.o +obj-$(CONFIG_SOFTMMU) += machine.o + +$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) diff --git a/target-m68k/Makefile.objs b/target-m68k/Makefile.objs new file mode 100644 index 0000000000..cda60157f5 --- /dev/null +++ b/target-m68k/Makefile.objs @@ -0,0 +1,5 @@ +obj-y += m68k-semi.o +obj-y += translate.o op_helper.o helper.o cpu.o +obj-$(CONFIG_SOFTMMU) += machine.o + +$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) diff --git a/m68k-semi.c b/target-m68k/m68k-semi.c index 3bb30cd1f7..3bb30cd1f7 100644 --- a/m68k-semi.c +++ b/target-m68k/m68k-semi.c diff --git a/target-microblaze/Makefile.objs b/target-microblaze/Makefile.objs new file mode 100644 index 0000000000..4b09e8c6b5 --- /dev/null +++ b/target-microblaze/Makefile.objs @@ -0,0 +1,4 @@ +obj-y += translate.o op_helper.o helper.o cpu.o +obj-$(CONFIG_SOFTMMU) += mmu.o machine.o + +$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) diff --git a/target-microblaze/translate.c b/target-microblaze/translate.c index c0a6bfd9c3..7470149db3 100644 --- a/target-microblaze/translate.c +++ b/target-microblaze/translate.c @@ -1539,8 +1539,10 @@ static void dec_fpu(DisasContext *dc) cpu_R[dc->ra], cpu_R[dc->rb]); break; default: - qemu_log ("unimplemented fcmp fpu_insn=%x pc=%x opc=%x\n", - fpu_insn, dc->pc, dc->opcode); + qemu_log_mask(LOG_UNIMP, + "unimplemented fcmp fpu_insn=%x pc=%x" + " opc=%x\n", + fpu_insn, dc->pc, dc->opcode); dc->abort_at_next_insn = 1; break; } @@ -1568,8 +1570,9 @@ static void dec_fpu(DisasContext *dc) break; default: - qemu_log ("unimplemented FPU insn fpu_insn=%x pc=%x opc=%x\n", - fpu_insn, dc->pc, dc->opcode); + qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x" + " opc=%x\n", + fpu_insn, dc->pc, dc->opcode); dc->abort_at_next_insn = 1; break; } diff --git a/target-mips/Makefile.objs b/target-mips/Makefile.objs new file mode 100644 index 0000000000..2e0e093e1f --- /dev/null +++ b/target-mips/Makefile.objs @@ -0,0 +1,4 @@ +obj-y += translate.o op_helper.o helper.o cpu.o +obj-$(CONFIG_SOFTMMU) += machine.o + +$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) diff --git a/target-ppc/Makefile.objs b/target-ppc/Makefile.objs new 
file mode 100644 index 0000000000..237a0ed4f7 --- /dev/null +++ b/target-ppc/Makefile.objs @@ -0,0 +1,12 @@ +obj-y += translate.o helper.o +obj-$(CONFIG_SOFTMMU) += machine.o +obj-$(CONFIG_KVM) += kvm.o kvm_ppc.o +obj-y += helper.o +obj-y += excp_helper.o +obj-y += fpu_helper.o +obj-y += int_helper.o +obj-y += mmu_helper.o +obj-y += timebase_helper.o +obj-y += misc_helper.o +obj-y += mem_helper.o +obj-y += mpic_helper.o diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h index 77a28580af..ca2fc2198e 100644 --- a/target-ppc/cpu.h +++ b/target-ppc/cpu.h @@ -119,6 +119,8 @@ enum powerpc_mmu_t { POWERPC_MMU_620 = POWERPC_MMU_64 | 0x00000002, /* Architecture 2.06 variant */ POWERPC_MMU_2_06 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG | 0x00000003, + /* Architecture 2.06 "degraded" (no 1T segments) */ + POWERPC_MMU_2_06d = POWERPC_MMU_64 | 0x00000003, #endif /* defined(TARGET_PPC64) */ }; @@ -691,7 +693,7 @@ enum { #define MAS1_VALID 0x80000000 #define MAS2_EPN_SHIFT 12 -#define MAS2_EPN_MASK (0xfffff << MAS2_EPN_SHIFT) +#define MAS2_EPN_MASK (~0ULL << MAS2_EPN_SHIFT) #define MAS2_ACM_SHIFT 6 #define MAS2_ACM (1 << MAS2_ACM_SHIFT) @@ -874,6 +876,29 @@ enum { #define DBELL_PIRTAG_MASK 0x3fff /*****************************************************************************/ +/* Segment page size information, used by recent hash MMUs + * The format of this structure mirrors kvm_ppc_smmu_info + */ + +#define PPC_PAGE_SIZES_MAX_SZ 8 + +struct ppc_one_page_size { + uint32_t page_shift; /* Page shift (or 0) */ + uint32_t pte_enc; /* Encoding in the HPTE (>>12) */ +}; + +struct ppc_one_seg_page_size { + uint32_t page_shift; /* Base page shift of segment (or 0) */ + uint32_t slb_enc; /* SLB encoding for BookS */ + struct ppc_one_page_size enc[PPC_PAGE_SIZES_MAX_SZ]; +}; + +struct ppc_segment_page_sizes { + struct ppc_one_seg_page_size sps[PPC_PAGE_SIZES_MAX_SZ]; +}; + + +/*****************************************************************************/ /* The whole PowerPC CPU context */ #define NB_MMU_MODES 3 @@ -889,6 +914,9 @@ struct ppc_def_t { powerpc_input_t bus_model; uint32_t flags; int bfd_mach; +#if defined(TARGET_PPC64) + const struct ppc_segment_page_sizes *sps; +#endif void (*init_proc)(CPUPPCState *env); int (*check_pow)(CPUPPCState *env); }; @@ -1012,6 +1040,9 @@ struct CPUPPCState { uint32_t flags; uint64_t insns_flags; uint64_t insns_flags2; +#if defined(TARGET_PPC64) + struct ppc_segment_page_sizes sps; +#endif #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) target_phys_addr_t vpa; @@ -1035,6 +1066,7 @@ struct CPUPPCState { target_ulong ivor_mask; target_ulong ivpr_mask; target_ulong hreset_vector; + target_phys_addr_t mpic_cpu_base; #endif /* Those resources are used only during code translation */ @@ -1117,27 +1149,12 @@ int get_physical_address (CPUPPCState *env, mmu_ctx_t *ctx, target_ulong vaddr, void do_interrupt (CPUPPCState *env); void ppc_hw_interrupt (CPUPPCState *env); -void cpu_dump_rfi (target_ulong RA, target_ulong msr); - #if !defined(CONFIG_USER_ONLY) -void ppc6xx_tlb_store (CPUPPCState *env, target_ulong EPN, int way, int is_code, - target_ulong pte0, target_ulong pte1); -void ppc_store_ibatu (CPUPPCState *env, int nr, target_ulong value); -void ppc_store_ibatl (CPUPPCState *env, int nr, target_ulong value); -void ppc_store_dbatu (CPUPPCState *env, int nr, target_ulong value); -void ppc_store_dbatl (CPUPPCState *env, int nr, target_ulong value); -void ppc_store_ibatu_601 (CPUPPCState *env, int nr, target_ulong value); -void ppc_store_ibatl_601 (CPUPPCState *env, int nr, 
target_ulong value); void ppc_store_sdr1 (CPUPPCState *env, target_ulong value); #if defined(TARGET_PPC64) void ppc_store_asr (CPUPPCState *env, target_ulong value); -target_ulong ppc_load_slb (CPUPPCState *env, int slb_nr); -target_ulong ppc_load_sr (CPUPPCState *env, int sr_nr); int ppc_store_slb (CPUPPCState *env, target_ulong rb, target_ulong rs); -int ppc_load_slb_esid (CPUPPCState *env, target_ulong rb, target_ulong *rt); -int ppc_load_slb_vsid (CPUPPCState *env, target_ulong rb, target_ulong *rt); #endif /* defined(TARGET_PPC64) */ -void ppc_store_sr (CPUPPCState *env, int srnum, target_ulong value); #endif /* !defined(CONFIG_USER_ONLY) */ void ppc_store_msr (CPUPPCState *env, target_ulong value); @@ -1176,19 +1193,11 @@ void store_booke_tcr (CPUPPCState *env, target_ulong val); void store_booke_tsr (CPUPPCState *env, target_ulong val); void booke206_flush_tlb(CPUPPCState *env, int flags, const int check_iprot); target_phys_addr_t booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb); -int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, - target_phys_addr_t *raddrp, target_ulong address, - uint32_t pid, int ext, int i); int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, target_phys_addr_t *raddrp, target_ulong address, uint32_t pid); void ppc_tlb_invalidate_all (CPUPPCState *env); void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr); -#if defined(TARGET_PPC64) -void ppc_slb_invalidate_all (CPUPPCState *env); -void ppc_slb_invalidate_one (CPUPPCState *env, uint64_t T0); -#endif -int ppcemb_tlb_search (CPUPPCState *env, target_ulong address, uint32_t pid); #endif #endif @@ -1387,6 +1396,7 @@ static inline void cpu_clone_regs(CPUPPCState *env, target_ulong newsp) #define SPR_BOOKE_TLB1PS (0x159) #define SPR_BOOKE_TLB2PS (0x15A) #define SPR_BOOKE_TLB3PS (0x15B) +#define SPR_BOOKE_MAS7_MAS3 (0x174) #define SPR_BOOKE_IVOR0 (0x190) #define SPR_BOOKE_IVOR1 (0x191) #define SPR_BOOKE_IVOR2 (0x192) @@ -1754,6 +1764,27 @@ static inline void cpu_clone_regs(CPUPPCState *env, target_ulong newsp) #define SPR_604_HID15 (0x3FF) #define SPR_E500_SVR (0x3FF) +/* Disable MAS Interrupt Updates for Hypervisor */ +#define EPCR_DMIUH (1 << 22) +/* Disable Guest TLB Management Instructions */ +#define EPCR_DGTMI (1 << 23) +/* Guest Interrupt Computation Mode */ +#define EPCR_GICM (1 << 24) +/* Interrupt Computation Mode */ +#define EPCR_ICM (1 << 25) +/* Disable Embedded Hypervisor Debug */ +#define EPCR_DUVD (1 << 26) +/* Instruction Storage Interrupt Directed to Guest State */ +#define EPCR_ISIGS (1 << 27) +/* Data Storage Interrupt Directed to Guest State */ +#define EPCR_DSIGS (1 << 28) +/* Instruction TLB Error Interrupt Directed to Guest State */ +#define EPCR_ITLBGS (1 << 29) +/* Data TLB Error Interrupt Directed to Guest State */ +#define EPCR_DTLBGS (1 << 30) +/* External Input Interrupt Directed to Guest State */ +#define EPCR_EXTGS (1 << 31) + /*****************************************************************************/ /* PowerPC Instructions types definitions */ enum { @@ -2182,6 +2213,15 @@ static inline uint32_t booke206_tlbnps(CPUPPCState *env, const int tlbn) #endif +static inline bool msr_is_64bit(CPUPPCState *env, target_ulong msr) +{ + if (env->mmu_model == POWERPC_MMU_BOOKE206) { + return msr & (1ULL << MSR_CM); + } + + return msr & (1ULL << MSR_SF); +} + extern void (*cpu_ppc_hypercall)(CPUPPCState *); static inline bool cpu_has_work(CPUPPCState *env) diff --git a/target-ppc/excp_helper.c b/target-ppc/excp_helper.c new file mode 100644 index 
0000000000..1a593f6f3f --- /dev/null +++ b/target-ppc/excp_helper.c @@ -0,0 +1,969 @@ +/* + * PowerPC exception emulation helpers for QEMU. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#include "cpu.h" +#include "helper.h" + +#include "helper_regs.h" + +//#define DEBUG_OP +//#define DEBUG_EXCEPTIONS + +#ifdef DEBUG_EXCEPTIONS +# define LOG_EXCP(...) qemu_log(__VA_ARGS__) +#else +# define LOG_EXCP(...) do { } while (0) +#endif + +/*****************************************************************************/ +/* PowerPC Hypercall emulation */ + +void (*cpu_ppc_hypercall)(CPUPPCState *); + +/*****************************************************************************/ +/* Exception processing */ +#if defined(CONFIG_USER_ONLY) +void do_interrupt(CPUPPCState *env) +{ + env->exception_index = POWERPC_EXCP_NONE; + env->error_code = 0; +} + +void ppc_hw_interrupt(CPUPPCState *env) +{ + env->exception_index = POWERPC_EXCP_NONE; + env->error_code = 0; +} +#else /* defined(CONFIG_USER_ONLY) */ +static inline void dump_syscall(CPUPPCState *env) +{ + qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64 + " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64 + " nip=" TARGET_FMT_lx "\n", + ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3), + ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5), + ppc_dump_gpr(env, 6), env->nip); +} + +/* Note that this function should be greatly optimized + * when called with a constant excp, from ppc_hw_interrupt + */ +static inline void powerpc_excp(CPUPPCState *env, int excp_model, int excp) +{ + target_ulong msr, new_msr, vector; + int srr0, srr1, asrr0, asrr1; + int lpes0, lpes1, lev; + + if (0) { + /* XXX: find a suitable condition to enable the hypervisor mode */ + lpes0 = (env->spr[SPR_LPCR] >> 1) & 1; + lpes1 = (env->spr[SPR_LPCR] >> 2) & 1; + } else { + /* Those values ensure we won't enter the hypervisor mode */ + lpes0 = 0; + lpes1 = 1; + } + + qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx + " => %08x (%02x)\n", env->nip, excp, env->error_code); + + /* new srr1 value excluding must-be-zero bits */ + msr = env->msr & ~0x783f0000ULL; + + /* new interrupt handler msr */ + new_msr = env->msr & ((target_ulong)1 << MSR_ME); + + /* target registers */ + srr0 = SPR_SRR0; + srr1 = SPR_SRR1; + asrr0 = -1; + asrr1 = -1; + + switch (excp) { + case POWERPC_EXCP_NONE: + /* Should never happen */ + return; + case POWERPC_EXCP_CRITICAL: /* Critical input */ + switch (excp_model) { + case POWERPC_EXCP_40x: + srr0 = SPR_40x_SRR2; + srr1 = SPR_40x_SRR3; + break; + case POWERPC_EXCP_BOOKE: + srr0 = SPR_BOOKE_CSRR0; + srr1 = SPR_BOOKE_CSRR1; + break; + case POWERPC_EXCP_G2: + break; + default: + goto excp_invalid; + } + goto store_next; + case POWERPC_EXCP_MCHECK: /* Machine check exception */ + if (msr_me == 0) { + /* Machine check exception is not enabled. + * Enter checkstop state. 
+ */ + if (qemu_log_enabled()) { + qemu_log("Machine check while not allowed. " + "Entering checkstop state\n"); + } else { + fprintf(stderr, "Machine check while not allowed. " + "Entering checkstop state\n"); + } + env->halted = 1; + env->interrupt_request |= CPU_INTERRUPT_EXITTB; + } + if (0) { + /* XXX: find a suitable condition to enable the hypervisor mode */ + new_msr |= (target_ulong)MSR_HVB; + } + + /* machine check exceptions don't have ME set */ + new_msr &= ~((target_ulong)1 << MSR_ME); + + /* XXX: should also have something loaded in DAR / DSISR */ + switch (excp_model) { + case POWERPC_EXCP_40x: + srr0 = SPR_40x_SRR2; + srr1 = SPR_40x_SRR3; + break; + case POWERPC_EXCP_BOOKE: + srr0 = SPR_BOOKE_MCSRR0; + srr1 = SPR_BOOKE_MCSRR1; + asrr0 = SPR_BOOKE_CSRR0; + asrr1 = SPR_BOOKE_CSRR1; + break; + default: + break; + } + goto store_next; + case POWERPC_EXCP_DSI: /* Data storage exception */ + LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx + "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]); + if (lpes1 == 0) { + new_msr |= (target_ulong)MSR_HVB; + } + goto store_next; + case POWERPC_EXCP_ISI: /* Instruction storage exception */ + LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx + "\n", msr, env->nip); + if (lpes1 == 0) { + new_msr |= (target_ulong)MSR_HVB; + } + msr |= env->error_code; + goto store_next; + case POWERPC_EXCP_EXTERNAL: /* External input */ + if (lpes0 == 1) { + new_msr |= (target_ulong)MSR_HVB; + } + goto store_next; + case POWERPC_EXCP_ALIGN: /* Alignment exception */ + if (lpes1 == 0) { + new_msr |= (target_ulong)MSR_HVB; + } + /* XXX: this is false */ + /* Get rS/rD and rA from faulting opcode */ + env->spr[SPR_DSISR] |= (cpu_ldl_code(env, (env->nip - 4)) + & 0x03FF0000) >> 16; + goto store_current; + case POWERPC_EXCP_PROGRAM: /* Program exception */ + switch (env->error_code & ~0xF) { + case POWERPC_EXCP_FP: + if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) { + LOG_EXCP("Ignore floating point exception\n"); + env->exception_index = POWERPC_EXCP_NONE; + env->error_code = 0; + return; + } + if (lpes1 == 0) { + new_msr |= (target_ulong)MSR_HVB; + } + msr |= 0x00100000; + if (msr_fe0 == msr_fe1) { + goto store_next; + } + msr |= 0x00010000; + break; + case POWERPC_EXCP_INVAL: + LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip); + if (lpes1 == 0) { + new_msr |= (target_ulong)MSR_HVB; + } + msr |= 0x00080000; + env->spr[SPR_BOOKE_ESR] = ESR_PIL; + break; + case POWERPC_EXCP_PRIV: + if (lpes1 == 0) { + new_msr |= (target_ulong)MSR_HVB; + } + msr |= 0x00040000; + env->spr[SPR_BOOKE_ESR] = ESR_PPR; + break; + case POWERPC_EXCP_TRAP: + if (lpes1 == 0) { + new_msr |= (target_ulong)MSR_HVB; + } + msr |= 0x00020000; + env->spr[SPR_BOOKE_ESR] = ESR_PTR; + break; + default: + /* Should never occur */ + cpu_abort(env, "Invalid program exception %d. 
Aborting\n", + env->error_code); + break; + } + goto store_current; + case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ + if (lpes1 == 0) { + new_msr |= (target_ulong)MSR_HVB; + } + goto store_current; + case POWERPC_EXCP_SYSCALL: /* System call exception */ + dump_syscall(env); + lev = env->error_code; + if ((lev == 1) && cpu_ppc_hypercall) { + cpu_ppc_hypercall(env); + return; + } + if (lev == 1 || (lpes0 == 0 && lpes1 == 0)) { + new_msr |= (target_ulong)MSR_HVB; + } + goto store_next; + case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ + goto store_current; + case POWERPC_EXCP_DECR: /* Decrementer exception */ + if (lpes1 == 0) { + new_msr |= (target_ulong)MSR_HVB; + } + goto store_next; + case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ + /* FIT on 4xx */ + LOG_EXCP("FIT exception\n"); + goto store_next; + case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ + LOG_EXCP("WDT exception\n"); + switch (excp_model) { + case POWERPC_EXCP_BOOKE: + srr0 = SPR_BOOKE_CSRR0; + srr1 = SPR_BOOKE_CSRR1; + break; + default: + break; + } + goto store_next; + case POWERPC_EXCP_DTLB: /* Data TLB error */ + goto store_next; + case POWERPC_EXCP_ITLB: /* Instruction TLB error */ + goto store_next; + case POWERPC_EXCP_DEBUG: /* Debug interrupt */ + switch (excp_model) { + case POWERPC_EXCP_BOOKE: + srr0 = SPR_BOOKE_DSRR0; + srr1 = SPR_BOOKE_DSRR1; + asrr0 = SPR_BOOKE_CSRR0; + asrr1 = SPR_BOOKE_CSRR1; + break; + default: + break; + } + /* XXX: TODO */ + cpu_abort(env, "Debug exception is not implemented yet !\n"); + goto store_next; + case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */ + env->spr[SPR_BOOKE_ESR] = ESR_SPV; + goto store_current; + case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */ + /* XXX: TODO */ + cpu_abort(env, "Embedded floating point data exception " + "is not implemented yet !\n"); + env->spr[SPR_BOOKE_ESR] = ESR_SPV; + goto store_next; + case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */ + /* XXX: TODO */ + cpu_abort(env, "Embedded floating point round exception " + "is not implemented yet !\n"); + env->spr[SPR_BOOKE_ESR] = ESR_SPV; + goto store_next; + case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */ + /* XXX: TODO */ + cpu_abort(env, + "Performance counter exception is not implemented yet !\n"); + goto store_next; + case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ + goto store_next; + case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ + srr0 = SPR_BOOKE_CSRR0; + srr1 = SPR_BOOKE_CSRR1; + goto store_next; + case POWERPC_EXCP_RESET: /* System reset exception */ + if (msr_pow) { + /* indicate that we resumed from power save mode */ + msr |= 0x10000; + } else { + new_msr &= ~((target_ulong)1 << MSR_ME); + } + + if (0) { + /* XXX: find a suitable condition to enable the hypervisor mode */ + new_msr |= (target_ulong)MSR_HVB; + } + goto store_next; + case POWERPC_EXCP_DSEG: /* Data segment exception */ + if (lpes1 == 0) { + new_msr |= (target_ulong)MSR_HVB; + } + goto store_next; + case POWERPC_EXCP_ISEG: /* Instruction segment exception */ + if (lpes1 == 0) { + new_msr |= (target_ulong)MSR_HVB; + } + goto store_next; + case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ + srr0 = SPR_HSRR0; + srr1 = SPR_HSRR1; + new_msr |= (target_ulong)MSR_HVB; + new_msr |= env->msr & ((target_ulong)1 << MSR_RI); + goto store_next; + case POWERPC_EXCP_TRACE: /* Trace exception */ + if (lpes1 == 0) { + new_msr |= (target_ulong)MSR_HVB; + } + goto 
store_next; + case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ + srr0 = SPR_HSRR0; + srr1 = SPR_HSRR1; + new_msr |= (target_ulong)MSR_HVB; + new_msr |= env->msr & ((target_ulong)1 << MSR_RI); + goto store_next; + case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */ + srr0 = SPR_HSRR0; + srr1 = SPR_HSRR1; + new_msr |= (target_ulong)MSR_HVB; + new_msr |= env->msr & ((target_ulong)1 << MSR_RI); + goto store_next; + case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */ + srr0 = SPR_HSRR0; + srr1 = SPR_HSRR1; + new_msr |= (target_ulong)MSR_HVB; + new_msr |= env->msr & ((target_ulong)1 << MSR_RI); + goto store_next; + case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */ + srr0 = SPR_HSRR0; + srr1 = SPR_HSRR1; + new_msr |= (target_ulong)MSR_HVB; + new_msr |= env->msr & ((target_ulong)1 << MSR_RI); + goto store_next; + case POWERPC_EXCP_VPU: /* Vector unavailable exception */ + if (lpes1 == 0) { + new_msr |= (target_ulong)MSR_HVB; + } + goto store_current; + case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */ + LOG_EXCP("PIT exception\n"); + goto store_next; + case POWERPC_EXCP_IO: /* IO error exception */ + /* XXX: TODO */ + cpu_abort(env, "601 IO error exception is not implemented yet !\n"); + goto store_next; + case POWERPC_EXCP_RUNM: /* Run mode exception */ + /* XXX: TODO */ + cpu_abort(env, "601 run mode exception is not implemented yet !\n"); + goto store_next; + case POWERPC_EXCP_EMUL: /* Emulation trap exception */ + /* XXX: TODO */ + cpu_abort(env, "602 emulation trap exception " + "is not implemented yet !\n"); + goto store_next; + case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ + if (lpes1 == 0) { /* XXX: check this */ + new_msr |= (target_ulong)MSR_HVB; + } + switch (excp_model) { + case POWERPC_EXCP_602: + case POWERPC_EXCP_603: + case POWERPC_EXCP_603E: + case POWERPC_EXCP_G2: + goto tlb_miss_tgpr; + case POWERPC_EXCP_7x5: + goto tlb_miss; + case POWERPC_EXCP_74xx: + goto tlb_miss_74xx; + default: + cpu_abort(env, "Invalid instruction TLB miss exception\n"); + break; + } + break; + case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ + if (lpes1 == 0) { /* XXX: check this */ + new_msr |= (target_ulong)MSR_HVB; + } + switch (excp_model) { + case POWERPC_EXCP_602: + case POWERPC_EXCP_603: + case POWERPC_EXCP_603E: + case POWERPC_EXCP_G2: + goto tlb_miss_tgpr; + case POWERPC_EXCP_7x5: + goto tlb_miss; + case POWERPC_EXCP_74xx: + goto tlb_miss_74xx; + default: + cpu_abort(env, "Invalid data load TLB miss exception\n"); + break; + } + break; + case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ + if (lpes1 == 0) { /* XXX: check this */ + new_msr |= (target_ulong)MSR_HVB; + } + switch (excp_model) { + case POWERPC_EXCP_602: + case POWERPC_EXCP_603: + case POWERPC_EXCP_603E: + case POWERPC_EXCP_G2: + tlb_miss_tgpr: + /* Swap temporary saved registers with GPRs */ + if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) { + new_msr |= (target_ulong)1 << MSR_TGPR; + hreg_swap_gpr_tgpr(env); + } + goto tlb_miss; + case POWERPC_EXCP_7x5: + tlb_miss: +#if defined(DEBUG_SOFTWARE_TLB) + if (qemu_log_enabled()) { + const char *es; + target_ulong *miss, *cmp; + int en; + + if (excp == POWERPC_EXCP_IFTLB) { + es = "I"; + en = 'I'; + miss = &env->spr[SPR_IMISS]; + cmp = &env->spr[SPR_ICMP]; + } else { + if (excp == POWERPC_EXCP_DLTLB) { + es = "DL"; + } else { + es = "DS"; + } + en = 'D'; + miss = &env->spr[SPR_DMISS]; + cmp = &env->spr[SPR_DCMP]; + } + qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC " + TARGET_FMT_lx " 
H1 " TARGET_FMT_lx " H2 " + TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp, + env->spr[SPR_HASH1], env->spr[SPR_HASH2], + env->error_code); + } +#endif + msr |= env->crf[0] << 28; + msr |= env->error_code; /* key, D/I, S/L bits */ + /* Set way using a LRU mechanism */ + msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17; + break; + case POWERPC_EXCP_74xx: + tlb_miss_74xx: +#if defined(DEBUG_SOFTWARE_TLB) + if (qemu_log_enabled()) { + const char *es; + target_ulong *miss, *cmp; + int en; + + if (excp == POWERPC_EXCP_IFTLB) { + es = "I"; + en = 'I'; + miss = &env->spr[SPR_TLBMISS]; + cmp = &env->spr[SPR_PTEHI]; + } else { + if (excp == POWERPC_EXCP_DLTLB) { + es = "DL"; + } else { + es = "DS"; + } + en = 'D'; + miss = &env->spr[SPR_TLBMISS]; + cmp = &env->spr[SPR_PTEHI]; + } + qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC " + TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp, + env->error_code); + } +#endif + msr |= env->error_code; /* key bit */ + break; + default: + cpu_abort(env, "Invalid data store TLB miss exception\n"); + break; + } + goto store_next; + case POWERPC_EXCP_FPA: /* Floating-point assist exception */ + /* XXX: TODO */ + cpu_abort(env, "Floating point assist exception " + "is not implemented yet !\n"); + goto store_next; + case POWERPC_EXCP_DABR: /* Data address breakpoint */ + /* XXX: TODO */ + cpu_abort(env, "DABR exception is not implemented yet !\n"); + goto store_next; + case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ + /* XXX: TODO */ + cpu_abort(env, "IABR exception is not implemented yet !\n"); + goto store_next; + case POWERPC_EXCP_SMI: /* System management interrupt */ + /* XXX: TODO */ + cpu_abort(env, "SMI exception is not implemented yet !\n"); + goto store_next; + case POWERPC_EXCP_THERM: /* Thermal interrupt */ + /* XXX: TODO */ + cpu_abort(env, "Thermal management exception " + "is not implemented yet !\n"); + goto store_next; + case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ + if (lpes1 == 0) { + new_msr |= (target_ulong)MSR_HVB; + } + /* XXX: TODO */ + cpu_abort(env, + "Performance counter exception is not implemented yet !\n"); + goto store_next; + case POWERPC_EXCP_VPUA: /* Vector assist exception */ + /* XXX: TODO */ + cpu_abort(env, "VPU assist exception is not implemented yet !\n"); + goto store_next; + case POWERPC_EXCP_SOFTP: /* Soft patch exception */ + /* XXX: TODO */ + cpu_abort(env, + "970 soft-patch exception is not implemented yet !\n"); + goto store_next; + case POWERPC_EXCP_MAINT: /* Maintenance exception */ + /* XXX: TODO */ + cpu_abort(env, + "970 maintenance exception is not implemented yet !\n"); + goto store_next; + case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */ + /* XXX: TODO */ + cpu_abort(env, "Maskable external exception " + "is not implemented yet !\n"); + goto store_next; + case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */ + /* XXX: TODO */ + cpu_abort(env, "Non maskable external exception " + "is not implemented yet !\n"); + goto store_next; + default: + excp_invalid: + cpu_abort(env, "Invalid PowerPC exception %d. 
Aborting\n", excp); + break; + store_current: + /* save current instruction location */ + env->spr[srr0] = env->nip - 4; + break; + store_next: + /* save next instruction location */ + env->spr[srr0] = env->nip; + break; + } + /* Save MSR */ + env->spr[srr1] = msr; + /* If any alternate SRR register are defined, duplicate saved values */ + if (asrr0 != -1) { + env->spr[asrr0] = env->spr[srr0]; + } + if (asrr1 != -1) { + env->spr[asrr1] = env->spr[srr1]; + } + /* If we disactivated any translation, flush TLBs */ + if (msr & ((1 << MSR_IR) | (1 << MSR_DR))) { + tlb_flush(env, 1); + } + + if (msr_ile) { + new_msr |= (target_ulong)1 << MSR_LE; + } + + /* Jump to handler */ + vector = env->excp_vectors[excp]; + if (vector == (target_ulong)-1ULL) { + cpu_abort(env, "Raised an exception without defined vector %d\n", + excp); + } + vector |= env->excp_prefix; +#if defined(TARGET_PPC64) + if (excp_model == POWERPC_EXCP_BOOKE) { + if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) { + /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */ + new_msr |= (target_ulong)1 << MSR_CM; + } else { + vector = (uint32_t)vector; + } + } else { + if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) { + vector = (uint32_t)vector; + } else { + new_msr |= (target_ulong)1 << MSR_SF; + } + } +#endif + /* XXX: we don't use hreg_store_msr here as already have treated + * any special case that could occur. Just store MSR and update hflags + */ + env->msr = new_msr & env->msr_mask; + hreg_compute_hflags(env); + env->nip = vector; + /* Reset exception state */ + env->exception_index = POWERPC_EXCP_NONE; + env->error_code = 0; + + if ((env->mmu_model == POWERPC_MMU_BOOKE) || + (env->mmu_model == POWERPC_MMU_BOOKE206)) { + /* XXX: The BookE changes address space when switching modes, + we should probably implement that as different MMU indexes, + but for the moment we do it the slow way and flush all. 
*/ + tlb_flush(env, 1); + } +} + +void do_interrupt(CPUPPCState *env) +{ + powerpc_excp(env, env->excp_model, env->exception_index); +} + +void ppc_hw_interrupt(CPUPPCState *env) +{ + int hdice; + +#if 0 + qemu_log_mask(CPU_LOG_INT, "%s: %p pending %08x req %08x me %d ee %d\n", + __func__, env, env->pending_interrupts, + env->interrupt_request, (int)msr_me, (int)msr_ee); +#endif + /* External reset */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET); + powerpc_excp(env, env->excp_model, POWERPC_EXCP_RESET); + return; + } + /* Machine check exception */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK); + powerpc_excp(env, env->excp_model, POWERPC_EXCP_MCHECK); + return; + } +#if 0 /* TODO */ + /* External debug exception */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG); + powerpc_excp(env, env->excp_model, POWERPC_EXCP_DEBUG); + return; + } +#endif + if (0) { + /* XXX: find a suitable condition to enable the hypervisor mode */ + hdice = env->spr[SPR_LPCR] & 1; + } else { + hdice = 0; + } + if ((msr_ee != 0 || msr_hv == 0 || msr_pr != 0) && hdice != 0) { + /* Hypervisor decrementer exception */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR); + powerpc_excp(env, env->excp_model, POWERPC_EXCP_HDECR); + return; + } + } + if (msr_ce != 0) { + /* External critical interrupt */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) { + /* Taking a critical external interrupt does not clear the external + * critical interrupt status + */ +#if 0 + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CEXT); +#endif + powerpc_excp(env, env->excp_model, POWERPC_EXCP_CRITICAL); + return; + } + } + if (msr_ee != 0) { + /* Watchdog timer on embedded PowerPC */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT); + powerpc_excp(env, env->excp_model, POWERPC_EXCP_WDT); + return; + } + if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL); + powerpc_excp(env, env->excp_model, POWERPC_EXCP_DOORCI); + return; + } + /* Fixed interval timer on embedded PowerPC */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT); + powerpc_excp(env, env->excp_model, POWERPC_EXCP_FIT); + return; + } + /* Programmable interval timer on embedded PowerPC */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT); + powerpc_excp(env, env->excp_model, POWERPC_EXCP_PIT); + return; + } + /* Decrementer exception */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR); + powerpc_excp(env, env->excp_model, POWERPC_EXCP_DECR); + return; + } + /* External interrupt */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) { + /* Taking an external interrupt does not clear the external + * interrupt status + */ +#if 0 + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_EXT); +#endif + powerpc_excp(env, env->excp_model, POWERPC_EXCP_EXTERNAL); + return; + } + if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL); + powerpc_excp(env, env->excp_model, POWERPC_EXCP_DOORI); + return; + } 
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM); + powerpc_excp(env, env->excp_model, POWERPC_EXCP_PERFM); + return; + } + /* Thermal interrupt */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM); + powerpc_excp(env, env->excp_model, POWERPC_EXCP_THERM); + return; + } + } +} +#endif /* !CONFIG_USER_ONLY */ + +#if defined(DEBUG_OP) +static void cpu_dump_rfi(target_ulong RA, target_ulong msr) +{ + qemu_log("Return from exception at " TARGET_FMT_lx " with flags " + TARGET_FMT_lx "\n", RA, msr); +} +#endif + +/*****************************************************************************/ +/* Exceptions processing helpers */ + +void helper_raise_exception_err(CPUPPCState *env, uint32_t exception, + uint32_t error_code) +{ +#if 0 + printf("Raise exception %3x code : %d\n", exception, error_code); +#endif + env->exception_index = exception; + env->error_code = error_code; + cpu_loop_exit(env); +} + +void helper_raise_exception(CPUPPCState *env, uint32_t exception) +{ + helper_raise_exception_err(env, exception, 0); +} + +#if !defined(CONFIG_USER_ONLY) +void helper_store_msr(CPUPPCState *env, target_ulong val) +{ + val = hreg_store_msr(env, val, 0); + if (val != 0) { + env->interrupt_request |= CPU_INTERRUPT_EXITTB; + helper_raise_exception(env, val); + } +} + +static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr, + target_ulong msrm, int keep_msrh) +{ +#if defined(TARGET_PPC64) + if (msr_is_64bit(env, msr)) { + nip = (uint64_t)nip; + msr &= (uint64_t)msrm; + } else { + nip = (uint32_t)nip; + msr = (uint32_t)(msr & msrm); + if (keep_msrh) { + msr |= env->msr & ~((uint64_t)0xFFFFFFFF); + } + } +#else + nip = (uint32_t)nip; + msr &= (uint32_t)msrm; +#endif + /* XXX: beware: this is false if VLE is supported */ + env->nip = nip & ~((target_ulong)0x00000003); + hreg_store_msr(env, msr, 1); +#if defined(DEBUG_OP) + cpu_dump_rfi(env->nip, env->msr); +#endif + /* No need to raise an exception here, + * as rfi is always the last insn of a TB + */ + env->interrupt_request |= CPU_INTERRUPT_EXITTB; +} + +void helper_rfi(CPUPPCState *env) +{ + do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1], + ~((target_ulong)0x783F0000), 1); +} + +#if defined(TARGET_PPC64) +void helper_rfid(CPUPPCState *env) +{ + do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1], + ~((target_ulong)0x783F0000), 0); +} + +void helper_hrfid(CPUPPCState *env) +{ + do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1], + ~((target_ulong)0x783F0000), 0); +} +#endif + +/*****************************************************************************/ +/* Embedded PowerPC specific helpers */ +void helper_40x_rfci(CPUPPCState *env) +{ + do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3], + ~((target_ulong)0xFFFF0000), 0); +} + +void helper_rfci(CPUPPCState *env) +{ + do_rfi(env, env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1, + ~((target_ulong)0x3FFF0000), 0); +} + +void helper_rfdi(CPUPPCState *env) +{ + do_rfi(env, env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1, + ~((target_ulong)0x3FFF0000), 0); +} + +void helper_rfmci(CPUPPCState *env) +{ + do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1, + ~((target_ulong)0x3FFF0000), 0); +} +#endif + +void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2, + uint32_t flags) +{ + if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) || + ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) || + ((int32_t)arg1 == 
(int32_t)arg2 && (flags & 0x04)) || + ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) || + ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) { + helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_TRAP); + } +} + +#if defined(TARGET_PPC64) +void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2, + uint32_t flags) +{ + if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) || + ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) || + ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) || + ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) || + ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) { + helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_TRAP); + } +} +#endif + +#if !defined(CONFIG_USER_ONLY) +/*****************************************************************************/ +/* PowerPC 601 specific instructions (POWER bridge) */ + +void helper_rfsvc(CPUPPCState *env) +{ + do_rfi(env, env->lr, env->ctr, 0x0000FFFF, 0); +} + +/* Embedded.Processor Control */ +static int dbell2irq(target_ulong rb) +{ + int msg = rb & DBELL_TYPE_MASK; + int irq = -1; + + switch (msg) { + case DBELL_TYPE_DBELL: + irq = PPC_INTERRUPT_DOORBELL; + break; + case DBELL_TYPE_DBELL_CRIT: + irq = PPC_INTERRUPT_CDOORBELL; + break; + case DBELL_TYPE_G_DBELL: + case DBELL_TYPE_G_DBELL_CRIT: + case DBELL_TYPE_G_DBELL_MC: + /* XXX implement */ + default: + break; + } + + return irq; +} + +void helper_msgclr(CPUPPCState *env, target_ulong rb) +{ + int irq = dbell2irq(rb); + + if (irq < 0) { + return; + } + + env->pending_interrupts &= ~(1 << irq); +} + +void helper_msgsnd(target_ulong rb) +{ + int irq = dbell2irq(rb); + int pir = rb & DBELL_PIRTAG_MASK; + CPUPPCState *cenv; + + if (irq < 0) { + return; + } + + for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) { + if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) { + cenv->pending_interrupts |= 1 << irq; + cpu_interrupt(cenv, CPU_INTERRUPT_HARD); + } + } +} +#endif diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c new file mode 100644 index 0000000000..9d67926209 --- /dev/null +++ b/target-ppc/fpu_helper.c @@ -0,0 +1,1740 @@ +/* + * PowerPC floating point and SPE emulation helpers for QEMU. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ +#include "cpu.h" +#include "helper.h" + +/*****************************************************************************/ +/* Floating point operations helpers */ +uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg) +{ + CPU_FloatU f; + CPU_DoubleU d; + + f.l = arg; + d.d = float32_to_float64(f.f, &env->fp_status); + return d.ll; +} + +uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg) +{ + CPU_FloatU f; + CPU_DoubleU d; + + d.ll = arg; + f.f = float64_to_float32(d.d, &env->fp_status); + return f.l; +} + +static inline int isden(float64 d) +{ + CPU_DoubleU u; + + u.d = d; + + return ((u.ll >> 52) & 0x7FF) == 0; +} + +uint32_t helper_compute_fprf(CPUPPCState *env, uint64_t arg, uint32_t set_fprf) +{ + CPU_DoubleU farg; + int isneg; + int ret; + + farg.ll = arg; + isneg = float64_is_neg(farg.d); + if (unlikely(float64_is_any_nan(farg.d))) { + if (float64_is_signaling_nan(farg.d)) { + /* Signaling NaN: flags are undefined */ + ret = 0x00; + } else { + /* Quiet NaN */ + ret = 0x11; + } + } else if (unlikely(float64_is_infinity(farg.d))) { + /* +/- infinity */ + if (isneg) { + ret = 0x09; + } else { + ret = 0x05; + } + } else { + if (float64_is_zero(farg.d)) { + /* +/- zero */ + if (isneg) { + ret = 0x12; + } else { + ret = 0x02; + } + } else { + if (isden(farg.d)) { + /* Denormalized numbers */ + ret = 0x10; + } else { + /* Normalized numbers */ + ret = 0x00; + } + if (isneg) { + ret |= 0x08; + } else { + ret |= 0x04; + } + } + } + if (set_fprf) { + /* We update FPSCR_FPRF */ + env->fpscr &= ~(0x1F << FPSCR_FPRF); + env->fpscr |= ret << FPSCR_FPRF; + } + /* We just need fpcc to update Rc1 */ + return ret & 0xF; +} + +/* Floating-point invalid operations exception */ +static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op) +{ + uint64_t ret = 0; + int ve; + + ve = fpscr_ve; + switch (op) { + case POWERPC_EXCP_FP_VXSNAN: + env->fpscr |= 1 << FPSCR_VXSNAN; + break; + case POWERPC_EXCP_FP_VXSOFT: + env->fpscr |= 1 << FPSCR_VXSOFT; + break; + case POWERPC_EXCP_FP_VXISI: + /* Magnitude subtraction of infinities */ + env->fpscr |= 1 << FPSCR_VXISI; + goto update_arith; + case POWERPC_EXCP_FP_VXIDI: + /* Division of infinity by infinity */ + env->fpscr |= 1 << FPSCR_VXIDI; + goto update_arith; + case POWERPC_EXCP_FP_VXZDZ: + /* Division of zero by zero */ + env->fpscr |= 1 << FPSCR_VXZDZ; + goto update_arith; + case POWERPC_EXCP_FP_VXIMZ: + /* Multiplication of zero by infinity */ + env->fpscr |= 1 << FPSCR_VXIMZ; + goto update_arith; + case POWERPC_EXCP_FP_VXVC: + /* Ordered comparison of NaN */ + env->fpscr |= 1 << FPSCR_VXVC; + env->fpscr &= ~(0xF << FPSCR_FPCC); + env->fpscr |= 0x11 << FPSCR_FPCC; + /* We must update the target FPR before raising the exception */ + if (ve != 0) { + env->exception_index = POWERPC_EXCP_PROGRAM; + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC; + /* Update the floating-point enabled exception summary */ + env->fpscr |= 1 << FPSCR_FEX; + /* Exception is differed */ + ve = 0; + } + break; + case POWERPC_EXCP_FP_VXSQRT: + /* Square root of a negative number */ + env->fpscr |= 1 << FPSCR_VXSQRT; + update_arith: + env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI)); + if (ve == 0) { + /* Set the result to quiet NaN */ + ret = 0x7FF8000000000000ULL; + env->fpscr &= ~(0xF << FPSCR_FPCC); + env->fpscr |= 0x11 << FPSCR_FPCC; + } + break; + case POWERPC_EXCP_FP_VXCVI: + /* Invalid conversion */ + env->fpscr |= 1 << FPSCR_VXCVI; + env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI)); + if (ve == 0) { + /* Set the result 
to quiet NaN */ + ret = 0x7FF8000000000000ULL; + env->fpscr &= ~(0xF << FPSCR_FPCC); + env->fpscr |= 0x11 << FPSCR_FPCC; + } + break; + } + /* Update the floating-point invalid operation summary */ + env->fpscr |= 1 << FPSCR_VX; + /* Update the floating-point exception summary */ + env->fpscr |= 1 << FPSCR_FX; + if (ve != 0) { + /* Update the floating-point enabled exception summary */ + env->fpscr |= 1 << FPSCR_FEX; + if (msr_fe0 != 0 || msr_fe1 != 0) { + helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_FP | op); + } + } + return ret; +} + +static inline void float_zero_divide_excp(CPUPPCState *env) +{ + env->fpscr |= 1 << FPSCR_ZX; + env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI)); + /* Update the floating-point exception summary */ + env->fpscr |= 1 << FPSCR_FX; + if (fpscr_ze != 0) { + /* Update the floating-point enabled exception summary */ + env->fpscr |= 1 << FPSCR_FEX; + if (msr_fe0 != 0 || msr_fe1 != 0) { + helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX); + } + } +} + +static inline void float_overflow_excp(CPUPPCState *env) +{ + env->fpscr |= 1 << FPSCR_OX; + /* Update the floating-point exception summary */ + env->fpscr |= 1 << FPSCR_FX; + if (fpscr_oe != 0) { + /* XXX: should adjust the result */ + /* Update the floating-point enabled exception summary */ + env->fpscr |= 1 << FPSCR_FEX; + /* We must update the target FPR before raising the exception */ + env->exception_index = POWERPC_EXCP_PROGRAM; + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX; + } else { + env->fpscr |= 1 << FPSCR_XX; + env->fpscr |= 1 << FPSCR_FI; + } +} + +static inline void float_underflow_excp(CPUPPCState *env) +{ + env->fpscr |= 1 << FPSCR_UX; + /* Update the floating-point exception summary */ + env->fpscr |= 1 << FPSCR_FX; + if (fpscr_ue != 0) { + /* XXX: should adjust the result */ + /* Update the floating-point enabled exception summary */ + env->fpscr |= 1 << FPSCR_FEX; + /* We must update the target FPR before raising the exception */ + env->exception_index = POWERPC_EXCP_PROGRAM; + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX; + } +} + +static inline void float_inexact_excp(CPUPPCState *env) +{ + env->fpscr |= 1 << FPSCR_XX; + /* Update the floating-point exception summary */ + env->fpscr |= 1 << FPSCR_FX; + if (fpscr_xe != 0) { + /* Update the floating-point enabled exception summary */ + env->fpscr |= 1 << FPSCR_FEX; + /* We must update the target FPR before raising the exception */ + env->exception_index = POWERPC_EXCP_PROGRAM; + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX; + } +} + +static inline void fpscr_set_rounding_mode(CPUPPCState *env) +{ + int rnd_type; + + /* Set rounding mode */ + switch (fpscr_rn) { + case 0: + /* Best approximation (round to nearest) */ + rnd_type = float_round_nearest_even; + break; + case 1: + /* Smaller magnitude (round toward zero) */ + rnd_type = float_round_to_zero; + break; + case 2: + /* Round toward +infinite */ + rnd_type = float_round_up; + break; + default: + case 3: + /* Round toward -infinite */ + rnd_type = float_round_down; + break; + } + set_float_rounding_mode(rnd_type, &env->fp_status); +} + +void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit) +{ + int prev; + + prev = (env->fpscr >> bit) & 1; + env->fpscr &= ~(1 << bit); + if (prev == 1) { + switch (bit) { + case FPSCR_RN1: + case FPSCR_RN: + fpscr_set_rounding_mode(env); + break; + default: + break; + } + } +} + +void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit) +{ + int prev; + + 
prev = (env->fpscr >> bit) & 1; + env->fpscr |= 1 << bit; + if (prev == 0) { + switch (bit) { + case FPSCR_VX: + env->fpscr |= 1 << FPSCR_FX; + if (fpscr_ve) { + goto raise_ve; + } + break; + case FPSCR_OX: + env->fpscr |= 1 << FPSCR_FX; + if (fpscr_oe) { + goto raise_oe; + } + break; + case FPSCR_UX: + env->fpscr |= 1 << FPSCR_FX; + if (fpscr_ue) { + goto raise_ue; + } + break; + case FPSCR_ZX: + env->fpscr |= 1 << FPSCR_FX; + if (fpscr_ze) { + goto raise_ze; + } + break; + case FPSCR_XX: + env->fpscr |= 1 << FPSCR_FX; + if (fpscr_xe) { + goto raise_xe; + } + break; + case FPSCR_VXSNAN: + case FPSCR_VXISI: + case FPSCR_VXIDI: + case FPSCR_VXZDZ: + case FPSCR_VXIMZ: + case FPSCR_VXVC: + case FPSCR_VXSOFT: + case FPSCR_VXSQRT: + case FPSCR_VXCVI: + env->fpscr |= 1 << FPSCR_VX; + env->fpscr |= 1 << FPSCR_FX; + if (fpscr_ve != 0) { + goto raise_ve; + } + break; + case FPSCR_VE: + if (fpscr_vx != 0) { + raise_ve: + env->error_code = POWERPC_EXCP_FP; + if (fpscr_vxsnan) { + env->error_code |= POWERPC_EXCP_FP_VXSNAN; + } + if (fpscr_vxisi) { + env->error_code |= POWERPC_EXCP_FP_VXISI; + } + if (fpscr_vxidi) { + env->error_code |= POWERPC_EXCP_FP_VXIDI; + } + if (fpscr_vxzdz) { + env->error_code |= POWERPC_EXCP_FP_VXZDZ; + } + if (fpscr_vximz) { + env->error_code |= POWERPC_EXCP_FP_VXIMZ; + } + if (fpscr_vxvc) { + env->error_code |= POWERPC_EXCP_FP_VXVC; + } + if (fpscr_vxsoft) { + env->error_code |= POWERPC_EXCP_FP_VXSOFT; + } + if (fpscr_vxsqrt) { + env->error_code |= POWERPC_EXCP_FP_VXSQRT; + } + if (fpscr_vxcvi) { + env->error_code |= POWERPC_EXCP_FP_VXCVI; + } + goto raise_excp; + } + break; + case FPSCR_OE: + if (fpscr_ox != 0) { + raise_oe: + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX; + goto raise_excp; + } + break; + case FPSCR_UE: + if (fpscr_ux != 0) { + raise_ue: + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX; + goto raise_excp; + } + break; + case FPSCR_ZE: + if (fpscr_zx != 0) { + raise_ze: + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX; + goto raise_excp; + } + break; + case FPSCR_XE: + if (fpscr_xx != 0) { + raise_xe: + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX; + goto raise_excp; + } + break; + case FPSCR_RN1: + case FPSCR_RN: + fpscr_set_rounding_mode(env); + break; + default: + break; + raise_excp: + /* Update the floating-point enabled exception summary */ + env->fpscr |= 1 << FPSCR_FEX; + /* We have to update Rc1 before raising the exception */ + env->exception_index = POWERPC_EXCP_PROGRAM; + break; + } + } +} + +void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask) +{ + /* + * We use only the 32 LSB of the incoming fpr + */ + uint32_t prev, new; + int i; + + prev = env->fpscr; + new = (uint32_t)arg; + new &= ~0x60000000; + new |= prev & 0x60000000; + for (i = 0; i < 8; i++) { + if (mask & (1 << i)) { + env->fpscr &= ~(0xF << (4 * i)); + env->fpscr |= new & (0xF << (4 * i)); + } + } + /* Update VX and FEX */ + if (fpscr_ix != 0) { + env->fpscr |= 1 << FPSCR_VX; + } else { + env->fpscr &= ~(1 << FPSCR_VX); + } + if ((fpscr_ex & fpscr_eex) != 0) { + env->fpscr |= 1 << FPSCR_FEX; + env->exception_index = POWERPC_EXCP_PROGRAM; + /* XXX: we should compute it properly */ + env->error_code = POWERPC_EXCP_FP; + } else { + env->fpscr &= ~(1 << FPSCR_FEX); + } + fpscr_set_rounding_mode(env); +} + +void helper_float_check_status(CPUPPCState *env) +{ + if (env->exception_index == POWERPC_EXCP_PROGRAM && + (env->error_code & POWERPC_EXCP_FP)) { + /* Differred floating-point exception after target FPR update */ + if (msr_fe0 != 
0 || msr_fe1 != 0) { + helper_raise_exception_err(env, env->exception_index, + env->error_code); + } + } else { + int status = get_float_exception_flags(&env->fp_status); + if (status & float_flag_divbyzero) { + float_zero_divide_excp(env); + } else if (status & float_flag_overflow) { + float_overflow_excp(env); + } else if (status & float_flag_underflow) { + float_underflow_excp(env); + } else if (status & float_flag_inexact) { + float_inexact_excp(env); + } + } +} + +void helper_reset_fpstatus(CPUPPCState *env) +{ + set_float_exception_flags(0, &env->fp_status); +} + +/* fadd - fadd. */ +uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2) +{ + CPU_DoubleU farg1, farg2; + + farg1.ll = arg1; + farg2.ll = arg2; + + if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) && + float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) { + /* Magnitude subtraction of infinities */ + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI); + } else { + if (unlikely(float64_is_signaling_nan(farg1.d) || + float64_is_signaling_nan(farg2.d))) { + /* sNaN addition */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + } + farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status); + } + + return farg1.ll; +} + +/* fsub - fsub. */ +uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2) +{ + CPU_DoubleU farg1, farg2; + + farg1.ll = arg1; + farg2.ll = arg2; + + if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) && + float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) { + /* Magnitude subtraction of infinities */ + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI); + } else { + if (unlikely(float64_is_signaling_nan(farg1.d) || + float64_is_signaling_nan(farg2.d))) { + /* sNaN subtraction */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + } + farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status); + } + + return farg1.ll; +} + +/* fmul - fmul. */ +uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2) +{ + CPU_DoubleU farg1, farg2; + + farg1.ll = arg1; + farg2.ll = arg2; + + if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || + (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { + /* Multiplication of zero by infinity */ + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ); + } else { + if (unlikely(float64_is_signaling_nan(farg1.d) || + float64_is_signaling_nan(farg2.d))) { + /* sNaN multiplication */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + } + farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status); + } + + return farg1.ll; +} + +/* fdiv - fdiv. 
*/ +uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2) +{ + CPU_DoubleU farg1, farg2; + + farg1.ll = arg1; + farg2.ll = arg2; + + if (unlikely(float64_is_infinity(farg1.d) && + float64_is_infinity(farg2.d))) { + /* Division of infinity by infinity */ + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI); + } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) { + /* Division of zero by zero */ + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ); + } else { + if (unlikely(float64_is_signaling_nan(farg1.d) || + float64_is_signaling_nan(farg2.d))) { + /* sNaN division */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + } + farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status); + } + + return farg1.ll; +} + +/* fabs */ +uint64_t helper_fabs(CPUPPCState *env, uint64_t arg) +{ + CPU_DoubleU farg; + + farg.ll = arg; + farg.d = float64_abs(farg.d); + return farg.ll; +} + +/* fnabs */ +uint64_t helper_fnabs(CPUPPCState *env, uint64_t arg) +{ + CPU_DoubleU farg; + + farg.ll = arg; + farg.d = float64_abs(farg.d); + farg.d = float64_chs(farg.d); + return farg.ll; +} + +/* fneg */ +uint64_t helper_fneg(CPUPPCState *env, uint64_t arg) +{ + CPU_DoubleU farg; + + farg.ll = arg; + farg.d = float64_chs(farg.d); + return farg.ll; +} + +/* fctiw - fctiw. */ +uint64_t helper_fctiw(CPUPPCState *env, uint64_t arg) +{ + CPU_DoubleU farg; + + farg.ll = arg; + + if (unlikely(float64_is_signaling_nan(farg.d))) { + /* sNaN conversion */ + farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN | + POWERPC_EXCP_FP_VXCVI); + } else if (unlikely(float64_is_quiet_nan(farg.d) || + float64_is_infinity(farg.d))) { + /* qNan / infinity conversion */ + farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI); + } else { + farg.ll = float64_to_int32(farg.d, &env->fp_status); + /* XXX: higher bits are not supposed to be significant. + * to make tests easier, return the same as a real PowerPC 750 + */ + farg.ll |= 0xFFF80000ULL << 32; + } + return farg.ll; +} + +/* fctiwz - fctiwz. */ +uint64_t helper_fctiwz(CPUPPCState *env, uint64_t arg) +{ + CPU_DoubleU farg; + + farg.ll = arg; + + if (unlikely(float64_is_signaling_nan(farg.d))) { + /* sNaN conversion */ + farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN | + POWERPC_EXCP_FP_VXCVI); + } else if (unlikely(float64_is_quiet_nan(farg.d) || + float64_is_infinity(farg.d))) { + /* qNan / infinity conversion */ + farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI); + } else { + farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status); + /* XXX: higher bits are not supposed to be significant. + * to make tests easier, return the same as a real PowerPC 750 + */ + farg.ll |= 0xFFF80000ULL << 32; + } + return farg.ll; +} + +#if defined(TARGET_PPC64) +/* fcfid - fcfid. */ +uint64_t helper_fcfid(CPUPPCState *env, uint64_t arg) +{ + CPU_DoubleU farg; + + farg.d = int64_to_float64(arg, &env->fp_status); + return farg.ll; +} + +/* fctid - fctid. 
*/ +uint64_t helper_fctid(CPUPPCState *env, uint64_t arg) +{ + CPU_DoubleU farg; + + farg.ll = arg; + + if (unlikely(float64_is_signaling_nan(farg.d))) { + /* sNaN conversion */ + farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN | + POWERPC_EXCP_FP_VXCVI); + } else if (unlikely(float64_is_quiet_nan(farg.d) || + float64_is_infinity(farg.d))) { + /* qNan / infinity conversion */ + farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI); + } else { + farg.ll = float64_to_int64(farg.d, &env->fp_status); + } + return farg.ll; +} + +/* fctidz - fctidz. */ +uint64_t helper_fctidz(CPUPPCState *env, uint64_t arg) +{ + CPU_DoubleU farg; + + farg.ll = arg; + + if (unlikely(float64_is_signaling_nan(farg.d))) { + /* sNaN conversion */ + farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN | + POWERPC_EXCP_FP_VXCVI); + } else if (unlikely(float64_is_quiet_nan(farg.d) || + float64_is_infinity(farg.d))) { + /* qNan / infinity conversion */ + farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI); + } else { + farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status); + } + return farg.ll; +} + +#endif + +static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg, + int rounding_mode) +{ + CPU_DoubleU farg; + + farg.ll = arg; + + if (unlikely(float64_is_signaling_nan(farg.d))) { + /* sNaN round */ + farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN | + POWERPC_EXCP_FP_VXCVI); + } else if (unlikely(float64_is_quiet_nan(farg.d) || + float64_is_infinity(farg.d))) { + /* qNan / infinity round */ + farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI); + } else { + set_float_rounding_mode(rounding_mode, &env->fp_status); + farg.ll = float64_round_to_int(farg.d, &env->fp_status); + /* Restore rounding mode from FPSCR */ + fpscr_set_rounding_mode(env); + } + return farg.ll; +} + +uint64_t helper_frin(CPUPPCState *env, uint64_t arg) +{ + return do_fri(env, arg, float_round_nearest_even); +} + +uint64_t helper_friz(CPUPPCState *env, uint64_t arg) +{ + return do_fri(env, arg, float_round_to_zero); +} + +uint64_t helper_frip(CPUPPCState *env, uint64_t arg) +{ + return do_fri(env, arg, float_round_up); +} + +uint64_t helper_frim(CPUPPCState *env, uint64_t arg) +{ + return do_fri(env, arg, float_round_down); +} + +/* fmadd - fmadd. 
*/ +uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2, + uint64_t arg3) +{ + CPU_DoubleU farg1, farg2, farg3; + + farg1.ll = arg1; + farg2.ll = arg2; + farg3.ll = arg3; + + if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || + (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { + /* Multiplication of zero by infinity */ + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ); + } else { + if (unlikely(float64_is_signaling_nan(farg1.d) || + float64_is_signaling_nan(farg2.d) || + float64_is_signaling_nan(farg3.d))) { + /* sNaN operation */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + } + /* This is the way the PowerPC specification defines it */ + float128 ft0_128, ft1_128; + + ft0_128 = float64_to_float128(farg1.d, &env->fp_status); + ft1_128 = float64_to_float128(farg2.d, &env->fp_status); + ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); + if (unlikely(float128_is_infinity(ft0_128) && + float64_is_infinity(farg3.d) && + float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) { + /* Magnitude subtraction of infinities */ + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI); + } else { + ft1_128 = float64_to_float128(farg3.d, &env->fp_status); + ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status); + farg1.d = float128_to_float64(ft0_128, &env->fp_status); + } + } + + return farg1.ll; +} + +/* fmsub - fmsub. */ +uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2, + uint64_t arg3) +{ + CPU_DoubleU farg1, farg2, farg3; + + farg1.ll = arg1; + farg2.ll = arg2; + farg3.ll = arg3; + + if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || + (float64_is_zero(farg1.d) && + float64_is_infinity(farg2.d)))) { + /* Multiplication of zero by infinity */ + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ); + } else { + if (unlikely(float64_is_signaling_nan(farg1.d) || + float64_is_signaling_nan(farg2.d) || + float64_is_signaling_nan(farg3.d))) { + /* sNaN operation */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + } + /* This is the way the PowerPC specification defines it */ + float128 ft0_128, ft1_128; + + ft0_128 = float64_to_float128(farg1.d, &env->fp_status); + ft1_128 = float64_to_float128(farg2.d, &env->fp_status); + ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); + if (unlikely(float128_is_infinity(ft0_128) && + float64_is_infinity(farg3.d) && + float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) { + /* Magnitude subtraction of infinities */ + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI); + } else { + ft1_128 = float64_to_float128(farg3.d, &env->fp_status); + ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status); + farg1.d = float128_to_float64(ft0_128, &env->fp_status); + } + } + return farg1.ll; +} + +/* fnmadd - fnmadd. 
*/ +uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2, + uint64_t arg3) +{ + CPU_DoubleU farg1, farg2, farg3; + + farg1.ll = arg1; + farg2.ll = arg2; + farg3.ll = arg3; + + if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || + (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { + /* Multiplication of zero by infinity */ + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ); + } else { + if (unlikely(float64_is_signaling_nan(farg1.d) || + float64_is_signaling_nan(farg2.d) || + float64_is_signaling_nan(farg3.d))) { + /* sNaN operation */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + } + /* This is the way the PowerPC specification defines it */ + float128 ft0_128, ft1_128; + + ft0_128 = float64_to_float128(farg1.d, &env->fp_status); + ft1_128 = float64_to_float128(farg2.d, &env->fp_status); + ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); + if (unlikely(float128_is_infinity(ft0_128) && + float64_is_infinity(farg3.d) && + float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) { + /* Magnitude subtraction of infinities */ + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI); + } else { + ft1_128 = float64_to_float128(farg3.d, &env->fp_status); + ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status); + farg1.d = float128_to_float64(ft0_128, &env->fp_status); + } + if (likely(!float64_is_any_nan(farg1.d))) { + farg1.d = float64_chs(farg1.d); + } + } + return farg1.ll; +} + +/* fnmsub - fnmsub. */ +uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2, + uint64_t arg3) +{ + CPU_DoubleU farg1, farg2, farg3; + + farg1.ll = arg1; + farg2.ll = arg2; + farg3.ll = arg3; + + if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || + (float64_is_zero(farg1.d) && + float64_is_infinity(farg2.d)))) { + /* Multiplication of zero by infinity */ + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ); + } else { + if (unlikely(float64_is_signaling_nan(farg1.d) || + float64_is_signaling_nan(farg2.d) || + float64_is_signaling_nan(farg3.d))) { + /* sNaN operation */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + } + /* This is the way the PowerPC specification defines it */ + float128 ft0_128, ft1_128; + + ft0_128 = float64_to_float128(farg1.d, &env->fp_status); + ft1_128 = float64_to_float128(farg2.d, &env->fp_status); + ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); + if (unlikely(float128_is_infinity(ft0_128) && + float64_is_infinity(farg3.d) && + float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) { + /* Magnitude subtraction of infinities */ + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI); + } else { + ft1_128 = float64_to_float128(farg3.d, &env->fp_status); + ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status); + farg1.d = float128_to_float64(ft0_128, &env->fp_status); + } + if (likely(!float64_is_any_nan(farg1.d))) { + farg1.d = float64_chs(farg1.d); + } + } + return farg1.ll; +} + +/* frsp - frsp. */ +uint64_t helper_frsp(CPUPPCState *env, uint64_t arg) +{ + CPU_DoubleU farg; + float32 f32; + + farg.ll = arg; + + if (unlikely(float64_is_signaling_nan(farg.d))) { + /* sNaN square root */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + } + f32 = float64_to_float32(farg.d, &env->fp_status); + farg.d = float32_to_float64(f32, &env->fp_status); + + return farg.ll; +} + +/* fsqrt - fsqrt. 
*/ +uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg) +{ + CPU_DoubleU farg; + + farg.ll = arg; + + if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) { + /* Square root of a negative nonzero number */ + farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT); + } else { + if (unlikely(float64_is_signaling_nan(farg.d))) { + /* sNaN square root */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + } + farg.d = float64_sqrt(farg.d, &env->fp_status); + } + return farg.ll; +} + +/* fre - fre. */ +uint64_t helper_fre(CPUPPCState *env, uint64_t arg) +{ + CPU_DoubleU farg; + + farg.ll = arg; + + if (unlikely(float64_is_signaling_nan(farg.d))) { + /* sNaN reciprocal */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + } + farg.d = float64_div(float64_one, farg.d, &env->fp_status); + return farg.d; +} + +/* fres - fres. */ +uint64_t helper_fres(CPUPPCState *env, uint64_t arg) +{ + CPU_DoubleU farg; + float32 f32; + + farg.ll = arg; + + if (unlikely(float64_is_signaling_nan(farg.d))) { + /* sNaN reciprocal */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + } + farg.d = float64_div(float64_one, farg.d, &env->fp_status); + f32 = float64_to_float32(farg.d, &env->fp_status); + farg.d = float32_to_float64(f32, &env->fp_status); + + return farg.ll; +} + +/* frsqrte - frsqrte. */ +uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg) +{ + CPU_DoubleU farg; + float32 f32; + + farg.ll = arg; + + if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) { + /* Reciprocal square root of a negative nonzero number */ + farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT); + } else { + if (unlikely(float64_is_signaling_nan(farg.d))) { + /* sNaN reciprocal square root */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + } + farg.d = float64_sqrt(farg.d, &env->fp_status); + farg.d = float64_div(float64_one, farg.d, &env->fp_status); + f32 = float64_to_float32(farg.d, &env->fp_status); + farg.d = float32_to_float64(f32, &env->fp_status); + } + return farg.ll; +} + +/* fsel - fsel. 
*/ +uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2, + uint64_t arg3) +{ + CPU_DoubleU farg1; + + farg1.ll = arg1; + + if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && + !float64_is_any_nan(farg1.d)) { + return arg2; + } else { + return arg3; + } +} + +void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2, + uint32_t crfD) +{ + CPU_DoubleU farg1, farg2; + uint32_t ret = 0; + + farg1.ll = arg1; + farg2.ll = arg2; + + if (unlikely(float64_is_any_nan(farg1.d) || + float64_is_any_nan(farg2.d))) { + ret = 0x01UL; + } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) { + ret = 0x08UL; + } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) { + ret = 0x04UL; + } else { + ret = 0x02UL; + } + + env->fpscr &= ~(0x0F << FPSCR_FPRF); + env->fpscr |= ret << FPSCR_FPRF; + env->crf[crfD] = ret; + if (unlikely(ret == 0x01UL + && (float64_is_signaling_nan(farg1.d) || + float64_is_signaling_nan(farg2.d)))) { + /* sNaN comparison */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + } +} + +void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2, + uint32_t crfD) +{ + CPU_DoubleU farg1, farg2; + uint32_t ret = 0; + + farg1.ll = arg1; + farg2.ll = arg2; + + if (unlikely(float64_is_any_nan(farg1.d) || + float64_is_any_nan(farg2.d))) { + ret = 0x01UL; + } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) { + ret = 0x08UL; + } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) { + ret = 0x04UL; + } else { + ret = 0x02UL; + } + + env->fpscr &= ~(0x0F << FPSCR_FPRF); + env->fpscr |= ret << FPSCR_FPRF; + env->crf[crfD] = ret; + if (unlikely(ret == 0x01UL)) { + if (float64_is_signaling_nan(farg1.d) || + float64_is_signaling_nan(farg2.d)) { + /* sNaN comparison */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN | + POWERPC_EXCP_FP_VXVC); + } else { + /* qNaN comparison */ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC); + } + } +} + +/* Single-precision floating-point conversions */ +static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + + u.f = int32_to_float32(val, &env->vec_status); + + return u.l; +} + +static inline uint32_t efscfui(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + + u.f = uint32_to_float32(val, &env->vec_status); + + return u.l; +} + +static inline int32_t efsctsi(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + + u.l = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float32_is_quiet_nan(u.f))) { + return 0; + } + + return float32_to_int32(u.f, &env->vec_status); +} + +static inline uint32_t efsctui(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + + u.l = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float32_is_quiet_nan(u.f))) { + return 0; + } + + return float32_to_uint32(u.f, &env->vec_status); +} + +static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + + u.l = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float32_is_quiet_nan(u.f))) { + return 0; + } + + return float32_to_int32_round_to_zero(u.f, &env->vec_status); +} + +static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + + u.l = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float32_is_quiet_nan(u.f))) { + return 0; + } + + return float32_to_uint32_round_to_zero(u.f, &env->vec_status); +} + +static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + float32 tmp; + + u.f = int32_to_float32(val, &env->vec_status); + 
tmp = int64_to_float32(1ULL << 32, &env->vec_status); + u.f = float32_div(u.f, tmp, &env->vec_status); + + return u.l; +} + +static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + float32 tmp; + + u.f = uint32_to_float32(val, &env->vec_status); + tmp = uint64_to_float32(1ULL << 32, &env->vec_status); + u.f = float32_div(u.f, tmp, &env->vec_status); + + return u.l; +} + +static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + float32 tmp; + + u.l = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float32_is_quiet_nan(u.f))) { + return 0; + } + tmp = uint64_to_float32(1ULL << 32, &env->vec_status); + u.f = float32_mul(u.f, tmp, &env->vec_status); + + return float32_to_int32(u.f, &env->vec_status); +} + +static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + float32 tmp; + + u.l = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float32_is_quiet_nan(u.f))) { + return 0; + } + tmp = uint64_to_float32(1ULL << 32, &env->vec_status); + u.f = float32_mul(u.f, tmp, &env->vec_status); + + return float32_to_uint32(u.f, &env->vec_status); +} + +#define HELPER_SPE_SINGLE_CONV(name) \ + uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \ + { \ + return e##name(env, val); \ + } +/* efscfsi */ +HELPER_SPE_SINGLE_CONV(fscfsi); +/* efscfui */ +HELPER_SPE_SINGLE_CONV(fscfui); +/* efscfuf */ +HELPER_SPE_SINGLE_CONV(fscfuf); +/* efscfsf */ +HELPER_SPE_SINGLE_CONV(fscfsf); +/* efsctsi */ +HELPER_SPE_SINGLE_CONV(fsctsi); +/* efsctui */ +HELPER_SPE_SINGLE_CONV(fsctui); +/* efsctsiz */ +HELPER_SPE_SINGLE_CONV(fsctsiz); +/* efsctuiz */ +HELPER_SPE_SINGLE_CONV(fsctuiz); +/* efsctsf */ +HELPER_SPE_SINGLE_CONV(fsctsf); +/* efsctuf */ +HELPER_SPE_SINGLE_CONV(fsctuf); + +#define HELPER_SPE_VECTOR_CONV(name) \ + uint64_t helper_ev##name(CPUPPCState *env, uint64_t val) \ + { \ + return ((uint64_t)e##name(env, val >> 32) << 32) | \ + (uint64_t)e##name(env, val); \ + } +/* evfscfsi */ +HELPER_SPE_VECTOR_CONV(fscfsi); +/* evfscfui */ +HELPER_SPE_VECTOR_CONV(fscfui); +/* evfscfuf */ +HELPER_SPE_VECTOR_CONV(fscfuf); +/* evfscfsf */ +HELPER_SPE_VECTOR_CONV(fscfsf); +/* evfsctsi */ +HELPER_SPE_VECTOR_CONV(fsctsi); +/* evfsctui */ +HELPER_SPE_VECTOR_CONV(fsctui); +/* evfsctsiz */ +HELPER_SPE_VECTOR_CONV(fsctsiz); +/* evfsctuiz */ +HELPER_SPE_VECTOR_CONV(fsctuiz); +/* evfsctsf */ +HELPER_SPE_VECTOR_CONV(fsctsf); +/* evfsctuf */ +HELPER_SPE_VECTOR_CONV(fsctuf); + +/* Single-precision floating-point arithmetic */ +static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + CPU_FloatU u1, u2; + + u1.l = op1; + u2.l = op2; + u1.f = float32_add(u1.f, u2.f, &env->vec_status); + return u1.l; +} + +static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + CPU_FloatU u1, u2; + + u1.l = op1; + u2.l = op2; + u1.f = float32_sub(u1.f, u2.f, &env->vec_status); + return u1.l; +} + +static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + CPU_FloatU u1, u2; + + u1.l = op1; + u2.l = op2; + u1.f = float32_mul(u1.f, u2.f, &env->vec_status); + return u1.l; +} + +static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + CPU_FloatU u1, u2; + + u1.l = op1; + u2.l = op2; + u1.f = float32_div(u1.f, u2.f, &env->vec_status); + return u1.l; +} + +#define HELPER_SPE_SINGLE_ARITH(name) \ + uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \ + { \ + return e##name(env, op1, op2); \ + } +/* efsadd */ 
+HELPER_SPE_SINGLE_ARITH(fsadd); +/* efssub */ +HELPER_SPE_SINGLE_ARITH(fssub); +/* efsmul */ +HELPER_SPE_SINGLE_ARITH(fsmul); +/* efsdiv */ +HELPER_SPE_SINGLE_ARITH(fsdiv); + +#define HELPER_SPE_VECTOR_ARITH(name) \ + uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \ + { \ + return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) | \ + (uint64_t)e##name(env, op1, op2); \ + } +/* evfsadd */ +HELPER_SPE_VECTOR_ARITH(fsadd); +/* evfssub */ +HELPER_SPE_VECTOR_ARITH(fssub); +/* evfsmul */ +HELPER_SPE_VECTOR_ARITH(fsmul); +/* evfsdiv */ +HELPER_SPE_VECTOR_ARITH(fsdiv); + +/* Single-precision floating-point comparisons */ +static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + CPU_FloatU u1, u2; + + u1.l = op1; + u2.l = op2; + return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0; +} + +static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + CPU_FloatU u1, u2; + + u1.l = op1; + u2.l = op2; + return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4; +} + +static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + CPU_FloatU u1, u2; + + u1.l = op1; + u2.l = op2; + return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0; +} + +static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + /* XXX: TODO: ignore special values (NaN, infinites, ...) */ + return efscmplt(env, op1, op2); +} + +static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + /* XXX: TODO: ignore special values (NaN, infinites, ...) */ + return efscmpgt(env, op1, op2); +} + +static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + /* XXX: TODO: ignore special values (NaN, infinites, ...) */ + return efscmpeq(env, op1, op2); +} + +#define HELPER_SINGLE_SPE_CMP(name) \ + uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \ + { \ + return e##name(env, op1, op2) << 2; \ + } +/* efststlt */ +HELPER_SINGLE_SPE_CMP(fststlt); +/* efststgt */ +HELPER_SINGLE_SPE_CMP(fststgt); +/* efststeq */ +HELPER_SINGLE_SPE_CMP(fststeq); +/* efscmplt */ +HELPER_SINGLE_SPE_CMP(fscmplt); +/* efscmpgt */ +HELPER_SINGLE_SPE_CMP(fscmpgt); +/* efscmpeq */ +HELPER_SINGLE_SPE_CMP(fscmpeq); + +static inline uint32_t evcmp_merge(int t0, int t1) +{ + return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1); +} + +#define HELPER_VECTOR_SPE_CMP(name) \ + uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \ + { \ + return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32), \ + e##name(env, op1, op2)); \ + } +/* evfststlt */ +HELPER_VECTOR_SPE_CMP(fststlt); +/* evfststgt */ +HELPER_VECTOR_SPE_CMP(fststgt); +/* evfststeq */ +HELPER_VECTOR_SPE_CMP(fststeq); +/* evfscmplt */ +HELPER_VECTOR_SPE_CMP(fscmplt); +/* evfscmpgt */ +HELPER_VECTOR_SPE_CMP(fscmpgt); +/* evfscmpeq */ +HELPER_VECTOR_SPE_CMP(fscmpeq); + +/* Double-precision floating-point conversion */ +uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val) +{ + CPU_DoubleU u; + + u.d = int32_to_float64(val, &env->vec_status); + + return u.ll; +} + +uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.d = int64_to_float64(val, &env->vec_status); + + return u.ll; +} + +uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val) +{ + CPU_DoubleU u; + + u.d = uint32_to_float64(val, &env->vec_status); + + return u.ll; +} + +uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.d = uint64_to_float64(val, &env->vec_status); + + return u.ll; +} + +uint32_t 
helper_efdctsi(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.ll = val; + /* NaNs are not treated the way IEEE 754 specifies */ + if (unlikely(float64_is_any_nan(u.d))) { + return 0; + } + + return float64_to_int32(u.d, &env->vec_status); +} + +uint32_t helper_efdctui(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.ll = val; + /* NaNs are not treated the way IEEE 754 specifies */ + if (unlikely(float64_is_any_nan(u.d))) { + return 0; + } + + return float64_to_uint32(u.d, &env->vec_status); +} + +uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.ll = val; + /* NaNs are not treated the way IEEE 754 specifies */ + if (unlikely(float64_is_any_nan(u.d))) { + return 0; + } + + return float64_to_int32_round_to_zero(u.d, &env->vec_status); +} + +uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.ll = val; + /* NaNs are not treated the way IEEE 754 specifies */ + if (unlikely(float64_is_any_nan(u.d))) { + return 0; + } + + return float64_to_int64_round_to_zero(u.d, &env->vec_status); +} + +uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.ll = val; + /* NaNs are not treated the way IEEE 754 specifies */ + if (unlikely(float64_is_any_nan(u.d))) { + return 0; + } + + return float64_to_uint32_round_to_zero(u.d, &env->vec_status); +} + +uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.ll = val; + /* NaNs are not treated the way IEEE 754 specifies */ + if (unlikely(float64_is_any_nan(u.d))) { + return 0; + } + + return float64_to_uint64_round_to_zero(u.d, &env->vec_status); +} + +uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val) +{ + CPU_DoubleU u; + float64 tmp; + + u.d = int32_to_float64(val, &env->vec_status); + tmp = int64_to_float64(1ULL << 32, &env->vec_status); + u.d = float64_div(u.d, tmp, &env->vec_status); + + return u.ll; +} + +uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val) +{ + CPU_DoubleU u; + float64 tmp; + + u.d = uint32_to_float64(val, &env->vec_status); + tmp = int64_to_float64(1ULL << 32, &env->vec_status); + u.d = float64_div(u.d, tmp, &env->vec_status); + + return u.ll; +} + +uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + float64 tmp; + + u.ll = val; + /* NaNs are not treated the way IEEE 754 specifies */ + if (unlikely(float64_is_any_nan(u.d))) { + return 0; + } + tmp = uint64_to_float64(1ULL << 32, &env->vec_status); + u.d = float64_mul(u.d, tmp, &env->vec_status); + + return float64_to_int32(u.d, &env->vec_status); +} + +uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + float64 tmp; + + u.ll = val; + /* NaNs are not treated the way IEEE 754 specifies */ + if (unlikely(float64_is_any_nan(u.d))) { + return 0; + } + tmp = uint64_to_float64(1ULL << 32, &env->vec_status); + u.d = float64_mul(u.d, tmp, &env->vec_status); + + return float64_to_uint32(u.d, &env->vec_status); +} + +uint32_t helper_efscfd(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u1; + CPU_FloatU u2; + + u1.ll = val; + u2.f = float64_to_float32(u1.d, &env->vec_status); + + return u2.l; +} + +uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val) +{ + CPU_DoubleU u2; + CPU_FloatU u1; + + u1.l = val; + u2.d = float32_to_float64(u1.f, &env->vec_status); + + return u2.ll; +} + +/* Double-precision floating-point arithmetic */ +uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + CPU_DoubleU u1, u2; + + u1.ll = op1; + u2.ll = op2; + u1.d = float64_add(u1.d, u2.d, &env->vec_status); + 
return u1.ll; +} + +uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + CPU_DoubleU u1, u2; + + u1.ll = op1; + u2.ll = op2; + u1.d = float64_sub(u1.d, u2.d, &env->vec_status); + return u1.ll; +} + +uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + CPU_DoubleU u1, u2; + + u1.ll = op1; + u2.ll = op2; + u1.d = float64_mul(u1.d, u2.d, &env->vec_status); + return u1.ll; +} + +uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + CPU_DoubleU u1, u2; + + u1.ll = op1; + u2.ll = op2; + u1.d = float64_div(u1.d, u2.d, &env->vec_status); + return u1.ll; +} + +/* Double-precision floating-point comparisons */ +uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + CPU_DoubleU u1, u2; + + u1.ll = op1; + u2.ll = op2; + return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0; +} + +uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + CPU_DoubleU u1, u2; + + u1.ll = op1; + u2.ll = op2; + return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4; +} + +uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + CPU_DoubleU u1, u2; + + u1.ll = op1; + u2.ll = op2; + return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0; +} + +uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + /* XXX: TODO: test special values (NaN, infinities, ...) */ + return helper_efdtstlt(env, op1, op2); +} + +uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + /* XXX: TODO: test special values (NaN, infinities, ...) */ + return helper_efdtstgt(env, op1, op2); +} + +uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + /* XXX: TODO: test special values (NaN, infinities, ...) */ + return helper_efdtsteq(env, op1, op2); +} diff --git a/target-ppc/helper.c b/target-ppc/helper.c index f556f8567a..48b19a7e1d 100644 --- a/target-ppc/helper.c +++ b/target-ppc/helper.c @@ -1,5 +1,5 @@ /* - * PowerPC emulation helpers for qemu. + * PowerPC emulation helpers for QEMU. * * Copyright (c) 2003-2007 Jocelyn Mayer * @@ -23,3169 +23,6 @@ #include "kvm_ppc.h" #include "cpus.h" -//#define DEBUG_MMU -//#define DEBUG_BATS -//#define DEBUG_SLB -//#define DEBUG_SOFTWARE_TLB -//#define DUMP_PAGE_TABLES -//#define DEBUG_EXCEPTIONS -//#define FLUSH_ALL_TLBS - -#ifdef DEBUG_MMU -# define LOG_MMU(...) qemu_log(__VA_ARGS__) -# define LOG_MMU_STATE(env) log_cpu_state((env), 0) -#else -# define LOG_MMU(...) do { } while (0) -# define LOG_MMU_STATE(...) do { } while (0) -#endif - - -#ifdef DEBUG_SOFTWARE_TLB -# define LOG_SWTLB(...) qemu_log(__VA_ARGS__) -#else -# define LOG_SWTLB(...) do { } while (0) -#endif - -#ifdef DEBUG_BATS -# define LOG_BATS(...) qemu_log(__VA_ARGS__) -#else -# define LOG_BATS(...) do { } while (0) -#endif - -#ifdef DEBUG_SLB -# define LOG_SLB(...) qemu_log(__VA_ARGS__) -#else -# define LOG_SLB(...) do { } while (0) -#endif - -#ifdef DEBUG_EXCEPTIONS -# define LOG_EXCP(...) qemu_log(__VA_ARGS__) -#else -# define LOG_EXCP(...) 
do { } while (0) -#endif - -/*****************************************************************************/ -/* PowerPC Hypercall emulation */ - -void (*cpu_ppc_hypercall)(CPUPPCState *); - -/*****************************************************************************/ -/* PowerPC MMU emulation */ - -#if defined(CONFIG_USER_ONLY) -int cpu_ppc_handle_mmu_fault (CPUPPCState *env, target_ulong address, int rw, - int mmu_idx) -{ - int exception, error_code; - - if (rw == 2) { - exception = POWERPC_EXCP_ISI; - error_code = 0x40000000; - } else { - exception = POWERPC_EXCP_DSI; - error_code = 0x40000000; - if (rw) - error_code |= 0x02000000; - env->spr[SPR_DAR] = address; - env->spr[SPR_DSISR] = error_code; - } - env->exception_index = exception; - env->error_code = error_code; - - return 1; -} - -#else -/* Common routines used by software and hardware TLBs emulation */ -static inline int pte_is_valid(target_ulong pte0) -{ - return pte0 & 0x80000000 ? 1 : 0; -} - -static inline void pte_invalidate(target_ulong *pte0) -{ - *pte0 &= ~0x80000000; -} - -#if defined(TARGET_PPC64) -static inline int pte64_is_valid(target_ulong pte0) -{ - return pte0 & 0x0000000000000001ULL ? 1 : 0; -} - -static inline void pte64_invalidate(target_ulong *pte0) -{ - *pte0 &= ~0x0000000000000001ULL; -} -#endif - -#define PTE_PTEM_MASK 0x7FFFFFBF -#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B) -#if defined(TARGET_PPC64) -#define PTE64_PTEM_MASK 0xFFFFFFFFFFFFFF80ULL -#define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F) -#endif - -static inline int pp_check(int key, int pp, int nx) -{ - int access; - - /* Compute access rights */ - /* When pp is 3/7, the result is undefined. Set it to noaccess */ - access = 0; - if (key == 0) { - switch (pp) { - case 0x0: - case 0x1: - case 0x2: - access |= PAGE_WRITE; - /* No break here */ - case 0x3: - case 0x6: - access |= PAGE_READ; - break; - } - } else { - switch (pp) { - case 0x0: - case 0x6: - access = 0; - break; - case 0x1: - case 0x3: - access = PAGE_READ; - break; - case 0x2: - access = PAGE_READ | PAGE_WRITE; - break; - } - } - if (nx == 0) - access |= PAGE_EXEC; - - return access; -} - -static inline int check_prot(int prot, int rw, int access_type) -{ - int ret; - - if (access_type == ACCESS_CODE) { - if (prot & PAGE_EXEC) - ret = 0; - else - ret = -2; - } else if (rw) { - if (prot & PAGE_WRITE) - ret = 0; - else - ret = -2; - } else { - if (prot & PAGE_READ) - ret = 0; - else - ret = -2; - } - - return ret; -} - -static inline int _pte_check(mmu_ctx_t *ctx, int is_64b, target_ulong pte0, - target_ulong pte1, int h, int rw, int type) -{ - target_ulong ptem, mmask; - int access, ret, pteh, ptev, pp; - - ret = -1; - /* Check validity and table match */ -#if defined(TARGET_PPC64) - if (is_64b) { - ptev = pte64_is_valid(pte0); - pteh = (pte0 >> 1) & 1; - } else -#endif - { - ptev = pte_is_valid(pte0); - pteh = (pte0 >> 6) & 1; - } - if (ptev && h == pteh) { - /* Check vsid & api */ -#if defined(TARGET_PPC64) - if (is_64b) { - ptem = pte0 & PTE64_PTEM_MASK; - mmask = PTE64_CHECK_MASK; - pp = (pte1 & 0x00000003) | ((pte1 >> 61) & 0x00000004); - ctx->nx = (pte1 >> 2) & 1; /* No execute bit */ - ctx->nx |= (pte1 >> 3) & 1; /* Guarded bit */ - } else -#endif - { - ptem = pte0 & PTE_PTEM_MASK; - mmask = PTE_CHECK_MASK; - pp = pte1 & 0x00000003; - } - if (ptem == ctx->ptem) { - if (ctx->raddr != (target_phys_addr_t)-1ULL) { - /* all matches should have equal RPN, WIMG & PP */ - if ((ctx->raddr & mmask) != (pte1 & mmask)) { - qemu_log("Bad RPN/WIMG/PP\n"); - return -3; - } - } - /* 
Compute access rights */ - access = pp_check(ctx->key, pp, ctx->nx); - /* Keep the matching PTE informations */ - ctx->raddr = pte1; - ctx->prot = access; - ret = check_prot(ctx->prot, rw, type); - if (ret == 0) { - /* Access granted */ - LOG_MMU("PTE access granted !\n"); - } else { - /* Access right violation */ - LOG_MMU("PTE access rejected\n"); - } - } - } - - return ret; -} - -static inline int pte32_check(mmu_ctx_t *ctx, target_ulong pte0, - target_ulong pte1, int h, int rw, int type) -{ - return _pte_check(ctx, 0, pte0, pte1, h, rw, type); -} - -#if defined(TARGET_PPC64) -static inline int pte64_check(mmu_ctx_t *ctx, target_ulong pte0, - target_ulong pte1, int h, int rw, int type) -{ - return _pte_check(ctx, 1, pte0, pte1, h, rw, type); -} -#endif - -static inline int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p, - int ret, int rw) -{ - int store = 0; - - /* Update page flags */ - if (!(*pte1p & 0x00000100)) { - /* Update accessed flag */ - *pte1p |= 0x00000100; - store = 1; - } - if (!(*pte1p & 0x00000080)) { - if (rw == 1 && ret == 0) { - /* Update changed flag */ - *pte1p |= 0x00000080; - store = 1; - } else { - /* Force page fault for first write access */ - ctx->prot &= ~PAGE_WRITE; - } - } - - return store; -} - -/* Software driven TLB helpers */ -static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, int way, - int is_code) -{ - int nr; - - /* Select TLB num in a way from address */ - nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1); - /* Select TLB way */ - nr += env->tlb_per_way * way; - /* 6xx have separate TLBs for instructions and data */ - if (is_code && env->id_tlbs == 1) - nr += env->nb_tlb; - - return nr; -} - -static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env) -{ - ppc6xx_tlb_t *tlb; - int nr, max; - - //LOG_SWTLB("Invalidate all TLBs\n"); - /* Invalidate all defined software TLB */ - max = env->nb_tlb; - if (env->id_tlbs == 1) - max *= 2; - for (nr = 0; nr < max; nr++) { - tlb = &env->tlb.tlb6[nr]; - pte_invalidate(&tlb->pte0); - } - tlb_flush(env, 1); -} - -static inline void __ppc6xx_tlb_invalidate_virt(CPUPPCState *env, - target_ulong eaddr, - int is_code, int match_epn) -{ -#if !defined(FLUSH_ALL_TLBS) - ppc6xx_tlb_t *tlb; - int way, nr; - - /* Invalidate ITLB + DTLB, all ways */ - for (way = 0; way < env->nb_ways; way++) { - nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code); - tlb = &env->tlb.tlb6[nr]; - if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) { - LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr, - env->nb_tlb, eaddr); - pte_invalidate(&tlb->pte0); - tlb_flush_page(env, tlb->EPN); - } - } -#else - /* XXX: PowerPC specification say this is valid as well */ - ppc6xx_tlb_invalidate_all(env); -#endif -} - -static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env, - target_ulong eaddr, int is_code) -{ - __ppc6xx_tlb_invalidate_virt(env, eaddr, is_code, 0); -} - -void ppc6xx_tlb_store (CPUPPCState *env, target_ulong EPN, int way, int is_code, - target_ulong pte0, target_ulong pte1) -{ - ppc6xx_tlb_t *tlb; - int nr; - - nr = ppc6xx_tlb_getnum(env, EPN, way, is_code); - tlb = &env->tlb.tlb6[nr]; - LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx - " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1); - /* Invalidate any pending reference in QEMU for this virtual address */ - __ppc6xx_tlb_invalidate_virt(env, EPN, is_code, 1); - tlb->pte0 = pte0; - tlb->pte1 = pte1; - tlb->EPN = EPN; - /* Store last way for LRU mechanism */ - env->last_way = way; -} 
- -static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong eaddr, int rw, int access_type) -{ - ppc6xx_tlb_t *tlb; - int nr, best, way; - int ret; - - best = -1; - ret = -1; /* No TLB found */ - for (way = 0; way < env->nb_ways; way++) { - nr = ppc6xx_tlb_getnum(env, eaddr, way, - access_type == ACCESS_CODE ? 1 : 0); - tlb = &env->tlb.tlb6[nr]; - /* This test "emulates" the PTE index match for hardware TLBs */ - if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) { - LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx - "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb, - pte_is_valid(tlb->pte0) ? "valid" : "inval", - tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr); - continue; - } - LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " " - TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb, - pte_is_valid(tlb->pte0) ? "valid" : "inval", - tlb->EPN, eaddr, tlb->pte1, - rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D'); - switch (pte32_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) { - case -3: - /* TLB inconsistency */ - return -1; - case -2: - /* Access violation */ - ret = -2; - best = nr; - break; - case -1: - default: - /* No match */ - break; - case 0: - /* access granted */ - /* XXX: we should go on looping to check all TLBs consistency - * but we can speed-up the whole thing as the - * result would be undefined if TLBs are not consistent. - */ - ret = 0; - best = nr; - goto done; - } - } - if (best != -1) { - done: - LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n", - ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret); - /* Update page flags */ - pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, rw); - } - - return ret; -} - -/* Perform BAT hit & translation */ -static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp, int *validp, - int *protp, target_ulong *BATu, - target_ulong *BATl) -{ - target_ulong bl; - int pp, valid, prot; - - bl = (*BATu & 0x00001FFC) << 15; - valid = 0; - prot = 0; - if (((msr_pr == 0) && (*BATu & 0x00000002)) || - ((msr_pr != 0) && (*BATu & 0x00000001))) { - valid = 1; - pp = *BATl & 0x00000003; - if (pp != 0) { - prot = PAGE_READ | PAGE_EXEC; - if (pp == 0x2) - prot |= PAGE_WRITE; - } - } - *blp = bl; - *validp = valid; - *protp = prot; -} - -static inline void bat_601_size_prot(CPUPPCState *env, target_ulong *blp, - int *validp, int *protp, - target_ulong *BATu, target_ulong *BATl) -{ - target_ulong bl; - int key, pp, valid, prot; - - bl = (*BATl & 0x0000003F) << 17; - LOG_BATS("b %02x ==> bl " TARGET_FMT_lx " msk " TARGET_FMT_lx "\n", - (uint8_t)(*BATl & 0x0000003F), bl, ~bl); - prot = 0; - valid = (*BATl >> 6) & 1; - if (valid) { - pp = *BATu & 0x00000003; - if (msr_pr == 0) - key = (*BATu >> 3) & 1; - else - key = (*BATu >> 2) & 1; - prot = pp_check(key, pp, 0); - } - *blp = bl; - *validp = valid; - *protp = prot; -} - -static inline int get_bat(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong virtual, - int rw, int type) -{ - target_ulong *BATlt, *BATut, *BATu, *BATl; - target_ulong BEPIl, BEPIu, bl; - int i, valid, prot; - int ret = -1; - - LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__, - type == ACCESS_CODE ? 
'I' : 'D', virtual); - switch (type) { - case ACCESS_CODE: - BATlt = env->IBAT[1]; - BATut = env->IBAT[0]; - break; - default: - BATlt = env->DBAT[1]; - BATut = env->DBAT[0]; - break; - } - for (i = 0; i < env->nb_BATs; i++) { - BATu = &BATut[i]; - BATl = &BATlt[i]; - BEPIu = *BATu & 0xF0000000; - BEPIl = *BATu & 0x0FFE0000; - if (unlikely(env->mmu_model == POWERPC_MMU_601)) { - bat_601_size_prot(env, &bl, &valid, &prot, BATu, BATl); - } else { - bat_size_prot(env, &bl, &valid, &prot, BATu, BATl); - } - LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx - " BATl " TARGET_FMT_lx "\n", __func__, - type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl); - if ((virtual & 0xF0000000) == BEPIu && - ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { - /* BAT matches */ - if (valid != 0) { - /* Get physical address */ - ctx->raddr = (*BATl & 0xF0000000) | - ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) | - (virtual & 0x0001F000); - /* Compute access rights */ - ctx->prot = prot; - ret = check_prot(ctx->prot, rw, type); - if (ret == 0) - LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n", - i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-', - ctx->prot & PAGE_WRITE ? 'W' : '-'); - break; - } - } - } - if (ret < 0) { -#if defined(DEBUG_BATS) - if (qemu_log_enabled()) { - LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual); - for (i = 0; i < 4; i++) { - BATu = &BATut[i]; - BATl = &BATlt[i]; - BEPIu = *BATu & 0xF0000000; - BEPIl = *BATu & 0x0FFE0000; - bl = (*BATu & 0x00001FFC) << 15; - LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx - " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " - TARGET_FMT_lx " " TARGET_FMT_lx "\n", - __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual, - *BATu, *BATl, BEPIu, BEPIl, bl); - } - } -#endif - } - /* No hit */ - return ret; -} - -static inline target_phys_addr_t get_pteg_offset(CPUPPCState *env, - target_phys_addr_t hash, - int pte_size) -{ - return (hash * pte_size * 8) & env->htab_mask; -} - -/* PTE table lookup */ -static inline int _find_pte(CPUPPCState *env, mmu_ctx_t *ctx, int is_64b, int h, - int rw, int type, int target_page_bits) -{ - target_phys_addr_t pteg_off; - target_ulong pte0, pte1; - int i, good = -1; - int ret, r; - - ret = -1; /* No entry found */ - pteg_off = get_pteg_offset(env, ctx->hash[h], - is_64b ? 
HASH_PTE_SIZE_64 : HASH_PTE_SIZE_32); - for (i = 0; i < 8; i++) { -#if defined(TARGET_PPC64) - if (is_64b) { - if (env->external_htab) { - pte0 = ldq_p(env->external_htab + pteg_off + (i * 16)); - pte1 = ldq_p(env->external_htab + pteg_off + (i * 16) + 8); - } else { - pte0 = ldq_phys(env->htab_base + pteg_off + (i * 16)); - pte1 = ldq_phys(env->htab_base + pteg_off + (i * 16) + 8); - } - - r = pte64_check(ctx, pte0, pte1, h, rw, type); - LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " " - TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n", - pteg_off + (i * 16), pte0, pte1, (int)(pte0 & 1), h, - (int)((pte0 >> 1) & 1), ctx->ptem); - } else -#endif - { - if (env->external_htab) { - pte0 = ldl_p(env->external_htab + pteg_off + (i * 8)); - pte1 = ldl_p(env->external_htab + pteg_off + (i * 8) + 4); - } else { - pte0 = ldl_phys(env->htab_base + pteg_off + (i * 8)); - pte1 = ldl_phys(env->htab_base + pteg_off + (i * 8) + 4); - } - r = pte32_check(ctx, pte0, pte1, h, rw, type); - LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " " - TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n", - pteg_off + (i * 8), pte0, pte1, (int)(pte0 >> 31), h, - (int)((pte0 >> 6) & 1), ctx->ptem); - } - switch (r) { - case -3: - /* PTE inconsistency */ - return -1; - case -2: - /* Access violation */ - ret = -2; - good = i; - break; - case -1: - default: - /* No PTE match */ - break; - case 0: - /* access granted */ - /* XXX: we should go on looping to check all PTEs consistency - * but if we can speed-up the whole thing as the - * result would be undefined if PTEs are not consistent. - */ - ret = 0; - good = i; - goto done; - } - } - if (good != -1) { - done: - LOG_MMU("found PTE at addr " TARGET_FMT_lx " prot=%01x ret=%d\n", - ctx->raddr, ctx->prot, ret); - /* Update page flags */ - pte1 = ctx->raddr; - if (pte_update_flags(ctx, &pte1, ret, rw) == 1) { -#if defined(TARGET_PPC64) - if (is_64b) { - if (env->external_htab) { - stq_p(env->external_htab + pteg_off + (good * 16) + 8, - pte1); - } else { - stq_phys_notdirty(env->htab_base + pteg_off + - (good * 16) + 8, pte1); - } - } else -#endif - { - if (env->external_htab) { - stl_p(env->external_htab + pteg_off + (good * 8) + 4, - pte1); - } else { - stl_phys_notdirty(env->htab_base + pteg_off + - (good * 8) + 4, pte1); - } - } - } - } - - /* We have a TLB that saves 4K pages, so let's - * split a huge page to 4k chunks */ - if (target_page_bits != TARGET_PAGE_BITS) { - ctx->raddr |= (ctx->eaddr & ((1 << target_page_bits) - 1)) - & TARGET_PAGE_MASK; - } - return ret; -} - -static inline int find_pte(CPUPPCState *env, mmu_ctx_t *ctx, int h, int rw, - int type, int target_page_bits) -{ -#if defined(TARGET_PPC64) - if (env->mmu_model & POWERPC_MMU_64) - return _find_pte(env, ctx, 1, h, rw, type, target_page_bits); -#endif - - return _find_pte(env, ctx, 0, h, rw, type, target_page_bits); -} - -#if defined(TARGET_PPC64) -static inline ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr) -{ - uint64_t esid_256M, esid_1T; - int n; - - LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr); - - esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V; - esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V; - - for (n = 0; n < env->slb_nr; n++) { - ppc_slb_t *slb = &env->slb[n]; - - LOG_SLB("%s: slot %d %016" PRIx64 " %016" - PRIx64 "\n", __func__, n, slb->esid, slb->vsid); - /* We check for 1T matches on all MMUs here - if the MMU - * doesn't have 1T segment support, we will have prevented 1T - * entries from being inserted in the slbmte code. 
*/ - if (((slb->esid == esid_256M) && - ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M)) - || ((slb->esid == esid_1T) && - ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) { - return slb; - } - } - - return NULL; -} - -void ppc_slb_invalidate_all (CPUPPCState *env) -{ - int n, do_invalidate; - - do_invalidate = 0; - /* XXX: Warning: slbia never invalidates the first segment */ - for (n = 1; n < env->slb_nr; n++) { - ppc_slb_t *slb = &env->slb[n]; - - if (slb->esid & SLB_ESID_V) { - slb->esid &= ~SLB_ESID_V; - /* XXX: given the fact that segment size is 256 MB or 1TB, - * and we still don't have a tlb_flush_mask(env, n, mask) - * in QEMU, we just invalidate all TLBs - */ - do_invalidate = 1; - } - } - if (do_invalidate) - tlb_flush(env, 1); -} - -void ppc_slb_invalidate_one (CPUPPCState *env, uint64_t T0) -{ - ppc_slb_t *slb; - - slb = slb_lookup(env, T0); - if (!slb) { - return; - } - - if (slb->esid & SLB_ESID_V) { - slb->esid &= ~SLB_ESID_V; - - /* XXX: given the fact that segment size is 256 MB or 1TB, - * and we still don't have a tlb_flush_mask(env, n, mask) - * in QEMU, we just invalidate all TLBs - */ - tlb_flush(env, 1); - } -} - -int ppc_store_slb (CPUPPCState *env, target_ulong rb, target_ulong rs) -{ - int slot = rb & 0xfff; - ppc_slb_t *slb = &env->slb[slot]; - - if (rb & (0x1000 - env->slb_nr)) { - return -1; /* Reserved bits set or slot too high */ - } - if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) { - return -1; /* Bad segment size */ - } - if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) { - return -1; /* 1T segment on MMU that doesn't support it */ - } - - /* Mask out the slot number as we store the entry */ - slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V); - slb->vsid = rs; - - LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64 - " %016" PRIx64 "\n", __func__, slot, rb, rs, - slb->esid, slb->vsid); - - return 0; -} - -int ppc_load_slb_esid (CPUPPCState *env, target_ulong rb, target_ulong *rt) -{ - int slot = rb & 0xfff; - ppc_slb_t *slb = &env->slb[slot]; - - if (slot >= env->slb_nr) { - return -1; - } - - *rt = slb->esid; - return 0; -} - -int ppc_load_slb_vsid (CPUPPCState *env, target_ulong rb, target_ulong *rt) -{ - int slot = rb & 0xfff; - ppc_slb_t *slb = &env->slb[slot]; - - if (slot >= env->slb_nr) { - return -1; - } - - *rt = slb->vsid; - return 0; -} -#endif /* defined(TARGET_PPC64) */ - -/* Perform segment based translation */ -static inline int get_segment(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong eaddr, int rw, int type) -{ - target_phys_addr_t hash; - target_ulong vsid; - int ds, pr, target_page_bits; - int ret, ret2; - - pr = msr_pr; - ctx->eaddr = eaddr; -#if defined(TARGET_PPC64) - if (env->mmu_model & POWERPC_MMU_64) { - ppc_slb_t *slb; - target_ulong pageaddr; - int segment_bits; - - LOG_MMU("Check SLBs\n"); - slb = slb_lookup(env, eaddr); - if (!slb) { - return -5; - } - - if (slb->vsid & SLB_VSID_B) { - vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T; - segment_bits = 40; - } else { - vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT; - segment_bits = 28; - } - - target_page_bits = (slb->vsid & SLB_VSID_L) - ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS; - ctx->key = !!(pr ? 
(slb->vsid & SLB_VSID_KP) - : (slb->vsid & SLB_VSID_KS)); - ds = 0; - ctx->nx = !!(slb->vsid & SLB_VSID_N); - - pageaddr = eaddr & ((1ULL << segment_bits) - - (1ULL << target_page_bits)); - if (slb->vsid & SLB_VSID_B) { - hash = vsid ^ (vsid << 25) ^ (pageaddr >> target_page_bits); - } else { - hash = vsid ^ (pageaddr >> target_page_bits); - } - /* Only 5 bits of the page index are used in the AVPN */ - ctx->ptem = (slb->vsid & SLB_VSID_PTEM) | - ((pageaddr >> 16) & ((1ULL << segment_bits) - 0x80)); - } else -#endif /* defined(TARGET_PPC64) */ - { - target_ulong sr, pgidx; - - sr = env->sr[eaddr >> 28]; - ctx->key = (((sr & 0x20000000) && (pr != 0)) || - ((sr & 0x40000000) && (pr == 0))) ? 1 : 0; - ds = sr & 0x80000000 ? 1 : 0; - ctx->nx = sr & 0x10000000 ? 1 : 0; - vsid = sr & 0x00FFFFFF; - target_page_bits = TARGET_PAGE_BITS; - LOG_MMU("Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx " nip=" - TARGET_FMT_lx " lr=" TARGET_FMT_lx - " ir=%d dr=%d pr=%d %d t=%d\n", - eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir, - (int)msr_dr, pr != 0 ? 1 : 0, rw, type); - pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; - hash = vsid ^ pgidx; - ctx->ptem = (vsid << 7) | (pgidx >> 10); - } - LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", - ctx->key, ds, ctx->nx, vsid); - ret = -1; - if (!ds) { - /* Check if instruction fetch is allowed, if needed */ - if (type != ACCESS_CODE || ctx->nx == 0) { - /* Page address translation */ - LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx - " hash " TARGET_FMT_plx "\n", - env->htab_base, env->htab_mask, hash); - ctx->hash[0] = hash; - ctx->hash[1] = ~hash; - - /* Initialize real address with an invalid value */ - ctx->raddr = (target_phys_addr_t)-1ULL; - if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_6xx || - env->mmu_model == POWERPC_MMU_SOFT_74xx)) { - /* Software TLB search */ - ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type); - } else { - LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx - " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx - " hash=" TARGET_FMT_plx "\n", - env->htab_base, env->htab_mask, vsid, ctx->ptem, - ctx->hash[0]); - /* Primary table lookup */ - ret = find_pte(env, ctx, 0, rw, type, target_page_bits); - if (ret < 0) { - /* Secondary table lookup */ - if (eaddr != 0xEFFFFFFF) - LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx - " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx - " hash=" TARGET_FMT_plx "\n", env->htab_base, - env->htab_mask, vsid, ctx->ptem, ctx->hash[1]); - ret2 = find_pte(env, ctx, 1, rw, type, - target_page_bits); - if (ret2 != -1) - ret = ret2; - } - } -#if defined (DUMP_PAGE_TABLES) - if (qemu_log_enabled()) { - target_phys_addr_t curaddr; - uint32_t a0, a1, a2, a3; - qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx - "\n", sdr, mask + 0x80); - for (curaddr = sdr; curaddr < (sdr + mask + 0x80); - curaddr += 16) { - a0 = ldl_phys(curaddr); - a1 = ldl_phys(curaddr + 4); - a2 = ldl_phys(curaddr + 8); - a3 = ldl_phys(curaddr + 12); - if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { - qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n", - curaddr, a0, a1, a2, a3); - } - } - } -#endif - } else { - LOG_MMU("No access allowed\n"); - ret = -3; - } - } else { - target_ulong sr; - LOG_MMU("direct store...\n"); - /* Direct-store segment : absolutely *BUGGY* for now */ - - /* Direct-store implies a 32-bit MMU. - * Check the Segment Register's bus unit ID (BUID). 
- */ - sr = env->sr[eaddr >> 28]; - if ((sr & 0x1FF00000) >> 20 == 0x07f) { - /* Memory-forced I/O controller interface access */ - /* If T=1 and BUID=x'07F', the 601 performs a memory access - * to SR[28-31] LA[4-31], bypassing all protection mechanisms. - */ - ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); - ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - return 0; - } - - switch (type) { - case ACCESS_INT: - /* Integer load/store : only access allowed */ - break; - case ACCESS_CODE: - /* No code fetch is allowed in direct-store areas */ - return -4; - case ACCESS_FLOAT: - /* Floating point load/store */ - return -4; - case ACCESS_RES: - /* lwarx, ldarx or srwcx. */ - return -4; - case ACCESS_CACHE: - /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */ - /* Should make the instruction do no-op. - * As it already do no-op, it's quite easy :-) - */ - ctx->raddr = eaddr; - return 0; - case ACCESS_EXT: - /* eciwx or ecowx */ - return -4; - default: - qemu_log("ERROR: instruction should not need " - "address translation\n"); - return -4; - } - if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) { - ctx->raddr = eaddr; - ret = 2; - } else { - ret = -2; - } - } - - return ret; -} - -/* Generic TLB check function for embedded PowerPC implementations */ -int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, - target_phys_addr_t *raddrp, - target_ulong address, uint32_t pid, int ext, - int i) -{ - target_ulong mask; - - /* Check valid flag */ - if (!(tlb->prot & PAGE_VALID)) { - return -1; - } - mask = ~(tlb->size - 1); - LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx - " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN, - mask, (uint32_t)tlb->PID, tlb->prot); - /* Check PID */ - if (tlb->PID != 0 && tlb->PID != pid) - return -1; - /* Check effective address */ - if ((address & mask) != tlb->EPN) - return -1; - *raddrp = (tlb->RPN & mask) | (address & ~mask); -#if (TARGET_PHYS_ADDR_BITS >= 36) - if (ext) { - /* Extend the physical address to 36 bits */ - *raddrp |= (target_phys_addr_t)(tlb->RPN & 0xF) << 32; - } -#endif - - return 0; -} - -/* Generic TLB search function for PowerPC embedded implementations */ -int ppcemb_tlb_search (CPUPPCState *env, target_ulong address, uint32_t pid) -{ - ppcemb_tlb_t *tlb; - target_phys_addr_t raddr; - int i, ret; - - /* Default return value is no match */ - ret = -1; - for (i = 0; i < env->nb_tlb; i++) { - tlb = &env->tlb.tlbe[i]; - if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) { - ret = i; - break; - } - } - - return ret; -} - -/* Helpers specific to PowerPC 40x implementations */ -static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env) -{ - ppcemb_tlb_t *tlb; - int i; - - for (i = 0; i < env->nb_tlb; i++) { - tlb = &env->tlb.tlbe[i]; - tlb->prot &= ~PAGE_VALID; - } - tlb_flush(env, 1); -} - -static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState *env, - target_ulong eaddr, uint32_t pid) -{ -#if !defined(FLUSH_ALL_TLBS) - ppcemb_tlb_t *tlb; - target_phys_addr_t raddr; - target_ulong page, end; - int i; - - for (i = 0; i < env->nb_tlb; i++) { - tlb = &env->tlb.tlbe[i]; - if (ppcemb_tlb_check(env, tlb, &raddr, eaddr, pid, 0, i) == 0) { - end = tlb->EPN + tlb->size; - for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) - tlb_flush_page(env, page); - tlb->prot &= ~PAGE_VALID; - break; - } - } -#else - ppc4xx_tlb_invalidate_all(env); -#endif -} - -static int mmu40x_get_physical_address (CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong address, int rw, 
int access_type) -{ - ppcemb_tlb_t *tlb; - target_phys_addr_t raddr; - int i, ret, zsel, zpr, pr; - - ret = -1; - raddr = (target_phys_addr_t)-1ULL; - pr = msr_pr; - for (i = 0; i < env->nb_tlb; i++) { - tlb = &env->tlb.tlbe[i]; - if (ppcemb_tlb_check(env, tlb, &raddr, address, - env->spr[SPR_40x_PID], 0, i) < 0) - continue; - zsel = (tlb->attr >> 4) & 0xF; - zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3; - LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n", - __func__, i, zsel, zpr, rw, tlb->attr); - /* Check execute enable bit */ - switch (zpr) { - case 0x2: - if (pr != 0) - goto check_perms; - /* No break here */ - case 0x3: - /* All accesses granted */ - ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - ret = 0; - break; - case 0x0: - if (pr != 0) { - /* Raise Zone protection fault. */ - env->spr[SPR_40x_ESR] = 1 << 22; - ctx->prot = 0; - ret = -2; - break; - } - /* No break here */ - case 0x1: - check_perms: - /* Check from TLB entry */ - ctx->prot = tlb->prot; - ret = check_prot(ctx->prot, rw, access_type); - if (ret == -2) - env->spr[SPR_40x_ESR] = 0; - break; - } - if (ret >= 0) { - ctx->raddr = raddr; - LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx - " %d %d\n", __func__, address, ctx->raddr, ctx->prot, - ret); - return 0; - } - } - LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx - " %d %d\n", __func__, address, raddr, ctx->prot, ret); - - return ret; -} - -void store_40x_sler (CPUPPCState *env, uint32_t val) -{ - /* XXX: TO BE FIXED */ - if (val != 0x00000000) { - cpu_abort(env, "Little-endian regions are not supported by now\n"); - } - env->spr[SPR_405_SLER] = val; -} - -static inline int mmubooke_check_tlb (CPUPPCState *env, ppcemb_tlb_t *tlb, - target_phys_addr_t *raddr, int *prot, - target_ulong address, int rw, - int access_type, int i) -{ - int ret, _prot; - - if (ppcemb_tlb_check(env, tlb, raddr, address, - env->spr[SPR_BOOKE_PID], - !env->nb_pids, i) >= 0) { - goto found_tlb; - } - - if (env->spr[SPR_BOOKE_PID1] && - ppcemb_tlb_check(env, tlb, raddr, address, - env->spr[SPR_BOOKE_PID1], 0, i) >= 0) { - goto found_tlb; - } - - if (env->spr[SPR_BOOKE_PID2] && - ppcemb_tlb_check(env, tlb, raddr, address, - env->spr[SPR_BOOKE_PID2], 0, i) >= 0) { - goto found_tlb; - } - - LOG_SWTLB("%s: TLB entry not found\n", __func__); - return -1; - -found_tlb: - - if (msr_pr != 0) { - _prot = tlb->prot & 0xF; - } else { - _prot = (tlb->prot >> 4) & 0xF; - } - - /* Check the address space */ - if (access_type == ACCESS_CODE) { - if (msr_ir != (tlb->attr & 1)) { - LOG_SWTLB("%s: AS doesn't match\n", __func__); - return -1; - } - - *prot = _prot; - if (_prot & PAGE_EXEC) { - LOG_SWTLB("%s: good TLB!\n", __func__); - return 0; - } - - LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, _prot); - ret = -3; - } else { - if (msr_dr != (tlb->attr & 1)) { - LOG_SWTLB("%s: AS doesn't match\n", __func__); - return -1; - } - - *prot = _prot; - if ((!rw && _prot & PAGE_READ) || (rw && (_prot & PAGE_WRITE))) { - LOG_SWTLB("%s: found TLB!\n", __func__); - return 0; - } - - LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, _prot); - ret = -2; - } - - return ret; -} - -static int mmubooke_get_physical_address (CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong address, int rw, - int access_type) -{ - ppcemb_tlb_t *tlb; - target_phys_addr_t raddr; - int i, ret; - - ret = -1; - raddr = (target_phys_addr_t)-1ULL; - for (i = 0; i < env->nb_tlb; i++) { - tlb = &env->tlb.tlbe[i]; - ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw, 
- access_type, i); - if (!ret) { - break; - } - } - - if (ret >= 0) { - ctx->raddr = raddr; - LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx - " %d %d\n", __func__, address, ctx->raddr, ctx->prot, - ret); - } else { - LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx - " %d %d\n", __func__, address, raddr, ctx->prot, ret); - } - - return ret; -} - -void booke206_flush_tlb(CPUPPCState *env, int flags, const int check_iprot) -{ - int tlb_size; - int i, j; - ppcmas_tlb_t *tlb = env->tlb.tlbm; - - for (i = 0; i < BOOKE206_MAX_TLBN; i++) { - if (flags & (1 << i)) { - tlb_size = booke206_tlb_size(env, i); - for (j = 0; j < tlb_size; j++) { - if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) { - tlb[j].mas1 &= ~MAS1_VALID; - } - } - } - tlb += booke206_tlb_size(env, i); - } - - tlb_flush(env, 1); -} - -target_phys_addr_t booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb) -{ - int tlbm_size; - - tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; - - return 1024ULL << tlbm_size; -} - -/* TLB check function for MAS based SoftTLBs */ -int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, - target_phys_addr_t *raddrp, - target_ulong address, uint32_t pid) -{ - target_ulong mask; - uint32_t tlb_pid; - - /* Check valid flag */ - if (!(tlb->mas1 & MAS1_VALID)) { - return -1; - } - - mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); - LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%" - PRIx64 " mask=0x" TARGET_FMT_lx " MAS7_3=0x%" PRIx64 " MAS8=%x\n", - __func__, address, pid, tlb->mas1, tlb->mas2, mask, tlb->mas7_3, - tlb->mas8); - - /* Check PID */ - tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; - if (tlb_pid != 0 && tlb_pid != pid) { - return -1; - } - - /* Check effective address */ - if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) { - return -1; - } - - if (raddrp) { - *raddrp = (tlb->mas7_3 & mask) | (address & ~mask); - } - - return 0; -} - -static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb, - target_phys_addr_t *raddr, int *prot, - target_ulong address, int rw, - int access_type) -{ - int ret; - int _prot = 0; - - if (ppcmas_tlb_check(env, tlb, raddr, address, - env->spr[SPR_BOOKE_PID]) >= 0) { - goto found_tlb; - } - - if (env->spr[SPR_BOOKE_PID1] && - ppcmas_tlb_check(env, tlb, raddr, address, - env->spr[SPR_BOOKE_PID1]) >= 0) { - goto found_tlb; - } - - if (env->spr[SPR_BOOKE_PID2] && - ppcmas_tlb_check(env, tlb, raddr, address, - env->spr[SPR_BOOKE_PID2]) >= 0) { - goto found_tlb; - } - - LOG_SWTLB("%s: TLB entry not found\n", __func__); - return -1; - -found_tlb: - - if (msr_pr != 0) { - if (tlb->mas7_3 & MAS3_UR) { - _prot |= PAGE_READ; - } - if (tlb->mas7_3 & MAS3_UW) { - _prot |= PAGE_WRITE; - } - if (tlb->mas7_3 & MAS3_UX) { - _prot |= PAGE_EXEC; - } - } else { - if (tlb->mas7_3 & MAS3_SR) { - _prot |= PAGE_READ; - } - if (tlb->mas7_3 & MAS3_SW) { - _prot |= PAGE_WRITE; - } - if (tlb->mas7_3 & MAS3_SX) { - _prot |= PAGE_EXEC; - } - } - - /* Check the address space and permissions */ - if (access_type == ACCESS_CODE) { - if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { - LOG_SWTLB("%s: AS doesn't match\n", __func__); - return -1; - } - - *prot = _prot; - if (_prot & PAGE_EXEC) { - LOG_SWTLB("%s: good TLB!\n", __func__); - return 0; - } - - LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, _prot); - ret = -3; - } else { - if (msr_dr != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { - LOG_SWTLB("%s: AS doesn't match\n", __func__); - return -1; - } - - *prot = _prot; 
- if ((!rw && _prot & PAGE_READ) || (rw && (_prot & PAGE_WRITE))) { - LOG_SWTLB("%s: found TLB!\n", __func__); - return 0; - } - - LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, _prot); - ret = -2; - } - - return ret; -} - -static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong address, int rw, - int access_type) -{ - ppcmas_tlb_t *tlb; - target_phys_addr_t raddr; - int i, j, ret; - - ret = -1; - raddr = (target_phys_addr_t)-1ULL; - - for (i = 0; i < BOOKE206_MAX_TLBN; i++) { - int ways = booke206_tlb_ways(env, i); - - for (j = 0; j < ways; j++) { - tlb = booke206_get_tlbm(env, i, address, j); - if (!tlb) { - continue; - } - ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address, - rw, access_type); - if (ret != -1) { - goto found_tlb; - } - } - } - -found_tlb: - - if (ret >= 0) { - ctx->raddr = raddr; - LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx - " %d %d\n", __func__, address, ctx->raddr, ctx->prot, - ret); - } else { - LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx - " %d %d\n", __func__, address, raddr, ctx->prot, ret); - } - - return ret; -} - -static const char *book3e_tsize_to_str[32] = { - "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", - "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", - "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G", - "1T", "2T" -}; - -static void mmubooke_dump_mmu(FILE *f, fprintf_function cpu_fprintf, - CPUPPCState *env) -{ - ppcemb_tlb_t *entry; - int i; - - if (kvm_enabled() && !env->kvm_sw_tlb) { - cpu_fprintf(f, "Cannot access KVM TLB\n"); - return; - } - - cpu_fprintf(f, "\nTLB:\n"); - cpu_fprintf(f, "Effective Physical Size PID Prot " - "Attr\n"); - - entry = &env->tlb.tlbe[0]; - for (i = 0; i < env->nb_tlb; i++, entry++) { - target_phys_addr_t ea, pa; - target_ulong mask; - uint64_t size = (uint64_t)entry->size; - char size_buf[20]; - - /* Check valid flag */ - if (!(entry->prot & PAGE_VALID)) { - continue; - } - - mask = ~(entry->size - 1); - ea = entry->EPN & mask; - pa = entry->RPN & mask; -#if (TARGET_PHYS_ADDR_BITS >= 36) - /* Extend the physical address to 36 bits */ - pa |= (target_phys_addr_t)(entry->RPN & 0xF) << 32; -#endif - size /= 1024; - if (size >= 1024) { - snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / 1024); - } else { - snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size); - } - cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n", - (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID, - entry->prot, entry->attr); - } - -} - -static void mmubooke206_dump_one_tlb(FILE *f, fprintf_function cpu_fprintf, - CPUPPCState *env, int tlbn, int offset, - int tlbsize) -{ - ppcmas_tlb_t *entry; - int i; - - cpu_fprintf(f, "\nTLB%d:\n", tlbn); - cpu_fprintf(f, "Effective Physical Size TID TS SRWX URWX WIMGE U0123\n"); - - entry = &env->tlb.tlbm[offset]; - for (i = 0; i < tlbsize; i++, entry++) { - target_phys_addr_t ea, pa, size; - int tsize; - - if (!(entry->mas1 & MAS1_VALID)) { - continue; - } - - tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; - size = 1024ULL << tsize; - ea = entry->mas2 & ~(size - 1); - pa = entry->mas7_3 & ~(size - 1); - - cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c U%c%c%c %c%c%c%c%c U%c%c%c%c\n", - (uint64_t)ea, (uint64_t)pa, - book3e_tsize_to_str[tsize], - (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT, - (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT, - entry->mas7_3 & 
MAS3_SR ? 'R' : '-', - entry->mas7_3 & MAS3_SW ? 'W' : '-', - entry->mas7_3 & MAS3_SX ? 'X' : '-', - entry->mas7_3 & MAS3_UR ? 'R' : '-', - entry->mas7_3 & MAS3_UW ? 'W' : '-', - entry->mas7_3 & MAS3_UX ? 'X' : '-', - entry->mas2 & MAS2_W ? 'W' : '-', - entry->mas2 & MAS2_I ? 'I' : '-', - entry->mas2 & MAS2_M ? 'M' : '-', - entry->mas2 & MAS2_G ? 'G' : '-', - entry->mas2 & MAS2_E ? 'E' : '-', - entry->mas7_3 & MAS3_U0 ? '0' : '-', - entry->mas7_3 & MAS3_U1 ? '1' : '-', - entry->mas7_3 & MAS3_U2 ? '2' : '-', - entry->mas7_3 & MAS3_U3 ? '3' : '-'); - } -} - -static void mmubooke206_dump_mmu(FILE *f, fprintf_function cpu_fprintf, - CPUPPCState *env) -{ - int offset = 0; - int i; - - if (kvm_enabled() && !env->kvm_sw_tlb) { - cpu_fprintf(f, "Cannot access KVM TLB\n"); - return; - } - - for (i = 0; i < BOOKE206_MAX_TLBN; i++) { - int size = booke206_tlb_size(env, i); - - if (size == 0) { - continue; - } - - mmubooke206_dump_one_tlb(f, cpu_fprintf, env, i, offset, size); - offset += size; - } -} - -#if defined(TARGET_PPC64) -static void mmubooks_dump_mmu(FILE *f, fprintf_function cpu_fprintf, - CPUPPCState *env) -{ - int i; - uint64_t slbe, slbv; - - cpu_synchronize_state(env); - - cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n"); - for (i = 0; i < env->slb_nr; i++) { - slbe = env->slb[i].esid; - slbv = env->slb[i].vsid; - if (slbe == 0 && slbv == 0) { - continue; - } - cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n", - i, slbe, slbv); - } -} -#endif - -void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env) -{ - switch (env->mmu_model) { - case POWERPC_MMU_BOOKE: - mmubooke_dump_mmu(f, cpu_fprintf, env); - break; - case POWERPC_MMU_BOOKE206: - mmubooke206_dump_mmu(f, cpu_fprintf, env); - break; -#if defined(TARGET_PPC64) - case POWERPC_MMU_64B: - case POWERPC_MMU_2_06: - mmubooks_dump_mmu(f, cpu_fprintf, env); - break; -#endif - default: - cpu_fprintf(f, "%s: unimplemented\n", __func__); - } -} - -static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong eaddr, int rw) -{ - int in_plb, ret; - - ctx->raddr = eaddr; - ctx->prot = PAGE_READ | PAGE_EXEC; - ret = 0; - switch (env->mmu_model) { - case POWERPC_MMU_32B: - case POWERPC_MMU_601: - case POWERPC_MMU_SOFT_6xx: - case POWERPC_MMU_SOFT_74xx: - case POWERPC_MMU_SOFT_4xx: - case POWERPC_MMU_REAL: - case POWERPC_MMU_BOOKE: - ctx->prot |= PAGE_WRITE; - break; -#if defined(TARGET_PPC64) - case POWERPC_MMU_620: - case POWERPC_MMU_64B: - case POWERPC_MMU_2_06: - /* Real address are 60 bits long */ - ctx->raddr &= 0x0FFFFFFFFFFFFFFFULL; - ctx->prot |= PAGE_WRITE; - break; -#endif - case POWERPC_MMU_SOFT_4xx_Z: - if (unlikely(msr_pe != 0)) { - /* 403 family add some particular protections, - * using PBL/PBU registers for accesses with no translation. - */ - in_plb = - /* Check PLB validity */ - (env->pb[0] < env->pb[1] && - /* and address in plb area */ - eaddr >= env->pb[0] && eaddr < env->pb[1]) || - (env->pb[2] < env->pb[3] && - eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 
1 : 0; - if (in_plb ^ msr_px) { - /* Access in protected area */ - if (rw == 1) { - /* Access is not allowed */ - ret = -2; - } - } else { - /* Read-write access is allowed */ - ctx->prot |= PAGE_WRITE; - } - } - break; - case POWERPC_MMU_MPC8xx: - /* XXX: TODO */ - cpu_abort(env, "MPC8xx MMU model is not implemented\n"); - break; - case POWERPC_MMU_BOOKE206: - cpu_abort(env, "BookE 2.06 MMU doesn't have physical real mode\n"); - break; - default: - cpu_abort(env, "Unknown or invalid MMU model\n"); - return -1; - } - - return ret; -} - -int get_physical_address (CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, - int rw, int access_type) -{ - int ret; - -#if 0 - qemu_log("%s\n", __func__); -#endif - if ((access_type == ACCESS_CODE && msr_ir == 0) || - (access_type != ACCESS_CODE && msr_dr == 0)) { - if (env->mmu_model == POWERPC_MMU_BOOKE) { - /* The BookE MMU always performs address translation. The - IS and DS bits only affect the address space. */ - ret = mmubooke_get_physical_address(env, ctx, eaddr, - rw, access_type); - } else if (env->mmu_model == POWERPC_MMU_BOOKE206) { - ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw, - access_type); - } else { - /* No address translation. */ - ret = check_physical(env, ctx, eaddr, rw); - } - } else { - ret = -1; - switch (env->mmu_model) { - case POWERPC_MMU_32B: - case POWERPC_MMU_601: - case POWERPC_MMU_SOFT_6xx: - case POWERPC_MMU_SOFT_74xx: - /* Try to find a BAT */ - if (env->nb_BATs != 0) - ret = get_bat(env, ctx, eaddr, rw, access_type); -#if defined(TARGET_PPC64) - case POWERPC_MMU_620: - case POWERPC_MMU_64B: - case POWERPC_MMU_2_06: -#endif - if (ret < 0) { - /* We didn't match any BAT entry or don't have BATs */ - ret = get_segment(env, ctx, eaddr, rw, access_type); - } - break; - case POWERPC_MMU_SOFT_4xx: - case POWERPC_MMU_SOFT_4xx_Z: - ret = mmu40x_get_physical_address(env, ctx, eaddr, - rw, access_type); - break; - case POWERPC_MMU_BOOKE: - ret = mmubooke_get_physical_address(env, ctx, eaddr, - rw, access_type); - break; - case POWERPC_MMU_BOOKE206: - ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw, - access_type); - break; - case POWERPC_MMU_MPC8xx: - /* XXX: TODO */ - cpu_abort(env, "MPC8xx MMU model is not implemented\n"); - break; - case POWERPC_MMU_REAL: - cpu_abort(env, "PowerPC in real mode do not do any translation\n"); - return -1; - default: - cpu_abort(env, "Unknown or invalid MMU model\n"); - return -1; - } - } -#if 0 - qemu_log("%s address " TARGET_FMT_lx " => %d " TARGET_FMT_plx "\n", - __func__, eaddr, ret, ctx->raddr); -#endif - - return ret; -} - -target_phys_addr_t cpu_get_phys_page_debug (CPUPPCState *env, target_ulong addr) -{ - mmu_ctx_t ctx; - - if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) - return -1; - - return ctx.raddr & TARGET_PAGE_MASK; -} - -static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, - int rw) -{ - env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; - env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; - env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; - env->spr[SPR_BOOKE_MAS3] = 0; - env->spr[SPR_BOOKE_MAS6] = 0; - env->spr[SPR_BOOKE_MAS7] = 0; - - /* AS */ - if (((rw == 2) && msr_ir) || ((rw != 2) && msr_dr)) { - env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; - env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; - } - - env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; - env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; - - switch (env->spr[SPR_BOOKE_MAS4] & 
MAS4_TIDSELD_PIDZ) { - case MAS4_TIDSELD_PID0: - env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID] << MAS1_TID_SHIFT; - break; - case MAS4_TIDSELD_PID1: - env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID1] << MAS1_TID_SHIFT; - break; - case MAS4_TIDSELD_PID2: - env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID2] << MAS1_TID_SHIFT; - break; - } - - env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; - - /* next victim logic */ - env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; - env->last_way++; - env->last_way &= booke206_tlb_ways(env, 0) - 1; - env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; -} - -/* Perform address translation */ -int cpu_ppc_handle_mmu_fault (CPUPPCState *env, target_ulong address, int rw, - int mmu_idx) -{ - mmu_ctx_t ctx; - int access_type; - int ret = 0; - - if (rw == 2) { - /* code access */ - rw = 0; - access_type = ACCESS_CODE; - } else { - /* data access */ - access_type = env->access_type; - } - ret = get_physical_address(env, &ctx, address, rw, access_type); - if (ret == 0) { - tlb_set_page(env, address & TARGET_PAGE_MASK, - ctx.raddr & TARGET_PAGE_MASK, ctx.prot, - mmu_idx, TARGET_PAGE_SIZE); - ret = 0; - } else if (ret < 0) { - LOG_MMU_STATE(env); - if (access_type == ACCESS_CODE) { - switch (ret) { - case -1: - /* No matches in page tables or TLB */ - switch (env->mmu_model) { - case POWERPC_MMU_SOFT_6xx: - env->exception_index = POWERPC_EXCP_IFTLB; - env->error_code = 1 << 18; - env->spr[SPR_IMISS] = address; - env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; - goto tlb_miss; - case POWERPC_MMU_SOFT_74xx: - env->exception_index = POWERPC_EXCP_IFTLB; - goto tlb_miss_74xx; - case POWERPC_MMU_SOFT_4xx: - case POWERPC_MMU_SOFT_4xx_Z: - env->exception_index = POWERPC_EXCP_ITLB; - env->error_code = 0; - env->spr[SPR_40x_DEAR] = address; - env->spr[SPR_40x_ESR] = 0x00000000; - break; - case POWERPC_MMU_32B: - case POWERPC_MMU_601: -#if defined(TARGET_PPC64) - case POWERPC_MMU_620: - case POWERPC_MMU_64B: - case POWERPC_MMU_2_06: -#endif - env->exception_index = POWERPC_EXCP_ISI; - env->error_code = 0x40000000; - break; - case POWERPC_MMU_BOOKE206: - booke206_update_mas_tlb_miss(env, address, rw); - /* fall through */ - case POWERPC_MMU_BOOKE: - env->exception_index = POWERPC_EXCP_ITLB; - env->error_code = 0; - env->spr[SPR_BOOKE_DEAR] = address; - return -1; - case POWERPC_MMU_MPC8xx: - /* XXX: TODO */ - cpu_abort(env, "MPC8xx MMU model is not implemented\n"); - break; - case POWERPC_MMU_REAL: - cpu_abort(env, "PowerPC in real mode should never raise " - "any MMU exceptions\n"); - return -1; - default: - cpu_abort(env, "Unknown or invalid MMU model\n"); - return -1; - } - break; - case -2: - /* Access rights violation */ - env->exception_index = POWERPC_EXCP_ISI; - env->error_code = 0x08000000; - break; - case -3: - /* No execute protection violation */ - if ((env->mmu_model == POWERPC_MMU_BOOKE) || - (env->mmu_model == POWERPC_MMU_BOOKE206)) { - env->spr[SPR_BOOKE_ESR] = 0x00000000; - } - env->exception_index = POWERPC_EXCP_ISI; - env->error_code = 0x10000000; - break; - case -4: - /* Direct store exception */ - /* No code fetch is allowed in direct-store areas */ - env->exception_index = POWERPC_EXCP_ISI; - env->error_code = 0x10000000; - break; -#if defined(TARGET_PPC64) - case -5: - /* No match in segment table */ - if (env->mmu_model == POWERPC_MMU_620) { - env->exception_index = POWERPC_EXCP_ISI; - /* XXX: this might be incorrect */ - env->error_code = 0x40000000; - } else { - env->exception_index = POWERPC_EXCP_ISEG; - 
env->error_code = 0; - } - break; -#endif - } - } else { - switch (ret) { - case -1: - /* No matches in page tables or TLB */ - switch (env->mmu_model) { - case POWERPC_MMU_SOFT_6xx: - if (rw == 1) { - env->exception_index = POWERPC_EXCP_DSTLB; - env->error_code = 1 << 16; - } else { - env->exception_index = POWERPC_EXCP_DLTLB; - env->error_code = 0; - } - env->spr[SPR_DMISS] = address; - env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; - tlb_miss: - env->error_code |= ctx.key << 19; - env->spr[SPR_HASH1] = env->htab_base + - get_pteg_offset(env, ctx.hash[0], HASH_PTE_SIZE_32); - env->spr[SPR_HASH2] = env->htab_base + - get_pteg_offset(env, ctx.hash[1], HASH_PTE_SIZE_32); - break; - case POWERPC_MMU_SOFT_74xx: - if (rw == 1) { - env->exception_index = POWERPC_EXCP_DSTLB; - } else { - env->exception_index = POWERPC_EXCP_DLTLB; - } - tlb_miss_74xx: - /* Implement LRU algorithm */ - env->error_code = ctx.key << 19; - env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) | - ((env->last_way + 1) & (env->nb_ways - 1)); - env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem; - break; - case POWERPC_MMU_SOFT_4xx: - case POWERPC_MMU_SOFT_4xx_Z: - env->exception_index = POWERPC_EXCP_DTLB; - env->error_code = 0; - env->spr[SPR_40x_DEAR] = address; - if (rw) - env->spr[SPR_40x_ESR] = 0x00800000; - else - env->spr[SPR_40x_ESR] = 0x00000000; - break; - case POWERPC_MMU_32B: - case POWERPC_MMU_601: -#if defined(TARGET_PPC64) - case POWERPC_MMU_620: - case POWERPC_MMU_64B: - case POWERPC_MMU_2_06: -#endif - env->exception_index = POWERPC_EXCP_DSI; - env->error_code = 0; - env->spr[SPR_DAR] = address; - if (rw == 1) - env->spr[SPR_DSISR] = 0x42000000; - else - env->spr[SPR_DSISR] = 0x40000000; - break; - case POWERPC_MMU_MPC8xx: - /* XXX: TODO */ - cpu_abort(env, "MPC8xx MMU model is not implemented\n"); - break; - case POWERPC_MMU_BOOKE206: - booke206_update_mas_tlb_miss(env, address, rw); - /* fall through */ - case POWERPC_MMU_BOOKE: - env->exception_index = POWERPC_EXCP_DTLB; - env->error_code = 0; - env->spr[SPR_BOOKE_DEAR] = address; - env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0; - return -1; - case POWERPC_MMU_REAL: - cpu_abort(env, "PowerPC in real mode should never raise " - "any MMU exceptions\n"); - return -1; - default: - cpu_abort(env, "Unknown or invalid MMU model\n"); - return -1; - } - break; - case -2: - /* Access rights violation */ - env->exception_index = POWERPC_EXCP_DSI; - env->error_code = 0; - if (env->mmu_model == POWERPC_MMU_SOFT_4xx - || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) { - env->spr[SPR_40x_DEAR] = address; - if (rw) { - env->spr[SPR_40x_ESR] |= 0x00800000; - } - } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || - (env->mmu_model == POWERPC_MMU_BOOKE206)) { - env->spr[SPR_BOOKE_DEAR] = address; - env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0; - } else { - env->spr[SPR_DAR] = address; - if (rw == 1) { - env->spr[SPR_DSISR] = 0x0A000000; - } else { - env->spr[SPR_DSISR] = 0x08000000; - } - } - break; - case -4: - /* Direct store exception */ - switch (access_type) { - case ACCESS_FLOAT: - /* Floating point load/store */ - env->exception_index = POWERPC_EXCP_ALIGN; - env->error_code = POWERPC_EXCP_ALIGN_FP; - env->spr[SPR_DAR] = address; - break; - case ACCESS_RES: - /* lwarx, ldarx or stwcx. 
*/ - env->exception_index = POWERPC_EXCP_DSI; - env->error_code = 0; - env->spr[SPR_DAR] = address; - if (rw == 1) - env->spr[SPR_DSISR] = 0x06000000; - else - env->spr[SPR_DSISR] = 0x04000000; - break; - case ACCESS_EXT: - /* eciwx or ecowx */ - env->exception_index = POWERPC_EXCP_DSI; - env->error_code = 0; - env->spr[SPR_DAR] = address; - if (rw == 1) - env->spr[SPR_DSISR] = 0x06100000; - else - env->spr[SPR_DSISR] = 0x04100000; - break; - default: - printf("DSI: invalid exception (%d)\n", ret); - env->exception_index = POWERPC_EXCP_PROGRAM; - env->error_code = - POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; - env->spr[SPR_DAR] = address; - break; - } - break; -#if defined(TARGET_PPC64) - case -5: - /* No match in segment table */ - if (env->mmu_model == POWERPC_MMU_620) { - env->exception_index = POWERPC_EXCP_DSI; - env->error_code = 0; - env->spr[SPR_DAR] = address; - /* XXX: this might be incorrect */ - if (rw == 1) - env->spr[SPR_DSISR] = 0x42000000; - else - env->spr[SPR_DSISR] = 0x40000000; - } else { - env->exception_index = POWERPC_EXCP_DSEG; - env->error_code = 0; - env->spr[SPR_DAR] = address; - } - break; -#endif - } - } -#if 0 - printf("%s: set exception to %d %02x\n", __func__, - env->exception, env->error_code); -#endif - ret = 1; - } - - return ret; -} - -/*****************************************************************************/ -/* BATs management */ -#if !defined(FLUSH_ALL_TLBS) -static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu, - target_ulong mask) -{ - target_ulong base, end, page; - - base = BATu & ~0x0001FFFF; - end = base + mask + 0x00020000; - LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " (" - TARGET_FMT_lx ")\n", base, end, mask); - for (page = base; page != end; page += TARGET_PAGE_SIZE) - tlb_flush_page(env, page); - LOG_BATS("Flush done\n"); -} -#endif - -static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr, - target_ulong value) -{ - LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID, - nr, ul == 0 ? 
'u' : 'l', value, env->nip); -} - -void ppc_store_ibatu (CPUPPCState *env, int nr, target_ulong value) -{ - target_ulong mask; - - dump_store_bat(env, 'I', 0, nr, value); - if (env->IBAT[0][nr] != value) { - mask = (value << 15) & 0x0FFE0000UL; -#if !defined(FLUSH_ALL_TLBS) - do_invalidate_BAT(env, env->IBAT[0][nr], mask); -#endif - /* When storing valid upper BAT, mask BEPI and BRPN - * and invalidate all TLBs covered by this BAT - */ - mask = (value << 15) & 0x0FFE0000UL; - env->IBAT[0][nr] = (value & 0x00001FFFUL) | - (value & ~0x0001FFFFUL & ~mask); - env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) | - (env->IBAT[1][nr] & ~0x0001FFFF & ~mask); -#if !defined(FLUSH_ALL_TLBS) - do_invalidate_BAT(env, env->IBAT[0][nr], mask); -#else - tlb_flush(env, 1); -#endif - } -} - -void ppc_store_ibatl (CPUPPCState *env, int nr, target_ulong value) -{ - dump_store_bat(env, 'I', 1, nr, value); - env->IBAT[1][nr] = value; -} - -void ppc_store_dbatu (CPUPPCState *env, int nr, target_ulong value) -{ - target_ulong mask; - - dump_store_bat(env, 'D', 0, nr, value); - if (env->DBAT[0][nr] != value) { - /* When storing valid upper BAT, mask BEPI and BRPN - * and invalidate all TLBs covered by this BAT - */ - mask = (value << 15) & 0x0FFE0000UL; -#if !defined(FLUSH_ALL_TLBS) - do_invalidate_BAT(env, env->DBAT[0][nr], mask); -#endif - mask = (value << 15) & 0x0FFE0000UL; - env->DBAT[0][nr] = (value & 0x00001FFFUL) | - (value & ~0x0001FFFFUL & ~mask); - env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) | - (env->DBAT[1][nr] & ~0x0001FFFF & ~mask); -#if !defined(FLUSH_ALL_TLBS) - do_invalidate_BAT(env, env->DBAT[0][nr], mask); -#else - tlb_flush(env, 1); -#endif - } -} - -void ppc_store_dbatl (CPUPPCState *env, int nr, target_ulong value) -{ - dump_store_bat(env, 'D', 1, nr, value); - env->DBAT[1][nr] = value; -} - -void ppc_store_ibatu_601 (CPUPPCState *env, int nr, target_ulong value) -{ - target_ulong mask; -#if defined(FLUSH_ALL_TLBS) - int do_inval; -#endif - - dump_store_bat(env, 'I', 0, nr, value); - if (env->IBAT[0][nr] != value) { -#if defined(FLUSH_ALL_TLBS) - do_inval = 0; -#endif - mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; - if (env->IBAT[1][nr] & 0x40) { - /* Invalidate BAT only if it is valid */ -#if !defined(FLUSH_ALL_TLBS) - do_invalidate_BAT(env, env->IBAT[0][nr], mask); -#else - do_inval = 1; -#endif - } - /* When storing valid upper BAT, mask BEPI and BRPN - * and invalidate all TLBs covered by this BAT - */ - env->IBAT[0][nr] = (value & 0x00001FFFUL) | - (value & ~0x0001FFFFUL & ~mask); - env->DBAT[0][nr] = env->IBAT[0][nr]; - if (env->IBAT[1][nr] & 0x40) { -#if !defined(FLUSH_ALL_TLBS) - do_invalidate_BAT(env, env->IBAT[0][nr], mask); -#else - do_inval = 1; -#endif - } -#if defined(FLUSH_ALL_TLBS) - if (do_inval) - tlb_flush(env, 1); -#endif - } -} - -void ppc_store_ibatl_601 (CPUPPCState *env, int nr, target_ulong value) -{ - target_ulong mask; -#if defined(FLUSH_ALL_TLBS) - int do_inval; -#endif - - dump_store_bat(env, 'I', 1, nr, value); - if (env->IBAT[1][nr] != value) { -#if defined(FLUSH_ALL_TLBS) - do_inval = 0; -#endif - if (env->IBAT[1][nr] & 0x40) { -#if !defined(FLUSH_ALL_TLBS) - mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; - do_invalidate_BAT(env, env->IBAT[0][nr], mask); -#else - do_inval = 1; -#endif - } - if (value & 0x40) { -#if !defined(FLUSH_ALL_TLBS) - mask = (value << 17) & 0x0FFE0000UL; - do_invalidate_BAT(env, env->IBAT[0][nr], mask); -#else - do_inval = 1; -#endif - } - env->IBAT[1][nr] = value; - env->DBAT[1][nr] = value; -#if defined(FLUSH_ALL_TLBS) - if 
(do_inval) - tlb_flush(env, 1); -#endif - } -} - -/*****************************************************************************/ -/* TLB management */ -void ppc_tlb_invalidate_all (CPUPPCState *env) -{ - switch (env->mmu_model) { - case POWERPC_MMU_SOFT_6xx: - case POWERPC_MMU_SOFT_74xx: - ppc6xx_tlb_invalidate_all(env); - break; - case POWERPC_MMU_SOFT_4xx: - case POWERPC_MMU_SOFT_4xx_Z: - ppc4xx_tlb_invalidate_all(env); - break; - case POWERPC_MMU_REAL: - cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n"); - break; - case POWERPC_MMU_MPC8xx: - /* XXX: TODO */ - cpu_abort(env, "MPC8xx MMU model is not implemented\n"); - break; - case POWERPC_MMU_BOOKE: - tlb_flush(env, 1); - break; - case POWERPC_MMU_BOOKE206: - booke206_flush_tlb(env, -1, 0); - break; - case POWERPC_MMU_32B: - case POWERPC_MMU_601: -#if defined(TARGET_PPC64) - case POWERPC_MMU_620: - case POWERPC_MMU_64B: - case POWERPC_MMU_2_06: -#endif /* defined(TARGET_PPC64) */ - tlb_flush(env, 1); - break; - default: - /* XXX: TODO */ - cpu_abort(env, "Unknown MMU model\n"); - break; - } -} - -void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr) -{ -#if !defined(FLUSH_ALL_TLBS) - addr &= TARGET_PAGE_MASK; - switch (env->mmu_model) { - case POWERPC_MMU_SOFT_6xx: - case POWERPC_MMU_SOFT_74xx: - ppc6xx_tlb_invalidate_virt(env, addr, 0); - if (env->id_tlbs == 1) - ppc6xx_tlb_invalidate_virt(env, addr, 1); - break; - case POWERPC_MMU_SOFT_4xx: - case POWERPC_MMU_SOFT_4xx_Z: - ppc4xx_tlb_invalidate_virt(env, addr, env->spr[SPR_40x_PID]); - break; - case POWERPC_MMU_REAL: - cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n"); - break; - case POWERPC_MMU_MPC8xx: - /* XXX: TODO */ - cpu_abort(env, "MPC8xx MMU model is not implemented\n"); - break; - case POWERPC_MMU_BOOKE: - /* XXX: TODO */ - cpu_abort(env, "BookE MMU model is not implemented\n"); - break; - case POWERPC_MMU_BOOKE206: - /* XXX: TODO */ - cpu_abort(env, "BookE 2.06 MMU model is not implemented\n"); - break; - case POWERPC_MMU_32B: - case POWERPC_MMU_601: - /* tlbie invalidate TLBs for all segments */ - addr &= ~((target_ulong)-1ULL << 28); - /* XXX: this case should be optimized, - * giving a mask to tlb_flush_page - */ - tlb_flush_page(env, addr | (0x0 << 28)); - tlb_flush_page(env, addr | (0x1 << 28)); - tlb_flush_page(env, addr | (0x2 << 28)); - tlb_flush_page(env, addr | (0x3 << 28)); - tlb_flush_page(env, addr | (0x4 << 28)); - tlb_flush_page(env, addr | (0x5 << 28)); - tlb_flush_page(env, addr | (0x6 << 28)); - tlb_flush_page(env, addr | (0x7 << 28)); - tlb_flush_page(env, addr | (0x8 << 28)); - tlb_flush_page(env, addr | (0x9 << 28)); - tlb_flush_page(env, addr | (0xA << 28)); - tlb_flush_page(env, addr | (0xB << 28)); - tlb_flush_page(env, addr | (0xC << 28)); - tlb_flush_page(env, addr | (0xD << 28)); - tlb_flush_page(env, addr | (0xE << 28)); - tlb_flush_page(env, addr | (0xF << 28)); - break; -#if defined(TARGET_PPC64) - case POWERPC_MMU_620: - case POWERPC_MMU_64B: - case POWERPC_MMU_2_06: - /* tlbie invalidate TLBs for all segments */ - /* XXX: given the fact that there are too many segments to invalidate, - * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU, - * we just invalidate all TLBs - */ - tlb_flush(env, 1); - break; -#endif /* defined(TARGET_PPC64) */ - default: - /* XXX: TODO */ - cpu_abort(env, "Unknown MMU model\n"); - break; - } -#else - ppc_tlb_invalidate_all(env); -#endif -} - -/*****************************************************************************/ -/* Special registers manipulation */ -#if 
defined(TARGET_PPC64) -void ppc_store_asr (CPUPPCState *env, target_ulong value) -{ - if (env->asr != value) { - env->asr = value; - tlb_flush(env, 1); - } -} -#endif - -void ppc_store_sdr1 (CPUPPCState *env, target_ulong value) -{ - LOG_MMU("%s: " TARGET_FMT_lx "\n", __func__, value); - if (env->spr[SPR_SDR1] != value) { - env->spr[SPR_SDR1] = value; -#if defined(TARGET_PPC64) - if (env->mmu_model & POWERPC_MMU_64) { - target_ulong htabsize = value & SDR_64_HTABSIZE; - - if (htabsize > 28) { - fprintf(stderr, "Invalid HTABSIZE 0x" TARGET_FMT_lx - " stored in SDR1\n", htabsize); - htabsize = 28; - } - env->htab_mask = (1ULL << (htabsize + 18)) - 1; - env->htab_base = value & SDR_64_HTABORG; - } else -#endif /* defined(TARGET_PPC64) */ - { - /* FIXME: Should check for valid HTABMASK values */ - env->htab_mask = ((value & SDR_32_HTABMASK) << 16) | 0xFFFF; - env->htab_base = value & SDR_32_HTABORG; - } - tlb_flush(env, 1); - } -} - -#if defined(TARGET_PPC64) -target_ulong ppc_load_sr (CPUPPCState *env, int slb_nr) -{ - // XXX - return 0; -} -#endif - -void ppc_store_sr (CPUPPCState *env, int srnum, target_ulong value) -{ - LOG_MMU("%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, - srnum, value, env->sr[srnum]); -#if defined(TARGET_PPC64) - if (env->mmu_model & POWERPC_MMU_64) { - uint64_t rb = 0, rs = 0; - - /* ESID = srnum */ - rb |= ((uint32_t)srnum & 0xf) << 28; - /* Set the valid bit */ - rb |= 1 << 27; - /* Index = ESID */ - rb |= (uint32_t)srnum; - - /* VSID = VSID */ - rs |= (value & 0xfffffff) << 12; - /* flags = flags */ - rs |= ((value >> 27) & 0xf) << 8; - - ppc_store_slb(env, rb, rs); - } else -#endif - if (env->sr[srnum] != value) { - env->sr[srnum] = value; -/* Invalidating 256MB of virtual memory in 4kB pages is way longer than - flusing the whole TLB. */ -#if !defined(FLUSH_ALL_TLBS) && 0 - { - target_ulong page, end; - /* Invalidate 256 MB of virtual memory */ - page = (16 << 20) * srnum; - end = page + (16 << 20); - for (; page != end; page += TARGET_PAGE_SIZE) - tlb_flush_page(env, page); - } -#else - tlb_flush(env, 1); -#endif - } -} -#endif /* !defined (CONFIG_USER_ONLY) */ - -/* GDBstub can read and write MSR... 
*/ -void ppc_store_msr (CPUPPCState *env, target_ulong value) -{ - hreg_store_msr(env, value, 0); -} - -/*****************************************************************************/ -/* Exception processing */ -#if defined (CONFIG_USER_ONLY) -void do_interrupt (CPUPPCState *env) -{ - env->exception_index = POWERPC_EXCP_NONE; - env->error_code = 0; -} - -void ppc_hw_interrupt (CPUPPCState *env) -{ - env->exception_index = POWERPC_EXCP_NONE; - env->error_code = 0; -} -#else /* defined (CONFIG_USER_ONLY) */ -static inline void dump_syscall(CPUPPCState *env) -{ - qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64 - " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64 - " nip=" TARGET_FMT_lx "\n", - ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3), - ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5), - ppc_dump_gpr(env, 6), env->nip); -} - -/* Note that this function should be greatly optimized - * when called with a constant excp, from ppc_hw_interrupt - */ -static inline void powerpc_excp(CPUPPCState *env, int excp_model, int excp) -{ - target_ulong msr, new_msr, vector; - int srr0, srr1, asrr0, asrr1; - int lpes0, lpes1, lev; - - if (0) { - /* XXX: find a suitable condition to enable the hypervisor mode */ - lpes0 = (env->spr[SPR_LPCR] >> 1) & 1; - lpes1 = (env->spr[SPR_LPCR] >> 2) & 1; - } else { - /* Those values ensure we won't enter the hypervisor mode */ - lpes0 = 0; - lpes1 = 1; - } - - qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx - " => %08x (%02x)\n", env->nip, excp, env->error_code); - - /* new srr1 value excluding must-be-zero bits */ - msr = env->msr & ~0x783f0000ULL; - - /* new interrupt handler msr */ - new_msr = env->msr & ((target_ulong)1 << MSR_ME); - - /* target registers */ - srr0 = SPR_SRR0; - srr1 = SPR_SRR1; - asrr0 = -1; - asrr1 = -1; - - switch (excp) { - case POWERPC_EXCP_NONE: - /* Should never happen */ - return; - case POWERPC_EXCP_CRITICAL: /* Critical input */ - switch (excp_model) { - case POWERPC_EXCP_40x: - srr0 = SPR_40x_SRR2; - srr1 = SPR_40x_SRR3; - break; - case POWERPC_EXCP_BOOKE: - srr0 = SPR_BOOKE_CSRR0; - srr1 = SPR_BOOKE_CSRR1; - break; - case POWERPC_EXCP_G2: - break; - default: - goto excp_invalid; - } - goto store_next; - case POWERPC_EXCP_MCHECK: /* Machine check exception */ - if (msr_me == 0) { - /* Machine check exception is not enabled. - * Enter checkstop state. - */ - if (qemu_log_enabled()) { - qemu_log("Machine check while not allowed. " - "Entering checkstop state\n"); - } else { - fprintf(stderr, "Machine check while not allowed. 
" - "Entering checkstop state\n"); - } - env->halted = 1; - env->interrupt_request |= CPU_INTERRUPT_EXITTB; - } - if (0) { - /* XXX: find a suitable condition to enable the hypervisor mode */ - new_msr |= (target_ulong)MSR_HVB; - } - - /* machine check exceptions don't have ME set */ - new_msr &= ~((target_ulong)1 << MSR_ME); - - /* XXX: should also have something loaded in DAR / DSISR */ - switch (excp_model) { - case POWERPC_EXCP_40x: - srr0 = SPR_40x_SRR2; - srr1 = SPR_40x_SRR3; - break; - case POWERPC_EXCP_BOOKE: - srr0 = SPR_BOOKE_MCSRR0; - srr1 = SPR_BOOKE_MCSRR1; - asrr0 = SPR_BOOKE_CSRR0; - asrr1 = SPR_BOOKE_CSRR1; - break; - default: - break; - } - goto store_next; - case POWERPC_EXCP_DSI: /* Data storage exception */ - LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx - "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]); - if (lpes1 == 0) - new_msr |= (target_ulong)MSR_HVB; - goto store_next; - case POWERPC_EXCP_ISI: /* Instruction storage exception */ - LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx - "\n", msr, env->nip); - if (lpes1 == 0) - new_msr |= (target_ulong)MSR_HVB; - msr |= env->error_code; - goto store_next; - case POWERPC_EXCP_EXTERNAL: /* External input */ - if (lpes0 == 1) - new_msr |= (target_ulong)MSR_HVB; - goto store_next; - case POWERPC_EXCP_ALIGN: /* Alignment exception */ - if (lpes1 == 0) - new_msr |= (target_ulong)MSR_HVB; - /* XXX: this is false */ - /* Get rS/rD and rA from faulting opcode */ - env->spr[SPR_DSISR] |= (ldl_code((env->nip - 4)) & 0x03FF0000) >> 16; - goto store_current; - case POWERPC_EXCP_PROGRAM: /* Program exception */ - switch (env->error_code & ~0xF) { - case POWERPC_EXCP_FP: - if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) { - LOG_EXCP("Ignore floating point exception\n"); - env->exception_index = POWERPC_EXCP_NONE; - env->error_code = 0; - return; - } - if (lpes1 == 0) - new_msr |= (target_ulong)MSR_HVB; - msr |= 0x00100000; - if (msr_fe0 == msr_fe1) - goto store_next; - msr |= 0x00010000; - break; - case POWERPC_EXCP_INVAL: - LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip); - if (lpes1 == 0) - new_msr |= (target_ulong)MSR_HVB; - msr |= 0x00080000; - env->spr[SPR_BOOKE_ESR] = ESR_PIL; - break; - case POWERPC_EXCP_PRIV: - if (lpes1 == 0) - new_msr |= (target_ulong)MSR_HVB; - msr |= 0x00040000; - env->spr[SPR_BOOKE_ESR] = ESR_PPR; - break; - case POWERPC_EXCP_TRAP: - if (lpes1 == 0) - new_msr |= (target_ulong)MSR_HVB; - msr |= 0x00020000; - env->spr[SPR_BOOKE_ESR] = ESR_PTR; - break; - default: - /* Should never occur */ - cpu_abort(env, "Invalid program exception %d. 
Aborting\n", - env->error_code); - break; - } - goto store_current; - case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ - if (lpes1 == 0) - new_msr |= (target_ulong)MSR_HVB; - goto store_current; - case POWERPC_EXCP_SYSCALL: /* System call exception */ - dump_syscall(env); - lev = env->error_code; - if ((lev == 1) && cpu_ppc_hypercall) { - cpu_ppc_hypercall(env); - return; - } - if (lev == 1 || (lpes0 == 0 && lpes1 == 0)) - new_msr |= (target_ulong)MSR_HVB; - goto store_next; - case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ - goto store_current; - case POWERPC_EXCP_DECR: /* Decrementer exception */ - if (lpes1 == 0) - new_msr |= (target_ulong)MSR_HVB; - goto store_next; - case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ - /* FIT on 4xx */ - LOG_EXCP("FIT exception\n"); - goto store_next; - case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ - LOG_EXCP("WDT exception\n"); - switch (excp_model) { - case POWERPC_EXCP_BOOKE: - srr0 = SPR_BOOKE_CSRR0; - srr1 = SPR_BOOKE_CSRR1; - break; - default: - break; - } - goto store_next; - case POWERPC_EXCP_DTLB: /* Data TLB error */ - goto store_next; - case POWERPC_EXCP_ITLB: /* Instruction TLB error */ - goto store_next; - case POWERPC_EXCP_DEBUG: /* Debug interrupt */ - switch (excp_model) { - case POWERPC_EXCP_BOOKE: - srr0 = SPR_BOOKE_DSRR0; - srr1 = SPR_BOOKE_DSRR1; - asrr0 = SPR_BOOKE_CSRR0; - asrr1 = SPR_BOOKE_CSRR1; - break; - default: - break; - } - /* XXX: TODO */ - cpu_abort(env, "Debug exception is not implemented yet !\n"); - goto store_next; - case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */ - env->spr[SPR_BOOKE_ESR] = ESR_SPV; - goto store_current; - case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */ - /* XXX: TODO */ - cpu_abort(env, "Embedded floating point data exception " - "is not implemented yet !\n"); - env->spr[SPR_BOOKE_ESR] = ESR_SPV; - goto store_next; - case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */ - /* XXX: TODO */ - cpu_abort(env, "Embedded floating point round exception " - "is not implemented yet !\n"); - env->spr[SPR_BOOKE_ESR] = ESR_SPV; - goto store_next; - case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */ - /* XXX: TODO */ - cpu_abort(env, - "Performance counter exception is not implemented yet !\n"); - goto store_next; - case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ - goto store_next; - case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ - srr0 = SPR_BOOKE_CSRR0; - srr1 = SPR_BOOKE_CSRR1; - goto store_next; - case POWERPC_EXCP_RESET: /* System reset exception */ - if (msr_pow) { - /* indicate that we resumed from power save mode */ - msr |= 0x10000; - } else { - new_msr &= ~((target_ulong)1 << MSR_ME); - } - - if (0) { - /* XXX: find a suitable condition to enable the hypervisor mode */ - new_msr |= (target_ulong)MSR_HVB; - } - goto store_next; - case POWERPC_EXCP_DSEG: /* Data segment exception */ - if (lpes1 == 0) - new_msr |= (target_ulong)MSR_HVB; - goto store_next; - case POWERPC_EXCP_ISEG: /* Instruction segment exception */ - if (lpes1 == 0) - new_msr |= (target_ulong)MSR_HVB; - goto store_next; - case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ - srr0 = SPR_HSRR0; - srr1 = SPR_HSRR1; - new_msr |= (target_ulong)MSR_HVB; - new_msr |= env->msr & ((target_ulong)1 << MSR_RI); - goto store_next; - case POWERPC_EXCP_TRACE: /* Trace exception */ - if (lpes1 == 0) - new_msr |= (target_ulong)MSR_HVB; - goto store_next; - case POWERPC_EXCP_HDSI: /* 
Hypervisor data storage exception */ - srr0 = SPR_HSRR0; - srr1 = SPR_HSRR1; - new_msr |= (target_ulong)MSR_HVB; - new_msr |= env->msr & ((target_ulong)1 << MSR_RI); - goto store_next; - case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */ - srr0 = SPR_HSRR0; - srr1 = SPR_HSRR1; - new_msr |= (target_ulong)MSR_HVB; - new_msr |= env->msr & ((target_ulong)1 << MSR_RI); - goto store_next; - case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */ - srr0 = SPR_HSRR0; - srr1 = SPR_HSRR1; - new_msr |= (target_ulong)MSR_HVB; - new_msr |= env->msr & ((target_ulong)1 << MSR_RI); - goto store_next; - case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */ - srr0 = SPR_HSRR0; - srr1 = SPR_HSRR1; - new_msr |= (target_ulong)MSR_HVB; - new_msr |= env->msr & ((target_ulong)1 << MSR_RI); - goto store_next; - case POWERPC_EXCP_VPU: /* Vector unavailable exception */ - if (lpes1 == 0) - new_msr |= (target_ulong)MSR_HVB; - goto store_current; - case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */ - LOG_EXCP("PIT exception\n"); - goto store_next; - case POWERPC_EXCP_IO: /* IO error exception */ - /* XXX: TODO */ - cpu_abort(env, "601 IO error exception is not implemented yet !\n"); - goto store_next; - case POWERPC_EXCP_RUNM: /* Run mode exception */ - /* XXX: TODO */ - cpu_abort(env, "601 run mode exception is not implemented yet !\n"); - goto store_next; - case POWERPC_EXCP_EMUL: /* Emulation trap exception */ - /* XXX: TODO */ - cpu_abort(env, "602 emulation trap exception " - "is not implemented yet !\n"); - goto store_next; - case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ - if (lpes1 == 0) /* XXX: check this */ - new_msr |= (target_ulong)MSR_HVB; - switch (excp_model) { - case POWERPC_EXCP_602: - case POWERPC_EXCP_603: - case POWERPC_EXCP_603E: - case POWERPC_EXCP_G2: - goto tlb_miss_tgpr; - case POWERPC_EXCP_7x5: - goto tlb_miss; - case POWERPC_EXCP_74xx: - goto tlb_miss_74xx; - default: - cpu_abort(env, "Invalid instruction TLB miss exception\n"); - break; - } - break; - case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ - if (lpes1 == 0) /* XXX: check this */ - new_msr |= (target_ulong)MSR_HVB; - switch (excp_model) { - case POWERPC_EXCP_602: - case POWERPC_EXCP_603: - case POWERPC_EXCP_603E: - case POWERPC_EXCP_G2: - goto tlb_miss_tgpr; - case POWERPC_EXCP_7x5: - goto tlb_miss; - case POWERPC_EXCP_74xx: - goto tlb_miss_74xx; - default: - cpu_abort(env, "Invalid data load TLB miss exception\n"); - break; - } - break; - case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ - if (lpes1 == 0) /* XXX: check this */ - new_msr |= (target_ulong)MSR_HVB; - switch (excp_model) { - case POWERPC_EXCP_602: - case POWERPC_EXCP_603: - case POWERPC_EXCP_603E: - case POWERPC_EXCP_G2: - tlb_miss_tgpr: - /* Swap temporary saved registers with GPRs */ - if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) { - new_msr |= (target_ulong)1 << MSR_TGPR; - hreg_swap_gpr_tgpr(env); - } - goto tlb_miss; - case POWERPC_EXCP_7x5: - tlb_miss: -#if defined (DEBUG_SOFTWARE_TLB) - if (qemu_log_enabled()) { - const char *es; - target_ulong *miss, *cmp; - int en; - if (excp == POWERPC_EXCP_IFTLB) { - es = "I"; - en = 'I'; - miss = &env->spr[SPR_IMISS]; - cmp = &env->spr[SPR_ICMP]; - } else { - if (excp == POWERPC_EXCP_DLTLB) - es = "DL"; - else - es = "DS"; - en = 'D'; - miss = &env->spr[SPR_DMISS]; - cmp = &env->spr[SPR_DCMP]; - } - qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC " - TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 " - TARGET_FMT_lx " %08x\n", es, en, *miss, en, 
*cmp, - env->spr[SPR_HASH1], env->spr[SPR_HASH2], - env->error_code); - } -#endif - msr |= env->crf[0] << 28; - msr |= env->error_code; /* key, D/I, S/L bits */ - /* Set way using a LRU mechanism */ - msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17; - break; - case POWERPC_EXCP_74xx: - tlb_miss_74xx: -#if defined (DEBUG_SOFTWARE_TLB) - if (qemu_log_enabled()) { - const char *es; - target_ulong *miss, *cmp; - int en; - if (excp == POWERPC_EXCP_IFTLB) { - es = "I"; - en = 'I'; - miss = &env->spr[SPR_TLBMISS]; - cmp = &env->spr[SPR_PTEHI]; - } else { - if (excp == POWERPC_EXCP_DLTLB) - es = "DL"; - else - es = "DS"; - en = 'D'; - miss = &env->spr[SPR_TLBMISS]; - cmp = &env->spr[SPR_PTEHI]; - } - qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC " - TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp, - env->error_code); - } -#endif - msr |= env->error_code; /* key bit */ - break; - default: - cpu_abort(env, "Invalid data store TLB miss exception\n"); - break; - } - goto store_next; - case POWERPC_EXCP_FPA: /* Floating-point assist exception */ - /* XXX: TODO */ - cpu_abort(env, "Floating point assist exception " - "is not implemented yet !\n"); - goto store_next; - case POWERPC_EXCP_DABR: /* Data address breakpoint */ - /* XXX: TODO */ - cpu_abort(env, "DABR exception is not implemented yet !\n"); - goto store_next; - case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ - /* XXX: TODO */ - cpu_abort(env, "IABR exception is not implemented yet !\n"); - goto store_next; - case POWERPC_EXCP_SMI: /* System management interrupt */ - /* XXX: TODO */ - cpu_abort(env, "SMI exception is not implemented yet !\n"); - goto store_next; - case POWERPC_EXCP_THERM: /* Thermal interrupt */ - /* XXX: TODO */ - cpu_abort(env, "Thermal management exception " - "is not implemented yet !\n"); - goto store_next; - case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ - if (lpes1 == 0) - new_msr |= (target_ulong)MSR_HVB; - /* XXX: TODO */ - cpu_abort(env, - "Performance counter exception is not implemented yet !\n"); - goto store_next; - case POWERPC_EXCP_VPUA: /* Vector assist exception */ - /* XXX: TODO */ - cpu_abort(env, "VPU assist exception is not implemented yet !\n"); - goto store_next; - case POWERPC_EXCP_SOFTP: /* Soft patch exception */ - /* XXX: TODO */ - cpu_abort(env, - "970 soft-patch exception is not implemented yet !\n"); - goto store_next; - case POWERPC_EXCP_MAINT: /* Maintenance exception */ - /* XXX: TODO */ - cpu_abort(env, - "970 maintenance exception is not implemented yet !\n"); - goto store_next; - case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */ - /* XXX: TODO */ - cpu_abort(env, "Maskable external exception " - "is not implemented yet !\n"); - goto store_next; - case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */ - /* XXX: TODO */ - cpu_abort(env, "Non maskable external exception " - "is not implemented yet !\n"); - goto store_next; - default: - excp_invalid: - cpu_abort(env, "Invalid PowerPC exception %d. 
Aborting\n", excp); - break; - store_current: - /* save current instruction location */ - env->spr[srr0] = env->nip - 4; - break; - store_next: - /* save next instruction location */ - env->spr[srr0] = env->nip; - break; - } - /* Save MSR */ - env->spr[srr1] = msr; - /* If any alternate SRR register are defined, duplicate saved values */ - if (asrr0 != -1) - env->spr[asrr0] = env->spr[srr0]; - if (asrr1 != -1) - env->spr[asrr1] = env->spr[srr1]; - /* If we disactivated any translation, flush TLBs */ - if (msr & ((1 << MSR_IR) | (1 << MSR_DR))) - tlb_flush(env, 1); - - if (msr_ile) { - new_msr |= (target_ulong)1 << MSR_LE; - } - - /* Jump to handler */ - vector = env->excp_vectors[excp]; - if (vector == (target_ulong)-1ULL) { - cpu_abort(env, "Raised an exception without defined vector %d\n", - excp); - } - vector |= env->excp_prefix; -#if defined(TARGET_PPC64) - if (excp_model == POWERPC_EXCP_BOOKE) { - if (!msr_icm) { - vector = (uint32_t)vector; - } else { - new_msr |= (target_ulong)1 << MSR_CM; - } - } else { - if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) { - vector = (uint32_t)vector; - } else { - new_msr |= (target_ulong)1 << MSR_SF; - } - } -#endif - /* XXX: we don't use hreg_store_msr here as already have treated - * any special case that could occur. Just store MSR and update hflags - */ - env->msr = new_msr & env->msr_mask; - hreg_compute_hflags(env); - env->nip = vector; - /* Reset exception state */ - env->exception_index = POWERPC_EXCP_NONE; - env->error_code = 0; - - if ((env->mmu_model == POWERPC_MMU_BOOKE) || - (env->mmu_model == POWERPC_MMU_BOOKE206)) { - /* XXX: The BookE changes address space when switching modes, - we should probably implement that as different MMU indexes, - but for the moment we do it the slow way and flush all. 
*/ - tlb_flush(env, 1); - } -} - -void do_interrupt (CPUPPCState *env) -{ - powerpc_excp(env, env->excp_model, env->exception_index); -} - -void ppc_hw_interrupt (CPUPPCState *env) -{ - int hdice; - -#if 0 - qemu_log_mask(CPU_LOG_INT, "%s: %p pending %08x req %08x me %d ee %d\n", - __func__, env, env->pending_interrupts, - env->interrupt_request, (int)msr_me, (int)msr_ee); -#endif - /* External reset */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET); - powerpc_excp(env, env->excp_model, POWERPC_EXCP_RESET); - return; - } - /* Machine check exception */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK); - powerpc_excp(env, env->excp_model, POWERPC_EXCP_MCHECK); - return; - } -#if 0 /* TODO */ - /* External debug exception */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG); - powerpc_excp(env, env->excp_model, POWERPC_EXCP_DEBUG); - return; - } -#endif - if (0) { - /* XXX: find a suitable condition to enable the hypervisor mode */ - hdice = env->spr[SPR_LPCR] & 1; - } else { - hdice = 0; - } - if ((msr_ee != 0 || msr_hv == 0 || msr_pr != 0) && hdice != 0) { - /* Hypervisor decrementer exception */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR); - powerpc_excp(env, env->excp_model, POWERPC_EXCP_HDECR); - return; - } - } - if (msr_ce != 0) { - /* External critical interrupt */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) { - /* Taking a critical external interrupt does not clear the external - * critical interrupt status - */ -#if 0 - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CEXT); -#endif - powerpc_excp(env, env->excp_model, POWERPC_EXCP_CRITICAL); - return; - } - } - if (msr_ee != 0) { - /* Watchdog timer on embedded PowerPC */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT); - powerpc_excp(env, env->excp_model, POWERPC_EXCP_WDT); - return; - } - if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL); - powerpc_excp(env, env->excp_model, POWERPC_EXCP_DOORCI); - return; - } - /* Fixed interval timer on embedded PowerPC */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT); - powerpc_excp(env, env->excp_model, POWERPC_EXCP_FIT); - return; - } - /* Programmable interval timer on embedded PowerPC */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT); - powerpc_excp(env, env->excp_model, POWERPC_EXCP_PIT); - return; - } - /* Decrementer exception */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR); - powerpc_excp(env, env->excp_model, POWERPC_EXCP_DECR); - return; - } - /* External interrupt */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) { - /* Taking an external interrupt does not clear the external - * interrupt status - */ -#if 0 - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_EXT); -#endif - powerpc_excp(env, env->excp_model, POWERPC_EXCP_EXTERNAL); - return; - } - if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL); - powerpc_excp(env, env->excp_model, POWERPC_EXCP_DOORI); - return; - 
} - if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM); - powerpc_excp(env, env->excp_model, POWERPC_EXCP_PERFM); - return; - } - /* Thermal interrupt */ - if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) { - env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM); - powerpc_excp(env, env->excp_model, POWERPC_EXCP_THERM); - return; - } - } -} -#endif /* !CONFIG_USER_ONLY */ - -void cpu_dump_rfi (target_ulong RA, target_ulong msr) -{ - qemu_log("Return from exception at " TARGET_FMT_lx " with flags " - TARGET_FMT_lx "\n", RA, msr); -} - PowerPCCPU *cpu_ppc_init(const char *cpu_model) { PowerPCCPU *cpu; @@ -3193,8 +30,9 @@ PowerPCCPU *cpu_ppc_init(const char *cpu_model) const ppc_def_t *def; def = cpu_ppc_find_by_name(cpu_model); - if (!def) + if (!def) { return NULL; + } cpu = POWERPC_CPU(object_new(TYPE_POWERPC_CPU)); env = &cpu->env; diff --git a/target-ppc/helper.h b/target-ppc/helper.h index 148543a8a1..fd04c063e2 100644 --- a/target-ppc/helper.h +++ b/target-ppc/helper.h @@ -1,96 +1,96 @@ #include "def-helper.h" -DEF_HELPER_2(raise_exception_err, void, i32, i32) -DEF_HELPER_1(raise_exception, void, i32) -DEF_HELPER_3(tw, void, tl, tl, i32) +DEF_HELPER_3(raise_exception_err, void, env, i32, i32) +DEF_HELPER_2(raise_exception, void, env, i32) +DEF_HELPER_4(tw, void, env, tl, tl, i32) #if defined(TARGET_PPC64) -DEF_HELPER_3(td, void, tl, tl, i32) +DEF_HELPER_4(td, void, env, tl, tl, i32) #endif #if !defined(CONFIG_USER_ONLY) -DEF_HELPER_1(store_msr, void, tl) -DEF_HELPER_0(rfi, void) -DEF_HELPER_0(rfsvc, void) -DEF_HELPER_0(40x_rfci, void) -DEF_HELPER_0(rfci, void) -DEF_HELPER_0(rfdi, void) -DEF_HELPER_0(rfmci, void) +DEF_HELPER_2(store_msr, void, env, tl) +DEF_HELPER_1(rfi, void, env) +DEF_HELPER_1(rfsvc, void, env) +DEF_HELPER_1(40x_rfci, void, env) +DEF_HELPER_1(rfci, void, env) +DEF_HELPER_1(rfdi, void, env) +DEF_HELPER_1(rfmci, void, env) #if defined(TARGET_PPC64) -DEF_HELPER_0(rfid, void) -DEF_HELPER_0(hrfid, void) +DEF_HELPER_1(rfid, void, env) +DEF_HELPER_1(hrfid, void, env) #endif #endif -DEF_HELPER_2(lmw, void, tl, i32) -DEF_HELPER_2(stmw, void, tl, i32) -DEF_HELPER_3(lsw, void, tl, i32, i32) -DEF_HELPER_4(lswx, void, tl, i32, i32, i32) -DEF_HELPER_3(stsw, void, tl, i32, i32) -DEF_HELPER_1(dcbz, void, tl) -DEF_HELPER_1(dcbz_970, void, tl) -DEF_HELPER_1(icbi, void, tl) -DEF_HELPER_4(lscbx, tl, tl, i32, i32, i32) +DEF_HELPER_3(lmw, void, env, tl, i32) +DEF_HELPER_3(stmw, void, env, tl, i32) +DEF_HELPER_4(lsw, void, env, tl, i32, i32) +DEF_HELPER_5(lswx, void, env, tl, i32, i32, i32) +DEF_HELPER_4(stsw, void, env, tl, i32, i32) +DEF_HELPER_2(dcbz, void, env, tl) +DEF_HELPER_2(dcbz_970, void, env, tl) +DEF_HELPER_2(icbi, void, env, tl) +DEF_HELPER_5(lscbx, tl, env, tl, i32, i32, i32) #if defined(TARGET_PPC64) DEF_HELPER_FLAGS_2(mulhd, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64) DEF_HELPER_FLAGS_2(mulhdu, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64) -DEF_HELPER_2(mulldo, i64, i64, i64) +DEF_HELPER_3(mulldo, i64, env, i64, i64) #endif DEF_HELPER_FLAGS_1(cntlzw, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl) DEF_HELPER_FLAGS_1(popcntb, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl) DEF_HELPER_FLAGS_1(popcntw, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl) -DEF_HELPER_2(sraw, tl, tl, tl) +DEF_HELPER_3(sraw, tl, env, tl, tl) #if defined(TARGET_PPC64) DEF_HELPER_FLAGS_1(cntlzd, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl) DEF_HELPER_FLAGS_1(popcntd, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl) -DEF_HELPER_2(srad, tl, tl, tl) 
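(Editorial aside.) The helper.h changes running through this hunk follow one mechanical pattern: every helper that touches CPU state gains an explicit CPUPPCState * as its first argument (the new "env" slot), so each DEF_HELPER_n entry becomes DEF_HELPER_(n+1) and the helper body stops reaching for a global env. Presumably the point is to free the generated code from a fixed global env pointer, but the diff itself only shows the signature change. The standalone C sketch below only illustrates that style of conversion; it is not QEMU code, and every name in it (ToyCPUState, global_env, helper_add_old, helper_add_new) is invented for the example.

#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for CPUPPCState; the real structure is far larger. */
typedef struct ToyCPUState {
    uint64_t xer;
} ToyCPUState;

/* Old style (sketch): the helper reaches CPU state through a global pointer. */
static ToyCPUState *global_env;

static uint64_t helper_add_old(uint64_t a, uint64_t b)
{
    global_env->xer |= 1;   /* hidden dependency on a global */
    return a + b;
}

/* New style (sketch): the state pointer is an explicit first parameter,
 * which is what the extra "env" slot in each DEF_HELPER_n line expresses. */
static uint64_t helper_add_new(ToyCPUState *env, uint64_t a, uint64_t b)
{
    env->xer |= 1;          /* same effect, no hidden global */
    return a + b;
}

int main(void)
{
    ToyCPUState cpu = { 0 };

    global_env = &cpu;
    printf("old=%llu new=%llu xer=%llu\n",
           (unsigned long long)helper_add_old(2, 3),
           (unsigned long long)helper_add_new(&cpu, 2, 3),
           (unsigned long long)cpu.xer);
    return 0;
}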
+DEF_HELPER_3(srad, tl, env, tl, tl) #endif DEF_HELPER_FLAGS_1(cntlsw32, TCG_CALL_CONST | TCG_CALL_PURE, i32, i32) DEF_HELPER_FLAGS_1(cntlzw32, TCG_CALL_CONST | TCG_CALL_PURE, i32, i32) DEF_HELPER_FLAGS_2(brinc, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl, tl) -DEF_HELPER_0(float_check_status, void) -DEF_HELPER_0(reset_fpstatus, void) -DEF_HELPER_2(compute_fprf, i32, i64, i32) -DEF_HELPER_2(store_fpscr, void, i64, i32) -DEF_HELPER_1(fpscr_clrbit, void, i32) -DEF_HELPER_1(fpscr_setbit, void, i32) -DEF_HELPER_1(float64_to_float32, i32, i64) -DEF_HELPER_1(float32_to_float64, i64, i32) +DEF_HELPER_1(float_check_status, void, env) +DEF_HELPER_1(reset_fpstatus, void, env) +DEF_HELPER_3(compute_fprf, i32, env, i64, i32) +DEF_HELPER_3(store_fpscr, void, env, i64, i32) +DEF_HELPER_2(fpscr_clrbit, void, env, i32) +DEF_HELPER_2(fpscr_setbit, void, env, i32) +DEF_HELPER_2(float64_to_float32, i32, env, i64) +DEF_HELPER_2(float32_to_float64, i64, env, i32) -DEF_HELPER_3(fcmpo, void, i64, i64, i32) -DEF_HELPER_3(fcmpu, void, i64, i64, i32) +DEF_HELPER_4(fcmpo, void, env, i64, i64, i32) +DEF_HELPER_4(fcmpu, void, env, i64, i64, i32) -DEF_HELPER_1(fctiw, i64, i64) -DEF_HELPER_1(fctiwz, i64, i64) +DEF_HELPER_2(fctiw, i64, env, i64) +DEF_HELPER_2(fctiwz, i64, env, i64) #if defined(TARGET_PPC64) -DEF_HELPER_1(fcfid, i64, i64) -DEF_HELPER_1(fctid, i64, i64) -DEF_HELPER_1(fctidz, i64, i64) +DEF_HELPER_2(fcfid, i64, env, i64) +DEF_HELPER_2(fctid, i64, env, i64) +DEF_HELPER_2(fctidz, i64, env, i64) #endif -DEF_HELPER_1(frsp, i64, i64) -DEF_HELPER_1(frin, i64, i64) -DEF_HELPER_1(friz, i64, i64) -DEF_HELPER_1(frip, i64, i64) -DEF_HELPER_1(frim, i64, i64) +DEF_HELPER_2(frsp, i64, env, i64) +DEF_HELPER_2(frin, i64, env, i64) +DEF_HELPER_2(friz, i64, env, i64) +DEF_HELPER_2(frip, i64, env, i64) +DEF_HELPER_2(frim, i64, env, i64) -DEF_HELPER_2(fadd, i64, i64, i64) -DEF_HELPER_2(fsub, i64, i64, i64) -DEF_HELPER_2(fmul, i64, i64, i64) -DEF_HELPER_2(fdiv, i64, i64, i64) -DEF_HELPER_3(fmadd, i64, i64, i64, i64) -DEF_HELPER_3(fmsub, i64, i64, i64, i64) -DEF_HELPER_3(fnmadd, i64, i64, i64, i64) -DEF_HELPER_3(fnmsub, i64, i64, i64, i64) -DEF_HELPER_1(fabs, i64, i64) -DEF_HELPER_1(fnabs, i64, i64) -DEF_HELPER_1(fneg, i64, i64) -DEF_HELPER_1(fsqrt, i64, i64) -DEF_HELPER_1(fre, i64, i64) -DEF_HELPER_1(fres, i64, i64) -DEF_HELPER_1(frsqrte, i64, i64) -DEF_HELPER_3(fsel, i64, i64, i64, i64) +DEF_HELPER_3(fadd, i64, env, i64, i64) +DEF_HELPER_3(fsub, i64, env, i64, i64) +DEF_HELPER_3(fmul, i64, env, i64, i64) +DEF_HELPER_3(fdiv, i64, env, i64, i64) +DEF_HELPER_4(fmadd, i64, env, i64, i64, i64) +DEF_HELPER_4(fmsub, i64, env, i64, i64, i64) +DEF_HELPER_4(fnmadd, i64, env, i64, i64, i64) +DEF_HELPER_4(fnmsub, i64, env, i64, i64, i64) +DEF_HELPER_2(fabs, i64, env, i64) +DEF_HELPER_2(fnabs, i64, env, i64) +DEF_HELPER_2(fneg, i64, env, i64) +DEF_HELPER_2(fsqrt, i64, env, i64) +DEF_HELPER_2(fre, i64, env, i64) +DEF_HELPER_2(fres, i64, env, i64) +DEF_HELPER_2(frsqrte, i64, env, i64) +DEF_HELPER_4(fsel, i64, env, i64, i64, i64) #define dh_alias_avr ptr #define dh_ctype_avr ppc_avr_t * @@ -120,32 +120,32 @@ DEF_HELPER_3(vminuw, void, avr, avr, avr) DEF_HELPER_3(vmaxub, void, avr, avr, avr) DEF_HELPER_3(vmaxuh, void, avr, avr, avr) DEF_HELPER_3(vmaxuw, void, avr, avr, avr) -DEF_HELPER_3(vcmpequb, void, avr, avr, avr) -DEF_HELPER_3(vcmpequh, void, avr, avr, avr) -DEF_HELPER_3(vcmpequw, void, avr, avr, avr) -DEF_HELPER_3(vcmpgtub, void, avr, avr, avr) -DEF_HELPER_3(vcmpgtuh, void, avr, avr, avr) -DEF_HELPER_3(vcmpgtuw, void, avr, avr, avr) 
-DEF_HELPER_3(vcmpgtsb, void, avr, avr, avr) -DEF_HELPER_3(vcmpgtsh, void, avr, avr, avr) -DEF_HELPER_3(vcmpgtsw, void, avr, avr, avr) -DEF_HELPER_3(vcmpeqfp, void, avr, avr, avr) -DEF_HELPER_3(vcmpgefp, void, avr, avr, avr) -DEF_HELPER_3(vcmpgtfp, void, avr, avr, avr) -DEF_HELPER_3(vcmpbfp, void, avr, avr, avr) -DEF_HELPER_3(vcmpequb_dot, void, avr, avr, avr) -DEF_HELPER_3(vcmpequh_dot, void, avr, avr, avr) -DEF_HELPER_3(vcmpequw_dot, void, avr, avr, avr) -DEF_HELPER_3(vcmpgtub_dot, void, avr, avr, avr) -DEF_HELPER_3(vcmpgtuh_dot, void, avr, avr, avr) -DEF_HELPER_3(vcmpgtuw_dot, void, avr, avr, avr) -DEF_HELPER_3(vcmpgtsb_dot, void, avr, avr, avr) -DEF_HELPER_3(vcmpgtsh_dot, void, avr, avr, avr) -DEF_HELPER_3(vcmpgtsw_dot, void, avr, avr, avr) -DEF_HELPER_3(vcmpeqfp_dot, void, avr, avr, avr) -DEF_HELPER_3(vcmpgefp_dot, void, avr, avr, avr) -DEF_HELPER_3(vcmpgtfp_dot, void, avr, avr, avr) -DEF_HELPER_3(vcmpbfp_dot, void, avr, avr, avr) +DEF_HELPER_4(vcmpequb, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpequh, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpequw, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtub, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtuh, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtuw, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtsb, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtsh, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtsw, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpeqfp, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgefp, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtfp, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpbfp, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpequb_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpequh_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpequw_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtub_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtuh_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtuw_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtsb_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtsh_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtsw_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpeqfp_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgefp_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtfp_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpbfp_dot, void, env, avr, avr, avr) DEF_HELPER_3(vmrglb, void, avr, avr, avr) DEF_HELPER_3(vmrglh, void, avr, avr, avr) DEF_HELPER_3(vmrglw, void, avr, avr, avr) @@ -175,18 +175,18 @@ DEF_HELPER_3(vaddcuw, void, avr, avr, avr) DEF_HELPER_3(vsubcuw, void, avr, avr, avr) DEF_HELPER_2(lvsl, void, avr, tl); DEF_HELPER_2(lvsr, void, avr, tl); -DEF_HELPER_3(vaddsbs, void, avr, avr, avr) -DEF_HELPER_3(vaddshs, void, avr, avr, avr) -DEF_HELPER_3(vaddsws, void, avr, avr, avr) -DEF_HELPER_3(vsubsbs, void, avr, avr, avr) -DEF_HELPER_3(vsubshs, void, avr, avr, avr) -DEF_HELPER_3(vsubsws, void, avr, avr, avr) -DEF_HELPER_3(vaddubs, void, avr, avr, avr) -DEF_HELPER_3(vadduhs, void, avr, avr, avr) -DEF_HELPER_3(vadduws, void, avr, avr, avr) -DEF_HELPER_3(vsububs, void, avr, avr, avr) -DEF_HELPER_3(vsubuhs, void, avr, avr, avr) -DEF_HELPER_3(vsubuws, void, avr, avr, avr) +DEF_HELPER_4(vaddsbs, void, env, avr, avr, avr) +DEF_HELPER_4(vaddshs, void, env, avr, avr, avr) +DEF_HELPER_4(vaddsws, void, env, avr, avr, avr) +DEF_HELPER_4(vsubsbs, void, env, avr, avr, avr) +DEF_HELPER_4(vsubshs, void, env, avr, avr, avr) +DEF_HELPER_4(vsubsws, void, env, avr, avr, avr) +DEF_HELPER_4(vaddubs, void, env, avr, avr, avr) +DEF_HELPER_4(vadduhs, void, env, avr, avr, avr) +DEF_HELPER_4(vadduws, void, 
env, avr, avr, avr) +DEF_HELPER_4(vsububs, void, env, avr, avr, avr) +DEF_HELPER_4(vsubuhs, void, env, avr, avr, avr) +DEF_HELPER_4(vsubuws, void, env, avr, avr, avr) DEF_HELPER_3(vrlb, void, avr, avr, avr) DEF_HELPER_3(vrlh, void, avr, avr, avr) DEF_HELPER_3(vrlw, void, avr, avr, avr) @@ -205,212 +205,213 @@ DEF_HELPER_2(vupkhsb, void, avr, avr) DEF_HELPER_2(vupkhsh, void, avr, avr) DEF_HELPER_2(vupklsb, void, avr, avr) DEF_HELPER_2(vupklsh, void, avr, avr) -DEF_HELPER_4(vmsumubm, void, avr, avr, avr, avr) -DEF_HELPER_4(vmsummbm, void, avr, avr, avr, avr) -DEF_HELPER_4(vsel, void, avr, avr, avr, avr) -DEF_HELPER_4(vperm, void, avr, avr, avr, avr) -DEF_HELPER_3(vpkshss, void, avr, avr, avr) -DEF_HELPER_3(vpkshus, void, avr, avr, avr) -DEF_HELPER_3(vpkswss, void, avr, avr, avr) -DEF_HELPER_3(vpkswus, void, avr, avr, avr) -DEF_HELPER_3(vpkuhus, void, avr, avr, avr) -DEF_HELPER_3(vpkuwus, void, avr, avr, avr) -DEF_HELPER_3(vpkuhum, void, avr, avr, avr) -DEF_HELPER_3(vpkuwum, void, avr, avr, avr) +DEF_HELPER_5(vmsumubm, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vmsummbm, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vsel, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vperm, void, env, avr, avr, avr, avr) +DEF_HELPER_4(vpkshss, void, env, avr, avr, avr) +DEF_HELPER_4(vpkshus, void, env, avr, avr, avr) +DEF_HELPER_4(vpkswss, void, env, avr, avr, avr) +DEF_HELPER_4(vpkswus, void, env, avr, avr, avr) +DEF_HELPER_4(vpkuhus, void, env, avr, avr, avr) +DEF_HELPER_4(vpkuwus, void, env, avr, avr, avr) +DEF_HELPER_4(vpkuhum, void, env, avr, avr, avr) +DEF_HELPER_4(vpkuwum, void, env, avr, avr, avr) DEF_HELPER_3(vpkpx, void, avr, avr, avr) -DEF_HELPER_4(vmhaddshs, void, avr, avr, avr, avr) -DEF_HELPER_4(vmhraddshs, void, avr, avr, avr, avr) -DEF_HELPER_4(vmsumuhm, void, avr, avr, avr, avr) -DEF_HELPER_4(vmsumuhs, void, avr, avr, avr, avr) -DEF_HELPER_4(vmsumshm, void, avr, avr, avr, avr) -DEF_HELPER_4(vmsumshs, void, avr, avr, avr, avr) +DEF_HELPER_5(vmhaddshs, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vmhraddshs, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vmsumuhm, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vmsumuhs, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vmsumshm, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vmsumshs, void, env, avr, avr, avr, avr) DEF_HELPER_4(vmladduhm, void, avr, avr, avr, avr) -DEF_HELPER_1(mtvscr, void, avr); -DEF_HELPER_2(lvebx, void, avr, tl) -DEF_HELPER_2(lvehx, void, avr, tl) -DEF_HELPER_2(lvewx, void, avr, tl) -DEF_HELPER_2(stvebx, void, avr, tl) -DEF_HELPER_2(stvehx, void, avr, tl) -DEF_HELPER_2(stvewx, void, avr, tl) -DEF_HELPER_3(vsumsws, void, avr, avr, avr) -DEF_HELPER_3(vsum2sws, void, avr, avr, avr) -DEF_HELPER_3(vsum4sbs, void, avr, avr, avr) -DEF_HELPER_3(vsum4shs, void, avr, avr, avr) -DEF_HELPER_3(vsum4ubs, void, avr, avr, avr) -DEF_HELPER_3(vaddfp, void, avr, avr, avr) -DEF_HELPER_3(vsubfp, void, avr, avr, avr) -DEF_HELPER_3(vmaxfp, void, avr, avr, avr) -DEF_HELPER_3(vminfp, void, avr, avr, avr) -DEF_HELPER_2(vrefp, void, avr, avr) -DEF_HELPER_2(vrsqrtefp, void, avr, avr) -DEF_HELPER_4(vmaddfp, void, avr, avr, avr, avr) -DEF_HELPER_4(vnmsubfp, void, avr, avr, avr, avr) -DEF_HELPER_2(vexptefp, void, avr, avr) -DEF_HELPER_2(vlogefp, void, avr, avr) -DEF_HELPER_2(vrfim, void, avr, avr) -DEF_HELPER_2(vrfin, void, avr, avr) -DEF_HELPER_2(vrfip, void, avr, avr) -DEF_HELPER_2(vrfiz, void, avr, avr) -DEF_HELPER_3(vcfux, void, avr, avr, i32) -DEF_HELPER_3(vcfsx, void, avr, avr, i32) -DEF_HELPER_3(vctuxs, void, avr, avr, i32) -DEF_HELPER_3(vctsxs, void, avr, 
avr, i32) +DEF_HELPER_2(mtvscr, void, env, avr); +DEF_HELPER_3(lvebx, void, env, avr, tl) +DEF_HELPER_3(lvehx, void, env, avr, tl) +DEF_HELPER_3(lvewx, void, env, avr, tl) +DEF_HELPER_3(stvebx, void, env, avr, tl) +DEF_HELPER_3(stvehx, void, env, avr, tl) +DEF_HELPER_3(stvewx, void, env, avr, tl) +DEF_HELPER_4(vsumsws, void, env, avr, avr, avr) +DEF_HELPER_4(vsum2sws, void, env, avr, avr, avr) +DEF_HELPER_4(vsum4sbs, void, env, avr, avr, avr) +DEF_HELPER_4(vsum4shs, void, env, avr, avr, avr) +DEF_HELPER_4(vsum4ubs, void, env, avr, avr, avr) +DEF_HELPER_4(vaddfp, void, env, avr, avr, avr) +DEF_HELPER_4(vsubfp, void, env, avr, avr, avr) +DEF_HELPER_4(vmaxfp, void, env, avr, avr, avr) +DEF_HELPER_4(vminfp, void, env, avr, avr, avr) +DEF_HELPER_3(vrefp, void, env, avr, avr) +DEF_HELPER_3(vrsqrtefp, void, env, avr, avr) +DEF_HELPER_5(vmaddfp, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vnmsubfp, void, env, avr, avr, avr, avr) +DEF_HELPER_3(vexptefp, void, env, avr, avr) +DEF_HELPER_3(vlogefp, void, env, avr, avr) +DEF_HELPER_3(vrfim, void, env, avr, avr) +DEF_HELPER_3(vrfin, void, env, avr, avr) +DEF_HELPER_3(vrfip, void, env, avr, avr) +DEF_HELPER_3(vrfiz, void, env, avr, avr) +DEF_HELPER_4(vcfux, void, env, avr, avr, i32) +DEF_HELPER_4(vcfsx, void, env, avr, avr, i32) +DEF_HELPER_4(vctuxs, void, env, avr, avr, i32) +DEF_HELPER_4(vctsxs, void, env, avr, avr, i32) -DEF_HELPER_1(efscfsi, i32, i32) -DEF_HELPER_1(efscfui, i32, i32) -DEF_HELPER_1(efscfuf, i32, i32) -DEF_HELPER_1(efscfsf, i32, i32) -DEF_HELPER_1(efsctsi, i32, i32) -DEF_HELPER_1(efsctui, i32, i32) -DEF_HELPER_1(efsctsiz, i32, i32) -DEF_HELPER_1(efsctuiz, i32, i32) -DEF_HELPER_1(efsctsf, i32, i32) -DEF_HELPER_1(efsctuf, i32, i32) -DEF_HELPER_1(evfscfsi, i64, i64) -DEF_HELPER_1(evfscfui, i64, i64) -DEF_HELPER_1(evfscfuf, i64, i64) -DEF_HELPER_1(evfscfsf, i64, i64) -DEF_HELPER_1(evfsctsi, i64, i64) -DEF_HELPER_1(evfsctui, i64, i64) -DEF_HELPER_1(evfsctsiz, i64, i64) -DEF_HELPER_1(evfsctuiz, i64, i64) -DEF_HELPER_1(evfsctsf, i64, i64) -DEF_HELPER_1(evfsctuf, i64, i64) -DEF_HELPER_2(efsadd, i32, i32, i32) -DEF_HELPER_2(efssub, i32, i32, i32) -DEF_HELPER_2(efsmul, i32, i32, i32) -DEF_HELPER_2(efsdiv, i32, i32, i32) -DEF_HELPER_2(evfsadd, i64, i64, i64) -DEF_HELPER_2(evfssub, i64, i64, i64) -DEF_HELPER_2(evfsmul, i64, i64, i64) -DEF_HELPER_2(evfsdiv, i64, i64, i64) -DEF_HELPER_2(efststlt, i32, i32, i32) -DEF_HELPER_2(efststgt, i32, i32, i32) -DEF_HELPER_2(efststeq, i32, i32, i32) -DEF_HELPER_2(efscmplt, i32, i32, i32) -DEF_HELPER_2(efscmpgt, i32, i32, i32) -DEF_HELPER_2(efscmpeq, i32, i32, i32) -DEF_HELPER_2(evfststlt, i32, i64, i64) -DEF_HELPER_2(evfststgt, i32, i64, i64) -DEF_HELPER_2(evfststeq, i32, i64, i64) -DEF_HELPER_2(evfscmplt, i32, i64, i64) -DEF_HELPER_2(evfscmpgt, i32, i64, i64) -DEF_HELPER_2(evfscmpeq, i32, i64, i64) -DEF_HELPER_1(efdcfsi, i64, i32) -DEF_HELPER_1(efdcfsid, i64, i64) -DEF_HELPER_1(efdcfui, i64, i32) -DEF_HELPER_1(efdcfuid, i64, i64) -DEF_HELPER_1(efdctsi, i32, i64) -DEF_HELPER_1(efdctui, i32, i64) -DEF_HELPER_1(efdctsiz, i32, i64) -DEF_HELPER_1(efdctsidz, i64, i64) -DEF_HELPER_1(efdctuiz, i32, i64) -DEF_HELPER_1(efdctuidz, i64, i64) -DEF_HELPER_1(efdcfsf, i64, i32) -DEF_HELPER_1(efdcfuf, i64, i32) -DEF_HELPER_1(efdctsf, i32, i64) -DEF_HELPER_1(efdctuf, i32, i64) -DEF_HELPER_1(efscfd, i32, i64) -DEF_HELPER_1(efdcfs, i64, i32) -DEF_HELPER_2(efdadd, i64, i64, i64) -DEF_HELPER_2(efdsub, i64, i64, i64) -DEF_HELPER_2(efdmul, i64, i64, i64) -DEF_HELPER_2(efddiv, i64, i64, i64) -DEF_HELPER_2(efdtstlt, i32, i64, i64) 
-DEF_HELPER_2(efdtstgt, i32, i64, i64) -DEF_HELPER_2(efdtsteq, i32, i64, i64) -DEF_HELPER_2(efdcmplt, i32, i64, i64) -DEF_HELPER_2(efdcmpgt, i32, i64, i64) -DEF_HELPER_2(efdcmpeq, i32, i64, i64) +DEF_HELPER_2(efscfsi, i32, env, i32) +DEF_HELPER_2(efscfui, i32, env, i32) +DEF_HELPER_2(efscfuf, i32, env, i32) +DEF_HELPER_2(efscfsf, i32, env, i32) +DEF_HELPER_2(efsctsi, i32, env, i32) +DEF_HELPER_2(efsctui, i32, env, i32) +DEF_HELPER_2(efsctsiz, i32, env, i32) +DEF_HELPER_2(efsctuiz, i32, env, i32) +DEF_HELPER_2(efsctsf, i32, env, i32) +DEF_HELPER_2(efsctuf, i32, env, i32) +DEF_HELPER_2(evfscfsi, i64, env, i64) +DEF_HELPER_2(evfscfui, i64, env, i64) +DEF_HELPER_2(evfscfuf, i64, env, i64) +DEF_HELPER_2(evfscfsf, i64, env, i64) +DEF_HELPER_2(evfsctsi, i64, env, i64) +DEF_HELPER_2(evfsctui, i64, env, i64) +DEF_HELPER_2(evfsctsiz, i64, env, i64) +DEF_HELPER_2(evfsctuiz, i64, env, i64) +DEF_HELPER_2(evfsctsf, i64, env, i64) +DEF_HELPER_2(evfsctuf, i64, env, i64) +DEF_HELPER_3(efsadd, i32, env, i32, i32) +DEF_HELPER_3(efssub, i32, env, i32, i32) +DEF_HELPER_3(efsmul, i32, env, i32, i32) +DEF_HELPER_3(efsdiv, i32, env, i32, i32) +DEF_HELPER_3(evfsadd, i64, env, i64, i64) +DEF_HELPER_3(evfssub, i64, env, i64, i64) +DEF_HELPER_3(evfsmul, i64, env, i64, i64) +DEF_HELPER_3(evfsdiv, i64, env, i64, i64) +DEF_HELPER_3(efststlt, i32, env, i32, i32) +DEF_HELPER_3(efststgt, i32, env, i32, i32) +DEF_HELPER_3(efststeq, i32, env, i32, i32) +DEF_HELPER_3(efscmplt, i32, env, i32, i32) +DEF_HELPER_3(efscmpgt, i32, env, i32, i32) +DEF_HELPER_3(efscmpeq, i32, env, i32, i32) +DEF_HELPER_3(evfststlt, i32, env, i64, i64) +DEF_HELPER_3(evfststgt, i32, env, i64, i64) +DEF_HELPER_3(evfststeq, i32, env, i64, i64) +DEF_HELPER_3(evfscmplt, i32, env, i64, i64) +DEF_HELPER_3(evfscmpgt, i32, env, i64, i64) +DEF_HELPER_3(evfscmpeq, i32, env, i64, i64) +DEF_HELPER_2(efdcfsi, i64, env, i32) +DEF_HELPER_2(efdcfsid, i64, env, i64) +DEF_HELPER_2(efdcfui, i64, env, i32) +DEF_HELPER_2(efdcfuid, i64, env, i64) +DEF_HELPER_2(efdctsi, i32, env, i64) +DEF_HELPER_2(efdctui, i32, env, i64) +DEF_HELPER_2(efdctsiz, i32, env, i64) +DEF_HELPER_2(efdctsidz, i64, env, i64) +DEF_HELPER_2(efdctuiz, i32, env, i64) +DEF_HELPER_2(efdctuidz, i64, env, i64) +DEF_HELPER_2(efdcfsf, i64, env, i32) +DEF_HELPER_2(efdcfuf, i64, env, i32) +DEF_HELPER_2(efdctsf, i32, env, i64) +DEF_HELPER_2(efdctuf, i32, env, i64) +DEF_HELPER_2(efscfd, i32, env, i64) +DEF_HELPER_2(efdcfs, i64, env, i32) +DEF_HELPER_3(efdadd, i64, env, i64, i64) +DEF_HELPER_3(efdsub, i64, env, i64, i64) +DEF_HELPER_3(efdmul, i64, env, i64, i64) +DEF_HELPER_3(efddiv, i64, env, i64, i64) +DEF_HELPER_3(efdtstlt, i32, env, i64, i64) +DEF_HELPER_3(efdtstgt, i32, env, i64, i64) +DEF_HELPER_3(efdtsteq, i32, env, i64, i64) +DEF_HELPER_3(efdcmplt, i32, env, i64, i64) +DEF_HELPER_3(efdcmpgt, i32, env, i64, i64) +DEF_HELPER_3(efdcmpeq, i32, env, i64, i64) #if !defined(CONFIG_USER_ONLY) -DEF_HELPER_1(4xx_tlbre_hi, tl, tl) -DEF_HELPER_1(4xx_tlbre_lo, tl, tl) -DEF_HELPER_2(4xx_tlbwe_hi, void, tl, tl) -DEF_HELPER_2(4xx_tlbwe_lo, void, tl, tl) -DEF_HELPER_1(4xx_tlbsx, tl, tl) -DEF_HELPER_2(440_tlbre, tl, i32, tl) -DEF_HELPER_3(440_tlbwe, void, i32, tl, tl) -DEF_HELPER_1(440_tlbsx, tl, tl) -DEF_HELPER_0(booke206_tlbre, void) -DEF_HELPER_0(booke206_tlbwe, void) -DEF_HELPER_1(booke206_tlbsx, void, tl) -DEF_HELPER_1(booke206_tlbivax, void, tl) -DEF_HELPER_1(booke206_tlbilx0, void, tl) -DEF_HELPER_1(booke206_tlbilx1, void, tl) -DEF_HELPER_1(booke206_tlbilx3, void, tl) -DEF_HELPER_1(booke206_tlbflush, void, i32) 
-DEF_HELPER_2(booke_setpid, void, i32, tl) -DEF_HELPER_1(6xx_tlbd, void, tl) -DEF_HELPER_1(6xx_tlbi, void, tl) -DEF_HELPER_1(74xx_tlbd, void, tl) -DEF_HELPER_1(74xx_tlbi, void, tl) -DEF_HELPER_FLAGS_0(tlbia, TCG_CALL_CONST, void) -DEF_HELPER_FLAGS_1(tlbie, TCG_CALL_CONST, void, tl) +DEF_HELPER_2(4xx_tlbre_hi, tl, env, tl) +DEF_HELPER_2(4xx_tlbre_lo, tl, env, tl) +DEF_HELPER_3(4xx_tlbwe_hi, void, env, tl, tl) +DEF_HELPER_3(4xx_tlbwe_lo, void, env, tl, tl) +DEF_HELPER_2(4xx_tlbsx, tl, env, tl) +DEF_HELPER_3(440_tlbre, tl, env, i32, tl) +DEF_HELPER_4(440_tlbwe, void, env, i32, tl, tl) +DEF_HELPER_2(440_tlbsx, tl, env, tl) +DEF_HELPER_1(booke206_tlbre, void, env) +DEF_HELPER_1(booke206_tlbwe, void, env) +DEF_HELPER_2(booke206_tlbsx, void, env, tl) +DEF_HELPER_2(booke206_tlbivax, void, env, tl) +DEF_HELPER_2(booke206_tlbilx0, void, env, tl) +DEF_HELPER_2(booke206_tlbilx1, void, env, tl) +DEF_HELPER_2(booke206_tlbilx3, void, env, tl) +DEF_HELPER_2(booke206_tlbflush, void, env, i32) +DEF_HELPER_3(booke_setpid, void, env, i32, tl) +DEF_HELPER_2(6xx_tlbd, void, env, tl) +DEF_HELPER_2(6xx_tlbi, void, env, tl) +DEF_HELPER_2(74xx_tlbd, void, env, tl) +DEF_HELPER_2(74xx_tlbi, void, env, tl) +DEF_HELPER_FLAGS_1(tlbia, TCG_CALL_CONST, void, env) +DEF_HELPER_FLAGS_2(tlbie, TCG_CALL_CONST, void, env, tl) #if defined(TARGET_PPC64) -DEF_HELPER_FLAGS_2(store_slb, TCG_CALL_CONST, void, tl, tl) -DEF_HELPER_1(load_slb_esid, tl, tl) -DEF_HELPER_1(load_slb_vsid, tl, tl) -DEF_HELPER_FLAGS_0(slbia, TCG_CALL_CONST, void) -DEF_HELPER_FLAGS_1(slbie, TCG_CALL_CONST, void, tl) +DEF_HELPER_FLAGS_3(store_slb, TCG_CALL_CONST, void, env, tl, tl) +DEF_HELPER_2(load_slb_esid, tl, env, tl) +DEF_HELPER_2(load_slb_vsid, tl, env, tl) +DEF_HELPER_FLAGS_1(slbia, TCG_CALL_CONST, void, env) +DEF_HELPER_FLAGS_2(slbie, TCG_CALL_CONST, void, env, tl) #endif -DEF_HELPER_FLAGS_1(load_sr, TCG_CALL_CONST, tl, tl); -DEF_HELPER_FLAGS_2(store_sr, TCG_CALL_CONST, void, tl, tl) +DEF_HELPER_FLAGS_2(load_sr, TCG_CALL_CONST, tl, env, tl); +DEF_HELPER_FLAGS_3(store_sr, TCG_CALL_CONST, void, env, tl, tl) DEF_HELPER_FLAGS_1(602_mfrom, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl) DEF_HELPER_1(msgsnd, void, tl) -DEF_HELPER_1(msgclr, void, tl) +DEF_HELPER_2(msgclr, void, env, tl) #endif -DEF_HELPER_3(dlmzb, tl, tl, tl, i32) -DEF_HELPER_FLAGS_1(clcs, TCG_CALL_CONST | TCG_CALL_PURE, tl, i32) +DEF_HELPER_4(dlmzb, tl, env, tl, tl, i32) +DEF_HELPER_FLAGS_2(clcs, TCG_CALL_CONST | TCG_CALL_PURE, tl, env, i32) #if !defined(CONFIG_USER_ONLY) -DEF_HELPER_1(rac, tl, tl) +DEF_HELPER_2(rac, tl, env, tl) #endif -DEF_HELPER_2(div, tl, tl, tl) -DEF_HELPER_2(divo, tl, tl, tl) -DEF_HELPER_2(divs, tl, tl, tl) -DEF_HELPER_2(divso, tl, tl, tl) +DEF_HELPER_3(div, tl, env, tl, tl) +DEF_HELPER_3(divo, tl, env, tl, tl) +DEF_HELPER_3(divs, tl, env, tl, tl) +DEF_HELPER_3(divso, tl, env, tl, tl) -DEF_HELPER_1(load_dcr, tl, tl); -DEF_HELPER_2(store_dcr, void, tl, tl) +DEF_HELPER_2(load_dcr, tl, env, tl); +DEF_HELPER_3(store_dcr, void, env, tl, tl) -DEF_HELPER_1(load_dump_spr, void, i32) -DEF_HELPER_1(store_dump_spr, void, i32) -DEF_HELPER_0(load_tbl, tl) -DEF_HELPER_0(load_tbu, tl) -DEF_HELPER_0(load_atbl, tl) -DEF_HELPER_0(load_atbu, tl) -DEF_HELPER_0(load_601_rtcl, tl) -DEF_HELPER_0(load_601_rtcu, tl) +DEF_HELPER_2(load_dump_spr, void, env, i32) +DEF_HELPER_2(store_dump_spr, void, env, i32) +DEF_HELPER_1(load_tbl, tl, env) +DEF_HELPER_1(load_tbu, tl, env) +DEF_HELPER_1(load_atbl, tl, env) +DEF_HELPER_1(load_atbu, tl, env) +DEF_HELPER_1(load_601_rtcl, tl, env) 
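(Editorial aside.) For readers unfamiliar with QEMU's def-helper lists: as far as these declarations go, a DEF_HELPER_n(name, ret, t1, ..., tn) row expands through def-helper.h into a prototype for helper_name with n parameters, where "tl" maps to target_ulong and the new "env" slot maps to the CPU state pointer; the added int_helper.c further below (e.g. helper_sraw taking CPUPPCState *env first) matches that reading. The toy macro here only mimics one such expansion to make the mapping concrete; TOY_DEF_HELPER_2_VOID_ENV_TL, ToyPPCState and toy_target_ulong are invented names, not the real def-helper machinery.

#include <stdint.h>
#include <stdio.h>

typedef struct ToyPPCState { uint64_t sdr1; } ToyPPCState;  /* stand-in for CPUPPCState */
typedef uint64_t toy_target_ulong;                          /* stand-in for target_ulong */

/* Toy expansion of a "DEF_HELPER_2(name, void, env, tl)" style row:
 * the list entry becomes a prototype whose first parameter is the state pointer. */
#define TOY_DEF_HELPER_2_VOID_ENV_TL(name) \
    void helper_##name(ToyPPCState *env, toy_target_ulong val)

TOY_DEF_HELPER_2_VOID_ENV_TL(store_sdr1);   /* what the header effectively declares */

TOY_DEF_HELPER_2_VOID_ENV_TL(store_sdr1)    /* what the helper .c file then defines */
{
    env->sdr1 = val;
}

int main(void)
{
    ToyPPCState cpu = { 0 };

    helper_store_sdr1(&cpu, 0x123456);
    printf("sdr1=%llx\n", (unsigned long long)cpu.sdr1);
    return 0;
}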
+DEF_HELPER_1(load_601_rtcu, tl, env) #if !defined(CONFIG_USER_ONLY) #if defined(TARGET_PPC64) -DEF_HELPER_1(store_asr, void, tl) -DEF_HELPER_0(load_purr, tl) +DEF_HELPER_2(store_asr, void, env, tl) +DEF_HELPER_1(load_purr, tl, env) #endif -DEF_HELPER_1(store_sdr1, void, tl) -DEF_HELPER_1(store_tbl, void, tl) -DEF_HELPER_1(store_tbu, void, tl) -DEF_HELPER_1(store_atbl, void, tl) -DEF_HELPER_1(store_atbu, void, tl) -DEF_HELPER_1(store_601_rtcl, void, tl) -DEF_HELPER_1(store_601_rtcu, void, tl) -DEF_HELPER_0(load_decr, tl) -DEF_HELPER_1(store_decr, void, tl) -DEF_HELPER_1(store_hid0_601, void, tl) -DEF_HELPER_2(store_403_pbr, void, i32, tl) -DEF_HELPER_0(load_40x_pit, tl) -DEF_HELPER_1(store_40x_pit, void, tl) -DEF_HELPER_1(store_40x_dbcr0, void, tl) -DEF_HELPER_1(store_40x_sler, void, tl) -DEF_HELPER_1(store_booke_tcr, void, tl) -DEF_HELPER_1(store_booke_tsr, void, tl) -DEF_HELPER_2(store_ibatl, void, i32, tl) -DEF_HELPER_2(store_ibatu, void, i32, tl) -DEF_HELPER_2(store_dbatl, void, i32, tl) -DEF_HELPER_2(store_dbatu, void, i32, tl) -DEF_HELPER_2(store_601_batl, void, i32, tl) -DEF_HELPER_2(store_601_batu, void, i32, tl) +DEF_HELPER_2(store_sdr1, void, env, tl) +DEF_HELPER_2(store_tbl, void, env, tl) +DEF_HELPER_2(store_tbu, void, env, tl) +DEF_HELPER_2(store_atbl, void, env, tl) +DEF_HELPER_2(store_atbu, void, env, tl) +DEF_HELPER_2(store_601_rtcl, void, env, tl) +DEF_HELPER_2(store_601_rtcu, void, env, tl) +DEF_HELPER_1(load_decr, tl, env) +DEF_HELPER_2(store_decr, void, env, tl) +DEF_HELPER_2(store_hid0_601, void, env, tl) +DEF_HELPER_3(store_403_pbr, void, env, i32, tl) +DEF_HELPER_1(load_40x_pit, tl, env) +DEF_HELPER_2(store_40x_pit, void, env, tl) +DEF_HELPER_2(store_40x_dbcr0, void, env, tl) +DEF_HELPER_2(store_40x_sler, void, env, tl) +DEF_HELPER_2(store_booke_tcr, void, env, tl) +DEF_HELPER_2(store_booke_tsr, void, env, tl) +DEF_HELPER_1(load_epr, tl, env) +DEF_HELPER_3(store_ibatl, void, env, i32, tl) +DEF_HELPER_3(store_ibatu, void, env, i32, tl) +DEF_HELPER_3(store_dbatl, void, env, i32, tl) +DEF_HELPER_3(store_dbatu, void, env, i32, tl) +DEF_HELPER_3(store_601_batl, void, env, i32, tl) +DEF_HELPER_3(store_601_batu, void, env, i32, tl) #endif #include "def-helper.h" diff --git a/target-ppc/int_helper.c b/target-ppc/int_helper.c new file mode 100644 index 0000000000..f638b2a07c --- /dev/null +++ b/target-ppc/int_helper.c @@ -0,0 +1,1564 @@ +/* + * PowerPC integer and vector emulation helpers for QEMU. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ +#include "cpu.h" +#include "host-utils.h" +#include "helper.h" + +#include "helper_regs.h" +/*****************************************************************************/ +/* Fixed point operations helpers */ +#if defined(TARGET_PPC64) + +/* multiply high word */ +uint64_t helper_mulhd(uint64_t arg1, uint64_t arg2) +{ + uint64_t tl, th; + + muls64(&tl, &th, arg1, arg2); + return th; +} + +/* multiply high word unsigned */ +uint64_t helper_mulhdu(uint64_t arg1, uint64_t arg2) +{ + uint64_t tl, th; + + mulu64(&tl, &th, arg1, arg2); + return th; +} + +uint64_t helper_mulldo(CPUPPCState *env, uint64_t arg1, uint64_t arg2) +{ + int64_t th; + uint64_t tl; + + muls64(&tl, (uint64_t *)&th, arg1, arg2); + /* If th != 0 && th != -1, then we had an overflow */ + if (likely((uint64_t)(th + 1) <= 1)) { + env->xer &= ~(1 << XER_OV); + } else { + env->xer |= (1 << XER_OV) | (1 << XER_SO); + } + return (int64_t)tl; +} +#endif + +target_ulong helper_cntlzw(target_ulong t) +{ + return clz32(t); +} + +#if defined(TARGET_PPC64) +target_ulong helper_cntlzd(target_ulong t) +{ + return clz64(t); +} +#endif + +/* shift right arithmetic helper */ +target_ulong helper_sraw(CPUPPCState *env, target_ulong value, + target_ulong shift) +{ + int32_t ret; + + if (likely(!(shift & 0x20))) { + if (likely((uint32_t)shift != 0)) { + shift &= 0x1f; + ret = (int32_t)value >> shift; + if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) { + env->xer &= ~(1 << XER_CA); + } else { + env->xer |= (1 << XER_CA); + } + } else { + ret = (int32_t)value; + env->xer &= ~(1 << XER_CA); + } + } else { + ret = (int32_t)value >> 31; + if (ret) { + env->xer |= (1 << XER_CA); + } else { + env->xer &= ~(1 << XER_CA); + } + } + return (target_long)ret; +} + +#if defined(TARGET_PPC64) +target_ulong helper_srad(CPUPPCState *env, target_ulong value, + target_ulong shift) +{ + int64_t ret; + + if (likely(!(shift & 0x40))) { + if (likely((uint64_t)shift != 0)) { + shift &= 0x3f; + ret = (int64_t)value >> shift; + if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) { + env->xer &= ~(1 << XER_CA); + } else { + env->xer |= (1 << XER_CA); + } + } else { + ret = (int64_t)value; + env->xer &= ~(1 << XER_CA); + } + } else { + ret = (int64_t)value >> 63; + if (ret) { + env->xer |= (1 << XER_CA); + } else { + env->xer &= ~(1 << XER_CA); + } + } + return ret; +} +#endif + +#if defined(TARGET_PPC64) +target_ulong helper_popcntb(target_ulong val) +{ + val = (val & 0x5555555555555555ULL) + ((val >> 1) & + 0x5555555555555555ULL); + val = (val & 0x3333333333333333ULL) + ((val >> 2) & + 0x3333333333333333ULL); + val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & + 0x0f0f0f0f0f0f0f0fULL); + return val; +} + +target_ulong helper_popcntw(target_ulong val) +{ + val = (val & 0x5555555555555555ULL) + ((val >> 1) & + 0x5555555555555555ULL); + val = (val & 0x3333333333333333ULL) + ((val >> 2) & + 0x3333333333333333ULL); + val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & + 0x0f0f0f0f0f0f0f0fULL); + val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) & + 0x00ff00ff00ff00ffULL); + val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) & + 0x0000ffff0000ffffULL); + return val; +} + +target_ulong helper_popcntd(target_ulong val) +{ + return ctpop64(val); +} +#else +target_ulong helper_popcntb(target_ulong val) +{ + val = (val & 0x55555555) + ((val >> 1) & 0x55555555); + val = (val & 0x33333333) + ((val >> 2) & 0x33333333); + val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f); + return val; +} + +target_ulong helper_popcntw(target_ulong val) +{ + val = (val & 
0x55555555) + ((val >> 1) & 0x55555555); + val = (val & 0x33333333) + ((val >> 2) & 0x33333333); + val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f); + val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff); + val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff); + return val; +} +#endif + +/*****************************************************************************/ +/* PowerPC 601 specific instructions (POWER bridge) */ +target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2) +{ + uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ]; + + if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || + (int32_t)arg2 == 0) { + env->spr[SPR_MQ] = 0; + return INT32_MIN; + } else { + env->spr[SPR_MQ] = tmp % arg2; + return tmp / (int32_t)arg2; + } +} + +target_ulong helper_divo(CPUPPCState *env, target_ulong arg1, + target_ulong arg2) +{ + uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ]; + + if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || + (int32_t)arg2 == 0) { + env->xer |= (1 << XER_OV) | (1 << XER_SO); + env->spr[SPR_MQ] = 0; + return INT32_MIN; + } else { + env->spr[SPR_MQ] = tmp % arg2; + tmp /= (int32_t)arg2; + if ((int32_t)tmp != tmp) { + env->xer |= (1 << XER_OV) | (1 << XER_SO); + } else { + env->xer &= ~(1 << XER_OV); + } + return tmp; + } +} + +target_ulong helper_divs(CPUPPCState *env, target_ulong arg1, + target_ulong arg2) +{ + if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || + (int32_t)arg2 == 0) { + env->spr[SPR_MQ] = 0; + return INT32_MIN; + } else { + env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2; + return (int32_t)arg1 / (int32_t)arg2; + } +} + +target_ulong helper_divso(CPUPPCState *env, target_ulong arg1, + target_ulong arg2) +{ + if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || + (int32_t)arg2 == 0) { + env->xer |= (1 << XER_OV) | (1 << XER_SO); + env->spr[SPR_MQ] = 0; + return INT32_MIN; + } else { + env->xer &= ~(1 << XER_OV); + env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2; + return (int32_t)arg1 / (int32_t)arg2; + } +} + +/*****************************************************************************/ +/* 602 specific instructions */ +/* mfrom is the most crazy instruction ever seen, imho ! */ +/* Real implementation uses a ROM table. Do the same */ +/* Extremely decomposed: + * -arg / 256 + * return 256 * log10(10 + 1.0) + 0.5 + */ +#if !defined(CONFIG_USER_ONLY) +target_ulong helper_602_mfrom(target_ulong arg) +{ + if (likely(arg < 602)) { +#include "mfrom_table.c" + return mfrom_ROM_table[arg]; + } else { + return 0; + } +} +#endif + +/*****************************************************************************/ +/* Altivec extension helpers */ +#if defined(HOST_WORDS_BIGENDIAN) +#define HI_IDX 0 +#define LO_IDX 1 +#else +#define HI_IDX 1 +#define LO_IDX 0 +#endif + +#if defined(HOST_WORDS_BIGENDIAN) +#define VECTOR_FOR_INORDER_I(index, element) \ + for (index = 0; index < ARRAY_SIZE(r->element); index++) +#else +#define VECTOR_FOR_INORDER_I(index, element) \ + for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--) +#endif + +/* If X is a NaN, store the corresponding QNaN into RESULT. Otherwise, + * execute the following block. */ +#define DO_HANDLE_NAN(result, x) \ + if (float32_is_any_nan(x)) { \ + CPU_FloatU __f; \ + __f.f = x; \ + __f.l = __f.l | (1 << 22); /* Set QNaN bit. 
*/ \ + result = __f.f; \ + } else + +#define HANDLE_NAN1(result, x) \ + DO_HANDLE_NAN(result, x) +#define HANDLE_NAN2(result, x, y) \ + DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) +#define HANDLE_NAN3(result, x, y, z) \ + DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z) + +/* Saturating arithmetic helpers. */ +#define SATCVT(from, to, from_type, to_type, min, max) \ + static inline to_type cvt##from##to(from_type x, int *sat) \ + { \ + to_type r; \ + \ + if (x < (from_type)min) { \ + r = min; \ + *sat = 1; \ + } else if (x > (from_type)max) { \ + r = max; \ + *sat = 1; \ + } else { \ + r = x; \ + } \ + return r; \ + } +#define SATCVTU(from, to, from_type, to_type, min, max) \ + static inline to_type cvt##from##to(from_type x, int *sat) \ + { \ + to_type r; \ + \ + if (x > (from_type)max) { \ + r = max; \ + *sat = 1; \ + } else { \ + r = x; \ + } \ + return r; \ + } +SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX) +SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX) +SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX) + +SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX) +SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX) +SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX) +SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX) +SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX) +SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX) +#undef SATCVT +#undef SATCVTU + +void helper_lvsl(ppc_avr_t *r, target_ulong sh) +{ + int i, j = (sh & 0xf); + + VECTOR_FOR_INORDER_I(i, u8) { + r->u8[i] = j++; + } +} + +void helper_lvsr(ppc_avr_t *r, target_ulong sh) +{ + int i, j = 0x10 - (sh & 0xf); + + VECTOR_FOR_INORDER_I(i, u8) { + r->u8[i] = j++; + } +} + +void helper_mtvscr(CPUPPCState *env, ppc_avr_t *r) +{ +#if defined(HOST_WORDS_BIGENDIAN) + env->vscr = r->u32[3]; +#else + env->vscr = r->u32[0]; +#endif + set_flush_to_zero(vscr_nj, &env->vec_status); +} + +void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->u32); i++) { + r->u32[i] = ~a->u32[i] < b->u32[i]; + } +} + +#define VARITH_DO(name, op, element) \ + void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + r->element[i] = a->element[i] op b->element[i]; \ + } \ + } +#define VARITH(suffix, element) \ + VARITH_DO(add##suffix, +, element) \ + VARITH_DO(sub##suffix, -, element) +VARITH(ubm, u8) +VARITH(uhm, u16) +VARITH(uwm, u32) +#undef VARITH_DO +#undef VARITH + +#define VARITHFP(suffix, func) \ + void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \ + ppc_avr_t *b) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->f); i++) { \ + HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \ + r->f[i] = func(a->f[i], b->f[i], &env->vec_status); \ + } \ + } \ + } +VARITHFP(addfp, float32_add) +VARITHFP(subfp, float32_sub) +#undef VARITHFP + +#define VARITHSAT_CASE(type, op, cvt, element) \ + { \ + type result = (type)a->element[i] op (type)b->element[i]; \ + r->element[i] = cvt(result, &sat); \ + } + +#define VARITHSAT_DO(name, op, optype, cvt, element) \ + void helper_v##name(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \ + ppc_avr_t *b) \ + { \ + int sat = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + switch (sizeof(r->element[0])) { \ + case 1: \ + VARITHSAT_CASE(optype, op, cvt, element); \ + break; \ + case 2: \ + VARITHSAT_CASE(optype, op, cvt, element); \ + break; \ + case 4: \ + VARITHSAT_CASE(optype, op, cvt, element); \ + break; \ + } \ + } \ + if 
(sat) { \ + env->vscr |= (1 << VSCR_SAT); \ + } \ + } +#define VARITHSAT_SIGNED(suffix, element, optype, cvt) \ + VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \ + VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element) +#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \ + VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \ + VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element) +VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb) +VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh) +VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw) +VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub) +VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh) +VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw) +#undef VARITHSAT_CASE +#undef VARITHSAT_DO +#undef VARITHSAT_SIGNED +#undef VARITHSAT_UNSIGNED + +#define VAVG_DO(name, element, etype) \ + void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \ + r->element[i] = x >> 1; \ + } \ + } + +#define VAVG(type, signed_element, signed_type, unsigned_element, \ + unsigned_type) \ + VAVG_DO(avgs##type, signed_element, signed_type) \ + VAVG_DO(avgu##type, unsigned_element, unsigned_type) +VAVG(b, s8, int16_t, u8, uint16_t) +VAVG(h, s16, int32_t, u16, uint32_t) +VAVG(w, s32, int64_t, u32, uint64_t) +#undef VAVG_DO +#undef VAVG + +#define VCF(suffix, cvt, element) \ + void helper_vcf##suffix(CPUPPCState *env, ppc_avr_t *r, \ + ppc_avr_t *b, uint32_t uim) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->f); i++) { \ + float32 t = cvt(b->element[i], &env->vec_status); \ + r->f[i] = float32_scalbn(t, -uim, &env->vec_status); \ + } \ + } +VCF(ux, uint32_to_float32, u32) +VCF(sx, int32_to_float32, s32) +#undef VCF + +#define VCMP_DO(suffix, compare, element, record) \ + void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \ + ppc_avr_t *a, ppc_avr_t *b) \ + { \ + uint32_t ones = (uint32_t)-1; \ + uint32_t all = ones; \ + uint32_t none = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + uint32_t result = (a->element[i] compare b->element[i] ? 
\ + ones : 0x0); \ + switch (sizeof(a->element[0])) { \ + case 4: \ + r->u32[i] = result; \ + break; \ + case 2: \ + r->u16[i] = result; \ + break; \ + case 1: \ + r->u8[i] = result; \ + break; \ + } \ + all &= result; \ + none |= result; \ + } \ + if (record) { \ + env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \ + } \ + } +#define VCMP(suffix, compare, element) \ + VCMP_DO(suffix, compare, element, 0) \ + VCMP_DO(suffix##_dot, compare, element, 1) +VCMP(equb, ==, u8) +VCMP(equh, ==, u16) +VCMP(equw, ==, u32) +VCMP(gtub, >, u8) +VCMP(gtuh, >, u16) +VCMP(gtuw, >, u32) +VCMP(gtsb, >, s8) +VCMP(gtsh, >, s16) +VCMP(gtsw, >, s32) +#undef VCMP_DO +#undef VCMP + +#define VCMPFP_DO(suffix, compare, order, record) \ + void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \ + ppc_avr_t *a, ppc_avr_t *b) \ + { \ + uint32_t ones = (uint32_t)-1; \ + uint32_t all = ones; \ + uint32_t none = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->f); i++) { \ + uint32_t result; \ + int rel = float32_compare_quiet(a->f[i], b->f[i], \ + &env->vec_status); \ + if (rel == float_relation_unordered) { \ + result = 0; \ + } else if (rel compare order) { \ + result = ones; \ + } else { \ + result = 0; \ + } \ + r->u32[i] = result; \ + all &= result; \ + none |= result; \ + } \ + if (record) { \ + env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \ + } \ + } +#define VCMPFP(suffix, compare, order) \ + VCMPFP_DO(suffix, compare, order, 0) \ + VCMPFP_DO(suffix##_dot, compare, order, 1) +VCMPFP(eqfp, ==, float_relation_equal) +VCMPFP(gefp, !=, float_relation_less) +VCMPFP(gtfp, ==, float_relation_greater) +#undef VCMPFP_DO +#undef VCMPFP + +static inline void vcmpbfp_internal(CPUPPCState *env, ppc_avr_t *r, + ppc_avr_t *a, ppc_avr_t *b, int record) +{ + int i; + int all_in = 0; + + for (i = 0; i < ARRAY_SIZE(r->f); i++) { + int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); + if (le_rel == float_relation_unordered) { + r->u32[i] = 0xc0000000; + /* ALL_IN does not need to be updated here. 
*/ + } else { + float32 bneg = float32_chs(b->f[i]); + int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status); + int le = le_rel != float_relation_greater; + int ge = ge_rel != float_relation_less; + + r->u32[i] = ((!le) << 31) | ((!ge) << 30); + all_in |= (!le | !ge); + } + } + if (record) { + env->crf[6] = (all_in == 0) << 1; + } +} + +void helper_vcmpbfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + vcmpbfp_internal(env, r, a, b, 0); +} + +void helper_vcmpbfp_dot(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b) +{ + vcmpbfp_internal(env, r, a, b, 1); +} + +#define VCT(suffix, satcvt, element) \ + void helper_vct##suffix(CPUPPCState *env, ppc_avr_t *r, \ + ppc_avr_t *b, uint32_t uim) \ + { \ + int i; \ + int sat = 0; \ + float_status s = env->vec_status; \ + \ + set_float_rounding_mode(float_round_to_zero, &s); \ + for (i = 0; i < ARRAY_SIZE(r->f); i++) { \ + if (float32_is_any_nan(b->f[i])) { \ + r->element[i] = 0; \ + } else { \ + float64 t = float32_to_float64(b->f[i], &s); \ + int64_t j; \ + \ + t = float64_scalbn(t, uim, &s); \ + j = float64_to_int64(t, &s); \ + r->element[i] = satcvt(j, &sat); \ + } \ + } \ + if (sat) { \ + env->vscr |= (1 << VSCR_SAT); \ + } \ + } +VCT(uxs, cvtsduw, u32) +VCT(sxs, cvtsdsw, s32) +#undef VCT + +void helper_vmaddfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, + ppc_avr_t *c) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->f); i++) { + HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) { + /* Need to do the computation in higher precision and round + * once at the end. */ + float64 af, bf, cf, t; + + af = float32_to_float64(a->f[i], &env->vec_status); + bf = float32_to_float64(b->f[i], &env->vec_status); + cf = float32_to_float64(c->f[i], &env->vec_status); + t = float64_mul(af, cf, &env->vec_status); + t = float64_add(t, bf, &env->vec_status); + r->f[i] = float64_to_float32(t, &env->vec_status); + } + } +} + +void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + int sat = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(r->s16); i++) { + int32_t prod = a->s16[i] * b->s16[i]; + int32_t t = (int32_t)c->s16[i] + (prod >> 15); + + r->s16[i] = cvtswsh(t, &sat); + } + + if (sat) { + env->vscr |= (1 << VSCR_SAT); + } +} + +void helper_vmhraddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + int sat = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(r->s16); i++) { + int32_t prod = a->s16[i] * b->s16[i] + 0x00004000; + int32_t t = (int32_t)c->s16[i] + (prod >> 15); + r->s16[i] = cvtswsh(t, &sat); + } + + if (sat) { + env->vscr |= (1 << VSCR_SAT); + } +} + +#define VMINMAX_DO(name, compare, element) \ + void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + if (a->element[i] compare b->element[i]) { \ + r->element[i] = b->element[i]; \ + } else { \ + r->element[i] = a->element[i]; \ + } \ + } \ + } +#define VMINMAX(suffix, element) \ + VMINMAX_DO(min##suffix, >, element) \ + VMINMAX_DO(max##suffix, <, element) +VMINMAX(sb, s8) +VMINMAX(sh, s16) +VMINMAX(sw, s32) +VMINMAX(ub, u8) +VMINMAX(uh, u16) +VMINMAX(uw, u32) +#undef VMINMAX_DO +#undef VMINMAX + +#define VMINMAXFP(suffix, rT, rF) \ + void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \ + ppc_avr_t *b) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->f); i++) { \ + HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \ + if (float32_lt_quiet(a->f[i], b->f[i], \ + &env->vec_status)) { \ + 
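/* Annotation (not part of the patch): in VMINMAXFP(suffix, rT, rF) the rT/rF parameters pick which operand survives the a < b test, so VMINMAXFP(minfp, a, b) keeps a (the minimum) and VMINMAXFP(maxfp, b, a) keeps b (the maximum). */ \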
r->f[i] = rT->f[i]; \ + } else { \ + r->f[i] = rF->f[i]; \ + } \ + } \ + } \ + } +VMINMAXFP(minfp, a, b) +VMINMAXFP(maxfp, b, a) +#undef VMINMAXFP + +void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->s16); i++) { + int32_t prod = a->s16[i] * b->s16[i]; + r->s16[i] = (int16_t) (prod + c->s16[i]); + } +} + +#define VMRG_DO(name, element, highp) \ + void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + { \ + ppc_avr_t result; \ + int i; \ + size_t n_elems = ARRAY_SIZE(r->element); \ + \ + for (i = 0; i < n_elems / 2; i++) { \ + if (highp) { \ + result.element[i*2+HI_IDX] = a->element[i]; \ + result.element[i*2+LO_IDX] = b->element[i]; \ + } else { \ + result.element[n_elems - i * 2 - (1 + HI_IDX)] = \ + b->element[n_elems - i - 1]; \ + result.element[n_elems - i * 2 - (1 + LO_IDX)] = \ + a->element[n_elems - i - 1]; \ + } \ + } \ + *r = result; \ + } +#if defined(HOST_WORDS_BIGENDIAN) +#define MRGHI 0 +#define MRGLO 1 +#else +#define MRGHI 1 +#define MRGLO 0 +#endif +#define VMRG(suffix, element) \ + VMRG_DO(mrgl##suffix, element, MRGHI) \ + VMRG_DO(mrgh##suffix, element, MRGLO) +VMRG(b, u8) +VMRG(h, u16) +VMRG(w, u32) +#undef VMRG_DO +#undef VMRG +#undef MRGHI +#undef MRGLO + +void helper_vmsummbm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + int32_t prod[16]; + int i; + + for (i = 0; i < ARRAY_SIZE(r->s8); i++) { + prod[i] = (int32_t)a->s8[i] * b->u8[i]; + } + + VECTOR_FOR_INORDER_I(i, s32) { + r->s32[i] = c->s32[i] + prod[4 * i] + prod[4 * i + 1] + + prod[4 * i + 2] + prod[4 * i + 3]; + } +} + +void helper_vmsumshm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + int32_t prod[8]; + int i; + + for (i = 0; i < ARRAY_SIZE(r->s16); i++) { + prod[i] = a->s16[i] * b->s16[i]; + } + + VECTOR_FOR_INORDER_I(i, s32) { + r->s32[i] = c->s32[i] + prod[2 * i] + prod[2 * i + 1]; + } +} + +void helper_vmsumshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + int32_t prod[8]; + int i; + int sat = 0; + + for (i = 0; i < ARRAY_SIZE(r->s16); i++) { + prod[i] = (int32_t)a->s16[i] * b->s16[i]; + } + + VECTOR_FOR_INORDER_I(i, s32) { + int64_t t = (int64_t)c->s32[i] + prod[2 * i] + prod[2 * i + 1]; + + r->u32[i] = cvtsdsw(t, &sat); + } + + if (sat) { + env->vscr |= (1 << VSCR_SAT); + } +} + +void helper_vmsumubm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + uint16_t prod[16]; + int i; + + for (i = 0; i < ARRAY_SIZE(r->u8); i++) { + prod[i] = a->u8[i] * b->u8[i]; + } + + VECTOR_FOR_INORDER_I(i, u32) { + r->u32[i] = c->u32[i] + prod[4 * i] + prod[4 * i + 1] + + prod[4 * i + 2] + prod[4 * i + 3]; + } +} + +void helper_vmsumuhm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + uint32_t prod[8]; + int i; + + for (i = 0; i < ARRAY_SIZE(r->u16); i++) { + prod[i] = a->u16[i] * b->u16[i]; + } + + VECTOR_FOR_INORDER_I(i, u32) { + r->u32[i] = c->u32[i] + prod[2 * i] + prod[2 * i + 1]; + } +} + +void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + uint32_t prod[8]; + int i; + int sat = 0; + + for (i = 0; i < ARRAY_SIZE(r->u16); i++) { + prod[i] = a->u16[i] * b->u16[i]; + } + + VECTOR_FOR_INORDER_I(i, s32) { + uint64_t t = (uint64_t)c->u32[i] + prod[2 * i] + prod[2 * i + 1]; + + r->u32[i] = cvtuduw(t, &sat); + } + + if (sat) { + env->vscr |= (1 << VSCR_SAT); + } +} + +#define VMUL_DO(name, mul_element, prod_element, evenp) \ + void 
helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + { \ + int i; \ + \ + VECTOR_FOR_INORDER_I(i, prod_element) { \ + if (evenp) { \ + r->prod_element[i] = a->mul_element[i * 2 + HI_IDX] * \ + b->mul_element[i * 2 + HI_IDX]; \ + } else { \ + r->prod_element[i] = a->mul_element[i * 2 + LO_IDX] * \ + b->mul_element[i * 2 + LO_IDX]; \ + } \ + } \ + } +#define VMUL(suffix, mul_element, prod_element) \ + VMUL_DO(mule##suffix, mul_element, prod_element, 1) \ + VMUL_DO(mulo##suffix, mul_element, prod_element, 0) +VMUL(sb, s8, s16) +VMUL(sh, s16, s32) +VMUL(ub, u8, u16) +VMUL(uh, u16, u32) +#undef VMUL_DO +#undef VMUL + +void helper_vnmsubfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->f); i++) { + HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) { + /* Need to do the computation is higher precision and round + * once at the end. */ + float64 af, bf, cf, t; + + af = float32_to_float64(a->f[i], &env->vec_status); + bf = float32_to_float64(b->f[i], &env->vec_status); + cf = float32_to_float64(c->f[i], &env->vec_status); + t = float64_mul(af, cf, &env->vec_status); + t = float64_sub(t, bf, &env->vec_status); + t = float64_chs(t); + r->f[i] = float64_to_float32(t, &env->vec_status); + } + } +} + +void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, + ppc_avr_t *c) +{ + ppc_avr_t result; + int i; + + VECTOR_FOR_INORDER_I(i, u8) { + int s = c->u8[i] & 0x1f; +#if defined(HOST_WORDS_BIGENDIAN) + int index = s & 0xf; +#else + int index = 15 - (s & 0xf); +#endif + + if (s & 0x10) { + result.u8[i] = b->u8[index]; + } else { + result.u8[i] = a->u8[index]; + } + } + *r = result; +} + +#if defined(HOST_WORDS_BIGENDIAN) +#define PKBIG 1 +#else +#define PKBIG 0 +#endif +void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int i, j; + ppc_avr_t result; +#if defined(HOST_WORDS_BIGENDIAN) + const ppc_avr_t *x[2] = { a, b }; +#else + const ppc_avr_t *x[2] = { b, a }; +#endif + + VECTOR_FOR_INORDER_I(i, u64) { + VECTOR_FOR_INORDER_I(j, u32) { + uint32_t e = x[i]->u32[j]; + + result.u16[4*i+j] = (((e >> 9) & 0xfc00) | + ((e >> 6) & 0x3e0) | + ((e >> 3) & 0x1f)); + } + } + *r = result; +} + +#define VPK(suffix, from, to, cvt, dosat) \ + void helper_vpk##suffix(CPUPPCState *env, ppc_avr_t *r, \ + ppc_avr_t *a, ppc_avr_t *b) \ + { \ + int i; \ + int sat = 0; \ + ppc_avr_t result; \ + ppc_avr_t *a0 = PKBIG ? a : b; \ + ppc_avr_t *a1 = PKBIG ? 
b : a; \ + \ + VECTOR_FOR_INORDER_I(i, from) { \ + result.to[i] = cvt(a0->from[i], &sat); \ + result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \ + } \ + *r = result; \ + if (dosat && sat) { \ + env->vscr |= (1 << VSCR_SAT); \ + } \ + } +#define I(x, y) (x) +VPK(shss, s16, s8, cvtshsb, 1) +VPK(shus, s16, u8, cvtshub, 1) +VPK(swss, s32, s16, cvtswsh, 1) +VPK(swus, s32, u16, cvtswuh, 1) +VPK(uhus, u16, u8, cvtuhub, 1) +VPK(uwus, u32, u16, cvtuwuh, 1) +VPK(uhum, u16, u8, I, 0) +VPK(uwum, u32, u16, I, 0) +#undef I +#undef VPK +#undef PKBIG + +void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->f); i++) { + HANDLE_NAN1(r->f[i], b->f[i]) { + r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status); + } + } +} + +#define VRFI(suffix, rounding) \ + void helper_vrfi##suffix(CPUPPCState *env, ppc_avr_t *r, \ + ppc_avr_t *b) \ + { \ + int i; \ + float_status s = env->vec_status; \ + \ + set_float_rounding_mode(rounding, &s); \ + for (i = 0; i < ARRAY_SIZE(r->f); i++) { \ + HANDLE_NAN1(r->f[i], b->f[i]) { \ + r->f[i] = float32_round_to_int (b->f[i], &s); \ + } \ + } \ + } +VRFI(n, float_round_nearest_even) +VRFI(m, float_round_down) +VRFI(p, float_round_up) +VRFI(z, float_round_to_zero) +#undef VRFI + +#define VROTATE(suffix, element) \ + void helper_vrl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + unsigned int mask = ((1 << \ + (3 + (sizeof(a->element[0]) >> 1))) \ + - 1); \ + unsigned int shift = b->element[i] & mask; \ + r->element[i] = (a->element[i] << shift) | \ + (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \ + } \ + } +VROTATE(b, u8) +VROTATE(h, u16) +VROTATE(w, u32) +#undef VROTATE + +void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->f); i++) { + HANDLE_NAN1(r->f[i], b->f[i]) { + float32 t = float32_sqrt(b->f[i], &env->vec_status); + + r->f[i] = float32_div(float32_one, t, &env->vec_status); + } + } +} + +void helper_vsel(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, + ppc_avr_t *c) +{ + r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]); + r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]); +} + +void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->f); i++) { + HANDLE_NAN1(r->f[i], b->f[i]) { + r->f[i] = float32_exp2(b->f[i], &env->vec_status); + } + } +} + +void helper_vlogefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->f); i++) { + HANDLE_NAN1(r->f[i], b->f[i]) { + r->f[i] = float32_log2(b->f[i], &env->vec_status); + } + } +} + +#if defined(HOST_WORDS_BIGENDIAN) +#define LEFT 0 +#define RIGHT 1 +#else +#define LEFT 1 +#define RIGHT 0 +#endif +/* The specification says that the results are undefined if all of the + * shift counts are not identical. We check to make sure that they are + * to conform to what real hardware appears to do. 
*/ +#define VSHIFT(suffix, leftp) \ + void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + { \ + int shift = b->u8[LO_IDX*15] & 0x7; \ + int doit = 1; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \ + doit = doit && ((b->u8[i] & 0x7) == shift); \ + } \ + if (doit) { \ + if (shift == 0) { \ + *r = *a; \ + } else if (leftp) { \ + uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \ + \ + r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \ + r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \ + } else { \ + uint64_t carry = a->u64[HI_IDX] << (64 - shift); \ + \ + r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \ + r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \ + } \ + } \ + } +VSHIFT(l, LEFT) +VSHIFT(r, RIGHT) +#undef VSHIFT +#undef LEFT +#undef RIGHT + +#define VSL(suffix, element) \ + void helper_vsl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + unsigned int mask = ((1 << \ + (3 + (sizeof(a->element[0]) >> 1))) \ + - 1); \ + unsigned int shift = b->element[i] & mask; \ + \ + r->element[i] = a->element[i] << shift; \ + } \ + } +VSL(b, u8) +VSL(h, u16) +VSL(w, u32) +#undef VSL + +void helper_vsldoi(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift) +{ + int sh = shift & 0xf; + int i; + ppc_avr_t result; + +#if defined(HOST_WORDS_BIGENDIAN) + for (i = 0; i < ARRAY_SIZE(r->u8); i++) { + int index = sh + i; + if (index > 0xf) { + result.u8[i] = b->u8[index - 0x10]; + } else { + result.u8[i] = a->u8[index]; + } + } +#else + for (i = 0; i < ARRAY_SIZE(r->u8); i++) { + int index = (16 - sh) + i; + if (index > 0xf) { + result.u8[i] = a->u8[index - 0x10]; + } else { + result.u8[i] = b->u8[index]; + } + } +#endif + *r = result; +} + +void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf; + +#if defined(HOST_WORDS_BIGENDIAN) + memmove(&r->u8[0], &a->u8[sh], 16 - sh); + memset(&r->u8[16-sh], 0, sh); +#else + memmove(&r->u8[sh], &a->u8[0], 16 - sh); + memset(&r->u8[0], 0, sh); +#endif +} + +/* Experimental testing shows that hardware masks the immediate. 
*/ +#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1)) +#if defined(HOST_WORDS_BIGENDIAN) +#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element) +#else +#define SPLAT_ELEMENT(element) \ + (ARRAY_SIZE(r->element) - 1 - _SPLAT_MASKED(element)) +#endif +#define VSPLT(suffix, element) \ + void helper_vsplt##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \ + { \ + uint32_t s = b->element[SPLAT_ELEMENT(element)]; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + r->element[i] = s; \ + } \ + } +VSPLT(b, u8) +VSPLT(h, u16) +VSPLT(w, u32) +#undef VSPLT +#undef SPLAT_ELEMENT +#undef _SPLAT_MASKED + +#define VSPLTI(suffix, element, splat_type) \ + void helper_vspltis##suffix(ppc_avr_t *r, uint32_t splat) \ + { \ + splat_type x = (int8_t)(splat << 3) >> 3; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + r->element[i] = x; \ + } \ + } +VSPLTI(b, s8, int8_t) +VSPLTI(h, s16, int16_t) +VSPLTI(w, s32, int32_t) +#undef VSPLTI + +#define VSR(suffix, element) \ + void helper_vsr##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + unsigned int mask = ((1 << \ + (3 + (sizeof(a->element[0]) >> 1))) \ + - 1); \ + unsigned int shift = b->element[i] & mask; \ + \ + r->element[i] = a->element[i] >> shift; \ + } \ + } +VSR(ab, s8) +VSR(ah, s16) +VSR(aw, s32) +VSR(b, u8) +VSR(h, u16) +VSR(w, u32) +#undef VSR + +void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int sh = (b->u8[LO_IDX * 0xf] >> 3) & 0xf; + +#if defined(HOST_WORDS_BIGENDIAN) + memmove(&r->u8[sh], &a->u8[0], 16 - sh); + memset(&r->u8[0], 0, sh); +#else + memmove(&r->u8[0], &a->u8[sh], 16 - sh); + memset(&r->u8[16 - sh], 0, sh); +#endif +} + +void helper_vsubcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->u32); i++) { + r->u32[i] = a->u32[i] >= b->u32[i]; + } +} + +void helper_vsumsws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int64_t t; + int i, upper; + ppc_avr_t result; + int sat = 0; + +#if defined(HOST_WORDS_BIGENDIAN) + upper = ARRAY_SIZE(r->s32)-1; +#else + upper = 0; +#endif + t = (int64_t)b->s32[upper]; + for (i = 0; i < ARRAY_SIZE(r->s32); i++) { + t += a->s32[i]; + result.s32[i] = 0; + } + result.s32[upper] = cvtsdsw(t, &sat); + *r = result; + + if (sat) { + env->vscr |= (1 << VSCR_SAT); + } +} + +void helper_vsum2sws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int i, j, upper; + ppc_avr_t result; + int sat = 0; + +#if defined(HOST_WORDS_BIGENDIAN) + upper = 1; +#else + upper = 0; +#endif + for (i = 0; i < ARRAY_SIZE(r->u64); i++) { + int64_t t = (int64_t)b->s32[upper + i * 2]; + + result.u64[i] = 0; + for (j = 0; j < ARRAY_SIZE(r->u64); j++) { + t += a->s32[2 * i + j]; + } + result.s32[upper + i * 2] = cvtsdsw(t, &sat); + } + + *r = result; + if (sat) { + env->vscr |= (1 << VSCR_SAT); + } +} + +void helper_vsum4sbs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int i, j; + int sat = 0; + + for (i = 0; i < ARRAY_SIZE(r->s32); i++) { + int64_t t = (int64_t)b->s32[i]; + + for (j = 0; j < ARRAY_SIZE(r->s32); j++) { + t += a->s8[4 * i + j]; + } + r->s32[i] = cvtsdsw(t, &sat); + } + + if (sat) { + env->vscr |= (1 << VSCR_SAT); + } +} + +void helper_vsum4shs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int sat = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(r->s32); i++) { + int64_t t = (int64_t)b->s32[i]; + + t += a->s16[2 * i] + a->s16[2 * i + 1]; + r->s32[i] = cvtsdsw(t, &sat); 
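/* Annotation (not part of the patch): cvtsdsw() is the SATCVT(sd, sw, ...) helper defined earlier in this file; it clamps the 64-bit accumulator to the int32 range and sets "sat" when it saturates, which is what raises VSCR[SAT] just below. */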
+ } + + if (sat) { + env->vscr |= (1 << VSCR_SAT); + } +} + +void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int i, j; + int sat = 0; + + for (i = 0; i < ARRAY_SIZE(r->u32); i++) { + uint64_t t = (uint64_t)b->u32[i]; + + for (j = 0; j < ARRAY_SIZE(r->u32); j++) { + t += a->u8[4 * i + j]; + } + r->u32[i] = cvtuduw(t, &sat); + } + + if (sat) { + env->vscr |= (1 << VSCR_SAT); + } +} + +#if defined(HOST_WORDS_BIGENDIAN) +#define UPKHI 1 +#define UPKLO 0 +#else +#define UPKHI 0 +#define UPKLO 1 +#endif +#define VUPKPX(suffix, hi) \ + void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b) \ + { \ + int i; \ + ppc_avr_t result; \ + \ + for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \ + uint16_t e = b->u16[hi ? i : i+4]; \ + uint8_t a = (e >> 15) ? 0xff : 0; \ + uint8_t r = (e >> 10) & 0x1f; \ + uint8_t g = (e >> 5) & 0x1f; \ + uint8_t b = e & 0x1f; \ + \ + result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \ + } \ + *r = result; \ + } +VUPKPX(lpx, UPKLO) +VUPKPX(hpx, UPKHI) +#undef VUPKPX + +#define VUPK(suffix, unpacked, packee, hi) \ + void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b) \ + { \ + int i; \ + ppc_avr_t result; \ + \ + if (hi) { \ + for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \ + result.unpacked[i] = b->packee[i]; \ + } \ + } else { \ + for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); \ + i++) { \ + result.unpacked[i - ARRAY_SIZE(r->unpacked)] = b->packee[i]; \ + } \ + } \ + *r = result; \ + } +VUPK(hsb, s16, s8, UPKHI) +VUPK(hsh, s32, s16, UPKHI) +VUPK(lsb, s16, s8, UPKLO) +VUPK(lsh, s32, s16, UPKLO) +#undef VUPK +#undef UPKHI +#undef UPKLO + +#undef DO_HANDLE_NAN +#undef HANDLE_NAN1 +#undef HANDLE_NAN2 +#undef HANDLE_NAN3 +#undef VECTOR_FOR_INORDER_I +#undef HI_IDX +#undef LO_IDX + +/*****************************************************************************/ +/* SPE extension helpers */ +/* Use a table to make this quicker */ +static const uint8_t hbrev[16] = { + 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE, + 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF, +}; + +static inline uint8_t byte_reverse(uint8_t val) +{ + return hbrev[val >> 4] | (hbrev[val & 0xF] << 4); +} + +static inline uint32_t word_reverse(uint32_t val) +{ + return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) | + (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24); +} + +#define MASKBITS 16 /* Random value - to be fixed (implementation dependent) */ +target_ulong helper_brinc(target_ulong arg1, target_ulong arg2) +{ + uint32_t a, b, d, mask; + + mask = UINT32_MAX >> (32 - MASKBITS); + a = arg1 & mask; + b = arg2 & mask; + d = word_reverse(1 + word_reverse(a | ~b)); + return (arg1 & ~mask) | (d & b); +} + +uint32_t helper_cntlsw32(uint32_t val) +{ + if (val & 0x80000000) { + return clz32(~val); + } else { + return clz32(val); + } +} + +uint32_t helper_cntlzw32(uint32_t val) +{ + return clz32(val); +} + +/* 440 specific */ +target_ulong helper_dlmzb(CPUPPCState *env, target_ulong high, + target_ulong low, uint32_t update_Rc) +{ + target_ulong mask; + int i; + + i = 1; + for (mask = 0xFF000000; mask != 0; mask = mask >> 8) { + if ((high & mask) == 0) { + if (update_Rc) { + env->crf[0] = 0x4; + } + goto done; + } + i++; + } + for (mask = 0xFF000000; mask != 0; mask = mask >> 8) { + if ((low & mask) == 0) { + if (update_Rc) { + env->crf[0] = 0x8; + } + goto done; + } + i++; + } + if (update_Rc) { + env->crf[0] = 0x2; + } + done: + env->xer = (env->xer & ~0x7F) | i; + if (update_Rc) { + env->crf[0] |= xer_so; + } + return i; +} diff --git a/target-ppc/kvm.c 
b/target-ppc/kvm.c index c09cc39c78..829e180f8b 100644 --- a/target-ppc/kvm.c +++ b/target-ppc/kvm.c @@ -18,6 +18,7 @@ #include <sys/types.h> #include <sys/ioctl.h> #include <sys/mman.h> +#include <sys/vfs.h> #include <linux/kvm.h> @@ -167,10 +168,217 @@ static int kvm_booke206_tlb_init(CPUPPCState *env) return 0; } + +#if defined(TARGET_PPC64) +static void kvm_get_fallback_smmu_info(CPUPPCState *env, + struct kvm_ppc_smmu_info *info) +{ + memset(info, 0, sizeof(*info)); + + /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so + * need to "guess" what the supported page sizes are. + * + * For that to work we make a few assumptions: + * + * - If KVM_CAP_PPC_GET_PVINFO is supported we are running "PR" + * KVM which only supports 4K and 16M pages, but supports them + * regardless of the backing store characteritics. We also don't + * support 1T segments. + * + * This is safe as if HV KVM ever supports that capability or PR + * KVM grows supports for more page/segment sizes, those versions + * will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we + * will not hit this fallback + * + * - Else we are running HV KVM. This means we only support page + * sizes that fit in the backing store. Additionally we only + * advertize 64K pages if the processor is ARCH 2.06 and we assume + * P7 encodings for the SLB and hash table. Here too, we assume + * support for any newer processor will mean a kernel that + * implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit + * this fallback. + */ + if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO)) { + /* No flags */ + info->flags = 0; + info->slb_size = 64; + + /* Standard 4k base page size segment */ + info->sps[0].page_shift = 12; + info->sps[0].slb_enc = 0; + info->sps[0].enc[0].page_shift = 12; + info->sps[0].enc[0].pte_enc = 0; + + /* Standard 16M large page size segment */ + info->sps[1].page_shift = 24; + info->sps[1].slb_enc = SLB_VSID_L; + info->sps[1].enc[0].page_shift = 24; + info->sps[1].enc[0].pte_enc = 0; + } else { + int i = 0; + + /* HV KVM has backing store size restrictions */ + info->flags = KVM_PPC_PAGE_SIZES_REAL; + + if (env->mmu_model & POWERPC_MMU_1TSEG) { + info->flags |= KVM_PPC_1T_SEGMENTS; + } + + if (env->mmu_model == POWERPC_MMU_2_06) { + info->slb_size = 32; + } else { + info->slb_size = 64; + } + + /* Standard 4k base page size segment */ + info->sps[i].page_shift = 12; + info->sps[i].slb_enc = 0; + info->sps[i].enc[0].page_shift = 12; + info->sps[i].enc[0].pte_enc = 0; + i++; + + /* 64K on MMU 2.06 */ + if (env->mmu_model == POWERPC_MMU_2_06) { + info->sps[i].page_shift = 16; + info->sps[i].slb_enc = 0x110; + info->sps[i].enc[0].page_shift = 16; + info->sps[i].enc[0].pte_enc = 1; + i++; + } + + /* Standard 16M large page size segment */ + info->sps[i].page_shift = 24; + info->sps[i].slb_enc = SLB_VSID_L; + info->sps[i].enc[0].page_shift = 24; + info->sps[i].enc[0].pte_enc = 0; + } +} + +static void kvm_get_smmu_info(CPUPPCState *env, struct kvm_ppc_smmu_info *info) +{ + int ret; + + if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) { + ret = kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_SMMU_INFO, info); + if (ret == 0) { + return; + } + } + + kvm_get_fallback_smmu_info(env, info); +} + +static long getrampagesize(void) +{ + struct statfs fs; + int ret; + + if (!mem_path) { + /* guest RAM is backed by normal anonymous pages */ + return getpagesize(); + } + + do { + ret = statfs(mem_path, &fs); + } while (ret != 0 && errno == EINTR); + + if (ret != 0) { + fprintf(stderr, "Couldn't statfs() 
memory path: %s\n", + strerror(errno)); + exit(1); + } + +#define HUGETLBFS_MAGIC 0x958458f6 + + if (fs.f_type != HUGETLBFS_MAGIC) { + /* Explicit mempath, but it's ordinary pages */ + return getpagesize(); + } + + /* It's hugepage, return the huge page size */ + return fs.f_bsize; +} + +static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift) +{ + if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) { + return true; + } + + return (1ul << shift) <= rampgsize; +} + +static void kvm_fixup_page_sizes(CPUPPCState *env) +{ + static struct kvm_ppc_smmu_info smmu_info; + static bool has_smmu_info; + long rampagesize; + int iq, ik, jq, jk; + + /* We only handle page sizes for 64-bit server guests for now */ + if (!(env->mmu_model & POWERPC_MMU_64)) { + return; + } + + /* Collect MMU info from kernel if not already */ + if (!has_smmu_info) { + kvm_get_smmu_info(env, &smmu_info); + has_smmu_info = true; + } + + rampagesize = getrampagesize(); + + /* Convert to QEMU form */ + memset(&env->sps, 0, sizeof(env->sps)); + + for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) { + struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq]; + struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik]; + + if (!kvm_valid_page_size(smmu_info.flags, rampagesize, + ksps->page_shift)) { + continue; + } + qsps->page_shift = ksps->page_shift; + qsps->slb_enc = ksps->slb_enc; + for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) { + if (!kvm_valid_page_size(smmu_info.flags, rampagesize, + ksps->enc[jk].page_shift)) { + continue; + } + qsps->enc[jq].page_shift = ksps->enc[jk].page_shift; + qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc; + if (++jq >= PPC_PAGE_SIZES_MAX_SZ) { + break; + } + } + if (++iq >= PPC_PAGE_SIZES_MAX_SZ) { + break; + } + } + env->slb_nr = smmu_info.slb_size; + if (smmu_info.flags & KVM_PPC_1T_SEGMENTS) { + env->mmu_model |= POWERPC_MMU_1TSEG; + } else { + env->mmu_model &= ~POWERPC_MMU_1TSEG; + } +} +#else /* defined (TARGET_PPC64) */ + +static inline void kvm_fixup_page_sizes(CPUPPCState *env) +{ +} + +#endif /* !defined (TARGET_PPC64) */ + int kvm_arch_init_vcpu(CPUPPCState *cenv) { int ret; + /* Gather server mmu info from KVM and update the CPU state */ + kvm_fixup_page_sizes(cenv); + + /* Synchronize sregs with kvm */ ret = kvm_arch_sync_sregs(cenv); if (ret) { return ret; @@ -859,7 +1067,7 @@ void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd) return NULL; } - len = (window_size / SPAPR_VIO_TCE_PAGE_SIZE) * sizeof(VIOsPAPR_RTCE); + len = (window_size / SPAPR_TCE_PAGE_SIZE) * sizeof(sPAPRTCE); /* FIXME: round this up to page size */ table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); @@ -882,7 +1090,7 @@ int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t window_size) return -1; } - len = (window_size / SPAPR_VIO_TCE_PAGE_SIZE)*sizeof(VIOsPAPR_RTCE); + len = (window_size / SPAPR_TCE_PAGE_SIZE)*sizeof(sPAPRTCE); if ((munmap(table, len) < 0) || (close(fd) < 0)) { fprintf(stderr, "KVM: Unexpected error removing TCE table: %s", diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h index 34ecad3e26..e2f8703853 100644 --- a/target-ppc/kvm_ppc.h +++ b/target-ppc/kvm_ppc.h @@ -58,6 +58,11 @@ static inline int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_l return -1; } +static inline int kvmppc_read_segment_page_sizes(uint32_t *prop, int maxcells) +{ + return -1; +} + static inline int kvmppc_set_interrupt(CPUPPCState *env, int irq, int level) { return -1; diff --git a/target-ppc/mem_helper.c b/target-ppc/mem_helper.c 
new file mode 100644 index 0000000000..5b5f1bdd23 --- /dev/null +++ b/target-ppc/mem_helper.c @@ -0,0 +1,295 @@ +/* + * PowerPC memory access emulation helpers for QEMU. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#include "cpu.h" +#include "host-utils.h" +#include "helper.h" + +#include "helper_regs.h" + +#if !defined(CONFIG_USER_ONLY) +#include "softmmu_exec.h" +#endif /* !defined(CONFIG_USER_ONLY) */ + +//#define DEBUG_OP + +/*****************************************************************************/ +/* Memory load and stores */ + +static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr, + target_long arg) +{ +#if defined(TARGET_PPC64) + if (!msr_is_64bit(env, env->msr)) { + return (uint32_t)(addr + arg); + } else +#endif + { + return addr + arg; + } +} + +void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg) +{ + for (; reg < 32; reg++) { + if (msr_le) { + env->gpr[reg] = bswap32(cpu_ldl_data(env, addr)); + } else { + env->gpr[reg] = cpu_ldl_data(env, addr); + } + addr = addr_add(env, addr, 4); + } +} + +void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg) +{ + for (; reg < 32; reg++) { + if (msr_le) { + cpu_stl_data(env, addr, bswap32((uint32_t)env->gpr[reg])); + } else { + cpu_stl_data(env, addr, (uint32_t)env->gpr[reg]); + } + addr = addr_add(env, addr, 4); + } +} + +void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg) +{ + int sh; + + for (; nb > 3; nb -= 4) { + env->gpr[reg] = cpu_ldl_data(env, addr); + reg = (reg + 1) % 32; + addr = addr_add(env, addr, 4); + } + if (unlikely(nb > 0)) { + env->gpr[reg] = 0; + for (sh = 24; nb > 0; nb--, sh -= 8) { + env->gpr[reg] |= cpu_ldub_data(env, addr) << sh; + addr = addr_add(env, addr, 1); + } + } +} +/* PPC32 specification says we must generate an exception if + * rA is in the range of registers to be loaded. + * In an other hand, IBM says this is valid, but rA won't be loaded. + * For now, I'll follow the spec... 
+ */ +void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg, + uint32_t ra, uint32_t rb) +{ + if (likely(xer_bc != 0)) { + if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) || + (reg < rb && (reg + xer_bc) > rb))) { + helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | + POWERPC_EXCP_INVAL_LSWX); + } else { + helper_lsw(env, addr, xer_bc, reg); + } + } +} + +void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb, + uint32_t reg) +{ + int sh; + + for (; nb > 3; nb -= 4) { + cpu_stl_data(env, addr, env->gpr[reg]); + reg = (reg + 1) % 32; + addr = addr_add(env, addr, 4); + } + if (unlikely(nb > 0)) { + for (sh = 24; nb > 0; nb--, sh -= 8) { + cpu_stb_data(env, addr, (env->gpr[reg] >> sh) & 0xFF); + addr = addr_add(env, addr, 1); + } + } +} + +static void do_dcbz(CPUPPCState *env, target_ulong addr, int dcache_line_size) +{ + int i; + + addr &= ~(dcache_line_size - 1); + for (i = 0; i < dcache_line_size; i += 4) { + cpu_stl_data(env, addr + i, 0); + } + if (env->reserve_addr == addr) { + env->reserve_addr = (target_ulong)-1ULL; + } +} + +void helper_dcbz(CPUPPCState *env, target_ulong addr) +{ + do_dcbz(env, addr, env->dcache_line_size); +} + +void helper_dcbz_970(CPUPPCState *env, target_ulong addr) +{ + if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) { + do_dcbz(env, addr, 32); + } else { + do_dcbz(env, addr, env->dcache_line_size); + } +} + +void helper_icbi(CPUPPCState *env, target_ulong addr) +{ + addr &= ~(env->dcache_line_size - 1); + /* Invalidate one cache line : + * PowerPC specification says this is to be treated like a load + * (not a fetch) by the MMU. To be sure it will be so, + * do the load "by hand". + */ + cpu_ldl_data(env, addr); +} + +/* XXX: to be tested */ +target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg, + uint32_t ra, uint32_t rb) +{ + int i, c, d; + + d = 24; + for (i = 0; i < xer_bc; i++) { + c = cpu_ldub_data(env, addr); + addr = addr_add(env, addr, 1); + /* ra (if not 0) and rb are never modified */ + if (likely(reg != rb && (ra == 0 || reg != ra))) { + env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d); + } + if (unlikely(c == xer_cmp)) { + break; + } + if (likely(d != 0)) { + d -= 8; + } else { + d = 24; + reg++; + reg = reg & 0x1F; + } + } + return i; +} + +/*****************************************************************************/ +/* Altivec extension helpers */ +#if defined(HOST_WORDS_BIGENDIAN) +#define HI_IDX 0 +#define LO_IDX 1 +#else +#define HI_IDX 1 +#define LO_IDX 0 +#endif + +#define LVE(name, access, swap, element) \ + void helper_##name(CPUPPCState *env, ppc_avr_t *r, \ + target_ulong addr) \ + { \ + size_t n_elems = ARRAY_SIZE(r->element); \ + int adjust = HI_IDX*(n_elems - 1); \ + int sh = sizeof(r->element[0]) >> 1; \ + int index = (addr & 0xf) >> sh; \ + \ + if (msr_le) { \ + r->element[LO_IDX ? index : (adjust - index)] = \ + swap(access(env, addr)); \ + } else { \ + r->element[LO_IDX ? 
index : (adjust - index)] = \ + access(env, addr); \ + } \ + } +#define I(x) (x) +LVE(lvebx, cpu_ldub_data, I, u8) +LVE(lvehx, cpu_lduw_data, bswap16, u16) +LVE(lvewx, cpu_ldl_data, bswap32, u32) +#undef I +#undef LVE + +#define STVE(name, access, swap, element) \ + void helper_##name(CPUPPCState *env, ppc_avr_t *r, \ + target_ulong addr) \ + { \ + size_t n_elems = ARRAY_SIZE(r->element); \ + int adjust = HI_IDX * (n_elems - 1); \ + int sh = sizeof(r->element[0]) >> 1; \ + int index = (addr & 0xf) >> sh; \ + \ + if (msr_le) { \ + access(env, addr, swap(r->element[LO_IDX ? index : \ + (adjust - index)])); \ + } else { \ + access(env, addr, r->element[LO_IDX ? index : \ + (adjust - index)]); \ + } \ + } +#define I(x) (x) +STVE(stvebx, cpu_stb_data, I, u8) +STVE(stvehx, cpu_stw_data, bswap16, u16) +STVE(stvewx, cpu_stl_data, bswap32, u32) +#undef I +#undef LVE + +#undef HI_IDX +#undef LO_IDX + +/*****************************************************************************/ +/* Softmmu support */ +#if !defined(CONFIG_USER_ONLY) + +#define MMUSUFFIX _mmu + +#define SHIFT 0 +#include "softmmu_template.h" + +#define SHIFT 1 +#include "softmmu_template.h" + +#define SHIFT 2 +#include "softmmu_template.h" + +#define SHIFT 3 +#include "softmmu_template.h" + +/* try to fill the TLB and return an exception if error. If retaddr is + NULL, it means that the function was called in C code (i.e. not + from generated code or from helper.c) */ +/* XXX: fix it to restore all registers */ +void tlb_fill(CPUPPCState *env, target_ulong addr, int is_write, int mmu_idx, + uintptr_t retaddr) +{ + TranslationBlock *tb; + int ret; + + ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx); + if (unlikely(ret != 0)) { + if (likely(retaddr)) { + /* now we have a real cpu fault */ + tb = tb_find_pc(retaddr); + if (likely(tb)) { + /* the PC is inside the translated code. It means that we have + a virtual CPU fault */ + cpu_restore_state(tb, env, retaddr); + } + } + helper_raise_exception_err(env, env->exception_index, env->error_code); + } +} +#endif /* !CONFIG_USER_ONLY */ diff --git a/target-ppc/misc_helper.c b/target-ppc/misc_helper.c new file mode 100644 index 0000000000..26edcca2df --- /dev/null +++ b/target-ppc/misc_helper.c @@ -0,0 +1,124 @@ +/* + * Miscellaneous PowerPC emulation helpers for QEMU. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ +#include "cpu.h" +#include "helper.h" + +#include "helper_regs.h" + +/*****************************************************************************/ +/* SPR accesses */ +void helper_load_dump_spr(CPUPPCState *env, uint32_t sprn) +{ + qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn, + env->spr[sprn]); +} + +void helper_store_dump_spr(CPUPPCState *env, uint32_t sprn) +{ + qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn, + env->spr[sprn]); +} +#if !defined(CONFIG_USER_ONLY) +#if defined(TARGET_PPC64) +void helper_store_asr(CPUPPCState *env, target_ulong val) +{ + ppc_store_asr(env, val); +} +#endif + +void helper_store_sdr1(CPUPPCState *env, target_ulong val) +{ + ppc_store_sdr1(env, val); +} + +void helper_store_hid0_601(CPUPPCState *env, target_ulong val) +{ + target_ulong hid0; + + hid0 = env->spr[SPR_HID0]; + if ((val ^ hid0) & 0x00000008) { + /* Change current endianness */ + env->hflags &= ~(1 << MSR_LE); + env->hflags_nmsr &= ~(1 << MSR_LE); + env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE); + env->hflags |= env->hflags_nmsr; + qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__, + val & 0x8 ? 'l' : 'b', env->hflags); + } + env->spr[SPR_HID0] = (uint32_t)val; +} + +void helper_store_403_pbr(CPUPPCState *env, uint32_t num, target_ulong value) +{ + if (likely(env->pb[num] != value)) { + env->pb[num] = value; + /* Should be optimized */ + tlb_flush(env, 1); + } +} + +void helper_store_40x_dbcr0(CPUPPCState *env, target_ulong val) +{ + store_40x_dbcr0(env, val); +} + +void helper_store_40x_sler(CPUPPCState *env, target_ulong val) +{ + store_40x_sler(env, val); +} +#endif +/*****************************************************************************/ +/* PowerPC 601 specific instructions (POWER bridge) */ + +target_ulong helper_clcs(CPUPPCState *env, uint32_t arg) +{ + switch (arg) { + case 0x0CUL: + /* Instruction cache line size */ + return env->icache_line_size; + break; + case 0x0DUL: + /* Data cache line size */ + return env->dcache_line_size; + break; + case 0x0EUL: + /* Minimum cache line size */ + return (env->icache_line_size < env->dcache_line_size) ? + env->icache_line_size : env->dcache_line_size; + break; + case 0x0FUL: + /* Maximum cache line size */ + return (env->icache_line_size > env->dcache_line_size) ? + env->icache_line_size : env->dcache_line_size; + break; + default: + /* Undefined */ + return 0; + break; + } +} + +/*****************************************************************************/ +/* Special registers manipulation */ + +/* GDBstub can read and write MSR... */ +void ppc_store_msr(CPUPPCState *env, target_ulong value) +{ + hreg_store_msr(env, value, 0); +} diff --git a/target-ppc/mmu_helper.c b/target-ppc/mmu_helper.c new file mode 100644 index 0000000000..d2664acef0 --- /dev/null +++ b/target-ppc/mmu_helper.c @@ -0,0 +1,3326 @@ +/* + * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#include "cpu.h" +#include "helper.h" +#include "kvm.h" +#include "kvm_ppc.h" + +//#define DEBUG_MMU +//#define DEBUG_BATS +//#define DEBUG_SLB +//#define DEBUG_SOFTWARE_TLB +//#define DUMP_PAGE_TABLES +//#define DEBUG_SOFTWARE_TLB +//#define FLUSH_ALL_TLBS + +#ifdef DEBUG_MMU +# define LOG_MMU(...) qemu_log(__VA_ARGS__) +# define LOG_MMU_STATE(env) log_cpu_state((env), 0) +#else +# define LOG_MMU(...) do { } while (0) +# define LOG_MMU_STATE(...) do { } while (0) +#endif + +#ifdef DEBUG_SOFTWARE_TLB +# define LOG_SWTLB(...) qemu_log(__VA_ARGS__) +#else +# define LOG_SWTLB(...) do { } while (0) +#endif + +#ifdef DEBUG_BATS +# define LOG_BATS(...) qemu_log(__VA_ARGS__) +#else +# define LOG_BATS(...) do { } while (0) +#endif + +#ifdef DEBUG_SLB +# define LOG_SLB(...) qemu_log(__VA_ARGS__) +#else +# define LOG_SLB(...) do { } while (0) +#endif + +/*****************************************************************************/ +/* PowerPC MMU emulation */ +#if defined(CONFIG_USER_ONLY) +int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw, + int mmu_idx) +{ + int exception, error_code; + + if (rw == 2) { + exception = POWERPC_EXCP_ISI; + error_code = 0x40000000; + } else { + exception = POWERPC_EXCP_DSI; + error_code = 0x40000000; + if (rw) { + error_code |= 0x02000000; + } + env->spr[SPR_DAR] = address; + env->spr[SPR_DSISR] = error_code; + } + env->exception_index = exception; + env->error_code = error_code; + + return 1; +} + +#else +/* Common routines used by software and hardware TLBs emulation */ +static inline int pte_is_valid(target_ulong pte0) +{ + return pte0 & 0x80000000 ? 1 : 0; +} + +static inline void pte_invalidate(target_ulong *pte0) +{ + *pte0 &= ~0x80000000; +} + +#if defined(TARGET_PPC64) +static inline int pte64_is_valid(target_ulong pte0) +{ + return pte0 & 0x0000000000000001ULL ? 1 : 0; +} + +static inline void pte64_invalidate(target_ulong *pte0) +{ + *pte0 &= ~0x0000000000000001ULL; +} +#endif + +#define PTE_PTEM_MASK 0x7FFFFFBF +#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B) +#if defined(TARGET_PPC64) +#define PTE64_PTEM_MASK 0xFFFFFFFFFFFFFF80ULL +#define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F) +#endif + +static inline int pp_check(int key, int pp, int nx) +{ + int access; + + /* Compute access rights */ + /* When pp is 3/7, the result is undefined. 
Set it to noaccess */ + access = 0; + if (key == 0) { + switch (pp) { + case 0x0: + case 0x1: + case 0x2: + access |= PAGE_WRITE; + /* No break here */ + case 0x3: + case 0x6: + access |= PAGE_READ; + break; + } + } else { + switch (pp) { + case 0x0: + case 0x6: + access = 0; + break; + case 0x1: + case 0x3: + access = PAGE_READ; + break; + case 0x2: + access = PAGE_READ | PAGE_WRITE; + break; + } + } + if (nx == 0) { + access |= PAGE_EXEC; + } + + return access; +} + +static inline int check_prot(int prot, int rw, int access_type) +{ + int ret; + + if (access_type == ACCESS_CODE) { + if (prot & PAGE_EXEC) { + ret = 0; + } else { + ret = -2; + } + } else if (rw) { + if (prot & PAGE_WRITE) { + ret = 0; + } else { + ret = -2; + } + } else { + if (prot & PAGE_READ) { + ret = 0; + } else { + ret = -2; + } + } + + return ret; +} + +static inline int pte_check(mmu_ctx_t *ctx, int is_64b, target_ulong pte0, + target_ulong pte1, int h, int rw, int type) +{ + target_ulong ptem, mmask; + int access, ret, pteh, ptev, pp; + + ret = -1; + /* Check validity and table match */ +#if defined(TARGET_PPC64) + if (is_64b) { + ptev = pte64_is_valid(pte0); + pteh = (pte0 >> 1) & 1; + } else +#endif + { + ptev = pte_is_valid(pte0); + pteh = (pte0 >> 6) & 1; + } + if (ptev && h == pteh) { + /* Check vsid & api */ +#if defined(TARGET_PPC64) + if (is_64b) { + ptem = pte0 & PTE64_PTEM_MASK; + mmask = PTE64_CHECK_MASK; + pp = (pte1 & 0x00000003) | ((pte1 >> 61) & 0x00000004); + ctx->nx = (pte1 >> 2) & 1; /* No execute bit */ + ctx->nx |= (pte1 >> 3) & 1; /* Guarded bit */ + } else +#endif + { + ptem = pte0 & PTE_PTEM_MASK; + mmask = PTE_CHECK_MASK; + pp = pte1 & 0x00000003; + } + if (ptem == ctx->ptem) { + if (ctx->raddr != (target_phys_addr_t)-1ULL) { + /* all matches should have equal RPN, WIMG & PP */ + if ((ctx->raddr & mmask) != (pte1 & mmask)) { + qemu_log("Bad RPN/WIMG/PP\n"); + return -3; + } + } + /* Compute access rights */ + access = pp_check(ctx->key, pp, ctx->nx); + /* Keep the matching PTE informations */ + ctx->raddr = pte1; + ctx->prot = access; + ret = check_prot(ctx->prot, rw, type); + if (ret == 0) { + /* Access granted */ + LOG_MMU("PTE access granted !\n"); + } else { + /* Access right violation */ + LOG_MMU("PTE access rejected\n"); + } + } + } + + return ret; +} + +static inline int pte32_check(mmu_ctx_t *ctx, target_ulong pte0, + target_ulong pte1, int h, int rw, int type) +{ + return pte_check(ctx, 0, pte0, pte1, h, rw, type); +} + +#if defined(TARGET_PPC64) +static inline int pte64_check(mmu_ctx_t *ctx, target_ulong pte0, + target_ulong pte1, int h, int rw, int type) +{ + return pte_check(ctx, 1, pte0, pte1, h, rw, type); +} +#endif + +static inline int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p, + int ret, int rw) +{ + int store = 0; + + /* Update page flags */ + if (!(*pte1p & 0x00000100)) { + /* Update accessed flag */ + *pte1p |= 0x00000100; + store = 1; + } + if (!(*pte1p & 0x00000080)) { + if (rw == 1 && ret == 0) { + /* Update changed flag */ + *pte1p |= 0x00000080; + store = 1; + } else { + /* Force page fault for first write access */ + ctx->prot &= ~PAGE_WRITE; + } + } + + return store; +} + +/* Software driven TLB helpers */ +static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, + int way, int is_code) +{ + int nr; + + /* Select TLB num in a way from address */ + nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1); + /* Select TLB way */ + nr += env->tlb_per_way * way; + /* 6xx have separate TLBs for instructions and data */ + if (is_code && 
env->id_tlbs == 1) { + nr += env->nb_tlb; + } + + return nr; +} + +static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env) +{ + ppc6xx_tlb_t *tlb; + int nr, max; + + /* LOG_SWTLB("Invalidate all TLBs\n"); */ + /* Invalidate all defined software TLB */ + max = env->nb_tlb; + if (env->id_tlbs == 1) { + max *= 2; + } + for (nr = 0; nr < max; nr++) { + tlb = &env->tlb.tlb6[nr]; + pte_invalidate(&tlb->pte0); + } + tlb_flush(env, 1); +} + +static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env, + target_ulong eaddr, + int is_code, int match_epn) +{ +#if !defined(FLUSH_ALL_TLBS) + ppc6xx_tlb_t *tlb; + int way, nr; + + /* Invalidate ITLB + DTLB, all ways */ + for (way = 0; way < env->nb_ways; way++) { + nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code); + tlb = &env->tlb.tlb6[nr]; + if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) { + LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr, + env->nb_tlb, eaddr); + pte_invalidate(&tlb->pte0); + tlb_flush_page(env, tlb->EPN); + } + } +#else + /* XXX: PowerPC specification say this is valid as well */ + ppc6xx_tlb_invalidate_all(env); +#endif +} + +static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env, + target_ulong eaddr, int is_code) +{ + ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0); +} + +static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way, + int is_code, target_ulong pte0, target_ulong pte1) +{ + ppc6xx_tlb_t *tlb; + int nr; + + nr = ppc6xx_tlb_getnum(env, EPN, way, is_code); + tlb = &env->tlb.tlb6[nr]; + LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx + " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1); + /* Invalidate any pending reference in QEMU for this virtual address */ + ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1); + tlb->pte0 = pte0; + tlb->pte1 = pte1; + tlb->EPN = EPN; + /* Store last way for LRU mechanism */ + env->last_way = way; +} + +static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, int rw, int access_type) +{ + ppc6xx_tlb_t *tlb; + int nr, best, way; + int ret; + + best = -1; + ret = -1; /* No TLB found */ + for (way = 0; way < env->nb_ways; way++) { + nr = ppc6xx_tlb_getnum(env, eaddr, way, + access_type == ACCESS_CODE ? 1 : 0); + tlb = &env->tlb.tlb6[nr]; + /* This test "emulates" the PTE index match for hardware TLBs */ + if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) { + LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx + "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb, + pte_is_valid(tlb->pte0) ? "valid" : "inval", + tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr); + continue; + } + LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " " + TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb, + pte_is_valid(tlb->pte0) ? "valid" : "inval", + tlb->EPN, eaddr, tlb->pte1, + rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D'); + switch (pte32_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) { + case -3: + /* TLB inconsistency */ + return -1; + case -2: + /* Access violation */ + ret = -2; + best = nr; + break; + case -1: + default: + /* No match */ + break; + case 0: + /* access granted */ + /* XXX: we should go on looping to check all TLBs consistency + * but we can speed-up the whole thing as the + * result would be undefined if TLBs are not consistent. 
+ */ + ret = 0; + best = nr; + goto done; + } + } + if (best != -1) { + done: + LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n", + ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret); + /* Update page flags */ + pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, rw); + } + + return ret; +} + +/* Perform BAT hit & translation */ +static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp, + int *validp, int *protp, target_ulong *BATu, + target_ulong *BATl) +{ + target_ulong bl; + int pp, valid, prot; + + bl = (*BATu & 0x00001FFC) << 15; + valid = 0; + prot = 0; + if (((msr_pr == 0) && (*BATu & 0x00000002)) || + ((msr_pr != 0) && (*BATu & 0x00000001))) { + valid = 1; + pp = *BATl & 0x00000003; + if (pp != 0) { + prot = PAGE_READ | PAGE_EXEC; + if (pp == 0x2) { + prot |= PAGE_WRITE; + } + } + } + *blp = bl; + *validp = valid; + *protp = prot; +} + +static inline void bat_601_size_prot(CPUPPCState *env, target_ulong *blp, + int *validp, int *protp, + target_ulong *BATu, target_ulong *BATl) +{ + target_ulong bl; + int key, pp, valid, prot; + + bl = (*BATl & 0x0000003F) << 17; + LOG_BATS("b %02x ==> bl " TARGET_FMT_lx " msk " TARGET_FMT_lx "\n", + (uint8_t)(*BATl & 0x0000003F), bl, ~bl); + prot = 0; + valid = (*BATl >> 6) & 1; + if (valid) { + pp = *BATu & 0x00000003; + if (msr_pr == 0) { + key = (*BATu >> 3) & 1; + } else { + key = (*BATu >> 2) & 1; + } + prot = pp_check(key, pp, 0); + } + *blp = bl; + *validp = valid; + *protp = prot; +} + +static inline int get_bat(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong virtual, int rw, int type) +{ + target_ulong *BATlt, *BATut, *BATu, *BATl; + target_ulong BEPIl, BEPIu, bl; + int i, valid, prot; + int ret = -1; + + LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__, + type == ACCESS_CODE ? 'I' : 'D', virtual); + switch (type) { + case ACCESS_CODE: + BATlt = env->IBAT[1]; + BATut = env->IBAT[0]; + break; + default: + BATlt = env->DBAT[1]; + BATut = env->DBAT[0]; + break; + } + for (i = 0; i < env->nb_BATs; i++) { + BATu = &BATut[i]; + BATl = &BATlt[i]; + BEPIu = *BATu & 0xF0000000; + BEPIl = *BATu & 0x0FFE0000; + if (unlikely(env->mmu_model == POWERPC_MMU_601)) { + bat_601_size_prot(env, &bl, &valid, &prot, BATu, BATl); + } else { + bat_size_prot(env, &bl, &valid, &prot, BATu, BATl); + } + LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx + " BATl " TARGET_FMT_lx "\n", __func__, + type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl); + if ((virtual & 0xF0000000) == BEPIu && + ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { + /* BAT matches */ + if (valid != 0) { + /* Get physical address */ + ctx->raddr = (*BATl & 0xF0000000) | + ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) | + (virtual & 0x0001F000); + /* Compute access rights */ + ctx->prot = prot; + ret = check_prot(ctx->prot, rw, type); + if (ret == 0) { + LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n", + i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-', + ctx->prot & PAGE_WRITE ? 'W' : '-'); + } + break; + } + } + } + if (ret < 0) { +#if defined(DEBUG_BATS) + if (qemu_log_enabled()) { + LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual); + for (i = 0; i < 4; i++) { + BATu = &BATut[i]; + BATl = &BATlt[i]; + BEPIu = *BATu & 0xF0000000; + BEPIl = *BATu & 0x0FFE0000; + bl = (*BATu & 0x00001FFC) << 15; + LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx + " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " + TARGET_FMT_lx " " TARGET_FMT_lx "\n", + __func__, type == ACCESS_CODE ? 
'I' : 'D', i, virtual, + *BATu, *BATl, BEPIu, BEPIl, bl); + } + } +#endif + } + /* No hit */ + return ret; +} + +static inline target_phys_addr_t get_pteg_offset(CPUPPCState *env, + target_phys_addr_t hash, + int pte_size) +{ + return (hash * pte_size * 8) & env->htab_mask; +} + +/* PTE table lookup */ +static inline int find_pte2(CPUPPCState *env, mmu_ctx_t *ctx, int is_64b, int h, + int rw, int type, int target_page_bits) +{ + target_phys_addr_t pteg_off; + target_ulong pte0, pte1; + int i, good = -1; + int ret, r; + + ret = -1; /* No entry found */ + pteg_off = get_pteg_offset(env, ctx->hash[h], + is_64b ? HASH_PTE_SIZE_64 : HASH_PTE_SIZE_32); + for (i = 0; i < 8; i++) { +#if defined(TARGET_PPC64) + if (is_64b) { + if (env->external_htab) { + pte0 = ldq_p(env->external_htab + pteg_off + (i * 16)); + pte1 = ldq_p(env->external_htab + pteg_off + (i * 16) + 8); + } else { + pte0 = ldq_phys(env->htab_base + pteg_off + (i * 16)); + pte1 = ldq_phys(env->htab_base + pteg_off + (i * 16) + 8); + } + + r = pte64_check(ctx, pte0, pte1, h, rw, type); + LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " " + TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n", + pteg_off + (i * 16), pte0, pte1, (int)(pte0 & 1), h, + (int)((pte0 >> 1) & 1), ctx->ptem); + } else +#endif + { + if (env->external_htab) { + pte0 = ldl_p(env->external_htab + pteg_off + (i * 8)); + pte1 = ldl_p(env->external_htab + pteg_off + (i * 8) + 4); + } else { + pte0 = ldl_phys(env->htab_base + pteg_off + (i * 8)); + pte1 = ldl_phys(env->htab_base + pteg_off + (i * 8) + 4); + } + r = pte32_check(ctx, pte0, pte1, h, rw, type); + LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " " + TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n", + pteg_off + (i * 8), pte0, pte1, (int)(pte0 >> 31), h, + (int)((pte0 >> 6) & 1), ctx->ptem); + } + switch (r) { + case -3: + /* PTE inconsistency */ + return -1; + case -2: + /* Access violation */ + ret = -2; + good = i; + break; + case -1: + default: + /* No PTE match */ + break; + case 0: + /* access granted */ + /* XXX: we should go on looping to check all PTEs consistency + * but we can speed-up the whole thing as the + * result would be undefined if PTEs are not consistent. 
+ */ + ret = 0; + good = i; + goto done; + } + } + if (good != -1) { + done: + LOG_MMU("found PTE at addr " TARGET_FMT_lx " prot=%01x ret=%d\n", + ctx->raddr, ctx->prot, ret); + /* Update page flags */ + pte1 = ctx->raddr; + if (pte_update_flags(ctx, &pte1, ret, rw) == 1) { +#if defined(TARGET_PPC64) + if (is_64b) { + if (env->external_htab) { + stq_p(env->external_htab + pteg_off + (good * 16) + 8, + pte1); + } else { + stq_phys_notdirty(env->htab_base + pteg_off + + (good * 16) + 8, pte1); + } + } else +#endif + { + if (env->external_htab) { + stl_p(env->external_htab + pteg_off + (good * 8) + 4, + pte1); + } else { + stl_phys_notdirty(env->htab_base + pteg_off + + (good * 8) + 4, pte1); + } + } + } + } + + /* We have a TLB that saves 4K pages, so let's + * split a huge page to 4k chunks */ + if (target_page_bits != TARGET_PAGE_BITS) { + ctx->raddr |= (ctx->eaddr & ((1 << target_page_bits) - 1)) + & TARGET_PAGE_MASK; + } + return ret; +} + +static inline int find_pte(CPUPPCState *env, mmu_ctx_t *ctx, int h, int rw, + int type, int target_page_bits) +{ +#if defined(TARGET_PPC64) + if (env->mmu_model & POWERPC_MMU_64) { + return find_pte2(env, ctx, 1, h, rw, type, target_page_bits); + } +#endif + + return find_pte2(env, ctx, 0, h, rw, type, target_page_bits); +} + +#if defined(TARGET_PPC64) +static inline ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr) +{ + uint64_t esid_256M, esid_1T; + int n; + + LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr); + + esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V; + esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V; + + for (n = 0; n < env->slb_nr; n++) { + ppc_slb_t *slb = &env->slb[n]; + + LOG_SLB("%s: slot %d %016" PRIx64 " %016" + PRIx64 "\n", __func__, n, slb->esid, slb->vsid); + /* We check for 1T matches on all MMUs here - if the MMU + * doesn't have 1T segment support, we will have prevented 1T + * entries from being inserted in the slbmte code. 
*/ + if (((slb->esid == esid_256M) && + ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M)) + || ((slb->esid == esid_1T) && + ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) { + return slb; + } + } + + return NULL; +} + +/*****************************************************************************/ +/* SPR accesses */ + +void helper_slbia(CPUPPCState *env) +{ + int n, do_invalidate; + + do_invalidate = 0; + /* XXX: Warning: slbia never invalidates the first segment */ + for (n = 1; n < env->slb_nr; n++) { + ppc_slb_t *slb = &env->slb[n]; + + if (slb->esid & SLB_ESID_V) { + slb->esid &= ~SLB_ESID_V; + /* XXX: given the fact that segment size is 256 MB or 1TB, + * and we still don't have a tlb_flush_mask(env, n, mask) + * in QEMU, we just invalidate all TLBs + */ + do_invalidate = 1; + } + } + if (do_invalidate) { + tlb_flush(env, 1); + } +} + +void helper_slbie(CPUPPCState *env, target_ulong addr) +{ + ppc_slb_t *slb; + + slb = slb_lookup(env, addr); + if (!slb) { + return; + } + + if (slb->esid & SLB_ESID_V) { + slb->esid &= ~SLB_ESID_V; + + /* XXX: given the fact that segment size is 256 MB or 1TB, + * and we still don't have a tlb_flush_mask(env, n, mask) + * in QEMU, we just invalidate all TLBs + */ + tlb_flush(env, 1); + } +} + +int ppc_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs) +{ + int slot = rb & 0xfff; + ppc_slb_t *slb = &env->slb[slot]; + + if (rb & (0x1000 - env->slb_nr)) { + return -1; /* Reserved bits set or slot too high */ + } + if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) { + return -1; /* Bad segment size */ + } + if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) { + return -1; /* 1T segment on MMU that doesn't support it */ + } + + /* Mask out the slot number as we store the entry */ + slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V); + slb->vsid = rs; + + LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64 + " %016" PRIx64 "\n", __func__, slot, rb, rs, + slb->esid, slb->vsid); + + return 0; +} + +static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb, + target_ulong *rt) +{ + int slot = rb & 0xfff; + ppc_slb_t *slb = &env->slb[slot]; + + if (slot >= env->slb_nr) { + return -1; + } + + *rt = slb->esid; + return 0; +} + +static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb, + target_ulong *rt) +{ + int slot = rb & 0xfff; + ppc_slb_t *slb = &env->slb[slot]; + + if (slot >= env->slb_nr) { + return -1; + } + + *rt = slb->vsid; + return 0; +} +#endif /* defined(TARGET_PPC64) */ + +/* Perform segment based translation */ +static inline int get_segment(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, int rw, int type) +{ + target_phys_addr_t hash; + target_ulong vsid; + int ds, pr, target_page_bits; + int ret, ret2; + + pr = msr_pr; + ctx->eaddr = eaddr; +#if defined(TARGET_PPC64) + if (env->mmu_model & POWERPC_MMU_64) { + ppc_slb_t *slb; + target_ulong pageaddr; + int segment_bits; + + LOG_MMU("Check SLBs\n"); + slb = slb_lookup(env, eaddr); + if (!slb) { + return -5; + } + + if (slb->vsid & SLB_VSID_B) { + vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T; + segment_bits = 40; + } else { + vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT; + segment_bits = 28; + } + + target_page_bits = (slb->vsid & SLB_VSID_L) + ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS; + ctx->key = !!(pr ? 
(slb->vsid & SLB_VSID_KP) + : (slb->vsid & SLB_VSID_KS)); + ds = 0; + ctx->nx = !!(slb->vsid & SLB_VSID_N); + + pageaddr = eaddr & ((1ULL << segment_bits) + - (1ULL << target_page_bits)); + if (slb->vsid & SLB_VSID_B) { + hash = vsid ^ (vsid << 25) ^ (pageaddr >> target_page_bits); + } else { + hash = vsid ^ (pageaddr >> target_page_bits); + } + /* Only 5 bits of the page index are used in the AVPN */ + ctx->ptem = (slb->vsid & SLB_VSID_PTEM) | + ((pageaddr >> 16) & ((1ULL << segment_bits) - 0x80)); + } else +#endif /* defined(TARGET_PPC64) */ + { + target_ulong sr, pgidx; + + sr = env->sr[eaddr >> 28]; + ctx->key = (((sr & 0x20000000) && (pr != 0)) || + ((sr & 0x40000000) && (pr == 0))) ? 1 : 0; + ds = sr & 0x80000000 ? 1 : 0; + ctx->nx = sr & 0x10000000 ? 1 : 0; + vsid = sr & 0x00FFFFFF; + target_page_bits = TARGET_PAGE_BITS; + LOG_MMU("Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx " nip=" + TARGET_FMT_lx " lr=" TARGET_FMT_lx + " ir=%d dr=%d pr=%d %d t=%d\n", + eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir, + (int)msr_dr, pr != 0 ? 1 : 0, rw, type); + pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; + hash = vsid ^ pgidx; + ctx->ptem = (vsid << 7) | (pgidx >> 10); + } + LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", + ctx->key, ds, ctx->nx, vsid); + ret = -1; + if (!ds) { + /* Check if instruction fetch is allowed, if needed */ + if (type != ACCESS_CODE || ctx->nx == 0) { + /* Page address translation */ + LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx + " hash " TARGET_FMT_plx "\n", + env->htab_base, env->htab_mask, hash); + ctx->hash[0] = hash; + ctx->hash[1] = ~hash; + + /* Initialize real address with an invalid value */ + ctx->raddr = (target_phys_addr_t)-1ULL; + if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_6xx || + env->mmu_model == POWERPC_MMU_SOFT_74xx)) { + /* Software TLB search */ + ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type); + } else { + LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx + " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx + " hash=" TARGET_FMT_plx "\n", + env->htab_base, env->htab_mask, vsid, ctx->ptem, + ctx->hash[0]); + /* Primary table lookup */ + ret = find_pte(env, ctx, 0, rw, type, target_page_bits); + if (ret < 0) { + /* Secondary table lookup */ + if (eaddr != 0xEFFFFFFF) { + LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx + " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx + " hash=" TARGET_FMT_plx "\n", env->htab_base, + env->htab_mask, vsid, ctx->ptem, ctx->hash[1]); + } + ret2 = find_pte(env, ctx, 1, rw, type, + target_page_bits); + if (ret2 != -1) { + ret = ret2; + } + } + } +#if defined(DUMP_PAGE_TABLES) + if (qemu_log_enabled()) { + target_phys_addr_t curaddr; + uint32_t a0, a1, a2, a3; + + qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx + "\n", sdr, mask + 0x80); + for (curaddr = sdr; curaddr < (sdr + mask + 0x80); + curaddr += 16) { + a0 = ldl_phys(curaddr); + a1 = ldl_phys(curaddr + 4); + a2 = ldl_phys(curaddr + 8); + a3 = ldl_phys(curaddr + 12); + if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { + qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n", + curaddr, a0, a1, a2, a3); + } + } + } +#endif + } else { + LOG_MMU("No access allowed\n"); + ret = -3; + } + } else { + target_ulong sr; + + LOG_MMU("direct store...\n"); + /* Direct-store segment : absolutely *BUGGY* for now */ + + /* Direct-store implies a 32-bit MMU. + * Check the Segment Register's bus unit ID (BUID). 
+ */ + sr = env->sr[eaddr >> 28]; + if ((sr & 0x1FF00000) >> 20 == 0x07f) { + /* Memory-forced I/O controller interface access */ + /* If T=1 and BUID=x'07F', the 601 performs a memory access + * to SR[28-31] LA[4-31], bypassing all protection mechanisms. + */ + ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); + ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return 0; + } + + switch (type) { + case ACCESS_INT: + /* Integer load/store : only access allowed */ + break; + case ACCESS_CODE: + /* No code fetch is allowed in direct-store areas */ + return -4; + case ACCESS_FLOAT: + /* Floating point load/store */ + return -4; + case ACCESS_RES: + /* lwarx, ldarx or srwcx. */ + return -4; + case ACCESS_CACHE: + /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */ + /* Should make the instruction do no-op. + * As it already do no-op, it's quite easy :-) + */ + ctx->raddr = eaddr; + return 0; + case ACCESS_EXT: + /* eciwx or ecowx */ + return -4; + default: + qemu_log("ERROR: instruction should not need " + "address translation\n"); + return -4; + } + if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) { + ctx->raddr = eaddr; + ret = 2; + } else { + ret = -2; + } + } + + return ret; +} + +/* Generic TLB check function for embedded PowerPC implementations */ +static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, + target_phys_addr_t *raddrp, + target_ulong address, uint32_t pid, int ext, + int i) +{ + target_ulong mask; + + /* Check valid flag */ + if (!(tlb->prot & PAGE_VALID)) { + return -1; + } + mask = ~(tlb->size - 1); + LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx + " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN, + mask, (uint32_t)tlb->PID, tlb->prot); + /* Check PID */ + if (tlb->PID != 0 && tlb->PID != pid) { + return -1; + } + /* Check effective address */ + if ((address & mask) != tlb->EPN) { + return -1; + } + *raddrp = (tlb->RPN & mask) | (address & ~mask); +#if (TARGET_PHYS_ADDR_BITS >= 36) + if (ext) { + /* Extend the physical address to 36 bits */ + *raddrp |= (target_phys_addr_t)(tlb->RPN & 0xF) << 32; + } +#endif + + return 0; +} + +/* Generic TLB search function for PowerPC embedded implementations */ +static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, + uint32_t pid) +{ + ppcemb_tlb_t *tlb; + target_phys_addr_t raddr; + int i, ret; + + /* Default return value is no match */ + ret = -1; + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) { + ret = i; + break; + } + } + + return ret; +} + +/* Helpers specific to PowerPC 40x implementations */ +static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env) +{ + ppcemb_tlb_t *tlb; + int i; + + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + tlb->prot &= ~PAGE_VALID; + } + tlb_flush(env, 1); +} + +static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState *env, + target_ulong eaddr, uint32_t pid) +{ +#if !defined(FLUSH_ALL_TLBS) + ppcemb_tlb_t *tlb; + target_phys_addr_t raddr; + target_ulong page, end; + int i; + + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + if (ppcemb_tlb_check(env, tlb, &raddr, eaddr, pid, 0, i) == 0) { + end = tlb->EPN + tlb->size; + for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { + tlb_flush_page(env, page); + } + tlb->prot &= ~PAGE_VALID; + break; + } + } +#else + ppc4xx_tlb_invalidate_all(env); +#endif +} + +static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, + 
target_ulong address, int rw, + int access_type) +{ + ppcemb_tlb_t *tlb; + target_phys_addr_t raddr; + int i, ret, zsel, zpr, pr; + + ret = -1; + raddr = (target_phys_addr_t)-1ULL; + pr = msr_pr; + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + if (ppcemb_tlb_check(env, tlb, &raddr, address, + env->spr[SPR_40x_PID], 0, i) < 0) { + continue; + } + zsel = (tlb->attr >> 4) & 0xF; + zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3; + LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n", + __func__, i, zsel, zpr, rw, tlb->attr); + /* Check execute enable bit */ + switch (zpr) { + case 0x2: + if (pr != 0) { + goto check_perms; + } + /* No break here */ + case 0x3: + /* All accesses granted */ + ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + ret = 0; + break; + case 0x0: + if (pr != 0) { + /* Raise Zone protection fault. */ + env->spr[SPR_40x_ESR] = 1 << 22; + ctx->prot = 0; + ret = -2; + break; + } + /* No break here */ + case 0x1: + check_perms: + /* Check from TLB entry */ + ctx->prot = tlb->prot; + ret = check_prot(ctx->prot, rw, access_type); + if (ret == -2) { + env->spr[SPR_40x_ESR] = 0; + } + break; + } + if (ret >= 0) { + ctx->raddr = raddr; + LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, ctx->raddr, ctx->prot, + ret); + return 0; + } + } + LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, raddr, ctx->prot, ret); + + return ret; +} + +void store_40x_sler(CPUPPCState *env, uint32_t val) +{ + /* XXX: TO BE FIXED */ + if (val != 0x00000000) { + cpu_abort(env, "Little-endian regions are not supported by now\n"); + } + env->spr[SPR_405_SLER] = val; +} + +static inline int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, + target_phys_addr_t *raddr, int *prot, + target_ulong address, int rw, + int access_type, int i) +{ + int ret, prot2; + + if (ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID], + !env->nb_pids, i) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID1] && + ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID1], 0, i) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID2] && + ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID2], 0, i) >= 0) { + goto found_tlb; + } + + LOG_SWTLB("%s: TLB entry not found\n", __func__); + return -1; + +found_tlb: + + if (msr_pr != 0) { + prot2 = tlb->prot & 0xF; + } else { + prot2 = (tlb->prot >> 4) & 0xF; + } + + /* Check the address space */ + if (access_type == ACCESS_CODE) { + if (msr_ir != (tlb->attr & 1)) { + LOG_SWTLB("%s: AS doesn't match\n", __func__); + return -1; + } + + *prot = prot2; + if (prot2 & PAGE_EXEC) { + LOG_SWTLB("%s: good TLB!\n", __func__); + return 0; + } + + LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2); + ret = -3; + } else { + if (msr_dr != (tlb->attr & 1)) { + LOG_SWTLB("%s: AS doesn't match\n", __func__); + return -1; + } + + *prot = prot2; + if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) { + LOG_SWTLB("%s: found TLB!\n", __func__); + return 0; + } + + LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2); + ret = -2; + } + + return ret; +} + +static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong address, int rw, + int access_type) +{ + ppcemb_tlb_t *tlb; + target_phys_addr_t raddr; + int i, ret; + + ret = -1; + raddr = (target_phys_addr_t)-1ULL; + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + ret = 
mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw, + access_type, i); + if (!ret) { + break; + } + } + + if (ret >= 0) { + ctx->raddr = raddr; + LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, ctx->raddr, ctx->prot, + ret); + } else { + LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, raddr, ctx->prot, ret); + } + + return ret; +} + +void booke206_flush_tlb(CPUPPCState *env, int flags, const int check_iprot) +{ + int tlb_size; + int i, j; + ppcmas_tlb_t *tlb = env->tlb.tlbm; + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + if (flags & (1 << i)) { + tlb_size = booke206_tlb_size(env, i); + for (j = 0; j < tlb_size; j++) { + if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) { + tlb[j].mas1 &= ~MAS1_VALID; + } + } + } + tlb += booke206_tlb_size(env, i); + } + + tlb_flush(env, 1); +} + +target_phys_addr_t booke206_tlb_to_page_size(CPUPPCState *env, + ppcmas_tlb_t *tlb) +{ + int tlbm_size; + + tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; + + return 1024ULL << tlbm_size; +} + +/* TLB check function for MAS based SoftTLBs */ +int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, + target_phys_addr_t *raddrp, + target_ulong address, uint32_t pid) +{ + target_ulong mask; + uint32_t tlb_pid; + + /* Check valid flag */ + if (!(tlb->mas1 & MAS1_VALID)) { + return -1; + } + + mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); + LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%" + PRIx64 " mask=0x" TARGET_FMT_lx " MAS7_3=0x%" PRIx64 " MAS8=%x\n", + __func__, address, pid, tlb->mas1, tlb->mas2, mask, tlb->mas7_3, + tlb->mas8); + + /* Check PID */ + tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; + if (tlb_pid != 0 && tlb_pid != pid) { + return -1; + } + + /* Check effective address */ + if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) { + return -1; + } + + if (raddrp) { + *raddrp = (tlb->mas7_3 & mask) | (address & ~mask); + } + + return 0; +} + +static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb, + target_phys_addr_t *raddr, int *prot, + target_ulong address, int rw, + int access_type) +{ + int ret; + int prot2 = 0; + + if (ppcmas_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID]) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID1] && + ppcmas_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID1]) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID2] && + ppcmas_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID2]) >= 0) { + goto found_tlb; + } + + LOG_SWTLB("%s: TLB entry not found\n", __func__); + return -1; + +found_tlb: + + if (msr_pr != 0) { + if (tlb->mas7_3 & MAS3_UR) { + prot2 |= PAGE_READ; + } + if (tlb->mas7_3 & MAS3_UW) { + prot2 |= PAGE_WRITE; + } + if (tlb->mas7_3 & MAS3_UX) { + prot2 |= PAGE_EXEC; + } + } else { + if (tlb->mas7_3 & MAS3_SR) { + prot2 |= PAGE_READ; + } + if (tlb->mas7_3 & MAS3_SW) { + prot2 |= PAGE_WRITE; + } + if (tlb->mas7_3 & MAS3_SX) { + prot2 |= PAGE_EXEC; + } + } + + /* Check the address space and permissions */ + if (access_type == ACCESS_CODE) { + if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { + LOG_SWTLB("%s: AS doesn't match\n", __func__); + return -1; + } + + *prot = prot2; + if (prot2 & PAGE_EXEC) { + LOG_SWTLB("%s: good TLB!\n", __func__); + return 0; + } + + LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2); + ret = -3; + } else { + if (msr_dr != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { + LOG_SWTLB("%s: AS 
doesn't match\n", __func__); + return -1; + } + + *prot = prot2; + if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) { + LOG_SWTLB("%s: found TLB!\n", __func__); + return 0; + } + + LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2); + ret = -2; + } + + return ret; +} + +static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong address, int rw, + int access_type) +{ + ppcmas_tlb_t *tlb; + target_phys_addr_t raddr; + int i, j, ret; + + ret = -1; + raddr = (target_phys_addr_t)-1ULL; + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + int ways = booke206_tlb_ways(env, i); + + for (j = 0; j < ways; j++) { + tlb = booke206_get_tlbm(env, i, address, j); + if (!tlb) { + continue; + } + ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address, + rw, access_type); + if (ret != -1) { + goto found_tlb; + } + } + } + +found_tlb: + + if (ret >= 0) { + ctx->raddr = raddr; + LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, ctx->raddr, ctx->prot, + ret); + } else { + LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, raddr, ctx->prot, ret); + } + + return ret; +} + +static const char *book3e_tsize_to_str[32] = { + "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", + "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", + "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G", + "1T", "2T" +}; + +static void mmubooke_dump_mmu(FILE *f, fprintf_function cpu_fprintf, + CPUPPCState *env) +{ + ppcemb_tlb_t *entry; + int i; + + if (kvm_enabled() && !env->kvm_sw_tlb) { + cpu_fprintf(f, "Cannot access KVM TLB\n"); + return; + } + + cpu_fprintf(f, "\nTLB:\n"); + cpu_fprintf(f, "Effective Physical Size PID Prot " + "Attr\n"); + + entry = &env->tlb.tlbe[0]; + for (i = 0; i < env->nb_tlb; i++, entry++) { + target_phys_addr_t ea, pa; + target_ulong mask; + uint64_t size = (uint64_t)entry->size; + char size_buf[20]; + + /* Check valid flag */ + if (!(entry->prot & PAGE_VALID)) { + continue; + } + + mask = ~(entry->size - 1); + ea = entry->EPN & mask; + pa = entry->RPN & mask; +#if (TARGET_PHYS_ADDR_BITS >= 36) + /* Extend the physical address to 36 bits */ + pa |= (target_phys_addr_t)(entry->RPN & 0xF) << 32; +#endif + size /= 1024; + if (size >= 1024) { + snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / 1024); + } else { + snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size); + } + cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n", + (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID, + entry->prot, entry->attr); + } + +} + +static void mmubooke206_dump_one_tlb(FILE *f, fprintf_function cpu_fprintf, + CPUPPCState *env, int tlbn, int offset, + int tlbsize) +{ + ppcmas_tlb_t *entry; + int i; + + cpu_fprintf(f, "\nTLB%d:\n", tlbn); + cpu_fprintf(f, "Effective Physical Size TID TS SRWX" + " URWX WIMGE U0123\n"); + + entry = &env->tlb.tlbm[offset]; + for (i = 0; i < tlbsize; i++, entry++) { + target_phys_addr_t ea, pa, size; + int tsize; + + if (!(entry->mas1 & MAS1_VALID)) { + continue; + } + + tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; + size = 1024ULL << tsize; + ea = entry->mas2 & ~(size - 1); + pa = entry->mas7_3 & ~(size - 1); + + cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c" + "U%c%c%c %c%c%c%c%c U%c%c%c%c\n", + (uint64_t)ea, (uint64_t)pa, + book3e_tsize_to_str[tsize], + (entry->mas1 & MAS1_TID_MASK) >> 
MAS1_TID_SHIFT, + (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT, + entry->mas7_3 & MAS3_SR ? 'R' : '-', + entry->mas7_3 & MAS3_SW ? 'W' : '-', + entry->mas7_3 & MAS3_SX ? 'X' : '-', + entry->mas7_3 & MAS3_UR ? 'R' : '-', + entry->mas7_3 & MAS3_UW ? 'W' : '-', + entry->mas7_3 & MAS3_UX ? 'X' : '-', + entry->mas2 & MAS2_W ? 'W' : '-', + entry->mas2 & MAS2_I ? 'I' : '-', + entry->mas2 & MAS2_M ? 'M' : '-', + entry->mas2 & MAS2_G ? 'G' : '-', + entry->mas2 & MAS2_E ? 'E' : '-', + entry->mas7_3 & MAS3_U0 ? '0' : '-', + entry->mas7_3 & MAS3_U1 ? '1' : '-', + entry->mas7_3 & MAS3_U2 ? '2' : '-', + entry->mas7_3 & MAS3_U3 ? '3' : '-'); + } +} + +static void mmubooke206_dump_mmu(FILE *f, fprintf_function cpu_fprintf, + CPUPPCState *env) +{ + int offset = 0; + int i; + + if (kvm_enabled() && !env->kvm_sw_tlb) { + cpu_fprintf(f, "Cannot access KVM TLB\n"); + return; + } + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + int size = booke206_tlb_size(env, i); + + if (size == 0) { + continue; + } + + mmubooke206_dump_one_tlb(f, cpu_fprintf, env, i, offset, size); + offset += size; + } +} + +#if defined(TARGET_PPC64) +static void mmubooks_dump_mmu(FILE *f, fprintf_function cpu_fprintf, + CPUPPCState *env) +{ + int i; + uint64_t slbe, slbv; + + cpu_synchronize_state(env); + + cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n"); + for (i = 0; i < env->slb_nr; i++) { + slbe = env->slb[i].esid; + slbv = env->slb[i].vsid; + if (slbe == 0 && slbv == 0) { + continue; + } + cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n", + i, slbe, slbv); + } +} +#endif + +void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env) +{ + switch (env->mmu_model) { + case POWERPC_MMU_BOOKE: + mmubooke_dump_mmu(f, cpu_fprintf, env); + break; + case POWERPC_MMU_BOOKE206: + mmubooke206_dump_mmu(f, cpu_fprintf, env); + break; +#if defined(TARGET_PPC64) + case POWERPC_MMU_64B: + case POWERPC_MMU_2_06: + case POWERPC_MMU_2_06d: + mmubooks_dump_mmu(f, cpu_fprintf, env); + break; +#endif + default: + qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__); + } +} + +static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, int rw) +{ + int in_plb, ret; + + ctx->raddr = eaddr; + ctx->prot = PAGE_READ | PAGE_EXEC; + ret = 0; + switch (env->mmu_model) { + case POWERPC_MMU_32B: + case POWERPC_MMU_601: + case POWERPC_MMU_SOFT_6xx: + case POWERPC_MMU_SOFT_74xx: + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_REAL: + case POWERPC_MMU_BOOKE: + ctx->prot |= PAGE_WRITE; + break; +#if defined(TARGET_PPC64) + case POWERPC_MMU_620: + case POWERPC_MMU_64B: + case POWERPC_MMU_2_06: + case POWERPC_MMU_2_06d: + /* Real address are 60 bits long */ + ctx->raddr &= 0x0FFFFFFFFFFFFFFFULL; + ctx->prot |= PAGE_WRITE; + break; +#endif + case POWERPC_MMU_SOFT_4xx_Z: + if (unlikely(msr_pe != 0)) { + /* 403 family add some particular protections, + * using PBL/PBU registers for accesses with no translation. + */ + in_plb = + /* Check PLB validity */ + (env->pb[0] < env->pb[1] && + /* and address in plb area */ + eaddr >= env->pb[0] && eaddr < env->pb[1]) || + (env->pb[2] < env->pb[3] && + eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 
1 : 0; + if (in_plb ^ msr_px) { + /* Access in protected area */ + if (rw == 1) { + /* Access is not allowed */ + ret = -2; + } + } else { + /* Read-write access is allowed */ + ctx->prot |= PAGE_WRITE; + } + } + break; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(env, "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_BOOKE206: + cpu_abort(env, "BookE 2.06 MMU doesn't have physical real mode\n"); + break; + default: + cpu_abort(env, "Unknown or invalid MMU model\n"); + return -1; + } + + return ret; +} + +int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, + int rw, int access_type) +{ + int ret; + +#if 0 + qemu_log("%s\n", __func__); +#endif + if ((access_type == ACCESS_CODE && msr_ir == 0) || + (access_type != ACCESS_CODE && msr_dr == 0)) { + if (env->mmu_model == POWERPC_MMU_BOOKE) { + /* The BookE MMU always performs address translation. The + IS and DS bits only affect the address space. */ + ret = mmubooke_get_physical_address(env, ctx, eaddr, + rw, access_type); + } else if (env->mmu_model == POWERPC_MMU_BOOKE206) { + ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw, + access_type); + } else { + /* No address translation. */ + ret = check_physical(env, ctx, eaddr, rw); + } + } else { + ret = -1; + switch (env->mmu_model) { + case POWERPC_MMU_32B: + case POWERPC_MMU_601: + case POWERPC_MMU_SOFT_6xx: + case POWERPC_MMU_SOFT_74xx: + /* Try to find a BAT */ + if (env->nb_BATs != 0) { + ret = get_bat(env, ctx, eaddr, rw, access_type); + } +#if defined(TARGET_PPC64) + case POWERPC_MMU_620: + case POWERPC_MMU_64B: + case POWERPC_MMU_2_06: + case POWERPC_MMU_2_06d: +#endif + if (ret < 0) { + /* We didn't match any BAT entry or don't have BATs */ + ret = get_segment(env, ctx, eaddr, rw, access_type); + } + break; + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_SOFT_4xx_Z: + ret = mmu40x_get_physical_address(env, ctx, eaddr, + rw, access_type); + break; + case POWERPC_MMU_BOOKE: + ret = mmubooke_get_physical_address(env, ctx, eaddr, + rw, access_type); + break; + case POWERPC_MMU_BOOKE206: + ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw, + access_type); + break; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(env, "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_REAL: + cpu_abort(env, "PowerPC in real mode do not do any translation\n"); + return -1; + default: + cpu_abort(env, "Unknown or invalid MMU model\n"); + return -1; + } + } +#if 0 + qemu_log("%s address " TARGET_FMT_lx " => %d " TARGET_FMT_plx "\n", + __func__, eaddr, ret, ctx->raddr); +#endif + + return ret; +} + +target_phys_addr_t cpu_get_phys_page_debug(CPUPPCState *env, target_ulong addr) +{ + mmu_ctx_t ctx; + + if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) { + return -1; + } + + return ctx.raddr & TARGET_PAGE_MASK; +} + +static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, + int rw) +{ + env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; + env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; + env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; + env->spr[SPR_BOOKE_MAS3] = 0; + env->spr[SPR_BOOKE_MAS6] = 0; + env->spr[SPR_BOOKE_MAS7] = 0; + + /* AS */ + if (((rw == 2) && msr_ir) || ((rw != 2) && msr_dr)) { + env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; + env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; + } + + env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; + env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; + + switch 
(env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) { + case MAS4_TIDSELD_PID0: + env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID] << MAS1_TID_SHIFT; + break; + case MAS4_TIDSELD_PID1: + env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID1] << MAS1_TID_SHIFT; + break; + case MAS4_TIDSELD_PID2: + env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID2] << MAS1_TID_SHIFT; + break; + } + + env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; + + /* next victim logic */ + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; + env->last_way++; + env->last_way &= booke206_tlb_ways(env, 0) - 1; + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; +} + +/* Perform address translation */ +int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw, + int mmu_idx) +{ + mmu_ctx_t ctx; + int access_type; + int ret = 0; + + if (rw == 2) { + /* code access */ + rw = 0; + access_type = ACCESS_CODE; + } else { + /* data access */ + access_type = env->access_type; + } + ret = get_physical_address(env, &ctx, address, rw, access_type); + if (ret == 0) { + tlb_set_page(env, address & TARGET_PAGE_MASK, + ctx.raddr & TARGET_PAGE_MASK, ctx.prot, + mmu_idx, TARGET_PAGE_SIZE); + ret = 0; + } else if (ret < 0) { + LOG_MMU_STATE(env); + if (access_type == ACCESS_CODE) { + switch (ret) { + case -1: + /* No matches in page tables or TLB */ + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_6xx: + env->exception_index = POWERPC_EXCP_IFTLB; + env->error_code = 1 << 18; + env->spr[SPR_IMISS] = address; + env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; + goto tlb_miss; + case POWERPC_MMU_SOFT_74xx: + env->exception_index = POWERPC_EXCP_IFTLB; + goto tlb_miss_74xx; + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_SOFT_4xx_Z: + env->exception_index = POWERPC_EXCP_ITLB; + env->error_code = 0; + env->spr[SPR_40x_DEAR] = address; + env->spr[SPR_40x_ESR] = 0x00000000; + break; + case POWERPC_MMU_32B: + case POWERPC_MMU_601: +#if defined(TARGET_PPC64) + case POWERPC_MMU_620: + case POWERPC_MMU_64B: + case POWERPC_MMU_2_06: + case POWERPC_MMU_2_06d: +#endif + env->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x40000000; + break; + case POWERPC_MMU_BOOKE206: + booke206_update_mas_tlb_miss(env, address, rw); + /* fall through */ + case POWERPC_MMU_BOOKE: + env->exception_index = POWERPC_EXCP_ITLB; + env->error_code = 0; + env->spr[SPR_BOOKE_DEAR] = address; + return -1; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(env, "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_REAL: + cpu_abort(env, "PowerPC in real mode should never raise " + "any MMU exceptions\n"); + return -1; + default: + cpu_abort(env, "Unknown or invalid MMU model\n"); + return -1; + } + break; + case -2: + /* Access rights violation */ + env->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x08000000; + break; + case -3: + /* No execute protection violation */ + if ((env->mmu_model == POWERPC_MMU_BOOKE) || + (env->mmu_model == POWERPC_MMU_BOOKE206)) { + env->spr[SPR_BOOKE_ESR] = 0x00000000; + } + env->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x10000000; + break; + case -4: + /* Direct store exception */ + /* No code fetch is allowed in direct-store areas */ + env->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x10000000; + break; +#if defined(TARGET_PPC64) + case -5: + /* No match in segment table */ + if (env->mmu_model == POWERPC_MMU_620) { + env->exception_index = POWERPC_EXCP_ISI; + /* XXX: this might be incorrect */ + env->error_code = 0x40000000; + } else { 
+ env->exception_index = POWERPC_EXCP_ISEG; + env->error_code = 0; + } + break; +#endif + } + } else { + switch (ret) { + case -1: + /* No matches in page tables or TLB */ + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_6xx: + if (rw == 1) { + env->exception_index = POWERPC_EXCP_DSTLB; + env->error_code = 1 << 16; + } else { + env->exception_index = POWERPC_EXCP_DLTLB; + env->error_code = 0; + } + env->spr[SPR_DMISS] = address; + env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; + tlb_miss: + env->error_code |= ctx.key << 19; + env->spr[SPR_HASH1] = env->htab_base + + get_pteg_offset(env, ctx.hash[0], HASH_PTE_SIZE_32); + env->spr[SPR_HASH2] = env->htab_base + + get_pteg_offset(env, ctx.hash[1], HASH_PTE_SIZE_32); + break; + case POWERPC_MMU_SOFT_74xx: + if (rw == 1) { + env->exception_index = POWERPC_EXCP_DSTLB; + } else { + env->exception_index = POWERPC_EXCP_DLTLB; + } + tlb_miss_74xx: + /* Implement LRU algorithm */ + env->error_code = ctx.key << 19; + env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) | + ((env->last_way + 1) & (env->nb_ways - 1)); + env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem; + break; + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_SOFT_4xx_Z: + env->exception_index = POWERPC_EXCP_DTLB; + env->error_code = 0; + env->spr[SPR_40x_DEAR] = address; + if (rw) { + env->spr[SPR_40x_ESR] = 0x00800000; + } else { + env->spr[SPR_40x_ESR] = 0x00000000; + } + break; + case POWERPC_MMU_32B: + case POWERPC_MMU_601: +#if defined(TARGET_PPC64) + case POWERPC_MMU_620: + case POWERPC_MMU_64B: + case POWERPC_MMU_2_06: + case POWERPC_MMU_2_06d: +#endif + env->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = address; + if (rw == 1) { + env->spr[SPR_DSISR] = 0x42000000; + } else { + env->spr[SPR_DSISR] = 0x40000000; + } + break; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(env, "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_BOOKE206: + booke206_update_mas_tlb_miss(env, address, rw); + /* fall through */ + case POWERPC_MMU_BOOKE: + env->exception_index = POWERPC_EXCP_DTLB; + env->error_code = 0; + env->spr[SPR_BOOKE_DEAR] = address; + env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0; + return -1; + case POWERPC_MMU_REAL: + cpu_abort(env, "PowerPC in real mode should never raise " + "any MMU exceptions\n"); + return -1; + default: + cpu_abort(env, "Unknown or invalid MMU model\n"); + return -1; + } + break; + case -2: + /* Access rights violation */ + env->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + if (env->mmu_model == POWERPC_MMU_SOFT_4xx + || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) { + env->spr[SPR_40x_DEAR] = address; + if (rw) { + env->spr[SPR_40x_ESR] |= 0x00800000; + } + } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || + (env->mmu_model == POWERPC_MMU_BOOKE206)) { + env->spr[SPR_BOOKE_DEAR] = address; + env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0; + } else { + env->spr[SPR_DAR] = address; + if (rw == 1) { + env->spr[SPR_DSISR] = 0x0A000000; + } else { + env->spr[SPR_DSISR] = 0x08000000; + } + } + break; + case -4: + /* Direct store exception */ + switch (access_type) { + case ACCESS_FLOAT: + /* Floating point load/store */ + env->exception_index = POWERPC_EXCP_ALIGN; + env->error_code = POWERPC_EXCP_ALIGN_FP; + env->spr[SPR_DAR] = address; + break; + case ACCESS_RES: + /* lwarx, ldarx or stwcx. 
*/ + env->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = address; + if (rw == 1) { + env->spr[SPR_DSISR] = 0x06000000; + } else { + env->spr[SPR_DSISR] = 0x04000000; + } + break; + case ACCESS_EXT: + /* eciwx or ecowx */ + env->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = address; + if (rw == 1) { + env->spr[SPR_DSISR] = 0x06100000; + } else { + env->spr[SPR_DSISR] = 0x04100000; + } + break; + default: + printf("DSI: invalid exception (%d)\n", ret); + env->exception_index = POWERPC_EXCP_PROGRAM; + env->error_code = + POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; + env->spr[SPR_DAR] = address; + break; + } + break; +#if defined(TARGET_PPC64) + case -5: + /* No match in segment table */ + if (env->mmu_model == POWERPC_MMU_620) { + env->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = address; + /* XXX: this might be incorrect */ + if (rw == 1) { + env->spr[SPR_DSISR] = 0x42000000; + } else { + env->spr[SPR_DSISR] = 0x40000000; + } + } else { + env->exception_index = POWERPC_EXCP_DSEG; + env->error_code = 0; + env->spr[SPR_DAR] = address; + } + break; +#endif + } + } +#if 0 + printf("%s: set exception to %d %02x\n", __func__, + env->exception, env->error_code); +#endif + ret = 1; + } + + return ret; +} + +/*****************************************************************************/ +/* BATs management */ +#if !defined(FLUSH_ALL_TLBS) +static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu, + target_ulong mask) +{ + target_ulong base, end, page; + + base = BATu & ~0x0001FFFF; + end = base + mask + 0x00020000; + LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " (" + TARGET_FMT_lx ")\n", base, end, mask); + for (page = base; page != end; page += TARGET_PAGE_SIZE) { + tlb_flush_page(env, page); + } + LOG_BATS("Flush done\n"); +} +#endif + +static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr, + target_ulong value) +{ + LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID, + nr, ul == 0 ? 
'u' : 'l', value, env->nip); +} + +void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value) +{ + target_ulong mask; + + dump_store_bat(env, 'I', 0, nr, value); + if (env->IBAT[0][nr] != value) { + mask = (value << 15) & 0x0FFE0000UL; +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#endif + /* When storing valid upper BAT, mask BEPI and BRPN + * and invalidate all TLBs covered by this BAT + */ + mask = (value << 15) & 0x0FFE0000UL; + env->IBAT[0][nr] = (value & 0x00001FFFUL) | + (value & ~0x0001FFFFUL & ~mask); + env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) | + (env->IBAT[1][nr] & ~0x0001FFFF & ~mask); +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#else + tlb_flush(env, 1); +#endif + } +} + +void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value) +{ + dump_store_bat(env, 'I', 1, nr, value); + env->IBAT[1][nr] = value; +} + +void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value) +{ + target_ulong mask; + + dump_store_bat(env, 'D', 0, nr, value); + if (env->DBAT[0][nr] != value) { + /* When storing valid upper BAT, mask BEPI and BRPN + * and invalidate all TLBs covered by this BAT + */ + mask = (value << 15) & 0x0FFE0000UL; +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->DBAT[0][nr], mask); +#endif + mask = (value << 15) & 0x0FFE0000UL; + env->DBAT[0][nr] = (value & 0x00001FFFUL) | + (value & ~0x0001FFFFUL & ~mask); + env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) | + (env->DBAT[1][nr] & ~0x0001FFFF & ~mask); +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->DBAT[0][nr], mask); +#else + tlb_flush(env, 1); +#endif + } +} + +void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value) +{ + dump_store_bat(env, 'D', 1, nr, value); + env->DBAT[1][nr] = value; +} + +void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value) +{ + target_ulong mask; +#if defined(FLUSH_ALL_TLBS) + int do_inval; +#endif + + dump_store_bat(env, 'I', 0, nr, value); + if (env->IBAT[0][nr] != value) { +#if defined(FLUSH_ALL_TLBS) + do_inval = 0; +#endif + mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; + if (env->IBAT[1][nr] & 0x40) { + /* Invalidate BAT only if it is valid */ +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#else + do_inval = 1; +#endif + } + /* When storing valid upper BAT, mask BEPI and BRPN + * and invalidate all TLBs covered by this BAT + */ + env->IBAT[0][nr] = (value & 0x00001FFFUL) | + (value & ~0x0001FFFFUL & ~mask); + env->DBAT[0][nr] = env->IBAT[0][nr]; + if (env->IBAT[1][nr] & 0x40) { +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#else + do_inval = 1; +#endif + } +#if defined(FLUSH_ALL_TLBS) + if (do_inval) { + tlb_flush(env, 1); + } +#endif + } +} + +void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value) +{ + target_ulong mask; +#if defined(FLUSH_ALL_TLBS) + int do_inval; +#endif + + dump_store_bat(env, 'I', 1, nr, value); + if (env->IBAT[1][nr] != value) { +#if defined(FLUSH_ALL_TLBS) + do_inval = 0; +#endif + if (env->IBAT[1][nr] & 0x40) { +#if !defined(FLUSH_ALL_TLBS) + mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#else + do_inval = 1; +#endif + } + if (value & 0x40) { +#if !defined(FLUSH_ALL_TLBS) + mask = (value << 17) & 0x0FFE0000UL; + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#else + do_inval = 1; +#endif + } + env->IBAT[1][nr] = value; + 
env->DBAT[1][nr] = value; +#if defined(FLUSH_ALL_TLBS) + if (do_inval) { + tlb_flush(env, 1); + } +#endif + } +} + +/*****************************************************************************/ +/* TLB management */ +void ppc_tlb_invalidate_all(CPUPPCState *env) +{ + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_6xx: + case POWERPC_MMU_SOFT_74xx: + ppc6xx_tlb_invalidate_all(env); + break; + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_SOFT_4xx_Z: + ppc4xx_tlb_invalidate_all(env); + break; + case POWERPC_MMU_REAL: + cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n"); + break; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(env, "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_BOOKE: + tlb_flush(env, 1); + break; + case POWERPC_MMU_BOOKE206: + booke206_flush_tlb(env, -1, 0); + break; + case POWERPC_MMU_32B: + case POWERPC_MMU_601: +#if defined(TARGET_PPC64) + case POWERPC_MMU_620: + case POWERPC_MMU_64B: + case POWERPC_MMU_2_06: + case POWERPC_MMU_2_06d: +#endif /* defined(TARGET_PPC64) */ + tlb_flush(env, 1); + break; + default: + /* XXX: TODO */ + cpu_abort(env, "Unknown MMU model\n"); + break; + } +} + +void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) +{ +#if !defined(FLUSH_ALL_TLBS) + addr &= TARGET_PAGE_MASK; + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_6xx: + case POWERPC_MMU_SOFT_74xx: + ppc6xx_tlb_invalidate_virt(env, addr, 0); + if (env->id_tlbs == 1) { + ppc6xx_tlb_invalidate_virt(env, addr, 1); + } + break; + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_SOFT_4xx_Z: + ppc4xx_tlb_invalidate_virt(env, addr, env->spr[SPR_40x_PID]); + break; + case POWERPC_MMU_REAL: + cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n"); + break; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(env, "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_BOOKE: + /* XXX: TODO */ + cpu_abort(env, "BookE MMU model is not implemented\n"); + break; + case POWERPC_MMU_BOOKE206: + /* XXX: TODO */ + cpu_abort(env, "BookE 2.06 MMU model is not implemented\n"); + break; + case POWERPC_MMU_32B: + case POWERPC_MMU_601: + /* tlbie invalidate TLBs for all segments */ + addr &= ~((target_ulong)-1ULL << 28); + /* XXX: this case should be optimized, + * giving a mask to tlb_flush_page + */ + tlb_flush_page(env, addr | (0x0 << 28)); + tlb_flush_page(env, addr | (0x1 << 28)); + tlb_flush_page(env, addr | (0x2 << 28)); + tlb_flush_page(env, addr | (0x3 << 28)); + tlb_flush_page(env, addr | (0x4 << 28)); + tlb_flush_page(env, addr | (0x5 << 28)); + tlb_flush_page(env, addr | (0x6 << 28)); + tlb_flush_page(env, addr | (0x7 << 28)); + tlb_flush_page(env, addr | (0x8 << 28)); + tlb_flush_page(env, addr | (0x9 << 28)); + tlb_flush_page(env, addr | (0xA << 28)); + tlb_flush_page(env, addr | (0xB << 28)); + tlb_flush_page(env, addr | (0xC << 28)); + tlb_flush_page(env, addr | (0xD << 28)); + tlb_flush_page(env, addr | (0xE << 28)); + tlb_flush_page(env, addr | (0xF << 28)); + break; +#if defined(TARGET_PPC64) + case POWERPC_MMU_620: + case POWERPC_MMU_64B: + case POWERPC_MMU_2_06: + case POWERPC_MMU_2_06d: + /* tlbie invalidate TLBs for all segments */ + /* XXX: given the fact that there are too many segments to invalidate, + * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU, + * we just invalidate all TLBs + */ + tlb_flush(env, 1); + break; +#endif /* defined(TARGET_PPC64) */ + default: + /* XXX: TODO */ + cpu_abort(env, "Unknown MMU model\n"); + break; + } +#else + ppc_tlb_invalidate_all(env); +#endif +} + 
+/*****************************************************************************/ +/* Special registers manipulation */ +#if defined(TARGET_PPC64) +void ppc_store_asr(CPUPPCState *env, target_ulong value) +{ + if (env->asr != value) { + env->asr = value; + tlb_flush(env, 1); + } +} +#endif + +void ppc_store_sdr1(CPUPPCState *env, target_ulong value) +{ + LOG_MMU("%s: " TARGET_FMT_lx "\n", __func__, value); + if (env->spr[SPR_SDR1] != value) { + env->spr[SPR_SDR1] = value; +#if defined(TARGET_PPC64) + if (env->mmu_model & POWERPC_MMU_64) { + target_ulong htabsize = value & SDR_64_HTABSIZE; + + if (htabsize > 28) { + fprintf(stderr, "Invalid HTABSIZE 0x" TARGET_FMT_lx + " stored in SDR1\n", htabsize); + htabsize = 28; + } + env->htab_mask = (1ULL << (htabsize + 18)) - 1; + env->htab_base = value & SDR_64_HTABORG; + } else +#endif /* defined(TARGET_PPC64) */ + { + /* FIXME: Should check for valid HTABMASK values */ + env->htab_mask = ((value & SDR_32_HTABMASK) << 16) | 0xFFFF; + env->htab_base = value & SDR_32_HTABORG; + } + tlb_flush(env, 1); + } +} + +/* Segment registers load and store */ +target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num) +{ +#if defined(TARGET_PPC64) + if (env->mmu_model & POWERPC_MMU_64) { + /* XXX */ + return 0; + } +#endif + return env->sr[sr_num]; +} + +void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value) +{ + LOG_MMU("%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, + (int)srnum, value, env->sr[srnum]); +#if defined(TARGET_PPC64) + if (env->mmu_model & POWERPC_MMU_64) { + uint64_t rb = 0, rs = 0; + + /* ESID = srnum */ + rb |= ((uint32_t)srnum & 0xf) << 28; + /* Set the valid bit */ + rb |= 1 << 27; + /* Index = ESID */ + rb |= (uint32_t)srnum; + + /* VSID = VSID */ + rs |= (value & 0xfffffff) << 12; + /* flags = flags */ + rs |= ((value >> 27) & 0xf) << 8; + + ppc_store_slb(env, rb, rs); + } else +#endif + if (env->sr[srnum] != value) { + env->sr[srnum] = value; +/* Invalidating 256MB of virtual memory in 4kB pages is way longer than + flusing the whole TLB. 
*/ +#if !defined(FLUSH_ALL_TLBS) && 0 + { + target_ulong page, end; + /* Invalidate 256 MB of virtual memory */ + page = (16 << 20) * srnum; + end = page + (16 << 20); + for (; page != end; page += TARGET_PAGE_SIZE) { + tlb_flush_page(env, page); + } + } +#else + tlb_flush(env, 1); +#endif + } +} +#endif /* !defined(CONFIG_USER_ONLY) */ + +#if !defined(CONFIG_USER_ONLY) +/* SLB management */ +#if defined(TARGET_PPC64) +void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs) +{ + if (ppc_store_slb(env, rb, rs) < 0) { + helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL); + } +} + +target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb) +{ + target_ulong rt = 0; + + if (ppc_load_slb_esid(env, rb, &rt) < 0) { + helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL); + } + return rt; +} + +target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb) +{ + target_ulong rt = 0; + + if (ppc_load_slb_vsid(env, rb, &rt) < 0) { + helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL); + } + return rt; +} +#endif /* defined(TARGET_PPC64) */ + +/* TLB management */ +void helper_tlbia(CPUPPCState *env) +{ + ppc_tlb_invalidate_all(env); +} + +void helper_tlbie(CPUPPCState *env, target_ulong addr) +{ + ppc_tlb_invalidate_one(env, addr); +} + +/* Software driven TLBs management */ +/* PowerPC 602/603 software TLB load instructions helpers */ +static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code) +{ + target_ulong RPN, CMP, EPN; + int way; + + RPN = env->spr[SPR_RPA]; + if (is_code) { + CMP = env->spr[SPR_ICMP]; + EPN = env->spr[SPR_IMISS]; + } else { + CMP = env->spr[SPR_DCMP]; + EPN = env->spr[SPR_DMISS]; + } + way = (env->spr[SPR_SRR1] >> 17) & 1; + (void)EPN; /* avoid a compiler warning */ + LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx + " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP, + RPN, way); + /* Store this TLB */ + ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK), + way, is_code, CMP, RPN); +} + +void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN) +{ + do_6xx_tlb(env, EPN, 0); +} + +void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN) +{ + do_6xx_tlb(env, EPN, 1); +} + +/* PowerPC 74xx software TLB load instructions helpers */ +static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code) +{ + target_ulong RPN, CMP, EPN; + int way; + + RPN = env->spr[SPR_PTELO]; + CMP = env->spr[SPR_PTEHI]; + EPN = env->spr[SPR_TLBMISS] & ~0x3; + way = env->spr[SPR_TLBMISS] & 0x3; + (void)EPN; /* avoid a compiler warning */ + LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx + " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP, + RPN, way); + /* Store this TLB */ + ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK), + way, is_code, CMP, RPN); +} + +void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN) +{ + do_74xx_tlb(env, EPN, 0); +} + +void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN) +{ + do_74xx_tlb(env, EPN, 1); +} + +/*****************************************************************************/ +/* PowerPC 601 specific instructions (POWER bridge) */ + +target_ulong helper_rac(CPUPPCState *env, target_ulong addr) +{ + mmu_ctx_t ctx; + int nb_BATs; + target_ulong ret = 0; + + /* We don't have to generate many instances of this instruction, + * as rac is supervisor only. 
+ */ + /* XXX: FIX THIS: Pretend we have no BAT */ + nb_BATs = env->nb_BATs; + env->nb_BATs = 0; + if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) { + ret = ctx.raddr; + } + env->nb_BATs = nb_BATs; + return ret; +} + +static inline target_ulong booke_tlb_to_page_size(int size) +{ + return 1024 << (2 * size); +} + +static inline int booke_page_size_to_tlb(target_ulong page_size) +{ + int size; + + switch (page_size) { + case 0x00000400UL: + size = 0x0; + break; + case 0x00001000UL: + size = 0x1; + break; + case 0x00004000UL: + size = 0x2; + break; + case 0x00010000UL: + size = 0x3; + break; + case 0x00040000UL: + size = 0x4; + break; + case 0x00100000UL: + size = 0x5; + break; + case 0x00400000UL: + size = 0x6; + break; + case 0x01000000UL: + size = 0x7; + break; + case 0x04000000UL: + size = 0x8; + break; + case 0x10000000UL: + size = 0x9; + break; + case 0x40000000UL: + size = 0xA; + break; +#if defined(TARGET_PPC64) + case 0x000100000000ULL: + size = 0xB; + break; + case 0x000400000000ULL: + size = 0xC; + break; + case 0x001000000000ULL: + size = 0xD; + break; + case 0x004000000000ULL: + size = 0xE; + break; + case 0x010000000000ULL: + size = 0xF; + break; +#endif + default: + size = -1; + break; + } + + return size; +} + +/* Helpers for 4xx TLB management */ +#define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */ + +#define PPC4XX_TLBHI_V 0x00000040 +#define PPC4XX_TLBHI_E 0x00000020 +#define PPC4XX_TLBHI_SIZE_MIN 0 +#define PPC4XX_TLBHI_SIZE_MAX 7 +#define PPC4XX_TLBHI_SIZE_DEFAULT 1 +#define PPC4XX_TLBHI_SIZE_SHIFT 7 +#define PPC4XX_TLBHI_SIZE_MASK 0x00000007 + +#define PPC4XX_TLBLO_EX 0x00000200 +#define PPC4XX_TLBLO_WR 0x00000100 +#define PPC4XX_TLBLO_ATTR_MASK 0x000000FF +#define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00 + +target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry) +{ + ppcemb_tlb_t *tlb; + target_ulong ret; + int size; + + entry &= PPC4XX_TLB_ENTRY_MASK; + tlb = &env->tlb.tlbe[entry]; + ret = tlb->EPN; + if (tlb->prot & PAGE_VALID) { + ret |= PPC4XX_TLBHI_V; + } + size = booke_page_size_to_tlb(tlb->size); + if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) { + size = PPC4XX_TLBHI_SIZE_DEFAULT; + } + ret |= size << PPC4XX_TLBHI_SIZE_SHIFT; + env->spr[SPR_40x_PID] = tlb->PID; + return ret; +} + +target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry) +{ + ppcemb_tlb_t *tlb; + target_ulong ret; + + entry &= PPC4XX_TLB_ENTRY_MASK; + tlb = &env->tlb.tlbe[entry]; + ret = tlb->RPN; + if (tlb->prot & PAGE_EXEC) { + ret |= PPC4XX_TLBLO_EX; + } + if (tlb->prot & PAGE_WRITE) { + ret |= PPC4XX_TLBLO_WR; + } + return ret; +} + +void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry, + target_ulong val) +{ + ppcemb_tlb_t *tlb; + target_ulong page, end; + + LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry, + val); + entry &= PPC4XX_TLB_ENTRY_MASK; + tlb = &env->tlb.tlbe[entry]; + /* Invalidate previous TLB (if it's valid) */ + if (tlb->prot & PAGE_VALID) { + end = tlb->EPN + tlb->size; + LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end " + TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end); + for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { + tlb_flush_page(env, page); + } + } + tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT) + & PPC4XX_TLBHI_SIZE_MASK); + /* We cannot handle TLB size < TARGET_PAGE_SIZE. 
+ * If this ever occurs, one should use the ppcemb target instead + * of the ppc or ppc64 one + */ + if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) { + cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u " + "are not supported (%d)\n", + tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7)); + } + tlb->EPN = val & ~(tlb->size - 1); + if (val & PPC4XX_TLBHI_V) { + tlb->prot |= PAGE_VALID; + if (val & PPC4XX_TLBHI_E) { + /* XXX: TO BE FIXED */ + cpu_abort(env, + "Little-endian TLB entries are not supported by now\n"); + } + } else { + tlb->prot &= ~PAGE_VALID; + } + tlb->PID = env->spr[SPR_40x_PID]; /* PID */ + LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx + " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__, + (int)entry, tlb->RPN, tlb->EPN, tlb->size, + tlb->prot & PAGE_READ ? 'r' : '-', + tlb->prot & PAGE_WRITE ? 'w' : '-', + tlb->prot & PAGE_EXEC ? 'x' : '-', + tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID); + /* Invalidate new TLB (if valid) */ + if (tlb->prot & PAGE_VALID) { + end = tlb->EPN + tlb->size; + LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end " + TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end); + for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { + tlb_flush_page(env, page); + } + } +} + +void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry, + target_ulong val) +{ + ppcemb_tlb_t *tlb; + + LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry, + val); + entry &= PPC4XX_TLB_ENTRY_MASK; + tlb = &env->tlb.tlbe[entry]; + tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK; + tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK; + tlb->prot = PAGE_READ; + if (val & PPC4XX_TLBLO_EX) { + tlb->prot |= PAGE_EXEC; + } + if (val & PPC4XX_TLBLO_WR) { + tlb->prot |= PAGE_WRITE; + } + LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx + " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__, + (int)entry, tlb->RPN, tlb->EPN, tlb->size, + tlb->prot & PAGE_READ ? 'r' : '-', + tlb->prot & PAGE_WRITE ? 'w' : '-', + tlb->prot & PAGE_EXEC ? 'x' : '-', + tlb->prot & PAGE_VALID ? 
'v' : '-', (int)tlb->PID); +} + +target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address) +{ + return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]); +} + +/* PowerPC 440 TLB management */ +void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry, + target_ulong value) +{ + ppcemb_tlb_t *tlb; + target_ulong EPN, RPN, size; + int do_flush_tlbs; + + LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n", + __func__, word, (int)entry, value); + do_flush_tlbs = 0; + entry &= 0x3F; + tlb = &env->tlb.tlbe[entry]; + switch (word) { + default: + /* Just here to please gcc */ + case 0: + EPN = value & 0xFFFFFC00; + if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) { + do_flush_tlbs = 1; + } + tlb->EPN = EPN; + size = booke_tlb_to_page_size((value >> 4) & 0xF); + if ((tlb->prot & PAGE_VALID) && tlb->size < size) { + do_flush_tlbs = 1; + } + tlb->size = size; + tlb->attr &= ~0x1; + tlb->attr |= (value >> 8) & 1; + if (value & 0x200) { + tlb->prot |= PAGE_VALID; + } else { + if (tlb->prot & PAGE_VALID) { + tlb->prot &= ~PAGE_VALID; + do_flush_tlbs = 1; + } + } + tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF; + if (do_flush_tlbs) { + tlb_flush(env, 1); + } + break; + case 1: + RPN = value & 0xFFFFFC0F; + if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) { + tlb_flush(env, 1); + } + tlb->RPN = RPN; + break; + case 2: + tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00); + tlb->prot = tlb->prot & PAGE_VALID; + if (value & 0x1) { + tlb->prot |= PAGE_READ << 4; + } + if (value & 0x2) { + tlb->prot |= PAGE_WRITE << 4; + } + if (value & 0x4) { + tlb->prot |= PAGE_EXEC << 4; + } + if (value & 0x8) { + tlb->prot |= PAGE_READ; + } + if (value & 0x10) { + tlb->prot |= PAGE_WRITE; + } + if (value & 0x20) { + tlb->prot |= PAGE_EXEC; + } + break; + } +} + +target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word, + target_ulong entry) +{ + ppcemb_tlb_t *tlb; + target_ulong ret; + int size; + + entry &= 0x3F; + tlb = &env->tlb.tlbe[entry]; + switch (word) { + default: + /* Just here to please gcc */ + case 0: + ret = tlb->EPN; + size = booke_page_size_to_tlb(tlb->size); + if (size < 0 || size > 0xF) { + size = 1; + } + ret |= size << 4; + if (tlb->attr & 0x1) { + ret |= 0x100; + } + if (tlb->prot & PAGE_VALID) { + ret |= 0x200; + } + env->spr[SPR_440_MMUCR] &= ~0x000000FF; + env->spr[SPR_440_MMUCR] |= tlb->PID; + break; + case 1: + ret = tlb->RPN; + break; + case 2: + ret = tlb->attr & ~0x1; + if (tlb->prot & (PAGE_READ << 4)) { + ret |= 0x1; + } + if (tlb->prot & (PAGE_WRITE << 4)) { + ret |= 0x2; + } + if (tlb->prot & (PAGE_EXEC << 4)) { + ret |= 0x4; + } + if (tlb->prot & PAGE_READ) { + ret |= 0x8; + } + if (tlb->prot & PAGE_WRITE) { + ret |= 0x10; + } + if (tlb->prot & PAGE_EXEC) { + ret |= 0x20; + } + break; + } + return ret; +} + +target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address) +{ + return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF); +} + +/* PowerPC BookE 2.06 TLB management */ + +static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env) +{ + uint32_t tlbncfg = 0; + int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT; + int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK); + int tlb; + + tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT; + tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb]; + + if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) { + cpu_abort(env, "we don't support HES yet\n"); + } + + return booke206_get_tlbm(env, tlb, ea, esel); +} + +void 
helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid) +{ + env->spr[pidn] = pid; + /* changing PIDs mean we're in a different address space now */ + tlb_flush(env, 1); +} + +void helper_booke206_tlbwe(CPUPPCState *env) +{ + uint32_t tlbncfg, tlbn; + ppcmas_tlb_t *tlb; + uint32_t size_tlb, size_ps; + target_ulong mask; + + + switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) { + case MAS0_WQ_ALWAYS: + /* good to go, write that entry */ + break; + case MAS0_WQ_COND: + /* XXX check if reserved */ + if (0) { + return; + } + break; + case MAS0_WQ_CLR_RSRV: + /* XXX clear entry */ + return; + default: + /* no idea what to do */ + return; + } + + if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) && + !msr_gs) { + /* XXX we don't support direct LRAT setting yet */ + fprintf(stderr, "cpu: don't support LRAT setting yet\n"); + return; + } + + tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT; + tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn]; + + tlb = booke206_cur_tlb(env); + + if (!tlb) { + helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | + POWERPC_EXCP_INVAL_INVAL); + } + + /* check that we support the targeted size */ + size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; + size_ps = booke206_tlbnps(env, tlbn); + if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) && + !(size_ps & (1 << size_tlb))) { + helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | + POWERPC_EXCP_INVAL_INVAL); + } + + if (msr_gs) { + cpu_abort(env, "missing HV implementation\n"); + } + tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) | + env->spr[SPR_BOOKE_MAS3]; + tlb->mas1 = env->spr[SPR_BOOKE_MAS1]; + + /* MAV 1.0 only */ + if (!(tlbncfg & TLBnCFG_AVAIL)) { + /* force !AVAIL TLB entries to correct page size */ + tlb->mas1 &= ~MAS1_TSIZE_MASK; + /* XXX can be configured in MMUCSR0 */ + tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12; + } + + /* Make a mask from TLB size to discard invalid bits in EPN field */ + mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); + /* Add a mask for page attributes */ + mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E; + + if (!msr_cm) { + /* Executing a tlbwe instruction in 32-bit mode will set + * bits 0:31 of the TLB EPN field to zero. 
+ */ + mask &= 0xffffffff; + } + + tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask; + + if (!(tlbncfg & TLBnCFG_IPROT)) { + /* no IPROT supported by TLB */ + tlb->mas1 &= ~MAS1_IPROT; + } + + if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) { + tlb_flush_page(env, tlb->mas2 & MAS2_EPN_MASK); + } else { + tlb_flush(env, 1); + } +} + +static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb) +{ + int tlbn = booke206_tlbm_to_tlbn(env, tlb); + int way = booke206_tlbm_to_way(env, tlb); + + env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT; + env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT; + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; + + env->spr[SPR_BOOKE_MAS1] = tlb->mas1; + env->spr[SPR_BOOKE_MAS2] = tlb->mas2; + env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3; + env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32; +} + +void helper_booke206_tlbre(CPUPPCState *env) +{ + ppcmas_tlb_t *tlb = NULL; + + tlb = booke206_cur_tlb(env); + if (!tlb) { + env->spr[SPR_BOOKE_MAS1] = 0; + } else { + booke206_tlb_to_mas(env, tlb); + } +} + +void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address) +{ + ppcmas_tlb_t *tlb = NULL; + int i, j; + target_phys_addr_t raddr; + uint32_t spid, sas; + + spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT; + sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS; + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + int ways = booke206_tlb_ways(env, i); + + for (j = 0; j < ways; j++) { + tlb = booke206_get_tlbm(env, i, address, j); + + if (!tlb) { + continue; + } + + if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) { + continue; + } + + if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { + continue; + } + + booke206_tlb_to_mas(env, tlb); + return; + } + } + + /* no entry found, fill with defaults */ + env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; + env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; + env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; + env->spr[SPR_BOOKE_MAS3] = 0; + env->spr[SPR_BOOKE_MAS7] = 0; + + if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) { + env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; + } + + env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16) + << MAS1_TID_SHIFT; + + /* next victim logic */ + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; + env->last_way++; + env->last_way &= booke206_tlb_ways(env, 0) - 1; + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; +} + +static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn, + uint32_t ea) +{ + int i; + int ways = booke206_tlb_ways(env, tlbn); + target_ulong mask; + + for (i = 0; i < ways; i++) { + ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i); + if (!tlb) { + continue; + } + mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); + if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) && + !(tlb->mas1 & MAS1_IPROT)) { + tlb->mas1 &= ~MAS1_VALID; + } + } +} + +void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address) +{ + if (address & 0x4) { + /* flush all entries */ + if (address & 0x8) { + /* flush all of TLB1 */ + booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1); + } else { + /* flush all of TLB0 */ + booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0); + } + return; + } + + if (address & 0x8) { + /* flush TLB1 entries */ + booke206_invalidate_ea_tlb(env, 1, address); + tlb_flush(env, 1); + } else { + /* flush TLB0 entries */ + booke206_invalidate_ea_tlb(env, 0, address); + tlb_flush_page(env, address & MAS2_EPN_MASK); + } +} + +void 
helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address) +{ + /* XXX missing LPID handling */ + booke206_flush_tlb(env, -1, 1); +} + +void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address) +{ + int i, j; + int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID); + ppcmas_tlb_t *tlb = env->tlb.tlbm; + int tlb_size; + + /* XXX missing LPID handling */ + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + tlb_size = booke206_tlb_size(env, i); + for (j = 0; j < tlb_size; j++) { + if (!(tlb[j].mas1 & MAS1_IPROT) && + ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) { + tlb[j].mas1 &= ~MAS1_VALID; + } + } + tlb += booke206_tlb_size(env, i); + } + tlb_flush(env, 1); +} + +void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address) +{ + int i, j; + ppcmas_tlb_t *tlb; + int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID); + int pid = tid >> MAS6_SPID_SHIFT; + int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS; + int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0; + /* XXX check for unsupported isize and raise an invalid opcode then */ + int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK; + /* XXX implement MAV2 handling */ + bool mav2 = false; + + /* XXX missing LPID handling */ + /* flush by pid and ea */ + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + int ways = booke206_tlb_ways(env, i); + + for (j = 0; j < ways; j++) { + tlb = booke206_get_tlbm(env, i, address, j); + if (!tlb) { + continue; + } + if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) || + (tlb->mas1 & MAS1_IPROT) || + ((tlb->mas1 & MAS1_IND) != ind) || + ((tlb->mas8 & MAS8_TGS) != sgs)) { + continue; + } + if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) { + /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */ + continue; + } + /* XXX e500mc doesn't match SAS, but other cores might */ + tlb->mas1 &= ~MAS1_VALID; + } + } + tlb_flush(env, 1); +} + +void helper_booke206_tlbflush(CPUPPCState *env, uint32_t type) +{ + int flags = 0; + + if (type & 2) { + flags |= BOOKE206_FLUSH_TLB1; + } + + if (type & 4) { + flags |= BOOKE206_FLUSH_TLB0; + } + + booke206_flush_tlb(env, flags, 1); +} +#endif diff --git a/target-ppc/mpic_helper.c b/target-ppc/mpic_helper.c new file mode 100644 index 0000000000..2c6a4d30a9 --- /dev/null +++ b/target-ppc/mpic_helper.c @@ -0,0 +1,35 @@ +/* + * PowerPC emulation helpers for QEMU. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#include "cpu.h" +#include "helper.h" + +/*****************************************************************************/ +/* SPR accesses */ + +#if !defined(CONFIG_USER_ONLY) +/* + * This is an ugly helper for EPR, which is basically the same as accessing + * the IACK (PIAC) register on the MPIC. Because we model the MPIC as a device + * that can only talk to the CPU through MMIO, let's access it that way! 
+ */ +target_ulong helper_load_epr(CPUPPCState *env) +{ + return ldl_phys(env->mpic_cpu_base + 0xA0); +} +#endif diff --git a/target-ppc/op_helper.c b/target-ppc/op_helper.c deleted file mode 100644 index 4ef2332a5a..0000000000 --- a/target-ppc/op_helper.c +++ /dev/null @@ -1,4568 +0,0 @@ -/* - * PowerPC emulation helpers for qemu. - * - * Copyright (c) 2003-2007 Jocelyn Mayer - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see <http://www.gnu.org/licenses/>. - */ -#include <string.h> -#include "cpu.h" -#include "dyngen-exec.h" -#include "host-utils.h" -#include "helper.h" - -#include "helper_regs.h" - -#if !defined(CONFIG_USER_ONLY) -#include "softmmu_exec.h" -#endif /* !defined(CONFIG_USER_ONLY) */ - -//#define DEBUG_OP -//#define DEBUG_EXCEPTIONS -//#define DEBUG_SOFTWARE_TLB - -#ifdef DEBUG_SOFTWARE_TLB -# define LOG_SWTLB(...) qemu_log(__VA_ARGS__) -#else -# define LOG_SWTLB(...) do { } while (0) -#endif - - -/*****************************************************************************/ -/* Exceptions processing helpers */ - -void helper_raise_exception_err (uint32_t exception, uint32_t error_code) -{ -#if 0 - printf("Raise exception %3x code : %d\n", exception, error_code); -#endif - env->exception_index = exception; - env->error_code = error_code; - cpu_loop_exit(env); -} - -void helper_raise_exception (uint32_t exception) -{ - helper_raise_exception_err(exception, 0); -} - -/*****************************************************************************/ -/* SPR accesses */ -void helper_load_dump_spr (uint32_t sprn) -{ - qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn, - env->spr[sprn]); -} - -void helper_store_dump_spr (uint32_t sprn) -{ - qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn, - env->spr[sprn]); -} - -target_ulong helper_load_tbl (void) -{ - return (target_ulong)cpu_ppc_load_tbl(env); -} - -target_ulong helper_load_tbu (void) -{ - return cpu_ppc_load_tbu(env); -} - -target_ulong helper_load_atbl (void) -{ - return (target_ulong)cpu_ppc_load_atbl(env); -} - -target_ulong helper_load_atbu (void) -{ - return cpu_ppc_load_atbu(env); -} - -#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) -target_ulong helper_load_purr (void) -{ - return (target_ulong)cpu_ppc_load_purr(env); -} -#endif - -target_ulong helper_load_601_rtcl (void) -{ - return cpu_ppc601_load_rtcl(env); -} - -target_ulong helper_load_601_rtcu (void) -{ - return cpu_ppc601_load_rtcu(env); -} - -#if !defined(CONFIG_USER_ONLY) -#if defined (TARGET_PPC64) -void helper_store_asr (target_ulong val) -{ - ppc_store_asr(env, val); -} -#endif - -void helper_store_sdr1 (target_ulong val) -{ - ppc_store_sdr1(env, val); -} - -void helper_store_tbl (target_ulong val) -{ - cpu_ppc_store_tbl(env, val); -} - -void helper_store_tbu (target_ulong val) -{ - cpu_ppc_store_tbu(env, val); -} - -void helper_store_atbl (target_ulong val) -{ - cpu_ppc_store_atbl(env, val); -} - -void helper_store_atbu (target_ulong 
val) -{ - cpu_ppc_store_atbu(env, val); -} - -void helper_store_601_rtcl (target_ulong val) -{ - cpu_ppc601_store_rtcl(env, val); -} - -void helper_store_601_rtcu (target_ulong val) -{ - cpu_ppc601_store_rtcu(env, val); -} - -target_ulong helper_load_decr (void) -{ - return cpu_ppc_load_decr(env); -} - -void helper_store_decr (target_ulong val) -{ - cpu_ppc_store_decr(env, val); -} - -void helper_store_hid0_601 (target_ulong val) -{ - target_ulong hid0; - - hid0 = env->spr[SPR_HID0]; - if ((val ^ hid0) & 0x00000008) { - /* Change current endianness */ - env->hflags &= ~(1 << MSR_LE); - env->hflags_nmsr &= ~(1 << MSR_LE); - env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE); - env->hflags |= env->hflags_nmsr; - qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__, - val & 0x8 ? 'l' : 'b', env->hflags); - } - env->spr[SPR_HID0] = (uint32_t)val; -} - -void helper_store_403_pbr (uint32_t num, target_ulong value) -{ - if (likely(env->pb[num] != value)) { - env->pb[num] = value; - /* Should be optimized */ - tlb_flush(env, 1); - } -} - -target_ulong helper_load_40x_pit (void) -{ - return load_40x_pit(env); -} - -void helper_store_40x_pit (target_ulong val) -{ - store_40x_pit(env, val); -} - -void helper_store_40x_dbcr0 (target_ulong val) -{ - store_40x_dbcr0(env, val); -} - -void helper_store_40x_sler (target_ulong val) -{ - store_40x_sler(env, val); -} - -void helper_store_booke_tcr (target_ulong val) -{ - store_booke_tcr(env, val); -} - -void helper_store_booke_tsr (target_ulong val) -{ - store_booke_tsr(env, val); -} - -void helper_store_ibatu (uint32_t nr, target_ulong val) -{ - ppc_store_ibatu(env, nr, val); -} - -void helper_store_ibatl (uint32_t nr, target_ulong val) -{ - ppc_store_ibatl(env, nr, val); -} - -void helper_store_dbatu (uint32_t nr, target_ulong val) -{ - ppc_store_dbatu(env, nr, val); -} - -void helper_store_dbatl (uint32_t nr, target_ulong val) -{ - ppc_store_dbatl(env, nr, val); -} - -void helper_store_601_batl (uint32_t nr, target_ulong val) -{ - ppc_store_ibatl_601(env, nr, val); -} - -void helper_store_601_batu (uint32_t nr, target_ulong val) -{ - ppc_store_ibatu_601(env, nr, val); -} -#endif - -/*****************************************************************************/ -/* Memory load and stores */ - -static inline target_ulong addr_add(target_ulong addr, target_long arg) -{ -#if defined(TARGET_PPC64) - if (!msr_sf) - return (uint32_t)(addr + arg); - else -#endif - return addr + arg; -} - -void helper_lmw (target_ulong addr, uint32_t reg) -{ - for (; reg < 32; reg++) { - if (msr_le) - env->gpr[reg] = bswap32(ldl(addr)); - else - env->gpr[reg] = ldl(addr); - addr = addr_add(addr, 4); - } -} - -void helper_stmw (target_ulong addr, uint32_t reg) -{ - for (; reg < 32; reg++) { - if (msr_le) - stl(addr, bswap32((uint32_t)env->gpr[reg])); - else - stl(addr, (uint32_t)env->gpr[reg]); - addr = addr_add(addr, 4); - } -} - -void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg) -{ - int sh; - for (; nb > 3; nb -= 4) { - env->gpr[reg] = ldl(addr); - reg = (reg + 1) % 32; - addr = addr_add(addr, 4); - } - if (unlikely(nb > 0)) { - env->gpr[reg] = 0; - for (sh = 24; nb > 0; nb--, sh -= 8) { - env->gpr[reg] |= ldub(addr) << sh; - addr = addr_add(addr, 1); - } - } -} -/* PPC32 specification says we must generate an exception if - * rA is in the range of registers to be loaded. - * In an other hand, IBM says this is valid, but rA won't be loaded. - * For now, I'll follow the spec... 
- */ -void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb) -{ - if (likely(xer_bc != 0)) { - if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) || - (reg < rb && (reg + xer_bc) > rb))) { - helper_raise_exception_err(POWERPC_EXCP_PROGRAM, - POWERPC_EXCP_INVAL | - POWERPC_EXCP_INVAL_LSWX); - } else { - helper_lsw(addr, xer_bc, reg); - } - } -} - -void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg) -{ - int sh; - for (; nb > 3; nb -= 4) { - stl(addr, env->gpr[reg]); - reg = (reg + 1) % 32; - addr = addr_add(addr, 4); - } - if (unlikely(nb > 0)) { - for (sh = 24; nb > 0; nb--, sh -= 8) { - stb(addr, (env->gpr[reg] >> sh) & 0xFF); - addr = addr_add(addr, 1); - } - } -} - -static void do_dcbz(target_ulong addr, int dcache_line_size) -{ - addr &= ~(dcache_line_size - 1); - int i; - for (i = 0 ; i < dcache_line_size ; i += 4) { - stl(addr + i , 0); - } - if (env->reserve_addr == addr) - env->reserve_addr = (target_ulong)-1ULL; -} - -void helper_dcbz(target_ulong addr) -{ - do_dcbz(addr, env->dcache_line_size); -} - -void helper_dcbz_970(target_ulong addr) -{ - if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) - do_dcbz(addr, 32); - else - do_dcbz(addr, env->dcache_line_size); -} - -void helper_icbi(target_ulong addr) -{ - addr &= ~(env->dcache_line_size - 1); - /* Invalidate one cache line : - * PowerPC specification says this is to be treated like a load - * (not a fetch) by the MMU. To be sure it will be so, - * do the load "by hand". - */ - ldl(addr); -} - -// XXX: to be tested -target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb) -{ - int i, c, d; - d = 24; - for (i = 0; i < xer_bc; i++) { - c = ldub(addr); - addr = addr_add(addr, 1); - /* ra (if not 0) and rb are never modified */ - if (likely(reg != rb && (ra == 0 || reg != ra))) { - env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d); - } - if (unlikely(c == xer_cmp)) - break; - if (likely(d != 0)) { - d -= 8; - } else { - d = 24; - reg++; - reg = reg & 0x1F; - } - } - return i; -} - -/*****************************************************************************/ -/* Fixed point operations helpers */ -#if defined(TARGET_PPC64) - -/* multiply high word */ -uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2) -{ - uint64_t tl, th; - - muls64(&tl, &th, arg1, arg2); - return th; -} - -/* multiply high word unsigned */ -uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2) -{ - uint64_t tl, th; - - mulu64(&tl, &th, arg1, arg2); - return th; -} - -uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2) -{ - int64_t th; - uint64_t tl; - - muls64(&tl, (uint64_t *)&th, arg1, arg2); - /* If th != 0 && th != -1, then we had an overflow */ - if (likely((uint64_t)(th + 1) <= 1)) { - env->xer &= ~(1 << XER_OV); - } else { - env->xer |= (1 << XER_OV) | (1 << XER_SO); - } - return (int64_t)tl; -} -#endif - -target_ulong helper_cntlzw (target_ulong t) -{ - return clz32(t); -} - -#if defined(TARGET_PPC64) -target_ulong helper_cntlzd (target_ulong t) -{ - return clz64(t); -} -#endif - -/* shift right arithmetic helper */ -target_ulong helper_sraw (target_ulong value, target_ulong shift) -{ - int32_t ret; - - if (likely(!(shift & 0x20))) { - if (likely((uint32_t)shift != 0)) { - shift &= 0x1f; - ret = (int32_t)value >> shift; - if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) { - env->xer &= ~(1 << XER_CA); - } else { - env->xer |= (1 << XER_CA); - } - } else { - ret = (int32_t)value; - env->xer &= ~(1 << XER_CA); - } - } else { - ret = (int32_t)value >> 31; - if (ret) { - 
env->xer |= (1 << XER_CA); - } else { - env->xer &= ~(1 << XER_CA); - } - } - return (target_long)ret; -} - -#if defined(TARGET_PPC64) -target_ulong helper_srad (target_ulong value, target_ulong shift) -{ - int64_t ret; - - if (likely(!(shift & 0x40))) { - if (likely((uint64_t)shift != 0)) { - shift &= 0x3f; - ret = (int64_t)value >> shift; - if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) { - env->xer &= ~(1 << XER_CA); - } else { - env->xer |= (1 << XER_CA); - } - } else { - ret = (int64_t)value; - env->xer &= ~(1 << XER_CA); - } - } else { - ret = (int64_t)value >> 63; - if (ret) { - env->xer |= (1 << XER_CA); - } else { - env->xer &= ~(1 << XER_CA); - } - } - return ret; -} -#endif - -#if defined(TARGET_PPC64) -target_ulong helper_popcntb (target_ulong val) -{ - val = (val & 0x5555555555555555ULL) + ((val >> 1) & - 0x5555555555555555ULL); - val = (val & 0x3333333333333333ULL) + ((val >> 2) & - 0x3333333333333333ULL); - val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & - 0x0f0f0f0f0f0f0f0fULL); - return val; -} - -target_ulong helper_popcntw (target_ulong val) -{ - val = (val & 0x5555555555555555ULL) + ((val >> 1) & - 0x5555555555555555ULL); - val = (val & 0x3333333333333333ULL) + ((val >> 2) & - 0x3333333333333333ULL); - val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & - 0x0f0f0f0f0f0f0f0fULL); - val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) & - 0x00ff00ff00ff00ffULL); - val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) & - 0x0000ffff0000ffffULL); - return val; -} - -target_ulong helper_popcntd (target_ulong val) -{ - return ctpop64(val); -} -#else -target_ulong helper_popcntb (target_ulong val) -{ - val = (val & 0x55555555) + ((val >> 1) & 0x55555555); - val = (val & 0x33333333) + ((val >> 2) & 0x33333333); - val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f); - return val; -} - -target_ulong helper_popcntw (target_ulong val) -{ - val = (val & 0x55555555) + ((val >> 1) & 0x55555555); - val = (val & 0x33333333) + ((val >> 2) & 0x33333333); - val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f); - val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff); - val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff); - return val; -} -#endif - -/*****************************************************************************/ -/* Floating point operations helpers */ -uint64_t helper_float32_to_float64(uint32_t arg) -{ - CPU_FloatU f; - CPU_DoubleU d; - f.l = arg; - d.d = float32_to_float64(f.f, &env->fp_status); - return d.ll; -} - -uint32_t helper_float64_to_float32(uint64_t arg) -{ - CPU_FloatU f; - CPU_DoubleU d; - d.ll = arg; - f.f = float64_to_float32(d.d, &env->fp_status); - return f.l; -} - -static inline int isden(float64 d) -{ - CPU_DoubleU u; - - u.d = d; - - return ((u.ll >> 52) & 0x7FF) == 0; -} - -uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf) -{ - CPU_DoubleU farg; - int isneg; - int ret; - farg.ll = arg; - isneg = float64_is_neg(farg.d); - if (unlikely(float64_is_any_nan(farg.d))) { - if (float64_is_signaling_nan(farg.d)) { - /* Signaling NaN: flags are undefined */ - ret = 0x00; - } else { - /* Quiet NaN */ - ret = 0x11; - } - } else if (unlikely(float64_is_infinity(farg.d))) { - /* +/- infinity */ - if (isneg) - ret = 0x09; - else - ret = 0x05; - } else { - if (float64_is_zero(farg.d)) { - /* +/- zero */ - if (isneg) - ret = 0x12; - else - ret = 0x02; - } else { - if (isden(farg.d)) { - /* Denormalized numbers */ - ret = 0x10; - } else { - /* Normalized numbers */ - ret = 0x00; - } - if (isneg) { - ret |= 0x08; - } else { - ret |= 0x04; - } - } 
- } - if (set_fprf) { - /* We update FPSCR_FPRF */ - env->fpscr &= ~(0x1F << FPSCR_FPRF); - env->fpscr |= ret << FPSCR_FPRF; - } - /* We just need fpcc to update Rc1 */ - return ret & 0xF; -} - -/* Floating-point invalid operations exception */ -static inline uint64_t fload_invalid_op_excp(int op) -{ - uint64_t ret = 0; - int ve; - - ve = fpscr_ve; - switch (op) { - case POWERPC_EXCP_FP_VXSNAN: - env->fpscr |= 1 << FPSCR_VXSNAN; - break; - case POWERPC_EXCP_FP_VXSOFT: - env->fpscr |= 1 << FPSCR_VXSOFT; - break; - case POWERPC_EXCP_FP_VXISI: - /* Magnitude subtraction of infinities */ - env->fpscr |= 1 << FPSCR_VXISI; - goto update_arith; - case POWERPC_EXCP_FP_VXIDI: - /* Division of infinity by infinity */ - env->fpscr |= 1 << FPSCR_VXIDI; - goto update_arith; - case POWERPC_EXCP_FP_VXZDZ: - /* Division of zero by zero */ - env->fpscr |= 1 << FPSCR_VXZDZ; - goto update_arith; - case POWERPC_EXCP_FP_VXIMZ: - /* Multiplication of zero by infinity */ - env->fpscr |= 1 << FPSCR_VXIMZ; - goto update_arith; - case POWERPC_EXCP_FP_VXVC: - /* Ordered comparison of NaN */ - env->fpscr |= 1 << FPSCR_VXVC; - env->fpscr &= ~(0xF << FPSCR_FPCC); - env->fpscr |= 0x11 << FPSCR_FPCC; - /* We must update the target FPR before raising the exception */ - if (ve != 0) { - env->exception_index = POWERPC_EXCP_PROGRAM; - env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC; - /* Update the floating-point enabled exception summary */ - env->fpscr |= 1 << FPSCR_FEX; - /* Exception is differed */ - ve = 0; - } - break; - case POWERPC_EXCP_FP_VXSQRT: - /* Square root of a negative number */ - env->fpscr |= 1 << FPSCR_VXSQRT; - update_arith: - env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI)); - if (ve == 0) { - /* Set the result to quiet NaN */ - ret = 0x7FF8000000000000ULL; - env->fpscr &= ~(0xF << FPSCR_FPCC); - env->fpscr |= 0x11 << FPSCR_FPCC; - } - break; - case POWERPC_EXCP_FP_VXCVI: - /* Invalid conversion */ - env->fpscr |= 1 << FPSCR_VXCVI; - env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI)); - if (ve == 0) { - /* Set the result to quiet NaN */ - ret = 0x7FF8000000000000ULL; - env->fpscr &= ~(0xF << FPSCR_FPCC); - env->fpscr |= 0x11 << FPSCR_FPCC; - } - break; - } - /* Update the floating-point invalid operation summary */ - env->fpscr |= 1 << FPSCR_VX; - /* Update the floating-point exception summary */ - env->fpscr |= 1 << FPSCR_FX; - if (ve != 0) { - /* Update the floating-point enabled exception summary */ - env->fpscr |= 1 << FPSCR_FEX; - if (msr_fe0 != 0 || msr_fe1 != 0) - helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op); - } - return ret; -} - -static inline void float_zero_divide_excp(void) -{ - env->fpscr |= 1 << FPSCR_ZX; - env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI)); - /* Update the floating-point exception summary */ - env->fpscr |= 1 << FPSCR_FX; - if (fpscr_ze != 0) { - /* Update the floating-point enabled exception summary */ - env->fpscr |= 1 << FPSCR_FEX; - if (msr_fe0 != 0 || msr_fe1 != 0) { - helper_raise_exception_err(POWERPC_EXCP_PROGRAM, - POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX); - } - } -} - -static inline void float_overflow_excp(void) -{ - env->fpscr |= 1 << FPSCR_OX; - /* Update the floating-point exception summary */ - env->fpscr |= 1 << FPSCR_FX; - if (fpscr_oe != 0) { - /* XXX: should adjust the result */ - /* Update the floating-point enabled exception summary */ - env->fpscr |= 1 << FPSCR_FEX; - /* We must update the target FPR before raising the exception */ - env->exception_index = POWERPC_EXCP_PROGRAM; - env->error_code = POWERPC_EXCP_FP | 
POWERPC_EXCP_FP_OX; - } else { - env->fpscr |= 1 << FPSCR_XX; - env->fpscr |= 1 << FPSCR_FI; - } -} - -static inline void float_underflow_excp(void) -{ - env->fpscr |= 1 << FPSCR_UX; - /* Update the floating-point exception summary */ - env->fpscr |= 1 << FPSCR_FX; - if (fpscr_ue != 0) { - /* XXX: should adjust the result */ - /* Update the floating-point enabled exception summary */ - env->fpscr |= 1 << FPSCR_FEX; - /* We must update the target FPR before raising the exception */ - env->exception_index = POWERPC_EXCP_PROGRAM; - env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX; - } -} - -static inline void float_inexact_excp(void) -{ - env->fpscr |= 1 << FPSCR_XX; - /* Update the floating-point exception summary */ - env->fpscr |= 1 << FPSCR_FX; - if (fpscr_xe != 0) { - /* Update the floating-point enabled exception summary */ - env->fpscr |= 1 << FPSCR_FEX; - /* We must update the target FPR before raising the exception */ - env->exception_index = POWERPC_EXCP_PROGRAM; - env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX; - } -} - -static inline void fpscr_set_rounding_mode(void) -{ - int rnd_type; - - /* Set rounding mode */ - switch (fpscr_rn) { - case 0: - /* Best approximation (round to nearest) */ - rnd_type = float_round_nearest_even; - break; - case 1: - /* Smaller magnitude (round toward zero) */ - rnd_type = float_round_to_zero; - break; - case 2: - /* Round toward +infinite */ - rnd_type = float_round_up; - break; - default: - case 3: - /* Round toward -infinite */ - rnd_type = float_round_down; - break; - } - set_float_rounding_mode(rnd_type, &env->fp_status); -} - -void helper_fpscr_clrbit (uint32_t bit) -{ - int prev; - - prev = (env->fpscr >> bit) & 1; - env->fpscr &= ~(1 << bit); - if (prev == 1) { - switch (bit) { - case FPSCR_RN1: - case FPSCR_RN: - fpscr_set_rounding_mode(); - break; - default: - break; - } - } -} - -void helper_fpscr_setbit (uint32_t bit) -{ - int prev; - - prev = (env->fpscr >> bit) & 1; - env->fpscr |= 1 << bit; - if (prev == 0) { - switch (bit) { - case FPSCR_VX: - env->fpscr |= 1 << FPSCR_FX; - if (fpscr_ve) - goto raise_ve; - case FPSCR_OX: - env->fpscr |= 1 << FPSCR_FX; - if (fpscr_oe) - goto raise_oe; - break; - case FPSCR_UX: - env->fpscr |= 1 << FPSCR_FX; - if (fpscr_ue) - goto raise_ue; - break; - case FPSCR_ZX: - env->fpscr |= 1 << FPSCR_FX; - if (fpscr_ze) - goto raise_ze; - break; - case FPSCR_XX: - env->fpscr |= 1 << FPSCR_FX; - if (fpscr_xe) - goto raise_xe; - break; - case FPSCR_VXSNAN: - case FPSCR_VXISI: - case FPSCR_VXIDI: - case FPSCR_VXZDZ: - case FPSCR_VXIMZ: - case FPSCR_VXVC: - case FPSCR_VXSOFT: - case FPSCR_VXSQRT: - case FPSCR_VXCVI: - env->fpscr |= 1 << FPSCR_VX; - env->fpscr |= 1 << FPSCR_FX; - if (fpscr_ve != 0) - goto raise_ve; - break; - case FPSCR_VE: - if (fpscr_vx != 0) { - raise_ve: - env->error_code = POWERPC_EXCP_FP; - if (fpscr_vxsnan) - env->error_code |= POWERPC_EXCP_FP_VXSNAN; - if (fpscr_vxisi) - env->error_code |= POWERPC_EXCP_FP_VXISI; - if (fpscr_vxidi) - env->error_code |= POWERPC_EXCP_FP_VXIDI; - if (fpscr_vxzdz) - env->error_code |= POWERPC_EXCP_FP_VXZDZ; - if (fpscr_vximz) - env->error_code |= POWERPC_EXCP_FP_VXIMZ; - if (fpscr_vxvc) - env->error_code |= POWERPC_EXCP_FP_VXVC; - if (fpscr_vxsoft) - env->error_code |= POWERPC_EXCP_FP_VXSOFT; - if (fpscr_vxsqrt) - env->error_code |= POWERPC_EXCP_FP_VXSQRT; - if (fpscr_vxcvi) - env->error_code |= POWERPC_EXCP_FP_VXCVI; - goto raise_excp; - } - break; - case FPSCR_OE: - if (fpscr_ox != 0) { - raise_oe: - env->error_code = POWERPC_EXCP_FP | 
POWERPC_EXCP_FP_OX; - goto raise_excp; - } - break; - case FPSCR_UE: - if (fpscr_ux != 0) { - raise_ue: - env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX; - goto raise_excp; - } - break; - case FPSCR_ZE: - if (fpscr_zx != 0) { - raise_ze: - env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX; - goto raise_excp; - } - break; - case FPSCR_XE: - if (fpscr_xx != 0) { - raise_xe: - env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX; - goto raise_excp; - } - break; - case FPSCR_RN1: - case FPSCR_RN: - fpscr_set_rounding_mode(); - break; - default: - break; - raise_excp: - /* Update the floating-point enabled exception summary */ - env->fpscr |= 1 << FPSCR_FEX; - /* We have to update Rc1 before raising the exception */ - env->exception_index = POWERPC_EXCP_PROGRAM; - break; - } - } -} - -void helper_store_fpscr (uint64_t arg, uint32_t mask) -{ - /* - * We use only the 32 LSB of the incoming fpr - */ - uint32_t prev, new; - int i; - - prev = env->fpscr; - new = (uint32_t)arg; - new &= ~0x60000000; - new |= prev & 0x60000000; - for (i = 0; i < 8; i++) { - if (mask & (1 << i)) { - env->fpscr &= ~(0xF << (4 * i)); - env->fpscr |= new & (0xF << (4 * i)); - } - } - /* Update VX and FEX */ - if (fpscr_ix != 0) - env->fpscr |= 1 << FPSCR_VX; - else - env->fpscr &= ~(1 << FPSCR_VX); - if ((fpscr_ex & fpscr_eex) != 0) { - env->fpscr |= 1 << FPSCR_FEX; - env->exception_index = POWERPC_EXCP_PROGRAM; - /* XXX: we should compute it properly */ - env->error_code = POWERPC_EXCP_FP; - } - else - env->fpscr &= ~(1 << FPSCR_FEX); - fpscr_set_rounding_mode(); -} - -void helper_float_check_status (void) -{ - if (env->exception_index == POWERPC_EXCP_PROGRAM && - (env->error_code & POWERPC_EXCP_FP)) { - /* Differred floating-point exception after target FPR update */ - if (msr_fe0 != 0 || msr_fe1 != 0) - helper_raise_exception_err(env->exception_index, env->error_code); - } else { - int status = get_float_exception_flags(&env->fp_status); - if (status & float_flag_divbyzero) { - float_zero_divide_excp(); - } else if (status & float_flag_overflow) { - float_overflow_excp(); - } else if (status & float_flag_underflow) { - float_underflow_excp(); - } else if (status & float_flag_inexact) { - float_inexact_excp(); - } - } -} - -void helper_reset_fpstatus (void) -{ - set_float_exception_flags(0, &env->fp_status); -} - -/* fadd - fadd. */ -uint64_t helper_fadd (uint64_t arg1, uint64_t arg2) -{ - CPU_DoubleU farg1, farg2; - - farg1.ll = arg1; - farg2.ll = arg2; - - if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) && - float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) { - /* Magnitude subtraction of infinities */ - farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI); - } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d))) { - /* sNaN addition */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); - } - farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status); - } - - return farg1.ll; -} - -/* fsub - fsub. 
*/ -uint64_t helper_fsub (uint64_t arg1, uint64_t arg2) -{ - CPU_DoubleU farg1, farg2; - - farg1.ll = arg1; - farg2.ll = arg2; - - if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) && - float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) { - /* Magnitude subtraction of infinities */ - farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI); - } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d))) { - /* sNaN subtraction */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); - } - farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status); - } - - return farg1.ll; -} - -/* fmul - fmul. */ -uint64_t helper_fmul (uint64_t arg1, uint64_t arg2) -{ - CPU_DoubleU farg1, farg2; - - farg1.ll = arg1; - farg2.ll = arg2; - - if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || - (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { - /* Multiplication of zero by infinity */ - farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ); - } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d))) { - /* sNaN multiplication */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); - } - farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status); - } - - return farg1.ll; -} - -/* fdiv - fdiv. */ -uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2) -{ - CPU_DoubleU farg1, farg2; - - farg1.ll = arg1; - farg2.ll = arg2; - - if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) { - /* Division of infinity by infinity */ - farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI); - } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) { - /* Division of zero by zero */ - farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ); - } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d))) { - /* sNaN division */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); - } - farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status); - } - - return farg1.ll; -} - -/* fabs */ -uint64_t helper_fabs (uint64_t arg) -{ - CPU_DoubleU farg; - - farg.ll = arg; - farg.d = float64_abs(farg.d); - return farg.ll; -} - -/* fnabs */ -uint64_t helper_fnabs (uint64_t arg) -{ - CPU_DoubleU farg; - - farg.ll = arg; - farg.d = float64_abs(farg.d); - farg.d = float64_chs(farg.d); - return farg.ll; -} - -/* fneg */ -uint64_t helper_fneg (uint64_t arg) -{ - CPU_DoubleU farg; - - farg.ll = arg; - farg.d = float64_chs(farg.d); - return farg.ll; -} - -/* fctiw - fctiw. */ -uint64_t helper_fctiw (uint64_t arg) -{ - CPU_DoubleU farg; - farg.ll = arg; - - if (unlikely(float64_is_signaling_nan(farg.d))) { - /* sNaN conversion */ - farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI); - } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) { - /* qNan / infinity conversion */ - farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI); - } else { - farg.ll = float64_to_int32(farg.d, &env->fp_status); - /* XXX: higher bits are not supposed to be significant. - * to make tests easier, return the same as a real PowerPC 750 - */ - farg.ll |= 0xFFF80000ULL << 32; - } - return farg.ll; -} - -/* fctiwz - fctiwz. 
*/ -uint64_t helper_fctiwz (uint64_t arg) -{ - CPU_DoubleU farg; - farg.ll = arg; - - if (unlikely(float64_is_signaling_nan(farg.d))) { - /* sNaN conversion */ - farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI); - } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) { - /* qNan / infinity conversion */ - farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI); - } else { - farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status); - /* XXX: higher bits are not supposed to be significant. - * to make tests easier, return the same as a real PowerPC 750 - */ - farg.ll |= 0xFFF80000ULL << 32; - } - return farg.ll; -} - -#if defined(TARGET_PPC64) -/* fcfid - fcfid. */ -uint64_t helper_fcfid (uint64_t arg) -{ - CPU_DoubleU farg; - farg.d = int64_to_float64(arg, &env->fp_status); - return farg.ll; -} - -/* fctid - fctid. */ -uint64_t helper_fctid (uint64_t arg) -{ - CPU_DoubleU farg; - farg.ll = arg; - - if (unlikely(float64_is_signaling_nan(farg.d))) { - /* sNaN conversion */ - farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI); - } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) { - /* qNan / infinity conversion */ - farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI); - } else { - farg.ll = float64_to_int64(farg.d, &env->fp_status); - } - return farg.ll; -} - -/* fctidz - fctidz. */ -uint64_t helper_fctidz (uint64_t arg) -{ - CPU_DoubleU farg; - farg.ll = arg; - - if (unlikely(float64_is_signaling_nan(farg.d))) { - /* sNaN conversion */ - farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI); - } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) { - /* qNan / infinity conversion */ - farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI); - } else { - farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status); - } - return farg.ll; -} - -#endif - -static inline uint64_t do_fri(uint64_t arg, int rounding_mode) -{ - CPU_DoubleU farg; - farg.ll = arg; - - if (unlikely(float64_is_signaling_nan(farg.d))) { - /* sNaN round */ - farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI); - } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) { - /* qNan / infinity round */ - farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI); - } else { - set_float_rounding_mode(rounding_mode, &env->fp_status); - farg.ll = float64_round_to_int(farg.d, &env->fp_status); - /* Restore rounding mode from FPSCR */ - fpscr_set_rounding_mode(); - } - return farg.ll; -} - -uint64_t helper_frin (uint64_t arg) -{ - return do_fri(arg, float_round_nearest_even); -} - -uint64_t helper_friz (uint64_t arg) -{ - return do_fri(arg, float_round_to_zero); -} - -uint64_t helper_frip (uint64_t arg) -{ - return do_fri(arg, float_round_up); -} - -uint64_t helper_frim (uint64_t arg) -{ - return do_fri(arg, float_round_down); -} - -/* fmadd - fmadd. 
*/ -uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3) -{ - CPU_DoubleU farg1, farg2, farg3; - - farg1.ll = arg1; - farg2.ll = arg2; - farg3.ll = arg3; - - if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || - (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { - /* Multiplication of zero by infinity */ - farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ); - } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d) || - float64_is_signaling_nan(farg3.d))) { - /* sNaN operation */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); - } - /* This is the way the PowerPC specification defines it */ - float128 ft0_128, ft1_128; - - ft0_128 = float64_to_float128(farg1.d, &env->fp_status); - ft1_128 = float64_to_float128(farg2.d, &env->fp_status); - ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); - if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) && - float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) { - /* Magnitude subtraction of infinities */ - farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI); - } else { - ft1_128 = float64_to_float128(farg3.d, &env->fp_status); - ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status); - farg1.d = float128_to_float64(ft0_128, &env->fp_status); - } - } - - return farg1.ll; -} - -/* fmsub - fmsub. */ -uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3) -{ - CPU_DoubleU farg1, farg2, farg3; - - farg1.ll = arg1; - farg2.ll = arg2; - farg3.ll = arg3; - - if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || - (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { - /* Multiplication of zero by infinity */ - farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ); - } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d) || - float64_is_signaling_nan(farg3.d))) { - /* sNaN operation */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); - } - /* This is the way the PowerPC specification defines it */ - float128 ft0_128, ft1_128; - - ft0_128 = float64_to_float128(farg1.d, &env->fp_status); - ft1_128 = float64_to_float128(farg2.d, &env->fp_status); - ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); - if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) && - float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) { - /* Magnitude subtraction of infinities */ - farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI); - } else { - ft1_128 = float64_to_float128(farg3.d, &env->fp_status); - ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status); - farg1.d = float128_to_float64(ft0_128, &env->fp_status); - } - } - return farg1.ll; -} - -/* fnmadd - fnmadd. 
*/ -uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3) -{ - CPU_DoubleU farg1, farg2, farg3; - - farg1.ll = arg1; - farg2.ll = arg2; - farg3.ll = arg3; - - if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || - (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { - /* Multiplication of zero by infinity */ - farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ); - } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d) || - float64_is_signaling_nan(farg3.d))) { - /* sNaN operation */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); - } - /* This is the way the PowerPC specification defines it */ - float128 ft0_128, ft1_128; - - ft0_128 = float64_to_float128(farg1.d, &env->fp_status); - ft1_128 = float64_to_float128(farg2.d, &env->fp_status); - ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); - if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) && - float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) { - /* Magnitude subtraction of infinities */ - farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI); - } else { - ft1_128 = float64_to_float128(farg3.d, &env->fp_status); - ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status); - farg1.d = float128_to_float64(ft0_128, &env->fp_status); - } - if (likely(!float64_is_any_nan(farg1.d))) { - farg1.d = float64_chs(farg1.d); - } - } - return farg1.ll; -} - -/* fnmsub - fnmsub. */ -uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3) -{ - CPU_DoubleU farg1, farg2, farg3; - - farg1.ll = arg1; - farg2.ll = arg2; - farg3.ll = arg3; - - if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || - (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { - /* Multiplication of zero by infinity */ - farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ); - } else { - if (unlikely(float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d) || - float64_is_signaling_nan(farg3.d))) { - /* sNaN operation */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); - } - /* This is the way the PowerPC specification defines it */ - float128 ft0_128, ft1_128; - - ft0_128 = float64_to_float128(farg1.d, &env->fp_status); - ft1_128 = float64_to_float128(farg2.d, &env->fp_status); - ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); - if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) && - float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) { - /* Magnitude subtraction of infinities */ - farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI); - } else { - ft1_128 = float64_to_float128(farg3.d, &env->fp_status); - ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status); - farg1.d = float128_to_float64(ft0_128, &env->fp_status); - } - if (likely(!float64_is_any_nan(farg1.d))) { - farg1.d = float64_chs(farg1.d); - } - } - return farg1.ll; -} - -/* frsp - frsp. */ -uint64_t helper_frsp (uint64_t arg) -{ - CPU_DoubleU farg; - float32 f32; - farg.ll = arg; - - if (unlikely(float64_is_signaling_nan(farg.d))) { - /* sNaN square root */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); - } - f32 = float64_to_float32(farg.d, &env->fp_status); - farg.d = float32_to_float64(f32, &env->fp_status); - - return farg.ll; -} - -/* fsqrt - fsqrt. 
*/ -uint64_t helper_fsqrt (uint64_t arg) -{ - CPU_DoubleU farg; - farg.ll = arg; - - if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) { - /* Square root of a negative nonzero number */ - farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT); - } else { - if (unlikely(float64_is_signaling_nan(farg.d))) { - /* sNaN square root */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); - } - farg.d = float64_sqrt(farg.d, &env->fp_status); - } - return farg.ll; -} - -/* fre - fre. */ -uint64_t helper_fre (uint64_t arg) -{ - CPU_DoubleU farg; - farg.ll = arg; - - if (unlikely(float64_is_signaling_nan(farg.d))) { - /* sNaN reciprocal */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); - } - farg.d = float64_div(float64_one, farg.d, &env->fp_status); - return farg.d; -} - -/* fres - fres. */ -uint64_t helper_fres (uint64_t arg) -{ - CPU_DoubleU farg; - float32 f32; - farg.ll = arg; - - if (unlikely(float64_is_signaling_nan(farg.d))) { - /* sNaN reciprocal */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); - } - farg.d = float64_div(float64_one, farg.d, &env->fp_status); - f32 = float64_to_float32(farg.d, &env->fp_status); - farg.d = float32_to_float64(f32, &env->fp_status); - - return farg.ll; -} - -/* frsqrte - frsqrte. */ -uint64_t helper_frsqrte (uint64_t arg) -{ - CPU_DoubleU farg; - float32 f32; - farg.ll = arg; - - if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) { - /* Reciprocal square root of a negative nonzero number */ - farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT); - } else { - if (unlikely(float64_is_signaling_nan(farg.d))) { - /* sNaN reciprocal square root */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); - } - farg.d = float64_sqrt(farg.d, &env->fp_status); - farg.d = float64_div(float64_one, farg.d, &env->fp_status); - f32 = float64_to_float32(farg.d, &env->fp_status); - farg.d = float32_to_float64(f32, &env->fp_status); - } - return farg.ll; -} - -/* fsel - fsel. 
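helper_frsqrte guards against negative non-zero inputs, then takes the reciprocal square root in double precision and truncates the estimate to single precision. A rough standalone sketch of that shape, using libm instead of softfloat and returning NaN where the helper raises VXSQRT:

#include <math.h>
#include <stdio.h>

static double frsqrte_like(double x)
{
    if (x < 0.0) {
        return NAN;                 /* the helper raises VXSQRT instead */
    }
    double r = 1.0 / sqrt(x);
    return (double)(float)r;        /* estimate only carries float precision */
}

int main(void)
{
    printf("%g\n", frsqrte_like(2.0));   /* ~0.707107 */
    return 0;
}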
*/ -uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3) -{ - CPU_DoubleU farg1; - - farg1.ll = arg1; - - if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_any_nan(farg1.d)) { - return arg2; - } else { - return arg3; - } -} - -void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD) -{ - CPU_DoubleU farg1, farg2; - uint32_t ret = 0; - farg1.ll = arg1; - farg2.ll = arg2; - - if (unlikely(float64_is_any_nan(farg1.d) || - float64_is_any_nan(farg2.d))) { - ret = 0x01UL; - } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) { - ret = 0x08UL; - } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) { - ret = 0x04UL; - } else { - ret = 0x02UL; - } - - env->fpscr &= ~(0x0F << FPSCR_FPRF); - env->fpscr |= ret << FPSCR_FPRF; - env->crf[crfD] = ret; - if (unlikely(ret == 0x01UL - && (float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d)))) { - /* sNaN comparison */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); - } -} - -void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD) -{ - CPU_DoubleU farg1, farg2; - uint32_t ret = 0; - farg1.ll = arg1; - farg2.ll = arg2; - - if (unlikely(float64_is_any_nan(farg1.d) || - float64_is_any_nan(farg2.d))) { - ret = 0x01UL; - } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) { - ret = 0x08UL; - } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) { - ret = 0x04UL; - } else { - ret = 0x02UL; - } - - env->fpscr &= ~(0x0F << FPSCR_FPRF); - env->fpscr |= ret << FPSCR_FPRF; - env->crf[crfD] = ret; - if (unlikely (ret == 0x01UL)) { - if (float64_is_signaling_nan(farg1.d) || - float64_is_signaling_nan(farg2.d)) { - /* sNaN comparison */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | - POWERPC_EXCP_FP_VXVC); - } else { - /* qNaN comparison */ - fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC); - } - } -} - -#if !defined (CONFIG_USER_ONLY) -void helper_store_msr (target_ulong val) -{ - val = hreg_store_msr(env, val, 0); - if (val != 0) { - env->interrupt_request |= CPU_INTERRUPT_EXITTB; - helper_raise_exception(val); - } -} - -static inline void do_rfi(target_ulong nip, target_ulong msr, - target_ulong msrm, int keep_msrh) -{ -#if defined(TARGET_PPC64) - if (msr & (1ULL << MSR_SF)) { - nip = (uint64_t)nip; - msr &= (uint64_t)msrm; - } else { - nip = (uint32_t)nip; - msr = (uint32_t)(msr & msrm); - if (keep_msrh) - msr |= env->msr & ~((uint64_t)0xFFFFFFFF); - } -#else - nip = (uint32_t)nip; - msr &= (uint32_t)msrm; -#endif - /* XXX: beware: this is false if VLE is supported */ - env->nip = nip & ~((target_ulong)0x00000003); - hreg_store_msr(env, msr, 1); -#if defined (DEBUG_OP) - cpu_dump_rfi(env->nip, env->msr); -#endif - /* No need to raise an exception here, - * as rfi is always the last insn of a TB - */ - env->interrupt_request |= CPU_INTERRUPT_EXITTB; -} - -void helper_rfi (void) -{ - do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1], - ~((target_ulong)0x783F0000), 1); -} - -#if defined(TARGET_PPC64) -void helper_rfid (void) -{ - do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1], - ~((target_ulong)0x783F0000), 0); -} - -void helper_hrfid (void) -{ - do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1], - ~((target_ulong)0x783F0000), 0); -} -#endif -#endif - -void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags) -{ - if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) || - ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) || - ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) || - ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) || - ((uint32_t)arg1 > 
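helper_fcmpu/fcmpo pack the outcome into a 4-bit CR field: 0x8 less than, 0x4 greater than, 0x2 equal, 0x1 unordered; the same nibble is mirrored into FPSCR[FPRF]. A plain-double sketch of just the encoding (the exception cases, VXSNAN/VXVC, are left out):

#include <math.h>
#include <stdio.h>

static unsigned fcmpu_bits(double a, double b)
{
    if (isnan(a) || isnan(b)) {
        return 0x1;                 /* unordered */
    } else if (a < b) {
        return 0x8;
    } else if (a > b) {
        return 0x4;
    } else {
        return 0x2;                 /* equal */
    }
}

int main(void)
{
    printf("%x %x %x %x\n",
           fcmpu_bits(1.0, 2.0),    /* 8 */
           fcmpu_bits(2.0, 1.0),    /* 4 */
           fcmpu_bits(1.0, 1.0),    /* 2 */
           fcmpu_bits(NAN, 1.0));   /* 1 */
    return 0;
}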
(uint32_t)arg2 && (flags & 0x01))))) { - helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP); - } -} - -#if defined(TARGET_PPC64) -void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags) -{ - if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) || - ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) || - ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) || - ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) || - ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) - helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP); -} -#endif - -/*****************************************************************************/ -/* PowerPC 601 specific instructions (POWER bridge) */ - -target_ulong helper_clcs (uint32_t arg) -{ - switch (arg) { - case 0x0CUL: - /* Instruction cache line size */ - return env->icache_line_size; - break; - case 0x0DUL: - /* Data cache line size */ - return env->dcache_line_size; - break; - case 0x0EUL: - /* Minimum cache line size */ - return (env->icache_line_size < env->dcache_line_size) ? - env->icache_line_size : env->dcache_line_size; - break; - case 0x0FUL: - /* Maximum cache line size */ - return (env->icache_line_size > env->dcache_line_size) ? - env->icache_line_size : env->dcache_line_size; - break; - default: - /* Undefined */ - return 0; - break; - } -} - -target_ulong helper_div (target_ulong arg1, target_ulong arg2) -{ - uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ]; - - if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || - (int32_t)arg2 == 0) { - env->spr[SPR_MQ] = 0; - return INT32_MIN; - } else { - env->spr[SPR_MQ] = tmp % arg2; - return tmp / (int32_t)arg2; - } -} - -target_ulong helper_divo (target_ulong arg1, target_ulong arg2) -{ - uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ]; - - if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || - (int32_t)arg2 == 0) { - env->xer |= (1 << XER_OV) | (1 << XER_SO); - env->spr[SPR_MQ] = 0; - return INT32_MIN; - } else { - env->spr[SPR_MQ] = tmp % arg2; - tmp /= (int32_t)arg2; - if ((int32_t)tmp != tmp) { - env->xer |= (1 << XER_OV) | (1 << XER_SO); - } else { - env->xer &= ~(1 << XER_OV); - } - return tmp; - } -} - -target_ulong helper_divs (target_ulong arg1, target_ulong arg2) -{ - if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || - (int32_t)arg2 == 0) { - env->spr[SPR_MQ] = 0; - return INT32_MIN; - } else { - env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2; - return (int32_t)arg1 / (int32_t)arg2; - } -} - -target_ulong helper_divso (target_ulong arg1, target_ulong arg2) -{ - if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || - (int32_t)arg2 == 0) { - env->xer |= (1 << XER_OV) | (1 << XER_SO); - env->spr[SPR_MQ] = 0; - return INT32_MIN; - } else { - env->xer &= ~(1 << XER_OV); - env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2; - return (int32_t)arg1 / (int32_t)arg2; - } -} - -#if !defined (CONFIG_USER_ONLY) -target_ulong helper_rac (target_ulong addr) -{ - mmu_ctx_t ctx; - int nb_BATs; - target_ulong ret = 0; - - /* We don't have to generate many instances of this instruction, - * as rac is supervisor only. 
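The POWER-bridge divide helpers build a 64-bit dividend out of RA (high word) and the MQ SPR (low word), return the quotient and put the remainder back into MQ. A simplified sketch of that data flow; the removed code's overflow test on the truncated 32-bit value and its mixed signed/unsigned arithmetic are deliberately not reproduced:

#include <stdint.h>
#include <stdio.h>

static int32_t power_div_like(uint32_t ra, int32_t rb, uint32_t *mq)
{
    int64_t dividend = (int64_t)(((uint64_t)ra << 32) | *mq);

    if (rb == 0 || (dividend == INT64_MIN && rb == -1)) {
        *mq = 0;                          /* the helper also zeroes MQ here */
        return INT32_MIN;
    }
    *mq = (uint32_t)(dividend % rb);      /* remainder goes back into MQ */
    return (int32_t)(dividend / rb);      /* quotient (may exceed 32 bits) */
}

int main(void)
{
    uint32_t mq = 100;                    /* low word of the dividend */
    int32_t q = power_div_like(0, 7, &mq);
    printf("q=%d r=%u\n", q, mq);         /* q=14 r=2 */
    return 0;
}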
- */ - /* XXX: FIX THIS: Pretend we have no BAT */ - nb_BATs = env->nb_BATs; - env->nb_BATs = 0; - if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) - ret = ctx.raddr; - env->nb_BATs = nb_BATs; - return ret; -} - -void helper_rfsvc (void) -{ - do_rfi(env->lr, env->ctr, 0x0000FFFF, 0); -} -#endif - -/*****************************************************************************/ -/* 602 specific instructions */ -/* mfrom is the most crazy instruction ever seen, imho ! */ -/* Real implementation uses a ROM table. Do the same */ -/* Extremely decomposed: - * -arg / 256 - * return 256 * log10(10 + 1.0) + 0.5 - */ -#if !defined (CONFIG_USER_ONLY) -target_ulong helper_602_mfrom (target_ulong arg) -{ - if (likely(arg < 602)) { -#include "mfrom_table.c" - return mfrom_ROM_table[arg]; - } else { - return 0; - } -} -#endif - -/*****************************************************************************/ -/* Embedded PowerPC specific helpers */ - -/* XXX: to be improved to check access rights when in user-mode */ -target_ulong helper_load_dcr (target_ulong dcrn) -{ - uint32_t val = 0; - - if (unlikely(env->dcr_env == NULL)) { - qemu_log("No DCR environment\n"); - helper_raise_exception_err(POWERPC_EXCP_PROGRAM, - POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL); - } else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) { - qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn); - helper_raise_exception_err(POWERPC_EXCP_PROGRAM, - POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG); - } - return val; -} - -void helper_store_dcr (target_ulong dcrn, target_ulong val) -{ - if (unlikely(env->dcr_env == NULL)) { - qemu_log("No DCR environment\n"); - helper_raise_exception_err(POWERPC_EXCP_PROGRAM, - POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL); - } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) { - qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn); - helper_raise_exception_err(POWERPC_EXCP_PROGRAM, - POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG); - } -} - -#if !defined(CONFIG_USER_ONLY) -void helper_40x_rfci (void) -{ - do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3], - ~((target_ulong)0xFFFF0000), 0); -} - -void helper_rfci (void) -{ - do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1, - ~((target_ulong)0x3FFF0000), 0); -} - -void helper_rfdi (void) -{ - do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1, - ~((target_ulong)0x3FFF0000), 0); -} - -void helper_rfmci (void) -{ - do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1, - ~((target_ulong)0x3FFF0000), 0); -} -#endif - -/* 440 specific */ -target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc) -{ - target_ulong mask; - int i; - - i = 1; - for (mask = 0xFF000000; mask != 0; mask = mask >> 8) { - if ((high & mask) == 0) { - if (update_Rc) { - env->crf[0] = 0x4; - } - goto done; - } - i++; - } - for (mask = 0xFF000000; mask != 0; mask = mask >> 8) { - if ((low & mask) == 0) { - if (update_Rc) { - env->crf[0] = 0x8; - } - goto done; - } - i++; - } - if (update_Rc) { - env->crf[0] = 0x2; - } - done: - env->xer = (env->xer & ~0x7F) | i; - if (update_Rc) { - env->crf[0] |= xer_so; - } - return i; -} - -/*****************************************************************************/ -/* Altivec extension helpers */ -#if defined(HOST_WORDS_BIGENDIAN) -#define HI_IDX 0 -#define LO_IDX 1 -#else -#define HI_IDX 1 -#define LO_IDX 0 -#endif - -#if defined(HOST_WORDS_BIGENDIAN) -#define VECTOR_FOR_INORDER_I(index, element) \ - for (index = 
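helper_dlmzb scans the eight bytes of the high:low word pair and reports the result through XER (and CR0 for the record form). A standalone version of just the scan, returning the 1-based position of the first zero byte, or 9 if there is none, matching the count the loop above produces:

#include <stdint.h>
#include <stdio.h>

static int dlmzb_like(uint32_t high, uint32_t low)
{
    uint32_t words[2] = { high, low };
    int i = 1;

    for (int w = 0; w < 2; w++) {
        for (uint32_t mask = 0xFF000000u; mask != 0; mask >>= 8) {
            if ((words[w] & mask) == 0) {
                return i;           /* landed on the zero byte */
            }
            i++;
        }
    }
    return i;                       /* no zero byte: i is 9 */
}

int main(void)
{
    /* "ABCDE\0GH": the zero byte is the 6th byte */
    printf("%d\n", dlmzb_like(0x41424344, 0x45004748));
    return 0;
}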
0; index < ARRAY_SIZE(r->element); index++) -#else -#define VECTOR_FOR_INORDER_I(index, element) \ - for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--) -#endif - -/* If X is a NaN, store the corresponding QNaN into RESULT. Otherwise, - * execute the following block. */ -#define DO_HANDLE_NAN(result, x) \ - if (float32_is_any_nan(x)) { \ - CPU_FloatU __f; \ - __f.f = x; \ - __f.l = __f.l | (1 << 22); /* Set QNaN bit. */ \ - result = __f.f; \ - } else - -#define HANDLE_NAN1(result, x) \ - DO_HANDLE_NAN(result, x) -#define HANDLE_NAN2(result, x, y) \ - DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) -#define HANDLE_NAN3(result, x, y, z) \ - DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z) - -/* Saturating arithmetic helpers. */ -#define SATCVT(from, to, from_type, to_type, min, max) \ - static inline to_type cvt##from##to(from_type x, int *sat) \ - { \ - to_type r; \ - if (x < (from_type)min) { \ - r = min; \ - *sat = 1; \ - } else if (x > (from_type)max) { \ - r = max; \ - *sat = 1; \ - } else { \ - r = x; \ - } \ - return r; \ - } -#define SATCVTU(from, to, from_type, to_type, min, max) \ - static inline to_type cvt##from##to(from_type x, int *sat) \ - { \ - to_type r; \ - if (x > (from_type)max) { \ - r = max; \ - *sat = 1; \ - } else { \ - r = x; \ - } \ - return r; \ - } -SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX) -SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX) -SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX) - -SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX) -SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX) -SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX) -SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX) -SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX) -SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX) -#undef SATCVT -#undef SATCVTU - -#define LVE(name, access, swap, element) \ - void helper_##name (ppc_avr_t *r, target_ulong addr) \ - { \ - size_t n_elems = ARRAY_SIZE(r->element); \ - int adjust = HI_IDX*(n_elems-1); \ - int sh = sizeof(r->element[0]) >> 1; \ - int index = (addr & 0xf) >> sh; \ - if(msr_le) { \ - r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \ - } else { \ - r->element[LO_IDX ? index : (adjust - index)] = access(addr); \ - } \ - } -#define I(x) (x) -LVE(lvebx, ldub, I, u8) -LVE(lvehx, lduw, bswap16, u16) -LVE(lvewx, ldl, bswap32, u32) -#undef I -#undef LVE - -void helper_lvsl (ppc_avr_t *r, target_ulong sh) -{ - int i, j = (sh & 0xf); - - VECTOR_FOR_INORDER_I (i, u8) { - r->u8[i] = j++; - } -} - -void helper_lvsr (ppc_avr_t *r, target_ulong sh) -{ - int i, j = 0x10 - (sh & 0xf); - - VECTOR_FOR_INORDER_I (i, u8) { - r->u8[i] = j++; - } -} - -#define STVE(name, access, swap, element) \ - void helper_##name (ppc_avr_t *r, target_ulong addr) \ - { \ - size_t n_elems = ARRAY_SIZE(r->element); \ - int adjust = HI_IDX*(n_elems-1); \ - int sh = sizeof(r->element[0]) >> 1; \ - int index = (addr & 0xf) >> sh; \ - if(msr_le) { \ - access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \ - } else { \ - access(addr, r->element[LO_IDX ? 
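The SATCVT/SATCVTU macros clamp a wider intermediate into a narrower lane type and set a sticky flag that later raises VSCR[SAT]. One expansion of the pattern written out by hand (int32 to int16, the cvtswsh case):

#include <stdint.h>
#include <stdio.h>

static int16_t cvtswsh_like(int32_t x, int *sat)
{
    if (x < INT16_MIN) {
        *sat = 1;
        return INT16_MIN;
    }
    if (x > INT16_MAX) {
        *sat = 1;
        return INT16_MAX;
    }
    return (int16_t)x;
}

int main(void)
{
    int sat = 0;
    printf("%d %d ", cvtswsh_like(100000, &sat), cvtswsh_like(-7, &sat));
    printf("sat=%d\n", sat);        /* 32767 -7 sat=1 */
    return 0;
}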
index : (adjust - index)]); \ - } \ - } -#define I(x) (x) -STVE(stvebx, stb, I, u8) -STVE(stvehx, stw, bswap16, u16) -STVE(stvewx, stl, bswap32, u32) -#undef I -#undef LVE - -void helper_mtvscr (ppc_avr_t *r) -{ -#if defined(HOST_WORDS_BIGENDIAN) - env->vscr = r->u32[3]; -#else - env->vscr = r->u32[0]; -#endif - set_flush_to_zero(vscr_nj, &env->vec_status); -} - -void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ - int i; - for (i = 0; i < ARRAY_SIZE(r->u32); i++) { - r->u32[i] = ~a->u32[i] < b->u32[i]; - } -} - -#define VARITH_DO(name, op, element) \ -void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ -{ \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - r->element[i] = a->element[i] op b->element[i]; \ - } \ -} -#define VARITH(suffix, element) \ - VARITH_DO(add##suffix, +, element) \ - VARITH_DO(sub##suffix, -, element) -VARITH(ubm, u8) -VARITH(uhm, u16) -VARITH(uwm, u32) -#undef VARITH_DO -#undef VARITH - -#define VARITHFP(suffix, func) \ - void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ - { \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->f); i++) { \ - HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \ - r->f[i] = func(a->f[i], b->f[i], &env->vec_status); \ - } \ - } \ - } -VARITHFP(addfp, float32_add) -VARITHFP(subfp, float32_sub) -#undef VARITHFP - -#define VARITHSAT_CASE(type, op, cvt, element) \ - { \ - type result = (type)a->element[i] op (type)b->element[i]; \ - r->element[i] = cvt(result, &sat); \ - } - -#define VARITHSAT_DO(name, op, optype, cvt, element) \ - void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ - { \ - int sat = 0; \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - switch (sizeof(r->element[0])) { \ - case 1: VARITHSAT_CASE(optype, op, cvt, element); break; \ - case 2: VARITHSAT_CASE(optype, op, cvt, element); break; \ - case 4: VARITHSAT_CASE(optype, op, cvt, element); break; \ - } \ - } \ - if (sat) { \ - env->vscr |= (1 << VSCR_SAT); \ - } \ - } -#define VARITHSAT_SIGNED(suffix, element, optype, cvt) \ - VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \ - VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element) -#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \ - VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \ - VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element) -VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb) -VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh) -VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw) -VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub) -VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh) -VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw) -#undef VARITHSAT_CASE -#undef VARITHSAT_DO -#undef VARITHSAT_SIGNED -#undef VARITHSAT_UNSIGNED - -#define VAVG_DO(name, element, etype) \ - void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ - { \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \ - r->element[i] = x >> 1; \ - } \ - } - -#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \ - VAVG_DO(avgs##type, signed_element, signed_type) \ - VAVG_DO(avgu##type, unsigned_element, unsigned_type) -VAVG(b, s8, int16_t, u8, uint16_t) -VAVG(h, s16, int32_t, u16, uint32_t) -VAVG(w, s32, int64_t, u32, uint64_t) -#undef VAVG_DO -#undef VAVG - -#define VCF(suffix, cvt, element) \ - void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \ - { \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->f); i++) { \ - float32 t = cvt(b->element[i], &env->vec_status); \ - 
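The VAVG macros average each pair of lanes with rounding by doing the sum in a wider type and adding 1 before the shift, so the intermediate cannot overflow. Scalar sketch for unsigned bytes:

#include <stdint.h>
#include <stdio.h>

static uint8_t vavgub_like(uint8_t a, uint8_t b)
{
    uint16_t x = (uint16_t)a + (uint16_t)b + 1;     /* wider intermediate */
    return (uint8_t)(x >> 1);                       /* round-half-up average */
}

int main(void)
{
    printf("%u %u\n", vavgub_like(1, 2), vavgub_like(255, 255));   /* 2 255 */
    return 0;
}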
r->f[i] = float32_scalbn (t, -uim, &env->vec_status); \ - } \ - } -VCF(ux, uint32_to_float32, u32) -VCF(sx, int32_to_float32, s32) -#undef VCF - -#define VCMP_DO(suffix, compare, element, record) \ - void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ - { \ - uint32_t ones = (uint32_t)-1; \ - uint32_t all = ones; \ - uint32_t none = 0; \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \ - switch (sizeof (a->element[0])) { \ - case 4: r->u32[i] = result; break; \ - case 2: r->u16[i] = result; break; \ - case 1: r->u8[i] = result; break; \ - } \ - all &= result; \ - none |= result; \ - } \ - if (record) { \ - env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \ - } \ - } -#define VCMP(suffix, compare, element) \ - VCMP_DO(suffix, compare, element, 0) \ - VCMP_DO(suffix##_dot, compare, element, 1) -VCMP(equb, ==, u8) -VCMP(equh, ==, u16) -VCMP(equw, ==, u32) -VCMP(gtub, >, u8) -VCMP(gtuh, >, u16) -VCMP(gtuw, >, u32) -VCMP(gtsb, >, s8) -VCMP(gtsh, >, s16) -VCMP(gtsw, >, s32) -#undef VCMP_DO -#undef VCMP - -#define VCMPFP_DO(suffix, compare, order, record) \ - void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ - { \ - uint32_t ones = (uint32_t)-1; \ - uint32_t all = ones; \ - uint32_t none = 0; \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->f); i++) { \ - uint32_t result; \ - int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \ - if (rel == float_relation_unordered) { \ - result = 0; \ - } else if (rel compare order) { \ - result = ones; \ - } else { \ - result = 0; \ - } \ - r->u32[i] = result; \ - all &= result; \ - none |= result; \ - } \ - if (record) { \ - env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \ - } \ - } -#define VCMPFP(suffix, compare, order) \ - VCMPFP_DO(suffix, compare, order, 0) \ - VCMPFP_DO(suffix##_dot, compare, order, 1) -VCMPFP(eqfp, ==, float_relation_equal) -VCMPFP(gefp, !=, float_relation_less) -VCMPFP(gtfp, ==, float_relation_greater) -#undef VCMPFP_DO -#undef VCMPFP - -static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, - int record) -{ - int i; - int all_in = 0; - for (i = 0; i < ARRAY_SIZE(r->f); i++) { - int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); - if (le_rel == float_relation_unordered) { - r->u32[i] = 0xc0000000; - /* ALL_IN does not need to be updated here. 
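The VCMP macros write an all-ones or all-zero mask into each lane; the dot forms also set CR6 bit 3 when every lane matched and bit 1 when none did. A scalar sketch over four 32-bit lanes showing the mask and the CR6-style summary:

#include <stdint.h>
#include <stdio.h>

static unsigned vcmpequw_like(uint32_t r[4], const uint32_t a[4],
                              const uint32_t b[4])
{
    uint32_t all = UINT32_MAX, any = 0;

    for (int i = 0; i < 4; i++) {
        uint32_t result = (a[i] == b[i]) ? UINT32_MAX : 0;
        r[i] = result;
        all &= result;
        any |= result;
    }
    return ((all != 0) << 3) | ((any == 0) << 1);   /* CR6-style summary */
}

int main(void)
{
    uint32_t a[4] = {1, 2, 3, 4}, b[4] = {1, 9, 3, 4}, r[4];
    printf("cr6=%x\n", vcmpequw_like(r, a, b));     /* 0: mixed result */
    return 0;
}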
*/ - } else { - float32 bneg = float32_chs(b->f[i]); - int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status); - int le = le_rel != float_relation_greater; - int ge = ge_rel != float_relation_less; - r->u32[i] = ((!le) << 31) | ((!ge) << 30); - all_in |= (!le | !ge); - } - } - if (record) { - env->crf[6] = (all_in == 0) << 1; - } -} - -void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ - vcmpbfp_internal(r, a, b, 0); -} - -void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ - vcmpbfp_internal(r, a, b, 1); -} - -#define VCT(suffix, satcvt, element) \ - void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \ - { \ - int i; \ - int sat = 0; \ - float_status s = env->vec_status; \ - set_float_rounding_mode(float_round_to_zero, &s); \ - for (i = 0; i < ARRAY_SIZE(r->f); i++) { \ - if (float32_is_any_nan(b->f[i])) { \ - r->element[i] = 0; \ - } else { \ - float64 t = float32_to_float64(b->f[i], &s); \ - int64_t j; \ - t = float64_scalbn(t, uim, &s); \ - j = float64_to_int64(t, &s); \ - r->element[i] = satcvt(j, &sat); \ - } \ - } \ - if (sat) { \ - env->vscr |= (1 << VSCR_SAT); \ - } \ - } -VCT(uxs, cvtsduw, u32) -VCT(sxs, cvtsdsw, s32) -#undef VCT - -void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ - int i; - for (i = 0; i < ARRAY_SIZE(r->f); i++) { - HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) { - /* Need to do the computation in higher precision and round - * once at the end. */ - float64 af, bf, cf, t; - af = float32_to_float64(a->f[i], &env->vec_status); - bf = float32_to_float64(b->f[i], &env->vec_status); - cf = float32_to_float64(c->f[i], &env->vec_status); - t = float64_mul(af, cf, &env->vec_status); - t = float64_add(t, bf, &env->vec_status); - r->f[i] = float64_to_float32(t, &env->vec_status); - } - } -} - -void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ - int sat = 0; - int i; - - for (i = 0; i < ARRAY_SIZE(r->s16); i++) { - int32_t prod = a->s16[i] * b->s16[i]; - int32_t t = (int32_t)c->s16[i] + (prod >> 15); - r->s16[i] = cvtswsh (t, &sat); - } - - if (sat) { - env->vscr |= (1 << VSCR_SAT); - } -} - -void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ - int sat = 0; - int i; - - for (i = 0; i < ARRAY_SIZE(r->s16); i++) { - int32_t prod = a->s16[i] * b->s16[i] + 0x00004000; - int32_t t = (int32_t)c->s16[i] + (prod >> 15); - r->s16[i] = cvtswsh (t, &sat); - } - - if (sat) { - env->vscr |= (1 << VSCR_SAT); - } -} - -#define VMINMAX_DO(name, compare, element) \ - void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ - { \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - if (a->element[i] compare b->element[i]) { \ - r->element[i] = b->element[i]; \ - } else { \ - r->element[i] = a->element[i]; \ - } \ - } \ - } -#define VMINMAX(suffix, element) \ - VMINMAX_DO(min##suffix, >, element) \ - VMINMAX_DO(max##suffix, <, element) -VMINMAX(sb, s8) -VMINMAX(sh, s16) -VMINMAX(sw, s32) -VMINMAX(ub, u8) -VMINMAX(uh, u16) -VMINMAX(uw, u32) -#undef VMINMAX_DO -#undef VMINMAX - -#define VMINMAXFP(suffix, rT, rF) \ - void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ - { \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->f); i++) { \ - HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \ - if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \ - r->f[i] = rT->f[i]; \ - } else { \ - r->f[i] = rF->f[i]; \ - } \ - } \ - } \ - } -VMINMAXFP(minfp, a, b) -VMINMAXFP(maxfp, b, a) -#undef VMINMAXFP - -void 
helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ - int i; - for (i = 0; i < ARRAY_SIZE(r->s16); i++) { - int32_t prod = a->s16[i] * b->s16[i]; - r->s16[i] = (int16_t) (prod + c->s16[i]); - } -} - -#define VMRG_DO(name, element, highp) \ - void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ - { \ - ppc_avr_t result; \ - int i; \ - size_t n_elems = ARRAY_SIZE(r->element); \ - for (i = 0; i < n_elems/2; i++) { \ - if (highp) { \ - result.element[i*2+HI_IDX] = a->element[i]; \ - result.element[i*2+LO_IDX] = b->element[i]; \ - } else { \ - result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \ - result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \ - } \ - } \ - *r = result; \ - } -#if defined(HOST_WORDS_BIGENDIAN) -#define MRGHI 0 -#define MRGLO 1 -#else -#define MRGHI 1 -#define MRGLO 0 -#endif -#define VMRG(suffix, element) \ - VMRG_DO(mrgl##suffix, element, MRGHI) \ - VMRG_DO(mrgh##suffix, element, MRGLO) -VMRG(b, u8) -VMRG(h, u16) -VMRG(w, u32) -#undef VMRG_DO -#undef VMRG -#undef MRGHI -#undef MRGLO - -void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ - int32_t prod[16]; - int i; - - for (i = 0; i < ARRAY_SIZE(r->s8); i++) { - prod[i] = (int32_t)a->s8[i] * b->u8[i]; - } - - VECTOR_FOR_INORDER_I(i, s32) { - r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3]; - } -} - -void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ - int32_t prod[8]; - int i; - - for (i = 0; i < ARRAY_SIZE(r->s16); i++) { - prod[i] = a->s16[i] * b->s16[i]; - } - - VECTOR_FOR_INORDER_I(i, s32) { - r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1]; - } -} - -void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ - int32_t prod[8]; - int i; - int sat = 0; - - for (i = 0; i < ARRAY_SIZE(r->s16); i++) { - prod[i] = (int32_t)a->s16[i] * b->s16[i]; - } - - VECTOR_FOR_INORDER_I (i, s32) { - int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1]; - r->u32[i] = cvtsdsw(t, &sat); - } - - if (sat) { - env->vscr |= (1 << VSCR_SAT); - } -} - -void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ - uint16_t prod[16]; - int i; - - for (i = 0; i < ARRAY_SIZE(r->u8); i++) { - prod[i] = a->u8[i] * b->u8[i]; - } - - VECTOR_FOR_INORDER_I(i, u32) { - r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3]; - } -} - -void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ - uint32_t prod[8]; - int i; - - for (i = 0; i < ARRAY_SIZE(r->u16); i++) { - prod[i] = a->u16[i] * b->u16[i]; - } - - VECTOR_FOR_INORDER_I(i, u32) { - r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1]; - } -} - -void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ - uint32_t prod[8]; - int i; - int sat = 0; - - for (i = 0; i < ARRAY_SIZE(r->u16); i++) { - prod[i] = a->u16[i] * b->u16[i]; - } - - VECTOR_FOR_INORDER_I (i, s32) { - uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1]; - r->u32[i] = cvtuduw(t, &sat); - } - - if (sat) { - env->vscr |= (1 << VSCR_SAT); - } -} - -#define VMUL_DO(name, mul_element, prod_element, evenp) \ - void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ - { \ - int i; \ - VECTOR_FOR_INORDER_I(i, prod_element) { \ - if (evenp) { \ - r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \ - } else { \ - r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \ - } \ - } \ - } -#define 
VMUL(suffix, mul_element, prod_element) \ - VMUL_DO(mule##suffix, mul_element, prod_element, 1) \ - VMUL_DO(mulo##suffix, mul_element, prod_element, 0) -VMUL(sb, s8, s16) -VMUL(sh, s16, s32) -VMUL(ub, u8, u16) -VMUL(uh, u16, u32) -#undef VMUL_DO -#undef VMUL - -void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ - int i; - for (i = 0; i < ARRAY_SIZE(r->f); i++) { - HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) { - /* Need to do the computation is higher precision and round - * once at the end. */ - float64 af, bf, cf, t; - af = float32_to_float64(a->f[i], &env->vec_status); - bf = float32_to_float64(b->f[i], &env->vec_status); - cf = float32_to_float64(c->f[i], &env->vec_status); - t = float64_mul(af, cf, &env->vec_status); - t = float64_sub(t, bf, &env->vec_status); - t = float64_chs(t); - r->f[i] = float64_to_float32(t, &env->vec_status); - } - } -} - -void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ - ppc_avr_t result; - int i; - VECTOR_FOR_INORDER_I (i, u8) { - int s = c->u8[i] & 0x1f; -#if defined(HOST_WORDS_BIGENDIAN) - int index = s & 0xf; -#else - int index = 15 - (s & 0xf); -#endif - if (s & 0x10) { - result.u8[i] = b->u8[index]; - } else { - result.u8[i] = a->u8[index]; - } - } - *r = result; -} - -#if defined(HOST_WORDS_BIGENDIAN) -#define PKBIG 1 -#else -#define PKBIG 0 -#endif -void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ - int i, j; - ppc_avr_t result; -#if defined(HOST_WORDS_BIGENDIAN) - const ppc_avr_t *x[2] = { a, b }; -#else - const ppc_avr_t *x[2] = { b, a }; -#endif - - VECTOR_FOR_INORDER_I (i, u64) { - VECTOR_FOR_INORDER_I (j, u32){ - uint32_t e = x[i]->u32[j]; - result.u16[4*i+j] = (((e >> 9) & 0xfc00) | - ((e >> 6) & 0x3e0) | - ((e >> 3) & 0x1f)); - } - } - *r = result; -} - -#define VPK(suffix, from, to, cvt, dosat) \ - void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ - { \ - int i; \ - int sat = 0; \ - ppc_avr_t result; \ - ppc_avr_t *a0 = PKBIG ? a : b; \ - ppc_avr_t *a1 = PKBIG ? 
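helper_vperm picks each result byte from the 32-byte concatenation of the two sources: bit 4 of the control byte selects a or b, the low four bits select the byte. A big-endian-order sketch without the host-endian index fix-up used above:

#include <stdint.h>
#include <stdio.h>

static void vperm_like(uint8_t r[16], const uint8_t a[16],
                       const uint8_t b[16], const uint8_t c[16])
{
    for (int i = 0; i < 16; i++) {
        int s = c[i] & 0x1f;                    /* 5-bit selector */
        r[i] = (s & 0x10) ? b[s & 0xf] : a[s & 0xf];
    }
}

int main(void)
{
    uint8_t a[16], b[16], c[16], r[16];
    for (int i = 0; i < 16; i++) {
        a[i] = i;               /* 0x00..0x0f */
        b[i] = 0x40 + i;        /* 0x40..0x4f */
        c[i] = 31 - i;          /* select the bytes of b in reverse order */
    }
    vperm_like(r, a, b, c);
    printf("r[0]=%#x r[15]=%#x\n", (unsigned)r[0], (unsigned)r[15]);
    /* r[0]=0x4f (last byte of b), r[15]=0x40 (first byte of b) */
    return 0;
}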
b : a; \ - VECTOR_FOR_INORDER_I (i, from) { \ - result.to[i] = cvt(a0->from[i], &sat); \ - result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \ - } \ - *r = result; \ - if (dosat && sat) { \ - env->vscr |= (1 << VSCR_SAT); \ - } \ - } -#define I(x, y) (x) -VPK(shss, s16, s8, cvtshsb, 1) -VPK(shus, s16, u8, cvtshub, 1) -VPK(swss, s32, s16, cvtswsh, 1) -VPK(swus, s32, u16, cvtswuh, 1) -VPK(uhus, u16, u8, cvtuhub, 1) -VPK(uwus, u32, u16, cvtuwuh, 1) -VPK(uhum, u16, u8, I, 0) -VPK(uwum, u32, u16, I, 0) -#undef I -#undef VPK -#undef PKBIG - -void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b) -{ - int i; - for (i = 0; i < ARRAY_SIZE(r->f); i++) { - HANDLE_NAN1(r->f[i], b->f[i]) { - r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status); - } - } -} - -#define VRFI(suffix, rounding) \ - void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b) \ - { \ - int i; \ - float_status s = env->vec_status; \ - set_float_rounding_mode(rounding, &s); \ - for (i = 0; i < ARRAY_SIZE(r->f); i++) { \ - HANDLE_NAN1(r->f[i], b->f[i]) { \ - r->f[i] = float32_round_to_int (b->f[i], &s); \ - } \ - } \ - } -VRFI(n, float_round_nearest_even) -VRFI(m, float_round_down) -VRFI(p, float_round_up) -VRFI(z, float_round_to_zero) -#undef VRFI - -#define VROTATE(suffix, element) \ - void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ - { \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \ - unsigned int shift = b->element[i] & mask; \ - r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \ - } \ - } -VROTATE(b, u8) -VROTATE(h, u16) -VROTATE(w, u32) -#undef VROTATE - -void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b) -{ - int i; - for (i = 0; i < ARRAY_SIZE(r->f); i++) { - HANDLE_NAN1(r->f[i], b->f[i]) { - float32 t = float32_sqrt(b->f[i], &env->vec_status); - r->f[i] = float32_div(float32_one, t, &env->vec_status); - } - } -} - -void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) -{ - r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]); - r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]); -} - -void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b) -{ - int i; - for (i = 0; i < ARRAY_SIZE(r->f); i++) { - HANDLE_NAN1(r->f[i], b->f[i]) { - r->f[i] = float32_exp2(b->f[i], &env->vec_status); - } - } -} - -void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b) -{ - int i; - for (i = 0; i < ARRAY_SIZE(r->f); i++) { - HANDLE_NAN1(r->f[i], b->f[i]) { - r->f[i] = float32_log2(b->f[i], &env->vec_status); - } - } -} - -#if defined(HOST_WORDS_BIGENDIAN) -#define LEFT 0 -#define RIGHT 1 -#else -#define LEFT 1 -#define RIGHT 0 -#endif -/* The specification says that the results are undefined if all of the - * shift counts are not identical. We check to make sure that they are - * to conform to what real hardware appears to do. 
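The VROTATE macros rotate each lane left by a per-lane count masked to the lane width. A scalar sketch for one 32-bit lane, written so a zero count does not turn into a full-width shift:

#include <stdint.h>
#include <stdio.h>

static uint32_t vrlw_lane(uint32_t a, uint32_t b)
{
    unsigned shift = b & 31;                        /* mask to lane width */
    return (a << shift) | (a >> ((32 - shift) & 31));
}

int main(void)
{
    printf("%#lx %#lx\n",
           (unsigned long)vrlw_lane(0x80000001u, 1),     /* 0x3 */
           (unsigned long)vrlw_lane(0x12345678u, 0));    /* unchanged */
    return 0;
}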
*/ -#define VSHIFT(suffix, leftp) \ - void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ - { \ - int shift = b->u8[LO_IDX*15] & 0x7; \ - int doit = 1; \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \ - doit = doit && ((b->u8[i] & 0x7) == shift); \ - } \ - if (doit) { \ - if (shift == 0) { \ - *r = *a; \ - } else if (leftp) { \ - uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \ - r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \ - r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \ - } else { \ - uint64_t carry = a->u64[HI_IDX] << (64 - shift); \ - r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \ - r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \ - } \ - } \ - } -VSHIFT(l, LEFT) -VSHIFT(r, RIGHT) -#undef VSHIFT -#undef LEFT -#undef RIGHT - -#define VSL(suffix, element) \ - void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ - { \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \ - unsigned int shift = b->element[i] & mask; \ - r->element[i] = a->element[i] << shift; \ - } \ - } -VSL(b, u8) -VSL(h, u16) -VSL(w, u32) -#undef VSL - -void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift) -{ - int sh = shift & 0xf; - int i; - ppc_avr_t result; - -#if defined(HOST_WORDS_BIGENDIAN) - for (i = 0; i < ARRAY_SIZE(r->u8); i++) { - int index = sh + i; - if (index > 0xf) { - result.u8[i] = b->u8[index-0x10]; - } else { - result.u8[i] = a->u8[index]; - } - } -#else - for (i = 0; i < ARRAY_SIZE(r->u8); i++) { - int index = (16 - sh) + i; - if (index > 0xf) { - result.u8[i] = a->u8[index-0x10]; - } else { - result.u8[i] = b->u8[index]; - } - } -#endif - *r = result; -} - -void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ - int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf; - -#if defined (HOST_WORDS_BIGENDIAN) - memmove (&r->u8[0], &a->u8[sh], 16-sh); - memset (&r->u8[16-sh], 0, sh); -#else - memmove (&r->u8[sh], &a->u8[0], 16-sh); - memset (&r->u8[0], 0, sh); -#endif -} - -/* Experimental testing shows that hardware masks the immediate. 
*/ -#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1)) -#if defined(HOST_WORDS_BIGENDIAN) -#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element) -#else -#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element)) -#endif -#define VSPLT(suffix, element) \ - void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \ - { \ - uint32_t s = b->element[SPLAT_ELEMENT(element)]; \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - r->element[i] = s; \ - } \ - } -VSPLT(b, u8) -VSPLT(h, u16) -VSPLT(w, u32) -#undef VSPLT -#undef SPLAT_ELEMENT -#undef _SPLAT_MASKED - -#define VSPLTI(suffix, element, splat_type) \ - void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat) \ - { \ - splat_type x = (int8_t)(splat << 3) >> 3; \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - r->element[i] = x; \ - } \ - } -VSPLTI(b, s8, int8_t) -VSPLTI(h, s16, int16_t) -VSPLTI(w, s32, int32_t) -#undef VSPLTI - -#define VSR(suffix, element) \ - void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ - { \ - int i; \ - for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ - unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \ - unsigned int shift = b->element[i] & mask; \ - r->element[i] = a->element[i] >> shift; \ - } \ - } -VSR(ab, s8) -VSR(ah, s16) -VSR(aw, s32) -VSR(b, u8) -VSR(h, u16) -VSR(w, u32) -#undef VSR - -void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ - int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf; - -#if defined (HOST_WORDS_BIGENDIAN) - memmove (&r->u8[sh], &a->u8[0], 16-sh); - memset (&r->u8[0], 0, sh); -#else - memmove (&r->u8[0], &a->u8[sh], 16-sh); - memset (&r->u8[16-sh], 0, sh); -#endif -} - -void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ - int i; - for (i = 0; i < ARRAY_SIZE(r->u32); i++) { - r->u32[i] = a->u32[i] >= b->u32[i]; - } -} - -void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ - int64_t t; - int i, upper; - ppc_avr_t result; - int sat = 0; - -#if defined(HOST_WORDS_BIGENDIAN) - upper = ARRAY_SIZE(r->s32)-1; -#else - upper = 0; -#endif - t = (int64_t)b->s32[upper]; - for (i = 0; i < ARRAY_SIZE(r->s32); i++) { - t += a->s32[i]; - result.s32[i] = 0; - } - result.s32[upper] = cvtsdsw(t, &sat); - *r = result; - - if (sat) { - env->vscr |= (1 << VSCR_SAT); - } -} - -void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ - int i, j, upper; - ppc_avr_t result; - int sat = 0; - -#if defined(HOST_WORDS_BIGENDIAN) - upper = 1; -#else - upper = 0; -#endif - for (i = 0; i < ARRAY_SIZE(r->u64); i++) { - int64_t t = (int64_t)b->s32[upper+i*2]; - result.u64[i] = 0; - for (j = 0; j < ARRAY_SIZE(r->u64); j++) { - t += a->s32[2*i+j]; - } - result.s32[upper+i*2] = cvtsdsw(t, &sat); - } - - *r = result; - if (sat) { - env->vscr |= (1 << VSCR_SAT); - } -} - -void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ - int i, j; - int sat = 0; - - for (i = 0; i < ARRAY_SIZE(r->s32); i++) { - int64_t t = (int64_t)b->s32[i]; - for (j = 0; j < ARRAY_SIZE(r->s32); j++) { - t += a->s8[4*i+j]; - } - r->s32[i] = cvtsdsw(t, &sat); - } - - if (sat) { - env->vscr |= (1 << VSCR_SAT); - } -} - -void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) -{ - int sat = 0; - int i; - - for (i = 0; i < ARRAY_SIZE(r->s32); i++) { - int64_t t = (int64_t)b->s32[i]; - t += a->s16[2*i] + a->s16[2*i+1]; - r->s32[i] = cvtsdsw(t, &sat); - } - - if (sat) { - env->vscr |= (1 << VSCR_SAT); - } -} - -void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) 
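The VSPLTI macros sign-extend the 5-bit SIMM field by shifting it to the top of an int8_t and arithmetic-shifting back down before broadcasting it into every lane. The sign-extension trick in isolation:

#include <stdint.h>
#include <stdio.h>

static int8_t sext5(uint32_t simm)
{
    return (int8_t)(simm << 3) >> 3;    /* 5-bit field -> signed value */
}

int main(void)
{
    printf("%d %d %d\n", sext5(0x0f), sext5(0x10), sext5(0x1f));
    /* 15 -16 -1 */
    return 0;
}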
-{ - int i, j; - int sat = 0; - - for (i = 0; i < ARRAY_SIZE(r->u32); i++) { - uint64_t t = (uint64_t)b->u32[i]; - for (j = 0; j < ARRAY_SIZE(r->u32); j++) { - t += a->u8[4*i+j]; - } - r->u32[i] = cvtuduw(t, &sat); - } - - if (sat) { - env->vscr |= (1 << VSCR_SAT); - } -} - -#if defined(HOST_WORDS_BIGENDIAN) -#define UPKHI 1 -#define UPKLO 0 -#else -#define UPKHI 0 -#define UPKLO 1 -#endif -#define VUPKPX(suffix, hi) \ - void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \ - { \ - int i; \ - ppc_avr_t result; \ - for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \ - uint16_t e = b->u16[hi ? i : i+4]; \ - uint8_t a = (e >> 15) ? 0xff : 0; \ - uint8_t r = (e >> 10) & 0x1f; \ - uint8_t g = (e >> 5) & 0x1f; \ - uint8_t b = e & 0x1f; \ - result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \ - } \ - *r = result; \ - } -VUPKPX(lpx, UPKLO) -VUPKPX(hpx, UPKHI) -#undef VUPKPX - -#define VUPK(suffix, unpacked, packee, hi) \ - void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \ - { \ - int i; \ - ppc_avr_t result; \ - if (hi) { \ - for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \ - result.unpacked[i] = b->packee[i]; \ - } \ - } else { \ - for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \ - result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \ - } \ - } \ - *r = result; \ - } -VUPK(hsb, s16, s8, UPKHI) -VUPK(hsh, s32, s16, UPKHI) -VUPK(lsb, s16, s8, UPKLO) -VUPK(lsh, s32, s16, UPKLO) -#undef VUPK -#undef UPKHI -#undef UPKLO - -#undef DO_HANDLE_NAN -#undef HANDLE_NAN1 -#undef HANDLE_NAN2 -#undef HANDLE_NAN3 -#undef VECTOR_FOR_INORDER_I -#undef HI_IDX -#undef LO_IDX - -/*****************************************************************************/ -/* SPE extension helpers */ -/* Use a table to make this quicker */ -static uint8_t hbrev[16] = { - 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE, - 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF, -}; - -static inline uint8_t byte_reverse(uint8_t val) -{ - return hbrev[val >> 4] | (hbrev[val & 0xF] << 4); -} - -static inline uint32_t word_reverse(uint32_t val) -{ - return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) | - (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24); -} - -#define MASKBITS 16 // Random value - to be fixed (implementation dependent) -target_ulong helper_brinc (target_ulong arg1, target_ulong arg2) -{ - uint32_t a, b, d, mask; - - mask = UINT32_MAX >> (32 - MASKBITS); - a = arg1 & mask; - b = arg2 & mask; - d = word_reverse(1 + word_reverse(a | ~b)); - return (arg1 & ~mask) | (d & b); -} - -uint32_t helper_cntlsw32 (uint32_t val) -{ - if (val & 0x80000000) - return clz32(~val); - else - return clz32(val); -} - -uint32_t helper_cntlzw32 (uint32_t val) -{ - return clz32(val); -} - -/* Single-precision floating-point conversions */ -static inline uint32_t efscfsi(uint32_t val) -{ - CPU_FloatU u; - - u.f = int32_to_float32(val, &env->vec_status); - - return u.l; -} - -static inline uint32_t efscfui(uint32_t val) -{ - CPU_FloatU u; - - u.f = uint32_to_float32(val, &env->vec_status); - - return u.l; -} - -static inline int32_t efsctsi(uint32_t val) -{ - CPU_FloatU u; - - u.l = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(float32_is_quiet_nan(u.f))) - return 0; - - return float32_to_int32(u.f, &env->vec_status); -} - -static inline uint32_t efsctui(uint32_t val) -{ - CPU_FloatU u; - - u.l = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(float32_is_quiet_nan(u.f))) - return 0; - - return float32_to_uint32(u.f, &env->vec_status); -} - -static inline uint32_t 
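helper_brinc bit-reverses the masked index, increments it, and reverses back, the bit-reversed increment addressing typical of FFT loops; the hbrev nibble table does the actual reversal. A standalone sketch of the reversal step driving a 3-bit bit-reversed counter:

#include <stdint.h>
#include <stdio.h>

static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

static uint8_t byte_reverse(uint8_t v)
{
    return hbrev[v >> 4] | (hbrev[v & 0xF] << 4);   /* swap reversed nibbles */
}

int main(void)
{
    /* 3-bit bit-reversed counting: 0 4 2 6 1 5 3 7 */
    for (unsigned i = 0; i < 8; i++) {
        printf("%d ", byte_reverse((uint8_t)(i << 5)) & 0x7);
    }
    printf("\n");
    return 0;
}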
efsctsiz(uint32_t val) -{ - CPU_FloatU u; - - u.l = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(float32_is_quiet_nan(u.f))) - return 0; - - return float32_to_int32_round_to_zero(u.f, &env->vec_status); -} - -static inline uint32_t efsctuiz(uint32_t val) -{ - CPU_FloatU u; - - u.l = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(float32_is_quiet_nan(u.f))) - return 0; - - return float32_to_uint32_round_to_zero(u.f, &env->vec_status); -} - -static inline uint32_t efscfsf(uint32_t val) -{ - CPU_FloatU u; - float32 tmp; - - u.f = int32_to_float32(val, &env->vec_status); - tmp = int64_to_float32(1ULL << 32, &env->vec_status); - u.f = float32_div(u.f, tmp, &env->vec_status); - - return u.l; -} - -static inline uint32_t efscfuf(uint32_t val) -{ - CPU_FloatU u; - float32 tmp; - - u.f = uint32_to_float32(val, &env->vec_status); - tmp = uint64_to_float32(1ULL << 32, &env->vec_status); - u.f = float32_div(u.f, tmp, &env->vec_status); - - return u.l; -} - -static inline uint32_t efsctsf(uint32_t val) -{ - CPU_FloatU u; - float32 tmp; - - u.l = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(float32_is_quiet_nan(u.f))) - return 0; - tmp = uint64_to_float32(1ULL << 32, &env->vec_status); - u.f = float32_mul(u.f, tmp, &env->vec_status); - - return float32_to_int32(u.f, &env->vec_status); -} - -static inline uint32_t efsctuf(uint32_t val) -{ - CPU_FloatU u; - float32 tmp; - - u.l = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(float32_is_quiet_nan(u.f))) - return 0; - tmp = uint64_to_float32(1ULL << 32, &env->vec_status); - u.f = float32_mul(u.f, tmp, &env->vec_status); - - return float32_to_uint32(u.f, &env->vec_status); -} - -#define HELPER_SPE_SINGLE_CONV(name) \ -uint32_t helper_e##name (uint32_t val) \ -{ \ - return e##name(val); \ -} -/* efscfsi */ -HELPER_SPE_SINGLE_CONV(fscfsi); -/* efscfui */ -HELPER_SPE_SINGLE_CONV(fscfui); -/* efscfuf */ -HELPER_SPE_SINGLE_CONV(fscfuf); -/* efscfsf */ -HELPER_SPE_SINGLE_CONV(fscfsf); -/* efsctsi */ -HELPER_SPE_SINGLE_CONV(fsctsi); -/* efsctui */ -HELPER_SPE_SINGLE_CONV(fsctui); -/* efsctsiz */ -HELPER_SPE_SINGLE_CONV(fsctsiz); -/* efsctuiz */ -HELPER_SPE_SINGLE_CONV(fsctuiz); -/* efsctsf */ -HELPER_SPE_SINGLE_CONV(fsctsf); -/* efsctuf */ -HELPER_SPE_SINGLE_CONV(fsctuf); - -#define HELPER_SPE_VECTOR_CONV(name) \ -uint64_t helper_ev##name (uint64_t val) \ -{ \ - return ((uint64_t)e##name(val >> 32) << 32) | \ - (uint64_t)e##name(val); \ -} -/* evfscfsi */ -HELPER_SPE_VECTOR_CONV(fscfsi); -/* evfscfui */ -HELPER_SPE_VECTOR_CONV(fscfui); -/* evfscfuf */ -HELPER_SPE_VECTOR_CONV(fscfuf); -/* evfscfsf */ -HELPER_SPE_VECTOR_CONV(fscfsf); -/* evfsctsi */ -HELPER_SPE_VECTOR_CONV(fsctsi); -/* evfsctui */ -HELPER_SPE_VECTOR_CONV(fsctui); -/* evfsctsiz */ -HELPER_SPE_VECTOR_CONV(fsctsiz); -/* evfsctuiz */ -HELPER_SPE_VECTOR_CONV(fsctuiz); -/* evfsctsf */ -HELPER_SPE_VECTOR_CONV(fsctsf); -/* evfsctuf */ -HELPER_SPE_VECTOR_CONV(fsctuf); - -/* Single-precision floating-point arithmetic */ -static inline uint32_t efsadd(uint32_t op1, uint32_t op2) -{ - CPU_FloatU u1, u2; - u1.l = op1; - u2.l = op2; - u1.f = float32_add(u1.f, u2.f, &env->vec_status); - return u1.l; -} - -static inline uint32_t efssub(uint32_t op1, uint32_t op2) -{ - CPU_FloatU u1, u2; - u1.l = op1; - u2.l = op2; - u1.f = float32_sub(u1.f, u2.f, &env->vec_status); - return u1.l; -} - -static inline uint32_t efsmul(uint32_t op1, uint32_t op2) -{ - CPU_FloatU u1, u2; - u1.l = op1; - u2.l = op2; - u1.f 
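efscfsf/efscfuf treat the 32-bit operand as a fixed-point fraction and divide by 2^32 after converting it to float; efsctsf/efsctuf multiply by 2^32 to go the other way. A host-float sketch of the unsigned fraction-to-float direction:

#include <stdint.h>
#include <stdio.h>

static float fract_to_float(uint32_t frac)
{
    return (float)frac / 4294967296.0f;     /* divide by 2^32 */
}

int main(void)
{
    printf("%f %f\n", fract_to_float(0x80000000u),   /* 0.5 */
           fract_to_float(0x40000000u));             /* 0.25 */
    return 0;
}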
= float32_mul(u1.f, u2.f, &env->vec_status); - return u1.l; -} - -static inline uint32_t efsdiv(uint32_t op1, uint32_t op2) -{ - CPU_FloatU u1, u2; - u1.l = op1; - u2.l = op2; - u1.f = float32_div(u1.f, u2.f, &env->vec_status); - return u1.l; -} - -#define HELPER_SPE_SINGLE_ARITH(name) \ -uint32_t helper_e##name (uint32_t op1, uint32_t op2) \ -{ \ - return e##name(op1, op2); \ -} -/* efsadd */ -HELPER_SPE_SINGLE_ARITH(fsadd); -/* efssub */ -HELPER_SPE_SINGLE_ARITH(fssub); -/* efsmul */ -HELPER_SPE_SINGLE_ARITH(fsmul); -/* efsdiv */ -HELPER_SPE_SINGLE_ARITH(fsdiv); - -#define HELPER_SPE_VECTOR_ARITH(name) \ -uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \ -{ \ - return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \ - (uint64_t)e##name(op1, op2); \ -} -/* evfsadd */ -HELPER_SPE_VECTOR_ARITH(fsadd); -/* evfssub */ -HELPER_SPE_VECTOR_ARITH(fssub); -/* evfsmul */ -HELPER_SPE_VECTOR_ARITH(fsmul); -/* evfsdiv */ -HELPER_SPE_VECTOR_ARITH(fsdiv); - -/* Single-precision floating-point comparisons */ -static inline uint32_t efscmplt(uint32_t op1, uint32_t op2) -{ - CPU_FloatU u1, u2; - u1.l = op1; - u2.l = op2; - return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0; -} - -static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2) -{ - CPU_FloatU u1, u2; - u1.l = op1; - u2.l = op2; - return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4; -} - -static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2) -{ - CPU_FloatU u1, u2; - u1.l = op1; - u2.l = op2; - return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0; -} - -static inline uint32_t efststlt(uint32_t op1, uint32_t op2) -{ - /* XXX: TODO: ignore special values (NaN, infinites, ...) */ - return efscmplt(op1, op2); -} - -static inline uint32_t efststgt(uint32_t op1, uint32_t op2) -{ - /* XXX: TODO: ignore special values (NaN, infinites, ...) */ - return efscmpgt(op1, op2); -} - -static inline uint32_t efststeq(uint32_t op1, uint32_t op2) -{ - /* XXX: TODO: ignore special values (NaN, infinites, ...) 
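All of these SPE helpers move raw register bits in and out of float arithmetic through a union (CPU_FloatU/CPU_DoubleU in this file). A minimal standalone version of that type-punning idiom, with a native float add standing in for the softfloat call:

#include <stdint.h>
#include <stdio.h>

typedef union {
    float f;
    uint32_t l;
} FloatU;

static uint32_t efsadd_like(uint32_t op1, uint32_t op2)
{
    FloatU u1, u2;
    u1.l = op1;                 /* reinterpret the raw bits as a float */
    u2.l = op2;
    u1.f = u1.f + u2.f;         /* float32_add(..., &env->vec_status) above */
    return u1.l;                /* raw bits go back to the register file */
}

int main(void)
{
    FloatU a = { .f = 1.5f }, b = { .f = 2.25f }, r;
    r.l = efsadd_like(a.l, b.l);
    printf("%f\n", r.f);        /* 3.750000 */
    return 0;
}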
*/ - return efscmpeq(op1, op2); -} - -#define HELPER_SINGLE_SPE_CMP(name) \ -uint32_t helper_e##name (uint32_t op1, uint32_t op2) \ -{ \ - return e##name(op1, op2) << 2; \ -} -/* efststlt */ -HELPER_SINGLE_SPE_CMP(fststlt); -/* efststgt */ -HELPER_SINGLE_SPE_CMP(fststgt); -/* efststeq */ -HELPER_SINGLE_SPE_CMP(fststeq); -/* efscmplt */ -HELPER_SINGLE_SPE_CMP(fscmplt); -/* efscmpgt */ -HELPER_SINGLE_SPE_CMP(fscmpgt); -/* efscmpeq */ -HELPER_SINGLE_SPE_CMP(fscmpeq); - -static inline uint32_t evcmp_merge(int t0, int t1) -{ - return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1); -} - -#define HELPER_VECTOR_SPE_CMP(name) \ -uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \ -{ \ - return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \ -} -/* evfststlt */ -HELPER_VECTOR_SPE_CMP(fststlt); -/* evfststgt */ -HELPER_VECTOR_SPE_CMP(fststgt); -/* evfststeq */ -HELPER_VECTOR_SPE_CMP(fststeq); -/* evfscmplt */ -HELPER_VECTOR_SPE_CMP(fscmplt); -/* evfscmpgt */ -HELPER_VECTOR_SPE_CMP(fscmpgt); -/* evfscmpeq */ -HELPER_VECTOR_SPE_CMP(fscmpeq); - -/* Double-precision floating-point conversion */ -uint64_t helper_efdcfsi (uint32_t val) -{ - CPU_DoubleU u; - - u.d = int32_to_float64(val, &env->vec_status); - - return u.ll; -} - -uint64_t helper_efdcfsid (uint64_t val) -{ - CPU_DoubleU u; - - u.d = int64_to_float64(val, &env->vec_status); - - return u.ll; -} - -uint64_t helper_efdcfui (uint32_t val) -{ - CPU_DoubleU u; - - u.d = uint32_to_float64(val, &env->vec_status); - - return u.ll; -} - -uint64_t helper_efdcfuid (uint64_t val) -{ - CPU_DoubleU u; - - u.d = uint64_to_float64(val, &env->vec_status); - - return u.ll; -} - -uint32_t helper_efdctsi (uint64_t val) -{ - CPU_DoubleU u; - - u.ll = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(float64_is_any_nan(u.d))) { - return 0; - } - - return float64_to_int32(u.d, &env->vec_status); -} - -uint32_t helper_efdctui (uint64_t val) -{ - CPU_DoubleU u; - - u.ll = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(float64_is_any_nan(u.d))) { - return 0; - } - - return float64_to_uint32(u.d, &env->vec_status); -} - -uint32_t helper_efdctsiz (uint64_t val) -{ - CPU_DoubleU u; - - u.ll = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(float64_is_any_nan(u.d))) { - return 0; - } - - return float64_to_int32_round_to_zero(u.d, &env->vec_status); -} - -uint64_t helper_efdctsidz (uint64_t val) -{ - CPU_DoubleU u; - - u.ll = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(float64_is_any_nan(u.d))) { - return 0; - } - - return float64_to_int64_round_to_zero(u.d, &env->vec_status); -} - -uint32_t helper_efdctuiz (uint64_t val) -{ - CPU_DoubleU u; - - u.ll = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(float64_is_any_nan(u.d))) { - return 0; - } - - return float64_to_uint32_round_to_zero(u.d, &env->vec_status); -} - -uint64_t helper_efdctuidz (uint64_t val) -{ - CPU_DoubleU u; - - u.ll = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(float64_is_any_nan(u.d))) { - return 0; - } - - return float64_to_uint64_round_to_zero(u.d, &env->vec_status); -} - -uint64_t helper_efdcfsf (uint32_t val) -{ - CPU_DoubleU u; - float64 tmp; - - u.d = int32_to_float64(val, &env->vec_status); - tmp = int64_to_float64(1ULL << 32, &env->vec_status); - u.d = float64_div(u.d, tmp, &env->vec_status); - - return u.ll; -} - -uint64_t helper_efdcfuf (uint32_t val) -{ - CPU_DoubleU u; - float64 tmp; - - u.d = 
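evcmp_merge packs the two per-half results of a vector SPE compare into four CR bits: high half, low half, their OR, and their AND. Standalone sketch:

#include <stdio.h>

static unsigned evcmp_merge_like(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

int main(void)
{
    printf("%x %x %x\n",
           evcmp_merge_like(1, 1),      /* f: both halves true */
           evcmp_merge_like(1, 0),      /* a: only the high half */
           evcmp_merge_like(0, 0));     /* 0: neither */
    return 0;
}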
uint32_to_float64(val, &env->vec_status); - tmp = int64_to_float64(1ULL << 32, &env->vec_status); - u.d = float64_div(u.d, tmp, &env->vec_status); - - return u.ll; -} - -uint32_t helper_efdctsf (uint64_t val) -{ - CPU_DoubleU u; - float64 tmp; - - u.ll = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(float64_is_any_nan(u.d))) { - return 0; - } - tmp = uint64_to_float64(1ULL << 32, &env->vec_status); - u.d = float64_mul(u.d, tmp, &env->vec_status); - - return float64_to_int32(u.d, &env->vec_status); -} - -uint32_t helper_efdctuf (uint64_t val) -{ - CPU_DoubleU u; - float64 tmp; - - u.ll = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(float64_is_any_nan(u.d))) { - return 0; - } - tmp = uint64_to_float64(1ULL << 32, &env->vec_status); - u.d = float64_mul(u.d, tmp, &env->vec_status); - - return float64_to_uint32(u.d, &env->vec_status); -} - -uint32_t helper_efscfd (uint64_t val) -{ - CPU_DoubleU u1; - CPU_FloatU u2; - - u1.ll = val; - u2.f = float64_to_float32(u1.d, &env->vec_status); - - return u2.l; -} - -uint64_t helper_efdcfs (uint32_t val) -{ - CPU_DoubleU u2; - CPU_FloatU u1; - - u1.l = val; - u2.d = float32_to_float64(u1.f, &env->vec_status); - - return u2.ll; -} - -/* Double precision fixed-point arithmetic */ -uint64_t helper_efdadd (uint64_t op1, uint64_t op2) -{ - CPU_DoubleU u1, u2; - u1.ll = op1; - u2.ll = op2; - u1.d = float64_add(u1.d, u2.d, &env->vec_status); - return u1.ll; -} - -uint64_t helper_efdsub (uint64_t op1, uint64_t op2) -{ - CPU_DoubleU u1, u2; - u1.ll = op1; - u2.ll = op2; - u1.d = float64_sub(u1.d, u2.d, &env->vec_status); - return u1.ll; -} - -uint64_t helper_efdmul (uint64_t op1, uint64_t op2) -{ - CPU_DoubleU u1, u2; - u1.ll = op1; - u2.ll = op2; - u1.d = float64_mul(u1.d, u2.d, &env->vec_status); - return u1.ll; -} - -uint64_t helper_efddiv (uint64_t op1, uint64_t op2) -{ - CPU_DoubleU u1, u2; - u1.ll = op1; - u2.ll = op2; - u1.d = float64_div(u1.d, u2.d, &env->vec_status); - return u1.ll; -} - -/* Double precision floating point helpers */ -uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2) -{ - CPU_DoubleU u1, u2; - u1.ll = op1; - u2.ll = op2; - return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0; -} - -uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2) -{ - CPU_DoubleU u1, u2; - u1.ll = op1; - u2.ll = op2; - return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4; -} - -uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2) -{ - CPU_DoubleU u1, u2; - u1.ll = op1; - u2.ll = op2; - return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0; -} - -uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2) -{ - /* XXX: TODO: test special values (NaN, infinites, ...) */ - return helper_efdtstlt(op1, op2); -} - -uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2) -{ - /* XXX: TODO: test special values (NaN, infinites, ...) */ - return helper_efdtstgt(op1, op2); -} - -uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2) -{ - /* XXX: TODO: test special values (NaN, infinites, ...) */ - return helper_efdtsteq(op1, op2); -} - -/*****************************************************************************/ -/* Softmmu support */ -#if !defined (CONFIG_USER_ONLY) - -#define MMUSUFFIX _mmu - -#define SHIFT 0 -#include "softmmu_template.h" - -#define SHIFT 1 -#include "softmmu_template.h" - -#define SHIFT 2 -#include "softmmu_template.h" - -#define SHIFT 3 -#include "softmmu_template.h" - -/* try to fill the TLB and return an exception if error. 
If retaddr is - NULL, it means that the function was called in C code (i.e. not - from generated code or from helper.c) */ -/* XXX: fix it to restore all registers */ -void tlb_fill(CPUPPCState *env1, target_ulong addr, int is_write, int mmu_idx, - uintptr_t retaddr) -{ - TranslationBlock *tb; - CPUPPCState *saved_env; - int ret; - - saved_env = env; - env = env1; - ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx); - if (unlikely(ret != 0)) { - if (likely(retaddr)) { - /* now we have a real cpu fault */ - tb = tb_find_pc(retaddr); - if (likely(tb)) { - /* the PC is inside the translated code. It means that we have - a virtual CPU fault */ - cpu_restore_state(tb, env, retaddr); - } - } - helper_raise_exception_err(env->exception_index, env->error_code); - } - env = saved_env; -} - -/* Segment registers load and store */ -target_ulong helper_load_sr (target_ulong sr_num) -{ -#if defined(TARGET_PPC64) - if (env->mmu_model & POWERPC_MMU_64) - return ppc_load_sr(env, sr_num); -#endif - return env->sr[sr_num]; -} - -void helper_store_sr (target_ulong sr_num, target_ulong val) -{ - ppc_store_sr(env, sr_num, val); -} - -/* SLB management */ -#if defined(TARGET_PPC64) -void helper_store_slb (target_ulong rb, target_ulong rs) -{ - if (ppc_store_slb(env, rb, rs) < 0) { - helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL); - } -} - -target_ulong helper_load_slb_esid (target_ulong rb) -{ - target_ulong rt; - - if (ppc_load_slb_esid(env, rb, &rt) < 0) { - helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL); - } - return rt; -} - -target_ulong helper_load_slb_vsid (target_ulong rb) -{ - target_ulong rt; - - if (ppc_load_slb_vsid(env, rb, &rt) < 0) { - helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL); - } - return rt; -} - -void helper_slbia (void) -{ - ppc_slb_invalidate_all(env); -} - -void helper_slbie (target_ulong addr) -{ - ppc_slb_invalidate_one(env, addr); -} - -#endif /* defined(TARGET_PPC64) */ - -/* TLB management */ -void helper_tlbia (void) -{ - ppc_tlb_invalidate_all(env); -} - -void helper_tlbie (target_ulong addr) -{ - ppc_tlb_invalidate_one(env, addr); -} - -/* Software driven TLBs management */ -/* PowerPC 602/603 software TLB load instructions helpers */ -static void do_6xx_tlb (target_ulong new_EPN, int is_code) -{ - target_ulong RPN, CMP, EPN; - int way; - - RPN = env->spr[SPR_RPA]; - if (is_code) { - CMP = env->spr[SPR_ICMP]; - EPN = env->spr[SPR_IMISS]; - } else { - CMP = env->spr[SPR_DCMP]; - EPN = env->spr[SPR_DMISS]; - } - way = (env->spr[SPR_SRR1] >> 17) & 1; - (void)EPN; /* avoid a compiler warning */ - LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx - " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP, - RPN, way); - /* Store this TLB */ - ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK), - way, is_code, CMP, RPN); -} - -void helper_6xx_tlbd (target_ulong EPN) -{ - do_6xx_tlb(EPN, 0); -} - -void helper_6xx_tlbi (target_ulong EPN) -{ - do_6xx_tlb(EPN, 1); -} - -/* PowerPC 74xx software TLB load instructions helpers */ -static void do_74xx_tlb (target_ulong new_EPN, int is_code) -{ - target_ulong RPN, CMP, EPN; - int way; - - RPN = env->spr[SPR_PTELO]; - CMP = env->spr[SPR_PTEHI]; - EPN = env->spr[SPR_TLBMISS] & ~0x3; - way = env->spr[SPR_TLBMISS] & 0x3; - (void)EPN; /* avoid a compiler warning */ - LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx - " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP, - RPN, way); - /* 
Store this TLB */ - ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK), - way, is_code, CMP, RPN); -} - -void helper_74xx_tlbd (target_ulong EPN) -{ - do_74xx_tlb(EPN, 0); -} - -void helper_74xx_tlbi (target_ulong EPN) -{ - do_74xx_tlb(EPN, 1); -} - -static inline target_ulong booke_tlb_to_page_size(int size) -{ - return 1024 << (2 * size); -} - -static inline int booke_page_size_to_tlb(target_ulong page_size) -{ - int size; - - switch (page_size) { - case 0x00000400UL: - size = 0x0; - break; - case 0x00001000UL: - size = 0x1; - break; - case 0x00004000UL: - size = 0x2; - break; - case 0x00010000UL: - size = 0x3; - break; - case 0x00040000UL: - size = 0x4; - break; - case 0x00100000UL: - size = 0x5; - break; - case 0x00400000UL: - size = 0x6; - break; - case 0x01000000UL: - size = 0x7; - break; - case 0x04000000UL: - size = 0x8; - break; - case 0x10000000UL: - size = 0x9; - break; - case 0x40000000UL: - size = 0xA; - break; -#if defined (TARGET_PPC64) - case 0x000100000000ULL: - size = 0xB; - break; - case 0x000400000000ULL: - size = 0xC; - break; - case 0x001000000000ULL: - size = 0xD; - break; - case 0x004000000000ULL: - size = 0xE; - break; - case 0x010000000000ULL: - size = 0xF; - break; -#endif - default: - size = -1; - break; - } - - return size; -} - -/* Helpers for 4xx TLB management */ -#define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */ - -#define PPC4XX_TLBHI_V 0x00000040 -#define PPC4XX_TLBHI_E 0x00000020 -#define PPC4XX_TLBHI_SIZE_MIN 0 -#define PPC4XX_TLBHI_SIZE_MAX 7 -#define PPC4XX_TLBHI_SIZE_DEFAULT 1 -#define PPC4XX_TLBHI_SIZE_SHIFT 7 -#define PPC4XX_TLBHI_SIZE_MASK 0x00000007 - -#define PPC4XX_TLBLO_EX 0x00000200 -#define PPC4XX_TLBLO_WR 0x00000100 -#define PPC4XX_TLBLO_ATTR_MASK 0x000000FF -#define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00 - -target_ulong helper_4xx_tlbre_hi (target_ulong entry) -{ - ppcemb_tlb_t *tlb; - target_ulong ret; - int size; - - entry &= PPC4XX_TLB_ENTRY_MASK; - tlb = &env->tlb.tlbe[entry]; - ret = tlb->EPN; - if (tlb->prot & PAGE_VALID) { - ret |= PPC4XX_TLBHI_V; - } - size = booke_page_size_to_tlb(tlb->size); - if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) { - size = PPC4XX_TLBHI_SIZE_DEFAULT; - } - ret |= size << PPC4XX_TLBHI_SIZE_SHIFT; - env->spr[SPR_40x_PID] = tlb->PID; - return ret; -} - -target_ulong helper_4xx_tlbre_lo (target_ulong entry) -{ - ppcemb_tlb_t *tlb; - target_ulong ret; - - entry &= PPC4XX_TLB_ENTRY_MASK; - tlb = &env->tlb.tlbe[entry]; - ret = tlb->RPN; - if (tlb->prot & PAGE_EXEC) { - ret |= PPC4XX_TLBLO_EX; - } - if (tlb->prot & PAGE_WRITE) { - ret |= PPC4XX_TLBLO_WR; - } - return ret; -} - -void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val) -{ - ppcemb_tlb_t *tlb; - target_ulong page, end; - - LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry, - val); - entry &= PPC4XX_TLB_ENTRY_MASK; - tlb = &env->tlb.tlbe[entry]; - /* Invalidate previous TLB (if it's valid) */ - if (tlb->prot & PAGE_VALID) { - end = tlb->EPN + tlb->size; - LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end " - TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end); - for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { - tlb_flush_page(env, page); - } - } - tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT) - & PPC4XX_TLBHI_SIZE_MASK); - /* We cannot handle TLB size < TARGET_PAGE_SIZE. 
- * If this ever occurs, one should use the ppcemb target instead - * of the ppc or ppc64 one - */ - if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) { - cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u " - "are not supported (%d)\n", - tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7)); - } - tlb->EPN = val & ~(tlb->size - 1); - if (val & PPC4XX_TLBHI_V) { - tlb->prot |= PAGE_VALID; - if (val & PPC4XX_TLBHI_E) { - /* XXX: TO BE FIXED */ - cpu_abort(env, - "Little-endian TLB entries are not supported by now\n"); - } - } else { - tlb->prot &= ~PAGE_VALID; - } - tlb->PID = env->spr[SPR_40x_PID]; /* PID */ - LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx - " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__, - (int)entry, tlb->RPN, tlb->EPN, tlb->size, - tlb->prot & PAGE_READ ? 'r' : '-', - tlb->prot & PAGE_WRITE ? 'w' : '-', - tlb->prot & PAGE_EXEC ? 'x' : '-', - tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID); - /* Invalidate new TLB (if valid) */ - if (tlb->prot & PAGE_VALID) { - end = tlb->EPN + tlb->size; - LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end " - TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end); - for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { - tlb_flush_page(env, page); - } - } -} - -void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val) -{ - ppcemb_tlb_t *tlb; - - LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry, - val); - entry &= PPC4XX_TLB_ENTRY_MASK; - tlb = &env->tlb.tlbe[entry]; - tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK; - tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK; - tlb->prot = PAGE_READ; - if (val & PPC4XX_TLBLO_EX) { - tlb->prot |= PAGE_EXEC; - } - if (val & PPC4XX_TLBLO_WR) { - tlb->prot |= PAGE_WRITE; - } - LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx - " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__, - (int)entry, tlb->RPN, tlb->EPN, tlb->size, - tlb->prot & PAGE_READ ? 'r' : '-', - tlb->prot & PAGE_WRITE ? 'w' : '-', - tlb->prot & PAGE_EXEC ? 'x' : '-', - tlb->prot & PAGE_VALID ? 
'v' : '-', (int)tlb->PID); -} - -target_ulong helper_4xx_tlbsx (target_ulong address) -{ - return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]); -} - -/* PowerPC 440 TLB management */ -void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value) -{ - ppcemb_tlb_t *tlb; - target_ulong EPN, RPN, size; - int do_flush_tlbs; - - LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n", - __func__, word, (int)entry, value); - do_flush_tlbs = 0; - entry &= 0x3F; - tlb = &env->tlb.tlbe[entry]; - switch (word) { - default: - /* Just here to please gcc */ - case 0: - EPN = value & 0xFFFFFC00; - if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) - do_flush_tlbs = 1; - tlb->EPN = EPN; - size = booke_tlb_to_page_size((value >> 4) & 0xF); - if ((tlb->prot & PAGE_VALID) && tlb->size < size) - do_flush_tlbs = 1; - tlb->size = size; - tlb->attr &= ~0x1; - tlb->attr |= (value >> 8) & 1; - if (value & 0x200) { - tlb->prot |= PAGE_VALID; - } else { - if (tlb->prot & PAGE_VALID) { - tlb->prot &= ~PAGE_VALID; - do_flush_tlbs = 1; - } - } - tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF; - if (do_flush_tlbs) - tlb_flush(env, 1); - break; - case 1: - RPN = value & 0xFFFFFC0F; - if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) - tlb_flush(env, 1); - tlb->RPN = RPN; - break; - case 2: - tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00); - tlb->prot = tlb->prot & PAGE_VALID; - if (value & 0x1) - tlb->prot |= PAGE_READ << 4; - if (value & 0x2) - tlb->prot |= PAGE_WRITE << 4; - if (value & 0x4) - tlb->prot |= PAGE_EXEC << 4; - if (value & 0x8) - tlb->prot |= PAGE_READ; - if (value & 0x10) - tlb->prot |= PAGE_WRITE; - if (value & 0x20) - tlb->prot |= PAGE_EXEC; - break; - } -} - -target_ulong helper_440_tlbre (uint32_t word, target_ulong entry) -{ - ppcemb_tlb_t *tlb; - target_ulong ret; - int size; - - entry &= 0x3F; - tlb = &env->tlb.tlbe[entry]; - switch (word) { - default: - /* Just here to please gcc */ - case 0: - ret = tlb->EPN; - size = booke_page_size_to_tlb(tlb->size); - if (size < 0 || size > 0xF) - size = 1; - ret |= size << 4; - if (tlb->attr & 0x1) - ret |= 0x100; - if (tlb->prot & PAGE_VALID) - ret |= 0x200; - env->spr[SPR_440_MMUCR] &= ~0x000000FF; - env->spr[SPR_440_MMUCR] |= tlb->PID; - break; - case 1: - ret = tlb->RPN; - break; - case 2: - ret = tlb->attr & ~0x1; - if (tlb->prot & (PAGE_READ << 4)) - ret |= 0x1; - if (tlb->prot & (PAGE_WRITE << 4)) - ret |= 0x2; - if (tlb->prot & (PAGE_EXEC << 4)) - ret |= 0x4; - if (tlb->prot & PAGE_READ) - ret |= 0x8; - if (tlb->prot & PAGE_WRITE) - ret |= 0x10; - if (tlb->prot & PAGE_EXEC) - ret |= 0x20; - break; - } - return ret; -} - -target_ulong helper_440_tlbsx (target_ulong address) -{ - return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF); -} - -/* PowerPC BookE 2.06 TLB management */ - -static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env) -{ - uint32_t tlbncfg = 0; - int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT; - int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK); - int tlb; - - tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT; - tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb]; - - if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) { - cpu_abort(env, "we don't support HES yet\n"); - } - - return booke206_get_tlbm(env, tlb, ea, esel); -} - -void helper_booke_setpid(uint32_t pidn, target_ulong pid) -{ - env->spr[pidn] = pid; - /* changing PIDs mean we're in a different address space now */ - tlb_flush(env, 1); -} - -void 
helper_booke206_tlbwe(void) -{ - uint32_t tlbncfg, tlbn; - ppcmas_tlb_t *tlb; - uint32_t size_tlb, size_ps; - - switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) { - case MAS0_WQ_ALWAYS: - /* good to go, write that entry */ - break; - case MAS0_WQ_COND: - /* XXX check if reserved */ - if (0) { - return; - } - break; - case MAS0_WQ_CLR_RSRV: - /* XXX clear entry */ - return; - default: - /* no idea what to do */ - return; - } - - if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) && - !msr_gs) { - /* XXX we don't support direct LRAT setting yet */ - fprintf(stderr, "cpu: don't support LRAT setting yet\n"); - return; - } - - tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT; - tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn]; - - tlb = booke206_cur_tlb(env); - - if (!tlb) { - helper_raise_exception_err(POWERPC_EXCP_PROGRAM, - POWERPC_EXCP_INVAL | - POWERPC_EXCP_INVAL_INVAL); - } - - /* check that we support the targeted size */ - size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; - size_ps = booke206_tlbnps(env, tlbn); - if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) && - !(size_ps & (1 << size_tlb))) { - helper_raise_exception_err(POWERPC_EXCP_PROGRAM, - POWERPC_EXCP_INVAL | - POWERPC_EXCP_INVAL_INVAL); - } - - if (msr_gs) { - cpu_abort(env, "missing HV implementation\n"); - } - tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) | - env->spr[SPR_BOOKE_MAS3]; - tlb->mas1 = env->spr[SPR_BOOKE_MAS1]; - - /* MAV 1.0 only */ - if (!(tlbncfg & TLBnCFG_AVAIL)) { - /* force !AVAIL TLB entries to correct page size */ - tlb->mas1 &= ~MAS1_TSIZE_MASK; - /* XXX can be configured in MMUCSR0 */ - tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12; - } - - /* XXX needs to change when supporting 64-bit e500 */ - tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & 0xffffffff; - - if (!(tlbncfg & TLBnCFG_IPROT)) { - /* no IPROT supported by TLB */ - tlb->mas1 &= ~MAS1_IPROT; - } - - if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) { - tlb_flush_page(env, tlb->mas2 & MAS2_EPN_MASK); - } else { - tlb_flush(env, 1); - } -} - -static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb) -{ - int tlbn = booke206_tlbm_to_tlbn(env, tlb); - int way = booke206_tlbm_to_way(env, tlb); - - env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT; - env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT; - env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; - - env->spr[SPR_BOOKE_MAS1] = tlb->mas1; - env->spr[SPR_BOOKE_MAS2] = tlb->mas2; - env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3; - env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32; -} - -void helper_booke206_tlbre(void) -{ - ppcmas_tlb_t *tlb = NULL; - - tlb = booke206_cur_tlb(env); - if (!tlb) { - env->spr[SPR_BOOKE_MAS1] = 0; - } else { - booke206_tlb_to_mas(env, tlb); - } -} - -void helper_booke206_tlbsx(target_ulong address) -{ - ppcmas_tlb_t *tlb = NULL; - int i, j; - target_phys_addr_t raddr; - uint32_t spid, sas; - - spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT; - sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS; - - for (i = 0; i < BOOKE206_MAX_TLBN; i++) { - int ways = booke206_tlb_ways(env, i); - - for (j = 0; j < ways; j++) { - tlb = booke206_get_tlbm(env, i, address, j); - - if (!tlb) { - continue; - } - - if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) { - continue; - } - - if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { - continue; - } - - booke206_tlb_to_mas(env, tlb); - return; - } - } - - /* no entry found, fill with defaults */ 
- env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; - env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; - env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; - env->spr[SPR_BOOKE_MAS3] = 0; - env->spr[SPR_BOOKE_MAS7] = 0; - - if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) { - env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; - } - - env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16) - << MAS1_TID_SHIFT; - - /* next victim logic */ - env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; - env->last_way++; - env->last_way &= booke206_tlb_ways(env, 0) - 1; - env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; -} - -static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn, - uint32_t ea) -{ - int i; - int ways = booke206_tlb_ways(env, tlbn); - target_ulong mask; - - for (i = 0; i < ways; i++) { - ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i); - if (!tlb) { - continue; - } - mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); - if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) && - !(tlb->mas1 & MAS1_IPROT)) { - tlb->mas1 &= ~MAS1_VALID; - } - } -} - -void helper_booke206_tlbivax(target_ulong address) -{ - if (address & 0x4) { - /* flush all entries */ - if (address & 0x8) { - /* flush all of TLB1 */ - booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1); - } else { - /* flush all of TLB0 */ - booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0); - } - return; - } - - if (address & 0x8) { - /* flush TLB1 entries */ - booke206_invalidate_ea_tlb(env, 1, address); - tlb_flush(env, 1); - } else { - /* flush TLB0 entries */ - booke206_invalidate_ea_tlb(env, 0, address); - tlb_flush_page(env, address & MAS2_EPN_MASK); - } -} - -void helper_booke206_tlbilx0(target_ulong address) -{ - /* XXX missing LPID handling */ - booke206_flush_tlb(env, -1, 1); -} - -void helper_booke206_tlbilx1(target_ulong address) -{ - int i, j; - int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID); - ppcmas_tlb_t *tlb = env->tlb.tlbm; - int tlb_size; - - /* XXX missing LPID handling */ - for (i = 0; i < BOOKE206_MAX_TLBN; i++) { - tlb_size = booke206_tlb_size(env, i); - for (j = 0; j < tlb_size; j++) { - if (!(tlb[j].mas1 & MAS1_IPROT) && - ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) { - tlb[j].mas1 &= ~MAS1_VALID; - } - } - tlb += booke206_tlb_size(env, i); - } - tlb_flush(env, 1); -} - -void helper_booke206_tlbilx3(target_ulong address) -{ - int i, j; - ppcmas_tlb_t *tlb; - int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID); - int pid = tid >> MAS6_SPID_SHIFT; - int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS; - int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? 
MAS1_IND : 0; - /* XXX check for unsupported isize and raise an invalid opcode then */ - int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK; - /* XXX implement MAV2 handling */ - bool mav2 = false; - - /* XXX missing LPID handling */ - /* flush by pid and ea */ - for (i = 0; i < BOOKE206_MAX_TLBN; i++) { - int ways = booke206_tlb_ways(env, i); - - for (j = 0; j < ways; j++) { - tlb = booke206_get_tlbm(env, i, address, j); - if (!tlb) { - continue; - } - if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) || - (tlb->mas1 & MAS1_IPROT) || - ((tlb->mas1 & MAS1_IND) != ind) || - ((tlb->mas8 & MAS8_TGS) != sgs)) { - continue; - } - if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) { - /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */ - continue; - } - /* XXX e500mc doesn't match SAS, but other cores might */ - tlb->mas1 &= ~MAS1_VALID; - } - } - tlb_flush(env, 1); -} - -void helper_booke206_tlbflush(uint32_t type) -{ - int flags = 0; - - if (type & 2) { - flags |= BOOKE206_FLUSH_TLB1; - } - - if (type & 4) { - flags |= BOOKE206_FLUSH_TLB0; - } - - booke206_flush_tlb(env, flags, 1); -} - -/* Embedded.Processor Control */ -static int dbell2irq(target_ulong rb) -{ - int msg = rb & DBELL_TYPE_MASK; - int irq = -1; - - switch (msg) { - case DBELL_TYPE_DBELL: - irq = PPC_INTERRUPT_DOORBELL; - break; - case DBELL_TYPE_DBELL_CRIT: - irq = PPC_INTERRUPT_CDOORBELL; - break; - case DBELL_TYPE_G_DBELL: - case DBELL_TYPE_G_DBELL_CRIT: - case DBELL_TYPE_G_DBELL_MC: - /* XXX implement */ - default: - break; - } - - return irq; -} - -void helper_msgclr(target_ulong rb) -{ - int irq = dbell2irq(rb); - - if (irq < 0) { - return; - } - - env->pending_interrupts &= ~(1 << irq); -} - -void helper_msgsnd(target_ulong rb) -{ - int irq = dbell2irq(rb); - int pir = rb & DBELL_PIRTAG_MASK; - CPUPPCState *cenv; - - if (irq < 0) { - return; - } - - for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) { - if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) { - cenv->pending_interrupts |= 1 << irq; - cpu_interrupt(cenv, CPU_INTERRUPT_HARD); - } - } -} - -#endif /* !CONFIG_USER_ONLY */ diff --git a/target-ppc/timebase_helper.c b/target-ppc/timebase_helper.c new file mode 100644 index 0000000000..fad738a4af --- /dev/null +++ b/target-ppc/timebase_helper.c @@ -0,0 +1,159 @@ +/* + * PowerPC emulation helpers for QEMU. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ +#include "cpu.h" +#include "helper.h" + +/*****************************************************************************/ +/* SPR accesses */ + +target_ulong helper_load_tbl(CPUPPCState *env) +{ + return (target_ulong)cpu_ppc_load_tbl(env); +} + +target_ulong helper_load_tbu(CPUPPCState *env) +{ + return cpu_ppc_load_tbu(env); +} + +target_ulong helper_load_atbl(CPUPPCState *env) +{ + return (target_ulong)cpu_ppc_load_atbl(env); +} + +target_ulong helper_load_atbu(CPUPPCState *env) +{ + return cpu_ppc_load_atbu(env); +} + +#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) +target_ulong helper_load_purr(CPUPPCState *env) +{ + return (target_ulong)cpu_ppc_load_purr(env); +} +#endif + +target_ulong helper_load_601_rtcl(CPUPPCState *env) +{ + return cpu_ppc601_load_rtcl(env); +} + +target_ulong helper_load_601_rtcu(CPUPPCState *env) +{ + return cpu_ppc601_load_rtcu(env); +} + +#if !defined(CONFIG_USER_ONLY) +void helper_store_tbl(CPUPPCState *env, target_ulong val) +{ + cpu_ppc_store_tbl(env, val); +} + +void helper_store_tbu(CPUPPCState *env, target_ulong val) +{ + cpu_ppc_store_tbu(env, val); +} + +void helper_store_atbl(CPUPPCState *env, target_ulong val) +{ + cpu_ppc_store_atbl(env, val); +} + +void helper_store_atbu(CPUPPCState *env, target_ulong val) +{ + cpu_ppc_store_atbu(env, val); +} + +void helper_store_601_rtcl(CPUPPCState *env, target_ulong val) +{ + cpu_ppc601_store_rtcl(env, val); +} + +void helper_store_601_rtcu(CPUPPCState *env, target_ulong val) +{ + cpu_ppc601_store_rtcu(env, val); +} + +target_ulong helper_load_decr(CPUPPCState *env) +{ + return cpu_ppc_load_decr(env); +} + +void helper_store_decr(CPUPPCState *env, target_ulong val) +{ + cpu_ppc_store_decr(env, val); +} + +target_ulong helper_load_40x_pit(CPUPPCState *env) +{ + return load_40x_pit(env); +} + +void helper_store_40x_pit(CPUPPCState *env, target_ulong val) +{ + store_40x_pit(env, val); +} + +void helper_store_booke_tcr(CPUPPCState *env, target_ulong val) +{ + store_booke_tcr(env, val); +} + +void helper_store_booke_tsr(CPUPPCState *env, target_ulong val) +{ + store_booke_tsr(env, val); +} +#endif + +/*****************************************************************************/ +/* Embedded PowerPC specific helpers */ + +/* XXX: to be improved to check access rights when in user-mode */ +target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn) +{ + uint32_t val = 0; + + if (unlikely(env->dcr_env == NULL)) { + qemu_log("No DCR environment\n"); + helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | + POWERPC_EXCP_INVAL_INVAL); + } else if (unlikely(ppc_dcr_read(env->dcr_env, + (uint32_t)dcrn, &val) != 0)) { + qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn); + helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG); + } + return val; +} + +void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val) +{ + if (unlikely(env->dcr_env == NULL)) { + qemu_log("No DCR environment\n"); + helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | + POWERPC_EXCP_INVAL_INVAL); + } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, + (uint32_t)val) != 0)) { + qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn); + helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG); + } +} diff --git a/target-ppc/translate.c b/target-ppc/translate.c index cf59765405..91eb7a062c 100644 --- a/target-ppc/translate.c +++ 
b/target-ppc/translate.c @@ -219,7 +219,7 @@ struct opc_handler_t { static inline void gen_reset_fpstatus(void) { - gen_helper_reset_fpstatus(); + gen_helper_reset_fpstatus(cpu_env); } static inline void gen_compute_fprf(TCGv_i64 arg, int set_fprf, int set_rc) @@ -229,15 +229,15 @@ static inline void gen_compute_fprf(TCGv_i64 arg, int set_fprf, int set_rc) if (set_fprf != 0) { /* This case might be optimized later */ tcg_gen_movi_i32(t0, 1); - gen_helper_compute_fprf(t0, arg, t0); + gen_helper_compute_fprf(t0, cpu_env, arg, t0); if (unlikely(set_rc)) { tcg_gen_mov_i32(cpu_crf[1], t0); } - gen_helper_float_check_status(); + gen_helper_float_check_status(cpu_env); } else if (unlikely(set_rc)) { /* We always need to compute fpcc */ tcg_gen_movi_i32(t0, 0); - gen_helper_compute_fprf(t0, arg, t0); + gen_helper_compute_fprf(t0, cpu_env, arg, t0); tcg_gen_mov_i32(cpu_crf[1], t0); } @@ -270,7 +270,7 @@ static inline void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t } t0 = tcg_const_i32(excp); t1 = tcg_const_i32(error); - gen_helper_raise_exception_err(t0, t1); + gen_helper_raise_exception_err(cpu_env, t0, t1); tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); ctx->exception = (excp); @@ -283,7 +283,7 @@ static inline void gen_exception(DisasContext *ctx, uint32_t excp) gen_update_nip(ctx, ctx->nip); } t0 = tcg_const_i32(excp); - gen_helper_raise_exception(t0); + gen_helper_raise_exception(cpu_env, t0); tcg_temp_free_i32(t0); ctx->exception = (excp); } @@ -297,7 +297,7 @@ static inline void gen_debug_exception(DisasContext *ctx) gen_update_nip(ctx, ctx->nip); } t0 = tcg_const_i32(EXCP_DEBUG); - gen_helper_raise_exception(t0); + gen_helper_raise_exception(cpu_env, t0); tcg_temp_free_i32(t0); } @@ -1181,8 +1181,16 @@ static void gen_mulld(DisasContext *ctx) if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } + /* mulldo mulldo. */ -GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17); +static void gen_mulldo(DisasContext *ctx) +{ + gen_helper_mulldo(cpu_gpr[rD(ctx->opcode)], cpu_env, + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} #endif /* neg neg. nego nego. */ @@ -1869,7 +1877,7 @@ static void gen_slw(DisasContext *ctx) /* sraw & sraw. */ static void gen_sraw(DisasContext *ctx) { - gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], + gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); @@ -1953,7 +1961,7 @@ static void gen_sld(DisasContext *ctx) /* srad & srad. 
*/ static void gen_srad(DisasContext *ctx) { - gen_helper_srad(cpu_gpr[rA(ctx->opcode)], + gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); @@ -2027,10 +2035,12 @@ static void gen_f##name(DisasContext *ctx) \ /* NIP cannot be restored if the memory exception comes from an helper */ \ gen_update_nip(ctx, ctx->nip - 4); \ gen_reset_fpstatus(); \ - gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \ + gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \ + cpu_fpr[rA(ctx->opcode)], \ cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \ if (isfloat) { \ - gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \ + gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \ + cpu_fpr[rD(ctx->opcode)]); \ } \ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], set_fprf, \ Rc(ctx->opcode) != 0); \ @@ -2050,10 +2060,12 @@ static void gen_f##name(DisasContext *ctx) \ /* NIP cannot be restored if the memory exception comes from an helper */ \ gen_update_nip(ctx, ctx->nip - 4); \ gen_reset_fpstatus(); \ - gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \ + gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \ + cpu_fpr[rA(ctx->opcode)], \ cpu_fpr[rB(ctx->opcode)]); \ if (isfloat) { \ - gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \ + gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \ + cpu_fpr[rD(ctx->opcode)]); \ } \ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \ set_fprf, Rc(ctx->opcode) != 0); \ @@ -2072,10 +2084,12 @@ static void gen_f##name(DisasContext *ctx) \ /* NIP cannot be restored if the memory exception comes from an helper */ \ gen_update_nip(ctx, ctx->nip - 4); \ gen_reset_fpstatus(); \ - gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \ - cpu_fpr[rC(ctx->opcode)]); \ + gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \ + cpu_fpr[rA(ctx->opcode)], \ + cpu_fpr[rC(ctx->opcode)]); \ if (isfloat) { \ - gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \ + gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \ + cpu_fpr[rD(ctx->opcode)]); \ } \ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \ set_fprf, Rc(ctx->opcode) != 0); \ @@ -2094,7 +2108,8 @@ static void gen_f##name(DisasContext *ctx) \ /* NIP cannot be restored if the memory exception comes from an helper */ \ gen_update_nip(ctx, ctx->nip - 4); \ gen_reset_fpstatus(); \ - gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \ + gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \ + cpu_fpr[rB(ctx->opcode)]); \ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \ set_fprf, Rc(ctx->opcode) != 0); \ } @@ -2109,7 +2124,8 @@ static void gen_f##name(DisasContext *ctx) \ /* NIP cannot be restored if the memory exception comes from an helper */ \ gen_update_nip(ctx, ctx->nip - 4); \ gen_reset_fpstatus(); \ - gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \ + gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \ + cpu_fpr[rB(ctx->opcode)]); \ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \ set_fprf, Rc(ctx->opcode) != 0); \ } @@ -2140,8 +2156,10 @@ static void gen_frsqrtes(DisasContext *ctx) /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); gen_reset_fpstatus(); - gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); - gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); + 
gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_env, + cpu_fpr[rB(ctx->opcode)]); + gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, + cpu_fpr[rD(ctx->opcode)]); gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0); } @@ -2161,7 +2179,8 @@ static void gen_fsqrt(DisasContext *ctx) /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); gen_reset_fpstatus(); - gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); + gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env, + cpu_fpr[rB(ctx->opcode)]); gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0); } @@ -2174,8 +2193,10 @@ static void gen_fsqrts(DisasContext *ctx) /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); gen_reset_fpstatus(); - gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); - gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); + gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env, + cpu_fpr[rB(ctx->opcode)]); + gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, + cpu_fpr[rD(ctx->opcode)]); gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0); } @@ -2228,9 +2249,10 @@ static void gen_fcmpo(DisasContext *ctx) gen_update_nip(ctx, ctx->nip - 4); gen_reset_fpstatus(); crf = tcg_const_i32(crfD(ctx->opcode)); - gen_helper_fcmpo(cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], crf); + gen_helper_fcmpo(cpu_env, cpu_fpr[rA(ctx->opcode)], + cpu_fpr[rB(ctx->opcode)], crf); tcg_temp_free_i32(crf); - gen_helper_float_check_status(); + gen_helper_float_check_status(cpu_env); } /* fcmpu */ @@ -2245,9 +2267,10 @@ static void gen_fcmpu(DisasContext *ctx) gen_update_nip(ctx, ctx->nip - 4); gen_reset_fpstatus(); crf = tcg_const_i32(crfD(ctx->opcode)); - gen_helper_fcmpu(cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], crf); + gen_helper_fcmpu(cpu_env, cpu_fpr[rA(ctx->opcode)], + cpu_fpr[rB(ctx->opcode)], crf); tcg_temp_free_i32(crf); - gen_helper_float_check_status(); + gen_helper_float_check_status(cpu_env); } /*** Floating-point move ***/ @@ -2319,7 +2342,7 @@ static void gen_mtfsb0(DisasContext *ctx) /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); t0 = tcg_const_i32(crb); - gen_helper_fpscr_clrbit(t0); + gen_helper_fpscr_clrbit(cpu_env, t0); tcg_temp_free_i32(t0); } if (unlikely(Rc(ctx->opcode) != 0)) { @@ -2344,14 +2367,14 @@ static void gen_mtfsb1(DisasContext *ctx) /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); t0 = tcg_const_i32(crb); - gen_helper_fpscr_setbit(t0); + gen_helper_fpscr_setbit(cpu_env, t0); tcg_temp_free_i32(t0); } if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX); } /* We can raise a differed exception */ - gen_helper_float_check_status(); + gen_helper_float_check_status(cpu_env); } /* mtfsf */ @@ -2371,13 +2394,13 @@ static void gen_mtfsf(DisasContext *ctx) t0 = tcg_const_i32(0xff); else t0 = tcg_const_i32(FM(ctx->opcode)); - gen_helper_store_fpscr(cpu_fpr[rB(ctx->opcode)], t0); + gen_helper_store_fpscr(cpu_env, cpu_fpr[rB(ctx->opcode)], t0); tcg_temp_free_i32(t0); if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX); } /* We can raise a differed exception */ - gen_helper_float_check_status(); + gen_helper_float_check_status(cpu_env); } /* mtfsfi */ @@ -2398,14 +2421,14 @@ static void gen_mtfsfi(DisasContext *ctx) 
gen_reset_fpstatus(); t0 = tcg_const_i64(FPIMM(ctx->opcode) << (4 * sh)); t1 = tcg_const_i32(1 << sh); - gen_helper_store_fpscr(t0, t1); + gen_helper_store_fpscr(cpu_env, t0, t1); tcg_temp_free_i64(t0); tcg_temp_free_i32(t1); if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX); } /* We can raise a differed exception */ - gen_helper_float_check_status(); + gen_helper_float_check_status(cpu_env); } /*** Addressing modes ***/ @@ -2495,7 +2518,7 @@ static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask) tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1); t1 = tcg_const_i32(POWERPC_EXCP_ALIGN); t2 = tcg_const_i32(0); - gen_helper_raise_exception_err(t1, t2); + gen_helper_raise_exception_err(cpu_env, t1, t2); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); gen_set_label(l1); @@ -2966,7 +2989,7 @@ static void gen_lmw(DisasContext *ctx) t0 = tcg_temp_new(); t1 = tcg_const_i32(rD(ctx->opcode)); gen_addr_imm_index(ctx, t0, 0); - gen_helper_lmw(t0, t1); + gen_helper_lmw(cpu_env, t0, t1); tcg_temp_free(t0); tcg_temp_free_i32(t1); } @@ -2982,7 +3005,7 @@ static void gen_stmw(DisasContext *ctx) t0 = tcg_temp_new(); t1 = tcg_const_i32(rS(ctx->opcode)); gen_addr_imm_index(ctx, t0, 0); - gen_helper_stmw(t0, t1); + gen_helper_stmw(cpu_env, t0, t1); tcg_temp_free(t0); tcg_temp_free_i32(t1); } @@ -3020,7 +3043,7 @@ static void gen_lswi(DisasContext *ctx) gen_addr_register(ctx, t0); t1 = tcg_const_i32(nb); t2 = tcg_const_i32(start); - gen_helper_lsw(t0, t1, t2); + gen_helper_lsw(cpu_env, t0, t1, t2); tcg_temp_free(t0); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); @@ -3039,7 +3062,7 @@ static void gen_lswx(DisasContext *ctx) t1 = tcg_const_i32(rD(ctx->opcode)); t2 = tcg_const_i32(rA(ctx->opcode)); t3 = tcg_const_i32(rB(ctx->opcode)); - gen_helper_lswx(t0, t1, t2, t3); + gen_helper_lswx(cpu_env, t0, t1, t2, t3); tcg_temp_free(t0); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); @@ -3061,7 +3084,7 @@ static void gen_stswi(DisasContext *ctx) nb = 32; t1 = tcg_const_i32(nb); t2 = tcg_const_i32(rS(ctx->opcode)); - gen_helper_stsw(t0, t1, t2); + gen_helper_stsw(cpu_env, t0, t1, t2); tcg_temp_free(t0); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); @@ -3081,7 +3104,7 @@ static void gen_stswx(DisasContext *ctx) tcg_gen_trunc_tl_i32(t1, cpu_xer); tcg_gen_andi_i32(t1, t1, 0x7F); t2 = tcg_const_i32(rS(ctx->opcode)); - gen_helper_stsw(t0, t1, t2); + gen_helper_stsw(cpu_env, t0, t1, t2); tcg_temp_free(t0); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); @@ -3303,7 +3326,7 @@ static inline void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2) gen_qemu_ld32u(ctx, t0, arg2); tcg_gen_trunc_tl_i32(t1, t0); tcg_temp_free(t0); - gen_helper_float32_to_float64(arg1, t1); + gen_helper_float32_to_float64(arg1, cpu_env, t1); tcg_temp_free_i32(t1); } @@ -3393,7 +3416,7 @@ static inline void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2) { TCGv_i32 t0 = tcg_temp_new_i32(); TCGv t1 = tcg_temp_new(); - gen_helper_float64_to_float32(t0, arg1); + gen_helper_float64_to_float32(t0, cpu_env, arg1); tcg_gen_extu_i32_tl(t1, t0); tcg_temp_free_i32(t0); gen_qemu_st32(ctx, t1, arg2); @@ -3662,7 +3685,7 @@ static void gen_rfi(DisasContext *ctx) return; } gen_update_cfar(ctx, ctx->nip); - gen_helper_rfi(); + gen_helper_rfi(cpu_env); gen_sync_exception(ctx); #endif } @@ -3679,7 +3702,7 @@ static void gen_rfid(DisasContext *ctx) return; } gen_update_cfar(ctx, ctx->nip); - gen_helper_rfid(); + gen_helper_rfid(cpu_env); gen_sync_exception(ctx); #endif } @@ -3694,7 +3717,7 @@ static void 
gen_hrfid(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } - gen_helper_hrfid(); + gen_helper_hrfid(cpu_env); gen_sync_exception(ctx); #endif } @@ -3722,7 +3745,8 @@ static void gen_tw(DisasContext *ctx) TCGv_i32 t0 = tcg_const_i32(TO(ctx->opcode)); /* Update the nip since this might generate a trap exception */ gen_update_nip(ctx, ctx->nip); - gen_helper_tw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); + gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], + t0); tcg_temp_free_i32(t0); } @@ -3733,7 +3757,7 @@ static void gen_twi(DisasContext *ctx) TCGv_i32 t1 = tcg_const_i32(TO(ctx->opcode)); /* Update the nip since this might generate a trap exception */ gen_update_nip(ctx, ctx->nip); - gen_helper_tw(cpu_gpr[rA(ctx->opcode)], t0, t1); + gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_temp_free(t0); tcg_temp_free_i32(t1); } @@ -3745,7 +3769,8 @@ static void gen_td(DisasContext *ctx) TCGv_i32 t0 = tcg_const_i32(TO(ctx->opcode)); /* Update the nip since this might generate a trap exception */ gen_update_nip(ctx, ctx->nip); - gen_helper_td(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); + gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], + t0); tcg_temp_free_i32(t0); } @@ -3756,7 +3781,7 @@ static void gen_tdi(DisasContext *ctx) TCGv_i32 t1 = tcg_const_i32(TO(ctx->opcode)); /* Update the nip since this might generate a trap exception */ gen_update_nip(ctx, ctx->nip); - gen_helper_td(cpu_gpr[rA(ctx->opcode)], t0, t1); + gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_temp_free(t0); tcg_temp_free_i32(t1); } @@ -3934,7 +3959,7 @@ static void gen_mtmsrd(DisasContext *ctx) * directly from ppc_store_msr */ gen_update_nip(ctx, ctx->nip); - gen_helper_store_msr(cpu_gpr[rS(ctx->opcode)]); + gen_helper_store_msr(cpu_env, cpu_gpr[rS(ctx->opcode)]); /* Must stop the translation as machine state (may have) changed */ /* Note that mtmsr is not always defined as context-synchronizing */ gen_stop_exception(ctx); @@ -3972,7 +3997,7 @@ static void gen_mtmsr(DisasContext *ctx) #else tcg_gen_mov_tl(msr, cpu_gpr[rS(ctx->opcode)]); #endif - gen_helper_store_msr(msr); + gen_helper_store_msr(cpu_env, msr); /* Must stop the translation as machine state (may have) changed */ /* Note that mtmsr is not always defined as context-synchronizing */ gen_stop_exception(ctx); @@ -4091,7 +4116,7 @@ static void gen_dcbz(DisasContext *ctx) gen_update_nip(ctx, ctx->nip - 4); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); - gen_helper_dcbz(t0); + gen_helper_dcbz(cpu_env, t0); tcg_temp_free(t0); } @@ -4104,9 +4129,9 @@ static void gen_dcbz_970(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); if (ctx->opcode & 0x00200000) - gen_helper_dcbz(t0); + gen_helper_dcbz(cpu_env, t0); else - gen_helper_dcbz_970(t0); + gen_helper_dcbz_970(cpu_env, t0); tcg_temp_free(t0); } @@ -4146,7 +4171,7 @@ static void gen_icbi(DisasContext *ctx) gen_update_nip(ctx, ctx->nip - 4); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); - gen_helper_icbi(t0); + gen_helper_icbi(cpu_env, t0); tcg_temp_free(t0); } @@ -4175,7 +4200,7 @@ static void gen_mfsr(DisasContext *ctx) return; } t0 = tcg_const_tl(SR(ctx->opcode)); - gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], t0); + gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); tcg_temp_free(t0); #endif } @@ -4194,7 +4219,7 @@ static void gen_mfsrin(DisasContext *ctx) t0 = tcg_temp_new(); tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28); tcg_gen_andi_tl(t0, t0, 0xF); 
- gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], t0); + gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); tcg_temp_free(t0); #endif } @@ -4211,7 +4236,7 @@ static void gen_mtsr(DisasContext *ctx) return; } t0 = tcg_const_tl(SR(ctx->opcode)); - gen_helper_store_sr(t0, cpu_gpr[rS(ctx->opcode)]); + gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(t0); #endif } @@ -4230,7 +4255,7 @@ static void gen_mtsrin(DisasContext *ctx) t0 = tcg_temp_new(); tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28); tcg_gen_andi_tl(t0, t0, 0xF); - gen_helper_store_sr(t0, cpu_gpr[rD(ctx->opcode)]); + gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]); tcg_temp_free(t0); #endif } @@ -4250,7 +4275,7 @@ static void gen_mfsr_64b(DisasContext *ctx) return; } t0 = tcg_const_tl(SR(ctx->opcode)); - gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], t0); + gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); tcg_temp_free(t0); #endif } @@ -4269,7 +4294,7 @@ static void gen_mfsrin_64b(DisasContext *ctx) t0 = tcg_temp_new(); tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28); tcg_gen_andi_tl(t0, t0, 0xF); - gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], t0); + gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); tcg_temp_free(t0); #endif } @@ -4286,7 +4311,7 @@ static void gen_mtsr_64b(DisasContext *ctx) return; } t0 = tcg_const_tl(SR(ctx->opcode)); - gen_helper_store_sr(t0, cpu_gpr[rS(ctx->opcode)]); + gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(t0); #endif } @@ -4305,7 +4330,7 @@ static void gen_mtsrin_64b(DisasContext *ctx) t0 = tcg_temp_new(); tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28); tcg_gen_andi_tl(t0, t0, 0xF); - gen_helper_store_sr(t0, cpu_gpr[rS(ctx->opcode)]); + gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(t0); #endif } @@ -4320,7 +4345,8 @@ static void gen_slbmte(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); return; } - gen_helper_store_slb(cpu_gpr[rB(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); + gen_helper_store_slb(cpu_env, cpu_gpr[rB(ctx->opcode)], + cpu_gpr[rS(ctx->opcode)]); #endif } @@ -4333,7 +4359,7 @@ static void gen_slbmfee(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); return; } - gen_helper_load_slb_esid(cpu_gpr[rS(ctx->opcode)], + gen_helper_load_slb_esid(cpu_gpr[rS(ctx->opcode)], cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif } @@ -4347,7 +4373,7 @@ static void gen_slbmfev(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); return; } - gen_helper_load_slb_vsid(cpu_gpr[rS(ctx->opcode)], + gen_helper_load_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif } @@ -4366,7 +4392,7 @@ static void gen_tlbia(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } - gen_helper_tlbia(); + gen_helper_tlbia(cpu_env); #endif } @@ -4380,7 +4406,7 @@ static void gen_tlbiel(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } - gen_helper_tlbie(cpu_gpr[rB(ctx->opcode)]); + gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif } @@ -4398,11 +4424,11 @@ static void gen_tlbie(DisasContext *ctx) if (!ctx->sf_mode) { TCGv t0 = tcg_temp_new(); tcg_gen_ext32u_tl(t0, cpu_gpr[rB(ctx->opcode)]); - gen_helper_tlbie(t0); + gen_helper_tlbie(cpu_env, t0); tcg_temp_free(t0); } else #endif - gen_helper_tlbie(cpu_gpr[rB(ctx->opcode)]); + gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif } @@ -4434,7 +4460,7 @@ static void gen_slbia(DisasContext *ctx) gen_inval_exception(ctx, 
POWERPC_EXCP_PRIV_OPC); return; } - gen_helper_slbia(); + gen_helper_slbia(cpu_env); #endif } @@ -4448,7 +4474,7 @@ static void gen_slbie(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } - gen_helper_slbie(cpu_gpr[rB(ctx->opcode)]); + gen_helper_slbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif } #endif @@ -4525,7 +4551,7 @@ static void gen_abso(DisasContext *ctx) static void gen_clcs(DisasContext *ctx) { TCGv_i32 t0 = tcg_const_i32(rA(ctx->opcode)); - gen_helper_clcs(cpu_gpr[rD(ctx->opcode)], t0); + gen_helper_clcs(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); tcg_temp_free_i32(t0); /* Rc=1 sets CR0 to an undefined state */ } @@ -4533,7 +4559,8 @@ static void gen_clcs(DisasContext *ctx) /* div - div. */ static void gen_div(DisasContext *ctx) { - gen_helper_div(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + gen_helper_div(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rB(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -4541,7 +4568,8 @@ static void gen_div(DisasContext *ctx) /* divo - divo. */ static void gen_divo(DisasContext *ctx) { - gen_helper_divo(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + gen_helper_divo(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rB(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -4549,7 +4577,8 @@ static void gen_divo(DisasContext *ctx) /* divs - divs. */ static void gen_divs(DisasContext *ctx) { - gen_helper_divs(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + gen_helper_divs(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rB(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -4557,7 +4586,8 @@ static void gen_divs(DisasContext *ctx) /* divso - divso. 
*/ static void gen_divso(DisasContext *ctx) { - gen_helper_divso(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + gen_helper_divso(cpu_gpr[rD(ctx->opcode)], cpu_env, + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -4633,7 +4663,7 @@ static void gen_lscbx(DisasContext *ctx) gen_addr_reg_index(ctx, t0); /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); - gen_helper_lscbx(t0, t0, t1, t2, t3); + gen_helper_lscbx(t0, cpu_env, t0, t1, t2, t3); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); tcg_temp_free_i32(t3); @@ -5165,7 +5195,7 @@ static void gen_tlbld_6xx(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } - gen_helper_6xx_tlbd(cpu_gpr[rB(ctx->opcode)]); + gen_helper_6xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif } @@ -5179,7 +5209,7 @@ static void gen_tlbli_6xx(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } - gen_helper_6xx_tlbi(cpu_gpr[rB(ctx->opcode)]); + gen_helper_6xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif } @@ -5195,7 +5225,7 @@ static void gen_tlbld_74xx(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } - gen_helper_74xx_tlbd(cpu_gpr[rB(ctx->opcode)]); + gen_helper_74xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif } @@ -5209,7 +5239,7 @@ static void gen_tlbli_74xx(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } - gen_helper_74xx_tlbi(cpu_gpr[rB(ctx->opcode)]); + gen_helper_74xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif } @@ -5257,7 +5287,7 @@ static void gen_mfsri(DisasContext *ctx) gen_addr_reg_index(ctx, t0); tcg_gen_shri_tl(t0, t0, 28); tcg_gen_andi_tl(t0, t0, 0xF); - gen_helper_load_sr(cpu_gpr[rd], t0); + gen_helper_load_sr(cpu_gpr[rd], cpu_env, t0); tcg_temp_free(t0); if (ra != 0 && ra != rd) tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rd]); @@ -5276,7 +5306,7 @@ static void gen_rac(DisasContext *ctx) } t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); - gen_helper_rac(cpu_gpr[rD(ctx->opcode)], t0); + gen_helper_rac(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); tcg_temp_free(t0); #endif } @@ -5290,7 +5320,7 @@ static void gen_rfsvc(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } - gen_helper_rfsvc(); + gen_helper_rfsvc(cpu_env); gen_sync_exception(ctx); #endif } @@ -5454,7 +5484,7 @@ static void gen_tlbiva(DisasContext *ctx) } t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); - gen_helper_tlbie(cpu_gpr[rB(ctx->opcode)]); + gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); tcg_temp_free(t0); #endif } @@ -5687,7 +5717,7 @@ static void gen_mfdcr(DisasContext *ctx) /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); dcrn = tcg_const_tl(SPR(ctx->opcode)); - gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], dcrn); + gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn); tcg_temp_free(dcrn); #endif } @@ -5706,7 +5736,7 @@ static void gen_mtdcr(DisasContext *ctx) /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); dcrn = tcg_const_tl(SPR(ctx->opcode)); - gen_helper_store_dcr(dcrn, cpu_gpr[rS(ctx->opcode)]); + gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(dcrn); #endif } @@ -5724,7 +5754,8 @@ static void gen_mfdcrx(DisasContext *ctx) } /* NIP cannot be restored if the memory exception comes from an helper */ 
gen_update_nip(ctx, ctx->nip - 4); - gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); + gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, + cpu_gpr[rA(ctx->opcode)]); /* Note: Rc update flag set leads to undefined state of Rc0 */ #endif } @@ -5742,7 +5773,8 @@ static void gen_mtdcrx(DisasContext *ctx) } /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); - gen_helper_store_dcr(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); + gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rS(ctx->opcode)]); /* Note: Rc update flag set leads to undefined state of Rc0 */ #endif } @@ -5752,7 +5784,8 @@ static void gen_mfdcrux(DisasContext *ctx) { /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); - gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); + gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, + cpu_gpr[rA(ctx->opcode)]); /* Note: Rc update flag set leads to undefined state of Rc0 */ } @@ -5761,7 +5794,8 @@ static void gen_mtdcrux(DisasContext *ctx) { /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); - gen_helper_store_dcr(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); + gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rS(ctx->opcode)]); /* Note: Rc update flag set leads to undefined state of Rc0 */ } @@ -5849,7 +5883,7 @@ static void gen_rfci_40x(DisasContext *ctx) return; } /* Restore CPU state */ - gen_helper_40x_rfci(); + gen_helper_40x_rfci(cpu_env); gen_sync_exception(ctx); #endif } @@ -5864,7 +5898,7 @@ static void gen_rfci(DisasContext *ctx) return; } /* Restore CPU state */ - gen_helper_rfci(); + gen_helper_rfci(cpu_env); gen_sync_exception(ctx); #endif } @@ -5882,7 +5916,7 @@ static void gen_rfdi(DisasContext *ctx) return; } /* Restore CPU state */ - gen_helper_rfdi(); + gen_helper_rfdi(cpu_env); gen_sync_exception(ctx); #endif } @@ -5898,7 +5932,7 @@ static void gen_rfmci(DisasContext *ctx) return; } /* Restore CPU state */ - gen_helper_rfmci(); + gen_helper_rfmci(cpu_env); gen_sync_exception(ctx); #endif } @@ -5917,10 +5951,12 @@ static void gen_tlbre_40x(DisasContext *ctx) } switch (rB(ctx->opcode)) { case 0: - gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); + gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_env, + cpu_gpr[rA(ctx->opcode)]); break; case 1: - gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); + gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_env, + cpu_gpr[rA(ctx->opcode)]); break; default: gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); @@ -5942,7 +5978,7 @@ static void gen_tlbsx_40x(DisasContext *ctx) } t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); - gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], t0); + gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); tcg_temp_free(t0); if (Rc(ctx->opcode)) { int l1 = gen_new_label(); @@ -5968,10 +6004,12 @@ static void gen_tlbwe_40x(DisasContext *ctx) } switch (rB(ctx->opcode)) { case 0: - gen_helper_4xx_tlbwe_hi(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); + gen_helper_4xx_tlbwe_hi(cpu_env, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rS(ctx->opcode)]); break; case 1: - gen_helper_4xx_tlbwe_lo(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); + gen_helper_4xx_tlbwe_lo(cpu_env, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rS(ctx->opcode)]); break; default: gen_inval_exception(ctx, 
POWERPC_EXCP_INVAL_INVAL); @@ -5998,7 +6036,8 @@ static void gen_tlbre_440(DisasContext *ctx) case 2: { TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode)); - gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], t0, cpu_gpr[rA(ctx->opcode)]); + gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env, + t0, cpu_gpr[rA(ctx->opcode)]); tcg_temp_free_i32(t0); } break; @@ -6022,7 +6061,7 @@ static void gen_tlbsx_440(DisasContext *ctx) } t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); - gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], t0); + gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); tcg_temp_free(t0); if (Rc(ctx->opcode)) { int l1 = gen_new_label(); @@ -6052,7 +6091,8 @@ static void gen_tlbwe_440(DisasContext *ctx) case 2: { TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode)); - gen_helper_440_tlbwe(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); + gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rS(ctx->opcode)]); tcg_temp_free_i32(t0); } break; @@ -6076,7 +6116,7 @@ static void gen_tlbre_booke206(DisasContext *ctx) return; } - gen_helper_booke206_tlbre(); + gen_helper_booke206_tlbre(cpu_env); #endif } @@ -6100,7 +6140,7 @@ static void gen_tlbsx_booke206(DisasContext *ctx) } tcg_gen_add_tl(t0, t0, cpu_gpr[rB(ctx->opcode)]); - gen_helper_booke206_tlbsx(t0); + gen_helper_booke206_tlbsx(cpu_env, t0); #endif } @@ -6115,7 +6155,7 @@ static void gen_tlbwe_booke206(DisasContext *ctx) return; } gen_update_nip(ctx, ctx->nip - 4); - gen_helper_booke206_tlbwe(); + gen_helper_booke206_tlbwe(cpu_env); #endif } @@ -6133,7 +6173,7 @@ static void gen_tlbivax_booke206(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); - gen_helper_booke206_tlbivax(t0); + gen_helper_booke206_tlbivax(cpu_env, t0); #endif } @@ -6153,13 +6193,13 @@ static void gen_tlbilx_booke206(DisasContext *ctx) switch((ctx->opcode >> 21) & 0x3) { case 0: - gen_helper_booke206_tlbilx0(t0); + gen_helper_booke206_tlbilx0(cpu_env, t0); break; case 1: - gen_helper_booke206_tlbilx1(t0); + gen_helper_booke206_tlbilx1(cpu_env, t0); break; case 3: - gen_helper_booke206_tlbilx3(t0); + gen_helper_booke206_tlbilx3(cpu_env, t0); break; default: gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); @@ -6220,8 +6260,8 @@ static void gen_wrteei(DisasContext *ctx) static void gen_dlmzb(DisasContext *ctx) { TCGv_i32 t0 = tcg_const_i32(Rc(ctx->opcode)); - gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], - cpu_gpr[rB(ctx->opcode)], t0); + gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env, + cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); tcg_temp_free_i32(t0); } @@ -6258,7 +6298,7 @@ static void gen_msgclr(DisasContext *ctx) return; } - gen_helper_msgclr(cpu_gpr[rB(ctx->opcode)]); + gen_helper_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif } @@ -6347,7 +6387,7 @@ static void gen_lve##name(DisasContext *ctx) \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ rs = gen_avr_ptr(rS(ctx->opcode)); \ - gen_helper_lve##name (rs, EA); \ + gen_helper_lve##name(cpu_env, rs, EA); \ tcg_temp_free(EA); \ tcg_temp_free_ptr(rs); \ } @@ -6365,7 +6405,7 @@ static void gen_stve##name(DisasContext *ctx) \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ rs = gen_avr_ptr(rS(ctx->opcode)); \ - gen_helper_stve##name (rs, EA); \ + gen_helper_stve##name(cpu_env, rs, EA); \ tcg_temp_free(EA); \ tcg_temp_free_ptr(rs); \ } @@ -6440,7 +6480,7 @@ static void gen_mtvscr(DisasContext *ctx) return; } p = gen_avr_ptr(rD(ctx->opcode)); - gen_helper_mtvscr(p); + gen_helper_mtvscr(cpu_env, p); tcg_temp_free_ptr(p); 
} @@ -6479,6 +6519,23 @@ static void glue(gen_, name)(DisasContext *ctx) tcg_temp_free_ptr(rd); \ } +#define GEN_VXFORM_ENV(name, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + TCGv_ptr ra, rb, rd; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + ra = gen_avr_ptr(rA(ctx->opcode)); \ + rb = gen_avr_ptr(rB(ctx->opcode)); \ + rd = gen_avr_ptr(rD(ctx->opcode)); \ + gen_helper_##name(rd, cpu_env, ra, rb); \ + tcg_temp_free_ptr(ra); \ + tcg_temp_free_ptr(rb); \ + tcg_temp_free_ptr(rd); \ +} + GEN_VXFORM(vaddubm, 0, 0); GEN_VXFORM(vadduhm, 0, 1); GEN_VXFORM(vadduwm, 0, 2); @@ -6530,41 +6587,41 @@ GEN_VXFORM(vslo, 6, 16); GEN_VXFORM(vsro, 6, 17); GEN_VXFORM(vaddcuw, 0, 6); GEN_VXFORM(vsubcuw, 0, 22); -GEN_VXFORM(vaddubs, 0, 8); -GEN_VXFORM(vadduhs, 0, 9); -GEN_VXFORM(vadduws, 0, 10); -GEN_VXFORM(vaddsbs, 0, 12); -GEN_VXFORM(vaddshs, 0, 13); -GEN_VXFORM(vaddsws, 0, 14); -GEN_VXFORM(vsububs, 0, 24); -GEN_VXFORM(vsubuhs, 0, 25); -GEN_VXFORM(vsubuws, 0, 26); -GEN_VXFORM(vsubsbs, 0, 28); -GEN_VXFORM(vsubshs, 0, 29); -GEN_VXFORM(vsubsws, 0, 30); +GEN_VXFORM_ENV(vaddubs, 0, 8); +GEN_VXFORM_ENV(vadduhs, 0, 9); +GEN_VXFORM_ENV(vadduws, 0, 10); +GEN_VXFORM_ENV(vaddsbs, 0, 12); +GEN_VXFORM_ENV(vaddshs, 0, 13); +GEN_VXFORM_ENV(vaddsws, 0, 14); +GEN_VXFORM_ENV(vsububs, 0, 24); +GEN_VXFORM_ENV(vsubuhs, 0, 25); +GEN_VXFORM_ENV(vsubuws, 0, 26); +GEN_VXFORM_ENV(vsubsbs, 0, 28); +GEN_VXFORM_ENV(vsubshs, 0, 29); +GEN_VXFORM_ENV(vsubsws, 0, 30); GEN_VXFORM(vrlb, 2, 0); GEN_VXFORM(vrlh, 2, 1); GEN_VXFORM(vrlw, 2, 2); GEN_VXFORM(vsl, 2, 7); GEN_VXFORM(vsr, 2, 11); -GEN_VXFORM(vpkuhum, 7, 0); -GEN_VXFORM(vpkuwum, 7, 1); -GEN_VXFORM(vpkuhus, 7, 2); -GEN_VXFORM(vpkuwus, 7, 3); -GEN_VXFORM(vpkshus, 7, 4); -GEN_VXFORM(vpkswus, 7, 5); -GEN_VXFORM(vpkshss, 7, 6); -GEN_VXFORM(vpkswss, 7, 7); +GEN_VXFORM_ENV(vpkuhum, 7, 0); +GEN_VXFORM_ENV(vpkuwum, 7, 1); +GEN_VXFORM_ENV(vpkuhus, 7, 2); +GEN_VXFORM_ENV(vpkuwus, 7, 3); +GEN_VXFORM_ENV(vpkshus, 7, 4); +GEN_VXFORM_ENV(vpkswus, 7, 5); +GEN_VXFORM_ENV(vpkshss, 7, 6); +GEN_VXFORM_ENV(vpkswss, 7, 7); GEN_VXFORM(vpkpx, 7, 12); -GEN_VXFORM(vsum4ubs, 4, 24); -GEN_VXFORM(vsum4sbs, 4, 28); -GEN_VXFORM(vsum4shs, 4, 25); -GEN_VXFORM(vsum2sws, 4, 26); -GEN_VXFORM(vsumsws, 4, 30); -GEN_VXFORM(vaddfp, 5, 0); -GEN_VXFORM(vsubfp, 5, 1); -GEN_VXFORM(vmaxfp, 5, 16); -GEN_VXFORM(vminfp, 5, 17); +GEN_VXFORM_ENV(vsum4ubs, 4, 24); +GEN_VXFORM_ENV(vsum4sbs, 4, 28); +GEN_VXFORM_ENV(vsum4shs, 4, 25); +GEN_VXFORM_ENV(vsum2sws, 4, 26); +GEN_VXFORM_ENV(vsumsws, 4, 30); +GEN_VXFORM_ENV(vaddfp, 5, 0); +GEN_VXFORM_ENV(vsubfp, 5, 1); +GEN_VXFORM_ENV(vmaxfp, 5, 16); +GEN_VXFORM_ENV(vminfp, 5, 17); #define GEN_VXRFORM1(opname, name, str, opc2, opc3) \ static void glue(gen_, name)(DisasContext *ctx) \ @@ -6577,7 +6634,7 @@ static void glue(gen_, name)(DisasContext *ctx) \ ra = gen_avr_ptr(rA(ctx->opcode)); \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ - gen_helper_##opname (rd, ra, rb); \ + gen_helper_##opname(cpu_env, rd, ra, rb); \ tcg_temp_free_ptr(ra); \ tcg_temp_free_ptr(rb); \ tcg_temp_free_ptr(rd); \ @@ -6636,20 +6693,36 @@ static void glue(gen_, name)(DisasContext *ctx) tcg_temp_free_ptr(rd); \ } +#define GEN_VXFORM_NOA_ENV(name, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) \ + { \ + TCGv_ptr rb, rd; \ + \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + rb = gen_avr_ptr(rB(ctx->opcode)); \ + rd = 
gen_avr_ptr(rD(ctx->opcode)); \ + gen_helper_##name(cpu_env, rd, rb); \ + tcg_temp_free_ptr(rb); \ + tcg_temp_free_ptr(rd); \ + } + GEN_VXFORM_NOA(vupkhsb, 7, 8); GEN_VXFORM_NOA(vupkhsh, 7, 9); GEN_VXFORM_NOA(vupklsb, 7, 10); GEN_VXFORM_NOA(vupklsh, 7, 11); GEN_VXFORM_NOA(vupkhpx, 7, 13); GEN_VXFORM_NOA(vupklpx, 7, 15); -GEN_VXFORM_NOA(vrefp, 5, 4); -GEN_VXFORM_NOA(vrsqrtefp, 5, 5); -GEN_VXFORM_NOA(vexptefp, 5, 6); -GEN_VXFORM_NOA(vlogefp, 5, 7); -GEN_VXFORM_NOA(vrfim, 5, 8); -GEN_VXFORM_NOA(vrfin, 5, 9); -GEN_VXFORM_NOA(vrfip, 5, 10); -GEN_VXFORM_NOA(vrfiz, 5, 11); +GEN_VXFORM_NOA_ENV(vrefp, 5, 4); +GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5); +GEN_VXFORM_NOA_ENV(vexptefp, 5, 6); +GEN_VXFORM_NOA_ENV(vlogefp, 5, 7); +GEN_VXFORM_NOA_ENV(vrfim, 5, 8); +GEN_VXFORM_NOA_ENV(vrfin, 5, 9); +GEN_VXFORM_NOA_ENV(vrfip, 5, 10); +GEN_VXFORM_NOA_ENV(vrfiz, 5, 11); #define GEN_VXFORM_SIMM(name, opc2, opc3) \ static void glue(gen_, name)(DisasContext *ctx) \ @@ -6685,13 +6758,32 @@ static void glue(gen_, name)(DisasContext *ctx) tcg_temp_free_ptr(rd); \ } +#define GEN_VXFORM_UIMM_ENV(name, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) \ + { \ + TCGv_ptr rb, rd; \ + TCGv_i32 uimm; \ + \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + uimm = tcg_const_i32(UIMM5(ctx->opcode)); \ + rb = gen_avr_ptr(rB(ctx->opcode)); \ + rd = gen_avr_ptr(rD(ctx->opcode)); \ + gen_helper_##name(cpu_env, rd, rb, uimm); \ + tcg_temp_free_i32(uimm); \ + tcg_temp_free_ptr(rb); \ + tcg_temp_free_ptr(rd); \ + } + GEN_VXFORM_UIMM(vspltb, 6, 8); GEN_VXFORM_UIMM(vsplth, 6, 9); GEN_VXFORM_UIMM(vspltw, 6, 10); -GEN_VXFORM_UIMM(vcfux, 5, 12); -GEN_VXFORM_UIMM(vcfsx, 5, 13); -GEN_VXFORM_UIMM(vctuxs, 5, 14); -GEN_VXFORM_UIMM(vctsxs, 5, 15); +GEN_VXFORM_UIMM_ENV(vcfux, 5, 12); +GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13); +GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14); +GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15); static void gen_vsldoi(DisasContext *ctx) { @@ -6713,7 +6805,7 @@ static void gen_vsldoi(DisasContext *ctx) } #define GEN_VAFORM_PAIRED(name0, name1, opc2) \ -static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ +static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ { \ TCGv_ptr ra, rb, rc, rd; \ if (unlikely(!ctx->altivec_enabled)) { \ @@ -6725,9 +6817,9 @@ static void glue(gen_, name0##_##name1)(DisasContext *ctx) rc = gen_avr_ptr(rC(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ if (Rc(ctx->opcode)) { \ - gen_helper_##name1 (rd, ra, rb, rc); \ + gen_helper_##name1(cpu_env, rd, ra, rb, rc); \ } else { \ - gen_helper_##name0 (rd, ra, rb, rc); \ + gen_helper_##name0(cpu_env, rd, ra, rb, rc); \ } \ tcg_temp_free_ptr(ra); \ tcg_temp_free_ptr(rb); \ @@ -8008,7 +8100,7 @@ static inline void gen_##name(DisasContext *ctx) \ TCGv t1; \ t0 = tcg_temp_new_i32(); \ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \ - gen_helper_##name(t0, t0); \ + gen_helper_##name(t0, cpu_env, t0); \ t1 = tcg_temp_new(); \ tcg_gen_extu_i32_tl(t1, t0); \ tcg_temp_free_i32(t0); \ @@ -8023,7 +8115,7 @@ static inline void gen_##name(DisasContext *ctx) \ TCGv_i32 t0; \ TCGv t1; \ t0 = tcg_temp_new_i32(); \ - gen_helper_##name(t0, cpu_gpr[rB(ctx->opcode)]); \ + gen_helper_##name(t0, cpu_env, cpu_gpr[rB(ctx->opcode)]); \ t1 = tcg_temp_new(); \ tcg_gen_extu_i32_tl(t1, t0); \ tcg_temp_free_i32(t0); \ @@ -8037,13 +8129,14 @@ static inline void gen_##name(DisasContext *ctx) \ { \ TCGv_i32 t0 = tcg_temp_new_i32(); \ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \ - 
gen_helper_##name(cpu_gpr[rD(ctx->opcode)], t0); \ + gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); \ tcg_temp_free_i32(t0); \ } #define GEN_SPEFPUOP_CONV_64_64(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ - gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \ + gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_env, \ + cpu_gpr[rB(ctx->opcode)]); \ } #define GEN_SPEFPUOP_ARITH2_32_32(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -8058,7 +8151,7 @@ static inline void gen_##name(DisasContext *ctx) \ t1 = tcg_temp_new_i32(); \ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \ - gen_helper_##name(t0, t0, t1); \ + gen_helper_##name(t0, cpu_env, t0, t1); \ tcg_temp_free_i32(t1); \ t2 = tcg_temp_new(); \ tcg_gen_extu_i32_tl(t2, t0); \ @@ -8075,8 +8168,8 @@ static inline void gen_##name(DisasContext *ctx) \ gen_exception(ctx, POWERPC_EXCP_SPEU); \ return; \ } \ - gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \ - cpu_gpr[rB(ctx->opcode)]); \ + gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_env, \ + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \ } #define GEN_SPEFPUOP_COMP_32(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -8090,7 +8183,7 @@ static inline void gen_##name(DisasContext *ctx) \ t1 = tcg_temp_new_i32(); \ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \ - gen_helper_##name(cpu_crf[crfD(ctx->opcode)], t0, t1); \ + gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \ tcg_temp_free_i32(t0); \ tcg_temp_free_i32(t1); \ } @@ -8101,28 +8194,29 @@ static inline void gen_##name(DisasContext *ctx) \ gen_exception(ctx, POWERPC_EXCP_SPEU); \ return; \ } \ - gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ + gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, \ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \ } #else #define GEN_SPEFPUOP_CONV_32_32(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ - gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \ + gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_env, \ + cpu_gpr[rB(ctx->opcode)]); \ } #define GEN_SPEFPUOP_CONV_32_64(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGv_i64 t0 = tcg_temp_new_i64(); \ gen_load_gpr64(t0, rB(ctx->opcode)); \ - gen_helper_##name(cpu_gpr[rD(ctx->opcode)], t0); \ + gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); \ tcg_temp_free_i64(t0); \ } #define GEN_SPEFPUOP_CONV_64_32(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGv_i64 t0 = tcg_temp_new_i64(); \ - gen_helper_##name(t0, cpu_gpr[rB(ctx->opcode)]); \ + gen_helper_##name(t0, cpu_env, cpu_gpr[rB(ctx->opcode)]); \ gen_store_gpr64(rD(ctx->opcode), t0); \ tcg_temp_free_i64(t0); \ } @@ -8131,7 +8225,7 @@ static inline void gen_##name(DisasContext *ctx) \ { \ TCGv_i64 t0 = tcg_temp_new_i64(); \ gen_load_gpr64(t0, rB(ctx->opcode)); \ - gen_helper_##name(t0, t0); \ + gen_helper_##name(t0, cpu_env, t0); \ gen_store_gpr64(rD(ctx->opcode), t0); \ tcg_temp_free_i64(t0); \ } @@ -8142,7 +8236,7 @@ static inline void gen_##name(DisasContext *ctx) \ gen_exception(ctx, POWERPC_EXCP_SPEU); \ return; \ } \ - gen_helper_##name(cpu_gpr[rD(ctx->opcode)], \ + gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_env, \ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \ } #define GEN_SPEFPUOP_ARITH2_64_64(name) \ @@ -8157,7 +8251,7 @@ static inline void 
gen_##name(DisasContext *ctx) \ t1 = tcg_temp_new_i64(); \ gen_load_gpr64(t0, rA(ctx->opcode)); \ gen_load_gpr64(t1, rB(ctx->opcode)); \ - gen_helper_##name(t0, t0, t1); \ + gen_helper_##name(t0, cpu_env, t0, t1); \ gen_store_gpr64(rD(ctx->opcode), t0); \ tcg_temp_free_i64(t0); \ tcg_temp_free_i64(t1); \ @@ -8169,7 +8263,7 @@ static inline void gen_##name(DisasContext *ctx) \ gen_exception(ctx, POWERPC_EXCP_SPEU); \ return; \ } \ - gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ + gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, \ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \ } #define GEN_SPEFPUOP_COMP_64(name) \ @@ -8184,7 +8278,7 @@ static inline void gen_##name(DisasContext *ctx) \ t1 = tcg_temp_new_i64(); \ gen_load_gpr64(t0, rA(ctx->opcode)); \ gen_load_gpr64(t1, rB(ctx->opcode)); \ - gen_helper_##name(cpu_crf[crfD(ctx->opcode)], t0, t1); \ + gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \ tcg_temp_free_i64(t0); \ tcg_temp_free_i64(t1); \ } @@ -9532,7 +9626,7 @@ static inline void gen_intermediate_code_internal(CPUPPCState *env, ctx.access_type = -1; ctx.le_mode = env->hflags & (1 << MSR_LE) ? 1 : 0; #if defined(TARGET_PPC64) - ctx.sf_mode = msr_sf; + ctx.sf_mode = msr_is_64bit(env, env->msr); ctx.has_cfar = !!(env->flags & POWERPC_FLAG_CFAR); #endif ctx.fpu_enabled = msr_fp; @@ -9589,9 +9683,9 @@ static inline void gen_intermediate_code_internal(CPUPPCState *env, if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) gen_io_start(); if (unlikely(ctx.le_mode)) { - ctx.opcode = bswap32(ldl_code(ctx.nip)); + ctx.opcode = bswap32(cpu_ldl_code(env, ctx.nip)); } else { - ctx.opcode = ldl_code(ctx.nip); + ctx.opcode = cpu_ldl_code(env, ctx.nip); } LOG_DISAS("translate opcode %08x (%02x %02x %02x) (%s)\n", ctx.opcode, opc1(ctx.opcode), opc2(ctx.opcode), diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c index 6f61175e7d..5742229197 100644 --- a/target-ppc/translate_init.c +++ b/target-ppc/translate_init.c @@ -55,31 +55,50 @@ PPC_IRQ_INIT_FN(e500); /* Generic callbacks: * do nothing but store/retrieve spr value */ +static void spr_load_dump_spr(int sprn) +{ +#ifdef PPC_DUMP_SPR_ACCESSES + TCGv_i32 t0 = tcg_const_i32(sprn); + gen_helper_load_dump_spr(t0); + tcg_temp_free_i32(t0); +#endif +} + static void spr_read_generic (void *opaque, int gprn, int sprn) { gen_load_spr(cpu_gpr[gprn], sprn); + spr_load_dump_spr(sprn); +} + +static void spr_store_dump_spr(int sprn) +{ #ifdef PPC_DUMP_SPR_ACCESSES - { - TCGv_i32 t0 = tcg_const_i32(sprn); - gen_helper_load_dump_spr(t0); - tcg_temp_free_i32(t0); - } + TCGv_i32 t0 = tcg_const_i32(sprn); + gen_helper_store_dump_spr(t0); + tcg_temp_free_i32(t0); #endif } static void spr_write_generic (void *opaque, int sprn, int gprn) { gen_store_spr(sprn, cpu_gpr[gprn]); -#ifdef PPC_DUMP_SPR_ACCESSES - { - TCGv_i32 t0 = tcg_const_i32(sprn); - gen_helper_store_dump_spr(t0); - tcg_temp_free_i32(t0); - } -#endif + spr_store_dump_spr(sprn); } #if !defined(CONFIG_USER_ONLY) +static void spr_write_generic32(void *opaque, int sprn, int gprn) +{ +#ifdef TARGET_PPC64 + TCGv t0 = tcg_temp_new(); + tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]); + gen_store_spr(sprn, t0); + tcg_temp_free(t0); + spr_store_dump_spr(sprn); +#else + spr_write_generic(opaque, sprn, gprn); +#endif +} + static void spr_write_clear (void *opaque, int sprn, int gprn) { TCGv t0 = tcg_temp_new(); @@ -159,7 +178,7 @@ static void spr_read_decr (void *opaque, int gprn, int sprn) if (use_icount) { gen_io_start(); } - gen_helper_load_decr(cpu_gpr[gprn]); + 
gen_helper_load_decr(cpu_gpr[gprn], cpu_env); if (use_icount) { gen_io_end(); gen_stop_exception(opaque); @@ -171,7 +190,7 @@ static void spr_write_decr (void *opaque, int sprn, int gprn) if (use_icount) { gen_io_start(); } - gen_helper_store_decr(cpu_gpr[gprn]); + gen_helper_store_decr(cpu_env, cpu_gpr[gprn]); if (use_icount) { gen_io_end(); gen_stop_exception(opaque); @@ -186,7 +205,7 @@ static void spr_read_tbl (void *opaque, int gprn, int sprn) if (use_icount) { gen_io_start(); } - gen_helper_load_tbl(cpu_gpr[gprn]); + gen_helper_load_tbl(cpu_gpr[gprn], cpu_env); if (use_icount) { gen_io_end(); gen_stop_exception(opaque); @@ -198,7 +217,7 @@ static void spr_read_tbu (void *opaque, int gprn, int sprn) if (use_icount) { gen_io_start(); } - gen_helper_load_tbu(cpu_gpr[gprn]); + gen_helper_load_tbu(cpu_gpr[gprn], cpu_env); if (use_icount) { gen_io_end(); gen_stop_exception(opaque); @@ -208,13 +227,13 @@ static void spr_read_tbu (void *opaque, int gprn, int sprn) __attribute__ (( unused )) static void spr_read_atbl (void *opaque, int gprn, int sprn) { - gen_helper_load_atbl(cpu_gpr[gprn]); + gen_helper_load_atbl(cpu_gpr[gprn], cpu_env); } __attribute__ (( unused )) static void spr_read_atbu (void *opaque, int gprn, int sprn) { - gen_helper_load_atbu(cpu_gpr[gprn]); + gen_helper_load_atbu(cpu_gpr[gprn], cpu_env); } #if !defined(CONFIG_USER_ONLY) @@ -223,7 +242,7 @@ static void spr_write_tbl (void *opaque, int sprn, int gprn) if (use_icount) { gen_io_start(); } - gen_helper_store_tbl(cpu_gpr[gprn]); + gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]); if (use_icount) { gen_io_end(); gen_stop_exception(opaque); @@ -235,7 +254,7 @@ static void spr_write_tbu (void *opaque, int sprn, int gprn) if (use_icount) { gen_io_start(); } - gen_helper_store_tbu(cpu_gpr[gprn]); + gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]); if (use_icount) { gen_io_end(); gen_stop_exception(opaque); @@ -245,20 +264,20 @@ static void spr_write_tbu (void *opaque, int sprn, int gprn) __attribute__ (( unused )) static void spr_write_atbl (void *opaque, int sprn, int gprn) { - gen_helper_store_atbl(cpu_gpr[gprn]); + gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]); } __attribute__ (( unused )) static void spr_write_atbu (void *opaque, int sprn, int gprn) { - gen_helper_store_atbu(cpu_gpr[gprn]); + gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]); } #if defined(TARGET_PPC64) __attribute__ (( unused )) static void spr_read_purr (void *opaque, int gprn, int sprn) { - gen_helper_load_purr(cpu_gpr[gprn]); + gen_helper_load_purr(cpu_gpr[gprn], cpu_env); } #endif #endif @@ -279,28 +298,28 @@ static void spr_read_ibat_h (void *opaque, int gprn, int sprn) static void spr_write_ibatu (void *opaque, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2); - gen_helper_store_ibatu(t0, cpu_gpr[gprn]); + gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(t0); } static void spr_write_ibatu_h (void *opaque, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4U) / 2) + 4); - gen_helper_store_ibatu(t0, cpu_gpr[gprn]); + gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(t0); } static void spr_write_ibatl (void *opaque, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0L) / 2); - gen_helper_store_ibatl(t0, cpu_gpr[gprn]); + gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(t0); } static void spr_write_ibatl_h (void *opaque, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4L) / 2) + 4); - gen_helper_store_ibatl(t0, cpu_gpr[gprn]); 
+ gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(t0); } @@ -319,35 +338,35 @@ static void spr_read_dbat_h (void *opaque, int gprn, int sprn) static void spr_write_dbatu (void *opaque, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0U) / 2); - gen_helper_store_dbatu(t0, cpu_gpr[gprn]); + gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(t0); } static void spr_write_dbatu_h (void *opaque, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4U) / 2) + 4); - gen_helper_store_dbatu(t0, cpu_gpr[gprn]); + gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(t0); } static void spr_write_dbatl (void *opaque, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0L) / 2); - gen_helper_store_dbatl(t0, cpu_gpr[gprn]); + gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(t0); } static void spr_write_dbatl_h (void *opaque, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4L) / 2) + 4); - gen_helper_store_dbatl(t0, cpu_gpr[gprn]); + gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(t0); } /* SDR1 */ static void spr_write_sdr1 (void *opaque, int sprn, int gprn) { - gen_helper_store_sdr1(cpu_gpr[gprn]); + gen_helper_store_sdr1(cpu_env, cpu_gpr[gprn]); } /* 64 bits PowerPC specific SPRs */ @@ -373,7 +392,7 @@ static void spr_read_asr (void *opaque, int gprn, int sprn) static void spr_write_asr (void *opaque, int sprn, int gprn) { - gen_helper_store_asr(cpu_gpr[gprn]); + gen_helper_store_asr(cpu_env, cpu_gpr[gprn]); } #endif #endif @@ -382,30 +401,30 @@ static void spr_write_asr (void *opaque, int sprn, int gprn) /* RTC */ static void spr_read_601_rtcl (void *opaque, int gprn, int sprn) { - gen_helper_load_601_rtcl(cpu_gpr[gprn]); + gen_helper_load_601_rtcl(cpu_gpr[gprn], cpu_env); } static void spr_read_601_rtcu (void *opaque, int gprn, int sprn) { - gen_helper_load_601_rtcu(cpu_gpr[gprn]); + gen_helper_load_601_rtcu(cpu_gpr[gprn], cpu_env); } #if !defined(CONFIG_USER_ONLY) static void spr_write_601_rtcu (void *opaque, int sprn, int gprn) { - gen_helper_store_601_rtcu(cpu_gpr[gprn]); + gen_helper_store_601_rtcu(cpu_env, cpu_gpr[gprn]); } static void spr_write_601_rtcl (void *opaque, int sprn, int gprn) { - gen_helper_store_601_rtcl(cpu_gpr[gprn]); + gen_helper_store_601_rtcl(cpu_env, cpu_gpr[gprn]); } static void spr_write_hid0_601 (void *opaque, int sprn, int gprn) { DisasContext *ctx = opaque; - gen_helper_store_hid0_601(cpu_gpr[gprn]); + gen_helper_store_hid0_601(cpu_env, cpu_gpr[gprn]); /* Must stop the translation as endianness may have changed */ gen_stop_exception(ctx); } @@ -421,14 +440,14 @@ static void spr_read_601_ubat (void *opaque, int gprn, int sprn) static void spr_write_601_ubatu (void *opaque, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2); - gen_helper_store_601_batl(t0, cpu_gpr[gprn]); + gen_helper_store_601_batl(cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(t0); } static void spr_write_601_ubatl (void *opaque, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2); - gen_helper_store_601_batu(t0, cpu_gpr[gprn]); + gen_helper_store_601_batu(cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(t0); } #endif @@ -437,36 +456,36 @@ static void spr_write_601_ubatl (void *opaque, int sprn, int gprn) #if !defined(CONFIG_USER_ONLY) static void spr_read_40x_pit (void *opaque, int gprn, int sprn) { - gen_helper_load_40x_pit(cpu_gpr[gprn]); + gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env); } 
static void spr_write_40x_pit (void *opaque, int sprn, int gprn) { - gen_helper_store_40x_pit(cpu_gpr[gprn]); + gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]); } static void spr_write_40x_dbcr0 (void *opaque, int sprn, int gprn) { DisasContext *ctx = opaque; - gen_helper_store_40x_dbcr0(cpu_gpr[gprn]); + gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]); /* We must stop translation as we may have rebooted */ gen_stop_exception(ctx); } static void spr_write_40x_sler (void *opaque, int sprn, int gprn) { - gen_helper_store_40x_sler(cpu_gpr[gprn]); + gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]); } static void spr_write_booke_tcr (void *opaque, int sprn, int gprn) { - gen_helper_store_booke_tcr(cpu_gpr[gprn]); + gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]); } static void spr_write_booke_tsr (void *opaque, int sprn, int gprn) { - gen_helper_store_booke_tsr(cpu_gpr[gprn]); + gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]); } #endif @@ -481,7 +500,7 @@ static void spr_read_403_pbr (void *opaque, int gprn, int sprn) static void spr_write_403_pbr (void *opaque, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32(sprn - SPR_403_PBL1); - gen_helper_store_403_pbr(t0, cpu_gpr[gprn]); + gen_helper_store_403_pbr(cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(t0); } @@ -1371,14 +1390,14 @@ static void spr_write_e500_l1csr0 (void *opaque, int sprn, int gprn) static void spr_write_booke206_mmucsr0 (void *opaque, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32(sprn); - gen_helper_booke206_tlbflush(t0); + gen_helper_booke206_tlbflush(cpu_env, t0); tcg_temp_free_i32(t0); } static void spr_write_booke_pid (void *opaque, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32(sprn); - gen_helper_booke_setpid(t0, cpu_gpr[gprn]); + gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(t0); } #endif @@ -1591,10 +1610,14 @@ static void gen_spr_BookE206(CPUPPCState *env, uint32_t mas_mask, /* TLB assist registers */ /* XXX : not implemented */ for (i = 0; i < 8; i++) { + void (*uea_write)(void *o, int sprn, int gprn) = &spr_write_generic32; + if (i == 2 && (mas_mask & (1 << i)) && (env->insns_flags & PPC_64B)) { + uea_write = &spr_write_generic; + } if (mas_mask & (1 << i)) { spr_register(env, mas_sprn[i], mas_names[i], SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, uea_write, 0x00000000); } } @@ -2804,7 +2827,7 @@ static void init_excp_G2 (CPUPPCState *env) #endif } -static void init_excp_e200 (CPUPPCState *env) +static void init_excp_e200(CPUPPCState *env, target_ulong ivpr_mask) { #if !defined(CONFIG_USER_ONLY) env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000FFC; @@ -2829,7 +2852,7 @@ static void init_excp_e200 (CPUPPCState *env) env->excp_vectors[POWERPC_EXCP_EFPRI] = 0x00000000; env->hreset_excp_prefix = 0x00000000UL; env->ivor_mask = 0x0000FFF7UL; - env->ivpr_mask = 0xFFFF0000UL; + env->ivpr_mask = ivpr_mask; /* Hardware reset vector */ env->hreset_vector = 0xFFFFFFFCUL; #endif @@ -4307,7 +4330,7 @@ static void init_proc_e200 (CPUPPCState *env) env->id_tlbs = 0; env->tlb_type = TLB_EMB; #endif - init_excp_e200(env); + init_excp_e200(env, 0xFFFF0000UL); env->dcache_line_size = 32; env->icache_line_size = 32; /* XXX: TODO: allocate internal IRQ controller */ @@ -4424,16 +4447,70 @@ static void init_proc_e300 (CPUPPCState *env) #define check_pow_e500mc check_pow_none #define init_proc_e500mc init_proc_e500mc +/* e5500 core */ +#define POWERPC_INSNS_e5500 (PPC_INSNS_BASE | PPC_ISEL | \ + PPC_WRTEE | PPC_RFDI | PPC_RFMCI | \ + PPC_CACHE | 
PPC_CACHE_LOCK | PPC_CACHE_ICBI | \ + PPC_CACHE_DCBZ | PPC_CACHE_DCBA | \ + PPC_FLOAT | PPC_FLOAT_FRES | \ + PPC_FLOAT_FRSQRTE | PPC_FLOAT_FSEL | \ + PPC_FLOAT_STFIWX | PPC_WAIT | \ + PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC | \ + PPC_64B | PPC_POPCNTB | PPC_POPCNTWD) +#define POWERPC_INSNS2_e5500 (PPC2_BOOKE206 | PPC2_PRCNTL) +#define POWERPC_MSRM_e5500 (0x000000009402FB36ULL) +#define POWERPC_MMU_e5500 (POWERPC_MMU_BOOKE206) +#define POWERPC_EXCP_e5500 (POWERPC_EXCP_BOOKE) +#define POWERPC_INPUT_e5500 (PPC_FLAGS_INPUT_BookE) +/* Fixme: figure out the correct flag for e5500 */ +#define POWERPC_BFDM_e5500 (bfd_mach_ppc_e500) +#define POWERPC_FLAG_e5500 (POWERPC_FLAG_CE | POWERPC_FLAG_DE | \ + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK) +#define check_pow_e5500 check_pow_none +#define init_proc_e5500 init_proc_e5500 + +#if !defined(CONFIG_USER_ONLY) +static void spr_write_mas73(void *opaque, int sprn, int gprn) +{ + TCGv val = tcg_temp_new(); + tcg_gen_ext32u_tl(val, cpu_gpr[gprn]); + gen_store_spr(SPR_BOOKE_MAS3, val); + tcg_gen_shri_tl(val, cpu_gpr[gprn], 32); + gen_store_spr(SPR_BOOKE_MAS7, val); + tcg_temp_free(val); +} + +static void spr_read_mas73(void *opaque, int gprn, int sprn) +{ + TCGv mas7 = tcg_temp_new(); + TCGv mas3 = tcg_temp_new(); + gen_load_spr(mas7, SPR_BOOKE_MAS7); + tcg_gen_shli_tl(mas7, mas7, 32); + gen_load_spr(mas3, SPR_BOOKE_MAS3); + tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7); + tcg_temp_free(mas3); + tcg_temp_free(mas7); +} + +static void spr_load_epr(void *opaque, int gprn, int sprn) +{ + gen_helper_load_epr(cpu_gpr[gprn], cpu_env); +} + +#endif + enum fsl_e500_version { fsl_e500v1, fsl_e500v2, fsl_e500mc, + fsl_e5500, }; static void init_proc_e500 (CPUPPCState *env, int version) { uint32_t tlbncfg[2]; - uint64_t ivor_mask = 0x0000000F0000FFFFULL; + uint64_t ivor_mask; + uint64_t ivpr_mask = 0xFFFF0000ULL; uint32_t l1cfg0 = 0x3800 /* 8 ways */ | 0x0020; /* 32 kb */ #if !defined(CONFIG_USER_ONLY) @@ -4447,8 +4524,16 @@ static void init_proc_e500 (CPUPPCState *env, int version) * complain when accessing them. 
* gen_spr_BookE(env, 0x0000000F0000FD7FULL); */ - if (version == fsl_e500mc) { - ivor_mask = 0x000003FE0000FFFFULL; + switch (version) { + case fsl_e500v1: + case fsl_e500v2: + default: + ivor_mask = 0x0000000F0000FFFFULL; + break; + case fsl_e500mc: + case fsl_e5500: + ivor_mask = 0x000003FE0000FFFFULL; + break; } gen_spr_BookE(env, ivor_mask); /* Processor identification */ @@ -4476,6 +4561,7 @@ static void init_proc_e500 (CPUPPCState *env, int version) tlbncfg[1] = gen_tlbncfg(16, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16); break; case fsl_e500mc: + case fsl_e5500: tlbncfg[0] = gen_tlbncfg(4, 1, 1, 0, 512); tlbncfg[1] = gen_tlbncfg(64, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 64); break; @@ -4491,6 +4577,7 @@ static void init_proc_e500 (CPUPPCState *env, int version) env->icache_line_size = 32; break; case fsl_e500mc: + case fsl_e5500: env->dcache_line_size = 64; env->icache_line_size = 64; l1cfg0 |= 0x1000000; /* 64 byte cache block size */ @@ -4566,6 +4653,22 @@ static void init_proc_e500 (CPUPPCState *env, int version) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_booke206_mmucsr0, 0x00000000); + spr_register(env, SPR_BOOKE_EPR, "EPR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_load_epr, SPR_NOACCESS, + 0x00000000); + /* XXX better abstract into Emb.xxx features */ + if (version == fsl_e5500) { + spr_register(env, SPR_BOOKE_EPCR, "EPCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_MAS7_MAS3, "MAS7_MAS3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_mas73, &spr_write_mas73, + 0x00000000); + ivpr_mask = (target_ulong)~0xFFFFULL; + } #if !defined(CONFIG_USER_ONLY) env->nb_tlb = 0; @@ -4575,7 +4678,7 @@ static void init_proc_e500 (CPUPPCState *env, int version) } #endif - init_excp_e200(env); + init_excp_e200(env, ivpr_mask); /* Allocate hardware IRQ controller */ ppce500_irq_init(env); } @@ -4595,6 +4698,13 @@ static void init_proc_e500mc(CPUPPCState *env) init_proc_e500(env, fsl_e500mc); } +#ifdef TARGET_PPC64 +static void init_proc_e5500(CPUPPCState *env) +{ + init_proc_e500(env, fsl_e5500); +} +#endif + /* Non-embedded PowerPC */ /* POWER : same as 601, without mfmsr, mfsr */ @@ -7133,6 +7243,7 @@ enum { CPU_POWERPC_e500v2_v22 = 0x80210022, CPU_POWERPC_e500v2_v30 = 0x80210030, CPU_POWERPC_e500mc = 0x80230020, + CPU_POWERPC_e5500 = 0x80240020, /* MPC85xx microcontrollers */ #define CPU_POWERPC_MPC8533 CPU_POWERPC_MPC8533_v11 #define CPU_POWERPC_MPC8533_v10 CPU_POWERPC_e500v2_v21 @@ -8527,6 +8638,9 @@ static const ppc_def_t ppc_defs[] = { /* PowerPC e500v2 v3.0 core */ POWERPC_DEF("e500v2_v30", CPU_POWERPC_e500v2_v30, e500v2), POWERPC_DEF("e500mc", CPU_POWERPC_e500mc, e500mc), +#ifdef TARGET_PPC64 + POWERPC_DEF("e5500", CPU_POWERPC_e5500, e5500), +#endif /* PowerPC e500 microcontrollers */ /* MPC8533 */ POWERPC_DEF_SVR("MPC8533", @@ -9928,6 +10042,27 @@ int cpu_ppc_register_internal (CPUPPCState *env, const ppc_def_t *def) env->bfd_mach = def->bfd_mach; env->check_pow = def->check_pow; +#if defined(TARGET_PPC64) + if (def->sps) + env->sps = *def->sps; + else if (env->mmu_model & POWERPC_MMU_64) { + /* Use default sets of page sizes */ + static const struct ppc_segment_page_sizes defsps = { + .sps = { + { .page_shift = 12, /* 4K */ + .slb_enc = 0, + .enc = { { .page_shift = 12, .pte_enc = 0 } } + }, + { .page_shift = 24, /* 16M */ + .slb_enc = 0x100, + .enc = { { .page_shift = 24, .pte_enc = 0 } } + }, + }, + }; + env->sps = defsps; + } +#endif /* defined(TARGET_PPC64) */ + if (kvm_enabled()) { if 
(kvmppc_fixup_cpu(env) != 0) { fprintf(stderr, "Unable to virtualize selected CPU with KVM\n"); diff --git a/target-s390x/Makefile.objs b/target-s390x/Makefile.objs new file mode 100644 index 0000000000..262747f8a6 --- /dev/null +++ b/target-s390x/Makefile.objs @@ -0,0 +1,5 @@ +obj-y += translate.o op_helper.o helper.o cpu.o +obj-$(CONFIG_SOFTMMU) += machine.o +obj-$(CONFIG_KVM) += kvm.o + +$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) diff --git a/target-s390x/cpu.c b/target-s390x/cpu.c index f183213eab..619b202b92 100644 --- a/target-s390x/cpu.c +++ b/target-s390x/cpu.c @@ -20,7 +20,7 @@ * <http://www.gnu.org/licenses/lgpl-2.1.html> */ -#include "cpu-qom.h" +#include "cpu.h" #include "qemu-common.h" #include "qemu-timer.h" diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h index 2f3f3942c0..c30ac3a0e1 100644 --- a/target-s390x/cpu.h +++ b/target-s390x/cpu.h @@ -105,6 +105,8 @@ typedef struct CPUS390XState { QEMUTimer *cpu_timer; } CPUS390XState; +#include "cpu-qom.h" + #if defined(CONFIG_USER_ONLY) static inline void cpu_clone_regs(CPUS390XState *env, target_ulong newsp) { @@ -271,7 +273,7 @@ static inline int get_ilc(uint8_t opc) #define ILC_LATER_INC_2 0x22 -CPUS390XState *cpu_s390x_init(const char *cpu_model); +S390CPU *cpu_s390x_init(const char *cpu_model); void s390x_translate_init(void); int cpu_s390x_exec(CPUS390XState *s); void cpu_s390x_close(CPUS390XState *s); @@ -314,7 +316,7 @@ static inline void kvm_s390_interrupt_internal(CPUS390XState *env, int type, { } #endif -CPUS390XState *s390_cpu_addr2state(uint16_t cpu_addr); +S390CPU *s390_cpu_addr2state(uint16_t cpu_addr); void s390_add_running_cpu(CPUS390XState *env); unsigned s390_del_running_cpu(CPUS390XState *env); @@ -340,7 +342,7 @@ static inline void cpu_set_tls(CPUS390XState *env, target_ulong newtls) env->aregs[1] = newtls & 0xffffffffULL; } -#define cpu_init cpu_s390x_init +#define cpu_init(model) (&cpu_s390x_init(model)->env) #define cpu_exec cpu_s390x_exec #define cpu_gen_code cpu_s390x_gen_code #define cpu_signal_handler cpu_s390x_signal_handler @@ -994,6 +996,4 @@ static inline void cpu_pc_from_tb(CPUS390XState *env, TranslationBlock* tb) env->psw.addr = tb->pc; } -#include "cpu-qom.h" - #endif diff --git a/target-s390x/helper.c b/target-s390x/helper.c index a34a35b536..d0a1180a83 100644 --- a/target-s390x/helper.c +++ b/target-s390x/helper.c @@ -70,7 +70,7 @@ void s390x_cpu_timer(void *opaque) } #endif -CPUS390XState *cpu_s390x_init(const char *cpu_model) +S390CPU *cpu_s390x_init(const char *cpu_model) { S390CPU *cpu; CPUS390XState *env; @@ -86,7 +86,7 @@ CPUS390XState *cpu_s390x_init(const char *cpu_model) env->cpu_model_str = cpu_model; qemu_init_vcpu(env); - return env; + return cpu; } #if defined(CONFIG_USER_ONLY) diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c index 90aad61eb0..ec08dd0474 100644 --- a/target-s390x/kvm.c +++ b/target-s390x/kvm.c @@ -292,8 +292,10 @@ static int handle_diag(CPUS390XState *env, struct kvm_run *run, int ipb_code) return r; } -static int s390_cpu_restart(CPUS390XState *env) +static int s390_cpu_restart(S390CPU *cpu) { + CPUS390XState *env = &cpu->env; + kvm_s390_interrupt(env, KVM_S390_RESTART, 0); s390_add_running_cpu(env); qemu_cpu_kick(env); @@ -312,6 +314,7 @@ static int s390_cpu_initial_reset(CPUS390XState *env) { int i; + s390_del_running_cpu(env); if (kvm_vcpu_ioctl(env, KVM_S390_INITIAL_RESET, NULL) < 0) { perror("cannot init reset vcpu"); } @@ -333,6 +336,7 @@ static int handle_sigp(CPUS390XState *env, struct kvm_run *run, uint8_t ipa1) uint16_t cpu_addr; 
uint8_t t; int r = -1; + S390CPU *target_cpu; CPUS390XState *target_env; cpu_synchronize_state(env); @@ -353,14 +357,15 @@ static int handle_sigp(CPUS390XState *env, struct kvm_run *run, uint8_t ipa1) parameter = env->regs[t] & 0x7ffffe00; cpu_addr = env->regs[ipa1 & 0x0f]; - target_env = s390_cpu_addr2state(cpu_addr); - if (!target_env) { + target_cpu = s390_cpu_addr2state(cpu_addr); + if (target_cpu == NULL) { goto out; } + target_env = &target_cpu->env; switch (order_code) { case SIGP_RESTART: - r = s390_cpu_restart(target_env); + r = s390_cpu_restart(target_cpu); break; case SIGP_STORE_STATUS_ADDR: r = s390_store_status(target_env, parameter); diff --git a/target-s390x/translate.c b/target-s390x/translate.c index 9bf8c38bb3..1c1baf5342 100644 --- a/target-s390x/translate.c +++ b/target-s390x/translate.c @@ -5098,7 +5098,7 @@ static void disas_s390_insn(DisasContext *s) disas_ed(s, op, r1, x2, b2, d2, r1b); break; default: - LOG_DISAS("unimplemented opcode 0x%x\n", opc); + qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc); gen_illegal_opcode(s, ilc); break; } diff --git a/target-sh4/Makefile.objs b/target-sh4/Makefile.objs new file mode 100644 index 0000000000..2e0e093e1f --- /dev/null +++ b/target-sh4/Makefile.objs @@ -0,0 +1,4 @@ +obj-y += translate.o op_helper.o helper.o cpu.o +obj-$(CONFIG_SOFTMMU) += machine.o + +$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) diff --git a/target-sparc/Makefile.objs b/target-sparc/Makefile.objs new file mode 100644 index 0000000000..a93e07deb1 --- /dev/null +++ b/target-sparc/Makefile.objs @@ -0,0 +1,8 @@ +obj-$(CONFIG_SOFTMMU) += machine.o +obj-y += translate.o helper.o cpu.o +obj-y += fop_helper.o cc_helper.o win_helper.o mmu_helper.o ldst_helper.o +obj-$(TARGET_SPARC) += int32_helper.o +obj-$(TARGET_SPARC64) += int64_helper.o +obj-$(TARGET_SPARC64) += vis_helper.o + +$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) diff --git a/target-sparc/ldst_helper.c b/target-sparc/ldst_helper.c index efe5e704b5..9bec7a92f7 100644 --- a/target-sparc/ldst_helper.c +++ b/target-sparc/ldst_helper.c @@ -464,16 +464,18 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size, if (size == 8) { ret = env->mxccregs[3]; } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); } break; case 0x01c00a04: /* MXCC control register */ if (size == 4) { ret = env->mxccregs[3]; } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); } break; case 0x01c00c00: /* Module reset register */ @@ -481,21 +483,24 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size, ret = env->mxccregs[5]; /* should we do something here? 
*/ } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); } break; case 0x01c00f00: /* MBus port address register */ if (size == 8) { ret = env->mxccregs[7]; } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); } break; default: - DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented address, size: %d\n", addr, + size); break; } DPRINTF_MXCC("asi = %d, size = %d, sign = %d, " @@ -719,40 +724,45 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi, if (size == 8) { env->mxccdata[0] = val; } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); } break; case 0x01c00008: /* MXCC stream data register 1 */ if (size == 8) { env->mxccdata[1] = val; } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); } break; case 0x01c00010: /* MXCC stream data register 2 */ if (size == 8) { env->mxccdata[2] = val; } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); } break; case 0x01c00018: /* MXCC stream data register 3 */ if (size == 8) { env->mxccdata[3] = val; } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); } break; case 0x01c00100: /* MXCC stream source */ if (size == 8) { env->mxccregs[0] = val; } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); } env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) + 0); @@ -767,8 +777,9 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi, if (size == 8) { env->mxccregs[1] = val; } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); } stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0, env->mxccdata[0]); @@ -783,8 +794,9 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi, if (size == 8) { env->mxccregs[3] = val; } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); } break; case 0x01c00a04: /* MXCC control register */ @@ -792,8 +804,9 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi, env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL) | val; } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); } break; case 0x01c00e00: /* MXCC error register */ @@ -801,21 +814,24 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi, if (size == 8) { env->mxccregs[6] &= ~val; } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); } break; case 
0x01c00f00: /* MBus port address register */ if (size == 8) { env->mxccregs[7] = val; } else { - DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); } break; default: - DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr, - size); + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented address, size: %d\n", addr, + size); break; } DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n", diff --git a/target-unicore32/Makefile.objs b/target-unicore32/Makefile.objs new file mode 100644 index 0000000000..2e0e093e1f --- /dev/null +++ b/target-unicore32/Makefile.objs @@ -0,0 +1,4 @@ +obj-y += translate.o op_helper.o helper.o cpu.o +obj-$(CONFIG_SOFTMMU) += machine.o + +$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) diff --git a/target-xtensa/Makefile.objs b/target-xtensa/Makefile.objs new file mode 100644 index 0000000000..b30e5a8466 --- /dev/null +++ b/target-xtensa/Makefile.objs @@ -0,0 +1,6 @@ +obj-y += xtensa-semi.o +obj-y += core-dc232b.o +obj-y += core-dc233c.o +obj-y += core-fsf.o +obj-y += translate.o op_helper.o helper.o cpu.o +obj-$(CONFIG_SOFTMMU) += machine.o diff --git a/target-xtensa/cpu.h b/target-xtensa/cpu.h index 81f7833039..f7db116400 100644 --- a/target-xtensa/cpu.h +++ b/target-xtensa/cpu.h @@ -381,9 +381,12 @@ void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb, uint32_t *vpn, uint32_t wi, uint32_t *ei); int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb, uint32_t *pwi, uint32_t *pei, uint8_t *pring); +void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env, + xtensa_tlb_entry *entry, bool dtlb, + unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte); void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb, unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte); -int xtensa_get_physical_addr(CPUXtensaState *env, +int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb, uint32_t vaddr, int is_write, int mmu_idx, uint32_t *paddr, uint32_t *page_size, unsigned *access); void reset_mmu(CPUXtensaState *env); diff --git a/target-xtensa/helper.c b/target-xtensa/helper.c index 5e7e72e113..044ce18364 100644 --- a/target-xtensa/helper.c +++ b/target-xtensa/helper.c @@ -130,11 +130,11 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUXtensaState *env, target_ulong add uint32_t page_size; unsigned access; - if (xtensa_get_physical_addr(env, addr, 0, 0, + if (xtensa_get_physical_addr(env, false, addr, 0, 0, &paddr, &page_size, &access) == 0) { return paddr; } - if (xtensa_get_physical_addr(env, addr, 2, 0, + if (xtensa_get_physical_addr(env, false, addr, 2, 0, &paddr, &page_size, &access) == 0) { return paddr; } @@ -443,30 +443,48 @@ static bool is_access_granted(unsigned access, int is_write) } } -static int autorefill_mmu(CPUXtensaState *env, uint32_t vaddr, bool dtlb, - uint32_t *wi, uint32_t *ei, uint8_t *ring); +static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte); -static int get_physical_addr_mmu(CPUXtensaState *env, +static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb, uint32_t vaddr, int is_write, int mmu_idx, - uint32_t *paddr, uint32_t *page_size, unsigned *access) + uint32_t *paddr, uint32_t *page_size, unsigned *access, + bool may_lookup_pt) { bool dtlb = is_write != 2; uint32_t wi; uint32_t ei; uint8_t ring; + uint32_t vpn; + uint32_t pte; + const xtensa_tlb_entry *entry = NULL; + xtensa_tlb_entry tmp_entry; int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, 
&ei, &ring); if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) && - (mmu_idx != 0 || ((vaddr ^ env->sregs[PTEVADDR]) & 0xffc00000)) && - autorefill_mmu(env, vaddr, dtlb, &wi, &ei, &ring) == 0) { + may_lookup_pt && get_pte(env, vaddr, &pte) == 0) { + ring = (pte >> 4) & 0x3; + wi = 0; + split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei); + + if (update_tlb) { + wi = ++env->autorefill_idx & 0x3; + xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte); + env->sregs[EXCVADDR] = vaddr; + qemu_log("%s: autorefill(%08x): %08x -> %08x\n", + __func__, vaddr, vpn, pte); + } else { + xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte); + entry = &tmp_entry; + } ret = 0; } if (ret != 0) { return ret; } - const xtensa_tlb_entry *entry = - xtensa_tlb_get_entry(env, dtlb, wi, ei); + if (entry == NULL) { + entry = xtensa_tlb_get_entry(env, dtlb, wi, ei); + } if (ring < mmu_idx) { return dtlb ? @@ -489,30 +507,21 @@ static int get_physical_addr_mmu(CPUXtensaState *env, return 0; } -static int autorefill_mmu(CPUXtensaState *env, uint32_t vaddr, bool dtlb, - uint32_t *wi, uint32_t *ei, uint8_t *ring) +static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte) { uint32_t paddr; uint32_t page_size; unsigned access; uint32_t pt_vaddr = (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc; - int ret = get_physical_addr_mmu(env, pt_vaddr, 0, 0, - &paddr, &page_size, &access); + int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0, + &paddr, &page_size, &access, false); qemu_log("%s: trying autorefill(%08x) -> %08x\n", __func__, vaddr, ret ? ~0 : paddr); if (ret == 0) { - uint32_t vpn; - uint32_t pte = ldl_phys(paddr); - - *ring = (pte >> 4) & 0x3; - *wi = (++env->autorefill_idx) & 0x3; - split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, *wi, ei); - xtensa_tlb_set_entry(env, dtlb, *wi, *ei, vpn, pte); - qemu_log("%s: autorefill(%08x): %08x -> %08x\n", - __func__, vaddr, vpn, pte); + *pte = ldl_phys(paddr); } return ret; } @@ -548,13 +557,13 @@ static int get_physical_addr_region(CPUXtensaState *env, * * \return 0 if ok, exception cause code otherwise */ -int xtensa_get_physical_addr(CPUXtensaState *env, +int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb, uint32_t vaddr, int is_write, int mmu_idx, uint32_t *paddr, uint32_t *page_size, unsigned *access) { if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) { - return get_physical_addr_mmu(env, vaddr, is_write, mmu_idx, - paddr, page_size, access); + return get_physical_addr_mmu(env, update_tlb, + vaddr, is_write, mmu_idx, paddr, page_size, access, true); } else if (xtensa_option_bits_enabled(env->config, XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) | XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) { diff --git a/target-xtensa/helper.h b/target-xtensa/helper.h index 48a741e46d..152fec044d 100644 --- a/target-xtensa/helper.h +++ b/target-xtensa/helper.h @@ -1,39 +1,39 @@ #include "def-helper.h" -DEF_HELPER_1(exception, void, i32) -DEF_HELPER_2(exception_cause, void, i32, i32) -DEF_HELPER_3(exception_cause_vaddr, void, i32, i32, i32) -DEF_HELPER_2(debug_exception, void, i32, i32) +DEF_HELPER_2(exception, noreturn, env, i32) +DEF_HELPER_3(exception_cause, noreturn, env, i32, i32) +DEF_HELPER_4(exception_cause_vaddr, noreturn, env, i32, i32, i32) +DEF_HELPER_3(debug_exception, noreturn, env, i32, i32) -DEF_HELPER_1(nsa, i32, i32) -DEF_HELPER_1(nsau, i32, i32) -DEF_HELPER_1(wsr_windowbase, void, i32) -DEF_HELPER_3(entry, void, i32, i32, i32) -DEF_HELPER_1(retw, i32, i32) 
-DEF_HELPER_1(rotw, void, i32) -DEF_HELPER_2(window_check, void, i32, i32) -DEF_HELPER_0(restore_owb, void) -DEF_HELPER_1(movsp, void, i32) -DEF_HELPER_1(wsr_lbeg, void, i32) -DEF_HELPER_1(wsr_lend, void, i32) +DEF_HELPER_FLAGS_1(nsa, TCG_CALL_CONST | TCG_CALL_PURE, i32, i32) +DEF_HELPER_FLAGS_1(nsau, TCG_CALL_CONST | TCG_CALL_PURE, i32, i32) +DEF_HELPER_2(wsr_windowbase, void, env, i32) +DEF_HELPER_4(entry, void, env, i32, i32, i32) +DEF_HELPER_2(retw, i32, env, i32) +DEF_HELPER_2(rotw, void, env, i32) +DEF_HELPER_3(window_check, void, env, i32, i32) +DEF_HELPER_1(restore_owb, void, env) +DEF_HELPER_2(movsp, void, env, i32) +DEF_HELPER_2(wsr_lbeg, void, env, i32) +DEF_HELPER_2(wsr_lend, void, env, i32) DEF_HELPER_1(simcall, void, env) -DEF_HELPER_0(dump_state, void) +DEF_HELPER_1(dump_state, void, env) -DEF_HELPER_2(waiti, void, i32, i32) -DEF_HELPER_2(timer_irq, void, i32, i32) -DEF_HELPER_1(advance_ccount, void, i32) +DEF_HELPER_3(waiti, void, env, i32, i32) +DEF_HELPER_3(timer_irq, void, env, i32, i32) +DEF_HELPER_2(advance_ccount, void, env, i32) DEF_HELPER_1(check_interrupts, void, env) -DEF_HELPER_1(wsr_rasid, void, i32) -DEF_HELPER_2(rtlb0, i32, i32, i32) -DEF_HELPER_2(rtlb1, i32, i32, i32) -DEF_HELPER_2(itlb, void, i32, i32) -DEF_HELPER_2(ptlb, i32, i32, i32) -DEF_HELPER_3(wtlb, void, i32, i32, i32) +DEF_HELPER_2(wsr_rasid, void, env, i32) +DEF_HELPER_FLAGS_3(rtlb0, TCG_CALL_CONST | TCG_CALL_PURE, i32, env, i32, i32) +DEF_HELPER_FLAGS_3(rtlb1, TCG_CALL_CONST | TCG_CALL_PURE, i32, env, i32, i32) +DEF_HELPER_3(itlb, void, env, i32, i32) +DEF_HELPER_3(ptlb, i32, env, i32, i32) +DEF_HELPER_4(wtlb, void, env, i32, i32, i32) -DEF_HELPER_1(wsr_ibreakenable, void, i32) -DEF_HELPER_2(wsr_ibreaka, void, i32, i32) -DEF_HELPER_2(wsr_dbreaka, void, i32, i32) -DEF_HELPER_2(wsr_dbreakc, void, i32, i32) +DEF_HELPER_2(wsr_ibreakenable, void, env, i32) +DEF_HELPER_3(wsr_ibreaka, void, env, i32, i32) +DEF_HELPER_3(wsr_dbreaka, void, env, i32, i32) +DEF_HELPER_3(wsr_dbreakc, void, env, i32, i32) #include "def-helper.h" diff --git a/target-xtensa/op_helper.c b/target-xtensa/op_helper.c index 364dc19bc0..2659c0e00f 100644 --- a/target-xtensa/op_helper.c +++ b/target-xtensa/op_helper.c @@ -26,12 +26,11 @@ */ #include "cpu.h" -#include "dyngen-exec.h" #include "helper.h" #include "host-utils.h" -static void do_unaligned_access(target_ulong addr, int is_write, int is_user, - uintptr_t retaddr); +static void do_unaligned_access(CPUXtensaState *env, + target_ulong addr, int is_write, int is_user, uintptr_t retaddr); #define ALIGNED_ONLY #define MMUSUFFIX _mmu @@ -48,7 +47,7 @@ static void do_unaligned_access(target_ulong addr, int is_write, int is_user, #define SHIFT 3 #include "softmmu_template.h" -static void do_restore_state(uintptr_t pc) +static void do_restore_state(CPUXtensaState *env, uintptr_t pc) { TranslationBlock *tb; @@ -58,44 +57,38 @@ static void do_restore_state(uintptr_t pc) } } -static void do_unaligned_access(target_ulong addr, int is_write, int is_user, - uintptr_t retaddr) +static void do_unaligned_access(CPUXtensaState *env, + target_ulong addr, int is_write, int is_user, uintptr_t retaddr) { if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) && !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) { - do_restore_state(retaddr); - HELPER(exception_cause_vaddr)( + do_restore_state(env, retaddr); + HELPER(exception_cause_vaddr)(env, env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr); } } -void tlb_fill(CPUXtensaState *env1, target_ulong vaddr, int is_write, int 
mmu_idx, - uintptr_t retaddr) +void tlb_fill(CPUXtensaState *env, + target_ulong vaddr, int is_write, int mmu_idx, uintptr_t retaddr) { - CPUXtensaState *saved_env = env; - - env = env1; - { - uint32_t paddr; - uint32_t page_size; - unsigned access; - int ret = xtensa_get_physical_addr(env, vaddr, is_write, mmu_idx, - &paddr, &page_size, &access); + uint32_t paddr; + uint32_t page_size; + unsigned access; + int ret = xtensa_get_physical_addr(env, true, vaddr, is_write, mmu_idx, + &paddr, &page_size, &access); - qemu_log("%s(%08x, %d, %d) -> %08x, ret = %d\n", __func__, - vaddr, is_write, mmu_idx, paddr, ret); + qemu_log("%s(%08x, %d, %d) -> %08x, ret = %d\n", __func__, + vaddr, is_write, mmu_idx, paddr, ret); - if (ret == 0) { - tlb_set_page(env, - vaddr & TARGET_PAGE_MASK, - paddr & TARGET_PAGE_MASK, - access, mmu_idx, page_size); - } else { - do_restore_state(retaddr); - HELPER(exception_cause_vaddr)(env->pc, ret, vaddr); - } + if (ret == 0) { + tlb_set_page(env, + vaddr & TARGET_PAGE_MASK, + paddr & TARGET_PAGE_MASK, + access, mmu_idx, page_size); + } else { + do_restore_state(env, retaddr); + HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr); } - env = saved_env; } static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr) @@ -103,20 +96,20 @@ static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr) uint32_t paddr; uint32_t page_size; unsigned access; - int ret = xtensa_get_physical_addr(env, vaddr, 2, 0, + int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0, &paddr, &page_size, &access); if (ret == 0) { tb_invalidate_phys_addr(paddr); } } -void HELPER(exception)(uint32_t excp) +void HELPER(exception)(CPUXtensaState *env, uint32_t excp) { env->exception_index = excp; cpu_loop_exit(env); } -void HELPER(exception_cause)(uint32_t pc, uint32_t cause) +void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause) { uint32_t vector; @@ -136,24 +129,24 @@ void HELPER(exception_cause)(uint32_t pc, uint32_t cause) env->sregs[EXCCAUSE] = cause; env->sregs[PS] |= PS_EXCM; - HELPER(exception)(vector); + HELPER(exception)(env, vector); } -void HELPER(exception_cause_vaddr)(uint32_t pc, uint32_t cause, uint32_t vaddr) +void HELPER(exception_cause_vaddr)(CPUXtensaState *env, + uint32_t pc, uint32_t cause, uint32_t vaddr) { env->sregs[EXCVADDR] = vaddr; - HELPER(exception_cause)(pc, cause); + HELPER(exception_cause)(env, pc, cause); } -void debug_exception_env(CPUXtensaState *new_env, uint32_t cause) +void debug_exception_env(CPUXtensaState *env, uint32_t cause) { - if (xtensa_get_cintlevel(new_env) < new_env->config->debug_level) { - env = new_env; - HELPER(debug_exception)(env->pc, cause); + if (xtensa_get_cintlevel(env) < env->config->debug_level) { + HELPER(debug_exception)(env, env->pc, cause); } } -void HELPER(debug_exception)(uint32_t pc, uint32_t cause) +void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause) { unsigned level = env->config->debug_level; @@ -163,7 +156,7 @@ void HELPER(debug_exception)(uint32_t pc, uint32_t cause) env->sregs[EPS2 + level - 2] = env->sregs[PS]; env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM | (level << PS_INTLEVEL_SHIFT); - HELPER(exception)(EXC_DEBUG); + HELPER(exception)(env, EXC_DEBUG); } uint32_t HELPER(nsa)(uint32_t v) @@ -232,39 +225,39 @@ void xtensa_sync_phys_from_window(CPUXtensaState *env) copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16); } -static void rotate_window_abs(uint32_t position) +static void rotate_window_abs(CPUXtensaState 
*env, uint32_t position) { xtensa_sync_phys_from_window(env); env->sregs[WINDOW_BASE] = windowbase_bound(position, env); xtensa_sync_window_from_phys(env); } -static void rotate_window(uint32_t delta) +static void rotate_window(CPUXtensaState *env, uint32_t delta) { - rotate_window_abs(env->sregs[WINDOW_BASE] + delta); + rotate_window_abs(env, env->sregs[WINDOW_BASE] + delta); } -void HELPER(wsr_windowbase)(uint32_t v) +void HELPER(wsr_windowbase)(CPUXtensaState *env, uint32_t v) { - rotate_window_abs(v); + rotate_window_abs(env, v); } -void HELPER(entry)(uint32_t pc, uint32_t s, uint32_t imm) +void HELPER(entry)(CPUXtensaState *env, uint32_t pc, uint32_t s, uint32_t imm) { int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT; if (s > 3 || ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) { qemu_log("Illegal entry instruction(pc = %08x), PS = %08x\n", pc, env->sregs[PS]); - HELPER(exception_cause)(pc, ILLEGAL_INSTRUCTION_CAUSE); + HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE); } else { env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - (imm << 3); - rotate_window(callinc); + rotate_window(env, callinc); env->sregs[WINDOW_START] |= windowstart_bit(env->sregs[WINDOW_BASE], env); } } -void HELPER(window_check)(uint32_t pc, uint32_t w) +void HELPER(window_check)(CPUXtensaState *env, uint32_t pc, uint32_t w) { uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env); uint32_t windowstart = env->sregs[WINDOW_START]; @@ -284,21 +277,21 @@ void HELPER(window_check)(uint32_t pc, uint32_t w) } m = windowbase_bound(windowbase + n, env); - rotate_window(n); + rotate_window(env, n); env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) | (windowbase << PS_OWB_SHIFT) | PS_EXCM; env->sregs[EPC1] = env->pc = pc; if (windowstart & windowstart_bit(m + 1, env)) { - HELPER(exception)(EXC_WINDOW_OVERFLOW4); + HELPER(exception)(env, EXC_WINDOW_OVERFLOW4); } else if (windowstart & windowstart_bit(m + 2, env)) { - HELPER(exception)(EXC_WINDOW_OVERFLOW8); + HELPER(exception)(env, EXC_WINDOW_OVERFLOW8); } else { - HELPER(exception)(EXC_WINDOW_OVERFLOW12); + HELPER(exception)(env, EXC_WINDOW_OVERFLOW12); } } -uint32_t HELPER(retw)(uint32_t pc) +uint32_t HELPER(retw)(CPUXtensaState *env, uint32_t pc) { int n = (env->regs[0] >> 30) & 0x3; int m = 0; @@ -319,13 +312,13 @@ uint32_t HELPER(retw)(uint32_t pc) qemu_log("Illegal retw instruction(pc = %08x), " "PS = %08x, m = %d, n = %d\n", pc, env->sregs[PS], m, n); - HELPER(exception_cause)(pc, ILLEGAL_INSTRUCTION_CAUSE); + HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE); } else { int owb = windowbase; ret_pc = (pc & 0xc0000000) | (env->regs[0] & 0x3fffffff); - rotate_window(-n); + rotate_window(env, -n); if (windowstart & windowstart_bit(env->sregs[WINDOW_BASE], env)) { env->sregs[WINDOW_START] &= ~windowstart_bit(owb, env); } else { @@ -335,38 +328,38 @@ uint32_t HELPER(retw)(uint32_t pc) env->sregs[EPC1] = env->pc = pc; if (n == 1) { - HELPER(exception)(EXC_WINDOW_UNDERFLOW4); + HELPER(exception)(env, EXC_WINDOW_UNDERFLOW4); } else if (n == 2) { - HELPER(exception)(EXC_WINDOW_UNDERFLOW8); + HELPER(exception)(env, EXC_WINDOW_UNDERFLOW8); } else if (n == 3) { - HELPER(exception)(EXC_WINDOW_UNDERFLOW12); + HELPER(exception)(env, EXC_WINDOW_UNDERFLOW12); } } } return ret_pc; } -void HELPER(rotw)(uint32_t imm4) +void HELPER(rotw)(CPUXtensaState *env, uint32_t imm4) { - rotate_window(imm4); + rotate_window(env, imm4); } -void HELPER(restore_owb)(void) +void HELPER(restore_owb)(CPUXtensaState *env) { - 
rotate_window_abs((env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT); + rotate_window_abs(env, (env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT); } -void HELPER(movsp)(uint32_t pc) +void HELPER(movsp)(CPUXtensaState *env, uint32_t pc) { if ((env->sregs[WINDOW_START] & (windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) | windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) | windowstart_bit(env->sregs[WINDOW_BASE] - 1, env))) == 0) { - HELPER(exception_cause)(pc, ALLOCA_CAUSE); + HELPER(exception_cause)(env, pc, ALLOCA_CAUSE); } } -void HELPER(wsr_lbeg)(uint32_t v) +void HELPER(wsr_lbeg)(CPUXtensaState *env, uint32_t v) { if (env->sregs[LBEG] != v) { tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1); @@ -374,7 +367,7 @@ void HELPER(wsr_lbeg)(uint32_t v) } } -void HELPER(wsr_lend)(uint32_t v) +void HELPER(wsr_lend)(CPUXtensaState *env, uint32_t v) { if (env->sregs[LEND] != v) { tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1); @@ -383,12 +376,12 @@ void HELPER(wsr_lend)(uint32_t v) } } -void HELPER(dump_state)(void) +void HELPER(dump_state)(CPUXtensaState *env) { cpu_dump_state(env, stderr, fprintf, 0); } -void HELPER(waiti)(uint32_t pc, uint32_t intlevel) +void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel) { env->pc = pc; env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | @@ -404,15 +397,15 @@ void HELPER(waiti)(uint32_t pc, uint32_t intlevel) if (xtensa_option_enabled(env->config, XTENSA_OPTION_TIMER_INTERRUPT)) { xtensa_rearm_ccompare_timer(env); } - HELPER(exception)(EXCP_HLT); + HELPER(exception)(env, EXCP_HLT); } -void HELPER(timer_irq)(uint32_t id, uint32_t active) +void HELPER(timer_irq)(CPUXtensaState *env, uint32_t id, uint32_t active) { xtensa_timer_irq(env, id, active); } -void HELPER(advance_ccount)(uint32_t d) +void HELPER(advance_ccount)(CPUXtensaState *env, uint32_t d) { xtensa_advance_ccount(env, d); } @@ -422,7 +415,7 @@ void HELPER(check_interrupts)(CPUXtensaState *env) check_interrupts(env); } -void HELPER(wsr_rasid)(uint32_t v) +void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v) { v = (v & 0xffffff00) | 0x1; if (v != env->sregs[RASID]) { @@ -574,7 +567,7 @@ void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb, * Split TLB address into TLB way, entry index and VPN (with index). 
* See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format */ -static void split_tlb_entry_spec(uint32_t v, bool dtlb, +static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb, uint32_t *vpn, uint32_t *wi, uint32_t *ei) { if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) { @@ -587,41 +580,42 @@ static void split_tlb_entry_spec(uint32_t v, bool dtlb, } } -static xtensa_tlb_entry *get_tlb_entry(uint32_t v, bool dtlb, uint32_t *pwi) +static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env, + uint32_t v, bool dtlb, uint32_t *pwi) { uint32_t vpn; uint32_t wi; uint32_t ei; - split_tlb_entry_spec(v, dtlb, &vpn, &wi, &ei); + split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei); if (pwi) { *pwi = wi; } return xtensa_tlb_get_entry(env, dtlb, wi, ei); } -uint32_t HELPER(rtlb0)(uint32_t v, uint32_t dtlb) +uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb) { if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) { uint32_t wi; - const xtensa_tlb_entry *entry = get_tlb_entry(v, dtlb, &wi); + const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi); return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid; } else { return v & REGION_PAGE_MASK; } } -uint32_t HELPER(rtlb1)(uint32_t v, uint32_t dtlb) +uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb) { - const xtensa_tlb_entry *entry = get_tlb_entry(v, dtlb, NULL); + const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL); return entry->paddr | entry->attr; } -void HELPER(itlb)(uint32_t v, uint32_t dtlb) +void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb) { if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) { uint32_t wi; - xtensa_tlb_entry *entry = get_tlb_entry(v, dtlb, &wi); + xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi); if (entry->variable && entry->asid) { tlb_flush_page(env, entry->vaddr); entry->asid = 0; @@ -629,7 +623,7 @@ void HELPER(itlb)(uint32_t v, uint32_t dtlb) } } -uint32_t HELPER(ptlb)(uint32_t v, uint32_t dtlb) +uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb) { if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) { uint32_t wi; @@ -646,7 +640,7 @@ uint32_t HELPER(ptlb)(uint32_t v, uint32_t dtlb) case INST_TLB_MULTI_HIT_CAUSE: case LOAD_STORE_TLB_MULTI_HIT_CAUSE: - HELPER(exception_cause_vaddr)(env->pc, res, v); + HELPER(exception_cause_vaddr)(env, env->pc, res, v); break; } return 0; @@ -655,6 +649,16 @@ uint32_t HELPER(ptlb)(uint32_t v, uint32_t dtlb) } } +void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env, + xtensa_tlb_entry *entry, bool dtlb, + unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte) +{ + entry->vaddr = vpn; + entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi); + entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff; + entry->attr = pte & 0xf; +} + void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb, unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte) { @@ -665,10 +669,8 @@ void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb, if (entry->asid) { tlb_flush_page(env, entry->vaddr); } - entry->vaddr = vpn; - entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi); - entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff; - entry->attr = pte & 0xf; + xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte); + tlb_flush_page(env, entry->vaddr); } else { qemu_log("%s %d, %d, %d trying to set immutable entry\n", __func__, dtlb, wi, ei); @@ -683,17 +685,17 @@ void xtensa_tlb_set_entry(CPUXtensaState *env, bool 
dtlb, } } -void HELPER(wtlb)(uint32_t p, uint32_t v, uint32_t dtlb) +void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb) { uint32_t vpn; uint32_t wi; uint32_t ei; - split_tlb_entry_spec(v, dtlb, &vpn, &wi, &ei); + split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei); xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p); } -void HELPER(wsr_ibreakenable)(uint32_t v) +void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v) { uint32_t change = v ^ env->sregs[IBREAKENABLE]; unsigned i; @@ -706,7 +708,7 @@ void HELPER(wsr_ibreakenable)(uint32_t v) env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1); } -void HELPER(wsr_ibreaka)(uint32_t i, uint32_t v) +void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v) { if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) { tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]); @@ -715,7 +717,8 @@ void HELPER(wsr_ibreaka)(uint32_t i, uint32_t v) env->sregs[IBREAKA + i] = v; } -static void set_dbreak(unsigned i, uint32_t dbreaka, uint32_t dbreakc) +static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka, + uint32_t dbreakc) { int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; uint32_t mask = dbreakc | ~DBREAKC_MASK; @@ -743,22 +746,22 @@ static void set_dbreak(unsigned i, uint32_t dbreaka, uint32_t dbreakc) } } -void HELPER(wsr_dbreaka)(uint32_t i, uint32_t v) +void HELPER(wsr_dbreaka)(CPUXtensaState *env, uint32_t i, uint32_t v) { uint32_t dbreakc = env->sregs[DBREAKC + i]; if ((dbreakc & DBREAKC_SB_LB) && env->sregs[DBREAKA + i] != v) { - set_dbreak(i, v, dbreakc); + set_dbreak(env, i, v, dbreakc); } env->sregs[DBREAKA + i] = v; } -void HELPER(wsr_dbreakc)(uint32_t i, uint32_t v) +void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v) { if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) { if (v & DBREAKC_SB_LB) { - set_dbreak(i, env->sregs[DBREAKA + i], v); + set_dbreak(env, i, env->sregs[DBREAKA + i], v); } else { if (env->cpu_watchpoint[i]) { cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[i]); diff --git a/target-xtensa/translate.c b/target-xtensa/translate.c index 521c0e6226..b883e6bb72 100644 --- a/target-xtensa/translate.c +++ b/target-xtensa/translate.c @@ -254,7 +254,7 @@ static void gen_advance_ccount(DisasContext *dc) if (dc->ccount_delta > 0) { TCGv_i32 tmp = tcg_const_i32(dc->ccount_delta); dc->ccount_delta = 0; - gen_helper_advance_ccount(tmp); + gen_helper_advance_ccount(cpu_env, tmp); tcg_temp_free(tmp); } } @@ -268,7 +268,7 @@ static void gen_exception(DisasContext *dc, int excp) { TCGv_i32 tmp = tcg_const_i32(excp); gen_advance_ccount(dc); - gen_helper_exception(tmp); + gen_helper_exception(cpu_env, tmp); tcg_temp_free(tmp); } @@ -277,7 +277,7 @@ static void gen_exception_cause(DisasContext *dc, uint32_t cause) TCGv_i32 tpc = tcg_const_i32(dc->pc); TCGv_i32 tcause = tcg_const_i32(cause); gen_advance_ccount(dc); - gen_helper_exception_cause(tpc, tcause); + gen_helper_exception_cause(cpu_env, tpc, tcause); tcg_temp_free(tpc); tcg_temp_free(tcause); if (cause == ILLEGAL_INSTRUCTION_CAUSE || @@ -292,7 +292,7 @@ static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause, TCGv_i32 tpc = tcg_const_i32(dc->pc); TCGv_i32 tcause = tcg_const_i32(cause); gen_advance_ccount(dc); - gen_helper_exception_cause_vaddr(tpc, tcause, vaddr); + gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr); tcg_temp_free(tpc); tcg_temp_free(tcause); } @@ -302,7 +302,7 @@ static void gen_debug_exception(DisasContext *dc, uint32_t 
cause) TCGv_i32 tpc = tcg_const_i32(dc->pc); TCGv_i32 tcause = tcg_const_i32(cause); gen_advance_ccount(dc); - gen_helper_debug_exception(tpc, tcause); + gen_helper_debug_exception(cpu_env, tpc, tcause); tcg_temp_free(tpc); tcg_temp_free(tcause); if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) { @@ -388,6 +388,7 @@ static bool gen_check_loop_end(DisasContext *dc, int slot) dc->next_pc == dc->lend) { int label = gen_new_label(); + gen_advance_ccount(dc); tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label); tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1); gen_jumpi(dc, dc->lbeg, slot); @@ -410,6 +411,7 @@ static void gen_brcond(DisasContext *dc, TCGCond cond, { int label = gen_new_label(); + gen_advance_ccount(dc); tcg_gen_brcond_i32(cond, t0, t1, label); gen_jumpi_check_loop_end(dc, 0); gen_set_label(label); @@ -458,13 +460,13 @@ static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr) static void gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s) { - gen_helper_wsr_lbeg(s); + gen_helper_wsr_lbeg(cpu_env, s); gen_jumpi_check_loop_end(dc, 0); } static void gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s) { - gen_helper_wsr_lend(s); + gen_helper_wsr_lend(cpu_env, s); gen_jumpi_check_loop_end(dc, 0); } @@ -497,7 +499,7 @@ static void gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s) static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v) { - gen_helper_wsr_windowbase(v); + gen_helper_wsr_windowbase(cpu_env, v); reset_used_window(dc); } @@ -514,7 +516,7 @@ static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v) static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v) { - gen_helper_wsr_rasid(v); + gen_helper_wsr_rasid(cpu_env, v); /* This can change tb->flags, so exit tb */ gen_jumpi_check_loop_end(dc, -1); } @@ -526,7 +528,7 @@ static void gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v) static void gen_wsr_ibreakenable(DisasContext *dc, uint32_t sr, TCGv_i32 v) { - gen_helper_wsr_ibreakenable(v); + gen_helper_wsr_ibreakenable(cpu_env, v); gen_jumpi_check_loop_end(dc, 0); } @@ -536,7 +538,7 @@ static void gen_wsr_ibreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v) if (id < dc->config->nibreak) { TCGv_i32 tmp = tcg_const_i32(id); - gen_helper_wsr_ibreaka(tmp, v); + gen_helper_wsr_ibreaka(cpu_env, tmp, v); tcg_temp_free(tmp); gen_jumpi_check_loop_end(dc, 0); } @@ -548,7 +550,7 @@ static void gen_wsr_dbreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v) if (id < dc->config->ndbreak) { TCGv_i32 tmp = tcg_const_i32(id); - gen_helper_wsr_dbreaka(tmp, v); + gen_helper_wsr_dbreaka(cpu_env, tmp, v); tcg_temp_free(tmp); } } @@ -559,7 +561,7 @@ static void gen_wsr_dbreakc(DisasContext *dc, uint32_t sr, TCGv_i32 v) if (id < dc->config->ndbreak) { TCGv_i32 tmp = tcg_const_i32(id); - gen_helper_wsr_dbreakc(tmp, v); + gen_helper_wsr_dbreakc(cpu_env, tmp, v); tcg_temp_free(tmp); } } @@ -712,7 +714,7 @@ static void gen_waiti(DisasContext *dc, uint32_t imm4) TCGv_i32 pc = tcg_const_i32(dc->next_pc); TCGv_i32 intlevel = tcg_const_i32(imm4); gen_advance_ccount(dc); - gen_helper_waiti(pc, intlevel); + gen_helper_waiti(cpu_env, pc, intlevel); tcg_temp_free(pc); tcg_temp_free(intlevel); } @@ -729,7 +731,7 @@ static void gen_window_check1(DisasContext *dc, unsigned r1) dc->used_window = r1 / 4; gen_advance_ccount(dc); - gen_helper_window_check(pc, w); + gen_helper_window_check(cpu_env, pc, w); tcg_temp_free(w); tcg_temp_free(pc); @@ -849,8 +851,8 @@ static void disas_xtensa_insn(DisasContext *dc) #define RSR_SR (b1) - 
uint8_t b0 = ldub_code(dc->pc); - uint8_t b1 = ldub_code(dc->pc + 1); + uint8_t b0 = cpu_ldub_code(cpu_single_env, dc->pc); + uint8_t b1 = cpu_ldub_code(cpu_single_env, dc->pc + 1); uint8_t b2 = 0; static const uint32_t B4CONST[] = { @@ -866,7 +868,7 @@ static void disas_xtensa_insn(DisasContext *dc) HAS_OPTION(XTENSA_OPTION_CODE_DENSITY); } else { dc->next_pc = dc->pc + 3; - b2 = ldub_code(dc->pc + 2); + b2 = cpu_ldub_code(cpu_single_env, dc->pc + 2); } switch (OP0) { @@ -903,7 +905,7 @@ static void disas_xtensa_insn(DisasContext *dc) { TCGv_i32 tmp = tcg_const_i32(dc->pc); gen_advance_ccount(dc); - gen_helper_retw(tmp, tmp); + gen_helper_retw(tmp, cpu_env, tmp); gen_jump(dc, tmp); tcg_temp_free(tmp); } @@ -951,7 +953,7 @@ static void disas_xtensa_insn(DisasContext *dc) { TCGv_i32 pc = tcg_const_i32(dc->pc); gen_advance_ccount(dc); - gen_helper_movsp(pc); + gen_helper_movsp(cpu_env, pc); tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]); tcg_temp_free(pc); } @@ -1031,7 +1033,7 @@ static void disas_xtensa_insn(DisasContext *dc) cpu_SR[WINDOW_START], tmp); } - gen_helper_restore_owb(); + gen_helper_restore_owb(cpu_env); gen_helper_check_interrupts(cpu_env); gen_jump(dc, cpu_SR[EPC1]); @@ -1219,7 +1221,7 @@ static void disas_xtensa_insn(DisasContext *dc) { TCGv_i32 tmp = tcg_const_i32( RRR_T | ((RRR_T & 8) ? 0xfffffff0 : 0)); - gen_helper_rotw(tmp); + gen_helper_rotw(cpu_env, tmp); tcg_temp_free(tmp); reset_used_window(dc); } @@ -1255,28 +1257,32 @@ static void disas_xtensa_insn(DisasContext *dc) switch (RRR_R & 7) { case 3: /*RITLB0*/ /*RDTLB0*/ - gen_helper_rtlb0(cpu_R[RRR_T], cpu_R[RRR_S], dtlb); + gen_helper_rtlb0(cpu_R[RRR_T], + cpu_env, cpu_R[RRR_S], dtlb); break; case 4: /*IITLB*/ /*IDTLB*/ - gen_helper_itlb(cpu_R[RRR_S], dtlb); + gen_helper_itlb(cpu_env, cpu_R[RRR_S], dtlb); /* This could change memory mapping, so exit tb */ gen_jumpi_check_loop_end(dc, -1); break; case 5: /*PITLB*/ /*PDTLB*/ tcg_gen_movi_i32(cpu_pc, dc->pc); - gen_helper_ptlb(cpu_R[RRR_T], cpu_R[RRR_S], dtlb); + gen_helper_ptlb(cpu_R[RRR_T], + cpu_env, cpu_R[RRR_S], dtlb); break; case 6: /*WITLB*/ /*WDTLB*/ - gen_helper_wtlb(cpu_R[RRR_T], cpu_R[RRR_S], dtlb); + gen_helper_wtlb( + cpu_env, cpu_R[RRR_T], cpu_R[RRR_S], dtlb); /* This could change memory mapping, so exit tb */ gen_jumpi_check_loop_end(dc, -1); break; case 7: /*RITLB1*/ /*RDTLB1*/ - gen_helper_rtlb1(cpu_R[RRR_T], cpu_R[RRR_S], dtlb); + gen_helper_rtlb1(cpu_R[RRR_T], + cpu_env, cpu_R[RRR_S], dtlb); break; default: @@ -2244,7 +2250,7 @@ static void disas_xtensa_insn(DisasContext *dc) TCGv_i32 s = tcg_const_i32(BRI12_S); TCGv_i32 imm = tcg_const_i32(BRI12_IMM12); gen_advance_ccount(dc); - gen_helper_entry(pc, s, imm); + gen_helper_entry(cpu_env, pc, s, imm); tcg_temp_free(imm); tcg_temp_free(s); tcg_temp_free(pc); @@ -2278,7 +2284,7 @@ static void disas_xtensa_insn(DisasContext *dc) tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1); tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc); - gen_helper_wsr_lend(tmp); + gen_helper_wsr_lend(cpu_env, tmp); tcg_temp_free(tmp); if (BRI8_R > 8) { @@ -2447,7 +2453,7 @@ static void disas_xtensa_insn(DisasContext *dc) { TCGv_i32 tmp = tcg_const_i32(dc->pc); gen_advance_ccount(dc); - gen_helper_retw(tmp, tmp); + gen_helper_retw(tmp, cpu_env, tmp); gen_jump(dc, tmp); tcg_temp_free(tmp); } diff --git a/xtensa-semi.c b/target-xtensa/xtensa-semi.c index b7c8c34564..1c8a19ed56 100644 --- a/xtensa-semi.c +++ b/target-xtensa/xtensa-semi.c @@ -30,7 +30,6 @@ #include <string.h> #include <stddef.h> #include "cpu.h" -#include "dyngen-exec.h" 
#include "helper.h" #include "qemu-log.h" diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c index d26569715b..0cff181257 100644 --- a/tcg/ppc/tcg-target.c +++ b/tcg/ppc/tcg-target.c @@ -1865,7 +1865,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, break; default: - tcg_dump_ops (s, stderr); + tcg_dump_ops (s); tcg_abort (); } } diff --git a/tcg/ppc64/tcg-target.c b/tcg/ppc64/tcg-target.c index c800574588..27a0ae88ec 100644 --- a/tcg/ppc64/tcg-target.c +++ b/tcg/ppc64/tcg-target.c @@ -1613,7 +1613,7 @@ static void tcg_out_op (TCGContext *s, TCGOpcode opc, const TCGArg *args, break; default: - tcg_dump_ops (s, stderr); + tcg_dump_ops (s); tcg_abort (); } } @@ -873,7 +873,7 @@ static const char * const cond_name[] = [TCG_COND_GTU] = "gtu" }; -void tcg_dump_ops(TCGContext *s, FILE *outfile) +void tcg_dump_ops(TCGContext *s) { const uint16_t *opc_ptr; const TCGArg *args; @@ -896,9 +896,10 @@ void tcg_dump_ops(TCGContext *s, FILE *outfile) #else pc = args[0]; #endif - if (!first_insn) - fprintf(outfile, "\n"); - fprintf(outfile, " ---- 0x%" PRIx64, pc); + if (!first_insn) { + qemu_log("\n"); + } + qemu_log(" ---- 0x%" PRIx64, pc); first_insn = 0; nb_oargs = def->nb_oargs; nb_iargs = def->nb_iargs; @@ -912,28 +913,28 @@ void tcg_dump_ops(TCGContext *s, FILE *outfile) nb_iargs = arg & 0xffff; nb_cargs = def->nb_cargs; - fprintf(outfile, " %s ", def->name); + qemu_log(" %s ", def->name); /* function name */ - fprintf(outfile, "%s", - tcg_get_arg_str_idx(s, buf, sizeof(buf), args[nb_oargs + nb_iargs - 1])); + qemu_log("%s", + tcg_get_arg_str_idx(s, buf, sizeof(buf), + args[nb_oargs + nb_iargs - 1])); /* flags */ - fprintf(outfile, ",$0x%" TCG_PRIlx, - args[nb_oargs + nb_iargs]); + qemu_log(",$0x%" TCG_PRIlx, args[nb_oargs + nb_iargs]); /* nb out args */ - fprintf(outfile, ",$%d", nb_oargs); + qemu_log(",$%d", nb_oargs); for(i = 0; i < nb_oargs; i++) { - fprintf(outfile, ","); - fprintf(outfile, "%s", - tcg_get_arg_str_idx(s, buf, sizeof(buf), args[i])); + qemu_log(","); + qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf), + args[i])); } for(i = 0; i < (nb_iargs - 1); i++) { - fprintf(outfile, ","); + qemu_log(","); if (args[nb_oargs + i] == TCG_CALL_DUMMY_ARG) { - fprintf(outfile, "<dummy>"); + qemu_log("<dummy>"); } else { - fprintf(outfile, "%s", - tcg_get_arg_str_idx(s, buf, sizeof(buf), args[nb_oargs + i])); + qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf), + args[nb_oargs + i])); } } } else if (c == INDEX_op_movi_i32 @@ -947,20 +948,21 @@ void tcg_dump_ops(TCGContext *s, FILE *outfile) nb_oargs = def->nb_oargs; nb_iargs = def->nb_iargs; nb_cargs = def->nb_cargs; - fprintf(outfile, " %s %s,$", def->name, - tcg_get_arg_str_idx(s, buf, sizeof(buf), args[0])); + qemu_log(" %s %s,$", def->name, + tcg_get_arg_str_idx(s, buf, sizeof(buf), args[0])); val = args[1]; th = tcg_find_helper(s, val); if (th) { - fprintf(outfile, "%s", th->name); + qemu_log("%s", th->name); } else { - if (c == INDEX_op_movi_i32) - fprintf(outfile, "0x%x", (uint32_t)val); - else - fprintf(outfile, "0x%" PRIx64 , (uint64_t)val); + if (c == INDEX_op_movi_i32) { + qemu_log("0x%x", (uint32_t)val); + } else { + qemu_log("0x%" PRIx64 , (uint64_t)val); + } } } else { - fprintf(outfile, " %s ", def->name); + qemu_log(" %s ", def->name); if (c == INDEX_op_nopn) { /* variable number of arguments */ nb_cargs = *args; @@ -974,16 +976,18 @@ void tcg_dump_ops(TCGContext *s, FILE *outfile) k = 0; for(i = 0; i < nb_oargs; i++) { - if (k != 0) - fprintf(outfile, ","); - fprintf(outfile, "%s", - 
tcg_get_arg_str_idx(s, buf, sizeof(buf), args[k++])); + if (k != 0) { + qemu_log(","); + } + qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf), + args[k++])); } for(i = 0; i < nb_iargs; i++) { - if (k != 0) - fprintf(outfile, ","); - fprintf(outfile, "%s", - tcg_get_arg_str_idx(s, buf, sizeof(buf), args[k++])); + if (k != 0) { + qemu_log(","); + } + qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf), + args[k++])); } switch (c) { case INDEX_op_brcond_i32: @@ -998,10 +1002,11 @@ void tcg_dump_ops(TCGContext *s, FILE *outfile) #elif TCG_TARGET_REG_BITS == 64 case INDEX_op_setcond_i64: #endif - if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) - fprintf(outfile, ",%s", cond_name[args[k++]]); - else - fprintf(outfile, ",$0x%" TCG_PRIlx, args[k++]); + if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) { + qemu_log(",%s", cond_name[args[k++]]); + } else { + qemu_log(",$0x%" TCG_PRIlx, args[k++]); + } i = 1; break; default: @@ -1009,13 +1014,14 @@ void tcg_dump_ops(TCGContext *s, FILE *outfile) break; } for(; i < nb_cargs; i++) { - if (k != 0) - fprintf(outfile, ","); + if (k != 0) { + qemu_log(","); + } arg = args[k++]; - fprintf(outfile, "$0x%" TCG_PRIlx, arg); + qemu_log("$0x%" TCG_PRIlx, arg); } } - fprintf(outfile, "\n"); + qemu_log("\n"); args += nb_iargs + nb_oargs + nb_cargs; } } @@ -2048,7 +2054,7 @@ static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf, #ifdef DEBUG_DISAS if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) { qemu_log("OP:\n"); - tcg_dump_ops(s, logfile); + tcg_dump_ops(s); qemu_log("\n"); } #endif @@ -2069,7 +2075,7 @@ static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf, #ifdef DEBUG_DISAS if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) { qemu_log("OP after liveness analysis:\n"); - tcg_dump_ops(s, logfile); + tcg_dump_ops(s); qemu_log("\n"); } #endif @@ -571,7 +571,7 @@ TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr, TCGArg *args, /* only used for debugging purposes */ void tcg_register_helper(void *func, const char *name); const char *tcg_helper_get_name(TCGContext *s, void *func); -void tcg_dump_ops(TCGContext *s, FILE *outfile); +void tcg_dump_ops(TCGContext *s); void dump_ops(const uint16_t *opc_buf, const TCGArg *opparam_buf); TCGv_i32 tcg_const_i32(int32_t val); diff --git a/tcg/tci/tcg-target.c b/tcg/tci/tcg-target.c index 453f1875e2..ef8580fc8d 100644 --- a/tcg/tci/tcg-target.c +++ b/tcg/tci/tcg-target.c @@ -487,7 +487,7 @@ static void tci_out_label(TCGContext *s, TCGArg arg) assert(label->u.value); } else { tcg_out_reloc(s, s->code_ptr, sizeof(tcg_target_ulong), arg, 0); - tcg_out_i(s, 0); + s->code_ptr += sizeof(tcg_target_ulong); } } @@ -878,7 +878,7 @@ static void tcg_target_init(TCGContext *s) #if defined(CONFIG_DEBUG_TCG_INTERPRETER) const char *envval = getenv("DEBUG_TCG"); if (envval) { - loglevel = strtol(envval, NULL, 0); + cpu_set_log(strtol(envval, NULL, 0)); } #endif @@ -1014,7 +1014,6 @@ tcg_target_ulong tcg_qemu_tb_exec(CPUArchState *cpustate, uint8_t *tb_ptr) #endif #if TCG_TARGET_HAS_bswap64_i64 case INDEX_op_bswap64_i64: - TODO(); t0 = *tb_ptr++; t1 = tci_read_r64(&tb_ptr); tci_write_reg64(t0, bswap64(t1)); diff --git a/tests/Makefile b/tests/Makefile index 7340bc5044..d687ecce3f 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -13,6 +13,7 @@ check-unit-y += tests/test-qmp-commands$(EXESUF) check-unit-y += tests/test-string-input-visitor$(EXESUF) check-unit-y += tests/test-string-output-visitor$(EXESUF) check-unit-y += tests/test-coroutine$(EXESUF) 
+check-unit-y += tests/test-visitor-serialization$(EXESUF) check-unit-y += tests/test-iov$(EXESUF) check-block-$(CONFIG_POSIX) += tests/qemu-iotests-quick.sh @@ -32,13 +33,12 @@ test-obj-y = tests/check-qint.o tests/check-qstring.o tests/check-qdict.o \ tests/test-coroutine.o tests/test-string-output-visitor.o \ tests/test-string-input-visitor.o tests/test-qmp-output-visitor.o \ tests/test-qmp-input-visitor.o tests/test-qmp-input-strict.o \ - tests/test-qmp-commands.o + tests/test-qmp-commands.o tests/test-visitor-serialization.o test-qapi-obj-y = $(qobject-obj-y) $(qapi-obj-y) $(tools-obj-y) test-qapi-obj-y += tests/test-qapi-visit.o tests/test-qapi-types.o test-qapi-obj-y += module.o -$(test-obj-y): $(GENERATED_HEADERS) $(test-obj-y): QEMU_INCLUDES += -Itests tests/check-qint$(EXESUF): tests/check-qint.o qint.o $(tools-obj-y) @@ -67,6 +67,7 @@ tests/test-qmp-output-visitor$(EXESUF): tests/test-qmp-output-visitor.o $(test-q tests/test-qmp-input-visitor$(EXESUF): tests/test-qmp-input-visitor.o $(test-qapi-obj-y) tests/test-qmp-input-strict$(EXESUF): tests/test-qmp-input-strict.o $(test-qapi-obj-y) tests/test-qmp-commands$(EXESUF): tests/test-qmp-commands.o tests/test-qmp-marshal.o $(test-qapi-obj-y) +tests/test-visitor-serialization$(EXESUF): tests/test-visitor-serialization.o $(test-qapi-obj-y) tests/rtc-test$(EXESUF): tests/rtc-test.o $(trace-obj-y) tests/m48t59-test$(EXESUF): tests/m48t59-test.o $(trace-obj-y) diff --git a/tests/fdc-test.c b/tests/fdc-test.c index 22d24ac7dc..585fb0e343 100644 --- a/tests/fdc-test.c +++ b/tests/fdc-test.c @@ -49,6 +49,7 @@ enum { enum { CMD_SENSE_INT = 0x08, CMD_SEEK = 0x0f, + CMD_READ = 0xe6, }; enum { @@ -99,19 +100,72 @@ static void ack_irq(void) g_assert(!get_irq(FLOPPY_IRQ)); } -static void send_step_pulse(void) +static uint8_t send_read_command(void) +{ + uint8_t drive = 0; + uint8_t head = 0; + uint8_t cyl = 0; + uint8_t sect_addr = 1; + uint8_t sect_size = 2; + uint8_t eot = 1; + uint8_t gap = 0x1b; + uint8_t gpl = 0xff; + + uint8_t msr = 0; + uint8_t st0; + + uint8_t ret = 0; + + floppy_send(CMD_READ); + floppy_send(head << 2 | drive); + g_assert(!get_irq(FLOPPY_IRQ)); + floppy_send(cyl); + floppy_send(head); + floppy_send(sect_addr); + floppy_send(sect_size); + floppy_send(eot); + floppy_send(gap); + floppy_send(gpl); + + uint8_t i = 0; + uint8_t n = 2; + for (; i < n; i++) { + msr = inb(FLOPPY_BASE + reg_msr); + if (msr == 0xd0) { + break; + } + sleep(1); + } + + if (i >= n) { + return 1; + } + + st0 = floppy_recv(); + if (st0 != 0x60) { + ret = 1; + } + + floppy_recv(); + floppy_recv(); + floppy_recv(); + floppy_recv(); + floppy_recv(); + floppy_recv(); + + return ret; +} + +static void send_step_pulse(int cyl) { int drive = 0; int head = 0; - static int cyl = 0; floppy_send(CMD_SEEK); floppy_send(head << 2 | drive); g_assert(!get_irq(FLOPPY_IRQ)); floppy_send(cyl); ack_irq(); - - cyl = (cyl + 1) % 4; } static uint8_t cmos_read(uint8_t reg) @@ -138,14 +192,21 @@ static void test_no_media_on_start(void) assert_bit_set(dir, DSKCHG); dir = inb(FLOPPY_BASE + reg_dir); assert_bit_set(dir, DSKCHG); - send_step_pulse(); - send_step_pulse(); + send_step_pulse(1); dir = inb(FLOPPY_BASE + reg_dir); assert_bit_set(dir, DSKCHG); dir = inb(FLOPPY_BASE + reg_dir); assert_bit_set(dir, DSKCHG); } +static void test_read_without_media(void) +{ + uint8_t ret; + + ret = send_read_command(); + g_assert(ret == 0); +} + static void test_media_change(void) { uint8_t dir; @@ -162,7 +223,14 @@ static void test_media_change(void) dir = inb(FLOPPY_BASE + reg_dir); 
assert_bit_set(dir, DSKCHG); - send_step_pulse(); + send_step_pulse(0); + dir = inb(FLOPPY_BASE + reg_dir); + assert_bit_set(dir, DSKCHG); + dir = inb(FLOPPY_BASE + reg_dir); + assert_bit_set(dir, DSKCHG); + + /* Step to next track should clear DSKCHG bit. */ + send_step_pulse(1); dir = inb(FLOPPY_BASE + reg_dir); assert_bit_clear(dir, DSKCHG); dir = inb(FLOPPY_BASE + reg_dir); @@ -178,13 +246,57 @@ static void test_media_change(void) dir = inb(FLOPPY_BASE + reg_dir); assert_bit_set(dir, DSKCHG); - send_step_pulse(); + send_step_pulse(0); + dir = inb(FLOPPY_BASE + reg_dir); + assert_bit_set(dir, DSKCHG); + dir = inb(FLOPPY_BASE + reg_dir); + assert_bit_set(dir, DSKCHG); + + send_step_pulse(1); dir = inb(FLOPPY_BASE + reg_dir); assert_bit_set(dir, DSKCHG); dir = inb(FLOPPY_BASE + reg_dir); assert_bit_set(dir, DSKCHG); } +static void test_sense_interrupt(void) +{ + int drive = 0; + int head = 0; + int cyl = 0; + int ret = 0; + + floppy_send(CMD_SENSE_INT); + ret = floppy_recv(); + g_assert(ret == 0x80); + + floppy_send(CMD_SEEK); + floppy_send(head << 2 | drive); + g_assert(!get_irq(FLOPPY_IRQ)); + floppy_send(cyl); + + floppy_send(CMD_SENSE_INT); + ret = floppy_recv(); + g_assert(ret == 0x20); + floppy_recv(); +} + +/* success if no crash or abort */ +static void fuzz_registers(void) +{ + unsigned int i; + + for (i = 0; i < 1000; i++) { + uint8_t reg, val; + + reg = (uint8_t)g_test_rand_int_range(0, 8); + val = (uint8_t)g_test_rand_int_range(0, 256); + + outb(FLOPPY_BASE + reg, val); + inb(FLOPPY_BASE + reg); + } +} + int main(int argc, char **argv) { const char *arch = qtest_get_arch(); @@ -214,7 +326,10 @@ int main(int argc, char **argv) qtest_irq_intercept_in(global_qtest, "ioapic"); qtest_add_func("/fdc/cmos", test_cmos); qtest_add_func("/fdc/no_media_on_start", test_no_media_on_start); + qtest_add_func("/fdc/read_without_media", test_read_without_media); qtest_add_func("/fdc/media_change", test_media_change); + qtest_add_func("/fdc/sense_interrupt", test_sense_interrupt); + qtest_add_func("/fdc/fuzz-registers", fuzz_registers); ret = g_test_run(); diff --git a/tests/libqtest.c b/tests/libqtest.c index 6d333ef0ac..02d039218d 100644 --- a/tests/libqtest.c +++ b/tests/libqtest.c @@ -40,6 +40,7 @@ struct QTestState bool irq_level[MAX_IRQ]; GString *rx; gchar *pid_file; + char *socket_path, *qmp_socket_path; }; #define g_assert_no_errno(ret) do { \ @@ -74,6 +75,7 @@ static int socket_accept(int sock) socklen_t addrlen; int ret; + addrlen = sizeof(addr); do { ret = accept(sock, (struct sockaddr *)&addr, &addrlen); } while (ret == -1 && errno == EINTR); @@ -87,8 +89,6 @@ QTestState *qtest_init(const char *extra_args) { QTestState *s; int sock, qmpsock, ret, i; - gchar *socket_path; - gchar *qmp_socket_path; gchar *pid_file; gchar *command; const char *qemu_binary; @@ -97,14 +97,14 @@ QTestState *qtest_init(const char *extra_args) qemu_binary = getenv("QTEST_QEMU_BINARY"); g_assert(qemu_binary != NULL); - socket_path = g_strdup_printf("/tmp/qtest-%d.sock", getpid()); - qmp_socket_path = g_strdup_printf("/tmp/qtest-%d.qmp", getpid()); - pid_file = g_strdup_printf("/tmp/qtest-%d.pid", getpid()); - s = g_malloc(sizeof(*s)); - sock = init_socket(socket_path); - qmpsock = init_socket(qmp_socket_path); + s->socket_path = g_strdup_printf("/tmp/qtest-%d.sock", getpid()); + s->qmp_socket_path = g_strdup_printf("/tmp/qtest-%d.qmp", getpid()); + pid_file = g_strdup_printf("/tmp/qtest-%d.pid", getpid()); + + sock = init_socket(s->socket_path); + qmpsock = init_socket(s->qmp_socket_path); pid = fork(); if 
(pid == 0) { @@ -114,8 +114,8 @@ QTestState *qtest_init(const char *extra_args) "-qmp unix:%s,nowait " "-pidfile %s " "-machine accel=qtest " - "%s", qemu_binary, socket_path, - qmp_socket_path, pid_file, + "%s", qemu_binary, s->socket_path, + s->qmp_socket_path, pid_file, extra_args ?: ""); ret = system(command); @@ -132,9 +132,6 @@ QTestState *qtest_init(const char *extra_args) s->irq_level[i] = false; } - g_free(socket_path); - g_free(qmp_socket_path); - /* Read the QMP greeting and then do the handshake */ qtest_qmp(s, ""); qtest_qmp(s, "{ 'execute': 'qmp_capabilities' }"); @@ -159,6 +156,13 @@ void qtest_quit(QTestState *s) fclose(f); } + + unlink(s->pid_file); + unlink(s->socket_path); + unlink(s->qmp_socket_path); + g_free(s->pid_file); + g_free(s->socket_path); + g_free(s->qmp_socket_path); } static void socket_sendf(int fd, const char *fmt, va_list ap) @@ -290,6 +294,11 @@ void qtest_qmp(QTestState *s, const char *fmt, ...) continue; } + if (len == -1 || len == 0) { + fprintf(stderr, "Broken pipe\n"); + exit(1); + } + switch (c) { case '{': nesting++; diff --git a/tests/qemu-iotests/030 b/tests/qemu-iotests/030 index eb7bf996d1..cc671dd7aa 100755 --- a/tests/qemu-iotests/030 +++ b/tests/qemu-iotests/030 @@ -21,6 +21,7 @@ import os import iotests from iotests import qemu_img, qemu_io +import struct backing_img = os.path.join(iotests.test_dir, 'backing.img') mid_img = os.path.join(iotests.test_dir, 'mid.img') @@ -48,11 +49,21 @@ class ImageStreamingTestCase(iotests.QMPTestCase): self.assert_no_active_streams() + def create_image(self, name, size): + file = open(name, 'w') + i = 0 + while i < size: + sector = struct.pack('>l504xl', i / 512, i / 512) + file.write(sector) + i = i + 512 + file.close() + + class TestSingleDrive(ImageStreamingTestCase): image_len = 1 * 1024 * 1024 # MB def setUp(self): - qemu_img('create', backing_img, str(TestSingleDrive.image_len)) + self.create_image(backing_img, TestSingleDrive.image_len) qemu_img('create', '-f', iotests.imgfmt, '-o', 'backing_file=%s' % backing_img, mid_img) qemu_img('create', '-f', iotests.imgfmt, '-o', 'backing_file=%s' % mid_img, test_img) self.vm = iotests.VM().add_drive(test_img) @@ -136,7 +147,7 @@ class TestStreamStop(ImageStreamingTestCase): result = self.vm.qmp('block-stream', device='drive0') self.assert_qmp(result, 'return', {}) - time.sleep(1) + time.sleep(0.1) events = self.vm.get_qmp_events(wait=False) self.assertEqual(events, [], 'unexpected QMP event: %s' % events) diff --git a/tests/qemu-iotests/036 b/tests/qemu-iotests/036 new file mode 100755 index 0000000000..329533e9ea --- /dev/null +++ b/tests/qemu-iotests/036 @@ -0,0 +1,68 @@ +#!/bin/bash +# +# Test that qcow2 unknown autoclear feature bits are cleared +# +# Copyright (C) 2011 Red Hat, Inc. +# Copyright IBM, Corp. 2010 +# +# Based on test 031. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+# + +# creator +owner=stefanha@linux.vnet.ibm.com + +seq=`basename $0` +echo "QA output created by $seq" + +here=`pwd` +tmp=/tmp/$$ +status=1 # failure is the default! + +_cleanup() +{ + _cleanup_test_img +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +. ./common.rc +. ./common.filter +. ./common.pattern + +# This tests qcow2-specific low-level functionality +_supported_fmt qcow2 +_supported_proto generic +_supported_os Linux + +# Only qcow2v3 and later supports feature bits +IMGOPTS="compat=1.1" + +echo === Create image with unknown autoclear feature bit === +echo +_make_test_img 64M +./qcow2.py $TEST_IMG set-feature-bit autoclear 63 +./qcow2.py $TEST_IMG dump-header + +echo +echo === Repair image === +echo +$QEMU_IMG check -r all $TEST_IMG +./qcow2.py $TEST_IMG dump-header + +# success, all done +echo "*** done" +rm -f $seq.full +status=0 diff --git a/tests/qemu-iotests/036.out b/tests/qemu-iotests/036.out new file mode 100644 index 0000000000..6953e37ab6 --- /dev/null +++ b/tests/qemu-iotests/036.out @@ -0,0 +1,52 @@ +QA output created by 036 +=== Create image with unknown autoclear feature bit === + +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 +magic 0x514649fb +version 3 +backing_file_offset 0x0 +backing_file_size 0x0 +cluster_bits 16 +size 67108864 +crypt_method 0 +l1_size 1 +l1_table_offset 0x30000 +refcount_table_offset 0x10000 +refcount_table_clusters 1 +nb_snapshots 0 +snapshot_offset 0x0 +incompatible_features 0x0 +compatible_features 0x0 +autoclear_features 0x8000000000000000 +refcount_order 4 +header_length 104 + + +=== Repair image === + +No errors were found on the image. +magic 0x514649fb +version 3 +backing_file_offset 0x0 +backing_file_size 0x0 +cluster_bits 16 +size 67108864 +crypt_method 0 +l1_size 1 +l1_table_offset 0x30000 +refcount_table_offset 0x10000 +refcount_table_clusters 1 +nb_snapshots 0 +snapshot_offset 0x0 +incompatible_features 0x0 +compatible_features 0x0 +autoclear_features 0x0 +refcount_order 4 +header_length 104 + +Header extension: +magic 0x6803f857 +length 0 +data '' + +*** done diff --git a/tests/qemu-iotests/037 b/tests/qemu-iotests/037 new file mode 100755 index 0000000000..c11460b92f --- /dev/null +++ b/tests/qemu-iotests/037 @@ -0,0 +1,119 @@ +#!/bin/bash +# +# Test COW from backing files +# +# Copyright (C) 2012 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +# creator +owner=kwolf@redhat.com + +seq=`basename $0` +echo "QA output created by $seq" + +here=`pwd` +tmp=/tmp/$$ +status=1 # failure is the default! + +_cleanup() +{ + _cleanup_test_img +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +. ./common.rc +. 
./common.filter + +_supported_fmt qcow qcow2 vmdk qed +_supported_proto generic +_supported_os Linux + +CLUSTER_SIZE=4k +size=128M + +echo +echo "== creating backing file for COW tests ==" + +_make_test_img $size + +function backing_io() +{ + local offset=$1 + local sectors=$2 + local op=$3 + local pattern=0 + local cur_sec=0 + + for i in $(seq 0 $((sectors - 1))); do + cur_sec=$((offset / 512 + i)) + pattern=$(( ( (cur_sec % 256) + (cur_sec / 256)) % 256 )) + + echo "$op -P $pattern $((cur_sec * 512)) 512" + done +} + +backing_io 0 256 write | $QEMU_IO $TEST_IMG | _filter_qemu_io + +mv $TEST_IMG $TEST_IMG.base + +_make_test_img -b $TEST_IMG.base 6G + +echo +echo "== COW in a single cluster ==" +$QEMU_IO -c "write -P 0x77 0 2k" $TEST_IMG | _filter_qemu_io +$QEMU_IO -c "write -P 0x88 6k 2k" $TEST_IMG | _filter_qemu_io +$QEMU_IO -c "write -P 0x99 9k 2k" $TEST_IMG | _filter_qemu_io + +$QEMU_IO -c "read -P 0x77 0 2k" $TEST_IMG | _filter_qemu_io +backing_io $((2 * 1024)) 8 read | $QEMU_IO $TEST_IMG | _filter_qemu_io +$QEMU_IO -c "read -P 0x88 6k 2k" $TEST_IMG | _filter_qemu_io +backing_io $((8 * 1024)) 2 read | $QEMU_IO $TEST_IMG | _filter_qemu_io +$QEMU_IO -c "read -P 0x99 9k 2k" $TEST_IMG | _filter_qemu_io +backing_io $((11 * 1024)) 2 read | $QEMU_IO $TEST_IMG | _filter_qemu_io + +echo +echo "== COW in two-cluster allocations ==" +$QEMU_IO -c "write -P 0x77 16k 6k" $TEST_IMG | _filter_qemu_io +$QEMU_IO -c "write -P 0x88 26k 6k" $TEST_IMG | _filter_qemu_io +$QEMU_IO -c "write -P 0x99 33k 5k" $TEST_IMG | _filter_qemu_io + +$QEMU_IO -c "read -P 0x77 16k 6k" $TEST_IMG | _filter_qemu_io +backing_io $((22 * 1024)) 8 read | $QEMU_IO $TEST_IMG | _filter_qemu_io +$QEMU_IO -c "read -P 0x88 26k 6k" $TEST_IMG | _filter_qemu_io +backing_io $((32 * 1024)) 2 read | $QEMU_IO $TEST_IMG | _filter_qemu_io +$QEMU_IO -c "read -P 0x99 33k 5k" $TEST_IMG | _filter_qemu_io +backing_io $((38 * 1024)) 4 read | $QEMU_IO $TEST_IMG | _filter_qemu_io + +echo +echo "== COW in multi-cluster allocations ==" +$QEMU_IO -c "write -P 0x77 48k 15k" $TEST_IMG | _filter_qemu_io +$QEMU_IO -c "write -P 0x88 66k 14k" $TEST_IMG | _filter_qemu_io +$QEMU_IO -c "write -P 0x99 83k 15k" $TEST_IMG | _filter_qemu_io + +$QEMU_IO -c "read -P 0x77 48k 15k" $TEST_IMG | _filter_qemu_io +backing_io $((63 * 1024)) 6 read | $QEMU_IO $TEST_IMG | _filter_qemu_io +$QEMU_IO -c "read -P 0x88 66k 14k" $TEST_IMG | _filter_qemu_io +backing_io $((80 * 1024)) 6 read | $QEMU_IO $TEST_IMG | _filter_qemu_io +$QEMU_IO -c "read -P 0x99 83k 15k" $TEST_IMG | _filter_qemu_io +backing_io $((98 * 1024)) 4 read | $QEMU_IO $TEST_IMG | _filter_qemu_io + +_check_test_img + +# success, all done +echo "*** done" +rm -f $seq.full +status=0 diff --git a/tests/qemu-iotests/037.out b/tests/qemu-iotests/037.out new file mode 100644 index 0000000000..deb8a3be43 --- /dev/null +++ b/tests/qemu-iotests/037.out @@ -0,0 +1,645 @@ +QA output created by 037 + +== creating backing file for COW tests == +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 +qemu-io> wrote 512/512 bytes at offset 0 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 512 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 1024 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 1536 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 2048 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 
512/512 bytes at offset 2560 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 3072 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 3584 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 4096 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 4608 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 5120 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 5632 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 6144 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 6656 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 7168 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 7680 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 8192 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 8704 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 9216 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 9728 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 10240 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 10752 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 11264 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 11776 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 12288 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 12800 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 13312 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 13824 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 14336 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 14848 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 15360 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 15872 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 16384 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 16896 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 17408 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 17920 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 18432 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 18944 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 19456 +512 bytes, X ops; XX:XX:XX.X (XXX 
YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 19968 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 20480 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 20992 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 21504 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 22016 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 22528 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 23040 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 23552 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 24064 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 24576 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 25088 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 25600 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 26112 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 26624 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 27136 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 27648 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 28160 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 28672 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 29184 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 29696 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 30208 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 30720 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 31232 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 31744 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 32256 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 32768 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 33280 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 33792 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 34304 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 34816 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 35328 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 35840 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 36352 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes 
at offset 36864 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 37376 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 37888 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 38400 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 38912 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 39424 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 39936 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 40448 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 40960 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 41472 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 41984 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 42496 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 43008 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 43520 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 44032 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 44544 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 45056 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 45568 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 46080 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 46592 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 47104 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 47616 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 48128 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 48640 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 49152 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 49664 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 50176 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 50688 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 51200 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 51712 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 52224 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 52736 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 53248 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 53760 +512 bytes, X ops; XX:XX:XX.X (XXX 
YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 54272 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 54784 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 55296 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 55808 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 56320 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 56832 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 57344 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 57856 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 58368 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 58880 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 59392 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 59904 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 60416 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 60928 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 61440 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 61952 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 62464 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 62976 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 63488 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 64000 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 64512 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 65024 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 65536 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 66048 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 66560 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 67072 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 67584 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 68096 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 68608 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 69120 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 69632 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 70144 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 70656 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes 
at offset 71168 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 71680 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 72192 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 72704 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 73216 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 73728 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 74240 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 74752 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 75264 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 75776 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 76288 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 76800 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 77312 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 77824 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 78336 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 78848 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 79360 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 79872 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 80384 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 80896 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 81408 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 81920 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 82432 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 82944 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 83456 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 83968 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 84480 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 84992 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 85504 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 86016 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 86528 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 87040 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 87552 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 88064 +512 bytes, X ops; XX:XX:XX.X (XXX 
YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 88576 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 89088 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 89600 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 90112 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 90624 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 91136 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 91648 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 92160 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 92672 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 93184 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 93696 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 94208 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 94720 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 95232 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 95744 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 96256 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 96768 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 97280 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 97792 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 98304 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 98816 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 99328 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 99840 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 100352 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 100864 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 101376 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 101888 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 102400 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 102912 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 103424 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 103936 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 104448 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 104960 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 
512/512 bytes at offset 105472 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 105984 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 106496 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 107008 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 107520 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 108032 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 108544 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 109056 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 109568 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 110080 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 110592 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 111104 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 111616 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 112128 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 112640 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 113152 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 113664 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 114176 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 114688 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 115200 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 115712 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 116224 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 116736 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 117248 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 117760 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 118272 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 118784 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 119296 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 119808 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 120320 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 120832 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 121344 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 121856 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 
122368 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 122880 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 123392 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 123904 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 124416 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 124928 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 125440 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 125952 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 126464 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 126976 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 127488 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 128000 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 128512 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 129024 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 129536 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 130048 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 512/512 bytes at offset 130560 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=6442450944 backing_file='TEST_DIR/t.IMGFMT.base' + +== COW in a single cluster == +wrote 2048/2048 bytes at offset 0 +2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 2048/2048 bytes at offset 6144 +2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 2048/2048 bytes at offset 9216 +2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 2048/2048 bytes at offset 0 +2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 2048 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 2560 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 3072 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 3584 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 4096 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 4608 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 5120 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 5632 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 2048/2048 bytes at offset 6144 +2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 8192 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 8704 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 2048/2048 bytes at offset 9216 +2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 
bytes at offset 11264 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 11776 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> +== COW in two-cluster allocations == +wrote 6144/6144 bytes at offset 16384 +6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 6144/6144 bytes at offset 26624 +6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 5120/5120 bytes at offset 33792 +5 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 6144/6144 bytes at offset 16384 +6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 22528 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 23040 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 23552 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 24064 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 24576 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 25088 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 25600 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 26112 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 6144/6144 bytes at offset 26624 +6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 32768 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 33280 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 5120/5120 bytes at offset 33792 +5 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 38912 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 39424 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 39936 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 40448 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> +== COW in multi-cluster allocations == +wrote 15360/15360 bytes at offset 49152 +15 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 14336/14336 bytes at offset 67584 +14 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 15360/15360 bytes at offset 84992 +15 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 15360/15360 bytes at offset 49152 +15 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 64512 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 65024 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 65536 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 66048 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 66560 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 67072 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 14336/14336 bytes at offset 67584 +14 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 512/512 bytes at offset 81920 +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec 
and XXX ops/sec)
+qemu-io> read 512/512 bytes at offset 82432
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> read 512/512 bytes at offset 82944
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> read 512/512 bytes at offset 83456
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> read 512/512 bytes at offset 83968
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> read 512/512 bytes at offset 84480
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> read 15360/15360 bytes at offset 84992
+15 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> read 512/512 bytes at offset 100352
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> read 512/512 bytes at offset 100864
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> read 512/512 bytes at offset 101376
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> read 512/512 bytes at offset 101888
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> No errors were found on the image.
+*** done
diff --git a/tests/qemu-iotests/038 b/tests/qemu-iotests/038
new file mode 100755
index 0000000000..36125eab1e
--- /dev/null
+++ b/tests/qemu-iotests/038
@@ -0,0 +1,133 @@
+#!/bin/bash
+#
+# Test COW from backing files with AIO
+#
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=kwolf@redhat.com
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1	# failure is the default!
+
+_cleanup()
+{
+    _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+
+_supported_fmt qcow2 qed
+_supported_proto generic
+_supported_os Linux
+
+CLUSTER_SIZE=2M
+size=128M
+
+echo
+echo "== creating backing file for COW tests =="
+
+_make_test_img $size
+
+function backing_io()
+{
+    local offset=$1
+    local sectors=$2
+    local op=$3
+    local pattern=0
+    local cur_sec=0
+
+    for i in $(seq 0 $((sectors - 1))); do
+        cur_sec=$((offset / 65536 + i))
+        pattern=$(( ( (cur_sec % 128) + (cur_sec / 128)) % 128 ))
+
+        echo "$op -P $pattern $((cur_sec * 64))k 64k"
+    done
+}
+
+backing_io 0 256 write | $QEMU_IO $TEST_IMG | _filter_qemu_io
+
+mv $TEST_IMG $TEST_IMG.base
+
+_make_test_img -b $TEST_IMG.base 6G
+
+echo
+echo "== Some concurrent requests touching the same cluster =="
+
+function overlay_io()
+{
+    # Start with a request touching two clusters
+    echo aio_write -P 0x80 2020k 80k
+
+    # Then add some requests all over the place
+    for i in $(seq 0 15; seq 17 31; seq 33 47); do
+        echo aio_write -P $((0x81 + i)) $((i * 128))k 64k
+    done
+
+    # Then backwards overwriting part of them
+    for i in $( (seq 0 15; seq 17 31; seq 33 47) | tac); do
+        echo aio_write -P $((0x81 + i)) $((i * 128 + 32))k 64k
+    done
+
+    # And finally crossing the next cluster boundary
+    echo aio_write -P 0x90 4080k 80k
+}
+
+overlay_io | $QEMU_IO $TEST_IMG | _filter_qemu_io |\
+    sed -e 's/bytes at offset [0-9]*/bytes at offset XXX/g'
+
+echo
+echo "== Verify image content =="
+
+function verify_io()
+{
+    echo read -P 31 2016k 4k
+    echo read -P 0x80 2020k 80k
+    echo read -P 32 2100k 12k
+    echo read -P 33 2112k 64k
+
+    echo read -P 63 4064k 16k
+    echo read -P 0x90 4080k 80k
+    echo read -P 65 4160k 64k
+
+    for i in $(seq 0 15; seq 17 31; seq 33 47); do
+        echo read -P $((0x81 + i)) $((i * 128))k 96k
+    done
+
+    for i in $(seq 0 14; seq 16 30; seq 32 47); do
+        local cur_sec=$(( i * 2 + 1 ))
+        local pattern=$(( ( (cur_sec % 128) + (cur_sec / 128)) % 128 ))
+
+        echo read -P $pattern $((i * 128 + 96))k 32k
+    done
+}
+
+verify_io | $QEMU_IO $TEST_IMG | _filter_qemu_io
+
+_check_test_img
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
diff --git a/tests/qemu-iotests/038.out b/tests/qemu-iotests/038.out
new file mode 100644
index 0000000000..acc7629267
--- /dev/null
+++ b/tests/qemu-iotests/038.out
@@ -0,0 +1,909 @@
+QA output created by 038
+
+== creating backing file for COW tests ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+qemu-io> wrote 65536/65536 bytes at offset 0
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> wrote 65536/65536 bytes at offset 65536
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> wrote 65536/65536 bytes at offset 131072
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> wrote 65536/65536 bytes at offset 196608
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> wrote 65536/65536 bytes at offset 262144
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> wrote 65536/65536 bytes at offset 327680
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> wrote 65536/65536 bytes at offset 393216
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> wrote 65536/65536 bytes at offset 458752
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> wrote 65536/65536 bytes at offset 524288
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> wrote 65536/65536 bytes at offset 589824
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-io> wrote 65536/65536 bytes at offset 655360
+64 KiB, X ops; XX:XX:XX.X
(XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 720896 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 786432 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 851968 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 917504 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 983040 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 1048576 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 1114112 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 1179648 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 1245184 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 1310720 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 1376256 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 1441792 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 1507328 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 1572864 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 1638400 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 1703936 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 1769472 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 1835008 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 1900544 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 1966080 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 2031616 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 2097152 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 2162688 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 2228224 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 2293760 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 2359296 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 2424832 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 2490368 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 2555904 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 2621440 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 2686976 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 2752512 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at 
offset 2818048 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 2883584 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 2949120 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3014656 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3080192 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3145728 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3211264 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3276800 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3342336 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3407872 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3473408 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3538944 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3604480 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3670016 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3735552 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3801088 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3866624 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3932160 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 3997696 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 4063232 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 4128768 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 4194304 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 4259840 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 4325376 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 4390912 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 4456448 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 4521984 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 4587520 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 4653056 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 4718592 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 4784128 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 4849664 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 4915200 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX 
ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 4980736 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 5046272 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 5111808 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 5177344 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 5242880 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 5308416 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 5373952 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 5439488 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 5505024 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 5570560 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 5636096 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 5701632 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 5767168 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 5832704 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 5898240 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 5963776 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 6029312 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 6094848 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 6160384 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 6225920 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 6291456 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 6356992 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 6422528 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 6488064 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 6553600 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 6619136 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 6684672 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 6750208 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 6815744 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 6881280 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 6946816 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7012352 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7077888 +64 
KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7143424 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7208960 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7274496 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7340032 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7405568 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7471104 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7536640 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7602176 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7667712 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7733248 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7798784 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7864320 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7929856 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 7995392 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 8060928 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 8126464 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 8192000 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 8257536 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 8323072 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 8388608 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 8454144 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 8519680 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 8585216 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 8650752 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 8716288 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 8781824 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 8847360 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 8912896 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 8978432 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 9043968 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 9109504 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 9175040 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> 
wrote 65536/65536 bytes at offset 9240576 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 9306112 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 9371648 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 9437184 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 9502720 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 9568256 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 9633792 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 9699328 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 9764864 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 9830400 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 9895936 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 9961472 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 10027008 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 10092544 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 10158080 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 10223616 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 10289152 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 10354688 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 10420224 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 10485760 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 10551296 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 10616832 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 10682368 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 10747904 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 10813440 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 10878976 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 10944512 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11010048 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11075584 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11141120 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11206656 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11272192 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11337728 +64 
KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11403264 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11468800 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11534336 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11599872 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11665408 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11730944 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11796480 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11862016 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11927552 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 11993088 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 12058624 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 12124160 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 12189696 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 12255232 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 12320768 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 12386304 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 12451840 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 12517376 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 12582912 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 12648448 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 12713984 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 12779520 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 12845056 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 12910592 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 12976128 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 13041664 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 13107200 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 13172736 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 13238272 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 13303808 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 13369344 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 13434880 +64 KiB, X ops; XX:XX:XX.X (XXX 
YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 13500416 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 13565952 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 13631488 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 13697024 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 13762560 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 13828096 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 13893632 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 13959168 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 14024704 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 14090240 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 14155776 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 14221312 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 14286848 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 14352384 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 14417920 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 14483456 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 14548992 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 14614528 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 14680064 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 14745600 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 14811136 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 14876672 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 14942208 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 15007744 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 15073280 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 15138816 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 15204352 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 15269888 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 15335424 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 15400960 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 15466496 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 15532032 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> 
wrote 65536/65536 bytes at offset 15597568 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 15663104 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 15728640 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 15794176 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 15859712 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 15925248 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 15990784 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 16056320 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 16121856 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 16187392 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 16252928 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 16318464 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 16384000 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 16449536 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 16515072 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 16580608 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 16646144 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> wrote 65536/65536 bytes at offset 16711680 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=6442450944 backing_file='TEST_DIR/t.IMGFMT.base' + +== Some concurrent requests touching the same cluster == +qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> qemu-io> wrote 81920/81920 bytes at offset XXX +80 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and 
XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and 
XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and 
XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset XXX +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 81920/81920 bytes at offset XXX +80 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== Verify image content == +qemu-io> read 4096/4096 bytes at offset 2064384 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 81920/81920 bytes at offset 2068480 +80 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 12288/12288 bytes at offset 2150400 +12 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 65536/65536 bytes at offset 2162688 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 16384/16384 bytes at offset 4161536 +16 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 81920/81920 bytes at offset 4177920 +80 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 65536/65536 bytes at offset 4259840 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 0 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 131072 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 262144 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 393216 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 524288 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 655360 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 786432 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 917504 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 1048576 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 1179648 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 1310720 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 1441792 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 1572864 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 
98304/98304 bytes at offset 1703936 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 1835008 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 1966080 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 2228224 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 2359296 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 2490368 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 2621440 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 2752512 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 2883584 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 3014656 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 3145728 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 3276800 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 3407872 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 3538944 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 3670016 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 3801088 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 3932160 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 4063232 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 4325376 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 4456448 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 4587520 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 4718592 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 4849664 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 4980736 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 5111808 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 5242880 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 5373952 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 5505024 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 5636096 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 5767168 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 5898240 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 6029312 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 98304/98304 bytes at offset 6160384 +96 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) 
+qemu-io> read 32768/32768 bytes at offset 98304 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 229376 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 360448 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 491520 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 622592 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 753664 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 884736 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 1015808 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 1146880 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 1277952 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 1409024 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 1540096 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 1671168 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 1802240 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 1933312 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 2195456 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 2326528 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 2457600 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 2588672 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 2719744 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 2850816 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 2981888 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 3112960 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 3244032 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 3375104 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 3506176 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 3637248 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 3768320 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 3899392 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 4030464 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 4292608 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 4423680 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 4554752 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX 
ops/sec) +qemu-io> read 32768/32768 bytes at offset 4685824 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 4816896 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 4947968 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 5079040 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 5210112 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 5341184 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 5472256 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 5603328 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 5734400 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 5865472 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 5996544 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 6127616 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> read 32768/32768 bytes at offset 6258688 +32 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-io> No errors were found on the image. +*** done diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group index 36ebf1a7f4..7a2c92b6e9 100644 --- a/tests/qemu-iotests/group +++ b/tests/qemu-iotests/group @@ -42,3 +42,6 @@ 033 rw auto 034 rw auto backing 035 rw auto quick +036 rw auto quick +037 rw auto backing +038 rw auto backing diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py index e27b40e289..e05b1d640b 100644 --- a/tests/qemu-iotests/iotests.py +++ b/tests/qemu-iotests/iotests.py @@ -54,7 +54,9 @@ class VM(object): self._qemu_log_path = os.path.join(test_dir, 'qemu-log.%d' % os.getpid()) self._args = qemu_args + ['-chardev', 'socket,id=mon,path=' + self._monitor_path, - '-mon', 'chardev=mon,mode=control', '-nographic'] + '-mon', 'chardev=mon,mode=control', + '-qtest', 'stdio', '-machine', 'accel=qtest', + '-display', 'none', '-vga', 'none'] self._num_drives = 0 def add_drive(self, path, opts=''): diff --git a/tests/qemu-iotests/qcow2.py b/tests/qemu-iotests/qcow2.py index e27196aa26..97f37707bc 100755 --- a/tests/qemu-iotests/qcow2.py +++ b/tests/qemu-iotests/qcow2.py @@ -181,10 +181,33 @@ def cmd_del_header_ext(fd, magic): h.update(fd) +def cmd_set_feature_bit(fd, group, bit): + try: + bit = int(bit, 0) + if bit < 0 or bit >= 64: + raise ValueError + except: + print "'%s' is not a valid bit number in range [0, 64)" % bit + sys.exit(1) + + h = QcowHeader(fd) + if group == 'incompatible': + h.incompatible_features |= 1 << bit + elif group == 'compatible': + h.compatible_features |= 1 << bit + elif group == 'autoclear': + h.autoclear_features |= 1 << bit + else: + print "'%s' is not a valid group, try 'incompatible', 'compatible', or 'autoclear'" % group + sys.exit(1) + + h.update(fd) + cmds = [ [ 'dump-header', cmd_dump_header, 0, 'Dump image header and header extensions' ], [ 'add-header-ext', cmd_add_header_ext, 2, 'Add a header extension' ], [ 'del-header-ext', cmd_del_header_ext, 1, 'Delete a header extension' ], + [ 'set-feature-bit', cmd_set_feature_bit, 2, 'Set a feature bit'], ] def main(filename, cmd, args): diff --git a/tests/tcg/xtensa/test_mmu.S 
b/tests/tcg/xtensa/test_mmu.S index 52d5774212..5d87fbb703 100644 --- a/tests/tcg/xtensa/test_mmu.S +++ b/tests/tcg/xtensa/test_mmu.S @@ -293,26 +293,219 @@ test store_prohibited assert eq, a2, a3 test_end -test dtlb_autoload - set_vector kernel, 0 - - movi a2, 0xd4000000 +/* Set up page table entry vaddr->paddr, ring=pte_ring, attr=pte_attr + * and DTLB way 7 to cover this PTE, ring=pt_ring, attr=pt_attr + */ +.macro pt_setup pt_ring, pt_attr, pte_ring, vaddr, paddr, pte_attr + movi a2, 0x80000000 wsr a2, ptevaddr - movi a3, 0x00001013 - s32i a3, a2, 4 + + movi a3, 0x80000007 | (((\vaddr) >> 10) & 0xfffff000) /* way 7 */ + movi a4, 0x04000003 | ((\pt_ring) << 4) /* PADDR 64M */ + wdtlb a4, a3 + isync + + movi a3, ((\paddr) & 0xfffff000) | ((\pte_ring) << 4) | (\pte_attr) + movi a1, ((\vaddr) >> 12) << 2 + add a2, a1, a2 + s32i a3, a2, 0 + + movi a3, 0x80000007 | (((\vaddr) >> 10) & 0xfffff000) /* way 7 */ + movi a4, 0x04000000 | ((\pt_ring) << 4) | (\pt_attr) /* PADDR 64M */ + wdtlb a4, a3 + isync + + movi a3, (\vaddr) +.endm + +/* out: PS.RING=ring, PS.EXCM=excm, a3=vaddr */ +.macro go_ring ring, excm, vaddr + movi a3, 10f + pitlb a3, a3 + ritlb1 a2, a3 + movi a1, 0x10 + or a2, a2, a1 + movi a1, 0x000ff000 + and a3, a3, a1 + movi a1, 4 + or a3, a3, a1 + witlb a2, a3 + movi a3, 10f + movi a1, 0x000fffff + and a1, a3, a1 + + movi a2, 0 + wsr a2, excvaddr + + movi a3, \vaddr + movi a2, 0x4000f | ((\ring) << 6) | ((\excm) << 4) + jx a1 +10: + wsr a2, ps + isync +.endm + +/* in: a3 -- virtual address to test */ +.macro assert_auto_tlb + movi a2, 0x4000f + wsr a2, ps + isync + pdtlb a2, a3 + movi a1, 0xfffff01f + and a2, a2, a1 + movi a1, 0xfffff000 + and a1, a1, a3 + xor a1, a1, a2 + assert gei, a1, 0x10 + movi a2, 0x14 + assert lt, a1, a2 +.endm + +/* in: a3 -- virtual address to test */ +.macro assert_no_auto_tlb + movi a2, 0x4000f + wsr a2, ps + isync pdtlb a2, a3 movi a1, 0x10 and a1, a1, a2 assert eqi, a1, 0 - l8ui a1, a3, 0 - pdtlb a2, a3 - movi a1, 0xfffff010 - and a1, a1, a2 - movi a3, 0x00001010 - assert eq, a1, a3 - movi a1, 0xf +.endm + +.macro assert_sr sr, v + rsr a2, \sr + movi a1, (\v) + assert eq, a1, a2 +.endm + +.macro assert_epc1_1m vaddr + movi a2, (\vaddr) + movi a1, 0xfffff and a1, a1, a2 - assert lti, a1, 4 + rsr a2, epc1 + assert eq, a1, a2 +.endm + +test dtlb_autoload + set_vector kernel, 0 + + pt_setup 0, 3, 1, 0x1000, 0x1000, 3 + assert_no_auto_tlb + + l8ui a1, a3, 0 + + rsr a2, excvaddr + assert eq, a2, a3 + + assert_auto_tlb +test_end + +test autoload_load_store_privilege + set_vector kernel, 0 + set_vector double, 2f + + pt_setup 0, 3, 0, 0x2000, 0x2000, 3 + movi a3, 0x2004 + assert_no_auto_tlb + + movi a2, 0x4005f /* ring 1 + excm => cring == 0 */ + wsr a2, ps + isync +1: + l32e a2, a3, -4 /* ring used */ + test_fail +2: + rsr a2, excvaddr + addi a1, a3, -4 + assert eq, a1, a2 + + assert_auto_tlb + assert_sr depc, 1b + assert_sr exccause, 26 +test_end + +test autoload_pte_load_prohibited + set_vector kernel, 2f + + pt_setup 0, 3, 0, 0x3000, 0, 0xc + assert_no_auto_tlb +1: + l32i a2, a3, 0 + test_fail +2: + rsr a2, excvaddr + assert eq, a2, a3 + + assert_auto_tlb + assert_sr epc1, 1b + assert_sr exccause, 28 +test_end + +test autoload_pt_load_prohibited + set_vector kernel, 2f + + pt_setup 0, 0xc, 0, 0x4000, 0x4000, 3 + assert_no_auto_tlb +1: + l32i a2, a3, 0 + test_fail +2: + rsr a2, excvaddr + assert eq, a2, a3 + + assert_no_auto_tlb + assert_sr epc1, 1b + assert_sr exccause, 24 +test_end + +test autoload_pt_privilege + set_vector kernel, 2f + pt_setup 0, 3, 1, 
0x5000, 0, 3 + go_ring 1, 0, 0x5001 + + l8ui a2, a3, 0 +1: + syscall +2: + rsr a2, excvaddr + assert eq, a2, a3 + + assert_auto_tlb + assert_epc1_1m 1b + assert_sr exccause, 1 +test_end + +test autoload_pte_privilege + set_vector kernel, 2f + pt_setup 0, 3, 0, 0x6000, 0, 3 + go_ring 1, 0, 0x6001 +1: + l8ui a2, a3, 0 + syscall +2: + rsr a2, excvaddr + assert eq, a2, a3 + + assert_auto_tlb + assert_epc1_1m 1b + assert_sr exccause, 26 +test_end + +test autoload_3_level_pt + set_vector kernel, 2f + pt_setup 1, 3, 1, 0x00400000, 0, 3 + pt_setup 1, 3, 1, 0x80001000, 0x2000000, 3 + go_ring 1, 0, 0x00400001 +1: + l8ui a2, a3, 0 + syscall +2: + rsr a2, excvaddr + assert eq, a2, a3 + + assert_no_auto_tlb + assert_epc1_1m 1b + assert_sr exccause, 24 test_end test_suite_end diff --git a/tests/test-string-output-visitor.c b/tests/test-string-output-visitor.c index 22909b84ef..608f14a5de 100644 --- a/tests/test-string-output-visitor.c +++ b/tests/test-string-output-visitor.c @@ -84,7 +84,7 @@ static void test_visitor_out_number(TestOutputVisitorData *data, str = string_output_get_string(data->sov); g_assert(str != NULL); - g_assert_cmpstr(str, ==, "3.14"); + g_assert_cmpstr(str, ==, "3.140000"); g_free(str); } diff --git a/tests/test-visitor-serialization.c b/tests/test-visitor-serialization.c new file mode 100644 index 0000000000..b8ad16fc9e --- /dev/null +++ b/tests/test-visitor-serialization.c @@ -0,0 +1,784 @@ +/* + * Unit-tests for visitor-based serialization + * + * Copyright IBM, Corp. 2012 + * + * Authors: + * Michael Roth <mdroth@linux.vnet.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include <glib.h> +#include <stdlib.h> +#include <stdint.h> +#include <float.h> +#include "test-qapi-types.h" +#include "test-qapi-visit.h" +#include "qemu-objects.h" +#include "qapi/qmp-input-visitor.h" +#include "qapi/qmp-output-visitor.h" +#include "qapi/string-input-visitor.h" +#include "qapi/string-output-visitor.h" + +typedef struct PrimitiveType { + union { + const char *string; + bool boolean; + double number; + int64_t integer; + uint8_t u8; + uint16_t u16; + uint32_t u32; + uint64_t u64; + int8_t s8; + int16_t s16; + int32_t s32; + int64_t s64; + intmax_t max; + } value; + enum { + PTYPE_STRING = 0, + PTYPE_BOOLEAN, + PTYPE_NUMBER, + PTYPE_INTEGER, + PTYPE_U8, + PTYPE_U16, + PTYPE_U32, + PTYPE_U64, + PTYPE_S8, + PTYPE_S16, + PTYPE_S32, + PTYPE_S64, + PTYPE_EOL, + } type; + const char *description; +} PrimitiveType; + +/* test helpers */ + +static void visit_primitive_type(Visitor *v, void **native, Error **errp) +{ + PrimitiveType *pt = *native; + switch(pt->type) { + case PTYPE_STRING: + visit_type_str(v, (char **)&pt->value.string, NULL, errp); + break; + case PTYPE_BOOLEAN: + visit_type_bool(v, &pt->value.boolean, NULL, errp); + break; + case PTYPE_NUMBER: + visit_type_number(v, &pt->value.number, NULL, errp); + break; + case PTYPE_INTEGER: + visit_type_int(v, &pt->value.integer, NULL, errp); + break; + case PTYPE_U8: + visit_type_uint8(v, &pt->value.u8, NULL, errp); + break; + case PTYPE_U16: + visit_type_uint16(v, &pt->value.u16, NULL, errp); + break; + case PTYPE_U32: + visit_type_uint32(v, &pt->value.u32, NULL, errp); + break; + case PTYPE_U64: + visit_type_uint64(v, &pt->value.u64, NULL, errp); + break; + case PTYPE_S8: + visit_type_int8(v, &pt->value.s8, NULL, errp); + break; + case PTYPE_S16: + visit_type_int16(v, &pt->value.s16, NULL, errp); + break; + case PTYPE_S32: + 
visit_type_int32(v, &pt->value.s32, NULL, errp); + break; + case PTYPE_S64: + visit_type_int64(v, &pt->value.s64, NULL, errp); + break; + case PTYPE_EOL: + g_assert(false); + } +} + +typedef struct TestStruct +{ + int64_t integer; + bool boolean; + char *string; +} TestStruct; + +static void visit_type_TestStruct(Visitor *v, TestStruct **obj, + const char *name, Error **errp) +{ + visit_start_struct(v, (void **)obj, NULL, name, sizeof(TestStruct), errp); + + visit_type_int(v, &(*obj)->integer, "integer", errp); + visit_type_bool(v, &(*obj)->boolean, "boolean", errp); + visit_type_str(v, &(*obj)->string, "string", errp); + + visit_end_struct(v, errp); +} + +static TestStruct *struct_create(void) +{ + TestStruct *ts = g_malloc0(sizeof(*ts)); + ts->integer = -42; + ts->boolean = true; + ts->string = strdup("test string"); + return ts; +} + +static void struct_compare(TestStruct *ts1, TestStruct *ts2) +{ + g_assert(ts1); + g_assert(ts2); + g_assert_cmpint(ts1->integer, ==, ts2->integer); + g_assert(ts1->boolean == ts2->boolean); + g_assert_cmpstr(ts1->string, ==, ts2->string); +} + +static void struct_cleanup(TestStruct *ts) +{ + g_free(ts->string); + g_free(ts); +} + +static void visit_struct(Visitor *v, void **native, Error **errp) +{ + visit_type_TestStruct(v, (TestStruct **)native, NULL, errp); +} + +static UserDefNested *nested_struct_create(void) +{ + UserDefNested *udnp = g_malloc0(sizeof(*udnp)); + udnp->string0 = strdup("test_string0"); + udnp->dict1.string1 = strdup("test_string1"); + udnp->dict1.dict2.userdef1 = g_malloc0(sizeof(UserDefOne)); + udnp->dict1.dict2.userdef1->integer = 42; + udnp->dict1.dict2.userdef1->string = strdup("test_string"); + udnp->dict1.dict2.string2 = strdup("test_string2"); + udnp->dict1.has_dict3 = true; + udnp->dict1.dict3.userdef2 = g_malloc0(sizeof(UserDefOne)); + udnp->dict1.dict3.userdef2->integer = 43; + udnp->dict1.dict3.userdef2->string = strdup("test_string"); + udnp->dict1.dict3.string3 = strdup("test_string3"); + return udnp; +} + +static void nested_struct_compare(UserDefNested *udnp1, UserDefNested *udnp2) +{ + g_assert(udnp1); + g_assert(udnp2); + g_assert_cmpstr(udnp1->string0, ==, udnp2->string0); + g_assert_cmpstr(udnp1->dict1.string1, ==, udnp2->dict1.string1); + g_assert_cmpint(udnp1->dict1.dict2.userdef1->integer, ==, + udnp2->dict1.dict2.userdef1->integer); + g_assert_cmpstr(udnp1->dict1.dict2.userdef1->string, ==, + udnp2->dict1.dict2.userdef1->string); + g_assert_cmpstr(udnp1->dict1.dict2.string2, ==, udnp2->dict1.dict2.string2); + g_assert(udnp1->dict1.has_dict3 == udnp2->dict1.has_dict3); + g_assert_cmpint(udnp1->dict1.dict3.userdef2->integer, ==, + udnp2->dict1.dict3.userdef2->integer); + g_assert_cmpstr(udnp1->dict1.dict3.userdef2->string, ==, + udnp2->dict1.dict3.userdef2->string); + g_assert_cmpstr(udnp1->dict1.dict3.string3, ==, udnp2->dict1.dict3.string3); +} + +static void nested_struct_cleanup(UserDefNested *udnp) +{ + qapi_free_UserDefNested(udnp); +} + +static void visit_nested_struct(Visitor *v, void **native, Error **errp) +{ + visit_type_UserDefNested(v, (UserDefNested **)native, NULL, errp); +} + +static void visit_nested_struct_list(Visitor *v, void **native, Error **errp) +{ + visit_type_UserDefNestedList(v, (UserDefNestedList **)native, NULL, errp); +} + +/* test cases */ + +typedef void (*VisitorFunc)(Visitor *v, void **native, Error **errp); + +typedef enum VisitorCapabilities { + VCAP_PRIMITIVES = 1, + VCAP_STRUCTURES = 2, + VCAP_LISTS = 4, +} VisitorCapabilities; + +typedef struct SerializeOps { + void 
(*serialize)(void *native_in, void **datap, + VisitorFunc visit, Error **errp); + void (*deserialize)(void **native_out, void *datap, + VisitorFunc visit, Error **errp); + void (*cleanup)(void *datap); + const char *type; + VisitorCapabilities caps; +} SerializeOps; + +typedef struct TestArgs { + const SerializeOps *ops; + void *test_data; +} TestArgs; + +#define FLOAT_STRING_PRECISION 6 /* corresponding to n in %.nf formatting */ +static gsize calc_float_string_storage(double value) +{ + int whole_value = value; + gsize i = 0; + do { + i++; + } while (whole_value /= 10); + return i + 2 + FLOAT_STRING_PRECISION; +} + +static void test_primitives(gconstpointer opaque) +{ + TestArgs *args = (TestArgs *) opaque; + const SerializeOps *ops = args->ops; + PrimitiveType *pt = args->test_data; + PrimitiveType *pt_copy = g_malloc0(sizeof(*pt_copy)); + Error *err = NULL; + void *serialize_data; + char *double1, *double2; + + pt_copy->type = pt->type; + ops->serialize(pt, &serialize_data, visit_primitive_type, &err); + ops->deserialize((void **)&pt_copy, serialize_data, visit_primitive_type, &err); + + g_assert(err == NULL); + g_assert(pt_copy != NULL); + if (pt->type == PTYPE_STRING) { + g_assert_cmpstr(pt->value.string, ==, pt_copy->value.string); + } else if (pt->type == PTYPE_NUMBER) { + /* we serialize with %f for our reference visitors, so rather than fuzzy + * floating math to test "equality", just compare the formatted values + */ + double1 = g_malloc0(calc_float_string_storage(pt->value.number)); + double2 = g_malloc0(calc_float_string_storage(pt_copy->value.number)); + g_assert_cmpstr(double1, ==, double2); + g_free(double1); + g_free(double2); + } else if (pt->type == PTYPE_BOOLEAN) { + g_assert_cmpint(!!pt->value.max, ==, !!pt->value.max); + } else { + g_assert_cmpint(pt->value.max, ==, pt_copy->value.max); + } + + ops->cleanup(serialize_data); + g_free(args); +} + +static void test_struct(gconstpointer opaque) +{ + TestArgs *args = (TestArgs *) opaque; + const SerializeOps *ops = args->ops; + TestStruct *ts = struct_create(); + TestStruct *ts_copy = NULL; + Error *err = NULL; + void *serialize_data; + + ops->serialize(ts, &serialize_data, visit_struct, &err); + ops->deserialize((void **)&ts_copy, serialize_data, visit_struct, &err); + + g_assert(err == NULL); + struct_compare(ts, ts_copy); + + struct_cleanup(ts); + struct_cleanup(ts_copy); + + ops->cleanup(serialize_data); + g_free(args); +} + +static void test_nested_struct(gconstpointer opaque) +{ + TestArgs *args = (TestArgs *) opaque; + const SerializeOps *ops = args->ops; + UserDefNested *udnp = nested_struct_create(); + UserDefNested *udnp_copy = NULL; + Error *err = NULL; + void *serialize_data; + + ops->serialize(udnp, &serialize_data, visit_nested_struct, &err); + ops->deserialize((void **)&udnp_copy, serialize_data, visit_nested_struct, &err); + + g_assert(err == NULL); + nested_struct_compare(udnp, udnp_copy); + + nested_struct_cleanup(udnp); + nested_struct_cleanup(udnp_copy); + + ops->cleanup(serialize_data); + g_free(args); +} + +static void test_nested_struct_list(gconstpointer opaque) +{ + TestArgs *args = (TestArgs *) opaque; + const SerializeOps *ops = args->ops; + UserDefNestedList *listp = NULL, *tmp, *tmp_copy, *listp_copy = NULL; + Error *err = NULL; + void *serialize_data; + int i = 0; + + for (i = 0; i < 8; i++) { + tmp = g_malloc0(sizeof(UserDefNestedList)); + tmp->value = nested_struct_create(); + tmp->next = listp; + listp = tmp; + } + + ops->serialize(listp, &serialize_data, visit_nested_struct_list, &err); + 
ops->deserialize((void **)&listp_copy, serialize_data, + visit_nested_struct_list, &err); + + g_assert(err == NULL); + + tmp = listp; + tmp_copy = listp_copy; + while (listp_copy) { + g_assert(listp); + nested_struct_compare(listp->value, listp_copy->value); + listp = listp->next; + listp_copy = listp_copy->next; + } + + qapi_free_UserDefNestedList(tmp); + qapi_free_UserDefNestedList(tmp_copy); + + ops->cleanup(serialize_data); + g_free(args); +} + +PrimitiveType pt_values[] = { + /* string tests */ + { + .description = "string_empty", + .type = PTYPE_STRING, + .value.string = "", + }, + { + .description = "string_whitespace", + .type = PTYPE_STRING, + .value.string = "a b c\td", + }, + { + .description = "string_newlines", + .type = PTYPE_STRING, + .value.string = "a\nb\n", + }, + { + .description = "string_commas", + .type = PTYPE_STRING, + .value.string = "a,b, c,d", + }, + { + .description = "string_single_quoted", + .type = PTYPE_STRING, + .value.string = "'a b',cd", + }, + { + .description = "string_double_quoted", + .type = PTYPE_STRING, + .value.string = "\"a b\",cd", + }, + /* boolean tests */ + { + .description = "boolean_true1", + .type = PTYPE_BOOLEAN, + .value.boolean = true, + }, + { + .description = "boolean_true2", + .type = PTYPE_BOOLEAN, + .value.boolean = 8, + }, + { + .description = "boolean_true3", + .type = PTYPE_BOOLEAN, + .value.boolean = -1, + }, + { + .description = "boolean_false1", + .type = PTYPE_BOOLEAN, + .value.boolean = false, + }, + { + .description = "boolean_false2", + .type = PTYPE_BOOLEAN, + .value.boolean = 0, + }, + /* number tests (double) */ + /* note: we format these to %.6f before comparing, since that's how + * we serialize them and it doesn't make sense to check precision + * beyond that. + */ + { + .description = "number_sanity1", + .type = PTYPE_NUMBER, + .value.number = -1, + }, + { + .description = "number_sanity2", + .type = PTYPE_NUMBER, + .value.number = 3.14159265, + }, + { + .description = "number_min", + .type = PTYPE_NUMBER, + .value.number = DBL_MIN, + }, + { + .description = "number_max", + .type = PTYPE_NUMBER, + .value.number = DBL_MAX, + }, + /* integer tests (int64) */ + { + .description = "integer_sanity1", + .type = PTYPE_INTEGER, + .value.integer = -1, + }, + { + .description = "integer_sanity2", + .type = PTYPE_INTEGER, + .value.integer = INT64_MAX / 2 + 1, + }, + { + .description = "integer_min", + .type = PTYPE_INTEGER, + .value.integer = INT64_MIN, + }, + { + .description = "integer_max", + .type = PTYPE_INTEGER, + .value.integer = INT64_MAX, + }, + /* uint8 tests */ + { + .description = "uint8_sanity1", + .type = PTYPE_U8, + .value.u8 = 1, + }, + { + .description = "uint8_sanity2", + .type = PTYPE_U8, + .value.u8 = UINT8_MAX / 2 + 1, + }, + { + .description = "uint8_min", + .type = PTYPE_U8, + .value.u8 = 0, + }, + { + .description = "uint8_max", + .type = PTYPE_U8, + .value.u8 = UINT8_MAX, + }, + /* uint16 tests */ + { + .description = "uint16_sanity1", + .type = PTYPE_U16, + .value.u16 = 1, + }, + { + .description = "uint16_sanity2", + .type = PTYPE_U16, + .value.u16 = UINT16_MAX / 2 + 1, + }, + { + .description = "uint16_min", + .type = PTYPE_U16, + .value.u16 = 0, + }, + { + .description = "uint16_max", + .type = PTYPE_U16, + .value.u16 = UINT16_MAX, + }, + /* uint32 tests */ + { + .description = "uint32_sanity1", + .type = PTYPE_U32, + .value.u32 = 1, + }, + { + .description = "uint32_sanity2", + .type = PTYPE_U32, + .value.u32 = UINT32_MAX / 2 + 1, + }, + { + .description = "uint32_min", + .type = PTYPE_U32, + 
.value.u32 = 0, + }, + { + .description = "uint32_max", + .type = PTYPE_U32, + .value.u32 = UINT32_MAX, + }, + /* uint64 tests */ + { + .description = "uint64_sanity1", + .type = PTYPE_U64, + .value.u64 = 1, + }, + { + .description = "uint64_sanity2", + .type = PTYPE_U64, + .value.u64 = UINT64_MAX / 2 + 1, + }, + { + .description = "uint64_min", + .type = PTYPE_U64, + .value.u64 = 0, + }, + { + .description = "uint64_max", + .type = PTYPE_U64, + .value.u64 = UINT64_MAX, + }, + /* int8 tests */ + { + .description = "int8_sanity1", + .type = PTYPE_S8, + .value.s8 = -1, + }, + { + .description = "int8_sanity2", + .type = PTYPE_S8, + .value.s8 = INT8_MAX / 2 + 1, + }, + { + .description = "int8_min", + .type = PTYPE_S8, + .value.s8 = INT8_MIN, + }, + { + .description = "int8_max", + .type = PTYPE_S8, + .value.s8 = INT8_MAX, + }, + /* int16 tests */ + { + .description = "int16_sanity1", + .type = PTYPE_S16, + .value.s16 = -1, + }, + { + .description = "int16_sanity2", + .type = PTYPE_S16, + .value.s16 = INT16_MAX / 2 + 1, + }, + { + .description = "int16_min", + .type = PTYPE_S16, + .value.s16 = INT16_MIN, + }, + { + .description = "int16_max", + .type = PTYPE_S16, + .value.s16 = INT16_MAX, + }, + /* int32 tests */ + { + .description = "int32_sanity1", + .type = PTYPE_S32, + .value.s32 = -1, + }, + { + .description = "int32_sanity2", + .type = PTYPE_S32, + .value.s32 = INT32_MAX / 2 + 1, + }, + { + .description = "int32_min", + .type = PTYPE_S32, + .value.s32 = INT32_MIN, + }, + { + .description = "int32_max", + .type = PTYPE_S32, + .value.s32 = INT32_MAX, + }, + /* int64 tests */ + { + .description = "int64_sanity1", + .type = PTYPE_S64, + .value.s64 = -1, + }, + { + .description = "int64_sanity2", + .type = PTYPE_S64, + .value.s64 = INT64_MAX / 2 + 1, + }, + { + .description = "int64_min", + .type = PTYPE_S64, + .value.s64 = INT64_MIN, + }, + { + .description = "int64_max", + .type = PTYPE_S64, + .value.s64 = INT64_MAX, + }, + { .type = PTYPE_EOL } +}; + +/* visitor-specific op implementations */ + +typedef struct QmpSerializeData { + QmpOutputVisitor *qov; + QmpInputVisitor *qiv; +} QmpSerializeData; + +static void qmp_serialize(void *native_in, void **datap, + VisitorFunc visit, Error **errp) +{ + QmpSerializeData *d = g_malloc0(sizeof(*d)); + + d->qov = qmp_output_visitor_new(); + visit(qmp_output_get_visitor(d->qov), &native_in, errp); + *datap = d; +} + +static void qmp_deserialize(void **native_out, void *datap, + VisitorFunc visit, Error **errp) +{ + QmpSerializeData *d = datap; + QString *output_json = qobject_to_json(qmp_output_get_qobject(d->qov)); + QObject *obj = qobject_from_json(qstring_get_str(output_json)); + + QDECREF(output_json); + d->qiv = qmp_input_visitor_new(obj); + visit(qmp_input_get_visitor(d->qiv), native_out, errp); +} + +static void qmp_cleanup(void *datap) +{ + QmpSerializeData *d = datap; + qmp_output_visitor_cleanup(d->qov); + qmp_input_visitor_cleanup(d->qiv); +} + +typedef struct StringSerializeData { + StringOutputVisitor *sov; + StringInputVisitor *siv; +} StringSerializeData; + +static void string_serialize(void *native_in, void **datap, + VisitorFunc visit, Error **errp) +{ + StringSerializeData *d = g_malloc0(sizeof(*d)); + + d->sov = string_output_visitor_new(); + visit(string_output_get_visitor(d->sov), &native_in, errp); + *datap = d; +} + +static void string_deserialize(void **native_out, void *datap, + VisitorFunc visit, Error **errp) +{ + StringSerializeData *d = datap; + + d->siv = string_input_visitor_new(string_output_get_string(d->sov)); + 
visit(string_input_get_visitor(d->siv), native_out, errp); +} + +static void string_cleanup(void *datap) +{ + StringSerializeData *d = datap; + string_output_visitor_cleanup(d->sov); + string_input_visitor_cleanup(d->siv); +} + +/* visitor registration, test harness */ + +/* note: to function interchangeably as a serialization mechanism your + * visitor test implementation should pass the test cases for all visitor + * capabilities: primitives, structures, and lists + */ +static const SerializeOps visitors[] = { + { + .type = "QMP", + .serialize = qmp_serialize, + .deserialize = qmp_deserialize, + .cleanup = qmp_cleanup, + .caps = VCAP_PRIMITIVES | VCAP_STRUCTURES | VCAP_LISTS + }, + { + .type = "String", + .serialize = string_serialize, + .deserialize = string_deserialize, + .cleanup = string_cleanup, + .caps = VCAP_PRIMITIVES + }, + { NULL } +}; + +static void add_visitor_type(const SerializeOps *ops) +{ + char testname_prefix[128]; + char testname[128]; + TestArgs *args; + int i = 0; + + sprintf(testname_prefix, "/visitor/serialization/%s", ops->type); + + if (ops->caps & VCAP_PRIMITIVES) { + while (pt_values[i].type != PTYPE_EOL) { + sprintf(testname, "%s/primitives/%s", testname_prefix, + pt_values[i].description); + args = g_malloc0(sizeof(*args)); + args->ops = ops; + args->test_data = &pt_values[i]; + g_test_add_data_func(testname, args, test_primitives); + i++; + } + } + + if (ops->caps & VCAP_STRUCTURES) { + sprintf(testname, "%s/struct", testname_prefix); + args = g_malloc0(sizeof(*args)); + args->ops = ops; + args->test_data = NULL; + g_test_add_data_func(testname, args, test_struct); + + sprintf(testname, "%s/nested_struct", testname_prefix); + args = g_malloc0(sizeof(*args)); + args->ops = ops; + args->test_data = NULL; + g_test_add_data_func(testname, args, test_nested_struct); + } + + if (ops->caps & VCAP_LISTS) { + sprintf(testname, "%s/nested_struct_list", testname_prefix); + args = g_malloc0(sizeof(*args)); + args->ops = ops; + args->test_data = NULL; + g_test_add_data_func(testname, args, test_nested_struct_list); + } +} + +int main(int argc, char **argv) +{ + int i = 0; + + g_test_init(&argc, &argv, NULL); + + while (visitors[i].type != NULL) { + add_visitor_type(&visitors[i]); + i++; + } + + g_test_run(); + + return 0; +} diff --git a/trace-events b/trace-events index 45c6bc1271..1f9fc98c89 100644 --- a/trace-events +++ b/trace-events @@ -252,24 +252,26 @@ usb_ehci_qtd_fields(uint32_t addr, int tbytes, int cpage, int cerr, int pid) "QT usb_ehci_qtd_bits(uint32_t addr, int ioc, int active, int halt, int babble, int xacterr) "QTD @ %08x - ioc %d, active %d, halt %d, babble %d, xacterr %d" usb_ehci_itd(uint32_t addr, uint32_t nxt, uint32_t mplen, uint32_t mult, uint32_t ep, uint32_t devaddr) "ITD @ %08x: next %08x - mplen %d, mult %d, ep %d, dev %d" usb_ehci_sitd(uint32_t addr, uint32_t nxt, uint32_t active) "ITD @ %08x: next %08x - active %d" -usb_ehci_port_attach(uint32_t port, const char *device) "attach port #%d - %s" -usb_ehci_port_detach(uint32_t port) "detach port #%d" +usb_ehci_port_attach(uint32_t port, const char *owner, const char *device) "attach port #%d, owner %s, device %s" +usb_ehci_port_detach(uint32_t port, const char *owner) "detach port #%d, owner %s" usb_ehci_port_reset(uint32_t port, int enable) "reset port #%d - %d" usb_ehci_data(int rw, uint32_t cpage, uint32_t offset, uint32_t addr, uint32_t len, uint32_t bufpos) "write %d, cpage %d, offset 0x%03x, addr 0x%08x, len %d, bufpos %d" usb_ehci_queue_action(void *q, const char *action) "q %p: %s" 
+usb_ehci_packet_action(void *q, void *p, const char *action) "q %p p %p: %s" +usb_ehci_interrupt(uint32_t level, uint32_t sts, uint32_t mask) "level %d, sts 0x%x, mask 0x%x" # hw/usb/hcd-uhci.c usb_uhci_reset(void) "=== RESET ===" usb_uhci_schedule_start(void) "" usb_uhci_schedule_stop(void) "" usb_uhci_frame_start(uint32_t num) "nr %d" +usb_uhci_frame_stop_bandwidth(void) "" usb_uhci_frame_loop_stop_idle(void) "" -usb_uhci_frame_loop_stop_bandwidth(void) "" usb_uhci_frame_loop_continue(void) "" -usb_uhci_mmio_readw(uint32_t addr, uint32_t val) "addr %04x, ret 0x04%x" -usb_uhci_mmio_writew(uint32_t addr, uint32_t val) "addr %04x, val 0x04%x" -usb_uhci_mmio_readl(uint32_t addr, uint32_t val) "addr %04x, ret 0x08%x" -usb_uhci_mmio_writel(uint32_t addr, uint32_t val) "addr %04x, val 0x08%x" +usb_uhci_mmio_readw(uint32_t addr, uint32_t val) "addr 0x%04x, ret 0x%04x" +usb_uhci_mmio_writew(uint32_t addr, uint32_t val) "addr 0x%04x, val 0x%04x" +usb_uhci_mmio_readl(uint32_t addr, uint32_t val) "addr 0x%04x, ret 0x%08x" +usb_uhci_mmio_writel(uint32_t addr, uint32_t val) "addr 0x%04x, val 0x%08x" usb_uhci_queue_add(uint32_t token) "token 0x%x" usb_uhci_queue_del(uint32_t token) "token 0x%x" usb_uhci_packet_add(uint32_t token, uint32_t addr) "token 0x%x, td 0x%x" @@ -289,6 +291,41 @@ usb_uhci_td_nextqh(uint32_t qh, uint32_t td) "qh 0x%x, td 0x%x" usb_uhci_td_async(uint32_t qh, uint32_t td) "qh 0x%x, td 0x%x" usb_uhci_td_complete(uint32_t qh, uint32_t td) "qh 0x%x, td 0x%x" +# hw/usb/hcd-xhci.c +usb_xhci_reset(void) "=== RESET ===" +usb_xhci_run(void) "" +usb_xhci_stop(void) "" +usb_xhci_cap_read(uint32_t off, uint32_t val) "off 0x%04x, ret 0x%08x" +usb_xhci_oper_read(uint32_t off, uint32_t val) "off 0x%04x, ret 0x%08x" +usb_xhci_port_read(uint32_t port, uint32_t off, uint32_t val) "port %d, off 0x%04x, ret 0x%08x" +usb_xhci_runtime_read(uint32_t off, uint32_t val) "off 0x%04x, ret 0x%08x" +usb_xhci_doorbell_read(uint32_t off, uint32_t val) "off 0x%04x, ret 0x%08x" +usb_xhci_oper_write(uint32_t off, uint32_t val) "off 0x%04x, val 0x%08x" +usb_xhci_port_write(uint32_t port, uint32_t off, uint32_t val) "port %d, off 0x%04x, val 0x%08x" +usb_xhci_runtime_write(uint32_t off, uint32_t val) "off 0x%04x, val 0x%08x" +usb_xhci_doorbell_write(uint32_t off, uint32_t val) "off 0x%04x, val 0x%08x" +usb_xhci_irq_intx(uint32_t level) "level %d" +usb_xhci_irq_msi(uint32_t nr) "nr %d" +usb_xhci_queue_event(uint32_t idx, const char *name, uint64_t param, uint32_t status, uint32_t control) "idx %d, %s, p %016" PRIx64 ", s %08x, c 0x%08x" +usb_xhci_fetch_trb(uint64_t addr, const char *name, uint64_t param, uint32_t status, uint32_t control) "addr %016" PRIx64 ", %s, p %016" PRIx64 ", s %08x, c 0x%08x" +usb_xhci_slot_enable(uint32_t slotid) "slotid %d" +usb_xhci_slot_disable(uint32_t slotid) "slotid %d" +usb_xhci_slot_address(uint32_t slotid) "slotid %d" +usb_xhci_slot_configure(uint32_t slotid) "slotid %d" +usb_xhci_slot_evaluate(uint32_t slotid) "slotid %d" +usb_xhci_slot_reset(uint32_t slotid) "slotid %d" +usb_xhci_ep_enable(uint32_t slotid, uint32_t epid) "slotid %d, epid %d" +usb_xhci_ep_disable(uint32_t slotid, uint32_t epid) "slotid %d, epid %d" +usb_xhci_ep_kick(uint32_t slotid, uint32_t epid) "slotid %d, epid %d" +usb_xhci_ep_stop(uint32_t slotid, uint32_t epid) "slotid %d, epid %d" +usb_xhci_ep_reset(uint32_t slotid, uint32_t epid) "slotid %d, epid %d" +usb_xhci_xfer_start(void *xfer, uint32_t slotid, uint32_t epid, uint32_t length) "%p: slotid %d, epid %d, length %d" +usb_xhci_xfer_async(void *xfer) "%p" 
+usb_xhci_xfer_nak(void *xfer) "%p" +usb_xhci_xfer_retry(void *xfer) "%p" +usb_xhci_xfer_success(void *xfer, uint32_t bytes) "%p: len %d" +usb_xhci_xfer_error(void *xfer, uint32_t ret) "%p: ret %d" + # hw/usb/desc.c usb_desc_device(int addr, int len, int ret) "dev %d query device, len %d, ret %d" usb_desc_device_qualifier(int addr, int len, int ret) "dev %d query device qualifier, len %d, ret %d" @@ -331,8 +368,10 @@ usb_host_urb_complete(int bus, int addr, void *aurb, int status, int length, int usb_host_urb_canceled(int bus, int addr, void *aurb) "dev %d:%d, aurb %p" usb_host_ep_set_halt(int bus, int addr, int ep) "dev %d:%d, ep %d" usb_host_ep_clear_halt(int bus, int addr, int ep) "dev %d:%d, ep %d" -usb_host_ep_start_iso(int bus, int addr, int ep) "dev %d:%d, ep %d" -usb_host_ep_stop_iso(int bus, int addr, int ep) "dev %d:%d, ep %d" +usb_host_iso_start(int bus, int addr, int ep) "dev %d:%d, ep %d" +usb_host_iso_stop(int bus, int addr, int ep) "dev %d:%d, ep %d" +usb_host_iso_out_of_bufs(int bus, int addr, int ep) "dev %d:%d, ep %d" +usb_host_iso_many_urbs(int bus, int addr, int count) "dev %d:%d, count %d" usb_host_reset(int bus, int addr) "dev %d:%d" usb_host_auto_scan_enabled(void) usb_host_auto_scan_disabled(void) @@ -475,6 +514,85 @@ lm32_uart_irq_state(int level) "irq state %d" # hw/lm32_sys.c lm32_sys_memory_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" +# hw/megasas.c +megasas_init_firmware(uint64_t pa) "pa %" PRIx64 " " +megasas_init_queue(uint64_t queue_pa, int queue_len, uint64_t head, uint64_t tail, uint32_t flags) "queue at %" PRIx64 " len %d head %" PRIx64 " tail %" PRIx64 " flags %x" +megasas_initq_map_failed(int frame) "scmd %d: failed to map queue" +megasas_initq_mismatch(int queue_len, int fw_cmds) "queue size %d max fw cmds %d" +megasas_qf_found(unsigned int index, uint64_t pa) "found mapped frame %x pa %" PRIx64 "" +megasas_qf_new(unsigned int index, void *cmd) "return new frame %x cmd %p" +megasas_qf_failed(unsigned long pa) "all frames busy for frame %lx" +megasas_qf_enqueue(unsigned int index, unsigned int count, uint64_t context, unsigned int tail, int busy) "enqueue frame %x count %d context %" PRIx64 " tail %x busy %d" +megasas_qf_update(unsigned int head, unsigned int busy) "update reply queue head %x busy %d" +megasas_qf_dequeue(unsigned int index) "dequeue frame %x" +megasas_qf_map_failed(int cmd, unsigned long frame) "scmd %d: frame %lu" +megasas_qf_complete_noirq(uint64_t context) "context %" PRIx64 " " +megasas_qf_complete(uint64_t context, unsigned int tail, unsigned int offset, int busy, unsigned int doorbell) "context %" PRIx64 " tail %x offset %d busy %d doorbell %x" +megasas_handle_frame(const char *cmd, uint64_t addr, uint64_t context, uint32_t count) "MFI cmd %s addr %" PRIx64 " context %" PRIx64 " count %d" +megasas_frame_busy(uint64_t addr) "frame %" PRIx64 " busy" +megasas_unhandled_frame_cmd(int cmd, uint8_t frame_cmd) "scmd %d: Unhandled MFI cmd %x" +megasas_handle_scsi(const char *frame, int bus, int dev, int lun, void *sdev, unsigned long size) "%s dev %x/%x/%x sdev %p xfer %lu" +megasas_scsi_target_not_present(const char *frame, int bus, int dev, int lun) "%s dev %x/%x/%x target not present" +megasas_scsi_invalid_cdb_len(const char *frame, int bus, int dev, int lun, int len) "%s dev %x/%x/%x invalid cdb len %d" +megasas_iov_read_overflow(int cmd, int bytes, int len) "scmd %d: %d/%d bytes" +megasas_iov_write_overflow(int cmd, int bytes, int len) "scmd %d: %d/%d bytes" +megasas_iov_read_underflow(int cmd, int bytes, int len) 
"scmd %d: %d/%d bytes" +megasas_iov_write_underflow(int cmd, int bytes, int len) "scmd %d: %d/%d bytes" +megasas_scsi_req_alloc_failed(const char *frame, int dev, int lun) "%s dev %x/%x req allocation failed" +megasas_scsi_read_start(int cmd, int len) "scmd %d: transfer %d bytes of data" +megasas_scsi_write_start(int cmd, int len) "scmd %d: transfer %d bytes of data" +megasas_scsi_nodata(int cmd) "scmd %d: no data to be transferred" +megasas_scsi_complete(int cmd, uint32_t status, int len, int xfer) "scmd %d: finished with status %x, len %u/%u" +megasas_command_complete(int cmd, uint32_t status, uint32_t resid) "scmd %d: command completed, status %x, residual %d" +megasas_handle_io(int cmd, const char *frame, int dev, int lun, unsigned long lba, unsigned long count) "scmd %d: %s dev %x/%x lba %lx count %lu" +megasas_io_target_not_present(int cmd, const char *frame, int dev, int lun) "scmd %d: %s dev 1/%x/%x LUN not present" +megasas_io_read_start(int cmd, unsigned long lba, unsigned long count, unsigned long len) "scmd %d: start LBA %lx %lu blocks (%lu bytes)" +megasas_io_write_start(int cmd, unsigned long lba, unsigned long count, unsigned long len) "scmd %d: start LBA %lx %lu blocks (%lu bytes)" +megasas_io_complete(int cmd, uint32_t len) "scmd %d: %d bytes completed" +megasas_io_read(int cmd, int bytes, int len, unsigned long offset) "scmd %d: %d/%d bytes, iov offset %lu" +megasas_io_write(int cmd, int bytes, int len, unsigned long offset) "scmd %d: %d/%d bytes, iov offset %lu" +megasas_io_continue(int cmd, int bytes) "scmd %d: %d bytes left" +megasas_iovec_map_failed(int cmd, int index, unsigned long iov_size) "scmd %d: iovec %d size %lu" +megasas_iovec_sgl_overflow(int cmd, int index, int limit) "scmd %d: iovec count %d limit %d" +megasas_iovec_sgl_underflow(int cmd, int index) "scmd %d: iovec count %d" +megasas_iovec_sgl_invalid(int cmd, int index, uint64_t pa, uint32_t len) "scmd %d: element %d pa %" PRIx64 " len %u" +megasas_iovec_overflow(int cmd, int len, int limit) "scmd %d: len %d limit %d" +megasas_iovec_underflow(int cmd, int len, int limit) "scmd %d: len %d limit %d" +megasas_handle_dcmd(int cmd, int opcode) "scmd %d: MFI DCMD opcode %x" +megasas_finish_dcmd(int cmd, int size) "scmd %d: MFI DCMD wrote %d bytes" +megasas_dcmd_req_alloc_failed(int cmd, const char *desc) "scmd %d: %s alloc failed" +megasas_dcmd_internal_submit(int cmd, const char *desc, int dev) "scmd %d: %s to dev %d" +megasas_dcmd_internal_finish(int cmd, int opcode, int lun) "scmd %d: DCMD finish internal cmd %x lun %d" +megasas_dcmd_internal_invalid(int cmd, int opcode) "scmd %d: Invalid internal DCMD %x" +megasas_dcmd_unhandled(int cmd, int opcode, int len) "scmd %d: opcode %x, len %d" +megasas_dcmd_zero_sge(int cmd) "scmd %d: zero DCMD sge count" +megasas_dcmd_invalid_sge(int cmd, int count) "scmd %d: invalid DCMD sge count %d" +megasas_dcmd_map_failed(int cmd) "scmd %d: Failed to map DCMD buffer" +megasas_dcmd_invalid_xfer_len(int cmd, unsigned long size, unsigned long max) "scmd %d: invalid xfer len %ld, max %ld" +megasas_dcmd_enter(int cmd, const char *dcmd, int len) "scmd %d: DCMD %s len %d" +megasas_dcmd_dummy(int cmd, unsigned long size) "scmd %d: DCMD dummy xfer len %ld" +megasas_dcmd_set_fw_time(int cmd, unsigned long time) "scmd %d: Set FW time %lx" +megasas_dcmd_pd_get_list(int cmd, int num, int max, int offset) "scmd %d: DCMD PD get list: %d / %d PDs, size %d" +megasas_dcmd_ld_get_list(int cmd, int num, int max) "scmd %d: DCMD LD get list: found %d / %d LDs" +megasas_dcmd_ld_get_info(int cmd, 
int ld_id) "scmd %d: DCMD LD get info for dev %d" +megasas_dcmd_pd_get_info(int cmd, int pd_id) "scmd %d: DCMD PD get info for dev %d" +megasas_dcmd_pd_list_query(int cmd, int flags) "scmd %d: DCMD PD list query flags %x" +megasas_dcmd_dump_frame(int offset, char f0, char f1, char f2, char f3, char f4, char f5, char f6, char f7) "0x%x: %02x %02x %02x %02x %02x %02x %02x %02x" +megasas_abort_frame(int cmd, int abort_cmd) "scmd %d: aborting frame %x" +megasas_abort_no_cmd(int cmd, uint64_t context) "scmd %d: no active command for frame context %" PRIx64 "" +megasas_abort_invalid_context(int cmd, uint64_t context, int abort_cmd) "scmd %d: invalid frame context %" PRIx64 " for abort frame %x" +megasas_reset(void) "Reset" +megasas_init(int sges, int cmds, const char *intr, const char *mode) "Using %d sges, %d cmds, %s, %s mode" +megasas_msix_raise(int vector) "vector %d" +megasas_irq_lower(void) "INTx" +megasas_irq_raise(void) "INTx" +megasas_intr_enabled(void) "Interrupts enabled" +megasas_intr_disabled(void) "Interrupts disabled" +megasas_mmio_readl(unsigned long addr, uint32_t val) "addr 0x%lx: 0x%x" +megasas_mmio_invalid_readl(unsigned long addr) "addr 0x%lx" +megasas_mmio_writel(uint32_t addr, uint32_t val) "addr 0x%x: 0x%x" +megasas_mmio_invalid_writel(uint32_t addr, uint32_t val) "addr 0x%x: 0x%x" + # hw/milkymist-ac97.c milkymist_ac97_memory_read(uint32_t addr, uint32_t value) "addr %08x value %08x" milkymist_ac97_memory_write(uint32_t addr, uint32_t value) "addr %08x value %08x" @@ -641,6 +759,11 @@ esp_mem_writeb_cmd_ensel(uint32_t val) "Enable selection (%2.2x)" # monitor.c handle_qmp_command(void *mon, const char *cmd_name) "mon %p cmd_name \"%s\"" monitor_protocol_emitter(void *mon) "mon %p" +monitor_protocol_event(uint32_t event, const char *evname, void *data) "event=%d name \"%s\" data %p" +monitor_protocol_event_handler(uint32_t event, void *data, uint64_t last, uint64_t now) "event=%d data=%p last=%" PRId64 " now=%" PRId64 +monitor_protocol_event_emit(uint32_t event, void *data) "event=%d data=%p" +monitor_protocol_event_queue(uint32_t event, void *data, uint64_t rate, uint64_t last, uint64_t now) "event=%d data=%p rate=%" PRId64 " last=%" PRId64 " now=%" PRId64 +monitor_protocol_event_throttle(uint32_t event, uint64_t rate) "event=%d rate=%" PRId64 # hw/opencores_eth.c open_eth_mii_write(unsigned idx, uint16_t v) "MII[%02x] <- %04x" @@ -741,6 +864,11 @@ displaysurface_resize(void *display_state, void *display_surface, int width, int # vga.c ppm_save(const char *filename, void *display_surface) "%s surface=%p" +# savevm.c + +savevm_section_start(void) "" +savevm_section_end(unsigned int section_id) "section_id %u" + # hw/qxl.c disable qxl_interface_set_mm_time(int qid, uint32_t mm_time) "%d %d" disable qxl_io_write_vga(int qid, const char *mode, uint32_t addr, uint32_t val) "%d %s addr=%u val=%u" diff --git a/trace/simple.c b/trace/simple.c index 33ae48696d..b4a3c6e950 100644 --- a/trace/simple.c +++ b/trace/simple.c @@ -161,8 +161,11 @@ static void trace(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, } timestamp = get_clock(); - +#if GLIB_CHECK_VERSION(2, 30, 0) + idx = g_atomic_int_add((gint *)&trace_idx, 1) % TRACE_BUF_LEN; +#else idx = g_atomic_int_exchange_and_add((gint *)&trace_idx, 1) % TRACE_BUF_LEN; +#endif trace_buf[idx] = (TraceRecord){ .event = event, .timestamp_ns = timestamp, diff --git a/ui/Makefile.objs b/ui/Makefile.objs new file mode 100644 index 0000000000..adc07be761 --- /dev/null +++ b/ui/Makefile.objs @@ -0,0 +1,14 @@ +vnc-obj-y += vnc.o 
d3des.o +vnc-obj-y += vnc-enc-zlib.o vnc-enc-hextile.o +vnc-obj-y += vnc-enc-tight.o vnc-palette.o +vnc-obj-y += vnc-enc-zrle.o +vnc-obj-$(CONFIG_VNC_TLS) += vnc-tls.o vnc-auth-vencrypt.o +vnc-obj-$(CONFIG_VNC_SASL) += vnc-auth-sasl.o +vnc-obj-y += vnc-jobs.o + +common-obj-y += keymaps.o +common-obj-$(CONFIG_SPICE) += spice-core.o spice-input.o spice-display.o +common-obj-$(CONFIG_SDL) += sdl.o sdl_zoom.o x_keymap.o +common-obj-$(CONFIG_COCOA) += cocoa.o +common-obj-$(CONFIG_CURSES) += curses.o +common-obj-$(CONFIG_VNC) += $(vnc-obj-y) diff --git a/ui/spice-display.c b/ui/spice-display.c index 5418eb3c7c..3e8f0b3ad5 100644 --- a/ui/spice-display.c +++ b/ui/spice-display.c @@ -244,6 +244,8 @@ void qemu_spice_create_host_primary(SimpleSpiceDisplay *ssd) { QXLDevSurfaceCreate surface; + memset(&surface, 0, sizeof(surface)); + dprint(1, "%s: %dx%d\n", __FUNCTION__, ds_get_width(ssd->ds), ds_get_height(ssd->ds)); diff --git a/ui/vnc-jobs-sync.c b/ui/vnc-jobs-sync.c deleted file mode 100644 index 49b77afcc9..0000000000 --- a/ui/vnc-jobs-sync.c +++ /dev/null @@ -1,73 +0,0 @@ -/* - * QEMU VNC display driver - * - * Copyright (C) 2006 Anthony Liguori <anthony@codemonkey.ws> - * Copyright (C) 2006 Fabrice Bellard - * Copyright (C) 2009 Red Hat, Inc - * Copyright (C) 2010 Corentin Chary <corentin.chary@gmail.com> - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -#include "vnc.h" -#include "vnc-jobs.h" - -void vnc_jobs_clear(VncState *vs) -{ -} - -void vnc_jobs_join(VncState *vs) -{ -} - -VncJob *vnc_job_new(VncState *vs) -{ - vs->job.vs = vs; - vs->job.rectangles = 0; - - vnc_write_u8(vs, VNC_MSG_SERVER_FRAMEBUFFER_UPDATE); - vnc_write_u8(vs, 0); - vs->job.saved_offset = vs->output.offset; - vnc_write_u16(vs, 0); - return &vs->job; -} - -void vnc_job_push(VncJob *job) -{ - VncState *vs = job->vs; - - vs->output.buffer[job->saved_offset] = (job->rectangles >> 8) & 0xFF; - vs->output.buffer[job->saved_offset + 1] = job->rectangles & 0xFF; - vnc_flush(job->vs); -} - -int vnc_job_add_rect(VncJob *job, int x, int y, int w, int h) -{ - int n; - - n = vnc_send_framebuffer_update(job->vs, x, y, w, h); - if (n >= 0) - job->rectangles += n; - return n; -} - -bool vnc_has_job(VncState *vs) -{ - return false; -} diff --git a/ui/vnc-jobs-async.c b/ui/vnc-jobs.c index 087b84d319..087b84d319 100644 --- a/ui/vnc-jobs-async.c +++ b/ui/vnc-jobs.c diff --git a/ui/vnc-jobs.h b/ui/vnc-jobs.h index 4c661f95e5..86e6d888c6 100644 --- a/ui/vnc-jobs.h +++ b/ui/vnc-jobs.h @@ -38,51 +38,35 @@ bool vnc_has_job(VncState *vs); void vnc_jobs_clear(VncState *vs); void vnc_jobs_join(VncState *vs); -#ifdef CONFIG_VNC_THREAD - void vnc_jobs_consume_buffer(VncState *vs); void vnc_start_worker_thread(void); bool vnc_worker_thread_running(void); void vnc_stop_worker_thread(void); -#endif /* CONFIG_VNC_THREAD */ - /* Locks */ static inline int vnc_trylock_display(VncDisplay *vd) { -#ifdef CONFIG_VNC_THREAD return qemu_mutex_trylock(&vd->mutex); -#else - return 0; -#endif } static inline void vnc_lock_display(VncDisplay *vd) { -#ifdef CONFIG_VNC_THREAD qemu_mutex_lock(&vd->mutex); -#endif } static inline void vnc_unlock_display(VncDisplay *vd) { -#ifdef CONFIG_VNC_THREAD qemu_mutex_unlock(&vd->mutex); -#endif } static inline void vnc_lock_output(VncState *vs) { -#ifdef CONFIG_VNC_THREAD qemu_mutex_lock(&vs->output_mutex); -#endif } static inline void vnc_unlock_output(VncState *vs) { -#ifdef CONFIG_VNC_THREAD qemu_mutex_unlock(&vs->output_mutex); -#endif } #endif /* VNC_JOBS_H */ @@ -526,7 +526,6 @@ static void vnc_desktop_resize(VncState *vs) vnc_flush(vs); } -#ifdef CONFIG_VNC_THREAD static void vnc_abort_display_jobs(VncDisplay *vd) { VncState *vs; @@ -545,11 +544,6 @@ static void vnc_abort_display_jobs(VncDisplay *vd) vnc_unlock_output(vs); } } -#else -static void vnc_abort_display_jobs(VncDisplay *vd) -{ -} -#endif static void vnc_dpy_resize(DisplayState *ds) { @@ -867,19 +861,12 @@ static int find_and_clear_dirty_height(struct VncState *vs, return h; } -#ifdef CONFIG_VNC_THREAD static int vnc_update_client_sync(VncState *vs, int has_dirty) { int ret = vnc_update_client(vs, has_dirty); vnc_jobs_join(vs); return ret; } -#else -static int vnc_update_client_sync(VncState *vs, int has_dirty) -{ - return vnc_update_client(vs, has_dirty); -} -#endif static int vnc_update_client(VncState *vs, int has_dirty) { @@ -1066,11 +1053,9 @@ static void vnc_disconnect_finish(VncState *vs) qemu_remove_led_event_handler(vs->led); vnc_unlock_output(vs); -#ifdef CONFIG_VNC_THREAD qemu_mutex_destroy(&vs->output_mutex); qemu_bh_delete(vs->bh); buffer_free(&vs->jobs_buffer); -#endif for (i = 0; i < VNC_STAT_ROWS; ++i) { g_free(vs->lossy_rect[i]); @@ -1286,14 +1271,12 @@ static long vnc_client_read_plain(VncState *vs) return ret; } -#ifdef CONFIG_VNC_THREAD static void vnc_jobs_bh(void *opaque) { VncState *vs = opaque; vnc_jobs_consume_buffer(vs); } -#endif /* * First function called whenever there 
is more data to be read from @@ -2699,10 +2682,8 @@ static void vnc_connect(VncDisplay *vd, int csock, int skipauth) vs->as.fmt = AUD_FMT_S16; vs->as.endianness = 0; -#ifdef CONFIG_VNC_THREAD qemu_mutex_init(&vs->output_mutex); vs->bh = qemu_bh_new(vnc_jobs_bh, vs); -#endif QTAILQ_INSERT_HEAD(&vd->clients, vs, next); @@ -2762,10 +2743,8 @@ void vnc_display_init(DisplayState *ds) if (!vs->kbd_layout) exit(1); -#ifdef CONFIG_VNC_THREAD qemu_mutex_init(&vs->mutex); vnc_start_worker_thread(); -#endif dcl->dpy_copy = vnc_dpy_copy; dcl->dpy_update = vnc_dpy_update; @@ -29,9 +29,7 @@ #include "qemu-common.h" #include "qemu-queue.h" -#ifdef CONFIG_VNC_THREAD #include "qemu-thread.h" -#endif #include "console.h" #include "monitor.h" #include "audio/audio.h" @@ -146,9 +144,7 @@ struct VncDisplay DisplayState *ds; kbd_layout_t *kbd_layout; int lock_key_sync; -#ifdef CONFIG_VNC_THREAD QemuMutex mutex; -#endif QEMUCursor *cursor; int cursor_msize; @@ -216,7 +212,6 @@ typedef struct VncZywrle { int buf[VNC_ZRLE_TILE_WIDTH * VNC_ZRLE_TILE_HEIGHT]; } VncZywrle; -#ifdef CONFIG_VNC_THREAD struct VncRect { int x; @@ -238,14 +233,6 @@ struct VncJob QLIST_HEAD(, VncRectEntry) rectangles; QTAILQ_ENTRY(VncJob) next; }; -#else -struct VncJob -{ - VncState *vs; - int rectangles; - size_t saved_offset; -}; -#endif struct VncState { @@ -300,13 +287,9 @@ struct VncState QEMUPutLEDEntry *led; bool abort; -#ifndef CONFIG_VNC_THREAD - VncJob job; -#else QemuMutex output_mutex; QEMUBH *bh; Buffer jobs_buffer; -#endif /* Encoding specific, if you add something here, don't forget to * update vnc_async_encoding_start() diff --git a/user-exec.c b/user-exec.c index d8c2ad9f2f..b2a4261eca 100644 --- a/user-exec.c +++ b/user-exec.c @@ -41,7 +41,7 @@ static void exception_action(CPUArchState *env1) { #if defined(TARGET_I386) - raise_exception_err_env(env1, env1->exception_index, env1->error_code); + raise_exception_err(env1, env1->exception_index, env1->error_code); #else cpu_loop_exit(env1); #endif @@ -51,14 +51,12 @@ #ifdef CONFIG_BSD #include <sys/stat.h> #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__) -#include <libutil.h> #include <sys/sysctl.h> #else #include <util.h> #endif #else #ifdef __linux__ -#include <pty.h> #include <malloc.h> #include <linux/ppdev.h> @@ -81,10 +79,6 @@ #endif #endif -#if defined(__OpenBSD__) -#include <util.h> -#endif - #if defined(CONFIG_VDE) #include <libvdeplug.h> #endif @@ -3212,6 +3206,10 @@ int main(int argc, char **argv, char **envp) } loc_set_none(); + if (machine->hw_version) { + qemu_set_version(machine->hw_version); + } + /* Init CPU def lists, based on config * - Must be called after all the qemu_read_config_file() calls * - Must be called before list_cpus() @@ -26,7 +26,7 @@ #ifndef QEMU_VMSTATE_H #define QEMU_VMSTATE_H 1 -typedef void SaveSetParamsHandler(int blk_enable, int shared, void * opaque); +typedef void SaveSetParamsHandler(const MigrationParams *params, void * opaque); typedef void SaveStateHandler(QEMUFile *f, void *opaque); typedef int SaveLiveStateHandler(QEMUFile *f, int stage, void *opaque); typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id); @@ -1191,3 +1191,15 @@ void xen_register_framebuffer(MemoryRegion *mr) { framebuffer = mr; } + +void xen_shutdown_fatal_error(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); + fprintf(stderr, "Will destroy the domain.\n"); + /* destroy the domain */ + qemu_system_shutdown_request(); +} |
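The set-feature-bit subcommand added to tests/qemu-iotests/qcow2.py above validates the bit number and ORs it into one of the three qcow2 feature-bit groups in the image header. The following is a minimal, self-contained Python sketch of that validation and group-selection logic only; set_feature_bit() and the feature-mask dictionary are illustrative helpers, not code from the patch.

    import sys

    GROUPS = ('incompatible', 'compatible', 'autoclear')

    def set_feature_bit(masks, group, bit):
        # masks maps each group name to a 64-bit feature mask
        try:
            bit = int(bit, 0)            # accept decimal, hex (0x..) or octal
            if bit < 0 or bit >= 64:
                raise ValueError
        except ValueError:
            sys.exit("'%s' is not a valid bit number in range [0, 64)" % bit)
        if group not in GROUPS:
            sys.exit("'%s' is not a valid group, try %s" % (group, ', '.join(GROUPS)))
        masks = dict(masks)
        masks[group] |= 1 << bit
        return masks

    masks = set_feature_bit({g: 0 for g in GROUPS}, 'incompatible', '0x1')
    print(hex(masks['incompatible']))    # 0x2

With the patch applied, a test would drive the real command through the tool's command table, roughly as "./qcow2.py disk.qcow2 set-feature-bit incompatible 63" (the image path and bit number here are only examples).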
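The change to tests/qemu-iotests/iotests.py above makes the Python harness start QEMU with qtest acceleration and no display, so the QMP-driven image tests do not depend on a working TCG/KVM setup or on graphics output. A short sketch of the argument list the VM class builds after this change; the binary name and monitor socket path are placeholders, not values from the patch.

    qemu_args = ['qemu-system-x86_64']             # placeholder binary
    monitor_path = '/tmp/qemu-mon.sock'            # placeholder QMP socket path

    args = qemu_args + [
        '-chardev', 'socket,id=mon,path=' + monitor_path,
        '-mon', 'chardev=mon,mode=control',        # QMP control monitor
        '-qtest', 'stdio',                         # qtest protocol on stdio
        '-machine', 'accel=qtest',                 # no guest code execution needed
        '-display', 'none', '-vga', 'none',        # replaces the old -nographic
    ]
    print(' '.join(args))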
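As a reading aid for the pt_setup macro in the new xtensa MMU tests above, the following Python sketch mirrors the address and word arithmetic the macro performs (page-table base at ptevaddr 0x80000000, a way-7 DTLB entry covering the page table, and the PTE itself); pt_words() is an illustrative helper, not part of the test.

    def pt_words(pt_ring, pt_attr, pte_ring, vaddr, paddr, pte_attr):
        ptevaddr = 0x80000000
        way7_va = 0x80000007 | ((vaddr >> 10) & 0xfffff000)   # wdtlb vaddr/way-7 word
        way7_pa = 0x04000000 | (pt_ring << 4) | pt_attr       # wdtlb paddr/attr word
        pte_addr = ptevaddr + ((vaddr >> 12) << 2)             # where the PTE is stored
        pte = (paddr & 0xfffff000) | (pte_ring << 4) | pte_attr
        return way7_va, way7_pa, pte_addr, pte

    # pt_setup 0, 3, 1, 0x1000, 0x1000, 3 (as used by the dtlb_autoload test)
    print([hex(w) for w in pt_words(0, 3, 1, 0x1000, 0x1000, 3)])
    # ['0x80000007', '0x4000003', '0x80000004', '0x1013']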