Author:    Thomas Huth <thuth@redhat.com>  2016-10-11 08:56:52 +0200
Committer: Thomas Huth <thuth@redhat.com>  2016-12-20 21:52:12 +0100
Commit:    fcf5ef2ab52c621a4617ebbef36bf43b4003f4c0
Tree:      2b450d96b01455df8ed908bf8f26ddc388a03380 /target/xtensa
Parent:    82ecffa8c050bf5bbc13329e9b65eac1caa5b55c
Move target-* CPU file into a target/ folder
We've currently got 18 architectures in QEMU, and thus 18 target-xxx
folders in the root folder of the QEMU source tree. More architectures
(e.g. RISC-V, AVR) are likely to be included soon, too, so the main
folder of the QEMU sources slowly gets quite overcrowded with the
target-xxx folders. To disburden the main folder a little bit, let's
move the target-xxx folders into a dedicated target/ folder, so that
target-xxx/ simply becomes target/xxx/ instead.

Acked-by: Laurent Vivier <laurent@vivier.eu> [m68k part]
Acked-by: Bastian Koppelmann <kbastian@mail.uni-paderborn.de> [tricore part]
Acked-by: Michael Walle <michael@walle.cc> [lm32 part]
Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com> [s390x part]
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com> [s390x part]
Acked-by: Eduardo Habkost <ehabkost@redhat.com> [i386 part]
Acked-by: Artyom Tarasenko <atar4qemu@gmail.com> [sparc part]
Acked-by: Richard Henderson <rth@twiddle.net> [alpha part]
Acked-by: Max Filippov <jcmvbkbc@gmail.com> [xtensa part]
Reviewed-by: David Gibson <david@gibson.dropbear.id.au> [ppc part]
Acked-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> [cris&microblaze part]
Acked-by: Guan Xuetao <gxt@mprc.pku.edu.cn> [unicore32 part]
Signed-off-by: Thomas Huth <thuth@redhat.com>
Diffstat (limited to 'target/xtensa')
-rw-r--r--   target/xtensa/Makefile.objs                  7
-rw-r--r--   target/xtensa/core-dc232b.c                 50
-rw-r--r--   target/xtensa/core-dc232b/core-isa.h       422
-rw-r--r--   target/xtensa/core-dc232b/gdb-config.c     261
-rw-r--r--   target/xtensa/core-dc233c.c                 51
-rw-r--r--   target/xtensa/core-dc233c/core-isa.h       473
-rw-r--r--   target/xtensa/core-dc233c/gdb-config.c     145
-rw-r--r--   target/xtensa/core-fsf.c                    49
-rw-r--r--   target/xtensa/core-fsf/core-isa.h          360
-rw-r--r--   target/xtensa/cpu-qom.h                     66
-rw-r--r--   target/xtensa/cpu.c                        184
-rw-r--r--   target/xtensa/cpu.h                        587
-rw-r--r--   target/xtensa/gdbstub.c                    128
-rw-r--r--   target/xtensa/helper.c                     730
-rw-r--r--   target/xtensa/helper.h                      58
-rwxr-xr-x   target/xtensa/import_core.sh                51
-rw-r--r--   target/xtensa/monitor.c                     35
-rw-r--r--   target/xtensa/op_helper.c                  984
-rw-r--r--   target/xtensa/overlay_tool.h               602
-rw-r--r--   target/xtensa/translate.c                 3225
-rw-r--r--   target/xtensa/xtensa-semi.c                318
21 files changed, 8786 insertions, 0 deletions
diff --git a/target/xtensa/Makefile.objs b/target/xtensa/Makefile.objs
new file mode 100644
index 0000000000..481de91973
--- /dev/null
+++ b/target/xtensa/Makefile.objs
@@ -0,0 +1,7 @@
+obj-y += xtensa-semi.o
+obj-y += core-dc232b.o
+obj-y += core-dc233c.o
+obj-y += core-fsf.o
+obj-$(CONFIG_SOFTMMU) += monitor.o
+obj-y += translate.o op_helper.o helper.o cpu.o
+obj-y += gdbstub.o
diff --git a/target/xtensa/core-dc232b.c b/target/xtensa/core-dc232b.c
new file mode 100644
index 0000000000..bb8ed4197f
--- /dev/null
+++ b/target/xtensa/core-dc232b.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+
+#include "core-dc232b/core-isa.h"
+#include "overlay_tool.h"
+
+static XtensaConfig dc232b __attribute__((unused)) = {
+ .name = "dc232b",
+ .gdb_regmap = {
+ .num_regs = 120,
+ .num_core_regs = 52,
+ .reg = {
+#include "core-dc232b/gdb-config.c"
+ }
+ },
+ .clock_freq_khz = 10000,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE(dc232b)
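
The .reg initializer above is filled in by textually including core-dc232b/gdb-config.c: each of the 120 XTREG(...) lines in that file (matching .num_regs = 120) expands to one array element describing a register for the gdb stub, and REGISTER_CORE() then hooks the finished XtensaConfig into QEMU's list of selectable cores. The real macros most likely live in overlay_tool.h and cpu.h, both part of this patch; the sketch below only illustrates the mechanism, and the struct and field names in it are assumptions, not QEMU's actual definitions.

    /* Illustrative sketch only -- struct/field names are assumed, not QEMU's. */
    typedef struct GdbRegSketch {
        int targno;      /* register number reported to gdb */
        int type;        /* register class (address reg, special reg, ...) */
        int group;       /* gdb register group */
        unsigned size;   /* register size in bytes */
    } GdbRegSketch;

    /* Each XTREG(idx, ofs, bitsize, sz, align, targno, flags, cp, type,
     * group, name, ...) invocation contributes one initializer, so the
     * #include inside ".reg = { ... }" builds the whole map at compile time. */
    #define XTREG(idx, ofs, bi, sz, al, targno, flags, cp, typ, grp, name, \
                  a1, a2, a3, a4, a5, a6) \
        { .targno = (targno), .type = (typ), .group = (grp), .size = (sz) },

    static const GdbRegSketch dc232b_regs_sketch[] = {
        XTREG(0, 0, 32, 4, 4, 0x0020, 0x0006, -2, 9, 0x0100, pc, 0, 0, 0, 0, 0, 0)
        XTREG(1, 4, 32, 4, 4, 0x0100, 0x0006, -2, 1, 0x0002, ar0, 0, 0, 0, 0, 0, 0)
    };
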
diff --git a/target/xtensa/core-dc232b/core-isa.h b/target/xtensa/core-dc232b/core-isa.h
new file mode 100644
index 0000000000..a9935b87af
--- /dev/null
+++ b/target/xtensa/core-dc232b/core-isa.h
@@ -0,0 +1,422 @@
+/*
+ * Xtensa processor core configuration information.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2007 Tensilica Inc.
+ */
+
+#ifndef XTENSA_DC232B_CORE_ISA_H
+#define XTENSA_DC232B_CORE_ISA_H
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU insns */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* floating point pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+
+#define XCHAL_SW_VERSION 701001 /* sw version of this header */
+
+#define XCHAL_CORE_ID "dc232b" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION "Diamond 232L Standard Core Rev.B (LE)"
+#define XCHAL_BUILD_UNIQUE_ID 0x0000BEEF /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC56307FE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x0D40BEEF /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX2.1.1" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2210 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 221001 /* major*100+minor */
+#define XCHAL_HW_REL_LX2 1
+#define XCHAL_HW_REL_LX2_1 1
+#define XCHAL_HW_REL_LX2_1_1 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2210 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 221001 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2210 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 221001 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_MASK 0x00000100
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00001000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 4
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 1
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL2_NUM 8
+#define XCHAL_INTLEVEL4_NUM 12
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 3.) */
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0xD0000000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00000000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0xD8000500
+#define XCHAL_RESET_VECTOR1_PADDR 0x00000500
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0xD0000340
+#define XCHAL_USER_VECTOR_PADDR 0x00000340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00000300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD00003C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000003C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0xD00001C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000001C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0xD0000200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00000200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0xD0000240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00000240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0xD0000280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00000280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0xD00002C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000002C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 0 /* one way maps I+D 4GB vaddr */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* XTENSA_DC232B_CORE_ISA_H */
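
A quick consistency check on the cache geometry above: the comment in the privileged section states that cache size == ways * 2^(linewidth + setwidth), and the dc232b values satisfy it (4 * 2^(5 + 7) = 16384, matching XCHAL_ICACHE_SIZE and XCHAL_DCACHE_SIZE). The standalone snippet below just re-verifies that arithmetic with values copied from the header; it is illustrative and not part of the patch.

    /* Standalone check of the relation documented in the header above:
     *   cache size == ways * 2^(linewidth + setwidth)
     * Values are copied from core-dc232b/core-isa.h; not part of the patch. */
    #include <assert.h>

    #define CACHE_WAYS      4       /* XCHAL_ICACHE_WAYS / XCHAL_DCACHE_WAYS */
    #define CACHE_LINEWIDTH 5       /* log2(32-byte line) */
    #define CACHE_SETWIDTH  7       /* log2(lines per way) */
    #define CACHE_SIZE      16384   /* XCHAL_ICACHE_SIZE / XCHAL_DCACHE_SIZE */

    int main(void)
    {
        assert(CACHE_WAYS * (1u << (CACHE_LINEWIDTH + CACHE_SETWIDTH)) == CACHE_SIZE);
        return 0;
    }
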
diff --git a/target/xtensa/core-dc232b/gdb-config.c b/target/xtensa/core-dc232b/gdb-config.c
new file mode 100644
index 0000000000..13aba5edec
--- /dev/null
+++ b/target/xtensa/core-dc232b/gdb-config.c
@@ -0,0 +1,261 @@
+/* Configuration for the Xtensa architecture for GDB, the GNU debugger.
+
+ Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+ XTREG(0, 0, 32, 4, 4, 0x0020, 0x0006, -2, 9, 0x0100, pc,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(1, 4, 32, 4, 4, 0x0100, 0x0006, -2, 1, 0x0002, ar0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(2, 8, 32, 4, 4, 0x0101, 0x0006, -2, 1, 0x0002, ar1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(3, 12, 32, 4, 4, 0x0102, 0x0006, -2, 1, 0x0002, ar2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(4, 16, 32, 4, 4, 0x0103, 0x0006, -2, 1, 0x0002, ar3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(5, 20, 32, 4, 4, 0x0104, 0x0006, -2, 1, 0x0002, ar4,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(6, 24, 32, 4, 4, 0x0105, 0x0006, -2, 1, 0x0002, ar5,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(7, 28, 32, 4, 4, 0x0106, 0x0006, -2, 1, 0x0002, ar6,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(8, 32, 32, 4, 4, 0x0107, 0x0006, -2, 1, 0x0002, ar7,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(9, 36, 32, 4, 4, 0x0108, 0x0006, -2, 1, 0x0002, ar8,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(10, 40, 32, 4, 4, 0x0109, 0x0006, -2, 1, 0x0002, ar9,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(11, 44, 32, 4, 4, 0x010a, 0x0006, -2, 1, 0x0002, ar10,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(12, 48, 32, 4, 4, 0x010b, 0x0006, -2, 1, 0x0002, ar11,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(13, 52, 32, 4, 4, 0x010c, 0x0006, -2, 1, 0x0002, ar12,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(14, 56, 32, 4, 4, 0x010d, 0x0006, -2, 1, 0x0002, ar13,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(15, 60, 32, 4, 4, 0x010e, 0x0006, -2, 1, 0x0002, ar14,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(16, 64, 32, 4, 4, 0x010f, 0x0006, -2, 1, 0x0002, ar15,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(17, 68, 32, 4, 4, 0x0110, 0x0006, -2, 1, 0x0002, ar16,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(18, 72, 32, 4, 4, 0x0111, 0x0006, -2, 1, 0x0002, ar17,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(19, 76, 32, 4, 4, 0x0112, 0x0006, -2, 1, 0x0002, ar18,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(20, 80, 32, 4, 4, 0x0113, 0x0006, -2, 1, 0x0002, ar19,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(21, 84, 32, 4, 4, 0x0114, 0x0006, -2, 1, 0x0002, ar20,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(22, 88, 32, 4, 4, 0x0115, 0x0006, -2, 1, 0x0002, ar21,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(23, 92, 32, 4, 4, 0x0116, 0x0006, -2, 1, 0x0002, ar22,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(24, 96, 32, 4, 4, 0x0117, 0x0006, -2, 1, 0x0002, ar23,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(25, 100, 32, 4, 4, 0x0118, 0x0006, -2, 1, 0x0002, ar24,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(26, 104, 32, 4, 4, 0x0119, 0x0006, -2, 1, 0x0002, ar25,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(27, 108, 32, 4, 4, 0x011a, 0x0006, -2, 1, 0x0002, ar26,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(28, 112, 32, 4, 4, 0x011b, 0x0006, -2, 1, 0x0002, ar27,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(29, 116, 32, 4, 4, 0x011c, 0x0006, -2, 1, 0x0002, ar28,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(30, 120, 32, 4, 4, 0x011d, 0x0006, -2, 1, 0x0002, ar29,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(31, 124, 32, 4, 4, 0x011e, 0x0006, -2, 1, 0x0002, ar30,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(32, 128, 32, 4, 4, 0x011f, 0x0006, -2, 1, 0x0002, ar31,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(33, 132, 32, 4, 4, 0x0200, 0x0006, -2, 2, 0x1100, lbeg,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(34, 136, 32, 4, 4, 0x0201, 0x0006, -2, 2, 0x1100, lend,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(35, 140, 32, 4, 4, 0x0202, 0x0006, -2, 2, 0x1100, lcount,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(36, 144, 6, 4, 4, 0x0203, 0x0006, -2, 2, 0x1100, sar,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(37, 148, 32, 4, 4, 0x0205, 0x0006, -2, 2, 0x1100, litbase,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(38, 152, 3, 4, 4, 0x0248, 0x0006, -2, 2, 0x1002, windowbase,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(39, 156, 8, 4, 4, 0x0249, 0x0006, -2, 2, 0x1002, windowstart,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(40, 160, 32, 4, 4, 0x02b0, 0x0002, -2, 2, 0x1000, sr176,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(41, 164, 32, 4, 4, 0x02d0, 0x0002, -2, 2, 0x1000, sr208,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(42, 168, 19, 4, 4, 0x02e6, 0x0006, -2, 2, 0x1100, ps,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(43, 172, 32, 4, 4, 0x03e7, 0x0006, -2, 3, 0x0110, threadptr,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(44, 176, 32, 4, 4, 0x020c, 0x0006, -1, 2, 0x1100, scompare1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(45, 180, 32, 4, 4, 0x0210, 0x0006, -1, 2, 0x1100, acclo,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(46, 184, 8, 4, 4, 0x0211, 0x0006, -1, 2, 0x1100, acchi,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(47, 188, 32, 4, 4, 0x0220, 0x0006, -1, 2, 0x1100, m0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(48, 192, 32, 4, 4, 0x0221, 0x0006, -1, 2, 0x1100, m1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(49, 196, 32, 4, 4, 0x0222, 0x0006, -1, 2, 0x1100, m2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(50, 200, 32, 4, 4, 0x0223, 0x0006, -1, 2, 0x1100, m3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(51, 204, 32, 4, 4, 0x03e6, 0x000e, -1, 3, 0x0110, expstate,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(52, 208, 32, 4, 4, 0x0253, 0x0007, -2, 2, 0x1000, ptevaddr,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(53, 212, 32, 4, 4, 0x0259, 0x000d, -2, 2, 0x1000, mmid,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(54, 216, 32, 4, 4, 0x025a, 0x0007, -2, 2, 0x1000, rasid,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(55, 220, 18, 4, 4, 0x025b, 0x0007, -2, 2, 0x1000, itlbcfg,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(56, 224, 18, 4, 4, 0x025c, 0x0007, -2, 2, 0x1000, dtlbcfg,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(57, 228, 2, 4, 4, 0x0260, 0x0007, -2, 2, 0x1000, ibreakenable,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(58, 232, 32, 4, 4, 0x0268, 0x0007, -2, 2, 0x1000, ddr,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(59, 236, 32, 4, 4, 0x0280, 0x0007, -2, 2, 0x1000, ibreaka0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(60, 240, 32, 4, 4, 0x0281, 0x0007, -2, 2, 0x1000, ibreaka1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(61, 244, 32, 4, 4, 0x0290, 0x0007, -2, 2, 0x1000, dbreaka0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(62, 248, 32, 4, 4, 0x0291, 0x0007, -2, 2, 0x1000, dbreaka1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(63, 252, 32, 4, 4, 0x02a0, 0x0007, -2, 2, 0x1000, dbreakc0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(64, 256, 32, 4, 4, 0x02a1, 0x0007, -2, 2, 0x1000, dbreakc1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(65, 260, 32, 4, 4, 0x02b1, 0x0007, -2, 2, 0x1000, epc1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(66, 264, 32, 4, 4, 0x02b2, 0x0007, -2, 2, 0x1000, epc2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(67, 268, 32, 4, 4, 0x02b3, 0x0007, -2, 2, 0x1000, epc3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(68, 272, 32, 4, 4, 0x02b4, 0x0007, -2, 2, 0x1000, epc4,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(69, 276, 32, 4, 4, 0x02b5, 0x0007, -2, 2, 0x1000, epc5,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(70, 280, 32, 4, 4, 0x02b6, 0x0007, -2, 2, 0x1000, epc6,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(71, 284, 32, 4, 4, 0x02b7, 0x0007, -2, 2, 0x1000, epc7,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(72, 288, 32, 4, 4, 0x02c0, 0x0007, -2, 2, 0x1000, depc,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(73, 292, 19, 4, 4, 0x02c2, 0x0007, -2, 2, 0x1000, eps2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(74, 296, 19, 4, 4, 0x02c3, 0x0007, -2, 2, 0x1000, eps3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(75, 300, 19, 4, 4, 0x02c4, 0x0007, -2, 2, 0x1000, eps4,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(76, 304, 19, 4, 4, 0x02c5, 0x0007, -2, 2, 0x1000, eps5,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(77, 308, 19, 4, 4, 0x02c6, 0x0007, -2, 2, 0x1000, eps6,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(78, 312, 19, 4, 4, 0x02c7, 0x0007, -2, 2, 0x1000, eps7,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(79, 316, 32, 4, 4, 0x02d1, 0x0007, -2, 2, 0x1000, excsave1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(80, 320, 32, 4, 4, 0x02d2, 0x0007, -2, 2, 0x1000, excsave2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(81, 324, 32, 4, 4, 0x02d3, 0x0007, -2, 2, 0x1000, excsave3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(82, 328, 32, 4, 4, 0x02d4, 0x0007, -2, 2, 0x1000, excsave4,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(83, 332, 32, 4, 4, 0x02d5, 0x0007, -2, 2, 0x1000, excsave5,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(84, 336, 32, 4, 4, 0x02d6, 0x0007, -2, 2, 0x1000, excsave6,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(85, 340, 32, 4, 4, 0x02d7, 0x0007, -2, 2, 0x1000, excsave7,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(86, 344, 8, 4, 4, 0x02e0, 0x0007, -2, 2, 0x1000, cpenable,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(87, 348, 22, 4, 4, 0x02e2, 0x000b, -2, 2, 0x1000, interrupt,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(88, 352, 22, 4, 4, 0x02e2, 0x000d, -2, 2, 0x1000, intset,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(89, 356, 22, 4, 4, 0x02e3, 0x000d, -2, 2, 0x1000, intclear,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(90, 360, 22, 4, 4, 0x02e4, 0x0007, -2, 2, 0x1000, intenable,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(91, 364, 32, 4, 4, 0x02e7, 0x0007, -2, 2, 0x1000, vecbase,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(92, 368, 6, 4, 4, 0x02e8, 0x0007, -2, 2, 0x1000, exccause,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(93, 372, 12, 4, 4, 0x02e9, 0x0003, -2, 2, 0x1000, debugcause,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(94, 376, 32, 4, 4, 0x02ea, 0x000f, -2, 2, 0x1000, ccount,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(95, 380, 32, 4, 4, 0x02eb, 0x0003, -2, 2, 0x1000, prid,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(96, 384, 32, 4, 4, 0x02ec, 0x000f, -2, 2, 0x1000, icount,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(97, 388, 4, 4, 4, 0x02ed, 0x0007, -2, 2, 0x1000, icountlevel,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(98, 392, 32, 4, 4, 0x02ee, 0x0007, -2, 2, 0x1000, excvaddr,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(99, 396, 32, 4, 4, 0x02f0, 0x000f, -2, 2, 0x1000, ccompare0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(100, 400, 32, 4, 4, 0x02f1, 0x000f, -2, 2, 0x1000, ccompare1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(101, 404, 32, 4, 4, 0x02f2, 0x000f, -2, 2, 0x1000, ccompare2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(102, 408, 32, 4, 4, 0x02f4, 0x0007, -2, 2, 0x1000, misc0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(103, 412, 32, 4, 4, 0x02f5, 0x0007, -2, 2, 0x1000, misc1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(104, 416, 32, 4, 4, 0x0000, 0x0006, -2, 8, 0x0100, a0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(105, 420, 32, 4, 4, 0x0001, 0x0006, -2, 8, 0x0100, a1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(106, 424, 32, 4, 4, 0x0002, 0x0006, -2, 8, 0x0100, a2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(107, 428, 32, 4, 4, 0x0003, 0x0006, -2, 8, 0x0100, a3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(108, 432, 32, 4, 4, 0x0004, 0x0006, -2, 8, 0x0100, a4,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(109, 436, 32, 4, 4, 0x0005, 0x0006, -2, 8, 0x0100, a5,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(110, 440, 32, 4, 4, 0x0006, 0x0006, -2, 8, 0x0100, a6,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(111, 444, 32, 4, 4, 0x0007, 0x0006, -2, 8, 0x0100, a7,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(112, 448, 32, 4, 4, 0x0008, 0x0006, -2, 8, 0x0100, a8,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(113, 452, 32, 4, 4, 0x0009, 0x0006, -2, 8, 0x0100, a9,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(114, 456, 32, 4, 4, 0x000a, 0x0006, -2, 8, 0x0100, a10,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(115, 460, 32, 4, 4, 0x000b, 0x0006, -2, 8, 0x0100, a11,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(116, 464, 32, 4, 4, 0x000c, 0x0006, -2, 8, 0x0100, a12,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(117, 468, 32, 4, 4, 0x000d, 0x0006, -2, 8, 0x0100, a13,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(118, 472, 32, 4, 4, 0x000e, 0x0006, -2, 8, 0x0100, a14,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(119, 476, 32, 4, 4, 0x000f, 0x0006, -2, 8, 0x0100, a15,
+ 0, 0, 0, 0, 0, 0)
diff --git a/target/xtensa/core-dc233c.c b/target/xtensa/core-dc233c.c
new file mode 100644
index 0000000000..40475e5205
--- /dev/null
+++ b/target/xtensa/core-dc233c.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2012, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "qemu-common.h"
+#include "qemu/host-utils.h"
+
+#include "core-dc233c/core-isa.h"
+#include "overlay_tool.h"
+
+static XtensaConfig dc233c __attribute__((unused)) = {
+ .name = "dc233c",
+ .gdb_regmap = {
+ .num_regs = 121,
+ .num_core_regs = 52,
+ .reg = {
+#include "core-dc233c/gdb-config.c"
+ }
+ },
+ .clock_freq_khz = 10000,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE(dc233c)
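
REGISTER_CORE(dc233c), like REGISTER_CORE(dc232b) above, comes from overlay_tool.h and makes the statically defined XtensaConfig selectable at runtime. The sketch below shows the general shape such a registration can take (a GCC constructor prepending a node to a global list); the exact macro body and registration function in QEMU may differ, so treat every name in it as an assumption.

    /* Sketch of a constructor-based registration pattern; names are assumed
     * and the real REGISTER_CORE()/registration code in QEMU may differ. */
    #include <stdio.h>

    typedef struct CoreListSketch {
        const void *config;             /* would point at an XtensaConfig */
        struct CoreListSketch *next;
    } CoreListSketch;

    static CoreListSketch *core_list_sketch;

    static void register_core_sketch(CoreListSketch *node)
    {
        node->next = core_list_sketch;  /* prepend to the global core list */
        core_list_sketch = node;
    }

    /* The constructor runs before main(), so every core-*.c object that is
     * linked in announces its configuration automatically. */
    #define REGISTER_CORE_SKETCH(core)                                  \
        static void __attribute__((constructor)) core##_register(void)  \
        {                                                               \
            static CoreListSketch node = { .config = &core };           \
            register_core_sketch(&node);                                \
        }

    static const int dc233c_stub = 0;   /* stand-in for the dc233c config */
    REGISTER_CORE_SKETCH(dc233c_stub)

    int main(void)
    {
        for (CoreListSketch *n = core_list_sketch; n; n = n->next) {
            printf("core config registered at %p\n", (void *)n->config);
        }
        return 0;
    }
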
diff --git a/target/xtensa/core-dc233c/core-isa.h b/target/xtensa/core-dc233c/core-isa.h
new file mode 100644
index 0000000000..ff92b7f3ed
--- /dev/null
+++ b/target/xtensa/core-dc233c/core-isa.h
@@ -0,0 +1,473 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef XTENSA_DC233C_CORE_ISA_H
+#define XTENSA_DC233C_CORE_ISA_H
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* floating point pkg */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_accel 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 900001 /* sw version of this header */
+
+#define XCHAL_CORE_ID "dc233c" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION "dc233c"
+#define XCHAL_BUILD_UNIQUE_ID 0x00004B21 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC56707FE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x14404B21 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX4.0.1" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2400 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 240001 /* major*100+minor */
+#define XCHAL_HW_REL_LX4 1
+#define XCHAL_HW_REL_LX4_0 1
+#define XCHAL_HW_REL_LX4_0_1 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2400 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 240001 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2400 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 240001 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 4
+#define XCHAL_DCACHE_ACCESS_SIZE 4
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_MASK 0x00000100
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00001000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 4
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 1
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL2_NUM 8
+#define XCHAL_INTLEVEL4_NUM 12
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 3.) */
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x00002000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00002000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x00001000
+#define XCHAL_RESET_VECTOR1_PADDR 0x00001000
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x00002340
+#define XCHAL_USER_VECTOR_PADDR 0x00002340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x00002300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00002300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x000023C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000023C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x00002000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00002000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x00002180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00002180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x000021C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000021C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x00002200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00002200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x00002240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00002240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x00002280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00002280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x000022C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000022C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 6 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* XTENSA_DC233C_CORE_ISA_H */
diff --git a/target/xtensa/core-dc233c/gdb-config.c b/target/xtensa/core-dc233c/gdb-config.c
new file mode 100644
index 0000000000..b632341b28
--- /dev/null
+++ b/target/xtensa/core-dc233c/gdb-config.c
@@ -0,0 +1,145 @@
+/* Configuration for the Xtensa architecture for GDB, the GNU debugger.
+
+ Copyright (c) 2003-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+/* idx ofs bi sz al targno flags cp typ group name */
+XTREG(0, 0, 32, 4, 4, 0x0020, 0x0006, -2, 9, 0x0100, pc, 0, 0, 0, 0, 0, 0)
+XTREG(1, 4, 32, 4, 4, 0x0100, 0x0006, -2, 1, 0x0002, ar0, 0, 0, 0, 0, 0, 0)
+XTREG(2, 8, 32, 4, 4, 0x0101, 0x0006, -2, 1, 0x0002, ar1, 0, 0, 0, 0, 0, 0)
+XTREG(3, 12, 32, 4, 4, 0x0102, 0x0006, -2, 1, 0x0002, ar2, 0, 0, 0, 0, 0, 0)
+XTREG(4, 16, 32, 4, 4, 0x0103, 0x0006, -2, 1, 0x0002, ar3, 0, 0, 0, 0, 0, 0)
+XTREG(5, 20, 32, 4, 4, 0x0104, 0x0006, -2, 1, 0x0002, ar4, 0, 0, 0, 0, 0, 0)
+XTREG(6, 24, 32, 4, 4, 0x0105, 0x0006, -2, 1, 0x0002, ar5, 0, 0, 0, 0, 0, 0)
+XTREG(7, 28, 32, 4, 4, 0x0106, 0x0006, -2, 1, 0x0002, ar6, 0, 0, 0, 0, 0, 0)
+XTREG(8, 32, 32, 4, 4, 0x0107, 0x0006, -2, 1, 0x0002, ar7, 0, 0, 0, 0, 0, 0)
+XTREG(9, 36, 32, 4, 4, 0x0108, 0x0006, -2, 1, 0x0002, ar8, 0, 0, 0, 0, 0, 0)
+XTREG(10, 40, 32, 4, 4, 0x0109, 0x0006, -2, 1, 0x0002, ar9, 0, 0, 0, 0, 0, 0)
+XTREG(11, 44, 32, 4, 4, 0x010a, 0x0006, -2, 1, 0x0002, ar10, 0, 0, 0, 0, 0, 0)
+XTREG(12, 48, 32, 4, 4, 0x010b, 0x0006, -2, 1, 0x0002, ar11, 0, 0, 0, 0, 0, 0)
+XTREG(13, 52, 32, 4, 4, 0x010c, 0x0006, -2, 1, 0x0002, ar12, 0, 0, 0, 0, 0, 0)
+XTREG(14, 56, 32, 4, 4, 0x010d, 0x0006, -2, 1, 0x0002, ar13, 0, 0, 0, 0, 0, 0)
+XTREG(15, 60, 32, 4, 4, 0x010e, 0x0006, -2, 1, 0x0002, ar14, 0, 0, 0, 0, 0, 0)
+XTREG(16, 64, 32, 4, 4, 0x010f, 0x0006, -2, 1, 0x0002, ar15, 0, 0, 0, 0, 0, 0)
+XTREG(17, 68, 32, 4, 4, 0x0110, 0x0006, -2, 1, 0x0002, ar16, 0, 0, 0, 0, 0, 0)
+XTREG(18, 72, 32, 4, 4, 0x0111, 0x0006, -2, 1, 0x0002, ar17, 0, 0, 0, 0, 0, 0)
+XTREG(19, 76, 32, 4, 4, 0x0112, 0x0006, -2, 1, 0x0002, ar18, 0, 0, 0, 0, 0, 0)
+XTREG(20, 80, 32, 4, 4, 0x0113, 0x0006, -2, 1, 0x0002, ar19, 0, 0, 0, 0, 0, 0)
+XTREG(21, 84, 32, 4, 4, 0x0114, 0x0006, -2, 1, 0x0002, ar20, 0, 0, 0, 0, 0, 0)
+XTREG(22, 88, 32, 4, 4, 0x0115, 0x0006, -2, 1, 0x0002, ar21, 0, 0, 0, 0, 0, 0)
+XTREG(23, 92, 32, 4, 4, 0x0116, 0x0006, -2, 1, 0x0002, ar22, 0, 0, 0, 0, 0, 0)
+XTREG(24, 96, 32, 4, 4, 0x0117, 0x0006, -2, 1, 0x0002, ar23, 0, 0, 0, 0, 0, 0)
+XTREG(25, 100, 32, 4, 4, 0x0118, 0x0006, -2, 1, 0x0002, ar24, 0, 0, 0, 0, 0, 0)
+XTREG(26, 104, 32, 4, 4, 0x0119, 0x0006, -2, 1, 0x0002, ar25, 0, 0, 0, 0, 0, 0)
+XTREG(27, 108, 32, 4, 4, 0x011a, 0x0006, -2, 1, 0x0002, ar26, 0, 0, 0, 0, 0, 0)
+XTREG(28, 112, 32, 4, 4, 0x011b, 0x0006, -2, 1, 0x0002, ar27, 0, 0, 0, 0, 0, 0)
+XTREG(29, 116, 32, 4, 4, 0x011c, 0x0006, -2, 1, 0x0002, ar28, 0, 0, 0, 0, 0, 0)
+XTREG(30, 120, 32, 4, 4, 0x011d, 0x0006, -2, 1, 0x0002, ar29, 0, 0, 0, 0, 0, 0)
+XTREG(31, 124, 32, 4, 4, 0x011e, 0x0006, -2, 1, 0x0002, ar30, 0, 0, 0, 0, 0, 0)
+XTREG(32, 128, 32, 4, 4, 0x011f, 0x0006, -2, 1, 0x0002, ar31, 0, 0, 0, 0, 0, 0)
+XTREG(33, 132, 32, 4, 4, 0x0200, 0x0006, -2, 2, 0x1100, lbeg, 0, 0, 0, 0, 0, 0)
+XTREG(34, 136, 32, 4, 4, 0x0201, 0x0006, -2, 2, 0x1100, lend, 0, 0, 0, 0, 0, 0)
+XTREG(35, 140, 32, 4, 4, 0x0202, 0x0006, -2, 2, 0x1100, lcount, 0, 0, 0, 0, 0, 0)
+XTREG(36, 144, 6, 4, 4, 0x0203, 0x0006, -2, 2, 0x1100, sar, 0, 0, 0, 0, 0, 0)
+XTREG(37, 148, 32, 4, 4, 0x0205, 0x0006, -2, 2, 0x1100, litbase, 0, 0, 0, 0, 0, 0)
+XTREG(38, 152, 3, 4, 4, 0x0248, 0x0006, -2, 2, 0x1002, windowbase, 0, 0, 0, 0, 0, 0)
+XTREG(39, 156, 8, 4, 4, 0x0249, 0x0006, -2, 2, 0x1002, windowstart, 0, 0, 0, 0, 0, 0)
+XTREG(40, 160, 32, 4, 4, 0x02b0, 0x0002, -2, 2, 0x1000, sr176, 0, 0, 0, 0, 0, 0)
+XTREG(41, 164, 32, 4, 4, 0x02d0, 0x0002, -2, 2, 0x1000, sr208, 0, 0, 0, 0, 0, 0)
+XTREG(42, 168, 19, 4, 4, 0x02e6, 0x0006, -2, 2, 0x1100, ps, 0, 0, 0, 0, 0, 0)
+XTREG(43, 172, 32, 4, 4, 0x03e7, 0x0006, -2, 3, 0x0110, threadptr, 0, 0, 0, 0, 0, 0)
+XTREG(44, 176, 32, 4, 4, 0x020c, 0x0006, -1, 2, 0x1100, scompare1, 0, 0, 0, 0, 0, 0)
+XTREG(45, 180, 32, 4, 4, 0x0210, 0x0006, -1, 2, 0x1100, acclo, 0, 0, 0, 0, 0, 0)
+XTREG(46, 184, 8, 4, 4, 0x0211, 0x0006, -1, 2, 0x1100, acchi, 0, 0, 0, 0, 0, 0)
+XTREG(47, 188, 32, 4, 4, 0x0220, 0x0006, -1, 2, 0x1100, m0, 0, 0, 0, 0, 0, 0)
+XTREG(48, 192, 32, 4, 4, 0x0221, 0x0006, -1, 2, 0x1100, m1, 0, 0, 0, 0, 0, 0)
+XTREG(49, 196, 32, 4, 4, 0x0222, 0x0006, -1, 2, 0x1100, m2, 0, 0, 0, 0, 0, 0)
+XTREG(50, 200, 32, 4, 4, 0x0223, 0x0006, -1, 2, 0x1100, m3, 0, 0, 0, 0, 0, 0)
+XTREG(51, 204, 32, 4, 4, 0x03e6, 0x000e, -1, 3, 0x0110, expstate, 0, 0, 0, 0, 0, 0)
+XTREG(52, 208, 32, 4, 4, 0x0253, 0x0007, -2, 2, 0x1000, ptevaddr, 0, 0, 0, 0, 0, 0)
+XTREG(53, 212, 32, 4, 4, 0x0259, 0x000d, -2, 2, 0x1000, mmid, 0, 0, 0, 0, 0, 0)
+XTREG(54, 216, 32, 4, 4, 0x025a, 0x0007, -2, 2, 0x1000, rasid, 0, 0, 0, 0, 0, 0)
+XTREG(55, 220, 25, 4, 4, 0x025b, 0x0007, -2, 2, 0x1000, itlbcfg, 0, 0, 0, 0, 0, 0)
+XTREG(56, 224, 25, 4, 4, 0x025c, 0x0007, -2, 2, 0x1000, dtlbcfg, 0, 0, 0, 0, 0, 0)
+XTREG(57, 228, 2, 4, 4, 0x0260, 0x0007, -2, 2, 0x1000, ibreakenable, 0, 0, 0, 0, 0, 0)
+XTREG(58, 232, 6, 4, 4, 0x0263, 0x0007, -2, 2, 0x1000, atomctl, 0, 0, 0, 0, 0, 0)
+XTREG(59, 236, 32, 4, 4, 0x0268, 0x0007, -2, 2, 0x1000, ddr, 0, 0, 0, 0, 0, 0)
+XTREG(60, 240, 32, 4, 4, 0x0280, 0x0007, -2, 2, 0x1000, ibreaka0, 0, 0, 0, 0, 0, 0)
+XTREG(61, 244, 32, 4, 4, 0x0281, 0x0007, -2, 2, 0x1000, ibreaka1, 0, 0, 0, 0, 0, 0)
+XTREG(62, 248, 32, 4, 4, 0x0290, 0x0007, -2, 2, 0x1000, dbreaka0, 0, 0, 0, 0, 0, 0)
+XTREG(63, 252, 32, 4, 4, 0x0291, 0x0007, -2, 2, 0x1000, dbreaka1, 0, 0, 0, 0, 0, 0)
+XTREG(64, 256, 32, 4, 4, 0x02a0, 0x0007, -2, 2, 0x1000, dbreakc0, 0, 0, 0, 0, 0, 0)
+XTREG(65, 260, 32, 4, 4, 0x02a1, 0x0007, -2, 2, 0x1000, dbreakc1, 0, 0, 0, 0, 0, 0)
+XTREG(66, 264, 32, 4, 4, 0x02b1, 0x0007, -2, 2, 0x1000, epc1, 0, 0, 0, 0, 0, 0)
+XTREG(67, 268, 32, 4, 4, 0x02b2, 0x0007, -2, 2, 0x1000, epc2, 0, 0, 0, 0, 0, 0)
+XTREG(68, 272, 32, 4, 4, 0x02b3, 0x0007, -2, 2, 0x1000, epc3, 0, 0, 0, 0, 0, 0)
+XTREG(69, 276, 32, 4, 4, 0x02b4, 0x0007, -2, 2, 0x1000, epc4, 0, 0, 0, 0, 0, 0)
+XTREG(70, 280, 32, 4, 4, 0x02b5, 0x0007, -2, 2, 0x1000, epc5, 0, 0, 0, 0, 0, 0)
+XTREG(71, 284, 32, 4, 4, 0x02b6, 0x0007, -2, 2, 0x1000, epc6, 0, 0, 0, 0, 0, 0)
+XTREG(72, 288, 32, 4, 4, 0x02b7, 0x0007, -2, 2, 0x1000, epc7, 0, 0, 0, 0, 0, 0)
+XTREG(73, 292, 32, 4, 4, 0x02c0, 0x0007, -2, 2, 0x1000, depc, 0, 0, 0, 0, 0, 0)
+XTREG(74, 296, 19, 4, 4, 0x02c2, 0x0007, -2, 2, 0x1000, eps2, 0, 0, 0, 0, 0, 0)
+XTREG(75, 300, 19, 4, 4, 0x02c3, 0x0007, -2, 2, 0x1000, eps3, 0, 0, 0, 0, 0, 0)
+XTREG(76, 304, 19, 4, 4, 0x02c4, 0x0007, -2, 2, 0x1000, eps4, 0, 0, 0, 0, 0, 0)
+XTREG(77, 308, 19, 4, 4, 0x02c5, 0x0007, -2, 2, 0x1000, eps5, 0, 0, 0, 0, 0, 0)
+XTREG(78, 312, 19, 4, 4, 0x02c6, 0x0007, -2, 2, 0x1000, eps6, 0, 0, 0, 0, 0, 0)
+XTREG(79, 316, 19, 4, 4, 0x02c7, 0x0007, -2, 2, 0x1000, eps7, 0, 0, 0, 0, 0, 0)
+XTREG(80, 320, 32, 4, 4, 0x02d1, 0x0007, -2, 2, 0x1000, excsave1, 0, 0, 0, 0, 0, 0)
+XTREG(81, 324, 32, 4, 4, 0x02d2, 0x0007, -2, 2, 0x1000, excsave2, 0, 0, 0, 0, 0, 0)
+XTREG(82, 328, 32, 4, 4, 0x02d3, 0x0007, -2, 2, 0x1000, excsave3, 0, 0, 0, 0, 0, 0)
+XTREG(83, 332, 32, 4, 4, 0x02d4, 0x0007, -2, 2, 0x1000, excsave4, 0, 0, 0, 0, 0, 0)
+XTREG(84, 336, 32, 4, 4, 0x02d5, 0x0007, -2, 2, 0x1000, excsave5, 0, 0, 0, 0, 0, 0)
+XTREG(85, 340, 32, 4, 4, 0x02d6, 0x0007, -2, 2, 0x1000, excsave6, 0, 0, 0, 0, 0, 0)
+XTREG(86, 344, 32, 4, 4, 0x02d7, 0x0007, -2, 2, 0x1000, excsave7, 0, 0, 0, 0, 0, 0)
+XTREG(87, 348, 8, 4, 4, 0x02e0, 0x0007, -2, 2, 0x1000, cpenable, 0, 0, 0, 0, 0, 0)
+XTREG(88, 352, 22, 4, 4, 0x02e2, 0x000b, -2, 2, 0x1000, interrupt, 0, 0, 0, 0, 0, 0)
+XTREG(89, 356, 22, 4, 4, 0x02e2, 0x000d, -2, 2, 0x1000, intset, 0, 0, 0, 0, 0, 0)
+XTREG(90, 360, 22, 4, 4, 0x02e3, 0x000d, -2, 2, 0x1000, intclear, 0, 0, 0, 0, 0, 0)
+XTREG(91, 364, 22, 4, 4, 0x02e4, 0x0007, -2, 2, 0x1000, intenable, 0, 0, 0, 0, 0, 0)
+XTREG(92, 368, 32, 4, 4, 0x02e7, 0x0007, -2, 2, 0x1000, vecbase, 0, 0, 0, 0, 0, 0)
+XTREG(93, 372, 6, 4, 4, 0x02e8, 0x0007, -2, 2, 0x1000, exccause, 0, 0, 0, 0, 0, 0)
+XTREG(94, 376, 12, 4, 4, 0x02e9, 0x0003, -2, 2, 0x1000, debugcause, 0, 0, 0, 0, 0, 0)
+XTREG(95, 380, 32, 4, 4, 0x02ea, 0x000f, -2, 2, 0x1000, ccount, 0, 0, 0, 0, 0, 0)
+XTREG(96, 384, 32, 4, 4, 0x02eb, 0x0003, -2, 2, 0x1000, prid, 0, 0, 0, 0, 0, 0)
+XTREG(97, 388, 32, 4, 4, 0x02ec, 0x000f, -2, 2, 0x1000, icount, 0, 0, 0, 0, 0, 0)
+XTREG(98, 392, 4, 4, 4, 0x02ed, 0x0007, -2, 2, 0x1000, icountlevel, 0, 0, 0, 0, 0, 0)
+XTREG(99, 396, 32, 4, 4, 0x02ee, 0x0007, -2, 2, 0x1000, excvaddr, 0, 0, 0, 0, 0, 0)
+XTREG(100, 400, 32, 4, 4, 0x02f0, 0x000f, -2, 2, 0x1000, ccompare0, 0, 0, 0, 0, 0, 0)
+XTREG(101, 404, 32, 4, 4, 0x02f1, 0x000f, -2, 2, 0x1000, ccompare1, 0, 0, 0, 0, 0, 0)
+XTREG(102, 408, 32, 4, 4, 0x02f2, 0x000f, -2, 2, 0x1000, ccompare2, 0, 0, 0, 0, 0, 0)
+XTREG(103, 412, 32, 4, 4, 0x02f4, 0x0007, -2, 2, 0x1000, misc0, 0, 0, 0, 0, 0, 0)
+XTREG(104, 416, 32, 4, 4, 0x02f5, 0x0007, -2, 2, 0x1000, misc1, 0, 0, 0, 0, 0, 0)
+XTREG(105, 420, 32, 4, 4, 0x0000, 0x0006, -2, 8, 0x0100, a0, 0, 0, 0, 0, 0, 0)
+XTREG(106, 424, 32, 4, 4, 0x0001, 0x0006, -2, 8, 0x0100, a1, 0, 0, 0, 0, 0, 0)
+XTREG(107, 428, 32, 4, 4, 0x0002, 0x0006, -2, 8, 0x0100, a2, 0, 0, 0, 0, 0, 0)
+XTREG(108, 432, 32, 4, 4, 0x0003, 0x0006, -2, 8, 0x0100, a3, 0, 0, 0, 0, 0, 0)
+XTREG(109, 436, 32, 4, 4, 0x0004, 0x0006, -2, 8, 0x0100, a4, 0, 0, 0, 0, 0, 0)
+XTREG(110, 440, 32, 4, 4, 0x0005, 0x0006, -2, 8, 0x0100, a5, 0, 0, 0, 0, 0, 0)
+XTREG(111, 444, 32, 4, 4, 0x0006, 0x0006, -2, 8, 0x0100, a6, 0, 0, 0, 0, 0, 0)
+XTREG(112, 448, 32, 4, 4, 0x0007, 0x0006, -2, 8, 0x0100, a7, 0, 0, 0, 0, 0, 0)
+XTREG(113, 452, 32, 4, 4, 0x0008, 0x0006, -2, 8, 0x0100, a8, 0, 0, 0, 0, 0, 0)
+XTREG(114, 456, 32, 4, 4, 0x0009, 0x0006, -2, 8, 0x0100, a9, 0, 0, 0, 0, 0, 0)
+XTREG(115, 460, 32, 4, 4, 0x000a, 0x0006, -2, 8, 0x0100, a10, 0, 0, 0, 0, 0, 0)
+XTREG(116, 464, 32, 4, 4, 0x000b, 0x0006, -2, 8, 0x0100, a11, 0, 0, 0, 0, 0, 0)
+XTREG(117, 468, 32, 4, 4, 0x000c, 0x0006, -2, 8, 0x0100, a12, 0, 0, 0, 0, 0, 0)
+XTREG(118, 472, 32, 4, 4, 0x000d, 0x0006, -2, 8, 0x0100, a13, 0, 0, 0, 0, 0, 0)
+XTREG(119, 476, 32, 4, 4, 0x000e, 0x0006, -2, 8, 0x0100, a14, 0, 0, 0, 0, 0, 0)
+XTREG(120, 480, 32, 4, 4, 0x000f, 0x0006, -2, 8, 0x0100, a15, 0, 0, 0, 0, 0, 0)
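gdb-config.c is not compiled on its own: it is an x-macro table, and the columns in the legend near its top (index, offset, bit width, byte size, alignment, target register number, flags, coprocessor, type, group, name) are consumed by whatever XTREG expands to at the include site; in QEMU that definition lives in overlay_tool.h and keeps only the fields mirrored by XtensaGdbReg in cpu.h further down in this patch. A hedged sketch of such a consumer, for illustration only (ExampleGdbReg is a stand-in type, not QEMU's, and the include path assumes compilation from target/xtensa/):

/* Illustrative only -- not part of this patch. */
typedef struct {
    int targno;
    int type;
    int group;
    unsigned size;
} ExampleGdbReg;                    /* mirrors XtensaGdbReg in cpu.h */

#define XTREG(idx, ofs, bi, sz, al, targno, flags, cp, typ, grp, name, \
              a1, a2, a3, a4, a5, a6) \
    { .targno = (targno), .type = (typ), .group = (grp), .size = (sz) },

static const ExampleGdbReg dc233c_gdb_regs[] = {
#include "core-dc233c/gdb-config.c"
};
#undef XTREG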
diff --git a/target/xtensa/core-fsf.c b/target/xtensa/core-fsf.c
new file mode 100644
index 0000000000..15ef470e8b
--- /dev/null
+++ b/target/xtensa/core-fsf.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+
+#include "core-fsf/core-isa.h"
+#include "overlay_tool.h"
+
+static XtensaConfig fsf __attribute__((unused)) = {
+ .name = "fsf",
+ .gdb_regmap = {
+ /* GDB for this core is not supported currently */
+ .reg = {
+ XTREG_END
+ },
+ },
+ .clock_freq_khz = 10000,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE(fsf)
diff --git a/target/xtensa/core-fsf/core-isa.h b/target/xtensa/core-fsf/core-isa.h
new file mode 100644
index 0000000000..fb2bb8f2cb
--- /dev/null
+++ b/target/xtensa/core-fsf/core-isa.h
@@ -0,0 +1,360 @@
+/*
+ * Xtensa processor core configuration information.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2006 Tensilica Inc.
+ */
+
+#ifndef XTENSA_FSF_CORE_ISA_H
+#define XTENSA_FSF_CORE_ISA_H
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
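Because every XCHAL_HAVE_* macro is defined to either 0 or 1, feature tests in code that includes this header can use plain #if rather than #ifdef. A minimal illustration, not part of the patch (it assumes core-fsf/ is on the include path as core-isa.h):

/* Illustrative only -- not part of this patch. */
#include "core-isa.h"

int core_has_register_windows(void)
{
#if XCHAL_HAVE_WINDOWED
    return 1;   /* windowed-register option is configured (== 1 above) */
#else
    return 0;   /* call0-only configuration */
#endif
}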
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 1 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 64 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 6 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 0 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 0 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 0 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 0 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 0 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 0 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 0 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 0 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 0 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* floating point pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 4 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+
+#define XCHAL_SW_VERSION 800002 /* sw version of this header */
+
+#define XCHAL_CORE_ID "fsf" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION "fsf standard core"
+#define XCHAL_BUILD_UNIQUE_ID 0x00006700 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC103C3FF /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x0C006700 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX2.0.0" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2200 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 0 /* minor ver# of targeted hw */
+#define XTHAL_HW_REL_LX2 1
+#define XTHAL_HW_REL_LX2_0 1
+#define XTHAL_HW_REL_LX2_0_0 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2200 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 0 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2200 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 0 /* minor v of latest tgt hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 0 /* writeback feature */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 8
+#define XCHAL_DCACHE_SETWIDTH 8
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 2
+#define XCHAL_DCACHE_WAYS 2
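Plugging this core's values into the formula above is a quick sanity check: 2 ways x 2^(4 + 8) = 8192 bytes for both caches, matching XCHAL_ICACHE_SIZE and XCHAL_DCACHE_SIZE. A throwaway check program, for illustration only and not part of the patch (it assumes core-fsf/ is on the include path as core-isa.h):

/* Illustrative only -- not part of this patch. */
#include <assert.h>
#include "core-isa.h"

int main(void)
{
    /* size == ways * 2^(linewidth + setwidth) */
    assert(XCHAL_ICACHE_WAYS * (1u << (XCHAL_ICACHE_LINEWIDTH + XCHAL_ICACHE_SETWIDTH))
           == XCHAL_ICACHE_SIZE);   /* 2 * 2^(4 + 8) == 8192 */
    assert(XCHAL_DCACHE_WAYS * (1u << (XCHAL_DCACHE_LINEWIDTH + XCHAL_DCACHE_SETWIDTH))
           == XCHAL_DCACHE_SIZE);   /* 2 * 2^(4 + 8) == 8192 */
    return 0;
}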
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 0
+#define XCHAL_DCACHE_LINE_LOCKABLE 0
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 0 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 17 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 10 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 4 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 1 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x000064F9
+#define XCHAL_INTLEVEL2_MASK 0x00008902
+#define XCHAL_INTLEVEL3_MASK 0x00011204
+#define XCHAL_INTLEVEL4_MASK 0x00000000
+#define XCHAL_INTLEVEL5_MASK 0x00000000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00000000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x000064F9
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x0000EDFB
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x0001FFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 2
+#define XCHAL_INT2_LEVEL 3
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 1
+#define XCHAL_INT11_LEVEL 2
+#define XCHAL_INT12_LEVEL 3
+#define XCHAL_INT13_LEVEL 1
+#define XCHAL_INT14_LEVEL 1
+#define XCHAL_INT15_LEVEL 2
+#define XCHAL_INT16_LEVEL 3
+#define XCHAL_DEBUGLEVEL 4 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 0 /* OCD external db interrupt */
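The per-level masks above are just these per-interrupt levels re-encoded as bit masks: interrupt n contributes bit n to the mask of its level (for example, interrupts 0, 3-7, 10, 13 and 14 are level 1, giving 0x000064F9), and the *_ANDBELOW masks are the cumulative OR of levels 1..n. A small consistency check, for illustration only and not part of the patch (it assumes core-fsf/ is on the include path as core-isa.h):

/* Illustrative only -- not part of this patch.  Rebuilds the per-level
 * interrupt masks from the per-interrupt levels defined above. */
#include <assert.h>
#include "core-isa.h"

static unsigned level_to_mask(int level)
{
    static const int int_level[XCHAL_NUM_INTERRUPTS] = {
        XCHAL_INT0_LEVEL,  XCHAL_INT1_LEVEL,  XCHAL_INT2_LEVEL,
        XCHAL_INT3_LEVEL,  XCHAL_INT4_LEVEL,  XCHAL_INT5_LEVEL,
        XCHAL_INT6_LEVEL,  XCHAL_INT7_LEVEL,  XCHAL_INT8_LEVEL,
        XCHAL_INT9_LEVEL,  XCHAL_INT10_LEVEL, XCHAL_INT11_LEVEL,
        XCHAL_INT12_LEVEL, XCHAL_INT13_LEVEL, XCHAL_INT14_LEVEL,
        XCHAL_INT15_LEVEL, XCHAL_INT16_LEVEL,
    };
    unsigned mask = 0;
    int i;

    for (i = 0; i < XCHAL_NUM_INTERRUPTS; i++) {
        if (int_level[i] == level) {
            mask |= 1u << i;
        }
    }
    return mask;
}

int main(void)
{
    assert(level_to_mask(1) == XCHAL_INTLEVEL1_MASK);   /* 0x000064F9 */
    assert(level_to_mask(2) == XCHAL_INTLEVEL2_MASK);   /* 0x00008902 */
    assert(level_to_mask(3) == XCHAL_INTLEVEL3_MASK);   /* 0x00011204 */
    assert(level_to_mask(4) == XCHAL_INTLEVEL4_MASK);   /* no level-4 ints */
    return 0;
}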
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_SOFTWARE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFFE0000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x0001E000
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00000380
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000007F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00001C00
+#define XCHAL_INTTYPE_MASK_NMI 0x00000000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 10 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 11 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 12 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+/* (There are many interrupts each at level(s) 1, 2, 3.) */
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 2) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 3) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 6 /* (intlevel 1) */
+#define XCHAL_EXTINT7_NUM 7 /* (intlevel 1) */
+#define XCHAL_EXTINT8_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT9_NUM 9 /* (intlevel 3) */
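For this core the ten external pins map onto core interrupts 0-9 in order, and tables like this are what let board models translate a BInterrupt pin number into the core interrupt to raise (QEMU's per-core description keeps the same mapping in XtensaConfig::extint; see cpu.h and xtensa_get_extint() later in this patch). A minimal sketch of the idea, for illustration only and not part of the patch (it assumes core-fsf/ is on the include path):

/* Illustrative only -- not part of this patch. */
#include "core-isa.h"

/* External (BInterrupt) pin number -> core interrupt number. */
static const unsigned fsf_extint_to_core_irq[XCHAL_NUM_EXTINTERRUPTS] = {
    XCHAL_EXTINT0_NUM, XCHAL_EXTINT1_NUM, XCHAL_EXTINT2_NUM,
    XCHAL_EXTINT3_NUM, XCHAL_EXTINT4_NUM, XCHAL_EXTINT5_NUM,
    XCHAL_EXTINT6_NUM, XCHAL_EXTINT7_NUM, XCHAL_EXTINT8_NUM,
    XCHAL_EXTINT9_NUM,
};

unsigned core_irq_for_pin(unsigned pin)
{
    return fsf_extint_to_core_irq[pin];   /* caller must keep pin < 10 */
}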
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000020
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000020
+#define XCHAL_USER_VECTOR_VADDR 0xD0000220
+#define XCHAL_USER_VECTOR_PADDR 0x00000220
+#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000200
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00000200
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD0000290
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x00000290
+#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000240
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000240
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0xD0000250
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x00000250
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0xFE000520
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0xFE000520
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL4_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See <xtensa/config/core-matmap.h> header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 0 /* one way maps I+D 4GB vaddr */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* XTENSA_FSF_CORE_ISA_H */
diff --git a/target/xtensa/cpu-qom.h b/target/xtensa/cpu-qom.h
new file mode 100644
index 0000000000..403bd95721
--- /dev/null
+++ b/target/xtensa/cpu-qom.h
@@ -0,0 +1,66 @@
+/*
+ * QEMU Xtensa CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef QEMU_XTENSA_CPU_QOM_H
+#define QEMU_XTENSA_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#define TYPE_XTENSA_CPU "xtensa-cpu"
+
+#define XTENSA_CPU_CLASS(class) \
+ OBJECT_CLASS_CHECK(XtensaCPUClass, (class), TYPE_XTENSA_CPU)
+#define XTENSA_CPU(obj) \
+ OBJECT_CHECK(XtensaCPU, (obj), TYPE_XTENSA_CPU)
+#define XTENSA_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(XtensaCPUClass, (obj), TYPE_XTENSA_CPU)
+
+typedef struct XtensaConfig XtensaConfig;
+
+/**
+ * XtensaCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ * @config: The CPU core configuration.
+ *
+ * An Xtensa CPU model.
+ */
+typedef struct XtensaCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+
+ const XtensaConfig *config;
+} XtensaCPUClass;
+
+typedef struct XtensaCPU XtensaCPU;
+
+#endif
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
new file mode 100644
index 0000000000..e8e9f9175b
--- /dev/null
+++ b/target/xtensa/cpu.c
@@ -0,0 +1,184 @@
+/*
+ * QEMU Xtensa CPU
+ *
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "migration/vmstate.h"
+#include "exec/exec-all.h"
+
+
+static void xtensa_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static bool xtensa_cpu_has_work(CPUState *cs)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+
+ return cpu->env.pending_irq_level;
+}
+
+/* CPUClass::reset() */
+static void xtensa_cpu_reset(CPUState *s)
+{
+ XtensaCPU *cpu = XTENSA_CPU(s);
+ XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(cpu);
+ CPUXtensaState *env = &cpu->env;
+
+ xcc->parent_reset(s);
+
+ env->exception_taken = 0;
+ env->pc = env->config->exception_vector[EXC_RESET];
+ env->sregs[LITBASE] &= ~1;
+ env->sregs[PS] = xtensa_option_enabled(env->config,
+ XTENSA_OPTION_INTERRUPT) ? 0x1f : 0x10;
+ env->sregs[VECBASE] = env->config->vecbase;
+ env->sregs[IBREAKENABLE] = 0;
+ env->sregs[CACHEATTR] = 0x22222222;
+ env->sregs[ATOMCTL] = xtensa_option_enabled(env->config,
+ XTENSA_OPTION_ATOMCTL) ? 0x28 : 0x15;
+ env->sregs[CONFIGID0] = env->config->configid[0];
+ env->sregs[CONFIGID1] = env->config->configid[1];
+
+ env->pending_irq_level = 0;
+ reset_mmu(env);
+}
+
+static ObjectClass *xtensa_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ if (cpu_model == NULL) {
+ return NULL;
+ }
+
+ typename = g_strdup_printf("%s-" TYPE_XTENSA_CPU, cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (oc == NULL || !object_class_dynamic_cast(oc, TYPE_XTENSA_CPU) ||
+ object_class_is_abstract(oc)) {
+ return NULL;
+ }
+ return oc;
+}
+
+static void xtensa_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ cs->gdb_num_regs = xcc->config->gdb_regmap.num_regs;
+
+ qemu_init_vcpu(cs);
+
+ xcc->parent_realize(dev, errp);
+}
+
+static void xtensa_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ XtensaCPU *cpu = XTENSA_CPU(obj);
+ XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(obj);
+ CPUXtensaState *env = &cpu->env;
+ static bool tcg_inited;
+
+ cs->env_ptr = env;
+ env->config = xcc->config;
+
+ if (tcg_enabled() && !tcg_inited) {
+ tcg_inited = true;
+ xtensa_translate_init();
+ }
+}
+
+static const VMStateDescription vmstate_xtensa_cpu = {
+ .name = "cpu",
+ .unmigratable = 1,
+};
+
+static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ XtensaCPUClass *xcc = XTENSA_CPU_CLASS(cc);
+
+ xcc->parent_realize = dc->realize;
+ dc->realize = xtensa_cpu_realizefn;
+
+ xcc->parent_reset = cc->reset;
+ cc->reset = xtensa_cpu_reset;
+
+ cc->class_by_name = xtensa_cpu_class_by_name;
+ cc->has_work = xtensa_cpu_has_work;
+ cc->do_interrupt = xtensa_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = xtensa_cpu_exec_interrupt;
+ cc->dump_state = xtensa_cpu_dump_state;
+ cc->set_pc = xtensa_cpu_set_pc;
+ cc->gdb_read_register = xtensa_cpu_gdb_read_register;
+ cc->gdb_write_register = xtensa_cpu_gdb_write_register;
+ cc->gdb_stop_before_watchpoint = true;
+#ifndef CONFIG_USER_ONLY
+ cc->do_unaligned_access = xtensa_cpu_do_unaligned_access;
+ cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
+ cc->do_unassigned_access = xtensa_cpu_do_unassigned_access;
+#endif
+ cc->debug_excp_handler = xtensa_breakpoint_handler;
+ dc->vmsd = &vmstate_xtensa_cpu;
+}
+
+static const TypeInfo xtensa_cpu_type_info = {
+ .name = TYPE_XTENSA_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(XtensaCPU),
+ .instance_init = xtensa_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(XtensaCPUClass),
+ .class_init = xtensa_cpu_class_init,
+};
+
+static void xtensa_cpu_register_types(void)
+{
+ type_register_static(&xtensa_cpu_type_info);
+}
+
+type_init(xtensa_cpu_register_types)
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
new file mode 100644
index 0000000000..7fe82a37af
--- /dev/null
+++ b/target/xtensa/cpu.h
@@ -0,0 +1,587 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef XTENSA_CPU_H
+#define XTENSA_CPU_H
+
+#define ALIGNED_ONLY
+#define TARGET_LONG_BITS 32
+
+#define CPUArchState struct CPUXtensaState
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat.h"
+
+#define NB_MMU_MODES 4
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#define TARGET_PAGE_BITS 12
+
+enum {
+ /* Additional instructions */
+ XTENSA_OPTION_CODE_DENSITY,
+ XTENSA_OPTION_LOOP,
+ XTENSA_OPTION_EXTENDED_L32R,
+ XTENSA_OPTION_16_BIT_IMUL,
+ XTENSA_OPTION_32_BIT_IMUL,
+ XTENSA_OPTION_32_BIT_IMUL_HIGH,
+ XTENSA_OPTION_32_BIT_IDIV,
+ XTENSA_OPTION_MAC16,
+ XTENSA_OPTION_MISC_OP_NSA,
+ XTENSA_OPTION_MISC_OP_MINMAX,
+ XTENSA_OPTION_MISC_OP_SEXT,
+ XTENSA_OPTION_MISC_OP_CLAMPS,
+ XTENSA_OPTION_COPROCESSOR,
+ XTENSA_OPTION_BOOLEAN,
+ XTENSA_OPTION_FP_COPROCESSOR,
+ XTENSA_OPTION_MP_SYNCHRO,
+ XTENSA_OPTION_CONDITIONAL_STORE,
+ XTENSA_OPTION_ATOMCTL,
+ XTENSA_OPTION_DEPBITS,
+
+ /* Interrupts and exceptions */
+ XTENSA_OPTION_EXCEPTION,
+ XTENSA_OPTION_RELOCATABLE_VECTOR,
+ XTENSA_OPTION_UNALIGNED_EXCEPTION,
+ XTENSA_OPTION_INTERRUPT,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ XTENSA_OPTION_TIMER_INTERRUPT,
+
+ /* Local memory */
+ XTENSA_OPTION_ICACHE,
+ XTENSA_OPTION_ICACHE_TEST,
+ XTENSA_OPTION_ICACHE_INDEX_LOCK,
+ XTENSA_OPTION_DCACHE,
+ XTENSA_OPTION_DCACHE_TEST,
+ XTENSA_OPTION_DCACHE_INDEX_LOCK,
+ XTENSA_OPTION_IRAM,
+ XTENSA_OPTION_IROM,
+ XTENSA_OPTION_DRAM,
+ XTENSA_OPTION_DROM,
+ XTENSA_OPTION_XLMI,
+ XTENSA_OPTION_HW_ALIGNMENT,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+
+ /* Memory protection and translation */
+ XTENSA_OPTION_REGION_PROTECTION,
+ XTENSA_OPTION_REGION_TRANSLATION,
+ XTENSA_OPTION_MMU,
+ XTENSA_OPTION_CACHEATTR,
+
+ /* Other */
+ XTENSA_OPTION_WINDOWED_REGISTER,
+ XTENSA_OPTION_PROCESSOR_INTERFACE,
+ XTENSA_OPTION_MISC_SR,
+ XTENSA_OPTION_THREAD_POINTER,
+ XTENSA_OPTION_PROCESSOR_ID,
+ XTENSA_OPTION_DEBUG,
+ XTENSA_OPTION_TRACE_PORT,
+};
+
+enum {
+ THREADPTR = 231,
+ FCR = 232,
+ FSR = 233,
+};
+
+enum {
+ LBEG = 0,
+ LEND = 1,
+ LCOUNT = 2,
+ SAR = 3,
+ BR = 4,
+ LITBASE = 5,
+ SCOMPARE1 = 12,
+ ACCLO = 16,
+ ACCHI = 17,
+ MR = 32,
+ WINDOW_BASE = 72,
+ WINDOW_START = 73,
+ PTEVADDR = 83,
+ RASID = 90,
+ ITLBCFG = 91,
+ DTLBCFG = 92,
+ IBREAKENABLE = 96,
+ CACHEATTR = 98,
+ ATOMCTL = 99,
+ IBREAKA = 128,
+ DBREAKA = 144,
+ DBREAKC = 160,
+ CONFIGID0 = 176,
+ EPC1 = 177,
+ DEPC = 192,
+ EPS2 = 194,
+ CONFIGID1 = 208,
+ EXCSAVE1 = 209,
+ CPENABLE = 224,
+ INTSET = 226,
+ INTCLEAR = 227,
+ INTENABLE = 228,
+ PS = 230,
+ VECBASE = 231,
+ EXCCAUSE = 232,
+ DEBUGCAUSE = 233,
+ CCOUNT = 234,
+ PRID = 235,
+ ICOUNT = 236,
+ ICOUNTLEVEL = 237,
+ EXCVADDR = 238,
+ CCOMPARE = 240,
+ MISC = 244,
+};
+
+#define PS_INTLEVEL 0xf
+#define PS_INTLEVEL_SHIFT 0
+
+#define PS_EXCM 0x10
+#define PS_UM 0x20
+
+#define PS_RING 0xc0
+#define PS_RING_SHIFT 6
+
+#define PS_OWB 0xf00
+#define PS_OWB_SHIFT 8
+
+#define PS_CALLINC 0x30000
+#define PS_CALLINC_SHIFT 16
+#define PS_CALLINC_LEN 2
+
+#define PS_WOE 0x40000
+
+#define DEBUGCAUSE_IC 0x1
+#define DEBUGCAUSE_IB 0x2
+#define DEBUGCAUSE_DB 0x4
+#define DEBUGCAUSE_BI 0x8
+#define DEBUGCAUSE_BN 0x10
+#define DEBUGCAUSE_DI 0x20
+#define DEBUGCAUSE_DBNUM 0xf00
+#define DEBUGCAUSE_DBNUM_SHIFT 8
+
+#define DBREAKC_SB 0x80000000
+#define DBREAKC_LB 0x40000000
+#define DBREAKC_SB_LB (DBREAKC_SB | DBREAKC_LB)
+#define DBREAKC_MASK 0x3f
+
+#define MAX_NAREG 64
+#define MAX_NINTERRUPT 32
+#define MAX_NLEVEL 6
+#define MAX_NNMI 1
+#define MAX_NCCOMPARE 3
+#define MAX_TLB_WAY_SIZE 8
+#define MAX_NDBREAK 2
+
+#define REGION_PAGE_MASK 0xe0000000
+
+#define PAGE_CACHE_MASK 0x700
+#define PAGE_CACHE_SHIFT 8
+#define PAGE_CACHE_INVALID 0x000
+#define PAGE_CACHE_BYPASS 0x100
+#define PAGE_CACHE_WT 0x200
+#define PAGE_CACHE_WB 0x400
+#define PAGE_CACHE_ISOLATE 0x600
+
+enum {
+ /* Static vectors */
+ EXC_RESET,
+ EXC_MEMORY_ERROR,
+
+ /* Dynamic vectors */
+ EXC_WINDOW_OVERFLOW4,
+ EXC_WINDOW_UNDERFLOW4,
+ EXC_WINDOW_OVERFLOW8,
+ EXC_WINDOW_UNDERFLOW8,
+ EXC_WINDOW_OVERFLOW12,
+ EXC_WINDOW_UNDERFLOW12,
+ EXC_IRQ,
+ EXC_KERNEL,
+ EXC_USER,
+ EXC_DOUBLE,
+ EXC_DEBUG,
+ EXC_MAX
+};
+
+enum {
+ ILLEGAL_INSTRUCTION_CAUSE = 0,
+ SYSCALL_CAUSE,
+ INSTRUCTION_FETCH_ERROR_CAUSE,
+ LOAD_STORE_ERROR_CAUSE,
+ LEVEL1_INTERRUPT_CAUSE,
+ ALLOCA_CAUSE,
+ INTEGER_DIVIDE_BY_ZERO_CAUSE,
+ PRIVILEGED_CAUSE = 8,
+ LOAD_STORE_ALIGNMENT_CAUSE,
+
+ INSTR_PIF_DATA_ERROR_CAUSE = 12,
+ LOAD_STORE_PIF_DATA_ERROR_CAUSE,
+ INSTR_PIF_ADDR_ERROR_CAUSE,
+ LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
+
+ INST_TLB_MISS_CAUSE,
+ INST_TLB_MULTI_HIT_CAUSE,
+ INST_FETCH_PRIVILEGE_CAUSE,
+ INST_FETCH_PROHIBITED_CAUSE = 20,
+ LOAD_STORE_TLB_MISS_CAUSE = 24,
+ LOAD_STORE_TLB_MULTI_HIT_CAUSE,
+ LOAD_STORE_PRIVILEGE_CAUSE,
+ LOAD_PROHIBITED_CAUSE = 28,
+ STORE_PROHIBITED_CAUSE,
+
+ COPROCESSOR0_DISABLED = 32,
+};
+
+typedef enum {
+ INTTYPE_LEVEL,
+ INTTYPE_EDGE,
+ INTTYPE_NMI,
+ INTTYPE_SOFTWARE,
+ INTTYPE_TIMER,
+ INTTYPE_DEBUG,
+ INTTYPE_WRITE_ERR,
+ INTTYPE_PROFILING,
+ INTTYPE_MAX
+} interrupt_type;
+
+typedef struct xtensa_tlb_entry {
+ uint32_t vaddr;
+ uint32_t paddr;
+ uint8_t asid;
+ uint8_t attr;
+ bool variable;
+} xtensa_tlb_entry;
+
+typedef struct xtensa_tlb {
+ unsigned nways;
+ const unsigned way_size[10];
+ bool varway56;
+ unsigned nrefillentries;
+} xtensa_tlb;
+
+typedef struct XtensaGdbReg {
+ int targno;
+ int type;
+ int group;
+ unsigned size;
+} XtensaGdbReg;
+
+typedef struct XtensaGdbRegmap {
+ int num_regs;
+ int num_core_regs;
+ /* PC + a + ar + sr + ur */
+ XtensaGdbReg reg[1 + 16 + 64 + 256 + 256];
+} XtensaGdbRegmap;
+
+struct XtensaConfig {
+ const char *name;
+ uint64_t options;
+ XtensaGdbRegmap gdb_regmap;
+ unsigned nareg;
+ int excm_level;
+ int ndepc;
+ uint32_t vecbase;
+ uint32_t exception_vector[EXC_MAX];
+ unsigned ninterrupt;
+ unsigned nlevel;
+ uint32_t interrupt_vector[MAX_NLEVEL + MAX_NNMI + 1];
+ uint32_t level_mask[MAX_NLEVEL + MAX_NNMI + 1];
+ uint32_t inttype_mask[INTTYPE_MAX];
+ struct {
+ uint32_t level;
+ interrupt_type inttype;
+ } interrupt[MAX_NINTERRUPT];
+ unsigned nccompare;
+ uint32_t timerint[MAX_NCCOMPARE];
+ unsigned nextint;
+ unsigned extint[MAX_NINTERRUPT];
+
+ unsigned debug_level;
+ unsigned nibreak;
+ unsigned ndbreak;
+
+ uint32_t configid[2];
+
+ uint32_t clock_freq_khz;
+
+ xtensa_tlb itlb;
+ xtensa_tlb dtlb;
+};
+
+typedef struct XtensaConfigList {
+ const XtensaConfig *config;
+ struct XtensaConfigList *next;
+} XtensaConfigList;
+
+#ifdef HOST_WORDS_BIGENDIAN
+enum {
+ FP_F32_HIGH,
+ FP_F32_LOW,
+};
+#else
+enum {
+ FP_F32_LOW,
+ FP_F32_HIGH,
+};
+#endif
+
+typedef struct CPUXtensaState {
+ const XtensaConfig *config;
+ uint32_t regs[16];
+ uint32_t pc;
+ uint32_t sregs[256];
+ uint32_t uregs[256];
+ uint32_t phys_regs[MAX_NAREG];
+ union {
+ float32 f32[2];
+ float64 f64;
+ } fregs[16];
+ float_status fp_status;
+
+ xtensa_tlb_entry itlb[7][MAX_TLB_WAY_SIZE];
+ xtensa_tlb_entry dtlb[10][MAX_TLB_WAY_SIZE];
+ unsigned autorefill_idx;
+
+ int pending_irq_level; /* level of last raised IRQ */
+ void **irq_inputs;
+ QEMUTimer *ccompare_timer;
+ uint32_t wake_ccount;
+ int64_t halt_clock;
+
+ int exception_taken;
+
+ /* Watchpoints for DBREAK registers */
+ struct CPUWatchpoint *cpu_watchpoint[MAX_NDBREAK];
+
+ CPU_COMMON
+} CPUXtensaState;
+
+/**
+ * XtensaCPU:
+ * @env: #CPUXtensaState
+ *
+ * An Xtensa CPU.
+ */
+struct XtensaCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUXtensaState env;
+};
+
+static inline XtensaCPU *xtensa_env_get_cpu(const CPUXtensaState *env)
+{
+ return container_of(env, XtensaCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(xtensa_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(XtensaCPU, env)
+
+void xtensa_cpu_do_interrupt(CPUState *cpu);
+bool xtensa_cpu_exec_interrupt(CPUState *cpu, int interrupt_request);
+void xtensa_cpu_do_unassigned_access(CPUState *cpu, hwaddr addr,
+ bool is_write, bool is_exec, int opaque,
+ unsigned size);
+void xtensa_cpu_dump_state(CPUState *cpu, FILE *f,
+ fprintf_function cpu_fprintf, int flags);
+hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int xtensa_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int xtensa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr);
+
+#define cpu_signal_handler cpu_xtensa_signal_handler
+#define cpu_list xtensa_cpu_list
+
+#ifdef TARGET_WORDS_BIGENDIAN
+#define XTENSA_DEFAULT_CPU_MODEL "fsf"
+#else
+#define XTENSA_DEFAULT_CPU_MODEL "dc232b"
+#endif
+
+XtensaCPU *cpu_xtensa_init(const char *cpu_model);
+
+#define cpu_init(cpu_model) CPU(cpu_xtensa_init(cpu_model))
+
+void xtensa_translate_init(void);
+void xtensa_breakpoint_handler(CPUState *cs);
+void xtensa_finalize_config(XtensaConfig *config);
+void xtensa_register_core(XtensaConfigList *node);
+void check_interrupts(CPUXtensaState *s);
+void xtensa_irq_init(CPUXtensaState *env);
+void *xtensa_get_extint(CPUXtensaState *env, unsigned extint);
+void xtensa_advance_ccount(CPUXtensaState *env, uint32_t d);
+void xtensa_timer_irq(CPUXtensaState *env, uint32_t id, uint32_t active);
+void xtensa_rearm_ccompare_timer(CPUXtensaState *env);
+int cpu_xtensa_signal_handler(int host_signum, void *pinfo, void *puc);
+void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+void xtensa_sync_window_from_phys(CPUXtensaState *env);
+void xtensa_sync_phys_from_window(CPUXtensaState *env);
+uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env, bool dtlb, uint32_t way);
+void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
+ uint32_t *vpn, uint32_t wi, uint32_t *ei);
+int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
+ uint32_t *pwi, uint32_t *pei, uint8_t *pring);
+void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
+ xtensa_tlb_entry *entry, bool dtlb,
+ unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte);
+void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
+ unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte);
+int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size, unsigned *access);
+void reset_mmu(CPUXtensaState *env);
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env);
+void debug_exception_env(CPUXtensaState *new_env, uint32_t cause);
+
+
+#define XTENSA_OPTION_BIT(opt) (((uint64_t)1) << (opt))
+#define XTENSA_OPTION_ALL (~(uint64_t)0)
+
+static inline bool xtensa_option_bits_enabled(const XtensaConfig *config,
+ uint64_t opt)
+{
+ return (config->options & opt) != 0;
+}
+
+static inline bool xtensa_option_enabled(const XtensaConfig *config, int opt)
+{
+ return xtensa_option_bits_enabled(config, XTENSA_OPTION_BIT(opt));
+}
+
+static inline int xtensa_get_cintlevel(const CPUXtensaState *env)
+{
+ int level = (env->sregs[PS] & PS_INTLEVEL) >> PS_INTLEVEL_SHIFT;
+ if ((env->sregs[PS] & PS_EXCM) && env->config->excm_level > level) {
+ level = env->config->excm_level;
+ }
+ return level;
+}
+
+static inline int xtensa_get_ring(const CPUXtensaState *env)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ return (env->sregs[PS] & PS_RING) >> PS_RING_SHIFT;
+ } else {
+ return 0;
+ }
+}
+
+static inline int xtensa_get_cring(const CPUXtensaState *env)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) &&
+ (env->sregs[PS] & PS_EXCM) == 0) {
+ return (env->sregs[PS] & PS_RING) >> PS_RING_SHIFT;
+ } else {
+ return 0;
+ }
+}
+
+static inline xtensa_tlb_entry *xtensa_tlb_get_entry(CPUXtensaState *env,
+ bool dtlb, unsigned wi, unsigned ei)
+{
+ return dtlb ?
+ env->dtlb[wi] + ei :
+ env->itlb[wi] + ei;
+}
+
+static inline uint32_t xtensa_replicate_windowstart(CPUXtensaState *env)
+{
+ return env->sregs[WINDOW_START] |
+ (env->sregs[WINDOW_START] << env->config->nareg / 4);
+}
+
+/* MMU modes definitions */
+#define MMU_MODE0_SUFFIX _ring0
+#define MMU_MODE1_SUFFIX _ring1
+#define MMU_MODE2_SUFFIX _ring2
+#define MMU_MODE3_SUFFIX _ring3
+
+static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
+{
+ return xtensa_get_cring(env);
+}
+
+#define XTENSA_TBFLAG_RING_MASK 0x3
+#define XTENSA_TBFLAG_EXCM 0x4
+#define XTENSA_TBFLAG_LITBASE 0x8
+#define XTENSA_TBFLAG_DEBUG 0x10
+#define XTENSA_TBFLAG_ICOUNT 0x20
+#define XTENSA_TBFLAG_CPENABLE_MASK 0x3fc0
+#define XTENSA_TBFLAG_CPENABLE_SHIFT 6
+#define XTENSA_TBFLAG_EXCEPTION 0x4000
+#define XTENSA_TBFLAG_WINDOW_MASK 0x18000
+#define XTENSA_TBFLAG_WINDOW_SHIFT 15
+
+static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
+ *pc = env->pc;
+ *cs_base = 0;
+ *flags = 0;
+ *flags |= xtensa_get_ring(env);
+ if (env->sregs[PS] & PS_EXCM) {
+ *flags |= XTENSA_TBFLAG_EXCM;
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_EXTENDED_L32R) &&
+ (env->sregs[LITBASE] & 1)) {
+ *flags |= XTENSA_TBFLAG_LITBASE;
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_DEBUG)) {
+ if (xtensa_get_cintlevel(env) < env->config->debug_level) {
+ *flags |= XTENSA_TBFLAG_DEBUG;
+ }
+ if (xtensa_get_cintlevel(env) < env->sregs[ICOUNTLEVEL]) {
+ *flags |= XTENSA_TBFLAG_ICOUNT;
+ }
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_COPROCESSOR)) {
+ *flags |= env->sregs[CPENABLE] << XTENSA_TBFLAG_CPENABLE_SHIFT;
+ }
+ if (cs->singlestep_enabled && env->exception_taken) {
+ *flags |= XTENSA_TBFLAG_EXCEPTION;
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER) &&
+ (env->sregs[PS] & (PS_WOE | PS_EXCM)) == PS_WOE) {
+ uint32_t windowstart = xtensa_replicate_windowstart(env) >>
+ (env->sregs[WINDOW_BASE] + 1);
+ uint32_t w = ctz32(windowstart | 0x8);
+
+ *flags |= w << XTENSA_TBFLAG_WINDOW_SHIFT;
+ } else {
+ *flags |= 3 << XTENSA_TBFLAG_WINDOW_SHIFT;
+ }
+}
+
+#include "exec/cpu-all.h"
+
+#endif
diff --git a/target/xtensa/gdbstub.c b/target/xtensa/gdbstub.c
new file mode 100644
index 0000000000..fa5469a4ef
--- /dev/null
+++ b/target/xtensa/gdbstub.c
@@ -0,0 +1,128 @@
+/*
+ * Xtensa gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "qemu/log.h"
+
+int xtensa_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+ const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
+ unsigned i;
+
+ if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
+ return 0;
+ }
+
+ switch (reg->type) {
+ case 9: /*pc*/
+ return gdb_get_reg32(mem_buf, env->pc);
+
+ case 1: /*ar*/
+ xtensa_sync_phys_from_window(env);
+ return gdb_get_reg32(mem_buf, env->phys_regs[(reg->targno & 0xff)
+ % env->config->nareg]);
+
+ case 2: /*SR*/
+ return gdb_get_reg32(mem_buf, env->sregs[reg->targno & 0xff]);
+
+ case 3: /*UR*/
+ return gdb_get_reg32(mem_buf, env->uregs[reg->targno & 0xff]);
+
+ case 4: /*f*/
+ i = reg->targno & 0x0f;
+ switch (reg->size) {
+ case 4:
+ return gdb_get_reg32(mem_buf,
+ float32_val(env->fregs[i].f32[FP_F32_LOW]));
+ case 8:
+ return gdb_get_reg64(mem_buf, float64_val(env->fregs[i].f64));
+ default:
+ return 0;
+ }
+
+ case 8: /*a*/
+ return gdb_get_reg32(mem_buf, env->regs[reg->targno & 0x0f]);
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "%s from reg %d of unsupported type %d\n",
+ __func__, n, reg->type);
+ return 0;
+ }
+}
+
+int xtensa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+ uint32_t tmp;
+ const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
+
+ if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
+ return 0;
+ }
+
+ tmp = ldl_p(mem_buf);
+
+ switch (reg->type) {
+ case 9: /*pc*/
+ env->pc = tmp;
+ break;
+
+ case 1: /*ar*/
+ env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
+ xtensa_sync_window_from_phys(env);
+ break;
+
+ case 2: /*SR*/
+ env->sregs[reg->targno & 0xff] = tmp;
+ break;
+
+ case 3: /*UR*/
+ env->uregs[reg->targno & 0xff] = tmp;
+ break;
+
+ case 4: /*f*/
+ switch (reg->size) {
+ case 4:
+ env->fregs[reg->targno & 0x0f].f32[FP_F32_LOW] = make_float32(tmp);
+ return 4;
+ case 8:
+ env->fregs[reg->targno & 0x0f].f64 = make_float64(tmp);
+ return 8;
+ default:
+ return 0;
+ }
+
+ case 8: /*a*/
+ env->regs[reg->targno & 0x0f] = tmp;
+ break;
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "%s to reg %d of unsupported type %d\n",
+ __func__, n, reg->type);
+ return 0;
+ }
+
+ return 4;
+}
diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c
new file mode 100644
index 0000000000..768b32c417
--- /dev/null
+++ b/target/xtensa/helper.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/loader.h"
+#endif
+
+static struct XtensaConfigList *xtensa_cores;
+
+static void xtensa_core_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+ XtensaCPUClass *xcc = XTENSA_CPU_CLASS(oc);
+ const XtensaConfig *config = data;
+
+ xcc->config = config;
+
+ /* Use num_core_regs to see only non-privileged registers in an unmodified
+ * gdb. Use num_regs to see all registers. gdb modification is required
+ * for that: reset bit 0 in the 'flags' field of the register definitions
+ * in gdb/xtensa-config.c, either in the gdb source tree or in the gdb overlay.
+ */
+ cc->gdb_num_core_regs = config->gdb_regmap.num_regs;
+}
+
+void xtensa_finalize_config(XtensaConfig *config)
+{
+ unsigned i, n = 0;
+
+ if (config->gdb_regmap.num_regs) {
+ return;
+ }
+
+ for (i = 0; config->gdb_regmap.reg[i].targno >= 0; ++i) {
+ n += (config->gdb_regmap.reg[i].type != 6);
+ }
+ config->gdb_regmap.num_regs = n;
+}
+
+void xtensa_register_core(XtensaConfigList *node)
+{
+ TypeInfo type = {
+ .parent = TYPE_XTENSA_CPU,
+ .class_init = xtensa_core_class_init,
+ .class_data = (void *)node->config,
+ };
+
+ node->next = xtensa_cores;
+ xtensa_cores = node;
+ type.name = g_strdup_printf("%s-" TYPE_XTENSA_CPU, node->config->name);
+ type_register(&type);
+ g_free((gpointer)type.name);
+}
+
+static uint32_t check_hw_breakpoints(CPUXtensaState *env)
+{
+ unsigned i;
+
+ for (i = 0; i < env->config->ndbreak; ++i) {
+ if (env->cpu_watchpoint[i] &&
+ env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT) {
+ return DEBUGCAUSE_DB | (i << DEBUGCAUSE_DBNUM_SHIFT);
+ }
+ }
+ return 0;
+}
+
+void xtensa_breakpoint_handler(CPUState *cs)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+
+ if (cs->watchpoint_hit) {
+ if (cs->watchpoint_hit->flags & BP_CPU) {
+ uint32_t cause;
+
+ cs->watchpoint_hit = NULL;
+ cause = check_hw_breakpoints(env);
+ if (cause) {
+ debug_exception_env(env, cause);
+ }
+ cpu_loop_exit_noexc(cs);
+ }
+ }
+}
+
+XtensaCPU *cpu_xtensa_init(const char *cpu_model)
+{
+ ObjectClass *oc;
+ XtensaCPU *cpu;
+ CPUXtensaState *env;
+
+ oc = cpu_class_by_name(TYPE_XTENSA_CPU, cpu_model);
+ if (oc == NULL) {
+ return NULL;
+ }
+
+ cpu = XTENSA_CPU(object_new(object_class_get_name(oc)));
+ env = &cpu->env;
+
+ xtensa_irq_init(env);
+
+ object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
+
+ return cpu;
+}
+
+
+void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ XtensaConfigList *core = xtensa_cores;
+ cpu_fprintf(f, "Available CPUs:\n");
+ for (; core; core = core->next) {
+ cpu_fprintf(f, " %s\n", core->config->name);
+ }
+}
+
+hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
+
+ if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
+ &paddr, &page_size, &access) == 0) {
+ return paddr;
+ }
+ if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
+ &paddr, &page_size, &access) == 0) {
+ return paddr;
+ }
+ return ~0;
+}
+
+static uint32_t relocated_vector(CPUXtensaState *env, uint32_t vector)
+{
+ if (xtensa_option_enabled(env->config,
+ XTENSA_OPTION_RELOCATABLE_VECTOR)) {
+ return vector - env->config->vecbase + env->sregs[VECBASE];
+ } else {
+ return vector;
+ }
+}
+
+/*!
+ * Handle a pending IRQ.
+ * For a high-priority interrupt, jump to the corresponding interrupt vector.
+ * For a level-1 interrupt, convert it to a user, kernel or double
+ * exception with the 'level-1 interrupt' exception cause.
+ */
+static void handle_interrupt(CPUXtensaState *env)
+{
+ int level = env->pending_irq_level;
+
+ if (level > xtensa_get_cintlevel(env) &&
+ level <= env->config->nlevel &&
+ (env->config->level_mask[level] &
+ env->sregs[INTSET] &
+ env->sregs[INTENABLE])) {
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
+ if (level > 1) {
+ env->sregs[EPC1 + level - 1] = env->pc;
+ env->sregs[EPS2 + level - 2] = env->sregs[PS];
+ env->sregs[PS] =
+ (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
+ env->pc = relocated_vector(env,
+ env->config->interrupt_vector[level]);
+ } else {
+ env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;
+
+ if (env->sregs[PS] & PS_EXCM) {
+ if (env->config->ndepc) {
+ env->sregs[DEPC] = env->pc;
+ } else {
+ env->sregs[EPC1] = env->pc;
+ }
+ cs->exception_index = EXC_DOUBLE;
+ } else {
+ env->sregs[EPC1] = env->pc;
+ cs->exception_index =
+ (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
+ }
+ env->sregs[PS] |= PS_EXCM;
+ }
+ env->exception_taken = 1;
+ }
+}
+
+void xtensa_cpu_do_interrupt(CPUState *cs)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+
+ if (cs->exception_index == EXC_IRQ) {
+ qemu_log_mask(CPU_LOG_INT,
+ "%s(EXC_IRQ) level = %d, cintlevel = %d, "
+ "pc = %08x, a0 = %08x, ps = %08x, "
+ "intset = %08x, intenable = %08x, "
+ "ccount = %08x\n",
+ __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
+ env->pc, env->regs[0], env->sregs[PS],
+ env->sregs[INTSET], env->sregs[INTENABLE],
+ env->sregs[CCOUNT]);
+ handle_interrupt(env);
+ }
+
+ switch (cs->exception_index) {
+ case EXC_WINDOW_OVERFLOW4:
+ case EXC_WINDOW_UNDERFLOW4:
+ case EXC_WINDOW_OVERFLOW8:
+ case EXC_WINDOW_UNDERFLOW8:
+ case EXC_WINDOW_OVERFLOW12:
+ case EXC_WINDOW_UNDERFLOW12:
+ case EXC_KERNEL:
+ case EXC_USER:
+ case EXC_DOUBLE:
+ case EXC_DEBUG:
+ qemu_log_mask(CPU_LOG_INT, "%s(%d) "
+ "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
+ __func__, cs->exception_index,
+ env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
+ if (env->config->exception_vector[cs->exception_index]) {
+ env->pc = relocated_vector(env,
+ env->config->exception_vector[cs->exception_index]);
+ env->exception_taken = 1;
+ } else {
+ qemu_log_mask(CPU_LOG_INT, "%s(pc = %08x) bad exception_index: %d\n",
+ __func__, env->pc, cs->exception_index);
+ }
+ break;
+
+ case EXC_IRQ:
+ break;
+
+ default:
+ qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
+ __func__, env->pc, cs->exception_index);
+ break;
+ }
+ check_interrupts(env);
+}
+
+bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ cs->exception_index = EXC_IRQ;
+ xtensa_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
+
+static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
+ const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
+{
+ unsigned wi, ei;
+
+ for (wi = 0; wi < tlb->nways; ++wi) {
+ for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
+ entry[wi][ei].asid = 0;
+ entry[wi][ei].variable = true;
+ }
+ }
+}
+
+static void reset_tlb_mmu_ways56(CPUXtensaState *env,
+ const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
+{
+ if (!tlb->varway56) {
+ static const xtensa_tlb_entry way5[] = {
+ {
+ .vaddr = 0xd0000000,
+ .paddr = 0,
+ .asid = 1,
+ .attr = 7,
+ .variable = false,
+ }, {
+ .vaddr = 0xd8000000,
+ .paddr = 0,
+ .asid = 1,
+ .attr = 3,
+ .variable = false,
+ }
+ };
+ static const xtensa_tlb_entry way6[] = {
+ {
+ .vaddr = 0xe0000000,
+ .paddr = 0xf0000000,
+ .asid = 1,
+ .attr = 7,
+ .variable = false,
+ }, {
+ .vaddr = 0xf0000000,
+ .paddr = 0xf0000000,
+ .asid = 1,
+ .attr = 3,
+ .variable = false,
+ }
+ };
+ memcpy(entry[5], way5, sizeof(way5));
+ memcpy(entry[6], way6, sizeof(way6));
+ } else {
+ uint32_t ei;
+ for (ei = 0; ei < 8; ++ei) {
+ entry[6][ei].vaddr = ei << 29;
+ entry[6][ei].paddr = ei << 29;
+ entry[6][ei].asid = 1;
+ entry[6][ei].attr = 3;
+ }
+ }
+}
+
+static void reset_tlb_region_way0(CPUXtensaState *env,
+ xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
+{
+ unsigned ei;
+
+ for (ei = 0; ei < 8; ++ei) {
+ entry[0][ei].vaddr = ei << 29;
+ entry[0][ei].paddr = ei << 29;
+ entry[0][ei].asid = 1;
+ entry[0][ei].attr = 2;
+ entry[0][ei].variable = true;
+ }
+}
+
+void reset_mmu(CPUXtensaState *env)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ env->sregs[RASID] = 0x04030201;
+ env->sregs[ITLBCFG] = 0;
+ env->sregs[DTLBCFG] = 0;
+ env->autorefill_idx = 0;
+ reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
+ reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
+ reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
+ reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
+ } else {
+ reset_tlb_region_way0(env, env->itlb);
+ reset_tlb_region_way0(env, env->dtlb);
+ }
+}
+
+static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
+{
+ unsigned i;
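+ /* RASID holds one ASID per ring, one byte each; the index of the
+ * matching byte is the ring number, 0xff if no ring uses this ASID.
+ */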
+ for (i = 0; i < 4; ++i) {
+ if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
+ return i;
+ }
+ }
+ return 0xff;
+}
+
+/*!
+ * Look up the xtensa TLB for the given virtual address.
+ * See ISA, 4.6.2.2
+ *
+ * \param pwi: [out] way index
+ * \param pei: [out] entry index
+ * \param pring: [out] access ring
+ * \return 0 if ok, exception cause code otherwise
+ */
+int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
+ uint32_t *pwi, uint32_t *pei, uint8_t *pring)
+{
+ const xtensa_tlb *tlb = dtlb ?
+ &env->config->dtlb : &env->config->itlb;
+ const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
+ env->dtlb : env->itlb;
+
+ int nhits = 0;
+ unsigned wi;
+
+ for (wi = 0; wi < tlb->nways; ++wi) {
+ uint32_t vpn;
+ uint32_t ei;
+ split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
+ if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
+ unsigned ring = get_ring(env, entry[wi][ei].asid);
+ if (ring < 4) {
+ if (++nhits > 1) {
+ return dtlb ?
+ LOAD_STORE_TLB_MULTI_HIT_CAUSE :
+ INST_TLB_MULTI_HIT_CAUSE;
+ }
+ *pwi = wi;
+ *pei = ei;
+ *pring = ring;
+ }
+ }
+ }
+ return nhits ? 0 :
+ (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
+}
+
+/*!
+ * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
+ * See ISA, 4.6.5.10
+ */
+static unsigned mmu_attr_to_access(uint32_t attr)
+{
+ unsigned access = 0;
+
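+ /* For attr < 12, bit 0 grants execute, bit 1 grants write and
+ * bits 3:2 select the cache mode; attr 13 is the isolate mode.
+ */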
+ if (attr < 12) {
+ access |= PAGE_READ;
+ if (attr & 0x1) {
+ access |= PAGE_EXEC;
+ }
+ if (attr & 0x2) {
+ access |= PAGE_WRITE;
+ }
+
+ switch (attr & 0xc) {
+ case 0:
+ access |= PAGE_CACHE_BYPASS;
+ break;
+
+ case 4:
+ access |= PAGE_CACHE_WB;
+ break;
+
+ case 8:
+ access |= PAGE_CACHE_WT;
+ break;
+ }
+ } else if (attr == 13) {
+ access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
+ }
+ return access;
+}
+
+/*!
+ * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
+ * See ISA, 4.6.3.3
+ */
+static unsigned region_attr_to_access(uint32_t attr)
+{
+ static const unsigned access[16] = {
+ [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
+ [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
+ [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
+ [3] = PAGE_EXEC | PAGE_CACHE_WB,
+ [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
+ [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
+ [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
+ };
+
+ return access[attr & 0xf];
+}
+
+/*!
+ * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
+ * See ISA, A.2.14 The Cache Attribute Register
+ */
+static unsigned cacheattr_attr_to_access(uint32_t attr)
+{
+ static const unsigned access[16] = {
+ [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
+ [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
+ [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
+ [3] = PAGE_EXEC | PAGE_CACHE_WB,
+ [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
+ [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
+ };
+
+ return access[attr & 0xf];
+}
+
+static bool is_access_granted(unsigned access, int is_write)
+{
+ switch (is_write) {
+ case 0:
+ return access & PAGE_READ;
+
+ case 1:
+ return access & PAGE_WRITE;
+
+ case 2:
+ return access & PAGE_EXEC;
+
+ default:
+ return 0;
+ }
+}
+
+static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);
+
+static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size, unsigned *access,
+ bool may_lookup_pt)
+{
+ bool dtlb = is_write != 2;
+ uint32_t wi;
+ uint32_t ei;
+ uint8_t ring;
+ uint32_t vpn;
+ uint32_t pte;
+ const xtensa_tlb_entry *entry = NULL;
+ xtensa_tlb_entry tmp_entry;
+ int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);
+
+ if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
+ may_lookup_pt && get_pte(env, vaddr, &pte) == 0) {
+ ring = (pte >> 4) & 0x3;
+ wi = 0;
+ split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);
+
+ if (update_tlb) {
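+ /* Autorefilled entries rotate round-robin through refill ways 0..3. */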
+ wi = ++env->autorefill_idx & 0x3;
+ xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
+ env->sregs[EXCVADDR] = vaddr;
+ qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
+ __func__, vaddr, vpn, pte);
+ } else {
+ xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
+ entry = &tmp_entry;
+ }
+ ret = 0;
+ }
+ if (ret != 0) {
+ return ret;
+ }
+
+ if (entry == NULL) {
+ entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
+ }
+
+ if (ring < mmu_idx) {
+ return dtlb ?
+ LOAD_STORE_PRIVILEGE_CAUSE :
+ INST_FETCH_PRIVILEGE_CAUSE;
+ }
+
+ *access = mmu_attr_to_access(entry->attr) &
+ ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
+ if (!is_access_granted(*access, is_write)) {
+ return dtlb ?
+ (is_write ?
+ STORE_PROHIBITED_CAUSE :
+ LOAD_PROHIBITED_CAUSE) :
+ INST_FETCH_PROHIBITED_CAUSE;
+ }
+
+ *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
+ *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
+
+ return 0;
+}
+
+static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
+{
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
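+ /* The page table is a linear array of 4-byte PTEs indexed by VPN,
+ * mapped at PTEVADDR, so the PTE address follows directly from the
+ * faulting virtual address.
+ */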
+ uint32_t pt_vaddr =
+ (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
+ int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
+ &paddr, &page_size, &access, false);
+
+ qemu_log_mask(CPU_LOG_MMU, "%s: trying autorefill(%08x) -> %08x\n",
+ __func__, vaddr, ret ? ~0 : paddr);
+
+ if (ret == 0) {
+ *pte = ldl_phys(cs->as, paddr);
+ }
+ return ret;
+}
+
+static int get_physical_addr_region(CPUXtensaState *env,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size, unsigned *access)
+{
+ bool dtlb = is_write != 2;
+ uint32_t wi = 0;
+ uint32_t ei = (vaddr >> 29) & 0x7;
+ const xtensa_tlb_entry *entry =
+ xtensa_tlb_get_entry(env, dtlb, wi, ei);
+
+ *access = region_attr_to_access(entry->attr);
+ if (!is_access_granted(*access, is_write)) {
+ return dtlb ?
+ (is_write ?
+ STORE_PROHIBITED_CAUSE :
+ LOAD_PROHIBITED_CAUSE) :
+ INST_FETCH_PROHIBITED_CAUSE;
+ }
+
+ *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
+ *page_size = ~REGION_PAGE_MASK + 1;
+
+ return 0;
+}
+
+/*!
+ * Convert a virtual address to a physical address.
+ * The MMU may issue a page table walk and update the xtensa autorefill TLB way entry.
+ *
+ * \return 0 if ok, exception cause code otherwise
+ */
+int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size, unsigned *access)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ return get_physical_addr_mmu(env, update_tlb,
+ vaddr, is_write, mmu_idx, paddr, page_size, access, true);
+ } else if (xtensa_option_bits_enabled(env->config,
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
+ return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
+ paddr, page_size, access);
+ } else {
+ *paddr = vaddr;
+ *page_size = TARGET_PAGE_SIZE;
+ *access = cacheattr_attr_to_access(
+ env->sregs[CACHEATTR] >> ((vaddr & 0xe0000000) >> 27));
+ return 0;
+ }
+}
+
+static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
+ CPUXtensaState *env, bool dtlb)
+{
+ unsigned wi, ei;
+ const xtensa_tlb *conf =
+ dtlb ? &env->config->dtlb : &env->config->itlb;
+ unsigned (*attr_to_access)(uint32_t) =
+ xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
+ mmu_attr_to_access : region_attr_to_access;
+
+ for (wi = 0; wi < conf->nways; ++wi) {
+ uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
+ const char *sz_text;
+ bool print_header = true;
+
+ if (sz >= 0x100000) {
+ sz >>= 20;
+ sz_text = "MB";
+ } else {
+ sz >>= 10;
+ sz_text = "KB";
+ }
+
+ for (ei = 0; ei < conf->way_size[wi]; ++ei) {
+ const xtensa_tlb_entry *entry =
+ xtensa_tlb_get_entry(env, dtlb, wi, ei);
+
+ if (entry->asid) {
+ static const char * const cache_text[8] = {
+ [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
+ [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
+ [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
+ [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
+ };
+ unsigned access = attr_to_access(entry->attr);
+ unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
+ PAGE_CACHE_SHIFT;
+
+ if (print_header) {
+ print_header = false;
+ cpu_fprintf(f, "Way %u (%d %s)\n", wi, sz, sz_text);
+ cpu_fprintf(f,
+ "\tVaddr Paddr ASID Attr RWX Cache\n"
+ "\t---------- ---------- ---- ---- --- -------\n");
+ }
+ cpu_fprintf(f,
+ "\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %-7s\n",
+ entry->vaddr,
+ entry->paddr,
+ entry->asid,
+ entry->attr,
+ (access & PAGE_READ) ? 'R' : '-',
+ (access & PAGE_WRITE) ? 'W' : '-',
+ (access & PAGE_EXEC) ? 'X' : '-',
+ cache_text[cache_idx] ? cache_text[cache_idx] :
+ "Invalid");
+ }
+ }
+ }
+}
+
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env)
+{
+ if (xtensa_option_bits_enabled(env->config,
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {
+
+ cpu_fprintf(f, "ITLB:\n");
+ dump_tlb(f, cpu_fprintf, env, false);
+ cpu_fprintf(f, "\nDTLB:\n");
+ dump_tlb(f, cpu_fprintf, env, true);
+ } else {
+ cpu_fprintf(f, "No TLB for this CPU core\n");
+ }
+}
diff --git a/target/xtensa/helper.h b/target/xtensa/helper.h
new file mode 100644
index 0000000000..5ea9c5beec
--- /dev/null
+++ b/target/xtensa/helper.h
@@ -0,0 +1,58 @@
+DEF_HELPER_2(exception, noreturn, env, i32)
+DEF_HELPER_3(exception_cause, noreturn, env, i32, i32)
+DEF_HELPER_4(exception_cause_vaddr, noreturn, env, i32, i32, i32)
+DEF_HELPER_3(debug_exception, noreturn, env, i32, i32)
+
+DEF_HELPER_FLAGS_1(nsa, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(nsau, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_2(wsr_windowbase, void, env, i32)
+DEF_HELPER_4(entry, void, env, i32, i32, i32)
+DEF_HELPER_2(retw, i32, env, i32)
+DEF_HELPER_2(rotw, void, env, i32)
+DEF_HELPER_3(window_check, noreturn, env, i32, i32)
+DEF_HELPER_1(restore_owb, void, env)
+DEF_HELPER_2(movsp, void, env, i32)
+DEF_HELPER_2(wsr_lbeg, void, env, i32)
+DEF_HELPER_2(wsr_lend, void, env, i32)
+DEF_HELPER_1(simcall, void, env)
+DEF_HELPER_1(dump_state, void, env)
+
+DEF_HELPER_3(waiti, void, env, i32, i32)
+DEF_HELPER_3(timer_irq, void, env, i32, i32)
+DEF_HELPER_2(advance_ccount, void, env, i32)
+DEF_HELPER_1(check_interrupts, void, env)
+DEF_HELPER_3(check_atomctl, void, env, i32, i32)
+
+DEF_HELPER_2(itlb_hit_test, void, env, i32)
+DEF_HELPER_2(wsr_rasid, void, env, i32)
+DEF_HELPER_FLAGS_3(rtlb0, TCG_CALL_NO_RWG_SE, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(rtlb1, TCG_CALL_NO_RWG_SE, i32, env, i32, i32)
+DEF_HELPER_3(itlb, void, env, i32, i32)
+DEF_HELPER_3(ptlb, i32, env, i32, i32)
+DEF_HELPER_4(wtlb, void, env, i32, i32, i32)
+
+DEF_HELPER_2(wsr_ibreakenable, void, env, i32)
+DEF_HELPER_3(wsr_ibreaka, void, env, i32, i32)
+DEF_HELPER_3(wsr_dbreaka, void, env, i32, i32)
+DEF_HELPER_3(wsr_dbreakc, void, env, i32, i32)
+
+DEF_HELPER_2(wur_fcr, void, env, i32)
+DEF_HELPER_FLAGS_1(abs_s, TCG_CALL_NO_RWG_SE, f32, f32)
+DEF_HELPER_FLAGS_1(neg_s, TCG_CALL_NO_RWG_SE, f32, f32)
+DEF_HELPER_3(add_s, f32, env, f32, f32)
+DEF_HELPER_3(sub_s, f32, env, f32, f32)
+DEF_HELPER_3(mul_s, f32, env, f32, f32)
+DEF_HELPER_4(madd_s, f32, env, f32, f32, f32)
+DEF_HELPER_4(msub_s, f32, env, f32, f32, f32)
+DEF_HELPER_FLAGS_3(ftoi, TCG_CALL_NO_RWG_SE, i32, f32, i32, i32)
+DEF_HELPER_FLAGS_3(ftoui, TCG_CALL_NO_RWG_SE, i32, f32, i32, i32)
+DEF_HELPER_3(itof, f32, env, i32, i32)
+DEF_HELPER_3(uitof, f32, env, i32, i32)
+
+DEF_HELPER_4(un_s, void, env, i32, f32, f32)
+DEF_HELPER_4(oeq_s, void, env, i32, f32, f32)
+DEF_HELPER_4(ueq_s, void, env, i32, f32, f32)
+DEF_HELPER_4(olt_s, void, env, i32, f32, f32)
+DEF_HELPER_4(ult_s, void, env, i32, f32, f32)
+DEF_HELPER_4(ole_s, void, env, i32, f32, f32)
+DEF_HELPER_4(ule_s, void, env, i32, f32, f32)
diff --git a/target/xtensa/import_core.sh b/target/xtensa/import_core.sh
new file mode 100755
index 0000000000..351bee41c2
--- /dev/null
+++ b/target/xtensa/import_core.sh
@@ -0,0 +1,51 @@
+#! /bin/bash -e
+
+OVERLAY="$1"
+NAME="$2"
+FREQ=40000
+BASE=$(dirname "$0")
+TARGET="$BASE"/core-$NAME
+
+[ $# -ge 2 -a -f "$OVERLAY" ] || { cat <<EOF
+Usage: $0 overlay-archive-to-import core-name [frequency-in-KHz]
+ overlay-archive-to-import: file name of xtensa-config-overlay.tar.gz
+ to import configuration from.
+ core-name: QEMU name of the imported core. Must be a valid
+ C identifier.
+ frequency-in-KHz: core frequency (40MHz if not specified).
+EOF
+exit
+}
+
+[ $# -ge 3 ] && FREQ="$3"
+mkdir -p "$TARGET"
+tar -xf "$OVERLAY" -C "$TARGET" --strip-components=1 \
+ --xform='s/core/core-isa/' config/core.h
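+# Keep the leading comment block and the XTREG()...XTREG_END register list
+# from gdb's xtensa-config.c.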
+tar -xf "$OVERLAY" -O gdb/xtensa-config.c | \
+ sed -n '1,/*\//p;/XTREG/,/XTREG_END/p' > "$TARGET"/gdb-config.c
+
+cat <<EOF > "${TARGET}.c"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+
+#include "core-$NAME/core-isa.h"
+#include "overlay_tool.h"
+
+static XtensaConfig $NAME __attribute__((unused)) = {
+ .name = "$NAME",
+ .gdb_regmap = {
+ .reg = {
+#include "core-$NAME/gdb-config.c"
+ }
+ },
+ .clock_freq_khz = $FREQ,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE($NAME)
+EOF
+
+grep -q core-${NAME}.o "$BASE"/Makefile.objs || \
+ echo "obj-y += core-${NAME}.o" >> "$BASE"/Makefile.objs
diff --git a/target/xtensa/monitor.c b/target/xtensa/monitor.c
new file mode 100644
index 0000000000..f3fa4cd278
--- /dev/null
+++ b/target/xtensa/monitor.c
@@ -0,0 +1,35 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "hmp.h"
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env1 = mon_get_cpu_env();
+
+ dump_mmu((FILE*)mon, (fprintf_function)monitor_printf, env1);
+}
diff --git a/target/xtensa/op_helper.c b/target/xtensa/op_helper.c
new file mode 100644
index 0000000000..0a4b2147bc
--- /dev/null
+++ b/target/xtensa/op_helper.c
@@ -0,0 +1,984 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/address-spaces.h"
+#include "qemu/timer.h"
+
+void xtensa_cpu_do_unaligned_access(CPUState *cs,
+ vaddr addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
+ !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
+ cpu_restore_state(CPU(cpu), retaddr);
+ HELPER(exception_cause_vaddr)(env,
+ env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
+ }
+}
+
+void tlb_fill(CPUState *cs, target_ulong vaddr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
+ int ret = xtensa_get_physical_addr(env, true, vaddr, access_type, mmu_idx,
+ &paddr, &page_size, &access);
+
+ qemu_log_mask(CPU_LOG_MMU, "%s(%08x, %d, %d) -> %08x, ret = %d\n",
+ __func__, vaddr, access_type, mmu_idx, paddr, ret);
+
+ if (ret == 0) {
+ tlb_set_page(cs,
+ vaddr & TARGET_PAGE_MASK,
+ paddr & TARGET_PAGE_MASK,
+ access, mmu_idx, page_size);
+ } else {
+ cpu_restore_state(cs, retaddr);
+ HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
+ }
+}
+
+void xtensa_cpu_do_unassigned_access(CPUState *cs, hwaddr addr,
+ bool is_write, bool is_exec, int opaque,
+ unsigned size)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+
+ HELPER(exception_cause_vaddr)(env, env->pc,
+ is_exec ?
+ INSTR_PIF_ADDR_ERROR_CAUSE :
+ LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
+ is_exec ? addr : cs->mem_io_vaddr);
+}
+
+static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
+{
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
+ int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
+ &paddr, &page_size, &access);
+ if (ret == 0) {
+ tb_invalidate_phys_addr(&address_space_memory, paddr);
+ }
+}
+
+void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
+{
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
+ cs->exception_index = excp;
+ if (excp == EXCP_DEBUG) {
+ env->exception_taken = 0;
+ }
+ cpu_loop_exit(cs);
+}
+
+void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
+{
+ uint32_t vector;
+
+ env->pc = pc;
+ if (env->sregs[PS] & PS_EXCM) {
+ if (env->config->ndepc) {
+ env->sregs[DEPC] = pc;
+ } else {
+ env->sregs[EPC1] = pc;
+ }
+ vector = EXC_DOUBLE;
+ } else {
+ env->sregs[EPC1] = pc;
+ vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
+ }
+
+ env->sregs[EXCCAUSE] = cause;
+ env->sregs[PS] |= PS_EXCM;
+
+ HELPER(exception)(env, vector);
+}
+
+void HELPER(exception_cause_vaddr)(CPUXtensaState *env,
+ uint32_t pc, uint32_t cause, uint32_t vaddr)
+{
+ env->sregs[EXCVADDR] = vaddr;
+ HELPER(exception_cause)(env, pc, cause);
+}
+
+void debug_exception_env(CPUXtensaState *env, uint32_t cause)
+{
+ if (xtensa_get_cintlevel(env) < env->config->debug_level) {
+ HELPER(debug_exception)(env, env->pc, cause);
+ }
+}
+
+void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
+{
+ unsigned level = env->config->debug_level;
+
+ env->pc = pc;
+ env->sregs[DEBUGCAUSE] = cause;
+ env->sregs[EPC1 + level - 1] = pc;
+ env->sregs[EPS2 + level - 2] = env->sregs[PS];
+ env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
+ (level << PS_INTLEVEL_SHIFT);
+ HELPER(exception)(env, EXC_DEBUG);
+}
+
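+/* NSA returns the left shift needed to normalize a signed value
+ * (31 for 0 and -1); NSAU returns the number of leading zeros of an
+ * unsigned value (32 for 0).
+ */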
+uint32_t HELPER(nsa)(uint32_t v)
+{
+ if (v & 0x80000000) {
+ v = ~v;
+ }
+ return v ? clz32(v) - 1 : 31;
+}
+
+uint32_t HELPER(nsau)(uint32_t v)
+{
+ return v ? clz32(v) : 32;
+}
+
+static void copy_window_from_phys(CPUXtensaState *env,
+ uint32_t window, uint32_t phys, uint32_t n)
+{
+ assert(phys < env->config->nareg);
+ if (phys + n <= env->config->nareg) {
+ memcpy(env->regs + window, env->phys_regs + phys,
+ n * sizeof(uint32_t));
+ } else {
+ uint32_t n1 = env->config->nareg - phys;
+ memcpy(env->regs + window, env->phys_regs + phys,
+ n1 * sizeof(uint32_t));
+ memcpy(env->regs + window + n1, env->phys_regs,
+ (n - n1) * sizeof(uint32_t));
+ }
+}
+
+static void copy_phys_from_window(CPUXtensaState *env,
+ uint32_t phys, uint32_t window, uint32_t n)
+{
+ assert(phys < env->config->nareg);
+ if (phys + n <= env->config->nareg) {
+ memcpy(env->phys_regs + phys, env->regs + window,
+ n * sizeof(uint32_t));
+ } else {
+ uint32_t n1 = env->config->nareg - phys;
+ memcpy(env->phys_regs + phys, env->regs + window,
+ n1 * sizeof(uint32_t));
+ memcpy(env->phys_regs, env->regs + window + n1,
+ (n - n1) * sizeof(uint32_t));
+ }
+}
+
+
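+/* WINDOW_BASE and WINDOWSTART indices count frames of four registers,
+ * so they wrap modulo nareg / 4.
+ */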
+static inline unsigned windowbase_bound(unsigned a, const CPUXtensaState *env)
+{
+ return a & (env->config->nareg / 4 - 1);
+}
+
+static inline unsigned windowstart_bit(unsigned a, const CPUXtensaState *env)
+{
+ return 1 << windowbase_bound(a, env);
+}
+
+void xtensa_sync_window_from_phys(CPUXtensaState *env)
+{
+ copy_window_from_phys(env, 0, env->sregs[WINDOW_BASE] * 4, 16);
+}
+
+void xtensa_sync_phys_from_window(CPUXtensaState *env)
+{
+ copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16);
+}
+
+static void rotate_window_abs(CPUXtensaState *env, uint32_t position)
+{
+ xtensa_sync_phys_from_window(env);
+ env->sregs[WINDOW_BASE] = windowbase_bound(position, env);
+ xtensa_sync_window_from_phys(env);
+}
+
+static void rotate_window(CPUXtensaState *env, uint32_t delta)
+{
+ rotate_window_abs(env, env->sregs[WINDOW_BASE] + delta);
+}
+
+void HELPER(wsr_windowbase)(CPUXtensaState *env, uint32_t v)
+{
+ rotate_window_abs(env, v);
+}
+
+void HELPER(entry)(CPUXtensaState *env, uint32_t pc, uint32_t s, uint32_t imm)
+{
+ int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT;
+ if (s > 3 || ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Illegal entry instruction(pc = %08x), PS = %08x\n",
+ pc, env->sregs[PS]);
+ HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
+ } else {
+ uint32_t windowstart = xtensa_replicate_windowstart(env) >>
+ (env->sregs[WINDOW_BASE] + 1);
+
+ if (windowstart & ((1 << callinc) - 1)) {
+ HELPER(window_check)(env, pc, callinc);
+ }
+ env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - (imm << 3);
+ rotate_window(env, callinc);
+ env->sregs[WINDOW_START] |=
+ windowstart_bit(env->sregs[WINDOW_BASE], env);
+ }
+}
+
+void HELPER(window_check)(CPUXtensaState *env, uint32_t pc, uint32_t w)
+{
+ uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
+ uint32_t windowstart = xtensa_replicate_windowstart(env) >>
+ (env->sregs[WINDOW_BASE] + 1);
+ uint32_t n = ctz32(windowstart) + 1;
+
+ assert(n <= w);
+
+ rotate_window(env, n);
+ env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
+ (windowbase << PS_OWB_SHIFT) | PS_EXCM;
+ env->sregs[EPC1] = env->pc = pc;
+
+ switch (ctz32(windowstart >> n)) {
+ case 0:
+ HELPER(exception)(env, EXC_WINDOW_OVERFLOW4);
+ break;
+ case 1:
+ HELPER(exception)(env, EXC_WINDOW_OVERFLOW8);
+ break;
+ default:
+ HELPER(exception)(env, EXC_WINDOW_OVERFLOW12);
+ break;
+ }
+}
+
+uint32_t HELPER(retw)(CPUXtensaState *env, uint32_t pc)
+{
+ int n = (env->regs[0] >> 30) & 0x3;
+ int m = 0;
+ uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
+ uint32_t windowstart = env->sregs[WINDOW_START];
+ uint32_t ret_pc = 0;
+
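+ /* n is the caller's window increment taken from a0[31:30]; m is the
+ * distance (1..3) to the nearest live frame below the current window
+ * base, or 0 if none of the three WINDOWSTART bits below it is set.
+ */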
+ if (windowstart & windowstart_bit(windowbase - 1, env)) {
+ m = 1;
+ } else if (windowstart & windowstart_bit(windowbase - 2, env)) {
+ m = 2;
+ } else if (windowstart & windowstart_bit(windowbase - 3, env)) {
+ m = 3;
+ }
+
+ if (n == 0 || (m != 0 && m != n) ||
+ ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Illegal retw instruction(pc = %08x), "
+ "PS = %08x, m = %d, n = %d\n",
+ pc, env->sregs[PS], m, n);
+ HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
+ } else {
+ int owb = windowbase;
+
+ ret_pc = (pc & 0xc0000000) | (env->regs[0] & 0x3fffffff);
+
+ rotate_window(env, -n);
+ if (windowstart & windowstart_bit(env->sregs[WINDOW_BASE], env)) {
+ env->sregs[WINDOW_START] &= ~windowstart_bit(owb, env);
+ } else {
+ /* window underflow */
+ env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
+ (windowbase << PS_OWB_SHIFT) | PS_EXCM;
+ env->sregs[EPC1] = env->pc = pc;
+
+ if (n == 1) {
+ HELPER(exception)(env, EXC_WINDOW_UNDERFLOW4);
+ } else if (n == 2) {
+ HELPER(exception)(env, EXC_WINDOW_UNDERFLOW8);
+ } else if (n == 3) {
+ HELPER(exception)(env, EXC_WINDOW_UNDERFLOW12);
+ }
+ }
+ }
+ return ret_pc;
+}
+
+void HELPER(rotw)(CPUXtensaState *env, uint32_t imm4)
+{
+ rotate_window(env, imm4);
+}
+
+void HELPER(restore_owb)(CPUXtensaState *env)
+{
+ rotate_window_abs(env, (env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT);
+}
+
+void HELPER(movsp)(CPUXtensaState *env, uint32_t pc)
+{
+ if ((env->sregs[WINDOW_START] &
+ (windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) |
+ windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) |
+ windowstart_bit(env->sregs[WINDOW_BASE] - 1, env))) == 0) {
+ HELPER(exception_cause)(env, pc, ALLOCA_CAUSE);
+ }
+}
+
+void HELPER(wsr_lbeg)(CPUXtensaState *env, uint32_t v)
+{
+ if (env->sregs[LBEG] != v) {
+ tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
+ env->sregs[LBEG] = v;
+ }
+}
+
+void HELPER(wsr_lend)(CPUXtensaState *env, uint32_t v)
+{
+ if (env->sregs[LEND] != v) {
+ tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
+ env->sregs[LEND] = v;
+ tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
+ }
+}
+
+void HELPER(dump_state)(CPUXtensaState *env)
+{
+ XtensaCPU *cpu = xtensa_env_get_cpu(env);
+
+ cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
+}
+
+void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
+{
+ CPUState *cpu;
+
+ env->pc = pc;
+ env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
+ (intlevel << PS_INTLEVEL_SHIFT);
+ check_interrupts(env);
+ if (env->pending_irq_level) {
+ cpu_loop_exit(CPU(xtensa_env_get_cpu(env)));
+ return;
+ }
+
+ cpu = CPU(xtensa_env_get_cpu(env));
+ env->halt_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ cpu->halted = 1;
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_TIMER_INTERRUPT)) {
+ xtensa_rearm_ccompare_timer(env);
+ }
+ HELPER(exception)(env, EXCP_HLT);
+}
+
+void HELPER(timer_irq)(CPUXtensaState *env, uint32_t id, uint32_t active)
+{
+ xtensa_timer_irq(env, id, active);
+}
+
+void HELPER(advance_ccount)(CPUXtensaState *env, uint32_t d)
+{
+ xtensa_advance_ccount(env, d);
+}
+
+void HELPER(check_interrupts)(CPUXtensaState *env)
+{
+ check_interrupts(env);
+}
+
+void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
+{
+ get_page_addr_code(env, vaddr);
+}
+
+/*!
+ * Check vaddr accessibility/cache attributes and raise an exception if
+ * specified by the ATOMCTL SR.
+ *
+ * Note: local memory exclusion is not implemented
+ */
+void HELPER(check_atomctl)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr)
+{
+ uint32_t paddr, page_size, access;
+ uint32_t atomctl = env->sregs[ATOMCTL];
+ int rc = xtensa_get_physical_addr(env, true, vaddr, 1,
+ xtensa_get_cring(env), &paddr, &page_size, &access);
+
+ /*
+ * s32c1i never causes LOAD_PROHIBITED_CAUSE exceptions,
+ * see opcode description in the ISA
+ */
+ if (rc == 0 &&
+ (access & (PAGE_READ | PAGE_WRITE)) != (PAGE_READ | PAGE_WRITE)) {
+ rc = STORE_PROHIBITED_CAUSE;
+ }
+
+ if (rc) {
+ HELPER(exception_cause_vaddr)(env, pc, rc, vaddr);
+ }
+
+ /*
+ * When data cache is not configured use ATOMCTL bypass field.
+ * See ISA, 4.3.12.4 The Atomic Operation Control Register (ATOMCTL)
+ * under the Conditional Store Option.
+ */
+ if (!xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
+ access = PAGE_CACHE_BYPASS;
+ }
+
+ switch (access & PAGE_CACHE_MASK) {
+ case PAGE_CACHE_WB:
+ atomctl >>= 2;
+ /* fall through */
+ case PAGE_CACHE_WT:
+ atomctl >>= 2;
+ /* fall through */
+ case PAGE_CACHE_BYPASS:
+ if ((atomctl & 0x3) == 0) {
+ HELPER(exception_cause_vaddr)(env, pc,
+ LOAD_STORE_ERROR_CAUSE, vaddr);
+ }
+ break;
+
+ case PAGE_CACHE_ISOLATE:
+ HELPER(exception_cause_vaddr)(env, pc,
+ LOAD_STORE_ERROR_CAUSE, vaddr);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
+{
+ XtensaCPU *cpu = xtensa_env_get_cpu(env);
+
+ v = (v & 0xffffff00) | 0x1;
+ if (v != env->sregs[RASID]) {
+ env->sregs[RASID] = v;
+ tlb_flush(CPU(cpu), 1);
+ }
+}
+
+static uint32_t get_page_size(const CPUXtensaState *env, bool dtlb, uint32_t way)
+{
+ uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];
+
+ switch (way) {
+ case 4:
+ return (tlbcfg >> 16) & 0x3;
+
+ case 5:
+ return (tlbcfg >> 20) & 0x1;
+
+ case 6:
+ return (tlbcfg >> 24) & 0x1;
+
+ default:
+ return 0;
+ }
+}
+
+/*!
+ * Get bit mask for the virtual address bits translated by the TLB way
+ */
+uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ bool varway56 = dtlb ?
+ env->config->dtlb.varway56 :
+ env->config->itlb.varway56;
+
+ switch (way) {
+ case 4:
+ return 0xfff00000 << get_page_size(env, dtlb, way) * 2;
+
+ case 5:
+ if (varway56) {
+ return 0xf8000000 << get_page_size(env, dtlb, way);
+ } else {
+ return 0xf8000000;
+ }
+
+ case 6:
+ if (varway56) {
+ return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
+ } else {
+ return 0xf0000000;
+ }
+
+ default:
+ return 0xfffff000;
+ }
+ } else {
+ return REGION_PAGE_MASK;
+ }
+}
+
+/*!
+ * Get bit mask for the 'VPN without index' field.
+ * See ISA, 4.6.5.6, data format for RxTLB0
+ */
+static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
+{
+ if (way < 4) {
+ bool is32 = (dtlb ?
+ env->config->dtlb.nrefillentries :
+ env->config->itlb.nrefillentries) == 32;
+ return is32 ? 0xffff8000 : 0xffffc000;
+ } else if (way == 4) {
+ return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
+ } else if (way <= 6) {
+ uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
+ bool varway56 = dtlb ?
+ env->config->dtlb.varway56 :
+ env->config->itlb.varway56;
+
+ if (varway56) {
+ return mask << (way == 5 ? 2 : 3);
+ } else {
+ return mask << 1;
+ }
+ } else {
+ return 0xfffff000;
+ }
+}
+
+/*!
+ * Split virtual address into VPN (with index) and entry index
+ * for the given TLB way
+ */
+void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
+ uint32_t *vpn, uint32_t wi, uint32_t *ei)
+{
+ bool varway56 = dtlb ?
+ env->config->dtlb.varway56 :
+ env->config->itlb.varway56;
+
+ if (!dtlb) {
+ wi &= 7;
+ }
+
+ if (wi < 4) {
+ bool is32 = (dtlb ?
+ env->config->dtlb.nrefillentries :
+ env->config->itlb.nrefillentries) == 32;
+ *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
+ } else {
+ switch (wi) {
+ case 4:
+ {
+ uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
+ *ei = (v >> eibase) & 0x3;
+ }
+ break;
+
+ case 5:
+ if (varway56) {
+ uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
+ *ei = (v >> eibase) & 0x3;
+ } else {
+ *ei = (v >> 27) & 0x1;
+ }
+ break;
+
+ case 6:
+ if (varway56) {
+ uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
+ *ei = (v >> eibase) & 0x7;
+ } else {
+ *ei = (v >> 28) & 0x1;
+ }
+ break;
+
+ default:
+ *ei = 0;
+ break;
+ }
+ }
+ *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
+}
+
+/*!
+ * Split TLB address into TLB way, entry index and VPN (with index).
+ * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
+ */
+static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
+ uint32_t *vpn, uint32_t *wi, uint32_t *ei)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ *wi = v & (dtlb ? 0xf : 0x7);
+ split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
+ } else {
+ *vpn = v & REGION_PAGE_MASK;
+ *wi = 0;
+ *ei = (v >> 29) & 0x7;
+ }
+}
+
+static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
+ uint32_t v, bool dtlb, uint32_t *pwi)
+{
+ uint32_t vpn;
+ uint32_t wi;
+ uint32_t ei;
+
+ split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
+ if (pwi) {
+ *pwi = wi;
+ }
+ return xtensa_tlb_get_entry(env, dtlb, wi, ei);
+}
+
+uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ uint32_t wi;
+ const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
+ return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
+ } else {
+ return v & REGION_PAGE_MASK;
+ }
+}
+
+uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
+ return entry->paddr | entry->attr;
+}
+
+void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ uint32_t wi;
+ xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
+ if (entry->variable && entry->asid) {
+ tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr);
+ entry->asid = 0;
+ }
+ }
+}
+
+uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ uint32_t wi;
+ uint32_t ei;
+ uint8_t ring;
+ int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);
+
+ switch (res) {
+ case 0:
+ if (ring >= xtensa_get_ring(env)) {
+ return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
+ }
+ break;
+
+ case INST_TLB_MULTI_HIT_CAUSE:
+ case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
+ HELPER(exception_cause_vaddr)(env, env->pc, res, v);
+ break;
+ }
+ return 0;
+ } else {
+ return (v & REGION_PAGE_MASK) | 0x1;
+ }
+}
+
+void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
+ xtensa_tlb_entry *entry, bool dtlb,
+ unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
+{
+ entry->vaddr = vpn;
+ entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
+ entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
+ entry->attr = pte & 0xf;
+}
+
+void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
+ unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
+{
+ XtensaCPU *cpu = xtensa_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
+
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ if (entry->variable) {
+ if (entry->asid) {
+ tlb_flush_page(cs, entry->vaddr);
+ }
+ xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
+ tlb_flush_page(cs, entry->vaddr);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s %d, %d, %d trying to set immutable entry\n",
+ __func__, dtlb, wi, ei);
+ }
+ } else {
+ tlb_flush_page(cs, entry->vaddr);
+ if (xtensa_option_enabled(env->config,
+ XTENSA_OPTION_REGION_TRANSLATION)) {
+ entry->paddr = pte & REGION_PAGE_MASK;
+ }
+ entry->attr = pte & 0xf;
+ }
+}
+
+void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
+{
+ uint32_t vpn;
+ uint32_t wi;
+ uint32_t ei;
+ split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
+ xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
+}
+
+
+void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
+{
+ uint32_t change = v ^ env->sregs[IBREAKENABLE];
+ unsigned i;
+
+ for (i = 0; i < env->config->nibreak; ++i) {
+ if (change & (1 << i)) {
+ tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
+ }
+ }
+ env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
+}
+
+void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
+{
+ if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
+ tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
+ tb_invalidate_virtual_addr(env, v);
+ }
+ env->sregs[IBREAKA + i] = v;
+}
+
+static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
+ uint32_t dbreakc)
+{
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+ int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
+ uint32_t mask = dbreakc | ~DBREAKC_MASK;
+
+ if (env->cpu_watchpoint[i]) {
+ cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
+ }
+ if (dbreakc & DBREAKC_SB) {
+ flags |= BP_MEM_WRITE;
+ }
+ if (dbreakc & DBREAKC_LB) {
+ flags |= BP_MEM_READ;
+ }
+ /* contiguous mask after inversion is one less than some power of 2 */
+ if ((~mask + 1) & ~mask) {
+ qemu_log_mask(LOG_GUEST_ERROR, "DBREAKC mask is not contiguous: 0x%08x\n", dbreakc);
+ /* cut mask after the first zero bit */
+ mask = 0xffffffff << (32 - clo32(mask));
+ }
+ if (cpu_watchpoint_insert(cs, dbreaka & mask, ~mask + 1,
+ flags, &env->cpu_watchpoint[i])) {
+ env->cpu_watchpoint[i] = NULL;
+ qemu_log_mask(LOG_GUEST_ERROR, "Failed to set data breakpoint at 0x%08x/%d\n",
+ dbreaka & mask, ~mask + 1);
+ }
+}
+
+void HELPER(wsr_dbreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
+{
+ uint32_t dbreakc = env->sregs[DBREAKC + i];
+
+ if ((dbreakc & DBREAKC_SB_LB) &&
+ env->sregs[DBREAKA + i] != v) {
+ set_dbreak(env, i, v, dbreakc);
+ }
+ env->sregs[DBREAKA + i] = v;
+}
+
+void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
+{
+ if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) {
+ if (v & DBREAKC_SB_LB) {
+ set_dbreak(env, i, env->sregs[DBREAKA + i], v);
+ } else {
+ if (env->cpu_watchpoint[i]) {
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
+ cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
+ env->cpu_watchpoint[i] = NULL;
+ }
+ }
+ }
+ env->sregs[DBREAKC + i] = v;
+}
+
+void HELPER(wur_fcr)(CPUXtensaState *env, uint32_t v)
+{
+ static const int rounding_mode[] = {
+ float_round_nearest_even,
+ float_round_to_zero,
+ float_round_up,
+ float_round_down,
+ };
+
+ env->uregs[FCR] = v & 0xfffff07f;
+ set_float_rounding_mode(rounding_mode[v & 3], &env->fp_status);
+}
+
+float32 HELPER(abs_s)(float32 v)
+{
+ return float32_abs(v);
+}
+
+float32 HELPER(neg_s)(float32 v)
+{
+ return float32_chs(v);
+}
+
+float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_add(a, b, &env->fp_status);
+}
+
+float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_sub(a, b, &env->fp_status);
+}
+
+float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_mul(a, b, &env->fp_status);
+}
+
+float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
+{
+ return float32_muladd(b, c, a, 0,
+ &env->fp_status);
+}
+
+float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
+{
+ return float32_muladd(b, c, a, float_muladd_negate_product,
+ &env->fp_status);
+}
+
+uint32_t HELPER(ftoi)(float32 v, uint32_t rounding_mode, uint32_t scale)
+{
+ float_status fp_status = {0};
+
+ set_float_rounding_mode(rounding_mode, &fp_status);
+ return float32_to_int32(
+ float32_scalbn(v, scale, &fp_status), &fp_status);
+}
+
+uint32_t HELPER(ftoui)(float32 v, uint32_t rounding_mode, uint32_t scale)
+{
+ float_status fp_status = {0};
+ float32 res;
+
+ set_float_rounding_mode(rounding_mode, &fp_status);
+
+ res = float32_scalbn(v, scale, &fp_status);
+
+ if (float32_is_neg(v) && !float32_is_any_nan(v)) {
+ return float32_to_int32(res, &fp_status);
+ } else {
+ return float32_to_uint32(res, &fp_status);
+ }
+}
+
+float32 HELPER(itof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
+{
+ return float32_scalbn(int32_to_float32(v, &env->fp_status),
+ (int32_t)scale, &env->fp_status);
+}
+
+float32 HELPER(uitof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
+{
+ return float32_scalbn(uint32_to_float32(v, &env->fp_status),
+ (int32_t)scale, &env->fp_status);
+}
+
+static inline void set_br(CPUXtensaState *env, bool v, uint32_t br)
+{
+ if (v) {
+ env->sregs[BR] |= br;
+ } else {
+ env->sregs[BR] &= ~br;
+ }
+}
+
+void HELPER(un_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ set_br(env, float32_unordered_quiet(a, b, &env->fp_status), br);
+}
+
+void HELPER(oeq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ set_br(env, float32_eq_quiet(a, b, &env->fp_status), br);
+}
+
+void HELPER(ueq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ int v = float32_compare_quiet(a, b, &env->fp_status);
+ set_br(env, v == float_relation_equal || v == float_relation_unordered, br);
+}
+
+void HELPER(olt_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ set_br(env, float32_lt_quiet(a, b, &env->fp_status), br);
+}
+
+void HELPER(ult_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ int v = float32_compare_quiet(a, b, &env->fp_status);
+ set_br(env, v == float_relation_less || v == float_relation_unordered, br);
+}
+
+void HELPER(ole_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ set_br(env, float32_le_quiet(a, b, &env->fp_status), br);
+}
+
+void HELPER(ule_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ int v = float32_compare_quiet(a, b, &env->fp_status);
+ set_br(env, v != float_relation_greater, br);
+}
diff --git a/target/xtensa/overlay_tool.h b/target/xtensa/overlay_tool.h
new file mode 100644
index 0000000000..e8a7fda3d8
--- /dev/null
+++ b/target/xtensa/overlay_tool.h
@@ -0,0 +1,602 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
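+/* Reduce each XTREG() register description from the imported gdb-config.c
+ * to the fields QEMU uses: target register number, type, group and size.
+ */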
+#define XTREG(idx, ofs, bi, sz, al, no, flags, cp, typ, grp, name, \
+ a1, a2, a3, a4, a5, a6) \
+ { .targno = (no), .type = (typ), .group = (grp), .size = (sz) },
+#define XTREG_END { .targno = -1 },
+
+#ifndef XCHAL_HAVE_DEPBITS
+#define XCHAL_HAVE_DEPBITS 0
+#endif
+
+#ifndef XCHAL_HAVE_DIV32
+#define XCHAL_HAVE_DIV32 0
+#endif
+
+#ifndef XCHAL_UNALIGNED_LOAD_HW
+#define XCHAL_UNALIGNED_LOAD_HW 0
+#endif
+
+#ifndef XCHAL_HAVE_VECBASE
+#define XCHAL_HAVE_VECBASE 0
+#define XCHAL_VECBASE_RESET_VADDR 0
+#endif
+
+#ifndef XCHAL_HW_MIN_VERSION
+#define XCHAL_HW_MIN_VERSION 0
+#endif
+
+#define XCHAL_OPTION(xchal, qemu) ((xchal) ? XTENSA_OPTION_BIT(qemu) : 0)
+
+#define XTENSA_OPTIONS ( \
+ XCHAL_OPTION(XCHAL_HAVE_DENSITY, XTENSA_OPTION_CODE_DENSITY) | \
+ XCHAL_OPTION(XCHAL_HAVE_LOOPS, XTENSA_OPTION_LOOP) | \
+ XCHAL_OPTION(XCHAL_HAVE_ABSOLUTE_LITERALS, XTENSA_OPTION_EXTENDED_L32R) | \
+ XCHAL_OPTION(XCHAL_HAVE_MUL16, XTENSA_OPTION_16_BIT_IMUL) | \
+ XCHAL_OPTION(XCHAL_HAVE_MUL32, XTENSA_OPTION_32_BIT_IMUL) | \
+ XCHAL_OPTION(XCHAL_HAVE_MUL32_HIGH, XTENSA_OPTION_32_BIT_IMUL_HIGH) | \
+ XCHAL_OPTION(XCHAL_HAVE_DIV32, XTENSA_OPTION_32_BIT_IDIV) | \
+ XCHAL_OPTION(XCHAL_HAVE_MAC16, XTENSA_OPTION_MAC16) | \
+ XCHAL_OPTION(XCHAL_HAVE_NSA, XTENSA_OPTION_MISC_OP_NSA) | \
+ XCHAL_OPTION(XCHAL_HAVE_MINMAX, XTENSA_OPTION_MISC_OP_MINMAX) | \
+ XCHAL_OPTION(XCHAL_HAVE_SEXT, XTENSA_OPTION_MISC_OP_SEXT) | \
+ XCHAL_OPTION(XCHAL_HAVE_CLAMPS, XTENSA_OPTION_MISC_OP_CLAMPS) | \
+ XCHAL_OPTION(XCHAL_HAVE_CP, XTENSA_OPTION_COPROCESSOR) | \
+ XCHAL_OPTION(XCHAL_HAVE_BOOLEANS, XTENSA_OPTION_BOOLEAN) | \
+ XCHAL_OPTION(XCHAL_HAVE_FP, XTENSA_OPTION_FP_COPROCESSOR) | \
+ XCHAL_OPTION(XCHAL_HAVE_RELEASE_SYNC, XTENSA_OPTION_MP_SYNCHRO) | \
+ XCHAL_OPTION(XCHAL_HAVE_S32C1I, XTENSA_OPTION_CONDITIONAL_STORE) | \
+ XCHAL_OPTION(XCHAL_HAVE_S32C1I && XCHAL_HW_MIN_VERSION >= 230000, \
+ XTENSA_OPTION_ATOMCTL) | \
+ XCHAL_OPTION(XCHAL_HAVE_DEPBITS, XTENSA_OPTION_DEPBITS) | \
+ /* Interrupts and exceptions */ \
+ XCHAL_OPTION(XCHAL_HAVE_EXCEPTIONS, XTENSA_OPTION_EXCEPTION) | \
+ XCHAL_OPTION(XCHAL_HAVE_VECBASE, XTENSA_OPTION_RELOCATABLE_VECTOR) | \
+ XCHAL_OPTION(XCHAL_UNALIGNED_LOAD_EXCEPTION, \
+ XTENSA_OPTION_UNALIGNED_EXCEPTION) | \
+ XCHAL_OPTION(XCHAL_HAVE_INTERRUPTS, XTENSA_OPTION_INTERRUPT) | \
+ XCHAL_OPTION(XCHAL_HAVE_HIGHPRI_INTERRUPTS, \
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT) | \
+ XCHAL_OPTION(XCHAL_HAVE_CCOUNT, XTENSA_OPTION_TIMER_INTERRUPT) | \
+ /* Local memory, TODO */ \
+ XCHAL_OPTION(XCHAL_ICACHE_WAYS, XTENSA_OPTION_ICACHE) | \
+ XCHAL_OPTION(XCHAL_ICACHE_LINE_LOCKABLE, \
+ XTENSA_OPTION_ICACHE_INDEX_LOCK) | \
+ XCHAL_OPTION(XCHAL_DCACHE_WAYS, XTENSA_OPTION_DCACHE) | \
+ XCHAL_OPTION(XCHAL_DCACHE_LINE_LOCKABLE, \
+ XTENSA_OPTION_DCACHE_INDEX_LOCK) | \
+ XCHAL_OPTION(XCHAL_UNALIGNED_LOAD_HW, XTENSA_OPTION_HW_ALIGNMENT) | \
+ /* Memory protection and translation */ \
+ XCHAL_OPTION(XCHAL_HAVE_MIMIC_CACHEATTR, \
+ XTENSA_OPTION_REGION_PROTECTION) | \
+ XCHAL_OPTION(XCHAL_HAVE_XLT_CACHEATTR, \
+ XTENSA_OPTION_REGION_TRANSLATION) | \
+ XCHAL_OPTION(XCHAL_HAVE_PTP_MMU, XTENSA_OPTION_MMU) | \
+ XCHAL_OPTION(XCHAL_HAVE_CACHEATTR, XTENSA_OPTION_CACHEATTR) | \
+ /* Other, TODO */ \
+ XCHAL_OPTION(XCHAL_HAVE_WINDOWED, XTENSA_OPTION_WINDOWED_REGISTER) | \
+ XCHAL_OPTION(XCHAL_HAVE_DEBUG, XTENSA_OPTION_DEBUG) |\
+ XCHAL_OPTION(XCHAL_NUM_MISC_REGS > 0, XTENSA_OPTION_MISC_SR) | \
+ XCHAL_OPTION(XCHAL_HAVE_THREADPTR, XTENSA_OPTION_THREAD_POINTER) | \
+ XCHAL_OPTION(XCHAL_HAVE_PRID, XTENSA_OPTION_PROCESSOR_ID))
+
+#ifndef XCHAL_WINDOW_OF4_VECOFS
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#endif
+
+#if XCHAL_HAVE_WINDOWED
+#define WINDOW_VECTORS \
+ [EXC_WINDOW_OVERFLOW4] = XCHAL_WINDOW_OF4_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR, \
+ [EXC_WINDOW_UNDERFLOW4] = XCHAL_WINDOW_UF4_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR, \
+ [EXC_WINDOW_OVERFLOW8] = XCHAL_WINDOW_OF8_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR, \
+ [EXC_WINDOW_UNDERFLOW8] = XCHAL_WINDOW_UF8_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR, \
+ [EXC_WINDOW_OVERFLOW12] = XCHAL_WINDOW_OF12_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR, \
+ [EXC_WINDOW_UNDERFLOW12] = XCHAL_WINDOW_UF12_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR,
+#else
+#define WINDOW_VECTORS
+#endif
+
+#define EXCEPTION_VECTORS { \
+ [EXC_RESET] = XCHAL_RESET_VECTOR_VADDR, \
+ WINDOW_VECTORS \
+ [EXC_KERNEL] = XCHAL_KERNEL_VECTOR_VADDR, \
+ [EXC_USER] = XCHAL_USER_VECTOR_VADDR, \
+ [EXC_DOUBLE] = XCHAL_DOUBLEEXC_VECTOR_VADDR, \
+ [EXC_DEBUG] = XCHAL_DEBUG_VECTOR_VADDR, \
+ }
+
+#define INTERRUPT_VECTORS { \
+ 0, \
+ 0, \
+ XCHAL_INTLEVEL2_VECTOR_VADDR, \
+ XCHAL_INTLEVEL3_VECTOR_VADDR, \
+ XCHAL_INTLEVEL4_VECTOR_VADDR, \
+ XCHAL_INTLEVEL5_VECTOR_VADDR, \
+ XCHAL_INTLEVEL6_VECTOR_VADDR, \
+ XCHAL_INTLEVEL7_VECTOR_VADDR, \
+ }
+
+#define LEVEL_MASKS { \
+ [1] = XCHAL_INTLEVEL1_MASK, \
+ [2] = XCHAL_INTLEVEL2_MASK, \
+ [3] = XCHAL_INTLEVEL3_MASK, \
+ [4] = XCHAL_INTLEVEL4_MASK, \
+ [5] = XCHAL_INTLEVEL5_MASK, \
+ [6] = XCHAL_INTLEVEL6_MASK, \
+ [7] = XCHAL_INTLEVEL7_MASK, \
+ }
+
+#define INTTYPE_MASKS { \
+ [INTTYPE_EDGE] = XCHAL_INTTYPE_MASK_EXTERN_EDGE, \
+ [INTTYPE_NMI] = XCHAL_INTTYPE_MASK_NMI, \
+ [INTTYPE_SOFTWARE] = XCHAL_INTTYPE_MASK_SOFTWARE, \
+ }
+
+#define XTHAL_INTTYPE_EXTERN_LEVEL INTTYPE_LEVEL
+#define XTHAL_INTTYPE_EXTERN_EDGE INTTYPE_EDGE
+#define XTHAL_INTTYPE_NMI INTTYPE_NMI
+#define XTHAL_INTTYPE_SOFTWARE INTTYPE_SOFTWARE
+#define XTHAL_INTTYPE_TIMER INTTYPE_TIMER
+#define XTHAL_INTTYPE_TBD1 INTTYPE_DEBUG
+#define XTHAL_INTTYPE_TBD2 INTTYPE_WRITE_ERR
+#define XTHAL_INTTYPE_WRITE_ERROR INTTYPE_WRITE_ERR
+#define XTHAL_INTTYPE_PROFILING INTTYPE_PROFILING
+
+
+#define INTERRUPT(i) { \
+ .level = XCHAL_INT ## i ## _LEVEL, \
+ .inttype = XCHAL_INT ## i ## _TYPE, \
+ }
+
+#define INTERRUPTS { \
+ [0] = INTERRUPT(0), \
+ [1] = INTERRUPT(1), \
+ [2] = INTERRUPT(2), \
+ [3] = INTERRUPT(3), \
+ [4] = INTERRUPT(4), \
+ [5] = INTERRUPT(5), \
+ [6] = INTERRUPT(6), \
+ [7] = INTERRUPT(7), \
+ [8] = INTERRUPT(8), \
+ [9] = INTERRUPT(9), \
+ [10] = INTERRUPT(10), \
+ [11] = INTERRUPT(11), \
+ [12] = INTERRUPT(12), \
+ [13] = INTERRUPT(13), \
+ [14] = INTERRUPT(14), \
+ [15] = INTERRUPT(15), \
+ [16] = INTERRUPT(16), \
+ [17] = INTERRUPT(17), \
+ [18] = INTERRUPT(18), \
+ [19] = INTERRUPT(19), \
+ [20] = INTERRUPT(20), \
+ [21] = INTERRUPT(21), \
+ [22] = INTERRUPT(22), \
+ [23] = INTERRUPT(23), \
+ [24] = INTERRUPT(24), \
+ [25] = INTERRUPT(25), \
+ [26] = INTERRUPT(26), \
+ [27] = INTERRUPT(27), \
+ [28] = INTERRUPT(28), \
+ [29] = INTERRUPT(29), \
+ [30] = INTERRUPT(30), \
+ [31] = INTERRUPT(31), \
+ }
+
+#define TIMERINTS { \
+ [0] = XCHAL_TIMER0_INTERRUPT, \
+ [1] = XCHAL_TIMER1_INTERRUPT, \
+ [2] = XCHAL_TIMER2_INTERRUPT, \
+ }
+
+#define EXTINTS { \
+ [0] = XCHAL_EXTINT0_NUM, \
+ [1] = XCHAL_EXTINT1_NUM, \
+ [2] = XCHAL_EXTINT2_NUM, \
+ [3] = XCHAL_EXTINT3_NUM, \
+ [4] = XCHAL_EXTINT4_NUM, \
+ [5] = XCHAL_EXTINT5_NUM, \
+ [6] = XCHAL_EXTINT6_NUM, \
+ [7] = XCHAL_EXTINT7_NUM, \
+ [8] = XCHAL_EXTINT8_NUM, \
+ [9] = XCHAL_EXTINT9_NUM, \
+ [10] = XCHAL_EXTINT10_NUM, \
+ [11] = XCHAL_EXTINT11_NUM, \
+ [12] = XCHAL_EXTINT12_NUM, \
+ [13] = XCHAL_EXTINT13_NUM, \
+ [14] = XCHAL_EXTINT14_NUM, \
+ [15] = XCHAL_EXTINT15_NUM, \
+ [16] = XCHAL_EXTINT16_NUM, \
+ [17] = XCHAL_EXTINT17_NUM, \
+ [18] = XCHAL_EXTINT18_NUM, \
+ [19] = XCHAL_EXTINT19_NUM, \
+ [20] = XCHAL_EXTINT20_NUM, \
+ [21] = XCHAL_EXTINT21_NUM, \
+ [22] = XCHAL_EXTINT22_NUM, \
+ [23] = XCHAL_EXTINT23_NUM, \
+ [24] = XCHAL_EXTINT24_NUM, \
+ [25] = XCHAL_EXTINT25_NUM, \
+ [26] = XCHAL_EXTINT26_NUM, \
+ [27] = XCHAL_EXTINT27_NUM, \
+ [28] = XCHAL_EXTINT28_NUM, \
+ [29] = XCHAL_EXTINT29_NUM, \
+ [30] = XCHAL_EXTINT30_NUM, \
+ [31] = XCHAL_EXTINT31_NUM, \
+ }
+
+#define EXCEPTIONS_SECTION \
+ .excm_level = XCHAL_EXCM_LEVEL, \
+ .vecbase = XCHAL_VECBASE_RESET_VADDR, \
+ .exception_vector = EXCEPTION_VECTORS
+
+#define INTERRUPTS_SECTION \
+ .ninterrupt = XCHAL_NUM_INTERRUPTS, \
+ .nlevel = XCHAL_NUM_INTLEVELS, \
+ .interrupt_vector = INTERRUPT_VECTORS, \
+ .level_mask = LEVEL_MASKS, \
+ .inttype_mask = INTTYPE_MASKS, \
+ .interrupt = INTERRUPTS, \
+ .nccompare = XCHAL_NUM_TIMERS, \
+ .timerint = TIMERINTS, \
+ .nextint = XCHAL_NUM_EXTINTERRUPTS, \
+ .extint = EXTINTS
+
+#if XCHAL_HAVE_PTP_MMU
+
+#define TLB_TEMPLATE(ways, refill_way_size, way56) { \
+ .nways = ways, \
+ .way_size = { \
+ (refill_way_size), (refill_way_size), \
+ (refill_way_size), (refill_way_size), \
+ 4, (way56) ? 4 : 2, (way56) ? 8 : 2, 1, 1, 1, \
+ }, \
+ .varway56 = (way56), \
+ .nrefillentries = (refill_way_size) * 4, \
+ }
+
+#define ITLB(varway56) \
+ TLB_TEMPLATE(7, 1 << XCHAL_ITLB_ARF_ENTRIES_LOG2, varway56)
+
+#define DTLB(varway56) \
+ TLB_TEMPLATE(10, 1 << XCHAL_DTLB_ARF_ENTRIES_LOG2, varway56)
+
+#define TLB_SECTION \
+ .itlb = ITLB(XCHAL_HAVE_SPANNING_WAY), \
+ .dtlb = DTLB(XCHAL_HAVE_SPANNING_WAY)
+
+#elif XCHAL_HAVE_XLT_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR
+
+#define TLB_TEMPLATE { \
+ .nways = 1, \
+ .way_size = { \
+ 8, \
+ } \
+ }
+
+#define TLB_SECTION \
+ .itlb = TLB_TEMPLATE, \
+ .dtlb = TLB_TEMPLATE
+
+#endif
+
+#if (defined(TARGET_WORDS_BIGENDIAN) != 0) == (XCHAL_HAVE_BE != 0)
+#define REGISTER_CORE(core) \
+ static void __attribute__((constructor)) register_core(void) \
+ { \
+ static XtensaConfigList node = { \
+ .config = &core, \
+ }; \
+ xtensa_finalize_config(&core); \
+ xtensa_register_core(&node); \
+ }
+#else
+#define REGISTER_CORE(core)
+#endif
+
+#define DEBUG_SECTION \
+ .debug_level = XCHAL_DEBUGLEVEL, \
+ .nibreak = XCHAL_NUM_IBREAK, \
+ .ndbreak = XCHAL_NUM_DBREAK
+
+#define CONFIG_SECTION \
+ .configid = { \
+ XCHAL_HW_CONFIGID0, \
+ XCHAL_HW_CONFIGID1, \
+ }
+
+#define DEFAULT_SECTIONS \
+ .options = XTENSA_OPTIONS, \
+ .nareg = XCHAL_NUM_AREGS, \
+ .ndepc = (XCHAL_XEA_VERSION >= 2), \
+ EXCEPTIONS_SECTION, \
+ INTERRUPTS_SECTION, \
+ TLB_SECTION, \
+ DEBUG_SECTION, \
+ CONFIG_SECTION
+
+
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 2
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0
+#endif
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 3
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0
+#endif
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 4
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0
+#endif
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 5
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0
+#endif
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 6
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0
+#endif
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 7
+#define XCHAL_INTLEVEL7_VECTOR_VADDR 0
+#endif
+
+
+#if XCHAL_NUM_INTERRUPTS <= 0
+#define XCHAL_INT0_LEVEL 0
+#define XCHAL_INT0_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 1
+#define XCHAL_INT1_LEVEL 0
+#define XCHAL_INT1_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 2
+#define XCHAL_INT2_LEVEL 0
+#define XCHAL_INT2_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 3
+#define XCHAL_INT3_LEVEL 0
+#define XCHAL_INT3_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 4
+#define XCHAL_INT4_LEVEL 0
+#define XCHAL_INT4_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 5
+#define XCHAL_INT5_LEVEL 0
+#define XCHAL_INT5_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 6
+#define XCHAL_INT6_LEVEL 0
+#define XCHAL_INT6_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 7
+#define XCHAL_INT7_LEVEL 0
+#define XCHAL_INT7_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 8
+#define XCHAL_INT8_LEVEL 0
+#define XCHAL_INT8_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 9
+#define XCHAL_INT9_LEVEL 0
+#define XCHAL_INT9_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 10
+#define XCHAL_INT10_LEVEL 0
+#define XCHAL_INT10_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 11
+#define XCHAL_INT11_LEVEL 0
+#define XCHAL_INT11_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 12
+#define XCHAL_INT12_LEVEL 0
+#define XCHAL_INT12_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 13
+#define XCHAL_INT13_LEVEL 0
+#define XCHAL_INT13_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 14
+#define XCHAL_INT14_LEVEL 0
+#define XCHAL_INT14_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 15
+#define XCHAL_INT15_LEVEL 0
+#define XCHAL_INT15_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 16
+#define XCHAL_INT16_LEVEL 0
+#define XCHAL_INT16_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 17
+#define XCHAL_INT17_LEVEL 0
+#define XCHAL_INT17_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 18
+#define XCHAL_INT18_LEVEL 0
+#define XCHAL_INT18_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 19
+#define XCHAL_INT19_LEVEL 0
+#define XCHAL_INT19_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 20
+#define XCHAL_INT20_LEVEL 0
+#define XCHAL_INT20_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 21
+#define XCHAL_INT21_LEVEL 0
+#define XCHAL_INT21_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 22
+#define XCHAL_INT22_LEVEL 0
+#define XCHAL_INT22_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 23
+#define XCHAL_INT23_LEVEL 0
+#define XCHAL_INT23_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 24
+#define XCHAL_INT24_LEVEL 0
+#define XCHAL_INT24_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 25
+#define XCHAL_INT25_LEVEL 0
+#define XCHAL_INT25_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 26
+#define XCHAL_INT26_LEVEL 0
+#define XCHAL_INT26_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 27
+#define XCHAL_INT27_LEVEL 0
+#define XCHAL_INT27_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 28
+#define XCHAL_INT28_LEVEL 0
+#define XCHAL_INT28_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 29
+#define XCHAL_INT29_LEVEL 0
+#define XCHAL_INT29_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 30
+#define XCHAL_INT30_LEVEL 0
+#define XCHAL_INT30_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 31
+#define XCHAL_INT31_LEVEL 0
+#define XCHAL_INT31_TYPE 0
+#endif
+
+
+#if XCHAL_NUM_EXTINTERRUPTS <= 0
+#define XCHAL_EXTINT0_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 1
+#define XCHAL_EXTINT1_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 2
+#define XCHAL_EXTINT2_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 3
+#define XCHAL_EXTINT3_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 4
+#define XCHAL_EXTINT4_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 5
+#define XCHAL_EXTINT5_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 6
+#define XCHAL_EXTINT6_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 7
+#define XCHAL_EXTINT7_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 8
+#define XCHAL_EXTINT8_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 9
+#define XCHAL_EXTINT9_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 10
+#define XCHAL_EXTINT10_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 11
+#define XCHAL_EXTINT11_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 12
+#define XCHAL_EXTINT12_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 13
+#define XCHAL_EXTINT13_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 14
+#define XCHAL_EXTINT14_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 15
+#define XCHAL_EXTINT15_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 16
+#define XCHAL_EXTINT16_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 17
+#define XCHAL_EXTINT17_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 18
+#define XCHAL_EXTINT18_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 19
+#define XCHAL_EXTINT19_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 20
+#define XCHAL_EXTINT20_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 21
+#define XCHAL_EXTINT21_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 22
+#define XCHAL_EXTINT22_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 23
+#define XCHAL_EXTINT23_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 24
+#define XCHAL_EXTINT24_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 25
+#define XCHAL_EXTINT25_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 26
+#define XCHAL_EXTINT26_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 27
+#define XCHAL_EXTINT27_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 28
+#define XCHAL_EXTINT28_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 29
+#define XCHAL_EXTINT29_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 30
+#define XCHAL_EXTINT30_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 31
+#define XCHAL_EXTINT31_NUM 0
+#endif
+
+
+#define XTHAL_TIMER_UNCONFIGURED 0
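overlay_tool.h is an include-time adapter rather than an ordinary header: a core description file first includes the core-isa.h generated for its configuration (which defines the XCHAL_* parameters) and then this file, whose XTENSA_OPTIONS, EXCEPTIONS_SECTION, INTERRUPTS_SECTION, TLB_SECTION, DEBUG_SECTION and CONFIG_SECTION macros (collected by DEFAULT_SECTIONS) translate those parameters into XtensaConfig initializers. XTREG/XTREG_END let a core's generated register list expand into the entries the gdb stub uses, and REGISTER_CORE() finalizes and registers the configuration from a constructor, but only when the core's endianness matches the build target; otherwise it expands to nothing. A minimal sketch of such a core file follows; it is not part of this patch, the "mycore" name and all values in it are invented, and the field usage is abbreviated.

/* Hypothetical core description -- for illustration only.  A real core file
 * follows the same pattern with its own generated core-isa.h and values. */
#include "qemu/osdep.h"
#include "cpu.h"

#include "core-mycore/core-isa.h"   /* defines the XCHAL_* parameters */
#include "overlay_tool.h"           /* maps them onto XtensaConfig fields */

static XtensaConfig mycore __attribute__((unused)) = {
    .name = "mycore",
    .clock_freq_khz = 10000,        /* invented value */
    DEFAULT_SECTIONS                /* options, exceptions, interrupts,
                                       TLBs, debug and configid */
};

REGISTER_CORE(mycore)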
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
new file mode 100644
index 0000000000..0858c296ea
--- /dev/null
+++ b/target/xtensa/translate.c
@@ -0,0 +1,3225 @@
+/*
+ * Xtensa ISA:
+ * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
+ *
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "disas/disas.h"
+#include "tcg-op.h"
+#include "qemu/log.h"
+#include "sysemu/sysemu.h"
+#include "exec/cpu_ldst.h"
+#include "exec/semihost.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+
+typedef struct DisasContext {
+ const XtensaConfig *config;
+ TranslationBlock *tb;
+ uint32_t pc;
+ uint32_t next_pc;
+ int cring;
+ int ring;
+ uint32_t lbeg;
+ uint32_t lend;
+ TCGv_i32 litbase;
+ int is_jmp;
+ int singlestep_enabled;
+
+ bool sar_5bit;
+ bool sar_m32_5bit;
+ bool sar_m32_allocated;
+ TCGv_i32 sar_m32;
+
+ uint32_t ccount_delta;
+ unsigned window;
+
+ bool debug;
+ bool icount;
+ TCGv_i32 next_icount;
+
+ unsigned cpenable;
+} DisasContext;
+
+static TCGv_env cpu_env;
+static TCGv_i32 cpu_pc;
+static TCGv_i32 cpu_R[16];
+static TCGv_i32 cpu_FR[16];
+static TCGv_i32 cpu_SR[256];
+static TCGv_i32 cpu_UR[256];
+
+#include "exec/gen-icount.h"
+
+typedef struct XtensaReg {
+ const char *name;
+ uint64_t opt_bits;
+ enum {
+ SR_R = 1,
+ SR_W = 2,
+ SR_X = 4,
+ SR_RW = 3,
+ SR_RWX = 7,
+ } access;
+} XtensaReg;
+
+#define XTENSA_REG_ACCESS(regname, opt, acc) { \
+ .name = (regname), \
+ .opt_bits = XTENSA_OPTION_BIT(opt), \
+ .access = (acc), \
+ }
+
+#define XTENSA_REG(regname, opt) XTENSA_REG_ACCESS(regname, opt, SR_RWX)
+
+#define XTENSA_REG_BITS_ACCESS(regname, opt, acc) { \
+ .name = (regname), \
+ .opt_bits = (opt), \
+ .access = (acc), \
+ }
+
+#define XTENSA_REG_BITS(regname, opt) \
+ XTENSA_REG_BITS_ACCESS(regname, opt, SR_RWX)
+
+static const XtensaReg sregnames[256] = {
+ [LBEG] = XTENSA_REG("LBEG", XTENSA_OPTION_LOOP),
+ [LEND] = XTENSA_REG("LEND", XTENSA_OPTION_LOOP),
+ [LCOUNT] = XTENSA_REG("LCOUNT", XTENSA_OPTION_LOOP),
+ [SAR] = XTENSA_REG_BITS("SAR", XTENSA_OPTION_ALL),
+ [BR] = XTENSA_REG("BR", XTENSA_OPTION_BOOLEAN),
+ [LITBASE] = XTENSA_REG("LITBASE", XTENSA_OPTION_EXTENDED_L32R),
+ [SCOMPARE1] = XTENSA_REG("SCOMPARE1", XTENSA_OPTION_CONDITIONAL_STORE),
+ [ACCLO] = XTENSA_REG("ACCLO", XTENSA_OPTION_MAC16),
+ [ACCHI] = XTENSA_REG("ACCHI", XTENSA_OPTION_MAC16),
+ [MR] = XTENSA_REG("MR0", XTENSA_OPTION_MAC16),
+ [MR + 1] = XTENSA_REG("MR1", XTENSA_OPTION_MAC16),
+ [MR + 2] = XTENSA_REG("MR2", XTENSA_OPTION_MAC16),
+ [MR + 3] = XTENSA_REG("MR3", XTENSA_OPTION_MAC16),
+ [WINDOW_BASE] = XTENSA_REG("WINDOW_BASE", XTENSA_OPTION_WINDOWED_REGISTER),
+ [WINDOW_START] = XTENSA_REG("WINDOW_START",
+ XTENSA_OPTION_WINDOWED_REGISTER),
+ [PTEVADDR] = XTENSA_REG("PTEVADDR", XTENSA_OPTION_MMU),
+ [RASID] = XTENSA_REG("RASID", XTENSA_OPTION_MMU),
+ [ITLBCFG] = XTENSA_REG("ITLBCFG", XTENSA_OPTION_MMU),
+ [DTLBCFG] = XTENSA_REG("DTLBCFG", XTENSA_OPTION_MMU),
+ [IBREAKENABLE] = XTENSA_REG("IBREAKENABLE", XTENSA_OPTION_DEBUG),
+ [CACHEATTR] = XTENSA_REG("CACHEATTR", XTENSA_OPTION_CACHEATTR),
+ [ATOMCTL] = XTENSA_REG("ATOMCTL", XTENSA_OPTION_ATOMCTL),
+ [IBREAKA] = XTENSA_REG("IBREAKA0", XTENSA_OPTION_DEBUG),
+ [IBREAKA + 1] = XTENSA_REG("IBREAKA1", XTENSA_OPTION_DEBUG),
+ [DBREAKA] = XTENSA_REG("DBREAKA0", XTENSA_OPTION_DEBUG),
+ [DBREAKA + 1] = XTENSA_REG("DBREAKA1", XTENSA_OPTION_DEBUG),
+ [DBREAKC] = XTENSA_REG("DBREAKC0", XTENSA_OPTION_DEBUG),
+ [DBREAKC + 1] = XTENSA_REG("DBREAKC1", XTENSA_OPTION_DEBUG),
+ [CONFIGID0] = XTENSA_REG_BITS_ACCESS("CONFIGID0", XTENSA_OPTION_ALL, SR_R),
+ [EPC1] = XTENSA_REG("EPC1", XTENSA_OPTION_EXCEPTION),
+ [EPC1 + 1] = XTENSA_REG("EPC2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPC1 + 2] = XTENSA_REG("EPC3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPC1 + 3] = XTENSA_REG("EPC4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPC1 + 4] = XTENSA_REG("EPC5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPC1 + 5] = XTENSA_REG("EPC6", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPC1 + 6] = XTENSA_REG("EPC7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [DEPC] = XTENSA_REG("DEPC", XTENSA_OPTION_EXCEPTION),
+ [EPS2] = XTENSA_REG("EPS2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPS2 + 1] = XTENSA_REG("EPS3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPS2 + 2] = XTENSA_REG("EPS4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPS2 + 3] = XTENSA_REG("EPS5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPS2 + 4] = XTENSA_REG("EPS6", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPS2 + 5] = XTENSA_REG("EPS7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [CONFIGID1] = XTENSA_REG_BITS_ACCESS("CONFIGID1", XTENSA_OPTION_ALL, SR_R),
+ [EXCSAVE1] = XTENSA_REG("EXCSAVE1", XTENSA_OPTION_EXCEPTION),
+ [EXCSAVE1 + 1] = XTENSA_REG("EXCSAVE2",
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EXCSAVE1 + 2] = XTENSA_REG("EXCSAVE3",
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EXCSAVE1 + 3] = XTENSA_REG("EXCSAVE4",
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EXCSAVE1 + 4] = XTENSA_REG("EXCSAVE5",
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EXCSAVE1 + 5] = XTENSA_REG("EXCSAVE6",
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EXCSAVE1 + 6] = XTENSA_REG("EXCSAVE7",
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [CPENABLE] = XTENSA_REG("CPENABLE", XTENSA_OPTION_COPROCESSOR),
+ [INTSET] = XTENSA_REG_ACCESS("INTSET", XTENSA_OPTION_INTERRUPT, SR_RW),
+ [INTCLEAR] = XTENSA_REG_ACCESS("INTCLEAR", XTENSA_OPTION_INTERRUPT, SR_W),
+ [INTENABLE] = XTENSA_REG("INTENABLE", XTENSA_OPTION_INTERRUPT),
+ [PS] = XTENSA_REG_BITS("PS", XTENSA_OPTION_ALL),
+ [VECBASE] = XTENSA_REG("VECBASE", XTENSA_OPTION_RELOCATABLE_VECTOR),
+ [EXCCAUSE] = XTENSA_REG("EXCCAUSE", XTENSA_OPTION_EXCEPTION),
+ [DEBUGCAUSE] = XTENSA_REG_ACCESS("DEBUGCAUSE", XTENSA_OPTION_DEBUG, SR_R),
+ [CCOUNT] = XTENSA_REG("CCOUNT", XTENSA_OPTION_TIMER_INTERRUPT),
+ [PRID] = XTENSA_REG_ACCESS("PRID", XTENSA_OPTION_PROCESSOR_ID, SR_R),
+ [ICOUNT] = XTENSA_REG("ICOUNT", XTENSA_OPTION_DEBUG),
+ [ICOUNTLEVEL] = XTENSA_REG("ICOUNTLEVEL", XTENSA_OPTION_DEBUG),
+ [EXCVADDR] = XTENSA_REG("EXCVADDR", XTENSA_OPTION_EXCEPTION),
+ [CCOMPARE] = XTENSA_REG("CCOMPARE0", XTENSA_OPTION_TIMER_INTERRUPT),
+ [CCOMPARE + 1] = XTENSA_REG("CCOMPARE1",
+ XTENSA_OPTION_TIMER_INTERRUPT),
+ [CCOMPARE + 2] = XTENSA_REG("CCOMPARE2",
+ XTENSA_OPTION_TIMER_INTERRUPT),
+ [MISC] = XTENSA_REG("MISC0", XTENSA_OPTION_MISC_SR),
+ [MISC + 1] = XTENSA_REG("MISC1", XTENSA_OPTION_MISC_SR),
+ [MISC + 2] = XTENSA_REG("MISC2", XTENSA_OPTION_MISC_SR),
+ [MISC + 3] = XTENSA_REG("MISC3", XTENSA_OPTION_MISC_SR),
+};
+
+static const XtensaReg uregnames[256] = {
+ [THREADPTR] = XTENSA_REG("THREADPTR", XTENSA_OPTION_THREAD_POINTER),
+ [FCR] = XTENSA_REG("FCR", XTENSA_OPTION_FP_COPROCESSOR),
+ [FSR] = XTENSA_REG("FSR", XTENSA_OPTION_FP_COPROCESSOR),
+};
+
+void xtensa_translate_init(void)
+{
+ static const char * const regnames[] = {
+ "ar0", "ar1", "ar2", "ar3",
+ "ar4", "ar5", "ar6", "ar7",
+ "ar8", "ar9", "ar10", "ar11",
+ "ar12", "ar13", "ar14", "ar15",
+ };
+ static const char * const fregnames[] = {
+ "f0", "f1", "f2", "f3",
+ "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11",
+ "f12", "f13", "f14", "f15",
+ };
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+ cpu_pc = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState, pc), "pc");
+
+ for (i = 0; i < 16; i++) {
+ cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState, regs[i]),
+ regnames[i]);
+ }
+
+ for (i = 0; i < 16; i++) {
+ cpu_FR[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState, fregs[i].f32[FP_F32_LOW]),
+ fregnames[i]);
+ }
+
+ for (i = 0; i < 256; ++i) {
+ if (sregnames[i].name) {
+ cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState, sregs[i]),
+ sregnames[i].name);
+ }
+ }
+
+ for (i = 0; i < 256; ++i) {
+ if (uregnames[i].name) {
+ cpu_UR[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState, uregs[i]),
+ uregnames[i].name);
+ }
+ }
+}
+
+static inline bool option_bits_enabled(DisasContext *dc, uint64_t opt)
+{
+ return xtensa_option_bits_enabled(dc->config, opt);
+}
+
+static inline bool option_enabled(DisasContext *dc, int opt)
+{
+ return xtensa_option_enabled(dc->config, opt);
+}
+
+static void init_litbase(DisasContext *dc)
+{
+ if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
+ dc->litbase = tcg_temp_local_new_i32();
+ tcg_gen_andi_i32(dc->litbase, cpu_SR[LITBASE], 0xfffff000);
+ }
+}
+
+static void reset_litbase(DisasContext *dc)
+{
+ if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
+ tcg_temp_free(dc->litbase);
+ }
+}
+
+static void init_sar_tracker(DisasContext *dc)
+{
+ dc->sar_5bit = false;
+ dc->sar_m32_5bit = false;
+ dc->sar_m32_allocated = false;
+}
+
+static void reset_sar_tracker(DisasContext *dc)
+{
+ if (dc->sar_m32_allocated) {
+ tcg_temp_free(dc->sar_m32);
+ }
+}
+
+static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
+{
+ tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
+ if (dc->sar_m32_5bit) {
+ tcg_gen_discard_i32(dc->sar_m32);
+ }
+ dc->sar_5bit = true;
+ dc->sar_m32_5bit = false;
+}
+
+static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
+{
+ TCGv_i32 tmp = tcg_const_i32(32);
+ if (!dc->sar_m32_allocated) {
+ dc->sar_m32 = tcg_temp_local_new_i32();
+ dc->sar_m32_allocated = true;
+ }
+ tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
+ tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
+ dc->sar_5bit = false;
+ dc->sar_m32_5bit = true;
+ tcg_temp_free(tmp);
+}
+
+static void gen_advance_ccount(DisasContext *dc)
+{
+ if (dc->ccount_delta > 0) {
+ TCGv_i32 tmp = tcg_const_i32(dc->ccount_delta);
+ gen_helper_advance_ccount(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ }
+ dc->ccount_delta = 0;
+}
+
+static void gen_exception(DisasContext *dc, int excp)
+{
+ TCGv_i32 tmp = tcg_const_i32(excp);
+ gen_advance_ccount(dc);
+ gen_helper_exception(cpu_env, tmp);
+ tcg_temp_free(tmp);
+}
+
+static void gen_exception_cause(DisasContext *dc, uint32_t cause)
+{
+ TCGv_i32 tpc = tcg_const_i32(dc->pc);
+ TCGv_i32 tcause = tcg_const_i32(cause);
+ gen_advance_ccount(dc);
+ gen_helper_exception_cause(cpu_env, tpc, tcause);
+ tcg_temp_free(tpc);
+ tcg_temp_free(tcause);
+ if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
+ cause == SYSCALL_CAUSE) {
+ dc->is_jmp = DISAS_UPDATE;
+ }
+}
+
+static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
+ TCGv_i32 vaddr)
+{
+ TCGv_i32 tpc = tcg_const_i32(dc->pc);
+ TCGv_i32 tcause = tcg_const_i32(cause);
+ gen_advance_ccount(dc);
+ gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr);
+ tcg_temp_free(tpc);
+ tcg_temp_free(tcause);
+}
+
+static void gen_debug_exception(DisasContext *dc, uint32_t cause)
+{
+ TCGv_i32 tpc = tcg_const_i32(dc->pc);
+ TCGv_i32 tcause = tcg_const_i32(cause);
+ gen_advance_ccount(dc);
+ gen_helper_debug_exception(cpu_env, tpc, tcause);
+ tcg_temp_free(tpc);
+ tcg_temp_free(tcause);
+ if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
+ dc->is_jmp = DISAS_UPDATE;
+ }
+}
+
+static bool gen_check_privilege(DisasContext *dc)
+{
+ if (dc->cring) {
+ gen_exception_cause(dc, PRIVILEGED_CAUSE);
+ dc->is_jmp = DISAS_UPDATE;
+ return false;
+ }
+ return true;
+}
+
+static bool gen_check_cpenable(DisasContext *dc, unsigned cp)
+{
+ if (option_enabled(dc, XTENSA_OPTION_COPROCESSOR) &&
+ !(dc->cpenable & (1 << cp))) {
+ gen_exception_cause(dc, COPROCESSOR0_DISABLED + cp);
+ dc->is_jmp = DISAS_UPDATE;
+ return false;
+ }
+ return true;
+}
+
+static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
+{
+ tcg_gen_mov_i32(cpu_pc, dest);
+ gen_advance_ccount(dc);
+ if (dc->icount) {
+ tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
+ }
+ if (dc->singlestep_enabled) {
+ gen_exception(dc, EXCP_DEBUG);
+ } else {
+ if (slot >= 0) {
+ tcg_gen_goto_tb(slot);
+ tcg_gen_exit_tb((uintptr_t)dc->tb + slot);
+ } else {
+ tcg_gen_exit_tb(0);
+ }
+ }
+ dc->is_jmp = DISAS_UPDATE;
+}
+
+static void gen_jump(DisasContext *dc, TCGv dest)
+{
+ gen_jump_slot(dc, dest, -1);
+}
+
+static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
+{
+ TCGv_i32 tmp = tcg_const_i32(dest);
+#ifndef CONFIG_USER_ONLY
+ if (((dc->tb->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
+ slot = -1;
+ }
+#endif
+ gen_jump_slot(dc, tmp, slot);
+ tcg_temp_free(tmp);
+}
+
+static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
+ int slot)
+{
+ TCGv_i32 tcallinc = tcg_const_i32(callinc);
+
+ tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
+ tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
+ tcg_temp_free(tcallinc);
+ tcg_gen_movi_i32(cpu_R[callinc << 2],
+ (callinc << 30) | (dc->next_pc & 0x3fffffff));
+ gen_jump_slot(dc, dest, slot);
+}
+
+static void gen_callw(DisasContext *dc, int callinc, TCGv_i32 dest)
+{
+ gen_callw_slot(dc, callinc, dest, -1);
+}
+
+static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
+{
+ TCGv_i32 tmp = tcg_const_i32(dest);
+#ifndef CONFIG_USER_ONLY
+ if (((dc->tb->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
+ slot = -1;
+ }
+#endif
+ gen_callw_slot(dc, callinc, tmp, slot);
+ tcg_temp_free(tmp);
+}
+
+static bool gen_check_loop_end(DisasContext *dc, int slot)
+{
+ if (option_enabled(dc, XTENSA_OPTION_LOOP) &&
+ !(dc->tb->flags & XTENSA_TBFLAG_EXCM) &&
+ dc->next_pc == dc->lend) {
+ TCGLabel *label = gen_new_label();
+
+ gen_advance_ccount(dc);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
+ tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
+ gen_jumpi(dc, dc->lbeg, slot);
+ gen_set_label(label);
+ gen_jumpi(dc, dc->next_pc, -1);
+ return true;
+ }
+ return false;
+}
+
+static void gen_jumpi_check_loop_end(DisasContext *dc, int slot)
+{
+ if (!gen_check_loop_end(dc, slot)) {
+ gen_jumpi(dc, dc->next_pc, slot);
+ }
+}
+
+static void gen_brcond(DisasContext *dc, TCGCond cond,
+ TCGv_i32 t0, TCGv_i32 t1, uint32_t offset)
+{
+ TCGLabel *label = gen_new_label();
+
+ gen_advance_ccount(dc);
+ tcg_gen_brcond_i32(cond, t0, t1, label);
+ gen_jumpi_check_loop_end(dc, 0);
+ gen_set_label(label);
+ gen_jumpi(dc, dc->pc + offset, 1);
+}
+
+static void gen_brcondi(DisasContext *dc, TCGCond cond,
+ TCGv_i32 t0, uint32_t t1, uint32_t offset)
+{
+ TCGv_i32 tmp = tcg_const_i32(t1);
+ gen_brcond(dc, cond, t0, tmp, offset);
+ tcg_temp_free(tmp);
+}
+
+static bool gen_check_sr(DisasContext *dc, uint32_t sr, unsigned access)
+{
+ if (!xtensa_option_bits_enabled(dc->config, sregnames[sr].opt_bits)) {
+ if (sregnames[sr].name) {
+ qemu_log_mask(LOG_GUEST_ERROR, "SR %s is not configured\n", sregnames[sr].name);
+ } else {
+ qemu_log_mask(LOG_UNIMP, "SR %d is not implemented\n", sr);
+ }
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ return false;
+ } else if (!(sregnames[sr].access & access)) {
+ static const char * const access_text[] = {
+ [SR_R] = "rsr",
+ [SR_W] = "wsr",
+ [SR_X] = "xsr",
+ };
+ assert(access < ARRAY_SIZE(access_text) && access_text[access]);
+ qemu_log_mask(LOG_GUEST_ERROR, "SR %s is not available for %s\n", sregnames[sr].name,
+ access_text[access]);
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ return false;
+ }
+ return true;
+}
+
+static void gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
+{
+ gen_advance_ccount(dc);
+ tcg_gen_mov_i32(d, cpu_SR[sr]);
+}
+
+static void gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
+{
+ tcg_gen_shri_i32(d, cpu_SR[EXCVADDR], 10);
+ tcg_gen_or_i32(d, d, cpu_SR[sr]);
+ tcg_gen_andi_i32(d, d, 0xfffffffc);
+}
+
+static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
+{
+ static void (* const rsr_handler[256])(DisasContext *dc,
+ TCGv_i32 d, uint32_t sr) = {
+ [CCOUNT] = gen_rsr_ccount,
+ [PTEVADDR] = gen_rsr_ptevaddr,
+ };
+
+ if (rsr_handler[sr]) {
+ rsr_handler[sr](dc, d, sr);
+ } else {
+ tcg_gen_mov_i32(d, cpu_SR[sr]);
+ }
+}
+
+static void gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s)
+{
+ gen_helper_wsr_lbeg(cpu_env, s);
+ gen_jumpi_check_loop_end(dc, 0);
+}
+
+static void gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s)
+{
+ gen_helper_wsr_lend(cpu_env, s);
+ gen_jumpi_check_loop_end(dc, 0);
+}
+
+static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f);
+ if (dc->sar_m32_5bit) {
+ tcg_gen_discard_i32(dc->sar_m32);
+ }
+ dc->sar_5bit = false;
+ dc->sar_m32_5bit = false;
+}
+
+static void gen_wsr_br(DisasContext *dc, uint32_t sr, TCGv_i32 s)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], s, 0xffff);
+}
+
+static void gen_wsr_litbase(DisasContext *dc, uint32_t sr, TCGv_i32 s)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], s, 0xfffff001);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+}
+
+static void gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s)
+{
+ tcg_gen_ext8s_i32(cpu_SR[sr], s);
+}
+
+static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ gen_helper_wsr_windowbase(cpu_env, v);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+}
+
+static void gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], v, (1 << dc->config->nareg / 4) - 1);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+}
+
+static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], v, 0xffc00000);
+}
+
+static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ gen_helper_wsr_rasid(cpu_env, v);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+}
+
+static void gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], v, 0x01130000);
+}
+
+static void gen_wsr_ibreakenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ gen_helper_wsr_ibreakenable(cpu_env, v);
+ gen_jumpi_check_loop_end(dc, 0);
+}
+
+static void gen_wsr_atomctl(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], v, 0x3f);
+}
+
+static void gen_wsr_ibreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ unsigned id = sr - IBREAKA;
+
+ if (id < dc->config->nibreak) {
+ TCGv_i32 tmp = tcg_const_i32(id);
+ gen_helper_wsr_ibreaka(cpu_env, tmp, v);
+ tcg_temp_free(tmp);
+ gen_jumpi_check_loop_end(dc, 0);
+ }
+}
+
+static void gen_wsr_dbreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ unsigned id = sr - DBREAKA;
+
+ if (id < dc->config->ndbreak) {
+ TCGv_i32 tmp = tcg_const_i32(id);
+ gen_helper_wsr_dbreaka(cpu_env, tmp, v);
+ tcg_temp_free(tmp);
+ }
+}
+
+static void gen_wsr_dbreakc(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ unsigned id = sr - DBREAKC;
+
+ if (id < dc->config->ndbreak) {
+ TCGv_i32 tmp = tcg_const_i32(id);
+ gen_helper_wsr_dbreakc(cpu_env, tmp, v);
+ tcg_temp_free(tmp);
+ }
+}
+
+static void gen_wsr_cpenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], v, 0xff);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+}
+
+static void gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], v,
+ dc->config->inttype_mask[INTTYPE_SOFTWARE]);
+ gen_helper_check_interrupts(cpu_env);
+ gen_jumpi_check_loop_end(dc, 0);
+}
+
+static void gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_andi_i32(tmp, v,
+ dc->config->inttype_mask[INTTYPE_EDGE] |
+ dc->config->inttype_mask[INTTYPE_NMI] |
+ dc->config->inttype_mask[INTTYPE_SOFTWARE]);
+ tcg_gen_andc_i32(cpu_SR[INTSET], cpu_SR[INTSET], tmp);
+ tcg_temp_free(tmp);
+ gen_helper_check_interrupts(cpu_env);
+}
+
+static void gen_wsr_intenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_mov_i32(cpu_SR[sr], v);
+ gen_helper_check_interrupts(cpu_env);
+ gen_jumpi_check_loop_end(dc, 0);
+}
+
+static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
+ PS_UM | PS_EXCM | PS_INTLEVEL;
+
+ if (option_enabled(dc, XTENSA_OPTION_MMU)) {
+ mask |= PS_RING;
+ }
+ tcg_gen_andi_i32(cpu_SR[sr], v, mask);
+ gen_helper_check_interrupts(cpu_env);
+ /* This can change mmu index and tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+}
+
+static void gen_wsr_icount(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ if (dc->icount) {
+ tcg_gen_mov_i32(dc->next_icount, v);
+ } else {
+ tcg_gen_mov_i32(cpu_SR[sr], v);
+ }
+}
+
+static void gen_wsr_icountlevel(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], v, 0xf);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+}
+
+static void gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ uint32_t id = sr - CCOMPARE;
+ if (id < dc->config->nccompare) {
+ uint32_t int_bit = 1 << dc->config->timerint[id];
+ gen_advance_ccount(dc);
+ tcg_gen_mov_i32(cpu_SR[sr], v);
+ tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit);
+ gen_helper_check_interrupts(cpu_env);
+ }
+}
+
+static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
+{
+ static void (* const wsr_handler[256])(DisasContext *dc,
+ uint32_t sr, TCGv_i32 v) = {
+ [LBEG] = gen_wsr_lbeg,
+ [LEND] = gen_wsr_lend,
+ [SAR] = gen_wsr_sar,
+ [BR] = gen_wsr_br,
+ [LITBASE] = gen_wsr_litbase,
+ [ACCHI] = gen_wsr_acchi,
+ [WINDOW_BASE] = gen_wsr_windowbase,
+ [WINDOW_START] = gen_wsr_windowstart,
+ [PTEVADDR] = gen_wsr_ptevaddr,
+ [RASID] = gen_wsr_rasid,
+ [ITLBCFG] = gen_wsr_tlbcfg,
+ [DTLBCFG] = gen_wsr_tlbcfg,
+ [IBREAKENABLE] = gen_wsr_ibreakenable,
+ [ATOMCTL] = gen_wsr_atomctl,
+ [IBREAKA] = gen_wsr_ibreaka,
+ [IBREAKA + 1] = gen_wsr_ibreaka,
+ [DBREAKA] = gen_wsr_dbreaka,
+ [DBREAKA + 1] = gen_wsr_dbreaka,
+ [DBREAKC] = gen_wsr_dbreakc,
+ [DBREAKC + 1] = gen_wsr_dbreakc,
+ [CPENABLE] = gen_wsr_cpenable,
+ [INTSET] = gen_wsr_intset,
+ [INTCLEAR] = gen_wsr_intclear,
+ [INTENABLE] = gen_wsr_intenable,
+ [PS] = gen_wsr_ps,
+ [ICOUNT] = gen_wsr_icount,
+ [ICOUNTLEVEL] = gen_wsr_icountlevel,
+ [CCOMPARE] = gen_wsr_ccompare,
+ [CCOMPARE + 1] = gen_wsr_ccompare,
+ [CCOMPARE + 2] = gen_wsr_ccompare,
+ };
+
+ if (wsr_handler[sr]) {
+ wsr_handler[sr](dc, sr, s);
+ } else {
+ tcg_gen_mov_i32(cpu_SR[sr], s);
+ }
+}
+
+static void gen_wur(uint32_t ur, TCGv_i32 s)
+{
+ switch (ur) {
+ case FCR:
+ gen_helper_wur_fcr(cpu_env, s);
+ break;
+
+ case FSR:
+ tcg_gen_andi_i32(cpu_UR[ur], s, 0xffffff80);
+ break;
+
+ default:
+ tcg_gen_mov_i32(cpu_UR[ur], s);
+ break;
+ }
+}
+
+static void gen_load_store_alignment(DisasContext *dc, int shift,
+ TCGv_i32 addr, bool no_hw_alignment)
+{
+ if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
+ tcg_gen_andi_i32(addr, addr, ~0 << shift);
+ } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
+ no_hw_alignment) {
+ TCGLabel *label = gen_new_label();
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
+ gen_set_label(label);
+ tcg_temp_free(tmp);
+ }
+}
+
+static void gen_waiti(DisasContext *dc, uint32_t imm4)
+{
+ TCGv_i32 pc = tcg_const_i32(dc->next_pc);
+ TCGv_i32 intlevel = tcg_const_i32(imm4);
+ gen_advance_ccount(dc);
+ gen_helper_waiti(cpu_env, pc, intlevel);
+ tcg_temp_free(pc);
+ tcg_temp_free(intlevel);
+}
+
+static bool gen_window_check1(DisasContext *dc, unsigned r1)
+{
+ if (r1 / 4 > dc->window) {
+ TCGv_i32 pc = tcg_const_i32(dc->pc);
+ TCGv_i32 w = tcg_const_i32(r1 / 4);
+
+ gen_advance_ccount(dc);
+ gen_helper_window_check(cpu_env, pc, w);
+ dc->is_jmp = DISAS_UPDATE;
+ return false;
+ }
+ return true;
+}
+
+static bool gen_window_check2(DisasContext *dc, unsigned r1, unsigned r2)
+{
+ return gen_window_check1(dc, r1 > r2 ? r1 : r2);
+}
+
+static bool gen_window_check3(DisasContext *dc, unsigned r1, unsigned r2,
+ unsigned r3)
+{
+ return gen_window_check2(dc, r1, r2 > r3 ? r2 : r3);
+}
+
+static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned)
+{
+ TCGv_i32 m = tcg_temp_new_i32();
+
+ if (hi) {
+ (is_unsigned ? tcg_gen_shri_i32 : tcg_gen_sari_i32)(m, v, 16);
+ } else {
+ (is_unsigned ? tcg_gen_ext16u_i32 : tcg_gen_ext16s_i32)(m, v);
+ }
+ return m;
+}
+
+static inline unsigned xtensa_op0_insn_len(unsigned op0)
+{
+ return op0 >= 8 ? 2 : 3;
+}
+
+static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
+{
+#define HAS_OPTION_BITS(opt) do { \
+ if (!option_bits_enabled(dc, opt)) { \
+ qemu_log_mask(LOG_GUEST_ERROR, "Option is not enabled %s:%d\n", \
+ __FILE__, __LINE__); \
+ goto invalid_opcode; \
+ } \
+ } while (0)
+
+#define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt))
+
+#define TBD() qemu_log_mask(LOG_UNIMP, "TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__)
+#define RESERVED() do { \
+ qemu_log_mask(LOG_GUEST_ERROR, "RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \
+ dc->pc, b0, b1, b2, __FILE__, __LINE__); \
+ goto invalid_opcode; \
+ } while (0)
+
+
+#ifdef TARGET_WORDS_BIGENDIAN
+#define OP0 (((b0) & 0xf0) >> 4)
+#define OP1 (((b2) & 0xf0) >> 4)
+#define OP2 ((b2) & 0xf)
+#define RRR_R ((b1) & 0xf)
+#define RRR_S (((b1) & 0xf0) >> 4)
+#define RRR_T ((b0) & 0xf)
+#else
+#define OP0 (((b0) & 0xf))
+#define OP1 (((b2) & 0xf))
+#define OP2 (((b2) & 0xf0) >> 4)
+#define RRR_R (((b1) & 0xf0) >> 4)
+#define RRR_S (((b1) & 0xf))
+#define RRR_T (((b0) & 0xf0) >> 4)
+#endif
+#define RRR_X ((RRR_R & 0x4) >> 2)
+#define RRR_Y ((RRR_T & 0x4) >> 2)
+#define RRR_W (RRR_R & 0x3)
+
+#define RRRN_R RRR_R
+#define RRRN_S RRR_S
+#define RRRN_T RRR_T
+
+#define RRI4_R RRR_R
+#define RRI4_S RRR_S
+#define RRI4_T RRR_T
+#ifdef TARGET_WORDS_BIGENDIAN
+#define RRI4_IMM4 ((b2) & 0xf)
+#else
+#define RRI4_IMM4 (((b2) & 0xf0) >> 4)
+#endif
+
+#define RRI8_R RRR_R
+#define RRI8_S RRR_S
+#define RRI8_T RRR_T
+#define RRI8_IMM8 (b2)
+#define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8)
+
+#ifdef TARGET_WORDS_BIGENDIAN
+#define RI16_IMM16 (((b1) << 8) | (b2))
+#else
+#define RI16_IMM16 (((b2) << 8) | (b1))
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+#define CALL_N (((b0) & 0xc) >> 2)
+#define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2))
+#else
+#define CALL_N (((b0) & 0x30) >> 4)
+#define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10))
+#endif
+#define CALL_OFFSET_SE \
+ (((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET)
+
+#define CALLX_N CALL_N
+#ifdef TARGET_WORDS_BIGENDIAN
+#define CALLX_M ((b0) & 0x3)
+#else
+#define CALLX_M (((b0) & 0xc0) >> 6)
+#endif
+#define CALLX_S RRR_S
+
+#define BRI12_M CALLX_M
+#define BRI12_S RRR_S
+#ifdef TARGET_WORDS_BIGENDIAN
+#define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2))
+#else
+#define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4))
+#endif
+#define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 0xfffff000 : 0) | BRI12_IMM12)
+
+#define BRI8_M BRI12_M
+#define BRI8_R RRI8_R
+#define BRI8_S RRI8_S
+#define BRI8_IMM8 RRI8_IMM8
+#define BRI8_IMM8_SE RRI8_IMM8_SE
+
+#define RSR_SR (b1)
+
+ uint8_t b0 = cpu_ldub_code(env, dc->pc);
+ uint8_t b1 = cpu_ldub_code(env, dc->pc + 1);
+ uint8_t b2 = 0;
+ unsigned len = xtensa_op0_insn_len(OP0);
+
+ static const uint32_t B4CONST[] = {
+ 0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
+ };
+
+ static const uint32_t B4CONSTU[] = {
+ 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
+ };
+
+ switch (len) {
+ case 2:
+ HAS_OPTION(XTENSA_OPTION_CODE_DENSITY);
+ break;
+
+ case 3:
+ b2 = cpu_ldub_code(env, dc->pc + 2);
+ break;
+
+ default:
+ RESERVED();
+ }
+ dc->next_pc = dc->pc + len;
+
+ switch (OP0) {
+ case 0: /*QRST*/
+ switch (OP1) {
+ case 0: /*RST0*/
+ switch (OP2) {
+ case 0: /*ST0*/
+ if ((RRR_R & 0xc) == 0x8) {
+ HAS_OPTION(XTENSA_OPTION_BOOLEAN);
+ }
+
+ switch (RRR_R) {
+ case 0: /*SNM0*/
+ switch (CALLX_M) {
+ case 0: /*ILL*/
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ break;
+
+ case 1: /*reserved*/
+ RESERVED();
+ break;
+
+ case 2: /*JR*/
+ switch (CALLX_N) {
+ case 0: /*RET*/
+ case 2: /*JX*/
+ if (gen_window_check1(dc, CALLX_S)) {
+ gen_jump(dc, cpu_R[CALLX_S]);
+ }
+ break;
+
+ case 1: /*RETWw*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ {
+ TCGv_i32 tmp = tcg_const_i32(dc->pc);
+ gen_advance_ccount(dc);
+ gen_helper_retw(tmp, cpu_env, tmp);
+ gen_jump(dc, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 3: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 3: /*CALLX*/
+ if (!gen_window_check2(dc, CALLX_S, CALLX_N << 2)) {
+ break;
+ }
+ switch (CALLX_N) {
+ case 0: /*CALLX0*/
+ {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
+ tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
+ gen_jump(dc, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 1: /*CALLX4w*/
+ case 2: /*CALLX8w*/
+ case 3: /*CALLX12w*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
+ gen_callw(dc, CALLX_N, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+ }
+ break;
+ }
+ break;
+
+ case 1: /*MOVSPw*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ if (gen_window_check2(dc, RRR_T, RRR_S)) {
+ TCGv_i32 pc = tcg_const_i32(dc->pc);
+ gen_advance_ccount(dc);
+ gen_helper_movsp(cpu_env, pc);
+ tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]);
+ tcg_temp_free(pc);
+ }
+ break;
+
+ case 2: /*SYNC*/
+ switch (RRR_T) {
+ case 0: /*ISYNC*/
+ break;
+
+ case 1: /*RSYNC*/
+ break;
+
+ case 2: /*ESYNC*/
+ break;
+
+ case 3: /*DSYNC*/
+ break;
+
+ case 8: /*EXCW*/
+ HAS_OPTION(XTENSA_OPTION_EXCEPTION);
+ break;
+
+ case 12: /*MEMW*/
+ break;
+
+ case 13: /*EXTW*/
+ break;
+
+ case 15: /*NOP*/
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 3: /*RFEIx*/
+ switch (RRR_T) {
+ case 0: /*RFETx*/
+ HAS_OPTION(XTENSA_OPTION_EXCEPTION);
+ switch (RRR_S) {
+ case 0: /*RFEx*/
+ if (gen_check_privilege(dc)) {
+ tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
+ gen_helper_check_interrupts(cpu_env);
+ gen_jump(dc, cpu_SR[EPC1]);
+ }
+ break;
+
+ case 1: /*RFUEx*/
+ RESERVED();
+ break;
+
+ case 2: /*RFDEx*/
+ if (gen_check_privilege(dc)) {
+ gen_jump(dc, cpu_SR[
+ dc->config->ndepc ? DEPC : EPC1]);
+ }
+ break;
+
+ case 4: /*RFWOw*/
+ case 5: /*RFWUw*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ if (gen_check_privilege(dc)) {
+ TCGv_i32 tmp = tcg_const_i32(1);
+
+ tcg_gen_andi_i32(
+ cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
+ tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
+
+ if (RRR_S == 4) {
+ tcg_gen_andc_i32(cpu_SR[WINDOW_START],
+ cpu_SR[WINDOW_START], tmp);
+ } else {
+ tcg_gen_or_i32(cpu_SR[WINDOW_START],
+ cpu_SR[WINDOW_START], tmp);
+ }
+
+ gen_helper_restore_owb(cpu_env);
+ gen_helper_check_interrupts(cpu_env);
+ gen_jump(dc, cpu_SR[EPC1]);
+
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 1: /*RFIx*/
+ HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT);
+ if (RRR_S >= 2 && RRR_S <= dc->config->nlevel) {
+ if (gen_check_privilege(dc)) {
+ tcg_gen_mov_i32(cpu_SR[PS],
+ cpu_SR[EPS2 + RRR_S - 2]);
+ gen_helper_check_interrupts(cpu_env);
+ gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]);
+ }
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "RFI %d is illegal\n", RRR_S);
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ }
+ break;
+
+ case 2: /*RFME*/
+ TBD();
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+
+ }
+ break;
+
+ case 4: /*BREAKx*/
+ HAS_OPTION(XTENSA_OPTION_DEBUG);
+ if (dc->debug) {
+ gen_debug_exception(dc, DEBUGCAUSE_BI);
+ }
+ break;
+
+ case 5: /*SYSCALLx*/
+ HAS_OPTION(XTENSA_OPTION_EXCEPTION);
+ switch (RRR_S) {
+ case 0: /*SYSCALLx*/
+ gen_exception_cause(dc, SYSCALL_CAUSE);
+ break;
+
+ case 1: /*SIMCALL*/
+ if (semihosting_enabled()) {
+ if (gen_check_privilege(dc)) {
+ gen_helper_simcall(cpu_env);
+ }
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "SIMCALL but semihosting is disabled\n");
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ }
+ break;
+
+ default:
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 6: /*RSILx*/
+ HAS_OPTION(XTENSA_OPTION_INTERRUPT);
+ if (gen_check_privilege(dc) &&
+ gen_window_check1(dc, RRR_T)) {
+ tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]);
+ tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL);
+ tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S);
+ gen_helper_check_interrupts(cpu_env);
+ gen_jumpi_check_loop_end(dc, 0);
+ }
+ break;
+
+ case 7: /*WAITIx*/
+ HAS_OPTION(XTENSA_OPTION_INTERRUPT);
+ if (gen_check_privilege(dc)) {
+ gen_waiti(dc, RRR_S);
+ }
+ break;
+
+ case 8: /*ANY4p*/
+ case 9: /*ALL4p*/
+ case 10: /*ANY8p*/
+ case 11: /*ALL8p*/
+ HAS_OPTION(XTENSA_OPTION_BOOLEAN);
+ {
+ const unsigned shift = (RRR_R & 2) ? 8 : 4;
+ TCGv_i32 mask = tcg_const_i32(
+ ((1 << shift) - 1) << RRR_S);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_and_i32(tmp, cpu_SR[BR], mask);
+ if (RRR_R & 1) { /*ALL*/
+ tcg_gen_addi_i32(tmp, tmp, 1 << RRR_S);
+ } else { /*ANY*/
+ tcg_gen_add_i32(tmp, tmp, mask);
+ }
+ tcg_gen_shri_i32(tmp, tmp, RRR_S + shift);
+ tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR],
+ tmp, RRR_T, 1);
+ tcg_temp_free(mask);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+
+ }
+ break;
+
+ case 1: /*AND*/
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ break;
+
+ case 2: /*OR*/
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ break;
+
+ case 3: /*XOR*/
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ break;
+
+ case 4: /*ST1*/
+ switch (RRR_R) {
+ case 0: /*SSR*/
+ if (gen_window_check1(dc, RRR_S)) {
+ gen_right_shift_sar(dc, cpu_R[RRR_S]);
+ }
+ break;
+
+ case 1: /*SSL*/
+ if (gen_window_check1(dc, RRR_S)) {
+ gen_left_shift_sar(dc, cpu_R[RRR_S]);
+ }
+ break;
+
+ case 2: /*SSA8L*/
+ if (gen_window_check1(dc, RRR_S)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
+ gen_right_shift_sar(dc, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 3: /*SSA8B*/
+ if (gen_window_check1(dc, RRR_S)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
+ gen_left_shift_sar(dc, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 4: /*SSAI*/
+ {
+ TCGv_i32 tmp = tcg_const_i32(
+ RRR_S | ((RRR_T & 1) << 4));
+ gen_right_shift_sar(dc, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 6: /*RER*/
+ TBD();
+ break;
+
+ case 7: /*WER*/
+ TBD();
+ break;
+
+ case 8: /*ROTWw*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ if (gen_check_privilege(dc)) {
+ TCGv_i32 tmp = tcg_const_i32(
+ RRR_T | ((RRR_T & 8) ? 0xfffffff0 : 0));
+ gen_helper_rotw(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+ }
+ break;
+
+ case 14: /*NSAu*/
+ HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
+ if (gen_window_check2(dc, RRR_S, RRR_T)) {
+ gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]);
+ }
+ break;
+
+ case 15: /*NSAUu*/
+ HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
+ if (gen_window_check2(dc, RRR_S, RRR_T)) {
+ gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 5: /*TLB*/
+ HAS_OPTION_BITS(
+ XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION));
+ if (gen_check_privilege(dc) &&
+ gen_window_check2(dc, RRR_S, RRR_T)) {
+ TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0);
+
+ switch (RRR_R & 7) {
+ case 3: /*RITLB0*/ /*RDTLB0*/
+ gen_helper_rtlb0(cpu_R[RRR_T],
+ cpu_env, cpu_R[RRR_S], dtlb);
+ break;
+
+ case 4: /*IITLB*/ /*IDTLB*/
+ gen_helper_itlb(cpu_env, cpu_R[RRR_S], dtlb);
+ /* This could change memory mapping, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+ break;
+
+ case 5: /*PITLB*/ /*PDTLB*/
+ tcg_gen_movi_i32(cpu_pc, dc->pc);
+ gen_helper_ptlb(cpu_R[RRR_T],
+ cpu_env, cpu_R[RRR_S], dtlb);
+ break;
+
+ case 6: /*WITLB*/ /*WDTLB*/
+ gen_helper_wtlb(
+ cpu_env, cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
+ /* This could change memory mapping, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+ break;
+
+ case 7: /*RITLB1*/ /*RDTLB1*/
+ gen_helper_rtlb1(cpu_R[RRR_T],
+ cpu_env, cpu_R[RRR_S], dtlb);
+ break;
+
+ default:
+ tcg_temp_free(dtlb);
+ RESERVED();
+ break;
+ }
+ tcg_temp_free(dtlb);
+ }
+ break;
+
+ case 6: /*RT0*/
+ if (!gen_window_check2(dc, RRR_R, RRR_T)) {
+ break;
+ }
+ switch (RRR_S) {
+ case 0: /*NEG*/
+ tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
+ break;
+
+ case 1: /*ABS*/
+ {
+ TCGv_i32 zero = tcg_const_i32(0);
+ TCGv_i32 neg = tcg_temp_new_i32();
+
+ tcg_gen_neg_i32(neg, cpu_R[RRR_T]);
+ tcg_gen_movcond_i32(TCG_COND_GE, cpu_R[RRR_R],
+ cpu_R[RRR_T], zero, cpu_R[RRR_T], neg);
+ tcg_temp_free(neg);
+ tcg_temp_free(zero);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 7: /*reserved*/
+ RESERVED();
+ break;
+
+ case 8: /*ADD*/
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ break;
+
+ case 9: /*ADD**/
+ case 10:
+ case 11:
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8);
+ tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 12: /*SUB*/
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ break;
+
+ case 13: /*SUB**/
+ case 14:
+ case 15:
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12);
+ tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
+ tcg_temp_free(tmp);
+ }
+ break;
+ }
+ break;
+
+ case 1: /*RST1*/
+ switch (OP2) {
+ case 0: /*SLLI*/
+ case 1:
+ if (gen_window_check2(dc, RRR_R, RRR_S)) {
+ tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S],
+ 32 - (RRR_T | ((OP2 & 1) << 4)));
+ }
+ break;
+
+ case 2: /*SRAI*/
+ case 3:
+ if (gen_window_check2(dc, RRR_R, RRR_T)) {
+ tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T],
+ RRR_S | ((OP2 & 1) << 4));
+ }
+ break;
+
+ case 4: /*SRLI*/
+ if (gen_window_check2(dc, RRR_R, RRR_T)) {
+ tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S);
+ }
+ break;
+
+ case 6: /*XSR*/
+ if (gen_check_sr(dc, RSR_SR, SR_X) &&
+ (RSR_SR < 64 || gen_check_privilege(dc)) &&
+ gen_window_check1(dc, RRR_T)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_mov_i32(tmp, cpu_R[RRR_T]);
+ gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
+ gen_wsr(dc, RSR_SR, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ /*
+ * Note: 64 bit ops are used here solely because SAR values
+ * have range 0..63
+ */
+#define gen_shift_reg(cmd, reg) do { \
+ TCGv_i64 tmp = tcg_temp_new_i64(); \
+ tcg_gen_extu_i32_i64(tmp, reg); \
+ tcg_gen_##cmd##_i64(v, v, tmp); \
+ tcg_gen_extrl_i64_i32(cpu_R[RRR_R], v); \
+ tcg_temp_free_i64(v); \
+ tcg_temp_free_i64(tmp); \
+ } while (0)
+
+#define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
+
+ case 8: /*SRC*/
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ TCGv_i64 v = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]);
+ gen_shift(shr);
+ }
+ break;
+
+ case 9: /*SRL*/
+ if (!gen_window_check2(dc, RRR_R, RRR_T)) {
+ break;
+ }
+ if (dc->sar_5bit) {
+ tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
+ } else {
+ TCGv_i64 v = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]);
+ gen_shift(shr);
+ }
+ break;
+
+ case 10: /*SLL*/
+ if (!gen_window_check2(dc, RRR_R, RRR_S)) {
+ break;
+ }
+ if (dc->sar_m32_5bit) {
+ tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32);
+ } else {
+ TCGv_i64 v = tcg_temp_new_i64();
+ TCGv_i32 s = tcg_const_i32(32);
+ tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
+ tcg_gen_andi_i32(s, s, 0x3f);
+ tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]);
+ gen_shift_reg(shl, s);
+ tcg_temp_free(s);
+ }
+ break;
+
+ case 11: /*SRA*/
+ if (!gen_window_check2(dc, RRR_R, RRR_T)) {
+ break;
+ }
+ if (dc->sar_5bit) {
+ tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
+ } else {
+ TCGv_i64 v = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]);
+ gen_shift(sar);
+ }
+ break;
+#undef gen_shift
+#undef gen_shift_reg
+
+ case 12: /*MUL16U*/
+ HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ TCGv_i32 v1 = tcg_temp_new_i32();
+ TCGv_i32 v2 = tcg_temp_new_i32();
+ tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]);
+ tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]);
+ tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
+ tcg_temp_free(v2);
+ tcg_temp_free(v1);
+ }
+ break;
+
+ case 13: /*MUL16S*/
+ HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ TCGv_i32 v1 = tcg_temp_new_i32();
+ TCGv_i32 v2 = tcg_temp_new_i32();
+ tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]);
+ tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]);
+ tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
+ tcg_temp_free(v2);
+ tcg_temp_free(v1);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 2: /*RST2*/
+ if (OP2 >= 8 && !gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ break;
+ }
+
+ if (OP2 >= 12) {
+ HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV);
+ TCGLabel *label = gen_new_label();
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label);
+ gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE);
+ gen_set_label(label);
+ }
+
+ switch (OP2) {
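+/*
+ * Boolean register ops: combine bits s and t of the BR register with the
+ * given TCG operation and deposit the result into bit r of BR.
+ */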
+#define BOOLEAN_LOGIC(fn, r, s, t) \
+ do { \
+ HAS_OPTION(XTENSA_OPTION_BOOLEAN); \
+ TCGv_i32 tmp1 = tcg_temp_new_i32(); \
+ TCGv_i32 tmp2 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_shri_i32(tmp1, cpu_SR[BR], s); \
+ tcg_gen_shri_i32(tmp2, cpu_SR[BR], t); \
+ tcg_gen_##fn##_i32(tmp1, tmp1, tmp2); \
+ tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp1, r, 1); \
+ tcg_temp_free(tmp1); \
+ tcg_temp_free(tmp2); \
+ } while (0)
+
+ case 0: /*ANDBp*/
+ BOOLEAN_LOGIC(and, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 1: /*ANDBCp*/
+ BOOLEAN_LOGIC(andc, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 2: /*ORBp*/
+ BOOLEAN_LOGIC(or, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 3: /*ORBCp*/
+ BOOLEAN_LOGIC(orc, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 4: /*XORBp*/
+ BOOLEAN_LOGIC(xor, RRR_R, RRR_S, RRR_T);
+ break;
+
+#undef BOOLEAN_LOGIC
+
+ case 8: /*MULLi*/
+ HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL);
+ tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ break;
+
+ case 10: /*MULUHi*/
+ case 11: /*MULSHi*/
+ HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL_HIGH);
+ {
+ TCGv lo = tcg_temp_new();
+
+ if (OP2 == 10) {
+ tcg_gen_mulu2_i32(lo, cpu_R[RRR_R],
+ cpu_R[RRR_S], cpu_R[RRR_T]);
+ } else {
+ tcg_gen_muls2_i32(lo, cpu_R[RRR_R],
+ cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ tcg_temp_free(lo);
+ }
+ break;
+
+ case 12: /*QUOUi*/
+ tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ break;
+
+ case 13: /*QUOSi*/
+ case 15: /*REMSi*/
+ {
+ TCGLabel *label1 = gen_new_label();
+ TCGLabel *label2 = gen_new_label();
+
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000,
+ label1);
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff,
+ label1);
+ tcg_gen_movi_i32(cpu_R[RRR_R],
+ OP2 == 13 ? 0x80000000 : 0);
+ tcg_gen_br(label2);
+ gen_set_label(label1);
+ if (OP2 == 13) {
+ tcg_gen_div_i32(cpu_R[RRR_R],
+ cpu_R[RRR_S], cpu_R[RRR_T]);
+ } else {
+ tcg_gen_rem_i32(cpu_R[RRR_R],
+ cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ gen_set_label(label2);
+ }
+ break;
+
+ case 14: /*REMUi*/
+ tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 3: /*RST3*/
+ switch (OP2) {
+ case 0: /*RSR*/
+ if (gen_check_sr(dc, RSR_SR, SR_R) &&
+ (RSR_SR < 64 || gen_check_privilege(dc)) &&
+ gen_window_check1(dc, RRR_T)) {
+ gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
+ }
+ break;
+
+ case 1: /*WSR*/
+ if (gen_check_sr(dc, RSR_SR, SR_W) &&
+ (RSR_SR < 64 || gen_check_privilege(dc)) &&
+ gen_window_check1(dc, RRR_T)) {
+ gen_wsr(dc, RSR_SR, cpu_R[RRR_T]);
+ }
+ break;
+
+ case 2: /*SEXTu*/
+ HAS_OPTION(XTENSA_OPTION_MISC_OP_SEXT);
+ if (gen_window_check2(dc, RRR_R, RRR_S)) {
+ int shift = 24 - RRR_T;
+
+ if (shift == 24) {
+ tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
+ } else if (shift == 16) {
+ tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
+ } else {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift);
+ tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift);
+ tcg_temp_free(tmp);
+ }
+ }
+ break;
+
+ case 3: /*CLAMPSu*/
+ HAS_OPTION(XTENSA_OPTION_MISC_OP_CLAMPS);
+ if (gen_window_check2(dc, RRR_R, RRR_S)) {
+ TCGv_i32 tmp1 = tcg_temp_new_i32();
+ TCGv_i32 tmp2 = tcg_temp_new_i32();
+ TCGv_i32 zero = tcg_const_i32(0);
+
+ tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T);
+ tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]);
+ tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7));
+
+ tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31);
+ tcg_gen_xori_i32(tmp1, tmp1, 0xffffffff >> (25 - RRR_T));
+
+ tcg_gen_movcond_i32(TCG_COND_EQ, cpu_R[RRR_R], tmp2, zero,
+ cpu_R[RRR_S], tmp1);
+ tcg_temp_free(tmp1);
+ tcg_temp_free(tmp2);
+ tcg_temp_free(zero);
+ }
+ break;
+
+ case 4: /*MINu*/
+ case 5: /*MAXu*/
+ case 6: /*MINUu*/
+ case 7: /*MAXUu*/
+ HAS_OPTION(XTENSA_OPTION_MISC_OP_MINMAX);
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ static const TCGCond cond[] = {
+ TCG_COND_LE,
+ TCG_COND_GE,
+ TCG_COND_LEU,
+ TCG_COND_GEU
+ };
+ tcg_gen_movcond_i32(cond[OP2 - 4], cpu_R[RRR_R],
+ cpu_R[RRR_S], cpu_R[RRR_T],
+ cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ break;
+
+ case 8: /*MOVEQZ*/
+ case 9: /*MOVNEZ*/
+ case 10: /*MOVLTZ*/
+ case 11: /*MOVGEZ*/
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ static const TCGCond cond[] = {
+ TCG_COND_EQ,
+ TCG_COND_NE,
+ TCG_COND_LT,
+ TCG_COND_GE,
+ };
+ TCGv_i32 zero = tcg_const_i32(0);
+
+ tcg_gen_movcond_i32(cond[OP2 - 8], cpu_R[RRR_R],
+ cpu_R[RRR_T], zero, cpu_R[RRR_S], cpu_R[RRR_R]);
+ tcg_temp_free(zero);
+ }
+ break;
+
+ case 12: /*MOVFp*/
+ case 13: /*MOVTp*/
+ HAS_OPTION(XTENSA_OPTION_BOOLEAN);
+ if (gen_window_check2(dc, RRR_R, RRR_S)) {
+ TCGv_i32 zero = tcg_const_i32(0);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
+ tcg_gen_movcond_i32(OP2 & 1 ? TCG_COND_NE : TCG_COND_EQ,
+ cpu_R[RRR_R], tmp, zero,
+ cpu_R[RRR_S], cpu_R[RRR_R]);
+
+ tcg_temp_free(tmp);
+ tcg_temp_free(zero);
+ }
+ break;
+
+ case 14: /*RUR*/
+ if (gen_window_check1(dc, RRR_R)) {
+ int st = (RRR_S << 4) + RRR_T;
+ if (uregnames[st].name) {
+ tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]);
+ } else {
+ qemu_log_mask(LOG_UNIMP, "RUR %d not implemented, ", st);
+ TBD();
+ }
+ }
+ break;
+
+ case 15: /*WUR*/
+ if (gen_window_check1(dc, RRR_T)) {
+ if (uregnames[RSR_SR].name) {
+ gen_wur(RSR_SR, cpu_R[RRR_T]);
+ } else {
+ qemu_log_mask(LOG_UNIMP, "WUR %d not implemented, ", RSR_SR);
+ TBD();
+ }
+ }
+ break;
+
+ }
+ break;
+
+ case 4: /*EXTUI*/
+ case 5:
+ if (gen_window_check2(dc, RRR_R, RRR_T)) {
+ int shiftimm = RRR_S | ((OP1 & 1) << 4);
+ int maskimm = (1 << (OP2 + 1)) - 1;
+
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm);
+ tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 6: /*CUST0*/
+ RESERVED();
+ break;
+
+ case 7: /*CUST1*/
+ RESERVED();
+ break;
+
+ case 8: /*LSCXp*/
+ switch (OP2) {
+ case 0: /*LSXf*/
+ case 1: /*LSXUf*/
+ case 4: /*SSXf*/
+ case 5: /*SSXUf*/
+ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
+ if (gen_window_check2(dc, RRR_S, RRR_T) &&
+ gen_check_cpenable(dc, 0)) {
+ TCGv_i32 addr = tcg_temp_new_i32();
+ tcg_gen_add_i32(addr, cpu_R[RRR_S], cpu_R[RRR_T]);
+ gen_load_store_alignment(dc, 2, addr, false);
+ if (OP2 & 0x4) {
+ tcg_gen_qemu_st32(cpu_FR[RRR_R], addr, dc->cring);
+ } else {
+ tcg_gen_qemu_ld32u(cpu_FR[RRR_R], addr, dc->cring);
+ }
+ if (OP2 & 0x1) {
+ tcg_gen_mov_i32(cpu_R[RRR_S], addr);
+ }
+ tcg_temp_free(addr);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 9: /*LSC4*/
+ if (!gen_window_check2(dc, RRR_S, RRR_T)) {
+ break;
+ }
+ switch (OP2) {
+ case 0: /*L32E*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ if (gen_check_privilege(dc) &&
+ gen_window_check2(dc, RRR_S, RRR_T)) {
+ TCGv_i32 addr = tcg_temp_new_i32();
+ tcg_gen_addi_i32(addr, cpu_R[RRR_S],
+ (0xffffffc0 | (RRR_R << 2)));
+ tcg_gen_qemu_ld32u(cpu_R[RRR_T], addr, dc->ring);
+ tcg_temp_free(addr);
+ }
+ break;
+
+ case 4: /*S32E*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ if (gen_check_privilege(dc) &&
+ gen_window_check2(dc, RRR_S, RRR_T)) {
+ TCGv_i32 addr = tcg_temp_new_i32();
+ tcg_gen_addi_i32(addr, cpu_R[RRR_S],
+ (0xffffffc0 | (RRR_R << 2)));
+ tcg_gen_qemu_st32(cpu_R[RRR_T], addr, dc->ring);
+ tcg_temp_free(addr);
+ }
+ break;
+
+ case 5: /*S32N*/
+ if (gen_window_check2(dc, RRI4_S, RRI4_T)) {
+ TCGv_i32 addr = tcg_temp_new_i32();
+
+ tcg_gen_addi_i32(addr, cpu_R[RRI4_S], RRI4_IMM4 << 2);
+ gen_load_store_alignment(dc, 2, addr, false);
+ tcg_gen_qemu_st32(cpu_R[RRI4_T], addr, dc->cring);
+ tcg_temp_free(addr);
+ }
+ break;
+
+ default:
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 10: /*FP0*/
+ /*DEPBITS*/
+ if (option_enabled(dc, XTENSA_OPTION_DEPBITS)) {
+ if (!gen_window_check2(dc, RRR_S, RRR_T)) {
+ break;
+ }
+ tcg_gen_deposit_i32(cpu_R[RRR_T], cpu_R[RRR_T], cpu_R[RRR_S],
+ OP2, RRR_R + 1);
+ break;
+ }
+
+ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
+ switch (OP2) {
+ case 0: /*ADD.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ gen_helper_add_s(cpu_FR[RRR_R], cpu_env,
+ cpu_FR[RRR_S], cpu_FR[RRR_T]);
+ }
+ break;
+
+ case 1: /*SUB.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ gen_helper_sub_s(cpu_FR[RRR_R], cpu_env,
+ cpu_FR[RRR_S], cpu_FR[RRR_T]);
+ }
+ break;
+
+ case 2: /*MUL.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ gen_helper_mul_s(cpu_FR[RRR_R], cpu_env,
+ cpu_FR[RRR_S], cpu_FR[RRR_T]);
+ }
+ break;
+
+ case 4: /*MADD.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ gen_helper_madd_s(cpu_FR[RRR_R], cpu_env,
+ cpu_FR[RRR_R], cpu_FR[RRR_S],
+ cpu_FR[RRR_T]);
+ }
+ break;
+
+ case 5: /*MSUB.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ gen_helper_msub_s(cpu_FR[RRR_R], cpu_env,
+ cpu_FR[RRR_R], cpu_FR[RRR_S],
+ cpu_FR[RRR_T]);
+ }
+ break;
+
+ case 8: /*ROUND.Sf*/
+ case 9: /*TRUNC.Sf*/
+ case 10: /*FLOOR.Sf*/
+ case 11: /*CEIL.Sf*/
+ case 14: /*UTRUNC.Sf*/
+ if (gen_window_check1(dc, RRR_R) &&
+ gen_check_cpenable(dc, 0)) {
+ static const unsigned rounding_mode_const[] = {
+ float_round_nearest_even,
+ float_round_to_zero,
+ float_round_down,
+ float_round_up,
+ [6] = float_round_to_zero,
+ };
+ TCGv_i32 rounding_mode = tcg_const_i32(
+ rounding_mode_const[OP2 & 7]);
+ TCGv_i32 scale = tcg_const_i32(RRR_T);
+
+ if (OP2 == 14) {
+ gen_helper_ftoui(cpu_R[RRR_R], cpu_FR[RRR_S],
+ rounding_mode, scale);
+ } else {
+ gen_helper_ftoi(cpu_R[RRR_R], cpu_FR[RRR_S],
+ rounding_mode, scale);
+ }
+
+ tcg_temp_free(rounding_mode);
+ tcg_temp_free(scale);
+ }
+ break;
+
+ case 12: /*FLOAT.Sf*/
+ case 13: /*UFLOAT.Sf*/
+ if (gen_window_check1(dc, RRR_S) &&
+ gen_check_cpenable(dc, 0)) {
+ TCGv_i32 scale = tcg_const_i32(-RRR_T);
+
+ if (OP2 == 13) {
+ gen_helper_uitof(cpu_FR[RRR_R], cpu_env,
+ cpu_R[RRR_S], scale);
+ } else {
+ gen_helper_itof(cpu_FR[RRR_R], cpu_env,
+ cpu_R[RRR_S], scale);
+ }
+ tcg_temp_free(scale);
+ }
+ break;
+
+ case 15: /*FP1OP*/
+ switch (RRR_T) {
+ case 0: /*MOV.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_FR[RRR_S]);
+ }
+ break;
+
+ case 1: /*ABS.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ gen_helper_abs_s(cpu_FR[RRR_R], cpu_FR[RRR_S]);
+ }
+ break;
+
+ case 4: /*RFRf*/
+ if (gen_window_check1(dc, RRR_R) &&
+ gen_check_cpenable(dc, 0)) {
+ tcg_gen_mov_i32(cpu_R[RRR_R], cpu_FR[RRR_S]);
+ }
+ break;
+
+ case 5: /*WFRf*/
+ if (gen_window_check1(dc, RRR_S) &&
+ gen_check_cpenable(dc, 0)) {
+ tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_R[RRR_S]);
+ }
+ break;
+
+ case 6: /*NEG.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ gen_helper_neg_s(cpu_FR[RRR_R], cpu_FR[RRR_S]);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 11: /*FP1*/
+ /*DEPBITS*/
+ if (option_enabled(dc, XTENSA_OPTION_DEPBITS)) {
+ if (!gen_window_check2(dc, RRR_S, RRR_T)) {
+ break;
+ }
+ tcg_gen_deposit_i32(cpu_R[RRR_T], cpu_R[RRR_T], cpu_R[RRR_S],
+ OP2 + 16, RRR_R + 1);
+ break;
+ }
+
+ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
+
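+/*
+ * FP compare ops call the corresponding softfloat helper, which sets or
+ * clears the selected bit in the BR register.
+ */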
+#define gen_compare(rel, br, a, b) \
+ do { \
+ if (gen_check_cpenable(dc, 0)) { \
+ TCGv_i32 bit = tcg_const_i32(1 << br); \
+ \
+ gen_helper_##rel(cpu_env, bit, cpu_FR[a], cpu_FR[b]); \
+ tcg_temp_free(bit); \
+ } \
+ } while (0)
+
+ switch (OP2) {
+ case 1: /*UN.Sf*/
+ gen_compare(un_s, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 2: /*OEQ.Sf*/
+ gen_compare(oeq_s, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 3: /*UEQ.Sf*/
+ gen_compare(ueq_s, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 4: /*OLT.Sf*/
+ gen_compare(olt_s, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 5: /*ULT.Sf*/
+ gen_compare(ult_s, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 6: /*OLE.Sf*/
+ gen_compare(ole_s, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 7: /*ULE.Sf*/
+ gen_compare(ule_s, RRR_R, RRR_S, RRR_T);
+ break;
+
+#undef gen_compare
+
+ case 8: /*MOVEQZ.Sf*/
+ case 9: /*MOVNEZ.Sf*/
+ case 10: /*MOVLTZ.Sf*/
+ case 11: /*MOVGEZ.Sf*/
+ if (gen_window_check1(dc, RRR_T) &&
+ gen_check_cpenable(dc, 0)) {
+ static const TCGCond cond[] = {
+ TCG_COND_EQ,
+ TCG_COND_NE,
+ TCG_COND_LT,
+ TCG_COND_GE,
+ };
+ TCGv_i32 zero = tcg_const_i32(0);
+
+ tcg_gen_movcond_i32(cond[OP2 - 8], cpu_FR[RRR_R],
+ cpu_R[RRR_T], zero, cpu_FR[RRR_S], cpu_FR[RRR_R]);
+ tcg_temp_free(zero);
+ }
+ break;
+
+ case 12: /*MOVF.Sf*/
+ case 13: /*MOVT.Sf*/
+ HAS_OPTION(XTENSA_OPTION_BOOLEAN);
+ if (gen_check_cpenable(dc, 0)) {
+ TCGv_i32 zero = tcg_const_i32(0);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
+ tcg_gen_movcond_i32(OP2 & 1 ? TCG_COND_NE : TCG_COND_EQ,
+ cpu_FR[RRR_R], tmp, zero,
+ cpu_FR[RRR_S], cpu_FR[RRR_R]);
+
+ tcg_temp_free(tmp);
+ tcg_temp_free(zero);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 1: /*L32R*/
+ if (gen_window_check1(dc, RRR_T)) {
+ TCGv_i32 tmp = tcg_const_i32(
+ ((dc->tb->flags & XTENSA_TBFLAG_LITBASE) ?
+ 0 : ((dc->pc + 3) & ~3)) +
+ (0xfffc0000 | (RI16_IMM16 << 2)));
+
+ if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
+ tcg_gen_add_i32(tmp, tmp, dc->litbase);
+ }
+ tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 2: /*LSAI*/
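+/*
+ * Loads/stores with an 8-bit unsigned immediate offset scaled by the access
+ * size; alignment handling is applied for 16- and 32-bit accesses.
+ */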
+#define gen_load_store(type, shift) do { \
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) { \
+ TCGv_i32 addr = tcg_temp_new_i32(); \
+ \
+ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \
+ if (shift) { \
+ gen_load_store_alignment(dc, shift, addr, false); \
+ } \
+ tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
+ tcg_temp_free(addr); \
+ } \
+ } while (0)
+
+ switch (RRI8_R) {
+ case 0: /*L8UI*/
+ gen_load_store(ld8u, 0);
+ break;
+
+ case 1: /*L16UI*/
+ gen_load_store(ld16u, 1);
+ break;
+
+ case 2: /*L32I*/
+ gen_load_store(ld32u, 2);
+ break;
+
+ case 4: /*S8I*/
+ gen_load_store(st8, 0);
+ break;
+
+ case 5: /*S16I*/
+ gen_load_store(st16, 1);
+ break;
+
+ case 6: /*S32I*/
+ gen_load_store(st32, 2);
+ break;
+
+#define gen_dcache_hit_test(w, shift) do { \
+ if (gen_window_check1(dc, RRI##w##_S)) { \
+ TCGv_i32 addr = tcg_temp_new_i32(); \
+ TCGv_i32 res = tcg_temp_new_i32(); \
+ tcg_gen_addi_i32(addr, cpu_R[RRI##w##_S], \
+ RRI##w##_IMM##w << shift); \
+ tcg_gen_qemu_ld8u(res, addr, dc->cring); \
+ tcg_temp_free(addr); \
+ tcg_temp_free(res); \
+ } \
+ } while (0)
+
+#define gen_dcache_hit_test4() gen_dcache_hit_test(4, 4)
+#define gen_dcache_hit_test8() gen_dcache_hit_test(8, 2)
+
+ case 7: /*CACHEc*/
+ if (RRI8_T < 8) {
+ HAS_OPTION(XTENSA_OPTION_DCACHE);
+ }
+
+ switch (RRI8_T) {
+ case 0: /*DPFRc*/
+ gen_window_check1(dc, RRI8_S);
+ break;
+
+ case 1: /*DPFWc*/
+ gen_window_check1(dc, RRI8_S);
+ break;
+
+ case 2: /*DPFROc*/
+ gen_window_check1(dc, RRI8_S);
+ break;
+
+ case 3: /*DPFWOc*/
+ gen_window_check1(dc, RRI8_S);
+ break;
+
+ case 4: /*DHWBc*/
+ gen_dcache_hit_test8();
+ break;
+
+ case 5: /*DHWBIc*/
+ gen_dcache_hit_test8();
+ break;
+
+ case 6: /*DHIc*/
+ if (gen_check_privilege(dc)) {
+ gen_dcache_hit_test8();
+ }
+ break;
+
+ case 7: /*DIIc*/
+ if (gen_check_privilege(dc)) {
+ gen_window_check1(dc, RRI8_S);
+ }
+ break;
+
+ case 8: /*DCEc*/
+ switch (OP1) {
+ case 0: /*DPFLl*/
+ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
+ if (gen_check_privilege(dc)) {
+ gen_dcache_hit_test4();
+ }
+ break;
+
+ case 2: /*DHUl*/
+ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
+ if (gen_check_privilege(dc)) {
+ gen_dcache_hit_test4();
+ }
+ break;
+
+ case 3: /*DIUl*/
+ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
+ if (gen_check_privilege(dc)) {
+ gen_window_check1(dc, RRI4_S);
+ }
+ break;
+
+ case 4: /*DIWBc*/
+ HAS_OPTION(XTENSA_OPTION_DCACHE);
+ if (gen_check_privilege(dc)) {
+ gen_window_check1(dc, RRI4_S);
+ }
+ break;
+
+ case 5: /*DIWBIc*/
+ HAS_OPTION(XTENSA_OPTION_DCACHE);
+ if (gen_check_privilege(dc)) {
+ gen_window_check1(dc, RRI4_S);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+
+ }
+ break;
+
+#undef gen_dcache_hit_test
+#undef gen_dcache_hit_test4
+#undef gen_dcache_hit_test8
+
+#define gen_icache_hit_test(w, shift) do { \
+ if (gen_window_check1(dc, RRI##w##_S)) { \
+ TCGv_i32 addr = tcg_temp_new_i32(); \
+ tcg_gen_movi_i32(cpu_pc, dc->pc); \
+ tcg_gen_addi_i32(addr, cpu_R[RRI##w##_S], \
+ RRI##w##_IMM##w << shift); \
+ gen_helper_itlb_hit_test(cpu_env, addr); \
+ tcg_temp_free(addr); \
+ }\
+ } while (0)
+
+#define gen_icache_hit_test4() gen_icache_hit_test(4, 4)
+#define gen_icache_hit_test8() gen_icache_hit_test(8, 2)
+
+ case 12: /*IPFc*/
+ HAS_OPTION(XTENSA_OPTION_ICACHE);
+ gen_window_check1(dc, RRI8_S);
+ break;
+
+ case 13: /*ICEc*/
+ switch (OP1) {
+ case 0: /*IPFLl*/
+ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
+ if (gen_check_privilege(dc)) {
+ gen_icache_hit_test4();
+ }
+ break;
+
+ case 2: /*IHUl*/
+ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
+ if (gen_check_privilege(dc)) {
+ gen_icache_hit_test4();
+ }
+ break;
+
+ case 3: /*IIUl*/
+ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
+ if (gen_check_privilege(dc)) {
+ gen_window_check1(dc, RRI4_S);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 14: /*IHIc*/
+ HAS_OPTION(XTENSA_OPTION_ICACHE);
+ gen_icache_hit_test8();
+ break;
+
+ case 15: /*IIIc*/
+ HAS_OPTION(XTENSA_OPTION_ICACHE);
+ if (gen_check_privilege(dc)) {
+ gen_window_check1(dc, RRI8_S);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+#undef gen_icache_hit_test
+#undef gen_icache_hit_test4
+#undef gen_icache_hit_test8
+
+ case 9: /*L16SI*/
+ gen_load_store(ld16s, 1);
+ break;
+#undef gen_load_store
+
+ case 10: /*MOVI*/
+ if (gen_window_check1(dc, RRI8_T)) {
+ tcg_gen_movi_i32(cpu_R[RRI8_T],
+ RRI8_IMM8 | (RRI8_S << 8) |
+ ((RRI8_S & 0x8) ? 0xfffff000 : 0));
+ }
+ break;
+
+#define gen_load_store_no_hw_align(type) do { \
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) { \
+ TCGv_i32 addr = tcg_temp_local_new_i32(); \
+ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); \
+ gen_load_store_alignment(dc, 2, addr, true); \
+ tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
+ tcg_temp_free(addr); \
+ } \
+ } while (0)
+
+ case 11: /*L32AIy*/
+ HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
+ gen_load_store_no_hw_align(ld32u); /*TODO acquire?*/
+ break;
+
+ case 12: /*ADDI*/
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) {
+ tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE);
+ }
+ break;
+
+ case 13: /*ADDMI*/
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) {
+ tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S],
+ RRI8_IMM8_SE << 8);
+ }
+ break;
+
+ case 14: /*S32C1Iy*/
+ HAS_OPTION(XTENSA_OPTION_CONDITIONAL_STORE);
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) {
+ TCGLabel *label = gen_new_label();
+ TCGv_i32 tmp = tcg_temp_local_new_i32();
+ TCGv_i32 addr = tcg_temp_local_new_i32();
+ TCGv_i32 tpc;
+
+ tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]);
+ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
+ gen_load_store_alignment(dc, 2, addr, true);
+
+ gen_advance_ccount(dc);
+ tpc = tcg_const_i32(dc->pc);
+ gen_helper_check_atomctl(cpu_env, tpc, addr);
+ tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring);
+ tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T],
+ cpu_SR[SCOMPARE1], label);
+
+ tcg_gen_qemu_st32(tmp, addr, dc->cring);
+
+ gen_set_label(label);
+ tcg_temp_free(tpc);
+ tcg_temp_free(addr);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 15: /*S32RIy*/
+ HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
+ gen_load_store_no_hw_align(st32); /*TODO release?*/
+ break;
+#undef gen_load_store_no_hw_align
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 3: /*LSCIp*/
+ switch (RRI8_R) {
+ case 0: /*LSIf*/
+ case 4: /*SSIf*/
+ case 8: /*LSIUf*/
+ case 12: /*SSIUf*/
+ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
+ if (gen_window_check1(dc, RRI8_S) &&
+ gen_check_cpenable(dc, 0)) {
+ TCGv_i32 addr = tcg_temp_new_i32();
+ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
+ gen_load_store_alignment(dc, 2, addr, false);
+ if (RRI8_R & 0x4) {
+ tcg_gen_qemu_st32(cpu_FR[RRI8_T], addr, dc->cring);
+ } else {
+ tcg_gen_qemu_ld32u(cpu_FR[RRI8_T], addr, dc->cring);
+ }
+ if (RRI8_R & 0x8) {
+ tcg_gen_mov_i32(cpu_R[RRI8_S], addr);
+ }
+ tcg_temp_free(addr);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 4: /*MAC16d*/
+ HAS_OPTION(XTENSA_OPTION_MAC16);
+ {
+ enum {
+ MAC16_UMUL = 0x0,
+ MAC16_MUL = 0x4,
+ MAC16_MULA = 0x8,
+ MAC16_MULS = 0xc,
+ MAC16_NONE = 0xf,
+ } op = OP1 & 0xc;
+ bool is_m1_sr = (OP2 & 0x3) == 2;
+ bool is_m2_sr = (OP2 & 0xc) == 0;
+ uint32_t ld_offset = 0;
+
+ if (OP2 > 9) {
+ RESERVED();
+ }
+
+ switch (OP2 & 2) {
+ case 0: /*MACI?/MACC?*/
+ is_m1_sr = true;
+ ld_offset = (OP2 & 1) ? -4 : 4;
+
+ if (OP2 >= 8) { /*MACI/MACC*/
+ if (OP1 == 0) { /*LDINC/LDDEC*/
+ op = MAC16_NONE;
+ } else {
+ RESERVED();
+ }
+ } else if (op != MAC16_MULA) { /*MULA.*.*.LDINC/LDDEC*/
+ RESERVED();
+ }
+ break;
+
+ case 2: /*MACD?/MACA?*/
+ if (op == MAC16_UMUL && OP2 != 7) { /*UMUL only in MACAA*/
+ RESERVED();
+ }
+ break;
+ }
+
+ if (op != MAC16_NONE) {
+ if (!is_m1_sr && !gen_window_check1(dc, RRR_S)) {
+ break;
+ }
+ if (!is_m2_sr && !gen_window_check1(dc, RRR_T)) {
+ break;
+ }
+ }
+
+ if (ld_offset && !gen_window_check1(dc, RRR_S)) {
+ break;
+ }
+
+ {
+ TCGv_i32 vaddr = tcg_temp_new_i32();
+ TCGv_i32 mem32 = tcg_temp_new_i32();
+
+ if (ld_offset) {
+ tcg_gen_addi_i32(vaddr, cpu_R[RRR_S], ld_offset);
+ gen_load_store_alignment(dc, 2, vaddr, false);
+ tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring);
+ }
+ if (op != MAC16_NONE) {
+ TCGv_i32 m1 = gen_mac16_m(
+ is_m1_sr ? cpu_SR[MR + RRR_X] : cpu_R[RRR_S],
+ OP1 & 1, op == MAC16_UMUL);
+ TCGv_i32 m2 = gen_mac16_m(
+ is_m2_sr ? cpu_SR[MR + 2 + RRR_Y] : cpu_R[RRR_T],
+ OP1 & 2, op == MAC16_UMUL);
+
+ if (op == MAC16_MUL || op == MAC16_UMUL) {
+ tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2);
+ if (op == MAC16_UMUL) {
+ tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
+ } else {
+ tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
+ }
+ } else {
+ TCGv_i32 lo = tcg_temp_new_i32();
+ TCGv_i32 hi = tcg_temp_new_i32();
+
+ tcg_gen_mul_i32(lo, m1, m2);
+ tcg_gen_sari_i32(hi, lo, 31);
+ if (op == MAC16_MULA) {
+ tcg_gen_add2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
+ cpu_SR[ACCLO], cpu_SR[ACCHI],
+ lo, hi);
+ } else {
+ tcg_gen_sub2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
+ cpu_SR[ACCLO], cpu_SR[ACCHI],
+ lo, hi);
+ }
+ tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]);
+
+ tcg_temp_free_i32(lo);
+ tcg_temp_free_i32(hi);
+ }
+ tcg_temp_free(m1);
+ tcg_temp_free(m2);
+ }
+ if (ld_offset) {
+ tcg_gen_mov_i32(cpu_R[RRR_S], vaddr);
+ tcg_gen_mov_i32(cpu_SR[MR + RRR_W], mem32);
+ }
+ tcg_temp_free(vaddr);
+ tcg_temp_free(mem32);
+ }
+ }
+ break;
+
+ case 5: /*CALLN*/
+ switch (CALL_N) {
+ case 0: /*CALL0*/
+ tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
+ gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
+ break;
+
+ case 1: /*CALL4w*/
+ case 2: /*CALL8w*/
+ case 3: /*CALL12w*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ if (gen_window_check1(dc, CALL_N << 2)) {
+ gen_callwi(dc, CALL_N,
+ (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
+ }
+ break;
+ }
+ break;
+
+ case 6: /*SI*/
+ switch (CALL_N) {
+ case 0: /*J*/
+ gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0);
+ break;
+
+ case 1: /*BZ*/
+ if (gen_window_check1(dc, BRI12_S)) {
+ static const TCGCond cond[] = {
+ TCG_COND_EQ, /*BEQZ*/
+ TCG_COND_NE, /*BNEZ*/
+ TCG_COND_LT, /*BLTZ*/
+ TCG_COND_GE, /*BGEZ*/
+ };
+
+ gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0,
+ 4 + BRI12_IMM12_SE);
+ }
+ break;
+
+ case 2: /*BI0*/
+ if (gen_window_check1(dc, BRI8_S)) {
+ static const TCGCond cond[] = {
+ TCG_COND_EQ, /*BEQI*/
+ TCG_COND_NE, /*BNEI*/
+ TCG_COND_LT, /*BLTI*/
+ TCG_COND_GE, /*BGEI*/
+ };
+
+ gen_brcondi(dc, cond[BRI8_M & 3],
+ cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE);
+ }
+ break;
+
+ case 3: /*BI1*/
+ switch (BRI8_M) {
+ case 0: /*ENTRYw*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ {
+ TCGv_i32 pc = tcg_const_i32(dc->pc);
+ TCGv_i32 s = tcg_const_i32(BRI12_S);
+ TCGv_i32 imm = tcg_const_i32(BRI12_IMM12);
+ gen_advance_ccount(dc);
+ gen_helper_entry(cpu_env, pc, s, imm);
+ tcg_temp_free(imm);
+ tcg_temp_free(s);
+ tcg_temp_free(pc);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+ }
+ break;
+
+ case 1: /*B1*/
+ switch (BRI8_R) {
+ case 0: /*BFp*/
+ case 1: /*BTp*/
+ HAS_OPTION(XTENSA_OPTION_BOOLEAN);
+ {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRI8_S);
+ gen_brcondi(dc,
+ BRI8_R == 1 ? TCG_COND_NE : TCG_COND_EQ,
+ tmp, 0, 4 + RRI8_IMM8_SE);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 8: /*LOOP*/
+ case 9: /*LOOPNEZ*/
+ case 10: /*LOOPGTZ*/
+ HAS_OPTION(XTENSA_OPTION_LOOP);
+ if (gen_window_check1(dc, RRI8_S)) {
+ uint32_t lend = dc->pc + RRI8_IMM8 + 4;
+ TCGv_i32 tmp = tcg_const_i32(lend);
+
+ tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1);
+ tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc);
+ gen_helper_wsr_lend(cpu_env, tmp);
+ tcg_temp_free(tmp);
+
+ if (BRI8_R > 8) {
+ TCGLabel *label = gen_new_label();
+ tcg_gen_brcondi_i32(
+ BRI8_R == 9 ? TCG_COND_NE : TCG_COND_GT,
+ cpu_R[RRI8_S], 0, label);
+ gen_jumpi(dc, lend, 1);
+ gen_set_label(label);
+ }
+
+ gen_jumpi(dc, dc->next_pc, 0);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+
+ }
+ break;
+
+ case 2: /*BLTUI*/
+ case 3: /*BGEUI*/
+ if (gen_window_check1(dc, BRI8_S)) {
+ gen_brcondi(dc, BRI8_M == 2 ? TCG_COND_LTU : TCG_COND_GEU,
+ cpu_R[BRI8_S], B4CONSTU[BRI8_R],
+ 4 + BRI8_IMM8_SE);
+ }
+ break;
+ }
+ break;
+
+ }
+ break;
+
+ case 7: /*B*/
+ {
+ TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ;
+
+ switch (RRI8_R & 7) {
+ case 0: /*BNONE*/ /*BANY*/
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
+ gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 1: /*BEQ*/ /*BNE*/
+ case 2: /*BLT*/ /*BGE*/
+ case 3: /*BLTU*/ /*BGEU*/
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) {
+ static const TCGCond cond[] = {
+ [1] = TCG_COND_EQ,
+ [2] = TCG_COND_LT,
+ [3] = TCG_COND_LTU,
+ [9] = TCG_COND_NE,
+ [10] = TCG_COND_GE,
+ [11] = TCG_COND_GEU,
+ };
+ gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T],
+ 4 + RRI8_IMM8_SE);
+ }
+ break;
+
+ case 4: /*BALL*/ /*BNALL*/
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
+ gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T],
+ 4 + RRI8_IMM8_SE);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 5: /*BBC*/ /*BBS*/
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) {
+#ifdef TARGET_WORDS_BIGENDIAN
+ TCGv_i32 bit = tcg_const_i32(0x80000000);
+#else
+ TCGv_i32 bit = tcg_const_i32(0x00000001);
+#endif
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f);
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_gen_shr_i32(bit, bit, tmp);
+#else
+ tcg_gen_shl_i32(bit, bit, tmp);
+#endif
+ tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit);
+ gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
+ tcg_temp_free(tmp);
+ tcg_temp_free(bit);
+ }
+ break;
+
+ case 6: /*BBCI*/ /*BBSI*/
+ case 7:
+ if (gen_window_check1(dc, RRI8_S)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, cpu_R[RRI8_S],
+#ifdef TARGET_WORDS_BIGENDIAN
+ 0x80000000 >> (((RRI8_R & 1) << 4) | RRI8_T));
+#else
+ 0x00000001 << (((RRI8_R & 1) << 4) | RRI8_T));
+#endif
+ gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ }
+ }
+ break;
+
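+/*
+ * Narrow (16-bit encoded) L32I.N/S32I.N: 4-bit unsigned immediate scaled by 4.
+ */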
+#define gen_narrow_load_store(type) do { \
+ if (gen_window_check2(dc, RRRN_S, RRRN_T)) { \
+ TCGv_i32 addr = tcg_temp_new_i32(); \
+ tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \
+ gen_load_store_alignment(dc, 2, addr, false); \
+ tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \
+ tcg_temp_free(addr); \
+ } \
+ } while (0)
+
+ case 8: /*L32I.Nn*/
+ gen_narrow_load_store(ld32u);
+ break;
+
+ case 9: /*S32I.Nn*/
+ gen_narrow_load_store(st32);
+ break;
+#undef gen_narrow_load_store
+
+ case 10: /*ADD.Nn*/
+ if (gen_window_check3(dc, RRRN_R, RRRN_S, RRRN_T)) {
+ tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]);
+ }
+ break;
+
+ case 11: /*ADDI.Nn*/
+ if (gen_window_check2(dc, RRRN_R, RRRN_S)) {
+ tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S],
+ RRRN_T ? RRRN_T : -1);
+ }
+ break;
+
+ case 12: /*ST2n*/
+ if (!gen_window_check1(dc, RRRN_S)) {
+ break;
+ }
+ if (RRRN_T < 8) { /*MOVI.Nn*/
+ tcg_gen_movi_i32(cpu_R[RRRN_S],
+ RRRN_R | (RRRN_T << 4) |
+ ((RRRN_T & 6) == 6 ? 0xffffff80 : 0));
+ } else { /*BEQZ.Nn*/ /*BNEZ.Nn*/
+ TCGCond eq_ne = (RRRN_T & 4) ? TCG_COND_NE : TCG_COND_EQ;
+
+ gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0,
+ 4 + (RRRN_R | ((RRRN_T & 3) << 4)));
+ }
+ break;
+
+ case 13: /*ST3n*/
+ switch (RRRN_R) {
+ case 0: /*MOV.Nn*/
+ if (gen_window_check2(dc, RRRN_S, RRRN_T)) {
+ tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]);
+ }
+ break;
+
+ case 15: /*S3*/
+ switch (RRRN_T) {
+ case 0: /*RET.Nn*/
+ gen_jump(dc, cpu_R[0]);
+ break;
+
+ case 1: /*RETW.Nn*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ {
+ TCGv_i32 tmp = tcg_const_i32(dc->pc);
+ gen_advance_ccount(dc);
+ gen_helper_retw(tmp, cpu_env, tmp);
+ gen_jump(dc, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 2: /*BREAK.Nn*/
+ HAS_OPTION(XTENSA_OPTION_DEBUG);
+ if (dc->debug) {
+ gen_debug_exception(dc, DEBUGCAUSE_BN);
+ }
+ break;
+
+ case 3: /*NOP.Nn*/
+ break;
+
+ case 6: /*ILL.Nn*/
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+
+ if (dc->is_jmp == DISAS_NEXT) {
+ gen_check_loop_end(dc, 0);
+ }
+ dc->pc = dc->next_pc;
+
+ return;
+
+invalid_opcode:
+ qemu_log_mask(LOG_GUEST_ERROR, "INVALID(pc = %08x)\n", dc->pc);
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+#undef HAS_OPTION
+}
+
+static inline unsigned xtensa_insn_len(CPUXtensaState *env, DisasContext *dc)
+{
+ uint8_t b0 = cpu_ldub_code(env, dc->pc);
+ return xtensa_op0_insn_len(OP0);
+}
+
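+/*
+ * Raise a debug exception if any enabled hardware instruction breakpoint
+ * (IBREAKA) matches the PC about to be translated.
+ */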
+static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
+{
+ unsigned i;
+
+ for (i = 0; i < dc->config->nibreak; ++i) {
+ if ((env->sregs[IBREAKENABLE] & (1 << i)) &&
+ env->sregs[IBREAKA + i] == dc->pc) {
+ gen_debug_exception(dc, DEBUGCAUSE_IB);
+ break;
+ }
+ }
+}
+
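+/*
+ * Translate guest code starting at tb->pc into TCG ops, stopping on a
+ * change of control flow, at the page boundary, at the per-TB instruction
+ * limit, or when the TCG op buffer is full.
+ */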
+void gen_intermediate_code(CPUXtensaState *env, TranslationBlock *tb)
+{
+ XtensaCPU *cpu = xtensa_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ DisasContext dc;
+ int insn_count = 0;
+ int max_insns = tb->cflags & CF_COUNT_MASK;
+ uint32_t pc_start = tb->pc;
+ uint32_t next_page_start =
+ (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ dc.config = env->config;
+ dc.singlestep_enabled = cs->singlestep_enabled;
+ dc.tb = tb;
+ dc.pc = pc_start;
+ dc.ring = tb->flags & XTENSA_TBFLAG_RING_MASK;
+ dc.cring = (tb->flags & XTENSA_TBFLAG_EXCM) ? 0 : dc.ring;
+ dc.lbeg = env->sregs[LBEG];
+ dc.lend = env->sregs[LEND];
+ dc.is_jmp = DISAS_NEXT;
+ dc.ccount_delta = 0;
+ dc.debug = tb->flags & XTENSA_TBFLAG_DEBUG;
+ dc.icount = tb->flags & XTENSA_TBFLAG_ICOUNT;
+ dc.cpenable = (tb->flags & XTENSA_TBFLAG_CPENABLE_MASK) >>
+ XTENSA_TBFLAG_CPENABLE_SHIFT;
+ dc.window = ((tb->flags & XTENSA_TBFLAG_WINDOW_MASK) >>
+ XTENSA_TBFLAG_WINDOW_SHIFT);
+
+ init_litbase(&dc);
+ init_sar_tracker(&dc);
+ if (dc.icount) {
+ dc.next_icount = tcg_temp_local_new_i32();
+ }
+
+ gen_tb_start(tb);
+
+ if (tb->flags & XTENSA_TBFLAG_EXCEPTION) {
+ tcg_gen_movi_i32(cpu_pc, dc.pc);
+ gen_exception(&dc, EXCP_DEBUG);
+ }
+
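+    /*
+     * Per-instruction loop: handle GDB breakpoints, ICOUNT expiry and
+     * hardware IBREAKs before decoding each instruction.
+     */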
+ do {
+ tcg_gen_insn_start(dc.pc);
+ ++insn_count;
+
+ ++dc.ccount_delta;
+
+ if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
+ tcg_gen_movi_i32(cpu_pc, dc.pc);
+ gen_exception(&dc, EXCP_DEBUG);
+ dc.is_jmp = DISAS_UPDATE;
+ /* The address covered by the breakpoint must be included in
+               [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ dc.pc += 2;
+ break;
+ }
+
+ if (insn_count == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ if (dc.icount) {
+ TCGLabel *label = gen_new_label();
+
+ tcg_gen_addi_i32(dc.next_icount, cpu_SR[ICOUNT], 1);
+ tcg_gen_brcondi_i32(TCG_COND_NE, dc.next_icount, 0, label);
+ tcg_gen_mov_i32(dc.next_icount, cpu_SR[ICOUNT]);
+ if (dc.debug) {
+ gen_debug_exception(&dc, DEBUGCAUSE_IC);
+ }
+ gen_set_label(label);
+ }
+
+ if (dc.debug) {
+ gen_ibreak_check(env, &dc);
+ }
+
+ disas_xtensa_insn(env, &dc);
+ if (dc.icount) {
+ tcg_gen_mov_i32(cpu_SR[ICOUNT], dc.next_icount);
+ }
+ if (cs->singlestep_enabled) {
+ tcg_gen_movi_i32(cpu_pc, dc.pc);
+ gen_exception(&dc, EXCP_DEBUG);
+ break;
+ }
+ } while (dc.is_jmp == DISAS_NEXT &&
+ insn_count < max_insns &&
+ dc.pc < next_page_start &&
+ dc.pc + xtensa_insn_len(env, &dc) <= next_page_start &&
+ !tcg_op_buf_full());
+
+ reset_litbase(&dc);
+ reset_sar_tracker(&dc);
+ if (dc.icount) {
+ tcg_temp_free(dc.next_icount);
+ }
+
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+
+ if (dc.is_jmp == DISAS_NEXT) {
+ gen_jumpi(&dc, dc.pc, 0);
+ }
+ gen_tb_end(tb, insn_count);
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("----------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, dc.pc - pc_start, 0);
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+#endif
+ tb->size = dc.pc - pc_start;
+ tb->icount = insn_count;
+}
+
+void xtensa_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+ int i, j;
+
+ cpu_fprintf(f, "PC=%08x\n\n", env->pc);
+
+ for (i = j = 0; i < 256; ++i) {
+ if (xtensa_option_bits_enabled(env->config, sregnames[i].opt_bits)) {
+ cpu_fprintf(f, "%12s=%08x%c", sregnames[i].name, env->sregs[i],
+ (j++ % 4) == 3 ? '\n' : ' ');
+ }
+ }
+
+ cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
+
+ for (i = j = 0; i < 256; ++i) {
+ if (xtensa_option_bits_enabled(env->config, uregnames[i].opt_bits)) {
+ cpu_fprintf(f, "%s=%08x%c", uregnames[i].name, env->uregs[i],
+ (j++ % 4) == 3 ? '\n' : ' ');
+ }
+ }
+
+ cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
+
+ for (i = 0; i < 16; ++i) {
+ cpu_fprintf(f, " A%02d=%08x%c", i, env->regs[i],
+ (i % 4) == 3 ? '\n' : ' ');
+ }
+
+ cpu_fprintf(f, "\n");
+
+ for (i = 0; i < env->config->nareg; ++i) {
+ cpu_fprintf(f, "AR%02d=%08x%c", i, env->phys_regs[i],
+ (i % 4) == 3 ? '\n' : ' ');
+ }
+
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_FP_COPROCESSOR)) {
+ cpu_fprintf(f, "\n");
+
+ for (i = 0; i < 16; ++i) {
+ cpu_fprintf(f, "F%02d=%08x (%+10.8e)%c", i,
+ float32_val(env->fregs[i].f32[FP_F32_LOW]),
+ *(float *)(env->fregs[i].f32 + FP_F32_LOW),
+ (i % 2) == 1 ? '\n' : ' ');
+ }
+ }
+}
+
+void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+}
diff --git a/target/xtensa/xtensa-semi.c b/target/xtensa/xtensa-semi.c
new file mode 100644
index 0000000000..370e365c65
--- /dev/null
+++ b/target/xtensa/xtensa-semi.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/log.h"
+
+enum {
+ TARGET_SYS_exit = 1,
+ TARGET_SYS_read = 3,
+ TARGET_SYS_write = 4,
+ TARGET_SYS_open = 5,
+ TARGET_SYS_close = 6,
+ TARGET_SYS_lseek = 19,
+ TARGET_SYS_select_one = 29,
+
+ TARGET_SYS_argc = 1000,
+ TARGET_SYS_argv_sz = 1001,
+ TARGET_SYS_argv = 1002,
+ TARGET_SYS_memset = 1004,
+};
+
+enum {
+ SELECT_ONE_READ = 1,
+ SELECT_ONE_WRITE = 2,
+ SELECT_ONE_EXCEPT = 3,
+};
+
+enum {
+ TARGET_EPERM = 1,
+ TARGET_ENOENT = 2,
+ TARGET_ESRCH = 3,
+ TARGET_EINTR = 4,
+ TARGET_EIO = 5,
+ TARGET_ENXIO = 6,
+ TARGET_E2BIG = 7,
+ TARGET_ENOEXEC = 8,
+ TARGET_EBADF = 9,
+ TARGET_ECHILD = 10,
+ TARGET_EAGAIN = 11,
+ TARGET_ENOMEM = 12,
+ TARGET_EACCES = 13,
+ TARGET_EFAULT = 14,
+ TARGET_ENOTBLK = 15,
+ TARGET_EBUSY = 16,
+ TARGET_EEXIST = 17,
+ TARGET_EXDEV = 18,
+ TARGET_ENODEV = 19,
+ TARGET_ENOTDIR = 20,
+ TARGET_EISDIR = 21,
+ TARGET_EINVAL = 22,
+ TARGET_ENFILE = 23,
+ TARGET_EMFILE = 24,
+ TARGET_ENOTTY = 25,
+ TARGET_ETXTBSY = 26,
+ TARGET_EFBIG = 27,
+ TARGET_ENOSPC = 28,
+ TARGET_ESPIPE = 29,
+ TARGET_EROFS = 30,
+ TARGET_EMLINK = 31,
+ TARGET_EPIPE = 32,
+ TARGET_EDOM = 33,
+ TARGET_ERANGE = 34,
+ TARGET_ENOSYS = 88,
+ TARGET_ELOOP = 92,
+};
+
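+/*
+ * Map a host errno value onto the guest numbering defined above; values
+ * without a mapping become TARGET_EINVAL.
+ */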
+static uint32_t errno_h2g(int host_errno)
+{
+ static const uint32_t guest_errno[] = {
+ [EPERM] = TARGET_EPERM,
+ [ENOENT] = TARGET_ENOENT,
+ [ESRCH] = TARGET_ESRCH,
+ [EINTR] = TARGET_EINTR,
+ [EIO] = TARGET_EIO,
+ [ENXIO] = TARGET_ENXIO,
+ [E2BIG] = TARGET_E2BIG,
+ [ENOEXEC] = TARGET_ENOEXEC,
+ [EBADF] = TARGET_EBADF,
+ [ECHILD] = TARGET_ECHILD,
+ [EAGAIN] = TARGET_EAGAIN,
+ [ENOMEM] = TARGET_ENOMEM,
+ [EACCES] = TARGET_EACCES,
+ [EFAULT] = TARGET_EFAULT,
+#ifdef ENOTBLK
+ [ENOTBLK] = TARGET_ENOTBLK,
+#endif
+ [EBUSY] = TARGET_EBUSY,
+ [EEXIST] = TARGET_EEXIST,
+ [EXDEV] = TARGET_EXDEV,
+ [ENODEV] = TARGET_ENODEV,
+ [ENOTDIR] = TARGET_ENOTDIR,
+ [EISDIR] = TARGET_EISDIR,
+ [EINVAL] = TARGET_EINVAL,
+ [ENFILE] = TARGET_ENFILE,
+ [EMFILE] = TARGET_EMFILE,
+ [ENOTTY] = TARGET_ENOTTY,
+#ifdef ETXTBSY
+ [ETXTBSY] = TARGET_ETXTBSY,
+#endif
+ [EFBIG] = TARGET_EFBIG,
+ [ENOSPC] = TARGET_ENOSPC,
+ [ESPIPE] = TARGET_ESPIPE,
+ [EROFS] = TARGET_EROFS,
+ [EMLINK] = TARGET_EMLINK,
+ [EPIPE] = TARGET_EPIPE,
+ [EDOM] = TARGET_EDOM,
+ [ERANGE] = TARGET_ERANGE,
+ [ENOSYS] = TARGET_ENOSYS,
+#ifdef ELOOP
+ [ELOOP] = TARGET_ELOOP,
+#endif
+ };
+
+ if (host_errno == 0) {
+ return 0;
+ } else if (host_errno > 0 && host_errno < ARRAY_SIZE(guest_errno) &&
+ guest_errno[host_errno]) {
+ return guest_errno[host_errno];
+ } else {
+ return TARGET_EINVAL;
+ }
+}
+
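+/*
+ * Handle a SIMCALL semihosting trap: the call number is passed in a2
+ * (regs[2]), arguments in a3..a5, and the result and guest errno are
+ * returned in a2/a3.
+ */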
+void HELPER(simcall)(CPUXtensaState *env)
+{
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+ uint32_t *regs = env->regs;
+
+ switch (regs[2]) {
+ case TARGET_SYS_exit:
+ qemu_log("exit(%d) simcall\n", regs[3]);
+ exit(regs[3]);
+ break;
+
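+    /*
+     * read/write transfer data one guest page at a time, since the guest
+     * buffer need not be physically contiguous.
+     */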
+ case TARGET_SYS_read:
+ case TARGET_SYS_write:
+ {
+ bool is_write = regs[2] == TARGET_SYS_write;
+ uint32_t fd = regs[3];
+ uint32_t vaddr = regs[4];
+ uint32_t len = regs[5];
+
+ while (len > 0) {
+ hwaddr paddr = cpu_get_phys_page_debug(cs, vaddr);
+ uint32_t page_left =
+ TARGET_PAGE_SIZE - (vaddr & (TARGET_PAGE_SIZE - 1));
+ uint32_t io_sz = page_left < len ? page_left : len;
+ hwaddr sz = io_sz;
+ void *buf = cpu_physical_memory_map(paddr, &sz, is_write);
+
+ if (buf) {
+ vaddr += io_sz;
+ len -= io_sz;
+ regs[2] = is_write ?
+ write(fd, buf, io_sz) :
+ read(fd, buf, io_sz);
+ regs[3] = errno_h2g(errno);
+ cpu_physical_memory_unmap(buf, sz, is_write, sz);
+ if (regs[2] == -1) {
+ break;
+ }
+ } else {
+ regs[2] = -1;
+ regs[3] = TARGET_EINVAL;
+ break;
+ }
+ }
+ }
+ break;
+
+ case TARGET_SYS_open:
+ {
+ char name[1024];
+ int rc;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(name); ++i) {
+ rc = cpu_memory_rw_debug(cs, regs[3] + i,
+ (uint8_t *)name + i, 1, 0);
+ if (rc != 0 || name[i] == 0) {
+ break;
+ }
+ }
+
+ if (rc == 0 && i < ARRAY_SIZE(name)) {
+ regs[2] = open(name, regs[4], regs[5]);
+ regs[3] = errno_h2g(errno);
+ } else {
+ regs[2] = -1;
+ regs[3] = TARGET_EINVAL;
+ }
+ }
+ break;
+
+ case TARGET_SYS_close:
+ if (regs[3] < 3) {
+ regs[2] = regs[3] = 0;
+ } else {
+ regs[2] = close(regs[3]);
+ regs[3] = errno_h2g(errno);
+ }
+ break;
+
+ case TARGET_SYS_lseek:
+ regs[2] = lseek(regs[3], (off_t)(int32_t)regs[4], regs[5]);
+ regs[3] = errno_h2g(errno);
+ break;
+
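+    /*
+     * select_one polls a single file descriptor for readability,
+     * writability or exceptional conditions, with an optional timeout
+     * taken from a guest struct timeval.
+     */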
+ case TARGET_SYS_select_one:
+ {
+ uint32_t fd = regs[3];
+ uint32_t rq = regs[4];
+ uint32_t target_tv = regs[5];
+ uint32_t target_tvv[2];
+
+ struct timeval tv = {0};
+ fd_set fdset;
+
+ FD_ZERO(&fdset);
+ FD_SET(fd, &fdset);
+
+ if (target_tv) {
+ cpu_memory_rw_debug(cs, target_tv,
+ (uint8_t *)target_tvv, sizeof(target_tvv), 0);
+ tv.tv_sec = (int32_t)tswap32(target_tvv[0]);
+ tv.tv_usec = (int32_t)tswap32(target_tvv[1]);
+ }
+ regs[2] = select(fd + 1,
+ rq == SELECT_ONE_READ ? &fdset : NULL,
+ rq == SELECT_ONE_WRITE ? &fdset : NULL,
+ rq == SELECT_ONE_EXCEPT ? &fdset : NULL,
+ target_tv ? &tv : NULL);
+ regs[3] = errno_h2g(errno);
+ }
+ break;
+
+ case TARGET_SYS_argc:
+ regs[2] = 1;
+ regs[3] = 0;
+ break;
+
+ case TARGET_SYS_argv_sz:
+ regs[2] = 128;
+ regs[3] = 0;
+ break;
+
+ case TARGET_SYS_argv:
+ {
+ struct Argv {
+ uint32_t argptr[2];
+ char text[120];
+ } argv = {
+ {0, 0},
+ "test"
+ };
+
+ argv.argptr[0] = tswap32(regs[3] + offsetof(struct Argv, text));
+ cpu_memory_rw_debug(cs,
+ regs[3], (uint8_t *)&argv, sizeof(argv), 1);
+ }
+ break;
+
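+    /*
+     * memset fills guest physical memory page by page, stepping a byte at
+     * a time over regions that cannot be mapped.
+     */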
+ case TARGET_SYS_memset:
+ {
+ uint32_t base = regs[3];
+ uint32_t sz = regs[5];
+
+ while (sz) {
+ hwaddr len = sz;
+ void *buf = cpu_physical_memory_map(base, &len, 1);
+
+ if (buf && len) {
+ memset(buf, regs[4], len);
+ cpu_physical_memory_unmap(buf, len, 1, len);
+ } else {
+ len = 1;
+ }
+ base += len;
+ sz -= len;
+ }
+ regs[2] = regs[3];
+ regs[3] = 0;
+ }
+ break;
+
+ default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s(%d): not implemented\n",
+                      __func__, regs[2]);
+ regs[2] = -1;
+ regs[3] = TARGET_ENOSYS;
+ break;
+ }
+}