Diffstat (limited to 'target-alpha')
-rw-r--r--   target-alpha/cpu.h             385
-rw-r--r--   target-alpha/exec.h             82
-rw-r--r--   target-alpha/helper.c          454
-rw-r--r--   target-alpha/op.c             1103
-rw-r--r--   target-alpha/op_helper.c      1255
-rw-r--r--   target-alpha/op_helper.h       141
-rw-r--r--   target-alpha/op_helper_mem.h    40
-rw-r--r--   target-alpha/op_mem.h          125
-rw-r--r--   target-alpha/op_template.h     167
-rw-r--r--   target-alpha/translate.c      2117
10 files changed, 5869 insertions, 0 deletions
diff --git a/target-alpha/cpu.h b/target-alpha/cpu.h
new file mode 100644
index 0000000000..ca12105f20
--- /dev/null
+++ b/target-alpha/cpu.h
@@ -0,0 +1,385 @@
+/*
+ * Alpha emulation cpu definitions for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#if !defined (__CPU_ALPHA_H__)
+#define __CPU_ALPHA_H__
+
+#include "config.h"
+
+#define TARGET_LONG_BITS 64
+
+#include "cpu-defs.h"
+
+
+#include <setjmp.h>
+
+#include "softfloat.h"
+
+/* XXX: put this in a common place */
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
+#define TARGET_HAS_ICE 1
+
+#define ELF_MACHINE EM_ALPHA
+
+#define ICACHE_LINE_SIZE 32
+#define DCACHE_LINE_SIZE 32
+
+#define TARGET_PAGE_BITS 12
+
+#define VA_BITS 43
+
+/* Alpha major type */
+enum {
+ ALPHA_EV3 = 1,
+ ALPHA_EV4 = 2,
+ ALPHA_SIM = 3,
+ ALPHA_LCA = 4,
+ ALPHA_EV5 = 5, /* 21164 */
+ ALPHA_EV45 = 6, /* 21064A */
+ ALPHA_EV56 = 7, /* 21164A */
+};
+
+/* EV4 minor type */
+enum {
+ ALPHA_EV4_2 = 0,
+ ALPHA_EV4_3 = 1,
+};
+
+/* LCA minor type */
+enum {
+ ALPHA_LCA_1 = 1, /* 21066 */
+    ALPHA_LCA_2 = 2, /* 21066 */
+ ALPHA_LCA_3 = 3, /* 21068 */
+ ALPHA_LCA_4 = 4, /* 21068 */
+ ALPHA_LCA_5 = 5, /* 21066A */
+ ALPHA_LCA_6 = 6, /* 21068A */
+};
+
+/* EV5 minor type */
+enum {
+ ALPHA_EV5_1 = 1, /* Rev BA, CA */
+ ALPHA_EV5_2 = 2, /* Rev DA, EA */
+ ALPHA_EV5_3 = 3, /* Pass 3 */
+ ALPHA_EV5_4 = 4, /* Pass 3.2 */
+ ALPHA_EV5_5 = 5, /* Pass 4 */
+};
+
+/* EV45 minor type */
+enum {
+ ALPHA_EV45_1 = 1, /* Pass 1 */
+ ALPHA_EV45_2 = 2, /* Pass 1.1 */
+ ALPHA_EV45_3 = 3, /* Pass 2 */
+};
+
+/* EV56 minor type */
+enum {
+ ALPHA_EV56_1 = 1, /* Pass 1 */
+ ALPHA_EV56_2 = 2, /* Pass 2 */
+};
+
+enum {
+ IMPLVER_2106x = 0, /* EV4, EV45 & LCA45 */
+    IMPLVER_21164 = 1, /* EV5, EV56 & PCA56 */
+ IMPLVER_21264 = 2, /* EV6, EV67 & EV68x */
+ IMPLVER_21364 = 3, /* EV7 & EV79 */
+};
+
+enum {
+ AMASK_BWX = 0x00000001,
+ AMASK_FIX = 0x00000002,
+ AMASK_CIX = 0x00000004,
+ AMASK_MVI = 0x00000100,
+ AMASK_TRAP = 0x00000200,
+ AMASK_PREFETCH = 0x00001000,
+};
+
+enum {
+ VAX_ROUND_NORMAL = 0,
+ VAX_ROUND_CHOPPED,
+};
+
+enum {
+ IEEE_ROUND_NORMAL = 0,
+ IEEE_ROUND_DYNAMIC,
+ IEEE_ROUND_PLUS,
+ IEEE_ROUND_MINUS,
+ IEEE_ROUND_CHOPPED,
+};
+
+/* IEEE floating-point operations encoding */
+/* Trap mode */
+enum {
+ FP_TRAP_I = 0x0,
+ FP_TRAP_U = 0x1,
+ FP_TRAP_S = 0x4,
+ FP_TRAP_SU = 0x5,
+ FP_TRAP_SUI = 0x7,
+};
+
+/* Rounding mode */
+enum {
+ FP_ROUND_CHOPPED = 0x0,
+ FP_ROUND_MINUS = 0x1,
+ FP_ROUND_NORMAL = 0x2,
+ FP_ROUND_DYNAMIC = 0x3,
+};
+
+/* Internal processor registers */
+/* XXX: TOFIX: most of these registers are implementation dependent */
+enum {
+ /* Ebox IPRs */
+ IPR_CC = 0xC0,
+ IPR_CC_CTL = 0xC1,
+ IPR_VA = 0xC2,
+ IPR_VA_CTL = 0xC4,
+ IPR_VA_FORM = 0xC3,
+ /* Ibox IPRs */
+ IPR_ITB_TAG = 0x00,
+ IPR_ITB_PTE = 0x01,
+    IPR_ITB_IAP       = 0x02,
+    IPR_ITB_IA        = 0x03,
+    IPR_ITB_IS        = 0x04,
+ IPR_PMPC = 0x05,
+ IPR_EXC_ADDR = 0x06,
+ IPR_IVA_FORM = 0x07,
+ IPR_CM = 0x09,
+ IPR_IER = 0x0A,
+ IPR_SIRR = 0x0C,
+ IPR_ISUM = 0x0D,
+ IPR_HW_INT_CLR = 0x0E,
+ IPR_EXC_SUM = 0x0F,
+ IPR_PAL_BASE = 0x10,
+ IPR_I_CTL = 0x11,
+ IPR_I_STAT = 0x16,
+ IPR_IC_FLUSH = 0x13,
+ IPR_IC_FLUSH_ASM = 0x12,
+ IPR_CLR_MAP = 0x15,
+ IPR_SLEEP = 0x17,
+ IPR_PCTX = 0x40,
+ IPR_PCTR_CTL = 0x14,
+ /* Mbox IPRs */
+ IPR_DTB_TAG0 = 0x20,
+ IPR_DTB_TAG1 = 0xA0,
+ IPR_DTB_PTE0 = 0x21,
+ IPR_DTB_PTE1 = 0xA1,
+ IPR_DTB_ALTMODE = 0xA6,
+ IPR_DTB_IAP = 0xA2,
+ IPR_DTB_IA = 0xA3,
+ IPR_DTB_IS0 = 0x24,
+ IPR_DTB_IS1 = 0xA4,
+ IPR_DTB_ASN0 = 0x25,
+ IPR_DTB_ASN1 = 0xA5,
+ IPR_MM_STAT = 0x27,
+ IPR_M_CTL = 0x28,
+ IPR_DC_CTL = 0x29,
+ IPR_DC_STAT = 0x2A,
+ /* Cbox IPRs */
+ IPR_C_DATA = 0x2B,
+ IPR_C_SHIFT = 0x2C,
+
+ IPR_ASN,
+ IPR_ASTEN,
+ IPR_ASTSR,
+ IPR_DATFX,
+ IPR_ESP,
+ IPR_FEN,
+ IPR_IPIR,
+ IPR_IPL,
+ IPR_KSP,
+ IPR_MCES,
+ IPR_PERFMON,
+ IPR_PCBB,
+ IPR_PRBR,
+ IPR_PTBR,
+ IPR_SCBB,
+ IPR_SISR,
+ IPR_SSP,
+ IPR_SYSPTBR,
+ IPR_TBCHK,
+ IPR_TBIA,
+ IPR_TBIAP,
+ IPR_TBIS,
+ IPR_TBISD,
+ IPR_TBISI,
+ IPR_USP,
+ IPR_VIRBND,
+ IPR_VPTB,
+ IPR_WHAMI,
+ IPR_ALT_MODE,
+ IPR_LAST,
+};
+
+typedef struct CPUAlphaState CPUAlphaState;
+
+typedef struct pal_handler_t pal_handler_t;
+struct pal_handler_t {
+ /* Reset */
+ void (*reset)(CPUAlphaState *env);
+ /* Uncorrectable hardware error */
+ void (*machine_check)(CPUAlphaState *env);
+ /* Arithmetic exception */
+ void (*arithmetic)(CPUAlphaState *env);
+ /* Interrupt / correctable hardware error */
+ void (*interrupt)(CPUAlphaState *env);
+ /* Data fault */
+ void (*dfault)(CPUAlphaState *env);
+ /* DTB miss pal */
+ void (*dtb_miss_pal)(CPUAlphaState *env);
+ /* DTB miss native */
+ void (*dtb_miss_native)(CPUAlphaState *env);
+ /* Unaligned access */
+ void (*unalign)(CPUAlphaState *env);
+ /* ITB miss */
+ void (*itb_miss)(CPUAlphaState *env);
+ /* Instruction stream access violation */
+ void (*itb_acv)(CPUAlphaState *env);
+ /* Reserved or privileged opcode */
+ void (*opcdec)(CPUAlphaState *env);
+ /* Floating point exception */
+ void (*fen)(CPUAlphaState *env);
+ /* Call pal instruction */
+ void (*call_pal)(CPUAlphaState *env, uint32_t palcode);
+};
+
+struct CPUAlphaState {
+ uint64_t ir[31];
+ float64 fir[31];
+ float_status fp_status;
+ uint64_t fpcr;
+ uint64_t pc;
+ uint64_t lock;
+ uint32_t pcc[2];
+ uint64_t ipr[IPR_LAST];
+ uint64_t ps;
+ uint64_t unique;
+ int saved_mode; /* Used for HW_LD / HW_ST */
+
+    /* Temporary registers used by the micro-operations */
+    uint64_t t0, t1, t2;
+    double ft0, ft1, ft2;
+
+    /* These resources are used only by the QEMU core */
+ CPU_COMMON
+
+ jmp_buf jmp_env;
+ int user_mode_only; /* user mode only simulation */
+ uint32_t hflags;
+ int halted;
+
+ int exception_index;
+ int error_code;
+ int interrupt_request;
+
+ uint32_t features;
+ uint32_t amask;
+ int implver;
+ pal_handler_t *pal_handler;
+};
+
+#include "cpu-all.h"
+
+enum {
+ FEATURE_ASN = 0x00000001,
+ FEATURE_SPS = 0x00000002,
+ FEATURE_VIRBND = 0x00000004,
+ FEATURE_TBCHK = 0x00000008,
+};
+
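+
+/* Exception vectors: do_interrupt() adds these offsets to IPR_PAL_BASE to
+ * form the PALcode entry point when native PALcode is in use. */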
+enum {
+ EXCP_RESET = 0x0000,
+ EXCP_MCHK = 0x0020,
+ EXCP_ARITH = 0x0060,
+ EXCP_HW_INTERRUPT = 0x00E0,
+ EXCP_DFAULT = 0x01E0,
+ EXCP_DTB_MISS_PAL = 0x09E0,
+ EXCP_ITB_MISS = 0x03E0,
+ EXCP_ITB_ACV = 0x07E0,
+ EXCP_DTB_MISS_NATIVE = 0x08E0,
+ EXCP_UNALIGN = 0x11E0,
+ EXCP_OPCDEC = 0x13E0,
+ EXCP_FEN = 0x17E0,
+ EXCP_CALL_PAL = 0x2000,
+ EXCP_CALL_PALP = 0x3000,
+ EXCP_CALL_PALE = 0x4000,
+ /* Pseudo exception for console */
+ EXCP_CONSOLE_DISPATCH = 0x4001,
+ EXCP_CONSOLE_FIXUP = 0x4002,
+};
+
+/* Arithmetic exception */
+enum {
+ EXCP_ARITH_OVERFLOW,
+};
+
+enum {
+ PALCODE_CALL = 0x00000000,
+ PALCODE_LD = 0x01000000,
+ PALCODE_ST = 0x02000000,
+ PALCODE_MFPR = 0x03000000,
+ PALCODE_MTPR = 0x04000000,
+ PALCODE_REI = 0x05000000,
+ PALCODE_INIT = 0xF0000000,
+};
+
+enum {
+ IR_V0 = 0,
+ IR_T0 = 1,
+ IR_T1 = 2,
+ IR_T2 = 3,
+ IR_T3 = 4,
+ IR_T4 = 5,
+ IR_T5 = 6,
+ IR_T6 = 7,
+ IR_T7 = 8,
+ IR_S0 = 9,
+ IR_S1 = 10,
+ IR_S2 = 11,
+ IR_S3 = 12,
+ IR_S4 = 13,
+ IR_S5 = 14,
+ IR_S6 = 15,
+#define IR_FP IR_S6
+ IR_A0 = 16,
+ IR_A1 = 17,
+ IR_A2 = 18,
+ IR_A3 = 19,
+ IR_A4 = 20,
+ IR_A5 = 21,
+ IR_T8 = 22,
+ IR_T9 = 23,
+ IR_T10 = 24,
+ IR_T11 = 25,
+ IR_RA = 26,
+ IR_T12 = 27,
+#define IR_PV IR_T12
+ IR_AT = 28,
+ IR_GP = 29,
+ IR_SP = 30,
+ IR_ZERO = 31,
+};
+
+int cpu_alpha_mfpr (CPUState *env, int iprn, uint64_t *valp);
+int cpu_alpha_mtpr (CPUState *env, int iprn, uint64_t val, uint64_t *oldvalp);
+void cpu_loop_exit (void);
+void pal_init (CPUState *env);
+void call_pal (CPUState *env, int palcode);
+
+#endif /* !defined (__CPU_ALPHA_H__) */
diff --git a/target-alpha/exec.h b/target-alpha/exec.h
new file mode 100644
index 0000000000..f109160a51
--- /dev/null
+++ b/target-alpha/exec.h
@@ -0,0 +1,82 @@
+/*
+ * Alpha emulation cpu run-time definitions for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#if !defined (__ALPHA_EXEC_H__)
+#define __ALPHA_EXEC_H__
+
+#include "config.h"
+
+#include "dyngen-exec.h"
+
+#define TARGET_LONG_BITS 64
+
+register struct CPUAlphaState *env asm(AREG0);
+
+#if TARGET_LONG_BITS > HOST_LONG_BITS
+
+/* no registers can be used */
+#define T0 (env->t0)
+#define T1 (env->t1)
+#define T2 (env->t2)
+
+#else
+
+register uint64_t T0 asm(AREG1);
+register uint64_t T1 asm(AREG2);
+register uint64_t T2 asm(AREG3);
+
+#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
+
+#define PARAM(n) ((uint64_t)PARAM##n)
+#define SPARAM(n) ((int32_t)PARAM##n)
+#define FT0 (env->ft0)
+#define FT1 (env->ft1)
+#define FT2 (env->ft2)
+#define FP_STATUS (env->fp_status)
+
+#if defined (DEBUG_OP)
+#define RETURN() __asm__ __volatile__("nop" : : : "memory");
+#else
+#define RETURN() __asm__ __volatile__("" : : : "memory");
+#endif
+
+#include "cpu.h"
+#include "exec-all.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "softmmu_exec.h"
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+static inline void env_to_regs(void)
+{
+}
+
+static inline void regs_to_env(void)
+{
+}
+
+int cpu_alpha_handle_mmu_fault (CPUState *env, uint64_t address, int rw,
+ int is_user, int is_softmmu);
+int cpu_alpha_mfpr (CPUState *env, int iprn, uint64_t *valp);
+int cpu_alpha_mtpr (CPUState *env, int iprn, uint64_t val, uint64_t *oldvalp);
+
+void do_interrupt (CPUState *env);
+
+#endif /* !defined (__ALPHA_EXEC_H__) */
diff --git a/target-alpha/helper.c b/target-alpha/helper.c
new file mode 100644
index 0000000000..0049c397ae
--- /dev/null
+++ b/target-alpha/helper.c
@@ -0,0 +1,454 @@
+/*
+ * Alpha emulation cpu helpers for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+
+#if defined(CONFIG_USER_ONLY)
+
+int cpu_alpha_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
+ int is_user, int is_softmmu)
+{
+ if (rw == 2)
+ env->exception_index = EXCP_ITB_MISS;
+ else
+ env->exception_index = EXCP_DFAULT;
+ env->ipr[IPR_EXC_ADDR] = address;
+
+ return 1;
+}
+
+target_ulong cpu_get_phys_page_debug (CPUState *env, target_ulong addr)
+{
+ return addr;
+}
+
+void do_interrupt (CPUState *env)
+{
+ env->exception_index = -1;
+}
+
+#else
+
+target_ulong cpu_get_phys_page_debug (CPUState *env, target_ulong addr)
+{
+ return -1;
+}
+
+int cpu_alpha_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
+ int is_user, int is_softmmu)
+{
+ uint32_t opc;
+
+ if (rw == 2) {
+ /* Instruction translation buffer miss */
+ env->exception_index = EXCP_ITB_MISS;
+ } else {
+ if (env->ipr[IPR_EXC_ADDR] & 1)
+ env->exception_index = EXCP_DTB_MISS_PAL;
+ else
+ env->exception_index = EXCP_DTB_MISS_NATIVE;
+ opc = (ldl_code(env->pc) >> 21) << 4;
+ if (rw) {
+ opc |= 0x9;
+ } else {
+ opc |= 0x4;
+ }
+ env->ipr[IPR_MM_STAT] = opc;
+ }
+
+ return 1;
+}
+
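+
+/* Read an internal processor register.  Narrow IPR fields are kept in the
+ * low bits of their 64-bit ipr[] slot; the "<< n >> n" pairs below sign-extend
+ * them back to a canonical 64-bit value.  Returns 0 on success, -1 when the
+ * register is write-only or not implemented. */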
+int cpu_alpha_mfpr (CPUState *env, int iprn, uint64_t *valp)
+{
+ uint64_t hwpcb;
+ int ret = 0;
+
+ hwpcb = env->ipr[IPR_PCBB];
+ switch (iprn) {
+ case IPR_ASN:
+ if (env->features & FEATURE_ASN)
+ *valp = env->ipr[IPR_ASN];
+ else
+ *valp = 0;
+ break;
+ case IPR_ASTEN:
+ *valp = ((int64_t)(env->ipr[IPR_ASTEN] << 60)) >> 60;
+ break;
+ case IPR_ASTSR:
+ *valp = ((int64_t)(env->ipr[IPR_ASTSR] << 60)) >> 60;
+ break;
+ case IPR_DATFX:
+ /* Write only */
+ ret = -1;
+ break;
+ case IPR_ESP:
+ if (env->features & FEATURE_SPS)
+ *valp = env->ipr[IPR_ESP];
+ else
+ *valp = ldq_raw(hwpcb + 8);
+ break;
+ case IPR_FEN:
+ *valp = ((int64_t)(env->ipr[IPR_FEN] << 63)) >> 63;
+ break;
+ case IPR_IPIR:
+ /* Write-only */
+ ret = -1;
+ break;
+ case IPR_IPL:
+ *valp = ((int64_t)(env->ipr[IPR_IPL] << 59)) >> 59;
+ break;
+ case IPR_KSP:
+ if (!(env->ipr[IPR_EXC_ADDR] & 1)) {
+ ret = -1;
+ } else {
+ if (env->features & FEATURE_SPS)
+ *valp = env->ipr[IPR_KSP];
+ else
+ *valp = ldq_raw(hwpcb + 0);
+ }
+ break;
+ case IPR_MCES:
+ *valp = ((int64_t)(env->ipr[IPR_MCES] << 59)) >> 59;
+ break;
+ case IPR_PERFMON:
+ /* Implementation specific */
+ *valp = 0;
+ break;
+ case IPR_PCBB:
+ *valp = ((int64_t)env->ipr[IPR_PCBB] << 16) >> 16;
+ break;
+ case IPR_PRBR:
+ *valp = env->ipr[IPR_PRBR];
+ break;
+ case IPR_PTBR:
+ *valp = env->ipr[IPR_PTBR];
+ break;
+ case IPR_SCBB:
+ *valp = (int64_t)((int32_t)env->ipr[IPR_SCBB]);
+ break;
+ case IPR_SIRR:
+ /* Write-only */
+ ret = -1;
+ break;
+ case IPR_SISR:
+        *valp = (int64_t)((int16_t)env->ipr[IPR_SISR]);
+        break;
+ case IPR_SSP:
+ if (env->features & FEATURE_SPS)
+ *valp = env->ipr[IPR_SSP];
+ else
+ *valp = ldq_raw(hwpcb + 16);
+ break;
+ case IPR_SYSPTBR:
+ if (env->features & FEATURE_VIRBND)
+ *valp = env->ipr[IPR_SYSPTBR];
+ else
+ ret = -1;
+ break;
+ case IPR_TBCHK:
+ if ((env->features & FEATURE_TBCHK)) {
+ /* XXX: TODO */
+ *valp = 0;
+ ret = -1;
+ } else {
+ ret = -1;
+ }
+ break;
+ case IPR_TBIA:
+ /* Write-only */
+ ret = -1;
+ break;
+ case IPR_TBIAP:
+ /* Write-only */
+ ret = -1;
+ break;
+ case IPR_TBIS:
+ /* Write-only */
+ ret = -1;
+ break;
+ case IPR_TBISD:
+ /* Write-only */
+ ret = -1;
+ break;
+ case IPR_TBISI:
+ /* Write-only */
+ ret = -1;
+ break;
+ case IPR_USP:
+ if (env->features & FEATURE_SPS)
+ *valp = env->ipr[IPR_USP];
+ else
+ *valp = ldq_raw(hwpcb + 24);
+ break;
+ case IPR_VIRBND:
+ if (env->features & FEATURE_VIRBND)
+ *valp = env->ipr[IPR_VIRBND];
+ else
+ ret = -1;
+ break;
+ case IPR_VPTB:
+ *valp = env->ipr[IPR_VPTB];
+ break;
+ case IPR_WHAMI:
+ *valp = env->ipr[IPR_WHAMI];
+ break;
+ default:
+ /* Invalid */
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+int cpu_alpha_mtpr (CPUState *env, int iprn, uint64_t val, uint64_t *oldvalp)
+{
+ uint64_t hwpcb, tmp64;
+ uint8_t tmp8;
+ int ret = 0;
+
+ hwpcb = env->ipr[IPR_PCBB];
+ switch (iprn) {
+ case IPR_ASN:
+ /* Read-only */
+ ret = -1;
+ break;
+ case IPR_ASTEN:
+ tmp8 = ((int8_t)(env->ipr[IPR_ASTEN] << 4)) >> 4;
+ *oldvalp = tmp8;
+ tmp8 &= val & 0xF;
+ tmp8 |= (val >> 4) & 0xF;
+ env->ipr[IPR_ASTEN] &= ~0xF;
+ env->ipr[IPR_ASTEN] |= tmp8;
+ ret = 1;
+ break;
+ case IPR_ASTSR:
+ tmp8 = ((int8_t)(env->ipr[IPR_ASTSR] << 4)) >> 4;
+ *oldvalp = tmp8;
+ tmp8 &= val & 0xF;
+ tmp8 |= (val >> 4) & 0xF;
+ env->ipr[IPR_ASTSR] &= ~0xF;
+ env->ipr[IPR_ASTSR] |= tmp8;
+        ret = 1;
+        break;
+ case IPR_DATFX:
+ env->ipr[IPR_DATFX] &= ~0x1;
+ env->ipr[IPR_DATFX] |= val & 1;
+ tmp64 = ldq_raw(hwpcb + 56);
+ tmp64 &= ~0x8000000000000000ULL;
+ tmp64 |= (val & 1) << 63;
+ stq_raw(hwpcb + 56, tmp64);
+ break;
+ case IPR_ESP:
+ if (env->features & FEATURE_SPS)
+ env->ipr[IPR_ESP] = val;
+ else
+ stq_raw(hwpcb + 8, val);
+ break;
+ case IPR_FEN:
+ env->ipr[IPR_FEN] = val & 1;
+ tmp64 = ldq_raw(hwpcb + 56);
+ tmp64 &= ~1;
+ tmp64 |= val & 1;
+ stq_raw(hwpcb + 56, tmp64);
+ break;
+ case IPR_IPIR:
+ /* XXX: TODO: Send IRQ to CPU #ir[16] */
+ break;
+ case IPR_IPL:
+ *oldvalp = ((int64_t)(env->ipr[IPR_IPL] << 59)) >> 59;
+ env->ipr[IPR_IPL] &= ~0x1F;
+ env->ipr[IPR_IPL] |= val & 0x1F;
+ /* XXX: may issue an interrupt or ASR _now_ */
+ ret = 1;
+ break;
+ case IPR_KSP:
+ if (!(env->ipr[IPR_EXC_ADDR] & 1)) {
+ ret = -1;
+ } else {
+ if (env->features & FEATURE_SPS)
+ env->ipr[IPR_KSP] = val;
+ else
+ stq_raw(hwpcb + 0, val);
+ }
+ break;
+ case IPR_MCES:
+ env->ipr[IPR_MCES] &= ~((val & 0x7) | 0x18);
+ env->ipr[IPR_MCES] |= val & 0x18;
+ break;
+ case IPR_PERFMON:
+ /* Implementation specific */
+ *oldvalp = 0;
+ ret = 1;
+ break;
+ case IPR_PCBB:
+ /* Read-only */
+ ret = -1;
+ break;
+ case IPR_PRBR:
+ env->ipr[IPR_PRBR] = val;
+ break;
+ case IPR_PTBR:
+ /* Read-only */
+ ret = -1;
+ break;
+ case IPR_SCBB:
+ env->ipr[IPR_SCBB] = (uint32_t)val;
+ break;
+ case IPR_SIRR:
+ if (val & 0xF) {
+ env->ipr[IPR_SISR] |= 1 << (val & 0xF);
+ /* XXX: request a software interrupt _now_ */
+ }
+ break;
+ case IPR_SISR:
+ /* Read-only */
+ ret = -1;
+ break;
+ case IPR_SSP:
+ if (env->features & FEATURE_SPS)
+ env->ipr[IPR_SSP] = val;
+ else
+ stq_raw(hwpcb + 16, val);
+ break;
+ case IPR_SYSPTBR:
+ if (env->features & FEATURE_VIRBND)
+ env->ipr[IPR_SYSPTBR] = val;
+ else
+            ret = -1;
+        break;
+ case IPR_TBCHK:
+ /* Read-only */
+ ret = -1;
+ break;
+ case IPR_TBIA:
+ tlb_flush(env, 1);
+ break;
+ case IPR_TBIAP:
+ tlb_flush(env, 1);
+ break;
+ case IPR_TBIS:
+ tlb_flush_page(env, val);
+ break;
+ case IPR_TBISD:
+ tlb_flush_page(env, val);
+ break;
+ case IPR_TBISI:
+ tlb_flush_page(env, val);
+ break;
+ case IPR_USP:
+ if (env->features & FEATURE_SPS)
+ env->ipr[IPR_USP] = val;
+ else
+ stq_raw(hwpcb + 24, val);
+ break;
+ case IPR_VIRBND:
+ if (env->features & FEATURE_VIRBND)
+ env->ipr[IPR_VIRBND] = val;
+ else
+ ret = -1;
+ break;
+ case IPR_VPTB:
+ env->ipr[IPR_VPTB] = val;
+ break;
+ case IPR_WHAMI:
+ /* Read-only */
+ ret = -1;
+ break;
+ default:
+ /* Invalid */
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
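+
+/* Exception entry: save the return address (with bit 0 flagging PAL mode)
+ * in EXC_ADDR, then either jump into native PALcode at PAL_BASE + vector or,
+ * when no PALcode image is loaded (PAL_BASE == -1), fall back to the
+ * emulated PALcode and perform the equivalent of an REI. */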
+void do_interrupt (CPUState *env)
+{
+ int excp;
+
+ env->ipr[IPR_EXC_ADDR] = env->pc | 1;
+ excp = env->exception_index;
+ env->exception_index = 0;
+ env->error_code = 0;
+ /* XXX: disable interrupts and memory mapping */
+ if (env->ipr[IPR_PAL_BASE] != -1ULL) {
+ /* We use native PALcode */
+ env->pc = env->ipr[IPR_PAL_BASE] + excp;
+ } else {
+ /* We use emulated PALcode */
+        call_pal(env, excp);
+ /* Emulate REI */
+ env->pc = env->ipr[IPR_EXC_ADDR] & ~7;
+ env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
+ /* XXX: re-enable interrupts and memory mapping */
+ }
+}
+#endif
+
+void cpu_dump_state (CPUState *env, FILE *f,
+ int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
+ int flags)
+{
+    static const char *linux_reg_names[] = {
+ "v0 ", "t0 ", "t1 ", "t2 ", "t3 ", "t4 ", "t5 ", "t6 ",
+ "t7 ", "s0 ", "s1 ", "s2 ", "s3 ", "s4 ", "s5 ", "fp ",
+ "a0 ", "a1 ", "a2 ", "a3 ", "a4 ", "a5 ", "t8 ", "t9 ",
+ "t10", "t11", "ra ", "t12", "at ", "gp ", "sp ", "zero",
+ };
+ int i;
+
+ cpu_fprintf(f, " PC " TARGET_FMT_lx " PS " TARGET_FMT_lx "\n",
+ env->pc, env->ps);
+ for (i = 0; i < 31; i++) {
+ cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i,
+ linux_reg_names[i], env->ir[i]);
+ if ((i % 3) == 2)
+ cpu_fprintf(f, "\n");
+ }
+ cpu_fprintf(f, "\n");
+ for (i = 0; i < 31; i++) {
+ cpu_fprintf(f, "FIR%02d " TARGET_FMT_lx " ", i,
+ *((uint64_t *)(&env->fir[i])));
+ if ((i % 3) == 2)
+ cpu_fprintf(f, "\n");
+ }
+ cpu_fprintf(f, "FT " TARGET_FMT_lx " " TARGET_FMT_lx " " TARGET_FMT_lx,
+ *((uint64_t *)(&env->ft0)), *((uint64_t *)(&env->ft1)),
+ *((uint64_t *)(&env->ft2)));
+ cpu_fprintf(f, "\nMEM " TARGET_FMT_lx " %d %d\n",
+ ldq_raw(0x000000004007df60ULL),
+ (uint8_t *)(&env->ft0), (uint8_t *)(&env->fir[0]));
+}
+
+void cpu_dump_EA (target_ulong EA)
+{
+ FILE *f;
+
+ if (logfile)
+ f = logfile;
+ else
+ f = stdout;
+ fprintf(f, "Memory access at address " TARGET_FMT_lx "\n", EA);
+}
diff --git a/target-alpha/op.c b/target-alpha/op.c
new file mode 100644
index 0000000000..8a22c5c087
--- /dev/null
+++ b/target-alpha/op.c
@@ -0,0 +1,1103 @@
+/*
+ * Alpha emulation cpu micro-operations for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define DEBUG_OP
+
+#include "config.h"
+#include "exec.h"
+
+#include "op_helper.h"
+
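+
+/* op_template.h is expanded once for each of the 32 integer registers; each
+ * expansion instantiates that register's micro-ops (register <-> T0/T1/T2
+ * transfers, per the usual dyngen template pattern) with REG substituted
+ * into the operation names. */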
+#define REG 0
+#include "op_template.h"
+
+#define REG 1
+#include "op_template.h"
+
+#define REG 2
+#include "op_template.h"
+
+#define REG 3
+#include "op_template.h"
+
+#define REG 4
+#include "op_template.h"
+
+#define REG 5
+#include "op_template.h"
+
+#define REG 6
+#include "op_template.h"
+
+#define REG 7
+#include "op_template.h"
+
+#define REG 8
+#include "op_template.h"
+
+#define REG 9
+#include "op_template.h"
+
+#define REG 10
+#include "op_template.h"
+
+#define REG 11
+#include "op_template.h"
+
+#define REG 12
+#include "op_template.h"
+
+#define REG 13
+#include "op_template.h"
+
+#define REG 14
+#include "op_template.h"
+
+#define REG 15
+#include "op_template.h"
+
+#define REG 16
+#include "op_template.h"
+
+#define REG 17
+#include "op_template.h"
+
+#define REG 18
+#include "op_template.h"
+
+#define REG 19
+#include "op_template.h"
+
+#define REG 20
+#include "op_template.h"
+
+#define REG 21
+#include "op_template.h"
+
+#define REG 22
+#include "op_template.h"
+
+#define REG 23
+#include "op_template.h"
+
+#define REG 24
+#include "op_template.h"
+
+#define REG 25
+#include "op_template.h"
+
+#define REG 26
+#include "op_template.h"
+
+#define REG 27
+#include "op_template.h"
+
+#define REG 28
+#include "op_template.h"
+
+#define REG 29
+#include "op_template.h"
+
+#define REG 30
+#include "op_template.h"
+
+#define REG 31
+#include "op_template.h"
+
+/* Debug stuff */
+void OPPROTO op_no_op (void)
+{
+#if !defined (DEBUG_OP)
+ __asm__ __volatile__("nop" : : : "memory");
+#endif
+ RETURN();
+}
+
+void OPPROTO op_tb_flush (void)
+{
+ helper_tb_flush();
+ RETURN();
+}
+
+/* Loads and stores */
+#define MEMSUFFIX _raw
+#include "op_mem.h"
+#if !defined(CONFIG_USER_ONLY)
+#define MEMSUFFIX _user
+#include "op_mem.h"
+#define MEMSUFFIX _kernel
+#include "op_mem.h"
+/* These are used for supervisor, executive and PAL modes */
+#define MEMSUFFIX _data
+#include "op_mem.h"
+#endif
+
+/* Special operation for loads and stores: clear the low 3 bits of the address */
+void OPPROTO op_n7 (void)
+{
+ T0 &= ~(uint64_t)0x7;
+ RETURN();
+}
+
+/* Misc */
+void OPPROTO op_excp (void)
+{
+ helper_excp(PARAM(1), PARAM(2));
+ RETURN();
+}
+
+void OPPROTO op_load_amask (void)
+{
+ helper_amask();
+ RETURN();
+}
+
+void OPPROTO op_load_pcc (void)
+{
+ helper_load_pcc();
+ RETURN();
+}
+
+void OPPROTO op_load_implver (void)
+{
+ helper_load_implver();
+ RETURN();
+}
+
+void OPPROTO op_load_fpcr (void)
+{
+ helper_load_fpcr();
+ RETURN();
+}
+
+void OPPROTO op_store_fpcr (void)
+{
+ helper_store_fpcr();
+ RETURN();
+}
+
+void OPPROTO op_load_irf (void)
+{
+ helper_load_irf();
+ RETURN();
+}
+
+void OPPROTO op_set_irf (void)
+{
+ helper_set_irf();
+ RETURN();
+}
+
+void OPPROTO op_clear_irf (void)
+{
+ helper_clear_irf();
+ RETURN();
+}
+
+void OPPROTO op_exit_tb (void)
+{
+ EXIT_TB();
+}
+
+/* Arithmetic */
+void OPPROTO op_addq (void)
+{
+ T0 += T1;
+ RETURN();
+}
+
+void OPPROTO op_addqv (void)
+{
+ helper_addqv();
+ RETURN();
+}
+
+void OPPROTO op_addl (void)
+{
+ T0 = (int64_t)((int32_t)(T0 + T1));
+ RETURN();
+}
+
+void OPPROTO op_addlv (void)
+{
+ helper_addlv();
+ RETURN();
+}
+
+void OPPROTO op_subq (void)
+{
+ T0 -= T1;
+ RETURN();
+}
+
+void OPPROTO op_subqv (void)
+{
+ helper_subqv();
+ RETURN();
+}
+
+void OPPROTO op_subl (void)
+{
+ T0 = (int64_t)((int32_t)(T0 - T1));
+ RETURN();
+}
+
+void OPPROTO op_sublv (void)
+{
+ helper_sublv();
+ RETURN();
+}
+
+void OPPROTO op_s4 (void)
+{
+ T0 <<= 2;
+ RETURN();
+}
+
+void OPPROTO op_s8 (void)
+{
+ T0 <<= 3;
+ RETURN();
+}
+
+void OPPROTO op_mull (void)
+{
+ T0 = (int64_t)((int32_t)T0 * (int32_t)T1);
+ RETURN();
+}
+
+void OPPROTO op_mullv (void)
+{
+ helper_mullv();
+ RETURN();
+}
+
+void OPPROTO op_mulq (void)
+{
+ T0 *= T1;
+ RETURN();
+}
+
+void OPPROTO op_mulqv (void)
+{
+ helper_mulqv();
+ RETURN();
+}
+
+void OPPROTO op_umulh (void)
+{
+ helper_umulh();
+ RETURN();
+}
+
+/* Logical */
+void OPPROTO op_and (void)
+{
+ T0 &= T1;
+ RETURN();
+}
+
+void OPPROTO op_bic (void)
+{
+ T0 &= ~T1;
+ RETURN();
+}
+
+void OPPROTO op_bis (void)
+{
+ T0 |= T1;
+ RETURN();
+}
+
+void OPPROTO op_eqv (void)
+{
+ T0 ^= ~T1;
+ RETURN();
+}
+
+void OPPROTO op_ornot (void)
+{
+ T0 |= ~T1;
+ RETURN();
+}
+
+void OPPROTO op_xor (void)
+{
+ T0 ^= T1;
+ RETURN();
+}
+
+void OPPROTO op_sll (void)
+{
+ T0 <<= T1;
+ RETURN();
+}
+
+void OPPROTO op_srl (void)
+{
+ T0 >>= T1;
+ RETURN();
+}
+
+void OPPROTO op_sra (void)
+{
+ T0 = (int64_t)T0 >> T1;
+ RETURN();
+}
+
+void OPPROTO op_sextb (void)
+{
+ T0 = (int64_t)((int8_t)T0);
+ RETURN();
+}
+
+void OPPROTO op_sextw (void)
+{
+ T0 = (int64_t)((int16_t)T0);
+ RETURN();
+
+}
+
+void OPPROTO op_ctpop (void)
+{
+ helper_ctpop();
+ RETURN();
+}
+
+void OPPROTO op_ctlz (void)
+{
+ helper_ctlz();
+ RETURN();
+}
+
+void OPPROTO op_cttz (void)
+{
+ helper_cttz();
+ RETURN();
+}
+
+void OPPROTO op_mskbl (void)
+{
+ helper_mskbl();
+ RETURN();
+}
+
+void OPPROTO op_extbl (void)
+{
+ helper_extbl();
+ RETURN();
+}
+
+void OPPROTO op_insbl (void)
+{
+ helper_insbl();
+ RETURN();
+}
+
+void OPPROTO op_mskwl (void)
+{
+ helper_mskwl();
+ RETURN();
+}
+
+void OPPROTO op_extwl (void)
+{
+ helper_extwl();
+ RETURN();
+}
+
+void OPPROTO op_inswl (void)
+{
+ helper_inswl();
+ RETURN();
+}
+
+void OPPROTO op_mskll (void)
+{
+ helper_mskll();
+ RETURN();
+}
+
+void OPPROTO op_extll (void)
+{
+ helper_extll();
+ RETURN();
+}
+
+void OPPROTO op_insll (void)
+{
+ helper_insll();
+ RETURN();
+}
+
+void OPPROTO op_zap (void)
+{
+ helper_zap();
+ RETURN();
+}
+
+void OPPROTO op_zapnot (void)
+{
+ helper_zapnot();
+ RETURN();
+}
+
+void OPPROTO op_mskql (void)
+{
+ helper_mskql();
+ RETURN();
+}
+
+void OPPROTO op_extql (void)
+{
+ helper_extql();
+ RETURN();
+}
+
+void OPPROTO op_insql (void)
+{
+ helper_insql();
+ RETURN();
+}
+
+void OPPROTO op_mskwh (void)
+{
+ helper_mskwh();
+ RETURN();
+}
+
+void OPPROTO op_inswh (void)
+{
+ helper_inswh();
+ RETURN();
+}
+
+void OPPROTO op_extwh (void)
+{
+ helper_extwh();
+ RETURN();
+}
+
+void OPPROTO op_msklh (void)
+{
+ helper_msklh();
+ RETURN();
+}
+
+void OPPROTO op_inslh (void)
+{
+ helper_inslh();
+ RETURN();
+}
+
+void OPPROTO op_extlh (void)
+{
+ helper_extlh();
+ RETURN();
+}
+
+void OPPROTO op_mskqh (void)
+{
+ helper_mskqh();
+ RETURN();
+}
+
+void OPPROTO op_insqh (void)
+{
+ helper_insqh();
+ RETURN();
+}
+
+void OPPROTO op_extqh (void)
+{
+ helper_extqh();
+ RETURN();
+}
+
+/* Tests */
+void OPPROTO op_cmpult (void)
+{
+ if (T0 < T1)
+ T0 = 1;
+ else
+ T0 = 0;
+ RETURN();
+}
+
+void OPPROTO op_cmpule (void)
+{
+ if (T0 <= T1)
+ T0 = 1;
+ else
+ T0 = 0;
+ RETURN();
+}
+
+void OPPROTO op_cmpeq (void)
+{
+ if (T0 == T1)
+ T0 = 1;
+ else
+ T0 = 0;
+ RETURN();
+}
+
+void OPPROTO op_cmplt (void)
+{
+ if ((int64_t)T0 < (int64_t)T1)
+ T0 = 1;
+ else
+ T0 = 0;
+ RETURN();
+}
+
+void OPPROTO op_cmple (void)
+{
+ if ((int64_t)T0 <= (int64_t)T1)
+ T0 = 1;
+ else
+ T0 = 0;
+ RETURN();
+}
+
+void OPPROTO op_cmpbge (void)
+{
+ helper_cmpbge();
+ RETURN();
+}
+
+void OPPROTO op_cmpeqz (void)
+{
+ if (T0 == 0)
+ T0 = 1;
+ else
+ T0 = 0;
+ RETURN();
+}
+
+void OPPROTO op_cmpnez (void)
+{
+ if (T0 != 0)
+ T0 = 1;
+ else
+ T0 = 0;
+ RETURN();
+}
+
+void OPPROTO op_cmpltz (void)
+{
+ if ((int64_t)T0 < 0)
+ T0 = 1;
+ else
+ T0 = 0;
+ RETURN();
+}
+
+void OPPROTO op_cmplez (void)
+{
+ if ((int64_t)T0 <= 0)
+ T0 = 1;
+ else
+ T0 = 0;
+ RETURN();
+}
+
+void OPPROTO op_cmpgtz (void)
+{
+ if ((int64_t)T0 > 0)
+ T0 = 1;
+ else
+ T0 = 0;
+ RETURN();
+}
+
+void OPPROTO op_cmpgez (void)
+{
+ if ((int64_t)T0 >= 0)
+ T0 = 1;
+ else
+ T0 = 0;
+ RETURN();
+}
+
+void OPPROTO op_cmplbs (void)
+{
+ T0 &= 1;
+ RETURN();
+}
+
+void OPPROTO op_cmplbc (void)
+{
+ T0 = (~T0) & 1;
+ RETURN();
+}
+
+/* Branches */
+void OPPROTO op_branch (void)
+{
+ env->pc = T0 & ~3;
+ RETURN();
+}
+
+void OPPROTO op_addq1 (void)
+{
+ T1 += T0;
+ RETURN();
+}
+
+#if 0 // QEMU cannot pass 64-bit immediate parameters to micro-ops
+void OPPROTO op_bcond (void)
+{
+ if (T0)
+ env->pc = T1 & ~3;
+ else
+ env->pc = PARAM(1);
+ RETURN();
+}
+#else
+void OPPROTO op_bcond (void)
+{
+ if (T0)
+ env->pc = T1 & ~3;
+ else
+ env->pc = ((uint64_t)PARAM(1) << 32) | (uint64_t)PARAM(2);
+ RETURN();
+}
+#endif
+
+#if 0 // QEMU cannot pass 64-bit immediate parameters to micro-ops
+void OPPROTO op_update_pc (void)
+{
+ env->pc = PARAM(1);
+ RETURN();
+}
+#else
+void OPPROTO op_update_pc (void)
+{
+ env->pc = ((uint64_t)PARAM(1) << 32) | (uint64_t)PARAM(2);
+ RETURN();
+}
+#endif
+
+/* Optimization for 32-bit host architectures */
+void OPPROTO op_update_pc32 (void)
+{
+ env->pc = (uint64_t)PARAM(1);
+ RETURN();
+}
+
+/* IEEE floating point arithmetic */
+/* S floating (single) */
+void OPPROTO op_adds (void)
+{
+ FT0 = float32_add(FT0, FT1, &FP_STATUS);
+ RETURN();
+}
+
+void OPPROTO op_subs (void)
+{
+ FT0 = float32_sub(FT0, FT1, &FP_STATUS);
+ RETURN();
+}
+
+void OPPROTO op_muls (void)
+{
+ FT0 = float32_mul(FT0, FT1, &FP_STATUS);
+ RETURN();
+}
+
+void OPPROTO op_divs (void)
+{
+ FT0 = float32_div(FT0, FT1, &FP_STATUS);
+ RETURN();
+}
+
+void OPPROTO op_sqrts (void)
+{
+ helper_sqrts();
+ RETURN();
+}
+
+void OPPROTO op_cpys (void)
+{
+ helper_cpys();
+ RETURN();
+}
+
+void OPPROTO op_cpysn (void)
+{
+ helper_cpysn();
+ RETURN();
+}
+
+void OPPROTO op_cpyse (void)
+{
+ helper_cpyse();
+ RETURN();
+}
+
+void OPPROTO op_itofs (void)
+{
+ helper_itofs();
+ RETURN();
+}
+
+void OPPROTO op_ftois (void)
+{
+ helper_ftois();
+ RETURN();
+}
+
+/* T floating (double) */
+void OPPROTO op_addt (void)
+{
+ FT0 = float64_add(FT0, FT1, &FP_STATUS);
+ RETURN();
+}
+
+void OPPROTO op_subt (void)
+{
+ FT0 = float64_sub(FT0, FT1, &FP_STATUS);
+ RETURN();
+}
+
+void OPPROTO op_mult (void)
+{
+ FT0 = float64_mul(FT0, FT1, &FP_STATUS);
+ RETURN();
+}
+
+void OPPROTO op_divt (void)
+{
+ FT0 = float64_div(FT0, FT1, &FP_STATUS);
+ RETURN();
+}
+
+void OPPROTO op_sqrtt (void)
+{
+ helper_sqrtt();
+ RETURN();
+}
+
+void OPPROTO op_cmptun (void)
+{
+ helper_cmptun();
+ RETURN();
+}
+
+void OPPROTO op_cmpteq (void)
+{
+ helper_cmpteq();
+ RETURN();
+}
+
+void OPPROTO op_cmptle (void)
+{
+ helper_cmptle();
+ RETURN();
+}
+
+void OPPROTO op_cmptlt (void)
+{
+ helper_cmptlt();
+ RETURN();
+}
+
+void OPPROTO op_itoft (void)
+{
+ helper_itoft();
+ RETURN();
+}
+
+void OPPROTO op_ftoit (void)
+{
+ helper_ftoit();
+ RETURN();
+}
+
+/* VAX floating point arithmetic */
+/* F floating */
+void OPPROTO op_addf (void)
+{
+ helper_addf();
+ RETURN();
+}
+
+void OPPROTO op_subf (void)
+{
+ helper_subf();
+ RETURN();
+}
+
+void OPPROTO op_mulf (void)
+{
+ helper_mulf();
+ RETURN();
+}
+
+void OPPROTO op_divf (void)
+{
+ helper_divf();
+ RETURN();
+}
+
+void OPPROTO op_sqrtf (void)
+{
+ helper_sqrtf();
+ RETURN();
+}
+
+void OPPROTO op_cmpfeq (void)
+{
+ helper_cmpfeq();
+ RETURN();
+}
+
+void OPPROTO op_cmpfne (void)
+{
+ helper_cmpfne();
+ RETURN();
+}
+
+void OPPROTO op_cmpflt (void)
+{
+ helper_cmpflt();
+ RETURN();
+}
+
+void OPPROTO op_cmpfle (void)
+{
+ helper_cmpfle();
+ RETURN();
+}
+
+void OPPROTO op_cmpfgt (void)
+{
+ helper_cmpfgt();
+ RETURN();
+}
+
+void OPPROTO op_cmpfge (void)
+{
+ helper_cmpfge();
+ RETURN();
+}
+
+void OPPROTO op_itoff (void)
+{
+ helper_itoff();
+ RETURN();
+}
+
+/* G floating */
+void OPPROTO op_addg (void)
+{
+ helper_addg();
+ RETURN();
+}
+
+void OPPROTO op_subg (void)
+{
+ helper_subg();
+ RETURN();
+}
+
+void OPPROTO op_mulg (void)
+{
+ helper_mulg();
+ RETURN();
+}
+
+void OPPROTO op_divg (void)
+{
+ helper_divg();
+ RETURN();
+}
+
+void OPPROTO op_sqrtg (void)
+{
+ helper_sqrtg();
+ RETURN();
+}
+
+void OPPROTO op_cmpgeq (void)
+{
+ helper_cmpgeq();
+ RETURN();
+}
+
+void OPPROTO op_cmpglt (void)
+{
+ helper_cmpglt();
+ RETURN();
+}
+
+void OPPROTO op_cmpgle (void)
+{
+ helper_cmpgle();
+ RETURN();
+}
+
+/* Floating point format conversion */
+void OPPROTO op_cvtst (void)
+{
+ FT0 = (float)FT0;
+ RETURN();
+}
+
+void OPPROTO op_cvtqs (void)
+{
+ helper_cvtqs();
+ RETURN();
+}
+
+void OPPROTO op_cvtts (void)
+{
+ FT0 = (float)FT0;
+ RETURN();
+}
+
+void OPPROTO op_cvttq (void)
+{
+ helper_cvttq();
+ RETURN();
+}
+
+void OPPROTO op_cvtqt (void)
+{
+ helper_cvtqt();
+ RETURN();
+}
+
+void OPPROTO op_cvtqf (void)
+{
+ helper_cvtqf();
+ RETURN();
+}
+
+void OPPROTO op_cvtgf (void)
+{
+ helper_cvtgf();
+ RETURN();
+}
+
+void OPPROTO op_cvtgd (void)
+{
+ helper_cvtgd();
+ RETURN();
+}
+
+void OPPROTO op_cvtgq (void)
+{
+ helper_cvtgq();
+ RETURN();
+}
+
+void OPPROTO op_cvtqg (void)
+{
+ helper_cvtqg();
+ RETURN();
+}
+
+void OPPROTO op_cvtdg (void)
+{
+ helper_cvtdg();
+ RETURN();
+}
+
+void OPPROTO op_cvtlq (void)
+{
+ helper_cvtlq();
+ RETURN();
+}
+
+void OPPROTO op_cvtql (void)
+{
+ helper_cvtql();
+ RETURN();
+}
+
+void OPPROTO op_cvtqlv (void)
+{
+ helper_cvtqlv();
+ RETURN();
+}
+
+void OPPROTO op_cvtqlsv (void)
+{
+ helper_cvtqlsv();
+ RETURN();
+}
+
+/* PALcode support special instructions */
+#if !defined (CONFIG_USER_ONLY)
+void OPPROTO op_hw_rei (void)
+{
+ env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
+ env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
+ /* XXX: re-enable interrupts and memory mapping */
+ RETURN();
+}
+
+void OPPROTO op_hw_ret (void)
+{
+ env->pc = T0 & ~3;
+ env->ipr[IPR_EXC_ADDR] = T0 & 1;
+ /* XXX: re-enable interrupts and memory mapping */
+ RETURN();
+}
+
+void OPPROTO op_mfpr (void)
+{
+ helper_mfpr(PARAM(1));
+ RETURN();
+}
+
+void OPPROTO op_mtpr (void)
+{
+ helper_mtpr(PARAM(1));
+ RETURN();
+}
+
+void OPPROTO op_set_alt_mode (void)
+{
+ env->saved_mode = env->ps & 0xC;
+ env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
+ RETURN();
+}
+
+void OPPROTO op_restore_mode (void)
+{
+ env->ps = (env->ps & ~0xC) | env->saved_mode;
+ RETURN();
+}
+
+void OPPROTO op_ld_phys_to_virt (void)
+{
+ helper_ld_phys_to_virt();
+ RETURN();
+}
+
+void OPPROTO op_st_phys_to_virt (void)
+{
+ helper_st_phys_to_virt();
+ RETURN();
+}
+#endif /* !defined (CONFIG_USER_ONLY) */
diff --git a/target-alpha/op_helper.c b/target-alpha/op_helper.c
new file mode 100644
index 0000000000..746665a429
--- /dev/null
+++ b/target-alpha/op_helper.c
@@ -0,0 +1,1255 @@
+/*
+ * Alpha emulation cpu micro-operations helpers for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "exec.h"
+#include "softfloat.h"
+
+#include "op_helper.h"
+
+#define MEMSUFFIX _raw
+#include "op_helper_mem.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#define MEMSUFFIX _user
+#include "op_helper_mem.h"
+
+#define MEMSUFFIX _kernel
+#include "op_helper_mem.h"
+
+/* These are used for supervisor and executive modes */
+#define MEMSUFFIX _data
+#include "op_helper_mem.h"
+#endif
+
+void helper_tb_flush (void)
+{
+ tlb_flush(env, 1);
+}
+
+void cpu_dump_EA (target_ulong EA);
+void helper_print_mem_EA (target_ulong EA)
+{
+ cpu_dump_EA(EA);
+}
+
+/*****************************************************************************/
+/* Exceptions processing helpers */
+void helper_excp (uint32_t excp, uint32_t error)
+{
+ env->exception_index = excp;
+ env->error_code = error;
+ cpu_loop_exit();
+}
+
+void helper_amask (void)
+{
+ switch (env->implver) {
+ case IMPLVER_2106x:
+ /* EV4, EV45, LCA, LCA45 & EV5 */
+ break;
+ case IMPLVER_21164:
+ case IMPLVER_21264:
+ case IMPLVER_21364:
+ T0 &= ~env->amask;
+ break;
+ }
+}
+
+void helper_load_pcc (void)
+{
+ /* XXX: TODO */
+ T0 = 0;
+}
+
+void helper_load_implver (void)
+{
+ T0 = env->implver;
+}
+
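+
+/* Build the FPCR image returned to the guest: the accrued exception flags go
+ * in the high bits and FPCR<59:58> holds the dynamic rounding mode, which is
+ * why the rounding-mode codes below are shifted left by 58. */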
+void helper_load_fpcr (void)
+{
+ T0 = 0;
+#ifdef CONFIG_SOFTFLOAT
+    T0 |= (uint64_t)env->fp_status.float_exception_flags << 52;
+ if (env->fp_status.float_exception_flags)
+ T0 |= 1ULL << 63;
+    env->ipr[IPR_EXC_SUM] &= ~0x3E;
+ env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
+#endif
+ switch (env->fp_status.float_rounding_mode) {
+ case float_round_nearest_even:
+ T0 |= 2ULL << 58;
+ break;
+ case float_round_down:
+ T0 |= 1ULL << 58;
+ break;
+ case float_round_up:
+ T0 |= 3ULL << 58;
+ break;
+ case float_round_to_zero:
+ break;
+ }
+}
+
+void helper_store_fpcr (void)
+{
+#ifdef CONFIG_SOFTFLOAT
+ set_float_exception_flags((T0 >> 52) & 0x3F, &FP_STATUS);
+#endif
+ switch ((T0 >> 58) & 3) {
+ case 0:
+ set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
+ break;
+ case 1:
+ set_float_rounding_mode(float_round_down, &FP_STATUS);
+ break;
+ case 2:
+ set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
+ break;
+ case 3:
+ set_float_rounding_mode(float_round_up, &FP_STATUS);
+ break;
+ }
+}
+
+void helper_load_irf (void)
+{
+ /* XXX: TODO */
+ T0 = 0;
+}
+
+void helper_set_irf (void)
+{
+ /* XXX: TODO */
+}
+
+void helper_clear_irf (void)
+{
+ /* XXX: TODO */
+}
+
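+
+/* Overflow-trapping arithmetic.  Signed overflow of a + b occurs iff the
+ * operands have the same sign and the result's sign differs, i.e.
+ * (~(a ^ b)) & (a ^ result) has the sign bit set -- which is what the
+ * (T2 ^ T1 ^ -1) & (T2 ^ T0) expressions below test. */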
+void helper_addqv (void)
+{
+ T2 = T0;
+ T0 += T1;
+ if (unlikely((T2 ^ T1 ^ (-1ULL)) & (T2 ^ T0) & (1ULL << 63))) {
+ helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
+ }
+}
+
+void helper_addlv (void)
+{
+ T2 = T0;
+ T0 = (uint32_t)(T0 + T1);
+ if (unlikely((T2 ^ T1 ^ (-1UL)) & (T2 ^ T0) & (1UL << 31))) {
+ helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
+ }
+}
+
+void helper_subqv (void)
+{
+ T2 = T0;
+ T0 -= T1;
+ if (unlikely(((~T2) ^ T0 ^ (-1ULL)) & ((~T2) ^ T1) & (1ULL << 63))) {
+ helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
+ }
+}
+
+void helper_sublv (void)
+{
+ T2 = T0;
+ T0 = (uint32_t)(T0 - T1);
+ if (unlikely(((~T2) ^ T0 ^ (-1UL)) & ((~T2) ^ T1) & (1UL << 31))) {
+ helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
+ }
+}
+
+void helper_mullv (void)
+{
+ int64_t res = (int64_t)T0 * (int64_t)T1;
+
+ if (unlikely((int32_t)res != res)) {
+ helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
+ }
+ T0 = (int64_t)((int32_t)res);
+}
+
+void helper_mulqv (void)
+{
+ uint64_t res, tmp0, tmp1;
+
+ res = (T0 >> 32) * (T1 >> 32);
+ tmp0 = ((T0 & 0xFFFFFFFF) * (T1 >> 32)) +
+ ((T0 >> 32) * (T1 & 0xFFFFFFFF));
+ tmp1 = (T0 & 0xFFFFFFFF) * (T1 & 0xFFFFFFFF);
+ tmp0 += tmp1 >> 32;
+ res += tmp0 >> 32;
+ T0 *= T1;
+ if (unlikely(res != 0)) {
+ helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
+ }
+}
+
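+
+/* 64x64 -> high-64 multiply built from 32-bit partial products so it also
+ * works on 32-bit hosts: with a = ahi*2^32 + alo and b = bhi*2^32 + blo, the
+ * high word is ahi*bhi plus the carries out of the cross terms. */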
+void helper_umulh (void)
+{
+ uint64_t tmp0, tmp1;
+
+ tmp0 = ((T0 & 0xFFFFFFFF) * (T1 >> 32)) +
+ ((T0 >> 32) * (T1 & 0xFFFFFFFF));
+ tmp1 = (T0 & 0xFFFFFFFF) * (T1 & 0xFFFFFFFF);
+ tmp0 += tmp1 >> 32;
+    T0 = (T0 >> 32) * (T1 >> 32);
+ T0 += tmp0 >> 32;
+}
+
+void helper_ctpop (void)
+{
+ int n;
+
+ for (n = 0; T0 != 0; n++)
+        T0 &= T0 - 1;
+ T0 = n;
+}
+
+void helper_ctlz (void)
+{
+ uint32_t op32;
+ int n;
+
+ n = 0;
+ if (!(T0 & 0xFFFFFFFF00000000ULL)) {
+ n += 32;
+ T0 <<= 32;
+ }
+    /* Make it easier for 32-bit hosts */
+ op32 = T0 >> 32;
+ if (!(op32 & 0xFFFF0000UL)) {
+ n += 16;
+ op32 <<= 16;
+ }
+ if (!(op32 & 0xFF000000UL)) {
+ n += 8;
+ op32 <<= 8;
+ }
+ if (!(op32 & 0xF0000000UL)) {
+ n += 4;
+ op32 <<= 4;
+ }
+ if (!(op32 & 0xC0000000UL)) {
+ n += 2;
+ op32 <<= 2;
+ }
+ if (!(op32 & 0x80000000UL)) {
+ n++;
+ op32 <<= 1;
+ }
+ if (!(op32 & 0x80000000UL)) {
+ n++;
+ }
+ T0 = n;
+}
+
+void helper_cttz (void)
+{
+ uint32_t op32;
+ int n;
+
+ n = 0;
+ if (!(T0 & 0x00000000FFFFFFFFULL)) {
+ n += 32;
+ T0 >>= 32;
+ }
+    /* Make it easier for 32-bit hosts */
+ op32 = T0;
+ if (!(op32 & 0x0000FFFFUL)) {
+ n += 16;
+ op32 >>= 16;
+ }
+ if (!(op32 & 0x000000FFUL)) {
+ n += 8;
+ op32 >>= 8;
+ }
+ if (!(op32 & 0x0000000FUL)) {
+ n += 4;
+ op32 >>= 4;
+ }
+ if (!(op32 & 0x00000003UL)) {
+ n += 2;
+ op32 >>= 2;
+ }
+ if (!(op32 & 0x00000001UL)) {
+ n++;
+ op32 >>= 1;
+ }
+ if (!(op32 & 0x00000001UL)) {
+ n++;
+ }
+ T0 = n;
+}
+
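+
+/* byte_zap clears the bytes of op selected by mskb: bit i of mskb set means
+ * byte i of the result is zeroed.  It is the building block for the ZAP,
+ * MSKxx, EXTxx and INSxx helpers below. */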
+static inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
+{
+ uint64_t mask;
+
+ mask = 0;
+ mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
+ mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
+ mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
+ mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
+ mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
+ mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
+ mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
+ mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;
+
+ return op & ~mask;
+}
+
+void helper_mskbl (void)
+{
+ T0 = byte_zap(T0, 0x01 << (T1 & 7));
+}
+
+void helper_extbl (void)
+{
+ T0 >>= (T1 & 7) * 8;
+ T0 = byte_zap(T0, 0xFE);
+}
+
+void helper_insbl (void)
+{
+ T0 <<= (T1 & 7) * 8;
+ T0 = byte_zap(T0, ~(0x01 << (T1 & 7)));
+}
+
+void helper_mskwl (void)
+{
+ T0 = byte_zap(T0, 0x03 << (T1 & 7));
+}
+
+void helper_extwl (void)
+{
+ T0 >>= (T1 & 7) * 8;
+ T0 = byte_zap(T0, 0xFC);
+}
+
+void helper_inswl (void)
+{
+ T0 <<= (T1 & 7) * 8;
+ T0 = byte_zap(T0, ~(0x03 << (T1 & 7)));
+}
+
+void helper_mskll (void)
+{
+ T0 = byte_zap(T0, 0x0F << (T1 & 7));
+}
+
+void helper_extll (void)
+{
+ T0 >>= (T1 & 7) * 8;
+ T0 = byte_zap(T0, 0xF0);
+}
+
+void helper_insll (void)
+{
+ T0 <<= (T1 & 7) * 8;
+ T0 = byte_zap(T0, ~(0x0F << (T1 & 7)));
+}
+
+void helper_zap (void)
+{
+ T0 = byte_zap(T0, T1);
+}
+
+void helper_zapnot (void)
+{
+ T0 = byte_zap(T0, ~T1);
+}
+
+void helper_mskql (void)
+{
+ T0 = byte_zap(T0, 0xFF << (T1 & 7));
+}
+
+void helper_extql (void)
+{
+ T0 >>= (T1 & 7) * 8;
+ T0 = byte_zap(T0, 0x00);
+}
+
+void helper_insql (void)
+{
+ T0 <<= (T1 & 7) * 8;
+ T0 = byte_zap(T0, ~(0xFF << (T1 & 7)));
+}
+
+void helper_mskwh (void)
+{
+ T0 = byte_zap(T0, (0x03 << (T1 & 7)) >> 8);
+}
+
+void helper_inswh (void)
+{
+ T0 >>= 64 - ((T1 & 7) * 8);
+ T0 = byte_zap(T0, ~((0x03 << (T1 & 7)) >> 8));
+}
+
+void helper_extwh (void)
+{
+ T0 <<= 64 - ((T1 & 7) * 8);
+ T0 = byte_zap(T0, ~0x07);
+}
+
+void helper_msklh (void)
+{
+ T0 = byte_zap(T0, (0x0F << (T1 & 7)) >> 8);
+}
+
+void helper_inslh (void)
+{
+ T0 >>= 64 - ((T1 & 7) * 8);
+ T0 = byte_zap(T0, ~((0x0F << (T1 & 7)) >> 8));
+}
+
+void helper_extlh (void)
+{
+ T0 <<= 64 - ((T1 & 7) * 8);
+ T0 = byte_zap(T0, ~0x0F);
+}
+
+void helper_mskqh (void)
+{
+ T0 = byte_zap(T0, (0xFF << (T1 & 7)) >> 8);
+}
+
+void helper_insqh (void)
+{
+ T0 >>= 64 - ((T1 & 7) * 8);
+ T0 = byte_zap(T0, ~((0xFF << (T1 & 7)) >> 8));
+}
+
+void helper_extqh (void)
+{
+ T0 <<= 64 - ((T1 & 7) * 8);
+ T0 = byte_zap(T0, 0x00);
+}
+
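+
+/* CMPBGE: compare the eight bytes of T0 and T1 as unsigned values and set
+ * bit i of the result when byte i of T0 is >= byte i of T1. */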
+void helper_cmpbge (void)
+{
+ uint8_t opa, opb, res;
+ int i;
+
+ res = 0;
+    for (i = 0; i < 8; i++) {
+ opa = T0 >> (i * 8);
+ opb = T1 >> (i * 8);
+ if (opa >= opb)
+ res |= 1 << i;
+ }
+ T0 = res;
+}
+
+void helper_cmov_fir (int freg)
+{
+ if (FT0 != 0)
+ env->fir[freg] = FT1;
+}
+
+void helper_sqrts (void)
+{
+ FT0 = float32_sqrt(FT0, &FP_STATUS);
+}
+
+void helper_cpys (void)
+{
+ union {
+ double d;
+ uint64_t i;
+ } p, q, r;
+
+ p.d = FT0;
+ q.d = FT1;
+ r.i = p.i & 0x8000000000000000ULL;
+ r.i |= q.i & ~0x8000000000000000ULL;
+ FT0 = r.d;
+}
+
+void helper_cpysn (void)
+{
+ union {
+ double d;
+ uint64_t i;
+ } p, q, r;
+
+ p.d = FT0;
+ q.d = FT1;
+ r.i = (~p.i) & 0x8000000000000000ULL;
+ r.i |= q.i & ~0x8000000000000000ULL;
+ FT0 = r.d;
+}
+
+void helper_cpyse (void)
+{
+ union {
+ double d;
+ uint64_t i;
+ } p, q, r;
+
+ p.d = FT0;
+ q.d = FT1;
+ r.i = p.i & 0xFFF0000000000000ULL;
+ r.i |= q.i & ~0xFFF0000000000000ULL;
+ FT0 = r.d;
+}
+
+void helper_itofs (void)
+{
+ union {
+ double d;
+ uint64_t i;
+ } p;
+
+ p.d = FT0;
+ FT0 = int64_to_float32(p.i, &FP_STATUS);
+}
+
+void helper_ftois (void)
+{
+ union {
+ double d;
+ uint64_t i;
+ } p;
+
+ p.i = float32_to_int64(FT0, &FP_STATUS);
+ FT0 = p.d;
+}
+
+void helper_sqrtt (void)
+{
+ FT0 = float64_sqrt(FT0, &FP_STATUS);
+}
+
+void helper_cmptun (void)
+{
+ union {
+ double d;
+ uint64_t i;
+ } p;
+
+ p.i = 0;
+ if (float64_is_nan(FT0) || float64_is_nan(FT1))
+ p.i = 0x4000000000000000ULL;
+ FT0 = p.d;
+}
+
+void helper_cmpteq (void)
+{
+ union {
+ double d;
+ uint64_t i;
+ } p;
+
+ p.i = 0;
+ if (float64_eq(FT0, FT1, &FP_STATUS))
+ p.i = 0x4000000000000000ULL;
+ FT0 = p.d;
+}
+
+void helper_cmptle (void)
+{
+ union {
+ double d;
+ uint64_t i;
+ } p;
+
+ p.i = 0;
+ if (float64_le(FT0, FT1, &FP_STATUS))
+ p.i = 0x4000000000000000ULL;
+ FT0 = p.d;
+}
+
+void helper_cmptlt (void)
+{
+ union {
+ double d;
+ uint64_t i;
+ } p;
+
+ p.i = 0;
+ if (float64_lt(FT0, FT1, &FP_STATUS))
+ p.i = 0x4000000000000000ULL;
+ FT0 = p.d;
+}
+
+void helper_itoft (void)
+{
+ union {
+ double d;
+ uint64_t i;
+ } p;
+
+ p.d = FT0;
+ FT0 = int64_to_float64(p.i, &FP_STATUS);
+}
+
+void helper_ftoit (void)
+{
+ union {
+ double d;
+ uint64_t i;
+ } p;
+
+ p.i = float64_to_int64(FT0, &FP_STATUS);
+ FT0 = p.d;
+}
+
+static int vaxf_is_valid (float ff)
+{
+ union {
+ float f;
+ uint32_t i;
+ } p;
+ uint32_t exp, mant;
+
+ p.f = ff;
+ exp = (p.i >> 23) & 0xFF;
+ mant = p.i & 0x007FFFFF;
+ if (exp == 0 && ((p.i & 0x80000000) || mant != 0)) {
+ /* Reserved operands / Dirty zero */
+ return 0;
+ }
+
+ return 1;
+}
+
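+
+/* VAX F-format values are handled by converting them to IEEE single:
+ * interpreting the same bit pattern as IEEE yields four times the VAX value
+ * (the bias and hidden-bit conventions differ), hence the * 0.25 / * 4.0
+ * scaling in the conversions below.  Reserved operands and dirty zeroes are
+ * filtered out first by vaxf_is_valid(). */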
+static float vaxf_to_ieee32 (float ff)
+{
+ union {
+ float f;
+ uint32_t i;
+ } p;
+ uint32_t exp;
+
+ p.f = ff;
+ exp = (p.i >> 23) & 0xFF;
+ if (exp < 3) {
+ /* Underflow */
+ p.f = 0.0;
+ } else {
+ p.f *= 0.25;
+ }
+
+ return p.f;
+}
+
+static float ieee32_to_vaxf (float fi)
+{
+ union {
+ float f;
+ uint32_t i;
+ } p;
+ uint32_t exp, mant;
+
+ p.f = fi;
+ exp = (p.i >> 23) & 0xFF;
+ mant = p.i & 0x007FFFFF;
+ if (exp == 255) {
+ /* NaN or infinity */
+ p.i = 1;
+ } else if (exp == 0) {
+ if (mant == 0) {
+ /* Zero */
+ p.i = 0;
+ } else {
+ /* Denormalized */
+ p.f *= 2.0;
+ }
+ } else {
+ if (exp >= 253) {
+ /* Overflow */
+ p.i = 1;
+ } else {
+ p.f *= 4.0;
+ }
+ }
+
+ return p.f;
+}
+
+void helper_addf (void)
+{
+ float ft0, ft1, ft2;
+
+ if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
+ /* XXX: TODO */
+ }
+ ft0 = vaxf_to_ieee32(FT0);
+ ft1 = vaxf_to_ieee32(FT1);
+ ft2 = float32_add(ft0, ft1, &FP_STATUS);
+ FT0 = ieee32_to_vaxf(ft2);
+}
+
+void helper_subf (void)
+{
+ float ft0, ft1, ft2;
+
+ if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
+ /* XXX: TODO */
+ }
+ ft0 = vaxf_to_ieee32(FT0);
+ ft1 = vaxf_to_ieee32(FT1);
+ ft2 = float32_sub(ft0, ft1, &FP_STATUS);
+ FT0 = ieee32_to_vaxf(ft2);
+}
+
+void helper_mulf (void)
+{
+ float ft0, ft1, ft2;
+
+ if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
+ /* XXX: TODO */
+ }
+ ft0 = vaxf_to_ieee32(FT0);
+ ft1 = vaxf_to_ieee32(FT1);
+ ft2 = float32_mul(ft0, ft1, &FP_STATUS);
+ FT0 = ieee32_to_vaxf(ft2);
+}
+
+void helper_divf (void)
+{
+ float ft0, ft1, ft2;
+
+ if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
+ /* XXX: TODO */
+ }
+ ft0 = vaxf_to_ieee32(FT0);
+ ft1 = vaxf_to_ieee32(FT1);
+ ft2 = float32_div(ft0, ft1, &FP_STATUS);
+ FT0 = ieee32_to_vaxf(ft2);
+}
+
+void helper_sqrtf (void)
+{
+ float ft0, ft1;
+
+ if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
+ /* XXX: TODO */
+ }
+ ft0 = vaxf_to_ieee32(FT0);
+ ft1 = float32_sqrt(ft0, &FP_STATUS);
+ FT0 = ieee32_to_vaxf(ft1);
+}
+
+void helper_itoff (void)
+{
+ /* XXX: TODO */
+}
+
+static int vaxg_is_valid (double ff)
+{
+ union {
+ double f;
+ uint64_t i;
+ } p;
+ uint64_t exp, mant;
+
+ p.f = ff;
+ exp = (p.i >> 52) & 0x7FF;
+ mant = p.i & 0x000FFFFFFFFFFFFFULL;
+ if (exp == 0 && ((p.i & 0x8000000000000000ULL) || mant != 0)) {
+ /* Reserved operands / Dirty zero */
+ return 0;
+ }
+
+ return 1;
+}
+
+static double vaxg_to_ieee64 (double fg)
+{
+ union {
+ double f;
+ uint64_t i;
+ } p;
+ uint32_t exp;
+
+ p.f = fg;
+ exp = (p.i >> 52) & 0x7FF;
+ if (exp < 3) {
+ /* Underflow */
+ p.f = 0.0;
+ } else {
+ p.f *= 0.25;
+ }
+
+ return p.f;
+}
+
+static double ieee64_to_vaxg (double fi)
+{
+ union {
+ double f;
+ uint64_t i;
+ } p;
+ uint64_t mant;
+ uint32_t exp;
+
+ p.f = fi;
+ exp = (p.i >> 52) & 0x7FF;
+ mant = p.i & 0x000FFFFFFFFFFFFFULL;
+    if (exp == 2047) {
+ /* NaN or infinity */
+ p.i = 1; /* VAX dirty zero */
+ } else if (exp == 0) {
+ if (mant == 0) {
+ /* Zero */
+ p.i = 0;
+ } else {
+ /* Denormalized */
+ p.f *= 2.0;
+ }
+ } else {
+ if (exp >= 2045) {
+ /* Overflow */
+ p.i = 1; /* VAX dirty zero */
+ } else {
+ p.f *= 4.0;
+ }
+ }
+
+ return p.f;
+}
+
+void helper_addg (void)
+{
+ double ft0, ft1, ft2;
+
+ if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
+ /* XXX: TODO */
+ }
+ ft0 = vaxg_to_ieee64(FT0);
+ ft1 = vaxg_to_ieee64(FT1);
+ ft2 = float64_add(ft0, ft1, &FP_STATUS);
+ FT0 = ieee64_to_vaxg(ft2);
+}
+
+void helper_subg (void)
+{
+ double ft0, ft1, ft2;
+
+ if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
+ /* XXX: TODO */
+ }
+ ft0 = vaxg_to_ieee64(FT0);
+ ft1 = vaxg_to_ieee64(FT1);
+ ft2 = float64_sub(ft0, ft1, &FP_STATUS);
+ FT0 = ieee64_to_vaxg(ft2);
+}
+
+void helper_mulg (void)
+{
+ double ft0, ft1, ft2;
+
+ if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
+ /* XXX: TODO */
+ }
+ ft0 = vaxg_to_ieee64(FT0);
+ ft1 = vaxg_to_ieee64(FT1);
+ ft2 = float64_mul(ft0, ft1, &FP_STATUS);
+ FT0 = ieee64_to_vaxg(ft2);
+}
+
+void helper_divg (void)
+{
+ double ft0, ft1, ft2;
+
+ if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
+ /* XXX: TODO */
+ }
+ ft0 = vaxg_to_ieee64(FT0);
+ ft1 = vaxg_to_ieee64(FT1);
+ ft2 = float64_div(ft0, ft1, &FP_STATUS);
+ FT0 = ieee64_to_vaxg(ft2);
+}
+
+void helper_sqrtg (void)
+{
+ double ft0, ft1;
+
+ if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
+ /* XXX: TODO */
+ }
+ ft0 = vaxg_to_ieee64(FT0);
+ ft1 = float64_sqrt(ft0, &FP_STATUS);
+ FT0 = ieee64_to_vaxg(ft1);
+}
+
+void helper_cmpgeq (void)
+{
+ union {
+ double d;
+ uint64_t u;
+ } p;
+ double ft0, ft1;
+
+ if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
+ /* XXX: TODO */
+ }
+ ft0 = vaxg_to_ieee64(FT0);
+ ft1 = vaxg_to_ieee64(FT1);
+ p.u = 0;
+ if (float64_eq(ft0, ft1, &FP_STATUS))
+ p.u = 0x4000000000000000ULL;
+ FT0 = p.d;
+}
+
+void helper_cmpglt (void)
+{
+ union {
+ double d;
+ uint64_t u;
+ } p;
+ double ft0, ft1;
+
+ if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
+ /* XXX: TODO */
+ }
+ ft0 = vaxg_to_ieee64(FT0);
+ ft1 = vaxg_to_ieee64(FT1);
+ p.u = 0;
+ if (float64_lt(ft0, ft1, &FP_STATUS))
+ p.u = 0x4000000000000000ULL;
+ FT0 = p.d;
+}
+
+void helper_cmpgle (void)
+{
+ union {
+ double d;
+ uint64_t u;
+ } p;
+ double ft0, ft1;
+
+ if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
+ /* XXX: TODO */
+ }
+ ft0 = vaxg_to_ieee64(FT0);
+ ft1 = vaxg_to_ieee64(FT1);
+ p.u = 0;
+ if (float64_le(ft0, ft1, &FP_STATUS))
+ p.u = 0x4000000000000000ULL;
+ FT0 = p.d;
+}
+
+void helper_cvtqs (void)
+{
+ union {
+ double d;
+ uint64_t u;
+ } p;
+
+ p.d = FT0;
+ FT0 = (float)p.u;
+}
+
+void helper_cvttq (void)
+{
+ union {
+ double d;
+ uint64_t u;
+ } p;
+
+ p.u = FT0;
+ FT0 = p.d;
+}
+
+void helper_cvtqt (void)
+{
+ union {
+ double d;
+ uint64_t u;
+ } p;
+
+ p.d = FT0;
+ FT0 = p.u;
+}
+
+void helper_cvtqf (void)
+{
+ union {
+ double d;
+ uint64_t u;
+ } p;
+
+ p.d = FT0;
+ FT0 = ieee32_to_vaxf(p.u);
+}
+
+void helper_cvtgf (void)
+{
+ double ft0;
+
+ ft0 = vaxg_to_ieee64(FT0);
+ FT0 = ieee32_to_vaxf(ft0);
+}
+
+void helper_cvtgd (void)
+{
+ /* XXX: TODO */
+}
+
+void helper_cvtgq (void)
+{
+ union {
+ double d;
+ uint64_t u;
+ } p;
+
+ p.u = vaxg_to_ieee64(FT0);
+ FT0 = p.d;
+}
+
+void helper_cvtqg (void)
+{
+ union {
+ double d;
+ uint64_t u;
+ } p;
+
+ p.d = FT0;
+ FT0 = ieee64_to_vaxg(p.u);
+}
+
+void helper_cvtdg (void)
+{
+ /* XXX: TODO */
+}
+
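+
+/* CVTLQ / CVTQL shuffle between the 64-bit quadword layout and the Alpha
+ * "register format" longword, which keeps bits <31:30> of the longword in
+ * bits <63:62> of the FP register and the low bits in bits <58:29>. */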
+void helper_cvtlq (void)
+{
+ union {
+ double d;
+ uint64_t u;
+ } p, q;
+
+ p.d = FT0;
+ q.u = (p.u >> 29) & 0x3FFFFFFF;
+    q.u |= (p.u >> 32) & 0xC0000000;
+ q.u = (int64_t)((int32_t)q.u);
+ FT0 = q.d;
+}
+
+static inline void __helper_cvtql (int s, int v)
+{
+ union {
+ double d;
+ uint64_t u;
+ } p, q;
+
+ p.d = FT0;
+ q.u = ((uint64_t)(p.u & 0xC0000000)) << 32;
+ q.u |= ((uint64_t)(p.u & 0x7FFFFFFF)) << 29;
+ FT0 = q.d;
+ if (v && (int64_t)((int32_t)p.u) != (int64_t)p.u) {
+ helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
+ }
+ if (s) {
+ /* TODO */
+ }
+}
+
+void helper_cvtql (void)
+{
+ __helper_cvtql(0, 0);
+}
+
+void helper_cvtqlv (void)
+{
+ __helper_cvtql(0, 1);
+}
+
+void helper_cvtqlsv (void)
+{
+ __helper_cvtql(1, 1);
+}
+
+void helper_cmpfeq (void)
+{
+ if (float64_eq(FT0, FT1, &FP_STATUS))
+ T0 = 1;
+ else
+ T0 = 0;
+}
+
+void helper_cmpfne (void)
+{
+ if (float64_eq(FT0, FT1, &FP_STATUS))
+ T0 = 0;
+ else
+ T0 = 1;
+}
+
+void helper_cmpflt (void)
+{
+ if (float64_lt(FT0, FT1, &FP_STATUS))
+ T0 = 1;
+ else
+ T0 = 0;
+}
+
+void helper_cmpfle (void)
+{
+ if (float64_lt(FT0, FT1, &FP_STATUS))
+ T0 = 1;
+ else
+ T0 = 0;
+}
+
+void helper_cmpfgt (void)
+{
+ if (float64_le(FT0, FT1, &FP_STATUS))
+ T0 = 0;
+ else
+ T0 = 1;
+}
+
+void helper_cmpfge (void)
+{
+ if (float64_lt(FT0, FT1, &FP_STATUS))
+ T0 = 0;
+ else
+ T0 = 1;
+}
+
+#if !defined (CONFIG_USER_ONLY)
+void helper_mfpr (int iprn)
+{
+ uint64_t val;
+
+ if (cpu_alpha_mfpr(env, iprn, &val) == 0)
+ T0 = val;
+}
+
+void helper_mtpr (int iprn)
+{
+ cpu_alpha_mtpr(env, iprn, T0, NULL);
+}
+#endif
+
+/*****************************************************************************/
+/* Softmmu support */
+#if !defined (CONFIG_USER_ONLY)
+
+#define GETPC() (__builtin_return_address(0))
+
+/* XXX: the two following helpers are pure hacks.
+ * Once the PALcode is properly emulated, we should never see
+ * HW_LD / HW_ST instructions.
+ */
+void helper_ld_phys_to_virt (void)
+{
+ uint64_t tlb_addr, physaddr;
+ int index, is_user;
+ void *retaddr;
+
+ is_user = (env->ps >> 3) & 3;
+ index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ redo:
+ tlb_addr = env->tlb_table[is_user][index].addr_read;
+ if ((T0 & TARGET_PAGE_MASK) ==
+ (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+ physaddr = T0 + env->tlb_table[is_user][index].addend;
+ } else {
+ /* the page is not in the TLB : fill it */
+ retaddr = GETPC();
+ tlb_fill(T0, 0, is_user, retaddr);
+ goto redo;
+ }
+ T0 = physaddr;
+}
+
+void helper_st_phys_to_virt (void)
+{
+ uint64_t tlb_addr, physaddr;
+ int index, is_user;
+ void *retaddr;
+
+ is_user = (env->ps >> 3) & 3;
+ index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ redo:
+ tlb_addr = env->tlb_table[is_user][index].addr_write;
+ if ((T0 & TARGET_PAGE_MASK) ==
+ (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+ physaddr = T0 + env->tlb_table[is_user][index].addend;
+ } else {
+ /* the page is not in the TLB : fill it */
+ retaddr = GETPC();
+ tlb_fill(T0, 1, is_user, retaddr);
+ goto redo;
+ }
+ T0 = physaddr;
+}
+
+#define MMUSUFFIX _mmu
+
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+/* try to fill the TLB and return an exception if error. If retaddr is
+ NULL, it means that the function was called in C code (i.e. not
+ from generated code or from helper.c) */
+/* XXX: fix it to restore all registers */
+void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
+{
+ TranslationBlock *tb;
+ CPUState *saved_env;
+ target_phys_addr_t pc;
+ int ret;
+
+ /* XXX: hack to restore env in all cases, even if not called from
+ generated code */
+ saved_env = env;
+ env = cpu_single_env;
+ ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, is_user, 1);
+ if (!likely(ret == 0)) {
+ if (likely(retaddr)) {
+ /* now we have a real cpu fault */
+ pc = (target_phys_addr_t)retaddr;
+ tb = tb_find_pc(pc);
+ if (likely(tb)) {
+ /* the PC is inside the translated code. It means that we have
+ a virtual CPU fault */
+ cpu_restore_state(tb, env, pc, NULL);
+ }
+ }
+ /* Exception index and error code are already set */
+ cpu_loop_exit();
+ }
+ env = saved_env;
+}
+
+#endif
diff --git a/target-alpha/op_helper.h b/target-alpha/op_helper.h
new file mode 100644
index 0000000000..fb55eff37c
--- /dev/null
+++ b/target-alpha/op_helper.h
@@ -0,0 +1,141 @@
+/*
+ * Alpha emulation cpu micro-operations helpers definitions for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+void helper_call_pal (uint32_t palcode);
+void helper_excp (uint32_t excp, uint32_t error);
+void helper_amask (void);
+void helper_load_pcc (void);
+void helper_load_implver (void);
+void helper_load_fpcr (void);
+void helper_store_fpcr (void);
+void helper_load_irf (void);
+void helper_set_irf (void);
+void helper_clear_irf (void);
+void helper_addqv (void);
+void helper_addlv (void);
+void helper_subqv (void);
+void helper_sublv (void);
+void helper_mullv (void);
+void helper_mulqv (void);
+void helper_umulh (void);
+void helper_ctpop (void);
+void helper_ctlz (void);
+void helper_cttz (void);
+void helper_mskbl (void);
+void helper_extbl (void);
+void helper_insbl (void);
+void helper_mskwl (void);
+void helper_extwl (void);
+void helper_inswl (void);
+void helper_mskll (void);
+void helper_extll (void);
+void helper_insll (void);
+void helper_zap (void);
+void helper_zapnot (void);
+void helper_mskql (void);
+void helper_extql (void);
+void helper_insql (void);
+void helper_mskwh (void);
+void helper_inswh (void);
+void helper_extwh (void);
+void helper_msklh (void);
+void helper_inslh (void);
+void helper_extlh (void);
+void helper_mskqh (void);
+void helper_insqh (void);
+void helper_extqh (void);
+void helper_cmpbge (void);
+void helper_cmov_fir (int freg);
+
+double helper_ldff_raw (target_ulong ea);
+void helper_stff_raw (target_ulong ea, double op);
+double helper_ldfg_raw (target_ulong ea);
+void helper_stfg_raw (target_ulong ea, double op);
+#if !defined(CONFIG_USER_ONLY)
+double helper_ldff_user (target_ulong ea);
+void helper_stff_user (target_ulong ea, double op);
+double helper_ldff_kernel (target_ulong ea);
+void helper_stff_kernel (target_ulong ea, double op);
+double helper_ldff_data (target_ulong ea);
+void helper_stff_data (target_ulong ea, double op);
+double helper_ldfg_user (target_ulong ea);
+void helper_stfg_user (target_ulong ea, double op);
+double helper_ldfg_kernel (target_ulong ea);
+void helper_stfg_kernel (target_ulong ea, double op);
+double helper_ldfg_data (target_ulong ea);
+void helper_stfg_data (target_ulong ea, double op);
+#endif
+
+void helper_sqrts (void);
+void helper_cpys (void);
+void helper_cpysn (void);
+void helper_cpyse (void);
+void helper_itofs (void);
+void helper_ftois (void);
+
+void helper_sqrtt (void);
+void helper_cmptun (void);
+void helper_cmpteq (void);
+void helper_cmptle (void);
+void helper_cmptlt (void);
+void helper_itoft (void);
+void helper_ftoit (void);
+
+void helper_addf (void);
+void helper_subf (void);
+void helper_mulf (void);
+void helper_divf (void);
+void helper_sqrtf (void);
+void helper_cmpfeq (void);
+void helper_cmpfne (void);
+void helper_cmpflt (void);
+void helper_cmpfle (void);
+void helper_cmpfgt (void);
+void helper_cmpfge (void);
+void helper_itoff (void);
+
+void helper_addg (void);
+void helper_subg (void);
+void helper_mulg (void);
+void helper_divg (void);
+void helper_sqrtg (void);
+void helper_cmpgeq (void);
+void helper_cmpglt (void);
+void helper_cmpgle (void);
+
+void helper_cvtqs (void);
+void helper_cvttq (void);
+void helper_cvtqt (void);
+void helper_cvtqf (void);
+void helper_cvtgf (void);
+void helper_cvtgd (void);
+void helper_cvtgq (void);
+void helper_cvtqg (void);
+void helper_cvtdg (void);
+void helper_cvtlq (void);
+void helper_cvtql (void);
+void helper_cvtqlv (void);
+void helper_cvtqlsv (void);
+
+void helper_mfpr (int iprn);
+void helper_mtpr (int iprn);
+void helper_ld_phys_to_virt (void);
+void helper_st_phys_to_virt (void);
+void helper_tb_flush (void);
diff --git a/target-alpha/op_helper_mem.h b/target-alpha/op_helper_mem.h
new file mode 100644
index 0000000000..7ab5718b94
--- /dev/null
+++ b/target-alpha/op_helper_mem.h
@@ -0,0 +1,40 @@
+/*
+ * Alpha emulation cpu micro-operations helpers for memory accesses for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* XXX: TODO */
+double glue(helper_ldff, MEMSUFFIX) (target_ulong ea)
+{
+ return 0;
+}
+
+void glue(helper_stff, MEMSUFFIX) (target_ulong ea, double op)
+{
+}
+
+double glue(helper_ldfg, MEMSUFFIX) (target_ulong ea)
+{
+ return 0;
+}
+
+void glue(helper_stfg, MEMSUFFIX) (target_ulong ea, double op)
+{
+}
+
+#undef MEMSUFFIX
diff --git a/target-alpha/op_mem.h b/target-alpha/op_mem.h
new file mode 100644
index 0000000000..9f017a2af3
--- /dev/null
+++ b/target-alpha/op_mem.h
@@ -0,0 +1,125 @@
+/*
+ * Alpha emulation cpu micro-operations for memory accesses for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define DEBUG_MEM_ACCESSES
+#if defined (DEBUG_MEM_ACCESSES)
+void helper_print_mem_EA (target_ulong EA);
+#define print_mem_EA(EA) do { helper_print_mem_EA(EA); } while (0)
+#else
+#define print_mem_EA(EA) do { } while (0)
+#endif
+
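+/* Simplified load-locked / store-conditional: LDx_L records the locked
+ * address in env->lock and STx_C performs the store only when the address
+ * still matches; the lock is cleared in either case.
+ */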
+static inline uint32_t glue(ldl_l, MEMSUFFIX) (target_ulong EA)
+{
+ env->lock = EA;
+
+ return glue(ldl, MEMSUFFIX)(EA);
+}
+
+static inline uint64_t glue(ldq_l, MEMSUFFIX) (target_ulong EA)
+{
+ env->lock = EA;
+
+ return glue(ldq, MEMSUFFIX)(EA);
+}
+
+static inline void glue(stl_c, MEMSUFFIX) (target_ulong EA, uint32_t data)
+{
+ if (EA == env->lock) {
+ glue(stl, MEMSUFFIX)(EA, data);
+ T0 = 0;
+ } else {
+ T0 = 1;
+ }
+ env->lock = -1;
+}
+
+static inline void glue(stq_c, MEMSUFFIX) (target_ulong EA, uint64_t data)
+{
+ if (EA == env->lock) {
+ glue(stq, MEMSUFFIX)(EA, data);
+ T0 = 0;
+ } else {
+ T0 = 1;
+ }
+ env->lock = -1;
+}
+
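+/* Integer load/store micro-ops: the effective address is in T0 and the data
+ * moves through T1.
+ */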
+#define ALPHA_LD_OP(name, op) \
+void OPPROTO glue(glue(op_ld, name), MEMSUFFIX) (void) \
+{ \
+ print_mem_EA(T0); \
+ T1 = glue(op, MEMSUFFIX)(T0); \
+ RETURN(); \
+}
+
+#define ALPHA_ST_OP(name, op) \
+void OPPROTO glue(glue(op_st, name), MEMSUFFIX) (void) \
+{ \
+ print_mem_EA(T0); \
+ glue(op, MEMSUFFIX)(T0, T1); \
+ RETURN(); \
+}
+
+ALPHA_LD_OP(bu, ldub);
+ALPHA_ST_OP(b, stb);
+ALPHA_LD_OP(wu, lduw);
+ALPHA_ST_OP(w, stw);
+ALPHA_LD_OP(l, ldl);
+ALPHA_ST_OP(l, stl);
+ALPHA_LD_OP(q, ldq);
+ALPHA_ST_OP(q, stq);
+
+ALPHA_LD_OP(q_u, ldq);
+ALPHA_ST_OP(q_u, stq);
+
+ALPHA_LD_OP(l_l, ldl_l);
+ALPHA_LD_OP(q_l, ldq_l);
+ALPHA_ST_OP(l_c, stl_c);
+ALPHA_ST_OP(q_c, stq_c);
+
+#define ALPHA_LDF_OP(name, op) \
+void OPPROTO glue(glue(op_ld, name), MEMSUFFIX) (void) \
+{ \
+ print_mem_EA(T0); \
+ FT1 = glue(op, MEMSUFFIX)(T0); \
+ RETURN(); \
+}
+
+#define ALPHA_STF_OP(name, op) \
+void OPPROTO glue(glue(op_st, name), MEMSUFFIX) (void) \
+{ \
+ print_mem_EA(T0); \
+ glue(op, MEMSUFFIX)(T0, FT1); \
+ RETURN(); \
+}
+
+ALPHA_LDF_OP(t, ldfq);
+ALPHA_STF_OP(t, stfq);
+ALPHA_LDF_OP(s, ldfl);
+ALPHA_STF_OP(s, stfl);
+
+/* VAX floating point */
+ALPHA_LDF_OP(f, helper_ldff);
+ALPHA_STF_OP(f, helper_stff);
+ALPHA_LDF_OP(g, helper_ldfg);
+ALPHA_STF_OP(g, helper_stfg);
+
+#undef MEMSUFFIX
diff --git a/target-alpha/op_template.h b/target-alpha/op_template.h
new file mode 100644
index 0000000000..e96303ba36
--- /dev/null
+++ b/target-alpha/op_template.h
@@ -0,0 +1,167 @@
+/*
+ * Alpha emulation cpu micro-operations templates for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
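+/* This template is included once per register number with REG defined; it
+ * expands into the per-register micro-ops for the integer and floating-point
+ * register files.
+ */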
+/* Optimized constant loads */
+#if REG < 3
+void OPPROTO glue(op_reset_T, REG) (void)
+{
+ glue(T, REG) = 0;
+ RETURN();
+}
+
+void OPPROTO glue(op_reset_FT, REG) (void)
+{
+ glue(FT, REG) = 0;
+ RETURN();
+}
+
+/* XXX: This can be great on most RISC machines */
+#if !defined(__i386__) && !defined(__x86_64__)
+void OPPROTO glue(op_set_s16_T, REG) (void)
+{
+ glue(T, REG) = (int16_t)PARAM(1);
+ RETURN();
+}
+
+void OPPROTO glue(op_set_u16_T, REG) (void)
+{
+ glue(T, REG) = (uint16_t)PARAM(1);
+ RETURN();
+}
+#endif
+
+void OPPROTO glue(op_set_s32_T, REG) (void)
+{
+ glue(T, REG) = (int32_t)PARAM(1);
+ RETURN();
+}
+
+void OPPROTO glue(op_set_u32_T, REG) (void)
+{
+ glue(T, REG) = (uint32_t)PARAM(1);
+ RETURN();
+}
+
+#if 0 // Qemu does not know how to do this...
+void OPPROTO glue(op_set_64_T, REG) (void)
+{
+ glue(T, REG) = (int64_t)PARAM(1);
+ RETURN();
+}
+#else
+void OPPROTO glue(op_set_64_T, REG) (void)
+{
+ glue(T, REG) = ((int64_t)PARAM(1) << 32) | (int64_t)PARAM(2);
+ RETURN();
+}
+#endif
+
+#endif /* REG < 3 */
+
+/* Fixed-point register moves */
+#if REG < 31
+void OPPROTO glue(op_load_T0_ir, REG) (void)
+{
+ T0 = env->ir[REG];
+ RETURN();
+}
+
+void OPPROTO glue(op_load_T1_ir, REG) (void)
+{
+ T1 = env->ir[REG];
+ RETURN();
+}
+
+void OPPROTO glue(op_load_T2_ir, REG) (void)
+{
+ T2 = env->ir[REG];
+ RETURN();
+}
+
+void OPPROTO glue(op_store_T0_ir, REG) (void)
+{
+ env->ir[REG] = T0;
+ RETURN();
+}
+
+void OPPROTO glue(op_store_T1_ir, REG) (void)
+{
+ env->ir[REG] = T1;
+ RETURN();
+}
+
+void OPPROTO glue(op_store_T2_ir, REG) (void)
+{
+ env->ir[REG] = T2;
+ RETURN();
+}
+
+void OPPROTO glue(op_cmov_ir, REG) (void)
+{
+ if (T0)
+ env->ir[REG] = T1;
+ RETURN();
+}
+
+/* floating point registers moves */
+void OPPROTO glue(op_load_FT0_fir, REG) (void)
+{
+ FT0 = env->fir[REG];
+ RETURN();
+}
+
+void OPPROTO glue(op_load_FT1_fir, REG) (void)
+{
+ FT1 = env->fir[REG];
+ RETURN();
+}
+
+void OPPROTO glue(op_load_FT2_fir, REG) (void)
+{
+ FT2 = env->fir[REG];
+ RETURN();
+}
+
+void OPPROTO glue(op_store_FT0_fir, REG) (void)
+{
+ env->fir[REG] = FT0;
+ RETURN();
+}
+
+void OPPROTO glue(op_store_FT1_fir, REG) (void)
+{
+ env->fir[REG] = FT1;
+ RETURN();
+}
+
+void OPPROTO glue(op_store_FT2_fir, REG) (void)
+{
+ env->fir[REG] = FT2;
+ RETURN();
+}
+
+void OPPROTO glue(op_cmov_fir, REG) (void)
+{
+ helper_cmov_fir(REG);
+ RETURN();
+}
+#endif /* REG < 31 */
+
+#undef REG
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
new file mode 100644
index 0000000000..0afd8964ce
--- /dev/null
+++ b/target-alpha/translate.c
@@ -0,0 +1,2117 @@
+/*
+ * Alpha emulation cpu translation for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "disas.h"
+
+#define DO_SINGLE_STEP
+#define GENERATE_NOP
+#define ALPHA_DEBUG_DISAS
+#define DO_TB_FLUSH
+
+typedef struct DisasContext DisasContext;
+struct DisasContext {
+ uint64_t pc;
+ int mem_idx;
+#if !defined (CONFIG_USER_ONLY)
+ int pal_mode;
+#endif
+ uint32_t amask;
+};
+
+#ifdef USE_DIRECT_JUMP
+#define TBPARAM(x)
+#else
+#define TBPARAM(x) (long)(x)
+#endif
+
+enum {
+#define DEF(s, n, copy_size) INDEX_op_ ## s,
+#include "opc.h"
+#undef DEF
+ NB_OPS,
+};
+
+static uint16_t *gen_opc_ptr;
+static uint32_t *gen_opparam_ptr;
+
+#include "gen-op.h"
+
+static inline void gen_op_nop (void)
+{
+#if defined(GENERATE_NOP)
+ gen_op_no_op();
+#endif
+}
+
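+/* GEN32 builds a 32-entry table of the per-register micro-ops generated by
+ * op_template.h and a wrapper that picks the entry for register n at
+ * translation time.
+ */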
+#define GEN32(func, NAME) \
+static GenOpFunc *NAME ## _table [32] = { \
+NAME ## 0, NAME ## 1, NAME ## 2, NAME ## 3, \
+NAME ## 4, NAME ## 5, NAME ## 6, NAME ## 7, \
+NAME ## 8, NAME ## 9, NAME ## 10, NAME ## 11, \
+NAME ## 12, NAME ## 13, NAME ## 14, NAME ## 15, \
+NAME ## 16, NAME ## 17, NAME ## 18, NAME ## 19, \
+NAME ## 20, NAME ## 21, NAME ## 22, NAME ## 23, \
+NAME ## 24, NAME ## 25, NAME ## 26, NAME ## 27, \
+NAME ## 28, NAME ## 29, NAME ## 30, NAME ## 31, \
+}; \
+static inline void func(int n) \
+{ \
+ NAME ## _table[n](); \
+}
+
+/* IR moves */
+/* Special hacks for ir31 */
+#define gen_op_load_T0_ir31 gen_op_reset_T0
+#define gen_op_load_T1_ir31 gen_op_reset_T1
+#define gen_op_load_T2_ir31 gen_op_reset_T2
+#define gen_op_store_T0_ir31 gen_op_nop
+#define gen_op_store_T1_ir31 gen_op_nop
+#define gen_op_store_T2_ir31 gen_op_nop
+#define gen_op_cmov_ir31 gen_op_nop
+GEN32(gen_op_load_T0_ir, gen_op_load_T0_ir);
+GEN32(gen_op_load_T1_ir, gen_op_load_T1_ir);
+GEN32(gen_op_load_T2_ir, gen_op_load_T2_ir);
+GEN32(gen_op_store_T0_ir, gen_op_store_T0_ir);
+GEN32(gen_op_store_T1_ir, gen_op_store_T1_ir);
+GEN32(gen_op_store_T2_ir, gen_op_store_T2_ir);
+GEN32(gen_op_cmov_ir, gen_op_cmov_ir);
+
+static inline void gen_load_ir (DisasContext *ctx, int irn, int Tn)
+{
+ switch (Tn) {
+ case 0:
+ gen_op_load_T0_ir(irn);
+ break;
+ case 1:
+ gen_op_load_T1_ir(irn);
+ break;
+ case 2:
+ gen_op_load_T2_ir(irn);
+ break;
+ }
+}
+
+static inline void gen_store_ir (DisasContext *ctx, int irn, int Tn)
+{
+ switch (Tn) {
+ case 0:
+ gen_op_store_T0_ir(irn);
+ break;
+ case 1:
+ gen_op_store_T1_ir(irn);
+ break;
+ case 2:
+ gen_op_store_T2_ir(irn);
+ break;
+ }
+}
+
+/* FIR moves */
+/* Special hacks for fir31 */
+#define gen_op_load_FT0_fir31 gen_op_reset_FT0
+#define gen_op_load_FT1_fir31 gen_op_reset_FT1
+#define gen_op_load_FT2_fir31 gen_op_reset_FT2
+#define gen_op_store_FT0_fir31 gen_op_nop
+#define gen_op_store_FT1_fir31 gen_op_nop
+#define gen_op_store_FT2_fir31 gen_op_nop
+#define gen_op_cmov_fir31 gen_op_nop
+GEN32(gen_op_load_FT0_fir, gen_op_load_FT0_fir);
+GEN32(gen_op_load_FT1_fir, gen_op_load_FT1_fir);
+GEN32(gen_op_load_FT2_fir, gen_op_load_FT2_fir);
+GEN32(gen_op_store_FT0_fir, gen_op_store_FT0_fir);
+GEN32(gen_op_store_FT1_fir, gen_op_store_FT1_fir);
+GEN32(gen_op_store_FT2_fir, gen_op_store_FT2_fir);
+GEN32(gen_op_cmov_fir, gen_op_cmov_fir);
+
+static inline void gen_load_fir (DisasContext *ctx, int firn, int Tn)
+{
+ switch (Tn) {
+ case 0:
+ gen_op_load_FT0_fir(firn);
+ break;
+ case 1:
+ gen_op_load_FT1_fir(firn);
+ break;
+ case 2:
+ gen_op_load_FT2_fir(firn);
+ break;
+ }
+}
+
+static inline void gen_store_fir (DisasContext *ctx, int firn, int Tn)
+{
+ switch (Tn) {
+ case 0:
+ gen_op_store_FT0_fir(firn);
+ break;
+ case 1:
+ gen_op_store_FT1_fir(firn);
+ break;
+ case 2:
+ gen_op_store_FT2_fir(firn);
+ break;
+ }
+}
+
+/* Memory moves */
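+/* The per-width load/store micro-op tables are indexed by ctx->mem_idx
+ * (the current access mode); user-only builds have a single "raw" entry.
+ */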
+#if defined(CONFIG_USER_ONLY)
+#define OP_LD_TABLE(width) \
+static GenOpFunc *gen_op_ld##width[] = { \
+ &gen_op_ld##width##_raw, \
+}
+#define OP_ST_TABLE(width) \
+static GenOpFunc *gen_op_st##width[] = { \
+ &gen_op_st##width##_raw, \
+}
+#else
+#define OP_LD_TABLE(width) \
+static GenOpFunc *gen_op_ld##width[] = { \
+ &gen_op_ld##width##_kernel, \
+ &gen_op_ld##width##_user, /* executive */ \
+ &gen_op_ld##width##_data, /* supervisor */ \
+ &gen_op_ld##width##_data, /* user */ \
+}
+#define OP_ST_TABLE(width) \
+static GenOpFunc *gen_op_st##width[] = { \
+ &gen_op_st##width##_kernel, \
+ &gen_op_st##width##_user, /* executive */ \
+ &gen_op_st##width##_data, /* supervisor */ \
+ &gen_op_st##width##_data, /* user */ \
+}
+#endif
+
+#define GEN_LD(width) \
+OP_LD_TABLE(width); \
+static void gen_ld##width (DisasContext *ctx) \
+{ \
+ (*gen_op_ld##width[ctx->mem_idx])(); \
+}
+
+#define GEN_ST(width) \
+OP_ST_TABLE(width); \
+static void gen_st##width (DisasContext *ctx) \
+{ \
+ (*gen_op_st##width[ctx->mem_idx])(); \
+}
+
+GEN_LD(bu);
+GEN_ST(b);
+GEN_LD(wu);
+GEN_ST(w);
+GEN_LD(l);
+GEN_ST(l);
+GEN_LD(q);
+GEN_ST(q);
+GEN_LD(q_u);
+GEN_ST(q_u);
+GEN_LD(l_l);
+GEN_ST(l_c);
+GEN_LD(q_l);
+GEN_ST(q_c);
+
+GEN_LD(f);
+GEN_ST(f);
+GEN_LD(g);
+GEN_ST(g);
+GEN_LD(s);
+GEN_ST(s);
+GEN_LD(t);
+GEN_ST(t);
+
+#if defined(__i386__) || defined(__x86_64__)
+static inline void gen_op_set_s16_T0 (int16_t imm)
+{
+ gen_op_set_s32_T0((int32_t)imm);
+}
+
+static inline void gen_op_set_s16_T1 (int16_t imm)
+{
+ gen_op_set_s32_T1((int32_t)imm);
+}
+
+static inline void gen_op_set_u16_T0 (uint16_t imm)
+{
+ gen_op_set_s32_T0((uint32_t)imm);
+}
+
+static inline void gen_op_set_u16_T1 (uint16_t imm)
+{
+ gen_op_set_s32_T1((uint32_t)imm);
+}
+#endif
+
+static inline void gen_set_sT0 (DisasContext *ctx, int64_t imm)
+{
+ int32_t imm32;
+ int16_t imm16;
+
+ imm32 = imm;
+ if (imm32 == imm) {
+ imm16 = imm;
+ if (imm16 == imm) {
+ if (imm == 0) {
+ gen_op_reset_T0();
+ } else {
+ gen_op_set_s16_T0(imm16);
+ }
+ } else {
+ gen_op_set_s32_T0(imm32);
+ }
+ } else {
+#if 0 // Qemu does not know how to do this...
+ gen_op_set_64_T0(imm);
+#else
+ gen_op_set_64_T0(imm >> 32, imm);
+#endif
+ }
+}
+
+static inline void gen_set_sT1 (DisasContext *ctx, int64_t imm)
+{
+ int32_t imm32;
+ int16_t imm16;
+
+ imm32 = imm;
+ if (imm32 == imm) {
+ imm16 = imm;
+ if (imm16 == imm) {
+ if (imm == 0) {
+ gen_op_reset_T1();
+ } else {
+ gen_op_set_s16_T1(imm16);
+ }
+ } else {
+ gen_op_set_s32_T1(imm32);
+ }
+ } else {
+#if 0 // Qemu does not know how to do this...
+ gen_op_set_64_T1(imm);
+#else
+ gen_op_set_64_T1(imm >> 32, imm);
+#endif
+ }
+}
+
+static inline void gen_set_uT0 (DisasContext *ctx, uint64_t imm)
+{
+ if (!(imm >> 32)) {
+ if (!(imm >> 16)) {
+ if (imm == 0)
+ gen_op_reset_T0();
+ else
+ gen_op_set_u16_T0(imm);
+ } else {
+ gen_op_set_u32_T0(imm);
+ }
+ } else {
+#if 0 // Qemu does not know how to do this...
+ gen_op_set_64_T0(imm);
+#else
+ gen_op_set_64_T0(imm >> 32, imm);
+#endif
+ }
+}
+
+static inline void gen_set_uT1 (DisasContext *ctx, uint64_t imm)
+{
+ if (!(imm >> 32)) {
+ if (!(imm >> 16)) {
+ if (imm == 0)
+ gen_op_reset_T1();
+ else
+ gen_op_set_u16_T1(imm);
+ } else {
+ gen_op_set_u32_T1(imm);
+ }
+ } else {
+#if 0 // Qemu does not know how to do this...
+ gen_op_set_64_T1(imm);
+#else
+ gen_op_set_64_T1(imm >> 32, imm);
+#endif
+ }
+}
+
+static inline void gen_update_pc (DisasContext *ctx)
+{
+ if (!(ctx->pc >> 32)) {
+ gen_op_update_pc32(ctx->pc);
+ } else {
+#if 0 // Qemu does not know how to do this...
+ gen_op_update_pc(ctx->pc);
+#else
+ gen_op_update_pc(ctx->pc >> 32, ctx->pc);
+#endif
+ }
+}
+
+static inline void _gen_op_bcond (DisasContext *ctx)
+{
+#if 0 // Qemu does not know how to do this...
+ gen_op_bcond(ctx->pc);
+#else
+ gen_op_bcond(ctx->pc >> 32, ctx->pc);
+#endif
+}
+
+static inline void gen_excp (DisasContext *ctx, int exception, int error_code)
+{
+ gen_update_pc(ctx);
+ gen_op_excp(exception, error_code);
+}
+
+static inline void gen_invalid (DisasContext *ctx)
+{
+ gen_excp(ctx, EXCP_OPCDEC, 0);
+}
+
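+/* Memory-format instructions: the effective address ir[rb] + disp16 is built
+ * in T0 and the data moves through T1; the clear flag masks the address for
+ * the unaligned LDQ_U / STQ_U forms.
+ */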
+static void gen_load_mem (DisasContext *ctx,
+ void (*gen_load_op)(DisasContext *ctx),
+ int ra, int rb, int32_t disp16, int clear)
+{
+ if (ra == 31 && disp16 == 0) {
+ /* UNOP */
+ gen_op_nop();
+ } else {
+ gen_load_ir(ctx, rb, 0);
+ if (disp16 != 0) {
+ gen_set_sT1(ctx, disp16);
+ gen_op_addq();
+ }
+ if (clear)
+ gen_op_n7();
+ (*gen_load_op)(ctx);
+ gen_store_ir(ctx, ra, 1);
+ }
+}
+
+static void gen_store_mem (DisasContext *ctx,
+ void (*gen_store_op)(DisasContext *ctx),
+ int ra, int rb, int32_t disp16, int clear)
+{
+ gen_load_ir(ctx, rb, 0);
+ if (disp16 != 0) {
+ gen_set_sT1(ctx, disp16);
+ gen_op_addq();
+ }
+ if (clear)
+ gen_op_n7();
+ gen_load_ir(ctx, ra, 1);
+ (*gen_store_op)(ctx);
+}
+
+static void gen_load_fmem (DisasContext *ctx,
+ void (*gen_load_fop)(DisasContext *ctx),
+ int ra, int rb, int32_t disp16)
+{
+ gen_load_ir(ctx, rb, 0);
+ if (disp16 != 0) {
+ gen_set_sT1(ctx, disp16);
+ gen_op_addq();
+ }
+ (*gen_load_fop)(ctx);
+ gen_store_fir(ctx, ra, 1);
+}
+
+static void gen_store_fmem (DisasContext *ctx,
+ void (*gen_store_fop)(DisasContext *ctx),
+ int ra, int rb, int32_t disp16)
+{
+ gen_load_ir(ctx, rb, 0);
+ if (disp16 != 0) {
+ gen_set_sT1(ctx, disp16);
+ gen_op_addq();
+ }
+ gen_load_fir(ctx, ra, 1);
+ (*gen_store_fop)(ctx);
+}
+
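+/* Conditional branches: T1 ends up holding the branch target and T0 the
+ * register value to test; the test op sets the condition consumed by
+ * op_bcond.
+ */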
+static void gen_bcond (DisasContext *ctx, void (*gen_test_op)(void),
+ int ra, int32_t disp)
+{
+ if (disp != 0) {
+ gen_set_uT0(ctx, ctx->pc);
+ gen_set_sT1(ctx, disp << 2);
+ gen_op_addq1();
+ } else {
+ gen_set_uT1(ctx, ctx->pc);
+ }
+ gen_load_ir(ctx, ra, 0);
+ (*gen_test_op)();
+ _gen_op_bcond(ctx);
+}
+
+static void gen_fbcond (DisasContext *ctx, void (*gen_test_op)(void),
+ int ra, int32_t disp)
+{
+ if (disp != 0) {
+ gen_set_uT0(ctx, ctx->pc);
+ gen_set_sT1(ctx, disp << 2);
+ gen_op_addq1();
+ } else {
+ gen_set_uT1(ctx, ctx->pc);
+ }
+ gen_load_fir(ctx, ra, 0);
+ (*gen_test_op)();
+ _gen_op_bcond(ctx);
+}
+
+static void gen_arith2 (DisasContext *ctx, void (*gen_arith_op)(void),
+ int rb, int rc, int islit, int8_t lit)
+{
+ if (islit)
+ gen_set_sT0(ctx, lit);
+ else
+ gen_load_ir(ctx, rb, 0);
+ (*gen_arith_op)();
+ gen_store_ir(ctx, rc, 0);
+}
+
+static void gen_arith3 (DisasContext *ctx, void (*gen_arith_op)(void),
+ int ra, int rb, int rc, int islit, int8_t lit)
+{
+ gen_load_ir(ctx, ra, 0);
+ if (islit)
+ gen_set_sT1(ctx, lit);
+ else
+ gen_load_ir(ctx, rb, 1);
+ (*gen_arith_op)();
+ gen_store_ir(ctx, rc, 0);
+}
+
+static void gen_cmov (DisasContext *ctx, void (*gen_test_op)(void),
+ int ra, int rb, int rc, int islit, int8_t lit)
+{
+ gen_load_ir(ctx, ra, 1);
+ if (islit)
+ gen_set_sT0(ctx, lit);
+ else
+ gen_load_ir(ctx, rb, 0);
+ (*gen_test_op)();
+ gen_op_cmov_ir(rc);
+}
+
+static void gen_farith2 (DisasContext *ctx, void (*gen_arith_fop)(void),
+ int rb, int rc)
+{
+ gen_load_fir(ctx, rb, 0);
+ (*gen_arith_fop)();
+ gen_store_fir(ctx, rc, 0);
+}
+
+static void gen_farith3 (DisasContext *ctx, void (*gen_arith_fop)(void),
+ int ra, int rb, int rc)
+{
+ gen_load_fir(ctx, ra, 0);
+ gen_load_fir(ctx, rb, 1);
+ (*gen_arith_fop)();
+ gen_store_fir(ctx, rc, 0);
+}
+
+static void gen_fcmov (DisasContext *ctx, void (*gen_test_fop)(void),
+ int ra, int rb, int rc)
+{
+ gen_load_fir(ctx, ra, 0);
+ gen_load_fir(ctx, rb, 1);
+ (*gen_test_fop)();
+ gen_op_cmov_fir(rc);
+}
+
+static void gen_fti (DisasContext *ctx, void (*gen_move_fop)(void),
+ int ra, int rc)
+{
+ gen_load_fir(ctx, rc, 0);
+ (*gen_move_fop)();
+ gen_store_ir(ctx, ra, 0);
+}
+
+static void gen_itf (DisasContext *ctx, void (*gen_move_fop)(void),
+ int ra, int rc)
+{
+ gen_load_ir(ctx, ra, 0);
+ (*gen_move_fop)();
+ gen_store_fir(ctx, rc, 0);
+}
+
+static void gen_s4addl (void)
+{
+ gen_op_s4();
+ gen_op_addl();
+}
+
+static void gen_s4subl (void)
+{
+ gen_op_s4();
+ gen_op_subl();
+}
+
+static void gen_s8addl (void)
+{
+ gen_op_s8();
+ gen_op_addl();
+}
+
+static void gen_s8subl (void)
+{
+ gen_op_s8();
+ gen_op_subl();
+}
+
+static void gen_s4addq (void)
+{
+ gen_op_s4();
+ gen_op_addq();
+}
+
+static void gen_s4subq (void)
+{
+ gen_op_s4();
+ gen_op_subq();
+}
+
+static void gen_s8addq (void)
+{
+ gen_op_s8();
+ gen_op_addq();
+}
+
+static void gen_s8subq (void)
+{
+ gen_op_s8();
+ gen_op_subq();
+}
+
+static void gen_amask (void)
+{
+ gen_op_load_amask();
+ gen_op_bic();
+}
+
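+/* Decode and translate a single instruction. A return value of 0 keeps the
+ * translator going; non-zero ends the TB (1 after a branch, 2 after an
+ * instruction that must end the block, 3 after an exception).
+ */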
+static int translate_one (DisasContext *ctx, uint32_t insn)
+{
+ uint32_t palcode;
+ int32_t disp21, disp16, disp12;
+ uint16_t fn11, fn16;
+ uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit;
+ int8_t lit;
+ int ret;
+
+ /* Decode all instruction fields */
+ opc = insn >> 26;
+ ra = (insn >> 21) & 0x1F;
+ rb = (insn >> 16) & 0x1F;
+ rc = insn & 0x1F;
+ sbz = (insn >> 13) & 0x07;
+ islit = (insn >> 12) & 1;
+ lit = (insn >> 13) & 0xFF;
+ palcode = insn & 0x03FFFFFF;
+ disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
+ disp16 = (int16_t)(insn & 0x0000FFFF);
+ disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
+ fn16 = insn & 0x0000FFFF;
+ fn11 = (insn >> 5) & 0x000007FF;
+ fpfn = fn11 & 0x3F;
+ fn7 = (insn >> 5) & 0x0000007F;
+ fn2 = (insn >> 5) & 0x00000003;
+ ret = 0;
+#if defined ALPHA_DEBUG_DISAS
+ if (logfile != NULL) {
+ fprintf(logfile, "opc %02x ra %d rb %d rc %d disp16 %04x\n",
+ opc, ra, rb, rc, disp16);
+ }
+#endif
+ switch (opc) {
+ case 0x00:
+ /* CALL_PAL */
+ if (palcode >= 0x80 && palcode < 0xC0) {
+ /* Unprivileged PAL call */
+ gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x1F) << 6), 0);
+#if !defined (CONFIG_USER_ONLY)
+ } else if (palcode < 0x40) {
+ /* Privileged PAL code */
+ if (ctx->mem_idx & 1)
+ goto invalid_opc;
+ else
+ gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x1F) << 6), 0);
+#endif
+ } else {
+ /* Invalid PAL call */
+ goto invalid_opc;
+ }
+ ret = 3;
+ break;
+ case 0x01:
+ /* OPC01 */
+ goto invalid_opc;
+ case 0x02:
+ /* OPC02 */
+ goto invalid_opc;
+ case 0x03:
+ /* OPC03 */
+ goto invalid_opc;
+ case 0x04:
+ /* OPC04 */
+ goto invalid_opc;
+ case 0x05:
+ /* OPC05 */
+ goto invalid_opc;
+ case 0x06:
+ /* OPC06 */
+ goto invalid_opc;
+ case 0x07:
+ /* OPC07 */
+ goto invalid_opc;
+ case 0x08:
+ /* LDA */
+ gen_load_ir(ctx, rb, 0);
+ gen_set_sT1(ctx, disp16);
+ gen_op_addq();
+ gen_store_ir(ctx, ra, 0);
+ break;
+ case 0x09:
+ /* LDAH */
+ gen_load_ir(ctx, rb, 0);
+ gen_set_sT1(ctx, disp16 << 16);
+ gen_op_addq();
+ gen_store_ir(ctx, ra, 0);
+ break;
+ case 0x0A:
+ /* LDBU */
+ if (!(ctx->amask & AMASK_BWX))
+ goto invalid_opc;
+ gen_load_mem(ctx, &gen_ldbu, ra, rb, disp16, 0);
+ break;
+ case 0x0B:
+ /* LDQ_U */
+ gen_load_mem(ctx, &gen_ldq_u, ra, rb, disp16, 1);
+ break;
+ case 0x0C:
+ /* LDWU */
+ if (!(ctx->amask & AMASK_BWX))
+ goto invalid_opc;
+ gen_load_mem(ctx, &gen_ldwu, ra, rb, disp16, 0);
+ break;
+ case 0x0D:
+ /* STW */
+ if (!(ctx->amask & AMASK_BWX))
+ goto invalid_opc;
+ gen_store_mem(ctx, &gen_stw, ra, rb, disp16, 0);
+ break;
+ case 0x0E:
+ /* STB */
+ if (!(ctx->amask & AMASK_BWX))
+ goto invalid_opc;
+ gen_store_mem(ctx, &gen_stb, ra, rb, disp16, 0);
+ break;
+ case 0x0F:
+ /* STQ_U */
+ gen_store_mem(ctx, &gen_stq_u, ra, rb, disp16, 1);
+ break;
+ case 0x10:
+ switch (fn7) {
+ case 0x00:
+ /* ADDL */
+ gen_arith3(ctx, &gen_op_addl, ra, rb, rc, islit, lit);
+ break;
+ case 0x02:
+ /* S4ADDL */
+ gen_arith3(ctx, &gen_s4addl, ra, rb, rc, islit, lit);
+ break;
+ case 0x09:
+ /* SUBL */
+ gen_arith3(ctx, &gen_op_subl, ra, rb, rc, islit, lit);
+ break;
+ case 0x0B:
+ /* S4SUBL */
+ gen_arith3(ctx, &gen_s4subl, ra, rb, rc, islit, lit);
+ break;
+ case 0x0F:
+ /* CMPBGE */
+ gen_arith3(ctx, &gen_op_cmpbge, ra, rb, rc, islit, lit);
+ break;
+ case 0x12:
+ /* S8ADDL */
+ gen_arith3(ctx, &gen_s8addl, ra, rb, rc, islit, lit);
+ break;
+ case 0x1B:
+ /* S8SUBL */
+ gen_arith3(ctx, &gen_s8subl, ra, rb, rc, islit, lit);
+ break;
+ case 0x1D:
+ /* CMPULT */
+ gen_arith3(ctx, &gen_op_cmpult, ra, rb, rc, islit, lit);
+ break;
+ case 0x20:
+ /* ADDQ */
+ gen_arith3(ctx, &gen_op_addq, ra, rb, rc, islit, lit);
+ break;
+ case 0x22:
+ /* S4ADDQ */
+ gen_arith3(ctx, &gen_s4addq, ra, rb, rc, islit, lit);
+ break;
+ case 0x29:
+ /* SUBQ */
+ gen_arith3(ctx, &gen_op_subq, ra, rb, rc, islit, lit);
+ break;
+ case 0x2B:
+ /* S4SUBQ */
+ gen_arith3(ctx, &gen_s4subq, ra, rb, rc, islit, lit);
+ break;
+ case 0x2D:
+ /* CMPEQ */
+ gen_arith3(ctx, &gen_op_cmpeq, ra, rb, rc, islit, lit);
+ break;
+ case 0x32:
+ /* S8ADDQ */
+ gen_arith3(ctx, &gen_s8addq, ra, rb, rc, islit, lit);
+ break;
+ case 0x3B:
+ /* S8SUBQ */
+ gen_arith3(ctx, &gen_s8subq, ra, rb, rc, islit, lit);
+ break;
+ case 0x3D:
+ /* CMPULE */
+ gen_arith3(ctx, &gen_op_cmpule, ra, rb, rc, islit, lit);
+ break;
+ case 0x40:
+ /* ADDL/V */
+ gen_arith3(ctx, &gen_op_addlv, ra, rb, rc, islit, lit);
+ break;
+ case 0x49:
+ /* SUBL/V */
+ gen_arith3(ctx, &gen_op_sublv, ra, rb, rc, islit, lit);
+ break;
+ case 0x4D:
+ /* CMPLT */
+ gen_arith3(ctx, &gen_op_cmplt, ra, rb, rc, islit, lit);
+ break;
+ case 0x60:
+ /* ADDQ/V */
+ gen_arith3(ctx, &gen_op_addqv, ra, rb, rc, islit, lit);
+ break;
+ case 0x69:
+ /* SUBQ/V */
+ gen_arith3(ctx, &gen_op_subqv, ra, rb, rc, islit, lit);
+ break;
+ case 0x6D:
+ /* CMPLE */
+ gen_arith3(ctx, &gen_op_cmple, ra, rb, rc, islit, lit);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+ case 0x11:
+ switch (fn7) {
+ case 0x00:
+ /* AND */
+ gen_arith3(ctx, &gen_op_and, ra, rb, rc, islit, lit);
+ break;
+ case 0x08:
+ /* BIC */
+ gen_arith3(ctx, &gen_op_bic, ra, rb, rc, islit, lit);
+ break;
+ case 0x14:
+ /* CMOVLBS */
+ gen_cmov(ctx, &gen_op_cmplbs, ra, rb, rc, islit, lit);
+ break;
+ case 0x16:
+ /* CMOVLBC */
+ gen_cmov(ctx, &gen_op_cmplbc, ra, rb, rc, islit, lit);
+ break;
+ case 0x20:
+ /* BIS */
+ if (ra == rb || ra == 31 || rb == 31) {
+ if (ra == 31 && rc == 31) {
+ /* NOP */
+ gen_op_nop();
+ } else {
+ /* MOV */
+ gen_load_ir(ctx, rb, 0);
+ gen_store_ir(ctx, rc, 0);
+ }
+ } else {
+ gen_arith3(ctx, &gen_op_bis, ra, rb, rc, islit, lit);
+ }
+ break;
+ case 0x24:
+ /* CMOVEQ */
+ gen_cmov(ctx, &gen_op_cmpeqz, ra, rb, rc, islit, lit);
+ break;
+ case 0x26:
+ /* CMOVNE */
+ gen_cmov(ctx, &gen_op_cmpnez, ra, rb, rc, islit, lit);
+ break;
+ case 0x28:
+ /* ORNOT */
+ gen_arith3(ctx, &gen_op_ornot, ra, rb, rc, islit, lit);
+ break;
+ case 0x40:
+ /* XOR */
+ gen_arith3(ctx, &gen_op_xor, ra, rb, rc, islit, lit);
+ break;
+ case 0x44:
+ /* CMOVLT */
+ gen_cmov(ctx, &gen_op_cmpltz, ra, rb, rc, islit, lit);
+ break;
+ case 0x46:
+ /* CMOVGE */
+ gen_cmov(ctx, &gen_op_cmpgez, ra, rb, rc, islit, lit);
+ break;
+ case 0x48:
+ /* EQV */
+ gen_arith3(ctx, &gen_op_eqv, ra, rb, rc, islit, lit);
+ break;
+ case 0x61:
+ /* AMASK */
+ gen_arith2(ctx, &gen_amask, rb, rc, islit, lit);
+ break;
+ case 0x64:
+ /* CMOVLE */
+ gen_cmov(ctx, &gen_op_cmplez, ra, rb, rc, islit, lit);
+ break;
+ case 0x66:
+ /* CMOVGT */
+ gen_cmov(ctx, &gen_op_cmpgtz, ra, rb, rc, islit, lit);
+ break;
+ case 0x6C:
+ /* IMPLVER */
+ gen_op_load_implver();
+ gen_store_ir(ctx, rc, 0);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+ case 0x12:
+ switch (fn7) {
+ case 0x02:
+ /* MSKBL */
+ gen_arith3(ctx, &gen_op_mskbl, ra, rb, rc, islit, lit);
+ break;
+ case 0x06:
+ /* EXTBL */
+ gen_arith3(ctx, &gen_op_extbl, ra, rb, rc, islit, lit);
+ break;
+ case 0x0B:
+ /* INSBL */
+ gen_arith3(ctx, &gen_op_insbl, ra, rb, rc, islit, lit);
+ break;
+ case 0x12:
+ /* MSKWL */
+ gen_arith3(ctx, &gen_op_mskwl, ra, rb, rc, islit, lit);
+ break;
+ case 0x16:
+ /* EXTWL */
+ gen_arith3(ctx, &gen_op_extwl, ra, rb, rc, islit, lit);
+ break;
+ case 0x1B:
+ /* INSWL */
+ gen_arith3(ctx, &gen_op_inswl, ra, rb, rc, islit, lit);
+ break;
+ case 0x22:
+ /* MSKLL */
+ gen_arith3(ctx, &gen_op_mskll, ra, rb, rc, islit, lit);
+ break;
+ case 0x26:
+ /* EXTLL */
+ gen_arith3(ctx, &gen_op_extll, ra, rb, rc, islit, lit);
+ break;
+ case 0x2B:
+ /* INSLL */
+ gen_arith3(ctx, &gen_op_insll, ra, rb, rc, islit, lit);
+ break;
+ case 0x30:
+ /* ZAP */
+ gen_arith3(ctx, &gen_op_zap, ra, rb, rc, islit, lit);
+ break;
+ case 0x31:
+ /* ZAPNOT */
+ gen_arith3(ctx, &gen_op_zapnot, ra, rb, rc, islit, lit);
+ break;
+ case 0x32:
+ /* MSKQL */
+ gen_arith3(ctx, &gen_op_mskql, ra, rb, rc, islit, lit);
+ break;
+ case 0x34:
+ /* SRL */
+ gen_arith3(ctx, &gen_op_srl, ra, rb, rc, islit, lit);
+ break;
+ case 0x36:
+ /* EXTQL */
+ gen_arith3(ctx, &gen_op_extql, ra, rb, rc, islit, lit);
+ break;
+ case 0x39:
+ /* SLL */
+ gen_arith3(ctx, &gen_op_sll, ra, rb, rc, islit, lit);
+ break;
+ case 0x3B:
+ /* INSQL */
+ gen_arith3(ctx, &gen_op_insql, ra, rb, rc, islit, lit);
+ break;
+ case 0x3C:
+ /* SRA */
+ gen_arith3(ctx, &gen_op_sra, ra, rb, rc, islit, lit);
+ break;
+ case 0x52:
+ /* MSKWH */
+ gen_arith3(ctx, &gen_op_mskwh, ra, rb, rc, islit, lit);
+ break;
+ case 0x57:
+ /* INSWH */
+ gen_arith3(ctx, &gen_op_inswh, ra, rb, rc, islit, lit);
+ break;
+ case 0x5A:
+ /* EXTWH */
+ gen_arith3(ctx, &gen_op_extwh, ra, rb, rc, islit, lit);
+ break;
+ case 0x62:
+ /* MSKLH */
+ gen_arith3(ctx, &gen_op_msklh, ra, rb, rc, islit, lit);
+ break;
+ case 0x67:
+ /* INSLH */
+ gen_arith3(ctx, &gen_op_inslh, ra, rb, rc, islit, lit);
+ break;
+ case 0x6A:
+ /* EXTLH */
+ gen_arith3(ctx, &gen_op_extlh, ra, rb, rc, islit, lit);
+ break;
+ case 0x72:
+ /* MSKQH */
+ gen_arith3(ctx, &gen_op_mskqh, ra, rb, rc, islit, lit);
+ break;
+ case 0x77:
+ /* INSQH */
+ gen_arith3(ctx, &gen_op_insqh, ra, rb, rc, islit, lit);
+ break;
+ case 0x7A:
+ /* EXTQH */
+ gen_arith3(ctx, &gen_op_extqh, ra, rb, rc, islit, lit);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+ case 0x13:
+ switch (fn7) {
+ case 0x00:
+ /* MULL */
+ gen_arith3(ctx, &gen_op_mull, ra, rb, rc, islit, lit);
+ break;
+ case 0x20:
+ /* MULQ */
+ gen_arith3(ctx, &gen_op_mulq, ra, rb, rc, islit, lit);
+ break;
+ case 0x30:
+ /* UMULH */
+ gen_arith3(ctx, &gen_op_umulh, ra, rb, rc, islit, lit);
+ break;
+ case 0x40:
+ /* MULL/V */
+ gen_arith3(ctx, &gen_op_mullv, ra, rb, rc, islit, lit);
+ break;
+ case 0x60:
+ /* MULQ/V */
+ gen_arith3(ctx, &gen_op_mulqv, ra, rb, rc, islit, lit);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+ case 0x14:
+ switch (fpfn) { /* f11 & 0x3F */
+ case 0x04:
+ /* ITOFS */
+ if (!(ctx->amask & AMASK_FIX))
+ goto invalid_opc;
+ gen_itf(ctx, &gen_op_itofs, ra, rc);
+ break;
+ case 0x0A:
+ /* SQRTF */
+ if (!(ctx->amask & AMASK_FIX))
+ goto invalid_opc;
+ gen_farith2(ctx, &gen_op_sqrtf, rb, rc);
+ break;
+ case 0x0B:
+ /* SQRTS */
+ if (!(ctx->amask & AMASK_FIX))
+ goto invalid_opc;
+ gen_farith2(ctx, &gen_op_sqrts, rb, rc);
+ break;
+ case 0x14:
+ /* ITOFF */
+ if (!(ctx->amask & AMASK_FIX))
+ goto invalid_opc;
+#if 0 // TODO
+ gen_itf(ctx, &gen_op_itoff, ra, rc);
+#else
+ goto invalid_opc;
+#endif
+ break;
+ case 0x24:
+ /* ITOFT */
+ if (!(ctx->amask & AMASK_FIX))
+ goto invalid_opc;
+ gen_itf(ctx, &gen_op_itoft, ra, rc);
+ break;
+ case 0x2A:
+ /* SQRTG */
+ if (!(ctx->amask & AMASK_FIX))
+ goto invalid_opc;
+ gen_farith2(ctx, &gen_op_sqrtg, rb, rc);
+ break;
+ case 0x2B:
+ /* SQRTT */
+ if (!(ctx->amask & AMASK_FIX))
+ goto invalid_opc;
+ gen_farith2(ctx, &gen_op_sqrtt, rb, rc);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+ case 0x15:
+ /* VAX floating point */
+ /* XXX: rounding mode and trap are ignored (!) */
+ switch (fpfn) { /* f11 & 0x3F */
+ case 0x00:
+ /* ADDF */
+ gen_farith3(ctx, &gen_op_addf, ra, rb, rc);
+ break;
+ case 0x01:
+ /* SUBF */
+ gen_farith3(ctx, &gen_op_subf, ra, rb, rc);
+ break;
+ case 0x02:
+ /* MULF */
+ gen_farith3(ctx, &gen_op_mulf, ra, rb, rc);
+ break;
+ case 0x03:
+ /* DIVF */
+ gen_farith3(ctx, &gen_op_divf, ra, rb, rc);
+ break;
+ case 0x1E:
+ /* CVTDG */
+#if 0 // TODO
+ gen_farith2(ctx, &gen_op_cvtdg, rb, rc);
+#else
+ goto invalid_opc;
+#endif
+ break;
+ case 0x20:
+ /* ADDG */
+ gen_farith3(ctx, &gen_op_addg, ra, rb, rc);
+ break;
+ case 0x21:
+ /* SUBG */
+ gen_farith3(ctx, &gen_op_subg, ra, rb, rc);
+ break;
+ case 0x22:
+ /* MULG */
+ gen_farith3(ctx, &gen_op_mulg, ra, rb, rc);
+ break;
+ case 0x23:
+ /* DIVG */
+ gen_farith3(ctx, &gen_op_divg, ra, rb, rc);
+ break;
+ case 0x25:
+ /* CMPGEQ */
+ gen_farith3(ctx, &gen_op_cmpgeq, ra, rb, rc);
+ break;
+ case 0x26:
+ /* CMPGLT */
+ gen_farith3(ctx, &gen_op_cmpglt, ra, rb, rc);
+ break;
+ case 0x27:
+ /* CMPGLE */
+ gen_farith3(ctx, &gen_op_cmpgle, ra, rb, rc);
+ break;
+ case 0x2C:
+ /* CVTGF */
+ gen_farith2(ctx, &gen_op_cvtgf, rb, rc);
+ break;
+ case 0x2D:
+ /* CVTGD */
+#if 0 // TODO
+ gen_farith2(ctx, &gen_op_cvtgd, rb, rc);
+#else
+ goto invalid_opc;
+#endif
+ break;
+ case 0x2F:
+ /* CVTGQ */
+ gen_farith2(ctx, &gen_op_cvtgq, rb, rc);
+ break;
+ case 0x3C:
+ /* CVTQF */
+ gen_farith2(ctx, &gen_op_cvtqf, rb, rc);
+ break;
+ case 0x3E:
+ /* CVTQG */
+ gen_farith2(ctx, &gen_op_cvtqg, rb, rc);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+ case 0x16:
+ /* IEEE floating-point */
+ /* XXX: rounding mode and traps are ignored (!) */
+ switch (fpfn) { /* f11 & 0x3F */
+ case 0x00:
+ /* ADDS */
+ gen_farith3(ctx, &gen_op_adds, ra, rb, rc);
+ break;
+ case 0x01:
+ /* SUBS */
+ gen_farith3(ctx, &gen_op_subs, ra, rb, rc);
+ break;
+ case 0x02:
+ /* MULS */
+ gen_farith3(ctx, &gen_op_muls, ra, rb, rc);
+ break;
+ case 0x03:
+ /* DIVS */
+ gen_farith3(ctx, &gen_op_divs, ra, rb, rc);
+ break;
+ case 0x20:
+ /* ADDT */
+ gen_farith3(ctx, &gen_op_addt, ra, rb, rc);
+ break;
+ case 0x21:
+ /* SUBT */
+ gen_farith3(ctx, &gen_op_subt, ra, rb, rc);
+ break;
+ case 0x22:
+ /* MULT */
+ gen_farith3(ctx, &gen_op_mult, ra, rb, rc);
+ break;
+ case 0x23:
+ /* DIVT */
+ gen_farith3(ctx, &gen_op_divt, ra, rb, rc);
+ break;
+ case 0x24:
+ /* CMPTUN */
+ gen_farith3(ctx, &gen_op_cmptun, ra, rb, rc);
+ break;
+ case 0x25:
+ /* CMPTEQ */
+ gen_farith3(ctx, &gen_op_cmpteq, ra, rb, rc);
+ break;
+ case 0x26:
+ /* CMPTLT */
+ gen_farith3(ctx, &gen_op_cmptlt, ra, rb, rc);
+ break;
+ case 0x27:
+ /* CMPTLE */
+ gen_farith3(ctx, &gen_op_cmptle, ra, rb, rc);
+ break;
+ case 0x2C:
+ /* XXX: incorrect */
+ if (fn11 == 0x2AC) {
+ /* CVTST */
+ gen_farith2(ctx, &gen_op_cvtst, rb, rc);
+ } else {
+ /* CVTTS */
+ gen_farith2(ctx, &gen_op_cvtts, rb, rc);
+ }
+ break;
+ case 0x2F:
+ /* CVTTQ */
+ gen_farith2(ctx, &gen_op_cvttq, rb, rc);
+ break;
+ case 0x3C:
+ /* CVTQS */
+ gen_farith2(ctx, &gen_op_cvtqs, rb, rc);
+ break;
+ case 0x3E:
+ /* CVTQT */
+ gen_farith2(ctx, &gen_op_cvtqt, rb, rc);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+ case 0x17:
+ switch (fn11) {
+ case 0x010:
+ /* CVTLQ */
+ gen_farith2(ctx, &gen_op_cvtlq, rb, rc);
+ break;
+ case 0x020:
+ /* CPYS */
+ if (ra == rb) {
+ if (ra == 31 && rc == 31) {
+ /* FNOP */
+ gen_op_nop();
+ } else {
+ /* FMOV */
+ gen_load_fir(ctx, rb, 0);
+ gen_store_fir(ctx, rc, 0);
+ }
+ } else {
+ gen_farith3(ctx, &gen_op_cpys, ra, rb, rc);
+ }
+ break;
+ case 0x021:
+ /* CPYSN */
+ gen_farith2(ctx, &gen_op_cpysn, rb, rc);
+ break;
+ case 0x022:
+ /* CPYSE */
+ gen_farith2(ctx, &gen_op_cpyse, rb, rc);
+ break;
+ case 0x024:
+ /* MT_FPCR */
+ gen_load_fir(ctx, ra, 0);
+ gen_op_store_fpcr();
+ break;
+ case 0x025:
+ /* MF_FPCR */
+ gen_op_load_fpcr();
+ gen_store_fir(ctx, ra, 0);
+ break;
+ case 0x02A:
+ /* FCMOVEQ */
+ gen_fcmov(ctx, &gen_op_cmpfeq, ra, rb, rc);
+ break;
+ case 0x02B:
+ /* FCMOVNE */
+ gen_fcmov(ctx, &gen_op_cmpfne, ra, rb, rc);
+ break;
+ case 0x02C:
+ /* FCMOVLT */
+ gen_fcmov(ctx, &gen_op_cmpflt, ra, rb, rc);
+ break;
+ case 0x02D:
+ /* FCMOVGE */
+ gen_fcmov(ctx, &gen_op_cmpfge, ra, rb, rc);
+ break;
+ case 0x02E:
+ /* FCMOVLE */
+ gen_fcmov(ctx, &gen_op_cmpfle, ra, rb, rc);
+ break;
+ case 0x02F:
+ /* FCMOVGT */
+ gen_fcmov(ctx, &gen_op_cmpfgt, ra, rb, rc);
+ break;
+ case 0x030:
+ /* CVTQL */
+ gen_farith2(ctx, &gen_op_cvtql, rb, rc);
+ break;
+ case 0x130:
+ /* CVTQL/V */
+ gen_farith2(ctx, &gen_op_cvtqlv, rb, rc);
+ break;
+ case 0x530:
+ /* CVTQL/SV */
+ gen_farith2(ctx, &gen_op_cvtqlsv, rb, rc);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+ case 0x18:
+ switch ((uint16_t)disp16) {
+ case 0x0000:
+ /* TRAPB */
+ /* No-op. Just exit from the current tb */
+ ret = 2;
+ break;
+ case 0x0400:
+ /* EXCB */
+ /* No-op. Just exit from the current tb */
+ ret = 2;
+ break;
+ case 0x4000:
+ /* MB */
+ /* No-op */
+ break;
+ case 0x4400:
+ /* WMB */
+ /* No-op */
+ break;
+ case 0x8000:
+ /* FETCH */
+ /* No-op */
+ break;
+ case 0xA000:
+ /* FETCH_M */
+ /* No-op */
+ break;
+ case 0xC000:
+ /* RPCC */
+ gen_op_load_pcc();
+ gen_store_ir(ctx, ra, 0);
+ break;
+ case 0xE000:
+ /* RC */
+ gen_op_load_irf();
+ gen_store_ir(ctx, ra, 0);
+ gen_op_clear_irf();
+ break;
+ case 0xE800:
+ /* ECB */
+ /* XXX: TODO: evict tb cache at address rb */
+#if 0
+ ret = 2;
+#else
+ goto invalid_opc;
+#endif
+ break;
+ case 0xF000:
+ /* RS */
+ gen_op_load_irf();
+ gen_store_ir(ctx, ra, 0);
+ gen_op_set_irf();
+ break;
+ case 0xF800:
+ /* WH64 */
+ /* No-op */
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+ case 0x19:
+ /* HW_MFPR (PALcode) */
+#if defined (CONFIG_USER_ONLY)
+ goto invalid_opc;
+#else
+ if (!ctx->pal_mode)
+ goto invalid_opc;
+ gen_op_mfpr(insn & 0xFF);
+ gen_store_ir(ctx, ra, 0);
+ break;
+#endif
+ case 0x1A:
+ gen_load_ir(ctx, rb, 0);
+ if (ra != 31) {
+ gen_set_uT1(ctx, ctx->pc);
+ gen_store_ir(ctx, ra, 1);
+ }
+ gen_op_branch();
+ /* These four jumps differ only in the branch prediction hint */
+ switch (fn2) {
+ case 0x0:
+ /* JMP */
+ break;
+ case 0x1:
+ /* JSR */
+ break;
+ case 0x2:
+ /* RET */
+ break;
+ case 0x3:
+ /* JSR_COROUTINE */
+ break;
+ }
+ ret = 1;
+ break;
+ case 0x1B:
+ /* HW_LD (PALcode) */
+#if defined (CONFIG_USER_ONLY)
+ goto invalid_opc;
+#else
+ if (!ctx->pal_mode)
+ goto invalid_opc;
+ gen_load_ir(ctx, rb, 0);
+ gen_set_sT1(ctx, disp12);
+ gen_op_addq();
+ switch ((insn >> 12) & 0xF) {
+ case 0x0:
+ /* Longword physical access */
+ gen_op_ldl_raw();
+ break;
+ case 0x1:
+ /* Quadword physical access */
+ gen_op_ldq_raw();
+ break;
+ case 0x2:
+ /* Longword physical access with lock */
+ gen_op_ldl_l_raw();
+ break;
+ case 0x3:
+ /* Quadword physical access with lock */
+ gen_op_ldq_l_raw();
+ break;
+ case 0x4:
+ /* Longword virtual PTE fetch */
+ gen_op_ldl_kernel();
+ break;
+ case 0x5:
+ /* Quadword virtual PTE fetch */
+ gen_op_ldq_kernel();
+ break;
+ case 0x6:
+ /* Invalid */
+ goto invalid_opc;
+ case 0x7:
+ /* Invalid */
+ goto invalid_opc;
+ case 0x8:
+ /* Longword virtual access */
+ gen_op_ld_phys_to_virt();
+ gen_op_ldl_raw();
+ break;
+ case 0x9:
+ /* Quadword virtual access */
+ gen_op_ld_phys_to_virt();
+ gen_op_ldq_raw();
+ break;
+ case 0xA:
+ /* Longword virtual access with protection check */
+ gen_ldl(ctx);
+ break;
+ case 0xB:
+ /* Quadword virtual access with protection check */
+ gen_ldq(ctx);
+ break;
+ case 0xC:
+ /* Longword virtual access with alternate access mode */
+ gen_op_set_alt_mode();
+ gen_op_ld_phys_to_virt();
+ gen_op_ldl_raw();
+ gen_op_restore_mode();
+ break;
+ case 0xD:
+ /* Quadword virtual access with alternate access mode */
+ gen_op_set_alt_mode();
+ gen_op_ld_phys_to_virt();
+ gen_op_ldq_raw();
+ gen_op_restore_mode();
+ break;
+ case 0xE:
+ /* Longword virtual access with alternate access mode and
+ * protection checks
+ */
+ gen_op_set_alt_mode();
+ gen_op_ldl_data();
+ gen_op_restore_mode();
+ break;
+ case 0xF:
+ /* Quadword virtual access with alternate access mode and
+ * protection checks
+ */
+ gen_op_set_alt_mode();
+ gen_op_ldq_data();
+ gen_op_restore_mode();
+ break;
+ }
+ gen_store_ir(ctx, ra, 1);
+ break;
+#endif
+ case 0x1C:
+ switch (fn7) {
+ case 0x00:
+ /* SEXTB */
+ if (!(ctx->amask & AMASK_BWX))
+ goto invalid_opc;
+ gen_arith2(ctx, &gen_op_sextb, rb, rc, islit, lit);
+ break;
+ case 0x01:
+ /* SEXTW */
+ if (!(ctx->amask & AMASK_BWX))
+ goto invalid_opc;
+ gen_arith2(ctx, &gen_op_sextw, rb, rc, islit, lit);
+ break;
+ case 0x30:
+ /* CTPOP */
+ if (!(ctx->amask & AMASK_CIX))
+ goto invalid_opc;
+ gen_arith2(ctx, &gen_op_ctpop, rb, rc, 0, 0);
+ break;
+ case 0x31:
+ /* PERR */
+ if (!(ctx->amask & AMASK_MVI))
+ goto invalid_opc;
+ /* XXX: TODO */
+ goto invalid_opc;
+ break;
+ case 0x32:
+ /* CTLZ */
+ if (!(ctx->amask & AMASK_CIX))
+ goto invalid_opc;
+ gen_arith2(ctx, &gen_op_ctlz, rb, rc, 0, 0);
+ break;
+ case 0x33:
+ /* CTTZ */
+ if (!(ctx->amask & AMASK_CIX))
+ goto invalid_opc;
+ gen_arith2(ctx, &gen_op_cttz, rb, rc, 0, 0);
+ break;
+ case 0x34:
+ /* UNPKBW */
+ if (!(ctx->amask & AMASK_MVI))
+ goto invalid_opc;
+ /* XXX: TODO */
+ goto invalid_opc;
+ break;
+ case 0x35:
+ /* UNPKWL */
+ if (!(ctx->amask & AMASK_MVI))
+ goto invalid_opc;
+ /* XXX: TODO */
+ goto invalid_opc;
+ break;
+ case 0x36:
+ /* PKWB */
+ if (!(ctx->amask & AMASK_MVI))
+ goto invalid_opc;
+ /* XXX: TODO */
+ goto invalid_opc;
+ break;
+ case 0x37:
+ /* PKLB */
+ if (!(ctx->amask & AMASK_MVI))
+ goto invalid_opc;
+ /* XXX: TODO */
+ goto invalid_opc;
+ break;
+ case 0x38:
+ /* MINSB8 */
+ if (!(ctx->amask & AMASK_MVI))
+ goto invalid_opc;
+ /* XXX: TODO */
+ goto invalid_opc;
+ break;
+ case 0x39:
+ /* MINSW4 */
+ if (!(ctx->amask & AMASK_MVI))
+ goto invalid_opc;
+ /* XXX: TODO */
+ goto invalid_opc;
+ break;
+ case 0x3A:
+ /* MINUB8 */
+ if (!(ctx->amask & AMASK_MVI))
+ goto invalid_opc;
+ /* XXX: TODO */
+ goto invalid_opc;
+ break;
+ case 0x3B:
+ /* MINUW4 */
+ if (!(ctx->amask & AMASK_MVI))
+ goto invalid_opc;
+ /* XXX: TODO */
+ goto invalid_opc;
+ break;
+ case 0x3C:
+ /* MAXUB8 */
+ if (!(ctx->amask & AMASK_MVI))
+ goto invalid_opc;
+ /* XXX: TODO */
+ goto invalid_opc;
+ break;
+ case 0x3D:
+ /* MAXUW4 */
+ if (!(ctx->amask & AMASK_MVI))
+ goto invalid_opc;
+ /* XXX: TODO */
+ goto invalid_opc;
+ break;
+ case 0x3E:
+ /* MAXSB8 */
+ if (!(ctx->amask & AMASK_MVI))
+ goto invalid_opc;
+ /* XXX: TODO */
+ goto invalid_opc;
+ break;
+ case 0x3F:
+ /* MAXSW4 */
+ if (!(ctx->amask & AMASK_MVI))
+ goto invalid_opc;
+ /* XXX: TODO */
+ goto invalid_opc;
+ break;
+ case 0x70:
+ /* FTOIT */
+ if (!(ctx->amask & AMASK_FIX))
+ goto invalid_opc;
+ gen_fti(ctx, &gen_op_ftoit, rc, ra);
+ break;
+ case 0x78:
+ /* FTOIS */
+ if (!(ctx->amask & AMASK_FIX))
+ goto invalid_opc;
+ gen_fti(ctx, &gen_op_ftois, rc, ra);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+ case 0x1D:
+ /* HW_MTPR (PALcode) */
+#if defined (CONFIG_USER_ONLY)
+ goto invalid_opc;
+#else
+ if (!ctx->pal_mode)
+ goto invalid_opc;
+ gen_load_ir(ctx, ra, 0);
+ gen_op_mtpr(insn & 0xFF);
+ ret = 2;
+ break;
+#endif
+ case 0x1E:
+ /* HW_REI (PALcode) */
+#if defined (CONFIG_USER_ONLY)
+ goto invalid_opc;
+#else
+ if (!ctx->pal_mode)
+ goto invalid_opc;
+ if (rb == 31) {
+ /* "Old" alpha */
+ gen_op_hw_rei();
+ } else {
+ gen_load_ir(ctx, rb, 0);
+ gen_set_uT1(ctx, (((int64_t)insn << 51) >> 51));
+ gen_op_addq();
+ gen_op_hw_ret();
+ }
+ ret = 2;
+ break;
+#endif
+ case 0x1F:
+ /* HW_ST (PALcode) */
+#if defined (CONFIG_USER_ONLY)
+ goto invalid_opc;
+#else
+ if (!ctx->pal_mode)
+ goto invalid_opc;
+ gen_load_ir(ctx, rb, 0);
+ gen_set_sT1(ctx, disp12);
+ gen_op_addq();
+ gen_load_ir(ctx, ra, 1);
+ switch ((insn >> 12) & 0xF) {
+ case 0x0:
+ /* Longword physical access */
+ gen_op_stl_raw();
+ break;
+ case 0x1:
+ /* Quadword physical access */
+ gen_op_stq_raw();
+ break;
+ case 0x2:
+ /* Longword physical access with lock */
+ gen_op_stl_c_raw();
+ break;
+ case 0x3:
+ /* Quadword physical access with lock */
+ gen_op_stq_c_raw();
+ break;
+ case 0x4:
+ /* Longword virtual access */
+ gen_op_st_phys_to_virt();
+ gen_op_stl_raw();
+ break;
+ case 0x5:
+ /* Quadword virtual access */
+ gen_op_st_phys_to_virt();
+ gen_op_stq_raw();
+ break;
+ case 0x6:
+ /* Invalid */
+ goto invalid_opc;
+ case 0x7:
+ /* Invalid */
+ goto invalid_opc;
+ case 0x8:
+ /* Invalid */
+ goto invalid_opc;
+ case 0x9:
+ /* Invalid */
+ goto invalid_opc;
+ case 0xA:
+ /* Invalid */
+ goto invalid_opc;
+ case 0xB:
+ /* Invalid */
+ goto invalid_opc;
+ case 0xC:
+ /* Longword virtual access with alternate access mode */
+ gen_op_set_alt_mode();
+ gen_op_st_phys_to_virt();
+ gen_op_stl_raw();
+ gen_op_restore_mode();
+ break;
+ case 0xD:
+ /* Quadword virtual access with alternate access mode */
+ gen_op_set_alt_mode();
+ gen_op_st_phys_to_virt();
+ gen_op_stq_raw();
+ gen_op_restore_mode();
+ break;
+ case 0xE:
+ /* Invalid */
+ goto invalid_opc;
+ case 0xF:
+ /* Invalid */
+ goto invalid_opc;
+ }
+ ret = 2;
+ break;
+#endif
+ case 0x20:
+ /* LDF */
+#if 0 // TODO
+ gen_load_fmem(ctx, &gen_ldf, ra, rb, disp16);
+#else
+ goto invalid_opc;
+#endif
+ break;
+ case 0x21:
+ /* LDG */
+#if 0 // TODO
+ gen_load_fmem(ctx, &gen_ldg, ra, rb, disp16);
+#else
+ goto invalid_opc;
+#endif
+ break;
+ case 0x22:
+ /* LDS */
+ gen_load_fmem(ctx, &gen_lds, ra, rb, disp16);
+ break;
+ case 0x23:
+ /* LDT */
+ gen_load_fmem(ctx, &gen_ldt, ra, rb, disp16);
+ break;
+ case 0x24:
+ /* STF */
+#if 0 // TODO
+ gen_store_fmem(ctx, &gen_stf, ra, rb, disp16);
+#else
+ goto invalid_opc;
+#endif
+ break;
+ case 0x25:
+ /* STG */
+#if 0 // TODO
+ gen_store_fmem(ctx, &gen_stg, ra, rb, disp16);
+#else
+ goto invalid_opc;
+#endif
+ break;
+ case 0x26:
+ /* STS */
+ gen_store_fmem(ctx, &gen_sts, ra, rb, disp16);
+ break;
+ case 0x27:
+ /* STT */
+ gen_store_fmem(ctx, &gen_stt, ra, rb, disp16);
+ break;
+ case 0x28:
+ /* LDL */
+ gen_load_mem(ctx, &gen_ldl, ra, rb, disp16, 0);
+ break;
+ case 0x29:
+ /* LDQ */
+ gen_load_mem(ctx, &gen_ldq, ra, rb, disp16, 0);
+ break;
+ case 0x2A:
+ /* LDL_L */
+ gen_load_mem(ctx, &gen_ldl_l, ra, rb, disp16, 0);
+ break;
+ case 0x2B:
+ /* LDQ_L */
+ gen_load_mem(ctx, &gen_ldq_l, ra, rb, disp16, 0);
+ break;
+ case 0x2C:
+ /* STL */
+ gen_store_mem(ctx, &gen_stl, ra, rb, disp16, 0);
+ break;
+ case 0x2D:
+ /* STQ */
+ gen_store_mem(ctx, &gen_stq, ra, rb, disp16, 0);
+ break;
+ case 0x2E:
+ /* STL_C */
+ gen_store_mem(ctx, &gen_stl_c, ra, rb, disp16, 0);
+ break;
+ case 0x2F:
+ /* STQ_C */
+ gen_store_mem(ctx, &gen_stq_c, ra, rb, disp16, 0);
+ break;
+ case 0x30:
+ /* BR */
+ gen_set_uT0(ctx, ctx->pc);
+ gen_store_ir(ctx, ra, 0);
+ if (disp21 != 0) {
+ gen_set_sT1(ctx, disp21 << 2);
+ gen_op_addq();
+ }
+ gen_op_branch();
+ ret = 1;
+ break;
+ case 0x31:
+ /* FBEQ */
+ gen_fbcond(ctx, &gen_op_cmpfeq, ra, disp21);
+ ret = 1;
+ break;
+ case 0x32:
+ /* FBLT */
+ gen_fbcond(ctx, &gen_op_cmpflt, ra, disp21);
+ ret = 1;
+ break;
+ case 0x33:
+ /* FBLE */
+ gen_fbcond(ctx, &gen_op_cmpfle, ra, disp21);
+ ret = 1;
+ break;
+ case 0x34:
+ /* BSR */
+ gen_set_uT0(ctx, ctx->pc);
+ gen_store_ir(ctx, ra, 0);
+ if (disp21 != 0) {
+ gen_set_sT1(ctx, disp21 << 2);
+ gen_op_addq();
+ }
+ gen_op_branch();
+ ret = 1;
+ break;
+ case 0x35:
+ /* FBNE */
+ gen_fbcond(ctx, &gen_op_cmpfne, ra, disp21);
+ ret = 1;
+ break;
+ case 0x36:
+ /* FBGE */
+ gen_fbcond(ctx, &gen_op_cmpfge, ra, disp21);
+ ret = 1;
+ break;
+ case 0x37:
+ /* FBGT */
+ gen_fbcond(ctx, &gen_op_cmpfgt, ra, disp21);
+ ret = 1;
+ break;
+ case 0x38:
+ /* BLBC */
+ gen_bcond(ctx, &gen_op_cmplbc, ra, disp21);
+ ret = 1;
+ break;
+ case 0x39:
+ /* BEQ */
+ gen_bcond(ctx, &gen_op_cmpeqz, ra, disp21);
+ ret = 1;
+ break;
+ case 0x3A:
+ /* BLT */
+ gen_bcond(ctx, &gen_op_cmpltz, ra, disp21);
+ ret = 1;
+ break;
+ case 0x3B:
+ /* BLE */
+ gen_bcond(ctx, &gen_op_cmplez, ra, disp21);
+ ret = 1;
+ break;
+ case 0x3C:
+ /* BLBS */
+ gen_bcond(ctx, &gen_op_cmplbs, ra, disp21);
+ ret = 1;
+ break;
+ case 0x3D:
+ /* BNE */
+ gen_bcond(ctx, &gen_op_cmpnez, ra, disp21);
+ ret = 1;
+ break;
+ case 0x3E:
+ /* BGE */
+ gen_bcond(ctx, &gen_op_cmpgez, ra, disp21);
+ ret = 1;
+ break;
+ case 0x3F:
+ /* BGT */
+ gen_bcond(ctx, &gen_op_cmpgtz, ra, disp21);
+ ret = 1;
+ break;
+ invalid_opc:
+ gen_invalid(ctx);
+ ret = 3;
+ break;
+ }
+
+ return ret;
+}
+
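+/* Translate one TB starting at tb->pc: instructions are decoded until a
+ * branch, an exception, a page boundary or single-stepping ends the block.
+ * With search_pc set, the PC of each generated op is also recorded.
+ */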
+int gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
+ int search_pc)
+{
+#if defined ALPHA_DEBUG_DISAS
+ static int insn_count;
+#endif
+ DisasContext ctx, *ctxp = &ctx;
+ target_ulong pc_start;
+ uint32_t insn;
+ uint16_t *gen_opc_end;
+ int j, lj = -1;
+ int ret;
+
+ pc_start = tb->pc;
+ gen_opc_ptr = gen_opc_buf;
+ gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
+ gen_opparam_ptr = gen_opparam_buf;
+ nb_gen_labels = 0;
+ ctx.pc = pc_start;
+ ctx.amask = env->amask;
+#if defined (CONFIG_USER_ONLY)
+ ctx.mem_idx = 0;
+#else
+ ctx.mem_idx = ((env->ps >> 3) & 3);
+ ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
+#endif
+ for (ret = 0; ret == 0;) {
+ if (env->nb_breakpoints > 0) {
+ for(j = 0; j < env->nb_breakpoints; j++) {
+ if (env->breakpoints[j] == ctx.pc) {
+ gen_excp(&ctx, EXCP_DEBUG, 0);
+ break;
+ }
+ }
+ }
+ if (search_pc) {
+ j = gen_opc_ptr - gen_opc_buf;
+ if (lj < j) {
+ lj++;
+ while (lj < j)
+ gen_opc_instr_start[lj++] = 0;
+ gen_opc_pc[lj] = ctx.pc;
+ gen_opc_instr_start[lj] = 1;
+ }
+ }
+#if defined ALPHA_DEBUG_DISAS
+ insn_count++;
+ if (logfile != NULL) {
+ fprintf(logfile, "pc %016lx mem_idx\n", ctx.pc, ctx.mem_idx);
+ }
+#endif
+ insn = ldl_code(ctx.pc);
+#if defined ALPHA_DEBUG_DISAS
+ insn_count++;
+ if (logfile != NULL) {
+ fprintf(logfile, "opcode %08x %d\n", insn, insn_count);
+ }
+#endif
+ ctx.pc += 4;
+ ret = translate_one(ctxp, insn);
+ if (ret != 0)
+ break;
+ /* if we reach a page boundary or are single stepping, stop
+ * generation
+ */
+ if (((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) ||
+ (env->singlestep_enabled)) {
+ break;
+ }
+#if defined (DO_SINGLE_STEP)
+ break;
+#endif
+ }
+ if (ret != 1 && ret != 3) {
+ gen_update_pc(&ctx);
+ }
+ gen_op_reset_T0();
+#if defined (DO_TB_FLUSH)
+ gen_op_tb_flush();
+#endif
+ /* Generate the return instruction */
+ gen_op_exit_tb();
+ *gen_opc_ptr = INDEX_op_end;
+ if (search_pc) {
+ j = gen_opc_ptr - gen_opc_buf;
+ lj++;
+ while (lj <= j)
+ gen_opc_instr_start[lj++] = 0;
+ tb->size = 0;
+ } else {
+ tb->size = ctx.pc - pc_start;
+ }
+#if defined ALPHA_DEBUG_DISAS
+ if (loglevel & CPU_LOG_TB_CPU) {
+ cpu_dump_state(env, logfile, fprintf, 0);
+ }
+ if (loglevel & CPU_LOG_TB_IN_ASM) {
+ fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
+ target_disas(logfile, pc_start, ctx.pc - pc_start, 1);
+ fprintf(logfile, "\n");
+ }
+ if (loglevel & CPU_LOG_TB_OP) {
+ fprintf(logfile, "OP:\n");
+ dump_ops(gen_opc_buf, gen_opparam_buf);
+ fprintf(logfile, "\n");
+ }
+#endif
+
+ return 0;
+}
+
+int gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
+{
+ return gen_intermediate_code_internal(env, tb, 0);
+}
+
+int gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
+{
+ return gen_intermediate_code_internal(env, tb, 1);
+}
+
+CPUAlphaState * cpu_alpha_init (void)
+{
+ CPUAlphaState *env;
+ uint64_t hwpcb;
+
+ env = qemu_mallocz(sizeof(CPUAlphaState));
+ if (!env)
+ return NULL;
+ cpu_exec_init(env);
+ tlb_flush(env, 1);
+ /* XXX: should not be hardcoded */
+ env->implver = IMPLVER_2106x;
+ env->ps = 0x1F00;
+#if defined (CONFIG_USER_ONLY)
+ env->ps |= 1 << 3;
+#endif
+ pal_init(env);
+ /* Initialize IPR */
+ hwpcb = env->ipr[IPR_PCBB];
+ env->ipr[IPR_ASN] = 0;
+ env->ipr[IPR_ASTEN] = 0;
+ env->ipr[IPR_ASTSR] = 0;
+ env->ipr[IPR_DATFX] = 0;
+ /* XXX: fix this */
+ // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
+ // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
+ // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
+ // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
+ env->ipr[IPR_FEN] = 0;
+ env->ipr[IPR_IPL] = 31;
+ env->ipr[IPR_MCES] = 0;
+ env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
+ // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
+ env->ipr[IPR_SISR] = 0;
+ env->ipr[IPR_VIRBND] = -1ULL;
+
+ return env;
+}