Diffstat (limited to 'target/hppa')
-rw-r--r--   target/hppa/cpu-param.h    |   22
-rw-r--r--   target/hppa/cpu-qom.h      |    1
-rw-r--r--   target/hppa/cpu.c          |   70
-rw-r--r--   target/hppa/cpu.h          |  180
-rw-r--r--   target/hppa/gdbstub.c      |   32
-rw-r--r--   target/hppa/helper.c       |  101
-rw-r--r--   target/hppa/helper.h       |   55
-rw-r--r--   target/hppa/insns.decode   |  197
-rw-r--r--   target/hppa/int_helper.c   |   77
-rw-r--r--   target/hppa/machine.c      |  194
-rw-r--r--   target/hppa/mem_helper.c   |  434
-rw-r--r--   target/hppa/op_helper.c    |  310
-rw-r--r--   target/hppa/sys_helper.c   |   14
-rw-r--r--   target/hppa/trace-events   |    1
-rw-r--r--   target/hppa/translate.c    | 2139
15 files changed, 2456 insertions, 1371 deletions
diff --git a/target/hppa/cpu-param.h b/target/hppa/cpu-param.h
index c2791ae5f2..6746869a3b 100644
--- a/target/hppa/cpu-param.h
+++ b/target/hppa/cpu-param.h
@@ -8,26 +8,16 @@
#ifndef HPPA_CPU_PARAM_H
#define HPPA_CPU_PARAM_H
-#ifdef TARGET_HPPA64
-# define TARGET_LONG_BITS 64
-# define TARGET_REGISTER_BITS 64
-# define TARGET_VIRT_ADDR_SPACE_BITS 64
-# define TARGET_PHYS_ADDR_SPACE_BITS 64
-#elif defined(CONFIG_USER_ONLY)
-# define TARGET_LONG_BITS 32
-# define TARGET_REGISTER_BITS 32
-# define TARGET_VIRT_ADDR_SPACE_BITS 32
+#define TARGET_LONG_BITS 64
+
+#if defined(CONFIG_USER_ONLY) && defined(TARGET_ABI32)
# define TARGET_PHYS_ADDR_SPACE_BITS 32
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
#else
-/*
- * In order to form the GVA from space:offset,
- * we need a 64-bit virtual address space.
- */
-# define TARGET_LONG_BITS 64
-# define TARGET_REGISTER_BITS 32
+# define TARGET_PHYS_ADDR_SPACE_BITS 64
# define TARGET_VIRT_ADDR_SPACE_BITS 64
-# define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
+
#define TARGET_PAGE_BITS 12
#endif
diff --git a/target/hppa/cpu-qom.h b/target/hppa/cpu-qom.h
index b96e0318c7..4a85ebf5e0 100644
--- a/target/hppa/cpu-qom.h
+++ b/target/hppa/cpu-qom.h
@@ -24,6 +24,7 @@
#include "qom/object.h"
#define TYPE_HPPA_CPU "hppa-cpu"
+#define TYPE_HPPA64_CPU "hppa64-cpu"
OBJECT_DECLARE_CPU_TYPE(HPPACPU, HPPACPUClass, HPPA_CPU)
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index 1644297bf8..04de1689d7 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -77,9 +77,10 @@ static void hppa_restore_state_to_opc(CPUState *cs,
HPPACPU *cpu = HPPA_CPU(cs);
cpu->env.iaoq_f = data[0];
- if (data[1] != (target_ureg)-1) {
+ if (data[1] != (target_ulong)-1) {
cpu->env.iaoq_b = data[1];
}
+ cpu->env.unwind_breg = data[2];
/*
* Since we were executing the instruction at IAOQ_F, and took some
* sort of action that provoked the cpu_restore_state, we can infer
@@ -137,8 +138,10 @@ static void hppa_cpu_realizefn(DeviceState *dev, Error **errp)
#ifndef CONFIG_USER_ONLY
{
HPPACPU *cpu = HPPA_CPU(cs);
+
cpu->alarm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
hppa_cpu_alarm_timer, cpu);
+ hppa_ptlbe(&cpu->env);
}
#endif
}
@@ -156,7 +159,39 @@ static void hppa_cpu_initfn(Object *obj)
static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
{
- return object_class_by_name(TYPE_HPPA_CPU);
+ g_autofree char *typename = g_strconcat(cpu_model, "-cpu", NULL);
+ ObjectClass *oc = object_class_by_name(typename);
+
+ if (oc &&
+ !object_class_is_abstract(oc) &&
+ object_class_dynamic_cast(oc, TYPE_HPPA_CPU)) {
+ return oc;
+ }
+ return NULL;
+}
+
+static void hppa_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CPUClass *cc = CPU_CLASS(oc);
+ const char *tname = object_class_get_name(oc);
+ g_autofree char *name = g_strndup(tname, strchr(tname, '-') - tname);
+
+ if (cc->deprecation_note) {
+ qemu_printf(" %s (deprecated)\n", name);
+ } else {
+ qemu_printf(" %s\n", name);
+ }
+}
+
+void hppa_cpu_list(void)
+{
+ GSList *list;
+
+ list = object_class_get_list_sorted(TYPE_HPPA_CPU, false);
+ qemu_printf("Available CPUs:\n");
+ g_slist_foreach(list, hppa_cpu_list_entry, NULL);
+ g_slist_free(list);
}
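
The resolver above derives the QOM type name by appending "-cpu" to the model string, so "hppa" keeps resolving to TYPE_HPPA_CPU and "hppa64" picks up the new TYPE_HPPA64_CPU subclass registered below. A rough sketch of the mapping, assuming the usual -cpu command-line conventions (illustrative, not part of the patch):

    /* Illustrative only:
     *   -cpu hppa    ->  "hppa-cpu"    ->  TYPE_HPPA_CPU
     *   -cpu hppa64  ->  "hppa64-cpu"  ->  TYPE_HPPA64_CPU
     * Unknown names return NULL; "-cpu help" goes through hppa_cpu_list()
     * and should print both names under "Available CPUs:".
     */
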
#ifndef CONFIG_USER_ONLY
@@ -207,20 +242,21 @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
cc->tcg_ops = &hppa_tcg_ops;
}
-static const TypeInfo hppa_cpu_type_info = {
- .name = TYPE_HPPA_CPU,
- .parent = TYPE_CPU,
- .instance_size = sizeof(HPPACPU),
- .instance_align = __alignof(HPPACPU),
- .instance_init = hppa_cpu_initfn,
- .abstract = false,
- .class_size = sizeof(HPPACPUClass),
- .class_init = hppa_cpu_class_init,
+static const TypeInfo hppa_cpu_type_infos[] = {
+ {
+ .name = TYPE_HPPA_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(HPPACPU),
+ .instance_align = __alignof(HPPACPU),
+ .instance_init = hppa_cpu_initfn,
+ .abstract = false,
+ .class_size = sizeof(HPPACPUClass),
+ .class_init = hppa_cpu_class_init,
+ },
+ {
+ .name = TYPE_HPPA64_CPU,
+ .parent = TYPE_HPPA_CPU,
+ },
};
-static void hppa_cpu_register_types(void)
-{
- type_register_static(&hppa_cpu_type_info);
-}
-
-type_init(hppa_cpu_register_types)
+DEFINE_TYPES(hppa_cpu_type_infos)
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index 798d0c26d7..144794d089 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -23,6 +23,7 @@
#include "cpu-qom.h"
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
+#include "qemu/interval-tree.h"
/* PA-RISC 1.x processors have a strong memory model. */
/* ??? While we do not yet implement PA-RISC 2.0, those processors have
@@ -30,21 +31,33 @@
basis. It's probably easier to fall back to a strong memory model. */
#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL
-#define MMU_KERNEL_IDX 11
-#define MMU_PL1_IDX 12
-#define MMU_PL2_IDX 13
-#define MMU_USER_IDX 14
-#define MMU_PHYS_IDX 15
+#define MMU_KERNEL_IDX 7
+#define MMU_KERNEL_P_IDX 8
+#define MMU_PL1_IDX 9
+#define MMU_PL1_P_IDX 10
+#define MMU_PL2_IDX 11
+#define MMU_PL2_P_IDX 12
+#define MMU_USER_IDX 13
+#define MMU_USER_P_IDX 14
+#define MMU_PHYS_IDX 15
-#define PRIV_TO_MMU_IDX(priv) (MMU_KERNEL_IDX + (priv))
-#define MMU_IDX_TO_PRIV(mmu_idx) ((mmu_idx) - MMU_KERNEL_IDX)
+#define MMU_IDX_TO_PRIV(MIDX) (((MIDX) - MMU_KERNEL_IDX) / 2)
+#define MMU_IDX_TO_P(MIDX) (((MIDX) - MMU_KERNEL_IDX) & 1)
+#define PRIV_P_TO_MMU_IDX(PRIV, P) ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX)
-#define TARGET_INSN_START_EXTRA_WORDS 1
+#define TARGET_INSN_START_EXTRA_WORDS 2
/* No need to flush MMU_PHYS_IDX */
#define HPPA_MMU_FLUSH_MASK \
- (1 << MMU_KERNEL_IDX | 1 << MMU_PL1_IDX | \
- 1 << MMU_PL2_IDX | 1 << MMU_USER_IDX)
+ (1 << MMU_KERNEL_IDX | 1 << MMU_KERNEL_P_IDX | \
+ 1 << MMU_PL1_IDX | 1 << MMU_PL1_P_IDX | \
+ 1 << MMU_PL2_IDX | 1 << MMU_PL2_P_IDX | \
+ 1 << MMU_USER_IDX | 1 << MMU_USER_P_IDX)
+
+/* Indices to flush for access_id changes. */
+#define HPPA_MMU_FLUSH_P_MASK \
+ (1 << MMU_KERNEL_P_IDX | 1 << MMU_PL1_P_IDX | \
+ 1 << MMU_PL2_P_IDX | 1 << MMU_USER_P_IDX)
/* Hardware exceptions, interrupts, faults, and traps. */
#define EXCP_HPMC 1 /* high priority machine check */
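
The doubled index space folds the PSW P bit (protection-identifier checking) into the MMU index, so that toggling access-id checks selects a different softmmu TLB instead of forcing a flush (see the removal of the PSW_P flush in cpu_hppa_put_psw below). A minimal sketch of what the macros above imply (illustrative, not part of the patch):

    /* priv 0..3 and the PSW_P bit map onto indices 7..14:
     *   kernel, P=0 ->  7 (MMU_KERNEL_IDX)    kernel, P=1 ->  8
     *   user,   P=0 -> 13 (MMU_USER_IDX)      user,   P=1 -> 14
     */
    static void check_mmu_idx_mapping(void)
    {
        for (int priv = 0; priv < 4; priv++) {
            for (int p = 0; p < 2; p++) {
                int idx = PRIV_P_TO_MMU_IDX(priv, p);
                assert(MMU_IDX_TO_PRIV(idx) == priv);
                assert(MMU_IDX_TO_P(idx) == p);
            }
        }
    }
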
@@ -107,11 +120,7 @@
#define PSW_T 0x01000000
#define PSW_S 0x02000000
#define PSW_E 0x04000000
-#ifdef TARGET_HPPA64
#define PSW_W 0x08000000 /* PA2.0 only */
-#else
-#define PSW_W 0
-#endif
#define PSW_Z 0x40000000 /* PA1.x only */
#define PSW_Y 0x80000000 /* PA1.x only */
@@ -124,15 +133,12 @@
#define PSW_SM_P PSW_P
#define PSW_SM_Q PSW_Q /* Enable Interrupt State Collection */
#define PSW_SM_R PSW_R /* Enable Recover Counter Trap */
-#ifdef TARGET_HPPA64
#define PSW_SM_E 0x100
#define PSW_SM_W 0x200 /* PA2.0 only : Enable Wide Mode */
-#else
-#define PSW_SM_E 0
-#define PSW_SM_W 0
-#endif
#define CR_RC 0
+#define CR_PSW_DEFAULT 6 /* see SeaBIOS PDC_PSW firmware call */
+#define PDC_PSW_WIDE_BIT 2
#define CR_PID1 8
#define CR_PID2 9
#define CR_PID3 12
@@ -150,45 +156,37 @@
#define CR_IPSW 22
#define CR_EIRR 23
-#if TARGET_REGISTER_BITS == 32
-typedef uint32_t target_ureg;
-typedef int32_t target_sreg;
-#define TREG_FMT_lx "%08"PRIx32
-#define TREG_FMT_ld "%"PRId32
-#else
-typedef uint64_t target_ureg;
-typedef int64_t target_sreg;
-#define TREG_FMT_lx "%016"PRIx64
-#define TREG_FMT_ld "%"PRId64
-#endif
+typedef struct HPPATLBEntry {
+ union {
+ IntervalTreeNode itree;
+ struct HPPATLBEntry *unused_next;
+ };
+
+ target_ulong pa;
+
+ unsigned entry_valid : 1;
-typedef struct {
- uint64_t va_b;
- uint64_t va_e;
- target_ureg pa;
unsigned u : 1;
unsigned t : 1;
unsigned d : 1;
unsigned b : 1;
- unsigned page_size : 4;
unsigned ar_type : 3;
unsigned ar_pl1 : 2;
unsigned ar_pl2 : 2;
- unsigned entry_valid : 1;
unsigned access_id : 16;
-} hppa_tlb_entry;
+} HPPATLBEntry;
typedef struct CPUArchState {
- target_ureg iaoq_f; /* front */
- target_ureg iaoq_b; /* back, aka next instruction */
+ target_ulong iaoq_f; /* front */
+ target_ulong iaoq_b; /* back, aka next instruction */
- target_ureg gr[32];
+ target_ulong gr[32];
uint64_t fr[32];
uint64_t sr[8]; /* stored shifted into place for gva */
- target_ureg psw; /* All psw bits except the following: */
- target_ureg psw_n; /* boolean */
- target_sreg psw_v; /* in most significant bit */
+ target_ulong psw; /* All psw bits except the following: */
+ target_ulong psw_n; /* boolean */
+ target_long psw_v; /* in most significant bit */
/* Splitting the carry-borrow field into the MSB and "the rest", allows
* for "the rest" to be deleted when it is unused, but the MSB is in use.
@@ -197,8 +195,8 @@ typedef struct CPUArchState {
* host has the appropriate add-with-carry insn to compute the msb).
* Therefore the carry bits are stored as: cb_msb : cb & 0x11111110.
*/
- target_ureg psw_cb; /* in least significant bit of next nibble */
- target_ureg psw_cb_msb; /* boolean */
+ target_ulong psw_cb; /* in least significant bit of next nibble */
+ target_ulong psw_cb_msb; /* boolean */
uint64_t iasq_f;
uint64_t iasq_b;
@@ -206,24 +204,40 @@ typedef struct CPUArchState {
uint32_t fr0_shadow; /* flags, c, ca/cq, rm, d, enables */
float_status fp_status;
- target_ureg cr[32]; /* control registers */
- target_ureg cr_back[2]; /* back of cr17/cr18 */
- target_ureg shadow[7]; /* shadow registers */
+ target_ulong cr[32]; /* control registers */
+ target_ulong cr_back[2]; /* back of cr17/cr18 */
+ target_ulong shadow[7]; /* shadow registers */
- /* ??? The number of entries isn't specified by the architecture. */
-#ifdef TARGET_HPPA64
-#define HPPA_BTLB_FIXED 0 /* BTLBs are not supported in 64-bit machines */
-#else
-#define HPPA_BTLB_FIXED 16
-#endif
-#define HPPA_BTLB_VARIABLE 0
+ /*
+ * During unwind of a memory insn, the base register of the address.
+ * This is used to construct CR_IOR for pa2.0.
+ */
+ uint32_t unwind_breg;
+
+ /*
+ * ??? The number of entries isn't specified by the architecture.
+ * BTLBs are not supported in 64-bit machines.
+ */
+#define PA10_BTLB_FIXED 16
+#define PA10_BTLB_VARIABLE 0
#define HPPA_TLB_ENTRIES 256
-#define HPPA_BTLB_ENTRIES (HPPA_BTLB_FIXED + HPPA_BTLB_VARIABLE)
- /* ??? Implement a unified itlb/dtlb for the moment. */
- /* ??? We should use a more intelligent data structure. */
- hppa_tlb_entry tlb[HPPA_TLB_ENTRIES];
+ /* Index for round-robin tlb eviction. */
uint32_t tlb_last;
+
+ /*
+     * For pa1.x, the partially initialized, still invalid tlb entry
+ * which has had ITLBA performed, but not yet ITLBP.
+ */
+ HPPATLBEntry *tlb_partial;
+
+ /* Linked list of all invalid (unused) tlb entries. */
+ HPPATLBEntry *tlb_unused;
+
+ /* Root of the search tree for all valid tlb entries. */
+ IntervalTreeRoot tlb_root;
+
+ HPPATLBEntry tlb[HPPA_TLB_ENTRIES];
} CPUHPPAState;
/**
@@ -243,13 +257,23 @@ struct ArchCPU {
#include "exec/cpu-all.h"
+static inline bool hppa_is_pa20(CPUHPPAState *env)
+{
+ return object_dynamic_cast(OBJECT(env_cpu(env)), TYPE_HPPA64_CPU) != NULL;
+}
+
+static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
+{
+ return hppa_is_pa20(env) ? 0 : PA10_BTLB_FIXED + PA10_BTLB_VARIABLE;
+}
+
static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
return MMU_USER_IDX;
#else
if (env->psw & (ifetch ? PSW_C : PSW_D)) {
- return PRIV_TO_MMU_IDX(env->iaoq_f & 3);
+ return PRIV_P_TO_MMU_IDX(env->iaoq_f & 3, env->psw & PSW_P);
}
return MMU_PHYS_IDX; /* mmu disabled */
#endif
@@ -259,23 +283,26 @@ void hppa_translate_init(void);
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
-static inline target_ulong hppa_form_gva_psw(target_ureg psw, uint64_t spc,
- target_ureg off)
+static inline target_ulong hppa_form_gva_psw(target_ulong psw, uint64_t spc,
+ target_ulong off)
{
#ifdef CONFIG_USER_ONLY
return off;
#else
- off &= (psw & PSW_W ? 0x3fffffffffffffffull : 0xffffffffull);
+ off &= psw & PSW_W ? MAKE_64BIT_MASK(0, 62) : MAKE_64BIT_MASK(0, 32);
return spc | off;
#endif
}
static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc,
- target_ureg off)
+ target_ulong off)
{
return hppa_form_gva_psw(env->psw, spc, off);
}
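
With TARGET_LONG_BITS fixed at 64, the global virtual address is always space | offset; only the offset width depends on PSW_W (32 bits narrow, 62 bits wide). A worked example of the narrow case (values illustrative, not part of the patch):

    uint64_t spc = 0x12ull << 32;                   /* space, pre-shifted */
    uint64_t off = 0xfffffffff0001000ull;           /* offset with stray high bits */
    uint64_t gva = hppa_form_gva_psw(0, spc, off);  /* PSW_W clear */
    /* gva == 0x00000012f0001000: the offset is truncated to 32 bits and the
     * space bits ORed in; with PSW_W set only bits 62-63 would be dropped. */
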
+hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr);
+hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr);
+
/*
* Since PSW_{I,CB} will never need to be in tb->flags, reuse them.
* TB_FLAG_SR_SAME indicates that SR4 through SR7 all contain the
@@ -299,13 +326,12 @@ static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
*cs_base = env->iaoq_b & -4;
flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
#else
- /* ??? E, T, H, L, B, P bits need to be here, when implemented. */
- flags |= env->psw & (PSW_W | PSW_C | PSW_D);
+ /* ??? E, T, H, L, B bits need to be here, when implemented. */
+ flags |= env->psw & (PSW_W | PSW_C | PSW_D | PSW_P);
flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;
- *pc = (env->psw & PSW_C
- ? hppa_form_gva_psw(env->psw, env->iasq_f, env->iaoq_f & -4)
- : env->iaoq_f & -4);
+ *pc = hppa_form_gva_psw(env->psw, (env->psw & PSW_C ? env->iasq_f : 0),
+ env->iaoq_f & -4);
*cs_base = env->iasq_f;
/* Insert a difference between IAOQ_B and IAOQ_F within the otherwise zero
@@ -313,8 +339,8 @@ static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
which is the primary case we care about -- using goto_tb within a page.
Failure is indicated by a zero difference. */
if (env->iasq_f == env->iasq_b) {
- target_sreg diff = env->iaoq_b - env->iaoq_f;
- if (TARGET_REGISTER_BITS == 32 || diff == (int32_t)diff) {
+ target_long diff = env->iaoq_b - env->iaoq_f;
+ if (diff == (int32_t)diff) {
*cs_base |= (uint32_t)diff;
}
}
@@ -328,8 +354,8 @@ static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
*pflags = flags;
}
-target_ureg cpu_hppa_get_psw(CPUHPPAState *env);
-void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg);
+target_ulong cpu_hppa_get_psw(CPUHPPAState *env);
+void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong);
void cpu_hppa_loaded_fr0(CPUHPPAState *env);
#ifdef CONFIG_USER_ONLY
@@ -342,6 +368,7 @@ int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
#ifndef CONFIG_USER_ONLY
+void hppa_ptlbe(CPUHPPAState *env);
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
@@ -350,7 +377,7 @@ void hppa_cpu_do_interrupt(CPUState *cpu);
bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
int type, hwaddr *pphys, int *pprot,
- hppa_tlb_entry **tlb_entry);
+ HPPATLBEntry **tlb_entry);
extern const MemoryRegionOps hppa_io_eir_ops;
extern const VMStateDescription vmstate_hppa_cpu;
void hppa_cpu_alarm_timer(void *);
@@ -358,4 +385,9 @@ int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr);
#endif
G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra);
+#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
+
+#define cpu_list hppa_cpu_list
+void hppa_cpu_list(void);
+
#endif /* HPPA_CPU_H */
diff --git a/target/hppa/gdbstub.c b/target/hppa/gdbstub.c
index 48a514384f..4a965b38d7 100644
--- a/target/hppa/gdbstub.c
+++ b/target/hppa/gdbstub.c
@@ -21,11 +21,16 @@
#include "cpu.h"
#include "gdbstub/helpers.h"
+/*
+ * GDB 15 only supports PA1.0 via the remote protocol, and ignores any
+ * provided XML, which means that any attempt to provide more data
+ * results in "Remote 'g' packet reply is too long".
+ */
+
int hppa_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
- HPPACPU *cpu = HPPA_CPU(cs);
- CPUHPPAState *env = &cpu->env;
- target_ureg val;
+ CPUHPPAState *env = cpu_env(cs);
+ uint32_t val;
switch (n) {
case 0:
@@ -139,24 +144,13 @@ int hppa_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
break;
}
- if (TARGET_REGISTER_BITS == 64) {
- return gdb_get_reg64(mem_buf, val);
- } else {
- return gdb_get_reg32(mem_buf, val);
- }
+ return gdb_get_reg32(mem_buf, val);
}
int hppa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
- HPPACPU *cpu = HPPA_CPU(cs);
- CPUHPPAState *env = &cpu->env;
- target_ureg val;
-
- if (TARGET_REGISTER_BITS == 64) {
- val = ldq_p(mem_buf);
- } else {
- val = ldl_p(mem_buf);
- }
+ CPUHPPAState *env = cpu_env(cs);
+ uint32_t val = ldl_p(mem_buf);
switch (n) {
case 0:
@@ -166,7 +160,7 @@ int hppa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
env->gr[n] = val;
break;
case 32:
- env->cr[CR_SAR] = val;
+ env->cr[CR_SAR] = val & (hppa_is_pa20(env) ? 63 : 31);
break;
case 33:
env->iaoq_f = val;
@@ -278,5 +272,5 @@ int hppa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
}
break;
}
- return sizeof(target_ureg);
+ return 4;
}
diff --git a/target/hppa/helper.c b/target/hppa/helper.c
index a8d3f456ee..859644c47a 100644
--- a/target/hppa/helper.c
+++ b/target/hppa/helper.c
@@ -25,22 +25,32 @@
#include "exec/helper-proto.h"
#include "qemu/qemu-print.h"
-target_ureg cpu_hppa_get_psw(CPUHPPAState *env)
+target_ulong cpu_hppa_get_psw(CPUHPPAState *env)
{
- target_ureg psw;
+ target_ulong psw;
+ target_ulong mask1 = (target_ulong)-1 / 0xf;
+ target_ulong maskf = (target_ulong)-1 / 0xffff * 0xf;
/* Fold carry bits down to 8 consecutive bits. */
- /* ??? Needs tweaking for hppa64. */
- /* .......b...c...d...e...f...g...h */
- psw = (env->psw_cb >> 4) & 0x01111111;
- /* .......b..bc..cd..de..ef..fg..gh */
+ /* ^^^b^^^c^^^d^^^e^^^f^^^g^^^h^^^i^^^j^^^k^^^l^^^m^^^n^^^o^^^p^^^^ */
+ psw = (env->psw_cb >> 4) & mask1;
+ /* .......b...c...d...e...f...g...h...i...j...k...l...m...n...o...p */
psw |= psw >> 3;
- /* .............bcd............efgh */
- psw |= (psw >> 6) & 0x000f000f;
- /* .........................bcdefgh */
- psw |= (psw >> 12) & 0xf;
- psw |= env->psw_cb_msb << 7;
- psw = (psw & 0xff) << 8;
+ /* .......b..bc..cd..de..ef..fg..gh..hi..ij..jk..kl..lm..mn..no..op */
+ psw |= psw >> 6;
+ psw &= maskf;
+ /* .............bcd............efgh............ijkl............mnop */
+ psw |= psw >> 12;
+ /* .............bcd.........bcdefgh........efghijkl........ijklmnop */
+ psw |= env->psw_cb_msb << 39;
+ /* .............bcd........abcdefgh........efghijkl........ijklmnop */
+
+ /* For hppa64, the two 8-bit fields are discontiguous. */
+ if (hppa_is_pa20(env)) {
+ psw = (psw & 0xff00000000ull) | ((psw & 0xff) << 8);
+ } else {
+ psw = (psw & 0xff) << 8;
+ }
psw |= env->psw_n * PSW_N;
psw |= (env->psw_v < 0) * PSW_V;
@@ -49,16 +59,36 @@ target_ureg cpu_hppa_get_psw(CPUHPPAState *env)
return psw;
}
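
The fold now runs over the full 64-bit psw_cb, collapsing the per-nibble carries in three doubling steps; the two architected 8-bit carry/borrow fields land in PSW bits 8-15 and, for PA2.0, bits 32-39. A quick sanity example derived from the code above (illustrative, not part of the patch):

    /* With every nibble carry set on a pa2.0 cpu, i.e.
     *   env->psw_cb     = 0x1111111111111110   (carries out of nibbles 0..14)
     *   env->psw_cb_msb = 1                    (carry out of nibble 15)
     * cpu_hppa_get_psw() returns both architected fields as all-ones:
     *   (psw >>  8) & 0xff == 0xff             carries for the low 32 bits
     *   (psw >> 32) & 0xff == 0xff             pa2.0-only upper carries
     */
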
-void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg psw)
+void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong psw)
{
- target_ureg old_psw = env->psw;
- target_ureg cb = 0;
+ uint64_t reserved;
+ target_ulong cb = 0;
+
+ /* Do not allow reserved bits to be set. */
+ if (hppa_is_pa20(env)) {
+ reserved = MAKE_64BIT_MASK(40, 24) | MAKE_64BIT_MASK(28, 4);
+ reserved |= PSW_G; /* PA1.x only */
+ reserved |= PSW_E; /* not implemented */
+ } else {
+ reserved = MAKE_64BIT_MASK(32, 32) | MAKE_64BIT_MASK(28, 2);
+ reserved |= PSW_O | PSW_W; /* PA2.0 only */
+ reserved |= PSW_E | PSW_Y | PSW_Z; /* not implemented */
+ }
+ psw &= ~reserved;
env->psw = psw & ~(PSW_N | PSW_V | PSW_CB);
env->psw_n = (psw / PSW_N) & 1;
env->psw_v = -((psw / PSW_V) & 1);
- env->psw_cb_msb = (psw >> 15) & 1;
+ env->psw_cb_msb = (psw >> 39) & 1;
+ cb |= ((psw >> 38) & 1) << 60;
+ cb |= ((psw >> 37) & 1) << 56;
+ cb |= ((psw >> 36) & 1) << 52;
+ cb |= ((psw >> 35) & 1) << 48;
+ cb |= ((psw >> 34) & 1) << 44;
+ cb |= ((psw >> 33) & 1) << 40;
+ cb |= ((psw >> 32) & 1) << 36;
+ cb |= ((psw >> 15) & 1) << 32;
cb |= ((psw >> 14) & 1) << 28;
cb |= ((psw >> 13) & 1) << 24;
cb |= ((psw >> 12) & 1) << 20;
@@ -67,29 +97,30 @@ void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg psw)
cb |= ((psw >> 9) & 1) << 8;
cb |= ((psw >> 8) & 1) << 4;
env->psw_cb = cb;
-
- /* If PSW_P changes, it affects how we translate addresses. */
- if ((psw ^ old_psw) & PSW_P) {
-#ifndef CONFIG_USER_ONLY
- tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
-#endif
- }
}
void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
- HPPACPU *cpu = HPPA_CPU(cs);
- CPUHPPAState *env = &cpu->env;
- target_ureg psw = cpu_hppa_get_psw(env);
- target_ureg psw_cb;
+ CPUHPPAState *env = cpu_env(cs);
+ target_ulong psw = cpu_hppa_get_psw(env);
+ target_ulong psw_cb;
char psw_c[20];
- int i;
+ int i, w;
+ uint64_t m;
+
+ if (hppa_is_pa20(env)) {
+ w = 16;
+ m = UINT64_MAX;
+ } else {
+ w = 8;
+ m = UINT32_MAX;
+ }
qemu_fprintf(f, "IA_F " TARGET_FMT_lx " IA_B " TARGET_FMT_lx
- " IIR " TREG_FMT_lx "\n",
+ " IIR %0*" PRIx64 "\n",
hppa_form_gva_psw(psw, env->iasq_f, env->iaoq_f),
hppa_form_gva_psw(psw, env->iasq_b, env->iaoq_b),
- env->cr[CR_IIR]);
+ w, m & env->cr[CR_IIR]);
psw_c[0] = (psw & PSW_W ? 'W' : '-');
psw_c[1] = (psw & PSW_E ? 'E' : '-');
@@ -110,13 +141,15 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
psw_c[16] = (psw & PSW_D ? 'D' : '-');
psw_c[17] = (psw & PSW_I ? 'I' : '-');
psw_c[18] = '\0';
- psw_cb = ((env->psw_cb >> 4) & 0x01111111) | (env->psw_cb_msb << 28);
+ psw_cb = ((env->psw_cb >> 4) & 0x1111111111111111ull)
+ | (env->psw_cb_msb << 60);
- qemu_fprintf(f, "PSW " TREG_FMT_lx " CB " TREG_FMT_lx " %s\n",
- psw, psw_cb, psw_c);
+ qemu_fprintf(f, "PSW %0*" PRIx64 " CB %0*" PRIx64 " %s\n",
+ w, m & psw, w, m & psw_cb, psw_c);
for (i = 0; i < 32; i++) {
- qemu_fprintf(f, "GR%02d " TREG_FMT_lx "%c", i, env->gr[i],
+ qemu_fprintf(f, "GR%02d %0*" PRIx64 "%c",
+ i, w, m & env->gr[i],
(i & 3) == 3 ? '\n' : ' ');
}
#ifndef CONFIG_USER_ONLY
diff --git a/target/hppa/helper.h b/target/hppa/helper.h
index 647f043c85..20698f68ed 100644
--- a/target/hppa/helper.h
+++ b/target/hppa/helper.h
@@ -1,24 +1,28 @@
-#if TARGET_REGISTER_BITS == 64
-# define dh_alias_tr i64
-# define dh_typecode_tr dh_typecode_i64
-#else
-# define dh_alias_tr i32
-# define dh_typecode_tr dh_typecode_i32
-#endif
-#define dh_ctype_tr target_ureg
-
DEF_HELPER_2(excp, noreturn, env, int)
-DEF_HELPER_FLAGS_2(tsv, TCG_CALL_NO_WG, void, env, tr)
-DEF_HELPER_FLAGS_2(tcond, TCG_CALL_NO_WG, void, env, tr)
+DEF_HELPER_FLAGS_2(tsv, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_2(tcond, TCG_CALL_NO_WG, void, env, tl)
-DEF_HELPER_FLAGS_3(stby_b, TCG_CALL_NO_WG, void, env, tl, tr)
-DEF_HELPER_FLAGS_3(stby_b_parallel, TCG_CALL_NO_WG, void, env, tl, tr)
-DEF_HELPER_FLAGS_3(stby_e, TCG_CALL_NO_WG, void, env, tl, tr)
-DEF_HELPER_FLAGS_3(stby_e_parallel, TCG_CALL_NO_WG, void, env, tl, tr)
+DEF_HELPER_FLAGS_3(stby_b, TCG_CALL_NO_WG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(stby_b_parallel, TCG_CALL_NO_WG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(stby_e, TCG_CALL_NO_WG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(stby_e_parallel, TCG_CALL_NO_WG, void, env, tl, tl)
+
+DEF_HELPER_FLAGS_3(stdby_b, TCG_CALL_NO_WG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(stdby_b_parallel, TCG_CALL_NO_WG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(stdby_e, TCG_CALL_NO_WG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(stdby_e_parallel, TCG_CALL_NO_WG, void, env, tl, tl)
DEF_HELPER_FLAGS_1(ldc_check, TCG_CALL_NO_RWG, void, tl)
-DEF_HELPER_FLAGS_4(probe, TCG_CALL_NO_WG, tr, env, tl, i32, i32)
+DEF_HELPER_FLAGS_2(hadd_ss, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(hadd_us, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(havg, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_3(hshladd, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32)
+DEF_HELPER_FLAGS_3(hshradd, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32)
+DEF_HELPER_FLAGS_2(hsub_ss, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(hsub_us, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_4(probe, TCG_CALL_NO_WG, tl, env, tl, i32, i32)
DEF_HELPER_FLAGS_1(loaded_fr0, TCG_CALL_NO_RWG, void, env)
@@ -77,7 +81,7 @@ DEF_HELPER_FLAGS_4(fmpynfadd_s, TCG_CALL_NO_RWG, i32, env, i32, i32, i32)
DEF_HELPER_FLAGS_4(fmpyfadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fmpynfadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
-DEF_HELPER_FLAGS_0(read_interval_timer, TCG_CALL_NO_RWG, tr)
+DEF_HELPER_FLAGS_0(read_interval_timer, TCG_CALL_NO_RWG, tl)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_1(halt, noreturn, env)
@@ -85,15 +89,18 @@ DEF_HELPER_1(reset, noreturn, env)
DEF_HELPER_1(getshadowregs, void, env)
DEF_HELPER_1(rfi, void, env)
DEF_HELPER_1(rfi_r, void, env)
-DEF_HELPER_FLAGS_2(write_interval_timer, TCG_CALL_NO_RWG, void, env, tr)
-DEF_HELPER_FLAGS_2(write_eirr, TCG_CALL_NO_RWG, void, env, tr)
-DEF_HELPER_FLAGS_2(write_eiem, TCG_CALL_NO_RWG, void, env, tr)
-DEF_HELPER_FLAGS_2(swap_system_mask, TCG_CALL_NO_RWG, tr, env, tr)
-DEF_HELPER_FLAGS_3(itlba, TCG_CALL_NO_RWG, void, env, tl, tr)
-DEF_HELPER_FLAGS_3(itlbp, TCG_CALL_NO_RWG, void, env, tl, tr)
+DEF_HELPER_FLAGS_2(write_interval_timer, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(write_eirr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(write_eiem, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(swap_system_mask, TCG_CALL_NO_RWG, tl, env, tl)
+DEF_HELPER_FLAGS_3(itlba_pa11, TCG_CALL_NO_RWG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(itlbp_pa11, TCG_CALL_NO_RWG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(idtlbt_pa20, TCG_CALL_NO_RWG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(iitlbt_pa20, TCG_CALL_NO_RWG, void, env, tl, tl)
DEF_HELPER_FLAGS_2(ptlb, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(ptlb_l, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_1(ptlbe, TCG_CALL_NO_RWG, void, env)
-DEF_HELPER_FLAGS_2(lpa, TCG_CALL_NO_WG, tr, env, tl)
+DEF_HELPER_FLAGS_2(lpa, TCG_CALL_NO_WG, tl, env, tl)
DEF_HELPER_FLAGS_1(change_prot_id, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_1(diag_btlb, void, env)
#endif
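
The new h* helpers back the PA2.0 MAX-2 halfword (parallel 16-bit) arithmetic decoded in insns.decode below. As a rough sketch of the intended per-lane behaviour -- an assumption about the semantics, not the patch's actual implementation -- signed-saturating halfword add would look like:

    static uint64_t sketch_hadd_ss(uint64_t r1, uint64_t r2)
    {
        uint64_t ret = 0;
        for (int i = 0; i < 64; i += 16) {
            int32_t sum = (int16_t)(r1 >> i) + (int16_t)(r2 >> i);
            if (sum > INT16_MAX) {
                sum = INT16_MAX;            /* saturate high */
            } else if (sum < INT16_MIN) {
                sum = INT16_MIN;            /* saturate low */
            }
            ret |= (uint64_t)(uint16_t)sum << i;
        }
        return ret;
    }
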
diff --git a/target/hppa/insns.decode b/target/hppa/insns.decode
index aebe03ccfd..f5a3f02fd1 100644
--- a/target/hppa/insns.decode
+++ b/target/hppa/insns.decode
@@ -46,11 +46,16 @@
%im5_0 0:s1 1:4
%im5_16 16:s1 17:4
+%len5 0:5 !function=assemble_6
+%len6_8 8:1 0:5 !function=assemble_6
+%len6_12 12:1 0:5 !function=assemble_6
+%cpos6_11 11:1 5:5
%ma_to_m 5:1 13:1 !function=ma_to_m
%ma2_to_m 2:2 !function=ma_to_m
%pos_to_m 0:1 !function=pos_to_m
%neg_to_m 0:1 !function=neg_to_m
%a_to_m 2:1 !function=neg_to_m
+%cmpbid_c 13:2 !function=cmpbid_c
####
# Argument set definitions
@@ -59,28 +64,43 @@
# All insns that need to form a virtual address should use this set.
&ldst t b x disp sp m scale size
-&rr_cf t r cf
+&rr_cf_d t r cf d
+&rrr t r1 r2
&rrr_cf t r1 r2 cf
-&rrr_cf_sh t r1 r2 cf sh
+&rrr_cf_d t r1 r2 cf d
+&rrr_sh t r1 r2 sh
+&rrr_cf_d_sh t r1 r2 cf d sh
+&rri t r i
&rri_cf t r i cf
+&rri_cf_d t r i cf d
&rrb_c_f disp n c f r1 r2
+&rrb_c_d_f disp n c d f r1 r2
&rib_c_f disp n c f r i
+&rib_c_d_f disp n c d f r i
####
# Format definitions
####
-@rr_cf ...... r:5 ..... cf:4 ....... t:5 &rr_cf
+@rr_cf_d ...... r:5 ..... cf:4 ...... d:1 t:5 &rr_cf_d
+@rrr ...... r2:5 r1:5 .... ....... t:5 &rrr
@rrr_cf ...... r2:5 r1:5 cf:4 ....... t:5 &rrr_cf
-@rrr_cf_sh ...... r2:5 r1:5 cf:4 .... sh:2 . t:5 &rrr_cf_sh
-@rrr_cf_sh0 ...... r2:5 r1:5 cf:4 ....... t:5 &rrr_cf_sh sh=0
+@rrr_cf_d ...... r2:5 r1:5 cf:4 ...... d:1 t:5 &rrr_cf_d
+@rrr_sh ...... r2:5 r1:5 ........ sh:2 . t:5 &rrr_sh
+@rrr_cf_d_sh ...... r2:5 r1:5 cf:4 .... sh:2 d:1 t:5 &rrr_cf_d_sh
+@rrr_cf_d_sh0 ...... r2:5 r1:5 cf:4 ...... d:1 t:5 &rrr_cf_d_sh sh=0
@rri_cf ...... r:5 t:5 cf:4 . ........... &rri_cf i=%lowsign_11
+@rri_cf_d ...... r:5 t:5 cf:4 d:1 ........... &rri_cf_d i=%lowsign_11
@rrb_cf ...... r2:5 r1:5 c:3 ........... n:1 . \
&rrb_c_f disp=%assemble_12
+@rrb_cdf ...... r2:5 r1:5 c:3 ........... n:1 . \
+ &rrb_c_d_f disp=%assemble_12
@rib_cf ...... r:5 ..... c:3 ........... n:1 . \
&rib_c_f disp=%assemble_12 i=%im5_16
+@rib_cdf ...... r:5 ..... c:3 ........... n:1 . \
+ &rib_c_d_f disp=%assemble_12 i=%im5_16
####
# System
@@ -130,6 +150,7 @@ nop_addrx 000001 ..... ..... -- 01001110 . 00000 @addrx # pdc
probe 000001 b:5 ri:5 sp:2 imm:1 100011 write:1 0 t:5
+# pa1.x tlb insert instructions
ixtlbx 000001 b:5 r:5 sp:2 0100000 addr:1 0 00000 data=1
ixtlbx 000001 b:5 r:5 ... 000000 addr:1 0 00000 \
sp=%assemble_sr3x data=0
@@ -137,9 +158,26 @@ ixtlbx 000001 b:5 r:5 ... 000000 addr:1 0 00000 \
# pcxl and pcxl2 Fast TLB Insert instructions
ixtlbxf 000001 00000 r:5 00 0 data:1 01000 addr:1 0 00000
-pxtlbx 000001 b:5 x:5 sp:2 0100100 local:1 m:1 ----- data=1
-pxtlbx 000001 b:5 x:5 ... 000100 local:1 m:1 ----- \
- sp=%assemble_sr3x data=0
+# pa2.0 tlb insert idtlbt and iitlbt instructions
+ixtlbt 000001 r2:5 r1:5 000 data:1 100000 0 00000 # idtlbt
+
+# pdtlb, pitlb
+pxtlb 000001 b:5 x:5 sp:2 01001000 m:1 ----- \
+ &ldst disp=0 scale=0 size=0 t=0
+pxtlb 000001 b:5 x:5 ... 0001000 m:1 ----- \
+ &ldst disp=0 scale=0 size=0 t=0 sp=%assemble_sr3x
+
+# ... pa20 local
+pxtlb_l 000001 b:5 x:5 sp:2 01011000 m:1 ----- \
+ &ldst disp=0 scale=0 size=0 t=0
+pxtlb_l 000001 b:5 x:5 ... 0011000 m:1 ----- \
+ &ldst disp=0 scale=0 size=0 t=0 sp=%assemble_sr3x
+
+# pdtlbe, pitlbe
+pxtlbe 000001 b:5 x:5 sp:2 01001001 m:1 ----- \
+ &ldst disp=0 scale=0 size=0 t=0
+pxtlbe 000001 b:5 x:5 ... 0001001 m:1 ----- \
+ &ldst disp=0 scale=0 size=0 t=0 sp=%assemble_sr3x
lpa 000001 b:5 x:5 sp:2 01001101 m:1 t:5 \
&ldst disp=0 scale=0 size=0
@@ -150,30 +188,36 @@ lci 000001 ----- ----- -- 01001100 0 t:5
# Arith/Log
####
-andcm 000010 ..... ..... .... 000000 - ..... @rrr_cf
-and 000010 ..... ..... .... 001000 - ..... @rrr_cf
-or 000010 ..... ..... .... 001001 - ..... @rrr_cf
-xor 000010 ..... ..... .... 001010 0 ..... @rrr_cf
-uxor 000010 ..... ..... .... 001110 0 ..... @rrr_cf
+andcm 000010 ..... ..... .... 000000 . ..... @rrr_cf_d
+and 000010 ..... ..... .... 001000 . ..... @rrr_cf_d
+or 000010 ..... ..... .... 001001 . ..... @rrr_cf_d
+xor 000010 ..... ..... .... 001010 . ..... @rrr_cf_d
+uxor 000010 ..... ..... .... 001110 . ..... @rrr_cf_d
ds 000010 ..... ..... .... 010001 0 ..... @rrr_cf
-cmpclr 000010 ..... ..... .... 100010 0 ..... @rrr_cf
-uaddcm 000010 ..... ..... .... 100110 0 ..... @rrr_cf
-uaddcm_tc 000010 ..... ..... .... 100111 0 ..... @rrr_cf
-dcor 000010 ..... 00000 .... 101110 0 ..... @rr_cf
-dcor_i 000010 ..... 00000 .... 101111 0 ..... @rr_cf
-
-add 000010 ..... ..... .... 0110.. - ..... @rrr_cf_sh
-add_l 000010 ..... ..... .... 1010.. 0 ..... @rrr_cf_sh
-add_tsv 000010 ..... ..... .... 1110.. 0 ..... @rrr_cf_sh
-add_c 000010 ..... ..... .... 011100 0 ..... @rrr_cf_sh0
-add_c_tsv 000010 ..... ..... .... 111100 0 ..... @rrr_cf_sh0
-
-sub 000010 ..... ..... .... 010000 - ..... @rrr_cf
-sub_tsv 000010 ..... ..... .... 110000 0 ..... @rrr_cf
-sub_tc 000010 ..... ..... .... 010011 0 ..... @rrr_cf
-sub_tsv_tc 000010 ..... ..... .... 110011 0 ..... @rrr_cf
-sub_b 000010 ..... ..... .... 010100 0 ..... @rrr_cf
-sub_b_tsv 000010 ..... ..... .... 110100 0 ..... @rrr_cf
+cmpclr 000010 ..... ..... .... 100010 . ..... @rrr_cf_d
+uaddcm 000010 ..... ..... .... 100110 . ..... @rrr_cf_d
+uaddcm_tc 000010 ..... ..... .... 100111 . ..... @rrr_cf_d
+dcor 000010 ..... 00000 .... 101110 . ..... @rr_cf_d
+dcor_i 000010 ..... 00000 .... 101111 . ..... @rr_cf_d
+
+add 000010 ..... ..... .... 0110.. . ..... @rrr_cf_d_sh
+add_l 000010 ..... ..... .... 1010.. . ..... @rrr_cf_d_sh
+add_tsv 000010 ..... ..... .... 1110.. . ..... @rrr_cf_d_sh
+{
+ add_c 000010 ..... ..... .... 011100 . ..... @rrr_cf_d_sh0
+ hshladd 000010 ..... ..... 0000 0111.. 0 ..... @rrr_sh
+}
+add_c_tsv 000010 ..... ..... .... 111100 . ..... @rrr_cf_d_sh0
+
+sub 000010 ..... ..... .... 010000 . ..... @rrr_cf_d
+sub_tsv 000010 ..... ..... .... 110000 . ..... @rrr_cf_d
+sub_tc 000010 ..... ..... .... 010011 . ..... @rrr_cf_d
+sub_tsv_tc 000010 ..... ..... .... 110011 . ..... @rrr_cf_d
+{
+ sub_b 000010 ..... ..... .... 010100 . ..... @rrr_cf_d
+ hshradd 000010 ..... ..... 0000 0101.. 0 ..... @rrr_sh
+}
+sub_b_tsv 000010 ..... ..... .... 110100 . ..... @rrr_cf_d
ldil 001000 t:5 ..................... i=%assemble_21
addil 001010 r:5 ..................... i=%assemble_21
@@ -187,7 +231,28 @@ addi_tc_tsv 101100 ..... ..... .... 1 ........... @rri_cf
subi 100101 ..... ..... .... 0 ........... @rri_cf
subi_tsv 100101 ..... ..... .... 1 ........... @rri_cf
-cmpiclr 100100 ..... ..... .... 0 ........... @rri_cf
+cmpiclr 100100 ..... ..... .... . ........... @rri_cf_d
+
+hadd 000010 ..... ..... 00000011 11 0 ..... @rrr
+hadd_ss 000010 ..... ..... 00000011 01 0 ..... @rrr
+hadd_us 000010 ..... ..... 00000011 00 0 ..... @rrr
+
+havg 000010 ..... ..... 00000010 11 0 ..... @rrr
+
+hshl 111110 00000 r:5 100010 i:4 0 t:5 &rri
+hshr_s 111110 r:5 00000 110011 i:4 0 t:5 &rri
+hshr_u 111110 r:5 00000 110010 i:4 0 t:5 &rri
+
+hsub 000010 ..... ..... 00000001 11 0 ..... @rrr
+hsub_ss 000010 ..... ..... 00000001 01 0 ..... @rrr
+hsub_us 000010 ..... ..... 00000001 00 0 ..... @rrr
+
+mixh_l 111110 ..... ..... 1 00 00100000 ..... @rrr
+mixh_r 111110 ..... ..... 1 10 00100000 ..... @rrr
+mixw_l 111110 ..... ..... 1 00 00000000 ..... @rrr
+mixw_r 111110 ..... ..... 1 10 00000000 ..... @rrr
+
+permh 111110 r1:5 r2:5 0 c0:2 0 c1:2 c2:2 c3:2 0 t:5
####
# Index Mem
@@ -204,10 +269,16 @@ ld 000011 ..... ..... .. . 0 -- 00 size:2 ...... @ldstx
st 000011 ..... ..... .. . 1 -- 10 size:2 ...... @stim5
ldc 000011 ..... ..... .. . 1 -- 0111 ...... @ldim5 size=2
ldc 000011 ..... ..... .. . 0 -- 0111 ...... @ldstx size=2
+ldc 000011 ..... ..... .. . 1 -- 0101 ...... @ldim5 size=3
+ldc 000011 ..... ..... .. . 0 -- 0101 ...... @ldstx size=3
lda 000011 ..... ..... .. . 1 -- 0110 ...... @ldim5 size=2
lda 000011 ..... ..... .. . 0 -- 0110 ...... @ldstx size=2
+lda 000011 ..... ..... .. . 1 -- 0100 ...... @ldim5 size=3
+lda 000011 ..... ..... .. . 0 -- 0100 ...... @ldstx size=3
sta 000011 ..... ..... .. . 1 -- 1110 ...... @stim5 size=2
+sta 000011 ..... ..... .. . 1 -- 1111 ...... @stim5 size=3
stby 000011 b:5 r:5 sp:2 a:1 1 -- 1100 m:1 ..... disp=%im5_0
+stdby 000011 b:5 r:5 sp:2 a:1 1 -- 1101 m:1 ..... disp=%im5_0
@fldstwx ...... b:5 x:5 sp:2 scale:1 ....... m:1 ..... \
&ldst t=%rt64 disp=0 size=2
@@ -233,6 +304,8 @@ fstd 001011 ..... ..... .. . 1 -- 100 0 . ..... @fldstdi
# Offset Mem
####
+@ldstim11 ...... b:5 t:5 sp:2 .............. \
+ &ldst disp=%assemble_11a m=%ma2_to_m x=0 scale=0 size=3
@ldstim14 ...... b:5 t:5 sp:2 .............. \
&ldst disp=%lowsign_14 x=0 scale=0 m=0
@ldstim14m ...... b:5 t:5 sp:2 .............. \
@@ -264,11 +337,11 @@ fstw 011110 b:5 ..... sp:2 .............. \
fstw 011111 b:5 ..... sp:2 ...........0.. \
&ldst disp=%assemble_12a t=%rm64 m=0 x=0 scale=0 size=2
-fldd 010100 b:5 t:5 sp:2 .......... .. 1 . \
- &ldst disp=%assemble_11a m=%ma2_to_m x=0 scale=0 size=3
+ld 010100 ..... ..... .. ............0. @ldstim11
+fldd 010100 ..... ..... .. ............1. @ldstim11
-fstd 011100 b:5 t:5 sp:2 .......... .. 1 . \
- &ldst disp=%assemble_11a m=%ma2_to_m x=0 scale=0 size=3
+st 011100 ..... ..... .. ............0. @ldstim11
+fstd 011100 ..... ..... .. ............1. @ldstim11
####
# Floating-point Multiply Add
@@ -286,16 +359,20 @@ fmpysub_d 100110 ..... ..... ..... ..... 1 ..... @mpyadd
# Conditional Branches
####
-bb_sar 110000 00000 r:5 c:1 10 ........... n:1 . disp=%assemble_12
-bb_imm 110001 p:5 r:5 c:1 10 ........... n:1 . disp=%assemble_12
+bb_sar 110000 00000 r:5 c:1 1 d:1 ........... n:1 . disp=%assemble_12
+bb_imm 110001 p:5 r:5 c:1 1 d:1 ........... n:1 . disp=%assemble_12
movb 110010 ..... ..... ... ........... . . @rrb_cf f=0
movbi 110011 ..... ..... ... ........... . . @rib_cf f=0
-cmpb 100000 ..... ..... ... ........... . . @rrb_cf f=0
-cmpb 100010 ..... ..... ... ........... . . @rrb_cf f=1
-cmpbi 100001 ..... ..... ... ........... . . @rib_cf f=0
-cmpbi 100011 ..... ..... ... ........... . . @rib_cf f=1
+cmpb 100000 ..... ..... ... ........... . . @rrb_cdf d=0 f=0
+cmpb 100010 ..... ..... ... ........... . . @rrb_cdf d=0 f=1
+cmpb 100111 ..... ..... ... ........... . . @rrb_cdf d=1 f=0
+cmpb 101111 ..... ..... ... ........... . . @rrb_cdf d=1 f=1
+cmpbi 100001 ..... ..... ... ........... . . @rib_cdf d=0 f=0
+cmpbi 100011 ..... ..... ... ........... . . @rib_cdf d=0 f=1
+cmpbi 111011 r:5 ..... f:1 .. ........... n:1 . \
+ &rib_c_d_f d=1 disp=%assemble_12 c=%cmpbid_c i=%im5_16
addb 101000 ..... ..... ... ........... . . @rrb_cf f=0
addb 101010 ..... ..... ... ........... . . @rrb_cf f=1
@@ -306,16 +383,28 @@ addbi 101011 ..... ..... ... ........... . . @rib_cf f=1
# Shift, Extract, Deposit
####
-shrpw_sar 110100 r2:5 r1:5 c:3 00 0 00000 t:5
-shrpw_imm 110100 r2:5 r1:5 c:3 01 0 cpos:5 t:5
-
-extrw_sar 110100 r:5 t:5 c:3 10 se:1 00000 clen:5
-extrw_imm 110100 r:5 t:5 c:3 11 se:1 pos:5 clen:5
-
-depw_sar 110101 t:5 r:5 c:3 00 nz:1 00000 clen:5
-depw_imm 110101 t:5 r:5 c:3 01 nz:1 cpos:5 clen:5
-depwi_sar 110101 t:5 ..... c:3 10 nz:1 00000 clen:5 i=%im5_16
-depwi_imm 110101 t:5 ..... c:3 11 nz:1 cpos:5 clen:5 i=%im5_16
+shrp_sar 110100 r2:5 r1:5 c:3 00 0 d:1 0000 t:5
+shrp_imm 110100 r2:5 r1:5 c:3 01 0 cpos:5 t:5 d=0
+shrp_imm 110100 r2:5 r1:5 c:3 0. 1 ..... t:5 \
+ d=1 cpos=%cpos6_11
+
+extr_sar 110100 r:5 t:5 c:3 10 se:1 00 000 ..... d=0 len=%len5
+extr_sar 110100 r:5 t:5 c:3 10 se:1 1. 000 ..... d=1 len=%len6_8
+extr_imm 110100 r:5 t:5 c:3 11 se:1 pos:5 ..... d=0 len=%len5
+extr_imm 110110 r:5 t:5 c:3 .. se:1 ..... ..... \
+ d=1 len=%len6_12 pos=%cpos6_11
+
+dep_sar 110101 t:5 r:5 c:3 00 nz:1 00 000 ..... d=0 len=%len5
+dep_sar 110101 t:5 r:5 c:3 00 nz:1 1. 000 ..... d=1 len=%len6_8
+dep_imm 110101 t:5 r:5 c:3 01 nz:1 cpos:5 ..... d=0 len=%len5
+dep_imm 111100 t:5 r:5 c:3 .. nz:1 ..... ..... \
+ d=1 len=%len6_12 cpos=%cpos6_11
+depi_sar 110101 t:5 ..... c:3 10 nz:1 d:1 . 000 ..... \
+ i=%im5_16 len=%len6_8
+depi_imm 110101 t:5 ..... c:3 11 nz:1 cpos:5 ..... \
+ d=0 i=%im5_16 len=%len5
+depi_imm 111101 t:5 ..... c:3 .. nz:1 ..... ..... \
+ d=1 i=%im5_16 len=%len6_12 cpos=%cpos6_11
####
# Branch External
@@ -343,6 +432,8 @@ bl 111010 ..... ..... 101 ........... n:1 . &BL l=2 \
disp=%assemble_22
b_gate 111010 ..... ..... 001 ........... . . @bl
blr 111010 l:5 x:5 010 00000000000 n:1 0
+nopbts 111010 00000 00000 010 0---------1 0 1 # clrbts/popbts
+nopbts 111010 00000 ----- 010 00000000000 0 1 # pushbts/pushnom
bv 111010 b:5 x:5 110 00000000000 n:1 0
bve 111010 b:5 00000 110 10000000000 n:1 - l=0
bve 111010 b:5 00000 111 10000000000 n:1 - l=2
@@ -384,7 +475,7 @@ fmpyfadd_d 101110 rm1:5 rm2:5 ... 0 1 ..0 0 0 neg:1 t:5 ra3=%rc32
@f0e_f_3 ...... ..... ..... ... .0 110 ..0 ..... \
&fclass3 r1=%ra64 r2=%rb64 t=%rt64
-@f0e_d_3 ...... r1:5 r2:5 ... 01 110 000 t:5
+@f0e_d_3 ...... r1:5 r2:5 ... 01 110 000 t:5 &fclass3
# Floating point class 0
diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c
index 3ab9934a1d..467ee7daf5 100644
--- a/target/hppa/int_helper.c
+++ b/target/hppa/int_helper.c
@@ -52,9 +52,17 @@ static void io_eir_write(void *opaque, hwaddr addr,
uint64_t data, unsigned size)
{
HPPACPU *cpu = opaque;
- int le_bit = ~data & (TARGET_REGISTER_BITS - 1);
+ CPUHPPAState *env = &cpu->env;
+ int widthm1 = 31;
+ int le_bit;
+
+ /* The default PSW.W controls the width of EIRR. */
+ if (hppa_is_pa20(env) && env->cr[CR_PSW_DEFAULT] & PDC_PSW_WIDE_BIT) {
+ widthm1 = 63;
+ }
+ le_bit = ~data & widthm1;
- cpu->env.cr[CR_EIRR] |= (target_ureg)1 << le_bit;
+ env->cr[CR_EIRR] |= 1ull << le_bit;
eval_interrupt(cpu);
}
@@ -73,7 +81,7 @@ void hppa_cpu_alarm_timer(void *opaque)
io_eir_write(opaque, 0, 0, 4);
}
-void HELPER(write_eirr)(CPUHPPAState *env, target_ureg val)
+void HELPER(write_eirr)(CPUHPPAState *env, target_ulong val)
{
env->cr[CR_EIRR] &= ~val;
qemu_mutex_lock_iothread();
@@ -81,7 +89,7 @@ void HELPER(write_eirr)(CPUHPPAState *env, target_ureg val)
qemu_mutex_unlock_iothread();
}
-void HELPER(write_eiem)(CPUHPPAState *env, target_ureg val)
+void HELPER(write_eiem)(CPUHPPAState *env, target_ulong val)
{
env->cr[CR_EIEM] = val;
qemu_mutex_lock_iothread();
@@ -94,25 +102,37 @@ void hppa_cpu_do_interrupt(CPUState *cs)
HPPACPU *cpu = HPPA_CPU(cs);
CPUHPPAState *env = &cpu->env;
int i = cs->exception_index;
- target_ureg iaoq_f = env->iaoq_f;
- target_ureg iaoq_b = env->iaoq_b;
- uint64_t iasq_f = env->iasq_f;
- uint64_t iasq_b = env->iasq_b;
-
- target_ureg old_psw;
+ uint64_t old_psw;
/* As documented in pa2.0 -- interruption handling. */
/* step 1 */
env->cr[CR_IPSW] = old_psw = cpu_hppa_get_psw(env);
- /* step 2 -- note PSW_W == 0 for !HPPA64. */
- cpu_hppa_put_psw(env, PSW_W | (i == EXCP_HPMC ? PSW_M : 0));
+ /* step 2 -- Note PSW_W is masked out again for pa1.x */
+ cpu_hppa_put_psw(env,
+ (env->cr[CR_PSW_DEFAULT] & PDC_PSW_WIDE_BIT ? PSW_W : 0) |
+ (i == EXCP_HPMC ? PSW_M : 0));
/* step 3 */
- env->cr[CR_IIASQ] = iasq_f >> 32;
- env->cr_back[0] = iasq_b >> 32;
- env->cr[CR_IIAOQ] = iaoq_f;
- env->cr_back[1] = iaoq_b;
+ /*
+ * For pa1.x, IIASQ is simply a copy of IASQ.
+ * For pa2.0, IIASQ is the top bits of the virtual address,
+ * or zero if translation is disabled.
+ */
+ if (!hppa_is_pa20(env)) {
+ env->cr[CR_IIASQ] = env->iasq_f >> 32;
+ env->cr_back[0] = env->iasq_b >> 32;
+ } else if (old_psw & PSW_C) {
+ env->cr[CR_IIASQ] =
+ hppa_form_gva_psw(old_psw, env->iasq_f, env->iaoq_f) >> 32;
+ env->cr_back[0] =
+            hppa_form_gva_psw(old_psw, env->iasq_b, env->iaoq_b) >> 32;
+ } else {
+ env->cr[CR_IIASQ] = 0;
+ env->cr_back[0] = 0;
+ }
+ env->cr[CR_IIAOQ] = env->iaoq_f;
+ env->cr_back[1] = env->iaoq_b;
if (old_psw & PSW_Q) {
/* step 5 */
@@ -145,14 +165,13 @@ void hppa_cpu_do_interrupt(CPUState *cs)
/* ??? An alternate fool-proof method would be to store the
instruction data into the unwind info. That's probably
a bit too much in the way of extra storage required. */
- vaddr vaddr;
- hwaddr paddr;
+ vaddr vaddr = env->iaoq_f & -4;
+ hwaddr paddr = vaddr;
- paddr = vaddr = iaoq_f & -4;
if (old_psw & PSW_C) {
int prot, t;
- vaddr = hppa_form_gva_psw(old_psw, iasq_f, vaddr);
+ vaddr = hppa_form_gva_psw(old_psw, env->iasq_f, vaddr);
t = hppa_get_physical_address(env, vaddr, MMU_KERNEL_IDX,
0, &paddr, &prot, NULL);
if (t >= 0) {
@@ -182,14 +201,14 @@ void hppa_cpu_do_interrupt(CPUState *cs)
/* step 7 */
if (i == EXCP_TOC) {
- env->iaoq_f = FIRMWARE_START;
+ env->iaoq_f = hppa_form_gva(env, 0, FIRMWARE_START);
/* help SeaBIOS and provide iaoq_b and iasq_back in shadow regs */
env->gr[24] = env->cr_back[0];
env->gr[25] = env->cr_back[1];
} else {
- env->iaoq_f = env->cr[CR_IVA] + 32 * i;
+ env->iaoq_f = hppa_form_gva(env, 0, env->cr[CR_IVA] + 32 * i);
}
- env->iaoq_b = env->iaoq_f + 4;
+ env->iaoq_b = hppa_form_gva(env, 0, env->iaoq_f + 4);
env->iasq_f = 0;
env->iasq_b = 0;
@@ -239,14 +258,10 @@ void hppa_cpu_do_interrupt(CPUState *cs)
snprintf(unknown, sizeof(unknown), "unknown %d", i);
name = unknown;
}
- qemu_log("INT %6d: %s @ " TARGET_FMT_lx "," TARGET_FMT_lx
- " -> " TREG_FMT_lx " " TARGET_FMT_lx "\n",
- ++count, name,
- hppa_form_gva(env, iasq_f, iaoq_f),
- hppa_form_gva(env, iasq_b, iaoq_b),
- env->iaoq_f,
- hppa_form_gva(env, (uint64_t)env->cr[CR_ISR] << 32,
- env->cr[CR_IOR]));
+ qemu_log("INT %6d: %s @ " TARGET_FMT_lx ":" TARGET_FMT_lx
+ " for " TARGET_FMT_lx ":" TARGET_FMT_lx "\n",
+ ++count, name, env->cr[CR_IIASQ], env->cr[CR_IIAOQ],
+ env->cr[CR_ISR], env->cr[CR_IOR]);
}
cs->exception_index = -1;
}
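
EIRR bits are numbered big-endian (bit 0 is the most significant), so io_eir_write converts the interrupt number written by the device into a host bit position with ~data & widthm1; the only functional change here is that the register becomes 64 bits wide when the firmware's default PSW has the W bit set. Worked values (illustrative, not part of the patch):

    /* le_bit = ~data & widthm1, i.e. (width - 1) - data for in-range data:
     *   narrow (widthm1 = 31): data = 0  -> host bit 31 set in EIRR (PA bit 0)
     *                          data = 31 -> host bit 0  set in EIRR (PA bit 31)
     *   wide   (widthm1 = 63): data = 0  -> host bit 63 set in EIRR
     */
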
diff --git a/target/hppa/machine.c b/target/hppa/machine.c
index 905991d7f9..2f8e8cc5a1 100644
--- a/target/hppa/machine.c
+++ b/target/hppa/machine.c
@@ -21,33 +21,12 @@
#include "cpu.h"
#include "migration/cpu.h"
-#if TARGET_REGISTER_BITS == 64
-#define qemu_put_betr qemu_put_be64
-#define qemu_get_betr qemu_get_be64
-#define VMSTATE_UINTTL_V(_f, _s, _v) \
- VMSTATE_UINT64_V(_f, _s, _v)
-#define VMSTATE_UINTTL_ARRAY_V(_f, _s, _n, _v) \
- VMSTATE_UINT64_ARRAY_V(_f, _s, _n, _v)
-#else
-#define qemu_put_betr qemu_put_be32
-#define qemu_get_betr qemu_get_be32
-#define VMSTATE_UINTTR_V(_f, _s, _v) \
- VMSTATE_UINT32_V(_f, _s, _v)
-#define VMSTATE_UINTTR_ARRAY_V(_f, _s, _n, _v) \
- VMSTATE_UINT32_ARRAY_V(_f, _s, _n, _v)
-#endif
-
-#define VMSTATE_UINTTR(_f, _s) \
- VMSTATE_UINTTR_V(_f, _s, 0)
-#define VMSTATE_UINTTR_ARRAY(_f, _s, _n) \
- VMSTATE_UINTTR_ARRAY_V(_f, _s, _n, 0)
-
static int get_psw(QEMUFile *f, void *opaque, size_t size,
const VMStateField *field)
{
CPUHPPAState *env = opaque;
- cpu_hppa_put_psw(env, qemu_get_betr(f));
+ cpu_hppa_put_psw(env, qemu_get_be64(f));
return 0;
}
@@ -55,7 +34,7 @@ static int put_psw(QEMUFile *f, void *opaque, size_t size,
const VMStateField *field, JSONWriter *vmdesc)
{
CPUHPPAState *env = opaque;
- qemu_put_betr(f, cpu_hppa_get_psw(env));
+ qemu_put_be64(f, cpu_hppa_get_psw(env));
return 0;
}
@@ -65,70 +44,138 @@ static const VMStateInfo vmstate_psw = {
.put = put_psw,
};
-/* FIXME: Use the PA2.0 format, which is a superset of the PA1.1 format. */
static int get_tlb(QEMUFile *f, void *opaque, size_t size,
const VMStateField *field)
{
- hppa_tlb_entry *ent = opaque;
- uint32_t val;
-
- memset(ent, 0, sizeof(*ent));
-
- ent->va_b = qemu_get_be64(f);
- ent->pa = qemu_get_betr(f);
- val = qemu_get_be32(f);
-
- ent->entry_valid = extract32(val, 0, 1);
- ent->access_id = extract32(val, 1, 18);
- ent->u = extract32(val, 19, 1);
- ent->ar_pl2 = extract32(val, 20, 2);
- ent->ar_pl1 = extract32(val, 22, 2);
- ent->ar_type = extract32(val, 24, 3);
- ent->b = extract32(val, 27, 1);
- ent->d = extract32(val, 28, 1);
- ent->t = extract32(val, 29, 1);
-
- ent->va_e = ent->va_b + TARGET_PAGE_SIZE - 1;
+ HPPATLBEntry *ent = opaque;
+ uint64_t val;
+
+ ent->itree.start = qemu_get_be64(f);
+ ent->itree.last = qemu_get_be64(f);
+ ent->pa = qemu_get_be64(f);
+ val = qemu_get_be64(f);
+
+ if (val) {
+ ent->t = extract64(val, 61, 1);
+ ent->d = extract64(val, 60, 1);
+ ent->b = extract64(val, 59, 1);
+ ent->ar_type = extract64(val, 56, 3);
+ ent->ar_pl1 = extract64(val, 54, 2);
+ ent->ar_pl2 = extract64(val, 52, 2);
+ ent->u = extract64(val, 51, 1);
+ /* o = bit 50 */
+ /* p = bit 49 */
+ ent->access_id = extract64(val, 1, 31);
+ ent->entry_valid = 1;
+ }
return 0;
}
static int put_tlb(QEMUFile *f, void *opaque, size_t size,
const VMStateField *field, JSONWriter *vmdesc)
{
- hppa_tlb_entry *ent = opaque;
- uint32_t val = 0;
+ HPPATLBEntry *ent = opaque;
+ uint64_t val = 0;
if (ent->entry_valid) {
val = 1;
- val = deposit32(val, 1, 18, ent->access_id);
- val = deposit32(val, 19, 1, ent->u);
- val = deposit32(val, 20, 2, ent->ar_pl2);
- val = deposit32(val, 22, 2, ent->ar_pl1);
- val = deposit32(val, 24, 3, ent->ar_type);
- val = deposit32(val, 27, 1, ent->b);
- val = deposit32(val, 28, 1, ent->d);
- val = deposit32(val, 29, 1, ent->t);
+ val = deposit64(val, 61, 1, ent->t);
+ val = deposit64(val, 60, 1, ent->d);
+ val = deposit64(val, 59, 1, ent->b);
+ val = deposit64(val, 56, 3, ent->ar_type);
+ val = deposit64(val, 54, 2, ent->ar_pl1);
+ val = deposit64(val, 52, 2, ent->ar_pl2);
+ val = deposit64(val, 51, 1, ent->u);
+ /* o = bit 50 */
+ /* p = bit 49 */
+ val = deposit64(val, 1, 31, ent->access_id);
}
- qemu_put_be64(f, ent->va_b);
- qemu_put_betr(f, ent->pa);
- qemu_put_be32(f, val);
+ qemu_put_be64(f, ent->itree.start);
+ qemu_put_be64(f, ent->itree.last);
+ qemu_put_be64(f, ent->pa);
+ qemu_put_be64(f, val);
return 0;
}
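
The migration format is now four big-endian 64-bit words per entry -- itree.start, itree.last, pa, and a flags word -- replacing the old 32-bit packed form. For reference, the flags word assembled by put_tlb() above decodes as follows (derived from the deposit64/extract64 calls; a zero word marks an unused slot):

    /*   bit 61  T           bits 56-58  ar_type     bit 51  U
     *   bit 60  D           bits 54-55  ar_pl1      bit 50  (o, unused)
     *   bit 59  B           bits 52-53  ar_pl2      bit 49  (p, unused)
     *   bits 1-31  access_id             bit 0  set when the entry is valid
     */
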
-static const VMStateInfo vmstate_tlb = {
+static const VMStateInfo vmstate_tlb_entry = {
.name = "tlb entry",
.get = get_tlb,
.put = put_tlb,
};
-static VMStateField vmstate_env_fields[] = {
- VMSTATE_UINTTR_ARRAY(gr, CPUHPPAState, 32),
+static int tlb_pre_load(void *opaque)
+{
+ CPUHPPAState *env = opaque;
+
+ /*
+ * Zap the entire tlb, on-the-side data structures and all.
+     * Each tlb entry will have its data re-filled by get_tlb.
+ */
+ memset(env->tlb, 0, sizeof(env->tlb));
+ memset(&env->tlb_root, 0, sizeof(env->tlb_root));
+ env->tlb_unused = NULL;
+ env->tlb_partial = NULL;
+
+ return 0;
+}
+
+static int tlb_post_load(void *opaque, int version_id)
+{
+ CPUHPPAState *env = opaque;
+ uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
+ HPPATLBEntry **unused = &env->tlb_unused;
+ HPPATLBEntry *partial = NULL;
+
+ /*
+ * Re-create the interval tree from the valid entries.
+     * Truly invalid entries should have start == last == 0.
+ * Otherwise it should be the in-flight tlb_partial entry.
+ */
+ for (uint32_t i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
+ HPPATLBEntry *e = &env->tlb[i];
+
+ if (e->entry_valid) {
+ interval_tree_insert(&e->itree, &env->tlb_root);
+ } else if (i < btlb_entries) {
+ /* btlb not in unused list */
+ } else if (partial == NULL && e->itree.start < e->itree.last) {
+ partial = e;
+ } else {
+ *unused = e;
+ unused = &e->unused_next;
+ }
+ }
+ env->tlb_partial = partial;
+ *unused = NULL;
+
+ return 0;
+}
+
+static const VMStateField vmstate_tlb_fields[] = {
+ VMSTATE_ARRAY(tlb, CPUHPPAState,
+ ARRAY_SIZE(((CPUHPPAState *)0)->tlb),
+ 0, vmstate_tlb_entry, HPPATLBEntry),
+ VMSTATE_UINT32(tlb_last, CPUHPPAState),
+ VMSTATE_END_OF_LIST()
+};
+
+static const VMStateDescription vmstate_tlb = {
+ .name = "env/tlb",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_tlb_fields,
+ .pre_load = tlb_pre_load,
+ .post_load = tlb_post_load,
+};
+
+static const VMStateField vmstate_env_fields[] = {
+ VMSTATE_UINT64_ARRAY(gr, CPUHPPAState, 32),
VMSTATE_UINT64_ARRAY(fr, CPUHPPAState, 32),
VMSTATE_UINT64_ARRAY(sr, CPUHPPAState, 8),
- VMSTATE_UINTTR_ARRAY(cr, CPUHPPAState, 32),
- VMSTATE_UINTTR_ARRAY(cr_back, CPUHPPAState, 2),
- VMSTATE_UINTTR_ARRAY(shadow, CPUHPPAState, 7),
+ VMSTATE_UINT64_ARRAY(cr, CPUHPPAState, 32),
+ VMSTATE_UINT64_ARRAY(cr_back, CPUHPPAState, 2),
+ VMSTATE_UINT64_ARRAY(shadow, CPUHPPAState, 7),
/* Save the architecture value of the psw, not the internally
expanded version. Since this architecture value does not
@@ -145,28 +192,29 @@ static VMStateField vmstate_env_fields[] = {
.offset = 0
},
- VMSTATE_UINTTR(iaoq_f, CPUHPPAState),
- VMSTATE_UINTTR(iaoq_b, CPUHPPAState),
+ VMSTATE_UINT64(iaoq_f, CPUHPPAState),
+ VMSTATE_UINT64(iaoq_b, CPUHPPAState),
VMSTATE_UINT64(iasq_f, CPUHPPAState),
VMSTATE_UINT64(iasq_b, CPUHPPAState),
VMSTATE_UINT32(fr0_shadow, CPUHPPAState),
-
- VMSTATE_ARRAY(tlb, CPUHPPAState, ARRAY_SIZE(((CPUHPPAState *)0)->tlb),
- 0, vmstate_tlb, hppa_tlb_entry),
- VMSTATE_UINT32(tlb_last, CPUHPPAState),
-
VMSTATE_END_OF_LIST()
};
+static const VMStateDescription *vmstate_env_subsections[] = {
+ &vmstate_tlb,
+ NULL
+};
+
static const VMStateDescription vmstate_env = {
.name = "env",
- .version_id = 1,
- .minimum_version_id = 1,
+ .version_id = 3,
+ .minimum_version_id = 3,
.fields = vmstate_env_fields,
+ .subsections = vmstate_env_subsections,
};
-static VMStateField vmstate_cpu_fields[] = {
+static const VMStateField vmstate_cpu_fields[] = {
VMSTATE_CPU(),
VMSTATE_STRUCT(env, HPPACPU, 1, vmstate_env, CPUHPPAState),
VMSTATE_END_OF_LIST()
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index 350485f619..858ce6ec7f 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -25,72 +25,136 @@
#include "hw/core/cpu.h"
#include "trace.h"
-static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
+hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
- hppa_tlb_entry *ent = &env->tlb[i];
- if (ent->va_b <= addr && addr <= ent->va_e) {
- trace_hppa_tlb_find_entry(env, ent + i, ent->entry_valid,
- ent->va_b, ent->va_e, ent->pa);
- return ent;
- }
+ if (likely(extract64(addr, 58, 4) != 0xf)) {
+ /* Memory address space */
+ return addr & MAKE_64BIT_MASK(0, 62);
+ }
+ if (extract64(addr, 54, 4) != 0) {
+ /* I/O address space */
+ return addr | MAKE_64BIT_MASK(62, 2);
+ }
+ /* PDC address space */
+ return (addr & MAKE_64BIT_MASK(0, 54)) | MAKE_64BIT_MASK(60, 4);
+}
+
+hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
+{
+ if (likely(extract32(addr, 28, 4) != 0xf)) {
+ /* Memory address space */
+ return addr & MAKE_64BIT_MASK(0, 32);
+ }
+ if (extract32(addr, 24, 4) != 0) {
+ /* I/O address space */
+ return addr | MAKE_64BIT_MASK(32, 32);
+ }
+ /* PDC address space */
+ return (addr & MAKE_64BIT_MASK(0, 24)) | MAKE_64BIT_MASK(60, 4);
+}
+
+static hwaddr hppa_abs_to_phys(CPUHPPAState *env, vaddr addr)
+{
+ if (!hppa_is_pa20(env)) {
+ return addr;
+ } else if (env->psw & PSW_W) {
+ return hppa_abs_to_phys_pa2_w1(addr);
+ } else {
+ return hppa_abs_to_phys_pa2_w0(addr);
+ }
+}
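
hppa_abs_to_phys() maps the architected absolute address spaces onto QEMU's flat physical address space: the 0xf... window splits into I/O and PDC ranges, everything else is memory. Sample narrow-mode (!PSW_W) mappings, worked from hppa_abs_to_phys_pa2_w0() above (illustrative, not part of the patch):

    /*   0x12345678  ->  0x0000000012345678    memory (top nibble != 0xf)
     *   0xf1000000  ->  0xfffffffff1000000    I/O    (0xfX......, X != 0)
     *   0xf0123456  ->  0xf000000000123456    PDC    (0xf0......)
     * PA1.x machines are unchanged: the address is returned as-is.
     */
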
+
+static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
+{
+ IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);
+
+ if (i) {
+ HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
+ trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
+ ent->itree.start, ent->itree.last, ent->pa);
+ return ent;
}
trace_hppa_tlb_find_entry_not_found(env, addr);
return NULL;
}
-static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent,
+static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
bool force_flush_btlb)
{
CPUState *cs = env_cpu(env);
+ bool is_btlb;
if (!ent->entry_valid) {
return;
}
- trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);
+ trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
+ ent->itree.last, ent->pa);
- tlb_flush_range_by_mmuidx(cs, ent->va_b,
- ent->va_e - ent->va_b + 1,
- HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);
+ tlb_flush_range_by_mmuidx(cs, ent->itree.start,
+ ent->itree.last - ent->itree.start + 1,
+ HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);
- /* never clear BTLBs, unless forced to do so. */
- if (ent < &env->tlb[HPPA_BTLB_ENTRIES] && !force_flush_btlb) {
+ /* Never clear BTLBs, unless forced to do so. */
+ is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
+ if (is_btlb && !force_flush_btlb) {
return;
}
+ interval_tree_remove(&ent->itree, &env->tlb_root);
memset(ent, 0, sizeof(*ent));
- ent->va_b = -1;
+
+ if (!is_btlb) {
+ ent->unused_next = env->tlb_unused;
+ env->tlb_unused = ent;
+ }
}
-static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
+static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
- hppa_tlb_entry *ent;
- uint32_t i;
+ IntervalTreeNode *i, *n;
- if (env->tlb_last < HPPA_BTLB_ENTRIES || env->tlb_last >= ARRAY_SIZE(env->tlb)) {
- i = HPPA_BTLB_ENTRIES;
- env->tlb_last = HPPA_BTLB_ENTRIES + 1;
- } else {
- i = env->tlb_last;
- env->tlb_last++;
+ i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
+ for (; i ; i = n) {
+ HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
+
+ /*
+ * Find the next entry now: In the normal case the current entry
+ * will be removed, but in the BTLB case it will remain.
+ */
+ n = interval_tree_iter_next(i, va_b, va_e);
+ hppa_flush_tlb_ent(env, ent, false);
}
+}
+
+static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
+{
+ HPPATLBEntry *ent = env->tlb_unused;
+
+ if (ent == NULL) {
+ uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
+ uint32_t i = env->tlb_last;
- ent = &env->tlb[i];
+ if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
+ i = btlb_entries;
+ }
+ env->tlb_last = i + 1;
- hppa_flush_tlb_ent(env, ent, false);
+ ent = &env->tlb[i];
+ hppa_flush_tlb_ent(env, ent, false);
+ }
+
+ env->tlb_unused = ent->unused_next;
return ent;
}
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
int type, hwaddr *pphys, int *pprot,
- hppa_tlb_entry **tlb_entry)
+ HPPATLBEntry **tlb_entry)
{
hwaddr phys;
int prot, r_prot, w_prot, x_prot, priv;
- hppa_tlb_entry *ent;
+ HPPATLBEntry *ent;
int ret = -1;
if (tlb_entry) {
@@ -106,7 +170,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
/* Find a valid tlb entry that matches the virtual address. */
ent = hppa_find_tlb(env, addr);
- if (ent == NULL || !ent->entry_valid) {
+ if (ent == NULL) {
phys = 0;
prot = 0;
ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
@@ -118,7 +182,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
}
/* We now know the physical address. */
- phys = ent->pa + (addr - ent->va_b);
+ phys = ent->pa + (addr - ent->itree.start);
/* Map TLB access_rights field to QEMU protection. */
priv = MMU_IDX_TO_PRIV(mmu_idx);
@@ -144,7 +208,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
}
/* access_id == 0 means public page and no check is performed */
- if ((env->psw & PSW_P) && ent->access_id) {
+ if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
/* If bits [31:1] match, and bit 0 is set, suppress write. */
int match = ent->access_id * 2 + 1;
@@ -197,7 +261,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
}
egress:
- *pphys = phys;
+ *pphys = phys = hppa_abs_to_phys(env, phys);
*pprot = prot;
trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
return ret;
@@ -213,7 +277,7 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
/* ??? We really ought to know if the code mmu is disabled too,
in order to get the correct debugging dumps. */
if (!(cpu->env.psw & PSW_D)) {
- return addr;
+ return hppa_abs_to_phys(&cpu->env, addr);
}
excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
@@ -225,13 +289,60 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return excp == EXCP_DTLB_MISS ? -1 : phys;
}
+G_NORETURN static void
+raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
+ vaddr addr, bool mmu_disabled)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = excp;
+
+ if (env->psw & PSW_Q) {
+ /*
+ * For pa1.x, the offset and space never overlap, and so we
+ * simply extract the high and low part of the virtual address.
+ *
+ * For pa2.0, the formation of these are described in section
+ * "Interruption Parameter Registers", page 2-15.
+ */
+ env->cr[CR_IOR] = (uint32_t)addr;
+ env->cr[CR_ISR] = addr >> 32;
+
+ if (hppa_is_pa20(env)) {
+ if (mmu_disabled) {
+ /*
+ * If data translation was disabled, the ISR contains
+ * the upper portion of the abs address, zero-extended.
+ */
+ env->cr[CR_ISR] &= 0x3fffffff;
+ } else {
+ /*
+ * If data translation was enabled, the upper two bits
+ * of the IOR (the b field) are equal to the two space
+ * bits from the base register used to form the gva.
+ */
+ uint64_t b;
+
+ cpu_restore_state(cs, retaddr);
+
+ b = env->gr[env->unwind_breg];
+ b >>= (env->psw & PSW_W ? 62 : 30);
+ env->cr[CR_IOR] |= b << 62;
+
+ cpu_loop_exit(cs);
+ }
+ }
+ }
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
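The bit packing done by raise_exception_with_ior() can be restated as a small standalone helper; this only illustrates the shifts used above (PSW_W selects bits [63:62] versus [31:30] of the base register), it is not a drop-in replacement:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative only: mirrors the ISR/IOR formation for the PA2.0,
       translation-enabled case in raise_exception_with_ior(). */
    static void form_isr_ior(uint64_t addr, uint64_t breg, bool psw_w,
                             uint64_t *isr, uint64_t *ior)
    {
        *ior = (uint32_t)addr;                      /* offset half         */
        *isr = addr >> 32;                          /* space / upper half  */
        *ior |= (breg >> (psw_w ? 62 : 30)) << 62;  /* b field: IOR[63:62] */
    }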
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
MMUAccessType type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
HPPACPU *cpu = HPPA_CPU(cs);
CPUHPPAState *env = &cpu->env;
- hppa_tlb_entry *ent;
+ HPPATLBEntry *ent;
int prot, excp, a_prot;
hwaddr phys;
@@ -254,56 +365,51 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
return false;
}
trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
+
/* Failure. Raise the indicated exception. */
- cs->exception_index = excp;
- if (cpu->env.psw & PSW_Q) {
- /* ??? Needs tweaking for hppa64. */
- cpu->env.cr[CR_IOR] = addr;
- cpu->env.cr[CR_ISR] = addr >> 32;
- }
- cpu_loop_exit_restore(cs, retaddr);
+ raise_exception_with_ior(env, excp, retaddr,
+ addr, mmu_idx == MMU_PHYS_IDX);
}
trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
phys & TARGET_PAGE_MASK, size, type, mmu_idx);
- /* Success! Store the translation into the QEMU TLB. */
+
+ /*
+ * Success! Store the translation into the QEMU TLB.
+ * Note that we always install a single-page entry, because that
+ * is what works best with softmmu -- anything else will trigger
+ * the large page protection mask. We do not require this,
+ * because we record the large page here in the hppa tlb.
+ */
tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
- prot, mmu_idx, TARGET_PAGE_SIZE << (ent ? 2 * ent->page_size : 0));
+ prot, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
-void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
- hppa_tlb_entry *empty = NULL;
- int i;
-
- /* Zap any old entries covering ADDR; notice empty entries on the way. */
- for (i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb); ++i) {
- hppa_tlb_entry *ent = &env->tlb[i];
- if (ent->va_b <= addr && addr <= ent->va_e) {
- if (ent->entry_valid) {
- hppa_flush_tlb_ent(env, ent, false);
- }
- if (!empty) {
- empty = ent;
- }
- }
- }
+ HPPATLBEntry *ent;
- /* If we didn't see an empty entry, evict one. */
- if (empty == NULL) {
- empty = hppa_alloc_tlb_ent(env);
+ /* Zap any old entries covering ADDR. */
+ addr &= TARGET_PAGE_MASK;
+ hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);
+
+ ent = env->tlb_partial;
+ if (ent == NULL) {
+ ent = hppa_alloc_tlb_ent(env);
+ env->tlb_partial = ent;
}
- /* Note that empty->entry_valid == 0 already. */
- empty->va_b = addr & TARGET_PAGE_MASK;
- empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
- empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
- trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
+ /* Note that ent->entry_valid == 0 already. */
+ ent->itree.start = addr;
+ ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
+ ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
+ trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}
-static void set_access_bits(CPUHPPAState *env, hppa_tlb_entry *ent, target_ureg reg)
+static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
+ target_ulong reg)
{
ent->access_id = extract32(reg, 1, 18);
ent->u = extract32(reg, 19, 1);
@@ -314,49 +420,153 @@ static void set_access_bits(CPUHPPAState *env, hppa_tlb_entry *ent, target_ureg
ent->d = extract32(reg, 28, 1);
ent->t = extract32(reg, 29, 1);
ent->entry_valid = 1;
+
+ interval_tree_insert(&ent->itree, &env->tlb_root);
trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}
/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
-void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
- hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
+ HPPATLBEntry *ent = env->tlb_partial;
- if (unlikely(ent == NULL)) {
- qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
- return;
+ if (ent) {
+ env->tlb_partial = NULL;
+ if (ent->itree.start <= addr && addr <= ent->itree.last) {
+ set_access_bits_pa11(env, ent, reg);
+ return;
+ }
}
+ qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
+}
+
+static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
+ target_ulong r2, vaddr va_b)
+{
+ HPPATLBEntry *ent;
+ vaddr va_e;
+ uint64_t va_size;
+ int mask_shift;
+
+ mask_shift = 2 * (r1 & 0xf);
+ va_size = TARGET_PAGE_SIZE << mask_shift;
+ va_b &= -va_size;
+ va_e = va_b + va_size - 1;
+
+ hppa_flush_tlb_range(env, va_b, va_e);
+ ent = hppa_alloc_tlb_ent(env);
+
+ ent->itree.start = va_b;
+ ent->itree.last = va_e;
+ ent->pa = (r1 << 7) & (TARGET_PAGE_MASK << mask_shift);
+ ent->t = extract64(r2, 61, 1);
+ ent->d = extract64(r2, 60, 1);
+ ent->b = extract64(r2, 59, 1);
+ ent->ar_type = extract64(r2, 56, 3);
+ ent->ar_pl1 = extract64(r2, 54, 2);
+ ent->ar_pl2 = extract64(r2, 52, 2);
+ ent->u = extract64(r2, 51, 1);
+ /* o = bit 50 */
+ /* p = bit 49 */
+ ent->access_id = extract64(r2, 1, 31);
+ ent->entry_valid = 1;
- set_access_bits(env, ent, reg);
+ interval_tree_insert(&ent->itree, &env->tlb_root);
+ trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
+ trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
+ ent->ar_pl2, ent->ar_pl1, ent->ar_type,
+ ent->b, ent->d, ent->t);
}
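A worked example of the page-size encoding used by itlbt_pa20(): the low nibble of r1 selects a span of TARGET_PAGE_SIZE << (2 * n), and the start address is aligned down to it. The operand values below are hypothetical, chosen only for illustration:

    #include <assert.h>
    #include <stdint.h>

    /* 4 KiB base page, as on hppa (TARGET_PAGE_BITS == 12). */
    enum { BASE_PAGE = 4096 };

    int main(void)
    {
        uint64_t r1 = 0x3;                 /* hypothetical operand: low nibble 3 */
        uint64_t va = 0x12345678;          /* hypothetical unaligned address     */

        int      mask_shift = 2 * (r1 & 0xf);             /* = 6                 */
        uint64_t va_size    = (uint64_t)BASE_PAGE << mask_shift;
        uint64_t va_b       = va & -va_size;              /* align down          */
        uint64_t va_e       = va_b + va_size - 1;

        assert(va_size == 256 * 1024);                    /* 4 KiB << 6          */
        assert(va_b == 0x12340000 && va_e == 0x1237ffff);
        return 0;
    }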
-/* Purge (Insn/Data) TLB. This is explicitly page-based, and is
- synchronous across all processors. */
+void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
+{
+ vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
+ itlbt_pa20(env, r1, r2, va_b);
+}
+
+void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
+{
+ vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
+ itlbt_pa20(env, r1, r2, va_b);
+}
+
+/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
CPUHPPAState *env = cpu_env(cpu);
- target_ulong addr = (target_ulong) data.target_ptr;
- hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
+ vaddr start = data.target_ptr;
+ vaddr end;
+
+ /*
+ * PA2.0 allows a range of pages encoded into GR[b], which we have
+ * copied into the bottom bits of the otherwise page-aligned address.
+ * PA1.x will always provide zero here, for a single page flush.
+ */
+ end = start & 0xf;
+ start &= TARGET_PAGE_MASK;
+ end = TARGET_PAGE_SIZE << (2 * end);
+ end = start + end - 1;
+
+ hppa_flush_tlb_range(env, start, end);
+}
- if (ent && ent->entry_valid) {
- hppa_flush_tlb_ent(env, ent, false);
- }
+/* This is local to the current cpu. */
+void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
+{
+ trace_hppa_tlb_ptlb_local(env);
+ ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}
+/* This is synchronous across all processors. */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
CPUState *src = env_cpu(env);
CPUState *cpu;
+ bool wait = false;
+
trace_hppa_tlb_ptlb(env);
run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);
CPU_FOREACH(cpu) {
if (cpu != src) {
async_run_on_cpu(cpu, ptlb_work, data);
+ wait = true;
}
}
- async_safe_run_on_cpu(src, ptlb_work, data);
+ if (wait) {
+ async_safe_run_on_cpu(src, ptlb_work, data);
+ } else {
+ ptlb_work(src, data);
+ }
+}
+
+void hppa_ptlbe(CPUHPPAState *env)
+{
+ uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
+ uint32_t i;
+
+ /* Zap the (non-btlb) tlb entries themselves. */
+ memset(&env->tlb[btlb_entries], 0,
+ sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
+ env->tlb_last = btlb_entries;
+ env->tlb_partial = NULL;
+
+ /* Put them all onto the unused list. */
+ env->tlb_unused = &env->tlb[btlb_entries];
+ for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
+ env->tlb[i].unused_next = &env->tlb[i + 1];
+ }
+
+ /* Re-initialize the interval tree with only the btlb entries. */
+ memset(&env->tlb_root, 0, sizeof(env->tlb_root));
+ for (i = 0; i < btlb_entries; ++i) {
+ if (env->tlb[i].entry_valid) {
+ interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
+ }
+ }
+
+ tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}
/* Purge (Insn/Data) TLB entry. This affects an implementation-defined
@@ -365,17 +575,12 @@ void HELPER(ptlbe)(CPUHPPAState *env)
{
trace_hppa_tlb_ptlbe(env);
qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
- memset(&env->tlb[HPPA_BTLB_ENTRIES], 0,
- sizeof(env->tlb) - HPPA_BTLB_ENTRIES * sizeof(env->tlb[0]));
- env->tlb_last = HPPA_BTLB_ENTRIES;
- tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
+ hppa_ptlbe(env);
}
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
- if (env->psw & PSW_P) {
- tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
- }
+ tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}
void HELPER(change_prot_id)(CPUHPPAState *env)
@@ -383,7 +588,7 @@ void HELPER(change_prot_id)(CPUHPPAState *env)
cpu_hppa_change_prot_id(env);
}
-target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
+target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
hwaddr phys;
int prot, excp;
@@ -391,16 +596,11 @@ target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
&phys, &prot, NULL);
if (excp >= 0) {
- if (env->psw & PSW_Q) {
- /* ??? Needs tweaking for hppa64. */
- env->cr[CR_IOR] = addr;
- env->cr[CR_ISR] = addr >> 32;
- }
if (excp == EXCP_DTLB_MISS) {
excp = EXCP_NA_DTLB_MISS;
}
trace_hppa_tlb_lpa_failed(env, addr);
- hppa_dynamic_excp(env, excp, GETPC());
+ raise_exception_with_ior(env, excp, GETPC(), addr, false);
}
trace_hppa_tlb_lpa_success(env, addr, phys);
return phys;
@@ -409,7 +609,7 @@ target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
/* Return the ar_type of the TLB at VADDR, or -1. */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
- hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
+ HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
return ent ? ent->ar_type : -1;
}
@@ -424,15 +624,17 @@ void HELPER(diag_btlb)(CPUHPPAState *env)
unsigned int phys_page, len, slot;
int mmu_idx = cpu_mmu_index(env, 0);
uintptr_t ra = GETPC();
- hppa_tlb_entry *btlb;
+ HPPATLBEntry *btlb;
uint64_t virt_page;
uint32_t *vaddr;
+ uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
-#ifdef TARGET_HPPA64
/* BTLBs are not supported on 64-bit CPUs */
- env->gr[28] = -1; /* nonexistent procedure */
- return;
-#endif
+ if (btlb_entries == 0) {
+ env->gr[28] = -1; /* nonexistent procedure */
+ return;
+ }
+
env->gr[28] = 0; /* PDC_OK */
switch (env->gr[25]) {
@@ -446,8 +648,8 @@ void HELPER(diag_btlb)(CPUHPPAState *env)
} else {
vaddr[0] = cpu_to_be32(1);
vaddr[1] = cpu_to_be32(16 * 1024);
- vaddr[2] = cpu_to_be32(HPPA_BTLB_FIXED);
- vaddr[3] = cpu_to_be32(HPPA_BTLB_VARIABLE);
+ vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
+ vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
}
break;
case 1:
@@ -464,15 +666,17 @@ void HELPER(diag_btlb)(CPUHPPAState *env)
(long long) virt_page << TARGET_PAGE_BITS,
(long long) (virt_page + len) << TARGET_PAGE_BITS,
(long long) virt_page, phys_page, len, slot);
- if (slot < HPPA_BTLB_ENTRIES) {
+ if (slot < btlb_entries) {
btlb = &env->tlb[slot];
- /* force flush of possibly existing BTLB entry */
+
+ /* Force flush of possibly existing BTLB entry. */
hppa_flush_tlb_ent(env, btlb, true);
- /* create new BTLB entry */
- btlb->va_b = virt_page << TARGET_PAGE_BITS;
- btlb->va_e = btlb->va_b + len * TARGET_PAGE_SIZE - 1;
+
+ /* Create new BTLB entry */
+ btlb->itree.start = virt_page << TARGET_PAGE_BITS;
+ btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
btlb->pa = phys_page << TARGET_PAGE_BITS;
- set_access_bits(env, btlb, env->gr[20]);
+ set_access_bits_pa11(env, btlb, env->gr[20]);
btlb->t = 0;
btlb->d = 1;
} else {
@@ -484,7 +688,7 @@ void HELPER(diag_btlb)(CPUHPPAState *env)
slot = env->gr[22];
qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
slot);
- if (slot < HPPA_BTLB_ENTRIES) {
+ if (slot < btlb_entries) {
btlb = &env->tlb[slot];
hppa_flush_tlb_ent(env, btlb, true);
} else {
@@ -494,7 +698,7 @@ void HELPER(diag_btlb)(CPUHPPAState *env)
case 3:
/* Purge all BTLB entries */
qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
- for (slot = 0; slot < HPPA_BTLB_ENTRIES; slot++) {
+ for (slot = 0; slot < btlb_entries; slot++) {
btlb = &env->tlb[slot];
hppa_flush_tlb_ent(env, btlb, true);
}
diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c
index 837e2b3117..a0e31c0c25 100644
--- a/target/hppa/op_helper.c
+++ b/target/hppa/op_helper.c
@@ -42,25 +42,25 @@ G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra)
cpu_loop_exit_restore(cs, ra);
}
-void HELPER(tsv)(CPUHPPAState *env, target_ureg cond)
+void HELPER(tsv)(CPUHPPAState *env, target_ulong cond)
{
- if (unlikely((target_sreg)cond < 0)) {
+ if (unlikely((target_long)cond < 0)) {
hppa_dynamic_excp(env, EXCP_OVERFLOW, GETPC());
}
}
-void HELPER(tcond)(CPUHPPAState *env, target_ureg cond)
+void HELPER(tcond)(CPUHPPAState *env, target_ulong cond)
{
if (unlikely(cond)) {
hppa_dynamic_excp(env, EXCP_COND, GETPC());
}
}
-static void atomic_store_3(CPUHPPAState *env, target_ulong addr,
- uint32_t val, uintptr_t ra)
+static void atomic_store_mask32(CPUHPPAState *env, target_ulong addr,
+ uint32_t val, uint32_t mask, uintptr_t ra)
{
int mmu_idx = cpu_mmu_index(env, 0);
- uint32_t old, new, cmp, mask, *haddr;
+ uint32_t old, new, cmp, *haddr;
void *vaddr;
vaddr = probe_access(env, addr, 3, MMU_DATA_STORE, mmu_idx, ra);
@@ -81,7 +81,36 @@ static void atomic_store_3(CPUHPPAState *env, target_ulong addr,
}
}
-static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ureg val,
+static void atomic_store_mask64(CPUHPPAState *env, target_ulong addr,
+ uint64_t val, uint64_t mask,
+ int size, uintptr_t ra)
+{
+#ifdef CONFIG_ATOMIC64
+ int mmu_idx = cpu_mmu_index(env, 0);
+ uint64_t old, new, cmp, *haddr;
+ void *vaddr;
+
+ vaddr = probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, ra);
+ if (vaddr == NULL) {
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+ }
+ haddr = (uint64_t *)((uintptr_t)vaddr & -8);
+
+ old = *haddr;
+ while (1) {
+ new = be64_to_cpu((cpu_to_be64(old) & ~mask) | (val & mask));
+ cmp = qatomic_cmpxchg__nocheck(haddr, old, new);
+ if (cmp == old) {
+ return;
+ }
+ old = cmp;
+ }
+#else
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+#endif
+}
+
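The same merge-under-mask pattern, sketched with C11 atomics instead of QEMU's qatomic_cmpxchg__nocheck(), and without the softmmu probing or big-endian byte swapping (illustrative only):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Merge (val & mask) into *word without disturbing the other bytes;
       the CAS loop has the same shape as atomic_store_mask64() above. */
    static void store_under_mask(_Atomic uint64_t *word,
                                 uint64_t val, uint64_t mask)
    {
        uint64_t old = atomic_load_explicit(word, memory_order_relaxed);
        uint64_t new;

        do {
            new = (old & ~mask) | (val & mask);
        } while (!atomic_compare_exchange_weak(word, &old, new));
    }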
+static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ulong val,
bool parallel, uintptr_t ra)
{
switch (addr & 3) {
@@ -94,7 +123,7 @@ static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ureg val,
case 1:
/* The 3 byte store must appear atomic. */
if (parallel) {
- atomic_store_3(env, addr, val, ra);
+ atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
} else {
cpu_stb_data_ra(env, addr, val >> 16, ra);
cpu_stw_data_ra(env, addr + 1, val, ra);
@@ -106,25 +135,153 @@ static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ureg val,
}
}
-void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ureg val)
+static void do_stdby_b(CPUHPPAState *env, target_ulong addr, uint64_t val,
+ bool parallel, uintptr_t ra)
+{
+ switch (addr & 7) {
+ case 7:
+ cpu_stb_data_ra(env, addr, val, ra);
+ break;
+ case 6:
+ cpu_stw_data_ra(env, addr, val, ra);
+ break;
+ case 5:
+ /* The 3 byte store must appear atomic. */
+ if (parallel) {
+ atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
+ } else {
+ cpu_stb_data_ra(env, addr, val >> 16, ra);
+ cpu_stw_data_ra(env, addr + 1, val, ra);
+ }
+ break;
+ case 4:
+ cpu_stl_data_ra(env, addr, val, ra);
+ break;
+ case 3:
+ /* The 5 byte store must appear atomic. */
+ if (parallel) {
+ atomic_store_mask64(env, addr, val, 0x000000ffffffffffull, 5, ra);
+ } else {
+ cpu_stb_data_ra(env, addr, val >> 32, ra);
+ cpu_stl_data_ra(env, addr + 1, val, ra);
+ }
+ break;
+ case 2:
+ /* The 6 byte store must appear atomic. */
+ if (parallel) {
+ atomic_store_mask64(env, addr, val, 0x0000ffffffffffffull, 6, ra);
+ } else {
+ cpu_stw_data_ra(env, addr, val >> 32, ra);
+ cpu_stl_data_ra(env, addr + 2, val, ra);
+ }
+ break;
+ case 1:
+ /* The 7 byte store must appear atomic. */
+ if (parallel) {
+ atomic_store_mask64(env, addr, val, 0x00ffffffffffffffull, 7, ra);
+ } else {
+ cpu_stb_data_ra(env, addr, val >> 48, ra);
+ cpu_stw_data_ra(env, addr + 1, val >> 32, ra);
+ cpu_stl_data_ra(env, addr + 3, val, ra);
+ }
+ break;
+ default:
+ cpu_stq_data_ra(env, addr, val, ra);
+ break;
+ }
+}
+
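For the "begin" form handled by do_stdby_b() above, the switch boils down to writing the low 8 - (addr & 7) bytes of the source register; a tiny check of that relationship (not part of the patch):

    #include <assert.h>

    /* Bytes addr..(end of doubleword) are written, so the count is
       8 - (addr & 7), matching the switch cases in do_stdby_b(). */
    static int stdby_b_byte_count(unsigned long addr)
    {
        return 8 - (int)(addr & 7);
    }

    int main(void)
    {
        assert(stdby_b_byte_count(0x1000) == 8);  /* aligned: full doubleword */
        assert(stdby_b_byte_count(0x1003) == 5);  /* case 3: 5-byte store     */
        assert(stdby_b_byte_count(0x1007) == 1);  /* case 7: single byte      */
        return 0;
    }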
+void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
do_stby_b(env, addr, val, false, GETPC());
}
void HELPER(stby_b_parallel)(CPUHPPAState *env, target_ulong addr,
- target_ureg val)
+ target_ulong val)
{
do_stby_b(env, addr, val, true, GETPC());
}
-static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ureg val,
+void HELPER(stdby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
+{
+ do_stdby_b(env, addr, val, false, GETPC());
+}
+
+void HELPER(stdby_b_parallel)(CPUHPPAState *env, target_ulong addr,
+ target_ulong val)
+{
+ do_stdby_b(env, addr, val, true, GETPC());
+}
+
+static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ulong val,
bool parallel, uintptr_t ra)
{
switch (addr & 3) {
case 3:
/* The 3 byte store must appear atomic. */
if (parallel) {
- atomic_store_3(env, addr - 3, val, ra);
+ atomic_store_mask32(env, addr - 3, val, 0xffffff00u, ra);
+ } else {
+ cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
+ cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
+ }
+ break;
+ case 2:
+ cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
+ break;
+ case 1:
+ cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
+ break;
+ default:
+ /* Nothing is stored, but protection is checked and the
+ cacheline is marked dirty. */
+ probe_write(env, addr, 0, cpu_mmu_index(env, 0), ra);
+ break;
+ }
+}
+
+static void do_stdby_e(CPUHPPAState *env, target_ulong addr, uint64_t val,
+ bool parallel, uintptr_t ra)
+{
+ switch (addr & 7) {
+ case 7:
+ /* The 7 byte store must appear atomic. */
+ if (parallel) {
+ atomic_store_mask64(env, addr - 7, val,
+ 0xffffffffffffff00ull, 7, ra);
+ } else {
+ cpu_stl_data_ra(env, addr - 7, val >> 32, ra);
+ cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
+ cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
+ }
+ break;
+ case 6:
+ /* The 6 byte store must appear atomic. */
+ if (parallel) {
+ atomic_store_mask64(env, addr - 6, val,
+ 0xffffffffffff0000ull, 6, ra);
+ } else {
+ cpu_stl_data_ra(env, addr - 6, val >> 32, ra);
+ cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
+ }
+ break;
+ case 5:
+ /* The 5 byte store must appear atomic. */
+ if (parallel) {
+ atomic_store_mask64(env, addr - 5, val,
+ 0xffffffffff000000ull, 5, ra);
+ } else {
+ cpu_stl_data_ra(env, addr - 5, val >> 32, ra);
+ cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
+ }
+ break;
+ case 4:
+ cpu_stl_data_ra(env, addr - 4, val >> 32, ra);
+ break;
+ case 3:
+ /* The 3 byte store must appear atomic. */
+ if (parallel) {
+ atomic_store_mask32(env, addr - 3, val, 0xffffff00u, ra);
} else {
cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
@@ -144,17 +301,28 @@ static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ureg val,
}
}
-void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ureg val)
+void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
do_stby_e(env, addr, val, false, GETPC());
}
void HELPER(stby_e_parallel)(CPUHPPAState *env, target_ulong addr,
- target_ureg val)
+ target_ulong val)
{
do_stby_e(env, addr, val, true, GETPC());
}
+void HELPER(stdby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
+{
+ do_stdby_e(env, addr, val, false, GETPC());
+}
+
+void HELPER(stdby_e_parallel)(CPUHPPAState *env, target_ulong addr,
+ target_ulong val)
+{
+ do_stdby_e(env, addr, val, true, GETPC());
+}
+
void HELPER(ldc_check)(target_ulong addr)
{
if (unlikely(addr & 0xf)) {
@@ -164,7 +332,7 @@ void HELPER(ldc_check)(target_ulong addr)
}
}
-target_ureg HELPER(probe)(CPUHPPAState *env, target_ulong addr,
+target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
uint32_t level, uint32_t want)
{
#ifdef CONFIG_USER_ONLY
@@ -196,7 +364,7 @@ target_ureg HELPER(probe)(CPUHPPAState *env, target_ulong addr,
#endif
}
-target_ureg HELPER(read_interval_timer)(void)
+target_ulong HELPER(read_interval_timer)(void)
{
#ifdef CONFIG_USER_ONLY
/* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist.
@@ -209,3 +377,113 @@ target_ureg HELPER(read_interval_timer)(void)
return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2;
#endif
}
+
+uint64_t HELPER(hadd_ss)(uint64_t r1, uint64_t r2)
+{
+ uint64_t ret = 0;
+
+ for (int i = 0; i < 64; i += 16) {
+ int f1 = sextract64(r1, i, 16);
+ int f2 = sextract64(r2, i, 16);
+ int fr = f1 + f2;
+
+ fr = MIN(fr, INT16_MAX);
+ fr = MAX(fr, INT16_MIN);
+ ret = deposit64(ret, i, 16, fr);
+ }
+ return ret;
+}
+
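One lane of the saturating halfword add, with concrete values; the same clamp-to-int16 pattern recurs in the hadd/hsub helpers below (standalone scalar illustration only):

    #include <assert.h>
    #include <stdint.h>

    /* One 16-bit lane of HADD,ss: add, then clamp to the int16_t range. */
    static int16_t hadd_ss_lane(int16_t a, int16_t b)
    {
        int r = a + b;                     /* cannot overflow in int */
        if (r > INT16_MAX) {
            r = INT16_MAX;
        } else if (r < INT16_MIN) {
            r = INT16_MIN;
        }
        return (int16_t)r;
    }

    int main(void)
    {
        assert(hadd_ss_lane(0x7000, 0x2000) == INT16_MAX);    /* saturates high */
        assert(hadd_ss_lane(-0x7000, -0x2000) == INT16_MIN);  /* saturates low  */
        assert(hadd_ss_lane(100, -250) == -150);              /* in range       */
        return 0;
    }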
+uint64_t HELPER(hadd_us)(uint64_t r1, uint64_t r2)
+{
+ uint64_t ret = 0;
+
+ for (int i = 0; i < 64; i += 16) {
+ int f1 = extract64(r1, i, 16);
+ int f2 = sextract64(r2, i, 16);
+ int fr = f1 + f2;
+
+ fr = MIN(fr, UINT16_MAX);
+ fr = MAX(fr, 0);
+ ret = deposit64(ret, i, 16, fr);
+ }
+ return ret;
+}
+
+uint64_t HELPER(havg)(uint64_t r1, uint64_t r2)
+{
+ uint64_t ret = 0;
+
+ for (int i = 0; i < 64; i += 16) {
+ int f1 = extract64(r1, i, 16);
+ int f2 = extract64(r2, i, 16);
+ int fr = f1 + f2;
+
+ ret = deposit64(ret, i, 16, (fr >> 1) | (fr & 1));
+ }
+ return ret;
+}
+
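HAVG as implemented here ORs the dropped bit back into the result rather than adding it; a couple of worked values make the rounding visible (illustrative scalar version):

    #include <assert.h>

    /* One lane of HAVG as coded above: shift-right-by-one average,
       with the dropped bit ORed (not added) into the result's LSB. */
    static unsigned havg_lane(unsigned a, unsigned b)
    {
        unsigned sum = a + b;
        return (sum >> 1) | (sum & 1);
    }

    int main(void)
    {
        assert(havg_lane(4, 8) == 6);    /* even sum: exact mean   */
        assert(havg_lane(5, 8) == 7);    /* odd sum 13: 6 | 1 = 7  */
        assert(havg_lane(5, 10) == 7);   /* odd sum 15: 7 | 1 = 7  */
        return 0;
    }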
+uint64_t HELPER(hsub_ss)(uint64_t r1, uint64_t r2)
+{
+ uint64_t ret = 0;
+
+ for (int i = 0; i < 64; i += 16) {
+ int f1 = sextract64(r1, i, 16);
+ int f2 = sextract64(r2, i, 16);
+ int fr = f1 - f2;
+
+ fr = MIN(fr, INT16_MAX);
+ fr = MAX(fr, INT16_MIN);
+ ret = deposit64(ret, i, 16, fr);
+ }
+ return ret;
+}
+
+uint64_t HELPER(hsub_us)(uint64_t r1, uint64_t r2)
+{
+ uint64_t ret = 0;
+
+ for (int i = 0; i < 64; i += 16) {
+ int f1 = extract64(r1, i, 16);
+ int f2 = sextract64(r2, i, 16);
+ int fr = f1 - f2;
+
+ fr = MIN(fr, UINT16_MAX);
+ fr = MAX(fr, 0);
+ ret = deposit64(ret, i, 16, fr);
+ }
+ return ret;
+}
+
+uint64_t HELPER(hshladd)(uint64_t r1, uint64_t r2, uint32_t sh)
+{
+ uint64_t ret = 0;
+
+ for (int i = 0; i < 64; i += 16) {
+ int f1 = sextract64(r1, i, 16);
+ int f2 = sextract64(r2, i, 16);
+ int fr = (f1 << sh) + f2;
+
+ fr = MIN(fr, INT16_MAX);
+ fr = MAX(fr, INT16_MIN);
+ ret = deposit64(ret, i, 16, fr);
+ }
+ return ret;
+}
+
+uint64_t HELPER(hshradd)(uint64_t r1, uint64_t r2, uint32_t sh)
+{
+ uint64_t ret = 0;
+
+ for (int i = 0; i < 64; i += 16) {
+ int f1 = sextract64(r1, i, 16);
+ int f2 = sextract64(r2, i, 16);
+ int fr = (f1 >> sh) + f2;
+
+ fr = MIN(fr, INT16_MAX);
+ fr = MAX(fr, INT16_MIN);
+ ret = deposit64(ret, i, 16, fr);
+ }
+ return ret;
+}
diff --git a/target/hppa/sys_helper.c b/target/hppa/sys_helper.c
index 4bb4cf611c..a59245eed3 100644
--- a/target/hppa/sys_helper.c
+++ b/target/hppa/sys_helper.c
@@ -24,7 +24,7 @@
#include "qemu/timer.h"
#include "sysemu/runstate.h"
-void HELPER(write_interval_timer)(CPUHPPAState *env, target_ureg val)
+void HELPER(write_interval_timer)(CPUHPPAState *env, target_ulong val)
{
HPPACPU *cpu = env_archcpu(env);
uint64_t current = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
@@ -58,7 +58,7 @@ void HELPER(reset)(CPUHPPAState *env)
helper_excp(env, EXCP_HLT);
}
-target_ureg HELPER(swap_system_mask)(CPUHPPAState *env, target_ureg nsm)
+target_ulong HELPER(swap_system_mask)(CPUHPPAState *env, target_ulong nsm)
{
target_ulong psw = env->psw;
/*
@@ -80,6 +80,16 @@ void HELPER(rfi)(CPUHPPAState *env)
env->iasq_b = (uint64_t)env->cr_back[0] << 32;
env->iaoq_f = env->cr[CR_IIAOQ];
env->iaoq_b = env->cr_back[1];
+
+ /*
+ * For pa2.0, IIASQ is the top bits of the virtual address.
+ * To recreate the space identifier, remove the offset bits.
+ */
+ if (hppa_is_pa20(env)) {
+ env->iasq_f &= ~env->iaoq_f;
+ env->iasq_b &= ~env->iaoq_b;
+ }
+
cpu_hppa_put_psw(env, env->cr[CR_IPSW]);
}
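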
diff --git a/target/hppa/trace-events b/target/hppa/trace-events
index 8931517890..a10ba73d5d 100644
--- a/target/hppa/trace-events
+++ b/target/hppa/trace-events
@@ -10,6 +10,7 @@ disable hppa_tlb_fill_success(void *env, uint64_t addr, uint64_t phys, int size,
disable hppa_tlb_itlba(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%lx"
disable hppa_tlb_itlbp(void *env, void *ent, int access_id, int u, int pl2, int pl1, int type, int b, int d, int t) "env=%p ent=%p access_id=%x u=%d pl2=%d pl1=%d type=%d b=%d d=%d t=%d"
disable hppa_tlb_ptlb(void *env) "env=%p"
+disable hppa_tlb_ptlb_local(void *env) "env=%p"
disable hppa_tlb_ptlbe(void *env) "env=%p"
disable hppa_tlb_lpa_success(void *env, uint64_t addr, uint64_t phys) "env=%p addr=0x%lx phys=0x%lx"
disable hppa_tlb_lpa_failed(void *env, uint64_t addr) "env=%p addr=0x%lx"
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 9f3ba9f42f..bcce65d587 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -23,6 +23,7 @@
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
@@ -32,240 +33,35 @@
#include "exec/helper-info.c.inc"
#undef HELPER_H
-
-/* Since we have a distinction between register size and address size,
- we need to redefine all of these. */
-
-#undef TCGv
+/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new
-#undef tcg_global_mem_new
-
-#if TARGET_LONG_BITS == 64
-#define TCGv_tl TCGv_i64
-#define tcg_temp_new_tl tcg_temp_new_i64
-#if TARGET_REGISTER_BITS == 64
-#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
-#else
-#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
-#endif
-#else
-#define TCGv_tl TCGv_i32
-#define tcg_temp_new_tl tcg_temp_new_i32
-#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
-#endif
-
-#if TARGET_REGISTER_BITS == 64
-#define TCGv_reg TCGv_i64
-
-#define tcg_temp_new tcg_temp_new_i64
-#define tcg_global_mem_new tcg_global_mem_new_i64
-
-#define tcg_gen_movi_reg tcg_gen_movi_i64
-#define tcg_gen_mov_reg tcg_gen_mov_i64
-#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
-#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
-#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
-#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
-#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
-#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
-#define tcg_gen_ld_reg tcg_gen_ld_i64
-#define tcg_gen_st8_reg tcg_gen_st8_i64
-#define tcg_gen_st16_reg tcg_gen_st16_i64
-#define tcg_gen_st32_reg tcg_gen_st32_i64
-#define tcg_gen_st_reg tcg_gen_st_i64
-#define tcg_gen_add_reg tcg_gen_add_i64
-#define tcg_gen_addi_reg tcg_gen_addi_i64
-#define tcg_gen_sub_reg tcg_gen_sub_i64
-#define tcg_gen_neg_reg tcg_gen_neg_i64
-#define tcg_gen_subfi_reg tcg_gen_subfi_i64
-#define tcg_gen_subi_reg tcg_gen_subi_i64
-#define tcg_gen_and_reg tcg_gen_and_i64
-#define tcg_gen_andi_reg tcg_gen_andi_i64
-#define tcg_gen_or_reg tcg_gen_or_i64
-#define tcg_gen_ori_reg tcg_gen_ori_i64
-#define tcg_gen_xor_reg tcg_gen_xor_i64
-#define tcg_gen_xori_reg tcg_gen_xori_i64
-#define tcg_gen_not_reg tcg_gen_not_i64
-#define tcg_gen_shl_reg tcg_gen_shl_i64
-#define tcg_gen_shli_reg tcg_gen_shli_i64
-#define tcg_gen_shr_reg tcg_gen_shr_i64
-#define tcg_gen_shri_reg tcg_gen_shri_i64
-#define tcg_gen_sar_reg tcg_gen_sar_i64
-#define tcg_gen_sari_reg tcg_gen_sari_i64
-#define tcg_gen_brcond_reg tcg_gen_brcond_i64
-#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
-#define tcg_gen_setcond_reg tcg_gen_setcond_i64
-#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
-#define tcg_gen_mul_reg tcg_gen_mul_i64
-#define tcg_gen_muli_reg tcg_gen_muli_i64
-#define tcg_gen_div_reg tcg_gen_div_i64
-#define tcg_gen_rem_reg tcg_gen_rem_i64
-#define tcg_gen_divu_reg tcg_gen_divu_i64
-#define tcg_gen_remu_reg tcg_gen_remu_i64
-#define tcg_gen_discard_reg tcg_gen_discard_i64
-#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
-#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
-#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
-#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
-#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
-#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
-#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
-#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
-#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
-#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
-#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
-#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
-#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
-#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
-#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
-#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
-#define tcg_gen_andc_reg tcg_gen_andc_i64
-#define tcg_gen_eqv_reg tcg_gen_eqv_i64
-#define tcg_gen_nand_reg tcg_gen_nand_i64
-#define tcg_gen_nor_reg tcg_gen_nor_i64
-#define tcg_gen_orc_reg tcg_gen_orc_i64
-#define tcg_gen_clz_reg tcg_gen_clz_i64
-#define tcg_gen_ctz_reg tcg_gen_ctz_i64
-#define tcg_gen_clzi_reg tcg_gen_clzi_i64
-#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
-#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
-#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
-#define tcg_gen_rotl_reg tcg_gen_rotl_i64
-#define tcg_gen_rotli_reg tcg_gen_rotli_i64
-#define tcg_gen_rotr_reg tcg_gen_rotr_i64
-#define tcg_gen_rotri_reg tcg_gen_rotri_i64
-#define tcg_gen_deposit_reg tcg_gen_deposit_i64
-#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
-#define tcg_gen_extract_reg tcg_gen_extract_i64
-#define tcg_gen_sextract_reg tcg_gen_sextract_i64
-#define tcg_gen_extract2_reg tcg_gen_extract2_i64
-#define tcg_constant_reg tcg_constant_i64
-#define tcg_gen_movcond_reg tcg_gen_movcond_i64
-#define tcg_gen_add2_reg tcg_gen_add2_i64
-#define tcg_gen_sub2_reg tcg_gen_sub2_i64
-#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
-#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
-#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
-#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
-#else
-#define TCGv_reg TCGv_i32
-#define tcg_temp_new tcg_temp_new_i32
-#define tcg_global_mem_new tcg_global_mem_new_i32
-
-#define tcg_gen_movi_reg tcg_gen_movi_i32
-#define tcg_gen_mov_reg tcg_gen_mov_i32
-#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
-#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
-#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
-#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
-#define tcg_gen_ld32u_reg tcg_gen_ld_i32
-#define tcg_gen_ld32s_reg tcg_gen_ld_i32
-#define tcg_gen_ld_reg tcg_gen_ld_i32
-#define tcg_gen_st8_reg tcg_gen_st8_i32
-#define tcg_gen_st16_reg tcg_gen_st16_i32
-#define tcg_gen_st32_reg tcg_gen_st32_i32
-#define tcg_gen_st_reg tcg_gen_st_i32
-#define tcg_gen_add_reg tcg_gen_add_i32
-#define tcg_gen_addi_reg tcg_gen_addi_i32
-#define tcg_gen_sub_reg tcg_gen_sub_i32
-#define tcg_gen_neg_reg tcg_gen_neg_i32
-#define tcg_gen_subfi_reg tcg_gen_subfi_i32
-#define tcg_gen_subi_reg tcg_gen_subi_i32
-#define tcg_gen_and_reg tcg_gen_and_i32
-#define tcg_gen_andi_reg tcg_gen_andi_i32
-#define tcg_gen_or_reg tcg_gen_or_i32
-#define tcg_gen_ori_reg tcg_gen_ori_i32
-#define tcg_gen_xor_reg tcg_gen_xor_i32
-#define tcg_gen_xori_reg tcg_gen_xori_i32
-#define tcg_gen_not_reg tcg_gen_not_i32
-#define tcg_gen_shl_reg tcg_gen_shl_i32
-#define tcg_gen_shli_reg tcg_gen_shli_i32
-#define tcg_gen_shr_reg tcg_gen_shr_i32
-#define tcg_gen_shri_reg tcg_gen_shri_i32
-#define tcg_gen_sar_reg tcg_gen_sar_i32
-#define tcg_gen_sari_reg tcg_gen_sari_i32
-#define tcg_gen_brcond_reg tcg_gen_brcond_i32
-#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
-#define tcg_gen_setcond_reg tcg_gen_setcond_i32
-#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
-#define tcg_gen_mul_reg tcg_gen_mul_i32
-#define tcg_gen_muli_reg tcg_gen_muli_i32
-#define tcg_gen_div_reg tcg_gen_div_i32
-#define tcg_gen_rem_reg tcg_gen_rem_i32
-#define tcg_gen_divu_reg tcg_gen_divu_i32
-#define tcg_gen_remu_reg tcg_gen_remu_i32
-#define tcg_gen_discard_reg tcg_gen_discard_i32
-#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
-#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
-#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
-#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
-#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
-#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
-#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
-#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
-#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
-#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
-#define tcg_gen_ext32u_reg tcg_gen_mov_i32
-#define tcg_gen_ext32s_reg tcg_gen_mov_i32
-#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
-#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
-#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
-#define tcg_gen_andc_reg tcg_gen_andc_i32
-#define tcg_gen_eqv_reg tcg_gen_eqv_i32
-#define tcg_gen_nand_reg tcg_gen_nand_i32
-#define tcg_gen_nor_reg tcg_gen_nor_i32
-#define tcg_gen_orc_reg tcg_gen_orc_i32
-#define tcg_gen_clz_reg tcg_gen_clz_i32
-#define tcg_gen_ctz_reg tcg_gen_ctz_i32
-#define tcg_gen_clzi_reg tcg_gen_clzi_i32
-#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
-#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
-#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
-#define tcg_gen_rotl_reg tcg_gen_rotl_i32
-#define tcg_gen_rotli_reg tcg_gen_rotli_i32
-#define tcg_gen_rotr_reg tcg_gen_rotr_i32
-#define tcg_gen_rotri_reg tcg_gen_rotri_i32
-#define tcg_gen_deposit_reg tcg_gen_deposit_i32
-#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
-#define tcg_gen_extract_reg tcg_gen_extract_i32
-#define tcg_gen_sextract_reg tcg_gen_sextract_i32
-#define tcg_gen_extract2_reg tcg_gen_extract2_i32
-#define tcg_constant_reg tcg_constant_i32
-#define tcg_gen_movcond_reg tcg_gen_movcond_i32
-#define tcg_gen_add2_reg tcg_gen_add2_i32
-#define tcg_gen_sub2_reg tcg_gen_sub2_i32
-#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
-#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
-#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
-#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
-#endif /* TARGET_REGISTER_BITS */
typedef struct DisasCond {
TCGCond c;
- TCGv_reg a0, a1;
+ TCGv_i64 a0, a1;
} DisasCond;
typedef struct DisasContext {
DisasContextBase base;
CPUState *cs;
+ TCGOp *insn_start;
- target_ureg iaoq_f;
- target_ureg iaoq_b;
- target_ureg iaoq_n;
- TCGv_reg iaoq_n_var;
-
- int ntempr, ntempl;
- TCGv_reg tempr[8];
- TCGv_tl templ[4];
+ uint64_t iaoq_f;
+ uint64_t iaoq_b;
+ uint64_t iaoq_n;
+ TCGv_i64 iaoq_n_var;
DisasCond null_cond;
TCGLabel *null_lab;
+ TCGv_i64 zero;
+
uint32_t insn;
uint32_t tb_flags;
int mmu_idx;
int privilege;
bool psw_n_nonzero;
+ bool is_pa20;
#ifdef CONFIG_USER_ONLY
MemOp unalign;
@@ -332,6 +128,23 @@ static int expand_shl11(DisasContext *ctx, int val)
return val << 11;
}
+static int assemble_6(DisasContext *ctx, int val)
+{
+ /*
+ * Officially, 32 * x + 32 - y.
+ * Here, x is already in bit 5, and y is [4:0].
+ * Since -y = ~y + 1, in 5 bits 32 - y => (y ^ 31) + 1,
+ * with the overflow from bit 4 summing with x.
+ */
+ return (val ^ 31) + 1;
+}
+
+/* Translate CMPI doubleword conditions to standard. */
+static int cmpbid_c(DisasContext *ctx, int val)
+{
+ return val ? val : 4; /* 0 == "*<<" */
+}
+
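The shortcut in assemble_6() can be checked exhaustively against the official 32 * x + 32 - y formula; a throwaway brute-force verification:

    #include <assert.h>

    /* assemble_6: the field is 32*x + 32 - y, encoded as val = x<<5 | y.
       The patch computes it as (val ^ 31) + 1; verify for all inputs. */
    int main(void)
    {
        for (int x = 0; x <= 1; x++) {
            for (int y = 0; y < 32; y++) {
                int val = (x << 5) | y;
                assert(((val ^ 31) + 1) == 32 * x + 32 - y);
            }
        }
        return 0;
    }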
/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"
@@ -350,24 +163,24 @@ static int expand_shl11(DisasContext *ctx, int val)
#define DISAS_EXIT DISAS_TARGET_3
/* global register indexes */
-static TCGv_reg cpu_gr[32];
+static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
-static TCGv_reg cpu_iaoq_f;
-static TCGv_reg cpu_iaoq_b;
+static TCGv_i64 cpu_iaoq_f;
+static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
-static TCGv_reg cpu_sar;
-static TCGv_reg cpu_psw_n;
-static TCGv_reg cpu_psw_v;
-static TCGv_reg cpu_psw_cb;
-static TCGv_reg cpu_psw_cb_msb;
+static TCGv_i64 cpu_sar;
+static TCGv_i64 cpu_psw_n;
+static TCGv_i64 cpu_psw_v;
+static TCGv_i64 cpu_psw_cb;
+static TCGv_i64 cpu_psw_cb_msb;
void hppa_translate_init(void)
{
#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
- typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
+ typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
static const GlobalVar vars[] = {
{ &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
DEF_VAR(psw_n),
@@ -422,6 +235,13 @@ void hppa_translate_init(void)
"iasq_b");
}
+static void set_insn_breg(DisasContext *ctx, int breg)
+{
+ assert(ctx->insn_start != NULL);
+ tcg_set_insn_start_param(ctx->insn_start, 2, breg);
+ ctx->insn_start = NULL;
+}
+
static DisasCond cond_make_f(void)
{
return (DisasCond){
@@ -445,36 +265,36 @@ static DisasCond cond_make_n(void)
return (DisasCond){
.c = TCG_COND_NE,
.a0 = cpu_psw_n,
- .a1 = tcg_constant_reg(0)
+ .a1 = tcg_constant_i64(0)
};
}
-static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
+static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
- return (DisasCond){
- .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
- };
+ return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}
-static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
+static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
{
- TCGv_reg tmp = tcg_temp_new();
- tcg_gen_mov_reg(tmp, a0);
- return cond_make_0_tmp(c, tmp);
+ return cond_make_tmp(c, a0, tcg_constant_i64(0));
}
-static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
+static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
{
- DisasCond r = { .c = c };
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_mov_i64(tmp, a0);
+ return cond_make_0_tmp(c, tmp);
+}
- assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
- r.a0 = tcg_temp_new();
- tcg_gen_mov_reg(r.a0, a0);
- r.a1 = tcg_temp_new();
- tcg_gen_mov_reg(r.a1, a1);
+static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
- return r;
+ tcg_gen_mov_i64(t0, a0);
+ tcg_gen_mov_i64(t1, a1);
+ return cond_make_tmp(c, t0, t1);
}
static void cond_free(DisasCond *cond)
@@ -492,60 +312,35 @@ static void cond_free(DisasCond *cond)
}
}
-static TCGv_reg get_temp(DisasContext *ctx)
-{
- unsigned i = ctx->ntempr++;
- g_assert(i < ARRAY_SIZE(ctx->tempr));
- return ctx->tempr[i] = tcg_temp_new();
-}
-
-#ifndef CONFIG_USER_ONLY
-static TCGv_tl get_temp_tl(DisasContext *ctx)
-{
- unsigned i = ctx->ntempl++;
- g_assert(i < ARRAY_SIZE(ctx->templ));
- return ctx->templ[i] = tcg_temp_new_tl();
-}
-#endif
-
-static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
-{
- TCGv_reg t = get_temp(ctx);
- tcg_gen_movi_reg(t, v);
- return t;
-}
-
-static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
+static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
if (reg == 0) {
- TCGv_reg t = get_temp(ctx);
- tcg_gen_movi_reg(t, 0);
- return t;
+ return ctx->zero;
} else {
return cpu_gr[reg];
}
}
-static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
+static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
- return get_temp(ctx);
+ return tcg_temp_new_i64();
} else {
return cpu_gr[reg];
}
}
-static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
+static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
if (ctx->null_cond.c != TCG_COND_NEVER) {
- tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
+ tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
ctx->null_cond.a1, dest, t);
} else {
- tcg_gen_mov_reg(dest, t);
+ tcg_gen_mov_i64(dest, t);
}
}
-static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
+static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
if (reg != 0) {
save_or_nullify(ctx, cpu_gr[reg], t);
@@ -653,18 +448,18 @@ static void nullify_over(DisasContext *ctx)
/* If we're using PSW[N], copy it to a temp because... */
if (ctx->null_cond.a0 == cpu_psw_n) {
- ctx->null_cond.a0 = tcg_temp_new();
- tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
+ ctx->null_cond.a0 = tcg_temp_new_i64();
+ tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
}
/* ... we clear it before branching over the implementation,
so that (1) it's clear after nullifying this insn and
(2) if this insn nullifies the next, PSW[N] is valid. */
if (ctx->psw_n_nonzero) {
ctx->psw_n_nonzero = false;
- tcg_gen_movi_reg(cpu_psw_n, 0);
+ tcg_gen_movi_i64(cpu_psw_n, 0);
}
- tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
+ tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
ctx->null_cond.a1, ctx->null_lab);
cond_free(&ctx->null_cond);
}
@@ -675,12 +470,12 @@ static void nullify_save(DisasContext *ctx)
{
if (ctx->null_cond.c == TCG_COND_NEVER) {
if (ctx->psw_n_nonzero) {
- tcg_gen_movi_reg(cpu_psw_n, 0);
+ tcg_gen_movi_i64(cpu_psw_n, 0);
}
return;
}
if (ctx->null_cond.a0 != cpu_psw_n) {
- tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
+ tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
ctx->null_cond.a0, ctx->null_cond.a1);
ctx->psw_n_nonzero = true;
}
@@ -693,7 +488,7 @@ static void nullify_save(DisasContext *ctx)
static void nullify_set(DisasContext *ctx, bool x)
{
if (ctx->psw_n_nonzero || x) {
- tcg_gen_movi_reg(cpu_psw_n, x);
+ tcg_gen_movi_i64(cpu_psw_n, x);
}
}
@@ -736,16 +531,36 @@ static bool nullify_end(DisasContext *ctx)
return true;
}
-static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
+static uint64_t gva_offset_mask(DisasContext *ctx)
+{
+ return (ctx->tb_flags & PSW_W
+ ? MAKE_64BIT_MASK(0, 62)
+ : MAKE_64BIT_MASK(0, 32));
+}
+
+static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
+ uint64_t ival, TCGv_i64 vval)
{
- if (unlikely(ival == -1)) {
- tcg_gen_mov_reg(dest, vval);
+ uint64_t mask = gva_offset_mask(ctx);
+
+ if (ival != -1) {
+ tcg_gen_movi_i64(dest, ival & mask);
+ return;
+ }
+ tcg_debug_assert(vval != NULL);
+
+ /*
+ * We know that the IAOQ is already properly masked.
+ * This optimization is primarily for "iaoq_f = iaoq_b".
+ */
+ if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
+ tcg_gen_mov_i64(dest, vval);
} else {
- tcg_gen_movi_reg(dest, ival);
+ tcg_gen_andi_i64(dest, vval, mask);
}
}
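gva_offset_mask()/copy_iaoq_entry() keep queue offsets masked to 62 bits when PSW_W is set and to 32 bits otherwise; a standalone example of that masking with an arbitrary value (MASK64 below is a local stand-in for QEMU's MAKE_64BIT_MASK):

    #include <assert.h>
    #include <stdint.h>

    /* Local stand-in for MAKE_64BIT_MASK(0, len). */
    #define MASK64(len)  (~0ULL >> (64 - (len)))

    int main(void)
    {
        uint64_t va = 0x40123456789abcdeULL;   /* arbitrary example value */

        /* PSW_W set: offsets are 62 bits wide; clear: 32 bits. */
        assert((va & MASK64(62)) == 0x00123456789abcdeULL);
        assert((va & MASK64(32)) == 0x789abcdeULL);
        return 0;
    }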
-static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
+static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
{
return ctx->iaoq_f + disp + 8;
}
@@ -757,8 +572,8 @@ static void gen_excp_1(int exception)
static void gen_excp(DisasContext *ctx, int exception)
{
- copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
- copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
+ copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
+ copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
nullify_save(ctx);
gen_excp_1(exception);
ctx->base.is_jmp = DISAS_NORETURN;
@@ -767,7 +582,7 @@ static void gen_excp(DisasContext *ctx, int exception)
static bool gen_excp_iir(DisasContext *ctx, int exc)
{
nullify_over(ctx);
- tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
+ tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
gen_excp(ctx, exc);
return nullify_end(ctx);
@@ -790,7 +605,7 @@ static bool gen_illegal(DisasContext *ctx)
} while (0)
#endif
-static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
+static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
return translator_use_goto_tb(&ctx->base, dest);
}
@@ -806,16 +621,16 @@ static bool use_nullify_skip(DisasContext *ctx)
}
static void gen_goto_tb(DisasContext *ctx, int which,
- target_ureg f, target_ureg b)
+ uint64_t f, uint64_t b)
{
if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
tcg_gen_goto_tb(which);
- tcg_gen_movi_reg(cpu_iaoq_f, f);
- tcg_gen_movi_reg(cpu_iaoq_b, b);
+ copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
+ copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
tcg_gen_exit_tb(ctx->base.tb, which);
} else {
- copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
- copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
+ copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
+ copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
tcg_gen_lookup_and_goto_ptr();
}
}
@@ -830,27 +645,41 @@ static bool cond_need_cb(int c)
return c == 4 || c == 5;
}
+/* Need extensions from TCGv_i32 to TCGv_i64. */
+static bool cond_need_ext(DisasContext *ctx, bool d)
+{
+ return !(ctx->is_pa20 && d);
+}
+
/*
* Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
* the Parisc 1.1 Architecture Reference Manual for details.
*/
-static DisasCond do_cond(unsigned cf, TCGv_reg res,
- TCGv_reg cb_msb, TCGv_reg sv)
+static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
+ TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
{
DisasCond cond;
- TCGv_reg tmp;
+ TCGv_i64 tmp;
switch (cf >> 1) {
case 0: /* Never / TR (0 / 1) */
cond = cond_make_f();
break;
case 1: /* = / <> (Z / !Z) */
+ if (cond_need_ext(ctx, d)) {
+ tmp = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(tmp, res);
+ res = tmp;
+ }
cond = cond_make_0(TCG_COND_EQ, res);
break;
case 2: /* < / >= (N ^ V / !(N ^ V) */
- tmp = tcg_temp_new();
- tcg_gen_xor_reg(tmp, res, sv);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_xor_i64(tmp, res, sv);
+ if (cond_need_ext(ctx, d)) {
+ tcg_gen_ext32s_i64(tmp, tmp);
+ }
cond = cond_make_0_tmp(TCG_COND_LT, tmp);
break;
case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
@@ -863,27 +692,42 @@ static DisasCond do_cond(unsigned cf, TCGv_reg res,
* !(~(res ^ sv) >> 31) | !res
* !(~(res ^ sv) >> 31 & res)
*/
- tmp = tcg_temp_new();
- tcg_gen_eqv_reg(tmp, res, sv);
- tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
- tcg_gen_and_reg(tmp, tmp, res);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_eqv_i64(tmp, res, sv);
+ if (cond_need_ext(ctx, d)) {
+ tcg_gen_sextract_i64(tmp, tmp, 31, 1);
+ tcg_gen_and_i64(tmp, tmp, res);
+ tcg_gen_ext32u_i64(tmp, tmp);
+ } else {
+ tcg_gen_sari_i64(tmp, tmp, 63);
+ tcg_gen_and_i64(tmp, tmp, res);
+ }
cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
break;
case 4: /* NUV / UV (!C / C) */
+ /* Only bit 0 of cb_msb is ever set. */
cond = cond_make_0(TCG_COND_EQ, cb_msb);
break;
case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
- tmp = tcg_temp_new();
- tcg_gen_neg_reg(tmp, cb_msb);
- tcg_gen_and_reg(tmp, tmp, res);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_neg_i64(tmp, cb_msb);
+ tcg_gen_and_i64(tmp, tmp, res);
+ if (cond_need_ext(ctx, d)) {
+ tcg_gen_ext32u_i64(tmp, tmp);
+ }
cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
break;
case 6: /* SV / NSV (V / !V) */
+ if (cond_need_ext(ctx, d)) {
+ tmp = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(tmp, sv);
+ sv = tmp;
+ }
cond = cond_make_0(TCG_COND_LT, sv);
break;
case 7: /* OD / EV */
- tmp = tcg_temp_new();
- tcg_gen_andi_reg(tmp, res, 1);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_andi_i64(tmp, res, 1);
cond = cond_make_0_tmp(TCG_COND_NE, tmp);
break;
default:
@@ -900,35 +744,55 @@ static DisasCond do_cond(unsigned cf, TCGv_reg res,
can use the inputs directly. This can allow other computation to be
deleted as unused. */
-static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
- TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
+static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
+ TCGv_i64 res, TCGv_i64 in1,
+ TCGv_i64 in2, TCGv_i64 sv)
{
- DisasCond cond;
+ TCGCond tc;
+ bool ext_uns;
switch (cf >> 1) {
case 1: /* = / <> */
- cond = cond_make(TCG_COND_EQ, in1, in2);
+ tc = TCG_COND_EQ;
+ ext_uns = true;
break;
case 2: /* < / >= */
- cond = cond_make(TCG_COND_LT, in1, in2);
+ tc = TCG_COND_LT;
+ ext_uns = false;
break;
case 3: /* <= / > */
- cond = cond_make(TCG_COND_LE, in1, in2);
+ tc = TCG_COND_LE;
+ ext_uns = false;
break;
case 4: /* << / >>= */
- cond = cond_make(TCG_COND_LTU, in1, in2);
+ tc = TCG_COND_LTU;
+ ext_uns = true;
break;
case 5: /* <<= / >> */
- cond = cond_make(TCG_COND_LEU, in1, in2);
+ tc = TCG_COND_LEU;
+ ext_uns = true;
break;
default:
- return do_cond(cf, res, NULL, sv);
+ return do_cond(ctx, cf, d, res, NULL, sv);
}
+
if (cf & 1) {
- cond.c = tcg_invert_cond(cond.c);
+ tc = tcg_invert_cond(tc);
}
+ if (cond_need_ext(ctx, d)) {
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
- return cond;
+ if (ext_uns) {
+ tcg_gen_ext32u_i64(t1, in1);
+ tcg_gen_ext32u_i64(t2, in2);
+ } else {
+ tcg_gen_ext32s_i64(t1, in1);
+ tcg_gen_ext32s_i64(t2, in2);
+ }
+ return cond_make_tmp(tc, t1, t2);
+ }
+ return cond_make(tc, in1, in2);
}
/*
@@ -940,8 +804,12 @@ static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
* how cases c={2,3} are treated.
*/
-static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
+static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
+ TCGv_i64 res)
{
+ TCGCond tc;
+ bool ext_uns;
+
switch (cf) {
case 0: /* never */
case 9: /* undef, C */
@@ -956,30 +824,55 @@ static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
return cond_make_t();
case 2: /* == */
- return cond_make_0(TCG_COND_EQ, res);
+ tc = TCG_COND_EQ;
+ ext_uns = true;
+ break;
case 3: /* <> */
- return cond_make_0(TCG_COND_NE, res);
+ tc = TCG_COND_NE;
+ ext_uns = true;
+ break;
case 4: /* < */
- return cond_make_0(TCG_COND_LT, res);
+ tc = TCG_COND_LT;
+ ext_uns = false;
+ break;
case 5: /* >= */
- return cond_make_0(TCG_COND_GE, res);
+ tc = TCG_COND_GE;
+ ext_uns = false;
+ break;
case 6: /* <= */
- return cond_make_0(TCG_COND_LE, res);
+ tc = TCG_COND_LE;
+ ext_uns = false;
+ break;
case 7: /* > */
- return cond_make_0(TCG_COND_GT, res);
+ tc = TCG_COND_GT;
+ ext_uns = false;
+ break;
case 14: /* OD */
case 15: /* EV */
- return do_cond(cf, res, NULL, NULL);
+ return do_cond(ctx, cf, d, res, NULL, NULL);
default:
g_assert_not_reached();
}
+
+ if (cond_need_ext(ctx, d)) {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ if (ext_uns) {
+ tcg_gen_ext32u_i64(tmp, res);
+ } else {
+ tcg_gen_ext32s_i64(tmp, res);
+ }
+ return cond_make_0_tmp(tc, tmp);
+ }
+ return cond_make_0(tc, res);
}
/* Similar, but for shift/extract/deposit conditions. */
-static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
+static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
+ TCGv_i64 res)
{
unsigned c, f;
@@ -992,28 +885,29 @@ static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
}
f = (orig & 4) / 4;
- return do_log_cond(c * 2 + f, res);
+ return do_log_cond(ctx, c * 2 + f, d, res);
}
/* Similar, but for unit conditions. */
-static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
- TCGv_reg in1, TCGv_reg in2)
+static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
+ TCGv_i64 in1, TCGv_i64 in2)
{
DisasCond cond;
- TCGv_reg tmp, cb = NULL;
+ TCGv_i64 tmp, cb = NULL;
+ uint64_t d_repl = d ? 0x0000000100000001ull : 1;
if (cf & 8) {
/* Since we want to test lots of carry-out bits all at once, do not
* do our normal thing and compute carry-in of bit B+1 since that
* leaves us with carry bits spread across two words.
*/
- cb = tcg_temp_new();
- tmp = tcg_temp_new();
- tcg_gen_or_reg(cb, in1, in2);
- tcg_gen_and_reg(tmp, in1, in2);
- tcg_gen_andc_reg(cb, cb, res);
- tcg_gen_or_reg(cb, cb, tmp);
+ cb = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
+ tcg_gen_or_i64(cb, in1, in2);
+ tcg_gen_and_i64(tmp, in1, in2);
+ tcg_gen_andc_i64(cb, cb, res);
+ tcg_gen_or_i64(cb, cb, tmp);
}
switch (cf >> 1) {
@@ -1027,33 +921,33 @@ static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
/* See hasless(v,1) from
* https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
*/
- tmp = tcg_temp_new();
- tcg_gen_subi_reg(tmp, res, 0x01010101u);
- tcg_gen_andc_reg(tmp, tmp, res);
- tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
+ tcg_gen_andc_i64(tmp, tmp, res);
+ tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
cond = cond_make_0(TCG_COND_NE, tmp);
break;
case 3: /* SHZ / NHZ */
- tmp = tcg_temp_new();
- tcg_gen_subi_reg(tmp, res, 0x00010001u);
- tcg_gen_andc_reg(tmp, tmp, res);
- tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
+ tcg_gen_andc_i64(tmp, tmp, res);
+ tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
cond = cond_make_0(TCG_COND_NE, tmp);
break;
case 4: /* SDC / NDC */
- tcg_gen_andi_reg(cb, cb, 0x88888888u);
+ tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
cond = cond_make_0(TCG_COND_NE, cb);
break;
case 6: /* SBC / NBC */
- tcg_gen_andi_reg(cb, cb, 0x80808080u);
+ tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
cond = cond_make_0(TCG_COND_NE, cb);
break;
case 7: /* SHC / NHC */
- tcg_gen_andi_reg(cb, cb, 0x80008000u);
+ tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
cond = cond_make_0(TCG_COND_NE, cb);
break;
@@ -1067,68 +961,87 @@ static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
return cond;
}
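The cb value built in do_unit_cond() -- (in1 & in2) | ((in1 | in2) & ~res) -- is the per-bit carry-out vector of in1 + in2; a quick randomized self-check of that identity (not part of the patch):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    int main(void)
    {
        srand(1);
        for (int i = 0; i < 1000; i++) {
            /* 63-bit operands so the 64-bit sum cannot wrap. */
            uint64_t a = (((uint64_t)rand() << 32) ^ (uint64_t)rand()) >> 1;
            uint64_t b = (((uint64_t)rand() << 32) ^ (uint64_t)rand()) >> 1;
            uint64_t sum = a + b;
            uint64_t cb  = (a & b) | ((a | b) & ~sum);

            /* (sum ^ a ^ b) is the carry-in vector; shifting it down
               gives the carry-out of each bit, which is what cb holds. */
            assert(cb == ((sum ^ a ^ b) >> 1));
        }
        return 0;
    }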
+static TCGv_i64 get_carry(DisasContext *ctx, bool d,
+ TCGv_i64 cb, TCGv_i64 cb_msb)
+{
+ if (cond_need_ext(ctx, d)) {
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_extract_i64(t, cb, 32, 1);
+ return t;
+ }
+ return cb_msb;
+}
+
+static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
+{
+ return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
+}
+
/* Compute signed overflow for addition. */
-static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
- TCGv_reg in1, TCGv_reg in2)
+static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
+ TCGv_i64 in1, TCGv_i64 in2)
{
- TCGv_reg sv = get_temp(ctx);
- TCGv_reg tmp = tcg_temp_new();
+ TCGv_i64 sv = tcg_temp_new_i64();
+ TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_xor_reg(sv, res, in1);
- tcg_gen_xor_reg(tmp, in1, in2);
- tcg_gen_andc_reg(sv, sv, tmp);
+ tcg_gen_xor_i64(sv, res, in1);
+ tcg_gen_xor_i64(tmp, in1, in2);
+ tcg_gen_andc_i64(sv, sv, tmp);
return sv;
}
/* Compute signed overflow for subtraction. */
-static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
- TCGv_reg in1, TCGv_reg in2)
+static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
+ TCGv_i64 in1, TCGv_i64 in2)
{
- TCGv_reg sv = get_temp(ctx);
- TCGv_reg tmp = tcg_temp_new();
+ TCGv_i64 sv = tcg_temp_new_i64();
+ TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_xor_reg(sv, res, in1);
- tcg_gen_xor_reg(tmp, in1, in2);
- tcg_gen_and_reg(sv, sv, tmp);
+ tcg_gen_xor_i64(sv, res, in1);
+ tcg_gen_xor_i64(tmp, in1, in2);
+ tcg_gen_and_i64(sv, sv, tmp);
return sv;
}
-static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
- TCGv_reg in2, unsigned shift, bool is_l,
- bool is_tsv, bool is_tc, bool is_c, unsigned cf)
+static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
+ TCGv_i64 in2, unsigned shift, bool is_l,
+ bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
- TCGv_reg dest, cb, cb_msb, sv, tmp;
+ TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
unsigned c = cf >> 1;
DisasCond cond;
- dest = tcg_temp_new();
+ dest = tcg_temp_new_i64();
cb = NULL;
cb_msb = NULL;
+ cb_cond = NULL;
if (shift) {
- tmp = get_temp(ctx);
- tcg_gen_shli_reg(tmp, in1, shift);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_shli_i64(tmp, in1, shift);
in1 = tmp;
}
if (!is_l || cond_need_cb(c)) {
- TCGv_reg zero = tcg_constant_reg(0);
- cb_msb = get_temp(ctx);
- tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
+ cb_msb = tcg_temp_new_i64();
+ cb = tcg_temp_new_i64();
+
+ tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
if (is_c) {
- tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
+ tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
+ get_psw_carry(ctx, d), ctx->zero);
}
- if (!is_l) {
- cb = get_temp(ctx);
- tcg_gen_xor_reg(cb, in1, in2);
- tcg_gen_xor_reg(cb, cb, dest);
+ tcg_gen_xor_i64(cb, in1, in2);
+ tcg_gen_xor_i64(cb, cb, dest);
+ if (cond_need_cb(c)) {
+ cb_cond = get_carry(ctx, d, cb, cb_msb);
}
} else {
- tcg_gen_add_reg(dest, in1, in2);
+ tcg_gen_add_i64(dest, in1, in2);
if (is_c) {
- tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
+ tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
}
}
@@ -1143,10 +1056,10 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
}
/* Emit any conditional trap before any writeback. */
- cond = do_cond(cf, dest, cb_msb, sv);
+ cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
if (is_tc) {
- tmp = tcg_temp_new();
- tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
gen_helper_tcond(tcg_env, tmp);
}
@@ -1162,61 +1075,65 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
ctx->null_cond = cond;
}
-static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
+static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
- TCGv_reg tcg_r1, tcg_r2;
+ TCGv_i64 tcg_r1, tcg_r2;
if (a->cf) {
nullify_over(ctx);
}
tcg_r1 = load_gpr(ctx, a->r1);
tcg_r2 = load_gpr(ctx, a->r2);
- do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
+ do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
+ is_tsv, is_tc, is_c, a->cf, a->d);
return nullify_end(ctx);
}
static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
bool is_tsv, bool is_tc)
{
- TCGv_reg tcg_im, tcg_r2;
+ TCGv_i64 tcg_im, tcg_r2;
if (a->cf) {
nullify_over(ctx);
}
- tcg_im = load_const(ctx, a->i);
+ tcg_im = tcg_constant_i64(a->i);
tcg_r2 = load_gpr(ctx, a->r);
- do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
+ /* All ADDI conditions are 32-bit. */
+ do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
return nullify_end(ctx);
}
-static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
- TCGv_reg in2, bool is_tsv, bool is_b,
- bool is_tc, unsigned cf)
+static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
+ TCGv_i64 in2, bool is_tsv, bool is_b,
+ bool is_tc, unsigned cf, bool d)
{
- TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
+ TCGv_i64 dest, sv, cb, cb_msb, tmp;
unsigned c = cf >> 1;
DisasCond cond;
- dest = tcg_temp_new();
- cb = tcg_temp_new();
- cb_msb = tcg_temp_new();
+ dest = tcg_temp_new_i64();
+ cb = tcg_temp_new_i64();
+ cb_msb = tcg_temp_new_i64();
- zero = tcg_constant_reg(0);
if (is_b) {
/* DEST,C = IN1 + ~IN2 + C. */
- tcg_gen_not_reg(cb, in2);
- tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
- tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
- tcg_gen_xor_reg(cb, cb, in1);
- tcg_gen_xor_reg(cb, cb, dest);
+ tcg_gen_not_i64(cb, in2);
+ tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
+ get_psw_carry(ctx, d), ctx->zero);
+ tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
+ tcg_gen_xor_i64(cb, cb, in1);
+ tcg_gen_xor_i64(cb, cb, dest);
} else {
- /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
- operations by seeding the high word with 1 and subtracting. */
- tcg_gen_movi_reg(cb_msb, 1);
- tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
- tcg_gen_eqv_reg(cb, in1, in2);
- tcg_gen_xor_reg(cb, cb, dest);
+ /*
+ * DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
+ * operations by seeding the high word with 1 and subtracting.
+ */
+ TCGv_i64 one = tcg_constant_i64(1);
+ tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
+ tcg_gen_eqv_i64(cb, in1, in2);
+ tcg_gen_xor_i64(cb, cb, dest);
}
/* Compute signed overflow if required. */
@@ -1230,15 +1147,15 @@ static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
/* Compute the condition. We cannot use the special case for borrow. */
if (!is_b) {
- cond = do_sub_cond(cf, dest, in1, in2, sv);
+ cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
} else {
- cond = do_cond(cf, dest, cb_msb, sv);
+ cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
}
/* Emit any conditional trap before any writeback. */
if (is_tc) {
- tmp = tcg_temp_new();
- tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
gen_helper_tcond(tcg_env, tmp);
}
@@ -1252,41 +1169,42 @@ static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
ctx->null_cond = cond;
}
-static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
+static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
bool is_tsv, bool is_b, bool is_tc)
{
- TCGv_reg tcg_r1, tcg_r2;
+ TCGv_i64 tcg_r1, tcg_r2;
if (a->cf) {
nullify_over(ctx);
}
tcg_r1 = load_gpr(ctx, a->r1);
tcg_r2 = load_gpr(ctx, a->r2);
- do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
+ do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
return nullify_end(ctx);
}
static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
- TCGv_reg tcg_im, tcg_r2;
+ TCGv_i64 tcg_im, tcg_r2;
if (a->cf) {
nullify_over(ctx);
}
- tcg_im = load_const(ctx, a->i);
+ tcg_im = tcg_constant_i64(a->i);
tcg_r2 = load_gpr(ctx, a->r);
- do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
+ /* All SUBI conditions are 32-bit. */
+ do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
return nullify_end(ctx);
}
-static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
- TCGv_reg in2, unsigned cf)
+static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
+ TCGv_i64 in2, unsigned cf, bool d)
{
- TCGv_reg dest, sv;
+ TCGv_i64 dest, sv;
DisasCond cond;
- dest = tcg_temp_new();
- tcg_gen_sub_reg(dest, in1, in2);
+ dest = tcg_temp_new_i64();
+ tcg_gen_sub_i64(dest, in1, in2);
/* Compute signed overflow if required. */
sv = NULL;
@@ -1295,10 +1213,10 @@ static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
}
/* Form the condition for the compare. */
- cond = do_sub_cond(cf, dest, in1, in2, sv);
+ cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
/* Clear. */
- tcg_gen_movi_reg(dest, 0);
+ tcg_gen_movi_i64(dest, 0);
save_gpr(ctx, rt, dest);
/* Install the new nullification. */
@@ -1306,11 +1224,11 @@ static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
ctx->null_cond = cond;
}
-static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
- TCGv_reg in2, unsigned cf,
- void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
+static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
+ TCGv_i64 in2, unsigned cf, bool d,
+ void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
- TCGv_reg dest = dest_gpr(ctx, rt);
+ TCGv_i64 dest = dest_gpr(ctx, rt);
/* Perform the operation, and writeback. */
fn(dest, in1, in2);
@@ -1319,29 +1237,29 @@ static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
/* Install the new nullification. */
cond_free(&ctx->null_cond);
if (cf) {
- ctx->null_cond = do_log_cond(cf, dest);
+ ctx->null_cond = do_log_cond(ctx, cf, d, dest);
}
}
-static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
- void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
+static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
+ void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
- TCGv_reg tcg_r1, tcg_r2;
+ TCGv_i64 tcg_r1, tcg_r2;
if (a->cf) {
nullify_over(ctx);
}
tcg_r1 = load_gpr(ctx, a->r1);
tcg_r2 = load_gpr(ctx, a->r2);
- do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
+ do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
return nullify_end(ctx);
}
-static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
- TCGv_reg in2, unsigned cf, bool is_tc,
- void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
+static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
+ TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
+ void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
- TCGv_reg dest;
+ TCGv_i64 dest;
DisasCond cond;
if (cf == 0) {
@@ -1350,14 +1268,14 @@ static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
save_gpr(ctx, rt, dest);
cond_free(&ctx->null_cond);
} else {
- dest = tcg_temp_new();
+ dest = tcg_temp_new_i64();
fn(dest, in1, in2);
- cond = do_unit_cond(cf, dest, in1, in2);
+ cond = do_unit_cond(cf, d, dest, in1, in2);
if (is_tc) {
- TCGv_reg tmp = tcg_temp_new();
- tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
gen_helper_tcond(tcg_env, tmp);
}
save_gpr(ctx, rt, dest);
@@ -1372,17 +1290,17 @@ static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
from the top 2 bits of the base register. There are a few system
instructions that have a 3-bit space specifier, for which SR0 is
not special. To handle this, pass ~SP. */
-static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
+static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
TCGv_ptr ptr;
- TCGv_reg tmp;
+ TCGv_i64 tmp;
TCGv_i64 spc;
if (sp != 0) {
if (sp < 0) {
sp = ~sp;
}
- spc = get_temp_tl(ctx);
+ spc = tcg_temp_new_i64();
load_spr(ctx, spc, sp);
return spc;
}
@@ -1391,12 +1309,13 @@ static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
}
ptr = tcg_temp_new_ptr();
- tmp = tcg_temp_new();
- spc = get_temp_tl(ctx);
+ tmp = tcg_temp_new_i64();
+ spc = tcg_temp_new_i64();
- tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
- tcg_gen_andi_reg(tmp, tmp, 030);
- tcg_gen_trunc_reg_ptr(ptr, tmp);
+ /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
+ tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
+ tcg_gen_andi_i64(tmp, tmp, 030);
+ tcg_gen_trunc_i64_ptr(ptr, tmp);
tcg_gen_add_ptr(ptr, ptr, tcg_env);
tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
@@ -1405,38 +1324,35 @@ static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
}
#endif
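
For readers unfamiliar with short-pointer addressing, a hedged scalar sketch of the selection arithmetic above: the top two bits of the offset (32- or 64-bit wide, per PSW.W) pick one of SR4..SR7, and the "<< 3" in the TCG version is simply the byte offset into the uint64_t sr[] array:

#include <stdint.h>

/* Illustrative only: which space register a short pointer selects. */
static unsigned short_pointer_sr(uint64_t base, int psw_w)
{
    unsigned top2 = (base >> ((psw_w ? 64 : 32) - 2)) & 3;
    return 4 + top2;    /* SR4..SR7 */
}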
-static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
- unsigned rb, unsigned rx, int scale, target_sreg disp,
+static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
+ unsigned rb, unsigned rx, int scale, int64_t disp,
unsigned sp, int modify, bool is_phys)
{
- TCGv_reg base = load_gpr(ctx, rb);
- TCGv_reg ofs;
+ TCGv_i64 base = load_gpr(ctx, rb);
+ TCGv_i64 ofs;
+ TCGv_i64 addr;
+
+ set_insn_breg(ctx, rb);
/* Note that RX is mutually exclusive with DISP. */
if (rx) {
- ofs = get_temp(ctx);
- tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
- tcg_gen_add_reg(ofs, ofs, base);
+ ofs = tcg_temp_new_i64();
+ tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
+ tcg_gen_add_i64(ofs, ofs, base);
} else if (disp || modify) {
- ofs = get_temp(ctx);
- tcg_gen_addi_reg(ofs, base, disp);
+ ofs = tcg_temp_new_i64();
+ tcg_gen_addi_i64(ofs, base, disp);
} else {
ofs = base;
}
*pofs = ofs;
-#ifdef CONFIG_USER_ONLY
- *pgva = (modify <= 0 ? ofs : base);
-#else
- TCGv_tl addr = get_temp_tl(ctx);
- tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
- if (ctx->tb_flags & PSW_W) {
- tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
- }
+ *pgva = addr = tcg_temp_new_i64();
+ tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx));
+#ifndef CONFIG_USER_ONLY
if (!is_phys) {
- tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
+ tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
}
- *pgva = addr;
#endif
}
@@ -1446,29 +1362,29 @@ static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
* = 0 for no base register update.
*/
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
- unsigned rx, int scale, target_sreg disp,
+ unsigned rx, int scale, int64_t disp,
unsigned sp, int modify, MemOp mop)
{
- TCGv_reg ofs;
- TCGv_tl addr;
+ TCGv_i64 ofs;
+ TCGv_i64 addr;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
ctx->mmu_idx == MMU_PHYS_IDX);
- tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
+ tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
if (modify) {
save_gpr(ctx, rb, ofs);
}
}
static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
- unsigned rx, int scale, target_sreg disp,
+ unsigned rx, int scale, int64_t disp,
unsigned sp, int modify, MemOp mop)
{
- TCGv_reg ofs;
- TCGv_tl addr;
+ TCGv_i64 ofs;
+ TCGv_i64 addr;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
@@ -1482,11 +1398,11 @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
}
static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
- unsigned rx, int scale, target_sreg disp,
+ unsigned rx, int scale, int64_t disp,
unsigned sp, int modify, MemOp mop)
{
- TCGv_reg ofs;
- TCGv_tl addr;
+ TCGv_i64 ofs;
+ TCGv_i64 addr;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
@@ -1500,11 +1416,11 @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
}
static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
- unsigned rx, int scale, target_sreg disp,
+ unsigned rx, int scale, int64_t disp,
unsigned sp, int modify, MemOp mop)
{
- TCGv_reg ofs;
- TCGv_tl addr;
+ TCGv_i64 ofs;
+ TCGv_i64 addr;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
@@ -1517,19 +1433,11 @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
}
}
-#if TARGET_REGISTER_BITS == 64
-#define do_load_reg do_load_64
-#define do_store_reg do_store_64
-#else
-#define do_load_reg do_load_32
-#define do_store_reg do_store_32
-#endif
-
static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
- unsigned rx, int scale, target_sreg disp,
+ unsigned rx, int scale, int64_t disp,
unsigned sp, int modify, MemOp mop)
{
- TCGv_reg dest;
+ TCGv_i64 dest;
nullify_over(ctx);
@@ -1538,16 +1446,16 @@ static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
dest = dest_gpr(ctx, rt);
} else {
/* Make sure if RT == RB, we see the result of the load. */
- dest = get_temp(ctx);
+ dest = tcg_temp_new_i64();
}
- do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
+ do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
save_gpr(ctx, rt, dest);
return nullify_end(ctx);
}
static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
- unsigned rx, int scale, target_sreg disp,
+ unsigned rx, int scale, int64_t disp,
unsigned sp, int modify)
{
TCGv_i32 tmp;
@@ -1572,7 +1480,7 @@ static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
}
static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
- unsigned rx, int scale, target_sreg disp,
+ unsigned rx, int scale, int64_t disp,
unsigned sp, int modify)
{
TCGv_i64 tmp;
@@ -1597,16 +1505,16 @@ static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
}
static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
- target_sreg disp, unsigned sp,
+ int64_t disp, unsigned sp,
int modify, MemOp mop)
{
nullify_over(ctx);
- do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
+ do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
return nullify_end(ctx);
}
static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
- unsigned rx, int scale, target_sreg disp,
+ unsigned rx, int scale, int64_t disp,
unsigned sp, int modify)
{
TCGv_i32 tmp;
@@ -1626,7 +1534,7 @@ static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
}
static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
- unsigned rx, int scale, target_sreg disp,
+ unsigned rx, int scale, int64_t disp,
unsigned sp, int modify)
{
TCGv_i64 tmp;
@@ -1739,12 +1647,12 @@ static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
/* Emit an unconditional branch to a direct target, which may or may not
have already had nullification handled. */
-static bool do_dbranch(DisasContext *ctx, target_ureg dest,
+static bool do_dbranch(DisasContext *ctx, uint64_t dest,
unsigned link, bool is_n)
{
if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
if (link != 0) {
- copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
+ copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
}
ctx->iaoq_n = dest;
if (is_n) {
@@ -1754,7 +1662,7 @@ static bool do_dbranch(DisasContext *ctx, target_ureg dest,
nullify_over(ctx);
if (link != 0) {
- copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
+ copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
}
if (is_n && use_nullify_skip(ctx)) {
@@ -1776,10 +1684,10 @@ static bool do_dbranch(DisasContext *ctx, target_ureg dest,
/* Emit a conditional branch to a direct target. If the branch itself
is nullified, we should have already used nullify_over. */
-static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
+static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
DisasCond *cond)
{
- target_ureg dest = iaoq_dest(ctx, disp);
+ uint64_t dest = iaoq_dest(ctx, disp);
TCGLabel *taken = NULL;
TCGCond c = cond->c;
bool n;
@@ -1795,7 +1703,7 @@ static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
}
taken = gen_new_label();
- tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
+ tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
cond_free(cond);
/* Not taken: Condition not satisfied; nullify on backward branches. */
@@ -1812,7 +1720,7 @@ static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
if (ctx->iaoq_n == -1) {
/* The temporary iaoq_n_var died at the branch above.
Regenerate it here instead of saving it. */
- tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
+ tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
}
gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
}
@@ -1842,24 +1750,25 @@ static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
/* Emit an unconditional branch to an indirect target. This handles
nullification of the branch itself. */
-static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
+static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
unsigned link, bool is_n)
{
- TCGv_reg a0, a1, next, tmp;
+ TCGv_i64 a0, a1, next, tmp;
TCGCond c;
assert(ctx->null_lab == NULL);
if (ctx->null_cond.c == TCG_COND_NEVER) {
if (link != 0) {
- copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
+ copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
}
- next = get_temp(ctx);
- tcg_gen_mov_reg(next, dest);
+ next = tcg_temp_new_i64();
+ tcg_gen_mov_i64(next, dest);
if (is_n) {
if (use_nullify_skip(ctx)) {
- tcg_gen_mov_reg(cpu_iaoq_f, next);
- tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
+ copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
+ tcg_gen_addi_i64(next, next, 4);
+ copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
nullify_set(ctx, 0);
ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
return true;
@@ -1881,12 +1790,14 @@ static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
/* We do have to handle the non-local temporary, DEST, before
       branching.  Since IAOQ_F is not really live at this point, we
can simply store DEST optimistically. Similarly with IAOQ_B. */
- tcg_gen_mov_reg(cpu_iaoq_f, dest);
- tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
+ copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
+ next = tcg_temp_new_i64();
+ tcg_gen_addi_i64(next, dest, 4);
+ copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
nullify_over(ctx);
if (link != 0) {
- tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
+ copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
}
tcg_gen_lookup_and_goto_ptr();
return nullify_end(ctx);
@@ -1895,23 +1806,23 @@ static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
a0 = ctx->null_cond.a0;
a1 = ctx->null_cond.a1;
- tmp = tcg_temp_new();
- next = get_temp(ctx);
+ tmp = tcg_temp_new_i64();
+ next = tcg_temp_new_i64();
- copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
- tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
+ copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
+ tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
ctx->iaoq_n = -1;
ctx->iaoq_n_var = next;
if (link != 0) {
- tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
+ tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
}
if (is_n) {
/* The branch nullifies the next insn, which means the state of N
after the branch is the inverse of the state of N that applied
to the branch. */
- tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
+ tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
cond_free(&ctx->null_cond);
ctx->null_cond = cond_make_n();
ctx->psw_n_nonzero = true;
@@ -1929,23 +1840,23 @@ static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
* IAOQ_Next{30..31} ← IAOQ_Front{30..31};
* which keeps the privilege level from being increased.
*/
-static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
+static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
{
- TCGv_reg dest;
+ TCGv_i64 dest;
switch (ctx->privilege) {
case 0:
/* Privilege 0 is maximum and is allowed to decrease. */
return offset;
case 3:
/* Privilege 3 is minimum and is never allowed to increase. */
- dest = get_temp(ctx);
- tcg_gen_ori_reg(dest, offset, 3);
+ dest = tcg_temp_new_i64();
+ tcg_gen_ori_i64(dest, offset, 3);
break;
default:
- dest = get_temp(ctx);
- tcg_gen_andi_reg(dest, offset, -4);
- tcg_gen_ori_reg(dest, dest, ctx->privilege);
- tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
+ dest = tcg_temp_new_i64();
+ tcg_gen_andi_i64(dest, offset, -4);
+ tcg_gen_ori_i64(dest, dest, ctx->privilege);
+ tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
break;
}
return dest;
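
A scalar restatement (illustrative only) of the clamp performed above: the low two bits of a branch target carry the privilege level, 0 being most privileged, and an indirect branch may lower but never raise it:

#include <stdint.h>

/* Illustrative only: mirrors the andi/ori/movcond sequence above. */
static uint64_t clamp_branch_priv(uint64_t offset, unsigned cur_priv)
{
    uint64_t dest = (offset & ~(uint64_t)3) | cur_priv;
    /* Keep the numerically larger, i.e. less privileged, value. */
    return dest > offset ? dest : offset;
}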
@@ -1961,6 +1872,8 @@ static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
aforementioned BE. */
static void do_page_zero(DisasContext *ctx)
{
+ TCGv_i64 tmp;
+
/* If by some means we get here with PSW[N]=1, that implies that
the B,GATE instruction would be skipped, and we'd fault on the
next insn within the privileged page. */
@@ -1968,7 +1881,7 @@ static void do_page_zero(DisasContext *ctx)
case TCG_COND_NEVER:
break;
case TCG_COND_ALWAYS:
- tcg_gen_movi_reg(cpu_psw_n, 0);
+ tcg_gen_movi_i64(cpu_psw_n, 0);
goto do_sigill;
default:
/* Since this is always the first (and only) insn within the
@@ -1996,9 +1909,12 @@ static void do_page_zero(DisasContext *ctx)
break;
case 0xe0: /* SET_THREAD_POINTER */
- tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
- tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
- tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
+ tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
+ tmp = tcg_temp_new_i64();
+ tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
+ copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
+ tcg_gen_addi_i64(tmp, tmp, 4);
+ copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
break;
@@ -2039,8 +1955,8 @@ static bool trans_sync(DisasContext *ctx, arg_sync *a)
static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
{
unsigned rt = a->t;
- TCGv_reg tmp = dest_gpr(ctx, rt);
- tcg_gen_movi_reg(tmp, ctx->iaoq_f);
+ TCGv_i64 tmp = dest_gpr(ctx, rt);
+ tcg_gen_movi_i64(tmp, ctx->iaoq_f);
save_gpr(ctx, rt, tmp);
cond_free(&ctx->null_cond);
@@ -2052,13 +1968,11 @@ static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
unsigned rt = a->t;
unsigned rs = a->sp;
TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_reg t1 = tcg_temp_new();
load_spr(ctx, t0, rs);
tcg_gen_shri_i64(t0, t0, 32);
- tcg_gen_trunc_i64_reg(t1, t0);
- save_gpr(ctx, rt, t1);
+ save_gpr(ctx, rt, t0);
cond_free(&ctx->null_cond);
return true;
@@ -2068,19 +1982,17 @@ static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
unsigned rt = a->t;
unsigned ctl = a->r;
- TCGv_reg tmp;
+ TCGv_i64 tmp;
switch (ctl) {
case CR_SAR:
-#ifdef TARGET_HPPA64
if (a->e == 0) {
/* MFSAR without ,W masks low 5 bits. */
tmp = dest_gpr(ctx, rt);
- tcg_gen_andi_reg(tmp, cpu_sar, 31);
+ tcg_gen_andi_i64(tmp, cpu_sar, 31);
save_gpr(ctx, rt, tmp);
goto done;
}
-#endif
save_gpr(ctx, rt, cpu_sar);
goto done;
case CR_IT: /* Interval Timer */
@@ -2104,8 +2016,8 @@ static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
break;
}
- tmp = get_temp(ctx);
- tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
+ tmp = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
save_gpr(ctx, rt, tmp);
done:
@@ -2117,22 +2029,21 @@ static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
unsigned rr = a->r;
unsigned rs = a->sp;
- TCGv_i64 t64;
+ TCGv_i64 tmp;
if (rs >= 5) {
CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
}
nullify_over(ctx);
- t64 = tcg_temp_new_i64();
- tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
- tcg_gen_shli_i64(t64, t64, 32);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
if (rs >= 4) {
- tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
+ tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
ctx->tb_flags &= ~TB_FLAG_SR_SAME;
} else {
- tcg_gen_mov_i64(cpu_sr[rs], t64);
+ tcg_gen_mov_i64(cpu_sr[rs], tmp);
}
return nullify_end(ctx);
@@ -2141,13 +2052,13 @@ static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
{
unsigned ctl = a->t;
- TCGv_reg reg;
- TCGv_reg tmp;
+ TCGv_i64 reg;
+ TCGv_i64 tmp;
if (ctl == CR_SAR) {
reg = load_gpr(ctx, a->r);
- tmp = tcg_temp_new();
- tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
save_or_nullify(ctx, cpu_sar, tmp);
cond_free(&ctx->null_cond);
@@ -2159,7 +2070,13 @@ static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
#ifndef CONFIG_USER_ONLY
nullify_over(ctx);
- reg = load_gpr(ctx, a->r);
+
+ if (ctx->is_pa20) {
+ reg = load_gpr(ctx, a->r);
+ } else {
+ reg = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
+ }
switch (ctl) {
case CR_IT:
@@ -2177,11 +2094,11 @@ static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
case CR_IIAOQ:
/* FIXME: Respect PSW_Q bit */
/* The write advances the queue and stores to the back element. */
- tmp = get_temp(ctx);
- tcg_gen_ld_reg(tmp, tcg_env,
+ tmp = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp, tcg_env,
offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
- tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
- tcg_gen_st_reg(reg, tcg_env,
+ tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
+ tcg_gen_st_i64(reg, tcg_env,
offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
break;
@@ -2189,14 +2106,14 @@ static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
case CR_PID2:
case CR_PID3:
case CR_PID4:
- tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
+ tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
#ifndef CONFIG_USER_ONLY
gen_helper_change_prot_id(tcg_env);
#endif
break;
default:
- tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
+ tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
break;
}
return nullify_end(ctx);
@@ -2205,10 +2122,10 @@ static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
{
- TCGv_reg tmp = tcg_temp_new();
+ TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
- tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
+ tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
+ tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
save_or_nullify(ctx, cpu_sar, tmp);
cond_free(&ctx->null_cond);
@@ -2217,17 +2134,14 @@ static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
{
- TCGv_reg dest = dest_gpr(ctx, a->t);
+ TCGv_i64 dest = dest_gpr(ctx, a->t);
#ifdef CONFIG_USER_ONLY
/* We don't implement space registers in user mode. */
- tcg_gen_movi_reg(dest, 0);
+ tcg_gen_movi_i64(dest, 0);
#else
- TCGv_i64 t0 = tcg_temp_new_i64();
-
- tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
- tcg_gen_shri_i64(t0, t0, 32);
- tcg_gen_trunc_i64_reg(dest, t0);
+ tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
+ tcg_gen_shri_i64(dest, dest, 32);
#endif
save_gpr(ctx, a->t, dest);
@@ -2239,13 +2153,13 @@ static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
{
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
- TCGv_reg tmp;
+ TCGv_i64 tmp;
nullify_over(ctx);
- tmp = get_temp(ctx);
- tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
- tcg_gen_andi_reg(tmp, tmp, ~a->i);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
+ tcg_gen_andi_i64(tmp, tmp, ~a->i);
gen_helper_swap_system_mask(tmp, tcg_env, tmp);
save_gpr(ctx, a->t, tmp);
@@ -2259,13 +2173,13 @@ static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
{
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
- TCGv_reg tmp;
+ TCGv_i64 tmp;
nullify_over(ctx);
- tmp = get_temp(ctx);
- tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
- tcg_gen_ori_reg(tmp, tmp, a->i);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
+ tcg_gen_ori_i64(tmp, tmp, a->i);
gen_helper_swap_system_mask(tmp, tcg_env, tmp);
save_gpr(ctx, a->t, tmp);
@@ -2279,11 +2193,11 @@ static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
{
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
- TCGv_reg tmp, reg;
+ TCGv_i64 tmp, reg;
nullify_over(ctx);
reg = load_gpr(ctx, a->r);
- tmp = get_temp(ctx);
+ tmp = tcg_temp_new_i64();
gen_helper_swap_system_mask(tmp, tcg_env, reg);
/* Exit the TB to recognize new interrupts. */
@@ -2356,12 +2270,12 @@ static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
{
if (a->m) {
- TCGv_reg dest = dest_gpr(ctx, a->b);
- TCGv_reg src1 = load_gpr(ctx, a->b);
- TCGv_reg src2 = load_gpr(ctx, a->x);
+ TCGv_i64 dest = dest_gpr(ctx, a->b);
+ TCGv_i64 src1 = load_gpr(ctx, a->b);
+ TCGv_i64 src2 = load_gpr(ctx, a->x);
/* The only thing we need to do is the base register modification. */
- tcg_gen_add_reg(dest, src1, src2);
+ tcg_gen_add_i64(dest, src1, src2);
save_gpr(ctx, a->b, dest);
}
cond_free(&ctx->null_cond);
@@ -2370,9 +2284,9 @@ static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
static bool trans_probe(DisasContext *ctx, arg_probe *a)
{
- TCGv_reg dest, ofs;
+ TCGv_i64 dest, ofs;
TCGv_i32 level, want;
- TCGv_tl addr;
+ TCGv_i64 addr;
nullify_over(ctx);
@@ -2383,7 +2297,7 @@ static bool trans_probe(DisasContext *ctx, arg_probe *a)
level = tcg_constant_i32(a->ri);
} else {
level = tcg_temp_new_i32();
- tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
+ tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
tcg_gen_andi_i32(level, level, 3);
}
want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
@@ -2396,19 +2310,22 @@ static bool trans_probe(DisasContext *ctx, arg_probe *a)
static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
{
+ if (ctx->is_pa20) {
+ return false;
+ }
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
- TCGv_tl addr;
- TCGv_reg ofs, reg;
+ TCGv_i64 addr;
+ TCGv_i64 ofs, reg;
nullify_over(ctx);
form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
reg = load_gpr(ctx, a->r);
if (a->addr) {
- gen_helper_itlba(tcg_env, addr, reg);
+ gen_helper_itlba_pa11(tcg_env, addr, reg);
} else {
- gen_helper_itlbp(tcg_env, addr, reg);
+ gen_helper_itlbp_pa11(tcg_env, addr, reg);
}
/* Exit TB for TLB change if mmu is enabled. */
@@ -2419,25 +2336,63 @@ static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
#endif
}
-static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
+static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
{
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
- TCGv_tl addr;
- TCGv_reg ofs;
+ TCGv_i64 addr;
+ TCGv_i64 ofs;
nullify_over(ctx);
form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
- if (a->m) {
- save_gpr(ctx, a->b, ofs);
+
+ /*
+     * Page align now, rather than later, so that we can add in the
+     * pa2.0 page_size field from the low 4 bits of GR[b].
+ */
+ tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
+ if (ctx->is_pa20) {
+ tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
}
- if (a->local) {
- gen_helper_ptlbe(tcg_env);
+
+ if (local) {
+ gen_helper_ptlb_l(tcg_env, addr);
} else {
gen_helper_ptlb(tcg_env, addr);
}
+ if (a->m) {
+ save_gpr(ctx, a->b, ofs);
+ }
+
+ /* Exit TB for TLB change if mmu is enabled. */
+ if (ctx->tb_flags & PSW_C) {
+ ctx->base.is_jmp = DISAS_IAQ_N_STALE;
+ }
+ return nullify_end(ctx);
+#endif
+}
+
+static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
+{
+ return do_pxtlb(ctx, a, false);
+}
+
+static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
+{
+ return ctx->is_pa20 && do_pxtlb(ctx, a, true);
+}
+
+static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+#ifndef CONFIG_USER_ONLY
+ nullify_over(ctx);
+
+ trans_nop_addrx(ctx, a);
+ gen_helper_ptlbe(tcg_env);
+
/* Exit TB for TLB change if mmu is enabled. */
if (ctx->tb_flags & PSW_C) {
ctx->base.is_jmp = DISAS_IAQ_N_STALE;
@@ -2454,10 +2409,13 @@ static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
*/
static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
{
+ if (ctx->is_pa20) {
+ return false;
+ }
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
- TCGv_tl addr, atl, stl;
- TCGv_reg reg;
+ TCGv_i64 addr, atl, stl;
+ TCGv_i64 reg;
nullify_over(ctx);
@@ -2465,13 +2423,11 @@ static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
* FIXME:
* if (not (pcxl or pcxl2))
* return gen_illegal(ctx);
- *
- * Note for future: these are 32-bit systems; no hppa64.
*/
- atl = tcg_temp_new_tl();
- stl = tcg_temp_new_tl();
- addr = tcg_temp_new_tl();
+ atl = tcg_temp_new_i64();
+ stl = tcg_temp_new_i64();
+ addr = tcg_temp_new_i64();
tcg_gen_ld32u_i64(stl, tcg_env,
a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
@@ -2480,13 +2436,13 @@ static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
: offsetof(CPUHPPAState, cr[CR_IIAOQ]));
tcg_gen_shli_i64(stl, stl, 32);
- tcg_gen_or_tl(addr, atl, stl);
+ tcg_gen_or_i64(addr, atl, stl);
reg = load_gpr(ctx, a->r);
if (a->addr) {
- gen_helper_itlba(tcg_env, addr, reg);
+ gen_helper_itlba_pa11(tcg_env, addr, reg);
} else {
- gen_helper_itlbp(tcg_env, addr, reg);
+ gen_helper_itlbp_pa11(tcg_env, addr, reg);
}
/* Exit TB for TLB change if mmu is enabled. */
@@ -2497,18 +2453,44 @@ static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
#endif
}
+static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
+{
+ if (!ctx->is_pa20) {
+ return false;
+ }
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+#ifndef CONFIG_USER_ONLY
+ nullify_over(ctx);
+ {
+ TCGv_i64 src1 = load_gpr(ctx, a->r1);
+ TCGv_i64 src2 = load_gpr(ctx, a->r2);
+
+ if (a->data) {
+ gen_helper_idtlbt_pa20(tcg_env, src1, src2);
+ } else {
+ gen_helper_iitlbt_pa20(tcg_env, src1, src2);
+ }
+ }
+ /* Exit TB for TLB change if mmu is enabled. */
+ if (ctx->tb_flags & PSW_C) {
+ ctx->base.is_jmp = DISAS_IAQ_N_STALE;
+ }
+ return nullify_end(ctx);
+#endif
+}
+
static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
{
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
- TCGv_tl vaddr;
- TCGv_reg ofs, paddr;
+ TCGv_i64 vaddr;
+ TCGv_i64 ofs, paddr;
nullify_over(ctx);
form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
- paddr = tcg_temp_new();
+ paddr = tcg_temp_new_i64();
gen_helper_lpa(paddr, tcg_env, vaddr);
/* Note that physical address result overrides base modification. */
@@ -2529,78 +2511,78 @@ static bool trans_lci(DisasContext *ctx, arg_lci *a)
physical address. Two addresses with the same CI have a coherent
view of the cache. Our implementation is to return 0 for all,
since the entire address space is coherent. */
- save_gpr(ctx, a->t, tcg_constant_reg(0));
+ save_gpr(ctx, a->t, ctx->zero);
cond_free(&ctx->null_cond);
return true;
}
-static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
+static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
return do_add_reg(ctx, a, false, false, false, false);
}
-static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
+static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
return do_add_reg(ctx, a, true, false, false, false);
}
-static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
+static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
return do_add_reg(ctx, a, false, true, false, false);
}
-static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
+static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
return do_add_reg(ctx, a, false, false, false, true);
}
-static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
+static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
return do_add_reg(ctx, a, false, true, false, true);
}
-static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
+static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
{
return do_sub_reg(ctx, a, false, false, false);
}
-static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
+static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
{
return do_sub_reg(ctx, a, true, false, false);
}
-static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
+static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
return do_sub_reg(ctx, a, false, false, true);
}
-static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
+static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
return do_sub_reg(ctx, a, true, false, true);
}
-static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
+static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
{
return do_sub_reg(ctx, a, false, true, false);
}
-static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
+static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
{
return do_sub_reg(ctx, a, true, true, false);
}
-static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
+static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
{
- return do_log_reg(ctx, a, tcg_gen_andc_reg);
+ return do_log_reg(ctx, a, tcg_gen_andc_i64);
}
-static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
+static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
{
- return do_log_reg(ctx, a, tcg_gen_and_reg);
+ return do_log_reg(ctx, a, tcg_gen_and_i64);
}
-static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
+static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
{
if (a->cf == 0) {
unsigned r2 = a->r2;
@@ -2613,8 +2595,8 @@ static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
}
if (r2 == 0) { /* COPY */
if (r1 == 0) {
- TCGv_reg dest = dest_gpr(ctx, rt);
- tcg_gen_movi_reg(dest, 0);
+ TCGv_i64 dest = dest_gpr(ctx, rt);
+ tcg_gen_movi_i64(dest, 0);
save_gpr(ctx, rt, dest);
} else {
save_gpr(ctx, rt, cpu_gr[r1]);
@@ -2635,8 +2617,8 @@ static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
nullify_over(ctx);
/* Advance the instruction queue. */
- copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
- copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
+ copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
+ copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
nullify_set(ctx, 0);
/* Tell the qemu main loop to halt until this cpu has work. */
@@ -2649,142 +2631,146 @@ static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
}
#endif
}
- return do_log_reg(ctx, a, tcg_gen_or_reg);
+ return do_log_reg(ctx, a, tcg_gen_or_i64);
}
-static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
+static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
{
- return do_log_reg(ctx, a, tcg_gen_xor_reg);
+ return do_log_reg(ctx, a, tcg_gen_xor_i64);
}
-static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
+static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
{
- TCGv_reg tcg_r1, tcg_r2;
+ TCGv_i64 tcg_r1, tcg_r2;
if (a->cf) {
nullify_over(ctx);
}
tcg_r1 = load_gpr(ctx, a->r1);
tcg_r2 = load_gpr(ctx, a->r2);
- do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
+ do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
return nullify_end(ctx);
}
-static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
+static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
{
- TCGv_reg tcg_r1, tcg_r2;
+ TCGv_i64 tcg_r1, tcg_r2;
if (a->cf) {
nullify_over(ctx);
}
tcg_r1 = load_gpr(ctx, a->r1);
tcg_r2 = load_gpr(ctx, a->r2);
- do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
+ do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
return nullify_end(ctx);
}
-static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
+static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
{
- TCGv_reg tcg_r1, tcg_r2, tmp;
+ TCGv_i64 tcg_r1, tcg_r2, tmp;
if (a->cf) {
nullify_over(ctx);
}
tcg_r1 = load_gpr(ctx, a->r1);
tcg_r2 = load_gpr(ctx, a->r2);
- tmp = get_temp(ctx);
- tcg_gen_not_reg(tmp, tcg_r2);
- do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_not_i64(tmp, tcg_r2);
+ do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
return nullify_end(ctx);
}
-static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
+static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
{
return do_uaddcm(ctx, a, false);
}
-static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
+static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
return do_uaddcm(ctx, a, true);
}
-static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
+static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
{
- TCGv_reg tmp;
+ TCGv_i64 tmp;
nullify_over(ctx);
- tmp = get_temp(ctx);
- tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
if (!is_i) {
- tcg_gen_not_reg(tmp, tmp);
+ tcg_gen_not_i64(tmp, tmp);
}
- tcg_gen_andi_reg(tmp, tmp, 0x11111111);
- tcg_gen_muli_reg(tmp, tmp, 6);
- do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
- is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
+ tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
+ tcg_gen_muli_i64(tmp, tmp, 6);
+ do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
+ is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
return nullify_end(ctx);
}
-static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
+static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
{
return do_dcor(ctx, a, false);
}
-static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
+static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
{
return do_dcor(ctx, a, true);
}
static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
- TCGv_reg dest, add1, add2, addc, zero, in1, in2;
+ TCGv_i64 dest, add1, add2, addc, in1, in2;
+ TCGv_i64 cout;
nullify_over(ctx);
in1 = load_gpr(ctx, a->r1);
in2 = load_gpr(ctx, a->r2);
- add1 = tcg_temp_new();
- add2 = tcg_temp_new();
- addc = tcg_temp_new();
- dest = tcg_temp_new();
- zero = tcg_constant_reg(0);
+ add1 = tcg_temp_new_i64();
+ add2 = tcg_temp_new_i64();
+ addc = tcg_temp_new_i64();
+ dest = tcg_temp_new_i64();
/* Form R1 << 1 | PSW[CB]{8}. */
- tcg_gen_add_reg(add1, in1, in1);
- tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
-
- /* Add or subtract R2, depending on PSW[V]. Proper computation of
- carry{8} requires that we subtract via + ~R2 + 1, as described in
- the manual. By extracting and masking V, we can produce the
- proper inputs to the addition without movcond. */
- tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
- tcg_gen_xor_reg(add2, in2, addc);
- tcg_gen_andi_reg(addc, addc, 1);
- /* ??? This is only correct for 32-bit. */
- tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
- tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
+ tcg_gen_add_i64(add1, in1, in1);
+ tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
+
+ /*
+ * Add or subtract R2, depending on PSW[V]. Proper computation of
+ * carry requires that we subtract via + ~R2 + 1, as described in
+ * the manual. By extracting and masking V, we can produce the
+ * proper inputs to the addition without movcond.
+ */
+ tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
+ tcg_gen_xor_i64(add2, in2, addc);
+ tcg_gen_andi_i64(addc, addc, 1);
+
+ tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
+ tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
+ addc, ctx->zero);
/* Write back the result register. */
save_gpr(ctx, a->t, dest);
/* Write back PSW[CB]. */
- tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
- tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
+ tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
+ tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
/* Write back PSW[V] for the division step. */
- tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
- tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
+ cout = get_psw_carry(ctx, false);
+ tcg_gen_neg_i64(cpu_psw_v, cout);
+ tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
/* Install the new nullification. */
if (a->cf) {
- TCGv_reg sv = NULL;
+ TCGv_i64 sv = NULL;
if (cond_need_sv(a->cf >> 1)) {
/* ??? The lshift is supposed to contribute to overflow. */
sv = do_add_sv(ctx, dest, add1, add2);
}
- ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
+ ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
}
return nullify_end(ctx);
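
The PSW[V] trick in the hunk above is easier to see in scalar form. This sketch (illustrative only, assuming PSW[V] is kept in bit 31 of cpu_psw_v as the sextract implies) shows how sign-extending V yields either R2 with carry-in 0, or ~R2 with carry-in 1, i.e. subtraction, without a conditional move:

#include <stdint.h>

/* Illustrative only: the add/subtract operand used by the divide step. */
static uint64_t ds_addend(uint64_t in2, uint64_t psw_v, uint64_t *carry_in)
{
    uint64_t mask = -((psw_v >> 31) & 1);   /* 0 or ~0, from PSW[V] */
    *carry_in = mask & 1;                   /* the "+ 1" of ~R2 + 1 */
    return in2 ^ mask;                      /* R2 or ~R2 */
}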
@@ -2820,53 +2806,270 @@ static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
return do_sub_imm(ctx, a, true);
}
-static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
+static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
{
- TCGv_reg tcg_im, tcg_r2;
+ TCGv_i64 tcg_im, tcg_r2;
if (a->cf) {
nullify_over(ctx);
}
- tcg_im = load_const(ctx, a->i);
+ tcg_im = tcg_constant_i64(a->i);
tcg_r2 = load_gpr(ctx, a->r);
- do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
+ do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
+
+ return nullify_end(ctx);
+}
+
+static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
+ void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 r1, r2, dest;
+
+ if (!ctx->is_pa20) {
+ return false;
+ }
+
+ nullify_over(ctx);
+
+ r1 = load_gpr(ctx, a->r1);
+ r2 = load_gpr(ctx, a->r2);
+ dest = dest_gpr(ctx, a->t);
+
+ fn(dest, r1, r2);
+ save_gpr(ctx, a->t, dest);
return nullify_end(ctx);
}
+static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
+ void (*fn)(TCGv_i64, TCGv_i64, int64_t))
+{
+ TCGv_i64 r, dest;
+
+ if (!ctx->is_pa20) {
+ return false;
+ }
+
+ nullify_over(ctx);
+
+ r = load_gpr(ctx, a->r);
+ dest = dest_gpr(ctx, a->t);
+
+ fn(dest, r, a->i);
+ save_gpr(ctx, a->t, dest);
+
+ return nullify_end(ctx);
+}
+
+static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
+ void (*fn)(TCGv_i64, TCGv_i64,
+ TCGv_i64, TCGv_i32))
+{
+ TCGv_i64 r1, r2, dest;
+
+ if (!ctx->is_pa20) {
+ return false;
+ }
+
+ nullify_over(ctx);
+
+ r1 = load_gpr(ctx, a->r1);
+ r2 = load_gpr(ctx, a->r2);
+ dest = dest_gpr(ctx, a->t);
+
+ fn(dest, r1, r2, tcg_constant_i32(a->sh));
+ save_gpr(ctx, a->t, dest);
+
+ return nullify_end(ctx);
+}
+
+static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
+{
+ return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
+}
+
+static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
+{
+ return do_multimedia(ctx, a, gen_helper_hadd_ss);
+}
+
+static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
+{
+ return do_multimedia(ctx, a, gen_helper_hadd_us);
+}
+
+static bool trans_havg(DisasContext *ctx, arg_rrr *a)
+{
+ return do_multimedia(ctx, a, gen_helper_havg);
+}
+
+static bool trans_hshl(DisasContext *ctx, arg_rri *a)
+{
+ return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
+}
+
+static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
+{
+ return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
+}
+
+static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
+{
+ return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
+}
+
+static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
+{
+ return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
+}
+
+static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
+{
+ return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
+}
+
+static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
+{
+ return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
+}
+
+static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
+{
+ return do_multimedia(ctx, a, gen_helper_hsub_ss);
+}
+
+static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
+{
+ return do_multimedia(ctx, a, gen_helper_hsub_us);
+}
+
+static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
+{
+ uint64_t mask = 0xffff0000ffff0000ull;
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ tcg_gen_andi_i64(tmp, r2, mask);
+ tcg_gen_andi_i64(dst, r1, mask);
+ tcg_gen_shri_i64(tmp, tmp, 16);
+ tcg_gen_or_i64(dst, dst, tmp);
+}
+
+static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
+{
+ return do_multimedia(ctx, a, gen_mixh_l);
+}
+
+static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
+{
+ uint64_t mask = 0x0000ffff0000ffffull;
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ tcg_gen_andi_i64(tmp, r1, mask);
+ tcg_gen_andi_i64(dst, r2, mask);
+ tcg_gen_shli_i64(tmp, tmp, 16);
+ tcg_gen_or_i64(dst, dst, tmp);
+}
+
+static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
+{
+ return do_multimedia(ctx, a, gen_mixh_r);
+}
+
+static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ tcg_gen_shri_i64(tmp, r2, 32);
+ tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
+}
+
+static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
+{
+ return do_multimedia(ctx, a, gen_mixw_l);
+}
+
+static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
+{
+ tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
+}
+
+static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
+{
+ return do_multimedia(ctx, a, gen_mixw_r);
+}
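
For orientation, a scalar restatement (illustrative only, not part of the patch) of the MIXH/MIXW data movement generated above, with 16-bit lanes written most significant first: r1 = a b c d, r2 = e f g h.

#include <stdint.h>

static uint64_t mixh_l(uint64_t r1, uint64_t r2)    /* -> a e c g */
{
    uint64_t m = 0xffff0000ffff0000ull;
    return (r1 & m) | ((r2 & m) >> 16);
}

static uint64_t mixh_r(uint64_t r1, uint64_t r2)    /* -> b f d h */
{
    uint64_t m = 0x0000ffff0000ffffull;
    return ((r1 & m) << 16) | (r2 & m);
}

static uint64_t mixw_l(uint64_t r1, uint64_t r2)    /* -> high words */
{
    return (r1 & 0xffffffff00000000ull) | (r2 >> 32);
}

static uint64_t mixw_r(uint64_t r1, uint64_t r2)    /* -> low words */
{
    return (r1 << 32) | (uint32_t)r2;
}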
+
+static bool trans_permh(DisasContext *ctx, arg_permh *a)
+{
+ TCGv_i64 r, t0, t1, t2, t3;
+
+ if (!ctx->is_pa20) {
+ return false;
+ }
+
+ nullify_over(ctx);
+
+ r = load_gpr(ctx, a->r1);
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ t3 = tcg_temp_new_i64();
+
+ tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
+ tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
+ tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
+ tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);
+
+ tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
+ tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
+ tcg_gen_deposit_i64(t0, t2, t0, 32, 32);
+
+ save_gpr(ctx, a->t, t0);
+ return nullify_end(ctx);
+}
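
A scalar reference for the PERMH expansion above (illustrative only): result halfword i, counting from the most significant end, is halfword c_i of the source.

#include <stdint.h>

static uint64_t permh_ref(uint64_t r, unsigned c0, unsigned c1,
                          unsigned c2, unsigned c3)
{
    uint64_t h[4];

    for (int i = 0; i < 4; i++) {
        h[i] = (r >> ((3 - i) * 16)) & 0xffff;  /* h[0] is the MSB halfword */
    }
    return (h[c0] << 48) | (h[c1] << 32) | (h[c2] << 16) | h[c3];
}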
+
static bool trans_ld(DisasContext *ctx, arg_ldst *a)
{
- if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
+ if (ctx->is_pa20) {
+ /*
+ * With pa20, LDB, LDH, LDW, LDD to %g0 are prefetches.
+ * Any base modification still occurs.
+ */
+ if (a->t == 0) {
+ return trans_nop_addrx(ctx, a);
+ }
+ } else if (a->size > MO_32) {
return gen_illegal(ctx);
- } else {
- return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
- a->disp, a->sp, a->m, a->size | MO_TE);
}
+ return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
+ a->disp, a->sp, a->m, a->size | MO_TE);
}
static bool trans_st(DisasContext *ctx, arg_ldst *a)
{
assert(a->x == 0 && a->scale == 0);
- if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
+ if (!ctx->is_pa20 && a->size > MO_32) {
return gen_illegal(ctx);
- } else {
- return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
}
+ return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
}
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
MemOp mop = MO_TE | MO_ALIGN | a->size;
- TCGv_reg zero, dest, ofs;
- TCGv_tl addr;
+ TCGv_i64 dest, ofs;
+ TCGv_i64 addr;
+
+ if (!ctx->is_pa20 && a->size > MO_32) {
+ return gen_illegal(ctx);
+ }
nullify_over(ctx);
if (a->m) {
/* Base register modification. Make sure if RT == RB,
we see the result of the load. */
- dest = get_temp(ctx);
+ dest = tcg_temp_new_i64();
} else {
dest = dest_gpr(ctx, a->t);
}
@@ -2884,8 +3087,7 @@ static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
*/
gen_helper_ldc_check(addr);
- zero = tcg_constant_reg(0);
- tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
+ tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);
if (a->m) {
save_gpr(ctx, a->b, ofs);
@@ -2897,8 +3099,8 @@ static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
static bool trans_stby(DisasContext *ctx, arg_stby *a)
{
- TCGv_reg ofs, val;
- TCGv_tl addr;
+ TCGv_i64 ofs, val;
+ TCGv_i64 addr;
nullify_over(ctx);
@@ -2919,7 +3121,41 @@ static bool trans_stby(DisasContext *ctx, arg_stby *a)
}
}
if (a->m) {
- tcg_gen_andi_reg(ofs, ofs, ~3);
+ tcg_gen_andi_i64(ofs, ofs, ~3);
+ save_gpr(ctx, a->b, ofs);
+ }
+
+ return nullify_end(ctx);
+}
+
+static bool trans_stdby(DisasContext *ctx, arg_stby *a)
+{
+ TCGv_i64 ofs, val;
+ TCGv_i64 addr;
+
+ if (!ctx->is_pa20) {
+ return false;
+ }
+ nullify_over(ctx);
+
+ form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
+ ctx->mmu_idx == MMU_PHYS_IDX);
+ val = load_gpr(ctx, a->r);
+ if (a->a) {
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ gen_helper_stdby_e_parallel(tcg_env, addr, val);
+ } else {
+ gen_helper_stdby_e(tcg_env, addr, val);
+ }
+ } else {
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ gen_helper_stdby_b_parallel(tcg_env, addr, val);
+ } else {
+ gen_helper_stdby_b(tcg_env, addr, val);
+ }
+ }
+ if (a->m) {
+ tcg_gen_andi_i64(ofs, ofs, ~7);
save_gpr(ctx, a->b, ofs);
}
@@ -2950,9 +3186,9 @@ static bool trans_sta(DisasContext *ctx, arg_ldst *a)
static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
{
- TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
+ TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
- tcg_gen_movi_reg(tcg_rt, a->i);
+ tcg_gen_movi_i64(tcg_rt, a->i);
save_gpr(ctx, a->t, tcg_rt);
cond_free(&ctx->null_cond);
return true;
@@ -2960,10 +3196,10 @@ static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
static bool trans_addil(DisasContext *ctx, arg_addil *a)
{
- TCGv_reg tcg_rt = load_gpr(ctx, a->r);
- TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
+ TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
+ TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
- tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
+ tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
save_gpr(ctx, 1, tcg_r1);
cond_free(&ctx->null_cond);
return true;
@@ -2971,75 +3207,100 @@ static bool trans_addil(DisasContext *ctx, arg_addil *a)
static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
{
- TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
+ TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
/* Special case rb == 0, for the LDI pseudo-op.
- The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
+ The COPY pseudo-op is handled for free within tcg_gen_addi_i64. */
if (a->b == 0) {
- tcg_gen_movi_reg(tcg_rt, a->i);
+ tcg_gen_movi_i64(tcg_rt, a->i);
} else {
- tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
+ tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
}
save_gpr(ctx, a->t, tcg_rt);
cond_free(&ctx->null_cond);
return true;
}
-static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
- unsigned c, unsigned f, unsigned n, int disp)
+static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
+ unsigned c, unsigned f, bool d, unsigned n, int disp)
{
- TCGv_reg dest, in2, sv;
+ TCGv_i64 dest, in2, sv;
DisasCond cond;
in2 = load_gpr(ctx, r);
- dest = get_temp(ctx);
+ dest = tcg_temp_new_i64();
- tcg_gen_sub_reg(dest, in1, in2);
+ tcg_gen_sub_i64(dest, in1, in2);
sv = NULL;
if (cond_need_sv(c)) {
sv = do_sub_sv(ctx, dest, in1, in2);
}
- cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
+ cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
return do_cbranch(ctx, disp, n, &cond);
}
static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
{
+ if (!ctx->is_pa20 && a->d) {
+ return false;
+ }
nullify_over(ctx);
- return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
+ return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
+ a->c, a->f, a->d, a->n, a->disp);
}
static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
{
+ if (!ctx->is_pa20 && a->d) {
+ return false;
+ }
nullify_over(ctx);
- return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
+ return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
+ a->c, a->f, a->d, a->n, a->disp);
}
-static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
+static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
unsigned c, unsigned f, unsigned n, int disp)
{
- TCGv_reg dest, in2, sv, cb_msb;
+ TCGv_i64 dest, in2, sv, cb_cond;
DisasCond cond;
+ bool d = false;
+
+ /*
+ * For hppa64, the ADDB conditions change with PSW.W,
+ * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
+ */
+ if (ctx->tb_flags & PSW_W) {
+ d = c >= 5;
+ if (d) {
+ c &= 3;
+ }
+ }
in2 = load_gpr(ctx, r);
- dest = tcg_temp_new();
+ dest = tcg_temp_new_i64();
sv = NULL;
- cb_msb = NULL;
+ cb_cond = NULL;
if (cond_need_cb(c)) {
- cb_msb = get_temp(ctx);
- tcg_gen_movi_reg(cb_msb, 0);
- tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
+ TCGv_i64 cb = tcg_temp_new_i64();
+ TCGv_i64 cb_msb = tcg_temp_new_i64();
+
+ tcg_gen_movi_i64(cb_msb, 0);
+ tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
+ tcg_gen_xor_i64(cb, in1, in2);
+ tcg_gen_xor_i64(cb, cb, dest);
+ cb_cond = get_carry(ctx, d, cb, cb_msb);
} else {
- tcg_gen_add_reg(dest, in1, in2);
+ tcg_gen_add_i64(dest, in1, in2);
}
if (cond_need_sv(c)) {
sv = do_add_sv(ctx, dest, in1, in2);
}
- cond = do_cond(c * 2 + f, dest, cb_msb, sv);
+ cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
save_gpr(ctx, r, dest);
return do_cbranch(ctx, disp, n, &cond);
}
@@ -3053,34 +3314,42 @@ static bool trans_addb(DisasContext *ctx, arg_addb *a)
static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
{
nullify_over(ctx);
- return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
+ return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
}
static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
{
- TCGv_reg tmp, tcg_r;
+ TCGv_i64 tmp, tcg_r;
DisasCond cond;
nullify_over(ctx);
- tmp = tcg_temp_new();
+ tmp = tcg_temp_new_i64();
tcg_r = load_gpr(ctx, a->r);
- tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
+ if (cond_need_ext(ctx, a->d)) {
+ /* Force shift into [32,63] */
+ tcg_gen_ori_i64(tmp, cpu_sar, 32);
+ tcg_gen_shl_i64(tmp, tcg_r, tmp);
+ } else {
+ tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
+ }
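+    /* Either way the selected bit is now bit 63, so the signed test decides. */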
- cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
+ cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
return do_cbranch(ctx, a->disp, a->n, &cond);
}
static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
{
- TCGv_reg tmp, tcg_r;
+ TCGv_i64 tmp, tcg_r;
DisasCond cond;
+ int p;
nullify_over(ctx);
- tmp = tcg_temp_new();
+ tmp = tcg_temp_new_i64();
tcg_r = load_gpr(ctx, a->r);
- tcg_gen_shli_reg(tmp, tcg_r, a->p);
+ p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
+ tcg_gen_shli_i64(tmp, tcg_r, p);
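+    /* The tested bit is now the sign bit of TMP. */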
cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
return do_cbranch(ctx, a->disp, a->n, &cond);
@@ -3088,178 +3357,246 @@ static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
static bool trans_movb(DisasContext *ctx, arg_movb *a)
{
- TCGv_reg dest;
+ TCGv_i64 dest;
DisasCond cond;
nullify_over(ctx);
dest = dest_gpr(ctx, a->r2);
if (a->r1 == 0) {
- tcg_gen_movi_reg(dest, 0);
+ tcg_gen_movi_i64(dest, 0);
} else {
- tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
+ tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
}
- cond = do_sed_cond(a->c, dest);
+ /* All MOVB conditions are 32-bit. */
+ cond = do_sed_cond(ctx, a->c, false, dest);
return do_cbranch(ctx, a->disp, a->n, &cond);
}
static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
{
- TCGv_reg dest;
+ TCGv_i64 dest;
DisasCond cond;
nullify_over(ctx);
dest = dest_gpr(ctx, a->r);
- tcg_gen_movi_reg(dest, a->i);
+ tcg_gen_movi_i64(dest, a->i);
- cond = do_sed_cond(a->c, dest);
+ /* All MOVBI conditions are 32-bit. */
+ cond = do_sed_cond(ctx, a->c, false, dest);
return do_cbranch(ctx, a->disp, a->n, &cond);
}
-static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
+static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
{
- TCGv_reg dest;
+ TCGv_i64 dest, src2;
+ if (!ctx->is_pa20 && a->d) {
+ return false;
+ }
if (a->c) {
nullify_over(ctx);
}
dest = dest_gpr(ctx, a->t);
+ src2 = load_gpr(ctx, a->r2);
if (a->r1 == 0) {
- tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
- tcg_gen_shr_reg(dest, dest, cpu_sar);
+ if (a->d) {
+ tcg_gen_shr_i64(dest, src2, cpu_sar);
+ } else {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
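+            /* 32-bit shift: zero-extend the word and shift by SAR mod 32. */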
+ tcg_gen_ext32u_i64(dest, src2);
+ tcg_gen_andi_i64(tmp, cpu_sar, 31);
+ tcg_gen_shr_i64(dest, dest, tmp);
+ }
} else if (a->r1 == a->r2) {
- TCGv_i32 t32 = tcg_temp_new_i32();
- tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
- tcg_gen_rotr_i32(t32, t32, cpu_sar);
- tcg_gen_extu_i32_reg(dest, t32);
+ if (a->d) {
+ tcg_gen_rotr_i64(dest, src2, cpu_sar);
+ } else {
+ TCGv_i32 t32 = tcg_temp_new_i32();
+ TCGv_i32 s32 = tcg_temp_new_i32();
+
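+            /* 32-bit rotate right by SAR mod 32, then widen the result. */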
+ tcg_gen_extrl_i64_i32(t32, src2);
+ tcg_gen_extrl_i64_i32(s32, cpu_sar);
+ tcg_gen_andi_i32(s32, s32, 31);
+ tcg_gen_rotr_i32(t32, t32, s32);
+ tcg_gen_extu_i32_i64(dest, t32);
+ }
} else {
- TCGv_i64 t = tcg_temp_new_i64();
- TCGv_i64 s = tcg_temp_new_i64();
+ TCGv_i64 src1 = load_gpr(ctx, a->r1);
+
+ if (a->d) {
+ TCGv_i64 t = tcg_temp_new_i64();
+ TCGv_i64 n = tcg_temp_new_i64();
+
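+            /* Compute (R1 << (64 - sar)) | (R2 >> sar); the left shift is
+               split into (sar ^ 63) and 1 to avoid a shift count of 64. */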
+ tcg_gen_xori_i64(n, cpu_sar, 63);
+ tcg_gen_shl_i64(t, src2, n);
+ tcg_gen_shli_i64(t, t, 1);
+ tcg_gen_shr_i64(dest, src1, cpu_sar);
+ tcg_gen_or_i64(dest, dest, t);
+ } else {
+ TCGv_i64 t = tcg_temp_new_i64();
+ TCGv_i64 s = tcg_temp_new_i64();
- tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
- tcg_gen_extu_reg_i64(s, cpu_sar);
- tcg_gen_shr_i64(t, t, s);
- tcg_gen_trunc_i64_reg(dest, t);
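+            /* Form R1:R2 as one 64-bit value and shift right by SAR mod 32. */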
+ tcg_gen_concat32_i64(t, src2, src1);
+ tcg_gen_andi_i64(s, cpu_sar, 31);
+ tcg_gen_shr_i64(dest, t, s);
+ }
}
save_gpr(ctx, a->t, dest);
/* Install the new nullification. */
cond_free(&ctx->null_cond);
if (a->c) {
- ctx->null_cond = do_sed_cond(a->c, dest);
+ ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
}
return nullify_end(ctx);
}
-static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
+static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
{
- unsigned sa = 31 - a->cpos;
- TCGv_reg dest, t2;
+ unsigned width, sa;
+ TCGv_i64 dest, t2;
+ if (!ctx->is_pa20 && a->d) {
+ return false;
+ }
if (a->c) {
nullify_over(ctx);
}
+ width = a->d ? 64 : 32;
+ sa = width - 1 - a->cpos;
+
dest = dest_gpr(ctx, a->t);
t2 = load_gpr(ctx, a->r2);
if (a->r1 == 0) {
- tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
- } else if (TARGET_REGISTER_BITS == 32) {
- tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
- } else if (a->r1 == a->r2) {
- TCGv_i32 t32 = tcg_temp_new_i32();
- tcg_gen_trunc_reg_i32(t32, t2);
- tcg_gen_rotri_i32(t32, t32, sa);
- tcg_gen_extu_i32_reg(dest, t32);
+ tcg_gen_extract_i64(dest, t2, sa, width - sa);
+ } else if (width == TARGET_LONG_BITS) {
+ tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
} else {
- TCGv_i64 t64 = tcg_temp_new_i64();
- tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
- tcg_gen_shri_i64(t64, t64, sa);
- tcg_gen_trunc_i64_reg(dest, t64);
+ assert(!a->d);
+ if (a->r1 == a->r2) {
+ TCGv_i32 t32 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t32, t2);
+ tcg_gen_rotri_i32(t32, t32, sa);
+ tcg_gen_extu_i32_i64(dest, t32);
+ } else {
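+            /* Concatenate R1:R2 and extract the 32-bit field starting at sa. */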
+ tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
+ tcg_gen_extract_i64(dest, dest, sa, 32);
+ }
}
save_gpr(ctx, a->t, dest);
/* Install the new nullification. */
cond_free(&ctx->null_cond);
if (a->c) {
- ctx->null_cond = do_sed_cond(a->c, dest);
+ ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
}
return nullify_end(ctx);
}
-static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
+static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
{
- unsigned len = 32 - a->clen;
- TCGv_reg dest, src, tmp;
+ unsigned widthm1 = a->d ? 63 : 31;
+ TCGv_i64 dest, src, tmp;
+ if (!ctx->is_pa20 && a->d) {
+ return false;
+ }
if (a->c) {
nullify_over(ctx);
}
dest = dest_gpr(ctx, a->t);
src = load_gpr(ctx, a->r);
- tmp = tcg_temp_new();
+ tmp = tcg_temp_new_i64();
/* Recall that SAR is using big-endian bit numbering. */
- tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
+ tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
+ tcg_gen_xori_i64(tmp, tmp, widthm1);
+
if (a->se) {
- tcg_gen_sar_reg(dest, src, tmp);
- tcg_gen_sextract_reg(dest, dest, 0, len);
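+        /* For the 32-bit form, sign-extend the word first so the
+           arithmetic shift propagates its sign bit. */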
+ if (!a->d) {
+ tcg_gen_ext32s_i64(dest, src);
+ src = dest;
+ }
+ tcg_gen_sar_i64(dest, src, tmp);
+ tcg_gen_sextract_i64(dest, dest, 0, a->len);
} else {
- tcg_gen_shr_reg(dest, src, tmp);
- tcg_gen_extract_reg(dest, dest, 0, len);
+ if (!a->d) {
+ tcg_gen_ext32u_i64(dest, src);
+ src = dest;
+ }
+ tcg_gen_shr_i64(dest, src, tmp);
+ tcg_gen_extract_i64(dest, dest, 0, a->len);
}
save_gpr(ctx, a->t, dest);
/* Install the new nullification. */
cond_free(&ctx->null_cond);
if (a->c) {
- ctx->null_cond = do_sed_cond(a->c, dest);
+ ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
}
return nullify_end(ctx);
}
-static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
+static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
{
- unsigned len = 32 - a->clen;
- unsigned cpos = 31 - a->pos;
- TCGv_reg dest, src;
+ unsigned len, cpos, width;
+ TCGv_i64 dest, src;
+ if (!ctx->is_pa20 && a->d) {
+ return false;
+ }
if (a->c) {
nullify_over(ctx);
}
+ len = a->len;
+ width = a->d ? 64 : 32;
+ cpos = width - 1 - a->pos;
+ if (cpos + len > width) {
+ len = width - cpos;
+ }
+
dest = dest_gpr(ctx, a->t);
src = load_gpr(ctx, a->r);
if (a->se) {
- tcg_gen_sextract_reg(dest, src, cpos, len);
+ tcg_gen_sextract_i64(dest, src, cpos, len);
} else {
- tcg_gen_extract_reg(dest, src, cpos, len);
+ tcg_gen_extract_i64(dest, src, cpos, len);
}
save_gpr(ctx, a->t, dest);
/* Install the new nullification. */
cond_free(&ctx->null_cond);
if (a->c) {
- ctx->null_cond = do_sed_cond(a->c, dest);
+ ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
}
return nullify_end(ctx);
}
-static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
+static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
{
- unsigned len = 32 - a->clen;
- target_sreg mask0, mask1;
- TCGv_reg dest;
+ unsigned len, width;
+ uint64_t mask0, mask1;
+ TCGv_i64 dest;
+ if (!ctx->is_pa20 && a->d) {
+ return false;
+ }
if (a->c) {
nullify_over(ctx);
}
- if (a->cpos + len > 32) {
- len = 32 - a->cpos;
+
+ len = a->len;
+ width = a->d ? 64 : 32;
+ if (a->cpos + len > width) {
+ len = width - a->cpos;
}
dest = dest_gpr(ctx, a->t);
@@ -3267,110 +3604,122 @@ static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
mask1 = deposit64(-1, a->cpos, len, a->i);
if (a->nz) {
- TCGv_reg src = load_gpr(ctx, a->t);
- if (mask1 != -1) {
- tcg_gen_andi_reg(dest, src, mask1);
- src = dest;
- }
- tcg_gen_ori_reg(dest, src, mask0);
+ TCGv_i64 src = load_gpr(ctx, a->t);
+ tcg_gen_andi_i64(dest, src, mask1);
+ tcg_gen_ori_i64(dest, dest, mask0);
} else {
- tcg_gen_movi_reg(dest, mask0);
+ tcg_gen_movi_i64(dest, mask0);
}
save_gpr(ctx, a->t, dest);
/* Install the new nullification. */
cond_free(&ctx->null_cond);
if (a->c) {
- ctx->null_cond = do_sed_cond(a->c, dest);
+ ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
}
return nullify_end(ctx);
}
-static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
+static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
{
unsigned rs = a->nz ? a->t : 0;
- unsigned len = 32 - a->clen;
- TCGv_reg dest, val;
+ unsigned len, width;
+ TCGv_i64 dest, val;
+ if (!ctx->is_pa20 && a->d) {
+ return false;
+ }
if (a->c) {
nullify_over(ctx);
}
- if (a->cpos + len > 32) {
- len = 32 - a->cpos;
+
+ len = a->len;
+ width = a->d ? 64 : 32;
+ if (a->cpos + len > width) {
+ len = width - a->cpos;
}
dest = dest_gpr(ctx, a->t);
val = load_gpr(ctx, a->r);
if (rs == 0) {
- tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
+ tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
} else {
- tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
+ tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
}
save_gpr(ctx, a->t, dest);
/* Install the new nullification. */
cond_free(&ctx->null_cond);
if (a->c) {
- ctx->null_cond = do_sed_cond(a->c, dest);
+ ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
}
return nullify_end(ctx);
}
-static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
- unsigned nz, unsigned clen, TCGv_reg val)
+static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
+ bool d, bool nz, unsigned len, TCGv_i64 val)
{
unsigned rs = nz ? rt : 0;
- unsigned len = 32 - clen;
- TCGv_reg mask, tmp, shift, dest;
- unsigned msb = 1U << (len - 1);
+ unsigned widthm1 = d ? 63 : 31;
+ TCGv_i64 mask, tmp, shift, dest;
+ uint64_t msb = 1ULL << (len - 1);
dest = dest_gpr(ctx, rt);
- shift = tcg_temp_new();
- tmp = tcg_temp_new();
+ shift = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
/* Convert big-endian bit numbering in SAR to left-shift. */
- tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
+ tcg_gen_andi_i64(shift, cpu_sar, widthm1);
+ tcg_gen_xori_i64(shift, shift, widthm1);
- mask = tcg_temp_new();
- tcg_gen_movi_reg(mask, msb + (msb - 1));
- tcg_gen_and_reg(tmp, val, mask);
+ mask = tcg_temp_new_i64();
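+    /* MASK = (1 << len) - 1, covering only the deposited field. */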
+ tcg_gen_movi_i64(mask, msb + (msb - 1));
+ tcg_gen_and_i64(tmp, val, mask);
if (rs) {
- tcg_gen_shl_reg(mask, mask, shift);
- tcg_gen_shl_reg(tmp, tmp, shift);
- tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
- tcg_gen_or_reg(dest, dest, tmp);
+ tcg_gen_shl_i64(mask, mask, shift);
+ tcg_gen_shl_i64(tmp, tmp, shift);
+ tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
+ tcg_gen_or_i64(dest, dest, tmp);
} else {
- tcg_gen_shl_reg(dest, tmp, shift);
+ tcg_gen_shl_i64(dest, tmp, shift);
}
save_gpr(ctx, rt, dest);
/* Install the new nullification. */
cond_free(&ctx->null_cond);
if (c) {
- ctx->null_cond = do_sed_cond(c, dest);
+ ctx->null_cond = do_sed_cond(ctx, c, d, dest);
}
return nullify_end(ctx);
}
-static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
+static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
{
+ if (!ctx->is_pa20 && a->d) {
+ return false;
+ }
if (a->c) {
nullify_over(ctx);
}
- return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
+ return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
+ load_gpr(ctx, a->r));
}
-static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
+static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
{
+ if (!ctx->is_pa20 && a->d) {
+ return false;
+ }
if (a->c) {
nullify_over(ctx);
}
- return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
+ return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
+ tcg_constant_i64(a->i));
}
static bool trans_be(DisasContext *ctx, arg_be *a)
{
- TCGv_reg tmp;
+ TCGv_i64 tmp;
#ifdef CONFIG_USER_ONLY
/* ??? It seems like there should be a good way of using
@@ -3388,8 +3737,8 @@ static bool trans_be(DisasContext *ctx, arg_be *a)
nullify_over(ctx);
#endif
- tmp = get_temp(ctx);
- tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
tmp = do_ibranch_priv(ctx, tmp);
#ifdef CONFIG_USER_ONLY
@@ -3399,20 +3748,21 @@ static bool trans_be(DisasContext *ctx, arg_be *a)
load_spr(ctx, new_spc, a->sp);
if (a->l) {
- copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
+ copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
}
if (a->n && use_nullify_skip(ctx)) {
- tcg_gen_mov_reg(cpu_iaoq_f, tmp);
- tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
+ copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
+ tcg_gen_addi_i64(tmp, tmp, 4);
+ copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
tcg_gen_mov_i64(cpu_iasq_f, new_spc);
tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
} else {
- copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
+ copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
if (ctx->iaoq_b == -1) {
tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
}
- tcg_gen_mov_reg(cpu_iaoq_b, tmp);
+ copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
tcg_gen_mov_i64(cpu_iasq_b, new_spc);
nullify_set(ctx, a->n);
}
@@ -3429,7 +3779,7 @@ static bool trans_bl(DisasContext *ctx, arg_bl *a)
static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
- target_ureg dest = iaoq_dest(ctx, a->disp);
+ uint64_t dest = iaoq_dest(ctx, a->disp);
nullify_over(ctx);
@@ -3471,11 +3821,11 @@ static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
#endif
if (a->l) {
- TCGv_reg tmp = dest_gpr(ctx, a->l);
+ TCGv_i64 tmp = dest_gpr(ctx, a->l);
if (ctx->privilege < 3) {
- tcg_gen_andi_reg(tmp, tmp, -4);
+ tcg_gen_andi_i64(tmp, tmp, -4);
}
- tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
+ tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
save_gpr(ctx, a->l, tmp);
}
@@ -3485,9 +3835,9 @@ static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
if (a->x) {
- TCGv_reg tmp = get_temp(ctx);
- tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
- tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
+ tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
/* The computation here never changes privilege level. */
return do_ibranch(ctx, tmp, a->l, a->n);
} else {
@@ -3498,14 +3848,14 @@ static bool trans_blr(DisasContext *ctx, arg_blr *a)
static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
- TCGv_reg dest;
+ TCGv_i64 dest;
if (a->x == 0) {
dest = load_gpr(ctx, a->b);
} else {
- dest = get_temp(ctx);
- tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
- tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
+ dest = tcg_temp_new_i64();
+ tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
+ tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
}
dest = do_ibranch_priv(ctx, dest);
return do_ibranch(ctx, dest, 0, a->n);
@@ -3513,7 +3863,7 @@ static bool trans_bv(DisasContext *ctx, arg_bv *a)
static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
- TCGv_reg dest;
+ TCGv_i64 dest;
#ifdef CONFIG_USER_ONLY
dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
@@ -3522,14 +3872,14 @@ static bool trans_bve(DisasContext *ctx, arg_bve *a)
nullify_over(ctx);
dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
- copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
+ copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
if (ctx->iaoq_b == -1) {
tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
}
- copy_iaoq_entry(cpu_iaoq_b, -1, dest);
+ copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
if (a->l) {
- copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
+ copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
}
nullify_set(ctx, a->n);
tcg_gen_lookup_and_goto_ptr();
@@ -3538,6 +3888,12 @@ static bool trans_bve(DisasContext *ctx, arg_bve *a)
#endif
}
+static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
+{
+    /* All branch target stack instructions are implemented as nops. */
+ return ctx->is_pa20;
+}
+
/*
* Float class 0
*/
@@ -3551,7 +3907,7 @@ static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
uint64_t ret;
- if (TARGET_REGISTER_BITS == 64) {
+ if (ctx->is_pa20) {
ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
} else {
ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
@@ -3830,12 +4186,12 @@ static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
- TCGv_reg t;
+ TCGv_i64 t;
nullify_over(ctx);
- t = get_temp(ctx);
- tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
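+    /* fr0_shadow is a soft copy of the FP status flags consulted by FTEST. */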
+ t = tcg_temp_new_i64();
+ tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
if (a->y == 1) {
int mask;
@@ -3843,7 +4199,7 @@ static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
switch (a->c) {
case 0: /* simple */
- tcg_gen_andi_reg(t, t, 0x4000000);
+ tcg_gen_andi_i64(t, t, 0x4000000);
ctx->null_cond = cond_make_0(TCG_COND_NE, t);
goto done;
case 2: /* rej */
@@ -3872,17 +4228,17 @@ static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
return true;
}
if (inv) {
- TCGv_reg c = load_const(ctx, mask);
- tcg_gen_or_reg(t, t, c);
+ TCGv_i64 c = tcg_constant_i64(mask);
+ tcg_gen_or_i64(t, t, c);
ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
} else {
- tcg_gen_andi_reg(t, t, mask);
+ tcg_gen_andi_i64(t, t, mask);
ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
}
} else {
unsigned cbit = (a->y ^ 1) - 1;
- tcg_gen_extract_reg(t, t, 21 - cbit, 1);
+ tcg_gen_extract_i64(t, t, 21 - cbit, 1);
ctx->null_cond = cond_make_0(TCG_COND_NE, t);
}
@@ -4062,6 +4418,7 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->cs = cs;
ctx->tb_flags = ctx->base.tb->flags;
+ ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
#ifdef CONFIG_USER_ONLY
ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
@@ -4071,8 +4428,9 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
- ctx->mmu_idx = (ctx->tb_flags & PSW_D ?
- PRIV_TO_MMU_IDX(ctx->privilege) : MMU_PHYS_IDX);
+ ctx->mmu_idx = (ctx->tb_flags & PSW_D
+ ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
+ : MMU_PHYS_IDX);
/* Recover the IAOQ values from the GVA + PRIV. */
uint64_t cs_base = ctx->base.tb->cs_base;
@@ -4085,14 +4443,11 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->iaoq_n = -1;
ctx->iaoq_n_var = NULL;
+ ctx->zero = tcg_constant_i64(0);
+
/* Bound the number of instructions by those left on the page. */
bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
-
- ctx->ntempr = 0;
- ctx->ntempl = 0;
- memset(ctx->tempr, 0, sizeof(ctx->tempr));
- memset(ctx->templ, 0, sizeof(ctx->templ));
}
static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
@@ -4113,7 +4468,8 @@ static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
+ tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b, 0);
+ ctx->insn_start = tcg_last_op();
}
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
@@ -4121,7 +4477,6 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
DisasContext *ctx = container_of(dcbase, DisasContext, base);
CPUHPPAState *env = cpu_env(cs);
DisasJumpType ret;
- int i, n;
/* Execute one insn. */
#ifdef CONFIG_USER_ONLY
@@ -4140,8 +4495,8 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
This will be overwritten by a branch. */
if (ctx->iaoq_b == -1) {
ctx->iaoq_n = -1;
- ctx->iaoq_n_var = get_temp(ctx);
- tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
+ ctx->iaoq_n_var = tcg_temp_new_i64();
+ tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
} else {
ctx->iaoq_n = ctx->iaoq_b + 4;
ctx->iaoq_n_var = NULL;
@@ -4160,16 +4515,6 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
}
}
- /* Forget any temporaries allocated. */
- for (i = 0, n = ctx->ntempr; i < n; ++i) {
- ctx->tempr[i] = NULL;
- }
- for (i = 0, n = ctx->ntempl; i < n; ++i) {
- ctx->templ[i] = NULL;
- }
- ctx->ntempr = 0;
- ctx->ntempl = 0;
-
/* Advance the insn queue. Note that this check also detects
a priority change within the instruction queue. */
if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
@@ -4197,8 +4542,8 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
case DISAS_IAQ_N_STALE:
case DISAS_IAQ_N_STALE_EXIT:
if (ctx->iaoq_f == -1) {
- tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
- copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
+ copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
+ copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
@@ -4207,7 +4552,7 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
? DISAS_EXIT
: DISAS_IAQ_N_UPDATED);
} else if (ctx->iaoq_b == -1) {
- tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
+ copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
}
break;
@@ -4227,8 +4572,8 @@ static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
case DISAS_TOO_MANY:
case DISAS_IAQ_N_STALE:
case DISAS_IAQ_N_STALE_EXIT:
- copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
- copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
+ copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
+ copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
nullify_save(ctx);
/* FALLTHRU */
case DISAS_IAQ_N_UPDATED: