author    Richard Henderson <richard.henderson@linaro.org>  2024-05-15 11:46:58 +0200
committer Richard Henderson <richard.henderson@linaro.org>  2024-05-15 11:46:58 +0200
commit    922582ace2df59572a671f5c0c5c6c5c706995e5 (patch)
tree      66e8f88bb4c773947dda5b75ead65f26a6ec35a3
parent    2b01688380103acc2a9cd197b964d643fceba2a9 (diff)
parent    9e035f00788c52a6f51529c54371a611d9f8b089 (diff)
Merge tag 'pull-hppa-20240515' of https://gitlab.com/rth7680/qemu into staging
target/hppa:
 - Use TCG_COND_TST where applicable.
 - Use CF_BP_PAGE instead of a local breakpoint search.
 - Clean up IAOQ handling during translation.
 - Implement CF_PCREL.
 - Implement PSW.B.
 - Implement PSW.X.
 - Log cpu state on interrupt and rfi.

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmZEgnwdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV+43gf8CakQdMSqfGV2nGP+
# 7wWZOAV04IyfkJ38F/CH0ihUkblEOzXJ1shTFkrHEw257j0D10MctSSbjrqz5BwU
# obQcwoVlxzTGXqzhkZ6wagkcqjv3TtlPtznZIk6JssdlrtwIKDmE2/3t1dzHnyBD
# WTrS0SK3YvVRovq/ai51raUbiBsNq7XG3skHEsMKsFxp4EaDP5JTbputdQWdffjh
# TBmXImhHC3gm09KWIUZwfEBHlaa7YXk2orzB8kBE8S2kQj9vrGXEaC4jYnBcQLPw
# NDDkBYRqxHYQr0vIAHee+5cUgt1jDBr5rXnAnJwzK0wyEEc4Mi4OTPhNE604iu2y
# SDxS8Q==
# =A4Qf
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 15 May 2024 11:38:04 AM CEST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate]

* tag 'pull-hppa-20240515' of https://gitlab.com/rth7680/qemu: (43 commits)
  target/hppa: Log cpu state on return-from-interrupt
  target/hppa: Log cpu state at interrupt
  target/hppa: Implement CF_PCREL
  target/hppa: Adjust priv for B,GATE at runtime
  target/hppa: Drop tlb_entry return from hppa_get_physical_address
  target/hppa: Implement PSW_X
  target/hppa: Implement PSW_B
  target/hppa: Manage PSW_X and PSW_B in translator
  target/hppa: Split PSW X and B into their own field
  target/hppa: Improve hppa_cpu_dump_state
  target/hppa: Do not mask in copy_iaoq_entry
  target/hppa: Store full iaoq_f and page offset of iaoq_b in TB
  linux-user/hppa: Force all code addresses to PRIV_USER
  target/hppa: Use delay_excp for conditional trap on overflow
  target/hppa: Use delay_excp for conditional traps
  target/hppa: Introduce DisasDelayException
  target/hppa: Remove cond_free
  target/hppa: Use TCG_COND_TST* in trans_ftest
  target/hppa: Use registerfields.h for FPSR
  target/hppa: Use TCG_COND_TST* in trans_bb_imm
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
-rw-r--r--  linux-user/elfload.c            4
-rw-r--r--  linux-user/hppa/cpu_loop.c     14
-rw-r--r--  linux-user/hppa/signal.c        6
-rw-r--r--  linux-user/hppa/target_cpu.h    4
-rw-r--r--  target/hppa/cpu.c              86
-rw-r--r--  target/hppa/cpu.h              80
-rw-r--r--  target/hppa/fpu_helper.c       26
-rw-r--r--  target/hppa/gdbstub.c           6
-rw-r--r--  target/hppa/helper.c           66
-rw-r--r--  target/hppa/helper.h            3
-rw-r--r--  target/hppa/int_helper.c       33
-rw-r--r--  target/hppa/mem_helper.c       95
-rw-r--r--  target/hppa/op_helper.c        17
-rw-r--r--  target/hppa/sys_helper.c       12
-rw-r--r--  target/hppa/translate.c      1146
15 files changed, 857 insertions, 741 deletions
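
A note on the TCG_COND_TST conversions before the patch body (an editorial sketch, not part of the pull): TCG's test conditions compare (value & immediate) against zero directly, so the 32-bit cases in translate.c no longer need an explicit sign- or zero-extension before the compare. In terms of the cond_make_* helpers this series introduces:

    /* Before: materialize the 32-bit sign, then compare against zero. */
    tmp = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(tmp, res);
    cond = cond_make_0_tmp(TCG_COND_LT, tmp);

    /* After: fold the bit test into the condition itself;
     * true iff (res & (1ull << 31)) != 0, with no temporary. */
    cond = cond_make_ti(TCG_COND_TSTNE, res, 1ull << 31);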
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index b473cda6b4..c1e1511ff2 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -1887,8 +1887,8 @@ static inline void init_thread(struct target_pt_regs *regs,
static inline void init_thread(struct target_pt_regs *regs,
struct image_info *infop)
{
- regs->iaoq[0] = infop->entry;
- regs->iaoq[1] = infop->entry + 4;
+ regs->iaoq[0] = infop->entry | PRIV_USER;
+ regs->iaoq[1] = regs->iaoq[0] + 4;
regs->gr[23] = 0;
regs->gr[24] = infop->argv;
regs->gr[25] = infop->argc;
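
For context (an illustrative sketch; the entry address is hypothetical): PA-RISC carries the privilege level in the low two bits of each instruction-address-offset-queue entry, which is why user-mode code addresses are ORed with PRIV_USER (3, added to target/hppa/cpu.h below) rather than left 4-byte aligned:

    uint64_t entry  = 0x10000;            /* hypothetical ELF entry point  */
    uint64_t iaoq_f = entry | PRIV_USER;  /* 0x10003: address + priv 3     */
    uint64_t iaoq_b = iaoq_f + 4;         /* 0x10007: next insn, same priv */
    uint64_t pc     = iaoq_f & -4;        /* 0x10000: address, priv masked */
    int      priv   = iaoq_f & 3;         /* 3: current privilege level    */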
diff --git a/linux-user/hppa/cpu_loop.c b/linux-user/hppa/cpu_loop.c
index d5232f37fe..bc093b8fe8 100644
--- a/linux-user/hppa/cpu_loop.c
+++ b/linux-user/hppa/cpu_loop.c
@@ -129,8 +129,8 @@ void cpu_loop(CPUHPPAState *env)
default:
env->gr[28] = ret;
/* We arrived here by faking the gateway page. Return. */
- env->iaoq_f = env->gr[31];
- env->iaoq_b = env->gr[31] + 4;
+ env->iaoq_f = env->gr[31] | PRIV_USER;
+ env->iaoq_b = env->iaoq_f + 4;
break;
case -QEMU_ERESTARTSYS:
case -QEMU_ESIGRETURN:
@@ -140,8 +140,8 @@ void cpu_loop(CPUHPPAState *env)
case EXCP_SYSCALL_LWS:
env->gr[21] = hppa_lws(env);
/* We arrived here by faking the gateway page. Return. */
- env->iaoq_f = env->gr[31];
- env->iaoq_b = env->gr[31] + 4;
+ env->iaoq_f = env->gr[31] | PRIV_USER;
+ env->iaoq_b = env->iaoq_f + 4;
break;
case EXCP_IMP:
force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR, env->iaoq_f);
@@ -152,9 +152,9 @@ void cpu_loop(CPUHPPAState *env)
case EXCP_PRIV_OPR:
/* check for glibc ABORT_INSTRUCTION "iitlbp %r0,(%sr0, %r0)" */
if (env->cr[CR_IIR] == 0x04000000) {
- force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->iaoq_f);
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->iaoq_f);
} else {
- force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVOPC, env->iaoq_f);
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVOPC, env->iaoq_f);
}
break;
case EXCP_PRIV_REG:
@@ -170,7 +170,7 @@ void cpu_loop(CPUHPPAState *env)
force_sig_fault(TARGET_SIGFPE, 0, env->iaoq_f);
break;
case EXCP_BREAK:
- force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->iaoq_f & ~3);
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->iaoq_f);
break;
case EXCP_DEBUG:
force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->iaoq_f);
diff --git a/linux-user/hppa/signal.c b/linux-user/hppa/signal.c
index 682ba25922..f6f094c960 100644
--- a/linux-user/hppa/signal.c
+++ b/linux-user/hppa/signal.c
@@ -101,7 +101,9 @@ static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
cpu_hppa_loaded_fr0(env);
__get_user(env->iaoq_f, &sc->sc_iaoq[0]);
+ env->iaoq_f |= PRIV_USER;
__get_user(env->iaoq_b, &sc->sc_iaoq[1]);
+ env->iaoq_b |= PRIV_USER;
__get_user(env->cr[CR_SAR], &sc->sc_sar);
}
@@ -162,8 +164,8 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
unlock_user(fdesc, haddr, 0);
haddr = dest;
}
- env->iaoq_f = haddr;
- env->iaoq_b = haddr + 4;
+ env->iaoq_f = haddr | PRIV_USER;
+ env->iaoq_b = env->iaoq_f + 4;
env->psw_n = 0;
return;
diff --git a/linux-user/hppa/target_cpu.h b/linux-user/hppa/target_cpu.h
index aacf3e9e02..4b84422a90 100644
--- a/linux-user/hppa/target_cpu.h
+++ b/linux-user/hppa/target_cpu.h
@@ -28,8 +28,8 @@ static inline void cpu_clone_regs_child(CPUHPPAState *env, target_ulong newsp,
/* Indicate child in return value. */
env->gr[28] = 0;
/* Return from the syscall. */
- env->iaoq_f = env->gr[31];
- env->iaoq_b = env->gr[31] + 4;
+ env->iaoq_f = env->gr[31] | PRIV_USER;
+ env->iaoq_b = env->iaoq_f + 4;
}
static inline void cpu_clone_regs_parent(CPUHPPAState *env, unsigned flags)
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index 393a81988d..f0507874ce 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -32,61 +32,96 @@ static void hppa_cpu_set_pc(CPUState *cs, vaddr value)
{
HPPACPU *cpu = HPPA_CPU(cs);
+#ifdef CONFIG_USER_ONLY
+ value |= PRIV_USER;
+#endif
cpu->env.iaoq_f = value;
cpu->env.iaoq_b = value + 4;
}
static vaddr hppa_cpu_get_pc(CPUState *cs)
{
- HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = cpu_env(cs);
- return cpu->env.iaoq_f;
+ return hppa_form_gva_psw(env->psw, (env->psw & PSW_C ? env->iasq_f : 0),
+ env->iaoq_f & -4);
}
-static void hppa_cpu_synchronize_from_tb(CPUState *cs,
- const TranslationBlock *tb)
+void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
+ uint64_t *pcsbase, uint32_t *pflags)
{
- HPPACPU *cpu = HPPA_CPU(cs);
+ uint32_t flags = 0;
+ uint64_t cs_base = 0;
+
+ /*
+ * TB lookup assumes that PC contains the complete virtual address.
+ * If we leave space+offset separate, we'll get ITLB misses to an
+ * incomplete virtual address. This also means that we must separate
+ * out current cpu privilege from the low bits of IAOQ_F.
+ */
+ *pc = hppa_cpu_get_pc(env_cpu(env));
+ flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;
+
+ /*
+ * The only really interesting case is if IAQ_Back is on the same page
+ * as IAQ_Front, so that we can use goto_tb between the blocks. In all
+ * other cases, we'll be ending the TranslationBlock with one insn and
+ * not linking between them.
+ */
+ if (env->iasq_f != env->iasq_b) {
+ cs_base |= CS_BASE_DIFFSPACE;
+ } else if ((env->iaoq_f ^ env->iaoq_b) & TARGET_PAGE_MASK) {
+ cs_base |= CS_BASE_DIFFPAGE;
+ } else {
+ cs_base |= env->iaoq_b & ~TARGET_PAGE_MASK;
+ }
- tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));
+ /* ??? E, T, H, L bits need to be here, when implemented. */
+ flags |= env->psw_n * PSW_N;
+ flags |= env->psw_xb;
+ flags |= env->psw & (PSW_W | PSW_C | PSW_D | PSW_P);
#ifdef CONFIG_USER_ONLY
- cpu->env.iaoq_f = tb->pc;
- cpu->env.iaoq_b = tb->cs_base;
+ flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
#else
- /* Recover the IAOQ values from the GVA + PRIV. */
- uint32_t priv = (tb->flags >> TB_FLAG_PRIV_SHIFT) & 3;
- target_ulong cs_base = tb->cs_base;
- target_ulong iasq_f = cs_base & ~0xffffffffull;
- int32_t diff = cs_base;
-
- cpu->env.iasq_f = iasq_f;
- cpu->env.iaoq_f = (tb->pc & ~iasq_f) + priv;
- if (diff) {
- cpu->env.iaoq_b = cpu->env.iaoq_f + diff;
+ if ((env->sr[4] == env->sr[5])
+ & (env->sr[4] == env->sr[6])
+ & (env->sr[4] == env->sr[7])) {
+ flags |= TB_FLAG_SR_SAME;
}
#endif
+ *pcsbase = cs_base;
+ *pflags = flags;
+}
+
+static void hppa_cpu_synchronize_from_tb(CPUState *cs,
+ const TranslationBlock *tb)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+
+ /* IAQ is always up-to-date before goto_tb. */
cpu->env.psw_n = (tb->flags & PSW_N) != 0;
+ cpu->env.psw_xb = tb->flags & (PSW_X | PSW_B);
}
static void hppa_restore_state_to_opc(CPUState *cs,
const TranslationBlock *tb,
const uint64_t *data)
{
- HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = cpu_env(cs);
- cpu->env.iaoq_f = data[0];
- if (data[1] != (target_ulong)-1) {
- cpu->env.iaoq_b = data[1];
+ env->iaoq_f = (env->iaoq_f & TARGET_PAGE_MASK) | data[0];
+ if (data[1] != INT32_MIN) {
+ env->iaoq_b = env->iaoq_f + data[1];
}
- cpu->env.unwind_breg = data[2];
+ env->unwind_breg = data[2];
/*
* Since we were executing the instruction at IAOQ_F, and took some
* sort of action that provoked the cpu_restore_state, we can infer
* that the instruction was not nullified.
*/
- cpu->env.psw_n = 0;
+ env->psw_n = 0;
}
static bool hppa_cpu_has_work(CPUState *cs)
@@ -152,6 +187,9 @@ static void hppa_cpu_realizefn(DeviceState *dev, Error **errp)
hppa_ptlbe(&cpu->env);
}
#endif
+
+ /* Use pc-relative instructions always to simplify the translator. */
+ tcg_cflags_set(cs, CF_PCREL);
}
static void hppa_cpu_initfn(Object *obj)
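
How the cs_base encoding built above is meant to be consumed (a sketch assuming 4 KiB target pages; decode_iaq_back is a hypothetical helper, the real logic lives inline in the translator):

    static uint64_t decode_iaq_back(uint64_t cs_base, uint64_t iaoq_f)
    {
        if (cs_base & CS_BASE_DIFFSPACE) {
            return -1;  /* sentinel: IASQ_Front != IASQ_Back, no chaining */
        }
        if (cs_base & CS_BASE_DIFFPAGE) {
            return -1;  /* sentinel: same space, different page */
        }
        /* Same page: cs_base carries IAOQ_Back's offset within it. */
        return (iaoq_f & TARGET_PAGE_MASK) | (cs_base & ~TARGET_PAGE_MASK);
    }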
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index fb2e4c4a98..2bcb3b602b 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -24,6 +24,7 @@
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
#include "qemu/interval-tree.h"
+#include "hw/registerfields.h"
#define MMU_ABS_W_IDX 6
#define MMU_ABS_IDX 7
@@ -41,6 +42,9 @@
#define MMU_IDX_TO_P(MIDX) (((MIDX) - MMU_KERNEL_IDX) & 1)
#define PRIV_P_TO_MMU_IDX(PRIV, P) ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX)
+#define PRIV_KERNEL 0
+#define PRIV_USER 3
+
#define TARGET_INSN_START_EXTRA_WORDS 2
/* No need to flush MMU_ABS*_IDX */
@@ -152,6 +156,30 @@
#define CR_IPSW 22
#define CR_EIRR 23
+FIELD(FPSR, ENA_I, 0, 1)
+FIELD(FPSR, ENA_U, 1, 1)
+FIELD(FPSR, ENA_O, 2, 1)
+FIELD(FPSR, ENA_Z, 3, 1)
+FIELD(FPSR, ENA_V, 4, 1)
+FIELD(FPSR, ENABLES, 0, 5)
+FIELD(FPSR, D, 5, 1)
+FIELD(FPSR, T, 6, 1)
+FIELD(FPSR, RM, 9, 2)
+FIELD(FPSR, CQ, 11, 11)
+FIELD(FPSR, CQ0_6, 15, 7)
+FIELD(FPSR, CQ0_4, 17, 5)
+FIELD(FPSR, CQ0_2, 19, 3)
+FIELD(FPSR, CQ0, 21, 1)
+FIELD(FPSR, CA, 15, 7)
+FIELD(FPSR, CA0, 21, 1)
+FIELD(FPSR, C, 26, 1)
+FIELD(FPSR, FLG_I, 27, 1)
+FIELD(FPSR, FLG_U, 28, 1)
+FIELD(FPSR, FLG_O, 29, 1)
+FIELD(FPSR, FLG_Z, 30, 1)
+FIELD(FPSR, FLG_V, 31, 1)
+FIELD(FPSR, FLAGS, 27, 5)
+
typedef struct HPPATLBEntry {
union {
IntervalTreeNode itree;
@@ -180,7 +208,8 @@ typedef struct CPUArchState {
uint64_t fr[32];
uint64_t sr[8]; /* stored shifted into place for gva */
- target_ulong psw; /* All psw bits except the following: */
+ uint32_t psw; /* All psw bits except the following: */
+ uint32_t psw_xb; /* X and B, in their normal positions */
target_ulong psw_n; /* boolean */
target_long psw_v; /* in most significant bit */
@@ -313,48 +342,11 @@ hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr);
#define TB_FLAG_SR_SAME PSW_I
#define TB_FLAG_PRIV_SHIFT 8
#define TB_FLAG_UNALIGN 0x400
+#define CS_BASE_DIFFPAGE (1 << 12)
+#define CS_BASE_DIFFSPACE (1 << 13)
-static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
-{
- uint32_t flags = env->psw_n * PSW_N;
-
- /* TB lookup assumes that PC contains the complete virtual address.
- If we leave space+offset separate, we'll get ITLB misses to an
- incomplete virtual address. This also means that we must separate
- out current cpu privilege from the low bits of IAOQ_F. */
-#ifdef CONFIG_USER_ONLY
- *pc = env->iaoq_f & -4;
- *cs_base = env->iaoq_b & -4;
- flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
-#else
- /* ??? E, T, H, L, B bits need to be here, when implemented. */
- flags |= env->psw & (PSW_W | PSW_C | PSW_D | PSW_P);
- flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;
-
- *pc = hppa_form_gva_psw(env->psw, (env->psw & PSW_C ? env->iasq_f : 0),
- env->iaoq_f & -4);
- *cs_base = env->iasq_f;
-
- /* Insert a difference between IAOQ_B and IAOQ_F within the otherwise zero
- low 32-bits of CS_BASE. This will succeed for all direct branches,
- which is the primary case we care about -- using goto_tb within a page.
- Failure is indicated by a zero difference. */
- if (env->iasq_f == env->iasq_b) {
- target_long diff = env->iaoq_b - env->iaoq_f;
- if (diff == (int32_t)diff) {
- *cs_base |= (uint32_t)diff;
- }
- }
- if ((env->sr[4] == env->sr[5])
- & (env->sr[4] == env->sr[6])
- & (env->sr[4] == env->sr[7])) {
- flags |= TB_FLAG_SR_SAME;
- }
-#endif
-
- *pflags = flags;
-}
+void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *pflags);
target_ulong cpu_hppa_get_psw(CPUHPPAState *env);
void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong);
@@ -379,8 +371,7 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
void hppa_cpu_do_interrupt(CPUState *cpu);
bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
- int type, hwaddr *pphys, int *pprot,
- HPPATLBEntry **tlb_entry);
+ int type, hwaddr *pphys, int *pprot);
void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
vaddr addr, unsigned size,
MMUAccessType access_type,
@@ -389,7 +380,6 @@ void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
extern const MemoryRegionOps hppa_io_eir_ops;
extern const VMStateDescription vmstate_hppa_cpu;
void hppa_cpu_alarm_timer(void *);
-int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr);
#endif
G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra);
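
The FIELD() declarations above come from hw/registerfields.h: each generates R_<reg>_<field>_SHIFT, _LENGTH and _MASK constants plus typed accessors. The fpu_helper.c conversions below rely on equivalences of this shape (sketch):

    /* FIELD(FPSR, RM, 9, 2) yields R_FPSR_RM_SHIFT == 9 and
     * R_FPSR_RM_MASK == (3 << 9), and therefore: */
    rm = FIELD_EX32(shadow, FPSR, RM);        /* == extract32(shadow, 9, 2)     */
    shadow = FIELD_DP32(shadow, FPSR, C, c);  /* == deposit32(shadow, 26, 1, c) */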
diff --git a/target/hppa/fpu_helper.c b/target/hppa/fpu_helper.c
index 576f283b04..deaed2b65d 100644
--- a/target/hppa/fpu_helper.c
+++ b/target/hppa/fpu_helper.c
@@ -30,7 +30,7 @@ void HELPER(loaded_fr0)(CPUHPPAState *env)
env->fr0_shadow = shadow;
- switch (extract32(shadow, 9, 2)) {
+ switch (FIELD_EX32(shadow, FPSR, RM)) {
default:
rm = float_round_nearest_even;
break;
@@ -46,7 +46,7 @@ void HELPER(loaded_fr0)(CPUHPPAState *env)
}
set_float_rounding_mode(rm, &env->fp_status);
- d = extract32(shadow, 5, 1);
+ d = FIELD_EX32(shadow, FPSR, D);
set_flush_to_zero(d, &env->fp_status);
set_flush_inputs_to_zero(d, &env->fp_status);
}
@@ -57,7 +57,7 @@ void cpu_hppa_loaded_fr0(CPUHPPAState *env)
}
#define CONVERT_BIT(X, SRC, DST) \
- ((SRC) > (DST) \
+ ((unsigned)(SRC) > (unsigned)(DST) \
? (X) / ((SRC) / (DST)) & (DST) \
: ((X) & (SRC)) * ((DST) / (SRC)))
@@ -73,12 +73,12 @@ static void update_fr0_op(CPUHPPAState *env, uintptr_t ra)
}
set_float_exception_flags(0, &env->fp_status);
- hard_exp |= CONVERT_BIT(soft_exp, float_flag_inexact, 1u << 0);
- hard_exp |= CONVERT_BIT(soft_exp, float_flag_underflow, 1u << 1);
- hard_exp |= CONVERT_BIT(soft_exp, float_flag_overflow, 1u << 2);
- hard_exp |= CONVERT_BIT(soft_exp, float_flag_divbyzero, 1u << 3);
- hard_exp |= CONVERT_BIT(soft_exp, float_flag_invalid, 1u << 4);
- shadow |= hard_exp << (32 - 5);
+ hard_exp |= CONVERT_BIT(soft_exp, float_flag_inexact, R_FPSR_ENA_I_MASK);
+ hard_exp |= CONVERT_BIT(soft_exp, float_flag_underflow, R_FPSR_ENA_U_MASK);
+ hard_exp |= CONVERT_BIT(soft_exp, float_flag_overflow, R_FPSR_ENA_O_MASK);
+ hard_exp |= CONVERT_BIT(soft_exp, float_flag_divbyzero, R_FPSR_ENA_Z_MASK);
+ hard_exp |= CONVERT_BIT(soft_exp, float_flag_invalid, R_FPSR_ENA_V_MASK);
+ shadow |= hard_exp << (R_FPSR_FLAGS_SHIFT - R_FPSR_ENABLES_SHIFT);
env->fr0_shadow = shadow;
env->fr[0] = (uint64_t)shadow << 32;
@@ -378,15 +378,15 @@ static void update_fr0_cmp(CPUHPPAState *env, uint32_t y,
if (y) {
/* targeted comparison */
/* set fpsr[ca[y - 1]] to current compare */
- shadow = deposit32(shadow, 21 - (y - 1), 1, c);
+ shadow = deposit32(shadow, R_FPSR_CA0_SHIFT - (y - 1), 1, c);
} else {
/* queued comparison */
/* shift cq right by one place */
- shadow = deposit32(shadow, 11, 10, extract32(shadow, 12, 10));
+ shadow = (shadow & ~R_FPSR_CQ_MASK) | ((shadow >> 1) & R_FPSR_CQ_MASK);
/* move fpsr[c] to fpsr[cq[0]] */
- shadow = deposit32(shadow, 21, 1, extract32(shadow, 26, 1));
+ shadow = FIELD_DP32(shadow, FPSR, CQ0, FIELD_EX32(shadow, FPSR, C));
/* set fpsr[c] to current compare */
- shadow = deposit32(shadow, 26, 1, c);
+ shadow = FIELD_DP32(shadow, FPSR, C, c);
}
env->fr0_shadow = shadow;
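
CONVERT_BIT relocates a single flag bit by pure multiply/divide, valid whenever SRC and DST are both powers of two. A worked instance (the softfloat flag value here is illustrative only):

    /* Assume float_flag_inexact == (1 << 4) and R_FPSR_ENA_I_MASK == (1 << 0).
     * SRC > DST, so the macro takes the division arm:
     *     CONVERT_BIT(x, 1 << 4, 1 << 0)  ==  (x / 16) & 1
     * The new (unsigned) casts keep the SRC > DST comparison well-defined
     * even when a mask occupies bit 31 -- as R_FPSR_FLG_V_MASK now does --
     * which would compare as negative if treated as a signed int. */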
diff --git a/target/hppa/gdbstub.c b/target/hppa/gdbstub.c
index 4a965b38d7..0daa52f7af 100644
--- a/target/hppa/gdbstub.c
+++ b/target/hppa/gdbstub.c
@@ -163,12 +163,18 @@ int hppa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
env->cr[CR_SAR] = val & (hppa_is_pa20(env) ? 63 : 31);
break;
case 33:
+#ifdef CONFIG_USER_ONLY
+ val |= PRIV_USER;
+#endif
env->iaoq_f = val;
break;
case 34:
env->iasq_f = (uint64_t)val << 32;
break;
case 35:
+#ifdef CONFIG_USER_ONLY
+ val |= PRIV_USER;
+#endif
env->iaoq_b = val;
break;
case 36:
diff --git a/target/hppa/helper.c b/target/hppa/helper.c
index 9d217d051c..b79ddd8184 100644
--- a/target/hppa/helper.c
+++ b/target/hppa/helper.c
@@ -54,7 +54,7 @@ target_ulong cpu_hppa_get_psw(CPUHPPAState *env)
psw |= env->psw_n * PSW_N;
psw |= (env->psw_v < 0) * PSW_V;
- psw |= env->psw;
+ psw |= env->psw | env->psw_xb;
return psw;
}
@@ -76,8 +76,8 @@ void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong psw)
}
psw &= ~reserved;
- env->psw = psw & (uint32_t)~(PSW_N | PSW_V | PSW_CB);
-
+ env->psw = psw & (uint32_t)~(PSW_B | PSW_N | PSW_V | PSW_X | PSW_CB);
+ env->psw_xb = psw & (PSW_X | PSW_B);
env->psw_n = (psw / PSW_N) & 1;
env->psw_v = -((psw / PSW_V) & 1);
@@ -102,6 +102,19 @@ void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong psw)
void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
+#ifndef CONFIG_USER_ONLY
+ static const char cr_name[32][5] = {
+ "RC", "CR1", "CR2", "CR3",
+ "CR4", "CR5", "CR6", "CR7",
+ "PID1", "PID2", "CCR", "SAR",
+ "PID3", "PID4", "IVA", "EIEM",
+ "ITMR", "ISQF", "IOQF", "IIR",
+ "ISR", "IOR", "IPSW", "EIRR",
+ "TR0", "TR1", "TR2", "TR3",
+ "TR4", "TR5", "TR6", "TR7",
+ };
+#endif
+
CPUHPPAState *env = cpu_env(cs);
target_ulong psw = cpu_hppa_get_psw(env);
target_ulong psw_cb;
@@ -117,11 +130,12 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
m = UINT32_MAX;
}
- qemu_fprintf(f, "IA_F " TARGET_FMT_lx " IA_B " TARGET_FMT_lx
- " IIR %0*" PRIx64 "\n",
+ qemu_fprintf(f, "IA_F %08" PRIx64 ":%0*" PRIx64 " (" TARGET_FMT_lx ")\n"
+ "IA_B %08" PRIx64 ":%0*" PRIx64 " (" TARGET_FMT_lx ")\n",
+ env->iasq_f >> 32, w, m & env->iaoq_f,
hppa_form_gva_psw(psw, env->iasq_f, env->iaoq_f),
- hppa_form_gva_psw(psw, env->iasq_b, env->iaoq_b),
- w, m & env->cr[CR_IIR]);
+ env->iasq_b >> 32, w, m & env->iaoq_b,
+ hppa_form_gva_psw(psw, env->iasq_b, env->iaoq_b));
psw_c[0] = (psw & PSW_W ? 'W' : '-');
psw_c[1] = (psw & PSW_E ? 'E' : '-');
@@ -154,12 +168,46 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
(i & 3) == 3 ? '\n' : ' ');
}
#ifndef CONFIG_USER_ONLY
+ for (i = 0; i < 32; i++) {
+ qemu_fprintf(f, "%-4s %0*" PRIx64 "%c",
+ cr_name[i], w, m & env->cr[i],
+ (i & 3) == 3 ? '\n' : ' ');
+ }
+ qemu_fprintf(f, "ISQB %0*" PRIx64 " IOQB %0*" PRIx64 "\n",
+ w, m & env->cr_back[0], w, m & env->cr_back[1]);
for (i = 0; i < 8; i++) {
qemu_fprintf(f, "SR%02d %08x%c", i, (uint32_t)(env->sr[i] >> 32),
(i & 3) == 3 ? '\n' : ' ');
}
#endif
- qemu_fprintf(f, "\n");
- /* ??? FR */
+ if (flags & CPU_DUMP_FPU) {
+ static const char rm[4][4] = { "RN", "RZ", "R+", "R-" };
+ char flg[6], ena[6];
+ uint32_t fpsr = env->fr0_shadow;
+
+ flg[0] = (fpsr & R_FPSR_FLG_V_MASK ? 'V' : '-');
+ flg[1] = (fpsr & R_FPSR_FLG_Z_MASK ? 'Z' : '-');
+ flg[2] = (fpsr & R_FPSR_FLG_O_MASK ? 'O' : '-');
+ flg[3] = (fpsr & R_FPSR_FLG_U_MASK ? 'U' : '-');
+ flg[4] = (fpsr & R_FPSR_FLG_I_MASK ? 'I' : '-');
+ flg[5] = '\0';
+
+ ena[0] = (fpsr & R_FPSR_ENA_V_MASK ? 'V' : '-');
+ ena[1] = (fpsr & R_FPSR_ENA_Z_MASK ? 'Z' : '-');
+ ena[2] = (fpsr & R_FPSR_ENA_O_MASK ? 'O' : '-');
+ ena[3] = (fpsr & R_FPSR_ENA_U_MASK ? 'U' : '-');
+ ena[4] = (fpsr & R_FPSR_ENA_I_MASK ? 'I' : '-');
+ ena[5] = '\0';
+
+ qemu_fprintf(f, "FPSR %08x flag %s enable %s %s\n",
+ fpsr, flg, ena, rm[FIELD_EX32(fpsr, FPSR, RM)]);
+
+ for (i = 0; i < 32; i++) {
+ qemu_fprintf(f, "FR%02d %016" PRIx64 "%c",
+ i, env->fr[i], (i & 3) == 3 ? '\n' : ' ');
+ }
+ }
+
+ qemu_fprintf(f, "\n");
}
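
Taken together, the PSW is now stored split across four fields, with X and B moved out so the translator can track them exactly; reassembling the architectural value (a sketch consolidating cpu_hppa_get_psw() above):

    target_ulong psw = env->psw | env->psw_xb;  /* X and B kept separately  */
    psw |= env->psw_n * PSW_N;                  /* psw_n is a 0/1 boolean   */
    psw |= (env->psw_v < 0) * PSW_V;            /* V cached in the sign bit */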
diff --git a/target/hppa/helper.h b/target/hppa/helper.h
index 5900fd70bc..de411923d9 100644
--- a/target/hppa/helper.h
+++ b/target/hppa/helper.h
@@ -1,6 +1,4 @@
DEF_HELPER_2(excp, noreturn, env, int)
-DEF_HELPER_FLAGS_2(tsv, TCG_CALL_NO_WG, void, env, tl)
-DEF_HELPER_FLAGS_2(tcond, TCG_CALL_NO_WG, void, env, tl)
DEF_HELPER_FLAGS_3(stby_b, TCG_CALL_NO_WG, void, env, tl, tl)
DEF_HELPER_FLAGS_3(stby_b_parallel, TCG_CALL_NO_WG, void, env, tl, tl)
@@ -88,6 +86,7 @@ DEF_HELPER_1(halt, noreturn, env)
DEF_HELPER_1(reset, noreturn, env)
DEF_HELPER_1(rfi, void, env)
DEF_HELPER_1(rfi_r, void, env)
+DEF_HELPER_FLAGS_2(b_gate_priv, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_2(write_interval_timer, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_2(write_eirr, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_2(swap_system_mask, TCG_CALL_NO_RWG, tl, env, tl)
diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c
index a667ee380d..391f32f27d 100644
--- a/target/hppa/int_helper.c
+++ b/target/hppa/int_helper.c
@@ -134,13 +134,13 @@ void hppa_cpu_do_interrupt(CPUState *cs)
switch (i) {
case EXCP_ILL:
case EXCP_BREAK:
+ case EXCP_OVERFLOW:
+ case EXCP_COND:
case EXCP_PRIV_REG:
case EXCP_PRIV_OPR:
/* IIR set via translate.c. */
break;
- case EXCP_OVERFLOW:
- case EXCP_COND:
case EXCP_ASSIST:
case EXCP_DTLB_MISS:
case EXCP_NA_ITLB_MISS:
@@ -167,7 +167,7 @@ void hppa_cpu_do_interrupt(CPUState *cs)
vaddr = hppa_form_gva_psw(old_psw, env->iasq_f, vaddr);
t = hppa_get_physical_address(env, vaddr, MMU_KERNEL_IDX,
- 0, &paddr, &prot, NULL);
+ 0, &paddr, &prot);
if (t >= 0) {
/* We can't re-load the instruction. */
env->cr[CR_IIR] = 0;
@@ -241,21 +241,22 @@ void hppa_cpu_do_interrupt(CPUState *cs)
[EXCP_SYSCALL_LWS] = "syscall-lws",
[EXCP_TOC] = "TOC (transfer of control)",
};
- static int count;
- const char *name = NULL;
- char unknown[16];
- if (i >= 0 && i < ARRAY_SIZE(names)) {
- name = names[i];
- }
- if (!name) {
- snprintf(unknown, sizeof(unknown), "unknown %d", i);
- name = unknown;
+ FILE *logfile = qemu_log_trylock();
+ if (logfile) {
+ const char *name = NULL;
+
+ if (i >= 0 && i < ARRAY_SIZE(names)) {
+ name = names[i];
+ }
+ if (name) {
+ fprintf(logfile, "INT: cpu %d %s\n", cs->cpu_index, name);
+ } else {
+ fprintf(logfile, "INT: cpu %d unknown %d\n", cs->cpu_index, i);
+ }
+ hppa_cpu_dump_state(cs, logfile, 0);
+ qemu_log_unlock(logfile);
}
- qemu_log("INT %6d: %s @ " TARGET_FMT_lx ":" TARGET_FMT_lx
- " for " TARGET_FMT_lx ":" TARGET_FMT_lx "\n",
- ++count, name, env->cr[CR_IIASQ], env->cr[CR_IIAOQ],
- env->cr[CR_ISR], env->cr[CR_IOR]);
}
cs->exception_index = -1;
}
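
The pairing above is QEMU's idiom for emitting a multi-line log record atomically: qemu_log_trylock() returns the log FILE (or NULL when logging is inactive, in which case the whole block is skipped) and holds it until qemu_log_unlock(), so the header line and register dump cannot interleave with output from other vCPUs. sys_helper.c below additionally guards the lock with a loglevel check; a sketch combining both:

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "INT: cpu %d ...\n", cs->cpu_index);
            hppa_cpu_dump_state(cs, logfile, 0);
            qemu_log_unlock(logfile);
        }
    }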
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index d09877afd7..b984f730aa 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -197,18 +197,13 @@ static int match_prot_id64(CPUHPPAState *env, uint32_t access_id)
}
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
- int type, hwaddr *pphys, int *pprot,
- HPPATLBEntry **tlb_entry)
+ int type, hwaddr *pphys, int *pprot)
{
hwaddr phys;
int prot, r_prot, w_prot, x_prot, priv;
HPPATLBEntry *ent;
int ret = -1;
- if (tlb_entry) {
- *tlb_entry = NULL;
- }
-
/* Virtual translation disabled. Map absolute to physical. */
if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
switch (mmu_idx) {
@@ -238,10 +233,6 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
goto egress;
}
- if (tlb_entry) {
- *tlb_entry = ent;
- }
-
/* We now know the physical address. */
phys = ent->pa + (addr - ent->itree.start);
@@ -296,30 +287,38 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
goto egress;
}
- /* In reverse priority order, check for conditions which raise faults.
- As we go, remove PROT bits that cover the condition we want to check.
- In this way, the resulting PROT will force a re-check of the
- architectural TLB entry for the next access. */
- if (unlikely(!ent->d)) {
+ /*
+ * In priority order, check for conditions which raise faults.
+ * Remove PROT bits that cover the condition we want to check,
+ * so that the resulting PROT will force a re-check of the
+ * architectural TLB entry for the next access.
+ */
+ if (unlikely(ent->t)) {
+ prot &= PAGE_EXEC;
+ if (!(type & PAGE_EXEC)) {
+ /* The T bit is set -- Page Reference Fault. */
+ ret = EXCP_PAGE_REF;
+ }
+ } else if (!ent->d) {
+ prot &= PAGE_READ | PAGE_EXEC;
if (type & PAGE_WRITE) {
/* The D bit is not set -- TLB Dirty Bit Fault. */
ret = EXCP_TLB_DIRTY;
}
+ } else if (unlikely(ent->b)) {
prot &= PAGE_READ | PAGE_EXEC;
- }
- if (unlikely(ent->b)) {
if (type & PAGE_WRITE) {
- /* The B bit is set -- Data Memory Break Fault. */
- ret = EXCP_DMB;
- }
- prot &= PAGE_READ | PAGE_EXEC;
- }
- if (unlikely(ent->t)) {
- if (!(type & PAGE_EXEC)) {
- /* The T bit is set -- Page Reference Fault. */
- ret = EXCP_PAGE_REF;
+ /*
+ * The B bit is set -- Data Memory Break Fault.
+ * Except when PSW_X is set, allow this single access to succeed.
+ * The write bit will be invalidated for subsequent accesses.
+ */
+ if (env->psw_xb & PSW_X) {
+ prot |= PAGE_WRITE_INV;
+ } else {
+ ret = EXCP_DMB;
+ }
}
- prot &= PAGE_EXEC;
}
egress:
@@ -342,7 +341,7 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
- &phys, &prot, NULL);
+ &phys, &prot);
/* Since we're translating for debugging, the only error that is a
hard error is no translation at all. Otherwise, while a real cpu
@@ -424,7 +423,6 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
{
HPPACPU *cpu = HPPA_CPU(cs);
CPUHPPAState *env = &cpu->env;
- HPPATLBEntry *ent;
int prot, excp, a_prot;
hwaddr phys;
@@ -440,8 +438,7 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
break;
}
- excp = hppa_get_physical_address(env, addr, mmu_idx,
- a_prot, &phys, &prot, &ent);
+ excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, &phys, &prot);
if (unlikely(excp >= 0)) {
if (probe) {
return false;
@@ -682,7 +679,7 @@ target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
int prot, excp;
excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
- &phys, &prot, NULL);
+ &phys, &prot);
if (excp >= 0) {
if (excp == EXCP_DTLB_MISS) {
excp = EXCP_NA_DTLB_MISS;
@@ -694,13 +691,6 @@ target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
return phys;
}
-/* Return the ar_type of the TLB at VADDR, or -1. */
-int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
-{
- HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
- return ent ? ent->ar_type : -1;
-}
-
/*
* diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
* allow operating systems to modify the Block TLB (BTLB) entries.
@@ -796,3 +786,30 @@ void HELPER(diag_btlb)(CPUHPPAState *env)
break;
}
}
+
+uint64_t HELPER(b_gate_priv)(CPUHPPAState *env, uint64_t iaoq_f)
+{
+ uint64_t gva = hppa_form_gva(env, env->iasq_f, iaoq_f);
+ HPPATLBEntry *ent = hppa_find_tlb(env, gva);
+
+ if (ent == NULL) {
+ raise_exception_with_ior(env, EXCP_ITLB_MISS, GETPC(), gva, false);
+ }
+
+ /*
+ * There should be no need to check page permissions, as that will
+ * already have been done by tb_lookup via get_page_addr_code.
+ * All we need at this point is to check the ar_type.
+ *
+ * No change for non-gateway pages or for priv decrease.
+ */
+ if (ent->ar_type & 4) {
+ int old_priv = iaoq_f & 3;
+ int new_priv = ent->ar_type & 3;
+
+ if (new_priv < old_priv) {
+ iaoq_f = (iaoq_f & -4) | new_priv;
+ }
+ }
+ return iaoq_f;
+}
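
A worked instance of the promotion above (all values hypothetical): a user-mode B,GATE through a gateway page whose TLB entry carries ar_type 4 (gateway bit set, target privilege 0):

    /* iaoq_f = 0x7f003  ->  old_priv = iaoq_f & 3 = 3 (user)
     * ent->ar_type = 4  ->  gateway, new_priv = 4 & 3 = 0
     * new_priv (0) < old_priv (3), so privilege is promoted:
     * iaoq_f = (0x7f003 & -4) | 0 = 0x7f000 (kernel) */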
diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c
index 6cf49f33b7..7f79196fff 100644
--- a/target/hppa/op_helper.c
+++ b/target/hppa/op_helper.c
@@ -42,20 +42,6 @@ G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra)
cpu_loop_exit_restore(cs, ra);
}
-void HELPER(tsv)(CPUHPPAState *env, target_ulong cond)
-{
- if (unlikely((target_long)cond < 0)) {
- hppa_dynamic_excp(env, EXCP_OVERFLOW, GETPC());
- }
-}
-
-void HELPER(tcond)(CPUHPPAState *env, target_ulong cond)
-{
- if (unlikely(cond)) {
- hppa_dynamic_excp(env, EXCP_COND, GETPC());
- }
-}
-
static void atomic_store_mask32(CPUHPPAState *env, target_ulong addr,
uint32_t val, uint32_t mask, uintptr_t ra)
{
@@ -348,8 +334,7 @@ target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
}
mmu_idx = PRIV_P_TO_MMU_IDX(level, env->psw & PSW_P);
- excp = hppa_get_physical_address(env, addr, mmu_idx, 0, &phys,
- &prot, NULL);
+ excp = hppa_get_physical_address(env, addr, mmu_idx, 0, &phys, &prot);
if (excp >= 0) {
cpu_restore_state(env_cpu(env), GETPC());
hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx));
diff --git a/target/hppa/sys_helper.c b/target/hppa/sys_helper.c
index 22d6c89964..9b43b556fd 100644
--- a/target/hppa/sys_helper.c
+++ b/target/hppa/sys_helper.c
@@ -18,6 +18,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
@@ -93,6 +94,17 @@ void HELPER(rfi)(CPUHPPAState *env)
env->iaoq_b = env->cr_back[1];
env->iasq_f = (env->cr[CR_IIASQ] << 32) & ~(env->iaoq_f & mask);
env->iasq_b = (env->cr_back[0] << 32) & ~(env->iaoq_b & mask);
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ FILE *logfile = qemu_log_trylock();
+ if (logfile) {
+ CPUState *cs = env_cpu(env);
+
+ fprintf(logfile, "RFI: cpu %d\n", cs->cpu_index);
+ hppa_cpu_dump_state(cs, logfile, 0);
+ qemu_log_unlock(logfile);
+ }
+ }
}
static void getshadowregs(CPUHPPAState *env)
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 7287e1debf..51c1762435 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -41,25 +41,51 @@ typedef struct DisasCond {
TCGv_i64 a0, a1;
} DisasCond;
+typedef struct DisasIAQE {
+ /* IASQ; may be null for no change from TB. */
+ TCGv_i64 space;
+ /* IAOQ base; may be null for relative address. */
+ TCGv_i64 base;
+ /* IAOQ addend; if base is null, relative to cpu_iaoq_f. */
+ int64_t disp;
+} DisasIAQE;
+
+typedef struct DisasDelayException {
+ struct DisasDelayException *next;
+ TCGLabel *lab;
+ uint32_t insn;
+ bool set_iir;
+ int8_t set_n;
+ uint8_t excp;
+ /* Saved state at parent insn. */
+ DisasIAQE iaq_f, iaq_b;
+} DisasDelayException;
+
typedef struct DisasContext {
DisasContextBase base;
CPUState *cs;
- uint64_t iaoq_f;
- uint64_t iaoq_b;
- uint64_t iaoq_n;
- TCGv_i64 iaoq_n_var;
+ /* IAQ_Front, IAQ_Back. */
+ DisasIAQE iaq_f, iaq_b;
+ /* IAQ_Next, for jumps, otherwise null for simple advance. */
+ DisasIAQE iaq_j, *iaq_n;
+
+ /* IAOQ_Front at entry to TB. */
+ uint64_t iaoq_first;
DisasCond null_cond;
TCGLabel *null_lab;
+ DisasDelayException *delay_excp_list;
TCGv_i64 zero;
uint32_t insn;
uint32_t tb_flags;
int mmu_idx;
int privilege;
+ uint32_t psw_xb;
bool psw_n_nonzero;
+ bool psw_b_next;
bool is_pa20;
bool insn_start_updated;
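
The invariant behind DisasIAQE (sketch): with CF_PCREL, an entry whose base and space are both null is a pure displacement from the IAOQ_Front value at entry to the TB, so sequential advance and direct branches stay compile-time constants:

    /* Effective offset of an entry e, as copy_iaoq_entry() computes it:
     *     (e.base ? e.base : cpu_iaoq_f) + e.disp
     * Sequential advance:  iaq_b = iaqe_incr(&iaq_f, 4);
     * Direct branch:       iaq_j = iaqe_branchi(ctx, disp);
     *                      -> iaq_j.disp = iaq_f.disp + 8 + disp */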
@@ -238,6 +264,7 @@ static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;
+static TCGv_i32 cpu_psw_xb;
void hppa_translate_init(void)
{
@@ -290,6 +317,9 @@ void hppa_translate_init(void)
*v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
}
+ cpu_psw_xb = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUHPPAState, psw_xb),
+ "psw_xb");
cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUHPPAState, iasq_f),
"iasq_f");
@@ -332,47 +362,32 @@ static DisasCond cond_make_n(void)
};
}
-static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
+static DisasCond cond_make_tt(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}
-static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
+static DisasCond cond_make_ti(TCGCond c, TCGv_i64 a0, uint64_t imm)
{
- return cond_make_tmp(c, a0, tcg_constant_i64(0));
+ return cond_make_tt(c, a0, tcg_constant_i64(imm));
}
-static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
+static DisasCond cond_make_vi(TCGCond c, TCGv_i64 a0, uint64_t imm)
{
TCGv_i64 tmp = tcg_temp_new_i64();
tcg_gen_mov_i64(tmp, a0);
- return cond_make_0_tmp(c, tmp);
+ return cond_make_ti(c, tmp, imm);
}
-static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
+static DisasCond cond_make_vv(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
tcg_gen_mov_i64(t0, a0);
tcg_gen_mov_i64(t1, a1);
- return cond_make_tmp(c, t0, t1);
-}
-
-static void cond_free(DisasCond *cond)
-{
- switch (cond->c) {
- default:
- cond->a0 = NULL;
- cond->a1 = NULL;
- /* fallthru */
- case TCG_COND_ALWAYS:
- cond->c = TCG_COND_NEVER;
- break;
- case TCG_COND_NEVER:
- break;
- }
+ return cond_make_tt(c, t0, t1);
}
static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
@@ -499,6 +514,25 @@ static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
#endif
}
+/*
+ * Write a value to psw_xb, bearing in mind the known value.
+ * To be used just before exiting the TB, so do not update the known value.
+ */
+static void store_psw_xb(DisasContext *ctx, uint32_t xb)
+{
+ tcg_debug_assert(xb == 0 || xb == PSW_B);
+ if (ctx->psw_xb != xb) {
+ tcg_gen_movi_i32(cpu_psw_xb, xb);
+ }
+}
+
+/* Write a value to psw_xb, and update the known value. */
+static void set_psw_xb(DisasContext *ctx, uint32_t xb)
+{
+ store_psw_xb(ctx, xb);
+ ctx->psw_xb = xb;
+}
+
/* Skip over the implementation of an insn that has been nullified.
Use this when the insn is too complex for a conditional move. */
static void nullify_over(DisasContext *ctx)
@@ -524,7 +558,7 @@ static void nullify_over(DisasContext *ctx)
tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
ctx->null_cond.a1, ctx->null_lab);
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
}
}
@@ -542,7 +576,7 @@ static void nullify_save(DisasContext *ctx)
ctx->null_cond.a0, ctx->null_cond.a1);
ctx->psw_n_nonzero = true;
}
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
}
/* Set a PSW[N] to X. The intention is that this is used immediately
@@ -566,6 +600,8 @@ static bool nullify_end(DisasContext *ctx)
/* For NEXT, NORETURN, STALE, we can easily continue (or exit).
For UPDATED, we cannot update on the nullified path. */
assert(status != DISAS_IAQ_N_UPDATED);
+ /* Taken branches are handled manually. */
+ assert(!ctx->psw_b_next);
if (likely(null_lab == NULL)) {
/* The current insn wasn't conditional or handled the condition
@@ -594,31 +630,94 @@ static bool nullify_end(DisasContext *ctx)
return true;
}
+static bool iaqe_variable(const DisasIAQE *e)
+{
+ return e->base || e->space;
+}
+
+static DisasIAQE iaqe_incr(const DisasIAQE *e, int64_t disp)
+{
+ return (DisasIAQE){
+ .space = e->space,
+ .base = e->base,
+ .disp = e->disp + disp,
+ };
+}
+
+static DisasIAQE iaqe_branchi(DisasContext *ctx, int64_t disp)
+{
+ return (DisasIAQE){
+ .space = ctx->iaq_b.space,
+ .disp = ctx->iaq_f.disp + 8 + disp,
+ };
+}
+
+static DisasIAQE iaqe_next_absv(DisasContext *ctx, TCGv_i64 var)
+{
+ return (DisasIAQE){
+ .space = ctx->iaq_b.space,
+ .base = var,
+ };
+}
+
static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
- uint64_t ival, TCGv_i64 vval)
+ const DisasIAQE *src)
{
- uint64_t mask = gva_offset_mask(ctx->tb_flags);
+ tcg_gen_addi_i64(dest, src->base ? : cpu_iaoq_f, src->disp);
+}
- if (ival != -1) {
- tcg_gen_movi_i64(dest, ival & mask);
- return;
+static void install_iaq_entries(DisasContext *ctx, const DisasIAQE *f,
+ const DisasIAQE *b)
+{
+ DisasIAQE b_next;
+
+ if (b == NULL) {
+ b_next = iaqe_incr(f, 4);
+ b = &b_next;
}
- tcg_debug_assert(vval != NULL);
/*
- * We know that the IAOQ is already properly masked.
- * This optimization is primarily for "iaoq_f = iaoq_b".
+ * There is an edge case
+ * bv r0(rN)
+ * b,l disp,r0
+ * for which F will use cpu_iaoq_b (from the indirect branch),
+ * and B will use cpu_iaoq_f (from the direct branch).
+ * In this case we need an extra temporary.
*/
- if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
- tcg_gen_mov_i64(dest, vval);
+ if (f->base != cpu_iaoq_b) {
+ copy_iaoq_entry(ctx, cpu_iaoq_b, b);
+ copy_iaoq_entry(ctx, cpu_iaoq_f, f);
+ } else if (f->base == b->base) {
+ copy_iaoq_entry(ctx, cpu_iaoq_f, f);
+ tcg_gen_addi_i64(cpu_iaoq_b, cpu_iaoq_f, b->disp - f->disp);
} else {
- tcg_gen_andi_i64(dest, vval, mask);
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ copy_iaoq_entry(ctx, tmp, b);
+ copy_iaoq_entry(ctx, cpu_iaoq_f, f);
+ tcg_gen_mov_i64(cpu_iaoq_b, tmp);
+ }
+
+ if (f->space) {
+ tcg_gen_mov_i64(cpu_iasq_f, f->space);
+ }
+ if (b->space || f->space) {
+ tcg_gen_mov_i64(cpu_iasq_b, b->space ? : f->space);
}
}
-static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
+static void install_link(DisasContext *ctx, unsigned link, bool with_sr0)
{
- return ctx->iaoq_f + disp + 8;
+ tcg_debug_assert(ctx->null_cond.c == TCG_COND_NEVER);
+ if (!link) {
+ return;
+ }
+ DisasIAQE next = iaqe_incr(&ctx->iaq_b, 4);
+ copy_iaoq_entry(ctx, cpu_gr[link], &next);
+#ifndef CONFIG_USER_ONLY
+ if (with_sr0) {
+ tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_b);
+ }
+#endif
}
static void gen_excp_1(int exception)
@@ -628,20 +727,44 @@ static void gen_excp_1(int exception)
static void gen_excp(DisasContext *ctx, int exception)
{
- copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
- copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
+ install_iaq_entries(ctx, &ctx->iaq_f, &ctx->iaq_b);
nullify_save(ctx);
gen_excp_1(exception);
ctx->base.is_jmp = DISAS_NORETURN;
}
+static DisasDelayException *delay_excp(DisasContext *ctx, uint8_t excp)
+{
+ DisasDelayException *e = tcg_malloc(sizeof(DisasDelayException));
+
+ memset(e, 0, sizeof(*e));
+ e->next = ctx->delay_excp_list;
+ ctx->delay_excp_list = e;
+
+ e->lab = gen_new_label();
+ e->insn = ctx->insn;
+ e->set_iir = true;
+ e->set_n = ctx->psw_n_nonzero ? 0 : -1;
+ e->excp = excp;
+ e->iaq_f = ctx->iaq_f;
+ e->iaq_b = ctx->iaq_b;
+
+ return e;
+}
+
static bool gen_excp_iir(DisasContext *ctx, int exc)
{
- nullify_over(ctx);
- tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
- tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
- gen_excp(ctx, exc);
- return nullify_end(ctx);
+ if (ctx->null_cond.c == TCG_COND_NEVER) {
+ tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
+ tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
+ gen_excp(ctx, exc);
+ } else {
+ DisasDelayException *e = delay_excp(ctx, exc);
+ tcg_gen_brcond_i64(tcg_invert_cond(ctx->null_cond.c),
+ ctx->null_cond.a0, ctx->null_cond.a1, e->lab);
+ ctx->null_cond = cond_make_f();
+ }
+ return true;
}
static bool gen_illegal(DisasContext *ctx)
@@ -661,9 +784,12 @@ static bool gen_illegal(DisasContext *ctx)
} while (0)
#endif
-static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
+static bool use_goto_tb(DisasContext *ctx, const DisasIAQE *f,
+ const DisasIAQE *b)
{
- return translator_use_goto_tb(&ctx->base, dest);
+ return (!iaqe_variable(f) &&
+ (b == NULL || !iaqe_variable(b)) &&
+ translator_use_goto_tb(&ctx->base, ctx->iaoq_first + f->disp));
}
/* If the next insn is to be nullified, and it's on the same page,
@@ -672,21 +798,20 @@ static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
executing a TB that merely branches to the next TB. */
static bool use_nullify_skip(DisasContext *ctx)
{
- return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
- && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
+ return (!(tb_cflags(ctx->base.tb) & CF_BP_PAGE)
+ && !iaqe_variable(&ctx->iaq_b)
+ && (((ctx->iaoq_first + ctx->iaq_b.disp) ^ ctx->iaoq_first)
+ & TARGET_PAGE_MASK) == 0);
}
static void gen_goto_tb(DisasContext *ctx, int which,
- uint64_t f, uint64_t b)
+ const DisasIAQE *f, const DisasIAQE *b)
{
- if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
+ install_iaq_entries(ctx, f, b);
+ if (use_goto_tb(ctx, f, b)) {
tcg_gen_goto_tb(which);
- copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
- copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
tcg_gen_exit_tb(ctx->base.tb, which);
} else {
- copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
- copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
tcg_gen_lookup_and_goto_ptr();
}
}
@@ -709,28 +834,36 @@ static bool cond_need_cb(int c)
static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
TCGv_i64 res, TCGv_i64 uv, TCGv_i64 sv)
{
+ TCGCond sign_cond, zero_cond;
+ uint64_t sign_imm, zero_imm;
DisasCond cond;
TCGv_i64 tmp;
+ if (d) {
+ /* 64-bit condition. */
+ sign_imm = 0;
+ sign_cond = TCG_COND_LT;
+ zero_imm = 0;
+ zero_cond = TCG_COND_EQ;
+ } else {
+ /* 32-bit condition. */
+ sign_imm = 1ull << 31;
+ sign_cond = TCG_COND_TSTNE;
+ zero_imm = UINT32_MAX;
+ zero_cond = TCG_COND_TSTEQ;
+ }
+
switch (cf >> 1) {
case 0: /* Never / TR (0 / 1) */
cond = cond_make_f();
break;
case 1: /* = / <> (Z / !Z) */
- if (!d) {
- tmp = tcg_temp_new_i64();
- tcg_gen_ext32u_i64(tmp, res);
- res = tmp;
- }
- cond = cond_make_0(TCG_COND_EQ, res);
+ cond = cond_make_vi(zero_cond, res, zero_imm);
break;
case 2: /* < / >= (N ^ V / !(N ^ V) */
tmp = tcg_temp_new_i64();
tcg_gen_xor_i64(tmp, res, sv);
- if (!d) {
- tcg_gen_ext32s_i64(tmp, tmp);
- }
- cond = cond_make_0_tmp(TCG_COND_LT, tmp);
+ cond = cond_make_ti(sign_cond, tmp, sign_imm);
break;
case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
/*
@@ -738,45 +871,29 @@ static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
* (N ^ V) | Z
* ((res < 0) ^ (sv < 0)) | !res
* ((res ^ sv) < 0) | !res
- * (~(res ^ sv) >= 0) | !res
- * !(~(res ^ sv) >> 31) | !res
- * !(~(res ^ sv) >> 31 & res)
+ * ((res ^ sv) < 0 ? 1 : !res)
+ * !((res ^ sv) < 0 ? 0 : res)
*/
tmp = tcg_temp_new_i64();
- tcg_gen_eqv_i64(tmp, res, sv);
- if (!d) {
- tcg_gen_sextract_i64(tmp, tmp, 31, 1);
- tcg_gen_and_i64(tmp, tmp, res);
- tcg_gen_ext32u_i64(tmp, tmp);
- } else {
- tcg_gen_sari_i64(tmp, tmp, 63);
- tcg_gen_and_i64(tmp, tmp, res);
- }
- cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
+ tcg_gen_xor_i64(tmp, res, sv);
+ tcg_gen_movcond_i64(sign_cond, tmp,
+ tmp, tcg_constant_i64(sign_imm),
+ ctx->zero, res);
+ cond = cond_make_ti(zero_cond, tmp, zero_imm);
break;
case 4: /* NUV / UV (!UV / UV) */
- cond = cond_make_0(TCG_COND_EQ, uv);
+ cond = cond_make_vi(TCG_COND_EQ, uv, 0);
break;
case 5: /* ZNV / VNZ (!UV | Z / UV & !Z) */
tmp = tcg_temp_new_i64();
tcg_gen_movcond_i64(TCG_COND_EQ, tmp, uv, ctx->zero, ctx->zero, res);
- if (!d) {
- tcg_gen_ext32u_i64(tmp, tmp);
- }
- cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
+ cond = cond_make_ti(zero_cond, tmp, zero_imm);
break;
case 6: /* SV / NSV (V / !V) */
- if (!d) {
- tmp = tcg_temp_new_i64();
- tcg_gen_ext32s_i64(tmp, sv);
- sv = tmp;
- }
- cond = cond_make_0(TCG_COND_LT, sv);
+ cond = cond_make_vi(sign_cond, sv, sign_imm);
break;
case 7: /* OD / EV */
- tmp = tcg_temp_new_i64();
- tcg_gen_andi_i64(tmp, res, 1);
- cond = cond_make_0_tmp(TCG_COND_NE, tmp);
+ cond = cond_make_vi(TCG_COND_TSTNE, res, 1);
break;
default:
g_assert_not_reached();
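
All of the d=false (32-bit) cases above reduce to test conditions against fixed immediates; restating case 1 ("= / <>") as a sketch:

    /* 64-bit:  res == 0            ->  TCG_COND_EQ,    imm = 0
     * 32-bit:  (uint32_t)res == 0  ->  TCG_COND_TSTEQ, imm = UINT32_MAX,
     *          i.e. (res & 0xffffffff) == 0, with no ext32u temporary.
     * Sign tests use TSTNE with imm = 1ull << 31 in the same way. */
    cond = cond_make_vi(zero_cond, res, zero_imm);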
@@ -838,9 +955,9 @@ static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
tcg_gen_ext32s_i64(t1, in1);
tcg_gen_ext32s_i64(t2, in2);
}
- return cond_make_tmp(tc, t1, t2);
+ return cond_make_tt(tc, t1, t2);
}
- return cond_make(tc, in1, in2);
+ return cond_make_vv(tc, in1, in2);
}
/*
@@ -856,65 +973,41 @@ static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
TCGv_i64 res)
{
TCGCond tc;
- bool ext_uns;
-
- switch (cf) {
- case 0: /* never */
- case 9: /* undef, C */
- case 11: /* undef, C & !Z */
- case 12: /* undef, V */
- return cond_make_f();
-
- case 1: /* true */
- case 8: /* undef, !C */
- case 10: /* undef, !C | Z */
- case 13: /* undef, !V */
- return cond_make_t();
+ uint64_t imm;
- case 2: /* == */
- tc = TCG_COND_EQ;
- ext_uns = true;
- break;
- case 3: /* <> */
- tc = TCG_COND_NE;
- ext_uns = true;
- break;
- case 4: /* < */
- tc = TCG_COND_LT;
- ext_uns = false;
+ switch (cf >> 1) {
+ case 0: /* never / always */
+ case 4: /* undef, C */
+ case 5: /* undef, C & !Z */
+ case 6: /* undef, V */
+ return cf & 1 ? cond_make_t() : cond_make_f();
+ case 1: /* == / <> */
+ tc = d ? TCG_COND_EQ : TCG_COND_TSTEQ;
+ imm = d ? 0 : UINT32_MAX;
break;
- case 5: /* >= */
- tc = TCG_COND_GE;
- ext_uns = false;
+ case 2: /* < / >= */
+ tc = d ? TCG_COND_LT : TCG_COND_TSTNE;
+ imm = d ? 0 : 1ull << 31;
break;
- case 6: /* <= */
- tc = TCG_COND_LE;
- ext_uns = false;
- break;
- case 7: /* > */
- tc = TCG_COND_GT;
- ext_uns = false;
+ case 3: /* <= / > */
+ tc = cf & 1 ? TCG_COND_GT : TCG_COND_LE;
+ if (!d) {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(tmp, res);
+ return cond_make_ti(tc, tmp, 0);
+ }
+ return cond_make_vi(tc, res, 0);
+ case 7: /* OD / EV */
+ tc = TCG_COND_TSTNE;
+ imm = 1;
break;
-
- case 14: /* OD */
- case 15: /* EV */
- return do_cond(ctx, cf, d, res, NULL, NULL);
-
default:
g_assert_not_reached();
}
-
- if (!d) {
- TCGv_i64 tmp = tcg_temp_new_i64();
-
- if (ext_uns) {
- tcg_gen_ext32u_i64(tmp, res);
- } else {
- tcg_gen_ext32s_i64(tmp, res);
- }
- return cond_make_0_tmp(tc, tmp);
+ if (cf & 1) {
+ tc = tcg_invert_cond(tc);
}
- return cond_make_0(tc, res);
+ return cond_make_vi(tc, res, imm);
}
/* Similar, but for shift/extract/deposit conditions. */
@@ -971,9 +1064,8 @@ static DisasCond do_unit_zero_cond(unsigned cf, bool d, TCGv_i64 res)
tmp = tcg_temp_new_i64();
tcg_gen_subi_i64(tmp, res, ones);
tcg_gen_andc_i64(tmp, tmp, res);
- tcg_gen_andi_i64(tmp, tmp, sgns);
- return cond_make_0_tmp(cf & 1 ? TCG_COND_EQ : TCG_COND_NE, tmp);
+ return cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE, tmp, sgns);
}
static TCGv_i64 get_carry(DisasContext *ctx, bool d,
@@ -1061,6 +1153,36 @@ static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
return sv;
}
+static void gen_tc(DisasContext *ctx, DisasCond *cond)
+{
+ DisasDelayException *e;
+
+ switch (cond->c) {
+ case TCG_COND_NEVER:
+ break;
+ case TCG_COND_ALWAYS:
+ gen_excp_iir(ctx, EXCP_COND);
+ break;
+ default:
+ e = delay_excp(ctx, EXCP_COND);
+ tcg_gen_brcond_i64(cond->c, cond->a0, cond->a1, e->lab);
+ /* In the non-trap path, the condition is known false. */
+ *cond = cond_make_f();
+ break;
+ }
+}
+
+static void gen_tsv(DisasContext *ctx, TCGv_i64 *sv, bool d)
+{
+ DisasCond cond = do_cond(ctx, /* SV */ 12, d, NULL, NULL, *sv);
+ DisasDelayException *e = delay_excp(ctx, EXCP_OVERFLOW);
+
+ tcg_gen_brcond_i64(cond.c, cond.a0, cond.a1, e->lab);
+
+ /* In the non-trap path, V is known zero. */
+ *sv = tcg_constant_i64(0);
+}
+
static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
TCGv_i64 in2, unsigned shift, bool is_l,
bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
@@ -1103,10 +1225,7 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
if (is_tsv || cond_need_sv(c)) {
sv = do_add_sv(ctx, dest, in1, in2, orig_in1, shift, d);
if (is_tsv) {
- if (!d) {
- tcg_gen_ext32s_i64(sv, sv);
- }
- gen_helper_tsv(tcg_env, sv);
+ gen_tsv(ctx, &sv, d);
}
}
@@ -1119,9 +1238,7 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
/* Emit any conditional trap before any writeback. */
cond = do_cond(ctx, cf, d, dest, uv, sv);
if (is_tc) {
- tmp = tcg_temp_new_i64();
- tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
- gen_helper_tcond(tcg_env, tmp);
+ gen_tc(ctx, &cond);
}
/* Write back the result. */
@@ -1132,7 +1249,6 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
save_gpr(ctx, rt, dest);
/* Install the new nullification. */
- cond_free(&ctx->null_cond);
ctx->null_cond = cond;
}
@@ -1141,6 +1257,10 @@ static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
{
TCGv_i64 tcg_r1, tcg_r2;
+ if (unlikely(is_tc && a->cf == 1)) {
+ /* Unconditional trap on condition. */
+ return gen_excp_iir(ctx, EXCP_COND);
+ }
if (a->cf) {
nullify_over(ctx);
}
@@ -1156,6 +1276,10 @@ static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
{
TCGv_i64 tcg_im, tcg_r2;
+ if (unlikely(is_tc && a->cf == 1)) {
+ /* Unconditional trap on condition. */
+ return gen_excp_iir(ctx, EXCP_COND);
+ }
if (a->cf) {
nullify_over(ctx);
}
@@ -1170,7 +1294,7 @@ static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
TCGv_i64 in2, bool is_tsv, bool is_b,
bool is_tc, unsigned cf, bool d)
{
- TCGv_i64 dest, sv, cb, cb_msb, tmp;
+ TCGv_i64 dest, sv, cb, cb_msb;
unsigned c = cf >> 1;
DisasCond cond;
@@ -1202,10 +1326,7 @@ static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
if (is_tsv || cond_need_sv(c)) {
sv = do_sub_sv(ctx, dest, in1, in2);
if (is_tsv) {
- if (!d) {
- tcg_gen_ext32s_i64(sv, sv);
- }
- gen_helper_tsv(tcg_env, sv);
+ gen_tsv(ctx, &sv, d);
}
}
@@ -1218,9 +1339,7 @@ static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
/* Emit any conditional trap before any writeback. */
if (is_tc) {
- tmp = tcg_temp_new_i64();
- tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
- gen_helper_tcond(tcg_env, tmp);
+ gen_tc(ctx, &cond);
}
/* Write back the result. */
@@ -1229,7 +1348,6 @@ static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
save_gpr(ctx, rt, dest);
/* Install the new nullification. */
- cond_free(&ctx->null_cond);
ctx->null_cond = cond;
}
@@ -1284,7 +1402,6 @@ static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
save_gpr(ctx, rt, dest);
/* Install the new nullification. */
- cond_free(&ctx->null_cond);
ctx->null_cond = cond;
}
@@ -1299,10 +1416,7 @@ static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
save_gpr(ctx, rt, dest);
/* Install the new nullification. */
- cond_free(&ctx->null_cond);
- if (cf) {
- ctx->null_cond = do_log_cond(ctx, cf, d, dest);
- }
+ ctx->null_cond = do_log_cond(ctx, cf, d, dest);
}
static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
@@ -1386,18 +1500,15 @@ static void do_unit_addsub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
tcg_gen_shri_i64(cb, cb, 1);
}
- tcg_gen_andi_i64(cb, cb, test_cb);
- cond = cond_make_0_tmp(cf & 1 ? TCG_COND_EQ : TCG_COND_NE, cb);
+ cond = cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
+ cb, test_cb);
}
if (is_tc) {
- TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
- gen_helper_tcond(tcg_env, tmp);
+ gen_tc(ctx, &cond);
}
save_gpr(ctx, rt, dest);
- cond_free(&ctx->null_cond);
ctx->null_cond = cond;
}
@@ -1764,36 +1875,43 @@ static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
/* Emit an unconditional branch to a direct target, which may or may not
have already had nullification handled. */
-static bool do_dbranch(DisasContext *ctx, uint64_t dest,
+static bool do_dbranch(DisasContext *ctx, int64_t disp,
unsigned link, bool is_n)
{
+ ctx->iaq_j = iaqe_branchi(ctx, disp);
+
if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
- if (link != 0) {
- copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
- }
- ctx->iaoq_n = dest;
+ install_link(ctx, link, false);
if (is_n) {
+ if (use_nullify_skip(ctx)) {
+ nullify_set(ctx, 0);
+ store_psw_xb(ctx, 0);
+ gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return true;
+ }
ctx->null_cond.c = TCG_COND_ALWAYS;
}
+ ctx->iaq_n = &ctx->iaq_j;
+ ctx->psw_b_next = true;
} else {
nullify_over(ctx);
- if (link != 0) {
- copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
- }
-
+ install_link(ctx, link, false);
if (is_n && use_nullify_skip(ctx)) {
nullify_set(ctx, 0);
- gen_goto_tb(ctx, 0, dest, dest + 4);
+ store_psw_xb(ctx, 0);
+ gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
} else {
nullify_set(ctx, is_n);
- gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
+ store_psw_xb(ctx, PSW_B);
+ gen_goto_tb(ctx, 0, &ctx->iaq_b, &ctx->iaq_j);
}
-
nullify_end(ctx);
nullify_set(ctx, 0);
- gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
+ store_psw_xb(ctx, 0);
+ gen_goto_tb(ctx, 1, &ctx->iaq_b, NULL);
ctx->base.is_jmp = DISAS_NORETURN;
}
return true;
@@ -1804,7 +1922,7 @@ static bool do_dbranch(DisasContext *ctx, uint64_t dest,
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
DisasCond *cond)
{
- uint64_t dest = iaoq_dest(ctx, disp);
+ DisasIAQE next;
TCGLabel *taken = NULL;
TCGCond c = cond->c;
bool n;
@@ -1813,45 +1931,43 @@ static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
/* Handle TRUE and NEVER as direct branches. */
if (c == TCG_COND_ALWAYS) {
- return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
- }
- if (c == TCG_COND_NEVER) {
- return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
+ return do_dbranch(ctx, disp, 0, is_n && disp >= 0);
}
taken = gen_new_label();
tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
- cond_free(cond);
/* Not taken: Condition not satisfied; nullify on backward branches. */
n = is_n && disp < 0;
if (n && use_nullify_skip(ctx)) {
nullify_set(ctx, 0);
- gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
+ store_psw_xb(ctx, 0);
+ next = iaqe_incr(&ctx->iaq_b, 4);
+ gen_goto_tb(ctx, 0, &next, NULL);
} else {
if (!n && ctx->null_lab) {
gen_set_label(ctx->null_lab);
ctx->null_lab = NULL;
}
nullify_set(ctx, n);
- if (ctx->iaoq_n == -1) {
- /* The temporary iaoq_n_var died at the branch above.
- Regenerate it here instead of saving it. */
- tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
- }
- gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
+ store_psw_xb(ctx, 0);
+ gen_goto_tb(ctx, 0, &ctx->iaq_b, NULL);
}
gen_set_label(taken);
/* Taken: Condition satisfied; nullify on forward branches. */
n = is_n && disp >= 0;
+
+ next = iaqe_branchi(ctx, disp);
if (n && use_nullify_skip(ctx)) {
nullify_set(ctx, 0);
- gen_goto_tb(ctx, 1, dest, dest + 4);
+ store_psw_xb(ctx, 0);
+ gen_goto_tb(ctx, 1, &next, NULL);
} else {
nullify_set(ctx, n);
- gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
+ store_psw_xb(ctx, PSW_B);
+ gen_goto_tb(ctx, 1, &ctx->iaq_b, &next);
}
/* Not taken: the branch itself was nullified. */
@@ -1865,89 +1981,45 @@ static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
return true;
}
-/* Emit an unconditional branch to an indirect target. This handles
- nullification of the branch itself. */
-static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
- unsigned link, bool is_n)
+/*
+ * Emit an unconditional branch to an indirect target, in ctx->iaq_j.
+ * This handles nullification of the branch itself.
+ */
+static bool do_ibranch(DisasContext *ctx, unsigned link,
+ bool with_sr0, bool is_n)
{
- TCGv_i64 a0, a1, next, tmp;
- TCGCond c;
-
- assert(ctx->null_lab == NULL);
-
- if (ctx->null_cond.c == TCG_COND_NEVER) {
- if (link != 0) {
- copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
- }
- next = tcg_temp_new_i64();
- tcg_gen_mov_i64(next, dest);
+ if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
+ install_link(ctx, link, with_sr0);
if (is_n) {
if (use_nullify_skip(ctx)) {
- copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
- tcg_gen_addi_i64(next, next, 4);
- copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
+ install_iaq_entries(ctx, &ctx->iaq_j, NULL);
nullify_set(ctx, 0);
ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
return true;
}
ctx->null_cond.c = TCG_COND_ALWAYS;
}
- ctx->iaoq_n = -1;
- ctx->iaoq_n_var = next;
- } else if (is_n && use_nullify_skip(ctx)) {
- /* The (conditional) branch, B, nullifies the next insn, N,
- and we're allowed to skip execution N (no single-step or
- tracepoint in effect). Since the goto_ptr that we must use
- for the indirect branch consumes no special resources, we
- can (conditionally) skip B and continue execution. */
- /* The use_nullify_skip test implies we have a known control path. */
- tcg_debug_assert(ctx->iaoq_b != -1);
- tcg_debug_assert(ctx->iaoq_n != -1);
-
- /* We do have to handle the non-local temporary, DEST, before
- branching. Since IOAQ_F is not really live at this point, we
- can simply store DEST optimistically. Similarly with IAOQ_B. */
- copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
- next = tcg_temp_new_i64();
- tcg_gen_addi_i64(next, dest, 4);
- copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
-
- nullify_over(ctx);
- if (link != 0) {
- copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
- }
- tcg_gen_lookup_and_goto_ptr();
- return nullify_end(ctx);
- } else {
- c = ctx->null_cond.c;
- a0 = ctx->null_cond.a0;
- a1 = ctx->null_cond.a1;
-
- tmp = tcg_temp_new_i64();
- next = tcg_temp_new_i64();
-
- copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
- tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
- ctx->iaoq_n = -1;
- ctx->iaoq_n_var = next;
+ ctx->iaq_n = &ctx->iaq_j;
+ ctx->psw_b_next = true;
+ return true;
+ }
- if (link != 0) {
- tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
- }
+ nullify_over(ctx);
- if (is_n) {
- /* The branch nullifies the next insn, which means the state of N
- after the branch is the inverse of the state of N that applied
- to the branch. */
- tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
- cond_free(&ctx->null_cond);
- ctx->null_cond = cond_make_n();
- ctx->psw_n_nonzero = true;
- } else {
- cond_free(&ctx->null_cond);
- }
+ install_link(ctx, link, with_sr0);
+ if (is_n && use_nullify_skip(ctx)) {
+ install_iaq_entries(ctx, &ctx->iaq_j, NULL);
+ nullify_set(ctx, 0);
+ store_psw_xb(ctx, 0);
+ } else {
+ install_iaq_entries(ctx, &ctx->iaq_b, &ctx->iaq_j);
+ nullify_set(ctx, is_n);
+ store_psw_xb(ctx, PSW_B);
}
- return true;
+
+ tcg_gen_lookup_and_goto_ptr();
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return nullify_end(ctx);
}
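After the rewrite, do_ibranch emits only two epilogues. Modeling the queue entries as plain values makes the difference visible; this sketch assumes that install_iaq_entries(ctx, f, NULL) means "back = front + 4", as the do_page_zero use below suggests:

    #include <stdbool.h>
    #include <stdint.h>

    /* Model of the two install_iaq_entries() calls in do_ibranch;
     * j is the jump target held in ctx->iaq_j. */
    static void ibranch_epilogue(uint64_t *front, uint64_t *back, uint64_t j,
                                 bool nullify_skip, bool is_n, int *psw_n)
    {
        if (nullify_skip) {
            /* Skip the (nullified) delay slot and land on the target. */
            *front = j;
            *back = j + 4;
            *psw_n = 0;
        } else {
            /* Take the delay slot; the target becomes the back entry. */
            *front = *back;
            *back = j;
            *psw_n = is_n;
        }
    }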
/* Implement
@@ -1959,21 +2031,20 @@ static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
*/
static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
{
- TCGv_i64 dest;
+ TCGv_i64 dest = tcg_temp_new_i64();
switch (ctx->privilege) {
case 0:
/* Privilege 0 is maximum and is allowed to decrease. */
- return offset;
+ tcg_gen_mov_i64(dest, offset);
+ break;
case 3:
/* Privilege 3 is minimum and is never allowed to increase. */
- dest = tcg_temp_new_i64();
tcg_gen_ori_i64(dest, offset, 3);
break;
default:
- dest = tcg_temp_new_i64();
tcg_gen_andi_i64(dest, offset, -4);
tcg_gen_ori_i64(dest, dest, ctx->privilege);
- tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
+ tcg_gen_umax_i64(dest, dest, offset);
break;
}
return dest;
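The tcg_gen_umax_i64 is a drop-in for the old GTU movcond: both select the numerically larger, i.e. less privileged, of the clamped and the requested offset. A scalar model of the whole function (a hypothetical helper, not the TCG code):

    #include <stdint.h>

    /* Scalar model of do_ibranch_priv: PA-RISC privilege lives in the
     * low two offset bits, 0 = most privileged; a branch may only move
     * toward larger (less privileged) values. */
    static uint64_t ibranch_priv(uint64_t offset, unsigned cur_priv)
    {
        uint64_t dest;

        switch (cur_priv) {
        case 0:  /* maximum privilege, allowed to decrease */
            return offset;
        case 3:  /* minimum privilege, never allowed to increase */
            return offset | 3;
        default:
            dest = (offset & (uint64_t)-4) | cur_priv;
            return dest > offset ? dest : offset;   /* the umax */
        }
    }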
@@ -1989,7 +2060,7 @@ static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
aforementioned BE. */
static void do_page_zero(DisasContext *ctx)
{
- TCGv_i64 tmp;
+ assert(ctx->iaq_f.disp == 0);
/* If by some means we get here with PSW[N]=1, that implies that
the B,GATE instruction would be skipped, and we'd fault on the
@@ -2006,15 +2077,12 @@ static void do_page_zero(DisasContext *ctx)
g_assert_not_reached();
}
- /* Check that we didn't arrive here via some means that allowed
- non-sequential instruction execution. Normally the PSW[B] bit
- detects this by disallowing the B,GATE instruction to execute
- under such conditions. */
- if (ctx->iaoq_b != ctx->iaoq_f + 4) {
+ /* If PSW[B] is set, the B,GATE insn would trap. */
+ if (ctx->psw_xb & PSW_B) {
goto do_sigill;
}
- switch (ctx->iaoq_f & -4) {
+ switch (ctx->base.pc_first) {
case 0x00: /* Null pointer call */
gen_excp_1(EXCP_IMP);
ctx->base.is_jmp = DISAS_NORETURN;
@@ -2026,13 +2094,15 @@ static void do_page_zero(DisasContext *ctx)
break;
case 0xe0: /* SET_THREAD_POINTER */
- tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
- tmp = tcg_temp_new_i64();
- tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
- copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
- tcg_gen_addi_i64(tmp, tmp, 4);
- copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
- ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
+ {
+ DisasIAQE next = { .base = tcg_temp_new_i64() };
+
+ tcg_gen_st_i64(cpu_gr[26], tcg_env,
+ offsetof(CPUHPPAState, cr[27]));
+ tcg_gen_ori_i64(next.base, cpu_gr[31], PRIV_USER);
+ install_iaq_entries(ctx, &next, NULL);
+ ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
+ }
break;
case 0x100: /* SYSCALL */
@@ -2051,7 +2121,7 @@ static void do_page_zero(DisasContext *ctx)
static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
return true;
}
@@ -2065,18 +2135,19 @@ static bool trans_sync(DisasContext *ctx, arg_sync *a)
/* No point in nullifying the memory barrier. */
tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
return true;
}
static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
{
- unsigned rt = a->t;
- TCGv_i64 tmp = dest_gpr(ctx, rt);
- tcg_gen_movi_i64(tmp, ctx->iaoq_f & ~3ULL);
- save_gpr(ctx, rt, tmp);
+ TCGv_i64 dest = dest_gpr(ctx, a->t);
- cond_free(&ctx->null_cond);
+ copy_iaoq_entry(ctx, dest, &ctx->iaq_f);
+ tcg_gen_andi_i64(dest, dest, -4);
+
+ save_gpr(ctx, a->t, dest);
+ ctx->null_cond = cond_make_f();
return true;
}
@@ -2091,7 +2162,7 @@ static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
save_gpr(ctx, rt, t0);
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
return true;
}
@@ -2136,7 +2207,7 @@ static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
save_gpr(ctx, rt, tmp);
done:
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
return true;
}
@@ -2176,7 +2247,7 @@ static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
save_or_nullify(ctx, cpu_sar, tmp);
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
return true;
}
@@ -2250,7 +2321,7 @@ static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
save_or_nullify(ctx, cpu_sar, tmp);
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
return true;
}
@@ -2267,7 +2338,7 @@ static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
#endif
save_gpr(ctx, a->t, dest);
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
return true;
}
@@ -2367,6 +2438,7 @@ static bool trans_halt(DisasContext *ctx, arg_halt *a)
{
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
+ set_psw_xb(ctx, 0);
nullify_over(ctx);
gen_helper_halt(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
@@ -2378,6 +2450,7 @@ static bool trans_reset(DisasContext *ctx, arg_reset *a)
{
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
+ set_psw_xb(ctx, 0);
nullify_over(ctx);
gen_helper_reset(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
@@ -2429,7 +2502,7 @@ static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
tcg_gen_add_i64(dest, src1, src2);
save_gpr(ctx, a->b, dest);
}
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
return true;
}
@@ -2671,7 +2744,7 @@ static bool trans_lci(DisasContext *ctx, arg_lci *a)
since the entire address space is coherent. */
save_gpr(ctx, a->t, ctx->zero);
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
return true;
}
@@ -2748,7 +2821,7 @@ static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
unsigned rt = a->t;
if (rt == 0) { /* NOP */
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
return true;
}
if (r2 == 0) { /* COPY */
@@ -2759,7 +2832,7 @@ static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
} else {
save_gpr(ctx, rt, cpu_gr[r1]);
}
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
return true;
}
#ifndef CONFIG_USER_ONLY
@@ -2772,11 +2845,13 @@ static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
/* No need to check for supervisor, as userland can only pause
until the next timer interrupt. */
+
+ set_psw_xb(ctx, 0);
+
nullify_over(ctx);
/* Advance the instruction queue. */
- copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
- copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
+ install_iaq_entries(ctx, &ctx->iaq_b, NULL);
nullify_set(ctx, 0);
/* Tell the qemu main loop to halt until this cpu has work. */
@@ -2825,11 +2900,7 @@ static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
tcg_gen_xor_i64(dest, tcg_r1, tcg_r2);
save_gpr(ctx, a->t, dest);
- cond_free(&ctx->null_cond);
- if (a->cf) {
- ctx->null_cond = do_unit_zero_cond(a->cf, a->d, dest);
- }
-
+ ctx->null_cond = do_unit_zero_cond(a->cf, a->d, dest);
return nullify_end(ctx);
}
@@ -2855,7 +2926,7 @@ static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
tcg_gen_subi_i64(tmp, tmp, 1);
}
save_gpr(ctx, a->t, tmp);
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
return true;
}
@@ -3381,7 +3452,7 @@ static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
tcg_gen_movi_i64(tcg_rt, a->i);
save_gpr(ctx, a->t, tcg_rt);
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
return true;
}
@@ -3392,7 +3463,7 @@ static bool trans_addil(DisasContext *ctx, arg_addil *a)
tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
save_gpr(ctx, 1, tcg_r1);
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
return true;
}
@@ -3408,7 +3479,7 @@ static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
}
save_gpr(ctx, a->t, tcg_rt);
- cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_f();
return true;
}
@@ -3525,24 +3596,18 @@ static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
tcg_gen_shl_i64(tmp, tcg_r, tmp);
}
- cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
+ cond = cond_make_ti(a->c ? TCG_COND_GE : TCG_COND_LT, tmp, 0);
return do_cbranch(ctx, a->disp, a->n, &cond);
}
static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
{
- TCGv_i64 tmp, tcg_r;
DisasCond cond;
- int p;
+ int p = a->p | (a->d ? 0 : 32);
nullify_over(ctx);
-
- tmp = tcg_temp_new_i64();
- tcg_r = load_gpr(ctx, a->r);
- p = a->p | (a->d ? 0 : 32);
- tcg_gen_shli_i64(tmp, tcg_r, p);
-
- cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
+ cond = cond_make_vi(a->c ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
+ load_gpr(ctx, a->r), 1ull << (63 - p));
return do_cbranch(ctx, a->disp, a->n, &cond);
}
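The TSTNE/TSTEQ form tests bit 63 - p of the register in place instead of shifting it into the sign bit. The two formulations are equivalent, which a quick scalar check confirms (standalone C, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* Old: shift bit (63 - p) into the sign bit, then test LT/GE.
       New: test the bit in place with TSTNE/TSTEQ and a mask. */
    static void bb_imm_equiv(uint64_t r, unsigned p)   /* 0 <= p <= 63 */
    {
        uint64_t mask = 1ull << (63 - p);
        int old_taken = (int64_t)(r << p) < 0;      /* TCG_COND_LT    */
        int new_taken = (r & mask) != 0;            /* TCG_COND_TSTNE */

        assert(old_taken == new_taken);
    }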
@@ -3640,10 +3705,7 @@ static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
save_gpr(ctx, a->t, dest);
/* Install the new nullification. */
- cond_free(&ctx->null_cond);
- if (a->c) {
- ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
- }
+ ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
return nullify_end(ctx);
}
@@ -3683,10 +3745,7 @@ static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
save_gpr(ctx, a->t, dest);
/* Install the new nullification. */
- cond_free(&ctx->null_cond);
- if (a->c) {
- ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
- }
+ ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
return nullify_end(ctx);
}
@@ -3728,10 +3787,7 @@ static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
save_gpr(ctx, a->t, dest);
/* Install the new nullification. */
- cond_free(&ctx->null_cond);
- if (a->c) {
- ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
- }
+ ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
return nullify_end(ctx);
}
@@ -3764,10 +3820,7 @@ static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
save_gpr(ctx, a->t, dest);
/* Install the new nullification. */
- cond_free(&ctx->null_cond);
- if (a->c) {
- ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
- }
+ ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
return nullify_end(ctx);
}
@@ -3804,10 +3857,7 @@ static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
save_gpr(ctx, a->t, dest);
/* Install the new nullification. */
- cond_free(&ctx->null_cond);
- if (a->c) {
- ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
- }
+ ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
return nullify_end(ctx);
}
@@ -3840,10 +3890,7 @@ static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
save_gpr(ctx, a->t, dest);
/* Install the new nullification. */
- cond_free(&ctx->null_cond);
- if (a->c) {
- ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
- }
+ ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
return nullify_end(ctx);
}
@@ -3877,10 +3924,7 @@ static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
save_gpr(ctx, rt, dest);
/* Install the new nullification. */
- cond_free(&ctx->null_cond);
- if (c) {
- ctx->null_cond = do_sed_cond(ctx, c, d, dest);
- }
+ ctx->null_cond = do_sed_cond(ctx, c, d, dest);
return nullify_end(ctx);
}
@@ -3910,104 +3954,53 @@ static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
static bool trans_be(DisasContext *ctx, arg_be *a)
{
- TCGv_i64 tmp;
-
-#ifdef CONFIG_USER_ONLY
- /* ??? It seems like there should be a good way of using
- "be disp(sr2, r0)", the canonical gateway entry mechanism
- to our advantage. But that appears to be inconvenient to
- manage along side branch delay slots. Therefore we handle
- entry into the gateway page via absolute address. */
- /* Since we don't implement spaces, just branch. Do notice the special
- case of "be disp(*,r0)" using a direct branch to disp, so that we can
- goto_tb to the TB containing the syscall. */
- if (a->b == 0) {
- return do_dbranch(ctx, a->disp, a->l, a->n);
- }
-#else
- nullify_over(ctx);
+#ifndef CONFIG_USER_ONLY
+ ctx->iaq_j.space = tcg_temp_new_i64();
+ load_spr(ctx, ctx->iaq_j.space, a->sp);
#endif
- tmp = tcg_temp_new_i64();
- tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
- tmp = do_ibranch_priv(ctx, tmp);
+ ctx->iaq_j.base = tcg_temp_new_i64();
+ ctx->iaq_j.disp = 0;
-#ifdef CONFIG_USER_ONLY
- return do_ibranch(ctx, tmp, a->l, a->n);
-#else
- TCGv_i64 new_spc = tcg_temp_new_i64();
+ tcg_gen_addi_i64(ctx->iaq_j.base, load_gpr(ctx, a->b), a->disp);
+ ctx->iaq_j.base = do_ibranch_priv(ctx, ctx->iaq_j.base);
- load_spr(ctx, new_spc, a->sp);
- if (a->l) {
- copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
- tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_b);
- }
- if (a->n && use_nullify_skip(ctx)) {
- copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
- tcg_gen_addi_i64(tmp, tmp, 4);
- copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
- tcg_gen_mov_i64(cpu_iasq_f, new_spc);
- tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
- nullify_set(ctx, 0);
- } else {
- copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
- if (ctx->iaoq_b == -1) {
- tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
- }
- copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
- tcg_gen_mov_i64(cpu_iasq_b, new_spc);
- nullify_set(ctx, a->n);
- }
- tcg_gen_lookup_and_goto_ptr();
- ctx->base.is_jmp = DISAS_NORETURN;
- return nullify_end(ctx);
-#endif
+ return do_ibranch(ctx, a->l, true, a->n);
}
static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
- return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
+ return do_dbranch(ctx, a->disp, a->l, a->n);
}
static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
- uint64_t dest = iaoq_dest(ctx, a->disp);
-
- nullify_over(ctx);
+ int64_t disp = a->disp;
+ bool indirect = false;
- /* Make sure the caller hasn't done something weird with the queue.
- * ??? This is not quite the same as the PSW[B] bit, which would be
- * expensive to track. Real hardware will trap for
- * b gateway
- * b gateway+4 (in delay slot of first branch)
- * However, checking for a non-sequential instruction queue *will*
- * diagnose the security hole
- * b gateway
- * b evil
- * in which instructions at evil would run with increased privs.
- */
- if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
+ /* Trap if PSW[B] is set. */
+ if (ctx->psw_xb & PSW_B) {
return gen_illegal(ctx);
}
+ nullify_over(ctx);
+
#ifndef CONFIG_USER_ONLY
- if (ctx->tb_flags & PSW_C) {
- int type = hppa_artype_for_page(cpu_env(ctx->cs), ctx->base.pc_next);
- /* If we could not find a TLB entry, then we need to generate an
- ITLB miss exception so the kernel will provide it.
- The resulting TLB fill operation will invalidate this TB and
- we will re-translate, at which point we *will* be able to find
- the TLB entry and determine if this is in fact a gateway page. */
- if (type < 0) {
- gen_excp(ctx, EXCP_ITLB_MISS);
- return true;
- }
- /* No change for non-gateway pages or for priv decrease. */
- if (type >= 4 && type - 4 < ctx->privilege) {
- dest = deposit64(dest, 0, 2, type - 4);
- }
+ if (ctx->privilege == 0) {
+ /* Privilege cannot decrease. */
+ } else if (!(ctx->tb_flags & PSW_C)) {
+ /* With paging disabled, priv becomes 0. */
+ disp -= ctx->privilege;
} else {
- dest &= -4; /* priv = 0 */
+ /* Adjust the dest offset for the privilege change from the PTE. */
+ TCGv_i64 off = tcg_temp_new_i64();
+
+ copy_iaoq_entry(ctx, off, &ctx->iaq_f);
+ gen_helper_b_gate_priv(off, tcg_env, off);
+
+ ctx->iaq_j.base = off;
+ ctx->iaq_j.disp = disp + 8;
+ indirect = true;
}
#endif
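With paging enabled, the privilege promotion now happens at runtime inside gen_helper_b_gate_priv; the deleted block shows the rule that was previously applied at translate time. A scalar sketch of that removed rule:

    #include <stdint.h>

    /* Gateway promotion from the removed translate-time code: page
     * types 4..7 map to privilege 0..3, and the branch target's low
     * bits take that value only when it is an increase in privilege. */
    static uint64_t gate_promote(uint64_t dest, int page_type,
                                 unsigned cur_priv)
    {
        if (page_type >= 4 && page_type - 4 < (int)cur_priv) {
            dest = (dest & ~3ull) | (page_type - 4);
        }
        return dest;
    }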
@@ -4020,20 +4013,29 @@ static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
save_gpr(ctx, a->l, tmp);
}
- return do_dbranch(ctx, dest, 0, a->n);
+ if (indirect) {
+ return do_ibranch(ctx, 0, false, a->n);
+ }
+ return do_dbranch(ctx, disp, 0, a->n);
}
static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
if (a->x) {
- TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
- tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
+ DisasIAQE next = iaqe_incr(&ctx->iaq_f, 8);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
/* The computation here never changes privilege level. */
- return do_ibranch(ctx, tmp, a->l, a->n);
+ copy_iaoq_entry(ctx, t0, &next);
+ tcg_gen_shli_i64(t1, load_gpr(ctx, a->x), 3);
+ tcg_gen_add_i64(t0, t0, t1);
+
+ ctx->iaq_j = iaqe_next_absv(ctx, t0);
+ return do_ibranch(ctx, a->l, false, a->n);
} else {
/* BLR R0,RX is a good way to load PC+8 into RX. */
- return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
+ return do_dbranch(ctx, 0, a->l, a->n);
}
}
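For BLR with an index register, the target is the same offset computation as before, now routed through an iaq entry instead of a bare TCGv. In scalar terms:

    #include <stdint.h>

    /* BLR target: eight bytes past the branch, plus the index register
     * scaled by 8; the privilege bits are untouched. */
    static uint64_t blr_target(uint64_t iaoq_front, uint64_t gr_x)
    {
        return iaoq_front + 8 + (gr_x << 3);
    }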
@@ -4049,34 +4051,22 @@ static bool trans_bv(DisasContext *ctx, arg_bv *a)
tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
}
dest = do_ibranch_priv(ctx, dest);
- return do_ibranch(ctx, dest, 0, a->n);
+ ctx->iaq_j = iaqe_next_absv(ctx, dest);
+
+ return do_ibranch(ctx, 0, false, a->n);
}
static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
- TCGv_i64 dest;
-
-#ifdef CONFIG_USER_ONLY
- dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
- return do_ibranch(ctx, dest, a->l, a->n);
-#else
- nullify_over(ctx);
- dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
+ TCGv_i64 b = load_gpr(ctx, a->b);
- copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
- if (ctx->iaoq_b == -1) {
- tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
- }
- copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
- tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
- if (a->l) {
- copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
- }
- nullify_set(ctx, a->n);
- tcg_gen_lookup_and_goto_ptr();
- ctx->base.is_jmp = DISAS_NORETURN;
- return nullify_end(ctx);
+#ifndef CONFIG_USER_ONLY
+ ctx->iaq_j.space = space_select(ctx, 0, b);
#endif
+ ctx->iaq_j.base = do_ibranch_priv(ctx, b);
+ ctx->iaq_j.disp = 0;
+
+ return do_ibranch(ctx, a->l, false, a->n);
}
static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
@@ -4377,6 +4367,8 @@ static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
+ TCGCond tc = TCG_COND_TSTNE;
+ uint32_t mask;
TCGv_i64 t;
nullify_over(ctx);
@@ -4385,55 +4377,41 @@ static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
if (a->y == 1) {
- int mask;
- bool inv = false;
-
switch (a->c) {
case 0: /* simple */
- tcg_gen_andi_i64(t, t, 0x4000000);
- ctx->null_cond = cond_make_0(TCG_COND_NE, t);
- goto done;
+ mask = R_FPSR_C_MASK;
+ break;
case 2: /* rej */
- inv = true;
+ tc = TCG_COND_TSTEQ;
/* fallthru */
case 1: /* acc */
- mask = 0x43ff800;
+ mask = R_FPSR_C_MASK | R_FPSR_CQ_MASK;
break;
case 6: /* rej8 */
- inv = true;
+ tc = TCG_COND_TSTEQ;
/* fallthru */
case 5: /* acc8 */
- mask = 0x43f8000;
+ mask = R_FPSR_C_MASK | R_FPSR_CQ0_6_MASK;
break;
case 9: /* acc6 */
- mask = 0x43e0000;
+ mask = R_FPSR_C_MASK | R_FPSR_CQ0_4_MASK;
break;
case 13: /* acc4 */
- mask = 0x4380000;
+ mask = R_FPSR_C_MASK | R_FPSR_CQ0_2_MASK;
break;
case 17: /* acc2 */
- mask = 0x4200000;
+ mask = R_FPSR_C_MASK | R_FPSR_CQ0_MASK;
break;
default:
gen_illegal(ctx);
return true;
}
- if (inv) {
- TCGv_i64 c = tcg_constant_i64(mask);
- tcg_gen_or_i64(t, t, c);
- ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
- } else {
- tcg_gen_andi_i64(t, t, mask);
- ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
- }
} else {
unsigned cbit = (a->y ^ 1) - 1;
-
- tcg_gen_extract_i64(t, t, 21 - cbit, 1);
- ctx->null_cond = cond_make_0(TCG_COND_NE, t);
+ mask = R_FPSR_CA0_MASK >> cbit;
}
- done:
+ ctx->null_cond = cond_make_ti(tc, t, mask);
return nullify_end(ctx);
}
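The R_FPSR_* names come from the new registerfields.h definitions mentioned in the series, and the deleted literals pin down the values they must have. A standalone consistency check; the #defines below are assumptions mirroring those literals, not the real header:

    #include <assert.h>

    #define R_FPSR_C_MASK      0x4000000u   /* bit 26 */
    #define R_FPSR_CQ_MASK     0x03ff800u   /* bits 11..21 */
    #define R_FPSR_CQ0_6_MASK  0x03f8000u   /* bits 15..21 */
    #define R_FPSR_CQ0_4_MASK  0x03e0000u   /* bits 17..21 */
    #define R_FPSR_CQ0_2_MASK  0x0380000u   /* bits 19..21 */
    #define R_FPSR_CQ0_MASK    0x0200000u   /* bit 21 */

    int main(void)
    {
        /* Each union reproduces a literal deleted above. */
        assert((R_FPSR_C_MASK | R_FPSR_CQ_MASK)    == 0x43ff800u); /* acc  */
        assert((R_FPSR_C_MASK | R_FPSR_CQ0_6_MASK) == 0x43f8000u); /* acc8 */
        assert((R_FPSR_C_MASK | R_FPSR_CQ0_4_MASK) == 0x43e0000u); /* acc6 */
        assert((R_FPSR_C_MASK | R_FPSR_CQ0_2_MASK) == 0x4380000u); /* acc4 */
        assert((R_FPSR_C_MASK | R_FPSR_CQ0_MASK)   == 0x4200000u); /* acc2 */
        return 0;
    }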
@@ -4639,34 +4617,38 @@ static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a)
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ uint64_t cs_base;
int bound;
ctx->cs = cs;
ctx->tb_flags = ctx->base.tb->flags;
ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
+ ctx->psw_xb = ctx->tb_flags & (PSW_X | PSW_B);
#ifdef CONFIG_USER_ONLY
- ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
+ ctx->privilege = PRIV_USER;
ctx->mmu_idx = MMU_USER_IDX;
- ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
- ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
ctx->mmu_idx = (ctx->tb_flags & PSW_D
? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
: ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
+#endif
- /* Recover the IAOQ values from the GVA + PRIV. */
- uint64_t cs_base = ctx->base.tb->cs_base;
- uint64_t iasq_f = cs_base & ~0xffffffffull;
- int32_t diff = cs_base;
+ cs_base = ctx->base.tb->cs_base;
+ ctx->iaoq_first = ctx->base.pc_first + ctx->privilege;
- ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
- ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
-#endif
- ctx->iaoq_n = -1;
- ctx->iaoq_n_var = NULL;
+ if (unlikely(cs_base & CS_BASE_DIFFSPACE)) {
+ ctx->iaq_b.space = cpu_iasq_b;
+ ctx->iaq_b.base = cpu_iaoq_b;
+ } else if (unlikely(cs_base & CS_BASE_DIFFPAGE)) {
+ ctx->iaq_b.base = cpu_iaoq_b;
+ } else {
+ uint64_t iaoq_f_pgofs = ctx->iaoq_first & ~TARGET_PAGE_MASK;
+ uint64_t iaoq_b_pgofs = cs_base & ~TARGET_PAGE_MASK;
+ ctx->iaq_b.disp = iaoq_b_pgofs - iaoq_f_pgofs;
+ }
ctx->zero = tcg_constant_i64(0);
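When neither CS_BASE flag is set, front and back of the queue are on the same page and iaq_b collapses to a constant displacement from the front. A scalar version of that computation; the TARGET_PAGE_MASK value here is an assumption:

    #include <stdint.h>

    #define TARGET_PAGE_MASK ((uint64_t)-4096)   /* assumed 4 KiB pages */

    /* Same-page case above: the back-of-queue displacement is the
     * difference of the two page offsets. */
    static int64_t iaq_b_disp(uint64_t iaoq_first, uint64_t cs_base)
    {
        uint64_t iaoq_f_pgofs = iaoq_first & ~TARGET_PAGE_MASK;
        uint64_t iaoq_b_pgofs = cs_base & ~TARGET_PAGE_MASK;

        return (int64_t)(iaoq_b_pgofs - iaoq_f_pgofs);
    }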
@@ -4692,8 +4674,23 @@ static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ uint64_t iaoq_f, iaoq_b;
+ int64_t diff;
+
+ tcg_debug_assert(!iaqe_variable(&ctx->iaq_f));
- tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b, 0);
+ iaoq_f = ctx->iaoq_first + ctx->iaq_f.disp;
+ if (iaqe_variable(&ctx->iaq_b)) {
+ diff = INT32_MIN;
+ } else {
+ iaoq_b = ctx->iaoq_first + ctx->iaq_b.disp;
+ diff = iaoq_b - iaoq_f;
+ /* Direct branches can only produce a 24-bit displacement. */
+ tcg_debug_assert(diff == (int32_t)diff);
+ tcg_debug_assert(diff != INT32_MIN);
+ }
+
+ tcg_gen_insn_start(iaoq_f & ~TARGET_PAGE_MASK, diff, 0);
ctx->insn_start_updated = false;
}
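With CF_PCREL the insn_start payload can no longer carry absolute addresses; it records the page offset of IAOQ_F plus a signed displacement to IAOQ_B, with INT32_MIN reserved as the sentinel for a variable back entry. A scalar sketch of what the call above records, under the same page-size assumption as before:

    #include <stdbool.h>
    #include <stdint.h>

    #define TARGET_PAGE_MASK ((uint64_t)-4096)   /* assumed 4 KiB pages */

    static void insn_start_payload(uint64_t iaoq_f, uint64_t iaoq_b,
                                   bool b_is_variable,
                                   uint64_t *pgofs, int64_t *diff)
    {
        *pgofs = iaoq_f & ~TARGET_PAGE_MASK;
        *diff = b_is_variable ? INT32_MIN : (int64_t)(iaoq_b - iaoq_f);
    }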
@@ -4716,16 +4713,13 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
the page permissions for execute. */
uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
- /* Set up the IA queue for the next insn.
- This will be overwritten by a branch. */
- if (ctx->iaoq_b == -1) {
- ctx->iaoq_n = -1;
- ctx->iaoq_n_var = tcg_temp_new_i64();
- tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
- } else {
- ctx->iaoq_n = ctx->iaoq_b + 4;
- ctx->iaoq_n_var = NULL;
- }
+ /*
+ * Set up the IA queue for the next insn.
+ * This will be overwritten by a branch.
+ */
+ ctx->iaq_n = NULL;
+ memset(&ctx->iaq_j, 0, sizeof(ctx->iaq_j));
+ ctx->psw_b_next = false;
if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
ctx->null_cond.c = TCG_COND_NEVER;
@@ -4738,51 +4732,47 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
ret = ctx->base.is_jmp;
assert(ctx->null_lab == NULL);
}
- }
- /* Advance the insn queue. Note that this check also detects
- a priority change within the instruction queue. */
- if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
- if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
- && use_goto_tb(ctx, ctx->iaoq_b)
- && (ctx->null_cond.c == TCG_COND_NEVER
- || ctx->null_cond.c == TCG_COND_ALWAYS)) {
- nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
- gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
- ctx->base.is_jmp = ret = DISAS_NORETURN;
- } else {
- ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
+ if (ret != DISAS_NORETURN) {
+ set_psw_xb(ctx, ctx->psw_b_next ? PSW_B : 0);
}
}
- ctx->iaoq_f = ctx->iaoq_b;
- ctx->iaoq_b = ctx->iaoq_n;
- ctx->base.pc_next += 4;
-
- switch (ret) {
- case DISAS_NORETURN:
- case DISAS_IAQ_N_UPDATED:
- break;
- case DISAS_NEXT:
- case DISAS_IAQ_N_STALE:
- case DISAS_IAQ_N_STALE_EXIT:
- if (ctx->iaoq_f == -1) {
- copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
- copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
-#ifndef CONFIG_USER_ONLY
- tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
-#endif
- nullify_save(ctx);
- ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
- ? DISAS_EXIT
- : DISAS_IAQ_N_UPDATED);
- } else if (ctx->iaoq_b == -1) {
- copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
- }
- break;
+ /* If the TranslationBlock must end, do so. */
+ ctx->base.pc_next += 4;
+ if (ret != DISAS_NEXT) {
+ return;
+ }
+ /* Note this also detects a priority change. */
+ if (iaqe_variable(&ctx->iaq_b)
+ || ctx->iaq_b.disp != ctx->iaq_f.disp + 4) {
+ ctx->base.is_jmp = DISAS_IAQ_N_STALE;
+ return;
+ }
- default:
- g_assert_not_reached();
+ /*
+ * Advance the insn queue.
+ * The only exit now is DISAS_TOO_MANY from the translator loop.
+ */
+ ctx->iaq_f.disp = ctx->iaq_b.disp;
+ if (!ctx->iaq_n) {
+ ctx->iaq_b.disp += 4;
+ return;
+ }
+ /*
+ * If IAQ_Next is variable in any way, we need to copy into the
+ * IAQ_Back globals, in case the next insn raises an exception.
+ */
+ if (ctx->iaq_n->base) {
+ copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaq_n);
+ ctx->iaq_b.base = cpu_iaoq_b;
+ ctx->iaq_b.disp = 0;
+ } else {
+ ctx->iaq_b.disp = ctx->iaq_n->disp;
+ }
+ if (ctx->iaq_n->space) {
+ tcg_gen_mov_i64(cpu_iasq_b, ctx->iaq_n->space);
+ ctx->iaq_b.space = cpu_iasq_b;
}
}
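The tail of translate_insn is now pure bookkeeping on the DisasIAQE displacements. Ignoring the variable base/space copies into the cpu_iaoq_b/cpu_iasq_b globals, the advance reduces to this sketch:

    #include <stdint.h>

    /* Simplified DisasIAQE: constant displacement only (the real struct
     * also carries optional TCGv base and space values). */
    typedef struct { int64_t disp; } IAQE;

    static void advance_insn_queue(IAQE *f, IAQE *b, const IAQE *n)
    {
        f->disp = b->disp;
        if (n == NULL) {
            b->disp += 4;    /* sequential: next insn follows the back */
        } else {
            *b = *n;         /* a branch installed an explicit next entry */
        }
    }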
@@ -4790,29 +4780,57 @@ static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
DisasJumpType is_jmp = ctx->base.is_jmp;
+ /* Assume the insn queue has not been advanced. */
+ DisasIAQE *f = &ctx->iaq_b;
+ DisasIAQE *b = ctx->iaq_n;
switch (is_jmp) {
case DISAS_NORETURN:
break;
case DISAS_TOO_MANY:
+        /* The insn queue has been advanced. */
+ f = &ctx->iaq_f;
+ b = &ctx->iaq_b;
+ /* FALLTHRU */
case DISAS_IAQ_N_STALE:
+ if (use_goto_tb(ctx, f, b)
+ && (ctx->null_cond.c == TCG_COND_NEVER
+ || ctx->null_cond.c == TCG_COND_ALWAYS)) {
+ nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
+ gen_goto_tb(ctx, 0, f, b);
+ break;
+ }
+ /* FALLTHRU */
case DISAS_IAQ_N_STALE_EXIT:
- copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
- copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
+ install_iaq_entries(ctx, f, b);
nullify_save(ctx);
- /* FALLTHRU */
- case DISAS_IAQ_N_UPDATED:
- if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
- tcg_gen_lookup_and_goto_ptr();
+ if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
+ tcg_gen_exit_tb(NULL, 0);
break;
}
/* FALLTHRU */
+ case DISAS_IAQ_N_UPDATED:
+ tcg_gen_lookup_and_goto_ptr();
+ break;
case DISAS_EXIT:
tcg_gen_exit_tb(NULL, 0);
break;
default:
g_assert_not_reached();
}
+
+ for (DisasDelayException *e = ctx->delay_excp_list; e ; e = e->next) {
+ gen_set_label(e->lab);
+ if (e->set_n >= 0) {
+ tcg_gen_movi_i64(cpu_psw_n, e->set_n);
+ }
+ if (e->set_iir) {
+ tcg_gen_st_i64(tcg_constant_i64(e->insn), tcg_env,
+ offsetof(CPUHPPAState, cr[CR_IIR]));
+ }
+ install_iaq_entries(ctx, &e->iaq_f, &e->iaq_b);
+ gen_excp_1(e->excp);
+ }
}
#ifdef CONFIG_USER_ONLY
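The loop at the end of tb_stop materializes the out-of-line exception paths queued by the DisasDelayException machinery introduced earlier in the series. Its field set can be read off the uses above; this is a reconstruction, not the actual definition from translate.c:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct TCGLabel TCGLabel;           /* opaque here */
    typedef struct { int64_t disp; } DisasIAQE; /* simplified, as above */

    /* Inferred shape of DisasDelayException, based only on the loop above. */
    typedef struct DisasDelayException {
        struct DisasDelayException *next;
        TCGLabel *lab;           /* jumped to when the runtime check fails */
        int set_n;               /* -1: leave PSW[N] alone; else store 0 or 1 */
        bool set_iir;            /* store the raw insn word into CR[IIR] */
        uint64_t insn;
        int excp;                /* EXCP_* code passed to gen_excp_1() */
        DisasIAQE iaq_f, iaq_b;  /* queue state of the faulting insn */
    } DisasDelayException;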
@@ -4853,6 +4871,6 @@ static const TranslatorOps hppa_tr_ops = {
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
vaddr pc, void *host_pc)
{
- DisasContext ctx;
+ DisasContext ctx = { };
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}