Diffstat (limited to 'target/sh4/translate.c')
-rw-r--r--  target/sh4/translate.c  1466
1 file changed, 733 insertions, 733 deletions
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index 220a06bdce..81f825f125 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -136,10 +136,10 @@ void sh4_translate_init(void)
offsetof(CPUSH4State, fpul), "FPUL");
cpu_flags = tcg_global_mem_new_i32(tcg_env,
- offsetof(CPUSH4State, flags), "_flags_");
+ offsetof(CPUSH4State, flags), "_flags_");
cpu_delayed_pc = tcg_global_mem_new_i32(tcg_env,
- offsetof(CPUSH4State, delayed_pc),
- "_delayed_pc_");
+ offsetof(CPUSH4State, delayed_pc),
+ "_delayed_pc_");
cpu_delayed_cond = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State,
delayed_cond),
@@ -252,9 +252,9 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
static void gen_jump(DisasContext * ctx)
{
if (ctx->delayed_pc == -1) {
- /* Target is not statically known, it comes necessarily from a
- delayed jump as immediate jump are conditinal jumps */
- tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
+ /* Target is not statically known, it comes necessarily from a
+ delayed jump as immediate jump are conditinal jumps */
+ tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
tcg_gen_discard_i32(cpu_delayed_pc);
if (use_exit_tb(ctx)) {
tcg_gen_exit_tb(NULL, 0);
@@ -263,7 +263,7 @@ static void gen_jump(DisasContext * ctx)
}
ctx->base.is_jmp = DISAS_NORETURN;
} else {
- gen_goto_tb(ctx, 0, ctx->delayed_pc);
+ gen_goto_tb(ctx, 0, ctx->delayed_pc);
}
}
@@ -411,103 +411,103 @@ static void _decode_opc(DisasContext * ctx)
TB, or if we already saw movca.l in this TB and did not flush stores
yet. */
if (ctx->has_movcal)
- {
- int opcode = ctx->opcode & 0xf0ff;
- if (opcode != 0x0093 /* ocbi */
- && opcode != 0x00c3 /* movca.l */)
- {
- gen_helper_discard_movcal_backup(tcg_env);
- ctx->has_movcal = 0;
- }
- }
+ {
+ int opcode = ctx->opcode & 0xf0ff;
+ if (opcode != 0x0093 /* ocbi */
+ && opcode != 0x00c3 /* movca.l */)
+ {
+ gen_helper_discard_movcal_backup(tcg_env);
+ ctx->has_movcal = 0;
+ }
+ }
#if 0
fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
#endif
switch (ctx->opcode) {
- case 0x0019: /* div0u */
+ case 0x0019: /* div0u */
tcg_gen_movi_i32(cpu_sr_m, 0);
tcg_gen_movi_i32(cpu_sr_q, 0);
tcg_gen_movi_i32(cpu_sr_t, 0);
- return;
- case 0x000b: /* rts */
- CHECK_NOT_DELAY_SLOT
- tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
+ return;
+ case 0x000b: /* rts */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
ctx->envflags |= TB_FLAG_DELAY_SLOT;
- ctx->delayed_pc = (uint32_t) - 1;
- return;
- case 0x0028: /* clrmac */
- tcg_gen_movi_i32(cpu_mach, 0);
- tcg_gen_movi_i32(cpu_macl, 0);
- return;
- case 0x0048: /* clrs */
+ ctx->delayed_pc = (uint32_t) - 1;
+ return;
+ case 0x0028: /* clrmac */
+ tcg_gen_movi_i32(cpu_mach, 0);
+ tcg_gen_movi_i32(cpu_macl, 0);
+ return;
+ case 0x0048: /* clrs */
tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
- return;
- case 0x0008: /* clrt */
+ return;
+ case 0x0008: /* clrt */
tcg_gen_movi_i32(cpu_sr_t, 0);
- return;
- case 0x0038: /* ldtlb */
- CHECK_PRIVILEGED
+ return;
+ case 0x0038: /* ldtlb */
+ CHECK_PRIVILEGED
gen_helper_ldtlb(tcg_env);
- return;
- case 0x002b: /* rte */
- CHECK_PRIVILEGED
- CHECK_NOT_DELAY_SLOT
+ return;
+ case 0x002b: /* rte */
+ CHECK_PRIVILEGED
+ CHECK_NOT_DELAY_SLOT
gen_write_sr(cpu_ssr);
- tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
+ tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE;
- ctx->delayed_pc = (uint32_t) - 1;
+ ctx->delayed_pc = (uint32_t) - 1;
ctx->base.is_jmp = DISAS_STOP;
- return;
- case 0x0058: /* sets */
+ return;
+ case 0x0058: /* sets */
tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
- return;
- case 0x0018: /* sett */
+ return;
+ case 0x0018: /* sett */
tcg_gen_movi_i32(cpu_sr_t, 1);
- return;
- case 0xfbfd: /* frchg */
+ return;
+ case 0xfbfd: /* frchg */
CHECK_FPSCR_PR_0
- tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
+ tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
ctx->base.is_jmp = DISAS_STOP;
- return;
- case 0xf3fd: /* fschg */
+ return;
+ case 0xf3fd: /* fschg */
CHECK_FPSCR_PR_0
tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
ctx->base.is_jmp = DISAS_STOP;
- return;
- case 0xf7fd: /* fpchg */
+ return;
+ case 0xf7fd: /* fpchg */
CHECK_SH4A
tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
ctx->base.is_jmp = DISAS_STOP;
return;
- case 0x0009: /* nop */
- return;
- case 0x001b: /* sleep */
- CHECK_PRIVILEGED
+ case 0x0009: /* nop */
+ return;
+ case 0x001b: /* sleep */
+ CHECK_PRIVILEGED
tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
gen_helper_sleep(tcg_env);
- return;
+ return;
}
switch (ctx->opcode & 0xf000) {
- case 0x1000: /* mov.l Rm,@(disp,Rn) */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
+ case 0x1000: /* mov.l Rm,@(disp,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
MO_TEUL | UNALIGN(ctx));
- }
- return;
- case 0x5000: /* mov.l @(disp,Rm),Rn */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
+ }
+ return;
+ case 0x5000: /* mov.l @(disp,Rm),Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
MO_TESL | UNALIGN(ctx));
- }
- return;
- case 0xe000: /* mov #imm,Rn */
+ }
+ return;
+ case 0xe000: /* mov #imm,Rn */
#ifdef CONFIG_USER_ONLY
/*
* Detect the start of a gUSA region (mov #-n, r15).
@@ -521,178 +521,178 @@ static void _decode_opc(DisasContext * ctx)
ctx->base.is_jmp = DISAS_STOP;
}
#endif
- tcg_gen_movi_i32(REG(B11_8), B7_0s);
- return;
- case 0x9000: /* mov.w @(disp,PC),Rn */
- {
+ tcg_gen_movi_i32(REG(B11_8), B7_0s);
+ return;
+ case 0x9000: /* mov.w @(disp,PC),Rn */
+ {
TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2);
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
MO_TESW | MO_ALIGN);
- }
- return;
- case 0xd000: /* mov.l @(disp,PC),Rn */
- {
+ }
+ return;
+ case 0xd000: /* mov.l @(disp,PC),Rn */
+ {
TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
MO_TESL | MO_ALIGN);
- }
- return;
- case 0x7000: /* add #imm,Rn */
- tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
- return;
- case 0xa000: /* bra disp */
- CHECK_NOT_DELAY_SLOT
+ }
+ return;
+ case 0x7000: /* add #imm,Rn */
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
+ return;
+ case 0xa000: /* bra disp */
+ CHECK_NOT_DELAY_SLOT
ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
ctx->envflags |= TB_FLAG_DELAY_SLOT;
- return;
- case 0xb000: /* bsr disp */
- CHECK_NOT_DELAY_SLOT
+ return;
+ case 0xb000: /* bsr disp */
+ CHECK_NOT_DELAY_SLOT
tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
ctx->envflags |= TB_FLAG_DELAY_SLOT;
- return;
+ return;
}
switch (ctx->opcode & 0xf00f) {
- case 0x6003: /* mov Rm,Rn */
- tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
- return;
- case 0x2000: /* mov.b Rm,@Rn */
+ case 0x6003: /* mov Rm,Rn */
+ tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x2000: /* mov.b Rm,@Rn */
tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
- return;
- case 0x2001: /* mov.w Rm,@Rn */
+ return;
+ case 0x2001: /* mov.w Rm,@Rn */
tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
MO_TEUW | UNALIGN(ctx));
- return;
- case 0x2002: /* mov.l Rm,@Rn */
+ return;
+ case 0x2002: /* mov.l Rm,@Rn */
tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
MO_TEUL | UNALIGN(ctx));
- return;
- case 0x6000: /* mov.b @Rm,Rn */
+ return;
+ case 0x6000: /* mov.b @Rm,Rn */
tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
- return;
- case 0x6001: /* mov.w @Rm,Rn */
+ return;
+ case 0x6001: /* mov.w @Rm,Rn */
tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
MO_TESW | UNALIGN(ctx));
- return;
- case 0x6002: /* mov.l @Rm,Rn */
+ return;
+ case 0x6002: /* mov.l @Rm,Rn */
tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
MO_TESL | UNALIGN(ctx));
- return;
- case 0x2004: /* mov.b Rm,@-Rn */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_subi_i32(addr, REG(B11_8), 1);
+ return;
+ case 0x2004: /* mov.b Rm,@-Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 1);
/* might cause re-execution */
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
- tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
- }
- return;
- case 0x2005: /* mov.w Rm,@-Rn */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_subi_i32(addr, REG(B11_8), 2);
+ tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
+ }
+ return;
+ case 0x2005: /* mov.w Rm,@-Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 2);
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
MO_TEUW | UNALIGN(ctx));
- tcg_gen_mov_i32(REG(B11_8), addr);
- }
- return;
- case 0x2006: /* mov.l Rm,@-Rn */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_subi_i32(addr, REG(B11_8), 4);
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ }
+ return;
+ case 0x2006: /* mov.l Rm,@-Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 4);
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
MO_TEUL | UNALIGN(ctx));
- tcg_gen_mov_i32(REG(B11_8), addr);
- }
- return;
- case 0x6004: /* mov.b @Rm+,Rn */
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ }
+ return;
+ case 0x6004: /* mov.b @Rm+,Rn */
tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
- if ( B11_8 != B7_4 )
- tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
- return;
- case 0x6005: /* mov.w @Rm+,Rn */
+ if ( B11_8 != B7_4 )
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
+ return;
+ case 0x6005: /* mov.w @Rm+,Rn */
tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
MO_TESW | UNALIGN(ctx));
- if ( B11_8 != B7_4 )
- tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
- return;
- case 0x6006: /* mov.l @Rm+,Rn */
+ if ( B11_8 != B7_4 )
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
+ return;
+ case 0x6006: /* mov.l @Rm+,Rn */
tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
MO_TESL | UNALIGN(ctx));
- if ( B11_8 != B7_4 )
- tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
- return;
- case 0x0004: /* mov.b Rm,@(R0,Rn) */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_add_i32(addr, REG(B11_8), REG(0));
+ if ( B11_8 != B7_4 )
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
+ return;
+ case 0x0004: /* mov.b Rm,@(R0,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B11_8), REG(0));
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
- }
- return;
- case 0x0005: /* mov.w Rm,@(R0,Rn) */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_add_i32(addr, REG(B11_8), REG(0));
+ }
+ return;
+ case 0x0005: /* mov.w Rm,@(R0,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B11_8), REG(0));
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
MO_TEUW | UNALIGN(ctx));
- }
- return;
- case 0x0006: /* mov.l Rm,@(R0,Rn) */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_add_i32(addr, REG(B11_8), REG(0));
+ }
+ return;
+ case 0x0006: /* mov.l Rm,@(R0,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B11_8), REG(0));
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
MO_TEUL | UNALIGN(ctx));
- }
- return;
- case 0x000c: /* mov.b @(R0,Rm),Rn */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_add_i32(addr, REG(B7_4), REG(0));
+ }
+ return;
+ case 0x000c: /* mov.b @(R0,Rm),Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B7_4), REG(0));
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
- }
- return;
- case 0x000d: /* mov.w @(R0,Rm),Rn */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_add_i32(addr, REG(B7_4), REG(0));
+ }
+ return;
+ case 0x000d: /* mov.w @(R0,Rm),Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B7_4), REG(0));
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
MO_TESW | UNALIGN(ctx));
- }
- return;
- case 0x000e: /* mov.l @(R0,Rm),Rn */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_add_i32(addr, REG(B7_4), REG(0));
+ }
+ return;
+ case 0x000e: /* mov.l @(R0,Rm),Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B7_4), REG(0));
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
MO_TESL | UNALIGN(ctx));
- }
- return;
- case 0x6008: /* swap.b Rm,Rn */
- {
+ }
+ return;
+ case 0x6008: /* swap.b Rm,Rn */
+ {
TCGv low = tcg_temp_new();
tcg_gen_bswap16_i32(low, REG(B7_4), 0);
tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
- }
- return;
- case 0x6009: /* swap.w Rm,Rn */
+ }
+ return;
+ case 0x6009: /* swap.w Rm,Rn */
tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
- return;
- case 0x200d: /* xtrct Rm,Rn */
- {
- TCGv high, low;
- high = tcg_temp_new();
- tcg_gen_shli_i32(high, REG(B7_4), 16);
- low = tcg_temp_new();
- tcg_gen_shri_i32(low, REG(B11_8), 16);
- tcg_gen_or_i32(REG(B11_8), high, low);
- }
- return;
- case 0x300c: /* add Rm,Rn */
- tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
- return;
- case 0x300e: /* addc Rm,Rn */
+ return;
+ case 0x200d: /* xtrct Rm,Rn */
+ {
+ TCGv high, low;
+ high = tcg_temp_new();
+ tcg_gen_shli_i32(high, REG(B7_4), 16);
+ low = tcg_temp_new();
+ tcg_gen_shri_i32(low, REG(B11_8), 16);
+ tcg_gen_or_i32(REG(B11_8), high, low);
+ }
+ return;
+ case 0x300c: /* add Rm,Rn */
+ tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
+ return;
+ case 0x300e: /* addc Rm,Rn */
{
TCGv t0, t1;
t0 = tcg_constant_tl(0);
@@ -701,8 +701,8 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
REG(B11_8), t0, t1, cpu_sr_t);
}
- return;
- case 0x300f: /* addv Rm,Rn */
+ return;
+ case 0x300f: /* addv Rm,Rn */
{
TCGv t0, t1, t2;
t0 = tcg_temp_new();
@@ -715,42 +715,42 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
tcg_gen_mov_i32(REG(B7_4), t0);
}
- return;
- case 0x2009: /* and Rm,Rn */
- tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
- return;
- case 0x3000: /* cmp/eq Rm,Rn */
+ return;
+ case 0x2009: /* and Rm,Rn */
+ tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
+ return;
+ case 0x3000: /* cmp/eq Rm,Rn */
tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
- return;
- case 0x3003: /* cmp/ge Rm,Rn */
+ return;
+ case 0x3003: /* cmp/ge Rm,Rn */
tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
- return;
- case 0x3007: /* cmp/gt Rm,Rn */
+ return;
+ case 0x3007: /* cmp/gt Rm,Rn */
tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
- return;
- case 0x3006: /* cmp/hi Rm,Rn */
+ return;
+ case 0x3006: /* cmp/hi Rm,Rn */
tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
- return;
- case 0x3002: /* cmp/hs Rm,Rn */
+ return;
+ case 0x3002: /* cmp/hs Rm,Rn */
tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
- return;
- case 0x200c: /* cmp/str Rm,Rn */
- {
- TCGv cmp1 = tcg_temp_new();
- TCGv cmp2 = tcg_temp_new();
+ return;
+ case 0x200c: /* cmp/str Rm,Rn */
+ {
+ TCGv cmp1 = tcg_temp_new();
+ TCGv cmp2 = tcg_temp_new();
tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
tcg_gen_andc_i32(cmp1, cmp1, cmp2);
tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
- }
- return;
- case 0x2007: /* div0s Rm,Rn */
+ }
+ return;
+ case 0x2007: /* div0s Rm,Rn */
tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
- return;
- case 0x3004: /* div1 Rm,Rn */
+ return;
+ case 0x3004: /* div1 Rm,Rn */
{
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
@@ -779,80 +779,80 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_xori_i32(cpu_sr_t, t1, 1);
tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
}
- return;
- case 0x300d: /* dmuls.l Rm,Rn */
+ return;
+ case 0x300d: /* dmuls.l Rm,Rn */
tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
- return;
- case 0x3005: /* dmulu.l Rm,Rn */
+ return;
+ case 0x3005: /* dmulu.l Rm,Rn */
tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
- return;
- case 0x600e: /* exts.b Rm,Rn */
- tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
- return;
- case 0x600f: /* exts.w Rm,Rn */
- tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
- return;
- case 0x600c: /* extu.b Rm,Rn */
- tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
- return;
- case 0x600d: /* extu.w Rm,Rn */
- tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
- return;
- case 0x000f: /* mac.l @Rm+,@Rn+ */
- {
- TCGv arg0, arg1;
- arg0 = tcg_temp_new();
+ return;
+ case 0x600e: /* exts.b Rm,Rn */
+ tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x600f: /* exts.w Rm,Rn */
+ tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x600c: /* extu.b Rm,Rn */
+ tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x600d: /* extu.w Rm,Rn */
+ tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x000f: /* mac.l @Rm+,@Rn+ */
+ {
+ TCGv arg0, arg1;
+ arg0 = tcg_temp_new();
tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
MO_TESL | MO_ALIGN);
- arg1 = tcg_temp_new();
+ arg1 = tcg_temp_new();
tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
MO_TESL | MO_ALIGN);
gen_helper_macl(tcg_env, arg0, arg1);
- tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
- tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
- }
- return;
- case 0x400f: /* mac.w @Rm+,@Rn+ */
- {
- TCGv arg0, arg1;
- arg0 = tcg_temp_new();
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
+ }
+ return;
+ case 0x400f: /* mac.w @Rm+,@Rn+ */
+ {
+ TCGv arg0, arg1;
+ arg0 = tcg_temp_new();
tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
MO_TESL | MO_ALIGN);
- arg1 = tcg_temp_new();
+ arg1 = tcg_temp_new();
tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
MO_TESL | MO_ALIGN);
gen_helper_macw(tcg_env, arg0, arg1);
- tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
- tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
- }
- return;
- case 0x0007: /* mul.l Rm,Rn */
- tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
- return;
- case 0x200f: /* muls.w Rm,Rn */
- {
- TCGv arg0, arg1;
- arg0 = tcg_temp_new();
- tcg_gen_ext16s_i32(arg0, REG(B7_4));
- arg1 = tcg_temp_new();
- tcg_gen_ext16s_i32(arg1, REG(B11_8));
- tcg_gen_mul_i32(cpu_macl, arg0, arg1);
- }
- return;
- case 0x200e: /* mulu.w Rm,Rn */
- {
- TCGv arg0, arg1;
- arg0 = tcg_temp_new();
- tcg_gen_ext16u_i32(arg0, REG(B7_4));
- arg1 = tcg_temp_new();
- tcg_gen_ext16u_i32(arg1, REG(B11_8));
- tcg_gen_mul_i32(cpu_macl, arg0, arg1);
- }
- return;
- case 0x600b: /* neg Rm,Rn */
- tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
- return;
- case 0x600a: /* negc Rm,Rn */
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
+ }
+ return;
+ case 0x0007: /* mul.l Rm,Rn */
+ tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
+ return;
+ case 0x200f: /* muls.w Rm,Rn */
+ {
+ TCGv arg0, arg1;
+ arg0 = tcg_temp_new();
+ tcg_gen_ext16s_i32(arg0, REG(B7_4));
+ arg1 = tcg_temp_new();
+ tcg_gen_ext16s_i32(arg1, REG(B11_8));
+ tcg_gen_mul_i32(cpu_macl, arg0, arg1);
+ }
+ return;
+ case 0x200e: /* mulu.w Rm,Rn */
+ {
+ TCGv arg0, arg1;
+ arg0 = tcg_temp_new();
+ tcg_gen_ext16u_i32(arg0, REG(B7_4));
+ arg1 = tcg_temp_new();
+ tcg_gen_ext16u_i32(arg1, REG(B11_8));
+ tcg_gen_mul_i32(cpu_macl, arg0, arg1);
+ }
+ return;
+ case 0x600b: /* neg Rm,Rn */
+ tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x600a: /* negc Rm,Rn */
{
TCGv t0 = tcg_constant_i32(0);
tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
@@ -861,15 +861,15 @@ static void _decode_opc(DisasContext * ctx)
t0, t0, REG(B11_8), cpu_sr_t);
tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
}
- return;
- case 0x6007: /* not Rm,Rn */
- tcg_gen_not_i32(REG(B11_8), REG(B7_4));
- return;
- case 0x200b: /* or Rm,Rn */
- tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
- return;
- case 0x400c: /* shad Rm,Rn */
- {
+ return;
+ case 0x6007: /* not Rm,Rn */
+ tcg_gen_not_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x200b: /* or Rm,Rn */
+ tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
+ return;
+ case 0x400c: /* shad Rm,Rn */
+ {
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
@@ -888,10 +888,10 @@ static void _decode_opc(DisasContext * ctx)
/* select between the two cases */
tcg_gen_movi_i32(t0, 0);
tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
- }
- return;
- case 0x400d: /* shld Rm,Rn */
- {
+ }
+ return;
+ case 0x400d: /* shld Rm,Rn */
+ {
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
@@ -910,12 +910,12 @@ static void _decode_opc(DisasContext * ctx)
/* select between the two cases */
tcg_gen_movi_i32(t0, 0);
tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
- }
- return;
- case 0x3008: /* sub Rm,Rn */
- tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
- return;
- case 0x300a: /* subc Rm,Rn */
+ }
+ return;
+ case 0x3008: /* sub Rm,Rn */
+ tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
+ return;
+ case 0x300a: /* subc Rm,Rn */
{
TCGv t0, t1;
t0 = tcg_constant_tl(0);
@@ -925,8 +925,8 @@ static void _decode_opc(DisasContext * ctx)
REG(B11_8), t0, t1, cpu_sr_t);
tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
}
- return;
- case 0x300b: /* subv Rm,Rn */
+ return;
+ case 0x300b: /* subv Rm,Rn */
{
TCGv t0, t1, t2;
t0 = tcg_temp_new();
@@ -939,68 +939,68 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_shri_i32(cpu_sr_t, t1, 31);
tcg_gen_mov_i32(REG(B11_8), t0);
}
- return;
- case 0x2008: /* tst Rm,Rn */
- {
- TCGv val = tcg_temp_new();
- tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
+ return;
+ case 0x2008: /* tst Rm,Rn */
+ {
+ TCGv val = tcg_temp_new();
+ tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
- }
- return;
- case 0x200a: /* xor Rm,Rn */
- tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
- return;
+ }
+ return;
+ case 0x200a: /* xor Rm,Rn */
+ tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
+ return;
case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
- CHECK_FPU_ENABLED
+ CHECK_FPU_ENABLED
if (ctx->tbflags & FPSCR_SZ) {
int xsrc = XHACK(B7_4);
int xdst = XHACK(B11_8);
tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
- } else {
+ } else {
tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
- }
- return;
+ }
+ return;
case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
- CHECK_FPU_ENABLED
+ CHECK_FPU_ENABLED
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, XHACK(B7_4));
tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx,
MO_TEUQ | MO_ALIGN);
- } else {
+ } else {
tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx,
MO_TEUL | MO_ALIGN);
- }
- return;
+ }
+ return;
case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
- CHECK_FPU_ENABLED
+ CHECK_FPU_ENABLED
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
MO_TEUQ | MO_ALIGN);
gen_store_fpr64(ctx, fp, XHACK(B11_8));
- } else {
+ } else {
tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
MO_TEUL | MO_ALIGN);
- }
- return;
+ }
+ return;
case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
- CHECK_FPU_ENABLED
+ CHECK_FPU_ENABLED
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
MO_TEUQ | MO_ALIGN);
gen_store_fpr64(ctx, fp, XHACK(B11_8));
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
- } else {
+ } else {
tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
MO_TEUL | MO_ALIGN);
- tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
- }
- return;
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
+ }
+ return;
case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
- CHECK_FPU_ENABLED
+ CHECK_FPU_ENABLED
{
TCGv addr = tcg_temp_new_i32();
if (ctx->tbflags & FPSCR_SZ) {
@@ -1016,108 +1016,108 @@ static void _decode_opc(DisasContext * ctx)
}
tcg_gen_mov_i32(REG(B11_8), addr);
}
- return;
+ return;
case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
- CHECK_FPU_ENABLED
- {
- TCGv addr = tcg_temp_new_i32();
- tcg_gen_add_i32(addr, REG(B7_4), REG(0));
+ CHECK_FPU_ENABLED
+ {
+ TCGv addr = tcg_temp_new_i32();
+ tcg_gen_add_i32(addr, REG(B7_4), REG(0));
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx,
MO_TEUQ | MO_ALIGN);
gen_store_fpr64(ctx, fp, XHACK(B11_8));
- } else {
+ } else {
tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx,
MO_TEUL | MO_ALIGN);
- }
- }
- return;
+ }
+ }
+ return;
case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
- CHECK_FPU_ENABLED
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_add_i32(addr, REG(B11_8), REG(0));
+ CHECK_FPU_ENABLED
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B11_8), REG(0));
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, XHACK(B7_4));
tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
MO_TEUQ | MO_ALIGN);
- } else {
+ } else {
tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
MO_TEUL | MO_ALIGN);
- }
- }
- return;
+ }
+ }
+ return;
case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
- {
- CHECK_FPU_ENABLED
+ {
+ CHECK_FPU_ENABLED
if (ctx->tbflags & FPSCR_PR) {
TCGv_i64 fp0, fp1;
if (ctx->opcode & 0x0110) {
goto do_illegal;
}
- fp0 = tcg_temp_new_i64();
- fp1 = tcg_temp_new_i64();
+ fp0 = tcg_temp_new_i64();
+ fp1 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, B11_8);
gen_load_fpr64(ctx, fp1, B7_4);
switch (ctx->opcode & 0xf00f) {
- case 0xf000: /* fadd Rm,Rn */
+ case 0xf000: /* fadd Rm,Rn */
gen_helper_fadd_DT(fp0, tcg_env, fp0, fp1);
break;
- case 0xf001: /* fsub Rm,Rn */
+ case 0xf001: /* fsub Rm,Rn */
gen_helper_fsub_DT(fp0, tcg_env, fp0, fp1);
break;
- case 0xf002: /* fmul Rm,Rn */
+ case 0xf002: /* fmul Rm,Rn */
gen_helper_fmul_DT(fp0, tcg_env, fp0, fp1);
break;
- case 0xf003: /* fdiv Rm,Rn */
+ case 0xf003: /* fdiv Rm,Rn */
gen_helper_fdiv_DT(fp0, tcg_env, fp0, fp1);
break;
- case 0xf004: /* fcmp/eq Rm,Rn */
+ case 0xf004: /* fcmp/eq Rm,Rn */
gen_helper_fcmp_eq_DT(cpu_sr_t, tcg_env, fp0, fp1);
return;
- case 0xf005: /* fcmp/gt Rm,Rn */
+ case 0xf005: /* fcmp/gt Rm,Rn */
gen_helper_fcmp_gt_DT(cpu_sr_t, tcg_env, fp0, fp1);
return;
}
gen_store_fpr64(ctx, fp0, B11_8);
- } else {
+ } else {
switch (ctx->opcode & 0xf00f) {
- case 0xf000: /* fadd Rm,Rn */
+ case 0xf000: /* fadd Rm,Rn */
gen_helper_fadd_FT(FREG(B11_8), tcg_env,
FREG(B11_8), FREG(B7_4));
break;
- case 0xf001: /* fsub Rm,Rn */
+ case 0xf001: /* fsub Rm,Rn */
gen_helper_fsub_FT(FREG(B11_8), tcg_env,
FREG(B11_8), FREG(B7_4));
break;
- case 0xf002: /* fmul Rm,Rn */
+ case 0xf002: /* fmul Rm,Rn */
gen_helper_fmul_FT(FREG(B11_8), tcg_env,
FREG(B11_8), FREG(B7_4));
break;
- case 0xf003: /* fdiv Rm,Rn */
+ case 0xf003: /* fdiv Rm,Rn */
gen_helper_fdiv_FT(FREG(B11_8), tcg_env,
FREG(B11_8), FREG(B7_4));
break;
- case 0xf004: /* fcmp/eq Rm,Rn */
+ case 0xf004: /* fcmp/eq Rm,Rn */
gen_helper_fcmp_eq_FT(cpu_sr_t, tcg_env,
FREG(B11_8), FREG(B7_4));
return;
- case 0xf005: /* fcmp/gt Rm,Rn */
+ case 0xf005: /* fcmp/gt Rm,Rn */
gen_helper_fcmp_gt_FT(cpu_sr_t, tcg_env,
FREG(B11_8), FREG(B7_4));
return;
}
- }
- }
- return;
+ }
+ }
+ return;
case 0xf00e: /* fmac FR0,RM,Rn */
CHECK_FPU_ENABLED
CHECK_FPSCR_PR_0
@@ -1127,348 +1127,348 @@ static void _decode_opc(DisasContext * ctx)
}
switch (ctx->opcode & 0xff00) {
- case 0xc900: /* and #imm,R0 */
- tcg_gen_andi_i32(REG(0), REG(0), B7_0);
- return;
- case 0xcd00: /* and.b #imm,@(R0,GBR) */
- {
- TCGv addr, val;
- addr = tcg_temp_new();
- tcg_gen_add_i32(addr, REG(0), cpu_gbr);
- val = tcg_temp_new();
+ case 0xc900: /* and #imm,R0 */
+ tcg_gen_andi_i32(REG(0), REG(0), B7_0);
+ return;
+ case 0xcd00: /* and.b #imm,@(R0,GBR) */
+ {
+ TCGv addr, val;
+ addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(0), cpu_gbr);
+ val = tcg_temp_new();
tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
- tcg_gen_andi_i32(val, val, B7_0);
+ tcg_gen_andi_i32(val, val, B7_0);
tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
- }
- return;
- case 0x8b00: /* bf label */
- CHECK_NOT_DELAY_SLOT
+ }
+ return;
+ case 0x8b00: /* bf label */
+ CHECK_NOT_DELAY_SLOT
gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
- return;
- case 0x8f00: /* bf/s label */
- CHECK_NOT_DELAY_SLOT
+ return;
+ case 0x8f00: /* bf/s label */
+ CHECK_NOT_DELAY_SLOT
tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
- return;
- case 0x8900: /* bt label */
- CHECK_NOT_DELAY_SLOT
+ return;
+ case 0x8900: /* bt label */
+ CHECK_NOT_DELAY_SLOT
gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
- return;
- case 0x8d00: /* bt/s label */
- CHECK_NOT_DELAY_SLOT
+ return;
+ case 0x8d00: /* bt/s label */
+ CHECK_NOT_DELAY_SLOT
tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
- return;
- case 0x8800: /* cmp/eq #imm,R0 */
+ return;
+ case 0x8800: /* cmp/eq #imm,R0 */
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
- return;
- case 0xc400: /* mov.b @(disp,GBR),R0 */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
+ return;
+ case 0xc400: /* mov.b @(disp,GBR),R0 */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
- }
- return;
- case 0xc500: /* mov.w @(disp,GBR),R0 */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
+ }
+ return;
+ case 0xc500: /* mov.w @(disp,GBR),R0 */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW | MO_ALIGN);
- }
- return;
- case 0xc600: /* mov.l @(disp,GBR),R0 */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
+ }
+ return;
+ case 0xc600: /* mov.l @(disp,GBR),R0 */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL | MO_ALIGN);
- }
- return;
- case 0xc000: /* mov.b R0,@(disp,GBR) */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
+ }
+ return;
+ case 0xc000: /* mov.b R0,@(disp,GBR) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
- }
- return;
- case 0xc100: /* mov.w R0,@(disp,GBR) */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
+ }
+ return;
+ case 0xc100: /* mov.w R0,@(disp,GBR) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW | MO_ALIGN);
- }
- return;
- case 0xc200: /* mov.l R0,@(disp,GBR) */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
+ }
+ return;
+ case 0xc200: /* mov.l R0,@(disp,GBR) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL | MO_ALIGN);
- }
- return;
- case 0x8000: /* mov.b R0,@(disp,Rn) */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
+ }
+ return;
+ case 0x8000: /* mov.b R0,@(disp,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
- }
- return;
- case 0x8100: /* mov.w R0,@(disp,Rn) */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
+ }
+ return;
+ case 0x8100: /* mov.w R0,@(disp,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx,
MO_TEUW | UNALIGN(ctx));
- }
- return;
- case 0x8400: /* mov.b @(disp,Rn),R0 */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
+ }
+ return;
+ case 0x8400: /* mov.b @(disp,Rn),R0 */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
- }
- return;
- case 0x8500: /* mov.w @(disp,Rn),R0 */
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
+ }
+ return;
+ case 0x8500: /* mov.w @(disp,Rn),R0 */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx,
MO_TESW | UNALIGN(ctx));
- }
- return;
- case 0xc700: /* mova @(disp,PC),R0 */
+ }
+ return;
+ case 0xc700: /* mova @(disp,PC),R0 */
tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
4 + B7_0 * 4) & ~3);
- return;
- case 0xcb00: /* or #imm,R0 */
- tcg_gen_ori_i32(REG(0), REG(0), B7_0);
- return;
- case 0xcf00: /* or.b #imm,@(R0,GBR) */
- {
- TCGv addr, val;
- addr = tcg_temp_new();
- tcg_gen_add_i32(addr, REG(0), cpu_gbr);
- val = tcg_temp_new();
+ return;
+ case 0xcb00: /* or #imm,R0 */
+ tcg_gen_ori_i32(REG(0), REG(0), B7_0);
+ return;
+ case 0xcf00: /* or.b #imm,@(R0,GBR) */
+ {
+ TCGv addr, val;
+ addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(0), cpu_gbr);
+ val = tcg_temp_new();
tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
- tcg_gen_ori_i32(val, val, B7_0);
+ tcg_gen_ori_i32(val, val, B7_0);
tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
- }
- return;
- case 0xc300: /* trapa #imm */
- {
- TCGv imm;
- CHECK_NOT_DELAY_SLOT
+ }
+ return;
+ case 0xc300: /* trapa #imm */
+ {
+ TCGv imm;
+ CHECK_NOT_DELAY_SLOT
gen_save_cpu_state(ctx, true);
- imm = tcg_constant_i32(B7_0);
+ imm = tcg_constant_i32(B7_0);
gen_helper_trapa(tcg_env, imm);
ctx->base.is_jmp = DISAS_NORETURN;
- }
- return;
- case 0xc800: /* tst #imm,R0 */
- {
- TCGv val = tcg_temp_new();
- tcg_gen_andi_i32(val, REG(0), B7_0);
+ }
+ return;
+ case 0xc800: /* tst #imm,R0 */
+ {
+ TCGv val = tcg_temp_new();
+ tcg_gen_andi_i32(val, REG(0), B7_0);
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
- }
- return;
- case 0xcc00: /* tst.b #imm,@(R0,GBR) */
- {
- TCGv val = tcg_temp_new();
- tcg_gen_add_i32(val, REG(0), cpu_gbr);
+ }
+ return;
+ case 0xcc00: /* tst.b #imm,@(R0,GBR) */
+ {
+ TCGv val = tcg_temp_new();
+ tcg_gen_add_i32(val, REG(0), cpu_gbr);
tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
- tcg_gen_andi_i32(val, val, B7_0);
+ tcg_gen_andi_i32(val, val, B7_0);
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
- }
- return;
- case 0xca00: /* xor #imm,R0 */
- tcg_gen_xori_i32(REG(0), REG(0), B7_0);
- return;
- case 0xce00: /* xor.b #imm,@(R0,GBR) */
- {
- TCGv addr, val;
- addr = tcg_temp_new();
- tcg_gen_add_i32(addr, REG(0), cpu_gbr);
- val = tcg_temp_new();
+ }
+ return;
+ case 0xca00: /* xor #imm,R0 */
+ tcg_gen_xori_i32(REG(0), REG(0), B7_0);
+ return;
+ case 0xce00: /* xor.b #imm,@(R0,GBR) */
+ {
+ TCGv addr, val;
+ addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(0), cpu_gbr);
+ val = tcg_temp_new();
tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
- tcg_gen_xori_i32(val, val, B7_0);
+ tcg_gen_xori_i32(val, val, B7_0);
tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
- }
- return;
+ }
+ return;
}
switch (ctx->opcode & 0xf08f) {
- case 0x408e: /* ldc Rm,Rn_BANK */
- CHECK_PRIVILEGED
- tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
- return;
- case 0x4087: /* ldc.l @Rm+,Rn_BANK */
- CHECK_PRIVILEGED
+ case 0x408e: /* ldc Rm,Rn_BANK */
+ CHECK_PRIVILEGED
+ tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
+ return;
+ case 0x4087: /* ldc.l @Rm+,Rn_BANK */
+ CHECK_PRIVILEGED
tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx,
MO_TESL | MO_ALIGN);
- tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
- return;
- case 0x0082: /* stc Rm_BANK,Rn */
- CHECK_PRIVILEGED
- tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
- return;
- case 0x4083: /* stc.l Rm_BANK,@-Rn */
- CHECK_PRIVILEGED
- {
- TCGv addr = tcg_temp_new();
- tcg_gen_subi_i32(addr, REG(B11_8), 4);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
+ return;
+ case 0x0082: /* stc Rm_BANK,Rn */
+ CHECK_PRIVILEGED
+ tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
+ return;
+ case 0x4083: /* stc.l Rm_BANK,@-Rn */
+ CHECK_PRIVILEGED
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 4);
tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx,
MO_TEUL | MO_ALIGN);
- tcg_gen_mov_i32(REG(B11_8), addr);
- }
- return;
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ }
+ return;
}
switch (ctx->opcode & 0xf0ff) {
- case 0x0023: /* braf Rn */
- CHECK_NOT_DELAY_SLOT
+ case 0x0023: /* braf Rn */
+ CHECK_NOT_DELAY_SLOT
tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
ctx->envflags |= TB_FLAG_DELAY_SLOT;
- ctx->delayed_pc = (uint32_t) - 1;
- return;
- case 0x0003: /* bsrf Rn */
- CHECK_NOT_DELAY_SLOT
+ ctx->delayed_pc = (uint32_t) - 1;
+ return;
+ case 0x0003: /* bsrf Rn */
+ CHECK_NOT_DELAY_SLOT
tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
- tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
+ tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
ctx->envflags |= TB_FLAG_DELAY_SLOT;
- ctx->delayed_pc = (uint32_t) - 1;
- return;
- case 0x4015: /* cmp/pl Rn */
+ ctx->delayed_pc = (uint32_t) - 1;
+ return;
+ case 0x4015: /* cmp/pl Rn */
tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
- return;
- case 0x4011: /* cmp/pz Rn */
+ return;
+ case 0x4011: /* cmp/pz Rn */
tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
- return;
- case 0x4010: /* dt Rn */
- tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
+ return;
+ case 0x4010: /* dt Rn */
+ tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
- return;
- case 0x402b: /* jmp @Rn */
- CHECK_NOT_DELAY_SLOT
- tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
+ return;
+ case 0x402b: /* jmp @Rn */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
ctx->envflags |= TB_FLAG_DELAY_SLOT;
- ctx->delayed_pc = (uint32_t) - 1;
- return;
- case 0x400b: /* jsr @Rn */
- CHECK_NOT_DELAY_SLOT
+ ctx->delayed_pc = (uint32_t) - 1;
+ return;
+ case 0x400b: /* jsr @Rn */
+ CHECK_NOT_DELAY_SLOT
tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
- tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
+ tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
ctx->envflags |= TB_FLAG_DELAY_SLOT;
- ctx->delayed_pc = (uint32_t) - 1;
- return;
- case 0x400e: /* ldc Rm,SR */
- CHECK_PRIVILEGED
+ ctx->delayed_pc = (uint32_t) - 1;
+ return;
+ case 0x400e: /* ldc Rm,SR */
+ CHECK_PRIVILEGED
{
TCGv val = tcg_temp_new();
tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
gen_write_sr(val);
ctx->base.is_jmp = DISAS_STOP;
}
- return;
- case 0x4007: /* ldc.l @Rm+,SR */
- CHECK_PRIVILEGED
- {
- TCGv val = tcg_temp_new();
+ return;
+ case 0x4007: /* ldc.l @Rm+,SR */
+ CHECK_PRIVILEGED
+ {
+ TCGv val = tcg_temp_new();
tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
MO_TESL | MO_ALIGN);
tcg_gen_andi_i32(val, val, 0x700083f3);
gen_write_sr(val);
- tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
ctx->base.is_jmp = DISAS_STOP;
- }
- return;
- case 0x0002: /* stc SR,Rn */
- CHECK_PRIVILEGED
+ }
+ return;
+ case 0x0002: /* stc SR,Rn */
+ CHECK_PRIVILEGED
gen_read_sr(REG(B11_8));
- return;
- case 0x4003: /* stc SR,@-Rn */
- CHECK_PRIVILEGED
- {
- TCGv addr = tcg_temp_new();
+ return;
+ case 0x4003: /* stc SR,@-Rn */
+ CHECK_PRIVILEGED
+ {
+ TCGv addr = tcg_temp_new();
TCGv val = tcg_temp_new();
- tcg_gen_subi_i32(addr, REG(B11_8), 4);
+ tcg_gen_subi_i32(addr, REG(B11_8), 4);
gen_read_sr(val);
tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
- tcg_gen_mov_i32(REG(B11_8), addr);
- }
- return;
-#define LD(reg,ldnum,ldpnum,prechk) \
- case ldnum: \
- prechk \
- tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
- return; \
- case ldpnum: \
- prechk \
- tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, \
- MO_TESL | MO_ALIGN); \
- tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ }
+ return;
+#define LD(reg,ldnum,ldpnum,prechk) \
+ case ldnum: \
+ prechk \
+ tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
+ return; \
+ case ldpnum: \
+ prechk \
+ tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, \
+ MO_TESL | MO_ALIGN); \
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
return;
-#define ST(reg,stnum,stpnum,prechk) \
- case stnum: \
- prechk \
- tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
- return; \
- case stpnum: \
- prechk \
- { \
- TCGv addr = tcg_temp_new(); \
- tcg_gen_subi_i32(addr, REG(B11_8), 4); \
- tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, \
- MO_TEUL | MO_ALIGN); \
- tcg_gen_mov_i32(REG(B11_8), addr); \
- } \
+#define ST(reg,stnum,stpnum,prechk) \
+ case stnum: \
+ prechk \
+ tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
+ return; \
+ case stpnum: \
+ prechk \
+ { \
+ TCGv addr = tcg_temp_new(); \
+ tcg_gen_subi_i32(addr, REG(B11_8), 4); \
+ tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, \
+ MO_TEUL | MO_ALIGN); \
+ tcg_gen_mov_i32(REG(B11_8), addr); \
+ } \
return;
-#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
- LD(reg,ldnum,ldpnum,prechk) \
- ST(reg,stnum,stpnum,prechk)
- LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
- LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
- LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
- LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
- ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
+#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
+ LD(reg,ldnum,ldpnum,prechk) \
+ ST(reg,stnum,stpnum,prechk)
+ LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
+ LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
+ LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
+ LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
+ ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
- LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
- LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
- LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
- LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
- LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
- case 0x406a: /* lds Rm,FPSCR */
- CHECK_FPU_ENABLED
+ LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
+ LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
+ LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
+ LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
+ LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
+ case 0x406a: /* lds Rm,FPSCR */
+ CHECK_FPU_ENABLED
gen_helper_ld_fpscr(tcg_env, REG(B11_8));
ctx->base.is_jmp = DISAS_STOP;
- return;
- case 0x4066: /* lds.l @Rm+,FPSCR */
- CHECK_FPU_ENABLED
- {
- TCGv addr = tcg_temp_new();
+ return;
+ case 0x4066: /* lds.l @Rm+,FPSCR */
+ CHECK_FPU_ENABLED
+ {
+ TCGv addr = tcg_temp_new();
tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx,
MO_TESL | MO_ALIGN);
- tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
gen_helper_ld_fpscr(tcg_env, addr);
ctx->base.is_jmp = DISAS_STOP;
- }
- return;
- case 0x006a: /* sts FPSCR,Rn */
- CHECK_FPU_ENABLED
- tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
- return;
- case 0x4062: /* sts FPSCR,@-Rn */
- CHECK_FPU_ENABLED
- {
- TCGv addr, val;
- val = tcg_temp_new();
- tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
- addr = tcg_temp_new();
- tcg_gen_subi_i32(addr, REG(B11_8), 4);
+ }
+ return;
+ case 0x006a: /* sts FPSCR,Rn */
+ CHECK_FPU_ENABLED
+ tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
+ return;
+ case 0x4062: /* sts FPSCR,@-Rn */
+ CHECK_FPU_ENABLED
+ {
+ TCGv addr, val;
+ val = tcg_temp_new();
+ tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
+ addr = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 4);
tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
- tcg_gen_mov_i32(REG(B11_8), addr);
- }
- return;
- case 0x00c3: /* movca.l R0,@Rm */
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ }
+ return;
+ case 0x00c3: /* movca.l R0,@Rm */
{
TCGv val = tcg_temp_new();
tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
@@ -1478,23 +1478,23 @@ static void _decode_opc(DisasContext * ctx)
MO_TEUL | MO_ALIGN);
}
ctx->has_movcal = 1;
- return;
- case 0x40a9: /* movua.l @Rm,R0 */
+ return;
+ case 0x40a9: /* movua.l @Rm,R0 */
CHECK_SH4A
/* Load non-boundary-aligned data */
tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
MO_TEUL | MO_UNALN);
return;
- case 0x40e9: /* movua.l @Rm+,R0 */
+ case 0x40e9: /* movua.l @Rm+,R0 */
CHECK_SH4A
/* Load non-boundary-aligned data */
tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
MO_TEUL | MO_UNALN);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
return;
- case 0x0029: /* movt Rn */
+ case 0x0029: /* movt Rn */
tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
- return;
+ return;
case 0x0073:
/* MOVCO.L
* LDST -> T
@@ -1558,182 +1558,182 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_movi_i32(cpu_lock_addr, 0);
}
return;
- case 0x0093: /* ocbi @Rn */
- {
+ case 0x0093: /* ocbi @Rn */
+ {
gen_helper_ocbi(tcg_env, REG(B11_8));
- }
- return;
- case 0x00a3: /* ocbp @Rn */
- case 0x00b3: /* ocbwb @Rn */
+ }
+ return;
+ case 0x00a3: /* ocbp @Rn */
+ case 0x00b3: /* ocbwb @Rn */
/* These instructions are supposed to do nothing in case of
a cache miss. Given that we only partially emulate caches
it is safe to simply ignore them. */
- return;
- case 0x0083: /* pref @Rn */
- return;
- case 0x00d3: /* prefi @Rn */
+ return;
+ case 0x0083: /* pref @Rn */
+ return;
+ case 0x00d3: /* prefi @Rn */
CHECK_SH4A
return;
- case 0x00e3: /* icbi @Rn */
+ case 0x00e3: /* icbi @Rn */
CHECK_SH4A
return;
- case 0x00ab: /* synco */
+ case 0x00ab: /* synco */
CHECK_SH4A
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
return;
- case 0x4024: /* rotcl Rn */
- {
- TCGv tmp = tcg_temp_new();
+ case 0x4024: /* rotcl Rn */
+ {
+ TCGv tmp = tcg_temp_new();
tcg_gen_mov_i32(tmp, cpu_sr_t);
tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
- tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
- }
- return;
- case 0x4025: /* rotcr Rn */
- {
- TCGv tmp = tcg_temp_new();
+ }
+ return;
+ case 0x4025: /* rotcr Rn */
+ {
+ TCGv tmp = tcg_temp_new();
tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
- tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
+ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
- }
- return;
- case 0x4004: /* rotl Rn */
- tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
+ }
+ return;
+ case 0x4004: /* rotl Rn */
+ tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
- return;
- case 0x4005: /* rotr Rn */
+ return;
+ case 0x4005: /* rotr Rn */
tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
- tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
- return;
- case 0x4000: /* shll Rn */
- case 0x4020: /* shal Rn */
+ tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
+ return;
+ case 0x4000: /* shll Rn */
+ case 0x4020: /* shal Rn */
tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
- tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
- return;
- case 0x4021: /* shar Rn */
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
+ return;
+ case 0x4021: /* shar Rn */
tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
- tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
- return;
- case 0x4001: /* shlr Rn */
+ tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
+ return;
+ case 0x4001: /* shlr Rn */
tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
- tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
- return;
- case 0x4008: /* shll2 Rn */
- tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
- return;
- case 0x4018: /* shll8 Rn */
- tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
- return;
- case 0x4028: /* shll16 Rn */
- tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
- return;
- case 0x4009: /* shlr2 Rn */
- tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
- return;
- case 0x4019: /* shlr8 Rn */
- tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
- return;
- case 0x4029: /* shlr16 Rn */
- tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
- return;
- case 0x401b: /* tas.b @Rn */
+ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
+ return;
+ case 0x4008: /* shll2 Rn */
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
+ return;
+ case 0x4018: /* shll8 Rn */
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
+ return;
+ case 0x4028: /* shll16 Rn */
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
+ return;
+ case 0x4009: /* shlr2 Rn */
+ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
+ return;
+ case 0x4019: /* shlr8 Rn */
+ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
+ return;
+ case 0x4029: /* shlr16 Rn */
+ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
+ return;
+ case 0x401b: /* tas.b @Rn */
tcg_gen_atomic_fetch_or_i32(cpu_sr_t, REG(B11_8),
tcg_constant_i32(0x80), ctx->memidx, MO_UB);
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cpu_sr_t, 0);
return;
case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
- CHECK_FPU_ENABLED
+ CHECK_FPU_ENABLED
tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
- return;
+ return;
case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
- CHECK_FPU_ENABLED
+ CHECK_FPU_ENABLED
tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
- return;
+ return;
case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
- CHECK_FPU_ENABLED
+ CHECK_FPU_ENABLED
if (ctx->tbflags & FPSCR_PR) {
- TCGv_i64 fp;
+ TCGv_i64 fp;
if (ctx->opcode & 0x0100) {
goto do_illegal;
}
- fp = tcg_temp_new_i64();
+ fp = tcg_temp_new_i64();
gen_helper_float_DT(fp, tcg_env, cpu_fpul);
gen_store_fpr64(ctx, fp, B11_8);
- }
- else {
+ }
+ else {
gen_helper_float_FT(FREG(B11_8), tcg_env, cpu_fpul);
- }
- return;
+ }
+ return;
case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
- CHECK_FPU_ENABLED
+ CHECK_FPU_ENABLED
if (ctx->tbflags & FPSCR_PR) {
- TCGv_i64 fp;
+ TCGv_i64 fp;
if (ctx->opcode & 0x0100) {
goto do_illegal;
}
- fp = tcg_temp_new_i64();
+ fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, B11_8);
gen_helper_ftrc_DT(cpu_fpul, tcg_env, fp);
- }
- else {
+ }
+ else {
gen_helper_ftrc_FT(cpu_fpul, tcg_env, FREG(B11_8));
- }
- return;
+ }
+ return;
case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
- CHECK_FPU_ENABLED
+ CHECK_FPU_ENABLED
tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
- return;
+ return;
case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */
- CHECK_FPU_ENABLED
+ CHECK_FPU_ENABLED
tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
- return;
+ return;
case 0xf06d: /* fsqrt FRn */
- CHECK_FPU_ENABLED
+ CHECK_FPU_ENABLED
if (ctx->tbflags & FPSCR_PR) {
if (ctx->opcode & 0x0100) {
goto do_illegal;
}
- TCGv_i64 fp = tcg_temp_new_i64();
+ TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, B11_8);
gen_helper_fsqrt_DT(fp, tcg_env, fp);
gen_store_fpr64(ctx, fp, B11_8);
- } else {
+ } else {
gen_helper_fsqrt_FT(FREG(B11_8), tcg_env, FREG(B11_8));
- }
- return;
+ }
+ return;
case 0xf07d: /* fsrra FRn */
- CHECK_FPU_ENABLED
+ CHECK_FPU_ENABLED
CHECK_FPSCR_PR_0
gen_helper_fsrra_FT(FREG(B11_8), tcg_env, FREG(B11_8));
- break;
+ break;
case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
- CHECK_FPU_ENABLED
+ CHECK_FPU_ENABLED
CHECK_FPSCR_PR_0
tcg_gen_movi_i32(FREG(B11_8), 0);
return;
case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
- CHECK_FPU_ENABLED
+ CHECK_FPU_ENABLED
CHECK_FPSCR_PR_0
tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
return;
case 0xf0ad: /* fcnvsd FPUL,DRn */
- CHECK_FPU_ENABLED
- {
- TCGv_i64 fp = tcg_temp_new_i64();
+ CHECK_FPU_ENABLED
+ {
+ TCGv_i64 fp = tcg_temp_new_i64();
gen_helper_fcnvsd_FT_DT(fp, tcg_env, cpu_fpul);
gen_store_fpr64(ctx, fp, B11_8);
- }
- return;
+ }
+ return;
case 0xf0bd: /* fcnvds DRn,FPUL */
- CHECK_FPU_ENABLED
- {
- TCGv_i64 fp = tcg_temp_new_i64();
+ CHECK_FPU_ENABLED
+ {
+ TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, B11_8);
gen_helper_fcnvds_DT_FT(cpu_fpul, tcg_env, fp);
- }
- return;
+ }
+ return;
case 0xf0ed: /* fipr FVm,FVn */
CHECK_FPU_ENABLED
CHECK_FPSCR_PR_1
@@ -1808,10 +1808,10 @@ static void decode_opc(DisasContext * ctx)
tcg_gen_movi_i32(cpu_flags, ctx->envflags);
if (old_flags & TB_FLAG_DELAY_SLOT_COND) {
- gen_delayed_conditional_jump(ctx);
+ gen_delayed_conditional_jump(ctx);
} else {
gen_jump(ctx);
- }
+ }
}
}