| | | |
|---|---|---|
| author | Richard Henderson <richard.henderson@linaro.org> | 2021-07-30 19:56:06 -1000 |
| committer | Richard Henderson <richard.henderson@linaro.org> | 2021-10-13 09:18:11 -0700 |
| commit | 452635318b78b98f0ef2586463334565674cb9e6 (patch) | |
| tree | b2507a14d3c8c600da2a5b42ecd4e45ea8767acf /target/alpha | |
| parent | ee26ce674a93c824713542cec3b6a9ca85459165 (diff) | |
target/alpha: Reorg fp memory operations
Pass in the context to each mini-helper, instead of an
incorrectly named "flags". Separate gen_load_fp and
gen_store_fp, away from the integer helpers.
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
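
The shape of the change in the diff below: each per-format mini-helper now receives the DisasContext (from which it reads ctx->mem_idx itself) rather than a misnamed "flags" integer, and the new gen_load_fp/gen_store_fp wrappers compute the effective address from rb and disp16, handle the $f31 prefetch case, and dispatch through a function pointer. As a rough illustration of that dispatch pattern only (not QEMU code), here is a minimal standalone C sketch; Context, Value, do_load_f, do_load_t, and load_fp are hypothetical stand-ins for DisasContext, TCGv, the gen_ld* helpers, and gen_load_fp.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for QEMU's DisasContext and TCGv; this sketches the
 * dispatch pattern used in the patch, it is not QEMU code. */
typedef struct {
    int mem_idx;   /* what the old helpers received as a misnamed "flags" */
} Context;

typedef int64_t Value;

/* Per-format mini-helpers: context, destination register, effective address. */
static void do_load_f(Context *ctx, Value *dest, Value addr)
{
    printf("F-float load at %#llx, mem_idx=%d\n",
           (unsigned long long)addr, ctx->mem_idx);
    *dest = 0;     /* a real helper would load and convert the value */
}

static void do_load_t(Context *ctx, Value *dest, Value addr)
{
    printf("T-float load at %#llx, mem_idx=%d\n",
           (unsigned long long)addr, ctx->mem_idx);
    *dest = 0;
}

/* Shared wrapper, analogous to gen_load_fp: compute base + disp16, skip the
 * prefetch case, and dispatch to the format-specific helper. */
static void load_fp(Context *ctx, Value fpr[32], int ra, Value base,
                    int32_t disp16, void (*func)(Context *, Value *, Value))
{
    if (ra != 31) {            /* loads to register 31 act as prefetch nops */
        Value addr = base + disp16;
        func(ctx, &fpr[ra], addr);
    }
}

int main(void)
{
    Context ctx = { .mem_idx = 1 };
    Value fpr[32] = { 0 };

    load_fp(&ctx, fpr, 2, 0x1000, 8, do_load_f);    /* LDF-like case */
    load_fp(&ctx, fpr, 3, 0x1000, 16, do_load_t);   /* LDT-like case */
    load_fp(&ctx, fpr, 31, 0x1000, 24, do_load_t);  /* prefetch: nothing emitted */
    return 0;
}
```

The design point carried over from the patch is that the address arithmetic and the $f31 special case live in one place, while each format keeps only its own conversion logic.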
Diffstat (limited to 'target/alpha')
-rw-r--r-- | target/alpha/translate.c | 83 |
1 file changed, 57 insertions, 26 deletions
```diff
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index b034206688..bfdd485508 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -267,30 +267,47 @@ static inline DisasJumpType gen_invalid(DisasContext *ctx)
     return gen_excp(ctx, EXCP_OPCDEC, 0);
 }
 
-static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
+static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
 {
     TCGv_i32 tmp32 = tcg_temp_new_i32();
-    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
-    gen_helper_memory_to_f(t0, tmp32);
+    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+    gen_helper_memory_to_f(dest, tmp32);
     tcg_temp_free_i32(tmp32);
 }
 
-static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
+static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
 {
     TCGv tmp = tcg_temp_new();
-    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
-    gen_helper_memory_to_g(t0, tmp);
+    tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEQ);
+    gen_helper_memory_to_g(dest, tmp);
     tcg_temp_free(tmp);
 }
 
-static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
+static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
 {
     TCGv_i32 tmp32 = tcg_temp_new_i32();
-    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
-    gen_helper_memory_to_s(t0, tmp32);
+    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+    gen_helper_memory_to_s(dest, tmp32);
     tcg_temp_free_i32(tmp32);
 }
 
+static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
+{
+    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEQ);
+}
+
+static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
+                        void (*func)(DisasContext *, TCGv, TCGv))
+{
+    /* Loads to $f31 are prefetches, which we can treat as nops. */
+    if (likely(ra != 31)) {
+        TCGv addr = tcg_temp_new();
+        tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
+        func(ctx, cpu_fir[ra], addr);
+        tcg_temp_free(addr);
+    }
+}
+
 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
 {
     tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
@@ -338,30 +355,44 @@ static inline void gen_load_mem(DisasContext *ctx,
     tcg_temp_free(tmp);
 }
 
-static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
+static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
 {
     TCGv_i32 tmp32 = tcg_temp_new_i32();
-    gen_helper_f_to_memory(tmp32, t0);
-    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
+    gen_helper_f_to_memory(tmp32, src);
+    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
     tcg_temp_free_i32(tmp32);
 }
 
-static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
+static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr)
 {
     TCGv tmp = tcg_temp_new();
-    gen_helper_g_to_memory(tmp, t0);
-    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
+    gen_helper_g_to_memory(tmp, src);
+    tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEQ);
     tcg_temp_free(tmp);
 }
 
-static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
+static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr)
 {
     TCGv_i32 tmp32 = tcg_temp_new_i32();
-    gen_helper_s_to_memory(tmp32, t0);
-    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
+    gen_helper_s_to_memory(tmp32, src);
+    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
     tcg_temp_free_i32(tmp32);
 }
 
+static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
+{
+    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEQ);
+}
+
+static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
+                         void (*func)(DisasContext *, TCGv, TCGv))
+{
+    TCGv addr = tcg_temp_new();
+    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
+    func(ctx, load_fpr(ctx, ra), addr);
+    tcg_temp_free(addr);
+}
+
 static inline void gen_store_mem(DisasContext *ctx,
                                  void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                             int flags),
@@ -2776,42 +2807,42 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
     case 0x20:
         /* LDF */
         REQUIRE_FEN;
-        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
+        gen_load_fp(ctx, ra, rb, disp16, gen_ldf);
         break;
     case 0x21:
         /* LDG */
         REQUIRE_FEN;
-        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
+        gen_load_fp(ctx, ra, rb, disp16, gen_ldg);
         break;
     case 0x22:
         /* LDS */
        REQUIRE_FEN;
-        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
+        gen_load_fp(ctx, ra, rb, disp16, gen_lds);
         break;
     case 0x23:
         /* LDT */
         REQUIRE_FEN;
-        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
+        gen_load_fp(ctx, ra, rb, disp16, gen_ldt);
         break;
     case 0x24:
         /* STF */
         REQUIRE_FEN;
-        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
+        gen_store_fp(ctx, ra, rb, disp16, gen_stf);
         break;
     case 0x25:
         /* STG */
         REQUIRE_FEN;
-        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
+        gen_store_fp(ctx, ra, rb, disp16, gen_stg);
         break;
     case 0x26:
         /* STS */
         REQUIRE_FEN;
-        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
+        gen_store_fp(ctx, ra, rb, disp16, gen_sts);
         break;
     case 0x27:
         /* STT */
         REQUIRE_FEN;
-        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
+        gen_store_fp(ctx, ra, rb, disp16, gen_stt);
         break;
     case 0x28:
         /* LDL */
```