author     Richard Henderson <rth@twiddle.net>   2016-07-14 11:47:08 -0700
committer  Richard Henderson <rth@twiddle.net>   2016-10-31 14:46:47 -0600
commit     f939ffe5a022a8798824e2720ed5a14186fca6b6 (patch)
tree       e0af32c3702ed416af9e1aebfb76803e987168eb /target-sparc
parent     918d9a2c9d36378a3cf6636018900a4731c83b9d (diff)
target-sparc: Implement ldqf and stqf inline
At the same time, fix a problem with stqf_asi when a write might access two pages.
Tested-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: Richard Henderson <rth@twiddle.net>
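
The page-crossing problem mentioned above comes from stqf being a 16-byte store emulated as two 8-byte writes: without extra care, the first write can succeed on one page while the second faults on the next. Requiring 16-byte alignment (MO_ALIGN_16) on the first half sidesteps this, because an aligned 16-byte access can never straddle a page boundary. Below is a minimal standalone sketch of that arithmetic, assuming a hypothetical 4 KiB page size; it is an illustration only, not QEMU code.

    /* Sketch: a 16-byte-aligned 16-byte access never crosses a page,
     * so once the first 8-byte store succeeds, the second cannot fault
     * on a different page.  PAGE_SIZE is a hypothetical 4 KiB page;
     * any page size that is a multiple of 16 behaves the same way. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static unsigned page_of(uint64_t addr)
    {
        return (unsigned)(addr / PAGE_SIZE);
    }

    int main(void)
    {
        /* An unaligned 16-byte access may touch two pages... */
        uint64_t bad = PAGE_SIZE - 8;   /* starts 8 bytes before a boundary */
        printf("unaligned: first half in page %u, second half in page %u\n",
               page_of(bad), page_of(bad + 8));

        /* ...but a 16-byte-aligned one never does. */
        for (uint64_t addr = 0; addr < 16 * PAGE_SIZE; addr += 16) {
            assert(page_of(addr) == page_of(addr + 15));
        }
        printf("all 16-byte-aligned accesses stay within a single page\n");
        return 0;
    }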
Diffstat (limited to 'target-sparc')
-rw-r--r--  target-sparc/helper.h       |  2
-rw-r--r--  target-sparc/ldst_helper.c  | 97
-rw-r--r--  target-sparc/translate.c    | 79
3 files changed, 67 insertions, 111 deletions
diff --git a/target-sparc/helper.h b/target-sparc/helper.h
index b7bff84efa..0cf1bfb73a 100644
--- a/target-sparc/helper.h
+++ b/target-sparc/helper.h
@@ -38,8 +38,6 @@ DEF_HELPER_3(tsubcctv, tl, env, tl, tl)
 DEF_HELPER_FLAGS_3(sdivx, TCG_CALL_NO_WG, s64, env, s64, s64)
 DEF_HELPER_FLAGS_3(udivx, TCG_CALL_NO_WG, i64, env, i64, i64)
 #endif
-DEF_HELPER_FLAGS_3(ldqf, TCG_CALL_NO_WG, void, env, tl, int)
-DEF_HELPER_FLAGS_3(stqf, TCG_CALL_NO_WG, void, env, tl, int)
 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
 DEF_HELPER_FLAGS_4(ld_asi, TCG_CALL_NO_WG, i64, env, tl, int, i32)
 DEF_HELPER_FLAGS_5(st_asi, TCG_CALL_NO_WG, void, env, tl, i64, int, i32)
diff --git a/target-sparc/ldst_helper.c b/target-sparc/ldst_helper.c
index 725bb49e9d..de7d53ae20 100644
--- a/target-sparc/ldst_helper.c
+++ b/target-sparc/ldst_helper.c
@@ -254,18 +254,6 @@ static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
 
 #endif
 
-#if defined(TARGET_SPARC64) || defined(CONFIG_USER_ONLY)
-static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr)
-{
-#ifdef TARGET_SPARC64
-    if (AM_CHECK(env1)) {
-        addr &= 0xffffffffULL;
-    }
-#endif
-    return addr;
-}
-#endif
-
 #ifdef TARGET_SPARC64
 /* returns true if access using this ASI is to have address translated by MMU
    otherwise access is to raw physical address */
@@ -290,14 +278,21 @@ static inline int is_translating_asi(int asi)
     }
 }
 
+static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr)
+{
+    if (AM_CHECK(env1)) {
+        addr &= 0xffffffffULL;
+    }
+    return addr;
+}
+
 static inline target_ulong asi_address_mask(CPUSPARCState *env,
                                             int asi, target_ulong addr)
 {
     if (is_translating_asi(asi)) {
-        return address_mask(env, addr);
-    } else {
-        return addr;
+        addr = address_mask(env, addr);
     }
+    return addr;
 }
 #endif
 
@@ -1601,78 +1596,6 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
 #endif /* CONFIG_USER_ONLY */
 #endif /* TARGET_SPARC64 */
 
-void helper_ldqf(CPUSPARCState *env, target_ulong addr, int mem_idx)
-{
-    /* XXX add 128 bit load */
-    CPU_QuadU u;
-
-    do_check_align(env, addr, 7, GETPC());
-#if !defined(CONFIG_USER_ONLY)
-    switch (mem_idx) {
-    case MMU_USER_IDX:
-        u.ll.upper = cpu_ldq_user(env, addr);
-        u.ll.lower = cpu_ldq_user(env, addr + 8);
-        QT0 = u.q;
-        break;
-    case MMU_KERNEL_IDX:
-        u.ll.upper = cpu_ldq_kernel(env, addr);
-        u.ll.lower = cpu_ldq_kernel(env, addr + 8);
-        QT0 = u.q;
-        break;
-#ifdef TARGET_SPARC64
-    case MMU_HYPV_IDX:
-        u.ll.upper = cpu_ldq_hypv(env, addr);
-        u.ll.lower = cpu_ldq_hypv(env, addr + 8);
-        QT0 = u.q;
-        break;
-#endif
-    default:
-        DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx);
-        break;
-    }
-#else
-    u.ll.upper = cpu_ldq_data(env, address_mask(env, addr));
-    u.ll.lower = cpu_ldq_data(env, address_mask(env, addr + 8));
-    QT0 = u.q;
-#endif
-}
-
-void helper_stqf(CPUSPARCState *env, target_ulong addr, int mem_idx)
-{
-    /* XXX add 128 bit store */
-    CPU_QuadU u;
-
-    do_check_align(env, addr, 7, GETPC());
-#if !defined(CONFIG_USER_ONLY)
-    switch (mem_idx) {
-    case MMU_USER_IDX:
-        u.q = QT0;
-        cpu_stq_user(env, addr, u.ll.upper);
-        cpu_stq_user(env, addr + 8, u.ll.lower);
-        break;
-    case MMU_KERNEL_IDX:
-        u.q = QT0;
-        cpu_stq_kernel(env, addr, u.ll.upper);
-        cpu_stq_kernel(env, addr + 8, u.ll.lower);
-        break;
-#ifdef TARGET_SPARC64
-    case MMU_HYPV_IDX:
-        u.q = QT0;
-        cpu_stq_hypv(env, addr, u.ll.upper);
-        cpu_stq_hypv(env, addr + 8, u.ll.lower);
-        break;
-#endif
-    default:
-        DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx);
-        break;
-    }
-#else
-    u.q = QT0;
-    cpu_stq_data(env, address_mask(env, addr), u.ll.upper);
-    cpu_stq_data(env, address_mask(env, addr + 8), u.ll.lower);
-#endif
-}
-
 #if !defined(CONFIG_USER_ONLY)
 #ifndef TARGET_SPARC64
 void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index ecdc7e1d8a..a869d7d6dc 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -242,7 +242,29 @@ static void gen_op_store_QT0_fpr(unsigned int dst)
                    offsetof(CPU_QuadU, ll.lower));
 }
 
+static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
+                            TCGv_i64 v1, TCGv_i64 v2)
+{
+    dst = QFPREG(dst);
+
+    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
+    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
+    gen_update_fprs_dirty(dc, dst);
+}
+
 #ifdef TARGET_SPARC64
+static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
+{
+    src = QFPREG(src);
+    return cpu_fpr[src / 2];
+}
+
+static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
+{
+    src = QFPREG(src);
+    return cpu_fpr[src / 2 + 1];
+}
+
 static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
 {
     rd = QFPREG(rd);
@@ -2557,10 +2579,17 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
         tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
         break;
     case 8:
+        /* ??? Only 4-byte alignment required.  However, it is legal
+           for the cpu to signal the alignment fault, and the OS trap
+           handler is required to fix it up.  */
         tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
         break;
     case 16:
-        tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+        /* Only 4-byte alignment required.  See above.  Requiring
+           16-byte alignment here avoids having to probe the second
+           page before performing the first write.  */
+        tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+                            da.memop | MO_ALIGN_16);
         tcg_gen_addi_tl(addr, addr, 8);
         tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
         break;
@@ -5426,17 +5455,16 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                 gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
                 break;
             case 0x22:      /* ldqf, load quad fpreg */
-                {
-                    TCGv_i32 r_const;
-
-                    CHECK_FPU_FEATURE(dc, FLOAT128);
-                    r_const = tcg_const_i32(dc->mem_idx);
-                    gen_address_mask(dc, cpu_addr);
-                    gen_helper_ldqf(cpu_env, cpu_addr, r_const);
-                    tcg_temp_free_i32(r_const);
-                    gen_op_store_QT0_fpr(QFPREG(rd));
-                    gen_update_fprs_dirty(dc, QFPREG(rd));
-                }
+                CHECK_FPU_FEATURE(dc, FLOAT128);
+                gen_address_mask(dc, cpu_addr);
+                cpu_src1_64 = tcg_temp_new_i64();
+                tcg_gen_qemu_ld64(cpu_src1_64, cpu_addr, dc->mem_idx);
+                tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
+                cpu_src2_64 = tcg_temp_new_i64();
+                tcg_gen_qemu_ld64(cpu_src2_64, cpu_addr, dc->mem_idx);
+                gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
+                tcg_temp_free_i64(cpu_src1_64);
+                tcg_temp_free_i64(cpu_src2_64);
                 break;
             case 0x23:      /* lddf, load double fpreg */
                 gen_address_mask(dc, cpu_addr);
@@ -5537,16 +5565,20 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
             case 0x26:
#ifdef TARGET_SPARC64
                 /* V9 stqf, store quad fpreg */
-                {
-                    TCGv_i32 r_const;
-
-                    CHECK_FPU_FEATURE(dc, FLOAT128);
-                    gen_op_load_fpr_QT0(QFPREG(rd));
-                    r_const = tcg_const_i32(dc->mem_idx);
-                    gen_address_mask(dc, cpu_addr);
-                    gen_helper_stqf(cpu_env, cpu_addr, r_const);
-                    tcg_temp_free_i32(r_const);
-                }
+                CHECK_FPU_FEATURE(dc, FLOAT128);
+                gen_address_mask(dc, cpu_addr);
+                /* ??? While stqf only requires 4-byte alignment, it is
+                   legal for the cpu to signal the unaligned exception.
+                   The OS trap handler is then required to fix it up.
+                   For qemu, this avoids having to probe the second page
+                   before performing the first write.  */
+                cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
+                tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
+                                    dc->mem_idx, MO_TEQ | MO_ALIGN_16);
+                tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
+                cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
+                tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
+                                    dc->mem_idx, MO_TEQ);
                 break;
#else /* !TARGET_SPARC64 */
                 /* stdfq, store floating point queue */
@@ -5562,6 +5594,9 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
#endif
#endif
             case 0x27: /* stdf, store double fpreg */
+                /* ??? Only 4-byte alignment required.  However, it is
+                   legal for the cpu to signal the alignment fault, and
+                   the OS trap handler is required to fix it up.  */
                 gen_address_mask(dc, cpu_addr);
                 cpu_src1_64 = gen_load_fpr_D(dc, rd);
                 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
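
For reference, the in-memory layout that both the removed helpers and the new inline translation implement is the same: the upper 64 bits of the quad register come from addr and the lower 64 bits from addr + 8, both big-endian (MO_TEQ on this big-endian target). A small host-side sketch of that layout follows; the names are hypothetical and it does not use QEMU's actual load path.

    /* Host-side illustration only: a SPARC quad FP value occupies 16 bytes
     * in memory, upper 64 bits first, both halves big-endian. */
    #include <stdint.h>
    #include <stdio.h>

    struct quad {
        uint64_t upper;   /* corresponds to cpu_fpr[QFPREG(rd) / 2]     */
        uint64_t lower;   /* corresponds to cpu_fpr[QFPREG(rd) / 2 + 1] */
    };

    static uint64_t load_be64(const uint8_t *p)
    {
        uint64_t v = 0;
        for (int i = 0; i < 8; i++) {
            v = (v << 8) | p[i];
        }
        return v;
    }

    static struct quad load_quad(const uint8_t *mem)
    {
        struct quad q;
        q.upper = load_be64(mem);       /* first 8-byte load  */
        q.lower = load_be64(mem + 8);   /* second 8-byte load */
        return q;
    }

    int main(void)
    {
        uint8_t buf[16];
        for (int i = 0; i < 16; i++) {
            buf[i] = (uint8_t)i;
        }
        struct quad q = load_quad(buf);
        printf("upper=%016llx lower=%016llx\n",
               (unsigned long long)q.upper, (unsigned long long)q.lower);
        return 0;
    }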