author    Nikunj A Dadhania <nikunj@linux.vnet.ibm.com>  2016-09-12 12:11:34 +0530
committer David Gibson <david@gibson.dropbear.id.au>     2016-09-23 10:29:40 +1000
commit    2468f23dcb5b74ef882f3a3a5dd0a00bf0a71bb7 (patch)
tree      f1f4272a95f316b305535f9fef0a1908ca70a096 /target-ppc
parent    761a89c6419132e612f7c9f773ea42872134bb88 (diff)
target-ppc: convert st64 to use new macro
Use the macro for st64 as well; this changes the function signature from
gen_qemu_st64 to gen_qemu_st64_i64. Replace it at all call sites.

Signed-off-by: Nikunj A Dadhania <nikunj@linux.vnet.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
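For context, a sketch of what GEN_QEMU_STORE_64 plausibly expands to, inferred
from the hunk context below: only the tail of the macro is visible in this
patch, so the exact body and the parameter names (val, addr) are assumptions,
and DEF_MEMOP is assumed to OR in ctx->default_tcg_memop_mask as the removed
open-coded gen_qemu_st64() helper did.

    /* Assumed shape of the macro; the real definition lives earlier in
     * target-ppc/translate.c and is not part of this diff. */
    #define GEN_QEMU_STORE_64(stop, op)                                  \
    static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx,     \
                                                  TCGv_i64 val,          \
                                                  TCGv addr)             \
    {                                                                    \
        tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op);                \
    }

    /* Under this reading, GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q)) defines
     * gen_qemu_st64_i64(ctx, val, addr), equivalent to the removed
     * gen_qemu_st64(), which used MO_Q | ctx->default_tcg_memop_mask. */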
Diffstat (limited to 'target-ppc')
-rw-r--r--  target-ppc/translate.c               37
-rw-r--r--  target-ppc/translate/fp-impl.inc.c   42
-rw-r--r--  target-ppc/translate/fp-ops.inc.c     2
-rw-r--r--  target-ppc/translate/spe-impl.inc.c   2
-rw-r--r--  target-ppc/translate/vmx-impl.inc.c  12
-rw-r--r--  target-ppc/translate/vsx-impl.inc.c   6
6 files changed, 48 insertions, 53 deletions
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index f228ae0ff4..254ad4092e 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -2519,12 +2519,7 @@ static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \
}
GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
-
-static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
-{
- TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
-}
+GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q))
#define GEN_LD(name, ldop, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
@@ -2769,9 +2764,9 @@ GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
/* stw stwu stwux stwx */
GEN_STS(stw, st32, 0x04, PPC_INTEGER);
#if defined(TARGET_PPC64)
-GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
-GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
-GEN_STX_HVRM(stdcix, st64, 0x15, 0x1f, PPC_CILDST)
+GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B);
+GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B);
+GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
@@ -2808,16 +2803,16 @@ static void gen_std(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
- /* We only need to swap high and low halves. gen_qemu_st64 does
+ /* We only need to swap high and low halves. gen_qemu_st64_i64 does
necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
- gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
} else {
- gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
}
tcg_temp_free(EA);
} else {
@@ -2831,7 +2826,7 @@ static void gen_std(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
- gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
if (Rc(ctx->opcode))
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
tcg_temp_free(EA);
@@ -3113,7 +3108,7 @@ static void gen_conditional_store(DisasContext *ctx, TCGv EA,
tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
#if defined(TARGET_PPC64)
if (size == 8) {
- gen_qemu_st64(ctx, cpu_gpr[reg], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[reg], EA);
} else
#endif
if (size == 4) {
@@ -3130,10 +3125,10 @@ static void gen_conditional_store(DisasContext *ctx, TCGv EA,
gpr1 = cpu_gpr[reg];
gpr2 = cpu_gpr[reg+1];
}
- gen_qemu_st64(ctx, gpr1, EA);
+ gen_qemu_st64_i64(ctx, gpr1, EA);
EA8 = tcg_temp_local_new();
gen_addr_add(ctx, EA8, EA, 8);
- gen_qemu_st64(ctx, gpr2, EA8);
+ gen_qemu_st64_i64(ctx, gpr2, EA8);
tcg_temp_free(EA8);
#endif
} else {
@@ -6622,10 +6617,10 @@ GEN_STS(stb, st8, 0x06, PPC_INTEGER)
GEN_STS(sth, st16, 0x0C, PPC_INTEGER)
GEN_STS(stw, st32, 0x04, PPC_INTEGER)
#if defined(TARGET_PPC64)
-GEN_STUX(std, st64, 0x15, 0x05, PPC_64B)
-GEN_STX(std, st64, 0x15, 0x04, PPC_64B)
+GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B)
+GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B)
GEN_STX_E(stdbr, st64r, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
-GEN_STX_HVRM(stdcix, st64, 0x15, 0x1f, PPC_CILDST)
+GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
diff --git a/target-ppc/translate/fp-impl.inc.c b/target-ppc/translate/fp-impl.inc.c
index 53b7fc7445..872af7b56f 100644
--- a/target-ppc/translate/fp-impl.inc.c
+++ b/target-ppc/translate/fp-impl.inc.c
@@ -848,7 +848,7 @@ static inline void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
}
/* stfd stfdu stfdux stfdx */
-GEN_STFS(stfd, st64, 0x16, PPC_FLOAT);
+GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT);
/* stfs stfsu stfsux stfsx */
GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
@@ -863,16 +863,16 @@ static void gen_stfdp(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0);
- /* We only need to swap high and low halves. gen_qemu_st64 does necessary
- 64-bit byteswap already. */
+ /* We only need to swap high and low halves. gen_qemu_st64_i64 does
+ necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
} else {
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
}
tcg_temp_free(EA);
}
@@ -888,16 +888,16 @@ static void gen_stfdpx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- /* We only need to swap high and low halves. gen_qemu_st64 does necessary
- 64-bit byteswap already. */
+ /* We only need to swap high and low halves. gen_qemu_st64_i64 does
+ necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
} else {
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
}
tcg_temp_free(EA);
}
@@ -990,9 +990,9 @@ static void gen_stfq(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_imm_index(ctx, t0, 0);
- gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
gen_addr_add(ctx, t0, t0, 8);
- gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
@@ -1005,10 +1005,10 @@ static void gen_stfqu(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_imm_index(ctx, t0, 0);
- gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
t1 = tcg_temp_new();
gen_addr_add(ctx, t1, t0, 8);
- gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
tcg_temp_free(t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
@@ -1024,10 +1024,10 @@ static void gen_stfqux(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
t1 = tcg_temp_new();
gen_addr_add(ctx, t1, t0, 8);
- gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
tcg_temp_free(t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
@@ -1042,9 +1042,9 @@ static void gen_stfqx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
gen_addr_add(ctx, t0, t0, 8);
- gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
diff --git a/target-ppc/translate/fp-ops.inc.c b/target-ppc/translate/fp-ops.inc.c
index 291a1e62d0..d36ab4ecc3 100644
--- a/target-ppc/translate/fp-ops.inc.c
+++ b/target-ppc/translate/fp-ops.inc.c
@@ -85,7 +85,7 @@ GEN_STUF(name, stop, op | 0x21, type) \
GEN_STUXF(name, stop, op | 0x01, type) \
GEN_STXF(name, stop, 0x17, op | 0x00, type)
-GEN_STFS(stfd, st64, 0x16, PPC_FLOAT)
+GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT)
GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT)
GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX)
GEN_HANDLER_E(stfdp, 0x3D, 0xFF, 0xFF, 0x00200003, PPC_NONE, PPC2_ISA205),
diff --git a/target-ppc/translate/spe-impl.inc.c b/target-ppc/translate/spe-impl.inc.c
index a969927e9b..8c1c16c63e 100644
--- a/target-ppc/translate/spe-impl.inc.c
+++ b/target-ppc/translate/spe-impl.inc.c
@@ -725,7 +725,7 @@ static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr)
{
TCGv_i64 t0 = tcg_temp_new_i64();
gen_load_gpr64(t0, rS(ctx->opcode));
- gen_qemu_st64(ctx, t0, addr);
+ gen_qemu_st64_i64(ctx, t0, addr);
tcg_temp_free_i64(t0);
}
diff --git a/target-ppc/translate/vmx-impl.inc.c b/target-ppc/translate/vmx-impl.inc.c
index eb3164bbb8..3ce374d891 100644
--- a/target-ppc/translate/vmx-impl.inc.c
+++ b/target-ppc/translate/vmx-impl.inc.c
@@ -52,16 +52,16 @@ static void gen_st##name(DisasContext *ctx) \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
tcg_gen_andi_tl(EA, EA, ~0xf); \
- /* We only need to swap high and low halves. gen_qemu_st64 does necessary \
- 64-bit byteswap already. */ \
+ /* We only need to swap high and low halves. gen_qemu_st64_i64 does \
+ necessary 64-bit byteswap already. */ \
if (ctx->le_mode) { \
- gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ gen_qemu_st64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ gen_qemu_st64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
} else { \
- gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ gen_qemu_st64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ gen_qemu_st64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
} \
tcg_temp_free(EA); \
}
diff --git a/target-ppc/translate/vsx-impl.inc.c b/target-ppc/translate/vsx-impl.inc.c
index 572579403f..99cabb2f11 100644
--- a/target-ppc/translate/vsx-impl.inc.c
+++ b/target-ppc/translate/vsx-impl.inc.c
@@ -115,7 +115,7 @@ static void gen_##name(DisasContext *ctx) \
tcg_temp_free(EA); \
}
-VSX_STORE_SCALAR(stxsdx, st64)
+VSX_STORE_SCALAR(stxsdx, st64_i64)
VSX_STORE_SCALAR(stxsiwx, st32_i64)
VSX_STORE_SCALAR(stxsspx, st32fs)
@@ -129,9 +129,9 @@ static void gen_stxvd2x(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- gen_qemu_st64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
+ gen_qemu_st64_i64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
+ gen_qemu_st64_i64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
tcg_temp_free(EA);
}