Diffstat (limited to 'target/ppc/translate/vmx-impl.c.inc')
-rw-r--r--  target/ppc/translate/vmx-impl.c.inc | 130
1 file changed, 68 insertions(+), 62 deletions(-)
diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
index 05ba9c9492..112233b541 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -171,53 +171,56 @@ static void gen_mtvscr(DisasContext *ctx)
gen_helper_mtvscr(cpu_env, val);
}
+static void gen_vx_vmul10(DisasContext *ctx, bool add_cin, bool ret_carry)
+{
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ TCGv_i64 t2;
+ TCGv_i64 avr;
+ TCGv_i64 ten, z;
+
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ avr = tcg_temp_new_i64();
+ ten = tcg_constant_i64(10);
+ z = tcg_constant_i64(0);
+
+ if (add_cin) {
+ get_avr64(avr, rA(ctx->opcode), false);
+ tcg_gen_mulu2_i64(t0, t1, avr, ten);
+ get_avr64(avr, rB(ctx->opcode), false);
+ tcg_gen_andi_i64(t2, avr, 0xF);
+ tcg_gen_add2_i64(avr, t2, t0, t1, t2, z);
+ set_avr64(rD(ctx->opcode), avr, false);
+ } else {
+ get_avr64(avr, rA(ctx->opcode), false);
+ tcg_gen_mulu2_i64(avr, t2, avr, ten);
+ set_avr64(rD(ctx->opcode), avr, false);
+ }
+
+ if (ret_carry) {
+ get_avr64(avr, rA(ctx->opcode), true);
+ tcg_gen_mulu2_i64(t0, t1, avr, ten);
+ tcg_gen_add2_i64(t0, avr, t0, t1, t2, z);
+ set_avr64(rD(ctx->opcode), avr, false);
+ set_avr64(rD(ctx->opcode), z, true);
+ } else {
+ get_avr64(avr, rA(ctx->opcode), true);
+ tcg_gen_mul_i64(t0, avr, ten);
+ tcg_gen_add_i64(avr, t0, t2);
+ set_avr64(rD(ctx->opcode), avr, true);
+ }
+}
+
#define GEN_VX_VMUL10(name, add_cin, ret_carry) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv_i64 t0; \
- TCGv_i64 t1; \
- TCGv_i64 t2; \
- TCGv_i64 avr; \
- TCGv_i64 ten, z; \
- \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- \
- t0 = tcg_temp_new_i64(); \
- t1 = tcg_temp_new_i64(); \
- t2 = tcg_temp_new_i64(); \
- avr = tcg_temp_new_i64(); \
- ten = tcg_const_i64(10); \
- z = tcg_const_i64(0); \
- \
- if (add_cin) { \
- get_avr64(avr, rA(ctx->opcode), false); \
- tcg_gen_mulu2_i64(t0, t1, avr, ten); \
- get_avr64(avr, rB(ctx->opcode), false); \
- tcg_gen_andi_i64(t2, avr, 0xF); \
- tcg_gen_add2_i64(avr, t2, t0, t1, t2, z); \
- set_avr64(rD(ctx->opcode), avr, false); \
- } else { \
- get_avr64(avr, rA(ctx->opcode), false); \
- tcg_gen_mulu2_i64(avr, t2, avr, ten); \
- set_avr64(rD(ctx->opcode), avr, false); \
- } \
- \
- if (ret_carry) { \
- get_avr64(avr, rA(ctx->opcode), true); \
- tcg_gen_mulu2_i64(t0, t1, avr, ten); \
- tcg_gen_add2_i64(t0, avr, t0, t1, t2, z); \
- set_avr64(rD(ctx->opcode), avr, false); \
- set_avr64(rD(ctx->opcode), z, true); \
- } else { \
- get_avr64(avr, rA(ctx->opcode), true); \
- tcg_gen_mul_i64(t0, avr, ten); \
- tcg_gen_add_i64(avr, t0, t2); \
- set_avr64(rD(ctx->opcode), avr, true); \
- } \
-} \
+ static void glue(gen_, name)(DisasContext *ctx) \
+ { gen_vx_vmul10(ctx, add_cin, ret_carry); }
GEN_VX_VMUL10(vmul10uq, 0, 0);
GEN_VX_VMUL10(vmul10euq, 1, 0);
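
As a hedged aside on the refactoring above (illustrative only, not part of the patch): the mulu2/add2 sequence in gen_vx_vmul10() is a 64-bit-halves implementation of "multiply a 128-bit value by ten, optionally adding a 4-bit carry-in". A minimal standalone C model, with made-up names (u128_model, mul10_model) and assuming a compiler that provides __int128, looks like this:

/* Model only: multiply the 128-bit value {hi, lo} by 10 and add a 4-bit
 * carry-in, propagating the carry out of the low half into the high half
 * exactly as the tcg_gen_mulu2_i64/tcg_gen_add2_i64 calls above do. */
#include <stdint.h>

typedef struct { uint64_t hi, lo; } u128_model;

static u128_model mul10_model(u128_model a, uint64_t cin)
{
    unsigned __int128 lo = (unsigned __int128)a.lo * 10 + (cin & 0xF);
    unsigned __int128 hi = (unsigned __int128)a.hi * 10 + (uint64_t)(lo >> 64);
    return (u128_model){ .hi = (uint64_t)hi, .lo = (uint64_t)lo };
}

The ret_carry variants return the carry out of the high half instead of the product, which in this model would be (uint64_t)(hi >> 64), with the high doubleword of the result set to zero.
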
@@ -903,7 +906,6 @@ static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right,
hi = tcg_temp_new_i64();
lo = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
- t1 = tcg_const_i64(0);
get_avr64(lo, a->vra, false);
get_avr64(hi, a->vra, true);
@@ -914,7 +916,10 @@ static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right,
if (right) {
tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, hi, lo);
if (alg) {
+ t1 = tcg_temp_new_i64();
tcg_gen_sari_i64(t1, lo, 63);
+ } else {
+ t1 = zero;
}
tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, t1, hi);
} else {
@@ -1619,7 +1624,7 @@ static void glue(gen_, name)(DisasContext *ctx) \
gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
- uimm = tcg_const_i32(UIMM5(ctx->opcode)); \
+ uimm = tcg_constant_i32(UIMM5(ctx->opcode)); \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
gen_helper_##name(cpu_env, rd, rb, uimm); \
@@ -1960,7 +1965,7 @@ static void gen_vsldoi(DisasContext *ctx)
ra = gen_avr_ptr(rA(ctx->opcode));
rb = gen_avr_ptr(rB(ctx->opcode));
rd = gen_avr_ptr(rD(ctx->opcode));
- sh = tcg_const_i32(VSH(ctx->opcode));
+ sh = tcg_constant_i32(VSH(ctx->opcode));
gen_helper_vsldoi(rd, ra, rb, sh);
}
@@ -2231,24 +2236,25 @@ static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a)
static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece)
{
- TCGv_i64 rt, vrb, mask;
- rt = tcg_const_i64(0);
- vrb = tcg_temp_new_i64();
+ TCGv_i64 r[2], mask;
+
+ r[0] = tcg_temp_new_i64();
+ r[1] = tcg_temp_new_i64();
mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1)));
for (int i = 0; i < 2; i++) {
- get_avr64(vrb, a->vrb, i);
+ get_avr64(r[i], a->vrb, i);
if (a->mp) {
- tcg_gen_and_i64(vrb, mask, vrb);
+ tcg_gen_and_i64(r[i], mask, r[i]);
} else {
- tcg_gen_andc_i64(vrb, mask, vrb);
+ tcg_gen_andc_i64(r[i], mask, r[i]);
}
- tcg_gen_ctpop_i64(vrb, vrb);
- tcg_gen_add_i64(rt, rt, vrb);
+ tcg_gen_ctpop_i64(r[i], r[i]);
}
- tcg_gen_shli_i64(rt, rt, TARGET_LONG_BITS - 8 + vece);
- tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], rt);
+ tcg_gen_add_i64(r[0], r[0], r[1]);
+ tcg_gen_shli_i64(r[0], r[0], TARGET_LONG_BITS - 8 + vece);
+ tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], r[0]);
return true;
}
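
As a hedged aside on the rewritten do_vcntmb() above (illustrative only, not part of the patch): the helper masks the most significant bit of every element, keeps the elements whose mask bit matches the MP field, popcounts both doublewords, and shifts the sum into the high bits of RT. A minimal standalone C model for byte elements (vece = 0) on a 64-bit target, with made-up names, looks like this:

/* Model only: count the byte elements of the 128-bit value {vrb_hi, vrb_lo}
 * whose most significant bit equals mp, then place the count where the
 * generated code puts it (shift by TARGET_LONG_BITS - 8 + vece = 56 here). */
#include <stdint.h>

static uint64_t vcntmb_model(uint64_t vrb_hi, uint64_t vrb_lo, int mp)
{
    const uint64_t msb = 0x8080808080808080ull;   /* MSB of each byte */
    uint64_t hi = mp ? (vrb_hi & msb) : (msb & ~vrb_hi);
    uint64_t lo = mp ? (vrb_lo & msb) : (msb & ~vrb_lo);
    uint64_t count = (uint64_t)__builtin_popcountll(hi)
                   + (uint64_t)__builtin_popcountll(lo);
    return count << 56;
}

__builtin_popcountll is a GCC/Clang builtin, standing in for the tcg_gen_ctpop_i64 calls above.
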
@@ -2569,7 +2575,7 @@ static void gen_##op(DisasContext *ctx) \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
\
- ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
+ ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \
\
gen_helper_##op(cpu_crf[6], rd, ra, rb, ps); \
}
@@ -2588,7 +2594,7 @@ static void gen_##op(DisasContext *ctx) \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
\
- ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
+ ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \
\
gen_helper_##op(cpu_crf[6], rd, rb, ps); \
}
@@ -2720,7 +2726,7 @@ static void gen_##op(DisasContext *ctx) \
} \
ra = gen_avr_ptr(rA(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
- st_six = tcg_const_i32(rB(ctx->opcode)); \
+ st_six = tcg_constant_i32(rB(ctx->opcode)); \
gen_helper_##op(rd, ra, st_six); \
}