Diffstat (limited to 'target-alpha')
-rw-r--r-- | target-alpha/helper.h    |  3 |
-rw-r--r-- | target-alpha/op_helper.c | 18 |
-rw-r--r-- | target-alpha/translate.c | 51 |
3 files changed, 45 insertions, 27 deletions
diff --git a/target-alpha/helper.h b/target-alpha/helper.h
index a545c5cd4c..4eb3b6f63c 100644
--- a/target-alpha/helper.h
+++ b/target-alpha/helper.h
@@ -19,9 +19,6 @@ DEF_HELPER_1(cttz, i64, i64)
 DEF_HELPER_2(zap, i64, i64, i64)
 DEF_HELPER_2(zapnot, i64, i64, i64)
-DEF_HELPER_2(inswh, i64, i64, i64)
-DEF_HELPER_2(inslh, i64, i64, i64)
-DEF_HELPER_2(insqh, i64, i64, i64)
 DEF_HELPER_2(cmpbge, i64, i64, i64)
diff --git a/target-alpha/op_helper.c b/target-alpha/op_helper.c
index b6ec0e8003..d7f4fb200c 100644
--- a/target-alpha/op_helper.c
+++ b/target-alpha/op_helper.c
@@ -185,24 +185,6 @@ uint64_t helper_zapnot(uint64_t val, uint64_t mask)
     return byte_zap(val, ~mask);
 }
 
-uint64_t helper_inswh(uint64_t val, uint64_t mask)
-{
-    val >>= 64 - ((mask & 7) * 8);
-    return byte_zap(val, ~((0x03 << (mask & 7)) >> 8));
-}
-
-uint64_t helper_inslh(uint64_t val, uint64_t mask)
-{
-    val >>= 64 - ((mask & 7) * 8);
-    return byte_zap(val, ~((0x0F << (mask & 7)) >> 8));
-}
-
-uint64_t helper_insqh(uint64_t val, uint64_t mask)
-{
-    val >>= 64 - ((mask & 7) * 8);
-    return byte_zap(val, ~((0xFF << (mask & 7)) >> 8));
-}
-
 uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
 {
     uint8_t opa, opb, res;
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 1dc344821a..1c809b7611 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -625,6 +625,48 @@ static void gen_ext_l(int ra, int rb, int rc, int islit,
     }
 }
 
+/* INSWH, INSLH, INSQH */
+static void gen_ins_h(int ra, int rb, int rc, int islit,
+                      uint8_t lit, uint8_t byte_mask)
+{
+    if (unlikely(rc == 31))
+        return;
+    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
+        tcg_gen_movi_i64(cpu_ir[rc], 0);
+    else {
+        TCGv tmp = tcg_temp_new();
+
+        /* The instruction description has us left-shift the byte mask
+           and extract bits <15:8> and apply that zap at the end.  This
+           is equivalent to simply performing the zap first and shifting
+           afterward.  */
+        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
+
+        if (islit) {
+            /* Note that we have handled the lit==0 case above.  */
+            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
+        } else {
+            TCGv shift = tcg_temp_new();
+
+            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
+               Do this portably by splitting the shift into two parts:
+               shift_count-1 and 1.  Arrange for the -1 by using
+               ones-complement instead of twos-complement in the negation:
+               ~((B & 7) * 8) & 63.  */
+
+            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
+            tcg_gen_shli_i64(shift, shift, 3);
+            tcg_gen_not_i64(shift, shift);
+            tcg_gen_andi_i64(shift, shift, 0x3f);
+
+            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
+            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
+            tcg_temp_free(shift);
+        }
+        tcg_temp_free(tmp);
+    }
+}
+
 /* INSBL, INSWL, INSLL, INSQL */
 static void gen_ins_l(int ra, int rb, int rc, int islit,
                       uint8_t lit, uint8_t byte_mask)
@@ -750,9 +792,6 @@ ARITH3(addlv)
 ARITH3(sublv)
 ARITH3(addqv)
 ARITH3(subqv)
-ARITH3(inswh)
-ARITH3(inslh)
-ARITH3(insqh)
 ARITH3(umulh)
 ARITH3(mullv)
 ARITH3(mulqv)
@@ -1479,7 +1518,7 @@ static inline int translate_one(DisasContext *ctx, uint32_t insn)
             break;
         case 0x57:
             /* INSWH */
-            gen_inswh(ra, rb, rc, islit, lit);
+            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
             break;
         case 0x5A:
             /* EXTWH */
@@ -1491,7 +1530,7 @@ static inline int translate_one(DisasContext *ctx, uint32_t insn)
             break;
         case 0x67:
             /* INSLH */
-            gen_inslh(ra, rb, rc, islit, lit);
+            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
         case 0x6A:
             /* EXTLH */
@@ -1503,7 +1542,7 @@ static inline int translate_one(DisasContext *ctx, uint32_t insn)
             break;
         case 0x77:
             /* INSQH */
-            gen_insqh(ra, rb, rc, islit, lit);
+            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
             break;
         case 0x7A:
             /* EXTQH */
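
Note: the comment in gen_ins_h relies on two facts: zapping first and then shifting right gives the same result as the architectural order (shift right by 64 - 8*(B & 7), then zap with bits <15:8> of the shifted byte mask), and splitting the right shift into shift_count-1 and 1 keeps the shift count below 64 even when (B & 7) == 0. The standalone C sketch below cross-checks the two orderings for all nonzero shift amounts. It is not part of the patch: byte_zap, ins_h_old and ins_h_new are local stand-ins written here for illustration, with byte_zap assumed to behave like the static helper of the same name in op_helper.c (clear byte i of val when bit i of mask is set).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Assumed to mirror byte_zap() in op_helper.c: clear byte i of val
   whenever bit i of mask is set.  */
static uint64_t byte_zap(uint64_t val, uint64_t mask)
{
    int i;
    for (i = 0; i < 8; i++) {
        if (mask & (1ULL << i)) {
            val &= ~(0xffULL << (i * 8));
        }
    }
    return val;
}

/* Ordering of the removed helper_ins?h: shift first, then zap with
   bits <15:8> of the left-shifted byte mask.  Valid only for s != 0;
   the translator writes zero to Rc directly when (lit & 7) == 0.  */
static uint64_t ins_h_old(uint64_t val, int s, uint8_t byte_mask)
{
    val >>= 64 - s * 8;
    return byte_zap(val, ~(((uint64_t)byte_mask << s) >> 8));
}

/* Ordering used by gen_ins_h: zapnot first, then shift right.  The
   shift of 64 - s*8 is split into (63 - s*8) and 1, with the first
   part computed as ~(s * 8) & 0x3f, so the count never reaches 64.  */
static uint64_t ins_h_new(uint64_t val, int s, uint8_t byte_mask)
{
    uint64_t tmp = byte_zap(val, ~(uint64_t)byte_mask);   /* zapnot */
    int shift = ~(s * 8) & 0x3f;
    return (tmp >> shift) >> 1;
}

int main(void)
{
    static const uint8_t masks[3] = { 0x03, 0x0f, 0xff }; /* W, L, Q */
    int i, m, s;

    for (i = 0; i < 100000; i++) {
        uint64_t val = ((uint64_t)rand() << 33) ^ ((uint64_t)rand() << 11)
                       ^ (uint64_t)rand();
        for (m = 0; m < 3; m++) {
            /* s == 0 is excluded: the old helper would shift by 64,
               and gen_ins_h handles that case by writing zero.  */
            for (s = 1; s < 8; s++) {
                if (ins_h_old(val, s, masks[m]) != ins_h_new(val, s, masks[m])) {
                    printf("mismatch: val=%016" PRIx64 " s=%d mask=%02x\n",
                           val, s, masks[m]);
                    return 1;
                }
            }
        }
    }
    printf("shift-then-zap and zap-then-shift agree for INS{W,L,Q}H\n");
    return 0;
}

Doing the zapnot on the constant byte_mask first also avoids materializing the shifted mask, which in the register case would depend on Rb at run time rather than being known at translation time.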