path: root/target/ppc/fpu_helper.c
author    Bharata B Rao <bharata@linux.vnet.ibm.com>  2017-02-10 12:53:08 +0530
committer David Gibson <david@gibson.dropbear.id.au>  2017-02-22 11:28:28 +1100
commit    a8d411abac9347aadeac87687b8a3c9895ea0fd7 (patch)
tree      18cf66df7a6030f8abfce325bfeb9578d1f75d8c /target/ppc/fpu_helper.c
parent    fd425037d25cecaaffdb3831697e0adc10ca2ba3 (diff)
target-ppc: Implement round to odd variants of quad FP instructions
xsaddqpo: VSX Scalar Add Quad-Precision using round to Odd
xsmulqpo: VSX Scalar Multiply Quad-Precision using round to Odd
xsdivqpo: VSX Scalar Divide Quad-Precision using round to Odd
xscvqpdpo: VSX Scalar round & Convert Quad-Precision format to Double-Precision format using round to Odd
xssqrtqpo: VSX Scalar Square Root Quad-Precision using round to Odd
xssubqpo: VSX Scalar Subtract Quad-Precision using round to Odd

In addition, fix the invalid bitmask in the instruction encoding of xssqrtqp[o].

Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
CC: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Diffstat (limited to 'target/ppc/fpu_helper.c')
-rw-r--r--  target/ppc/fpu_helper.c | 42
1 file changed, 20 insertions(+), 22 deletions(-)
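The pattern in the diff below is the same in every helper: copy env->fp_status into a local tstat and, when Rc(opcode) is set, switch tstat to float_round_to_odd before calling the float128_* operation, then OR the accumulated exception flags back into env->fp_status. As a quick illustration of what round-to-odd itself means, here is a stand-alone toy sketch (not QEMU softfloat code; the names round_to_odd and drop_bits are made up for this example): truncate the low bits, and if any discarded bit was non-zero, force the least-significant kept bit to 1.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy illustration of round-to-odd ("jamming"): drop the low bits and,
     * if any dropped bit was non-zero, set the least-significant kept bit
     * to 1.  Softfloat's float_round_to_odd produces equivalent behaviour
     * on the real float128 formats; this helper is only a sketch. */
    static uint64_t round_to_odd(uint64_t sig, unsigned drop_bits)
    {
        uint64_t kept   = sig >> drop_bits;
        uint64_t sticky = sig & ((UINT64_C(1) << drop_bits) - 1);

        return sticky ? (kept | 1) : kept;
    }

    int main(void)
    {
        /* 0x123 with 4 bits dropped: the kept part is 0x12 and the discarded
         * 0x3 is non-zero, so the low bit is jammed and the result is 0x13. */
        printf("0x%" PRIx64 "\n", round_to_odd(0x123, 4));
        return 0;
    }

Round-to-odd is useful for these quad-precision helpers because a result produced this way can later be rounded again to a narrower format without introducing double-rounding error.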
diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
index 1b6cd3bd10..96f9801186 100644
--- a/target/ppc/fpu_helper.c
+++ b/target/ppc/fpu_helper.c
@@ -1850,12 +1850,11 @@ void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
getVSR(rD(opcode) + 32, &xt, env);
helper_reset_fpstatus(env);
+ tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
- /* TODO: Support xsadddpo after round-to-odd is implemented */
- abort();
+ tstat.float_rounding_mode = float_round_to_odd;
}
- tstat = env->fp_status;
set_float_exception_flags(0, &tstat);
xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
@@ -1930,19 +1929,18 @@ VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
{
ppc_vsr_t xt, xa, xb;
+ float_status tstat;
getVSR(rA(opcode) + 32, &xa, env);
getVSR(rB(opcode) + 32, &xb, env);
getVSR(rD(opcode) + 32, &xt, env);
+ helper_reset_fpstatus(env);
+ tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
- /* TODO: Support xsmulpo after round-to-odd is implemented */
- abort();
+ tstat.float_rounding_mode = float_round_to_odd;
}
- helper_reset_fpstatus(env);
-
- float_status tstat = env->fp_status;
set_float_exception_flags(0, &tstat);
xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
@@ -2019,18 +2017,18 @@ VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
{
ppc_vsr_t xt, xa, xb;
+ float_status tstat;
getVSR(rA(opcode) + 32, &xa, env);
getVSR(rB(opcode) + 32, &xb, env);
getVSR(rD(opcode) + 32, &xt, env);
+ helper_reset_fpstatus(env);
+ tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
- /* TODO: Support xsdivqpo after round-to-odd is implemented */
- abort();
+ tstat.float_rounding_mode = float_round_to_odd;
}
- helper_reset_fpstatus(env);
- float_status tstat = env->fp_status;
set_float_exception_flags(0, &tstat);
xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
@@ -2954,18 +2952,20 @@ VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
{
ppc_vsr_t xt, xb;
+ float_status tstat;
getVSR(rB(opcode) + 32, &xb, env);
memset(&xt, 0, sizeof(xt));
+ tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
- /* TODO: Support xscvqpdpo after round-to-odd is implemented */
- abort();
+ tstat.float_rounding_mode = float_round_to_odd;
}
- xt.VsrD(0) = float128_to_float64(xb.f128, &env->fp_status);
+ xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(float128_is_signaling_nan(xb.f128,
- &env->fp_status))) {
+ &tstat))) {
float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
}
@@ -3496,12 +3496,11 @@ void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
memset(&xt, 0, sizeof(xt));
helper_reset_fpstatus(env);
+ tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
- /* TODO: Support xsadddpo after round-to-odd is implemented */
- abort();
+ tstat.float_rounding_mode = float_round_to_odd;
}
- tstat = env->fp_status;
set_float_exception_flags(0, &tstat);
xt.f128 = float128_sqrt(xb.f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
@@ -3534,12 +3533,11 @@ void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
getVSR(rD(opcode) + 32, &xt, env);
helper_reset_fpstatus(env);
+ tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
- /* TODO: Support xssubqp after round-to-odd is implemented */
- abort();
+ tstat.float_rounding_mode = float_round_to_odd;
}
- tstat = env->fp_status;
set_float_exception_flags(0, &tstat);
xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;