Diffstat (limited to 'target/s390x')
-rw-r--r--  target/s390x/cc_helper.c    | 123
-rw-r--r--  target/s390x/cpu_features.c |  39
-rw-r--r--  target/s390x/cpu_models.c   |  25
-rw-r--r--  target/s390x/helper.c       |  10
-rw-r--r--  target/s390x/insn-data.def  |  76
-rw-r--r--  target/s390x/internal.h     |  11
-rw-r--r--  target/s390x/translate.c    | 287
7 files changed, 276 insertions, 295 deletions
diff --git a/target/s390x/cc_helper.c b/target/s390x/cc_helper.c
index 5432aeeed4..e7039d0d18 100644
--- a/target/s390x/cc_helper.c
+++ b/target/s390x/cc_helper.c
@@ -123,6 +123,17 @@ static uint32_t cc_calc_nz(uint64_t dst)
return !!dst;
}
+static uint32_t cc_calc_addu(uint64_t carry_out, uint64_t result)
+{
+ g_assert(carry_out <= 1);
+ return (result != 0) + 2 * carry_out;
+}
+
+static uint32_t cc_calc_subu(uint64_t borrow_out, uint64_t result)
+{
+ return cc_calc_addu(borrow_out + 1, result);
+}
+
static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar)
{
if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
@@ -138,21 +149,6 @@ static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar)
}
}
-static uint32_t cc_calc_addu_64(uint64_t a1, uint64_t a2, uint64_t ar)
-{
- return (ar != 0) + 2 * (ar < a1);
-}
-
-static uint32_t cc_calc_addc_64(uint64_t a1, uint64_t a2, uint64_t ar)
-{
- /* Recover a2 + carry_in. */
- uint64_t a2c = ar - a1;
- /* Check for a2+carry_in overflow, then a1+a2c overflow. */
- int carry_out = (a2c < a2) || (ar < a1);
-
- return (ar != 0) + 2 * carry_out;
-}
-
static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar)
{
if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
@@ -168,32 +164,6 @@ static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar)
}
}
-static uint32_t cc_calc_subu_64(uint64_t a1, uint64_t a2, uint64_t ar)
-{
- if (ar == 0) {
- return 2;
- } else {
- if (a2 > a1) {
- return 1;
- } else {
- return 3;
- }
- }
-}
-
-static uint32_t cc_calc_subb_64(uint64_t a1, uint64_t a2, uint64_t ar)
-{
- int borrow_out;
-
- if (ar != a1 - a2) { /* difference means borrow-in */
- borrow_out = (a2 >= a1);
- } else {
- borrow_out = (a2 > a1);
- }
-
- return (ar != 0) + 2 * !borrow_out;
-}
-
static uint32_t cc_calc_abs_64(int64_t dst)
{
if ((uint64_t)dst == 0x8000000000000000ULL) {
@@ -239,21 +209,6 @@ static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar)
}
}
-static uint32_t cc_calc_addu_32(uint32_t a1, uint32_t a2, uint32_t ar)
-{
- return (ar != 0) + 2 * (ar < a1);
-}
-
-static uint32_t cc_calc_addc_32(uint32_t a1, uint32_t a2, uint32_t ar)
-{
- /* Recover a2 + carry_in. */
- uint32_t a2c = ar - a1;
- /* Check for a2+carry_in overflow, then a1+a2c overflow. */
- int carry_out = (a2c < a2) || (ar < a1);
-
- return (ar != 0) + 2 * carry_out;
-}
-
static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar)
{
if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
@@ -269,32 +224,6 @@ static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar)
}
}
-static uint32_t cc_calc_subu_32(uint32_t a1, uint32_t a2, uint32_t ar)
-{
- if (ar == 0) {
- return 2;
- } else {
- if (a2 > a1) {
- return 1;
- } else {
- return 3;
- }
- }
-}
-
-static uint32_t cc_calc_subb_32(uint32_t a1, uint32_t a2, uint32_t ar)
-{
- int borrow_out;
-
- if (ar != a1 - a2) { /* difference means borrow-in */
- borrow_out = (a2 >= a1);
- } else {
- borrow_out = (a2 > a1);
- }
-
- return (ar != 0) + 2 * !borrow_out;
-}
-
static uint32_t cc_calc_abs_32(int32_t dst)
{
if ((uint32_t)dst == 0x80000000UL) {
@@ -483,24 +412,18 @@ static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
case CC_OP_NZ:
r = cc_calc_nz(dst);
break;
- case CC_OP_ADD_64:
- r = cc_calc_add_64(src, dst, vr);
+ case CC_OP_ADDU:
+ r = cc_calc_addu(src, dst);
break;
- case CC_OP_ADDU_64:
- r = cc_calc_addu_64(src, dst, vr);
+ case CC_OP_SUBU:
+ r = cc_calc_subu(src, dst);
break;
- case CC_OP_ADDC_64:
- r = cc_calc_addc_64(src, dst, vr);
+ case CC_OP_ADD_64:
+ r = cc_calc_add_64(src, dst, vr);
break;
case CC_OP_SUB_64:
r = cc_calc_sub_64(src, dst, vr);
break;
- case CC_OP_SUBU_64:
- r = cc_calc_subu_64(src, dst, vr);
- break;
- case CC_OP_SUBB_64:
- r = cc_calc_subb_64(src, dst, vr);
- break;
case CC_OP_ABS_64:
r = cc_calc_abs_64(dst);
break;
@@ -517,21 +440,9 @@ static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
case CC_OP_ADD_32:
r = cc_calc_add_32(src, dst, vr);
break;
- case CC_OP_ADDU_32:
- r = cc_calc_addu_32(src, dst, vr);
- break;
- case CC_OP_ADDC_32:
- r = cc_calc_addc_32(src, dst, vr);
- break;
case CC_OP_SUB_32:
r = cc_calc_sub_32(src, dst, vr);
break;
- case CC_OP_SUBU_32:
- r = cc_calc_subu_32(src, dst, vr);
- break;
- case CC_OP_SUBB_32:
- r = cc_calc_subb_32(src, dst, vr);
- break;
case CC_OP_ABS_32:
r = cc_calc_abs_32(dst);
break;
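
As an aside, a minimal standalone sketch (not part of the patch) of how the merged CC_OP_ADDU/CC_OP_SUBU encoding works: the condition code is (result != 0) + 2 * carry, and the subtract case reuses the add case by converting the borrow flag (0 or -1) into a carry (1 or 0).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of cc_calc_addu(): cc = (result != 0) + 2 * carry_out. */
static uint32_t calc_addu(uint64_t carry_out, uint64_t result)
{
    assert(carry_out <= 1);
    return (result != 0) + 2 * carry_out;
}

/* Mirror of cc_calc_subu(): borrow_out is 0 (no borrow) or -1 (borrow). */
static uint32_t calc_subu(uint64_t borrow_out, uint64_t result)
{
    return calc_addu(borrow_out + 1, result);
}

int main(void)
{
    /* ADD LOGICAL: cc 0..3 = zero/no carry, nonzero/no carry,
     * zero/carry, nonzero/carry. */
    assert(calc_addu(0, 0) == 0);
    assert(calc_addu(0, 5) == 1);
    assert(calc_addu(1, 0) == 2);
    assert(calc_addu(1, 5) == 3);

    /* SUBTRACT LOGICAL: cc 0 cannot occur; a borrow clears the carry bit. */
    assert(calc_subu((uint64_t)-1, 5) == 1);  /* nonzero result, borrow */
    assert(calc_subu(0, 0) == 2);             /* zero result, no borrow */
    assert(calc_subu(0, 5) == 3);             /* nonzero result, no borrow */

    puts("CC encoding checks passed");
    return 0;
}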
diff --git a/target/s390x/cpu_features.c b/target/s390x/cpu_features.c
index 42fe0bf4ca..5528acd082 100644
--- a/target/s390x/cpu_features.c
+++ b/target/s390x/cpu_features.c
@@ -107,8 +107,45 @@ void s390_fill_feat_block(const S390FeatBitmap features, S390FeatType type,
feat = find_next_bit(features, S390_FEAT_MAX, feat + 1);
}
- if (type == S390_FEAT_TYPE_SCLP_FAC134 && s390_is_pv()) {
+ if (!s390_is_pv()) {
+ return;
+ }
+
+ /*
+ * Some facilities are not available for CPUs in protected mode:
+ * - All SIE facilities because SIE is not available
+ * - DIAG318
+ *
+ * As VMs can move in and out of protected mode the CPU model
+ * doesn't protect us from that problem because it is only
+ * validated at the start of the VM.
+ */
+ switch (type) {
+ case S390_FEAT_TYPE_SCLP_CPU:
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_F2)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_SKEY)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_GPERE)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_SIIF)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_SIGPIF)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_IB)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_CEI)->bit, data);
+ break;
+ case S390_FEAT_TYPE_SCLP_CONF_CHAR:
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_GSLS)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_HPMA2)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_KSS)->bit, data);
+ break;
+ case S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT:
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_64BSCAO)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_CMMA)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_PFMFI)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_IBS)->bit, data);
+ break;
+ case S390_FEAT_TYPE_SCLP_FAC134:
clear_be_bit(s390_feat_def(S390_FEAT_DIAG_318)->bit, data);
+ break;
+ default:
+ return;
}
}
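
For readers unfamiliar with the helper used above: clear_be_bit() clears one bit in an SCLP facility byte array. A minimal sketch follows, assuming MSB-first (big-endian) bit numbering and that the helper sits alongside set_be_bit()/test_be_bit() in cpu_features.h; treat the body as illustrative rather than a verbatim copy.

#include <stdint.h>

/* Sketch: bit 0 is the most significant bit of byte 0, as SCLP
 * facility blocks are numbered MSB-first. */
static inline void clear_be_bit(unsigned int bit_nr, uint8_t *array)
{
    array[bit_nr / 8] &= ~(0x80 >> (bit_nr % 8));
}

/* Example: clearing bit 9 clears the second-highest bit of byte 1. */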
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index a23fd3e32b..35179f9dc7 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -239,8 +239,29 @@ bool s390_has_feat(S390Feat feat)
}
return 0;
}
- if (feat == S390_FEAT_DIAG_318 && s390_is_pv()) {
- return false;
+
+ if (s390_is_pv()) {
+ switch (feat) {
+ case S390_FEAT_DIAG_318:
+ case S390_FEAT_HPMA2:
+ case S390_FEAT_SIE_F2:
+ case S390_FEAT_SIE_SKEY:
+ case S390_FEAT_SIE_GPERE:
+ case S390_FEAT_SIE_SIIF:
+ case S390_FEAT_SIE_SIGPIF:
+ case S390_FEAT_SIE_IB:
+ case S390_FEAT_SIE_CEI:
+ case S390_FEAT_SIE_KSS:
+ case S390_FEAT_SIE_GSLS:
+ case S390_FEAT_SIE_64BSCAO:
+ case S390_FEAT_SIE_CMMA:
+ case S390_FEAT_SIE_PFMFI:
+ case S390_FEAT_SIE_IBS:
+ return false;
+ break;
+ default:
+ break;
+ }
}
return test_bit(feat, cpu->model->features);
}
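
A hedged usage sketch (the call site below is hypothetical, not taken from the patch): callers keep testing features the usual way, and under protected virtualization the fenced SIE and DIAG318 features simply read as absent even if the CPU model enables them.

/* Illustrative only: any code gating on a fenced feature. */
static void maybe_enable_diag318(void)
{
    if (!s390_has_feat(S390_FEAT_DIAG_318)) {
        /* Guest is in protected mode, or the model lacks the feature:
         * skip DIAG 318 handling entirely. */
        return;
    }
    /* ... set up DIAG 318 info ... */
}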
diff --git a/target/s390x/helper.c b/target/s390x/helper.c
index b877690845..7678994feb 100644
--- a/target/s390x/helper.c
+++ b/target/s390x/helper.c
@@ -395,6 +395,8 @@ const char *cc_name(enum cc_op cc_op)
[CC_OP_DYNAMIC] = "CC_OP_DYNAMIC",
[CC_OP_STATIC] = "CC_OP_STATIC",
[CC_OP_NZ] = "CC_OP_NZ",
+ [CC_OP_ADDU] = "CC_OP_ADDU",
+ [CC_OP_SUBU] = "CC_OP_SUBU",
[CC_OP_LTGT_32] = "CC_OP_LTGT_32",
[CC_OP_LTGT_64] = "CC_OP_LTGT_64",
[CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
@@ -402,19 +404,11 @@ const char *cc_name(enum cc_op cc_op)
[CC_OP_LTGT0_32] = "CC_OP_LTGT0_32",
[CC_OP_LTGT0_64] = "CC_OP_LTGT0_64",
[CC_OP_ADD_64] = "CC_OP_ADD_64",
- [CC_OP_ADDU_64] = "CC_OP_ADDU_64",
- [CC_OP_ADDC_64] = "CC_OP_ADDC_64",
[CC_OP_SUB_64] = "CC_OP_SUB_64",
- [CC_OP_SUBU_64] = "CC_OP_SUBU_64",
- [CC_OP_SUBB_64] = "CC_OP_SUBB_64",
[CC_OP_ABS_64] = "CC_OP_ABS_64",
[CC_OP_NABS_64] = "CC_OP_NABS_64",
[CC_OP_ADD_32] = "CC_OP_ADD_32",
- [CC_OP_ADDU_32] = "CC_OP_ADDU_32",
- [CC_OP_ADDC_32] = "CC_OP_ADDC_32",
[CC_OP_SUB_32] = "CC_OP_SUB_32",
- [CC_OP_SUBU_32] = "CC_OP_SUBU_32",
- [CC_OP_SUBB_32] = "CC_OP_SUBB_32",
[CC_OP_ABS_32] = "CC_OP_ABS_32",
[CC_OP_NABS_32] = "CC_OP_NABS_32",
[CC_OP_COMP_32] = "CC_OP_COMP_32",
diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def
index b95bc98d35..26badb663a 100644
--- a/target/s390x/insn-data.def
+++ b/target/s390x/insn-data.def
@@ -58,34 +58,34 @@
C(0xa70b, AGHI, RI_a, Z, r1, i2, r1, 0, add, adds64)
/* ADD LOGICAL */
- C(0x1e00, ALR, RR_a, Z, r1, r2, new, r1_32, add, addu32)
- C(0xb9fa, ALRK, RRF_a, DO, r2, r3, new, r1_32, add, addu32)
- C(0x5e00, AL, RX_a, Z, r1, m2_32u, new, r1_32, add, addu32)
- C(0xe35e, ALY, RXY_a, LD, r1, m2_32u, new, r1_32, add, addu32)
- C(0xb90a, ALGR, RRE, Z, r1, r2, r1, 0, add, addu64)
- C(0xb91a, ALGFR, RRE, Z, r1, r2_32u, r1, 0, add, addu64)
- C(0xb9ea, ALGRK, RRF_a, DO, r2, r3, r1, 0, add, addu64)
- C(0xe30a, ALG, RXY_a, Z, r1, m2_64, r1, 0, add, addu64)
- C(0xe31a, ALGF, RXY_a, Z, r1, m2_32u, r1, 0, add, addu64)
+ C(0x1e00, ALR, RR_a, Z, r1_32u, r2_32u, new, r1_32, add, addu32)
+ C(0xb9fa, ALRK, RRF_a, DO, r2_32u, r3_32u, new, r1_32, add, addu32)
+ C(0x5e00, AL, RX_a, Z, r1_32u, m2_32u, new, r1_32, add, addu32)
+ C(0xe35e, ALY, RXY_a, LD, r1_32u, m2_32u, new, r1_32, add, addu32)
+ C(0xb90a, ALGR, RRE, Z, r1, r2, r1, 0, addu64, addu64)
+ C(0xb91a, ALGFR, RRE, Z, r1, r2_32u, r1, 0, addu64, addu64)
+ C(0xb9ea, ALGRK, RRF_a, DO, r2, r3, r1, 0, addu64, addu64)
+ C(0xe30a, ALG, RXY_a, Z, r1, m2_64, r1, 0, addu64, addu64)
+ C(0xe31a, ALGF, RXY_a, Z, r1, m2_32u, r1, 0, addu64, addu64)
/* ADD LOGICAL HIGH */
C(0xb9ca, ALHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, add, addu32)
- C(0xb9da, ALHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, add, addu32)
+ C(0xb9da, ALHHLR, RRF_a, HW, r2_sr32, r3_32u, new, r1_32h, add, addu32)
/* ADD LOGICAL IMMEDIATE */
- C(0xc20b, ALFI, RIL_a, EI, r1, i2_32u, new, r1_32, add, addu32)
- C(0xc20a, ALGFI, RIL_a, EI, r1, i2_32u, r1, 0, add, addu64)
+ C(0xc20b, ALFI, RIL_a, EI, r1_32u, i2_32u, new, r1_32, add, addu32)
+ C(0xc20a, ALGFI, RIL_a, EI, r1, i2_32u, r1, 0, addu64, addu64)
/* ADD LOGICAL WITH SIGNED IMMEDIATE */
- D(0xeb6e, ALSI, SIY, GIE, la1, i2, new, 0, asi, addu32, MO_TEUL)
- C(0xecda, ALHSIK, RIE_d, DO, r3, i2, new, r1_32, add, addu32)
- D(0xeb7e, ALGSI, SIY, GIE, la1, i2, new, 0, asi, addu64, MO_TEQ)
- C(0xecdb, ALGHSIK, RIE_d, DO, r3, i2, r1, 0, add, addu64)
+ D(0xeb6e, ALSI, SIY, GIE, la1, i2_32u, new, 0, asi, addu32, MO_TEUL)
+ C(0xecda, ALHSIK, RIE_d, DO, r3_32u, i2_32u, new, r1_32, add, addu32)
+ C(0xeb7e, ALGSI, SIY, GIE, la1, i2, r1, 0, asiu64, addu64)
+ C(0xecdb, ALGHSIK, RIE_d, DO, r3, i2, r1, 0, addu64, addu64)
/* ADD LOGICAL WITH SIGNED IMMEDIATE HIGH */
- C(0xcc0a, ALSIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, addu32)
- C(0xcc0b, ALSIHN, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, 0)
+ C(0xcc0a, ALSIH, RIL_a, HW, r1_sr32, i2_32u, new, r1_32h, add, addu32)
+ C(0xcc0b, ALSIHN, RIL_a, HW, r1_sr32, i2_32u, new, r1_32h, add, 0)
/* ADD LOGICAL WITH CARRY */
- C(0xb998, ALCR, RRE, Z, r1, r2, new, r1_32, addc, addc32)
- C(0xb988, ALCGR, RRE, Z, r1, r2, r1, 0, addc, addc64)
- C(0xe398, ALC, RXY_a, Z, r1, m2_32u, new, r1_32, addc, addc32)
- C(0xe388, ALCG, RXY_a, Z, r1, m2_64, r1, 0, addc, addc64)
+ C(0xb998, ALCR, RRE, Z, r1_32u, r2_32u, new, r1_32, addc32, addu32)
+ C(0xb988, ALCGR, RRE, Z, r1, r2, r1, 0, addc64, addu64)
+ C(0xe398, ALC, RXY_a, Z, r1_32u, m2_32u, new, r1_32, addc32, addu32)
+ C(0xe388, ALCG, RXY_a, Z, r1, m2_64, r1, 0, addc64, addu64)
/* AND */
C(0x1400, NR, RR_a, Z, r1, r2, new, r1_32, and, nz32)
@@ -900,26 +900,26 @@
C(0xb9c9, SHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, sub, subs32)
C(0xb9d9, SHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, sub, subs32)
/* SUBTRACT LOGICAL */
- C(0x1f00, SLR, RR_a, Z, r1, r2, new, r1_32, sub, subu32)
- C(0xb9fb, SLRK, RRF_a, DO, r2, r3, new, r1_32, sub, subu32)
- C(0x5f00, SL, RX_a, Z, r1, m2_32u, new, r1_32, sub, subu32)
- C(0xe35f, SLY, RXY_a, LD, r1, m2_32u, new, r1_32, sub, subu32)
- C(0xb90b, SLGR, RRE, Z, r1, r2, r1, 0, sub, subu64)
- C(0xb91b, SLGFR, RRE, Z, r1, r2_32u, r1, 0, sub, subu64)
- C(0xb9eb, SLGRK, RRF_a, DO, r2, r3, r1, 0, sub, subu64)
- C(0xe30b, SLG, RXY_a, Z, r1, m2_64, r1, 0, sub, subu64)
- C(0xe31b, SLGF, RXY_a, Z, r1, m2_32u, r1, 0, sub, subu64)
+ C(0x1f00, SLR, RR_a, Z, r1_32u, r2_32u, new, r1_32, sub, subu32)
+ C(0xb9fb, SLRK, RRF_a, DO, r2_32u, r3_32u, new, r1_32, sub, subu32)
+ C(0x5f00, SL, RX_a, Z, r1_32u, m2_32u, new, r1_32, sub, subu32)
+ C(0xe35f, SLY, RXY_a, LD, r1_32u, m2_32u, new, r1_32, sub, subu32)
+ C(0xb90b, SLGR, RRE, Z, r1, r2, r1, 0, subu64, subu64)
+ C(0xb91b, SLGFR, RRE, Z, r1, r2_32u, r1, 0, subu64, subu64)
+ C(0xb9eb, SLGRK, RRF_a, DO, r2, r3, r1, 0, subu64, subu64)
+ C(0xe30b, SLG, RXY_a, Z, r1, m2_64, r1, 0, subu64, subu64)
+ C(0xe31b, SLGF, RXY_a, Z, r1, m2_32u, r1, 0, subu64, subu64)
/* SUBTRACT LOGICAL HIGH */
C(0xb9cb, SLHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, sub, subu32)
- C(0xb9db, SLHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, sub, subu32)
+ C(0xb9db, SLHHLR, RRF_a, HW, r2_sr32, r3_32u, new, r1_32h, sub, subu32)
/* SUBTRACT LOGICAL IMMEDIATE */
- C(0xc205, SLFI, RIL_a, EI, r1, i2_32u, new, r1_32, sub, subu32)
- C(0xc204, SLGFI, RIL_a, EI, r1, i2_32u, r1, 0, sub, subu64)
+ C(0xc205, SLFI, RIL_a, EI, r1_32u, i2_32u, new, r1_32, sub, subu32)
+ C(0xc204, SLGFI, RIL_a, EI, r1, i2_32u, r1, 0, subu64, subu64)
/* SUBTRACT LOGICAL WITH BORROW */
- C(0xb999, SLBR, RRE, Z, r1, r2, new, r1_32, subb, subb32)
- C(0xb989, SLBGR, RRE, Z, r1, r2, r1, 0, subb, subb64)
- C(0xe399, SLB, RXY_a, Z, r1, m2_32u, new, r1_32, subb, subb32)
- C(0xe389, SLBG, RXY_a, Z, r1, m2_64, r1, 0, subb, subb64)
+ C(0xb999, SLBR, RRE, Z, r1_32u, r2_32u, new, r1_32, subb32, subu32)
+ C(0xb989, SLBGR, RRE, Z, r1, r2, r1, 0, subb64, subu64)
+ C(0xe399, SLB, RXY_a, Z, r1_32u, m2_32u, new, r1_32, subb32, subu32)
+ C(0xe389, SLBG, RXY_a, Z, r1, m2_64, r1, 0, subb64, subu64)
/* SUPERVISOR CALL */
C(0x0a00, SVC, I, Z, 0, 0, 0, 0, svc, 0)
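
As a reading aid for the table entries above, a rough sketch of the column layout (the header comment in insn-data.def is the authoritative description):

/*
 *   C(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC)
 *   D(...same columns..., DATA)
 *
 * I1/I2 select the in1_*/in2_* operand loaders (e.g. r1_32u zero-extends
 * r1 into a 64-bit temp), P/W select prep_*/wout_* for the output, OP is
 * the op_* body and CC the cout_* routine that sets the condition code.
 * For example, roughly:
 *
 *   C(0x1e00, ALR, RR_a, Z, r1_32u, r2_32u, new, r1_32, add, addu32)
 *     -> in1_r1_32u, in2_r2_32u, prep_new, wout_r1_32, op_add, cout_addu32
 */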
diff --git a/target/s390x/internal.h b/target/s390x/internal.h
index 64602660ae..11515bb617 100644
--- a/target/s390x/internal.h
+++ b/target/s390x/internal.h
@@ -160,6 +160,9 @@ enum cc_op {
CC_OP_STATIC, /* CC value is env->cc_op */
CC_OP_NZ, /* env->cc_dst != 0 */
+ CC_OP_ADDU, /* dst != 0, src = carry out (0,1) */
+ CC_OP_SUBU, /* dst != 0, src = borrow out (0,-1) */
+
CC_OP_LTGT_32, /* signed less/greater than (32bit) */
CC_OP_LTGT_64, /* signed less/greater than (64bit) */
CC_OP_LTUGTU_32, /* unsigned less/greater than (32bit) */
@@ -168,21 +171,13 @@ enum cc_op {
CC_OP_LTGT0_64, /* signed less/greater than 0 (64bit) */
CC_OP_ADD_64, /* overflow on add (64bit) */
- CC_OP_ADDU_64, /* overflow on unsigned add (64bit) */
- CC_OP_ADDC_64, /* overflow on unsigned add-carry (64bit) */
CC_OP_SUB_64, /* overflow on subtraction (64bit) */
- CC_OP_SUBU_64, /* overflow on unsigned subtraction (64bit) */
- CC_OP_SUBB_64, /* overflow on unsigned sub-borrow (64bit) */
CC_OP_ABS_64, /* sign eval on abs (64bit) */
CC_OP_NABS_64, /* sign eval on nabs (64bit) */
CC_OP_MULS_64, /* overflow on signed multiply (64bit) */
CC_OP_ADD_32, /* overflow on add (32bit) */
- CC_OP_ADDU_32, /* overflow on unsigned add (32bit) */
- CC_OP_ADDC_32, /* overflow on unsigned add-carry (32bit) */
CC_OP_SUB_32, /* overflow on subtraction (32bit) */
- CC_OP_SUBU_32, /* overflow on unsigned subtraction (32bit) */
- CC_OP_SUBB_32, /* overflow on unsigned sub-borrow (32bit) */
CC_OP_ABS_32, /* sign eval on abs (64bit) */
CC_OP_NABS_32, /* sign eval on nabs (64bit) */
CC_OP_MULS_32, /* overflow on signed multiply (32bit) */
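
The translate.c changes below test the 4-bit branch mask against these two new cc ops; for reference, a short sketch of how the mask bits map onto the cc = (result != 0) + 2 * carry encoding:

/*
 * The branch mask selects condition codes MSB-first:
 *   bit 8 -> cc 0, bit 4 -> cc 1, bit 2 -> cc 2, bit 1 -> cc 3.
 *
 * With CC_OP_ADDU/CC_OP_SUBU (cc = (result != 0) + 2 * carry):
 *   mask 8|2 : cc 0 or 2 -> result == 0
 *   mask 4|1 : cc 1 or 3 -> result != 0
 *   mask 8|4 : cc 0 or 1 -> no carry (i.e. borrow for SUBU)
 *   mask 2|1 : cc 2 or 3 -> carry (i.e. no borrow for SUBU)
 */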
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
index be32938f6d..3d5c0d6106 100644
--- a/target/s390x/translate.c
+++ b/target/s390x/translate.c
@@ -600,17 +600,9 @@ static void gen_op_calc_cc(DisasContext *s)
dummy = tcg_const_i64(0);
/* FALLTHRU */
case CC_OP_ADD_64:
- case CC_OP_ADDU_64:
- case CC_OP_ADDC_64:
case CC_OP_SUB_64:
- case CC_OP_SUBU_64:
- case CC_OP_SUBB_64:
case CC_OP_ADD_32:
- case CC_OP_ADDU_32:
- case CC_OP_ADDC_32:
case CC_OP_SUB_32:
- case CC_OP_SUBU_32:
- case CC_OP_SUBB_32:
local_cc_op = tcg_const_i32(s->cc_op);
break;
case CC_OP_CONST0:
@@ -650,6 +642,7 @@ static void gen_op_calc_cc(DisasContext *s)
/* 1 argument */
gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
break;
+ case CC_OP_ADDU:
case CC_OP_ICM:
case CC_OP_LTGT_32:
case CC_OP_LTGT_64:
@@ -659,6 +652,7 @@ static void gen_op_calc_cc(DisasContext *s)
case CC_OP_TM_64:
case CC_OP_SLA_32:
case CC_OP_SLA_64:
+ case CC_OP_SUBU:
case CC_OP_NZ_F128:
case CC_OP_VC:
case CC_OP_MULS_64:
@@ -666,17 +660,9 @@ static void gen_op_calc_cc(DisasContext *s)
gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
break;
case CC_OP_ADD_64:
- case CC_OP_ADDU_64:
- case CC_OP_ADDC_64:
case CC_OP_SUB_64:
- case CC_OP_SUBU_64:
- case CC_OP_SUBB_64:
case CC_OP_ADD_32:
- case CC_OP_ADDU_32:
- case CC_OP_ADDC_32:
case CC_OP_SUB_32:
- case CC_OP_SUBU_32:
- case CC_OP_SUBB_32:
/* 3 arguments */
gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
break;
@@ -849,42 +835,20 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
account_inline_branch(s, old_cc_op);
break;
- case CC_OP_ADDU_32:
- case CC_OP_ADDU_64:
+ case CC_OP_ADDU:
+ case CC_OP_SUBU:
switch (mask) {
- case 8 | 2: /* vr == 0 */
+ case 8 | 2: /* result == 0 */
cond = TCG_COND_EQ;
break;
- case 4 | 1: /* vr != 0 */
+ case 4 | 1: /* result != 0 */
cond = TCG_COND_NE;
break;
- case 8 | 4: /* no carry -> vr >= src */
- cond = TCG_COND_GEU;
+ case 8 | 4: /* !carry (borrow) */
+ cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
break;
- case 2 | 1: /* carry -> vr < src */
- cond = TCG_COND_LTU;
- break;
- default:
- goto do_dynamic;
- }
- account_inline_branch(s, old_cc_op);
- break;
-
- case CC_OP_SUBU_32:
- case CC_OP_SUBU_64:
- /* Note that CC=0 is impossible; treat it as dont-care. */
- switch (mask & 7) {
- case 2: /* zero -> op1 == op2 */
- cond = TCG_COND_EQ;
- break;
- case 4 | 1: /* !zero -> op1 != op2 */
- cond = TCG_COND_NE;
- break;
- case 4: /* borrow (!carry) -> op1 < op2 */
- cond = TCG_COND_LTU;
- break;
- case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
- cond = TCG_COND_GEU;
+ case 2 | 1: /* carry (!borrow) */
+ cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
break;
default:
goto do_dynamic;
@@ -919,7 +883,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
break;
case CC_OP_LTGT_32:
case CC_OP_LTUGTU_32:
- case CC_OP_SUBU_32:
c->is_64 = false;
c->u.s32.a = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
@@ -936,7 +899,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
break;
case CC_OP_LTGT_64:
case CC_OP_LTUGTU_64:
- case CC_OP_SUBU_64:
c->u.s64.a = cc_src;
c->u.s64.b = cc_dst;
c->g1 = c->g2 = true;
@@ -950,26 +912,22 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
break;
- case CC_OP_ADDU_32:
- c->is_64 = false;
- c->u.s32.a = tcg_temp_new_i32();
- c->u.s32.b = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
- if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
- tcg_gen_movi_i32(c->u.s32.b, 0);
- } else {
- tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
- }
- break;
-
- case CC_OP_ADDU_64:
- c->u.s64.a = cc_vr;
+ case CC_OP_ADDU:
+ case CC_OP_SUBU:
+ c->is_64 = true;
+ c->u.s64.b = tcg_const_i64(0);
c->g1 = true;
- if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
- c->u.s64.b = tcg_const_i64(0);
- } else {
- c->u.s64.b = cc_src;
- c->g2 = true;
+ switch (mask) {
+ case 8 | 2:
+ case 4 | 1: /* result */
+ c->u.s64.a = cc_dst;
+ break;
+ case 8 | 4:
+ case 2 | 1: /* carry */
+ c->u.s64.a = cc_src;
+ break;
+ default:
+ g_assert_not_reached();
}
break;
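
Concretely, a hedged sketch of what the new fast path boils down to (illustrative pseudo-TCG, not literal patch output): with CC_OP_ADDU the carry lives in cc_src as 0/1, with CC_OP_SUBU the borrow lives there as 0/-1, so every test reduces to a comparison against zero.

/*
 * Conditional branch after an ADD LOGICAL (cc_op == CC_OP_ADDU):
 *   mask 2|1 ("branch on carry")    -> brcond(cc_src != 0, taken)
 *   mask 8|4 ("branch on no carry") -> brcond(cc_src == 0, taken)
 *   mask 8|2 ("branch on zero")     -> brcond(cc_dst == 0, taken)
 *
 * After a SUBTRACT LOGICAL (cc_op == CC_OP_SUBU) the carry tests flip,
 * because borrow is kept as 0/-1:
 *   mask 2|1 ("no borrow")          -> brcond(cc_src == 0, taken)
 */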
@@ -1445,38 +1403,60 @@ static DisasJumpType op_add(DisasContext *s, DisasOps *o)
return DISAS_NEXT;
}
-static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
+static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
- DisasCompare cmp;
- TCGv_i64 carry;
+ tcg_gen_movi_i64(cc_src, 0);
+ tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
+ return DISAS_NEXT;
+}
+
+/* Compute carry into cc_src. */
+static void compute_carry(DisasContext *s)
+{
+ switch (s->cc_op) {
+ case CC_OP_ADDU:
+ /* The carry value is already in cc_src (1,0). */
+ break;
+ case CC_OP_SUBU:
+ tcg_gen_addi_i64(cc_src, cc_src, 1);
+ break;
+ default:
+ gen_op_calc_cc(s);
+ /* fall through */
+ case CC_OP_STATIC:
+ /* The carry flag is the msb of CC; compute into cc_src. */
+ tcg_gen_extu_i32_i64(cc_src, cc_op);
+ tcg_gen_shri_i64(cc_src, cc_src, 1);
+ break;
+ }
+}
+static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
+{
+ compute_carry(s);
tcg_gen_add_i64(o->out, o->in1, o->in2);
+ tcg_gen_add_i64(o->out, o->out, cc_src);
+ return DISAS_NEXT;
+}
- /* The carry flag is the msb of CC, therefore the branch mask that would
- create that comparison is 3. Feeding the generated comparison to
- setcond produces the carry flag that we desire. */
- disas_jcc(s, &cmp, 3);
- carry = tcg_temp_new_i64();
- if (cmp.is_64) {
- tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
- } else {
- TCGv_i32 t = tcg_temp_new_i32();
- tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
- tcg_gen_extu_i32_i64(carry, t);
- tcg_temp_free_i32(t);
- }
- free_compare(&cmp);
+static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
+{
+ compute_carry(s);
+
+ TCGv_i64 zero = tcg_const_i64(0);
+ tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
+ tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
+ tcg_temp_free_i64(zero);
- tcg_gen_add_i64(o->out, o->out, carry);
- tcg_temp_free_i64(carry);
return DISAS_NEXT;
}
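
Why op_addc64() needs two tcg_gen_add2_i64() steps: in1 + carry_in can itself carry out, and that carry must be folded into the final carry-out. A plain-C sketch of the same computation (standalone, no TCG):

#include <assert.h>
#include <stdint.h>

/* out = in1 + in2 + carry_in, returning carry_out; mirrors the two
 * add2 steps in op_addc64() using plain C arithmetic. */
static uint64_t addc64(uint64_t in1, uint64_t in2, uint64_t carry_in,
                       uint64_t *out)
{
    uint64_t t = in1 + carry_in;
    uint64_t c = t < in1;             /* carry from in1 + carry_in */
    *out = t + in2;
    c += *out < t;                    /* carry from t + in2 */
    return c;                         /* 0 or 1: both steps cannot carry */
}

int main(void)
{
    uint64_t r;
    assert(addc64(UINT64_MAX, 0, 1, &r) == 1 && r == 0);
    assert(addc64(UINT64_MAX, UINT64_MAX, 1, &r) == 1 && r == UINT64_MAX);
    assert(addc64(1, 2, 0, &r) == 0 && r == 3);
    return 0;
}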
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
- o->in1 = tcg_temp_new_i64();
+ bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);
- if (!s390_has_feat(S390_FEAT_STFLE_45)) {
+ o->in1 = tcg_temp_new_i64();
+ if (non_atomic) {
tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
} else {
/* Perform the atomic addition in memory. */
@@ -1487,7 +1467,30 @@ static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
/* Recompute also for atomic case: needed for setting CC. */
tcg_gen_add_i64(o->out, o->in1, o->in2);
- if (!s390_has_feat(S390_FEAT_STFLE_45)) {
+ if (non_atomic) {
+ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
+{
+ bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);
+
+ o->in1 = tcg_temp_new_i64();
+ if (non_atomic) {
+ tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+ } else {
+ /* Perform the atomic addition in memory. */
+ tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
+ s->insn->data);
+ }
+
+ /* Recompute also for atomic case: needed for setting CC. */
+ tcg_gen_movi_i64(cc_src, 0);
+ tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
+
+ if (non_atomic) {
tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
}
return DISAS_NEXT;
@@ -4732,29 +4735,58 @@ static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
return DISAS_NEXT;
}
-static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
+static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
- DisasCompare cmp;
- TCGv_i64 borrow;
-
- tcg_gen_sub_i64(o->out, o->in1, o->in2);
+ tcg_gen_movi_i64(cc_src, 0);
+ tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
+ return DISAS_NEXT;
+}
- /* The !borrow flag is the msb of CC. Since we want the inverse of
- that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
- disas_jcc(s, &cmp, 8 | 4);
- borrow = tcg_temp_new_i64();
- if (cmp.is_64) {
- tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
- } else {
- TCGv_i32 t = tcg_temp_new_i32();
- tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
- tcg_gen_extu_i32_i64(borrow, t);
- tcg_temp_free_i32(t);
+/* Compute borrow (0, -1) into cc_src. */
+static void compute_borrow(DisasContext *s)
+{
+ switch (s->cc_op) {
+ case CC_OP_SUBU:
+ /* The borrow value is already in cc_src (0,-1). */
+ break;
+ default:
+ gen_op_calc_cc(s);
+ /* fall through */
+ case CC_OP_STATIC:
+ /* The carry flag is the msb of CC; compute into cc_src. */
+ tcg_gen_extu_i32_i64(cc_src, cc_op);
+ tcg_gen_shri_i64(cc_src, cc_src, 1);
+ /* fall through */
+ case CC_OP_ADDU:
+ /* Convert carry (1,0) to borrow (0,-1). */
+ tcg_gen_subi_i64(cc_src, cc_src, 1);
+ break;
}
- free_compare(&cmp);
+}
+
+static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
+{
+ compute_borrow(s);
+
+ /* Borrow is {0, -1}, so add to subtract. */
+ tcg_gen_add_i64(o->out, o->in1, cc_src);
+ tcg_gen_sub_i64(o->out, o->out, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
+{
+ compute_borrow(s);
+
+ /*
+ * Borrow is {0, -1}, so add to subtract; replicate the
+ * borrow input to produce 128-bit -1 for the addition.
+ */
+ TCGv_i64 zero = tcg_const_i64(0);
+ tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
+ tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
+ tcg_temp_free_i64(zero);
- tcg_gen_sub_i64(o->out, o->out, borrow);
- tcg_temp_free_i64(borrow);
return DISAS_NEXT;
}
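
The borrow convention in op_subb64(): cc_src holds 0 (no borrow) or -1 (borrow), and passing cc_src as both low and high input to the first add2 effectively sign-extends it to a 128-bit 0 or -1 before the wide subtraction. A plain-C sketch of the equivalent arithmetic (illustrative, not the TCG code):

#include <assert.h>
#include <stdint.h>

/* out = in1 - in2 - borrow, with the borrow passed and returned in the
 * CC_OP_SUBU convention: 0 = no borrow, (uint64_t)-1 = borrow. */
static uint64_t subb64(uint64_t in1, uint64_t in2, uint64_t borrow_in,
                       uint64_t *out)
{
    uint64_t b = -borrow_in;                      /* 0 or 1 */

    *out = in1 - in2 - b;
    /* Borrow out when in1 < in2, or when they are equal but a borrow
     * came in (in2 + 1 would exceed in1). */
    return (in1 < in2 || (b && in1 == in2)) ? (uint64_t)-1 : 0;
}

int main(void)
{
    uint64_t r;
    assert(subb64(5, 3, 0, &r) == 0 && r == 2);
    assert(subb64(3, 3, (uint64_t)-1, &r) == (uint64_t)-1 && r == UINT64_MAX);
    assert(subb64(0, UINT64_MAX, 0, &r) == (uint64_t)-1 && r == 1);
    return 0;
}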
@@ -5185,22 +5217,14 @@ static void cout_adds64(DisasContext *s, DisasOps *o)
static void cout_addu32(DisasContext *s, DisasOps *o)
{
- gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
+ tcg_gen_shri_i64(cc_src, o->out, 32);
+ tcg_gen_ext32u_i64(cc_dst, o->out);
+ gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}
static void cout_addu64(DisasContext *s, DisasOps *o)
{
- gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
-}
-
-static void cout_addc32(DisasContext *s, DisasOps *o)
-{
- gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
-}
-
-static void cout_addc64(DisasContext *s, DisasOps *o)
-{
- gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
+ gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}
static void cout_cmps32(DisasContext *s, DisasOps *o)
@@ -5291,22 +5315,14 @@ static void cout_subs64(DisasContext *s, DisasOps *o)
static void cout_subu32(DisasContext *s, DisasOps *o)
{
- gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
+ tcg_gen_sari_i64(cc_src, o->out, 32);
+ tcg_gen_ext32u_i64(cc_dst, o->out);
+ gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}
static void cout_subu64(DisasContext *s, DisasOps *o)
{
- gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
-}
-
-static void cout_subb32(DisasContext *s, DisasOps *o)
-{
- gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
-}
-
-static void cout_subb64(DisasContext *s, DisasOps *o)
-{
- gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
+ gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}
static void cout_tm32(DisasContext *s, DisasOps *o)
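
The 32-bit CC writers above rely on the operands having been zero-extended (the new r1_32u/r2_32u loaders), so the 64-bit sum or difference carries the 32-bit carry/borrow in its upper half: a logical shift by 32 gives 0/1 for add, an arithmetic shift gives 0/-1 for subtract. A quick arithmetic check in plain C (standalone sketch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* 32-bit operands zero-extended to 64 bits, as in1_r1_32u etc. do. */
    uint64_t a = 0xF0000000u, b = 0x20000000u;

    /* cout_addu32: the carry is bit 32 of the 64-bit sum (0 or 1). */
    uint64_t sum = a + b;
    assert((sum >> 32) == 1);                 /* carry out of bit 31 */
    assert((uint32_t)sum == 0x10000000u);     /* 32-bit result */

    /* cout_subu32: an arithmetic shift of the 64-bit difference by 32
     * yields 0 (no borrow) or -1 (borrow), matching CC_OP_SUBU. */
    int64_t diff = (int64_t)(b - a);
    assert((diff >> 32) == -1);               /* borrow */
    assert((uint32_t)diff == 0x30000000u);    /* 32-bit result */
    return 0;
}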
@@ -5637,6 +5653,13 @@ static void in1_r2_sr32(DisasContext *s, DisasOps *o)
}
#define SPEC_in1_r2_sr32 0
+static void in1_r2_32u(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
+}
+#define SPEC_in1_r2_32u 0
+
static void in1_r3(DisasContext *s, DisasOps *o)
{
o->in1 = load_reg(get_field(s, r3));