Diffstat (limited to 'target/s390x/insn-data.def')
-rw-r--r-- | target/s390x/insn-data.def | 137
1 file changed, 137 insertions, 0 deletions
diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def
index 71fa9b8d6c..e61475bdc4 100644
--- a/target/s390x/insn-data.def
+++ b/target/s390x/insn-data.def
@@ -1054,6 +1054,143 @@
 /* VECTOR UNPACK LOGICAL LOW */
     F(0xe7d4, VUPLL, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC)
 
+/* === Vector Integer Instructions === */
+
+/* VECTOR ADD */
+    F(0xe7f3, VA, VRR_c, V, 0, 0, 0, 0, va, 0, IF_VEC)
+/* VECTOR ADD COMPUTE CARRY */
+    F(0xe7f1, VACC, VRR_c, V, 0, 0, 0, 0, vacc, 0, IF_VEC)
+/* VECTOR ADD WITH CARRY */
+    F(0xe7bb, VAC, VRR_d, V, 0, 0, 0, 0, vac, 0, IF_VEC)
+/* VECTOR ADD WITH CARRY COMPUTE CARRY */
+    F(0xe7b9, VACCC, VRR_d, V, 0, 0, 0, 0, vaccc, 0, IF_VEC)
+/* VECTOR AND */
+    F(0xe768, VN, VRR_c, V, 0, 0, 0, 0, vn, 0, IF_VEC)
+/* VECTOR AND WITH COMPLEMENT */
+    F(0xe769, VNC, VRR_c, V, 0, 0, 0, 0, vnc, 0, IF_VEC)
+/* VECTOR AVERAGE */
+    F(0xe7f2, VAVG, VRR_c, V, 0, 0, 0, 0, vavg, 0, IF_VEC)
+/* VECTOR AVERAGE LOGICAL */
+    F(0xe7f0, VAVGL, VRR_c, V, 0, 0, 0, 0, vavgl, 0, IF_VEC)
+/* VECTOR CHECKSUM */
+    F(0xe766, VCKSM, VRR_c, V, 0, 0, 0, 0, vcksm, 0, IF_VEC)
+/* VECTOR ELEMENT COMPARE */
+    F(0xe7db, VEC, VRR_a, V, 0, 0, 0, 0, vec, cmps64, IF_VEC)
+/* VECTOR ELEMENT COMPARE LOGICAL */
+    F(0xe7d9, VECL, VRR_a, V, 0, 0, 0, 0, vec, cmpu64, IF_VEC)
+/* VECTOR COMPARE EQUAL */
+    E(0xe7f8, VCEQ, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_EQ, IF_VEC)
+/* VECTOR COMPARE HIGH */
+    E(0xe7fb, VCH, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_GT, IF_VEC)
+/* VECTOR COMPARE HIGH LOGICAL */
+    E(0xe7f9, VCHL, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_GTU, IF_VEC)
+/* VECTOR COUNT LEADING ZEROS */
+    F(0xe753, VCLZ, VRR_a, V, 0, 0, 0, 0, vclz, 0, IF_VEC)
+/* VECTOR COUNT TRAILING ZEROS */
+    F(0xe752, VCTZ, VRR_a, V, 0, 0, 0, 0, vctz, 0, IF_VEC)
+/* VECTOR EXCLUSIVE OR */
+    F(0xe76d, VX, VRR_c, V, 0, 0, 0, 0, vx, 0, IF_VEC)
+/* VECTOR GALOIS FIELD MULTIPLY SUM */
+    F(0xe7b4, VGFM, VRR_c, V, 0, 0, 0, 0, vgfm, 0, IF_VEC)
+/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
+    F(0xe7bc, VGFMA, VRR_d, V, 0, 0, 0, 0, vgfma, 0, IF_VEC)
+/* VECTOR LOAD COMPLEMENT */
+    F(0xe7de, VLC, VRR_a, V, 0, 0, 0, 0, vlc, 0, IF_VEC)
+/* VECTOR LOAD POSITIVE */
+    F(0xe7df, VLP, VRR_a, V, 0, 0, 0, 0, vlp, 0, IF_VEC)
+/* VECTOR MAXIMUM */
+    F(0xe7ff, VMX, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC)
+/* VECTOR MAXIMUM LOGICAL */
+    F(0xe7fd, VMXL, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC)
+/* VECTOR MINIMUM */
+    F(0xe7fe, VMN, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC)
+/* VECTOR MINIMUM LOGICAL */
+    F(0xe7fc, VMNL, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC)
+/* VECTOR MULTIPLY AND ADD LOW */
+    F(0xe7aa, VMAL, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY AND ADD HIGH */
+    F(0xe7ab, VMAH, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY AND ADD LOGICAL HIGH */
+    F(0xe7a9, VMALH, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY AND ADD EVEN */
+    F(0xe7ae, VMAE, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY AND ADD LOGICAL EVEN */
+    F(0xe7ac, VMALE, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY AND ADD ODD */
+    F(0xe7af, VMAO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY AND ADD LOGICAL ODD */
+    F(0xe7ad, VMALO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY HIGH */
+    F(0xe7a3, VMH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOGICAL HIGH */
+    F(0xe7a1, VMLH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOW */
+    F(0xe7a2, VML, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY EVEN */
+    F(0xe7a6, VME, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOGICAL EVEN */
+    F(0xe7a4, VMLE, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY ODD */
+    F(0xe7a7, VMO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOGICAL ODD */
+    F(0xe7a5, VMLO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR NAND */
+    F(0xe76e, VNN, VRR_c, VE, 0, 0, 0, 0, vnn, 0, IF_VEC)
+/* VECTOR NOR */
+    F(0xe76b, VNO, VRR_c, V, 0, 0, 0, 0, vno, 0, IF_VEC)
+/* VECTOR NOT EXCLUSIVE OR */
+    F(0xe76c, VNX, VRR_c, VE, 0, 0, 0, 0, vnx, 0, IF_VEC)
+/* VECTOR OR */
+    F(0xe76a, VO, VRR_c, V, 0, 0, 0, 0, vo, 0, IF_VEC)
+/* VECTOR OR WITH COMPLEMENT */
+    F(0xe76f, VOC, VRR_c, VE, 0, 0, 0, 0, voc, 0, IF_VEC)
+/* VECTOR POPULATION COUNT */
+    F(0xe750, VPOPCT, VRR_a, V, 0, 0, 0, 0, vpopct, 0, IF_VEC)
+/* VECTOR ELEMENT ROTATE LEFT LOGICAL */
+    F(0xe773, VERLLV, VRR_c, V, 0, 0, 0, 0, verllv, 0, IF_VEC)
+    F(0xe733, VERLL, VRS_a, V, la2, 0, 0, 0, verll, 0, IF_VEC)
+/* VECTOR ELEMENT ROTATE AND INSERT UNDER MASK */
+    F(0xe772, VERIM, VRI_d, V, 0, 0, 0, 0, verim, 0, IF_VEC)
+/* VECTOR ELEMENT SHIFT LEFT */
+    F(0xe770, VESLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC)
+    F(0xe730, VESL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC)
+/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
+    F(0xe77a, VESRAV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC)
+    F(0xe73a, VESRA, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC)
+/* VECTOR ELEMENT SHIFT RIGHT LOGICAL */
+    F(0xe778, VESRLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC)
+    F(0xe738, VESRL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC)
+/* VECTOR SHIFT LEFT */
+    F(0xe774, VSL, VRR_c, V, 0, 0, 0, 0, vsl, 0, IF_VEC)
+/* VECTOR SHIFT LEFT BY BYTE */
+    F(0xe775, VSLB, VRR_c, V, 0, 0, 0, 0, vsl, 0, IF_VEC)
+/* VECTOR SHIFT LEFT DOUBLE BY BYTE */
+    F(0xe777, VSLDB, VRI_d, V, 0, 0, 0, 0, vsldb, 0, IF_VEC)
+/* VECTOR SHIFT RIGHT ARITHMETIC */
+    F(0xe77e, VSRA, VRR_c, V, 0, 0, 0, 0, vsra, 0, IF_VEC)
+/* VECTOR SHIFT RIGHT ARITHMETIC BY BYTE */
+    F(0xe77f, VSRAB, VRR_c, V, 0, 0, 0, 0, vsra, 0, IF_VEC)
+/* VECTOR SHIFT RIGHT LOGICAL */
+    F(0xe77c, VSRL, VRR_c, V, 0, 0, 0, 0, vsrl, 0, IF_VEC)
+/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
+    F(0xe77d, VSRLB, VRR_c, V, 0, 0, 0, 0, vsrl, 0, IF_VEC)
+/* VECTOR SUBTRACT */
+    F(0xe7f7, VS, VRR_c, V, 0, 0, 0, 0, vs, 0, IF_VEC)
+/* VECTOR SUBTRACT COMPUTE BORROW INDICATION */
+    F(0xe7f5, VSCBI, VRR_c, V, 0, 0, 0, 0, vscbi, 0, IF_VEC)
+/* VECTOR SUBTRACT WITH BORROW INDICATION */
+    F(0xe7bf, VSBI, VRR_d, V, 0, 0, 0, 0, vsbi, 0, IF_VEC)
+/* VECTOR SUBTRACT WITH BORROW COMPUTE BORROW INDICATION */
+    F(0xe7bd, VSBCBI, VRR_d, V, 0, 0, 0, 0, vsbcbi, 0, IF_VEC)
+/* VECTOR SUM ACROSS DOUBLEWORD */
+    F(0xe765, VSUMG, VRR_c, V, 0, 0, 0, 0, vsumg, 0, IF_VEC)
+/* VECTOR SUM ACROSS QUADWORD */
+    F(0xe767, VSUMQ, VRR_c, V, 0, 0, 0, 0, vsumq, 0, IF_VEC)
+/* VECTOR SUM ACROSS WORD */
+    F(0xe764, VSUM, VRR_c, V, 0, 0, 0, 0, vsum, 0, IF_VEC)
+/* VECTOR TEST UNDER MASK */
+    F(0xe7d8, VTM, VRR_a, V, 0, 0, 0, 0, vtm, 0, IF_VEC)
+
 #ifndef CONFIG_USER_ONLY
 /* COMPARE AND SWAP AND PURGE */
     E(0xb250, CSP, RRE, Z, r1_32u, ra2, r1_P, 0, csp, 0, MO_TEUL, IF_PRIV)
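For context: insn-data.def is consumed via the X-macro pattern, where the C file that includes it first defines the E() and F() macros; F() is shorthand for E() with the per-instruction data field defaulted to 0, which is why the compare entries above (VCEQ, VCH, VCHL) use E() directly to pass a TCG_COND_* value. The snippet below is a minimal, self-contained sketch of that pattern only; the three-field struct, macro arities, and the condition value 4 are hypothetical stand-ins, not QEMU's actual definitions.

```c
/*
 * Illustrative sketch of the X-macro pattern behind a table like
 * insn-data.def. QEMU's real E()/F() macros take many more fields
 * (format, facility, operand helpers, CC writer, flags); this is a
 * simplified stand-in.
 */
#include <stdio.h>

typedef struct {
    unsigned opc;     /* opcode, e.g. 0xe7f3 for VA */
    const char *name; /* mnemonic, stringized from the table entry */
    int data;         /* extra per-insn data; compares pass a condition here */
} DisasInsn;

/* F() is E() with the extra data field defaulted to 0. */
#define F(OPC, NM) E(OPC, NM, 0)

/* Expand each table entry into an array initializer. */
#define E(OPC, NM, D) { .opc = OPC, .name = #NM, .data = D },
static const DisasInsn insn_table[] = {
    /* VECTOR ADD */
    F(0xe7f3, VA)
    /* VECTOR COMPARE EQUAL: 4 is a hypothetical stand-in for the
     * TCG_COND_EQ value the real table passes in this slot. */
    E(0xe7f8, VCEQ, 4)
};
#undef E
#undef F

int main(void)
{
    /* Dump the generated table to show what the expansion produced. */
    for (size_t i = 0; i < sizeof(insn_table) / sizeof(insn_table[0]); i++) {
        printf("0x%04x %-6s data=%d\n", insn_table[i].opc,
               insn_table[i].name, insn_table[i].data);
    }
    return 0;
}
```

The payoff of the pattern is that the same .def file can be re-included under different E() definitions (an enum of indices, the instruction array, decode switch cases), so adding one line to the table, as this patch does 137 times, keeps every derived structure in sync.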