author     David Hildenbrand <david@redhat.com>   2019-03-07 13:15:20 +0100
committer  Cornelia Huck <cohuck@redhat.com>      2019-03-11 09:31:01 +0100
commit     0a85f8257aa9aac4b71b9d57fa8e250321163aea (patch)
tree       9de3f5413893cd3cd39ee85c70315deb092077a5 /target
parent     f180da83c039acfd9ac949c029b154c118a7b293 (diff)
s390x/tcg: Implement VECTOR LOAD MULTIPLE
Try to load the last element first. Access to the first element will be
checked afterwards. This way, we can guarantee that the vector is not
modified before we checked for all possible exceptions. (16 vectors
cannot cross more than two pages)

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20190307121539.12842-14-david@redhat.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
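The approach described above -- probe the rightmost doubleword before any
register is modified, so that both of the (at most two) pages a 256-byte
operand can span are validated up front -- can be sketched in plain C. The
sketch below is illustrative only and is not part of the commit or of QEMU:
probe_read() and load_multiple() are hypothetical names, and a vector
register is modeled here as two uint64_t doublewords.

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for a guest-memory load that may raise an access exception. */
    static uint64_t probe_read(const uint64_t *addr)
    {
        return *addr;
    }

    /* Load nregs 128-bit registers (two doublewords each) from mem into regs. */
    void load_multiple(uint64_t *regs, const uint64_t *mem, size_t nregs)
    {
        const size_t last = 2 * nregs - 1;

        /* Touch the last doubleword first: a fault here is raised before any
         * register has been modified. */
        const uint64_t tail = probe_read(&mem[last]);

        /* Copy the remaining doublewords; the very first access re-checks the
         * first of the (at most two) pages the operand can span. */
        for (size_t i = 0; i < last; i++) {
            regs[i] = probe_read(&mem[i]);
        }

        regs[last] = tail;   /* reuse the value that was probed up front */
    }

The patch below does the same at translation time: the extra
tcg_gen_qemu_ld_i64() of the last doubleword acts as the probe, and its
result is written into the last vector element only after the loop has
loaded everything else.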
Diffstat (limited to 'target')
-rw-r--r--  target/s390x/insn-data.def       |  2
-rw-r--r--  target/s390x/translate_vx.inc.c  | 40
2 files changed, 42 insertions, 0 deletions
diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def
index 2b36205c84..fa0f3a9003 100644
--- a/target/s390x/insn-data.def
+++ b/target/s390x/insn-data.def
@@ -1000,6 +1000,8 @@
    F(0xe721, VLGV, VRS_c, V, la2, 0, r1, 0, vlgv, 0, IF_VEC)
/* VECTOR LOAD LOGICAL ELEMENT AND ZERO */
    F(0xe704, VLLEZ, VRX, V, la2, 0, 0, 0, vllez, 0, IF_VEC)
+/* VECTOR LOAD MULTIPLE */
+    F(0xe736, VLM, VRS_a, V, la2, 0, 0, 0, vlm, 0, IF_VEC)

#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE */
diff --git a/target/s390x/translate_vx.inc.c b/target/s390x/translate_vx.inc.c
index cdad2a52f0..083f5c6213 100644
--- a/target/s390x/translate_vx.inc.c
+++ b/target/s390x/translate_vx.inc.c
@@ -406,3 +406,43 @@ static DisasJumpType op_vllez(DisasContext *s, DisasOps *o)
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
+
+static DisasJumpType op_vlm(DisasContext *s, DisasOps *o)
+{
+    const uint8_t v3 = get_field(s->fields, v3);
+    uint8_t v1 = get_field(s->fields, v1);
+    TCGv_i64 t0, t1;
+
+    if (v3 < v1 || (v3 - v1 + 1) > 16) {
+        gen_program_exception(s, PGM_SPECIFICATION);
+        return DISAS_NORETURN;
+    }
+
+    /*
+     * Check for possible access exceptions by trying to load the last
+     * element. The first element will be checked first by the actual load.
+     */
+    t0 = tcg_temp_new_i64();
+    t1 = tcg_temp_new_i64();
+    gen_addi_and_wrap_i64(s, t0, o->addr1, (v3 - v1) * 16 + 8);
+    tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEQ);
+
+    for (;; v1++) {
+        tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
+        write_vec_element_i64(t1, v1, 0, ES_64);
+        if (v1 == v3) {
+            break;
+        }
+        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
+        tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
+        write_vec_element_i64(t1, v1, 1, ES_64);
+        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
+    }
+
+    /* Store the last element, loaded first */
+    write_vec_element_i64(t0, v1, 1, ES_64);
+
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+    return DISAS_NEXT;
+}