Diffstat (limited to 'target/arm')
-rw-r--r--  target/arm/sve.decode       11
-rw-r--r--  target/arm/translate-sve.c  85
2 files changed, 70 insertions(+), 26 deletions(-)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 45016c6042..75133ce659 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -83,6 +83,7 @@
&rprr_gather_load rd pg rn rm esz msz u ff xs scale
&rpri_gather_load rd pg rn imm esz msz u ff
&rprr_scatter_store rd pg rn rm esz msz xs scale
+&rpri_scatter_store rd pg rn imm esz msz

###########################################################################
# Named instruction formats. These are generally used to
@@ -219,6 +220,8 @@
            &rprr_store nreg=0
@rprr_scatter_store ....... msz:2 .. rm:5 ... pg:3 rn:5 rd:5 \
                    &rprr_scatter_store
+@rpri_scatter_store ....... msz:2 .. imm:5 ... pg:3 rn:5 rd:5 \
+                    &rpri_scatter_store

###########################################################################
# Instruction patterns. Grouped according to the SVE encodingindex.xhtml.
@@ -932,6 +935,14 @@ ST1_zprz 1110010 .. 01 ..... 101 ... ..... ..... \
ST1_zprz 1110010 .. 00 ..... 101 ... ..... ..... \
         @rprr_scatter_store xs=2 esz=3 scale=0

+# SVE 64-bit scatter store (vector plus immediate)
+ST1_zpiz 1110010 .. 10 ..... 101 ... ..... ..... \
+         @rpri_scatter_store esz=3
+
+# SVE 32-bit scatter store (vector plus immediate)
+ST1_zpiz 1110010 .. 11 ..... 101 ... ..... ..... \
+         @rpri_scatter_store esz=2
+
# SVE 64-bit scatter store (scalar plus unpacked 32-bit scaled offset)
# Require msz > 0
ST1_zprz 1110010 .. 01 ..... 100 ... ..... ..... \
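
An aside on the decode format: @rpri_scatter_store slices the 32-bit instruction word into fixed opcode bits and the named fields, with esz pinned per pattern rather than decoded from a field. Below is a minimal C sketch of the equivalent field extraction, for orientation only; the struct and helper names are illustrative, not what decodetree actually generates.

#include <stdint.h>

/* Field layout of @rpri_scatter_store, read off the format string above:
 *   31..25 opcode   24..23 msz   22..21 opcode   20..16 imm
 *   15..13 opcode   12..10 pg     9..5  rn        4..0  rd
 */
typedef struct {
    int rd, pg, rn, imm, esz, msz;
} sketch_arg_rpri_scatter_store;

static uint32_t sketch_extract32(uint32_t insn, int pos, int len)
{
    return (insn >> pos) & ((1u << len) - 1u);
}

static void sketch_decode_rpri_scatter_store(uint32_t insn, int esz,
                                             sketch_arg_rpri_scatter_store *a)
{
    a->msz = sketch_extract32(insn, 23, 2);
    a->imm = sketch_extract32(insn, 16, 5);  /* unsigned; scaled by msz later */
    a->pg  = sketch_extract32(insn, 10, 3);
    a->rn  = sketch_extract32(insn, 5, 5);   /* Zn: the vector base register */
    a->rd  = sketch_extract32(insn, 0, 5);
    a->esz = esz;  /* fixed by the pattern: esz=3 (64-bit) or esz=2 (32-bit) */
}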
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 66d8c94c58..7591358539 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -4391,32 +4391,34 @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a, uint32_t insn)
    return true;
}

+/* Indexed by [xs][msz]. */
+static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][3] = {
+    { gen_helper_sve_stbs_zsu,
+      gen_helper_sve_sths_zsu,
+      gen_helper_sve_stss_zsu, },
+    { gen_helper_sve_stbs_zss,
+      gen_helper_sve_sths_zss,
+      gen_helper_sve_stss_zss, },
+};
+
+/* Note that we overload xs=2 to indicate 64-bit offset. */
+static gen_helper_gvec_mem_scatter * const scatter_store_fn64[3][4] = {
+    { gen_helper_sve_stbd_zsu,
+      gen_helper_sve_sthd_zsu,
+      gen_helper_sve_stsd_zsu,
+      gen_helper_sve_stdd_zsu, },
+    { gen_helper_sve_stbd_zss,
+      gen_helper_sve_sthd_zss,
+      gen_helper_sve_stsd_zss,
+      gen_helper_sve_stdd_zss, },
+    { gen_helper_sve_stbd_zd,
+      gen_helper_sve_sthd_zd,
+      gen_helper_sve_stsd_zd,
+      gen_helper_sve_stdd_zd, },
+};
+
static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a, uint32_t insn)
{
-    /* Indexed by [xs][msz]. */
-    static gen_helper_gvec_mem_scatter * const fn32[2][3] = {
-        { gen_helper_sve_stbs_zsu,
-          gen_helper_sve_sths_zsu,
-          gen_helper_sve_stss_zsu, },
-        { gen_helper_sve_stbs_zss,
-          gen_helper_sve_sths_zss,
-          gen_helper_sve_stss_zss, },
-    };
-    /* Note that we overload xs=2 to indicate 64-bit offset. */
-    static gen_helper_gvec_mem_scatter * const fn64[3][4] = {
-        { gen_helper_sve_stbd_zsu,
-          gen_helper_sve_sthd_zsu,
-          gen_helper_sve_stsd_zsu,
-          gen_helper_sve_stdd_zsu, },
-        { gen_helper_sve_stbd_zss,
-          gen_helper_sve_sthd_zss,
-          gen_helper_sve_stsd_zss,
-          gen_helper_sve_stdd_zss, },
-        { gen_helper_sve_stbd_zd,
-          gen_helper_sve_sthd_zd,
-          gen_helper_sve_stsd_zd,
-          gen_helper_sve_stdd_zd, },
-    };
    gen_helper_gvec_mem_scatter *fn;

    if (a->esz < a->msz || (a->msz == 0 && a->scale)) {
@@ -4427,10 +4429,10 @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a, uint32_t insn)
    }
    switch (a->esz) {
    case MO_32:
-        fn = fn32[a->xs][a->msz];
+        fn = scatter_store_fn32[a->xs][a->msz];
        break;
    case MO_64:
-        fn = fn64[a->xs][a->msz];
+        fn = scatter_store_fn64[a->xs][a->msz];
        break;
    default:
        g_assert_not_reached();
@@ -4440,6 +4442,37 @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a, uint32_t insn)
    return true;
}

+static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a, uint32_t insn)
+{
+    gen_helper_gvec_mem_scatter *fn = NULL;
+    TCGv_i64 imm;
+
+    if (a->esz < a->msz) {
+        return false;
+    }
+    if (!sve_access_check(s)) {
+        return true;
+    }
+
+    switch (a->esz) {
+    case MO_32:
+        fn = scatter_store_fn32[0][a->msz];
+        break;
+    case MO_64:
+        fn = scatter_store_fn64[2][a->msz];
+        break;
+    }
+    assert(fn != NULL);
+
+    /* Treat ST1_zpiz (zn[x] + imm) the same way as ST1_zprz (rn + zm[x])
+     * by loading the immediate into the scalar parameter.
+     */
+    imm = tcg_const_i64(a->imm << a->msz);
+    do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, fn);
+    tcg_temp_free_i64(imm);
+    return true;
+}
+
/*
* Prefetches
*/
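
To make the comment in trans_ST1_zpiz concrete: a vector-plus-immediate scatter store writes each active element to zn[i] + (imm << msz), so loading the scaled immediate into the parameter that otherwise carries the scalar base register lets the existing scatter helpers serve both addressing forms unchanged. A simplified, illustrative sketch of those per-element semantics follows; flat arrays stand in for guest memory and for the predicate and vector register layouts, none of which look like this inside QEMU (the real helpers go through cpu_st*_data_ra and packed predicates).

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* ST1D (vector plus immediate), esz == msz == MO_64, in spirit:
 * for each predicated-active lane i, store zd[i] at zn[i] + scaled_imm.
 */
static void sketch_st1d_zpiz(uint8_t *mem, const uint64_t *zd,
                             const uint8_t *pg, const uint64_t *zn,
                             uint64_t scaled_imm, size_t nelem)
{
    for (size_t i = 0; i < nelem; i++) {
        if (pg[i]) {  /* inactive lanes are simply skipped */
            /* Same "base + vector offset" math as ST1_zprz, with the
             * scaled immediate riding in the scalar-base slot. */
            memcpy(mem + zn[i] + scaled_imm, &zd[i], sizeof(zd[i]));
        }
    }
}

The table indexing in trans_ST1_zpiz falls out of the same trick: scatter_store_fn64[2][a->msz] picks the row that the earlier comment overloads as xs=2 ("64-bit offset"), since the vector elements here are full 64-bit addresses, while scatter_store_fn32[0][a->msz] picks the _zsu row, treating the 32-bit vector addresses as unsigned.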