author     Claudio Fontana <claudio.fontana@huawei.com>   2013-06-11 10:14:09 +0200
committer  Claudio Fontana <claudio.fontana@huawei.com>   2013-07-03 14:43:11 +0200
commit     b1f6dc0d2a03f0e114cc5ff08b0a133e24fd55ad (patch)
tree       d14417cb5e08b0bcf5b2b268a8289ac7a0a2f733 /tcg
parent     1acd5a373905ddb28957842256a038956941f332 (diff)
tcg/aarch64: implement ldst 12bit scaled uimm offset
Implement the 12bit scaled unsigned immediate offset variant of LDR/STR.
This improves code size by avoiding the movi + ldst_r sequence for
naturally aligned offsets that are in range.

Signed-off-by: Claudio Fontana <claudio.fontana@huawei.com>
Reviewed-by: Richard Henderson <rth@twiddle.net>
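As a rough standalone illustration (not part of the patch): an offset can use the scaled uimm12 form only when it is a multiple of the access size (1 << s_bits bytes) and the scaled value fits in 12 bits. A minimal sketch of that test in isolation, with the helper name fits_scaled_uimm12 made up for this example:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: true if 'offset' can be encoded as a scaled unsigned
   12-bit immediate for an access of (1 << s_bits) bytes, mirroring the
   alignment/range test added to tcg_out_ldst() in the diff below. */
static bool fits_scaled_uimm12(int64_t offset, unsigned s_bits)
{
    if (offset < 0 || (offset & ((1 << s_bits) - 1))) {
        return false;                   /* negative or not naturally aligned */
    }
    return (offset >> s_bits) <= 0xfff; /* scaled value must fit in 12 bits */
}

/* e.g. a 64-bit load (s_bits = 3) at offset 0x1008 scales to 0x201 and fits;
   offset 0x1009 is misaligned and would fall back to movi + ldst_r. */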
Diffstat (limited to 'tcg')
-rw-r--r--  tcg/aarch64/tcg-target.c  32
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index 562a549dab..cfd40817f2 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -315,6 +315,17 @@ static inline void tcg_out_ldst_9(TCGContext *s,
     tcg_out32(s, op_data << 24 | mod << 20 | off << 12 | rn << 5 | rd);
 }
 
+/* tcg_out_ldst_12 expects a scaled unsigned immediate offset */
+static inline void tcg_out_ldst_12(TCGContext *s,
+                                   enum aarch64_ldst_op_data op_data,
+                                   enum aarch64_ldst_op_type op_type,
+                                   TCGReg rd, TCGReg rn,
+                                   tcg_target_ulong scaled_uimm)
+{
+    tcg_out32(s, (op_data | 1) << 24
+              | op_type << 20 | scaled_uimm << 10 | rn << 5 | rd);
+}
+
 static inline void tcg_out_movr(TCGContext *s, int ext, TCGReg rd, TCGReg src)
 {
     /* register to register move using MOV (shifted register with no shift) */
@@ -374,10 +385,25 @@ static inline void tcg_out_ldst(TCGContext *s, enum aarch64_ldst_op_data data,
 {
     if (offset >= -256 && offset < 256) {
         tcg_out_ldst_9(s, data, type, rd, rn, offset);
-    } else {
-        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset);
-        tcg_out_ldst_r(s, data, type, rd, rn, TCG_REG_TMP);
+        return;
     }
+
+    if (offset >= 256) {
+        /* if the offset is naturally aligned and in range,
+           then we can use the scaled uimm12 encoding */
+        unsigned int s_bits = data >> 6;
+        if (!(offset & ((1 << s_bits) - 1))) {
+            tcg_target_ulong scaled_uimm = offset >> s_bits;
+            if (scaled_uimm <= 0xfff) {
+                tcg_out_ldst_12(s, data, type, rd, rn, scaled_uimm);
+                return;
+            }
+        }
+    }
+
+    /* worst-case scenario, move offset to temp register, use reg offset */
+    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset);
+    tcg_out_ldst_r(s, data, type, rd, rn, TCG_REG_TMP);
 }
 
 /* mov alias implemented with add immediate, useful to move to/from SP */
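
As a sanity check on the word that tcg_out_ldst_12 emits, here is a standalone sketch (not QEMU code). It assumes the constants LDST_64 = 0xf8 and LDST_LD = 0x4 defined earlier in tcg-target.c, which are not visible in this excerpt; under that assumption a 64-bit load from x1 + 0x1008 into x0 comes out as the uimm12 form of LDR:

#include <stdio.h>

int main(void)
{
    /* Assumed values from tcg-target.c (not shown above): LDST_64 = 0xf8, LDST_LD = 0x4 */
    unsigned op_data = 0xf8, op_type = 0x4;
    unsigned rd = 0, rn = 1;                /* x0, x1 */
    unsigned scaled_uimm = 0x1008 >> 3;     /* byte offset 0x1008, s_bits = 3 -> 0x201 */

    /* same expression as tcg_out_ldst_12 in the hunk above */
    unsigned insn = (op_data | 1) << 24
                  | op_type << 20 | scaled_uimm << 10 | rn << 5 | rd;

    printf("0x%08x\n", insn);   /* prints 0xf9480420, i.e. ldr x0, [x1, #0x1008] */
    return 0;
}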