author    | Richard Henderson <richard.henderson@linaro.org> | 2019-05-18 11:37:02 -0700
committer | Richard Henderson <richard.henderson@linaro.org> | 2019-05-22 15:09:43 -0400
commit    | 11e2bfef799024be4a08fcf6797fe0b22fb16b58 (patch)
tree      | 0a5cb2aec8d91f6e6cf68ce2b4962e831ef57314
parent    | 9e27f58b9902834dffc0d66d9eb62f78d9c2a632 (diff)
tcg/i386: Use MOVDQA for TCG_TYPE_V128 load/store
This instruction raises #GP, aka SIGSEGV, if the effective address
is not aligned to 16 bytes.
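
As an aside, here is a minimal sketch (not part of the patch) of the behaviour the patch relies on, written with SSE intrinsics: _mm_load_si128() maps to (V)MOVDQA and requires a 16-byte aligned address, while _mm_loadu_si128() maps to (V)MOVDQU and accepts any address. Depending on the compiler and optimisation level the aligned intrinsic may be folded into another instruction, so the fault is illustrative rather than guaranteed; the variable names are invented.

```c
#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* 16-byte aligned buffer; buf + 1 is deliberately misaligned. */
    static uint8_t buf[32] __attribute__((aligned(16)));

    __m128i a = _mm_load_si128((const __m128i *)buf);        /* MOVDQA: aligned, ok */
    __m128i u = _mm_loadu_si128((const __m128i *)(buf + 1)); /* MOVDQU: unaligned, ok */

    /*
     * An aligned load from a misaligned address raises #GP, which the
     * OS delivers as SIGSEGV -- the trap this patch wants for alignment
     * bugs.  Left commented out so the sample runs cleanly:
     *
     * __m128i bad = _mm_load_si128((const __m128i *)(buf + 1));
     */
    (void)a;
    (void)u;
    puts("aligned and unaligned loads done");
    return 0;
}
```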
We have assertions in tcg-op-gvec.c that the offset from ENV is
aligned, for vector types <= V128. But the offset itself does not
validate that the final pointer is aligned -- one must also remember
to use the QEMU_ALIGNED() attribute on the vector member within ENV.
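
To illustrate that point, a self-contained sketch: QEMU_ALIGNED() is the real macro (it expands to __attribute__((aligned(X))) in include/qemu/compiler.h, reproduced below), but the CPUFooState struct and its field names are hypothetical. The offset check is all the gvec code can see; the attribute is what makes the final pointer aligned.

```c
#include <stdint.h>
#include <stddef.h>
#include <assert.h>

/* Real macro from include/qemu/compiler.h, copied so this compiles standalone. */
#define QEMU_ALIGNED(X) __attribute__((aligned(X)))

/* Hypothetical guest CPU state -- the names are invented. */
typedef struct CPUFooState {
    uint64_t scalar_regs[32];
    /*
     * gvec asserts that offsetof(CPUFooState, vregs) is a multiple of 16,
     * but only QEMU_ALIGNED(16) on the member guarantees that
     * env + offset is itself 16-byte aligned at run time.
     */
    uint64_t vregs[32][2] QEMU_ALIGNED(16);
} CPUFooState;

int main(void)
{
    /* The offset alignment is the part gvec can check... */
    assert(offsetof(CPUFooState, vregs) % 16 == 0);
    /* ...the attribute is what raises the struct's own alignment to 16. */
    assert(_Alignof(CPUFooState) >= 16);
    return 0;
}
```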
PowerPC Altivec has vector load/store instructions that silently
discard the low 4 bits of the address, making alignment mistakes
difficult to discover. Help expose such mistakes by making the most
popular host visibly signal the error.
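
A rough sketch of the contrast being drawn, assuming the behaviour described by the respective ISAs (this is not QEMU code):

```c
#include <stdint.h>

/*
 * Altivec lvx/stvx as described by the Power ISA: the low 4 bits of the
 * effective address are ignored, so a misaligned pointer silently reads
 * or writes the wrong 16 bytes.
 */
static inline uintptr_t altivec_effective_address(uintptr_t addr)
{
    return addr & ~(uintptr_t)0xf;
}

/*
 * x86 MOVDQA given the same misaligned address raises #GP (seen as
 * SIGSEGV), so the mistake is caught immediately on the most common
 * host instead of quietly corrupting data.
 */
```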
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
-rw-r--r-- | tcg/i386/tcg-target.inc.c | 24
1 file changed, 22 insertions, 2 deletions
```diff
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index 6ec5e60448..c0443da4af 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -1082,14 +1082,24 @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
         }
         /* FALLTHRU */
     case TCG_TYPE_V64:
+        /* There is no instruction that can validate 8-byte alignment. */
         tcg_debug_assert(ret >= 16);
         tcg_out_vex_modrm_offset(s, OPC_MOVQ_VqWq, ret, 0, arg1, arg2);
         break;
     case TCG_TYPE_V128:
+        /*
+         * The gvec infrastructure asserts that v128 vector loads
+         * and stores use a 16-byte aligned offset.  Validate that the
+         * final pointer is aligned by using an insn that will SIGSEGV.
+         */
         tcg_debug_assert(ret >= 16);
-        tcg_out_vex_modrm_offset(s, OPC_MOVDQU_VxWx, ret, 0, arg1, arg2);
+        tcg_out_vex_modrm_offset(s, OPC_MOVDQA_VxWx, ret, 0, arg1, arg2);
         break;
     case TCG_TYPE_V256:
+        /*
+         * The gvec infrastructure only requires 16-byte alignment,
+         * so here we must use an unaligned load.
+         */
         tcg_debug_assert(ret >= 16);
         tcg_out_vex_modrm_offset(s, OPC_MOVDQU_VxWx | P_VEXL, ret,
                                  0, arg1, arg2);
@@ -1117,14 +1127,24 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
         }
         /* FALLTHRU */
     case TCG_TYPE_V64:
+        /* There is no instruction that can validate 8-byte alignment. */
         tcg_debug_assert(arg >= 16);
         tcg_out_vex_modrm_offset(s, OPC_MOVQ_WqVq, arg, 0, arg1, arg2);
         break;
     case TCG_TYPE_V128:
+        /*
+         * The gvec infrastructure asserts that v128 vector loads
+         * and stores use a 16-byte aligned offset.  Validate that the
+         * final pointer is aligned by using an insn that will SIGSEGV.
+         */
         tcg_debug_assert(arg >= 16);
-        tcg_out_vex_modrm_offset(s, OPC_MOVDQU_WxVx, arg, 0, arg1, arg2);
+        tcg_out_vex_modrm_offset(s, OPC_MOVDQA_WxVx, arg, 0, arg1, arg2);
         break;
     case TCG_TYPE_V256:
+        /*
+         * The gvec infrastructure only requires 16-byte alignment,
+         * so here we must use an unaligned store.
+         */
         tcg_debug_assert(arg >= 16);
         tcg_out_vex_modrm_offset(s, OPC_MOVDQU_WxVx | P_VEXL, arg,
                                  0, arg1, arg2);
```
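
To make the new comments concrete, here is a simplified sketch (not QEMU's actual assertion code) of what is and is not checked for a TCG_TYPE_V128 access; the function name is hypothetical.

```c
#include <assert.h>
#include <stdint.h>

/*
 * Sketch only: gvec guarantees the *offset* into env is 16-byte aligned,
 * but nothing asserts that env + ofs is aligned -- that depends on the
 * vector member in ENV carrying QEMU_ALIGNED(16).
 */
void check_v128_access(uintptr_t env, uint32_t ofs)
{
    /* Asserted by the gvec front end at translation time. */
    assert(ofs % 16 == 0);

    /*
     * (env + ofs) % 16 == 0 is not asserted anywhere; the MOVDQA emitted
     * by this patch is what turns a violation into a visible SIGSEGV
     * instead of silent acceptance by MOVDQU.
     */
    (void)env;
}
```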