author:    Richard Henderson <rth@twiddle.net>    2016-07-14 12:43:06 -0700
committer: Richard Henderson <rth@twiddle.net>    2016-09-16 08:12:06 -0700
commit:    85aa80813dd9f5c1f581c743e45678a3bee220f8 (patch)
tree:      2a140ecb81d60cf1a593a160c0d09f88ae5a3c7d /tcg/tcg.h
parent:    ebc231d7daf1f41b23d8b6a6d1234800b86e5fe2 (diff)
tcg: Support arbitrary size + alignment
Previously we allowed fully unaligned operations, but not operations
that are aligned but with less alignment than the operation size.
In addition, arm32, ia64, mips, and sparc had been omitted from the
previous overalignment patch, which would have led to that alignment
being enforced.
Signed-off-by: Richard Henderson <rth@twiddle.net>
Diffstat (limited to 'tcg/tcg.h')
-rw-r--r--  tcg/tcg.h | 51
1 files changed, 19 insertions, 32 deletions
@@ -287,20 +287,19 @@ typedef enum TCGMemOp {
      * MO_ALIGN accesses will result in a call to the CPU's
      * do_unaligned_access hook if the guest address is not aligned.
      * The default depends on whether the target CPU defines ALIGNED_ONLY.
+     *
      * Some architectures (e.g. ARMv8) need the address which is aligned
      * to a size more than the size of the memory access.
-     * To support such check it's enough the current costless alignment
-     * check implementation in QEMU, but we need to support
-     * an alignment size specifying.
-     * MO_ALIGN supposes a natural alignment
-     * (i.e. the alignment size is the size of a memory access).
-     * Note that an alignment size must be equal or greater
-     * than an access size.
+     * Some architectures (e.g. SPARCv9) need an address which is aligned,
+     * but less strictly than the natural alignment.
+     *
+     * MO_ALIGN supposes the alignment size is the size of a memory access.
+     *
      * There are three options:
-     * - an alignment to the size of an access (MO_ALIGN);
-     * - an alignment to the specified size that is equal or greater than
-     *   an access size (MO_ALIGN_x where 'x' is a size in bytes);
      * - unaligned access permitted (MO_UNALN).
+     * - an alignment to the size of an access (MO_ALIGN);
+     * - an alignment to a specified size, which may be more or less than
+     *   the access size (MO_ALIGN_x where 'x' is a size in bytes);
      */
     MO_ASHIFT = 4,
     MO_AMASK = 7 << MO_ASHIFT,
@@ -353,38 +352,26 @@ typedef enum TCGMemOp {
  * @memop: TCGMemOp value
  *
  * Extract the alignment size from the memop.
- *
- * Returns: 0 in case of byte access (which is always aligned);
- *          positive value - number of alignment bits;
- *          negative value if unaligned access enabled
- *          and this is not a byte access.
 */
-static inline int get_alignment_bits(TCGMemOp memop)
+static inline unsigned get_alignment_bits(TCGMemOp memop)
 {
-    int a = memop & MO_AMASK;
-    int s = memop & MO_SIZE;
-    int r;
+    unsigned a = memop & MO_AMASK;
 
     if (a == MO_UNALN) {
-        /* Negative value if unaligned access enabled,
-         * or zero value in case of byte access.
-         */
-        return -s;
+        /* No alignment required.  */
+        a = 0;
     } else if (a == MO_ALIGN) {
-        /* A natural alignment: return a number of access size bits */
-        r = s;
+        /* A natural alignment requirement.  */
+        a = memop & MO_SIZE;
     } else {
-        /* Specific alignment size. It must be equal or greater
-         * than the access size.
-         */
-        r = a >> MO_ASHIFT;
-        tcg_debug_assert(r >= s);
+        /* A specific alignment requirement.  */
+        a = a >> MO_ASHIFT;
     }
 #if defined(CONFIG_SOFTMMU)
     /* The requested alignment cannot overlap the TLB flags.  */
-    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << r) - 1)) == 0);
+    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
 #endif
-    return r;
+    return a;
 }
 
 typedef tcg_target_ulong TCGArg;
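The rewritten helper drops the old signed return convention and always yields a plain number of alignment bits (0 meaning "no alignment required"). Below is a minimal standalone sketch, not part of the patch, that mirrors the post-patch logic so the three cases described in the comment can be exercised outside of QEMU. It assumes the non-ALIGNED_ONLY encoding and re-declares the MO_* constants locally; exact values come from tcg.h and may differ on an ALIGNED_ONLY target.

```c
/* Standalone sketch of the new get_alignment_bits() behaviour.
 * Constants are redeclared locally (assumption: non-ALIGNED_ONLY layout,
 * MO_ALIGN_x encoded as log2(x) << MO_ASHIFT, as the comment describes). */
#include <assert.h>
#include <stdio.h>

enum {
    MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
    MO_SIZE     = 3,
    MO_ASHIFT   = 4,
    MO_AMASK    = 7 << MO_ASHIFT,
    MO_UNALN    = 0,
    MO_ALIGN    = MO_AMASK,
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
};

/* Mirrors the patched helper: returns the number of alignment bits,
 * never a negative value. */
static unsigned get_alignment_bits(unsigned memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        a = 0;                    /* no alignment required */
    } else if (a == MO_ALIGN) {
        a = memop & MO_SIZE;      /* natural alignment */
    } else {
        a = a >> MO_ASHIFT;       /* explicit MO_ALIGN_x request */
    }
    return a;
}

int main(void)
{
    /* 64-bit access, no alignment requirement. */
    assert(get_alignment_bits(MO_64 | MO_UNALN) == 0);
    /* 64-bit access, natural (8-byte) alignment: 3 bits. */
    assert(get_alignment_bits(MO_64 | MO_ALIGN) == 3);
    /* Under-aligned: 64-bit access requiring only 4-byte alignment. */
    assert(get_alignment_bits(MO_64 | MO_ALIGN_4) == 2);
    /* Over-aligned: 32-bit access requiring 16-byte alignment. */
    assert(get_alignment_bits(MO_32 | MO_ALIGN_16) == 4);
    puts("alignment-bit extraction matches the documented cases");
    return 0;
}
```

Note how the under-aligned case (MO_64 | MO_ALIGN_4) is exactly what the commit message says was previously not expressible; the sketch omits the CONFIG_SOFTMMU assertion since TLB_FLAGS_MASK is QEMU-internal.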