author     Peter Maydell <peter.maydell@linaro.org>  2014-05-13 16:09:35 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2014-05-13 16:09:35 +0100
commit     09319b30411357afcd2bf75a78acd705ea704131 (patch)
tree       99078bf18c6acf83d77b74d53d62dc4bf10e4a4a
parent     cd2b9b86803e46a09cf239afc44413884efa53f4 (diff)
disas/libvixl: Update to libvixl 1.4
Update our copy of libvixl to upstream's 1.4 release. Note that we no
longer need any local fixes for compilation on 32 bit hosts -- they
have all been integrated upstream.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 1399040419-9227-1-git-send-email-peter.maydell@linaro.org
Acked-by: Richard Henderson <rth@twiddle.net>
-rw-r--r--  disas/libvixl/a64/assembler-a64.h      451
-rw-r--r--  disas/libvixl/a64/constants-a64.h       36
-rw-r--r--  disas/libvixl/a64/decoder-a64.cc        36
-rw-r--r--  disas/libvixl/a64/disasm-a64.cc        189
-rw-r--r--  disas/libvixl/a64/disasm-a64.h           1
-rw-r--r--  disas/libvixl/a64/instructions-a64.cc   32
-rw-r--r--  disas/libvixl/a64/instructions-a64.h    56
-rw-r--r--  disas/libvixl/globals.h                 42
-rw-r--r--  disas/libvixl/platform.h                 4
-rw-r--r--  disas/libvixl/utils.cc                  37
-rw-r--r--  disas/libvixl/utils.h                   86
11 files changed, 628 insertions(+), 342 deletions(-)
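The "32 bit hosts" note in the commit message corresponds to the 1UL -> UINT64_C(1) replacements throughout the diff below. A minimal sketch (hypothetical driver code, not part of the patch) of why the width of the literal matters when building a 64-bit RegList:

#include <cstdint>
#include <cstdio>

typedef uint64_t RegList;

// Old pattern: the width of the shift depends on sizeof(unsigned long),
// which is 4 bytes on a 32-bit host, so code >= 32 is undefined behaviour
// and drops the high bits of the 64-bit RegList.
static RegList BitOld(unsigned code) {
    return static_cast<RegList>(1UL << code);
}

// New pattern: UINT64_C(1) is a 64-bit operand on every host.
static RegList BitNew(unsigned code) {
    return UINT64_C(1) << code;
}

int main() {
    unsigned code = 40;  // a bit above the low 32, e.g. an FP register slot
    printf("old: %016llx\n", (unsigned long long)BitOld(code));
    printf("new: %016llx\n", (unsigned long long)BitNew(code));
    return 0;
}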
diff --git a/disas/libvixl/a64/assembler-a64.h b/disas/libvixl/a64/assembler-a64.h
index 93b3011868..1e2947b283 100644
--- a/disas/libvixl/a64/assembler-a64.h
+++ b/disas/libvixl/a64/assembler-a64.h
@@ -38,6 +38,7 @@ namespace vixl {
typedef uint64_t RegList;
static const int kRegListSizeInBits = sizeof(RegList) * 8;
+
// Registers.
// Some CPURegister methods can return Register and FPRegister types, so we
@@ -58,62 +59,62 @@ class CPURegister {
};
CPURegister() : code_(0), size_(0), type_(kNoRegister) {
- ASSERT(!IsValid());
- ASSERT(IsNone());
+ VIXL_ASSERT(!IsValid());
+ VIXL_ASSERT(IsNone());
}
CPURegister(unsigned code, unsigned size, RegisterType type)
: code_(code), size_(size), type_(type) {
- ASSERT(IsValidOrNone());
+ VIXL_ASSERT(IsValidOrNone());
}
unsigned code() const {
- ASSERT(IsValid());
+ VIXL_ASSERT(IsValid());
return code_;
}
RegisterType type() const {
- ASSERT(IsValidOrNone());
+ VIXL_ASSERT(IsValidOrNone());
return type_;
}
RegList Bit() const {
- ASSERT(code_ < (sizeof(RegList) * 8));
+ VIXL_ASSERT(code_ < (sizeof(RegList) * 8));
return IsValid() ? (static_cast<RegList>(1) << code_) : 0;
}
unsigned size() const {
- ASSERT(IsValid());
+ VIXL_ASSERT(IsValid());
return size_;
}
int SizeInBytes() const {
- ASSERT(IsValid());
- ASSERT(size() % 8 == 0);
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(size() % 8 == 0);
return size_ / 8;
}
int SizeInBits() const {
- ASSERT(IsValid());
+ VIXL_ASSERT(IsValid());
return size_;
}
bool Is32Bits() const {
- ASSERT(IsValid());
+ VIXL_ASSERT(IsValid());
return size_ == 32;
}
bool Is64Bits() const {
- ASSERT(IsValid());
+ VIXL_ASSERT(IsValid());
return size_ == 64;
}
bool IsValid() const {
if (IsValidRegister() || IsValidFPRegister()) {
- ASSERT(!IsNone());
+ VIXL_ASSERT(!IsNone());
return true;
} else {
- ASSERT(IsNone());
+ VIXL_ASSERT(IsNone());
return false;
}
}
@@ -132,25 +133,29 @@ class CPURegister {
bool IsNone() const {
// kNoRegister types should always have size 0 and code 0.
- ASSERT((type_ != kNoRegister) || (code_ == 0));
- ASSERT((type_ != kNoRegister) || (size_ == 0));
+ VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0));
+ VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0));
return type_ == kNoRegister;
}
+ bool Aliases(const CPURegister& other) const {
+ VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
+ return (code_ == other.code_) && (type_ == other.type_);
+ }
+
bool Is(const CPURegister& other) const {
- ASSERT(IsValidOrNone() && other.IsValidOrNone());
- return (code_ == other.code_) && (size_ == other.size_) &&
- (type_ == other.type_);
+ VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
+ return Aliases(other) && (size_ == other.size_);
}
inline bool IsZero() const {
- ASSERT(IsValid());
+ VIXL_ASSERT(IsValid());
return IsRegister() && (code_ == kZeroRegCode);
}
inline bool IsSP() const {
- ASSERT(IsValid());
+ VIXL_ASSERT(IsValid());
return IsRegister() && (code_ == kSPRegInternalCode);
}
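The new Aliases() helper compares only register code and type, while Is() additionally requires an equal size, so the W and X views of the same register alias each other without being equal. A simplified stand-alone model of that distinction (illustrative only, not the vixl class):

#include <cassert>

struct Reg {
    unsigned code;
    unsigned size;   // in bits
    int type;        // 0 = integer register

    bool Aliases(const Reg& other) const {
        return code == other.code && type == other.type;   // size ignored
    }
    bool Is(const Reg& other) const {
        return Aliases(other) && size == other.size;       // exact match
    }
};

int main() {
    Reg w0 = {0, 32, 0};
    Reg x0 = {0, 64, 0};
    assert(w0.Aliases(x0));   // same architectural register
    assert(!w0.Is(x0));       // but a different view width
    return 0;
}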
@@ -188,13 +193,13 @@ class Register : public CPURegister {
explicit Register() : CPURegister() {}
inline explicit Register(const CPURegister& other)
: CPURegister(other.code(), other.size(), other.type()) {
- ASSERT(IsValidRegister());
+ VIXL_ASSERT(IsValidRegister());
}
explicit Register(unsigned code, unsigned size)
: CPURegister(code, size, kRegister) {}
bool IsValid() const {
- ASSERT(IsRegister() || IsNone());
+ VIXL_ASSERT(IsRegister() || IsNone());
return IsValidRegister();
}
@@ -216,13 +221,13 @@ class FPRegister : public CPURegister {
inline FPRegister() : CPURegister() {}
inline explicit FPRegister(const CPURegister& other)
: CPURegister(other.code(), other.size(), other.type()) {
- ASSERT(IsValidFPRegister());
+ VIXL_ASSERT(IsValidFPRegister());
}
inline FPRegister(unsigned code, unsigned size)
: CPURegister(code, size, kFPRegister) {}
bool IsValid() const {
- ASSERT(IsFPRegister() || IsNone());
+ VIXL_ASSERT(IsFPRegister() || IsNone());
return IsValidFPRegister();
}
@@ -306,30 +311,30 @@ class CPURegList {
CPURegister reg4 = NoCPUReg)
: list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
size_(reg1.size()), type_(reg1.type()) {
- ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
- ASSERT(IsValid());
+ VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
+ VIXL_ASSERT(IsValid());
}
inline CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
: list_(list), size_(size), type_(type) {
- ASSERT(IsValid());
+ VIXL_ASSERT(IsValid());
}
inline CPURegList(CPURegister::RegisterType type, unsigned size,
unsigned first_reg, unsigned last_reg)
: size_(size), type_(type) {
- ASSERT(((type == CPURegister::kRegister) &&
- (last_reg < kNumberOfRegisters)) ||
- ((type == CPURegister::kFPRegister) &&
- (last_reg < kNumberOfFPRegisters)));
- ASSERT(last_reg >= first_reg);
- list_ = (1UL << (last_reg + 1)) - 1;
- list_ &= ~((1UL << first_reg) - 1);
- ASSERT(IsValid());
+ VIXL_ASSERT(((type == CPURegister::kRegister) &&
+ (last_reg < kNumberOfRegisters)) ||
+ ((type == CPURegister::kFPRegister) &&
+ (last_reg < kNumberOfFPRegisters)));
+ VIXL_ASSERT(last_reg >= first_reg);
+ list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
+ list_ &= ~((UINT64_C(1) << first_reg) - 1);
+ VIXL_ASSERT(IsValid());
}
inline CPURegister::RegisterType type() const {
- ASSERT(IsValid());
+ VIXL_ASSERT(IsValid());
return type_;
}
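The rewritten range constructor builds a contiguous mask covering registers first_reg..last_reg using the two UINT64_C shifts above. A small worked example (hypothetical helper that mirrors the constructor's arithmetic):

#include <cstdint>
#include <cstdio>

static uint64_t RangeMask(unsigned first_reg, unsigned last_reg) {
    uint64_t list = (UINT64_C(1) << (last_reg + 1)) - 1;  // bits 0..last_reg
    list &= ~((UINT64_C(1) << first_reg) - 1);            // clear bits below first_reg
    return list;
}

int main() {
    // x19..x29, the AAPCS64 callee-saved integer registers.
    printf("%016llx\n", (unsigned long long)RangeMask(19, 29));  // 000000003ff80000
    return 0;
}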
@@ -337,9 +342,9 @@ class CPURegList {
// this list are left unchanged. The type and size of the registers in the
// 'other' list must match those in this list.
void Combine(const CPURegList& other) {
- ASSERT(IsValid());
- ASSERT(other.type() == type_);
- ASSERT(other.RegisterSizeInBits() == size_);
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.RegisterSizeInBits() == size_);
list_ |= other.list();
}
@@ -347,44 +352,49 @@ class CPURegList {
// do not exist in this list are ignored. The type and size of the registers
// in the 'other' list must match those in this list.
void Remove(const CPURegList& other) {
- ASSERT(IsValid());
- ASSERT(other.type() == type_);
- ASSERT(other.RegisterSizeInBits() == size_);
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.RegisterSizeInBits() == size_);
list_ &= ~other.list();
}
// Variants of Combine and Remove which take a single register.
inline void Combine(const CPURegister& other) {
- ASSERT(other.type() == type_);
- ASSERT(other.size() == size_);
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.size() == size_);
Combine(other.code());
}
inline void Remove(const CPURegister& other) {
- ASSERT(other.type() == type_);
- ASSERT(other.size() == size_);
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.size() == size_);
Remove(other.code());
}
// Variants of Combine and Remove which take a single register by its code;
// the type and size of the register is inferred from this list.
inline void Combine(int code) {
- ASSERT(IsValid());
- ASSERT(CPURegister(code, size_, type_).IsValid());
- list_ |= (1UL << code);
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
+ list_ |= (UINT64_C(1) << code);
}
inline void Remove(int code) {
- ASSERT(IsValid());
- ASSERT(CPURegister(code, size_, type_).IsValid());
- list_ &= ~(1UL << code);
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
+ list_ &= ~(UINT64_C(1) << code);
}
inline RegList list() const {
- ASSERT(IsValid());
+ VIXL_ASSERT(IsValid());
return list_;
}
+ inline void set_list(RegList new_list) {
+ VIXL_ASSERT(IsValid());
+ list_ = new_list;
+ }
+
// Remove all callee-saved registers from the list. This can be useful when
// preparing registers for an AAPCS64 function call, for example.
void RemoveCalleeSaved();
@@ -401,31 +411,41 @@ class CPURegList {
static CPURegList GetCallerSavedFP(unsigned size = kDRegSize);
inline bool IsEmpty() const {
- ASSERT(IsValid());
+ VIXL_ASSERT(IsValid());
return list_ == 0;
}
inline bool IncludesAliasOf(const CPURegister& other) const {
- ASSERT(IsValid());
- return (type_ == other.type()) && (other.Bit() & list_);
+ VIXL_ASSERT(IsValid());
+ return (type_ == other.type()) && ((other.Bit() & list_) != 0);
+ }
+
+ inline bool IncludesAliasOf(int code) const {
+ VIXL_ASSERT(IsValid());
+ return ((code & list_) != 0);
}
inline int Count() const {
- ASSERT(IsValid());
+ VIXL_ASSERT(IsValid());
return CountSetBits(list_, kRegListSizeInBits);
}
inline unsigned RegisterSizeInBits() const {
- ASSERT(IsValid());
+ VIXL_ASSERT(IsValid());
return size_;
}
inline unsigned RegisterSizeInBytes() const {
int size_in_bits = RegisterSizeInBits();
- ASSERT((size_in_bits % 8) == 0);
+ VIXL_ASSERT((size_in_bits % 8) == 0);
return size_in_bits / 8;
}
+ inline unsigned TotalSizeInBytes() const {
+ VIXL_ASSERT(IsValid());
+ return RegisterSizeInBytes() * Count();
+ }
+
private:
RegList list_;
unsigned size_;
@@ -471,33 +491,34 @@ class Operand {
bool IsImmediate() const;
bool IsShiftedRegister() const;
bool IsExtendedRegister() const;
+ bool IsZero() const;
// This returns an LSL shift (<= 4) operand as an equivalent extend operand,
// which helps in the encoding of instructions that use the stack pointer.
Operand ToExtendedRegister() const;
int64_t immediate() const {
- ASSERT(IsImmediate());
+ VIXL_ASSERT(IsImmediate());
return immediate_;
}
Register reg() const {
- ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
return reg_;
}
Shift shift() const {
- ASSERT(IsShiftedRegister());
+ VIXL_ASSERT(IsShiftedRegister());
return shift_;
}
Extend extend() const {
- ASSERT(IsExtendedRegister());
+ VIXL_ASSERT(IsExtendedRegister());
return extend_;
}
unsigned shift_amount() const {
- ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
return shift_amount_;
}
@@ -556,7 +577,7 @@ class Label {
Label() : is_bound_(false), link_(NULL), target_(NULL) {}
~Label() {
// If the label has been linked to, it needs to be bound to a target.
- ASSERT(!IsLinked() || IsBound());
+ VIXL_ASSERT(!IsLinked() || IsBound());
}
inline Instruction* link() const { return link_; }
@@ -643,7 +664,7 @@ class Assembler {
void bind(Label* label);
int UpdateAndGetByteOffsetTo(Label* label);
inline int UpdateAndGetInstructionOffsetTo(Label* label) {
- ASSERT(Label::kEndOfChain == 0);
+ VIXL_ASSERT(Label::kEndOfChain == 0);
return UpdateAndGetByteOffsetTo(label) >> kInstructionSizeLog2;
}
@@ -716,8 +737,12 @@ class Assembler {
// Add.
void add(const Register& rd,
const Register& rn,
- const Operand& operand,
- FlagsUpdate S = LeaveFlags);
+ const Operand& operand);
+
+ // Add and update status flags.
+ void adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
// Compare negative.
void cmn(const Register& rn, const Operand& operand);
@@ -725,40 +750,62 @@ class Assembler {
// Subtract.
void sub(const Register& rd,
const Register& rn,
- const Operand& operand,
- FlagsUpdate S = LeaveFlags);
+ const Operand& operand);
+
+ // Subtract and update status flags.
+ void subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
// Compare.
void cmp(const Register& rn, const Operand& operand);
// Negate.
void neg(const Register& rd,
- const Operand& operand,
- FlagsUpdate S = LeaveFlags);
+ const Operand& operand);
+
+ // Negate and update status flags.
+ void negs(const Register& rd,
+ const Operand& operand);
// Add with carry bit.
void adc(const Register& rd,
const Register& rn,
- const Operand& operand,
- FlagsUpdate S = LeaveFlags);
+ const Operand& operand);
+
+ // Add with carry bit and update status flags.
+ void adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
// Subtract with carry bit.
void sbc(const Register& rd,
const Register& rn,
- const Operand& operand,
- FlagsUpdate S = LeaveFlags);
+ const Operand& operand);
+
+ // Subtract with carry bit and update status flags.
+ void sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
// Negate with carry bit.
void ngc(const Register& rd,
- const Operand& operand,
- FlagsUpdate S = LeaveFlags);
+ const Operand& operand);
+
+ // Negate with carry bit and update status flags.
+ void ngcs(const Register& rd,
+ const Operand& operand);
// Logical instructions.
// Bitwise and (A & B).
void and_(const Register& rd,
const Register& rn,
- const Operand& operand,
- FlagsUpdate S = LeaveFlags);
+ const Operand& operand);
+
+ // Bitwise and (A & B) and update status flags.
+ void ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
// Bit test and set flags.
void tst(const Register& rn, const Operand& operand);
@@ -766,8 +813,12 @@ class Assembler {
// Bit clear (A & ~B).
void bic(const Register& rd,
const Register& rn,
- const Operand& operand,
- FlagsUpdate S = LeaveFlags);
+ const Operand& operand);
+
+ // Bit clear (A & ~B) and update status flags.
+ void bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
// Bitwise or (A | B).
void orr(const Register& rd, const Register& rn, const Operand& operand);
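Across this block the optional FlagsUpdate parameter disappears: flag-setting forms become separate mnemonics (adds, subs, negs, adcs, sbcs, ngcs, ands, bics). A rough before/after sketch using a mock assembler class (not real vixl code):

#include <cstdio>

struct MockAssembler {
    // vixl 1.3 style (removed): add(rd, rn, operand, FlagsUpdate S = LeaveFlags)
    void add(const char* rd, const char* rn, int imm) {    // never sets flags
        printf("add  %s, %s, #%d\n", rd, rn, imm);
    }
    void adds(const char* rd, const char* rn, int imm) {   // always sets flags
        printf("adds %s, %s, #%d\n", rd, rn, imm);
    }
};

int main() {
    MockAssembler masm;
    masm.add("x0", "x1", 1);   // was: add(x0, x1, 1) or add(x0, x1, 1, LeaveFlags)
    masm.adds("x0", "x1", 1);  // was: add(x0, x1, 1, SetFlags)
    return 0;
}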
@@ -818,8 +869,8 @@ class Assembler {
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.size());
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
}
@@ -828,15 +879,15 @@ class Assembler {
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.size());
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
bfm(rd, rn, lsb, lsb + width - 1);
}
// Sbfm aliases.
// Arithmetic shift right.
inline void asr(const Register& rd, const Register& rn, unsigned shift) {
- ASSERT(shift < rd.size());
+ VIXL_ASSERT(shift < rd.size());
sbfm(rd, rn, shift, rd.size() - 1);
}
@@ -845,8 +896,8 @@ class Assembler {
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.size());
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
}
@@ -855,8 +906,8 @@ class Assembler {
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.size());
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
sbfm(rd, rn, lsb, lsb + width - 1);
}
@@ -879,13 +930,13 @@ class Assembler {
// Logical shift left.
inline void lsl(const Register& rd, const Register& rn, unsigned shift) {
unsigned reg_size = rd.size();
- ASSERT(shift < reg_size);
+ VIXL_ASSERT(shift < reg_size);
ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
}
// Logical shift right.
inline void lsr(const Register& rd, const Register& rn, unsigned shift) {
- ASSERT(shift < rd.size());
+ VIXL_ASSERT(shift < rd.size());
ubfm(rd, rn, shift, rd.size() - 1);
}
@@ -894,8 +945,8 @@ class Assembler {
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.size());
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
}
@@ -904,8 +955,8 @@ class Assembler {
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.size());
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
ubfm(rd, rn, lsb, lsb + width - 1);
}
@@ -1109,9 +1160,12 @@ class Assembler {
// Load literal to register.
void ldr(const Register& rt, uint64_t imm);
- // Load literal to FP register.
+ // Load double precision floating point literal to FP register.
void ldr(const FPRegister& ft, double imm);
+ // Load single precision floating point literal to FP register.
+ void ldr(const FPRegister& ft, float imm);
+
// Move instructions. The default shift of -1 indicates that the move
// instruction will calculate an appropriate 16-bit immediate and left shift
// that is equal to the 64-bit immediate argument. If an explicit left shift
@@ -1160,6 +1214,15 @@ class Assembler {
// System hint.
void hint(SystemHint code);
+ // Data memory barrier.
+ void dmb(BarrierDomain domain, BarrierType type);
+
+ // Data synchronization barrier.
+ void dsb(BarrierDomain domain, BarrierType type);
+
+ // Instruction synchronization barrier.
+ void isb();
+
// Alias for system instructions.
// No-op.
void nop() {
@@ -1167,17 +1230,20 @@ class Assembler {
}
// FP instructions.
- // Move immediate to FP register.
- void fmov(FPRegister fd, double imm);
+ // Move double precision immediate to FP register.
+ void fmov(const FPRegister& fd, double imm);
+
+ // Move single precision immediate to FP register.
+ void fmov(const FPRegister& fd, float imm);
// Move FP register to register.
- void fmov(Register rd, FPRegister fn);
+ void fmov(const Register& rd, const FPRegister& fn);
// Move register to FP register.
- void fmov(FPRegister fd, Register rn);
+ void fmov(const FPRegister& fd, const Register& rn);
// Move FP register to FP register.
- void fmov(FPRegister fd, FPRegister fn);
+ void fmov(const FPRegister& fd, const FPRegister& fn);
// FP add.
void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
@@ -1188,12 +1254,30 @@ class Assembler {
// FP multiply.
void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
- // FP multiply and subtract.
+ // FP fused multiply and add.
+ void fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply and subtract.
void fmsub(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
const FPRegister& fa);
+ // FP fused multiply, add and negate.
+ void fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply, subtract and negate.
+ void fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
// FP divide.
void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
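This hunk completes the fused multiply family (fmadd, fnmadd, fnmsub alongside the existing fmsub). A rough numeric sketch using std::fma; the sign conventions follow the comments above (the negated forms negate the whole fmadd/fmsub result) and are illustrative rather than authoritative:

#include <cmath>
#include <cstdio>

int main() {
    double fn = 2.0, fm = 3.0, fa = 10.0;
    double fmadd  = std::fma(fn, fm, fa);      // fa + fn*fm    = 16
    double fmsub  = std::fma(-fn, fm, fa);     // fa - fn*fm    = 4
    double fnmadd = -std::fma(fn, fm, fa);     // -(fa + fn*fm) = -16
    double fnmsub = -std::fma(-fn, fm, fa);    // -(fa - fn*fm) = -4
    printf("%g %g %g %g\n", fmadd, fmsub, fnmadd, fnmsub);
    return 0;
}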
@@ -1203,6 +1287,12 @@ class Assembler {
// FP minimum.
void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+ // FP maximum number.
+ void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP minimum number.
+ void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
// FP absolute.
void fabs(const FPRegister& fd, const FPRegister& fn);
@@ -1212,6 +1302,12 @@ class Assembler {
// FP square root.
void fsqrt(const FPRegister& fd, const FPRegister& fn);
+ // FP round to integer (nearest with ties to away).
+ void frinta(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (toward minus infinity).
+ void frintm(const FPRegister& fd, const FPRegister& fn);
+
// FP round to integer (nearest with ties to even).
void frintn(const FPRegister& fd, const FPRegister& fn);
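frinta and frintm join the existing frintn here; the difference is only the rounding attribute. A rough C++ illustration of the three behaviours on a tie value (assuming the default FE_TONEAREST environment for nearbyint):

#include <cmath>
#include <cstdio>

int main() {
    double x = 2.5;
    printf("frinta(2.5) ~ %g\n", std::round(x));      // 3: nearest, ties away from zero
    printf("frintm(2.5) ~ %g\n", std::floor(x));      // 2: toward minus infinity
    printf("frintn(2.5) ~ %g\n", std::nearbyint(x));  // 2: nearest, ties to even
    return 0;
}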
@@ -1244,24 +1340,30 @@ class Assembler {
// FP convert between single and double precision.
void fcvt(const FPRegister& fd, const FPRegister& fn);
- // Convert FP to unsigned integer (round towards -infinity).
- void fcvtmu(const Register& rd, const FPRegister& fn);
+ // Convert FP to signed integer (nearest with ties to away).
+ void fcvtas(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (nearest with ties to away).
+ void fcvtau(const Register& rd, const FPRegister& fn);
// Convert FP to signed integer (round towards -infinity).
void fcvtms(const Register& rd, const FPRegister& fn);
- // Convert FP to unsigned integer (nearest with ties to even).
- void fcvtnu(const Register& rd, const FPRegister& fn);
+ // Convert FP to unsigned integer (round towards -infinity).
+ void fcvtmu(const Register& rd, const FPRegister& fn);
// Convert FP to signed integer (nearest with ties to even).
void fcvtns(const Register& rd, const FPRegister& fn);
- // Convert FP to unsigned integer (round towards zero).
- void fcvtzu(const Register& rd, const FPRegister& fn);
+ // Convert FP to unsigned integer (nearest with ties to even).
+ void fcvtnu(const Register& rd, const FPRegister& fn);
// Convert FP to signed integer (round towards zero).
void fcvtzs(const Register& rd, const FPRegister& fn);
+ // Convert FP to unsigned integer (round towards zero).
+ void fcvtzu(const Register& rd, const FPRegister& fn);
+
// Convert signed integer or fixed point to FP.
void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
@@ -1282,14 +1384,14 @@ class Assembler {
// character. The instruction pointer (pc_) is then aligned correctly for
// subsequent instructions.
void EmitStringData(const char * string) {
- ASSERT(string != NULL);
+ VIXL_ASSERT(string != NULL);
size_t len = strlen(string) + 1;
EmitData(string, len);
// Pad with NULL characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
- ASSERT(sizeof(pad) == kInstructionSize);
+ VIXL_STATIC_ASSERT(sizeof(pad) == kInstructionSize);
Instruction* next_pc = AlignUp(pc_, kInstructionSize);
EmitData(&pad, next_pc - pc_);
}
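The sizeof(pad) check above becomes a VIXL_STATIC_ASSERT because it can be evaluated at compile time (the macro itself is presumably defined in vixl's globals.h or utils.h, changed by this patch but not shown here). A minimal sketch of the runtime/compile-time distinction in standard C++:

#include <cassert>
#include <cstdint>

typedef uint32_t Instr;
const unsigned kInstructionSize = sizeof(Instr);

int main() {
    // Compile-time check: both operands are constant expressions.
    static_assert(sizeof(Instr) == kInstructionSize, "instruction size mismatch");

    // Runtime check: needed when a value is only known during execution.
    unsigned emitted = kInstructionSize;
    assert(emitted == kInstructionSize);
    return 0;
}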
@@ -1298,44 +1400,44 @@ class Assembler {
// Register encoding.
static Instr Rd(CPURegister rd) {
- ASSERT(rd.code() != kSPRegInternalCode);
+ VIXL_ASSERT(rd.code() != kSPRegInternalCode);
return rd.code() << Rd_offset;
}
static Instr Rn(CPURegister rn) {
- ASSERT(rn.code() != kSPRegInternalCode);
+ VIXL_ASSERT(rn.code() != kSPRegInternalCode);
return rn.code() << Rn_offset;
}
static Instr Rm(CPURegister rm) {
- ASSERT(rm.code() != kSPRegInternalCode);
+ VIXL_ASSERT(rm.code() != kSPRegInternalCode);
return rm.code() << Rm_offset;
}
static Instr Ra(CPURegister ra) {
- ASSERT(ra.code() != kSPRegInternalCode);
+ VIXL_ASSERT(ra.code() != kSPRegInternalCode);
return ra.code() << Ra_offset;
}
static Instr Rt(CPURegister rt) {
- ASSERT(rt.code() != kSPRegInternalCode);
+ VIXL_ASSERT(rt.code() != kSPRegInternalCode);
return rt.code() << Rt_offset;
}
static Instr Rt2(CPURegister rt2) {
- ASSERT(rt2.code() != kSPRegInternalCode);
+ VIXL_ASSERT(rt2.code() != kSPRegInternalCode);
return rt2.code() << Rt2_offset;
}
// These encoding functions allow the stack pointer to be encoded, and
// disallow the zero register.
static Instr RdSP(Register rd) {
- ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rd.IsZero());
return (rd.code() & kRegCodeMask) << Rd_offset;
}
static Instr RnSP(Register rn) {
- ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
return (rn.code() & kRegCodeMask) << Rn_offset;
}
@@ -1346,7 +1448,7 @@ class Assembler {
} else if (S == LeaveFlags) {
return 0 << FlagsUpdate_offset;
}
- UNREACHABLE();
+ VIXL_UNREACHABLE();
return 0;
}
@@ -1356,7 +1458,7 @@ class Assembler {
// PC-relative address encoding.
static Instr ImmPCRelAddress(int imm21) {
- ASSERT(is_int21(imm21));
+ VIXL_ASSERT(is_int21(imm21));
Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
Instr immlo = imm << ImmPCRelLo_offset;
@@ -1365,27 +1467,27 @@ class Assembler {
// Branch encoding.
static Instr ImmUncondBranch(int imm26) {
- ASSERT(is_int26(imm26));
+ VIXL_ASSERT(is_int26(imm26));
return truncate_to_int26(imm26) << ImmUncondBranch_offset;
}
static Instr ImmCondBranch(int imm19) {
- ASSERT(is_int19(imm19));
+ VIXL_ASSERT(is_int19(imm19));
return truncate_to_int19(imm19) << ImmCondBranch_offset;
}
static Instr ImmCmpBranch(int imm19) {
- ASSERT(is_int19(imm19));
+ VIXL_ASSERT(is_int19(imm19));
return truncate_to_int19(imm19) << ImmCmpBranch_offset;
}
static Instr ImmTestBranch(int imm14) {
- ASSERT(is_int14(imm14));
+ VIXL_ASSERT(is_int14(imm14));
return truncate_to_int14(imm14) << ImmTestBranch_offset;
}
static Instr ImmTestBranchBit(unsigned bit_pos) {
- ASSERT(is_uint6(bit_pos));
+ VIXL_ASSERT(is_uint6(bit_pos));
// Subtract five from the shift offset, as we need bit 5 from bit_pos.
unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
@@ -1400,7 +1502,7 @@ class Assembler {
}
static Instr ImmAddSub(int64_t imm) {
- ASSERT(IsImmAddSub(imm));
+ VIXL_ASSERT(IsImmAddSub(imm));
if (is_uint12(imm)) { // No shift required.
return imm << ImmAddSub_offset;
} else {
@@ -1409,55 +1511,55 @@ class Assembler {
}
static inline Instr ImmS(unsigned imms, unsigned reg_size) {
- ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
+ VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
((reg_size == kWRegSize) && is_uint5(imms)));
USE(reg_size);
return imms << ImmS_offset;
}
static inline Instr ImmR(unsigned immr, unsigned reg_size) {
- ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
+ VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
((reg_size == kWRegSize) && is_uint5(immr)));
USE(reg_size);
- ASSERT(is_uint6(immr));
+ VIXL_ASSERT(is_uint6(immr));
return immr << ImmR_offset;
}
static inline Instr ImmSetBits(unsigned imms, unsigned reg_size) {
- ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
- ASSERT(is_uint6(imms));
- ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ VIXL_ASSERT(is_uint6(imms));
+ VIXL_ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
USE(reg_size);
return imms << ImmSetBits_offset;
}
static inline Instr ImmRotate(unsigned immr, unsigned reg_size) {
- ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
- ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
((reg_size == kWRegSize) && is_uint5(immr)));
USE(reg_size);
return immr << ImmRotate_offset;
}
static inline Instr ImmLLiteral(int imm19) {
- ASSERT(is_int19(imm19));
+ VIXL_ASSERT(is_int19(imm19));
return truncate_to_int19(imm19) << ImmLLiteral_offset;
}
static inline Instr BitN(unsigned bitn, unsigned reg_size) {
- ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
- ASSERT((reg_size == kXRegSize) || (bitn == 0));
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0));
USE(reg_size);
return bitn << BitN_offset;
}
static Instr ShiftDP(Shift shift) {
- ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
+ VIXL_ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
return shift << ShiftDP_offset;
}
static Instr ImmDPShift(unsigned amount) {
- ASSERT(is_uint6(amount));
+ VIXL_ASSERT(is_uint6(amount));
return amount << ImmDPShift_offset;
}
@@ -1466,12 +1568,12 @@ class Assembler {
}
static Instr ImmExtendShift(unsigned left_shift) {
- ASSERT(left_shift <= 4);
+ VIXL_ASSERT(left_shift <= 4);
return left_shift << ImmExtendShift_offset;
}
static Instr ImmCondCmp(unsigned imm) {
- ASSERT(is_uint5(imm));
+ VIXL_ASSERT(is_uint5(imm));
return imm << ImmCondCmp_offset;
}
@@ -1481,55 +1583,65 @@ class Assembler {
// MemOperand offset encoding.
static Instr ImmLSUnsigned(int imm12) {
- ASSERT(is_uint12(imm12));
+ VIXL_ASSERT(is_uint12(imm12));
return imm12 << ImmLSUnsigned_offset;
}
static Instr ImmLS(int imm9) {
- ASSERT(is_int9(imm9));
+ VIXL_ASSERT(is_int9(imm9));
return truncate_to_int9(imm9) << ImmLS_offset;
}
static Instr ImmLSPair(int imm7, LSDataSize size) {
- ASSERT(((imm7 >> size) << size) == imm7);
+ VIXL_ASSERT(((imm7 >> size) << size) == imm7);
int scaled_imm7 = imm7 >> size;
- ASSERT(is_int7(scaled_imm7));
+ VIXL_ASSERT(is_int7(scaled_imm7));
return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
}
static Instr ImmShiftLS(unsigned shift_amount) {
- ASSERT(is_uint1(shift_amount));
+ VIXL_ASSERT(is_uint1(shift_amount));
return shift_amount << ImmShiftLS_offset;
}
static Instr ImmException(int imm16) {
- ASSERT(is_uint16(imm16));
+ VIXL_ASSERT(is_uint16(imm16));
return imm16 << ImmException_offset;
}
static Instr ImmSystemRegister(int imm15) {
- ASSERT(is_uint15(imm15));
+ VIXL_ASSERT(is_uint15(imm15));
return imm15 << ImmSystemRegister_offset;
}
static Instr ImmHint(int imm7) {
- ASSERT(is_uint7(imm7));
+ VIXL_ASSERT(is_uint7(imm7));
return imm7 << ImmHint_offset;
}
+ static Instr ImmBarrierDomain(int imm2) {
+ VIXL_ASSERT(is_uint2(imm2));
+ return imm2 << ImmBarrierDomain_offset;
+ }
+
+ static Instr ImmBarrierType(int imm2) {
+ VIXL_ASSERT(is_uint2(imm2));
+ return imm2 << ImmBarrierType_offset;
+ }
+
static LSDataSize CalcLSDataSize(LoadStoreOp op) {
- ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
+ VIXL_ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
return static_cast<LSDataSize>(op >> SizeLS_offset);
}
// Move immediates encoding.
static Instr ImmMoveWide(uint64_t imm) {
- ASSERT(is_uint16(imm));
+ VIXL_ASSERT(is_uint16(imm));
return imm << ImmMoveWide_offset;
}
static Instr ShiftMoveWide(int64_t shift) {
- ASSERT(is_uint2(shift));
+ VIXL_ASSERT(is_uint2(shift));
return shift << ShiftMoveWide_offset;
}
@@ -1543,20 +1655,20 @@ class Assembler {
}
static Instr FPScale(unsigned scale) {
- ASSERT(is_uint6(scale));
+ VIXL_ASSERT(is_uint6(scale));
return scale << FPScale_offset;
}
// Size of the code generated in bytes
uint64_t SizeOfCodeGenerated() const {
- ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
+ VIXL_ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
return pc_ - buffer_;
}
// Size of the code generated since label to the current position.
uint64_t SizeOfCodeGeneratedSince(Label* label) const {
- ASSERT(label->IsBound());
- ASSERT((pc_ >= label->target()) && (pc_ < (buffer_ + buffer_size_)));
+ VIXL_ASSERT(label->IsBound());
+ VIXL_ASSERT((pc_ >= label->target()) && (pc_ < (buffer_ + buffer_size_)));
return pc_ - label->target();
}
@@ -1568,7 +1680,7 @@ class Assembler {
inline void ReleaseLiteralPool() {
if (--literal_pool_monitor_ == 0) {
// Has the literal pool been blocked for too long?
- ASSERT(literals_.empty() ||
+ VIXL_ASSERT(literals_.empty() ||
(pc_ < (literals_.back()->pc_ + kMaxLoadLiteralRange)));
}
}
@@ -1622,6 +1734,9 @@ class Assembler {
FlagsUpdate S,
AddSubWithCarryOp op);
+ static bool IsImmFP32(float imm);
+ static bool IsImmFP64(double imm);
+
// Functions for emulating operands not directly supported by the instruction
// set.
void EmitShift(const Register& rd,
@@ -1706,17 +1821,13 @@ class Assembler {
const FPRegister& fa,
FPDataProcessing3SourceOp op);
- // Encoding helpers.
- static bool IsImmFP32(float imm);
- static bool IsImmFP64(double imm);
-
void RecordLiteral(int64_t imm, unsigned size);
// Emit the instruction at pc_.
void Emit(Instr instruction) {
- ASSERT(sizeof(*pc_) == 1);
- ASSERT(sizeof(instruction) == kInstructionSize);
- ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
+ VIXL_STATIC_ASSERT(sizeof(*pc_) == 1);
+ VIXL_STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
+ VIXL_ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
#ifdef DEBUG
finalized_ = false;
@@ -1729,8 +1840,8 @@ class Assembler {
// Emit data inline in the instruction stream.
void EmitData(void const * data, unsigned size) {
- ASSERT(sizeof(*pc_) == 1);
- ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
+ VIXL_STATIC_ASSERT(sizeof(*pc_) == 1);
+ VIXL_ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
#ifdef DEBUG
finalized_ = false;
@@ -1744,7 +1855,7 @@ class Assembler {
}
inline void CheckBufferSpace() {
- ASSERT(pc_ < (buffer_ + buffer_size_));
+ VIXL_ASSERT(pc_ < (buffer_ + buffer_size_));
if (pc_ > next_literal_pool_check_) {
CheckLiteralPool();
}
diff --git a/disas/libvixl/a64/constants-a64.h b/disas/libvixl/a64/constants-a64.h
index 2e0336dd0f..99677c1be3 100644
--- a/disas/libvixl/a64/constants-a64.h
+++ b/disas/libvixl/a64/constants-a64.h
@@ -116,6 +116,8 @@ V_(ImmCmpBranch, 23, 5, SignedBits) \
V_(ImmLLiteral, 23, 5, SignedBits) \
V_(ImmException, 20, 5, Bits) \
V_(ImmHint, 11, 5, Bits) \
+V_(ImmBarrierDomain, 11, 10, Bits) \
+V_(ImmBarrierType, 9, 8, Bits) \
\
/* System (MRS, MSR) */ \
V_(ImmSystemRegister, 19, 5, Bits) \
@@ -181,7 +183,7 @@ enum Condition {
inline Condition InvertCondition(Condition cond) {
// Conditions al and nv behave identically, as "always true". They can't be
// inverted, because there is no "always false" condition.
- ASSERT((cond != al) && (cond != nv));
+ VIXL_ASSERT((cond != al) && (cond != nv));
return static_cast<Condition>(cond ^ 1);
}
@@ -246,6 +248,20 @@ enum SystemHint {
SEVL = 5
};
+enum BarrierDomain {
+ OuterShareable = 0,
+ NonShareable = 1,
+ InnerShareable = 2,
+ FullSystem = 3
+};
+
+enum BarrierType {
+ BarrierOther = 0,
+ BarrierReads = 1,
+ BarrierWrites = 2,
+ BarrierAll = 3
+};
+
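These enums feed the new dmb/dsb/isb support: the domain goes into bits 11:10 and the type into bits 9:8 of the barrier encoding (see ImmBarrierDomain/ImmBarrierType above). A stand-alone sketch of the resulting encoding for dmb (hypothetical encoder, constants taken from this patch):

#include <cstdint>
#include <cstdio>

enum BarrierDomain { OuterShareable = 0, NonShareable = 1, InnerShareable = 2, FullSystem = 3 };
enum BarrierType   { BarrierOther = 0, BarrierReads = 1, BarrierWrites = 2, BarrierAll = 3 };

static const uint32_t MemBarrierFixed = 0xD503309F;
static const uint32_t DMB = MemBarrierFixed | 0x00000020;
static const int ImmBarrierDomain_offset = 10;   // bits 11:10
static const int ImmBarrierType_offset = 8;      // bits 9:8

static uint32_t EncodeDmb(BarrierDomain domain, BarrierType type) {
    return DMB | (domain << ImmBarrierDomain_offset) | (type << ImmBarrierType_offset);
}

int main() {
    printf("dmb ish   = 0x%08x\n", (unsigned)EncodeDmb(InnerShareable, BarrierAll));    // 0xd5033bbf
    printf("dmb ishst = 0x%08x\n", (unsigned)EncodeDmb(InnerShareable, BarrierWrites)); // 0xd5033abf
    return 0;
}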
// System/special register names.
// This information is not encoded as one field but as the concatenation of
// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
@@ -274,7 +290,7 @@ enum SystemRegister {
//
// The enumerations can be used like this:
//
-// ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
+// VIXL_ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
// switch(instr->Mask(PCRelAddressingMask)) {
// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
@@ -560,6 +576,15 @@ enum ExceptionOp {
DCPS3 = ExceptionFixed | 0x00A00003
};
+enum MemBarrierOp {
+ MemBarrierFixed = 0xD503309F,
+ MemBarrierFMask = 0xFFFFF09F,
+ MemBarrierMask = 0xFFFFF0FF,
+ DSB = MemBarrierFixed | 0x00000000,
+ DMB = MemBarrierFixed | 0x00000020,
+ ISB = MemBarrierFixed | 0x00000040
+};
+
// Any load or store.
enum LoadStoreAnyOp {
LoadStoreAnyFMask = 0x0a000000,
@@ -927,17 +952,22 @@ enum FPDataProcessing1SourceOp {
FRINTN = FRINTN_s,
FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000,
FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000,
+ FRINTP = FRINTP_s,
FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000,
FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000,
+ FRINTM = FRINTM_s,
FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000,
FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000,
FRINTZ = FRINTZ_s,
FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000,
FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000,
+ FRINTA = FRINTA_s,
FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000,
FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000,
+ FRINTX = FRINTX_s,
FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000,
- FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000
+ FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000,
+ FRINTI = FRINTI_s
};
// Floating point data processing 2 source.
diff --git a/disas/libvixl/a64/decoder-a64.cc b/disas/libvixl/a64/decoder-a64.cc
index 9e9033c49c..8450eb3b49 100644
--- a/disas/libvixl/a64/decoder-a64.cc
+++ b/disas/libvixl/a64/decoder-a64.cc
@@ -132,7 +132,7 @@ void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
}
// We reached the end of the list. The last element must be
// registered_visitor.
- ASSERT(*it == registered_visitor);
+ VIXL_ASSERT(*it == registered_visitor);
visitors_.insert(it, new_visitor);
}
@@ -150,7 +150,7 @@ void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
}
// We reached the end of the list. The last element must be
// registered_visitor.
- ASSERT(*it == registered_visitor);
+ VIXL_ASSERT(*it == registered_visitor);
visitors_.push_back(new_visitor);
}
@@ -161,16 +161,16 @@ void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
void Decoder::DecodePCRelAddressing(Instruction* instr) {
- ASSERT(instr->Bits(27, 24) == 0x0);
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x0);
// We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
// decode.
- ASSERT(instr->Bit(28) == 0x1);
+ VIXL_ASSERT(instr->Bit(28) == 0x1);
VisitPCRelAddressing(instr);
}
void Decoder::DecodeBranchSystemException(Instruction* instr) {
- ASSERT((instr->Bits(27, 24) == 0x4) ||
+ VIXL_ASSERT((instr->Bits(27, 24) == 0x4) ||
(instr->Bits(27, 24) == 0x5) ||
(instr->Bits(27, 24) == 0x6) ||
(instr->Bits(27, 24) == 0x7) );
@@ -271,7 +271,7 @@ void Decoder::DecodeBranchSystemException(Instruction* instr) {
void Decoder::DecodeLoadStore(Instruction* instr) {
- ASSERT((instr->Bits(27, 24) == 0x8) ||
+ VIXL_ASSERT((instr->Bits(27, 24) == 0x8) ||
(instr->Bits(27, 24) == 0x9) ||
(instr->Bits(27, 24) == 0xC) ||
(instr->Bits(27, 24) == 0xD) );
@@ -390,7 +390,7 @@ void Decoder::DecodeLoadStore(Instruction* instr) {
void Decoder::DecodeLogical(Instruction* instr) {
- ASSERT(instr->Bits(27, 24) == 0x2);
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x2);
if (instr->Mask(0x80400000) == 0x00400000) {
VisitUnallocated(instr);
@@ -409,7 +409,7 @@ void Decoder::DecodeLogical(Instruction* instr) {
void Decoder::DecodeBitfieldExtract(Instruction* instr) {
- ASSERT(instr->Bits(27, 24) == 0x3);
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x3);
if ((instr->Mask(0x80400000) == 0x80000000) ||
(instr->Mask(0x80400000) == 0x00400000) ||
@@ -434,7 +434,7 @@ void Decoder::DecodeBitfieldExtract(Instruction* instr) {
void Decoder::DecodeAddSubImmediate(Instruction* instr) {
- ASSERT(instr->Bits(27, 24) == 0x1);
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x1);
if (instr->Bit(23) == 1) {
VisitUnallocated(instr);
} else {
@@ -444,8 +444,8 @@ void Decoder::DecodeAddSubImmediate(Instruction* instr) {
void Decoder::DecodeDataProcessing(Instruction* instr) {
- ASSERT((instr->Bits(27, 24) == 0xA) ||
- (instr->Bits(27, 24) == 0xB) );
+ VIXL_ASSERT((instr->Bits(27, 24) == 0xA) ||
+ (instr->Bits(27, 24) == 0xB));
if (instr->Bit(24) == 0) {
if (instr->Bit(28) == 0) {
@@ -559,8 +559,8 @@ void Decoder::DecodeDataProcessing(Instruction* instr) {
void Decoder::DecodeFP(Instruction* instr) {
- ASSERT((instr->Bits(27, 24) == 0xE) ||
- (instr->Bits(27, 24) == 0xF) );
+ VIXL_ASSERT((instr->Bits(27, 24) == 0xE) ||
+ (instr->Bits(27, 24) == 0xF));
if (instr->Bit(28) == 0) {
DecodeAdvSIMDDataProcessing(instr);
@@ -665,14 +665,14 @@ void Decoder::DecodeFP(Instruction* instr) {
VisitFPConditionalSelect(instr);
break;
}
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
}
}
}
} else {
// Bit 30 == 1 has been handled earlier.
- ASSERT(instr->Bit(30) == 0);
+ VIXL_ASSERT(instr->Bit(30) == 0);
if (instr->Mask(0xA0800000) != 0) {
VisitUnallocated(instr);
} else {
@@ -687,21 +687,21 @@ void Decoder::DecodeFP(Instruction* instr) {
void Decoder::DecodeAdvSIMDLoadStore(Instruction* instr) {
// TODO: Implement Advanced SIMD load/store instruction decode.
- ASSERT(instr->Bits(29, 25) == 0x6);
+ VIXL_ASSERT(instr->Bits(29, 25) == 0x6);
VisitUnimplemented(instr);
}
void Decoder::DecodeAdvSIMDDataProcessing(Instruction* instr) {
// TODO: Implement Advanced SIMD data processing instruction decode.
- ASSERT(instr->Bits(27, 25) == 0x7);
+ VIXL_ASSERT(instr->Bits(27, 25) == 0x7);
VisitUnimplemented(instr);
}
#define DEFINE_VISITOR_CALLERS(A) \
void Decoder::Visit##A(Instruction *instr) { \
- ASSERT(instr->Mask(A##FMask) == A##Fixed); \
+ VIXL_ASSERT(instr->Mask(A##FMask) == A##Fixed); \
std::list<DecoderVisitor*>::iterator it; \
for (it = visitors_.begin(); it != visitors_.end(); it++) { \
(*it)->Visit##A(instr); \
diff --git a/disas/libvixl/a64/disasm-a64.cc b/disas/libvixl/a64/disasm-a64.cc
index 5f172da7d3..aa133a99bf 100644
--- a/disas/libvixl/a64/disasm-a64.cc
+++ b/disas/libvixl/a64/disasm-a64.cc
@@ -95,7 +95,7 @@ void Disassembler::VisitAddSubImmediate(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -142,7 +142,7 @@ void Disassembler::VisitAddSubShifted(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -180,7 +180,7 @@ void Disassembler::VisitAddSubExtended(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -215,7 +215,7 @@ void Disassembler::VisitAddSubWithCarry(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -258,30 +258,30 @@ void Disassembler::VisitLogicalImmediate(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
- ASSERT((reg_size == kXRegSize) ||
- ((reg_size == kWRegSize) && (value <= 0xffffffff)));
+ VIXL_ASSERT((reg_size == kXRegSize) ||
+ ((reg_size == kWRegSize) && (value <= 0xffffffff)));
// Test for movz: 16 bits set at positions 0, 16, 32 or 48.
- if (((value & 0xffffffffffff0000ULL) == 0ULL) ||
- ((value & 0xffffffff0000ffffULL) == 0ULL) ||
- ((value & 0xffff0000ffffffffULL) == 0ULL) ||
- ((value & 0x0000ffffffffffffULL) == 0ULL)) {
+ if (((value & UINT64_C(0xffffffffffff0000)) == 0) ||
+ ((value & UINT64_C(0xffffffff0000ffff)) == 0) ||
+ ((value & UINT64_C(0xffff0000ffffffff)) == 0) ||
+ ((value & UINT64_C(0x0000ffffffffffff)) == 0)) {
return true;
}
// Test for movn: NOT(16 bits set at positions 0, 16, 32 or 48).
if ((reg_size == kXRegSize) &&
- (((value & 0xffffffffffff0000ULL) == 0xffffffffffff0000ULL) ||
- ((value & 0xffffffff0000ffffULL) == 0xffffffff0000ffffULL) ||
- ((value & 0xffff0000ffffffffULL) == 0xffff0000ffffffffULL) ||
- ((value & 0x0000ffffffffffffULL) == 0x0000ffffffffffffULL))) {
+ (((~value & UINT64_C(0xffffffffffff0000)) == 0) ||
+ ((~value & UINT64_C(0xffffffff0000ffff)) == 0) ||
+ ((~value & UINT64_C(0xffff0000ffffffff)) == 0) ||
+ ((~value & UINT64_C(0x0000ffffffffffff)) == 0))) {
return true;
}
if ((reg_size == kWRegSize) &&
@@ -337,7 +337,7 @@ void Disassembler::VisitLogicalShifted(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
@@ -353,7 +353,7 @@ void Disassembler::VisitConditionalCompareRegister(Instruction* instr) {
case CCMN_x: mnemonic = "ccmn"; break;
case CCMP_w:
case CCMP_x: mnemonic = "ccmp"; break;
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -368,7 +368,7 @@ void Disassembler::VisitConditionalCompareImmediate(Instruction* instr) {
case CCMN_x_imm: mnemonic = "ccmn"; break;
case CCMP_w_imm:
case CCMP_x_imm: mnemonic = "ccmp"; break;
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -421,7 +421,7 @@ void Disassembler::VisitConditionalSelect(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -520,7 +520,7 @@ void Disassembler::VisitExtract(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -538,7 +538,7 @@ void Disassembler::VisitPCRelAddressing(Instruction* instr) {
void Disassembler::VisitConditionalBranch(Instruction* instr) {
switch (instr->Mask(ConditionalBranchMask)) {
case B_cond: Format(instr, "b.'CBrn", "'BImmCond"); break;
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
}
@@ -570,7 +570,7 @@ void Disassembler::VisitUnconditionalBranch(Instruction* instr) {
switch (instr->Mask(UnconditionalBranchMask)) {
case B: mnemonic = "b"; break;
case BL: mnemonic = "bl"; break;
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -591,7 +591,7 @@ void Disassembler::VisitDataProcessing1Source(Instruction* instr) {
FORMAT(CLS, "cls");
#undef FORMAT
case REV32_x: mnemonic = "rev32"; break;
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -690,7 +690,7 @@ void Disassembler::VisitDataProcessing3Source(Instruction* instr) {
form = form_xxx;
break;
}
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -705,7 +705,7 @@ void Disassembler::VisitCompareBranch(Instruction* instr) {
case CBZ_x: mnemonic = "cbz"; break;
case CBNZ_w:
case CBNZ_x: mnemonic = "cbnz"; break;
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -722,7 +722,7 @@ void Disassembler::VisitTestBranch(Instruction* instr) {
switch (instr->Mask(TestBranchMask)) {
case TBZ: mnemonic = "tbz"; break;
case TBNZ: mnemonic = "tbnz"; break;
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -742,7 +742,7 @@ void Disassembler::VisitMoveWideImmediate(Instruction* instr) {
case MOVZ_x: mnemonic = "movz"; break;
case MOVK_w:
case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -981,7 +981,7 @@ void Disassembler::VisitFPConditionalSelect(Instruction* instr) {
switch (instr->Mask(FPConditionalSelectMask)) {
case FCSEL_s:
case FCSEL_d: mnemonic = "fcsel"; break;
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -1033,7 +1033,7 @@ void Disassembler::VisitFPDataProcessing2Source(Instruction* instr) {
FORMAT(FMINNM, "fminnm");
FORMAT(FNMUL, "fnmul");
#undef FORMAT
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -1052,7 +1052,7 @@ void Disassembler::VisitFPDataProcessing3Source(Instruction* instr) {
FORMAT(FNMADD, "fnmadd");
FORMAT(FNMSUB, "fnmsub");
#undef FORMAT
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -1065,7 +1065,7 @@ void Disassembler::VisitFPImmediate(Instruction* instr) {
switch (instr->Mask(FPImmediateMask)) {
case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break;
case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break;
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -1082,6 +1082,14 @@ void Disassembler::VisitFPIntegerConvert(Instruction* instr) {
case FMOV_xd: mnemonic = "fmov"; form = form_rf; break;
case FMOV_sw:
case FMOV_dx: mnemonic = "fmov"; form = form_fr; break;
+ case FCVTAS_ws:
+ case FCVTAS_xs:
+ case FCVTAS_wd:
+ case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break;
+ case FCVTAU_ws:
+ case FCVTAU_xs:
+ case FCVTAU_wd:
+ case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break;
case FCVTMS_ws:
case FCVTMS_xs:
case FCVTMS_wd:
@@ -1141,7 +1149,7 @@ void Disassembler::VisitFPFixedPointConvert(Instruction* instr) {
case UCVTF_sx_fixed:
case UCVTF_dw_fixed:
case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break;
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -1176,7 +1184,7 @@ void Disassembler::VisitSystem(Instruction* instr) {
}
}
} else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
- ASSERT(instr->Mask(SystemHintMask) == HINT);
+ VIXL_ASSERT(instr->Mask(SystemHintMask) == HINT);
switch (instr->ImmHint()) {
case NOP: {
mnemonic = "nop";
@@ -1184,6 +1192,24 @@ void Disassembler::VisitSystem(Instruction* instr) {
break;
}
}
+ } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+ switch (instr->Mask(MemBarrierMask)) {
+ case DMB: {
+ mnemonic = "dmb";
+ form = "'M";
+ break;
+ }
+ case DSB: {
+ mnemonic = "dsb";
+ form = "'M";
+ break;
+ }
+ case ISB: {
+ mnemonic = "isb";
+ form = NULL;
+ break;
+ }
+ }
}
Format(instr, mnemonic, form);
@@ -1226,7 +1252,7 @@ void Disassembler::ProcessOutput(Instruction* /*instr*/) {
void Disassembler::Format(Instruction* instr, const char* mnemonic,
const char* format) {
- ASSERT(mnemonic != NULL);
+ VIXL_ASSERT(mnemonic != NULL);
ResetOutput();
Substitute(instr, mnemonic);
if (format != NULL) {
@@ -1268,8 +1294,9 @@ int Disassembler::SubstituteField(Instruction* instr, const char* format) {
case 'A': return SubstitutePCRelAddressField(instr, format);
case 'B': return SubstituteBranchTargetField(instr, format);
case 'O': return SubstituteLSRegOffsetField(instr, format);
+ case 'M': return SubstituteBarrierField(instr, format);
default: {
- UNREACHABLE();
+ VIXL_UNREACHABLE();
return 1;
}
}
@@ -1294,7 +1321,7 @@ int Disassembler::SubstituteRegisterField(Instruction* instr,
}
break;
}
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
// Increase field length for registers tagged as stack.
@@ -1331,7 +1358,7 @@ int Disassembler::SubstituteRegisterField(Instruction* instr,
int Disassembler::SubstituteImmediateField(Instruction* instr,
const char* format) {
- ASSERT(format[0] == 'I');
+ VIXL_ASSERT(format[0] == 'I');
switch (format[1]) {
case 'M': { // IMoveImm or IMoveLSL.
@@ -1339,10 +1366,10 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
AppendToOutput("#0x%" PRIx64, imm);
} else {
- ASSERT(format[5] == 'L');
+ VIXL_ASSERT(format[5] == 'L');
AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
if (instr->ShiftMoveWide() > 0) {
- AppendToOutput(", lsl #%" PRId64, 16 * instr->ShiftMoveWide());
+ AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
}
}
return 8;
@@ -1384,14 +1411,14 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
return 6;
}
case 'A': { // IAddSub.
- ASSERT(instr->ShiftAddSub() <= 1);
+ VIXL_ASSERT(instr->ShiftAddSub() <= 1);
int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
return 7;
}
case 'F': { // IFPSingle, IFPDouble or IFPFBits.
if (format[3] == 'F') { // IFPFbits.
- AppendToOutput("#%" PRId64, 64 - instr->FPScale());
+ AppendToOutput("#%d", 64 - instr->FPScale());
return 8;
} else {
AppendToOutput("#0x%" PRIx64 " (%.4f)", instr->ImmFP(),
@@ -1412,27 +1439,27 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
return 5;
}
case 'P': { // IP - Conditional compare.
- AppendToOutput("#%" PRId64, instr->ImmCondCmp());
+ AppendToOutput("#%d", instr->ImmCondCmp());
return 2;
}
case 'B': { // Bitfields.
return SubstituteBitfieldImmediateField(instr, format);
}
case 'E': { // IExtract.
- AppendToOutput("#%" PRId64, instr->ImmS());
+ AppendToOutput("#%d", instr->ImmS());
return 8;
}
case 'S': { // IS - Test and branch bit.
- AppendToOutput("#%" PRId64, (instr->ImmTestBranchBit5() << 5) |
- instr->ImmTestBranchBit40());
+ AppendToOutput("#%d", (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40());
return 2;
}
case 'D': { // IDebug - HLT and BRK instructions.
- AppendToOutput("#0x%" PRIx64, instr->ImmException());
+ AppendToOutput("#0x%x", instr->ImmException());
return 6;
}
default: {
- UNIMPLEMENTED();
+ VIXL_UNIMPLEMENTED();
return 0;
}
}
@@ -1441,7 +1468,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
const char* format) {
- ASSERT((format[0] == 'I') && (format[1] == 'B'));
+ VIXL_ASSERT((format[0] == 'I') && (format[1] == 'B'));
unsigned r = instr->ImmR();
unsigned s = instr->ImmS();
@@ -1455,19 +1482,19 @@ int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
AppendToOutput("#%d", s + 1);
return 5;
} else {
- ASSERT(format[3] == '-');
+ VIXL_ASSERT(format[3] == '-');
AppendToOutput("#%d", s - r + 1);
return 7;
}
}
case 'Z': { // IBZ-r.
- ASSERT((format[3] == '-') && (format[4] == 'r'));
+ VIXL_ASSERT((format[3] == '-') && (format[4] == 'r'));
unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize;
AppendToOutput("#%d", reg_size - r);
return 5;
}
default: {
- UNREACHABLE();
+ VIXL_UNREACHABLE();
return 0;
}
}
@@ -1476,7 +1503,7 @@ int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
int Disassembler::SubstituteLiteralField(Instruction* instr,
const char* format) {
- ASSERT(strncmp(format, "LValue", 6) == 0);
+ VIXL_ASSERT(strncmp(format, "LValue", 6) == 0);
USE(format);
switch (instr->Mask(LoadLiteralMask)) {
@@ -1484,7 +1511,7 @@ int Disassembler::SubstituteLiteralField(Instruction* instr,
case LDR_x_lit:
case LDR_s_lit:
case LDR_d_lit: AppendToOutput("(addr %p)", instr->LiteralAddress()); break;
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
return 6;
@@ -1492,12 +1519,12 @@ int Disassembler::SubstituteLiteralField(Instruction* instr,
int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
- ASSERT(format[0] == 'H');
- ASSERT(instr->ShiftDP() <= 0x3);
+ VIXL_ASSERT(format[0] == 'H');
+ VIXL_ASSERT(instr->ShiftDP() <= 0x3);
switch (format[1]) {
case 'D': { // HDP.
- ASSERT(instr->ShiftDP() != ROR);
+ VIXL_ASSERT(instr->ShiftDP() != ROR);
} // Fall through.
case 'L': { // HLo.
if (instr->ImmDPShift() != 0) {
@@ -1508,7 +1535,7 @@ int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
return 3;
}
default:
- UNIMPLEMENTED();
+ VIXL_UNIMPLEMENTED();
return 0;
}
}
@@ -1516,7 +1543,7 @@ int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
int Disassembler::SubstituteConditionField(Instruction* instr,
const char* format) {
- ASSERT(format[0] == 'C');
+ VIXL_ASSERT(format[0] == 'C');
const char* condition_code[] = { "eq", "ne", "hs", "lo",
"mi", "pl", "vs", "vc",
"hi", "ls", "ge", "lt",
@@ -1538,27 +1565,27 @@ int Disassembler::SubstituteConditionField(Instruction* instr,
int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
const char* format) {
USE(format);
- ASSERT(strncmp(format, "AddrPCRel", 9) == 0);
+ VIXL_ASSERT(strncmp(format, "AddrPCRel", 9) == 0);
int offset = instr->ImmPCRel();
// Only ADR (AddrPCRelByte) is supported.
- ASSERT(strcmp(format, "AddrPCRelByte") == 0);
+ VIXL_ASSERT(strcmp(format, "AddrPCRelByte") == 0);
char sign = '+';
if (offset < 0) {
offset = -offset;
sign = '-';
}
- // TODO: Extend this to support printing the target address.
- AppendToOutput("#%c0x%x", sign, offset);
+ VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+ AppendToOutput("#%c0x%x (addr %p)", sign, offset, instr + offset);
return 13;
}
int Disassembler::SubstituteBranchTargetField(Instruction* instr,
const char* format) {
- ASSERT(strncmp(format, "BImm", 4) == 0);
+ VIXL_ASSERT(strncmp(format, "BImm", 4) == 0);
int64_t offset = 0;
switch (format[5]) {
@@ -1570,7 +1597,7 @@ int Disassembler::SubstituteBranchTargetField(Instruction* instr,
case 'm': offset = instr->ImmCmpBranch(); break;
// BImmTest - test and branch immediate.
case 'e': offset = instr->ImmTestBranch(); break;
- default: UNIMPLEMENTED();
+ default: VIXL_UNIMPLEMENTED();
}
offset <<= kInstructionSizeLog2;
char sign = '+';
@@ -1578,15 +1605,16 @@ int Disassembler::SubstituteBranchTargetField(Instruction* instr,
offset = -offset;
sign = '-';
}
- AppendToOutput("#%c0x%" PRIx64, sign, offset);
+ VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+ AppendToOutput("#%c0x%" PRIx64 " (addr %p)", sign, offset, instr + offset);
return 8;
}
int Disassembler::SubstituteExtendField(Instruction* instr,
const char* format) {
- ASSERT(strncmp(format, "Ext", 3) == 0);
- ASSERT(instr->ExtendMode() <= 7);
+ VIXL_ASSERT(strncmp(format, "Ext", 3) == 0);
+ VIXL_ASSERT(instr->ExtendMode() <= 7);
USE(format);
const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
@@ -1598,12 +1626,12 @@ int Disassembler::SubstituteExtendField(Instruction* instr,
(((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) ||
(instr->ExtendMode() == UXTX))) {
if (instr->ImmExtendShift() > 0) {
- AppendToOutput(", lsl #%" PRId64, instr->ImmExtendShift());
+ AppendToOutput(", lsl #%d", instr->ImmExtendShift());
}
} else {
AppendToOutput(", %s", extend_mode[instr->ExtendMode()]);
if (instr->ImmExtendShift() > 0) {
- AppendToOutput(" #%" PRId64, instr->ImmExtendShift());
+ AppendToOutput(" #%d", instr->ImmExtendShift());
}
}
return 3;
@@ -1612,7 +1640,7 @@ int Disassembler::SubstituteExtendField(Instruction* instr,
int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
const char* format) {
- ASSERT(strncmp(format, "Offsetreg", 9) == 0);
+ VIXL_ASSERT(strncmp(format, "Offsetreg", 9) == 0);
const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
"undefined", "undefined", "sxtw", "sxtx" };
USE(format);
@@ -1632,7 +1660,7 @@ int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
if (!((ext == UXTX) && (shift == 0))) {
AppendToOutput(", %s", extend_mode[ext]);
if (shift != 0) {
- AppendToOutput(" #%" PRId64, instr->SizeLS());
+ AppendToOutput(" #%d", instr->SizeLS());
}
}
return 9;
@@ -1641,7 +1669,7 @@ int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
int Disassembler::SubstitutePrefetchField(Instruction* instr,
const char* format) {
- ASSERT(format[0] == 'P');
+ VIXL_ASSERT(format[0] == 'P');
USE(format);
int prefetch_mode = instr->PrefetchMode();
@@ -1654,6 +1682,23 @@ int Disassembler::SubstitutePrefetchField(Instruction* instr,
return 6;
}
+int Disassembler::SubstituteBarrierField(Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'M');
+ USE(format);
+
+ static const char* options[4][4] = {
+ { "sy (0b0000)", "oshld", "oshst", "osh" },
+ { "sy (0b0100)", "nshld", "nshst", "nsh" },
+ { "sy (0b1000)", "ishld", "ishst", "ish" },
+ { "sy (0b1100)", "ld", "st", "sy" }
+ };
+ int domain = instr->ImmBarrierDomain();
+ int type = instr->ImmBarrierType();
+
+ AppendToOutput("%s", options[domain][type]);
+ return 1;
+}
void Disassembler::ResetOutput() {
buffer_pos_ = 0;
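
For reference, a minimal standalone sketch of the barrier-option lookup that the new SubstituteBarrierField adds above (the other visible change in this file is that PC-relative and branch operands now also print a computed target address via instr + offset, which is why the hunks assert that Instruction is byte-sized). The string table is copied from the hunk; splitting the 4-bit option field into a 2-bit domain (high bits) and a 2-bit type (low bits) is an assumption based on how the table is indexed, and BarrierOptionName/main are illustrative names, not libvixl code.

#include <cstdio>

// Illustrative stand-in for the lookup done by Disassembler::SubstituteBarrierField.
static const char* BarrierOptionName(int option) {
  static const char* options[4][4] = {
    { "sy (0b0000)", "oshld", "oshst", "osh" },
    { "sy (0b0100)", "nshld", "nshst", "nsh" },
    { "sy (0b1000)", "ishld", "ishst", "ish" },
    { "sy (0b1100)", "ld",    "st",    "sy"  }
  };
  int domain = (option >> 2) & 3;  // assumed: shareability domain in bits 3:2
  int type = option & 3;           // assumed: access type in bits 1:0
  return options[domain][type];
}

int main() {
  // 0b1011 is the option encoding used by "dmb ish".
  printf("%s\n", BarrierOptionName(0xb));  // prints "ish"
  return 0;
}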
diff --git a/disas/libvixl/a64/disasm-a64.h b/disas/libvixl/a64/disasm-a64.h
index 857a5acac4..3a56e15515 100644
--- a/disas/libvixl/a64/disasm-a64.h
+++ b/disas/libvixl/a64/disasm-a64.h
@@ -64,6 +64,7 @@ class Disassembler: public DecoderVisitor {
int SubstituteBranchTargetField(Instruction* instr, const char* format);
int SubstituteLSRegOffsetField(Instruction* instr, const char* format);
int SubstitutePrefetchField(Instruction* instr, const char* format);
+ int SubstituteBarrierField(Instruction* instr, const char* format);
inline bool RdIsZROrSP(Instruction* instr) const {
return (instr->Rd() == kZeroRegCode);
diff --git a/disas/libvixl/a64/instructions-a64.cc b/disas/libvixl/a64/instructions-a64.cc
index e87fa3acce..c4eb7c4518 100644
--- a/disas/libvixl/a64/instructions-a64.cc
+++ b/disas/libvixl/a64/instructions-a64.cc
@@ -33,20 +33,20 @@ namespace vixl {
static uint64_t RotateRight(uint64_t value,
unsigned int rotate,
unsigned int width) {
- ASSERT(width <= 64);
+ VIXL_ASSERT(width <= 64);
rotate &= 63;
- return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
- (value >> rotate);
+ return ((value & ((UINT64_C(1) << rotate) - 1)) <<
+ (width - rotate)) | (value >> rotate);
}
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
uint64_t value,
unsigned width) {
- ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
- (width == 32));
- ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
- uint64_t result = value & ((1UL << width) - 1UL);
+ VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
+ (width == 32));
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ uint64_t result = value & ((UINT64_C(1) << width) - 1);
for (unsigned i = width; i < reg_size; i *= 2) {
result |= (result << i);
}
@@ -84,7 +84,7 @@ uint64_t Instruction::ImmLogical() {
if (imm_s == 0x3F) {
return 0;
}
- uint64_t bits = (1UL << (imm_s + 1)) - 1;
+ uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
return RotateRight(bits, imm_r, 64);
} else {
if ((imm_s >> 1) == 0x1F) {
@@ -96,14 +96,14 @@ uint64_t Instruction::ImmLogical() {
if ((imm_s & mask) == mask) {
return 0;
}
- uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
+ uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
return RepeatBitsAcrossReg(reg_size,
RotateRight(bits, imm_r & mask, width),
width);
}
}
}
- UNREACHABLE();
+ VIXL_UNREACHABLE();
return 0;
}
@@ -155,7 +155,7 @@ Instruction* Instruction::ImmPCOffsetTarget() {
offset = ImmPCRel();
} else {
// All PC-relative branches.
- ASSERT(BranchType() != UnknownBranchType);
+ VIXL_ASSERT(BranchType() != UnknownBranchType);
// Relative branch offsets are instruction-size-aligned.
offset = ImmBranch() << kInstructionSizeLog2;
}
@@ -169,7 +169,7 @@ inline int Instruction::ImmBranch() const {
case UncondBranchType: return ImmUncondBranch();
case CompareBranchType: return ImmCmpBranch();
case TestBranchType: return ImmTestBranch();
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
return 0;
}
@@ -186,7 +186,7 @@ void Instruction::SetImmPCOffsetTarget(Instruction* target) {
void Instruction::SetPCRelImmTarget(Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction.
- ASSERT(Mask(PCRelAddressingMask) == ADR);
+ VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
Instr imm = Assembler::ImmPCRelAddress(target - this);
@@ -195,7 +195,7 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {
void Instruction::SetBranchImmTarget(Instruction* target) {
- ASSERT(((target - this) & 3) == 0);
+ VIXL_ASSERT(((target - this) & 3) == 0);
Instr branch_imm = 0;
uint32_t imm_mask = 0;
int offset = (target - this) >> kInstructionSizeLog2;
@@ -220,14 +220,14 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
imm_mask = ImmTestBranch_mask;
break;
}
- default: UNREACHABLE();
+ default: VIXL_UNREACHABLE();
}
SetInstructionBits(Mask(~imm_mask) | branch_imm);
}
void Instruction::SetImmLLiteral(Instruction* source) {
- ASSERT(((source - this) & 3) == 0);
+ VIXL_ASSERT(((source - this) & 3) == 0);
int offset = (source - this) >> kLiteralEntrySizeLog2;
Instr imm = Assembler::ImmLLiteral(offset);
Instr mask = ImmLLiteral_mask;
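
Most of the mechanical changes in this file replace 1UL shifts with UINT64_C(1). A minimal sketch of why that distinction matters, assuming only standard C99/C++ integer-type rules; the shift amount 40 is an arbitrary illustration, not a value taken from the code.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  // On an LP64 host 'unsigned long' is 64 bits and 1UL would behave the same;
  // on an ILP32 host it is 32 bits, so shifting 1UL by 40 is undefined
  // behaviour, which is why the broken form appears only in this comment:
  //   unsigned long broken = 1UL << 40;
  uint64_t ok = UINT64_C(1) << 40;  // guaranteed to be at least 64 bits wide
  printf("sizeof(unsigned long)=%zu sizeof(uint64_t)=%zu ok=0x%" PRIx64 "\n",
         sizeof(unsigned long), sizeof(uint64_t), ok);
  return 0;
}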
diff --git a/disas/libvixl/a64/instructions-a64.h b/disas/libvixl/a64/instructions-a64.h
index ba9068ca8b..a4240d7d33 100644
--- a/disas/libvixl/a64/instructions-a64.h
+++ b/disas/libvixl/a64/instructions-a64.h
@@ -44,30 +44,36 @@ const unsigned kMaxLoadLiteralRange = 1 * MBytes;
const unsigned kWRegSize = 32;
const unsigned kWRegSizeLog2 = 5;
const unsigned kWRegSizeInBytes = kWRegSize / 8;
+const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
const unsigned kXRegSize = 64;
const unsigned kXRegSizeLog2 = 6;
const unsigned kXRegSizeInBytes = kXRegSize / 8;
+const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
const unsigned kSRegSize = 32;
const unsigned kSRegSizeLog2 = 5;
const unsigned kSRegSizeInBytes = kSRegSize / 8;
+const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
const unsigned kDRegSize = 64;
const unsigned kDRegSizeLog2 = 6;
const unsigned kDRegSizeInBytes = kDRegSize / 8;
-const int64_t kWRegMask = 0x00000000ffffffffLL;
-const int64_t kXRegMask = 0xffffffffffffffffLL;
-const int64_t kSRegMask = 0x00000000ffffffffLL;
-const int64_t kDRegMask = 0xffffffffffffffffLL;
-const int64_t kXSignMask = 0x1LL << 63;
-const int64_t kWSignMask = 0x1LL << 31;
-const int64_t kByteMask = 0xffL;
-const int64_t kHalfWordMask = 0xffffL;
-const int64_t kWordMask = 0xffffffffLL;
-const uint64_t kXMaxUInt = 0xffffffffffffffffULL;
-const uint64_t kWMaxUInt = 0xffffffffULL;
-const int64_t kXMaxInt = 0x7fffffffffffffffLL;
-const int64_t kXMinInt = 0x8000000000000000LL;
-const int32_t kWMaxInt = 0x7fffffff;
-const int32_t kWMinInt = 0x80000000;
+const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
+const uint64_t kWRegMask = UINT64_C(0xffffffff);
+const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
+const uint64_t kSRegMask = UINT64_C(0xffffffff);
+const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
+const uint64_t kSSignMask = UINT64_C(0x80000000);
+const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
+const uint64_t kWSignMask = UINT64_C(0x80000000);
+const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
+const uint64_t kByteMask = UINT64_C(0xff);
+const uint64_t kHalfWordMask = UINT64_C(0xffff);
+const uint64_t kWordMask = UINT64_C(0xffffffff);
+const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
+const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
+const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
+const int64_t kXMinInt = INT64_C(0x8000000000000000);
+const int32_t kWMaxInt = INT32_C(0x7fffffff);
+const int32_t kWMinInt = INT32_C(0x80000000);
const unsigned kLinkRegCode = 30;
const unsigned kZeroRegCode = 31;
const unsigned kSPRegInternalCode = 63;
@@ -81,18 +87,28 @@ const unsigned kFloatExponentBits = 8;
const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
-const double kFP64PositiveInfinity = rawbits_to_double(0x7ff0000000000000ULL);
-const double kFP64NegativeInfinity = rawbits_to_double(0xfff0000000000000ULL);
+const double kFP64PositiveInfinity =
+ rawbits_to_double(UINT64_C(0x7ff0000000000000));
+const double kFP64NegativeInfinity =
+ rawbits_to_double(UINT64_C(0xfff0000000000000));
// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
-static const double kFP64SignallingNaN = rawbits_to_double(0x7ff000007f800001ULL);
+static const double kFP64SignallingNaN =
+ rawbits_to_double(UINT64_C(0x7ff000007f800001));
static const float kFP32SignallingNaN = rawbits_to_float(0x7f800001);
// A similar value, but as a quiet NaN.
-static const double kFP64QuietNaN = rawbits_to_double(0x7ff800007fc00001ULL);
+static const double kFP64QuietNaN =
+ rawbits_to_double(UINT64_C(0x7ff800007fc00001));
static const float kFP32QuietNaN = rawbits_to_float(0x7fc00001);
+// The default NaN values (for FPCR.DN=1).
+static const double kFP64DefaultNaN =
+ rawbits_to_double(UINT64_C(0x7ff8000000000000));
+static const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);
+
+
enum LSDataSize {
LSByte = 0,
LSHalfword = 1,
@@ -325,7 +341,7 @@ class Instruction {
}
inline Instruction* InstructionAtOffset(int64_t offset) {
- ASSERT(IsWordAligned(this + offset));
+ VIXL_ASSERT(IsWordAligned(this + offset));
return this + offset;
}
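
Among the retyped constants above, kFP64DefaultNaN and kFP32DefaultNaN are new. A standalone check of what the 64-bit pattern encodes, with memcpy standing in for rawbits_to_double so the snippet compiles on its own; the bit-layout facts (exponent in bits 62:52, quiet bit at 51) are standard IEEE-754, not taken from this patch.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Local stand-in for vixl's rawbits_to_double().
static double RawbitsToDouble(uint64_t bits) {
  double value;
  memcpy(&value, &bits, sizeof(value));
  return value;
}

int main() {
  const uint64_t raw = UINT64_C(0x7ff8000000000000);  // kFP64DefaultNaN bits
  const double default_nan = RawbitsToDouble(raw);
  // A default NaN is a quiet NaN: exponent all ones, quiet bit (51) set,
  // payload and sign bits zero.
  printf("isnan=%d quiet_bit=%d payload=0x%llx\n",
         (int)std::isnan(default_nan),
         (int)((raw >> 51) & 1),
         (unsigned long long)(raw & ((UINT64_C(1) << 51) - 1)));
  // Expected: isnan=1 quiet_bit=1 payload=0x0
  return 0;
}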
diff --git a/disas/libvixl/globals.h b/disas/libvixl/globals.h
index a6a3fccd8a..e28dc6663a 100644
--- a/disas/libvixl/globals.h
+++ b/disas/libvixl/globals.h
@@ -27,8 +27,20 @@
#ifndef VIXL_GLOBALS_H
#define VIXL_GLOBALS_H
-// Get the standard printf format macros for C99 stdint types.
+// Get standard C99 macros for integer types.
+#ifndef __STDC_CONSTANT_MACROS
+#define __STDC_CONSTANT_MACROS
+#endif
+
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS
+#endif
+
+#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
+#endif
+
+#include <stdint.h>
#include <inttypes.h>
#include <assert.h>
@@ -45,21 +57,29 @@ typedef uint8_t byte;
const int KBytes = 1024;
const int MBytes = 1024 * KBytes;
- #define ABORT() printf("in %s, line %i", __FILE__, __LINE__); abort()
+#define VIXL_ABORT() printf("in %s, line %i", __FILE__, __LINE__); abort()
#ifdef DEBUG
- #define ASSERT(condition) assert(condition)
- #define CHECK(condition) ASSERT(condition)
- #define UNIMPLEMENTED() printf("UNIMPLEMENTED\t"); ABORT()
- #define UNREACHABLE() printf("UNREACHABLE\t"); ABORT()
+ #define VIXL_ASSERT(condition) assert(condition)
+ #define VIXL_CHECK(condition) VIXL_ASSERT(condition)
+ #define VIXL_UNIMPLEMENTED() printf("UNIMPLEMENTED\t"); VIXL_ABORT()
+ #define VIXL_UNREACHABLE() printf("UNREACHABLE\t"); VIXL_ABORT()
#else
- #define ASSERT(condition) ((void) 0)
- #define CHECK(condition) assert(condition)
- #define UNIMPLEMENTED() ((void) 0)
- #define UNREACHABLE() ((void) 0)
+ #define VIXL_ASSERT(condition) ((void) 0)
+ #define VIXL_CHECK(condition) assert(condition)
+ #define VIXL_UNIMPLEMENTED() ((void) 0)
+ #define VIXL_UNREACHABLE() ((void) 0)
#endif
+// This is not as powerful as template based assertions, but it is simple.
+// It assumes that the descriptions are unique. If this starts being a problem,
+// we can switch to a different implementation.
+#define VIXL_CONCAT(a, b) a##b
+#define VIXL_STATIC_ASSERT_LINE(line, condition) \
+ typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \
+ __attribute__((unused))
+#define VIXL_STATIC_ASSERT(condition) VIXL_STATIC_ASSERT_LINE(__LINE__, condition) //NOLINT
template <typename T> inline void USE(T) {}
-#define ALIGNMENT_EXCEPTION() printf("ALIGNMENT EXCEPTION\t"); ABORT()
+#define VIXL_ALIGNMENT_EXCEPTION() printf("ALIGNMENT EXCEPTION\t"); VIXL_ABORT()
#endif // VIXL_GLOBALS_H
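
A usage sketch for the VIXL_STATIC_ASSERT macro defined above: it is the classic pre-C++11 trick of typedef-ing an array whose size is -1 when the condition is false, so a failing assertion becomes a compile error rather than a runtime check. The macro bodies are copied from the hunk (they assume a GCC/Clang __attribute__); main() is illustrative only.

#include <stdint.h>

#define VIXL_CONCAT(a, b) a##b
#define VIXL_STATIC_ASSERT_LINE(line, condition) \
  typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \
  __attribute__((unused))
#define VIXL_STATIC_ASSERT(condition) VIXL_STATIC_ASSERT_LINE(__LINE__, condition)

int main() {
  VIXL_STATIC_ASSERT(sizeof(uint64_t) == 8);    // compiles: array of size 1
  // VIXL_STATIC_ASSERT(sizeof(uint64_t) == 4); // would fail: array of size -1
  return 0;
}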
diff --git a/disas/libvixl/platform.h b/disas/libvixl/platform.h
index a2600f370d..b5c2085b90 100644
--- a/disas/libvixl/platform.h
+++ b/disas/libvixl/platform.h
@@ -34,9 +34,7 @@ namespace vixl {
// Currently we assume running the simulator implies running on x86 hardware.
inline void HostBreakpoint() { asm("int3"); }
#else
-inline void HostBreakpoint() {
- // TODO: Implement HostBreakpoint on a64.
-}
+inline void HostBreakpoint() { asm("brk"); }
#endif
} // namespace vixl
diff --git a/disas/libvixl/utils.cc b/disas/libvixl/utils.cc
index a45fb95f47..c9c05d1e18 100644
--- a/disas/libvixl/utils.cc
+++ b/disas/libvixl/utils.cc
@@ -58,9 +58,9 @@ double rawbits_to_double(uint64_t bits) {
int CountLeadingZeros(uint64_t value, int width) {
- ASSERT((width == 32) || (width == 64));
+ VIXL_ASSERT((width == 32) || (width == 64));
int count = 0;
- uint64_t bit_test = 1UL << (width - 1);
+ uint64_t bit_test = UINT64_C(1) << (width - 1);
while ((count < width) && ((bit_test & value) == 0)) {
count++;
bit_test >>= 1;
@@ -70,7 +70,7 @@ int CountLeadingZeros(uint64_t value, int width) {
int CountLeadingSignBits(int64_t value, int width) {
- ASSERT((width == 32) || (width == 64));
+ VIXL_ASSERT((width == 32) || (width == 64));
if (value >= 0) {
return CountLeadingZeros(value, width) - 1;
} else {
@@ -80,7 +80,7 @@ int CountLeadingSignBits(int64_t value, int width) {
int CountTrailingZeros(uint64_t value, int width) {
- ASSERT((width == 32) || (width == 64));
+ VIXL_ASSERT((width == 32) || (width == 64));
int count = 0;
while ((count < width) && (((value >> count) & 1) == 0)) {
count++;
@@ -92,10 +92,10 @@ int CountTrailingZeros(uint64_t value, int width) {
int CountSetBits(uint64_t value, int width) {
// TODO: Other widths could be added here, as the implementation already
// supports them.
- ASSERT((width == 32) || (width == 64));
+ VIXL_ASSERT((width == 32) || (width == 64));
// Mask out unused bits to ensure that they are not counted.
- value &= (0xffffffffffffffffULL >> (64-width));
+ value &= (UINT64_C(0xffffffffffffffff) >> (64-width));
// Add up the set bits.
// The algorithm works by adding pairs of bit fields together iteratively,
@@ -108,18 +108,19 @@ int CountSetBits(uint64_t value, int width) {
// value = h+g+f+e d+c+b+a
// \ |
// value = h+g+f+e+d+c+b+a
- value = ((value >> 1) & 0x5555555555555555ULL) +
- (value & 0x5555555555555555ULL);
- value = ((value >> 2) & 0x3333333333333333ULL) +
- (value & 0x3333333333333333ULL);
- value = ((value >> 4) & 0x0f0f0f0f0f0f0f0fULL) +
- (value & 0x0f0f0f0f0f0f0f0fULL);
- value = ((value >> 8) & 0x00ff00ff00ff00ffULL) +
- (value & 0x00ff00ff00ff00ffULL);
- value = ((value >> 16) & 0x0000ffff0000ffffULL) +
- (value & 0x0000ffff0000ffffULL);
- value = ((value >> 32) & 0x00000000ffffffffULL) +
- (value & 0x00000000ffffffffULL);
+ const uint64_t kMasks[] = {
+ UINT64_C(0x5555555555555555),
+ UINT64_C(0x3333333333333333),
+ UINT64_C(0x0f0f0f0f0f0f0f0f),
+ UINT64_C(0x00ff00ff00ff00ff),
+ UINT64_C(0x0000ffff0000ffff),
+ UINT64_C(0x00000000ffffffff),
+ };
+
+ for (unsigned i = 0; i < (sizeof(kMasks) / sizeof(kMasks[0])); i++) {
+ int shift = 1 << i;
+ value = ((value >> shift) & kMasks[i]) + (value & kMasks[i]);
+ }
return value;
}
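
The rewritten CountSetBits above is the standard mask-and-add population count: before step i the value holds per-field bit counts in fields of width 2^i, and each iteration folds adjacent fields together. A standalone sketch with the mask table and loop copied from the hunk; the function name and test value are illustrative, not libvixl code.

#include <cstdint>
#include <cstdio>

static int CountSetBitsSketch(uint64_t value) {
  const uint64_t kMasks[] = {
    UINT64_C(0x5555555555555555),  // fold bit pairs
    UINT64_C(0x3333333333333333),  // fold nibbles
    UINT64_C(0x0f0f0f0f0f0f0f0f),  // fold bytes
    UINT64_C(0x00ff00ff00ff00ff),  // fold 16-bit halves
    UINT64_C(0x0000ffff0000ffff),  // fold 32-bit halves
    UINT64_C(0x00000000ffffffff),  // fold the two 32-bit words
  };
  for (unsigned i = 0; i < (sizeof(kMasks) / sizeof(kMasks[0])); i++) {
    int shift = 1 << i;
    value = ((value >> shift) & kMasks[i]) + (value & kMasks[i]);
  }
  return static_cast<int>(value);
}

int main() {
  printf("%d\n", CountSetBitsSketch(UINT64_C(0xf0f0f0f0f0f0f0f0)));  // prints 32
  return 0;
}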
diff --git a/disas/libvixl/utils.h b/disas/libvixl/utils.h
index 029341eb14..83c928c8e3 100644
--- a/disas/libvixl/utils.h
+++ b/disas/libvixl/utils.h
@@ -27,7 +27,7 @@
#ifndef VIXL_UTILS_H
#define VIXL_UTILS_H
-
+#include <math.h>
#include <string.h>
#include "globals.h"
@@ -35,19 +35,19 @@ namespace vixl {
// Check number width.
inline bool is_intn(unsigned n, int64_t x) {
- ASSERT((0 < n) && (n < 64));
- int64_t limit = 1ULL << (n - 1);
+ VIXL_ASSERT((0 < n) && (n < 64));
+ int64_t limit = INT64_C(1) << (n - 1);
return (-limit <= x) && (x < limit);
}
inline bool is_uintn(unsigned n, int64_t x) {
- ASSERT((0 < n) && (n < 64));
+ VIXL_ASSERT((0 < n) && (n < 64));
return !(x >> n);
}
inline unsigned truncate_to_intn(unsigned n, int64_t x) {
- ASSERT((0 < n) && (n < 64));
- return (x & ((1ULL << n) - 1));
+ VIXL_ASSERT((0 < n) && (n < 64));
+ return (x & ((INT64_C(1) << n) - 1));
}
#define INT_1_TO_63_LIST(V) \
@@ -90,13 +90,67 @@ inline int64_t signed_bitextract_64(int msb, int lsb, int64_t x) {
return (x << (63 - msb)) >> (lsb + 63 - msb);
}
-// floating point representation
+// Floating point representation.
uint32_t float_to_rawbits(float value);
uint64_t double_to_rawbits(double value);
float rawbits_to_float(uint32_t bits);
double rawbits_to_double(uint64_t bits);
-// Bits counting.
+
+// NaN tests.
+inline bool IsSignallingNaN(double num) {
+ const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
+ uint64_t raw = double_to_rawbits(num);
+ if (isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) {
+ return true;
+ }
+ return false;
+}
+
+
+inline bool IsSignallingNaN(float num) {
+ const uint32_t kFP32QuietNaNMask = 0x00400000;
+ uint32_t raw = float_to_rawbits(num);
+ if (isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) {
+ return true;
+ }
+ return false;
+}
+
+
+template <typename T>
+inline bool IsQuietNaN(T num) {
+ return isnan(num) && !IsSignallingNaN(num);
+}
+
+
+// Convert the NaN in 'num' to a quiet NaN.
+inline double ToQuietNaN(double num) {
+ const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
+ VIXL_ASSERT(isnan(num));
+ return rawbits_to_double(double_to_rawbits(num) | kFP64QuietNaNMask);
+}
+
+
+inline float ToQuietNaN(float num) {
+ const uint32_t kFP32QuietNaNMask = 0x00400000;
+ VIXL_ASSERT(isnan(num));
+ return rawbits_to_float(float_to_rawbits(num) | kFP32QuietNaNMask);
+}
+
+
+// Fused multiply-add.
+inline double FusedMultiplyAdd(double op1, double op2, double a) {
+ return fma(op1, op2, a);
+}
+
+
+inline float FusedMultiplyAdd(float op1, float op2, float a) {
+ return fmaf(op1, op2, a);
+}
+
+
+// Bit counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
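
The new NaN helpers above rely on the IEEE-754 rule that bit 51 of a double distinguishes quiet from signalling NaNs. A self-contained sketch of the double-precision test, with memcpy standing in for double_to_rawbits/rawbits_to_double and the NaN bit patterns copied from the kFP64SignallingNaN and kFP64QuietNaN constants earlier in this patch; the *Sketch names are illustrative.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint64_t DoubleToRawbits(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  return bits;
}

static double RawbitsToDouble(uint64_t bits) {
  double value;
  memcpy(&value, &bits, sizeof(value));
  return value;
}

static bool IsSignallingNaNSketch(double num) {
  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);  // bit 51
  return std::isnan(num) && ((DoubleToRawbits(num) & kFP64QuietNaNMask) == 0);
}

int main() {
  const double signalling = RawbitsToDouble(UINT64_C(0x7ff000007f800001));
  const double quiet = RawbitsToDouble(UINT64_C(0x7ff800007fc00001));
  printf("%d %d\n", (int)IsSignallingNaNSketch(signalling),
         (int)IsSignallingNaNSketch(quiet));  // prints "1 0"
  return 0;
}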
@@ -106,20 +160,30 @@ int CountSetBits(uint64_t value, int width);
// TODO: rename/refactor to make it specific to instructions.
template<typename T>
bool IsWordAligned(T pointer) {
- ASSERT(sizeof(pointer) == sizeof(intptr_t)); // NOLINT(runtime/sizeof)
+ VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t)); // NOLINT(runtime/sizeof)
return (reinterpret_cast<intptr_t>(pointer) & 3) == 0;
}
// Increment a pointer until it has the specified alignment.
template<class T>
T AlignUp(T pointer, size_t alignment) {
- ASSERT(sizeof(pointer) == sizeof(uintptr_t));
+ VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(uintptr_t));
uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
size_t align_step = (alignment - pointer_raw) % alignment;
- ASSERT((pointer_raw + align_step) % alignment == 0);
+ VIXL_ASSERT((pointer_raw + align_step) % alignment == 0);
return reinterpret_cast<T>(pointer_raw + align_step);
}
+// Decrement a pointer until it has the specified alignment.
+template<class T>
+T AlignDown(T pointer, size_t alignment) {
+ VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(uintptr_t));
+ uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
+ size_t align_step = pointer_raw % alignment;
+ VIXL_ASSERT((pointer_raw - align_step) % alignment == 0);
+ return reinterpret_cast<T>(pointer_raw - align_step);
+}
+
} // namespace vixl
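
Finally, a usage sketch for the alignment helpers: AlignUp rounds a pointer up to the next multiple of the alignment, and the new AlignDown rounds it down. The arithmetic mirrors the templates above (minus the asserts); the *Sketch names, pointer value, and alignment in main() are arbitrary illustrations, not libvixl code.

#include <cstddef>
#include <cstdint>
#include <cstdio>

template<class T>
T AlignUpSketch(T pointer, size_t alignment) {
  uintptr_t raw = reinterpret_cast<uintptr_t>(pointer);
  size_t step = (alignment - raw) % alignment;  // distance to the next boundary
  return reinterpret_cast<T>(raw + step);
}

template<class T>
T AlignDownSketch(T pointer, size_t alignment) {
  uintptr_t raw = reinterpret_cast<uintptr_t>(pointer);
  return reinterpret_cast<T>(raw - (raw % alignment));  // strip the remainder
}

int main() {
  char* p = reinterpret_cast<char*>(0x1003);
  printf("%p %p\n",
         static_cast<void*>(AlignUpSketch(p, 16)),     // expected 0x1010
         static_cast<void*>(AlignDownSketch(p, 16)));  // expected 0x1000
  return 0;
}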