-rw-r--r--  disas/arm-a64.cc                      |    9
-rw-r--r--  disas/libvixl/README                  |    2
-rw-r--r--  disas/libvixl/a64/assembler-a64.h     |  290
-rw-r--r--  disas/libvixl/a64/constants-a64.h     |   61
-rw-r--r--  disas/libvixl/a64/decoder-a64.h       |    2
-rw-r--r--  disas/libvixl/a64/disasm-a64.cc       |  142
-rw-r--r--  disas/libvixl/a64/disasm-a64.h        |   48
-rw-r--r--  disas/libvixl/a64/instructions-a64.cc |   63
-rw-r--r--  disas/libvixl/a64/instructions-a64.h  |  110
-rw-r--r--  disas/libvixl/globals.h               |    2
-rw-r--r--  disas/libvixl/utils.cc                |   13
-rw-r--r--  disas/libvixl/utils.h                 |   14
-rw-r--r--  hw/arm/armv7m.c                       |   39
-rw-r--r--  hw/arm/boot.c                         |   22
-rw-r--r--  hw/arm/stellaris.c                    |   27
-rw-r--r--  hw/arm/virt.c                         |   37
-rw-r--r--  include/exec/cpu_ldst.h               |   28
-rw-r--r--  include/hw/arm/arm.h                  |    3
-rw-r--r--  target-arm/cpu.c                      |   33
-rw-r--r--  target-arm/cpu.h                      |  138
-rw-r--r--  target-arm/helper-a64.c               |   18
-rw-r--r--  target-arm/helper.c                   |  818
-rw-r--r--  target-arm/kvm64.c                    |   13
-rw-r--r--  target-arm/translate-a64.c            |   24
-rw-r--r--  target-arm/translate.c                |   35
-rw-r--r--  target-arm/translate.h                |    3
26 files changed, 1427 insertions(+), 567 deletions(-)
diff --git a/disas/arm-a64.cc b/disas/arm-a64.cc
index ca29f6f253..e04f946ca3 100644
--- a/disas/arm-a64.cc
+++ b/disas/arm-a64.cc
@@ -67,7 +67,8 @@ static void vixl_init(FILE *f) {
int print_insn_arm_a64(uint64_t addr, disassemble_info *info)
{
uint8_t bytes[INSN_SIZE];
- uint32_t instr;
+ uint32_t instrval;
+ const Instruction *instr;
int status;
status = info->read_memory_func(addr, bytes, INSN_SIZE, info);
@@ -80,8 +81,10 @@ int print_insn_arm_a64(uint64_t addr, disassemble_info *info)
vixl_init(info->stream);
}
- instr = bytes[0] | bytes[1] << 8 | bytes[2] << 16 | bytes[3] << 24;
- vixl_decoder->Decode(reinterpret_cast<Instruction*>(&instr));
+ instrval = bytes[0] | bytes[1] << 8 | bytes[2] << 16 | bytes[3] << 24;
+ instr = reinterpret_cast<const Instruction *>(&instrval);
+ vixl_disasm->MapCodeAddress(addr, instr);
+ vixl_decoder->Decode(instr);
return INSN_SIZE;
}
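
The pattern above: copy the instruction word into a host-side buffer, tell the disassembler which guest address that buffer stands for, then decode. A minimal C++ sketch of an equivalent caller (the decoder/disassembler wiring is an assumption for illustration, not part of the patch):

    #include <cstdint>
    #include "a64/decoder-a64.h"
    #include "a64/disasm-a64.h"

    // Decode one A64 instruction held in a host-side word, printing
    // branch and literal targets relative to its guest address.
    static void disas_one(vixl::Decoder *decoder, vixl::Disassembler *disasm,
                          uint64_t guest_addr, uint32_t insnval)
    {
        const vixl::Instruction *instr =
            reinterpret_cast<const vixl::Instruction *>(&insnval);
        disasm->MapCodeAddress(guest_addr, instr);  // host buffer -> guest addr
        decoder->Decode(instr);                     // visitors print via disasm
    }
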
diff --git a/disas/libvixl/README b/disas/libvixl/README
index cba31b458b..58db41c67c 100644
--- a/disas/libvixl/README
+++ b/disas/libvixl/README
@@ -2,7 +2,7 @@
The code in this directory is a subset of libvixl:
https://github.com/armvixl/vixl
(specifically, it is the set of files needed for disassembly only,
-taken from libvixl 1.6).
+taken from libvixl 1.7).
Bugfixes should preferably be sent upstream initially.
The disassembler does not currently support the entire A64 instruction
diff --git a/disas/libvixl/a64/assembler-a64.h b/disas/libvixl/a64/assembler-a64.h
index 16a704b7d4..35aaf20f72 100644
--- a/disas/libvixl/a64/assembler-a64.h
+++ b/disas/libvixl/a64/assembler-a64.h
@@ -151,21 +151,21 @@ class CPURegister {
return Aliases(other) && (size_ == other.size_);
}
- inline bool IsZero() const {
+ bool IsZero() const {
VIXL_ASSERT(IsValid());
return IsRegister() && (code_ == kZeroRegCode);
}
- inline bool IsSP() const {
+ bool IsSP() const {
VIXL_ASSERT(IsValid());
return IsRegister() && (code_ == kSPRegInternalCode);
}
- inline bool IsRegister() const {
+ bool IsRegister() const {
return type_ == kRegister;
}
- inline bool IsFPRegister() const {
+ bool IsFPRegister() const {
return type_ == kFPRegister;
}
@@ -179,7 +179,7 @@ class CPURegister {
const FPRegister& S() const;
const FPRegister& D() const;
- inline bool IsSameSizeAndType(const CPURegister& other) const {
+ bool IsSameSizeAndType(const CPURegister& other) const {
return (size_ == other.size_) && (type_ == other.type_);
}
@@ -198,7 +198,7 @@ class CPURegister {
class Register : public CPURegister {
public:
Register() : CPURegister() {}
- inline explicit Register(const CPURegister& other)
+ explicit Register(const CPURegister& other)
: CPURegister(other.code(), other.size(), other.type()) {
VIXL_ASSERT(IsValidRegister());
}
@@ -213,10 +213,6 @@ class Register : public CPURegister {
static const Register& WRegFromCode(unsigned code);
static const Register& XRegFromCode(unsigned code);
- // V8 compatibility.
- static const int kNumRegisters = kNumberOfRegisters;
- static const int kNumAllocatableRegisters = kNumberOfRegisters - 1;
-
private:
static const Register wregisters[];
static const Register xregisters[];
@@ -225,12 +221,12 @@ class Register : public CPURegister {
class FPRegister : public CPURegister {
public:
- inline FPRegister() : CPURegister() {}
- inline explicit FPRegister(const CPURegister& other)
+ FPRegister() : CPURegister() {}
+ explicit FPRegister(const CPURegister& other)
: CPURegister(other.code(), other.size(), other.type()) {
VIXL_ASSERT(IsValidFPRegister());
}
- inline FPRegister(unsigned code, unsigned size)
+ FPRegister(unsigned code, unsigned size)
: CPURegister(code, size, kFPRegister) {}
bool IsValid() const {
@@ -241,10 +237,6 @@ class FPRegister : public CPURegister {
static const FPRegister& SRegFromCode(unsigned code);
static const FPRegister& DRegFromCode(unsigned code);
- // V8 compatibility.
- static const int kNumRegisters = kNumberOfFPRegisters;
- static const int kNumAllocatableRegisters = kNumberOfFPRegisters - 1;
-
private:
static const FPRegister sregisters[];
static const FPRegister dregisters[];
@@ -312,23 +304,23 @@ bool AreSameSizeAndType(const CPURegister& reg1,
// Lists of registers.
class CPURegList {
public:
- inline explicit CPURegList(CPURegister reg1,
- CPURegister reg2 = NoCPUReg,
- CPURegister reg3 = NoCPUReg,
- CPURegister reg4 = NoCPUReg)
+ explicit CPURegList(CPURegister reg1,
+ CPURegister reg2 = NoCPUReg,
+ CPURegister reg3 = NoCPUReg,
+ CPURegister reg4 = NoCPUReg)
: list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
size_(reg1.size()), type_(reg1.type()) {
VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
VIXL_ASSERT(IsValid());
}
- inline CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
+ CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
: list_(list), size_(size), type_(type) {
VIXL_ASSERT(IsValid());
}
- inline CPURegList(CPURegister::RegisterType type, unsigned size,
- unsigned first_reg, unsigned last_reg)
+ CPURegList(CPURegister::RegisterType type, unsigned size,
+ unsigned first_reg, unsigned last_reg)
: size_(size), type_(type) {
VIXL_ASSERT(((type == CPURegister::kRegister) &&
(last_reg < kNumberOfRegisters)) ||
@@ -340,7 +332,7 @@ class CPURegList {
VIXL_ASSERT(IsValid());
}
- inline CPURegister::RegisterType type() const {
+ CPURegister::RegisterType type() const {
VIXL_ASSERT(IsValid());
return type_;
}
@@ -366,13 +358,13 @@ class CPURegList {
}
// Variants of Combine and Remove which take a single register.
- inline void Combine(const CPURegister& other) {
+ void Combine(const CPURegister& other) {
VIXL_ASSERT(other.type() == type_);
VIXL_ASSERT(other.size() == size_);
Combine(other.code());
}
- inline void Remove(const CPURegister& other) {
+ void Remove(const CPURegister& other) {
VIXL_ASSERT(other.type() == type_);
VIXL_ASSERT(other.size() == size_);
Remove(other.code());
@@ -380,24 +372,51 @@ class CPURegList {
// Variants of Combine and Remove which take a single register by its code;
// the type and size of the register is inferred from this list.
- inline void Combine(int code) {
+ void Combine(int code) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
list_ |= (UINT64_C(1) << code);
}
- inline void Remove(int code) {
+ void Remove(int code) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
list_ &= ~(UINT64_C(1) << code);
}
- inline RegList list() const {
+ static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
+ VIXL_ASSERT(list_1.type_ == list_2.type_);
+ VIXL_ASSERT(list_1.size_ == list_2.size_);
+ return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
+ }
+ static CPURegList Union(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3);
+ static CPURegList Union(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3,
+ const CPURegList& list_4);
+
+ static CPURegList Intersection(const CPURegList& list_1,
+ const CPURegList& list_2) {
+ VIXL_ASSERT(list_1.type_ == list_2.type_);
+ VIXL_ASSERT(list_1.size_ == list_2.size_);
+ return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
+ }
+ static CPURegList Intersection(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3);
+ static CPURegList Intersection(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3,
+ const CPURegList& list_4);
+
+ RegList list() const {
VIXL_ASSERT(IsValid());
return list_;
}
- inline void set_list(RegList new_list) {
+ void set_list(RegList new_list) {
VIXL_ASSERT(IsValid());
list_ = new_list;
}
@@ -417,38 +436,38 @@ class CPURegList {
static CPURegList GetCallerSaved(unsigned size = kXRegSize);
static CPURegList GetCallerSavedFP(unsigned size = kDRegSize);
- inline bool IsEmpty() const {
+ bool IsEmpty() const {
VIXL_ASSERT(IsValid());
return list_ == 0;
}
- inline bool IncludesAliasOf(const CPURegister& other) const {
+ bool IncludesAliasOf(const CPURegister& other) const {
VIXL_ASSERT(IsValid());
return (type_ == other.type()) && ((other.Bit() & list_) != 0);
}
- inline bool IncludesAliasOf(int code) const {
+ bool IncludesAliasOf(int code) const {
VIXL_ASSERT(IsValid());
return ((code & list_) != 0);
}
- inline int Count() const {
+ int Count() const {
VIXL_ASSERT(IsValid());
return CountSetBits(list_, kRegListSizeInBits);
}
- inline unsigned RegisterSizeInBits() const {
+ unsigned RegisterSizeInBits() const {
VIXL_ASSERT(IsValid());
return size_;
}
- inline unsigned RegisterSizeInBytes() const {
+ unsigned RegisterSizeInBytes() const {
int size_in_bits = RegisterSizeInBits();
VIXL_ASSERT((size_in_bits % 8) == 0);
return size_in_bits / 8;
}
- inline unsigned TotalSizeInBytes() const {
+ unsigned TotalSizeInBytes() const {
VIXL_ASSERT(IsValid());
return RegisterSizeInBytes() * Count();
}
@@ -587,8 +606,10 @@ class Label {
VIXL_ASSERT(!IsLinked() || IsBound());
}
- inline bool IsBound() const { return location_ >= 0; }
- inline bool IsLinked() const { return !links_.empty(); }
+ bool IsBound() const { return location_ >= 0; }
+ bool IsLinked() const { return !links_.empty(); }
+
+ ptrdiff_t location() const { return location_; }
private:
// The list of linked instructions is stored in a stack-like structure. We
@@ -647,22 +668,20 @@ class Label {
std::stack<ptrdiff_t> * links_extended_;
};
- inline ptrdiff_t location() const { return location_; }
-
- inline void Bind(ptrdiff_t location) {
+ void Bind(ptrdiff_t location) {
// Labels can only be bound once.
VIXL_ASSERT(!IsBound());
location_ = location;
}
- inline void AddLink(ptrdiff_t instruction) {
+ void AddLink(ptrdiff_t instruction) {
// If a label is bound, the assembler already has the information it needs
// to write the instruction, so there is no need to add it to links_.
VIXL_ASSERT(!IsBound());
links_.push(instruction);
}
- inline ptrdiff_t GetAndRemoveNextLink() {
+ ptrdiff_t GetAndRemoveNextLink() {
VIXL_ASSERT(IsLinked());
ptrdiff_t link = links_.top();
links_.pop();
@@ -845,14 +864,14 @@ class Assembler {
// Return the address of an offset in the buffer.
template <typename T>
- inline T GetOffsetAddress(ptrdiff_t offset) {
+ T GetOffsetAddress(ptrdiff_t offset) {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return buffer_->GetOffsetAddress<T>(offset);
}
// Return the address of a bound label.
template <typename T>
- inline T GetLabelAddress(const Label * label) {
+ T GetLabelAddress(const Label * label) {
VIXL_ASSERT(label->IsBound());
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return GetOffsetAddress<T>(label->location());
@@ -860,14 +879,14 @@ class Assembler {
// Return the address of the cursor.
template <typename T>
- inline T GetCursorAddress() {
+ T GetCursorAddress() {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return GetOffsetAddress<T>(CursorOffset());
}
// Return the address of the start of the buffer.
template <typename T>
- inline T GetStartAddress() {
+ T GetStartAddress() {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return GetOffsetAddress<T>(0);
}
@@ -1074,20 +1093,20 @@ class Assembler {
// Bfm aliases.
// Bitfield insert.
- inline void bfi(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
}
// Bitfield extract and insert low.
- inline void bfxil(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
bfm(rd, rn, lsb, lsb + width - 1);
@@ -1095,92 +1114,92 @@ class Assembler {
// Sbfm aliases.
// Arithmetic shift right.
- inline void asr(const Register& rd, const Register& rn, unsigned shift) {
+ void asr(const Register& rd, const Register& rn, unsigned shift) {
VIXL_ASSERT(shift < rd.size());
sbfm(rd, rn, shift, rd.size() - 1);
}
// Signed bitfield insert with zero at right.
- inline void sbfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
}
// Signed bitfield extract.
- inline void sbfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
sbfm(rd, rn, lsb, lsb + width - 1);
}
// Signed extend byte.
- inline void sxtb(const Register& rd, const Register& rn) {
+ void sxtb(const Register& rd, const Register& rn) {
sbfm(rd, rn, 0, 7);
}
// Signed extend halfword.
- inline void sxth(const Register& rd, const Register& rn) {
+ void sxth(const Register& rd, const Register& rn) {
sbfm(rd, rn, 0, 15);
}
// Signed extend word.
- inline void sxtw(const Register& rd, const Register& rn) {
+ void sxtw(const Register& rd, const Register& rn) {
sbfm(rd, rn, 0, 31);
}
// Ubfm aliases.
// Logical shift left.
- inline void lsl(const Register& rd, const Register& rn, unsigned shift) {
+ void lsl(const Register& rd, const Register& rn, unsigned shift) {
unsigned reg_size = rd.size();
VIXL_ASSERT(shift < reg_size);
ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
}
// Logical shift right.
- inline void lsr(const Register& rd, const Register& rn, unsigned shift) {
+ void lsr(const Register& rd, const Register& rn, unsigned shift) {
VIXL_ASSERT(shift < rd.size());
ubfm(rd, rn, shift, rd.size() - 1);
}
// Unsigned bitfield insert with zero at right.
- inline void ubfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
}
// Unsigned bitfield extract.
- inline void ubfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
ubfm(rd, rn, lsb, lsb + width - 1);
}
// Unsigned extend byte.
- inline void uxtb(const Register& rd, const Register& rn) {
+ void uxtb(const Register& rd, const Register& rn) {
ubfm(rd, rn, 0, 7);
}
// Unsigned extend halfword.
- inline void uxth(const Register& rd, const Register& rn) {
+ void uxth(const Register& rd, const Register& rn) {
ubfm(rd, rn, 0, 15);
}
// Unsigned extend word.
- inline void uxtw(const Register& rd, const Register& rn) {
+ void uxtw(const Register& rd, const Register& rn) {
ubfm(rd, rn, 0, 31);
}
@@ -1230,7 +1249,7 @@ class Assembler {
void cneg(const Register& rd, const Register& rn, Condition cond);
// Rotate right.
- inline void ror(const Register& rd, const Register& rs, unsigned shift) {
+ void ror(const Register& rd, const Register& rs, unsigned shift) {
extr(rd, rs, rs, shift);
}
@@ -1495,6 +1514,19 @@ class Assembler {
// Load-acquire register.
void ldar(const Register& rt, const MemOperand& src);
+ // Prefetch memory.
+ void prfm(PrefetchOperation op, const MemOperand& addr,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Prefetch memory (with unscaled offset).
+ void prfum(PrefetchOperation op, const MemOperand& addr,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Prefetch memory in the literal pool.
+ void prfm(PrefetchOperation op, RawLiteral* literal);
+
+ // Prefetch from pc + imm19 << 2.
+ void prfm(PrefetchOperation op, int imm19);
// Move instructions. The default shift of -1 indicates that the move
// instruction will calculate an appropriate 16-bit immediate and left shift
@@ -1638,12 +1670,21 @@ class Assembler {
// FP round to integer (nearest with ties to away).
void frinta(const FPRegister& fd, const FPRegister& fn);
+ // FP round to integer (implicit rounding).
+ void frinti(const FPRegister& fd, const FPRegister& fn);
+
// FP round to integer (toward minus infinity).
void frintm(const FPRegister& fd, const FPRegister& fn);
// FP round to integer (nearest with ties to even).
void frintn(const FPRegister& fd, const FPRegister& fn);
+ // FP round to integer (toward plus infinity).
+ void frintp(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (exact, implicit rounding).
+ void frintx(const FPRegister& fd, const FPRegister& fn);
+
// FP round to integer (towards zero).
void frintz(const FPRegister& fd, const FPRegister& fn);
@@ -1705,16 +1746,16 @@ class Assembler {
// Emit generic instructions.
// Emit raw instructions into the instruction stream.
- inline void dci(Instr raw_inst) { Emit(raw_inst); }
+ void dci(Instr raw_inst) { Emit(raw_inst); }
// Emit 32 bits of data into the instruction stream.
- inline void dc32(uint32_t data) {
+ void dc32(uint32_t data) {
VIXL_ASSERT(buffer_monitor_ > 0);
buffer_->Emit32(data);
}
// Emit 64 bits of data into the instruction stream.
- inline void dc64(uint64_t data) {
+ void dc64(uint64_t data) {
VIXL_ASSERT(buffer_monitor_ > 0);
buffer_->Emit64(data);
}
@@ -1849,14 +1890,14 @@ class Assembler {
}
}
- static inline Instr ImmS(unsigned imms, unsigned reg_size) {
+ static Instr ImmS(unsigned imms, unsigned reg_size) {
VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
((reg_size == kWRegSize) && is_uint5(imms)));
USE(reg_size);
return imms << ImmS_offset;
}
- static inline Instr ImmR(unsigned immr, unsigned reg_size) {
+ static Instr ImmR(unsigned immr, unsigned reg_size) {
VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
((reg_size == kWRegSize) && is_uint5(immr)));
USE(reg_size);
@@ -1864,7 +1905,7 @@ class Assembler {
return immr << ImmR_offset;
}
- static inline Instr ImmSetBits(unsigned imms, unsigned reg_size) {
+ static Instr ImmSetBits(unsigned imms, unsigned reg_size) {
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
VIXL_ASSERT(is_uint6(imms));
VIXL_ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
@@ -1872,7 +1913,7 @@ class Assembler {
return imms << ImmSetBits_offset;
}
- static inline Instr ImmRotate(unsigned immr, unsigned reg_size) {
+ static Instr ImmRotate(unsigned immr, unsigned reg_size) {
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
((reg_size == kWRegSize) && is_uint5(immr)));
@@ -1880,12 +1921,12 @@ class Assembler {
return immr << ImmRotate_offset;
}
- static inline Instr ImmLLiteral(int imm19) {
+ static Instr ImmLLiteral(int imm19) {
VIXL_ASSERT(is_int19(imm19));
return truncate_to_int19(imm19) << ImmLLiteral_offset;
}
- static inline Instr BitN(unsigned bitn, unsigned reg_size) {
+ static Instr BitN(unsigned bitn, unsigned reg_size) {
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0));
USE(reg_size);
@@ -1943,6 +1984,11 @@ class Assembler {
return shift_amount << ImmShiftLS_offset;
}
+ static Instr ImmPrefetchOperation(int imm5) {
+ VIXL_ASSERT(is_uint5(imm5));
+ return imm5 << ImmPrefetchOperation_offset;
+ }
+
static Instr ImmException(int imm16) {
VIXL_ASSERT(is_uint16(imm16));
return imm16 << ImmException_offset;
@@ -2003,12 +2049,32 @@ class Assembler {
return scale << FPScale_offset;
}
+ // Immediate field checking helpers.
+ static bool IsImmAddSub(int64_t immediate);
+ static bool IsImmConditionalCompare(int64_t immediate);
+ static bool IsImmFP32(float imm);
+ static bool IsImmFP64(double imm);
+ static bool IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n = NULL,
+ unsigned* imm_s = NULL,
+ unsigned* imm_r = NULL);
+ static bool IsImmLSPair(int64_t offset, LSDataSize size);
+ static bool IsImmLSScaled(int64_t offset, LSDataSize size);
+ static bool IsImmLSUnscaled(int64_t offset);
+ static bool IsImmMovn(uint64_t imm, unsigned reg_size);
+ static bool IsImmMovz(uint64_t imm, unsigned reg_size);
+
// Size of the code generated since label to the current position.
size_t SizeOfCodeGeneratedSince(Label* label) const {
VIXL_ASSERT(label->IsBound());
return buffer_->OffsetFrom(label->location());
}
+ size_t SizeOfCodeGenerated() const {
+ return buffer_->CursorOffset();
+ }
+
size_t BufferCapacity() const { return buffer_->capacity(); }
size_t RemainingBufferSpace() const { return buffer_->RemainingBytes(); }
@@ -2025,7 +2091,7 @@ class Assembler {
}
}
-#ifdef DEBUG
+#ifdef VIXL_DEBUG
void AcquireBuffer() {
VIXL_ASSERT(buffer_monitor_ >= 0);
buffer_monitor_++;
@@ -2037,16 +2103,16 @@ class Assembler {
}
#endif
- inline PositionIndependentCodeOption pic() {
+ PositionIndependentCodeOption pic() const {
return pic_;
}
- inline bool AllowPageOffsetDependentCode() {
+ bool AllowPageOffsetDependentCode() const {
return (pic() == PageOffsetDependentCode) ||
(pic() == PositionDependentCode);
}
- static inline const Register& AppropriateZeroRegFor(const CPURegister& reg) {
+ static const Register& AppropriateZeroRegFor(const CPURegister& reg) {
return reg.Is64Bits() ? xzr : wzr;
}
@@ -2056,14 +2122,15 @@ class Assembler {
const MemOperand& addr,
LoadStoreOp op,
LoadStoreScalingOption option = PreferScaledOffset);
- static bool IsImmLSUnscaled(int64_t offset);
- static bool IsImmLSScaled(int64_t offset, LSDataSize size);
void LoadStorePair(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& addr,
LoadStorePairOp op);
- static bool IsImmLSPair(int64_t offset, LSDataSize size);
+
+ void Prefetch(PrefetchOperation op,
+ const MemOperand& addr,
+ LoadStoreScalingOption option = PreferScaledOffset);
// TODO(all): The third parameter should be passed by reference but gcc 4.8.2
// reports a bogus uninitialised warning then.
@@ -2077,18 +2144,12 @@ class Assembler {
unsigned imm_s,
unsigned imm_r,
LogicalOp op);
- static bool IsImmLogical(uint64_t value,
- unsigned width,
- unsigned* n = NULL,
- unsigned* imm_s = NULL,
- unsigned* imm_r = NULL);
void ConditionalCompare(const Register& rn,
const Operand& operand,
StatusFlags nzcv,
Condition cond,
ConditionalCompareOp op);
- static bool IsImmConditionalCompare(int64_t immediate);
void AddSubWithCarry(const Register& rd,
const Register& rn,
@@ -2096,8 +2157,6 @@ class Assembler {
FlagsUpdate S,
AddSubWithCarryOp op);
- static bool IsImmFP32(float imm);
- static bool IsImmFP64(double imm);
// Functions for emulating operands not directly supported by the instruction
// set.
@@ -2115,7 +2174,6 @@ class Assembler {
const Operand& operand,
FlagsUpdate S,
AddSubOp op);
- static bool IsImmAddSub(int64_t immediate);
// Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
// registers. Only simple loads are supported; sign- and zero-extension (such
@@ -2180,6 +2238,12 @@ class Assembler {
const FPRegister& fa,
FPDataProcessing3SourceOp op);
+ // Encode the specified MemOperand for the specified access size and scaling
+ // preference.
+ Instr LoadStoreMemOperand(const MemOperand& addr,
+ LSDataSize size,
+ LoadStoreScalingOption option);
+
// Link the current (not-yet-emitted) instruction to the specified label, then
// return an offset to be encoded in the instruction. If the label is not yet
// bound, an offset of 0 is returned.
@@ -2205,7 +2269,7 @@ class Assembler {
CodeBuffer* buffer_;
PositionIndependentCodeOption pic_;
-#ifdef DEBUG
+#ifdef VIXL_DEBUG
int64_t buffer_monitor_;
#endif
};
@@ -2239,7 +2303,7 @@ class CodeBufferCheckScope {
AssertPolicy assert_policy = kMaximumSize)
: assm_(assm) {
if (check_policy == kCheck) assm->EnsureSpaceFor(size);
-#ifdef DEBUG
+#ifdef VIXL_DEBUG
assm->bind(&start_);
size_ = size;
assert_policy_ = assert_policy;
@@ -2251,7 +2315,7 @@ class CodeBufferCheckScope {
// This is a shortcut for CodeBufferCheckScope(assm, 0, kNoCheck, kNoAssert).
explicit CodeBufferCheckScope(Assembler* assm) : assm_(assm) {
-#ifdef DEBUG
+#ifdef VIXL_DEBUG
size_ = 0;
assert_policy_ = kNoAssert;
assm->AcquireBuffer();
@@ -2259,7 +2323,7 @@ class CodeBufferCheckScope {
}
~CodeBufferCheckScope() {
-#ifdef DEBUG
+#ifdef VIXL_DEBUG
assm_->ReleaseBuffer();
switch (assert_policy_) {
case kNoAssert: break;
@@ -2277,7 +2341,7 @@ class CodeBufferCheckScope {
protected:
Assembler* assm_;
-#ifdef DEBUG
+#ifdef VIXL_DEBUG
Label start_;
size_t size_;
AssertPolicy assert_policy_;
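
Among the additions to assembler-a64.h, CPURegList gains Union and Intersection; both require the operands to share a register type and size. A short usage sketch:

    #include "a64/assembler-a64.h"

    void reglist_demo()
    {
        using namespace vixl;
        CPURegList a(CPURegister::kRegister, kXRegSize, 0, 3);  // x0-x3
        CPURegList b(CPURegister::kRegister, kXRegSize, 2, 5);  // x2-x5
        CPURegList u = CPURegList::Union(a, b);                 // x0-x5
        CPURegList n = CPURegList::Intersection(a, b);          // x2-x3
        VIXL_ASSERT((u.Count() == 6) && (n.Count() == 2));
    }
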
diff --git a/disas/libvixl/a64/constants-a64.h b/disas/libvixl/a64/constants-a64.h
index 7a14f85f59..bc1a2c4b9b 100644
--- a/disas/libvixl/a64/constants-a64.h
+++ b/disas/libvixl/a64/constants-a64.h
@@ -31,12 +31,6 @@ namespace vixl {
const unsigned kNumberOfRegisters = 32;
const unsigned kNumberOfFPRegisters = 32;
-// Callee saved registers are x21-x30(lr).
-const int kNumberOfCalleeSavedRegisters = 10;
-const int kFirstCalleeSavedRegisterIndex = 21;
-// Callee saved FP registers are d8-d15.
-const int kNumberOfCalleeSavedFPRegisters = 8;
-const int kFirstCalleeSavedFPRegisterIndex = 8;
#define REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
@@ -53,7 +47,6 @@ V_(Ra, 14, 10, Bits) /* Third source register. */ \
V_(Rt, 4, 0, Bits) /* Load/store register. */ \
V_(Rt2, 14, 10, Bits) /* Load/store second register. */ \
V_(Rs, 20, 16, Bits) /* Exclusive access status. */ \
-V_(PrefetchMode, 4, 0, Bits) \
\
/* Common bits */ \
V_(SixtyFourBits, 31, 31, Bits) \
@@ -109,6 +102,10 @@ V_(ImmLSUnsigned, 21, 10, Bits) \
V_(ImmLSPair, 21, 15, SignedBits) \
V_(SizeLS, 31, 30, Bits) \
V_(ImmShiftLS, 12, 12, Bits) \
+V_(ImmPrefetchOperation, 4, 0, Bits) \
+V_(PrefetchHint, 4, 3, Bits) \
+V_(PrefetchTarget, 2, 1, Bits) \
+V_(PrefetchStream, 0, 0, Bits) \
\
/* Other immediates */ \
V_(ImmUncondBranch, 25, 0, SignedBits) \
@@ -269,6 +266,29 @@ enum BarrierType {
BarrierAll = 3
};
+enum PrefetchOperation {
+ PLDL1KEEP = 0x00,
+ PLDL1STRM = 0x01,
+ PLDL2KEEP = 0x02,
+ PLDL2STRM = 0x03,
+ PLDL3KEEP = 0x04,
+ PLDL3STRM = 0x05,
+
+ PLIL1KEEP = 0x08,
+ PLIL1STRM = 0x09,
+ PLIL2KEEP = 0x0a,
+ PLIL2STRM = 0x0b,
+ PLIL3KEEP = 0x0c,
+ PLIL3STRM = 0x0d,
+
+ PSTL1KEEP = 0x10,
+ PSTL1STRM = 0x11,
+ PSTL2KEEP = 0x12,
+ PSTL2STRM = 0x13,
+ PSTL3KEEP = 0x14,
+ PSTL3STRM = 0x15
+};
+
// System/special register names.
// This information is not encoded as one field but as the concatenation of
// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
@@ -605,6 +625,12 @@ enum LoadStoreAnyOp {
LoadStoreAnyFixed = 0x08000000
};
+// Any load pair or store pair.
+enum LoadStorePairAnyOp {
+ LoadStorePairAnyFMask = 0x3a000000,
+ LoadStorePairAnyFixed = 0x28000000
+};
+
#define LOAD_STORE_PAIR_OP_LIST(V) \
V(STP, w, 0x00000000), \
V(LDP, w, 0x00400000), \
@@ -703,27 +729,28 @@ enum LoadLiteralOp {
V(LD, R, d, 0xC4400000)
+// Load/store (post, pre, offset and unsigned.)
+enum LoadStoreOp {
+ LoadStoreOpMask = 0xC4C00000,
+ #define LOAD_STORE(A, B, C, D) \
+ A##B##_##C = D
+ LOAD_STORE_OP_LIST(LOAD_STORE),
+ #undef LOAD_STORE
+ PRFM = 0xC0800000
+};
+
// Load/store unscaled offset.
enum LoadStoreUnscaledOffsetOp {
LoadStoreUnscaledOffsetFixed = 0x38000000,
LoadStoreUnscaledOffsetFMask = 0x3B200C00,
LoadStoreUnscaledOffsetMask = 0xFFE00C00,
+ PRFUM = LoadStoreUnscaledOffsetFixed | PRFM,
#define LOAD_STORE_UNSCALED(A, B, C, D) \
A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
#undef LOAD_STORE_UNSCALED
};
-// Load/store (post, pre, offset and unsigned.)
-enum LoadStoreOp {
- LoadStoreOpMask = 0xC4C00000,
- #define LOAD_STORE(A, B, C, D) \
- A##B##_##C = D
- LOAD_STORE_OP_LIST(LOAD_STORE),
- #undef LOAD_STORE
- PRFM = 0xC0800000
-};
-
// Load/store post index.
enum LoadStorePostIndex {
LoadStorePostIndexFixed = 0x38000400,
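
The new ImmPrefetchOperation/PrefetchHint/PrefetchTarget/PrefetchStream fields split the 5-bit prefetch operation into a hint (pld/pli/pst), a cache level and a retention policy. For example PSTL2STRM (0x13 = 0b10011):

    #include <cassert>

    void prefetch_fields_demo()
    {
        unsigned op     = 0x13;             // PSTL2STRM
        unsigned hint   = (op >> 3) & 0x3;  // PrefetchHint, bits [4:3]: 2 = pst
        unsigned target = (op >> 1) & 0x3;  // PrefetchTarget, bits [2:1]: 1 = L2
        unsigned stream = op & 0x1;         // PrefetchStream, bit [0]: 1 = strm
        assert((hint == 2) && (target == 1) && (stream == 1));
    }
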
diff --git a/disas/libvixl/a64/decoder-a64.h b/disas/libvixl/a64/decoder-a64.h
index 172594c89b..fd08d6c1f4 100644
--- a/disas/libvixl/a64/decoder-a64.h
+++ b/disas/libvixl/a64/decoder-a64.h
@@ -108,7 +108,7 @@ class DecoderVisitor {
}
private:
- VisitorConstness constness_;
+ const VisitorConstness constness_;
};
diff --git a/disas/libvixl/a64/disasm-a64.cc b/disas/libvixl/a64/disasm-a64.cc
index e4a74aa57c..f7bc2468bb 100644
--- a/disas/libvixl/a64/disasm-a64.cc
+++ b/disas/libvixl/a64/disasm-a64.cc
@@ -34,6 +34,7 @@ Disassembler::Disassembler() {
buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
buffer_pos_ = 0;
own_buffer_ = true;
+ code_address_offset_ = 0;
}
@@ -42,6 +43,7 @@ Disassembler::Disassembler(char* text_buffer, int buffer_size) {
buffer_ = text_buffer;
buffer_pos_ = 0;
own_buffer_ = false;
+ code_address_offset_ = 0;
}
@@ -739,9 +741,25 @@ void Disassembler::VisitMoveWideImmediate(const Instruction* instr) {
// shift calculation.
switch (instr->Mask(MoveWideImmediateMask)) {
case MOVN_w:
- case MOVN_x: mnemonic = "movn"; break;
+ case MOVN_x:
+ if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) {
+ if ((instr->SixtyFourBits() == 0) && (instr->ImmMoveWide() == 0xffff)) {
+ mnemonic = "movn";
+ } else {
+ mnemonic = "mov";
+ form = "'Rd, 'IMoveNeg";
+ }
+ } else {
+ mnemonic = "movn";
+ }
+ break;
case MOVZ_w:
- case MOVZ_x: mnemonic = "movz"; break;
+ case MOVZ_x:
+ if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0))
+ mnemonic = "mov";
+ else
+ mnemonic = "movz";
+ break;
case MOVK_w:
case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
default: VIXL_UNREACHABLE();
@@ -806,7 +824,7 @@ void Disassembler::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
#undef LS_UNSIGNEDOFFSET
- case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xn'ILU]";
+ case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xns'ILU]";
}
Format(instr, mnemonic, form);
}
@@ -833,6 +851,7 @@ void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
const char *form_x = "'Xt, ['Xns'ILS]";
const char *form_s = "'St, ['Xns'ILS]";
const char *form_d = "'Dt, ['Xns'ILS]";
+ const char *form_prefetch = "'PrefOp, ['Xns'ILS]";
switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
case STURB_w: mnemonic = "sturb"; break;
@@ -852,6 +871,7 @@ void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
case LDURSH_x: form = form_x; // Fall through.
case LDURSH_w: mnemonic = "ldursh"; break;
case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
+ case PRFUM: mnemonic = "prfum"; form = form_prefetch; break;
default: form = "(LoadStoreUnscaledOffset)";
}
Format(instr, mnemonic, form);
@@ -872,6 +892,11 @@ void Disassembler::VisitLoadLiteral(const Instruction* instr) {
form = "'Xt, 'ILLiteral 'LValue";
break;
}
+ case PRFM_lit: {
+ mnemonic = "prfm";
+ form = "'PrefOp, 'ILLiteral 'LValue";
+ break;
+ }
default: mnemonic = "unimplemented";
}
Format(instr, mnemonic, form);
@@ -1344,7 +1369,7 @@ void Disassembler::AppendPCRelativeOffsetToOutput(const Instruction* instr,
void Disassembler::AppendAddressToOutput(const Instruction* instr,
const void* addr) {
USE(instr);
- AppendToOutput("(addr %p)", addr);
+ AppendToOutput("(addr 0x%" PRIxPTR ")", reinterpret_cast<uintptr_t>(addr));
}
@@ -1360,6 +1385,40 @@ void Disassembler::AppendDataAddressToOutput(const Instruction* instr,
}
+void Disassembler::AppendCodeRelativeAddressToOutput(const Instruction* instr,
+ const void* addr) {
+ USE(instr);
+ int64_t rel_addr = CodeRelativeAddress(addr);
+ if (rel_addr >= 0) {
+ AppendToOutput("(addr 0x%" PRIx64 ")", rel_addr);
+ } else {
+ AppendToOutput("(addr -0x%" PRIx64 ")", -rel_addr);
+ }
+}
+
+
+void Disassembler::AppendCodeRelativeCodeAddressToOutput(
+ const Instruction* instr, const void* addr) {
+ AppendCodeRelativeAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendCodeRelativeDataAddressToOutput(
+ const Instruction* instr, const void* addr) {
+ AppendCodeRelativeAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::MapCodeAddress(int64_t base_address,
+ const Instruction* instr_address) {
+ set_code_address_offset(
+ base_address - reinterpret_cast<intptr_t>(instr_address));
+}
+int64_t Disassembler::CodeRelativeAddress(const void* addr) {
+ return reinterpret_cast<intptr_t>(addr) + code_address_offset();
+}
+
+
void Disassembler::Format(const Instruction* instr, const char* mnemonic,
const char* format) {
VIXL_ASSERT(mnemonic != NULL);
@@ -1486,16 +1545,20 @@ int Disassembler::SubstituteImmediateField(const Instruction* instr,
VIXL_ASSERT(format[0] == 'I');
switch (format[1]) {
- case 'M': { // IMoveImm or IMoveLSL.
- if (format[5] == 'I') {
- uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
- AppendToOutput("#0x%" PRIx64, imm);
- } else {
- VIXL_ASSERT(format[5] == 'L');
+ case 'M': { // IMoveImm, IMoveNeg or IMoveLSL.
+ if (format[5] == 'L') {
AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
if (instr->ShiftMoveWide() > 0) {
AppendToOutput(", lsl #%" PRId64, 16 * instr->ShiftMoveWide());
}
+ } else {
+ VIXL_ASSERT((format[5] == 'I') || (format[5] == 'N'));
+ uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
+ if (format[5] == 'N')
+ imm = ~imm;
+ if (!instr->SixtyFourBits())
+ imm &= UINT64_C(0xffffffff);
+ AppendToOutput("#0x%" PRIx64, imm);
}
return 8;
}
@@ -1634,14 +1697,31 @@ int Disassembler::SubstituteLiteralField(const Instruction* instr,
VIXL_ASSERT(strncmp(format, "LValue", 6) == 0);
USE(format);
+ const void * address = instr->LiteralAddress<const void *>();
switch (instr->Mask(LoadLiteralMask)) {
case LDR_w_lit:
case LDR_x_lit:
case LDRSW_x_lit:
case LDR_s_lit:
case LDR_d_lit:
- AppendDataAddressToOutput(instr, instr->LiteralAddress());
+ AppendCodeRelativeDataAddressToOutput(instr, address);
break;
+ case PRFM_lit: {
+ // Use the prefetch hint to decide how to print the address.
+ switch (instr->PrefetchHint()) {
+ case 0x0: // PLD: prefetch for load.
+ case 0x2: // PST: prepare for store.
+ AppendCodeRelativeDataAddressToOutput(instr, address);
+ break;
+ case 0x1: // PLI: preload instructions.
+ AppendCodeRelativeCodeAddressToOutput(instr, address);
+ break;
+ case 0x3: // Unallocated hint.
+ AppendCodeRelativeAddressToOutput(instr, address);
+ break;
+ }
+ break;
+ }
default:
VIXL_UNREACHABLE();
}
@@ -1701,17 +1781,22 @@ int Disassembler::SubstitutePCRelAddressField(const Instruction* instr,
(strcmp(format, "AddrPCRelPage") == 0)); // Used by `adrp`.
int64_t offset = instr->ImmPCRel();
- const Instruction * base = instr;
+ // Compute the target address based on the effective address (after applying
+ // code_address_offset). This is required for correct behaviour of adrp.
+ const Instruction* base = instr + code_address_offset();
if (format[9] == 'P') {
offset *= kPageSize;
base = AlignDown(base, kPageSize);
}
+ // Strip code_address_offset before printing, so we can use the
+ // semantically-correct AppendCodeRelativeAddressToOutput.
+ const void* target =
+ reinterpret_cast<const void*>(base + offset - code_address_offset());
- const void* target = reinterpret_cast<const void*>(base + offset);
AppendPCRelativeOffsetToOutput(instr, offset);
AppendToOutput(" ");
- AppendAddressToOutput(instr, target);
+ AppendCodeRelativeAddressToOutput(instr, target);
return 13;
}
@@ -1738,7 +1823,7 @@ int Disassembler::SubstituteBranchTargetField(const Instruction* instr,
AppendPCRelativeOffsetToOutput(instr, offset);
AppendToOutput(" ");
- AppendCodeAddressToOutput(instr, target_address);
+ AppendCodeRelativeCodeAddressToOutput(instr, target_address);
return 8;
}
@@ -1805,13 +1890,26 @@ int Disassembler::SubstitutePrefetchField(const Instruction* instr,
VIXL_ASSERT(format[0] == 'P');
USE(format);
- int prefetch_mode = instr->PrefetchMode();
-
- const char* ls = (prefetch_mode & 0x10) ? "st" : "ld";
- int level = (prefetch_mode >> 1) + 1;
- const char* ks = (prefetch_mode & 1) ? "strm" : "keep";
-
- AppendToOutput("p%sl%d%s", ls, level, ks);
+ static const char* hints[] = {"ld", "li", "st"};
+ static const char* stream_options[] = {"keep", "strm"};
+
+ unsigned hint = instr->PrefetchHint();
+ unsigned target = instr->PrefetchTarget() + 1;
+ unsigned stream = instr->PrefetchStream();
+
+ if ((hint >= (sizeof(hints) / sizeof(hints[0]))) || (target > 3)) {
+ // Unallocated prefetch operations.
+ int prefetch_mode = instr->ImmPrefetchOperation();
+ AppendToOutput("#0b%c%c%c%c%c",
+ (prefetch_mode & (1 << 4)) ? '1' : '0',
+ (prefetch_mode & (1 << 3)) ? '1' : '0',
+ (prefetch_mode & (1 << 2)) ? '1' : '0',
+ (prefetch_mode & (1 << 1)) ? '1' : '0',
+ (prefetch_mode & (1 << 0)) ? '1' : '0');
+ } else {
+ VIXL_ASSERT(stream < (sizeof(stream_options) / sizeof(stream_options[0])));
+ AppendToOutput("p%sl%d%s", hints[hint], target, stream_options[stream]);
+ }
return 6;
}
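
The address-mapping arithmetic is worth pinning down: MapCodeAddress() stores base_address minus the host address of the instruction, and CodeRelativeAddress() adds that offset back, so the decoded instruction itself reports base_address. A sketch matching the b #+0xcc example quoted in the disasm-a64.h comment below:

    #include <cassert>
    #include <cstdint>
    #include "a64/disasm-a64.h"

    void map_code_address_demo()
    {
        static const uint32_t word = 0x14000033;  // b #+0xcc
        const vixl::Instruction *instr = vixl::Instruction::CastConst(&word);
        vixl::Disassembler disasm;
        disasm.MapCodeAddress(INT64_C(0xb010200), instr);
        // (intptr_t)instr + (0xb010200 - (intptr_t)instr) == 0xb010200
        assert(disasm.CodeRelativeAddress(instr) == INT64_C(0xb010200));
    }
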
diff --git a/disas/libvixl/a64/disasm-a64.h b/disas/libvixl/a64/disasm-a64.h
index db043375c5..ddfe98be19 100644
--- a/disas/libvixl/a64/disasm-a64.h
+++ b/disas/libvixl/a64/disasm-a64.h
@@ -43,7 +43,7 @@ class Disassembler: public DecoderVisitor {
char* GetOutput();
// Declare all Visitor functions.
- #define DECLARE(A) void Visit##A(const Instruction* instr);
+ #define DECLARE(A) virtual void Visit##A(const Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
@@ -65,23 +65,45 @@ class Disassembler: public DecoderVisitor {
// Prints an address, in the general case. It can be code or data. This is
// used for example to print the target address of an ADR instruction.
- virtual void AppendAddressToOutput(const Instruction* instr,
- const void* addr);
+ virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
+ const void* addr);
// Prints the address of some code.
// This is used for example to print the target address of a branch to an
// immediate offset.
// A sub-class can for example override this method to lookup the address and
// print an appropriate name.
- virtual void AppendCodeAddressToOutput(const Instruction* instr,
- const void* addr);
+ virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
+ const void* addr);
// Prints the address of some data.
// This is used for example to print the source address of a load literal
// instruction.
+ virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr,
+ const void* addr);
+
+ // Same as the above, but for addresses that are not relative to the code
+ // buffer. They are currently not used by VIXL.
+ virtual void AppendAddressToOutput(const Instruction* instr,
+ const void* addr);
+ virtual void AppendCodeAddressToOutput(const Instruction* instr,
+ const void* addr);
virtual void AppendDataAddressToOutput(const Instruction* instr,
const void* addr);
+ public:
+ // Get/Set the offset that should be added to code addresses when printing
+ // code-relative addresses in the AppendCodeRelative<Type>AddressToOutput()
+ // helpers.
+ // Below is an example of how a branch immediate instruction in memory at
+ // address 0xb010200 would disassemble with different offsets.
+ // Base address | Disassembly
+ // 0x0 | 0xb010200: b #+0xcc (addr 0xb0102cc)
+ // 0x10000 | 0xb000200: b #+0xcc (addr 0xb0002cc)
+ // 0xb010200 | 0x0: b #+0xcc (addr 0xcc)
+ void MapCodeAddress(int64_t base_address, const Instruction* instr_address);
+ int64_t CodeRelativeAddress(const void* instr);
+
private:
void Format(
const Instruction* instr, const char* mnemonic, const char* format);
@@ -101,32 +123,40 @@ class Disassembler: public DecoderVisitor {
int SubstitutePrefetchField(const Instruction* instr, const char* format);
int SubstituteBarrierField(const Instruction* instr, const char* format);
- inline bool RdIsZROrSP(const Instruction* instr) const {
+ bool RdIsZROrSP(const Instruction* instr) const {
return (instr->Rd() == kZeroRegCode);
}
- inline bool RnIsZROrSP(const Instruction* instr) const {
+ bool RnIsZROrSP(const Instruction* instr) const {
return (instr->Rn() == kZeroRegCode);
}
- inline bool RmIsZROrSP(const Instruction* instr) const {
+ bool RmIsZROrSP(const Instruction* instr) const {
return (instr->Rm() == kZeroRegCode);
}
- inline bool RaIsZROrSP(const Instruction* instr) const {
+ bool RaIsZROrSP(const Instruction* instr) const {
return (instr->Ra() == kZeroRegCode);
}
bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
+ int64_t code_address_offset() const { return code_address_offset_; }
+
protected:
void ResetOutput();
void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3);
+ void set_code_address_offset(int64_t code_address_offset) {
+ code_address_offset_ = code_address_offset;
+ }
+
char* buffer_;
uint32_t buffer_pos_;
uint32_t buffer_size_;
bool own_buffer_;
+
+ int64_t code_address_offset_;
};
diff --git a/disas/libvixl/a64/instructions-a64.cc b/disas/libvixl/a64/instructions-a64.cc
index 1f08c781eb..b091886838 100644
--- a/disas/libvixl/a64/instructions-a64.cc
+++ b/disas/libvixl/a64/instructions-a64.cc
@@ -30,6 +30,20 @@
namespace vixl {
+// Floating-point infinity values.
+const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
+const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
+const double kFP64PositiveInfinity =
+ rawbits_to_double(UINT64_C(0x7ff0000000000000));
+const double kFP64NegativeInfinity =
+ rawbits_to_double(UINT64_C(0xfff0000000000000));
+
+
+// The default NaN values (for FPCR.DN=1).
+const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000));
+const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);
+
+
static uint64_t RotateRight(uint64_t value,
unsigned int rotate,
unsigned int width) {
@@ -54,6 +68,55 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
}
+bool Instruction::IsLoad() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) != 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+ switch (op) {
+ case LDRB_w:
+ case LDRH_w:
+ case LDR_w:
+ case LDR_x:
+ case LDRSB_w:
+ case LDRSB_x:
+ case LDRSH_w:
+ case LDRSH_x:
+ case LDRSW_x:
+ case LDR_s:
+ case LDR_d: return true;
+ default: return false;
+ }
+ }
+}
+
+
+bool Instruction::IsStore() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) == 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+ switch (op) {
+ case STRB_w:
+ case STRH_w:
+ case STR_w:
+ case STR_x:
+ case STR_s:
+ case STR_d: return true;
+ default: return false;
+ }
+ }
+}
+
+
// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are
// not met.
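
IsLoad() and IsStore() classify both plain and pair load/stores; prefetches and anything else outside the switch fall out as false. A hand-encoded check (the constants are ldr/str with unsigned immediate offset):

    #include <cassert>
    #include <cstdint>
    #include "a64/instructions-a64.h"

    void classify_demo()
    {
        static const uint32_t ldr = 0xf9400020;  // ldr x0, [x1]
        static const uint32_t str = 0xf9000020;  // str x0, [x1]
        assert(vixl::Instruction::CastConst(&ldr)->IsLoad());
        assert(vixl::Instruction::CastConst(&str)->IsStore());
    }
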
diff --git a/disas/libvixl/a64/instructions-a64.h b/disas/libvixl/a64/instructions-a64.h
index 29f972291b..f1d883ccc7 100644
--- a/disas/libvixl/a64/instructions-a64.h
+++ b/disas/libvixl/a64/instructions-a64.h
@@ -96,6 +96,17 @@ const unsigned kDoubleExponentBits = 11;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;
+// Floating-point infinity values.
+extern const float kFP32PositiveInfinity;
+extern const float kFP32NegativeInfinity;
+extern const double kFP64PositiveInfinity;
+extern const double kFP64NegativeInfinity;
+
+// The default NaN values (for FPCR.DN=1).
+extern const double kFP64DefaultNaN;
+extern const float kFP32DefaultNaN;
+
+
enum LSDataSize {
LSByte = 0,
LSHalfword = 1,
@@ -140,33 +151,33 @@ enum Reg31Mode {
class Instruction {
public:
- inline Instr InstructionBits() const {
+ Instr InstructionBits() const {
return *(reinterpret_cast<const Instr*>(this));
}
- inline void SetInstructionBits(Instr new_instr) {
+ void SetInstructionBits(Instr new_instr) {
*(reinterpret_cast<Instr*>(this)) = new_instr;
}
- inline int Bit(int pos) const {
+ int Bit(int pos) const {
return (InstructionBits() >> pos) & 1;
}
- inline uint32_t Bits(int msb, int lsb) const {
+ uint32_t Bits(int msb, int lsb) const {
return unsigned_bitextract_32(msb, lsb, InstructionBits());
}
- inline int32_t SignedBits(int msb, int lsb) const {
+ int32_t SignedBits(int msb, int lsb) const {
int32_t bits = *(reinterpret_cast<const int32_t*>(this));
return signed_bitextract_32(msb, lsb, bits);
}
- inline Instr Mask(uint32_t mask) const {
+ Instr Mask(uint32_t mask) const {
return InstructionBits() & mask;
}
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
- inline int64_t Name() const { return Func(HighBit, LowBit); }
+ int64_t Name() const { return Func(HighBit, LowBit); }
INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER
@@ -182,56 +193,64 @@ class Instruction {
float ImmFP32() const;
double ImmFP64() const;
- inline LSDataSize SizeLSPair() const {
+ LSDataSize SizeLSPair() const {
return CalcLSPairDataSize(
static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
}
// Helpers.
- inline bool IsCondBranchImm() const {
+ bool IsCondBranchImm() const {
return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
}
- inline bool IsUncondBranchImm() const {
+ bool IsUncondBranchImm() const {
return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
}
- inline bool IsCompareBranch() const {
+ bool IsCompareBranch() const {
return Mask(CompareBranchFMask) == CompareBranchFixed;
}
- inline bool IsTestBranch() const {
+ bool IsTestBranch() const {
return Mask(TestBranchFMask) == TestBranchFixed;
}
- inline bool IsPCRelAddressing() const {
+ bool IsPCRelAddressing() const {
return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
}
- inline bool IsLogicalImmediate() const {
+ bool IsLogicalImmediate() const {
return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
}
- inline bool IsAddSubImmediate() const {
+ bool IsAddSubImmediate() const {
return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
}
- inline bool IsAddSubExtended() const {
+ bool IsAddSubExtended() const {
return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
}
- inline bool IsLoadOrStore() const {
+ bool IsLoadOrStore() const {
return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
}
- inline bool IsMovn() const {
+ bool IsLoad() const;
+ bool IsStore() const;
+
+ bool IsLoadLiteral() const {
+ // This includes PRFM_lit.
+ return Mask(LoadLiteralFMask) == LoadLiteralFixed;
+ }
+
+ bool IsMovn() const {
return (Mask(MoveWideImmediateMask) == MOVN_x) ||
(Mask(MoveWideImmediateMask) == MOVN_w);
}
// Indicate whether Rd can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rd field.
- inline Reg31Mode RdMode() const {
+ Reg31Mode RdMode() const {
// The following instructions use sp or wsp as Rd:
// Add/sub (immediate) when not setting the flags.
// Add/sub (extended) when not setting the flags.
@@ -260,7 +279,7 @@ class Instruction {
// Indicate whether Rn can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rn field.
- inline Reg31Mode RnMode() const {
+ Reg31Mode RnMode() const {
// The following instructions use sp or wsp as Rn:
// All loads and stores.
// Add/sub (immediate).
@@ -272,7 +291,7 @@ class Instruction {
return Reg31IsZeroRegister;
}
- inline ImmBranchType BranchType() const {
+ ImmBranchType BranchType() const {
if (IsCondBranchImm()) {
return CondBranchType;
} else if (IsUncondBranchImm()) {
@@ -296,55 +315,66 @@ class Instruction {
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(const Instruction* source);
- inline uint8_t* LiteralAddress() const {
- int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
- const uint8_t* address = reinterpret_cast<const uint8_t*>(this) + offset;
- // Note that the result is safely mutable only if the backing buffer is
- // safely mutable.
- return const_cast<uint8_t*>(address);
+ // Calculate the address of a literal referred to by a load-literal
+ // instruction, and return it as the specified type.
+ //
+ // The literal itself is safely mutable only if the backing buffer is safely
+ // mutable.
+ template <typename T>
+ T LiteralAddress() const {
+ uint64_t base_raw = reinterpret_cast<uintptr_t>(this);
+ ptrdiff_t offset = ImmLLiteral() << kLiteralEntrySizeLog2;
+ uint64_t address_raw = base_raw + offset;
+
+ // Cast the address using a C-style cast. A reinterpret_cast would be
+ // appropriate, but it can't cast one integral type to another.
+ T address = (T)(address_raw);
+
+ // Assert that the address can be represented by the specified type.
+ VIXL_ASSERT((uint64_t)(address) == address_raw);
+
+ return address;
}
- inline uint32_t Literal32() const {
+ uint32_t Literal32() const {
uint32_t literal;
- memcpy(&literal, LiteralAddress(), sizeof(literal));
-
+ memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
return literal;
}
- inline uint64_t Literal64() const {
+ uint64_t Literal64() const {
uint64_t literal;
- memcpy(&literal, LiteralAddress(), sizeof(literal));
-
+ memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
return literal;
}
- inline float LiteralFP32() const {
+ float LiteralFP32() const {
return rawbits_to_float(Literal32());
}
- inline double LiteralFP64() const {
+ double LiteralFP64() const {
return rawbits_to_double(Literal64());
}
- inline const Instruction* NextInstruction() const {
+ const Instruction* NextInstruction() const {
return this + kInstructionSize;
}
- inline const Instruction* InstructionAtOffset(int64_t offset) const {
+ const Instruction* InstructionAtOffset(int64_t offset) const {
VIXL_ASSERT(IsWordAligned(this + offset));
return this + offset;
}
- template<typename T> static inline Instruction* Cast(T src) {
+ template<typename T> static Instruction* Cast(T src) {
return reinterpret_cast<Instruction*>(src);
}
- template<typename T> static inline const Instruction* CastConst(T src) {
+ template<typename T> static const Instruction* CastConst(T src) {
return reinterpret_cast<const Instruction*>(src);
}
private:
- inline int ImmBranch() const;
+ int ImmBranch() const;
void SetPCRelImmTarget(const Instruction* target);
void SetBranchImmTarget(const Instruction* target);
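
LiteralAddress() is now a template, so each caller picks the result type; the Literal32()/Literal64() changes above are the intended use. A matching sketch:

    #include <cstdint>
    #include <cstring>
    #include "a64/instructions-a64.h"

    // Read the 32-bit literal referenced by a load-literal instruction,
    // mirroring the new Literal32() implementation.
    uint32_t read_literal32(const vixl::Instruction *instr)
    {
        uint32_t value;
        std::memcpy(&value, instr->LiteralAddress<const void *>(),
                    sizeof(value));
        return value;
    }
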
diff --git a/disas/libvixl/globals.h b/disas/libvixl/globals.h
index e28dc6663a..0c2493105d 100644
--- a/disas/libvixl/globals.h
+++ b/disas/libvixl/globals.h
@@ -58,7 +58,7 @@ const int KBytes = 1024;
const int MBytes = 1024 * KBytes;
#define VIXL_ABORT() printf("in %s, line %i", __FILE__, __LINE__); abort()
-#ifdef DEBUG
+#ifdef VIXL_DEBUG
#define VIXL_ASSERT(condition) assert(condition)
#define VIXL_CHECK(condition) VIXL_ASSERT(condition)
#define VIXL_UNIMPLEMENTED() printf("UNIMPLEMENTED\t"); VIXL_ABORT()
diff --git a/disas/libvixl/utils.cc b/disas/libvixl/utils.cc
index 21965d7a1f..80b132a11e 100644
--- a/disas/libvixl/utils.cc
+++ b/disas/libvixl/utils.cc
@@ -135,4 +135,17 @@ bool IsPowerOf2(int64_t value) {
return (value != 0) && ((value & (value - 1)) == 0);
}
+
+unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
+ VIXL_ASSERT((reg_size % 8) == 0);
+ int count = 0;
+ for (unsigned i = 0; i < (reg_size / 16); i++) {
+ if ((imm & 0xffff) == 0) {
+ count++;
+ }
+ imm >>= 16;
+ }
+ return count;
+}
+
} // namespace vixl
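
CountClearHalfWords() walks reg_size/16 16-bit chunks from the low end and counts those equal to zero; it is used by the new IsImmMovn()/IsImmMovz() helpers when picking move-wide sequences. Expected values for two sample inputs:

    #include <cassert>
    #include <cstdint>
    #include "utils.h"

    void count_clear_halfwords_demo()
    {
        // Chunks, low to high: 0x0000, 0x0000, 0xffff, 0x0000 -> three clear.
        assert(vixl::CountClearHalfWords(UINT64_C(0x0000ffff00000000), 64) == 3);
        // 32-bit scan: 0x1234, 0x0000 -> one clear.
        assert(vixl::CountClearHalfWords(UINT64_C(0x00001234), 32) == 1);
    }
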
diff --git a/disas/libvixl/utils.h b/disas/libvixl/utils.h
index 1540c3060b..b4406263ac 100644
--- a/disas/libvixl/utils.h
+++ b/disas/libvixl/utils.h
@@ -166,6 +166,8 @@ int CountSetBits(uint64_t value, int width);
uint64_t LowestSetBit(uint64_t value);
bool IsPowerOf2(int64_t value);
+unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+
// Pointer alignment
// TODO: rename/refactor to make it specific to instructions.
template<typename T>
@@ -174,14 +176,14 @@ bool IsWordAligned(T pointer) {
return ((intptr_t)(pointer) & 3) == 0;
}
-// Increment a pointer until it has the specified alignment.
+// Increment a pointer (up to 64 bits) until it has the specified alignment.
template<class T>
T AlignUp(T pointer, size_t alignment) {
// Use C-style casts to get static_cast behaviour for integral types (T), and
// reinterpret_cast behaviour for other types.
- uintptr_t pointer_raw = (uintptr_t)pointer;
- VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(pointer_raw));
+ uint64_t pointer_raw = (uint64_t)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
size_t align_step = (alignment - pointer_raw) % alignment;
VIXL_ASSERT((pointer_raw + align_step) % alignment == 0);
@@ -189,14 +191,14 @@ T AlignUp(T pointer, size_t alignment) {
return (T)(pointer_raw + align_step);
}
-// Decrement a pointer until it has the specified alignment.
+// Decrement a pointer (up to 64 bits) until it has the specified alignment.
template<class T>
T AlignDown(T pointer, size_t alignment) {
// Use C-style casts to get static_cast behaviour for integral types (T), and
// reinterpret_cast behaviour for other types.
- uintptr_t pointer_raw = (uintptr_t)pointer;
- VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(pointer_raw));
+ uint64_t pointer_raw = (uint64_t)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
size_t align_step = pointer_raw % alignment;
VIXL_ASSERT((pointer_raw - align_step) % alignment == 0);
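
Widening through uint64_t rather than uintptr_t lets AlignUp()/AlignDown() accept 64-bit integral types even on 32-bit hosts; the static assert now only requires T to be no wider than 64 bits. For example:

    #include <cassert>
    #include <cstdint>
    #include "utils.h"

    void align_demo()
    {
        assert(vixl::AlignUp<uint64_t>(0x1001, 0x1000) == 0x2000);
        assert(vixl::AlignDown<uint64_t>(0x1fff, 0x1000) == 0x1000);
    }
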
diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
index ef24ca40fc..c6eab6de30 100644
--- a/hw/arm/armv7m.c
+++ b/hw/arm/armv7m.c
@@ -163,30 +163,23 @@ static void armv7m_reset(void *opaque)
}
/* Init CPU and memory for a v7-M based board.
- flash_size and sram_size are in kb.
+ mem_size is in bytes.
Returns the NVIC array. */
-qemu_irq *armv7m_init(MemoryRegion *system_memory,
- int flash_size, int sram_size,
+qemu_irq *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq,
const char *kernel_filename, const char *cpu_model)
{
ARMCPU *cpu;
CPUARMState *env;
DeviceState *nvic;
- /* FIXME: make this local state. */
- static qemu_irq pic[64];
+ qemu_irq *pic = g_new(qemu_irq, num_irq);
int image_size;
uint64_t entry;
uint64_t lowaddr;
int i;
int big_endian;
- MemoryRegion *sram = g_new(MemoryRegion, 1);
- MemoryRegion *flash = g_new(MemoryRegion, 1);
MemoryRegion *hack = g_new(MemoryRegion, 1);
- flash_size *= 1024;
- sram_size *= 1024;
-
if (cpu_model == NULL) {
cpu_model = "cortex-m3";
}
@@ -197,35 +190,15 @@ qemu_irq *armv7m_init(MemoryRegion *system_memory,
}
env = &cpu->env;
-#if 0
- /* > 32Mb SRAM gets complicated because it overlaps the bitband area.
- We don't have proper commandline options, so allocate half of memory
- as SRAM, up to a maximum of 32Mb, and the rest as code. */
- if (ram_size > (512 + 32) * 1024 * 1024)
- ram_size = (512 + 32) * 1024 * 1024;
- sram_size = (ram_size / 2) & TARGET_PAGE_MASK;
- if (sram_size > 32 * 1024 * 1024)
- sram_size = 32 * 1024 * 1024;
- code_size = ram_size - sram_size;
-#endif
-
- /* Flash programming is done via the SCU, so pretend it is ROM. */
- memory_region_init_ram(flash, NULL, "armv7m.flash", flash_size,
- &error_abort);
- vmstate_register_ram_global(flash);
- memory_region_set_readonly(flash, true);
- memory_region_add_subregion(system_memory, 0, flash);
- memory_region_init_ram(sram, NULL, "armv7m.sram", sram_size, &error_abort);
- vmstate_register_ram_global(sram);
- memory_region_add_subregion(system_memory, 0x20000000, sram);
armv7m_bitband_init();
nvic = qdev_create(NULL, "armv7m_nvic");
+ qdev_prop_set_uint32(nvic, "num-irq", num_irq);
env->nvic = nvic;
qdev_init_nofail(nvic);
sysbus_connect_irq(SYS_BUS_DEVICE(nvic), 0,
qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ));
- for (i = 0; i < 64; i++) {
+ for (i = 0; i < num_irq; i++) {
pic[i] = qdev_get_gpio_in(nvic, i);
}
@@ -244,7 +217,7 @@ qemu_irq *armv7m_init(MemoryRegion *system_memory,
image_size = load_elf(kernel_filename, NULL, NULL, &entry, &lowaddr,
NULL, big_endian, ELF_MACHINE, 1);
if (image_size < 0) {
- image_size = load_image_targphys(kernel_filename, 0, flash_size);
+ image_size = load_image_targphys(kernel_filename, 0, mem_size);
lowaddr = 0;
}
if (image_size < 0) {
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index 52ebd8be9b..a48d1b28d4 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -463,8 +463,26 @@ static void do_cpu_reset(void *opaque)
* (SCR.NS = 0), we change that here if non-secure boot has been
* requested.
*/
- if (arm_feature(env, ARM_FEATURE_EL3) && !info->secure_boot) {
- env->cp15.scr_el3 |= SCR_NS;
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ /* AArch64 is defined to come out of reset into EL3 if enabled.
+ * If we are booting Linux then we need to adjust our EL as
+ * Linux expects us to be in EL2 or EL1. AArch32 resets into
+             * SVC, which Linux expects, so there is no privilege/exception
+             * level to adjust.
+ */
+ if (env->aarch64) {
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ env->pstate = PSTATE_MODE_EL2h;
+ } else {
+ env->pstate = PSTATE_MODE_EL1h;
+ }
+ }
+
+ /* Set to non-secure if not a secure boot */
+ if (!info->secure_boot) {
+ /* Linux expects non-secure state */
+ env->cp15.scr_el3 |= SCR_NS;
+ }
}
if (CPU(cpu) == first_cpu) {
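The EL adjustment above boils down to a small policy: AArch64 CPUs with EL3 reset into EL3 and must be lowered to the level Linux boots at, while AArch32 already resets into SVC. A sketch of that decision (illustrative helper, not code from this patch):

    /* Sketch: exception level a Linux kernel should be entered at
     * on a CPU that has EL3 enabled. */
    static int boot_el_for_linux(bool aarch64, bool have_el2)
    {
        if (!aarch64) {
            return 1;                /* AArch32 SVC: nothing to adjust */
        }
        return have_el2 ? 2 : 1;     /* PSTATE_MODE_EL2h or PSTATE_MODE_EL1h */
    }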
diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c
index ccc3b189c3..cb515ec765 100644
--- a/hw/arm/stellaris.c
+++ b/hw/arm/stellaris.c
@@ -29,6 +29,8 @@
#define BP_OLED_SSI 0x02
#define BP_GAMEPAD 0x04
+#define NUM_IRQ_LINES 64
+
typedef const struct {
const char *name;
uint32_t did0;
@@ -1220,10 +1222,27 @@ static void stellaris_init(const char *kernel_filename, const char *cpu_model,
int i;
int j;
- flash_size = ((board->dc0 & 0xffff) + 1) << 1;
- sram_size = (board->dc0 >> 18) + 1;
- pic = armv7m_init(get_system_memory(),
- flash_size, sram_size, kernel_filename, cpu_model);
+ MemoryRegion *sram = g_new(MemoryRegion, 1);
+ MemoryRegion *flash = g_new(MemoryRegion, 1);
+ MemoryRegion *system_memory = get_system_memory();
+
+ flash_size = (((board->dc0 & 0xffff) + 1) << 1) * 1024;
+ sram_size = ((board->dc0 >> 18) + 1) * 1024;
+
+ /* Flash programming is done via the SCU, so pretend it is ROM. */
+ memory_region_init_ram(flash, NULL, "stellaris.flash", flash_size,
+ &error_abort);
+ vmstate_register_ram_global(flash);
+ memory_region_set_readonly(flash, true);
+ memory_region_add_subregion(system_memory, 0, flash);
+
+ memory_region_init_ram(sram, NULL, "stellaris.sram", sram_size,
+ &error_abort);
+ vmstate_register_ram_global(sram);
+ memory_region_add_subregion(system_memory, 0x20000000, sram);
+
+ pic = armv7m_init(system_memory, flash_size, NUM_IRQ_LINES,
+ kernel_filename, cpu_model);
if (board->dc1 & (1 << 16)) {
dev = sysbus_create_varargs(TYPE_STELLARIS_ADC, 0x40038000,
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 235344034d..34d9379032 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -441,10 +441,32 @@ static void create_virtio_devices(const VirtBoardInfo *vbi, qemu_irq *pic)
int i;
hwaddr size = vbi->memmap[VIRT_MMIO].size;
- /* Note that we have to create the transports in forwards order
- * so that command line devices are inserted lowest address first,
- * and then add dtb nodes in reverse order so that they appear in
- * the finished device tree lowest address first.
+ /* We create the transports in forwards order. Since qbus_realize()
+ * prepends (not appends) new child buses, the incrementing loop below will
+ * create a list of virtio-mmio buses with decreasing base addresses.
+ *
+ * When a -device option is processed from the command line,
+ * qbus_find_recursive() picks the next free virtio-mmio bus in forwards
+ * order. The upshot is that -device options in increasing command line
+ * order are mapped to virtio-mmio buses with decreasing base addresses.
+ *
+ * When this code was originally written, that arrangement ensured that the
+ * guest Linux kernel would give the lowest "name" (/dev/vda, eth0, etc) to
+ * the first -device on the command line. (The end-to-end order is a
+ * function of this loop, qbus_realize(), qbus_find_recursive(), and the
+ * guest kernel's name-to-address assignment strategy.)
+ *
+     * Meanwhile, the kernel's traversal seems to have been reversed; see e.g.
+ * the message, if not necessarily the code, of commit 70161ff336.
+ * Therefore the loop now establishes the inverse of the original intent.
+ *
+ * Unfortunately, we can't counteract the kernel change by reversing the
+ * loop; it would break existing command lines.
+ *
+ * In any case, the kernel makes no guarantee about the stability of
+ * enumeration order of virtio devices (as demonstrated by it changing
+ * between kernel versions). For reliable and stable identification
+     * of disks, users must use UUIDs or similar mechanisms.
*/
for (i = 0; i < NUM_VIRTIO_TRANSPORTS; i++) {
int irq = vbi->irqmap[VIRT_MMIO] + i;
@@ -453,6 +475,13 @@ static void create_virtio_devices(const VirtBoardInfo *vbi, qemu_irq *pic)
sysbus_create_simple("virtio-mmio", base, pic[irq]);
}
+ /* We add dtb nodes in reverse order so that they appear in the finished
+ * device tree lowest address first.
+ *
+ * Note that this mapping is independent of the loop above. The previous
+ * loop influences virtio device to virtio transport assignment, whereas
+ * this loop controls how virtio transports are laid out in the dtb.
+ */
for (i = NUM_VIRTIO_TRANSPORTS - 1; i >= 0; i--) {
char *nodename;
int irq = vbi->irqmap[VIRT_MMIO] + i;
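Numerically, the two loops interact as in this sketch (illustrative only; the base and stride come from the board memmap at runtime):

    /* Transport i sits at memmap base + i * size, but because
     * qbus_realize() prepends child buses, the j-th -device option
     * binds to transport N-1-j, i.e. to a decreasing base address. */
    for (int j = 0; j < NUM_VIRTIO_TRANSPORTS; j++) {
        int transport = NUM_VIRTIO_TRANSPORTS - 1 - j;
        hwaddr dev_base = vbi->memmap[VIRT_MMIO].base + transport * size;
        /* command-line device j is reachable at dev_base */
    }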
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index 0e825ea773..1673287189 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -244,9 +244,31 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 6) */
-#if (NB_MMU_MODES > 6)
-#error "NB_MMU_MODES > 6 is not supported for now"
-#endif /* (NB_MMU_MODES > 6) */
+#if (NB_MMU_MODES >= 7) && defined(MMU_MODE6_SUFFIX)
+
+#define CPU_MMU_INDEX 6
+#define MEMSUFFIX MMU_MODE6_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 7) */
+
+#if (NB_MMU_MODES > 7)
+/* Note that supporting NB_MMU_MODES == 9 would require
+ * changes to at least the ARM TCG backend.
+ */
+#error "NB_MMU_MODES > 7 is not supported for now"
+#endif /* (NB_MMU_MODES > 7) */
/* these accesses are slower, they must be as rare as possible */
#define CPU_MMU_INDEX (cpu_mmu_index(env))
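Each inclusion of cpu_ldst_template.h with CPU_MMU_INDEX/MEMSUFFIX/DATA_SIZE set stamps out the fixed-mode accessors for one operand size. If a target defined, hypothetically, #define MMU_MODE6_SUFFIX _mmu6, the new block would provide calls along these lines (the suffix is invented; the naming pattern follows the existing modes):

    /* Hypothetical accessors generated for MMU_MODE6_SUFFIX == _mmu6:
     * cpu_ldub/ldsb/lduw/ldsw/ldl/ldq_mmu6 and cpu_stb/stw/stl/stq_mmu6. */
    uint64_t v = cpu_ldq_mmu6(env, addr);    /* load via MMU index 6 */
    cpu_stb_mmu6(env, addr, 0x42);           /* store via MMU index 6 */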
diff --git a/include/hw/arm/arm.h b/include/hw/arm/arm.h
index c4bf56d44f..5c940eb412 100644
--- a/include/hw/arm/arm.h
+++ b/include/hw/arm/arm.h
@@ -15,8 +15,7 @@
#include "hw/irq.h"
/* armv7m.c */
-qemu_irq *armv7m_init(MemoryRegion *system_memory,
- int flash_size, int sram_size,
+qemu_irq *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq,
const char *kernel_filename, const char *cpu_model);
/* arm_boot.c */
diff --git a/target-arm/cpu.c b/target-arm/cpu.c
index 285947f911..d38af747ac 100644
--- a/target-arm/cpu.c
+++ b/target-arm/cpu.c
@@ -113,7 +113,14 @@ static void arm_cpu_reset(CPUState *s)
/* and to the FP/Neon instructions */
env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 2, 3);
#else
- env->pstate = PSTATE_MODE_EL1h;
+ /* Reset into the highest available EL */
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ env->pstate = PSTATE_MODE_EL3h;
+ } else if (arm_feature(env, ARM_FEATURE_EL2)) {
+ env->pstate = PSTATE_MODE_EL2h;
+ } else {
+ env->pstate = PSTATE_MODE_EL1h;
+ }
env->pc = cpu->rvbar;
#endif
} else {
@@ -320,6 +327,29 @@ static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
kvm_set_irq(kvm_state, kvm_irq, level ? 1 : 0);
#endif
}
+
+static bool arm_cpu_is_big_endian(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ int cur_el;
+
+ cpu_synchronize_state(cs);
+
+    /* In a 32-bit guest, endianness is determined by the CPSR E bit */
+ if (!is_a64(env)) {
+ return (env->uncached_cpsr & CPSR_E) ? 1 : 0;
+ }
+
+ cur_el = arm_current_el(env);
+
+ if (cur_el == 0) {
+ return (env->cp15.sctlr_el[1] & SCTLR_E0E) != 0;
+ }
+
+ return (env->cp15.sctlr_el[cur_el] & SCTLR_EE) != 0;
+}
+
#endif
static inline void set_feature(CPUARMState *env, int feature)
@@ -1189,6 +1219,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->do_interrupt = arm_cpu_do_interrupt;
cc->get_phys_page_debug = arm_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_arm_cpu;
+ cc->virtio_is_big_endian = arm_cpu_is_big_endian;
#endif
cc->gdb_num_core_regs = 26;
cc->gdb_core_xml_file = "arm-core.xml";
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index cd7a9e8e14..1830a12d4a 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -32,6 +32,8 @@
# define ELF_MACHINE EM_ARM
#endif
+#define TARGET_IS_BIENDIAN 1
+
#define CPUArchState struct CPUARMState
#include "qemu-common.h"
@@ -98,7 +100,7 @@ typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info,
struct arm_boot_info;
-#define NB_MMU_MODES 4
+#define NB_MMU_MODES 7
/* We currently assume float and double are IEEE single and double
precision respectively.
@@ -1110,8 +1112,14 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
* a register definition to override a previous definition for the
* same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the
* old must have the OVERRIDE bit set.
- * NO_MIGRATE indicates that this register should be ignored for migration;
- * (eg because any state is accessed via some other coprocessor register).
+ * ALIAS indicates that this register is an alias view of some underlying
+ * state which is also visible via another register, and that the other
+ * register is handling migration; registers marked ALIAS will not be migrated
+ * but may have their state set by syncing of register state from KVM.
+ * NO_RAW indicates that this register has no underlying state and does not
+ * support raw access for state saving/loading; it will not be used for either
+ * migration or KVM state synchronization. (Typically this is for "registers"
+ * which are actually used as instructions for cache maintenance and so on.)
* IO indicates that this register does I/O and therefore its accesses
* need to be surrounded by gen_io_start()/gen_io_end(). In particular,
* registers which implement clocks or timers require this.
@@ -1121,8 +1129,9 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
#define ARM_CP_64BIT 4
#define ARM_CP_SUPPRESS_TB_END 8
#define ARM_CP_OVERRIDE 16
-#define ARM_CP_NO_MIGRATE 32
+#define ARM_CP_ALIAS 32
#define ARM_CP_IO 64
+#define ARM_CP_NO_RAW 128
#define ARM_CP_NOP (ARM_CP_SPECIAL | (1 << 8))
#define ARM_CP_WFI (ARM_CP_SPECIAL | (2 << 8))
#define ARM_CP_NZCV (ARM_CP_SPECIAL | (3 << 8))
@@ -1132,7 +1141,7 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
/* Used only as a terminator for ARMCPRegInfo lists */
#define ARM_CP_SENTINEL 0xffff
/* Mask of only the flag bits in a type field */
-#define ARM_CP_FLAG_MASK 0x7f
+#define ARM_CP_FLAG_MASK 0xff
/* Valid values for ARMCPRegInfo state field, indicating which of
* the AArch32 and AArch64 execution states this register is visible in.
@@ -1211,6 +1220,10 @@ static inline bool cptype_valid(int cptype)
*/
static inline int arm_current_el(CPUARMState *env)
{
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return !((env->v7m.exception == 0) && (env->v7m.control & 1));
+ }
+
if (is_a64(env)) {
return extract32(env->pstate, 2, 2);
}
@@ -1568,13 +1581,90 @@ static inline CPUARMState *cpu_init(const char *cpu_model)
#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list
-/* MMU modes definitions */
-#define MMU_MODE0_SUFFIX _user
-#define MMU_MODE1_SUFFIX _kernel
+/* ARM has the following "translation regimes" (as the ARM ARM calls them):
+ *
+ * If EL3 is 64-bit:
+ * + NonSecure EL1 & 0 stage 1
+ * + NonSecure EL1 & 0 stage 2
+ * + NonSecure EL2
+ * + Secure EL1 & EL0
+ * + Secure EL3
+ * If EL3 is 32-bit:
+ * + NonSecure PL1 & 0 stage 1
+ * + NonSecure PL1 & 0 stage 2
+ * + NonSecure PL2
+ * + Secure PL0 & PL1
+ * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
+ *
+ * For QEMU, an mmu_idx is not quite the same as a translation regime because:
+ * 1. we need to split the "EL1 & 0" regimes into two mmu_idxes, because they
+ * may differ in access permissions even if the VA->PA map is the same
+ * 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
+ * translation, which means that we have one mmu_idx that deals with two
+ * concatenated translation regimes [this sort of combined s1+2 TLB is
+ * architecturally permitted]
+ * 3. we don't need to allocate an mmu_idx to translations that we won't be
+ * handling via the TLB. The only way to do a stage 1 translation without
+ * the immediate stage 2 translation is via the ATS or AT system insns,
+ * which can be slow-pathed and always do a page table walk.
+ * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
+ * translation regimes, because they map reasonably well to each other
+ * and they can't both be active at the same time.
+ * This gives us the following list of mmu_idx values:
+ *
+ * NS EL0 (aka NS PL0) stage 1+2
+ * NS EL1 (aka NS PL1) stage 1+2
+ * NS EL2 (aka NS PL2)
+ * S EL3 (aka S PL1)
+ * S EL0 (aka S PL0)
+ * S EL1 (not used if EL3 is 32 bit)
+ * NS EL0+1 stage 2
+ *
+ * (The last of these is an mmu_idx because we want to be able to use the TLB
+ * for the accesses done as part of a stage 1 page table walk, rather than
+ * having to walk the stage 2 page table over and over.)
+ *
+ * Our enumeration includes at the end some entries which are not "true"
+ * mmu_idx values in that they don't have corresponding TLBs and are only
+ * valid for doing slow path page table walks.
+ *
+ * The constant names here are patterned after the general style of the names
+ * of the AT/ATS operations.
+ * The values used are carefully arranged to make mmu_idx => EL lookup easy.
+ */
+typedef enum ARMMMUIdx {
+ ARMMMUIdx_S12NSE0 = 0,
+ ARMMMUIdx_S12NSE1 = 1,
+ ARMMMUIdx_S1E2 = 2,
+ ARMMMUIdx_S1E3 = 3,
+ ARMMMUIdx_S1SE0 = 4,
+ ARMMMUIdx_S1SE1 = 5,
+ ARMMMUIdx_S2NS = 6,
+ /* Indexes below here don't have TLBs and are used only for AT system
+ * instructions or for the first stage of an S12 page table walk.
+ */
+ ARMMMUIdx_S1NSE0 = 7,
+ ARMMMUIdx_S1NSE1 = 8,
+} ARMMMUIdx;
+
#define MMU_USER_IDX 0
-static inline int cpu_mmu_index (CPUARMState *env)
+
+/* Return the exception level we're running at if this is our mmu_idx */
+static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
- return arm_current_el(env);
+ assert(mmu_idx < ARMMMUIdx_S2NS);
+ return mmu_idx & 3;
+}
+
+/* Determine the current mmu_idx to use for normal loads/stores */
+static inline int cpu_mmu_index(CPUARMState *env)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && arm_is_secure_below_el3(env)) {
+ return ARMMMUIdx_S1SE0 + el;
+ }
+ return el;
}
/* Return the Exception Level targeted by debug exceptions;
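Worked through the constants above, the mapping comes out as below; the low two bits always recover the EL, which is what arm_mmu_idx_to_el() relies on (asserts are illustrative):

    /* cpu_mmu_index() results:
     *   NS EL0 -> 0 (S12NSE0)    NS EL1 -> 1 (S12NSE1)
     *   NS EL2 -> 2 (S1E2)       EL3    -> 3 (S1E3)
     *   S  EL0 -> 4 (S1SE0)      S  EL1 -> 5 (S1SE1)
     */
    assert(arm_mmu_idx_to_el(ARMMMUIdx_S1SE0) == 0);   /* 4 & 3 */
    assert(arm_mmu_idx_to_el(ARMMMUIdx_S1SE1) == 1);   /* 5 & 3 */
    assert(arm_mmu_idx_to_el(ARMMMUIdx_S1E3)  == 3);   /* 3 & 3 */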
@@ -1641,9 +1731,13 @@ static inline bool arm_singlestep_active(CPUARMState *env)
/* Bit usage in the TB flags field: bit 31 indicates whether we are
* in 32 or 64 bit mode. The meaning of the other bits depends on that.
+ * We put flags which are shared between 32 and 64 bit mode at the top
+ * of the word, and flags which apply to only one mode at the bottom.
*/
#define ARM_TBFLAG_AARCH64_STATE_SHIFT 31
#define ARM_TBFLAG_AARCH64_STATE_MASK (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT)
+#define ARM_TBFLAG_MMUIDX_SHIFT 28
+#define ARM_TBFLAG_MMUIDX_MASK (0x7 << ARM_TBFLAG_MMUIDX_SHIFT)
/* Bit usage when in AArch32 state: */
#define ARM_TBFLAG_THUMB_SHIFT 0
@@ -1652,8 +1746,6 @@ static inline bool arm_singlestep_active(CPUARMState *env)
#define ARM_TBFLAG_VECLEN_MASK (0x7 << ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE_SHIFT 4
#define ARM_TBFLAG_VECSTRIDE_MASK (0x3 << ARM_TBFLAG_VECSTRIDE_SHIFT)
-#define ARM_TBFLAG_PRIV_SHIFT 6
-#define ARM_TBFLAG_PRIV_MASK (1 << ARM_TBFLAG_PRIV_SHIFT)
#define ARM_TBFLAG_VFPEN_SHIFT 7
#define ARM_TBFLAG_VFPEN_MASK (1 << ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC_SHIFT 8
@@ -1679,8 +1771,6 @@ static inline bool arm_singlestep_active(CPUARMState *env)
#define ARM_TBFLAG_NS_MASK (1 << ARM_TBFLAG_NS_SHIFT)
/* Bit usage when in AArch64 state */
-#define ARM_TBFLAG_AA64_EL_SHIFT 0
-#define ARM_TBFLAG_AA64_EL_MASK (0x3 << ARM_TBFLAG_AA64_EL_SHIFT)
#define ARM_TBFLAG_AA64_FPEN_SHIFT 2
#define ARM_TBFLAG_AA64_FPEN_MASK (1 << ARM_TBFLAG_AA64_FPEN_SHIFT)
#define ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT 3
@@ -1691,14 +1781,14 @@ static inline bool arm_singlestep_active(CPUARMState *env)
/* some convenience accessor macros */
#define ARM_TBFLAG_AARCH64_STATE(F) \
(((F) & ARM_TBFLAG_AARCH64_STATE_MASK) >> ARM_TBFLAG_AARCH64_STATE_SHIFT)
+#define ARM_TBFLAG_MMUIDX(F) \
+ (((F) & ARM_TBFLAG_MMUIDX_MASK) >> ARM_TBFLAG_MMUIDX_SHIFT)
#define ARM_TBFLAG_THUMB(F) \
(((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN(F) \
(((F) & ARM_TBFLAG_VECLEN_MASK) >> ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE(F) \
(((F) & ARM_TBFLAG_VECSTRIDE_MASK) >> ARM_TBFLAG_VECSTRIDE_SHIFT)
-#define ARM_TBFLAG_PRIV(F) \
- (((F) & ARM_TBFLAG_PRIV_MASK) >> ARM_TBFLAG_PRIV_SHIFT)
#define ARM_TBFLAG_VFPEN(F) \
(((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC(F) \
@@ -1713,8 +1803,6 @@ static inline bool arm_singlestep_active(CPUARMState *env)
(((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
#define ARM_TBFLAG_XSCALE_CPAR(F) \
(((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
-#define ARM_TBFLAG_AA64_EL(F) \
- (((F) & ARM_TBFLAG_AA64_EL_MASK) >> ARM_TBFLAG_AA64_EL_SHIFT)
#define ARM_TBFLAG_AA64_FPEN(F) \
(((F) & ARM_TBFLAG_AA64_FPEN_MASK) >> ARM_TBFLAG_AA64_FPEN_SHIFT)
#define ARM_TBFLAG_AA64_SS_ACTIVE(F) \
@@ -1738,8 +1826,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
if (is_a64(env)) {
*pc = env->pc;
- *flags = ARM_TBFLAG_AARCH64_STATE_MASK
- | (arm_current_el(env) << ARM_TBFLAG_AA64_EL_SHIFT);
+ *flags = ARM_TBFLAG_AARCH64_STATE_MASK;
if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
*flags |= ARM_TBFLAG_AA64_FPEN_MASK;
}
@@ -1757,21 +1844,12 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
}
}
} else {
- int privmode;
*pc = env->regs[15];
*flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
| (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
| (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
| (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
| (env->bswap_code << ARM_TBFLAG_BSWAP_CODE_SHIFT);
- if (arm_feature(env, ARM_FEATURE_M)) {
- privmode = !((env->v7m.exception == 0) && (env->v7m.control & 1));
- } else {
- privmode = (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR;
- }
- if (privmode) {
- *flags |= ARM_TBFLAG_PRIV_MASK;
- }
if (!(access_secure_reg(env))) {
*flags |= ARM_TBFLAG_NS_MASK;
}
@@ -1799,6 +1877,8 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
<< ARM_TBFLAG_XSCALE_CPAR_SHIFT);
}
+ *flags |= (cpu_mmu_index(env) << ARM_TBFLAG_MMUIDX_SHIFT);
+
*cs_base = 0;
}
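On the consumer side, a translator can now pull the MMU index straight out of the TB flags rather than re-deriving privilege; a sketch of that decode step (dc stands in for the translator's DisasContext):

    /* Sketch: 3-bit field at shift 28, valid in both AArch32 and
     * AArch64 TBs since it is set unconditionally above. */
    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);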
diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c
index 81066ca936..8aa40e9763 100644
--- a/target-arm/helper-a64.c
+++ b/target-arm/helper-a64.c
@@ -135,6 +135,9 @@ float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
{
float_status *fpst = fpstp;
+ a = float32_squash_input_denormal(a, fpst);
+ b = float32_squash_input_denormal(b, fpst);
+
if ((float32_is_zero(a) && float32_is_infinity(b)) ||
(float32_is_infinity(a) && float32_is_zero(b))) {
/* 2.0 with the sign bit set to sign(A) XOR sign(B) */
@@ -148,6 +151,9 @@ float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
{
float_status *fpst = fpstp;
+ a = float64_squash_input_denormal(a, fpst);
+ b = float64_squash_input_denormal(b, fpst);
+
if ((float64_is_zero(a) && float64_is_infinity(b)) ||
(float64_is_infinity(a) && float64_is_zero(b))) {
/* 2.0 with the sign bit set to sign(A) XOR sign(B) */
@@ -223,6 +229,9 @@ float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
{
float_status *fpst = fpstp;
+ a = float32_squash_input_denormal(a, fpst);
+ b = float32_squash_input_denormal(b, fpst);
+
a = float32_chs(a);
if ((float32_is_infinity(a) && float32_is_zero(b)) ||
(float32_is_infinity(b) && float32_is_zero(a))) {
@@ -235,6 +244,9 @@ float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
{
float_status *fpst = fpstp;
+ a = float64_squash_input_denormal(a, fpst);
+ b = float64_squash_input_denormal(b, fpst);
+
a = float64_chs(a);
if ((float64_is_infinity(a) && float64_is_zero(b)) ||
(float64_is_infinity(b) && float64_is_zero(a))) {
@@ -247,6 +259,9 @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
{
float_status *fpst = fpstp;
+ a = float32_squash_input_denormal(a, fpst);
+ b = float32_squash_input_denormal(b, fpst);
+
a = float32_chs(a);
if ((float32_is_infinity(a) && float32_is_zero(b)) ||
(float32_is_infinity(b) && float32_is_zero(a))) {
@@ -259,6 +274,9 @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
{
float_status *fpst = fpstp;
+ a = float64_squash_input_denormal(a, fpst);
+ b = float64_squash_input_denormal(b, fpst);
+
a = float64_chs(a);
if ((float64_is_infinity(a) && float64_is_zero(b)) ||
(float64_is_infinity(b) && float64_is_zero(a))) {
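The squash calls matter because the (zero x infinity) special cases just below them must see a true zero when flush-to-zero is in effect. A simplified standalone sketch of the squashing (not QEMU's softfloat code, which also raises the input-denormal exception flag):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: replace a subnormal single-precision input with a signed
     * zero when flush-inputs-to-zero behaviour is enabled. */
    static uint32_t squash_input_denormal_f32(uint32_t f,
                                              bool flush_inputs_to_zero)
    {
        uint32_t exp  = (f >> 23) & 0xff;
        uint32_t frac = f & 0x7fffff;

        if (flush_inputs_to_zero && exp == 0 && frac != 0) {
            return f & 0x80000000u;   /* keep only the sign bit: +/-0.0f */
        }
        return f;
    }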
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 1a5e0678b0..1a1a00577e 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -13,7 +13,7 @@
#ifndef CONFIG_USER_ONLY
static inline int get_phys_addr(CPUARMState *env, target_ulong address,
- int access_type, int is_user,
+ int access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, int *prot,
target_ulong *page_size);
@@ -119,6 +119,7 @@ static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
+ assert(ri->fieldoffset);
if (cpreg_field_is_64bit(ri)) {
return CPREG_FIELD64(env, ri);
} else {
@@ -129,6 +130,7 @@ static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+ assert(ri->fieldoffset);
if (cpreg_field_is_64bit(ri)) {
CPREG_FIELD64(env, ri) = value;
} else {
@@ -174,6 +176,27 @@ static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
}
}
+static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
+{
+ /* Return true if the regdef would cause an assertion if you called
+ * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
+ * program bug for it not to have the NO_RAW flag).
+ * NB that returning false here doesn't necessarily mean that calling
+ * read/write_raw_cp_reg() is safe, because we can't distinguish "has
+ * read/write access functions which are safe for raw use" from "has
+ * read/write access functions which have side effects but has forgotten
+ * to provide raw access functions".
+ * The tests here line up with the conditions in read/write_raw_cp_reg()
+ * and assertions in raw_read()/raw_write().
+ */
+ if ((ri->type & ARM_CP_CONST) ||
+ ri->fieldoffset ||
+ ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
+ return false;
+ }
+ return true;
+}
+
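Concretely, the check separates regdefs like these two illustrative literals (not from this patch):

    /* Raw access fine: the state lives at a fieldoffset. */
    { .name = "EXAMPLE_OK", .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr) },
    /* Would trip the new asserts: a writefn but no fieldoffset and no
     * raw accessors, so it must carry ARM_CP_NO_RAW. */
    { .name = "EXAMPLE_BAD", .access = PL1_W, .writefn = tlbiall_write },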
bool write_cpustate_to_list(ARMCPU *cpu)
{
/* Write the coprocessor state from cpu->env to the (index,value) list. */
@@ -189,7 +212,7 @@ bool write_cpustate_to_list(ARMCPU *cpu)
ok = false;
continue;
}
- if (ri->type & ARM_CP_NO_MIGRATE) {
+ if (ri->type & ARM_CP_NO_RAW) {
continue;
}
cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
@@ -212,7 +235,7 @@ bool write_list_to_cpustate(ARMCPU *cpu)
ok = false;
continue;
}
- if (ri->type & ARM_CP_NO_MIGRATE) {
+ if (ri->type & ARM_CP_NO_RAW) {
continue;
}
/* Write value and confirm it reads back as written
@@ -236,7 +259,7 @@ static void add_cpreg_to_list(gpointer key, gpointer opaque)
regidx = *(uint32_t *)key;
ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
- if (!(ri->type & ARM_CP_NO_MIGRATE)) {
+ if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
/* The value array need not be initialized at this point */
cpu->cpreg_array_len++;
@@ -252,7 +275,7 @@ static void count_cpreg(gpointer key, gpointer opaque)
regidx = *(uint32_t *)key;
ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
- if (!(ri->type & ARM_CP_NO_MIGRATE)) {
+ if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
cpu->cpreg_array_len++;
}
}
@@ -508,7 +531,7 @@ static const ARMCPRegInfo not_v7_cp_reginfo[] = {
.resetvalue = 0 },
/* v6 doesn't have the cache ID registers but Linux reads them anyway */
{ .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
- .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
+ .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
.resetvalue = 0 },
/* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
* implementing it as RAZ means the "debug architecture version" bits
@@ -522,16 +545,16 @@ static const ARMCPRegInfo not_v7_cp_reginfo[] = {
*/
{ .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
.opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
- .type = ARM_CP_NO_MIGRATE },
+ .type = ARM_CP_NO_RAW },
{ .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
.opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
- .type = ARM_CP_NO_MIGRATE },
+ .type = ARM_CP_NO_RAW },
{ .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
.opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
- .type = ARM_CP_NO_MIGRATE },
+ .type = ARM_CP_NO_RAW },
{ .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
.opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
- .type = ARM_CP_NO_MIGRATE },
+ .type = ARM_CP_NO_RAW },
REGINFO_SENTINEL
};
@@ -854,7 +877,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
* or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
*/
{ .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
- .access = PL0_RW, .type = ARM_CP_NO_MIGRATE,
+ .access = PL0_RW, .type = ARM_CP_ALIAS,
.fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
.writefn = pmcntenset_write,
.accessfn = pmreg_access,
@@ -869,11 +892,11 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
.accessfn = pmreg_access,
.writefn = pmcntenclr_write,
- .type = ARM_CP_NO_MIGRATE },
+ .type = ARM_CP_ALIAS },
{ .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
.access = PL0_RW, .accessfn = pmreg_access,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
.writefn = pmcntenclr_write },
{ .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
@@ -928,7 +951,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.resetvalue = 0,
.writefn = pmintenset_write, .raw_writefn = raw_write },
{ .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
- .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_RW, .type = ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
.resetvalue = 0, .writefn = pmintenclr_write, },
{ .name = "VBAR", .state = ARM_CP_STATE_BOTH,
@@ -939,7 +962,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.resetvalue = 0 },
{ .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
- .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE },
+ .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
{ .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
.access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
@@ -988,44 +1011,44 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.resetfn = arm_cp_reset_ignore },
{ .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_R, .readfn = isr_read },
+ .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
/* 32 bit ITLB invalidates */
{ .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
{ .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
{ .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
/* 32 bit DTLB invalidates */
{ .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
{ .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
{ .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
/* 32 bit TLB invalidates */
{ .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
{ .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
{ .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
{ .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
REGINFO_SENTINEL
};
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
/* 32 bit TLB invalidates, Inner Shareable */
{ .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_is_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
{ .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_is_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
{ .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W,
+ .type = ARM_CP_NO_RAW, .access = PL1_W,
.writefn = tlbiasid_is_write },
{ .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W,
+ .type = ARM_CP_NO_RAW, .access = PL1_W,
.writefn = tlbimvaa_is_write },
REGINFO_SENTINEL
};
@@ -1268,7 +1291,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
* Our reset value matches the fixed frequency we implement the timer at.
*/
{ .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
.fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
.resetfn = arm_cp_reset_ignore,
@@ -1288,7 +1311,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
},
/* per-timer control */
{ .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
- .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
+ .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
.accessfn = gt_ptimer_access,
.fieldoffset = offsetoflow32(CPUARMState,
cp15.c14_timer[GTIMER_PHYS].ctl),
@@ -1304,7 +1327,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
.writefn = gt_ctl_write, .raw_writefn = raw_write,
},
{ .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
- .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
+ .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
.accessfn = gt_vtimer_access,
.fieldoffset = offsetoflow32(CPUARMState,
cp15.c14_timer[GTIMER_VIRT].ctl),
@@ -1321,52 +1344,52 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
},
/* TimerValue views: a 32 bit downcounting view of the underlying state */
{ .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
.accessfn = gt_ptimer_access,
.readfn = gt_tval_read, .writefn = gt_tval_write,
},
{ .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
.readfn = gt_tval_read, .writefn = gt_tval_write,
},
{ .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
.accessfn = gt_vtimer_access,
.readfn = gt_tval_read, .writefn = gt_tval_write,
},
{ .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
.readfn = gt_tval_read, .writefn = gt_tval_write,
},
/* The counter itself */
{ .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
- .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
+ .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
.accessfn = gt_pct_access,
.readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
},
{ .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
- .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
+ .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
.accessfn = gt_pct_access,
.readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
},
{ .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
- .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
+ .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
.accessfn = gt_vct_access,
.readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
},
{ .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
- .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
+ .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
.accessfn = gt_vct_access,
.readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
},
/* Comparison value, indicating when the timer goes off */
{ .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
.access = PL1_RW | PL0_R,
- .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
.accessfn = gt_ptimer_access, .resetfn = arm_cp_reset_ignore,
.writefn = gt_cval_write, .raw_writefn = raw_write,
@@ -1381,7 +1404,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
},
{ .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
.access = PL1_RW | PL0_R,
- .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
.accessfn = gt_vtimer_access, .resetfn = arm_cp_reset_ignore,
.writefn = gt_cval_write, .raw_writefn = raw_write,
@@ -1428,23 +1451,23 @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri)
/* Other states are only available with TrustZone; in
* a non-TZ implementation these registers don't exist
* at all, which is an Uncategorized trap. This underdecoding
- * is safe because the reginfo is NO_MIGRATE.
+ * is safe because the reginfo is NO_RAW.
*/
return CP_ACCESS_TRAP_UNCATEGORIZED;
}
return CP_ACCESS_OK;
}
-static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
+ int access_type, ARMMMUIdx mmu_idx)
{
hwaddr phys_addr;
target_ulong page_size;
int prot;
- int ret, is_user = ri->opc2 & 2;
- int access_type = ri->opc2 & 1;
+ int ret;
uint64_t par64;
- ret = get_phys_addr(env, value, access_type, is_user,
+ ret = get_phys_addr(env, value, access_type, mmu_idx,
&phys_addr, &prot, &page_size);
if (extended_addresses_enabled(env)) {
/* ret is a DFSR/IFSR value for the long descriptor
@@ -1481,9 +1504,105 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
((ret & 0xf) << 1) | 1;
}
}
+ return par64;
+}
+
+static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ int access_type = ri->opc2 & 1;
+ uint64_t par64;
+ ARMMMUIdx mmu_idx;
+ int el = arm_current_el(env);
+ bool secure = arm_is_secure_below_el3(env);
+
+ switch (ri->opc2 & 6) {
+ case 0:
+ /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
+ switch (el) {
+ case 3:
+ mmu_idx = ARMMMUIdx_S1E3;
+ break;
+ case 2:
+ mmu_idx = ARMMMUIdx_S1NSE1;
+ break;
+ case 1:
+ mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 2:
+ /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
+ switch (el) {
+ case 3:
+ mmu_idx = ARMMMUIdx_S1SE0;
+ break;
+ case 2:
+ mmu_idx = ARMMMUIdx_S1NSE0;
+ break;
+ case 1:
+ mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 4:
+ /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
+ mmu_idx = ARMMMUIdx_S12NSE1;
+ break;
+ case 6:
+ /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
+ mmu_idx = ARMMMUIdx_S12NSE0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ par64 = do_ats_write(env, value, access_type, mmu_idx);
A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
+
+static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ int access_type = ri->opc2 & 1;
+ ARMMMUIdx mmu_idx;
+ int secure = arm_is_secure_below_el3(env);
+
+ switch (ri->opc2 & 6) {
+ case 0:
+ switch (ri->opc1) {
+ case 0: /* AT S1E1R, AT S1E1W */
+ mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
+ break;
+ case 4: /* AT S1E2R, AT S1E2W */
+ mmu_idx = ARMMMUIdx_S1E2;
+ break;
+ case 6: /* AT S1E3R, AT S1E3W */
+ mmu_idx = ARMMMUIdx_S1E3;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 2: /* AT S1E0R, AT S1E0W */
+ mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
+ break;
+ case 4: /* AT S12E1R, AT S12E1W */
+ mmu_idx = ARMMMUIdx_S12NSE1;
+ break;
+ case 6: /* AT S12E0R, AT S12E0W */
+ mmu_idx = ARMMMUIdx_S12NSE0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
+}
#endif
static const ARMCPRegInfo vapa_cp_reginfo[] = {
@@ -1495,7 +1614,7 @@ static const ARMCPRegInfo vapa_cp_reginfo[] = {
#ifndef CONFIG_USER_ONLY
{ .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
.access = PL1_W, .accessfn = ats_access,
- .writefn = ats_write, .type = ARM_CP_NO_MIGRATE },
+ .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
REGINFO_SENTINEL
};
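A worked example of the AArch32 decode in ats_write() above, where opc2 bit 0 selects the access type and bits 2:1 select the operation:

    /* ATS1CPW   (opc2 = 1): access_type 1 (write), "current state PL1";
     *   at NS EL1 -> ARMMMUIdx_S1NSE1, at Secure EL1 -> ARMMMUIdx_S1SE1.
     * ATS12NSOUR (opc2 = 6): access_type 0 (read), stage 1+2 NS PL0;
     *   -> ARMMMUIdx_S12NSE0 whatever the current EL. */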
@@ -1554,12 +1673,12 @@ static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
{ .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_RW, .type = ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
.resetvalue = 0,
.readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
{ .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
- .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_RW, .type = ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
.resetvalue = 0,
.readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
@@ -1691,7 +1810,7 @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
{ .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_RW, .type = ARM_CP_ALIAS,
.bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
offsetoflow32(CPUARMState, cp15.dfsr_ns) },
.resetfn = arm_cp_reset_ignore, },
@@ -1719,7 +1838,7 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
.resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
{ .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
- .access = PL1_RW, .type = ARM_CP_NO_MIGRATE, .writefn = vmsa_ttbcr_write,
+ .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
.resetfn = arm_cp_reset_ignore, .raw_writefn = vmsa_ttbcr_raw_write,
.bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
@@ -1789,7 +1908,7 @@ static const ARMCPRegInfo omap_cp_reginfo[] = {
.writefn = omap_threadid_write },
{ .name = "TI925T_STATUS", .cp = 15, .crn = 15,
.crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_NO_RAW,
.readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
/* TODO: Peripheral port remap register:
* On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
@@ -1798,7 +1917,7 @@ static const ARMCPRegInfo omap_cp_reginfo[] = {
*/
{ .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
.opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
- .type = ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
.writefn = omap_cachemaint_write },
{ .name = "C9", .cp = 15, .crn = 9,
.crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
@@ -1848,7 +1967,7 @@ static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
{ .name = "C15_IMPDEF", .cp = 15, .crn = 15,
.crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
.access = PL1_RW,
- .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE | ARM_CP_OVERRIDE,
+ .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
.resetvalue = 0 },
REGINFO_SENTINEL
};
@@ -1856,7 +1975,7 @@ static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
/* Cache status: RAZ because we have no cache so it's always clean */
{ .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
- .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
+ .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
.resetvalue = 0 },
REGINFO_SENTINEL
};
@@ -1864,7 +1983,7 @@ static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
/* We never have a block transfer operation in progress */
{ .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
- .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
+ .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
.resetvalue = 0 },
/* The cache ops themselves: these all NOP for QEMU */
{ .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
@@ -1887,10 +2006,10 @@ static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
* to indicate that there are no dirty cache lines.
*/
{ .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
- .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
+ .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
.resetvalue = (1 << 30) },
{ .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
- .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
+ .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
.resetvalue = (1 << 30) },
REGINFO_SENTINEL
};
@@ -1900,7 +2019,7 @@ static const ARMCPRegInfo strongarm_cp_reginfo[] = {
{ .name = "C9_READBUFFER", .cp = 15, .crn = 9,
.crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
.access = PL1_RW, .resetvalue = 0,
- .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE },
+ .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
REGINFO_SENTINEL
};
@@ -1926,7 +2045,7 @@ static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
static const ARMCPRegInfo mpidr_cp_reginfo[] = {
{ .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
- .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_MIGRATE },
+ .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
REGINFO_SENTINEL
};
@@ -1947,12 +2066,12 @@ static const ARMCPRegInfo lpae_cp_reginfo[] = {
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
offsetof(CPUARMState, cp15.par_ns)} },
{ .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
- .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
+ .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
offsetof(CPUARMState, cp15.ttbr0_ns) },
.writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
{ .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
- .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
+ .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
offsetof(CPUARMState, cp15.ttbr1_ns) },
.writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
@@ -2144,7 +2263,7 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.access = PL0_RW, .type = ARM_CP_NZCV },
{ .name = "DAIF", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_NO_RAW,
.access = PL0_RW, .accessfn = aa64_daif_access,
.fieldoffset = offsetof(CPUARMState, daif),
.writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
@@ -2156,7 +2275,7 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
{ .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
- .access = PL0_R, .type = ARM_CP_NO_MIGRATE,
+ .access = PL0_R, .type = ARM_CP_NO_RAW,
.readfn = aa64_dczid_read },
{ .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
@@ -2207,77 +2326,77 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
/* TLBI operations */
{ .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbiall_is_write },
{ .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_va_is_write },
{ .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_asid_is_write },
{ .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_vaa_is_write },
{ .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_va_is_write },
{ .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_vaa_is_write },
{ .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbiall_write },
{ .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_va_write },
{ .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_asid_write },
{ .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_vaa_write },
{ .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_va_write },
{ .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_vaa_write },
#ifndef CONFIG_USER_ONLY
/* 64 bit address translation operations */
{ .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
+ .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
{ .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
+ .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
{ .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
+ .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
{ .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
+ .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
#endif
/* TLB invalidate last level of translation table walk */
{ .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_is_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
{ .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W,
+ .type = ARM_CP_NO_RAW, .access = PL1_W,
.writefn = tlbimvaa_is_write },
{ .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
{ .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
/* 32 bit cache operations */
{ .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
.type = ARM_CP_NOP, .access = PL1_W },
@@ -2312,12 +2431,12 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
offsetoflow32(CPUARMState, cp15.dacr_ns) } },
{ .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
.access = PL1_RW,
.fieldoffset = offsetof(CPUARMState, elr_el[1]) },
{ .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[0]) },
/* We rely on the access checks not allowing the guest to write to the
@@ -2327,11 +2446,15 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
.access = PL1_RW, .accessfn = sp_el0_access,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, sp_el[0]) },
+ { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
{ .name = "SPSel", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_NO_RAW,
.access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
REGINFO_SENTINEL
};
@@ -2343,7 +2466,7 @@ static const ARMCPRegInfo v8_el3_no_el2_cp_reginfo[] = {
.access = PL2_RW,
.readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
{ .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_NO_RAW,
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
.access = PL2_RW,
.readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
@@ -2386,12 +2509,12 @@ static const ARMCPRegInfo v8_el2_cp_reginfo[] = {
.writefn = dacr_write, .raw_writefn = raw_write,
.fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
{ .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
.access = PL2_RW,
.fieldoffset = offsetof(CPUARMState, elr_el[2]) },
{ .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
{ .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
@@ -2402,7 +2525,7 @@ static const ARMCPRegInfo v8_el2_cp_reginfo[] = {
.opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
{ .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[6]) },
{ .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
@@ -2410,6 +2533,10 @@ static const ARMCPRegInfo v8_el2_cp_reginfo[] = {
.access = PL2_RW, .writefn = vbar_write,
.fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
.resetvalue = 0 },
+ { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
REGINFO_SENTINEL
};
@@ -2418,7 +2545,7 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
.opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
.access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
.resetvalue = 0, .writefn = scr_write },
- { .name = "SCR", .type = ARM_CP_NO_MIGRATE,
+ { .name = "SCR", .type = ARM_CP_ALIAS,
.cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
.access = PL3_RW, .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
.resetfn = arm_cp_reset_ignore, .writefn = scr_write },
@@ -2451,19 +2578,19 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
.resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
{ .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
.access = PL3_RW,
.fieldoffset = offsetof(CPUARMState, elr_el[3]) },
{ .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
.access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
{ .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
.access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
{ .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
.access = PL3_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[7]) },
{ .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
@@ -2510,7 +2637,7 @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
*/
{ .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.access = PL1_R,
.fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
.resetfn = arm_cp_reset_ignore },
@@ -2963,7 +3090,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
ARMCPRegInfo pmcr = {
.name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
.access = PL0_RW,
- .type = ARM_CP_IO | ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_IO | ARM_CP_ALIAS,
.fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
.accessfn = pmreg_access, .writefn = pmcr_write,
.raw_writefn = raw_write,
@@ -3053,17 +3180,30 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.resetvalue = cpu->mvfr2 },
REGINFO_SENTINEL
};
- ARMCPRegInfo rvbar = {
- .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
- .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
- };
- define_one_arm_cp_reg(cpu, &rvbar);
+ /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
+ if (!arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_feature(env, ARM_FEATURE_EL2)) {
+ ARMCPRegInfo rvbar = {
+ .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
+ };
+ define_one_arm_cp_reg(cpu, &rvbar);
+ }
define_arm_cp_regs(cpu, v8_idregs);
define_arm_cp_regs(cpu, v8_cp_reginfo);
}
if (arm_feature(env, ARM_FEATURE_EL2)) {
define_arm_cp_regs(cpu, v8_el2_cp_reginfo);
+ /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
+ if (!arm_feature(env, ARM_FEATURE_EL3)) {
+ ARMCPRegInfo rvbar = {
+ .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
+ };
+ define_one_arm_cp_reg(cpu, &rvbar);
+ }
} else {
/* If EL2 is missing but higher ELs are enabled, we need to
* register the no_el2 reginfos.
@@ -3074,6 +3214,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
}
if (arm_feature(env, ARM_FEATURE_EL3)) {
define_arm_cp_regs(cpu, el3_cp_reginfo);
+ ARMCPRegInfo rvbar = {
+ .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar
+ };
+ define_one_arm_cp_reg(cpu, &rvbar);
}
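
RVBAR is architecturally present only at the highest implemented exception level, which is why each of the three definitions above is guarded on the absence of any higher EL; the EL1 hunk also corrects opc2 from 2 to the architected 1. The encodings as registered:

    /* RVBAR_ELx encodings (op0 = 3, CRn = 12, CRm = 0, op2 = 1):
     *   RVBAR_EL1: opc1 = 0, PL1_R  -- only if neither EL2 nor EL3 exists
     *   RVBAR_EL2: opc1 = 4, PL2_R  -- only if EL3 does not exist
     *   RVBAR_EL3: opc1 = 6, PL3_R
     * All three are ARM_CP_CONST views of cpu->rvbar.
     */
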
if (arm_feature(env, ARM_FEATURE_MPU)) {
/* These are the MPU registers prior to PMSAv6. Any new
@@ -3440,14 +3586,14 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
*/
if ((r->state == ARM_CP_STATE_BOTH && ns) ||
(arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
- r2->type |= ARM_CP_NO_MIGRATE;
+ r2->type |= ARM_CP_ALIAS;
r2->resetfn = arm_cp_reset_ignore;
}
} else if ((secstate != r->secure) && !ns) {
/* The register is not banked so we only want to allow migration of
* the non-secure instance.
*/
- r2->type |= ARM_CP_NO_MIGRATE;
+ r2->type |= ARM_CP_ALIAS;
r2->resetfn = arm_cp_reset_ignore;
}
@@ -3496,15 +3642,25 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
r2->opc2 = opc2;
/* By convention, for wildcarded registers only the first
* entry is used for migration; the others are marked as
- * NO_MIGRATE so we don't try to transfer the register
+ * ALIAS so we don't try to transfer the register
* multiple times. Special registers (ie NOP/WFI) are
- * never migratable.
+ * never migratable and not even raw-accessible.
*/
- if ((r->type & ARM_CP_SPECIAL) ||
- ((r->crm == CP_ANY) && crm != 0) ||
+ if ((r->type & ARM_CP_SPECIAL)) {
+ r2->type |= ARM_CP_NO_RAW;
+ }
+ if (((r->crm == CP_ANY) && crm != 0) ||
((r->opc1 == CP_ANY) && opc1 != 0) ||
((r->opc2 == CP_ANY) && opc2 != 0)) {
- r2->type |= ARM_CP_NO_MIGRATE;
+ r2->type |= ARM_CP_ALIAS;
+ }
+
+ /* Check that raw accesses are either forbidden or handled. Note that
+ * we can't assert this earlier because the setup of fieldoffset for
+ * banked registers has to be done first.
+ */
+ if (!(r2->type & ARM_CP_NO_RAW)) {
+ assert(!raw_accessors_invalid(r2));
}
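
After the split, ARM_CP_ALIAS answers "should migration skip this view?" while ARM_CP_NO_RAW answers "is a raw, migration-style access meaningless here?", and the new assert pins down the invariant for everything still raw-accessible. A hypothetical sketch of the check -- raw_accessors_invalid() itself is not part of this hunk, and the accessor field names are assumptions based on the reginfo fields used elsewhere in this file:

    /* Sketch only: a raw-accessible register must give migration somewhere
     * to read from and write to -- a constant, a CPUARMState field, or
     * explicit accessor hooks.
     */
    static bool raw_accessors_invalid_sketch(const ARMCPRegInfo *ri)
    {
        if ((ri->type & ARM_CP_CONST) || ri->fieldoffset) {
            return false;
        }
        if ((ri->raw_readfn || ri->readfn) &&
            (ri->raw_writefn || ri->writefn)) {
            return false;
        }
        return true;  /* no way to perform a raw access */
    }
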
/* Overriding of an existing definition must be explicitly
@@ -4460,91 +4616,170 @@ void arm_cpu_do_interrupt(CPUState *cs)
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
+
+/* Return the exception level which controls this address translation regime */
+static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_S2NS:
+ case ARMMMUIdx_S1E2:
+ return 2;
+ case ARMMMUIdx_S1E3:
+ return 3;
+ case ARMMMUIdx_S1SE0:
+ return arm_el_is_aa64(env, 3) ? 1 : 3;
+ case ARMMMUIdx_S1SE1:
+ case ARMMMUIdx_S1NSE0:
+ case ARMMMUIdx_S1NSE1:
+ return 1;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Return the SCTLR value which controls this address translation regime */
+static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
+}
+
+/* Return true if the specified stage of address translation is disabled */
+static inline bool regime_translation_disabled(CPUARMState *env,
+ ARMMMUIdx mmu_idx)
+{
+ if (mmu_idx == ARMMMUIdx_S2NS) {
+ return (env->cp15.hcr_el2 & HCR_VM) == 0;
+ }
+ return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
+}
+
+/* Return the TCR controlling this translation regime */
+static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ if (mmu_idx == ARMMMUIdx_S2NS) {
+ /* TODO: return VTCR_EL2 */
+ g_assert_not_reached();
+ }
+ return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
+}
+
+/* Return true if the translation regime is using LPAE format page tables */
+static inline bool regime_using_lpae_format(CPUARMState *env,
+ ARMMMUIdx mmu_idx)
+{
+ int el = regime_el(env, mmu_idx);
+ if (el == 2 || arm_el_is_aa64(env, el)) {
+ return true;
+ }
+ if (arm_feature(env, ARM_FEATURE_LPAE)
+ && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
+ return true;
+ }
+ return false;
+}
+
+static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_S1SE0:
+ case ARMMMUIdx_S1NSE0:
+ return true;
+ default:
+ return false;
+ case ARMMMUIdx_S12NSE0:
+ case ARMMMUIdx_S12NSE1:
+ g_assert_not_reached();
+ }
+}
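
These helpers replace ad-hoc reads of the banked "current" registers: every question about a translation is now asked of the regime the mmu_idx names, not of whatever state the CPU happens to be in. A sketch (not part of the patch) of how they compose:

    /* Illustrative only: summarise a stage 1 regime from its mmu_idx. */
    static void describe_regime(CPUARMState *env, ARMMMUIdx mmu_idx)
    {
        int el = regime_el(env, mmu_idx);          /* e.g. S1NSE0 -> 1 */
        bool off = regime_translation_disabled(env, mmu_idx);
        bool lpae = regime_using_lpae_format(env, mmu_idx);
        bool user = regime_is_user(env, mmu_idx);  /* EL0-style permissions */

        qemu_log("regime EL%d: MMU %s, %s descriptors, %s access\n",
                 el, off ? "off" : "on", lpae ? "LPAE" : "short",
                 user ? "unprivileged" : "privileged");
    }
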
+
/* Check section/page access permissions.
Returns the page protection flags, or zero if the access is not
permitted. */
-static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
- int access_type, int is_user)
-{
- int prot_ro;
-
- if (domain_prot == 3) {
- return PAGE_READ | PAGE_WRITE;
- }
-
- if (access_type == 1)
- prot_ro = 0;
- else
- prot_ro = PAGE_READ;
-
- switch (ap) {
- case 0:
- if (arm_feature(env, ARM_FEATURE_V7)) {
- return 0;
- }
- if (access_type == 1)
- return 0;
- switch (A32_BANKED_CURRENT_REG_GET(env, sctlr) & (SCTLR_S | SCTLR_R)) {
- case SCTLR_S:
- return is_user ? 0 : PAGE_READ;
- case SCTLR_R:
- return PAGE_READ;
- default:
- return 0;
- }
- case 1:
- return is_user ? 0 : PAGE_READ | PAGE_WRITE;
- case 2:
- if (is_user)
- return prot_ro;
- else
- return PAGE_READ | PAGE_WRITE;
- case 3:
- return PAGE_READ | PAGE_WRITE;
- case 4: /* Reserved. */
- return 0;
- case 5:
- return is_user ? 0 : prot_ro;
- case 6:
- return prot_ro;
- case 7:
- if (!arm_feature (env, ARM_FEATURE_V6K))
- return 0;
- return prot_ro;
- default:
- abort();
- }
-}
-
-static bool get_level1_table_address(CPUARMState *env, uint32_t *table,
- uint32_t address)
-{
- /* Get the TCR bank based on our security state */
- TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
-
- /* We only get here if EL1 is running in AArch32. If EL3 is running in
- * AArch32 there is a secure and non-secure instance of the translation
- * table registers.
- */
+static inline int check_ap(CPUARMState *env, ARMMMUIdx mmu_idx,
+ int ap, int domain_prot,
+ int access_type)
+{
+ int prot_ro;
+ bool is_user = regime_is_user(env, mmu_idx);
+
+ if (domain_prot == 3) {
+ return PAGE_READ | PAGE_WRITE;
+ }
+
+ if (access_type == 1) {
+ prot_ro = 0;
+ } else {
+ prot_ro = PAGE_READ;
+ }
+
+ switch (ap) {
+ case 0:
+ if (arm_feature(env, ARM_FEATURE_V7)) {
+ return 0;
+ }
+ if (access_type == 1) {
+ return 0;
+ }
+ switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
+ case SCTLR_S:
+ return is_user ? 0 : PAGE_READ;
+ case SCTLR_R:
+ return PAGE_READ;
+ default:
+ return 0;
+ }
+ case 1:
+ return is_user ? 0 : PAGE_READ | PAGE_WRITE;
+ case 2:
+ if (is_user) {
+ return prot_ro;
+ } else {
+ return PAGE_READ | PAGE_WRITE;
+ }
+ case 3:
+ return PAGE_READ | PAGE_WRITE;
+ case 4: /* Reserved. */
+ return 0;
+ case 5:
+ return is_user ? 0 : prot_ro;
+ case 6:
+ return prot_ro;
+ case 7:
+ if (!arm_feature(env, ARM_FEATURE_V6K)) {
+ return 0;
+ }
+ return prot_ro;
+ default:
+ abort();
+ }
+}
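
The retyped check_ap() is easiest to follow with a worked case. Using the conventions above (access_type: 0 read, 1 write, 2 execute; domain_prot 1 = client, 3 = manager), AP == 2 means privileged read/write but user read-only, so, illustratively:

    assert(check_ap(env, ARMMMUIdx_S1NSE0, 2, 1, 0) == PAGE_READ); /* user read */
    assert(check_ap(env, ARMMMUIdx_S1NSE0, 2, 1, 1) == 0);         /* user write: fault */
    assert(check_ap(env, ARMMMUIdx_S1NSE1, 2, 1, 1) ==
           (PAGE_READ | PAGE_WRITE));                              /* priv write */
    assert(check_ap(env, ARMMMUIdx_S1NSE0, 2, 3, 1) ==
           (PAGE_READ | PAGE_WRITE));                              /* manager domain wins */
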
+
+static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
+ uint32_t *table, uint32_t address)
+{
+ /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
+ int el = regime_el(env, mmu_idx);
+ TCR *tcr = regime_tcr(env, mmu_idx);
+
if (address & tcr->mask) {
if (tcr->raw_tcr & TTBCR_PD1) {
/* Translation table walk disabled for TTBR1 */
return false;
}
- *table = A32_BANKED_CURRENT_REG_GET(env, ttbr1) & 0xffffc000;
+ *table = env->cp15.ttbr1_el[el] & 0xffffc000;
} else {
if (tcr->raw_tcr & TTBCR_PD0) {
/* Translation table walk disabled for TTBR0 */
return false;
}
- *table = A32_BANKED_CURRENT_REG_GET(env, ttbr0) & tcr->base_mask;
+ *table = env->cp15.ttbr0_el[el] & tcr->base_mask;
}
*table |= (address >> 18) & 0x3ffc;
return true;
}
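
The TTBR0/TTBR1 split now comes from the regime's own TCR rather than from A32_BANKED_CURRENT_REG_GET(). A worked example, assuming the usual short-descriptor meaning of TTBCR.N (tcr->mask covering the top N bits of the VA):

    /* With TTBCR.N == 2, tcr->mask == 0xc0000000:
     *   VA 0x00001000 -> TTBR0 walk (unless TTBCR.PD0 is set)
     *   VA 0xc0000000 -> TTBR1 walk (unless TTBCR.PD1 is set)
     * Either way, (address >> 18) & 0x3ffc turns VA bits [31:20] into a
     * word offset into the level 1 table.
     */
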
static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
- int is_user, hwaddr *phys_ptr,
+ ARMMMUIdx mmu_idx, hwaddr *phys_ptr,
int *prot, target_ulong *page_size)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
@@ -4556,10 +4791,11 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
int domain = 0;
int domain_prot;
hwaddr phys_addr;
+ uint32_t dacr;
/* Pagetable walk. */
/* Lookup l1 descriptor. */
- if (!get_level1_table_address(env, &table, address)) {
+ if (!get_level1_table_address(env, mmu_idx, &table, address)) {
/* Section translation fault if page walk is disabled by PD0 or PD1 */
code = 5;
goto do_fault;
@@ -4567,7 +4803,12 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
desc = ldl_phys(cs->as, table);
type = (desc & 3);
domain = (desc >> 5) & 0x0f;
- domain_prot = (A32_BANKED_CURRENT_REG_GET(env, dacr) >> (domain * 2)) & 3;
+ if (regime_el(env, mmu_idx) == 1) {
+ dacr = env->cp15.dacr_ns;
+ } else {
+ dacr = env->cp15.dacr_s;
+ }
+ domain_prot = (dacr >> (domain * 2)) & 3;
if (type == 0) {
/* Section translation fault. */
code = 5;
@@ -4588,13 +4829,13 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
*page_size = 1024 * 1024;
} else {
/* Lookup l2 entry. */
- if (type == 1) {
- /* Coarse pagetable. */
- table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
- } else {
- /* Fine pagetable. */
- table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
- }
+ if (type == 1) {
+ /* Coarse pagetable. */
+ table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
+ } else {
+ /* Fine pagetable. */
+ table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
+ }
desc = ldl_phys(cs->as, table);
switch (desc & 3) {
case 0: /* Page translation fault. */
@@ -4611,17 +4852,17 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
*page_size = 0x1000;
break;
case 3: /* 1k page. */
- if (type == 1) {
- if (arm_feature(env, ARM_FEATURE_XSCALE)) {
- phys_addr = (desc & 0xfffff000) | (address & 0xfff);
- } else {
- /* Page translation fault. */
- code = 7;
- goto do_fault;
- }
- } else {
- phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
- }
+ if (type == 1) {
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+ } else {
+ /* Page translation fault. */
+ code = 7;
+ goto do_fault;
+ }
+ } else {
+ phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
+ }
ap = (desc >> 4) & 3;
*page_size = 0x400;
break;
@@ -4631,7 +4872,7 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
}
code = 15;
}
- *prot = check_ap(env, ap, domain_prot, access_type, is_user);
+ *prot = check_ap(env, mmu_idx, ap, domain_prot, access_type);
if (!*prot) {
/* Access permission fault. */
goto do_fault;
@@ -4644,7 +4885,7 @@ do_fault:
}
static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
- int is_user, hwaddr *phys_ptr,
+ ARMMMUIdx mmu_idx, hwaddr *phys_ptr,
int *prot, target_ulong *page_size)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
@@ -4658,10 +4899,11 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
int domain = 0;
int domain_prot;
hwaddr phys_addr;
+ uint32_t dacr;
/* Pagetable walk. */
/* Lookup l1 descriptor. */
- if (!get_level1_table_address(env, &table, address)) {
+ if (!get_level1_table_address(env, mmu_idx, &table, address)) {
/* Section translation fault if page walk is disabled by PD0 or PD1 */
code = 5;
goto do_fault;
@@ -4679,7 +4921,12 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
/* Page or Section. */
domain = (desc >> 5) & 0x0f;
}
- domain_prot = (A32_BANKED_CURRENT_REG_GET(env, dacr) >> (domain * 2)) & 3;
+ if (regime_el(env, mmu_idx) == 1) {
+ dacr = env->cp15.dacr_ns;
+ } else {
+ dacr = env->cp15.dacr_s;
+ }
+ domain_prot = (dacr >> (domain * 2)) & 3;
if (domain_prot == 0 || domain_prot == 2) {
if (type != 1) {
code = 9; /* Section domain fault. */
@@ -4733,20 +4980,20 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
if (domain_prot == 3) {
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
} else {
- if (pxn && !is_user) {
+ if (pxn && !regime_is_user(env, mmu_idx)) {
xn = 1;
}
if (xn && access_type == 2)
goto do_fault;
/* The simplified model uses AP[0] as an access control bit. */
- if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_AFE)
+ if ((regime_sctlr(env, mmu_idx) & SCTLR_AFE)
&& (ap & 1) == 0) {
/* Access flag fault. */
code = (code == 15) ? 6 : 3;
goto do_fault;
}
- *prot = check_ap(env, ap, domain_prot, access_type, is_user);
+ *prot = check_ap(env, mmu_idx, ap, domain_prot, access_type);
if (!*prot) {
/* Access permission fault. */
goto do_fault;
@@ -4771,7 +5018,7 @@ typedef enum {
} MMUFaultType;
static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
- int access_type, int is_user,
+ int access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, int *prot,
target_ulong *page_size_ptr)
{
@@ -4791,9 +5038,17 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
int32_t granule_sz = 9;
int32_t va_size = 32;
int32_t tbi = 0;
- TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
-
- if (arm_el_is_aa64(env, 1)) {
+ bool is_user;
+ TCR *tcr = regime_tcr(env, mmu_idx);
+
+ /* TODO:
+ * This code assumes we're either a 64-bit EL1 or a 32-bit PL1;
+ * it doesn't handle the different format TCR for TCR_EL2, TCR_EL3,
+ * and VTCR_EL2, or the fact that those regimes don't have a split
+ * TTBR0/TTBR1. Attribute and permission bit handling should also
+ * be checked when adding support for those page table walks.
+ */
+ if (arm_el_is_aa64(env, regime_el(env, mmu_idx))) {
va_size = 64;
if (extract64(address, 55, 1))
tbi = extract64(tcr->raw_tcr, 38, 1);
@@ -4808,12 +5063,12 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
* TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
*/
uint32_t t0sz = extract32(tcr->raw_tcr, 0, 6);
- if (arm_el_is_aa64(env, 1)) {
+ if (va_size == 64) {
t0sz = MIN(t0sz, 39);
t0sz = MAX(t0sz, 16);
}
uint32_t t1sz = extract32(tcr->raw_tcr, 16, 6);
- if (arm_el_is_aa64(env, 1)) {
+ if (va_size == 64) {
t1sz = MIN(t1sz, 39);
t1sz = MAX(t1sz, 16);
}
@@ -4868,6 +5123,10 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
}
}
+ /* Here we should have set up all the parameters for the translation:
+ * va_size, ttbr, epd, tsz, granule_sz, tbi
+ */
+
if (epd) {
/* Translation table walk disabled => Translation fault on TLB miss */
goto do_fault;
@@ -4953,6 +5212,7 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
goto do_fault;
}
fault_type = permission_fault;
+ is_user = regime_is_user(env, mmu_idx);
if (is_user && !(attrs & (1 << 4))) {
/* Unprivileged access not enabled */
goto do_fault;
@@ -4987,27 +5247,31 @@ do_fault:
}
static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
- int access_type, int is_user,
+ int access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, int *prot)
{
int n;
uint32_t mask;
uint32_t base;
+ bool is_user = regime_is_user(env, mmu_idx);
*phys_ptr = address;
for (n = 7; n >= 0; n--) {
- base = env->cp15.c6_region[n];
- if ((base & 1) == 0)
- continue;
- mask = 1 << ((base >> 1) & 0x1f);
- /* Keep this shift separate from the above to avoid an
- (undefined) << 32. */
- mask = (mask << 1) - 1;
- if (((base ^ address) & ~mask) == 0)
- break;
- }
- if (n < 0)
- return 2;
+ base = env->cp15.c6_region[n];
+ if ((base & 1) == 0) {
+ continue;
+ }
+ mask = 1 << ((base >> 1) & 0x1f);
+ /* Keep this shift separate from the above to avoid an
+ (undefined) << 32. */
+ mask = (mask << 1) - 1;
+ if (((base ^ address) & ~mask) == 0) {
+ break;
+ }
+ }
+ if (n < 0) {
+ return 2;
+ }
if (access_type == 2) {
mask = env->cp15.pmsav5_insn_ap;
@@ -5017,31 +5281,34 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
mask = (mask >> (n * 4)) & 0xf;
switch (mask) {
case 0:
- return 1;
+ return 1;
case 1:
- if (is_user)
- return 1;
- *prot = PAGE_READ | PAGE_WRITE;
- break;
+ if (is_user) {
+ return 1;
+ }
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
case 2:
- *prot = PAGE_READ;
- if (!is_user)
- *prot |= PAGE_WRITE;
- break;
+ *prot = PAGE_READ;
+ if (!is_user) {
+ *prot |= PAGE_WRITE;
+ }
+ break;
case 3:
- *prot = PAGE_READ | PAGE_WRITE;
- break;
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
case 5:
- if (is_user)
- return 1;
- *prot = PAGE_READ;
- break;
+ if (is_user) {
+ return 1;
+ }
+ *prot = PAGE_READ;
+ break;
case 6:
- *prot = PAGE_READ;
- break;
+ *prot = PAGE_READ;
+ break;
default:
- /* Bad permission. */
- return 1;
+ /* Bad permission. */
+ return 1;
}
*prot |= PAGE_EXEC;
return 0;
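
The PMSAv5 match loop above decodes each c6_region word in place; a sketch of the encoding it assumes:

    /* Region word layout, as used by the loop:
     *   bit 0      : region enable
     *   bits [5:1] : size field S -- the region covers 2^(S + 1) bytes
     *   high bits  : size-aligned region base
     * Example: S == 11 gives mask = ((1 << 11) << 1) - 1 = 0xfff, a 4KB
     * region. The loop runs from 7 down to 0 and breaks on the first hit,
     * so the highest-numbered matching region wins.
     */
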
@@ -5065,44 +5332,60 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
* @env: CPUARMState
* @address: virtual address to get physical address for
* @access_type: 0 for read, 1 for write, 2 for execute
- * @is_user: 0 for privileged access, 1 for user
+ * @mmu_idx: MMU index indicating required translation regime
* @phys_ptr: set to the physical address corresponding to the virtual address
* @prot: set to the permissions for the page containing phys_ptr
* @page_size: set to the size of the page containing phys_ptr
*/
static inline int get_phys_addr(CPUARMState *env, target_ulong address,
- int access_type, int is_user,
+ int access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, int *prot,
target_ulong *page_size)
{
- /* This is not entirely correct as get_phys_addr() can also be called
- * from ats_write() for an address translation of a specific regime.
- */
- uint32_t sctlr = A32_BANKED_CURRENT_REG_GET(env, sctlr);
+ if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
+ /* TODO: when we support EL2 we should here call ourselves recursively
+ * to do the stage 1 and then stage 2 translations. The ldl_phys
+ * calls for stage 1 will also need changing.
+ * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
+ */
+ assert(!arm_feature(env, ARM_FEATURE_EL2));
+ mmu_idx += ARMMMUIdx_S1NSE0;
+ }
- /* Fast Context Switch Extension. */
- if (address < 0x02000000) {
- address += A32_BANKED_CURRENT_REG_GET(env, fcseidr);
+ /* Fast Context Switch Extension. This doesn't exist at all in v8.
+ * In v7 and earlier it affects all stage 1 translations.
+ */
+ if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
+ && !arm_feature(env, ARM_FEATURE_V8)) {
+ if (regime_el(env, mmu_idx) == 3) {
+ address += env->cp15.fcseidr_s;
+ } else {
+ address += env->cp15.fcseidr_ns;
+ }
}
- if ((sctlr & SCTLR_M) == 0) {
+ if (regime_translation_disabled(env, mmu_idx)) {
/* MMU/MPU disabled. */
*phys_ptr = address;
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
*page_size = TARGET_PAGE_SIZE;
return 0;
- } else if (arm_feature(env, ARM_FEATURE_MPU)) {
+ }
+
+ if (arm_feature(env, ARM_FEATURE_MPU)) {
*page_size = TARGET_PAGE_SIZE;
- return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
- prot);
- } else if (extended_addresses_enabled(env)) {
- return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
+ return get_phys_addr_mpu(env, address, access_type, mmu_idx, phys_ptr,
+ prot);
+ }
+
+ if (regime_using_lpae_format(env, mmu_idx)) {
+ return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
prot, page_size);
- } else if (sctlr & SCTLR_XP) {
- return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
+ } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
+ return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
prot, page_size);
} else {
- return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
+ return get_phys_addr_v5(env, address, access_type, mmu_idx, phys_ptr,
prot, page_size);
}
}
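
After the rewrite, get_phys_addr() is a function of (address, access_type, mmu_idx) alone. The resulting decision ladder, as a sketch:

    /* 0. fold S12NSE0/1 to S1NSE0/1 (no EL2 support yet)
     * 1. FCSE offset for VAs below 32MB (v7 and earlier, stage 1 only)
     * 2. regime_translation_disabled() -> flat map, PAGE_READ|WRITE|EXEC
     * 3. ARM_FEATURE_MPU               -> get_phys_addr_mpu()
     * 4. regime_using_lpae_format()    -> get_phys_addr_lpae()
     * 5. SCTLR.XP set                  -> get_phys_addr_v6()
     * 6. otherwise                     -> get_phys_addr_v5()
     */
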
@@ -5115,12 +5398,11 @@ int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
hwaddr phys_addr;
target_ulong page_size;
int prot;
- int ret, is_user;
+ int ret;
uint32_t syn;
bool same_el = (arm_current_el(env) != 0);
- is_user = mmu_idx == MMU_USER_IDX;
- ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
+ ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr, &prot,
&page_size);
if (ret == 0) {
/* Map a single [sub]page. */
@@ -5156,12 +5438,14 @@ int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
hwaddr phys_addr;
target_ulong page_size;
int prot;
int ret;
- ret = get_phys_addr(&cpu->env, addr, 0, 0, &phys_addr, &prot, &page_size);
+ ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env), &phys_addr,
+ &prot, &page_size);
if (ret != 0) {
return -1;
@@ -6242,7 +6526,7 @@ float64 HELPER(recpe_f64)(float64 input, void *fpstp)
} else {
return float64_set_sign(float64_maxnorm, float64_is_neg(f64));
}
- } else if (f64_exp >= 1023 && fpst->flush_to_zero) {
+ } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
float_raise(float_flag_underflow, fpst);
return float64_set_sign(float64_zero, float64_is_neg(f64));
}
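
The new bound follows from float64's exponent bias of 1023: a value with biased exponent e has a reciprocal with biased exponent of roughly 2 * 1023 - 1 - e = 2045 - e, so e >= 2045 puts the result at or below the smallest normal number. The old threshold of 1023 flushed the reciprocal of anything >= 1.0. A worked check:

    /* For x = 1.5 * 2^1022, f64_exp == 2045 and 1/x == (2/3) * 2^-1022,
     * which is subnormal; with FZ set, FRECPE must return a signed zero
     * and raise Underflow -- exactly what this branch now does.
     */
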
diff --git a/target-arm/kvm64.c b/target-arm/kvm64.c
index ba16821737..033babf551 100644
--- a/target-arm/kvm64.c
+++ b/target-arm/kvm64.c
@@ -193,9 +193,12 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
}
+ if (!write_list_to_kvmstate(cpu)) {
+ return EINVAL;
+ }
+
/* TODO:
* FP state
- * system registers
*/
return ret;
}
@@ -269,6 +272,14 @@ int kvm_arch_get_registers(CPUState *cs)
}
}
+ if (!write_kvmstate_to_list(cpu)) {
+ return EINVAL;
+ }
+ /* Note that it's OK to have registers which aren't in CPUState,
+ * so we can ignore a failure return here.
+ */
+ write_list_to_cpustate(cpu);
+
/* TODO: other registers */
return ret;
}
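
With these two hunks the cpreg list becomes the transport for system-register state on the KVM round trip. The data flow, sketched using only the helpers named in the patch:

    /* put (VM -> KVM):
     *   write_list_to_kvmstate(cpu):  cpu->cpreg_values[] -> KVM, one
     *                                 register at a time
     * get (KVM -> VM):
     *   write_kvmstate_to_list(cpu):  KVM -> cpu->cpreg_values[]
     *   write_list_to_cpustate(cpu):  cpreg_values[] -> CPUARMState fields;
     *   list entries with no CPUState backing are legal, hence the ignored
     *   return value above.
     */
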
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index 80d23597c7..acf4b162bd 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -123,6 +123,23 @@ void a64_translate_init(void)
#endif
}
+static inline ARMMMUIdx get_a64_user_mem_index(DisasContext *s)
+{
+ /* Return the mmu_idx to use for A64 "unprivileged load/store" insns:
+ * if EL1, access as if EL0; otherwise access at current EL
+ */
+ switch (s->mmu_idx) {
+ case ARMMMUIdx_S12NSE1:
+ return ARMMMUIdx_S12NSE0;
+ case ARMMMUIdx_S1SE1:
+ return ARMMMUIdx_S1SE0;
+ case ARMMMUIdx_S2NS:
+ g_assert_not_reached();
+ default:
+ return s->mmu_idx;
+ }
+}
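
A worked reading of the switch above:

    /* At NS EL1:     S12NSE1 -> S12NSE0 (LDTR/STTR act as an EL0 access)
     * At Secure EL1: S1SE1   -> S1SE0
     * At EL2/EL3:    unchanged -- the "unprivileged" forms behave as
     *                normal loads/stores there.
     * S2NS can never be a CPU's operating regime, hence the assert.
     */
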
+
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
fprintf_function cpu_fprintf, int flags)
{
@@ -2107,7 +2124,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
}
} else {
TCGv_i64 tcg_rt = cpu_reg(s, rt);
- int memidx = is_unpriv ? 1 : get_mem_index(s);
+ int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
if (is_store) {
do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx);
@@ -10922,14 +10939,15 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
dc->bswap_code = 0;
dc->condexec_mask = 0;
dc->condexec_cond = 0;
+ dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
+ dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
- dc->user = (ARM_TBFLAG_AA64_EL(tb->flags) == 0);
+ dc->user = (dc->current_el == 0);
#endif
dc->cpacr_fpen = ARM_TBFLAG_AA64_FPEN(tb->flags);
dc->vec_len = 0;
dc->vec_stride = 0;
dc->cp_regs = cpu->cp_regs;
- dc->current_el = arm_current_el(env);
dc->features = env->features;
/* Single step state. The code-generation logic here is:
diff --git a/target-arm/translate.c b/target-arm/translate.c
index bdfcdf169c..1c36b8b05e 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -113,6 +113,28 @@ void arm_translate_init(void)
a64_translate_init();
}
+static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
+{
+ /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
+ * insns:
+ * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
+ * otherwise, access as if at PL0.
+ */
+ switch (s->mmu_idx) {
+ case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
+ case ARMMMUIdx_S12NSE0:
+ case ARMMMUIdx_S12NSE1:
+ return ARMMMUIdx_S12NSE0;
+ case ARMMMUIdx_S1E3:
+ case ARMMMUIdx_S1SE0:
+ case ARMMMUIdx_S1SE1:
+ return ARMMMUIdx_S1SE0;
+ case ARMMMUIdx_S2NS:
+ default:
+ g_assert_not_reached();
+ }
+}
+
static inline TCGv_i32 load_cpu_offset(int offset)
{
TCGv_i32 tmp = tcg_temp_new_i32();
@@ -8739,6 +8761,10 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
ARCH(6T2);
shift = (insn >> 7) & 0x1f;
i = (insn >> 16) & 0x1f;
+ if (i < shift) {
+ /* UNPREDICTABLE; we choose to UNDEF */
+ goto illegal_op;
+ }
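/* Decode note: here i is the msb field (insn[20:16]) and shift the lsb
 * field (insn[11:7]) of BFI/BFC, so the field width is msb + 1 - lsb.
 * msb < lsb would give a non-positive width; the architecture calls that
 * UNPREDICTABLE, and UNDEFing beats building a bogus mask.
 */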
i = i + 1 - shift;
if (rm == 15) {
tmp = tcg_temp_new_i32();
@@ -8793,7 +8819,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
tmp2 = load_reg(s, rn);
if ((insn & 0x01200000) == 0x00200000) {
/* ldrt/strt */
- i = MMU_USER_IDX;
+ i = get_a32_user_mem_index(s);
} else {
i = get_mem_index(s);
}
@@ -10173,7 +10199,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
break;
case 0xe: /* User privilege. */
tcg_gen_addi_i32(addr, addr, imm);
- memidx = MMU_USER_IDX;
+ memidx = get_a32_user_mem_index(s);
break;
case 0x9: /* Post-decrement. */
imm = -imm;
@@ -11032,8 +11058,10 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
+ dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
+ dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
- dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
+ dc->user = (dc->current_el == 0);
#endif
dc->ns = ARM_TBFLAG_NS(tb->flags);
dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
@@ -11042,7 +11070,6 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
dc->cp_regs = cpu->cp_regs;
- dc->current_el = arm_current_el(env);
dc->features = env->features;
/* Single step state. The code-generation logic here is:
diff --git a/target-arm/translate.h b/target-arm/translate.h
index f6ee7892ba..a1eb5b5347 100644
--- a/target-arm/translate.h
+++ b/target-arm/translate.h
@@ -20,6 +20,7 @@ typedef struct DisasContext {
#if !defined(CONFIG_USER_ONLY)
int user;
#endif
+ ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
bool ns; /* Use non-secure CPREG bank on access */
bool cpacr_fpen; /* FP enabled via CPACR.FPEN */
bool vfp_enabled; /* FP enabled via FPSCR.EN */
@@ -69,7 +70,7 @@ static inline int arm_dc_feature(DisasContext *dc, int feature)
static inline int get_mem_index(DisasContext *s)
{
- return s->current_el;
+ return s->mmu_idx;
}
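
get_mem_index() now hands TCG the full translation regime instead of a bare exception level. The value originates in the TB flags, as both translators set up above:

    /* Plumbing added by this patch (sketch):
     *   tb->flags        --ARM_TBFLAG_MMUIDX-->   dc->mmu_idx
     *   dc->mmu_idx      --arm_mmu_idx_to_el()--> dc->current_el
     *   get_mem_index(s) == s->mmu_idx, consumed by the qemu_ld/st ops
     * so a TB is only ever executed under the regime it was translated for.
     */
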
/* target-specific extra values for is_jmp */