Diffstat (limited to 'target/arm/tlb_helper.c')
-rw-r--r--  target/arm/tlb_helper.c  13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
index 7388494a55..b35dc8a011 100644
--- a/target/arm/tlb_helper.c
+++ b/target/arm/tlb_helper.c
@@ -10,8 +10,6 @@
#include "internals.h"
#include "exec/exec-all.h"
-#if !defined(CONFIG_USER_ONLY)
-
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
unsigned int target_el,
bool same_el, bool ea,
@@ -122,6 +120,8 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
     arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
 }
 
+#if !defined(CONFIG_USER_ONLY)
+
 /*
  * arm_cpu_do_transaction_failed: handle a memory system error response
  * (eg "no device/memory present at address") by raising an external abort
@@ -166,6 +166,7 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     int prot, ret;
     MemTxAttrs attrs = {};
     ARMMMUFaultInfo fi = {};
+    ARMCacheAttrs cacheattrs = {};
 
     /*
      * Walk the page table and (if the mapping exists) add the page
@@ -175,7 +176,8 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
      */
     ret = get_phys_addr(&cpu->env, address, access_type,
                         core_to_arm_mmu_idx(&cpu->env, mmu_idx),
-                        &phys_addr, &attrs, &prot, &page_size, &fi, NULL);
+                        &phys_addr, &attrs, &prot, &page_size,
+                        &fi, &cacheattrs);
     if (likely(!ret)) {
         /*
          * Map a single [sub]page. Regions smaller than our declared
@@ -186,6 +188,11 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
             phys_addr &= TARGET_PAGE_MASK;
             address &= TARGET_PAGE_MASK;
         }
+        /* Notice and record tagged memory. */
+        if (cpu_isar_feature(aa64_mte, cpu) && cacheattrs.attrs == 0xf0) {
+            arm_tlb_mte_tagged(&attrs) = true;
+        }
+
         tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                 prot, mmu_idx, page_size);
         return true;
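
For context: the final hunk marks a TLB entry as MTE-tagged only when the CPU implements FEAT_MTE (cpu_isar_feature(aa64_mte, cpu)) and the page-table walk reported the Tagged Normal, Write-Back memory attribute encoding (cacheattrs.attrs == 0xf0), which get_phys_addr() now returns through the newly plumbed cacheattrs argument instead of NULL. The standalone C sketch below mirrors that decision outside of QEMU; the types and helper names (cache_attrs, tlb_attrs, record_tagged_memory) are illustrative stand-ins for ARMCacheAttrs, MemTxAttrs and arm_tlb_mte_tagged(), not QEMU APIs.

/*
 * Standalone sketch (not QEMU source) of the tagged-memory check.
 * A page is recorded as MTE-tagged only when the CPU has MTE and the
 * translation returned memory attribute 0xf0 (Tagged Normal,
 * Inner/Outer Write-Back).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cache_attrs {             /* stand-in for ARMCacheAttrs */
    uint8_t attrs;               /* MAIR-style memory attribute byte */
};

struct tlb_attrs {               /* stand-in for MemTxAttrs */
    bool mte_tagged;             /* stand-in for arm_tlb_mte_tagged() */
};

static void record_tagged_memory(bool cpu_has_mte,
                                 const struct cache_attrs *cacheattrs,
                                 struct tlb_attrs *attrs)
{
    /* Mirrors: cpu_isar_feature(aa64_mte, cpu) && cacheattrs.attrs == 0xf0 */
    if (cpu_has_mte && cacheattrs->attrs == 0xf0) {
        attrs->mte_tagged = true;
    }
}

int main(void)
{
    struct cache_attrs tagged_wb = { .attrs = 0xf0 };  /* Tagged Normal WB */
    struct cache_attrs device    = { .attrs = 0x00 };  /* Device-nGnRnE */
    struct tlb_attrs a = { false }, b = { false };

    record_tagged_memory(true, &tagged_wb, &a);
    record_tagged_memory(true, &device, &b);
    printf("tagged page: %d, device page: %d\n", a.mte_tagged, b.mte_tagged);
    return 0;
}

Compiled with a C99 compiler, this prints "tagged page: 1, device page: 0", matching the behaviour the hunk adds for pages whose stage-1 attributes identify them as tagged.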