author     Peter Maydell <peter.maydell@linaro.org>  2024-10-14 11:12:34 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2024-10-14 11:12:34 +0100
commit     3860a2a8de56fad71db42f4ad120eb7eff03b51f (patch)
tree       1db830a2b3c4308a66f6fbe789e5f77f6ce15321
parent     b38d263bca64bbba36d4b175ea0f5746b4c5604d (diff)
parent     e530581ee06573fcf48c7f7a6c3f8ec6e5809243 (diff)
Merge tag 'pull-tcg-20241013' of https://gitlab.com/rth7680/qemu into staging
linux-user/i386: Emulate orig_ax
linux-user/vm86: Fix compilation with Clang
tcg: remove singlestep_enabled from DisasContextBase
accel/tcg: Add TCGCPUOps.tlb_fill_align
target/hppa: Handle alignment faults in hppa_get_physical_address
target/arm: Fix alignment fault priority in get_phys_addr_lpae

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmcMRU4dHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV9dSQf+MUJq//oig+bDeUlQ
# v3uBMFVi1DBYI1Y/xVODADpn8Ltv5s9v7N+/phi+St2W65OzGNYviHvq/abeyhdo
# M40LGtOvjO6Mns+Z9NKTobtT8n4ap4JJyoFjuXFTHkMMDiQ/v7FkEJJoS3W2bemi
# zmKYF/vWe3bwI+d3+dyaUjA92gSs+Hlj8uEVBlzn3ubA19ZdvtyfKURPQynrkwlo
# dFtAOFRFBU6vrlJSBElxUfYO4jC4Cng19EOrWvIsuKAkACuhiHgah10i3WKw8Asz
# 1iRUYXe0EOlX2RYNTD+Oj5j0cViRylirgPtIhEIPBuDP7m1Jy1JO4dVARUJBBU71
# Zd4Uuw==
# =EX+a
# -----END PGP SIGNATURE-----
# gpg: Signature made Sun 13 Oct 2024 23:10:22 BST
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20241013' of https://gitlab.com/rth7680/qemu: (27 commits)
  target/arm: Fix alignment fault priority in get_phys_addr_lpae
  target/arm: Implement TCGCPUOps.tlb_fill_align
  target/arm: Move device detection earlier in get_phys_addr_lpae
  target/arm: Pass MemOp to get_phys_addr_lpae
  target/arm: Pass MemOp through get_phys_addr_twostage
  target/arm: Pass MemOp to get_phys_addr_nogpc
  target/arm: Pass MemOp to get_phys_addr_gpc
  target/arm: Pass MemOp to get_phys_addr_with_space_nogpc
  target/arm: Pass MemOp to get_phys_addr
  target/hppa: Implement TCGCPUOps.tlb_fill_align
  target/hppa: Handle alignment faults in hppa_get_physical_address
  target/hppa: Fix priority of T, D, and B page faults
  target/hppa: Perform access rights before protection id check
  target/hppa: Add MemOp argument to hppa_get_physical_address
  accel/tcg: Use the alignment test in tlb_fill_align
  accel/tcg: Add TCGCPUOps.tlb_fill_align
  include/exec/memop: Introduce memop_atomicity_bits
  include/exec/memop: Rename get_alignment_bits
  include/exec/memop: Move get_alignment_bits from tcg.h
  accel/tcg: Assert noreturn from write-only page for atomics
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  accel/tcg/cputlb.c                                 | 160
-rw-r--r--  accel/tcg/translator.c                             |   1
-rw-r--r--  accel/tcg/user-exec.c                              |   4
-rw-r--r--  configs/targets/i386-linux-user.mak                |   2
-rw-r--r--  configs/targets/x86_64-linux-user.mak              |   2
-rw-r--r--  gdb-xml/i386-32bit-linux.xml                       |  11
-rw-r--r--  gdb-xml/i386-64bit-linux.xml                       |  11
-rw-r--r--  include/exec/cpu-common.h                          |  13
-rw-r--r--  include/exec/memop.h                               |  47
-rw-r--r--  include/exec/translator.h                          |   2
-rw-r--r--  include/hw/core/cpu.h                              |   4
-rw-r--r--  include/hw/core/tcg-cpu-ops.h                      |  26
-rw-r--r--  include/qemu/typedefs.h                            |   1
-rw-r--r--  include/tcg/tcg.h                                  |  23
-rw-r--r--  linux-user/elfload.c                               |   6
-rw-r--r--  linux-user/i386/cpu_loop.c                         |   3
-rw-r--r--  linux-user/qemu.h                                  |   4
-rw-r--r--  linux-user/vm86.c                                  |  65
-rw-r--r--  target/arm/cpu.c                                   |   2
-rw-r--r--  target/arm/helper.c                                |   9
-rw-r--r--  target/arm/internals.h                             |  12
-rw-r--r--  target/arm/ptw.c                                   | 141
-rw-r--r--  target/arm/tcg/cpu-v7m.c                           |   2
-rw-r--r--  target/arm/tcg/m_helper.c                          |   8
-rw-r--r--  target/arm/tcg/tlb_helper.c                        |  47
-rw-r--r--  target/arm/tcg/translate-a64.c                     |   4
-rw-r--r--  target/hppa/cpu.c                                  |   2
-rw-r--r--  target/hppa/cpu.h                                  |   8
-rw-r--r--  target/hppa/int_helper.c                           |   2
-rw-r--r--  target/hppa/mem_helper.c                           |  55
-rw-r--r--  target/hppa/op_helper.c                            |   2
-rw-r--r--  target/i386/cpu.c                                  |   1
-rw-r--r--  target/i386/cpu.h                                  |   1
-rw-r--r--  target/i386/gdbstub.c                              | 102
-rw-r--r--  target/mips/tcg/translate.c                        |   5
-rw-r--r--  target/xtensa/translate.c                          |   2
-rw-r--r--  tcg/arm/tcg-target.c.inc                           |   4
-rw-r--r--  tcg/sparc64/tcg-target.c.inc                       |   2
-rw-r--r--  tcg/tcg-op-ldst.c                                  |   6
-rw-r--r--  tcg/tcg.c                                          |   2
-rw-r--r--  tests/tcg/multiarch/gdbstub/test-proc-mappings.py |  17
41 files changed, 463 insertions, 358 deletions
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 117b516739..b76a4eac4e 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1221,22 +1221,35 @@ void tlb_set_page(CPUState *cpu, vaddr addr,
}
/*
- * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
- * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
- * be discarded and looked up again (e.g. via tlb_entry()).
+ * Note: tlb_fill_align() can trigger a resize of the TLB.
+ * This means that all of the caller's prior references to the TLB table
+ * (e.g. CPUTLBEntry pointers) must be discarded and looked up again
+ * (e.g. via tlb_entry()).
*/
-static void tlb_fill(CPUState *cpu, vaddr addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+static bool tlb_fill_align(CPUState *cpu, vaddr addr, MMUAccessType type,
+ int mmu_idx, MemOp memop, int size,
+ bool probe, uintptr_t ra)
{
- bool ok;
+ const TCGCPUOps *ops = cpu->cc->tcg_ops;
+ CPUTLBEntryFull full;
- /*
- * This is not a probe, so only valid return is success; failure
- * should result in exception + longjmp to the cpu loop.
- */
- ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
- access_type, mmu_idx, false, retaddr);
- assert(ok);
+ if (ops->tlb_fill_align) {
+ if (ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx,
+ memop, size, probe, ra)) {
+ tlb_set_page_full(cpu, mmu_idx, addr, &full);
+ return true;
+ }
+ } else {
+ /* Legacy behaviour is alignment before paging. */
+ if (addr & ((1u << memop_alignment_bits(memop)) - 1)) {
+ ops->do_unaligned_access(cpu, addr, type, mmu_idx, ra);
+ }
+ if (ops->tlb_fill(cpu, addr, size, type, mmu_idx, probe, ra)) {
+ return true;
+ }
+ }
+ assert(probe);
+ return false;
}
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
@@ -1351,22 +1364,22 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,
if (!tlb_hit_page(tlb_addr, page_addr)) {
if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
- if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
- mmu_idx, nonfault, retaddr)) {
+ if (!tlb_fill_align(cpu, addr, access_type, mmu_idx,
+ 0, fault_size, nonfault, retaddr)) {
/* Non-faulting page table read failed. */
*phost = NULL;
*pfull = NULL;
return TLB_INVALID_MASK;
}
- /* TLB resize via tlb_fill may have moved the entry. */
+ /* TLB resize via tlb_fill_align may have moved the entry. */
index = tlb_index(cpu, mmu_idx, addr);
entry = tlb_entry(cpu, mmu_idx, addr);
/*
* With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
- * to force the next access through tlb_fill. We've just
- * called tlb_fill, so we know that this entry *is* valid.
+ * to force the next access through tlb_fill_align. We've just
+ * called tlb_fill_align, so we know that this entry *is* valid.
*/
flags &= ~TLB_INVALID_MASK;
}
@@ -1607,16 +1620,17 @@ typedef struct MMULookupLocals {
* mmu_lookup1: translate one page
* @cpu: generic cpu state
* @data: lookup parameters
+ * @memop: memory operation for the access, or 0
* @mmu_idx: virtual address context
* @access_type: load/store/code
* @ra: return address into tcg generated code, or 0
*
* Resolve the translation for the one page at @data.addr, filling in
* the rest of @data with the results. If the translation fails,
- * tlb_fill will longjmp out. Return true if the softmmu tlb for
+ * tlb_fill_align will longjmp out. Return true if the softmmu tlb for
* @mmu_idx may have resized.
*/
-static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
+static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
int mmu_idx, MMUAccessType access_type, uintptr_t ra)
{
vaddr addr = data->addr;
@@ -1631,7 +1645,8 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
if (!tlb_hit(tlb_addr, addr)) {
if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
addr & TARGET_PAGE_MASK)) {
- tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
+ tlb_fill_align(cpu, addr, access_type, mmu_idx,
+ memop, data->size, false, ra);
maybe_resized = true;
index = tlb_index(cpu, mmu_idx, addr);
entry = tlb_entry(cpu, mmu_idx, addr);
@@ -1643,6 +1658,25 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
flags |= full->slow_flags[access_type];
+ if (likely(!maybe_resized)) {
+ /* Alignment has not been checked by tlb_fill_align. */
+ int a_bits = memop_alignment_bits(memop);
+
+ /*
+ * This alignment check differs from the one above, in that this is
+ * based on the atomicity of the operation. The intended use case is
+ * the ARM memory type field of each PTE, where access to pages with
+ * Device memory type require alignment.
+ */
+ if (unlikely(flags & TLB_CHECK_ALIGNED)) {
+ int at_bits = memop_atomicity_bits(memop);
+ a_bits = MAX(a_bits, at_bits);
+ }
+ if (unlikely(addr & ((1 << a_bits) - 1))) {
+ cpu_unaligned_access(cpu, addr, access_type, mmu_idx, ra);
+ }
+ }
+
data->full = full;
data->flags = flags;
/* Compute haddr speculatively; depending on flags it might be invalid. */
@@ -1699,7 +1733,6 @@ static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
{
- unsigned a_bits;
bool crosspage;
int flags;
@@ -1708,12 +1741,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
- /* Handle CPU specific unaligned behaviour */
- a_bits = get_alignment_bits(l->memop);
- if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
- }
-
l->page[0].addr = addr;
l->page[0].size = memop_size(l->memop);
l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
@@ -1721,7 +1748,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
if (likely(!crosspage)) {
- mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
+ mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
flags = l->page[0].flags;
if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@@ -1740,8 +1767,8 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
* Lookup both pages, recognizing exceptions from either. If the
* second lookup potentially resized, refresh first CPUTLBEntryFull.
*/
- mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
- if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
+ mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
+ if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) {
uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
}
@@ -1760,31 +1787,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
tcg_debug_assert((flags & TLB_BSWAP) == 0);
}
- /*
- * This alignment check differs from the one above, in that this is
- * based on the atomicity of the operation. The intended use case is
- * the ARM memory type field of each PTE, where access to pages with
- * Device memory type require alignment.
- */
- if (unlikely(flags & TLB_CHECK_ALIGNED)) {
- MemOp size = l->memop & MO_SIZE;
-
- switch (l->memop & MO_ATOM_MASK) {
- case MO_ATOM_NONE:
- size = MO_8;
- break;
- case MO_ATOM_IFALIGN_PAIR:
- case MO_ATOM_WITHIN16_PAIR:
- size = size ? size - 1 : 0;
- break;
- default:
- break;
- }
- if (addr & ((1 << size) - 1)) {
- cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
- }
- }
-
return crosspage;
}
@@ -1797,34 +1799,18 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
{
uintptr_t mmu_idx = get_mmuidx(oi);
MemOp mop = get_memop(oi);
- int a_bits = get_alignment_bits(mop);
uintptr_t index;
CPUTLBEntry *tlbe;
vaddr tlb_addr;
void *hostaddr;
CPUTLBEntryFull *full;
+ bool did_tlb_fill = false;
tcg_debug_assert(mmu_idx < NB_MMU_MODES);
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
- /* Enforce guest required alignment. */
- if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
- /* ??? Maybe indicate atomic op to cpu_unaligned_access */
- cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
- /* Enforce qemu required alignment. */
- if (unlikely(addr & (size - 1))) {
- /* We get here if guest alignment was not requested,
- or was not enforced by cpu_unaligned_access above.
- We might widen the access and emulate, but for now
- mark an exception and exit the cpu loop. */
- goto stop_the_world;
- }
-
index = tlb_index(cpu, mmu_idx, addr);
tlbe = tlb_entry(cpu, mmu_idx, addr);
@@ -1833,8 +1819,9 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
if (!tlb_hit(tlb_addr, addr)) {
if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
addr & TARGET_PAGE_MASK)) {
- tlb_fill(cpu, addr, size,
- MMU_DATA_STORE, mmu_idx, retaddr);
+ tlb_fill_align(cpu, addr, MMU_DATA_STORE, mmu_idx,
+ mop, size, false, retaddr);
+ did_tlb_fill = true;
index = tlb_index(cpu, mmu_idx, addr);
tlbe = tlb_entry(cpu, mmu_idx, addr);
}
@@ -1848,15 +1835,32 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
* but addr_read will only be -1 if PAGE_READ was unset.
*/
if (unlikely(tlbe->addr_read == -1)) {
- tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+ tlb_fill_align(cpu, addr, MMU_DATA_LOAD, mmu_idx,
+ 0, size, false, retaddr);
/*
* Since we don't support reads and writes to different
* addresses, and we do have the proper page loaded for
- * write, this shouldn't ever return. But just in case,
- * handle via stop-the-world.
+ * write, this shouldn't ever return.
+ */
+ g_assert_not_reached();
+ }
+
+ /* Enforce guest required alignment, if not handled by tlb_fill_align. */
+ if (!did_tlb_fill && (addr & ((1 << memop_alignment_bits(mop)) - 1))) {
+ cpu_unaligned_access(cpu, addr, MMU_DATA_STORE, mmu_idx, retaddr);
+ }
+
+ /* Enforce qemu required alignment. */
+ if (unlikely(addr & (size - 1))) {
+ /*
+ * We get here if guest alignment was not requested, or was not
+ * enforced by cpu_unaligned_access or tlb_fill_align above.
+ * We might widen the access and emulate, but for now
+ * mark an exception and exit the cpu loop.
*/
goto stop_the_world;
}
+
/* Collect tlb flags for read. */
tlb_addr |= tlbe->addr_read;
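
Note: every alignment test in the hunks above uses the same mask idiom.
memop_alignment_bits() returns the log2 of the required alignment, and an
address is misaligned exactly when any of those low bits are set. A minimal
standalone sketch of that arithmetic (plain C, independent of QEMU headers;
is_misaligned() is an illustrative name, not a QEMU function):

#include <assert.h>
#include <stdint.h>

/* The mask test used by tlb_fill_align, mmu_lookup1, atomic_mmu_lookup. */
static int is_misaligned(uint64_t addr, unsigned a_bits)
{
    return (addr & (((uint64_t)1 << a_bits) - 1)) != 0;
}

int main(void)
{
    assert(!is_misaligned(0x1000, 3)); /* 8-byte aligned address */
    assert(is_misaligned(0x1004, 3));  /* only 4-byte aligned */
    assert(!is_misaligned(0x1004, 2)); /* 4-byte alignment suffices */
    return 0;
}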
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index 113edcffe3..cbad00a517 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -129,7 +129,6 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
db->is_jmp = DISAS_NEXT;
db->num_insns = 0;
db->max_insns = *max_insns;
- db->singlestep_enabled = cflags & CF_SINGLE_STEP;
db->insn_start = NULL;
db->fake_insn = false;
db->host_addr[0] = host_pc;
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 11b6d45e90..51b2c16dbe 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -954,7 +954,7 @@ void page_reset_target_data(target_ulong start, target_ulong last) { }
static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
MemOp mop, uintptr_t ra, MMUAccessType type)
{
- int a_bits = get_alignment_bits(mop);
+ int a_bits = memop_alignment_bits(mop);
void *ret;
/* Enforce guest required alignment. */
@@ -1236,7 +1236,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
int size, uintptr_t retaddr)
{
MemOp mop = get_memop(oi);
- int a_bits = get_alignment_bits(mop);
+ int a_bits = memop_alignment_bits(mop);
void *ret;
/* Enforce guest required alignment. */
diff --git a/configs/targets/i386-linux-user.mak b/configs/targets/i386-linux-user.mak
index 5b2546a430..b72a156473 100644
--- a/configs/targets/i386-linux-user.mak
+++ b/configs/targets/i386-linux-user.mak
@@ -1,4 +1,4 @@
TARGET_ARCH=i386
TARGET_SYSTBL_ABI=i386
TARGET_SYSTBL=syscall_32.tbl
-TARGET_XML_FILES= gdb-xml/i386-32bit.xml
+TARGET_XML_FILES= gdb-xml/i386-32bit.xml gdb-xml/i386-32bit-linux.xml
diff --git a/configs/targets/x86_64-linux-user.mak b/configs/targets/x86_64-linux-user.mak
index 9ceefbb615..86042814d3 100644
--- a/configs/targets/x86_64-linux-user.mak
+++ b/configs/targets/x86_64-linux-user.mak
@@ -2,4 +2,4 @@ TARGET_ARCH=x86_64
TARGET_BASE_ARCH=i386
TARGET_SYSTBL_ABI=common,64
TARGET_SYSTBL=syscall_64.tbl
-TARGET_XML_FILES= gdb-xml/i386-64bit.xml
+TARGET_XML_FILES= gdb-xml/i386-64bit.xml gdb-xml/i386-64bit-linux.xml
diff --git a/gdb-xml/i386-32bit-linux.xml b/gdb-xml/i386-32bit-linux.xml
new file mode 100644
index 0000000000..5ffe5616e6
--- /dev/null
+++ b/gdb-xml/i386-32bit-linux.xml
@@ -0,0 +1,11 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2010-2024 Free Software Foundation, Inc.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-target.dtd">
+<feature name="org.gnu.gdb.i386.linux">
+ <reg name="orig_eax" bitsize="32" type="int"/>
+</feature>
diff --git a/gdb-xml/i386-64bit-linux.xml b/gdb-xml/i386-64bit-linux.xml
new file mode 100644
index 0000000000..0f26990d2f
--- /dev/null
+++ b/gdb-xml/i386-64bit-linux.xml
@@ -0,0 +1,11 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2010-2024 Free Software Foundation, Inc.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-target.dtd">
+<feature name="org.gnu.gdb.i386.linux">
+ <reg name="orig_rax" bitsize="64" type="int"/>
+</feature>
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 2e1b499cb7..638dc806a5 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -239,6 +239,17 @@ static inline ArchCPU *env_archcpu(CPUArchState *env)
}
/**
+ * env_cpu_const(env)
+ * @env: The architecture environment
+ *
+ * Return the CPUState associated with the environment.
+ */
+static inline const CPUState *env_cpu_const(const CPUArchState *env)
+{
+ return (void *)env - sizeof(CPUState);
+}
+
+/**
* env_cpu(env)
* @env: The architecture environment
*
@@ -246,7 +257,7 @@ static inline ArchCPU *env_archcpu(CPUArchState *env)
*/
static inline CPUState *env_cpu(CPUArchState *env)
{
- return (void *)env - sizeof(CPUState);
+ return (CPUState *)env_cpu_const(env);
}
#ifndef CONFIG_USER_ONLY
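
Note: the pointer subtraction in env_cpu_const() works because each target's
ArchCPU places its CPUArchState immediately after the embedded CPUState, a
layout QEMU relies on throughout. A sketch of the assumed layout (field
names are illustrative, not the exact per-target definitions):

struct ArchCPU {
    CPUState parent_obj;    /* the CPUState is placed first...      */
    CPUArchState env;       /* ...with env immediately after it     */
};
/* so (void *)&cpu->env - sizeof(CPUState) recovers the CPUState */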
diff --git a/include/exec/memop.h b/include/exec/memop.h
index f881fe7af4..b699bf7688 100644
--- a/include/exec/memop.h
+++ b/include/exec/memop.h
@@ -170,4 +170,51 @@ static inline bool memop_big_endian(MemOp op)
return (op & MO_BSWAP) == MO_BE;
}
+/**
+ * memop_alignment_bits:
+ * @memop: MemOp value
+ *
+ * Extract the alignment size from the memop.
+ */
+static inline unsigned memop_alignment_bits(MemOp memop)
+{
+ unsigned a = memop & MO_AMASK;
+
+ if (a == MO_UNALN) {
+ /* No alignment required. */
+ a = 0;
+ } else if (a == MO_ALIGN) {
+ /* A natural alignment requirement. */
+ a = memop & MO_SIZE;
+ } else {
+ /* A specific alignment requirement. */
+ a = a >> MO_ASHIFT;
+ }
+ return a;
+}
+
+/*
+ * memop_atomicity_bits:
+ * @memop: MemOp value
+ *
+ * Extract the atomicity size from the memop.
+ */
+static inline unsigned memop_atomicity_bits(MemOp memop)
+{
+ unsigned size = memop & MO_SIZE;
+
+ switch (memop & MO_ATOM_MASK) {
+ case MO_ATOM_NONE:
+ size = MO_8;
+ break;
+ case MO_ATOM_IFALIGN_PAIR:
+ case MO_ATOM_WITHIN16_PAIR:
+ size = size ? size - 1 : 0;
+ break;
+ default:
+ break;
+ }
+ return size;
+}
+
#endif
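
Note: a short usage sketch for the two new helpers (assumes a QEMU
translation unit that includes "exec/memop.h"; the computed values follow
directly from the definitions above):

MemOp op = MO_32 | MO_ALIGN | MO_ATOM_NONE;

unsigned align = memop_alignment_bits(op); /* MO_ALIGN -> natural: MO_32 == 2 */
unsigned atom  = memop_atomicity_bits(op); /* MO_ATOM_NONE -> MO_8 == 0 */

/* The access is suitably aligned when (addr & ((1 << align) - 1)) == 0. */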
diff --git a/include/exec/translator.h b/include/exec/translator.h
index 25004dfb76..d8dcb77b5f 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -71,7 +71,6 @@ typedef enum DisasJumpType {
* @is_jmp: What instruction to disassemble next.
* @num_insns: Number of translated instructions (including current).
* @max_insns: Maximum number of instructions to be translated in this TB.
- * @singlestep_enabled: "Hardware" single stepping enabled.
* @plugin_enabled: TCG plugin enabled in this TB.
* @fake_insn: True if translator_fake_ldb used.
* @insn_start: The last op emitted by the insn_start hook,
@@ -86,7 +85,6 @@ struct DisasContextBase {
DisasJumpType is_jmp;
int num_insns;
int max_insns;
- bool singlestep_enabled;
bool plugin_enabled;
bool fake_insn;
struct TCGOp *insn_start;
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 04e9ad4996..d21a24c82f 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -205,7 +205,7 @@ struct CPUClass {
* so the layout is not as critical as that of CPUTLBEntry. This is
* also why we don't want to combine the two structs.
*/
-typedef struct CPUTLBEntryFull {
+struct CPUTLBEntryFull {
/*
* @xlat_section contains:
* - in the lower TARGET_PAGE_BITS, a physical section number
@@ -261,7 +261,7 @@ typedef struct CPUTLBEntryFull {
bool guarded;
} arm;
} extra;
-} CPUTLBEntryFull;
+};
/*
* Data elements that are per MMU mode, minus the bits accessed by
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
index 34318cf0e6..663efb9133 100644
--- a/include/hw/core/tcg-cpu-ops.h
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -13,6 +13,7 @@
#include "exec/breakpoint.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
+#include "exec/memop.h"
#include "exec/mmu-access-type.h"
#include "exec/vaddr.h"
@@ -132,6 +133,31 @@ struct TCGCPUOps {
*/
bool (*cpu_exec_halt)(CPUState *cpu);
/**
+ * @tlb_fill_align: Handle a softmmu tlb miss
+ * @cpu: cpu context
+ * @out: output page properties
+ * @addr: virtual address
+ * @access_type: read, write or execute
+ * @mmu_idx: mmu context
+ * @memop: memory operation for the access
+ * @size: memory access size, or 0 for whole page
+ * @probe: test only, no fault
+ * @ra: host return address for exception unwind
+ *
+ * If the access is valid, fill in @out and return true.
+ * Otherwise if probe is true, return false.
+ * Otherwise raise an exception and do not return.
+ *
+ * The alignment check for the access is deferred to this hook,
+ * so that the target can determine the priority of any alignment
+ * fault with respect to other potential faults from paging.
+ * Zero may be passed for @memop to skip any alignment check
+ * for non-memory-access operations such as probing.
+ */
+ bool (*tlb_fill_align)(CPUState *cpu, CPUTLBEntryFull *out, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ MemOp memop, int size, bool probe, uintptr_t ra);
+ /**
* @tlb_fill: Handle a softmmu tlb miss
*
* If the access is valid, call tlb_set_page and return true;
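
Note: for orientation, this is the shape a target implementation of the new
hook takes. It is a hedged skeleton with a hypothetical mytarget_ prefix and
a toy identity translation standing in for a real page-table walk; the arm
and hppa patches below are the real examples:

static bool mytarget_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out,
                                    vaddr addr, MMUAccessType access_type,
                                    int mmu_idx, MemOp memop, int size,
                                    bool probe, uintptr_t ra)
{
    /* Alignment first, if this target orders alignment before paging. */
    if (addr & ((1 << memop_alignment_bits(memop)) - 1)) {
        if (probe) {
            return false;
        }
        /* hypothetical helper: raise the alignment fault, no return */
        mytarget_raise_alignment_fault(cs, addr, access_type, mmu_idx, ra);
    }
    /* Walk page tables here; on success fill *out and return true. */
    memset(out, 0, sizeof(*out));
    out->phys_addr = addr & TARGET_PAGE_MASK;
    out->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    out->attrs = MEMTXATTRS_UNSPECIFIED;
    out->lg_page_size = TARGET_PAGE_BITS;
    return true;
}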
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index 9d222dc376..3d84efcac4 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -40,6 +40,7 @@ typedef struct ConfidentialGuestSupport ConfidentialGuestSupport;
typedef struct CPUArchState CPUArchState;
typedef struct CPUPluginState CPUPluginState;
typedef struct CPUState CPUState;
+typedef struct CPUTLBEntryFull CPUTLBEntryFull;
typedef struct DeviceState DeviceState;
typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot;
typedef struct DisasContextBase DisasContextBase;
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index 21d5884741..824fb3560d 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -281,29 +281,6 @@ static inline int tcg_type_size(TCGType t)
return 4 << i;
}
-/**
- * get_alignment_bits
- * @memop: MemOp value
- *
- * Extract the alignment size from the memop.
- */
-static inline unsigned get_alignment_bits(MemOp memop)
-{
- unsigned a = memop & MO_AMASK;
-
- if (a == MO_UNALN) {
- /* No alignment required. */
- a = 0;
- } else if (a == MO_ALIGN) {
- /* A natural alignment requirement. */
- a = memop & MO_SIZE;
- } else {
- /* A specific alignment requirement. */
- a = a >> MO_ASHIFT;
- }
- return a;
-}
-
typedef tcg_target_ulong TCGArg;
/* Define type and accessor macros for TCG variables.
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 52c88a68a9..6cef8db3b5 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -203,7 +203,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
(*regs)[12] = tswapreg(env->regs[R_EDX]);
(*regs)[13] = tswapreg(env->regs[R_ESI]);
(*regs)[14] = tswapreg(env->regs[R_EDI]);
- (*regs)[15] = tswapreg(env->regs[R_EAX]); /* XXX */
+ (*regs)[15] = tswapreg(get_task_state(env_cpu_const(env))->orig_ax);
(*regs)[16] = tswapreg(env->eip);
(*regs)[17] = tswapreg(env->segs[R_CS].selector & 0xffff);
(*regs)[18] = tswapreg(env->eflags);
@@ -306,7 +306,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
(*regs)[8] = tswapreg(env->segs[R_ES].selector & 0xffff);
(*regs)[9] = tswapreg(env->segs[R_FS].selector & 0xffff);
(*regs)[10] = tswapreg(env->segs[R_GS].selector & 0xffff);
- (*regs)[11] = tswapreg(env->regs[R_EAX]); /* XXX */
+ (*regs)[11] = tswapreg(get_task_state(env_cpu_const(env))->orig_ax);
(*regs)[12] = tswapreg(env->eip);
(*regs)[13] = tswapreg(env->segs[R_CS].selector & 0xffff);
(*regs)[14] = tswapreg(env->eflags);
@@ -4314,7 +4314,7 @@ static int wmr_write_region(void *opaque, target_ulong start,
*/
static int elf_core_dump(int signr, const CPUArchState *env)
{
- const CPUState *cpu = env_cpu((CPUArchState *)env);
+ const CPUState *cpu = env_cpu_const(env);
const TaskState *ts = (const TaskState *)get_task_state((CPUState *)cpu);
struct rlimit dumpsize;
CountAndSizeRegions css;
diff --git a/linux-user/i386/cpu_loop.c b/linux-user/i386/cpu_loop.c
index 92beb6830c..7a35215278 100644
--- a/linux-user/i386/cpu_loop.c
+++ b/linux-user/i386/cpu_loop.c
@@ -172,6 +172,7 @@ static void emulate_vsyscall(CPUX86State *env)
/*
* Perform the syscall. None of the vsyscalls should need restarting.
*/
+ get_task_state(env_cpu(env))->orig_ax = syscall;
ret = do_syscall(env, syscall, env->regs[R_EDI], env->regs[R_ESI],
env->regs[R_EDX], env->regs[10], env->regs[8],
env->regs[9], 0, 0);
@@ -221,6 +222,7 @@ void cpu_loop(CPUX86State *env)
case EXCP_SYSCALL:
#endif
/* linux syscall from int $0x80 */
+ get_task_state(cs)->orig_ax = env->regs[R_EAX];
ret = do_syscall(env,
env->regs[R_EAX],
env->regs[R_EBX],
@@ -239,6 +241,7 @@ void cpu_loop(CPUX86State *env)
#ifdef TARGET_X86_64
case EXCP_SYSCALL:
/* linux syscall from syscall instruction. */
+ get_task_state(cs)->orig_ax = env->regs[R_EAX];
ret = do_syscall(env,
env->regs[R_EAX],
env->regs[R_EDI],
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index 98ad848ab2..895bdd722a 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -114,6 +114,10 @@ struct TaskState {
uint32_t v86flags;
uint32_t v86mask;
#endif
+#if defined(TARGET_I386)
+ /* Last syscall number. */
+ target_ulong orig_ax;
+#endif
abi_ulong child_tidptr;
#ifdef TARGET_M68K
abi_ulong tp_value;
diff --git a/linux-user/vm86.c b/linux-user/vm86.c
index 31a2d707cf..5091d53fb8 100644
--- a/linux-user/vm86.c
+++ b/linux-user/vm86.c
@@ -47,30 +47,6 @@ static inline void vm_putw(CPUX86State *env, uint32_t segptr,
cpu_stw_data(env, segptr + (reg16 & 0xffff), val);
}
-static inline void vm_putl(CPUX86State *env, uint32_t segptr,
- unsigned int reg16, unsigned int val)
-{
- cpu_stl_data(env, segptr + (reg16 & 0xffff), val);
-}
-
-static inline unsigned int vm_getb(CPUX86State *env,
- uint32_t segptr, unsigned int reg16)
-{
- return cpu_ldub_data(env, segptr + (reg16 & 0xffff));
-}
-
-static inline unsigned int vm_getw(CPUX86State *env,
- uint32_t segptr, unsigned int reg16)
-{
- return cpu_lduw_data(env, segptr + (reg16 & 0xffff));
-}
-
-static inline unsigned int vm_getl(CPUX86State *env,
- uint32_t segptr, unsigned int reg16)
-{
- return cpu_ldl_data(env, segptr + (reg16 & 0xffff));
-}
-
void save_v86_state(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
@@ -131,19 +107,6 @@ static inline void return_to_32bit(CPUX86State *env, int retval)
env->regs[R_EAX] = retval;
}
-static inline int set_IF(CPUX86State *env)
-{
- CPUState *cs = env_cpu(env);
- TaskState *ts = get_task_state(cs);
-
- ts->v86flags |= VIF_MASK;
- if (ts->v86flags & VIP_MASK) {
- return_to_32bit(env, TARGET_VM86_STI);
- return 1;
- }
- return 0;
-}
-
static inline void clear_IF(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
@@ -162,34 +125,6 @@ static inline void clear_AC(CPUX86State *env)
env->eflags &= ~AC_MASK;
}
-static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
-{
- CPUState *cs = env_cpu(env);
- TaskState *ts = get_task_state(cs);
-
- set_flags(ts->v86flags, eflags, ts->v86mask);
- set_flags(env->eflags, eflags, SAFE_MASK);
- if (eflags & IF_MASK)
- return set_IF(env);
- else
- clear_IF(env);
- return 0;
-}
-
-static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
-{
- CPUState *cs = env_cpu(env);
- TaskState *ts = get_task_state(cs);
-
- set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
- set_flags(env->eflags, flags, SAFE_MASK);
- if (flags & IF_MASK)
- return set_IF(env);
- else
- clear_IF(env);
- return 0;
-}
-
static inline unsigned int get_vflags(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 19191c2391..1320fd8c8f 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -2663,7 +2663,7 @@ static const TCGCPUOps arm_tcg_ops = {
.record_sigsegv = arm_cpu_record_sigsegv,
.record_sigbus = arm_cpu_record_sigbus,
#else
- .tlb_fill = arm_cpu_tlb_fill,
+ .tlb_fill_align = arm_cpu_tlb_fill_align,
.cpu_exec_interrupt = arm_cpu_exec_interrupt,
.cpu_exec_halt = arm_cpu_exec_halt,
.do_interrupt = arm_cpu_do_interrupt,
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 3f77b40734..0a731a38e8 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -3599,11 +3599,12 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
GetPhysAddrResult res = {};
/*
- * I_MXTJT: Granule protection checks are not performed on the final address
- * of a successful translation.
+ * I_MXTJT: Granule protection checks are not performed on the final
+ * address of a successful translation. This is a translation not a
+ * memory reference, so "memop = none = 0".
*/
- ret = get_phys_addr_with_space_nogpc(env, value, access_type, mmu_idx, ss,
- &res, &fi);
+ ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0,
+ mmu_idx, ss, &res, &fi);
/*
* ATS operations only do S1 or S1+S2 translations, so we never
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 1e5da81ce9..299a96a81a 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -816,9 +816,9 @@ void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
MMUAccessType access_type, uintptr_t ra);
#else
-bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
+bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ MemOp memop, int size, bool probe, uintptr_t ra);
#endif
static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
@@ -1432,6 +1432,7 @@ typedef struct GetPhysAddrResult {
* @env: CPUARMState
* @address: virtual address to get physical address for
* @access_type: 0 for read, 1 for write, 2 for execute
+ * @memop: memory operation feeding this access, or 0 for none
* @mmu_idx: MMU index indicating required translation regime
* @result: set on translation success.
* @fi: set to fault info if the translation fails
@@ -1450,7 +1451,7 @@ typedef struct GetPhysAddrResult {
* value.
*/
bool get_phys_addr(CPUARMState *env, vaddr address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
__attribute__((nonnull));
@@ -1460,6 +1461,7 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
* @env: CPUARMState
* @address: virtual address to get physical address for
* @access_type: 0 for read, 1 for write, 2 for execute
+ * @memop: memory operation feeding this access, or 0 for none
* @mmu_idx: MMU index indicating required translation regime
* @space: security space for the access
* @result: set on translation success.
@@ -1469,7 +1471,7 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
* a Granule Protection Check on the resulting address.
*/
bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
- MMUAccessType access_type,
+ MMUAccessType access_type, MemOp memop,
ARMMMUIdx mmu_idx, ARMSecuritySpace space,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 659855133c..dd40268397 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -75,13 +75,13 @@ typedef struct S1Translate {
static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
vaddr address,
- MMUAccessType access_type,
+ MMUAccessType access_type, MemOp memop,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi);
static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
vaddr address,
- MMUAccessType access_type,
+ MMUAccessType access_type, MemOp memop,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi);
@@ -579,7 +579,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
};
GetPhysAddrResult s2 = { };
- if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) {
+ if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, 0, &s2, fi)) {
goto fail;
}
@@ -1684,12 +1684,13 @@ static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw)
* @ptw: Current and next stage parameters for the walk.
* @address: virtual address to get physical address for
* @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
+ * @memop: memory operation feeding this access, or 0 for none
* @result: set on translation success,
* @fi: set to fault info if the translation fails
*/
static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
uint64_t address,
- MMUAccessType access_type,
+ MMUAccessType access_type, MemOp memop,
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = env_archcpu(env);
@@ -2028,8 +2029,20 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
xn = extract64(attrs, 53, 2);
result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
}
+
+ result->cacheattrs.is_s2_format = true;
+ result->cacheattrs.attrs = extract32(attrs, 2, 4);
+ /*
+ * Security state does not really affect HCR_EL2.FWB;
+ * we only need to filter FWB for aa32 or other FEAT.
+ */
+ device = S2_attrs_are_device(arm_hcr_el2_eff(env),
+ result->cacheattrs.attrs);
} else {
int nse, ns = extract32(attrs, 5, 1);
+ uint8_t attrindx;
+ uint64_t mair;
+
switch (out_space) {
case ARMSS_Root:
/*
@@ -2101,6 +2114,49 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
*/
result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn,
result->f.attrs.space, out_space);
+
+ /* Index into MAIR registers for cache attributes */
+ attrindx = extract32(attrs, 2, 3);
+ mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+ assert(attrindx <= 7);
+ result->cacheattrs.is_s2_format = false;
+ result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
+
+ /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
+ if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
+ result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
+ }
+ device = S1_attrs_are_device(result->cacheattrs.attrs);
+ }
+
+ /*
+ * Enable alignment checks on Device memory.
+ *
+ * Per R_XCHFJ, the correct ordering for alignment, permission,
+ * and stage 2 faults is:
+ * - Alignment fault caused by the memory type
+ * - Permission fault
+ * - A stage 2 fault on the memory access
+ * Perform the alignment check now, so that we recognize it in
+ * the correct order. Set TLB_CHECK_ALIGNED so that any subsequent
+ * softmmu tlb hit will also check the alignment; clear along the
+ * non-device path so that tlb_fill_flags is consistent in the
+ * event of restart_atomic_update.
+ *
+ * In v7, for a CPU without the Virtualization Extensions this
+ * access is UNPREDICTABLE; we choose to make it take the alignment
+ * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
+ * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
+ */
+ if (device) {
+ unsigned a_bits = memop_atomicity_bits(memop);
+ if (address & ((1 << a_bits) - 1)) {
+ fi->type = ARMFault_Alignment;
+ goto do_fault;
+ }
+ result->f.tlb_fill_flags = TLB_CHECK_ALIGNED;
+ } else {
+ result->f.tlb_fill_flags = 0;
}
if (!(result->f.prot & (1 << access_type))) {
@@ -2130,51 +2186,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
result->f.attrs.space = out_space;
result->f.attrs.secure = arm_space_is_secure(out_space);
- if (regime_is_stage2(mmu_idx)) {
- result->cacheattrs.is_s2_format = true;
- result->cacheattrs.attrs = extract32(attrs, 2, 4);
- /*
- * Security state does not really affect HCR_EL2.FWB;
- * we only need to filter FWB for aa32 or other FEAT.
- */
- device = S2_attrs_are_device(arm_hcr_el2_eff(env),
- result->cacheattrs.attrs);
- } else {
- /* Index into MAIR registers for cache attributes */
- uint8_t attrindx = extract32(attrs, 2, 3);
- uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
- assert(attrindx <= 7);
- result->cacheattrs.is_s2_format = false;
- result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
-
- /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
- if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
- result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
- }
- device = S1_attrs_are_device(result->cacheattrs.attrs);
- }
-
- /*
- * Enable alignment checks on Device memory.
- *
- * Per R_XCHFJ, this check is mis-ordered. The correct ordering
- * for alignment, permission, and stage 2 faults should be:
- * - Alignment fault caused by the memory type
- * - Permission fault
- * - A stage 2 fault on the memory access
- * but due to the way the TCG softmmu TLB operates, we will have
- * implicitly done the permission check and the stage2 lookup in
- * finding the TLB entry, so the alignment check cannot be done sooner.
- *
- * In v7, for a CPU without the Virtualization Extensions this
- * access is UNPREDICTABLE; we choose to make it take the alignment
- * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
- * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
- */
- if (device) {
- result->f.tlb_fill_flags |= TLB_CHECK_ALIGNED;
- }
-
/*
* For FEAT_LPA2 and effective DS, the SH field in the attributes
* was re-purposed for output address bits. The SH attribute in
@@ -3301,7 +3312,7 @@ static bool get_phys_addr_disabled(CPUARMState *env,
static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
vaddr address,
- MMUAccessType access_type,
+ MMUAccessType access_type, MemOp memop,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
@@ -3313,7 +3324,8 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
ARMSecuritySpace ipa_space;
uint64_t hcr;
- ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi);
+ ret = get_phys_addr_nogpc(env, ptw, address, access_type,
+ memop, result, fi);
/* If S1 fails, return early. */
if (ret) {
@@ -3339,7 +3351,8 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
cacheattrs1 = result->cacheattrs;
memset(result, 0, sizeof(*result));
- ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi);
+ ret = get_phys_addr_nogpc(env, ptw, ipa, access_type,
+ memop, result, fi);
fi->s2addr = ipa;
/* Combine the S1 and S2 perms. */
@@ -3406,7 +3419,7 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
vaddr address,
- MMUAccessType access_type,
+ MMUAccessType access_type, MemOp memop,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
@@ -3469,7 +3482,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
if (arm_feature(env, ARM_FEATURE_EL2) &&
!regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) {
return get_phys_addr_twostage(env, ptw, address, access_type,
- result, fi);
+ memop, result, fi);
}
/* fall through */
@@ -3532,7 +3545,8 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
}
if (regime_using_lpae_format(env, mmu_idx)) {
- return get_phys_addr_lpae(env, ptw, address, access_type, result, fi);
+ return get_phys_addr_lpae(env, ptw, address, access_type,
+ memop, result, fi);
} else if (arm_feature(env, ARM_FEATURE_V7) ||
regime_sctlr(env, mmu_idx) & SCTLR_XP) {
return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
@@ -3543,11 +3557,12 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
vaddr address,
- MMUAccessType access_type,
+ MMUAccessType access_type, MemOp memop,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
- if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) {
+ if (get_phys_addr_nogpc(env, ptw, address, access_type,
+ memop, result, fi)) {
return true;
}
if (!granule_protection_check(env, result->f.phys_addr,
@@ -3559,7 +3574,7 @@ static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
}
bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
- MMUAccessType access_type,
+ MMUAccessType access_type, MemOp memop,
ARMMMUIdx mmu_idx, ARMSecuritySpace space,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
@@ -3568,11 +3583,12 @@ bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
.in_mmu_idx = mmu_idx,
.in_space = space,
};
- return get_phys_addr_nogpc(env, &ptw, address, access_type, result, fi);
+ return get_phys_addr_nogpc(env, &ptw, address, access_type,
+ memop, result, fi);
}
bool get_phys_addr(CPUARMState *env, vaddr address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
S1Translate ptw = {
@@ -3641,7 +3657,8 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
}
ptw.in_space = ss;
- return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
+ return get_phys_addr_gpc(env, &ptw, address, access_type,
+ memop, result, fi);
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
@@ -3660,7 +3677,7 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
ARMMMUFaultInfo fi = {};
bool ret;
- ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
+ ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi);
*attrs = res.f.attrs;
if (ret) {
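
Note: one consequence of using memop_atomicity_bits() for the Device-memory
check above, sketched with values that follow from the memop.h definitions
earlier in this pull (the 16-byte pair operation is an assumed example):

MemOp op = MO_128 | MO_ATOM_IFALIGN_PAIR;
unsigned a_bits = memop_atomicity_bits(op);  /* MO_128 == 4, pair -> 3 */

/*
 * Each half of the pair is an independent 8-byte access, so Device
 * memory only demands 8-byte alignment here:
 * fault when (address & ((1 << 3) - 1)) != 0.
 */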
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
index 5496f14dc1..58e54578d6 100644
--- a/target/arm/tcg/cpu-v7m.c
+++ b/target/arm/tcg/cpu-v7m.c
@@ -242,7 +242,7 @@ static const TCGCPUOps arm_v7m_tcg_ops = {
.record_sigsegv = arm_cpu_record_sigsegv,
.record_sigbus = arm_cpu_record_sigbus,
#else
- .tlb_fill = arm_cpu_tlb_fill,
+ .tlb_fill_align = arm_cpu_tlb_fill_align,
.cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
.cpu_exec_halt = arm_cpu_exec_halt,
.do_interrupt = arm_v7m_cpu_do_interrupt,
diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
index 23d7f73035..f7354f3c6e 100644
--- a/target/arm/tcg/m_helper.c
+++ b/target/arm/tcg/m_helper.c
@@ -222,7 +222,7 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
int exc;
bool exc_secure;
- if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
+ if (get_phys_addr(env, addr, MMU_DATA_STORE, 0, mmu_idx, &res, &fi)) {
/* MPU/SAU lookup failed */
if (fi.type == ARMFault_QEMU_SFault) {
if (mode == STACK_LAZYFP) {
@@ -311,7 +311,7 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
bool exc_secure;
uint32_t value;
- if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
+ if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
/* MPU/SAU lookup failed */
if (fi.type == ARMFault_QEMU_SFault) {
qemu_log_mask(CPU_LOG_INT,
@@ -2009,7 +2009,7 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
"...really SecureFault with SFSR.INVEP\n");
return false;
}
- if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
+ if (get_phys_addr(env, addr, MMU_INST_FETCH, 0, mmu_idx, &res, &fi)) {
/* the MPU lookup failed */
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
@@ -2045,7 +2045,7 @@ static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
ARMMMUFaultInfo fi = {};
uint32_t value;
- if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
+ if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
/* MPU/SAU lookup failed */
if (fi.type == ARMFault_QEMU_SFault) {
qemu_log_mask(CPU_LOG_INT,
diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c
index 885bf4ec14..8841f039bc 100644
--- a/target/arm/tcg/tlb_helper.c
+++ b/target/arm/tcg/tlb_helper.c
@@ -318,14 +318,13 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
-bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
+bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr address,
+ MMUAccessType access_type, int mmu_idx,
+ MemOp memop, int size, bool probe, uintptr_t ra)
{
ARMCPU *cpu = ARM_CPU(cs);
GetPhysAddrResult res = {};
ARMMMUFaultInfo local_fi, *fi;
- int ret;
/*
* Allow S1_ptw_translate to see any fault generated here.
@@ -339,37 +338,27 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
}
/*
- * Walk the page table and (if the mapping exists) add the page
- * to the TLB. On success, return true. Otherwise, if probing,
- * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
- * register format, and signal the fault.
+ * Per R_XCHFJ, alignment fault not due to memory type has
+ * highest precedence. Otherwise, walk the page table and
+ * collect the page description.
*/
- ret = get_phys_addr(&cpu->env, address, access_type,
- core_to_arm_mmu_idx(&cpu->env, mmu_idx),
- &res, fi);
- if (likely(!ret)) {
- /*
- * Map a single [sub]page. Regions smaller than our declared
- * target page size are handled specially, so for those we
- * pass in the exact addresses.
- */
- if (res.f.lg_page_size >= TARGET_PAGE_BITS) {
- res.f.phys_addr &= TARGET_PAGE_MASK;
- address &= TARGET_PAGE_MASK;
- }
-
+ if (address & ((1 << memop_alignment_bits(memop)) - 1)) {
+ fi->type = ARMFault_Alignment;
+ } else if (!get_phys_addr(&cpu->env, address, access_type, memop,
+ core_to_arm_mmu_idx(&cpu->env, mmu_idx),
+ &res, fi)) {
res.f.extra.arm.pte_attrs = res.cacheattrs.attrs;
res.f.extra.arm.shareability = res.cacheattrs.shareability;
-
- tlb_set_page_full(cs, mmu_idx, address, &res.f);
+ *out = res.f;
return true;
- } else if (probe) {
+ }
+ if (probe) {
return false;
- } else {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
}
+
+ /* Now we have a real cpu fault. */
+ cpu_restore_state(cs, ra);
+ arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
}
#else
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 071b6349fc..ec0b1ee252 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -294,7 +294,7 @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
- desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(memop));
+ desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(memop));
desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);
ret = tcg_temp_new_i64();
@@ -326,7 +326,7 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
- desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(single_mop));
+ desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(single_mop));
desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
ret = tcg_temp_new_i64();
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index 7cf2e2f266..c38439c180 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -226,7 +226,7 @@ static const TCGCPUOps hppa_tcg_ops = {
.restore_state_to_opc = hppa_restore_state_to_opc,
#ifndef CONFIG_USER_ONLY
- .tlb_fill = hppa_cpu_tlb_fill,
+ .tlb_fill_align = hppa_cpu_tlb_fill_align,
.cpu_exec_interrupt = hppa_cpu_exec_interrupt,
.cpu_exec_halt = hppa_cpu_has_work,
.do_interrupt = hppa_cpu_do_interrupt,
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index f4e051f176..e45ba50a59 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -363,13 +363,13 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
void hppa_ptlbe(CPUHPPAState *env);
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled);
-bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
+bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ MemOp memop, int size, bool probe, uintptr_t ra);
void hppa_cpu_do_interrupt(CPUState *cpu);
bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
- int type, hwaddr *pphys, int *pprot);
+ int type, MemOp mop, hwaddr *pphys, int *pprot);
void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
vaddr addr, unsigned size,
MMUAccessType access_type,
diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c
index 391f32f27d..58695def82 100644
--- a/target/hppa/int_helper.c
+++ b/target/hppa/int_helper.c
@@ -167,7 +167,7 @@ void hppa_cpu_do_interrupt(CPUState *cs)
vaddr = hppa_form_gva_psw(old_psw, env->iasq_f, vaddr);
t = hppa_get_physical_address(env, vaddr, MMU_KERNEL_IDX,
- 0, &paddr, &prot);
+ 0, 0, &paddr, &prot);
if (t >= 0) {
/* We can't re-load the instruction. */
env->cr[CR_IIR] = 0;
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index b984f730aa..b8c3e55170 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -197,7 +197,7 @@ static int match_prot_id64(CPUHPPAState *env, uint32_t access_id)
}
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
- int type, hwaddr *pphys, int *pprot)
+ int type, MemOp mop, hwaddr *pphys, int *pprot)
{
hwaddr phys;
int prot, r_prot, w_prot, x_prot, priv;
@@ -221,7 +221,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
g_assert_not_reached();
}
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- goto egress;
+ goto egress_align;
}
/* Find a valid tlb entry that matches the virtual address. */
@@ -267,6 +267,12 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
goto egress;
}
+ if (unlikely(!(prot & type))) {
+ /* Not allowed -- Inst/Data Memory Access Rights Fault. */
+ ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
+ goto egress;
+ }
+
/* access_id == 0 means public page and no check is performed */
if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
int access_prot = (hppa_is_pa20(env)
@@ -281,14 +287,8 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
prot &= access_prot;
}
- if (unlikely(!(prot & type))) {
- /* Not allowed -- Inst/Data Memory Access Rights Fault. */
- ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
- goto egress;
- }
-
/*
- * In priority order, check for conditions which raise faults.
+ * In reverse priority order, check for conditions which raise faults.
* Remove PROT bits that cover the condition we want to check,
* so that the resulting PROT will force a re-check of the
* architectural TLB entry for the next access.
@@ -299,13 +299,15 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
/* The T bit is set -- Page Reference Fault. */
ret = EXCP_PAGE_REF;
}
- } else if (!ent->d) {
+ }
+ if (unlikely(!ent->d)) {
prot &= PAGE_READ | PAGE_EXEC;
if (type & PAGE_WRITE) {
/* The D bit is not set -- TLB Dirty Bit Fault. */
ret = EXCP_TLB_DIRTY;
}
- } else if (unlikely(ent->b)) {
+ }
+ if (unlikely(ent->b)) {
prot &= PAGE_READ | PAGE_EXEC;
if (type & PAGE_WRITE) {
/*
@@ -321,6 +323,11 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
}
}
+ egress_align:
+ if (addr & ((1u << memop_alignment_bits(mop)) - 1)) {
+ ret = EXCP_UNALIGN;
+ }
+
egress:
*pphys = phys;
*pprot = prot;
@@ -340,7 +347,7 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
- excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
+ excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0, 0,
&phys, &prot);
/* Since we're translating for debugging, the only error that is a
@@ -417,12 +424,11 @@ void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
}
}
-bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
- MMUAccessType type, int mmu_idx,
- bool probe, uintptr_t retaddr)
+bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+ MMUAccessType type, int mmu_idx,
+ MemOp memop, int size, bool probe, uintptr_t ra)
{
- HPPACPU *cpu = HPPA_CPU(cs);
- CPUHPPAState *env = &cpu->env;
+ CPUHPPAState *env = cpu_env(cs);
int prot, excp, a_prot;
hwaddr phys;
@@ -438,7 +444,8 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
break;
}
- excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, &phys, &prot);
+ excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, memop,
+ &phys, &prot);
if (unlikely(excp >= 0)) {
if (probe) {
return false;
@@ -446,7 +453,7 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
/* Failure. Raise the indicated exception. */
- raise_exception_with_ior(env, excp, retaddr, addr,
+ raise_exception_with_ior(env, excp, ra, addr,
MMU_IDX_MMU_DISABLED(mmu_idx));
}
@@ -460,8 +467,12 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
* the large page protection mask. We do not require this,
* because we record the large page here in the hppa tlb.
*/
- tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
- prot, mmu_idx, TARGET_PAGE_SIZE);
+ memset(out, 0, sizeof(*out));
+ out->phys_addr = phys;
+ out->prot = prot;
+ out->attrs = MEMTXATTRS_UNSPECIFIED;
+ out->lg_page_size = TARGET_PAGE_BITS;
+
return true;
}
@@ -678,7 +689,7 @@ target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
hwaddr phys;
int prot, excp;
- excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
+ excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0, 0,
&phys, &prot);
if (excp >= 0) {
if (excp == EXCP_DTLB_MISS) {
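
Note: the "reverse priority order" comment in the hunk above describes a
simple idiom: run the checks from lowest to highest priority and let each
later match overwrite ret, so the highest-priority fault survives. A
standalone sketch with illustrative names (not the actual hppa EXCP_*
constants):

#include <stdbool.h>

enum { NO_FAULT = -1, FAULT_REF, FAULT_DIRTY, FAULT_BREAK, FAULT_ALIGN };

static int classify_fault(bool t_bit, bool dirty, bool break_bit,
                          bool misaligned, bool is_write)
{
    int ret = NO_FAULT;
    if (t_bit) {
        ret = FAULT_REF;     /* lowest priority, checked first */
    }
    if (!dirty && is_write) {
        ret = FAULT_DIRTY;   /* overwrites any earlier fault */
    }
    if (break_bit && is_write) {
        ret = FAULT_BREAK;
    }
    if (misaligned) {
        ret = FAULT_ALIGN;   /* highest priority, checked last, wins */
    }
    return ret;
}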
diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c
index 7f79196fff..744325969f 100644
--- a/target/hppa/op_helper.c
+++ b/target/hppa/op_helper.c
@@ -334,7 +334,7 @@ target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
}
mmu_idx = PRIV_P_TO_MMU_IDX(level, env->psw & PSW_P);
- excp = hppa_get_physical_address(env, addr, mmu_idx, 0, &phys, &prot);
+ excp = hppa_get_physical_address(env, addr, mmu_idx, 0, 0, &phys, &prot);
if (excp >= 0) {
cpu_restore_state(env_cpu(env), GETPC());
hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx));
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index ff227a8c5c..0d30191482 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -7831,6 +7831,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
mce_init(cpu);
+ x86_cpu_gdb_init(cs);
qemu_init_vcpu(cs);
/*
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 9c39384ac0..4c63e7b045 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -2226,6 +2226,7 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags);
int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void x86_cpu_gdb_init(CPUState *cs);
void x86_cpu_list(void);
int cpu_x86_support_mca_broadcast(CPUX86State *env);
diff --git a/target/i386/gdbstub.c b/target/i386/gdbstub.c
index 4acf485879..04c49e802d 100644
--- a/target/i386/gdbstub.c
+++ b/target/i386/gdbstub.c
@@ -18,8 +18,13 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
+#include "accel/tcg/vcpu-state.h"
#include "cpu.h"
+#include "exec/gdbstub.h"
#include "gdbstub/helpers.h"
+#ifdef CONFIG_LINUX_USER
+#include "linux-user/qemu.h"
+#endif
#ifdef TARGET_X86_64
static const int gpr_map[16] = {
@@ -96,6 +101,19 @@ static int gdb_write_reg_cs64(uint32_t hflags, uint8_t *buf, target_ulong *val)
return 4;
}
+static int gdb_get_reg(CPUX86State *env, GByteArray *mem_buf, target_ulong val)
+{
+ if (TARGET_LONG_BITS == 64) {
+ if (env->hflags & HF_CS64_MASK) {
+ return gdb_get_reg64(mem_buf, val);
+ } else {
+ return gdb_get_reg64(mem_buf, val & 0xffffffffUL);
+ }
+ } else {
+ return gdb_get_reg32(mem_buf, val);
+ }
+}
+
int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
X86CPU *cpu = X86_CPU(cs);
@@ -137,15 +155,7 @@ int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
} else {
switch (n) {
case IDX_IP_REG:
- if (TARGET_LONG_BITS == 64) {
- if (env->hflags & HF_CS64_MASK) {
- return gdb_get_reg64(mem_buf, env->eip);
- } else {
- return gdb_get_reg64(mem_buf, env->eip & 0xffffffffUL);
- }
- } else {
- return gdb_get_reg32(mem_buf, env->eip);
- }
+ return gdb_get_reg(env, mem_buf, env->eip);
case IDX_FLAGS_REG:
return gdb_get_reg32(mem_buf, env->eflags);
@@ -248,6 +258,21 @@ static int x86_cpu_gdb_load_seg(X86CPU *cpu, X86Seg sreg, uint8_t *mem_buf)
return 4;
}
+static int gdb_write_reg(CPUX86State *env, uint8_t *mem_buf, target_ulong *val)
+{
+ if (TARGET_LONG_BITS == 64) {
+ if (env->hflags & HF_CS64_MASK) {
+ *val = ldq_p(mem_buf);
+ } else {
+ *val = ldq_p(mem_buf) & 0xffffffffUL;
+ }
+ return 8;
+ } else {
+ *val = (uint32_t)ldl_p(mem_buf);
+ return 4;
+ }
+}
+
int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
X86CPU *cpu = X86_CPU(cs);
@@ -288,18 +313,7 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
} else {
switch (n) {
case IDX_IP_REG:
- if (TARGET_LONG_BITS == 64) {
- if (env->hflags & HF_CS64_MASK) {
- env->eip = ldq_p(mem_buf);
- } else {
- env->eip = ldq_p(mem_buf) & 0xffffffffUL;
- }
- return 8;
- } else {
- env->eip &= ~0xffffffffUL;
- env->eip |= (uint32_t)ldl_p(mem_buf);
- return 4;
- }
+ return gdb_write_reg(env, mem_buf, &env->eip);
case IDX_FLAGS_REG:
env->eflags = ldl_p(mem_buf);
return 4;
@@ -397,3 +411,49 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
/* Unrecognised register. */
return 0;
}
+
+#ifdef CONFIG_LINUX_USER
+
+#define IDX_ORIG_AX 0
+
+static int x86_cpu_gdb_read_linux_register(CPUState *cs, GByteArray *mem_buf,
+ int n)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ switch (n) {
+ case IDX_ORIG_AX:
+ return gdb_get_reg(env, mem_buf, get_task_state(cs)->orig_ax);
+ }
+ return 0;
+}
+
+static int x86_cpu_gdb_write_linux_register(CPUState *cs, uint8_t *mem_buf,
+ int n)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ switch (n) {
+ case IDX_ORIG_AX:
+ return gdb_write_reg(env, mem_buf, &get_task_state(cs)->orig_ax);
+ }
+ return 0;
+}
+
+#endif
+
+void x86_cpu_gdb_init(CPUState *cs)
+{
+#ifdef CONFIG_LINUX_USER
+ gdb_register_coprocessor(cs, x86_cpu_gdb_read_linux_register,
+ x86_cpu_gdb_write_linux_register,
+#ifdef TARGET_X86_64
+ gdb_find_static_feature("i386-64bit-linux.xml"),
+#else
+ gdb_find_static_feature("i386-32bit-linux.xml"),
+#endif
+ 0);
+#endif
+}
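The new gdb_get_reg()/gdb_write_reg() helpers centralize one rule from the hunks above: in a 64-bit build the register travels as 8 bytes, but outside a 64-bit code segment (HF_CS64_MASK clear) the value is truncated to its low 32 bits. x86_cpu_gdb_init() then exposes orig_ax through the i386-{32,64}bit-linux.xml features added by this series. A standalone demo of the truncation rule, with local stand-in names rather than QEMU's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the HF_CS64_MASK test on env->hflags. */
static uint64_t gdb_visible_value(uint64_t val, bool cs64)
{
    return cs64 ? val : (val & 0xffffffffULL);
}

int main(void)
{
    uint64_t rip = 0xffffffff81000000ULL;
    /* In long mode the full value is reported ... */
    printf("%#llx\n", (unsigned long long)gdb_visible_value(rip, true));
    /* ... outside a 64-bit code segment only the low 32 bits are. */
    printf("%#llx\n", (unsigned long long)gdb_visible_value(rip, false));
    return 0;
}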
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index 333469b268..50d8537a3b 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -15362,7 +15362,8 @@ static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
* hardware does (e.g. if a delay slot instruction faults, the
* reported PC is the PC of the branch).
*/
- if (ctx->base.singlestep_enabled && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ if ((tb_cflags(ctx->base.tb) & CF_SINGLE_STEP) &&
+ (ctx->hflags & MIPS_HFLAG_BMASK)) {
ctx->base.max_insns = 2;
}
@@ -15445,7 +15446,7 @@ static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
* together with its delay slot.
*/
if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE
- && !ctx->base.singlestep_enabled) {
+ && !(tb_cflags(ctx->base.tb) & CF_SINGLE_STEP)) {
ctx->base.is_jmp = DISAS_TOO_MANY;
}
}
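With singlestep_enabled removed from DisasContextBase, translators query the flag from the TB's cflags instead; the MIPS hunks above keep the same two decisions (cap the TB around a branch-delay slot, suppress the page-crossing early exit). A sketch of the cflags test, where the bit value is a local assumption rather than QEMU's actual CF_SINGLE_STEP:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CF_SINGLE_STEP_DEMO (1u << 0)   /* illustrative bit only */

struct tb_demo { uint32_t cflags; };

static bool single_stepping(const struct tb_demo *tb)
{
    return (tb->cflags & CF_SINGLE_STEP_DEMO) != 0;
}

int main(void)
{
    struct tb_demo tb = { .cflags = CF_SINGLE_STEP_DEMO };
    /* Mirrors the MIPS check: with a pending delay slot, cap the TB
     * at two insns so the branch and its slot retire together. */
    int max_insns = single_stepping(&tb) ? 2 : 512;  /* demo cap */
    printf("max_insns=%d\n", max_insns);
    return 0;
}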
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 75b7bfda4c..f4da4a40f9 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -521,7 +521,7 @@ static MemOp gen_load_store_alignment(DisasContext *dc, MemOp mop,
mop |= MO_ALIGN;
}
if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
- tcg_gen_andi_i32(addr, addr, ~0 << get_alignment_bits(mop));
+ tcg_gen_andi_i32(addr, addr, ~0 << memop_alignment_bits(mop));
}
return mop;
}
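memop_alignment_bits() is the renamed get_alignment_bits(), moved out of tcg/tcg.h into include/exec/memop.h by this series. The decode it performs: MO_UNALN means no requirement, MO_ALIGN means natural alignment (the access size), and any other alignment field value encodes an explicit power of two. A self-contained sketch of that decode; the field layout and constant values below are demo assumptions chosen to match the shape, not QEMU's actual encoding:

#include <stdio.h>

/* Illustrative MemOp layout: low bits hold log2(size), a 3-bit
 * alignment field sits above them. Demo values only. */
enum {
    MO_SIZE_D   = 0x7,
    MO_ASHIFT_D = 4,
    MO_AMASK_D  = 0x7 << MO_ASHIFT_D,
    MO_UNALN_D  = 0,
    MO_ALIGN_D  = MO_AMASK_D,       /* "natural alignment" marker */
};

static unsigned memop_alignment_bits_demo(unsigned op)
{
    unsigned a = op & MO_AMASK_D;

    if (a == MO_UNALN_D) {
        return 0;                   /* no alignment required */
    } else if (a == MO_ALIGN_D) {
        return op & MO_SIZE_D;      /* natural: align to access size */
    }
    return a >> MO_ASHIFT_D;        /* explicit MO_ALIGN_N */
}

int main(void)
{
    unsigned mo_64 = 3;             /* log2(8): an 8-byte access */
    printf("%u\n", memop_alignment_bits_demo(mo_64 | MO_UNALN_D));        /* 0 */
    printf("%u\n", memop_alignment_bits_demo(mo_64 | MO_ALIGN_D));        /* 3 */
    printf("%u\n", memop_alignment_bits_demo(mo_64 | (2 << MO_ASHIFT_D))); /* 2 */
    return 0;
}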
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index 3de5f50b62..56072d89a2 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -1587,7 +1587,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
tcg_debug_assert((datalo & 1) == 0);
tcg_debug_assert(datahi == datalo + 1);
/* LDRD requires alignment; double-check that. */
- if (get_alignment_bits(opc) >= MO_64) {
+ if (memop_alignment_bits(opc) >= MO_64) {
if (h.index < 0) {
tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);
break;
@@ -1691,7 +1691,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
tcg_debug_assert((datalo & 1) == 0);
tcg_debug_assert(datahi == datalo + 1);
/* STRD requires alignment; double-check that. */
- if (get_alignment_bits(opc) >= MO_64) {
+ if (memop_alignment_bits(opc) >= MO_64) {
if (h.index < 0) {
tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
} else {
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index 176c98740b..32f9ec24b5 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -1133,7 +1133,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
* Otherwise, test for at least natural alignment and defer
* everything else to the helper functions.
*/
- if (s_bits != get_alignment_bits(opc)) {
+ if (s_bits != memop_alignment_bits(opc)) {
tcg_debug_assert(check_fit_tl(a_mask, 13));
tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC);
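The sparc64 hunk keeps its inline path only when the MemOp's alignment requirement equals the access size (s_bits); when they differ, an explicit ANDCC against the alignment mask is emitted and the subtler cases are left to the helpers, per the comment above. Worked example: an 8-byte load requiring only 4-byte alignment has s_bits 3 but alignment bits 2, so the mask test fires. A minimal sketch of that mismatch decision, simplified from the hunk:

#include <stdio.h>

int main(void)
{
    unsigned s_bits = 3;            /* log2(8): 8-byte access */
    unsigned a_bits = 2;            /* only 4-byte alignment required */
    unsigned a_mask = (1u << a_bits) - 1;

    /* Mirrors the shape of prepare_host_addr(): when size and
     * alignment requirement differ, emit an explicit mask test. */
    if (s_bits != a_bits) {
        printf("emit ANDCC addr, %#x\n", a_mask);
    }
    return 0;
}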
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index 23dc807f11..a318011229 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -45,7 +45,7 @@ static void check_max_alignment(unsigned a_bits)
static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
{
- unsigned a_bits = get_alignment_bits(op);
+ unsigned a_bits = memop_alignment_bits(op);
check_max_alignment(a_bits);
@@ -559,7 +559,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
TCGv_i64 ext_addr = NULL;
TCGOpcode opc;
- check_max_alignment(get_alignment_bits(memop));
+ check_max_alignment(memop_alignment_bits(memop));
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
/* In serial mode, reduce atomicity. */
@@ -676,7 +676,7 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
TCGv_i64 ext_addr = NULL;
TCGOpcode opc;
- check_max_alignment(get_alignment_bits(memop));
+ check_max_alignment(memop_alignment_bits(memop));
tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);
/* In serial mode, reduce atomicity. */
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 34e3056380..5decd83cf4 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -5506,7 +5506,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
MemOp host_atom, bool allow_two_ops)
{
- MemOp align = get_alignment_bits(opc);
+ MemOp align = memop_alignment_bits(opc);
MemOp size = opc & MO_SIZE;
MemOp half = size ? size - 1 : 0;
MemOp atom = opc & MO_ATOM_MASK;
diff --git a/tests/tcg/multiarch/gdbstub/test-proc-mappings.py b/tests/tcg/multiarch/gdbstub/test-proc-mappings.py
index 564613fabf..0f687f3284 100644
--- a/tests/tcg/multiarch/gdbstub/test-proc-mappings.py
+++ b/tests/tcg/multiarch/gdbstub/test-proc-mappings.py
@@ -8,17 +8,12 @@ from test_gdbstub import main, report
def run_test():
"""Run through the tests one by one"""
- try:
- mappings = gdb.execute("info proc mappings", False, True)
- except gdb.error as exc:
- exc_str = str(exc)
- if "Not supported on this target." in exc_str:
- # Detect failures due to an outstanding issue with how GDB handles
- # the x86_64 QEMU's target.xml, which does not contain the
- # definition of orig_rax. Skip the test in this case.
- print("SKIP: {}".format(exc_str))
- return
- raise
+ if gdb.selected_inferior().architecture().name() == "m68k":
+ # m68k GDB supports only GDB_OSABI_SVR4, but GDB_OSABI_LINUX is
+ # required for the info proc support (see set_gdbarch_info_proc()).
+ print("SKIP: m68k GDB does not support GDB_OSABI_LINUX")
+ exit(0)
+ mappings = gdb.execute("info proc mappings", False, True)
report(isinstance(mappings, str), "Fetched the mappings from the inferior")
# Broken with host page size > guest page size
# report("/sha1" in mappings, "Found the test binary name in the mappings")