author     Peter Maydell <peter.maydell@linaro.org>    2019-09-04 16:29:18 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2019-09-04 16:29:18 +0100
commit     9de65783e188b6cc3816847e03602864921bf504 (patch)
tree       e3d8b0edb78638f474dd4b5b9d4a7d249faec8e6 /include
parent     da9e0c27214733888d6366794f33c4c2db348dc1 (diff)
parent     c25c283df0f08582df29f1d5d7be1516b851532d (diff)
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20190903' into staging
Allow page table bit to swap endianness.
Reorganize watchpoints out of i/o path.
Return host address from probe_write / probe_access.

# gpg: Signature made Tue 03 Sep 2019 16:47:50 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20190903: (36 commits)
  tcg: Factor out probe_write() logic into probe_access()
  tcg: Make probe_write() return a pointer to the host page
  s390x/tcg: Pass a size to probe_write() in do_csst()
  hppa/tcg: Call probe_write() also for CONFIG_USER_ONLY
  mips/tcg: Call probe_write() for CONFIG_USER_ONLY as well
  tcg: Enforce single page access in probe_write()
  tcg: Factor out CONFIG_USER_ONLY probe_write() from s390x code
  s390x/tcg: Fix length calculation in probe_write_access()
  s390x/tcg: Use guest_addr_valid() instead of h2g_valid() in probe_write_access()
  tcg: Check for watchpoints in probe_write()
  cputlb: Handle watchpoints via TLB_WATCHPOINT
  cputlb: Remove double-alignment in store_helper
  cputlb: Fix size operand for tlb_fill on unaligned store
  exec: Factor out cpu_watchpoint_address_matches
  cputlb: Fold TLB_RECHECK into TLB_INVALID_MASK
  exec: Factor out core logic of check_watchpoint()
  exec: Move user-only watchpoint stubs inline
  target/sparc: sun4u Invert Endian TTE bit
  target/sparc: Add TLB entry with attributes
  cputlb: Byte swap memory transaction attribute
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'include')
-rw-r--r--  include/exec/cpu-all.h     8
-rw-r--r--  include/exec/exec-all.h   10
-rw-r--r--  include/exec/memattrs.h    2
-rw-r--r--  include/exec/memop.h     134
-rw-r--r--  include/exec/memory.h     12
-rw-r--r--  include/hw/core/cpu.h     37
6 files changed, 193 insertions(+), 10 deletions(-)
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 8323094648..d2d443c4f9 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -329,14 +329,14 @@ CPUArchState *cpu_copy(CPUArchState *env);
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2))
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3))
-/* Set if TLB entry must have MMU lookup repeated for every access */
-#define TLB_RECHECK (1 << (TARGET_PAGE_BITS - 4))
+/* Set if TLB entry contains a watchpoint. */
+#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS - 4))
/* Use this mask to check interception with an alignment mask
* in a TCG backend.
*/
-#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
- | TLB_RECHECK)
+#define TLB_FLAGS_MASK \
+ (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT)
/**
* tlb_hit_page: return true if page aligned @addr is a hit against the
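For orientation: TLB_FLAGS_MASK is tested on the fast path of the softmmu
load/store helpers, and any flag bit set in the TLB entry's address
comparator diverts the access to the slow path. A minimal sketch of that
check, assuming the usual cputlb helper context (tlb_addr holds the
comparator for this access; this is not literal code from the patch):

    if (unlikely(tlb_addr & TLB_FLAGS_MASK)) {
        /* Slow path: invalid entry, notdirty write, MMIO, or, with
         * this series, a page containing a watchpoint (TLB_WATCHPOINT). */
    } else {
        /* Fast path: direct access through the host address. */
    }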
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 135aeaab0d..81b02eb2fe 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -260,8 +260,6 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, target_ulong size);
-void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
- uintptr_t retaddr);
#else
static inline void tlb_init(CPUState *cpu)
{
@@ -312,6 +310,14 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
{
}
#endif
+void *probe_access(CPUArchState *env, target_ulong addr, int size,
+ MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
+
+static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
+ int mmu_idx, uintptr_t retaddr)
+{
+ return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
+}
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
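probe_access() now returns the host address for the guest page when the
access can be performed directly, and NULL when it cannot (e.g. MMIO or a
watchpointed page). A sketch of how a target helper can exploit that,
with illustrative names (len, ra) and the usual helper context assumed;
note the series also enforces that the probed range stays within a single
guest page:

    void *host = probe_write(env, addr, len, cpu_mmu_index(env, false), ra);
    if (host) {
        /* The whole range is directly addressable on the host. */
        memset(host, 0, len);
    } else {
        /* Fall back to byte-wise guest stores. */
        for (int i = 0; i < len; i++) {
            cpu_stb_data_ra(env, addr + i, 0, ra);
        }
    }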
diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
index d4a3477d71..95f2d20d55 100644
--- a/include/exec/memattrs.h
+++ b/include/exec/memattrs.h
@@ -37,6 +37,8 @@ typedef struct MemTxAttrs {
unsigned int user:1;
/* Requester ID (for MSI for example) */
unsigned int requester_id:16;
+ /* Invert endianness for this page */
+ unsigned int byte_swap:1;
/*
* The following are target-specific page-table bits. These are not
* related to actual memory transactions at all. However, this structure
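The new bit is consumed by the cputlb byte-swap handling and set by the
sun4u MMU code elsewhere in this pull. A sketch of a target's tlb_fill
setting it, where TTE_IE_BIT stands in for the target's real Invert
Endian bit (illustrative name, not from this hunk):

    MemTxAttrs attrs = {};
    if (tte & TTE_IE_BIT) {   /* illustrative: the TTE's Invert Endian bit */
        attrs.byte_swap = 1;  /* accesses via this page are byte-swapped */
    }
    tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot, mmu_idx, page_size);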
diff --git a/include/exec/memop.h b/include/exec/memop.h
new file mode 100644
index 0000000000..529d07b02d
--- /dev/null
+++ b/include/exec/memop.h
@@ -0,0 +1,134 @@
+/*
+ * Constants for memory operations
+ *
+ * Authors:
+ * Richard Henderson <rth@twiddle.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMOP_H
+#define MEMOP_H
+
+#include "qemu/host-utils.h"
+
+typedef enum MemOp {
+ MO_8 = 0,
+ MO_16 = 1,
+ MO_32 = 2,
+ MO_64 = 3,
+ MO_SIZE = 3, /* Mask for the above. */
+
+ MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */
+
+ MO_BSWAP = 8, /* Host reverse endian. */
+#ifdef HOST_WORDS_BIGENDIAN
+ MO_LE = MO_BSWAP,
+ MO_BE = 0,
+#else
+ MO_LE = 0,
+ MO_BE = MO_BSWAP,
+#endif
+#ifdef NEED_CPU_H
+#ifdef TARGET_WORDS_BIGENDIAN
+ MO_TE = MO_BE,
+#else
+ MO_TE = MO_LE,
+#endif
+#endif
+
+ /*
+ * MO_UNALN accesses are never checked for alignment.
+ * MO_ALIGN accesses will result in a call to the CPU's
+ * do_unaligned_access hook if the guest address is not aligned.
+ * The default depends on whether the target CPU defines
+ * TARGET_ALIGNED_ONLY.
+ *
+ * Some architectures (e.g. ARMv8) need the address which is aligned
+ * to a size more than the size of the memory access.
+ * Some architectures (e.g. SPARCv9) need an address which is aligned,
+ * but less strictly than the natural alignment.
+ *
+ * MO_ALIGN supposes the alignment size is the size of a memory access.
+ *
+ * There are three options:
+ * - unaligned access permitted (MO_UNALN).
+ * - an alignment to the size of an access (MO_ALIGN);
+ * - an alignment to a specified size, which may be more or less than
+ * the access size (MO_ALIGN_x where 'x' is a size in bytes);
+ */
+ MO_ASHIFT = 4,
+ MO_AMASK = 7 << MO_ASHIFT,
+#ifdef NEED_CPU_H
+#ifdef TARGET_ALIGNED_ONLY
+ MO_ALIGN = 0,
+ MO_UNALN = MO_AMASK,
+#else
+ MO_ALIGN = MO_AMASK,
+ MO_UNALN = 0,
+#endif
+#endif
+ MO_ALIGN_2 = 1 << MO_ASHIFT,
+ MO_ALIGN_4 = 2 << MO_ASHIFT,
+ MO_ALIGN_8 = 3 << MO_ASHIFT,
+ MO_ALIGN_16 = 4 << MO_ASHIFT,
+ MO_ALIGN_32 = 5 << MO_ASHIFT,
+ MO_ALIGN_64 = 6 << MO_ASHIFT,
+
+ /* Combinations of the above, for ease of use. */
+ MO_UB = MO_8,
+ MO_UW = MO_16,
+ MO_UL = MO_32,
+ MO_SB = MO_SIGN | MO_8,
+ MO_SW = MO_SIGN | MO_16,
+ MO_SL = MO_SIGN | MO_32,
+ MO_Q = MO_64,
+
+ MO_LEUW = MO_LE | MO_UW,
+ MO_LEUL = MO_LE | MO_UL,
+ MO_LESW = MO_LE | MO_SW,
+ MO_LESL = MO_LE | MO_SL,
+ MO_LEQ = MO_LE | MO_Q,
+
+ MO_BEUW = MO_BE | MO_UW,
+ MO_BEUL = MO_BE | MO_UL,
+ MO_BESW = MO_BE | MO_SW,
+ MO_BESL = MO_BE | MO_SL,
+ MO_BEQ = MO_BE | MO_Q,
+
+#ifdef NEED_CPU_H
+ MO_TEUW = MO_TE | MO_UW,
+ MO_TEUL = MO_TE | MO_UL,
+ MO_TESW = MO_TE | MO_SW,
+ MO_TESL = MO_TE | MO_SL,
+ MO_TEQ = MO_TE | MO_Q,
+#endif
+
+ MO_SSIZE = MO_SIZE | MO_SIGN,
+} MemOp;
+
+/* MemOp to size in bytes. */
+static inline unsigned memop_size(MemOp op)
+{
+ return 1 << (op & MO_SIZE);
+}
+
+/* Size in bytes to MemOp. */
+static inline MemOp size_memop(unsigned size)
+{
+#ifdef CONFIG_DEBUG_TCG
+ /* Power of 2 up to 8. */
+ assert((size & (size - 1)) == 0 && size >= 1 && size <= 8);
+#endif
+ return ctz32(size);
+}
+
+/* Big endianness from MemOp. */
+static inline bool memop_big_endian(MemOp op)
+{
+ return (op & MO_BSWAP) == MO_BE;
+}
+
+#endif
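Taken together, a MemOp packs size, signedness, endianness and alignment
into a single value, and the helpers above convert between it and a byte
count. A quick sketch of how the pieces compose:

    MemOp op = MO_32 | MO_BE | MO_ALIGN_8; /* 4-byte big-endian access, 8-byte aligned */
    unsigned bytes = memop_size(op);       /* 1 << (op & MO_SIZE) == 4 */
    bool big = memop_big_endian(op);       /* true: MO_BE is set */
    MemOp sz = size_memop(4);              /* ctz32(4) == MO_32 */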
diff --git a/include/exec/memory.h b/include/exec/memory.h
index ecca388e69..2dd810259d 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -19,6 +19,7 @@
#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
+#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
@@ -1739,13 +1740,13 @@ void mtree_info(bool flatview, bool dispatch_tree, bool owner);
* @mr: #MemoryRegion to access
* @addr: address within that region
* @pval: pointer to uint64_t which the data is written to
- * @size: size of the access in bytes
+ * @op: size, sign, and endianness of the memory operation
* @attrs: memory transaction attributes to use for the access
*/
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
hwaddr addr,
uint64_t *pval,
- unsigned size,
+ MemOp op,
MemTxAttrs attrs);
/**
* memory_region_dispatch_write: perform a write directly to the specified
@@ -1754,13 +1755,13 @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
* @mr: #MemoryRegion to access
* @addr: address within that region
* @data: data to write
- * @size: size of the access in bytes
+ * @op: size, sign, and endianness of the memory operation
* @attrs: memory transaction attributes to use for the access
*/
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
hwaddr addr,
uint64_t data,
- unsigned size,
+ MemOp op,
MemTxAttrs attrs);
/**
@@ -2200,6 +2201,9 @@ address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
}
}
+/* enum device_endian to MemOp. */
+MemOp devend_memop(enum device_endian end);
+
#endif
#endif
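Callers that previously passed a raw byte size now pass a MemOp; device
code can derive the endianness part from its device_endian and OR in the
size. A hedged sketch of a dispatch under the new signature (this
composition is an assumption based on the declarations above, not code
from this hunk):

    uint64_t val;
    MemTxResult r = memory_region_dispatch_read(mr, addr, &val,
                                                size_memop(4) |
                                                devend_memop(DEVICE_LITTLE_ENDIAN),
                                                MEMTXATTRS_UNSPECIFIED);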
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 77fca95a40..c7cda65c66 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -1070,12 +1070,49 @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
return false;
}
+#ifdef CONFIG_USER_ONLY
+static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
+ int flags, CPUWatchpoint **watchpoint)
+{
+ return -ENOSYS;
+}
+
+static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
+ vaddr len, int flags)
+{
+ return -ENOSYS;
+}
+
+static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu,
+ CPUWatchpoint *wp)
+{
+}
+
+static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
+{
+}
+
+static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
+ MemTxAttrs atr, int fl, uintptr_t ra)
+{
+}
+
+static inline int cpu_watchpoint_address_matches(CPUState *cpu,
+ vaddr addr, vaddr len)
+{
+ return 0;
+}
+#else
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
+void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
+ MemTxAttrs attrs, int flags, uintptr_t ra);
+int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
+#endif
/**
* cpu_get_address_space:
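The point of the new declarations is that watchpoint checking moves out
of the I/O path and into the cputlb slow path; in user-only builds the
inline stubs above compile the check away. A sketch of the store-side
check, assuming cputlb helper context (variable names illustrative):

    /* Inside a softmmu store helper, after the TLB lookup: */
    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
        /* Raises a debug exception (and does not return) if a write
         * watchpoint fires for [addr, addr + size). */
        cpu_check_watchpoint(env_cpu(env), addr, size,
                             attrs, BP_MEM_WRITE, retaddr);
    }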