From 14776ab5a12972ea439c7fb2203a4c15a09094b4 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:10:58 +1000 Subject: tcg: TCGMemOp is now accelerator independent MemOp Preparation for collapsing the two byte swaps, adjust_endianness and handle_bswap, along the I/O path. Target dependant attributes are conditionalized upon NEED_CPU_H. Signed-off-by: Tony Nguyen Acked-by: David Gibson Reviewed-by: Richard Henderson Acked-by: Cornelia Huck Message-Id: <81d9cd7d7f5aaadfa772d6c48ecee834e9cf7882.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- include/exec/memop.h | 110 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 include/exec/memop.h (limited to 'include') diff --git a/include/exec/memop.h b/include/exec/memop.h new file mode 100644 index 0000000000..7262ca3dfd --- /dev/null +++ b/include/exec/memop.h @@ -0,0 +1,110 @@ +/* + * Constants for memory operations + * + * Authors: + * Richard Henderson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef MEMOP_H +#define MEMOP_H + +typedef enum MemOp { + MO_8 = 0, + MO_16 = 1, + MO_32 = 2, + MO_64 = 3, + MO_SIZE = 3, /* Mask for the above. */ + + MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */ + + MO_BSWAP = 8, /* Host reverse endian. */ +#ifdef HOST_WORDS_BIGENDIAN + MO_LE = MO_BSWAP, + MO_BE = 0, +#else + MO_LE = 0, + MO_BE = MO_BSWAP, +#endif +#ifdef NEED_CPU_H +#ifdef TARGET_WORDS_BIGENDIAN + MO_TE = MO_BE, +#else + MO_TE = MO_LE, +#endif +#endif + + /* + * MO_UNALN accesses are never checked for alignment. + * MO_ALIGN accesses will result in a call to the CPU's + * do_unaligned_access hook if the guest address is not aligned. + * The default depends on whether the target CPU defines + * TARGET_ALIGNED_ONLY. + * + * Some architectures (e.g. ARMv8) need the address which is aligned + * to a size more than the size of the memory access. + * Some architectures (e.g. SPARCv9) need an address which is aligned, + * but less strictly than the natural alignment. + * + * MO_ALIGN supposes the alignment size is the size of a memory access. + * + * There are three options: + * - unaligned access permitted (MO_UNALN). + * - an alignment to the size of an access (MO_ALIGN); + * - an alignment to a specified size, which may be more or less than + * the access size (MO_ALIGN_x where 'x' is a size in bytes); + */ + MO_ASHIFT = 4, + MO_AMASK = 7 << MO_ASHIFT, +#ifdef NEED_CPU_H +#ifdef TARGET_ALIGNED_ONLY + MO_ALIGN = 0, + MO_UNALN = MO_AMASK, +#else + MO_ALIGN = MO_AMASK, + MO_UNALN = 0, +#endif +#endif + MO_ALIGN_2 = 1 << MO_ASHIFT, + MO_ALIGN_4 = 2 << MO_ASHIFT, + MO_ALIGN_8 = 3 << MO_ASHIFT, + MO_ALIGN_16 = 4 << MO_ASHIFT, + MO_ALIGN_32 = 5 << MO_ASHIFT, + MO_ALIGN_64 = 6 << MO_ASHIFT, + + /* Combinations of the above, for ease of use. 
*/ + MO_UB = MO_8, + MO_UW = MO_16, + MO_UL = MO_32, + MO_SB = MO_SIGN | MO_8, + MO_SW = MO_SIGN | MO_16, + MO_SL = MO_SIGN | MO_32, + MO_Q = MO_64, + + MO_LEUW = MO_LE | MO_UW, + MO_LEUL = MO_LE | MO_UL, + MO_LESW = MO_LE | MO_SW, + MO_LESL = MO_LE | MO_SL, + MO_LEQ = MO_LE | MO_Q, + + MO_BEUW = MO_BE | MO_UW, + MO_BEUL = MO_BE | MO_UL, + MO_BESW = MO_BE | MO_SW, + MO_BESL = MO_BE | MO_SL, + MO_BEQ = MO_BE | MO_Q, + +#ifdef NEED_CPU_H + MO_TEUW = MO_TE | MO_UW, + MO_TEUL = MO_TE | MO_UL, + MO_TESW = MO_TE | MO_SW, + MO_TESL = MO_TE | MO_SL, + MO_TEQ = MO_TE | MO_Q, +#endif + + MO_SSIZE = MO_SIZE | MO_SIGN, +} MemOp; + +#endif -- cgit v1.2.3 From 66b9b24375ac215cdcbdf9e14d665395360abff4 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:29:05 +1000 Subject: memory: Introduce size_memop The memory_region_dispatch_{read|write} operand "unsigned size" is being converted into a "MemOp op". Introduce no-op size_memop to aid preparatory conversion of interfaces. Once interfaces are converted, size_memop will be implemented to return a MemOp from size in bytes. Signed-off-by: Tony Nguyen Reviewed-by: Richard Henderson Message-Id: <35b8ee74020f67cf40848fb7d5f127cf96c851d6.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- include/exec/memop.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include') diff --git a/include/exec/memop.h b/include/exec/memop.h index 7262ca3dfd..dfd76a1604 100644 --- a/include/exec/memop.h +++ b/include/exec/memop.h @@ -107,4 +107,14 @@ typedef enum MemOp { MO_SSIZE = MO_SIZE | MO_SIGN, } MemOp; +/* Size in bytes to MemOp. */ +static inline unsigned size_memop(unsigned size) +{ + /* + * FIXME: No-op to aid conversion of memory_region_dispatch_{read|write} + * "unsigned size" operand into a "MemOp op". + */ + return size; +} + #endif -- cgit v1.2.3 From e67c904668d82ca4416cd91d37d9f5abcceef747 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:36:48 +1000 Subject: memory: Access MemoryRegion with MemOp Convert memory_region_dispatch_{read|write} operand "unsigned size" into a "MemOp op". Signed-off-by: Tony Nguyen Reviewed-by: Richard Henderson Message-Id: <1dd82df5801866743f838f1d046475115a1d32da.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- include/exec/memop.h | 20 ++++++++++++++------ include/exec/memory.h | 9 +++++---- 2 files changed, 19 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/exec/memop.h b/include/exec/memop.h index dfd76a1604..0a610b75d9 100644 --- a/include/exec/memop.h +++ b/include/exec/memop.h @@ -12,6 +12,8 @@ #ifndef MEMOP_H #define MEMOP_H +#include "qemu/host-utils.h" + typedef enum MemOp { MO_8 = 0, MO_16 = 1, @@ -107,14 +109,20 @@ typedef enum MemOp { MO_SSIZE = MO_SIZE | MO_SIGN, } MemOp; +/* MemOp to size in bytes. */ +static inline unsigned memop_size(MemOp op) +{ + return 1 << (op & MO_SIZE); +} + /* Size in bytes to MemOp. */ -static inline unsigned size_memop(unsigned size) +static inline MemOp size_memop(unsigned size) { - /* - * FIXME: No-op to aid conversion of memory_region_dispatch_{read|write} - * "unsigned size" operand into a "MemOp op". - */ - return size; +#ifdef CONFIG_DEBUG_TCG + /* Power of 2 up to 8. 
*/ + assert((size & (size - 1)) == 0 && size >= 1 && size <= 8); +#endif + return ctz32(size); } #endif diff --git a/include/exec/memory.h b/include/exec/memory.h index fddc2ff48a..192875b080 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -19,6 +19,7 @@ #include "exec/cpu-common.h" #include "exec/hwaddr.h" #include "exec/memattrs.h" +#include "exec/memop.h" #include "exec/ramlist.h" #include "qemu/bswap.h" #include "qemu/queue.h" @@ -1749,13 +1750,13 @@ void mtree_info(bool flatview, bool dispatch_tree, bool owner); * @mr: #MemoryRegion to access * @addr: address within that region * @pval: pointer to uint64_t which the data is written to - * @size: size of the access in bytes + * @op: size, sign, and endianness of the memory operation * @attrs: memory transaction attributes to use for the access */ MemTxResult memory_region_dispatch_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, - unsigned size, + MemOp op, MemTxAttrs attrs); /** * memory_region_dispatch_write: perform a write directly to the specified @@ -1764,13 +1765,13 @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr, * @mr: #MemoryRegion to access * @addr: address within that region * @data: data to write - * @size: size of the access in bytes + * @op: size, sign, and endianness of the memory operation * @attrs: memory transaction attributes to use for the access */ MemTxResult memory_region_dispatch_write(MemoryRegion *mr, hwaddr addr, uint64_t data, - unsigned size, + MemOp op, MemTxAttrs attrs); /** -- cgit v1.2.3 From d5d680cacc66ef7e3c02c81dc8f3a34eabce6dfe Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:36:52 +1000 Subject: memory: Access MemoryRegion with endianness Preparation for collapsing the two byte swaps adjust_endianness and handle_bswap into the former. Call memory_region_dispatch_{read|write} with endianness encoded into the "MemOp op" operand. This patch does not change any behaviour as memory_region_dispatch_{read|write} is yet to handle the endianness. Once it does handle endianness, callers with byte swaps can collapse them into adjust_endianness. Reviewed-by: Richard Henderson Signed-off-by: Tony Nguyen Message-Id: <8066ab3eb037c0388dfadfe53c5118429dd1de3a.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- include/exec/memory.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/exec/memory.h b/include/exec/memory.h index 192875b080..c4c86a6ff4 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -2211,6 +2211,9 @@ address_space_write_cached(MemoryRegionCache *cache, hwaddr addr, } } +/* enum device_endian to MemOp. */ +MemOp devend_memop(enum device_endian end); + #endif #endif -- cgit v1.2.3 From be5c4787e9a6eed12fd765d9e890f7cc6cd63220 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:36:53 +1000 Subject: cputlb: Replace size and endian operands for MemOp Preparation for collapsing the two byte swaps adjust_endianness and handle_bswap into the former. 
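As an illustrative sketch only (the helper name below is assumed and not part of the patch), the former pair of operands collapses into a single MemOp, and the receiving side can recover both pieces again with memop_size() and memop_big_endian():

    /* Sketch: combine an access size in bytes and an endianness into one MemOp. */
    static MemOp make_memop(unsigned size, bool big_endian)
    {
        return size_memop(size) | (big_endian ? MO_BE : MO_LE);
    }
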
Signed-off-by: Tony Nguyen Reviewed-by: Richard Henderson Message-Id: <755b7104410956b743e1f1e9c34ab87db113360f.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- include/exec/memop.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/exec/memop.h b/include/exec/memop.h index 0a610b75d9..529d07b02d 100644 --- a/include/exec/memop.h +++ b/include/exec/memop.h @@ -125,4 +125,10 @@ static inline MemOp size_memop(unsigned size) return ctz32(size); } +/* Big endianness from MemOp. */ +static inline bool memop_big_endian(MemOp op) +{ + return (op & MO_BSWAP) == MO_BE; +} + #endif -- cgit v1.2.3 From a26fc6f5152b47f1d7ed928f9c9d462d01ff1624 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:36:56 +1000 Subject: cputlb: Byte swap memory transaction attribute Notice new attribute, byte swap, and force the transaction through the memory slow path. Required by architectures that can invert endianness of memory transaction, e.g. SPARC64 has the Invert Endian TTE bit. Suggested-by: Richard Henderson Signed-off-by: Tony Nguyen Reviewed-by: Richard Henderson Message-Id: <2a10a1f1c00a894af1212c8f68ef09c2966023c1.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- include/exec/memattrs.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h index d4a3477d71..95f2d20d55 100644 --- a/include/exec/memattrs.h +++ b/include/exec/memattrs.h @@ -37,6 +37,8 @@ typedef struct MemTxAttrs { unsigned int user:1; /* Requester ID (for MSI for example) */ unsigned int requester_id:16; + /* Invert endianness for this page */ + unsigned int byte_swap:1; /* * The following are target-specific page-table bits. These are not * related to actual memory transactions at all. However, this structure -- cgit v1.2.3 From 74841f044e96644d8ed8a388fe505df5ab843d0a Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 24 Aug 2019 13:31:58 -0700 Subject: exec: Move user-only watchpoint stubs inline MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let the user-only watchpoint stubs resolve to empty inline functions. 
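For illustration, a hypothetical piece of common code (not from this patch) can now call the watchpoint API without guarding it:

    /* Sketch: no #ifdef CONFIG_USER_ONLY needed at the call site; for
     * user-only builds the inline stub simply returns -ENOSYS. */
    static int try_set_watchpoint(CPUState *cpu, vaddr addr, vaddr len)
    {
        CPUWatchpoint *wp;
        return cpu_watchpoint_insert(cpu, addr, len, BP_MEM_WRITE | BP_GDB, &wp);
    }
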
Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: David Hildenbrand Signed-off-by: Richard Henderson --- include/hw/core/cpu.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'include') diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 77fca95a40..6de688059d 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -1070,12 +1070,35 @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask) return false; } +#ifdef CONFIG_USER_ONLY +static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, + int flags, CPUWatchpoint **watchpoint) +{ + return -ENOSYS; +} + +static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, + vaddr len, int flags) +{ + return -ENOSYS; +} + +static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu, + CPUWatchpoint *wp) +{ +} + +static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask) +{ +} +#else int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags, CPUWatchpoint **watchpoint); int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, int flags); void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); void cpu_watchpoint_remove_all(CPUState *cpu, int mask); +#endif /** * cpu_get_address_space: -- cgit v1.2.3 From 0026348b48fe532279e8c12b100c16c1aa991373 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 23 Aug 2019 12:07:40 +0200 Subject: exec: Factor out core logic of check_watchpoint() We want to perform the same checks in probe_write() to trigger a cpu exit before doing any modifications. We'll have to pass a PC. Signed-off-by: David Hildenbrand Reviewed-by: Richard Henderson Message-Id: <20190823100741.9621-9-david@redhat.com> [rth: Use vaddr for len, like other watchpoint functions; Move user-only stub to static inline.] Signed-off-by: Richard Henderson --- include/hw/core/cpu.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 6de688059d..7bd8bed5b2 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -1091,6 +1091,11 @@ static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu, static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask) { } + +static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, + MemTxAttrs atr, int fl, uintptr_t ra) +{ +} #else int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags, CPUWatchpoint **watchpoint); @@ -1098,6 +1103,8 @@ int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, int flags); void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); void cpu_watchpoint_remove_all(CPUState *cpu, int mask); +void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, + MemTxAttrs attrs, int flags, uintptr_t ra); #endif /** -- cgit v1.2.3 From 30d7e098d5c38644359820317fcf72e3e129ec53 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Fri, 23 Aug 2019 15:12:32 -0700 Subject: cputlb: Fold TLB_RECHECK into TLB_INVALID_MASK We had two different mechanisms to force a recheck of the tlb. Before TLB_RECHECK was introduced, we had a PAGE_WRITE_INV bit that would immediate set TLB_INVALID_MASK, which automatically means that a second check of the tlb entry fails. We can use the same mechanism to handle small pages. Conserve TLB_* bits by removing TLB_RECHECK. 
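For illustration, a simplified sketch of the TLB comparator (not the literal QEMU code) shows why setting TLB_INVALID_MASK is enough to force a retry through the slow path:

    /* Sketch: a page-aligned address never matches an entry whose
     * TLB_INVALID_MASK bit is set, so the next access misses and refills. */
    static inline bool tlb_hit_sketch(target_ulong tlb_addr, target_ulong page)
    {
        return page == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
    }
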
Reviewed-by: David Hildenbrand Signed-off-by: Richard Henderson --- include/exec/cpu-all.h | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'include') diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index 8323094648..8d07ae23a5 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -329,14 +329,11 @@ CPUArchState *cpu_copy(CPUArchState *env); #define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2)) /* Set if TLB entry is an IO callback. */ #define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3)) -/* Set if TLB entry must have MMU lookup repeated for every access */ -#define TLB_RECHECK (1 << (TARGET_PAGE_BITS - 4)) /* Use this mask to check interception with an alignment mask * in a TCG backend. */ -#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \ - | TLB_RECHECK) +#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO) /** * tlb_hit_page: return true if page aligned @addr is a hit against the -- cgit v1.2.3 From 56ad8b007dde7a61e02582e1f2d5c57fc0165a6b Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 24 Aug 2019 08:21:34 -0700 Subject: exec: Factor out cpu_watchpoint_address_matches We want to move the check for watchpoints from memory_region_section_get_iotlb to tlb_set_page_with_attrs. Isolate the loop over watchpoints to an exported function. Rename the existing cpu_watchpoint_address_matches to watchpoint_address_matches, since it doesn't actually have a cpu argument. Reviewed-by: David Hildenbrand Signed-off-by: Richard Henderson --- include/hw/core/cpu.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 7bd8bed5b2..c7cda65c66 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -1096,6 +1096,12 @@ static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, MemTxAttrs atr, int fl, uintptr_t ra) { } + +static inline int cpu_watchpoint_address_matches(CPUState *cpu, + vaddr addr, vaddr len) +{ + return 0; +} #else int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags, CPUWatchpoint **watchpoint); @@ -1105,6 +1111,7 @@ void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); void cpu_watchpoint_remove_all(CPUState *cpu, int mask); void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, MemTxAttrs attrs, int flags, uintptr_t ra); +int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len); #endif /** -- cgit v1.2.3 From 50b107c5d617eaf93301cef20221312e7a986701 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 24 Aug 2019 09:51:09 -0700 Subject: cputlb: Handle watchpoints via TLB_WATCHPOINT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The raising of exceptions from check_watchpoint, buried inside of the I/O subsystem, is fundamentally broken. We do not have the helper return address with which we can unwind guest state. Replace PHYS_SECTION_WATCH and io_mem_watch with TLB_WATCHPOINT. Move the call to cpu_check_watchpoint into the cputlb helpers where we do have the helper return address. This allows watchpoints on RAM to bypass the full i/o access path. 
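For illustration, a simplified sketch of the new check inside a cputlb store helper (local names such as tlb_addr, iotlbentry and retaddr are assumed):

    /* Sketch: with the helper return address available, the watchpoint
     * check runs here and guest state can be unwound correctly. */
    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), addr, size,
                             iotlbentry->attrs, BP_MEM_WRITE, retaddr);
    }
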
Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: David Hildenbrand Signed-off-by: Richard Henderson --- include/exec/cpu-all.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index 8d07ae23a5..d2d443c4f9 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -329,11 +329,14 @@ CPUArchState *cpu_copy(CPUArchState *env); #define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2)) /* Set if TLB entry is an IO callback. */ #define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3)) +/* Set if TLB entry contains a watchpoint. */ +#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS - 4)) /* Use this mask to check interception with an alignment mask * in a TCG backend. */ -#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO) +#define TLB_FLAGS_MASK \ + (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT) /** * tlb_hit_page: return true if page aligned @addr is a hit against the -- cgit v1.2.3 From 59e96ac6cb13951dd09afc70622858089abf3384 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 26 Aug 2019 09:51:08 +0200 Subject: tcg: Factor out CONFIG_USER_ONLY probe_write() from s390x code Factor it out into common code. Similar to the !CONFIG_USER_ONLY variant, let's not allow to cross page boundaries. Signed-off-by: David Hildenbrand Reviewed-by: Richard Henderson Message-Id: <20190826075112.25637-4-david@redhat.com> [rth: Move cpu & cc variables inside if block.] Signed-off-by: Richard Henderson --- include/exec/exec-all.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 135aeaab0d..cbcc85add3 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -260,8 +260,6 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, void tlb_set_page(CPUState *cpu, target_ulong vaddr, hwaddr paddr, int prot, int mmu_idx, target_ulong size); -void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, - uintptr_t retaddr); #else static inline void tlb_init(CPUState *cpu) { @@ -312,6 +310,8 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, { } #endif +void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, + uintptr_t retaddr); #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ -- cgit v1.2.3 From fef39ccd567032d3ad520ed80f3576068e6eb2e3 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 30 Aug 2019 12:09:58 +0200 Subject: tcg: Make probe_write() return a pointer to the host page ... similar to tlb_vaddr_to_host(); however, allow access to the host page except when TLB_NOTDIRTY or TLB_MMIO is set. 
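For illustration, a hypothetical target helper (variable names assumed) can use the returned pointer for direct host stores, falling back when NULL is returned:

    uintptr_t ra = GETPC();
    /* Sketch: NULL means the page needs the slow path (TLB_NOTDIRTY or TLB_MMIO). */
    void *host = probe_write(env, addr, len, cpu_mmu_index(env, false), ra);
    if (host) {
        memset(host, 0, len);                      /* direct host access */
    } else {
        for (int i = 0; i < len; i++) {
            cpu_stb_data_ra(env, addr + i, 0, ra); /* byte-wise slow path */
        }
    }
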
Reviewed-by: Richard Henderson Signed-off-by: David Hildenbrand Message-Id: <20190830100959.26615-2-david@redhat.com> Signed-off-by: Richard Henderson --- include/exec/exec-all.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index cbcc85add3..a7893ed16b 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -310,8 +310,8 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, { } #endif -void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, - uintptr_t retaddr); +void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, + uintptr_t retaddr); #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ -- cgit v1.2.3 From c25c283df0f08582df29f1d5d7be1516b851532d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 30 Aug 2019 12:09:59 +0200 Subject: tcg: Factor out probe_write() logic into probe_access() Let's also allow to probe other access types. Reviewed-by: Richard Henderson Signed-off-by: David Hildenbrand Message-Id: <20190830100959.26615-3-david@redhat.com> Signed-off-by: Richard Henderson --- include/exec/exec-all.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index a7893ed16b..81b02eb2fe 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -310,8 +310,14 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, { } #endif -void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, - uintptr_t retaddr); +void *probe_access(CPUArchState *env, target_ulong addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); + +static inline void *probe_write(CPUArchState *env, target_ulong addr, int size, + int mmu_idx, uintptr_t retaddr) +{ + return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr); +} #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ -- cgit v1.2.3
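For illustration, a hypothetical caller (names assumed) can now probe a load in the same way:

    /* Sketch: fault early if the subsequent read would not be permitted. */
    probe_access(env, addr, len, MMU_DATA_LOAD, cpu_mmu_index(env, false), GETPC());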