author    Richard Henderson <richard.henderson@linaro.org>  2023-04-01 15:47:00 -0700
committer Richard Henderson <richard.henderson@linaro.org>  2023-06-05 12:04:29 -0700
commit    bdbb9d6999dbf7f6c50828f58e81c6fa299041be (patch)
tree      8cd68764f3816d174941d96b0595d8c9d29b4094 /include
parent    85314e13ad724247fbd74ff13555bff1cbda8356 (diff)
tcg: Split out exec/translation-block.h
This is all that is required by tcg/ from exec-all.h.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'include')
-rw-r--r--  include/exec/exec-all.h          | 132
-rw-r--r--  include/exec/translation-block.h | 149
2 files changed, 150 insertions, 131 deletions
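
To illustrate the point of the split (a sketch, not part of this patch; the file name and helper below are hypothetical): code under tcg/ that only needs the TranslationBlock definition can now include the narrower header instead of pulling in all of exec/exec-all.h.

/* Hypothetical consumer, e.g. tcg/example.c -- needs only the TB definition. */
#include "qemu/osdep.h"
#include "exec/translation-block.h"

/*
 * Example helper (not in QEMU): test one of the cflags bits defined by the
 * new header.  Real code would normally read cflags through the tb_cflags()
 * atomic accessor, which stays behind in exec-all.h.
 */
static inline bool tb_is_pcrel(const TranslationBlock *tb)
{
    return (tb->cflags & CF_PCREL) != 0;
}
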
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index dec17b1e62..f01c7d57e8 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -24,20 +24,9 @@
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif
-#include "qemu/interval-tree.h"
+#include "exec/translation-block.h"
#include "qemu/clang-tsa.h"
-/* Page tracking code uses ram addresses in system mode, and virtual
- addresses in userspace mode. Define tb_page_addr_t to be an appropriate
- type. */
-#if defined(CONFIG_USER_ONLY)
-typedef vaddr tb_page_addr_t;
-#define TB_PAGE_ADDR_FMT "%" VADDR_PRIx
-#else
-typedef ram_addr_t tb_page_addr_t;
-#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
-#endif
-
/**
* cpu_unwind_state_data:
* @cpu: the cpu context
@@ -478,8 +467,6 @@ int probe_access_full(CPUArchState *env, target_ulong addr, int size,
CPUTLBEntryFull **pfull, uintptr_t retaddr);
#endif
-#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
-
/* Estimated block size for TB allocation. */
/* ??? The following is based on a 2015 survey of x86_64 host output.
Better would seem to be some sort of dynamically sized TB array,
@@ -490,123 +477,6 @@ int probe_access_full(CPUArchState *env, target_ulong addr, int size,
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif
-/*
- * Translation Cache-related fields of a TB.
- * This struct exists just for convenience; we keep track of TB's in a binary
- * search tree, and the only fields needed to compare TB's in the tree are
- * @ptr and @size.
- * Note: the address of search data can be obtained by adding @size to @ptr.
- */
-struct tb_tc {
- const void *ptr; /* pointer to the translated code */
- size_t size;
-};
-
-struct TranslationBlock {
- /*
- * Guest PC corresponding to this block. This must be the true
- * virtual address. Therefore e.g. x86 stores EIP + CS_BASE, and
- * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
- * privilege, must store those bits elsewhere.
- *
- * If CF_PCREL, the opcodes for the TranslationBlock are written
- * such that the TB is associated only with the physical page and
- * may be run in any virtual address context. In this case, PC
- * must always be taken from ENV in a target-specific manner.
- * Unwind information is taken as offsets from the page, to be
- * deposited into the "current" PC.
- */
- vaddr pc;
-
- /*
- * Target-specific data associated with the TranslationBlock, e.g.:
- * x86: the original user, the Code Segment virtual base,
- * arm: an extension of tb->flags,
- * s390x: instruction data for EXECUTE,
- * sparc: the next pc of the instruction queue (for delay slots).
- */
- uint64_t cs_base;
-
- uint32_t flags; /* flags defining in which context the code was generated */
- uint32_t cflags; /* compile flags */
-
-/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
-#define CF_COUNT_MASK 0x000001ff
-#define CF_NO_GOTO_TB 0x00000200 /* Do not chain with goto_tb */
-#define CF_NO_GOTO_PTR 0x00000400 /* Do not chain with goto_ptr */
-#define CF_SINGLE_STEP 0x00000800 /* gdbstub single-step in effect */
-#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
-#define CF_MEMI_ONLY 0x00010000 /* Only instrument memory ops */
-#define CF_USE_ICOUNT 0x00020000
-#define CF_INVALID 0x00040000 /* TB is stale. Set with @jmp_lock held */
-#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
-#define CF_NOIRQ 0x00100000 /* Generate an uninterruptible TB */
-#define CF_PCREL 0x00200000 /* Opcodes in TB are PC-relative */
-#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
-#define CF_CLUSTER_SHIFT 24
-
- /*
- * Above fields used for comparing
- */
-
- /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
- uint16_t size;
- uint16_t icount;
-
- struct tb_tc tc;
-
- /*
- * Track tb_page_addr_t intervals that intersect this TB.
- * For user-only, the virtual addresses are always contiguous,
- * and we use a unified interval tree. For system, we use a
- * linked list headed in each PageDesc. Within the list, the lsb
- * of the previous pointer tells the index of page_next[], and the
- * list is protected by the PageDesc lock(s).
- */
-#ifdef CONFIG_USER_ONLY
- IntervalTreeNode itree;
-#else
- uintptr_t page_next[2];
- tb_page_addr_t page_addr[2];
-#endif
-
- /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
- QemuSpin jmp_lock;
-
- /* The following data are used to directly call another TB from
- * the code of this one. This can be done either by emitting direct or
- * indirect native jump instructions. These jumps are reset so that the TB
- * just continues its execution. The TB can be linked to another one by
- * setting one of the jump targets (or patching the jump instruction). Only
- * two of such jumps are supported.
- */
-#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
- uint16_t jmp_reset_offset[2]; /* offset of original jump target */
- uint16_t jmp_insn_offset[2]; /* offset of direct jump insn */
- uintptr_t jmp_target_addr[2]; /* target address */
-
- /*
- * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
- * Each TB can have two outgoing jumps, and therefore can participate
- * in two lists. The list entries are kept in jmp_list_next[2]. The least
- * significant bit (LSB) of the pointers in these lists is used to encode
- * which of the two list entries is to be used in the pointed TB.
- *
- * List traversals are protected by jmp_lock. The destination TB of each
- * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
- * can be acquired from any origin TB.
- *
- * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
- * being invalidated, so that no further outgoing jumps from it can be set.
- *
- * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
- * to a destination TB that has CF_INVALID set.
- */
- uintptr_t jmp_list_head;
- uintptr_t jmp_list_next[2];
- uintptr_t jmp_dest[2];
-};
-
/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
diff --git a/include/exec/translation-block.h b/include/exec/translation-block.h
new file mode 100644
index 0000000000..5119924927
--- /dev/null
+++ b/include/exec/translation-block.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Definition of TranslationBlock.
+ * Copyright (c) 2003 Fabrice Bellard
+ */
+
+#ifndef EXEC_TRANSLATION_BLOCK_H
+#define EXEC_TRANSLATION_BLOCK_H
+
+#include "qemu/atomic.h"
+#include "qemu/thread.h"
+#include "qemu/interval-tree.h"
+#include "exec/cpu-common.h"
+#include "exec/target_page.h"
+
+/*
+ * Page tracking code uses ram addresses in system mode, and virtual
+ * addresses in userspace mode. Define tb_page_addr_t to be an
+ * appropriate type.
+ */
+#if defined(CONFIG_USER_ONLY)
+typedef vaddr tb_page_addr_t;
+#define TB_PAGE_ADDR_FMT "%" VADDR_PRIx
+#else
+typedef ram_addr_t tb_page_addr_t;
+#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
+#endif
+
+/*
+ * Translation Cache-related fields of a TB.
+ * This struct exists just for convenience; we keep track of TB's in a binary
+ * search tree, and the only fields needed to compare TB's in the tree are
+ * @ptr and @size.
+ * Note: the address of search data can be obtained by adding @size to @ptr.
+ */
+struct tb_tc {
+ const void *ptr; /* pointer to the translated code */
+ size_t size;
+};
+
+struct TranslationBlock {
+ /*
+ * Guest PC corresponding to this block. This must be the true
+ * virtual address. Therefore e.g. x86 stores EIP + CS_BASE, and
+ * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
+ * privilege, must store those bits elsewhere.
+ *
+ * If CF_PCREL, the opcodes for the TranslationBlock are written
+ * such that the TB is associated only with the physical page and
+ * may be run in any virtual address context. In this case, PC
+ * must always be taken from ENV in a target-specific manner.
+ * Unwind information is taken as offsets from the page, to be
+ * deposited into the "current" PC.
+ */
+ vaddr pc;
+
+ /*
+ * Target-specific data associated with the TranslationBlock, e.g.:
+ * x86: the original user, the Code Segment virtual base,
+ * arm: an extension of tb->flags,
+ * s390x: instruction data for EXECUTE,
+ * sparc: the next pc of the instruction queue (for delay slots).
+ */
+ uint64_t cs_base;
+
+ uint32_t flags; /* flags defining in which context the code was generated */
+ uint32_t cflags; /* compile flags */
+
+/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
+#define CF_COUNT_MASK 0x000001ff
+#define CF_NO_GOTO_TB 0x00000200 /* Do not chain with goto_tb */
+#define CF_NO_GOTO_PTR 0x00000400 /* Do not chain with goto_ptr */
+#define CF_SINGLE_STEP 0x00000800 /* gdbstub single-step in effect */
+#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
+#define CF_MEMI_ONLY 0x00010000 /* Only instrument memory ops */
+#define CF_USE_ICOUNT 0x00020000
+#define CF_INVALID 0x00040000 /* TB is stale. Set with @jmp_lock held */
+#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
+#define CF_NOIRQ 0x00100000 /* Generate an uninterruptible TB */
+#define CF_PCREL 0x00200000 /* Opcodes in TB are PC-relative */
+#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
+#define CF_CLUSTER_SHIFT 24
+
+ /*
+ * Above fields used for comparing
+ */
+
+ /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
+ uint16_t size;
+ uint16_t icount;
+
+ struct tb_tc tc;
+
+ /*
+ * Track tb_page_addr_t intervals that intersect this TB.
+ * For user-only, the virtual addresses are always contiguous,
+ * and we use a unified interval tree. For system, we use a
+ * linked list headed in each PageDesc. Within the list, the lsb
+ * of the previous pointer tells the index of page_next[], and the
+ * list is protected by the PageDesc lock(s).
+ */
+#ifdef CONFIG_USER_ONLY
+ IntervalTreeNode itree;
+#else
+ uintptr_t page_next[2];
+ tb_page_addr_t page_addr[2];
+#endif
+
+ /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
+ QemuSpin jmp_lock;
+
+ /* The following data are used to directly call another TB from
+ * the code of this one. This can be done either by emitting direct or
+ * indirect native jump instructions. These jumps are reset so that the TB
+ * just continues its execution. The TB can be linked to another one by
+ * setting one of the jump targets (or patching the jump instruction). Only
+ * two of such jumps are supported.
+ */
+#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
+ uint16_t jmp_reset_offset[2]; /* offset of original jump target */
+ uint16_t jmp_insn_offset[2]; /* offset of direct jump insn */
+ uintptr_t jmp_target_addr[2]; /* target address */
+
+ /*
+ * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
+ * Each TB can have two outgoing jumps, and therefore can participate
+ * in two lists. The list entries are kept in jmp_list_next[2]. The least
+ * significant bit (LSB) of the pointers in these lists is used to encode
+ * which of the two list entries is to be used in the pointed TB.
+ *
+ * List traversals are protected by jmp_lock. The destination TB of each
+ * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
+ * can be acquired from any origin TB.
+ *
+ * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
+ * being invalidated, so that no further outgoing jumps from it can be set.
+ *
+ * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
+ * to a destination TB that has CF_INVALID set.
+ */
+ uintptr_t jmp_list_head;
+ uintptr_t jmp_list_next[2];
+ uintptr_t jmp_dest[2];
+};
+
+/* The alignment given to TranslationBlock during allocation. */
+#define CODE_GEN_ALIGN 16
+
+#endif /* EXEC_TRANSLATION_BLOCK_H */
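
Finally, a note on the alignment constant that moved into the new header (a conceptual sketch only, not QEMU's actual allocator): CODE_GEN_ALIGN is the boundary each TranslationBlock is placed on in the translation cache, so allocation sizes are effectively rounded up to a multiple of 16, roughly as below.

/*
 * Conceptual sketch, not the real tcg region allocator: round an allocation
 * up so the next TranslationBlock starts on a CODE_GEN_ALIGN (16-byte)
 * boundary.
 */
#include "qemu/osdep.h"
#include "exec/translation-block.h"

static inline size_t tb_round_alloc_size(size_t code_size)
{
    size_t total = sizeof(struct TranslationBlock) + code_size;
    return ROUND_UP(total, CODE_GEN_ALIGN);  /* ROUND_UP comes from osdep.h */
}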