author     Stefan Hajnoczi <stefanha@redhat.com>   2022-10-26 10:53:41 -0400
committer  Stefan Hajnoczi <stefanha@redhat.com>   2022-10-26 10:53:41 -0400
commit     08a5d04606292b3cf6f5756bf2a095654a290626 (patch)
tree       35be6993f21937571b2537912ee90ebf85b5c6d7 /include
parent     7c02614ec97f02aef888c1ecdcfbf396035d4871 (diff)
parent     04f105758b0089f73ee47260671580cde35f96cc (diff)
Merge tag 'pull-tcg-20221026' of https://gitlab.com/rth7680/qemu into staging
Revert incorrect cflags initialization.
Add direct jumps for tcg/loongarch64.
Speed up breakpoint check.
Improve assertions for atomic.h.
Move restore_state_to_opc to TCGCPUOps.
Cleanups to TranslationBlock maintenance.
# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmNYlo4dHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV9y2wf9EKsCA6VtYI2Qtftf
# q/ujYFmUf8AKTb9eVcA0XX71CT1dEnFR7GQyT8B8X13x0pSbOX7tbEWHPreegTFV
# tESiejvymi6Q9devAB58GVwNoU/zPIQQGhCPxkVUKDmRztJz22MbGUzd7UKPPgU8
# 2nVMkIpLTMBsKeFLxE/D3ZntmdKsgyI/1Dtkl9TxvlDGsCbMjbNcr8lM+TLaG2oX
# GZhFyJHKEVy0cobukvhhb/9rU7AWdG/BnFmZM16JxvHV/YCwJBx3Udhcy9xPePUU
# yIjkGsUAq4aB6H9RFuTWh7GmaY5u6gMbTTi2J7hDos0mzauYJtpgEB/H42LpycGE
# sOhkLQ==
# =DUb8
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 25 Oct 2022 22:08:14 EDT
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F
* tag 'pull-tcg-20221026' of https://gitlab.com/rth7680/qemu: (47 commits)
accel/tcg: Remove restore_state_to_opc function
target/xtensa: Convert to tcg_ops restore_state_to_opc
target/tricore: Convert to tcg_ops restore_state_to_opc
target/sparc: Convert to tcg_ops restore_state_to_opc
target/sh4: Convert to tcg_ops restore_state_to_opc
target/s390x: Convert to tcg_ops restore_state_to_opc
target/rx: Convert to tcg_ops restore_state_to_opc
target/riscv: Convert to tcg_ops restore_state_to_opc
target/ppc: Convert to tcg_ops restore_state_to_opc
target/openrisc: Convert to tcg_ops restore_state_to_opc
target/nios2: Convert to tcg_ops restore_state_to_opc
target/mips: Convert to tcg_ops restore_state_to_opc
target/microblaze: Convert to tcg_ops restore_state_to_opc
target/m68k: Convert to tcg_ops restore_state_to_opc
target/loongarch: Convert to tcg_ops restore_state_to_opc
target/i386: Convert to tcg_ops restore_state_to_opc
target/hppa: Convert to tcg_ops restore_state_to_opc
target/hexagon: Convert to tcg_ops restore_state_to_opc
target/cris: Convert to tcg_ops restore_state_to_opc
target/avr: Convert to tcg_ops restore_state_to_opc
...
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Diffstat (limited to 'include')
-rw-r--r--  include/exec/cpu-all.h         22
-rw-r--r--  include/exec/exec-all.h        35
-rw-r--r--  include/exec/ram_addr.h         2
-rw-r--r--  include/exec/translate-all.h    2
-rw-r--r--  include/hw/core/tcg-cpu-ops.h  11
-rw-r--r--  include/qemu/atomic.h          16
-rw-r--r--  include/qemu/osdep.h            8
-rw-r--r--  include/qemu/thread.h           8
8 files changed, 61 insertions, 43 deletions
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 16b7df41bf..2eb1176538 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -281,28 +281,18 @@ void page_reset_target_data(target_ulong start, target_ulong end);
 int page_check_range(target_ulong start, target_ulong len, int flags);
 
 /**
- * page_alloc_target_data(address, size)
+ * page_get_target_data(address)
  * @address: guest virtual address
- * @size: size of data to allocate
  *
- * Allocate @size bytes of out-of-band data to associate with the
- * guest page at @address. If the page is not mapped, NULL will
- * be returned. If there is existing data associated with @address,
- * no new memory will be allocated.
+ * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
+ * with the guest page at @address, allocating it if necessary. The
+ * caller should already have verified that the address is valid.
  *
  * The memory will be freed when the guest page is deallocated,
  * e.g. with the munmap system call.
  */
-void *page_alloc_target_data(target_ulong address, size_t size);
-
-/**
- * page_get_target_data(address)
- * @address: guest virtual address
- *
- * Return any out-of-bound memory assocated with the guest page
- * at @address, as per page_alloc_target_data.
- */
-void *page_get_target_data(target_ulong address);
+void *page_get_target_data(target_ulong address)
+    __attribute__((returns_nonnull));
 #endif
 
 CPUArchState *cpu_copy(CPUArchState *env);
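For context on the revised contract (this sketch is not part of the patch and is not QEMU code): out-of-band page data now has a fixed size, is allocated lazily on first lookup, and a valid address never yields NULL. A minimal self-contained model, with toy_* names standing in for the real API:

/* Toy model of the page_get_target_data() contract; all names here are
 * hypothetical stand-ins, not QEMU internals. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_BITS      12
#define TOY_PAGE_DATA_SIZE 16    /* stand-in for TARGET_PAGE_DATA_SIZE */
#define TOY_NR_PAGES       256

static void *toy_page_data[TOY_NR_PAGES];

/* Allocate lazily; the caller has already validated the address, so it
 * never needs a NULL check (compare __attribute__((returns_nonnull))). */
static void *toy_get_page_data(uintptr_t address)
{
    size_t idx = (address >> TOY_PAGE_BITS) % TOY_NR_PAGES;

    if (!toy_page_data[idx]) {
        toy_page_data[idx] = calloc(1, TOY_PAGE_DATA_SIZE);
        if (!toy_page_data[idx]) {
            abort();   /* treat OOM as fatal, glib-style */
        }
    }
    return toy_page_data[idx];
}

int main(void)
{
    uint8_t *tags = toy_get_page_data(0x1234);

    tags[0] = 0xa;   /* e.g. store a per-page tag */
    printf("tag = %x\n", ((uint8_t *)toy_get_page_data(0x1234))[0]);
    return 0;
}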
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index e5f8b224a5..e948992a80 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -39,9 +39,6 @@ typedef ram_addr_t tb_page_addr_t;
 #define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
 #endif
 
-void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
-                          target_ulong *data);
-
 /**
  * cpu_restore_state:
  * @cpu: the vCPU state is to be restore to
@@ -610,18 +607,40 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
     return qatomic_read(&tb->cflags);
 }
 
+static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
+{
+    return tb->page_addr[0];
+}
+
+static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
+{
+    return tb->page_addr[1];
+}
+
+static inline void tb_set_page_addr0(TranslationBlock *tb,
+                                     tb_page_addr_t addr)
+{
+    tb->page_addr[0] = addr;
+}
+
+static inline void tb_set_page_addr1(TranslationBlock *tb,
+                                     tb_page_addr_t addr)
+{
+    tb->page_addr[1] = addr;
+}
+
 /* current cflags for hashing/comparison */
 uint32_t curr_cflags(CPUState *cpu);
 
 /* TranslationBlock invalidate API */
 #if defined(CONFIG_USER_ONLY)
 void tb_invalidate_phys_addr(target_ulong addr);
-void tb_invalidate_phys_range(target_ulong start, target_ulong end);
 #else
 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
 #endif
 void tb_flush(CPUState *cpu);
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end);
 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
 
 /* GETPC is the true target of the return instruction that we'll execute. */
@@ -642,14 +661,6 @@ extern __thread uintptr_t tci_tb_ptr;
    smaller than 4 bytes, so we don't worry about special-casing this. */
 #define GETPC_ADJ   2
 
-#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
-void assert_no_pages_locked(void);
-#else
-static inline void assert_no_pages_locked(void)
-{
-}
-#endif
-
 #if !defined(CONFIG_USER_ONLY)
 /**
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index f3e0c78161..1500680458 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -147,8 +147,6 @@ static inline void qemu_ram_block_writeback(RAMBlock *block)
 #define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
 #define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
 
-void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);
-
 static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                  ram_addr_t length,
                                                  unsigned client)
diff --git a/include/exec/translate-all.h b/include/exec/translate-all.h
index 9f646389af..3e9cb91565 100644
--- a/include/exec/translate-all.h
+++ b/include/exec/translate-all.h
@@ -29,7 +29,7 @@ void page_collection_unlock(struct page_collection *set);
 void tb_invalidate_phys_page_fast(struct page_collection *pages,
                                   tb_page_addr_t start, int len,
                                   uintptr_t retaddr);
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
+void tb_invalidate_phys_page(tb_page_addr_t addr);
 void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
 
 #ifdef CONFIG_USER_ONLY
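The exec-all.h hunk above introduces tb_page_addr0/1() and matching setters rather than letting callers touch tb->page_addr[] directly, so the field's representation can later change in one place. A self-contained illustration of that accessor pattern (ToyTB and the toy_* helpers are hypothetical, not the real TranslationBlock):

#include <stdint.h>

typedef uintptr_t tb_page_addr_t;

/* Hypothetical stand-in for TranslationBlock; only the fields needed here. */
typedef struct ToyTB {
    tb_page_addr_t page_addr[2];
} ToyTB;

static inline tb_page_addr_t toy_tb_page_addr0(const ToyTB *tb)
{
    return tb->page_addr[0];
}

static inline void toy_tb_set_page_addr0(ToyTB *tb, tb_page_addr_t addr)
{
    tb->page_addr[0] = addr;
}

/* Callers go through the accessors, never the array, so the storage
 * behind page_addr[] can be reorganized without touching them. */
static int toy_tb_on_page(const ToyTB *tb, tb_page_addr_t page)
{
    return toy_tb_page_addr0(tb) == page;
}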
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
index 78c6c6635d..20e3c0ffbb 100644
--- a/include/hw/core/tcg-cpu-ops.h
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -31,6 +31,17 @@ struct TCGCPUOps {
      * function to restore all the state, and register it here.
      */
     void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb);
+    /**
+     * @restore_state_to_opc: Synchronize state from INDEX_op_start_insn
+     *
+     * This is called when we unwind state in the middle of a TB,
+     * usually before raising an exception. Set all part of the CPU
+     * state which are tracked insn-by-insn in the target-specific
+     * arguments to start_insn, passed as @data.
+     */
+    void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb,
+                                 const uint64_t *data);
+
     /** @cpu_exec_enter: Callback for cpu_exec preparation */
     void (*cpu_exec_enter)(CPUState *cpu);
     /** @cpu_exec_exit: Callback for cpu_exec cleanup */
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 7e8fc8e7cd..874134fd19 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -133,7 +133,7 @@
 
 #define qatomic_read(ptr) \
     ({ \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
     qatomic_read__nocheck(ptr); \
     })
 
@@ -141,7 +141,7 @@
     __atomic_store_n(ptr, i, __ATOMIC_RELAXED)
 
 #define qatomic_set(ptr, i) do { \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
     qatomic_set__nocheck(ptr, i); \
 } while(0)
 
@@ -159,27 +159,27 @@
 
 #define qatomic_rcu_read(ptr) \
     ({ \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
     typeof_strip_qual(*ptr) _val; \
     qatomic_rcu_read__nocheck(ptr, &_val); \
     _val; \
     })
 
 #define qatomic_rcu_set(ptr, i) do { \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
     __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
 } while(0)
 
 #define qatomic_load_acquire(ptr) \
     ({ \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
     typeof_strip_qual(*ptr) _val; \
     __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE); \
     _val; \
     })
 
 #define qatomic_store_release(ptr, i) do { \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
     __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
 } while(0)
 
@@ -191,7 +191,7 @@
     })
 
 #define qatomic_xchg(ptr, i) ({ \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
     qatomic_xchg__nocheck(ptr, i); \
     })
 
@@ -204,7 +204,7 @@
     })
 
 #define qatomic_cmpxchg(ptr, old, new) ({ \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
     qatomic_cmpxchg__nocheck(ptr, old, new); \
     })
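The new TCGCPUOps hook replaces the former global restore_state_to_opc(): each target now registers a callback that receives the per-insn unwind words recorded at INDEX_op_start_insn. A self-contained sketch of that shape, using toy types rather than real QEMU ones (ToyCPU, ToyTCGOps and the toy_* names are hypothetical):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef struct ToyCPU {
    uint64_t pc;   /* the only insn-by-insn tracked state in this toy */
} ToyCPU;

typedef struct ToyTCGOps {
    void (*restore_state_to_opc)(ToyCPU *cpu, const uint64_t *data);
} ToyTCGOps;

/* What a simple target's hook looks like: copy the recorded words back
 * into CPU state when unwinding in the middle of a TB. */
static void toy_restore_state_to_opc(ToyCPU *cpu, const uint64_t *data)
{
    cpu->pc = data[0];
}

static const ToyTCGOps toy_tcg_ops = {
    .restore_state_to_opc = toy_restore_state_to_opc,
};

int main(void)
{
    ToyCPU cpu = { .pc = 0 };
    uint64_t unwind_data[1] = { 0x4000 };   /* recorded at insn start */

    toy_tcg_ops.restore_state_to_opc(&cpu, unwind_data);
    printf("restored pc = 0x%" PRIx64 "\n", cpu.pc);
    return 0;
}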
diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
index b1c161c035..2276094729 100644
--- a/include/qemu/osdep.h
+++ b/include/qemu/osdep.h
@@ -186,6 +186,14 @@ void QEMU_ERROR("code path is reachable")
 #define qemu_build_not_reached() g_assert_not_reached()
 #endif
 
+/**
+ * qemu_build_assert()
+ *
+ * The compiler, during optimization, is expected to prove that the
+ * assertion is true.
+ */
+#define qemu_build_assert(test) while (!(test)) qemu_build_not_reached()
+
 /*
  * According to waitpid man page:
  * WCOREDUMP
diff --git a/include/qemu/thread.h b/include/qemu/thread.h
index af19f2b3fc..20641e5844 100644
--- a/include/qemu/thread.h
+++ b/include/qemu/thread.h
@@ -227,7 +227,7 @@ struct QemuSpin {
 
 static inline void qemu_spin_init(QemuSpin *spin)
 {
-    __sync_lock_release(&spin->value);
+    qatomic_set(&spin->value, 0);
 #ifdef CONFIG_TSAN
     __tsan_mutex_create(spin, __tsan_mutex_not_static);
 #endif
@@ -246,7 +246,7 @@ static inline void qemu_spin_lock(QemuSpin *spin)
 #ifdef CONFIG_TSAN
     __tsan_mutex_pre_lock(spin, 0);
 #endif
-    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
+    while (unlikely(qatomic_xchg(&spin->value, 1))) {
         while (qatomic_read(&spin->value)) {
             cpu_relax();
         }
@@ -261,7 +261,7 @@ static inline bool qemu_spin_trylock(QemuSpin *spin)
 #ifdef CONFIG_TSAN
     __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock);
 #endif
-    bool busy = __sync_lock_test_and_set(&spin->value, true);
+    bool busy = qatomic_xchg(&spin->value, true);
 #ifdef CONFIG_TSAN
     unsigned flags = __tsan_mutex_try_lock;
     flags |= busy ? __tsan_mutex_try_lock_failed : 0;
@@ -280,7 +280,7 @@ static inline void qemu_spin_unlock(QemuSpin *spin)
 #ifdef CONFIG_TSAN
     __tsan_mutex_pre_unlock(spin, 0);
 #endif
-    __sync_lock_release(&spin->value);
+    qatomic_store_release(&spin->value, 0);
 #ifdef CONFIG_TSAN
     __tsan_mutex_post_unlock(spin, 0);
 #endif
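The thread.h hunks swap the __sync_lock_* builtins for qatomic_xchg()/qatomic_store_release() with an explicit relaxed init. For reference, a self-contained C11 sketch of the same acquire/release spinlock pattern (ToySpin and the toy_* names are illustrative, and plain C11 atomics stand in for QEMU's qatomic layer; QEMU's qatomic_xchg() is sequentially consistent, which is at least as strong as the acquire ordering used here):

#include <stdatomic.h>
#include <stdbool.h>

typedef struct ToySpin {
    atomic_int value;
} ToySpin;

static inline void toy_spin_init(ToySpin *spin)
{
    atomic_store_explicit(&spin->value, 0, memory_order_relaxed);
}

static inline void toy_spin_lock(ToySpin *spin)
{
    /* The exchange that takes the lock provides the acquire barrier. */
    while (atomic_exchange_explicit(&spin->value, 1, memory_order_acquire)) {
        /* Spin with cheap relaxed loads until the lock looks free. */
        while (atomic_load_explicit(&spin->value, memory_order_relaxed)) {
            /* cpu_relax() would go here */
        }
    }
}

static inline bool toy_spin_trylock(ToySpin *spin)
{
    return !atomic_exchange_explicit(&spin->value, 1, memory_order_acquire);
}

static inline void toy_spin_unlock(ToySpin *spin)
{
    /* The release store pairs with the acquire exchange in the locker. */
    atomic_store_explicit(&spin->value, 0, memory_order_release);
}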