Diffstat (limited to 'include/exec/exec-all.h')
-rw-r--r--   include/exec/exec-all.h   54
1 file changed, 1 insertion(+), 53 deletions(-)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 85528f9941..b6a4a122da 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -21,6 +21,7 @@
 #define _EXEC_ALL_H_
 
 #include "qemu-common.h"
+#include "exec/tb-context.h"
 
 /* allow to see translation results - the slowdown should be negligible, so we leave it */
 #define DEBUG_DISAS
@@ -40,30 +41,6 @@ typedef ram_addr_t tb_page_addr_t;
 #define DISAS_UPDATE 2 /* cpu state was modified dynamically */
 #define DISAS_TB_JUMP 3 /* only pc was modified statically */
 
-struct TranslationBlock;
-typedef struct TranslationBlock TranslationBlock;
-
-/* XXX: make safe guess about sizes */
-#define MAX_OP_PER_INSTR 266
-
-#if HOST_LONG_BITS == 32
-#define MAX_OPC_PARAM_PER_ARG 2
-#else
-#define MAX_OPC_PARAM_PER_ARG 1
-#endif
-#define MAX_OPC_PARAM_IARGS 5
-#define MAX_OPC_PARAM_OARGS 1
-#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
-
-/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
- * and up to 4 + N parameters on 64-bit archs
- * (N = number of input arguments + output arguments).  */
-#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
-#define OPC_BUF_SIZE 640
-#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
-
-#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
-
 #include "qemu/log.h"
 
 void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
@@ -104,15 +81,6 @@ void cpu_reloading_memory_map(void);
  * Note that with KVM only one address space is supported.
  */
 void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
-/**
- * cpu_get_address_space:
- * @cpu: CPU to get address space from
- * @asidx: index identifying which address space to get
- *
- * Return the requested address space of this CPU.  @asidx
- * specifies which address space to read.
- */
-AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
 /* cputlb.c */
 /**
  * tlb_flush_page:
@@ -212,9 +180,6 @@ static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
 
 #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
 
-#define CODE_GEN_PHYS_HASH_BITS 15
-#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)
-
 /* Estimated block size for TB allocation.  */
 /* ??? The following is based on a 2015 survey of x86_64 host output.
    Better would seem to be some sort of dynamically sized TB array,
@@ -289,23 +254,6 @@ struct TranslationBlock {
     uintptr_t jmp_list_first;
 };
 
-#include "qemu/thread.h"
-
-typedef struct TBContext TBContext;
-
-struct TBContext {
-
-    TranslationBlock *tbs;
-    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
-    int nb_tbs;
-    /* any access to the tbs or the page table must use this lock */
-    QemuMutex tb_lock;
-
-    /* statistics */
-    int tb_flush_count;
-    int tb_phys_invalidate_count;
-};
-
 void tb_free(TranslationBlock *tb);
 void tb_flush(CPUState *cpu);
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
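The net effect of this change is that the TBContext definition (and the CODE_GEN_PHYS_HASH_* constants it depends on) no longer live in exec-all.h; instead the header pulls in the new "exec/tb-context.h" near the top. A minimal sketch of what that header presumably contains, reconstructed only from the lines removed here (the include guard name, comments, and the placement of the CODE_GEN_PHYS_HASH_* macros in this file are assumptions; this path-limited diff does not show the new header itself):

#ifndef QEMU_TB_CONTEXT_H          /* assumed guard name */
#define QEMU_TB_CONTEXT_H

#include "qemu/thread.h"           /* for QemuMutex */

/* Physical-address hash table sizing, assumed to move here because
 * TBContext's tb_phys_hash[] array needs it. */
#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

typedef struct TranslationBlock TranslationBlock;
typedef struct TBContext TBContext;

struct TBContext {
    TranslationBlock *tbs;                                    /* array of all generated TBs */
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];  /* lookup by guest physical address */
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    QemuMutex tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;
};

#endif

Note that the opcode-buffer macros removed in the same range (MAX_OP_PER_INSTR, MAX_OPC_PARAM, OPC_BUF_SIZE, and so on) do not reappear in this sketch; their new home is not visible in a diff limited to include/exec/exec-all.h.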