39 files changed, 266 insertions(+), 234 deletions(-)
@@ -60,7 +60,7 @@ #endif /* CONFIG_LINUX */ -static CPUArchState *next_cpu; +static CPUState *next_cpu; static bool cpu_thread_is_idle(CPUState *cpu) { @@ -79,10 +79,10 @@ static bool cpu_thread_is_idle(CPUState *cpu) static bool all_cpu_threads_idle(void) { - CPUArchState *env; + CPUState *cpu; - for (env = first_cpu; env != NULL; env = env->next_cpu) { - if (!cpu_thread_is_idle(ENV_GET_CPU(env))) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { + if (!cpu_thread_is_idle(cpu)) { return false; } } @@ -388,15 +388,13 @@ void configure_icount(const char *option) void hw_error(const char *fmt, ...) { va_list ap; - CPUArchState *env; CPUState *cpu; va_start(ap, fmt); fprintf(stderr, "qemu: hardware error: "); vfprintf(stderr, fmt, ap); fprintf(stderr, "\n"); - for (env = first_cpu; env != NULL; env = env->next_cpu) { - cpu = ENV_GET_CPU(env); + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { fprintf(stderr, "CPU #%d:\n", cpu->cpu_index); cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU); } @@ -406,28 +404,28 @@ void hw_error(const char *fmt, ...) void cpu_synchronize_all_states(void) { - CPUArchState *env; + CPUState *cpu; - for (env = first_cpu; env; env = env->next_cpu) { - cpu_synchronize_state(ENV_GET_CPU(env)); + for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) { + cpu_synchronize_state(cpu); } } void cpu_synchronize_all_post_reset(void) { - CPUArchState *cpu; + CPUState *cpu; for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) { - cpu_synchronize_post_reset(ENV_GET_CPU(cpu)); + cpu_synchronize_post_reset(cpu); } } void cpu_synchronize_all_post_init(void) { - CPUArchState *cpu; + CPUState *cpu; for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) { - cpu_synchronize_post_init(ENV_GET_CPU(cpu)); + cpu_synchronize_post_init(cpu); } } @@ -698,7 +696,7 @@ static void qemu_wait_io_event_common(CPUState *cpu) static void qemu_tcg_wait_io_event(void) { - CPUArchState *env; + CPUState *cpu; while (all_cpu_threads_idle()) { /* Start accounting real time to the virtual clock if the CPUs @@ -711,8 +709,8 @@ static void qemu_tcg_wait_io_event(void) qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex); } - for (env = first_cpu; env != NULL; env = env->next_cpu) { - qemu_wait_io_event_common(ENV_GET_CPU(env)); + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { + qemu_wait_io_event_common(cpu); } } @@ -814,7 +812,6 @@ static void tcg_signal_cpu_creation(CPUState *cpu, void *data) static void *qemu_tcg_cpu_thread_fn(void *arg) { CPUState *cpu = arg; - CPUArchState *env; qemu_tcg_init_cpu_signals(); qemu_thread_get_self(cpu->thread); @@ -824,12 +821,12 @@ static void *qemu_tcg_cpu_thread_fn(void *arg) qemu_cond_signal(&qemu_cpu_cond); /* wait for initial kick-off after machine start */ - while (ENV_GET_CPU(first_cpu)->stopped) { + while (first_cpu->stopped) { qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex); /* process any pending work */ - for (env = first_cpu; env != NULL; env = env->next_cpu) { - qemu_wait_io_event_common(ENV_GET_CPU(env)); + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { + qemu_wait_io_event_common(cpu); } } @@ -923,7 +920,7 @@ void qemu_mutex_lock_iothread(void) } else { iothread_requesting_mutex = true; if (qemu_mutex_trylock(&qemu_global_mutex)) { - qemu_cpu_kick_thread(ENV_GET_CPU(first_cpu)); + qemu_cpu_kick_thread(first_cpu); qemu_mutex_lock(&qemu_global_mutex); } iothread_requesting_mutex = false; @@ -938,14 +935,13 @@ void qemu_mutex_unlock_iothread(void) static int all_vcpus_paused(void) { - CPUArchState *penv = first_cpu; + CPUState 
*cpu = first_cpu; - while (penv) { - CPUState *pcpu = ENV_GET_CPU(penv); - if (!pcpu->stopped) { + while (cpu) { + if (!cpu->stopped) { return 0; } - penv = penv->next_cpu; + cpu = cpu->next_cpu; } return 1; @@ -953,25 +949,23 @@ static int all_vcpus_paused(void) void pause_all_vcpus(void) { - CPUArchState *penv = first_cpu; + CPUState *cpu = first_cpu; qemu_clock_enable(vm_clock, false); - while (penv) { - CPUState *pcpu = ENV_GET_CPU(penv); - pcpu->stop = true; - qemu_cpu_kick(pcpu); - penv = penv->next_cpu; + while (cpu) { + cpu->stop = true; + qemu_cpu_kick(cpu); + cpu = cpu->next_cpu; } if (qemu_in_vcpu_thread()) { cpu_stop_current(); if (!kvm_enabled()) { - penv = first_cpu; - while (penv) { - CPUState *pcpu = ENV_GET_CPU(penv); - pcpu->stop = false; - pcpu->stopped = true; - penv = penv->next_cpu; + cpu = first_cpu; + while (cpu) { + cpu->stop = false; + cpu->stopped = true; + cpu = cpu->next_cpu; } return; } @@ -979,10 +973,10 @@ void pause_all_vcpus(void) while (!all_vcpus_paused()) { qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex); - penv = first_cpu; - while (penv) { - qemu_cpu_kick(ENV_GET_CPU(penv)); - penv = penv->next_cpu; + cpu = first_cpu; + while (cpu) { + qemu_cpu_kick(cpu); + cpu = cpu->next_cpu; } } } @@ -996,13 +990,12 @@ void cpu_resume(CPUState *cpu) void resume_all_vcpus(void) { - CPUArchState *penv = first_cpu; + CPUState *cpu = first_cpu; qemu_clock_enable(vm_clock, true); - while (penv) { - CPUState *pcpu = ENV_GET_CPU(penv); - cpu_resume(pcpu); - penv = penv->next_cpu; + while (cpu) { + cpu_resume(cpu); + cpu = cpu->next_cpu; } } @@ -1151,8 +1144,8 @@ static void tcg_exec_all(void) next_cpu = first_cpu; } for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) { - CPUArchState *env = next_cpu; - CPUState *cpu = ENV_GET_CPU(env); + CPUState *cpu = next_cpu; + CPUArchState *env = cpu->env_ptr; qemu_clock_enable(vm_clock, (env->singlestep_enabled & SSTEP_NOTIMER) == 0); @@ -1172,12 +1165,10 @@ static void tcg_exec_all(void) void set_numa_modes(void) { - CPUArchState *env; CPUState *cpu; int i; - for (env = first_cpu; env != NULL; env = env->next_cpu) { - cpu = ENV_GET_CPU(env); + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { for (i = 0; i < nb_numa_nodes; i++) { if (test_bit(cpu->cpu_index, node_cpumask[i])) { cpu->numa_node = i; @@ -1197,18 +1188,30 @@ void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg) CpuInfoList *qmp_query_cpus(Error **errp) { CpuInfoList *head = NULL, *cur_item = NULL; - CPUArchState *env; + CPUState *cpu; - for (env = first_cpu; env != NULL; env = env->next_cpu) { - CPUState *cpu = ENV_GET_CPU(env); + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { CpuInfoList *info; +#if defined(TARGET_I386) + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; +#elif defined(TARGET_PPC) + PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu); + CPUPPCState *env = &ppc_cpu->env; +#elif defined(TARGET_SPARC) + SPARCCPU *sparc_cpu = SPARC_CPU(cpu); + CPUSPARCState *env = &sparc_cpu->env; +#elif defined(TARGET_MIPS) + MIPSCPU *mips_cpu = MIPS_CPU(cpu); + CPUMIPSState *env = &mips_cpu->env; +#endif cpu_synchronize_state(cpu); info = g_malloc0(sizeof(*info)); info->value = g_malloc0(sizeof(*info->value)); info->value->CPU = cpu->cpu_index; - info->value->current = (env == first_cpu); + info->value->current = (cpu == first_cpu); info->value->halted = cpu->halted; info->value->thread_id = cpu->thread_id; #if defined(TARGET_I386) @@ -1316,11 +1319,14 @@ exit: void qmp_inject_nmi(Error **errp) { #if 
defined(TARGET_I386) - CPUArchState *env; + CPUState *cs; + + for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) { + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; - for (env = first_cpu; env != NULL; env = env->next_cpu) { if (!env->apic_state) { - cpu_interrupt(CPU(x86_env_get_cpu(env)), CPU_INTERRUPT_NMI); + cpu_interrupt(cs, CPU_INTERRUPT_NMI); } else { apic_deliver_nmi(env->apic_state); } @@ -186,11 +186,13 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry) void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length) { + CPUState *cpu; CPUArchState *env; - for (env = first_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { int mmu_idx; + env = cpu->env_ptr; for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { unsigned int i; @@ -275,13 +275,11 @@ static inline int cpu_index(CPUState *cpu) static int write_elf64_notes(DumpState *s) { - CPUArchState *env; CPUState *cpu; int ret; int id; - for (env = first_cpu; env != NULL; env = env->next_cpu) { - cpu = ENV_GET_CPU(env); + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { id = cpu_index(cpu); ret = cpu_write_elf64_note(fd_write_vmcore, cpu, id, s); if (ret < 0) { @@ -290,7 +288,7 @@ static int write_elf64_notes(DumpState *s) } } - for (env = first_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { ret = cpu_write_elf64_qemunote(fd_write_vmcore, cpu, s); if (ret < 0) { dump_error(s, "dump: failed to write CPU status.\n"); @@ -327,13 +325,11 @@ static int write_elf32_note(DumpState *s) static int write_elf32_notes(DumpState *s) { - CPUArchState *env; CPUState *cpu; int ret; int id; - for (env = first_cpu; env != NULL; env = env->next_cpu) { - cpu = ENV_GET_CPU(env); + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { id = cpu_index(cpu); ret = cpu_write_elf32_note(fd_write_vmcore, cpu, id, s); if (ret < 0) { @@ -342,7 +338,7 @@ static int write_elf32_notes(DumpState *s) } } - for (env = first_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { ret = cpu_write_elf32_qemunote(fd_write_vmcore, cpu, s); if (ret < 0) { dump_error(s, "dump: failed to write CPU status.\n"); @@ -705,7 +701,7 @@ static ram_addr_t get_start_block(DumpState *s) static int dump_init(DumpState *s, int fd, bool paging, bool has_filter, int64_t begin, int64_t length, Error **errp) { - CPUArchState *env; + CPUState *cpu; int nr_cpus; Error *err = NULL; int ret; @@ -738,7 +734,7 @@ static int dump_init(DumpState *s, int fd, bool paging, bool has_filter, */ cpu_synchronize_all_states(); nr_cpus = 0; - for (env = first_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { nr_cpus++; } @@ -69,7 +69,7 @@ static MemoryRegion io_mem_unassigned; #endif -CPUArchState *first_cpu; +CPUState *first_cpu; /* current CPU in the current thread. It is only valid inside cpu_exec() */ DEFINE_TLS(CPUState *, current_cpu); @@ -351,27 +351,26 @@ const VMStateDescription vmstate_cpu_common = { CPUState *qemu_get_cpu(int index) { - CPUArchState *env = first_cpu; - CPUState *cpu = NULL; + CPUState *cpu = first_cpu; - while (env) { - cpu = ENV_GET_CPU(env); + while (cpu) { if (cpu->cpu_index == index) { break; } - env = env->next_cpu; + cpu = cpu->next_cpu; } - return env ? 
cpu : NULL; + return cpu; } void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data) { - CPUArchState *env = first_cpu; + CPUState *cpu; - while (env) { - func(ENV_GET_CPU(env), data); - env = env->next_cpu; + cpu = first_cpu; + while (cpu) { + func(cpu, data); + cpu = cpu->next_cpu; } } @@ -379,17 +378,17 @@ void cpu_exec_init(CPUArchState *env) { CPUState *cpu = ENV_GET_CPU(env); CPUClass *cc = CPU_GET_CLASS(cpu); - CPUArchState **penv; + CPUState **pcpu; int cpu_index; #if defined(CONFIG_USER_ONLY) cpu_list_lock(); #endif - env->next_cpu = NULL; - penv = &first_cpu; + cpu->next_cpu = NULL; + pcpu = &first_cpu; cpu_index = 0; - while (*penv != NULL) { - penv = &(*penv)->next_cpu; + while (*pcpu != NULL) { + pcpu = &(*pcpu)->next_cpu; cpu_index++; } cpu->cpu_index = cpu_index; @@ -399,7 +398,7 @@ void cpu_exec_init(CPUArchState *env) #ifndef CONFIG_USER_ONLY cpu->thread_id = qemu_get_thread_id(); #endif - *penv = env; + *pcpu = cpu; #if defined(CONFIG_USER_ONLY) cpu_list_unlock(); #endif @@ -638,7 +637,6 @@ void cpu_abort(CPUArchState *env, const char *fmt, ...) CPUArchState *cpu_copy(CPUArchState *env) { CPUArchState *new_env = cpu_init(env->cpu_model_str); - CPUArchState *next_cpu = new_env->next_cpu; #if defined(TARGET_HAS_ICE) CPUBreakpoint *bp; CPUWatchpoint *wp; @@ -646,9 +644,6 @@ CPUArchState *cpu_copy(CPUArchState *env) memcpy(new_env, env, sizeof(CPUArchState)); - /* Preserve chaining. */ - new_env->next_cpu = next_cpu; - /* Clone all break/watchpoints. Note: Once we support ptrace with hw-debug register access, make sure BP_CPU break/watchpoints are handled correctly on clone. */ @@ -1757,12 +1752,14 @@ static void core_commit(MemoryListener *listener) static void tcg_commit(MemoryListener *listener) { - CPUArchState *env; + CPUState *cpu; /* since each CPU stores ram addresses in its TLB cache, we must reset the modified entries */ /* XXX: slow ! */ - for(env = first_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { + CPUArchState *env = cpu->env_ptr; + tlb_flush(env, 1); } } @@ -1839,6 +1839,7 @@ static const char *get_feature_xml(const char *p, const char **newp) /* Generate the XML description for this CPU. 
*/ if (!target_xml[0]) { GDBRegisterState *r; + CPUArchState *env = first_cpu->env_ptr; snprintf(target_xml, sizeof(target_xml), "<?xml version=\"1.0\"?>" @@ -1847,7 +1848,7 @@ static const char *get_feature_xml(const char *p, const char **newp) "<xi:include href=\"%s\"/>", GDB_CORE_XML); - for (r = first_cpu->gdb_regs; r; r = r->next) { + for (r = env->gdb_regs; r; r = r->next) { pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\""); pstrcat(target_xml, sizeof(target_xml), r->xml); pstrcat(target_xml, sizeof(target_xml), "\"/>"); @@ -1949,6 +1950,7 @@ static const int xlat_gdb_type[] = { static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type) { + CPUState *cpu; CPUArchState *env; int err = 0; @@ -1958,7 +1960,8 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type) switch (type) { case GDB_BREAKPOINT_SW: case GDB_BREAKPOINT_HW: - for (env = first_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { + env = cpu->env_ptr; err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL); if (err) break; @@ -1968,7 +1971,8 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type) case GDB_WATCHPOINT_WRITE: case GDB_WATCHPOINT_READ: case GDB_WATCHPOINT_ACCESS: - for (env = first_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { + env = cpu->env_ptr; err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type], NULL); if (err) @@ -1983,6 +1987,7 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type) static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type) { + CPUState *cpu; CPUArchState *env; int err = 0; @@ -1992,7 +1997,8 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type) switch (type) { case GDB_BREAKPOINT_SW: case GDB_BREAKPOINT_HW: - for (env = first_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { + env = cpu->env_ptr; err = cpu_breakpoint_remove(env, addr, BP_GDB); if (err) break; @@ -2002,7 +2008,8 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type) case GDB_WATCHPOINT_WRITE: case GDB_WATCHPOINT_READ: case GDB_WATCHPOINT_ACCESS: - for (env = first_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { + env = cpu->env_ptr; err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]); if (err) break; @@ -2016,6 +2023,7 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type) static void gdb_breakpoint_remove_all(void) { + CPUState *cpu; CPUArchState *env; if (kvm_enabled()) { @@ -2023,7 +2031,8 @@ static void gdb_breakpoint_remove_all(void) return; } - for (env = first_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { + env = cpu->env_ptr; cpu_breakpoint_remove_all(env, BP_GDB); #ifndef CONFIG_USER_ONLY cpu_watchpoint_remove_all(env, BP_GDB); @@ -2071,13 +2080,11 @@ static void gdb_set_cpu_pc(GDBState *s, target_ulong pc) static CPUArchState *find_cpu(uint32_t thread_id) { - CPUArchState *env; CPUState *cpu; - for (env = first_cpu; env != NULL; env = env->next_cpu) { - cpu = ENV_GET_CPU(env); + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { if (cpu_index(cpu) == thread_id) { - return env; + return cpu->env_ptr; } } @@ -2394,7 +2401,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf) put_packet(s, 
"QC1"); break; } else if (strcmp(p,"fThreadInfo") == 0) { - s->query_cpu = first_cpu; + s->query_cpu = first_cpu->env_ptr; goto report_cpuinfo; } else if (strcmp(p,"sThreadInfo") == 0) { report_cpuinfo: @@ -2402,7 +2409,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf) snprintf(buf, sizeof(buf), "m%x", cpu_index(ENV_GET_CPU(s->query_cpu))); put_packet(s, buf); - s->query_cpu = s->query_cpu->next_cpu; + s->query_cpu = ENV_GET_CPU(s->query_cpu)->next_cpu->env_ptr; } else put_packet(s, "l"); break; @@ -2869,8 +2876,8 @@ static void gdb_accept(void) socket_set_nodelay(fd); s = g_malloc0(sizeof(GDBState)); - s->c_cpu = first_cpu; - s->g_cpu = first_cpu; + s->c_cpu = first_cpu->env_ptr; + s->g_cpu = first_cpu->env_ptr; s->fd = fd; gdb_has_xml = 0; @@ -3054,8 +3061,8 @@ int gdbserver_start(const char *device) mon_chr = s->mon_chr; memset(s, 0, sizeof(GDBState)); } - s->c_cpu = first_cpu; - s->g_cpu = first_cpu; + s->c_cpu = first_cpu->env_ptr; + s->g_cpu = first_cpu->env_ptr; s->chr = chr; s->state = chr ? RS_IDLE : RS_INACTIVE; s->mon_chr = mon_chr; diff --git a/hw/arm/boot.c b/hw/arm/boot.c index 7c0090ff4e..2b33444cf1 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -333,7 +333,7 @@ static void do_cpu_reset(void *opaque) env->regs[15] = info->entry & 0xfffffffe; env->thumb = info->entry & 1; } else { - if (env == first_cpu) { + if (CPU(cpu) == first_cpu) { env->regs[15] = info->loader_start; if (!info->dtb_filename) { if (old_param) { @@ -351,7 +351,7 @@ static void do_cpu_reset(void *opaque) void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info) { - CPUARMState *env = &cpu->env; + CPUState *cs = CPU(cpu); int kernel_size; int initrd_size; int n; @@ -476,9 +476,9 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info) } info->is_linux = is_linux; - for (; env; env = env->next_cpu) { - cpu = arm_env_get_cpu(env); - env->boot_info = info; + for (; cs; cs = cs->next_cpu) { + cpu = ARM_CPU(cs); + cpu->env.boot_info = info; qemu_register_reset(do_cpu_reset, cpu); } } diff --git a/hw/arm/exynos4_boards.c b/hw/arm/exynos4_boards.c index 74f110ba61..7c90b2d782 100644 --- a/hw/arm/exynos4_boards.c +++ b/hw/arm/exynos4_boards.c @@ -131,7 +131,7 @@ static void nuri_init(QEMUMachineInitArgs *args) { exynos4_boards_init_common(args, EXYNOS4_BOARD_NURI); - arm_load_kernel(arm_env_get_cpu(first_cpu), &exynos4_board_binfo); + arm_load_kernel(ARM_CPU(first_cpu), &exynos4_board_binfo); } static void smdkc210_init(QEMUMachineInitArgs *args) @@ -141,7 +141,7 @@ static void smdkc210_init(QEMUMachineInitArgs *args) lan9215_init(SMDK_LAN9118_BASE_ADDR, qemu_irq_invert(s->irq_table[exynos4210_get_irq(37, 1)])); - arm_load_kernel(arm_env_get_cpu(first_cpu), &exynos4_board_binfo); + arm_load_kernel(ARM_CPU(first_cpu), &exynos4_board_binfo); } static QEMUMachine exynos4_machines[EXYNOS4_NUM_OF_BOARDS] = { diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c index de6c98a687..3a2fb4c81c 100644 --- a/hw/arm/highbank.c +++ b/hw/arm/highbank.c @@ -321,7 +321,7 @@ static void highbank_init(QEMUMachineInitArgs *args) highbank_binfo.loader_start = 0; highbank_binfo.write_secondary_boot = hb_write_secondary; highbank_binfo.secondary_cpu_reset_hook = hb_reset_secondary; - arm_load_kernel(arm_env_get_cpu(first_cpu), &highbank_binfo); + arm_load_kernel(ARM_CPU(first_cpu), &highbank_binfo); } static QEMUMachine highbank_machine = { diff --git a/hw/arm/realview.c b/hw/arm/realview.c index b5962565f9..3060f48f77 100644 --- a/hw/arm/realview.c +++ b/hw/arm/realview.c @@ -331,7 +331,7 @@ static void 
realview_init(QEMUMachineInitArgs *args, realview_binfo.nb_cpus = smp_cpus; realview_binfo.board_id = realview_board_id[board_type]; realview_binfo.loader_start = (board_type == BOARD_PB_A8 ? 0x70000000 : 0); - arm_load_kernel(arm_env_get_cpu(first_cpu), &realview_binfo); + arm_load_kernel(ARM_CPU(first_cpu), &realview_binfo); } static void realview_eb_init(QEMUMachineInitArgs *args) diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c index 710413e5a9..fd18b60532 100644 --- a/hw/arm/vexpress.c +++ b/hw/arm/vexpress.c @@ -519,7 +519,7 @@ static void vexpress_common_init(const VEDBoardInfo *daughterboard, vexpress_binfo.smp_loader_start = map[VE_SRAM]; vexpress_binfo.smp_bootreg_addr = map[VE_SYSREGS] + 0x30; vexpress_binfo.gic_cpu_if_addr = daughterboard->gic_cpu_if_addr; - arm_load_kernel(arm_env_get_cpu(first_cpu), &vexpress_binfo); + arm_load_kernel(ARM_CPU(first_cpu), &vexpress_binfo); } static void vexpress_a9_init(QEMUMachineInitArgs *args) diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c index 0dc0871651..3444823f3f 100644 --- a/hw/arm/xilinx_zynq.c +++ b/hw/arm/xilinx_zynq.c @@ -226,7 +226,7 @@ static void zynq_init(QEMUMachineInitArgs *args) zynq_binfo.nb_cpus = 1; zynq_binfo.board_id = 0xd32; zynq_binfo.loader_start = 0; - arm_load_kernel(arm_env_get_cpu(first_cpu), &zynq_binfo); + arm_load_kernel(ARM_CPU(first_cpu), &zynq_binfo); } static QEMUMachine zynq_machine = { diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c index 98e5ca5258..1022d67178 100644 --- a/hw/i386/kvm/clock.c +++ b/hw/i386/kvm/clock.c @@ -33,7 +33,7 @@ static void kvmclock_vm_state_change(void *opaque, int running, RunState state) { KVMClockState *s = opaque; - CPUArchState *penv = first_cpu; + CPUState *cpu = first_cpu; int cap_clock_ctrl = kvm_check_extension(kvm_state, KVM_CAP_KVMCLOCK_CTRL); int ret; @@ -53,8 +53,8 @@ static void kvmclock_vm_state_change(void *opaque, int running, if (!cap_clock_ctrl) { return; } - for (penv = first_cpu; penv != NULL; penv = penv->next_cpu) { - ret = kvm_vcpu_ioctl(ENV_GET_CPU(penv), KVM_KVMCLOCK_CTRL, 0); + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { + ret = kvm_vcpu_ioctl(cpu, KVM_KVMCLOCK_CTRL, 0); if (ret) { if (ret != -EINVAL) { fprintf(stderr, "%s: %s\n", __func__, strerror(-ret)); @@ -124,9 +124,11 @@ static const TypeInfo kvmclock_info = { /* Note: Must be called after VCPU initialization. 
*/ void kvmclock_create(void) { + X86CPU *cpu = X86_CPU(first_cpu); + if (kvm_enabled() && - first_cpu->features[FEAT_KVM] & ((1ULL << KVM_FEATURE_CLOCKSOURCE) | - (1ULL << KVM_FEATURE_CLOCKSOURCE2))) { + cpu->env.features[FEAT_KVM] & ((1ULL << KVM_FEATURE_CLOCKSOURCE) | + (1ULL << KVM_FEATURE_CLOCKSOURCE2))) { sysbus_create_simple("kvmclock", -1, NULL); } } diff --git a/hw/i386/kvmvapic.c b/hw/i386/kvmvapic.c index e13678fa59..ccd089a40e 100644 --- a/hw/i386/kvmvapic.c +++ b/hw/i386/kvmvapic.c @@ -490,13 +490,15 @@ static void vapic_enable_tpr_reporting(bool enable) VAPICEnableTPRReporting info = { .enable = enable, }; + CPUState *cs; X86CPU *cpu; CPUX86State *env; - for (env = first_cpu; env != NULL; env = env->next_cpu) { - cpu = x86_env_get_cpu(env); + for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) { + cpu = X86_CPU(cs); + env = &cpu->env; info.apic = env->apic_state; - run_on_cpu(CPU(cpu), vapic_do_enable_tpr_reporting, &info); + run_on_cpu(cs, vapic_do_enable_tpr_reporting, &info); } } @@ -719,8 +721,9 @@ static int vapic_init(SysBusDevice *dev) static void do_vapic_enable(void *data) { VAPICROMState *s = data; + X86CPU *cpu = X86_CPU(first_cpu); - vapic_enable(s, first_cpu); + vapic_enable(s, &cpu->env); } static int vapic_post_load(void *opaque, int version_id) @@ -743,7 +746,7 @@ static int vapic_post_load(void *opaque, int version_id) } if (s->state == VAPIC_ACTIVE) { if (smp_cpus == 1) { - run_on_cpu(ENV_GET_CPU(first_cpu), do_vapic_enable, s); + run_on_cpu(first_cpu, do_vapic_enable, s); } else { zero = g_malloc0(s->rom_state.vapic_size); cpu_physical_memory_rw(s->vapic_paddr, zero, diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 52242568f6..c5d8570af1 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -160,8 +160,9 @@ void cpu_smm_register(cpu_set_smm_t callback, void *arg) void cpu_smm_update(CPUX86State *env) { - if (smm_set && smm_arg && env == first_cpu) + if (smm_set && smm_arg && CPU(x86_env_get_cpu(env)) == first_cpu) { smm_set(!!(env->hflags & HF_SMM_MASK), smm_arg); + } } @@ -185,18 +186,21 @@ int cpu_get_pic_interrupt(CPUX86State *env) static void pic_irq_request(void *opaque, int irq, int level) { - CPUX86State *env = first_cpu; + CPUState *cs = first_cpu; + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; DPRINTF("pic_irqs: %s irq %d\n", level? "raise" : "lower", irq); if (env->apic_state) { - while (env) { + while (cs) { + cpu = X86_CPU(cs); + env = &cpu->env; if (apic_accept_pic_intr(env->apic_state)) { apic_deliver_pic_intr(env->apic_state, level); } - env = env->next_cpu; + cs = cs->next_cpu; } } else { - CPUState *cs = CPU(x86_env_get_cpu(env)); if (level) { cpu_interrupt(cs, CPU_INTERRUPT_HARD); } else { @@ -1274,8 +1278,7 @@ void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi, } } - a20_line = qemu_allocate_irqs(handle_a20_line_change, - x86_env_get_cpu(first_cpu), 2); + a20_line = qemu_allocate_irqs(handle_a20_line_change, first_cpu, 2); i8042 = isa_create_simple(isa_bus, "i8042"); i8042_setup_a20_line(i8042, &a20_line[0]); if (!no_vmport) { diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index 01323a9368..b58c25596d 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -229,8 +229,7 @@ static void pc_init1(MemoryRegion *system_memory, if (pci_enabled && acpi_enabled) { i2c_bus *smbus; - smi_irq = qemu_allocate_irqs(pc_acpi_smi_interrupt, - x86_env_get_cpu(first_cpu), 1); + smi_irq = qemu_allocate_irqs(pc_acpi_smi_interrupt, first_cpu, 1); /* TODO: Populate SPD eeprom data. 
*/ smbus = piix4_pm_init(pci_bus, piix3_devfn + 3, 0xb100, gsi[9], *smi_irq, diff --git a/hw/intc/sh_intc.c b/hw/intc/sh_intc.c index f397950cbb..55c76e4afc 100644 --- a/hw/intc/sh_intc.c +++ b/hw/intc/sh_intc.c @@ -42,16 +42,15 @@ void sh_intc_toggle_source(struct intc_source *source, pending_changed = 1; if (pending_changed) { - CPUState *cpu = CPU(sh_env_get_cpu(first_cpu)); if (source->pending) { source->parent->pending++; if (source->parent->pending == 1) { - cpu_interrupt(cpu, CPU_INTERRUPT_HARD); + cpu_interrupt(first_cpu, CPU_INTERRUPT_HARD); } } else { source->parent->pending--; if (source->parent->pending == 0) { - cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD); + cpu_reset_interrupt(first_cpu, CPU_INTERRUPT_HARD); } } } diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c index b49be4dae3..d1921aa635 100644 --- a/hw/isa/lpc_ich9.c +++ b/hw/isa/lpc_ich9.c @@ -380,7 +380,7 @@ static void ich9_apm_ctrl_changed(uint32_t val, void *arg) /* SMI_EN = PMBASE + 30. SMI control and enable register */ if (lpc->pm.smi_en & ICH9_PMIO_SMI_EN_APMC_EN) { - cpu_interrupt(CPU(x86_env_get_cpu(first_cpu)), CPU_INTERRUPT_SMI); + cpu_interrupt(first_cpu, CPU_INTERRUPT_SMI); } } diff --git a/hw/mips/mips_malta.c b/hw/mips/mips_malta.c index 7e561217dc..de87241e9c 100644 --- a/hw/mips/mips_malta.c +++ b/hw/mips/mips_malta.c @@ -844,7 +844,8 @@ void mips_malta_init(QEMUMachineInitArgs *args) cpu_mips_clock_init(env); qemu_register_reset(main_cpu_reset, cpu); } - env = first_cpu; + cpu = MIPS_CPU(first_cpu); + env = &cpu->env; /* allocate RAM */ if (ram_size > (256 << 20)) { diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index 69837a5fb8..960059930f 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -500,7 +500,6 @@ static DeviceState *ppce500_init_mpic_kvm(PPCE500Params *params, qemu_irq **irqs) { DeviceState *dev; - CPUPPCState *env; CPUState *cs; int r; @@ -512,9 +511,7 @@ static DeviceState *ppce500_init_mpic_kvm(PPCE500Params *params, return NULL; } - for (env = first_cpu; env != NULL; env = env->next_cpu) { - cs = ENV_GET_CPU(env); - + for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) { if (kvm_openpic_connect_vcpu(dev, cs)) { fprintf(stderr, "%s: failed to connect vcpu to irqchip\n", __func__); diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index fb57b42ea0..554f2440b1 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -440,15 +440,14 @@ void ppce500_irq_init(CPUPPCState *env) /* Enable or Disable the E500 EPR capability */ void ppce500_set_mpic_proxy(bool enabled) { - CPUPPCState *env; + CPUState *cs; - for (env = first_cpu; env != NULL; env = env->next_cpu) { - PowerPCCPU *cpu = ppc_env_get_cpu(env); - CPUState *cs = CPU(cpu); + for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) { + PowerPCCPU *cpu = POWERPC_CPU(cs); - env->mpic_proxy = enabled; + cpu->env.mpic_proxy = enabled; if (kvm_enabled()) { - kvmppc_set_mpic_proxy(POWERPC_CPU(cs), enabled); + kvmppc_set_mpic_proxy(cpu, enabled); } } } diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c index d07dd014c6..19f2442482 100644 --- a/hw/ppc/prep.c +++ b/hw/ppc/prep.c @@ -605,8 +605,9 @@ static void ppc_prep_init(QEMUMachineInitArgs *args) /* PCI -> ISA bridge */ pci = pci_create_simple(pci_bus, PCI_DEVFN(1, 0), "i82378"); cpu_exit_irq = qemu_allocate_irqs(cpu_request_exit, NULL, 1); + cpu = POWERPC_CPU(first_cpu); qdev_connect_gpio_out(&pci->qdev, 0, - first_cpu->irq_inputs[PPC6xx_INPUT_INT]); + cpu->env.irq_inputs[PPC6xx_INPUT_INT]); qdev_connect_gpio_out(&pci->qdev, 1, *cpu_exit_irq); sysbus_connect_irq(&pcihost->busdev, 0, qdev_get_gpio_in(&pci->qdev, 9)); 
sysbus_connect_irq(&pcihost->busdev, 1, qdev_get_gpio_in(&pci->qdev, 11)); @@ -651,7 +652,8 @@ static void ppc_prep_init(QEMUMachineInitArgs *args) } isa_create_simple(isa_bus, "i8042"); - sysctrl->reset_irq = first_cpu->irq_inputs[PPC6xx_INPUT_HRESET]; + cpu = POWERPC_CPU(first_cpu); + sysctrl->reset_irq = cpu->env.irq_inputs[PPC6xx_INPUT_HRESET]; portio_list_init(port_list, NULL, prep_portio_list, sysctrl, "prep"); portio_list_add(port_list, get_system_io(), 0x0); diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index c040794081..e46aad30d9 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -131,7 +131,6 @@ int spapr_allocate_irq_block(int num, bool lsi) static int spapr_fixup_cpu_dt(void *fdt, sPAPREnvironment *spapr) { int ret = 0, offset; - CPUPPCState *env; CPUState *cpu; char cpu_model[32]; int smt = kvmppc_smt_threads(); @@ -139,8 +138,7 @@ static int spapr_fixup_cpu_dt(void *fdt, sPAPREnvironment *spapr) assert(spapr->cpu_model); - for (env = first_cpu; env != NULL; env = env->next_cpu) { - cpu = CPU(ppc_env_get_cpu(env)); + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { uint32_t associativity[] = {cpu_to_be32(0x5), cpu_to_be32(0x0), cpu_to_be32(0x0), @@ -231,7 +229,7 @@ static void *spapr_create_fdt_skel(const char *cpu_model, uint32_t epow_irq) { void *fdt; - CPUPPCState *env; + CPUState *cs; uint32_t start_prop = cpu_to_be32(initrd_base); uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size); char hypertas_prop[] = "hcall-pft\0hcall-term\0hcall-dabr\0hcall-interrupt" @@ -304,10 +302,11 @@ static void *spapr_create_fdt_skel(const char *cpu_model, /* This is needed during FDT finalization */ spapr->cpu_model = g_strdup(modelname); - for (env = first_cpu; env != NULL; env = env->next_cpu) { - CPUState *cpu = CPU(ppc_env_get_cpu(env)); - PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); - int index = cpu->cpu_index; + for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); + int index = cs->cpu_index; uint32_t servers_prop[smp_threads]; uint32_t gservers_prop[smp_threads * 2]; char *nodename; @@ -632,7 +631,7 @@ static void spapr_reset_htab(sPAPREnvironment *spapr) static void ppc_spapr_reset(void) { - CPUState *first_cpu_cpu; + PowerPCCPU *first_ppc_cpu; /* Reset the hash table & recalc the RMA */ spapr_reset_htab(spapr); @@ -644,11 +643,11 @@ static void ppc_spapr_reset(void) spapr->rtas_size); /* Set up the entry state */ - first_cpu_cpu = ENV_GET_CPU(first_cpu); - first_cpu->gpr[3] = spapr->fdt_addr; - first_cpu->gpr[5] = 0; - first_cpu_cpu->halted = 0; - first_cpu->nip = spapr->entry_point; + first_ppc_cpu = POWERPC_CPU(first_cpu); + first_ppc_cpu->env.gpr[3] = spapr->fdt_addr; + first_ppc_cpu->env.gpr[5] = 0; + first_cpu->halted = 0; + first_ppc_cpu->env.nip = spapr->entry_point; } diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index 6760851139..6499cd0f2e 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -356,7 +356,6 @@ CPUArchState *cpu_copy(CPUArchState *env); void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...) GCC_FMT_ATTR(2, 3); -extern CPUArchState *first_cpu; /* Flags for use in ENV->INTERRUPT_PENDING. 
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h index c4ac929875..39094b3f48 100644 --- a/include/exec/cpu-defs.h +++ b/include/exec/cpu-defs.h @@ -181,7 +181,6 @@ typedef struct CPUWatchpoint { sigjmp_buf jmp_env; \ int exception_index; \ \ - CPUArchState *next_cpu; /* next CPU sharing TB cache */ \ /* user data */ \ void *opaque; \ \ diff --git a/include/qom/cpu.h b/include/qom/cpu.h index d7fc1868d6..a08a8ab2b6 100644 --- a/include/qom/cpu.h +++ b/include/qom/cpu.h @@ -114,6 +114,7 @@ struct kvm_run; * CPU and return to its top level loop. * @env_ptr: Pointer to subclass-specific CPUArchState field. * @current_tb: Currently executing TB. + * @next_cpu: Next CPU sharing TB cache. * @kvm_fd: vCPU file descriptor for KVM. * * State of one CPU core or thread. @@ -146,6 +147,7 @@ struct CPUState { void *env_ptr; /* CPUArchState */ struct TranslationBlock *current_tb; + CPUState *next_cpu; int kvm_fd; bool kvm_vcpu_dirty; @@ -157,6 +159,8 @@ struct CPUState { uint32_t halted; /* used by alpha, cris, ppc TCG */ }; +extern CPUState *first_cpu; + DECLARE_TLS(CPUState *, current_cpu); #define current_cpu tls_var(current_cpu) @@ -1938,7 +1938,9 @@ int kvm_insert_breakpoint(CPUArchState *env, target_ulong addr, } } - for (env = first_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { + CPUArchState *env = cpu->env_ptr; + err = kvm_update_guest_debug(env, 0); if (err) { return err; @@ -1979,7 +1981,9 @@ int kvm_remove_breakpoint(CPUArchState *env, target_ulong addr, } } - for (env = first_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { + CPUArchState *env = cpu->env_ptr; + err = kvm_update_guest_debug(env, 0); if (err) { return err; @@ -1992,13 +1996,11 @@ void kvm_remove_all_breakpoints(CPUState *cpu) { struct kvm_sw_breakpoint *bp, *next; KVMState *s = cpu->kvm_state; - CPUArchState *env; QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) { if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) { /* Try harder to find a CPU that currently sees the breakpoint. 
*/ - for (env = first_cpu; env != NULL; env = env->next_cpu) { - cpu = ENV_GET_CPU(env); + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { if (kvm_arch_remove_sw_breakpoint(cpu, bp) == 0) { break; } @@ -2009,7 +2011,9 @@ void kvm_remove_all_breakpoints(CPUState *cpu) } kvm_arch_remove_all_hw_breakpoints(); - for (env = first_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { + CPUArchState *env = cpu->env_ptr; + kvm_update_guest_debug(env, 0); } } diff --git a/linux-user/elfload.c b/linux-user/elfload.c index ddef23e6dc..d517450db4 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -2628,7 +2628,7 @@ static int fill_note_info(struct elf_note_info *info, long signr, const CPUArchState *env) { #define NUMNOTES 3 - CPUArchState *cpu = NULL; + CPUState *cpu = NULL; TaskState *ts = (TaskState *)env->opaque; int i; @@ -2667,9 +2667,10 @@ static int fill_note_info(struct elf_note_info *info, /* read and fill status of all threads */ cpu_list_lock(); for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { - if (cpu == thread_env) + if (cpu == ENV_GET_CPU(thread_env)) { continue; - fill_thread_info(info, cpu); + } + fill_thread_info(info, (CPUArchState *)cpu->env_ptr); } cpu_list_unlock(); diff --git a/linux-user/main.c b/linux-user/main.c index af82db87b8..564bed6e8b 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -120,8 +120,8 @@ void fork_end(int child) if (child) { /* Child processes created by fork() only have a single thread. Discard information about the parent threads. */ - first_cpu = thread_env; - thread_env->next_cpu = NULL; + first_cpu = ENV_GET_CPU(thread_env); + first_cpu->next_cpu = NULL; pending_cpus = 0; pthread_mutex_init(&exclusive_lock, NULL); pthread_mutex_init(&cpu_list_mutex, NULL); @@ -148,7 +148,6 @@ static inline void exclusive_idle(void) Must only be called from outside cpu_arm_exec. */ static inline void start_exclusive(void) { - CPUArchState *other; CPUState *other_cpu; pthread_mutex_lock(&exclusive_lock); @@ -156,8 +155,7 @@ static inline void start_exclusive(void) pending_cpus = 1; /* Make all other cpus stop executing. */ - for (other = first_cpu; other; other = other->next_cpu) { - other_cpu = ENV_GET_CPU(other); + for (other_cpu = first_cpu; other_cpu; other_cpu = other_cpu->next_cpu) { if (other_cpu->running) { pending_cpus++; cpu_exit(other_cpu); diff --git a/linux-user/syscall.c b/linux-user/syscall.c index a2125fa2f2..4c96f4fd85 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -5030,6 +5030,9 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, abi_long arg5, abi_long arg6, abi_long arg7, abi_long arg8) { +#ifdef CONFIG_USE_NPTL + CPUState *cpu = ENV_GET_CPU(cpu_env); +#endif abi_long ret; struct stat st; struct statfs stfs; @@ -5052,13 +5055,13 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, be disabling signals. 
*/ if (first_cpu->next_cpu) { TaskState *ts; - CPUArchState **lastp; - CPUArchState *p; + CPUState **lastp; + CPUState *p; cpu_list_lock(); lastp = &first_cpu; p = first_cpu; - while (p && p != (CPUArchState *)cpu_env) { + while (p && p != cpu) { lastp = &p->next_cpu; p = p->next_cpu; } diff --git a/memory_mapping.c b/memory_mapping.c index 5634f813cd..515a984e0d 100644 --- a/memory_mapping.c +++ b/memory_mapping.c @@ -165,13 +165,13 @@ void memory_mapping_list_init(MemoryMappingList *list) QTAILQ_INIT(&list->head); } -static CPUArchState *find_paging_enabled_cpu(CPUArchState *start_cpu) +static CPUState *find_paging_enabled_cpu(CPUState *start_cpu) { - CPUArchState *env; + CPUState *cpu; - for (env = start_cpu; env != NULL; env = env->next_cpu) { - if (cpu_paging_enabled(ENV_GET_CPU(env))) { - return env; + for (cpu = start_cpu; cpu != NULL; cpu = cpu->next_cpu) { + if (cpu_paging_enabled(cpu)) { + return cpu; } } @@ -180,15 +180,15 @@ static CPUArchState *find_paging_enabled_cpu(CPUArchState *start_cpu) void qemu_get_guest_memory_mapping(MemoryMappingList *list, Error **errp) { - CPUArchState *env, *first_paging_enabled_cpu; + CPUState *cpu, *first_paging_enabled_cpu; RAMBlock *block; ram_addr_t offset, length; first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu); if (first_paging_enabled_cpu) { - for (env = first_paging_enabled_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_paging_enabled_cpu; cpu != NULL; cpu = cpu->next_cpu) { Error *err = NULL; - cpu_get_memory_mapping(ENV_GET_CPU(env), list, &err); + cpu_get_memory_mapping(cpu, list, &err); if (err) { error_propagate(errp, err); return; @@ -1806,14 +1806,12 @@ static void do_info_mtree(Monitor *mon, const QDict *qdict) static void do_info_numa(Monitor *mon, const QDict *qdict) { int i; - CPUArchState *env; CPUState *cpu; monitor_printf(mon, "%d nodes\n", nb_numa_nodes); for (i = 0; i < nb_numa_nodes; i++) { monitor_printf(mon, "node %d cpus:", i); - for (env = first_cpu; env != NULL; env = env->next_cpu) { - cpu = ENV_GET_CPU(env); + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { if (cpu->numa_node == i) { monitor_printf(mon, " %d", cpu->cpu_index); } diff --git a/target-i386/arch_dump.c b/target-i386/arch_dump.c index 83898cd00f..d13322808f 100644 --- a/target-i386/arch_dump.c +++ b/target-i386/arch_dump.c @@ -185,7 +185,8 @@ int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, X86CPU *cpu = X86_CPU(cs); int ret; #ifdef TARGET_X86_64 - bool lma = !!(first_cpu->hflags & HF_LMA_MASK); + X86CPU *first_x86_cpu = X86_CPU(first_cpu); + bool lma = !!(first_x86_cpu->env.hflags & HF_LMA_MASK); if (lma) { ret = x86_64_write_elf64_note(f, &cpu->env, cpuid, opaque); @@ -394,7 +395,9 @@ int cpu_get_dump_info(ArchDumpInfo *info) RAMBlock *block; #ifdef TARGET_X86_64 - lma = !!(first_cpu->hflags & HF_LMA_MASK); + X86CPU *first_x86_cpu = X86_CPU(first_cpu); + + lma = !!(first_x86_cpu->env.hflags & HF_LMA_MASK); #endif if (lma) { diff --git a/target-i386/helper.c b/target-i386/helper.c index 5e5abe3b86..d6f43d7a21 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -1188,6 +1188,7 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, uint64_t status, uint64_t mcg_status, uint64_t addr, uint64_t misc, int flags) { + CPUState *cs = CPU(cpu); CPUX86State *cenv = &cpu->env; MCEInjectionParams params = { .mon = mon, @@ -1200,7 +1201,6 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, .flags = flags, }; unsigned bank_num = cenv->mcg_cap & 0xff; - CPUX86State *env; if 
(!cenv->mcg_cap) { monitor_printf(mon, "MCE injection not supported\n"); @@ -1220,19 +1220,22 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, return; } - run_on_cpu(CPU(cpu), do_inject_x86_mce, ¶ms); + run_on_cpu(cs, do_inject_x86_mce, ¶ms); if (flags & MCE_INJECT_BROADCAST) { + CPUState *other_cs; + params.bank = 1; params.status = MCI_STATUS_VAL | MCI_STATUS_UC; params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV; params.addr = 0; params.misc = 0; - for (env = first_cpu; env != NULL; env = env->next_cpu) { - if (cenv == env) { + for (other_cs = first_cpu; other_cs != NULL; + other_cs = other_cs->next_cpu) { + if (other_cs == cs) { continue; } - params.cpu = x86_env_get_cpu(env); - run_on_cpu(CPU(cpu), do_inject_x86_mce, ¶ms); + params.cpu = X86_CPU(other_cs); + run_on_cpu(other_cs, do_inject_x86_mce, ¶ms); } } } diff --git a/target-i386/kvm.c b/target-i386/kvm.c index 4b557b3a38..935ef6393d 100644 --- a/target-i386/kvm.c +++ b/target-i386/kvm.c @@ -345,20 +345,22 @@ int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr) int kvm_arch_on_sigbus(int code, void *addr) { - if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) { + X86CPU *cpu = X86_CPU(first_cpu); + + if ((cpu->env.mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) { ram_addr_t ram_addr; hwaddr paddr; /* Hope we are lucky for AO MCE */ if (qemu_ram_addr_from_host(addr, &ram_addr) == NULL || - !kvm_physical_memory_addr_from_host(CPU(first_cpu)->kvm_state, + !kvm_physical_memory_addr_from_host(first_cpu->kvm_state, addr, &paddr)) { fprintf(stderr, "Hardware memory error for memory used by " "QEMU itself instead of guest system!: %p\n", addr); return 0; } kvm_hwpoison_page_add(ram_addr); - kvm_mce_inject(x86_env_get_cpu(first_cpu), paddr, code); + kvm_mce_inject(X86_CPU(first_cpu), paddr, code); } else { if (code == BUS_MCEERR_AO) { return 0; diff --git a/target-i386/misc_helper.c b/target-i386/misc_helper.c index e345f9a1e7..957926ced7 100644 --- a/target-i386/misc_helper.c +++ b/target-i386/misc_helper.c @@ -610,7 +610,7 @@ void helper_mwait(CPUX86State *env, int next_eip_addend) cpu = x86_env_get_cpu(env); cs = CPU(cpu); /* XXX: not complete but not completely erroneous */ - if (cs->cpu_index != 0 || env->next_cpu != NULL) { + if (cs->cpu_index != 0 || cs->next_cpu != NULL) { /* more than one CPU: do not sleep because another CPU may wake this one */ } else { diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c index f6838ecd5f..5cf1c3f04b 100644 --- a/target-mips/op_helper.c +++ b/target-mips/op_helper.c @@ -1696,39 +1696,38 @@ target_ulong helper_emt(void) target_ulong helper_dvpe(CPUMIPSState *env) { - CPUMIPSState *other_cpu_env = first_cpu; + CPUState *other_cs = first_cpu; target_ulong prev = env->mvp->CP0_MVPControl; do { + MIPSCPU *other_cpu = MIPS_CPU(other_cs); /* Turn off all VPEs except the one executing the dvpe. 
*/ - if (other_cpu_env != env) { - MIPSCPU *other_cpu = mips_env_get_cpu(other_cpu_env); - - other_cpu_env->mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP); + if (&other_cpu->env != env) { + other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP); mips_vpe_sleep(other_cpu); } - other_cpu_env = other_cpu_env->next_cpu; - } while (other_cpu_env); + other_cs = other_cs->next_cpu; + } while (other_cs); return prev; } target_ulong helper_evpe(CPUMIPSState *env) { - CPUMIPSState *other_cpu_env = first_cpu; + CPUState *other_cs = first_cpu; target_ulong prev = env->mvp->CP0_MVPControl; do { - MIPSCPU *other_cpu = mips_env_get_cpu(other_cpu_env); + MIPSCPU *other_cpu = MIPS_CPU(other_cs); - if (other_cpu_env != env + if (&other_cpu->env != env /* If the VPE is WFI, don't disturb its sleep. */ && !mips_vpe_is_wfi(other_cpu)) { /* Enable the VPE. */ - other_cpu_env->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP); + other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP); mips_vpe_wake(other_cpu); /* And wake it up. */ } - other_cpu_env = other_cpu_env->next_cpu; - } while (other_cpu_env); + other_cs = other_cs->next_cpu; + } while (other_cs); return prev; } #endif /* !CONFIG_USER_ONLY */ diff --git a/target-ppc/excp_helper.c b/target-ppc/excp_helper.c index 4a0fc6dd57..e9fcad8ef6 100644 --- a/target-ppc/excp_helper.c +++ b/target-ppc/excp_helper.c @@ -986,16 +986,19 @@ void helper_msgsnd(target_ulong rb) { int irq = dbell2irq(rb); int pir = rb & DBELL_PIRTAG_MASK; - CPUPPCState *cenv; + CPUState *cs; if (irq < 0) { return; } - for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) { + for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *cenv = &cpu->env; + if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) { cenv->pending_interrupts |= 1 << irq; - cpu_interrupt(CPU(ppc_env_get_cpu(cenv)), CPU_INTERRUPT_HARD); + cpu_interrupt(cs, CPU_INTERRUPT_HARD); } } } diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c index e95ad4a645..b0099e122f 100644 --- a/target-ppc/kvm.c +++ b/target-ppc/kvm.c @@ -1579,7 +1579,7 @@ uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift) /* Find the largest hardware supported page size that's less than * or equal to the (logical) backing page size of guest RAM */ - kvm_get_smmu_info(ppc_env_get_cpu(first_cpu), &info); + kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info); rampagesize = getrampagesize(); best_page_shift = 0; diff --git a/translate-all.c b/translate-all.c index 02f8e5e800..e8683d2c66 100644 --- a/translate-all.c +++ b/translate-all.c @@ -681,7 +681,7 @@ static void page_flush_tb(void) /* XXX: tb_flush is currently not thread safe */ void tb_flush(CPUArchState *env1) { - CPUArchState *env; + CPUState *cpu; #if defined(DEBUG_FLUSH) printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n", @@ -696,7 +696,9 @@ void tb_flush(CPUArchState *env1) } tcg_ctx.tb_ctx.nb_tbs = 0; - for (env = first_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { + CPUArchState *env = cpu->env_ptr; + memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *)); } @@ -821,7 +823,7 @@ static inline void tb_reset_jump(TranslationBlock *tb, int n) /* invalidate one TB */ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) { - CPUArchState *env; + CPUState *cpu; PageDesc *p; unsigned int h, n1; tb_page_addr_t phys_pc; @@ -848,7 +850,9 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) /* remove the TB from the 
hash list */ h = tb_jmp_cache_hash_func(tb->pc); - for (env = first_cpu; env != NULL; env = env->next_cpu) { + for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { + CPUArchState *env = cpu->env_ptr; + if (env->tb_jmp_cache[h] == tb) { env->tb_jmp_cache[h] = NULL; } |
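
The conversion applied across these 39 files follows one pattern: the CPU list head and link (first_cpu, next_cpu) move from the per-target CPUArchState to the target-independent CPUState, so generic code walks CPUState directly, and only callers that need per-target registers reach them through cpu->env_ptr or a QOM cast such as X86_CPU(cs) or POWERPC_CPU(cs). The sketch below is a minimal, self-contained illustration of that shape, not real QEMU code: CPUState, first_cpu, next_cpu, env_ptr and cpu_index mirror the fields touched by the diff, while DemoArchState, halt_pending, count_cpus and clear_halt_flags are made-up stand-ins.

/* Minimal sketch of the list layout after this commit.
 * DemoArchState and the helper functions are illustrative stand-ins. */
#include <stdio.h>
#include <stddef.h>

typedef struct DemoArchState {          /* plays the role of CPUX86State etc. */
    int halt_pending;
} DemoArchState;

typedef struct CPUState {
    int cpu_index;
    void *env_ptr;                      /* -> arch-specific state             */
    struct CPUState *next_cpu;          /* list link now lives here ...       */
} CPUState;

CPUState *first_cpu;                    /* ... and the head is a CPUState too */

/* Generic code no longer needs ENV_GET_CPU(); it walks CPUState directly. */
static int count_cpus(void)
{
    int n = 0;
    for (CPUState *cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        n++;
    }
    return n;
}

/* Callers that still want per-target state go through env_ptr, the moral
 * equivalent of the X86_CPU(cs)->env and cpu->env_ptr accesses in the diff. */
static void clear_halt_flags(void)
{
    for (CPUState *cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        DemoArchState *env = cpu->env_ptr;
        env->halt_pending = 0;
    }
}

int main(void)
{
    DemoArchState env0 = { .halt_pending = 1 };
    CPUState cpu0 = { .cpu_index = 0, .env_ptr = &env0, .next_cpu = NULL };

    first_cpu = &cpu0;
    clear_halt_flags();
    printf("cpus=%d halt_pending=%d\n", count_cpus(), env0.halt_pending);
    return 0;
}

Before the change, the same loops were spelled for (env = first_cpu; env != NULL; env = env->next_cpu) with ENV_GET_CPU(env) recovering the CPUState, which is exactly the shape the '-' lines above remove.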