author     Alexey Kardashevskiy <aik@ozlabs.ru>      2013-07-18 14:32:54 -0500
committer  Anthony Liguori <aliguori@us.ibm.com>     2013-07-29 10:37:04 -0500
commit     a90db1584a00dc1d1439dc7729d99674b666b85e (patch)
tree       6141d78c5b7ed1f21a98ccad141ecf083d6e3218 /target-ppc/machine.c
parent     fdc43322c978d78e79e692872dcec7b4f6a447f1 (diff)
target-ppc: Convert ppc cpu savevm to VMStateDescription
The savevm code for the powerpc cpu emulation is currently based around
the old register_savevm() rather than register_vmstate() method. It's also
rather broken, missing some important state on some CPU models.
This patch completely rewrites the savevm for target-ppc, using the new
VMStateDescription approach. Exactly what needs to be saved in what
configurations has been more carefully examined, too. This introduces a
new version (5) of the cpu save format. The old load function is retained
to support version 4 images.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Message-id: 1374175984-8930-2-git-send-email-aliguori@us.ibm.com
[aik: ppc cpu savevm conversion fixed to use PowerPCCPU instead of CPUPPCState]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
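
For readers who have not used the VMState API before, here is a minimal sketch of the pattern the conversion below follows: a top-level VMStateDescription with versioned fields, a load_state_old hook kept so streams in the old format can still be accepted, and an optional subsection whose .needed callback decides whether it is sent at all. The sketch is illustrative only and is not part of this patch; FooState, foo_load_old and the field names are made up, while the VMSTATE_* macros and struct members are those provided by the QEMU migration code of this era.

/* Illustrative sketch only -- FooState and its fields are hypothetical. */
#include "hw/hw.h"

typedef struct FooState {
    uint32_t ctrl;
    uint64_t counter;
    uint32_t opt_feature;      /* only meaningful on some models */
    bool has_opt_feature;
} FooState;

/* Hand-written loader kept for streams written in the old (version 1) format. */
static int foo_load_old(QEMUFile *f, void *opaque, int version_id)
{
    FooState *s = opaque;

    s->ctrl = qemu_get_be32(f);
    s->counter = qemu_get_be64(f);
    return 0;
}

/* The subsection is only put on the wire when this returns true. */
static bool foo_opt_needed(void *opaque)
{
    FooState *s = opaque;

    return s->has_opt_feature;
}

static const VMStateDescription vmstate_foo_opt = {
    .name = "foo/opt",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(opt_feature, FooState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_foo = {
    .name = "foo",
    .version_id = 2,                 /* current format */
    .minimum_version_id = 2,
    .minimum_version_id_old = 1,     /* older streams go through ... */
    .load_state_old = foo_load_old,  /* ... the legacy loader        */
    .fields = (VMStateField []) {
        VMSTATE_UINT32(ctrl, FooState),
        VMSTATE_UINT64(counter, FooState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection []) {
        { .vmsd = &vmstate_foo_opt, .needed = foo_opt_needed },
        { /* end of list */ }
    }
};

Registering vmstate_foo (for instance through vmstate_register() or a device's vmsd pointer) replaces hand-matched qemu_put_*/qemu_get_* pairs with a declarative table, which is exactly the shape of vmstate_ppc_cpu in the diff below.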
Diffstat (limited to 'target-ppc/machine.c')
-rw-r--r--   target-ppc/machine.c   531
1 file changed, 443 insertions, 88 deletions
diff --git a/target-ppc/machine.c b/target-ppc/machine.c
index 2d10adb60d..12e1512996 100644
--- a/target-ppc/machine.c
+++ b/target-ppc/machine.c
@@ -1,96 +1,12 @@
 #include "hw/hw.h"
 #include "hw/boards.h"
 #include "sysemu/kvm.h"
+#include "helper_regs.h"
 
-void cpu_save(QEMUFile *f, void *opaque)
+static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
 {
-    CPUPPCState *env = (CPUPPCState *)opaque;
-    unsigned int i, j;
-    uint32_t fpscr;
-    target_ulong xer;
-
-    for (i = 0; i < 32; i++)
-        qemu_put_betls(f, &env->gpr[i]);
-#if !defined(TARGET_PPC64)
-    for (i = 0; i < 32; i++)
-        qemu_put_betls(f, &env->gprh[i]);
-#endif
-    qemu_put_betls(f, &env->lr);
-    qemu_put_betls(f, &env->ctr);
-    for (i = 0; i < 8; i++)
-        qemu_put_be32s(f, &env->crf[i]);
-    xer = cpu_read_xer(env);
-    qemu_put_betls(f, &xer);
-    qemu_put_betls(f, &env->reserve_addr);
-    qemu_put_betls(f, &env->msr);
-    for (i = 0; i < 4; i++)
-        qemu_put_betls(f, &env->tgpr[i]);
-    for (i = 0; i < 32; i++) {
-        union {
-            float64 d;
-            uint64_t l;
-        } u;
-        u.d = env->fpr[i];
-        qemu_put_be64(f, u.l);
-    }
-    fpscr = env->fpscr;
-    qemu_put_be32s(f, &fpscr);
-    qemu_put_sbe32s(f, &env->access_type);
-#if defined(TARGET_PPC64)
-    qemu_put_betls(f, &env->spr[SPR_ASR]);
-    qemu_put_sbe32s(f, &env->slb_nr);
-#endif
-    qemu_put_betls(f, &env->spr[SPR_SDR1]);
-    for (i = 0; i < 32; i++)
-        qemu_put_betls(f, &env->sr[i]);
-    for (i = 0; i < 2; i++)
-        for (j = 0; j < 8; j++)
-            qemu_put_betls(f, &env->DBAT[i][j]);
-    for (i = 0; i < 2; i++)
-        for (j = 0; j < 8; j++)
-            qemu_put_betls(f, &env->IBAT[i][j]);
-    qemu_put_sbe32s(f, &env->nb_tlb);
-    qemu_put_sbe32s(f, &env->tlb_per_way);
-    qemu_put_sbe32s(f, &env->nb_ways);
-    qemu_put_sbe32s(f, &env->last_way);
-    qemu_put_sbe32s(f, &env->id_tlbs);
-    qemu_put_sbe32s(f, &env->nb_pids);
-    if (env->tlb.tlb6) {
-        // XXX assumes 6xx
-        for (i = 0; i < env->nb_tlb; i++) {
-            qemu_put_betls(f, &env->tlb.tlb6[i].pte0);
-            qemu_put_betls(f, &env->tlb.tlb6[i].pte1);
-            qemu_put_betls(f, &env->tlb.tlb6[i].EPN);
-        }
-    }
-    for (i = 0; i < 4; i++)
-        qemu_put_betls(f, &env->pb[i]);
-    for (i = 0; i < 1024; i++)
-        qemu_put_betls(f, &env->spr[i]);
-    qemu_put_be32s(f, &env->vscr);
-    qemu_put_be64s(f, &env->spe_acc);
-    qemu_put_be32s(f, &env->spe_fscr);
-    qemu_put_betls(f, &env->msr_mask);
-    qemu_put_be32s(f, &env->flags);
-    qemu_put_sbe32s(f, &env->error_code);
-    qemu_put_be32s(f, &env->pending_interrupts);
-    qemu_put_be32s(f, &env->irq_input_state);
-    for (i = 0; i < POWERPC_EXCP_NB; i++)
-        qemu_put_betls(f, &env->excp_vectors[i]);
-    qemu_put_betls(f, &env->excp_prefix);
-    qemu_put_betls(f, &env->ivor_mask);
-    qemu_put_betls(f, &env->ivpr_mask);
-    qemu_put_betls(f, &env->hreset_vector);
-    qemu_put_betls(f, &env->nip);
-    qemu_put_betls(f, &env->hflags);
-    qemu_put_betls(f, &env->hflags_nmsr);
-    qemu_put_sbe32s(f, &env->mmu_idx);
-    qemu_put_sbe32(f, 0);
-}
-
-int cpu_load(QEMUFile *f, void *opaque, int version_id)
-{
-    CPUPPCState *env = (CPUPPCState *)opaque;
+    PowerPCCPU *cpu = opaque;
+    CPUPPCState *env = &cpu->env;
     unsigned int i, j;
     target_ulong sdr1;
     uint32_t fpscr;
@@ -177,3 +93,442 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
 
     return 0;
 }
+
+static int get_avr(QEMUFile *f, void *pv, size_t size)
+{
+    ppc_avr_t *v = pv;
+
+    v->u64[0] = qemu_get_be64(f);
+    v->u64[1] = qemu_get_be64(f);
+
+    return 0;
+}
+
+static void put_avr(QEMUFile *f, void *pv, size_t size)
+{
+    ppc_avr_t *v = pv;
+
+    qemu_put_be64(f, v->u64[0]);
+    qemu_put_be64(f, v->u64[1]);
+}
+
+const VMStateInfo vmstate_info_avr = {
+    .name = "avr",
+    .get = get_avr,
+    .put = put_avr,
+};
+
+#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v)                     \
+    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_avr, ppc_avr_t)
+
+#define VMSTATE_AVR_ARRAY(_f, _s, _n)                           \
+    VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)
+
+static void cpu_pre_save(void *opaque)
+{
+    PowerPCCPU *cpu = opaque;
+    CPUPPCState *env = &cpu->env;
+    int i;
+
+    env->spr[SPR_LR] = env->lr;
+    env->spr[SPR_CTR] = env->ctr;
+    env->spr[SPR_XER] = env->xer;
+#if defined(TARGET_PPC64)
+    env->spr[SPR_CFAR] = env->cfar;
+#endif
+    env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;
+
+    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
+        env->spr[SPR_DBAT0U + 2*i] = env->DBAT[0][i];
+        env->spr[SPR_DBAT0U + 2*i + 1] = env->DBAT[1][i];
+        env->spr[SPR_IBAT0U + 2*i] = env->IBAT[0][i];
+        env->spr[SPR_IBAT0U + 2*i + 1] = env->IBAT[1][i];
+    }
+    for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
+        env->spr[SPR_DBAT4U + 2*i] = env->DBAT[0][i+4];
+        env->spr[SPR_DBAT4U + 2*i + 1] = env->DBAT[1][i+4];
+        env->spr[SPR_IBAT4U + 2*i] = env->IBAT[0][i+4];
+        env->spr[SPR_IBAT4U + 2*i + 1] = env->IBAT[1][i+4];
+    }
+}
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+    PowerPCCPU *cpu = opaque;
+    CPUPPCState *env = &cpu->env;
+    int i;
+
+    env->lr = env->spr[SPR_LR];
+    env->ctr = env->spr[SPR_CTR];
+    env->xer = env->spr[SPR_XER];
+#if defined(TARGET_PPC64)
+    env->cfar = env->spr[SPR_CFAR];
+#endif
+    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];
+
+    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
+        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2*i];
+        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2*i + 1];
+        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2*i];
+        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2*i + 1];
+    }
+    for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
+        env->DBAT[0][i+4] = env->spr[SPR_DBAT4U + 2*i];
+        env->DBAT[1][i+4] = env->spr[SPR_DBAT4U + 2*i + 1];
+        env->IBAT[0][i+4] = env->spr[SPR_IBAT4U + 2*i];
+        env->IBAT[1][i+4] = env->spr[SPR_IBAT4U + 2*i + 1];
+    }
+
+    /* Restore htab_base and htab_mask variables */
+    ppc_store_sdr1(env, env->spr[SPR_SDR1]);
+
+    hreg_compute_hflags(env);
+    hreg_compute_mem_idx(env);
+
+    return 0;
+}
+
+static bool fpu_needed(void *opaque)
+{
+    PowerPCCPU *cpu = opaque;
+
+    return (cpu->env.insns_flags & PPC_FLOAT);
+}
+
+static const VMStateDescription vmstate_fpu = {
+    .name = "cpu/fpu",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_FLOAT64_ARRAY(env.fpr, PowerPCCPU, 32),
+        VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static bool altivec_needed(void *opaque)
+{
+    PowerPCCPU *cpu = opaque;
+
+    return (cpu->env.insns_flags & PPC_ALTIVEC);
+}
+
+static const VMStateDescription vmstate_altivec = {
+    .name = "cpu/altivec",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_AVR_ARRAY(env.avr, PowerPCCPU, 32),
+        VMSTATE_UINT32(env.vscr, PowerPCCPU),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static bool vsx_needed(void *opaque)
+{
+    PowerPCCPU *cpu = opaque;
+
+    return (cpu->env.insns_flags2 & PPC2_VSX);
+}
+
+static const VMStateDescription vmstate_vsx = {
+    .name = "cpu/vsx",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_UINT64_ARRAY(env.vsr, PowerPCCPU, 32),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static bool sr_needed(void *opaque)
+{
+#ifdef TARGET_PPC64
+    PowerPCCPU *cpu = opaque;
+
+    return !(cpu->env.mmu_model & POWERPC_MMU_64);
+#else
+    return true;
+#endif
+}
+
+static const VMStateDescription vmstate_sr = {
+    .name = "cpu/sr",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+#ifdef TARGET_PPC64
+static int get_slbe(QEMUFile *f, void *pv, size_t size)
+{
+    ppc_slb_t *v = pv;
+
+    v->esid = qemu_get_be64(f);
+    v->vsid = qemu_get_be64(f);
+
+    return 0;
+}
+
+static void put_slbe(QEMUFile *f, void *pv, size_t size)
+{
+    ppc_slb_t *v = pv;
+
+    qemu_put_be64(f, v->esid);
+    qemu_put_be64(f, v->vsid);
+}
+
+const VMStateInfo vmstate_info_slbe = {
+    .name = "slbe",
+    .get = get_slbe,
+    .put = put_slbe,
+};
+
+#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v)                     \
+    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)
+
+#define VMSTATE_SLB_ARRAY(_f, _s, _n)                           \
+    VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)
+
+static bool slb_needed(void *opaque)
+{
+    PowerPCCPU *cpu = opaque;
+
+    /* We don't support any of the old segment table based 64-bit CPUs */
+    return (cpu->env.mmu_model & POWERPC_MMU_64);
+}
+
+static const VMStateDescription vmstate_slb = {
+    .name = "cpu/slb",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_INT32_EQUAL(env.slb_nr, PowerPCCPU),
+        VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, 64),
+        VMSTATE_END_OF_LIST()
+    }
+};
+#endif /* TARGET_PPC64 */
+
+static const VMStateDescription vmstate_tlb6xx_entry = {
+    .name = "cpu/tlb6xx_entry",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
+        VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
+        VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static bool tlb6xx_needed(void *opaque)
+{
+    PowerPCCPU *cpu = opaque;
+    CPUPPCState *env = &cpu->env;
+
+    return env->nb_tlb && (env->tlb_type == TLB_6XX);
+}
+
+static const VMStateDescription vmstate_tlb6xx = {
+    .name = "cpu/tlb6xx",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU),
+        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
+                                            env.nb_tlb,
+                                            vmstate_tlb6xx_entry,
+                                            ppc6xx_tlb_t),
+        VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription vmstate_tlbemb_entry = {
+    .name = "cpu/tlbemb_entry",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_UINT64(RPN, ppcemb_tlb_t),
+        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
+        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
+        VMSTATE_UINTTL(size, ppcemb_tlb_t),
+        VMSTATE_UINT32(prot, ppcemb_tlb_t),
+        VMSTATE_UINT32(attr, ppcemb_tlb_t),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static bool tlbemb_needed(void *opaque)
+{
+    PowerPCCPU *cpu = opaque;
+    CPUPPCState *env = &cpu->env;
+
+    return env->nb_tlb && (env->tlb_type == TLB_EMB);
+}
+
+static bool pbr403_needed(void *opaque)
+{
+    PowerPCCPU *cpu = opaque;
+    uint32_t pvr = cpu->env.spr[SPR_PVR];
+
+    return (pvr & 0xffff0000) == 0x00200000;
+}
+
+static const VMStateDescription vmstate_pbr403 = {
+    .name = "cpu/pbr403",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static const VMStateDescription vmstate_tlbemb = {
+    .name = "cpu/tlb6xx",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU),
+        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
+                                            env.nb_tlb,
+                                            vmstate_tlbemb_entry,
+                                            ppcemb_tlb_t),
+        /* 403 protection registers */
+        VMSTATE_END_OF_LIST()
+    },
+    .subsections = (VMStateSubsection []) {
+        {
+            .vmsd = &vmstate_pbr403,
+            .needed = pbr403_needed,
+        } , {
+            /* empty */
+        }
+    }
+};
+
+static const VMStateDescription vmstate_tlbmas_entry = {
+    .name = "cpu/tlbmas_entry",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_UINT32(mas8, ppcmas_tlb_t),
+        VMSTATE_UINT32(mas1, ppcmas_tlb_t),
+        VMSTATE_UINT64(mas2, ppcmas_tlb_t),
+        VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static bool tlbmas_needed(void *opaque)
+{
+    PowerPCCPU *cpu = opaque;
+    CPUPPCState *env = &cpu->env;
+
+    return env->nb_tlb && (env->tlb_type == TLB_MAS);
+}
+
+static const VMStateDescription vmstate_tlbmas = {
+    .name = "cpu/tlbmas",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU),
+        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
+                                            env.nb_tlb,
+                                            vmstate_tlbmas_entry,
+                                            ppcmas_tlb_t),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+const VMStateDescription vmstate_ppc_cpu = {
+    .name = "cpu",
+    .version_id = 5,
+    .minimum_version_id = 5,
+    .minimum_version_id_old = 4,
+    .load_state_old = cpu_load_old,
+    .pre_save = cpu_pre_save,
+    .post_load = cpu_post_load,
+    .fields = (VMStateField []) {
+        /* Verify we haven't changed the pvr */
+        VMSTATE_UINTTL_EQUAL(env.spr[SPR_PVR], PowerPCCPU),
+
+        /* User mode architected state */
+        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
+#if !defined(TARGET_PPC64)
+        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
+#endif
+        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
+        VMSTATE_UINTTL(env.nip, PowerPCCPU),
+
+        /* SPRs */
+        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
+        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),
+
+        /* Reservation */
+        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),
+
+        /* Supervisor mode architected state */
+        VMSTATE_UINTTL(env.msr, PowerPCCPU),
+
+        /* Internal state */
+        VMSTATE_UINTTL(env.hflags_nmsr, PowerPCCPU),
+        /* FIXME: access_type? */
+
+        /* Sanity checking */
+        VMSTATE_UINTTL_EQUAL(env.msr_mask, PowerPCCPU),
+        VMSTATE_UINT64_EQUAL(env.insns_flags, PowerPCCPU),
+        VMSTATE_UINT64_EQUAL(env.insns_flags2, PowerPCCPU),
+        VMSTATE_UINT32_EQUAL(env.nb_BATs, PowerPCCPU),
+        VMSTATE_END_OF_LIST()
+    },
+    .subsections = (VMStateSubsection []) {
+        {
+            .vmsd = &vmstate_fpu,
+            .needed = fpu_needed,
+        } , {
+            .vmsd = &vmstate_altivec,
+            .needed = altivec_needed,
+        } , {
+            .vmsd = &vmstate_vsx,
+            .needed = vsx_needed,
+        } , {
+            .vmsd = &vmstate_sr,
+            .needed = sr_needed,
+        } , {
+#ifdef TARGET_PPC64
+            .vmsd = &vmstate_slb,
+            .needed = slb_needed,
+        } , {
+#endif /* TARGET_PPC64 */
+            .vmsd = &vmstate_tlb6xx,
+            .needed = tlb6xx_needed,
+        } , {
+            .vmsd = &vmstate_tlbemb,
+            .needed = tlbemb_needed,
+        } , {
+            .vmsd = &vmstate_tlbmas,
+            .needed = tlbmas_needed,
+        } , {
+            /* empty */
+        }
+    }
+};