Diffstat (limited to 'target-ppc/kvm.c')
-rw-r--r--   target-ppc/kvm.c   35
1 file changed, 31 insertions(+), 4 deletions(-)
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index 8a196c6cc1..b77ce5e94c 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -818,7 +818,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
/* Sync SLB */
#ifdef TARGET_PPC64
- for (i = 0; i < 64; i++) {
+ for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
}
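The loop bound now tracks the declared size of env->slb instead of a hard-coded 64. For reference only (not part of this diff), ARRAY_SIZE in QEMU is the conventional element-count macro, roughly:

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

so the copy loop stays in sync if the SLB array in CPUPPCState ever changes size.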
@@ -1033,9 +1033,22 @@ int kvm_arch_get_registers(CPUState *cs)
/* Sync SLB */
#ifdef TARGET_PPC64
- for (i = 0; i < 64; i++) {
- ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe,
- sregs.u.s.ppc64.slb[i].slbv);
+ /*
+ * The packed SLB array we get from KVM_GET_SREGS only contains
+ * information about valid entries. So we flush our internal
+ * copy to get rid of stale ones, then put all valid SLB entries
+ * back in.
+ */
+ memset(env->slb, 0, sizeof(env->slb));
+ for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
+ target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
+ target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
+ /*
+ * Only restore valid entries
+ */
+ if (rb & SLB_ESID_V) {
+ ppc_store_slb(env, rb, rs);
+ }
}
#endif
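The new logic clears env->slb before walking the packed array from KVM_GET_SREGS, so any slot the kernel did not report as valid ends up zeroed instead of keeping a stale cached entry. Below is a stripped-down sketch of the same flush-and-refill pattern, independent of QEMU's types: struct slb_entry and refill_slb are illustrative names, the SLB_ESID_V value mirrors the ESID valid bit, and for simplicity each entry is kept at its array position rather than re-decoded the way ppc_store_slb does.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SLB_ESID_V 0x0000000008000000ULL  /* ESID valid bit */

struct slb_entry {
    uint64_t esid;
    uint64_t vsid;
};

/* Wipe the local copy, then take back only slots whose ESID has the
 * valid bit set, so stale entries cannot survive a register sync. */
static void refill_slb(struct slb_entry *dst, const struct slb_entry *packed,
                       size_t n)
{
    memset(dst, 0, n * sizeof(*dst));
    for (size_t i = 0; i < n; i++) {
        if (packed[i].esid & SLB_ESID_V) {
            dst[i] = packed[i];
        }
    }
}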
@@ -1789,6 +1802,20 @@ static int kvm_ppc_register_host_cpu_type(void)
return 0;
}
+int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
+{
+ struct kvm_rtas_token_args args = {
+ .token = token,
+ };
+
+ if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
+ return -ENOENT;
+ }
+
+ strncpy(args.name, function, sizeof(args.name));
+
+ return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
+}
int kvmppc_get_htab_fd(bool write)
{
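The newly added kvmppc_define_rtas_kernel_token() first checks for KVM_CAP_PPC_RTAS and then asks the kernel, via the KVM_PPC_RTAS_DEFINE_TOKEN vm ioctl, to handle the named RTAS call itself whenever the guest invokes the given token. A hypothetical caller (the helper name and fallback policy here are illustrative, not taken from this patch; it assumes the prototype exported by the KVM PPC header) could use it like this:

#include <stdint.h>
#include <stdio.h>

int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function);

/* Hypothetical helper: ask KVM to service an RTAS call in-kernel and
 * fall back to QEMU's own handler when the capability is missing. */
static void rtas_try_kernel_handler(uint32_t token, const char *name)
{
    int ret = kvmppc_define_rtas_kernel_token(token, name);

    if (ret < 0) {
        /* -ENOENT means no KVM_CAP_PPC_RTAS; keep the userspace handler. */
        fprintf(stderr, "RTAS call %s stays in QEMU (ret=%d)\n", name, ret);
    }
}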