From 217a4acb211d603f33199cf94ada9fce3ac419b5 Mon Sep 17 00:00:00 2001
From: Thomas Huth
Date: Thu, 19 Mar 2015 15:04:50 +0100
Subject: s390x/mmu: Use access type definitions instead of magic values

Since there are now proper definitions for the MMU access type, let's
use them in the s390x MMU code, too, instead of the hard-to-understand
magic values.

Signed-off-by: Thomas Huth
Reviewed-by: Jens Freimann
Acked-by: Cornelia Huck
Signed-off-by: Cornelia Huck
---
 target-s390x/helper.c     |  2 +-
 target-s390x/mmu_helper.c | 10 +++++-----
 2 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'target-s390x')

diff --git a/target-s390x/helper.c b/target-s390x/helper.c
index f1060c2bce..041c9c7429 100644
--- a/target-s390x/helper.c
+++ b/target-s390x/helper.c
@@ -162,7 +162,7 @@ hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
         vaddr &= 0x7fffffff;
     }

-    mmu_translate(env, vaddr, 2, asc, &raddr, &prot, false);
+    mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false);

     return raddr;
 }
diff --git a/target-s390x/mmu_helper.c b/target-s390x/mmu_helper.c
index b061c85aff..9b88498b39 100644
--- a/target-s390x/mmu_helper.c
+++ b/target-s390x/mmu_helper.c
@@ -68,7 +68,7 @@ static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
 {
     uint64_t tec;

-    tec = vaddr | (rw == 1 ? FS_WRITE : FS_READ) | 4 | asc >> 46;
+    tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | 4 | asc >> 46;

     DPRINTF("%s: trans_exc_code=%016" PRIx64 "\n", __func__, tec);

@@ -85,7 +85,7 @@ static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
     int ilen = ILEN_LATER;
     uint64_t tec;

-    tec = vaddr | (rw == 1 ? FS_WRITE : FS_READ) | asc >> 46;
+    tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | asc >> 46;

     DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);

@@ -94,7 +94,7 @@ static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
     }

     /* Code accesses have an undefined ilc. */
-    if (rw == 2) {
+    if (rw == MMU_INST_FETCH) {
         ilen = 2;
     }

@@ -288,7 +288,7 @@ static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,

     r = mmu_translate_region(env, vaddr, asc, asce, level, raddr, flags, rw,
                              exc);
-    if ((rw == 1) && !(*flags & PAGE_WRITE)) {
+    if (rw == MMU_DATA_STORE && !(*flags & PAGE_WRITE)) {
         trigger_prot_fault(env, vaddr, asc, rw, exc);
         return -1;
     }
@@ -338,7 +338,7 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
      * Instruction: Primary
      * Data: Secondary
      */
-    if (rw == 2) {
+    if (rw == MMU_INST_FETCH) {
         r = mmu_translate_asce(env, vaddr, PSW_ASC_PRIMARY, env->cregs[1],
                                raddr, flags, rw, exc);
         *flags &= ~(PAGE_READ | PAGE_WRITE);
-- 
cgit v1.2.3


From f07177a5599fb204e42a007db4820ceda1bc85ba Mon Sep 17 00:00:00 2001
From: Ekaterina Tumanova
Date: Tue, 3 Mar 2015 18:35:27 +0100
Subject: s390x/kvm: Put vm name, extended name and UUID into STSI322 SYSIB

KVM prefills the SYSIB that is returned by STSI 3.2.2. This patch allows
userspace to intercept the instruction and fill in the values that are
known to qemu: the machine name (8 chars), the extended machine name
(256 chars), the extended machine name encoding (2 for UTF-8) and the
UUID.

The STSI322 handler in qemu also finds the highest virtualization level
in the level-3 virtualization stack that does not provide an Extended
Name (the Ext Name delimiter) and propagates a zero Ext Name to all
levels below it, because such a level is not capable of managing the
Extended Names of the lower levels.
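
As an illustration (not part of this commit), here is a minimal sketch of
the delimiter handling described above. It uses simplified, stand-in field
names (a bare count plus parallel vm[]/ext_names[] arrays) instead of the
real struct sysib_322 layout changed in the diff below:

    #include <stdint.h>
    #include <string.h>

    struct vm_entry {
        uint8_t ext_name_encoding;   /* 0 = none, 2 = UTF-8 */
    };

    /* Zero the Extended Names of every level below the first level that
     * provides none (the "Ext Name delimiter"), since that level cannot
     * manage the names of the levels underneath it. */
    static void prune_ext_names(struct vm_entry *vm, uint8_t ext_names[][256],
                                int count)
    {
        int del;

        for (del = 1; del < count; del++) {
            if (!vm[del].ext_name_encoding || !ext_names[del][0]) {
                break;
            }
        }
        if (del < count) {
            memset(ext_names[del], 0, sizeof(ext_names[0]) * (count - del));
        }
    }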
Signed-off-by: Ekaterina Tumanova
Reviewed-by: Christian Borntraeger
Reviewed-by: Thomas Huth
Signed-off-by: Cornelia Huck
---
 target-s390x/cpu.h |  8 ++++--
 target-s390x/kvm.c | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 77 insertions(+), 2 deletions(-)

(limited to 'target-s390x')

diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index 8135dda318..79bc80b586 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -865,9 +865,13 @@ struct sysib_322 {
         uint8_t name[8];
         uint32_t caf;
         uint8_t cpi[16];
-        uint8_t res3[24];
+        uint8_t res5[3];
+        uint8_t ext_name_encoding;
+        uint32_t res3;
+        uint8_t uuid[16];
     } vm[8];
-    uint8_t res4[3552];
+    uint8_t res4[1504];
+    uint8_t ext_names[8][256];
 };

 /* MMU defines */
diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
index b48c643b36..619684b9ee 100644
--- a/target-s390x/kvm.c
+++ b/target-s390x/kvm.c
@@ -44,6 +44,7 @@
 #include "hw/s390x/s390-pci-inst.h"
 #include "hw/s390x/s390-pci-bus.h"
 #include "hw/s390x/ipl.h"
+#include "hw/s390x/ebcdic.h"

 /* #define DEBUG_KVM */

@@ -255,6 +256,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
     }

     kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
+    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);

     return 0;
 }
@@ -1723,6 +1725,72 @@ static int handle_tsch(S390CPU *cpu)
     return ret;
 }

+static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr)
+{
+    struct sysib_322 sysib;
+    int del;
+
+    if (s390_cpu_virt_mem_read(cpu, addr, &sysib, sizeof(sysib))) {
+        return;
+    }
+    /* Shift the stack of Extended Names to prepare for our own data */
+    memmove(&sysib.ext_names[1], &sysib.ext_names[0],
+            sizeof(sysib.ext_names[0]) * (sysib.count - 1));
+    /* First virt level, that doesn't provide Ext Names delimits stack. It is
+     * assumed it's not capable of managing Extended Names for lower levels.
+     */
+    for (del = 1; del < sysib.count; del++) {
+        if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
+            break;
+        }
+    }
+    if (del < sysib.count) {
+        memset(sysib.ext_names[del], 0,
+               sizeof(sysib.ext_names[0]) * (sysib.count - del));
+    }
+    /* Insert short machine name in EBCDIC, padded with blanks */
+    if (qemu_name) {
+        memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
+        ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
+                   strlen(qemu_name)));
+    }
+    sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
+    memset(sysib.ext_names[0], 0, sizeof(sysib.ext_names[0]));
+    /* If hypervisor specifies zero Extended Name in STSI322 SYSIB, it's
+     * considered by s390 as not capable of providing any Extended Name.
+     * Therefore if no name was specified on qemu invocation, we go with the
+     * same "KVMguest" default, which KVM has filled into short name field.
+     */
+    if (qemu_name) {
+        strncpy((char *)sysib.ext_names[0], qemu_name,
+                sizeof(sysib.ext_names[0]));
+    } else {
+        strcpy((char *)sysib.ext_names[0], "KVMguest");
+    }
+    /* Insert UUID */
+    memcpy(sysib.vm[0].uuid, qemu_uuid, sizeof(sysib.vm[0].uuid));
+
+    s390_cpu_virt_mem_write(cpu, addr, &sysib, sizeof(sysib));
+}
+
+static int handle_stsi(S390CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    struct kvm_run *run = cs->kvm_run;
+
+    switch (run->s390_stsi.fc) {
+    case 3:
+        if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
+            return 0;
+        }
+        /* Only sysib 3.2.2 needs post-handling for now. */
+        insert_stsi_3_2_2(cpu, run->s390_stsi.addr);
+        return 0;
+    default:
+        return 0;
+    }
+}
+
 static int kvm_arch_handle_debug_exit(S390CPU *cpu)
 {
     CPUState *cs = CPU(cpu);
@@ -1772,6 +1840,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
         case KVM_EXIT_S390_TSCH:
             ret = handle_tsch(cpu);
             break;
+        case KVM_EXIT_S390_STSI:
+            ret = handle_stsi(cpu);
+            break;
         case KVM_EXIT_DEBUG:
             ret = kvm_arch_handle_debug_exit(cpu);
             break;
-- 
cgit v1.2.3


From a9bcd1b8719dea2e91512238d810e2a0037e174d Mon Sep 17 00:00:00 2001
From: Thomas Huth
Date: Fri, 6 Feb 2015 15:54:58 +0100
Subject: s390x/mmu: Use ioctl for reading and writing from/to guest memory

Add code to make use of the new ioctl for reading from / writing to
virtual guest memory. By using the ioctl, the memory accesses are now
protected with the so-called ipte-lock in the kernel.

[CH: moved error message into kvm_s390_mem_op()]
Signed-off-by: Thomas Huth
Acked-by: Christian Borntraeger
Signed-off-by: Cornelia Huck
---
 target-s390x/cpu.h        |  7 +++++++
 target-s390x/kvm.c        | 40 ++++++++++++++++++++++++++++++++++++++++
 target-s390x/mmu_helper.c |  7 +++++++
 3 files changed, 54 insertions(+)

(limited to 'target-s390x')

diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index 79bc80b586..9c4274325a 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -401,6 +401,8 @@ void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq);
 void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq);
 int kvm_s390_inject_flic(struct kvm_s390_irq *irq);
 void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
+int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, void *hostbuf, int len,
+                    bool is_write);
 int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock);
 int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_clock);
 #else
@@ -418,6 +420,11 @@ static inline int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
 {
     return -ENOSYS;
 }
+static inline int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, void *hostbuf,
+                                  int len, bool is_write)
+{
+    return -ENOSYS;
+}
 static inline void kvm_s390_access_exception(S390CPU *cpu, uint16_t code,
                                              uint64_t te_code)
 {
diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
index 619684b9ee..1c0e78c64b 100644
--- a/target-s390x/kvm.c
+++ b/target-s390x/kvm.c
@@ -123,6 +123,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {

 static int cap_sync_regs;
 static int cap_async_pf;
+static int cap_mem_op;

 static void *legacy_s390_alloc(size_t size, uint64_t *align);

@@ -247,6 +248,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
 {
     cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
     cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
+    cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);

     kvm_s390_enable_cmma(s);

@@ -550,6 +552,44 @@ int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
     return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
 }

+/**
+ * kvm_s390_mem_op:
+ * @addr: the logical start address in guest memory
+ * @hostbuf: buffer in host memory. NULL = do only checks w/o copying
+ * @len: length that should be transfered
+ * @is_write: true = write, false = read
+ * Returns: 0 on success, non-zero if an exception or error occured
+ *
+ * Use KVM ioctl to read/write from/to guest memory. An access exception
+ * is injected into the vCPU in case of translation errors.
+ */
+int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, void *hostbuf, int len,
+                    bool is_write)
+{
+    struct kvm_s390_mem_op mem_op = {
+        .gaddr = addr,
+        .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
+        .size = len,
+        .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
+                       : KVM_S390_MEMOP_LOGICAL_READ,
+        .buf = (uint64_t)hostbuf,
+    };
+    int ret;
+
+    if (!cap_mem_op) {
+        return -ENOSYS;
+    }
+    if (!hostbuf) {
+        mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
+    }
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
+    if (ret < 0) {
+        error_printf("KVM_S390_MEM_OP failed: %s\n", strerror(-ret));
+    }
+    return ret;
+}
+
 /*
  * Legacy layout for s390:
  * Older S390 KVM requires the topmost vma of the RAM to be
diff --git a/target-s390x/mmu_helper.c b/target-s390x/mmu_helper.c
index 9b88498b39..cd2cb51629 100644
--- a/target-s390x/mmu_helper.c
+++ b/target-s390x/mmu_helper.c
@@ -450,6 +450,13 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, void *hostbuf,
     target_ulong *pages;
     int ret;

+    if (kvm_enabled()) {
+        ret = kvm_s390_mem_op(cpu, laddr, hostbuf, len, is_write);
+        if (ret >= 0) {
+            return ret;
+        }
+    }
+
     nr_pages = (((laddr & ~TARGET_PAGE_MASK) + len - 1)
                 >> TARGET_PAGE_BITS) + 1;
     pages = g_malloc(nr_pages * sizeof(*pages));
-- 
cgit v1.2.3


From 6cb1e49de58cab8f243b05a971a9a1f80ab3223d Mon Sep 17 00:00:00 2001
From: Alexander Yarygin
Date: Thu, 5 Mar 2015 12:36:48 +0300
Subject: s390x/kvm: Support access register mode for KVM_S390_MEM_OP ioctl

Access register mode is one of the modes that control dynamic address
translation. In this mode, the address space is specified by values of
the access registers. The effective address-space-control element is
obtained from the result of the access register translation. See the
"Access-Register Introduction" section of chapter 5, "Program
Execution", in the "Principles of Operation" for more details.

When the CPU is in AR mode, the s390_cpu_virt_mem_rw() function must
know which access register number to use for address translation.
This patch does several things:
 - add a new parameter 'uint8_t ar' to that function,
 - decode the ar number from the intercepted instructions,
 - pass the ar number to s390_cpu_virt_mem_rw(), which in turn passes
   it to the KVM_S390_MEM_OP ioctl.
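
To illustrate the resulting calling convention, here is a sketch only; the
handler name and body are invented for this example, but decode_basedisp_s()
and s390_cpu_virt_mem_read() use the signatures introduced by this patch:

    /* Sketch: an intercept handler that reads a guest operand in AR mode. */
    static int handle_example_operand(S390CPU *cpu, uint32_t ipb)
    {
        CPUS390XState *env = &cpu->env;
        uint8_t buf[32];
        uint8_t ar;
        hwaddr addr;

        /* The base register number of the operand also names the access
         * register that selects the address space in AR mode. */
        addr = decode_basedisp_s(env, ipb, &ar);

        /* The ar value travels via s390_cpu_virt_mem_rw() into the
         * KVM_S390_MEM_OP ioctl; on a translation error an access
         * exception has already been injected into the vCPU. */
        if (s390_cpu_virt_mem_read(cpu, addr, ar, buf, sizeof(buf))) {
            return -EFAULT;
        }
        return 0;
    }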
Signed-off-by: Alexander Yarygin
Reviewed-by: Thomas Huth
Reviewed-by: David Hildenbrand
Signed-off-by: Cornelia Huck
---
 target-s390x/cpu.h        | 30 +++++++++++++++++-------------
 target-s390x/ioinst.c     | 42 +++++++++++++++++++++++++-----------------
 target-s390x/kvm.c        | 46 +++++++++++++++++++++++++++++---------------
 target-s390x/mmu_helper.c |  5 +++--
 4 files changed, 75 insertions(+), 48 deletions(-)

(limited to 'target-s390x')

diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index 9c4274325a..ba7d250962 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -356,7 +356,8 @@ int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
 #ifndef CONFIG_USER_ONLY
 void do_restart_interrupt(CPUS390XState *env);

-static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb)
+static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
+                                       uint8_t *ar)
 {
     hwaddr addr = 0;
     uint8_t reg;
@@ -366,6 +367,9 @@ static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb)
         addr = env->regs[reg];
     }
     addr += (ipb >> 16) & 0xfff;
+    if (ar) {
+        *ar = reg;
+    }

     return addr;
 }
@@ -401,8 +405,8 @@ void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq);
 void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq);
 int kvm_s390_inject_flic(struct kvm_s390_irq *irq);
 void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
-int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, void *hostbuf, int len,
-                    bool is_write);
+int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
+                    int len, bool is_write);
 int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock);
 int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_clock);
 #else
@@ -420,8 +424,8 @@ static inline int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
 {
     return -ENOSYS;
 }
-static inline int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, void *hostbuf,
-                                  int len, bool is_write)
+static inline int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar,
+                                  void *hostbuf, int len, bool is_write)
 {
     return -ENOSYS;
 }
@@ -963,15 +967,15 @@ int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code);
 uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
                  uint64_t vr);

-int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, void *hostbuf, int len,
-                         bool is_write);
+int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
+                         int len, bool is_write);

-#define s390_cpu_virt_mem_read(cpu, laddr, dest, len)    \
-        s390_cpu_virt_mem_rw(cpu, laddr, dest, len, false)
-#define s390_cpu_virt_mem_write(cpu, laddr, dest, len)   \
-        s390_cpu_virt_mem_rw(cpu, laddr, dest, len, true)
-#define s390_cpu_virt_mem_check_write(cpu, laddr, len)   \
-        s390_cpu_virt_mem_rw(cpu, laddr, NULL, len, true)
+#define s390_cpu_virt_mem_read(cpu, laddr, ar, dest, len)    \
+        s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, false)
+#define s390_cpu_virt_mem_write(cpu, laddr, ar, dest, len)   \
+        s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true)
+#define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len)   \
+        s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)

 /* The value of the TOD clock for 1.1.1970. */
 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
diff --git a/target-s390x/ioinst.c b/target-s390x/ioinst.c
index b00a00ca2b..e220cea8ab 100644
--- a/target-s390x/ioinst.c
+++ b/target-s390x/ioinst.c
@@ -149,13 +149,14 @@ void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
     int ret = -ENODEV;
     int cc;
     CPUS390XState *env = &cpu->env;
+    uint8_t ar;

-    addr = decode_basedisp_s(env, ipb);
+    addr = decode_basedisp_s(env, ipb, &ar);
     if (addr & 3) {
         program_interrupt(env, PGM_SPECIFICATION, 2);
         return;
     }
-    if (s390_cpu_virt_mem_read(cpu, addr, &schib, sizeof(schib))) {
+    if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) {
         return;
     }
     if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
@@ -215,13 +216,14 @@ void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
     int ret = -ENODEV;
     int cc;
     CPUS390XState *env = &cpu->env;
+    uint8_t ar;

-    addr = decode_basedisp_s(env, ipb);
+    addr = decode_basedisp_s(env, ipb, &ar);
     if (addr & 3) {
         program_interrupt(env, PGM_SPECIFICATION, 2);
         return;
     }
-    if (s390_cpu_virt_mem_read(cpu, addr, &orig_orb, sizeof(orb))) {
+    if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) {
         return;
     }
     copy_orb_from_guest(&orb, &orig_orb);
@@ -258,8 +260,9 @@ void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb)
     uint64_t addr;
     int cc;
     CPUS390XState *env = &cpu->env;
+    uint8_t ar;

-    addr = decode_basedisp_s(env, ipb);
+    addr = decode_basedisp_s(env, ipb, &ar);
     if (addr & 3) {
         program_interrupt(env, PGM_SPECIFICATION, 2);
         return;
@@ -268,7 +271,7 @@ void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb)

     cc = css_do_stcrw(&crw);
     /* 0 - crw stored, 1 - zeroes stored */
-    if (s390_cpu_virt_mem_write(cpu, addr, &crw, sizeof(crw)) == 0) {
+    if (s390_cpu_virt_mem_write(cpu, addr, ar, &crw, sizeof(crw)) == 0) {
         setcc(cpu, cc);
     } else if (cc == 0) {
         /* Write failed: requeue CRW since STCRW is a suppressing instruction */
@@ -284,8 +287,9 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
     int cc;
     SCHIB schib;
     CPUS390XState *env = &cpu->env;
+    uint8_t ar;

-    addr = decode_basedisp_s(env, ipb);
+    addr = decode_basedisp_s(env, ipb, &ar);
     if (addr & 3) {
         program_interrupt(env, PGM_SPECIFICATION, 2);
         return;
@@ -297,7 +301,7 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
          * we check whether the memory area is writeable (injecting the
          * access execption if it is not) first.
          */
-        if (!s390_cpu_virt_mem_check_write(cpu, addr, sizeof(schib))) {
+        if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) {
             program_interrupt(env, PGM_OPERAND, 2);
         }
         return;
@@ -322,12 +326,13 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
         }
     }
     if (cc != 3) {
-        if (s390_cpu_virt_mem_write(cpu, addr, &schib, sizeof(schib)) != 0) {
+        if (s390_cpu_virt_mem_write(cpu, addr, ar, &schib,
+                                    sizeof(schib)) != 0) {
             return;
         }
     } else {
         /* Access exceptions have a higher priority than cc3 */
-        if (s390_cpu_virt_mem_check_write(cpu, addr, sizeof(schib)) != 0) {
+        if (s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib)) != 0) {
             return;
         }
     }
@@ -342,13 +347,14 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
     IRB irb;
     uint64_t addr;
     int cc, irb_len;
+    uint8_t ar;

     if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
         program_interrupt(env, PGM_OPERAND, 2);
         return -EIO;
     }
     trace_ioinst_sch_id("tsch", cssid, ssid, schid);
-    addr = decode_basedisp_s(env, ipb);
+    addr = decode_basedisp_s(env, ipb, &ar);
     if (addr & 3) {
         program_interrupt(env, PGM_SPECIFICATION, 2);
         return -EIO;
@@ -362,14 +368,14 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
     }
     /* 0 - status pending, 1 - not status pending, 3 - not operational */
     if (cc != 3) {
-        if (s390_cpu_virt_mem_write(cpu, addr, &irb, irb_len) != 0) {
+        if (s390_cpu_virt_mem_write(cpu, addr, ar, &irb, irb_len) != 0) {
             return -EFAULT;
         }
         css_do_tsch_update_subch(sch);
     } else {
         irb_len = sizeof(irb) - sizeof(irb.emw);
         /* Access exceptions have a higher priority than cc3 */
-        if (s390_cpu_virt_mem_check_write(cpu, addr, irb_len) != 0) {
+        if (s390_cpu_virt_mem_check_write(cpu, addr, ar, irb_len) != 0) {
             return -EFAULT;
         }
     }
@@ -645,7 +651,7 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
      * present CHSC sub-handlers ... if we ever need more, we should take
      * care of req->len here first.
      */
-    if (s390_cpu_virt_mem_read(cpu, addr, buf, sizeof(ChscReq))) {
+    if (s390_cpu_virt_mem_read(cpu, addr, reg, buf, sizeof(ChscReq))) {
         return;
     }
     req = (ChscReq *)buf;
@@ -677,7 +683,8 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
         break;
     }

-    if (!s390_cpu_virt_mem_write(cpu, addr + len, res, be16_to_cpu(res->len))) {
+    if (!s390_cpu_virt_mem_write(cpu, addr + len, reg, res,
+                                 be16_to_cpu(res->len))) {
         setcc(cpu, 0);    /* Command execution complete */
     }
 }
@@ -690,9 +697,10 @@ int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb)
     IOIntCode int_code;
     hwaddr len;
     int ret;
+    uint8_t ar;

     trace_ioinst("tpi");
-    addr = decode_basedisp_s(env, ipb);
+    addr = decode_basedisp_s(env, ipb, &ar);
     if (addr & 3) {
         program_interrupt(env, PGM_SPECIFICATION, 2);
         return -EIO;
@@ -702,7 +710,7 @@ int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb)
     len = lowcore ? 8 /* two words */ : 12 /* three words */;
     ret = css_do_tpi(&int_code, lowcore);
     if (ret == 1) {
-        s390_cpu_virt_mem_write(cpu, lowcore ? 184 : addr, &int_code, len);
+        s390_cpu_virt_mem_write(cpu, lowcore ? 184 : addr, ar, &int_code, len);
     }
     return ret;
 }
diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
index 1c0e78c64b..8e65e43f02 100644
--- a/target-s390x/kvm.c
+++ b/target-s390x/kvm.c
@@ -555,6 +555,7 @@ int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
 /**
  * kvm_s390_mem_op:
  * @addr: the logical start address in guest memory
+ * @ar: the access register number
  * @hostbuf: buffer in host memory. NULL = do only checks w/o copying
  * @len: length that should be transfered
  * @is_write: true = write, false = read
@@ -563,8 +564,8 @@ int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
  * Use KVM ioctl to read/write from/to guest memory. An access exception
  * is injected into the vCPU in case of translation errors.
  */
-int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, void *hostbuf, int len,
-                    bool is_write)
+int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
+                    int len, bool is_write)
 {
     struct kvm_s390_mem_op mem_op = {
         .gaddr = addr,
@@ -573,6 +574,7 @@ int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, void *hostbuf, int len,
         .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
                        : KVM_S390_MEMOP_LOGICAL_READ,
         .buf = (uint64_t)hostbuf,
+        .ar = ar,
     };
     int ret;

@@ -1017,7 +1019,8 @@ static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
     return rc;
 }

-static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run)
+static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
+                                  uint8_t *ar)
 {
     CPUS390XState *env = &cpu->env;
     uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
@@ -1028,12 +1031,16 @@ static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run)
     if (disp2 & 0x80000) {
         disp2 += 0xfff00000;
     }
+    if (ar) {
+        *ar = base2;
+    }

     return (base2 ? env->regs[base2] : 0) +
            (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
 }

-static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run)
+static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
+                                  uint8_t *ar)
 {
     CPUS390XState *env = &cpu->env;
     uint32_t base2 = run->s390_sieic.ipb >> 28;
@@ -1043,6 +1050,9 @@ static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run)
     if (disp2 & 0x80000) {
         disp2 += 0xfff00000;
     }
+    if (ar) {
+        *ar = base2;
+    }

     return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
 }

@@ -1074,11 +1084,12 @@ static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
 {
     uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
     uint64_t fiba;
+    uint8_t ar;

     cpu_synchronize_state(CPU(cpu));
-    fiba = get_base_disp_rxy(cpu, run);
+    fiba = get_base_disp_rxy(cpu, run, &ar);

-    return stpcifc_service_call(cpu, r1, fiba);
+    return stpcifc_service_call(cpu, r1, fiba, ar);
 }

 static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
@@ -1100,22 +1111,24 @@ static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
     uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
     uint8_t r3 = run->s390_sieic.ipa & 0x000f;
     uint64_t gaddr;
+    uint8_t ar;

     cpu_synchronize_state(CPU(cpu));
-    gaddr = get_base_disp_rsy(cpu, run);
+    gaddr = get_base_disp_rsy(cpu, run, &ar);

-    return pcistb_service_call(cpu, r1, r3, gaddr);
+    return pcistb_service_call(cpu, r1, r3, gaddr, ar);
 }

 static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
 {
     uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
     uint64_t fiba;
+    uint8_t ar;

     cpu_synchronize_state(CPU(cpu));
-    fiba = get_base_disp_rxy(cpu, run);
+    fiba = get_base_disp_rxy(cpu, run, &ar);

-    return mpcifc_service_call(cpu, r1, fiba);
+    return mpcifc_service_call(cpu, r1, fiba, ar);
 }

 static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
@@ -1244,7 +1257,7 @@ static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
      * For any diagnose call we support, bits 48-63 of the resulting
      * address specify the function code; the remainder is ignored.
      */
-    func_code = decode_basedisp_rs(&cpu->env, ipb) & DIAG_KVM_CODE_MASK;
+    func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
     switch (func_code) {
     case DIAG_IPL:
         kvm_handle_diag_308(cpu, run);
@@ -1591,7 +1604,8 @@ static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
     cpu_synchronize_state(CPU(cpu));

     /* get order code */
-    order = decode_basedisp_rs(env, run->s390_sieic.ipb) & SIGP_ORDER_MASK;
+    order = decode_basedisp_rs(env, run->s390_sieic.ipb, NULL)
+        & SIGP_ORDER_MASK;
     status_reg = &env->regs[r1];
     param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];

@@ -1765,12 +1779,12 @@ static int handle_tsch(S390CPU *cpu)
     return ret;
 }

-static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr)
+static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
 {
     struct sysib_322 sysib;
     int del;

-    if (s390_cpu_virt_mem_read(cpu, addr, &sysib, sizeof(sysib))) {
+    if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
         return;
     }
     /* Shift the stack of Extended Names to prepare for our own data */
@@ -1810,7 +1824,7 @@ static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr)
     /* Insert UUID */
     memcpy(sysib.vm[0].uuid, qemu_uuid, sizeof(sysib.vm[0].uuid));

-    s390_cpu_virt_mem_write(cpu, addr, &sysib, sizeof(sysib));
+    s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
 }

 static int handle_stsi(S390CPU *cpu)
@@ -1824,7 +1838,7 @@ static int handle_stsi(S390CPU *cpu)
             return 0;
         }
         /* Only sysib 3.2.2 needs post-handling for now. */
-        insert_stsi_3_2_2(cpu, run->s390_stsi.addr);
+        insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
         return 0;
     default:
         return 0;
diff --git a/target-s390x/mmu_helper.c b/target-s390x/mmu_helper.c
index cd2cb51629..30a38ecb3c 100644
--- a/target-s390x/mmu_helper.c
+++ b/target-s390x/mmu_helper.c
@@ -435,6 +435,7 @@ static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
 /**
  * s390_cpu_virt_mem_rw:
  * @laddr: the logical start address
+ * @ar: the access register number
  * @hostbuf: buffer in host memory. NULL = do only checks w/o copying
  * @len: length that should be transfered
  * @is_write: true = write, false = read
@@ -443,7 +444,7 @@ static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
  * Copy from/to guest memory using logical addresses. Note that we inject a
  * program interrupt in case there is an error while accessing the memory.
  */
-int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, void *hostbuf,
+int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
                          int len, bool is_write)
 {
     int currlen, nr_pages, i;
@@ -451,7 +452,7 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, void *hostbuf,
     int ret;

     if (kvm_enabled()) {
-        ret = kvm_s390_mem_op(cpu, laddr, hostbuf, len, is_write);
+        ret = kvm_s390_mem_op(cpu, laddr, ar, hostbuf, len, is_write);
         if (ret >= 0) {
             return ret;
         }
-- 
cgit v1.2.3
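
A closing illustration of the check-only mode that threads through this
series (a sketch only; the wrapper name is invented): passing a NULL host
buffer makes kvm_s390_mem_op() set KVM_S390_MEMOP_F_CHECK_ONLY, so the kernel
only verifies that the guest range is accessible, injecting an access
exception on failure, without copying any data.

    /* Sketch: verify that a guest operand is writable before emulating an
     * instruction, without transferring any bytes. */
    static int example_check_writable(S390CPU *cpu, vaddr laddr, uint8_t ar,
                                      int len)
    {
        /* Expands to s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true);
         * with hostbuf == NULL this becomes a KVM_S390_MEMOP_F_CHECK_ONLY
         * request when the KVM_S390_MEM_OP ioctl is available. */
        return s390_cpu_virt_mem_check_write(cpu, laddr, ar, len);
    }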