author     Frank Blaschka <frank.blaschka@de.ibm.com>   2015-01-09 09:04:39 +0100
committer  Cornelia Huck <cornelia.huck@de.ibm.com>     2015-01-12 10:14:04 +0100
commit     863f6f52b7134304234ea81554f62f54afb016fa (patch)
tree       9fd4f86a28998fdd56c4e75ca0719935dc2eb97a /hw/s390x
parent     8cba80c3a0331926c9c412c4c1e07896de29aab6 (diff)
s390: implement pci instructions
This patch implements the s390 PCI instructions in QEMU. It allows accessing
and driving PCI devices attached to the s390 PCI bus. Because of platform
constraints, devices using I/O BARs are not supported. A device also has to
support MSI/MSI-X to run on s390.

Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Diffstat (limited to 'hw/s390x')
-rw-r--r--  hw/s390x/Makefile.objs      2
-rw-r--r--  hw/s390x/s390-pci-inst.c  811
-rw-r--r--  hw/s390x/s390-pci-inst.h  288
3 files changed, 1100 insertions, 1 deletion
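
Before the diff itself: the PCI load/store handlers added below (pcilg_service_call(), pcistg_service_call()) decode their operands from an even/odd general register pair — function handle in the upper 32 bits of the even register, PCI address space in bits 16-19, access length in bits 0-3 (little-endian bit numbering), and the offset in the odd register. The following is a minimal standalone sketch of that decoding, mirroring the shifts used in the patch; the struct and helper names are illustrative only, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative decode of the even/odd register pair used by the PCI
 * load/store handlers; mirrors the shifts in pcilg_service_call() and
 * pcistg_service_call(). */
typedef struct {
    uint32_t fh;     /* function handle: upper 32 bits of the even register */
    uint8_t  pcias;  /* PCI address space: bits 16-19 */
    uint8_t  len;    /* access length in bytes: bits 0-3 */
    uint64_t offset; /* offset within the address space: odd register */
} ZpciLoadStoreOp;

static ZpciLoadStoreOp decode_load_store(uint64_t even_reg, uint64_t odd_reg)
{
    ZpciLoadStoreOp op = {
        .fh     = (uint32_t)(even_reg >> 32),
        .pcias  = (uint8_t)((even_reg >> 16) & 0xf),
        .len    = (uint8_t)(even_reg & 0xf),
        .offset = odd_reg,
    };
    return op;
}

int main(void)
{
    /* Arbitrary sample values, only to show the field split. */
    ZpciLoadStoreOp op = decode_load_store(0x0000008000020004ULL, 0x10ULL);
    printf("fh=0x%x pcias=%u len=%u offset=0x%llx\n",
           (unsigned)op.fh, (unsigned)op.pcias, (unsigned)op.len,
           (unsigned long long)op.offset);
    return 0;
}
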
diff --git a/hw/s390x/Makefile.objs b/hw/s390x/Makefile.objs
index 428d957d54..27cd75a932 100644
--- a/hw/s390x/Makefile.objs
+++ b/hw/s390x/Makefile.objs
@@ -8,4 +8,4 @@ obj-y += ipl.o
obj-y += css.o
obj-y += s390-virtio-ccw.o
obj-y += virtio-ccw.o
-obj-y += s390-pci-bus.o
+obj-y += s390-pci-bus.o s390-pci-inst.o
diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c
new file mode 100644
index 0000000000..5ea13e5d79
--- /dev/null
+++ b/hw/s390x/s390-pci-inst.c
@@ -0,0 +1,811 @@
+/*
+ * s390 PCI instructions
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
+ * Hong Bo Li <lihbbj@cn.ibm.com>
+ * Yi Min Zhao <zyimin@cn.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "s390-pci-inst.h"
+#include "s390-pci-bus.h"
+#include <exec/memory-internal.h>
+#include <qemu/error-report.h>
+
+/* #define DEBUG_S390PCI_INST */
+#ifdef DEBUG_S390PCI_INST
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+static void s390_set_status_code(CPUS390XState *env,
+ uint8_t r, uint64_t status_code)
+{
+ env->regs[r] &= ~0xff000000ULL;
+ env->regs[r] |= (status_code & 0xff) << 24;
+}
+
+static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
+{
+ S390PCIBusDevice *pbdev;
+ uint32_t res_code, initial_l2, g_l2, finish;
+ int rc, idx;
+ uint64_t resume_token;
+
+ rc = 0;
+ if (lduw_p(&rrb->request.hdr.len) != 32) {
+ res_code = CLP_RC_LEN;
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
+ res_code = CLP_RC_FMT;
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
+ ldq_p(&rrb->request.reserved1) != 0 ||
+ ldq_p(&rrb->request.reserved2) != 0) {
+ res_code = CLP_RC_RESNOT0;
+ rc = -EINVAL;
+ goto out;
+ }
+
+ resume_token = ldq_p(&rrb->request.resume_token);
+
+ if (resume_token) {
+ pbdev = s390_pci_find_dev_by_idx(resume_token);
+ if (!pbdev) {
+ res_code = CLP_RC_LISTPCI_BADRT;
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (lduw_p(&rrb->response.hdr.len) < 48) {
+ res_code = CLP_RC_8K;
+ rc = -EINVAL;
+ goto out;
+ }
+
+ initial_l2 = lduw_p(&rrb->response.hdr.len);
+ if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
+ != 0) {
+ res_code = CLP_RC_LEN;
+ rc = -EINVAL;
+ *cc = 3;
+ goto out;
+ }
+
+ stl_p(&rrb->response.fmt, 0);
+ stq_p(&rrb->response.reserved1, 0);
+ stq_p(&rrb->response.reserved2, 0);
+ stl_p(&rrb->response.mdd, FH_VIRT);
+ stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
+ rrb->response.entry_size = sizeof(ClpFhListEntry);
+ finish = 0;
+ idx = resume_token;
+ g_l2 = LIST_PCI_HDR_LEN;
+ do {
+ pbdev = s390_pci_find_dev_by_idx(idx);
+ if (!pbdev) {
+ finish = 1;
+ break;
+ }
+ stw_p(&rrb->response.fh_list[idx - resume_token].device_id,
+ pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
+ stw_p(&rrb->response.fh_list[idx - resume_token].vendor_id,
+ pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
+ stl_p(&rrb->response.fh_list[idx - resume_token].config, 0x80000000);
+ stl_p(&rrb->response.fh_list[idx - resume_token].fid, pbdev->fid);
+ stl_p(&rrb->response.fh_list[idx - resume_token].fh, pbdev->fh);
+
+ g_l2 += sizeof(ClpFhListEntry);
+ /* Add endian check for DPRINTF? */
+ DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
+ g_l2,
+ lduw_p(&rrb->response.fh_list[idx - resume_token].vendor_id),
+ lduw_p(&rrb->response.fh_list[idx - resume_token].device_id),
+ ldl_p(&rrb->response.fh_list[idx - resume_token].fid),
+ ldl_p(&rrb->response.fh_list[idx - resume_token].fh));
+ idx++;
+ } while (g_l2 < initial_l2);
+
+ if (finish == 1) {
+ resume_token = 0;
+ } else {
+ resume_token = idx;
+ }
+ stq_p(&rrb->response.resume_token, resume_token);
+ stw_p(&rrb->response.hdr.len, g_l2);
+ stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
+out:
+ if (rc) {
+ DPRINTF("list pci failed rc 0x%x\n", rc);
+ stw_p(&rrb->response.hdr.rsp, res_code);
+ }
+ return rc;
+}
+
+int clp_service_call(S390CPU *cpu, uint8_t r2)
+{
+ ClpReqHdr *reqh;
+ ClpRspHdr *resh;
+ S390PCIBusDevice *pbdev;
+ uint32_t req_len;
+ uint32_t res_len;
+ uint8_t buffer[4096 * 2];
+ uint8_t cc = 0;
+ CPUS390XState *env = &cpu->env;
+ int i;
+
+ cpu_synchronize_state(CPU(cpu));
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ program_interrupt(env, PGM_PRIVILEGED, 4);
+ return 0;
+ }
+
+ cpu_physical_memory_read(env->regs[r2], buffer, sizeof(*reqh));
+ reqh = (ClpReqHdr *)buffer;
+ req_len = lduw_p(&reqh->len);
+ if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+
+ cpu_physical_memory_read(env->regs[r2], buffer, req_len + sizeof(*resh));
+ resh = (ClpRspHdr *)(buffer + req_len);
+ res_len = lduw_p(&resh->len);
+ if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+ if ((req_len + res_len) > 8192) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+
+ cpu_physical_memory_read(env->regs[r2], buffer, req_len + res_len);
+
+ if (req_len != 32) {
+ stw_p(&resh->rsp, CLP_RC_LEN);
+ goto out;
+ }
+
+ switch (lduw_p(&reqh->cmd)) {
+ case CLP_LIST_PCI: {
+ ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
+ list_pci(rrb, &cc);
+ break;
+ }
+ case CLP_SET_PCI_FN: {
+ ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
+ ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;
+
+ pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqsetpci->fh));
+ if (!pbdev) {
+ stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
+ goto out;
+ }
+
+ switch (reqsetpci->oc) {
+ case CLP_SET_ENABLE_PCI_FN:
+ pbdev->fh = pbdev->fh | 1 << ENABLE_BIT_OFFSET;
+ stl_p(&ressetpci->fh, pbdev->fh);
+ stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
+ break;
+ case CLP_SET_DISABLE_PCI_FN:
+ pbdev->fh = pbdev->fh & ~(1 << ENABLE_BIT_OFFSET);
+ pbdev->error_state = false;
+ pbdev->lgstg_blocked = false;
+ stl_p(&ressetpci->fh, pbdev->fh);
+ stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
+ break;
+ default:
+ DPRINTF("unknown set pci command\n");
+ stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
+ break;
+ }
+ break;
+ }
+ case CLP_QUERY_PCI_FN: {
+ ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
+ ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;
+
+ pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqquery->fh));
+ if (!pbdev) {
+ DPRINTF("query pci no pci dev\n");
+ stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
+ goto out;
+ }
+
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ uint32_t data = pci_get_long(pbdev->pdev->config +
+ PCI_BASE_ADDRESS_0 + (i * 4));
+
+ stl_p(&resquery->bar[i], data);
+ resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
+ ctz64(pbdev->pdev->io_regions[i].size) : 0;
+ DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 "barsize 0x%x\n", i,
+ ldl_p(&resquery->bar[i]),
+ pbdev->pdev->io_regions[i].size,
+ resquery->bar_size[i]);
+ }
+
+ stq_p(&resquery->sdma, ZPCI_SDMA_ADDR);
+ stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
+ stw_p(&resquery->pchid, 0);
+ stw_p(&resquery->ug, 1);
+ stl_p(&resquery->uid, pbdev->fid);
+ stw_p(&resquery->hdr.rsp, CLP_RC_OK);
+ break;
+ }
+ case CLP_QUERY_PCI_FNGRP: {
+ ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
+ resgrp->fr = 1;
+ stq_p(&resgrp->dasm, 0);
+ stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
+ stw_p(&resgrp->mui, 0);
+ stw_p(&resgrp->i, 128);
+ resgrp->version = 0;
+
+ stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
+ break;
+ }
+ default:
+ DPRINTF("unknown clp command\n");
+ stw_p(&resh->rsp, CLP_RC_CMD);
+ break;
+ }
+
+out:
+ cpu_physical_memory_write(env->regs[r2], buffer, req_len + res_len);
+ setcc(cpu, cc);
+ return 0;
+}
+
+int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
+{
+ CPUS390XState *env = &cpu->env;
+ S390PCIBusDevice *pbdev;
+ uint64_t offset;
+ uint64_t data;
+ uint8_t len;
+ uint32_t fh;
+ uint8_t pcias;
+
+ cpu_synchronize_state(CPU(cpu));
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ program_interrupt(env, PGM_PRIVILEGED, 4);
+ return 0;
+ }
+
+ if (r2 & 0x1) {
+ program_interrupt(env, PGM_SPECIFICATION, 4);
+ return 0;
+ }
+
+ fh = env->regs[r2] >> 32;
+ pcias = (env->regs[r2] >> 16) & 0xf;
+ len = env->regs[r2] & 0xf;
+ offset = env->regs[r2 + 1];
+
+ pbdev = s390_pci_find_dev_by_fh(fh);
+ if (!pbdev) {
+ DPRINTF("pcilg no pci dev\n");
+ setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
+ return 0;
+ }
+
+ if (pbdev->lgstg_blocked) {
+ setcc(cpu, ZPCI_PCI_LS_ERR);
+ s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
+ return 0;
+ }
+
+ if (pcias < 6) {
+ if ((8 - (offset & 0x7)) < len) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+ MemoryRegion *mr = pbdev->pdev->io_regions[pcias].memory;
+ io_mem_read(mr, offset, &data, len);
+ } else if (pcias == 15) {
+ if ((4 - (offset & 0x3)) < len) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+ data = pci_host_config_read_common(
+ pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);
+
+ switch (len) {
+ case 1:
+ break;
+ case 2:
+ data = bswap16(data);
+ break;
+ case 4:
+ data = bswap32(data);
+ break;
+ case 8:
+ data = bswap64(data);
+ break;
+ default:
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+ } else {
+ DPRINTF("invalid space\n");
+ setcc(cpu, ZPCI_PCI_LS_ERR);
+ s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
+ return 0;
+ }
+
+ env->regs[r1] = data;
+ setcc(cpu, ZPCI_PCI_LS_OK);
+ return 0;
+}
+
+static void update_msix_table_msg_data(S390PCIBusDevice *pbdev, uint64_t offset,
+ uint64_t *data, uint8_t len)
+{
+ uint32_t val;
+ uint8_t *msg_data;
+
+ if (offset % PCI_MSIX_ENTRY_SIZE != 8) {
+ return;
+ }
+
+ if (len != 4) {
+ DPRINTF("access msix table msg data but len is %d\n", len);
+ return;
+ }
+
+ msg_data = (uint8_t *)data - offset % PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+ val = pci_get_long(msg_data) | (pbdev->fid << ZPCI_MSI_VEC_BITS);
+ pci_set_long(msg_data, val);
+ DPRINTF("update msix msg_data to 0x%" PRIx64 "\n", *data);
+}
+
+static int trap_msix(S390PCIBusDevice *pbdev, uint64_t offset, uint8_t pcias)
+{
+ if (pbdev->msix.available && pbdev->msix.table_bar == pcias &&
+ offset >= pbdev->msix.table_offset &&
+ offset <= pbdev->msix.table_offset +
+ (pbdev->msix.entries - 1) * PCI_MSIX_ENTRY_SIZE) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
+{
+ CPUS390XState *env = &cpu->env;
+ uint64_t offset, data;
+ S390PCIBusDevice *pbdev;
+ uint8_t len;
+ uint32_t fh;
+ uint8_t pcias;
+
+ cpu_synchronize_state(CPU(cpu));
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ program_interrupt(env, PGM_PRIVILEGED, 4);
+ return 0;
+ }
+
+ if (r2 & 0x1) {
+ program_interrupt(env, PGM_SPECIFICATION, 4);
+ return 0;
+ }
+
+ fh = env->regs[r2] >> 32;
+ pcias = (env->regs[r2] >> 16) & 0xf;
+ len = env->regs[r2] & 0xf;
+ offset = env->regs[r2 + 1];
+
+ pbdev = s390_pci_find_dev_by_fh(fh);
+ if (!pbdev) {
+ DPRINTF("pcistg no pci dev\n");
+ setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
+ return 0;
+ }
+
+ if (pbdev->lgstg_blocked) {
+ setcc(cpu, ZPCI_PCI_LS_ERR);
+ s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
+ return 0;
+ }
+
+ data = env->regs[r1];
+ if (pcias < 6) {
+ if ((8 - (offset & 0x7)) < len) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+ MemoryRegion *mr;
+ if (trap_msix(pbdev, offset, pcias)) {
+ offset = offset - pbdev->msix.table_offset;
+ mr = &pbdev->pdev->msix_table_mmio;
+ update_msix_table_msg_data(pbdev, offset, &data, len);
+ } else {
+ mr = pbdev->pdev->io_regions[pcias].memory;
+ }
+
+ io_mem_write(mr, offset, data, len);
+ } else if (pcias == 15) {
+ if ((4 - (offset & 0x3)) < len) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+ switch (len) {
+ case 1:
+ break;
+ case 2:
+ data = bswap16(data);
+ break;
+ case 4:
+ data = bswap32(data);
+ break;
+ case 8:
+ data = bswap64(data);
+ break;
+ default:
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+
+ pci_host_config_write_common(pbdev->pdev, offset,
+ pci_config_size(pbdev->pdev),
+ data, len);
+ } else {
+ DPRINTF("pcistg invalid space\n");
+ setcc(cpu, ZPCI_PCI_LS_ERR);
+ s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
+ return 0;
+ }
+
+ setcc(cpu, ZPCI_PCI_LS_OK);
+ return 0;
+}
+
+int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
+{
+ CPUS390XState *env = &cpu->env;
+ uint32_t fh;
+ S390PCIBusDevice *pbdev;
+ ram_addr_t size;
+ IOMMUTLBEntry entry;
+ MemoryRegion *mr;
+
+ cpu_synchronize_state(CPU(cpu));
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ program_interrupt(env, PGM_PRIVILEGED, 4);
+ goto out;
+ }
+
+ if (r2 & 0x1) {
+ program_interrupt(env, PGM_SPECIFICATION, 4);
+ goto out;
+ }
+
+ fh = env->regs[r1] >> 32;
+ size = env->regs[r2 + 1];
+
+ pbdev = s390_pci_find_dev_by_fh(fh);
+
+ if (!pbdev) {
+ DPRINTF("rpcit no pci dev\n");
+ setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
+ goto out;
+ }
+
+ mr = pci_device_iommu_address_space(pbdev->pdev)->root;
+ entry = mr->iommu_ops->translate(mr, env->regs[r2], 0);
+
+ if (!entry.translated_addr) {
+ setcc(cpu, ZPCI_PCI_LS_ERR);
+ goto out;
+ }
+
+ entry.addr_mask = size - 1;
+ memory_region_notify_iommu(mr, entry);
+ setcc(cpu, ZPCI_PCI_LS_OK);
+out:
+ return 0;
+}
+
+int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr)
+{
+ CPUS390XState *env = &cpu->env;
+ S390PCIBusDevice *pbdev;
+ MemoryRegion *mr;
+ int i;
+ uint64_t val;
+ uint32_t fh;
+ uint8_t pcias;
+ uint8_t len;
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ program_interrupt(env, PGM_PRIVILEGED, 6);
+ return 0;
+ }
+
+ fh = env->regs[r1] >> 32;
+ pcias = (env->regs[r1] >> 16) & 0xf;
+ len = env->regs[r1] & 0xff;
+
+ if (pcias > 5) {
+ DPRINTF("pcistb invalid space\n");
+ setcc(cpu, ZPCI_PCI_LS_ERR);
+ s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
+ return 0;
+ }
+
+ switch (len) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ break;
+ default:
+ program_interrupt(env, PGM_SPECIFICATION, 6);
+ return 0;
+ }
+
+ pbdev = s390_pci_find_dev_by_fh(fh);
+ if (!pbdev) {
+ DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
+ setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
+ return 0;
+ }
+
+ if (pbdev->lgstg_blocked) {
+ setcc(cpu, ZPCI_PCI_LS_ERR);
+ s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
+ return 0;
+ }
+
+ mr = pbdev->pdev->io_regions[pcias].memory;
+ if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
+ program_interrupt(env, PGM_ADDRESSING, 6);
+ return 0;
+ }
+
+ for (i = 0; i < len / 8; i++) {
+ val = ldq_phys(&address_space_memory, gaddr + i * 8);
+ io_mem_write(mr, env->regs[r3] + i * 8, val, 8);
+ }
+
+ setcc(cpu, ZPCI_PCI_LS_OK);
+ return 0;
+}
+
+static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
+{
+ int ret;
+ S390FLICState *fs = s390_get_flic();
+ S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
+
+ ret = css_register_io_adapter(S390_PCIPT_ADAPTER,
+ FIB_DATA_ISC(ldl_p(&fib.data)), true, false,
+ &pbdev->routes.adapter.adapter_id);
+ assert(ret == 0);
+
+ fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
+ ldq_p(&fib.aisb), true);
+ fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
+ ldq_p(&fib.aibv), true);
+
+ pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
+ pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
+ pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
+ pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
+ pbdev->isc = FIB_DATA_ISC(ldl_p(&fib.data));
+ pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
+ pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));
+
+ DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
+ return 0;
+}
+
+static int dereg_irqs(S390PCIBusDevice *pbdev)
+{
+ S390FLICState *fs = s390_get_flic();
+ S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
+
+ fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
+ pbdev->routes.adapter.ind_addr, false);
+
+ pbdev->routes.adapter.summary_addr = 0;
+ pbdev->routes.adapter.summary_offset = 0;
+ pbdev->routes.adapter.ind_addr = 0;
+ pbdev->routes.adapter.ind_offset = 0;
+ pbdev->isc = 0;
+ pbdev->noi = 0;
+ pbdev->sum = 0;
+
+ DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
+ return 0;
+}
+
+static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
+{
+ uint64_t pba = ldq_p(&fib.pba);
+ uint64_t pal = ldq_p(&fib.pal);
+ uint64_t g_iota = ldq_p(&fib.iota);
+ uint8_t dt = (g_iota >> 2) & 0x7;
+ uint8_t t = (g_iota >> 11) & 0x1;
+
+ if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
+ program_interrupt(env, PGM_OPERAND, 6);
+ return -EINVAL;
+ }
+
+ /* currently we only support designation type 1 with translation */
+ if (!(dt == ZPCI_IOTA_RTTO && t)) {
+ error_report("unsupported ioat dt %d t %d", dt, t);
+ program_interrupt(env, PGM_OPERAND, 6);
+ return -EINVAL;
+ }
+
+ pbdev->pba = pba;
+ pbdev->pal = pal;
+ pbdev->g_iota = g_iota;
+ return 0;
+}
+
+static void dereg_ioat(S390PCIBusDevice *pbdev)
+{
+ pbdev->pba = 0;
+ pbdev->pal = 0;
+ pbdev->g_iota = 0;
+}
+
+int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba)
+{
+ CPUS390XState *env = &cpu->env;
+ uint8_t oc;
+ uint32_t fh;
+ ZpciFib fib;
+ S390PCIBusDevice *pbdev;
+ uint64_t cc = ZPCI_PCI_LS_OK;
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ program_interrupt(env, PGM_PRIVILEGED, 6);
+ return 0;
+ }
+
+ oc = env->regs[r1] & 0xff;
+ fh = env->regs[r1] >> 32;
+
+ if (fiba & 0x7) {
+ program_interrupt(env, PGM_SPECIFICATION, 6);
+ return 0;
+ }
+
+ pbdev = s390_pci_find_dev_by_fh(fh);
+ if (!pbdev) {
+ DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
+ setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
+ return 0;
+ }
+
+ cpu_physical_memory_read(fiba, (uint8_t *)&fib, sizeof(fib));
+
+ switch (oc) {
+ case ZPCI_MOD_FC_REG_INT:
+ if (reg_irqs(env, pbdev, fib)) {
+ cc = ZPCI_PCI_LS_ERR;
+ }
+ break;
+ case ZPCI_MOD_FC_DEREG_INT:
+ dereg_irqs(pbdev);
+ break;
+ case ZPCI_MOD_FC_REG_IOAT:
+ if (reg_ioat(env, pbdev, fib)) {
+ cc = ZPCI_PCI_LS_ERR;
+ }
+ break;
+ case ZPCI_MOD_FC_DEREG_IOAT:
+ dereg_ioat(pbdev);
+ break;
+ case ZPCI_MOD_FC_REREG_IOAT:
+ dereg_ioat(pbdev);
+ if (reg_ioat(env, pbdev, fib)) {
+ cc = ZPCI_PCI_LS_ERR;
+ }
+ break;
+ case ZPCI_MOD_FC_RESET_ERROR:
+ pbdev->error_state = false;
+ pbdev->lgstg_blocked = false;
+ break;
+ case ZPCI_MOD_FC_RESET_BLOCK:
+ pbdev->lgstg_blocked = false;
+ break;
+ case ZPCI_MOD_FC_SET_MEASURE:
+ pbdev->fmb_addr = ldq_p(&fib.fmb_addr);
+ break;
+ default:
+ program_interrupt(&cpu->env, PGM_OPERAND, 6);
+ cc = ZPCI_PCI_LS_ERR;
+ }
+
+ setcc(cpu, cc);
+ return 0;
+}
+
+int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba)
+{
+ CPUS390XState *env = &cpu->env;
+ uint32_t fh;
+ ZpciFib fib;
+ S390PCIBusDevice *pbdev;
+ uint32_t data;
+ uint64_t cc = ZPCI_PCI_LS_OK;
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ program_interrupt(env, PGM_PRIVILEGED, 6);
+ return 0;
+ }
+
+ fh = env->regs[r1] >> 32;
+
+ if (fiba & 0x7) {
+ program_interrupt(env, PGM_SPECIFICATION, 6);
+ return 0;
+ }
+
+ pbdev = s390_pci_find_dev_by_fh(fh);
+ if (!pbdev) {
+ setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
+ return 0;
+ }
+
+ memset(&fib, 0, sizeof(fib));
+ stq_p(&fib.pba, pbdev->pba);
+ stq_p(&fib.pal, pbdev->pal);
+ stq_p(&fib.iota, pbdev->g_iota);
+ stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
+ stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
+ stq_p(&fib.fmb_addr, pbdev->fmb_addr);
+
+ data = (pbdev->isc << 28) | (pbdev->noi << 16) |
+ (pbdev->routes.adapter.ind_offset << 8) | (pbdev->sum << 7) |
+ pbdev->routes.adapter.summary_offset;
+ stw_p(&fib.data, data);
+
+ if (pbdev->fh >> ENABLE_BIT_OFFSET) {
+ fib.fc |= 0x80;
+ }
+
+ if (pbdev->error_state) {
+ fib.fc |= 0x40;
+ }
+
+ if (pbdev->lgstg_blocked) {
+ fib.fc |= 0x20;
+ }
+
+ if (pbdev->g_iota) {
+ fib.fc |= 0x10;
+ }
+
+ cpu_physical_memory_write(fiba, (uint8_t *)&fib, sizeof(fib));
+ setcc(cpu, cc);
+ return 0;
+}
diff --git a/hw/s390x/s390-pci-inst.h b/hw/s390x/s390-pci-inst.h
new file mode 100644
index 0000000000..7e6c804737
--- /dev/null
+++ b/hw/s390x/s390-pci-inst.h
@@ -0,0 +1,288 @@
+/*
+ * s390 PCI instruction definitions
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
+ * Hong Bo Li <lihbbj@cn.ibm.com>
+ * Yi Min Zhao <zyimin@cn.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_PCI_INST_H
+#define HW_S390_PCI_INST_H
+
+#include <sysemu/dma.h>
+
+/* CLP common request & response block size */
+#define CLP_BLK_SIZE 4096
+#define PCI_BAR_COUNT 6
+#define PCI_MAX_FUNCTIONS 4096
+
+typedef struct ClpReqHdr {
+ uint16_t len;
+ uint16_t cmd;
+} QEMU_PACKED ClpReqHdr;
+
+typedef struct ClpRspHdr {
+ uint16_t len;
+ uint16_t rsp;
+} QEMU_PACKED ClpRspHdr;
+
+/* CLP Response Codes */
+#define CLP_RC_OK 0x0010 /* Command request successfully processed */
+#define CLP_RC_CMD 0x0020 /* Command code not recognized */
+#define CLP_RC_PERM 0x0030 /* Command not authorized */
+#define CLP_RC_FMT 0x0040 /* Invalid command request format */
+#define CLP_RC_LEN 0x0050 /* Invalid command request length */
+#define CLP_RC_8K 0x0060 /* Command requires 8K LPCB */
+#define CLP_RC_RESNOT0 0x0070 /* Reserved field not zero */
+#define CLP_RC_NODATA 0x0080 /* No data available */
+#define CLP_RC_FC_UNKNOWN 0x0100 /* Function code not recognized */
+
+/*
+ * Call Logical Processor - Command Codes
+ */
+#define CLP_LIST_PCI 0x0002
+#define CLP_QUERY_PCI_FN 0x0003
+#define CLP_QUERY_PCI_FNGRP 0x0004
+#define CLP_SET_PCI_FN 0x0005
+
+/* PCI function handle list entry */
+typedef struct ClpFhListEntry {
+ uint16_t device_id;
+ uint16_t vendor_id;
+#define CLP_FHLIST_MASK_CONFIG 0x80000000
+ uint32_t config;
+ uint32_t fid;
+ uint32_t fh;
+} QEMU_PACKED ClpFhListEntry;
+
+#define CLP_RC_SETPCIFN_FH 0x0101 /* Invalid PCI fn handle */
+#define CLP_RC_SETPCIFN_FHOP 0x0102 /* Fn handle not valid for op */
+#define CLP_RC_SETPCIFN_DMAAS 0x0103 /* Invalid DMA addr space */
+#define CLP_RC_SETPCIFN_RES 0x0104 /* Insufficient resources */
+#define CLP_RC_SETPCIFN_ALRDY 0x0105 /* Fn already in requested state */
+#define CLP_RC_SETPCIFN_ERR 0x0106 /* Fn in permanent error state */
+#define CLP_RC_SETPCIFN_RECPND 0x0107 /* Error recovery pending */
+#define CLP_RC_SETPCIFN_BUSY 0x0108 /* Fn busy */
+#define CLP_RC_LISTPCI_BADRT 0x010a /* Resume token not recognized */
+#define CLP_RC_QUERYPCIFG_PFGID 0x010b /* Unrecognized PFGID */
+
+/* request or response block header length */
+#define LIST_PCI_HDR_LEN 32
+
+/* Number of function handles fitting in response block */
+#define CLP_FH_LIST_NR_ENTRIES \
+ ((CLP_BLK_SIZE - 2 * LIST_PCI_HDR_LEN) \
+ / sizeof(ClpFhListEntry))
+
+#define CLP_SET_ENABLE_PCI_FN 0 /* Yes, 0 enables it */
+#define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */
+
+#define CLP_UTIL_STR_LEN 64
+
+#define CLP_MASK_FMT 0xf0000000
+
+/* List PCI functions request */
+typedef struct ClpReqListPci {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint64_t resume_token;
+ uint64_t reserved2;
+} QEMU_PACKED ClpReqListPci;
+
+/* List PCI functions response */
+typedef struct ClpRspListPci {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint64_t resume_token;
+ uint32_t mdd;
+ uint16_t max_fn;
+ uint8_t reserved2;
+ uint8_t entry_size;
+ ClpFhListEntry fh_list[CLP_FH_LIST_NR_ENTRIES];
+} QEMU_PACKED ClpRspListPci;
+
+/* Query PCI function request */
+typedef struct ClpReqQueryPci {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint32_t fh; /* function handle */
+ uint32_t reserved2;
+ uint64_t reserved3;
+} QEMU_PACKED ClpReqQueryPci;
+
+/* Query PCI function response */
+typedef struct ClpRspQueryPci {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint16_t vfn; /* virtual fn number */
+#define CLP_RSP_QPCI_MASK_UTIL 0x100
+#define CLP_RSP_QPCI_MASK_PFGID 0xff
+ uint16_t ug;
+ uint32_t fid; /* pci function id */
+ uint8_t bar_size[PCI_BAR_COUNT];
+ uint16_t pchid;
+ uint32_t bar[PCI_BAR_COUNT];
+ uint64_t reserved2;
+ uint64_t sdma; /* start dma as */
+ uint64_t edma; /* end dma as */
+ uint32_t reserved3[11];
+ uint32_t uid;
+ uint8_t util_str[CLP_UTIL_STR_LEN]; /* utility string */
+} QEMU_PACKED ClpRspQueryPci;
+
+/* Query PCI function group request */
+typedef struct ClpReqQueryPciGrp {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+#define CLP_REQ_QPCIG_MASK_PFGID 0xff
+ uint32_t g;
+ uint32_t reserved2;
+ uint64_t reserved3;
+} QEMU_PACKED ClpReqQueryPciGrp;
+
+/* Query PCI function group response */
+typedef struct ClpRspQueryPciGrp {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+#define CLP_RSP_QPCIG_MASK_NOI 0xfff
+ uint16_t i;
+ uint8_t version;
+#define CLP_RSP_QPCIG_MASK_FRAME 0x2
+#define CLP_RSP_QPCIG_MASK_REFRESH 0x1
+ uint8_t fr;
+ uint16_t reserved2;
+ uint16_t mui;
+ uint64_t reserved3;
+ uint64_t dasm; /* dma address space mask */
+ uint64_t msia; /* MSI address */
+ uint64_t reserved4;
+ uint64_t reserved5;
+} QEMU_PACKED ClpRspQueryPciGrp;
+
+/* Set PCI function request */
+typedef struct ClpReqSetPci {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint32_t fh; /* function handle */
+ uint16_t reserved2;
+ uint8_t oc; /* operation controls */
+ uint8_t ndas; /* number of dma spaces */
+ uint64_t reserved3;
+} QEMU_PACKED ClpReqSetPci;
+
+/* Set PCI function response */
+typedef struct ClpRspSetPci {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint32_t fh; /* function handle */
+ uint32_t reserved3;
+ uint64_t reserved4;
+} QEMU_PACKED ClpRspSetPci;
+
+typedef struct ClpReqRspListPci {
+ ClpReqListPci request;
+ ClpRspListPci response;
+} QEMU_PACKED ClpReqRspListPci;
+
+typedef struct ClpReqRspSetPci {
+ ClpReqSetPci request;
+ ClpRspSetPci response;
+} QEMU_PACKED ClpReqRspSetPci;
+
+typedef struct ClpReqRspQueryPci {
+ ClpReqQueryPci request;
+ ClpRspQueryPci response;
+} QEMU_PACKED ClpReqRspQueryPci;
+
+typedef struct ClpReqRspQueryPciGrp {
+ ClpReqQueryPciGrp request;
+ ClpRspQueryPciGrp response;
+} QEMU_PACKED ClpReqRspQueryPciGrp;
+
+/* Load/Store status codes */
+#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
+#define ZPCI_PCI_ST_FUNC_IN_ERR 8
+#define ZPCI_PCI_ST_BLOCKED 12
+#define ZPCI_PCI_ST_INSUF_RES 16
+#define ZPCI_PCI_ST_INVAL_AS 20
+#define ZPCI_PCI_ST_FUNC_ALREADY_ENABLED 24
+#define ZPCI_PCI_ST_DMA_AS_NOT_ENABLED 28
+#define ZPCI_PCI_ST_2ND_OP_IN_INV_AS 36
+#define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40
+#define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44
+
+/* Load/Store return codes */
+#define ZPCI_PCI_LS_OK 0
+#define ZPCI_PCI_LS_ERR 1
+#define ZPCI_PCI_LS_BUSY 2
+#define ZPCI_PCI_LS_INVAL_HANDLE 3
+
+/* Modify PCI Function Controls */
+#define ZPCI_MOD_FC_REG_INT 2
+#define ZPCI_MOD_FC_DEREG_INT 3
+#define ZPCI_MOD_FC_REG_IOAT 4
+#define ZPCI_MOD_FC_DEREG_IOAT 5
+#define ZPCI_MOD_FC_REREG_IOAT 6
+#define ZPCI_MOD_FC_RESET_ERROR 7
+#define ZPCI_MOD_FC_RESET_BLOCK 9
+#define ZPCI_MOD_FC_SET_MEASURE 10
+
+/* FIB function controls */
+#define ZPCI_FIB_FC_ENABLED 0x80
+#define ZPCI_FIB_FC_ERROR 0x40
+#define ZPCI_FIB_FC_LS_BLOCKED 0x20
+#define ZPCI_FIB_FC_DMAAS_REG 0x10
+
+/* Function Information Block */
+typedef struct ZpciFib {
+ uint8_t fmt; /* format */
+ uint8_t reserved1[7];
+ uint8_t fc; /* function controls */
+ uint8_t reserved2;
+ uint16_t reserved3;
+ uint32_t reserved4;
+ uint64_t pba; /* PCI base address */
+ uint64_t pal; /* PCI address limit */
+ uint64_t iota; /* I/O Translation Anchor */
+#define FIB_DATA_ISC(x) (((x) >> 28) & 0x7)
+#define FIB_DATA_NOI(x) (((x) >> 16) & 0xfff)
+#define FIB_DATA_AIBVO(x) (((x) >> 8) & 0x3f)
+#define FIB_DATA_SUM(x) (((x) >> 7) & 0x1)
+#define FIB_DATA_AISBO(x) ((x) & 0x3f)
+ uint32_t data;
+ uint32_t reserved5;
+ uint64_t aibv; /* Adapter int bit vector address */
+ uint64_t aisb; /* Adapter int summary bit address */
+ uint64_t fmb_addr; /* Function measurement address and key */
+ uint32_t reserved6;
+ uint32_t gd;
+} QEMU_PACKED ZpciFib;
+
+int clp_service_call(S390CPU *cpu, uint8_t r2);
+int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
+int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
+int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
+int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr);
+int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba);
+int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba);
+
+#endif
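
For illustration, the FIB_DATA_* macros above unpack the interrupt parameters (ISC, number of interrupts, AIBV/AISB offsets, summary bit) that reg_irqs() in s390-pci-inst.c reads from the 32-bit fib.data word. The small self-contained example below uses the same macro definitions; the sample data value is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Same field extraction as the FIB_DATA_* macros in s390-pci-inst.h;
 * reg_irqs() applies them to the guest-supplied fib.data word. */
#define FIB_DATA_ISC(x)   (((x) >> 28) & 0x7)
#define FIB_DATA_NOI(x)   (((x) >> 16) & 0xfff)
#define FIB_DATA_AIBVO(x) (((x) >> 8) & 0x3f)
#define FIB_DATA_SUM(x)   (((x) >> 7) & 0x1)
#define FIB_DATA_AISBO(x) ((x) & 0x3f)

int main(void)
{
    uint32_t data = 0x30200085; /* arbitrary sample value */
    printf("isc=%u noi=%u aibvo=%u sum=%u aisbo=%u\n",
           FIB_DATA_ISC(data), FIB_DATA_NOI(data), FIB_DATA_AIBVO(data),
           FIB_DATA_SUM(data), FIB_DATA_AISBO(data));
    return 0;
}
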