author     Peter Maydell <peter.maydell@linaro.org>   2018-08-27 16:44:20 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2018-08-27 16:44:20 +0100
commit     19b599f7664b2ebfd0f405fb79c14dd241557452 (patch)
tree       b393f7e0dba1d81af703c27cf747b704eb37a3a4
parent     e1e388900d84f978c35c66125b546576bf7e4019 (diff)
parent     4e4abd111a2af0179a4467368d695958844bf113 (diff)
Merge remote-tracking branch 'remotes/armbru/tags/pull-error-2018-08-27-v2' into staging
Error reporting patches for 2018-08-27
# gpg: Signature made Mon 27 Aug 2018 14:18:15 BST
# gpg: using RSA key 3870B400EB918653
# gpg: Good signature from "Markus Armbruster <armbru@redhat.com>"
# gpg: aka "Markus Armbruster <armbru@pond.sub.org>"
# Primary key fingerprint: 354B C8B3 D7EB 2A6B 6867 4E5F 3870 B400 EB91 8653
* remotes/armbru/tags/pull-error-2018-08-27-v2:
intel-iommu: replace more vtd_err_* traces
intel-iommu: start to use error_report_once
qemu-error: introduce {error|warn}_report_once
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--   hw/i386/intel_iommu.c        | 129
-rw-r--r--   hw/i386/trace-events         |  13
-rw-r--r--   include/qemu/error-report.h  |  32
3 files changed, 115 insertions, 59 deletions
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 0a8cd4e9cc..3dfada19a6 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -311,14 +311,14 @@ static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
 {
     if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
         pre_fsts & VTD_FSTS_IQE) {
-        trace_vtd_err("There are previous interrupt conditions "
-                      "to be serviced by software, fault event "
-                      "is not generated.");
+        error_report_once("There are previous interrupt conditions "
+                          "to be serviced by software, fault event "
+                          "is not generated");
         return;
     }
     vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
     if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
-        trace_vtd_err("Interrupt Mask set, irq is not generated.");
+        error_report_once("Interrupt Mask set, irq is not generated");
     } else {
         vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
         vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
@@ -426,20 +426,20 @@ static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
     trace_vtd_dmar_fault(source_id, fault, addr, is_write);
     if (fsts_reg & VTD_FSTS_PFO) {
-        trace_vtd_err("New fault is not recorded due to "
-                      "Primary Fault Overflow.");
+        error_report_once("New fault is not recorded due to "
+                          "Primary Fault Overflow");
         return;
     }
     if (vtd_try_collapse_fault(s, source_id)) {
-        trace_vtd_err("New fault is not recorded due to "
-                      "compression of faults.");
+        error_report_once("New fault is not recorded due to "
+                          "compression of faults");
         return;
     }
     if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
-        trace_vtd_err("Next Fault Recording Reg is used, "
-                      "new fault is not recorded, set PFO field.");
+        error_report_once("Next Fault Recording Reg is used, "
+                          "new fault is not recorded, set PFO field");
         vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
         return;
     }
@@ -447,8 +447,8 @@ static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
     vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);
     if (fsts_reg & VTD_FSTS_PPF) {
-        trace_vtd_err("There are pending faults already, "
-                      "fault event is not generated.");
+        error_report_once("There are pending faults already, "
+                          "fault event is not generated");
         vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
         s->next_frcd_reg++;
         if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
@@ -705,7 +705,8 @@ static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
     uint64_t access_right_check;
     if (!vtd_iova_range_check(iova, ce, aw_bits)) {
-        trace_vtd_err_dmar_iova_overflow(iova);
+        error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 ")",
+                          __func__, iova);
         return -VTD_FR_ADDR_BEYOND_MGAW;
     }
@@ -717,7 +718,8 @@ static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
         slpte = vtd_get_slpte(addr, offset);
         if (slpte == (uint64_t)-1) {
-            trace_vtd_err_dmar_slpte_read_error(iova, level);
+            error_report_once("%s: detected read error on DMAR slpte "
+                              "(iova=0x%" PRIx64 ")", __func__, iova);
             if (level == vtd_ce_get_level(ce)) {
                 /* Invalid programming of context-entry */
                 return -VTD_FR_CONTEXT_ENTRY_INV;
@@ -728,11 +730,17 @@ static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
         *reads = (*reads) && (slpte & VTD_SL_R);
         *writes = (*writes) && (slpte & VTD_SL_W);
         if (!(slpte & access_right_check)) {
-            trace_vtd_err_dmar_slpte_perm_error(iova, level, slpte, is_write);
+            error_report_once("%s: detected slpte permission error "
+                              "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", "
+                              "slpte=0x%" PRIx64 ", write=%d)", __func__,
+                              iova, level, slpte, is_write);
             return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
         }
         if (vtd_slpte_nonzero_rsvd(slpte, level)) {
-            trace_vtd_err_dmar_slpte_resv_error(iova, level, slpte);
+            error_report_once("%s: detected splte reserve non-zero "
+                              "iova=0x%" PRIx64 ", level=0x%" PRIx32
+                              "slpte=0x%" PRIx64 ")", __func__, iova,
+                              level, slpte);
             return -VTD_FR_PAGING_ENTRY_RSVD;
         }
@@ -1056,8 +1064,10 @@ static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as,
              * we just skip the sync for this time. After all we even
              * don't have the root table pointer!
              */
-            trace_vtd_err("Detected invalid context entry when "
-                          "trying to sync shadow page table");
+            error_report_once("%s: invalid context entry for bus 0x%x"
+                              " devfn 0x%x",
+                              __func__, pci_bus_num(vtd_as->bus),
+                              vtd_as->devfn);
             return 0;
         }
     }
@@ -1514,7 +1524,8 @@ static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
         break;
     default:
-        trace_vtd_err("Context cache invalidate type error.");
+        error_report_once("%s: invalid context: 0x%" PRIx64,
+                          __func__, val);
         caig = 0;
     }
     return caig;
@@ -1634,7 +1645,8 @@ static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
         am = VTD_IVA_AM(addr);
         addr = VTD_IVA_ADDR(addr);
         if (am > VTD_MAMV) {
-            trace_vtd_err("IOTLB PSI flush: address mask overflow.");
+            error_report_once("%s: address mask overflow: 0x%" PRIx64,
+                              __func__, vtd_get_quad_raw(s, DMAR_IVA_REG));
             iaig = 0;
             break;
         }
@@ -1643,7 +1655,8 @@ static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
         break;
     default:
-        trace_vtd_err("IOTLB flush: invalid granularity.");
+        error_report_once("%s: invalid granularity: 0x%" PRIx64,
+                          __func__, val);
         iaig = 0;
     }
     return iaig;
@@ -1692,7 +1705,10 @@ static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en)
             /* Ok - report back to driver */
             vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
         } else {
-            trace_vtd_err_qi_disable(s->iq_head, s->iq_tail, s->iq_last_desc_type);
+            error_report_once("%s: detected improper state when disable QI "
+                              "(head=0x%x, tail=0x%x, last_type=%d)",
+                              __func__,
+                              s->iq_head, s->iq_tail, s->iq_last_desc_type);
         }
     }
 }
@@ -1793,8 +1809,8 @@ static void vtd_handle_ccmd_write(IntelIOMMUState *s)
     /* Context-cache invalidation request */
     if (val & VTD_CCMD_ICC) {
         if (s->qi_enabled) {
-            trace_vtd_err("Queued Invalidation enabled, "
-                          "should not use register-based invalidation");
+            error_report_once("Queued Invalidation enabled, "
+                              "should not use register-based invalidation");
             return;
         }
         ret = vtd_context_cache_invalidate(s, val);
@@ -1814,8 +1830,8 @@ static void vtd_handle_iotlb_write(IntelIOMMUState *s)
     /* IOTLB invalidation request */
     if (val & VTD_TLB_IVT) {
         if (s->qi_enabled) {
-            trace_vtd_err("Queued Invalidation enabled, "
-                          "should not use register-based invalidation.");
+            error_report_once("Queued Invalidation enabled, "
+                              "should not use register-based invalidation");
             return;
         }
         ret = vtd_iotlb_flush(s, val);
@@ -1833,7 +1849,7 @@ static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset,
     dma_addr_t addr = base_addr + offset * sizeof(*inv_desc);
     if (dma_memory_read(&address_space_memory, addr, inv_desc,
                         sizeof(*inv_desc))) {
-        trace_vtd_err("Read INV DESC failed.");
+        error_report_once("Read INV DESC failed");
         inv_desc->lo = 0;
         inv_desc->hi = 0;
         return false;
@@ -2089,7 +2105,9 @@ static void vtd_fetch_inv_desc(IntelIOMMUState *s)
     if (s->iq_tail >= s->iq_size) {
         /* Detects an invalid Tail pointer */
-        trace_vtd_err_qi_tail(s->iq_tail, s->iq_size);
+        error_report_once("%s: detected invalid QI tail "
+                          "(tail=0x%x, size=0x%x)",
+                          __func__, s->iq_tail, s->iq_size);
         vtd_handle_inv_queue_error(s);
         return;
     }
@@ -2188,7 +2206,8 @@ static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
     trace_vtd_reg_read(addr, size);
     if (addr + size > DMAR_REG_SIZE) {
-        trace_vtd_err("Read MMIO over range.");
+        error_report_once("%s: MMIO over range: addr=0x%" PRIx64
+                          " size=0x%u", __func__, addr, size);
         return (uint64_t)-1;
     }
@@ -2239,7 +2258,8 @@ static void vtd_mem_write(void *opaque, hwaddr addr,
     trace_vtd_reg_write(addr, size, val);
     if (addr + size > DMAR_REG_SIZE) {
-        trace_vtd_err("Write MMIO over range.");
+        error_report_once("%s: MMIO over range: addr=0x%" PRIx64
+                          " size=0x%u", __func__, addr, size);
         return;
     }
@@ -2500,10 +2520,12 @@ static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
                                 iotlb.iova, iotlb.translated_addr,
                                 iotlb.addr_mask);
     } else {
-        trace_vtd_err_dmar_translate(pci_bus_num(vtd_as->bus),
-                                     VTD_PCI_SLOT(vtd_as->devfn),
-                                     VTD_PCI_FUNC(vtd_as->devfn),
-                                     iotlb.iova);
+        error_report_once("%s: detected translation failure "
+                          "(dev=%02x:%02x:%02x, iova=0x%" PRIx64 ")",
+                          __func__, pci_bus_num(vtd_as->bus),
+                          VTD_PCI_SLOT(vtd_as->devfn),
+                          VTD_PCI_FUNC(vtd_as->devfn),
+                          iotlb.iova);
     }
     return iotlb;
@@ -2610,7 +2632,8 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
     addr = iommu->intr_root + index * sizeof(*entry);
     if (dma_memory_read(&address_space_memory, addr, entry,
                         sizeof(*entry))) {
-        trace_vtd_err("Memory read failed for IRTE.");
+        error_report_once("%s: read failed: ind=0x%x addr=0x%" PRIx64,
+                          __func__, index, addr);
         return -VTD_FR_IR_ROOT_INVAL;
     }
@@ -2618,15 +2641,19 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
                           le64_to_cpu(entry->data[0]));
     if (!entry->irte.present) {
-        trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
-                           le64_to_cpu(entry->data[0]));
+        error_report_once("%s: detected non-present IRTE "
+                          "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
+                          __func__, index, le64_to_cpu(entry->data[1]),
+                          le64_to_cpu(entry->data[0]));
         return -VTD_FR_IR_ENTRY_P;
     }
     if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
         entry->irte.__reserved_2) {
-        trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
-                           le64_to_cpu(entry->data[0]));
+        error_report_once("%s: detected non-zero reserved IRTE "
+                          "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
+                          __func__, index, le64_to_cpu(entry->data[1]),
+                          le64_to_cpu(entry->data[0]));
         return -VTD_FR_IR_IRTE_RSVD;
     }
@@ -2640,7 +2667,9 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
     case VTD_SVT_ALL:
         mask = vtd_svt_mask[entry->irte.sid_q];
         if ((source_id & mask) != (sid & mask)) {
-            trace_vtd_err_irte_sid(index, sid, source_id);
+            error_report_once("%s: invalid IRTE SID "
+                              "(index=%u, sid=%u, source_id=%u)",
+                              __func__, index, sid, source_id);
             return -VTD_FR_IR_SID_ERR;
         }
         break;
@@ -2650,13 +2679,17 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
         bus_min = source_id & 0xff;
         bus = sid >> 8;
         if (bus > bus_max || bus < bus_min) {
-            trace_vtd_err_irte_sid_bus(index, bus, bus_min, bus_max);
+            error_report_once("%s: invalid SVT_BUS "
+                              "(index=%u, bus=%u, min=%u, max=%u)",
+                              __func__, index, bus, bus_min, bus_max);
             return -VTD_FR_IR_SID_ERR;
         }
         break;
     default:
-        trace_vtd_err_irte_svt(index, entry->irte.sid_vtype);
+        error_report_once("%s: detected invalid IRTE SVT "
+                          "(index=%u, type=%d)", __func__,
+                          index, entry->irte.sid_vtype);
         /* Take this as verification failure. */
         return -VTD_FR_IR_SID_ERR;
         break;
@@ -2742,14 +2775,15 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
     }
     if (origin->address & VTD_MSI_ADDR_HI_MASK) {
-        trace_vtd_err("MSI address high 32 bits non-zero when "
-                      "Interrupt Remapping enabled.");
+        error_report_once("%s: MSI address high 32 bits non-zero detected: "
+                          "address=0x%" PRIx64, __func__, origin->address);
         return -VTD_FR_IR_REQ_RSVD;
     }
     addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
     if (addr.addr.__head != 0xfee) {
-        trace_vtd_err("MSI addr low 32 bit invalid.");
+        error_report_once("%s: MSI address low 32 bit invalid: 0x%" PRIx32,
+                          __func__, addr.data);
         return -VTD_FR_IR_REQ_RSVD;
     }
@@ -2777,7 +2811,10 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
     if (addr.addr.sub_valid) {
         trace_vtd_ir_remap_type("MSI");
         if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
-            trace_vtd_err_ir_msi_invalid(sid, origin->address, origin->data);
+            error_report_once("%s: invalid IR MSI "
+                              "(sid=%u, address=0x%" PRIx64
+                              ", data=0x%" PRIx32 ")",
+                              __func__, sid, origin->address, origin->data);
             return -VTD_FR_IR_REQ_RSVD;
         }
     } else {
diff --git a/hw/i386/trace-events b/hw/i386/trace-events
index e14d06ec83..9e6fc4dca9 100644
--- a/hw/i386/trace-events
+++ b/hw/i386/trace-events
@@ -69,20 +69,7 @@ vtd_ir_remap_msi_req(uint64_t addr, uint64_t data) "addr 0x%"PRIx64" data 0x%"PRIx64
 vtd_fsts_ppf(bool set) "FSTS PPF bit set to %d"
 vtd_fsts_clear_ip(void) ""
 vtd_frr_new(int index, uint64_t hi, uint64_t lo) "index %d high 0x%"PRIx64" low 0x%"PRIx64
-vtd_err(const char *str) "%s"
-vtd_err_dmar_iova_overflow(uint64_t iova) "iova 0x%"PRIx64
-vtd_err_dmar_slpte_read_error(uint64_t iova, int level) "iova 0x%"PRIx64" level %d"
-vtd_err_dmar_slpte_perm_error(uint64_t iova, int level, uint64_t slpte, bool is_write) "iova 0x%"PRIx64" level %d slpte 0x%"PRIx64" write %d"
-vtd_err_dmar_slpte_resv_error(uint64_t iova, int level, uint64_t slpte) "iova 0x%"PRIx64" level %d slpte 0x%"PRIx64
-vtd_err_dmar_translate(uint8_t bus, uint8_t slot, uint8_t func, uint64_t iova) "dev %02x:%02x.%02x iova 0x%"PRIx64
 vtd_warn_invalid_qi_tail(uint16_t tail) "tail 0x%"PRIx16
-vtd_err_qi_disable(uint16_t head, uint16_t tail, int type) "head 0x%"PRIx16" tail 0x%"PRIx16" last_desc_type %d"
-vtd_err_qi_tail(uint16_t tail, uint16_t size) "tail 0x%"PRIx16" size 0x%"PRIx16
-vtd_err_irte(int index, uint64_t lo, uint64_t hi) "index %d low 0x%"PRIx64" high 0x%"PRIx64
-vtd_err_irte_sid(int index, uint16_t req, uint16_t target) "index %d SVT_ALL sid 0x%"PRIx16" (should be: 0x%"PRIx16")"
-vtd_err_irte_sid_bus(int index, uint8_t bus, uint8_t min, uint8_t max) "index %d SVT_BUS bus 0x%"PRIx8" (should be: 0x%"PRIx8"-0x%"PRIx8")"
-vtd_err_irte_svt(int index, int type) "index %d SVT type %d"
-vtd_err_ir_msi_invalid(uint16_t sid, uint64_t addr, uint64_t data) "sid 0x%"PRIx16" addr 0x%"PRIx64" data 0x%"PRIx64
 vtd_warn_ir_vector(uint16_t sid, int index, int vec, int target) "sid 0x%"PRIx16" index %d vec %d (should be: %d)"
 vtd_warn_ir_trigger(uint16_t sid, int index, int trig, int target) "sid 0x%"PRIx16" index %d trigger %d (should be: %d)"
diff --git a/include/qemu/error-report.h b/include/qemu/error-report.h
index e1c8ae1a52..72fab2b031 100644
--- a/include/qemu/error-report.h
+++ b/include/qemu/error-report.h
@@ -44,6 +44,38 @@ void error_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
 void warn_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
 void info_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
 
+/*
+ * Similar to error_report(), except it prints the message just once.
+ * Return true when it prints, false otherwise.
+ */
+#define error_report_once(fmt, ...)                     \
+    ({                                                  \
+        static bool print_once_;                        \
+        bool ret_print_once_ = !print_once_;            \
+                                                        \
+        if (!print_once_) {                             \
+            print_once_ = true;                         \
+            error_report(fmt, ##__VA_ARGS__);           \
+        }                                               \
+        unlikely(ret_print_once_);                      \
+    })
+
+/*
+ * Similar to warn_report(), except it prints the message just once.
+ * Return true when it prints, false otherwise.
+ */
+#define warn_report_once(fmt, ...)                      \
+    ({                                                  \
+        static bool print_once_;                        \
+        bool ret_print_once_ = !print_once_;            \
+                                                        \
+        if (!print_once_) {                             \
+            print_once_ = true;                         \
+            warn_report(fmt, ##__VA_ARGS__);            \
+        }                                               \
+        unlikely(ret_print_once_);                      \
+    })
+
 const char *error_get_progname(void);
 
 extern bool enable_timestamp_msg;
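
The new macros are drop-in replacements for error_report()/warn_report() that print a given call site's message only the first time it is reached and return whether anything was printed; since the static flag lives inside each expansion, "once" means once per call site per process, not once globally. A minimal usage sketch follows: the device name, DEMO_REG_SIZE and demo_mmio_write() are hypothetical and only the error_report_once() call reflects the interface added above.

    #include "qemu/osdep.h"
    #include "qemu/error-report.h"

    #define DEMO_REG_SIZE 0x1000    /* hypothetical MMIO window size */

    /* Hypothetical MMIO write handler in a device model. */
    void demo_mmio_write(uint64_t addr, uint64_t size)
    {
        if (addr + size > DEMO_REG_SIZE) {
            /*
             * Logged at most once for this call site, however often a
             * misbehaving guest retriggers it; returns true only on the
             * first occurrence.
             */
            if (error_report_once("demo: MMIO write over range: addr=0x%" PRIx64
                                  " size=0x%" PRIx64, addr, size)) {
                /* first hit: a device could dump additional state here */
            }
            return;
        }
        /* ... normal register handling ... */
    }

This is what the intel_iommu.c hunks above rely on: guest-triggerable error paths are reported once to the user instead of flooding stderr or requiring a trace point to be enabled.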