Diffstat (limited to 'dma.h')
-rw-r--r-- | dma.h | 218 |
1 files changed, 215 insertions, 3 deletions
@@ -13,7 +13,9 @@
 #include <stdio.h>
 #include "hw/hw.h"
 #include "block.h"
+#include "kvm.h"
 
+typedef struct DMAContext DMAContext;
 typedef struct ScatterGatherEntry ScatterGatherEntry;
 
 typedef enum {
@@ -26,19 +28,229 @@ struct QEMUSGList {
     int nsg;
     int nalloc;
     size_t size;
+    DMAContext *dma;
 };
 
 #if defined(TARGET_PHYS_ADDR_BITS)
-typedef target_phys_addr_t dma_addr_t;
 
-#define DMA_ADDR_FMT TARGET_FMT_plx
+/*
+ * When an IOMMU is present, bus addresses become distinct from
+ * CPU/memory physical addresses and may be a different size.  Because
+ * the IOVA size depends more on the bus than on the platform, we more
+ * or less have to treat these as 64-bit always to cover all (or at
+ * least most) cases.
+ */
+typedef uint64_t dma_addr_t;
+
+#define DMA_ADDR_BITS 64
+#define DMA_ADDR_FMT "%" PRIx64
+
+typedef int DMATranslateFunc(DMAContext *dma,
+                             dma_addr_t addr,
+                             target_phys_addr_t *paddr,
+                             target_phys_addr_t *len,
+                             DMADirection dir);
+typedef void* DMAMapFunc(DMAContext *dma,
+                         dma_addr_t addr,
+                         dma_addr_t *len,
+                         DMADirection dir);
+typedef void DMAUnmapFunc(DMAContext *dma,
+                          void *buffer,
+                          dma_addr_t len,
+                          DMADirection dir,
+                          dma_addr_t access_len);
+
+struct DMAContext {
+    DMATranslateFunc *translate;
+    DMAMapFunc *map;
+    DMAUnmapFunc *unmap;
+};
+
+static inline void dma_barrier(DMAContext *dma, DMADirection dir)
+{
+    /*
+     * This is called before DMA read and write operations
+     * unless the _relaxed form is used and is responsible
+     * for providing some sane ordering of accesses vs
+     * concurrently running VCPUs.
+     *
+     * Users of map(), unmap() or lower level st/ld_*
+     * operations are responsible for providing their own
+     * ordering via barriers.
+     *
+     * This primitive implementation does a simple smp_mb()
+     * before each operation which provides pretty much full
+     * ordering.
+     *
+     * A smarter implementation can be devised if needed to
+     * use lighter barriers based on the direction of the
+     * transfer, the DMA context, etc...
+     */
+    if (kvm_enabled()) {
+        smp_mb();
+    }
+}
+
+static inline bool dma_has_iommu(DMAContext *dma)
+{
+    return !!dma;
+}
+
+/* Checks that the given range of addresses is valid for DMA.  This is
+ * useful for certain cases, but usually you should just use
+ * dma_memory_{read,write}() and check for errors */
+bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
+                            DMADirection dir);
+static inline bool dma_memory_valid(DMAContext *dma,
+                                    dma_addr_t addr, dma_addr_t len,
+                                    DMADirection dir)
+{
+    if (!dma_has_iommu(dma)) {
+        return true;
+    } else {
+        return iommu_dma_memory_valid(dma, addr, len, dir);
+    }
+}
+
+int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
+                        void *buf, dma_addr_t len, DMADirection dir);
+static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr,
+                                        void *buf, dma_addr_t len,
+                                        DMADirection dir)
+{
+    if (!dma_has_iommu(dma)) {
+        /* Fast-path for no IOMMU */
+        cpu_physical_memory_rw(addr, buf, len,
+                               dir == DMA_DIRECTION_FROM_DEVICE);
+        return 0;
+    } else {
+        return iommu_dma_memory_rw(dma, addr, buf, len, dir);
+    }
+}
+
+static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
+                                          void *buf, dma_addr_t len)
+{
+    return dma_memory_rw_relaxed(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
+}
+
+static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
+                                           const void *buf, dma_addr_t len)
+{
+    return dma_memory_rw_relaxed(dma, addr, (void *)buf, len,
+                                 DMA_DIRECTION_FROM_DEVICE);
+}
+
+static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
+                                void *buf, dma_addr_t len,
+                                DMADirection dir)
+{
+    dma_barrier(dma, dir);
+
+    return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
+}
+
+static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
+                                  void *buf, dma_addr_t len)
+{
+    return dma_memory_rw(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
+}
+
+static inline int dma_memory_write(DMAContext *dma, dma_addr_t addr,
+                                   const void *buf, dma_addr_t len)
+{
+    return dma_memory_rw(dma, addr, (void *)buf, len,
+                         DMA_DIRECTION_FROM_DEVICE);
+}
+
+int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
+                         dma_addr_t len);
+
+int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);
+
+void *iommu_dma_memory_map(DMAContext *dma,
+                           dma_addr_t addr, dma_addr_t *len,
+                           DMADirection dir);
+static inline void *dma_memory_map(DMAContext *dma,
+                                   dma_addr_t addr, dma_addr_t *len,
+                                   DMADirection dir)
+{
+    if (!dma_has_iommu(dma)) {
+        target_phys_addr_t xlen = *len;
+        void *p;
+
+        p = cpu_physical_memory_map(addr, &xlen,
+                                    dir == DMA_DIRECTION_FROM_DEVICE);
+        *len = xlen;
+        return p;
+    } else {
+        return iommu_dma_memory_map(dma, addr, len, dir);
+    }
+}
+
+void iommu_dma_memory_unmap(DMAContext *dma,
+                            void *buffer, dma_addr_t len,
+                            DMADirection dir, dma_addr_t access_len);
+static inline void dma_memory_unmap(DMAContext *dma,
+                                    void *buffer, dma_addr_t len,
+                                    DMADirection dir, dma_addr_t access_len)
+{
+    if (!dma_has_iommu(dma)) {
+        cpu_physical_memory_unmap(buffer, (target_phys_addr_t)len,
+                                  dir == DMA_DIRECTION_FROM_DEVICE,
+                                  access_len);
+    } else {
+        iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
+    }
+}
+
+#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end)                    \
+    static inline uint##_bits##_t ld##_lname##_##_end##_dma(DMAContext *dma, \
+                                                            dma_addr_t addr) \
+    {                                                                   \
+        uint##_bits##_t val;                                            \
+        dma_memory_read(dma, addr, &val, (_bits) / 8);                  \
+        return _end##_bits##_to_cpu(val);                               \
+    }                                                                   \
+    static inline void st##_sname##_##_end##_dma(DMAContext *dma,       \
+                                                 dma_addr_t addr,       \
+                                                 uint##_bits##_t val)   \
+    {                                                                   \
+        val = cpu_to_##_end##_bits(val);                                \
+        dma_memory_write(dma, addr, &val, (_bits) / 8);                 \
+    }
+
+static inline uint8_t ldub_dma(DMAContext *dma, dma_addr_t addr)
+{
+    uint8_t val;
+
+    dma_memory_read(dma, addr, &val, 1);
+    return val;
+}
+
+static inline void stb_dma(DMAContext *dma, dma_addr_t addr, uint8_t val)
+{
+    dma_memory_write(dma, addr, &val, 1);
+}
+
+DEFINE_LDST_DMA(uw, w, 16, le);
+DEFINE_LDST_DMA(l, l, 32, le);
+DEFINE_LDST_DMA(q, q, 64, le);
+DEFINE_LDST_DMA(uw, w, 16, be);
+DEFINE_LDST_DMA(l, l, 32, be);
+DEFINE_LDST_DMA(q, q, 64, be);
+
+#undef DEFINE_LDST_DMA
+
+void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
+                      DMAMapFunc map, DMAUnmapFunc unmap);
 
 struct ScatterGatherEntry {
     dma_addr_t base;
     dma_addr_t len;
 };
 
-void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint);
+void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma);
 void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
 void qemu_sglist_destroy(QEMUSGList *qsg);
 
 #endif
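
To make the shape of the new API concrete, here is a minimal usage sketch from a device model's point of view. The device name (MyDeviceState), the helper functions, the descriptor layout, and the 16-byte stride are all hypothetical; the calls themselves (ldq_le_dma(), ldl_le_dma(), stb_dma()) are the accessors introduced by this patch, and a NULL DMAContext takes the no-IOMMU fast path via cpu_physical_memory_rw().

#include "dma.h"

/* Hypothetical device state: only the DMAContext pointer is essential. */
typedef struct MyDeviceState {
    DMAContext *dma;      /* NULL when no IOMMU sits in front of the device */
    dma_addr_t ring_base; /* bus address of a (made-up) descriptor ring */
} MyDeviceState;

/* Fetch one 16-byte descriptor (hypothetical layout: a little-endian
 * 64-bit buffer address followed by a 32-bit length). */
static void my_device_fetch_desc(MyDeviceState *s, int idx,
                                 uint64_t *buf_addr, uint32_t *buf_len)
{
    dma_addr_t desc = s->ring_base + (dma_addr_t)idx * 16;

    /* Ordered accessors: dma_barrier() runs before each access. */
    *buf_addr = ldq_le_dma(s->dma, desc);
    *buf_len = ldl_le_dma(s->dma, desc + 8);
}

/* Write a completion status back through the same context. */
static void my_device_complete(MyDeviceState *s, int idx, uint8_t status)
{
    stb_dma(s->dma, s->ring_base + (dma_addr_t)idx * 16 + 12, status);
}

Bulk transfers would use dma_memory_read()/dma_memory_write() instead, checking the int return for failed translations rather than assuming success as the ld/st helpers do.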
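On the IOMMU side, a bus provides translation by filling in a DMAContext through dma_context_init(). The sketch below is an invented single-window example: the WINDOW_* constants and names are made up, the return convention (0 on success, nonzero on failure) is an assumption read off the int return type, and passing NULL for map/unmap assumes the generic iommu_dma_memory_map() bounce-buffer fallback elsewhere in this series.

#include "dma.h"

/* Invented example: one IOVA window mapped linearly onto physical memory. */
#define WINDOW_IOVA_BASE 0x100000000ULL
#define WINDOW_SIZE      0x40000000ULL
#define WINDOW_PHYS_BASE 0x80000000ULL

static int window_translate(DMAContext *dma, dma_addr_t addr,
                            target_phys_addr_t *paddr,
                            target_phys_addr_t *len,
                            DMADirection dir)
{
    if (addr < WINDOW_IOVA_BASE || addr >= WINDOW_IOVA_BASE + WINDOW_SIZE) {
        return -1; /* reject accesses outside the window (assumed convention) */
    }
    *paddr = WINDOW_PHYS_BASE + (addr - WINDOW_IOVA_BASE);
    /* Clamp *len to the contiguously mapped bytes remaining from addr. */
    if (*len > WINDOW_IOVA_BASE + WINDOW_SIZE - addr) {
        *len = WINDOW_IOVA_BASE + WINDOW_SIZE - addr;
    }
    return 0;
}

static DMAContext window_dma;

static void window_dma_setup(void)
{
    /* NULL map/unmap assumes the common translated path is used when no
     * direct mapping callback is provided. */
    dma_context_init(&window_dma, window_translate, NULL, NULL);
}

Devices behind this bus would then be handed &window_dma instead of NULL, and every dma_memory_*() call above routes through window_translate().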