author    Christoph Hellwig <hch@lst.de>  2018-09-20 14:04:08 +0200
committer Christoph Hellwig <hch@lst.de>  2018-10-01 07:28:03 -0700
commit    b4ebe6063204da58e48600b810a97c29ae9e5d12
tree      5bf1cf53eeaa87f1bf0d89e8f55cf27d720599b9 /kernel/dma
parent    7d21ee4c719f00896767ce19c4c01a56374c2ced
download  linux-b4ebe6063204da58e48600b810a97c29ae9e5d12.tar.bz2
dma-direct: implement complete bus_dma_mask handling
Instead of rejecting devices with a too small bus_dma_mask, we can handle them by taking the bus dma_mask into account for allocations and bounce buffering decisions.

Signed-off-by: Christoph Hellwig <hch@lst.de>
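The pattern at the heart of the patch recurs in each hunk below: a non-zero bus_dma_mask, set when an upstream bridge or interconnect carries fewer address bits than the device itself, caps the limit the device mask alone would allow. A minimal standalone sketch of that capping rule (plain C, not kernel code; the function name is illustrative):

	#include <stdint.h>

	/* A bus_mask of 0 means "no bus constraint", mirroring an unset
	 * dev->bus_dma_mask; otherwise the narrower limit wins. */
	static uint64_t effective_dma_mask(uint64_t dev_mask, uint64_t bus_mask)
	{
		if (bus_mask && bus_mask < dev_mask)
			return bus_mask;
		return dev_mask;
	}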
Diffstat (limited to 'kernel/dma')
-rw-r--r--	kernel/dma/direct.c	21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index e78548397a92..60c433b880e0 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -44,10 +44,11 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 			return false;
 		}
 
-		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
+		if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
 			dev_err(dev,
-				"%s: overflow %pad+%zu of device mask %llx\n",
-				caller, &dma_addr, size, *dev->dma_mask);
+				"%s: overflow %pad+%zu of device mask %llx bus mask %llx\n",
+				caller, &dma_addr, size,
+				*dev->dma_mask, dev->bus_dma_mask);
 		}
 		return false;
 	}
@@ -66,12 +67,18 @@ u64 dma_direct_get_required_mask(struct device *dev)
 {
 	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
 
+	if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
+		max_dma = dev->bus_dma_mask;
+
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
 static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 		u64 *phys_mask)
 {
+	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
+		dma_mask = dev->bus_dma_mask;
+
 	if (force_dma_unencrypted())
 		*phys_mask = __dma_to_phys(dev, dma_mask);
 	else
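The expression in dma_direct_get_required_mask() rounds the highest reachable address (now first capped at bus_dma_mask) up to a contiguous power-of-two-minus-one mask. A userspace sketch of the arithmetic, where fls64_sketch() is a hypothetical stand-in for the kernel's fls64():

	#include <stdio.h>
	#include <stdint.h>

	/* 1-based index of the most significant set bit, 0 for an
	 * all-zero input, matching fls64() semantics. */
	static int fls64_sketch(uint64_t x)
	{
		return x ? 64 - __builtin_clzll(x) : 0;
	}

	int main(void)
	{
		uint64_t max_dma = 0x40000000ULL;	/* highest reachable address */

		/* Same expression as in the hunk above. */
		uint64_t mask = (1ULL << (fls64_sketch(max_dma) - 1)) * 2 - 1;

		printf("%#llx\n", (unsigned long long)mask);	/* prints 0x7fffffff */
		return 0;
	}

The same capping is applied in __dma_direct_optimal_gfp_mask() before the mask is translated into a GFP zone choice, so allocations land in memory the bus can actually address.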
@@ -88,7 +95,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
 	return phys_to_dma_direct(dev, phys) + size - 1 <=
-			dev->coherent_dma_mask;
+			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
 }
 
 void *dma_direct_alloc_pages(struct device *dev, size_t size,
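dma_coherent_ok() now bounds coherent allocations by min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask): an unset (zero) bus mask leaves the check unchanged, while a set one can only tighten it. Roughly, the helper behaves like this userspace approximation (min_not_zero_sketch is illustrative, not the kernel macro itself):

	#include <stdint.h>

	/* Smaller of the two values, except that 0 means "unset" and is
	 * skipped; returns 0 only when both inputs are 0. */
	static uint64_t min_not_zero_sketch(uint64_t x, uint64_t y)
	{
		if (x == 0)
			return y;
		if (y == 0)
			return x;
		return x < y ? x : y;
	}

For example, a device with a 64-bit coherent_dma_mask behind a 32-bit-limited bus is now checked against the 32-bit bus mask.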
@@ -292,12 +299,6 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
 		return 0;
 #endif
-	/*
-	 * Upstream PCI/PCIe bridges or SoC interconnects may not carry
-	 * as many DMA address bits as the device itself supports.
-	 */
-	if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
-		return 0;
 	return 1;
 }
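Because allocations and bounce buffering decisions now honor bus_dma_mask (the hunks above), dma_direct_supported() no longer has to reject such devices outright. From a driver's perspective (a hypothetical PCI driver context; dma_set_mask_and_coherent() is the regular kernel API), mask negotiation for a 64-bit-capable device behind a narrower bus now succeeds and is simply capped internally:

	/* pdev is a hypothetical struct pci_dev *; with this patch the
	 * call no longer fails merely because dev->bus_dma_mask is
	 * smaller than the requested 64-bit mask. */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		dev_warn(&pdev->dev, "no usable DMA addressing mode\n");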