From 54ed812162148ec7819a33b4fe112b8dc9d1cd00 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 12:33:39 +0200 Subject: firmware/ivc: use dma_mapping_error DMA_ERROR_CODE is not supposed to be used by drivers. Signed-off-by: Christoph Hellwig Acked-by: Thierry Reding --- drivers/firmware/tegra/ivc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c index 29ecfd815320..a01461d63f68 100644 --- a/drivers/firmware/tegra/ivc.c +++ b/drivers/firmware/tegra/ivc.c @@ -646,12 +646,12 @@ int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx, if (peer) { ivc->rx.phys = dma_map_single(peer, rx, queue_size, DMA_BIDIRECTIONAL); - if (ivc->rx.phys == DMA_ERROR_CODE) + if (dma_mapping_error(peer, ivc->rx.phys)) return -ENOMEM; ivc->tx.phys = dma_map_single(peer, tx, queue_size, DMA_BIDIRECTIONAL); - if (ivc->tx.phys == DMA_ERROR_CODE) { + if (dma_mapping_error(peer, ivc->tx.phys)) { dma_unmap_single(peer, ivc->rx.phys, queue_size, DMA_BIDIRECTIONAL); return -ENOMEM; -- cgit v1.2.3 From d43732ce021f4244a4c53a45d57ad0c13c30ed94 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 12:45:58 +0200 Subject: ibmveth: properly unwind on init errors That way the driver doesn't have to rely on DMA_ERROR_CODE, which is not a public API and is going away. Signed-off-by: Christoph Hellwig Acked-by: David S. Miller --- drivers/net/ethernet/ibm/ibmveth.c | 159 +++++++++++++++++-------------------- 1 file changed, 74 insertions(+), 85 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 72ab7b6bf20b..3ac27f59e595 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -467,56 +467,6 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) } } -static void ibmveth_cleanup(struct ibmveth_adapter *adapter) -{ - int i; - struct device *dev = &adapter->vdev->dev; - - if (adapter->buffer_list_addr != NULL) { - if (!dma_mapping_error(dev, adapter->buffer_list_dma)) { - dma_unmap_single(dev, adapter->buffer_list_dma, 4096, - DMA_BIDIRECTIONAL); - adapter->buffer_list_dma = DMA_ERROR_CODE; - } - free_page((unsigned long)adapter->buffer_list_addr); - adapter->buffer_list_addr = NULL; - } - - if (adapter->filter_list_addr != NULL) { - if (!dma_mapping_error(dev, adapter->filter_list_dma)) { - dma_unmap_single(dev, adapter->filter_list_dma, 4096, - DMA_BIDIRECTIONAL); - adapter->filter_list_dma = DMA_ERROR_CODE; - } - free_page((unsigned long)adapter->filter_list_addr); - adapter->filter_list_addr = NULL; - } - - if (adapter->rx_queue.queue_addr != NULL) { - dma_free_coherent(dev, adapter->rx_queue.queue_len, - adapter->rx_queue.queue_addr, - adapter->rx_queue.queue_dma); - adapter->rx_queue.queue_addr = NULL; - } - - for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) - if (adapter->rx_buff_pool[i].active) - ibmveth_free_buffer_pool(adapter, - &adapter->rx_buff_pool[i]); - - if (adapter->bounce_buffer != NULL) { - if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) { - dma_unmap_single(&adapter->vdev->dev, - adapter->bounce_buffer_dma, - adapter->netdev->mtu + IBMVETH_BUFF_OH, - DMA_BIDIRECTIONAL); - adapter->bounce_buffer_dma = DMA_ERROR_CODE; - } - kfree(adapter->bounce_buffer); - adapter->bounce_buffer = NULL; - } -} - static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter, union ibmveth_buf_desc rxq_desc, u64 mac_address) { @@ -573,14 +523,17 @@ static int ibmveth_open(struct
net_device *netdev) for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) rxq_entries += adapter->rx_buff_pool[i].size; + rc = -ENOMEM; adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); - adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); + if (!adapter->buffer_list_addr) { + netdev_err(netdev, "unable to allocate list pages\n"); + goto out; + } - if (!adapter->buffer_list_addr || !adapter->filter_list_addr) { - netdev_err(netdev, "unable to allocate filter or buffer list " - "pages\n"); - rc = -ENOMEM; - goto err_out; + adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); + if (!adapter->filter_list_addr) { + netdev_err(netdev, "unable to allocate filter pages\n"); + goto out_free_buffer_list; } dev = &adapter->vdev->dev; @@ -590,22 +543,21 @@ static int ibmveth_open(struct net_device *netdev) adapter->rx_queue.queue_addr = dma_alloc_coherent(dev, adapter->rx_queue.queue_len, &adapter->rx_queue.queue_dma, GFP_KERNEL); - if (!adapter->rx_queue.queue_addr) { - rc = -ENOMEM; - goto err_out; - } + if (!adapter->rx_queue.queue_addr) + goto out_free_filter_list; adapter->buffer_list_dma = dma_map_single(dev, adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, adapter->buffer_list_dma)) { + netdev_err(netdev, "unable to map buffer list pages\n"); + goto out_free_queue_mem; + } + adapter->filter_list_dma = dma_map_single(dev, adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); - - if ((dma_mapping_error(dev, adapter->buffer_list_dma)) || - (dma_mapping_error(dev, adapter->filter_list_dma))) { - netdev_err(netdev, "unable to map filter or buffer list " - "pages\n"); - rc = -ENOMEM; - goto err_out; + if (dma_mapping_error(dev, adapter->filter_list_dma)) { + netdev_err(netdev, "unable to map filter list pages\n"); + goto out_unmap_buffer_list; } adapter->rx_queue.index = 0; @@ -636,7 +588,7 @@ static int ibmveth_open(struct net_device *netdev) rxq_desc.desc, mac_address); rc = -ENONET; - goto err_out; + goto out_unmap_filter_list; } for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { @@ -646,7 +598,7 @@ static int ibmveth_open(struct net_device *netdev) netdev_err(netdev, "unable to alloc pool\n"); adapter->rx_buff_pool[i].active = 0; rc = -ENOMEM; - goto err_out; + goto out_free_buffer_pools; } } @@ -660,22 +612,21 @@ static int ibmveth_open(struct net_device *netdev) lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); - goto err_out; + goto out_free_buffer_pools; } + rc = -ENOMEM; adapter->bounce_buffer = kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); - if (!adapter->bounce_buffer) { - rc = -ENOMEM; - goto err_out_free_irq; - } + if (!adapter->bounce_buffer) + goto out_free_irq; + adapter->bounce_buffer_dma = dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { netdev_err(netdev, "unable to map bounce buffer\n"); - rc = -ENOMEM; - goto err_out_free_irq; + goto out_free_bounce_buffer; } netdev_dbg(netdev, "initial replenish cycle\n"); @@ -687,10 +638,31 @@ static int ibmveth_open(struct net_device *netdev) return 0; -err_out_free_irq: +out_free_bounce_buffer: + kfree(adapter->bounce_buffer); +out_free_irq: free_irq(netdev->irq, netdev); -err_out: - ibmveth_cleanup(adapter); +out_free_buffer_pools: + while (--i >= 0) { + if (adapter->rx_buff_pool[i].active) + ibmveth_free_buffer_pool(adapter, + &adapter->rx_buff_pool[i]); + } +out_unmap_filter_list: + 
dma_unmap_single(dev, adapter->filter_list_dma, 4096, + DMA_BIDIRECTIONAL); +out_unmap_buffer_list: + dma_unmap_single(dev, adapter->buffer_list_dma, 4096, + DMA_BIDIRECTIONAL); +out_free_queue_mem: + dma_free_coherent(dev, adapter->rx_queue.queue_len, + adapter->rx_queue.queue_addr, + adapter->rx_queue.queue_dma); +out_free_filter_list: + free_page((unsigned long)adapter->filter_list_addr); +out_free_buffer_list: + free_page((unsigned long)adapter->buffer_list_addr); +out: napi_disable(&adapter->napi); return rc; } @@ -698,7 +670,9 @@ err_out: static int ibmveth_close(struct net_device *netdev) { struct ibmveth_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->vdev->dev; long lpar_rc; + int i; netdev_dbg(netdev, "close starting\n"); @@ -722,7 +696,27 @@ static int ibmveth_close(struct net_device *netdev) ibmveth_update_rx_no_buffer(adapter); - ibmveth_cleanup(adapter); + dma_unmap_single(dev, adapter->buffer_list_dma, 4096, + DMA_BIDIRECTIONAL); + free_page((unsigned long)adapter->buffer_list_addr); + + dma_unmap_single(dev, adapter->filter_list_dma, 4096, + DMA_BIDIRECTIONAL); + free_page((unsigned long)adapter->filter_list_addr); + + dma_free_coherent(dev, adapter->rx_queue.queue_len, + adapter->rx_queue.queue_addr, + adapter->rx_queue.queue_dma); + + for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) + if (adapter->rx_buff_pool[i].active) + ibmveth_free_buffer_pool(adapter, + &adapter->rx_buff_pool[i]); + + dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma, + adapter->netdev->mtu + IBMVETH_BUFF_OH, + DMA_BIDIRECTIONAL); + kfree(adapter->bounce_buffer); netdev_dbg(netdev, "close complete\n"); @@ -1648,11 +1642,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) } netdev_dbg(netdev, "adapter @ 0x%p\n", adapter); - - adapter->buffer_list_dma = DMA_ERROR_CODE; - adapter->filter_list_dma = DMA_ERROR_CODE; - adapter->rx_queue.queue_dma = DMA_ERROR_CODE; - netdev_dbg(netdev, "registering netdev...\n"); ibmveth_set_features(netdev, netdev->features); -- cgit v1.2.3 From e4734b3f5ffc42d821a316383222d71dce7d5c9e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 12:54:31 +0200 Subject: dmaengine: ioat: don't use DMA_ERROR_CODE DMA_ERROR_CODE is not a public API and will go away. Instead properly unwind based on the loop counter. 
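For illustration, the unwind idiom in isolation, as a minimal sketch rather than the actual ioat code (dev, pages, and NUM_SRCS are placeholder names):

	/* map NUM_SRCS pages, unmapping the successful ones on failure */
	static int map_all(struct device *dev, struct page **pages,
			   dma_addr_t *dma_srcs)
	{
		int i;

		for (i = 0; i < NUM_SRCS; i++) {
			dma_srcs[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
						   DMA_TO_DEVICE);
			if (dma_mapping_error(dev, dma_srcs[i]))
				goto unmap;
		}
		return 0;

	unmap:
		while (--i >= 0)	/* exactly the entries [0, i) are mapped */
			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
		return -ENOMEM;
	}

Because the loop counter records how many mappings succeeded, no sentinel value is needed to tell mapped entries from unmapped ones.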
Signed-off-by: Christoph Hellwig Acked-by: Dave Jiang Acked-By: Vinod Koul --- drivers/dma/ioat/init.c | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 6ad4384b3fa8..ed8ed1192775 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -839,8 +839,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) goto free_resources; } - for (i = 0; i < IOAT_NUM_SRC_TEST; i++) - dma_srcs[i] = DMA_ERROR_CODE; for (i = 0; i < IOAT_NUM_SRC_TEST; i++) { dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); @@ -910,8 +908,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) xor_val_result = 1; - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) - dma_srcs[i] = DMA_ERROR_CODE; for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); @@ -965,8 +961,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) op = IOAT_OP_XOR_VAL; xor_val_result = 0; - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) - dma_srcs[i] = DMA_ERROR_CODE; for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); @@ -1017,18 +1011,14 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) goto free_resources; dma_unmap: if (op == IOAT_OP_XOR) { - if (dest_dma != DMA_ERROR_CODE) - dma_unmap_page(dev, dest_dma, PAGE_SIZE, - DMA_FROM_DEVICE); - for (i = 0; i < IOAT_NUM_SRC_TEST; i++) - if (dma_srcs[i] != DMA_ERROR_CODE) - dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, - DMA_TO_DEVICE); + while (--i >= 0) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, + DMA_TO_DEVICE); + dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); } else if (op == IOAT_OP_XOR_VAL) { - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) - if (dma_srcs[i] != DMA_ERROR_CODE) - dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, - DMA_TO_DEVICE); + while (--i >= 0) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, + DMA_TO_DEVICE); } free_resources: dma->device_free_chan_resources(dma_chan); -- cgit v1.2.3 From e0c7a510aea75db4927d810fb0a84233641b53c3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 10:40:52 +0200 Subject: drm/exynos: don't use DMA_ERROR_CODE DMA_ERROR_CODE already isn't a valid API to use for drivers and will go away soon. exynos_drm_fb_dma_addr uses it as an error return when the passed-in index is invalid, but the callers never check for it and instead pass the address straight to the hardware. Add a WARN_ON instead and just return 0.
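The caller-side hazard described above, sketched for illustration (the register write and PLANE_BASE_REG are stand-ins, not the actual exynos plane code):

	dma_addr_t addr = exynos_drm_fb_dma_addr(fb, i);

	/* no error check: whatever comes back is programmed into the device */
	writel(lower_32_bits(addr), ctx->regs + PLANE_BASE_REG(i));

Since the callers never test the return value, a loud WARN_ON_ONCE plus a well-defined 0 is more useful than an error cookie that silently reaches the hardware.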
Signed-off-by: Christoph Hellwig --- drivers/gpu/drm/exynos/exynos_drm_fb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index c77a5aced81a..d48fd7c918f8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -181,8 +181,8 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) { struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); - if (index >= MAX_FB_BUFFER) - return DMA_ERROR_CODE; + if (WARN_ON_ONCE(index >= MAX_FB_BUFFER)) + return 0; return exynos_fb->dma_addr[index]; } -- cgit v1.2.3 From b400585720820077704c84813930a3e255f87a23 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 10:46:22 +0200 Subject: drm/armada: don't abuse DMA_ERROR_CODE dev_addr isn't even a dma_addr_t, and DMA_ERROR_CODE has never been a valid driver API. Add a bool mapped flag instead. Signed-off-by: Christoph Hellwig --- drivers/gpu/drm/armada/armada_fb.c | 2 +- drivers/gpu/drm/armada/armada_gem.c | 5 ++--- drivers/gpu/drm/armada/armada_gem.h | 1 + 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c index 2a7eb6817c36..92e6b08ea64a 100644 --- a/drivers/gpu/drm/armada/armada_fb.c +++ b/drivers/gpu/drm/armada/armada_fb.c @@ -133,7 +133,7 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev, } /* Framebuffer objects must have a valid device address for scanout */ - if (obj->dev_addr == DMA_ERROR_CODE) { + if (!obj->mapped) { ret = -EINVAL; goto err_unref; } diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index d6c2a5d190eb..a76ca21d063b 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -175,6 +175,7 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj) obj->phys_addr = obj->linear->start; obj->dev_addr = obj->linear->start; + obj->mapped = true; } DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj, @@ -205,7 +206,6 @@ armada_gem_alloc_private_object(struct drm_device *dev, size_t size) return NULL; drm_gem_private_object_init(dev, &obj->obj, size); - obj->dev_addr = DMA_ERROR_CODE; DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size); @@ -229,8 +229,6 @@ static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev, return NULL; } - obj->dev_addr = DMA_ERROR_CODE; - mapping = obj->obj.filp->f_mapping; mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE); @@ -610,5 +608,6 @@ int armada_gem_map_import(struct armada_gem_object *dobj) return -EINVAL; } dobj->dev_addr = sg_dma_address(dobj->sgt->sgl); + dobj->mapped = true; return 0; } diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h index b88d2b9853c7..6e524e0676bb 100644 --- a/drivers/gpu/drm/armada/armada_gem.h +++ b/drivers/gpu/drm/armada/armada_gem.h @@ -16,6 +16,7 @@ struct armada_gem_object { void *addr; phys_addr_t phys_addr; resource_size_t dev_addr; + bool mapped; struct drm_mm_node *linear; /* for linear backed */ struct page *page; /* for page backed */ struct sg_table *sgt; /* for imported */ -- cgit v1.2.3 From 81a5a316756b23cd0264627366ba32b337b8fa24 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 10:55:30 +0200 Subject: iommu/dma: don't rely on DMA_ERROR_CODE DMA_ERROR_CODE is not a public API and will go away soon. 
The dma-iommu code already implements a proper ->mapping_error method, so it only uses the value internally. Add a new local define using the value that arm64, the only current user of dma-iommu, has been using. Signed-off-by: Christoph Hellwig --- drivers/iommu/dma-iommu.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 62618e77bedc..9403336f1fa6 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -31,6 +31,8 @@ #include #include +#define IOMMU_MAPPING_ERROR 0 + struct iommu_dma_msi_page { struct list_head list; dma_addr_t iova; @@ -500,7 +502,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size, { __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size); __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT); - *handle = DMA_ERROR_CODE; + *handle = IOMMU_MAPPING_ERROR; } /** @@ -533,7 +535,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, dma_addr_t iova; unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; - *handle = DMA_ERROR_CODE; + *handle = IOMMU_MAPPING_ERROR; min_size = alloc_sizes & -alloc_sizes; if (min_size < PAGE_SIZE) { @@ -627,11 +629,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); if (!iova) - return DMA_ERROR_CODE; + return IOMMU_MAPPING_ERROR; if (iommu_map(domain, iova, phys - iova_off, size, prot)) { iommu_dma_free_iova(cookie, iova, size); - return DMA_ERROR_CODE; + return IOMMU_MAPPING_ERROR; } return iova + iova_off; } @@ -671,7 +673,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents, s->offset += s_iova_off; s->length = s_length; - sg_dma_address(s) = DMA_ERROR_CODE; + sg_dma_address(s) = IOMMU_MAPPING_ERROR; sg_dma_len(s) = 0; /* @@ -714,11 +716,11 @@ static void __invalidate_sg(struct scatterlist *sg, int nents) int i; for_each_sg(sg, s, nents, i) { - if (sg_dma_address(s) != DMA_ERROR_CODE) + if (sg_dma_address(s) != IOMMU_MAPPING_ERROR) s->offset += sg_dma_address(s); if (sg_dma_len(s)) s->length = sg_dma_len(s); - sg_dma_address(s) = DMA_ERROR_CODE; + sg_dma_address(s) = IOMMU_MAPPING_ERROR; sg_dma_len(s) = 0; } } @@ -836,7 +838,7 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - return dma_addr == DMA_ERROR_CODE; + return dma_addr == IOMMU_MAPPING_ERROR; } static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, -- cgit v1.2.3 From dceb1a6819ab2c8b5564354543447b1af4fccedd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 13:15:13 +0200 Subject: xen-swiotlb: consolidate xen_swiotlb_dma_ops ARM and x86 had duplicated versions of the dma_ops structure; the only difference is that x86 hasn't wired up the set_dma_mask, mmap, and get_sgtable ops yet. On x86 all of them are identical to the generic version, so they aren't needed but are harmless. All the symbols used only for xen_swiotlb_dma_ops can now be marked static as well.
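With the single definition living in drivers/xen/swiotlb-xen.c, the per-architecture setup reduces to pointing at the shared instance, roughly like this sketch (the surrounding init logic is elided):

	/* arch/arm/xen/mm.c */
	xen_dma_ops = &xen_swiotlb_dma_ops;

	/* arch/x86/xen/pci-swiotlb-xen.c */
	dma_ops = &xen_swiotlb_dma_ops;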
Signed-off-by: Christoph Hellwig Reviewed-by: Konrad Rzeszutek Wilk --- arch/arm/xen/mm.c | 17 -------- arch/x86/xen/pci-swiotlb-xen.c | 14 ------- drivers/xen/swiotlb-xen.c | 93 ++++++++++++++++++++++-------------------- include/xen/swiotlb-xen.h | 62 +--------------------------- 4 files changed, 49 insertions(+), 137 deletions(-) diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index f0325d96b97a..785d2a562a23 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -185,23 +185,6 @@ EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); const struct dma_map_ops *xen_dma_ops; EXPORT_SYMBOL(xen_dma_ops); -static const struct dma_map_ops xen_swiotlb_dma_ops = { - .alloc = xen_swiotlb_alloc_coherent, - .free = xen_swiotlb_free_coherent, - .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, - .sync_single_for_device = xen_swiotlb_sync_single_for_device, - .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu, - .sync_sg_for_device = xen_swiotlb_sync_sg_for_device, - .map_sg = xen_swiotlb_map_sg_attrs, - .unmap_sg = xen_swiotlb_unmap_sg_attrs, - .map_page = xen_swiotlb_map_page, - .unmap_page = xen_swiotlb_unmap_page, - .dma_supported = xen_swiotlb_dma_supported, - .set_dma_mask = xen_swiotlb_set_dma_mask, - .mmap = xen_swiotlb_dma_mmap, - .get_sgtable = xen_swiotlb_get_sgtable, -}; - int __init xen_mm_init(void) { struct gnttab_cache_flush cflush; diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c index 42b08f8fc2ca..37c6056a7bba 100644 --- a/arch/x86/xen/pci-swiotlb-xen.c +++ b/arch/x86/xen/pci-swiotlb-xen.c @@ -18,20 +18,6 @@ int xen_swiotlb __read_mostly; -static const struct dma_map_ops xen_swiotlb_dma_ops = { - .alloc = xen_swiotlb_alloc_coherent, - .free = xen_swiotlb_free_coherent, - .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, - .sync_single_for_device = xen_swiotlb_sync_single_for_device, - .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu, - .sync_sg_for_device = xen_swiotlb_sync_sg_for_device, - .map_sg = xen_swiotlb_map_sg_attrs, - .unmap_sg = xen_swiotlb_unmap_sg_attrs, - .map_page = xen_swiotlb_map_page, - .unmap_page = xen_swiotlb_unmap_page, - .dma_supported = xen_swiotlb_dma_supported, -}; - /* * pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary * diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 8dab0d3dc172..a0f006daab48 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -295,7 +295,8 @@ error: free_pages((unsigned long)xen_io_tlb_start, order); return rc; } -void * + +static void * xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) @@ -346,9 +347,8 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, memset(ret, 0, size); return ret; } -EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent); -void +static void xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dev_addr, unsigned long attrs) { @@ -369,8 +369,6 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs); } -EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent); - /* * Map a single buffer of the indicated size for DMA in streaming mode. The @@ -379,7 +377,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent); * Once the device is given the dma address, the device owns this memory until * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed. 
*/ -dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, +static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) @@ -429,7 +427,6 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, return DMA_ERROR_CODE; } -EXPORT_SYMBOL_GPL(xen_swiotlb_map_page); /* * Unmap a single streaming mode DMA translation. The dma_addr and size must @@ -467,13 +464,12 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr, dma_mark_clean(phys_to_virt(paddr), size); } -void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, +static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { xen_unmap_single(hwdev, dev_addr, size, dir, attrs); } -EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page); /* * Make physical memory consistent for a single streaming mode DMA translation @@ -516,7 +512,6 @@ xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, { xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); } -EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu); void xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, @@ -524,7 +519,25 @@ xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, { xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); } -EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device); + +/* + * Unmap a set of streaming mode DMA translations. Again, cpu read rules + * concerning calls here are the same as for swiotlb_unmap_page() above. + */ +static void +xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, + int nelems, enum dma_data_direction dir, + unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + BUG_ON(dir == DMA_NONE); + + for_each_sg(sgl, sg, nelems, i) + xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs); + +} /* * Map a set of buffers described by scatterlist in streaming mode for DMA. @@ -542,7 +555,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device); * Device ownership issues as mentioned above for xen_swiotlb_map_page are the * same here. */ -int +static int xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, unsigned long attrs) @@ -599,27 +612,6 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, } return nelems; } -EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs); - -/* - * Unmap a set of streaming mode DMA translations. Again, cpu read rules - * concerning calls here are the same as for swiotlb_unmap_page() above. 
- */ -void -xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, - int nelems, enum dma_data_direction dir, - unsigned long attrs) -{ - struct scatterlist *sg; - int i; - - BUG_ON(dir == DMA_NONE); - - for_each_sg(sgl, sg, nelems, i) - xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs); - -} -EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs); /* * Make physical memory consistent for a set of streaming mode DMA translations @@ -641,21 +633,19 @@ xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, sg_dma_len(sg), dir, target); } -void +static void xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); } -EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu); -void +static void xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); } -EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device); /* * Return whether the given device DMA address mask can be supported @@ -663,14 +653,13 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device); * during bus mastering, then you would pass 0x00ffffff as the mask to * this function. */ -int +static int xen_swiotlb_dma_supported(struct device *hwdev, u64 mask) { return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask; } -EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported); -int +static int xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask) { if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask)) @@ -680,14 +669,13 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask) return 0; } -EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask); /* * Create userspace mapping for the DMA-coherent memory. * This function should be called with the pages from the current domain only, * passing pages mapped from other domains would lead to memory corruption. */ -int +static int xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) @@ -699,13 +687,12 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma, #endif return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); } -EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap); /* * This function should be called with the pages from the current domain only, * passing pages mapped from other domains would lead to memory corruption. 
*/ -int +static int xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t handle, size_t size, unsigned long attrs) @@ -727,4 +714,20 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, #endif return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size); } -EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable); + +const struct dma_map_ops xen_swiotlb_dma_ops = { + .alloc = xen_swiotlb_alloc_coherent, + .free = xen_swiotlb_free_coherent, + .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, + .sync_single_for_device = xen_swiotlb_sync_single_for_device, + .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu, + .sync_sg_for_device = xen_swiotlb_sync_sg_for_device, + .map_sg = xen_swiotlb_map_sg_attrs, + .unmap_sg = xen_swiotlb_unmap_sg_attrs, + .map_page = xen_swiotlb_map_page, + .unmap_page = xen_swiotlb_unmap_page, + .dma_supported = xen_swiotlb_dma_supported, + .set_dma_mask = xen_swiotlb_set_dma_mask, + .mmap = xen_swiotlb_dma_mmap, + .get_sgtable = xen_swiotlb_get_sgtable, +}; diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h index 1f6d78f044b6..ed2de363da33 100644 --- a/include/xen/swiotlb-xen.h +++ b/include/xen/swiotlb-xen.h @@ -1,69 +1,9 @@ #ifndef __LINUX_SWIOTLB_XEN_H #define __LINUX_SWIOTLB_XEN_H -#include -#include #include extern int xen_swiotlb_init(int verbose, bool early); +extern const struct dma_map_ops xen_swiotlb_dma_ops; -extern void -*xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - unsigned long attrs); - -extern void -xen_swiotlb_free_coherent(struct device *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle, - unsigned long attrs); - -extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs); - -extern void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs); -extern int -xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, - int nelems, enum dma_data_direction dir, - unsigned long attrs); - -extern void -xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, - int nelems, enum dma_data_direction dir, - unsigned long attrs); - -extern void -xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir); - -extern void -xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir); - -extern void -xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir); - -extern void -xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir); - -extern int -xen_swiotlb_dma_supported(struct device *hwdev, u64 mask); - -extern int -xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask); - -extern int -xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs); - -extern int -xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t handle, size_t size, - unsigned long attrs); #endif /* __LINUX_SWIOTLB_XEN_H */ -- cgit v1.2.3 From 4d048dbc057b989966b25011385bb9d825e932ea Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 13:23:27 +0200 Subject: xen-swiotlb: 
implement ->mapping_error DMA_ERROR_CODE is going to go away, so don't rely on it. Signed-off-by: Christoph Hellwig Reviewed-by: Konrad Rzeszutek Wilk --- drivers/xen/swiotlb-xen.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index a0f006daab48..c3a04b2d7532 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -67,6 +67,8 @@ static unsigned long dma_alloc_coherent_mask(struct device *dev, } #endif +#define XEN_SWIOTLB_ERROR_CODE (~(dma_addr_t)0x0) + static char *xen_io_tlb_start, *xen_io_tlb_end; static unsigned long xen_io_tlb_nslabs; /* @@ -410,7 +412,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir, attrs); if (map == SWIOTLB_MAP_ERROR) - return DMA_ERROR_CODE; + return XEN_SWIOTLB_ERROR_CODE; dev_addr = xen_phys_to_bus(map); xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), @@ -425,7 +427,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, attrs |= DMA_ATTR_SKIP_CPU_SYNC; swiotlb_tbl_unmap_single(dev, map, size, dir, attrs); - return DMA_ERROR_CODE; + return XEN_SWIOTLB_ERROR_CODE; } /* @@ -715,6 +717,11 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size); } +static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr == XEN_SWIOTLB_ERROR_CODE; +} + const struct dma_map_ops xen_swiotlb_dma_ops = { .alloc = xen_swiotlb_alloc_coherent, .free = xen_swiotlb_free_coherent, @@ -730,4 +737,5 @@ const struct dma_map_ops xen_swiotlb_dma_ops = { .set_dma_mask = xen_swiotlb_set_dma_mask, .mmap = xen_swiotlb_dma_mmap, .get_sgtable = xen_swiotlb_get_sgtable, + .mapping_error = xen_swiotlb_mapping_error, }; -- cgit v1.2.3 From fb9ef14b7f7275e8bd081d2450a983589e787d0e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 12:23:05 +0200 Subject: c6x: remove DMA_ERROR_CODE Signed-off-by: Christoph Hellwig --- arch/c6x/include/asm/dma-mapping.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h index aca9f755e4f8..05daf1038111 100644 --- a/arch/c6x/include/asm/dma-mapping.h +++ b/arch/c6x/include/asm/dma-mapping.h @@ -12,11 +12,6 @@ #ifndef _ASM_C6X_DMA_MAPPING_H #define _ASM_C6X_DMA_MAPPING_H -/* - * DMA errors are defined by all-bits-set in the DMA address. - */ -#define DMA_ERROR_CODE ~0 - extern const struct dma_map_ops c6x_dma_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) -- cgit v1.2.3 From 75fbede1cf8415d23fe1cb59cd70faa4d08813ab Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 12:28:08 +0200 Subject: ia64: remove DMA_ERROR_CODE All ia64 dma_mapping_ops instances already have a mapping_error member. 
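The reason these removals are safe is in how the generic helper dispatches errors. At the time of this series, dma_mapping_error() behaved roughly like the following sketch (simplified from include/linux/dma-mapping.h):

	static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		debug_dma_mapping_error(dev, dma_addr);
		if (ops->mapping_error)
			return ops->mapping_error(dev, dma_addr);
	#ifdef DMA_ERROR_CODE
		return dma_addr == DMA_ERROR_CODE;	/* legacy arch fallback */
	#endif
		return 0;	/* ops that cannot fail need no method */
	}

Once every dma_map_ops instance on an architecture either supplies ->mapping_error or genuinely cannot fail, the DMA_ERROR_CODE fallback is dead code there and the define can be dropped.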
Signed-off-by: Christoph Hellwig --- arch/ia64/include/asm/dma-mapping.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index 73ec3c6f4cfe..3ce5ab4339f3 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -12,8 +12,6 @@ #define ARCH_HAS_DMA_GET_REQUIRED_MASK -#define DMA_ERROR_CODE 0 - extern const struct dma_map_ops *dma_ops; extern struct ia64_machine_vector ia64_mv; extern void set_iommu_machvec(void); -- cgit v1.2.3 From 7c72e0d557c0c93d22f179dd5a64e6bc475810c2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 12:29:17 +0200 Subject: m32r: remove DMA_ERROR_CODE dma-noop is the only dma_mapping_ops instance for m32r and does not return errors. Signed-off-by: Christoph Hellwig --- arch/m32r/include/asm/dma-mapping.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/m32r/include/asm/dma-mapping.h b/arch/m32r/include/asm/dma-mapping.h index c01d9f52d228..aff3ae8b62f7 100644 --- a/arch/m32r/include/asm/dma-mapping.h +++ b/arch/m32r/include/asm/dma-mapping.h @@ -8,8 +8,6 @@ #include #include -#define DMA_ERROR_CODE (~(dma_addr_t)0x0) - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &dma_noop_ops; -- cgit v1.2.3 From b0230a695232679d5bbf9af8499801ff90cee80d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 12:30:35 +0200 Subject: microblaze: remove DMA_ERROR_CODE microblaze does not return errors for dma_map_page. Signed-off-by: Christoph Hellwig --- arch/microblaze/include/asm/dma-mapping.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 3fad5e722a66..e15cd2f76e23 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -28,8 +28,6 @@ #include #include -#define DMA_ERROR_CODE (~(dma_addr_t)0x0) - #define __dma_alloc_coherent(dev, gfp, size, handle) NULL #define __dma_free_coherent(size, addr) ((void)0) -- cgit v1.2.3 From d7950f8347483c773534c0d98044a0863a9a24a7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 12:32:27 +0200 Subject: openrisc: remove DMA_ERROR_CODE openrisc does not return errors for dma_map_page. Signed-off-by: Christoph Hellwig --- arch/openrisc/include/asm/dma-mapping.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index 0c0075f17145..a4ea139c2ef9 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -26,8 +26,6 @@ #include #include -#define DMA_ERROR_CODE (~(dma_addr_t)0x0) - extern const struct dma_map_ops or1k_dma_map_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) -- cgit v1.2.3 From f2a80aedb20c0513b4e127d1fe1cecf936a25ba0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 12:48:09 +0200 Subject: sh: remove DMA_ERROR_CODE sh does not return errors for dma_map_page. 
Signed-off-by: Christoph Hellwig --- arch/sh/include/asm/dma-mapping.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index d99008af5f73..9b06be07db4d 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -9,8 +9,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return dma_ops; } -#define DMA_ERROR_CODE 0 - void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir); -- cgit v1.2.3 From 50f5c08b016bb4c6d34667d7ab9d5ee35e85261f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 12:49:22 +0200 Subject: xtensa: remove DMA_ERROR_CODE xtensa already implements the mapping_error method for its only dma_map_ops instance. Signed-off-by: Christoph Hellwig --- arch/xtensa/include/asm/dma-mapping.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index c6140fa8c0be..269738dc9d1d 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -16,8 +16,6 @@ #include #include -#define DMA_ERROR_CODE (~(dma_addr_t)0x0) - extern const struct dma_map_ops xtensa_dma_map_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) -- cgit v1.2.3 From e0d60ac10ec01647bbdd3963831515dde17f2d70 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 12:21:50 +0200 Subject: arm64: remove DMA_ERROR_CODE The dma alloc interface returns an error by returning NULL, and the mapping interfaces rely on the mapping_error method, which the dummy ops already implement correctly. Thus remove the DMA_ERROR_CODE define. Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy --- arch/arm64/include/asm/dma-mapping.h | 1 - arch/arm64/mm/dma-mapping.c | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 5392dbeffa45..cf8fc8f05580 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -24,7 +24,6 @@ #include #include -#define DMA_ERROR_CODE (~(dma_addr_t)0) extern const struct dma_map_ops dummy_dma_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 3216e098c058..147fbb907a2f 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -184,7 +184,6 @@ static void *__dma_alloc(struct device *dev, size_t size, no_map: __dma_free_coherent(dev, size, ptr, *dma_handle, attrs); no_mem: - *dma_handle = DMA_ERROR_CODE; return NULL; } @@ -487,7 +486,7 @@ static dma_addr_t __dummy_map_page(struct device *dev, struct page *page, enum dma_data_direction dir, unsigned long attrs) { - return DMA_ERROR_CODE; + return 0; } static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr, -- cgit v1.2.3 From e830467143ad0b4efeab3e6c9e1d8033a51a4bf9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 12:26:24 +0200 Subject: hexagon: switch to use ->mapping_error for error reporting Signed-off-by: Christoph Hellwig Acked-by: Richard Kuo --- arch/hexagon/include/asm/dma-mapping.h | 2 -- arch/hexagon/kernel/dma.c | 12 +++++++++--- arch/hexagon/kernel/hexagon_ksyms.c | 1 - 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index
d3a87bd9b686..00e3f10113b0 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -29,8 +29,6 @@ #include struct device; -extern int bad_dma_address; -#define DMA_ERROR_CODE bad_dma_address extern const struct dma_map_ops *dma_ops; diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c index e74b65009587..8140b1c5de68 100644 --- a/arch/hexagon/kernel/dma.c +++ b/arch/hexagon/kernel/dma.c @@ -25,11 +25,11 @@ #include #include +#define HEXAGON_MAPPING_ERROR 0 + const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); -int bad_dma_address; /* globals are automatically initialized to zero */ - static inline void *dma_addr_to_virt(dma_addr_t dma_addr) { return phys_to_virt((unsigned long) dma_addr); @@ -181,7 +181,7 @@ static dma_addr_t hexagon_map_page(struct device *dev, struct page *page, WARN_ON(size == 0); if (!check_addr("map_single", dev, bus, size)) - return bad_dma_address; + return HEXAGON_MAPPING_ERROR; if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) dma_sync(dma_addr_to_virt(bus), size, dir); @@ -203,6 +203,11 @@ static void hexagon_sync_single_for_device(struct device *dev, dma_sync(dma_addr_to_virt(dma_handle), size, dir); } +static int hexagon_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr == HEXAGON_MAPPING_ERROR; +} + const struct dma_map_ops hexagon_dma_ops = { .alloc = hexagon_dma_alloc_coherent, .free = hexagon_free_coherent, @@ -210,6 +215,7 @@ const struct dma_map_ops hexagon_dma_ops = { .map_page = hexagon_map_page, .sync_single_for_cpu = hexagon_sync_single_for_cpu, .sync_single_for_device = hexagon_sync_single_for_device, + .mapping_error = hexagon_mapping_error, .is_phys = 1, }; diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c index 00bcad9cbd8f..aa248f595431 100644 --- a/arch/hexagon/kernel/hexagon_ksyms.c +++ b/arch/hexagon/kernel/hexagon_ksyms.c @@ -40,7 +40,6 @@ EXPORT_SYMBOL(memset); /* Additional variables */ EXPORT_SYMBOL(__phys_offset); EXPORT_SYMBOL(_dflt_cache_att); -EXPORT_SYMBOL(bad_dma_address); #define DECLARE_EXPORT(name) \ extern void name(void); EXPORT_SYMBOL(name) -- cgit v1.2.3 From a869572c3196440e96d62d61320e4ae2f530b02e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 13:26:45 +0200 Subject: iommu/amd: implement ->mapping_error DMA_ERROR_CODE is going to go away, so don't rely on it. 
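The conversion follows the same recipe as the other IOMMU drivers in the series; stripped to a skeleton it looks like this (the foo_* names are placeholders, not the amd_iommu symbols):

	#define FOO_MAPPING_ERROR	0	/* an address the IOVA allocator never hands out */

	static dma_addr_t foo_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
	{
		dma_addr_t iova = foo_alloc_iova(dev, size);

		if (!iova)
			return FOO_MAPPING_ERROR;
		return iova + offset;
	}

	static int foo_mapping_error(struct device *dev, dma_addr_t dma_addr)
	{
		return dma_addr == FOO_MAPPING_ERROR;
	}

	static const struct dma_map_ops foo_dma_ops = {
		.map_page	= foo_map_page,
		.mapping_error	= foo_mapping_error,
		/* ... remaining ops ... */
	};

The one subtlety is picking a sentinel the mapping path can never legitimately return; this patch relies on the amd_iommu IOVA allocator never handing out address zero.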
Signed-off-by: Christoph Hellwig --- drivers/iommu/amd_iommu.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 63cacf5d6cf2..d41280e869de 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -54,6 +54,8 @@ #include "amd_iommu_types.h" #include "irq_remapping.h" +#define AMD_IOMMU_MAPPING_ERROR 0 + #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) #define LOOP_TIMEOUT 100000 @@ -2394,7 +2396,7 @@ static dma_addr_t __map_single(struct device *dev, paddr &= PAGE_MASK; address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask); - if (address == DMA_ERROR_CODE) + if (address == AMD_IOMMU_MAPPING_ERROR) goto out; prot = dir2prot(direction); @@ -2431,7 +2433,7 @@ out_unmap: dma_ops_free_iova(dma_dom, address, pages); - return DMA_ERROR_CODE; + return AMD_IOMMU_MAPPING_ERROR; } /* @@ -2483,7 +2485,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page, if (PTR_ERR(domain) == -EINVAL) return (dma_addr_t)paddr; else if (IS_ERR(domain)) - return DMA_ERROR_CODE; + return AMD_IOMMU_MAPPING_ERROR; dma_mask = *dev->dma_mask; dma_dom = to_dma_ops_domain(domain); @@ -2560,7 +2562,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, npages = sg_num_pages(dev, sglist, nelems); address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask); - if (address == DMA_ERROR_CODE) + if (address == AMD_IOMMU_MAPPING_ERROR) goto out_err; prot = dir2prot(direction); @@ -2683,7 +2685,7 @@ static void *alloc_coherent(struct device *dev, size_t size, *dma_addr = __map_single(dev, dma_dom, page_to_phys(page), size, DMA_BIDIRECTIONAL, dma_mask); - if (*dma_addr == DMA_ERROR_CODE) + if (*dma_addr == AMD_IOMMU_MAPPING_ERROR) goto out_free; return page_address(page); @@ -2732,6 +2734,11 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask) return check_device(dev); } +static int amd_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr == AMD_IOMMU_MAPPING_ERROR; +} + static const struct dma_map_ops amd_iommu_dma_ops = { .alloc = alloc_coherent, .free = free_coherent, @@ -2740,6 +2747,7 @@ static const struct dma_map_ops amd_iommu_dma_ops = { .map_sg = map_sg, .unmap_sg = unmap_sg, .dma_supported = amd_iommu_dma_supported, + .mapping_error = amd_iommu_mapping_error, }; static int init_reserved_iova_ranges(void) -- cgit v1.2.3 From 81cac18e1180e9812c9838d8cef3a1fff00f3702 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 13:04:09 +0200 Subject: s390: implement ->mapping_error s390 can also use noop_dma_ops, and while that currently does not return errors it will do so in the future. Implementing the mapping_error method is the proper way to have per-ops error conditions.
Signed-off-by: Christoph Hellwig Acked-by: Gerald Schaefer --- arch/s390/include/asm/dma-mapping.h | 2 -- arch/s390/pci/pci_dma.c | 18 +++++++++++++----- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index 3108b8dbe266..512ad0eaa11a 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -8,8 +8,6 @@ #include #include -#define DMA_ERROR_CODE (~(dma_addr_t) 0x0) - extern const struct dma_map_ops s390_pci_dma_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 9081a57fa340..ea623faab525 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -14,6 +14,8 @@ #include #include +#define S390_MAPPING_ERROR (~(dma_addr_t) 0x0) + static struct kmem_cache *dma_region_table_cache; static struct kmem_cache *dma_page_table_cache; static int s390_iommu_strict; @@ -281,7 +283,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size) out_error: spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); - return DMA_ERROR_CODE; + return S390_MAPPING_ERROR; } static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size) @@ -329,7 +331,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, /* This rounds up number of pages based on size and offset */ nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); dma_addr = dma_alloc_address(dev, nr_pages); - if (dma_addr == DMA_ERROR_CODE) { + if (dma_addr == S390_MAPPING_ERROR) { ret = -ENOSPC; goto out_err; } @@ -352,7 +354,7 @@ out_free: out_err: zpci_err("map error:\n"); zpci_err_dma(ret, pa); - return DMA_ERROR_CODE; + return S390_MAPPING_ERROR; } static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, @@ -429,7 +431,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg, int ret; dma_addr_base = dma_alloc_address(dev, nr_pages); - if (dma_addr_base == DMA_ERROR_CODE) + if (dma_addr_base == S390_MAPPING_ERROR) return -ENOMEM; dma_addr = dma_addr_base; @@ -476,7 +478,7 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg, for (i = 1; i < nr_elements; i++) { s = sg_next(s); - s->dma_address = DMA_ERROR_CODE; + s->dma_address = S390_MAPPING_ERROR; s->dma_length = 0; if (s->offset || (size & ~PAGE_MASK) || @@ -525,6 +527,11 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, s->dma_length = 0; } } + +static int s390_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr == S390_MAPPING_ERROR; +} int zpci_dma_init_device(struct zpci_dev *zdev) { @@ -657,6 +664,7 @@ const struct dma_map_ops s390_pci_dma_ops = { .unmap_sg = s390_dma_unmap_sg, .map_page = s390_dma_map_pages, .unmap_page = s390_dma_unmap_pages, + .mapping_error = s390_mapping_error, /* if we support direct DMA this must be conditional */ .is_phys = 0, /* dma_supported is unconditionally true without a callback */ -- cgit v1.2.3 From ceaf481c4f6ae6090288528825abc7d051bb1032 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 13:33:44 +0200 Subject: sparc: implement ->mapping_error DMA_ERROR_CODE is going to go away, so don't rely on it. Signed-off-by: Christoph Hellwig Acked-by: David S. 
Miller --- arch/sparc/include/asm/dma-mapping.h | 2 -- arch/sparc/kernel/iommu.c | 12 +++++++++--- arch/sparc/kernel/iommu_common.h | 2 ++ arch/sparc/kernel/pci_sun4v.c | 14 ++++++++++---- 4 files changed, 21 insertions(+), 9 deletions(-) diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 69cc627779f2..b8e8dfcd065d 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -5,8 +5,6 @@ #include #include -#define DMA_ERROR_CODE (~(dma_addr_t)0x0) - #define HAVE_ARCH_DMA_SUPPORTED 1 int dma_supported(struct device *dev, u64 mask); diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index c63ba99ca551..dafa316d978d 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -314,7 +314,7 @@ bad: bad_no_ctx: if (printk_ratelimit()) WARN_ON(1); - return DMA_ERROR_CODE; + return SPARC_MAPPING_ERROR; } static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, @@ -547,7 +547,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, if (outcount < incount) { outs = sg_next(outs); - outs->dma_address = DMA_ERROR_CODE; + outs->dma_address = SPARC_MAPPING_ERROR; outs->dma_length = 0; } @@ -573,7 +573,7 @@ iommu_map_failed: iommu_tbl_range_free(&iommu->tbl, vaddr, npages, IOMMU_ERROR_CODE); - s->dma_address = DMA_ERROR_CODE; + s->dma_address = SPARC_MAPPING_ERROR; s->dma_length = 0; } if (s == outs) @@ -741,6 +741,11 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, spin_unlock_irqrestore(&iommu->lock, flags); } +static int dma_4u_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr == SPARC_MAPPING_ERROR; +} + static const struct dma_map_ops sun4u_dma_ops = { .alloc = dma_4u_alloc_coherent, .free = dma_4u_free_coherent, @@ -750,6 +755,7 @@ static const struct dma_map_ops sun4u_dma_ops = { .unmap_sg = dma_4u_unmap_sg, .sync_single_for_cpu = dma_4u_sync_single_for_cpu, .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, + .mapping_error = dma_4u_mapping_error, }; const struct dma_map_ops *dma_ops = &sun4u_dma_ops; diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h index 828493329f68..5ea5c192b1d9 100644 --- a/arch/sparc/kernel/iommu_common.h +++ b/arch/sparc/kernel/iommu_common.h @@ -47,4 +47,6 @@ static inline int is_span_boundary(unsigned long entry, return iommu_is_span_boundary(entry, nr, shift, boundary_size); } +#define SPARC_MAPPING_ERROR (~(dma_addr_t)0x0) + #endif /* _IOMMU_COMMON_H */ diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 68bec7c97cb8..8e2a56f4c03a 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -412,12 +412,12 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, bad: if (printk_ratelimit()) WARN_ON(1); - return DMA_ERROR_CODE; + return SPARC_MAPPING_ERROR; iommu_map_fail: local_irq_restore(flags); iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); - return DMA_ERROR_CODE; + return SPARC_MAPPING_ERROR; } static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, @@ -590,7 +590,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, if (outcount < incount) { outs = sg_next(outs); - outs->dma_address = DMA_ERROR_CODE; + outs->dma_address = SPARC_MAPPING_ERROR; outs->dma_length = 0; } @@ -607,7 +607,7 @@ iommu_map_failed: iommu_tbl_range_free(tbl, vaddr, npages, IOMMU_ERROR_CODE); /* XXX demap? 
XXX */ - s->dma_address = DMA_ERROR_CODE; + s->dma_address = SPARC_MAPPING_ERROR; s->dma_length = 0; } if (s == outs) @@ -669,6 +669,11 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, local_irq_restore(flags); } +static int dma_4v_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr == SPARC_MAPPING_ERROR; +} + static const struct dma_map_ops sun4v_dma_ops = { .alloc = dma_4v_alloc_coherent, .free = dma_4v_free_coherent, @@ -676,6 +681,7 @@ static const struct dma_map_ops sun4v_dma_ops = { .unmap_page = dma_4v_unmap_page, .map_sg = dma_4v_map_sg, .unmap_sg = dma_4v_unmap_sg, + .mapping_error = dma_4v_mapping_error, }; static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent) -- cgit v1.2.3 From 6009faa43f804c99b3d8fff94fa1e0692be70358 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 21 May 2017 14:17:10 +0200 Subject: powerpc: implement ->mapping_error DMA_ERROR_CODE is going to go away, so don't rely on it. Instead define a ->mapping_error method for all IOMMU based dma operation instances. The direct ops don't ever return an error and don't need a ->mapping_error method. Signed-off-by: Christoph Hellwig Acked-by: Michael Ellerman --- arch/powerpc/include/asm/dma-mapping.h | 4 ---- arch/powerpc/include/asm/iommu.h | 4 ++++ arch/powerpc/kernel/dma-iommu.c | 6 ++++++ arch/powerpc/kernel/iommu.c | 28 ++++++++++++++-------------- arch/powerpc/platforms/cell/iommu.c | 1 + arch/powerpc/platforms/pseries/vio.c | 3 ++- 6 files changed, 27 insertions(+), 19 deletions(-) diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 181a095468e4..73aedbe6c977 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -17,10 +17,6 @@ #include #include -#ifdef CONFIG_PPC64 -#define DMA_ERROR_CODE (~(dma_addr_t)0x0) -#endif - /* Some dma direct funcs must be visible for use in other dma_ops */ extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 8a8ce220d7d0..20febe0b7f32 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -139,6 +139,8 @@ struct scatterlist; #ifdef CONFIG_PPC64 +#define IOMMU_MAPPING_ERROR (~(dma_addr_t)0x0) + static inline void set_iommu_table_base(struct device *dev, struct iommu_table *base) { @@ -238,6 +240,8 @@ static inline int __init tce_iommu_bus_notifier_init(void) } #endif /* !CONFIG_IOMMU_API */ +int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr); + #else static inline void *get_iommu_table_base(struct device *dev) diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index fb7cbaa37658..8f7abf9baa63 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -105,6 +105,11 @@ static u64 dma_iommu_get_required_mask(struct device *dev) return mask; } +int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr == IOMMU_MAPPING_ERROR; +} + struct dma_map_ops dma_iommu_ops = { .alloc = dma_iommu_alloc_coherent, .free = dma_iommu_free_coherent, @@ -115,5 +120,6 @@ struct dma_map_ops dma_iommu_ops = { .map_page = dma_iommu_map_page, .unmap_page = dma_iommu_unmap_page, .get_required_mask = dma_iommu_get_required_mask, + .mapping_error = dma_iommu_mapping_error, }; EXPORT_SYMBOL(dma_iommu_ops); diff --git a/arch/powerpc/kernel/iommu.c 
b/arch/powerpc/kernel/iommu.c index f2b724cd9e64..233ca3fe4754 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -198,11 +198,11 @@ static unsigned long iommu_range_alloc(struct device *dev, if (unlikely(npages == 0)) { if (printk_ratelimit()) WARN_ON(1); - return DMA_ERROR_CODE; + return IOMMU_MAPPING_ERROR; } if (should_fail_iommu(dev)) - return DMA_ERROR_CODE; + return IOMMU_MAPPING_ERROR; /* * We don't need to disable preemption here because any CPU can @@ -278,7 +278,7 @@ again: } else { /* Give up */ spin_unlock_irqrestore(&(pool->lock), flags); - return DMA_ERROR_CODE; + return IOMMU_MAPPING_ERROR; } } @@ -310,13 +310,13 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, unsigned long attrs) { unsigned long entry; - dma_addr_t ret = DMA_ERROR_CODE; + dma_addr_t ret = IOMMU_MAPPING_ERROR; int build_fail; entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); - if (unlikely(entry == DMA_ERROR_CODE)) - return DMA_ERROR_CODE; + if (unlikely(entry == IOMMU_MAPPING_ERROR)) + return IOMMU_MAPPING_ERROR; entry += tbl->it_offset; /* Offset into real TCE table */ ret = entry << tbl->it_page_shift; /* Set the return dma address */ @@ -328,12 +328,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, /* tbl->it_ops->set() only returns non-zero for transient errors. * Clean up the table bitmap in this case and return - * DMA_ERROR_CODE. For all other errors the functionality is + * IOMMU_MAPPING_ERROR. For all other errors the functionality is * not altered. */ if (unlikely(build_fail)) { __iommu_free(tbl, ret, npages); - return DMA_ERROR_CODE; + return IOMMU_MAPPING_ERROR; } /* Flush/invalidate TLB caches if necessary */ @@ -478,7 +478,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen); /* Handle failure */ - if (unlikely(entry == DMA_ERROR_CODE)) { + if (unlikely(entry == IOMMU_MAPPING_ERROR)) { if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) dev_info(dev, "iommu_alloc failed, tbl %p " @@ -545,7 +545,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, */ if (outcount < incount) { outs = sg_next(outs); - outs->dma_address = DMA_ERROR_CODE; + outs->dma_address = IOMMU_MAPPING_ERROR; outs->dma_length = 0; } @@ -563,7 +563,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, npages = iommu_num_pages(s->dma_address, s->dma_length, IOMMU_PAGE_SIZE(tbl)); __iommu_free(tbl, vaddr, npages); - s->dma_address = DMA_ERROR_CODE; + s->dma_address = IOMMU_MAPPING_ERROR; s->dma_length = 0; } if (s == outs) @@ -777,7 +777,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, unsigned long mask, enum dma_data_direction direction, unsigned long attrs) { - dma_addr_t dma_handle = DMA_ERROR_CODE; + dma_addr_t dma_handle = IOMMU_MAPPING_ERROR; void *vaddr; unsigned long uaddr; unsigned int npages, align; @@ -797,7 +797,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, mask >> tbl->it_page_shift, align, attrs); - if (dma_handle == DMA_ERROR_CODE) { + if (dma_handle == IOMMU_MAPPING_ERROR) { if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) { dev_info(dev, "iommu_alloc failed, tbl %p " @@ -869,7 +869,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, io_order = get_iommu_order(size, tbl); mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, mask >> 
tbl->it_page_shift, io_order, 0); - if (mapping == DMA_ERROR_CODE) { + if (mapping == IOMMU_MAPPING_ERROR) { free_pages((unsigned long)ret, order); return NULL; } diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 71b995bbcae0..948086e33a0c 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -660,6 +660,7 @@ static const struct dma_map_ops dma_iommu_fixed_ops = { .set_dma_mask = dma_set_mask_and_switch, .map_page = dma_fixed_map_page, .unmap_page = dma_fixed_unmap_page, + .mapping_error = dma_iommu_mapping_error, }; static void cell_dma_dev_setup_fixed(struct device *dev); diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 28b09fd797ec..e6f43d546827 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -519,7 +519,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, { struct vio_dev *viodev = to_vio_dev(dev); struct iommu_table *tbl; - dma_addr_t ret = DMA_ERROR_CODE; + dma_addr_t ret = IOMMU_MAPPING_ERROR; tbl = get_iommu_table_base(dev); if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) { @@ -625,6 +625,7 @@ static const struct dma_map_ops vio_dma_mapping_ops = { .unmap_page = vio_dma_iommu_unmap_page, .dma_supported = vio_dma_iommu_dma_supported, .get_required_mask = vio_dma_get_required_mask, + .mapping_error = dma_iommu_mapping_error, }; /** -- cgit v1.2.3 From 14a9aad7f0df85d977c0d8be42e80fa3f8efdb26 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 10:34:21 +0200 Subject: x86/pci-nommu: implement ->mapping_error DMA_ERROR_CODE is going to go away, so don't rely on it. Signed-off-by: Christoph Hellwig --- arch/x86/kernel/pci-nommu.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index a88952ef371c..085fe6ce4049 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c @@ -11,6 +11,8 @@ #include #include +#define NOMMU_MAPPING_ERROR 0 + static int check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) { @@ -33,7 +35,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page, dma_addr_t bus = page_to_phys(page) + offset; WARN_ON(size == 0); if (!check_addr("map_single", dev, bus, size)) - return DMA_ERROR_CODE; + return NOMMU_MAPPING_ERROR; flush_write_buffers(); return bus; } @@ -88,6 +90,11 @@ static void nommu_sync_sg_for_device(struct device *dev, flush_write_buffers(); } +static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr == NOMMU_MAPPING_ERROR; +} + const struct dma_map_ops nommu_dma_ops = { .alloc = dma_generic_alloc_coherent, .free = dma_generic_free_coherent, @@ -96,4 +103,5 @@ const struct dma_map_ops nommu_dma_ops = { .sync_single_for_device = nommu_sync_single_for_device, .sync_sg_for_device = nommu_sync_sg_for_device, .is_phys = 1, + .mapping_error = nommu_mapping_error, }; -- cgit v1.2.3 From 8bd17c6670ecf259f689f398a8336068f0a972ce Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 10:36:43 +0200 Subject: x86/calgary: implement ->mapping_error DMA_ERROR_CODE is going to go away, so don't rely on it. 
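The conversion follows the pattern used by the other ->mapping_error instances in this series: pick a driver-private sentinel value that the mapper can never hand out for a valid mapping, and compare against it in a trivial callback. A minimal sketch of the pattern, using hypothetical foo_* names rather than any real driver:

    #include <linux/dma-mapping.h>

    #define FOO_MAPPING_ERROR 0     /* an address foo never returns for a valid mapping */

    static int foo_mapping_error(struct device *dev, dma_addr_t dma_addr)
    {
            return dma_addr == FOO_MAPPING_ERROR;
    }

    static const struct dma_map_ops foo_dma_ops = {
            /* .map_page, .unmap_page, .map_sg, ... as before */
            .mapping_error  = foo_mapping_error,
    };

dma_mapping_error() then dispatches to this method, so callers never have to know the per-implementation sentinel.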
Signed-off-by: Christoph Hellwig --- arch/x86/kernel/pci-calgary_64.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index fda7867046d0..e75b490f2b0b 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -50,6 +50,8 @@ #include #include +#define CALGARY_MAPPING_ERROR 0 + #ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT int use_calgary __read_mostly = 1; #else @@ -252,7 +254,7 @@ static unsigned long iommu_range_alloc(struct device *dev, if (panic_on_overflow) panic("Calgary: fix the allocator.\n"); else - return DMA_ERROR_CODE; + return CALGARY_MAPPING_ERROR; } } @@ -272,10 +274,10 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, entry = iommu_range_alloc(dev, tbl, npages); - if (unlikely(entry == DMA_ERROR_CODE)) { + if (unlikely(entry == CALGARY_MAPPING_ERROR)) { pr_warn("failed to allocate %u pages in iommu %p\n", npages, tbl); - return DMA_ERROR_CODE; + return CALGARY_MAPPING_ERROR; } /* set the return dma address */ @@ -295,7 +297,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned long flags; /* were we called with bad_dma_address? */ - badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE); + badend = CALGARY_MAPPING_ERROR + (EMERGENCY_PAGES * PAGE_SIZE); if (unlikely(dma_addr < badend)) { WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA " "address 0x%Lx\n", dma_addr); @@ -380,7 +382,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE); entry = iommu_range_alloc(dev, tbl, npages); - if (entry == DMA_ERROR_CODE) { + if (entry == CALGARY_MAPPING_ERROR) { /* makes sure unmap knows to stop */ s->dma_length = 0; goto error; @@ -398,7 +400,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, error: calgary_unmap_sg(dev, sg, nelems, dir, 0); for_each_sg(sg, s, nelems, i) { - sg->dma_address = DMA_ERROR_CODE; + sg->dma_address = CALGARY_MAPPING_ERROR; sg->dma_length = 0; } return 0; @@ -453,7 +455,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size, /* set up tces to cover the allocated range */ mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL); - if (mapping == DMA_ERROR_CODE) + if (mapping == CALGARY_MAPPING_ERROR) goto free; *dma_handle = mapping; return ret; @@ -478,6 +480,11 @@ static void calgary_free_coherent(struct device *dev, size_t size, free_pages((unsigned long)vaddr, get_order(size)); } +static int calgary_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr == CALGARY_MAPPING_ERROR; +} + static const struct dma_map_ops calgary_dma_ops = { .alloc = calgary_alloc_coherent, .free = calgary_free_coherent, @@ -485,6 +492,7 @@ static const struct dma_map_ops calgary_dma_ops = { .unmap_sg = calgary_unmap_sg, .map_page = calgary_map_page, .unmap_page = calgary_unmap_page, + .mapping_error = calgary_mapping_error, }; static inline void __iomem * busno_to_bbar(unsigned char num) @@ -732,7 +740,7 @@ static void __init calgary_reserve_regions(struct pci_dev *dev) struct iommu_table *tbl = pci_iommu(dev->bus); /* reserve EMERGENCY_PAGES from bad_dma_address and up */ - iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES); + iommu_range_reserve(tbl, CALGARY_MAPPING_ERROR, EMERGENCY_PAGES); /* avoid the BIOS/VGA first 640KB-1MB region */ /* for CalIOC2 - avoid the entire first MB */ -- cgit v1.2.3 From 
a760088b45186d6235e05a75788d142bf203a927 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 10:37:09 +0200 Subject: x86: remove DMA_ERROR_CODE All dma_map_ops instances now handle their errors through ->mapping_error. Signed-off-by: Christoph Hellwig --- arch/x86/include/asm/dma-mapping.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 08a0838b83fb..c35d228aa381 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -19,8 +19,6 @@ # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32) #endif -#define DMA_ERROR_CODE 0 - extern int iommu_merge; extern struct device x86_dma_fallback_dev; extern int panic_on_overflow; -- cgit v1.2.3 From 9eef8b8cc26559fe5f2575daf7d08c6a17e81ff8 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 10:53:03 +0200 Subject: arm: implement ->mapping_error DMA_ERROR_CODE is going to go away, so don't rely on it. Signed-off-by: Christoph Hellwig --- arch/arm/common/dmabounce.c | 13 +++++++++--- arch/arm/include/asm/dma-iommu.h | 2 ++ arch/arm/include/asm/dma-mapping.h | 1 - arch/arm/mm/dma-mapping.c | 41 ++++++++++++++++++++++++-------------- 4 files changed, 38 insertions(+), 19 deletions(-) diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 9b1b7be2ec0e..4060378e0f14 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -33,6 +33,7 @@ #include #include +#include #undef STATS @@ -256,7 +257,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, if (buf == NULL) { dev_err(dev, "%s: unable to map unsafe buffer %p!\n", __func__, ptr); - return DMA_ERROR_CODE; + return ARM_MAPPING_ERROR; } dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", @@ -326,7 +327,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, ret = needs_bounce(dev, dma_addr, size); if (ret < 0) - return DMA_ERROR_CODE; + return ARM_MAPPING_ERROR; if (ret == 0) { arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir); @@ -335,7 +336,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, if (PageHighMem(page)) { dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n"); - return DMA_ERROR_CODE; + return ARM_MAPPING_ERROR; } return map_single(dev, page_address(page) + offset, size, dir, attrs); @@ -452,6 +453,11 @@ static int dmabounce_set_mask(struct device *dev, u64 dma_mask) return arm_dma_ops.set_dma_mask(dev, dma_mask); } +static int dmabounce_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return arm_dma_ops.mapping_error(dev, dma_addr); +} + static const struct dma_map_ops dmabounce_ops = { .alloc = arm_dma_alloc, .free = arm_dma_free, @@ -466,6 +472,7 @@ static const struct dma_map_ops dmabounce_ops = { .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, .sync_sg_for_device = arm_dma_sync_sg_for_device, .set_dma_mask = dmabounce_set_mask, + .mapping_error = dmabounce_mapping_error, }; static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h index 2ef282f96651..389a26a10ea3 100644 --- a/arch/arm/include/asm/dma-iommu.h +++ b/arch/arm/include/asm/dma-iommu.h @@ -9,6 +9,8 @@ #include #include +#define ARM_MAPPING_ERROR (~(dma_addr_t)0x0) + struct dma_iommu_mapping { /* iommu specific data */ struct iommu_domain *domain; diff --git a/arch/arm/include/asm/dma-mapping.h 
b/arch/arm/include/asm/dma-mapping.h index 680d3f3889e7..52a8fd5a8edb 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -12,7 +12,6 @@ #include #include -#define DMA_ERROR_CODE (~(dma_addr_t)0x0) extern const struct dma_map_ops arm_dma_ops; extern const struct dma_map_ops arm_coherent_dma_ops; diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index bd83c531828a..8f2c5a8a98f0 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -180,6 +180,11 @@ static void arm_dma_sync_single_for_device(struct device *dev, __dma_page_cpu_to_dev(page, offset, size, dir); } +static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr == ARM_MAPPING_ERROR; +} + const struct dma_map_ops arm_dma_ops = { .alloc = arm_dma_alloc, .free = arm_dma_free, @@ -193,6 +198,7 @@ const struct dma_map_ops arm_dma_ops = { .sync_single_for_device = arm_dma_sync_single_for_device, .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, .sync_sg_for_device = arm_dma_sync_sg_for_device, + .mapping_error = arm_dma_mapping_error, }; EXPORT_SYMBOL(arm_dma_ops); @@ -211,6 +217,7 @@ const struct dma_map_ops arm_coherent_dma_ops = { .get_sgtable = arm_dma_get_sgtable, .map_page = arm_coherent_dma_map_page, .map_sg = arm_dma_map_sg, + .mapping_error = arm_dma_mapping_error, }; EXPORT_SYMBOL(arm_coherent_dma_ops); @@ -799,7 +806,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp &= ~(__GFP_COMP); args.gfp = gfp; - *handle = DMA_ERROR_CODE; + *handle = ARM_MAPPING_ERROR; allowblock = gfpflags_allow_blocking(gfp); cma = allowblock ? dev_get_cma_area(dev) : false; @@ -1254,7 +1261,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, if (i == mapping->nr_bitmaps) { if (extend_iommu_mapping(mapping)) { spin_unlock_irqrestore(&mapping->lock, flags); - return DMA_ERROR_CODE; + return ARM_MAPPING_ERROR; } start = bitmap_find_next_zero_area(mapping->bitmaps[i], @@ -1262,7 +1269,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, if (start > mapping->bits) { spin_unlock_irqrestore(&mapping->lock, flags); - return DMA_ERROR_CODE; + return ARM_MAPPING_ERROR; } bitmap_set(mapping->bitmaps[i], start, count); @@ -1445,7 +1452,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, int i; dma_addr = __alloc_iova(mapping, size); - if (dma_addr == DMA_ERROR_CODE) + if (dma_addr == ARM_MAPPING_ERROR) return dma_addr; iova = dma_addr; @@ -1472,7 +1479,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, fail: iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); __free_iova(mapping, dma_addr, size); - return DMA_ERROR_CODE; + return ARM_MAPPING_ERROR; } static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) @@ -1533,7 +1540,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp, return NULL; *handle = __iommu_create_mapping(dev, &page, size, attrs); - if (*handle == DMA_ERROR_CODE) + if (*handle == ARM_MAPPING_ERROR) goto err_mapping; return addr; @@ -1561,7 +1568,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, struct page **pages; void *addr = NULL; - *handle = DMA_ERROR_CODE; + *handle = ARM_MAPPING_ERROR; size = PAGE_ALIGN(size); if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp)) @@ -1582,7 +1589,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, return NULL; *handle = 
__iommu_create_mapping(dev, pages, size, attrs); - if (*handle == DMA_ERROR_CODE) + if (*handle == ARM_MAPPING_ERROR) goto err_buffer; if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) @@ -1732,10 +1739,10 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, int prot; size = PAGE_ALIGN(size); - *handle = DMA_ERROR_CODE; + *handle = ARM_MAPPING_ERROR; iova_base = iova = __alloc_iova(mapping, size); - if (iova == DMA_ERROR_CODE) + if (iova == ARM_MAPPING_ERROR) return -ENOMEM; for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { @@ -1775,7 +1782,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, for (i = 1; i < nents; i++) { s = sg_next(s); - s->dma_address = DMA_ERROR_CODE; + s->dma_address = ARM_MAPPING_ERROR; s->dma_length = 0; if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { @@ -1950,7 +1957,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p int ret, prot, len = PAGE_ALIGN(size + offset); dma_addr = __alloc_iova(mapping, len); - if (dma_addr == DMA_ERROR_CODE) + if (dma_addr == ARM_MAPPING_ERROR) return dma_addr; prot = __dma_info_to_prot(dir, attrs); @@ -1962,7 +1969,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p return dma_addr + offset; fail: __free_iova(mapping, dma_addr, len); - return DMA_ERROR_CODE; + return ARM_MAPPING_ERROR; } /** @@ -2056,7 +2063,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev, size_t len = PAGE_ALIGN(size + offset); dma_addr = __alloc_iova(mapping, len); - if (dma_addr == DMA_ERROR_CODE) + if (dma_addr == ARM_MAPPING_ERROR) return dma_addr; prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO; @@ -2068,7 +2075,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev, return dma_addr + offset; fail: __free_iova(mapping, dma_addr, len); - return DMA_ERROR_CODE; + return ARM_MAPPING_ERROR; } /** @@ -2140,6 +2147,8 @@ const struct dma_map_ops iommu_ops = { .map_resource = arm_iommu_map_resource, .unmap_resource = arm_iommu_unmap_resource, + + .mapping_error = arm_dma_mapping_error, }; const struct dma_map_ops iommu_coherent_ops = { @@ -2156,6 +2165,8 @@ const struct dma_map_ops iommu_coherent_ops = { .map_resource = arm_iommu_map_resource, .unmap_resource = arm_iommu_unmap_resource, + + .mapping_error = arm_dma_mapping_error, }; /** -- cgit v1.2.3 From f51f288e237cbcfd3dbd1d4fa2d3dec00d7253e2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 10:58:49 +0200 Subject: dma-mapping: remove DMA_ERROR_CODE And update the documentation - dma_mapping_error has been supported everywhere for a long time. Signed-off-by: Christoph Hellwig --- Documentation/DMA-API-HOWTO.txt | 31 +++++-------------------------- include/linux/dma-mapping.h | 5 ----- 2 files changed, 5 insertions(+), 31 deletions(-) diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt index 979228bc9035..4ed388356898 100644 --- a/Documentation/DMA-API-HOWTO.txt +++ b/Documentation/DMA-API-HOWTO.txt @@ -550,32 +550,11 @@ and to unmap it: dma_unmap_single(dev, dma_handle, size, direction); You should call dma_mapping_error() as dma_map_single() could fail and return -error. Not all DMA implementations support the dma_mapping_error() interface. -However, it is a good practice to call dma_mapping_error() interface, which -will invoke the generic mapping error check interface. 
Doing so will ensure -that the mapping code will work correctly on all DMA implementations without -any dependency on the specifics of the underlying implementation. Using the -returned address without checking for errors could result in failures ranging -from panics to silent data corruption. A couple of examples of incorrect ways -to check for errors that make assumptions about the underlying DMA -implementation are as follows and these are applicable to dma_map_page() as -well. - -Incorrect example 1: - dma_addr_t dma_handle; - - dma_handle = dma_map_single(dev, addr, size, direction); - if ((dma_handle & 0xffff != 0) || (dma_handle >= 0x1000000)) { - goto map_error; - } - -Incorrect example 2: - dma_addr_t dma_handle; - - dma_handle = dma_map_single(dev, addr, size, direction); - if (dma_handle == DMA_ERROR_CODE) { - goto map_error; - } +error. Doing so will ensure that the mapping code will work correctly on all +DMA implementations without any dependency on the specifics of the underlying +implementation. Using the returned address without checking for errors could +result in failures ranging from panics to silent data corruption. The same +applies to dma_map_page() as well. You should call dma_unmap_single() when the DMA activity is finished, e.g., from the interrupt which told you that the DMA transfer is done. diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 4f3eecedca2d..a57875309bfd 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -546,12 +546,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) if (get_dma_ops(dev)->mapping_error) return get_dma_ops(dev)->mapping_error(dev, dma_addr); - -#ifdef DMA_ERROR_CODE - return dma_addr == DMA_ERROR_CODE; -#else return 0; -#endif } #ifndef HAVE_ARCH_DMA_SUPPORTED -- cgit v1.2.3 From c6d333e0847260972cb6279de76d8004d2461d3f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 09:06:26 +0200 Subject: sparc: remove leon_dma_ops We can just use pci32_dma_ops directly. Signed-off-by: Christoph Hellwig Acked-by: David S. 
Miller --- arch/sparc/include/asm/dma-mapping.h | 3 +-- arch/sparc/kernel/ioport.c | 5 +---- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index b8e8dfcd065d..98da9f92c318 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -17,7 +17,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, } extern const struct dma_map_ops *dma_ops; -extern const struct dma_map_ops *leon_dma_ops; extern const struct dma_map_ops pci32_dma_ops; extern struct bus_type pci_bus_type; @@ -26,7 +25,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { #ifdef CONFIG_SPARC_LEON if (sparc_cpu_model == sparc_leon) - return leon_dma_ops; + return &pci32_dma_ops; #endif #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) if (bus == &pci_bus_type) diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index cf20033a1458..dd081d557609 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -637,6 +637,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist * } } +/* note: leon re-uses pci32_dma_ops */ const struct dma_map_ops pci32_dma_ops = { .alloc = pci32_alloc_coherent, .free = pci32_free_coherent, @@ -651,10 +652,6 @@ const struct dma_map_ops pci32_dma_ops = { }; EXPORT_SYMBOL(pci32_dma_ops); -/* leon re-uses pci32_dma_ops */ -const struct dma_map_ops *leon_dma_ops = &pci32_dma_ops; -EXPORT_SYMBOL(leon_dma_ops); - const struct dma_map_ops *dma_ops = &sbus_dma_ops; EXPORT_SYMBOL(dma_ops); -- cgit v1.2.3 From b02c2b0bfd7ae4fb5bc8685ac564861b99c7a552 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 09:11:30 +0200 Subject: sparc: remove arch specific dma_supported implementations Usually dma_supported decisions are done by the dma_map_ops instance. Switch sparc to that model by providing a ->dma_supported instance for sbus that always returns false, and implementations tailored to the sun4u and sun4v cases for sparc64, and leave it unimplemented for PCI on sparc32, which means always supported. Signed-off-by: Christoph Hellwig Acked-by: David S. 
Miller --- arch/sparc/include/asm/dma-mapping.h | 3 --- arch/sparc/kernel/iommu.c | 40 +++++++++++++++--------------------- arch/sparc/kernel/ioport.c | 22 ++++++-------------- arch/sparc/kernel/pci_sun4v.c | 17 +++++++++++++++ 4 files changed, 39 insertions(+), 43 deletions(-) diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 98da9f92c318..60bf1633d554 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -5,9 +5,6 @@ #include #include -#define HAVE_ARCH_DMA_SUPPORTED 1 -int dma_supported(struct device *dev, u64 mask); - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir) { diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index dafa316d978d..fcbcc031f615 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -746,6 +746,21 @@ static int dma_4u_mapping_error(struct device *dev, dma_addr_t dma_addr) return dma_addr == SPARC_MAPPING_ERROR; } +static int dma_4u_supported(struct device *dev, u64 device_mask) +{ + struct iommu *iommu = dev->archdata.iommu; + + if (device_mask > DMA_BIT_MASK(32)) + return 0; + if ((device_mask & iommu->dma_addr_mask) == iommu->dma_addr_mask) + return 1; +#ifdef CONFIG_PCI + if (dev_is_pci(dev)) + return pci64_dma_supported(to_pci_dev(dev), device_mask); +#endif + return 0; +} + static const struct dma_map_ops sun4u_dma_ops = { .alloc = dma_4u_alloc_coherent, .free = dma_4u_free_coherent, @@ -755,32 +770,9 @@ static const struct dma_map_ops sun4u_dma_ops = { .unmap_sg = dma_4u_unmap_sg, .sync_single_for_cpu = dma_4u_sync_single_for_cpu, .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, + .dma_supported = dma_4u_supported, .mapping_error = dma_4u_mapping_error, }; const struct dma_map_ops *dma_ops = &sun4u_dma_ops; EXPORT_SYMBOL(dma_ops); - -int dma_supported(struct device *dev, u64 device_mask) -{ - struct iommu *iommu = dev->archdata.iommu; - u64 dma_addr_mask = iommu->dma_addr_mask; - - if (device_mask > DMA_BIT_MASK(32)) { - if (iommu->atu) - dma_addr_mask = iommu->atu->dma_addr_mask; - else - return 0; - } - - if ((device_mask & dma_addr_mask) == dma_addr_mask) - return 1; - -#ifdef CONFIG_PCI - if (dev_is_pci(dev)) - return pci64_dma_supported(to_pci_dev(dev), device_mask); -#endif - - return 0; -} -EXPORT_SYMBOL(dma_supported); diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index dd081d557609..12894f259bea 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -401,6 +401,11 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, BUG(); } +static int sbus_dma_supported(struct device *dev, u64 mask) +{ + return 0; +} + static const struct dma_map_ops sbus_dma_ops = { .alloc = sbus_alloc_coherent, .free = sbus_free_coherent, @@ -410,6 +415,7 @@ static const struct dma_map_ops sbus_dma_ops = { .unmap_sg = sbus_unmap_sg, .sync_sg_for_cpu = sbus_sync_sg_for_cpu, .sync_sg_for_device = sbus_sync_sg_for_device, + .dma_supported = sbus_dma_supported, }; static int __init sparc_register_ioport(void) @@ -655,22 +661,6 @@ EXPORT_SYMBOL(pci32_dma_ops); const struct dma_map_ops *dma_ops = &sbus_dma_ops; EXPORT_SYMBOL(dma_ops); - -/* - * Return whether the given PCI device DMA address mask can be - * supported properly. For example, if your device can only drive the - * low 24-bits during PCI bus mastering, then you would pass - * 0x00ffffff as the mask to this function. 
- */ -int dma_supported(struct device *dev, u64 mask) -{ - if (dev_is_pci(dev)) - return 1; - - return 0; -} -EXPORT_SYMBOL(dma_supported); - #ifdef CONFIG_PROC_FS static int sparc_io_proc_show(struct seq_file *m, void *v) diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 8e2a56f4c03a..24f21c726dfa 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -24,6 +24,7 @@ #include "pci_impl.h" #include "iommu_common.h" +#include "kernel.h" #include "pci_sun4v.h" @@ -669,6 +670,21 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, local_irq_restore(flags); } +static int dma_4v_supported(struct device *dev, u64 device_mask) +{ + struct iommu *iommu = dev->archdata.iommu; + u64 dma_addr_mask; + + if (device_mask > DMA_BIT_MASK(32) && iommu->atu) + dma_addr_mask = iommu->atu->dma_addr_mask; + else + dma_addr_mask = iommu->dma_addr_mask; + + if ((device_mask & dma_addr_mask) == dma_addr_mask) + return 1; + return pci64_dma_supported(to_pci_dev(dev), device_mask); +} + static int dma_4v_mapping_error(struct device *dev, dma_addr_t dma_addr) { return dma_addr == SPARC_MAPPING_ERROR; @@ -681,6 +697,7 @@ static const struct dma_map_ops sun4v_dma_ops = { .unmap_page = dma_4v_unmap_page, .map_sg = dma_4v_map_sg, .unmap_sg = dma_4v_unmap_sg, + .dma_supported = dma_4v_supported, .mapping_error = dma_4v_mapping_error, }; -- cgit v1.2.3 From 050d228a740a2e4d988801e64b2aee0159542f59 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 11:03:30 +0200 Subject: dma-noop: remove dma_supported and mapping_error methods These just duplicate the default behavior if no method is provided. Signed-off-by: Christoph Hellwig --- lib/dma-noop.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/lib/dma-noop.c b/lib/dma-noop.c index de26c8b68f34..643a074f139d 100644 --- a/lib/dma-noop.c +++ b/lib/dma-noop.c @@ -54,23 +54,11 @@ static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nent return nents; } -static int dma_noop_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return 0; -} - -static int dma_noop_supported(struct device *dev, u64 mask) -{ - return 1; -} - const struct dma_map_ops dma_noop_ops = { .alloc = dma_noop_alloc, .free = dma_noop_free, .map_page = dma_noop_map_page, .map_sg = dma_noop_map_sg, - .mapping_error = dma_noop_mapping_error, - .dma_supported = dma_noop_supported, }; EXPORT_SYMBOL(dma_noop_ops); -- cgit v1.2.3 From 3be6d9b6da2ca62a4fd73a401b26f4a1dac2a47d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 11:04:17 +0200 Subject: dma-virt: remove dma_supported and mapping_error methods These just duplicate the default behavior if no method is provided. 
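The generic helpers already handle the no-method case, which is what makes these stubs redundant. A simplified sketch of the dispatch in include/linux/dma-mapping.h as it stands after this series (debug hooks omitted):

    static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
    {
            if (get_dma_ops(dev)->mapping_error)
                    return get_dma_ops(dev)->mapping_error(dev, dma_addr);
            return 0;               /* no method: mappings never fail */
    }

    static inline int dma_supported(struct device *dev, u64 mask)
    {
            const struct dma_map_ops *ops = get_dma_ops(dev);

            if (!ops)
                    return 0;
            if (!ops->dma_supported)
                    return 1;       /* no method: any mask is accepted */
            return ops->dma_supported(dev, mask);
    }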
Signed-off-by: Christoph Hellwig --- lib/dma-virt.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/lib/dma-virt.c b/lib/dma-virt.c index dcd4df1f7174..5c4f11329721 100644 --- a/lib/dma-virt.c +++ b/lib/dma-virt.c @@ -51,22 +51,10 @@ static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl, return nents; } -static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return false; -} - -static int dma_virt_supported(struct device *dev, u64 mask) -{ - return true; -} - const struct dma_map_ops dma_virt_ops = { .alloc = dma_virt_alloc, .free = dma_virt_free, .map_page = dma_virt_map_page, .map_sg = dma_virt_map_sg, - .mapping_error = dma_virt_mapping_error, - .dma_supported = dma_virt_supported, }; EXPORT_SYMBOL(dma_virt_ops); -- cgit v1.2.3 From a2b63aa2f45314bea077275c6009ba00c83826a0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 11:07:48 +0200 Subject: hexagon: remove arch-specific dma_supported implementation This implementation is simply bogus - hexagon only has a simple direct mapped DMA implementation and thus doesn't care about the address. Signed-off-by: Christoph Hellwig Acked-by: Richard Kuo --- arch/hexagon/include/asm/dma-mapping.h | 2 -- arch/hexagon/kernel/dma.c | 9 --------- 2 files changed, 11 deletions(-) diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index 00e3f10113b0..9c15cb5271a6 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -37,8 +37,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return dma_ops; } -#define HAVE_ARCH_DMA_SUPPORTED 1 -extern int dma_supported(struct device *dev, u64 mask); extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle); extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c index 8140b1c5de68..546792d176a4 100644 --- a/arch/hexagon/kernel/dma.c +++ b/arch/hexagon/kernel/dma.c @@ -35,15 +35,6 @@ static inline void *dma_addr_to_virt(dma_addr_t dma_addr) return phys_to_virt((unsigned long) dma_addr); } -int dma_supported(struct device *dev, u64 mask) -{ - if (mask == DMA_BIT_MASK(32)) - return 1; - else - return 0; -} -EXPORT_SYMBOL(dma_supported); - static struct gen_pool *coherent_pool; -- cgit v1.2.3 From 3d6119a4a1f3f6c36be494d72a0374822b4a265c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 11:10:08 +0200 Subject: hexagon: remove the unused dma_is_consistent prototype Signed-off-by: Christoph Hellwig --- arch/hexagon/include/asm/dma-mapping.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index 9c15cb5271a6..463dbc18f853 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -37,7 +37,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return dma_ops; } -extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle); extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); -- cgit v1.2.3 From 19ad53e498745416a77c0377cbb4ee771eedd35b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 11:11:20 +0200 Subject: openrisc: remove arch-specific dma_supported implementation This implementation is simply bogus - openrisc only has a simple direct mapped 
DMA implementation and thus doesn't care about the address. Signed-off-by: Christoph Hellwig --- arch/openrisc/include/asm/dma-mapping.h | 7 ------- 1 file changed, 7 deletions(-) diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index a4ea139c2ef9..f41bd3cb76d9 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -33,11 +33,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return &or1k_dma_map_ops; } -#define HAVE_ARCH_DMA_SUPPORTED 1 -static inline int dma_supported(struct device *dev, u64 dma_mask) -{ - /* Support 32 bit DMA mask exclusively */ - return dma_mask == DMA_BIT_MASK(32); -} - #endif /* __ASM_OPENRISC_DMA_MAPPING_H */ -- cgit v1.2.3 From 418a7a7e4f05f36d6e4ab5b8548ea71f0b602140 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 11:20:18 +0200 Subject: arm: remove arch specific dma_supported implementation And instead wire it up as method for all the dma_map_ops instances. Note that the code seems a little fishy for dmabounce and iommu, but for now I'd like to preserve the existing behavior 1:1. Signed-off-by: Christoph Hellwig --- arch/arm/common/dmabounce.c | 1 + arch/arm/include/asm/dma-iommu.h | 2 ++ arch/arm/include/asm/dma-mapping.h | 3 --- arch/arm/mm/dma-mapping.c | 7 +++++-- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 4060378e0f14..6ecd5be5d37e 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -473,6 +473,7 @@ static const struct dma_map_ops dmabounce_ops = { .sync_sg_for_device = arm_dma_sync_sg_for_device, .set_dma_mask = dmabounce_set_mask, .mapping_error = dmabounce_mapping_error, + .dma_supported = arm_dma_supported, }; static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h index 389a26a10ea3..c090ec675eac 100644 --- a/arch/arm/include/asm/dma-iommu.h +++ b/arch/arm/include/asm/dma-iommu.h @@ -35,5 +35,7 @@ int arm_iommu_attach_device(struct device *dev, struct dma_iommu_mapping *mapping); void arm_iommu_detach_device(struct device *dev); +int arm_dma_supported(struct device *dev, u64 mask); + #endif /* __KERNEL__ */ #endif diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 52a8fd5a8edb..8dabcfdf4505 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -20,9 +20,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return &arm_dma_ops; } -#define HAVE_ARCH_DMA_SUPPORTED 1 -extern int dma_supported(struct device *dev, u64 mask); - #ifdef __arch_page_to_dma #error Please update to __arch_pfn_to_dma #endif diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 8f2c5a8a98f0..b9677ada421f 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -199,6 +199,7 @@ const struct dma_map_ops arm_dma_ops = { .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, .sync_sg_for_device = arm_dma_sync_sg_for_device, .mapping_error = arm_dma_mapping_error, + .dma_supported = arm_dma_supported, }; EXPORT_SYMBOL(arm_dma_ops); @@ -218,6 +219,7 @@ const struct dma_map_ops arm_coherent_dma_ops = { .map_page = arm_coherent_dma_map_page, .map_sg = arm_dma_map_sg, .mapping_error = arm_dma_mapping_error, + .dma_supported = arm_dma_supported, }; EXPORT_SYMBOL(arm_coherent_dma_ops); @@ -1184,11 +1186,10 
@@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, * during bus mastering, then you would pass 0x00ffffff as the mask * to this function. */ -int dma_supported(struct device *dev, u64 mask) +int arm_dma_supported(struct device *dev, u64 mask) { return __dma_supported(dev, mask, false); } -EXPORT_SYMBOL(dma_supported); #define PREALLOC_DMA_DEBUG_ENTRIES 4096 @@ -2149,6 +2150,7 @@ const struct dma_map_ops iommu_ops = { .unmap_resource = arm_iommu_unmap_resource, .mapping_error = arm_dma_mapping_error, + .dma_supported = arm_dma_supported, }; const struct dma_map_ops iommu_coherent_ops = { @@ -2167,6 +2169,7 @@ const struct dma_map_ops iommu_coherent_ops = { .unmap_resource = arm_iommu_unmap_resource, .mapping_error = arm_dma_mapping_error, + .dma_supported = arm_dma_supported, }; /** -- cgit v1.2.3 From 5860acc1a905b05c14e9ed9e22c20aad1a72554a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 11:38:27 +0200 Subject: x86: remove arch specific dma_supported implementation And instead wire it up as method for all the dma_map_ops instances. Note that this also means the arch specific check will be fully instead of partially applied in the AMD iommu driver. Signed-off-by: Christoph Hellwig --- arch/x86/include/asm/dma-mapping.h | 3 --- arch/x86/include/asm/iommu.h | 2 ++ arch/x86/kernel/amd_gart_64.c | 1 + arch/x86/kernel/pci-calgary_64.c | 1 + arch/x86/kernel/pci-dma.c | 8 +------- arch/x86/kernel/pci-nommu.c | 1 + arch/x86/pci/sta2x11-fixup.c | 3 ++- drivers/iommu/amd_iommu.c | 2 ++ drivers/iommu/intel-iommu.c | 3 +++ 9 files changed, 13 insertions(+), 11 deletions(-) diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index c35d228aa381..398c79889f5c 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -33,9 +33,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); #define arch_dma_alloc_attrs arch_dma_alloc_attrs -#define HAVE_ARCH_DMA_SUPPORTED 1 -extern int dma_supported(struct device *hwdev, u64 mask); - extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, unsigned long attrs); diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index 793869879464..fca144a104e4 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h @@ -6,6 +6,8 @@ extern int force_iommu, no_iommu; extern int iommu_detected; extern int iommu_pass_through; +int x86_dma_supported(struct device *dev, u64 mask); + /* 10 seconds */ #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 815dd63f49d0..cc0e8bc0ea3f 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -704,6 +704,7 @@ static const struct dma_map_ops gart_dma_ops = { .alloc = gart_alloc_coherent, .free = gart_free_coherent, .mapping_error = gart_mapping_error, + .dma_supported = x86_dma_supported, }; static void gart_iommu_shutdown(void) diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index e75b490f2b0b..5286a4a92cf7 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -493,6 +493,7 @@ static const struct dma_map_ops calgary_dma_ops = { .map_page = calgary_map_page, .unmap_page = calgary_unmap_page, .mapping_error = calgary_mapping_error, + .dma_supported = x86_dma_supported, }; 
static inline void __iomem * busno_to_bbar(unsigned char num) diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 3a216ec869cd..5e16d3f29594 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -213,10 +213,8 @@ static __init int iommu_setup(char *p) } early_param("iommu", iommu_setup); -int dma_supported(struct device *dev, u64 mask) +int x86_dma_supported(struct device *dev, u64 mask) { - const struct dma_map_ops *ops = get_dma_ops(dev); - #ifdef CONFIG_PCI if (mask > 0xffffffff && forbid_dac > 0) { dev_info(dev, "PCI: Disallowing DAC for device\n"); @@ -224,9 +222,6 @@ int dma_supported(struct device *dev, u64 mask) } #endif - if (ops->dma_supported) - return ops->dma_supported(dev, mask); - /* Copied from i386. Doesn't make much sense, because it will only work for pci_alloc_coherent. The caller just has to use GFP_DMA in this case. */ @@ -252,7 +247,6 @@ int dma_supported(struct device *dev, u64 mask) return 1; } -EXPORT_SYMBOL(dma_supported); static int __init pci_iommu_init(void) { diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 085fe6ce4049..a6d404087fe3 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c @@ -104,4 +104,5 @@ const struct dma_map_ops nommu_dma_ops = { .sync_sg_for_device = nommu_sync_sg_for_device, .is_phys = 1, .mapping_error = nommu_mapping_error, + .dma_supported = x86_dma_supported, }; diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index ec008e800b45..53d600217973 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c @@ -26,6 +26,7 @@ #include #include #include +#include #define STA2X11_SWIOTLB_SIZE (4*1024*1024) extern int swiotlb_late_init_with_default_size(size_t default_size); @@ -191,7 +192,7 @@ static const struct dma_map_ops sta2x11_dma_ops = { .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, .sync_sg_for_device = swiotlb_sync_sg_for_device, .mapping_error = swiotlb_dma_mapping_error, - .dma_supported = NULL, /* FIXME: we should use this instead! 
*/ + .dma_supported = x86_dma_supported, }; /* At setup time, we use our own ops if the device is a ConneXt one */ diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index d41280e869de..521fdf2d41bc 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2731,6 +2731,8 @@ free_mem: */ static int amd_iommu_dma_supported(struct device *dev, u64 mask) { + if (!x86_dma_supported(dev, mask)) + return 0; return check_device(dev); } diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index fc2765ccdb57..53cc0a393f04 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3981,6 +3981,9 @@ struct dma_map_ops intel_dma_ops = { .map_page = intel_map_page, .unmap_page = intel_unmap_page, .mapping_error = intel_mapping_error, +#ifdef CONFIG_X86 + .dma_supported = x86_dma_supported, +#endif }; static inline int iommu_domain_cache_init(void) -- cgit v1.2.3 From 447d899b18169b2ee5e42b2fa8b32dbb40a30a24 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 11:40:56 +0200 Subject: dma-mapping: remove HAVE_ARCH_DMA_SUPPORTED Signed-off-by: Christoph Hellwig --- include/linux/dma-mapping.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index a57875309bfd..3e5908656226 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -549,7 +549,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return 0; } -#ifndef HAVE_ARCH_DMA_SUPPORTED static inline int dma_supported(struct device *dev, u64 mask) { const struct dma_map_ops *ops = get_dma_ops(dev); @@ -560,7 +559,6 @@ static inline int dma_supported(struct device *dev, u64 mask) return 1; return ops->dma_supported(dev, mask); } -#endif #ifndef HAVE_ARCH_DMA_SET_MASK static inline int dma_set_mask(struct device *dev, u64 mask) -- cgit v1.2.3 From 69f07be6221c4af8b1164a4ef229bced56846f92 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 11:50:29 +0200 Subject: mips/loongson64: implement ->dma_supported instead of ->set_dma_mask Same behavior, less code duplication. 
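The behavior is unchanged because the generic dma_set_mask() routes mask validation through dma_supported() before committing the new mask. Once the ->set_dma_mask method is removed later in this series, the common path is simply:

    static inline int dma_set_mask(struct device *dev, u64 mask)
    {
            if (!dev->dma_mask || !dma_supported(dev, mask))
                    return -EIO;
            *dev->dma_mask = mask;
            return 0;
    }

so an implementation only needs to veto unsupported masks in its ->dma_supported method instead of duplicating the mask bookkeeping in a ->set_dma_mask of its own.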
Signed-off-by: Christoph Hellwig --- arch/mips/loongson64/common/dma-swiotlb.c | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c index 178ca17a5667..34486c138206 100644 --- a/arch/mips/loongson64/common/dma-swiotlb.c +++ b/arch/mips/loongson64/common/dma-swiotlb.c @@ -75,19 +75,11 @@ static void loongson_dma_sync_sg_for_device(struct device *dev, mb(); } -static int loongson_dma_set_mask(struct device *dev, u64 mask) +static int loongson_dma_supported(struct device *dev, u64 mask) { - if (!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - - if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits)) { - *dev->dma_mask = DMA_BIT_MASK(loongson_sysconf.dma_mask_bits); - return -EIO; - } - - *dev->dma_mask = mask; - - return 0; + if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits)) + return 0; + return swiotlb_dma_supported(dev, mask); } dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) @@ -126,8 +118,7 @@ static const struct dma_map_ops loongson_dma_map_ops = { .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, .sync_sg_for_device = loongson_dma_sync_sg_for_device, .mapping_error = swiotlb_dma_mapping_error, - .dma_supported = swiotlb_dma_supported, - .set_dma_mask = loongson_dma_set_mask + .dma_supported = loongson_dma_supported, }; void __init plat_swiotlb_setup(void) -- cgit v1.2.3 From 64273a61b2e247e29c2647e83edc090fd01a118b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 11:53:36 +0200 Subject: arm: implement ->dma_supported instead of ->set_dma_mask Same behavior, less code duplication. Signed-off-by: Christoph Hellwig --- arch/arm/common/dmabounce.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 6ecd5be5d37e..9a92de63426f 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -445,12 +445,12 @@ static void dmabounce_sync_for_device(struct device *dev, arm_dma_ops.sync_single_for_device(dev, handle, size, dir); } -static int dmabounce_set_mask(struct device *dev, u64 dma_mask) +static int dmabounce_dma_supported(struct device *dev, u64 dma_mask) { if (dev->archdata.dmabounce) return 0; - return arm_dma_ops.set_dma_mask(dev, dma_mask); + return arm_dma_ops.dma_supported(dev, dma_mask); } static int dmabounce_mapping_error(struct device *dev, dma_addr_t dma_addr) @@ -471,9 +471,8 @@ static const struct dma_map_ops dmabounce_ops = { .unmap_sg = arm_dma_unmap_sg, .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, .sync_sg_for_device = arm_dma_sync_sg_for_device, - .set_dma_mask = dmabounce_set_mask, + .dma_supported = dmabounce_dma_supported, .mapping_error = dmabounce_mapping_error, - .dma_supported = arm_dma_supported, }; static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, -- cgit v1.2.3 From a88f5401010115ae8b1a21c1609f2fb4332ea5eb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 11:52:17 +0200 Subject: xen-swiotlb: remove xen_swiotlb_set_dma_mask This just duplicates the generic implementation. 
Signed-off-by: Christoph Hellwig --- drivers/xen/swiotlb-xen.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index c3a04b2d7532..82fc54f8eb77 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -661,17 +661,6 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask) return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask; } -static int -xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask) -{ - if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask)) - return -EIO; - - *dev->dma_mask = dma_mask; - - return 0; -} - /* * Create userspace mapping for the DMA-coherent memory. * This function should be called with the pages from the current domain only, @@ -734,7 +723,6 @@ const struct dma_map_ops xen_swiotlb_dma_ops = { .map_page = xen_swiotlb_map_page, .unmap_page = xen_swiotlb_unmap_page, .dma_supported = xen_swiotlb_dma_supported, - .set_dma_mask = xen_swiotlb_set_dma_mask, .mmap = xen_swiotlb_dma_mmap, .get_sgtable = xen_swiotlb_get_sgtable, .mapping_error = xen_swiotlb_mapping_error, -- cgit v1.2.3 From c1f03776327f97674058fe0e10bf5408b8db14b9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 12:02:58 +0200 Subject: tile: remove dma_supported and mapping_error methods These just duplicate the default behavior if no method is provided. Signed-off-by: Christoph Hellwig --- arch/tile/kernel/pci-dma.c | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c index 569bb6dd154a..f2abedc8a080 100644 --- a/arch/tile/kernel/pci-dma.c +++ b/arch/tile/kernel/pci-dma.c @@ -317,18 +317,6 @@ static void tile_dma_sync_sg_for_device(struct device *dev, } } -static inline int -tile_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return 0; -} - -static inline int -tile_dma_supported(struct device *dev, u64 mask) -{ - return 1; -} - static const struct dma_map_ops tile_default_dma_map_ops = { .alloc = tile_dma_alloc_coherent, .free = tile_dma_free_coherent, @@ -340,8 +328,6 @@ static const struct dma_map_ops tile_default_dma_map_ops = { .sync_single_for_device = tile_dma_sync_single_for_device, .sync_sg_for_cpu = tile_dma_sync_sg_for_cpu, .sync_sg_for_device = tile_dma_sync_sg_for_device, - .mapping_error = tile_dma_mapping_error, - .dma_supported = tile_dma_supported }; const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops; @@ -504,18 +490,6 @@ static void tile_pci_dma_sync_sg_for_device(struct device *dev, } } -static inline int -tile_pci_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return 0; -} - -static inline int -tile_pci_dma_supported(struct device *dev, u64 mask) -{ - return 1; -} - static const struct dma_map_ops tile_pci_default_dma_map_ops = { .alloc = tile_pci_dma_alloc_coherent, .free = tile_pci_dma_free_coherent, @@ -527,8 +501,6 @@ static const struct dma_map_ops tile_pci_default_dma_map_ops = { .sync_single_for_device = tile_pci_dma_sync_single_for_device, .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu, .sync_sg_for_device = tile_pci_dma_sync_sg_for_device, - .mapping_error = tile_pci_dma_mapping_error, - .dma_supported = tile_pci_dma_supported }; const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops; @@ -578,8 +550,6 @@ static const struct dma_map_ops pci_hybrid_dma_ops = { .sync_single_for_device = tile_pci_dma_sync_single_for_device, .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu, .sync_sg_for_device = 
tile_pci_dma_sync_sg_for_device, - .mapping_error = tile_pci_dma_mapping_error, - .dma_supported = tile_pci_dma_supported }; const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops; -- cgit v1.2.3 From 228a5e1a875006588f394345e96c64215f8b132e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 May 2017 13:18:56 +0200 Subject: powerpc/cell: clean up fixed mapping dma_ops initialization By the time cell_pci_dma_dev_setup calls cell_dma_dev_setup no device can have the fixed map_ops set yet as it's only set by the set_dma_mask method. So move the setup for the fixed case to be only called in that place instead of indirecting through cell_dma_dev_setup. Signed-off-by: Christoph Hellwig --- arch/powerpc/platforms/cell/iommu.c | 27 +++++++-------------------- 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 948086e33a0c..497bfbdbd967 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -663,14 +663,9 @@ static const struct dma_map_ops dma_iommu_fixed_ops = { .mapping_error = dma_iommu_mapping_error, }; -static void cell_dma_dev_setup_fixed(struct device *dev); - static void cell_dma_dev_setup(struct device *dev) { - /* Order is important here, these are not mutually exclusive */ - if (get_dma_ops(dev) == &dma_iommu_fixed_ops) - cell_dma_dev_setup_fixed(dev); - else if (get_pci_dma_ops() == &dma_iommu_ops) + if (get_pci_dma_ops() == &dma_iommu_ops) set_iommu_table_base(dev, cell_get_iommu_table(dev)); else if (get_pci_dma_ops() == &dma_direct_ops) set_dma_offset(dev, cell_dma_direct_offset); @@ -963,32 +958,24 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask) return -EIO; if (dma_mask == DMA_BIT_MASK(64) && - cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) - { + cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) { + u64 addr = cell_iommu_get_fixed_address(dev) + + dma_iommu_fixed_base; dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n"); + dev_dbg(dev, "iommu: fixed addr = %llx\n", addr); set_dma_ops(dev, &dma_iommu_fixed_ops); + set_dma_offset(dev, addr); } else { dev_dbg(dev, "iommu: not 64-bit, using default ops\n"); set_dma_ops(dev, get_pci_dma_ops()); + cell_dma_dev_setup(dev); } - cell_dma_dev_setup(dev); - *dev->dma_mask = dma_mask; return 0; } -static void cell_dma_dev_setup_fixed(struct device *dev) -{ - u64 addr; - - addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base; - set_dma_offset(dev, addr); - - dev_dbg(dev, "iommu: fixed addr = %llx\n", addr); -} - static void insert_16M_pte(unsigned long addr, unsigned long *ptab, unsigned long base_pte) { -- cgit v1.2.3 From 7eb8a7a9e854640508fefc5a6d0bb50de243610b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 23 May 2017 14:57:57 +0200 Subject: powerpc/cell: use the dma_supported method for ops switching Besides removing the last instance of the set_dma_mask method this also reduced the code duplication. 
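This works because dma_set_mask() consults the ->dma_supported method before committing a new mask, so the method may install a different dma_map_ops instance as a side effect and then report success. A sketch of the idea with hypothetical foo_* names, where foo_has_fixed_window() stands in for the cell_iommu_get_fixed_address() check in the patch below:

    static int foo_dma_supported_and_switch(struct device *dev, u64 dma_mask)
    {
            if (dma_mask == DMA_BIT_MASK(64) && foo_has_fixed_window(dev)) {
                    set_dma_ops(dev, &foo_fixed_ops);       /* switch ops as a side effect */
                    return 1;
            }
            if (dma_iommu_dma_supported(dev, dma_mask)) {
                    set_dma_ops(dev, get_pci_dma_ops());    /* fall back to the default ops */
                    return 1;
            }
            return 0;
    }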
Signed-off-by: Christoph Hellwig --- arch/powerpc/platforms/cell/iommu.c | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 497bfbdbd967..29d4f96ed33e 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -644,20 +644,14 @@ static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg, direction, attrs); } -static int dma_fixed_dma_supported(struct device *dev, u64 mask) -{ - return mask == DMA_BIT_MASK(64); -} - -static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask); +static int dma_suported_and_switch(struct device *dev, u64 dma_mask); static const struct dma_map_ops dma_iommu_fixed_ops = { .alloc = dma_fixed_alloc_coherent, .free = dma_fixed_free_coherent, .map_sg = dma_fixed_map_sg, .unmap_sg = dma_fixed_unmap_sg, - .dma_supported = dma_fixed_dma_supported, - .set_dma_mask = dma_set_mask_and_switch, + .dma_supported = dma_suported_and_switch, .map_page = dma_fixed_map_page, .unmap_page = dma_fixed_unmap_page, .mapping_error = dma_iommu_mapping_error, @@ -952,11 +946,8 @@ out: return dev_addr; } -static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask) +static int dma_suported_and_switch(struct device *dev, u64 dma_mask) { - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) - return -EIO; - if (dma_mask == DMA_BIT_MASK(64) && cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) { u64 addr = cell_iommu_get_fixed_address(dev) + @@ -965,14 +956,16 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask) dev_dbg(dev, "iommu: fixed addr = %llx\n", addr); set_dma_ops(dev, &dma_iommu_fixed_ops); set_dma_offset(dev, addr); - } else { + return 1; + } + + if (dma_iommu_dma_supported(dev, dma_mask)) { dev_dbg(dev, "iommu: not 64-bit, using default ops\n"); set_dma_ops(dev, get_pci_dma_ops()); cell_dma_dev_setup(dev); + return 1; } - *dev->dma_mask = dma_mask; - return 0; } @@ -1127,7 +1120,7 @@ static int __init cell_iommu_fixed_mapping_init(void) cell_iommu_setup_window(iommu, np, dbase, dsize, 0); } - dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch; + dma_iommu_ops.dma_supported = dma_suported_and_switch; set_pci_dma_ops(&dma_iommu_ops); return 0; -- cgit v1.2.3 From 8cc9c26029d8ac3c627ecf8545b617fb78def5d4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 Jun 2017 17:05:25 +0200 Subject: dma-mapping: remove the set_dma_mask method Signed-off-by: Christoph Hellwig --- arch/powerpc/kernel/dma.c | 4 ---- include/linux/dma-mapping.h | 6 ------ 2 files changed, 10 deletions(-) diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 41c749586bd2..466c9f07b288 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -316,10 +316,6 @@ EXPORT_SYMBOL(dma_set_coherent_mask); int __dma_set_mask(struct device *dev, u64 dma_mask) { - const struct dma_map_ops *dma_ops = get_dma_ops(dev); - - if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) - return dma_ops->set_dma_mask(dev, dma_mask); if (!dev->dma_mask || !dma_supported(dev, dma_mask)) return -EIO; *dev->dma_mask = dma_mask; diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 3e5908656226..527f2ed8c645 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -127,7 +127,6 @@ struct dma_map_ops { enum dma_data_direction dir); int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); int (*dma_supported)(struct device *dev, u64 mask); - 
int (*set_dma_mask)(struct device *dev, u64 mask); #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK u64 (*get_required_mask)(struct device *dev); #endif @@ -563,11 +562,6 @@ static inline int dma_supported(struct device *dev, u64 mask) #ifndef HAVE_ARCH_DMA_SET_MASK static inline int dma_set_mask(struct device *dev, u64 mask) { - const struct dma_map_ops *ops = get_dma_ops(dev); - - if (ops->set_dma_mask) - return ops->set_dma_mask(dev, mask); - if (!dev->dma_mask || !dma_supported(dev, mask)) return -EIO; *dev->dma_mask = mask; -- cgit v1.2.3 From a9a7b06f58d82191f18f626c0d942ec3655c0bde Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 23 May 2017 15:42:02 +0200 Subject: powerpc: merge __dma_set_mask into dma_set_mask Signed-off-by: Christoph Hellwig --- arch/powerpc/include/asm/dma-mapping.h | 1 - arch/powerpc/kernel/dma.c | 13 ++++--------- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 73aedbe6c977..eaece3d3e225 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -112,7 +112,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off) #define HAVE_ARCH_DMA_SET_MASK 1 extern int dma_set_mask(struct device *dev, u64 dma_mask); -extern int __dma_set_mask(struct device *dev, u64 dma_mask); extern u64 __dma_get_required_mask(struct device *dev); static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 466c9f07b288..4194bbbbdb10 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -314,14 +314,6 @@ EXPORT_SYMBOL(dma_set_coherent_mask); #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) -int __dma_set_mask(struct device *dev, u64 dma_mask) -{ - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) - return -EIO; - *dev->dma_mask = dma_mask; - return 0; -} - int dma_set_mask(struct device *dev, u64 dma_mask) { if (ppc_md.dma_set_mask) @@ -334,7 +326,10 @@ int dma_set_mask(struct device *dev, u64 dma_mask) return phb->controller_ops.dma_set_mask(pdev, dma_mask); } - return __dma_set_mask(dev, dma_mask); + if (!dev->dma_mask || !dma_supported(dev, dma_mask)) + return -EIO; + *dev->dma_mask = dma_mask; + return 0; } EXPORT_SYMBOL(dma_set_mask); -- cgit v1.2.3 From 2e7d1098c00caebc8e31c4d338a49e88c979dd2b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 20 Jun 2017 11:17:30 +0200 Subject: MAINTAINERS: add entry for dma mapping helpers This code has been spread between getting in through arch trees, the iommu tree, -mm and the drivers tree. There will be a lot of work in this area, including consolidating various arch implementations into more common code, so ensure we have a proper git tree that facilitates cooperation with the architecture maintainers. 
Signed-off-by: Christoph Hellwig --- MAINTAINERS | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 09b5ab6a8a5c..9a08861ba201 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2595,6 +2595,21 @@ S: Maintained F: net/bluetooth/ F: include/net/bluetooth/ +DMA MAPPING HELPERS +M: Christoph Hellwig +M: Marek Szyprowski +R: Robin Murphy +L: linux-kernel@vger.kernel.org +T: git git://git.infradead.org/users/hch/dma-mapping.git +W: http://git.infradead.org/users/hch/dma-mapping.git +S: Supported +F: lib/dma-debug.c +F: lib/dma-noop.c +F: lib/dma-virt.c +F: drivers/base/dma-mapping.c +F: drivers/base/dma-coherent.c +F: include/linux/dma-mapping.h + BONDING DRIVER M: Jay Vosburgh M: Veaceslav Falico -- cgit v1.2.3 From c4d9d598fd2a82136175706e599c4150d0962b1b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 14 Jun 2017 10:37:32 +0200 Subject: au1100fb: remove a bogus dma_free_noncoherent call au1100fb is using managed dma allocations, so it doesn't need to explicitly free the dma memory in the error path (and if it did it would have to use the managed version). Signed-off-by: Christoph Hellwig Acked-by: Bartlomiej Zolnierkiewicz --- drivers/video/fbdev/au1100fb.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c index 35df2c1a8a63..8de42f617d16 100644 --- a/drivers/video/fbdev/au1100fb.c +++ b/drivers/video/fbdev/au1100fb.c @@ -532,10 +532,6 @@ failed: clk_disable_unprepare(fbdev->lcdclk); clk_put(fbdev->lcdclk); } - if (fbdev->fb_mem) { - dma_free_noncoherent(&dev->dev, fbdev->fb_len, fbdev->fb_mem, - fbdev->fb_phys); - } if (fbdev->info.cmap.len != 0) { fb_dealloc_cmap(&fbdev->info.cmap); } -- cgit v1.2.3 From 72eed063767e131831fa10a0909c39a0254836ec Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 22 Jun 2017 14:35:46 +0200 Subject: crypto: qat - avoid an uninitialized variable warning After commit 9e442aa6a753 ("x86: remove DMA_ERROR_CODE"), the inlining decisions in the qat driver changed slightly, introducing a new false-positive warning: drivers/crypto/qat/qat_common/qat_algs.c: In function 'qat_alg_sgl_to_bufl.isra.6': include/linux/dma-mapping.h:228:2: error: 'sz_out' may be used uninitialized in this function [-Werror=maybe-uninitialized] drivers/crypto/qat/qat_common/qat_algs.c:676:9: note: 'sz_out' was declared here The patch that introduced this is correct, so let's just avoid the warning in this driver by rearranging the unwinding after an error to make it more obvious to the compiler what is going on. The problem here is the 'if (unlikely(dma_mapping_error(dev, blp)))' check, in which the 'unlikely' causes gcc to forget what it knew about the state of the variables. Cleaning up the dma state in the reverse order it was created means we can simplify the logic so it doesn't have to know about that state, and also makes it easier to understand.
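The pattern the patch switches to is the classic kernel unwind idiom: one error label per allocation stage, jumped to in reverse order, so each label only undoes steps that are known to have succeeded. A minimal standalone sketch of that idiom (illustrative only, not the qat code; "struct ctx", IN_SZ and OUT_SZ are invented for the example):

	static int example_setup(struct device *dev, struct ctx *c)
	{
		c->in = kzalloc(IN_SZ, GFP_KERNEL);
		if (!c->in)
			return -ENOMEM;

		c->in_dma = dma_map_single(dev, c->in, IN_SZ, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, c->in_dma))
			goto err_free_in;

		c->out = kzalloc(OUT_SZ, GFP_KERNEL);
		if (!c->out)
			goto err_unmap_in;

		return 0;

	err_unmap_in:
		/* undo exactly the steps known to have succeeded so far */
		dma_unmap_single(dev, c->in_dma, IN_SZ, DMA_TO_DEVICE);
	err_free_in:
		kfree(c->in);
		return -ENOMEM;
	}

Because each label tears down exactly one earlier step, the compiler never has to prove anything about variables that might still be uninitialized, which is what silences the maybe-uninitialized warning.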
Cc: Christoph Hellwig Signed-off-by: Arnd Bergmann Signed-off-by: Christoph Hellwig --- drivers/crypto/qat/qat_common/qat_algs.c | 40 +++++++++++++++++--------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 20f35df8a01f..1a1d75fb189f 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -685,7 +685,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, blp))) - goto err; + goto err_in; for_each_sg(sgl, sg, n, i) { int y = sg_nctr; @@ -698,7 +698,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, DMA_BIDIRECTIONAL); bufl->bufers[y].len = sg->length; if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) - goto err; + goto err_in; sg_nctr++; } bufl->num_bufs = sg_nctr; @@ -716,10 +716,10 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, buflout = kzalloc_node(sz_out, GFP_ATOMIC, dev_to_node(&GET_DEV(inst->accel_dev))); if (unlikely(!buflout)) - goto err; + goto err_in; bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, bloutp))) - goto err; + goto err_out; bufers = buflout->bufers; for_each_sg(sglout, sg, n, i) { int y = sg_nctr; @@ -731,7 +731,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, sg->length, DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(dev, bufers[y].addr))) - goto err; + goto err_out; bufers[y].len = sg->length; sg_nctr++; } @@ -746,9 +746,20 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, qat_req->buf.sz_out = 0; } return 0; -err: - dev_err(dev, "Failed to map buf for dma\n"); - sg_nctr = 0; + +err_out: + n = sg_nents(sglout); + for (i = 0; i < n; i++) + if (!dma_mapping_error(dev, buflout->bufers[i].addr)) + dma_unmap_single(dev, buflout->bufers[i].addr, + buflout->bufers[i].len, + DMA_BIDIRECTIONAL); + if (!dma_mapping_error(dev, bloutp)) + dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); + kfree(buflout); + +err_in: + n = sg_nents(sgl); for (i = 0; i < n; i++) if (!dma_mapping_error(dev, bufl->bufers[i].addr)) dma_unmap_single(dev, bufl->bufers[i].addr, @@ -758,17 +769,8 @@ err: if (!dma_mapping_error(dev, blp)) dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); kfree(bufl); - if (sgl != sglout && buflout) { - n = sg_nents(sglout); - for (i = 0; i < n; i++) - if (!dma_mapping_error(dev, buflout->bufers[i].addr)) - dma_unmap_single(dev, buflout->bufers[i].addr, - buflout->bufers[i].len, - DMA_BIDIRECTIONAL); - if (!dma_mapping_error(dev, bloutp)) - dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); - kfree(buflout); - } + + dev_err(dev, "Failed to map buf for dma\n"); return -ENOMEM; } -- cgit v1.2.3 From 03b643866d889d6edc87cdcee2b3880b7879a441 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 12 Jun 2017 19:05:09 +0200 Subject: dma-mapping: remove dmam_free_noncoherent This function was never used since it was added. 
Signed-off-by: Christoph Hellwig Acked-by: Tejun Heo --- Documentation/driver-model/devres.txt | 1 - drivers/base/dma-mapping.c | 20 -------------------- include/linux/dma-mapping.h | 2 -- 3 files changed, 23 deletions(-) diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt index e72587fe477d..9070ff06b558 100644 --- a/Documentation/driver-model/devres.txt +++ b/Documentation/driver-model/devres.txt @@ -243,7 +243,6 @@ DMA dmam_alloc_noncoherent() dmam_declare_coherent_memory() dmam_free_coherent() - dmam_free_noncoherent() dmam_pool_create() dmam_pool_destroy() diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index f3deb6af42ad..5940c9dace36 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -148,26 +148,6 @@ void *dmam_alloc_noncoherent(struct device *dev, size_t size, } EXPORT_SYMBOL(dmam_alloc_noncoherent); -/** - * dmam_free_coherent - Managed dma_free_noncoherent() - * @dev: Device to free noncoherent memory for - * @size: Size of allocation - * @vaddr: Virtual address of the memory to free - * @dma_handle: DMA handle of the memory to free - * - * Managed dma_free_noncoherent(). - */ -void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) -{ - struct dma_devres match_data = { size, vaddr, dma_handle }; - - dma_free_noncoherent(dev, size, vaddr, dma_handle); - WARN_ON(!devres_destroy(dev, dmam_noncoherent_release, dmam_match, - &match_data)); -} -EXPORT_SYMBOL(dmam_free_noncoherent); - #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT static void dmam_coherent_decl_release(struct device *dev, void *res) diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 527f2ed8c645..4038dd34afa3 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -736,8 +736,6 @@ extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); extern void *dmam_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); -extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle); #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT extern int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, -- cgit v1.2.3 From 63d36c95500400642f656ba1970980746cf437f3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 12 Jun 2017 19:15:04 +0200 Subject: dma-mapping: replace dmam_alloc_noncoherent with dmam_alloc_attrs dmam_alloc_noncoherent is a trivial wrapper around dmam_alloc_attrs, that hardcodes one particular flag. Make the devres code more flexible by allowing the callers to pass arbitrary flags. 
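For callers the conversion is mechanical; the au1200fb hunk below is the one real instance in this series, and the general before/after shape is (a sketch, where dev, size and dma_handle stand for whatever the caller already has):

	/* before: noncoherent semantics hardcoded by the helper */
	buf = dmam_alloc_noncoherent(dev, size, &dma_handle, GFP_KERNEL);

	/* after: the same request spelled through the attrs interface */
	buf = dmam_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
			       DMA_ATTR_NON_CONSISTENT);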
Signed-off-by: Christoph Hellwig Acked-by: Tejun Heo --- Documentation/driver-model/devres.txt | 2 +- drivers/base/dma-mapping.c | 36 ++++++++++++++++------------------- drivers/video/fbdev/au1200fb.c | 5 +++-- include/linux/dma-mapping.h | 5 +++-- 4 files changed, 23 insertions(+), 25 deletions(-) diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt index 9070ff06b558..7e08c02b5393 100644 --- a/Documentation/driver-model/devres.txt +++ b/Documentation/driver-model/devres.txt @@ -240,7 +240,7 @@ CLOCK DMA dmam_alloc_coherent() - dmam_alloc_noncoherent() + dmam_alloc_attrs() dmam_declare_coherent_memory() dmam_free_coherent() dmam_pool_create() diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 5940c9dace36..10e7c022e8cf 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -22,20 +22,15 @@ struct dma_devres { size_t size; void *vaddr; dma_addr_t dma_handle; + unsigned long attrs; }; -static void dmam_coherent_release(struct device *dev, void *res) +static void dmam_release(struct device *dev, void *res) { struct dma_devres *this = res; - dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle); -} - -static void dmam_noncoherent_release(struct device *dev, void *res) -{ - struct dma_devres *this = res; - - dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle); + dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle, + this->attrs); } static int dmam_match(struct device *dev, void *res, void *match_data) @@ -69,7 +64,7 @@ void *dmam_alloc_coherent(struct device *dev, size_t size, struct dma_devres *dr; void *vaddr; - dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp); + dr = devres_alloc(dmam_release, sizeof(*dr), gfp); if (!dr) return NULL; @@ -104,35 +99,35 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, struct dma_devres match_data = { size, vaddr, dma_handle }; dma_free_coherent(dev, size, vaddr, dma_handle); - WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match, - &match_data)); + WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data)); } EXPORT_SYMBOL(dmam_free_coherent); /** - * dmam_alloc_non_coherent - Managed dma_alloc_noncoherent() + * dmam_alloc_attrs - Managed dma_alloc_attrs() * @dev: Device to allocate non_coherent memory for * @size: Size of allocation * @dma_handle: Out argument for allocated DMA handle * @gfp: Allocation flags + * @attrs: Flags in the DMA_ATTR_* namespace. * - * Managed dma_alloc_noncoherent(). Memory allocated using this - * function will be automatically released on driver detach. + * Managed dma_alloc_attrs(). Memory allocated using this function will be + * automatically released on driver detach. * * RETURNS: * Pointer to allocated memory on success, NULL on failure. 
*/ -void *dmam_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) +void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs) { struct dma_devres *dr; void *vaddr; - dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp); + dr = devres_alloc(dmam_release, sizeof(*dr), gfp); if (!dr) return NULL; - vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp); + vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs); if (!vaddr) { devres_free(dr); return NULL; @@ -141,12 +136,13 @@ static void *dmam_alloc_noncoherent(struct device *dev, size_t size, dr->vaddr = vaddr; dr->dma_handle = *dma_handle; dr->size = size; + dr->attrs = attrs; devres_add(dev, dr); return vaddr; } -EXPORT_SYMBOL(dmam_alloc_noncoherent); +EXPORT_SYMBOL(dmam_alloc_attrs); #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index 6c2b2ca4a909..5f04b4096c42 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c @@ -1694,9 +1694,10 @@ static int au1200fb_drv_probe(struct platform_device *dev) /* Allocate the framebuffer to the maximum screen size */ fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8; - fbdev->fb_mem = dmam_alloc_noncoherent(&dev->dev, + fbdev->fb_mem = dmam_alloc_attrs(&dev->dev, PAGE_ALIGN(fbdev->fb_len), - &fbdev->fb_phys, GFP_KERNEL); + &fbdev->fb_phys, GFP_KERNEL, + DMA_ATTR_NON_CONSISTENT); if (!fbdev->fb_mem) { print_err("fail to allocate frambuffer (size: %dK))", fbdev->fb_len / 1024); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 4038dd34afa3..843ab866e0f4 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -734,8 +734,9 @@ extern void *dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); -extern void *dmam_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp); +extern void *dmam_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs); #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT extern int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, -- cgit v1.2.3 From 25f1e18870881f3366a5abec27b651983ce9032c Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Mon, 26 Jun 2017 10:18:55 +0100 Subject: dma: Take into account dma_pfn_offset Even though dma-noop-ops assumes a 1:1 memory mapping, the DMA memory range can be different from RAM. For example, the ARM STM32F4 MCU offers the possibility to remap SDRAM from 0xc000_0000 to 0x0 for a CPU performance boost, but DMA continues to see SDRAM at 0xc000_0000. This difference in mapping is handled via the device-tree "dma-ranges" property, which leads to dev->dma_pfn_offset being set to a nonzero value. To handle such cases, take dma_pfn_offset into account.
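To make the sign convention concrete: the noop ops below subtract PFN_PHYS(dev->dma_pfn_offset) from the CPU physical address. A worked example with invented addresses (4 KiB pages, not the STM32F4 layout): if RAM sits at CPU physical 0x80000000 but the device sees it at 0x00000000, dma-ranges yields dev->dma_pfn_offset == 0x80000, so:

	/* sketch of the arithmetic only; the numbers are hypothetical */
	dma_addr_t handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);
	/* virt_to_phys(ret) == 0x80001000, PFN_PHYS(0x80000) == 0x80000000,
	 * so the device is handed handle == 0x00001000 */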
Cc: Joerg Roedel Cc: Christian Borntraeger Reported-by: Benjamin Gaignard Tested-by: Benjamin Gaignard Tested-by: Andras Szemzo Tested-by: Alexandre TORGUE Reviewed-by: Robin Murphy Signed-off-by: Vladimir Murzin Signed-off-by: Christoph Hellwig --- lib/dma-noop.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/dma-noop.c b/lib/dma-noop.c index 643a074f139d..acc4190e2731 100644 --- a/lib/dma-noop.c +++ b/lib/dma-noop.c @@ -7,6 +7,7 @@ #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> +#include <linux/pfn.h> static void *dma_noop_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, @@ -16,7 +17,8 @@ static void *dma_noop_alloc(struct device *dev, size_t size, ret = (void *)__get_free_pages(gfp, get_order(size)); if (ret) - *dma_handle = virt_to_phys(ret); + *dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset); + return ret; } @@ -32,7 +34,7 @@ static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page, enum dma_data_direction dir, unsigned long attrs) { - return page_to_phys(page) + offset; + return page_to_phys(page) + offset - PFN_PHYS(dev->dma_pfn_offset); } static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents, @@ -43,11 +45,12 @@ static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nent struct scatterlist *sg; for_each_sg(sgl, sg, nents, i) { + dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset); void *va; BUG_ON(!sg_page(sg)); va = sg_virt(sg); - sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va); + sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va) - offset; sg_dma_len(sg) = sg->length; } -- cgit v1.2.3 From c41f9ea998f3ba61f38fa350eef28ec6caf0a07d Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Mon, 26 Jun 2017 10:18:57 +0100 Subject: drivers: dma-coherent: Account dma_pfn_offset when used with device tree dma_declare_coherent_memory() and friends are designed to account for the difference in CPU and device addresses. However, when it is used with reserved memory regions, there is an assumption that the CPU and the device have the same view of the address space. This assumption becomes invalid when the reserved memory for coherent DMA allocations is referenced by a device with a non-empty "dma-ranges" property. Simply feeding the device address as rmem->base + dev->dma_pfn_offset would not work, because the reserved memory region can be shared, so this patch expresses the device address in terms of the CPU address and the device's dma_pfn_offset when the memory reservation has been done via the device tree; non-device-tree users continue to use the old scheme.
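The per-device translation introduced below, dma_get_device_base(), recomputes the device-visible base from the CPU PFN of the reservation instead of a stored device address, so one reserved region shared by devices with different dma-ranges resolves correctly for each. A sketch of the computation with invented numbers (4 KiB pages):

	/* hypothetical values, mirroring dma_get_device_base() below */
	unsigned long pfn_base = 0x90000;	/* CPU PFN of the reserved region (0x90000000) */
	unsigned long dma_pfn_offset = 0x80000;	/* derived from this device's dma-ranges */
	dma_addr_t dev_base = (pfn_base - dma_pfn_offset) << PAGE_SHIFT;
	/* dev_base == 0x10000000: the address this device uses to reach the pool */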
Cc: Michal Nazarewicz Cc: Marek Szyprowski Cc: Roger Quadros Cc: Greg Kroah-Hartman Tested-by: Benjamin Gaignard Tested-by: Andras Szemzo Tested-by: Alexandre TORGUE Signed-off-by: Vladimir Murzin Signed-off-by: Christoph Hellwig --- drivers/base/dma-coherent.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index 640a7e63c453..99c969520f30 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -16,8 +16,18 @@ struct dma_coherent_mem { int flags; unsigned long *bitmap; spinlock_t spinlock; + bool use_dev_dma_pfn_offset; }; +static inline dma_addr_t dma_get_device_base(struct device *dev, + struct dma_coherent_mem * mem) +{ + if (mem->use_dev_dma_pfn_offset) + return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT; + else + return mem->device_base; +} + static bool dma_init_coherent_memory( phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags, struct dma_coherent_mem **mem) @@ -133,7 +143,7 @@ void *dma_mark_declared_memory_occupied(struct device *dev, return ERR_PTR(-EINVAL); spin_lock_irqsave(&mem->spinlock, flags); - pos = (device_addr - mem->device_base) >> PAGE_SHIFT; + pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem)); err = bitmap_allocate_region(mem->bitmap, pos, get_order(size)); spin_unlock_irqrestore(&mem->spinlock, flags); @@ -186,7 +196,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size, /* * Memory was found in the per-device area. */ - *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); + *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT); *ret = mem->virt_base + (pageno << PAGE_SHIFT); dma_memory_map = (mem->flags & DMA_MEMORY_MAP); spin_unlock_irqrestore(&mem->spinlock, flags); @@ -299,6 +309,7 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) &rmem->base, (unsigned long)rmem->size / SZ_1M); return -ENODEV; } + mem->use_dev_dma_pfn_offset = true; rmem->priv = mem; dma_assign_coherent_memory(dev, mem); return 0; -- cgit v1.2.3 From 93228b44c33a572cb36cec2dbed42e9bdbc88d79 Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Mon, 26 Jun 2017 10:18:58 +0100 Subject: drivers: dma-coherent: Introduce default DMA pool This patch introduces a default coherent DMA pool, similar to the default CMA area concept. To keep other users safe, the code is kept under CONFIG_ARM. Cc: Michal Nazarewicz Cc: Marek Szyprowski Cc: Rob Herring Cc: Mark Rutland Cc: Greg Kroah-Hartman Suggested-by: Robin Murphy Tested-by: Benjamin Gaignard Tested-by: Andras Szemzo Tested-by: Alexandre TORGUE Signed-off-by: Vladimir Murzin Signed-off-by: Christoph Hellwig --- .../bindings/reserved-memory/reserved-memory.txt | 3 ++ drivers/base/dma-coherent.c | 59 +++++++++++++++++++--- 2 files changed, 55 insertions(+), 7 deletions(-) diff --git a/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt b/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt index 3da0ebdba8d9..16291f2a4688 100644 --- a/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt +++ b/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt @@ -68,6 +68,9 @@ Linux implementation note: - If a "linux,cma-default" property is present, then Linux will use the region for the default pool of the contiguous memory allocator. +- If a "linux,dma-default" property is present, then Linux will use the + region for the default pool of the consistent DMA allocator.
+ Device node references to reserved memory ----------------------------------------- Regions in the /reserved-memory node may be referenced by other device diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index 99c969520f30..2ae24c28e70c 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -19,6 +19,15 @@ struct dma_coherent_mem { bool use_dev_dma_pfn_offset; }; +static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init; + +static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev) +{ + if (dev && dev->dma_mem) + return dev->dma_mem; + return dma_coherent_default_memory; +} + static inline dma_addr_t dma_get_device_base(struct device *dev, struct dma_coherent_mem * mem) { @@ -93,6 +102,9 @@ static void dma_release_coherent_memory(struct dma_coherent_mem *mem) static int dma_assign_coherent_memory(struct device *dev, struct dma_coherent_mem *mem) { + if (!dev) + return -ENODEV; + if (dev->dma_mem) return -EBUSY; @@ -171,15 +183,12 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied); int dma_alloc_from_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle, void **ret) { - struct dma_coherent_mem *mem; + struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); int order = get_order(size); unsigned long flags; int pageno; int dma_memory_map; - if (!dev) - return 0; - mem = dev->dma_mem; if (!mem) return 0; @@ -233,7 +242,7 @@ EXPORT_SYMBOL(dma_alloc_from_coherent); */ int dma_release_from_coherent(struct device *dev, int order, void *vaddr) { - struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; + struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { @@ -267,7 +276,7 @@ EXPORT_SYMBOL(dma_release_from_coherent); int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) { - struct dma_coherent_mem *mem = dev ? 
dev->dma_mem : NULL; + struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); if (mem && vaddr >= mem->virt_base && vaddr + size <= (mem->virt_base + (mem->size << PAGE_SHIFT))) { @@ -297,6 +306,8 @@ EXPORT_SYMBOL(dma_mmap_from_coherent); #include <linux/of_fdt.h> #include <linux/of_reserved_mem.h> +static struct reserved_mem *dma_reserved_default_memory __initdata; + static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) { struct dma_coherent_mem *mem = rmem->priv; @@ -318,7 +329,8 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) static void rmem_dma_device_release(struct reserved_mem *rmem, struct device *dev) { - dev->dma_mem = NULL; + if (dev) + dev->dma_mem = NULL; } static const struct reserved_mem_ops rmem_dma_ops = { @@ -338,6 +350,12 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem) pr_err("Reserved memory: regions without no-map are not yet supported\n"); return -EINVAL; } + + if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) { + WARN(dma_reserved_default_memory, + "Reserved memory: region for default DMA coherent area is redefined\n"); + dma_reserved_default_memory = rmem; + } #endif rmem->ops = &rmem_dma_ops; @@ -345,5 +363,32 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem) &rmem->base, (unsigned long)rmem->size / SZ_1M); return 0; } + +static int __init dma_init_reserved_memory(void) +{ + const struct reserved_mem_ops *ops; + int ret; + + if (!dma_reserved_default_memory) + return -ENOMEM; + + ops = dma_reserved_default_memory->ops; + + /* + * We rely on rmem_dma_device_init() not propagating the error from + * dma_assign_coherent_memory() for a "NULL" device. + */ + ret = ops->device_init(dma_reserved_default_memory, NULL); + + if (!ret) { + dma_coherent_default_memory = dma_reserved_default_memory->priv; + pr_info("DMA: default coherent area is set\n"); + } + + return ret; +} + +core_initcall(dma_init_reserved_memory); + RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup); #endif -- cgit v1.2.3 From 07c75d7a6b9eae24ab72c6eb2fbd39963775b0bf Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Wed, 28 Jun 2017 10:16:57 +0100 Subject: drivers: dma-mapping: allow dma_common_mmap() for NOMMU Currently, the internals of dma_common_mmap() are compiled out if the build is done for either NOMMU or a target which explicitly says it does not have/want coherent DMA mmap. It turned out that dma_common_mmap() can be handy in a NOMMU setup (at least for ARM). This patch converts the existing NOMMU targets to use ARCH_NO_COHERENT_DMA_MMAP, so when CONFIG_MMU is gone from dma_common_mmap() their behaviour stays unchanged. ARM is not converted to ARCH_NO_COHERENT_DMA_MMAP because it 1) already has an mmap callback which can handle (to some extent) NOMMU 2) already defines a dummy pgprot_noncached() for NOMMU builds. c6x and frv stay untouched since they already have ARCH_NO_COHERENT_DMA_MMAP.
Cc: Steven Miao Cc: Geert Uytterhoeven Cc: Michal Simek Cc: Yoshinori Sato Cc: Rich Felker Cc: Chris Zankel Cc: Max Filippov Suggested-by: Christoph Hellwig Signed-off-by: Vladimir Murzin Tested-by: Benjamin Gaignard --- arch/blackfin/Kconfig | 1 + arch/m32r/Kconfig | 1 + arch/m68k/Kconfig | 1 + arch/microblaze/Kconfig | 1 + arch/sh/Kconfig | 1 + arch/xtensa/Kconfig | 1 + drivers/base/dma-mapping.c | 4 ++-- 7 files changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 3c1bd640042a..89bdb8264305 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -41,6 +41,7 @@ config BLACKFIN select MODULES_USE_ELF_RELA select HAVE_DEBUG_STACKOVERFLOW select HAVE_NMI + select ARCH_NO_COHERENT_DMA_MMAP config GENERIC_CSUM def_bool y diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 95474460b367..87cde1e4b38c 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -19,6 +19,7 @@ config M32R select HAVE_DEBUG_STACKOVERFLOW select CPU_NO_EFFICIENT_FFS select DMA_NOOP_OPS + select ARCH_NO_COHERENT_DMA_MMAP if !MMU config SBUS bool diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index d140206d5d29..5abb548f0e70 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -2,6 +2,7 @@ config M68K bool default y select ARCH_MIGHT_HAVE_PC_PARPORT if ISA + select ARCH_NO_COHERENT_DMA_MMAP if !MMU select HAVE_IDE select HAVE_AOUT if MMU select HAVE_DEBUG_BUGVERBOSE diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 85885a501dce..8ba7b7c01418 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -2,6 +2,7 @@ config MICROBLAZE def_bool y select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_NO_COHERENT_DMA_MMAP if !MMU select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_EXTABLE_SORT select CLKSRC_OF diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index ee086958b2b2..640a85925060 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -2,6 +2,7 @@ config SUPERH def_bool y select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_NO_COHERENT_DMA_MMAP if !MMU select HAVE_PATA_PLATFORM select CLKDEV_LOOKUP select HAVE_IDE if HAS_IOPORT_MAP diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index f4126cf997a4..7ad6d77b2f22 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -3,6 +3,7 @@ config ZONE_DMA config XTENSA def_bool y + select ARCH_NO_COHERENT_DMA_MMAP if !MMU select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_EXTABLE_SORT diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 10e7c022e8cf..ce1e02b567c3 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -227,7 +227,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size) { int ret = -ENXIO; -#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP) +#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP unsigned long user_count = vma_pages(vma); unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); @@ -244,7 +244,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, user_count << PAGE_SHIFT, vma->vm_page_prot); } -#endif /* CONFIG_MMU && !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ +#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ return ret; } -- cgit v1.2.3 From 1c51c429f30ea10428337f3a33c12059ba59f668 Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Wed, 24 
May 2017 11:24:30 +0100 Subject: ARM: NOMMU: Introduce dma operations for noMMU The R and M classes of CPUs can have memory covered by an MPU which in turn might configure RAM as Normal, i.e. bufferable and cacheable. This breaks dma_alloc_coherent() and friends, since data can now get stuck in caches or be buffered. This patch factors out DMA support for the NOMMU configuration into a separate entity which provides dedicated dma_ops. Several cases have to be handled there: - configurations with MMU/MPU set up - configurations without MMU/MPU set up - the special case of M-class, since caches and the MPU there are optional In general we rely on the default DMA area for coherent allocations and/or per-device memory reserves suitable for coherent DMA, so if such regions are set, coherent allocations come from there. In case the MMU/MPU was not set up, we fall back to the normal page allocator for DMA memory allocation. On M-class CPUs, for configurations without cache support (like Cortex-M3/M4) DMA operations are forced to be coherent and wired up to dma-noop (this decision is based on the cacheid global variable); however, if caches are detected there and no coherent DMA region is given (either default or per-device), DMA is disallowed even if the MPU is not set up - this is because M-class CPUs implement a system memory map which defines part of the address space as Normal memory. Reported-by: Alexandre Torgue Reported-by: Andras Szemzo Tested-by: Benjamin Gaignard Tested-by: Andras Szemzo Tested-by: Alexandre TORGUE Reviewed-by: Robin Murphy Signed-off-by: Vladimir Murzin Acked-by: Arnd Bergmann Acked-by: Russell King [hch: removed the dma_supported() implementation that isn't required anymore] Signed-off-by: Christoph Hellwig --- arch/arm/Kconfig | 1 + arch/arm/include/asm/dma-mapping.h | 2 +- arch/arm/mm/Makefile | 5 +- arch/arm/mm/dma-mapping-nommu.c | 228 +++++++++++++++++++++++++++++++++++++ 4 files changed, 232 insertions(+), 4 deletions(-) create mode 100644 arch/arm/mm/dma-mapping-nommu.c diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 4c1a35f15838..8a94f2c7606c 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -22,6 +22,7 @@ config ARM select CLONE_BACKWARDS select CPU_PM if (SUSPEND || CPU_IDLE) select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS + select DMA_NOOP_OPS if !MMU select EDAC_SUPPORT select EDAC_ATOMIC_SCRUB select GENERIC_ALLOCATOR diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 8dabcfdf4505..4e0285a66ef8 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -17,7 +17,7 @@ extern const struct dma_map_ops arm_coherent_dma_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - return &arm_dma_ops; + return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_noop_ops; } #ifdef __arch_page_to_dma diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index b3dea80715b4..950d19babb5f 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -2,9 +2,8 @@ # Makefile for the linux arm-specific parts of the memory manager.
# -obj-y := dma-mapping.o extable.o fault.o init.o \ - iomap.o - +obj-y := extable.o fault.o init.o iomap.o +obj-y += dma-mapping$(MMUEXT).o obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ mmap.o pgd.o mmu.o pageattr.o diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c new file mode 100644 index 000000000000..90ee354d803e --- /dev/null +++ b/arch/arm/mm/dma-mapping-nommu.c @@ -0,0 +1,228 @@ +/* + * Based on linux/arch/arm/mm/dma-mapping.c + * + * Copyright (C) 2000-2004 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/dma-mapping.h> +#include <linux/scatterlist.h> + +#include <asm/cachetype.h> +#include <asm/cacheflush.h> +#include <asm/outercache.h> +#include <asm/cp15.h> + +#include "dma.h" + +/* + * dma_noop_ops is used if + * - MMU/MPU is off + * - cpu is v7m w/o cache support + * - device is coherent + * otherwise arm_nommu_dma_ops is used. + * + * arm_nommu_dma_ops relies on consistent DMA memory (please refer to + * [1] on how to declare such memory). + * + * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt + */ + +static void *arm_nommu_dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) + +{ + const struct dma_map_ops *ops = &dma_noop_ops; + + /* + * We are here because: + * - no consistent DMA region has been defined, so we can't + * continue. + * - there is no space left in consistent DMA region, so we + * can only fall back to the generic allocator if we are + * advertised that consistency is not required. + */ + + if (attrs & DMA_ATTR_NON_CONSISTENT) + return ops->alloc(dev, size, dma_handle, gfp, attrs); + + WARN_ON_ONCE(1); + return NULL; +} + +static void arm_nommu_dma_free(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_addr, + unsigned long attrs) +{ + const struct dma_map_ops *ops = &dma_noop_ops; + + if (attrs & DMA_ATTR_NON_CONSISTENT) + ops->free(dev, size, cpu_addr, dma_addr, attrs); + else + WARN_ON_ONCE(1); + + return; +} + +static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size, + enum dma_data_direction dir) +{ + dmac_map_area(__va(paddr), size, dir); + + if (dir == DMA_FROM_DEVICE) + outer_inv_range(paddr, paddr + size); + else + outer_clean_range(paddr, paddr + size); +} + +static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size, + enum dma_data_direction dir) +{ + if (dir != DMA_TO_DEVICE) { + outer_inv_range(paddr, paddr + size); + dmac_unmap_area(__va(paddr), size, dir); + } +} + +static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + dma_addr_t handle = page_to_phys(page) + offset; + + __dma_page_cpu_to_dev(handle, size, dir); + + return handle; +} + +static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + __dma_page_dev_to_cpu(handle, size, dir); +} + + +static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + sg_dma_address(sg) = sg_phys(sg); + sg_dma_len(sg) = sg->length; + __dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir); + } + + return nents; + } + +static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, + int
nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + __dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir); +} + +static void arm_nommu_dma_sync_single_for_device(struct device *dev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + __dma_page_cpu_to_dev(handle, size, dir); +} + +static void arm_nommu_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + __dma_page_cpu_to_dev(handle, size, dir); +} + +static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + __dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir); +} + +static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + __dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir); +} + +const struct dma_map_ops arm_nommu_dma_ops = { + .alloc = arm_nommu_dma_alloc, + .free = arm_nommu_dma_free, + .map_page = arm_nommu_dma_map_page, + .unmap_page = arm_nommu_dma_unmap_page, + .map_sg = arm_nommu_dma_map_sg, + .unmap_sg = arm_nommu_dma_unmap_sg, + .sync_single_for_device = arm_nommu_dma_sync_single_for_device, + .sync_single_for_cpu = arm_nommu_dma_sync_single_for_cpu, + .sync_sg_for_device = arm_nommu_dma_sync_sg_for_device, + .sync_sg_for_cpu = arm_nommu_dma_sync_sg_for_cpu, +}; +EXPORT_SYMBOL(arm_nommu_dma_ops); + +static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent) +{ + return coherent ? &dma_noop_ops : &arm_nommu_dma_ops; +} + +void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, + const struct iommu_ops *iommu, bool coherent) +{ + const struct dma_map_ops *dma_ops; + + if (IS_ENABLED(CONFIG_CPU_V7M)) { + /* + * Cache support for v7m is optional, so can be treated as + * coherent if no cache has been detected. Note that it is not + * enough to check if MPU is in use or not since in the absence of + * an MPU the system memory map is used. + */ + dev->archdata.dma_coherent = (cacheid) ? coherent : true; + } else { + /* + * Assume coherent DMA in case MMU/MPU has not been set up. + */ + dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true; + } + + dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent); + + set_dma_ops(dev, dma_ops); +} + +void arch_teardown_dma_ops(struct device *dev) +{ +} + +#define PREALLOC_DMA_DEBUG_ENTRIES 4096 + +static int __init dma_debug_do_init(void) +{ + dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); + return 0; +} +core_initcall(dma_debug_do_init); -- cgit v1.2.3 From 1b11d39e6a6864a9dfb64e2c3ac03090e3b34399 Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Wed, 24 May 2017 11:24:31 +0100 Subject: ARM: NOMMU: Set ARM_DMA_MEM_BUFFERABLE for M-class cpus Now we have a dedicated non-cacheable region for consistent DMA operations. However, that region can still be marked as bufferable by the MPU, so it'd be safer to have barriers by default. M-class machines that didn't need it until now also likely won't need it in the future; therefore, we offer this as an option.
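For context on why bufferable mappings need care: writes to write-buffered "coherent" memory can still be sitting in the buffer when the device is kicked, so drivers use the ordering pattern documented in Documentation/memory-barriers.txt (a generic sketch, not code from this patch; desc and the register offset are invented):

	desc->status = DESC_READY;	/* descriptor lives in DMA memory */
	dma_wmb();			/* order it before the MMIO kick */
	writel(1, base + DOORBELL_REG);	/* now tell the device */

Enabling ARM_DMA_MEM_BUFFERABLE maps coherent allocations as Normal (bufferable) memory rather than strongly ordered, which is exactly the situation where such barriers become load-bearing.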
Tested-by: Benjamin Gaignard Tested-by: Andras Szemzo Tested-by: Alexandre TORGUE Reviewed-by: Robin Murphy Signed-off-by: Vladimir Murzin Acked-by: Russell King Signed-off-by: Christoph Hellwig --- arch/arm/mm/Kconfig | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index c6c4c9c8824b..877a0e3fd17d 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -1045,8 +1045,8 @@ config ARM_L1_CACHE_SHIFT default 5 config ARM_DMA_MEM_BUFFERABLE - bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K) && !CPU_V7 - default y if CPU_V6 || CPU_V6K || CPU_V7 + bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K || CPU_V7M) && !CPU_V7 + default y if CPU_V6 || CPU_V6K || CPU_V7 || CPU_V7M help Historically, the kernel has used strongly ordered mappings to provide DMA coherent memory. With the advent of ARMv7, mapping @@ -1061,6 +1061,10 @@ config ARM_DMA_MEM_BUFFERABLE and therefore turning this on may result in unpredictable driver behaviour. Therefore, we offer this as an option. + On some of the beefier ARMv7-M machines (with DMA and write + buffers) you likely want this enabled, while those that + didn't need it until now also won't need it in the future. + You are recommended say 'Y' here and debug any affected drivers. config ARM_HEAVY_MB -- cgit v1.2.3 From 1655cf8829d82d367d8fdb5cb58e5885d7d2a391 Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Wed, 24 May 2017 11:24:32 +0100 Subject: ARM: dma-mapping: Remove traces of NOMMU code DMA operations for the NOMMU case have just been factored out into a separate compilation unit, so don't keep dead code. Tested-by: Benjamin Gaignard Tested-by: Andras Szemzo Tested-by: Alexandre TORGUE Signed-off-by: Vladimir Murzin Acked-by: Arnd Bergmann Acked-by: Russell King Signed-off-by: Christoph Hellwig --- arch/arm/mm/dma-mapping.c | 29 ++--------------------------- 1 file changed, 2 insertions(+), 27 deletions(-) diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index b9677ada421f..e7380bafbfa6 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -353,8 +353,6 @@ static void __dma_free_buffer(struct page *page, size_t size) } } -#ifdef CONFIG_MMU - static void *__alloc_from_contiguous(struct device *dev, size_t size, pgprot_t prot, struct page **ret_page, const void *caller, bool want_vaddr, @@ -656,22 +654,6 @@ static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot) return prot; } -#define nommu() 0 - -#else /* !CONFIG_MMU */ - -#define nommu() 1 - -#define __get_dma_pgprot(attrs, prot) __pgprot(0) -#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL -#define __alloc_from_pool(size, ret_page) NULL -#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag, gfp) NULL -#define __free_from_pool(cpu_addr, size) do { } while (0) -#define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0) -#define __dma_free_remap(cpu_addr, size) do { } while (0) - -#endif /* CONFIG_MMU */ - static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp, struct page **ret_page) { @@ -814,7 +796,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, if (cma) buf->allocator = &cma_allocator; - else if (nommu() || is_coherent) + else if (is_coherent) buf->allocator = &simple_allocator; else if (allowblock) buf->allocator = &remap_allocator; @@ -863,8 +845,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, void
*cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - int ret = -ENXIO; -#ifdef CONFIG_MMU + int ret; unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long pfn = dma_to_pfn(dev, dma_addr); @@ -879,10 +860,6 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, vma->vm_end - vma->vm_start, vma->vm_page_prot); } -#else - ret = vm_iomap_memory(vma, vma->vm_start, - (vma->vm_end - vma->vm_start)); -#endif /* CONFIG_MMU */ return ret; } @@ -901,9 +878,7 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { -#ifdef CONFIG_MMU vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); -#endif /* CONFIG_MMU */ return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); } -- cgit v1.2.3