author     Chris Metcalf <cmetcalf@tilera.com>  2013-08-02 12:24:42 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>  2013-08-06 12:52:33 -0400
commit     803c874abe1358998ab65a8cca728684ebb50a13 (patch)
tree       06f6ca14ed84458c0bd2b40e8b61a3cb4c0e8e1c /arch/tile/kernel/pci-dma.c
parent     26cde05a2cb7d4c0f4cd1d4aeeadc2939c972786 (diff)
tile: support LSI MEGARAID SAS HBA hybrid dma_ops
The LSI MEGARAID SAS HBA suffers from the problem where it can do
64-bit DMA to streaming buffers but not to consistent buffers.
In other words, 64-bit DMA is used for disk data transfers and 32-bit
DMA must be used for control message transfers. According to LSI,
the firmware is not fully functional yet. This change implements a
kind of hybrid dma_ops to support this.

Note that on most other platforms, the 64-bit DMA addressing space is
the same as the 32-bit DMA space and they overlap the physical memory
space. No special arrangement is needed to support this kind of mixed
DMA capability. On TILE-Gx, the 64-bit DMA space is completely
separate from the 32-bit DMA space. Due to the use of the IOMMU, the
64-bit DMA space doesn't overlap the physical memory space. On the
other hand, the 32-bit DMA space overlaps the physical memory space
under 4GB. The separate address spaces make it necessary to have
separate dma_ops.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
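For context, here is a minimal, illustrative sketch (not part of this patch) of how a driver for hardware with this split capability would declare it through the standard Linux DMA mask API. The probe function and error handling are hypothetical, but dma_set_mask() and dma_set_coherent_mask() are the real entry points that the TILE-Gx hook at the end of this patch intercepts:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/*
 * Illustrative only: a driver whose hardware can stream to 64-bit
 * addresses but can only reach coherent (consistent) buffers below
 * 4GB sets its DMA masks asymmetrically. On TILE-Gx, the
 * dma_set_coherent_mask() hook in this patch then switches such a
 * device over to gx_hybrid_pci_dma_map_ops.
 */
static int example_probe(struct pci_dev *pdev)
{
	int ret;

	/* Streaming DMA (map_page/map_sg) may use the full 64-bit space. */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/* Coherent allocations must stay within 32-bit addressing. */
	return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}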
Diffstat (limited to 'arch/tile/kernel/pci-dma.c')
 -rw-r--r--  arch/tile/kernel/pci-dma.c | 40
 1 file changed, 31 insertions(+), 9 deletions(-)
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index b9fe80ec1089..7e22e73264a9 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -357,7 +357,7 @@ static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
addr = page_to_phys(pg);
- *dma_handle = phys_to_dma(dev, addr);
+ *dma_handle = addr + get_dma_offset(dev);
return page_address(pg);
}
@@ -387,7 +387,7 @@ static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
sg->dma_address = sg_phys(sg);
__dma_prep_pa_range(sg->dma_address, sg->length, direction);
- sg->dma_address = phys_to_dma(dev, sg->dma_address);
+ sg->dma_address = sg->dma_address + get_dma_offset(dev);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
sg->dma_length = sg->length;
#endif
@@ -422,7 +422,7 @@ static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
BUG_ON(offset + size > PAGE_SIZE);
__dma_prep_page(page, offset, size, direction);
- return phys_to_dma(dev, page_to_pa(page) + offset);
+ return page_to_pa(page) + offset + get_dma_offset(dev);
}
static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
@@ -432,7 +432,7 @@ static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
{
BUG_ON(!valid_dma_direction(direction));
- dma_address = dma_to_phys(dev, dma_address);
+ dma_address -= get_dma_offset(dev);
__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
dma_address & PAGE_OFFSET, size, direction);
@@ -445,7 +445,7 @@ static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
{
BUG_ON(!valid_dma_direction(direction));
- dma_handle = dma_to_phys(dev, dma_handle);
+ dma_handle -= get_dma_offset(dev);
__dma_complete_pa_range(dma_handle, size, direction);
}
@@ -456,7 +456,7 @@ static void tile_pci_dma_sync_single_for_device(struct device *dev,
enum dma_data_direction
direction)
{
- dma_handle = dma_to_phys(dev, dma_handle);
+ dma_handle -= get_dma_offset(dev);
__dma_prep_pa_range(dma_handle, size, direction);
}
@@ -558,21 +558,43 @@ static struct dma_map_ops pci_swiotlb_dma_ops = {
.mapping_error = swiotlb_dma_mapping_error,
};
+static struct dma_map_ops pci_hybrid_dma_ops = {
+ .alloc = tile_swiotlb_alloc_coherent,
+ .free = tile_swiotlb_free_coherent,
+ .map_page = tile_pci_dma_map_page,
+ .unmap_page = tile_pci_dma_unmap_page,
+ .map_sg = tile_pci_dma_map_sg,
+ .unmap_sg = tile_pci_dma_unmap_sg,
+ .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
+ .sync_single_for_device = tile_pci_dma_sync_single_for_device,
+ .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
+ .mapping_error = tile_pci_dma_mapping_error,
+ .dma_supported = tile_pci_dma_supported
+};
+
struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
+struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
#else
struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
#endif
EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
+EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
- /* Handle legacy PCI devices with limited memory addressability. */
- if (((dma_ops == gx_pci_dma_map_ops) ||
- (dma_ops == gx_legacy_pci_dma_map_ops)) &&
+ /* Handle hybrid PCI devices with limited memory addressability. */
+ if ((dma_ops == gx_pci_dma_map_ops ||
+ dma_ops == gx_hybrid_pci_dma_map_ops ||
+ dma_ops == gx_legacy_pci_dma_map_ops) &&
(mask <= DMA_BIT_MASK(32))) {
+ if (dma_ops == gx_pci_dma_map_ops)
+ set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
+
if (mask > dev->archdata.max_direct_dma_addr)
mask = dev->archdata.max_direct_dma_addr;
}