#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm/swiotlb.h>

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

#define DMA_ERROR_CODE 0

extern const struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);

/* The DMA ops in use are selected by the platform's machine vector. */
#define get_dma_ops(dev) platform_dma_get_ops(dev)

/* Return true if the device can address the range [addr, addr + size - 1]. */
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <= *dev->dma_mask;
}

/* Physical and DMA addresses are identical on ia64 (1:1 mapping). */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do
	 * need to ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}

#endif /* _ASM_IA64_DMA_MAPPING_H */