path: root/arch/x86/include/asm/dma-mapping.h

#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

/*
 * Keep the same behaviour as before the dma_map_ops conversion: when
 * the ops provide no mapping_error hook, a failed mapping is
 * signalled by returning bad_dma_address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}
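
/*
 * Usage sketch (illustrative only; "dev", "buf" and "len" are
 * hypothetical): the handle returned by a streaming mapping must be
 * checked before use, since an IOMMU can run out of translation
 * entries and return bad_dma_address.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -EIO;
 */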

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(hwdev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, NULL);
	debug_dma_map_page(hwdev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
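
/*
 * Pairing sketch (illustrative only; names continue the example
 * above): a mapping that passed the dma_mapping_error() check must
 * eventually be balanced by dma_unmap_single() with the same handle,
 * size and direction, once the hardware is done with the buffer.
 *
 *	... device signals completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */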

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
	debug_dma_map_sg(hwdev, sg, nents, ents, dir);

	return ents;
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(hwdev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
}
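
/*
 * Scatterlist sketch (illustrative only; "sglist" and "nents" are
 * hypothetical): dma_map_sg() may return fewer entries than it was
 * given when the IOMMU merges adjacent entries (see iommu_merge),
 * and 0 on failure. Program the device with the returned count, but
 * unmap with the original nents.
 *
 *	int mapped = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	... hand "mapped" entries to the device ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */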

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
	debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
	debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}
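
/*
 * Reuse sketch (illustrative only; names are hypothetical): to let
 * the CPU inspect a streamed buffer and then hand it back to the
 * device without remapping, bracket the CPU access with the two
 * sync calls above.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU may read buf here ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */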

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, dir);
	debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
					    offset, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, dir);
	debug_dma_sync_single_range_for_device(hwdev, dma_handle,
					       offset, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);
	flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
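
/*
 * Page mapping sketch (illustrative only; "page" and "offset" are
 * hypothetical): the page interface never needs a kernel virtual
 * address, so it can map highmem pages that dma_map_single(), which
 * takes a pointer, cannot.
 *
 *	dma_addr_t handle = dma_map_page(dev, page, offset, len,
 *					 DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -EIO;
 *	... device reads from the buffer ...
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */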

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * There is no cheap way to determine the actual cache line size
	 * on every x86 CPU, so return the CLFLUSH line size reported by
	 * the boot CPU as a safe alignment.
	 */
	return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_24BIT_MASK)
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}
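
/*
 * Worked example of the two helpers above: a device with
 * coherent_dma_mask == DMA_24BIT_MASK gets GFP_DMA set; on 64-bit,
 * a 32-bit mask without GFP_DMA gets GFP_DMA32 instead; a full
 * 64-bit mask leaves the gfp flags untouched.
 */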

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	memory = ops->alloc_coherent(dev, size, dma_handle,
				     dma_alloc_coherent_gfp_flags(dev, gfp));
	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());       /* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, bus);
	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}
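
/*
 * Coherent allocation sketch (illustrative only; "RING_BYTES" and
 * the variables are hypothetical): coherent memory is typically
 * allocated once at driver init for descriptor rings and freed at
 * teardown. Note the WARN_ON above: for portability, do not call
 * dma_free_coherent() with interrupts disabled.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... use the ring ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */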

#endif /* _ASM_X86_DMA_MAPPING_H */