// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2007 - 2009 Intel Corporation.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/dca.h>

/* either a kernel change is needed, or we need something like this in the kernel */
#ifndef CONFIG_SMP
#include <asm/smp.h>
#undef cpu_physical_id
#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
#endif

#include "dma.h"
#include "registers.h"

/*
 * Bit 7 of a tag map entry is the "valid" bit; if it is set, bits 0:6
 * contain the bit number of the APIC ID to map into the DCA tag.  If the
 * valid bit is not set, the value must be 0 or 1 and gives the literal
 * value of that bit in the tag.
 */
#define DCA_TAG_MAP_VALID 0x80

#define DCA3_TAG_MAP_BIT_TO_INV 0x80
#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
#define DCA3_TAG_MAP_LITERAL_VAL 0x1
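
/*
 * ioat_dca_get_tag() decodes these per-byte entries as follows: if
 * DCA3_TAG_MAP_BIT_TO_SEL is set, the low bits select which APIC ID bit
 * to copy into the tag; if only DCA3_TAG_MAP_BIT_TO_INV is set, the
 * selected APIC ID bit is inverted; otherwise the entry supplies a
 * literal 0 or 1.  For example, an entry of 0x45 would copy APIC ID
 * bit 5, 0x85 its inverse, and 0x01 a constant 1.
 */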

#define DCA_TAG_MAP_MASK 0xDF

/* expected tag map bytes for I/OAT ver.2 */
#define DCA2_TAG_MAP_BYTE0 0x80
#define DCA2_TAG_MAP_BYTE1 0x0
#define DCA2_TAG_MAP_BYTE2 0x81
#define DCA2_TAG_MAP_BYTE3 0x82
#define DCA2_TAG_MAP_BYTE4 0x82

/* verify that the tag map matches the expected I/OAT ver.2 values */
static inline int dca2_tag_map_valid(u8 *tag_map)
{
	return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) &&
		(tag_map[1] == DCA2_TAG_MAP_BYTE1) &&
		(tag_map[2] == DCA2_TAG_MAP_BYTE2) &&
		(tag_map[3] == DCA2_TAG_MAP_BYTE3) &&
		(tag_map[4] == DCA2_TAG_MAP_BYTE4));
}

/*
 * "Legacy" DCA systems do not implement the DCA register set in the
 * I/OAT device.  Software needs direct support for their tag mappings.
 */

#define APICID_BIT(x)		(DCA_TAG_MAP_VALID | (x))
#define IOAT_TAG_MAP_LEN	8

/* pack PCI B/D/F into a u16 */
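/* e.g. bus 0x05, devfn 0x28 (device 5, function 0) packs to 0x0528 */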
static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
{
	return (pci->bus->number << 8) | pci->devfn;
}

static int dca_enabled_in_bios(struct pci_dev *pdev)
{
	/* CPUID level 9 returns DCA configuration */
	/* Bit 0 indicates DCA enabled by the BIOS */
	unsigned long cpuid_level_9;
	int res;

	cpuid_level_9 = cpuid_eax(9);
	res = test_bit(0, &cpuid_level_9);
	if (!res)
		dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");

	return res;
}

int system_has_dca_enabled(struct pci_dev *pdev)
{
	if (boot_cpu_has(X86_FEATURE_DCA))
		return dca_enabled_in_bios(pdev);

	dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
	return 0;
}

struct ioat_dca_slot {
	struct pci_dev *pdev;	/* requester device */
	u16 rid;		/* requester id, as used by IOAT */
};

#define IOAT_DCA_MAX_REQ 6
#define IOAT3_DCA_MAX_REQ 2

struct ioat_dca_priv {
	void __iomem		*iobase;
	void __iomem		*dca_base;
	int			 max_requesters;
	int			 requester_count;
	u8			 tag_map[IOAT_TAG_MAP_LEN];
	struct ioat_dca_slot	 req_slots[];
};

static int ioat_dca_dev_managed(struct dca_provider *dca,
				struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev)
			return 1;
	}
	return 0;
}
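
/*
 * Record the requesting device in a free slot of the global requester
 * table: its PCI bus/devfn is written to the table entry along with the
 * VALID bit.
 */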

static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			global_req_table =
			      readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdca->requester_count is out of whack */
	return -EFAULT;
}

static int ioat_dca_remove_requester(struct dca_provider *dca,
				      struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			global_req_table =
			      readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}
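
/*
 * Build the DCA tag for the given cpu by walking the 8-entry tag map:
 * each entry contributes one tag bit taken from an APIC ID bit, its
 * inverse, or a literal 0/1, per the DCA3_TAG_MAP_* encoding above.
 */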

static u8 ioat_dca_get_tag(struct dca_provider *dca,
			    struct device *dev,
			    int cpu)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int i, apic_id, bit, value;
	u8 entry, tag;

	tag = 0;
	apic_id = cpu_physical_id(cpu);

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		entry = ioatdca->tag_map[i];
		if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
			bit = entry &
				~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
			value = (apic_id & (1 << bit)) ? 1 : 0;
		} else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
			bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
			value = (apic_id & (1 << bit)) ? 0 : 1;
		} else {
			value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
		}
		tag |= (value << i);
	}

	return tag;
}

static const struct dca_ops ioat_dca_ops = {
	.add_requester		= ioat_dca_add_requester,
	.remove_requester	= ioat_dca_remove_requester,
	.get_tag		= ioat_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};
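
/*
 * Walk the global requester table, counting entries until one with the
 * LASTID bit set is seen.  A table offset of zero means the device does
 * not publish a requester table, so no DCA slots are available.
 */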

static int ioat_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
{
	int slots = 0;
	u32 req;
	u16 global_req_table;

	global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
	if (global_req_table == 0)
		return 0;

	do {
		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
		slots++;
	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);

	return slots;
}

static inline int dca3_tag_map_invalid(u8 *tag_map)
{
	/*
	 * If the tag map is not programmed by the BIOS the default is:
	 * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00
	 *
	 * This is an invalid map and will result in only 2 possible tags
	 * 0x1F and 0x00.  0x00 is an invalid DCA tag so we know that
	 * this entire definition is invalid.
	 */
	return ((tag_map[0] == DCA_TAG_MAP_VALID) &&
		(tag_map[1] == DCA_TAG_MAP_VALID) &&
		(tag_map[2] == DCA_TAG_MAP_VALID) &&
		(tag_map[3] == DCA_TAG_MAP_VALID) &&
		(tag_map[4] == DCA_TAG_MAP_VALID));
}
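
/*
 * Set up the DCA provider for an I/OAT device: check that DCA is enabled,
 * locate the DCA register block, size the requester table, set the CSI
 * prefetch and PCI memory-write control bits if the BIOS left them clear,
 * copy and validate the APIC-ID-to-tag map, then register the provider.
 */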

struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} tag_map;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	dca = alloc_dca_provider(&ioat_dca_ops,
				 sizeof(*ioatdca)
				      + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some BIOSes might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
		csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	}
	pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
		pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	}

	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map */
	tag_map.low =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
	tag_map.high =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
	for (i = 0; i < 8; i++) {
		bit = tag_map.full >> (8 * i);
		ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
	}

	if (dca3_tag_map_invalid(ioatdca->tag_map)) {
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		pr_warn_once("%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
			     dev_driver_string(&pdev->dev),
			     dev_name(&pdev->dev));
		free_dca_provider(dca);
		return NULL;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}