// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_pfvf_pf_msg.h"

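/* Per-VF rate-limit parameters applied to incoming VF2PF messages */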
#define ADF_VF2PF_RATELIMIT_INTERVAL	8
#define ADF_VF2PF_RATELIMIT_BURST	130

static struct workqueue_struct *pf2vf_resp_wq;

struct adf_pf2vf_resp {
	struct work_struct pf2vf_resp_work;
	struct adf_accel_vf_info *vf_info;
};

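/*
 * Deferred handler for a VF2PF request, run from pf2vf_resp_wq. It reads
 * and processes the pending message from the VF and, on success, re-arms
 * the VF2PF interrupt for that VF.
 */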
static void adf_iov_send_resp(struct work_struct *work)
{
	struct adf_pf2vf_resp *pf2vf_resp =
		container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
	struct adf_accel_vf_info *vf_info = pf2vf_resp->vf_info;
	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
	u32 vf_nr = vf_info->vf_nr;
	bool ret;

	ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
	if (ret)
		/* re-enable interrupt on PF from this VF */
		adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);

	kfree(pf2vf_resp);
}

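/*
 * Queue deferred processing of a VF2PF message. This is called from the
 * PF's VF2PF interrupt handling path, hence the GFP_ATOMIC allocation;
 * the actual message handling happens later in adf_iov_send_resp().
 */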
void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
{
	struct adf_pf2vf_resp *pf2vf_resp;

	pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
	if (!pf2vf_resp)
		return;

	pf2vf_resp->vf_info = vf_info;
	INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
	queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
}

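/*
 * Initialize the per-VF state, configure the AE-thread-to-function
 * mapping, enable VF2PF interrupts and finally enable all of the VFs
 * supported by the hardware.
 */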
static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_accel_vf_info *vf_info;
	int i;

	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
	     i++, vf_info++) {
		/* This ptr will be populated when VFs are created */
		vf_info->accel_dev = accel_dev;
		vf_info->vf_nr = i;
		vf_info->vf_compat_ver = 0;

		mutex_init(&vf_info->pf2vf_lock);
		ratelimit_state_init(&vf_info->vf2pf_ratelimit,
				     ADF_VF2PF_RATELIMIT_INTERVAL,
				     ADF_VF2PF_RATELIMIT_BURST);
	}

	/* Set Valid bits in AE Thread to PCIe Function Mapping */
	if (hw_data->configure_iov_threads)
		hw_data->configure_iov_threads(accel_dev, true);

	/* Enable VF to PF interrupts for all VFs */
	if (hw_data->pfvf_ops.get_pf2vf_offset)
		adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);

	/*
	 * Due to the hardware design, when SR-IOV and the ring arbiter
	 * are enabled all the VFs supported in hardware must be enabled in
	 * order for all the hardware resources (i.e. bundles) to be usable.
	 * When SR-IOV is enabled, each of the VFs will own one bundle.
	 */
	return pci_enable_sriov(pdev, totalvfs);
}

/**
 * adf_disable_sriov() - Disable SRIOV for the device
 * @accel_dev:  Pointer to accel device.
 *
 * Function disables SRIOV for the accel device.
 */
void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
	struct adf_accel_vf_info *vf;
	int i;

	if (!accel_dev->pf.vf_info)
		return;

	if (hw_data->pfvf_ops.get_pf2vf_offset)
		adf_pf2vf_notify_restarting(accel_dev);

	pci_disable_sriov(accel_to_pci_dev(accel_dev));

	/* Disable VF to PF interrupts */
	if (hw_data->pfvf_ops.get_pf2vf_offset)
		adf_disable_vf2pf_interrupts(accel_dev, GENMASK(31, 0));

	/* Clear Valid bits in AE Thread to PCIe Function Mapping */
	if (hw_data->configure_iov_threads)
		hw_data->configure_iov_threads(accel_dev, false);

	for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
		mutex_destroy(&vf->pf2vf_lock);

	kfree(accel_dev->pf.vf_info);
	accel_dev->pf.vf_info = NULL;
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);
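
/*
 * Note: in the QAT PF drivers this is typically hooked up as the
 * hw_data->disable_iov callback and called during device shutdown;
 * that wiring lives outside this file.
 */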

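/*
 * Stop and shut down the device while preserving the ADF_SERVICES_ENABLED
 * configuration value, so that the device comes back up with the same
 * services after SR-IOV is enabled.
 */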
static int adf_sriov_prepare_restart(struct adf_accel_dev *accel_dev)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	int ret;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_SERVICES_ENABLED, services);

	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);

	if (!ret) {
		ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
		if (ret)
			return ret;

		ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
						  ADF_SERVICES_ENABLED,
						  services, ADF_STR);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * adf_sriov_configure() - Enable SRIOV for the device
 * @pdev:  Pointer to PCI device.
 * @numvfs: Number of virtual functions (VFs) to enable.
 *
 * Function enables SRIOV for the PCI device.
 *
 * Note that the @numvfs parameter is ignored and all VFs supported by the
 * device are enabled due to the design of the hardware.
 *
 * Return: number of VFs enabled on success, error code otherwise.
 */
int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	unsigned long val;
	int ret;

	if (!accel_dev) {
		dev_err(&pdev->dev, "Failed to find accel_dev\n");
		return -EFAULT;
	}

	if (!iommu_present(&pci_bus_type))
		dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");

	if (accel_dev->pf.vf_info) {
		dev_info(&pdev->dev, "Already enabled for this device\n");
		return -EINVAL;
	}

	if (adf_dev_started(accel_dev)) {
		if (adf_devmgr_in_reset(accel_dev) ||
		    adf_dev_in_use(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Device busy\n");
			return -EBUSY;
		}

		ret = adf_sriov_prepare_restart(accel_dev);
		if (ret)
			return ret;
	}

	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
		return -EFAULT;
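
	/*
	 * With SR-IOV enabled every bundle is owned by a VF (see
	 * adf_enable_sriov()), so the PF itself is configured with zero
	 * crypto instances.
	 */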
	val = 0;
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					ADF_NUM_CY, (void *)&val, ADF_DEC))
		return -EFAULT;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);

	/* Allocate memory for VF info structs */
	accel_dev->pf.vf_info = kcalloc(totalvfs,
					sizeof(struct adf_accel_vf_info),
					GFP_KERNEL);
	if (!accel_dev->pf.vf_info)
		return -ENOMEM;

	if (adf_dev_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
			accel_dev->accel_id);
		return -EFAULT;
	}

	if (adf_dev_start(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
			accel_dev->accel_id);
		return -EFAULT;
	}

	ret = adf_enable_sriov(accel_dev);
	if (ret)
		return ret;

	return numvfs;
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);
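
/*
 * Illustrative usage (an assumption about the caller, not part of this
 * file): a QAT PF driver exposes this entry point through its pci_driver
 * so that SR-IOV can be driven from sysfs. Names below are examples only.
 *
 *	static struct pci_driver adf_driver = {
 *		.id_table = adf_pci_tbl,
 *		.name = ADF_C62X_DEVICE_NAME,
 *		.probe = adf_probe,
 *		.remove = adf_remove,
 *		.sriov_configure = adf_sriov_configure,
 *	};
 *
 * Then, from user space:
 *
 *	echo 16 > /sys/bus/pci/devices/0000:3d:00.0/sriov_numvfs
 */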

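/*
 * Allocate the workqueue used for deferred VF2PF message handling.
 * WQ_MEM_RECLAIM provides a rescuer thread so message processing can
 * make forward progress even under memory pressure.
 */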
int __init adf_init_pf_wq(void)
{
	/* Workqueue for PF2VF responses */
	pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);

	return !pf2vf_resp_wq ? -ENOMEM : 0;
}

void adf_exit_pf_wq(void)
{
	if (pf2vf_resp_wq) {
		destroy_workqueue(pf2vf_resp_wq);
		pf2vf_resp_wq = NULL;
	}
}