/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2015 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2015 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"

#define ADF_DH895XCC_EP_OFFSET	0x3A000
#define ADF_DH895XCC_ERRMSK3	(ADF_DH895XCC_EP_OFFSET + 0x1C)
#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) (((vf_mask) & 0xFFFF) << 9)
#define ADF_DH895XCC_ERRMSK5	(ADF_DH895XCC_EP_OFFSET + 0xDC)
#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) ((vf_mask) >> 16)

/**
 * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function enables PF to VF interrupts
 */
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *pmisc_bar_addr =
		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;

	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
}
EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts);

/**
 * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function disables PF to VF interrupts
 */
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *pmisc_bar_addr =
		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;

	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
}
EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);

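/**
 * adf_enable_vf2pf_interrupts() - Enable VF to PF interrupts
 * @accel_dev:  Pointer to acceleration device.
 * @vf_mask:    Mask of VFs (one bit per VF) for which to enable interrupts.
 *
 * Function enables VF to PF interrupts by clearing the corresponding source
 * mask bits in the ERRMSK3 (VFs 1-16) and ERRMSK5 (VFs 17-32) CSRs.
 */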
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
				 u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 reg;

	/* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
		reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
	}

	/* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
		reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
	}
}

/**
 * adf_disable_vf2pf_interrupts() - Disable VF to PF interrupts
 * @accel_dev:  Pointer to acceleration device.
 * @vf_mask:    Mask of VFs (one bit per VF) for which to disable interrupts.
 *
 * Function disables VF to PF interrupts
 */
void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 reg;

	/* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
			ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
	}

	/* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
			ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
	}
}
EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts);

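/*
 * The PF2VF CSR is shared by the local and remote functions, so ownership
 * must be arbitrated before a message can be sent.  The sender claims the
 * CSR by writing its IN_USE pattern, delays briefly to detect a collision
 * with the remote side, then delivers the message by setting the interrupt
 * bit and polls for the remote to clear that bit as an ACK.
 */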
static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *pmisc_bar_addr =
		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
	u32 val, pf2vf_offset, count = 0;
	u32 local_in_use_mask, local_in_use_pattern;
	u32 remote_in_use_mask, remote_in_use_pattern;
	struct mutex *lock;	/* lock preventing concurrent access to the CSR */
	u32 int_bit;
	int ret = 0;

	if (accel_dev->is_vf) {
		pf2vf_offset = hw_data->get_pf2vf_offset(0);
		lock = &accel_dev->vf.vf2pf_lock;
		local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		int_bit = ADF_VF2PF_INT;
	} else {
		pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
		lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
		local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		int_bit = ADF_PF2VF_INT;
	}

	mutex_lock(lock);

	/* Check if PF2VF CSR is in use by remote function */
	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & remote_in_use_mask) == remote_in_use_pattern) {
		dev_dbg(&GET_DEV(accel_dev),
			"PF2VF CSR in use by remote function\n");
		ret = -EBUSY;
		goto out;
	}

	/* Attempt to get ownership of PF2VF CSR */
	msg &= ~local_in_use_mask;
	msg |= local_in_use_pattern;
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);

	/* Wait in case remote func also attempting to get ownership */
	msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);

	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & local_in_use_mask) != local_in_use_pattern) {
		dev_dbg(&GET_DEV(accel_dev),
			"PF2VF CSR in use by remote - collision detected\n");
		ret = -EBUSY;
		goto out;
	}

	/*
	 * This function now owns the PF2VF CSR.  The IN_USE_BY pattern must
	 * remain in the PF2VF CSR for all writes including ACK from remote
	 * until this local function relinquishes the CSR.  Send the message
	 * by interrupting the remote.
	 */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);

	/* Wait for confirmation from remote func it received the message */
	do {
		msleep(ADF_IOV_MSG_ACK_DELAY);
		val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	} while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));

	if (val & int_bit) {
		dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
		val &= ~int_bit;
		ret = -EIO;
	}

	/* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
out:
	mutex_unlock(lock);
	return ret;
}

/**
 * adf_iov_putmsg() - send PF2VF message
 * @accel_dev:  Pointer to acceleration device.
 * @msg:	Message to send
 * @vf_nr:	VF number to which the message will be sent
 *
 * Function sends a message from the PF to a VF (or, when called on a VF,
 * from the VF to the PF), retrying if the shared CSR is busy.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	u32 count = 0;
	int ret;

	do {
		ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
		if (ret)
			msleep(ADF_IOV_MSG_RETRY_DELAY);
	} while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));

	return ret;
}
EXPORT_SYMBOL_GPL(adf_iov_putmsg);

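/**
 * adf_vf2pf_req_hndl() - Handle a VF2PF request on the PF
 * @vf_info:    Per-VF data of the VF that raised the request.
 *
 * Reads the message from the shared CSR, ACKs it by clearing the VF2PF
 * interrupt bit, dispatches on the message type and, where a response is
 * required, sends it back to the VF before re-enabling its interrupt.
 */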
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int bar_id = hw_data->get_misc_bar_id(hw_data);
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 msg, resp = 0, vf_nr = vf_info->vf_nr;

	/* Read message from the VF */
	msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));

	/* To ACK, clear the VF2PFINT bit */
	msg &= ~ADF_VF2PF_INT;
	ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);

	if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
		/* Ignore legacy non-system (non-kernel) VF2PF messages */
		goto err;

	switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
	case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
		{
		u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;

		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
			  ADF_PF2VF_MSGTYPE_SHIFT) |
			 (ADF_PFVF_COMPATIBILITY_VERSION <<
			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));

		dev_dbg(&GET_DEV(accel_dev),
			"Compatibility Version Request from VF%d vers=%u\n",
			vf_nr + 1, vf_compat_ver);

		if (vf_compat_ver < hw_data->min_iov_compat_ver) {
			dev_err(&GET_DEV(accel_dev),
				"VF (vers %d) incompatible with PF (vers %d)\n",
				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
			resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		} else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
			dev_err(&GET_DEV(accel_dev),
				"VF (vers %d) compat with PF (vers %d) unkn.\n",
				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
			resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		} else {
			dev_dbg(&GET_DEV(accel_dev),
				"VF (vers %d) compatible with PF (vers %d)\n",
				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
			resp |= ADF_PF2VF_VF_COMPATIBLE <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		}
		}
		break;
	case ADF_VF2PF_MSGTYPE_VERSION_REQ:
		dev_dbg(&GET_DEV(accel_dev),
			"Legacy VersionRequest received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
			  ADF_PF2VF_MSGTYPE_SHIFT) |
			 (ADF_PFVF_COMPATIBILITY_VERSION <<
			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
		resp |= ADF_PF2VF_VF_COMPATIBLE <<
			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		/* Set legacy major and minor version num */
		resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
			1 << ADF_PF2VF_MINORVERSION_SHIFT;
		break;
	case ADF_VF2PF_MSGTYPE_INIT:
		{
		dev_dbg(&GET_DEV(accel_dev),
			"Init message received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		vf_info->init = true;
		}
		break;
	case ADF_VF2PF_MSGTYPE_SHUTDOWN:
		{
		dev_dbg(&GET_DEV(accel_dev),
			"Shutdown message received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		vf_info->init = false;
		}
		break;
	default:
		goto err;
	}

	if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
		dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");

	/* re-enable interrupt on PF from this VF */
	adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
	return;
err:
	dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
		vf_nr + 1, msg);
}

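/*
 * Notify every VF that has completed init that the PF is restarting by
 * sending it the RESTARTING message.
 */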
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_vf_info *vf;
	u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
		(ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
	int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));

	for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
		if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
			dev_err(&GET_DEV(accel_dev),
				"Failed to send restarting msg to VF%d\n", i);
	}
}

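/*
 * Negotiate compatibility with the PF: send a Compatibility Version Request
 * carrying this VF's version, wait on iov_msg_completion for the response,
 * then decide from the result (and, if the PF is older, from its version)
 * whether the two can interoperate.
 */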
static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
	unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msg = 0;
	int ret;

	msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
	msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
	msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
	BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);

	/* Send request from VF to PF */
	ret = adf_iov_putmsg(accel_dev, msg, 0);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to send Compatibility Version Request.\n");
		return ret;
	}

	/* Wait for response */
	if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
					 timeout)) {
		dev_err(&GET_DEV(accel_dev),
			"IOV request/response message timeout expired\n");
		return -EIO;
	}

	/* Response from PF received, check compatibility */
	switch (accel_dev->vf.compatible) {
	case ADF_PF2VF_VF_COMPATIBLE:
		break;
	case ADF_PF2VF_VF_COMPAT_UNKNOWN:
		/* VF is newer than PF and decides whether it is compatible */
		if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
			break;
		/* fall through */
	case ADF_PF2VF_VF_INCOMPATIBLE:
		dev_err(&GET_DEV(accel_dev),
			"PF (vers %d) and VF (vers %d) are not compatible\n",
			accel_dev->vf.pf_version,
			ADF_PFVF_COMPATIBILITY_VERSION);
		return -EINVAL;
	default:
		dev_err(&GET_DEV(accel_dev),
			"Invalid response from PF; assume not compatible\n");
		return -EINVAL;
	}
	return ret;
}

/**
 * adf_enable_vf2pf_comms() - Function enables communication from VF to PF
 *
 * @accel_dev: Pointer to acceleration device virtual function.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	adf_enable_pf2vf_interrupts(accel_dev);
	return adf_vf2pf_request_version(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);