This source file includes following definitions.
- adf_enable_pf2vf_interrupts
- adf_disable_pf2vf_interrupts
- adf_enable_vf2pf_interrupts
- adf_disable_vf2pf_interrupts
- __adf_iov_putmsg
- adf_iov_putmsg
- adf_vf2pf_req_hndl
- adf_pf2vf_notify_restarting
- adf_vf2pf_request_version
- adf_enable_vf2pf_comms
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48 #include <linux/delay.h>
49 #include "adf_accel_devices.h"
50 #include "adf_common_drv.h"
51 #include "adf_pf2vf_msg.h"
52
/*
 * DH895xCC error-mask CSRs used to mask/unmask VF2PF interrupt sources.
 * ERRMSK3 holds the mask bits for VFs 0-15 (starting at bit 9); ERRMSK5
 * holds the mask bits for VFs 16-31.
 */
#define ADF_DH895XCC_EP_OFFSET 0x3A000
#define ADF_DH895XCC_ERRMSK3 (ADF_DH895XCC_EP_OFFSET + 0x1C)
/* Map the low 16 bits of a VF mask onto the ERRMSK3 bit positions */
#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
#define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC)
/* Map the high 16 bits of a VF mask onto the ERRMSK5 bit positions */
#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
58
59 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
60 {
61 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
62 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
63 void __iomem *pmisc_bar_addr =
64 pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
65
66 ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
67 }
68
69 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
70 {
71 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
72 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
73 void __iomem *pmisc_bar_addr =
74 pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
75
76 ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
77 }
78
79 void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
80 u32 vf_mask)
81 {
82 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
83 struct adf_bar *pmisc =
84 &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
85 void __iomem *pmisc_addr = pmisc->virt_addr;
86 u32 reg;
87
88
89 if (vf_mask & 0xFFFF) {
90 reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
91 reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
92 ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
93 }
94
95
96 if (vf_mask >> 16) {
97 reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
98 reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
99 ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
100 }
101 }
102
103 void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
104 {
105 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
106 struct adf_bar *pmisc =
107 &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
108 void __iomem *pmisc_addr = pmisc->virt_addr;
109 u32 reg;
110
111
112 if (vf_mask & 0xFFFF) {
113 reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
114 ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
115 ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
116 }
117
118
119 if (vf_mask >> 16) {
120 reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
121 ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
122 ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
123 }
124 }
125
/*
 * __adf_iov_putmsg() - single attempt to send a PF<->VF message
 * @accel_dev: acceleration device
 * @msg:       32-bit message to place in the shared PF2VF CSR
 * @vf_nr:     target VF number (ignored when called from a VF)
 *
 * Both PF and VF share one CSR per VF; ownership is arbitrated with
 * per-direction IN_USE bit patterns and a collision-detect delay.
 * Returns 0 on success, -EBUSY if the CSR is owned by the remote side
 * (or a collision is detected), -EIO if no ACK is received.
 */
static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *pmisc_bar_addr =
		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
	u32 val, pf2vf_offset, count = 0;
	u32 local_in_use_mask, local_in_use_pattern;
	u32 remote_in_use_mask, remote_in_use_pattern;
	struct mutex *lock;	/* serializes local access to the shared CSR */
	u32 int_bit;
	int ret = 0;

	/* Select CSR offset, lock, ownership patterns and interrupt bit
	 * depending on which side (PF or VF) is sending.
	 */
	if (accel_dev->is_vf) {
		pf2vf_offset = hw_data->get_pf2vf_offset(0);
		lock = &accel_dev->vf.vf2pf_lock;
		local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		int_bit = ADF_VF2PF_INT;
	} else {
		pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
		lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
		local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		int_bit = ADF_PF2VF_INT;
	}

	mutex_lock(lock);

	/* Check if the PF2VF CSR is currently in use by the remote function */
	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & remote_in_use_mask) == remote_in_use_pattern) {
		dev_dbg(&GET_DEV(accel_dev),
			"PF2VF CSR in use by remote function\n");
		ret = -EBUSY;
		goto out;
	}

	/* Attempt to take ownership of the PF2VF CSR */
	msg &= ~local_in_use_mask;
	msg |= local_in_use_pattern;
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);

	/* Wait in case the remote function also just attempted ownership */
	msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);

	/* If our pattern was overwritten, the remote side won the race */
	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & local_in_use_mask) != local_in_use_pattern) {
		dev_dbg(&GET_DEV(accel_dev),
			"PF2VF CSR in use by remote - collision detected\n");
		ret = -EBUSY;
		goto out;
	}

	/*
	 * This function now owns the PF2VF CSR. The IN_USE pattern must
	 * stay in the CSR for all writes (including the remote's ACK)
	 * until ownership is relinquished below. Setting the interrupt
	 * bit signals the remote side that a message is present.
	 */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);

	/* Poll for the ACK: the remote clears the interrupt bit on receipt */
	do {
		msleep(ADF_IOV_MSG_ACK_DELAY);
		val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	} while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));

	if (val & int_bit) {
		dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
		val &= ~int_bit;
		ret = -EIO;
	}

	/* Finished with the CSR: clear our IN_USE pattern to release it */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
out:
	mutex_unlock(lock);
	return ret;
}
210
211
212
213
214
215
216
217
218
219
220
221 int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
222 {
223 u32 count = 0;
224 int ret;
225
226 do {
227 ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
228 if (ret)
229 msleep(ADF_IOV_MSG_RETRY_DELAY);
230 } while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
231
232 return ret;
233 }
234 EXPORT_SYMBOL_GPL(adf_iov_putmsg);
235
236 void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
237 {
238 struct adf_accel_dev *accel_dev = vf_info->accel_dev;
239 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
240 int bar_id = hw_data->get_misc_bar_id(hw_data);
241 struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
242 void __iomem *pmisc_addr = pmisc->virt_addr;
243 u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
244
245
246 msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
247
248
249 msg &= ~ADF_VF2PF_INT;
250 ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
251
252 if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
253
254 goto err;
255
256 switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
257 case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
258 {
259 u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
260
261 resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
262 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
263 ADF_PF2VF_MSGTYPE_SHIFT) |
264 (ADF_PFVF_COMPATIBILITY_VERSION <<
265 ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
266
267 dev_dbg(&GET_DEV(accel_dev),
268 "Compatibility Version Request from VF%d vers=%u\n",
269 vf_nr + 1, vf_compat_ver);
270
271 if (vf_compat_ver < hw_data->min_iov_compat_ver) {
272 dev_err(&GET_DEV(accel_dev),
273 "VF (vers %d) incompatible with PF (vers %d)\n",
274 vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
275 resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
276 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
277 } else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
278 dev_err(&GET_DEV(accel_dev),
279 "VF (vers %d) compat with PF (vers %d) unkn.\n",
280 vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
281 resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
282 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
283 } else {
284 dev_dbg(&GET_DEV(accel_dev),
285 "VF (vers %d) compatible with PF (vers %d)\n",
286 vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
287 resp |= ADF_PF2VF_VF_COMPATIBLE <<
288 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
289 }
290 }
291 break;
292 case ADF_VF2PF_MSGTYPE_VERSION_REQ:
293 dev_dbg(&GET_DEV(accel_dev),
294 "Legacy VersionRequest received from VF%d 0x%x\n",
295 vf_nr + 1, msg);
296 resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
297 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
298 ADF_PF2VF_MSGTYPE_SHIFT) |
299 (ADF_PFVF_COMPATIBILITY_VERSION <<
300 ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
301 resp |= ADF_PF2VF_VF_COMPATIBLE <<
302 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
303
304 resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
305 1 << ADF_PF2VF_MINORVERSION_SHIFT;
306 break;
307 case ADF_VF2PF_MSGTYPE_INIT:
308 {
309 dev_dbg(&GET_DEV(accel_dev),
310 "Init message received from VF%d 0x%x\n",
311 vf_nr + 1, msg);
312 vf_info->init = true;
313 }
314 break;
315 case ADF_VF2PF_MSGTYPE_SHUTDOWN:
316 {
317 dev_dbg(&GET_DEV(accel_dev),
318 "Shutdown message received from VF%d 0x%x\n",
319 vf_nr + 1, msg);
320 vf_info->init = false;
321 }
322 break;
323 default:
324 goto err;
325 }
326
327 if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
328 dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
329
330
331 adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
332 return;
333 err:
334 dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
335 vf_nr + 1, msg);
336 }
337
338 void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
339 {
340 struct adf_accel_vf_info *vf;
341 u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
342 (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
343 int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
344
345 for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
346 if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
347 dev_err(&GET_DEV(accel_dev),
348 "Failed to send restarting msg to VF%d\n", i);
349 }
350 }
351
/*
 * adf_vf2pf_request_version() - negotiate protocol version with the PF
 * @accel_dev: acceleration device (VF side)
 *
 * Sends a Compatibility Version Request to the PF, waits for the
 * response (delivered via the iov_msg_completion by the interrupt
 * handler) and checks the reported compatibility result.
 * Returns 0 if compatible, -EIO on timeout, -EINVAL if incompatible
 * or on an invalid response, or the send error from adf_iov_putmsg().
 */
static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
	unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msg = 0;
	int ret;

	msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
	msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
	msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
	/* The version must fit in the 8-bit field of the request message */
	BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);

	/* Send request from VF to PF */
	ret = adf_iov_putmsg(accel_dev, msg, 0);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to send Compatibility Version Request.\n");
		return ret;
	}

	/* Wait for the PF's response message */
	if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
					 timeout)) {
		dev_err(&GET_DEV(accel_dev),
			"IOV request/response message timeout expired\n");
		return -EIO;
	}

	/* Response received from the PF; check compatibility */
	switch (accel_dev->vf.compatible) {
	case ADF_PF2VF_VF_COMPATIBLE:
		break;
	case ADF_PF2VF_VF_COMPAT_UNKNOWN:
		/* VF is newer than PF; the VF decides whether it is
		 * compatible based on the PF's minimum supported version.
		 */
		if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
			break;
		/* fall through */
	case ADF_PF2VF_VF_INCOMPATIBLE:
		dev_err(&GET_DEV(accel_dev),
			"PF (vers %d) and VF (vers %d) are not compatible\n",
			accel_dev->vf.pf_version,
			ADF_PFVF_COMPATIBILITY_VERSION);
		return -EINVAL;
	default:
		dev_err(&GET_DEV(accel_dev),
			"Invalid response from PF; assume not compatible\n");
		return -EINVAL;
	}
	return ret;
}
402
403
404
405
406
407
408
409
/*
 * adf_enable_vf2pf_comms() - bring up VF-to-PF communication
 * @accel_dev: acceleration device (VF side)
 *
 * Unmasks PF2VF interrupts on this VF, then negotiates the protocol
 * compatibility version with the PF. Returns the negotiation result.
 */
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* Allow the PF to interrupt this VF with responses */
	adf_enable_pf2vf_interrupts(accel_dev);

	ret = adf_vf2pf_request_version(accel_dev);

	return ret;
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);