This source file includes the following definitions:
- vchnl_vf_send_get_ver_req
- vchnl_vf_send_get_hmc_fcn_req
- vchnl_vf_send_get_pe_stats_req
- vchnl_vf_send_add_hmc_objs_req
- vchnl_vf_send_del_hmc_objs_req
- vchnl_pf_send_get_ver_resp
- vchnl_pf_send_get_hmc_fcn_resp
- vchnl_pf_send_get_pe_stats_resp
- vchnl_pf_send_error_resp
- pf_cqp_get_hmc_fcn_callback
- pf_add_hmc_obj_callback
- pf_del_hmc_obj_callback
- i40iw_vf_init_pestat
- i40iw_vchnl_recv_pf
- i40iw_vchnl_recv_vf
- i40iw_vchnl_vf_get_ver
- i40iw_vchnl_vf_get_hmc_fcn
- i40iw_vchnl_vf_add_hmc_objs
- i40iw_vchnl_vf_del_hmc_obj
- i40iw_vchnl_vf_get_pe_stats
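
The VF-side entry points listed above are meant to be driven in sequence once the virtual channel is up: negotiate the channel version, obtain an HMC function index, then request HMC object ranges from the PF. The sketch below is illustrative only and is not part of this file; the function name example_vf_bringup, the object count, and the error handling are assumptions added for clarity.

/* Hypothetical VF bring-up sketch (not part of this file).  It assumes "dev"
 * is an already-initialized struct i40iw_sc_dev with the virtual channel up,
 * and simply chains the VF-side helpers defined below.
 */
static enum i40iw_status_code example_vf_bringup(struct i40iw_sc_dev *dev)
{
	enum i40iw_status_code ret;
	u32 vchnl_ver = 0;
	u16 hmc_fcn = 0;

	/* Negotiate the channel version with the PF. */
	ret = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
	if (ret)
		return ret;

	/* Ask the PF for the HMC function index assigned to this VF. */
	ret = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
	if (ret)
		return ret;

	/* Example: ask the PF to back 128 QP objects starting at index 0. */
	return i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_QP, 0, 128);
}
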
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"
#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_virtchnl.h"

/**
 * vchnl_vf_send_get_ver_req - Request the channel version from the PF
 * @dev: IWARP device pointer
 * @vchnl_req: Virtual channel message request pointer
 */
static enum i40iw_status_code vchnl_vf_send_get_ver_req(struct i40iw_sc_dev *dev,
							struct i40iw_virtchnl_req *vchnl_req)
{
	enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
	struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;

	if (!dev->vchnl_up)
		return ret_code;

	memset(vchnl_msg, 0, sizeof(*vchnl_msg));
	vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
	vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);
	vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_VER;
	vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_VER_V0;
	ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
	return ret_code;
}

/**
 * vchnl_vf_send_get_hmc_fcn_req - Request an HMC function from the PF
 * @dev: IWARP device pointer
 * @vchnl_req: Virtual channel message request pointer
 */
static enum i40iw_status_code vchnl_vf_send_get_hmc_fcn_req(struct i40iw_sc_dev *dev,
							    struct i40iw_virtchnl_req *vchnl_req)
{
	enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
	struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;

	if (!dev->vchnl_up)
		return ret_code;

	memset(vchnl_msg, 0, sizeof(*vchnl_msg));
	vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
	vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);
	vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_HMC_FCN;
	vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_HMC_FCN_V0;
	ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
	return ret_code;
}

/**
 * vchnl_vf_send_get_pe_stats_req - Request PE statistics from the PF
 * @dev: IWARP device pointer
 * @vchnl_req: Virtual channel message request pointer
 */
static enum i40iw_status_code vchnl_vf_send_get_pe_stats_req(struct i40iw_sc_dev *dev,
							     struct i40iw_virtchnl_req *vchnl_req)
{
	enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
	struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;

	if (!dev->vchnl_up)
		return ret_code;

	memset(vchnl_msg, 0, sizeof(*vchnl_msg));
	vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
	vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_dev_hw_stats) - 1;
	vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_STATS;
	vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_STATS_V0;
	ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
	return ret_code;
}

/**
 * vchnl_vf_send_add_hmc_objs_req - Request that the PF add an HMC object range
 * @dev: IWARP device pointer
 * @vchnl_req: Virtual channel message request pointer
 * @rsrc_type: HMC resource type to add
 * @start_index: Starting index of the object range
 * @rsrc_count: Number of objects to add
 */
static enum i40iw_status_code vchnl_vf_send_add_hmc_objs_req(struct i40iw_sc_dev *dev,
							     struct i40iw_virtchnl_req *vchnl_req,
							     enum i40iw_hmc_rsrc_type rsrc_type,
							     u32 start_index,
							     u32 rsrc_count)
{
	enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
	struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
	struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;

	if (!dev->vchnl_up)
		return ret_code;

	add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
	memset(vchnl_msg, 0, sizeof(*vchnl_msg));
	memset(add_hmc_obj, 0, sizeof(*add_hmc_obj));
	vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
	vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;
	vchnl_msg->iw_op_code = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE;
	vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0;
	add_hmc_obj->obj_type = (u16)rsrc_type;
	add_hmc_obj->start_index = start_index;
	add_hmc_obj->obj_count = rsrc_count;
	ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
	return ret_code;
}

/**
 * vchnl_vf_send_del_hmc_objs_req - Request that the PF delete an HMC object range
 * @dev: IWARP device pointer
 * @vchnl_req: Virtual channel message request pointer
 * @rsrc_type: HMC resource type to delete
 * @start_index: Starting index of the object range
 * @rsrc_count: Number of objects to delete
 */
static enum i40iw_status_code vchnl_vf_send_del_hmc_objs_req(struct i40iw_sc_dev *dev,
							     struct i40iw_virtchnl_req *vchnl_req,
							     enum i40iw_hmc_rsrc_type rsrc_type,
							     u32 start_index,
							     u32 rsrc_count)
{
	enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
	struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
	struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;

	if (!dev->vchnl_up)
		return ret_code;

	add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
	memset(vchnl_msg, 0, sizeof(*vchnl_msg));
	memset(add_hmc_obj, 0, sizeof(*add_hmc_obj));
	vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
	vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;
	vchnl_msg->iw_op_code = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE;
	vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0;
	add_hmc_obj->obj_type = (u16)rsrc_type;
	add_hmc_obj->start_index = start_index;
	add_hmc_obj->obj_count = rsrc_count;
	ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
	return ret_code;
}

/**
 * vchnl_pf_send_get_ver_resp - Send the channel version to the VF
 * @dev: IWARP device pointer
 * @vf_id: Virtual function ID associated with the message
 * @vchnl_msg: Virtual channel message buffer pointer
 */
static void vchnl_pf_send_get_ver_resp(struct i40iw_sc_dev *dev,
				       u32 vf_id,
				       struct i40iw_virtchnl_op_buf *vchnl_msg)
{
	enum i40iw_status_code ret_code;
	u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u32) - 1];
	struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;

	memset(resp_buffer, 0, sizeof(resp_buffer));
	vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
	vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
	vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
	*((u32 *)vchnl_msg_resp->iw_chnl_buf) = I40IW_VCHNL_CHNL_VER_V0;
	ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
}

/**
 * vchnl_pf_send_get_hmc_fcn_resp - Send the HMC function index to the VF
 * @dev: IWARP device pointer
 * @vf_id: Virtual function ID associated with the message
 * @vchnl_msg: Virtual channel message buffer pointer
 * @hmc_fcn: HMC function index to return to the VF
 */
static void vchnl_pf_send_get_hmc_fcn_resp(struct i40iw_sc_dev *dev,
					   u32 vf_id,
					   struct i40iw_virtchnl_op_buf *vchnl_msg,
					   u16 hmc_fcn)
{
	enum i40iw_status_code ret_code;
	u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u16) - 1];
	struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;

	memset(resp_buffer, 0, sizeof(resp_buffer));
	vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
	vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
	vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
	*((u16 *)vchnl_msg_resp->iw_chnl_buf) = hmc_fcn;
	ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
}

/**
 * vchnl_pf_send_get_pe_stats_resp - Send PE statistics to the VF
 * @dev: IWARP device pointer
 * @vf_id: Virtual function ID associated with the message
 * @vchnl_msg: Virtual channel message buffer pointer
 * @hw_stats: HW statistics to copy into the response
 */
static void vchnl_pf_send_get_pe_stats_resp(struct i40iw_sc_dev *dev,
					    u32 vf_id,
					    struct i40iw_virtchnl_op_buf *vchnl_msg,
					    struct i40iw_dev_hw_stats *hw_stats)
{
	enum i40iw_status_code ret_code;
	u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(struct i40iw_dev_hw_stats) - 1];
	struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;

	memset(resp_buffer, 0, sizeof(resp_buffer));
	vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
	vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
	vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
	*((struct i40iw_dev_hw_stats *)vchnl_msg_resp->iw_chnl_buf) = *hw_stats;
	ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
}

/**
 * vchnl_pf_send_error_resp - Send an error response to the VF
 * @dev: IWARP device pointer
 * @vf_id: Virtual function ID associated with the message
 * @vchnl_msg: Virtual channel message buffer pointer
 * @op_ret_code: Error code to return to the VF
 */
static void vchnl_pf_send_error_resp(struct i40iw_sc_dev *dev, u32 vf_id,
				     struct i40iw_virtchnl_op_buf *vchnl_msg,
				     u16 op_ret_code)
{
	enum i40iw_status_code ret_code;
	u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf)];
	struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;

	memset(resp_buffer, 0, sizeof(resp_buffer));
	vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
	vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
	vchnl_msg_resp->iw_op_ret_code = (u16)op_ret_code;
	ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
}

/**
 * pf_cqp_get_hmc_fcn_callback - Callback for the Manage HMC Function CQP operation
 * @dev: IWARP device pointer
 * @callback_param: Callback parameter (the VF device structure)
 * @cqe_info: CQP completion information
 */
static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback_param,
					struct i40iw_ccq_cqe_info *cqe_info)
{
	struct i40iw_vfdev *vf_dev = callback_param;
	struct i40iw_virt_mem vf_dev_mem;

	if (cqe_info->error) {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "CQP Completion Error on Get HMC Function. Maj = 0x%04x, Minor = 0x%04x\n",
			    cqe_info->maj_err_code, cqe_info->min_err_code);
		dev->vf_dev[vf_dev->iw_vf_idx] = NULL;
		vchnl_pf_send_error_resp(dev, vf_dev->vf_id, &vf_dev->vf_msg_buffer.vchnl_msg,
					 (u16)I40IW_ERR_CQP_COMPL_ERROR);
		vf_dev_mem.va = vf_dev;
		vf_dev_mem.size = sizeof(*vf_dev);
		i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
	} else {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "CQP Completion Operation Return information = 0x%08x\n",
			    cqe_info->op_ret_val);
		vf_dev->pmf_index = (u16)cqe_info->op_ret_val;
		vf_dev->msg_count--;
		vchnl_pf_send_get_hmc_fcn_resp(dev,
					       vf_dev->vf_id,
					       &vf_dev->vf_msg_buffer.vchnl_msg,
					       vf_dev->pmf_index);
	}
}

/**
 * pf_add_hmc_obj_callback - Add an HMC object range on behalf of a VF
 * @work_vf_dev: VF device structure of the requesting VF
 */
static void pf_add_hmc_obj_callback(void *work_vf_dev)
{
	struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
	struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
	struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
	struct i40iw_hmc_create_obj_info info;
	struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
	enum i40iw_status_code ret_code;

	if (!vf_dev->pf_hmc_initialized) {
		ret_code = i40iw_pf_init_vfhmc(vf_dev->pf_dev, (u8)vf_dev->pmf_index, NULL);
		if (ret_code)
			goto add_out;
		vf_dev->pf_hmc_initialized = true;
	}

	add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.is_pf = false;
	info.rsrc_type = (u32)add_hmc_obj->obj_type;
	info.entry_type = (info.rsrc_type == I40IW_HMC_IW_PBLE) ? I40IW_SD_TYPE_PAGED : I40IW_SD_TYPE_DIRECT;
	info.start_idx = add_hmc_obj->start_index;
	info.count = add_hmc_obj->obj_count;
	i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
		    "I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE. Add %u type %u objects\n",
		    info.count, info.rsrc_type);
	ret_code = i40iw_sc_create_hmc_obj(vf_dev->pf_dev, &info);
	if (!ret_code)
		vf_dev->hmc_info.hmc_obj[add_hmc_obj->obj_type].cnt = add_hmc_obj->obj_count;
add_out:
	vf_dev->msg_count--;
	vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
}

/**
 * pf_del_hmc_obj_callback - Delete an HMC object range on behalf of a VF
 * @work_vf_dev: VF device structure of the requesting VF
 */
static void pf_del_hmc_obj_callback(void *work_vf_dev)
{
	struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
	struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
	struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
	struct i40iw_hmc_del_obj_info info;
	struct i40iw_virtchnl_hmc_obj_range *del_hmc_obj;
	enum i40iw_status_code ret_code = I40IW_SUCCESS;

	if (!vf_dev->pf_hmc_initialized)
		goto del_out;

	del_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.is_pf = false;
	info.rsrc_type = (u32)del_hmc_obj->obj_type;
	info.start_idx = del_hmc_obj->start_index;
	info.count = del_hmc_obj->obj_count;
	i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
		    "I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE. Delete %u type %u objects\n",
		    info.count, info.rsrc_type);
	ret_code = i40iw_sc_del_hmc_obj(vf_dev->pf_dev, &info, false);
del_out:
	vf_dev->msg_count--;
	vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
}

/**
 * i40iw_vf_init_pestat - Initialize stats for a VF
 * @dev: IWARP device pointer
 * @stats: Statistics structure pointer
 * @index: Stats index
 */
static void i40iw_vf_init_pestat(struct i40iw_sc_dev *dev, struct i40iw_vsi_pestat *stats, u16 index)
{
	stats->hw = dev->hw;
	i40iw_hw_stats_init(stats, (u8)index, false);
	spin_lock_init(&stats->lock);
}

/**
 * i40iw_vchnl_recv_pf - Receive and dispatch a virtual channel message on the PF
 * @dev: IWARP device pointer
 * @vf_id: Virtual function ID associated with the message
 * @msg: Virtual channel message buffer pointer
 * @len: Length of the virtual channel message
 */
enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
					   u32 vf_id,
					   u8 *msg,
					   u16 len)
{
	struct i40iw_virtchnl_op_buf *vchnl_msg = (struct i40iw_virtchnl_op_buf *)msg;
	struct i40iw_vfdev *vf_dev = NULL;
	struct i40iw_hmc_fcn_info hmc_fcn_info;
	u16 iw_vf_idx;
	u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_virtchnl_work_info work_info;
	struct i40iw_vsi_pestat *stats;
	enum i40iw_status_code ret_code;

	if (!dev || !msg || !len)
		return I40IW_ERR_PARAM;

	if (!dev->vchnl_up)
		return I40IW_ERR_NOT_READY;
	if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
		vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
		return I40IW_SUCCESS;
	}
	for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
		if (!dev->vf_dev[iw_vf_idx]) {
			if (first_avail_iw_vf == I40IW_MAX_PE_ENABLED_VF_COUNT)
				first_avail_iw_vf = iw_vf_idx;
			continue;
		}
		if (dev->vf_dev[iw_vf_idx]->vf_id == vf_id) {
			vf_dev = dev->vf_dev[iw_vf_idx];
			break;
		}
	}
	if (vf_dev) {
		if (!vf_dev->msg_count) {
			vf_dev->msg_count++;
		} else {
			i40iw_debug(dev, I40IW_DEBUG_VIRT,
				    "VF%u already has a channel message in progress.\n",
				    vf_id);
			return I40IW_SUCCESS;
		}
	}
	switch (vchnl_msg->iw_op_code) {
	case I40IW_VCHNL_OP_GET_HMC_FCN:
		if (!vf_dev &&
		    (first_avail_iw_vf != I40IW_MAX_PE_ENABLED_VF_COUNT)) {
			ret_code = i40iw_allocate_virt_mem(dev->hw, &vf_dev_mem, sizeof(struct i40iw_vfdev) +
							   (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX));
			if (!ret_code) {
				vf_dev = vf_dev_mem.va;
				vf_dev->stats_initialized = false;
				vf_dev->pf_dev = dev;
				vf_dev->msg_count = 1;
				vf_dev->vf_id = vf_id;
				vf_dev->iw_vf_idx = first_avail_iw_vf;
				vf_dev->pf_hmc_initialized = false;
				vf_dev->hmc_info.hmc_obj = (struct i40iw_hmc_obj_info *)(&vf_dev[1]);
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "vf_dev %p, hmc_info %p, hmc_obj %p\n",
					    vf_dev, &vf_dev->hmc_info, vf_dev->hmc_info.hmc_obj);
				dev->vf_dev[first_avail_iw_vf] = vf_dev;
				iw_vf_idx = first_avail_iw_vf;
			} else {
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "VF%u Unable to allocate a VF device structure.\n",
					    vf_id);
				vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg, (u16)I40IW_ERR_NO_MEMORY);
				return I40IW_SUCCESS;
			}
			memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
			hmc_fcn_info.callback_fcn = pf_cqp_get_hmc_fcn_callback;
			hmc_fcn_info.vf_id = vf_id;
			hmc_fcn_info.iw_vf_idx = vf_dev->iw_vf_idx;
			hmc_fcn_info.cqp_callback_param = vf_dev;
			hmc_fcn_info.free_fcn = false;
			ret_code = i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
			if (ret_code)
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "VF%u error CQP HMC Function operation.\n",
					    vf_id);
			i40iw_vf_init_pestat(dev, &vf_dev->pestat, vf_dev->pmf_index);
			vf_dev->stats_initialized = true;
		} else {
			if (vf_dev) {
				vf_dev->msg_count--;
				vchnl_pf_send_get_hmc_fcn_resp(dev, vf_id, vchnl_msg, vf_dev->pmf_index);
			} else {
				vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg,
							 (u16)I40IW_ERR_NO_MEMORY);
			}
		}
		break;
	case I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE:
		if (!vf_dev)
			return I40IW_ERR_BAD_PTR;
		work_info.worker_vf_dev = vf_dev;
		work_info.callback_fcn = pf_add_hmc_obj_callback;
		memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
		i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
		break;
	case I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE:
		if (!vf_dev)
			return I40IW_ERR_BAD_PTR;
		work_info.worker_vf_dev = vf_dev;
		work_info.callback_fcn = pf_del_hmc_obj_callback;
		memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
		i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
		break;
	case I40IW_VCHNL_OP_GET_STATS:
		if (!vf_dev)
			return I40IW_ERR_BAD_PTR;
		stats = &vf_dev->pestat;
		i40iw_hw_stats_read_all(stats, &stats->hw_stats);
		vf_dev->msg_count--;
		vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &stats->hw_stats);
		break;
	default:
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: Invalid OpCode 0x%x\n", __func__,
			    vchnl_msg->iw_op_code);
		vchnl_pf_send_error_resp(dev, vf_id,
					 vchnl_msg, (u16)I40IW_ERR_NOT_IMPLEMENTED);
	}
	return I40IW_SUCCESS;
}

/**
 * i40iw_vchnl_recv_vf - Receive a PF response on the VF side
 * @dev: IWARP device pointer
 * @vf_id: Virtual function ID associated with the message
 * @msg: Virtual channel message buffer pointer
 * @len: Length of the virtual channel message
 */
enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,
					   u32 vf_id,
					   u8 *msg,
					   u16 len)
{
	struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)msg;
	struct i40iw_virtchnl_req *vchnl_req;

	vchnl_req = (struct i40iw_virtchnl_req *)(uintptr_t)vchnl_msg_resp->iw_chnl_op_ctx;
	vchnl_req->ret_code = (enum i40iw_status_code)vchnl_msg_resp->iw_op_ret_code;
	if (len == (sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1)) {
		if (vchnl_req->parm_len && vchnl_req->parm)
			memcpy(vchnl_req->parm, vchnl_msg_resp->iw_chnl_buf, vchnl_req->parm_len);
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: Got response, data size %u\n", __func__,
			    vchnl_req->parm_len);
	} else {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: error length on response, Got %u, expected %u\n", __func__,
			    len, (u32)(sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1));
	}

	return I40IW_SUCCESS;
}

/**
 * i40iw_vchnl_vf_get_ver - Request the channel version from the PF
 * @dev: IWARP device pointer
 * @vchnl_ver: Where to return the negotiated channel version
 */
enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,
					      u32 *vchnl_ver)
{
	struct i40iw_virtchnl_req vchnl_req;
	enum i40iw_status_code ret_code;

	if (!i40iw_vf_clear_to_send(dev))
		return I40IW_ERR_TIMEOUT;
	memset(&vchnl_req, 0, sizeof(vchnl_req));
	vchnl_req.dev = dev;
	vchnl_req.parm = vchnl_ver;
	vchnl_req.parm_len = sizeof(*vchnl_ver);
	vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;

	ret_code = vchnl_vf_send_get_ver_req(dev, &vchnl_req);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s Send message failed 0x%0x\n", __func__, ret_code);
		return ret_code;
	}
	ret_code = i40iw_vf_wait_vchnl_resp(dev);
	if (ret_code)
		return ret_code;
	else
		return vchnl_req.ret_code;
}

/**
 * i40iw_vchnl_vf_get_hmc_fcn - Request the HMC function index from the PF
 * @dev: IWARP device pointer
 * @hmc_fcn: Where to return the HMC function index
 */
enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,
						  u16 *hmc_fcn)
{
	struct i40iw_virtchnl_req vchnl_req;
	enum i40iw_status_code ret_code;

	if (!i40iw_vf_clear_to_send(dev))
		return I40IW_ERR_TIMEOUT;
	memset(&vchnl_req, 0, sizeof(vchnl_req));
	vchnl_req.dev = dev;
	vchnl_req.parm = hmc_fcn;
	vchnl_req.parm_len = sizeof(*hmc_fcn);
	vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;

	ret_code = vchnl_vf_send_get_hmc_fcn_req(dev, &vchnl_req);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s Send message failed 0x%0x\n", __func__, ret_code);
		return ret_code;
	}
	ret_code = i40iw_vf_wait_vchnl_resp(dev);
	if (ret_code)
		return ret_code;
	else
		return vchnl_req.ret_code;
}

/**
 * i40iw_vchnl_vf_add_hmc_objs - Request that the PF add an HMC object range
 * @dev: IWARP device pointer
 * @rsrc_type: HMC resource type to add
 * @start_index: Starting index of the object range
 * @rsrc_count: Number of objects to add
 */
enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,
						   enum i40iw_hmc_rsrc_type rsrc_type,
						   u32 start_index,
						   u32 rsrc_count)
{
	struct i40iw_virtchnl_req vchnl_req;
	enum i40iw_status_code ret_code;

	if (!i40iw_vf_clear_to_send(dev))
		return I40IW_ERR_TIMEOUT;
	memset(&vchnl_req, 0, sizeof(vchnl_req));
	vchnl_req.dev = dev;
	vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;

	ret_code = vchnl_vf_send_add_hmc_objs_req(dev,
						  &vchnl_req,
						  rsrc_type,
						  start_index,
						  rsrc_count);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s Send message failed 0x%0x\n", __func__, ret_code);
		return ret_code;
	}
	ret_code = i40iw_vf_wait_vchnl_resp(dev);
	if (ret_code)
		return ret_code;
	else
		return vchnl_req.ret_code;
}

/**
 * i40iw_vchnl_vf_del_hmc_obj - Request that the PF delete an HMC object range
 * @dev: IWARP device pointer
 * @rsrc_type: HMC resource type to delete
 * @start_index: Starting index of the object range
 * @rsrc_count: Number of objects to delete
 */
enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,
						  enum i40iw_hmc_rsrc_type rsrc_type,
						  u32 start_index,
						  u32 rsrc_count)
{
	struct i40iw_virtchnl_req vchnl_req;
	enum i40iw_status_code ret_code;

	if (!i40iw_vf_clear_to_send(dev))
		return I40IW_ERR_TIMEOUT;
	memset(&vchnl_req, 0, sizeof(vchnl_req));
	vchnl_req.dev = dev;
	vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;

	ret_code = vchnl_vf_send_del_hmc_objs_req(dev,
						  &vchnl_req,
						  rsrc_type,
						  start_index,
						  rsrc_count);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s Send message failed 0x%0x\n", __func__, ret_code);
		return ret_code;
	}
	ret_code = i40iw_vf_wait_vchnl_resp(dev);
	if (ret_code)
		return ret_code;
	else
		return vchnl_req.ret_code;
}

/**
 * i40iw_vchnl_vf_get_pe_stats - Request PE statistics from the PF
 * @dev: IWARP device pointer
 * @hw_stats: Where to return the HW statistics
 */
enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,
						   struct i40iw_dev_hw_stats *hw_stats)
{
	struct i40iw_virtchnl_req vchnl_req;
	enum i40iw_status_code ret_code;

	if (!i40iw_vf_clear_to_send(dev))
		return I40IW_ERR_TIMEOUT;
	memset(&vchnl_req, 0, sizeof(vchnl_req));
	vchnl_req.dev = dev;
	vchnl_req.parm = hw_stats;
	vchnl_req.parm_len = sizeof(*hw_stats);
	vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;

	ret_code = vchnl_vf_send_get_pe_stats_req(dev, &vchnl_req);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s Send message failed 0x%0x\n", __func__, ret_code);
		return ret_code;
	}
	ret_code = i40iw_vf_wait_vchnl_resp(dev);
	if (ret_code)
		return ret_code;
	else
		return vchnl_req.ret_code;
}