This source file includes the following definitions.
- ice_adminq_init_regs
- ice_mailbox_init_regs
- ice_check_sq_alive
- ice_alloc_ctrlq_sq_ring
- ice_alloc_ctrlq_rq_ring
- ice_free_cq_ring
- ice_alloc_rq_bufs
- ice_alloc_sq_bufs
- ice_cfg_cq_regs
- ice_cfg_sq_regs
- ice_cfg_rq_regs
- ice_init_sq
- ice_init_rq
- ice_shutdown_sq
- ice_aq_ver_check
- ice_shutdown_rq
- ice_init_check_adminq
- ice_init_ctrlq
- ice_init_all_ctrlq
- ice_init_ctrlq_locks
- ice_create_all_ctrlq
- ice_shutdown_ctrlq
- ice_shutdown_all_ctrlq
- ice_destroy_ctrlq_locks
- ice_destroy_all_ctrlq
- ice_clean_sq
- ice_sq_done
- ice_sq_send_cmd
- ice_fill_dflt_direct_cmd_desc
- ice_clean_rq_elem
#include "ice_common.h"
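
/* Fill a control queue structure's register offsets and masks for the send
 * (ATQ) and receive (ARQ) sides, using the register prefix (PF_FW or PF_MBX)
 * to select the right register set.
 */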
#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)
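
/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */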
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ICE_CQ_INIT_REGS(cq, PF_FW);
}
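
/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */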
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}
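
/**
 * ice_check_sq_alive - check if the Send Queue is alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if Queue is enabled else false.
 */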
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}
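
/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */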
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return ICE_ERR_NO_MEMORY;
	}

	return 0;
}
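
/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */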
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->rq.desc_buf.size = size;
	return 0;
}
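
/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */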
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
			   ring->desc_buf.va, ring->desc_buf.pa);
	ring->desc_buf.va = NULL;
	ring->desc_buf.pa = 0;
	ring->desc_buf.size = 0;
}
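
/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */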
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);

	return ICE_ERR_NO_MEMORY;
}
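
/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */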
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);

	return ICE_ERR_NO_MEMORY;
}
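
/**
 * ice_cfg_cq_regs - setup the Control queue registers
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific Control queue ring
 * @num_entries: number of queue entries to program
 *
 * Configure base address and length registers for the given control queue
 * ring.
 */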
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}
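
/**
 * ice_cfg_sq_regs - setup the Transmit Queue (ATQ) registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */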
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}
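
/**
 * ice_cfg_rq_regs - setup the Receive Queue (ARQ) registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */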
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}
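
/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */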
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}
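
/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */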
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}
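
/* Free all DMA buffers posted to the given control queue ring, plus the
 * command details array when one was allocated (send queues only).
 */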
#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	int i;								\
									\
	/* free descriptor buffers */					\
	for (i = 0; i < (qi)->num_##ring##_entries; i++)		\
		if ((qi)->ring.r.ring##_bi[i].pa) {			\
			dmam_free_coherent(ice_hw_to_dev(hw),		\
					   (qi)->ring.r.ring##_bi[i].size, \
					   (qi)->ring.r.ring##_bi[i].va, \
					   (qi)->ring.r.ring##_bi[i].pa); \
			(qi)->ring.r.ring##_bi[i].va = NULL;		\
			(qi)->ring.r.ring##_bi[i].pa = 0;		\
			(qi)->ring.r.ring##_bi[i].size = 0;		\
		}							\
									\
	/* free the command details buffer, if present */		\
	if ((qi)->ring.cmd_buf)						\
		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
									\
	/* free DMA head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
} while (0)
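
/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */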
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		/* queue already shutdown */
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}
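
/**
 * ice_aq_ver_check - Check the reported AQ API version
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' if the driver should attempt to load. 'false' otherwise.
 */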
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}
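
/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */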
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		/* queue already shutdown */
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}
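
/**
 * ice_init_check_adminq - Check version for Admin Queue to know if its alive
 * @hw: pointer to the hardware structure
 */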
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}
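
/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */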
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}
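
/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */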
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status ret_code;

	/* Init FW admin queue */
	ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
	if (ret_code)
		return ret_code;

	ret_code = ice_init_check_adminq(hw);
	if (ret_code)
		return ret_code;

	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
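
/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */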
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);
}
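
/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */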
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}
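
/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */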
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}
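
/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues,
 * such as in response to a reset event.
 */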
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
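
/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */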
static void
ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}
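
/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * restart control queues, such as in response to a reset event.
 */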
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}
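
/**
 * ice_clean_sq - cleans the Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */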
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}
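
/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */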
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}
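
/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 *
 * Illustrative caller sketch for a direct (bufferless) command; the exact
 * opcode and wrapper vary by caller:
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 */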
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		udelay(ICE_CTL_Q_SQ_CMD_USEC);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, i.e. SQ processing completed, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command completed with error 0x%x\n",
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send Queue Writeback timeout.\n");
		status = ICE_ERR_AQ_TIMEOUT;
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}
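
/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */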
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}
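
/**
 * ice_clean_rq_elem - clean one element from the Receive queue
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through 'e'. It can also return how many events are
 * left to process through 'pending'.
 */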
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event received with error 0x%x\n",
			  cq->rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
		     cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}