// SPDX-License-Identifier: GPL-2.0

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

static void i40e_resume_aq(struct i40e_hw *hw);

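/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Records the PF or VF register offsets for the queue registers in the
 *  local hw->aq structure.
 **/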
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
	}
}
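/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/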
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}
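/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/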
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}
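/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/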
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
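/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/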
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
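/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/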
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
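/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/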
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}
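/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/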
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
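/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/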
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
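/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/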
static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
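/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event queue)
 **/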
static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
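/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/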
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}
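/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/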
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}
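/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/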
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	mutex_unlock(&hw->aq.asq_mutex);
	return ret_code;
}
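/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/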
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	mutex_unlock(&hw->aq.arq_mutex);
	return ret_code;
}
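/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/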
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
	i40e_status ret_code;
	int retry = 0;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		msleep(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	/* Some features depend on the firmware API version per MAC type */
	if (hw->mac.type == I40E_MAC_XL710 &&
	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
		hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
	}
	if (hw->mac.type == I40E_MAC_X722 &&
	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
		hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
	}

	/* Newer firmware (API 1.5+) requires a lock for NVM reads */
	if (hw->aq.api_maj_ver > 1 ||
	    (hw->aq.api_maj_ver == 1 &&
	     hw->aq.api_min_ver >= 5))
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
	if (hw->aq.api_maj_ver > 1 ||
	    (hw->aq.api_maj_ver == 1 &&
	     hw->aq.api_min_ver >= 7))
		hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;

	if (hw->aq.api_maj_ver > 1 ||
	    (hw->aq.api_maj_ver == 1 &&
	     hw->aq.api_min_ver >= 8)) {
		hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
		hw->flags |= I40E_HW_FLAG_DROP_MODE;
	}

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = 0;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
	return ret_code;
}
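/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/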
i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}
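/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/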
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
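/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/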
static bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}
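/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/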
i40e_status i40e_asq_send_command(struct i40e_hw *hw,
				  struct i40e_aq_desc *desc,
				  void *buff, /* can be NULL */
				  u16 buff_size,
				  struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;
	u32 val = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie.  The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for command to be processed
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			udelay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
			status = I40E_ERR_NOT_READY;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}
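/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc: pointer to the temp descriptor (non DMA mem)
 *  @opcode: the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/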
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}
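/**
 *  i40e_clean_arq_element - clean one element from the receive queue
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/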
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
				   struct i40e_arq_event_info *e,
				   u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

	i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}
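/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/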
static void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}