This source file includes following definitions.
- lpfc_dump_static_vport
- lpfc_down_link
- lpfc_dump_mem
- lpfc_dump_wakeup_param
- lpfc_read_nv
- lpfc_config_async
- lpfc_heart_beat
- lpfc_read_topology
- lpfc_clear_la
- lpfc_config_link
- lpfc_config_msi
- lpfc_init_link
- lpfc_read_sparam
- lpfc_unreg_did
- lpfc_read_config
- lpfc_read_lnk_stat
- lpfc_reg_rpi
- lpfc_unreg_login
- lpfc_sli4_unreg_all_rpis
- lpfc_reg_vpi
- lpfc_unreg_vpi
- lpfc_config_pcb_setup
- lpfc_read_rev
- lpfc_sli4_swap_str
- lpfc_build_hbq_profile2
- lpfc_build_hbq_profile3
- lpfc_build_hbq_profile5
- lpfc_config_hbq
- lpfc_config_ring
- lpfc_config_port
- lpfc_kill_board
- lpfc_mbox_put
- lpfc_mbox_get
- __lpfc_mbox_cmpl_put
- lpfc_mbox_cmpl_put
- lpfc_mbox_cmd_check
- lpfc_mbox_dev_check
- lpfc_mbox_tmo_val
- lpfc_sli4_mbx_sge_set
- lpfc_sli4_mbx_sge_get
- lpfc_sli4_mbox_cmd_free
- lpfc_sli4_config
- lpfc_sli4_mbox_rsrc_extent
- lpfc_sli_config_mbox_subsys_get
- lpfc_sli_config_mbox_opcode_get
- lpfc_sli4_mbx_read_fcf_rec
- lpfc_request_features
- lpfc_init_vfi
- lpfc_reg_vfi
- lpfc_init_vpi
- lpfc_unreg_vfi
- lpfc_sli4_dump_cfg_rg23
- lpfc_mbx_cmpl_rdp_link_stat
- lpfc_mbx_cmpl_rdp_page_a2
- lpfc_mbx_cmpl_rdp_page_a0
- lpfc_sli4_dump_page_a0
- lpfc_reg_fcfi
- lpfc_reg_fcfi_mrq
- lpfc_unreg_fcfi
- lpfc_resume_rpi
- lpfc_supported_pages
- lpfc_pc_sli4_params
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include <linux/interrupt.h>
28
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_transport_fc.h>
31 #include <scsi/scsi.h>
32 #include <scsi/fc/fc_fs.h>
33
34 #include "lpfc_hw4.h"
35 #include "lpfc_hw.h"
36 #include "lpfc_sli.h"
37 #include "lpfc_sli4.h"
38 #include "lpfc_nl.h"
39 #include "lpfc_disc.h"
40 #include "lpfc_scsi.h"
41 #include "lpfc.h"
42 #include "lpfc_logmsg.h"
43 #include "lpfc_crtn.h"
44 #include "lpfc_compat.h"
45
46
47
48
49
50
51
52
53
54
55
56
57
/**
 * lpfc_dump_static_vport - Dump HBA's static vport information.
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @offset: offset for dumping vport info.
 *
 * Prepares a DUMP_MEMORY mailbox command that retrieves the list of static
 * vports to be created from the DMP_REGION_VPORT config region.  For SLI3
 * HBAs the response data is embedded in the mailbox itself; for SLI4 a
 * separate DMA buffer is allocated and its address is passed to the HBA.
 *
 * Return 0 on success, 1 when the SLI4 DMA buffer allocation fails.
 **/
int
lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
		uint16_t offset)
{
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;

	mb = &pmb->u.mb;

	/* Setup to dump vport info region */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.entry_index = offset;
	mb->un.varDmp.region_id = DMP_REGION_VPORT;
	mb->mbxOwner = OWN_HOST;

	/* For SLI3 HBAs data is embedded in the mailbox response */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		mb->un.varDmp.cv = 1;
		mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
		return 0;
	}

	/* For SLI4 HBAs the driver must supply a DMA buffer for the data */
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);

	if (!mp || !mp->virt) {
		kfree(mp);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"2605 lpfc_dump_static_vport: memory"
			" allocation failed\n");
		return 1;
	}
	memset(mp->virt, 0, LPFC_BPL_SIZE);
	INIT_LIST_HEAD(&mp->list);
	/* save buffer for completion handler; freed there */
	pmb->ctx_buf = (uint8_t *)mp;
	mb->un.varWords[3] = putPaddrLow(mp->phys);
	mb->un.varWords[4] = putPaddrHigh(mp->phys);
	mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);

	return 0;
}
104
105
106
107
108
109
110
111
112 void
113 lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
114 {
115 MAILBOX_t *mb;
116 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
117 mb = &pmb->u.mb;
118 mb->mbxCommand = MBX_DOWN_LINK;
119 mb->mbxOwner = OWN_HOST;
120 }
121
122
123
124
125
126
127
128
129
130
131
132
133
/**
 * lpfc_dump_mem - Prepare a mailbox command for reading a config region
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @offset: offset into the region to start the dump from.
 * @region_id: id of the config region to dump.
 *
 * Prepares a DUMP_MEMORY mailbox command that retrieves data from one of
 * the HBA's config regions (e.g. VPD).  The response data is returned
 * embedded in the mailbox (cv = 1).
 **/
void
lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
		uint16_t region_id)
{
	MAILBOX_t *mb;
	void *ctx;

	mb = &pmb->u.mb;
	/* Preserve the completion context; the memset below wipes pmb */
	ctx = pmb->ctx_buf;

	/* Setup to dump mem region */
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.cv = 1;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.entry_index = offset;
	mb->un.varDmp.region_id = region_id;
	mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
	mb->un.varDmp.co = 0;
	mb->un.varDmp.resp_offset = 0;
	/* restore the saved completion context */
	pmb->ctx_buf = ctx;
	mb->mbxOwner = OWN_HOST;
	return;
}
158
159
160
161
162
163
164
165
166
/**
 * lpfc_dump_wakeup_param - Prepare mailbox command for retrieving wakeup params
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Prepares a DUMP_MEMORY mailbox command that retrieves the HBA's wakeup
 * parameters (WAKE_UP_PARMS_REGION_ID).  The response is embedded in the
 * mailbox (cv = 1).
 **/
void
lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;
	void *ctx;

	mb = &pmb->u.mb;
	/* Preserve the completion context; the memset below wipes pmb */
	ctx = pmb->ctx_buf;

	/* Setup to dump VPD region */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->mbxOwner = OWN_HOST;
	mb->un.varDmp.cv = 1;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	/* entry_index is only meaningful on pre-SLI4 parts */
	if (phba->sli_rev < LPFC_SLI_REV4)
		mb->un.varDmp.entry_index = 0;
	mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
	mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
	mb->un.varDmp.co = 0;
	mb->un.varDmp.resp_offset = 0;
	/* restore the saved completion context */
	pmb->ctx_buf = ctx;
	return;
}
192
193
194
195
196
197
198
199
200
201
202
203
204 void
205 lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
206 {
207 MAILBOX_t *mb;
208
209 mb = &pmb->u.mb;
210 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
211 mb->mbxCommand = MBX_READ_NV;
212 mb->mbxOwner = OWN_HOST;
213 return;
214 }
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229 void
230 lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
231 uint32_t ring)
232 {
233 MAILBOX_t *mb;
234
235 mb = &pmb->u.mb;
236 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
237 mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
238 mb->un.varCfgAsyncEvent.ring = ring;
239 mb->mbxOwner = OWN_HOST;
240 return;
241 }
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256 void
257 lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
258 {
259 MAILBOX_t *mb;
260
261 mb = &pmb->u.mb;
262 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
263 mb->mbxCommand = MBX_HEARTBEAT;
264 mb->mbxOwner = OWN_HOST;
265 return;
266 }
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
/**
 * lpfc_read_topology - Prepare a mailbox command for reading HBA topology
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @mp: DMA buffer (caller-allocated) that receives the loop ALPA map.
 *
 * Prepares a READ_TOPOLOGY mailbox command.  The HBA writes the topology
 * details (including the ALPA map in loop mode) into @mp via DMA.
 * @mp is stashed in pmb->ctx_buf so the completion handler can find it.
 *
 * Always returns 0.
 **/
int
lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
		   struct lpfc_dmabuf *mp)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	INIT_LIST_HEAD(&mp->list);
	mb->mbxCommand = MBX_READ_TOPOLOGY;
	/* point the lilp BDE at the caller's DMA buffer */
	mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE;
	mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
	mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys);

	/* Save address for later completion and set the owner to host so that
	 * the FW knows this mailbox is available for processing.
	 */
	pmb->ctx_buf = (uint8_t *)mp;
	mb->mbxOwner = OWN_HOST;
	return (0);
}
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327 void
328 lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
329 {
330 MAILBOX_t *mb;
331
332 mb = &pmb->u.mb;
333 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
334
335 mb->un.varClearLA.eventTag = phba->fc_eventTag;
336 mb->mbxCommand = MBX_CLEAR_LA;
337 mb->mbxOwner = OWN_HOST;
338 return;
339 }
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
/**
 * lpfc_config_link - Prepare a mailbox command for configuring link on a HBA
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Prepares a CONFIG_LINK mailbox command carrying the port's FC timers,
 * its assigned DID, and the optional coalescing-response / BB-credit-recovery
 * / ACK0 features.
 **/
void
lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_vport *vport = phba->pport;
	MAILBOX_t *mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	/* SLI-2/SLI-3 only: Coalescing Response feature (disabled on SLI4,
	 * where interrupt coalescing is handled differently).
	 */
	if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) {
		mb->un.varCfgLnk.cr = 1;
		mb->un.varCfgLnk.ci = 1;
		mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
		mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
	}

	mb->un.varCfgLnk.myId = vport->fc_myDID;
	mb->un.varCfgLnk.edtov = phba->fc_edtov;
	mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
	mb->un.varCfgLnk.ratov = phba->fc_ratov;
	mb->un.varCfgLnk.rttov = phba->fc_rttov;
	mb->un.varCfgLnk.altov = phba->fc_altov;
	mb->un.varCfgLnk.crtov = phba->fc_crtov;
	mb->un.varCfgLnk.cscn = 0;
	/* BB credit recovery: enabled only when both HW support and the
	 * user config allow it.
	 */
	if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
		mb->un.varCfgLnk.cscn = 1;
		mb->un.varCfgLnk.bbscn = bf_get(lpfc_bbscn_def,
						&phba->sli4_hba.bbscn_params);
	}

	if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4))
		mb->un.varCfgLnk.ack0_enable = 1;

	mb->mbxCommand = MBX_CONFIG_LINK;
	mb->mbxOwner = OWN_HOST;
	return;
}
393
394
395
396
397
398
399
400
401
402
403
404
405
406
/**
 * lpfc_config_msi - Prepare a mailbox command for configuring msi-x
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Prepares a CONFIG_MSI mailbox command that associates host attention
 * conditions with MSI-X message numbers.  Requires cfg_use_msi == 2
 * (MSI-X mode) and SLI revision >= 3.
 *
 * Return 0 on success, -EINVAL if the preconditions are not met.
 **/
int
lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t attentionConditions[2];

	/* Sanity check: this command is only valid in MSI-X mode */
	if (phba->cfg_use_msi != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0475 Not configured for supporting MSI-X "
				"cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
		return -EINVAL;
	}

	if (phba->sli_rev < 3) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0476 HBA not supporting SLI-3 or later "
				"SLI Revision: 0x%x\n", phba->sli_rev);
		return -EINVAL;
	}

	/* Clear mailbox command fields */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	/*
	 * SLI-3, Message Signaled Interrupt Feature.
	 * All attention conditions are routed through message 0; word 1
	 * is currently unused.
	 */
	attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
				  HA_LATT | HA_MBATT);
	attentionConditions[1] = 0;

	mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
	mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];

	/*
	 * Set up message number to HA bit association.  The byte layout of
	 * messageNumberByHA differs between big- and little-endian hosts.
	 */
#ifdef __BIG_ENDIAN_BITFIELD
	/* RA0 (FCP Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
	/* RA1 (Other Protocol Extra Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
#else
	/* RA0 (FCP Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
	/* RA1 (Other Protocol Extra Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
#endif
	/* Multi-message attention configuration */
	mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
	mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];

	/* For now, HBA autoclear does not work reliably, disable it */
	mb->un.varCfgMSI.autoClearHA[0] = 0;
	mb->un.varCfgMSI.autoClearHA[1] = 0;

	/* Set mailbox command fields */
	mb->mbxCommand = MBX_CONFIG_MSI;
	mb->mbxOwner = OWN_HOST;

	return 0;
}
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
/**
 * lpfc_init_link - Prepare a mailbox command for initialize link on a HBA
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @topology: requested topology mode (FLAGS_TOPOLOGY_MODE_* / FLAGS_LOCAL_LB).
 * @linkspeed: requested user link speed (LPFC_USER_LINK_SPEED_*).
 *
 * Prepares an INIT_LINK mailbox command that brings up the FC link with the
 * requested topology and link speed.  Fixed link speeds are honored only
 * when the firmware feature level supports them; otherwise auto-speed is
 * used.
 **/
void
lpfc_init_link(struct lpfc_hba * phba,
	       LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
{
	lpfc_vpd_t *vpd;
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	switch (topology) {
	case FLAGS_TOPOLOGY_MODE_LOOP_PT:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
		break;
	case FLAGS_TOPOLOGY_MODE_PT_PT:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
		break;
	case FLAGS_TOPOLOGY_MODE_LOOP:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
		break;
	case FLAGS_TOPOLOGY_MODE_PT_LOOP:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
		break;
	case FLAGS_LOCAL_LB:
		mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		break;
	}

	/* Lancer G6/G7 ASICs do not support loop topology: force PT-PT
	 * and remember the override in the hba config.
	 */
	if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
	     phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
	    mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
		phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
	}

	/* Enable asynchronous ABTS responses from firmware */
	mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;

	/* NEW_FEATURE
	 * Setting up the link speed
	 */
	vpd = &phba->vpd;
	if (vpd->rev.feaLevelHigh >= 0x02){
		switch(linkspeed){
		case LPFC_USER_LINK_SPEED_1G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_1G;
			break;
		case LPFC_USER_LINK_SPEED_2G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_2G;
			break;
		case LPFC_USER_LINK_SPEED_4G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_4G;
			break;
		case LPFC_USER_LINK_SPEED_8G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_8G;
			break;
		case LPFC_USER_LINK_SPEED_10G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_10G;
			break;
		case LPFC_USER_LINK_SPEED_16G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
			break;
		case LPFC_USER_LINK_SPEED_32G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_32G;
			break;
		case LPFC_USER_LINK_SPEED_64G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_64G;
			break;
		case LPFC_USER_LINK_SPEED_AUTO:
		default:
			mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
			break;
		}

	}
	else
		mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;

	mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
	mb->mbxOwner = OWN_HOST;
	mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
	return;
}
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
/**
 * lpfc_read_sparam - Prepare a mailbox command for reading HBA parameters
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @vpi: virtual N_Port identifier.
 *
 * Prepares a READ_SPARM64 mailbox command.  A DMA buffer is allocated to
 * receive the service parameters and is stashed in pmb->ctx_buf for the
 * completion handler.
 *
 * Return 0 on success, 1 when the DMA buffer allocation fails.
 **/
int
lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
	struct lpfc_dmabuf *mp;
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->mbxOwner = OWN_HOST;

	/* Get a buffer to hold the HBAs Service Parameters */

	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp || !mp->virt) {
		kfree(mp);
		/* command is still issued without a buffer; the completion
		 * handler recognizes the missing ctx_buf
		 */
		mb->mbxCommand = MBX_READ_SPARM64;
		/* READ_SPARAM: no buffers */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
			        "0301 READ_SPARAM: no buffers\n");
		return (1);
	}
	INIT_LIST_HEAD(&mp->list);
	mb->mbxCommand = MBX_READ_SPARM64;
	mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
	mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
	mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
	if (phba->sli_rev >= LPFC_SLI_REV3)
		mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];

	/* save address for completion */
	pmb->ctx_buf = mp;

	return (0);
}
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654 void
655 lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
656 LPFC_MBOXQ_t * pmb)
657 {
658 MAILBOX_t *mb;
659
660 mb = &pmb->u.mb;
661 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
662
663 mb->un.varUnregDID.did = did;
664 mb->un.varUnregDID.vpi = vpi;
665 if ((vpi != 0xffff) &&
666 (phba->sli_rev == LPFC_SLI_REV4))
667 mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
668
669 mb->mbxCommand = MBX_UNREG_D_ID;
670 mb->mbxOwner = OWN_HOST;
671 return;
672 }
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687 void
688 lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
689 {
690 MAILBOX_t *mb;
691
692 mb = &pmb->u.mb;
693 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
694
695 mb->mbxCommand = MBX_READ_CONFIG;
696 mb->mbxOwner = OWN_HOST;
697 return;
698 }
699
700
701
702
703
704
705
706
707
708
709
710
711
712 void
713 lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
714 {
715 MAILBOX_t *mb;
716
717 mb = &pmb->u.mb;
718 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
719
720 mb->mbxCommand = MBX_READ_LNK_STAT;
721 mb->mbxOwner = OWN_HOST;
722 return;
723 }
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
/**
 * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual N_Port identifier.
 * @did: remote port identifier.
 * @param: pointer to the remote port's service parameters.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @rpi: the rpi to use in the registration (SLI4 only; ignored otherwise).
 *
 * Prepares a REG_LOGIN64 mailbox command.  The remote port's service
 * parameters are copied into a freshly allocated DMA buffer whose address
 * is placed in the command; the buffer is stashed in pmb->ctx_buf for the
 * completion handler.
 *
 * Return 0 on success, 1 when the DMA buffer allocation fails.
 **/
int
lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
	     uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
{
	MAILBOX_t *mb = &pmb->u.mb;
	uint8_t *sparam;
	struct lpfc_dmabuf *mp;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->un.varRegLogin.rpi = 0;
	/* SLI4 uses driver-managed rpi indices; map to the fw value */
	if (phba->sli_rev == LPFC_SLI_REV4)
		mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
	if (phba->sli_rev >= LPFC_SLI_REV3)
		mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
	mb->un.varRegLogin.did = did;
	mb->mbxOwner = OWN_HOST;
	/* Get a buffer to hold NPorts Service Parameters */
	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp || !mp->virt) {
		kfree(mp);
		mb->mbxCommand = MBX_REG_LOGIN64;
		/* REG_LOGIN: no buffers */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
				"rpi x%x\n", vpi, did, rpi);
		return 1;
	}
	INIT_LIST_HEAD(&mp->list);
	sparam = mp->virt;

	/* Copy param's into a new buffer */
	memcpy(sparam, param, sizeof (struct serv_parm));

	/* save address for completion */
	pmb->ctx_buf = (uint8_t *)mp;

	mb->mbxCommand = MBX_REG_LOGIN64;
	mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
	mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
	mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);

	return 0;
}
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813 void
814 lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
815 LPFC_MBOXQ_t * pmb)
816 {
817 MAILBOX_t *mb;
818
819 mb = &pmb->u.mb;
820 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
821
822 mb->un.varUnregLogin.rpi = rpi;
823 mb->un.varUnregLogin.rsvd1 = 0;
824 if (phba->sli_rev >= LPFC_SLI_REV3)
825 mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
826
827 mb->mbxCommand = MBX_UNREG_LOGIN;
828 mb->mbxOwner = OWN_HOST;
829
830 return;
831 }
832
833
834
835
836
837
838
839
/**
 * lpfc_sli4_unreg_all_rpis - unregister all RPIs of a vport (SLI4)
 * @vport: pointer to the vport whose RPIs are unregistered.
 *
 * Allocates a mailbox and issues an UNREG_LOGIN with the "unreg all"
 * indication (rsvd1 = 0x4000) so the firmware releases every RPI owned
 * by the vport in one command.  Issued without waiting; the default
 * completion handler frees the mailbox.  Silently does nothing if the
 * mailbox allocation fails.
 **/
void
lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		/*
		 * For SLI4 the rpi argument is overloaded with the
		 * firmware-mapped vpi so the unreg-all applies to this
		 * vport's context; rsvd1 = 0x4000 flags "unreg all RPIs"
		 * to the firmware — NOTE(review): 0x4000 semantics
		 * inferred from usage, confirm against SLI4 spec.
		 */
		lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
				 mbox);
		mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->ctx_ndlp = NULL;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(mbox, phba->mbox_mem_pool);
	}
}
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
/**
 * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
 * @vport: pointer to the vport to register.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Prepares a REG_VPI mailbox command registering the vport's identifier,
 * source DID, vfi and port name with the firmware.
 **/
void
lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_hba *phba = vport->phba;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	/*
	 * Set the re-reg VPI bit when this is an update of an already
	 * registered vpi (SLI4 only).
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
		mb->un.varRegVpi.upd = 1;

	mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
	mb->un.varRegVpi.sid = vport->fc_myDID;
	if (phba->sli_rev == LPFC_SLI_REV4)
		mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
	else
		mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
	memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
	       sizeof(struct lpfc_name));
	/* firmware expects the wwn words in little-endian order */
	mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
	mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);

	mb->mbxCommand = MBX_REG_VPI;
	mb->mbxOwner = OWN_HOST;
	return;

}
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930 void
931 lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
932 {
933 MAILBOX_t *mb = &pmb->u.mb;
934 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
935
936 if (phba->sli_rev == LPFC_SLI_REV3)
937 mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
938 else if (phba->sli_rev >= LPFC_SLI_REV4)
939 mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
940
941 mb->mbxCommand = MBX_UNREG_VPI;
942 mb->mbxOwner = OWN_HOST;
943 return;
944
945 }
946
947
948
949
950
951
952
953
/**
 * lpfc_config_pcb_setup - Set up IOCB rings in the Port Control Block (PCB)
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks every SLI ring and fills in its command/response ring descriptor
 * in the PCB: entry counts and the DMA addresses of the ring's IOCB slots
 * inside the slim2p region.  Rings with zero command or response entries
 * are disabled (descriptor zeroed).  IOCB sizes depend on the SLI revision.
 **/
static void
lpfc_config_pcb_setup(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	PCB_t *pcbp = phba->pcb;
	dma_addr_t pdma_addr;
	uint32_t offset;
	uint32_t iocbCnt = 0;	/* running index into phba->IOCBs[] */
	int i;

	pcbp->maxRing = (psli->num_rings - 1);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];

		/* IOCB entry size depends on the negotiated SLI revision */
		pring->sli.sli3.sizeCiocb =
			phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
		pring->sli.sli3.sizeRiocb =
			phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
		/* A ring MUST have both cmd and rsp entries defined to be
		   usable by the adapter. */
		if ((pring->sli.sli3.numCiocb == 0) ||
			(pring->sli.sli3.numRiocb == 0)) {
			pcbp->rdsc[i].cmdEntries = 0;
			pcbp->rdsc[i].rspEntries = 0;
			pcbp->rdsc[i].cmdAddrHigh = 0;
			pcbp->rdsc[i].rspAddrHigh = 0;
			pcbp->rdsc[i].cmdAddrLow = 0;
			pcbp->rdsc[i].rspAddrLow = 0;
			pring->sli.sli3.cmdringaddr = NULL;
			pring->sli.sli3.rspringaddr = NULL;
			continue;
		}
		/* Command ring setup for ring */
		pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
		pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;

		/* DMA address = slim2p base + byte offset of the slot */
		offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
			 (uint8_t *) phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->sli.sli3.numCiocb;

		/* Setup response ring for ring */
		pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt];

		pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
		offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
			 (uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->sli.sli3.numRiocb;
	}
}
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028 void
1029 lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1030 {
1031 MAILBOX_t *mb = &pmb->u.mb;
1032 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1033 mb->un.varRdRev.cv = 1;
1034 mb->un.varRdRev.v3req = 1;
1035 mb->mbxCommand = MBX_READ_REV;
1036 mb->mbxOwner = OWN_HOST;
1037 return;
1038 }
1039
void
lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_mqe *mqe;

	switch (mb->mbxCommand) {
	case MBX_READ_REV:
		mqe = &pmb->u.mqe;
		/* In-place copy via lpfc_sli_pcimem_bcopy: byte-swaps the
		 * firmware name strings returned by READ_REV into host
		 * order (source == destination is intentional, not a no-op).
		 */
		lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
				      mqe->un.read_rev.fw_name, 16);
		lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
				      mqe->un.read_rev.ulp_fw_name, 16);
		break;
	default:
		/* no string fields needing a swap for other commands */
		break;
	}
	return;
}
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070 static void
1071 lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
1072 struct lpfc_hbq_init *hbq_desc)
1073 {
1074 hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
1075 hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen;
1076 hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
1077 }
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089 static void
1090 lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
1091 struct lpfc_hbq_init *hbq_desc)
1092 {
1093 hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
1094 hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen;
1095 hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
1096 hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff;
1097 memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
1098 sizeof(hbqmb->profiles.profile3.cmdmatch));
1099 }
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112 static void
1113 lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
1114 struct lpfc_hbq_init *hbq_desc)
1115 {
1116 hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
1117 hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen;
1118 hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
1119 hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff;
1120 memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
1121 sizeof(hbqmb->profiles.profile5.cmdmatch));
1122 }
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
/**
 * lpfc_config_hbq - Prepare a mailbox command for configuring an HBQ
 * @phba: pointer to lpfc hba data structure.
 * @id: HBQ identifier.
 * @hbq_desc: pointer to the HBA descriptor data structure.
 * @hbq_entry_index: index of the HBQ entry array.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Prepares a CONFIG_HBQ mailbox command describing one Host Buffer Queue:
 * entry count, selection profile (with per-profile fields), ring mask and
 * the DMA address of the HBQ entries within the hbqslimp region.
 **/
void
lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
		 struct lpfc_hbq_init *hbq_desc,
		uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
{
	int i;
	MAILBOX_t *mb = &pmb->u.mb;
	struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	hbqmb->hbqId = id;
	hbqmb->entry_count = hbq_desc->entry_count;	/* # entries in HBQ */
	hbqmb->recvNotify = hbq_desc->rn;		/* Receive
							 * Notification */
	hbqmb->numMask = hbq_desc->mask_count;		/* # R_CTL/TYPE masks
							 * # in words 0-19 */
	hbqmb->profile = hbq_desc->profile;		/* Selection profile:
							 * 0 = all,
							 * 7 = logentry */
	hbqmb->ringMask = hbq_desc->ring_mask;		/* Binds HBQ to a ring
							 * e.g. Ring0=b0001,
							 * ring2=b0100 */
	hbqmb->headerLen = hbq_desc->headerLen;		/* 0 if not profile 4
							 * or 5 */
	hbqmb->logEntry = hbq_desc->logEntry;		/* Set to 1 if this
							 * HBQ will be used
							 * for LogEntry */
	hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
		hbq_entry_index * sizeof(struct lpfc_hbq_entry);
	hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);

	mb->mbxCommand = MBX_CONFIG_HBQ;
	mb->mbxOwner = OWN_HOST;

				/* Copy info for profiles 2,3,5. Other
				 * profiles this area is reserved
				 */
	if (hbq_desc->profile == 2)
		lpfc_build_hbq_profile2(hbqmb, hbq_desc);
	else if (hbq_desc->profile == 3)
		lpfc_build_hbq_profile3(hbqmb, hbq_desc);
	else if (hbq_desc->profile == 5)
		lpfc_build_hbq_profile5(hbqmb, hbq_desc);

	/* Return if no rctl / type masks for this HBQ */
	if (!hbq_desc->mask_count)
		return;

	/* Otherwise we setup specific rctl / type masks for this HBQ */
	for (i = 0; i < hbq_desc->mask_count; i++) {
		hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
		hbqmb->hbqMasks[i].tmask  = hbq_desc->hbqMasks[i].tmask;
		hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
		hbqmb->hbqMasks[i].rctlmask  = hbq_desc->hbqMasks[i].rctlmask;
	}

	return;
}
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
/**
 * lpfc_config_ring - Prepare a mailbox command for configuring an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @ring: ring number to configure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Prepares a CONFIG_RING mailbox command for one SLI ring: either a single
 * selection profile, or the ring's R_CTL/TYPE match/mask registers.
 **/
void
lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
{
	int i;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->un.varCfgRing.ring = ring;
	mb->un.varCfgRing.maxOrigXchg = 0;
	mb->un.varCfgRing.maxRespXchg = 0;
	mb->un.varCfgRing.recvNotify = 1;

	psli = &phba->sli;
	pring = &psli->sli3_ring[ring];
	mb->un.varCfgRing.numMask = pring->num_mask;
	mb->mbxCommand = MBX_CONFIG_RING;
	mb->mbxOwner = OWN_HOST;

	/* Is this ring configured for a specific profile */
	if (pring->prt[0].profile) {
		mb->un.varCfgRing.profile = pring->prt[0].profile;
		return;
	}

	/* Otherwise we setup specific rctl / type masks for this ring */
	for (i = 0; i < pring->num_mask; i++) {
		mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
		/* ELS request frames match on all but the low bit of rctl */
		if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
			mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
		else
			mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
		mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
		mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
	}

	return;
}
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
/**
 * lpfc_config_port - Prepare a mailbox command for configuring port
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Prepares a CONFIG_PORT mailbox command and fully initializes the Port
 * Control Block (PCB) in host memory: mailbox, host group pointer (HGP),
 * port group pointer (PGP) and per-ring IOCB addresses.  Negotiates the
 * SLI mode (falls back to SLI-2 when SLI-3 features are unavailable) and
 * enables optional SLI-3 features (BlockGuard, NPIV, HBQs, ...).
 **/
void
lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
	MAILBOX_t *mb = &pmb->u.mb;
	dma_addr_t pdma_addr;
	uint32_t bar_low, bar_high;
	size_t offset;
	struct lpfc_hgp hgp;
	int i;
	uint32_t pgp_offset;

	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_CONFIG_PORT;
	mb->mbxOwner = OWN_HOST;

	mb->un.varCfgPort.pcbLen = sizeof(PCB_t);

	/* PCB physical address = slim2p base + offset of pcb within it */
	offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
	pdma_addr = phba->slim2p.phys + offset;
	mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
	mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);

	/* Always Host Group Pointer is in SLIM */
	mb->un.varCfgPort.hps = 1;

	/* If HBA supports SLI=3 ask for it */

	if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
		if (phba->cfg_enable_bg)
			mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
		if (phba->cfg_enable_dss)
			mb->un.varCfgPort.cdss = 1; /* Configure Security */
		mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
		mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
		mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
		if (phba->max_vpi && phba->cfg_enable_npiv &&
		    phba->vpd.sli3Feat.cmv) {
			mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
			mb->un.varCfgPort.cmv = 1;
		} else
			mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
	} else
		phba->sli_rev = LPFC_SLI_REV2;	/* fall back to SLI-2 */
	mb->un.varCfgPort.sli_mode = phba->sli_rev;

	/* If this is an SLI3 port, configure async status notification. */
	if (phba->sli_rev == LPFC_SLI_REV3)
		mb->un.varCfgPort.casabt = 1;

	/* Now setup pcb */
	phba->pcb->type = TYPE_NATIVE_SLI2;
	phba->pcb->feature = FEATURE_INITIAL_SLI2;

	/* Setup Mailbox pointers */
	phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
	offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
	pdma_addr = phba->slim2p.phys + offset;
	phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
	phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);

	/*
	 * The HGP may live either in host memory (within slim2p) or in the
	 * adapter's SLIM BAR.  Read the PCI BARs now so the SLIM case can
	 * compute the bus address the adapter should use.
	 */
	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);

	/*
	 * Host memory HGP is only usable below SLI-3 (SLI-3 requires the
	 * HGP in SLIM for HBQ put pointers).
	 */
	if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
		phba->host_gp = &phba->mbox->us.s2.host[0];
		phba->hbq_put = NULL;
		offset = (uint8_t *)&phba->mbox->us.s2.host -
			(uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
		phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
	} else {
		/* Always Host Group Pointer is in SLIM */
		mb->un.varCfgPort.hps = 1;

		if (phba->sli_rev == 3) {
			phba->host_gp = &mb_slim->us.s3.host[0];
			phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
		} else {
			phba->host_gp = &mb_slim->us.s2.host[0];
			phba->hbq_put = NULL;
		}

		/* mask off BAR0's flag bits 0 - 3 */
		phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
			(void __iomem *)phba->host_gp -
			(void __iomem *)phba->MBslimaddr;
		if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
			phba->pcb->hgpAddrHigh = bar_high;
		else
			phba->pcb->hgpAddrHigh = 0;
		/* write HGP data to SLIM at the required longword offset */
		memset(&hgp, 0, sizeof(struct lpfc_hgp));

		for (i = 0; i < phba->sli.num_rings; i++) {
			lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
				    sizeof(*phba->host_gp));
		}
	}

	/* Setup Port Group offset */
	if (phba->sli_rev == 3)
		pgp_offset = offsetof(struct lpfc_sli2_slim,
				      mbx.us.s3_pgp.port);
	else
		pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
	pdma_addr = phba->slim2p.phys + pgp_offset;
	phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
	phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);

	/* Use callback routine to setp rings in the pcb */
	lpfc_config_pcb_setup(phba);

	/* special handling for LC HBAs */
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		uint32_t hbainit[5];

		lpfc_hba_init(phba, hbainit);

		memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
	}

	/* Swap PCB if needed (in-place byte swap for the adapter) */
	lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
}
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459 void
1460 lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1461 {
1462 MAILBOX_t *mb = &pmb->u.mb;
1463
1464 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1465 mb->mbxCommand = MBX_KILL_BOARD;
1466 mb->mbxOwner = OWN_HOST;
1467 return;
1468 }
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480 void
1481 lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
1482 {
1483 struct lpfc_sli *psli;
1484
1485 psli = &phba->sli;
1486
1487 list_add_tail(&mbq->list, &psli->mboxq);
1488
1489 psli->mboxq_cnt++;
1490
1491 return;
1492 }
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
/**
 * lpfc_mbox_get - Remove a mailbox cmd from the internal pending mailbox queue
 * @phba: pointer to lpfc hba data structure.
 *
 * Removes the head element of the pending mailbox queue (if any) and
 * decrements the queue count.  Caller is responsible for any required
 * locking.
 *
 * Return the dequeued mailbox command, or NULL if the queue is empty.
 **/
LPFC_MBOXQ_t *
lpfc_mbox_get(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *mbq = NULL;
	struct lpfc_sli *psli = &phba->sli;

	/* list_remove_head leaves mbq NULL when the list is empty */
	list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
	if (mbq)
		psli->mboxq_cnt--;

	return mbq;
}
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
/**
 * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
 * @phba: pointer to lpfc hba data structure.
 * @mbq: pointer to the driver internal queue element for mailbox command.
 *
 * Lock-free version: caller must hold the hbalock (see lpfc_mbox_cmpl_put
 * for the locking wrapper).
 **/
void
__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
}
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547 void
1548 lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1549 {
1550 unsigned long iflag;
1551
1552
1553 spin_lock_irqsave(&phba->hbalock, iflag);
1554 __lpfc_mbox_cmpl_put(phba, mbq);
1555 spin_unlock_irqrestore(&phba->hbalock, iflag);
1556 return;
1557 }
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
/**
 * lpfc_mbox_cmd_check - Check the validality of a mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the driver internal queue element for mailbox command.
 *
 * A mailbox command with a custom completion handler (anything other than
 * the default or the wake-wait handler) must carry a vport context, since
 * such handlers dereference it on completion.
 *
 * Return 0 if the command is valid, -ENODEV (with a stack dump for
 * debugging) when the required vport is missing.
 **/
int
lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	/* Mailbox command that have a completion handler must also have a
	 * vport specified.
	 */
	if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!mboxq->vport) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
					"1814 Mbox x%x failed, no vport\n",
					mboxq->u.mb.mbxCommand);
			dump_stack();
			return -ENODEV;
		}
	}
	return 0;
}
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599 int
1600 lpfc_mbox_dev_check(struct lpfc_hba *phba)
1601 {
1602
1603 if (unlikely(pci_channel_offline(phba->pcidev)))
1604 return -ENODEV;
1605
1606
1607 if (phba->link_state == LPFC_HBA_ERROR)
1608 return -ENODEV;
1609
1610 return 0;
1611 }
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
/**
 * lpfc_mbox_tmo_val - Select the timeout value for a mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: mailbox command whose timeout is being determined.
 *
 * Flash/NVRAM access commands and a set of long-running SLI4_CONFIG
 * sub-commands get extended timeouts; everything else gets the default.
 *
 * Return: one of the LPFC_MBOX_*TMO* timeout constants.
 */
int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	MAILBOX_t *mbox = &mboxq->u.mb;
	uint8_t subsys, opcode;

	switch (mbox->mbxCommand) {
	/* Commands that access flash/NVRAM need the longest timeout */
	case MBX_WRITE_NV:
	case MBX_DUMP_MEMORY:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_WRITE_VPARMS:
	case MBX_LOAD_AREA:
	case MBX_WRITE_WWN:
	case MBX_LOAD_EXP_ROM:
	case MBX_ACCESS_VDATA:
		return LPFC_MBOX_TMO_FLASH_CMD;
	case MBX_SLI4_CONFIG:
		/* Timeout depends on the embedded subsystem/opcode pair */
		subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
		opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq);
		if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) {
			switch (opcode) {
			/* Long-running COMMON opcodes get the extended timeout */
			case LPFC_MBOX_OPCODE_READ_OBJECT:
			case LPFC_MBOX_OPCODE_WRITE_OBJECT:
			case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
			case LPFC_MBOX_OPCODE_DELETE_OBJECT:
			case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
			case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
			case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES:
			case LPFC_MBOX_OPCODE_SEND_ACTIVATION:
			case LPFC_MBOX_OPCODE_RESET_LICENSES:
			case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG:
			case LPFC_MBOX_OPCODE_GET_VPD_DATA:
			case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG:
				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
			}
		}
		if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) {
			switch (opcode) {
			case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS:
				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
			}
		}
		return LPFC_MBOX_SLI4_CONFIG_TMO;
	}
	return LPFC_MBOX_TMO;
}
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686 void
1687 lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
1688 dma_addr_t phyaddr, uint32_t length)
1689 {
1690 struct lpfc_mbx_nembed_cmd *nembed_sge;
1691
1692 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1693 &mbox->u.mqe.un.nembed_cmd;
1694 nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
1695 nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
1696 nembed_sge->sge[sgentry].length = length;
1697 }
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707 void
1708 lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
1709 struct lpfc_mbx_sge *sge)
1710 {
1711 struct lpfc_mbx_nembed_cmd *nembed_sge;
1712
1713 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1714 &mbox->u.mqe.un.nembed_cmd;
1715 sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
1716 sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
1717 sge->length = nembed_sge->sge[sgentry].length;
1718 }
1719
1720
1721
1722
1723
1724
1725
1726
/**
 * lpfc_sli4_mbox_cmd_free - Free an SLI4 mailbox command and its resources
 * @phba: pointer to lpfc hba data structure.
 * @mbox: mailbox command to release.
 *
 * For a non-embedded command, frees every DMA page referenced by the
 * command's SGE list and the sge_array bookkeeping structure; in all cases
 * returns the mailbox element itself to the driver's mailbox memory pool.
 */
void
lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
	struct lpfc_mbx_sli4_config *sli4_cfg;
	struct lpfc_mbx_sge sge;
	dma_addr_t phyaddr;
	uint32_t sgecount, sgentry;

	sli4_cfg = &mbox->u.mqe.un.sli4_config;

	/* Embedded commands carry no external pages; just free the element */
	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return;
	}

	sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);

	/* Defensive: nothing to free if the SGE array was never allocated */
	if (unlikely(!mbox->sge_array)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return;
	}

	/* Free each externally allocated DMA page described by an SGE */
	for (sgentry = 0; sgentry < sgecount; sgentry++) {
		lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
		phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  mbox->sge_array->addr[sgentry], phyaddr);
	}

	kfree(mbox->sge_array);

	mempool_free(mbox, phba->mbox_mem_pool);
}
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
/**
 * lpfc_sli4_config - Initialize an SLI4_CONFIG mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: mailbox command to set up.
 * @subsystem: subsystem code for the config request.
 * @opcode: opcode within the subsystem.
 * @length: total payload length requested.
 * @emb: true for an embedded payload, false for external SGE pages.
 *
 * For an embedded command, fills the header inside the MQE and returns
 * @length. For a non-embedded command, allocates up to
 * LPFC_SLI4_MBX_SGE_MAX_PAGES DMA pages, wires them into the command's
 * SGE list, and writes the config header into the first page.
 *
 * Return: the number of payload bytes actually set up (0 on allocation
 * failure of the SGE array).
 */
int
lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
		 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
{
	struct lpfc_mbx_sli4_config *sli4_config;
	union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
	uint32_t alloc_len;
	uint32_t resid_len;
	uint32_t pagen, pcount;
	void *viraddr;
	dma_addr_t phyaddr;

	memset(mbox, 0, sizeof(*mbox));
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);

	sli4_config = &mbox->u.mqe.un.sli4_config;

	if (emb) {
		/* Embedded payload: header lives inside the MQE itself */
		bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
		sli4_config->header.cfg_mhdr.payload_length = length;

		bf_set(lpfc_mbox_hdr_opcode,
			&sli4_config->header.cfg_shdr.request, opcode);
		bf_set(lpfc_mbox_hdr_subsystem,
			&sli4_config->header.cfg_shdr.request, subsystem);
		sli4_config->header.cfg_shdr.request.request_length =
			length - LPFC_MBX_CMD_HDR_LENGTH;
		return length;
	}

	/* Non-embedded: number of pages needed, capped at the SGE maximum */
	pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
	pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
				LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;

	mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
				  GFP_KERNEL);
	if (!mbox->sge_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2527 Failed to allocate non-embedded SGE "
				"array.\n");
		return 0;
	}
	for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
		/* Allocate one DMA page per SGE; stop early on failure and
		 * report only what was actually set up via alloc_len/pagen.
		 */
		viraddr = dma_alloc_coherent(&phba->pcidev->dev,
					     SLI4_PAGE_SIZE, &phyaddr,
					     GFP_KERNEL);
		if (!viraddr)
			break;
		mbox->sge_array->addr[pagen] = viraddr;
		/* The config header is placed in the first page */
		if (pagen == 0)
			cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
		resid_len = length - alloc_len;
		if (resid_len > SLI4_PAGE_SIZE) {
			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
					      SLI4_PAGE_SIZE);
			alloc_len += SLI4_PAGE_SIZE;
		} else {
			/* Last (partial) page covers the remainder */
			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
					      resid_len);
			alloc_len = length;
		}
	}

	sli4_config->header.cfg_mhdr.payload_length = alloc_len;
	bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);

	/* Fill the request header in the first external page, if any */
	if (pagen > 0) {
		bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
		bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
		cfg_shdr->request.request_length =
				alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
	}

	/* Endian-swap the header in place for the hardware */
	if (cfg_shdr)
		lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
				      sizeof(union lpfc_sli4_cfg_shdr));
	return alloc_len;
}
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
/**
 * lpfc_sli4_mbox_rsrc_extent - Fill a resource-extent mailbox request
 * @phba: pointer to lpfc hba data structure.
 * @mbox: SLI4_CONFIG mailbox command already set up by lpfc_sli4_config.
 * @exts_count: number of extents requested (ALLOC opcode only).
 * @rsrc_type: resource type to operate on.
 * @emb: LPFC_SLI4_MBX_EMBED or LPFC_SLI4_MBX_NEMBED.
 *
 * Writes the resource type (and, for the ALLOC opcode, the extent count)
 * into either the embedded request area or the first non-embedded page.
 *
 * Return: 0 on success, 1 on missing non-embedded buffer or an
 * unsupported opcode.
 */
int
lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
			   uint16_t exts_count, uint16_t rsrc_type, bool emb)
{
	uint8_t opcode = 0;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
	void *virtaddr = NULL;

	/* Non-embedded requests live in the first external DMA page */
	if (emb == LPFC_SLI4_MBX_NEMBED) {
		virtaddr = mbox->sge_array->addr[0];
		if (virtaddr == NULL)
			return 1;
		n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
	}

	if (emb == LPFC_SLI4_MBX_EMBED)
		bf_set(lpfc_mbx_alloc_rsrc_extents_type,
		       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
		       rsrc_type);
	else {
		/* Set the type, then endian-swap that word for the HW */
		bf_set(lpfc_mbx_alloc_rsrc_extents_type,
		       n_rsrc_extnt, rsrc_type);
		lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
				      &n_rsrc_extnt->word4,
				      sizeof(uint32_t));
	}

	/* Only the ALLOC opcode carries an extent count */
	opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox);
	switch (opcode) {
	case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
		if (emb == LPFC_SLI4_MBX_EMBED)
			bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
			       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
			       exts_count);
		else
			bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
			       n_rsrc_extnt, exts_count);
		break;
	case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
	case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
	case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
		/* These opcodes need nothing beyond the resource type */
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2929 Resource Extent Opcode x%x is "
				"unsupported\n", opcode);
		return 1;
	}

	return 0;
}
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956 uint8_t
1957 lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
1958 {
1959 struct lpfc_mbx_sli4_config *sli4_cfg;
1960 union lpfc_sli4_cfg_shdr *cfg_shdr;
1961
1962 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1963 return LPFC_MBOX_SUBSYSTEM_NA;
1964 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1965
1966
1967 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1968 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1969 return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
1970 }
1971
1972
1973 if (unlikely(!mbox->sge_array))
1974 return LPFC_MBOX_SUBSYSTEM_NA;
1975 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1976 return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
1977 }
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989 uint8_t
1990 lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
1991 {
1992 struct lpfc_mbx_sli4_config *sli4_cfg;
1993 union lpfc_sli4_cfg_shdr *cfg_shdr;
1994
1995 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1996 return LPFC_MBOX_OPCODE_NA;
1997 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1998
1999
2000 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
2001 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
2002 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
2003 }
2004
2005
2006 if (unlikely(!mbox->sge_array))
2007 return LPFC_MBOX_OPCODE_NA;
2008 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
2009 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
2010 }
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
/**
 * lpfc_sli4_mbx_read_fcf_rec - Build a READ_FCF_TABLE mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: mailbox command to construct.
 * @fcf_index: index of the FCF record to read.
 *
 * Sets up a non-embedded FCOE READ_FCF_TABLE request large enough for a
 * single fcf_record plus the config header, and stores @fcf_index in the
 * request.
 *
 * Return: 0 on success, -ENOMEM when @mboxq is NULL or the DMA memory
 * set up by lpfc_sli4_config is smaller than requested.
 */
int
lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
			   struct lpfcMboxq *mboxq,
			   uint16_t fcf_index)
{
	void *virt_addr;
	uint8_t *bytep;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;

	if (!mboxq)
		return -ENOMEM;

	/* One FCF record plus header and two trailing words */
	req_len = sizeof(struct fcf_record) +
		  sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);

	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
			LPFC_SLI4_MBX_NEMBED);

	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"0291 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		return -ENOMEM;
	}

	/* The request lives in the first non-embedded page */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	virt_addr = mboxq->sge_array->addr[0];
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;

	bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);

	/* Endian-swap the word holding the index for the hardware */
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));

	return 0;
}
2068
2069
2070
2071
2072
2073
2074
2075
/**
 * lpfc_request_features - Configure a REQUEST_FEATURES mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: mailbox command to set up.
 *
 * Requests FCP initiator and performance-hint features unconditionally,
 * and conditionally requests block guard (DIF), NPIV, and NVMET multi-RQ
 * features based on the driver configuration.
 */
void
lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
{
	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
	bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);

	/* Always request FCP initiator mode and performance hints */
	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
	bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);

	/* DIF (block guard) only when enabled in the configuration */
	if (phba->cfg_enable_bg)
		bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);

	/* NPIV requires both a vpi range and the config knob */
	if (phba->max_vpi && phba->cfg_enable_npiv)
		bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);

	if (phba->nvmet_support) {
		bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1);
		/* Explicitly clear auto-abort bits for NVMET */
		bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0);
		bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0);
	}
	return;
}
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
/**
 * lpfc_init_vfi - Initialize an INIT_VFI mailbox command
 * @mbox: mailbox command to set up.
 * @vport: vport whose VFI/VPI/FCFI indexes are used.
 *
 * Builds an INIT_VFI command using the hardware ids mapped from the
 * vport's vfi and vpi and the HBA's current fcfi.
 */
void
lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
{
	struct lpfc_mbx_init_vfi *init_vfi;

	memset(mbox, 0, sizeof(*mbox));
	mbox->vport = vport;
	init_vfi = &mbox->u.mqe.un.init_vfi;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
	bf_set(lpfc_init_vfi_vr, init_vfi, 1);
	bf_set(lpfc_init_vfi_vt, init_vfi, 1);
	bf_set(lpfc_init_vfi_vp, init_vfi, 1);
	/* Translate driver indexes to the hardware id spaces */
	bf_set(lpfc_init_vfi_vfi, init_vfi,
	       vport->phba->sli4_hba.vfi_ids[vport->vfi]);
	bf_set(lpfc_init_vfi_vpi, init_vfi,
	       vport->phba->vpi_ids[vport->vpi]);
	bf_set(lpfc_init_vfi_fcfi, init_vfi,
	       vport->phba->fcf.fcfi);
}
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
/**
 * lpfc_reg_vfi - Initialize a REG_VFI mailbox command
 * @mbox: mailbox command to set up.
 * @vport: vport being registered.
 * @phys: DMA address of the service-parameter buffer, or 0 for none.
 *
 * Builds a REG_VFI command from the vport's hardware ids, WWPN, timeout
 * values, and (optionally) a BDE pointing at the service parameters.
 * Also evaluates fabric BB credit recovery (BBCR/BBSCN) settings when
 * the configuration enables them.
 */
void
lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
{
	struct lpfc_mbx_reg_vfi *reg_vfi;
	struct lpfc_hba *phba = vport->phba;
	uint8_t bbscn_fabric = 0, bbscn_max = 0, bbscn_def = 0;

	memset(mbox, 0, sizeof(*mbox));
	reg_vfi = &mbox->u.mqe.un.reg_vfi;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
	bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
	bf_set(lpfc_reg_vfi_vfi, reg_vfi,
	       phba->sli4_hba.vfi_ids[vport->vfi]);
	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi);
	bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]);
	/* Port name goes out little-endian word by word */
	memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
	reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
	reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
	reg_vfi->e_d_tov = phba->fc_edtov;
	reg_vfi->r_a_tov = phba->fc_ratov;
	/* Optional BDE describing the service-parameter buffer */
	if (phys) {
		reg_vfi->bde.addrHigh = putPaddrHigh(phys);
		reg_vfi->bde.addrLow = putPaddrLow(phys);
		reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
		reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	}
	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);

	/* An already-registered FC VFI with unchanged topology is updated
	 * in place rather than re-registered.
	 */
	if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
	    (vport->fc_flag & FC_VFI_REGISTERED) &&
	    (!phba->fc_topology_changed))
		bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);

	/* BB credit recovery: enable only when supported, configured, and
	 * the fabric advertised a nonzero BBSCN within our maximum.
	 */
	bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 0);
	bf_set(lpfc_reg_vfi_bbscn, reg_vfi, 0);
	bbscn_fabric = (phba->fc_fabparam.cmn.bbRcvSizeMsb >> 4) & 0xF;

	if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
	    bbscn_fabric != 0) {
		bbscn_max = bf_get(lpfc_bbscn_max,
				   &phba->sli4_hba.bbscn_params);
		if (bbscn_fabric <= bbscn_max) {
			bbscn_def = bf_get(lpfc_bbscn_def,
					   &phba->sli4_hba.bbscn_params);

			/* Use the larger of fabric and default BBSCN */
			if (bbscn_fabric > bbscn_def)
				bf_set(lpfc_reg_vfi_bbscn, reg_vfi,
				       bbscn_fabric);
			else
				bf_set(lpfc_reg_vfi_bbscn, reg_vfi, bbscn_def);

			bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 1);
		}
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
			"3134 Register VFI, mydid:x%x, fcfi:%d, "
			" vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
			" port_state:x%x topology chg:%d bbscn_fabric :%d\n",
			vport->fc_myDID,
			phba->fcf.fcfi,
			phba->sli4_hba.vfi_ids[vport->vfi],
			phba->vpi_ids[vport->vpi],
			reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
			vport->port_state, phba->fc_topology_changed,
			bbscn_fabric);
}
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226 void
2227 lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
2228 {
2229 memset(mbox, 0, sizeof(*mbox));
2230 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
2231 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
2232 phba->vpi_ids[vpi]);
2233 bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
2234 phba->sli4_hba.vfi_ids[phba->pport->vfi]);
2235 }
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248 void
2249 lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2250 {
2251 memset(mbox, 0, sizeof(*mbox));
2252 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
2253 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
2254 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2255 }
2256
2257
2258
2259
2260
2261
2262
2263
2264
/**
 * lpfc_sli4_dump_cfg_rg23 - Prepare a DUMP mailbox command for config region 23
 * @phba: pointer to lpfc hba data structure.
 * @mbox: mailbox command to set up.
 *
 * Allocates a DMA buffer, attaches it to @mbox via ctx_buf, and fills in
 * a DUMP_MEMORY command targeting config region 23. The caller owns the
 * buffer's eventual release.
 *
 * Return: 0 on success, 1 on memory allocation failure.
 */
int
lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
	struct lpfc_dmabuf *mp = NULL;
	MAILBOX_t *mb;

	memset(mbox, 0, sizeof(*mbox));
	mb = &mbox->u.mb;

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);

	/* kfree(NULL) is a no-op, so a single cleanup covers both failures */
	if (!mp || !mp->virt) {
		kfree(mp);

		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
			"2569 lpfc dump config region 23: memory"
			" allocation failed\n");
		return 1;
	}

	memset(mp->virt, 0, LPFC_BPL_SIZE);
	INIT_LIST_HEAD(&mp->list);

	/* Save the buffer with the command so completion can find it */
	mbox->ctx_buf = (uint8_t *)mp;

	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.region_id = DMP_REGION_23;
	mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
	mb->un.varWords[3] = putPaddrLow(mp->phys);
	mb->un.varWords[4] = putPaddrHigh(mp->phys);
	return 0;
}
2301
/* Completion handler for the READ_LNK_STAT step of an RDP request.
 * Copies the link statistics into the RDP context on success, frees the
 * mailbox, and invokes the RDP completion callback with SUCCESS/FAILURE.
 */
static void
lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	MAILBOX_t *mb;
	int rc = FAILURE;
	struct lpfc_rdp_context *rdp_context =
			(struct lpfc_rdp_context *)(mboxq->ctx_ndlp);

	mb = &mboxq->u.mb;
	if (mb->mbxStatus)
		goto mbx_failed;

	memcpy(&rdp_context->link_stat, &mb->un.varRdLnk, sizeof(READ_LNK_VAR));

	rc = SUCCESS;

mbx_failed:
	/* Mailbox is always freed; the context callback reports the outcome */
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	rdp_context->cmpl(phba, rdp_context, rc);
}
2322
/* Completion handler for the SFP page A2 dump step of an RDP request.
 * Copies page A2 into the RDP context, then reuses the same mailbox to
 * issue the follow-on READ_LNK_STAT command; on any failure, cleans up
 * and reports FAILURE through the RDP completion callback.
 */
static void
lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
	struct lpfc_rdp_context *rdp_context =
			(struct lpfc_rdp_context *)(mbox->ctx_ndlp);

	if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
		goto error_mbuf_free;

	lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
				DMP_SFF_PAGE_A2_SIZE);

	/* The DMA buffer is no longer needed once page A2 is copied out */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);

	/* Reuse the mailbox for the next step: link statistics */
	memset(mbox, 0, sizeof(*mbox));
	lpfc_read_lnk_stat(phba, mbox);
	mbox->vport = rdp_context->ndlp->vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
	mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
		goto error_cmd_free;

	return;

error_mbuf_free:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
error_cmd_free:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	rdp_context->cmpl(phba, rdp_context, FAILURE);
}
2357
/* Completion handler for the SFP page A0 dump step of an RDP request.
 * Copies page A0 into the RDP context, then reuses the same mailbox and
 * DMA buffer to issue the follow-on page A2 dump; on any failure, frees
 * both and reports FAILURE through the RDP completion callback.
 */
void
lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	int rc;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
	struct lpfc_rdp_context *rdp_context =
			(struct lpfc_rdp_context *)(mbox->ctx_ndlp);

	if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
		goto error;

	lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0,
				 DMP_SFF_PAGE_A0_SIZE);

	/* Reuse the mailbox and DMA buffer for the page A2 dump */
	memset(mbox, 0, sizeof(*mbox));

	memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE);
	INIT_LIST_HEAD(&mp->list);

	/* Re-attach the buffer so the next completion can find it */
	mbox->ctx_buf = mp;
	mbox->vport = rdp_context->ndlp->vport;

	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
	bf_set(lpfc_mbx_memory_dump_type3_type,
		&mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
	bf_set(lpfc_mbx_memory_dump_type3_link,
		&mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
	bf_set(lpfc_mbx_memory_dump_type3_page_no,
		&mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2);
	bf_set(lpfc_mbx_memory_dump_type3_length,
		&mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE);
	mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
	mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);

	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2;
	mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto error;

	return;

error:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	rdp_context->cmpl(phba, rdp_context, FAILURE);
}
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
/**
 * lpfc_sli4_dump_page_a0 - Prepare a DUMP command for SFP page A0
 * @phba: pointer to lpfc hba data structure.
 * @mbox: mailbox command to set up.
 *
 * Allocates a DMA buffer, attaches it via ctx_buf, and fills in a
 * DUMP_MEMORY (type 3) command reading SFP page 0xA0 for the physical
 * port. The caller owns the buffer's eventual release.
 *
 * Return: 0 on success, 1 on memory allocation failure.
 */
int
lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
	struct lpfc_dmabuf *mp = NULL;

	memset(mbox, 0, sizeof(*mbox));

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	/* kfree(NULL) is a no-op, so a single cleanup covers both failures */
	if (!mp || !mp->virt) {
		kfree(mp);
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
			"3569 dump type 3 page 0xA0 allocation failed\n");
		return 1;
	}

	memset(mp->virt, 0, LPFC_BPL_SIZE);
	INIT_LIST_HEAD(&mp->list);

	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
	/* Save the buffer with the command so completion can find it */
	mbox->ctx_buf = mp;

	bf_set(lpfc_mbx_memory_dump_type3_type,
		&mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
	bf_set(lpfc_mbx_memory_dump_type3_link,
		&mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
	bf_set(lpfc_mbx_memory_dump_type3_page_no,
		&mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0);
	bf_set(lpfc_mbx_memory_dump_type3_length,
		&mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
	mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
	mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);

	return 0;
}
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
/**
 * lpfc_reg_fcfi - Initialize a REG_FCFI mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: mailbox command to set up.
 *
 * Builds a REG_FCFI command for the current FCF record. In the non-NVMET
 * case, RQ 0 is the default header RQ with match-all filters. In the
 * NVMET single-MRQ case, RQ 0 matches FCP unsolicited commands on the
 * NVMET MRQ and RQ 1 is the default header RQ. For multi-MRQ NVMET
 * (cfg_nvmet_mrq != 1) this function returns without setting anything
 * beyond the command code (REG_FCFI_MRQ is used instead).
 */
void
lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
	struct lpfc_mbx_reg_fcfi *reg_fcfi;

	memset(mbox, 0, sizeof(*mbox));
	reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
	if (phba->nvmet_support == 0) {
		bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi,
		       phba->sli4_hba.hdr_rq->queue_id);
		/* Match everything - rq_id0: zero type/rctl match and mask */
		bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, 0);
		bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0);
		bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi, 0);
		bf_set(lpfc_reg_fcfi_rctl_mask0, reg_fcfi, 0);

		bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);

		/* addr mode is bit wise inverted value of fcf addr_mode */
		bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
		       (~phba->fcf.addr_mode) & 0x3);
	} else {
		/* Multi-MRQ NVMET is registered via REG_FCFI_MRQ instead */
		if (phba->cfg_nvmet_mrq != 1)
			return;

		bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi,
		       phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id);
		/* Match type FCP - rq_id0 */
		bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, FC_TYPE_FCP);
		bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0xff);
		bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi,
		       FC_RCTL_DD_UNSOL_CMD);

		bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi,
		       phba->sli4_hba.hdr_rq->queue_id);
		/* Match everything else - rq_id1 */
		bf_set(lpfc_reg_fcfi_type_match1, reg_fcfi, 0);
		bf_set(lpfc_reg_fcfi_type_mask1, reg_fcfi, 0);
		bf_set(lpfc_reg_fcfi_rctl_match1, reg_fcfi, 0);
		bf_set(lpfc_reg_fcfi_rctl_mask1, reg_fcfi, 0);
	}
	bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
	bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
	bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
	       phba->fcf.current_rec.fcf_indx);
	/* Carry the VLAN tag only when the FCF record has a valid VID */
	if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
		bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
		bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
		       phba->fcf.current_rec.vlan_id);
	}
}
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
/**
 * lpfc_reg_fcfi_mrq - Initialize a REG_FCFI_MRQ mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: mailbox command to set up.
 * @mode: 0 for the first (FCF/VLAN) invocation, nonzero for the second
 *        (RQ filter) invocation.
 *
 * Builds a REG_FCFI_MRQ command for multi-MRQ NVMET support; does nothing
 * unless cfg_nvmet_mrq > 1. The command is issued in two passes selected
 * by @mode: pass 0 sets the FCF index and VLAN, pass 1 programs the RQ
 * filters for FCP unsolicited commands on the MRQ set and a catch-all on
 * the default header RQ.
 */
void
lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode)
{
	struct lpfc_mbx_reg_fcfi_mrq *reg_fcfi;

	/* This command only applies when multiple MRQs are configured */
	if (phba->cfg_nvmet_mrq <= 1)
		return;

	memset(mbox, 0, sizeof(*mbox));
	reg_fcfi = &mbox->u.mqe.un.reg_fcfi_mrq;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI_MRQ);
	if (mode == 0) {
		/* Pass 0: register the FCF index and optional VLAN tag */
		bf_set(lpfc_reg_fcfi_mrq_info_index, reg_fcfi,
		       phba->fcf.current_rec.fcf_indx);
		if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
			bf_set(lpfc_reg_fcfi_mrq_vv, reg_fcfi, 1);
			bf_set(lpfc_reg_fcfi_mrq_vlan_tag, reg_fcfi,
			       phba->fcf.current_rec.vlan_id);
		}
		return;
	}

	/* Pass 1: RQ 0 matches FCP unsolicited commands on the MRQ set */
	bf_set(lpfc_reg_fcfi_mrq_rq_id0, reg_fcfi,
	       phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id);
	/* Match type FCP - rq_id0 */
	bf_set(lpfc_reg_fcfi_mrq_type_match0, reg_fcfi, FC_TYPE_FCP);
	bf_set(lpfc_reg_fcfi_mrq_type_mask0, reg_fcfi, 0xff);
	bf_set(lpfc_reg_fcfi_mrq_rctl_match0, reg_fcfi, FC_RCTL_DD_UNSOL_CMD);
	bf_set(lpfc_reg_fcfi_mrq_rctl_mask0, reg_fcfi, 0xff);
	bf_set(lpfc_reg_fcfi_mrq_ptc0, reg_fcfi, 1);
	bf_set(lpfc_reg_fcfi_mrq_pt0, reg_fcfi, 1);

	bf_set(lpfc_reg_fcfi_mrq_policy, reg_fcfi, 3); /* NVME connection id */
	bf_set(lpfc_reg_fcfi_mrq_mode, reg_fcfi, 1);
	bf_set(lpfc_reg_fcfi_mrq_filter, reg_fcfi, 1); /* rq_id0 */
	bf_set(lpfc_reg_fcfi_mrq_npairs, reg_fcfi, phba->cfg_nvmet_mrq);

	/* RQ 1: everything else goes to the default header RQ */
	bf_set(lpfc_reg_fcfi_mrq_rq_id1, reg_fcfi,
	       phba->sli4_hba.hdr_rq->queue_id);
	/* Match everything - rq_id1 */
	bf_set(lpfc_reg_fcfi_mrq_type_match1, reg_fcfi, 0);
	bf_set(lpfc_reg_fcfi_mrq_type_mask1, reg_fcfi, 0);
	bf_set(lpfc_reg_fcfi_mrq_rctl_match1, reg_fcfi, 0);
	bf_set(lpfc_reg_fcfi_mrq_rctl_mask1, reg_fcfi, 0);

	bf_set(lpfc_reg_fcfi_mrq_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
	bf_set(lpfc_reg_fcfi_mrq_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
}
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594 void
2595 lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
2596 {
2597 memset(mbox, 0, sizeof(*mbox));
2598 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
2599 bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
2600 }
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610 void
2611 lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
2612 {
2613 struct lpfc_hba *phba = ndlp->phba;
2614 struct lpfc_mbx_resume_rpi *resume_rpi;
2615
2616 memset(mbox, 0, sizeof(*mbox));
2617 resume_rpi = &mbox->u.mqe.un.resume_rpi;
2618 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
2619 bf_set(lpfc_resume_rpi_index, resume_rpi,
2620 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2621 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
2622 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
2623 }
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633 void
2634 lpfc_supported_pages(struct lpfcMboxq *mbox)
2635 {
2636 struct lpfc_mbx_supp_pages *supp_pages;
2637
2638 memset(mbox, 0, sizeof(*mbox));
2639 supp_pages = &mbox->u.mqe.un.supp_pages;
2640 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2641 bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
2642 }
2643
2644
2645
2646
2647
2648
2649
2650
2651 void
2652 lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
2653 {
2654 struct lpfc_mbx_pc_sli4_params *sli4_params;
2655
2656 memset(mbox, 0, sizeof(*mbox));
2657 sli4_params = &mbox->u.mqe.un.sli4_params;
2658 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2659 bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
2660 }