This source file includes the following definitions:
- lpfc_get_iocb_from_iocbq
- lpfc_sli4_pcimem_bcopy
- lpfc_sli4_wq_put
- lpfc_sli4_wq_release
- lpfc_sli4_mq_put
- lpfc_sli4_mq_release
- lpfc_sli4_eq_get
- lpfc_sli4_eq_clr_intr
- lpfc_sli4_if6_eq_clr_intr
- lpfc_sli4_write_eq_db
- lpfc_sli4_if6_write_eq_db
- __lpfc_sli4_consume_eqe
- lpfc_sli4_eq_flush
- lpfc_sli4_process_eq
- lpfc_sli4_cq_get
- __lpfc_sli4_consume_cqe
- lpfc_sli4_write_cq_db
- lpfc_sli4_if6_write_cq_db
- lpfc_sli4_rq_put
- lpfc_sli4_rq_release
- lpfc_cmd_iocb
- lpfc_resp_iocb
- __lpfc_sli_get_iocbq
- __lpfc_clear_active_sglq
- __lpfc_get_active_sglq
- lpfc_clr_rrq_active
- lpfc_handle_rrq_active
- lpfc_get_active_rrq
- lpfc_cleanup_vports_rrqs
- lpfc_test_rrq_active
- lpfc_set_rrq_active
- __lpfc_sli_get_els_sglq
- __lpfc_sli_get_nvmet_sglq
- lpfc_sli_get_iocbq
- __lpfc_sli_release_iocbq_s4
- __lpfc_sli_release_iocbq_s3
- __lpfc_sli_release_iocbq
- lpfc_sli_release_iocbq
- lpfc_sli_cancel_iocbs
- lpfc_sli_iocb_cmd_type
- lpfc_sli_ring_map
- lpfc_sli_ringtxcmpl_put
- lpfc_sli_ringtx_get
- lpfc_sli_next_iocb_slot
- lpfc_sli_next_iotag
- lpfc_sli_submit_iocb
- lpfc_sli_update_full_ring
- lpfc_sli_update_ring
- lpfc_sli_resume_iocb
- lpfc_sli_next_hbq_slot
- lpfc_sli_hbqbuf_free_all
- lpfc_sli_hbq_to_firmware
- lpfc_sli_hbq_to_firmware_s3
- lpfc_sli_hbq_to_firmware_s4
- lpfc_sli_hbqbuf_fill_hbqs
- lpfc_sli_hbqbuf_add_hbqs
- lpfc_sli_hbqbuf_init_hbqs
- lpfc_sli_hbqbuf_get
- lpfc_sli_rqbuf_get
- lpfc_sli_hbqbuf_find
- lpfc_sli_free_hbq
- lpfc_sli_chk_mbx_command
- lpfc_sli_wake_mbox_wait
- __lpfc_sli_rpi_release
- lpfc_sli_def_mbox_cmpl
- lpfc_sli4_unreg_rpi_cmpl_clr
- lpfc_sli_handle_mb_event
- lpfc_sli_get_buff
- lpfc_complete_unsol_iocb
- lpfc_sli_process_unsol_iocb
- lpfc_sli_iocbq_lookup
- lpfc_sli_iocbq_lookup_by_tag
- lpfc_sli_process_sol_iocb
- lpfc_sli_rsp_pointers_error
- lpfc_poll_eratt
- lpfc_sli_handle_fast_ring_event
- lpfc_sli_sp_handle_rspiocb
- lpfc_sli_handle_slow_ring_event
- lpfc_sli_handle_slow_ring_event_s3
- lpfc_sli_handle_slow_ring_event_s4
- lpfc_sli_abort_iocb_ring
- lpfc_sli_abort_fcp_rings
- lpfc_sli_flush_io_rings
- lpfc_sli_brdready_s3
- lpfc_sli_brdready_s4
- lpfc_sli_brdready
- lpfc_reset_barrier
- lpfc_sli_brdkill
- lpfc_sli_brdreset
- lpfc_sli4_brdreset
- lpfc_sli_brdrestart_s3
- lpfc_sli_brdrestart_s4
- lpfc_sli_brdrestart
- lpfc_sli_chipset_init
- lpfc_sli_hbq_count
- lpfc_sli_hbq_entry_count
- lpfc_sli_hbq_size
- lpfc_sli_hbq_setup
- lpfc_sli4_rb_setup
- lpfc_sli_config_port
- lpfc_sli_hba_setup
- lpfc_sli4_read_fcoe_params
- lpfc_sli4_read_rev
- lpfc_sli4_get_ctl_attr
- lpfc_sli4_retrieve_pport_name
- lpfc_sli4_arm_cqeq_intr
- lpfc_sli4_get_avail_extnt_rsrc
- lpfc_sli4_chk_avail_extnt_rsrc
- lpfc_sli4_cfg_post_extnts
- lpfc_sli4_alloc_extent
- lpfc_sli4_dealloc_extent
- lpfc_set_features
- lpfc_ras_stop_fwlog
- lpfc_sli4_ras_dma_free
- lpfc_sli4_ras_dma_alloc
- lpfc_sli4_ras_mbox_cmpl
- lpfc_sli4_ras_fwlog_init
- lpfc_sli4_ras_setup
- lpfc_sli4_alloc_resource_identifiers
- lpfc_sli4_dealloc_resource_identifiers
- lpfc_sli4_get_allocated_extnts
- lpfc_sli4_repost_sgl_list
- lpfc_sli4_repost_io_sgl_list
- lpfc_set_host_data
- lpfc_post_rq_buffer
- lpfc_sli4_hba_setup
- lpfc_mbox_timeout
- lpfc_sli4_mbox_completions_pending
- lpfc_sli4_process_missed_mbox_completions
- lpfc_mbox_timeout_handler
- lpfc_sli_issue_mbox_s3
- lpfc_sli4_async_mbox_block
- lpfc_sli4_async_mbox_unblock
- lpfc_sli4_wait_bmbx_ready
- lpfc_sli4_post_sync_mbox
- lpfc_sli_issue_mbox_s4
- lpfc_sli4_post_async_mbox
- lpfc_sli_issue_mbox
- lpfc_mbox_api_table_setup
- __lpfc_sli_ringtx_put
- lpfc_sli_next_iocb
- __lpfc_sli_issue_iocb_s3
- lpfc_sli4_bpl2sgl
- lpfc_sli4_iocb2wqe
- __lpfc_sli_issue_iocb_s4
- __lpfc_sli_issue_iocb
- lpfc_sli_api_table_setup
- lpfc_sli4_calc_ring
- lpfc_sli_issue_iocb
- lpfc_extra_ring_setup
- lpfc_sli_abts_err_handler
- lpfc_sli4_abts_err_handler
- lpfc_sli_async_event_handler
- lpfc_sli4_setup
- lpfc_sli_setup
- lpfc_sli4_queue_init
- lpfc_sli_queue_init
- lpfc_sli_mbox_sys_flush
- lpfc_sli_host_down
- lpfc_sli_hba_down
- lpfc_sli_pcimem_bcopy
- lpfc_sli_bemem_bcopy
- lpfc_sli_ringpostbuf_put
- lpfc_sli_get_buffer_tag
- lpfc_sli_ring_taggedbuf_get
- lpfc_sli_ringpostbuf_get
- lpfc_sli_abort_els_cmpl
- lpfc_ignore_els_cmpl
- lpfc_sli_abort_iotag_issue
- lpfc_sli_issue_abort_iotag
- lpfc_sli_hba_iocb_abort
- lpfc_sli_validate_fcp_iocb
- lpfc_sli_sum_iocb
- lpfc_sli_abort_fcp_cmpl
- lpfc_sli_abort_iocb
- lpfc_sli_abort_taskmgmt
- lpfc_sli_wake_iocb_wait
- lpfc_chk_iocb_flg
- lpfc_sli_issue_iocb_wait
- lpfc_sli_issue_mbox_wait
- lpfc_sli_mbox_sys_shutdown
- lpfc_sli_eratt_read
- lpfc_sli4_eratt_read
- lpfc_sli_check_eratt
- lpfc_intr_state_check
- lpfc_sli_sp_intr_handler
- lpfc_sli_fp_intr_handler
- lpfc_sli_intr_handler
- lpfc_sli4_els_xri_abort_event_proc
- lpfc_sli4_iocb_param_transfer
- lpfc_sli4_els_wcqe_to_rspiocbq
- lpfc_cq_event_setup
- lpfc_sli4_sp_handle_async_event
- lpfc_sli4_sp_handle_mbox_event
- lpfc_sli4_sp_handle_mcqe
- lpfc_sli4_sp_handle_els_wcqe
- lpfc_sli4_sp_handle_rel_wcqe
- lpfc_sli4_sp_handle_abort_xri_wcqe
- lpfc_sli4_sp_handle_rcqe
- lpfc_sli4_sp_handle_cqe
- lpfc_sli4_sp_handle_eqe
- __lpfc_sli4_process_cq
- __lpfc_sli4_sp_process_cq
- lpfc_sli4_sp_process_cq
- lpfc_sli4_dly_sp_process_cq
- lpfc_sli4_fp_handle_fcp_wcqe
- lpfc_sli4_fp_handle_rel_wcqe
- lpfc_sli4_nvmet_handle_rcqe
- lpfc_sli4_fp_handle_cqe
- lpfc_sli4_hba_handle_eqe
- __lpfc_sli4_hba_process_cq
- lpfc_sli4_hba_process_cq
- lpfc_sli4_dly_hba_process_cq
- lpfc_sli4_hba_intr_handler
- lpfc_sli4_intr_handler
- lpfc_sli4_poll_hbtimer
- lpfc_sli4_poll_eq
- lpfc_sli4_add_to_poll_list
- lpfc_sli4_remove_from_poll_list
- lpfc_sli4_cleanup_poll_list
- __lpfc_sli4_switch_eqmode
- lpfc_sli4_start_polling
- lpfc_sli4_stop_polling
- lpfc_sli4_queue_free
- lpfc_sli4_queue_alloc
- lpfc_dual_chute_pci_bar_map
- lpfc_modify_hba_eq_delay
- lpfc_eq_create
- lpfc_cq_create
- lpfc_cq_create_set
- lpfc_mq_create_fb_init
- lpfc_mq_create
- lpfc_wq_create
- lpfc_rq_create
- lpfc_mrq_create
- lpfc_eq_destroy
- lpfc_cq_destroy
- lpfc_mq_destroy
- lpfc_wq_destroy
- lpfc_rq_destroy
- lpfc_sli4_post_sgl
- lpfc_sli4_alloc_xri
- __lpfc_sli4_free_xri
- lpfc_sli4_free_xri
- lpfc_sli4_next_xritag
- lpfc_sli4_post_sgl_list
- lpfc_sli4_post_io_sgl_block
- lpfc_sli4_post_io_sgl_list
- lpfc_fc_frame_check
- lpfc_fc_hdr_get_vfi
- lpfc_fc_frame_to_vport
- lpfc_update_rcv_time_stamp
- lpfc_cleanup_rcv_buffers
- lpfc_rcv_seq_check_edtov
- lpfc_fc_frame_add
- lpfc_sli4_abort_partial_seq
- lpfc_sli4_abort_ulp_seq
- lpfc_sli4_seq_abort_rsp_cmpl
- lpfc_sli4_xri_inrange
- lpfc_sli4_seq_abort_rsp
- lpfc_sli4_handle_unsol_abort
- lpfc_seq_complete
- lpfc_prep_seq
- lpfc_sli4_send_seq_to_ulp
- lpfc_sli4_mds_loopback_cmpl
- lpfc_sli4_handle_mds_loopback
- lpfc_sli4_handle_received_buffer
- lpfc_sli4_post_all_rpi_hdrs
- lpfc_sli4_post_rpi_hdr
- lpfc_sli4_alloc_rpi
- __lpfc_sli4_free_rpi
- lpfc_sli4_free_rpi
- lpfc_sli4_remove_rpis
- lpfc_sli4_resume_rpi
- lpfc_sli4_init_vpi
- lpfc_mbx_cmpl_add_fcf_record
- lpfc_sli4_add_fcf_record
- lpfc_sli4_build_dflt_fcf_record
- lpfc_sli4_fcf_scan_read_fcf_rec
- lpfc_sli4_fcf_rr_read_fcf_rec
- lpfc_sli4_read_fcf_rec
- lpfc_check_next_fcf_pri_level
- lpfc_sli4_fcf_rr_next_index_get
- lpfc_sli4_fcf_rr_index_set
- lpfc_sli4_fcf_rr_index_clear
- lpfc_mbx_cmpl_redisc_fcf_table
- lpfc_sli4_redisc_fcf_table
- lpfc_sli4_fcf_dead_failthrough
- lpfc_sli_get_config_region23
- lpfc_sli4_get_config_region23
- lpfc_sli_read_link_ste
- lpfc_wr_object
- lpfc_cleanup_pending_mbox
- lpfc_drain_txq
- lpfc_wqe_bpl2sgl
- lpfc_sli4_issue_wqe
- lpfc_snapshot_mxp
- lpfc_adjust_pvt_pool_count
- lpfc_adjust_high_watermark
- lpfc_move_xri_pvt_to_pbl
- _lpfc_move_xri_pbl_to_pvt
- lpfc_move_xri_pbl_to_pvt
- lpfc_keep_pvt_pool_above_lowwm
- lpfc_release_io_buf
- lpfc_get_io_buf_from_private_pool
- lpfc_get_io_buf_from_expedite_pool
- lpfc_get_io_buf_from_multixri_pools
- lpfc_io_buf
- lpfc_get_io_buf
- lpfc_get_sgl_per_hdwq
- lpfc_put_sgl_per_hdwq
- lpfc_free_sgl_per_hdwq
- lpfc_get_cmd_rsp_buf_per_hdwq
- lpfc_put_cmd_rsp_buf_per_hdwq
- lpfc_free_cmd_rsp_buf_per_hdwq
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);

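/**
 * lpfc_get_iocb_from_iocbq - Return the iocb embedded in an iocbq
 * @iocbq: driver iocb object.
 *
 * Returns a pointer to the IOCB payload carried inside @iocbq.
 */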
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)

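/**
 * lpfc_sli4_pcimem_bcopy - Copy SLI4 queue entries as 64-bit words
 * @srcp: source buffer.
 * @destp: destination buffer.
 * @cnt: number of bytes to copy, expected to be a multiple of 8.
 *
 * On 64-bit little-endian hosts the entry can be copied a doubleword
 * at a time with no byte swapping; all other configurations fall back
 * to lpfc_sli_pcimem_bcopy().
 */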
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

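/**
 * lpfc_sli4_wq_put - Post a Work Queue Entry on a Work Queue
 * @q: the Work Queue to post on.
 * @wqe: the work queue entry to copy into the queue.
 *
 * Copies @wqe into the next free WQE slot (also pushing it through the
 * DPP aperture when dual-purpose posting is enabled) and rings the WQ
 * doorbell. Returns 0 on success, -ENOMEM if @q is NULL, -EBUSY if the
 * queue is full, or -EINVAL if the doorbell format is unknown.
 */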
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;

	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}

	wmb();

	host_index = q->host_index;

	q->host_index = idx;

	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}

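/**
 * lpfc_sli4_wq_release - Update internal hba index for WQ
 * @q: the Work Queue to operate on.
 * @index: the index to advance the hba index to.
 *
 * Advances the internal hba index to @index, marking the entries in
 * between as released back to the driver. Returns the number of
 * entries released.
 */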
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

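/**
 * lpfc_sli4_mq_put - Post a Mailbox Queue Entry on a Mailbox Queue
 * @q: the Mailbox Queue to post on.
 * @mqe: the mailbox queue entry to copy into the queue.
 *
 * Copies @mqe into the next free MQE slot, saves it as the adapter's
 * active mailbox, and rings the MQ doorbell. Returns 0 on success or
 * -ENOMEM if @q is NULL or the queue is full.
 */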
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);

	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	q->host_index = ((q->host_index + 1) % q->entry_count);

	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

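/**
 * lpfc_sli4_mq_release - Update internal hba index for MQ
 * @q: the Mailbox Queue to operate on.
 *
 * Clears the cached mailbox pointer and advances the internal hba
 * index by one entry. Returns the number of entries released (1), or
 * 0 if @q is NULL.
 */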
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	if (unlikely(!q))
		return 0;

	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

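/**
 * lpfc_sli4_eq_get - Get the next valid EQE from an EQ
 * @q: the Event Queue to read from.
 *
 * Returns a pointer to the next valid Event Queue Entry, or NULL if
 * @q is NULL or no valid entry is pending. The entry is not consumed;
 * callers must pair this with __lpfc_sli4_consume_eqe().
 */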
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	mb();
	return eqe;
}

void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

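/**
 * lpfc_sli4_write_eq_db - Write the EQ doorbell for an if_type 0/2 port
 * @phba: adapter with the Event Queue.
 * @q: the Event Queue that the host has completed processing for.
 * @count: number of EQEs processed since the doorbell was last written.
 * @arm: when true, re-arm the EQ for interrupt generation.
 *
 * Releases @count entries back to the HBA and optionally re-arms the
 * queue. For INTx interrupts the doorbell write is flushed with a read.
 */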
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	if (unlikely(!q || (count == 0 && !arm)))
		return;

	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);

	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	if (unlikely(!q || (count == 0 && !arm)))
		return;

	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);

	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

static void
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;
	uint32_t count = 0;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     uint8_t rearm)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	eq->queue_claimed = 0;

rearm_and_exit:
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}

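/**
 * lpfc_sli4_cq_get - Get the next valid CQE from a CQ
 * @q: the Completion Queue to read from.
 *
 * Returns a pointer to the next valid Completion Queue Entry, or NULL
 * if @q is NULL or no valid entry is pending. The entry is not
 * consumed; callers must pair this with __lpfc_sli4_consume_cqe().
 */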
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	mb();
	return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	if (unlikely(!q || (count == 0 && !arm)))
		return;

	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	if (unlikely(!q || (count == 0 && !arm)))
		return;

	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

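/**
 * lpfc_sli4_rq_put - Post a pair of Receive Queue Entries on Receive Queues
 * @hq: the Header Receive Queue to post on.
 * @dq: the Data Receive Queue to post on.
 * @hrqe: the header receive queue entry to copy in.
 * @drqe: the data receive queue entry to copy in.
 *
 * Posts matching header and data RQEs and rings the RQ doorbell every
 * @hq->notify_interval entries. Returns the index the entries were
 * posted at, or a negative errno on failure.
 */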
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;

	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}

static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

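/**
 * __lpfc_sli_get_iocbq - Allocate an iocb object from the iocb pool
 * @phba: HBA context object.
 *
 * Removes an iocb object from the head of @phba's iocb pool and tracks
 * the outstanding iocb count. Called with the hbalock held. Returns
 * NULL when the pool is empty.
 */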
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

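/**
 * lpfc_handle_rrq_active - Process the active RRQ list and send RRQs
 * @phba: HBA context object.
 *
 * Moves every expired RRQ off the active list, restarts the RRQ timer
 * for the earliest remaining stop time, then either sends an RRQ for
 * each expired entry or clears it when sending is not required or
 * fails.
 */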
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

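/**
 * lpfc_set_rrq_active - Mark an XRI as having an outstanding RRQ
 * @phba: HBA context object.
 * @ndlp: node the RRQ is associated with.
 * @xritag: XRI to mark active.
 * @rxid: remote exchange id of the aborted exchange.
 * @send_rrq: nonzero when an RRQ should actually be sent to the rport.
 *
 * Sets @xritag in the node's active RRQ bitmap, queues a new RRQ entry
 * on the adapter's active list, and wakes the worker thread if the
 * list was previously empty. Returns 0 on success or -EINVAL when the
 * RRQ cannot be set up (feature disabled, node being freed, port
 * unloading, bit already set, or allocation failure).
 */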
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

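/**
 * __lpfc_sli_get_els_sglq - Allocate an ELS sgl object from the sgl pool
 * @phba: HBA context object.
 * @piocbq: iocb the sgl is being allocated for.
 *
 * Walks the ELS sgl list, skipping any sgl whose XRI still has an
 * active RRQ against the target node, and returns the first usable
 * sgl marked SGL_ALLOCATED. Called with the ring lock held. Returns
 * NULL when no usable sgl is available.
 */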
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

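/**
 * __lpfc_sli_release_iocbq_s4 - Release an iocb back to the SLI4 iocb pool
 * @phba: HBA context object.
 * @iocbq: iocb object to release.
 *
 * Returns the iocb's sgl to the nvmet, aborted-ELS, or ELS sgl list as
 * appropriate, clears the iocb fields below the iocb payload, and adds
 * the iocb back to the adapter's iocb pool. Called with the hbalock
 * held.
 */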
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl) {
			if (piocb->iocb_flag & LPFC_IO_NVME)
				lpfc_nvme_cancel_iocb(phba, piocb);
			else
				lpfc_sli_release_iocbq(phba, piocb);
		} else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
}

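/**
 * lpfc_sli_iocb_cmd_type - Classify an iocb command code
 * @iocb_cmnd: iocb command code.
 *
 * Maps an iocb command code to solicited, unsolicited, or abort
 * handling; anything unrecognized or out of range is reported as
 * LPFC_UNKNOWN_IOCB.
 */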
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return LPFC_UNKNOWN_IOCB;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}

struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}

static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;

			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

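/**
 * lpfc_sli_next_iotag - Allocate an iotag for an iocb
 * @phba: HBA context object.
 * @iocbq: iocb object to assign the iotag to.
 *
 * Assigns the next free iotag and records @iocbq in the lookup array,
 * growing the array by LPFC_IOCBQ_LOOKUP_INCREMENT entries when it is
 * exhausted. Returns the new iotag, or 0 if allocation fails.
 */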
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);

	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr);

	pring->stats.iocb_cmd_full++;
}

static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr);
	}
}

static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != LPFC_FCP_RING ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}
}

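/**
 * lpfc_sli_next_hbq_slot - Get next available HBQ entry
 * @phba: HBA context object.
 * @hbqno: HBQ number.
 *
 * Returns a pointer to the next available entry in the HBQ, or NULL
 * (after marking the HBA in error) when the port's get index is
 * inconsistent or the HBQ is full. Called with the hbalock held.
 */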
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}

void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;

	hbq_count = lpfc_sli_hbq_count();

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
					 &phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}

	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

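/**
 * lpfc_sli_hbq_to_firmware - Post an HBQ buffer to the firmware
 * @phba: HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: HBQ buffer to post.
 *
 * Indirection that invokes the SLI3 or SLI4 specific routine to hand
 * the buffer to the firmware. Called with the hbalock held. Returns 0
 * on success, nonzero on failure.
 */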
2013 static int
2014 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2015 struct hbq_dmabuf *hbq_buf)
2016 {
2017 lockdep_assert_held(&phba->hbalock);
2018 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2019 }
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032 static int
2033 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2034 struct hbq_dmabuf *hbq_buf)
2035 {
2036 struct lpfc_hbq_entry *hbqe;
2037 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2038
2039 lockdep_assert_held(&phba->hbalock);
2040
2041 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2042 if (hbqe) {
2043 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2044
2045 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2046 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2047 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2048 hbqe->bde.tus.f.bdeFlags = 0;
2049 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2050 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2051
2052 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2053 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2054
2055 readl(phba->hbq_put + hbqno);
2056 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2057 return 0;
2058 } else
2059 return -ENOMEM;
2060 }
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072 static int
2073 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2074 struct hbq_dmabuf *hbq_buf)
2075 {
2076 int rc;
2077 struct lpfc_rqe hrqe;
2078 struct lpfc_rqe drqe;
2079 struct lpfc_queue *hrq;
2080 struct lpfc_queue *drq;
2081
2082 if (hbqno != LPFC_ELS_HBQ)
2083 return 1;
2084 hrq = phba->sli4_hba.hdr_rq;
2085 drq = phba->sli4_hba.dat_rq;
2086
2087 lockdep_assert_held(&phba->hbalock);
2088 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2089 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2090 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2091 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2092 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2093 if (rc < 0)
2094 return rc;
2095 hbq_buf->tag = (rc | (hbqno << 16));
2096 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2097 return 0;
2098 }
2099
2100
2101 static struct lpfc_hbq_init lpfc_els_hbq = {
2102 .rn = 1,
2103 .entry_count = 256,
2104 .mask_count = 0,
2105 .profile = 0,
2106 .ring_mask = (1 << LPFC_ELS_RING),
2107 .buffer_count = 0,
2108 .init_count = 40,
2109 .add_count = 40,
2110 };
2111
2112
2113 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2114 &lpfc_els_hbq,
2115 };
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127 static int
2128 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2129 {
2130 uint32_t i, posted = 0;
2131 unsigned long flags;
2132 struct hbq_dmabuf *hbq_buffer;
2133 LIST_HEAD(hbq_buf_list);
2134 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2135 return 0;
2136
2137 if ((phba->hbqs[hbqno].buffer_count + count) >
2138 lpfc_hbq_defs[hbqno]->entry_count)
2139 count = lpfc_hbq_defs[hbqno]->entry_count -
2140 phba->hbqs[hbqno].buffer_count;
2141 if (!count)
2142 return 0;
2143
2144 for (i = 0; i < count; i++) {
2145 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2146 if (!hbq_buffer)
2147 break;
2148 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2149 }
2150
2151 spin_lock_irqsave(&phba->hbalock, flags);
2152 if (!phba->hbq_in_use)
2153 goto err;
2154 while (!list_empty(&hbq_buf_list)) {
2155 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2156 dbuf.list);
2157 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2158 (hbqno << 16));
2159 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2160 phba->hbqs[hbqno].buffer_count++;
2161 posted++;
2162 } else
2163 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2164 }
2165 spin_unlock_irqrestore(&phba->hbalock, flags);
2166 return posted;
2167 err:
2168 spin_unlock_irqrestore(&phba->hbalock, flags);
2169 while (!list_empty(&hbq_buf_list)) {
2170 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2171 dbuf.list);
2172 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2173 }
2174 return 0;
2175 }
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186 int
2187 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2188 {
2189 if (phba->sli_rev == LPFC_SLI_REV4)
2190 return 0;
2191 else
2192 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2193 lpfc_hbq_defs[qno]->add_count);
2194 }
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205 static int
2206 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2207 {
2208 if (phba->sli_rev == LPFC_SLI_REV4)
2209 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2210 lpfc_hbq_defs[qno]->entry_count);
2211 else
2212 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2213 lpfc_hbq_defs[qno]->init_count);
2214 }
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224 static struct hbq_dmabuf *
2225 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2226 {
2227 struct lpfc_dmabuf *d_buf;
2228
2229 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2230 if (!d_buf)
2231 return NULL;
2232 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2233 }
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243 static struct rqb_dmabuf *
2244 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2245 {
2246 struct lpfc_dmabuf *h_buf;
2247 struct lpfc_rqb *rqbp;
2248
2249 rqbp = hrq->rqbp;
2250 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2251 struct lpfc_dmabuf, list);
2252 if (!h_buf)
2253 return NULL;
2254 rqbp->buffer_count--;
2255 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2256 }
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267 static struct hbq_dmabuf *
2268 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2269 {
2270 struct lpfc_dmabuf *d_buf;
2271 struct hbq_dmabuf *hbq_buf;
2272 uint32_t hbqno;
2273
2274 hbqno = tag >> 16;
2275 if (hbqno >= LPFC_MAX_HBQS)
2276 return NULL;
2277
2278 spin_lock_irq(&phba->hbalock);
2279 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2280 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2281 if (hbq_buf->tag == tag) {
2282 spin_unlock_irq(&phba->hbalock);
2283 return hbq_buf;
2284 }
2285 }
2286 spin_unlock_irq(&phba->hbalock);
2287 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2288 "1803 Bad hbq tag. Data: x%x x%x\n",
2289 tag, phba->hbqs[tag >> 16].buffer_count);
2290 return NULL;
2291 }
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302 void
2303 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2304 {
2305 uint32_t hbqno;
2306
2307 if (hbq_buffer) {
2308 hbqno = hbq_buffer->tag >> 16;
2309 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2310 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2311 }
2312 }
2313 /**
2314 * lpfc_sli_chk_mbx_command - Check if the mailbox command is legitimate
2315 * @mbxCommand: mailbox command code.
2316 *
2317 * This function returns the given command code unchanged if it is a
2318 * known mailbox command. Unknown commands are mapped to MBX_SHUTDOWN,
2319 * telling the caller to shut down the port, since completion of an
2320 * unknown command indicates a serious inconsistency.
2321 **/
2322 
2323 static int
2324 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2325 {
2326 uint8_t ret;
2327
2328 switch (mbxCommand) {
2329 case MBX_LOAD_SM:
2330 case MBX_READ_NV:
2331 case MBX_WRITE_NV:
2332 case MBX_WRITE_VPARMS:
2333 case MBX_RUN_BIU_DIAG:
2334 case MBX_INIT_LINK:
2335 case MBX_DOWN_LINK:
2336 case MBX_CONFIG_LINK:
2337 case MBX_CONFIG_RING:
2338 case MBX_RESET_RING:
2339 case MBX_READ_CONFIG:
2340 case MBX_READ_RCONFIG:
2341 case MBX_READ_SPARM:
2342 case MBX_READ_STATUS:
2343 case MBX_READ_RPI:
2344 case MBX_READ_XRI:
2345 case MBX_READ_REV:
2346 case MBX_READ_LNK_STAT:
2347 case MBX_REG_LOGIN:
2348 case MBX_UNREG_LOGIN:
2349 case MBX_CLEAR_LA:
2350 case MBX_DUMP_MEMORY:
2351 case MBX_DUMP_CONTEXT:
2352 case MBX_RUN_DIAGS:
2353 case MBX_RESTART:
2354 case MBX_UPDATE_CFG:
2355 case MBX_DOWN_LOAD:
2356 case MBX_DEL_LD_ENTRY:
2357 case MBX_RUN_PROGRAM:
2358 case MBX_SET_MASK:
2359 case MBX_SET_VARIABLE:
2360 case MBX_UNREG_D_ID:
2361 case MBX_KILL_BOARD:
2362 case MBX_CONFIG_FARP:
2363 case MBX_BEACON:
2364 case MBX_LOAD_AREA:
2365 case MBX_RUN_BIU_DIAG64:
2366 case MBX_CONFIG_PORT:
2367 case MBX_READ_SPARM64:
2368 case MBX_READ_RPI64:
2369 case MBX_REG_LOGIN64:
2370 case MBX_READ_TOPOLOGY:
2371 case MBX_WRITE_WWN:
2372 case MBX_SET_DEBUG:
2373 case MBX_LOAD_EXP_ROM:
2374 case MBX_ASYNCEVT_ENABLE:
2375 case MBX_REG_VPI:
2376 case MBX_UNREG_VPI:
2377 case MBX_HEARTBEAT:
2378 case MBX_PORT_CAPABILITIES:
2379 case MBX_PORT_IOV_CONTROL:
2380 case MBX_SLI4_CONFIG:
2381 case MBX_SLI4_REQ_FTRS:
2382 case MBX_REG_FCFI:
2383 case MBX_UNREG_FCFI:
2384 case MBX_REG_VFI:
2385 case MBX_UNREG_VFI:
2386 case MBX_INIT_VPI:
2387 case MBX_INIT_VFI:
2388 case MBX_RESUME_RPI:
2389 case MBX_READ_EVENT_LOG_STATUS:
2390 case MBX_READ_EVENT_LOG:
2391 case MBX_SECURITY_MGMT:
2392 case MBX_AUTH_PORT:
2393 case MBX_ACCESS_VDATA:
2394 ret = mbxCommand;
2395 break;
2396 default:
2397 ret = MBX_SHUTDOWN;
2398 break;
2399 }
2400 return ret;
2401 }
2402 /**
2403 * lpfc_sli_wake_mbox_wait - Completion handler for synchronous mailboxes
2404 * @phba: Pointer to HBA context object.
2405 * @pmboxq: Pointer to mailbox object.
2406 *
2407 * This is the completion handler for mailbox commands issued through
2408 * the synchronous (wait) path. It sets the wake flag and completes
2409 * the waiter's completion, if a waiter is still registered.
2410 **/
2411 
2412 
2413 
2414 void
2415 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2416 {
2417 unsigned long drvr_flag;
2418 struct completion *pmbox_done;
2419
2420 /*
2421 * If the waiter already timed out, context3 will be NULL and
2422 * there is nobody left to wake.
2423 */
2424 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2425 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2426 pmbox_done = (struct completion *)pmboxq->context3;
2427 if (pmbox_done)
2428 complete(pmbox_done);
2429 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2430 return;
2431 }
2432
2433 static void
2434 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2435 {
2436 unsigned long iflags;
2437
2438 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2439 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2440 spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
2441 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2442 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2443 spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
2444 }
2445 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2446 }
2447
2448 /**
2449 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2450 * @phba: Pointer to HBA context object.
2451 * @pmb: Pointer to mailbox object.
2452 *
2453 * This is the default mailbox completion handler. It frees the memory
2454 * resources associated with the completed mailbox command. If the
2455 * completed command was a successful REG_LOGIN64, an UNREG_LOGIN is
2456 * issued so that the rpi is reclaimed.
2457 **/
2458 void
2459 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2460 {
2461 struct lpfc_vport *vport = pmb->vport;
2462 struct lpfc_dmabuf *mp;
2463 struct lpfc_nodelist *ndlp;
2464 struct Scsi_Host *shost;
2465 uint16_t rpi, vpi;
2466 int rc;
2467
2468 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2469
2470 if (mp) {
2471 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2472 kfree(mp);
2473 }
2474 
2475 /*
2476 * If a REG_LOGIN succeeded after the node was freed or is being
2477 * re-discovered, clean up the rpi with an UNREG_LOGIN.
2478 */
2479 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2480 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2481 !pmb->u.mb.mbxStatus) {
2482 rpi = pmb->u.mb.un.varWords[0];
2483 vpi = pmb->u.mb.un.varRegLogin.vpi;
2484 if (phba->sli_rev == LPFC_SLI_REV4)
2485 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2486 lpfc_unreg_login(phba, vpi, rpi, pmb);
2487 pmb->vport = vport;
2488 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2489 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2490 if (rc != MBX_NOT_FINISHED)
2491 return;
2492 }
2493
2494 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2495 !(phba->pport->load_flag & FC_UNLOADING) &&
2496 !pmb->u.mb.mbxStatus) {
2497 shost = lpfc_shost_from_vport(vport);
2498 spin_lock_irq(shost->host_lock);
2499 vport->vpi_state |= LPFC_VPI_REGISTERED;
2500 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2501 spin_unlock_irq(shost->host_lock);
2502 }
2503
2504 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2505 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2506 lpfc_nlp_put(ndlp);
2507 pmb->ctx_buf = NULL;
2508 pmb->ctx_ndlp = NULL;
2509 }
2510
2511 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2512 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2513 
2514 /* Check if there are any deferred events to process */
2515 if (ndlp) {
2516 lpfc_printf_vlog(
2517 vport,
2518 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2519 "1438 UNREG cmpl deferred mbox x%x "
2520 "on NPort x%x Data: x%x x%x %px\n",
2521 ndlp->nlp_rpi, ndlp->nlp_DID,
2522 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2523
2524 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2525 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2526 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2527 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2528 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2529 } else {
2530 __lpfc_sli_rpi_release(vport, ndlp);
2531 }
2532 if (vport->load_flag & FC_UNLOADING)
2533 lpfc_nlp_put(ndlp);
2534 pmb->ctx_ndlp = NULL;
2535 }
2536 }
2537 
2538 /* Check security permission status on INIT_LINK mailbox command */
2539 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2540 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2541 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2542 "2860 SLI authentication is required "
2543 "for INIT_LINK but has not done yet\n");
2544
2545 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2546 lpfc_sli4_mbox_cmd_free(phba, pmb);
2547 else
2548 mempool_free(pmb, phba->mbox_mem_pool);
2549 }
2550 /**
2551 * lpfc_sli4_unreg_rpi_cmpl_clr - Completion handler for unreg mailbox
2552 * @phba: Pointer to HBA context object.
2553 * @pmb: Pointer to mailbox object.
2554 *
2555 * This is the completion handler for an UNREG_LOGIN issued with the
2556 * unreg_login_clr option on SLI4 if_type 2 (or later) ports. It drops
2557 * the node reference held for the command and, if a PLOGI was
2558 * deferred while the unregister was in progress, reissues it.
2559 **/
2560 
2561 
2562 
2563 void
2564 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2565 {
2566 struct lpfc_vport *vport = pmb->vport;
2567 struct lpfc_nodelist *ndlp;
2568
2569 ndlp = pmb->ctx_ndlp;
2570 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2571 if (phba->sli_rev == LPFC_SLI_REV4 &&
2572 (bf_get(lpfc_sli_intf_if_type,
2573 &phba->sli4_hba.sli_intf) >=
2574 LPFC_SLI_INTF_IF_TYPE_2)) {
2575 if (ndlp) {
2576 lpfc_printf_vlog(
2577 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2578 "0010 UNREG_LOGIN vpi:%x "
2579 "rpi:%x DID:%x defer x%x flg x%x "
2580 "map:%x %px\n",
2581 vport->vpi, ndlp->nlp_rpi,
2582 ndlp->nlp_DID, ndlp->nlp_defer_did,
2583 ndlp->nlp_flag,
2584 ndlp->nlp_usg_map, ndlp);
2585 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2586 lpfc_nlp_put(ndlp);
2587 
2588 /*
2589 * Check to see if there are any deferred events to process
2590 */
2591 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2592 (ndlp->nlp_defer_did !=
2593 NLP_EVT_NOTHING_PENDING)) {
2594 lpfc_printf_vlog(
2595 vport, KERN_INFO, LOG_DISCOVERY,
2596 "4111 UNREG cmpl deferred "
2597 "clr x%x on "
2598 "NPort x%x Data: x%x x%px\n",
2599 ndlp->nlp_rpi, ndlp->nlp_DID,
2600 ndlp->nlp_defer_did, ndlp);
2601 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2602 ndlp->nlp_defer_did =
2603 NLP_EVT_NOTHING_PENDING;
2604 lpfc_issue_els_plogi(
2605 vport, ndlp->nlp_DID, 0);
2606 } else {
2607 __lpfc_sli_rpi_release(vport, ndlp);
2608 }
2609 }
2610 }
2611 }
2612
2613 mempool_free(pmb, phba->mbox_mem_pool);
2614 }
2615
2616 /**
2617 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2618 * @phba: Pointer to HBA context object.
2619 *
2620 * This function is called by the worker thread to process mailbox
2621 * completions queued by the interrupt handler. It walks the completed
2622 * mailbox queue, logs each completion, retries commands that failed
2623 * with MBXERR_NO_RESOURCES, shuts the port down if an unknown command
2624 * completes, and finally invokes each command's completion handler.
2625 * The function always returns zero.
2626 **/
2627 
2628 
2629 int
2630 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2631 {
2632 MAILBOX_t *pmbox;
2633 LPFC_MBOXQ_t *pmb;
2634 int rc;
2635 LIST_HEAD(cmplq);
2636
2637 phba->sli.slistat.mbox_event++;
2638 
2639 /* Take the completed mailbox commands off the driver queue */
2640 spin_lock_irq(&phba->hbalock);
2641 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2642 spin_unlock_irq(&phba->hbalock);
2643 
2644 /* Process each completed mailbox command */
2645 do {
2646 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2647 if (pmb == NULL)
2648 break;
2649
2650 pmbox = &pmb->u.mb;
2651
2652 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2653 if (pmb->vport) {
2654 lpfc_debugfs_disc_trc(pmb->vport,
2655 LPFC_DISC_TRC_MBOX_VPORT,
2656 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2657 (uint32_t)pmbox->mbxCommand,
2658 pmbox->un.varWords[0],
2659 pmbox->un.varWords[1]);
2660 } else {
2662 lpfc_debugfs_disc_trc(phba->pport,
2663 LPFC_DISC_TRC_MBOX,
2664 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2665 (uint32_t)pmbox->mbxCommand,
2666 pmbox->un.varWords[0],
2667 pmbox->un.varWords[1]);
2668 }
2669 }
2670
2671
2672
2673
2674 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2675 MBX_SHUTDOWN) {
2676
2677 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2678 "(%d):0323 Unknown Mailbox command "
2679 "x%x (x%x/x%x) Cmpl\n",
2680 pmb->vport ? pmb->vport->vpi : 0,
2681 pmbox->mbxCommand,
2682 lpfc_sli_config_mbox_subsys_get(phba,
2683 pmb),
2684 lpfc_sli_config_mbox_opcode_get(phba,
2685 pmb));
2686 phba->link_state = LPFC_HBA_ERROR;
2687 phba->work_hs = HS_FFER3;
2688 lpfc_handle_eratt(phba);
2689 continue;
2690 }
2691
2692 if (pmbox->mbxStatus) {
2693 phba->sli.slistat.mbox_stat_err++;
2694 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2695
2696 lpfc_printf_log(phba, KERN_INFO,
2697 LOG_MBOX | LOG_SLI,
2698 "(%d):0305 Mbox cmd cmpl "
2699 "error - RETRYing Data: x%x "
2700 "(x%x/x%x) x%x x%x x%x\n",
2701 pmb->vport ? pmb->vport->vpi : 0,
2702 pmbox->mbxCommand,
2703 lpfc_sli_config_mbox_subsys_get(phba,
2704 pmb),
2705 lpfc_sli_config_mbox_opcode_get(phba,
2706 pmb),
2707 pmbox->mbxStatus,
2708 pmbox->un.varWords[0],
2709 pmb->vport->port_state);
2710 pmbox->mbxStatus = 0;
2711 pmbox->mbxOwner = OWN_HOST;
2712 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2713 if (rc != MBX_NOT_FINISHED)
2714 continue;
2715 }
2716 }
2717
2718
2719 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2720 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2721 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2722 "x%x x%x x%x\n",
2723 pmb->vport ? pmb->vport->vpi : 0,
2724 pmbox->mbxCommand,
2725 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2726 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2727 pmb->mbox_cmpl,
2728 *((uint32_t *) pmbox),
2729 pmbox->un.varWords[0],
2730 pmbox->un.varWords[1],
2731 pmbox->un.varWords[2],
2732 pmbox->un.varWords[3],
2733 pmbox->un.varWords[4],
2734 pmbox->un.varWords[5],
2735 pmbox->un.varWords[6],
2736 pmbox->un.varWords[7],
2737 pmbox->un.varWords[8],
2738 pmbox->un.varWords[9],
2739 pmbox->un.varWords[10]);
2740
2741 if (pmb->mbox_cmpl)
2742 pmb->mbox_cmpl(phba, pmb);
2743 } while (1);
2744 return 0;
2745 }
2746 /**
2747 * lpfc_sli_get_buff - Get the buffer associated with an unsolicited iocb
2748 * @phba: Pointer to HBA context object.
2749 * @pring: Pointer to driver SLI ring object.
2750 * @tag: buffer tag.
2751 *
2752 * If the tag has the QUE_BUFTAG_BIT set, the buffer is looked up on
2753 * the ring's posted buffer list; otherwise the tag identifies an hbq
2754 * buffer and the hbq list is searched. Returns the dma buffer
2755 * associated with the tag, or NULL if none is found.
2756 **/
2757 
2758 
2759 static struct lpfc_dmabuf *
2760 lpfc_sli_get_buff(struct lpfc_hba *phba,
2761 struct lpfc_sli_ring *pring,
2762 uint32_t tag)
2763 {
2764 struct hbq_dmabuf *hbq_entry;
2765
2766 if (tag & QUE_BUFTAG_BIT)
2767 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2768 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2769 if (!hbq_entry)
2770 return NULL;
2771 return &hbq_entry->dbuf;
2772 }
2773 /**
2774 * lpfc_complete_unsol_iocb - Route an unsolicited iocb to its handler
2775 * @phba: Pointer to HBA context object.
2776 * @pring: Pointer to driver SLI ring object.
2777 * @saveq: Pointer to the unsolicited iocb.
2778 * @fch_r_ctl: R_CTL field of the received frame.
2779 * @fch_type: TYPE field of the received frame.
2780 *
2781 * This function finds the registered unsolicited event handler that
2782 * matches the frame's R_CTL/TYPE pair (or the ring's profile handler,
2783 * if set) and invokes it. NVME LS frames go straight to the NVMET
2784 * handler. Returns 1 if a handler consumed the iocb, otherwise 0.
2785 **/
2786 static int
2787 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2788 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2789 uint32_t fch_type)
2790 {
2791 int i;
2792
2793 switch (fch_type) {
2794 case FC_TYPE_NVME:
2795 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2796 return 1;
2797 default:
2798 break;
2799 }
2800 
2801 /* If the ring has a profile set, it handles all unsolicited frames */
2802 if (pring->prt[0].profile) {
2803 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2804 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2805 saveq);
2806 return 1;
2807 }
2808 
2809 /* Otherwise match the frame's rctl/type against the ring's masks */
2810 for (i = 0; i < pring->num_mask; i++) {
2811 if ((pring->prt[i].rctl == fch_r_ctl) &&
2812 (pring->prt[i].type == fch_type)) {
2813 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2814 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2815 (phba, pring, saveq);
2816 return 1;
2817 }
2818 }
2819 return 0;
2820 }
2821 /**
2822 * lpfc_sli_process_unsol_iocb - Process an unsolicited iocb
2823 * @phba: Pointer to HBA context object.
2824 * @pring: Pointer to driver SLI ring object.
2825 * @saveq: Pointer to the unsolicited iocb.
2826 *
2827 * This function is called with no lock held when the firmware posts
2828 * an unsolicited iocb. It handles async status iocbs, frees buffers
2829 * returned by RET_XRI iocbs, attaches the dma buffers referenced by
2830 * the iocb, gathers the pieces of a sequence that spans several
2831 * iocbs, and hands the assembled sequence to
2832 * lpfc_complete_unsol_iocb(). It returns 1 when the caller may free
2833 * @saveq and 0 when the iocb was queued for a continuation.
2834 **/
2835 
2836 static int
2837 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2838 struct lpfc_iocbq *saveq)
2839 {
2840 IOCB_t * irsp;
2841 WORD5 * w5p;
2842 uint32_t Rctl, Type;
2843 struct lpfc_iocbq *iocbq;
2844 struct lpfc_dmabuf *dmzbuf;
2845
2846 irsp = &(saveq->iocb);
2847
2848 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2849 if (pring->lpfc_sli_rcv_async_status)
2850 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2851 else
2852 lpfc_printf_log(phba,
2853 KERN_WARNING,
2854 LOG_SLI,
2855 "0316 Ring %d handler: unexpected "
2856 "ASYNC_STATUS iocb received evt_code "
2857 "0x%x\n",
2858 pring->ringno,
2859 irsp->un.asyncstat.evt_code);
2860 return 1;
2861 }
2862
2863 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2864 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2865 if (irsp->ulpBdeCount > 0) {
2866 dmzbuf = lpfc_sli_get_buff(phba, pring,
2867 irsp->un.ulpWord[3]);
2868 lpfc_in_buf_free(phba, dmzbuf);
2869 }
2870
2871 if (irsp->ulpBdeCount > 1) {
2872 dmzbuf = lpfc_sli_get_buff(phba, pring,
2873 irsp->unsli3.sli3Words[3]);
2874 lpfc_in_buf_free(phba, dmzbuf);
2875 }
2876
2877 if (irsp->ulpBdeCount > 2) {
2878 dmzbuf = lpfc_sli_get_buff(phba, pring,
2879 irsp->unsli3.sli3Words[7]);
2880 lpfc_in_buf_free(phba, dmzbuf);
2881 }
2882
2883 return 1;
2884 }
2885
2886 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2887 if (irsp->ulpBdeCount != 0) {
2888 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2889 irsp->un.ulpWord[3]);
2890 if (!saveq->context2)
2891 lpfc_printf_log(phba,
2892 KERN_ERR,
2893 LOG_SLI,
2894 "0341 Ring %d Cannot find buffer for "
2895 "an unsolicited iocb. tag 0x%x\n",
2896 pring->ringno,
2897 irsp->un.ulpWord[3]);
2898 }
2899 if (irsp->ulpBdeCount == 2) {
2900 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2901 irsp->unsli3.sli3Words[7]);
2902 if (!saveq->context3)
2903 lpfc_printf_log(phba,
2904 KERN_ERR,
2905 LOG_SLI,
2906 "0342 Ring %d Cannot find buffer for an"
2907 " unsolicited iocb. tag 0x%x\n",
2908 pring->ringno,
2909 irsp->unsli3.sli3Words[7]);
2910 }
2911 list_for_each_entry(iocbq, &saveq->list, list) {
2912 irsp = &(iocbq->iocb);
2913 if (irsp->ulpBdeCount != 0) {
2914 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2915 irsp->un.ulpWord[3]);
2916 if (!iocbq->context2)
2917 lpfc_printf_log(phba,
2918 KERN_ERR,
2919 LOG_SLI,
2920 "0343 Ring %d Cannot find "
2921 "buffer for an unsolicited iocb"
2922 ". tag 0x%x\n", pring->ringno,
2923 irsp->un.ulpWord[3]);
2924 }
2925 if (irsp->ulpBdeCount == 2) {
2926 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2927 irsp->unsli3.sli3Words[7]);
2928 if (!iocbq->context3)
2929 lpfc_printf_log(phba,
2930 KERN_ERR,
2931 LOG_SLI,
2932 "0344 Ring %d Cannot find "
2933 "buffer for an unsolicited "
2934 "iocb. tag 0x%x\n",
2935 pring->ringno,
2936 irsp->unsli3.sli3Words[7]);
2937 }
2938 }
2939 }
2940 if (irsp->ulpBdeCount != 0 &&
2941 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2942 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2943 int found = 0;
2944 
2945 /* search the continuation queue for a frame with the same ox_id */
2946 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2947 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2948 saveq->iocb.unsli3.rcvsli3.ox_id) {
2949 list_add_tail(&saveq->list, &iocbq->list);
2950 found = 1;
2951 break;
2952 }
2953 }
2954 if (!found)
2955 list_add_tail(&saveq->clist,
2956 &pring->iocb_continue_saveq);
2957 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2958 list_del_init(&iocbq->clist);
2959 saveq = iocbq;
2960 irsp = &(saveq->iocb);
2961 } else
2962 return 0;
2963 }
2964 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2965 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2966 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2967 Rctl = FC_RCTL_ELS_REQ;
2968 Type = FC_TYPE_ELS;
2969 } else {
2970 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2971 Rctl = w5p->hcsw.Rctl;
2972 Type = w5p->hcsw.Type;
2973 
2974 /* Firmware workaround: some receive iocbs arrive with Rctl zeroed */
2975 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2976 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2977 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2978 Rctl = FC_RCTL_ELS_REQ;
2979 Type = FC_TYPE_ELS;
2980 w5p->hcsw.Rctl = Rctl;
2981 w5p->hcsw.Type = Type;
2982 }
2983 }
2984
2985 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2986 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2987 "0313 Ring %d handler: unexpected Rctl x%x "
2988 "Type x%x received\n",
2989 pring->ringno, Rctl, Type);
2990
2991 return 1;
2992 }
2993 /**
2994 * lpfc_sli_iocbq_lookup - Find the command iocb for a response iocb
2995 * @phba: Pointer to HBA context object.
2996 * @pring: Pointer to driver SLI ring object.
2997 * @prspiocb: Pointer to the response iocb.
2998 *
2999 * This function looks up the command iocb matching the iotag carried
3000 * in the response iocb and, if it is still on the txcmplq, removes it
3001 * from that queue. It returns the command iocb, or NULL when the
3002 * iotag is out of range or the command has already completed.
3003 **/
3004 
3005 
3006 
3007 static struct lpfc_iocbq *
3008 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3009 struct lpfc_sli_ring *pring,
3010 struct lpfc_iocbq *prspiocb)
3011 {
3012 struct lpfc_iocbq *cmd_iocb = NULL;
3013 uint16_t iotag;
3014 spinlock_t *temp_lock = NULL;
3015 unsigned long iflag = 0;
3016
3017 if (phba->sli_rev == LPFC_SLI_REV4)
3018 temp_lock = &pring->ring_lock;
3019 else
3020 temp_lock = &phba->hbalock;
3021
3022 spin_lock_irqsave(temp_lock, iflag);
3023 iotag = prspiocb->iocb.ulpIoTag;
3024
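/* The iotag indexes directly into the driver's iocbq lookup array */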
3025 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3026 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3027 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3028 /* remove the command from the txcmplq */
3029 list_del_init(&cmd_iocb->list);
3030 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3031 pring->txcmplq_cnt--;
3032 spin_unlock_irqrestore(temp_lock, iflag);
3033 return cmd_iocb;
3034 }
3035 }
3036
3037 spin_unlock_irqrestore(temp_lock, iflag);
3038 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3039 "0317 iotag x%x is out of "
3040 "range: max iotag x%x wd0 x%x\n",
3041 iotag, phba->sli.last_iotag,
3042 *(((uint32_t *) &prspiocb->iocb) + 7));
3043 return NULL;
3044 }
3045 /**
3046 * lpfc_sli_iocbq_lookup_by_tag - Find a command iocb by iotag
3047 * @phba: Pointer to HBA context object.
3048 * @pring: Pointer to driver SLI ring object.
3049 * @iotag: IOCB tag.
3050 *
3051 * This function behaves like lpfc_sli_iocbq_lookup() but takes the
3052 * iotag directly instead of extracting it from a response iocb. It
3053 * returns the matching command iocb after removing it from the
3054 * txcmplq, or NULL when no active command matches the tag.
3055 **/
3056 
3057 
3058 static struct lpfc_iocbq *
3059 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3060 struct lpfc_sli_ring *pring, uint16_t iotag)
3061 {
3062 struct lpfc_iocbq *cmd_iocb = NULL;
3063 spinlock_t *temp_lock = NULL;
3064 unsigned long iflag = 0;
3065
3066 if (phba->sli_rev == LPFC_SLI_REV4)
3067 temp_lock = &pring->ring_lock;
3068 else
3069 temp_lock = &phba->hbalock;
3070
3071 spin_lock_irqsave(temp_lock, iflag);
3072 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3073 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3074 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3075 /* remove the command from the txcmplq */
3076 list_del_init(&cmd_iocb->list);
3077 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3078 pring->txcmplq_cnt--;
3079 spin_unlock_irqrestore(temp_lock, iflag);
3080 return cmd_iocb;
3081 }
3082 }
3083
3084 spin_unlock_irqrestore(temp_lock, iflag);
3085 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3086 "0372 iotag x%x lookup error: max iotag (x%x) "
3087 "iocb_flag x%x\n",
3088 iotag, phba->sli.last_iotag,
3089 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3090 return NULL;
3091 }
3092 /**
3093 * lpfc_sli_process_sol_iocb - Process a solicited iocb completion
3094 * @phba: Pointer to HBA context object.
3095 * @pring: Pointer to driver SLI ring object.
3096 * @saveq: Pointer to the response iocb.
3097 *
3098 * This function is called by the ring event handler for non-fcp
3099 * rings when there is a solicited iocb completion. It looks up the
3100 * command iocb for the response, fixes up the status of aborted ELS
3101 * commands so their handlers see a consistent local-reject status,
3102 * and then invokes the command's completion handler. The function
3103 * always returns 1.
3104 **/
3105 
3106 
3107 
3108 
3109 
3110 static int
3111 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3112 struct lpfc_iocbq *saveq)
3113 {
3114 struct lpfc_iocbq *cmdiocbp;
3115 int rc = 1;
3116 unsigned long iflag;
3117
3118 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3119 if (cmdiocbp) {
3120 if (cmdiocbp->iocb_cmpl) {
3121 
3122 /* If an ELS command failed, send a failure
3123 * event to the management application.
3124 */
3125 if (saveq->iocb.ulpStatus &&
3126 (pring->ringno == LPFC_ELS_RING) &&
3127 (cmdiocbp->iocb.ulpCommand ==
3128 CMD_ELS_REQUEST64_CR))
3129 lpfc_send_els_failure_event(phba,
3130 cmdiocbp, saveq);
3131 
3132 /*
3133 * ELS ring completions of driver-aborted commands need
3134 * their status normalized before the handler is called.
3135 */
3136 if (pring->ringno == LPFC_ELS_RING) {
3137 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3138 (cmdiocbp->iocb_flag &
3139 LPFC_DRIVER_ABORTED)) {
3140 spin_lock_irqsave(&phba->hbalock,
3141 iflag);
3142 cmdiocbp->iocb_flag &=
3143 ~LPFC_DRIVER_ABORTED;
3144 spin_unlock_irqrestore(&phba->hbalock,
3145 iflag);
3146 saveq->iocb.ulpStatus =
3147 IOSTAT_LOCAL_REJECT;
3148 saveq->iocb.un.ulpWord[4] =
3149 IOERR_SLI_ABORTED;
3150 
3151 /* Defer freeing the response iocb memory
3152 * until the abort exchange has also been
3153 * handled.
3154 */
3155 spin_lock_irqsave(&phba->hbalock,
3156 iflag);
3157 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3158 spin_unlock_irqrestore(&phba->hbalock,
3159 iflag);
3160 }
3161 if (phba->sli_rev == LPFC_SLI_REV4) {
3162 if (saveq->iocb_flag &
3163 LPFC_EXCHANGE_BUSY) {
3164 
3165 /* Propagate the exchange-busy flag to the
3166 * command iocb so its sgl (xri) is not
3167 * released until the abort is confirmed
3168 * by the HBA.
3169 */
3170 spin_lock_irqsave(
3171 &phba->hbalock, iflag);
3172 cmdiocbp->iocb_flag |=
3173 LPFC_EXCHANGE_BUSY;
3174 spin_unlock_irqrestore(
3175 &phba->hbalock, iflag);
3176 }
3177 if (cmdiocbp->iocb_flag &
3178 LPFC_DRIVER_ABORTED) {
3179 
3180 /* Clear the driver-aborted flag and report
3181 * a local-reject status through both
3182 * completion paths.
3183 */
3184 spin_lock_irqsave(
3185 &phba->hbalock, iflag);
3186 cmdiocbp->iocb_flag &=
3187 ~LPFC_DRIVER_ABORTED;
3188 spin_unlock_irqrestore(
3189 &phba->hbalock, iflag);
3190 cmdiocbp->iocb.ulpStatus =
3191 IOSTAT_LOCAL_REJECT;
3192 cmdiocbp->iocb.un.ulpWord[4] =
3193 IOERR_ABORT_REQUESTED;
3194 
3195 /* The response is rewritten with the same
3196 * local-reject status, and freeing of its
3197 * memory is deferred until the abort
3198 * exchange completes.
3199 */
3200 saveq->iocb.ulpStatus =
3201 IOSTAT_LOCAL_REJECT;
3202 saveq->iocb.un.ulpWord[4] =
3203 IOERR_SLI_ABORTED;
3204 spin_lock_irqsave(
3205 &phba->hbalock, iflag);
3206 saveq->iocb_flag |=
3207 LPFC_DELAY_MEM_FREE;
3208 spin_unlock_irqrestore(
3209 &phba->hbalock, iflag);
3210 }
3211 }
3212 }
3213 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3214 } else
3215 lpfc_sli_release_iocbq(phba, cmdiocbp);
3216 } else {
3217 
3218 /*
3219 * No command iocb matched the response iotag. On the ELS
3220 * ring this can legitimately happen after lpfc_els_abort().
3221 */
3222 if (pring->ringno != LPFC_ELS_RING) {
3223
3224
3225
3226
3227 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3228 "0322 Ring %d handler: "
3229 "unexpected completion IoTag x%x "
3230 "Data: x%x x%x x%x x%x\n",
3231 pring->ringno,
3232 saveq->iocb.ulpIoTag,
3233 saveq->iocb.ulpStatus,
3234 saveq->iocb.un.ulpWord[4],
3235 saveq->iocb.ulpCommand,
3236 saveq->iocb.ulpContext);
3237 }
3238 }
3239
3240 return rc;
3241 }
3242 /**
3243 * lpfc_sli_rsp_pointers_error - Handle a bad response ring pointer
3244 * @phba: Pointer to HBA context object.
3245 * @pring: Pointer to driver SLI ring object.
3246 *
3247 * This function is called when the port's response put index is
3248 * larger than the response ring itself, which means the HBA is in an
3249 * erroneous state. It marks the HBA as errored and wakes the worker
3250 * thread to handle the error attention.
3251 **/
3252 
3253 static void
3254 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3255 {
3256 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3257
3258
3259
3260
3261 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3262 "0312 Ring %d handler: portRspPut %d "
3263 "is bigger than rsp ring %d\n",
3264 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3265 pring->sli.sli3.numRiocb);
3266
3267 phba->link_state = LPFC_HBA_ERROR;
3268 
3269 /*
3270 * All error attention handlers are posted to the worker thread.
3271 */
3272 
3273 phba->work_ha |= HA_ERATT;
3274 phba->work_hs = HS_FFER3;
3275
3276 lpfc_worker_wake_up(phba);
3277
3278 return;
3279 }
3280 /**
3281 * lpfc_poll_eratt - Error attention polling timer handler
3282 * @t: Context of the polling timer.
3283 *
3284 * This timer handler checks for host-status error attention events
3285 * and maintains the interrupts-per-second statistic. If an error
3286 * attention is found, the worker thread is woken to process it;
3287 * otherwise the timer is simply rearmed.
3288 **/
3289 
3290 
3291 void lpfc_poll_eratt(struct timer_list *t)
3292 {
3293 struct lpfc_hba *phba;
3294 uint32_t eratt = 0;
3295 uint64_t sli_intr, cnt;
3296
3297 phba = from_timer(phba, t, eratt_poll);
3298
3299
3300 sli_intr = phba->sli.slistat.sli_intr;
3301
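/* Account for a possible wrap of the 64-bit interrupt counter */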
3302 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3303 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3304 sli_intr);
3305 else
3306 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3307
3308
3309 do_div(cnt, phba->eratt_poll_interval);
3310 phba->sli.slistat.sli_ips = cnt;
3311
3312 phba->sli.slistat.sli_prev_intr = sli_intr;
3313
3314
3315 eratt = lpfc_sli_check_eratt(phba);
3316
3317 if (eratt)
3318
3319 lpfc_worker_wake_up(phba);
3320 else
3321
3322 mod_timer(&phba->eratt_poll,
3323 jiffies +
3324 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3325 return;
3326 }
3327 /**
3328 * lpfc_sli_handle_fast_ring_event - Handle ring events on the FCP ring
3329 * @phba: Pointer to HBA context object.
3330 * @pring: Pointer to driver SLI ring object.
3331 * @mask: Host attention register mask for this ring.
3332 *
3333 * This function is called from the interrupt context when there is a
3334 * ring event on the FCP ring. Because completions on the fast ring
3335 * arrive in order, each response entry is copied into a stack iocb,
3336 * the matching command iocb is looked up, and its completion handler
3337 * is invoked without detaching the response entry from the ring. The
3338 * function returns 1 on completion.
3339 **/
3340 
3341 
3342 
3343 
3344 
3345 
3346 int
3347 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3348 struct lpfc_sli_ring *pring, uint32_t mask)
3349 {
3350 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3351 IOCB_t *irsp = NULL;
3352 IOCB_t *entry = NULL;
3353 struct lpfc_iocbq *cmdiocbq = NULL;
3354 struct lpfc_iocbq rspiocbq;
3355 uint32_t status;
3356 uint32_t portRspPut, portRspMax;
3357 int rc = 1;
3358 lpfc_iocb_type type;
3359 unsigned long iflag;
3360 uint32_t rsp_cmpl = 0;
3361
3362 spin_lock_irqsave(&phba->hbalock, iflag);
3363 pring->stats.iocb_event++;
3364 
3365 /*
3366 * The next available response entry should never exceed the maximum
3367 * entries. If it does, treat it as an adapter hardware error.
3368 */
3369 portRspMax = pring->sli.sli3.numRiocb;
3370 portRspPut = le32_to_cpu(pgp->rspPutInx);
3371 if (unlikely(portRspPut >= portRspMax)) {
3372 lpfc_sli_rsp_pointers_error(phba, pring);
3373 spin_unlock_irqrestore(&phba->hbalock, iflag);
3374 return 1;
3375 }
3376 if (phba->fcp_ring_in_use) {
3377 spin_unlock_irqrestore(&phba->hbalock, iflag);
3378 return 1;
3379 } else
3380 phba->fcp_ring_in_use = 1;
3381
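/* Make sure rspPutInx is read before any response ring entries */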
3382 rmb();
3383 while (pring->sli.sli3.rspidx != portRspPut) {
3384 /*
3385 * Fetch an entry off the ring and copy it into a local iocb;
3386 * the entry is consumed in place and never detached from the
3387 * ring.
3388 */
3389 entry = lpfc_resp_iocb(phba, pring);
3390 phba->last_completion_time = jiffies;
3391
3392 if (++pring->sli.sli3.rspidx >= portRspMax)
3393 pring->sli.sli3.rspidx = 0;
3394
3395 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3396 (uint32_t *) &rspiocbq.iocb,
3397 phba->iocb_rsp_size);
3398 INIT_LIST_HEAD(&(rspiocbq.list));
3399 irsp = &rspiocbq.iocb;
3400
3401 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3402 pring->stats.iocb_rsp++;
3403 rsp_cmpl++;
3404
3405 if (unlikely(irsp->ulpStatus)) {
3406 
3407 /* If the HBA reported a lack of resources, ramp down the
3408 * SCSI queue depth.
3409 */
3410 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3411 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3412 IOERR_NO_RESOURCES)) {
3413 spin_unlock_irqrestore(&phba->hbalock, iflag);
3414 phba->lpfc_rampdown_queue_depth(phba);
3415 spin_lock_irqsave(&phba->hbalock, iflag);
3416 }
3417
3418
3419 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3420 "0336 Rsp Ring %d error: IOCB Data: "
3421 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3422 pring->ringno,
3423 irsp->un.ulpWord[0],
3424 irsp->un.ulpWord[1],
3425 irsp->un.ulpWord[2],
3426 irsp->un.ulpWord[3],
3427 irsp->un.ulpWord[4],
3428 irsp->un.ulpWord[5],
3429 *(uint32_t *)&irsp->un1,
3430 *((uint32_t *)&irsp->un1 + 1));
3431 }
3432
3433 switch (type) {
3434 case LPFC_ABORT_IOCB:
3435 case LPFC_SOL_IOCB:
3436 
3437 /* Idle exchange closed via ABTS from the port; no iocb
3438 * resources need to be recovered.
3439 */
3440 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3441 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3442 "0333 IOCB cmd 0x%x"
3443 " processed. Skipping"
3444 " completion\n",
3445 irsp->ulpCommand);
3446 break;
3447 }
3448
3449 spin_unlock_irqrestore(&phba->hbalock, iflag);
3450 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3451 &rspiocbq);
3452 spin_lock_irqsave(&phba->hbalock, iflag);
3453 if (unlikely(!cmdiocbq))
3454 break;
3455 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3456 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3457 if (cmdiocbq->iocb_cmpl) {
3458 spin_unlock_irqrestore(&phba->hbalock, iflag);
3459 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3460 &rspiocbq);
3461 spin_lock_irqsave(&phba->hbalock, iflag);
3462 }
3463 break;
3464 case LPFC_UNSOL_IOCB:
3465 spin_unlock_irqrestore(&phba->hbalock, iflag);
3466 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3467 spin_lock_irqsave(&phba->hbalock, iflag);
3468 break;
3469 default:
3470 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3471 char adaptermsg[LPFC_MAX_ADPTMSG];
3472 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3473 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3474 MAX_MSG_DATA);
3475 dev_warn(&((phba->pcidev)->dev),
3476 "lpfc%d: %s\n",
3477 phba->brd_no, adaptermsg);
3478 } else {
3479
3480 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3481 "0334 Unknown IOCB command "
3482 "Data: x%x, x%x x%x x%x x%x\n",
3483 type, irsp->ulpCommand,
3484 irsp->ulpStatus,
3485 irsp->ulpIoTag,
3486 irsp->ulpContext);
3487 }
3488 break;
3489 }
3490
3491
3492
3493
3494
3495
3496
3497 writel(pring->sli.sli3.rspidx,
3498 &phba->host_gp[pring->ringno].rspGetInx);
3499
3500 if (pring->sli.sli3.rspidx == portRspPut)
3501 portRspPut = le32_to_cpu(pgp->rspPutInx);
3502 }
3503
3504 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3505 pring->stats.iocb_rsp_full++;
3506 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3507 writel(status, phba->CAregaddr);
3508 readl(phba->CAregaddr);
3509 }
3510 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3511 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3512 pring->stats.iocb_cmd_empty++;
3513
3514
3515 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3516 lpfc_sli_resume_iocb(phba, pring);
3517
3518 if ((pring->lpfc_sli_cmd_available))
3519 (pring->lpfc_sli_cmd_available) (phba, pring);
3520
3521 }
3522
3523 phba->fcp_ring_in_use = 0;
3524 spin_unlock_irqrestore(&phba->hbalock, iflag);
3525 return rc;
3526 }
3527
3528 /**
3529 * lpfc_sli_sp_handle_rspiocb - Handle a slow-path response iocb
3530 * @phba: Pointer to HBA context object.
3531 * @pring: Pointer to driver SLI ring object.
3532 * @rspiocbp: Pointer to the response iocb.
3533 *
3534 * This function is called from the worker thread when there is a
3535 * slow-path response iocb to process. It collects the pieces of a
3536 * multi-entry response on the ring's continuation queue until the
3537 * entry carrying ulpLe arrives, then dispatches the assembled
3538 * response by iocb type (solicited, unsolicited, abort or unknown)
3539 * and releases the response iocbs. It returns the last response
3540 * iocb, or NULL once the full response has been consumed.
3541 **/
3542 
3543 
3544 
3545 
3546 static struct lpfc_iocbq *
3547 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3548 struct lpfc_iocbq *rspiocbp)
3549 {
3550 struct lpfc_iocbq *saveq;
3551 struct lpfc_iocbq *cmdiocbp;
3552 struct lpfc_iocbq *next_iocb;
3553 IOCB_t *irsp = NULL;
3554 uint32_t free_saveq;
3555 uint8_t iocb_cmd_type;
3556 lpfc_iocb_type type;
3557 unsigned long iflag;
3558 int rc;
3559
3560 spin_lock_irqsave(&phba->hbalock, iflag);
3561
3562 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3563 pring->iocb_continueq_cnt++;
3564
3565
3566 irsp = &rspiocbp->iocb;
3567 if (irsp->ulpLe) {
3568
3569
3570
3571
3572 free_saveq = 1;
3573 saveq = list_get_first(&pring->iocb_continueq,
3574 struct lpfc_iocbq, list);
3575 irsp = &(saveq->iocb);
3576 list_del_init(&pring->iocb_continueq);
3577 pring->iocb_continueq_cnt = 0;
3578
3579 pring->stats.iocb_rsp++;
3580
3581
3582
3583
3584
3585 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3586 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3587 IOERR_NO_RESOURCES)) {
3588 spin_unlock_irqrestore(&phba->hbalock, iflag);
3589 phba->lpfc_rampdown_queue_depth(phba);
3590 spin_lock_irqsave(&phba->hbalock, iflag);
3591 }
3592
3593 if (irsp->ulpStatus) {
3594
3595 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3596 "0328 Rsp Ring %d error: "
3597 "IOCB Data: "
3598 "x%x x%x x%x x%x "
3599 "x%x x%x x%x x%x "
3600 "x%x x%x x%x x%x "
3601 "x%x x%x x%x x%x\n",
3602 pring->ringno,
3603 irsp->un.ulpWord[0],
3604 irsp->un.ulpWord[1],
3605 irsp->un.ulpWord[2],
3606 irsp->un.ulpWord[3],
3607 irsp->un.ulpWord[4],
3608 irsp->un.ulpWord[5],
3609 *(((uint32_t *) irsp) + 6),
3610 *(((uint32_t *) irsp) + 7),
3611 *(((uint32_t *) irsp) + 8),
3612 *(((uint32_t *) irsp) + 9),
3613 *(((uint32_t *) irsp) + 10),
3614 *(((uint32_t *) irsp) + 11),
3615 *(((uint32_t *) irsp) + 12),
3616 *(((uint32_t *) irsp) + 13),
3617 *(((uint32_t *) irsp) + 14),
3618 *(((uint32_t *) irsp) + 15));
3619 }
3620
3621
3622
3623
3624
3625
3626
3627 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3628 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3629 switch (type) {
3630 case LPFC_SOL_IOCB:
3631 spin_unlock_irqrestore(&phba->hbalock, iflag);
3632 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3633 spin_lock_irqsave(&phba->hbalock, iflag);
3634 break;
3635
3636 case LPFC_UNSOL_IOCB:
3637 spin_unlock_irqrestore(&phba->hbalock, iflag);
3638 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3639 spin_lock_irqsave(&phba->hbalock, iflag);
3640 if (!rc)
3641 free_saveq = 0;
3642 break;
3643
3644 case LPFC_ABORT_IOCB:
3645 cmdiocbp = NULL;
3646 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3647 spin_unlock_irqrestore(&phba->hbalock, iflag);
3648 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3649 saveq);
3650 spin_lock_irqsave(&phba->hbalock, iflag);
3651 }
3652 if (cmdiocbp) {
3653
3654 if (cmdiocbp->iocb_cmpl) {
3655 spin_unlock_irqrestore(&phba->hbalock,
3656 iflag);
3657 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3658 saveq);
3659 spin_lock_irqsave(&phba->hbalock,
3660 iflag);
3661 } else
3662 __lpfc_sli_release_iocbq(phba,
3663 cmdiocbp);
3664 }
3665 break;
3666
3667 case LPFC_UNKNOWN_IOCB:
3668 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3669 char adaptermsg[LPFC_MAX_ADPTMSG];
3670 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3671 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3672 MAX_MSG_DATA);
3673 dev_warn(&((phba->pcidev)->dev),
3674 "lpfc%d: %s\n",
3675 phba->brd_no, adaptermsg);
3676 } else {
3677
3678 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3679 "0335 Unknown IOCB "
3680 "command Data: x%x "
3681 "x%x x%x x%x\n",
3682 irsp->ulpCommand,
3683 irsp->ulpStatus,
3684 irsp->ulpIoTag,
3685 irsp->ulpContext);
3686 }
3687 break;
3688 }
3689
3690 if (free_saveq) {
3691 list_for_each_entry_safe(rspiocbp, next_iocb,
3692 &saveq->list, list) {
3693 list_del_init(&rspiocbp->list);
3694 __lpfc_sli_release_iocbq(phba, rspiocbp);
3695 }
3696 __lpfc_sli_release_iocbq(phba, saveq);
3697 }
3698 rspiocbp = NULL;
3699 }
3700 spin_unlock_irqrestore(&phba->hbalock, iflag);
3701 return rspiocbp;
3702 }
3703 /**
3704 * lpfc_sli_handle_slow_ring_event - Wrapper for the slow ring handler
3705 * @phba: Pointer to HBA context object.
3706 * @pring: Pointer to driver SLI ring object.
3707 * @mask: Host attention register mask for this ring.
3708 *
3709 * This routine dispatches to the SLI3 or SLI4 specific slow-path
3710 * handler through the function pointer set up at initialization.
3711 **/
3712 
3713 void
3714 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3715 struct lpfc_sli_ring *pring, uint32_t mask)
3716 {
3717 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3718 }
3719
3720 /**
3721 * lpfc_sli_handle_slow_ring_event_s3 - SLI3 slow-path ring handler
3722 * @phba: Pointer to HBA context object.
3723 * @pring: Pointer to driver SLI ring object.
3724 * @mask: Host attention register mask for this ring.
3725 *
3726 * This function is called from the worker thread when there is a
3727 * ring event on a non-fcp ring of an SLI3 port. It copies each
3728 * response entry into a driver iocb and passes it to the slow-path
3729 * response handler.
3730 **/
3731 static void
3732 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3733 struct lpfc_sli_ring *pring, uint32_t mask)
3734 {
3735 struct lpfc_pgp *pgp;
3736 IOCB_t *entry;
3737 IOCB_t *irsp = NULL;
3738 struct lpfc_iocbq *rspiocbp = NULL;
3739 uint32_t portRspPut, portRspMax;
3740 unsigned long iflag;
3741 uint32_t status;
3742
3743 pgp = &phba->port_gp[pring->ringno];
3744 spin_lock_irqsave(&phba->hbalock, iflag);
3745 pring->stats.iocb_event++;
3746
3747
3748
3749
3750
3751 portRspMax = pring->sli.sli3.numRiocb;
3752 portRspPut = le32_to_cpu(pgp->rspPutInx);
3753 if (portRspPut >= portRspMax) {
3754
3755
3756
3757
3758 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3759 "0303 Ring %d handler: portRspPut %d "
3760 "is bigger than rsp ring %d\n",
3761 pring->ringno, portRspPut, portRspMax);
3762
3763 phba->link_state = LPFC_HBA_ERROR;
3764 spin_unlock_irqrestore(&phba->hbalock, iflag);
3765
3766 phba->work_hs = HS_FFER3;
3767 lpfc_handle_eratt(phba);
3768
3769 return;
3770 }
3771
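/* Make sure rspPutInx is read before any response ring entries */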
3772 rmb();
3773 while (pring->sli.sli3.rspidx != portRspPut) {
3774 /*
3775 * Build a completion list and call the appropriate handler.
3776 * The process is to get the next available response iocb, get
3777 * a free iocb from the pool, copy the response data into the
3778 * free iocb, insert it on the continuation list, and update
3779 * the response index.
3780 */
3781 
3782 
3783 
3784 
3785 
3786 
3787 entry = lpfc_resp_iocb(phba, pring);
3788
3789 phba->last_completion_time = jiffies;
3790 rspiocbp = __lpfc_sli_get_iocbq(phba);
3791 if (rspiocbp == NULL) {
3792 printk(KERN_ERR "%s: out of buffers! Failing "
3793 "completion.\n", __func__);
3794 break;
3795 }
3796
3797 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3798 phba->iocb_rsp_size);
3799 irsp = &rspiocbp->iocb;
3800
3801 if (++pring->sli.sli3.rspidx >= portRspMax)
3802 pring->sli.sli3.rspidx = 0;
3803
3804 if (pring->ringno == LPFC_ELS_RING) {
3805 lpfc_debugfs_slow_ring_trc(phba,
3806 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3807 *(((uint32_t *) irsp) + 4),
3808 *(((uint32_t *) irsp) + 6),
3809 *(((uint32_t *) irsp) + 7));
3810 }
3811
3812 writel(pring->sli.sli3.rspidx,
3813 &phba->host_gp[pring->ringno].rspGetInx);
3814
3815 spin_unlock_irqrestore(&phba->hbalock, iflag);
3816
3817 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3818 spin_lock_irqsave(&phba->hbalock, iflag);
3819
3820
3821
3822
3823
3824
3825 if (pring->sli.sli3.rspidx == portRspPut) {
3826 portRspPut = le32_to_cpu(pgp->rspPutInx);
3827 }
3828 }
3829
3830 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3831
3832 pring->stats.iocb_rsp_full++;
3833
3834 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3835 writel(status, phba->CAregaddr);
3836 readl(phba->CAregaddr);
3837 }
3838 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3839 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3840 pring->stats.iocb_cmd_empty++;
3841
3842
3843 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3844 lpfc_sli_resume_iocb(phba, pring);
3845
3846 if ((pring->lpfc_sli_cmd_available))
3847 (pring->lpfc_sli_cmd_available) (phba, pring);
3848
3849 }
3850
3851 spin_unlock_irqrestore(&phba->hbalock, iflag);
3852 return;
3853 }
3854
3855 /**
3856 * lpfc_sli_handle_slow_ring_event_s4 - SLI4 slow-path queue handler
3857 * @phba: Pointer to HBA context object.
3858 * @pring: Pointer to driver SLI ring object.
3859 * @mask: Host attention register mask (unused on SLI4 ports).
3860 *
3861 * This function is called from the worker thread to drain the SLI4
3862 * slow-path queue event list. ELS work-queue completions go to the
3863 * slow-path response handler and received buffers go to the
3864 * receive-buffer handler, up to 64 events per invocation.
3865 **/
3866 
3867 static void
3868 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3869 struct lpfc_sli_ring *pring, uint32_t mask)
3870 {
3871 struct lpfc_iocbq *irspiocbq;
3872 struct hbq_dmabuf *dmabuf;
3873 struct lpfc_cq_event *cq_event;
3874 unsigned long iflag;
3875 int count = 0;
3876
3877 spin_lock_irqsave(&phba->hbalock, iflag);
3878 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3879 spin_unlock_irqrestore(&phba->hbalock, iflag);
3880 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3881 /* Take the next event off the head of the queue */
3882 spin_lock_irqsave(&phba->hbalock, iflag);
3883 list_remove_head(&phba->sli4_hba.sp_queue_event,
3884 cq_event, struct lpfc_cq_event, list);
3885 spin_unlock_irqrestore(&phba->hbalock, iflag);
3886
3887 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3888 case CQE_CODE_COMPL_WQE:
3889 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3890 cq_event);
3891
3892 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3893 irspiocbq);
3894 if (irspiocbq)
3895 lpfc_sli_sp_handle_rspiocb(phba, pring,
3896 irspiocbq);
3897 count++;
3898 break;
3899 case CQE_CODE_RECEIVE:
3900 case CQE_CODE_RECEIVE_V1:
3901 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3902 cq_event);
3903 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3904 count++;
3905 break;
3906 default:
3907 break;
3908 }
3909 
3910 /* Limit the number of events processed to 64 to avoid soft lockups */
3911 if (count == 64)
3912 break;
3913 }
3914 }
3915
3916 /**
3917 * lpfc_sli_abort_iocb_ring - Abort all iocbs on a ring
3918 * @phba: Pointer to HBA context object.
3919 * @pring: Pointer to driver SLI ring object.
3920 *
3921 * This function aborts all iocbs on the given ring: iocbs still
3922 * pending on the txq are cancelled with a local-reject status, and
3923 * an abort is issued for every iocb outstanding on the txcmplq.
3924 **/
3925 
3926 void
3927 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3928 {
3929 LIST_HEAD(completions);
3930 struct lpfc_iocbq *iocb, *next_iocb;
3931
3932 if (pring->ringno == LPFC_ELS_RING) {
3933 lpfc_fabric_abort_hba(phba);
3934 }
3935 
3936 /* Error everything on the txq since these iocbs have not yet
3937 * been given to the FW.
3938 */
3939 if (phba->sli_rev >= LPFC_SLI_REV4) {
3940 spin_lock_irq(&pring->ring_lock);
3941 list_splice_init(&pring->txq, &completions);
3942 pring->txq_cnt = 0;
3943 spin_unlock_irq(&pring->ring_lock);
3944
3945 spin_lock_irq(&phba->hbalock);
3946 /* Next issue ABTS for everything on the txcmplq */
3947 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3948 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3949 spin_unlock_irq(&phba->hbalock);
3950 } else {
3951 spin_lock_irq(&phba->hbalock);
3952 list_splice_init(&pring->txq, &completions);
3953 pring->txq_cnt = 0;
3954
3955 /* Next issue ABTS for everything on the txcmplq */
3956 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3957 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3958 spin_unlock_irq(&phba->hbalock);
3959 }
3960
3961 /* Cancel all the IOCBs from the completions list */
3962 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3963 IOERR_SLI_ABORTED);
3964 }
3965
3966 /**
3967 * lpfc_sli_abort_fcp_rings - Abort all iocbs on all FCP rings
3968 * @phba: Pointer to HBA context object.
3969 *
3970 * This function aborts all iocbs on the FCP rings. On SLI4 ports it
3971 * walks every hardware queue; SLI3 ports have a single FCP ring.
3972 **/
3973 
3974 
3975 
3976 void
3977 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3978 {
3979 struct lpfc_sli *psli = &phba->sli;
3980 struct lpfc_sli_ring *pring;
3981 uint32_t i;
3982
3983
3984 if (phba->sli_rev >= LPFC_SLI_REV4) {
3985 for (i = 0; i < phba->cfg_hdw_queue; i++) {
3986 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
3987 lpfc_sli_abort_iocb_ring(phba, pring);
3988 }
3989 } else {
3990 pring = &psli->sli3_ring[LPFC_FCP_RING];
3991 lpfc_sli_abort_iocb_ring(phba, pring);
3992 }
3993 }
3994
3995 /**
3996 * lpfc_sli_flush_io_rings - Flush all iocbs on the IO rings
3997 * @phba: Pointer to HBA context object.
3998 *
3999 * This function flushes all iocbs on the IO rings and marks the HBA
4000 * with HBA_IOQ_FLUSH so no new IO is queued. Pending and outstanding
4001 * iocbs are cancelled with IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN
4002 * without waiting for completions from the HBA.
4003 **/
4004 
4005 void
4006 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4007 {
4008 LIST_HEAD(txq);
4009 LIST_HEAD(txcmplq);
4010 struct lpfc_sli *psli = &phba->sli;
4011 struct lpfc_sli_ring *pring;
4012 uint32_t i;
4013 struct lpfc_iocbq *piocb, *next_iocb;
4014
4015 spin_lock_irq(&phba->hbalock);
4016 if (phba->hba_flag & HBA_IOQ_FLUSH ||
4017 !phba->sli4_hba.hdwq) {
4018 spin_unlock_irq(&phba->hbalock);
4019 return;
4020 }
4021
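/* Indicate the I/O queues are being flushed */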
4022 phba->hba_flag |= HBA_IOQ_FLUSH;
4023 spin_unlock_irq(&phba->hbalock);
4024
4025
4026 if (phba->sli_rev >= LPFC_SLI_REV4) {
4027 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4028 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4029
4030 spin_lock_irq(&pring->ring_lock);
4031
4032 list_splice_init(&pring->txq, &txq);
4033 list_for_each_entry_safe(piocb, next_iocb,
4034 &pring->txcmplq, list)
4035 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4036
4037 list_splice_init(&pring->txcmplq, &txcmplq);
4038 pring->txq_cnt = 0;
4039 pring->txcmplq_cnt = 0;
4040 spin_unlock_irq(&pring->ring_lock);
4041
4042
4043 lpfc_sli_cancel_iocbs(phba, &txq,
4044 IOSTAT_LOCAL_REJECT,
4045 IOERR_SLI_DOWN);
4046
4047 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4048 IOSTAT_LOCAL_REJECT,
4049 IOERR_SLI_DOWN);
4050 }
4051 } else {
4052 pring = &psli->sli3_ring[LPFC_FCP_RING];
4053
4054 spin_lock_irq(&phba->hbalock);
4055
4056 list_splice_init(&pring->txq, &txq);
4057 list_for_each_entry_safe(piocb, next_iocb,
4058 &pring->txcmplq, list)
4059 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4060
4061 list_splice_init(&pring->txcmplq, &txcmplq);
4062 pring->txq_cnt = 0;
4063 pring->txcmplq_cnt = 0;
4064 spin_unlock_irq(&phba->hbalock);
4065
4066
4067 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4068 IOERR_SLI_DOWN);
4069
4070 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4071 IOERR_SLI_DOWN);
4072 }
4073 }
4074
4075 /**
4076 * lpfc_sli_brdready_s3 - Check for SLI3 HBA readiness after restart
4077 * @phba: Pointer to HBA context object.
4078 * @mask: Bit mask to be checked in the host status register.
4079 *
4080 * This function polls the host status register, with increasing
4081 * delays, until every bit in @mask is set or an error attention is
4082 * raised. Midway through the wait the HBA is restarted once more.
4083 * It returns zero when the HBA is ready and non-zero when the HBA
4084 * fails to come ready or reports an error.
4085 **/
4086 
4087 
4088 static int
4089 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4090 {
4091 uint32_t status;
4092 int i = 0;
4093 int retval = 0;
4094
4095
4096 if (lpfc_readl(phba->HSregaddr, &status))
4097 return 1;
4098
4099 /*
4100 * Check the status register: every 10ms for the first 5 tries,
4101 * then every 500ms up to 10 tries, then every 2.5s. The board
4102 * is restarted once at try 15 and the wait gives up after 20.
4103 */
4104 
4105 while (((status & mask) != mask) &&
4106 !(status & HS_FFERM) &&
4107 i++ < 20) {
4108
4109 if (i <= 5)
4110 msleep(10);
4111 else if (i <= 10)
4112 msleep(500);
4113 else
4114 msleep(2500);
4115
4116 if (i == 15) {
4117
4118 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4119 lpfc_sli_brdrestart(phba);
4120 }
4121
4122 if (lpfc_readl(phba->HSregaddr, &status)) {
4123 retval = 1;
4124 break;
4125 }
4126 }
4127
4128
4129 if ((status & HS_FFERM) || (i >= 20)) {
4130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4131 "2751 Adapter failed to restart, "
4132 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4133 status,
4134 readl(phba->MBslimaddr + 0xa8),
4135 readl(phba->MBslimaddr + 0xac));
4136 phba->link_state = LPFC_HBA_ERROR;
4137 retval = 1;
4138 }
4139
4140 return retval;
4141 }
4142 /**
4143 * lpfc_sli_brdready_s4 - Check for SLI4 HBA readiness after restart
4144 * @phba: Pointer to HBA context object.
4145 * @mask: Bit mask to be checked (unused on SLI4 ports).
4146 *
4147 * This function checks the SLI4 port status registers for readiness,
4148 * restarting the HBA once if the first check fails. It returns zero
4149 * when the port is ready and marks the HBA as errored otherwise.
4150 **/
4151 
4152 
4153 
4154 static int
4155 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4156 {
4157 uint32_t status;
4158 int retval = 0;
4159
4160
4161 status = lpfc_sli4_post_status_check(phba);
4162
4163 if (status) {
4164 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4165 lpfc_sli_brdrestart(phba);
4166 status = lpfc_sli4_post_status_check(phba);
4167 }
4168
4169
4170 if (status) {
4171 phba->link_state = LPFC_HBA_ERROR;
4172 retval = 1;
4173 } else
4174 phba->sli4_hba.intr_enable = 0;
4175
4176 return retval;
4177 }
4178
4179 /**
4180 * lpfc_sli_brdready - Wrapper for the HBA readiness check
4181 * @phba: Pointer to HBA context object.
4182 * @mask: Bit mask to be checked in the host status register.
4183 *
4184 * Dispatches to the SLI3 or SLI4 specific readiness routine.
4185 **/
4186 
4187 int
4188 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4189 {
4190 return phba->lpfc_sli_brdready(phba, mask);
4191 }
4192
4193 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4194
4195 /**
4196 * lpfc_reset_barrier - Make the HBA ready for an HBA reset
4197 * @phba: Pointer to HBA context object.
4198 *
4199 * This function quiesces older (Helios/Thor) HBAs before a reset by
4200 * writing a KILL_BOARD barrier through SLIM. Caller holds hbalock.
4201 **/
4202 void lpfc_reset_barrier(struct lpfc_hba *phba)
4203 {
4204 uint32_t __iomem *resp_buf;
4205 uint32_t __iomem *mbox_buf;
4206 volatile uint32_t mbox;
4207 uint32_t hc_copy, ha_copy, resp_data;
4208 int i;
4209 uint8_t hdrtype;
4210
4211 lockdep_assert_held(&phba->hbalock);
4212
4213 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4214 if (hdrtype != 0x80 ||
4215 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4216 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4217 return;
4218
4219
4220
4221
4222
4223 resp_buf = phba->MBslimaddr;
4224
4225
4226 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4227 return;
4228 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4229 readl(phba->HCregaddr);
4230 phba->link_flag |= LS_IGNORE_ERATT;
4231
4232 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4233 return;
4234 if (ha_copy & HA_ERATT) {
4235
4236 writel(HA_ERATT, phba->HAregaddr);
4237 phba->pport->stopped = 1;
4238 }
4239
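/* Build a KILL_BOARD mailbox word that is initially owned by the chip */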
4240 mbox = 0;
4241 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4242 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4243
4244 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4245 mbox_buf = phba->MBslimaddr;
4246 writel(mbox, mbox_buf);
4247
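/* Poll up to 50ms for the chip to complement the test pattern */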
4248 for (i = 0; i < 50; i++) {
4249 if (lpfc_readl((resp_buf + 1), &resp_data))
4250 return;
4251 if (resp_data != ~(BARRIER_TEST_PATTERN))
4252 mdelay(1);
4253 else
4254 break;
4255 }
4256 resp_data = 0;
4257 if (lpfc_readl((resp_buf + 1), &resp_data))
4258 return;
4259 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4260 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4261 phba->pport->stopped)
4262 goto restore_hc;
4263 else
4264 goto clear_errat;
4265 }
4266
4267 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4268 resp_data = 0;
4269 for (i = 0; i < 500; i++) {
4270 if (lpfc_readl(resp_buf, &resp_data))
4271 return;
4272 if (resp_data != mbox)
4273 mdelay(1);
4274 else
4275 break;
4276 }
4277
4278 clear_errat:
4279
4280 while (++i < 500) {
4281 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4282 return;
4283 if (!(ha_copy & HA_ERATT))
4284 mdelay(1);
4285 else
4286 break;
4287 }
4288
4289 if (readl(phba->HAregaddr) & HA_ERATT) {
4290 writel(HA_ERATT, phba->HAregaddr);
4291 phba->pport->stopped = 1;
4292 }
4293
4294 restore_hc:
4295 phba->link_flag &= ~LS_IGNORE_ERATT;
4296 writel(hc_copy, phba->HCregaddr);
4297 readl(phba->HCregaddr);
4298 }
4299
4300 /**
4301 * lpfc_sli_brdkill - Issue a KILL_BOARD mailbox command to the HBA
4302 * @phba: Pointer to HBA context object.
4303 *
4304 * This function disables error-attention interrupts, issues the
4305 * KILL_BOARD mailbox command and then waits up to three seconds for
4306 * the resulting error attention. It returns zero when the board was
4307 * killed successfully and one on failure.
4308 **/
4309 
4310 
4311 int
4312 lpfc_sli_brdkill(struct lpfc_hba *phba)
4313 {
4314 struct lpfc_sli *psli;
4315 LPFC_MBOXQ_t *pmb;
4316 uint32_t status;
4317 uint32_t ha_copy;
4318 int retval;
4319 int i = 0;
4320
4321 psli = &phba->sli;
4322
4323
4324 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4325 "0329 Kill HBA Data: x%x x%x\n",
4326 phba->pport->port_state, psli->sli_flag);
4327
4328 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4329 if (!pmb)
4330 return 1;
4331
4332
4333 spin_lock_irq(&phba->hbalock);
4334 if (lpfc_readl(phba->HCregaddr, &status)) {
4335 spin_unlock_irq(&phba->hbalock);
4336 mempool_free(pmb, phba->mbox_mem_pool);
4337 return 1;
4338 }
4339 status &= ~HC_ERINT_ENA;
4340 writel(status, phba->HCregaddr);
4341 readl(phba->HCregaddr);
4342 phba->link_flag |= LS_IGNORE_ERATT;
4343 spin_unlock_irq(&phba->hbalock);
4344
4345 lpfc_kill_board(phba, pmb);
4346 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4347 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4348
4349 if (retval != MBX_SUCCESS) {
4350 if (retval != MBX_BUSY)
4351 mempool_free(pmb, phba->mbox_mem_pool);
4352 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4353 "2752 KILL_BOARD command failed retval %d\n",
4354 retval);
4355 spin_lock_irq(&phba->hbalock);
4356 phba->link_flag &= ~LS_IGNORE_ERATT;
4357 spin_unlock_irq(&phba->hbalock);
4358 return 1;
4359 }
4360
4361 spin_lock_irq(&phba->hbalock);
4362 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4363 spin_unlock_irq(&phba->hbalock);
4364
4365 mempool_free(pmb, phba->mbox_mem_pool);
4366 
4367 /*
4368 * There is no completion for a KILL_BOARD mailbox command. Poll
4369 * for the resulting error attention every 100ms, for up to three
4370 * seconds.
4371 */
4372 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4373 return 1;
4374 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4375 mdelay(100);
4376 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4377 return 1;
4378 }
4379
4380 del_timer_sync(&psli->mbox_tmo);
4381 if (ha_copy & HA_ERATT) {
4382 writel(HA_ERATT, phba->HAregaddr);
4383 phba->pport->stopped = 1;
4384 }
4385 spin_lock_irq(&phba->hbalock);
4386 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4387 psli->mbox_active = NULL;
4388 phba->link_flag &= ~LS_IGNORE_ERATT;
4389 spin_unlock_irq(&phba->hbalock);
4390
4391 lpfc_hba_down_post(phba);
4392 phba->link_state = LPFC_HBA_ERROR;
4393
4394 return ha_copy & HA_ERATT ? 0 : 1;
4395 }
4396
4397 /**
4398 * lpfc_sli_brdreset - Reset an SLI3 HBA
4399 * @phba: Pointer to HBA context object.
4400 *
4401 * This function resets the HBA by toggling HC_INITFF in the host
4402 * control register, with parity and SERR reporting temporarily
4403 * disabled in PCI config space, and then reinitializes the ring
4404 * indices. It returns zero on success and -EIO if the PCI config
4405 * read fails.
4406 **/
4407 
4408 int
4409 lpfc_sli_brdreset(struct lpfc_hba *phba)
4410 {
4411 struct lpfc_sli *psli;
4412 struct lpfc_sli_ring *pring;
4413 uint16_t cfg_value;
4414 int i;
4415
4416 psli = &phba->sli;
4417
4418
4419 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4420 "0325 Reset HBA Data: x%x x%x\n",
4421 (phba->pport) ? phba->pport->port_state : 0,
4422 psli->sli_flag);
4423
4424
4425 phba->fc_eventTag = 0;
4426 phba->link_events = 0;
4427 if (phba->pport) {
4428 phba->pport->fc_myDID = 0;
4429 phba->pport->fc_prevDID = 0;
4430 }
4431
4432
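/* Turn off parity checking and SERR during the physical reset */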
4433 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4434 return -EIO;
4435
4436 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4437 (cfg_value &
4438 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4439
4440 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4441
4442
4443 writel(HC_INITFF, phba->HCregaddr);
4444 mdelay(1);
4445 readl(phba->HCregaddr);
4446 writel(0, phba->HCregaddr);
4447 readl(phba->HCregaddr);
4448
4449
4450 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4451
4452
4453 for (i = 0; i < psli->num_rings; i++) {
4454 pring = &psli->sli3_ring[i];
4455 pring->flag = 0;
4456 pring->sli.sli3.rspidx = 0;
4457 pring->sli.sli3.next_cmdidx = 0;
4458 pring->sli.sli3.local_getidx = 0;
4459 pring->sli.sli3.cmdidx = 0;
4460 pring->missbufcnt = 0;
4461 }
4462
4463 phba->link_state = LPFC_WARM_START;
4464 return 0;
4465 }
4466
4467 /**
4468 * lpfc_sli4_brdreset - Reset an SLI4 HBA
4469 * @phba: Pointer to HBA context object.
4470 *
4471 * This function resets an SLI4 HBA by performing a PCI function
4472 * reset, unless a firmware dump is pending, in which case the reset
4473 * is skipped so the dump is preserved. Returns zero on success or a
4474 * negative error code on failure.
4475 **/
4476 
4477 int
4478 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4479 {
4480 struct lpfc_sli *psli = &phba->sli;
4481 uint16_t cfg_value;
4482 int rc = 0;
4483
4484
4485 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4486 "0295 Reset HBA Data: x%x x%x x%x\n",
4487 phba->pport->port_state, psli->sli_flag,
4488 phba->hba_flag);
4489
4490
4491 phba->fc_eventTag = 0;
4492 phba->link_events = 0;
4493 phba->pport->fc_myDID = 0;
4494 phba->pport->fc_prevDID = 0;
4495
4496 spin_lock_irq(&phba->hbalock);
4497 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4498 phba->fcf.fcf_flag = 0;
4499 spin_unlock_irq(&phba->hbalock);
4500
4501
4502 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4503 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4504 return rc;
4505 }
4506
4507
4508 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4509 "0389 Performing PCI function reset!\n");
4510
4511
4512 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4513 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4514 "3205 PCI read Config failed\n");
4515 return -EIO;
4516 }
4517
4518 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4519 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4520
4521
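/* Perform the actual PCI function reset */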
4522 rc = lpfc_pci_function_reset(phba);
4523
4524
4525 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4526
4527 return rc;
4528 }
4529
4530 /**
4531 * lpfc_sli_brdrestart_s3 - Restart an SLI3 HBA
4532 * @phba: Pointer to HBA context object.
4533 *
4534 * This function restarts the HBA by writing an MBX_RESTART mailbox
4535 * command directly into SLIM, issuing the reset barrier first. A
4536 * second SLIM word records whether the port had been initialized
4537 * before the restart. The HBA is then reset and the post-reset
4538 * cleanup performed; the function does not wait for the HBA to come
4539 * ready and always returns zero.
4540 **/
4541 
4542 
4543 static int
4544 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4545 {
4546 MAILBOX_t *mb;
4547 struct lpfc_sli *psli;
4548 volatile uint32_t word0;
4549 void __iomem *to_slim;
4550 uint32_t hba_aer_enabled;
4551
4552 spin_lock_irq(&phba->hbalock);
4553
4554
4555 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4556
4557 psli = &phba->sli;
4558
4559
4560 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4561 "0337 Restart HBA Data: x%x x%x\n",
4562 (phba->pport) ? phba->pport->port_state : 0,
4563 psli->sli_flag);
4564
4565 word0 = 0;
4566 mb = (MAILBOX_t *) &word0;
4567 mb->mbxCommand = MBX_RESTART;
4568 mb->mbxHc = 1;
4569
4570 lpfc_reset_barrier(phba);
4571
4572 to_slim = phba->MBslimaddr;
4573 writel(*(uint32_t *) mb, to_slim);
4574 readl(to_slim);
4575
4576
4577 if (phba->pport && phba->pport->port_state)
4578 word0 = 1;
4579 else
4580 word0 = 0;
4581 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4582 writel(*(uint32_t *) mb, to_slim);
4583 readl(to_slim);
4584
4585 lpfc_sli_brdreset(phba);
4586 if (phba->pport)
4587 phba->pport->stopped = 0;
4588 phba->link_state = LPFC_INIT_START;
4589 phba->hba_flag = 0;
4590 spin_unlock_irq(&phba->hbalock);
4591
4592 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4593 psli->stats_start = ktime_get_seconds();
4594
4595
4596 mdelay(100);
4597
4598
4599 if (hba_aer_enabled)
4600 pci_disable_pcie_error_reporting(phba->pcidev);
4601
4602 lpfc_hba_down_post(phba);
4603
4604 return 0;
4605 }
4606
4607 /**
4608 * lpfc_sli_brdrestart_s4 - Restart an SLI4 HBA
4609 * @phba: Pointer to HBA context object.
4610 *
4611 * This function resets the SLI4 HBA, performs the post-reset
4612 * cleanup and destroys the SLI4 queues. It returns the status of
4613 * the reset.
4614 **/
4615 
4616 static int
4617 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4618 {
4619 struct lpfc_sli *psli = &phba->sli;
4620 uint32_t hba_aer_enabled;
4621 int rc;
4622
4623
4624 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4625 "0296 Restart HBA Data: x%x x%x\n",
4626 phba->pport->port_state, psli->sli_flag);
4627
4628
4629 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4630
4631 rc = lpfc_sli4_brdreset(phba);
4632 if (rc) {
4633 phba->link_state = LPFC_HBA_ERROR;
4634 goto hba_down_queue;
4635 }
4636
4637 spin_lock_irq(&phba->hbalock);
4638 phba->pport->stopped = 0;
4639 phba->link_state = LPFC_INIT_START;
4640 phba->hba_flag = 0;
4641 spin_unlock_irq(&phba->hbalock);
4642
4643 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4644 psli->stats_start = ktime_get_seconds();
4645
4646
4647 if (hba_aer_enabled)
4648 pci_disable_pcie_error_reporting(phba->pcidev);
4649
4650 hba_down_queue:
4651 lpfc_hba_down_post(phba);
4652 lpfc_sli4_queue_destroy(phba);
4653
4654 return rc;
4655 }
4656
4657 /**
4658 * lpfc_sli_brdrestart - Wrapper for restarting the HBA
4659 * @phba: Pointer to HBA context object.
4660 *
4661 * Dispatches to the SLI3 or SLI4 specific restart routine.
4662 **/
4663 
4664 int
4665 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4666 {
4667 return phba->lpfc_sli_brdrestart(phba);
4668 }
4669
4670 /**
4671 * lpfc_sli_chipset_init - Initialize an SLI3 chipset after restart
4672 * @phba: Pointer to HBA context object.
4673 *
4674 * This function polls the host status register, with progressively
4675 * longer delays, until the HBA reports readiness for mailbox
4676 * commands, restarting it once midway through the wait. It returns
4677 * zero on success and a negative error code on timeout or error.
4678 **/
4679 
4680 int
4681 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4682 {
4683 uint32_t status, i = 0;
4684
4685
4686 if (lpfc_readl(phba->HSregaddr, &status))
4687 return -EIO;
4688
4689
4690 i = 0;
4691 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4692 /*
4693 * Check the status register: every 10ms for the first 10
4694 * tries, then every 100ms up to 100 tries, then every second.
4695 * The board is restarted once at try 150 and the wait gives
4696 * up after 200 tries.
4697 */
4698 
4699 
4700 
4701 if (i++ >= 200) {
4702
4703
4704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4705 "0436 Adapter failed to init, "
4706 "timeout, status reg x%x, "
4707 "FW Data: A8 x%x AC x%x\n", status,
4708 readl(phba->MBslimaddr + 0xa8),
4709 readl(phba->MBslimaddr + 0xac));
4710 phba->link_state = LPFC_HBA_ERROR;
4711 return -ETIMEDOUT;
4712 }
4713
4714
4715 if (status & HS_FFERM) {
4716
4717
4718
4719 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4720 "0437 Adapter failed to init, "
4721 "chipset, status reg x%x, "
4722 "FW Data: A8 x%x AC x%x\n", status,
4723 readl(phba->MBslimaddr + 0xa8),
4724 readl(phba->MBslimaddr + 0xac));
4725 phba->link_state = LPFC_HBA_ERROR;
4726 return -EIO;
4727 }
4728
4729 if (i <= 10)
4730 msleep(10);
4731 else if (i <= 100)
4732 msleep(100);
4733 else
4734 msleep(1000);
4735
4736 if (i == 150) {
4737
4738 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4739 lpfc_sli_brdrestart(phba);
4740 }
4741
4742 if (lpfc_readl(phba->HSregaddr, &status))
4743 return -EIO;
4744 }
4745
4746
4747 if (status & HS_FFERM) {
4748
4749
4750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4751 "0438 Adapter failed to init, chipset, "
4752 "status reg x%x, "
4753 "FW Data: A8 x%x AC x%x\n", status,
4754 readl(phba->MBslimaddr + 0xa8),
4755 readl(phba->MBslimaddr + 0xac));
4756 phba->link_state = LPFC_HBA_ERROR;
4757 return -EIO;
4758 }
4759
4760
4761 writel(0, phba->HCregaddr);
4762 readl(phba->HCregaddr);
4763
4764
4765 writel(0xffffffff, phba->HAregaddr);
4766 readl(phba->HAregaddr);
4767 return 0;
4768 }
4769
4770 /**
4771 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4772 *
4773 * Returns the number of entries in the static lpfc_hbq_defs array.
4774 **/
4775 
4776 int
4777 lpfc_sli_hbq_count(void)
4778 {
4779 return ARRAY_SIZE(lpfc_hbq_defs);
4780 }
4781
4782 /**
4783 * lpfc_sli_hbq_entry_count - Get the total count of HBQ entries
4784 *
4785 * Returns the sum of the entry counts of all configured HBQs.
4786 **/
4787 
4788 
4789 static int
4790 lpfc_sli_hbq_entry_count(void)
4791 {
4792 int hbq_count = lpfc_sli_hbq_count();
4793 int count = 0;
4794 int i;
4795
4796 for (i = 0; i < hbq_count; ++i)
4797 count += lpfc_hbq_defs[i]->entry_count;
4798 return count;
4799 }
4800
4801 /**
4802 * lpfc_sli_hbq_size - Get the memory required for all HBQs
4803 *
4804 * Returns the total HBQ entry count times the size of one entry.
4805 **/
4806 
4807 int
4808 lpfc_sli_hbq_size(void)
4809 {
4810 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4811 }
4812
4813 /**
4814 * lpfc_sli_hbq_setup - Configure and initialize the HBQs
4815 * @phba: Pointer to HBA context object.
4816 *
4817 * This function configures every HBQ with the firmware via
4818 * CONFIG_HBQ mailbox commands and posts the initial buffers to each
4819 * of them. It returns zero on success, -ENOMEM or -ENXIO on failure.
4820 **/
4821 
4822 static int
4823 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4824 {
4825 int hbq_count = lpfc_sli_hbq_count();
4826 LPFC_MBOXQ_t *pmb;
4827 MAILBOX_t *pmbox;
4828 uint32_t hbqno;
4829 uint32_t hbq_entry_index;
4830
4831
4832
4833
4834 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4835
4836 if (!pmb)
4837 return -ENOMEM;
4838
4839 pmbox = &pmb->u.mb;
4840
4841
4842 phba->link_state = LPFC_INIT_MBX_CMDS;
4843 phba->hbq_in_use = 1;
4844
4845 hbq_entry_index = 0;
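/* Configure each HBQ with the firmware via a CONFIG_HBQ mailbox */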
4846 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4847 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4848 phba->hbqs[hbqno].hbqPutIdx = 0;
4849 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4850 phba->hbqs[hbqno].entry_count =
4851 lpfc_hbq_defs[hbqno]->entry_count;
4852 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4853 hbq_entry_index, pmb);
4854 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4855
4856 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4857
4858
4859
4860 lpfc_printf_log(phba, KERN_ERR,
4861 LOG_SLI | LOG_VPORT,
4862 "1805 Adapter failed to init. "
4863 "Data: x%x x%x x%x\n",
4864 pmbox->mbxCommand,
4865 pmbox->mbxStatus, hbqno);
4866
4867 phba->link_state = LPFC_HBA_ERROR;
4868 mempool_free(pmb, phba->mbox_mem_pool);
4869 return -ENXIO;
4870 }
4871 }
4872 phba->hbq_count = hbq_count;
4873
4874 mempool_free(pmb, phba->mbox_mem_pool);
4875
4876
4877 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4878 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4879 return 0;
4880 }
4881
4882 /**
4883 * lpfc_sli4_rb_setup - Initialize the SLI4 ELS receive buffers
4884 * @phba: Pointer to HBA context object.
4885 *
4886 * SLI4 ports use only the ELS HBQ for receive buffers; this function
4887 * posts its initial buffers and records the HBQ count as one. It
4888 * always returns zero.
4889 **/
4890 
4891 static int
4892 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4893 {
4894 phba->hbq_in_use = 1;
4895 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4896 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4897 phba->hbq_count = 1;
4898 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4899
4900 return 0;
4901 }
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911
4912
4913
4914
4915
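/**
 * lpfc_sli_config_port - Issue the config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: SLI mode to configure (2 or 3).
 *
 * Restarts the HBA firmware and issues a CONFIG_PORT mailbox command to
 * bring the SLI interface up in the requested mode, retrying the reset
 * once. For SLI-3 it also records the NPIV, DSS, HBQ, CRP and
 * BlockGuard capabilities granted by the port. Returns 0 on success or
 * a negative error code.
 **/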
4916 int
4917 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4918 {
4919 LPFC_MBOXQ_t *pmb;
4920 uint32_t resetcount = 0, rc = 0, done = 0;
4921
4922 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4923 if (!pmb) {
4924 phba->link_state = LPFC_HBA_ERROR;
4925 return -ENOMEM;
4926 }
4927
4928 phba->sli_rev = sli_mode;
4929 while (resetcount < 2 && !done) {
4930 spin_lock_irq(&phba->hbalock);
4931 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4932 spin_unlock_irq(&phba->hbalock);
4933 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4934 lpfc_sli_brdrestart(phba);
4935 rc = lpfc_sli_chipset_init(phba);
4936 if (rc)
4937 break;
4938
4939 spin_lock_irq(&phba->hbalock);
4940 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4941 spin_unlock_irq(&phba->hbalock);
4942 resetcount++;
4943
4944
4945
4946
4947
4948
4949 rc = lpfc_config_port_prep(phba);
4950 if (rc == -ERESTART) {
4951 phba->link_state = LPFC_LINK_UNKNOWN;
4952 continue;
4953 } else if (rc)
4954 break;
4955
4956 phba->link_state = LPFC_INIT_MBX_CMDS;
4957 lpfc_config_port(phba, pmb);
4958 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4959 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4960 LPFC_SLI3_HBQ_ENABLED |
4961 LPFC_SLI3_CRP_ENABLED |
4962 LPFC_SLI3_DSS_ENABLED);
4963 if (rc != MBX_SUCCESS) {
4964 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4965 "0442 Adapter failed to init, mbxCmd x%x "
4966 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4967 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4968 spin_lock_irq(&phba->hbalock);
4969 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4970 spin_unlock_irq(&phba->hbalock);
4971 rc = -ENXIO;
4972 } else {
4973
4974 spin_lock_irq(&phba->hbalock);
4975 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4976 spin_unlock_irq(&phba->hbalock);
4977 done = 1;
4978
4979 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4980 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4981 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4982 "3110 Port did not grant ASABT\n");
4983 }
4984 }
4985 if (!done) {
4986 rc = -EINVAL;
4987 goto do_prep_failed;
4988 }
4989 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4990 if (!pmb->u.mb.un.varCfgPort.cMA) {
4991 rc = -ENXIO;
4992 goto do_prep_failed;
4993 }
4994 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
4995 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4996 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4997 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4998 phba->max_vpi : phba->max_vports;
4999
5000 } else
5001 phba->max_vpi = 0;
5002 phba->fips_level = 0;
5003 phba->fips_spec_rev = 0;
5004 if (pmb->u.mb.un.varCfgPort.gdss) {
5005 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
5006 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
5007 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
5008 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5009 "2850 Security Crypto Active. FIPS x%d "
5010 "(Spec Rev: x%d)",
5011 phba->fips_level, phba->fips_spec_rev);
5012 }
5013 if (pmb->u.mb.un.varCfgPort.sec_err) {
5014 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5015 "2856 Config Port Security Crypto "
5016 "Error: x%x ",
5017 pmb->u.mb.un.varCfgPort.sec_err);
5018 }
5019 if (pmb->u.mb.un.varCfgPort.gerbm)
5020 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5021 if (pmb->u.mb.un.varCfgPort.gcrp)
5022 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5023
5024 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5025 phba->port_gp = phba->mbox->us.s3_pgp.port;
5026
5027 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5028 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5029 phba->cfg_enable_bg = 0;
5030 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5031 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5032 "0443 Adapter did not grant "
5033 "BlockGuard\n");
5034 }
5035 }
5036 } else {
5037 phba->hbq_get = NULL;
5038 phba->port_gp = phba->mbox->us.s2.port;
5039 phba->max_vpi = 0;
5040 }
5041 do_prep_failed:
5042 mempool_free(pmb, phba->mbox_mem_pool);
5043 return rc;
5044 }
5045
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055
5056
5057
5058
5059
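/**
 * lpfc_sli_hba_setup - SLI initialization function
 * @phba: Pointer to HBA context object.
 *
 * Main SLI-2/SLI-3 initialization path: selects the SLI mode, issues
 * CONFIG_PORT, optionally enables PCIe AER, maps the IOCB rings, sets
 * up VPI resources and HBQs, and finishes with lpfc_config_port_post().
 * Returns 0 on success or a negative error code.
 **/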
5060 int
5061 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5062 {
5063 uint32_t rc;
5064 int mode = 3, i;
5065 int longs;
5066
5067 switch (phba->cfg_sli_mode) {
5068 case 2:
5069 if (phba->cfg_enable_npiv) {
5070 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5071 "1824 NPIV enabled: Override sli_mode "
5072 "parameter (%d) to auto (0).\n",
5073 phba->cfg_sli_mode);
5074 break;
5075 }
5076 mode = 2;
5077 break;
5078 case 0:
5079 case 3:
5080 break;
5081 default:
5082 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5083 "1819 Unrecognized sli_mode parameter: %d.\n",
5084 phba->cfg_sli_mode);
5085
5086 break;
5087 }
5088 phba->fcp_embed_io = 0;
5089
5090 rc = lpfc_sli_config_port(phba, mode);
5091
5092 if (rc && phba->cfg_sli_mode == 3)
5093 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5094 "1820 Unable to select SLI-3. "
5095 "Not supported by adapter.\n");
5096 if (rc && mode != 2)
5097 rc = lpfc_sli_config_port(phba, 2);
5098 else if (rc && mode == 2)
5099 rc = lpfc_sli_config_port(phba, 3);
5100 if (rc)
5101 goto lpfc_sli_hba_setup_error;
5102
5103
5104 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5105 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5106 if (!rc) {
5107 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5108 "2709 This device supports "
5109 "Advanced Error Reporting (AER)\n");
5110 spin_lock_irq(&phba->hbalock);
5111 phba->hba_flag |= HBA_AER_ENABLED;
5112 spin_unlock_irq(&phba->hbalock);
5113 } else {
5114 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5115 "2708 This device does not support "
5116 "Advanced Error Reporting (AER): %d\n",
5117 rc);
5118 phba->cfg_aer_support = 0;
5119 }
5120 }
5121
5122 if (phba->sli_rev == 3) {
5123 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5124 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5125 } else {
5126 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5127 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5128 phba->sli3_options = 0;
5129 }
5130
5131 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5132 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5133 phba->sli_rev, phba->max_vpi);
5134 rc = lpfc_sli_ring_map(phba);
5135
5136 if (rc)
5137 goto lpfc_sli_hba_setup_error;
5138
5139
5140 if (phba->sli_rev == LPFC_SLI_REV3) {
5141
5142
5143
5144
5145
5146 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5147 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5148 phba->vpi_bmask = kcalloc(longs,
5149 sizeof(unsigned long),
5150 GFP_KERNEL);
5151 if (!phba->vpi_bmask) {
5152 rc = -ENOMEM;
5153 goto lpfc_sli_hba_setup_error;
5154 }
5155
5156 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5157 sizeof(uint16_t),
5158 GFP_KERNEL);
5159 if (!phba->vpi_ids) {
5160 kfree(phba->vpi_bmask);
5161 rc = -ENOMEM;
5162 goto lpfc_sli_hba_setup_error;
5163 }
5164 for (i = 0; i < phba->max_vpi; i++)
5165 phba->vpi_ids[i] = i;
5166 }
5167 }
5168
5169
5170 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5171 rc = lpfc_sli_hbq_setup(phba);
5172 if (rc)
5173 goto lpfc_sli_hba_setup_error;
5174 }
5175 spin_lock_irq(&phba->hbalock);
5176 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5177 spin_unlock_irq(&phba->hbalock);
5178
5179 rc = lpfc_config_port_post(phba);
5180 if (rc)
5181 goto lpfc_sli_hba_setup_error;
5182
5183 return rc;
5184
5185 lpfc_sli_hba_setup_error:
5186 phba->link_state = LPFC_HBA_ERROR;
5187 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5188 "0445 Firmware initialization failed\n");
5189 return rc;
5190 }
5191
5192
5193
5194
5195
5196
5197
5198
5199
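/**
 * lpfc_sli4_read_fcoe_params - Read FCoE params from config region 23
 * @phba: Pointer to HBA context object.
 *
 * Issues a DUMP mailbox command to read config region 23 and parses the
 * records to populate the driver's FCoE parameters. Returns 0 on
 * success or a negative error code.
 **/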
5200 static int
5201 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5202 {
5203 LPFC_MBOXQ_t *mboxq;
5204 struct lpfc_dmabuf *mp;
5205 struct lpfc_mqe *mqe;
5206 uint32_t data_length;
5207 int rc;
5208
5209
5210 phba->valid_vlan = 0;
5211 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5212 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5213 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5214
5215 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5216 if (!mboxq)
5217 return -ENOMEM;
5218
5219 mqe = &mboxq->u.mqe;
5220 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5221 rc = -ENOMEM;
5222 goto out_free_mboxq;
5223 }
5224
5225 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5226 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5227
5228 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5229 "(%d):2571 Mailbox cmd x%x Status x%x "
5230 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5231 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5232 "CQ: x%x x%x x%x x%x\n",
5233 mboxq->vport ? mboxq->vport->vpi : 0,
5234 bf_get(lpfc_mqe_command, mqe),
5235 bf_get(lpfc_mqe_status, mqe),
5236 mqe->un.mb_words[0], mqe->un.mb_words[1],
5237 mqe->un.mb_words[2], mqe->un.mb_words[3],
5238 mqe->un.mb_words[4], mqe->un.mb_words[5],
5239 mqe->un.mb_words[6], mqe->un.mb_words[7],
5240 mqe->un.mb_words[8], mqe->un.mb_words[9],
5241 mqe->un.mb_words[10], mqe->un.mb_words[11],
5242 mqe->un.mb_words[12], mqe->un.mb_words[13],
5243 mqe->un.mb_words[14], mqe->un.mb_words[15],
5244 mqe->un.mb_words[16], mqe->un.mb_words[50],
5245 mboxq->mcqe.word0,
5246 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5247 mboxq->mcqe.trailer);
5248
5249 if (rc) {
5250 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5251 kfree(mp);
5252 rc = -EIO;
5253 goto out_free_mboxq;
5254 }
5255 data_length = mqe->un.mb_words[5];
5256 if (data_length > DMP_RGN23_SIZE) {
5257 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5258 kfree(mp);
5259 rc = -EIO;
5260 goto out_free_mboxq;
5261 }
5262
5263 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5264 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5265 kfree(mp);
5266 rc = 0;
5267
5268 out_free_mboxq:
5269 mempool_free(mboxq, phba->mbox_mem_pool);
5270 return rc;
5271 }
5272
5273
5274
5275
5276
5277
5278
5279
5280
5281
5282
5283
5284
5285
5286
5287
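/**
 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the driver internal queue element for mailbox command.
 * @vpd: pointer to the memory to hold the resulting port vpd data.
 * @vpd_size: on input, the number of bytes allocated to @vpd;
 *            on output, the number of data bytes in @vpd.
 *
 * Executes a READ_REV SLI4 mailbox command and copies the returned VPD
 * data into @vpd. Returns 0 on success, -ENOMEM or -EIO on failure.
 **/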
5288 static int
5289 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5290 uint8_t *vpd, uint32_t *vpd_size)
5291 {
5292 int rc = 0;
5293 uint32_t dma_size;
5294 struct lpfc_dmabuf *dmabuf;
5295 struct lpfc_mqe *mqe;
5296
5297 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5298 if (!dmabuf)
5299 return -ENOMEM;
5300
5301
5302
5303
5304
5305 dma_size = *vpd_size;
5306 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5307 &dmabuf->phys, GFP_KERNEL);
5308 if (!dmabuf->virt) {
5309 kfree(dmabuf);
5310 return -ENOMEM;
5311 }
5312
5313
5314
5315
5316
5317
5318 lpfc_read_rev(phba, mboxq);
5319 mqe = &mboxq->u.mqe;
5320 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5321 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5322 mqe->un.read_rev.word1 &= 0x0000FFFF;
5323 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5324 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5325
5326 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5327 if (rc) {
5328 dma_free_coherent(&phba->pcidev->dev, dma_size,
5329 dmabuf->virt, dmabuf->phys);
5330 kfree(dmabuf);
5331 return -EIO;
5332 }
5333
5334
5335
5336
5337
5338
5339 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5340 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5341
5342 memcpy(vpd, dmabuf->virt, *vpd_size);
5343
5344 dma_free_coherent(&phba->pcidev->dev, dma_size,
5345 dmabuf->virt, dmabuf->phys);
5346 kfree(dmabuf);
5347 return 0;
5348 }
5349
5350
5351
5352
5353
5354
5355
5356
5357
5358
5359
5360
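/**
 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues a GET_CNTL_ATTRIBUTES mailbox command and caches the link
 * type, link number and BIOS version reported by the port. Returns 0
 * on success or a negative error code.
 **/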
5361 static int
5362 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5363 {
5364 LPFC_MBOXQ_t *mboxq;
5365 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5366 struct lpfc_controller_attribute *cntl_attr;
5367 void *virtaddr = NULL;
5368 uint32_t alloclen, reqlen;
5369 uint32_t shdr_status, shdr_add_status;
5370 union lpfc_sli4_cfg_shdr *shdr;
5371 int rc;
5372
5373 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5374 if (!mboxq)
5375 return -ENOMEM;
5376
5377
5378 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5379 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5380 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5381 LPFC_SLI4_MBX_NEMBED);
5382
5383 if (alloclen < reqlen) {
5384 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5385 "3084 Allocated DMA memory size (%d) is "
5386 "less than the requested DMA memory size "
5387 "(%d)\n", alloclen, reqlen);
5388 rc = -ENOMEM;
5389 goto out_free_mboxq;
5390 }
5391 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5392 virtaddr = mboxq->sge_array->addr[0];
5393 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5394 shdr = &mbx_cntl_attr->cfg_shdr;
5395 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5396 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5397 if (shdr_status || shdr_add_status || rc) {
5398 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5399 "3085 Mailbox x%x (x%x/x%x) failed, "
5400 "rc:x%x, status:x%x, add_status:x%x\n",
5401 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5402 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5403 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5404 rc, shdr_status, shdr_add_status);
5405 rc = -ENXIO;
5406 goto out_free_mboxq;
5407 }
5408
5409 cntl_attr = &mbx_cntl_attr->cntl_attr;
5410 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5411 phba->sli4_hba.lnk_info.lnk_tp =
5412 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5413 phba->sli4_hba.lnk_info.lnk_no =
5414 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5415
5416 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5417 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5418 sizeof(phba->BIOSVersion));
5419
5420 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5421 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5422 phba->sli4_hba.lnk_info.lnk_tp,
5423 phba->sli4_hba.lnk_info.lnk_no,
5424 phba->BIOSVersion);
5425 out_free_mboxq:
5426 if (rc != MBX_TIMEOUT) {
5427 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5428 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5429 else
5430 mempool_free(mboxq, phba->mbox_mem_pool);
5431 }
5432 return rc;
5433 }
5434
5435
5436
5437
5438
5439
5440
5441
5442
5443
5444
5445
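/**
 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
 * @phba: pointer to lpfc hba data structure.
 *
 * Retrieves the physical port name this PCI function is attached to via
 * the GET_PORT_NAME mailbox command, using the link number obtained
 * from the controller attributes. Returns 0 on success or a negative
 * error code.
 **/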
5446 static int
5447 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5448 {
5449 LPFC_MBOXQ_t *mboxq;
5450 struct lpfc_mbx_get_port_name *get_port_name;
5451 uint32_t shdr_status, shdr_add_status;
5452 union lpfc_sli4_cfg_shdr *shdr;
5453 char cport_name = 0;
5454 int rc;
5455
5456
5457 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5458 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5459
5460 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5461 if (!mboxq)
5462 return -ENOMEM;
5463
5464 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5465 lpfc_sli4_read_config(phba);
5466 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5467 goto retrieve_ppname;
5468
5469
5470 rc = lpfc_sli4_get_ctl_attr(phba);
5471 if (rc)
5472 goto out_free_mboxq;
5473
5474 retrieve_ppname:
5475 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5476 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5477 sizeof(struct lpfc_mbx_get_port_name) -
5478 sizeof(struct lpfc_sli4_cfg_mhdr),
5479 LPFC_SLI4_MBX_EMBED);
5480 get_port_name = &mboxq->u.mqe.un.get_port_name;
5481 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5482 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5483 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5484 phba->sli4_hba.lnk_info.lnk_tp);
5485 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5486 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5487 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5488 if (shdr_status || shdr_add_status || rc) {
5489 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5490 "3087 Mailbox x%x (x%x/x%x) failed: "
5491 "rc:x%x, status:x%x, add_status:x%x\n",
5492 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5493 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5494 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5495 rc, shdr_status, shdr_add_status);
5496 rc = -ENXIO;
5497 goto out_free_mboxq;
5498 }
5499 switch (phba->sli4_hba.lnk_info.lnk_no) {
5500 case LPFC_LINK_NUMBER_0:
5501 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5502 &get_port_name->u.response);
5503 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5504 break;
5505 case LPFC_LINK_NUMBER_1:
5506 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5507 &get_port_name->u.response);
5508 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5509 break;
5510 case LPFC_LINK_NUMBER_2:
5511 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5512 &get_port_name->u.response);
5513 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5514 break;
5515 case LPFC_LINK_NUMBER_3:
5516 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5517 &get_port_name->u.response);
5518 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5519 break;
5520 default:
5521 break;
5522 }
5523
5524 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5525 phba->Port[0] = cport_name;
5526 phba->Port[1] = '\0';
5527 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5528 "3091 SLI get port name: %s\n", phba->Port);
5529 }
5530
5531 out_free_mboxq:
5532 if (rc != MBX_TIMEOUT) {
5533 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5534 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5535 else
5536 mempool_free(mboxq, phba->mbox_mem_pool);
5537 }
5538 return rc;
5539 }
5540
5541
5542
5543
5544
5545
5546
5547
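/**
 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Explicitly rearms the mailbox, ELS and NVME LS completion queues, the
 * per-hardware-queue IO completion queues and event queues, and, when
 * NVMET is supported, the NVMET CQ set.
 **/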
5548 static void
5549 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5550 {
5551 int qidx;
5552 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5553 struct lpfc_sli4_hdw_queue *qp;
5554 struct lpfc_queue *eq;
5555
5556 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5557 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5558 if (sli4_hba->nvmels_cq)
5559 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5560 LPFC_QUEUE_REARM);
5561
5562 if (sli4_hba->hdwq) {
5563
5564 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5565 qp = &sli4_hba->hdwq[qidx];
5566
5567 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
5568 LPFC_QUEUE_REARM);
5569 }
5570
5571
5572 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5573 eq = sli4_hba->hba_eq_hdl[qidx].eq;
5574
5575 sli4_hba->sli4_write_eq_db(phba, eq,
5576 0, LPFC_QUEUE_REARM);
5577 }
5578 }
5579
5580 if (phba->nvmet_support) {
5581 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5582 sli4_hba->sli4_write_cq_db(phba,
5583 sli4_hba->nvmet_cqset[qidx], 0,
5584 LPFC_QUEUE_REARM);
5585 }
5586 }
5587 }
5588
5589
5590
5591
5592
5593
5594
5595
5596
5597
5598
5599
5600
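/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_count: buffer to hold the port's available extent count.
 * @extnt_size: buffer to hold the element count per extent.
 *
 * Queries the port for the number of available extents and the element
 * count per extent for the given resource type. Returns 0 on success
 * or a negative error code.
 **/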
5601 int
5602 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5603 uint16_t *extnt_count, uint16_t *extnt_size)
5604 {
5605 int rc = 0;
5606 uint32_t length;
5607 uint32_t mbox_tmo;
5608 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5609 LPFC_MBOXQ_t *mbox;
5610
5611 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5612 if (!mbox)
5613 return -ENOMEM;
5614
5615
5616 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5617 sizeof(struct lpfc_sli4_cfg_mhdr));
5618 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5619 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5620 length, LPFC_SLI4_MBX_EMBED);
5621
5622
5623 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5624 LPFC_SLI4_MBX_EMBED);
5625 if (unlikely(rc)) {
5626 rc = -EIO;
5627 goto err_exit;
5628 }
5629
5630 if (!phba->sli4_hba.intr_enable)
5631 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5632 else {
5633 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5634 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5635 }
5636 if (unlikely(rc)) {
5637 rc = -EIO;
5638 goto err_exit;
5639 }
5640
5641 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5642 if (bf_get(lpfc_mbox_hdr_status,
5643 &rsrc_info->header.cfg_shdr.response)) {
5644 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5645 "2930 Failed to get resource extents "
5646 "Status 0x%x Add'l Status 0x%x\n",
5647 bf_get(lpfc_mbox_hdr_status,
5648 &rsrc_info->header.cfg_shdr.response),
5649 bf_get(lpfc_mbox_hdr_add_status,
5650 &rsrc_info->header.cfg_shdr.response));
5651 rc = -EIO;
5652 goto err_exit;
5653 }
5654
5655 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5656 &rsrc_info->u.rsp);
5657 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5658 &rsrc_info->u.rsp);
5659
5660 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5661 "3162 Retrieved extents type-%d from port: count:%d, "
5662 "size:%d\n", type, *extnt_count, *extnt_size);
5663
5664 err_exit:
5665 mempool_free(mbox, phba->mbox_mem_pool);
5666 return rc;
5667 }
5668
5669
5670
5671
5672
5673
5674
5675
5676
5677
5678
5679
5680
5681
5682
5683
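/**
 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents
 * @phba: Pointer to HBA context object.
 * @type: The extent type to check.
 *
 * Reads the currently available extents from the port and checks whether
 * the extent count or size has changed relative to what the driver has
 * allocated. Returns -EIO on error, 1 if the count or size changed, and
 * 0 if nothing changed.
 **/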
5684 static int
5685 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5686 {
5687 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5688 uint16_t size_diff, rsrc_ext_size;
5689 int rc = 0;
5690 struct lpfc_rsrc_blks *rsrc_entry;
5691 struct list_head *rsrc_blk_list = NULL;
5692
5693 size_diff = 0;
5694 curr_ext_cnt = 0;
5695 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5696 &rsrc_ext_cnt,
5697 &rsrc_ext_size);
5698 if (unlikely(rc))
5699 return -EIO;
5700
5701 switch (type) {
5702 case LPFC_RSC_TYPE_FCOE_RPI:
5703 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5704 break;
5705 case LPFC_RSC_TYPE_FCOE_VPI:
5706 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5707 break;
5708 case LPFC_RSC_TYPE_FCOE_XRI:
5709 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5710 break;
5711 case LPFC_RSC_TYPE_FCOE_VFI:
5712 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5713 break;
5714 default:
5715 break;
5716 }
5717
5718 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5719 curr_ext_cnt++;
5720 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5721 size_diff++;
5722 }
5723
5724 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5725 rc = 1;
5726
5727 return rc;
5728 }
5729
5730
5731
5732
5733
5734
5735
5736
5737
5738
5739
5740
5741
5742
5743
5744
5745
5746
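/**
 * lpfc_sli4_cfg_post_extnts - Issue the extent allocation request
 * @phba: Pointer to HBA context object.
 * @extnt_cnt: number of extents to allocate.
 * @type: the extent type (rpi, xri, vfi, vpi).
 * @emb: set to the embedded/non-embedded mailbox mode actually used.
 * @mbox: pointer to the caller's allocated mailbox structure.
 *
 * Builds and issues the ALLOC_RSRC_EXTENT mailbox command, choosing an
 * embedded or non-embedded (SGE) mailbox depending on the size of the
 * request. Returns 0 on success or a negative error code.
 **/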
5747 static int
5748 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5749 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5750 {
5751 int rc = 0;
5752 uint32_t req_len;
5753 uint32_t emb_len;
5754 uint32_t alloc_len, mbox_tmo;
5755
5756
5757 req_len = extnt_cnt * sizeof(uint16_t);
5758
5759
5760
5761
5762
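/*
 * Room available in an embedded mailbox: the full mailbox size less
 * the header and one extra word.
 */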
5763 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5764 sizeof(uint32_t);
5765
5766
5767
5768
5769
5770 *emb = LPFC_SLI4_MBX_EMBED;
5771 if (req_len > emb_len) {
5772 req_len = extnt_cnt * sizeof(uint16_t) +
5773 sizeof(union lpfc_sli4_cfg_shdr) +
5774 sizeof(uint32_t);
5775 *emb = LPFC_SLI4_MBX_NEMBED;
5776 }
5777
5778 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5779 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5780 req_len, *emb);
5781 if (alloc_len < req_len) {
5782 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5783 "2982 Allocated DMA memory size (x%x) is "
5784 "less than the requested DMA memory "
5785 "size (x%x)\n", alloc_len, req_len);
5786 return -ENOMEM;
5787 }
5788 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5789 if (unlikely(rc))
5790 return -EIO;
5791
5792 if (!phba->sli4_hba.intr_enable)
5793 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5794 else {
5795 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5796 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5797 }
5798
5799 if (unlikely(rc))
5800 rc = -EIO;
5801 return rc;
5802 }
5803
5804
5805
5806
5807
5808
5809
5810
5811
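/**
 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type to allocate.
 *
 * Posts an extent allocation request to the port for the given type,
 * then builds the corresponding id array, bitmask and extent block list
 * (rpi, vpi, xri or vfi) from the ids returned by the port. Returns 0
 * on success or a negative error code.
 **/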
5812 static int
5813 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5814 {
5815 bool emb = false;
5816 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5817 uint16_t rsrc_id, rsrc_start, j, k;
5818 uint16_t *ids;
5819 int i, rc;
5820 unsigned long longs;
5821 unsigned long *bmask;
5822 struct lpfc_rsrc_blks *rsrc_blks;
5823 LPFC_MBOXQ_t *mbox;
5824 uint32_t length;
5825 struct lpfc_id_range *id_array = NULL;
5826 void *virtaddr = NULL;
5827 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5828 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5829 struct list_head *ext_blk_list;
5830
5831 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5832 &rsrc_cnt,
5833 &rsrc_size);
5834 if (unlikely(rc))
5835 return -EIO;
5836
5837 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5838 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5839 "3009 No available Resource Extents "
5840 "for resource type 0x%x: Count: 0x%x, "
5841 "Size 0x%x\n", type, rsrc_cnt,
5842 rsrc_size);
5843 return -ENOMEM;
5844 }
5845
5846 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5847 "2903 Post resource extents type-0x%x: "
5848 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5849
5850 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5851 if (!mbox)
5852 return -ENOMEM;
5853
5854 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5855 if (unlikely(rc)) {
5856 rc = -EIO;
5857 goto err_exit;
5858 }
5859
5860
5861
5862
5863
5864
5865
5866 if (emb == LPFC_SLI4_MBX_EMBED) {
5867 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5868 id_array = &rsrc_ext->u.rsp.id[0];
5869 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5870 } else {
5871 virtaddr = mbox->sge_array->addr[0];
5872 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5873 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5874 id_array = &n_rsrc->id;
5875 }
5876
5877 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5878 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5879
5880
5881
5882
5883
5884 length = sizeof(struct lpfc_rsrc_blks);
5885 switch (type) {
5886 case LPFC_RSC_TYPE_FCOE_RPI:
5887 phba->sli4_hba.rpi_bmask = kcalloc(longs,
5888 sizeof(unsigned long),
5889 GFP_KERNEL);
5890 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5891 rc = -ENOMEM;
5892 goto err_exit;
5893 }
5894 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
5895 sizeof(uint16_t),
5896 GFP_KERNEL);
5897 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5898 kfree(phba->sli4_hba.rpi_bmask);
5899 rc = -ENOMEM;
5900 goto err_exit;
5901 }
5902
5903
5904
5905
5906
5907
5908 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5909
5910
5911 bmask = phba->sli4_hba.rpi_bmask;
5912 ids = phba->sli4_hba.rpi_ids;
5913 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5914 break;
5915 case LPFC_RSC_TYPE_FCOE_VPI:
5916 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
5917 GFP_KERNEL);
5918 if (unlikely(!phba->vpi_bmask)) {
5919 rc = -ENOMEM;
5920 goto err_exit;
5921 }
5922 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
5923 GFP_KERNEL);
5924 if (unlikely(!phba->vpi_ids)) {
5925 kfree(phba->vpi_bmask);
5926 rc = -ENOMEM;
5927 goto err_exit;
5928 }
5929
5930
5931 bmask = phba->vpi_bmask;
5932 ids = phba->vpi_ids;
5933 ext_blk_list = &phba->lpfc_vpi_blk_list;
5934 break;
5935 case LPFC_RSC_TYPE_FCOE_XRI:
5936 phba->sli4_hba.xri_bmask = kcalloc(longs,
5937 sizeof(unsigned long),
5938 GFP_KERNEL);
5939 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5940 rc = -ENOMEM;
5941 goto err_exit;
5942 }
5943 phba->sli4_hba.max_cfg_param.xri_used = 0;
5944 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
5945 sizeof(uint16_t),
5946 GFP_KERNEL);
5947 if (unlikely(!phba->sli4_hba.xri_ids)) {
5948 kfree(phba->sli4_hba.xri_bmask);
5949 rc = -ENOMEM;
5950 goto err_exit;
5951 }
5952
5953
5954 bmask = phba->sli4_hba.xri_bmask;
5955 ids = phba->sli4_hba.xri_ids;
5956 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5957 break;
5958 case LPFC_RSC_TYPE_FCOE_VFI:
5959 phba->sli4_hba.vfi_bmask = kcalloc(longs,
5960 sizeof(unsigned long),
5961 GFP_KERNEL);
5962 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5963 rc = -ENOMEM;
5964 goto err_exit;
5965 }
5966 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
5967 sizeof(uint16_t),
5968 GFP_KERNEL);
5969 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5970 kfree(phba->sli4_hba.vfi_bmask);
5971 rc = -ENOMEM;
5972 goto err_exit;
5973 }
5974
5975
5976 bmask = phba->sli4_hba.vfi_bmask;
5977 ids = phba->sli4_hba.vfi_ids;
5978 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5979 break;
5980 default:
5981
5982 id_array = NULL;
5983 bmask = NULL;
5984 ids = NULL;
5985 ext_blk_list = NULL;
5986 goto err_exit;
5987 }
5988
5989
5990
5991
5992
5993
5994
5995 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5996 if ((i % 2) == 0)
5997 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5998 &id_array[k]);
5999 else
6000 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6001 &id_array[k]);
6002
6003 rsrc_blks = kzalloc(length, GFP_KERNEL);
6004 if (unlikely(!rsrc_blks)) {
6005 rc = -ENOMEM;
6006 kfree(bmask);
6007 kfree(ids);
6008 goto err_exit;
6009 }
6010 rsrc_blks->rsrc_start = rsrc_id;
6011 rsrc_blks->rsrc_size = rsrc_size;
6012 list_add_tail(&rsrc_blks->list, ext_blk_list);
6013 rsrc_start = rsrc_id;
6014 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6015 phba->sli4_hba.io_xri_start = rsrc_start +
6016 lpfc_sli4_get_iocb_cnt(phba);
6017 }
6018
6019 while (rsrc_id < (rsrc_start + rsrc_size)) {
6020 ids[j] = rsrc_id;
6021 rsrc_id++;
6022 j++;
6023 }
6024
6025 if ((i % 2) == 1)
6026 k++;
6027 }
6028 err_exit:
6029 lpfc_sli4_mbox_cmd_free(phba, mbox);
6030 return rc;
6031 }
6032
6033
6034
6035
6036
6037
6038
6039
6040
6041
6042
6043
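/**
 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 *
 * Issues a DEALLOC_RSRC_EXTENT mailbox command for all extents of the
 * given type and frees the driver's id arrays, bitmasks and extent
 * block lists for that type. Returns 0 on success or a negative error
 * code.
 **/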
6044 static int
6045 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6046 {
6047 int rc;
6048 uint32_t length, mbox_tmo = 0;
6049 LPFC_MBOXQ_t *mbox;
6050 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6051 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6052
6053 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6054 if (!mbox)
6055 return -ENOMEM;
6056
6057
6058
6059
6060
6061
6062 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6063 sizeof(struct lpfc_sli4_cfg_mhdr));
6064 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6065 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6066 length, LPFC_SLI4_MBX_EMBED);
6067
6068
6069 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6070 LPFC_SLI4_MBX_EMBED);
6071 if (unlikely(rc)) {
6072 rc = -EIO;
6073 goto out_free_mbox;
6074 }
6075 if (!phba->sli4_hba.intr_enable)
6076 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6077 else {
6078 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6079 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6080 }
6081 if (unlikely(rc)) {
6082 rc = -EIO;
6083 goto out_free_mbox;
6084 }
6085
6086 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6087 if (bf_get(lpfc_mbox_hdr_status,
6088 &dealloc_rsrc->header.cfg_shdr.response)) {
6089 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6090 "2919 Failed to release resource extents "
6091 "for type %d - Status 0x%x Add'l Status 0x%x. "
6092 "Resource memory not released.\n",
6093 type,
6094 bf_get(lpfc_mbox_hdr_status,
6095 &dealloc_rsrc->header.cfg_shdr.response),
6096 bf_get(lpfc_mbox_hdr_add_status,
6097 &dealloc_rsrc->header.cfg_shdr.response));
6098 rc = -EIO;
6099 goto out_free_mbox;
6100 }
6101
6102
6103 switch (type) {
6104 case LPFC_RSC_TYPE_FCOE_VPI:
6105 kfree(phba->vpi_bmask);
6106 kfree(phba->vpi_ids);
6107 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6108 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6109 &phba->lpfc_vpi_blk_list, list) {
6110 list_del_init(&rsrc_blk->list);
6111 kfree(rsrc_blk);
6112 }
6113 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6114 break;
6115 case LPFC_RSC_TYPE_FCOE_XRI:
6116 kfree(phba->sli4_hba.xri_bmask);
6117 kfree(phba->sli4_hba.xri_ids);
6118 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6119 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6120 list_del_init(&rsrc_blk->list);
6121 kfree(rsrc_blk);
6122 }
6123 break;
6124 case LPFC_RSC_TYPE_FCOE_VFI:
6125 kfree(phba->sli4_hba.vfi_bmask);
6126 kfree(phba->sli4_hba.vfi_ids);
6127 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6128 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6129 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6130 list_del_init(&rsrc_blk->list);
6131 kfree(rsrc_blk);
6132 }
6133 break;
6134 case LPFC_RSC_TYPE_FCOE_RPI:
6135
6136 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6137 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6138 list_del_init(&rsrc_blk->list);
6139 kfree(rsrc_blk);
6140 }
6141 break;
6142 default:
6143 break;
6144 }
6145
6146 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6147
6148 out_free_mbox:
6149 mempool_free(mbox, phba->mbox_mem_pool);
6150 return rc;
6151 }
6152
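/**
 * lpfc_set_features - Build a SET_FEATURES mailbox command
 * @phba: Pointer to HBA context object.
 * @mbox: mailbox command to populate.
 * @feature: the feature to request (UE recovery or MDS diagnostics).
 **/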
6153 static void
6154 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6155 uint32_t feature)
6156 {
6157 uint32_t len;
6158
6159 len = sizeof(struct lpfc_mbx_set_feature) -
6160 sizeof(struct lpfc_sli4_cfg_mhdr);
6161 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6162 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6163 LPFC_SLI4_MBX_EMBED);
6164
6165 switch (feature) {
6166 case LPFC_SET_UE_RECOVERY:
6167 bf_set(lpfc_mbx_set_feature_UER,
6168 &mbox->u.mqe.un.set_feature, 1);
6169 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6170 mbox->u.mqe.un.set_feature.param_len = 8;
6171 break;
6172 case LPFC_SET_MDS_DIAGS:
6173 bf_set(lpfc_mbx_set_feature_mds,
6174 &mbox->u.mqe.un.set_feature, 1);
6175 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6176 &mbox->u.mqe.un.set_feature, 1);
6177 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6178 mbox->u.mqe.un.set_feature.param_len = 8;
6179 break;
6180 }
6181
6182 return;
6183 }
6184
6185
6186
6187
6188
6189
6190
6191
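/**
 * lpfc_ras_stop_fwlog - Disable FW logging by the adapter
 * @phba: Pointer to HBA context object.
 *
 * Disables firmware logging into host memory on the adapter, to be done
 * before reading the logs from host memory.
 **/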
6192 void
6193 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6194 {
6195 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6196
6197 ras_fwlog->ras_active = false;
6198
6199
6200 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6201 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6202 }
6203
6204
6205
6206
6207
6208
6209
6210
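/**
 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging
 * @phba: Pointer to HBA context object.
 *
 * Frees the firmware-log DMA buffers and the LWPD buffer and marks RAS
 * logging inactive.
 **/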
6211 void
6212 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6213 {
6214 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6215 struct lpfc_dmabuf *dmabuf, *next;
6216
6217 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6218 list_for_each_entry_safe(dmabuf, next,
6219 &ras_fwlog->fwlog_buff_list,
6220 list) {
6221 list_del(&dmabuf->list);
6222 dma_free_coherent(&phba->pcidev->dev,
6223 LPFC_RAS_MAX_ENTRY_SIZE,
6224 dmabuf->virt, dmabuf->phys);
6225 kfree(dmabuf);
6226 }
6227 }
6228
6229 if (ras_fwlog->lwpd.virt) {
6230 dma_free_coherent(&phba->pcidev->dev,
6231 sizeof(uint32_t) * 2,
6232 ras_fwlog->lwpd.virt,
6233 ras_fwlog->lwpd.phys);
6234 ras_fwlog->lwpd.virt = NULL;
6235 }
6236
6237 ras_fwlog->ras_active = false;
6238 }
6239
6240
6241
6242
6243
6244
6245
6246
6247
6248
6249
6250
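/**
 * lpfc_sli4_ras_dma_alloc - Allocate memory for FW logging support
 * @phba: Pointer to HBA context object.
 * @fwlog_buff_count: Count of buffers to be created.
 *
 * Allocates DMA memory for the Log Write Position Data (LWPD) and for
 * the log buffers to be posted to the adapter; each buffer is
 * LPFC_RAS_MAX_ENTRY_SIZE bytes. Returns 0 on success or -ENOMEM.
 **/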
6251 static int
6252 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6253 uint32_t fwlog_buff_count)
6254 {
6255 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6256 struct lpfc_dmabuf *dmabuf;
6257 int rc = 0, i = 0;
6258
6259
6260 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6261
6262
6263 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6264 sizeof(uint32_t) * 2,
6265 &ras_fwlog->lwpd.phys,
6266 GFP_KERNEL);
6267 if (!ras_fwlog->lwpd.virt) {
6268 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6269 "6185 LWPD Memory Alloc Failed\n");
6270
6271 return -ENOMEM;
6272 }
6273
6274 ras_fwlog->fw_buffcount = fwlog_buff_count;
6275 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6276 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6277 GFP_KERNEL);
6278 if (!dmabuf) {
6279 rc = -ENOMEM;
6280 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6281 "6186 Memory Alloc failed FW logging");
6282 goto free_mem;
6283 }
6284
6285 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6286 LPFC_RAS_MAX_ENTRY_SIZE,
6287 &dmabuf->phys, GFP_KERNEL);
6288 if (!dmabuf->virt) {
6289 kfree(dmabuf);
6290 rc = -ENOMEM;
6291 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6292 "6187 DMA Alloc Failed FW logging");
6293 goto free_mem;
6294 }
6295 dmabuf->buffer_tag = i;
6296 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6297 }
6298
6299 free_mem:
6300 if (rc)
6301 lpfc_sli4_ras_dma_free(phba);
6302
6303 return rc;
6304 }
6305
6306
6307
6308
6309
6310
6311
6312
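/**
 * lpfc_sli4_ras_mbox_cmpl - Completion handler for the RAS mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Marks RAS firmware logging active on success; on failure it disables
 * RAS support and releases the DMA buffers.
 **/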
6313 static void
6314 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6315 {
6316 MAILBOX_t *mb;
6317 union lpfc_sli4_cfg_shdr *shdr;
6318 uint32_t shdr_status, shdr_add_status;
6319 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6320
6321 mb = &pmb->u.mb;
6322
6323 shdr = (union lpfc_sli4_cfg_shdr *)
6324 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6325 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6326 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6327
6328 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6329 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
6330 "6188 FW LOG mailbox "
6331 "completed with status x%x add_status x%x,"
6332 " mbx status x%x\n",
6333 shdr_status, shdr_add_status, mb->mbxStatus);
6334
6335 ras_fwlog->ras_hwsupport = false;
6336 goto disable_ras;
6337 }
6338
6339 ras_fwlog->ras_active = true;
6340 mempool_free(pmb, phba->mbox_mem_pool);
6341
6342 return;
6343
6344 disable_ras:
6345
6346 lpfc_sli4_ras_dma_free(phba);
6347 mempool_free(pmb, phba->mbox_mem_pool);
6348 }
6349
6350
6351
6352
6353
6354
6355
6356
6357
6358
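/**
 * lpfc_sli4_ras_fwlog_init - Initialize memory and post the RAS mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @fwlog_level: logging verbosity level.
 * @fwlog_enable: enable/disable FW logging.
 *
 * Allocates the log buffers if needed and posts a SET_DIAG_LOG_OPTION
 * mailbox command to enable firmware logging into host memory. Returns
 * 0 on success or a negative error code.
 **/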
6359 int
6360 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6361 uint32_t fwlog_level,
6362 uint32_t fwlog_enable)
6363 {
6364 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6365 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6366 struct lpfc_dmabuf *dmabuf;
6367 LPFC_MBOXQ_t *mbox;
6368 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6369 int rc = 0;
6370
6371 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6372 phba->cfg_ras_fwlog_buffsize);
6373 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6374
6375
6376
6377
6378
6379 if (!ras_fwlog->lwpd.virt) {
6380 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6381 if (rc) {
6382 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6383 "6189 FW Log Memory Allocation Failed");
6384 return rc;
6385 }
6386 }
6387
6388
6389 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6390 if (!mbox) {
6391 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6392 "6190 RAS MBX Alloc Failed");
6393 rc = -ENOMEM;
6394 goto mem_free;
6395 }
6396
6397 ras_fwlog->fw_loglevel = fwlog_level;
6398 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6399 sizeof(struct lpfc_sli4_cfg_mhdr));
6400
6401 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6402 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6403 len, LPFC_SLI4_MBX_EMBED);
6404
6405 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6406 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6407 fwlog_enable);
6408 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6409 ras_fwlog->fw_loglevel);
6410 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6411 ras_fwlog->fw_buffcount);
6412 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6413 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6414
6415
6416 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6417 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6418
6419 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6420 putPaddrLow(dmabuf->phys);
6421
6422 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6423 putPaddrHigh(dmabuf->phys);
6424 }
6425
6426
6427 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6428 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6429
6430 mbox->vport = phba->pport;
6431 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6432
6433 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6434
6435 if (rc == MBX_NOT_FINISHED) {
6436 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6437 "6191 FW-Log Mailbox failed. "
6438 "status %d mbxStatus : x%x", rc,
6439 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6440 mempool_free(mbox, phba->mbox_mem_pool);
6441 rc = -EIO;
6442 goto mem_free;
6443 } else
6444 rc = 0;
6445 mem_free:
6446 if (rc)
6447 lpfc_sli4_ras_dma_free(phba);
6448
6449 return rc;
6450 }
6451
6452
6453
6454
6455
6456
6457
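/**
 * lpfc_sli4_ras_setup - Check if RAS is supported and initialize it
 * @phba: Pointer to HBA context object.
 *
 * Checks whether the adapter supports firmware logging and, if so,
 * enables it at the configured verbosity level.
 **/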
6458 void
6459 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6460 {
6461
6462 if (lpfc_check_fwlog_support(phba))
6463 return;
6464
6465 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6466 LPFC_RAS_ENABLE_LOGGING);
6467 }
6468
6469
6470
6471
6472
6473
6474
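/**
 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource identifiers
 * @phba: Pointer to HBA context object.
 *
 * Allocates the VFI, VPI, RPI and XRI resources: extent-based when the
 * port uses extents (reallocating if the port's extent configuration
 * has changed), or from the contiguous ranges reported by READ_CONFIG
 * otherwise. Returns 0 on success or a negative error code.
 **/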
6475 int
6476 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6477 {
6478 int i, rc, error = 0;
6479 uint16_t count, base;
6480 unsigned long longs;
6481
6482 if (!phba->sli4_hba.rpi_hdrs_in_use)
6483 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6484 if (phba->sli4_hba.extents_in_use) {
6485
6486
6487
6488
6489
6490 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6491 LPFC_IDX_RSRC_RDY) {
6492
6493
6494
6495
6496
6497 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6498 LPFC_RSC_TYPE_FCOE_VFI);
6499 if (rc != 0)
6500 error++;
6501 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6502 LPFC_RSC_TYPE_FCOE_VPI);
6503 if (rc != 0)
6504 error++;
6505 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6506 LPFC_RSC_TYPE_FCOE_XRI);
6507 if (rc != 0)
6508 error++;
6509 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6510 LPFC_RSC_TYPE_FCOE_RPI);
6511 if (rc != 0)
6512 error++;
6513
6514
6515
6516
6517
6518
6519
6520 if (error) {
6521 lpfc_printf_log(phba, KERN_INFO,
6522 LOG_MBOX | LOG_INIT,
6523 "2931 Detected extent resource "
6524 "change. Reallocating all "
6525 "extents.\n");
6526 rc = lpfc_sli4_dealloc_extent(phba,
6527 LPFC_RSC_TYPE_FCOE_VFI);
6528 rc = lpfc_sli4_dealloc_extent(phba,
6529 LPFC_RSC_TYPE_FCOE_VPI);
6530 rc = lpfc_sli4_dealloc_extent(phba,
6531 LPFC_RSC_TYPE_FCOE_XRI);
6532 rc = lpfc_sli4_dealloc_extent(phba,
6533 LPFC_RSC_TYPE_FCOE_RPI);
6534 } else
6535 return 0;
6536 }
6537
6538 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6539 if (unlikely(rc))
6540 goto err_exit;
6541
6542 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6543 if (unlikely(rc))
6544 goto err_exit;
6545
6546 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6547 if (unlikely(rc))
6548 goto err_exit;
6549
6550 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6551 if (unlikely(rc))
6552 goto err_exit;
6553 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6554 LPFC_IDX_RSRC_RDY);
6555 return rc;
6556 } else {
6557
6558
6559
6560
6561
6562
6563
6564 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6565 LPFC_IDX_RSRC_RDY) {
6566 lpfc_sli4_dealloc_resource_identifiers(phba);
6567 lpfc_sli4_remove_rpis(phba);
6568 }
6569
6570 count = phba->sli4_hba.max_cfg_param.max_rpi;
6571 if (count <= 0) {
6572 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6573 "3279 Invalid provisioning of "
6574 "rpi:%d\n", count);
6575 rc = -EINVAL;
6576 goto err_exit;
6577 }
6578 base = phba->sli4_hba.max_cfg_param.rpi_base;
6579 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6580 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6581 sizeof(unsigned long),
6582 GFP_KERNEL);
6583 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6584 rc = -ENOMEM;
6585 goto err_exit;
6586 }
6587 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6588 GFP_KERNEL);
6589 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6590 rc = -ENOMEM;
6591 goto free_rpi_bmask;
6592 }
6593
6594 for (i = 0; i < count; i++)
6595 phba->sli4_hba.rpi_ids[i] = base + i;
6596
6597
6598 count = phba->sli4_hba.max_cfg_param.max_vpi;
6599 if (count <= 0) {
6600 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6601 "3280 Invalid provisioning of "
6602 "vpi:%d\n", count);
6603 rc = -EINVAL;
6604 goto free_rpi_ids;
6605 }
6606 base = phba->sli4_hba.max_cfg_param.vpi_base;
6607 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6608 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6609 GFP_KERNEL);
6610 if (unlikely(!phba->vpi_bmask)) {
6611 rc = -ENOMEM;
6612 goto free_rpi_ids;
6613 }
6614 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6615 GFP_KERNEL);
6616 if (unlikely(!phba->vpi_ids)) {
6617 rc = -ENOMEM;
6618 goto free_vpi_bmask;
6619 }
6620
6621 for (i = 0; i < count; i++)
6622 phba->vpi_ids[i] = base + i;
6623
6624
6625 count = phba->sli4_hba.max_cfg_param.max_xri;
6626 if (count <= 0) {
6627 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6628 "3281 Invalid provisioning of "
6629 "xri:%d\n", count);
6630 rc = -EINVAL;
6631 goto free_vpi_ids;
6632 }
6633 base = phba->sli4_hba.max_cfg_param.xri_base;
6634 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6635 phba->sli4_hba.xri_bmask = kcalloc(longs,
6636 sizeof(unsigned long),
6637 GFP_KERNEL);
6638 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6639 rc = -ENOMEM;
6640 goto free_vpi_ids;
6641 }
6642 phba->sli4_hba.max_cfg_param.xri_used = 0;
6643 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6644 GFP_KERNEL);
6645 if (unlikely(!phba->sli4_hba.xri_ids)) {
6646 rc = -ENOMEM;
6647 goto free_xri_bmask;
6648 }
6649
6650 for (i = 0; i < count; i++)
6651 phba->sli4_hba.xri_ids[i] = base + i;
6652
6653
6654 count = phba->sli4_hba.max_cfg_param.max_vfi;
6655 if (count <= 0) {
6656 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6657 "3282 Invalid provisioning of "
6658 "vfi:%d\n", count);
6659 rc = -EINVAL;
6660 goto free_xri_ids;
6661 }
6662 base = phba->sli4_hba.max_cfg_param.vfi_base;
6663 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6664 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6665 sizeof(unsigned long),
6666 GFP_KERNEL);
6667 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6668 rc = -ENOMEM;
6669 goto free_xri_ids;
6670 }
6671 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6672 GFP_KERNEL);
6673 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6674 rc = -ENOMEM;
6675 goto free_vfi_bmask;
6676 }
6677
6678 for (i = 0; i < count; i++)
6679 phba->sli4_hba.vfi_ids[i] = base + i;
6680
6681
6682
6683
6684
6685 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6686 LPFC_IDX_RSRC_RDY);
6687 return 0;
6688 }
6689
6690 free_vfi_bmask:
6691 kfree(phba->sli4_hba.vfi_bmask);
6692 phba->sli4_hba.vfi_bmask = NULL;
6693 free_xri_ids:
6694 kfree(phba->sli4_hba.xri_ids);
6695 phba->sli4_hba.xri_ids = NULL;
6696 free_xri_bmask:
6697 kfree(phba->sli4_hba.xri_bmask);
6698 phba->sli4_hba.xri_bmask = NULL;
6699 free_vpi_ids:
6700 kfree(phba->vpi_ids);
6701 phba->vpi_ids = NULL;
6702 free_vpi_bmask:
6703 kfree(phba->vpi_bmask);
6704 phba->vpi_bmask = NULL;
6705 free_rpi_ids:
6706 kfree(phba->sli4_hba.rpi_ids);
6707 phba->sli4_hba.rpi_ids = NULL;
6708 free_rpi_bmask:
6709 kfree(phba->sli4_hba.rpi_bmask);
6710 phba->sli4_hba.rpi_bmask = NULL;
6711 err_exit:
6712 return rc;
6713 }
6714
6715
6716
6717
6718
6719
6720
6721
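/**
 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource identifiers
 * @phba: Pointer to HBA context object.
 *
 * Releases all VPI, RPI, XRI and VFI resources, using extent
 * deallocation when extents are in use or freeing the id arrays and
 * bitmasks otherwise.
 **/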
6722 int
6723 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6724 {
6725 if (phba->sli4_hba.extents_in_use) {
6726 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6727 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6728 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6729 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6730 } else {
6731 kfree(phba->vpi_bmask);
6732 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6733 kfree(phba->vpi_ids);
6734 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6735 kfree(phba->sli4_hba.xri_bmask);
6736 kfree(phba->sli4_hba.xri_ids);
6737 kfree(phba->sli4_hba.vfi_bmask);
6738 kfree(phba->sli4_hba.vfi_ids);
6739 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6740 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6741 }
6742
6743 return 0;
6744 }
6745
6746
6747
6748
6749
6750
6751
6752
6753
6754
6755
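/**
 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_cnt: buffer to hold the port extent count response.
 * @extnt_size: buffer to hold the port extent size response.
 *
 * Reads back from the port the extents currently allocated to this
 * function for the given type. Returns 0 on success or a negative
 * error code.
 **/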
6756 int
6757 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6758 uint16_t *extnt_cnt, uint16_t *extnt_size)
6759 {
6760 bool emb;
6761 int rc = 0;
6762 uint16_t curr_blks = 0;
6763 uint32_t req_len, emb_len;
6764 uint32_t alloc_len, mbox_tmo;
6765 struct list_head *blk_list_head;
6766 struct lpfc_rsrc_blks *rsrc_blk;
6767 LPFC_MBOXQ_t *mbox;
6768 void *virtaddr = NULL;
6769 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6770 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6771 union lpfc_sli4_cfg_shdr *shdr;
6772
6773 switch (type) {
6774 case LPFC_RSC_TYPE_FCOE_VPI:
6775 blk_list_head = &phba->lpfc_vpi_blk_list;
6776 break;
6777 case LPFC_RSC_TYPE_FCOE_XRI:
6778 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6779 break;
6780 case LPFC_RSC_TYPE_FCOE_VFI:
6781 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6782 break;
6783 case LPFC_RSC_TYPE_FCOE_RPI:
6784 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6785 break;
6786 default:
6787 return -EIO;
6788 }
6789
6790
6791 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6792 if (curr_blks == 0) {
6793
6794
6795
6796
6797
6798
6799
6800 *extnt_size = rsrc_blk->rsrc_size;
6801 }
6802 curr_blks++;
6803 }
6804
6805
6806
6807
6808
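/*
 * Room available in an embedded mailbox: the full mailbox size less
 * the header and one extra word.
 */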
6809 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6810 sizeof(uint32_t);
6811
6812
6813
6814
6815
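/*
 * Presume the response fits in an embedded mailbox. Note that
 * req_len is seeded with emb_len, so the non-embedded branch below
 * is never taken as written.
 */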
6816 emb = LPFC_SLI4_MBX_EMBED;
6817 req_len = emb_len;
6818 if (req_len > emb_len) {
6819 req_len = curr_blks * sizeof(uint16_t) +
6820 sizeof(union lpfc_sli4_cfg_shdr) +
6821 sizeof(uint32_t);
6822 emb = LPFC_SLI4_MBX_NEMBED;
6823 }
6824
6825 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6826 if (!mbox)
6827 return -ENOMEM;
6828 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6829
6830 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6831 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6832 req_len, emb);
6833 if (alloc_len < req_len) {
6834 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6835 "2983 Allocated DMA memory size (x%x) is "
6836 "less than the requested DMA memory "
6837 "size (x%x)\n", alloc_len, req_len);
6838 rc = -ENOMEM;
6839 goto err_exit;
6840 }
6841 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6842 if (unlikely(rc)) {
6843 rc = -EIO;
6844 goto err_exit;
6845 }
6846
6847 if (!phba->sli4_hba.intr_enable)
6848 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6849 else {
6850 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6851 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6852 }
6853
6854 if (unlikely(rc)) {
6855 rc = -EIO;
6856 goto err_exit;
6857 }
6858
6859
6860
6861
6862
6863
6864
6865 if (emb == LPFC_SLI4_MBX_EMBED) {
6866 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6867 shdr = &rsrc_ext->header.cfg_shdr;
6868 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6869 } else {
6870 virtaddr = mbox->sge_array->addr[0];
6871 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6872 shdr = &n_rsrc->cfg_shdr;
6873 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6874 }
6875
6876 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6877 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6878 "2984 Failed to read allocated resources "
6879 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6880 type,
6881 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6882 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6883 rc = -EIO;
6884 goto err_exit;
6885 }
6886 err_exit:
6887 lpfc_sli4_mbox_cmd_free(phba, mbox);
6888 return rc;
6889 }
6890
6891
6892
6893
6894
6895
6896
6897
6898
6899
6900
6901
6902
6903
6904
6905
6906
6907
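/**
 * lpfc_sli4_repost_sgl_list - Repost buffer sgl pages as a block
 * @phba: pointer to lpfc hba data structure.
 * @sgl_list: linked list of sgl buffers to post.
 * @cnt: number of linked list buffers.
 *
 * Walks the allocated buffers and reposts them to the port, building
 * blocks of sgls with contiguous xritags for non-embedded SGL block
 * post and falling back to a single embedded post for an isolated
 * xritag. Buffers that fail to post are freed. Returns the number of
 * buffers successfully posted, or -EIO if none could be posted.
 **/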
6908 static int
6909 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6910 struct list_head *sgl_list, int cnt)
6911 {
6912 struct lpfc_sglq *sglq_entry = NULL;
6913 struct lpfc_sglq *sglq_entry_next = NULL;
6914 struct lpfc_sglq *sglq_entry_first = NULL;
6915 int status, total_cnt;
6916 int post_cnt = 0, num_posted = 0, block_cnt = 0;
6917 int last_xritag = NO_XRI;
6918 LIST_HEAD(prep_sgl_list);
6919 LIST_HEAD(blck_sgl_list);
6920 LIST_HEAD(allc_sgl_list);
6921 LIST_HEAD(post_sgl_list);
6922 LIST_HEAD(free_sgl_list);
6923
6924 spin_lock_irq(&phba->hbalock);
6925 spin_lock(&phba->sli4_hba.sgl_list_lock);
6926 list_splice_init(sgl_list, &allc_sgl_list);
6927 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6928 spin_unlock_irq(&phba->hbalock);
6929
6930 total_cnt = cnt;
6931 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6932 &allc_sgl_list, list) {
6933 list_del_init(&sglq_entry->list);
6934 block_cnt++;
6935 if ((last_xritag != NO_XRI) &&
6936 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6937
6938 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6939 post_cnt = block_cnt - 1;
6940
6941 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6942 block_cnt = 1;
6943 } else {
6944
6945 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6946
6947 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6948 list_splice_init(&prep_sgl_list,
6949 &blck_sgl_list);
6950 post_cnt = block_cnt;
6951 block_cnt = 0;
6952 }
6953 }
6954 num_posted++;
6955
6956
6957 last_xritag = sglq_entry->sli4_xritag;
6958
6959
6960 if (num_posted == total_cnt) {
6961 if (post_cnt == 0) {
6962 list_splice_init(&prep_sgl_list,
6963 &blck_sgl_list);
6964 post_cnt = block_cnt;
6965 } else if (block_cnt == 1) {
6966 status = lpfc_sli4_post_sgl(phba,
6967 sglq_entry->phys, 0,
6968 sglq_entry->sli4_xritag);
6969 if (!status) {
6970
6971 list_add_tail(&sglq_entry->list,
6972 &post_sgl_list);
6973 } else {
6974
6975 lpfc_printf_log(phba, KERN_WARNING,
6976 LOG_SLI,
6977 "3159 Failed to post "
6978 "sgl, xritag:x%x\n",
6979 sglq_entry->sli4_xritag);
6980 list_add_tail(&sglq_entry->list,
6981 &free_sgl_list);
6982 total_cnt--;
6983 }
6984 }
6985 }
6986
6987
6988 if (post_cnt == 0)
6989 continue;
6990
6991
6992 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
6993 post_cnt);
6994
6995 if (!status) {
6996
6997 list_splice_init(&blck_sgl_list, &post_sgl_list);
6998 } else {
6999
7000 sglq_entry_first = list_first_entry(&blck_sgl_list,
7001 struct lpfc_sglq,
7002 list);
7003 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7004 "3160 Failed to post sgl-list, "
7005 "xritag:x%x-x%x\n",
7006 sglq_entry_first->sli4_xritag,
7007 (sglq_entry_first->sli4_xritag +
7008 post_cnt - 1));
7009 list_splice_init(&blck_sgl_list, &free_sgl_list);
7010 total_cnt -= post_cnt;
7011 }
7012
7013
7014 if (block_cnt == 0)
7015 last_xritag = NO_XRI;
7016
7017
7018 post_cnt = 0;
7019 }
7020
7021
7022 lpfc_free_sgl_list(phba, &free_sgl_list);
7023
7024
7025 if (!list_empty(&post_sgl_list)) {
7026 spin_lock_irq(&phba->hbalock);
7027 spin_lock(&phba->sli4_hba.sgl_list_lock);
7028 list_splice_init(&post_sgl_list, sgl_list);
7029 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7030 spin_unlock_irq(&phba->hbalock);
7031 } else {
7032 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7033 "3161 Failure to post sgl to port.\n");
7034 return -EIO;
7035 }
7036
7037
7038 return total_cnt;
7039 }
7040
7041
7042
7043
7044
7045
7046
7047
7048
7049
7050
7051
7052
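/**
 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated IO buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * Flushes the IO buffer lists and reposts the buffers' sgls to the port
 * using SGL block post. Returns 0 on success, -EIO if nothing could be
 * posted.
 **/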
7053 static int
7054 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7055 {
7056 LIST_HEAD(post_nblist);
7057 int num_posted, rc = 0;
7058
7059
7060 lpfc_io_buf_flush(phba, &post_nblist);
7061
7062
7063 if (!list_empty(&post_nblist)) {
7064 num_posted = lpfc_sli4_post_io_sgl_list(
7065 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7066
7067 if (num_posted == 0)
7068 rc = -EIO;
7069 }
7070 return rc;
7071 }
7072
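/**
 * lpfc_set_host_data - Build a SET_HOST_DATA mailbox command
 * @phba: Pointer to HBA context object.
 * @mbox: mailbox command to populate.
 *
 * Reports the host OS and driver version string to the firmware.
 **/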
7073 static void
7074 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7075 {
7076 uint32_t len;
7077
7078 len = sizeof(struct lpfc_mbx_set_host_data) -
7079 sizeof(struct lpfc_sli4_cfg_mhdr);
7080 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7081 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7082 LPFC_SLI4_MBX_EMBED);
7083
7084 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7085 mbox->u.mqe.un.set_host_data.param_len =
7086 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7087 snprintf(mbox->u.mqe.un.set_host_data.data,
7088 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7089 "Linux %s v"LPFC_DRIVER_VERSION,
7090 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7091 }
7092
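/**
 * lpfc_post_rq_buffer - Post buffers to a header/data receive queue pair
 * @phba: Pointer to HBA context object.
 * @hrq: header receive queue.
 * @drq: data receive queue.
 * @count: number of buffers to post.
 * @idx: queue index.
 *
 * Allocates up to @count receive buffers and posts their header and
 * data DMA addresses to the RQ pair, stopping early if the queue fills
 * or an allocation fails. Always returns 1.
 **/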
7093 int
7094 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7095 struct lpfc_queue *drq, int count, int idx)
7096 {
7097 int rc, i;
7098 struct lpfc_rqe hrqe;
7099 struct lpfc_rqe drqe;
7100 struct lpfc_rqb *rqbp;
7101 unsigned long flags;
7102 struct rqb_dmabuf *rqb_buffer;
7103 LIST_HEAD(rqb_buf_list);
7104
7105 spin_lock_irqsave(&phba->hbalock, flags);
7106 rqbp = hrq->rqbp;
7107 for (i = 0; i < count; i++) {
7108
7109 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7110 break;
7111 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7112 if (!rqb_buffer)
7113 break;
7114 rqb_buffer->hrq = hrq;
7115 rqb_buffer->drq = drq;
7116 rqb_buffer->idx = idx;
7117 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7118 }
7119 while (!list_empty(&rqb_buf_list)) {
7120 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7121 hbuf.list);
7122
7123 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7124 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7125 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7126 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7127 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7128 if (rc < 0) {
7129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7130 "6421 Cannot post to HRQ %d: %x %x %x "
7131 "DRQ %x %x\n",
7132 hrq->queue_id,
7133 hrq->host_index,
7134 hrq->hba_index,
7135 hrq->entry_count,
7136 drq->host_index,
7137 drq->hba_index);
7138 rqbp->rqb_free_buffer(phba, rqb_buffer);
7139 } else {
7140 list_add_tail(&rqb_buffer->hbuf.list,
7141 &rqbp->rqb_buffer_list);
7142 rqbp->buffer_count++;
7143 }
7144 }
7145 spin_unlock_irqrestore(&phba->hbalock, flags);
7146 return 1;
7147 }
7148
7149
7150
7151
7152
7153
7154
7155
7156
7157
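/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
 * @phba: Pointer to HBA context object.
 *
 * Main SLI4 device initialization: resets the PCI function, reads the
 * firmware revision and VPD, negotiates features, allocates resource
 * identifiers, creates and sets up the queues, reposts SGLs and RPI
 * headers, registers the FCFI where required, starts the HBA timers and
 * initializes the link unless it is administratively disabled.
 * Returns 0 on success.
 */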
7158 int
7159 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7160 {
7161 int rc, i, cnt, len;
7162 LPFC_MBOXQ_t *mboxq;
7163 struct lpfc_mqe *mqe;
7164 uint8_t *vpd;
7165 uint32_t vpd_size;
7166 uint32_t ftr_rsp = 0;
7167 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7168 struct lpfc_vport *vport = phba->pport;
7169 struct lpfc_dmabuf *mp;
7170 struct lpfc_rqb *rqbp;
7171
7172
7173 rc = lpfc_pci_function_reset(phba);
7174 if (unlikely(rc))
7175 return -ENODEV;
7176
7177
7178 rc = lpfc_sli4_post_status_check(phba);
7179 if (unlikely(rc))
7180 return -ENODEV;
7181 else {
7182 spin_lock_irq(&phba->hbalock);
7183 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7184 spin_unlock_irq(&phba->hbalock);
7185 }
7186
7187
7188
7189
7190
7191 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7192 if (!mboxq)
7193 return -ENOMEM;
7194
7195
7196 vpd_size = SLI4_PAGE_SIZE;
7197 vpd = kzalloc(vpd_size, GFP_KERNEL);
7198 if (!vpd) {
7199 rc = -ENOMEM;
7200 goto out_free_mbox;
7201 }
7202
7203 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7204 if (unlikely(rc)) {
7205 kfree(vpd);
7206 goto out_free_mbox;
7207 }
7208
7209 mqe = &mboxq->u.mqe;
7210 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7211 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7212 phba->hba_flag |= HBA_FCOE_MODE;
7213 phba->fcp_embed_io = 0;
7214 } else {
7215 phba->hba_flag &= ~HBA_FCOE_MODE;
7216 }
7217
7218 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7219 LPFC_DCBX_CEE_MODE)
7220 phba->hba_flag |= HBA_FIP_SUPPORT;
7221 else
7222 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7223
7224 phba->hba_flag &= ~HBA_IOQ_FLUSH;
7225
7226 if (phba->sli_rev != LPFC_SLI_REV4) {
7227 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7228 "0376 READ_REV Error. SLI Level %d "
7229 "FCoE enabled %d\n",
7230 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7231 rc = -EIO;
7232 kfree(vpd);
7233 goto out_free_mbox;
7234 }
7235
7236
7237
7238
7239
7240
7241 if (phba->hba_flag & HBA_FCOE_MODE &&
7242 lpfc_sli4_read_fcoe_params(phba))
7243 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7244 "2570 Failed to read FCoE parameters\n");
7245
7246
7247
7248
7249
7250 rc = lpfc_sli4_retrieve_pport_name(phba);
7251 if (!rc)
7252 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7253 "3080 Successful retrieving SLI4 device "
7254 "physical port name: %s.\n", phba->Port);
7255
7256 rc = lpfc_sli4_get_ctl_attr(phba);
7257 if (!rc)
7258 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7259 "8351 Successful retrieving SLI4 device "
7260 "CTL ATTR\n");
7261
7262
7263
7264
7265
7266
7267 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7268 if (unlikely(!rc)) {
7269 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7270 "0377 Error %d parsing vpd. "
7271 "Using defaults.\n", rc);
7272 rc = 0;
7273 }
7274 kfree(vpd);
7275
7276
7277 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7278 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7279
7280
7281
7282
7283
7284 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7285 LPFC_SLI_INTF_IF_TYPE_6) &&
7286 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7287 (phba->vpd.rev.smRev == 0) &&
7288 (phba->cfg_nvme_embed_cmd == 1))
7289 phba->cfg_nvme_embed_cmd = 0;
7290
7291 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7292 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7293 &mqe->un.read_rev);
7294 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7295 &mqe->un.read_rev);
7296 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7297 &mqe->un.read_rev);
7298 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7299 &mqe->un.read_rev);
7300 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7301 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7302 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7303 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7304 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7305 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7306 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7307 "(%d):0380 READ_REV Status x%x "
7308 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7309 mboxq->vport ? mboxq->vport->vpi : 0,
7310 bf_get(lpfc_mqe_status, mqe),
7311 phba->vpd.rev.opFwName,
7312 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7313 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7314
7315
7316 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
7317 if (phba->pport->cfg_lun_queue_depth > rc) {
7318 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7319 "3362 LUN queue depth changed from %d to %d\n",
7320 phba->pport->cfg_lun_queue_depth, rc);
7321 phba->pport->cfg_lun_queue_depth = rc;
7322 }
7323
7324 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7325 LPFC_SLI_INTF_IF_TYPE_0) {
7326 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7327 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7328 if (rc == MBX_SUCCESS) {
7329 phba->hba_flag |= HBA_RECOVERABLE_UE;
7330
7331 phba->eratt_poll_interval = 1;
7332 phba->sli4_hba.ue_to_sr = bf_get(
7333 lpfc_mbx_set_feature_UESR,
7334 &mboxq->u.mqe.un.set_feature);
7335 phba->sli4_hba.ue_to_rp = bf_get(
7336 lpfc_mbx_set_feature_UERP,
7337 &mboxq->u.mqe.un.set_feature);
7338 }
7339 }
7340
7341 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7342
7343 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7344 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7345 if (rc != MBX_SUCCESS)
7346 phba->mds_diags_support = 0;
7347 }
7348
7349
7350
7351
7352
7353 lpfc_request_features(phba, mboxq);
7354 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7355 if (unlikely(rc)) {
7356 rc = -EIO;
7357 goto out_free_mbox;
7358 }
7359
7360
7361
7362
7363
7364 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7365 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7366 "0378 No support for fcpi mode.\n");
7367 ftr_rsp++;
7368 }
7369
7370
7371 if (phba->hba_flag & HBA_FCOE_MODE) {
7372 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7373 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7374 else
7375 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7376 }
7377
7378
7379
7380
7381
7382
7383 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7384 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7385 phba->cfg_enable_bg = 0;
7386 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7387 ftr_rsp++;
7388 }
7389 }
7390
7391 if (phba->max_vpi && phba->cfg_enable_npiv &&
7392 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7393 ftr_rsp++;
7394
7395 if (ftr_rsp) {
7396 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7397 "0379 Feature Mismatch Data: x%08x %08x "
7398 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7399 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7400 phba->cfg_enable_npiv, phba->max_vpi);
7401 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7402 phba->cfg_enable_bg = 0;
7403 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7404 phba->cfg_enable_npiv = 0;
7405 }
7406
7407
7408 spin_lock_irq(&phba->hbalock);
7409 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7410 spin_unlock_irq(&phba->hbalock);
7411
7412
7413
7414
7415
7416 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7417 if (rc) {
7418 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7419 "2920 Failed to alloc Resource IDs "
7420 "rc = x%x\n", rc);
7421 goto out_free_mbox;
7422 }
7423
7424 lpfc_set_host_data(phba, mboxq);
7425
7426 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7427 if (rc) {
7428 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7429 "2134 Failed to set host os driver version %x",
7430 rc);
7431 }
7432
7433
7434 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7435 if (rc) {
7436 phba->link_state = LPFC_HBA_ERROR;
7437 rc = -ENOMEM;
7438 goto out_free_mbox;
7439 }
7440
7441 mboxq->vport = vport;
7442 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7443 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7444 if (rc == MBX_SUCCESS) {
7445 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7446 rc = 0;
7447 }
7448
7449
7450
7451
7452
7453 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7454 kfree(mp);
7455 mboxq->ctx_buf = NULL;
7456 if (unlikely(rc)) {
7457 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7458 "0382 READ_SPARAM command failed "
7459 "status %d, mbxStatus x%x\n",
7460 rc, bf_get(lpfc_mqe_status, mqe));
7461 phba->link_state = LPFC_HBA_ERROR;
7462 rc = -EIO;
7463 goto out_free_mbox;
7464 }
7465
7466 lpfc_update_vport_wwn(vport);
7467
7468
7469 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7470 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7471
7472
7473 rc = lpfc_sli4_queue_create(phba);
7474 if (rc) {
7475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7476 "3089 Failed to allocate queues\n");
7477 rc = -ENODEV;
7478 goto out_free_mbox;
7479 }
7480
7481 rc = lpfc_sli4_queue_setup(phba);
7482 if (unlikely(rc)) {
7483 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7484 "0381 Error %d during queue setup.\n ", rc);
7485 goto out_stop_timers;
7486 }
7487
7488 lpfc_sli4_setup(phba);
7489 lpfc_sli4_queue_init(phba);
7490
7491
7492 rc = lpfc_sli4_els_sgl_update(phba);
7493 if (unlikely(rc)) {
7494 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7495 "1400 Failed to update xri-sgl size and "
7496 "mapping: %d\n", rc);
7497 goto out_destroy_queue;
7498 }
7499
7500
7501 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7502 phba->sli4_hba.els_xri_cnt);
7503 if (unlikely(rc < 0)) {
7504 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7505 "0582 Error %d during els sgl post "
7506 "operation\n", rc);
7507 rc = -ENODEV;
7508 goto out_destroy_queue;
7509 }
7510 phba->sli4_hba.els_xri_cnt = rc;
7511
7512 if (phba->nvmet_support) {
7513
7514 rc = lpfc_sli4_nvmet_sgl_update(phba);
7515 if (unlikely(rc)) {
7516 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7517 "6308 Failed to update nvmet-sgl size "
7518 "and mapping: %d\n", rc);
7519 goto out_destroy_queue;
7520 }
7521
7522
7523 rc = lpfc_sli4_repost_sgl_list(
7524 phba,
7525 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7526 phba->sli4_hba.nvmet_xri_cnt);
7527 if (unlikely(rc < 0)) {
7528 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7529 "3117 Error %d during nvmet "
7530 "sgl post\n", rc);
7531 rc = -ENODEV;
7532 goto out_destroy_queue;
7533 }
7534 phba->sli4_hba.nvmet_xri_cnt = rc;
7535
7536 cnt = phba->cfg_iocb_cnt * 1024;
7537
7538 cnt += phba->sli4_hba.nvmet_xri_cnt;
7539 } else {
7540
7541 rc = lpfc_sli4_io_sgl_update(phba);
7542 if (unlikely(rc)) {
7543 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7544 "6082 Failed to update nvme-sgl size "
7545 "and mapping: %d\n", rc);
7546 goto out_destroy_queue;
7547 }
7548
7549
7550 rc = lpfc_sli4_repost_io_sgl_list(phba);
7551 if (unlikely(rc)) {
7552 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7553 "6116 Error %d during nvme sgl post "
7554 "operation\n", rc);
7555
7556
7557 rc = -ENODEV;
7558 goto out_destroy_queue;
7559 }
7560 cnt = phba->cfg_iocb_cnt * 1024;
7561 }
7562
7563 if (!phba->sli.iocbq_lookup) {
7564
7565 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7566 "2821 initialize iocb list %d total %d\n",
7567 phba->cfg_iocb_cnt, cnt);
7568 rc = lpfc_init_iocb_list(phba, cnt);
7569 if (rc) {
7570 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7571 "1413 Failed to init iocb list.\n");
7572 goto out_destroy_queue;
7573 }
7574 }
7575
7576 if (phba->nvmet_support)
7577 lpfc_nvmet_create_targetport(phba);
7578
7579 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7580
7581 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7582 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7583 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7584 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7585 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7586 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7587 rqbp->buffer_count = 0;
7588
7589 lpfc_post_rq_buffer(
7590 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7591 phba->sli4_hba.nvmet_mrq_data[i],
7592 phba->cfg_nvmet_mrq_post, i);
7593 }
7594 }
7595
7596
7597 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7598 if (unlikely(rc)) {
7599 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7600 "0393 Error %d during rpi post operation\n",
7601 rc);
7602 rc = -ENODEV;
7603 goto out_destroy_queue;
7604 }
7605 lpfc_sli4_node_prep(phba);
7606
7607 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7608 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7609
7610
7611
7612 lpfc_reg_fcfi(phba, mboxq);
7613 mboxq->vport = phba->pport;
7614 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7615 if (rc != MBX_SUCCESS)
7616 goto out_unset_queue;
7617 rc = 0;
7618 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7619 &mboxq->u.mqe.un.reg_fcfi);
7620 } else {
7621
7622
7623
7624 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7625 mboxq->vport = phba->pport;
7626 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7627 if (rc != MBX_SUCCESS)
7628 goto out_unset_queue;
7629 rc = 0;
7630 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7631 &mboxq->u.mqe.un.reg_fcfi_mrq);
7632
7633
7634 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7635 mboxq->vport = phba->pport;
7636 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7637 if (rc != MBX_SUCCESS)
7638 goto out_unset_queue;
7639 rc = 0;
7640 }
7641
7642 lpfc_sli_read_link_ste(phba);
7643 }
7644
7645
7646
7647
7648 if (phba->nvmet_support == 0) {
7649 if (phba->sli4_hba.io_xri_cnt == 0) {
7650 len = lpfc_new_io_buf(
7651 phba, phba->sli4_hba.io_xri_max);
7652 if (len == 0) {
7653 rc = -ENOMEM;
7654 goto out_unset_queue;
7655 }
7656
7657 if (phba->cfg_xri_rebalancing)
7658 lpfc_create_multixri_pools(phba);
7659 }
7660 } else {
7661 phba->cfg_xri_rebalancing = 0;
7662 }
7663
7664
7665 spin_lock_irq(&phba->hbalock);
7666 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7667 spin_unlock_irq(&phba->hbalock);
7668
7669
7670 lpfc_sli4_rb_setup(phba);
7671
7672
7673 phba->fcf.fcf_flag = 0;
7674 phba->fcf.current_rec.flag = 0;
7675
7676
7677 mod_timer(&vport->els_tmofunc,
7678 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7679
7680
7681 mod_timer(&phba->hb_tmofunc,
7682 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7683 phba->hb_outstanding = 0;
7684 phba->last_completion_time = jiffies;
7685
7686
7687 if (phba->cfg_auto_imax)
7688 queue_delayed_work(phba->wq, &phba->eq_delay_work,
7689 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7690
7691
7692 mod_timer(&phba->eratt_poll,
7693 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7694
7695
7696 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7697 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7698 if (!rc) {
7699 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7700 "2829 This device supports "
7701 "Advanced Error Reporting (AER)\n");
7702 spin_lock_irq(&phba->hbalock);
7703 phba->hba_flag |= HBA_AER_ENABLED;
7704 spin_unlock_irq(&phba->hbalock);
7705 } else {
7706 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7707 "2830 This device does not support "
7708 "Advanced Error Reporting (AER)\n");
7709 phba->cfg_aer_support = 0;
7710 }
7711 rc = 0;
7712 }
7713
7714
7715
7716
7717
7718 spin_lock_irq(&phba->hbalock);
7719 phba->link_state = LPFC_LINK_DOWN;
7720
7721
7722 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7723 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7724 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7725 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7726 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7727 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7728 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7729 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
7730 spin_unlock_irq(&phba->hbalock);
7731
7732
7733 lpfc_sli4_arm_cqeq_intr(phba);
7734
7735
7736 phba->sli4_hba.intr_enable = 1;
7737
7738 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7739 (phba->hba_flag & LINK_DISABLED)) {
7740 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7741 "3103 Adapter Link is disabled.\n");
7742 lpfc_down_link(phba, mboxq);
7743 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7744 if (rc != MBX_SUCCESS) {
7745 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7746 "3104 Adapter failed to issue "
7747 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7748 goto out_io_buff_free;
7749 }
7750 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7751
7752 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7753 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7754 if (rc)
7755 goto out_io_buff_free;
7756 }
7757 }
7758 mempool_free(mboxq, phba->mbox_mem_pool);
7759 return rc;
7760 out_io_buff_free:
7761
7762 lpfc_io_free(phba);
7763 out_unset_queue:
7764
7765 lpfc_sli4_queue_unset(phba);
7766 out_destroy_queue:
7767 lpfc_free_iocb_list(phba);
7768 lpfc_sli4_queue_destroy(phba);
7769 out_stop_timers:
7770 lpfc_stop_hba_timers(phba);
7771 out_free_mbox:
7772 mempool_free(mboxq, phba->mbox_mem_pool);
7773 return rc;
7774 }
7775
7776
7777
7778
7779
7780
7781
7782
7783
7784
7785
7786
7787
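/**
 * lpfc_mbox_timeout - Timeout callback for the mailbox timer
 * @t: timer context, used to derive the HBA.
 *
 * Called when a mailbox command does not complete in time. Posts a
 * WORKER_MBOX_TMO event and wakes the worker thread; the actual recovery
 * is done by lpfc_mbox_timeout_handler.
 */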
7788 void
7789 lpfc_mbox_timeout(struct timer_list *t)
7790 {
7791 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7792 unsigned long iflag;
7793 uint32_t tmo_posted;
7794
7795 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7796 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7797 if (!tmo_posted)
7798 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7799 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7800
7801 if (!tmo_posted)
7802 lpfc_worker_wake_up(phba);
7803 return;
7804 }
7805
7806
7807
7808
7809
7810
7811
7812
7813
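/**
 * lpfc_sli4_mbox_completions_pending - check for pending mailbox completions
 * @phba: Pointer to HBA context object.
 *
 * Walks the valid entries of the mailbox completion queue and returns true
 * if a non-async mailbox completion is present.
 */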
7814 static bool
7815 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7816 {
7817
7818 uint32_t idx;
7819 struct lpfc_queue *mcq;
7820 struct lpfc_mcqe *mcqe;
7821 bool pending_completions = false;
7822 uint8_t qe_valid;
7823
7824 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7825 return false;
7826
7827
7828
7829 mcq = phba->sli4_hba.mbx_cq;
7830 idx = mcq->hba_index;
7831 qe_valid = mcq->qe_valid;
7832 while (bf_get_le32(lpfc_cqe_valid,
7833 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
7834 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
7835 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7836 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7837 pending_completions = true;
7838 break;
7839 }
7840 idx = (idx + 1) % mcq->entry_count;
7841 if (mcq->hba_index == idx)
7842 break;
7843
7844
7845 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
7846 qe_valid = (qe_valid) ? 0 : 1;
7847 }
7848 return pending_completions;
7849
7850 }
7851
7852
7853
7854
7855
7856
7857
7858
7859
7860
7861
7862
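/**
 * lpfc_sli4_process_missed_mbox_completions - process missed mbox completions
 * @phba: Pointer to HBA context object.
 *
 * On SLI4 an interrupt can be missed, which would make a completed mailbox
 * appear to have timed out. This routine finds the EQ associated with the
 * mailbox CQ and, if a completion is pending, processes that EQ so the
 * completion is delivered. Returns true if a completion was pending.
 */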
7863 static bool
7864 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7865 {
7866 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
7867 uint32_t eqidx;
7868 struct lpfc_queue *fpeq = NULL;
7869 struct lpfc_queue *eq;
7870 bool mbox_pending;
7871
7872 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7873 return false;
7874
7875
7876 if (sli4_hba->hdwq) {
7877 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
7878 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
7879 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
7880 fpeq = eq;
7881 break;
7882 }
7883 }
7884 }
7885 if (!fpeq)
7886 return false;
7887
7888
7889
7890 sli4_hba->sli4_eq_clr_intr(fpeq);
7891
7892
7893
7894 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7895
7896
7897
7898
7899
7900
7901
7902
7903 if (mbox_pending)
7904
7905 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
7906 else
7907
7908 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
7909
7910 return mbox_pending;
7911
7912 }
7913
7914
7915
7916
7917
7918
7919
7920
7921
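/**
 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
 * @phba: Pointer to HBA context object.
 *
 * First retries delivery of any missed completion; otherwise logs the
 * timed-out command, marks the SLI layer inactive, aborts the outstanding
 * FCP rings and resets the HBA to recover.
 */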
7922 void
7923 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7924 {
7925 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7926 MAILBOX_t *mb = NULL;
7927
7928 struct lpfc_sli *psli = &phba->sli;
7929
7930
7931 if (lpfc_sli4_process_missed_mbox_completions(phba))
7932 return;
7933
7934 if (pmbox != NULL)
7935 mb = &pmbox->u.mb;
7936
7937
7938
7939
7940
7941 spin_lock_irq(&phba->hbalock);
7942 if (pmbox == NULL) {
7943 lpfc_printf_log(phba, KERN_WARNING,
7944 LOG_MBOX | LOG_SLI,
7945 "0353 Active Mailbox cleared - mailbox timeout "
7946 "exiting\n");
7947 spin_unlock_irq(&phba->hbalock);
7948 return;
7949 }
7950
7951
7952 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7953 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
7954 mb->mbxCommand,
7955 phba->pport->port_state,
7956 phba->sli.sli_flag,
7957 phba->sli.mbox_active);
7958 spin_unlock_irq(&phba->hbalock);
7959
7960
7961
7962
7963
7964 spin_lock_irq(&phba->pport->work_port_lock);
7965 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7966 spin_unlock_irq(&phba->pport->work_port_lock);
7967 spin_lock_irq(&phba->hbalock);
7968 phba->link_state = LPFC_LINK_UNKNOWN;
7969 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7970 spin_unlock_irq(&phba->hbalock);
7971
7972 lpfc_sli_abort_fcp_rings(phba);
7973
7974 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7975 "0345 Resetting board due to mailbox timeout\n");
7976
7977
7978 lpfc_reset_hba(phba);
7979 }
7980
7981
7982
7983
7984
7985
7986
7987
7988
7989
7990
7991
7992
7993
7994
7995
7996
7997
7998
7999
8000
8001
8002
8003
8004
8005
8006
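/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object, or NULL to service the mbox queue.
 * @flag: MBX_POLL for synchronous or MBX_NOWAIT for asynchronous issue.
 *
 * Writes the command to the host mailbox (or to SLIM before SLI is active)
 * and rings the chip attention doorbell. For MBX_POLL it busy-waits for the
 * chip to release ownership and copies the result back. Returns MBX_SUCCESS,
 * MBX_BUSY if another command is active, the polled mailbox status, or
 * MBX_NOT_FINISHED on error.
 */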
8007 static int
8008 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8009 uint32_t flag)
8010 {
8011 MAILBOX_t *mbx;
8012 struct lpfc_sli *psli = &phba->sli;
8013 uint32_t status, evtctr;
8014 uint32_t ha_copy, hc_copy;
8015 int i;
8016 unsigned long timeout;
8017 unsigned long drvr_flag = 0;
8018 uint32_t word0, ldata;
8019 void __iomem *to_slim;
8020 int processing_queue = 0;
8021
8022 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8023 if (!pmbox) {
8024 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8025
8026 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8027 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8028 return MBX_SUCCESS;
8029 }
8030 processing_queue = 1;
8031 pmbox = lpfc_mbox_get(phba);
8032 if (!pmbox) {
8033 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8034 return MBX_SUCCESS;
8035 }
8036 }
8037
8038 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8039 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
8040 if (!pmbox->vport) {
8041 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8042 lpfc_printf_log(phba, KERN_ERR,
8043 LOG_MBOX | LOG_VPORT,
8044 "1806 Mbox x%x failed. No vport\n",
8045 pmbox->u.mb.mbxCommand);
8046 dump_stack();
8047 goto out_not_finished;
8048 }
8049 }
8050
8051
8052 if (unlikely(pci_channel_offline(phba->pcidev))) {
8053 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8054 goto out_not_finished;
8055 }
8056
8057
8058 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8059 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8060 goto out_not_finished;
8061 }
8062
8063 psli = &phba->sli;
8064
8065 mbx = &pmbox->u.mb;
8066 status = MBX_SUCCESS;
8067
8068 if (phba->link_state == LPFC_HBA_ERROR) {
8069 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8070
8071
8072 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8073 "(%d):0311 Mailbox command x%x cannot "
8074 "issue Data: x%x x%x\n",
8075 pmbox->vport ? pmbox->vport->vpi : 0,
8076 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8077 goto out_not_finished;
8078 }
8079
8080 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8081 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8082 !(hc_copy & HC_MBINT_ENA)) {
8083 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8084 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8085 "(%d):2528 Mailbox command x%x cannot "
8086 "issue Data: x%x x%x\n",
8087 pmbox->vport ? pmbox->vport->vpi : 0,
8088 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8089 goto out_not_finished;
8090 }
8091 }
8092
8093 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8094
8095
8096
8097
8098
8099 if (flag & MBX_POLL) {
8100 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8101
8102
8103 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8104 "(%d):2529 Mailbox command x%x "
8105 "cannot issue Data: x%x x%x\n",
8106 pmbox->vport ? pmbox->vport->vpi : 0,
8107 pmbox->u.mb.mbxCommand,
8108 psli->sli_flag, flag);
8109 goto out_not_finished;
8110 }
8111
8112 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8113 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8114
8115 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8116 "(%d):2530 Mailbox command x%x "
8117 "cannot issue Data: x%x x%x\n",
8118 pmbox->vport ? pmbox->vport->vpi : 0,
8119 pmbox->u.mb.mbxCommand,
8120 psli->sli_flag, flag);
8121 goto out_not_finished;
8122 }
8123
8124
8125
8126
8127 lpfc_mbox_put(phba, pmbox);
8128
8129
8130 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8131 "(%d):0308 Mbox cmd issue - BUSY Data: "
8132 "x%x x%x x%x x%x\n",
8133 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8134 mbx->mbxCommand,
8135 phba->pport ? phba->pport->port_state : 0xff,
8136 psli->sli_flag, flag);
8137
8138 psli->slistat.mbox_busy++;
8139 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8140
8141 if (pmbox->vport) {
8142 lpfc_debugfs_disc_trc(pmbox->vport,
8143 LPFC_DISC_TRC_MBOX_VPORT,
8144 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
8145 (uint32_t)mbx->mbxCommand,
8146 mbx->un.varWords[0], mbx->un.varWords[1]);
8147 }
8148 else {
8149 lpfc_debugfs_disc_trc(phba->pport,
8150 LPFC_DISC_TRC_MBOX,
8151 "MBOX Bsy: cmd:x%x mb:x%x x%x",
8152 (uint32_t)mbx->mbxCommand,
8153 mbx->un.varWords[0], mbx->un.varWords[1]);
8154 }
8155
8156 return MBX_BUSY;
8157 }
8158
8159 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8160
8161
8162 if (flag != MBX_POLL) {
8163 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8164 (mbx->mbxCommand != MBX_KILL_BOARD)) {
8165 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8166 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8167
8168 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8169 "(%d):2531 Mailbox command x%x "
8170 "cannot issue Data: x%x x%x\n",
8171 pmbox->vport ? pmbox->vport->vpi : 0,
8172 pmbox->u.mb.mbxCommand,
8173 psli->sli_flag, flag);
8174 goto out_not_finished;
8175 }
8176
8177 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8178 1000);
8179 mod_timer(&psli->mbox_tmo, jiffies + timeout);
8180 }
8181
8182
8183 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8184 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8185 "x%x\n",
8186 pmbox->vport ? pmbox->vport->vpi : 0,
8187 mbx->mbxCommand,
8188 phba->pport ? phba->pport->port_state : 0xff,
8189 psli->sli_flag, flag);
8190
8191 if (mbx->mbxCommand != MBX_HEARTBEAT) {
8192 if (pmbox->vport) {
8193 lpfc_debugfs_disc_trc(pmbox->vport,
8194 LPFC_DISC_TRC_MBOX_VPORT,
8195 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8196 (uint32_t)mbx->mbxCommand,
8197 mbx->un.varWords[0], mbx->un.varWords[1]);
8198 }
8199 else {
8200 lpfc_debugfs_disc_trc(phba->pport,
8201 LPFC_DISC_TRC_MBOX,
8202 "MBOX Send: cmd:x%x mb:x%x x%x",
8203 (uint32_t)mbx->mbxCommand,
8204 mbx->un.varWords[0], mbx->un.varWords[1]);
8205 }
8206 }
8207
8208 psli->slistat.mbox_cmd++;
8209 evtctr = psli->slistat.mbox_event;
8210
8211
8212 mbx->mbxOwner = OWN_CHIP;
8213
8214 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8215
8216 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8217 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8218 = (uint8_t *)phba->mbox_ext
8219 - (uint8_t *)phba->mbox;
8220 }
8221
8222
8223 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8224 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8225 (uint8_t *)phba->mbox_ext,
8226 pmbox->in_ext_byte_len);
8227 }
8228
8229 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8230 } else {
8231
8232 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8233 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8234 = MAILBOX_HBA_EXT_OFFSET;
8235
8236
8237 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8238 lpfc_memcpy_to_slim(phba->MBslimaddr +
8239 MAILBOX_HBA_EXT_OFFSET,
8240 pmbox->ctx_buf, pmbox->in_ext_byte_len);
8241
8242 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8243
8244 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8245 MAILBOX_CMD_SIZE);
8246
8247
8248
8249 to_slim = phba->MBslimaddr + sizeof (uint32_t);
8250 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8251 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8252
8253
8254 ldata = *((uint32_t *)mbx);
8255 to_slim = phba->MBslimaddr;
8256 writel(ldata, to_slim);
8257 readl(to_slim);
8258
8259 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8260
8261 psli->sli_flag |= LPFC_SLI_ACTIVE;
8262 }
8263
8264 wmb();
8265
8266 switch (flag) {
8267 case MBX_NOWAIT:
8268
8269 psli->mbox_active = pmbox;
8270
8271 writel(CA_MBATT, phba->CAregaddr);
8272 readl(phba->CAregaddr);
8273
8274 break;
8275
8276 case MBX_POLL:
8277
8278 psli->mbox_active = NULL;
8279
8280 writel(CA_MBATT, phba->CAregaddr);
8281 readl(phba->CAregaddr);
8282
8283 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8284
8285 word0 = *((uint32_t *)phba->mbox);
8286 word0 = le32_to_cpu(word0);
8287 } else {
8288
8289 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8290 spin_unlock_irqrestore(&phba->hbalock,
8291 drvr_flag);
8292 goto out_not_finished;
8293 }
8294 }
8295
8296
8297 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8298 spin_unlock_irqrestore(&phba->hbalock,
8299 drvr_flag);
8300 goto out_not_finished;
8301 }
8302 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8303 1000) + jiffies;
8304 i = 0;
8305
8306 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8307 (!(ha_copy & HA_MBATT) &&
8308 (phba->link_state > LPFC_WARM_START))) {
8309 if (time_after(jiffies, timeout)) {
8310 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8311 spin_unlock_irqrestore(&phba->hbalock,
8312 drvr_flag);
8313 goto out_not_finished;
8314 }
8315
8316
8317
8318 if (((word0 & OWN_CHIP) != OWN_CHIP)
8319 && (evtctr != psli->slistat.mbox_event))
8320 break;
8321
8322 if (i++ > 10) {
8323 spin_unlock_irqrestore(&phba->hbalock,
8324 drvr_flag);
8325 msleep(1);
8326 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8327 }
8328
8329 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8330
8331 word0 = *((uint32_t *)phba->mbox);
8332 word0 = le32_to_cpu(word0);
8333 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8334 MAILBOX_t *slimmb;
8335 uint32_t slimword0;
8336
8337 slimword0 = readl(phba->MBslimaddr);
8338 slimmb = (MAILBOX_t *)&slimword0;
8339 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8340 && slimmb->mbxStatus) {
8341 psli->sli_flag &=
8342 ~LPFC_SLI_ACTIVE;
8343 word0 = slimword0;
8344 }
8345 }
8346 } else {
8347
8348 word0 = readl(phba->MBslimaddr);
8349 }
8350
8351 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8352 spin_unlock_irqrestore(&phba->hbalock,
8353 drvr_flag);
8354 goto out_not_finished;
8355 }
8356 }
8357
8358 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8359
8360 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8361 MAILBOX_CMD_SIZE);
8362
8363 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8364 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8365 pmbox->ctx_buf,
8366 pmbox->out_ext_byte_len);
8367 }
8368 } else {
8369
8370 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8371 MAILBOX_CMD_SIZE);
8372
8373 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8374 lpfc_memcpy_from_slim(
8375 pmbox->ctx_buf,
8376 phba->MBslimaddr +
8377 MAILBOX_HBA_EXT_OFFSET,
8378 pmbox->out_ext_byte_len);
8379 }
8380 }
8381
8382 writel(HA_MBATT, phba->HAregaddr);
8383 readl(phba->HAregaddr);
8384
8385 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8386 status = mbx->mbxStatus;
8387 }
8388
8389 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8390 return status;
8391
8392 out_not_finished:
8393 if (processing_queue) {
8394 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8395 lpfc_mbox_cmpl_put(phba, pmbox);
8396 }
8397 return MBX_NOT_FINISHED;
8398 }
8399
8400
8401
8402
8403
8404
8405
8406
8407
8408
8409
8410
8411
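/**
 * lpfc_sli4_async_mbox_block - Block posting async mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * Marks the async mailbox queue blocked and waits for any active mailbox
 * command to finish. Returns 0 on success, 1 if the wait timed out (in
 * which case the block flag is cleared again).
 */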
8412 static int
8413 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8414 {
8415 struct lpfc_sli *psli = &phba->sli;
8416 int rc = 0;
8417 unsigned long timeout = 0;
8418
8419
8420 spin_lock_irq(&phba->hbalock);
8421 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8422
8423
8424
8425 if (phba->sli.mbox_active)
8426 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8427 phba->sli.mbox_active) *
8428 1000) + jiffies;
8429 spin_unlock_irq(&phba->hbalock);
8430
8431
8432 if (timeout)
8433 lpfc_sli4_process_missed_mbox_completions(phba);
8434
8435
8436 while (phba->sli.mbox_active) {
8437
8438 msleep(2);
8439 if (time_after(jiffies, timeout)) {
8440
8441 rc = 1;
8442 break;
8443 }
8444 }
8445
8446
8447 if (rc) {
8448 spin_lock_irq(&phba->hbalock);
8449 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8450 spin_unlock_irq(&phba->hbalock);
8451 }
8452 return rc;
8453 }
8454
8455
8456
8457
8458
8459
8460
8461
8462
8463
8464
8465
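/**
 * lpfc_sli4_async_mbox_unblock - Unblock posting async mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * Clears the async mailbox block flag and wakes the worker thread so any
 * queued mailbox commands get posted.
 */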
8466 static void
8467 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8468 {
8469 struct lpfc_sli *psli = &phba->sli;
8470
8471 spin_lock_irq(&phba->hbalock);
8472 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8473
8474 spin_unlock_irq(&phba->hbalock);
8475 return;
8476 }
8477
8478
8479
8480
8481
8482
8483 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8484 spin_unlock_irq(&phba->hbalock);
8485
8486
8487 lpfc_worker_wake_up(phba);
8488 }
8489
8490
8491
8492
8493
8494
8495
8496
8497
8498
8499
8500
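/**
 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object, used only to derive the timeout.
 *
 * Polls the bootstrap mailbox register until its ready bit is set.
 * Returns 0 when ready, MBXERR_ERROR on timeout.
 */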
8501 static int
8502 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8503 {
8504 uint32_t db_ready;
8505 unsigned long timeout;
8506 struct lpfc_register bmbx_reg;
8507
8508 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8509 * 1000) + jiffies;
8510
8511 do {
8512 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8513 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8514 if (!db_ready)
8515 mdelay(2);
8516
8517 if (time_after(jiffies, timeout))
8518 return MBXERR_ERROR;
8519 } while (!db_ready);
8520
8521 return 0;
8522 }
8523
8524
8525
8526
8527
8528
8529
8530
8531
8532
8533
8534
8535
8536
8537
8538
8539
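/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox via the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * Copies the MQE into the bootstrap mailbox region, writes its DMA address
 * (high word, then low word) to the BMBX register, and polls for completion.
 * The MQE and MCQE are copied back and the MCQE status is folded into the
 * mailbox status. Returns MBX_SUCCESS or MBXERR_ERROR.
 */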
8540 static int
8541 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8542 {
8543 int rc = MBX_SUCCESS;
8544 unsigned long iflag;
8545 uint32_t mcqe_status;
8546 uint32_t mbx_cmnd;
8547 struct lpfc_sli *psli = &phba->sli;
8548 struct lpfc_mqe *mb = &mboxq->u.mqe;
8549 struct lpfc_bmbx_create *mbox_rgn;
8550 struct dma_address *dma_address;
8551
8552
8553
8554
8555
8556 spin_lock_irqsave(&phba->hbalock, iflag);
8557 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8558 spin_unlock_irqrestore(&phba->hbalock, iflag);
8559 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8560 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8561 "cannot issue Data: x%x x%x\n",
8562 mboxq->vport ? mboxq->vport->vpi : 0,
8563 mboxq->u.mb.mbxCommand,
8564 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8565 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8566 psli->sli_flag, MBX_POLL);
8567 return MBXERR_ERROR;
8568 }
8569
8570 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8571 phba->sli.mbox_active = mboxq;
8572 spin_unlock_irqrestore(&phba->hbalock, iflag);
8573
8574
8575 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8576 if (rc)
8577 goto exit;
8578
8579
8580
8581
8582
8583 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8584 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8585 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8586 sizeof(struct lpfc_mqe));
8587
8588
8589 dma_address = &phba->sli4_hba.bmbx.dma_address;
8590 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8591
8592
8593 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8594 if (rc)
8595 goto exit;
8596
8597
8598 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8599
8600
8601 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8602 if (rc)
8603 goto exit;
8604
8605
8606
8607
8608
8609
8610 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8611 sizeof(struct lpfc_mqe));
8612 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8613 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8614 sizeof(struct lpfc_mcqe));
8615 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8616
8617
8618
8619
8620
8621 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8622 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8623 bf_set(lpfc_mqe_status, mb,
8624 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8625 rc = MBXERR_ERROR;
8626 } else
8627 lpfc_sli4_swap_str(phba, mboxq);
8628
8629 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8630 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8631 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8632 " x%x x%x CQ: x%x x%x x%x x%x\n",
8633 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8634 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8635 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8636 bf_get(lpfc_mqe_status, mb),
8637 mb->un.mb_words[0], mb->un.mb_words[1],
8638 mb->un.mb_words[2], mb->un.mb_words[3],
8639 mb->un.mb_words[4], mb->un.mb_words[5],
8640 mb->un.mb_words[6], mb->un.mb_words[7],
8641 mb->un.mb_words[8], mb->un.mb_words[9],
8642 mb->un.mb_words[10], mb->un.mb_words[11],
8643 mb->un.mb_words[12], mboxq->mcqe.word0,
8644 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8645 mboxq->mcqe.trailer);
8646 exit:
8647
8648 spin_lock_irqsave(&phba->hbalock, iflag);
8649 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8650 phba->sli.mbox_active = NULL;
8651 spin_unlock_irqrestore(&phba->hbalock, iflag);
8652 return rc;
8653 }
8654
8655
8656
8657
8658
8659
8660
8661
8662
8663
8664
8665
8666
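/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 * @flag: MBX_POLL for synchronous or MBX_NOWAIT for asynchronous issue.
 *
 * MBX_POLL commands are sent synchronously through the bootstrap mailbox,
 * blocking the async queue first when interrupts are enabled; MBX_NOWAIT
 * commands are enqueued for the worker thread and return MBX_BUSY.
 */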
8667 static int
8668 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8669 uint32_t flag)
8670 {
8671 struct lpfc_sli *psli = &phba->sli;
8672 unsigned long iflags;
8673 int rc;
8674
8675
8676 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8677
8678 rc = lpfc_mbox_dev_check(phba);
8679 if (unlikely(rc)) {
8680 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8681 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8682 "cannot issue Data: x%x x%x\n",
8683 mboxq->vport ? mboxq->vport->vpi : 0,
8684 mboxq->u.mb.mbxCommand,
8685 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8686 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8687 psli->sli_flag, flag);
8688 goto out_not_finished;
8689 }
8690
8691
8692 if (!phba->sli4_hba.intr_enable) {
8693 if (flag == MBX_POLL)
8694 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8695 else
8696 rc = -EIO;
8697 if (rc != MBX_SUCCESS)
8698 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8699 "(%d):2541 Mailbox command x%x "
8700 "(x%x/x%x) failure: "
8701 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8702 "Data: x%x x%x\n,",
8703 mboxq->vport ? mboxq->vport->vpi : 0,
8704 mboxq->u.mb.mbxCommand,
8705 lpfc_sli_config_mbox_subsys_get(phba,
8706 mboxq),
8707 lpfc_sli_config_mbox_opcode_get(phba,
8708 mboxq),
8709 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8710 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8711 bf_get(lpfc_mcqe_ext_status,
8712 &mboxq->mcqe),
8713 psli->sli_flag, flag);
8714 return rc;
8715 } else if (flag == MBX_POLL) {
8716 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8717 "(%d):2542 Try to issue mailbox command "
8718 "x%x (x%x/x%x) synchronously ahead of async "
8719 "mailbox command queue: x%x x%x\n",
8720 mboxq->vport ? mboxq->vport->vpi : 0,
8721 mboxq->u.mb.mbxCommand,
8722 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8723 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8724 psli->sli_flag, flag);
8725
8726 rc = lpfc_sli4_async_mbox_block(phba);
8727 if (!rc) {
8728
8729 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8730 if (rc != MBX_SUCCESS)
8731 lpfc_printf_log(phba, KERN_WARNING,
8732 LOG_MBOX | LOG_SLI,
8733 "(%d):2597 Sync Mailbox command "
8734 "x%x (x%x/x%x) failure: "
8735 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8736 "Data: x%x x%x\n,",
8737 mboxq->vport ? mboxq->vport->vpi : 0,
8738 mboxq->u.mb.mbxCommand,
8739 lpfc_sli_config_mbox_subsys_get(phba,
8740 mboxq),
8741 lpfc_sli_config_mbox_opcode_get(phba,
8742 mboxq),
8743 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8744 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8745 bf_get(lpfc_mcqe_ext_status,
8746 &mboxq->mcqe),
8747 psli->sli_flag, flag);
8748
8749 lpfc_sli4_async_mbox_unblock(phba);
8750 }
8751 return rc;
8752 }
8753
8754
8755 rc = lpfc_mbox_cmd_check(phba, mboxq);
8756 if (rc) {
8757 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8758 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8759 "cannot issue Data: x%x x%x\n",
8760 mboxq->vport ? mboxq->vport->vpi : 0,
8761 mboxq->u.mb.mbxCommand,
8762 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8763 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8764 psli->sli_flag, flag);
8765 goto out_not_finished;
8766 }
8767
8768
8769 psli->slistat.mbox_busy++;
8770 spin_lock_irqsave(&phba->hbalock, iflags);
8771 lpfc_mbox_put(phba, mboxq);
8772 spin_unlock_irqrestore(&phba->hbalock, iflags);
8773 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8774 "(%d):0354 Mbox cmd issue - Enqueue Data: "
8775 "x%x (x%x/x%x) x%x x%x x%x\n",
8776 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8777 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8778 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8779 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8780 phba->pport->port_state,
8781 psli->sli_flag, MBX_NOWAIT);
8782
8783 lpfc_worker_wake_up(phba);
8784
8785 return MBX_BUSY;
8786
8787 out_not_finished:
8788 return MBX_NOT_FINISHED;
8789 }
8790
8791
8792
8793
8794
8795
8796
8797
8798
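/**
 * lpfc_sli4_post_async_mbox - Post the next queued mailbox to the device
 * @phba: Pointer to HBA context object.
 *
 * Called by the worker thread: dequeues one mailbox command, marks it
 * active, starts the mailbox timer and posts it to the mailbox work queue.
 * Returns MBX_SUCCESS if posted (or nothing to do), MBX_NOT_FINISHED on
 * failure.
 */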
8799 int
8800 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8801 {
8802 struct lpfc_sli *psli = &phba->sli;
8803 LPFC_MBOXQ_t *mboxq;
8804 int rc = MBX_SUCCESS;
8805 unsigned long iflags;
8806 struct lpfc_mqe *mqe;
8807 uint32_t mbx_cmnd;
8808
8809
8810 if (unlikely(!phba->sli4_hba.intr_enable))
8811 return MBX_NOT_FINISHED;
8812
8813
8814 spin_lock_irqsave(&phba->hbalock, iflags);
8815 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8816 spin_unlock_irqrestore(&phba->hbalock, iflags);
8817 return MBX_NOT_FINISHED;
8818 }
8819 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8820 spin_unlock_irqrestore(&phba->hbalock, iflags);
8821 return MBX_NOT_FINISHED;
8822 }
8823 if (unlikely(phba->sli.mbox_active)) {
8824 spin_unlock_irqrestore(&phba->hbalock, iflags);
8825 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8826 "0384 There is pending active mailbox cmd\n");
8827 return MBX_NOT_FINISHED;
8828 }
8829
8830 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8831
8832
8833 mboxq = lpfc_mbox_get(phba);
8834
8835
8836 if (!mboxq) {
8837 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8838 spin_unlock_irqrestore(&phba->hbalock, iflags);
8839 return MBX_SUCCESS;
8840 }
8841 phba->sli.mbox_active = mboxq;
8842 spin_unlock_irqrestore(&phba->hbalock, iflags);
8843
8844
8845 rc = lpfc_mbox_dev_check(phba);
8846 if (unlikely(rc))
8847
8848 goto out_not_finished;
8849
8850
8851 mqe = &mboxq->u.mqe;
8852 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8853
8854
8855 mod_timer(&psli->mbox_tmo, (jiffies +
8856 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
8857
8858 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8859 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
8860 "x%x x%x\n",
8861 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8862 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8863 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8864 phba->pport->port_state, psli->sli_flag);
8865
8866 if (mbx_cmnd != MBX_HEARTBEAT) {
8867 if (mboxq->vport) {
8868 lpfc_debugfs_disc_trc(mboxq->vport,
8869 LPFC_DISC_TRC_MBOX_VPORT,
8870 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8871 mbx_cmnd, mqe->un.mb_words[0],
8872 mqe->un.mb_words[1]);
8873 } else {
8874 lpfc_debugfs_disc_trc(phba->pport,
8875 LPFC_DISC_TRC_MBOX,
8876 "MBOX Send: cmd:x%x mb:x%x x%x",
8877 mbx_cmnd, mqe->un.mb_words[0],
8878 mqe->un.mb_words[1]);
8879 }
8880 }
8881 psli->slistat.mbox_cmd++;
8882
8883
8884 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8885 if (rc != MBX_SUCCESS) {
8886 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8887 "(%d):2533 Mailbox command x%x (x%x/x%x) "
8888 "cannot issue Data: x%x x%x\n",
8889 mboxq->vport ? mboxq->vport->vpi : 0,
8890 mboxq->u.mb.mbxCommand,
8891 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8892 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8893 psli->sli_flag, MBX_NOWAIT);
8894 goto out_not_finished;
8895 }
8896
8897 return rc;
8898
8899 out_not_finished:
8900 spin_lock_irqsave(&phba->hbalock, iflags);
8901 if (phba->sli.mbox_active) {
8902 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8903 __lpfc_mbox_cmpl_put(phba, mboxq);
8904
8905 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8906 phba->sli.mbox_active = NULL;
8907 }
8908 spin_unlock_irqrestore(&phba->hbalock, iflags);
8909
8910 return MBX_NOT_FINISHED;
8911 }
8912
8913
8914
8915
8916
8917
8918
8919
8920
8921
8922
8923
8924
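/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * Dispatches to the SLI3 or SLI4 issue routine set up in the jump table.
 */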
8925 int
8926 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8927 {
8928 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8929 }
8930
8931
8932
8933
8934
8935
8936
8937
8938
8939
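/**
 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
 * @phba: Pointer to HBA context object.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * Populates the mailbox and SLI entry points for the device group.
 * Returns 0 on success, -ENODEV on an invalid group.
 */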
8940 int
8941 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8942 {
8943
8944 switch (dev_grp) {
8945 case LPFC_PCI_DEV_LP:
8946 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8947 phba->lpfc_sli_handle_slow_ring_event =
8948 lpfc_sli_handle_slow_ring_event_s3;
8949 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8950 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8951 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8952 break;
8953 case LPFC_PCI_DEV_OC:
8954 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8955 phba->lpfc_sli_handle_slow_ring_event =
8956 lpfc_sli_handle_slow_ring_event_s4;
8957 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8958 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8959 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8960 break;
8961 default:
8962 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8963 "1420 Invalid HBA PCI-device group: 0x%x\n",
8964 dev_grp);
8965 return -ENODEV;
8966 break;
8967 }
8968 return 0;
8969 }
8970
8971
8972
8973
8974
8975
8976
8977
8978
8979
8980
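/*
 * __lpfc_sli_ringtx_put - Add an iocb to the ring's txq.
 * Caller must hold the hbalock.
 */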
8981 void
8982 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8983 struct lpfc_iocbq *piocb)
8984 {
8985 lockdep_assert_held(&phba->hbalock);
8986
8987 list_add_tail(&piocb->list, &pring->txq);
8988 }
8989
8990
8991
8992
8993
8994
8995
8996
8997
8998
8999
9000
9001
9002
9003
9004
9005
9006
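/*
 * lpfc_sli_next_iocb - Return the next iocb to submit: an entry from the
 * ring's txq if one is queued, otherwise the caller's iocb (which is then
 * consumed by setting *piocb to NULL). Caller must hold the hbalock.
 */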
9007 static struct lpfc_iocbq *
9008 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9009 struct lpfc_iocbq **piocb)
9010 {
9011 struct lpfc_iocbq *nextiocb;
9012
9013 lockdep_assert_held(&phba->hbalock);
9014
9015 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9016 if (!nextiocb) {
9017 nextiocb = *piocb;
9018 *piocb = NULL;
9019 }
9020
9021 return nextiocb;
9022 }
9023
9024
9025
9026
9027
9028
9029
9030
9031
9032
9033
9034
9035
9036
9037
9038
9039
9040
9041
9042
9043
9044
9045
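/**
 * __lpfc_sli_issue_iocb_s3 - Issue an iocb command to an SLI3 ring
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: SLI_IOCB_RET_IOCB to return busy iocbs instead of queueing them.
 *
 * Validates the adapter and link state (only a few command types may be
 * issued while the link is down), then copies as many txq/new iocbs as fit
 * into ring slots. Returns IOCB_SUCCESS, IOCB_BUSY or IOCB_ERROR. Caller
 * must hold the hbalock.
 */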
9046 static int
9047 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9048 struct lpfc_iocbq *piocb, uint32_t flag)
9049 {
9050 struct lpfc_iocbq *nextiocb;
9051 IOCB_t *iocb;
9052 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9053
9054 lockdep_assert_held(&phba->hbalock);
9055
9056 if (piocb->iocb_cmpl && (!piocb->vport) &&
9057 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9058 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9059 lpfc_printf_log(phba, KERN_ERR,
9060 LOG_SLI | LOG_VPORT,
9061 "1807 IOCB x%x failed. No vport\n",
9062 piocb->iocb.ulpCommand);
9063 dump_stack();
9064 return IOCB_ERROR;
9065 }
9066
9067
9068
9069 if (unlikely(pci_channel_offline(phba->pcidev)))
9070 return IOCB_ERROR;
9071
9072
9073 if (unlikely(phba->hba_flag & DEFER_ERATT))
9074 return IOCB_ERROR;
9075
9076
9077
9078
9079 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9080 return IOCB_ERROR;
9081
9082
9083
9084
9085
9086 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9087 goto iocb_busy;
9088
9089 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9090
9091
9092
9093
9094 switch (piocb->iocb.ulpCommand) {
9095 case CMD_GEN_REQUEST64_CR:
9096 case CMD_GEN_REQUEST64_CX:
9097 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9098 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9099 FC_RCTL_DD_UNSOL_CMD) ||
9100 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9101 MENLO_TRANSPORT_TYPE))
9102
9103 goto iocb_busy;
9104 break;
9105 case CMD_QUE_RING_BUF_CN:
9106 case CMD_QUE_RING_BUF64_CN:
9107
9108
9109
9110
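/*
 * For IOCBs like QUE_RING_BUF that have no response-ring
 * completion, iocb_cmpl must be cleared before issue.
 */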
9111 if (piocb->iocb_cmpl)
9112 piocb->iocb_cmpl = NULL;
9113
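/* fall through */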
9114 case CMD_CREATE_XRI_CR:
9115 case CMD_CLOSE_XRI_CN:
9116 case CMD_CLOSE_XRI_CX:
9117 break;
9118 default:
9119 goto iocb_busy;
9120 }
9121
9122
9123
9124
9125
9126 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9127 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9128 goto iocb_busy;
9129 }
9130
9131 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9132 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9133 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9134
9135 if (iocb)
9136 lpfc_sli_update_ring(phba, pring);
9137 else
9138 lpfc_sli_update_full_ring(phba, pring);
9139
9140 if (!piocb)
9141 return IOCB_SUCCESS;
9142
9143 goto out_busy;
9144
9145 iocb_busy:
9146 pring->stats.iocb_cmd_delay++;
9147
9148 out_busy:
9149
9150 if (!(flag & SLI_IOCB_RET_IOCB)) {
9151 __lpfc_sli_ringtx_put(phba, pring, piocb);
9152 return IOCB_SUCCESS;
9153 }
9154
9155 return IOCB_BUSY;
9156 }
9157
9158
9159
9160
9161
9162
9163
9164
9165
9166
9167
9168
9169
9170
9171
9172
9173
9174
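/**
 * lpfc_sli4_bpl2sgl - Convert the bpl or bde to a sgl
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @sglq: sgl entry whose SGL receives the converted entries.
 *
 * Converts the buffer pointer list (or single BDE) of an iocb into the
 * SLI4 scatter-gather list format. Returns the sglq's XRI tag, or NO_XRI
 * on failure.
 */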
9175 static uint16_t
9176 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9177 struct lpfc_sglq *sglq)
9178 {
9179 uint16_t xritag = NO_XRI;
9180 struct ulp_bde64 *bpl = NULL;
9181 struct ulp_bde64 bde;
9182 struct sli4_sge *sgl = NULL;
9183 struct lpfc_dmabuf *dmabuf;
9184 IOCB_t *icmd;
9185 int numBdes = 0;
9186 int i = 0;
9187 uint32_t offset = 0;
9188 int inbound = 0;
9189
9190 if (!piocbq || !sglq)
9191 return xritag;
9192
9193 sgl = (struct sli4_sge *)sglq->sgl;
9194 icmd = &piocbq->iocb;
9195 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9196 return sglq->sli4_xritag;
9197 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9198 numBdes = icmd->un.genreq64.bdl.bdeSize /
9199 sizeof(struct ulp_bde64);
9200
9201
9202
9203
9204 if (piocbq->context3)
9205 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9206 else
9207 return xritag;
9208
9209 bpl = (struct ulp_bde64 *)dmabuf->virt;
9210 if (!bpl)
9211 return xritag;
9212
9213 for (i = 0; i < numBdes; i++) {
9214
9215 sgl->addr_hi = bpl->addrHigh;
9216 sgl->addr_lo = bpl->addrLow;
9217
9218 sgl->word2 = le32_to_cpu(sgl->word2);
9219 if ((i+1) == numBdes)
9220 bf_set(lpfc_sli4_sge_last, sgl, 1);
9221 else
9222 bf_set(lpfc_sli4_sge_last, sgl, 0);
9223
9224
9225
9226 bde.tus.w = le32_to_cpu(bpl->tus.w);
9227 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9228
9229
9230
9231
9232 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9233
9234 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9235 inbound++;
9236
9237 if (inbound == 1)
9238 offset = 0;
9239 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9240 bf_set(lpfc_sli4_sge_type, sgl,
9241 LPFC_SGE_TYPE_DATA);
9242 offset += bde.tus.f.bdeSize;
9243 }
9244 sgl->word2 = cpu_to_le32(sgl->word2);
9245 bpl++;
9246 sgl++;
9247 }
9248 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9249
9250
9251
9252
9253 sgl->addr_hi =
9254 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9255 sgl->addr_lo =
9256 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9257 sgl->word2 = le32_to_cpu(sgl->word2);
9258 bf_set(lpfc_sli4_sge_last, sgl, 1);
9259 sgl->word2 = cpu_to_le32(sgl->word2);
9260 sgl->sge_len =
9261 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9262 }
9263 return sglq->sli4_xritag;
9264 }
9265
9266
9267
9268
9269
9270
9271
9272
9273
9274
9275
9276
9277
9278
9279
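/**
 * lpfc_sli4_iocb2wqe - Convert the iocb to a wqe
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to command iocb.
 * @wqe: Pointer to the work queue entry to fill.
 *
 * Translates an SLI3-format iocb command into the SLI4 work queue entry
 * layout, command by command. Returns IOCB_ERROR if the translation fails.
 */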
9280 static int
9281 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9282 union lpfc_wqe128 *wqe)
9283 {
9284 uint32_t xmit_len = 0, total_len = 0;
9285 uint8_t ct = 0;
9286 uint32_t fip;
9287 uint32_t abort_tag;
9288 uint8_t command_type = ELS_COMMAND_NON_FIP;
9289 uint8_t cmnd;
9290 uint16_t xritag;
9291 uint16_t abrt_iotag;
9292 struct lpfc_iocbq *abrtiocbq;
9293 struct ulp_bde64 *bpl = NULL;
9294 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9295 int numBdes, i;
9296 struct ulp_bde64 bde;
9297 struct lpfc_nodelist *ndlp;
9298 uint32_t *pcmd;
9299 uint32_t if_type;
9300
9301 fip = phba->hba_flag & HBA_FIP_SUPPORT;
9302
9303 if (iocbq->iocb_flag & LPFC_IO_FCP)
9304 command_type = FCP_COMMAND;
9305 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9306 command_type = ELS_COMMAND_FIP;
9307 else
9308 command_type = ELS_COMMAND_NON_FIP;
9309
9310 if (phba->fcp_embed_io)
9311 memset(wqe, 0, sizeof(union lpfc_wqe128));
9312
9313 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9314
9315 wqe->generic.wqe_com.word7 = 0;
9316 wqe->generic.wqe_com.word10 = 0;
9317
9318 abort_tag = (uint32_t) iocbq->iotag;
9319 xritag = iocbq->sli4_xritag;
9320
9321 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9322 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9323 sizeof(struct ulp_bde64);
9324 bpl = (struct ulp_bde64 *)
9325 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9326 if (!bpl)
9327 return IOCB_ERROR;
9328
9329
9330 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9331 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9332
9333
9334
9335 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
9336 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9337 total_len = 0;
9338 for (i = 0; i < numBdes; i++) {
9339 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9340 total_len += bde.tus.f.bdeSize;
9341 }
9342 } else
9343 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9344
9345 iocbq->iocb.ulpIoTag = iocbq->iotag;
9346 cmnd = iocbq->iocb.ulpCommand;
9347
9348 switch (iocbq->iocb.ulpCommand) {
9349 case CMD_ELS_REQUEST64_CR:
9350 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9351 ndlp = iocbq->context_un.ndlp;
9352 else
9353 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9354 if (!iocbq->iocb.ulpLe) {
9355 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9356 "2007 Only Limited Edition cmd Format"
9357 " supported 0x%x\n",
9358 iocbq->iocb.ulpCommand);
9359 return IOCB_ERROR;
9360 }
9361
9362 wqe->els_req.payload_len = xmit_len;
9363
9364 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9365 iocbq->iocb.ulpTimeout);
9366
9367 bf_set(els_req64_vf, &wqe->els_req, 0);
9368
9369 bf_set(els_req64_vfid, &wqe->els_req, 0);
9370 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9371 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9372 iocbq->iocb.ulpContext);
9373 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9374 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9375
9376 if (command_type == ELS_COMMAND_FIP)
9377 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9378 >> LPFC_FIP_ELS_ID_SHIFT);
9379 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9380 iocbq->context2)->virt);
9381 if_type = bf_get(lpfc_sli_intf_if_type,
9382 &phba->sli4_hba.sli_intf);
9383 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9384 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9385 *pcmd == ELS_CMD_SCR ||
9386 *pcmd == ELS_CMD_RSCN_XMT ||
9387 *pcmd == ELS_CMD_FDISC ||
9388 *pcmd == ELS_CMD_LOGO ||
9389 *pcmd == ELS_CMD_PLOGI)) {
9390 bf_set(els_req64_sp, &wqe->els_req, 1);
9391 bf_set(els_req64_sid, &wqe->els_req,
9392 iocbq->vport->fc_myDID);
9393 if ((*pcmd == ELS_CMD_FLOGI) &&
9394 !(phba->fc_topology ==
9395 LPFC_TOPOLOGY_LOOP))
9396 bf_set(els_req64_sid, &wqe->els_req, 0);
9397 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9398 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9399 phba->vpi_ids[iocbq->vport->vpi]);
9400 } else if (pcmd && iocbq->context1) {
9401 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9402 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9403 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9404 }
9405 }
9406 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9407 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9408 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9409 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9410 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9411 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9412 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9413 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9414 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9415 break;
9416 case CMD_XMIT_SEQUENCE64_CX:
9417 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9418 iocbq->iocb.un.ulpWord[3]);
9419 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9420 iocbq->iocb.unsli3.rcvsli3.ox_id);
9421
9422 xmit_len = total_len;
9423 cmnd = CMD_XMIT_SEQUENCE64_CR;
9424 if (phba->link_flag & LS_LOOPBACK_MODE)
9425 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9426
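/* fall through */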
9427 case CMD_XMIT_SEQUENCE64_CR:
9428
9429 wqe->xmit_sequence.rsvd3 = 0;
9430
9431
9432 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9433 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9434 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9435 LPFC_WQE_IOD_WRITE);
9436 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9437 LPFC_WQE_LENLOC_WORD12);
9438 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9439 wqe->xmit_sequence.xmit_len = xmit_len;
9440 command_type = OTHER_COMMAND;
9441 break;
9442 case CMD_XMIT_BCAST64_CN:
9443
9444 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9445
9446
9447
9448 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9449 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9450 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9451 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9452 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9453 LPFC_WQE_LENLOC_WORD3);
9454 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9455 break;
9456 case CMD_FCP_IWRITE64_CR:
9457 command_type = FCP_COMMAND_DATA_OUT;
9458
9459
9460 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9461 xmit_len + sizeof(struct fcp_rsp));
9462 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9463 0);
9464
9465
9466 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9467 iocbq->iocb.ulpFCP2Rcvy);
9468 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9469
9470 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9471 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9472 LPFC_WQE_LENLOC_WORD4);
9473 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9474 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9475 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9476 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9477 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9478 if (iocbq->priority) {
9479 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9480 (iocbq->priority << 1));
9481 } else {
9482 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9483 (phba->cfg_XLanePriority << 1));
9484 }
9485 }
/* Note, word 10 is already initialized to 0 */

/* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9489 if (phba->cfg_enable_pbde)
9490 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9491 else
9492 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9493
9494 if (phba->fcp_embed_io) {
9495 struct lpfc_io_buf *lpfc_cmd;
9496 struct sli4_sge *sgl;
9497 struct fcp_cmnd *fcp_cmnd;
9498 uint32_t *ptr;
9499
9500
9501
9502 lpfc_cmd = iocbq->context1;
9503 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9504 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9505
9506
9507 wqe->generic.bde.tus.f.bdeFlags =
9508 BUFF_TYPE_BDE_IMMED;
9509 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9510 wqe->generic.bde.addrHigh = 0;
9511 wqe->generic.bde.addrLow = 88;
9512
9513 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9514 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9515
9516
9517 ptr = &wqe->words[22];
9518 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9519 }
9520 break;
9521 case CMD_FCP_IREAD64_CR:
9522
9523
9524 bf_set(payload_offset_len, &wqe->fcp_iread,
9525 xmit_len + sizeof(struct fcp_rsp));
9526 bf_set(cmd_buff_len, &wqe->fcp_iread,
9527 0);
9528
9529
9530 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9531 iocbq->iocb.ulpFCP2Rcvy);
9532 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9533
9534 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9535 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9536 LPFC_WQE_LENLOC_WORD4);
9537 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9538 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9539 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9540 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9541 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9542 if (iocbq->priority) {
9543 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9544 (iocbq->priority << 1));
9545 } else {
9546 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9547 (phba->cfg_XLanePriority << 1));
9548 }
9549 }
/* Note, word 10 is already initialized to 0 */

/* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9553 if (phba->cfg_enable_pbde)
9554 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9555 else
9556 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9557
9558 if (phba->fcp_embed_io) {
9559 struct lpfc_io_buf *lpfc_cmd;
9560 struct sli4_sge *sgl;
9561 struct fcp_cmnd *fcp_cmnd;
9562 uint32_t *ptr;
9563
9564
9565
9566 lpfc_cmd = iocbq->context1;
9567 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9568 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9569
9570
9571 wqe->generic.bde.tus.f.bdeFlags =
9572 BUFF_TYPE_BDE_IMMED;
9573 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9574 wqe->generic.bde.addrHigh = 0;
9575 wqe->generic.bde.addrLow = 88;
9576
9577 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9578 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9579
9580
9581 ptr = &wqe->words[22];
9582 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9583 }
9584 break;
9585 case CMD_FCP_ICMND64_CR:
9586
9587
9588 bf_set(payload_offset_len, &wqe->fcp_icmd,
9589 xmit_len + sizeof(struct fcp_rsp));
9590 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9591 0);
9592
9593 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9594
9595 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9596 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9597 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9598 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9599 LPFC_WQE_LENLOC_NONE);
9600 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9601 iocbq->iocb.ulpFCP2Rcvy);
9602 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9603 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9604 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9605 if (iocbq->priority) {
9606 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9607 (iocbq->priority << 1));
9608 } else {
9609 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9610 (phba->cfg_XLanePriority << 1));
9611 }
9612 }
/* Note, word 10 is already initialized to 0 */

9615 if (phba->fcp_embed_io) {
9616 struct lpfc_io_buf *lpfc_cmd;
9617 struct sli4_sge *sgl;
9618 struct fcp_cmnd *fcp_cmnd;
9619 uint32_t *ptr;
9620
9621
9622
9623 lpfc_cmd = iocbq->context1;
9624 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9625 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9626
9627
9628 wqe->generic.bde.tus.f.bdeFlags =
9629 BUFF_TYPE_BDE_IMMED;
9630 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9631 wqe->generic.bde.addrHigh = 0;
9632 wqe->generic.bde.addrLow = 88;
9633
9634 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9635 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
9636
9637
9638 ptr = &wqe->words[22];
9639 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9640 }
9641 break;
9642 case CMD_GEN_REQUEST64_CR:
/* For this command calculate the xmit length of the
 * request bde.
 */
9646 xmit_len = 0;
9647 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9648 sizeof(struct ulp_bde64);
9649 for (i = 0; i < numBdes; i++) {
9650 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9651 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9652 break;
9653 xmit_len += bde.tus.f.bdeSize;
9654 }
9655
9656 wqe->gen_req.request_payload_len = xmit_len;
9657
9658
9659
9660 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9661 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9662 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9663 "2015 Invalid CT %x command 0x%x\n",
9664 ct, iocbq->iocb.ulpCommand);
9665 return IOCB_ERROR;
9666 }
9667 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9668 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9669 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9670 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9671 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9672 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9673 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9674 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9675 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9676 command_type = OTHER_COMMAND;
9677 break;
9678 case CMD_XMIT_ELS_RSP64_CX:
9679 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9680
9681
9682 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9683
9684 wqe->xmit_els_rsp.word4 = 0;
9685
9686 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9687 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9688
9689 if_type = bf_get(lpfc_sli_intf_if_type,
9690 &phba->sli4_hba.sli_intf);
9691 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9692 if (iocbq->vport->fc_flag & FC_PT2PT) {
9693 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9694 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9695 iocbq->vport->fc_myDID);
9696 if (iocbq->vport->fc_myDID == Fabric_DID) {
9697 bf_set(wqe_els_did,
9698 &wqe->xmit_els_rsp.wqe_dest, 0);
9699 }
9700 }
9701 }
9702 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9703 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9704 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9705 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9706 iocbq->iocb.unsli3.rcvsli3.ox_id);
9707 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9708 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9709 phba->vpi_ids[iocbq->vport->vpi]);
9710 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9711 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9712 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9713 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9714 LPFC_WQE_LENLOC_WORD3);
9715 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9716 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9717 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9718 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9719 iocbq->context2)->virt);
9720 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9721 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9722 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9723 iocbq->vport->fc_myDID);
9724 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9725 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9726 phba->vpi_ids[phba->pport->vpi]);
9727 }
9728 command_type = OTHER_COMMAND;
9729 break;
9730 case CMD_CLOSE_XRI_CN:
9731 case CMD_ABORT_XRI_CN:
9732 case CMD_ABORT_XRI_CX:
/* words 0-2 memcpy should be 0 reserved */
/* port will send abts */
9735 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9736 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9737 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9738 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9739 } else
9740 fip = 0;
9741
9742 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
/*
 * The link is down, or the command was ELS_FIP
 * so the fw does not need to send abts
 * on the wire.
 */
9748 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9749 else
9750 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9751 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9752
9753 wqe->abort_cmd.rsrvd5 = 0;
9754 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9755 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9756 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
/*
 * The abort handler will send us CMD_ABORT_XRI_CN or
 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
 */
9761 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9762 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9763 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9764 LPFC_WQE_LENLOC_NONE);
9765 cmnd = CMD_ABORT_XRI_CX;
9766 command_type = OTHER_COMMAND;
9767 xritag = 0;
9768 break;
9769 case CMD_XMIT_BLS_RSP64_CX:
9770 ndlp = (struct lpfc_nodelist *)iocbq->context1;
/* As BLS ABTS RSP WQE is very different from other WQEs,
 * we re-construct this WQE here based on information in
 * iocbq from scratch.
 */
9775 memset(wqe, 0, sizeof(*wqe));
9776
9777 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9778 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9779 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9780 LPFC_ABTS_UNSOL_INT) {
/* ABTS sent by initiator to CT exchange, the
 * RX_ID field will be filled with the newly
 * allocated responder XRI.
 */
9785 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9786 iocbq->sli4_xritag);
9787 } else {
/* ABTS sent by responder to CT exchange, the
 * RX_ID field will be filled with the responder
 * RX_ID from ABTS.
 */
9792 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9793 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9794 }
9795 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9796 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9797
/* Use CT=VPI */
9799 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9800 ndlp->nlp_DID);
9801 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9802 iocbq->iocb.ulpContext);
9803 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9804 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9805 phba->vpi_ids[phba->pport->vpi]);
9806 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9807 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9808 LPFC_WQE_LENLOC_NONE);
9809
9810 command_type = OTHER_COMMAND;
9811 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9812 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9813 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9814 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9815 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9816 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9817 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9818 }
9819
9820 break;
9821 case CMD_SEND_FRAME:
9822 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
9823 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E);
9824 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41);
9825 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
9826 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
9827 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
9828 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
9829 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
9830 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9831 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9832 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9833 return 0;
9834 case CMD_XRI_ABORTED_CX:
9835 case CMD_CREATE_XRI_CR:
9836 case CMD_IOCB_FCP_IBIDIR64_CR:
9837 case CMD_FCP_TSEND64_CX:
9838 case CMD_FCP_TRSP64_CX:
9839 case CMD_FCP_AUTO_TRSP_CX:
9840 default:
9841 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9842 "2014 Invalid command 0x%x\n",
9843 iocbq->iocb.ulpCommand);
9844 return IOCB_ERROR;
9846 }
9847
9848 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9849 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9850 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9851 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9852 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9853 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9854 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9855 LPFC_IO_DIF_INSERT);
9856 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9857 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9858 wqe->generic.wqe_com.abort_tag = abort_tag;
9859 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9860 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9861 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9862 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9863 return 0;
9864 }
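
/*
 * Usage sketch (illustrative, not driver code): callers never post the
 * legacy IOCB directly. The SLI-4 submit path below converts it first and
 * only then hands the resulting 128-byte WQE to the work queue:
 *
 *	union lpfc_wqe128 wqe;
 *
 *	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
 *		return IOCB_ERROR;
 *	if (lpfc_sli4_wq_put(wq, &wqe))
 *		return IOCB_ERROR;
 *	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
 */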
9865
/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-4 interface spec.
 *
 * This function is called with ringlock held. The function will return
 * success after it successfully submits the iocb to firmware or after adding
 * it to the txq.
 **/
9880 static int
9881 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9882 struct lpfc_iocbq *piocb, uint32_t flag)
9883 {
9884 struct lpfc_sglq *sglq;
9885 union lpfc_wqe128 wqe;
9886 struct lpfc_queue *wq;
9887 struct lpfc_sli_ring *pring;
9888
/* Get the WQ */
9890 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9891 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9892 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
9893 } else {
9894 wq = phba->sli4_hba.els_wq;
9895 }
9896
/* Get corresponding ring */
9898 pring = wq->pring;
9899
/* The WQE can be either 64 or 128 bytes. */

9904 lockdep_assert_held(&pring->ring_lock);
9905
9906 if (piocb->sli4_xritag == NO_XRI) {
9907 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9908 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9909 sglq = NULL;
9910 else {
9911 if (!list_empty(&pring->txq)) {
9912 if (!(flag & SLI_IOCB_RET_IOCB)) {
9913 __lpfc_sli_ringtx_put(phba,
9914 pring, piocb);
9915 return IOCB_SUCCESS;
9916 } else {
9917 return IOCB_BUSY;
9918 }
9919 } else {
9920 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
9921 if (!sglq) {
9922 if (!(flag & SLI_IOCB_RET_IOCB)) {
9923 __lpfc_sli_ringtx_put(phba,
9924 pring,
9925 piocb);
9926 return IOCB_SUCCESS;
9927 } else
9928 return IOCB_BUSY;
9929 }
9930 }
9931 }
9932 } else if (piocb->iocb_flag & LPFC_IO_FCP)
/* These IO's already have an XRI and a mapped sgl. */
9934 sglq = NULL;
9935 else {
/*
 * This is a continuation of a command (CX), so the
 * sglq is on the active list.
 */
9940 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
9941 if (!sglq)
9942 return IOCB_ERROR;
9943 }
9944
9945 if (sglq) {
9946 piocb->sli4_lxritag = sglq->sli4_lxritag;
9947 piocb->sli4_xritag = sglq->sli4_xritag;
9948 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
9949 return IOCB_ERROR;
9950 }
9951
9952 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
9953 return IOCB_ERROR;
9954
9955 if (lpfc_sli4_wq_put(wq, &wqe))
9956 return IOCB_ERROR;
9957 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9958
9959 return 0;
9960 }
9961
/**
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 *
 * This routine wraps the actual lockless version of the issue-iocb function
 * pointer from the lpfc_hba struct.
 *
 * Return codes:
 * IOCB_ERROR - Error
 * IOCB_SUCCESS - Success
 * IOCB_BUSY - Busy
 **/
9973 int
9974 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9975 struct lpfc_iocbq *piocb, uint32_t flag)
9976 {
9977 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9978 }
9979
/**
 * lpfc_sli_api_table_setup - Set up sli api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SLI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
9989 int
9990 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9991 {
9992
9993 switch (dev_grp) {
9994 case LPFC_PCI_DEV_LP:
9995 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9996 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9997 break;
9998 case LPFC_PCI_DEV_OC:
9999 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10000 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10001 break;
10002 default:
10003 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10004 "1419 Invalid HBA PCI-device group: 0x%x\n",
10005 dev_grp);
10006 return -ENODEV;
10008 }
10009 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10010 return 0;
10011 }
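
/*
 * Dispatch sketch (illustrative): once the jump table above is filled in
 * at attach time, SLI-revision independent code calls through the
 * pointer, e.g.
 *
 *	rc = phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 *
 * which resolves to the _s3 variant for LPFC_PCI_DEV_LP adapters and to
 * the _s4 variant for LPFC_PCI_DEV_OC adapters.
 */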
10012
/**
 * lpfc_sli4_calc_ring - Calculates which ring the command goes to
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 *
 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
 * hba_wqidx, thus we need to calculate the corresponding ring.
 * Since ABORTS must go on the same WQ as the command they are
 * aborting, we use the command's hba_wqidx.
 */
10023 struct lpfc_sli_ring *
10024 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10025 {
10026 struct lpfc_io_buf *lpfc_cmd;
10027
10028 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10029 if (unlikely(!phba->sli4_hba.hdwq))
10030 return NULL;
10031
/*
 * for abort iocb hba_wqidx should already
 * be setup based on what work queue we used.
 */
10035 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10036 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10037 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10038 }
10039 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10040 } else {
10041 if (unlikely(!phba->sli4_hba.els_wq))
10042 return NULL;
10043 piocb->hba_wqidx = 0;
10044 return phba->sli4_hba.els_wq->pring;
10045 }
10046 }
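
/*
 * Illustrative note: abort paths depend on the hba_wqidx rule above by
 * copying the index from the command being aborted before calling this
 * routine, e.g.
 *
 *	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
 *	pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
 *
 * (see lpfc_sli_abort_iotag_issue() later in this file), so the ABTS WQE
 * is posted to the same WQ as the command it aborts.
 */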
10047
/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb. This
 * function acquires the appropriate ring lock, calls __lpfc_sli_issue_iocb,
 * and returns whatever __lpfc_sli_issue_iocb returns. This wrapper is used
 * by functions which do not hold any lock.
 **/
10061 int
10062 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10063 struct lpfc_iocbq *piocb, uint32_t flag)
10064 {
10065 struct lpfc_sli_ring *pring;
10066 struct lpfc_queue *eq;
10067 unsigned long iflags;
10068 int rc;
10069
10070 if (phba->sli_rev == LPFC_SLI_REV4) {
10071 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
10072
10073 pring = lpfc_sli4_calc_ring(phba, piocb);
10074 if (unlikely(pring == NULL))
10075 return IOCB_ERROR;
10076
10077 spin_lock_irqsave(&pring->ring_lock, iflags);
10078 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10079 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10080
10081 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
10082 } else {
/* For now, SLI2/3 will still use hbalock */
10084 spin_lock_irqsave(&phba->hbalock, iflags);
10085 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10086 spin_unlock_irqrestore(&phba->hbalock, iflags);
10087 }
10088 return rc;
10089 }
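
/*
 * Usage sketch (illustrative; assumes an initialized @phba and an iocb
 * obtained from lpfc_sli_get_iocbq()): a caller holding no locks issues
 * the command and cleans up on hard failure:
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
 *	if (rc == IOCB_ERROR)
 *		lpfc_sli_release_iocbq(phba, piocb);
 *
 * Passing SLI_IOCB_RET_IOCB instead of 0 requests an IOCB_BUSY return
 * rather than having the iocb queued to the txq when resources are short.
 */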
10090
/**
 * lpfc_extra_ring_setup - Extra ring setup function
 * @phba: Pointer to HBA context object.
 *
 * This function is called while the driver attaches to the
 * HBA to set up the extra ring. The extra ring is used
 * only when the driver needs to support fibre channel target
 * mode.
 * This function is called with no lock held.
 **/
10102 static int
10103 lpfc_extra_ring_setup( struct lpfc_hba *phba)
10104 {
10105 struct lpfc_sli *psli;
10106 struct lpfc_sli_ring *pring;
10107
10108 psli = &phba->sli;
10109
/* Adjust cmd/rsp ring iocb entries more evenly */

/* Take some away from the FCP ring */
10113 pring = &psli->sli3_ring[LPFC_FCP_RING];
10114 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10115 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10116 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10117 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10118
/* and give them to the extra ring */
10120 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10121
10122 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10123 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10124 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10125 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10126
/* Setup default profile for this ring */
10128 pring->iotag_max = 4096;
10129 pring->num_mask = 1;
10130 pring->prt[0].profile = 0;
10131 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10132 pring->prt[0].type = phba->cfg_multi_ring_type;
10133 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10134 return 0;
10135 }
10136
/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to iocb object.
 *
 * The async_event handler calls this routine when it receives
 * an ASYNC_STATUS_CN event from the port. The port generates
 * this event when an Abort Sequence request to an rpi fails
 * twice in succession. The abort could be originated by the
 * driver or by the port. The ABTS could have been for an ELS
 * or FCP IO. The port only generates this event when an ABTS
 * fails.
 */
10149 static void
10150 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10151 struct lpfc_iocbq *iocbq)
10152 {
10153 struct lpfc_nodelist *ndlp = NULL;
10154 uint16_t rpi = 0, vpi = 0;
10155 struct lpfc_vport *vport = NULL;
10156
/* The rpi in the ulpContext is vport-sensitive. */
10158 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10159 rpi = iocbq->iocb.ulpContext;
10160
10161 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10162 "3092 Port generated ABTS async event "
10163 "on vpi %d rpi %d status 0x%x\n",
10164 vpi, rpi, iocbq->iocb.ulpStatus);
10165
10166 vport = lpfc_find_vport_by_vpid(phba, vpi);
10167 if (!vport)
10168 goto err_exit;
10169 ndlp = lpfc_findnode_rpi(vport, rpi);
10170 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10171 goto err_exit;
10172
10173 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10174 lpfc_sli_abts_recover_port(vport, ndlp);
10175 return;
10176
10177 err_exit:
10178 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10179 "3095 Event Context not found, no "
10180 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
vpi, rpi, iocbq->iocb.ulpStatus,
iocbq->iocb.ulpContext);
10183 }
10184
/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
 * @phba: pointer to HBA context object.
 * @ndlp: nodelist pointer for the impacted rport.
 * @axri: pointer to the wcqe containing the failed exchange.
 *
 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from
 * the port. The port generates this event when an abort exchange request to
 * an rpi fails twice in succession with no reply. The abort could be
 * originated by the driver or by the port. The ABTS could have been for an
 * ELS or FCP IO.
 */
10195 void
10196 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10197 struct lpfc_nodelist *ndlp,
10198 struct sli4_wcqe_xri_aborted *axri)
10199 {
10200 struct lpfc_vport *vport;
10201 uint32_t ext_status = 0;
10202
10203 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10204 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10205 "3115 Node Context not found, driver "
10206 "ignoring abts err event\n");
10207 return;
10208 }
10209
10210 vport = ndlp->vport;
10211 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10212 "3116 Port generated FCP XRI ABORT event on "
10213 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10214 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10215 bf_get(lpfc_wcqe_xa_xri, axri),
10216 bf_get(lpfc_wcqe_xa_status, axri),
10217 axri->parameter);
10218
/*
 * Catch the ABTS protocol failure case. Older OCe FW releases returned
 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
 */
10224 ext_status = axri->parameter & IOERR_PARAM_MASK;
10225 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10226 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10227 lpfc_sli_abts_recover_port(vport, ndlp);
10228 }
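
/*
 * Illustrative reduction of the recovery gate above: recovery runs only
 * when
 *
 *	status == IOSTAT_LOCAL_REJECT &&
 *	((parameter & IOERR_PARAM_MASK) == IOERR_SEQUENCE_TIMEOUT ||
 *	 (parameter & IOERR_PARAM_MASK) == 0)
 *
 * so both the older firmware convention (LOCAL_REJECT with parameter 0)
 * and the newer one (LOCAL_REJECT with SEQUENCE_TIMEOUT) trigger
 * lpfc_sli_abts_recover_port().
 */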
10229
/**
 * lpfc_sli_async_event_handler - ASYNC iocb handler function
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to iocb object.
 *
 * This function is called by the slow ring event handler
 * function when there is an ASYNC event iocb in the ring.
 * This function is called with no lock held.
 * Currently this function handles only temperature related
 * ASYNC events. The function decodes the temperature sensor
 * event message and posts events for the management applications.
 **/
10243 static void
10244 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
10245 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
10246 {
10247 IOCB_t *icmd;
10248 uint16_t evt_code;
10249 struct temp_event temp_event_data;
10250 struct Scsi_Host *shost;
10251 uint32_t *iocb_w;
10252
10253 icmd = &iocbq->iocb;
10254 evt_code = icmd->un.asyncstat.evt_code;
10255
10256 switch (evt_code) {
10257 case ASYNC_TEMP_WARN:
10258 case ASYNC_TEMP_SAFE:
10259 temp_event_data.data = (uint32_t) icmd->ulpContext;
10260 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10261 if (evt_code == ASYNC_TEMP_WARN) {
10262 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10263 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10264 "0347 Adapter is very hot, please take "
10265 "corrective action. temperature : %d Celsius\n",
10266 (uint32_t) icmd->ulpContext);
10267 } else {
10268 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10269 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10270 "0340 Adapter temperature is OK now. "
10271 "temperature : %d Celsius\n",
10272 (uint32_t) icmd->ulpContext);
10273 }
10274
/* Send temperature change event to applications */
10276 shost = lpfc_shost_from_vport(phba->pport);
10277 fc_host_post_vendor_event(shost, fc_get_event_number(),
10278 sizeof(temp_event_data), (char *) &temp_event_data,
10279 LPFC_NL_VENDOR_ID);
10280 break;
10281 case ASYNC_STATUS_CN:
10282 lpfc_sli_abts_err_handler(phba, iocbq);
10283 break;
10284 default:
10285 iocb_w = (uint32_t *) icmd;
10286 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10287 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10288 " evt_code 0x%x\n"
10289 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10290 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10291 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10292 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10293 pring->ringno, icmd->un.asyncstat.evt_code,
10294 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10295 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10296 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10297 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10298
10299 break;
10300 }
10301 }
10302
/**
 * lpfc_sli4_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_setup sets up the ELS ring profile masks for
 * unsolicited ELS and CT events. This function is called
 * while the driver attaches to the HBA and before the
 * interrupts are enabled. So there is no need for locking.
 *
 * This function always returns 0.
 */
10315 int
10316 lpfc_sli4_setup(struct lpfc_hba *phba)
10317 {
10318 struct lpfc_sli_ring *pring;
10319
10320 pring = phba->sli4_hba.els_wq->pring;
10321 pring->num_mask = LPFC_MAX_RING_MASK;
10322 pring->prt[0].profile = 0;
10323 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10324 pring->prt[0].type = FC_TYPE_ELS;
10325 pring->prt[0].lpfc_sli_rcv_unsol_event =
10326 lpfc_els_unsol_event;
10327 pring->prt[1].profile = 0;
10328 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10329 pring->prt[1].type = FC_TYPE_ELS;
10330 pring->prt[1].lpfc_sli_rcv_unsol_event =
10331 lpfc_els_unsol_event;
10332 pring->prt[2].profile = 0;
10333
10334 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10335
10336 pring->prt[2].type = FC_TYPE_CT;
10337 pring->prt[2].lpfc_sli_rcv_unsol_event =
10338 lpfc_ct_unsol_event;
10339 pring->prt[3].profile = 0;
10340
10341 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10342
10343 pring->prt[3].type = FC_TYPE_CT;
10344 pring->prt[3].lpfc_sli_rcv_unsol_event =
10345 lpfc_ct_unsol_event;
10346 return 0;
10347 }
10348
/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_setup sets up rings of the SLI interface with
 * the number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled. So there is no need for locking.
 *
 * This function always returns 0.
 */
10360 int
10361 lpfc_sli_setup(struct lpfc_hba *phba)
10362 {
10363 int i, totiocbsize = 0;
10364 struct lpfc_sli *psli = &phba->sli;
10365 struct lpfc_sli_ring *pring;
10366
10367 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10368 psli->sli_flag = 0;
10369
10370 psli->iocbq_lookup = NULL;
10371 psli->iocbq_lookup_len = 0;
10372 psli->last_iotag = 0;
10373
10374 for (i = 0; i < psli->num_rings; i++) {
10375 pring = &psli->sli3_ring[i];
10376 switch (i) {
10377 case LPFC_FCP_RING:
/* numCiocb and numRiocb are used in config_port */
10379 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10380 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10381 pring->sli.sli3.numCiocb +=
10382 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10383 pring->sli.sli3.numRiocb +=
10384 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10385 pring->sli.sli3.numCiocb +=
10386 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10387 pring->sli.sli3.numRiocb +=
10388 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10389 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10390 SLI3_IOCB_CMD_SIZE :
10391 SLI2_IOCB_CMD_SIZE;
10392 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10393 SLI3_IOCB_RSP_SIZE :
10394 SLI2_IOCB_RSP_SIZE;
10395 pring->iotag_ctr = 0;
10396 pring->iotag_max =
10397 (phba->cfg_hba_queue_depth * 2);
10398 pring->fast_iotag = pring->iotag_max;
10399 pring->num_mask = 0;
10400 break;
10401 case LPFC_EXTRA_RING:
/* numCiocb and numRiocb are used in config_port */
10403 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10404 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10405 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10406 SLI3_IOCB_CMD_SIZE :
10407 SLI2_IOCB_CMD_SIZE;
10408 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10409 SLI3_IOCB_RSP_SIZE :
10410 SLI2_IOCB_RSP_SIZE;
10411 pring->iotag_max = phba->cfg_hba_queue_depth;
10412 pring->num_mask = 0;
10413 break;
10414 case LPFC_ELS_RING:
/* numCiocb and numRiocb are used in config_port */
10416 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10417 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10418 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10419 SLI3_IOCB_CMD_SIZE :
10420 SLI2_IOCB_CMD_SIZE;
10421 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10422 SLI3_IOCB_RSP_SIZE :
10423 SLI2_IOCB_RSP_SIZE;
10424 pring->fast_iotag = 0;
10425 pring->iotag_ctr = 0;
10426 pring->iotag_max = 4096;
10427 pring->lpfc_sli_rcv_async_status =
10428 lpfc_sli_async_event_handler;
10429 pring->num_mask = LPFC_MAX_RING_MASK;
10430 pring->prt[0].profile = 0;
10431 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10432 pring->prt[0].type = FC_TYPE_ELS;
10433 pring->prt[0].lpfc_sli_rcv_unsol_event =
10434 lpfc_els_unsol_event;
10435 pring->prt[1].profile = 0;
10436 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10437 pring->prt[1].type = FC_TYPE_ELS;
10438 pring->prt[1].lpfc_sli_rcv_unsol_event =
10439 lpfc_els_unsol_event;
10440 pring->prt[2].profile = 0;
10441
10442 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10443
10444 pring->prt[2].type = FC_TYPE_CT;
10445 pring->prt[2].lpfc_sli_rcv_unsol_event =
10446 lpfc_ct_unsol_event;
10447 pring->prt[3].profile = 0;
10448
10449 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10450
10451 pring->prt[3].type = FC_TYPE_CT;
10452 pring->prt[3].lpfc_sli_rcv_unsol_event =
10453 lpfc_ct_unsol_event;
10454 break;
10455 }
10456 totiocbsize += (pring->sli.sli3.numCiocb *
10457 pring->sli.sli3.sizeCiocb) +
10458 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10459 }
10460 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
/* Too many cmd / rsp ring entries in SLI2 SLIM */
10462 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10463 "SLI2 SLIM Data: x%x x%lx\n",
10464 phba->brd_no, totiocbsize,
10465 (unsigned long) MAX_SLIM_IOCB_SIZE);
10466 }
10467 if (phba->cfg_multi_ring_support == 2)
10468 lpfc_extra_ring_setup(phba);
10469
10470 return 0;
10471 }
10472
/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_queue_init sets up the mailbox queues and the rings
 * backing each io_wq, the ELS WQ and, when NVME is enabled, the
 * NVME LS WQ. It also initializes the list heads and ring locks
 * of each ring. This function is called during the initialization
 * of the SLI interface of an HBA.
 * This function is called with no lock held.
 **/
10484 void
10485 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10486 {
10487 struct lpfc_sli *psli;
10488 struct lpfc_sli_ring *pring;
10489 int i;
10490
10491 psli = &phba->sli;
10492 spin_lock_irq(&phba->hbalock);
10493 INIT_LIST_HEAD(&psli->mboxq);
10494 INIT_LIST_HEAD(&psli->mboxq_cmpl);
/* Initialize list headers for txq and txcmplq as usual */
10496 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10497 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
10498 pring->flag = 0;
10499 pring->ringno = LPFC_FCP_RING;
10500 pring->txcmplq_cnt = 0;
10501 INIT_LIST_HEAD(&pring->txq);
10502 INIT_LIST_HEAD(&pring->txcmplq);
10503 INIT_LIST_HEAD(&pring->iocb_continueq);
10504 spin_lock_init(&pring->ring_lock);
10505 }
10506 pring = phba->sli4_hba.els_wq->pring;
10507 pring->flag = 0;
10508 pring->ringno = LPFC_ELS_RING;
10509 pring->txcmplq_cnt = 0;
10510 INIT_LIST_HEAD(&pring->txq);
10511 INIT_LIST_HEAD(&pring->txcmplq);
10512 INIT_LIST_HEAD(&pring->iocb_continueq);
10513 spin_lock_init(&pring->ring_lock);
10514
10515 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10516 pring = phba->sli4_hba.nvmels_wq->pring;
10517 pring->flag = 0;
10518 pring->ringno = LPFC_ELS_RING;
10519 pring->txcmplq_cnt = 0;
10520 INIT_LIST_HEAD(&pring->txq);
10521 INIT_LIST_HEAD(&pring->txcmplq);
10522 INIT_LIST_HEAD(&pring->iocb_continueq);
10523 spin_lock_init(&pring->ring_lock);
10524 }
10525
10526 spin_unlock_irq(&phba->hbalock);
10527 }
10528
/**
 * lpfc_sli_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_queue_init sets up the mailbox queues and iocb queues
 * for each SLI-3 ring. This function also initializes the ring
 * indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held.
 **/
10540 void
10541 lpfc_sli_queue_init(struct lpfc_hba *phba)
10542 {
10543 struct lpfc_sli *psli;
10544 struct lpfc_sli_ring *pring;
10545 int i;
10546
10547 psli = &phba->sli;
10548 spin_lock_irq(&phba->hbalock);
10549 INIT_LIST_HEAD(&psli->mboxq);
10550 INIT_LIST_HEAD(&psli->mboxq_cmpl);
/* Initialize list headers for txq and txcmplq as usual */
10552 for (i = 0; i < psli->num_rings; i++) {
10553 pring = &psli->sli3_ring[i];
10554 pring->ringno = i;
10555 pring->sli.sli3.next_cmdidx = 0;
10556 pring->sli.sli3.local_getidx = 0;
10557 pring->sli.sli3.cmdidx = 0;
10558 INIT_LIST_HEAD(&pring->iocb_continueq);
10559 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10560 INIT_LIST_HEAD(&pring->postbufq);
10561 pring->flag = 0;
10562 INIT_LIST_HEAD(&pring->txq);
10563 INIT_LIST_HEAD(&pring->txcmplq);
10564 spin_lock_init(&pring->ring_lock);
10565 }
10566 spin_unlock_irq(&phba->hbalock);
10567 }
10568
/**
 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * This routine flushes the mailbox command subsystem. It will unconditionally
 * flush all the mailbox commands in the three possible stages in the mailbox
 * command sub-system: pending mailbox command queue; the outstanding mailbox
 * command; and completed mailbox command queue. It is caller's responsibility
 * to make sure that the driver is in the proper state to flush the mailbox
 * command sub-system. Namely, the posting of mailbox commands into the
 * pending mailbox command queue from the various clients must be stopped;
 * either the HBA is in a state that it will never work on the outstanding
 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
 * mailbox command has been completed.
 **/
10584 static void
10585 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10586 {
10587 LIST_HEAD(completions);
10588 struct lpfc_sli *psli = &phba->sli;
10589 LPFC_MBOXQ_t *pmb;
10590 unsigned long iflag;
10591
/* Disable softirqs, including timers from obtaining phba->hbalock */
10593 local_bh_disable();
10594
10595
10596 spin_lock_irqsave(&phba->hbalock, iflag);
10597
10598
10599 list_splice_init(&phba->sli.mboxq, &completions);
10600
10601 if (psli->mbox_active) {
10602 list_add_tail(&psli->mbox_active->list, &completions);
10603 psli->mbox_active = NULL;
10604 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10605 }
10606
10607 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10608 spin_unlock_irqrestore(&phba->hbalock, iflag);
10609
/* Enable softirqs again, done with phba->hbalock */
10611 local_bh_enable();
10612
10613
10614 while (!list_empty(&completions)) {
10615 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10616 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10617 if (pmb->mbox_cmpl)
10618 pmb->mbox_cmpl(phba, pmb);
10619 }
10620 }
10621
/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to virtual port object.
 *
 * lpfc_sli_host_down is called to clean up the resources
 * associated with a vport before destroying virtual
 * port data structures.
 * This function does the following operations:
 * - Free discovery resources associated with this virtual
 *   port.
 * - Free iocbs associated with this virtual port in
 *   the txq.
 * - Send abort for all iocb commands associated with this
 *   vport in txcmplq.
 *
 * This function is called with no lock held and always returns 1.
 **/
10639 int
10640 lpfc_sli_host_down(struct lpfc_vport *vport)
10641 {
10642 LIST_HEAD(completions);
10643 struct lpfc_hba *phba = vport->phba;
10644 struct lpfc_sli *psli = &phba->sli;
10645 struct lpfc_queue *qp = NULL;
10646 struct lpfc_sli_ring *pring;
10647 struct lpfc_iocbq *iocb, *next_iocb;
10648 int i;
10649 unsigned long flags = 0;
10650 uint16_t prev_pring_flag;
10651
10652 lpfc_cleanup_discovery_resources(vport);
10653
10654 spin_lock_irqsave(&phba->hbalock, flags);
10655
/*
 * Error everything on the txq since these iocbs
 * have not been given to the FW yet.
 * Also issue ABTS for everything on the txcmplq
 */
10661 if (phba->sli_rev != LPFC_SLI_REV4) {
10662 for (i = 0; i < psli->num_rings; i++) {
10663 pring = &psli->sli3_ring[i];
10664 prev_pring_flag = pring->flag;
10665
10666 if (pring->ringno == LPFC_ELS_RING) {
10667 pring->flag |= LPFC_DEFERRED_RING_EVENT;
/* Set the lpfc data pending flag */
10669 set_bit(LPFC_DATA_READY, &phba->data_flags);
10670 }
10671 list_for_each_entry_safe(iocb, next_iocb,
10672 &pring->txq, list) {
10673 if (iocb->vport != vport)
10674 continue;
10675 list_move_tail(&iocb->list, &completions);
10676 }
10677 list_for_each_entry_safe(iocb, next_iocb,
10678 &pring->txcmplq, list) {
10679 if (iocb->vport != vport)
10680 continue;
10681 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10682 }
10683 pring->flag = prev_pring_flag;
10684 }
10685 } else {
10686 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10687 pring = qp->pring;
10688 if (!pring)
10689 continue;
10690 if (pring == phba->sli4_hba.els_wq->pring) {
10691 pring->flag |= LPFC_DEFERRED_RING_EVENT;
/* Set the lpfc data pending flag */
10693 set_bit(LPFC_DATA_READY, &phba->data_flags);
10694 }
10695 prev_pring_flag = pring->flag;
10696 spin_lock_irq(&pring->ring_lock);
10697 list_for_each_entry_safe(iocb, next_iocb,
10698 &pring->txq, list) {
10699 if (iocb->vport != vport)
10700 continue;
10701 list_move_tail(&iocb->list, &completions);
10702 }
10703 spin_unlock_irq(&pring->ring_lock);
10704 list_for_each_entry_safe(iocb, next_iocb,
10705 &pring->txcmplq, list) {
10706 if (iocb->vport != vport)
10707 continue;
10708 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10709 }
10710 pring->flag = prev_pring_flag;
10711 }
10712 }
10713 spin_unlock_irqrestore(&phba->hbalock, flags);
10714
/* Cancel all the IOCBs from the completions list */
10716 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10717 IOERR_SLI_DOWN);
10718 return 1;
10719 }
10720
/**
 * lpfc_sli_hba_down - Resource cleanup function for the HBA
 * @phba: Pointer to HBA context object.
 *
 * This function cleans up all iocbs, buffers, and mailbox commands
 * while shutting down the HBA. This function is called with no
 * lock held and always returns 1.
 * This function does the following to clean up driver resources:
 * - Shut down the mailbox command sub-system
 * - Cleanup any pending fabric iocbs
 * - Iterate through the iocb txq and free each entry
 *   in the list.
 * - Free up any buffer posted to the HBA
 * - Cancel any outstanding mailbox timer
 **/
10736 int
10737 lpfc_sli_hba_down(struct lpfc_hba *phba)
10738 {
10739 LIST_HEAD(completions);
10740 struct lpfc_sli *psli = &phba->sli;
10741 struct lpfc_queue *qp = NULL;
10742 struct lpfc_sli_ring *pring;
10743 struct lpfc_dmabuf *buf_ptr;
10744 unsigned long flags = 0;
10745 int i;
10746
/* Shutdown the mailbox command sub-system */
10748 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10749
10750 lpfc_hba_down_prep(phba);
10751
/* Disable softirqs, including timers from obtaining phba->hbalock */
10753 local_bh_disable();
10754
10755 lpfc_fabric_abort_hba(phba);
10756
10757 spin_lock_irqsave(&phba->hbalock, flags);
10758
/*
 * Error everything on the txq since these iocbs
 * have not been given to the FW yet.
 */
10763 if (phba->sli_rev != LPFC_SLI_REV4) {
10764 for (i = 0; i < psli->num_rings; i++) {
10765 pring = &psli->sli3_ring[i];
10766
10767 if (pring->ringno == LPFC_ELS_RING) {
10768 pring->flag |= LPFC_DEFERRED_RING_EVENT;
/* Set the lpfc data pending flag */
10770 set_bit(LPFC_DATA_READY, &phba->data_flags);
10771 }
10772 list_splice_init(&pring->txq, &completions);
10773 }
10774 } else {
10775 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10776 pring = qp->pring;
10777 if (!pring)
10778 continue;
10779 spin_lock(&pring->ring_lock);
10780 list_splice_init(&pring->txq, &completions);
10781 spin_unlock(&pring->ring_lock);
10782 if (pring == phba->sli4_hba.els_wq->pring) {
10783 pring->flag |= LPFC_DEFERRED_RING_EVENT;
/* Set the lpfc data pending flag */
10785 set_bit(LPFC_DATA_READY, &phba->data_flags);
10786 }
10787 }
10788 }
10789 spin_unlock_irqrestore(&phba->hbalock, flags);
10790
/* Cancel all the IOCBs from the completions list */
10792 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10793 IOERR_SLI_DOWN);
10794
10795 spin_lock_irqsave(&phba->hbalock, flags);
10796 list_splice_init(&phba->elsbuf, &completions);
10797 phba->elsbuf_cnt = 0;
10798 phba->elsbuf_prev_cnt = 0;
10799 spin_unlock_irqrestore(&phba->hbalock, flags);
10800
10801 while (!list_empty(&completions)) {
10802 list_remove_head(&completions, buf_ptr,
10803 struct lpfc_dmabuf, list);
10804 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10805 kfree(buf_ptr);
10806 }
10807
/* Enable softirqs again, done with phba->hbalock */
10809 local_bh_enable();
10810
/* Return any active mbox cmds */
10812 del_timer_sync(&psli->mbox_tmo);
10813
10814 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
10815 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10816 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
10817
10818 return 1;
10819 }
10820
/**
 * lpfc_sli_pcimem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes required to be copied.
 *
 * This function is used for copying data between driver memory
 * and the SLI memory. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
10833 void
10834 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10835 {
10836 uint32_t *src = srcp;
10837 uint32_t *dest = destp;
10838 uint32_t ldata;
10839 int i;
10840
10841 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10842 ldata = *src;
10843 ldata = le32_to_cpu(ldata);
10844 *dest = ldata;
10845 src++;
10846 dest++;
10847 }
10848 }
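
/*
 * Worked example (illustrative): @cnt is a byte count walked in 4-byte
 * steps. Each word goes through le32_to_cpu(), so on a big-endian host
 * every 32-bit word is byte-swapped into CPU order, while on a
 * little-endian host the copy is plain. A hypothetical caller copying
 * four words out of SLI memory would look like:
 *
 *	uint32_t buf[4];
 *
 *	lpfc_sli_pcimem_bcopy(slim_region, buf, sizeof(buf));
 *
 * where slim_region is assumed to point at little-endian SLI memory.
 */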
10849
/**
 * lpfc_sli_bemem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes required to be copied.
 *
 * This function is used for copying data between a data structure
 * with big-endian representation and local endianness.
 * This function can be called with or without lock.
 **/
10861 void
10862 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10863 {
10864 uint32_t *src = srcp;
10865 uint32_t *dest = destp;
10866 uint32_t ldata;
10867 int i;
10868
10869 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10870 ldata = *src;
10871 ldata = be32_to_cpu(ldata);
10872 *dest = ldata;
10873 src++;
10874 dest++;
10875 }
10876 }
10877
/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * buffer list.
 **/
10888 int
10889 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10890 struct lpfc_dmabuf *mp)
10891 {
10892
/* Stick struct lpfc_dmabuf at end of postbufq so driver can
 * search to find a match.
 */
10894 spin_lock_irq(&phba->hbalock);
10895 list_add_tail(&mp->list, &pring->postbufq);
10896 pring->postbufq_cnt++;
10897 spin_unlock_irq(&phba->hbalock);
10898 return 0;
10899 }
10900
/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
 * tag is bit-wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with tags of buffers posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no locks held.
 **/
10912 uint32_t
10913 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10914 {
10915 spin_lock_irq(&phba->hbalock);
10916 phba->buffer_tag_count++;
/*
 * Always set the QUE_BUFTAG_BIT to distinguish between
 * a tag assigned by HBQ.
 */
10921 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10922 spin_unlock_irq(&phba->hbalock);
10923 return phba->buffer_tag_count;
10924 }
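
/*
 * Illustrative pairing (not driver code): because every tag returned here
 * has QUE_BUFTAG_BIT or-ed in, it cannot collide with HBQ-assigned tags.
 * A posting path would combine it with the postbufq helpers below:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *
 * and later recover the buffer with lpfc_sli_ring_taggedbuf_get() using
 * the same tag.
 */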
10925
/**
 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Buffers posted using CMD_QUE_XRI64_CX iocbs are in the pring->postbufq
 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
 * iocb is posted to the response ring with the tag of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found, the lpfc_dmabuf object of the
 * buffer is returned to the caller, else NULL is returned.
 * This function is called with no lock held.
 **/
10941 struct lpfc_dmabuf *
10942 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10943 uint32_t tag)
10944 {
10945 struct lpfc_dmabuf *mp, *next_mp;
10946 struct list_head *slp = &pring->postbufq;
10947
/* Search postbufq, from the beginning, looking for a match on tag */
10949 spin_lock_irq(&phba->hbalock);
10950 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10951 if (mp->buffer_tag == tag) {
10952 list_del_init(&mp->list);
10953 pring->postbufq_cnt--;
10954 spin_unlock_irq(&phba->hbalock);
10955 return mp;
10956 }
10957 }
10958
10959 spin_unlock_irq(&phba->hbalock);
10960 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10961 "0402 Cannot find virtual addr for buffer tag on "
10962 "ring %d Data x%lx x%px x%px x%x\n",
10963 pring->ringno, (unsigned long) tag,
10964 slp->next, slp->prev, pring->postbufq_cnt);
10965
10966 return NULL;
10967 }
10968
/**
 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function searches the buffer list using the dma_address
 * of an unsolicited event to find the driver's lpfc_dmabuf object
 * corresponding to the dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found, else it returns NULL.
 * This function is called by the CT and ELS unsolicited event
 * handler functions.
 *
 * This function is called with no lock held.
 **/
10985 struct lpfc_dmabuf *
10986 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10987 dma_addr_t phys)
10988 {
10989 struct lpfc_dmabuf *mp, *next_mp;
10990 struct list_head *slp = &pring->postbufq;
10991
/* Search postbufq, from the beginning, looking for a match on phys */
10993 spin_lock_irq(&phba->hbalock);
10994 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10995 if (mp->phys == phys) {
10996 list_del_init(&mp->list);
10997 pring->postbufq_cnt--;
10998 spin_unlock_irq(&phba->hbalock);
10999 return mp;
11000 }
11001 }
11002
11003 spin_unlock_irq(&phba->hbalock);
11004 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11005 "0410 Cannot find virtual addr for mapped buf on "
11006 "ring %d Data x%llx x%px x%px x%x\n",
11007 pring->ringno, (unsigned long long)phys,
11008 slp->next, slp->prev, pring->postbufq_cnt);
11009 return NULL;
11010 }
11011
/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is the completion handler for the abort iocbs for
 * ELS commands. This function is called from the ELS ring event
 * handler with no lock held. This function frees memory resources
 * used for the abort ELS iocb.
 **/
11023 static void
11024 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11025 struct lpfc_iocbq *rspiocb)
11026 {
11027 IOCB_t *irsp = &rspiocb->iocb;
11028 uint16_t abort_iotag, abort_context;
11029 struct lpfc_iocbq *abort_iocb = NULL;
11030
11031 if (irsp->ulpStatus) {
11032
/*
 * Assume that the port already completed and returned, or
 * will return the iocb. Just Log the message.
 */
11037 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11038 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11039
11040 spin_lock_irq(&phba->hbalock);
11041 if (phba->sli_rev < LPFC_SLI_REV4) {
11042 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11043 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11044 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11045 spin_unlock_irq(&phba->hbalock);
11046 goto release_iocb;
11047 }
11048 if (abort_iotag != 0 &&
11049 abort_iotag <= phba->sli.last_iotag)
11050 abort_iocb =
11051 phba->sli.iocbq_lookup[abort_iotag];
11052 } else
/* For sli4 the abort_tag is the XRI,
 * so the abort routine puts the iotag of the iocb
 * being aborted in the context field of the abort
 * iocb.
 */
11058 abort_iocb = phba->sli.iocbq_lookup[abort_context];
11059
11060 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11061 "0327 Cannot abort els iocb x%px "
11062 "with tag %x context %x, abort status %x, "
11063 "abort code %x\n",
11064 abort_iocb, abort_iotag, abort_context,
11065 irsp->ulpStatus, irsp->un.ulpWord[4]);
11066
11067 spin_unlock_irq(&phba->hbalock);
11068 }
11069 release_iocb:
11070 lpfc_sli_release_iocbq(phba, cmdiocb);
11071 return;
11072 }
11073
/**
 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for ELS commands
 * which are aborted. The function frees memory resources used for
 * the aborted ELS commands.
 **/
11085 static void
11086 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11087 struct lpfc_iocbq *rspiocb)
11088 {
11089 IOCB_t *irsp = &rspiocb->iocb;
11090
/* ELS cmd tag <ulpIoTag> completes */
11092 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11093 "0139 Ignoring ELS cmd tag x%x completion Data: "
11094 "x%x x%x x%x\n",
11095 irsp->ulpIoTag, irsp->ulpStatus,
11096 irsp->un.ulpWord[4], irsp->ulpTimeout);
11097 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11098 lpfc_ct_free_iocb(phba, cmdiocb);
11099 else
11100 lpfc_els_free_iocb(phba, cmdiocb);
11101 return;
11102 }
11103
/**
 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb down to
 * the port. Other than the case where the outstanding command iocb is an
 * abort request, this function issues the abort out unconditionally. This
 * function is called with hbalock held. The function returns 0 when it
 * fails due to memory allocation failure or when the command iocb is an
 * abort request.
 **/
11116 static int
11117 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11118 struct lpfc_iocbq *cmdiocb)
11119 {
11120 struct lpfc_vport *vport = cmdiocb->vport;
11121 struct lpfc_iocbq *abtsiocbp;
11122 IOCB_t *icmd = NULL;
11123 IOCB_t *iabt = NULL;
11124 int retval;
11125 unsigned long iflags;
11126 struct lpfc_nodelist *ndlp;
11127
11128 lockdep_assert_held(&phba->hbalock);
11129
/*
 * There are certain command types we don't want to abort. And we
 * don't want to abort commands that are already in the process of
 * being aborted.
 */
11135 icmd = &cmdiocb->iocb;
11136 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11137 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11138 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11139 return 0;
11140
/* issue ABTS for this IOCB based on iotag */
11142 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11143 if (abtsiocbp == NULL)
11144 return 0;
11145
/* This signals the response to set the correct status
 * before calling the completion handler
 */
11149 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11150
11151 iabt = &abtsiocbp->iocb;
11152 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11153 iabt->un.acxri.abortContextTag = icmd->ulpContext;
11154 if (phba->sli_rev == LPFC_SLI_REV4) {
11155 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11156 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11157 } else {
11158 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11159 if (pring->ringno == LPFC_ELS_RING) {
11160 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11161 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11162 }
11163 }
11164 iabt->ulpLe = 1;
11165 iabt->ulpClass = icmd->ulpClass;
11166
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
11168 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11169 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11170 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11171 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11172 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11173
11174 if (phba->link_state >= LPFC_LINK_UP)
11175 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11176 else
11177 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11178
11179 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11180 abtsiocbp->vport = vport;
11181
11182 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11183 "0339 Abort xri x%x, original iotag x%x, "
11184 "abort cmd iotag x%x\n",
11185 iabt->un.acxri.abortIoTag,
11186 iabt->un.acxri.abortContextTag,
11187 abtsiocbp->iotag);
11188
11189 if (phba->sli_rev == LPFC_SLI_REV4) {
11190 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11191 if (unlikely(pring == NULL))
11192 return 0;
11193
11194 spin_lock_irqsave(&pring->ring_lock, iflags);
11195 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11196 abtsiocbp, 0);
11197 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11198 } else {
11199 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11200 abtsiocbp, 0);
11201 }
11202
11203 if (retval)
11204 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11205
11206
/*
 * Caller to this routine should check for IOCB_ERROR
 * and handle it properly. This routine no longer removes
 * iocb off txcmplq and call compl in case of IOCB_ERROR.
 */
11211 return retval;
11212 }
11213
/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb. In case
 * of unloading, the abort iocb will not be issued to commands on the ELS
 * ring. Instead, the callback function shall be changed for those commands
 * so that nothing happens when they finish. This function is called with
 * hbalock held. The function returns 0 when the command iocb is an abort
 * request.
 **/
11227 int
11228 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11229 struct lpfc_iocbq *cmdiocb)
11230 {
11231 struct lpfc_vport *vport = cmdiocb->vport;
11232 int retval = IOCB_ERROR;
11233 IOCB_t *icmd = NULL;
11234
11235 lockdep_assert_held(&phba->hbalock);
11236
/*
 * There are certain command types we don't want to abort. And we
 * don't want to abort commands that are already in the process of
 * being aborted.
 */
11241
11242 icmd = &cmdiocb->iocb;
11243 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11244 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11245 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11246 return 0;
11247
11248 if (!pring) {
11249 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11250 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11251 else
11252 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11253 goto abort_iotag_exit;
11254 }
11255
/*
 * If we're unloading, don't abort iocb on the ELS ring, but change
 * the callback so that nothing happens when it finishes.
 */
11260 if ((vport->load_flag & FC_UNLOADING) &&
11261 (pring->ringno == LPFC_ELS_RING)) {
11262 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11263 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11264 else
11265 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11266 goto abort_iotag_exit;
11267 }
11268
/* Now, we try to issue the abort to the cmdiocb out */
11270 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11271
11272 abort_iotag_exit:
/*
 * Caller to this routine should check for IOCB_ERROR
 * and handle it properly. This routine no longer removes
 * iocb off txcmplq and call compl in case of IOCB_ERROR.
 */
11278 return retval;
11279 }
11280
/**
 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will abort all pending and outstanding iocbs to an HBA.
 **/
11287 void
11288 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11289 {
11290 struct lpfc_sli *psli = &phba->sli;
11291 struct lpfc_sli_ring *pring;
11292 struct lpfc_queue *qp = NULL;
11293 int i;
11294
11295 if (phba->sli_rev != LPFC_SLI_REV4) {
11296 for (i = 0; i < psli->num_rings; i++) {
11297 pring = &psli->sli3_ring[i];
11298 lpfc_sli_abort_iocb_ring(phba, pring);
11299 }
11300 return;
11301 }
11302 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11303 pring = qp->pring;
11304 if (!pring)
11305 continue;
11306 lpfc_sli_abort_iocb_ring(phba, pring);
11307 }
11308 }
11309
/**
 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
 *
 * This function acts as an iocb filter for functions which abort or count
 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
 * 0 if the filtering criteria is met for the given iocb and will return
 * 1 if the filtering criteria is not met.
 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
 * given iocb is for the SCSI device specified by vport, tgt_id and
 * lun_id parameters.
 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
 * given iocb is for the SCSI target specified by vport and tgt_id
 * parameters.
 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
 * given iocb is for the SCSI host associated with the given vport.
 * This function is called with no locks held.
 **/
11332 static int
11333 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11334 uint16_t tgt_id, uint64_t lun_id,
11335 lpfc_ctx_cmd ctx_cmd)
11336 {
11337 struct lpfc_io_buf *lpfc_cmd;
11338 int rc = 1;
11339
11340 if (iocbq->vport != vport)
11341 return rc;
11342
11343 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11344 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11345 return rc;
11346
11347 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11348
11349 if (lpfc_cmd->pCmd == NULL)
11350 return rc;
11351
11352 switch (ctx_cmd) {
11353 case LPFC_CTX_LUN:
11354 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11355 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11356 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11357 rc = 0;
11358 break;
11359 case LPFC_CTX_TGT:
11360 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11361 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11362 rc = 0;
11363 break;
11364 case LPFC_CTX_HOST:
11365 rc = 0;
11366 break;
11367 default:
11368 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11369 __func__, ctx_cmd);
11370 break;
11371 }
11372
11373 return rc;
11374 }
11375
/**
 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function returns the number of FCP commands pending for the vport.
 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
 * commands pending on the vport associated with the SCSI device specified
 * by the tgt_id and lun_id parameters.
 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
 * commands pending on the vport associated with the SCSI target specified
 * by the tgt_id parameter.
 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
 * commands pending on the vport.
 * This function returns the number of iocbs which satisfy the filter.
 * This function is called with no locks held.
 **/
11395 int
11396 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11397 lpfc_ctx_cmd ctx_cmd)
11398 {
11399 struct lpfc_hba *phba = vport->phba;
11400 struct lpfc_iocbq *iocbq;
11401 int sum, i;
11402
11403 spin_lock_irq(&phba->hbalock);
11404 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11405 iocbq = phba->sli.iocbq_lookup[i];
11406
if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11408 ctx_cmd) == 0)
11409 sum++;
11410 }
11411 spin_unlock_irq(&phba->hbalock);
11412
11413 return sum;
11414 }
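
/*
 * Usage sketch (illustrative): a reset handler can use this counter to
 * wait for outstanding commands to drain after issuing aborts, e.g.
 *
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) &&
 *	       --wait_cnt)
 *		msleep(100);
 *
 * where wait_cnt (a hypothetical local) bounds the poll loop and
 * LPFC_CTX_LUN restricts the count to the one LUN named by
 * tgt_id/lun_id.
 */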
11415
/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This function is called when an aborted FCP iocb completes. This
 * function is called by the ring event handler with no lock held.
 * This function frees the iocb.
 **/
11426 void
11427 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11428 struct lpfc_iocbq *rspiocb)
11429 {
11430 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11431 "3096 ABORT_XRI_CN completing on rpi x%x "
11432 "original iotag x%x, abort cmd iotag x%x "
11433 "status 0x%x, reason 0x%x\n",
11434 cmdiocb->iocb.un.acxri.abortContextTag,
11435 cmdiocb->iocb.un.acxri.abortIoTag,
11436 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11437 rspiocb->iocb.un.ulpWord[4]);
11438 lpfc_sli_release_iocbq(phba, cmdiocb);
11439 return;
11440 }
11441
/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by the lpfc_sli_validate_fcp_iocb function.
 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
 * parameters.
 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with the SCSI target specified by the tgt_id
 * parameter.
 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with the virtual port.
 * This function returns the number of iocbs it failed to abort.
 * This function is called with no locks held.
 **/
11463 int
11464 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11465 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11466 {
11467 struct lpfc_hba *phba = vport->phba;
11468 struct lpfc_iocbq *iocbq;
11469 struct lpfc_iocbq *abtsiocb;
11470 struct lpfc_sli_ring *pring_s4;
11471 IOCB_t *cmd = NULL;
11472 int errcnt = 0, ret_val = 0;
11473 int i;
11474
/* all I/Os are in process of being flushed */
11476 if (phba->hba_flag & HBA_IOQ_FLUSH)
11477 return errcnt;
11478
11479 for (i = 1; i <= phba->sli.last_iotag; i++) {
11480 iocbq = phba->sli.iocbq_lookup[i];
11481
11482 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11483 abort_cmd) != 0)
11484 continue;
11485
/*
 * If the iocbq is already being aborted, don't take a second
 * action, but do count it.
 */
11490 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11491 continue;
11492
/* issue ABTS for this IOCB based on iotag */
11494 abtsiocb = lpfc_sli_get_iocbq(phba);
11495 if (abtsiocb == NULL) {
11496 errcnt++;
11497 continue;
11498 }
11499
/* indicate the IO is being aborted by the driver. */
11501 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11502
11503 cmd = &iocbq->iocb;
11504 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11505 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11506 if (phba->sli_rev == LPFC_SLI_REV4)
11507 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11508 else
11509 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11510 abtsiocb->iocb.ulpLe = 1;
11511 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11512 abtsiocb->vport = vport;
11513
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
11515 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11516 if (iocbq->iocb_flag & LPFC_IO_FCP)
11517 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11518 if (iocbq->iocb_flag & LPFC_IO_FOF)
11519 abtsiocb->iocb_flag |= LPFC_IO_FOF;
11520
11521 if (lpfc_is_link_up(phba))
11522 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11523 else
11524 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11525
/* Setup callback routine and issue the command. */
11527 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11528 if (phba->sli_rev == LPFC_SLI_REV4) {
11529 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11530 if (!pring_s4)
11531 continue;
11532 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11533 abtsiocb, 0);
11534 } else
11535 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11536 abtsiocb, 0);
11537 if (ret_val == IOCB_ERROR) {
11538 lpfc_sli_release_iocbq(phba, abtsiocb);
11539 errcnt++;
11540 continue;
11541 }
11542 }
11543
11544 return errcnt;
11545 }
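
/*
 * Usage sketch (illustrative): a host-level reset aborts everything
 * pending for the vport and treats a non-zero return as the number of
 * iocbs for which an abort could not be issued:
 *
 *	cnt = lpfc_sli_abort_iocb(vport, pring, 0, 0, LPFC_CTX_HOST);
 *
 * For LPFC_CTX_HOST the tgt_id/lun_id arguments are ignored by the
 * filter, so zeros are conventional placeholders.
 */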
11546
/**
 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by the lpfc_sli_validate_fcp_iocb function.
 * When cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
 * parameters.
 * When cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with the SCSI target specified by the tgt_id
 * parameter.
 * When cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with the virtual port.
 * This function returns the number of iocbs it aborted.
 * This function is called with no locks held right after a taskmgmt
 * command is sent.
 **/
11569 int
11570 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11571 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11572 {
11573 struct lpfc_hba *phba = vport->phba;
11574 struct lpfc_io_buf *lpfc_cmd;
11575 struct lpfc_iocbq *abtsiocbq;
11576 struct lpfc_nodelist *ndlp;
11577 struct lpfc_iocbq *iocbq;
11578 IOCB_t *icmd;
11579 int sum, i, ret_val;
11580 unsigned long iflags;
11581 struct lpfc_sli_ring *pring_s4 = NULL;
11582
11583 spin_lock_irqsave(&phba->hbalock, iflags);
11584
/* all I/Os are in process of being flushed */
11586 if (phba->hba_flag & HBA_IOQ_FLUSH) {
11587 spin_unlock_irqrestore(&phba->hbalock, iflags);
11588 return 0;
11589 }
11590 sum = 0;
11591
11592 for (i = 1; i <= phba->sli.last_iotag; i++) {
11593 iocbq = phba->sli.iocbq_lookup[i];
11594
11595 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11596 cmd) != 0)
11597 continue;
11598
/* Guard against IO completion being called at same time */
11600 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11601 spin_lock(&lpfc_cmd->buf_lock);
11602
11603 if (!lpfc_cmd->pCmd) {
11604 spin_unlock(&lpfc_cmd->buf_lock);
11605 continue;
11606 }
11607
11608 if (phba->sli_rev == LPFC_SLI_REV4) {
11609 pring_s4 =
11610 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
11611 if (!pring_s4) {
11612 spin_unlock(&lpfc_cmd->buf_lock);
11613 continue;
11614 }
11615
11616 spin_lock(&pring_s4->ring_lock);
11617 }
11618
/*
 * If the iocbq is already being aborted, don't take a second
 * action, but do count it.
 */
11623 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11624 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11625 if (phba->sli_rev == LPFC_SLI_REV4)
11626 spin_unlock(&pring_s4->ring_lock);
11627 spin_unlock(&lpfc_cmd->buf_lock);
11628 continue;
11629 }
11630
/* issue ABTS for this IOCB based on iotag */
11632 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11633 if (!abtsiocbq) {
11634 if (phba->sli_rev == LPFC_SLI_REV4)
11635 spin_unlock(&pring_s4->ring_lock);
11636 spin_unlock(&lpfc_cmd->buf_lock);
11637 continue;
11638 }
11639
11640 icmd = &iocbq->iocb;
11641 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11642 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11643 if (phba->sli_rev == LPFC_SLI_REV4)
11644 abtsiocbq->iocb.un.acxri.abortIoTag =
11645 iocbq->sli4_xritag;
11646 else
11647 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11648 abtsiocbq->iocb.ulpLe = 1;
11649 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11650 abtsiocbq->vport = vport;
11651
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
11653 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11654 if (iocbq->iocb_flag & LPFC_IO_FCP)
11655 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11656 if (iocbq->iocb_flag & LPFC_IO_FOF)
11657 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11658
11659 ndlp = lpfc_cmd->rdata->pnode;
11660
11661 if (lpfc_is_link_up(phba) &&
11662 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11663 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11664 else
11665 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11666
/* Setup callback routine and issue the command. */
11668 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11669
/*
 * Indicate the IO is being aborted by the driver and set
 * the caller's flag into the aborted IO.
 */
11674 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11675
11676 if (phba->sli_rev == LPFC_SLI_REV4) {
11677 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11678 abtsiocbq, 0);
11679 spin_unlock(&pring_s4->ring_lock);
11680 } else {
11681 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11682 abtsiocbq, 0);
11683 }
11684
11685 spin_unlock(&lpfc_cmd->buf_lock);
11686
11687 if (ret_val == IOCB_ERROR)
11688 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11689 else
11690 sum++;
11691 }
11692 spin_unlock_irqrestore(&phba->hbalock, iflags);
11693 return sum;
11694 }
11695
/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_sli_issue_iocb_wait function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other threads which
 * clean up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
11713 static void
11714 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11715 struct lpfc_iocbq *cmdiocbq,
11716 struct lpfc_iocbq *rspiocbq)
11717 {
11718 wait_queue_head_t *pdone_q;
11719 unsigned long iflags;
11720 struct lpfc_io_buf *lpfc_cmd;
11721
11722 spin_lock_irqsave(&phba->hbalock, iflags);
11723 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11724
/*
 * A time out has occurred for the iocb. If a time out
 * completion handler has been supplied, call it. Otherwise,
 * just free the iocbq.
 */
11730
11731 spin_unlock_irqrestore(&phba->hbalock, iflags);
11732 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11733 cmdiocbq->wait_iocb_cmpl = NULL;
11734 if (cmdiocbq->iocb_cmpl)
11735 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11736 else
11737 lpfc_sli_release_iocbq(phba, cmdiocbq);
11738 return;
11739 }
11740
11741 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11742 if (cmdiocbq->context2 && rspiocbq)
11743 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11744 &rspiocbq->iocb, sizeof(IOCB_t));

/* Set the exchange busy flag for task management commands */
11747 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11748 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11749 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
11750 cur_iocbq);
11751 if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
11752 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
11753 else
11754 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
11755 }
11756
11757 pdone_q = cmdiocbq->context_un.wait_queue;
11758 if (pdone_q)
11759 wake_up(pdone_q);
11760 spin_unlock_irqrestore(&phba->hbalock, iflags);
11761 return;
11762 }
11763
/**
 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag to test.
 *
 * This routine grabs the hbalock and then tests the iocb_flag to
 * see if the passed in flag is set.
 * Returns:
 * non-zero if the flag is set.
 * 0 if the flag is not set.
 **/
11776 static int
11777 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11778 struct lpfc_iocbq *piocbq, uint32_t flag)
11779 {
11780 unsigned long iflags;
11781 int ret;
11782
11783 spin_lock_irqsave(&phba->hbalock, iflags);
11784 ret = piocbq->iocb_flag & flag;
11785 spin_unlock_irqrestore(&phba->hbalock, iflags);
11786 return ret;
11787
11788 }
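/*
 * Usage sketch (illustrative only; the real caller is
 * lpfc_sli_issue_iocb_wait() below): the flag must be tested under
 * hbalock so the check cannot race with lpfc_sli_wake_iocb_wait()
 * setting LPFC_IO_WAKE on another CPU.
 */
#if 0	/* example, not built */
	timeleft = wait_event_timeout(done_q,
			lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
			msecs_to_jiffies(timeout * 1000));
#endif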
11789
/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the iocb shall be used
 * to handle iocbs which time out. If the field is NULL, the
 * function shall free the iocbq structure. If more clean up is
 * needed, the caller is expected to provide a completion function
 * that will provide the needed clean up. If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
 * completion function set in the iocb_cmpl field and then return
 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait and will sleep while waiting, so it must
 * not be called from interrupt context.
 * If the iocb is successfully completed, it returns IOCB_SUCCESS;
 * if it times out, it returns IOCB_TIMEDOUT; if the iocb could not
 * be issued, it returns IOCB_BUSY or IOCB_ERROR.
 * The caller is expected to provide the response iocbq memory in
 * @prspiocbq if a copy of the response iocb is needed; in that
 * case context2 of @piocb must be unused (NULL) on entry.
 * This function takes the hbalock while manipulating iocb flags
 * of the passed in iocb object.
 **/
11826 int
11827 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11828 uint32_t ring_number,
11829 struct lpfc_iocbq *piocb,
11830 struct lpfc_iocbq *prspiocbq,
11831 uint32_t timeout)
11832 {
11833 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11834 long timeleft, timeout_req = 0;
11835 int retval = IOCB_SUCCESS;
11836 uint32_t creg_val;
11837 struct lpfc_iocbq *iocb;
11838 int txq_cnt = 0;
11839 int txcmplq_cnt = 0;
11840 struct lpfc_sli_ring *pring;
11841 unsigned long iflags;
11842 bool iocb_completed = true;
11843
11844 if (phba->sli_rev >= LPFC_SLI_REV4)
11845 pring = lpfc_sli4_calc_ring(phba, piocb);
11846 else
11847 pring = &phba->sli.sli3_ring[ring_number];
11848
/*
 * If the caller has provided a response iocbq buffer, context2 must
 * be NULL or it is an error.
 */
11852 if (prspiocbq) {
11853 if (piocb->context2)
11854 return IOCB_ERROR;
11855 piocb->context2 = prspiocbq;
11856 }
11857
11858 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
11859 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11860 piocb->context_un.wait_queue = &done_q;
11861 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
11862
11863 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11864 if (lpfc_readl(phba->HCregaddr, &creg_val))
11865 return IOCB_ERROR;
11866 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11867 writel(creg_val, phba->HCregaddr);
11868 readl(phba->HCregaddr);
11869 }
11870
11871 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11872 SLI_IOCB_RET_IOCB);
11873 if (retval == IOCB_SUCCESS) {
11874 timeout_req = msecs_to_jiffies(timeout * 1000);
11875 timeleft = wait_event_timeout(done_q,
11876 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
11877 timeout_req);
11878 spin_lock_irqsave(&phba->hbalock, iflags);
11879 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {

/*
 * IOCB timed out. Inform the wake iocb wait completion
 * function and set the local status.
 */

11886 iocb_completed = false;
11887 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
11888 }
11889 spin_unlock_irqrestore(&phba->hbalock, iflags);
11890 if (iocb_completed) {
11891 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11892 "0331 IOCB wake signaled\n");
/* Note: we are not indicating if the IOCB has a success
 * status or not - that is for the caller to check.
 * IOCB_SUCCESS means just that the command was sent and
 * completed. Not that it completed successfully.
 */
11898 } else if (timeleft == 0) {
11899 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11900 "0338 IOCB wait timeout error - no "
11901 "wake response Data x%x\n", timeout);
11902 retval = IOCB_TIMEDOUT;
11903 } else {
11904 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11905 "0330 IOCB wake NOT set, "
11906 "Data x%x x%lx\n",
11907 timeout, (timeleft / jiffies));
11908 retval = IOCB_TIMEDOUT;
11909 }
11910 } else if (retval == IOCB_BUSY) {
11911 if (phba->cfg_log_verbose & LOG_SLI) {
11912 list_for_each_entry(iocb, &pring->txq, list) {
11913 txq_cnt++;
11914 }
11915 list_for_each_entry(iocb, &pring->txcmplq, list) {
11916 txcmplq_cnt++;
11917 }
11918 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11919 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
11920 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
11921 }
11922 return retval;
11923 } else {
11924 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11925 "0332 IOCB wait issue failed, Data x%x\n",
11926 retval);
11927 retval = IOCB_ERROR;
11928 }
11929
11930 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11931 if (lpfc_readl(phba->HCregaddr, &creg_val))
11932 return IOCB_ERROR;
11933 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
11934 writel(creg_val, phba->HCregaddr);
11935 readl(phba->HCregaddr);
11936 }
11937
11938 if (prspiocbq)
11939 piocb->context2 = NULL;
11940
11941 piocb->context_un.wait_queue = NULL;
11942 piocb->iocb_cmpl = NULL;
11943 return retval;
11944 }
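/*
 * Caller sketch (hypothetical helper, for illustration only -- not a
 * function in this driver): issue an ELS-class iocb synchronously and
 * honor the IOCB_TIMEDOUT ownership rule described above.
 */
#if 0	/* example, not built */
static int example_issue_iocb_sync(struct lpfc_hba *phba,
				   struct lpfc_iocbq *cmd)
{
	struct lpfc_iocbq *rsp;
	int rc;

	rsp = lpfc_sli_get_iocbq(phba);
	if (!rsp)
		return IOCB_ERROR;

	/* Sleep up to 30 seconds; rsp->iocb holds the response on success */
	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmd, rsp, 30);

	/*
	 * On IOCB_TIMEDOUT the command iocb is still owned by the SLI
	 * layer and is freed by the deferred completion; only the
	 * caller-provided response iocbq may be released here.
	 */
	lpfc_sli_release_iocbq(phba, rsp);
	return rc;
}
#endif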
11945
/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using a
 * non-interruptible wait_for_completion_timeout. On timeout,
 * the default completion handler is installed so the mailbox
 * resources are released when the command eventually finishes;
 * the caller must not free the mailbox resources when this
 * function returns MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion,
 * so it must not be called from interrupt context.
 * The mailbox completion handler lpfc_sli_wake_mbox_wait wakes
 * up the thread that sleeps here.
 **/
11972 int
11973 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
11974 uint32_t timeout)
11975 {
11976 struct completion mbox_done;
11977 int retval;
11978 unsigned long flag;
11979
11980 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
11981
11982 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
11983
11984
11985 init_completion(&mbox_done);
11986 pmboxq->context3 = &mbox_done;
11987
11988 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
11989 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
11990 wait_for_completion_timeout(&mbox_done,
11991 msecs_to_jiffies(timeout * 1000));
11992
11993 spin_lock_irqsave(&phba->hbalock, flag);
11994 pmboxq->context3 = NULL;
11995
/*
 * If the LPFC_MBX_WAKE flag is set, the mailbox completed;
 * otherwise do not free the resources here.
 */
11999 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12000 retval = MBX_SUCCESS;
12001 } else {
12002 retval = MBX_TIMEOUT;
12003 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12004 }
12005 spin_unlock_irqrestore(&phba->hbalock, flag);
12006 }
12007 return retval;
12008 }
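/*
 * Caller sketch (hypothetical, illustration only): a typical synchronous
 * mailbox sequence. Note the MBX_TIMEOUT rule from the comment above:
 * on timeout the mailbox is completed later by lpfc_sli_def_mbox_cmpl,
 * so the caller must not free it.
 */
#if 0	/* example, not built */
static int example_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_read_config(phba, mbox);	/* build the READ_CONFIG mailbox */
	rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif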
12009
/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 * @mbx_action: Mailbox shutdown options.
 *
 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as being in a block state to
 * prevent asynchronous mailbox commands from issuing off the pending
 * mailbox command queue. If the shutdown is due to HBA error conditions
 * such as EEH or ERATT, this routine invokes the mailbox sub-system
 * flush routine to forcefully bring down the mailbox sub-system.
 * Otherwise, if it is due to a normal condition (such as driver unload),
 * this routine waits for the ongoing mailbox command to finish before
 * invoking the mailbox sub-system flush routine.
 **/
12025 void
12026 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12027 {
12028 struct lpfc_sli *psli = &phba->sli;
12029 unsigned long timeout;
12030
12031 if (mbx_action == LPFC_MBX_NO_WAIT) {
12032
12033 msleep(100);
12034 lpfc_sli_mbox_sys_flush(phba);
12035 return;
12036 }
12037 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12038
12039
12040 local_bh_disable();
12041
12042 spin_lock_irq(&phba->hbalock);
12043 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12044
12045 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* Determine how long we might wait for the active mailbox
 * command to be gracefully completed by firmware.
 */
12049 if (phba->sli.mbox_active)
12050 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12051 phba->sli.mbox_active) *
12052 1000) + jiffies;
12053 spin_unlock_irq(&phba->hbalock);
12054
12055
12056 local_bh_enable();
12057
12058 while (phba->sli.mbox_active) {
12059
12060 msleep(2);
12061 if (time_after(jiffies, timeout))
/* Timeout: let the mailbox flush routine forcefully
 * release the active mailbox command.
 */
12065 break;
12066 }
12067 } else {
12068 spin_unlock_irq(&phba->hbalock);
12069
12070
12071 local_bh_enable();
12072 }
12073
12074 lpfc_sli_mbox_sys_flush(phba);
12075 }
12076
/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
12088 static int
12089 lpfc_sli_eratt_read(struct lpfc_hba *phba)
12090 {
12091 uint32_t ha_copy;
12092
12093
12094 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12095 goto unplug_err;
12096
12097 if (ha_copy & HA_ERATT) {
12098
12099 if (lpfc_sli_read_hs(phba))
12100 goto unplug_err;
12101
12102
12103 if ((HS_FFER1 & phba->work_hs) &&
12104 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12105 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12106 phba->hba_flag |= DEFER_ERATT;
12107
12108 writel(0, phba->HCregaddr);
12109 readl(phba->HCregaddr);
12110 }
12111
12112
12113 phba->work_ha |= HA_ERATT;
12114
12115 phba->hba_flag |= HBA_ERATT_HANDLED;
12116 return 1;
12117 }
12118 return 0;
12119
12120 unplug_err:
12121
12122 phba->work_hs |= UNPLUG_ERR;
12123
12124 phba->work_ha |= HA_ERATT;
12125
12126 phba->hba_flag |= HBA_ERATT_HANDLED;
12127 return 1;
12128 }
12129
/**
 * lpfc_sli4_eratt_read - read sli-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI4 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
12141 static int
12142 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12143 {
12144 uint32_t uerr_sta_hi, uerr_sta_lo;
12145 uint32_t if_type, portsmphr;
12146 struct lpfc_register portstat_reg;
12147
/*
 * For now, use the SLI4 device internal unrecoverable error
 * registers for error attention. This can be changed later.
 */
12152 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12153 switch (if_type) {
12154 case LPFC_SLI_INTF_IF_TYPE_0:
12155 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12156 &uerr_sta_lo) ||
12157 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12158 &uerr_sta_hi)) {
12159 phba->work_hs |= UNPLUG_ERR;
12160 phba->work_ha |= HA_ERATT;
12161 phba->hba_flag |= HBA_ERATT_HANDLED;
12162 return 1;
12163 }
12164 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12165 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12166 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12167 "1423 HBA Unrecoverable error: "
12168 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12169 "ue_mask_lo_reg=0x%x, "
12170 "ue_mask_hi_reg=0x%x\n",
12171 uerr_sta_lo, uerr_sta_hi,
12172 phba->sli4_hba.ue_mask_lo,
12173 phba->sli4_hba.ue_mask_hi);
12174 phba->work_status[0] = uerr_sta_lo;
12175 phba->work_status[1] = uerr_sta_hi;
12176 phba->work_ha |= HA_ERATT;
12177 phba->hba_flag |= HBA_ERATT_HANDLED;
12178 return 1;
12179 }
12180 break;
12181 case LPFC_SLI_INTF_IF_TYPE_2:
12182 case LPFC_SLI_INTF_IF_TYPE_6:
12183 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12184 &portstat_reg.word0) ||
12185 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12186 &portsmphr)){
12187 phba->work_hs |= UNPLUG_ERR;
12188 phba->work_ha |= HA_ERATT;
12189 phba->hba_flag |= HBA_ERATT_HANDLED;
12190 return 1;
12191 }
12192 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12193 phba->work_status[0] =
12194 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12195 phba->work_status[1] =
12196 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12197 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12198 "2885 Port Status Event: "
12199 "port status reg 0x%x, "
12200 "port smphr reg 0x%x, "
12201 "error 1=0x%x, error 2=0x%x\n",
12202 portstat_reg.word0,
12203 portsmphr,
12204 phba->work_status[0],
12205 phba->work_status[1]);
12206 phba->work_ha |= HA_ERATT;
12207 phba->hba_flag |= HBA_ERATT_HANDLED;
12208 return 1;
12209 }
12210 break;
12211 case LPFC_SLI_INTF_IF_TYPE_1:
12212 default:
12213 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12214 "2886 HBA Error Attention on unsupported "
12215 "if type %d.", if_type);
12216 return 1;
12217 }
12218
12219 return 0;
12220 }
12221
/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called from timer soft interrupt context to check HBA's
 * error attention register bit for error attention events.
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
12232 int
12233 lpfc_sli_check_eratt(struct lpfc_hba *phba)
12234 {
12235 uint32_t ha_copy;
12236
/* If somebody is waiting to handle an eratt, don't process it
 * here. The brdkill function will do this.
 */
12240 if (phba->link_flag & LS_IGNORE_ERATT)
12241 return 0;
12242
12243
12244 spin_lock_irq(&phba->hbalock);
12245 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12246
12247 spin_unlock_irq(&phba->hbalock);
12248 return 0;
12249 }

/*
 * If there is deferred error attention, do not check for error
 * attention.
 */
12255 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12256 spin_unlock_irq(&phba->hbalock);
12257 return 0;
12258 }
12259
12260
12261 if (unlikely(pci_channel_offline(phba->pcidev))) {
12262 spin_unlock_irq(&phba->hbalock);
12263 return 0;
12264 }
12265
12266 switch (phba->sli_rev) {
12267 case LPFC_SLI_REV2:
12268 case LPFC_SLI_REV3:
12269
12270 ha_copy = lpfc_sli_eratt_read(phba);
12271 break;
12272 case LPFC_SLI_REV4:
12273
12274 ha_copy = lpfc_sli4_eratt_read(phba);
12275 break;
12276 default:
12277 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12278 "0299 Invalid SLI revision (%d)\n",
12279 phba->sli_rev);
12280 ha_copy = 0;
12281 break;
12282 }
12283 spin_unlock_irq(&phba->hbalock);
12284
12285 return ha_copy;
12286 }
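/*
 * Usage sketch (illustrative; the real caller is the driver's
 * error-attention poll timer): check for a latched error attention
 * event and, if present, let the worker thread handle it.
 */
#if 0	/* example, not built */
	if (lpfc_sli_check_eratt(phba))
		lpfc_worker_wake_up(phba);
#endif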
12287
/**
 * lpfc_intr_state_check - Check device state for interrupt handling
 * @phba: Pointer to HBA context.
 *
 * This inline routine checks whether a device or its PCI slot is in a state
 * that the interrupt should be handled.
 *
 * This function returns 0 if the device is in a state the interrupt should
 * be handled, otherwise -EIO.
 **/
12298 static inline int
12299 lpfc_intr_state_check(struct lpfc_hba *phba)
12300 {
12301
12302 if (unlikely(pci_channel_offline(phba->pcidev)))
12303 return -EIO;
12304
12305
12306 phba->sli.slistat.sli_intr++;
12307
12308
12309 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12310 return -EIO;
12311
12312 return 0;
12313 }
12314
/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there are slow-path events in
 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA
 * is undergoing initialization, the interrupt handler will not process
 * the interrupt. The link attention and ELS ring attention events are
 * handled by the worker thread. The interrupt handler signals the worker
 * thread and returns for these events. This function is called without
 * any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else
 * it returns IRQ_NONE.
 **/
12336 irqreturn_t
12337 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12338 {
12339 struct lpfc_hba *phba;
12340 uint32_t ha_copy, hc_copy;
12341 uint32_t work_ha_copy;
12342 unsigned long status;
12343 unsigned long iflag;
12344 uint32_t control;
12345
12346 MAILBOX_t *mbox, *pmbox;
12347 struct lpfc_vport *vport;
12348 struct lpfc_nodelist *ndlp;
12349 struct lpfc_dmabuf *mp;
12350 LPFC_MBOXQ_t *pmb;
12351 int rc;
12352
/*
 * Get the driver's phba structure from the dev_id and
 * assume the HBA is not interrupting.
 */
12357 phba = (struct lpfc_hba *)dev_id;
12358
12359 if (unlikely(!phba))
12360 return IRQ_NONE;
12361
/*
 * Stuff needs to be attended to when this function is invoked as an
 * individual interrupt handler in MSI-X multi-message interrupt mode
 */
12366 if (phba->intr_type == MSIX) {
12367
12368 if (lpfc_intr_state_check(phba))
12369 return IRQ_NONE;
12370
12371 spin_lock_irqsave(&phba->hbalock, iflag);
12372 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12373 goto unplug_error;
/* If somebody is waiting to handle an eratt, don't process it
 * here. The brdkill function will do this.
 */
12377 if (phba->link_flag & LS_IGNORE_ERATT)
12378 ha_copy &= ~HA_ERATT;
12379
12380 if (ha_copy & HA_ERATT) {
12381 if (phba->hba_flag & HBA_ERATT_HANDLED)
12382
12383 ha_copy &= ~HA_ERATT;
12384 else
12385
12386 phba->hba_flag |= HBA_ERATT_HANDLED;
12387 }
12388
/*
 * If there is deferred error attention, do not check for any
 * interrupt.
 */
12393 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12394 spin_unlock_irqrestore(&phba->hbalock, iflag);
12395 return IRQ_NONE;
12396 }
12397
12398
12399 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12400 goto unplug_error;
12401
12402 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12403 HC_LAINT_ENA | HC_ERINT_ENA),
12404 phba->HCregaddr);
12405 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12406 phba->HAregaddr);
12407 writel(hc_copy, phba->HCregaddr);
12408 readl(phba->HAregaddr);
12409 spin_unlock_irqrestore(&phba->hbalock, iflag);
12410 } else
12411 ha_copy = phba->ha_copy;
12412
12413 work_ha_copy = ha_copy & phba->work_ha_mask;
12414
12415 if (work_ha_copy) {
12416 if (work_ha_copy & HA_LATT) {
12417 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
/*
 * Turn off Link Attention interrupts
 * until CLEAR_LA done
 */
12422 spin_lock_irqsave(&phba->hbalock, iflag);
12423 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12424 if (lpfc_readl(phba->HCregaddr, &control))
12425 goto unplug_error;
12426 control &= ~HC_LAINT_ENA;
12427 writel(control, phba->HCregaddr);
12428 readl(phba->HCregaddr);
12429 spin_unlock_irqrestore(&phba->hbalock, iflag);
12430 }
12431 else
12432 work_ha_copy &= ~HA_LATT;
12433 }
12434
12435 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
/*
 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
 * the only slow ring.
 */
12440 status = (work_ha_copy &
12441 (HA_RXMASK << (4*LPFC_ELS_RING)));
12442 status >>= (4*LPFC_ELS_RING);
12443 if (status & HA_RXMASK) {
12444 spin_lock_irqsave(&phba->hbalock, iflag);
12445 if (lpfc_readl(phba->HCregaddr, &control))
12446 goto unplug_error;
12447
12448 lpfc_debugfs_slow_ring_trc(phba,
12449 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12450 control, status,
12451 (uint32_t)phba->sli.slistat.sli_intr);
12452
12453 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12454 lpfc_debugfs_slow_ring_trc(phba,
12455 "ISR Disable ring:"
12456 "pwork:x%x hawork:x%x wait:x%x",
12457 phba->work_ha, work_ha_copy,
12458 (uint32_t)((unsigned long)
12459 &phba->work_waitq));
12460
12461 control &=
12462 ~(HC_R0INT_ENA << LPFC_ELS_RING);
12463 writel(control, phba->HCregaddr);
12464 readl(phba->HCregaddr);
12465 }
12466 else {
12467 lpfc_debugfs_slow_ring_trc(phba,
12468 "ISR slow ring: pwork:"
12469 "x%x hawork:x%x wait:x%x",
12470 phba->work_ha, work_ha_copy,
12471 (uint32_t)((unsigned long)
12472 &phba->work_waitq));
12473 }
12474 spin_unlock_irqrestore(&phba->hbalock, iflag);
12475 }
12476 }
12477 spin_lock_irqsave(&phba->hbalock, iflag);
12478 if (work_ha_copy & HA_ERATT) {
12479 if (lpfc_sli_read_hs(phba))
12480 goto unplug_error;
/*
 * Check if there is a deferred error condition
 * active.
 */
12485 if ((HS_FFER1 & phba->work_hs) &&
12486 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12487 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12488 phba->work_hs)) {
12489 phba->hba_flag |= DEFER_ERATT;
12490
12491 writel(0, phba->HCregaddr);
12492 readl(phba->HCregaddr);
12493 }
12494 }
12495
12496 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12497 pmb = phba->sli.mbox_active;
12498 pmbox = &pmb->u.mb;
12499 mbox = phba->mbox;
12500 vport = pmb->vport;
12501
12502
12503 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12504 if (pmbox->mbxOwner != OWN_HOST) {
12505 spin_unlock_irqrestore(&phba->hbalock, iflag);
/*
 * Stray Mailbox Interrupt, mbxCommand <cmd>
 * mbxStatus <status>
 */
12510 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12511 LOG_SLI,
12512 "(%d):0304 Stray Mailbox "
12513 "Interrupt mbxCommand x%x "
12514 "mbxStatus x%x\n",
12515 (vport ? vport->vpi : 0),
12516 pmbox->mbxCommand,
12517 pmbox->mbxStatus);
12518
12519 work_ha_copy &= ~HA_MBATT;
12520 } else {
12521 phba->sli.mbox_active = NULL;
12522 spin_unlock_irqrestore(&phba->hbalock, iflag);
12523 phba->last_completion_time = jiffies;
12524 del_timer(&phba->sli.mbox_tmo);
12525 if (pmb->mbox_cmpl) {
12526 lpfc_sli_pcimem_bcopy(mbox, pmbox,
12527 MAILBOX_CMD_SIZE);
12528 if (pmb->out_ext_byte_len &&
12529 pmb->ctx_buf)
12530 lpfc_sli_pcimem_bcopy(
12531 phba->mbox_ext,
12532 pmb->ctx_buf,
12533 pmb->out_ext_byte_len);
12534 }
12535 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12536 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12537
12538 lpfc_debugfs_disc_trc(vport,
12539 LPFC_DISC_TRC_MBOX_VPORT,
12540 "MBOX dflt rpi: : "
12541 "status:x%x rpi:x%x",
12542 (uint32_t)pmbox->mbxStatus,
12543 pmbox->un.varWords[0], 0);
12544
12545 if (!pmbox->mbxStatus) {
12546 mp = (struct lpfc_dmabuf *)
12547 (pmb->ctx_buf);
12548 ndlp = (struct lpfc_nodelist *)
12549 pmb->ctx_ndlp;

/* Reg_LOGIN of dflt RPI was successful. Now get rid
 * of the RPI using the same mbox buffer.
 */
12556 lpfc_unreg_login(phba,
12557 vport->vpi,
12558 pmbox->un.varWords[0],
12559 pmb);
12560 pmb->mbox_cmpl =
12561 lpfc_mbx_cmpl_dflt_rpi;
12562 pmb->ctx_buf = mp;
12563 pmb->ctx_ndlp = ndlp;
12564 pmb->vport = vport;
12565 rc = lpfc_sli_issue_mbox(phba,
12566 pmb,
12567 MBX_NOWAIT);
12568 if (rc != MBX_BUSY)
12569 lpfc_printf_log(phba,
12570 KERN_ERR,
12571 LOG_MBOX | LOG_SLI,
"0350 rc should have "
"been MBX_BUSY\n");
12574 if (rc != MBX_NOT_FINISHED)
12575 goto send_current_mbox;
12576 }
12577 }
12578 spin_lock_irqsave(
12579 &phba->pport->work_port_lock,
12580 iflag);
12581 phba->pport->work_port_events &=
12582 ~WORKER_MBOX_TMO;
12583 spin_unlock_irqrestore(
12584 &phba->pport->work_port_lock,
12585 iflag);
12586 lpfc_mbox_cmpl_put(phba, pmb);
12587 }
12588 } else
12589 spin_unlock_irqrestore(&phba->hbalock, iflag);
12590
12591 if ((work_ha_copy & HA_MBATT) &&
12592 (phba->sli.mbox_active == NULL)) {
12593 send_current_mbox:
12594
12595 do {
12596 rc = lpfc_sli_issue_mbox(phba, NULL,
12597 MBX_NOWAIT);
12598 } while (rc == MBX_NOT_FINISHED);
12599 if (rc != MBX_SUCCESS)
12600 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12601 LOG_SLI, "0349 rc should be "
12602 "MBX_SUCCESS\n");
12603 }
12604
12605 spin_lock_irqsave(&phba->hbalock, iflag);
12606 phba->work_ha |= work_ha_copy;
12607 spin_unlock_irqrestore(&phba->hbalock, iflag);
12608 lpfc_worker_wake_up(phba);
12609 }
12610 return IRQ_HANDLED;
12611 unplug_error:
12612 spin_unlock_irqrestore(&phba->hbalock, iflag);
12613 return IRQ_HANDLED;
12614
12615 }
12616
/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled
 * in the interrupt context. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else
 * it returns IRQ_NONE.
 **/
12636 irqreturn_t
12637 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12638 {
12639 struct lpfc_hba *phba;
12640 uint32_t ha_copy;
12641 unsigned long status;
12642 unsigned long iflag;
12643 struct lpfc_sli_ring *pring;
12644
/* Get the driver's phba structure from the dev_id and
 * assume the HBA is not interrupting.
 */
12648 phba = (struct lpfc_hba *) dev_id;
12649
12650 if (unlikely(!phba))
12651 return IRQ_NONE;
12652
/*
 * Stuff needs to be attended to when this function is invoked as an
 * individual interrupt handler in MSI-X multi-message interrupt mode
 */
12657 if (phba->intr_type == MSIX) {
12658
12659 if (lpfc_intr_state_check(phba))
12660 return IRQ_NONE;
12661
12662 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12663 return IRQ_HANDLED;
12664
12665 spin_lock_irqsave(&phba->hbalock, iflag);
/*
 * If there is deferred error attention, do not check for
 * any interrupt.
 */
12670 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12671 spin_unlock_irqrestore(&phba->hbalock, iflag);
12672 return IRQ_NONE;
12673 }
12674 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12675 phba->HAregaddr);
12676 readl(phba->HAregaddr);
12677 spin_unlock_irqrestore(&phba->hbalock, iflag);
12678 } else
12679 ha_copy = phba->ha_copy;
/*
 * Process all events on the FCP ring. Take the optimized
 * path for FCP IO.
 */
12684 ha_copy &= ~(phba->work_ha_mask);
12685
12686 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12687 status >>= (4*LPFC_FCP_RING);
12688 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12689 if (status & HA_RXMASK)
12690 lpfc_sli_handle_fast_ring_event(phba, pring, status);
12691
12692 if (phba->cfg_multi_ring_support == 2) {
/*
 * Process all events on the extra ring. Take the optimized
 * path for extra ring IO.
 */
12697 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12698 status >>= (4*LPFC_EXTRA_RING);
12699 if (status & HA_RXMASK) {
12700 lpfc_sli_handle_fast_ring_event(phba,
12701 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12702 status);
12703 }
12704 }
12705 return IRQ_HANDLED;
12706 }
12707
/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else
 * it returns IRQ_NONE.
 **/
12725 irqreturn_t
12726 lpfc_sli_intr_handler(int irq, void *dev_id)
12727 {
12728 struct lpfc_hba *phba;
12729 irqreturn_t sp_irq_rc, fp_irq_rc;
12730 unsigned long status1, status2;
12731 uint32_t hc_copy;
12732
/*
 * Get the driver's phba structure from the dev_id and
 * assume the HBA is not interrupting.
 */
12737 phba = (struct lpfc_hba *) dev_id;
12738
12739 if (unlikely(!phba))
12740 return IRQ_NONE;
12741
12742
12743 if (lpfc_intr_state_check(phba))
12744 return IRQ_NONE;
12745
12746 spin_lock(&phba->hbalock);
12747 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12748 spin_unlock(&phba->hbalock);
12749 return IRQ_HANDLED;
12750 }
12751
12752 if (unlikely(!phba->ha_copy)) {
12753 spin_unlock(&phba->hbalock);
12754 return IRQ_NONE;
12755 } else if (phba->ha_copy & HA_ERATT) {
12756 if (phba->hba_flag & HBA_ERATT_HANDLED)
12757
12758 phba->ha_copy &= ~HA_ERATT;
12759 else
12760
12761 phba->hba_flag |= HBA_ERATT_HANDLED;
12762 }
12763
/*
 * If there is deferred error attention, do not check for any interrupt.
 */
12767 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12768 spin_unlock(&phba->hbalock);
12769 return IRQ_NONE;
12770 }
12771
12772
12773 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12774 spin_unlock(&phba->hbalock);
12775 return IRQ_HANDLED;
12776 }
12777 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12778 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12779 phba->HCregaddr);
12780 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
12781 writel(hc_copy, phba->HCregaddr);
12782 readl(phba->HAregaddr);
12783 spin_unlock(&phba->hbalock);
12784
/*
 * Invoke slow-path host attention interrupt handling as appropriate.
 */

/* status of events with mailbox and link attention */
12790 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12791
12792
12793 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12794 status2 >>= (4*LPFC_ELS_RING);
12795
12796 if (status1 || (status2 & HA_RXMASK))
12797 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
12798 else
12799 sp_irq_rc = IRQ_NONE;
12800
/*
 * Invoke fast-path host attention interrupt handling as appropriate.
 */

/* status of events with FCP ring */
12806 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12807 status1 >>= (4*LPFC_FCP_RING);
12808
12809
12810 if (phba->cfg_multi_ring_support == 2) {
12811 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12812 status2 >>= (4*LPFC_EXTRA_RING);
12813 } else
12814 status2 = 0;
12815
12816 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
12817 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
12818 else
12819 fp_irq_rc = IRQ_NONE;
12820
12821
12822 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
12823 }
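/*
 * Registration sketch (hypothetical, illustration only): how a device
 * level INTx/MSI handler such as lpfc_sli_intr_handler() is typically
 * wired up. The driver's real request_irq() calls live in lpfc_init.c.
 */
#if 0	/* example, not built */
	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc)
		return rc;
#endif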
12824
/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort events
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 ELS XRI abort events.
 **/
12832 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12833 {
12834 struct lpfc_cq_event *cq_event;
12835
12836
12837 spin_lock_irq(&phba->hbalock);
12838 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12839 spin_unlock_irq(&phba->hbalock);
12840
12841 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12842
12843 spin_lock_irq(&phba->hbalock);
12844 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12845 cq_event, struct lpfc_cq_event, list);
12846 spin_unlock_irq(&phba->hbalock);
12847
12848 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12849
12850 lpfc_sli4_cq_event_release(phba, cq_event);
12851 }
12852 }
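/*
 * The loop above is the driver's standard producer/consumer hand-off:
 * events are queued under hbalock from interrupt context and drained
 * here in worker context with the lock dropped around the (possibly
 * sleeping) processing call. A reduced sketch of the idiom, with
 * hypothetical names:
 */
#if 0	/* example, not built */
	while (!list_empty(&work_queue)) {
		spin_lock_irq(&lock);
		list_remove_head(&work_queue, evt, struct my_event, list);
		spin_unlock_irq(&lock);
		process_event(evt);	/* lock is not held here */
	}
#endif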
12853
/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure.
 * @pIocbIn: pointer to the rspiocbq
 * @pIocbOut: pointer to the cmdiocbq
 * @wcqe: pointer to the complete wcqe
 *
 * This routine transfers the fields of a command iocbq to a response iocbq
 * by copying all the IOCB fields from the command iocbq and transferring
 * the completion status information from the complete wcqe.
 **/
12865 static void
12866 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12867 struct lpfc_iocbq *pIocbIn,
12868 struct lpfc_iocbq *pIocbOut,
12869 struct lpfc_wcqe_complete *wcqe)
12870 {
12871 int numBdes, i;
12872 unsigned long iflags;
12873 uint32_t status, max_response;
12874 struct lpfc_dmabuf *dmabuf;
12875 struct ulp_bde64 *bpl, bde;
12876 size_t offset = offsetof(struct lpfc_iocbq, iocb);
12877
12878 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
12879 sizeof(struct lpfc_iocbq) - offset);
12880
12881 status = bf_get(lpfc_wcqe_c_status, wcqe);
12882 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
12883 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
12884 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
12885 pIocbIn->iocb.un.fcpi.fcpi_parm =
12886 pIocbOut->iocb.un.fcpi.fcpi_parm -
12887 wcqe->total_data_placed;
12888 else
12889 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12890 else {
12891 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12892 switch (pIocbOut->iocb.ulpCommand) {
12893 case CMD_ELS_REQUEST64_CR:
12894 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12895 bpl = (struct ulp_bde64 *)dmabuf->virt;
12896 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
12897 max_response = bde.tus.f.bdeSize;
12898 break;
12899 case CMD_GEN_REQUEST64_CR:
12900 max_response = 0;
12901 if (!pIocbOut->context3)
12902 break;
12903 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
12904 sizeof(struct ulp_bde64);
12905 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12906 bpl = (struct ulp_bde64 *)dmabuf->virt;
12907 for (i = 0; i < numBdes; i++) {
12908 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
12909 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
12910 max_response += bde.tus.f.bdeSize;
12911 }
12912 break;
12913 default:
12914 max_response = wcqe->total_data_placed;
12915 break;
12916 }
12917 if (max_response < wcqe->total_data_placed)
12918 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
12919 else
12920 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
12921 wcqe->total_data_placed;
12922 }
12923
12924
12925 if (status == CQE_STATUS_DI_ERROR) {
12926 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12927
12928 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12929 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12930 else
12931 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12932
12933 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12934 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))
12935 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12936 BGS_GUARD_ERR_MASK;
12937 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))
12938 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12939 BGS_APPTAG_ERR_MASK;
12940 if (bf_get(lpfc_wcqe_c_bg_re, wcqe))
12941 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12942 BGS_REFTAG_ERR_MASK;
12943
12944
12945 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12946 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12947 BGS_HI_WATER_MARK_PRESENT_MASK;
12948 pIocbIn->iocb.unsli3.sli3_bg.bghm =
12949 wcqe->total_data_placed;
12950 }

/*
 * Set ALL the error bits to indicate we don't know what
 * type of error it is.
 */
12956 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12957 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12958 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12959 BGS_GUARD_ERR_MASK);
12960 }
12961
12962
12963 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
12964 spin_lock_irqsave(&phba->hbalock, iflags);
12965 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
12966 spin_unlock_irqrestore(&phba->hbalock, iflags);
12967 }
12968 }
/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response iocbq from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive iocbq, NULL otherwise.
 **/
12981 static struct lpfc_iocbq *
12982 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
12983 struct lpfc_iocbq *irspiocbq)
12984 {
12985 struct lpfc_sli_ring *pring;
12986 struct lpfc_iocbq *cmdiocbq;
12987 struct lpfc_wcqe_complete *wcqe;
12988 unsigned long iflags;
12989
12990 pring = lpfc_phba_elsring(phba);
12991 if (unlikely(!pring))
12992 return NULL;
12993
12994 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
12995 pring->stats.iocb_event++;
12996
12997 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12998 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12999 if (unlikely(!cmdiocbq)) {
13000 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13001 "0386 ELS complete with no corresponding "
13002 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13003 wcqe->word0, wcqe->total_data_placed,
13004 wcqe->parameter, wcqe->word3);
13005 lpfc_sli_release_iocbq(phba, irspiocbq);
13006 return NULL;
13007 }
13008
13009 spin_lock_irqsave(&pring->ring_lock, iflags);
13010
13011 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13012 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13013
13014
13015 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13016
13017 return irspiocbq;
13018 }
13019
13020 inline struct lpfc_cq_event *
13021 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13022 {
13023 struct lpfc_cq_event *cq_event;
13024
13025
13026 cq_event = lpfc_sli4_cq_event_alloc(phba);
13027 if (!cq_event) {
13028 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13029 "0602 Failed to alloc CQ_EVENT entry\n");
13030 return NULL;
13031 }
13032
13033
13034 memcpy(&cq_event->cqe, entry, size);
13035 return cq_event;
13036 }
/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with an
 * asynchronous event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13048 static bool
13049 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13050 {
13051 struct lpfc_cq_event *cq_event;
13052 unsigned long iflags;
13053
13054 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13055 "0392 Async Event: word0:x%x, word1:x%x, "
13056 "word2:x%x, word3:x%x\n", mcqe->word0,
13057 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13058
13059 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13060 if (!cq_event)
13061 return false;
13062 spin_lock_irqsave(&phba->hbalock, iflags);
13063 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13064
13065 phba->hba_flag |= ASYNC_EVENT;
13066 spin_unlock_irqrestore(&phba->hbalock, iflags);
13067
13068 return true;
13069 }
/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with a
 * mailbox completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13081 static bool
13082 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13083 {
13084 uint32_t mcqe_status;
13085 MAILBOX_t *mbox, *pmbox;
13086 struct lpfc_mqe *mqe;
13087 struct lpfc_vport *vport;
13088 struct lpfc_nodelist *ndlp;
13089 struct lpfc_dmabuf *mp;
13090 unsigned long iflags;
13091 LPFC_MBOXQ_t *pmb;
13092 bool workposted = false;
13093 int rc;
13094
13095
13096 if (!bf_get(lpfc_trailer_completed, mcqe))
13097 goto out_no_mqe_complete;
13098
13099
13100 spin_lock_irqsave(&phba->hbalock, iflags);
13101 pmb = phba->sli.mbox_active;
13102 if (unlikely(!pmb)) {
13103 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13104 "1832 No pending MBOX command to handle\n");
13105 spin_unlock_irqrestore(&phba->hbalock, iflags);
13106 goto out_no_mqe_complete;
13107 }
13108 spin_unlock_irqrestore(&phba->hbalock, iflags);
13109 mqe = &pmb->u.mqe;
13110 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13111 mbox = phba->mbox;
13112 vport = pmb->vport;
13113
13114
13115 phba->last_completion_time = jiffies;
13116 del_timer(&phba->sli.mbox_tmo);
13117
13118
13119 if (pmb->mbox_cmpl && mbox)
13120 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13121
/*
 * For mcqe errors, conditionally move a modified error code to
 * the mbox so that the error will not be missed.
 */
13126 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13127 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13128 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13129 bf_set(lpfc_mqe_status, mqe,
13130 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13131 }
13132 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13133 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13134 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13135 "MBOX dflt rpi: status:x%x rpi:x%x",
13136 mcqe_status,
13137 pmbox->un.varWords[0], 0);
13138 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13139 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13140 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
/* Reg_LOGIN of dflt RPI was successful. Now get rid of
 * the RPI using the same mbox buffer.
 */
13144 lpfc_unreg_login(phba, vport->vpi,
13145 pmbox->un.varWords[0], pmb);
13146 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13147 pmb->ctx_buf = mp;
13148 pmb->ctx_ndlp = ndlp;
13149 pmb->vport = vport;
13150 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13151 if (rc != MBX_BUSY)
13152 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
13153 LOG_SLI, "0385 rc should "
13154 "have been MBX_BUSY\n");
13155 if (rc != MBX_NOT_FINISHED)
13156 goto send_current_mbox;
13157 }
13158 }
13159 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13160 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13161 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13162
13163
13164 spin_lock_irqsave(&phba->hbalock, iflags);
13165 __lpfc_mbox_cmpl_put(phba, pmb);
13166 phba->work_ha |= HA_MBATT;
13167 spin_unlock_irqrestore(&phba->hbalock, iflags);
13168 workposted = true;
13169
13170 send_current_mbox:
13171 spin_lock_irqsave(&phba->hbalock, iflags);
13172
13173 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13174
13175 phba->sli.mbox_active = NULL;
13176 if (bf_get(lpfc_trailer_consumed, mcqe))
13177 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13178 spin_unlock_irqrestore(&phba->hbalock, iflags);
13179
13180 lpfc_worker_wake_up(phba);
13181 return workposted;
13182
13183 out_no_mqe_complete:
13184 spin_lock_irqsave(&phba->hbalock, iflags);
13185 if (bf_get(lpfc_trailer_consumed, mcqe))
13186 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13187 spin_unlock_irqrestore(&phba->hbalock, iflags);
13188 return false;
13189 }
/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry. It dispatches
 * either to the synchronous mailbox completion handler or to the
 * asynchronous event handler according to the MCQE async bit.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13202 static bool
13203 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13204 struct lpfc_cqe *cqe)
13205 {
13206 struct lpfc_mcqe mcqe;
13207 bool workposted;
13208
13209 cq->CQ_mbox++;
13210
13211
13212 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13213
13214
13215 if (!bf_get(lpfc_trailer_async, &mcqe))
13216 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13217 else
13218 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13219 return workposted;
13220 }
13221
/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13232 static bool
13233 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13234 struct lpfc_wcqe_complete *wcqe)
13235 {
13236 struct lpfc_iocbq *irspiocbq;
13237 unsigned long iflags;
13238 struct lpfc_sli_ring *pring = cq->pring;
13239 int txq_cnt = 0;
13240 int txcmplq_cnt = 0;
13241 int fcp_txcmplq_cnt = 0;
13242
13243
13244 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13245
13246 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13247 "0357 ELS CQE error: status=x%x: "
13248 "CQE: %08x %08x %08x %08x\n",
13249 bf_get(lpfc_wcqe_c_status, wcqe),
13250 wcqe->word0, wcqe->total_data_placed,
13251 wcqe->parameter, wcqe->word3);
13252 }
13253
13254
13255 irspiocbq = lpfc_sli_get_iocbq(phba);
13256 if (!irspiocbq) {
13257 if (!list_empty(&pring->txq))
13258 txq_cnt++;
13259 if (!list_empty(&pring->txcmplq))
13260 txcmplq_cnt++;
13261 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13262 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13263 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
13264 txq_cnt, phba->iocb_cnt,
13265 fcp_txcmplq_cnt,
13266 txcmplq_cnt);
13267 return false;
13268 }
13269
13270
13271 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13272 spin_lock_irqsave(&phba->hbalock, iflags);
13273 list_add_tail(&irspiocbq->cq_event.list,
13274 &phba->sli4_hba.sp_queue_event);
13275 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13276 spin_unlock_irqrestore(&phba->hbalock, iflags);
13277
13278 return true;
13279 }
13280
/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue entry consumed WCQE.
 *
 * This routine handles a slow-path WQ entry consumed event by invoking
 * the WQ release routine on the slow-path (ELS) WQ.
 **/
13289 static void
13290 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13291 struct lpfc_wcqe_release *wcqe)
13292 {
13293
13294 if (unlikely(!phba->sli4_hba.els_wq))
13295 return;
13296
13297 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13298 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13299 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13300 else
13301 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13302 "2579 Slow-path wqe consume event carries "
13303 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13304 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13305 phba->sli4_hba.els_wq->queue_id);
13306 }
13307
/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an XRI abort event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13318 static bool
13319 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13320 struct lpfc_queue *cq,
13321 struct sli4_wcqe_xri_aborted *wcqe)
13322 {
13323 bool workposted = false;
13324 struct lpfc_cq_event *cq_event;
13325 unsigned long iflags;
13326
13327 switch (cq->subtype) {
13328 case LPFC_IO:
13329 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13330 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13331
13332 if (phba->nvmet_support)
13333 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13334 }
13335 workposted = false;
13336 break;
13337 case LPFC_NVME_LS:
13338 case LPFC_ELS:
13339 cq_event = lpfc_cq_event_setup(
13340 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13341 if (!cq_event)
13342 return false;
13343 cq_event->hdwq = cq->hdwq;
13344 spin_lock_irqsave(&phba->hbalock, iflags);
13345 list_add_tail(&cq_event->list,
13346 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13347
13348 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13349 spin_unlock_irqrestore(&phba->hbalock, iflags);
13350 workposted = true;
13351 break;
13352 default:
13353 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13354 "0603 Invalid CQ subtype %d: "
13355 "%08x %08x %08x %08x\n",
13356 cq->subtype, wcqe->word0, wcqe->parameter,
13357 wcqe->word2, wcqe->word3);
13358 workposted = false;
13359 break;
13360 }
13361 return workposted;
13362 }
13363
13364 #define FC_RCTL_MDS_DIAGS 0xF4
13365
/**
 * lpfc_sli4_sp_handle_rcqe - Process unsolicited rcqe
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes an unsolicited receive-queue completion queue
 * entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13375 static bool
13376 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13377 {
13378 bool workposted = false;
13379 struct fc_frame_header *fc_hdr;
13380 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13381 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13382 struct lpfc_nvmet_tgtport *tgtp;
13383 struct hbq_dmabuf *dma_buf;
13384 uint32_t status, rq_id;
13385 unsigned long iflags;
13386
13387
13388 if (unlikely(!hrq) || unlikely(!drq))
13389 return workposted;
13390
13391 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13392 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13393 else
13394 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13395 if (rq_id != hrq->queue_id)
13396 goto out;
13397
13398 status = bf_get(lpfc_rcqe_status, rcqe);
13399 switch (status) {
13400 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13401 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13402 "2537 Receive Frame Truncated!!\n");
/* fall through */
13404 case FC_STATUS_RQ_SUCCESS:
13405 spin_lock_irqsave(&phba->hbalock, iflags);
13406 lpfc_sli4_rq_release(hrq, drq);
13407 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13408 if (!dma_buf) {
13409 hrq->RQ_no_buf_found++;
13410 spin_unlock_irqrestore(&phba->hbalock, iflags);
13411 goto out;
13412 }
13413 hrq->RQ_rcv_buf++;
13414 hrq->RQ_buf_posted--;
13415 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13416
13417 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13418
13419 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13420 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13421 spin_unlock_irqrestore(&phba->hbalock, iflags);
13422
13423 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
13424 break;
13425 }
13426
13427
13428 list_add_tail(&dma_buf->cq_event.list,
13429 &phba->sli4_hba.sp_queue_event);
13430
13431 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13432 spin_unlock_irqrestore(&phba->hbalock, iflags);
13433 workposted = true;
13434 break;
13435 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13436 if (phba->nvmet_support) {
13437 tgtp = phba->targetport->private;
13438 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13439 "6402 RQE Error x%x, posted %d err_cnt "
13440 "%d: %x %x %x\n",
13441 status, hrq->RQ_buf_posted,
13442 hrq->RQ_no_posted_buf,
13443 atomic_read(&tgtp->rcv_fcp_cmd_in),
13444 atomic_read(&tgtp->rcv_fcp_cmd_out),
13445 atomic_read(&tgtp->xmt_fcp_release));
13446 }

/* fall through */
13449 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13450 hrq->RQ_no_posted_buf++;
13451
13452 spin_lock_irqsave(&phba->hbalock, iflags);
13453 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13454 spin_unlock_irqrestore(&phba->hbalock, iflags);
13455 workposted = true;
13456 break;
13457 }
13458 out:
13459 return workposted;
13460 }
13461
/**
 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to a completion queue entry.
 *
 * This routine processes a slow-path work-queue or receive-queue
 * completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13473 static bool
13474 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13475 struct lpfc_cqe *cqe)
13476 {
13477 struct lpfc_cqe cqevt;
13478 bool workposted = false;
13479
13480
13481 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13482
13483
13484 switch (bf_get(lpfc_cqe_code, &cqevt)) {
13485 case CQE_CODE_COMPL_WQE:
13486
13487 phba->last_completion_time = jiffies;
13488 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13489 (struct lpfc_wcqe_complete *)&cqevt);
13490 break;
13491 case CQE_CODE_RELEASE_WQE:
13492
13493 lpfc_sli4_sp_handle_rel_wcqe(phba,
13494 (struct lpfc_wcqe_release *)&cqevt);
13495 break;
13496 case CQE_CODE_XRI_ABORTED:
13497
13498 phba->last_completion_time = jiffies;
13499 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13500 (struct sli4_wcqe_xri_aborted *)&cqevt);
13501 break;
13502 case CQE_CODE_RECEIVE:
13503 case CQE_CODE_RECEIVE_V1:
13504
13505 phba->last_completion_time = jiffies;
13506 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13507 (struct lpfc_rcqe *)&cqevt);
13508 break;
13509 default:
13510 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13511 "0388 Not a valid WCQE code: x%x\n",
13512 bf_get(lpfc_cqe_code, &cqevt));
13513 break;
13514 }
13515 return workposted;
13516 }
13517
/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @speq: Pointer to slow-path event queue.
 *
 * This routine processes an event queue entry from the slow-path event
 * queue. It looks up the associated completion queue by its id and, if
 * found, schedules the slow-path work handler for that CQ; otherwise an
 * error is logged and the entry is dropped.
 **/
13531 static void
13532 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13533 struct lpfc_queue *speq)
13534 {
13535 struct lpfc_queue *cq = NULL, *childq;
13536 uint16_t cqid;
13537
13538
13539 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13540
13541 list_for_each_entry(childq, &speq->child_list, list) {
13542 if (childq->queue_id == cqid) {
13543 cq = childq;
13544 break;
13545 }
13546 }
13547 if (unlikely(!cq)) {
13548 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13549 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13550 "0365 Slow-path CQ identifier "
13551 "(%d) does not exist\n", cqid);
13552 return;
13553 }
13554
13555
13556 cq->assoc_qp = speq;
13557
13558 if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
13559 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13560 "0390 Cannot schedule soft IRQ "
13561 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13562 cqid, cq->queue_id, raw_smp_processor_id());
13563 }
13564
/**
 * __lpfc_sli4_process_cq - Process elements of a CQ
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to CQ to be processed.
 * @handler: Routine to process each cqe.
 * @delay: Pointer to the rescheduling delay to set.
 *
 * This routine processes completion queue entries in a CQ. While a valid
 * queue element is found, the handler is called. During processing,
 * periodic doorbell writes let the hardware know how many entries have
 * been consumed.
 *
 * If the max limit on cqes to process is hit, or there are no more valid
 * entries, the loop stops. If a sufficient number of elements were
 * processed, meaning there is sufficient load, rather than rearming and
 * generating another interrupt, a rescheduling delay is set. A delay of
 * 0 indicates no rescheduling.
 *
 * Returns true if work was posted to the worker thread, false otherwise.
 **/
13585 static bool
13586 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
13587 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
13588 struct lpfc_cqe *), unsigned long *delay)
13589 {
13590 struct lpfc_cqe *cqe;
13591 bool workposted = false;
13592 int count = 0, consumed = 0;
13593 bool arm = true;
13594
13595
13596 *delay = 0;
13597
13598 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
13599 goto rearm_and_exit;
13600
13601
13602 cq->q_flag = 0;
13603 cqe = lpfc_sli4_cq_get(cq);
13604 while (cqe) {
13605 workposted |= handler(phba, cq, cqe);
13606 __lpfc_sli4_consume_cqe(phba, cq, cqe);
13607
13608 consumed++;
13609 if (!(++count % cq->max_proc_limit))
13610 break;
13611
13612 if (!(count % cq->notify_interval)) {
13613 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13614 LPFC_QUEUE_NOARM);
13615 consumed = 0;
13616 }
13617
13618 if (count == LPFC_NVMET_CQ_NOTIFY)
13619 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
13620
13621 cqe = lpfc_sli4_cq_get(cq);
13622 }
13623 if (count >= phba->cfg_cq_poll_threshold) {
13624 *delay = 1;
13625 arm = false;
13626 }
13627
13628
13629 if (count > cq->CQ_max_cqe)
13630 cq->CQ_max_cqe = count;
13631
13632 cq->assoc_qp->EQ_cqe_cnt += count;
13633
13634
13635 if (unlikely(count == 0))
13636 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13637 "0369 No entry from completion queue "
13638 "qid=%d\n", cq->queue_id);
13639
13640 cq->queue_claimed = 0;
13641
13642 rearm_and_exit:
13643 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13644 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
13645
13646 return workposted;
13647 }
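/*
 * Note on the claim protocol above: cmpxchg(&cq->queue_claimed, 0, 1)
 * serializes CQ processing per queue without holding a lock across the
 * per-entry handler calls. A reduced sketch of the same idiom, with
 * hypothetical names:
 */
#if 0	/* example, not built */
	if (cmpxchg(&q->claimed, 0, 1) != 0)
		return;			/* another context is draining q */
	while ((entry = get_next_entry(q)) != NULL)
		handle_entry(q, entry);
	q->claimed = 0;			/* release; a new claimer may enter */
#endif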
13648
/**
 * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
 * @cq: Pointer to CQ to be processed.
 *
 * This routine calls the cq processing routine with a handler specific
 * to the type of queue bound to it.
 *
 * The CQ routine returns two values: the first indicates whether work
 * was queued to the worker thread; if true, this routine wakes up the
 * worker thread. The second is the delay parameter: if non-zero, rather
 * than rearming the CQ and generating yet another interrupt, the CQ
 * handler is queued to be rescheduled after the specified delay.
 **/
13664 static void
13665 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
13666 {
13667 struct lpfc_hba *phba = cq->phba;
13668 unsigned long delay;
13669 bool workposted = false;
13670
13671
13672 switch (cq->type) {
13673 case LPFC_MCQ:
13674 workposted |= __lpfc_sli4_process_cq(phba, cq,
13675 lpfc_sli4_sp_handle_mcqe,
13676 &delay);
13677 break;
13678 case LPFC_WCQ:
13679 if (cq->subtype == LPFC_IO)
13680 workposted |= __lpfc_sli4_process_cq(phba, cq,
13681 lpfc_sli4_fp_handle_cqe,
13682 &delay);
13683 else
13684 workposted |= __lpfc_sli4_process_cq(phba, cq,
13685 lpfc_sli4_sp_handle_cqe,
13686 &delay);
13687 break;
13688 default:
13689 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13690 "0370 Invalid completion queue type (%d)\n",
13691 cq->type);
13692 return;
13693 }
13694
13695 if (delay) {
13696 if (!queue_delayed_work_on(cq->chann, phba->wq,
13697 &cq->sched_spwork, delay))
13698 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13699 "0394 Cannot schedule soft IRQ "
13700 "for cqid=%d on CPU %d\n",
13701 cq->queue_id, cq->chann);
13702 }
13703
13704
13705 if (workposted)
13706 lpfc_worker_wake_up(phba);
13707 }
13708
/**
 * lpfc_sli4_sp_process_cq - slow-path work handler when started by workqueue
 * @work: pointer to work element
 *
 * Translates from the work element and calls the slow-path handler.
 **/
13716 static void
13717 lpfc_sli4_sp_process_cq(struct work_struct *work)
13718 {
13719 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
13720
13721 __lpfc_sli4_sp_process_cq(cq);
13722 }
13723
/**
 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
 * @work: pointer to work element
 *
 * Translates from the delayed work element and calls the slow-path handler.
 **/
13730 static void
13731 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
13732 {
13733 struct lpfc_queue *cq = container_of(to_delayed_work(work),
13734 struct lpfc_queue, sched_spwork);
13735
13736 __lpfc_sli4_sp_process_cq(cq);
13737 }
13738
/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command iocb execution.
 **/
13748 static void
13749 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13750 struct lpfc_wcqe_complete *wcqe)
13751 {
13752 struct lpfc_sli_ring *pring = cq->pring;
13753 struct lpfc_iocbq *cmdiocbq;
13754 struct lpfc_iocbq irspiocbq;
13755 unsigned long iflags;
13756
13757
13758 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
/* If resource errors reported from HBA, reduce queue
 * depth of the SCSI device.
 */
13762 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13763 IOSTAT_LOCAL_REJECT)) &&
13764 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13765 IOERR_NO_RESOURCES))
13766 phba->lpfc_rampdown_queue_depth(phba);
13767
13768
13769 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13770 "0373 FCP CQE error: status=x%x: "
13771 "CQE: %08x %08x %08x %08x\n",
13772 bf_get(lpfc_wcqe_c_status, wcqe),
13773 wcqe->word0, wcqe->total_data_placed,
13774 wcqe->parameter, wcqe->word3);
13775 }
13776
13777
13778 spin_lock_irqsave(&pring->ring_lock, iflags);
13779 pring->stats.iocb_event++;
13780 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13781 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13782 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13783 if (unlikely(!cmdiocbq)) {
13784 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13785 "0374 FCP complete with no corresponding "
13786 "cmdiocb: iotag (%d)\n",
13787 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13788 return;
13789 }
13790 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13791 cmdiocbq->isr_timestamp = cq->isr_timestamp;
13792 #endif
13793 if (cmdiocbq->iocb_cmpl == NULL) {
13794 if (cmdiocbq->wqe_cmpl) {
13795 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13796 spin_lock_irqsave(&phba->hbalock, iflags);
13797 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13798 spin_unlock_irqrestore(&phba->hbalock, iflags);
13799 }
13800
13801
13802 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13803 return;
13804 }
13805 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13806 "0375 FCP cmdiocb not callback function "
13807 "iotag: (%d)\n",
13808 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13809 return;
13810 }
13811
13812
13813 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
13814
13815 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13816 spin_lock_irqsave(&phba->hbalock, iflags);
13817 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13818 spin_unlock_irqrestore(&phba->hbalock, iflags);
13819 }
13820
13821
13822 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
13823 }
13824
/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue entry consumed WCQE.
 *
 * This routine handles a fast-path WQ entry consumed event by invoking
 * the WQ release routine on the matching fast-path WQ.
 **/
13834 static void
13835 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13836 struct lpfc_wcqe_release *wcqe)
13837 {
13838 struct lpfc_queue *childwq;
13839 bool wqid_matched = false;
13840 uint16_t hba_wqid;
13841
13842
13843 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
13844 list_for_each_entry(childwq, &cq->child_list, list) {
13845 if (childwq->queue_id == hba_wqid) {
13846 lpfc_sli4_wq_release(childwq,
13847 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13848 if (childwq->q_flag & HBA_NVMET_WQFULL)
13849 lpfc_nvmet_wqfull_process(phba, childwq);
13850 wqid_matched = true;
13851 break;
13852 }
13853 }
13854
13855 if (wqid_matched != true)
13856 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13857 "2580 Fast-path wqe consume event carries "
13858 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
13859 }
13860
/**
 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes an NVMET receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13870 static bool
13871 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13872 struct lpfc_rcqe *rcqe)
13873 {
13874 bool workposted = false;
13875 struct lpfc_queue *hrq;
13876 struct lpfc_queue *drq;
13877 struct rqb_dmabuf *dma_buf;
13878 struct fc_frame_header *fc_hdr;
13879 struct lpfc_nvmet_tgtport *tgtp;
13880 uint32_t status, rq_id;
13881 unsigned long iflags;
13882 uint32_t fctl, idx;
13883
13884 if ((phba->nvmet_support == 0) ||
13885 (phba->sli4_hba.nvmet_cqset == NULL))
13886 return workposted;
13887
13888 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
13889 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
13890 drq = phba->sli4_hba.nvmet_mrq_data[idx];
13891
13892
13893 if (unlikely(!hrq) || unlikely(!drq))
13894 return workposted;
13895
13896 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13897 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13898 else
13899 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13900
13901 if ((phba->nvmet_support == 0) ||
13902 (rq_id != hrq->queue_id))
13903 return workposted;
13904
13905 status = bf_get(lpfc_rcqe_status, rcqe);
13906 switch (status) {
13907 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13908 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13909 "6126 Receive Frame Truncated!!\n");
/* fall through */
13911 case FC_STATUS_RQ_SUCCESS:
13912 spin_lock_irqsave(&phba->hbalock, iflags);
13913 lpfc_sli4_rq_release(hrq, drq);
13914 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
13915 if (!dma_buf) {
13916 hrq->RQ_no_buf_found++;
13917 spin_unlock_irqrestore(&phba->hbalock, iflags);
13918 goto out;
13919 }
13920 spin_unlock_irqrestore(&phba->hbalock, iflags);
13921 hrq->RQ_rcv_buf++;
13922 hrq->RQ_buf_posted--;
13923 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13924
13925
13926 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
13927 fc_hdr->fh_f_ctl[1] << 8 |
13928 fc_hdr->fh_f_ctl[2]);
13929 if (((fctl &
13930 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
13931 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
13932 (fc_hdr->fh_seq_cnt != 0))
13933 goto drop;
13934
13935 if (fc_hdr->fh_type == FC_TYPE_FCP) {
13936 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
13937 lpfc_nvmet_unsol_fcp_event(
13938 phba, idx, dma_buf, cq->isr_timestamp,
13939 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
13940 return false;
13941 }
13942 drop:
13943 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
13944 break;
13945 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13946 if (phba->nvmet_support) {
13947 tgtp = phba->targetport->private;
13948 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13949 "6401 RQE Error x%x, posted %d err_cnt "
13950 "%d: %x %x %x\n",
13951 status, hrq->RQ_buf_posted,
13952 hrq->RQ_no_posted_buf,
13953 atomic_read(&tgtp->rcv_fcp_cmd_in),
13954 atomic_read(&tgtp->rcv_fcp_cmd_out),
13955 atomic_read(&tgtp->xmt_fcp_release));
13956 }

/* fall through */
13959 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13960 hrq->RQ_no_posted_buf++;
13961
13962 break;
13963 }
13964 out:
13965 return workposted;
13966 }
13967
/**
 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
 * @phba: adapter with cq
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue and dispatches it by CQE code.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13979 static bool
13980 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13981 struct lpfc_cqe *cqe)
13982 {
13983 struct lpfc_wcqe_release wcqe;
13984 bool workposted = false;
13985
13986
13987 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
13988
13989
13990 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
13991 case CQE_CODE_COMPL_WQE:
13992 case CQE_CODE_NVME_ERSP:
13993 cq->CQ_wq++;
13994
13995 phba->last_completion_time = jiffies;
13996 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
13997 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13998 (struct lpfc_wcqe_complete *)&wcqe);
13999 break;
14000 case CQE_CODE_RELEASE_WQE:
14001 cq->CQ_release_wqe++;
14002
14003 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14004 (struct lpfc_wcqe_release *)&wcqe);
14005 break;
14006 case CQE_CODE_XRI_ABORTED:
14007 cq->CQ_xri_aborted++;
14008
14009 phba->last_completion_time = jiffies;
14010 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14011 (struct sli4_wcqe_xri_aborted *)&wcqe);
14012 break;
14013 case CQE_CODE_RECEIVE_V1:
14014 case CQE_CODE_RECEIVE:
14015 phba->last_completion_time = jiffies;
14016 if (cq->subtype == LPFC_NVMET) {
14017 workposted = lpfc_sli4_nvmet_handle_rcqe(
14018 phba, cq, (struct lpfc_rcqe *)&wcqe);
14019 }
14020 break;
14021 default:
14022 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14023 "0144 Not a valid CQE code: x%x\n",
14024 bf_get(lpfc_wcqe_c_code, &wcqe));
14025 break;
14026 }
14027 return workposted;
14028 }
14029
/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the queue structure.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine processes an event queue entry from the fast-path event
 * queue. It checks the MajorCode to determine that this is a completion
 * event on a completion queue; if not, an error is logged and the entry
 * is dropped. Otherwise it looks up the corresponding completion queue
 * (fast-path lookup table, NVMET CQ set, or NVME LS CQ) and schedules
 * processing of all the entries on that completion queue.
 **/
14042 static void
14043 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14044 struct lpfc_eqe *eqe)
14045 {
14046 struct lpfc_queue *cq = NULL;
14047 uint32_t qidx = eq->hdwq;
14048 uint16_t cqid, id;
14049
14050 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14051 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14052 "0366 Not a valid completion "
14053 "event: majorcode=x%x, minorcode=x%x\n",
14054 bf_get_le32(lpfc_eqe_major_code, eqe),
14055 bf_get_le32(lpfc_eqe_minor_code, eqe));
14056 return;
14057 }
14058
14059
14060 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14061
14062
14063 if (cqid <= phba->sli4_hba.cq_max) {
14064 cq = phba->sli4_hba.cq_lookup[cqid];
14065 if (cq)
14066 goto work_cq;
14067 }
14068
14069
14070 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14071 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14072 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14073
14074 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14075 goto process_cq;
14076 }
14077 }
14078
14079 if (phba->sli4_hba.nvmels_cq &&
14080 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14081
14082 cq = phba->sli4_hba.nvmels_cq;
14083 }
14084
14085
14086 if (cq == NULL) {
14087 lpfc_sli4_sp_handle_eqe(phba, eqe,
14088 phba->sli4_hba.hdwq[qidx].hba_eq);
14089 return;
14090 }
14091
14092 process_cq:
14093 if (unlikely(cqid != cq->queue_id)) {
14094 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14095 "0368 Miss-matched fast-path completion "
14096 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14097 cqid, cq->queue_id);
14098 return;
14099 }
14100
14101 work_cq:
14102 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14103 if (phba->ktime_on)
14104 cq->isr_timestamp = ktime_get_ns();
14105 else
14106 cq->isr_timestamp = 0;
14107 #endif
14108 if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
14109 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14110 "0363 Cannot schedule soft IRQ "
14111 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14112 cqid, cq->queue_id, raw_smp_processor_id());
14113 }
14114
/**
 * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
 * @cq: Pointer to CQ to be processed.
 *
 * This routine calls the cq processing routine with the fast-path CQE
 * handler.
 *
 * The CQ routine returns two values: the first indicates whether work
 * was queued to the worker thread; if true, this routine wakes up the
 * worker thread. The second is the delay parameter: if non-zero, rather
 * than rearming the CQ and generating yet another interrupt, the CQ
 * handler is queued to be rescheduled after the specified delay.
 **/
14130 static void
14131 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
14132 {
14133 struct lpfc_hba *phba = cq->phba;
14134 unsigned long delay;
14135 bool workposted = false;
14136
	/* process and rearm the CQ */
14138 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14139 &delay);
14140
14141 if (delay) {
14142 if (!queue_delayed_work_on(cq->chann, phba->wq,
14143 &cq->sched_irqwork, delay))
14144 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14145 "0367 Cannot schedule soft IRQ "
14146 "for cqid=%d on CPU %d\n",
14147 cq->queue_id, cq->chann);
14148 }
14149
	/* wake up worker thread if there are works to be done */
14151 if (workposted)
14152 lpfc_worker_wake_up(phba);
14153 }

/**
 * lpfc_sli4_hba_process_cq - fast-path work handler when started by interrupt
 * @work: pointer to work element
 *
 * Translates from the work handler and calls the fast-path handler.
 **/
14162 static void
14163 lpfc_sli4_hba_process_cq(struct work_struct *work)
14164 {
14165 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14166
14167 __lpfc_sli4_hba_process_cq(cq);
14168 }

/**
 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
 * @work: pointer to work element
 *
 * Translates from the delayed work handler and calls the fast-path handler.
 **/
14176 static void
14177 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14178 {
14179 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14180 struct lpfc_queue, sched_irqwork);
14181
14182 __lpfc_sli4_hba_process_cq(cq);
14183 }

/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when the device is enabled with MSI-X multi-message
 * interrupt mode and there is an event on a fast-path EQ. When the device
 * is enabled with either MSI or Pin-IRQ interrupt mode, this function is
 * called as part of the device-level interrupt handler. When the PCI slot
 * is in error recovery or the HBA is undergoing initialization, the
 * interrupt handler does not process the interrupt.
 *
 * This function returns IRQ_HANDLED when an interrupt is handled, else it
 * returns IRQ_NONE.
 **/
14211 irqreturn_t
14212 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
14213 {
14214 struct lpfc_hba *phba;
14215 struct lpfc_hba_eq_hdl *hba_eq_hdl;
14216 struct lpfc_queue *fpeq;
14217 unsigned long iflag;
14218 int ecount = 0;
14219 int hba_eqidx;
14220 struct lpfc_eq_intr_info *eqi;
14221 uint32_t icnt;
14222
	/* Get the driver's phba structure from the dev_id */
14224 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14225 phba = hba_eq_hdl->phba;
14226 hba_eqidx = hba_eq_hdl->idx;
14227
14228 if (unlikely(!phba))
14229 return IRQ_NONE;
14230 if (unlikely(!phba->sli4_hba.hdwq))
14231 return IRQ_NONE;
14232
	/* Get to the EQ struct associated with this vector */
14234 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
14235 if (unlikely(!fpeq))
14236 return IRQ_NONE;
14237
	/* Check device state for handling interrupt */
14239 if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
14241 spin_lock_irqsave(&phba->hbalock, iflag);
14242 if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
14244 lpfc_sli4_eq_flush(phba, fpeq);
14245 spin_unlock_irqrestore(&phba->hbalock, iflag);
14246 return IRQ_NONE;
14247 }
14248
14249 eqi = phba->sli4_hba.eq_info;
14250 icnt = this_cpu_inc_return(eqi->icnt);
14251 fpeq->last_cpu = raw_smp_processor_id();
14252
14253 if (icnt > LPFC_EQD_ISR_TRIGGER &&
14254 phba->cfg_irq_chann == 1 &&
14255 phba->cfg_auto_imax &&
14256 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14257 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14258 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
14259
	/* process and rearm the EQ */
14261 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
14262
14263 if (unlikely(ecount == 0)) {
14264 fpeq->EQ_no_entry++;
14265 if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
14267 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14268 "0358 MSI-X interrupt with no EQE\n");
14269 else
			/* Non MSI-X treated on interrupt as EQ share INT */
14271 return IRQ_NONE;
14272 }
14273
14274 return IRQ_HANDLED;
14275 }
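
/*
 * Illustrative sketch, not driver code: how a per-vector EQ handler such as
 * lpfc_sli4_hba_intr_handler() is typically bound to an MSI-X vector with
 * request_irq(). The "lpfc-eq" name string and the helper itself are
 * examples only; the real driver builds per-vector names during attach.
 */
static int __maybe_unused
lpfc_example_bind_eq_vector(struct lpfc_hba *phba, unsigned int vec_idx)
{
	struct lpfc_hba_eq_hdl *hdl = &phba->sli4_hba.hba_eq_hdl[vec_idx];

	hdl->idx = vec_idx;
	hdl->phba = phba;
	return request_irq(pci_irq_vector(phba->pcidev, vec_idx),
			   lpfc_sli4_hba_intr_handler, 0, "lpfc-eq", hdl);
}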

/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler to a device with
 * SLI-4 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA that
 * requires driver attention. It invokes each HBA EQ handler in turn.
 *
 * This function returns IRQ_HANDLED when an interrupt is handled, else it
 * returns IRQ_NONE.
 **/
14294 irqreturn_t
14295 lpfc_sli4_intr_handler(int irq, void *dev_id)
14296 {
14297 struct lpfc_hba *phba;
14298 irqreturn_t hba_irq_rc;
14299 bool hba_handled = false;
14300 int qidx;
14301
	/* Get the driver's phba structure from the dev_id */
14303 phba = (struct lpfc_hba *)dev_id;
14304
14305 if (unlikely(!phba))
14306 return IRQ_NONE;
14307

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */
14311 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14312 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14313 &phba->sli4_hba.hba_eq_hdl[qidx]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled = true;
14316 }
14317
	return hba_handled ? IRQ_HANDLED : IRQ_NONE;
14319 }
14320
14321 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
14322 {
14323 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
14324 struct lpfc_queue *eq;
14325 int i = 0;
14326
14327 rcu_read_lock();
14328
14329 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
14330 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
14331 if (!list_empty(&phba->poll_list))
14332 mod_timer(&phba->cpuhp_poll_timer,
14333 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14334
14335 rcu_read_unlock();
14336 }
14337
14338 inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
14339 {
14340 struct lpfc_hba *phba = eq->phba;
14341 int i = 0;
14342
	/*
	 * Unlocking an irq is one of the entry points to check
	 * for re-schedule, but we are good for the I/O submission
	 * path as the midlayer does a get_cpu() to glue us in.
	 * Flush out all invalidated queue entries before I/O
	 * submission is done.
	 */
14350 smp_rmb();
14351
14352 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
		/* We will likely not get the completion for the caller
		 * during this iteration, but that's fine. Future I/Os
		 * arriving on this eq should be able to pick it up. As
		 * for single I/Os, they will be handled through a sched
		 * from the polling timer function, which is currently
		 * triggered every 1 msec.
		 */
14360 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
14361
14362 return i;
14363 }
14364
14365 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
14366 {
14367 struct lpfc_hba *phba = eq->phba;
14368
14369 if (list_empty(&phba->poll_list)) {
14370 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
		/* kickstart the timer */
14372 mod_timer(&phba->cpuhp_poll_timer,
14373 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14374 }
14375
14376 list_add_rcu(&eq->_poll_list, &phba->poll_list);
14377 synchronize_rcu();
14378 }
14379
14380 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
14381 {
14382 struct lpfc_hba *phba = eq->phba;
14383
	/* Disable slow-path processing for this eq. Kick-start the eq
	 * by re-arming it ASAP.
	 */
14387 list_del_rcu(&eq->_poll_list);
14388 synchronize_rcu();
14389
14390 if (list_empty(&phba->poll_list))
14391 del_timer_sync(&phba->cpuhp_poll_timer);
14392 }
14393
14394 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
14395 {
14396 struct lpfc_queue *eq, *next;
14397
14398 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
14399 list_del(&eq->_poll_list);
14400
14401 INIT_LIST_HEAD(&phba->poll_list);
14402 synchronize_rcu();
14403 }
14404
14405 static inline void
14406 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
14407 {
14408 if (mode == eq->mode)
14409 return;
14410
	/*
	 * Currently this function is only called during a hotplug
	 * event and the cpu on which this function is executing
	 * is going offline. By now the hotplug has instructed
	 * the scheduler to remove this cpu from the cpu active mask,
	 * so we don't need to worry about being put aside by the
	 * scheduler for a high priority process. Interrupts could
	 * still come in, but they are known to retire ASAP.
	 */

	/* Disable expects us to wake up before it waits on
	 * poll completion so it has a chance to see the transition.
	 */
14421 WRITE_ONCE(eq->mode, mode);
	/* flush out the update */
14423 smp_wmb();
14424
	/*
	 * Add this eq to the polling list and start polling. For
	 * a grace period both the interrupt handler and the poller
	 * will try to process the eq _but_ that's fine. We have a
	 * synchronization mechanism in place (queue_claimed) to
	 * deal with it. This is just a draining phase for the
	 * interrupt handler (not the eq's) as we have guaranteed,
	 * through the barrier, that all the CPUs have seen the new
	 * CQ_POLLED state, which effectively disables rearming of
	 * the EQ. The whole idea is that the eq's die off eventually
	 * since we are not rearming them anymore.
	 */
14437 mode ? lpfc_sli4_add_to_poll_list(eq) :
14438 lpfc_sli4_remove_from_poll_list(eq);
14439 }
14440
14441 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
14442 {
14443 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
14444 }
14445
14446 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
14447 {
14448 struct lpfc_hba *phba = eq->phba;
14449
14450 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
14451
	/* Kick-start the pending io's in h/w.
	 * Once we switch back to interrupt processing on an eq,
	 * the io path scheduler no longer schedules the poll timer
	 * path that runs the eq's poll work, so we will not poll
	 * on this eq anymore. Fetch the pending io's out of the eq
	 * by rearming it, handing them to the eq's interrupt
	 * handler (this happens by default).
	 */
14459 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
14460 }
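
/*
 * Illustrative sketch, not driver code: the intended pairing of the two
 * entry points above. A CPU-hotplug offline callback switches the EQ to
 * timer-driven polling; the online path switches it back, which also
 * re-arms the EQ so interrupt delivery resumes.
 */
static void __maybe_unused
lpfc_example_eq_hotplug(struct lpfc_queue *eq, bool going_offline)
{
	if (going_offline)
		lpfc_sli4_start_polling(eq);	/* serviced by poll timer */
	else
		lpfc_sli4_stop_polling(eq);	/* back to interrupts, EQ re-armed */
}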

/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying
 * the queue on the HBA.
 **/
14470 void
14471 lpfc_sli4_queue_free(struct lpfc_queue *queue)
14472 {
14473 struct lpfc_dmabuf *dmabuf;
14474
14475 if (!queue)
14476 return;
14477
14478 if (!list_empty(&queue->wq_list))
14479 list_del(&queue->wq_list);
14480
14481 while (!list_empty(&queue->page_list)) {
14482 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14483 list);
14484 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14485 dmabuf->virt, dmabuf->phys);
14486 kfree(dmabuf);
14487 }
14488 if (queue->rqbp) {
14489 lpfc_free_rq_buffer(queue->phba, queue);
14490 kfree(queue->rqbp);
14491 }
14492
14493 if (!list_empty(&queue->cpu_list))
14494 list_del(&queue->cpu_list);
14495
14496 kfree(queue);
14498 }

/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @page_size: The size of a queue page
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 * @cpu: The cpu that will primarily utilize this queue.
 *
 * This function allocates a queue structure and the DMAable memory used
 * for the host resident queue. This function must be called before
 * creating the queue on the HBA.
 **/
14512 struct lpfc_queue *
14513 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14514 uint32_t entry_size, uint32_t entry_count, int cpu)
14515 {
14516 struct lpfc_queue *queue;
14517 struct lpfc_dmabuf *dmabuf;
14518 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14519 uint16_t x, pgcnt;
14520
14521 if (!phba->sli4_hba.pc_sli4_params.supported)
14522 hw_page_size = page_size;
14523
14524 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
14525
	/* If needed, adjust page count to match the max the adapter supports */
14527 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
14528 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
14529
14530 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
14531 GFP_KERNEL, cpu_to_node(cpu));
14532 if (!queue)
14533 return NULL;
14534
14535 INIT_LIST_HEAD(&queue->list);
14536 INIT_LIST_HEAD(&queue->_poll_list);
14537 INIT_LIST_HEAD(&queue->wq_list);
14538 INIT_LIST_HEAD(&queue->wqfull_list);
14539 INIT_LIST_HEAD(&queue->page_list);
14540 INIT_LIST_HEAD(&queue->child_list);
14541 INIT_LIST_HEAD(&queue->cpu_list);
14542

	/* Set queue parameters now.  If the system cannot provide memory
	 * resources, the free routine needs to know what was allocated.
	 */
14546 queue->page_count = pgcnt;
14547 queue->q_pgs = (void **)&queue[1];
14548 queue->entry_cnt_per_pg = hw_page_size / entry_size;
14549 queue->entry_size = entry_size;
14550 queue->entry_count = entry_count;
14551 queue->page_size = hw_page_size;
14552 queue->phba = phba;
14553
14554 for (x = 0; x < queue->page_count; x++) {
14555 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14556 dev_to_node(&phba->pcidev->dev));
14557 if (!dmabuf)
14558 goto out_fail;
14559 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14560 hw_page_size, &dmabuf->phys,
14561 GFP_KERNEL);
14562 if (!dmabuf->virt) {
14563 kfree(dmabuf);
14564 goto out_fail;
14565 }
14566 dmabuf->buffer_tag = x;
14567 list_add_tail(&dmabuf->list, &queue->page_list);
14568
14569 queue->q_pgs[x] = dmabuf->virt;
14570 }
14571 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14572 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14573 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14574 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
14575

	/* notify_interval will be set during q creation */

14578 return queue;
14579 out_fail:
14580 lpfc_sli4_queue_free(queue);
14581 return NULL;
14582 }
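
/*
 * Illustrative sketch, not driver code: the expected
 * lpfc_sli4_queue_alloc()/lpfc_sli4_queue_free() pairing. The eq_esize and
 * eq_ecount fields are the driver's cached EQ entry parameters from
 * lpfc_sli4.h; the helper name is hypothetical.
 */
static struct lpfc_queue *__maybe_unused
lpfc_example_alloc_eq_memory(struct lpfc_hba *phba, int cpu)
{
	struct lpfc_queue *q;

	q = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE,
				  phba->sli4_hba.eq_esize,
				  phba->sli4_hba.eq_ecount, cpu);
	if (!q)
		return NULL;

	/* On any later setup failure, release with the matching free:
	 * lpfc_sli4_queue_free(q);
	 */
	return q;
}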

/**
 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI barset flag.
 *
 * This function performs the iomap lookup of the specified PCI barset and
 * returns its host memory address. The returned address can be NULL.
 **/
14593 static void __iomem *
14594 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14595 {
14596 if (!phba->pcidev)
14597 return NULL;
14598
14599 switch (pci_barset) {
14600 case WQ_PCI_BAR_0_AND_1:
14601 return phba->pci_bar0_memmap_p;
14602 case WQ_PCI_BAR_2_AND_3:
14603 return phba->pci_bar2_memmap_p;
14604 case WQ_PCI_BAR_4_AND_5:
14605 return phba->pci_bar4_memmap_p;
14606 default:
14607 break;
14608 }
14609 return NULL;
14610 }

/**
 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
 * @phba: HBA structure that EQs are on.
 * @startq: The starting EQ index to modify
 * @numq: The number of EQs (consecutive indexes) to modify
 * @usdelay: amount of delay, in microseconds
 *
 * This function revises the EQ delay on 1 or more EQs. The EQ delay
 * is set either by writing to a register (if supported by the SLI Port)
 * or by mailbox command. The mailbox command allows several EQs to be
 * updated at once.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The
 * @startq parameter specifies the first EQ index; @numq specifies how many
 * consecutive EQ indexes, starting at @startq, are to be modified.
 **/
14635 void
14636 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14637 uint32_t numq, uint32_t usdelay)
14638 {
14639 struct lpfc_mbx_modify_eq_delay *eq_delay;
14640 LPFC_MBOXQ_t *mbox;
14641 struct lpfc_queue *eq;
14642 int cnt = 0, rc, length;
14643 uint32_t shdr_status, shdr_add_status;
14644 uint32_t dmult;
14645 int qidx;
14646 union lpfc_sli4_cfg_shdr *shdr;
14647
14648 if (startq >= phba->cfg_irq_chann)
14649 return;
14650
14651 if (usdelay > 0xFFFF) {
14652 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
14653 "6429 usdelay %d too large. Scaled down to "
14654 "0xFFFF.\n", usdelay);
14655 usdelay = 0xFFFF;
14656 }
14657
	/* set values by EQ_DELAY register if supported */
14659 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14660 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14661 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
14662 if (!eq)
14663 continue;
14664
14665 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
14666
14667 if (++cnt >= numq)
14668 break;
14669 }
14670 return;
14671 }

	/* Otherwise, set values by mailbox cmd */
14675 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14676 if (!mbox) {
14677 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
14678 "6428 Failed allocating mailbox cmd buffer."
14679 " EQ delay was not set.\n");
14680 return;
14681 }
14682 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14683 sizeof(struct lpfc_sli4_cfg_mhdr));
14684 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14685 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14686 length, LPFC_SLI4_MBX_EMBED);
14687 eq_delay = &mbox->u.mqe.un.eq_delay;
14688
	/* Calculate delay multiplier from maximum interrupt per second */
14690 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
14691 if (dmult)
14692 dmult--;
14693 if (dmult > LPFC_DMULT_MAX)
14694 dmult = LPFC_DMULT_MAX;
14695
14696 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14697 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
14698 if (!eq)
14699 continue;
14700 eq->q_mode = usdelay;
14701 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14702 eq_delay->u.request.eq[cnt].phase = 0;
14703 eq_delay->u.request.eq[cnt].delay_multi = dmult;
14704
14705 if (++cnt >= numq)
14706 break;
14707 }
14708 eq_delay->u.request.num_eq = cnt;
14709
14710 mbox->vport = phba->pport;
14711 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14712 mbox->ctx_buf = NULL;
14713 mbox->ctx_ndlp = NULL;
14714 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14715 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14716 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14717 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14718 if (shdr_status || shdr_add_status || rc) {
14719 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14720 "2512 MODIFY_EQ_DELAY mailbox failed with "
14721 "status x%x add_status x%x, mbx status x%x\n",
14722 shdr_status, shdr_add_status, rc);
14723 }
14724 mempool_free(mbox, phba->mbox_mem_pool);
14726 }
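
/*
 * Illustrative sketch, not driver code: the delay-multiplier conversion used
 * above, factored out. The result depends on LPFC_DMULT_CONST and
 * LPFC_SEC_TO_USEC from the driver headers; no particular numeric values are
 * assumed here, and the helper name is hypothetical.
 */
static uint32_t __maybe_unused
lpfc_example_usdelay_to_dmult(uint32_t usdelay)
{
	uint32_t dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;

	if (dmult)	/* mirrors the decrement in the routine above */
		dmult--;
	return min_t(uint32_t, dmult, LPFC_DMULT_MAX);
}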

/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba, by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @eq struct is used to get the entry count and entry size that are
 * necessary to determine the number of pages to allocate and use for this
 * queue. This function polls for the mailbox command to finish before
 * continuing.
 *
 * On success this function returns zero. If unable to allocate enough
 * memory it returns -ENOMEM; if the queue create mailbox command fails it
 * returns -ENXIO.
 **/
14748 int
14749 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14750 {
14751 struct lpfc_mbx_eq_create *eq_create;
14752 LPFC_MBOXQ_t *mbox;
14753 int rc, length, status = 0;
14754 struct lpfc_dmabuf *dmabuf;
14755 uint32_t shdr_status, shdr_add_status;
14756 union lpfc_sli4_cfg_shdr *shdr;
14757 uint16_t dmult;
14758 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14759
	/* sanity check on queue memory */
14761 if (!eq)
14762 return -ENODEV;
14763 if (!phba->sli4_hba.pc_sli4_params.supported)
14764 hw_page_size = SLI4_PAGE_SIZE;
14765
14766 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14767 if (!mbox)
14768 return -ENOMEM;
14769 length = (sizeof(struct lpfc_mbx_eq_create) -
14770 sizeof(struct lpfc_sli4_cfg_mhdr));
14771 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14772 LPFC_MBOX_OPCODE_EQ_CREATE,
14773 length, LPFC_SLI4_MBX_EMBED);
14774 eq_create = &mbox->u.mqe.un.eq_create;
14775 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
14776 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14777 eq->page_count);
14778 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14779 LPFC_EQE_SIZE);
14780 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
14781
	/* Use version 2 of CREATE_EQ if eqav is set */
14783 if (phba->sli4_hba.pc_sli4_params.eqav) {
14784 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14785 LPFC_Q_CREATE_VERSION_2);
14786 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
14787 phba->sli4_hba.pc_sli4_params.eqav);
14788 }
14789
	/* don't setup delay multiplier using EQ_CREATE */
14791 dmult = 0;
14792 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14793 dmult);
14794 switch (eq->entry_count) {
14795 default:
14796 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14797 "0360 Unsupported EQ count. (%d)\n",
14798 eq->entry_count);
14799 if (eq->entry_count < 256) {
14800 status = -EINVAL;
14801 goto out;
14802 }
		/* otherwise default to smallest count (drop through) */
14804 case 256:
14805 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14806 LPFC_EQ_CNT_256);
14807 break;
14808 case 512:
14809 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14810 LPFC_EQ_CNT_512);
14811 break;
14812 case 1024:
14813 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14814 LPFC_EQ_CNT_1024);
14815 break;
14816 case 2048:
14817 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14818 LPFC_EQ_CNT_2048);
14819 break;
14820 case 4096:
14821 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14822 LPFC_EQ_CNT_4096);
14823 break;
14824 }
14825 list_for_each_entry(dmabuf, &eq->page_list, list) {
14826 memset(dmabuf->virt, 0, hw_page_size);
14827 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14828 putPaddrLow(dmabuf->phys);
14829 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14830 putPaddrHigh(dmabuf->phys);
14831 }
14832 mbox->vport = phba->pport;
14833 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14834 mbox->ctx_buf = NULL;
14835 mbox->ctx_ndlp = NULL;
14836 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14837 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14838 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14839 if (shdr_status || shdr_add_status || rc) {
14840 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14841 "2500 EQ_CREATE mailbox failed with "
14842 "status x%x add_status x%x, mbx status x%x\n",
14843 shdr_status, shdr_add_status, rc);
14844 status = -ENXIO;
14845 }
14846 eq->type = LPFC_EQ;
14847 eq->subtype = LPFC_NONE;
14848 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14849 if (eq->queue_id == 0xFFFF)
14850 status = -ENXIO;
14851 eq->host_index = 0;
14852 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
14853 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
14854 out:
14855 mempool_free(mbox, phba->mbox_mem_pool);
14856 return status;
14857 }
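
/*
 * Illustrative sketch, not driver code: allocating host memory for an EQ and
 * creating it on the port. On create failure the DMA pages must be released
 * with lpfc_sli4_queue_free(). The helper name is hypothetical.
 */
static struct lpfc_queue *__maybe_unused
lpfc_example_setup_eq(struct lpfc_hba *phba, int cpu)
{
	struct lpfc_queue *eq;

	eq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE,
				   phba->sli4_hba.eq_esize,
				   phba->sli4_hba.eq_ecount, cpu);
	if (!eq)
		return NULL;

	if (lpfc_eq_create(phba, eq, phba->cfg_fcp_imax)) {
		lpfc_sli4_queue_free(eq);
		return NULL;
	}
	return eq;
}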

/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 * @type: The queue type (LPFC_MCQ, LPFC_WCQ, etc).
 * @subtype: The functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba, by sending a CQ_CREATE mailbox command to the HBA,
 * and binds it to the event queue @eq.
 *
 * On success this function returns zero. If unable to allocate enough
 * memory it returns -ENOMEM; if the queue create mailbox command fails it
 * returns -ENXIO.
 **/
14880 int
14881 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14882 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14883 {
14884 struct lpfc_mbx_cq_create *cq_create;
14885 struct lpfc_dmabuf *dmabuf;
14886 LPFC_MBOXQ_t *mbox;
14887 int rc, length, status = 0;
14888 uint32_t shdr_status, shdr_add_status;
14889 union lpfc_sli4_cfg_shdr *shdr;
14890
	/* sanity check on queue memory */
14892 if (!cq || !eq)
14893 return -ENODEV;
14894
14895 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14896 if (!mbox)
14897 return -ENOMEM;
14898 length = (sizeof(struct lpfc_mbx_cq_create) -
14899 sizeof(struct lpfc_sli4_cfg_mhdr));
14900 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14901 LPFC_MBOX_OPCODE_CQ_CREATE,
14902 length, LPFC_SLI4_MBX_EMBED);
14903 cq_create = &mbox->u.mqe.un.cq_create;
14904 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
14905 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14906 cq->page_count);
14907 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14908 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
14909 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14910 phba->sli4_hba.pc_sli4_params.cqv);
14911 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
14912 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
14913 (cq->page_size / SLI4_PAGE_SIZE));
14914 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14915 eq->queue_id);
14916 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
14917 phba->sli4_hba.pc_sli4_params.cqav);
14918 } else {
14919 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
14920 eq->queue_id);
14921 }
14922 switch (cq->entry_count) {
14923 case 2048:
14924 case 4096:
14925 if (phba->sli4_hba.pc_sli4_params.cqv ==
14926 LPFC_Q_CREATE_VERSION_2) {
14927 cq_create->u.request.context.lpfc_cq_context_count =
14928 cq->entry_count;
14929 bf_set(lpfc_cq_context_count,
14930 &cq_create->u.request.context,
14931 LPFC_CQ_CNT_WORD7);
14932 break;
14933 }
		/* fall through */
14935 default:
14936 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14937 "0361 Unsupported CQ count: "
14938 "entry cnt %d sz %d pg cnt %d\n",
14939 cq->entry_count, cq->entry_size,
14940 cq->page_count);
14941 if (cq->entry_count < 256) {
14942 status = -EINVAL;
14943 goto out;
14944 }
		/* fall through */
14946 case 256:
14947 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14948 LPFC_CQ_CNT_256);
14949 break;
14950 case 512:
14951 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14952 LPFC_CQ_CNT_512);
14953 break;
14954 case 1024:
14955 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14956 LPFC_CQ_CNT_1024);
14957 break;
14958 }
14959 list_for_each_entry(dmabuf, &cq->page_list, list) {
14960 memset(dmabuf->virt, 0, cq->page_size);
14961 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14962 putPaddrLow(dmabuf->phys);
14963 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14964 putPaddrHigh(dmabuf->phys);
14965 }
14966 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14967
	/* The IOCTL status is embedded in the mailbox subheader. */
14969 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14970 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14971 if (shdr_status || shdr_add_status || rc) {
14972 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14973 "2501 CQ_CREATE mailbox failed with "
14974 "status x%x add_status x%x, mbx status x%x\n",
14975 shdr_status, shdr_add_status, rc);
14976 status = -ENXIO;
14977 goto out;
14978 }
14979 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14980 if (cq->queue_id == 0xFFFF) {
14981 status = -ENXIO;
14982 goto out;
14983 }
14984
14985 list_add_tail(&cq->list, &eq->child_list);
14986
14987 cq->type = type;
14988 cq->subtype = subtype;
14990 cq->assoc_qid = eq->queue_id;
14991 cq->assoc_qp = eq;
14992 cq->host_index = 0;
14993 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
14994 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
14995
14996 if (cq->queue_id > phba->sli4_hba.cq_max)
14997 phba->sli4_hba.cq_max = cq->queue_id;
14998 out:
14999 mempool_free(mbox, phba->mbox_mem_pool);
15000 return status;
15001 }
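
/*
 * Illustrative sketch, not driver code: creating a work completion queue and
 * binding it to an existing EQ. LPFC_WCQ/LPFC_IO match the type and subtype
 * values this file dispatches on; the helper name is hypothetical.
 */
static struct lpfc_queue *__maybe_unused
lpfc_example_setup_io_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, int cpu)
{
	struct lpfc_queue *cq;

	cq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE,
				   phba->sli4_hba.cq_esize,
				   phba->sli4_hba.cq_ecount, cpu);
	if (!cq)
		return NULL;

	if (lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_IO)) {
		lpfc_sli4_queue_free(cq);
		return NULL;
	}
	return cq;
}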

/**
 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
 * @phba: HBA structure that indicates port to create the queues on.
 * @cqp: The queue structure array to use to create the completion queues.
 * @hdwq: The hardware queue array with the EQs to bind the CQs to.
 * @type: The queue type (LPFC_MCQ, LPFC_WCQ, etc).
 * @subtype: The functional purpose of the queues (MBOX, IO, ELS, NVMET, etc).
 *
 * This function creates a set of completion queues, one per NVMET MRQ, by
 * sending a single non-embedded CQ_CREATE_SET mailbox command to the HBA.
 * Firmware returns a base queue ID; each CQ in the set is assigned
 * base + index.
 *
 * On success this function returns zero. If unable to allocate enough
 * memory it returns -ENOMEM; if the mailbox command fails it returns
 * -ENXIO.
 **/
15025 int
15026 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15027 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
15028 uint32_t subtype)
15029 {
15030 struct lpfc_queue *cq;
15031 struct lpfc_queue *eq;
15032 struct lpfc_mbx_cq_create_set *cq_set;
15033 struct lpfc_dmabuf *dmabuf;
15034 LPFC_MBOXQ_t *mbox;
15035 int rc, length, alloclen, status = 0;
15036 int cnt, idx, numcq, page_idx = 0;
15037 uint32_t shdr_status, shdr_add_status;
15038 union lpfc_sli4_cfg_shdr *shdr;
15039 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15040
15041
15042 numcq = phba->cfg_nvmet_mrq;
15043 if (!cqp || !hdwq || !numcq)
15044 return -ENODEV;
15045
15046 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15047 if (!mbox)
15048 return -ENOMEM;
15049
15050 length = sizeof(struct lpfc_mbx_cq_create_set);
15051 length += ((numcq * cqp[0]->page_count) *
15052 sizeof(struct dma_address));
15053 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15054 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15055 LPFC_SLI4_MBX_NEMBED);
15056 if (alloclen < length) {
15057 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15058 "3098 Allocated DMA memory size (%d) is "
15059 "less than the requested DMA memory size "
15060 "(%d)\n", alloclen, length);
15061 status = -ENOMEM;
15062 goto out;
15063 }
15064 cq_set = mbox->sge_array->addr[0];
15065 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15066 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15067
15068 for (idx = 0; idx < numcq; idx++) {
15069 cq = cqp[idx];
15070 eq = hdwq[idx].hba_eq;
15071 if (!cq || !eq) {
15072 status = -ENOMEM;
15073 goto out;
15074 }
15075 if (!phba->sli4_hba.pc_sli4_params.supported)
15076 hw_page_size = cq->page_size;
15077
15078 switch (idx) {
15079 case 0:
15080 bf_set(lpfc_mbx_cq_create_set_page_size,
15081 &cq_set->u.request,
15082 (hw_page_size / SLI4_PAGE_SIZE));
15083 bf_set(lpfc_mbx_cq_create_set_num_pages,
15084 &cq_set->u.request, cq->page_count);
15085 bf_set(lpfc_mbx_cq_create_set_evt,
15086 &cq_set->u.request, 1);
15087 bf_set(lpfc_mbx_cq_create_set_valid,
15088 &cq_set->u.request, 1);
15089 bf_set(lpfc_mbx_cq_create_set_cqe_size,
15090 &cq_set->u.request, 0);
15091 bf_set(lpfc_mbx_cq_create_set_num_cq,
15092 &cq_set->u.request, numcq);
15093 bf_set(lpfc_mbx_cq_create_set_autovalid,
15094 &cq_set->u.request,
15095 phba->sli4_hba.pc_sli4_params.cqav);
15096 switch (cq->entry_count) {
15097 case 2048:
15098 case 4096:
15099 if (phba->sli4_hba.pc_sli4_params.cqv ==
15100 LPFC_Q_CREATE_VERSION_2) {
15101 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15102 &cq_set->u.request,
15103 cq->entry_count);
15104 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15105 &cq_set->u.request,
15106 LPFC_CQ_CNT_WORD7);
15107 break;
15108 }
				/* fall through */
15110 default:
15111 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15112 "3118 Bad CQ count. (%d)\n",
15113 cq->entry_count);
15114 if (cq->entry_count < 256) {
15115 status = -EINVAL;
15116 goto out;
15117 }
				/* fall through */
15119 case 256:
15120 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15121 &cq_set->u.request, LPFC_CQ_CNT_256);
15122 break;
15123 case 512:
15124 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15125 &cq_set->u.request, LPFC_CQ_CNT_512);
15126 break;
15127 case 1024:
15128 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15129 &cq_set->u.request, LPFC_CQ_CNT_1024);
15130 break;
15131 }
15132 bf_set(lpfc_mbx_cq_create_set_eq_id0,
15133 &cq_set->u.request, eq->queue_id);
15134 break;
15135 case 1:
15136 bf_set(lpfc_mbx_cq_create_set_eq_id1,
15137 &cq_set->u.request, eq->queue_id);
15138 break;
15139 case 2:
15140 bf_set(lpfc_mbx_cq_create_set_eq_id2,
15141 &cq_set->u.request, eq->queue_id);
15142 break;
15143 case 3:
15144 bf_set(lpfc_mbx_cq_create_set_eq_id3,
15145 &cq_set->u.request, eq->queue_id);
15146 break;
15147 case 4:
15148 bf_set(lpfc_mbx_cq_create_set_eq_id4,
15149 &cq_set->u.request, eq->queue_id);
15150 break;
15151 case 5:
15152 bf_set(lpfc_mbx_cq_create_set_eq_id5,
15153 &cq_set->u.request, eq->queue_id);
15154 break;
15155 case 6:
15156 bf_set(lpfc_mbx_cq_create_set_eq_id6,
15157 &cq_set->u.request, eq->queue_id);
15158 break;
15159 case 7:
15160 bf_set(lpfc_mbx_cq_create_set_eq_id7,
15161 &cq_set->u.request, eq->queue_id);
15162 break;
15163 case 8:
15164 bf_set(lpfc_mbx_cq_create_set_eq_id8,
15165 &cq_set->u.request, eq->queue_id);
15166 break;
15167 case 9:
15168 bf_set(lpfc_mbx_cq_create_set_eq_id9,
15169 &cq_set->u.request, eq->queue_id);
15170 break;
15171 case 10:
15172 bf_set(lpfc_mbx_cq_create_set_eq_id10,
15173 &cq_set->u.request, eq->queue_id);
15174 break;
15175 case 11:
15176 bf_set(lpfc_mbx_cq_create_set_eq_id11,
15177 &cq_set->u.request, eq->queue_id);
15178 break;
15179 case 12:
15180 bf_set(lpfc_mbx_cq_create_set_eq_id12,
15181 &cq_set->u.request, eq->queue_id);
15182 break;
15183 case 13:
15184 bf_set(lpfc_mbx_cq_create_set_eq_id13,
15185 &cq_set->u.request, eq->queue_id);
15186 break;
15187 case 14:
15188 bf_set(lpfc_mbx_cq_create_set_eq_id14,
15189 &cq_set->u.request, eq->queue_id);
15190 break;
15191 case 15:
15192 bf_set(lpfc_mbx_cq_create_set_eq_id15,
15193 &cq_set->u.request, eq->queue_id);
15194 break;
15195 }
15196
15197
15198 list_add_tail(&cq->list, &eq->child_list);
15199
15200 cq->type = type;
15201 cq->subtype = subtype;
15202 cq->assoc_qid = eq->queue_id;
15203 cq->assoc_qp = eq;
15204 cq->host_index = 0;
15205 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15206 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15207 cq->entry_count);
15208 cq->chann = idx;
15209
15210 rc = 0;
15211 list_for_each_entry(dmabuf, &cq->page_list, list) {
15212 memset(dmabuf->virt, 0, hw_page_size);
15213 cnt = page_idx + dmabuf->buffer_tag;
15214 cq_set->u.request.page[cnt].addr_lo =
15215 putPaddrLow(dmabuf->phys);
15216 cq_set->u.request.page[cnt].addr_hi =
15217 putPaddrHigh(dmabuf->phys);
15218 rc++;
15219 }
15220 page_idx += rc;
15221 }
15222
15223 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15224
	/* The IOCTL status is embedded in the mailbox subheader. */
15226 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15227 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15228 if (shdr_status || shdr_add_status || rc) {
15229 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15230 "3119 CQ_CREATE_SET mailbox failed with "
15231 "status x%x add_status x%x, mbx status x%x\n",
15232 shdr_status, shdr_add_status, rc);
15233 status = -ENXIO;
15234 goto out;
15235 }
15236 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15237 if (rc == 0xFFFF) {
15238 status = -ENXIO;
15239 goto out;
15240 }
15241
15242 for (idx = 0; idx < numcq; idx++) {
15243 cq = cqp[idx];
15244 cq->queue_id = rc + idx;
15245 if (cq->queue_id > phba->sli4_hba.cq_max)
15246 phba->sli4_hba.cq_max = cq->queue_id;
15247 }
15248
15249 out:
15250 lpfc_sli4_mbox_cmd_free(phba, mbox);
15251 return status;
15252 }

/**
 * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mq.
 *
 * This function provides failback (fb) functionality when the MQ_CREATE_EXT
 * mailbox command fails on older firmware generations. Its purpose is
 * otherwise identical to MQ_CREATE_EXT.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
15268 static void
15269 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15270 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15271 {
15272 struct lpfc_mbx_mq_create *mq_create;
15273 struct lpfc_dmabuf *dmabuf;
15274 int length;
15275
15276 length = (sizeof(struct lpfc_mbx_mq_create) -
15277 sizeof(struct lpfc_sli4_cfg_mhdr));
15278 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15279 LPFC_MBOX_OPCODE_MQ_CREATE,
15280 length, LPFC_SLI4_MBX_EMBED);
15281 mq_create = &mbox->u.mqe.un.mq_create;
15282 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15283 mq->page_count);
15284 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15285 cq->queue_id);
15286 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15287 switch (mq->entry_count) {
15288 case 16:
15289 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15290 LPFC_MQ_RING_SIZE_16);
15291 break;
15292 case 32:
15293 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15294 LPFC_MQ_RING_SIZE_32);
15295 break;
15296 case 64:
15297 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15298 LPFC_MQ_RING_SIZE_64);
15299 break;
15300 case 128:
15301 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15302 LPFC_MQ_RING_SIZE_128);
15303 break;
15304 }
15305 list_for_each_entry(dmabuf, &mq->page_list, list) {
15306 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15307 putPaddrLow(dmabuf->phys);
15308 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15309 putPaddrHigh(dmabuf->phys);
15310 }
15311 }

/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mq.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba, by sending an MQ_CREATE_EXT mailbox command to the
 * HBA, falling back to MQ_CREATE on older firmware.
 *
 * On success this function returns zero. If unable to allocate enough
 * memory it returns -ENOMEM; if the queue create mailbox command fails it
 * returns -ENXIO.
 **/
15334 int32_t
15335 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15336 struct lpfc_queue *cq, uint32_t subtype)
15337 {
15338 struct lpfc_mbx_mq_create *mq_create;
15339 struct lpfc_mbx_mq_create_ext *mq_create_ext;
15340 struct lpfc_dmabuf *dmabuf;
15341 LPFC_MBOXQ_t *mbox;
15342 int rc, length, status = 0;
15343 uint32_t shdr_status, shdr_add_status;
15344 union lpfc_sli4_cfg_shdr *shdr;
15345 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15346
	/* sanity check on queue memory */
15348 if (!mq || !cq)
15349 return -ENODEV;
15350 if (!phba->sli4_hba.pc_sli4_params.supported)
15351 hw_page_size = SLI4_PAGE_SIZE;
15352
15353 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15354 if (!mbox)
15355 return -ENOMEM;
15356 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15357 sizeof(struct lpfc_sli4_cfg_mhdr));
15358 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15359 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15360 length, LPFC_SLI4_MBX_EMBED);
15361
15362 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15363 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15364 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15365 &mq_create_ext->u.request, mq->page_count);
15366 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15367 &mq_create_ext->u.request, 1);
15368 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
15369 &mq_create_ext->u.request, 1);
15370 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15371 &mq_create_ext->u.request, 1);
15372 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15373 &mq_create_ext->u.request, 1);
15374 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15375 &mq_create_ext->u.request, 1);
15376 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
15377 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15378 phba->sli4_hba.pc_sli4_params.mqv);
15379 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15380 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15381 cq->queue_id);
15382 else
15383 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15384 cq->queue_id);
15385 switch (mq->entry_count) {
15386 default:
15387 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15388 "0362 Unsupported MQ count. (%d)\n",
15389 mq->entry_count);
15390 if (mq->entry_count < 16) {
15391 status = -EINVAL;
15392 goto out;
15393 }
		/* fall through */
15395 case 16:
15396 bf_set(lpfc_mq_context_ring_size,
15397 &mq_create_ext->u.request.context,
15398 LPFC_MQ_RING_SIZE_16);
15399 break;
15400 case 32:
15401 bf_set(lpfc_mq_context_ring_size,
15402 &mq_create_ext->u.request.context,
15403 LPFC_MQ_RING_SIZE_32);
15404 break;
15405 case 64:
15406 bf_set(lpfc_mq_context_ring_size,
15407 &mq_create_ext->u.request.context,
15408 LPFC_MQ_RING_SIZE_64);
15409 break;
15410 case 128:
15411 bf_set(lpfc_mq_context_ring_size,
15412 &mq_create_ext->u.request.context,
15413 LPFC_MQ_RING_SIZE_128);
15414 break;
15415 }
15416 list_for_each_entry(dmabuf, &mq->page_list, list) {
15417 memset(dmabuf->virt, 0, hw_page_size);
15418 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
15419 putPaddrLow(dmabuf->phys);
15420 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
15421 putPaddrHigh(dmabuf->phys);
15422 }
15423 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15424 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15425 &mq_create_ext->u.response);
15426 if (rc != MBX_SUCCESS) {
15427 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15428 "2795 MQ_CREATE_EXT failed with "
15429 "status x%x. Failback to MQ_CREATE.\n",
15430 rc);
15431 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15432 mq_create = &mbox->u.mqe.un.mq_create;
15433 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15434 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15435 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15436 &mq_create->u.response);
15437 }
15438
	/* The IOCTL status is embedded in the mailbox subheader. */
15440 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15441 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15442 if (shdr_status || shdr_add_status || rc) {
15443 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15444 "2502 MQ_CREATE mailbox failed with "
15445 "status x%x add_status x%x, mbx status x%x\n",
15446 shdr_status, shdr_add_status, rc);
15447 status = -ENXIO;
15448 goto out;
15449 }
15450 if (mq->queue_id == 0xFFFF) {
15451 status = -ENXIO;
15452 goto out;
15453 }
15454 mq->type = LPFC_MQ;
15455 mq->assoc_qid = cq->queue_id;
15456 mq->subtype = subtype;
15457 mq->host_index = 0;
15458 mq->hba_index = 0;
15459
15460
15461 list_add_tail(&mq->list, &cq->child_list);
15462 out:
15463 mempool_free(mbox, phba->mbox_mem_pool);
15464 return status;
15465 }
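
/*
 * Illustrative sketch, not driver code: the single MQ is created against the
 * mailbox CQ with subtype LPFC_MBOX; the routine above transparently falls
 * back from MQ_CREATE_EXT to MQ_CREATE on older firmware, so callers only
 * check the return code. The helper name is hypothetical.
 */
static int __maybe_unused
lpfc_example_setup_mq(struct lpfc_hba *phba, struct lpfc_queue *mq,
		      struct lpfc_queue *mbx_cq)
{
	/* mq must have been allocated with lpfc_sli4_queue_alloc() first */
	return lpfc_mq_create(phba, mq, mbx_cq, LPFC_MBOX);
}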

/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a work queue, as detailed in @wq, on a port,
 * described by @phba, by sending a WQ_CREATE mailbox command to the HBA,
 * binds it to @cq, and resolves the queue's doorbell (or DPP) register
 * from the mailbox response.
 *
 * On success this function returns zero. If unable to allocate enough
 * memory it returns -ENOMEM; if the queue create mailbox command fails it
 * returns -ENXIO.
 **/
15489 int
15490 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15491 struct lpfc_queue *cq, uint32_t subtype)
15492 {
15493 struct lpfc_mbx_wq_create *wq_create;
15494 struct lpfc_dmabuf *dmabuf;
15495 LPFC_MBOXQ_t *mbox;
15496 int rc, length, status = 0;
15497 uint32_t shdr_status, shdr_add_status;
15498 union lpfc_sli4_cfg_shdr *shdr;
15499 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15500 struct dma_address *page;
15501 void __iomem *bar_memmap_p;
15502 uint32_t db_offset;
15503 uint16_t pci_barset;
15504 uint8_t dpp_barset;
15505 uint32_t dpp_offset;
15506 unsigned long pg_addr;
15507 uint8_t wq_create_version;
15508
	/* sanity check on queue memory */
15510 if (!wq || !cq)
15511 return -ENODEV;
15512 if (!phba->sli4_hba.pc_sli4_params.supported)
15513 hw_page_size = wq->page_size;
15514
15515 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15516 if (!mbox)
15517 return -ENOMEM;
15518 length = (sizeof(struct lpfc_mbx_wq_create) -
15519 sizeof(struct lpfc_sli4_cfg_mhdr));
15520 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15521 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15522 length, LPFC_SLI4_MBX_EMBED);
15523 wq_create = &mbox->u.mqe.un.wq_create;
15524 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15525 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15526 wq->page_count);
15527 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15528 cq->queue_id);

	/* wqv is the earliest version supported, NOT the latest */
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.wqv);

	/* Use create version 1 when 128-byte WQEs are supported or the
	 * WQ needs pages larger than SLI4_PAGE_SIZE.
	 */
	if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
	    (wq->page_size > SLI4_PAGE_SIZE))
		wq_create_version = LPFC_Q_CREATE_VERSION_1;
	else
		wq_create_version = LPFC_Q_CREATE_VERSION_0;

15546 switch (wq_create_version) {
15547 case LPFC_Q_CREATE_VERSION_1:
15548 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15549 wq->entry_count);
15550 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15551 LPFC_Q_CREATE_VERSION_1);
15552
15553 switch (wq->entry_size) {
15554 default:
15555 case 64:
15556 bf_set(lpfc_mbx_wq_create_wqe_size,
15557 &wq_create->u.request_1,
15558 LPFC_WQ_WQE_SIZE_64);
15559 break;
15560 case 128:
15561 bf_set(lpfc_mbx_wq_create_wqe_size,
15562 &wq_create->u.request_1,
15563 LPFC_WQ_WQE_SIZE_128);
15564 break;
15565 }
15566
15567 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15568 bf_set(lpfc_mbx_wq_create_page_size,
15569 &wq_create->u.request_1,
15570 (wq->page_size / SLI4_PAGE_SIZE));
15571 page = wq_create->u.request_1.page;
15572 break;
15573 default:
15574 page = wq_create->u.request.page;
15575 break;
15576 }
15577
15578 list_for_each_entry(dmabuf, &wq->page_list, list) {
15579 memset(dmabuf->virt, 0, hw_page_size);
15580 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15581 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15582 }
15583
15584 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15585 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15586
15587 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15588
15589 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15590 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15591 if (shdr_status || shdr_add_status || rc) {
15592 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15593 "2503 WQ_CREATE mailbox failed with "
15594 "status x%x add_status x%x, mbx status x%x\n",
15595 shdr_status, shdr_add_status, rc);
15596 status = -ENXIO;
15597 goto out;
15598 }
15599
15600 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15601 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15602 &wq_create->u.response);
15603 else
15604 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15605 &wq_create->u.response_1);
15606
15607 if (wq->queue_id == 0xFFFF) {
15608 status = -ENXIO;
15609 goto out;
15610 }
15611
15612 wq->db_format = LPFC_DB_LIST_FORMAT;
15613 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15614 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15615 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15616 &wq_create->u.response);
15617 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15618 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15619 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15620 "3265 WQ[%d] doorbell format "
15621 "not supported: x%x\n",
15622 wq->queue_id, wq->db_format);
15623 status = -EINVAL;
15624 goto out;
15625 }
15626 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15627 &wq_create->u.response);
15628 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15629 pci_barset);
15630 if (!bar_memmap_p) {
15631 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15632 "3263 WQ[%d] failed to memmap "
15633 "pci barset:x%x\n",
15634 wq->queue_id, pci_barset);
15635 status = -ENOMEM;
15636 goto out;
15637 }
15638 db_offset = wq_create->u.response.doorbell_offset;
15639 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15640 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15641 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15642 "3252 WQ[%d] doorbell offset "
15643 "not supported: x%x\n",
15644 wq->queue_id, db_offset);
15645 status = -EINVAL;
15646 goto out;
15647 }
15648 wq->db_regaddr = bar_memmap_p + db_offset;
15649 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15650 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15651 "format:x%x\n", wq->queue_id,
15652 pci_barset, db_offset, wq->db_format);
15653 } else
15654 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15655 } else {
		/* Check if DPP was honored by the firmware */
15657 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15658 &wq_create->u.response_1);
15659 if (wq->dpp_enable) {
15660 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15661 &wq_create->u.response_1);
15662 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15663 pci_barset);
15664 if (!bar_memmap_p) {
15665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15666 "3267 WQ[%d] failed to memmap "
15667 "pci barset:x%x\n",
15668 wq->queue_id, pci_barset);
15669 status = -ENOMEM;
15670 goto out;
15671 }
15672 db_offset = wq_create->u.response_1.doorbell_offset;
15673 wq->db_regaddr = bar_memmap_p + db_offset;
15674 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15675 &wq_create->u.response_1);
15676 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15677 &wq_create->u.response_1);
15678 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15679 dpp_barset);
15680 if (!bar_memmap_p) {
15681 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15682 "3268 WQ[%d] failed to memmap "
15683 "pci barset:x%x\n",
15684 wq->queue_id, dpp_barset);
15685 status = -ENOMEM;
15686 goto out;
15687 }
15688 dpp_offset = wq_create->u.response_1.dpp_offset;
15689 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15690 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15691 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15692 "dpp_id:x%x dpp_barset:x%x "
15693 "dpp_offset:x%x\n",
15694 wq->queue_id, pci_barset, db_offset,
15695 wq->dpp_id, dpp_barset, dpp_offset);
15696
			/* Enable combined writes for the DPP aperture */
15698 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15699 #ifdef CONFIG_X86
15700 rc = set_memory_wc(pg_addr, 1);
15701 if (rc) {
15702 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15703 "3272 Cannot setup Combined "
15704 "Write on WQ[%d] - disable DPP\n",
15705 wq->queue_id);
15706 phba->cfg_enable_dpp = 0;
15707 }
15708 #else
15709 phba->cfg_enable_dpp = 0;
15710 #endif
15711 } else
15712 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15713 }
15714 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15715 if (wq->pring == NULL) {
15716 status = -ENOMEM;
15717 goto out;
15718 }
15719 wq->type = LPFC_WQ;
15720 wq->assoc_qid = cq->queue_id;
15721 wq->subtype = subtype;
15722 wq->host_index = 0;
15723 wq->hba_index = 0;
15724 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
15725
15726
15727 list_add_tail(&wq->list, &cq->child_list);
15728 out:
15729 mempool_free(mbox, phba->mbox_mem_pool);
15730 return status;
15731 }
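
/*
 * Illustrative sketch, not driver code: creating a work queue on an I/O CQ.
 * Whether the WQ ends up with a regular doorbell or a DPP region is decided
 * inside lpfc_wq_create() from the mailbox response, as shown above, so
 * callers only check the return code. The helper name is hypothetical.
 */
static int __maybe_unused
lpfc_example_setup_io_wq(struct lpfc_hba *phba, struct lpfc_queue *wq,
			 struct lpfc_queue *cq)
{
	/* wq must have been allocated with lpfc_sli4_queue_alloc() first */
	return lpfc_wq_create(phba, wq, cq, LPFC_IO);
}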

/**
 * lpfc_rq_create - Create a Receive Queue pair on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind this pair of receive queues to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a header receive queue and a data receive queue,
 * as detailed in @hrq and @drq, on a port, described by @phba, by sending
 * two RQ_CREATE mailbox commands to the HBA. The two queues must have the
 * same entry count and are both bound to @cq.
 *
 * On success this function returns zero. If unable to allocate enough
 * memory it returns -ENOMEM; if the queue create mailbox command fails it
 * returns -ENXIO.
 **/
15756 int
15757 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15758 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15759 {
15760 struct lpfc_mbx_rq_create *rq_create;
15761 struct lpfc_dmabuf *dmabuf;
15762 LPFC_MBOXQ_t *mbox;
15763 int rc, length, status = 0;
15764 uint32_t shdr_status, shdr_add_status;
15765 union lpfc_sli4_cfg_shdr *shdr;
15766 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15767 void __iomem *bar_memmap_p;
15768 uint32_t db_offset;
15769 uint16_t pci_barset;
15770
	/* sanity check on queue memory */
15772 if (!hrq || !drq || !cq)
15773 return -ENODEV;
15774 if (!phba->sli4_hba.pc_sli4_params.supported)
15775 hw_page_size = SLI4_PAGE_SIZE;
15776
15777 if (hrq->entry_count != drq->entry_count)
15778 return -EINVAL;
15779 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15780 if (!mbox)
15781 return -ENOMEM;
15782 length = (sizeof(struct lpfc_mbx_rq_create) -
15783 sizeof(struct lpfc_sli4_cfg_mhdr));
15784 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15785 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15786 length, LPFC_SLI4_MBX_EMBED);
15787 rq_create = &mbox->u.mqe.un.rq_create;
15788 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15789 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15790 phba->sli4_hba.pc_sli4_params.rqv);
15791 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15792 bf_set(lpfc_rq_context_rqe_count_1,
15793 &rq_create->u.request.context,
15794 hrq->entry_count);
15795 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15796 bf_set(lpfc_rq_context_rqe_size,
15797 &rq_create->u.request.context,
15798 LPFC_RQE_SIZE_8);
15799 bf_set(lpfc_rq_context_page_size,
15800 &rq_create->u.request.context,
15801 LPFC_RQ_PAGE_SIZE_4096);
15802 } else {
15803 switch (hrq->entry_count) {
15804 default:
15805 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15806 "2535 Unsupported RQ count. (%d)\n",
15807 hrq->entry_count);
15808 if (hrq->entry_count < 512) {
15809 status = -EINVAL;
15810 goto out;
15811 }
			/* fall through */
15813 case 512:
15814 bf_set(lpfc_rq_context_rqe_count,
15815 &rq_create->u.request.context,
15816 LPFC_RQ_RING_SIZE_512);
15817 break;
15818 case 1024:
15819 bf_set(lpfc_rq_context_rqe_count,
15820 &rq_create->u.request.context,
15821 LPFC_RQ_RING_SIZE_1024);
15822 break;
15823 case 2048:
15824 bf_set(lpfc_rq_context_rqe_count,
15825 &rq_create->u.request.context,
15826 LPFC_RQ_RING_SIZE_2048);
15827 break;
15828 case 4096:
15829 bf_set(lpfc_rq_context_rqe_count,
15830 &rq_create->u.request.context,
15831 LPFC_RQ_RING_SIZE_4096);
15832 break;
15833 }
15834 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15835 LPFC_HDR_BUF_SIZE);
15836 }
15837 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15838 cq->queue_id);
15839 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15840 hrq->page_count);
15841 list_for_each_entry(dmabuf, &hrq->page_list, list) {
15842 memset(dmabuf->virt, 0, hw_page_size);
15843 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15844 putPaddrLow(dmabuf->phys);
15845 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15846 putPaddrHigh(dmabuf->phys);
15847 }
15848 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15849 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15850
15851 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
15853 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15854 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15855 if (shdr_status || shdr_add_status || rc) {
15856 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15857 "2504 RQ_CREATE mailbox failed with "
15858 "status x%x add_status x%x, mbx status x%x\n",
15859 shdr_status, shdr_add_status, rc);
15860 status = -ENXIO;
15861 goto out;
15862 }
15863 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15864 if (hrq->queue_id == 0xFFFF) {
15865 status = -ENXIO;
15866 goto out;
15867 }
15868
15869 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15870 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15871 &rq_create->u.response);
15872 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15873 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15874 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15875 "3262 RQ [%d] doorbell format not "
15876 "supported: x%x\n", hrq->queue_id,
15877 hrq->db_format);
15878 status = -EINVAL;
15879 goto out;
15880 }
15881
15882 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15883 &rq_create->u.response);
15884 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15885 if (!bar_memmap_p) {
15886 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15887 "3269 RQ[%d] failed to memmap pci "
15888 "barset:x%x\n", hrq->queue_id,
15889 pci_barset);
15890 status = -ENOMEM;
15891 goto out;
15892 }
15893
15894 db_offset = rq_create->u.response.doorbell_offset;
15895 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15896 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15897 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15898 "3270 RQ[%d] doorbell offset not "
15899 "supported: x%x\n", hrq->queue_id,
15900 db_offset);
15901 status = -EINVAL;
15902 goto out;
15903 }
15904 hrq->db_regaddr = bar_memmap_p + db_offset;
15905 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15906 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15907 "format:x%x\n", hrq->queue_id, pci_barset,
15908 db_offset, hrq->db_format);
15909 } else {
15910 hrq->db_format = LPFC_DB_RING_FORMAT;
15911 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15912 }
15913 hrq->type = LPFC_HRQ;
15914 hrq->assoc_qid = cq->queue_id;
15915 hrq->subtype = subtype;
15916 hrq->host_index = 0;
15917 hrq->hba_index = 0;
15918 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
15919
	/* now create the data queue */
15921 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15922 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15923 length, LPFC_SLI4_MBX_EMBED);
15924 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15925 phba->sli4_hba.pc_sli4_params.rqv);
15926 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15927 bf_set(lpfc_rq_context_rqe_count_1,
15928 &rq_create->u.request.context, hrq->entry_count);
15929 if (subtype == LPFC_NVMET)
15930 rq_create->u.request.context.buffer_size =
15931 LPFC_NVMET_DATA_BUF_SIZE;
15932 else
15933 rq_create->u.request.context.buffer_size =
15934 LPFC_DATA_BUF_SIZE;
15935 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15936 LPFC_RQE_SIZE_8);
15937 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15938 (PAGE_SIZE/SLI4_PAGE_SIZE));
15939 } else {
15940 switch (drq->entry_count) {
15941 default:
15942 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15943 "2536 Unsupported RQ count. (%d)\n",
15944 drq->entry_count);
15945 if (drq->entry_count < 512) {
15946 status = -EINVAL;
15947 goto out;
15948 }
			/* fall through */
15950 case 512:
15951 bf_set(lpfc_rq_context_rqe_count,
15952 &rq_create->u.request.context,
15953 LPFC_RQ_RING_SIZE_512);
15954 break;
15955 case 1024:
15956 bf_set(lpfc_rq_context_rqe_count,
15957 &rq_create->u.request.context,
15958 LPFC_RQ_RING_SIZE_1024);
15959 break;
15960 case 2048:
15961 bf_set(lpfc_rq_context_rqe_count,
15962 &rq_create->u.request.context,
15963 LPFC_RQ_RING_SIZE_2048);
15964 break;
15965 case 4096:
15966 bf_set(lpfc_rq_context_rqe_count,
15967 &rq_create->u.request.context,
15968 LPFC_RQ_RING_SIZE_4096);
15969 break;
15970 }
15971 if (subtype == LPFC_NVMET)
15972 bf_set(lpfc_rq_context_buf_size,
15973 &rq_create->u.request.context,
15974 LPFC_NVMET_DATA_BUF_SIZE);
15975 else
15976 bf_set(lpfc_rq_context_buf_size,
15977 &rq_create->u.request.context,
15978 LPFC_DATA_BUF_SIZE);
15979 }
15980 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15981 cq->queue_id);
15982 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15983 drq->page_count);
15984 list_for_each_entry(dmabuf, &drq->page_list, list) {
15985 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15986 putPaddrLow(dmabuf->phys);
15987 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15988 putPaddrHigh(dmabuf->phys);
15989 }
15990 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15991 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15992 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15993
15994 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15995 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15996 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15997 if (shdr_status || shdr_add_status || rc) {
15998 status = -ENXIO;
15999 goto out;
16000 }
16001 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16002 if (drq->queue_id == 0xFFFF) {
16003 status = -ENXIO;
16004 goto out;
16005 }
16006 drq->type = LPFC_DRQ;
16007 drq->assoc_qid = cq->queue_id;
16008 drq->subtype = subtype;
16009 drq->host_index = 0;
16010 drq->hba_index = 0;
16011 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16012
16013
16014 list_add_tail(&hrq->list, &cq->child_list);
16015 list_add_tail(&drq->list, &cq->child_list);
16016
16017 out:
16018 mempool_free(mbox, phba->mbox_mem_pool);
16019 return status;
16020 }
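
/*
 * Illustrative sketch, not driver code: header/data RQs are always created as
 * a pair against one CQ, with matching entry counts, as the routine above
 * enforces. The helper name is hypothetical.
 */
static int __maybe_unused
lpfc_example_setup_els_rq_pair(struct lpfc_hba *phba, struct lpfc_queue *hrq,
			       struct lpfc_queue *drq, struct lpfc_queue *cq)
{
	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;	/* mirrors the check in lpfc_rq_create() */
	return lpfc_rq_create(phba, hrq, drq, cq, LPFC_ELS);
}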

/**
 * lpfc_mrq_create - Create NVMET Multi-Receive Queues on the HBA
 * @phba: HBA structure that indicates port to create the queues on.
 * @hrqp: The queue structure array for the header receive queues.
 * @drqp: The queue structure array for the data receive queues.
 * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: Functional purpose of the queues (MBOX, IO, ELS, NVMET, etc).
 *
 * This function creates the NVMET MRQ header/data receive queue pairs with
 * a single non-embedded RQ_CREATE (version 2) mailbox command. Firmware
 * returns one base queue ID for the whole set.
 *
 * On success this function returns zero. If unable to allocate enough
 * memory it returns -ENOMEM; if the mailbox command fails it returns
 * -ENXIO.
 **/
16045 int
16046 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16047 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16048 uint32_t subtype)
16049 {
16050 struct lpfc_queue *hrq, *drq, *cq;
16051 struct lpfc_mbx_rq_create_v2 *rq_create;
16052 struct lpfc_dmabuf *dmabuf;
16053 LPFC_MBOXQ_t *mbox;
16054 int rc, length, alloclen, status = 0;
16055 int cnt, idx, numrq, page_idx = 0;
16056 uint32_t shdr_status, shdr_add_status;
16057 union lpfc_sli4_cfg_shdr *shdr;
16058 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16059
16060 numrq = phba->cfg_nvmet_mrq;
16061
16062 if (!hrqp || !drqp || !cqp || !numrq)
16063 return -ENODEV;
16064 if (!phba->sli4_hba.pc_sli4_params.supported)
16065 hw_page_size = SLI4_PAGE_SIZE;
16066
16067 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16068 if (!mbox)
16069 return -ENOMEM;
16070
16071 length = sizeof(struct lpfc_mbx_rq_create_v2);
16072 length += ((2 * numrq * hrqp[0]->page_count) *
16073 sizeof(struct dma_address));
16074
16075 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16076 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16077 LPFC_SLI4_MBX_NEMBED);
16078 if (alloclen < length) {
16079 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16080 "3099 Allocated DMA memory size (%d) is "
16081 "less than the requested DMA memory size "
16082 "(%d)\n", alloclen, length);
16083 status = -ENOMEM;
16084 goto out;
16085 }
16086
16087
16088
16089 rq_create = mbox->sge_array->addr[0];
16090 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16091
16092 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16093 cnt = 0;
16094
16095 for (idx = 0; idx < numrq; idx++) {
16096 hrq = hrqp[idx];
16097 drq = drqp[idx];
16098 cq = cqp[idx];
16099
16100
16101 if (!hrq || !drq || !cq) {
16102 status = -ENODEV;
16103 goto out;
16104 }
16105
16106 if (hrq->entry_count != drq->entry_count) {
16107 status = -EINVAL;
16108 goto out;
16109 }
16110
16111 if (idx == 0) {
16112 bf_set(lpfc_mbx_rq_create_num_pages,
16113 &rq_create->u.request,
16114 hrq->page_count);
16115 bf_set(lpfc_mbx_rq_create_rq_cnt,
16116 &rq_create->u.request, (numrq * 2));
16117 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16118 1);
16119 bf_set(lpfc_rq_context_base_cq,
16120 &rq_create->u.request.context,
16121 cq->queue_id);
16122 bf_set(lpfc_rq_context_data_size,
16123 &rq_create->u.request.context,
16124 LPFC_NVMET_DATA_BUF_SIZE);
16125 bf_set(lpfc_rq_context_hdr_size,
16126 &rq_create->u.request.context,
16127 LPFC_HDR_BUF_SIZE);
16128 bf_set(lpfc_rq_context_rqe_count_1,
16129 &rq_create->u.request.context,
16130 hrq->entry_count);
16131 bf_set(lpfc_rq_context_rqe_size,
16132 &rq_create->u.request.context,
16133 LPFC_RQE_SIZE_8);
16134 bf_set(lpfc_rq_context_page_size,
16135 &rq_create->u.request.context,
16136 (PAGE_SIZE/SLI4_PAGE_SIZE));
16137 }
16138 rc = 0;
16139 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16140 memset(dmabuf->virt, 0, hw_page_size);
16141 cnt = page_idx + dmabuf->buffer_tag;
16142 rq_create->u.request.page[cnt].addr_lo =
16143 putPaddrLow(dmabuf->phys);
16144 rq_create->u.request.page[cnt].addr_hi =
16145 putPaddrHigh(dmabuf->phys);
16146 rc++;
16147 }
16148 page_idx += rc;
16149
16150 rc = 0;
16151 list_for_each_entry(dmabuf, &drq->page_list, list) {
16152 memset(dmabuf->virt, 0, hw_page_size);
16153 cnt = page_idx + dmabuf->buffer_tag;
16154 rq_create->u.request.page[cnt].addr_lo =
16155 putPaddrLow(dmabuf->phys);
16156 rq_create->u.request.page[cnt].addr_hi =
16157 putPaddrHigh(dmabuf->phys);
16158 rc++;
16159 }
16160 page_idx += rc;
16161
16162 hrq->db_format = LPFC_DB_RING_FORMAT;
16163 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16164 hrq->type = LPFC_HRQ;
16165 hrq->assoc_qid = cq->queue_id;
16166 hrq->subtype = subtype;
16167 hrq->host_index = 0;
16168 hrq->hba_index = 0;
16169 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16170
16171 drq->db_format = LPFC_DB_RING_FORMAT;
16172 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16173 drq->type = LPFC_DRQ;
16174 drq->assoc_qid = cq->queue_id;
16175 drq->subtype = subtype;
16176 drq->host_index = 0;
16177 drq->hba_index = 0;
16178 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16179
16180 list_add_tail(&hrq->list, &cq->child_list);
16181 list_add_tail(&drq->list, &cq->child_list);
16182 }
16183
16184 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16185
16186 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16187 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16188 if (shdr_status || shdr_add_status || rc) {
16189 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16190 "3120 RQ_CREATE mailbox failed with "
16191 "status x%x add_status x%x, mbx status x%x\n",
16192 shdr_status, shdr_add_status, rc);
16193 status = -ENXIO;
16194 goto out;
16195 }
16196 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16197 if (rc == 0xFFFF) {
16198 status = -ENXIO;
16199 goto out;
16200 }
16201
16202
16203 for (idx = 0; idx < numrq; idx++) {
16204 hrq = hrqp[idx];
16205 hrq->queue_id = rc + (2 * idx);
16206 drq = drqp[idx];
16207 drq->queue_id = rc + (2 * idx) + 1;
16208 }
16209
16210 out:
16211 lpfc_sli4_mbox_cmd_free(phba, mbox);
16212 return status;
16213 }
16214
16215
16216
16217
16218
16219
16220
16221
16222
16223
16224
16225
16226
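/**
 * lpfc_eq_destroy - Destroy an event queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * Issues a polled EQ_DESTROY mailbox command for the queue ID in @eq and
 * removes the queue from its parent list. Returns 0 on success, -ENODEV if
 * @eq is NULL, -ENOMEM if no mailbox memory is available, or -ENXIO if the
 * mailbox command fails.
 **/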
16227 int
16228 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16229 {
16230 LPFC_MBOXQ_t *mbox;
16231 int rc, length, status = 0;
16232 uint32_t shdr_status, shdr_add_status;
16233 union lpfc_sli4_cfg_shdr *shdr;
16234
16235
16236 if (!eq)
16237 return -ENODEV;
16238
16239 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16240 if (!mbox)
16241 return -ENOMEM;
16242 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16243 sizeof(struct lpfc_sli4_cfg_mhdr));
16244 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16245 LPFC_MBOX_OPCODE_EQ_DESTROY,
16246 length, LPFC_SLI4_MBX_EMBED);
16247 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16248 eq->queue_id);
16249 mbox->vport = eq->phba->pport;
16250 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16251
16252 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16253
16254 shdr = (union lpfc_sli4_cfg_shdr *)
16255 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16256 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16257 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16258 if (shdr_status || shdr_add_status || rc) {
16259 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16260 "2505 EQ_DESTROY mailbox failed with "
16261 "status x%x add_status x%x, mbx status x%x\n",
16262 shdr_status, shdr_add_status, rc);
16263 status = -ENXIO;
16264 }
16265
16266
16267 list_del_init(&eq->list);
16268 mempool_free(mbox, eq->phba->mbox_mem_pool);
16269 return status;
16270 }
16271
16272
16273
16274
16275
16276
16277
16278
16279
16280
16281
16282
16283
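/**
 * lpfc_cq_destroy - Destroy a completion queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * Issues a polled CQ_DESTROY mailbox command for the queue ID in @cq and
 * removes the queue from its parent list. Returns 0 on success, -ENODEV if
 * @cq is NULL, -ENOMEM if no mailbox memory is available, or -ENXIO if the
 * mailbox command fails.
 **/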
16284 int
16285 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16286 {
16287 LPFC_MBOXQ_t *mbox;
16288 int rc, length, status = 0;
16289 uint32_t shdr_status, shdr_add_status;
16290 union lpfc_sli4_cfg_shdr *shdr;
16291
16292
16293 if (!cq)
16294 return -ENODEV;
16295 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16296 if (!mbox)
16297 return -ENOMEM;
16298 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16299 sizeof(struct lpfc_sli4_cfg_mhdr));
16300 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16301 LPFC_MBOX_OPCODE_CQ_DESTROY,
16302 length, LPFC_SLI4_MBX_EMBED);
16303 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16304 cq->queue_id);
16305 mbox->vport = cq->phba->pport;
16306 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16307 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16308
16309 shdr = (union lpfc_sli4_cfg_shdr *)
16310 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16311 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16312 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16313 if (shdr_status || shdr_add_status || rc) {
16314 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16315 "2506 CQ_DESTROY mailbox failed with "
16316 "status x%x add_status x%x, mbx status x%x\n",
16317 shdr_status, shdr_add_status, rc);
16318 status = -ENXIO;
16319 }
16320
16321 list_del_init(&cq->list);
16322 mempool_free(mbox, cq->phba->mbox_mem_pool);
16323 return status;
16324 }
16325
16326
16327
16328
16329
16330
16331
16332
16333
16334
16335
16336
16337
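/**
 * lpfc_mq_destroy - Destroy a mailbox queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * Issues a polled MQ_DESTROY mailbox command for the queue ID in @mq and
 * removes the queue from its parent list. Returns 0 on success, -ENODEV if
 * @mq is NULL, -ENOMEM if no mailbox memory is available, or -ENXIO if the
 * mailbox command fails.
 **/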
16338 int
16339 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16340 {
16341 LPFC_MBOXQ_t *mbox;
16342 int rc, length, status = 0;
16343 uint32_t shdr_status, shdr_add_status;
16344 union lpfc_sli4_cfg_shdr *shdr;
16345
16346
16347 if (!mq)
16348 return -ENODEV;
16349 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16350 if (!mbox)
16351 return -ENOMEM;
16352 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16353 sizeof(struct lpfc_sli4_cfg_mhdr));
16354 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16355 LPFC_MBOX_OPCODE_MQ_DESTROY,
16356 length, LPFC_SLI4_MBX_EMBED);
16357 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16358 mq->queue_id);
16359 mbox->vport = mq->phba->pport;
16360 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16361 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16362
16363 shdr = (union lpfc_sli4_cfg_shdr *)
16364 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16365 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16366 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16367 if (shdr_status || shdr_add_status || rc) {
16368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16369 "2507 MQ_DESTROY mailbox failed with "
16370 "status x%x add_status x%x, mbx status x%x\n",
16371 shdr_status, shdr_add_status, rc);
16372 status = -ENXIO;
16373 }
16374
16375 list_del_init(&mq->list);
16376 mempool_free(mbox, mq->phba->mbox_mem_pool);
16377 return status;
16378 }
16379
16380
16381
16382
16383
16384
16385
16386
16387
16388
16389
16390
16391
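/**
 * lpfc_wq_destroy - Destroy a work queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * Issues a polled FCOE_WQ_DESTROY mailbox command for the queue ID in @wq,
 * removes the queue from its parent list, and frees the ring attached to
 * the work queue. Returns 0, -ENODEV, -ENOMEM, or -ENXIO as in the other
 * queue-destroy routines.
 **/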
16392 int
16393 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16394 {
16395 LPFC_MBOXQ_t *mbox;
16396 int rc, length, status = 0;
16397 uint32_t shdr_status, shdr_add_status;
16398 union lpfc_sli4_cfg_shdr *shdr;
16399
16400
16401 if (!wq)
16402 return -ENODEV;
16403 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16404 if (!mbox)
16405 return -ENOMEM;
16406 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16407 sizeof(struct lpfc_sli4_cfg_mhdr));
16408 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16409 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16410 length, LPFC_SLI4_MBX_EMBED);
16411 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16412 wq->queue_id);
16413 mbox->vport = wq->phba->pport;
16414 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16415 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16416 shdr = (union lpfc_sli4_cfg_shdr *)
16417 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16418 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16419 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16420 if (shdr_status || shdr_add_status || rc) {
16421 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16422 "2508 WQ_DESTROY mailbox failed with "
16423 "status x%x add_status x%x, mbx status x%x\n",
16424 shdr_status, shdr_add_status, rc);
16425 status = -ENXIO;
16426 }
16427
16428 list_del_init(&wq->list);
16429 kfree(wq->pring);
16430 wq->pring = NULL;
16431 mempool_free(mbox, wq->phba->mbox_mem_pool);
16432 return status;
16433 }
16434
16435
16436
16437
16438
16439
16440
16441
16442
16443
16444
16445
16446
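/**
 * lpfc_rq_destroy - Destroy a receive queue pair on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @hrq: The header receive queue to destroy.
 * @drq: The data receive queue to destroy.
 *
 * Issues one polled FCOE_RQ_DESTROY mailbox command per queue, reusing the
 * same mailbox for the header and data queues. Returns 0 on success,
 * -ENODEV if either queue is NULL, -ENOMEM if no mailbox memory is
 * available, or -ENXIO if either mailbox command fails.
 **/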
16447 int
16448 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16449 struct lpfc_queue *drq)
16450 {
16451 LPFC_MBOXQ_t *mbox;
16452 int rc, length, status = 0;
16453 uint32_t shdr_status, shdr_add_status;
16454 union lpfc_sli4_cfg_shdr *shdr;
16455
16456
16457 if (!hrq || !drq)
16458 return -ENODEV;
16459 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16460 if (!mbox)
16461 return -ENOMEM;
16462 length = (sizeof(struct lpfc_mbx_rq_destroy) -
16463 sizeof(struct lpfc_sli4_cfg_mhdr));
16464 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16465 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16466 length, LPFC_SLI4_MBX_EMBED);
16467 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16468 hrq->queue_id);
16469 mbox->vport = hrq->phba->pport;
16470 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16471 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16472
16473 shdr = (union lpfc_sli4_cfg_shdr *)
16474 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16475 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16476 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16477 if (shdr_status || shdr_add_status || rc) {
16478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16479 "2509 RQ_DESTROY mailbox failed with "
16480 "status x%x add_status x%x, mbx status x%x\n",
16481 shdr_status, shdr_add_status, rc);
16482 if (rc != MBX_TIMEOUT)
16483 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16484 return -ENXIO;
16485 }
16486 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16487 drq->queue_id);
16488 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16489 shdr = (union lpfc_sli4_cfg_shdr *)
16490 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16491 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16492 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16493 if (shdr_status || shdr_add_status || rc) {
16494 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16495 "2510 RQ_DESTROY mailbox failed with "
16496 "status x%x add_status x%x, mbx status x%x\n",
16497 shdr_status, shdr_add_status, rc);
16498 status = -ENXIO;
16499 }
16500 list_del_init(&hrq->list);
16501 list_del_init(&drq->list);
16502 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16503 return status;
16504 }
16505
16506
16507
16508
16509
16510
16511
16512
16513
16514
16515
16516
16517
16518
16519
16520
16521
16522
16523
16524
16525
16526
16527
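/**
 * lpfc_sli4_post_sgl - Register one SGL page pair with the port
 * @phba: HBA structure that indicates port to post the pages to.
 * @pdma_phys_addr0: Physical address of the first SGL page.
 * @pdma_phys_addr1: Physical address of the second SGL page, or zero.
 * @xritag: The XRI the pages are registered against.
 *
 * Builds and issues a POST_SGL_PAGES mailbox command for a single page
 * pair. Returns -EINVAL when @xritag is NO_XRI, -ENOMEM on mailbox
 * allocation failure, -ENXIO if the command completes with an error
 * status, and 0 on success.
 **/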
16528 int
16529 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16530 dma_addr_t pdma_phys_addr0,
16531 dma_addr_t pdma_phys_addr1,
16532 uint16_t xritag)
16533 {
16534 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16535 LPFC_MBOXQ_t *mbox;
16536 int rc;
16537 uint32_t shdr_status, shdr_add_status;
16538 uint32_t mbox_tmo;
16539 union lpfc_sli4_cfg_shdr *shdr;
16540
16541 if (xritag == NO_XRI) {
16542 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16543 "0364 Invalid param:\n");
16544 return -EINVAL;
16545 }
16546
16547 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16548 if (!mbox)
16549 return -ENOMEM;
16550
16551 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16552 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16553 sizeof(struct lpfc_mbx_post_sgl_pages) -
16554 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16555
16556 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16557 &mbox->u.mqe.un.post_sgl_pages;
16558 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16559 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16560
16561 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16562 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16563 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16564 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16565
16566 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16567 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16568 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16569 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16570 if (!phba->sli4_hba.intr_enable)
16571 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16572 else {
16573 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16574 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16575 }
16576
16577 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16578 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16579 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16580 if (rc != MBX_TIMEOUT)
16581 mempool_free(mbox, phba->mbox_mem_pool);
16582 if (shdr_status || shdr_add_status || rc) {
16583 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16584 "2511 POST_SGL mailbox failed with "
16585 "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc);
16586 return -ENXIO;
16587 }
16588 return 0;
16589 }
16590
16591
16592
16593
16594
16595
16596
16597
16598
16599
16600
16601
16602
16603
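/**
 * lpfc_sli4_alloc_xri - Allocate an available XRI
 * @phba: pointer to lpfc hba data structure.
 *
 * Finds the next zero bit in the port's XRI bitmask under the hbalock,
 * marks it in use, and bumps the used-XRI count. Returns the allocated
 * XRI index, or NO_XRI when the pool is exhausted.
 **/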
16604 static uint16_t
16605 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16606 {
16607 unsigned long xri;
16608
16609
16610
16611
16612
16613 spin_lock_irq(&phba->hbalock);
16614 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16615 phba->sli4_hba.max_cfg_param.max_xri, 0);
16616 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16617 spin_unlock_irq(&phba->hbalock);
16618 return NO_XRI;
16619 } else {
16620 set_bit(xri, phba->sli4_hba.xri_bmask);
16621 phba->sli4_hba.max_cfg_param.xri_used++;
16622 }
16623 spin_unlock_irq(&phba->hbalock);
16624 return xri;
16625 }
16626
16627
16628
16629
16630
16631
16632
16633
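/**
 * __lpfc_sli4_free_xri - Release an XRI back to the pool (lock held)
 * @phba: pointer to lpfc hba data structure.
 * @xri: the XRI to release.
 *
 * Clears the XRI's bit in the bitmask and decrements the used-XRI count.
 * The caller is expected to hold the hbalock; lpfc_sli4_free_xri() below
 * is the locked wrapper.
 **/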
16634 static void
16635 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16636 {
16637 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16638 phba->sli4_hba.max_cfg_param.xri_used--;
16639 }
16640 }
16641
16642
16643
16644
16645
16646
16647
16648
16649 void
16650 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16651 {
16652 spin_lock_irq(&phba->hbalock);
16653 __lpfc_sli4_free_xri(phba, xri);
16654 spin_unlock_irq(&phba->hbalock);
16655 }
16656
16657
16658
16659
16660
16661
16662
16663
16664
16665
16666
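/**
 * lpfc_sli4_next_xritag - Get the next available XRI tag
 * @phba: pointer to lpfc hba data structure.
 *
 * Thin wrapper around lpfc_sli4_alloc_xri() that logs a warning with the
 * configured and in-use XRI counts when allocation fails. Returns the new
 * XRI tag, or NO_XRI on failure.
 **/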
16667 uint16_t
16668 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16669 {
16670 uint16_t xri_index;
16671
16672 xri_index = lpfc_sli4_alloc_xri(phba);
16673 if (xri_index == NO_XRI)
16674 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16675 "2004 Failed to allocate XRI. Last XRITAG is %d"
16676 " Max XRI is %d, Used XRI is %d\n",
16677 xri_index,
16678 phba->sli4_hba.max_cfg_param.max_xri,
16679 phba->sli4_hba.max_cfg_param.xri_used);
16680 return xri_index;
16681 }
16682
16683
16684
16685
16686
16687
16688
16689
16690
16691
16692
16693
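/**
 * lpfc_sli4_post_sgl_list - Post a block of SGL pages to the port
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: list of lpfc_sglq entries to post.
 * @post_cnt: number of entries on the list.
 *
 * Builds a single non-embedded POST_SGL_PAGES mailbox command covering the
 * whole block; the request must fit within one SLI4 page or -ENOMEM is
 * returned. The posted XRI range starts at the first entry's sli4_xritag.
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENXIO if the
 * mailbox command fails.
 **/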
16694 static int
16695 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
16696 struct list_head *post_sgl_list,
16697 int post_cnt)
16698 {
16699 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
16700 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16701 struct sgl_page_pairs *sgl_pg_pairs;
16702 void *viraddr;
16703 LPFC_MBOXQ_t *mbox;
16704 uint32_t reqlen, alloclen, pg_pairs;
16705 uint32_t mbox_tmo;
16706 uint16_t xritag_start = 0;
16707 int rc = 0;
16708 uint32_t shdr_status, shdr_add_status;
16709 union lpfc_sli4_cfg_shdr *shdr;
16710
16711 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
16712 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16713 if (reqlen > SLI4_PAGE_SIZE) {
16714 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16715 "2559 Block sgl registration required DMA "
16716 "size (%d) greater than a page\n", reqlen);
16717 return -ENOMEM;
16718 }
16719
16720 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16721 if (!mbox)
16722 return -ENOMEM;
16723
16724
16725 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16726 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16727 LPFC_SLI4_MBX_NEMBED);
16728
16729 if (alloclen < reqlen) {
16730 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16731 "0285 Allocated DMA memory size (%d) is "
16732 "less than the requested DMA memory "
16733 "size (%d)\n", alloclen, reqlen);
16734 lpfc_sli4_mbox_cmd_free(phba, mbox);
16735 return -ENOMEM;
16736 }
16737
16738 viraddr = mbox->sge_array->addr[0];
16739 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16740 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16741
16742 pg_pairs = 0;
16743 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
16744
16745 sgl_pg_pairs->sgl_pg0_addr_lo =
16746 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16747 sgl_pg_pairs->sgl_pg0_addr_hi =
16748 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16749 sgl_pg_pairs->sgl_pg1_addr_lo =
16750 cpu_to_le32(putPaddrLow(0));
16751 sgl_pg_pairs->sgl_pg1_addr_hi =
16752 cpu_to_le32(putPaddrHigh(0));
16753
16754
16755 if (pg_pairs == 0)
16756 xritag_start = sglq_entry->sli4_xritag;
16757 sgl_pg_pairs++;
16758 pg_pairs++;
16759 }
16760
16761
16762 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16763 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
16764 sgl->word0 = cpu_to_le32(sgl->word0);
16765
16766 if (!phba->sli4_hba.intr_enable)
16767 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16768 else {
16769 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16770 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16771 }
16772 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16773 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16774 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16775 if (rc != MBX_TIMEOUT)
16776 lpfc_sli4_mbox_cmd_free(phba, mbox);
16777 if (shdr_status || shdr_add_status || rc) {
16778 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16779 "2513 POST_SGL_BLOCK mailbox command failed "
16780 "status x%x add_status x%x mbx status x%x\n",
16781 shdr_status, shdr_add_status, rc);
16782 rc = -ENXIO;
16783 }
16784 return rc;
16785 }
16786
16787
16788
16789
16790
16791
16792
16793
16794
16795
16796
16797
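/**
 * lpfc_sli4_post_io_sgl_block - Post a block of IO buffer SGLs to the port
 * @phba: pointer to lpfc hba data structure.
 * @nblist: list of lpfc_io_buf entries to post.
 * @count: number of entries on the list.
 *
 * Same scheme as lpfc_sli4_post_sgl_list(), but for IO buffers: each page
 * pair comes from the buffer's dma_phys_sgl, with a second page only when
 * cfg_sg_dma_buf_size exceeds SGL_PAGE_SIZE. Returns 0, -ENOMEM, or
 * -ENXIO.
 **/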
16798 static int
16799 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
16800 int count)
16801 {
16802 struct lpfc_io_buf *lpfc_ncmd;
16803 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16804 struct sgl_page_pairs *sgl_pg_pairs;
16805 void *viraddr;
16806 LPFC_MBOXQ_t *mbox;
16807 uint32_t reqlen, alloclen, pg_pairs;
16808 uint32_t mbox_tmo;
16809 uint16_t xritag_start = 0;
16810 int rc = 0;
16811 uint32_t shdr_status, shdr_add_status;
16812 dma_addr_t pdma_phys_bpl1;
16813 union lpfc_sli4_cfg_shdr *shdr;
16814
16815
16816 reqlen = count * sizeof(struct sgl_page_pairs) +
16817 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16818 if (reqlen > SLI4_PAGE_SIZE) {
16819 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
16820 "6118 Block sgl registration required DMA "
16821 "size (%d) greater than a page\n", reqlen);
16822 return -ENOMEM;
16823 }
16824 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16825 if (!mbox) {
16826 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16827 "6119 Failed to allocate mbox cmd memory\n");
16828 return -ENOMEM;
16829 }
16830
16831
16832 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16833 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16834 reqlen, LPFC_SLI4_MBX_NEMBED);
16835
16836 if (alloclen < reqlen) {
16837 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16838 "6120 Allocated DMA memory size (%d) is "
16839 "less than the requested DMA memory "
16840 "size (%d)\n", alloclen, reqlen);
16841 lpfc_sli4_mbox_cmd_free(phba, mbox);
16842 return -ENOMEM;
16843 }
16844
16845
16846 viraddr = mbox->sge_array->addr[0];
16847
16848
16849 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16850 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16851
16852 pg_pairs = 0;
16853 list_for_each_entry(lpfc_ncmd, nblist, list) {
16854
16855 sgl_pg_pairs->sgl_pg0_addr_lo =
16856 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
16857 sgl_pg_pairs->sgl_pg0_addr_hi =
16858 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
16859 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16860 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
16861 SGL_PAGE_SIZE;
16862 else
16863 pdma_phys_bpl1 = 0;
16864 sgl_pg_pairs->sgl_pg1_addr_lo =
16865 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16866 sgl_pg_pairs->sgl_pg1_addr_hi =
16867 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16868
16869 if (pg_pairs == 0)
16870 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
16871 sgl_pg_pairs++;
16872 pg_pairs++;
16873 }
16874 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16875 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16876
16877 sgl->word0 = cpu_to_le32(sgl->word0);
16878
16879 if (!phba->sli4_hba.intr_enable) {
16880 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16881 } else {
16882 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16883 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16884 }
16885 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
16886 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16887 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16888 if (rc != MBX_TIMEOUT)
16889 lpfc_sli4_mbox_cmd_free(phba, mbox);
16890 if (shdr_status || shdr_add_status || rc) {
16891 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16892 "6125 POST_SGL_BLOCK mailbox command failed "
16893 "status x%x add_status x%x mbx status x%x\n",
16894 shdr_status, shdr_add_status, rc);
16895 rc = -ENXIO;
16896 }
16897 return rc;
16898 }
16899
16900
16901
16902
16903
16904
16905
16906
16907
16908
16909
16910
16911
16912
16913
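/**
 * lpfc_sli4_post_io_sgl_list - Post IO buffer SGLs in xri-contiguous blocks
 * @phba: pointer to lpfc hba data structure.
 * @post_nblist: list of IO buffers waiting to be posted.
 * @sb_count: number of buffers on the list.
 *
 * Batches buffers with consecutive XRIs into blocks of up to
 * LPFC_NEMBED_MBOX_SGL_CNT and posts each block with
 * lpfc_sli4_post_io_sgl_block(); a trailing singleton is posted with
 * lpfc_sli4_post_sgl(). Buffers that fail to post are flagged
 * LPFC_SBUF_NOT_POSTED, and all buffers are then handed back through
 * lpfc_io_buf_replenish(). Returns the number of buffers posted
 * successfully, or -EINVAL when @sb_count is not positive.
 **/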
16914 int
16915 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
16916 struct list_head *post_nblist, int sb_count)
16917 {
16918 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
16919 int status, sgl_size;
16920 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
16921 dma_addr_t pdma_phys_sgl1;
16922 int last_xritag = NO_XRI;
16923 int cur_xritag;
16924 LIST_HEAD(prep_nblist);
16925 LIST_HEAD(blck_nblist);
16926 LIST_HEAD(nvme_nblist);
16927
16928
16929 if (sb_count <= 0)
16930 return -EINVAL;
16931
16932 sgl_size = phba->cfg_sg_dma_buf_size;
16933 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
16934 list_del_init(&lpfc_ncmd->list);
16935 block_cnt++;
16936 if ((last_xritag != NO_XRI) &&
16937 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
16938
16939 list_splice_init(&prep_nblist, &blck_nblist);
16940 post_cnt = block_cnt - 1;
16941
16942 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16943 block_cnt = 1;
16944 } else {
16945
16946 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16947
16948 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
16949 list_splice_init(&prep_nblist, &blck_nblist);
16950 post_cnt = block_cnt;
16951 block_cnt = 0;
16952 }
16953 }
16954 num_posting++;
16955 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16956
16957
16958 if (num_posting == sb_count) {
16959 if (post_cnt == 0) {
16960
16961 list_splice_init(&prep_nblist, &blck_nblist);
16962 post_cnt = block_cnt;
16963 } else if (block_cnt == 1) {
16964
16965 if (sgl_size > SGL_PAGE_SIZE)
16966 pdma_phys_sgl1 =
16967 lpfc_ncmd->dma_phys_sgl +
16968 SGL_PAGE_SIZE;
16969 else
16970 pdma_phys_sgl1 = 0;
16971 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16972 status = lpfc_sli4_post_sgl(
16973 phba, lpfc_ncmd->dma_phys_sgl,
16974 pdma_phys_sgl1, cur_xritag);
16975 if (status) {
16976
16977 lpfc_ncmd->flags |=
16978 LPFC_SBUF_NOT_POSTED;
16979 } else {
16980
16981 lpfc_ncmd->flags &=
16982 ~LPFC_SBUF_NOT_POSTED;
16983 lpfc_ncmd->status = IOSTAT_SUCCESS;
16984 num_posted++;
16985 }
16986
16987 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
16988 }
16989 }
16990
16991
16992 if (post_cnt == 0)
16993 continue;
16994
16995
16996 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
16997 post_cnt);
16998
16999
17000 if (block_cnt == 0)
17001 last_xritag = NO_XRI;
17002
17003
17004 post_cnt = 0;
17005
17006
17007 while (!list_empty(&blck_nblist)) {
17008 list_remove_head(&blck_nblist, lpfc_ncmd,
17009 struct lpfc_io_buf, list);
17010 if (status) {
17011
17012 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
17013 } else {
17014
17015 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
17016 lpfc_ncmd->status = IOSTAT_SUCCESS;
17017 num_posted++;
17018 }
17019 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17020 }
17021 }
17022
17023 lpfc_io_buf_replenish(phba, &nvme_nblist);
17024
17025 return num_posted;
17026 }
17027
17028
17029
17030
17031
17032
17033
17034
17035
17036
17037
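/**
 * lpfc_fc_frame_check - Check whether a received frame should be processed
 * @phba: pointer to lpfc hba data structure.
 * @fc_hdr: pointer to the first frame header of the sequence.
 *
 * Screens the frame's R_CTL and TYPE fields against the values this driver
 * handles; a VFT-tagged frame is re-checked against the header that follows
 * the VFT header. Returns 0 if the frame may be processed, 1 if it should
 * be dropped.
 **/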
17038 static int
17039 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17040 {
17041
17042 struct fc_vft_header *fc_vft_hdr;
17043 uint32_t *header = (uint32_t *) fc_hdr;
17044
17045 #define FC_RCTL_MDS_DIAGS 0xF4
17046
17047 switch (fc_hdr->fh_r_ctl) {
17048 case FC_RCTL_DD_UNCAT:
17049 case FC_RCTL_DD_SOL_DATA:
17050 case FC_RCTL_DD_UNSOL_CTL:
17051 case FC_RCTL_DD_SOL_CTL:
17052 case FC_RCTL_DD_UNSOL_DATA:
17053 case FC_RCTL_DD_DATA_DESC:
17054 case FC_RCTL_DD_UNSOL_CMD:
17055 case FC_RCTL_DD_CMD_STATUS:
17056 case FC_RCTL_ELS_REQ:
17057 case FC_RCTL_ELS_REP:
17058 case FC_RCTL_ELS4_REQ:
17059 case FC_RCTL_ELS4_REP:
17060 case FC_RCTL_BA_NOP:
17061 case FC_RCTL_BA_ABTS:
17062 case FC_RCTL_BA_RMC:
17063 case FC_RCTL_BA_ACC:
17064 case FC_RCTL_BA_RJT:
17065 case FC_RCTL_BA_PRMT:
17066 case FC_RCTL_ACK_1:
17067 case FC_RCTL_ACK_0:
17068 case FC_RCTL_P_RJT:
17069 case FC_RCTL_F_RJT:
17070 case FC_RCTL_P_BSY:
17071 case FC_RCTL_F_BSY:
17072 case FC_RCTL_F_BSYL:
17073 case FC_RCTL_LCR:
17074 case FC_RCTL_MDS_DIAGS:
17075 case FC_RCTL_END:
17076 break;
17077 case FC_RCTL_VFTH:
17078 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17079 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17080 return lpfc_fc_frame_check(phba, fc_hdr);
17081 default:
17082 goto drop;
17083 }
17084
17085 switch (fc_hdr->fh_type) {
17086 case FC_TYPE_BLS:
17087 case FC_TYPE_ELS:
17088 case FC_TYPE_FCP:
17089 case FC_TYPE_CT:
17090 case FC_TYPE_NVME:
17091 break;
17092 case FC_TYPE_IP:
17093 case FC_TYPE_ILS:
17094 default:
17095 goto drop;
17096 }
17097
17098 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
17099 "2538 Received frame rctl:x%x, type:x%x, "
17100 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17101 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17102 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17103 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17104 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17105 be32_to_cpu(header[6]));
17106 return 0;
17107 drop:
17108 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
17109 "2539 Dropped frame rctl:x%x type:x%x\n",
17110 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17111 return 1;
17112 }
17113
17114
17115
17116
17117
17118
17119
17120
17121
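/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from a VFT-tagged frame
 * @fc_hdr: pointer to the received frame header.
 *
 * Returns the VF_ID carried in the virtual fabric tagging header, or 0 if
 * the frame is not VFT tagged.
 **/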
17122 static uint32_t
17123 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17124 {
17125 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17126
17127 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17128 return 0;
17129 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17130 }
17131
17132
17133
17134
17135
17136
17137
17138
17139
17140
17141
17142
17143
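/**
 * lpfc_fc_frame_to_vport - Resolve a received frame to a vport
 * @phba: pointer to lpfc hba data structure.
 * @fc_hdr: pointer to the received frame header.
 * @fcfi: the FCF index the frame arrived on.
 * @did: the destination ID of the frame.
 *
 * Frames addressed to Fabric_DID, and point-to-point frames received
 * before the link is fully ready, are given to the physical port.
 * Otherwise the vport whose FCFI, VFI, and DID all match is returned, or
 * NULL if none does.
 **/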
17144 static struct lpfc_vport *
17145 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17146 uint16_t fcfi, uint32_t did)
17147 {
17148 struct lpfc_vport **vports;
17149 struct lpfc_vport *vport = NULL;
17150 int i;
17151
17152 if (did == Fabric_DID)
17153 return phba->pport;
17154 if ((phba->pport->fc_flag & FC_PT2PT) &&
17155 !(phba->link_state == LPFC_HBA_READY))
17156 return phba->pport;
17157
17158 vports = lpfc_create_vport_work_array(phba);
17159 if (vports != NULL) {
17160 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17161 if (phba->fcf.fcfi == fcfi &&
17162 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17163 vports[i]->fc_myDID == did) {
17164 vport = vports[i];
17165 break;
17166 }
17167 }
17168 }
17169 lpfc_destroy_vport_work_array(phba, vports);
17170 return vport;
17171 }
17172
17173
17174
17175
17176
17177
17178
17179
17180
17181
17182
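/**
 * lpfc_update_rcv_time_stamp - Refresh the vport's oldest-sequence stamp
 * @vport: The vport to update.
 *
 * Copies the timestamp of the first (oldest) partial sequence on
 * vport->rcv_buffer_list into vport->rcv_buffer_time_stamp, which the
 * E_D_TOV sweep below uses as its short-circuit check.
 **/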
17183 static void
17184 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17185 {
17186 struct lpfc_dmabuf *h_buf;
17187 struct hbq_dmabuf *dmabuf = NULL;
17188
17189
17190 h_buf = list_get_first(&vport->rcv_buffer_list,
17191 struct lpfc_dmabuf, list);
17192 if (!h_buf)
17193 return;
17194 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17195 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17196 }
17197
17198
17199
17200
17201
17202
17203
17204
17205
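/**
 * lpfc_cleanup_rcv_buffers - Free all partially received sequences
 * @vport: The vport to clean up.
 *
 * Walks vport->rcv_buffer_list and frees every header and data buffer of
 * every partially assembled sequence.
 **/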
17206 void
17207 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17208 {
17209 struct lpfc_dmabuf *h_buf, *hnext;
17210 struct lpfc_dmabuf *d_buf, *dnext;
17211 struct hbq_dmabuf *dmabuf = NULL;
17212
17213
17214 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17215 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17216 list_del_init(&dmabuf->hbuf.list);
17217 list_for_each_entry_safe(d_buf, dnext,
17218 &dmabuf->dbuf.list, list) {
17219 list_del_init(&d_buf->list);
17220 lpfc_in_buf_free(vport->phba, d_buf);
17221 }
17222 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17223 }
17224 }
17225
17226
17227
17228
17229
17230
17231
17232
17233
17234
17235
17236
17237
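/**
 * lpfc_rcv_seq_check_edtov - Free partial sequences that timed out
 * @vport: The vport to check.
 *
 * Frees any partially assembled sequence that has been waiting longer than
 * E_D_TOV. The list is ordered oldest first, so the walk stops at the first
 * sequence still within the timeout; the vport timestamp is refreshed if
 * anything was freed.
 **/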
17238 void
17239 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17240 {
17241 struct lpfc_dmabuf *h_buf, *hnext;
17242 struct lpfc_dmabuf *d_buf, *dnext;
17243 struct hbq_dmabuf *dmabuf = NULL;
17244 unsigned long timeout;
17245 int abort_count = 0;
17246
17247 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17248 vport->rcv_buffer_time_stamp);
17249 if (list_empty(&vport->rcv_buffer_list) ||
17250 time_before(jiffies, timeout))
17251 return;
17252
17253 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17254 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17255 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17256 dmabuf->time_stamp);
17257 if (time_before(jiffies, timeout))
17258 break;
17259 abort_count++;
17260 list_del_init(&dmabuf->hbuf.list);
17261 list_for_each_entry_safe(d_buf, dnext,
17262 &dmabuf->dbuf.list, list) {
17263 list_del_init(&d_buf->list);
17264 lpfc_in_buf_free(vport->phba, d_buf);
17265 }
17266 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17267 }
17268 if (abort_count)
17269 lpfc_update_rcv_time_stamp(vport);
17270 }
17271
17272
17273
17274
17275
17276
17277
17278
17279
17280
17281
17282
17283
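/**
 * lpfc_fc_frame_add - Add a frame to the vport's partial-sequence list
 * @vport: The vport the frame was received on.
 * @dmabuf: The received frame.
 *
 * Sequences are keyed by SEQ_ID, OX_ID, and S_ID. A frame that starts a
 * new sequence becomes a new list head; otherwise the frame is inserted
 * into the matching sequence in SEQ_CNT order, searching backward from the
 * tail. Returns the hbq_dmabuf heading the sequence the frame belongs to,
 * or NULL if the frame could not be placed (for example a duplicate
 * SEQ_CNT).
 **/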
17284 static struct hbq_dmabuf *
17285 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17286 {
17287 struct fc_frame_header *new_hdr;
17288 struct fc_frame_header *temp_hdr;
17289 struct lpfc_dmabuf *d_buf;
17290 struct lpfc_dmabuf *h_buf;
17291 struct hbq_dmabuf *seq_dmabuf = NULL;
17292 struct hbq_dmabuf *temp_dmabuf = NULL;
17293 uint8_t found = 0;
17294
17295 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17296 dmabuf->time_stamp = jiffies;
17297 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17298
17299
17300 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17301 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17302 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17303 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17304 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17305 continue;
17306
17307 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17308 break;
17309 }
17310 if (!seq_dmabuf) {
17311
17312
17313
17314
17315 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17316 lpfc_update_rcv_time_stamp(vport);
17317 return dmabuf;
17318 }
17319 temp_hdr = seq_dmabuf->hbuf.virt;
17320 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17321 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17322 list_del_init(&seq_dmabuf->hbuf.list);
17323 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17324 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17325 lpfc_update_rcv_time_stamp(vport);
17326 return dmabuf;
17327 }
17328
17329 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17330 seq_dmabuf->time_stamp = jiffies;
17331 lpfc_update_rcv_time_stamp(vport);
17332 if (list_empty(&seq_dmabuf->dbuf.list)) {
17333 temp_hdr = dmabuf->hbuf.virt;
17334 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17335 return seq_dmabuf;
17336 }
17337
17338 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17339 while (!found) {
17340 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17341 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17342
17343
17344
17345
17346 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17347 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17348 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17349 found = 1;
17350 break;
17351 }
17352
17353 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17354 break;
17355 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
17356 }
17357
17358 if (found)
17359 return seq_dmabuf;
17360 return NULL;
17361 }
17362
17363
17364
17365
17366
17367
17368
17369
17370
17371
17372
17373
17374
17375
17376
17377
17378
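/**
 * lpfc_sli4_abort_partial_seq - Abort a matching partially assembled sequence
 * @vport: The vport the ABTS arrived on.
 * @dmabuf: The ABTS frame.
 *
 * Looks for a partial sequence with the same SEQ_ID, OX_ID, and S_ID as
 * the ABTS and, if found, frees all of its buffers. Returns true when a
 * sequence was found and aborted, false otherwise.
 **/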
17379 static bool
17380 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17381 struct hbq_dmabuf *dmabuf)
17382 {
17383 struct fc_frame_header *new_hdr;
17384 struct fc_frame_header *temp_hdr;
17385 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17386 struct hbq_dmabuf *seq_dmabuf = NULL;
17387
17388
17389 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17390 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17391 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17392 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17393 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17394 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17395 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17396 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17397 continue;
17398
17399 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17400 break;
17401 }
17402
17403
17404 if (seq_dmabuf) {
17405 list_for_each_entry_safe(d_buf, n_buf,
17406 &seq_dmabuf->dbuf.list, list) {
17407 list_del_init(&d_buf->list);
17408 lpfc_in_buf_free(vport->phba, d_buf);
17409 }
17410 return true;
17411 }
17412 return false;
17413 }
17414
17415
17416
17417
17418
17419
17420
17421
17422
17423
17424
17425
17426
17427
17428
17429
17430
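/**
 * lpfc_sli4_abort_ulp_seq - Offer an unsolicited abort to the ULP
 * @vport: The vport the ABTS arrived on.
 * @dmabuf: The ABTS frame.
 *
 * On SLI4 ports, gives the CT upper layer a chance to handle the abort via
 * lpfc_ct_handle_unsol_abort(). Returns true if the ULP consumed the
 * event, false otherwise.
 **/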
17431 static bool
17432 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17433 {
17434 struct lpfc_hba *phba = vport->phba;
17435 int handled;
17436
17437
17438 if (phba->sli_rev < LPFC_SLI_REV4)
17439 return false;
17440
17441
17442 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17443 if (handled)
17444 return true;
17445
17446 return false;
17447 }
17448
17449
17450
17451
17452
17453
17454
17455
17456
17457
17458
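/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS sequence-abort response completion
 * @phba: pointer to lpfc hba data structure.
 * @cmd_iocbq: the completed BLS response iocb.
 * @rsp_iocbq: the response iocb carrying the completion status.
 *
 * Drops the node references taken when the response was built, releases
 * the command iocb, and logs the failure status if the BLS response did
 * not complete cleanly.
 **/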
17459 static void
17460 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
17461 struct lpfc_iocbq *cmd_iocbq,
17462 struct lpfc_iocbq *rsp_iocbq)
17463 {
17464 struct lpfc_nodelist *ndlp;
17465
17466 if (cmd_iocbq) {
17467 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17468 lpfc_nlp_put(ndlp);
17469 lpfc_nlp_not_used(ndlp);
17470 lpfc_sli_release_iocbq(phba, cmd_iocbq);
17471 }
17472
17473
17474 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17475 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17476 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17477 rsp_iocbq->iocb.ulpStatus,
17478 rsp_iocbq->iocb.un.ulpWord[4]);
17479 }
17480
17481
17482
17483
17484
17485
17486
17487
17488
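/**
 * lpfc_sli4_xri_inrange - Map a physical XRI to its local index
 * @phba: pointer to lpfc hba data structure.
 * @xri: the physical XRI to look up.
 *
 * Linear search of the port's xri_ids table. Returns the local index of
 * the matching entry, or NO_XRI if the XRI does not belong to this port.
 **/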
17489 uint16_t
17490 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17491 uint16_t xri)
17492 {
17493 uint16_t i;
17494
17495 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17496 if (xri == phba->sli4_hba.xri_ids[i])
17497 return i;
17498 }
17499 return NO_XRI;
17500 }
17501
17502
17503
17504
17505
17506
17507
17508
17509
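/**
 * lpfc_sli4_seq_abort_rsp - Respond to an unsolicited sequence abort
 * @vport: The vport the ABTS arrived on.
 * @fc_hdr: header of the ABTS frame.
 * @aborted: whether the exchange was successfully aborted locally.
 *
 * Builds and issues an XMIT_BLS_RSP64_CX iocb carrying a BA_ACC, or a
 * BA_RJT when the abort did not take effect or the XRI is out of range.
 * The originating node is looked up (and created or re-enabled if needed),
 * and the affected XRI is marked active in the RRQ so it is not reused
 * before the exchange is fully torn down.
 **/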
17510 void
17511 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17512 struct fc_frame_header *fc_hdr, bool aborted)
17513 {
17514 struct lpfc_hba *phba = vport->phba;
17515 struct lpfc_iocbq *ctiocb = NULL;
17516 struct lpfc_nodelist *ndlp;
17517 uint16_t oxid, rxid, xri, lxri;
17518 uint32_t sid, fctl;
17519 IOCB_t *icmd;
17520 int rc;
17521
17522 if (!lpfc_is_link_up(phba))
17523 return;
17524
17525 sid = sli4_sid_from_fc_hdr(fc_hdr);
17526 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17527 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17528
17529 ndlp = lpfc_findnode_did(vport, sid);
17530 if (!ndlp) {
17531 ndlp = lpfc_nlp_init(vport, sid);
17532 if (!ndlp) {
17533 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17534 "1268 Failed to allocate ndlp for "
17535 "oxid:x%x SID:x%x\n", oxid, sid);
17536 return;
17537 }
17538
17539 lpfc_enqueue_node(vport, ndlp);
17540 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17541
17542 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17543 if (!ndlp) {
17544 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17545 "3275 Failed to activate ndlp found "
17546 "for oxid:x%x SID:x%x\n", oxid, sid);
17547 return;
17548 }
17549 }
17550
17551
17552 ctiocb = lpfc_sli_get_iocbq(phba);
17553 if (!ctiocb)
17554 return;
17555
17556
17557 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17558
17559 icmd = &ctiocb->iocb;
17560 icmd->un.xseq64.bdl.bdeSize = 0;
17561 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17562 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17563 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17564 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17565
17566
17567 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17568 icmd->ulpBdeCount = 0;
17569 icmd->ulpLe = 1;
17570 icmd->ulpClass = CLASS3;
17571 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17572 ctiocb->context1 = lpfc_nlp_get(ndlp);
17573
17574 ctiocb->vport = phba->pport;
17575 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17576 ctiocb->sli4_lxritag = NO_XRI;
17577 ctiocb->sli4_xritag = NO_XRI;
17578
17579 if (fctl & FC_FC_EX_CTX)
17580
17581
17582
17583 xri = oxid;
17584 else
17585 xri = rxid;
17586 lxri = lpfc_sli4_xri_inrange(phba, xri);
17587 if (lxri != NO_XRI)
17588 lpfc_set_rrq_active(phba, ndlp, lxri,
17589 (xri == oxid) ? rxid : oxid, 0);
17590
17591
17592
17593
17594
17595 if ((fctl & FC_FC_EX_CTX) &&
17596 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17597 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17598 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17599 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17600 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17601 }
17602
17603
17604
17605
17606
17607 if (aborted == false) {
17608 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17609 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17610 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17611 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17612 }
17613
17614 if (fctl & FC_FC_EX_CTX) {
17615
17616
17617
17618
17619 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17620 } else {
17621
17622
17623
17624
17625 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17626 }
17627 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
17628 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
17629
17630
17631 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17632 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17633 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
17634
17635 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17636 if (rc == IOCB_ERROR) {
17637 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17638 "2925 Failed to issue CT ABTS RSP x%x on "
17639 "xri x%x, Data x%x\n",
17640 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17641 phba->link_state);
17642 lpfc_nlp_put(ndlp);
17643 ctiocb->context1 = NULL;
17644 lpfc_sli_release_iocbq(phba, ctiocb);
17645 }
17646 }
17647
17648
17649
17650
17651
17652
17653
17654
17655
17656
17657
17658
17659
17660
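/**
 * lpfc_sli4_handle_unsol_abort - Handle an unsolicited ABTS
 * @vport: The vport the ABTS arrived on.
 * @dmabuf: The ABTS frame.
 *
 * If the local port is the exchange responder, the sequence is treated as
 * already aborted; otherwise the driver first tries to drop a matching
 * partial sequence and then offers the abort to the ULP. NVMET ports hand
 * the event to lpfc_nvmet_rcv_unsol_abort(); everyone else sends a BLS
 * response with lpfc_sli4_seq_abort_rsp().
 **/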
17661 static void
17662 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17663 struct hbq_dmabuf *dmabuf)
17664 {
17665 struct lpfc_hba *phba = vport->phba;
17666 struct fc_frame_header fc_hdr;
17667 uint32_t fctl;
17668 bool aborted;
17669
17670
17671 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17672 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17673
17674 if (fctl & FC_FC_EX_CTX) {
17675
17676 aborted = true;
17677 } else {
17678
17679 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17680 if (aborted == false)
17681 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
17682 }
17683 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17684
17685 if (phba->nvmet_support) {
17686 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17687 return;
17688 }
17689
17690
17691 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17692 }
17693
17694
17695
17696
17697
17698
17699
17700
17701
17702
17703
17704
17705
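/**
 * lpfc_seq_complete - Check whether a buffered sequence is complete
 * @dmabuf: the first frame of the sequence.
 *
 * Walks the frames of the sequence verifying that SEQ_CNT values are
 * contiguous from zero and looking for a frame with the END_SEQ F_CTL bit.
 * Returns 1 when the sequence is complete, 0 otherwise.
 **/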
17706 static int
17707 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17708 {
17709 struct fc_frame_header *hdr;
17710 struct lpfc_dmabuf *d_buf;
17711 struct hbq_dmabuf *seq_dmabuf;
17712 uint32_t fctl;
17713 int seq_count = 0;
17714
17715 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17716
17717 if (hdr->fh_seq_cnt != seq_count)
17718 return 0;
17719 fctl = (hdr->fh_f_ctl[0] << 16 |
17720 hdr->fh_f_ctl[1] << 8 |
17721 hdr->fh_f_ctl[2]);
17722
17723 if (fctl & FC_FC_END_SEQ)
17724 return 1;
17725 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17726 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17727 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17728
17729 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
17730 return 0;
17731 fctl = (hdr->fh_f_ctl[0] << 16 |
17732 hdr->fh_f_ctl[1] << 8 |
17733 hdr->fh_f_ctl[2]);
17734
17735 if (fctl & FC_FC_END_SEQ)
17736 return 1;
17737 }
17738 return 0;
17739 }
17740
17741
17742
17743
17744
17745
17746
17747
17748
17749
17750
17751
17752
17753
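/**
 * lpfc_prep_seq - Convert a completed sequence into a chain of iocbqs
 * @vport: The vport the sequence was received on.
 * @seq_dmabuf: the first frame of the completed sequence.
 *
 * Packs the sequence's buffers two per iocbq (context2 and context3),
 * chaining additional iocbqs off the first as needed. ELS frames are
 * tagged CMD_IOCB_RCV_ELS64_CX, everything else CMD_IOCB_RCV_SEQ64_CX,
 * and the accumulated length is tracked in unsli3.rcvsli3.acc_len.
 * Returns the first iocbq of the chain, or NULL if none could be
 * allocated.
 **/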
17754 static struct lpfc_iocbq *
17755 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17756 {
17757 struct hbq_dmabuf *hbq_buf;
17758 struct lpfc_dmabuf *d_buf, *n_buf;
17759 struct lpfc_iocbq *first_iocbq, *iocbq;
17760 struct fc_frame_header *fc_hdr;
17761 uint32_t sid;
17762 uint32_t len, tot_len;
17763 struct ulp_bde64 *pbde;
17764
17765 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17766
17767 list_del_init(&seq_dmabuf->hbuf.list);
17768 lpfc_update_rcv_time_stamp(vport);
17769
17770 sid = sli4_sid_from_fc_hdr(fc_hdr);
17771 tot_len = 0;
17772
17773 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17774 if (first_iocbq) {
17775
17776 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
17777 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
17778 first_iocbq->vport = vport;
17779
17780
17781 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17782 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17783 first_iocbq->iocb.un.rcvels.parmRo =
17784 sli4_did_from_fc_hdr(fc_hdr);
17785 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17786 } else
17787 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
17788 first_iocbq->iocb.ulpContext = NO_XRI;
17789 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17790 be16_to_cpu(fc_hdr->fh_ox_id);
17791
17792 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17793 vport->phba->vpi_ids[vport->vpi];
17794
17795 tot_len = bf_get(lpfc_rcqe_length,
17796 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17797
17798 first_iocbq->context2 = &seq_dmabuf->dbuf;
17799 first_iocbq->context3 = NULL;
17800 first_iocbq->iocb.ulpBdeCount = 1;
17801 if (tot_len > LPFC_DATA_BUF_SIZE)
17802 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17803 LPFC_DATA_BUF_SIZE;
17804 else
17805 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17806
17807 first_iocbq->iocb.un.rcvels.remoteID = sid;
17808
17809 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17810 }
17811 iocbq = first_iocbq;
17812
17813
17814
17815
17816 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17817 if (!iocbq) {
17818 lpfc_in_buf_free(vport->phba, d_buf);
17819 continue;
17820 }
17821 if (!iocbq->context3) {
17822 iocbq->context3 = d_buf;
17823 iocbq->iocb.ulpBdeCount++;
17824
17825 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17826 len = bf_get(lpfc_rcqe_length,
17827 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17828 pbde = (struct ulp_bde64 *)
17829 &iocbq->iocb.unsli3.sli3Words[4];
17830 if (len > LPFC_DATA_BUF_SIZE)
17831 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17832 else
17833 pbde->tus.f.bdeSize = len;
17834
17835 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17836 tot_len += len;
17837 } else {
17838 iocbq = lpfc_sli_get_iocbq(vport->phba);
17839 if (!iocbq) {
17840 if (first_iocbq) {
17841 first_iocbq->iocb.ulpStatus =
17842 IOSTAT_FCP_RSP_ERROR;
17843 first_iocbq->iocb.un.ulpWord[4] =
17844 IOERR_NO_RESOURCES;
17845 }
17846 lpfc_in_buf_free(vport->phba, d_buf);
17847 continue;
17848 }
17849
17850 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17851 len = bf_get(lpfc_rcqe_length,
17852 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17853 iocbq->context2 = d_buf;
17854 iocbq->context3 = NULL;
17855 iocbq->iocb.ulpBdeCount = 1;
17856 if (len > LPFC_DATA_BUF_SIZE)
17857 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17858 LPFC_DATA_BUF_SIZE;
17859 else
17860 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
17861
17862 tot_len += len;
17863 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17864
17865 iocbq->iocb.un.rcvels.remoteID = sid;
17866 list_add_tail(&iocbq->list, &first_iocbq->list);
17867 }
17868 }
17869 return first_iocbq;
17870 }
17871
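/**
 * lpfc_sli4_send_seq_to_ulp - Deliver an assembled sequence to the ULP
 * @vport: The vport the sequence was received on.
 * @seq_dmabuf: the first frame of the completed sequence.
 *
 * Converts the sequence with lpfc_prep_seq() and passes it to
 * lpfc_complete_unsol_iocb() on the ELS ring, then releases the whole
 * iocbq chain.
 **/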
17872 static void
17873 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17874 struct hbq_dmabuf *seq_dmabuf)
17875 {
17876 struct fc_frame_header *fc_hdr;
17877 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17878 struct lpfc_hba *phba = vport->phba;
17879
17880 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17881 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17882 if (!iocbq) {
17883 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17884 "2707 Ring %d handler: Failed to allocate "
17885 "iocb Rctl x%x Type x%x received\n",
17886 LPFC_ELS_RING,
17887 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17888 return;
17889 }
17890 if (!lpfc_complete_unsol_iocb(phba,
17891 phba->sli4_hba.els_wq->pring,
17892 iocbq, fc_hdr->fh_r_ctl,
17893 fc_hdr->fh_type))
17894 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17895 "2540 Ring %d handler: unexpected Rctl "
17896 "x%x Type x%x received\n",
17897 LPFC_ELS_RING,
17898 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17899
17900
17901 list_for_each_entry_safe(curr_iocb, next_iocb,
17902 &iocbq->list, list) {
17903 list_del_init(&curr_iocb->list);
17904 lpfc_sli_release_iocbq(phba, curr_iocb);
17905 }
17906 lpfc_sli_release_iocbq(phba, iocbq);
17907 }
17908
17909 static void
17910 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17911 struct lpfc_iocbq *rspiocb)
17912 {
17913 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17914
17915 if (pcmd && pcmd->virt)
17916 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17917 kfree(pcmd);
17918 lpfc_sli_release_iocbq(phba, cmdiocb);
17919 lpfc_drain_txq(phba);
17920 }
17921
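/**
 * lpfc_sli4_handle_mds_loopback - Reflect an MDS diagnostic frame
 * @vport: The vport the frame was received on.
 * @dmabuf: The received frame.
 *
 * Copies the received payload into a fresh buffer and sends it back out
 * with a SEND_FRAME work queue entry rebuilt from the received header. If
 * no iocbq is available, the event is queued on the slow-path event list
 * and the worker thread is woken to handle it later.
 **/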
17922 static void
17923 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17924 struct hbq_dmabuf *dmabuf)
17925 {
17926 struct fc_frame_header *fc_hdr;
17927 struct lpfc_hba *phba = vport->phba;
17928 struct lpfc_iocbq *iocbq = NULL;
17929 union lpfc_wqe *wqe;
17930 struct lpfc_dmabuf *pcmd = NULL;
17931 uint32_t frame_len;
17932 int rc;
17933 unsigned long iflags;
17934
17935 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17936 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17937
17938
17939 iocbq = lpfc_sli_get_iocbq(phba);
17940 if (!iocbq) {
17941
17942 spin_lock_irqsave(&phba->hbalock, iflags);
17943 list_add_tail(&dmabuf->cq_event.list,
17944 &phba->sli4_hba.sp_queue_event);
17945 phba->hba_flag |= HBA_SP_QUEUE_EVT;
17946 spin_unlock_irqrestore(&phba->hbalock, iflags);
17947 lpfc_worker_wake_up(phba);
17948 return;
17949 }
17950
17951
17952 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17953 if (pcmd)
17954 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
17955 &pcmd->phys);
17956 if (!pcmd || !pcmd->virt)
17957 goto exit;
17958
17959 INIT_LIST_HEAD(&pcmd->list);
17960
17961
17962 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17963
17964
17965 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17966 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17967 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17968 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17969
17970 iocbq->context2 = pcmd;
17971 iocbq->vport = vport;
17972 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17973 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17974
17975
17976
17977
17978
17979 wqe = (union lpfc_wqe *)&iocbq->iocb;
17980
17981 wqe->send_frame.frame_len = frame_len;
17982 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17983 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17984 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17985 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17986 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17987 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17988
17989 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17990 iocbq->iocb.ulpLe = 1;
17991 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17992 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17993 if (rc == IOCB_ERROR)
17994 goto exit;
17995
17996 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17997 return;
17998
17999 exit:
18000 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18001 "2023 Unable to process MDS loopback frame\n");
18002 if (pcmd && pcmd->virt)
18003 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18004 kfree(pcmd);
18005 if (iocbq)
18006 lpfc_sli_release_iocbq(phba, iocbq);
18007 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18008 }
18009
18010
18011
18012
18013
18014
18015
18016
18017
18018
18019
18020
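/**
 * lpfc_sli4_handle_received_buffer - Process an unsolicited receive buffer
 * @phba: pointer to lpfc hba data structure.
 * @dmabuf: the received buffer.
 *
 * Top-level unsolicited receive path: MDS diagnostic frames are reflected,
 * the header is validated, the FCF index and target vport are resolved,
 * ABTS frames are handed to the abort path, and all other frames are added
 * to the vport's partial-sequence list. Once a sequence is complete it is
 * sent up with lpfc_sli4_send_seq_to_ulp(). Frames that cannot be matched
 * or placed are freed.
 **/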
18021 void
18022 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
18023 struct hbq_dmabuf *dmabuf)
18024 {
18025 struct hbq_dmabuf *seq_dmabuf;
18026 struct fc_frame_header *fc_hdr;
18027 struct lpfc_vport *vport;
18028 uint32_t fcfi;
18029 uint32_t did;
18030
18031
18032 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18033
18034 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18035 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18036 vport = phba->pport;
18037
18038 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18039 return;
18040 }
18041
18042
18043 if (lpfc_fc_frame_check(phba, fc_hdr)) {
18044 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18045 return;
18046 }
18047
18048 if ((bf_get(lpfc_cqe_code,
18049 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
18050 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
18051 &dmabuf->cq_event.cqe.rcqe_cmpl);
18052 else
18053 fcfi = bf_get(lpfc_rcqe_fcf_id,
18054 &dmabuf->cq_event.cqe.rcqe_cmpl);
18055
18056 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
18057 vport = phba->pport;
18058 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18059 "2023 MDS Loopback %d bytes\n",
18060 bf_get(lpfc_rcqe_length,
18061 &dmabuf->cq_event.cqe.rcqe_cmpl));
18062
18063 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18064 return;
18065 }
18066
18067
18068 did = sli4_did_from_fc_hdr(fc_hdr);
18069
18070 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
18071 if (!vport) {
18072
18073 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18074 return;
18075 }
18076
18077
18078 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18079 (did != Fabric_DID)) {
18080
18081
18082
18083
18084
18085 if (!(vport->fc_flag & FC_PT2PT) ||
18086 (phba->link_state == LPFC_HBA_READY)) {
18087 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18088 return;
18089 }
18090 }
18091
18092
18093 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18094 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18095 return;
18096 }
18097
18098
18099 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18100 if (!seq_dmabuf) {
18101
18102 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18103 return;
18104 }
18105
18106 if (!lpfc_seq_complete(seq_dmabuf))
18107 return;
18108
18109
18110 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
18111 }
18112
18113
18114
18115
18116
18117
18118
18119
18120
18121
18122
18123
18124
18125
18126
18127
18128
18129
18130
18131
18132
18133
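/**
 * lpfc_sli4_post_all_rpi_hdrs - Post all RPI header pages to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * Posts every page on lpfc_rpi_hdr_list with lpfc_sli4_post_rpi_hdr().
 * Does nothing when RPI headers are not in use, and returns -EIO when
 * resource extents are in use instead. The RPI-resource-ready flag is set
 * on the way out. Returns 0 on success or -EIO on a posting failure.
 **/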
18134 int
18135 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18136 {
18137 struct lpfc_rpi_hdr *rpi_page;
18138 uint32_t rc = 0;
18139 uint16_t lrpi = 0;
18140
18141
18142 if (!phba->sli4_hba.rpi_hdrs_in_use)
18143 goto exit;
18144 if (phba->sli4_hba.extents_in_use)
18145 return -EIO;
18146
18147 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18148
18149
18150
18151
18152
18153 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18154 LPFC_RPI_RSRC_RDY)
18155 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18156
18157 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18158 if (rc != MBX_SUCCESS) {
18159 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18160 "2008 Error %d posting all rpi "
18161 "headers\n", rc);
18162 rc = -EIO;
18163 break;
18164 }
18165 }
18166
18167 exit:
18168 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18169 LPFC_RPI_RSRC_RDY);
18170 return rc;
18171 }
18172
18173
18174
18175
18176
18177
18178
18179
18180
18181
18182
18183
18184
18185
18186
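/**
 * lpfc_sli4_post_rpi_hdr - Post one RPI header page to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: the RPI header page to post.
 *
 * Issues a polled POST_HDR_TEMPLATE mailbox command with the page's
 * physical address, starting RPI, and page count. On success the port's
 * next_rpi is advanced to the page's next_rpi. Returns 0 on success, -EIO
 * if extents are in use, -ENOMEM on allocation failure, or -ENXIO if the
 * mailbox command fails.
 **/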
18187 int
18188 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18189 {
18190 LPFC_MBOXQ_t *mboxq;
18191 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18192 uint32_t rc = 0;
18193 uint32_t shdr_status, shdr_add_status;
18194 union lpfc_sli4_cfg_shdr *shdr;
18195
18196
18197 if (!phba->sli4_hba.rpi_hdrs_in_use)
18198 return rc;
18199 if (phba->sli4_hba.extents_in_use)
18200 return -EIO;
18201
18202
18203 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18204 if (!mboxq) {
18205 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18206 "2001 Unable to allocate memory for issuing "
18207 "SLI_CONFIG_SPECIAL mailbox command\n");
18208 return -ENOMEM;
18209 }
18210
18211
18212 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18213 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18214 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18215 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18216 sizeof(struct lpfc_sli4_cfg_mhdr),
18217 LPFC_SLI4_MBX_EMBED);
18218
18219
18220
18221 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18222 rpi_page->start_rpi);
18223 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18224 hdr_tmpl, rpi_page->page_count);
18225
18226 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18227 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18228 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18229 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18230 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18231 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18232 if (rc != MBX_TIMEOUT)
18233 mempool_free(mboxq, phba->mbox_mem_pool);
18234 if (shdr_status || shdr_add_status || rc) {
18235 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18236 "2514 POST_RPI_HDR mailbox failed with "
18237 "status x%x add_status x%x, mbx status x%x\n",
18238 shdr_status, shdr_add_status, rc);
18239 rc = -ENXIO;
18240 } else {
18241
18242
18243
18244
18245 spin_lock_irq(&phba->hbalock);
18246 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18247 spin_unlock_irq(&phba->hbalock);
18248 }
18249 return rc;
18250 }
18251
18252
18253
18254
18255
18256
18257
18258
18259
18260
18261
18262
18263
18264
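/**
 * lpfc_sli4_alloc_rpi - Allocate an available RPI
 * @phba: pointer to lpfc hba data structure.
 *
 * Finds the next zero bit in the RPI bitmask under the hbalock and marks
 * it in use. When the number of remaining RPIs falls below
 * LPFC_RPI_LOW_WATER_MARK and RPI headers are in use, a new header page is
 * created and posted to grow the pool. Returns the RPI, or
 * LPFC_RPI_ALLOC_ERROR when none is available.
 **/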
18265 int
18266 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18267 {
18268 unsigned long rpi;
18269 uint16_t max_rpi, rpi_limit;
18270 uint16_t rpi_remaining, lrpi = 0;
18271 struct lpfc_rpi_hdr *rpi_hdr;
18272 unsigned long iflag;
18273
18274
18275
18276
18277
18278 spin_lock_irqsave(&phba->hbalock, iflag);
18279 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18280 rpi_limit = phba->sli4_hba.next_rpi;
18281
18282 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18283 if (rpi >= rpi_limit)
18284 rpi = LPFC_RPI_ALLOC_ERROR;
18285 else {
18286 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18287 phba->sli4_hba.max_cfg_param.rpi_used++;
18288 phba->sli4_hba.rpi_count++;
18289 }
18290 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18291 "0001 rpi:%x max:%x lim:%x\n",
18292 (int) rpi, max_rpi, rpi_limit);
18293
18294
18295
18296
18297
18298 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18299 (phba->sli4_hba.rpi_count >= max_rpi)) {
18300 spin_unlock_irqrestore(&phba->hbalock, iflag);
18301 return rpi;
18302 }
18303
18304
18305
18306
18307
18308 if (!phba->sli4_hba.rpi_hdrs_in_use) {
18309 spin_unlock_irqrestore(&phba->hbalock, iflag);
18310 return rpi;
18311 }
18312
18313
18314
18315
18316
18317
18318
18319 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18320 spin_unlock_irqrestore(&phba->hbalock, iflag);
18321 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18322 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18323 if (!rpi_hdr) {
18324 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18325 "2002 Error: could not grow rpi "
18326 "count\n");
18327 } else {
18328 lrpi = rpi_hdr->start_rpi;
18329 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18330 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18331 }
18332 }
18333
18334 return rpi;
18335 }
18336
18337
18338
18339
18340
18341
18342
18343
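/**
 * __lpfc_sli4_free_rpi - Release an RPI back to the pool (lock held)
 * @phba: pointer to lpfc hba data structure.
 * @rpi: the RPI to release.
 *
 * Clears the RPI's bit in the bitmask and decrements the counters; a
 * failed-allocation marker (LPFC_RPI_ALLOC_ERROR) is ignored and an RPI
 * that was not in use is only logged. The caller holds the hbalock.
 **/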
18344 static void
18345 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18346 {
18347
18348
18349
18350
18351 if (rpi == LPFC_RPI_ALLOC_ERROR)
18352 return;
18353
18354 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18355 phba->sli4_hba.rpi_count--;
18356 phba->sli4_hba.max_cfg_param.rpi_used--;
18357 } else {
18358 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18359 "2016 rpi %x not inuse\n",
18360 rpi);
18361 }
18362 }
18363
18364
18365
18366
18367
18368
18369
18370
18371 void
18372 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18373 {
18374 spin_lock_irq(&phba->hbalock);
18375 __lpfc_sli4_free_rpi(phba, rpi);
18376 spin_unlock_irq(&phba->hbalock);
18377 }
18378
18379
18380
18381
18382
18383
18384
18385
18386 void
18387 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18388 {
18389 kfree(phba->sli4_hba.rpi_bmask);
18390 kfree(phba->sli4_hba.rpi_ids);
18391 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
18392 }

/**
 * lpfc_sli4_resume_rpi - Resume an rpi on the port
 * @ndlp: pointer to lpfc nodelist data structure.
 * @cmpl: completion call-back.
 * @arg: data to load as mbox context information.
 *
 * This routine issues a RESUME_RPI mailbox command for the given node.
 **/
18401 int
18402 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18403 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
18404 {
18405 LPFC_MBOXQ_t *mboxq;
18406 struct lpfc_hba *phba = ndlp->phba;
18407 int rc;
18408
18409
18410 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18411 if (!mboxq)
18412 return -ENOMEM;
18413
18414
18415 lpfc_resume_rpi(mboxq, ndlp);
18416 if (cmpl) {
18417 mboxq->mbox_cmpl = cmpl;
18418 mboxq->ctx_buf = arg;
18419 mboxq->ctx_ndlp = ndlp;
18420 } else
18421 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18422 mboxq->vport = ndlp->vport;
18423 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18424 if (rc == MBX_NOT_FINISHED) {
18425 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18426 "2010 Resume RPI Mailbox failed "
18427 "status %d, mbxStatus x%x\n", rc,
18428 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18429 mempool_free(mboxq, phba->mbox_mem_pool);
18430 return -EIO;
18431 }
18432 return 0;
18433 }

/**
 * lpfc_sli4_init_vpi - Initialize the vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine activates a vpi with the port by issuing an INIT_VPI
 * mailbox command and waiting for its completion.
 *
 * Return 0 on success, -ENOMEM on allocation failure, -EIO on mailbox
 * failure.
 **/
18445 int
18446 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
18447 {
18448 LPFC_MBOXQ_t *mboxq;
18449 int rc = 0;
18450 int retval = MBX_SUCCESS;
18451 uint32_t mbox_tmo;
18452 struct lpfc_hba *phba = vport->phba;
18453 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18454 if (!mboxq)
18455 return -ENOMEM;
18456 lpfc_init_vpi(phba, mboxq, vport->vpi);
18457 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
18458 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
18459 if (rc != MBX_SUCCESS) {
18460 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
18461 "2022 INIT VPI Mailbox failed "
18462 "status %d, mbxStatus x%x\n", rc,
18463 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18464 retval = -EIO;
18465 }
18466 if (rc != MBX_TIMEOUT)
18467 mempool_free(mboxq, vport->phba->mbox_mem_pool);
18468
18469 return retval;
18470 }

/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * This routine reports the completion of the ADD_FCF_RECORD mailbox
 * command.  Any non-zero completion status other than STATUS_FCF_IN_USE
 * is logged as an error.
 **/
18481 static void
18482 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18483 {
18484 void *virt_addr;
18485 union lpfc_sli4_cfg_shdr *shdr;
18486 uint32_t shdr_status, shdr_add_status;
18487
18488 virt_addr = mboxq->sge_array->addr[0];
18489
18490 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18491 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18492 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18493
18494 if ((shdr_status || shdr_add_status) &&
18495 (shdr_status != STATUS_FCF_IN_USE))
18496 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18497 "2558 ADD_FCF_RECORD mailbox failed with "
18498 "status x%x add_status x%x\n",
18499 shdr_status, shdr_add_status);
18500
18501 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18502 }

/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * This routine manually adds a single FCF record. The caller must pass
 * a completely initialized FCF record.  This routine handles the
 * non-embedded mailbox operations.
 **/
18513 int
18514 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18515 {
18516 int rc = 0;
18517 LPFC_MBOXQ_t *mboxq;
18518 uint8_t *bytep;
18519 void *virt_addr;
18520 struct lpfc_mbx_sge sge;
18521 uint32_t alloc_len, req_len;
18522 uint32_t fcfindex;
18523
18524 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18525 if (!mboxq) {
18526 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18527 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18528 return -ENOMEM;
18529 }
18530
18531 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18532 sizeof(uint32_t);

/* Allocate DMA memory and set up the non-embedded mailbox command */
18535 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18536 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18537 req_len, LPFC_SLI4_MBX_NEMBED);
18538 if (alloc_len < req_len) {
18539 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18540 "2523 Allocated DMA memory size (x%x) is "
18541 "less than the requested DMA memory "
18542 "size (x%x)\n", alloc_len, req_len);
18543 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18544 return -ENOMEM;
18545 }

/*
 * Get the first SGE entry from the non-embedded DMA memory.  This
 * routine only uses a single SGE.
 */
18551 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18552 virt_addr = mboxq->sge_array->addr[0];

/*
 * Configure the FCF record for FCFI 0.  This is the driver's
 * hardcoded default.
 */
18557 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18558 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18559 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

/*
 * Copy the FCF record after the fcf index; the payload layout is the
 * config header, the 32-bit fcf index, then the whole fcf record.
 */
18566 bytep += sizeof(uint32_t);
18567 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18568 mboxq->vport = phba->pport;
18569 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18570 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18571 if (rc == MBX_NOT_FINISHED) {
18572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18573 "2515 ADD_FCF_RECORD mailbox failed with "
18574 "status 0x%x\n", rc);
18575 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18576 rc = -EIO;
18577 } else
18578 rc = 0;
18579
18580 return rc;
18581 }

/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to book keep.
 * @fcf_index: the index of the fcf record to add.
 *
 * This routine builds the driver's default FCF record.  The values used
 * are hardcoded.  This routine handles memory initialization.
 **/
18593 void
18594 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18595 struct fcf_record *fcf_record,
18596 uint16_t fcf_index)
18597 {
18598 memset(fcf_record, 0, sizeof(struct fcf_record));
18599 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18600 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18601 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18602 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18603 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18604 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18605 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18606 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18607 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18608 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18609 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18610 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18611 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18612 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18613 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18614 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18615 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18616
18617 if (phba->valid_vlan) {
18618 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18619 = 1 << (phba->vlan_id % 8);
18620 }
18621 }
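
/*
 * VLAN bitmap example (illustrative): vlan_id = 20 sets bit
 * (20 % 8) = 4 in vlan_bitmap[20 / 8] = vlan_bitmap[2], i.e.
 * vlan_bitmap[2] = 0x10.
 */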

/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index to fcf records.
 *
 * This routine scans the FCF table starting at @fcf_index, reading and
 * processing one FCF record at a time.
 *
 * Return 0 if the mailbox command is issued successfully, otherwise a
 * proper negative error code is returned.
 **/
18635 int
18636 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18637 {
18638 int rc = 0, error;
18639 LPFC_MBOXQ_t *mboxq;
18640
18641 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
18642 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
18643 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18644 if (!mboxq) {
18645 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18646 "2000 Failed to allocate mbox for "
18647 "READ_FCF cmd\n");
18648 error = -ENOMEM;
18649 goto fail_fcf_scan;
18650 }
18651
18652 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18653 if (rc) {
18654 error = -EINVAL;
18655 goto fail_fcf_scan;
18656 }
18657
18658 mboxq->vport = phba->pport;
18659 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
18660
18661 spin_lock_irq(&phba->hbalock);
18662 phba->hba_flag |= FCF_TS_INPROG;
18663 spin_unlock_irq(&phba->hbalock);
18664
18665 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18666 if (rc == MBX_NOT_FINISHED)
18667 error = -EIO;
18668 else {
/* Reset eligible FCF count for new scan */
18670 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
18671 phba->fcf.eligible_fcf_cnt = 0;
18672 error = 0;
18673 }
18674 fail_fcf_scan:
18675 if (error) {
18676 if (mboxq)
18677 lpfc_sli4_mbox_cmd_free(phba, mboxq);

/* FCF scan failed, clear FCF_TS_INPROG flag */
18680 phba->hba_flag &= ~FCF_TS_INPROG;
18681 spin_unlock_irq(&phba->hbalock);
18682 }
18683 return error;
18684 }

/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index to fcf records.
 *
 * This routine reads an FCF record indicated by @fcf_index for use in
 * FLOGI roundrobin FCF failover.
 *
 * Return 0 if the mailbox command is issued successfully, otherwise a
 * proper negative error code is returned.
 **/
18697 int
18698 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18699 {
18700 int rc = 0, error;
18701 LPFC_MBOXQ_t *mboxq;
18702
18703 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18704 if (!mboxq) {
18705 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18706 "2763 Failed to allocate mbox for "
18707 "READ_FCF cmd\n");
18708 error = -ENOMEM;
18709 goto fail_fcf_read;
18710 }
18711
18712 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18713 if (rc) {
18714 error = -EINVAL;
18715 goto fail_fcf_read;
18716 }
18717
18718 mboxq->vport = phba->pport;
18719 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18720 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18721 if (rc == MBX_NOT_FINISHED)
18722 error = -EIO;
18723 else
18724 error = 0;
18725
18726 fail_fcf_read:
18727 if (error && mboxq)
18728 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18729 return error;
18730 }

/**
 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index to fcf records.
 *
 * This routine reads an FCF record indicated by @fcf_index to determine
 * whether it is eligible for the FLOGI roundrobin failover list.
 *
 * Return 0 if the mailbox command is issued successfully, otherwise a
 * proper negative error code is returned.
 **/
18743 int
18744 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18745 {
18746 int rc = 0, error;
18747 LPFC_MBOXQ_t *mboxq;
18748
18749 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18750 if (!mboxq) {
18751 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18752 "2758 Failed to allocate mbox for "
18753 "READ_FCF cmd\n");
18754 error = -ENOMEM;
18755 goto fail_fcf_read;
18756 }
18757
18758 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18759 if (rc) {
18760 error = -EINVAL;
18761 goto fail_fcf_read;
18762 }
18763
18764 mboxq->vport = phba->pport;
18765 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18766 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18767 if (rc == MBX_NOT_FINISHED)
18768 error = -EIO;
18769 else
18770 error = 0;
18771
18772 fail_fcf_read:
18773 if (error && mboxq)
18774 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18775 return error;
18776 }

/**
 * lpfc_check_next_fcf_pri_level - Check next fcf priority level.
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from lpfc_sli4_fcf_rr_next_index_get when the
 * rr_bmask is empty.  FCF indices are put into the rr_bmask based on
 * their priority level, from the highest priority to the lowest.  This
 * routine searches the fcf_pri list for the next lowest priority group
 * and repopulates the rr_bmask with only those fcf indices.
 *
 * Returns 1 on success, 0 on failure.
 **/
18791 static int
18792 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18793 {
18794 uint16_t next_fcf_pri;
18795 uint16_t last_index;
18796 struct lpfc_fcf_pri *fcf_pri;
18797 int rc;
18798 int ret = 0;
18799
18800 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18801 LPFC_SLI4_FCF_TBL_INDX_MAX);
18802 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18803 "3060 Last IDX %d\n", last_index);
18804
18805
18806 spin_lock_irq(&phba->hbalock);
18807 if (list_empty(&phba->fcf.fcf_pri_list) ||
18808 list_is_singular(&phba->fcf.fcf_pri_list)) {
18809 spin_unlock_irq(&phba->hbalock);
18810 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18811 "3061 Last IDX %d\n", last_index);
18812 return 0;
18813 }
18814 spin_unlock_irq(&phba->hbalock);
18815
18816 next_fcf_pri = 0;

/*
 * Clear the rr_bmask and set all of the bits that are at this
 * priority.
 */
18821 memset(phba->fcf.fcf_rr_bmask, 0,
18822 sizeof(*phba->fcf.fcf_rr_bmask));
18823 spin_lock_irq(&phba->hbalock);
18824 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18825 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18826 continue;

/*
 * The first priority that has not had a failed FLOGI
 * will be the highest.
 */
18831 if (!next_fcf_pri)
18832 next_fcf_pri = fcf_pri->fcf_rec.priority;
18833 spin_unlock_irq(&phba->hbalock);
18834 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18835 rc = lpfc_sli4_fcf_rr_index_set(phba,
18836 fcf_pri->fcf_rec.fcf_index);
18837 if (rc)
18838 return 0;
18839 }
18840 spin_lock_irq(&phba->hbalock);
18841 }

/*
 * If next_fcf_pri was not set above and the list is not empty then
 * we have failed flogis on all of them. So reset flogi failed
 * and start at the beginning.
 */
18847 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
18848 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18849 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;

/*
 * The first priority that has not had a failed FLOGI
 * will be the highest.
 */
18854 if (!next_fcf_pri)
18855 next_fcf_pri = fcf_pri->fcf_rec.priority;
18856 spin_unlock_irq(&phba->hbalock);
18857 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18858 rc = lpfc_sli4_fcf_rr_index_set(phba,
18859 fcf_pri->fcf_rec.fcf_index);
18860 if (rc)
18861 return 0;
18862 }
18863 spin_lock_irq(&phba->hbalock);
18864 }
18865 } else
18866 ret = 1;
18867 spin_unlock_irq(&phba->hbalock);
18868
18869 return ret;
18870 }
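
/*
 * Repopulation example (illustrative): with fcf_pri entries of
 * priority {2, 2, 5} and no FLOGI failures, next_fcf_pri latches 2 on
 * the first eligible entry, so only the two priority-2 indices are set
 * in rr_bmask; the priority-5 entry waits for a later rebuild.
 */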

/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine gets the next eligible FCF record index in a roundrobin
 * fashion. If the next eligible FCF record index equals the initial
 * roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) is
 * returned; otherwise the next eligible FCF record's index is returned.
 **/
18881 uint16_t
18882 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
18883 {
18884 uint16_t next_fcf_index;
18885
18886 initial_priority:
18887
18888 next_fcf_index = phba->fcf.current_rec.fcf_indx;
18889
18890 next_priority:
18891
18892 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
18893 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18894 LPFC_SLI4_FCF_TBL_INDX_MAX,
18895 next_fcf_index);

/* Wrap around condition on phba->fcf.fcf_rr_bmask */
18898 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
/*
 * If we have wrapped then we need to clear the bits that
 * have been tested so that we can detect when we should
 * change the priority level.
 */
18904 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18905 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
18906 }

/* Check roundrobin failover list empty condition */
18910 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18911 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
/*
 * If the next fcf index is not found, check whether there are
 * lower priority level fcf's in the fcf_priority list.  Set up
 * the rr_bmask with all of the available fcfs at that level and
 * continue the selection process.
 */
18918 if (lpfc_check_next_fcf_pri_level(phba))
18919 goto initial_priority;
18920 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18921 "2844 No roundrobin failover FCF available\n");
18922
18923 return LPFC_FCOE_FCF_NEXT_NONE;
18924 }
18925
18926 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
18927 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
18928 LPFC_FCF_FLOGI_FAILED) {
18929 if (list_is_singular(&phba->fcf.fcf_pri_list))
18930 return LPFC_FCOE_FCF_NEXT_NONE;
18931
18932 goto next_priority;
18933 }
18934
18935 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18936 "2845 Get next roundrobin failover FCF (x%x)\n",
18937 next_fcf_index);
18938
18939 return next_fcf_index;
18940 }
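
/*
 * Round-robin walk example (illustrative): with current_rec.fcf_indx = 5
 * and bits 2 and 7 set in fcf_rr_bmask, the search starts at index 6 and
 * find_next_bit() returns 7 as the next failover candidate.  If the walk
 * wraps all the way back to index 5, LPFC_FCOE_FCF_NEXT_NONE is returned
 * instead.
 */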

/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update
 *
 * This routine puts the eligible FCF record index into the roundrobin
 * FCF failover bmask. If the FCF index is out of range it returns
 * -EINVAL, otherwise it returns 0.
 **/
18954 int
18955 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18956 {
18957 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18958 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18959 "2610 FCF (x%x) reached driver's book "
18960 "keeping dimension:x%x\n",
18961 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18962 return -EINVAL;
18963 }
18964
18965 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18966
18967 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18968 "2790 Set FCF (x%x) to roundrobin FCF failover "
18969 "bmask\n", fcf_index);
18970
18971 return 0;
18972 }

/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to clear
 *
 * This routine clears the FCF record index from the roundrobin failover
 * bmask and removes the matching entry from the fcf_pri list.
 **/
18983 void
18984 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18985 {
18986 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
18987 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18988 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18989 "2762 FCF (x%x) reached driver's book "
18990 "keeping dimension:x%x\n",
18991 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18992 return;
18993 }
18994
18995 spin_lock_irq(&phba->hbalock);
18996 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18997 list) {
18998 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18999 list_del_init(&fcf_pri->list);
19000 break;
19001 }
19002 }
19003 spin_unlock_irq(&phba->hbalock);
19004 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19005
19006 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19007 "2791 Clear FCF (x%x) from roundrobin failover "
19008 "bmask\n", fcf_index);
19009 }

/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to mailbox object.
 *
 * This routine reports the completion of the REDISCOVER_FCF_TABLE
 * mailbox command.
 **/
19019 static void
19020 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
19021 {
19022 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19023 uint32_t shdr_status, shdr_add_status;
19024
19025 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19026
19027 shdr_status = bf_get(lpfc_mbox_hdr_status,
19028 &redisc_fcf->header.cfg_shdr.response);
19029 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19030 &redisc_fcf->header.cfg_shdr.response);
19031 if (shdr_status || shdr_add_status) {
19032 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19033 "2746 Requesting for FCF rediscovery failed "
19034 "status x%x add_status x%x\n",
19035 shdr_status, shdr_add_status);
19036 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
19037 spin_lock_irq(&phba->hbalock);
19038 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
19039 spin_unlock_irq(&phba->hbalock);
/*
 * CVL event triggered FCF rediscover request failed,
 * last resort to re-try current registered FCF entry.
 */
19044 lpfc_retry_pport_discovery(phba);
19045 } else {
19046 spin_lock_irq(&phba->hbalock);
19047 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
19048 spin_unlock_irq(&phba->hbalock);
/*
 * DEAD FCF event triggered FCF rediscover request
 * failed, last resort to fail over as a link down
 * to FCF registration.
 */
19054 lpfc_sli4_fcf_dead_failthrough(phba);
19055 }
19056 } else {
19057 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19058 "2775 Start FCF rediscover quiescent timer\n");
/*
 * Start FCF rediscovery wait timer for pending FCF
 * before rescan FCF record table.
 */
19063 lpfc_fcf_redisc_wait_start_timer(phba);
19064 }
19065
19066 mempool_free(mbox, phba->mbox_mem_pool);
19067 }

/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine requests a rediscovery of the entire FCF table by the
 * port.
 **/
19076 int
19077 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19078 {
19079 LPFC_MBOXQ_t *mbox;
19080 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19081 int rc, length;

/* Cancel retry delay timers to all vports before FCF rediscover */
19084 lpfc_cancel_all_vport_retry_delay_timer(phba);
19085
19086 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19087 if (!mbox) {
19088 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19089 "2745 Failed to allocate mbox for "
19090 "requesting FCF rediscover.\n");
19091 return -ENOMEM;
19092 }
19093
19094 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19095 sizeof(struct lpfc_sli4_cfg_mhdr));
19096 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19097 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19098 length, LPFC_SLI4_MBX_EMBED);
19099
19100 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19101
19102 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19103
19104
19105 mbox->vport = phba->pport;
19106 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19107 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19108
19109 if (rc == MBX_NOT_FINISHED) {
19110 mempool_free(mbox, phba->mbox_mem_pool);
19111 return -EIO;
19112 }
19113 return 0;
19114 }

/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine of last resort for the FCF DEAD
 * event when the driver failed to perform a fast FCF failover.
 **/
19123 void
19124 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19125 {
19126 uint32_t link_state;

/*
 * Last resort as FCF DEAD event failover will treat this as
 * a link down, but save the link state because we don't want
 * it to be changed to Link Down unless it is already down.
 */
19133 link_state = phba->link_state;
19134 lpfc_linkdown(phba);
19135 phba->link_state = link_state;
19136
19137
19138 lpfc_unregister_unused_fcf(phba);
19139 }

/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI3 port configure region 23 data through a memory
 * dump mailbox command. On success the size of the retrieved data is
 * returned, otherwise 0.
 **/
19150 static uint32_t
19151 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19152 {
19153 LPFC_MBOXQ_t *pmb = NULL;
19154 MAILBOX_t *mb;
19155 uint32_t offset = 0;
19156 int rc;
19157
19158 if (!rgn23_data)
19159 return 0;
19160
19161 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19162 if (!pmb) {
19163 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19164 "2600 failed to allocate mailbox memory\n");
19165 return 0;
19166 }
19167 mb = &pmb->u.mb;
19168
19169 do {
19170 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19171 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19172
19173 if (rc != MBX_SUCCESS) {
19174 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19175 "2601 failed to read config "
19176 "region 23, rc 0x%x Status 0x%x\n",
19177 rc, mb->mbxStatus);
19178 mb->un.varDmp.word_cnt = 0;
19179 }

/*
 * dump mem may return a zero when finished or we got a
 * mailbox error, either way we are done.
 */
19184 if (mb->un.varDmp.word_cnt == 0)
19185 break;
19186 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
19187 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
19188
19189 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19190 rgn23_data + offset,
19191 mb->un.varDmp.word_cnt);
19192 offset += mb->un.varDmp.word_cnt;
19193 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
19194
19195 mempool_free(pmb, phba->mbox_mem_pool);
19196 return offset;
19197 }

/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI4 port configure region 23 data through a memory
 * dump mailbox command. On success the size of the retrieved data is
 * returned, otherwise 0.
 **/
19208 static uint32_t
19209 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19210 {
19211 LPFC_MBOXQ_t *mboxq = NULL;
19212 struct lpfc_dmabuf *mp = NULL;
19213 struct lpfc_mqe *mqe;
19214 uint32_t data_length = 0;
19215 int rc;
19216
19217 if (!rgn23_data)
19218 return 0;
19219
19220 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19221 if (!mboxq) {
19222 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19223 "3105 failed to allocate mailbox memory\n");
19224 return 0;
19225 }
19226
19227 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19228 goto out;
19229 mqe = &mboxq->u.mqe;
19230 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19231 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19232 if (rc)
19233 goto out;
19234 data_length = mqe->un.mb_words[5];
19235 if (data_length == 0)
19236 goto out;
19237 if (data_length > DMP_RGN23_SIZE) {
19238 data_length = 0;
19239 goto out;
19240 }
19241 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19242 out:
19243 mempool_free(mboxq, phba->mbox_mem_pool);
19244 if (mp) {
19245 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19246 kfree(mp);
19247 }
19248 return data_length;
19249 }

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLV for the port state to
 * decide if the user disabled the port. If the TLV indicates the port is
 * disabled, the hba_flag is set accordingly.
 **/
19259 void
19260 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19261 {
19262 uint8_t *rgn23_data = NULL;
19263 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19264 uint32_t offset = 0;

/* Get adapter Region 23 data */
19267 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19268 if (!rgn23_data)
19269 goto out;
19270
19271 if (phba->sli_rev < LPFC_SLI_REV4)
19272 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19273 else {
19274 if_type = bf_get(lpfc_sli_intf_if_type,
19275 &phba->sli4_hba.sli_intf);
19276 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19277 goto out;
19278 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19279 }
19280
19281 if (!data_size)
19282 goto out;

/* Check the region signature first */
19285 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19286 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19287 "2619 Config region 23 has bad signature\n");
19288 goto out;
19289 }
19290 offset += 4;

/* Check the data structure version */
19293 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19294 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19295 "2620 Config region 23 has bad version\n");
19296 goto out;
19297 }
19298 offset += 4;

/* Parse TLV data */
19301 while (offset < data_size) {
19302 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19303 break;

/*
 * If the TLV is not a driver specific TLV or the driver id is
 * not the linux driver id, skip the record.
 */
19308 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19309 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19310 (rgn23_data[offset + 3] != 0)) {
19311 offset += rgn23_data[offset + 1] * 4 + 4;
19312 continue;
19313 }

/* Driver found the driver specific TLV */
19316 sub_tlv_len = rgn23_data[offset + 1] * 4;
19317 offset += 4;
19318 tlv_offset = 0;

/*
 * Search for configured port state sub-TLV.
 */
19323 while ((offset < data_size) &&
19324 (tlv_offset < sub_tlv_len)) {
19325 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19326 offset += 4;
19327 tlv_offset += 4;
19328 break;
19329 }
19330 if (rgn23_data[offset] != PORT_STE_TYPE) {
/* Read the record length before advancing offset so
 * that tlv_offset does not use the already-advanced
 * index.
 */
tlv_offset += rgn23_data[offset + 1] * 4 + 4;
offset += rgn23_data[offset + 1] * 4 + 4;
19333 continue;
19334 }

/* This HBA contains PORT_STE_TYPE */
19337 if (!rgn23_data[offset + 2])
19338 phba->hba_flag |= LINK_DISABLED;
19339
19340 goto out;
19341 }
19342 }
19343
19344 out:
19345 kfree(rgn23_data);
19346 return;
19347 }
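
/*
 * Region 23 layout assumed by the parser above: a 4-byte signature, a
 * 4-byte version word, then TLV records where byte 0 is the type and
 * byte 1 the length in words.  A driver specific TLV
 * (DRIVER_SPECIFIC_TYPE/LINUX_DRIVER_ID) wraps sub-TLVs, one of which
 * (PORT_STE_TYPE) carries the port state byte tested above.
 */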

/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the
 * port. The mailbox command will be constructed using the dma buffers
 * described in @dmabuf_list to create a list of BDEs, filling in as many
 * BDEs as the embedded mailbox can support. The @offset variable is used
 * to indicate the starting offset of the transfer and also returns the
 * offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Return 0 if successful - otherwise failure.
 **/
19368 int
19369 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19370 uint32_t size, uint32_t *offset)
19371 {
19372 struct lpfc_mbx_wr_object *wr_object;
19373 LPFC_MBOXQ_t *mbox;
19374 int rc = 0, i = 0;
19375 uint32_t shdr_status, shdr_add_status, shdr_change_status;
19376 uint32_t mbox_tmo;
19377 struct lpfc_dmabuf *dmabuf;
19378 uint32_t written = 0;
19379 bool check_change_status = false;
19380
19381 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19382 if (!mbox)
19383 return -ENOMEM;
19384
19385 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19386 LPFC_MBOX_OPCODE_WRITE_OBJECT,
19387 sizeof(struct lpfc_mbx_wr_object) -
19388 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19389
19390 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19391 wr_object->u.request.write_offset = *offset;
19392 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19393 wr_object->u.request.object_name[0] =
19394 cpu_to_le32(wr_object->u.request.object_name[0]);
19395 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19396 list_for_each_entry(dmabuf, dmabuf_list, list) {
19397 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19398 break;
19399 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19400 wr_object->u.request.bde[i].addrHigh =
19401 putPaddrHigh(dmabuf->phys);
19402 if (written + SLI4_PAGE_SIZE >= size) {
19403 wr_object->u.request.bde[i].tus.f.bdeSize =
19404 (size - written);
19405 written += (size - written);
19406 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
19407 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19408 check_change_status = true;
19409 } else {
19410 wr_object->u.request.bde[i].tus.f.bdeSize =
19411 SLI4_PAGE_SIZE;
19412 written += SLI4_PAGE_SIZE;
19413 }
19414 i++;
19415 }
19416 wr_object->u.request.bde_count = i;
19417 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19418 if (!phba->sli4_hba.intr_enable)
19419 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19420 else {
19421 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
19422 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19423 }
19424
19425 shdr_status = bf_get(lpfc_mbox_hdr_status,
19426 &wr_object->header.cfg_shdr.response);
19427 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19428 &wr_object->header.cfg_shdr.response);
19429 if (check_change_status) {
19430 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19431 &wr_object->u.response);
19432 switch (shdr_change_status) {
19433 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19434 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19435 "3198 Firmware write complete: System "
19436 "reboot required to instantiate\n");
19437 break;
19438 case (LPFC_CHANGE_STATUS_FW_RESET):
19439 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19440 "3199 Firmware write complete: Firmware"
19441 " reset required to instantiate\n");
19442 break;
19443 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19444 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19445 "3200 Firmware write complete: Port "
19446 "Migration or PCI Reset required to "
19447 "instantiate\n");
19448 break;
19449 case (LPFC_CHANGE_STATUS_PCI_RESET):
19450 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19451 "3201 Firmware write complete: PCI "
19452 "Reset required to instantiate\n");
19453 break;
19454 default:
19455 break;
19456 }
19457 }
19458 if (rc != MBX_TIMEOUT)
19459 mempool_free(mbox, phba->mbox_mem_pool);
19460 if (shdr_status || shdr_add_status || rc) {
19461 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19462 "3025 Write Object mailbox failed with "
19463 "status x%x add_status x%x, mbx status x%x\n",
19464 shdr_status, shdr_add_status, rc);
19465 rc = -ENXIO;
19466 *offset = shdr_add_status;
19467 } else
19468 *offset += wr_object->u.response.actual_write_length;
19469 return rc;
19470 }
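
/*
 * BDE chunking example (illustrative): writing a 10000-byte object with
 * SLI4_PAGE_SIZE = 4096 consumes two full-page BDEs (written = 8192)
 * plus a final 1808-byte BDE that sets the eof and eas bits, so the
 * mailbox carries bde_count = 3 and write_length = 10000.
 */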

/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function cleans up the vport's discovery mailbox commands
 * (REG_LOGIN64 and REG_VPI) that are still pending on the mailbox
 * queues and releases their associated resources.
 **/
19481 void
19482 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19483 {
19484 struct lpfc_hba *phba = vport->phba;
19485 LPFC_MBOXQ_t *mb, *nextmb;
19486 struct lpfc_dmabuf *mp;
19487 struct lpfc_nodelist *ndlp;
19488 struct lpfc_nodelist *act_mbx_ndlp = NULL;
19489 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
19490 LIST_HEAD(mbox_cmd_list);
19491 uint8_t restart_loop;
19492

/* Clean up internally queued mailbox commands with the vport */
19494 spin_lock_irq(&phba->hbalock);
19495 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19496 if (mb->vport != vport)
19497 continue;
19498
19499 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19500 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19501 continue;
19502
19503 list_del(&mb->list);
19504 list_add_tail(&mb->list, &mbox_cmd_list);
19505 }
19506
19507 mb = phba->sli.mbox_active;
19508 if (mb && (mb->vport == vport)) {
19509 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19510 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19511 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19512 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19513 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
/* Put reference count for delayed processing */
19515 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
/* Unregister the RPI when mailbox complete */
19517 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19518 }
19519 }
19520
19521 do {
19522 restart_loop = 0;
19523 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
/*
 * If this mailbox is already processed or it is
 * for another vport ignore it.
 */
19528 if ((mb->vport != vport) ||
19529 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19530 continue;
19531
19532 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19533 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19534 continue;
19535
19536 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19537 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19538 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
/* Unregister the RPI when mailbox complete */
19540 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19541 restart_loop = 1;
19542 spin_unlock_irq(&phba->hbalock);
19543 spin_lock(shost->host_lock);
19544 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19545 spin_unlock(shost->host_lock);
19546 spin_lock_irq(&phba->hbalock);
19547 break;
19548 }
19549 }
19550 } while (restart_loop);
19551
19552 spin_unlock_irq(&phba->hbalock);

/* Release the cleaned-up mailbox commands */
19555 while (!list_empty(&mbox_cmd_list)) {
19556 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
19557 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19558 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
19559 if (mp) {
19560 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19561 kfree(mp);
19562 }
19563 mb->ctx_buf = NULL;
19564 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19565 mb->ctx_ndlp = NULL;
19566 if (ndlp) {
19567 spin_lock(shost->host_lock);
19568 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19569 spin_unlock(shost->host_lock);
19570 lpfc_nlp_put(ndlp);
19571 }
19572 }
19573 mempool_free(mb, phba->mbox_mem_pool);
19574 }

/* Release the ndlp with the cleaned-up active mailbox command */
19577 if (act_mbx_ndlp) {
19578 spin_lock(shost->host_lock);
19579 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19580 spin_unlock(shost->host_lock);
19581 lpfc_nlp_put(act_mbx_ndlp);
19582 }
19583 }

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq to the adapter.
 * For SLI4 adapters, the txq contains ELS IOCBs that have been deferred
 * because there are no SGLs.  This congestion can occur with large
 * vport counts during node discovery.
 **/
19596 uint32_t
19597 lpfc_drain_txq(struct lpfc_hba *phba)
19598 {
19599 LIST_HEAD(completions);
19600 struct lpfc_sli_ring *pring;
19601 struct lpfc_iocbq *piocbq = NULL;
19602 unsigned long iflags = 0;
19603 char *fail_msg = NULL;
19604 struct lpfc_sglq *sglq;
19605 union lpfc_wqe128 wqe;
19606 uint32_t txq_cnt = 0;
19607 struct lpfc_queue *wq;
19608
19609 if (phba->link_flag & LS_MDS_LOOPBACK) {
/* MDS WQEs are posted only to first WQ */
19611 wq = phba->sli4_hba.hdwq[0].io_wq;
19612 if (unlikely(!wq))
19613 return 0;
19614 pring = wq->pring;
19615 } else {
19616 wq = phba->sli4_hba.els_wq;
19617 if (unlikely(!wq))
19618 return 0;
19619 pring = lpfc_phba_elsring(phba);
19620 }
19621
19622 if (unlikely(!pring) || list_empty(&pring->txq))
19623 return 0;
19624
19625 spin_lock_irqsave(&pring->ring_lock, iflags);
19626 list_for_each_entry(piocbq, &pring->txq, list) {
19627 txq_cnt++;
19628 }
19629
19630 if (txq_cnt > pring->txq_max)
19631 pring->txq_max = txq_cnt;
19632
19633 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19634
19635 while (!list_empty(&pring->txq)) {
19636 spin_lock_irqsave(&pring->ring_lock, iflags);
19637
19638 piocbq = lpfc_sli_ringtx_get(phba, pring);
19639 if (!piocbq) {
19640 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19641 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19642 "2823 txq empty and txq_cnt is %d\n ",
19643 txq_cnt);
19644 break;
19645 }
19646 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
19647 if (!sglq) {
19648 __lpfc_sli_ringtx_put(phba, pring, piocbq);
19649 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19650 break;
19651 }
19652 txq_cnt--;

/*
 * The xri and iocb resources secured,
 * attempt to issue request
 */
19657 piocbq->sli4_lxritag = sglq->sli4_lxritag;
19658 piocbq->sli4_xritag = sglq->sli4_xritag;
19659 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19660 fail_msg = "to convert bpl to sgl";
19661 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
19662 fail_msg = "to convert iocb to wqe";
19663 else if (lpfc_sli4_wq_put(wq, &wqe))
19664 fail_msg = " - Wq is full";
19665 else
19666 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19667
19668 if (fail_msg) {
/* Failed means we can't issue and need to cancel */
19670 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19671 "2822 IOCB failed %s iotag 0x%x "
19672 "xri 0x%x\n",
19673 fail_msg,
19674 piocbq->iotag, piocbq->sli4_xritag);
19675 list_add_tail(&piocbq->list, &completions);
19676 }
19677 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19678 }

/* Cancel all the IOCBs that cannot be issued */
19681 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19682 IOERR_SLI_ABORTED);
19683
19684 return txq_cnt;
19685 }
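
/*
 * Note on the failure path above: an iocb that cannot be converted or
 * posted is moved to the local completions list and finished through
 * lpfc_sli_cancel_iocbs() with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED,
 * so callers always see a terminal status for drained requests.
 */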

/**
 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @pwqeq: Pointer to command WQE.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the WQE to a sgl list
 * for the sli4 hardware. The physical address of the bpl/bde is
 * converted back to a virtual address. If the WQE contains a BPL then
 * the list of BDEs is converted to sli4_sge's; if the WQE contains a
 * single BDE then it is converted to a single sli4_sge. The WQE is
 * still in cpu endianness so the contents of the bpl can be used
 * without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/
19704 static uint16_t
19705 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
19706 struct lpfc_sglq *sglq)
19707 {
19708 uint16_t xritag = NO_XRI;
19709 struct ulp_bde64 *bpl = NULL;
19710 struct ulp_bde64 bde;
19711 struct sli4_sge *sgl = NULL;
19712 struct lpfc_dmabuf *dmabuf;
19713 union lpfc_wqe128 *wqe;
19714 int numBdes = 0;
19715 int i = 0;
19716 uint32_t offset = 0;
19717 int inbound = 0;
19718 uint32_t cmd;
19719
19720 if (!pwqeq || !sglq)
19721 return xritag;
19722
19723 sgl = (struct sli4_sge *)sglq->sgl;
19724 wqe = &pwqeq->wqe;
19725 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
19726
19727 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
19728 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
19729 return sglq->sli4_xritag;
19730 numBdes = pwqeq->rsvd2;
19731 if (numBdes) {
/*
 * The addrHigh and addrLow fields within the WQE
 * have not been byteswapped yet so there is no
 * need to swap them back.
 */
19736 if (pwqeq->context3)
19737 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
19738 else
19739 return xritag;
19740
19741 bpl = (struct ulp_bde64 *)dmabuf->virt;
19742 if (!bpl)
19743 return xritag;
19744
19745 for (i = 0; i < numBdes; i++) {
/* Should already be byte swapped. */
19747 sgl->addr_hi = bpl->addrHigh;
19748 sgl->addr_lo = bpl->addrLow;
19749
19750 sgl->word2 = le32_to_cpu(sgl->word2);
19751 if ((i+1) == numBdes)
19752 bf_set(lpfc_sli4_sge_last, sgl, 1);
19753 else
19754 bf_set(lpfc_sli4_sge_last, sgl, 0);
19755
/* swap the size field back to the cpu so we
 * can assign it to the sgl.
 */
19758 bde.tus.w = le32_to_cpu(bpl->tus.w);
19759 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
19760
/* The offsets in the sgl need to be accumulated
 * separately for the request and reply lists.
 * The request is always first, the reply follows.
 */
19764 switch (cmd) {
19765 case CMD_GEN_REQUEST64_WQE:
/* add up the reply sg entries */
19767 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
19768 inbound++;
/* first inbound? reset the offset */
19770 if (inbound == 1)
19771 offset = 0;
19772 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19773 bf_set(lpfc_sli4_sge_type, sgl,
19774 LPFC_SGE_TYPE_DATA);
19775 offset += bde.tus.f.bdeSize;
19776 break;
19777 case CMD_FCP_TRSP64_WQE:
19778 bf_set(lpfc_sli4_sge_offset, sgl, 0);
19779 bf_set(lpfc_sli4_sge_type, sgl,
19780 LPFC_SGE_TYPE_DATA);
19781 break;
19782 case CMD_FCP_TSEND64_WQE:
19783 case CMD_FCP_TRECEIVE64_WQE:
19784 bf_set(lpfc_sli4_sge_type, sgl,
19785 bpl->tus.f.bdeFlags);
19786 if (i < 3)
19787 offset = 0;
19788 else
19789 offset += bde.tus.f.bdeSize;
19790 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19791 break;
19792 }
19793 sgl->word2 = cpu_to_le32(sgl->word2);
19794 bpl++;
19795 sgl++;
19796 }
19797 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
/*
 * The addrHigh and addrLow fields of the BDE have not
 * been byteswapped yet so they need to be swapped
 * before putting them in the sgl.
 */
19802 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
19803 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
19804 sgl->word2 = le32_to_cpu(sgl->word2);
19805 bf_set(lpfc_sli4_sge_last, sgl, 1);
19806 sgl->word2 = cpu_to_le32(sgl->word2);
19807 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
19808 }
19809 return sglq->sli4_xritag;
19810 }
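
/*
 * Conversion sketch (illustrative): a GEN_REQUEST64 WQE with a two-entry
 * BPL yields two sli4_sge entries; the first keeps offset 0, the second
 * carries offset = bdeSize of the first, and only the last SGE has the
 * lpfc_sli4_sge_last bit set.
 */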

/**
 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
 * @phba: Pointer to HBA context object.
 * @qp: Pointer to HDW queue.
 * @pwqe: Pointer to command WQE.
 **/
19818 int
19819 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
19820 struct lpfc_iocbq *pwqe)
19821 {
19822 union lpfc_wqe128 *wqe = &pwqe->wqe;
19823 struct lpfc_nvmet_rcv_ctx *ctxp;
19824 struct lpfc_queue *wq;
19825 struct lpfc_sglq *sglq;
19826 struct lpfc_sli_ring *pring;
19827 unsigned long iflags;
19828 uint32_t ret = 0;

/* NVME_LS and NVME_LS ABTS requests. */
19831 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
19832 pring = phba->sli4_hba.nvmels_wq->pring;
19833 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19834 qp, wq_access);
19835 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
19836 if (!sglq) {
19837 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19838 return WQE_BUSY;
19839 }
19840 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19841 pwqe->sli4_xritag = sglq->sli4_xritag;
19842 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
19843 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19844 return WQE_ERROR;
19845 }
19846 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19847 pwqe->sli4_xritag);
19848 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
19849 if (ret) {
19850 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19851 return ret;
19852 }
19853
19854 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19855 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19856
19857 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
19858 return 0;
19859 }

/* NVME_FCREQ and NVME_ABTS requests */
19862 if (pwqe->iocb_flag & LPFC_IO_NVME) {
19863
19864 wq = qp->io_wq;
19865 pring = wq->pring;
19866
19867 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
19868
19869 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19870 qp, wq_access);
19871 ret = lpfc_sli4_wq_put(wq, wqe);
19872 if (ret) {
19873 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19874 return ret;
19875 }
19876 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19877 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19878
19879 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
19880 return 0;
19881 }

/* NVMET requests */
19884 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
19885
19886 wq = qp->io_wq;
19887 pring = wq->pring;
19888
19889 ctxp = pwqe->context2;
19890 sglq = ctxp->ctxbuf->sglq;
19891 if (pwqe->sli4_xritag == NO_XRI) {
19892 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19893 pwqe->sli4_xritag = sglq->sli4_xritag;
19894 }
19895 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19896 pwqe->sli4_xritag);
19897 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
19898
19899 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19900 qp, wq_access);
19901 ret = lpfc_sli4_wq_put(wq, wqe);
19902 if (ret) {
19903 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19904 return ret;
19905 }
19906 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19907 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19908
19909 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
19910 return 0;
19911 }
19912 return WQE_ERROR;
19913 }
19914
19915 #ifdef LPFC_MXP_STAT
/**
 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine takes a snapshot of the pbl, pvt and busy counts.
 * stat_snapshot_taken is incremented each time this routine runs; when
 * it equals LPFC_MXP_SNAPSHOT_TAKEN, a snapshot of the pool counters
 * is recorded.
 **/
19930 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
19931 {
19932 struct lpfc_sli4_hdw_queue *qp;
19933 struct lpfc_multixri_pool *multixri_pool;
19934 struct lpfc_pvt_pool *pvt_pool;
19935 struct lpfc_pbl_pool *pbl_pool;
19936 u32 txcmplq_cnt;
19937
19938 qp = &phba->sli4_hba.hdwq[hwqid];
19939 multixri_pool = qp->p_multixri_pool;
19940 if (!multixri_pool)
19941 return;
19942
19943 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
19944 pvt_pool = &qp->p_multixri_pool->pvt_pool;
19945 pbl_pool = &qp->p_multixri_pool->pbl_pool;
19946 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
19947
19948 multixri_pool->stat_pbl_count = pbl_pool->count;
19949 multixri_pool->stat_pvt_count = pvt_pool->count;
19950 multixri_pool->stat_busy_count = txcmplq_cnt;
19951 }
19952
19953 multixri_pool->stat_snapshot_taken++;
19954 }
19955 #endif

/**
 * lpfc_adjust_pvt_pool_count - Adjust private pool count
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine moves some XRIs from the private to the public pool when
 * the private pool is not busy.
 **/
19965 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
19966 {
19967 struct lpfc_multixri_pool *multixri_pool;
19968 u32 io_req_count;
19969 u32 prev_io_req_count;
19970
19971 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
19972 if (!multixri_pool)
19973 return;
19974 io_req_count = multixri_pool->io_req_count;
19975 prev_io_req_count = multixri_pool->prev_io_req_count;
19976
19977 if (prev_io_req_count != io_req_count) {
/* Private pool is busy */
19979 multixri_pool->prev_io_req_count = io_req_count;
19980 } else {
/* Private pool is not busy.
 * Move XRIs from private to public pool.
 */
19984 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
19985 }
19986 }
19987

/**
 * lpfc_adjust_high_watermark - Adjust high watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine sets the high watermark to the number of outstanding
 * XRIs, clamped between xri_limit/2 and xri_limit.
 **/
19996 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
19997 {
19998 u32 new_watermark;
19999 u32 watermark_max;
20000 u32 watermark_min;
20001 u32 xri_limit;
20002 u32 txcmplq_cnt;
20003 u32 abts_io_bufs;
20004 struct lpfc_multixri_pool *multixri_pool;
20005 struct lpfc_sli4_hdw_queue *qp;
20006
20007 qp = &phba->sli4_hba.hdwq[hwqid];
20008 multixri_pool = qp->p_multixri_pool;
20009 if (!multixri_pool)
20010 return;
20011 xri_limit = multixri_pool->xri_limit;
20012
20013 watermark_max = xri_limit;
20014 watermark_min = xri_limit / 2;
20015
20016 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20017 abts_io_bufs = qp->abts_scsi_io_bufs;
20018 abts_io_bufs += qp->abts_nvme_io_bufs;
20019
20020 new_watermark = txcmplq_cnt + abts_io_bufs;
20021 new_watermark = min(watermark_max, new_watermark);
20022 new_watermark = max(watermark_min, new_watermark);
20023 multixri_pool->pvt_pool.high_watermark = new_watermark;
20024
20025 #ifdef LPFC_MXP_STAT
20026 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
20027 new_watermark);
20028 #endif
20029 }
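
/*
 * Watermark clamp example (illustrative): with xri_limit = 256,
 * txcmplq_cnt = 40 and 8 aborted bufs, new_watermark = 48 is below
 * watermark_min = 128, so the high watermark is raised to 128.  A busy
 * queue with 300 outstanding XRIs would instead be clamped down to
 * watermark_max = 256.
 */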

/**
 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine is called when the private pool is not busy. It moves
 * XRIs from the private pool to the public pool, reducing the private
 * pool's count to its low watermark.
 **/
20041 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20042 {
20043 struct lpfc_pbl_pool *pbl_pool;
20044 struct lpfc_pvt_pool *pvt_pool;
20045 struct lpfc_sli4_hdw_queue *qp;
20046 struct lpfc_io_buf *lpfc_ncmd;
20047 struct lpfc_io_buf *lpfc_ncmd_next;
20048 unsigned long iflag;
20049 struct list_head tmp_list;
20050 u32 tmp_count;
20051
20052 qp = &phba->sli4_hba.hdwq[hwqid];
20053 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20054 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20055 tmp_count = 0;
20056
20057 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20058 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
20059
20060 if (pvt_pool->count > pvt_pool->low_watermark) {
/* Step 1: move (all - low_watermark) from pvt_pool
 * to pbl_pool
 */

/* Moving low_watermark entries to tmp_list */
20066 INIT_LIST_HEAD(&tmp_list);
20067 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20068 &pvt_pool->list, list) {
20069 list_move_tail(&lpfc_ncmd->list, &tmp_list);
20070 tmp_count++;
20071 if (tmp_count >= pvt_pool->low_watermark)
20072 break;
20073 }

/* Move all bufs from pvt_pool to pbl_pool */
20076 list_splice_init(&pvt_pool->list, &pbl_pool->list);

/* Move all bufs from tmp_list to pvt_pool */
20079 list_splice(&tmp_list, &pvt_pool->list);
20080
20081 pbl_pool->count += (pvt_pool->count - tmp_count);
20082 pvt_pool->count = tmp_count;
20083 } else {
20084
20085 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20086 pbl_pool->count += pvt_pool->count;
20087 pvt_pool->count = 0;
20088 }
20089
20090 spin_unlock(&pvt_pool->lock);
20091 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20092 }
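
/*
 * Splice example (illustrative): with pvt_pool->count = 50 and
 * low_watermark = 16, the first 16 bufs are parked on tmp_list, the
 * remaining 34 are spliced onto pbl_pool, and tmp_list is spliced back
 * so pvt_pool ends exactly at its low watermark.
 */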

/**
 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure
 * @qp: pointer to HDW queue
 * @pbl_pool: specified public free XRI pool
 * @pvt_pool: specified private free XRI pool
 * @count: number of XRIs to move
 *
 * This routine tries to move some free common bufs from the specified
 * pbl_pool to the specified pvt_pool. It might move less than count
 * XRIs if there are not enough in the public pool.
 *
 * Return:
 *   true  - XRIs were moved from pbl_pool to pvt_pool
 *   false - the specified pbl_pool is empty or locked by someone else
 **/
20110 static bool
20111 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20112 struct lpfc_pbl_pool *pbl_pool,
20113 struct lpfc_pvt_pool *pvt_pool, u32 count)
20114 {
20115 struct lpfc_io_buf *lpfc_ncmd;
20116 struct lpfc_io_buf *lpfc_ncmd_next;
20117 unsigned long iflag;
20118 int ret;
20119
20120 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20121 if (ret) {
20122 if (pbl_pool->count) {
/* Move a batch of XRIs from public to private pool */
20124 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
20125 list_for_each_entry_safe(lpfc_ncmd,
20126 lpfc_ncmd_next,
20127 &pbl_pool->list,
20128 list) {
20129 list_move_tail(&lpfc_ncmd->list,
20130 &pvt_pool->list);
20131 pvt_pool->count++;
20132 pbl_pool->count--;
20133 count--;
20134 if (count == 0)
20135 break;
20136 }
20137
20138 spin_unlock(&pvt_pool->lock);
20139 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20140 return true;
20141 }
20142 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20143 }
20144
20145 return false;
20146 }

/**
 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 * @count: number of XRIs to move
 *
 * This routine tries to find some free common bufs in one of the public
 * pools with the round robin method. The search always starts from the
 * local hwqid, then the hwq found last time (rrb_next_hwqid). Once a
 * public pool is found, a batch of free common bufs is moved to the
 * private pool on hwqid.
 **/
20160 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20161 {
20162 struct lpfc_multixri_pool *multixri_pool;
20163 struct lpfc_multixri_pool *next_multixri_pool;
20164 struct lpfc_pvt_pool *pvt_pool;
20165 struct lpfc_pbl_pool *pbl_pool;
20166 struct lpfc_sli4_hdw_queue *qp;
20167 u32 next_hwqid;
20168 u32 hwq_count;
20169 int ret;
20170
20171 qp = &phba->sli4_hba.hdwq[hwqid];
20172 multixri_pool = qp->p_multixri_pool;
20173 pvt_pool = &multixri_pool->pvt_pool;
20174 pbl_pool = &multixri_pool->pbl_pool;

/* Try the local hwq's public pool first */
20177 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
20178 if (ret) {
20179 #ifdef LPFC_MXP_STAT
20180 multixri_pool->local_pbl_hit_count++;
20181 #endif
20182 return;
20183 }
20184
20185 hwq_count = phba->cfg_hdw_queue;

/* Get the next hwqid which was found last time */
20188 next_hwqid = multixri_pool->rrb_next_hwqid;
20189
20190 do {
/* Go to next hwq */
20192 next_hwqid = (next_hwqid + 1) % hwq_count;
20193
20194 next_multixri_pool =
20195 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20196 pbl_pool = &next_multixri_pool->pbl_pool;

/* Check if the public free xri pool is available */
20199 ret = _lpfc_move_xri_pbl_to_pvt(
20200 phba, qp, pbl_pool, pvt_pool, count);

/* Exit while-loop if success or all hwqid are checked */
20203 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);

/* Starting point for the next time */
20206 multixri_pool->rrb_next_hwqid = next_hwqid;
20207
20208 if (!ret) {
/* stats: all public pools are empty */
20210 multixri_pool->pbl_empty_count++;
20211 }
20212
20213 #ifdef LPFC_MXP_STAT
20214 if (ret) {
20215 if (next_hwqid == hwqid)
20216 multixri_pool->local_pbl_hit_count++;
20217 else
20218 multixri_pool->other_pbl_hit_count++;
20219 }
20220 #endif
20221 }
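
/*
 * Neighbor search example (illustrative): with 4 hardware queues and
 * rrb_next_hwqid = 2, a miss on the local pool walks public pools 3,
 * 0, 1 and finally 2, stopping at the first hwq whose pbl_pool can be
 * locked and is non-empty; rrb_next_hwqid remembers where the walk
 * ended so the next miss resumes from there.
 */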

/**
 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine gets a batch of XRIs from pbl_pool if pvt_pool is below
 * its low watermark.
 **/
20231 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20232 {
20233 struct lpfc_multixri_pool *multixri_pool;
20234 struct lpfc_pvt_pool *pvt_pool;
20235
20236 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20237 pvt_pool = &multixri_pool->pvt_pool;
20238
20239 if (pvt_pool->count < pvt_pool->low_watermark)
20240 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20241 }

/**
 * lpfc_release_io_buf - Return one IO buf back to free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: IO buf to be returned.
 * @qp: belong to which HWQ.
 *
 * This routine returns one IO buf back to the free pool. If this is an
 * urgent IO, the buf is returned to the expedite pool. If
 * cfg_xri_rebalancing is set, the buf is returned to pbl_pool or
 * pvt_pool based on the watermarks and xri_limit; otherwise it is
 * returned to lpfc_io_buf_list_put.
 **/
20255 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20256 struct lpfc_sli4_hdw_queue *qp)
20257 {
20258 unsigned long iflag;
20259 struct lpfc_pbl_pool *pbl_pool;
20260 struct lpfc_pvt_pool *pvt_pool;
20261 struct lpfc_epd_pool *epd_pool;
20262 u32 txcmplq_cnt;
20263 u32 xri_owned;
20264 u32 xri_limit;
20265 u32 abts_io_bufs;

/* MUST zero fields if buffer is reused by another protocol */
20268 lpfc_ncmd->nvmeCmd = NULL;
20269 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
20270 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
20271
20272 if (phba->cfg_xpsgl && !phba->nvmet_support &&
20273 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
20274 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
20275
20276 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
20277 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
20278
20279 if (phba->cfg_xri_rebalancing) {
20280 if (lpfc_ncmd->expedite) {
/* Return to expedite pool */
20282 epd_pool = &phba->epd_pool;
20283 spin_lock_irqsave(&epd_pool->lock, iflag);
20284 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
20285 epd_pool->count++;
20286 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20287 return;
20288 }

/* No multi-XRI pool to return the buffer to; the pools may
 * already be torn down, e.g. during driver unload.
 */
20294 if (!qp->p_multixri_pool)
20295 return;
20296
20297 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20298 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20299
20300 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20301 abts_io_bufs = qp->abts_scsi_io_bufs;
20302 abts_io_bufs += qp->abts_nvme_io_bufs;
20303
20304 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20305 xri_limit = qp->p_multixri_pool->xri_limit;
20306
20307 #ifdef LPFC_MXP_STAT
20308 if (xri_owned <= xri_limit)
20309 qp->p_multixri_pool->below_limit_count++;
20310 else
20311 qp->p_multixri_pool->above_limit_count++;
20312 #endif

/* Return the buffer to the private pool if it is below its
 * watermarks, otherwise return it to the public pool.
 */
20317 if ((pvt_pool->count < pvt_pool->low_watermark) ||
20318 (xri_owned < xri_limit &&
20319 pvt_pool->count < pvt_pool->high_watermark)) {
20320 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
20321 qp, free_pvt_pool);
20322 list_add_tail(&lpfc_ncmd->list,
20323 &pvt_pool->list);
20324 pvt_pool->count++;
20325 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20326 } else {
20327 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
20328 qp, free_pub_pool);
20329 list_add_tail(&lpfc_ncmd->list,
20330 &pbl_pool->list);
20331 pbl_pool->count++;
20332 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20333 }
20334 } else {
20335 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
20336 qp, free_xri);
20337 list_add_tail(&lpfc_ncmd->list,
20338 &qp->lpfc_io_buf_list_put);
20339 qp->put_io_bufs++;
20340 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
20341 iflag);
20342 }
20343 }

/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: pointer to HDW queue
 * @pvt_pool: pointer to private pool data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 *
 * This routine tries to get one free IO buf from the private pool.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
20357 static struct lpfc_io_buf *
20358 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
20359 struct lpfc_sli4_hdw_queue *qp,
20360 struct lpfc_pvt_pool *pvt_pool,
20361 struct lpfc_nodelist *ndlp)
20362 {
20363 struct lpfc_io_buf *lpfc_ncmd;
20364 struct lpfc_io_buf *lpfc_ncmd_next;
20365 unsigned long iflag;
20366
20367 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
20368 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20369 &pvt_pool->list, list) {
20370 if (lpfc_test_rrq_active(
20371 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20372 continue;
20373 list_del(&lpfc_ncmd->list);
20374 pvt_pool->count--;
20375 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20376 return lpfc_ncmd;
20377 }
20378 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20379
20380 return NULL;
20381 }

/**
 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine tries to get one free IO buf from the expedite pool.
 *
 * Return:
 *   pointer to one free IO buf - if expedite pool is not empty
 *   NULL - if expedite pool is empty
 **/
20393 static struct lpfc_io_buf *
20394 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20395 {
20396 struct lpfc_io_buf *lpfc_ncmd;
20397 struct lpfc_io_buf *lpfc_ncmd_next;
20398 unsigned long iflag;
20399 struct lpfc_epd_pool *epd_pool;
20400
20401 epd_pool = &phba->epd_pool;
20402 lpfc_ncmd = NULL;
20403
20404 spin_lock_irqsave(&epd_pool->lock, iflag);
20405 if (epd_pool->count > 0) {
20406 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20407 &epd_pool->list, list) {
20408 list_del(&lpfc_ncmd->list);
20409 epd_pool->count--;
20410 break;
20411 }
20412 }
20413 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20414
20415 return lpfc_ncmd;
20416 }

/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine will do the following actions and then return a pointer
 * to one free IO buf:
 *
 * 1. If the private free xri pool is empty, move some XRIs from the
 *    public to the private pool.
 * 2. Get one XRI from the private free xri pool.
 * 3. If we fail to get one from pvt_pool and this is an expedite
 *    request, get one free xri from the expedite pool.
 *
 * Note: ndlp is only used on the SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on the NVME side.
 *
 * Return:
 *   pointer to one free IO buf - if a pool is not empty
 *   NULL - if all pools are empty
 **/
20441 static struct lpfc_io_buf *
20442 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20443 struct lpfc_nodelist *ndlp,
20444 int hwqid, int expedite)
20445 {
20446 struct lpfc_sli4_hdw_queue *qp;
20447 struct lpfc_multixri_pool *multixri_pool;
20448 struct lpfc_pvt_pool *pvt_pool;
20449 struct lpfc_io_buf *lpfc_ncmd;
20450
20451 qp = &phba->sli4_hba.hdwq[hwqid];
20452 lpfc_ncmd = NULL;
20453 multixri_pool = qp->p_multixri_pool;
20454 pvt_pool = &multixri_pool->pvt_pool;
20455 multixri_pool->io_req_count++;

/* If pvt_pool is empty, move some XRIs from public to private pool */
20458 if (pvt_pool->count == 0)
20459 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);

/* Get one XRI from private free xri pool */
20462 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
20463
20464 if (lpfc_ncmd) {
20465 lpfc_ncmd->hdwq = qp;
20466 lpfc_ncmd->hdwq_no = hwqid;
20467 } else if (expedite) {
/* If we fail to get one from pvt_pool and this is an
 * expedite request, get one free xri from expedite pool.
 */
20471 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
20472 }
20473
20474 return lpfc_ncmd;
20475 }
20476
20477 static inline struct lpfc_io_buf *
20478 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20479 {
20480 struct lpfc_sli4_hdw_queue *qp;
20481 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20482
20483 qp = &phba->sli4_hba.hdwq[idx];
20484 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20485 &qp->lpfc_io_buf_list_get, list) {
20486 if (lpfc_test_rrq_active(phba, ndlp,
20487 lpfc_cmd->cur_iocbq.sli4_lxritag))
20488 continue;
20489
20490 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20491 continue;
20492
20493 list_del_init(&lpfc_cmd->list);
20494 qp->get_io_bufs--;
20495 lpfc_cmd->hdwq = qp;
20496 lpfc_cmd->hdwq_no = idx;
20497 return lpfc_cmd;
20498 }
20499 return NULL;
20500 }

/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from the free pool. If
 * cfg_xri_rebalancing is set, it tries to get a free IO buffer from the
 * multi-XRI pools; otherwise it gets one from the IO buf lists for the
 * HWQ.
 *
 * Note: ndlp is only used on the SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on the NVME side.
 *
 * Return:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
20520 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20521 struct lpfc_nodelist *ndlp,
20522 u32 hwqid, int expedite)
20523 {
20524 struct lpfc_sli4_hdw_queue *qp;
20525 unsigned long iflag;
20526 struct lpfc_io_buf *lpfc_cmd;
20527
20528 qp = &phba->sli4_hba.hdwq[hwqid];
20529 lpfc_cmd = NULL;
20530
20531 if (phba->cfg_xri_rebalancing)
20532 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
20533 phba, ndlp, hwqid, expedite);
20534 else {
20535 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
20536 qp, alloc_xri_get);
20537 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
20538 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20539 if (!lpfc_cmd) {
20540 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
20541 qp, alloc_xri_put);
20542 list_splice(&qp->lpfc_io_buf_list_put,
20543 &qp->lpfc_io_buf_list_get);
20544 qp->get_io_bufs += qp->put_io_bufs;
20545 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
20546 qp->put_io_bufs = 0;
20547 spin_unlock(&qp->io_buf_list_put_lock);
20548 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
20549 expedite)
20550 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20551 }
20552 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
20553 }
20554
20555 return lpfc_cmd;
20556 }
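
/*
 * Two-list design note: the get/put split above lets allocation and
 * release run under different locks; only when the get list runs dry
 * is the put list spliced over in one operation, amortizing the
 * double-lock cost across many I/Os.
 */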

/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk
 *
 * This routine gets one SGL chunk buffer from the hdwq's SGL chunk pool,
 * allocating a new chunk if the pool is empty.
 *
 * Return:
 *   NULL - Error
 *   Pointer to sli4_hybrid_sgl - Success
 **/
20570 struct sli4_hybrid_sgl *
20571 lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20572 {
20573 struct sli4_hybrid_sgl *list_entry = NULL;
20574 struct sli4_hybrid_sgl *tmp = NULL;
20575 struct sli4_hybrid_sgl *allocated_sgl = NULL;
20576 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20577 struct list_head *buf_list = &hdwq->sgl_list;
20578 unsigned long iflags;
20579
20580 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20581
20582 if (likely(!list_empty(buf_list))) {
/* break off 1 chunk from the sgl_list */
20584 list_for_each_entry_safe(list_entry, tmp,
20585 buf_list, list_node) {
20586 list_move_tail(&list_entry->list_node,
20587 &lpfc_buf->dma_sgl_xtra_list);
20588 break;
20589 }
20590 } else {
/* allocate more */
20592 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20593 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
20594 cpu_to_node(hdwq->io_wq->chann));
20595 if (!tmp) {
20596 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20597 "8353 error kmalloc memory for HDWQ "
20598 "%d %s\n",
20599 lpfc_buf->hdwq_no, __func__);
20600 return NULL;
20601 }
20602
20603 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
20604 GFP_ATOMIC, &tmp->dma_phys_sgl);
20605 if (!tmp->dma_sgl) {
20606 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20607 "8354 error pool_alloc memory for HDWQ "
20608 "%d %s\n",
20609 lpfc_buf->hdwq_no, __func__);
20610 kfree(tmp);
20611 return NULL;
20612 }
20613
20614 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20615 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
20616 }
20617
20618 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
20619 struct sli4_hybrid_sgl,
20620 list_node);
20621
20622 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20623
20624 return allocated_sgl;
20625 }

/**
 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the SGL chunk
 *
 * This routine puts one SGL chunk buffer back into the hdwq's SGL chunk
 * pool.
 *
 * Return:
 *   0 - Success
 *   -EINVAL - Error
 **/
20638 int
20639 lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20640 {
20641 int rc = 0;
20642 struct sli4_hybrid_sgl *list_entry = NULL;
20643 struct sli4_hybrid_sgl *tmp = NULL;
20644 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20645 struct list_head *buf_list = &hdwq->sgl_list;
20646 unsigned long iflags;
20647
20648 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20649
20650 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
20651 list_for_each_entry_safe(list_entry, tmp,
20652 &lpfc_buf->dma_sgl_xtra_list,
20653 list_node) {
20654 list_move_tail(&list_entry->list_node,
20655 buf_list);
20656 }
20657 } else {
20658 rc = -EINVAL;
20659 }
20660
20661 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20662 return rc;
20663 }

/**
 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup sgl buff resources on
 *
 * This routine frees all SGL chunks of the hdwq SGL chunk pool.
 *
 * Return: None
 **/
20675 void
20676 lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
20677 struct lpfc_sli4_hdw_queue *hdwq)
20678 {
20679 struct list_head *buf_list = &hdwq->sgl_list;
20680 struct sli4_hybrid_sgl *list_entry = NULL;
20681 struct sli4_hybrid_sgl *tmp = NULL;
20682 unsigned long iflags;
20683
20684 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

/* Free sgl pool */
20687 list_for_each_entry_safe(list_entry, tmp,
20688 buf_list, list_node) {
20689 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
20690 list_entry->dma_sgl,
20691 list_entry->dma_phys_sgl);
20692 list_del(&list_entry->list_node);
20693 kfree(list_entry);
20694 }
20695
20696 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20697 }

/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
 *
 * This routine gets one CMD/RSP buffer from the hdwq's CMD/RSP buffer
 * pool, allocating a new one if the pool is empty.
 *
 * Return:
 *   NULL - Error
 *   Pointer to fcp_cmd_rsp_buf - Success
 **/
20711 struct fcp_cmd_rsp_buf *
20712 lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20713 struct lpfc_io_buf *lpfc_buf)
20714 {
20715 struct fcp_cmd_rsp_buf *list_entry = NULL;
20716 struct fcp_cmd_rsp_buf *tmp = NULL;
20717 struct fcp_cmd_rsp_buf *allocated_buf = NULL;
20718 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20719 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20720 unsigned long iflags;
20721
20722 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20723
20724 if (likely(!list_empty(buf_list))) {
/* break off 1 chunk from the list */
20726 list_for_each_entry_safe(list_entry, tmp,
20727 buf_list,
20728 list_node) {
20729 list_move_tail(&list_entry->list_node,
20730 &lpfc_buf->dma_cmd_rsp_list);
20731 break;
20732 }
20733 } else {
/* allocate more */
20735 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20736 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
20737 cpu_to_node(hdwq->io_wq->chann));
20738 if (!tmp) {
20739 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20740 "8355 error kmalloc memory for HDWQ "
20741 "%d %s\n",
20742 lpfc_buf->hdwq_no, __func__);
20743 return NULL;
20744 }
20745
20746 tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
20747 GFP_ATOMIC,
20748 &tmp->fcp_cmd_rsp_dma_handle);
20749
20750 if (!tmp->fcp_cmnd) {
20751 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20752 "8356 error pool_alloc memory for HDWQ "
20753 "%d %s\n",
20754 lpfc_buf->hdwq_no, __func__);
20755 kfree(tmp);
20756 return NULL;
20757 }
20758
20759 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
20760 sizeof(struct fcp_cmnd));
20761
20762 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20763 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
20764 }
20765
20766 allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
20767 struct fcp_cmd_rsp_buf,
20768 list_node);
20769
20770 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20771
20772 return allocated_buf;
20773 }

/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the CMD/RSP buffer
 *
 * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP
 * buffer pool.
 *
 * Return:
 *   0 - Success
 *   -EINVAL - Error
 **/
20786 int
20787 lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20788 struct lpfc_io_buf *lpfc_buf)
20789 {
20790 int rc = 0;
20791 struct fcp_cmd_rsp_buf *list_entry = NULL;
20792 struct fcp_cmd_rsp_buf *tmp = NULL;
20793 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20794 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20795 unsigned long iflags;
20796
20797 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20798
20799 if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
20800 list_for_each_entry_safe(list_entry, tmp,
20801 &lpfc_buf->dma_cmd_rsp_list,
20802 list_node) {
20803 list_move_tail(&list_entry->list_node,
20804 buf_list);
20805 }
20806 } else {
20807 rc = -EINVAL;
20808 }
20809
20810 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20811 return rc;
20812 }

/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup cmd rsp buff resources on
 *
 * This routine frees all CMD/RSP buffers of the hdwq's CMD/RSP buffer
 * pool.
 *
 * Return: None
 **/
20824 void
20825 lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20826 struct lpfc_sli4_hdw_queue *hdwq)
20827 {
20828 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20829 struct fcp_cmd_rsp_buf *list_entry = NULL;
20830 struct fcp_cmd_rsp_buf *tmp = NULL;
20831 unsigned long iflags;
20832
20833 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

/* Free cmd_rsp buf pool */
20836 list_for_each_entry_safe(list_entry, tmp,
20837 buf_list,
20838 list_node) {
20839 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
20840 list_entry->fcp_cmnd,
20841 list_entry->fcp_cmd_rsp_dma_handle);
20842 list_del(&list_entry->list_node);
20843 kfree(list_entry);
20844 }
20845
20846 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20847 }