This source file includes following definitions.
- qla2x00_sp_timeout
- qla2x00_sp_free
- qla2x00_get_async_timeout
- qla24xx_abort_iocb_timeout
- qla24xx_abort_sp_done
- qla24xx_async_abort_cmd
- qla2x00_async_iocb_timeout
- qla2x00_async_login_sp_done
- fcport_is_smaller
- fcport_is_bigger
- qla2x00_async_login
- qla2x00_async_logout_sp_done
- qla2x00_async_logout
- qla2x00_async_prlo_done
- qla2x00_async_prlo_sp_done
- qla2x00_async_prlo
- qla24xx_handle_adisc_event
- qla_post_els_plogi_work
- qla2x00_async_adisc_sp_done
- qla2x00_async_adisc
- qla2x00_is_reserved_id
- qla2x00_find_new_loop_id
- qla2x00_clear_loop_id
- qla24xx_handle_gnl_done_event
- qla24xx_async_gnl_sp_done
- qla24xx_async_gnl
- qla24xx_post_gnl_work
- qla24xx_async_gpdb_sp_done
- qla24xx_post_prli_work
- qla2x00_async_prli_sp_done
- qla24xx_async_prli
- qla24xx_post_gpdb_work
- qla24xx_async_gpdb
- __qla24xx_handle_gpdb_event
- qla24xx_handle_gpdb_event
- qla_chk_n2n_b4_login
- qla24xx_fcport_handle_login
- qla24xx_post_newsess_work
- qla2x00_handle_rscn
- qla24xx_handle_relogin_event
- qla_rscn_replay
- qla2x00_tmf_iocb_timeout
- qla2x00_tmf_sp_done
- qla2x00_async_tm_cmd
- qla24xx_async_abort_command
- qla24xx_handle_prli_done_event
- qla24xx_handle_plogi_done_event
- qla2x00_async_logout_done
- qla83xx_nic_core_fw_load
- qla2x00_initialize_adapter
- qla2100_pci_config
- qla2300_pci_config
- qla24xx_pci_config
- qla25xx_pci_config
- qla2x00_isp_firmware
- qla2x00_reset_chip
- qla81xx_reset_mpi
- qla24xx_reset_risc
- qla25xx_read_risc_sema_reg
- qla25xx_write_risc_sema_reg
- qla25xx_manipulate_risc_semaphore
- qla24xx_reset_chip
- qla2x00_chip_diag
- qla24xx_chip_diag
- qla2x00_init_fce_trace
- qla2x00_init_eft_trace
- qla2x00_alloc_offload_mem
- qla2x00_alloc_fw_dump
- qla81xx_mpi_sync
- qla2x00_alloc_outstanding_cmds
- qla2xxx_print_sfp_info
- qla24xx_detect_sfp
- qla2x00_setup_chip
- qla2x00_init_response_q_entries
- qla2x00_update_fw_options
- qla24xx_update_fw_options
- qla2x00_config_rings
- qla24xx_config_rings
- qla2x00_init_rings
- qla2x00_fw_ready
- qla2x00_configure_hba
- qla2x00_set_model_info
- qla2xxx_nvram_wwn_from_ofw
- qla2x00_nvram_config
- qla2x00_rport_del
- qla2x00_set_fcport_state
- qla2x00_alloc_fcport
- qla2x00_free_fcport
- qla2x00_configure_loop
- qla2x00_configure_local_loop
- qla2x00_iidma_fcport
- qla_do_iidma_work
- qla_post_iidma_work
- qla2x00_reg_remote_port
- qla2x00_update_fcport
- qla_register_fcport_fn
- qla2x00_configure_fabric
- qla2x00_find_all_fabric_devs
- qla2x00_reserve_mgmt_server_loop_id
- qla2x00_fabric_login
- qla2x00_local_device_login
- qla2x00_loop_resync
- qla2x00_perform_loop_resync
- qla2x00_update_fcports
- qla83xx_reset_ownership
- __qla83xx_set_drv_ack
- __qla83xx_clear_drv_ack
- qla83xx_dev_state_to_string
- qla83xx_idc_audit
- qla83xx_initiating_reset
- __qla83xx_set_idc_control
- __qla83xx_get_idc_control
- qla83xx_check_driver_presence
- qla83xx_nic_core_reset
- qla2xxx_mctp_dump
- qla2x00_quiesce_io
- qla2x00_abort_isp_cleanup
- qla2x00_abort_isp
- qla2x00_restart_isp
- qla25xx_init_queues
- qla2x00_reset_adapter
- qla24xx_reset_adapter
- qla24xx_nvram_wwn_from_ofw
- qla24xx_nvram_config
- qla27xx_print_image
- qla28xx_check_aux_image_status_signature
- qla27xx_check_image_status_signature
- qla27xx_image_status_checksum
- qla28xx_component_bitmask
- qla28xx_component_status
- qla27xx_compare_image_generation
- qla28xx_get_aux_images
- qla27xx_get_active_image
- qla24xx_risc_firmware_invalid
- qla24xx_load_risc_flash
- qla2x00_load_risc
- qla24xx_load_risc_blob
- qla24xx_load_risc
- qla81xx_load_risc
- qla2x00_try_to_stop_firmware
- qla24xx_configure_vhba
- qla84xx_get_chip
- __qla84xx_chip_release
- qla84xx_put_chip
- qla84xx_init_chip
- qla81xx_nvram_config
- qla82xx_restart_isp
- qla81xx_update_fw_options
- qla24xx_get_fcp_prio
- qla24xx_update_fcport_fcp_prio
- qla24xx_update_all_fcp_prio
- qla2xxx_create_qpair
- qla2xxx_delete_qpair
1
2
3
4
5
6
7 #include "qla_def.h"
8 #include "qla_gbl.h"
9
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
13
14 #include "qla_devtbl.h"
15
16 #ifdef CONFIG_SPARC
17 #include <asm/prom.h>
18 #endif
19
20 #include <target/target_core_base.h>
21 #include "qla_target.h"
22
23
24
25
26 static int qla2x00_isp_firmware(scsi_qla_host_t *);
27 static int qla2x00_setup_chip(scsi_qla_host_t *);
28 static int qla2x00_fw_ready(scsi_qla_host_t *);
29 static int qla2x00_configure_hba(scsi_qla_host_t *);
30 static int qla2x00_configure_loop(scsi_qla_host_t *);
31 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
32 static int qla2x00_configure_fabric(scsi_qla_host_t *);
33 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
34 static int qla2x00_restart_isp(scsi_qla_host_t *);
35
36 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
37 static int qla84xx_init_chip(scsi_qla_host_t *);
38 static int qla25xx_init_queues(struct qla_hw_data *);
39 static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
40 static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
41 struct event_arg *ea);
42 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
43 struct event_arg *);
44 static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
45
46
47
48 void
49 qla2x00_sp_timeout(struct timer_list *t)
50 {
51 srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
52 struct srb_iocb *iocb;
53 struct req_que *req;
54 unsigned long flags;
55 struct qla_hw_data *ha = sp->vha->hw;
56
57 WARN_ON_ONCE(irqs_disabled());
58 spin_lock_irqsave(&ha->hardware_lock, flags);
59 req = sp->qpair->req;
60 req->outstanding_cmds[sp->handle] = NULL;
61 iocb = &sp->u.iocb_cmd;
62 spin_unlock_irqrestore(&ha->hardware_lock, flags);
63 iocb->timeout(sp);
64 }
65
66 void qla2x00_sp_free(srb_t *sp)
67 {
68 struct srb_iocb *iocb = &sp->u.iocb_cmd;
69
70 del_timer(&iocb->timer);
71 qla2x00_rel_sp(sp);
72 }
73
74
75
76 unsigned long
77 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
78 {
79 unsigned long tmo;
80 struct qla_hw_data *ha = vha->hw;
81
82
83 tmo = ha->r_a_tov / 10 * 2;
84 if (IS_QLAFX00(ha)) {
85 tmo = FX00_DEF_RATOV * 2;
86 } else if (!IS_FWI2_CAPABLE(ha)) {
87
88
89
90
91 tmo = ha->login_timeout;
92 }
93 return tmo;
94 }
95
/*
 * Timeout handler for an abort IOCB: scrub both the command being
 * aborted (sp->cmd_sp) and the abort srb itself from the queue pair's
 * outstanding array, then complete both with a timer-expired status.
 */
static void qla24xx_abort_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *abt = &sp->u.iocb_cmd;
	struct qla_qpair *qpair = sp->qpair;
	u32 handle;
	unsigned long flags;

	if (sp->cmd_sp)
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
		    sp->cmd_sp->handle, sp->cmd_sp->type,
		    sp->handle, sp->type);
	else
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout 2 - hdl=%x, type=%x\n",
		    sp->handle, sp->type);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
		/* The aborted command may still own a slot; clear it. */
		if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
		    sp->cmd_sp))
			qpair->req->outstanding_cmds[handle] = NULL;

		/* Also release the abort IOCB's own slot. */
		if (qpair->req->outstanding_cmds[handle] == sp) {
			qpair->req->outstanding_cmds[handle] = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	/* Complete the original command first, then the abort srb. */
	if (sp->cmd_sp)
		sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);

	abt->u.abt.comp_status = CS_TIMEOUT;
	sp->done(sp, QLA_OS_TIMER_EXPIRED);
}
134
135 static void qla24xx_abort_sp_done(srb_t *sp, int res)
136 {
137 struct srb_iocb *abt = &sp->u.iocb_cmd;
138
139 del_timer(&sp->u.iocb_cmd.timer);
140 if (sp->flags & SRB_WAKEUP_ON_COMP)
141 complete(&abt->u.abt.comp);
142 else
143 sp->free(sp);
144 }
145
/*
 * qla24xx_async_abort_cmd - queue an abort IOCB for @cmd_sp.
 * @cmd_sp: the outstanding srb to abort.
 * @wait: when true, block until the abort completes; the return value
 *	then reflects the abort completion status.
 *
 * Returns QLA_SUCCESS / QLA_FUNCTION_FAILED, or the qla2x00_start_sp()
 * error when the abort IOCB could not be queued.
 */
static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
	scsi_qla_host_t *vha = cmd_sp->vha;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	/* Allocate the abort srb on the same qpair as the victim command. */
	sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
	    GFP_ATOMIC);
	if (!sp)
		return rval;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	sp->qpair = cmd_sp->qpair;
	sp->cmd_sp = cmd_sp;
	if (wait)
		sp->flags = SRB_WAKEUP_ON_COMP;

	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
	init_completion(&abt_iocb->u.abt.comp);

	/* Fixed timeout for the abort itself (see qla2x00_init_timer()). */
	qla2x00_init_timer(sp, 42);

	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);

	sp->done = qla24xx_abort_sp_done;

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
	    cmd_sp->type);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		sp->free(sp);
		return rval;
	}

	if (wait) {
		/* qla24xx_abort_sp_done() signals this completion. */
		wait_for_completion(&abt_iocb->u.abt.comp);
		rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
			QLA_SUCCESS : QLA_FUNCTION_FAILED;
		/* In the wait case the srb is freed here, not in done(). */
		sp->free(sp);
	}

	return rval;
}
195
196 void
197 qla2x00_async_iocb_timeout(void *data)
198 {
199 srb_t *sp = data;
200 fc_port_t *fcport = sp->fcport;
201 struct srb_iocb *lio = &sp->u.iocb_cmd;
202 int rc, h;
203 unsigned long flags;
204
205 if (fcport) {
206 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
207 "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
208 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
209
210 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
211 } else {
212 pr_info("Async-%s timeout - hdl=%x.\n",
213 sp->name, sp->handle);
214 }
215
216 switch (sp->type) {
217 case SRB_LOGIN_CMD:
218 rc = qla24xx_async_abort_cmd(sp, false);
219 if (rc) {
220
221 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
222 lio->u.logio.data[1] =
223 lio->u.logio.flags & SRB_LOGIN_RETRIED ?
224 QLA_LOGIO_LOGIN_RETRIED : 0;
225 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
226 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
227 h++) {
228 if (sp->qpair->req->outstanding_cmds[h] ==
229 sp) {
230 sp->qpair->req->outstanding_cmds[h] =
231 NULL;
232 break;
233 }
234 }
235 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
236 sp->done(sp, QLA_FUNCTION_TIMEOUT);
237 }
238 break;
239 case SRB_LOGOUT_CMD:
240 case SRB_CT_PTHRU_CMD:
241 case SRB_MB_IOCB:
242 case SRB_NACK_PLOGI:
243 case SRB_NACK_PRLI:
244 case SRB_NACK_LOGO:
245 case SRB_CTRL_VP:
246 rc = qla24xx_async_abort_cmd(sp, false);
247 if (rc) {
248 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
249 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
250 h++) {
251 if (sp->qpair->req->outstanding_cmds[h] ==
252 sp) {
253 sp->qpair->req->outstanding_cmds[h] =
254 NULL;
255 break;
256 }
257 }
258 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
259 sp->done(sp, QLA_FUNCTION_TIMEOUT);
260 }
261 break;
262 default:
263 WARN_ON_ONCE(true);
264 sp->done(sp, QLA_FUNCTION_TIMEOUT);
265 break;
266 }
267 }
268
269 static void qla2x00_async_login_sp_done(srb_t *sp, int res)
270 {
271 struct scsi_qla_host *vha = sp->vha;
272 struct srb_iocb *lio = &sp->u.iocb_cmd;
273 struct event_arg ea;
274
275 ql_dbg(ql_dbg_disc, vha, 0x20dd,
276 "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
277
278 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
279
280 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
281 memset(&ea, 0, sizeof(ea));
282 ea.fcport = sp->fcport;
283 ea.data[0] = lio->u.logio.data[0];
284 ea.data[1] = lio->u.logio.data[1];
285 ea.iop[0] = lio->u.logio.iop[0];
286 ea.iop[1] = lio->u.logio.iop[1];
287 ea.sp = sp;
288 qla24xx_handle_plogi_done_event(vha, &ea);
289 }
290
291 sp->free(sp);
292 }
293
294 static inline bool
295 fcport_is_smaller(fc_port_t *fcport)
296 {
297 if (wwn_to_u64(fcport->port_name) <
298 wwn_to_u64(fcport->vha->port_name))
299 return true;
300 else
301 return false;
302 }
303
304 static inline bool
305 fcport_is_bigger(fc_port_t *fcport)
306 {
307 return !fcport_is_smaller(fcport);
308 }
309
/*
 * qla2x00_async_login - kick off an asynchronous login for @fcport.
 * @data: logio data words (consumed by the completion path).
 *
 * Returns QLA_SUCCESS when the login IOCB was queued, otherwise
 * QLA_FUNCTION_FAILED or the qla2x00_start_sp() error.
 */
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	/*
	 * Bail if the adapter is offline, a login is already in flight,
	 * or this port has no loop id assigned yet.
	 */
	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	fcport->disc_state = DSC_LOGIN_PEND;
	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	sp->gen1 = fcport->rscn_gen;	/* snapshot for staleness checks */
	sp->gen2 = fcport->login_gen;

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_login_sp_done;
	/* In N2N, the side with the bigger WWPN sends PRLI only. */
	if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport))
		lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
	else
		lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;

	/* NVMe ports do their PRLI separately. */
	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
	    "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->login_retry);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* Could not queue the IOCB; let the DPC thread retry later. */
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}
374
375 static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
376 {
377 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
378 sp->fcport->login_gen++;
379 qlt_logo_completion_handler(sp->fcport, res);
380 sp->free(sp);
381 }
382
383 int
384 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
385 {
386 srb_t *sp;
387 struct srb_iocb *lio;
388 int rval = QLA_FUNCTION_FAILED;
389
390 fcport->flags |= FCF_ASYNC_SENT;
391 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
392 if (!sp)
393 goto done;
394
395 sp->type = SRB_LOGOUT_CMD;
396 sp->name = "logout";
397
398 lio = &sp->u.iocb_cmd;
399 lio->timeout = qla2x00_async_iocb_timeout;
400 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
401
402 sp->done = qla2x00_async_logout_sp_done;
403
404 ql_dbg(ql_dbg_disc, vha, 0x2070,
405 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
406 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
407 fcport->d_id.b.area, fcport->d_id.b.al_pa,
408 fcport->port_name);
409
410 rval = qla2x00_start_sp(sp);
411 if (rval != QLA_SUCCESS)
412 goto done_free_sp;
413 return rval;
414
415 done_free_sp:
416 sp->free(sp);
417 done:
418 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
419 return rval;
420 }
421
422 void
423 qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
424 uint16_t *data)
425 {
426 fcport->flags &= ~FCF_ASYNC_ACTIVE;
427
428 if (!fcport->tgt_session)
429 qla2x00_mark_device_lost(vha, fcport, 1, 0);
430 qlt_logo_completion_handler(fcport, data[0]);
431 }
432
433 static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
434 {
435 struct srb_iocb *lio = &sp->u.iocb_cmd;
436 struct scsi_qla_host *vha = sp->vha;
437
438 sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
439 if (!test_bit(UNLOADING, &vha->dpc_flags))
440 qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
441 lio->u.logio.data);
442 sp->free(sp);
443 }
444
445 int
446 qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
447 {
448 srb_t *sp;
449 struct srb_iocb *lio;
450 int rval;
451
452 rval = QLA_FUNCTION_FAILED;
453 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
454 if (!sp)
455 goto done;
456
457 sp->type = SRB_PRLO_CMD;
458 sp->name = "prlo";
459
460 lio = &sp->u.iocb_cmd;
461 lio->timeout = qla2x00_async_iocb_timeout;
462 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
463
464 sp->done = qla2x00_async_prlo_sp_done;
465
466 ql_dbg(ql_dbg_disc, vha, 0x2070,
467 "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
468 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
469 fcport->d_id.b.area, fcport->d_id.b.al_pa);
470
471 rval = qla2x00_start_sp(sp);
472 if (rval != QLA_SUCCESS)
473 goto done_free_sp;
474
475 return rval;
476
477 done_free_sp:
478 sp->free(sp);
479 done:
480 fcport->flags &= ~FCF_ASYNC_ACTIVE;
481 return rval;
482 }
483
/*
 * Handle the completion of an asynchronous ADISC for ea->fcport:
 * schedule session teardown on failure, drop stale results, and on
 * success continue with the GPDB event path.
 */
static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
	    ea->data[0]);

	if (ea->data[0] != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_disc, vha, 0x2066,
		    "%s %8phC: adisc fail: post delete\n",
		    __func__, ea->fcport->port_name);

		/* deleted = 0 & logout_on_delete = force fw cleanup */
		fcport->deleted = 0;
		fcport->logout_on_delete = 1;
		qlt_schedule_sess_for_deletion(ea->fcport);
		return;
	}

	if (ea->fcport->disc_state == DSC_DELETE_PEND)
		return;

	/* Discard results that raced with a newer login or RSCN. */
	if (ea->sp->gen2 != ea->fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, ea->fcport->port_name);
		return;
	} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	__qla24xx_handle_gpdb_event(vha, ea);
}
526
527 static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
528 {
529 struct qla_work_evt *e;
530
531 e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
532 if (!e)
533 return QLA_FUNCTION_FAILED;
534
535 e->u.fcport.fcport = fcport;
536 fcport->flags |= FCF_ASYNC_ACTIVE;
537 fcport->disc_state = DSC_LOGIN_PEND;
538 return qla2x00_post_work(vha, e);
539 }
540
541 static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
542 {
543 struct scsi_qla_host *vha = sp->vha;
544 struct event_arg ea;
545 struct srb_iocb *lio = &sp->u.iocb_cmd;
546
547 ql_dbg(ql_dbg_disc, vha, 0x2066,
548 "Async done-%s res %x %8phC\n",
549 sp->name, res, sp->fcport->port_name);
550
551 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
552
553 memset(&ea, 0, sizeof(ea));
554 ea.rc = res;
555 ea.data[0] = lio->u.logio.data[0];
556 ea.data[1] = lio->u.logio.data[1];
557 ea.iop[0] = lio->u.logio.iop[0];
558 ea.iop[1] = lio->u.logio.iop[1];
559 ea.fcport = sp->fcport;
560 ea.sp = sp;
561
562 qla24xx_handle_adisc_event(vha, &ea);
563
564 sp->free(sp);
565 }
566
567 int
568 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
569 uint16_t *data)
570 {
571 srb_t *sp;
572 struct srb_iocb *lio;
573 int rval = QLA_FUNCTION_FAILED;
574
575 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
576 return rval;
577
578 fcport->flags |= FCF_ASYNC_SENT;
579 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
580 if (!sp)
581 goto done;
582
583 sp->type = SRB_ADISC_CMD;
584 sp->name = "adisc";
585
586 lio = &sp->u.iocb_cmd;
587 lio->timeout = qla2x00_async_iocb_timeout;
588 sp->gen1 = fcport->rscn_gen;
589 sp->gen2 = fcport->login_gen;
590 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
591
592 sp->done = qla2x00_async_adisc_sp_done;
593 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
594 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
595
596 ql_dbg(ql_dbg_disc, vha, 0x206f,
597 "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
598 sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
599
600 rval = qla2x00_start_sp(sp);
601 if (rval != QLA_SUCCESS)
602 goto done_free_sp;
603
604 return rval;
605
606 done_free_sp:
607 sp->free(sp);
608 done:
609 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
610 qla2x00_post_async_adisc_work(vha, fcport, data);
611 return rval;
612 }
613
614 static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
615 {
616 struct qla_hw_data *ha = vha->hw;
617
618 if (IS_FWI2_CAPABLE(ha))
619 return loop_id > NPH_LAST_HANDLE;
620
621 return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
622 loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
623 }
624
625
626
627
628
629
630
631
632
633
634
635
/*
 * qla2x00_find_new_loop_id - allocate a free loop id for @dev.
 *
 * Claims the first unused, non-reserved handle from ha->loop_id_map
 * under the vport lock. On failure dev->loop_id is set to
 * FC_NO_LOOP_ID and QLA_FUNCTION_FAILED is returned.
 */
static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	rval = QLA_SUCCESS;

	spin_lock_irqsave(&ha->vport_slock, flags);

	dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
	if (dev->loop_id >= LOOPID_MAP_SIZE ||
	    qla2x00_is_reserved_id(vha, dev->loop_id)) {
		dev->loop_id = FC_NO_LOOP_ID;
		rval = QLA_FUNCTION_FAILED;
	} else {
		/* Reserve the handle before dropping the lock. */
		set_bit(dev->loop_id, ha->loop_id_map);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (rval == QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
		    "Assigning new loopid=%x, portid=%x.\n",
		    dev->loop_id, dev->d_id.b24);
	else
		ql_log(ql_log_warn, dev->vha, 0x2087,
		    "No loop_id's available, portid=%x.\n",
		    dev->d_id.b24);

	return rval;
}
667
668 void qla2x00_clear_loop_id(fc_port_t *fcport)
669 {
670 struct qla_hw_data *ha = fcport->vha->hw;
671
672 if (fcport->loop_id == FC_NO_LOOP_ID ||
673 qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
674 return;
675
676 clear_bit(fcport->loop_id, ha->loop_id_map);
677 fcport->loop_id = FC_NO_LOOP_ID;
678 }
679
/*
 * qla24xx_handle_gnl_done_event - digest a completed Get Name List for
 * ea->fcport: validate RSCN/login generations, reconcile loop id and
 * port id with the firmware's view, and drive the next login step for
 * the current topology.
 */
static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport, *conflict_fcport;
	struct get_name_list_extended *e;
	u16 i, n, found = 0, loop_id;
	port_id_t id;
	u64 wwn;
	u16 data[2];
	u8 current_login_state, nvme_cls;

	fcport = ea->fcport;
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc,
	    fcport->login_gen, fcport->last_login_gen,
	    fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->rc) {
		/* The GNL itself failed; nothing to act on here. */
		if (fcport->login_retry == 0) {
			ql_dbg(ql_dbg_disc, vha, 0x20de,
			    "GNL failed Port login retry %8phN, retry cnt=%d.\n",
			    fcport->port_name, fcport->login_retry);
		}
		return;
	}

	/* Drop stale results that raced with an RSCN or new login. */
	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	} else if (fcport->last_login_gen != fcport->login_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e0,
		    "%s %8phC login gen changed\n",
		    __func__, fcport->port_name);
		return;
	}

	n = ea->data[0] / sizeof(struct get_name_list_extended);

	ql_dbg(ql_dbg_disc, vha, 0x20e1,
	    "%s %d %8phC n %d %02x%02x%02x lid %d \n",
	    __func__, __LINE__, fcport->port_name, n,
	    fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa, fcport->loop_id);

	/* Look for our WWPN in the returned name list. */
	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);
		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
			continue;

		if (IS_SW_RESV_ADDR(id))
			continue;

		found = 1;

		loop_id = le16_to_cpu(e->nport_handle);
		loop_id = (loop_id & 0x7fff);
		/* High nibble carries the NVMe login state, low the FCP one. */
		nvme_cls = e->current_login_state >> 4;
		current_login_state = e->current_login_state & 0xf;

		if (PRLI_PHASE(nvme_cls)) {
			current_login_state = nvme_cls;
			fcport->fc4_type &= ~FS_FC4TYPE_FCP;
			fcport->fc4_type |= FS_FC4TYPE_NVME;
		} else if (PRLI_PHASE(current_login_state)) {
			fcport->fc4_type |= FS_FC4TYPE_FCP;
			fcport->fc4_type &= ~FS_FC4TYPE_NVME;
		}

		ql_dbg(ql_dbg_disc, vha, 0x20e2,
		    "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
		    __func__, fcport->port_name,
		    e->current_login_state, fcport->fw_login_state,
		    fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, loop_id, fcport->loop_id);

		switch (fcport->disc_state) {
		case DSC_DELETE_PEND:
		case DSC_DELETED:
			break;
		default:
			/* Port id or handle moved: tear the session down first. */
			if ((id.b24 != fcport->d_id.b24 &&
			    fcport->d_id.b24 &&
			    fcport->loop_id != FC_NO_LOOP_ID) ||
			    (fcport->loop_id != FC_NO_LOOP_ID &&
			    fcport->loop_id != loop_id)) {
				ql_dbg(ql_dbg_disc, vha, 0x20e3,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, fcport->port_name);
				if (fcport->n2n_flag)
					fcport->d_id.b24 = 0;
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
			break;
		}

		fcport->loop_id = loop_id;
		if (fcport->n2n_flag)
			fcport->d_id.b24 = id.b24;

		/* Another fcport may hold this WWPN/id/handle; flag the clash. */
		wwn = wwn_to_u64(fcport->port_name);
		qlt_find_sess_invalidate_other(vha, wwn,
		    id, loop_id, &conflict_fcport);

		if (conflict_fcport) {
			/* Pause this login until the conflict resolves. */
			conflict_fcport->conflict = fcport;
			fcport->login_pause = 1;
		}

		switch (vha->hw->current_topology) {
		default:
			switch (current_login_state) {
			case DSC_LS_PRLI_COMP:
				ql_dbg(ql_dbg_disc + ql_dbg_verbose,
				    vha, 0x20e4, "%s %d %8phC post gpdb\n",
				    __func__, __LINE__, fcport->port_name);

				/* BIT_4 of PRLI word 3 selects target role. */
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;
				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PORT_UNAVAIL:
			default:
				if (fcport->loop_id == FC_NO_LOOP_ID) {
					qla2x00_find_new_loop_id(vha, fcport);
					fcport->fw_login_state =
					    DSC_LS_PORT_UNAVAIL;
				}
				ql_dbg(ql_dbg_disc, vha, 0x20e5,
				    "%s %d %8phC\n", __func__, __LINE__,
				    fcport->port_name);
				qla24xx_fcport_handle_login(vha, fcport);
				break;
			}
			break;
		case ISP_CFG_N:
			fcport->fw_login_state = current_login_state;
			fcport->d_id = id;
			switch (current_login_state) {
			case DSC_LS_PRLI_PEND:
				/*
				 * PRLI is still in progress; mark deleted so
				 * the relogin path picks this port up later.
				 */
				fcport->disc_state = DSC_DELETED;
				break;
			case DSC_LS_PRLI_COMP:
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;

				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PLOGI_COMP:
				if (fcport_is_bigger(fcport)) {
					/* Remote has the bigger WWPN. */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
					break;
				}
				/* fallthrough */
			default:
				if (fcport_is_smaller(fcport)) {
					/* Remote has the smaller WWPN. */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
				}
				break;
			}
			break;
		}
	}

	if (!found) {
		switch (vha->hw->current_topology) {
		case ISP_CFG_F:
		case ISP_CFG_FL:
			for (i = 0; i < n; i++) {
				e = &vha->gnl.l[i];
				/*
				 * NOTE(review): the port_id byte order here is
				 * reversed relative to the loop above (domain
				 * taken from port_id[0] instead of port_id[2]).
				 * Confirm which ordering is intended before the
				 * d_id.b24 comparison below.
				 */
				id.b.domain = e->port_id[0];
				id.b.area = e->port_id[1];
				id.b.al_pa = e->port_id[2];
				id.b.rsvd_1 = 0;
				loop_id = le16_to_cpu(e->nport_handle);

				if (fcport->d_id.b24 == id.b24) {
					/* Stale entry for our port id. */
					conflict_fcport =
					    qla2x00_find_fcport_by_wwpn(vha,
						e->port_name, 0);
					if (conflict_fcport) {
						ql_dbg(ql_dbg_disc + ql_dbg_verbose,
						    vha, 0x20e5,
						    "%s %d %8phC post del sess\n",
						    __func__, __LINE__,
						    conflict_fcport->port_name);
						qlt_schedule_sess_for_deletion
							(conflict_fcport);
					}
				}

				/*
				 * FW already picked this loop id for another
				 * fcport; give ours up.
				 */
				if (fcport->loop_id == loop_id)
					fcport->loop_id = FC_NO_LOOP_ID;
			}
			qla24xx_fcport_handle_login(vha, fcport);
			break;
		case ISP_CFG_N:
			fcport->disc_state = DSC_DELETED;
			if (time_after_eq(jiffies, fcport->dm_login_expire)) {
				if (fcport->n2n_link_reset_cnt < 2) {
					fcport->n2n_link_reset_cnt++;
					/*
					 * Remote port did not show up; bounce
					 * the link to retrigger login.
					 */
					set_bit(N2N_LINK_RESET,
					    &vha->dpc_flags);
				} else {
					if (fcport->n2n_chip_reset < 1) {
						ql_log(ql_log_info, vha, 0x705d,
						    "Chip reset to bring laser down");
						set_bit(ISP_ABORT_NEEDED,
						    &vha->dpc_flags);
						fcport->n2n_chip_reset++;
					} else {
						ql_log(ql_log_info, vha, 0x705d,
						    "Remote port %8ph is not coming back\n",
						    fcport->port_name);
						fcport->scan_state = 0;
					}
				}
				qla2xxx_wake_dpc(vha);
			} else {
				/* Login window still open; keep retrying. */
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			}
			break;
		default:
			break;
		}
	}
}
964
/*
 * Completion handler for the Get Name List mailbox IOCB: dispatch the
 * result to every fcport queued on vha->gnl.fcports, then create new
 * sessions for list entries with no matching fcport.
 */
static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;
	struct fc_port *fcport = NULL, *tf;
	u16 i, n = 0, loop_id;
	struct event_arg ea;
	struct get_name_list_extended *e;
	u64 wwn;
	struct list_head h;
	bool found = false;

	ql_dbg(ql_dbg_disc, vha, 0x20e7,
	    "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
	    sp->u.iocb_cmd.u.mbx.in_mb[2]);

	/* On timeout the timeout path owns the srb; do not touch it here. */
	if (res == QLA_FUNCTION_TIMEOUT)
		return;

	sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
	memset(&ea, 0, sizeof(ea));
	ea.sp = sp;
	ea.rc = res;

	/* mb[1] holds the byte count of returned name-list data. */
	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
	    sizeof(struct get_name_list_extended)) {
		n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
		    sizeof(struct get_name_list_extended);
		ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1];
	}

	/* Mark every handle the firmware reports as in use. */
	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		loop_id = le16_to_cpu(e->nport_handle);
		/* mask off the top bit — TODO confirm its semantics */
		loop_id = (loop_id & 0x7fff);
		set_bit(loop_id, vha->hw->loop_id_map);
		wwn = wwn_to_u64(e->port_name);

		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
		    "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
		    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
		    e->port_id[0], e->current_login_state, e->last_login_state,
		    (loop_id & 0x7fff));
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

	/* Steal the waiter list so handlers can run without sess_lock held. */
	INIT_LIST_HEAD(&h);
	fcport = tf = NULL;
	if (!list_empty(&vha->gnl.fcports))
		list_splice_init(&vha->gnl.fcports, &h);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
		list_del_init(&fcport->gnl_entry);
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ea.fcport = fcport;

		qla24xx_handle_gnl_done_event(vha, &ea);
	}

	/* Create sessions for list entries with no known fcport. */
	for (i = 0; i < n; i++) {
		port_id_t id;
		u64 wwnn;

		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);

		found = false;
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			if (!memcmp((u8 *)&wwn, fcport->port_name,
			    WWN_SIZE)) {
				found = true;
				break;
			}
		}

		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
			ql_dbg(ql_dbg_disc, vha, 0x2065,
			    "%s %d %8phC %06x post new sess\n",
			    __func__, __LINE__, (u8 *)&wwn, id.b24);
			wwnn = wwn_to_u64(e->node_name);
			qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
			    (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
		}
	}

	/* Allow the next GNL to be issued. */
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	vha->gnl.sent = 0;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}
1068
/*
 * qla24xx_async_gnl - fetch the firmware's port/node name list.
 *
 * Multiple fcports can share one GNL: each caller is queued on
 * vha->gnl.fcports, and only the first (vha->gnl.sent transitioning
 * 0 -> 1 under sess_lock) issues the mailbox IOCB; the completion
 * handler then services every queued fcport.
 *
 * Returns QLA_SUCCESS when queued (or when piggy-backing on a GNL that
 * is already in flight), otherwise an error status.
 */
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	ql_dbg(ql_dbg_disc, vha, 0x20d9,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport->flags |= FCF_ASYNC_SENT;
	fcport->disc_state = DSC_GNL;
	/* Snapshot generations so the completion can detect staleness. */
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	if (vha->gnl.sent) {
		/* A GNL is already in flight; its completion covers us. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return QLA_SUCCESS;
	}
	vha->gnl.sent = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	/* Mailbox registers: DMA address and size of the name-list buffer. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9] = vha->vp_idx;

	sp->done = qla24xx_async_gnl_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20da,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}
1138
1139 int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1140 {
1141 struct qla_work_evt *e;
1142
1143 e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
1144 if (!e)
1145 return QLA_FUNCTION_FAILED;
1146
1147 e->u.fcport.fcport = fcport;
1148 fcport->flags |= FCF_ASYNC_ACTIVE;
1149 return qla2x00_post_work(vha, e);
1150 }
1151
/*
 * Completion handler for the async Get Port Database mailbox IOCB.
 * Forwards the result to qla24xx_handle_gpdb_event() and releases the
 * DMA port-database buffer allocated in qla24xx_async_gpdb().
 */
static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20db,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	/* On timeout the buffer contents are unusable; just clean up. */
	if (res == QLA_FUNCTION_TIMEOUT)
		goto done;

	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;

	qla24xx_handle_gpdb_event(vha, &ea);

done:
	/* Return the DMA buffer obtained from s_dma_pool by the issuer. */
	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
	    sp->u.iocb_cmd.u.mbx.in_dma);

	sp->free(sp);
}
1181
1182 static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1183 {
1184 struct qla_work_evt *e;
1185
1186 e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
1187 if (!e)
1188 return QLA_FUNCTION_FAILED;
1189
1190 e->u.fcport.fcport = fcport;
1191
1192 return qla2x00_post_work(vha, e);
1193 }
1194
/*
 * Completion handler for the async PRLI IOCB.  Copies the login status
 * and IOCB parameters into an event_arg and hands it to the PRLI state
 * machine, unless the driver is unloading.
 */
static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x2129,
	    "%s %8phC res %d \n", __func__,
	    sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.fcport = sp->fcport;
		/* data[] = mailbox status, iop[] = IOCB parameters */
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;

		qla24xx_handle_prli_done_event(vha, &ea);
	}

	sp->free(sp);
}
1221
/*
 * qla24xx_async_prli - issue an asynchronous PRLI (process login) to a
 * remote port.
 *
 * Skips the request when the link is down, or when dual mode is enabled
 * and firmware already has a PLOGI/PRLI in progress from the remote side
 * (avoids crossing logins).  On submit failure the fcport is flagged for
 * a later relogin attempt by the DPC thread.
 *
 * Returns QLA_SUCCESS when the IOCB was queued, QLA_FUNCTION_FAILED
 * otherwise.
 */
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
	    qla_dual_mode_enabled(vha)) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_prli_sp_done;
	lio->u.logio.flags = 0;

	/* Request an NVMe PRLI when the port advertised FC-NVMe support. */
	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
	    fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
	    fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* Could not queue the IOCB; let DPC retry the login later. */
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
1282
1283 int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1284 {
1285 struct qla_work_evt *e;
1286
1287 e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
1288 if (!e)
1289 return QLA_FUNCTION_FAILED;
1290
1291 e->u.fcport.fcport = fcport;
1292 e->u.fcport.opt = opt;
1293 fcport->flags |= FCF_ASYNC_ACTIVE;
1294 return qla2x00_post_work(vha, e);
1295 }
1296
/*
 * qla24xx_async_gpdb - issue an asynchronous Get Port Database mailbox
 * command for @fcport.
 *
 * Allocates a DMA-able port database buffer from s_dma_pool; on success
 * the buffer is released by qla24xx_async_gpdb_sp_done().  On any local
 * failure the request is re-queued via qla24xx_post_gpdb_work() as a
 * retry.  @opt is passed through in mailbox register 10.
 */
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	fcport->disc_state = DSC_GPDB;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	/* Snapshot generations so completion can detect stale results. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd043,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	/* 64-bit DMA address of the response buffer in mb[2,3,6,7]. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;

	/* Stash the buffer so the completion handler can free it. */
	mbx->u.mbx.in = (void *)pd;
	mbx->u.mbx.in_dma = pd_dma;

	sp->done = qla24xx_async_gpdb_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20dc,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	/* Retry via the work queue rather than failing outright. */
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}
1372
/*
 * __qla24xx_handle_gpdb_event - common post-GPDB session bookkeeping.
 *
 * Marks the session live (deleted=0, logout_on_delete=1) and, on the
 * first successful login of this fcport, bumps vha->fcport_count and
 * schedules the fcport update.  Runs under tgt.sess_lock, which is
 * dropped around qla24xx_sched_upd_fcport().
 */
static
void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	ea->fcport->login_gen++;
	ea->fcport->deleted = 0;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		/* Drop the lock across the upper-layer registration. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		qla24xx_sched_upd_fcport(ea->fcport);
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	} else if (ea->fcport->login_succ) {
		/*
		 * The session already exists; this GPDB pass merely
		 * revalidated it, so no re-registration is required.
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d6,
		    "%s %d %8phC session revalidate success\n",
		    __func__, __LINE__, ea->fcport->port_name);
		ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
1403
/*
 * qla24xx_handle_gpdb_event - act on the firmware port database state
 * returned by an async GPDB completion.
 *
 * Dispatches on the (FCP or NVMe) current_login_state nibble:
 * PRLI-complete finishes the login, pending states re-arm relogin,
 * everything else tears the session down.
 */
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;
	struct port_database_24xx *pd;
	struct srb *sp = ea->sp;
	uint8_t ls;

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name,
	    fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme,
	    ea->rc);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	/* NVMe login state lives in the high nibble, FCP in the low one. */
	if (fcport->fc4f_nvme)
		ls = pd->current_login_state >> 4;
	else
		ls = pd->current_login_state & 0xf;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* A relogin happened while GPDB was in flight; data stale. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* An RSCN arrived meanwhile; replay it and redo discovery. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	switch (ls) {
	case PDS_PRLI_COMPLETE:
		__qla24xx_parse_gpdb(vha, fcport, pd);
		break;
	case PDS_PLOGI_PENDING:
	case PDS_PLOGI_COMPLETE:
	case PDS_PRLI_PENDING:
	case PDS_PRLI2_PENDING:
		/* Set discovery state back to GNL to Relogin attempt */
		if (qla_dual_mode_enabled(vha) ||
		    qla_ini_mode_enabled(vha)) {
			fcport->disc_state = DSC_GNL;
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		}
		return;
	case PDS_LOGO_PENDING:
	case PDS_PORT_UNAVAILABLE:
	default:
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}
	__qla24xx_handle_gpdb_event(vha, ea);
}
1467
1468 static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1469 {
1470 u8 login = 0;
1471 int rc;
1472
1473 if (qla_tgt_mode_enabled(vha))
1474 return;
1475
1476 if (qla_dual_mode_enabled(vha)) {
1477 if (N2N_TOPO(vha->hw)) {
1478 u64 mywwn, wwn;
1479
1480 mywwn = wwn_to_u64(vha->port_name);
1481 wwn = wwn_to_u64(fcport->port_name);
1482 if (mywwn > wwn)
1483 login = 1;
1484 else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
1485 && time_after_eq(jiffies,
1486 fcport->plogi_nack_done_deadline))
1487 login = 1;
1488 } else {
1489 login = 1;
1490 }
1491 } else {
1492
1493 login = 1;
1494 }
1495
1496 if (login && fcport->login_retry) {
1497 fcport->login_retry--;
1498 if (fcport->loop_id == FC_NO_LOOP_ID) {
1499 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
1500 rc = qla2x00_find_new_loop_id(vha, fcport);
1501 if (rc) {
1502 ql_dbg(ql_dbg_disc, vha, 0x20e6,
1503 "%s %d %8phC post del sess - out of loopid\n",
1504 __func__, __LINE__, fcport->port_name);
1505 fcport->scan_state = 0;
1506 qlt_schedule_sess_for_deletion(fcport);
1507 return;
1508 }
1509 }
1510 ql_dbg(ql_dbg_disc, vha, 0x20bf,
1511 "%s %d %8phC post login\n",
1512 __func__, __LINE__, fcport->port_name);
1513 qla2x00_post_async_login_work(vha, fcport, NULL);
1514 }
1515 }
1516
/*
 * qla24xx_fcport_handle_login - discovery/login state machine step for
 * one fcport.
 *
 * Called from DPC/relogin processing.  Based on disc_state (and, for N2N
 * topologies, the firmware login state) it posts the next async step:
 * GNNID, GNL, PLOGI/ELS-PLOGI, PRLI, GPDB or ADISC.  Always returns 0;
 * progress happens through posted work items and dpc flags.
 */
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u16 data[2];
	u64 wwn;
	u16 sec;

	ql_dbg(ql_dbg_disc, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->login_gen, fcport->loop_id, fcport->scan_state);

	if (fcport->scan_state != QLA_FCPORT_FOUND)
		return 0;

	/* In dual mode, don't collide with a login the remote started. */
	if ((fcport->loop_id != FC_NO_LOOP_ID) &&
	    qla_dual_mode_enabled(vha) &&
	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
		return 0;

	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
	    !N2N_TOPO(vha->hw)) {
		/* Recent remote PLOGI: wait out the NACK deadline first. */
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return 0;
		}
	}

	/* Pure target mode never initiates a login. */
	if (vha->host->active_mode == MODE_TARGET)
		return 0;

	if (fcport->flags & FCF_ASYNC_SENT) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	switch (fcport->disc_state) {
	case DSC_DELETED:
		wwn = wwn_to_u64(fcport->node_name);
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			/* N2N: WWN ordering decides who sends ELS PLOGI. */
			if (fcport_is_smaller(fcport)) {
				if (fcport->login_retry) {
					if (fcport->loop_id == FC_NO_LOOP_ID) {
						qla2x00_find_new_loop_id(vha,
						    fcport);
						fcport->fw_login_state =
						    DSC_LS_PORT_UNAVAIL;
					}
					fcport->login_retry--;
					qla_post_els_plogi_work(vha, fcport);
				} else {
					ql_log(ql_log_info, vha, 0x705d,
					    "Unable to reach remote port %8phC",
					    fcport->port_name);
				}
			} else {
				qla24xx_post_gnl_work(vha, fcport);
			}
			break;
		default:
			if (wwn == 0) {
				/* Node name unknown: query it first. */
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC post GNNID\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnnid_work(vha, fcport);
			} else if (fcport->loop_id == FC_NO_LOOP_ID) {
				ql_dbg(ql_dbg_disc, vha, 0x20bd,
				    "%s %d %8phC post gnl\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnl_work(vha, fcport);
			} else {
				qla_chk_n2n_b4_login(vha, fcport);
			}
			break;
		}
		break;

	case DSC_GNL:
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			/* 0x6 == PRLI complete in the FCP nibble. */
			if ((fcport->current_login_state & 0xf) == 0x6) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post GPDB work\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->chip_reset =
					vha->hw->base_qpair->chip_reset;
				qla24xx_post_gpdb_work(vha, fcport, 0);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post %s PRLI\n",
				    __func__, __LINE__, fcport->port_name,
				    fcport->fc4f_nvme ? "NVME" : "FC");
				qla24xx_post_prli_work(vha, fcport);
			}
			break;
		default:
			if (fcport->login_pause) {
				/* Conflict pending; retry from DPC later. */
				ql_dbg(ql_dbg_disc, vha, 0x20d8,
				    "%s %d %8phC exit\n",
				    __func__, __LINE__,
				    fcport->port_name);
				fcport->last_rscn_gen = fcport->rscn_gen;
				fcport->last_login_gen = fcport->login_gen;
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
				break;
			}
			qla_chk_n2n_b4_login(vha, fcport);
			break;
		}
		break;

	case DSC_LOGIN_FAILED:
		if (N2N_TOPO(vha->hw))
			qla_chk_n2n_b4_login(vha, fcport);
		else
			qlt_schedule_sess_for_deletion(fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* Verify the session is still valid via ADISC. */
		data[0] = data[1] = 0;
		qla2x00_post_async_adisc_work(vha, fcport, data);
		break;

	case DSC_LOGIN_PEND:
		if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			qla24xx_post_prli_work(vha, fcport);
		break;

	case DSC_UPD_FCPORT:
		/* Registration with upper layers is taking a while; log
		 * once per minute of elapsed time. */
		sec =  jiffies_to_msecs(jiffies -
		    fcport->jiffies_at_registration)/1000;
		if (fcport->sec_since_registration < sec && sec &&
		    !(sec % 60)) {
			fcport->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
			    "%s %8phC - Slow Rport registration(%d Sec)\n",
			    __func__, fcport->port_name, sec);
		}

		if (fcport->next_disc_state != DSC_DELETE_PEND)
			fcport->next_disc_state = DSC_ADISC;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		break;

	default:
		break;
	}

	return 0;
}
1673
1674 int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
1675 u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
1676 {
1677 struct qla_work_evt *e;
1678
1679 e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
1680 if (!e)
1681 return QLA_FUNCTION_FAILED;
1682
1683 e->u.new_sess.id = *id;
1684 e->u.new_sess.pla = pla;
1685 e->u.new_sess.fc4_type = fc4_type;
1686 memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
1687 if (node_name)
1688 memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
1689
1690 return qla2x00_post_work(vha, e);
1691 }
1692
1693 void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
1694 {
1695 fc_port_t *fcport;
1696 unsigned long flags;
1697
1698 fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
1699 if (fcport) {
1700 fcport->scan_needed = 1;
1701 fcport->rscn_gen++;
1702 }
1703
1704 spin_lock_irqsave(&vha->work_lock, flags);
1705 if (vha->scan.scan_flags == 0) {
1706 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
1707 vha->scan.scan_flags |= SF_QUEUED;
1708 schedule_delayed_work(&vha->scan.scan_work, 5);
1709 }
1710 spin_unlock_irqrestore(&vha->work_lock, flags);
1711 }
1712
1713 void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
1714 struct event_arg *ea)
1715 {
1716 fc_port_t *fcport = ea->fcport;
1717
1718 if (test_bit(UNLOADING, &vha->dpc_flags))
1719 return;
1720
1721 ql_dbg(ql_dbg_disc, vha, 0x2102,
1722 "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
1723 __func__, fcport->port_name, fcport->disc_state,
1724 fcport->fw_login_state, fcport->login_pause,
1725 fcport->deleted, fcport->conflict,
1726 fcport->last_rscn_gen, fcport->rscn_gen,
1727 fcport->last_login_gen, fcport->login_gen,
1728 fcport->flags);
1729
1730 if (fcport->last_rscn_gen != fcport->rscn_gen) {
1731 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
1732 __func__, __LINE__, fcport->port_name);
1733 qla24xx_post_gnl_work(vha, fcport);
1734 return;
1735 }
1736
1737 qla24xx_fcport_handle_login(vha, fcport);
1738 }
1739
1740
1741
1742
1743
1744 void qla_rscn_replay(fc_port_t *fcport)
1745 {
1746 struct event_arg ea;
1747
1748 switch (fcport->disc_state) {
1749 case DSC_DELETE_PEND:
1750 return;
1751 default:
1752 break;
1753 }
1754
1755 if (fcport->scan_needed) {
1756 memset(&ea, 0, sizeof(ea));
1757 ea.id = fcport->d_id;
1758 ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
1759 qla2x00_handle_rscn(fcport->vha, &ea);
1760 }
1761 }
1762
1763 static void
1764 qla2x00_tmf_iocb_timeout(void *data)
1765 {
1766 srb_t *sp = data;
1767 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1768
1769 tmf->u.tmf.comp_status = CS_TIMEOUT;
1770 complete(&tmf->u.tmf.comp);
1771 }
1772
1773 static void qla2x00_tmf_sp_done(srb_t *sp, int res)
1774 {
1775 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1776
1777 complete(&tmf->u.tmf.comp);
1778 }
1779
/*
 * qla2x00_async_tm_cmd - issue a task-management IOCB (e.g. LUN reset)
 * and wait synchronously for its completion.
 *
 * @flags: TCF_* task-management flags.
 * @lun:   LUN the TM applies to.
 * @tag:   command tag; the completion status is read back from the same
 *         field (u.tmf.data).
 *
 * On success a marker IOCB is issued so the firmware synchronizes the
 * affected commands.  Returns the TM completion status, or
 * QLA_FUNCTION_FAILED when the IOCB could not be issued.
 */
int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
	uint32_t tag)
{
	struct scsi_qla_host *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";

	/* The timeout path completes the same completion with CS_TIMEOUT. */
	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));

	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qla2x00_tmf_sp_done;

	ql_dbg(ql_dbg_taskm, vha, 0x802f,
	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.data;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8030,
		    "TM IOCB failed (%x).\n", rval);
	}

	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
		flags = tm_iocb->u.tmf.flags;
		lun = (uint16_t)tm_iocb->u.tmf.lun;

		/* Issue Marker IOCB */
		qla2x00_marker(vha, vha->hw->base_qpair,
		    fcport->loop_id, lun,
		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
	}

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}
1839
/*
 * qla24xx_async_abort_command - abort a previously started srb.
 *
 * The srb is looked up in its queue pair's outstanding-command array
 * under the qpair lock; if it is no longer outstanding the abort fails.
 * FX00 ioctl-type commands are aborted through the firmware disc
 * interface instead of the generic abort IOCB.
 */
int
qla24xx_async_abort_command(srb_t *sp)
{
	unsigned long   flags = 0;

	uint32_t	handle;
	fc_port_t	*fcport = sp->fcport;
	struct qla_qpair *qpair = sp->qpair;
	struct scsi_qla_host *vha = fcport->vha;
	struct req_que *req = qpair->req;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (handle == req->num_outstanding_cmds) {
		/* Command is already completed. */
		return QLA_FUNCTION_FAILED;
	}
	if (sp->type == SRB_FXIOCB_DCMD)
		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
		    FXDISC_ABORT_IOCTL);

	return qla24xx_async_abort_cmd(sp, true);
}
1868
/*
 * qla24xx_handle_prli_done_event - process a PRLI completion.
 *
 * On success, captures NVMe PRLI service parameters (including first
 * burst size) and moves on to GPDB.  On failure, either retries as an
 * FCP PRLI (when the NVMe attempt was rejected), kicks an N2N link
 * reset, or tears the session down for a fresh relogin.
 */
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
	    ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);

		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		ea->fcport->logout_on_delete = 1;
		ea->fcport->nvme_prli_service_param = ea->iop[0];
		/* iop[1] low word = first burst size in 512-byte units. */
		if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
			ea->fcport->nvme_first_burst_size =
			    (ea->iop[1] & 0xffff) * 512;
		else
			ea->fcport->nvme_first_burst_size = 0;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	default:
		if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
		    (ea->iop[1] == 0x50000)) {   /* reson 5=busy expl:0x0 */
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
			break;
		}

		/* NVMe PRLI rejected: fall back to an FCP PRLI attempt. */
		if (ea->fcport->fc4f_nvme) {
			ql_dbg(ql_dbg_disc, vha, 0x2118,
			    "%s %d %8phC post fc4 prli\n",
			    __func__, __LINE__, ea->fcport->port_name);
			ea->fcport->fc4f_nvme = 0;
			qla24xx_post_prli_work(vha, ea->fcport);
			return;
		}

		/* at this point both PRLI REJECT & LOGO are possible */
		if (N2N_TOPO(vha->hw)) {
			if (ea->fcport->n2n_link_reset_cnt < 3) {
				ea->fcport->n2n_link_reset_cnt++;
				/*
				 * Remote port is not sending PLOGI.
				 * Reset the link to kick-start its state
				 * machine.
				 */
				set_bit(N2N_LINK_RESET, &vha->dpc_flags);
			} else {
				ql_log(ql_log_warn, vha, 0x2119,
				    "%s %d %8phC Unable to reconnect\n",
				    __func__, __LINE__, ea->fcport->port_name);
			}
		} else {
			/*
			 * Switch connect: login failed.  Take the
			 * connection down and let relogin re-trigger.
			 */
			ea->fcport->flags &= ~FCF_ASYNC_SENT;
			ea->fcport->keep_nport_handle = 0;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
}
1934
/*
 * qla24xx_handle_plogi_done_event - process a PLOGI completion.
 *
 * Validates login/RSCN generations first (stale completions are
 * dropped), then dispatches on the mailbox status: success proceeds to
 * PRLI (NVMe) or GPDB (FCP); MBS_LOOP_ID_USED and MBS_PORT_ID_USED
 * resolve loop-id / nport-id conflicts with other sessions.
 */
void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */
	u16 lid;
	struct fc_port *conflict_fcport;
	unsigned long flags;
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
	    ea->sp->gen1, fcport->rscn_gen,
	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
		ql_dbg(ql_dbg_disc, vha, 0x20ea,
		    "%s %d %8phC Remote is trying to login\n",
		    __func__, __LINE__, fcport->port_name);
		return;
	}

	if ((fcport->disc_state == DSC_DELETE_PEND) ||
	    (fcport->disc_state == DSC_DELETED)) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	if (ea->sp->gen2 != fcport->login_gen) {
		/* A relogin happened while PLOGI was in flight. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* An RSCN arrived meanwhile; replay it and redo discovery. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC RSCN generation changed\n",
		    __func__, fcport->port_name);
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
	    ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * NVMe ports continue with PRLI; FCP ports have PRLI done
		 * by firmware as part of the PLOGI, so go straight to
		 * GPDB after claiming the loop id.
		 */
		if (ea->fcport->fc4f_nvme) {
			ql_dbg(ql_dbg_disc, vha, 0x2117,
			    "%s %d %8phC post prli\n",
			    __func__, __LINE__, ea->fcport->port_name);
			qla24xx_post_prli_work(vha, ea->fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ea,
			    "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->loop_id, ea->fcport->d_id.b24);

			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			ea->fcport->send_els_logo = 0;
			ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		}
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		ea->fcport->flags &= ~FCF_ASYNC_SENT;
		ea->fcport->disc_state = DSC_LOGIN_FAILED;
		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID  */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area   = (ea->iop[1] >>  8) & 0xff;
		cid.b.al_pa  = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id, cid.b24);

		/* Pick a new loop id on the next GNL pass. */
		set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
		ea->fcport->loop_id = FC_NO_LOOP_ID;
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		lid = ea->iop[1] & 0xffff;
		qlt_find_sess_invalidate_other(vha,
		    wwn_to_u64(ea->fcport->port_name),
		    ea->fcport->d_id, lid, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport shares the same loop_id/nport id.
			 * The conflicting fcport must finish cleanup
			 * before this one can proceed with login.
			 */
			conflict_fcport->conflict = ea->fcport;
			ea->fcport->login_pause = 1;

			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);

			qla2x00_clear_loop_id(ea->fcport);
			set_bit(lid, vha->hw->loop_id_map);
			ea->fcport->loop_id = lid;
			ea->fcport->keep_nport_handle = 0;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
	return;
}
2075
2076 void
2077 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
2078 uint16_t *data)
2079 {
2080 qlt_logo_completion_handler(fcport, data[0]);
2081 fcport->login_gen++;
2082 fcport->flags &= ~FCF_ASYNC_ACTIVE;
2083 return;
2084 }
2085
2086
2087
2088
2089
/*
 * qla83xx_nic_core_fw_load - participate in ISP83xx NIC-core firmware
 * load via the Inter-Driver Communication (IDC) protocol.
 *
 * Registers this function's presence, negotiates IDC major/minor
 * versions with any already-loaded protocol drivers, and (when this
 * function owns the reset) drives the device to READY before running
 * the IDC state handler.  Executes entirely under the IDC lock.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* Default timeouts; presumably tunable from flash/params later
	 * — TODO confirm. */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: set IDC-Major-Version and clear the
	 * IDC-Lock-Recovery register.
	 * Others: check compatibility with the current IDC Major version.
	 */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not
		 * compatible with the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Each function advertises its supported minor version (2 bits
	 * per port). */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		memset(config, 0, sizeof(config));
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);

	return rval;
}
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
/*
 * qla2x00_initialize_adapter - bring the HBA from reset to operational.
 *
 * Sequence: configure PCI space, reset the chip, validate flash, apply
 * NVRAM configuration, load/verify RISC firmware, initialize rings, and
 * run chip-family-specific extras (ISP84xx chip init, ISP8031 NIC-core
 * firmware, FCP priority config, driver version registration).
 *
 * Returns QLA_SUCCESS on success, an error status from the first
 * failing step otherwise.
 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int	rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0040,
	    "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0044,
		    "Unable to configure PCI space.\n");
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	/* Check for secure flash support (ISP28xx only). */
	if (IS_QLA28XX(ha)) {
		if (RD_REG_DWORD(&reg->mailbox12) & BIT_0) {
			ql_log(ql_log_info, vha, 0xffff, "Adapter is Secure\n");
			ha->flags.secure_adapter = 1;
		}
	}

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x004f,
		    "Unable to validate FLASH data.\n");
		return rval;
	}

	if (IS_QLA8044(ha)) {
		qla8044_read_reset_template(vha);

		/*
		 * NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
		 * If DONRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * should honor the reset.
		 */
		if (ql2xdontresethba == 1)
			qla8044_set_idc_dontreset(vha);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);
	ql_dbg(ql_dbg_init, vha, 0x0061,
	    "Configure NVRAM parameters...\n");

	ha->isp_ops->nvram_config(vha);

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		ql_log(ql_log_info, vha, 0x0077,
		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0078,
	    "Verifying loaded RISC code...\n");

	/* Load firmware only if the current image is unusable. */
	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}

	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			ql_log(ql_log_warn, vha, 0x00d0,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}

	/* Rings are only initialized when an initiator is present. */
	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		rval = qla2x00_init_rings(vha);

	/* No point in continuing if firmware initialization failed. */
	if (rval != QLA_SUCCESS)
		return rval;

	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x00d4,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	/* Load the NIC Core f/w if we are the first protocol driver. */
	if (IS_QLA8031(ha)) {
		rval = qla83xx_nic_core_fw_load(vha);
		if (rval)
			ql_log(ql_log_warn, vha, 0x0124,
			    "Error in initializing NIC Core f/w.\n");
	}

	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	if (IS_P3P_TYPE(ha))
		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
	else
		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);

	return (rval);
}
2312
2313
2314
2315
2316
2317
2318
2319 int
2320 qla2100_pci_config(scsi_qla_host_t *vha)
2321 {
2322 uint16_t w;
2323 unsigned long flags;
2324 struct qla_hw_data *ha = vha->hw;
2325 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2326
2327 pci_set_master(ha->pdev);
2328 pci_try_set_mwi(ha->pdev);
2329
2330 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2331 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2332 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2333
2334 pci_disable_rom(ha->pdev);
2335
2336
2337 spin_lock_irqsave(&ha->hardware_lock, flags);
2338 ha->pci_attr = RD_REG_WORD(®->ctrl_status);
2339 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2340
2341 return QLA_SUCCESS;
2342 }
2343
2344
2345
2346
2347
2348
2349
2350 int
2351 qla2300_pci_config(scsi_qla_host_t *vha)
2352 {
2353 uint16_t w;
2354 unsigned long flags = 0;
2355 uint32_t cnt;
2356 struct qla_hw_data *ha = vha->hw;
2357 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2358
2359 pci_set_master(ha->pdev);
2360 pci_try_set_mwi(ha->pdev);
2361
2362 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2363 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2364
2365 if (IS_QLA2322(ha) || IS_QLA6322(ha))
2366 w &= ~PCI_COMMAND_INTX_DISABLE;
2367 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2368
2369
2370
2371
2372
2373
2374
2375
2376 if (IS_QLA2300(ha)) {
2377 spin_lock_irqsave(&ha->hardware_lock, flags);
2378
2379
2380 WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC);
2381 for (cnt = 0; cnt < 30000; cnt++) {
2382 if ((RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) != 0)
2383 break;
2384
2385 udelay(10);
2386 }
2387
2388
2389 WRT_REG_WORD(®->ctrl_status, 0x20);
2390 RD_REG_WORD(®->ctrl_status);
2391
2392
2393 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
2394
2395 if (ha->fb_rev == FPM_2300)
2396 pci_clear_mwi(ha->pdev);
2397
2398
2399 WRT_REG_WORD(®->ctrl_status, 0x0);
2400 RD_REG_WORD(®->ctrl_status);
2401
2402
2403 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC);
2404 for (cnt = 0; cnt < 30000; cnt++) {
2405 if ((RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0)
2406 break;
2407
2408 udelay(10);
2409 }
2410
2411 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2412 }
2413
2414 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2415
2416 pci_disable_rom(ha->pdev);
2417
2418
2419 spin_lock_irqsave(&ha->hardware_lock, flags);
2420 ha->pci_attr = RD_REG_WORD(®->ctrl_status);
2421 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2422
2423 return QLA_SUCCESS;
2424 }
2425
2426
2427
2428
2429
2430
2431
2432 int
2433 qla24xx_pci_config(scsi_qla_host_t *vha)
2434 {
2435 uint16_t w;
2436 unsigned long flags = 0;
2437 struct qla_hw_data *ha = vha->hw;
2438 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2439
2440 pci_set_master(ha->pdev);
2441 pci_try_set_mwi(ha->pdev);
2442
2443 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2444 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2445 w &= ~PCI_COMMAND_INTX_DISABLE;
2446 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2447
2448 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2449
2450
2451 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
2452 pcix_set_mmrbc(ha->pdev, 2048);
2453
2454
2455 if (pci_is_pcie(ha->pdev))
2456 pcie_set_readrq(ha->pdev, 4096);
2457
2458 pci_disable_rom(ha->pdev);
2459
2460 ha->chip_revision = ha->pdev->revision;
2461
2462
2463 spin_lock_irqsave(&ha->hardware_lock, flags);
2464 ha->pci_attr = RD_REG_DWORD(®->ctrl_status);
2465 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2466
2467 return QLA_SUCCESS;
2468 }
2469
2470
2471
2472
2473
2474
2475
2476 int
2477 qla25xx_pci_config(scsi_qla_host_t *vha)
2478 {
2479 uint16_t w;
2480 struct qla_hw_data *ha = vha->hw;
2481
2482 pci_set_master(ha->pdev);
2483 pci_try_set_mwi(ha->pdev);
2484
2485 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2486 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2487 w &= ~PCI_COMMAND_INTX_DISABLE;
2488 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2489
2490
2491 if (pci_is_pcie(ha->pdev))
2492 pcie_set_readrq(ha->pdev, 4096);
2493
2494 pci_disable_rom(ha->pdev);
2495
2496 ha->chip_revision = ha->pdev->revision;
2497
2498 return QLA_SUCCESS;
2499 }
2500
2501
2502
2503
2504
2505
2506
2507 static int
2508 qla2x00_isp_firmware(scsi_qla_host_t *vha)
2509 {
2510 int rval;
2511 uint16_t loop_id, topo, sw_cap;
2512 uint8_t domain, area, al_pa;
2513 struct qla_hw_data *ha = vha->hw;
2514
2515
2516 rval = QLA_FUNCTION_FAILED;
2517
2518 if (ha->flags.disable_risc_code_load) {
2519 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
2520
2521
2522 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
2523 if (rval == QLA_SUCCESS) {
2524
2525 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2526 &area, &domain, &topo, &sw_cap);
2527 }
2528 }
2529
2530 if (rval)
2531 ql_dbg(ql_dbg_init, vha, 0x007a,
2532 "**** Load RISC code ****.\n");
2533
2534 return (rval);
2535 }
2536
2537
2538
2539
2540
2541
2542
2543 int
2544 qla2x00_reset_chip(scsi_qla_host_t *vha)
2545 {
2546 unsigned long flags = 0;
2547 struct qla_hw_data *ha = vha->hw;
2548 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2549 uint32_t cnt;
2550 uint16_t cmd;
2551 int rval = QLA_FUNCTION_FAILED;
2552
2553 if (unlikely(pci_channel_offline(ha->pdev)))
2554 return rval;
2555
2556 ha->isp_ops->disable_intrs(ha);
2557
2558 spin_lock_irqsave(&ha->hardware_lock, flags);
2559
2560
2561 cmd = 0;
2562 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
2563 cmd &= ~PCI_COMMAND_MASTER;
2564 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2565
2566 if (!IS_QLA2100(ha)) {
2567
2568 WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC);
2569 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
2570 for (cnt = 0; cnt < 30000; cnt++) {
2571 if ((RD_REG_WORD(®->hccr) &
2572 HCCR_RISC_PAUSE) != 0)
2573 break;
2574 udelay(100);
2575 }
2576 } else {
2577 RD_REG_WORD(®->hccr);
2578 udelay(10);
2579 }
2580
2581
2582 WRT_REG_WORD(®->ctrl_status, 0x20);
2583 RD_REG_WORD(®->ctrl_status);
2584
2585
2586 WRT_REG_WORD(®->fpm_diag_config, 0x100);
2587 RD_REG_WORD(®->fpm_diag_config);
2588
2589
2590 if (!IS_QLA2200(ha)) {
2591 WRT_REG_WORD(®->fpm_diag_config, 0x0);
2592 RD_REG_WORD(®->fpm_diag_config);
2593 }
2594
2595
2596 WRT_REG_WORD(®->ctrl_status, 0x10);
2597 RD_REG_WORD(®->ctrl_status);
2598
2599
2600 if (IS_QLA2200(ha)) {
2601 WRT_FB_CMD_REG(ha, reg, 0xa000);
2602 RD_FB_CMD_REG(ha, reg);
2603 } else {
2604 WRT_FB_CMD_REG(ha, reg, 0x00fc);
2605
2606
2607 for (cnt = 0; cnt < 3000; cnt++) {
2608 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
2609 break;
2610 udelay(100);
2611 }
2612 }
2613
2614
2615 WRT_REG_WORD(®->ctrl_status, 0);
2616 RD_REG_WORD(®->ctrl_status);
2617
2618
2619 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC);
2620 RD_REG_WORD(®->hccr);
2621
2622
2623 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC);
2624 RD_REG_WORD(®->hccr);
2625 }
2626
2627 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT);
2628 WRT_REG_WORD(®->hccr, HCCR_CLR_HOST_INT);
2629
2630
2631 WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET);
2632
2633
2634 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2635
2636
2637
2638
2639
2640 udelay(20);
2641 for (cnt = 30000; cnt; cnt--) {
2642 if ((RD_REG_WORD(®->ctrl_status) &
2643 CSR_ISP_SOFT_RESET) == 0)
2644 break;
2645 udelay(100);
2646 }
2647 } else
2648 udelay(10);
2649
2650
2651 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC);
2652
2653 WRT_REG_WORD(®->semaphore, 0);
2654
2655
2656 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC);
2657 RD_REG_WORD(®->hccr);
2658
2659 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2660 for (cnt = 0; cnt < 30000; cnt++) {
2661 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
2662 break;
2663
2664 udelay(100);
2665 }
2666 } else
2667 udelay(100);
2668
2669
2670 cmd |= PCI_COMMAND_MASTER;
2671 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2672
2673
2674 if (!IS_QLA2100(ha)) {
2675 WRT_REG_WORD(®->hccr, HCCR_DISABLE_PARITY_PAUSE);
2676 RD_REG_WORD(®->hccr);
2677 }
2678
2679 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2680
2681 return QLA_SUCCESS;
2682 }
2683
2684
2685
2686
2687
2688
2689
2690 static int
2691 qla81xx_reset_mpi(scsi_qla_host_t *vha)
2692 {
2693 uint16_t mb[4] = {0x1010, 0, 1, 0};
2694
2695 if (!IS_QLA81XX(vha->hw))
2696 return QLA_SUCCESS;
2697
2698 return qla81xx_write_mpi_register(vha, mb);
2699 }
2700
2701
2702
2703
2704
2705
2706
2707 static inline int
2708 qla24xx_reset_risc(scsi_qla_host_t *vha)
2709 {
2710 unsigned long flags = 0;
2711 struct qla_hw_data *ha = vha->hw;
2712 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2713 uint32_t cnt;
2714 uint16_t wd;
2715 static int abts_cnt;
2716 int rval = QLA_SUCCESS;
2717
2718 spin_lock_irqsave(&ha->hardware_lock, flags);
2719
2720
2721 WRT_REG_DWORD(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
2722 for (cnt = 0; cnt < 30000; cnt++) {
2723 if ((RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
2724 break;
2725
2726 udelay(10);
2727 }
2728
2729 if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE))
2730 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
2731
2732 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
2733 "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
2734 RD_REG_DWORD(®->hccr),
2735 RD_REG_DWORD(®->ctrl_status),
2736 (RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE));
2737
2738 WRT_REG_DWORD(®->ctrl_status,
2739 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
2740 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
2741
2742 udelay(100);
2743
2744
2745 RD_REG_WORD(®->mailbox0);
2746 for (cnt = 10000; RD_REG_WORD(®->mailbox0) != 0 &&
2747 rval == QLA_SUCCESS; cnt--) {
2748 barrier();
2749 if (cnt)
2750 udelay(5);
2751 else
2752 rval = QLA_FUNCTION_TIMEOUT;
2753 }
2754
2755 if (rval == QLA_SUCCESS)
2756 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
2757
2758 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
2759 "HCCR: 0x%x, MailBox0 Status 0x%x\n",
2760 RD_REG_DWORD(®->hccr),
2761 RD_REG_DWORD(®->mailbox0));
2762
2763
2764 RD_REG_DWORD(®->ctrl_status);
2765 for (cnt = 0; cnt < 60; cnt++) {
2766 barrier();
2767 if ((RD_REG_DWORD(®->ctrl_status) &
2768 CSRX_ISP_SOFT_RESET) == 0)
2769 break;
2770
2771 udelay(5);
2772 }
2773 if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_ISP_SOFT_RESET))
2774 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
2775
2776 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
2777 "HCCR: 0x%x, Soft Reset status: 0x%x\n",
2778 RD_REG_DWORD(®->hccr),
2779 RD_REG_DWORD(®->ctrl_status));
2780
2781
2782 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
2783 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
2784 if (++abts_cnt < 5) {
2785 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2786 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
2787 } else {
2788
2789
2790
2791
2792 abts_cnt = 0;
2793 vha->flags.online = 0;
2794 }
2795 }
2796 }
2797
2798 WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET);
2799 RD_REG_DWORD(®->hccr);
2800
2801 WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE);
2802 RD_REG_DWORD(®->hccr);
2803
2804 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET);
2805 RD_REG_DWORD(®->hccr);
2806
2807 RD_REG_WORD(®->mailbox0);
2808 for (cnt = 60; RD_REG_WORD(®->mailbox0) != 0 &&
2809 rval == QLA_SUCCESS; cnt--) {
2810 barrier();
2811 if (cnt)
2812 udelay(5);
2813 else
2814 rval = QLA_FUNCTION_TIMEOUT;
2815 }
2816 if (rval == QLA_SUCCESS)
2817 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2818
2819 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
2820 "Host Risc 0x%x, mailbox0 0x%x\n",
2821 RD_REG_DWORD(®->hccr),
2822 RD_REG_WORD(®->mailbox0));
2823
2824 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2825
2826 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
2827 "Driver in %s mode\n",
2828 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
2829
2830 if (IS_NOPOLLING_TYPE(ha))
2831 ha->isp_ops->enable_intrs(ha);
2832
2833 return rval;
2834 }
2835
2836 static void
2837 qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
2838 {
2839 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2840
2841 WRT_REG_DWORD(®->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2842 *data = RD_REG_DWORD(®->iobase_window + RISC_REGISTER_WINDOW_OFFET);
2843
2844 }
2845
2846 static void
2847 qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
2848 {
2849 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2850
2851 WRT_REG_DWORD(®->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2852 WRT_REG_DWORD(®->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
2853 }
2854
/*
* qla25xx_manipulate_risc_semaphore() - Acquire the RISC semaphore before
* chip reset, retrying and ultimately forcing ownership on timeout.
* Only runs for PCI subsystem devices 0x0175 and 0x0240 -- presumably the
* boards affected by the underlying firmware issue; confirm against errata.
*/
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	/* Workaround applies only to the two affected subsystem IDs. */
	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	/* Pause the RISC while we manipulate the semaphore. */
	WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	/* Repeatedly set the SET bit until it reads back, or time out. */
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	/* SET bit observed and no stale FORCE bit: we own the semaphore. */
	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	/* A stale FORCE bit is present: clear it and wait for it to drop. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (wd32 & RISC_SEMAPHORE_FORCE)
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);

	/* FORCE cleared: retry normal acquisition from the top. */
	goto attempt;

force:
	/* Could not acquire cleanly within the time budget -- force it. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);

acquired:
	return;
}
2915
2916
2917
2918
2919
2920
2921
2922 int
2923 qla24xx_reset_chip(scsi_qla_host_t *vha)
2924 {
2925 struct qla_hw_data *ha = vha->hw;
2926 int rval = QLA_FUNCTION_FAILED;
2927
2928 if (pci_channel_offline(ha->pdev) &&
2929 ha->flags.pci_channel_io_perm_failure) {
2930 return rval;
2931 }
2932
2933 ha->isp_ops->disable_intrs(ha);
2934
2935 qla25xx_manipulate_risc_semaphore(vha);
2936
2937
2938 rval = qla24xx_reset_risc(vha);
2939
2940 return rval;
2941 }
2942
2943
2944
2945
2946
2947
2948
2949 int
2950 qla2x00_chip_diag(scsi_qla_host_t *vha)
2951 {
2952 int rval;
2953 struct qla_hw_data *ha = vha->hw;
2954 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2955 unsigned long flags = 0;
2956 uint16_t data;
2957 uint32_t cnt;
2958 uint16_t mb[5];
2959 struct req_que *req = ha->req_q_map[0];
2960
2961
2962 rval = QLA_FUNCTION_FAILED;
2963
2964 ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
2965 ®->flash_address);
2966
2967 spin_lock_irqsave(&ha->hardware_lock, flags);
2968
2969
2970 WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET);
2971
2972
2973
2974
2975
2976 udelay(20);
2977 data = qla2x00_debounce_register(®->ctrl_status);
2978 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
2979 udelay(5);
2980 data = RD_REG_WORD(®->ctrl_status);
2981 barrier();
2982 }
2983
2984 if (!cnt)
2985 goto chip_diag_failed;
2986
2987 ql_dbg(ql_dbg_init, vha, 0x007c,
2988 "Reset register cleared by chip reset.\n");
2989
2990
2991 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC);
2992 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC);
2993
2994
2995 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2996 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
2997 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
2998 udelay(5);
2999 data = RD_MAILBOX_REG(ha, reg, 0);
3000 barrier();
3001 }
3002 } else
3003 udelay(10);
3004
3005 if (!cnt)
3006 goto chip_diag_failed;
3007
3008
3009 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");
3010
3011 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
3012 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
3013 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
3014 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
3015 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
3016 mb[3] != PROD_ID_3) {
3017 ql_log(ql_log_warn, vha, 0x0062,
3018 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
3019 mb[1], mb[2], mb[3]);
3020
3021 goto chip_diag_failed;
3022 }
3023 ha->product_id[0] = mb[1];
3024 ha->product_id[1] = mb[2];
3025 ha->product_id[2] = mb[3];
3026 ha->product_id[3] = mb[4];
3027
3028
3029 if (req->length > 1024)
3030 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
3031 else
3032 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
3033 req->length;
3034
3035 if (IS_QLA2200(ha) &&
3036 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
3037
3038 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
3039
3040 ha->device_type |= DT_ISP2200A;
3041 ha->fw_transfer_size = 128;
3042 }
3043
3044
3045 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3046
3047 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
3048 rval = qla2x00_mbx_reg_test(vha);
3049 if (rval)
3050 ql_log(ql_log_warn, vha, 0x0080,
3051 "Failed mailbox send register test.\n");
3052 else
3053
3054 rval = QLA_SUCCESS;
3055 spin_lock_irqsave(&ha->hardware_lock, flags);
3056
3057 chip_diag_failed:
3058 if (rval)
3059 ql_log(ql_log_info, vha, 0x0081,
3060 "Chip diagnostics **** FAILED ****.\n");
3061
3062 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3063
3064 return (rval);
3065 }
3066
3067
3068
3069
3070
3071
3072
3073 int
3074 qla24xx_chip_diag(scsi_qla_host_t *vha)
3075 {
3076 int rval;
3077 struct qla_hw_data *ha = vha->hw;
3078 struct req_que *req = ha->req_q_map[0];
3079
3080 if (IS_P3P_TYPE(ha))
3081 return QLA_SUCCESS;
3082
3083 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
3084
3085 rval = qla2x00_mbx_reg_test(vha);
3086 if (rval) {
3087 ql_log(ql_log_warn, vha, 0x0082,
3088 "Failed mailbox send register test.\n");
3089 } else {
3090
3091 rval = QLA_SUCCESS;
3092 }
3093
3094 return rval;
3095 }
3096
3097 static void
3098 qla2x00_init_fce_trace(scsi_qla_host_t *vha)
3099 {
3100 int rval;
3101 dma_addr_t tc_dma;
3102 void *tc;
3103 struct qla_hw_data *ha = vha->hw;
3104
3105 if (!IS_FWI2_CAPABLE(ha))
3106 return;
3107
3108 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3109 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3110 return;
3111
3112 if (ha->fce) {
3113 ql_dbg(ql_dbg_init, vha, 0x00bd,
3114 "%s: FCE Mem is already allocated.\n",
3115 __func__);
3116 return;
3117 }
3118
3119
3120 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
3121 GFP_KERNEL);
3122 if (!tc) {
3123 ql_log(ql_log_warn, vha, 0x00be,
3124 "Unable to allocate (%d KB) for FCE.\n",
3125 FCE_SIZE / 1024);
3126 return;
3127 }
3128
3129 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
3130 ha->fce_mb, &ha->fce_bufs);
3131 if (rval) {
3132 ql_log(ql_log_warn, vha, 0x00bf,
3133 "Unable to initialize FCE (%d).\n", rval);
3134 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
3135 return;
3136 }
3137
3138 ql_dbg(ql_dbg_init, vha, 0x00c0,
3139 "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
3140
3141 ha->flags.fce_enabled = 1;
3142 ha->fce_dma = tc_dma;
3143 ha->fce = tc;
3144 }
3145
3146 static void
3147 qla2x00_init_eft_trace(scsi_qla_host_t *vha)
3148 {
3149 int rval;
3150 dma_addr_t tc_dma;
3151 void *tc;
3152 struct qla_hw_data *ha = vha->hw;
3153
3154 if (!IS_FWI2_CAPABLE(ha))
3155 return;
3156
3157 if (ha->eft) {
3158 ql_dbg(ql_dbg_init, vha, 0x00bd,
3159 "%s: EFT Mem is already allocated.\n",
3160 __func__);
3161 return;
3162 }
3163
3164
3165 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
3166 GFP_KERNEL);
3167 if (!tc) {
3168 ql_log(ql_log_warn, vha, 0x00c1,
3169 "Unable to allocate (%d KB) for EFT.\n",
3170 EFT_SIZE / 1024);
3171 return;
3172 }
3173
3174 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
3175 if (rval) {
3176 ql_log(ql_log_warn, vha, 0x00c2,
3177 "Unable to initialize EFT (%d).\n", rval);
3178 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
3179 return;
3180 }
3181
3182 ql_dbg(ql_dbg_init, vha, 0x00c3,
3183 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
3184
3185 ha->eft_dma = tc_dma;
3186 ha->eft = tc;
3187 }
3188
/*
* qla2x00_alloc_offload_mem() - Allocate the optional firmware trace
* buffers (FCE and EFT). Each helper is a no-op when the buffer already
* exists or the chip lacks support.
*/
static void
qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
	qla2x00_init_fce_trace(vha);
	qla2x00_init_eft_trace(vha);
}
3195
3196 void
3197 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
3198 {
3199 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
3200 eft_size, fce_size, mq_size;
3201 struct qla_hw_data *ha = vha->hw;
3202 struct req_que *req = ha->req_q_map[0];
3203 struct rsp_que *rsp = ha->rsp_q_map[0];
3204 struct qla2xxx_fw_dump *fw_dump;
3205
3206 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
3207 req_q_size = rsp_q_size = 0;
3208
3209 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3210 fixed_size = sizeof(struct qla2100_fw_dump);
3211 } else if (IS_QLA23XX(ha)) {
3212 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
3213 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
3214 sizeof(uint16_t);
3215 } else if (IS_FWI2_CAPABLE(ha)) {
3216 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
3217 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
3218 else if (IS_QLA81XX(ha))
3219 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
3220 else if (IS_QLA25XX(ha))
3221 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
3222 else
3223 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
3224
3225 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
3226 sizeof(uint32_t);
3227 if (ha->mqenable) {
3228 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
3229 !IS_QLA28XX(ha))
3230 mq_size = sizeof(struct qla2xxx_mq_chain);
3231
3232
3233
3234
3235 mq_size += (ha->max_req_queues - 1) *
3236 (req->length * sizeof(request_t));
3237 mq_size += (ha->max_rsp_queues - 1) *
3238 (rsp->length * sizeof(response_t));
3239 }
3240 if (ha->tgt.atio_ring)
3241 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
3242
3243 qla2x00_init_fce_trace(vha);
3244 if (ha->fce)
3245 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
3246 qla2x00_init_eft_trace(vha);
3247 if (ha->eft)
3248 eft_size = EFT_SIZE;
3249 }
3250
3251 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3252 struct fwdt *fwdt = ha->fwdt;
3253 uint j;
3254
3255 for (j = 0; j < 2; j++, fwdt++) {
3256 if (!fwdt->template) {
3257 ql_dbg(ql_dbg_init, vha, 0x00ba,
3258 "-> fwdt%u no template\n", j);
3259 continue;
3260 }
3261 ql_dbg(ql_dbg_init, vha, 0x00fa,
3262 "-> fwdt%u calculating fwdump size...\n", j);
3263 fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
3264 vha, fwdt->template);
3265 ql_dbg(ql_dbg_init, vha, 0x00fa,
3266 "-> fwdt%u calculated fwdump size = %#lx bytes\n",
3267 j, fwdt->dump_size);
3268 dump_size += fwdt->dump_size;
3269 }
3270 } else {
3271 req_q_size = req->length * sizeof(request_t);
3272 rsp_q_size = rsp->length * sizeof(response_t);
3273 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
3274 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
3275 + eft_size;
3276 ha->chain_offset = dump_size;
3277 dump_size += mq_size + fce_size;
3278 if (ha->exchoffld_buf)
3279 dump_size += sizeof(struct qla2xxx_offld_chain) +
3280 ha->exchoffld_size;
3281 if (ha->exlogin_buf)
3282 dump_size += sizeof(struct qla2xxx_offld_chain) +
3283 ha->exlogin_size;
3284 }
3285
3286 if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
3287
3288 ql_dbg(ql_dbg_init, vha, 0x00c5,
3289 "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
3290 __func__, dump_size, ha->fw_dump_len,
3291 ha->fw_dump_alloc_len);
3292
3293 fw_dump = vmalloc(dump_size);
3294 if (!fw_dump) {
3295 ql_log(ql_log_warn, vha, 0x00c4,
3296 "Unable to allocate (%d KB) for firmware dump.\n",
3297 dump_size / 1024);
3298 } else {
3299 mutex_lock(&ha->optrom_mutex);
3300 if (ha->fw_dumped) {
3301 memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
3302 vfree(ha->fw_dump);
3303 ha->fw_dump = fw_dump;
3304 ha->fw_dump_alloc_len = dump_size;
3305 ql_dbg(ql_dbg_init, vha, 0x00c5,
3306 "Re-Allocated (%d KB) and save firmware dump.\n",
3307 dump_size / 1024);
3308 } else {
3309 if (ha->fw_dump)
3310 vfree(ha->fw_dump);
3311 ha->fw_dump = fw_dump;
3312
3313 ha->fw_dump_len = ha->fw_dump_alloc_len =
3314 dump_size;
3315 ql_dbg(ql_dbg_init, vha, 0x00c5,
3316 "Allocated (%d KB) for firmware dump.\n",
3317 dump_size / 1024);
3318
3319 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3320 mutex_unlock(&ha->optrom_mutex);
3321 return;
3322 }
3323
3324 ha->fw_dump->signature[0] = 'Q';
3325 ha->fw_dump->signature[1] = 'L';
3326 ha->fw_dump->signature[2] = 'G';
3327 ha->fw_dump->signature[3] = 'C';
3328 ha->fw_dump->version = htonl(1);
3329
3330 ha->fw_dump->fixed_size = htonl(fixed_size);
3331 ha->fw_dump->mem_size = htonl(mem_size);
3332 ha->fw_dump->req_q_size = htonl(req_q_size);
3333 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
3334
3335 ha->fw_dump->eft_size = htonl(eft_size);
3336 ha->fw_dump->eft_addr_l =
3337 htonl(LSD(ha->eft_dma));
3338 ha->fw_dump->eft_addr_h =
3339 htonl(MSD(ha->eft_dma));
3340
3341 ha->fw_dump->header_size =
3342 htonl(offsetof
3343 (struct qla2xxx_fw_dump, isp));
3344 }
3345 mutex_unlock(&ha->optrom_mutex);
3346 }
3347 }
3348 }
3349
3350 static int
3351 qla81xx_mpi_sync(scsi_qla_host_t *vha)
3352 {
3353 #define MPS_MASK 0xe0
3354 int rval;
3355 uint16_t dc;
3356 uint32_t dw;
3357
3358 if (!IS_QLA81XX(vha->hw))
3359 return QLA_SUCCESS;
3360
3361 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
3362 if (rval != QLA_SUCCESS) {
3363 ql_log(ql_log_warn, vha, 0x0105,
3364 "Unable to acquire semaphore.\n");
3365 goto done;
3366 }
3367
3368 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
3369 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
3370 if (rval != QLA_SUCCESS) {
3371 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
3372 goto done_release;
3373 }
3374
3375 dc &= MPS_MASK;
3376 if (dc == (dw & MPS_MASK))
3377 goto done_release;
3378
3379 dw &= ~MPS_MASK;
3380 dw |= dc;
3381 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
3382 if (rval != QLA_SUCCESS) {
3383 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
3384 }
3385
3386 done_release:
3387 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
3388 if (rval != QLA_SUCCESS) {
3389 ql_log(ql_log_warn, vha, 0x006d,
3390 "Unable to release semaphore.\n");
3391 }
3392
3393 done:
3394 return rval;
3395 }
3396
3397 int
3398 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
3399 {
3400
3401 if (req->outstanding_cmds)
3402 return QLA_SUCCESS;
3403
3404 if (!IS_FWI2_CAPABLE(ha))
3405 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
3406 else {
3407 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
3408 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
3409 else
3410 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
3411 }
3412
3413 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3414 sizeof(srb_t *),
3415 GFP_KERNEL);
3416
3417 if (!req->outstanding_cmds) {
3418
3419
3420
3421
3422 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
3423 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3424 sizeof(srb_t *),
3425 GFP_KERNEL);
3426
3427 if (!req->outstanding_cmds) {
3428 ql_log(ql_log_fatal, NULL, 0x0126,
3429 "Failed to allocate memory for "
3430 "outstanding_cmds for req_que %p.\n", req);
3431 req->num_outstanding_cmds = 0;
3432 return QLA_FUNCTION_FAILED;
3433 }
3434 }
3435
3436 return QLA_SUCCESS;
3437 }
3438
/*
* PRINT_FIELD - append _str to the local output buffer when bit _flag is
* set in a0->_field. Relies on caller-scope locals: a0 (SFP page), ptr
* (write cursor), leftover (space remaining), len, and p (flag noting a
* prior entry, used to insert a '|' separator).
* NOTE(review): leftover is not re-checked for underflow after appends --
* presumably STR_LEN covers every flag combination; confirm.
*/
#define PRINT_FIELD(_field, _flag, _str) { \
	if (a0->_field & _flag) {\
		if (p) {\
			strcat(ptr, "|");\
			ptr++;\
			leftover--;\
		} \
		len = snprintf(ptr, leftover, "%s", _str); \
		p = 1;\
		leftover -= len;\
		ptr += len; \
	} \
}
3452
3453 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
3454 {
3455 #define STR_LEN 64
3456 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
3457 u8 str[STR_LEN], *ptr, p;
3458 int leftover, len;
3459
3460 memset(str, 0, STR_LEN);
3461 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
3462 ql_dbg(ql_dbg_init, vha, 0x015a,
3463 "SFP MFG Name: %s\n", str);
3464
3465 memset(str, 0, STR_LEN);
3466 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
3467 ql_dbg(ql_dbg_init, vha, 0x015c,
3468 "SFP Part Name: %s\n", str);
3469
3470
3471 memset(str, 0, STR_LEN);
3472 ptr = str;
3473 leftover = STR_LEN;
3474 p = len = 0;
3475 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
3476 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
3477 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
3478 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
3479 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
3480 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
3481 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
3482 ql_dbg(ql_dbg_init, vha, 0x0160,
3483 "SFP Media: %s\n", str);
3484
3485
3486 memset(str, 0, STR_LEN);
3487 ptr = str;
3488 leftover = STR_LEN;
3489 p = len = 0;
3490 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
3491 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
3492 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
3493 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
3494 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
3495 ql_dbg(ql_dbg_init, vha, 0x0196,
3496 "SFP Link Length: %s\n", str);
3497
3498 memset(str, 0, STR_LEN);
3499 ptr = str;
3500 leftover = STR_LEN;
3501 p = len = 0;
3502 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
3503 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
3504 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
3505 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
3506 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
3507 ql_dbg(ql_dbg_init, vha, 0x016e,
3508 "SFP FC Link Tech: %s\n", str);
3509
3510 if (a0->length_km)
3511 ql_dbg(ql_dbg_init, vha, 0x016f,
3512 "SFP Distant: %d km\n", a0->length_km);
3513 if (a0->length_100m)
3514 ql_dbg(ql_dbg_init, vha, 0x0170,
3515 "SFP Distant: %d m\n", a0->length_100m*100);
3516 if (a0->length_50um_10m)
3517 ql_dbg(ql_dbg_init, vha, 0x0189,
3518 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
3519 if (a0->length_62um_10m)
3520 ql_dbg(ql_dbg_init, vha, 0x018a,
3521 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
3522 if (a0->length_om4_10m)
3523 ql_dbg(ql_dbg_init, vha, 0x0194,
3524 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
3525 if (a0->length_om3_10m)
3526 ql_dbg(ql_dbg_init, vha, 0x0195,
3527 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
3528 }
3529
3530
3531
3532
3533
3534
3535
3536
3537 int
3538 qla24xx_detect_sfp(scsi_qla_host_t *vha)
3539 {
3540 int rc = QLA_SUCCESS;
3541 struct sff_8247_a0 *a;
3542 struct qla_hw_data *ha = vha->hw;
3543
3544 if (!AUTO_DETECT_SFP_SUPPORT(vha))
3545 goto out;
3546
3547 rc = qla2x00_read_sfp_dev(vha, NULL, 0);
3548 if (rc)
3549 goto out;
3550
3551 a = (struct sff_8247_a0 *)vha->hw->sfp_data;
3552 qla2xxx_print_sfp_info(vha);
3553
3554 if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
3555
3556 ha->flags.detected_lr_sfp = 1;
3557
3558 if (a->length_km > 5 || a->length_100m > 50)
3559 ha->long_range_distance = LR_DISTANCE_10K;
3560 else
3561 ha->long_range_distance = LR_DISTANCE_5K;
3562
3563 if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
3564 ql_dbg(ql_dbg_async, vha, 0x507b,
3565 "Detected Long Range SFP.\n");
3566 } else {
3567
3568 ha->flags.detected_lr_sfp = 0;
3569 if (ha->flags.using_lr_setting)
3570 ql_dbg(ql_dbg_async, vha, 0x5084,
3571 "Detected Short Range SFP.\n");
3572 }
3573
3574 if (!vha->flags.init_done)
3575 rc = QLA_SUCCESS;
3576 out:
3577 return rc;
3578 }
3579
3580
3581
3582
3583
3584
3585
/*
 * qla2x00_setup_chip() - Load, verify and start the RISC firmware, then
 * size driver resources from the running firmware's reported capabilities.
 * @vha: adapter host.
 *
 * Returns QLA_SUCCESS, or the status of the first failing step.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;

	if (IS_P3P_TYPE(ha)) {
		/* P3P parts: firmware is managed separately; just load and
		 * jump straight to the NPIV/resource-sizing phase. */
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM/Instruction/GP RAM parity while loading. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		RD_REG_WORD(&reg->hccr);	/* flush the write */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	qla81xx_mpi_sync(vha);

	/* Load firmware sequences. */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00c9,
		    "Verifying Checksum of loaded RISC code.\n");

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			ql_dbg(ql_dbg_init, vha, 0x00ca,
			    "Starting firmware.\n");

			if (ql2xexlogins)
				ha->flags.exlogins_enabled = 1;

			if (qla_is_exch_offld_enabled(vha))
				ha->flags.exchoffld_enabled = 1;

			rval = qla2x00_execute_fw(vha, srisc_address);

			if (rval == QLA_SUCCESS) {
				qla24xx_detect_sfp(vha);

				/* Restore a user-set ZIO threshold if the
				 * chip supports it and ZIO mode 6 is active. */
				if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
				    IS_QLA28XX(ha)) &&
				    (ha->zio_mode == QLA_ZIO_MODE_6))
					qla27xx_set_zio_threshold(vha,
					    ha->last_zio_threshold);

				rval = qla2x00_set_exlogins_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				rval = qla2x00_set_exchoffld_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

enable_82xx_npiv:
				/* Remember whether this is a first-time load
				 * (fw_major_version still 0). */
				fw_major_version = ha->fw_major_version;
				if (IS_P3P_TYPE(ha))
					qla82xx_check_md_needed(vha);
				else
					rval = qla2x00_get_fw_version(vha);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
				    (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					/* Clamp vport count so that
					 * (count + 1) is a multiple of
					 * MIN_MULTI_ID_FABRIC. */
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha);

				/*
				 * Allocate the outstanding-command array now
				 * that the firmware has told us how many
				 * exchanges it supports.
				 */
				rval = qla2x00_alloc_outstanding_cmds(ha,
				    vha->req);
				if (rval != QLA_SUCCESS)
					goto failed;

				/* Offload mem / fw dump only on first load. */
				if (!fw_major_version && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_offload_mem(vha);

				if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_fw_dump(vha);

			} else {
				goto failed;
			}
		} else {
			ql_log(ql_log_fatal, vha, 0x00cd,
			    "ISP Firmware failed checksum.\n");
			goto failed;
		}
	} else
		goto failed;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Re-enable RAM parity now that firmware is running. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity only. */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity. */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		RD_REG_WORD(&reg->hccr);	/* flush the write */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		ha->flags.fac_supported = 1;
	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			ha->fdt_block_size = size << 2;
		} else {
			ql_log(ql_log_warn, vha, 0x00ce,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);

			/* Missing FAC is non-fatal on these chips. */
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
			    IS_QLA28XX(ha)) {
				ha->flags.fac_supported = 0;
				rval = QLA_SUCCESS;
			}
		}
	}
failed:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup chip ****FAILED****.\n");
	}

	return (rval);
}
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751 void
3752 qla2x00_init_response_q_entries(struct rsp_que *rsp)
3753 {
3754 uint16_t cnt;
3755 response_t *pkt;
3756
3757 rsp->ring_ptr = rsp->ring;
3758 rsp->ring_index = 0;
3759 rsp->status_srb = NULL;
3760 pkt = rsp->ring_ptr;
3761 for (cnt = 0; cnt < rsp->length; cnt++) {
3762 pkt->signature = RESPONSE_PROCESSED;
3763 pkt++;
3764 }
3765 }
3766
3767
3768
3769
3770
3771
3772
/*
 * qla2x00_update_fw_options() - Program ISP2xxx firmware options, including
 * 1G/2G serial-link swing/emphasis/sensitivity tuning from NVRAM.
 * @vha: adapter host.
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	/* ISP2100/2200 have no serial-link/LED options to program. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
	    "Serial link options.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
	    ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	/* seriallink_options[3] BIT_2 enables the override. */
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings -> fw_options[10]. */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;	/* 0 is invalid; use default */
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings -> fw_options[11]. */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;	/* 0 is invalid; use default */
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP-2 compliance. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Set retry FLOGI in case of P2P connection. */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2100,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}
3855
/*
 * qla24xx_update_fw_options() - Program ISP24xx+ firmware options (FLOGI
 * retry, ATIO routing, exchange reservation) and serdes parameters.
 * @vha: adapter host.
 */
void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	if (IS_P3P_TYPE(ha))
		return;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection. */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2101,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ when target mode is active. */
	if (ql2xmvasynctoatio &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		/*
		 * Tell FW to track each exchange so the driver can use
		 * termination IOCBs to retire them when target mode or
		 * dual mode is enabled.
		 */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_4;
		else
			ha->fw_options[2] &= ~BIT_4;

		/* Reserve 1/2 of emergency exchanges for ELS. */
		if (qla2xuseresexchforels)
			ha->fw_options[2] |= BIT_8;
		else
			ha->fw_options[2] &= ~BIT_8;
	}

	ql_dbg(ql_dbg_init, vha, 0x00e8,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
		qla2x00_set_fw_options(vha, ha->fw_options);

	/* Update Serial Link options -- only when enabled in NVRAM. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0104,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}
3927
/*
 * qla2x00_config_rings() - Publish request/response ring geometry to the
 * ISP2xxx via the init control block and zero the ring index registers.
 * @vha: adapter host.
 */
void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
	put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);

	/* Reset all four in/out index registers; read back to flush. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
	RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));
}
3950
3951 void
3952 qla24xx_config_rings(struct scsi_qla_host *vha)
3953 {
3954 struct qla_hw_data *ha = vha->hw;
3955 device_reg_t *reg = ISP_QUE_REG(ha, 0);
3956 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
3957 struct qla_msix_entry *msix;
3958 struct init_cb_24xx *icb;
3959 uint16_t rid = 0;
3960 struct req_que *req = ha->req_q_map[0];
3961 struct rsp_que *rsp = ha->rsp_q_map[0];
3962
3963
3964 icb = (struct init_cb_24xx *)ha->init_cb;
3965 icb->request_q_outpointer = cpu_to_le16(0);
3966 icb->response_q_inpointer = cpu_to_le16(0);
3967 icb->request_q_length = cpu_to_le16(req->length);
3968 icb->response_q_length = cpu_to_le16(rsp->length);
3969 put_unaligned_le64(req->dma, &icb->request_q_address);
3970 put_unaligned_le64(rsp->dma, &icb->response_q_address);
3971
3972
3973 icb->atio_q_inpointer = cpu_to_le16(0);
3974 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
3975 put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);
3976
3977 if (IS_SHADOW_REG_CAPABLE(ha))
3978 icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
3979
3980 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3981 IS_QLA28XX(ha)) {
3982 icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
3983 icb->rid = cpu_to_le16(rid);
3984 if (ha->flags.msix_enabled) {
3985 msix = &ha->msix_entries[1];
3986 ql_dbg(ql_dbg_init, vha, 0x0019,
3987 "Registering vector 0x%x for base que.\n",
3988 msix->entry);
3989 icb->msix = cpu_to_le16(msix->entry);
3990 }
3991
3992 if (MSB(rid))
3993 icb->firmware_options_2 |= cpu_to_le32(BIT_19);
3994
3995 if (LSB(rid))
3996 icb->firmware_options_2 |= cpu_to_le32(BIT_18);
3997
3998
3999 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
4000 (ha->flags.msix_enabled)) {
4001 icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
4002 ha->flags.disable_msix_handshake = 1;
4003 ql_dbg(ql_dbg_init, vha, 0x00fe,
4004 "MSIX Handshake Disable Mode turned on.\n");
4005 } else {
4006 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
4007 }
4008 icb->firmware_options_2 |= cpu_to_le32(BIT_23);
4009
4010 WRT_REG_DWORD(®->isp25mq.req_q_in, 0);
4011 WRT_REG_DWORD(®->isp25mq.req_q_out, 0);
4012 WRT_REG_DWORD(®->isp25mq.rsp_q_in, 0);
4013 WRT_REG_DWORD(®->isp25mq.rsp_q_out, 0);
4014 } else {
4015 WRT_REG_DWORD(®->isp24.req_q_in, 0);
4016 WRT_REG_DWORD(®->isp24.req_q_out, 0);
4017 WRT_REG_DWORD(®->isp24.rsp_q_in, 0);
4018 WRT_REG_DWORD(®->isp24.rsp_q_out, 0);
4019 }
4020
4021 qlt_24xx_config_rings(vha);
4022
4023
4024 if (ha->set_data_rate) {
4025 ql_dbg(ql_dbg_init, vha, 0x00fd,
4026 "Speed set by user : %s Gbps \n",
4027 qla2x00_get_link_speed_str(ha, ha->set_data_rate));
4028 icb->firmware_options_3 = (ha->set_data_rate << 13);
4029 }
4030
4031
4032 RD_REG_DWORD(&ioreg->hccr);
4033 }
4034
4035
4036
4037
4038
4039
4040
4041
4042
4043
/*
 * qla2x00_init_rings() - Reset all request/response/ATIO rings under the
 * hardware lock, program ring geometry into the chip and issue the
 * initialize-firmware mailbox command.
 * @vha: adapter host.
 *
 * Returns QLA_SUCCESS or the init-firmware failure status.
 */
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array on every active request queue. */
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req || !test_bit(que, ha->req_qid_map))
			continue;
		/* Shadow out-pointer lives just past the ring entries. */
		req->out_ptr = (void *)(req->ring + req->length);
		*req->out_ptr = 0;
		/* Slot 0 is reserved; start clearing at 1. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 1;

		/* Initialize firmware. */
		req->ring_ptr  = req->ring;
		req->ring_index    = 0;
		req->cnt      = req->length;
	}

	for (que = 0; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp || !test_bit(que, ha->rsp_qid_map))
			continue;
		/* Shadow in-pointer lives just past the ring entries. */
		rsp->in_ptr = (void *)(rsp->ring + rsp->length);
		*rsp->in_ptr = 0;
		/* Initialize response queue entries */
		if (IS_QLAFX00(ha))
			qlafx00_init_response_q_entries(rsp);
		else
			qla2x00_init_response_q_entries(rsp);
	}

	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
	ha->tgt.atio_ring_index = 0;
	/* Initialize ATIO queue entries */
	qlt_init_atio_q_entries(vha);

	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");

	if (IS_QLAFX00(ha)) {
		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
		goto next_check;
	}

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	if (IS_FWI2_CAPABLE(ha)) {
		mid_init_cb->options = cpu_to_le16(BIT_1);
		mid_init_cb->init_cb.execution_throttle =
		    cpu_to_le16(ha->cur_fw_xcb_count);
		ha->flags.dport_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
		/* FA-WWPN Status */
		ha->flags.fawwpn_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
	}

	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00d2,
		    "Init Firmware **** FAILED ****.\n");
	} else {
		ql_dbg(ql_dbg_init, vha, 0x00d3,
		    "Init Firmware -- success.\n");
		QLA_FW_STARTED(ha);
		vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
	}

	return (rval);
}
4143
4144
4145
4146
4147
4148
4149
/*
 * qla2x00_fw_ready() - Poll the firmware state until it reports READY,
 * the cable is deemed unplugged, or the overall wait window expires.
 * @vha: adapter host.
 *
 * Two deadlines are maintained: @mtime (minimum wait, after which a
 * non-ready state with the loop-down timer running means "no cable") and
 * @wtime (total wait derived from retry_count * login_timeout).
 *
 * Returns QLA_SUCCESS when the firmware reaches FSTATE_READY, otherwise
 * QLA_FUNCTION_FAILED (or the failing mailbox status).
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime, mtime, cs84xx_time;
	uint16_t min_wait;	/* Minimum wait time if loop is down */
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint16_t state[6];
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(vha->hw))
		return qlafx00_fw_ready(vha);

	rval = QLA_SUCCESS;

	/* Time to wait for loop down */
	if (IS_P3P_TYPE(ha))
		min_wait = 30;
	else
		min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds
	 * for mailbox-command overhead; never less than min_wait.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x801e,
		    "Waiting for LIP to complete.\n");

	do {
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x801f,
				    "fw_state=%x 84xx=%x.\n", state[0],
				    state[2]);
				if ((state[2] & FSTATE_LOGGED_IN) &&
				     (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					ql_dbg(ql_dbg_taskm, vha, 0x8028,
					    "Sending verify iocb.\n");

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS) {
						ql_log(ql_log_warn,
						    vha, 0x8007,
						    "Init chip failed.\n");
						break;
					}

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					ql_dbg(ql_dbg_taskm, vha, 0x8008,
					    "Increasing wait time by %ld. "
					    "New time %ld.\n", cs84xx_time,
					    wtime);
				}
			} else if (state[0] == FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x8037,
				    "F/W Ready - OK.\n");

				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login. */
				if (time_after_eq(jiffies, mtime)) {
					ql_log(ql_log_info, vha, 0x8038,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime) ||
				ha->flags.isp82xx_fw_hung)
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);
	} while (1);

	ql_dbg(ql_dbg_taskm, vha, 0x803a,
	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
	    state[1], state[2], state[3], state[4], state[5], jiffies);

	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
		ql_log(ql_log_warn, vha, 0x803b,
		    "Firmware ready **** FAILED ****.\n");
	}

	return (rval);
}
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
/*
 * qla2x00_configure_hba() - Retrieve the adapter's loop ID and port ID from
 * firmware and decode the discovered topology.
 * @vha: adapter host.
 *
 * Sets vha->loop_id, ha->current_topology, ha->operating_mode and
 * ha->switch_cap, and updates the host port-ID map.  Returns QLA_SUCCESS,
 * or a failure status when the loop is transitioning or the ID mailbox
 * command fails.
 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int       rval;
	uint16_t      loop_id;
	uint16_t      topo;
	uint16_t      sw_cap;
	uint8_t       al_pa;
	uint8_t       area;
	uint8_t       domain;
	char		connect_type[22];
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	port_id_t id;
	unsigned long flags;

	/* Get host addresses. */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    IS_CNA_CAPABLE(ha) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			ql_dbg(ql_dbg_disc, vha, 0x2008,
			    "Loop is in a transition state.\n");
		} else {
			ql_log(ql_log_warn, vha, 0x2009,
			    "Unable to get host loop ID.\n");
			/* Status 0x1b = firmware not initialized; try a link
			 * (re)initialize on the physical port first. */
			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
				ql_log(ql_log_warn, vha, 0x1151,
				    "Doing link init.\n");
				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
					return rval;
			}
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	if (topo == 4) {
		ql_log(ql_log_info, vha, 0x200a,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;
	ha->switch_cap = 0;

	switch (topo) {
	case 0:
		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		ql_dbg(ql_dbg_disc, vha, 0x200f,
		    "HBA in unknown topology %x, using NL.\n", topo);
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	id.b.domain = domain;
	id.b.area = area;
	id.b.al_pa = al_pa;
	id.b.rsvd_1 = 0;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* In N2N with a "bigger" peer, the peer assigns our ID later. */
	if (!(topo == 2 && ha->flags.n2n_bigger))
		qlt_update_host_map(vha, id);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x2010,
		    "Topology - %s, Host Loop address 0x%x.\n",
		    connect_type, vha->loop_id);

	return(rval);
}
4397
/*
 * qla2x00_set_model_info() - Populate ha->model_number / ha->model_desc
 * from an NVRAM-supplied model string, the subsystem-ID lookup table, or
 * the supplied default.
 * @vha:   adapter host.
 * @model: raw model bytes from NVRAM (may be all-zero / space padded).
 * @len:   length of @model (clamped to 16 bytes).
 * @def:   fallback model string when NVRAM has none and no table entry.
 */
inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
	const char *def)
{
	char *st, *en;
	uint16_t index;
	uint64_t zero[2] = { 0 };
	struct qla_hw_data *ha = vha->hw;
	/* These chip families don't use the qla2x00_model_name table. */
	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);

	if (len > sizeof(zero))
		len = sizeof(zero);
	if (memcmp(model, &zero, len) != 0) {
		/* NVRAM holds a real model string: copy and trim trailing
		 * spaces/NULs from the end. */
		memcpy(ha->model_number, model, len);
		st = en = ha->model_number;
		en += len - 1;
		while (en > st) {
			if (*en != 0x20 && *en != 0x00)
				break;
			*en-- = '\0';
		}

		/* Description comes from the table (pairs of number/desc). */
		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES)
			strlcpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc));
	} else {
		/* No NVRAM model: take both number and description from the
		 * table, or fall back to @def. */
		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES) {
			strlcpy(ha->model_number,
			    qla2x00_model_name[index * 2],
			    sizeof(ha->model_number));
			strlcpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc));
		} else {
			strlcpy(ha->model_number, def,
			    sizeof(ha->model_number));
		}
	}
	/* FWI2 parts can refine the description from the VPD "\x82" field. */
	if (IS_FWI2_CAPABLE(ha))
		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
		    sizeof(ha->model_desc));
}
4448
4449
4450
4451
4452 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
4453 {
4454 #ifdef CONFIG_SPARC
4455 struct qla_hw_data *ha = vha->hw;
4456 struct pci_dev *pdev = ha->pdev;
4457 struct device_node *dp = pci_device_to_OF_node(pdev);
4458 const u8 *val;
4459 int len;
4460
4461 val = of_get_property(dp, "port-wwn", &len);
4462 if (val && len >= WWN_SIZE)
4463 memcpy(nv->port_name, val, WWN_SIZE);
4464
4465 val = of_get_property(dp, "node-wwn", &len);
4466 if (val && len >= WWN_SIZE)
4467 memcpy(nv->node_name, val, WWN_SIZE);
4468 #endif
4469 }
4470
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481
4482
4483
/*
 * qla2x00_nvram_config() - Read and validate ISP2xxx NVRAM, fall back to
 * safe defaults on checksum failure, and translate NVRAM settings into the
 * init control block and driver (ha/vha) parameters.
 * @vha: adapter host.
 *
 * Returns QLA_SUCCESS, or 1 when the NVRAM image was invalid and defaults
 * were substituted.
 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int             rval;
	uint8_t         chksum = 0;
	uint16_t        cnt;
	uint8_t         *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t       *icb = ha->init_cb;
	nvram_t         *nv = ha->nvram;
	uint8_t         *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->nvram_base = 0;
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		/* Function 1 uses the second half of NVRAM. */
		if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and compute the 8-bit additive checksum. */
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
	    "Contents of NVRAM.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
	    nv, ha->nvram_size);

	/* Bad NVRAM data (checksum must sum to 0, id must be "ISP "). */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x0064,
		    "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_log(ql_log_warn, vha, 0x0065,
		    "Falling back to "
		    "functioning (yet invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block, per chip family.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = 2048;
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = 1024;
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = 1024;
		}

		nv->max_iocb_allocation = cpu_to_le16(256);
		nv->execution_throttle = cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		/* Placeholder WWPN (21:00:00:e0:8b:...). */
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = cpu_to_le16(8);
		nv->link_down_timeout = 60;

		rval = 1;	/* Report that defaults were substituted. */
	}

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options: force Fairness/Full-Duplex/FC-Tape
	 * on, Target-Mode/Initiator-Mode-disable/Descending-Loop-ID off.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->special_options[0] &= ~BIT_6;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		if (IS_QLA2300(ha)) {
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control
	 * block (first contiguous region, then add_firmware_options region).
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half. */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename
		 * was not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */

	/*
	 * BIT_7 in the host-parameters section allows extended error
	 * logging to be enabled from NVRAM.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	if (nv->login_timeout != ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 * 	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout =	 nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	ha->login_retry_count  = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		/* Enable ZIO. */
		if (!vha->flags.init_done) {
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
			    icb->interrupt_delay_timer : 2;
		}
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		vha->flags.process_response_queue = 0;
		if (ha->zio_mode != QLA_ZIO_DISABLED) {
			ha->zio_mode = QLA_ZIO_MODE_6;

			ql_log(ql_log_info, vha, 0x0068,
			    "ZIO mode %d enabled; timer delay (%d us).\n",
			    ha->zio_mode, ha->zio_timer * 100);

			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
			vha->flags.process_response_queue = 1;
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0069,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
4781
4782 static void
4783 qla2x00_rport_del(void *data)
4784 {
4785 fc_port_t *fcport = data;
4786 struct fc_rport *rport;
4787 unsigned long flags;
4788
4789 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
4790 rport = fcport->drport ? fcport->drport : fcport->rport;
4791 fcport->drport = NULL;
4792 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
4793 if (rport) {
4794 ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
4795 "%s %8phN. rport %p roles %x\n",
4796 __func__, fcport->port_name, rport,
4797 rport->roles);
4798
4799 fc_remote_port_delete(rport);
4800 }
4801 }
4802
4803 void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
4804 {
4805 int old_state;
4806
4807 old_state = atomic_read(&fcport->state);
4808 atomic_set(&fcport->state, state);
4809
4810
4811 if (old_state && old_state != state) {
4812 ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
4813 "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
4814 fcport->port_name, port_state_str[old_state],
4815 port_state_str[state], fcport->d_id.b.domain,
4816 fcport->d_id.b.area, fcport->d_id.b.al_pa);
4817 }
4818 }
4819
4820
4821
4822
4823
4824
4825
4826
4827 fc_port_t *
4828 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
4829 {
4830 fc_port_t *fcport;
4831
4832 fcport = kzalloc(sizeof(fc_port_t), flags);
4833 if (!fcport)
4834 return NULL;
4835
4836 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
4837 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
4838 flags);
4839 if (!fcport->ct_desc.ct_sns) {
4840 ql_log(ql_log_warn, vha, 0xd049,
4841 "Failed to allocate ct_sns request.\n");
4842 kfree(fcport);
4843 return NULL;
4844 }
4845
4846
4847 fcport->vha = vha;
4848 fcport->port_type = FCT_UNKNOWN;
4849 fcport->loop_id = FC_NO_LOOP_ID;
4850 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
4851 fcport->supported_classes = FC_COS_UNSPECIFIED;
4852 fcport->fp_speed = PORT_SPEED_UNKNOWN;
4853
4854 fcport->disc_state = DSC_DELETED;
4855 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
4856 fcport->deleted = QLA_SESS_DELETED;
4857 fcport->login_retry = vha->hw->login_retry_count;
4858 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
4859 fcport->logout_on_delete = 1;
4860
4861 if (!fcport->ct_desc.ct_sns) {
4862 ql_log(ql_log_warn, vha, 0xd049,
4863 "Failed to allocate ct_sns request.\n");
4864 kfree(fcport);
4865 return NULL;
4866 }
4867
4868 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
4869 INIT_WORK(&fcport->free_work, qlt_free_session_done);
4870 INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
4871 INIT_LIST_HEAD(&fcport->gnl_entry);
4872 INIT_LIST_HEAD(&fcport->list);
4873
4874 return fcport;
4875 }
4876
4877 void
4878 qla2x00_free_fcport(fc_port_t *fcport)
4879 {
4880 if (fcport->ct_desc.ct_sns) {
4881 dma_free_coherent(&fcport->vha->hw->pdev->dev,
4882 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
4883 fcport->ct_desc.ct_sns_dma);
4884
4885 fcport->ct_desc.ct_sns = NULL;
4886 }
4887 list_del(&fcport->list);
4888 qla2x00_clear_loop_id(fcport);
4889 kfree(fcport);
4890 }
4891
4892
4893
4894
4895
4896
4897
4898
4899
4900
4901
4902
4903
/*
 * qla2x00_configure_loop() - Run one pass of topology-driven discovery.
 * @vha: host to configure
 *
 * Optionally reconfigures the HBA (on LOCAL_LOOP_UPDATE), then decides
 * from the current topology whether a local-loop scan, a fabric/RSCN
 * scan, or both are needed, runs them, and marks the loop READY on
 * success.  If a LOOP_RESYNC arrives mid-pass, the consumed dpc flags
 * are re-armed so the next pass repeats the work.
 *
 * Returns QLA_SUCCESS or an error status from the sub-steps.
 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	/* Get host addresses first if a port update is pending. */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	/* Snapshot dpc flags; save_flags is used to re-arm them on resync. */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * Consume both update bits now; from here on we work off the local
	 * 'flags' copy so both can be processed in one pass.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);

	/* Determine what we need to do based on topology. */
	if (ha->current_topology == ISP_CFG_FL &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
		/* FL port: local loop plus fabric. */
		set_bit(RSCN_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_F &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
		/* F port: fabric scan only, no local loop. */
		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_NL ||
	    ha->current_topology == ISP_CFG_N) {
		/* Loop or N2N: local scan only. */
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		/* Topology unknown (offline/resetting): do both. */
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		}
		else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			/*
			 * In target/dual mode, drain any ATIO entries that
			 * queued up while discovery was in progress.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				spin_lock_irqsave(&ha->tgt.atio_lock, flags);
				qlt_24xx_process_atio_queue(vha, 0);
				spin_unlock_irqrestore(&ha->tgt.atio_lock,
				    flags);
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally.\n", __func__);
	}

	/* A resync arrived mid-pass: re-arm the bits we consumed above. */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}
5020
5021
5022
5023
5024
5025
5026
5027
5028
5029
5030
5031 static int
5032 qla2x00_configure_local_loop(scsi_qla_host_t *vha)
5033 {
5034 int rval, rval2;
5035 int found_devs;
5036 int found;
5037 fc_port_t *fcport, *new_fcport;
5038
5039 uint16_t index;
5040 uint16_t entries;
5041 struct gid_list_info *gid;
5042 uint16_t loop_id;
5043 uint8_t domain, area, al_pa;
5044 struct qla_hw_data *ha = vha->hw;
5045 unsigned long flags;
5046
5047
5048 if (N2N_TOPO(ha)) {
5049 if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
5050
5051 u32 *bp, i, sz;
5052
5053 memset(ha->init_cb, 0, ha->init_cb_size);
5054 sz = min_t(int, sizeof(struct els_plogi_payload),
5055 ha->init_cb_size);
5056 rval = qla24xx_get_port_login_templ(vha,
5057 ha->init_cb_dma, (void *)ha->init_cb, sz);
5058 if (rval == QLA_SUCCESS) {
5059 bp = (uint32_t *)ha->init_cb;
5060 for (i = 0; i < sz/4 ; i++, bp++)
5061 *bp = cpu_to_be32(*bp);
5062
5063 memcpy(&ha->plogi_els_payld.data,
5064 (void *)ha->init_cb,
5065 sizeof(ha->plogi_els_payld.data));
5066 } else {
5067 ql_dbg(ql_dbg_init, vha, 0x00d1,
5068 "PLOGI ELS param read fail.\n");
5069 goto skip_login;
5070 }
5071 }
5072
5073 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5074 if (fcport->n2n_flag) {
5075 qla24xx_fcport_handle_login(vha, fcport);
5076 return QLA_SUCCESS;
5077 }
5078 }
5079 skip_login:
5080 spin_lock_irqsave(&vha->work_lock, flags);
5081 vha->scan.scan_retry++;
5082 spin_unlock_irqrestore(&vha->work_lock, flags);
5083
5084 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
5085 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5086 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5087 }
5088 }
5089
5090 found_devs = 0;
5091 new_fcport = NULL;
5092 entries = MAX_FIBRE_DEVICES_LOOP;
5093
5094
5095 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
5096 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
5097 &entries);
5098 if (rval != QLA_SUCCESS)
5099 goto cleanup_allocation;
5100
5101 ql_dbg(ql_dbg_disc, vha, 0x2011,
5102 "Entries in ID list (%d).\n", entries);
5103 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
5104 ha->gid_list, entries * sizeof(*ha->gid_list));
5105
5106 if (entries == 0) {
5107 spin_lock_irqsave(&vha->work_lock, flags);
5108 vha->scan.scan_retry++;
5109 spin_unlock_irqrestore(&vha->work_lock, flags);
5110
5111 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
5112 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5113 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5114 }
5115 } else {
5116 vha->scan.scan_retry = 0;
5117 }
5118
5119 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5120 fcport->scan_state = QLA_FCPORT_SCAN;
5121 }
5122
5123
5124 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5125 if (new_fcport == NULL) {
5126 ql_log(ql_log_warn, vha, 0x2012,
5127 "Memory allocation failed for fcport.\n");
5128 rval = QLA_MEMORY_ALLOC_FAILED;
5129 goto cleanup_allocation;
5130 }
5131 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
5132
5133
5134 gid = ha->gid_list;
5135 for (index = 0; index < entries; index++) {
5136 domain = gid->domain;
5137 area = gid->area;
5138 al_pa = gid->al_pa;
5139 if (IS_QLA2100(ha) || IS_QLA2200(ha))
5140 loop_id = gid->loop_id_2100;
5141 else
5142 loop_id = le16_to_cpu(gid->loop_id);
5143 gid = (void *)gid + ha->gid_list_info_size;
5144
5145
5146 if ((domain & 0xf0) == 0xf0)
5147 continue;
5148
5149
5150 if (area && domain && ((area != vha->d_id.b.area) ||
5151 (domain != vha->d_id.b.domain)) &&
5152 (ha->current_topology == ISP_CFG_NL))
5153 continue;
5154
5155
5156
5157 if (loop_id > LAST_LOCAL_LOOP_ID)
5158 continue;
5159
5160 memset(new_fcport->port_name, 0, WWN_SIZE);
5161
5162
5163 new_fcport->d_id.b.domain = domain;
5164 new_fcport->d_id.b.area = area;
5165 new_fcport->d_id.b.al_pa = al_pa;
5166 new_fcport->loop_id = loop_id;
5167 new_fcport->scan_state = QLA_FCPORT_FOUND;
5168
5169 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
5170 if (rval2 != QLA_SUCCESS) {
5171 ql_dbg(ql_dbg_disc, vha, 0x2097,
5172 "Failed to retrieve fcport information "
5173 "-- get_port_database=%x, loop_id=0x%04x.\n",
5174 rval2, new_fcport->loop_id);
5175
5176 if (ha->current_topology != ISP_CFG_N) {
5177 ql_dbg(ql_dbg_disc, vha, 0x2105,
5178 "Scheduling resync.\n");
5179 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5180 continue;
5181 }
5182 }
5183
5184 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5185
5186 found = 0;
5187 fcport = NULL;
5188 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5189 if (memcmp(new_fcport->port_name, fcport->port_name,
5190 WWN_SIZE))
5191 continue;
5192
5193 fcport->flags &= ~FCF_FABRIC_DEVICE;
5194 fcport->loop_id = new_fcport->loop_id;
5195 fcport->port_type = new_fcport->port_type;
5196 fcport->d_id.b24 = new_fcport->d_id.b24;
5197 memcpy(fcport->node_name, new_fcport->node_name,
5198 WWN_SIZE);
5199 fcport->scan_state = QLA_FCPORT_FOUND;
5200 found++;
5201 break;
5202 }
5203
5204 if (!found) {
5205
5206 list_add_tail(&new_fcport->list, &vha->vp_fcports);
5207
5208
5209 fcport = new_fcport;
5210
5211 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5212
5213 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5214
5215 if (new_fcport == NULL) {
5216 ql_log(ql_log_warn, vha, 0xd031,
5217 "Failed to allocate memory for fcport.\n");
5218 rval = QLA_MEMORY_ALLOC_FAILED;
5219 goto cleanup_allocation;
5220 }
5221 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5222 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
5223 }
5224
5225 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5226
5227
5228 fcport->fp_speed = ha->link_data_rate;
5229
5230 found_devs++;
5231 }
5232
5233 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5234 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5235 break;
5236
5237 if (fcport->scan_state == QLA_FCPORT_SCAN) {
5238 if ((qla_dual_mode_enabled(vha) ||
5239 qla_ini_mode_enabled(vha)) &&
5240 atomic_read(&fcport->state) == FCS_ONLINE) {
5241 qla2x00_mark_device_lost(vha, fcport,
5242 ql2xplogiabsentdevice, 0);
5243 if (fcport->loop_id != FC_NO_LOOP_ID &&
5244 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
5245 fcport->port_type != FCT_INITIATOR &&
5246 fcport->port_type != FCT_BROADCAST) {
5247 ql_dbg(ql_dbg_disc, vha, 0x20f0,
5248 "%s %d %8phC post del sess\n",
5249 __func__, __LINE__,
5250 fcport->port_name);
5251
5252 qlt_schedule_sess_for_deletion(fcport);
5253 continue;
5254 }
5255 }
5256 }
5257
5258 if (fcport->scan_state == QLA_FCPORT_FOUND)
5259 qla24xx_fcport_handle_login(vha, fcport);
5260 }
5261
5262 cleanup_allocation:
5263 kfree(new_fcport);
5264
5265 if (rval != QLA_SUCCESS) {
5266 ql_dbg(ql_dbg_disc, vha, 0x2098,
5267 "Configure local loop error exit: rval=%x.\n", rval);
5268 }
5269
5270 return (rval);
5271 }
5272
5273 static void
5274 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
5275 {
5276 int rval;
5277 uint16_t mb[MAILBOX_REGISTER_COUNT];
5278 struct qla_hw_data *ha = vha->hw;
5279
5280 if (!IS_IIDMA_CAPABLE(ha))
5281 return;
5282
5283 if (atomic_read(&fcport->state) != FCS_ONLINE)
5284 return;
5285
5286 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
5287 fcport->fp_speed > ha->link_data_rate ||
5288 !ha->flags.gpsc_supported)
5289 return;
5290
5291 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
5292 mb);
5293 if (rval != QLA_SUCCESS) {
5294 ql_dbg(ql_dbg_disc, vha, 0x2004,
5295 "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
5296 fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
5297 } else {
5298 ql_dbg(ql_dbg_disc, vha, 0x2005,
5299 "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
5300 qla2x00_get_link_speed_str(ha, fcport->fp_speed),
5301 fcport->fp_speed, fcport->port_name);
5302 }
5303 }
5304
/*
 * qla_do_iidma_work() - Apply iIDMA speed and FCP priority settings to
 * @fcport.  qla2x00_iidma_fcport() returns early when the HBA lacks
 * iIDMA/GPSC support or the port is not online.
 */
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);
}
5310
5311 int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
5312 {
5313 struct qla_work_evt *e;
5314
5315 e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
5316 if (!e)
5317 return QLA_FUNCTION_FAILED;
5318
5319 e->u.fcport.fcport = fcport;
5320 return qla2x00_post_work(vha, e);
5321 }
5322
5323
/*
 * qla2x00_reg_remote_port() - Register @fcport with the FC transport.
 * @vha: parent host
 * @fcport: port to expose as an fc_rport
 *
 * No-op when the port is already online.  Allocates the transport
 * rport with roles unknown, wires the back-pointer under the host
 * lock, then reports the real roles via fc_remote_port_rolechg().
 */
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *rport;
	unsigned long flags;

	if (atomic_read(&fcport->state) == FCS_ONLINE)
		return;

	rport_ids.node_name = wwn_to_u64(fcport->node_name);
	rport_ids.port_name = wwn_to_u64(fcport->port_name);
	rport_ids.port_id = fcport->d_id.b.domain << 16 |
	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2006,
		    "Unable to allocate fc remote port.\n");
		return;
	}

	/* The back-pointer in dd_data is written under the host lock. */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	*((fc_port_t **)rport->dd_data) = fcport;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);

	rport->supported_classes = fcport->supported_classes;

	/*
	 * Translate port type to transport roles.  FCP types compare with
	 * ==, NVMe types are flag bits tested with &.
	 */
	rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
	if (fcport->port_type == FCT_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
	if (fcport->port_type == FCT_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
	if (fcport->port_type & FCT_NVME_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
	if (fcport->port_type & FCT_NVME_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
	if (fcport->port_type & FCT_NVME_DISCOVERY)
		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;

	ql_dbg(ql_dbg_disc, vha, 0x20ee,
	    "%s %8phN. rport %p is %s mode\n",
	    __func__, fcport->port_name, rport,
	    (fcport->port_type == FCT_TARGET) ? "tgt" :
	    ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));

	fc_remote_port_rolechg(rport, rport_ids.roles);
}
5372
5373
5374
5375
5376
5377
5378
5379
5380
5381
5382
5383
5384
5385
5386
5387
/*
 * qla2x00_update_fcport() - Transition @fcport to its logged-in state.
 * @vha: parent host
 * @fcport: port that has completed login
 *
 * Resets login bookkeeping, applies iIDMA settings, registers the port
 * with the FC transport and/or the target core depending on the active
 * mode, marks it FCS_ONLINE, optionally posts GFPNID/GPSC refresh work,
 * and finally sets DSC_LOGIN_COMPLETE.  NVMe ports register with the
 * NVMe transport and return early.
 */
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	/* Switch-reserved addresses are never registered. */
	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
	    __func__, fcport->port_name);

	fcport->disc_state = DSC_UPD_FCPORT;
	fcport->login_retry = vha->hw->login_retry_count;
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	fcport->deleted = 0;
	fcport->logout_on_delete = 1;
	fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;

	/* In N2N/loop topologies keep the firmware-assigned nport handle. */
	switch (vha->hw->current_topology) {
	case ISP_CFG_N:
	case ISP_CFG_NL:
		fcport->keep_nport_handle = 1;
		break;
	default:
		break;
	}

	qla2x00_iidma_fcport(vha, fcport);

	/* NVMe ports take the NVMe-FC registration path and stop here. */
	if (fcport->fc4f_nvme) {
		qla_nvme_register_remote(vha, fcport);
		fcport->disc_state = DSC_LOGIN_COMPLETE;
		qla2x00_set_fcport_state(fcport, FCS_ONLINE);
		return;
	}

	qla24xx_update_fcport_fcp_prio(vha, fcport);

	/* Register with initiator and/or target side per active mode. */
	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
		qla2x00_reg_remote_port(vha, fcport);
		break;
	case MODE_TARGET:
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	case MODE_DUAL:
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	default:
		break;
	}

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);

	if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
		/* Refresh FPN after an address change, else refresh speed. */
		if (fcport->id_changed) {
			fcport->id_changed = 0;
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gfpnid fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gfpnid_work(vha, fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gpsc_work(vha, fcport);
		}
	}

	fcport->disc_state = DSC_LOGIN_COMPLETE;
}
5464
5465 void qla_register_fcport_fn(struct work_struct *work)
5466 {
5467 fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
5468 u32 rscn_gen = fcport->rscn_gen;
5469 u16 data[2];
5470
5471 if (IS_SW_RESV_ADDR(fcport->d_id))
5472 return;
5473
5474 qla2x00_update_fcport(fcport->vha, fcport);
5475
5476 if (rscn_gen != fcport->rscn_gen) {
5477
5478 switch (fcport->next_disc_state) {
5479 case DSC_DELETE_PEND:
5480 qlt_schedule_sess_for_deletion(fcport);
5481 break;
5482 case DSC_ADISC:
5483 data[0] = data[1] = 0;
5484 qla2x00_post_async_adisc_work(fcport->vha, fcport,
5485 data);
5486 break;
5487 default:
5488 break;
5489 }
5490 }
5491 }
5492
5493
5494
5495
5496
5497
5498
5499
5500
5501
5502
5503
/*
 * qla2x00_configure_fabric() - Discover and set up fabric devices.
 * @vha: host to configure
 *
 * Verifies a fabric is present (GET_PORT_NAME of the F/FL port),
 * enables RSCN reception in target/dual mode, logs into the management
 * and SNS servers, performs the name-server registrations
 * (RFT_ID/RFF_ID/RNN_ID/RSNN_NN) when flagged, and then scans the
 * fabric (async GPN_FT or legacy device walk).
 *
 * Returns QLA_SUCCESS (including the "no switch" case) or an error.
 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int rval;
	fc_port_t *fcport;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint16_t loop_id;
	LIST_HEAD(new_fcports);
	struct qla_hw_data *ha = vha->hw;
	int discovery_gen;

	/* If FL port exists, then SNS is present. */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;

	/* Target/dual mode needs RSCNs to track remote initiators. */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
			    "Failed to enable receiving of RSCN requests: 0x%x.\n",
			    rval);
	}

	/* do { } while (0) gives the steps a common break-out on resync. */
	do {
		qla2x00_mgmt_svr_login(vha);

		/* FDMI support. */
		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		/* Ensure we are logged into the SNS. */
		loop_id = NPH_SNS_LID(ha);
		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1|BIT_0);
		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_disc, vha, 0x20a1,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return rval;
		}

		/* Name-server registrations; failures are retried on resync. */
		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x20a2,
				    "Register FC-4 TYPE failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209a,
				    "Register FC-4 Features failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (vha->flags.nvme_enabled) {
				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
					ql_dbg(ql_dbg_disc, vha, 0x2049,
					    "Register NVME FC Type Features failed.\n");
				}
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x2104,
				    "Register Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209b,
				    "Register Symbolic Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
					break;
			}
		}

		/*
		 * Bump the discovery generation before scanning so stale
		 * responses from a prior scan generation can be detected.
		 */
		qlt_do_generation_tick(vha, &discovery_gen);

		if (USE_ASYNC_SCAN(ha)) {
			rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
			    NULL);
			if (rval)
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		} else {
			list_for_each_entry(fcport, &vha->vp_fcports, list)
				fcport->scan_state = QLA_FCPORT_SCAN;

			rval = qla2x00_find_all_fabric_devs(vha);
		}
		if (rval != QLA_SUCCESS)
			break;
	} while (0);

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x2068,
		    "Configure fabric error exit rval=%d.\n", rval);

	return (rval);
}
5631
5632
5633
5634
5635
5636
5637
5638
5639
5640
5641
5642
5643
5644
/*
 * qla2x00_find_all_fabric_devs() - Walk the fabric name server and
 * reconcile results against the driver's fcport list.
 * @vha: host to scan
 *
 * Tries a bulk GID_PT/GPN_ID/GNN_ID/GFPN_ID query set first, falling
 * back to iterative GA_NXT if that fails.  Each discovered device is
 * either matched (and refreshed) against an existing fcport or added
 * as a new one.  Afterwards, fabric devices that were not seen are
 * marked lost / scheduled for deletion, and found devices with
 * FCF_LOGIN_NEEDED are handed to the login state machine.
 *
 * Returns QLA_SUCCESS or QLA_MEMORY_ALLOC_FAILED.
 */
static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	fc_port_t *fcport, *new_fcport;
	int found;

	sw_info_t *swl;
	int swl_idx;
	int first_dev, last_dev;
	port_id_t wrap = {}, nxt_d_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	rval = QLA_SUCCESS;

	/* Try GID_PT to get the device list, else fall back to GA_NXT. */
	if (!ha->swl)
		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
		    GFP_KERNEL);
	swl = ha->swl;
	if (!swl) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x209c,
		    "GID_PT allocations failed, fallback on GA_NXT.\n");
	} else {
		/* Any bulk-query failure clears swl -> GA_NXT fallback. */
		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}

		/* Bulk queries succeeded: also fetch FC-4 features. */
		if (swl) {
			qla2x00_gff_id(vha, swl);
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}
	}
	swl_idx = 0;

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x209d,
		    "Failed to allocate memory for fcport.\n");
		return (QLA_MEMORY_ALLOC_FAILED);
	}
	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);

	first_dev = 1;
	last_dev = 0;

	/* Walk candidate loop IDs, one discovered device per iteration. */
	loop_id = ha->min_external_loopid;
	for (; loop_id <= ha->max_loop_id; loop_id++) {
		if (qla2x00_is_reserved_id(vha, loop_id))
			continue;

		if (ha->current_topology == ISP_CFG_FL &&
		    (atomic_read(&vha->loop_down_timer) ||
		    LOOP_TRANSITION(vha))) {
			atomic_set(&vha->loop_down_timer, 0);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			break;
		}

		if (swl != NULL) {
			if (last_dev) {
				/* Force the wrap check below to terminate. */
				wrap.b24 = new_fcport->d_id.b24;
			} else {
				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
				memcpy(new_fcport->node_name,
				    swl[swl_idx].node_name, WWN_SIZE);
				memcpy(new_fcport->port_name,
				    swl[swl_idx].port_name, WWN_SIZE);
				memcpy(new_fcport->fabric_port_name,
				    swl[swl_idx].fabric_port_name, WWN_SIZE);
				new_fcport->fp_speed = swl[swl_idx].fp_speed;
				new_fcport->fc4_type = swl[swl_idx].fc4_type;

				new_fcport->nvme_flag = 0;
				new_fcport->fc4f_nvme = 0;
				if (vha->flags.nvme_enabled &&
				    swl[swl_idx].fc4f_nvme) {
					new_fcport->fc4f_nvme =
					    swl[swl_idx].fc4f_nvme;
					ql_log(ql_log_info, vha, 0x2131,
					    "FOUND: NVME port %8phC as FC Type 28h\n",
					    new_fcport->port_name);
				}

				/* rsvd_1 set marks the list's last entry. */
				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
					last_dev = 1;
				}
				swl_idx++;
			}
		} else {
			/* Send GA_NXT to the switch. */
			rval = qla2x00_ga_nxt(vha, new_fcport);
			if (rval != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x209e,
				    "SNS scan failed -- assuming "
				    "zero-entry result.\n");
				rval = QLA_SUCCESS;
				break;
			}
		}

		/* If wrapped around on the switch device list, exit. */
		if (first_dev) {
			wrap.b24 = new_fcport->d_id.b24;
			first_dev = 0;
		} else if (new_fcport->d_id.b24 == wrap.b24) {
			ql_dbg(ql_dbg_disc, vha, 0x209f,
			    "Device wrap (%02x%02x%02x).\n",
			    new_fcport->d_id.b.domain,
			    new_fcport->d_id.b.area,
			    new_fcport->d_id.b.al_pa);
			break;
		}

		/* Bypass if same physical adapter. */
		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
			continue;

		/* Bypass our own virtual ports. */
		if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
			continue;

		/* Bypass same domain and area of adapter (FL topology). */
		if (((new_fcport->d_id.b24 & 0xffff00) ==
		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
			ISP_CFG_FL)
			continue;

		/* Bypass reserved domain fields. */
		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
			continue;

		/* Bypass ports whose FCP-4 type is not FCP_SCSI. */
		if (ql2xgffidenable &&
		    (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
		    new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
			continue;

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

		/* Locate matching device (by WWPN) in the database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			fcport->scan_state = QLA_FCPORT_FOUND;

			found++;

			/* Update port state. */
			memcpy(fcport->fabric_port_name,
			    new_fcport->fabric_port_name, WWN_SIZE);
			fcport->fp_speed = new_fcport->fp_speed;

			/*
			 * Same address and online (or target mode):
			 * nothing changed, leave as is.
			 */
			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
			    (atomic_read(&fcport->state) == FCS_ONLINE ||
			    (vha->host->active_mode == MODE_TARGET))) {
				break;
			}

			/* Device was not a fabric device before. */
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
				fcport->d_id.b24 = new_fcport->d_id.b24;
				qla2x00_clear_loop_id(fcport);
				fcport->flags |= (FCF_FABRIC_DEVICE |
				    FCF_LOGIN_NEEDED);
				break;
			}

			/*
			 * Port ID changed: in target mode just adopt the
			 * new address; otherwise also flag a re-login.
			 */
			if (qla_tgt_mode_enabled(base_vha)) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
				    "port changed FC ID, %8phC"
				    " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
				    fcport->port_name,
				    fcport->d_id.b.domain,
				    fcport->d_id.b.area,
				    fcport->d_id.b.al_pa,
				    fcport->loop_id,
				    new_fcport->d_id.b.domain,
				    new_fcport->d_id.b.area,
				    new_fcport->d_id.b.al_pa);
				fcport->d_id.b24 = new_fcport->d_id.b24;
				break;
			}

			fcport->d_id.b24 = new_fcport->d_id.b24;
			fcport->flags |= FCF_LOGIN_NEEDED;
			break;
		}

		if (fcport->fc4f_nvme) {
			if (fcport->disc_state == DSC_DELETE_PEND) {
				fcport->disc_state = DSC_GNL;
				vha->fcport_count--;
				fcport->login_succ = 0;
			}
		}

		if (found) {
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			continue;
		}

		/* Device was not in our fcports list: add it. */
		new_fcport->scan_state = QLA_FCPORT_FOUND;
		list_add_tail(&new_fcport->list, &vha->vp_fcports);

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Allocate a new replacement fcport. */
		nxt_d_id.b24 = new_fcport->d_id.b24;
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL) {
			ql_log(ql_log_warn, vha, 0xd032,
			    "Memory allocation failed for fcport.\n");
			return (QLA_MEMORY_ALLOC_FAILED);
		}
		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
		new_fcport->d_id.b24 = nxt_d_id.b24;
	}

	/* Free the spare fcport (never published on vp_fcports). */
	qla2x00_free_fcport(new_fcport);

	/*
	 * Handle fabric devices marked lost, and kick login for those
	 * found that still need it.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
			continue;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
					ql2xplogiabsentdevice, 0);
				/* FCP2 devices are exempt from logout. */
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);
					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND &&
		    (fcport->flags & FCF_LOGIN_NEEDED) != 0)
			qla24xx_fcport_handle_login(vha, fcport);
	}
	return (rval);
}
5941
5942
5943 int
5944 qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
5945 {
5946 int loop_id = FC_NO_LOOP_ID;
5947 int lid = NPH_MGMT_SERVER - vha->vp_idx;
5948 unsigned long flags;
5949 struct qla_hw_data *ha = vha->hw;
5950
5951 if (vha->vp_idx == 0) {
5952 set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
5953 return NPH_MGMT_SERVER;
5954 }
5955
5956
5957 spin_lock_irqsave(&ha->vport_slock, flags);
5958 for (; lid > 0; lid--) {
5959 if (!test_bit(lid, vha->hw->loop_id_map)) {
5960 set_bit(lid, vha->hw->loop_id_map);
5961 loop_id = lid;
5962 break;
5963 }
5964 }
5965 spin_unlock_irqrestore(&ha->vport_slock, flags);
5966
5967 return loop_id;
5968 }
5969
5970
5971
5972
5973
5974
5975
5976
5977
5978
5979
5980
5981
5982
5983
/*
 * qla2x00_fabric_login
 *	Issue a fabric (switch) login for @fcport, retrying across loop-ID
 *	collisions until the firmware accepts one.
 *
 * Input:
 *	vha = adapter block pointer.
 *	fcport = port to log in; loop_id/port_type/flags are updated in place.
 *	next_loopid = out: loop ID the next login attempt should start from.
 *
 * Returns:
 *	QLA_SUCCESS - login completed.
 *	1 - firmware command error; device marked lost.
 *	3 - unhandled mailbox status; loop ID released, retries cleared.
 *	other - transport failure propagated from isp_ops->fabric_login().
 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int	rval;
	int	retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	for (;;) {
		ql_dbg(ql_dbg_disc, vha, 0x2000,
		    "Trying Fabric Login w/loop id 0x%04x for port "
		    "%02x%02x%02x.\n",
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		/* Login fcport on switch. */
		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (rval != QLA_SUCCESS) {
			return rval;
		}
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Port ID already in use under another loop ID
			 * (reported in mb[1]).  Retry with that ID; remember
			 * the ID we abandoned so it can be handed back via
			 * *next_loopid once the login finally succeeds.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			ql_dbg(ql_dbg_disc, vha, 0x2001,
			    "Fabric Login: port in use - next loop "
			    "id=0x%04x, port id= %02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/* Login succeeded. */
			if (retry) {
				/* A retry occurred before; reuse the saved ID. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before; just advance to
				 * the next ID value for the next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/* mb[1] BIT_0: initiator; else target (BIT_1 = FCP-2). */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				if (mb[1] & BIT_1) {
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			/* mb[10] carries supported class-of-service bits. */
			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			if (IS_FWI2_CAPABLE(ha)) {
				if (mb[10] & BIT_7)
					fcport->flags |=
					    FCF_CONF_COMP_SUPPORTED;
			}

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/* Loop ID already used; try the next free one. */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use. */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware command error (e.g. login timeout):
			 * log the port out and declare the device lost.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1, 0);

			rval = 1;
			break;
		} else {
			/* Unrecoverable / unhandled mailbox status. */
			ql_dbg(ql_dbg_disc, vha, 0x2002,
			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id, jiffies);

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_clear_loop_id(fcport);
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}
6114
6115
6116
6117
6118
6119
6120
6121
6122
6123
6124
6125
6126
6127
6128 int
6129 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
6130 {
6131 int rval;
6132 uint16_t mb[MAILBOX_REGISTER_COUNT];
6133
6134 memset(mb, 0, sizeof(mb));
6135 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
6136 if (rval == QLA_SUCCESS) {
6137
6138 if (mb[0] == MBS_COMMAND_ERROR)
6139 rval = 1;
6140 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
6141
6142 rval = 3;
6143 }
6144
6145 return (rval);
6146 }
6147
6148
6149
6150
6151
6152
6153
6154
6155
6156
6157
/*
 * qla2x00_loop_resync
 *	Resync with fibre channel devices: once firmware is ready, repeatedly
 *	reconfigure the loop until it stays stable (LOOP_RESYNC_NEEDED remains
 *	clear) or an abort/loop-down intervenes.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	QLA_SUCCESS on success; QLA_FUNCTION_FAILED when an ISP abort became
 *	necessary; otherwise the qla2x00_fw_ready() failure code.
 */
int
qla2x00_loop_resync(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t wait_time;

	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
	if (vha->flags.online) {
		if (!(rval = qla2x00_fw_ready(vha))) {
			/* Bounded number of reconfigure passes. */
			wait_time = 256;
			do {
				if (!IS_QLAFX00(vha->hw)) {
					/*
					 * Issue a marker after FW becomes
					 * ready.
					 */
					qla2x00_marker(vha, vha->hw->base_qpair,
					    0, 0, MK_SYNC_ALL);
					vha->marker_needed = 0;
				}

				/* Remap devices on Loop. */
				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

				if (IS_QLAFX00(vha->hw))
					qlafx00_configure_devices(vha);
				else
					qla2x00_configure_loop(vha);

				wait_time--;
				/* Loop again only while another resync was
				 * requested and nothing forced us to stop. */
			} while (!atomic_read(&vha->loop_down_timer) &&
				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
				&vha->dpc_flags)));
		}
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		return (QLA_FUNCTION_FAILED);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x206c,
		    "%s *** FAILED ***.\n", __func__);

	return (rval);
}
6205
6206
6207
6208
6209
6210
6211
6212
6213
6214 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
6215 {
6216 int32_t rval = 0;
6217
6218 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
6219
6220 atomic_set(&ha->loop_down_timer, 0);
6221 if (!(ha->device_flags & DFLG_NO_CABLE)) {
6222 atomic_set(&ha->loop_state, LOOP_UP);
6223 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
6224 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
6225 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
6226
6227 rval = qla2x00_loop_resync(ha);
6228 } else
6229 atomic_set(&ha->loop_state, LOOP_DEAD);
6230
6231 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
6232 }
6233
6234 return rval;
6235 }
6236
/*
 * Walk every vport of this adapter and perform deferred rport removal:
 * any fcport with a pending drport (and not UNCONFIGURED) gets its rport
 * deleted.  vport_slock is dropped around qla2x00_rport_del() since that
 * call may sleep; vref_count pins the vport while the lock is released.
 */
void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
	fc_port_t *fcport;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = base_vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Go with deferred removal of rport references. */
	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
		atomic_inc(&vha->vref_count);	/* pin vha across unlock below */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->drport &&
			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
				spin_unlock_irqrestore(&ha->vport_slock, flags);
				qla2x00_rport_del(fcport);

				spin_lock_irqsave(&ha->vport_slock, flags);
			}
		}
		atomic_dec(&vha->vref_count);
		wake_up(&vha->vref_waitq);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
6263
6264
/*
 * qla83xx_reset_ownership
 *	Decide whether this function should own the NIC core reset.
 *	Ownership is claimed when no drivers other than the two FCoE
 *	functions are present and this port number is the lower of the
 *	two FCoE functions.
 */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence, drv_presence_mask;
	uint32_t dev_part_info1, dev_part_info2, class_type;
	uint32_t class_type_mask = 0x3;
	uint16_t fcoe_other_function = 0xffff, i;

	if (IS_QLA8044(ha)) {
		drv_presence = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
		dev_part_info1 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO_INDEX);
		dev_part_info2 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO2);
	} else {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
	}

	/*
	 * Find the partner FCoE function: 4 class-type bits per function,
	 * functions 0-7 in PARTINFO1, 8-15 in PARTINFO2.
	 */
	for (i = 0; i < 8; i++) {
		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
		    (i != ha->portnum)) {
			fcoe_other_function = i;
			break;
		}
	}
	if (fcoe_other_function == 0xffff) {
		for (i = 0; i < 8; i++) {
			class_type = ((dev_part_info2 >> (i * 4)) &
			    class_type_mask);
			if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
			    ((i + 8) != ha->portnum)) {
				fcoe_other_function = i + 8;
				break;
			}
		}
	}
	/*
	 * Prepare drv-presence mask covering every function except our own
	 * and (if found) the partner FCoE function.
	 */
	drv_presence_mask = ~((1 << (ha->portnum)) |
			((fcoe_other_function == 0xffff) ?
			 0 : (1 << (fcoe_other_function))));

	/*
	 * Reset owner: no other drivers present and our port number is
	 * lower than the partner FCoE function's.
	 */
	if (!(drv_presence & drv_presence_mask) &&
	    (ha->portnum < fcoe_other_function)) {
		ql_dbg(ql_dbg_p3p, vha, 0xb07f,
		    "This host is Reset owner.\n");
		ha->flags.nic_core_reset_owner = 1;
	}
}
6323
6324 static int
6325 __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
6326 {
6327 int rval = QLA_SUCCESS;
6328 struct qla_hw_data *ha = vha->hw;
6329 uint32_t drv_ack;
6330
6331 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6332 if (rval == QLA_SUCCESS) {
6333 drv_ack |= (1 << ha->portnum);
6334 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6335 }
6336
6337 return rval;
6338 }
6339
6340 static int
6341 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
6342 {
6343 int rval = QLA_SUCCESS;
6344 struct qla_hw_data *ha = vha->hw;
6345 uint32_t drv_ack;
6346
6347 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6348 if (rval == QLA_SUCCESS) {
6349 drv_ack &= ~(1 << ha->portnum);
6350 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6351 }
6352
6353 return rval;
6354 }
6355
6356 static const char *
6357 qla83xx_dev_state_to_string(uint32_t dev_state)
6358 {
6359 switch (dev_state) {
6360 case QLA8XXX_DEV_COLD:
6361 return "COLD/RE-INIT";
6362 case QLA8XXX_DEV_INITIALIZING:
6363 return "INITIALIZING";
6364 case QLA8XXX_DEV_READY:
6365 return "READY";
6366 case QLA8XXX_DEV_NEED_RESET:
6367 return "NEED RESET";
6368 case QLA8XXX_DEV_NEED_QUIESCENT:
6369 return "NEED QUIESCENT";
6370 case QLA8XXX_DEV_FAILED:
6371 return "FAILED";
6372 case QLA8XXX_DEV_QUIESCENT:
6373 return "QUIESCENT";
6374 default:
6375 return "Unknown";
6376 }
6377 }
6378
6379
/*
 * Record IDC audit information in the QLA83XX_IDC_AUDIT register:
 * either a reset-start timestamp or a completion duration, tagged in the
 * low bits with the initiating port number and the audit type.
 */
void
qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_audit_reg = 0, duration_secs = 0;

	switch (audit_type) {
	case IDC_AUDIT_TIMESTAMP:
		/* Seconds since boot, packed into bits 8+ of the register. */
		ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	case IDC_AUDIT_COMPLETION:
		/*
		 * NOTE(review): idc_audit_ts was stored in seconds above, but
		 * here it is passed through jiffies_to_msecs() as if it were
		 * a jiffies value, so the computed duration looks suspect --
		 * confirm the intended units before relying on this field.
		 */
		duration_secs = ((jiffies_to_msecs(jiffies) -
		    jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	default:
		ql_log(ql_log_warn, vha, 0xb078,
		    "Invalid audit type specified.\n");
		break;
	}
}
6408
6409
/*
 * Begin a NIC core reset: move dev-state to NEED-RESET (if we own the
 * reset) or wait for the owner to do so, then acknowledge via the
 * driver-ack register.  Fails early when reset is administratively
 * disabled in IDC control.
 */
static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_control, dev_state;

	__qla83xx_get_idc_control(vha, &idc_control);
	if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
		ql_log(ql_log_info, vha, 0xb080,
		    "NIC Core reset has been disabled. idc-control=0x%x\n",
		    idc_control);
		return QLA_FUNCTION_FAILED;
	}

	/* Set dev-state to NEED-RESET if we are the reset owner. */
	qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
		    QLA8XXX_DEV_NEED_RESET);
		ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
		qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
	} else {
		const char *state = qla83xx_dev_state_to_string(dev_state);

		ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);

		/*
		 * Not the owner: poll for the READY -> NEED_RESET transition,
		 * releasing the IDC lock across each sleep so the reset owner
		 * can make progress.  NOTE(review): no timeout here -- if the
		 * owner never transitions, this loop spins indefinitely.
		 */
		while (dev_state == QLA8XXX_DEV_READY) {
			qla83xx_idc_unlock(vha, 0);
			msleep(200);
			qla83xx_idc_lock(vha, 0);
			qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
		}
	}

	/* Send IDC ack by setting our bit in the drv-ack register. */
	__qla83xx_set_drv_ack(vha);

	return QLA_SUCCESS;
}
6451
/*
 * Write the IDC control register.
 * NOTE(review): the double-underscore prefix on IDC helpers in this file
 * appears to mark variants that expect the caller to hold the IDC lock --
 * confirm against qla83xx_idc_lock() call sites.
 */
int
__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
{
	return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}
6457
/*
 * Read the IDC control register into *idc_control.
 * NOTE(review): double-underscore variant -- presumably requires the IDC
 * lock to be held by the caller; confirm.
 */
int
__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
{
	return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}
6463
6464 static int
6465 qla83xx_check_driver_presence(scsi_qla_host_t *vha)
6466 {
6467 uint32_t drv_presence = 0;
6468 struct qla_hw_data *ha = vha->hw;
6469
6470 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6471 if (drv_presence & (1 << ha->portnum))
6472 return QLA_SUCCESS;
6473 else
6474 return QLA_TEST_FAILED;
6475 }
6476
/*
 * qla83xx_nic_core_reset
 *	Perform a NIC core reset under the IDC lock: verify IDC
 *	participation, establish reset ownership, initiate the reset and
 *	drive the IDC state machine until it completes, then clear our
 *	driver ack.
 *
 * Returns:
 *	QLA_SUCCESS on a completed reset; QLA_FUNCTION_FAILED when the
 *	device is failed, removed from IDC, or reset is disabled.
 */
int
qla83xx_nic_core_reset(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_p3p, vha, 0xb058,
	    "Entered %s().\n", __func__);

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0xb059,
		    "Device in unrecoverable FAILED state.\n");
		return QLA_FUNCTION_FAILED;
	}

	qla83xx_idc_lock(vha, 0);

	if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xb05a,
		    "Function=0x%x has been removed from IDC participation.\n",
		    ha->portnum);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	qla83xx_reset_ownership(vha);

	rval = qla83xx_initiating_reset(vha);

	/*
	 * Run the IDC state machine to completion; on success the core is
	 * no longer considered hung and our ack bit is cleared.
	 */
	if (rval == QLA_SUCCESS) {
		rval = qla83xx_idc_state_handler(vha);

		if (rval == QLA_SUCCESS)
			ha->flags.nic_core_hung = 0;
		__qla83xx_clear_drv_ack(vha);
	}

exit:
	qla83xx_idc_unlock(vha, 0);

	ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);

	return rval;
}
6525
/*
 * Capture an MCTP dump from firmware into a lazily-allocated coherent
 * DMA buffer (kept for the adapter's lifetime in ha->mctp_dump), then --
 * from port 0 only, and when no other reset handler is active -- restart
 * the NIC firmware.
 *
 * Returns QLA_SUCCESS (possibly overwritten by the firmware-restart
 * status on port 0) or a failure code.
 */
int
qla2xxx_mctp_dump(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (!IS_MCTP_CAPABLE(ha)) {
		/* This message can be removed from the final version. */
		ql_log(ql_log_info, vha, 0x506d,
		    "This board is not MCTP capable\n");
		return rval;
	}

	if (!ha->mctp_dump) {
		/* First capture: allocate the dump buffer once. */
		ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
		    MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);

		if (!ha->mctp_dump) {
			ql_log(ql_log_warn, vha, 0x506e,
			    "Failed to allocate memory for mctp dump\n");
			return rval;
		}
	}

#define MCTP_DUMP_STR_ADDR	0x00000000
	rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
	    MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x506f,
		    "Failed to capture mctp dump\n");
	} else {
		ql_log(ql_log_info, vha, 0x5070,
		    "Mctp dump capture for host (%ld/%p).\n",
		    vha->host_no, ha->mctp_dump);
		ha->mctp_dumped = 1;
	}

	/* Port 0 restarts the NIC firmware if no reset handler is active. */
	if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
		ha->flags.nic_core_reset_hdlr_active = 1;
		rval = qla83xx_restart_nic_firmware(vha);
		if (rval)
			/* NIC Core reset failed. */
			ql_log(ql_log_warn, vha, 0x5071,
			    "Failed to restart nic firmware\n");
		else
			ql_dbg(ql_dbg_p3p, vha, 0xb084,
			    "Restarted NIC firmware successfully.\n");
		ha->flags.nic_core_reset_hdlr_active = 0;
	}

	return rval;

}
6579
6580
6581
6582
6583
6584
6585
6586
6587
/*
 * qla2x00_quiesce_io
 *	Block new I/O by forcing the loop down and marking all devices
 *	lost (on the base port and every vport), then wait for commands
 *	already outstanding to drain -- nothing is aborted.
 *
 * Input:
 *	vha = adapter block pointer.
 */
void
qla2x00_quiesce_io(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;

	ql_dbg(ql_dbg_dpc, vha, 0x401d,
	    "Quiescing I/O - ha=%p.\n", ha);

	atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
		/* Propagate the loss to every virtual port as well. */
		list_for_each_entry(vp, &ha->vp_list, list)
			qla2x00_mark_all_devices_lost(vp, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
				LOOP_DOWN_TIME);
	}
	/* Wait for pending commands on the whole host to complete. */
	WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
	    != QLA_SUCCESS);
}
6612
/*
 * Tear down adapter state ahead of an ISP abort: reset the chip, purge
 * in-flight mailbox commands, force the loop down across all vports,
 * clear per-port login state, and abort all outstanding commands
 * (unless an EEH error is being handled).
 */
void
qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	unsigned long flags;
	fc_port_t *fcport;
	u16 i;

	/*
	 * For P3P (ISP82XX) parts the driver waits for command completion,
	 * so the online flag must stay set here.
	 */
	if (!(IS_P3P_TYPE(ha)))
		vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	vha->qla_stats.total_isp_aborts++;

	ql_log(ql_log_info, vha, 0x00af,
	    "Performing ISP error recovery - ha=%p.\n", ha);

	/* Tell the mailbox path to fail fast while we tear down. */
	ha->flags.purge_mbox = 1;
	/* P3P parts skip the chip reset here (handled elsewhere). */
	if (!(IS_P3P_TYPE(ha)))
		ha->isp_ops->reset_chip(vha);

	ha->link_data_rate = PORT_SPEED_UNKNOWN;
	SAVE_TOPO(ha);
	ha->flags.rida_fmt2 = 0;
	ha->flags.n2n_ae = 0;
	ha->flags.lip_ae = 0;
	ha->current_topology = 0;
	ha->flags.fw_started = 0;
	ha->flags.fw_init_done = 0;
	/* Bump the reset generation and propagate it to every queue pair. */
	ha->chip_reset++;
	ha->base_qpair->chip_reset = ha->chip_reset;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i])
			ha->queue_pair_map[i]->chip_reset =
				ha->base_qpair->chip_reset;
	}

	/* Wake any mailbox waiter stuck in interrupt-wait (stage 3). */
	if (atomic_read(&ha->num_pend_mbx_stage3)) {
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	/* Wait up to ~1s (50 x 20ms) for pending mailbox stages to drain. */
	i = 0;
	while (atomic_read(&ha->num_pend_mbx_stage3) ||
	    atomic_read(&ha->num_pend_mbx_stage2) ||
	    atomic_read(&ha->num_pend_mbx_stage1)) {
		msleep(20);
		i++;
		if (i > 50)
			break;
	}
	ha->flags.purge_mbox = 0;

	/* Force the loop down and mark all devices lost on every port. */
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			/* Pin the vport across the unlocked call below. */
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_mark_all_devices_lost(vp, 0);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    LOOP_DOWN_TIME);
	}

	/* Clear login/async flags on the base port's fcports ... */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
		fcport->scan_state = 0;
	}
	/* ... and on every vport's fcports, with vref pinning as above. */
	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		atomic_inc(&vp->vref_count);
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		list_for_each_entry(fcport, &vp->vp_fcports, list)
			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vp->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (!ha->flags.eeh_busy) {
		/* Make sure ISP82XX I/O DMA is complete before aborting. */
		if (IS_P3P_TYPE(ha)) {
			qla82xx_chip_reset_cleanup(vha);
			ql_log(ql_log_info, vha, 0x00b4,
			    "Done chip reset cleanup.\n");

			/*
			 * Done waiting for pending commands; now the online
			 * flag can be cleared for P3P parts too.
			 */
			vha->flags.online = 0;
		}

		/* Requeue all commands in the outstanding command list. */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
	}

	wmb();
}
6734
6735
6736
6737
6738
6739
6740
6741
6742
6743
6744
/*
 * qla2x00_abort_isp
 *	Reset the ISP and abort all outstanding commands, then restart
 *	firmware, re-enable tracing, and propagate the abort to every
 *	virtual port.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	0 = success (or recovery permanently abandoned);
 *	1 = restart failed, another abort retry has been scheduled.
 */
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t status = 0;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	struct req_que *req = ha->req_q_map[0];
	unsigned long flags;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);

		/* ROM-only abort: cleanup done, do not restart firmware. */
		if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
			ha->flags.chip_reset_done = 1;
			vha->flags.online = 1;
			status = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			return status;
		}

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05c,
			    "Clearing fcoe driver presence.\n");
			if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb073,
				    "Error while clearing DRV-Presence.\n");
		}

		/* PCI channel permanently dead: nothing more to do. */
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			status = 0;
			return status;
		}

		/* Skip the restart when the relevant mode is not enabled. */
		switch (vha->qlini_mode) {
		case QLA2XXX_INI_MODE_DISABLED:
			if (!qla_tgt_mode_enabled(vha))
				return 0;
			break;
		case QLA2XXX_INI_MODE_DUAL:
			if (!qla_dual_mode_enabled(vha))
				return 0;
			break;
		case QLA2XXX_INI_MODE_ENABLED:
		default:
			break;
		}

		ha->isp_ops->get_flash_version(vha, req->ring);

		ha->isp_ops->nvram_config(vha);

		if (!qla2x00_restart_isp(vha)) {
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			if (!atomic_read(&vha->loop_down_timer)) {
				/*
				 * Issue a marker command only when we are
				 * going to start the I/O.
				 */
				vha->marker_needed = 1;
			}

			vha->flags.online = 1;

			ha->isp_ops->enable_intrs(ha);

			ha->isp_abort_cnt = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
				qla2x00_get_fw_version(vha);
			/* Re-arm the FCE trace buffer if one was allocated. */
			if (ha->fce) {
				ha->flags.fce_enabled = 1;
				memset(ha->fce, 0,
				    fce_calc_size(ha->fce_bufs));
				rval = qla2x00_enable_fce_trace(vha,
				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
				    &ha->fce_bufs);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8033,
					    "Unable to reinitialize FCE "
					    "(%d).\n", rval);
					ha->flags.fce_enabled = 0;
				}
			}

			/* Likewise for the EFT trace buffer. */
			if (ha->eft) {
				memset(ha->eft, 0, EFT_SIZE);
				rval = qla2x00_enable_eft_trace(vha,
				    ha->eft_dma, EFT_NUM_BUFFERS);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8034,
					    "Unable to reinitialize EFT "
					    "(%d).\n", rval);
				}
			}
		} else {	/* Restart failed. */
			vha->flags.online = 1;
			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (ha->isp_abort_cnt == 0) {
					ql_log(ql_log_fatal, vha, 0x8035,
					    "ISP error recover failed - "
					    "board disabled.\n");
					/*
					 * Retries exhausted: disable the
					 * board completely.
					 */
					qla2x00_abort_isp_cleanup(vha);
					vha->flags.online = 0;
					clear_bit(ISP_ABORT_RETRY,
					    &vha->dpc_flags);
					status = 0;
				} else { /* Schedule another ISP abort. */
					ha->isp_abort_cnt--;
					ql_dbg(ql_dbg_taskm, vha, 0x8020,
					    "ISP abort - retry remaining %d.\n",
					    ha->isp_abort_cnt);
					status = 1;
				}
			} else {
				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
				ql_dbg(ql_dbg_taskm, vha, 0x8021,
				    "ISP error recovery - retrying (%d) "
				    "more times.\n", ha->isp_abort_cnt);
				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
				status = 1;
			}
		}

	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
		qla2x00_configure_hba(vha);
		/* Propagate the abort to each vport, pinned by vref_count. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05d,
			    "Setting back fcoe driver presence.\n");
			if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb074,
				    "Error while setting DRV-Presence.\n");
		}
	} else {
		ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
		    __func__);
	}

	return(status);
}
6910
6911
6912
6913
6914
6915
6916
6917
6918
6919
6920
/*
 * qla2x00_restart_isp
 *	Restart the ISP after a reset: reload firmware if needed,
 *	reinitialize rings and extra queues, wait for firmware ready,
 *	and request a loop resync.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 */
static int
qla2x00_restart_isp(scsi_qla_host_t *vha)
{
	int status = 0;
	struct qla_hw_data *ha = vha->hw;

	/* If firmware needs to be loaded. */
	if (qla2x00_isp_firmware(vha)) {
		vha->flags.online = 0;
		status = ha->isp_ops->chip_diag(vha);
		if (!status)
			status = qla2x00_setup_chip(vha);
	}

	if (!status && !(status = qla2x00_init_rings(vha))) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		/* Initialize the extra request/response queues in use. */
		qla25xx_init_queues(ha);

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* If no cable then assume it's good. */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}
	return (status);
}
6955
6956 static int
6957 qla25xx_init_queues(struct qla_hw_data *ha)
6958 {
6959 struct rsp_que *rsp = NULL;
6960 struct req_que *req = NULL;
6961 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
6962 int ret = -1;
6963 int i;
6964
6965 for (i = 1; i < ha->max_rsp_queues; i++) {
6966 rsp = ha->rsp_q_map[i];
6967 if (rsp && test_bit(i, ha->rsp_qid_map)) {
6968 rsp->options &= ~BIT_0;
6969 ret = qla25xx_init_rsp_que(base_vha, rsp);
6970 if (ret != QLA_SUCCESS)
6971 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
6972 "%s Rsp que: %d init failed.\n",
6973 __func__, rsp->id);
6974 else
6975 ql_dbg(ql_dbg_init, base_vha, 0x0100,
6976 "%s Rsp que: %d inited.\n",
6977 __func__, rsp->id);
6978 }
6979 }
6980 for (i = 1; i < ha->max_req_queues; i++) {
6981 req = ha->req_q_map[i];
6982 if (req && test_bit(i, ha->req_qid_map)) {
6983
6984 req->options &= ~BIT_0;
6985 ret = qla25xx_init_req_que(base_vha, req);
6986 if (ret != QLA_SUCCESS)
6987 ql_dbg(ql_dbg_init, base_vha, 0x0101,
6988 "%s Req que: %d init failed.\n",
6989 __func__, req->id);
6990 else
6991 ql_dbg(ql_dbg_init, base_vha, 0x0102,
6992 "%s Req que: %d inited.\n",
6993 __func__, req->id);
6994 }
6995 }
6996 return ret;
6997 }
6998
6999
7000
7001
7002
7003
7004
7005
/*
 * qla2x00_reset_adapter
 *	Reset the (2x00-series) adapter: take it offline, disable
 *	interrupts, then pulse RISC reset/release through the HCCR
 *	register under the hardware lock.
 *
 * Input:
 *	vha = adapter block pointer.
 */
int
qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
7025
/*
 * qla24xx_reset_adapter
 *	Reset an ISP24xx-family adapter: take it offline, disable
 *	interrupts, pulse RISC reset/release-pause via HCCR, and (for
 *	non-polling parts) re-enable interrupts.  P3P parts are a no-op.
 *
 * Input:
 *	vha = adapter block pointer.
 */
int
qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	int rval = QLA_SUCCESS;

	if (IS_P3P_TYPE(ha))
		return rval;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);		/* PCI Posting. */
	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);		/* PCI Posting. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}
7052
7053
7054
7055
7056 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
7057 struct nvram_24xx *nv)
7058 {
7059 #ifdef CONFIG_SPARC
7060 struct qla_hw_data *ha = vha->hw;
7061 struct pci_dev *pdev = ha->pdev;
7062 struct device_node *dp = pci_device_to_OF_node(pdev);
7063 const u8 *val;
7064 int len;
7065
7066 val = of_get_property(dp, "port-wwn", &len);
7067 if (val && len >= WWN_SIZE)
7068 memcpy(nv->port_name, val, WWN_SIZE);
7069
7070 val = of_get_property(dp, "node-wwn", &len);
7071 if (val && len >= WWN_SIZE)
7072 memcpy(nv->node_name, val, WWN_SIZE);
7073 #endif
7074 }
7075
7076 int
7077 qla24xx_nvram_config(scsi_qla_host_t *vha)
7078 {
7079 int rval;
7080 struct init_cb_24xx *icb;
7081 struct nvram_24xx *nv;
7082 uint32_t *dptr;
7083 uint8_t *dptr1, *dptr2;
7084 uint32_t chksum;
7085 uint16_t cnt;
7086 struct qla_hw_data *ha = vha->hw;
7087
7088 rval = QLA_SUCCESS;
7089 icb = (struct init_cb_24xx *)ha->init_cb;
7090 nv = ha->nvram;
7091
7092
7093 if (ha->port_no == 0) {
7094 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
7095 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
7096 } else {
7097 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
7098 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
7099 }
7100
7101 ha->nvram_size = sizeof(*nv);
7102 ha->vpd_size = FA_NVRAM_VPD_SIZE;
7103
7104
7105 ha->vpd = ha->nvram + VPD_OFFSET;
7106 ha->isp_ops->read_nvram(vha, ha->vpd,
7107 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
7108
7109
7110 dptr = (uint32_t *)nv;
7111 ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
7112 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
7113 chksum += le32_to_cpu(*dptr);
7114
7115 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
7116 "Contents of NVRAM\n");
7117 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
7118 nv, ha->nvram_size);
7119
7120
7121 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
7122 le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
7123
7124 ql_log(ql_log_warn, vha, 0x006b,
7125 "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
7126 chksum, nv->id, nv->nvram_version);
7127 ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
7128 ql_log(ql_log_warn, vha, 0x006c,
7129 "Falling back to functioning (yet invalid -- WWPN) "
7130 "defaults.\n");
7131
7132
7133
7134
7135 memset(nv, 0, ha->nvram_size);
7136 nv->nvram_version = cpu_to_le16(ICB_VERSION);
7137 nv->version = cpu_to_le16(ICB_VERSION);
7138 nv->frame_payload_size = 2048;
7139 nv->execution_throttle = cpu_to_le16(0xFFFF);
7140 nv->exchange_count = cpu_to_le16(0);
7141 nv->hard_address = cpu_to_le16(124);
7142 nv->port_name[0] = 0x21;
7143 nv->port_name[1] = 0x00 + ha->port_no + 1;
7144 nv->port_name[2] = 0x00;
7145 nv->port_name[3] = 0xe0;
7146 nv->port_name[4] = 0x8b;
7147 nv->port_name[5] = 0x1c;
7148 nv->port_name[6] = 0x55;
7149 nv->port_name[7] = 0x86;
7150 nv->node_name[0] = 0x20;
7151 nv->node_name[1] = 0x00;
7152 nv->node_name[2] = 0x00;
7153 nv->node_name[3] = 0xe0;
7154 nv->node_name[4] = 0x8b;
7155 nv->node_name[5] = 0x1c;
7156 nv->node_name[6] = 0x55;
7157 nv->node_name[7] = 0x86;
7158 qla24xx_nvram_wwn_from_ofw(vha, nv);
7159 nv->login_retry_count = cpu_to_le16(8);
7160 nv->interrupt_delay_timer = cpu_to_le16(0);
7161 nv->login_timeout = cpu_to_le16(0);
7162 nv->firmware_options_1 =
7163 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
7164 nv->firmware_options_2 = cpu_to_le32(2 << 4);
7165 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
7166 nv->firmware_options_3 = cpu_to_le32(2 << 13);
7167 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
7168 nv->efi_parameters = cpu_to_le32(0);
7169 nv->reset_delay = 5;
7170 nv->max_luns_per_target = cpu_to_le16(128);
7171 nv->port_down_retry_count = cpu_to_le16(30);
7172 nv->link_down_timeout = cpu_to_le16(30);
7173
7174 rval = 1;
7175 }
7176
7177 if (qla_tgt_mode_enabled(vha)) {
7178
7179 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
7180
7181 nv->host_p &= cpu_to_le32(~BIT_10);
7182 }
7183
7184 qlt_24xx_config_nvram_stage1(vha, nv);
7185
7186
7187 memset(icb, 0, ha->init_cb_size);
7188
7189
7190 dptr1 = (uint8_t *)icb;
7191 dptr2 = (uint8_t *)&nv->version;
7192 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
7193 while (cnt--)
7194 *dptr1++ = *dptr2++;
7195
7196 icb->login_retry_count = nv->login_retry_count;
7197 icb->link_down_on_nos = nv->link_down_on_nos;
7198
7199
7200 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
7201 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
7202 cnt = (uint8_t *)&icb->reserved_3 -
7203 (uint8_t *)&icb->interrupt_delay_timer;
7204 while (cnt--)
7205 *dptr1++ = *dptr2++;
7206 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
7207
7208
7209
7210 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
7211 "QLA2462");
7212
7213 qlt_24xx_config_nvram_stage2(vha, icb);
7214
7215 if (nv->host_p & cpu_to_le32(BIT_15)) {
7216
7217 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
7218 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
7219 }
7220
7221
7222 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
7223
7224
7225
7226
7227 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
7228 icb->node_name[0] &= 0xF0;
7229 }
7230
7231
7232 ha->flags.disable_risc_code_load = 0;
7233 ha->flags.enable_lip_reset = 0;
7234 ha->flags.enable_lip_full_login =
7235 le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
7236 ha->flags.enable_target_reset =
7237 le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
7238 ha->flags.enable_led_scheme = 0;
7239 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
7240
7241 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
7242 (BIT_6 | BIT_5 | BIT_4)) >> 4;
7243
7244 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
7245 sizeof(ha->fw_seriallink_options24));
7246
7247
7248 ha->serial0 = icb->port_name[5];
7249 ha->serial1 = icb->port_name[6];
7250 ha->serial2 = icb->port_name[7];
7251 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
7252 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
7253
7254 icb->execution_throttle = cpu_to_le16(0xFFFF);
7255
7256 ha->retry_count = le16_to_cpu(nv->login_retry_count);
7257
7258
7259 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
7260 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
7261 if (le16_to_cpu(nv->login_timeout) < 4)
7262 nv->login_timeout = cpu_to_le16(4);
7263 ha->login_timeout = le16_to_cpu(nv->login_timeout);
7264
7265
7266 ha->r_a_tov = 100;
7267
7268 ha->loop_reset_delay = nv->reset_delay;
7269
7270
7271
7272
7273
7274
7275
7276
7277
7278
7279
7280 if (le16_to_cpu(nv->link_down_timeout) == 0) {
7281 ha->loop_down_abort_time =
7282 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
7283 } else {
7284 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
7285 ha->loop_down_abort_time =
7286 (LOOP_DOWN_TIME - ha->link_down_timeout);
7287 }
7288
7289
7290 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
7291 if (qlport_down_retry)
7292 ha->port_down_retry_count = qlport_down_retry;
7293
7294
7295 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
7296 if (ha->port_down_retry_count ==
7297 le16_to_cpu(nv->port_down_retry_count) &&
7298 ha->port_down_retry_count > 3)
7299 ha->login_retry_count = ha->port_down_retry_count;
7300 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
7301 ha->login_retry_count = ha->port_down_retry_count;
7302 if (ql2xloginretrycount)
7303 ha->login_retry_count = ql2xloginretrycount;
7304
7305
7306 icb->firmware_options_3 |= BIT_8;
7307
7308
7309 if (!vha->flags.init_done) {
7310 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
7311 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
7312 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
7313 le16_to_cpu(icb->interrupt_delay_timer) : 2;
7314 }
7315 icb->firmware_options_2 &= cpu_to_le32(
7316 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
7317 if (ha->zio_mode != QLA_ZIO_DISABLED) {
7318 ha->zio_mode = QLA_ZIO_MODE_6;
7319
7320 ql_log(ql_log_info, vha, 0x006f,
7321 "ZIO mode %d enabled; timer delay (%d us).\n",
7322 ha->zio_mode, ha->zio_timer * 100);
7323
7324 icb->firmware_options_2 |= cpu_to_le32(
7325 (uint32_t)ha->zio_mode);
7326 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
7327 }
7328
7329 if (rval) {
7330 ql_log(ql_log_warn, vha, 0x0070,
7331 "NVRAM configuration failed.\n");
7332 }
7333 return (rval);
7334 }
7335
/*
 * Dump one flash image-status record to the init debug log.
 * @name labels which flash region the record came from (e.g.
 * "Primary aux image"); all multi-byte fields are stored little-endian
 * in flash and converted for display.
 */
static void
qla27xx_print_image(struct scsi_qla_host *vha, char *name,
    struct qla27xx_image_status *image_status)
{
	ql_dbg(ql_dbg_init, vha, 0x018b,
	    "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
	    name, "status",
	    image_status->image_status_mask,
	    le16_to_cpu(image_status->generation),
	    image_status->ver_major,
	    image_status->ver_minor,
	    image_status->bitmap,
	    le32_to_cpu(image_status->checksum),
	    le32_to_cpu(image_status->signature));
}
7351
7352 static bool
7353 qla28xx_check_aux_image_status_signature(
7354 struct qla27xx_image_status *image_status)
7355 {
7356 ulong signature = le32_to_cpu(image_status->signature);
7357
7358 return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
7359 }
7360
7361 static bool
7362 qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
7363 {
7364 ulong signature = le32_to_cpu(image_status->signature);
7365
7366 return
7367 signature != QLA27XX_IMG_STATUS_SIGN &&
7368 signature != QLA28XX_IMG_STATUS_SIGN;
7369 }
7370
7371 static ulong
7372 qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
7373 {
7374 uint32_t *p = (void *)image_status;
7375 uint n = sizeof(*image_status) / sizeof(*p);
7376 uint32_t sum = 0;
7377
7378 for ( ; n--; p++)
7379 sum += le32_to_cpup(p);
7380
7381 return sum;
7382 }
7383
7384 static inline uint
7385 qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
7386 {
7387 return aux->bitmap & bitmask ?
7388 QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
7389 }
7390
/*
 * Translate the winning aux image-status record's component bitmap into
 * per-component primary/secondary selections in @active_regions->aux
 * (board config, VPD/NVRAM, and both NPIV config pairs).
 */
static void
qla28xx_component_status(
    struct active_regions *active_regions, struct qla27xx_image_status *aux)
{
	active_regions->aux.board_config =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);

	active_regions->aux.vpd_nvram =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);

	active_regions->aux.npiv_config_0_1 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);

	active_regions->aux.npiv_config_2_3 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
}
7407
/*
 * Compare the generation counters of two image-status records.
 * Returns >0 when primary is newer, <0 when secondary is newer, 0 when
 * equal.  The narrowing to int16_t is deliberate: it keeps the ordering
 * correct across wraparound of the 16-bit generation counter.
 */
static int
qla27xx_compare_image_generation(
    struct qla27xx_image_status *pri_image_status,
    struct qla27xx_image_status *sec_image_status)
{
	/* Signed 16-bit difference orders the counters modulo 2^16. */
	int16_t delta =
	    le16_to_cpu(pri_image_status->generation) -
	    le16_to_cpu(sec_image_status->generation);

	ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);

	return delta;
}
7422
7423 void
7424 qla28xx_get_aux_images(
7425 struct scsi_qla_host *vha, struct active_regions *active_regions)
7426 {
7427 struct qla_hw_data *ha = vha->hw;
7428 struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
7429 bool valid_pri_image = false, valid_sec_image = false;
7430 bool active_pri_image = false, active_sec_image = false;
7431
7432 if (!ha->flt_region_aux_img_status_pri) {
7433 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
7434 goto check_sec_image;
7435 }
7436
7437 qla24xx_read_flash_data(vha, (void *)&pri_aux_image_status,
7438 ha->flt_region_aux_img_status_pri,
7439 sizeof(pri_aux_image_status) >> 2);
7440 qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
7441
7442 if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
7443 ql_dbg(ql_dbg_init, vha, 0x018b,
7444 "Primary aux image signature (%#x) not valid\n",
7445 le32_to_cpu(pri_aux_image_status.signature));
7446 goto check_sec_image;
7447 }
7448
7449 if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
7450 ql_dbg(ql_dbg_init, vha, 0x018c,
7451 "Primary aux image checksum failed\n");
7452 goto check_sec_image;
7453 }
7454
7455 valid_pri_image = true;
7456
7457 if (pri_aux_image_status.image_status_mask & 1) {
7458 ql_dbg(ql_dbg_init, vha, 0x018d,
7459 "Primary aux image is active\n");
7460 active_pri_image = true;
7461 }
7462
7463 check_sec_image:
7464 if (!ha->flt_region_aux_img_status_sec) {
7465 ql_dbg(ql_dbg_init, vha, 0x018a,
7466 "Secondary aux image not addressed\n");
7467 goto check_valid_image;
7468 }
7469
7470 qla24xx_read_flash_data(vha, (void *)&sec_aux_image_status,
7471 ha->flt_region_aux_img_status_sec,
7472 sizeof(sec_aux_image_status) >> 2);
7473 qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
7474
7475 if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
7476 ql_dbg(ql_dbg_init, vha, 0x018b,
7477 "Secondary aux image signature (%#x) not valid\n",
7478 le32_to_cpu(sec_aux_image_status.signature));
7479 goto check_valid_image;
7480 }
7481
7482 if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
7483 ql_dbg(ql_dbg_init, vha, 0x018c,
7484 "Secondary aux image checksum failed\n");
7485 goto check_valid_image;
7486 }
7487
7488 valid_sec_image = true;
7489
7490 if (sec_aux_image_status.image_status_mask & 1) {
7491 ql_dbg(ql_dbg_init, vha, 0x018d,
7492 "Secondary aux image is active\n");
7493 active_sec_image = true;
7494 }
7495
7496 check_valid_image:
7497 if (valid_pri_image && active_pri_image &&
7498 valid_sec_image && active_sec_image) {
7499 if (qla27xx_compare_image_generation(&pri_aux_image_status,
7500 &sec_aux_image_status) >= 0) {
7501 qla28xx_component_status(active_regions,
7502 &pri_aux_image_status);
7503 } else {
7504 qla28xx_component_status(active_regions,
7505 &sec_aux_image_status);
7506 }
7507 } else if (valid_pri_image && active_pri_image) {
7508 qla28xx_component_status(active_regions, &pri_aux_image_status);
7509 } else if (valid_sec_image && active_sec_image) {
7510 qla28xx_component_status(active_regions, &sec_aux_image_status);
7511 }
7512
7513 ql_dbg(ql_dbg_init, vha, 0x018f,
7514 "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
7515 active_regions->aux.board_config,
7516 active_regions->aux.vpd_nvram,
7517 active_regions->aux.npiv_config_0_1,
7518 active_regions->aux.npiv_config_2_3);
7519 }
7520
7521 void
7522 qla27xx_get_active_image(struct scsi_qla_host *vha,
7523 struct active_regions *active_regions)
7524 {
7525 struct qla_hw_data *ha = vha->hw;
7526 struct qla27xx_image_status pri_image_status, sec_image_status;
7527 bool valid_pri_image = false, valid_sec_image = false;
7528 bool active_pri_image = false, active_sec_image = false;
7529
7530 if (!ha->flt_region_img_status_pri) {
7531 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
7532 goto check_sec_image;
7533 }
7534
7535 if (qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
7536 ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
7537 QLA_SUCCESS) {
7538 WARN_ON_ONCE(true);
7539 goto check_sec_image;
7540 }
7541 qla27xx_print_image(vha, "Primary image", &pri_image_status);
7542
7543 if (qla27xx_check_image_status_signature(&pri_image_status)) {
7544 ql_dbg(ql_dbg_init, vha, 0x018b,
7545 "Primary image signature (%#x) not valid\n",
7546 le32_to_cpu(pri_image_status.signature));
7547 goto check_sec_image;
7548 }
7549
7550 if (qla27xx_image_status_checksum(&pri_image_status)) {
7551 ql_dbg(ql_dbg_init, vha, 0x018c,
7552 "Primary image checksum failed\n");
7553 goto check_sec_image;
7554 }
7555
7556 valid_pri_image = true;
7557
7558 if (pri_image_status.image_status_mask & 1) {
7559 ql_dbg(ql_dbg_init, vha, 0x018d,
7560 "Primary image is active\n");
7561 active_pri_image = true;
7562 }
7563
7564 check_sec_image:
7565 if (!ha->flt_region_img_status_sec) {
7566 ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n");
7567 goto check_valid_image;
7568 }
7569
7570 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
7571 ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
7572 qla27xx_print_image(vha, "Secondary image", &sec_image_status);
7573
7574 if (qla27xx_check_image_status_signature(&sec_image_status)) {
7575 ql_dbg(ql_dbg_init, vha, 0x018b,
7576 "Secondary image signature (%#x) not valid\n",
7577 le32_to_cpu(sec_image_status.signature));
7578 goto check_valid_image;
7579 }
7580
7581 if (qla27xx_image_status_checksum(&sec_image_status)) {
7582 ql_dbg(ql_dbg_init, vha, 0x018c,
7583 "Secondary image checksum failed\n");
7584 goto check_valid_image;
7585 }
7586
7587 valid_sec_image = true;
7588
7589 if (sec_image_status.image_status_mask & 1) {
7590 ql_dbg(ql_dbg_init, vha, 0x018d,
7591 "Secondary image is active\n");
7592 active_sec_image = true;
7593 }
7594
7595 check_valid_image:
7596 if (valid_pri_image && active_pri_image)
7597 active_regions->global = QLA27XX_PRIMARY_IMAGE;
7598
7599 if (valid_sec_image && active_sec_image) {
7600 if (!active_regions->global ||
7601 qla27xx_compare_image_generation(
7602 &pri_image_status, &sec_image_status) < 0) {
7603 active_regions->global = QLA27XX_SECONDARY_IMAGE;
7604 }
7605 }
7606
7607 ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
7608 active_regions->global == QLA27XX_DEFAULT_IMAGE ?
7609 "default (boot/fw)" :
7610 active_regions->global == QLA27XX_PRIMARY_IMAGE ?
7611 "primary" :
7612 active_regions->global == QLA27XX_SECONDARY_IMAGE ?
7613 "secondary" : "invalid",
7614 active_regions->global);
7615 }
7616
/*
 * A firmware image header is invalid when dwords 4-7 are all zero
 * (blank flash) or all 0xffffffff (erased flash).
 */
bool qla24xx_risc_firmware_invalid(uint32_t *dword)
{
	uint32_t any_set = dword[4] | dword[5] | dword[6] | dword[7];
	uint32_t any_clear = ~dword[4] | ~dword[5] | ~dword[6] | ~dword[7];

	return !any_set || !any_clear;
}
7623
7624 static int
7625 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
7626 uint32_t faddr)
7627 {
7628 int rval;
7629 uint templates, segments, fragment;
7630 ulong i;
7631 uint j;
7632 ulong dlen;
7633 uint32_t *dcode;
7634 uint32_t risc_addr, risc_size, risc_attr = 0;
7635 struct qla_hw_data *ha = vha->hw;
7636 struct req_que *req = ha->req_q_map[0];
7637 struct fwdt *fwdt = ha->fwdt;
7638
7639 ql_dbg(ql_dbg_init, vha, 0x008b,
7640 "FW: Loading firmware from flash (%x).\n", faddr);
7641
7642 dcode = (void *)req->ring;
7643 qla24xx_read_flash_data(vha, dcode, faddr, 8);
7644 if (qla24xx_risc_firmware_invalid(dcode)) {
7645 ql_log(ql_log_fatal, vha, 0x008c,
7646 "Unable to verify the integrity of flash firmware "
7647 "image.\n");
7648 ql_log(ql_log_fatal, vha, 0x008d,
7649 "Firmware data: %08x %08x %08x %08x.\n",
7650 dcode[0], dcode[1], dcode[2], dcode[3]);
7651
7652 return QLA_FUNCTION_FAILED;
7653 }
7654
7655 dcode = (void *)req->ring;
7656 *srisc_addr = 0;
7657 segments = FA_RISC_CODE_SEGMENTS;
7658 for (j = 0; j < segments; j++) {
7659 ql_dbg(ql_dbg_init, vha, 0x008d,
7660 "-> Loading segment %u...\n", j);
7661 qla24xx_read_flash_data(vha, dcode, faddr, 10);
7662 risc_addr = be32_to_cpu(dcode[2]);
7663 risc_size = be32_to_cpu(dcode[3]);
7664 if (!*srisc_addr) {
7665 *srisc_addr = risc_addr;
7666 risc_attr = be32_to_cpu(dcode[9]);
7667 }
7668
7669 dlen = ha->fw_transfer_size >> 2;
7670 for (fragment = 0; risc_size; fragment++) {
7671 if (dlen > risc_size)
7672 dlen = risc_size;
7673
7674 ql_dbg(ql_dbg_init, vha, 0x008e,
7675 "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
7676 fragment, risc_addr, faddr, dlen);
7677 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
7678 for (i = 0; i < dlen; i++)
7679 dcode[i] = swab32(dcode[i]);
7680
7681 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
7682 if (rval) {
7683 ql_log(ql_log_fatal, vha, 0x008f,
7684 "-> Failed load firmware fragment %u.\n",
7685 fragment);
7686 return QLA_FUNCTION_FAILED;
7687 }
7688
7689 faddr += dlen;
7690 risc_addr += dlen;
7691 risc_size -= dlen;
7692 }
7693 }
7694
7695 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
7696 return QLA_SUCCESS;
7697
7698 templates = (risc_attr & BIT_9) ? 2 : 1;
7699 ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
7700 for (j = 0; j < templates; j++, fwdt++) {
7701 if (fwdt->template)
7702 vfree(fwdt->template);
7703 fwdt->template = NULL;
7704 fwdt->length = 0;
7705
7706 dcode = (void *)req->ring;
7707 qla24xx_read_flash_data(vha, dcode, faddr, 7);
7708 risc_size = be32_to_cpu(dcode[2]);
7709 ql_dbg(ql_dbg_init, vha, 0x0161,
7710 "-> fwdt%u template array at %#x (%#x dwords)\n",
7711 j, faddr, risc_size);
7712 if (!risc_size || !~risc_size) {
7713 ql_dbg(ql_dbg_init, vha, 0x0162,
7714 "-> fwdt%u failed to read array\n", j);
7715 goto failed;
7716 }
7717
7718
7719 faddr += 7;
7720 risc_size -= 8;
7721
7722 ql_dbg(ql_dbg_init, vha, 0x0163,
7723 "-> fwdt%u template allocate template %#x words...\n",
7724 j, risc_size);
7725 fwdt->template = vmalloc(risc_size * sizeof(*dcode));
7726 if (!fwdt->template) {
7727 ql_log(ql_log_warn, vha, 0x0164,
7728 "-> fwdt%u failed allocate template.\n", j);
7729 goto failed;
7730 }
7731
7732 dcode = fwdt->template;
7733 qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
7734
7735 if (!qla27xx_fwdt_template_valid(dcode)) {
7736 ql_log(ql_log_warn, vha, 0x0165,
7737 "-> fwdt%u failed template validate\n", j);
7738 goto failed;
7739 }
7740
7741 dlen = qla27xx_fwdt_template_size(dcode);
7742 ql_dbg(ql_dbg_init, vha, 0x0166,
7743 "-> fwdt%u template size %#lx bytes (%#lx words)\n",
7744 j, dlen, dlen / sizeof(*dcode));
7745 if (dlen > risc_size * sizeof(*dcode)) {
7746 ql_log(ql_log_warn, vha, 0x0167,
7747 "-> fwdt%u template exceeds array (%-lu bytes)\n",
7748 j, dlen - risc_size * sizeof(*dcode));
7749 goto failed;
7750 }
7751
7752 fwdt->length = dlen;
7753 ql_dbg(ql_dbg_init, vha, 0x0168,
7754 "-> fwdt%u loaded template ok\n", j);
7755
7756 faddr += risc_size + 1;
7757 }
7758
7759 return QLA_SUCCESS;
7760
7761 failed:
7762 if (fwdt->template)
7763 vfree(fwdt->template);
7764 fwdt->template = NULL;
7765 fwdt->length = 0;
7766
7767 return QLA_SUCCESS;
7768 }
7769
7770 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
7771
/*
 * qla2x00_load_risc - load RISC firmware for pre-FWI2 (ISP2x00-class)
 * adapters from a request_firmware blob, in 16-bit words.
 * @vha: host
 * @srisc_addr: out - RISC start address (first segment's load address)
 *
 * Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED when the blob is missing,
 * fails its integrity checks, or a segment download fails.
 */
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	int i, fragment;
	uint16_t *wcode, *fwcode;
	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_info, vha, 0x0083,
		    "Firmware image unavailable.\n");
		ql_log(ql_log_info, vha, 0x0084,
		    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
		return QLA_FUNCTION_FAILED;
	}

	rval = QLA_SUCCESS;

	/* Words are staged through the DMA-able request ring buffer. */
	wcode = (uint16_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (uint16_t *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image size and reject blank/erased content. */
	if (blob->fw->size < 8 * sizeof(uint16_t)) {
		ql_log(ql_log_fatal, vha, 0x0085,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	for (i = 0; i < 4; i++)
		wcode[i] = be16_to_cpu(fwcode[i + 4]);
	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
	    wcode[2] == 0 && wcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x0086,
		    "Unable to verify integrity of firmware image.\n");
		ql_log(ql_log_fatal, vha, 0x0087,
		    "Firmware data: %04x %04x %04x %04x.\n",
		    wcode[0], wcode[1], wcode[2], wcode[3]);
		goto fail_fw_integrity;
	}

	/* blob->segs is a zero-terminated list of segment load addresses. */
	seg = blob->segs;
	while (*seg && rval == QLA_SUCCESS) {
		risc_addr = *seg;
		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
		/* Word 3 of each segment carries its big-endian word count. */
		risc_size = be16_to_cpu(fwcode[3]);

		/* Make sure the claimed size fits inside the blob. */
		fwclen += risc_size * sizeof(uint16_t);
		if (blob->fw->size < fwclen) {
			ql_log(ql_log_fatal, vha, 0x0088,
			    "Unable to verify integrity of firmware image "
			    "(%zd).\n", blob->fw->size);
			goto fail_fw_integrity;
		}

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			/* Transfer at most fw_transfer_size bytes per call. */
			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
			if (wlen > risc_size)
				wlen = risc_size;
			ql_dbg(ql_dbg_init, vha, 0x0089,
			    "Loading risc segment@ risc addr %x number of "
			    "words 0x%x.\n", risc_addr, wlen);

			/* Blob data is big-endian; load-RAM expects LE. */
			for (i = 0; i < wlen; i++)
				wcode[i] = swab16(fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    wlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008a,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				break;
			}

			fwcode += wlen;
			risc_addr += wlen;
			risc_size -= wlen;
			fragment++;
		}

		/* Next segment. */
		seg++;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}
7870
7871 static int
7872 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
7873 {
7874 int rval;
7875 uint templates, segments, fragment;
7876 uint32_t *dcode;
7877 ulong dlen;
7878 uint32_t risc_addr, risc_size, risc_attr = 0;
7879 ulong i;
7880 uint j;
7881 struct fw_blob *blob;
7882 uint32_t *fwcode;
7883 struct qla_hw_data *ha = vha->hw;
7884 struct req_que *req = ha->req_q_map[0];
7885 struct fwdt *fwdt = ha->fwdt;
7886
7887 ql_dbg(ql_dbg_init, vha, 0x0090,
7888 "-> FW: Loading via request-firmware.\n");
7889
7890 blob = qla2x00_request_firmware(vha);
7891 if (!blob) {
7892 ql_log(ql_log_warn, vha, 0x0092,
7893 "-> Firmware file not found.\n");
7894
7895 return QLA_FUNCTION_FAILED;
7896 }
7897
7898 fwcode = (void *)blob->fw->data;
7899 dcode = fwcode;
7900 if (qla24xx_risc_firmware_invalid(dcode)) {
7901 ql_log(ql_log_fatal, vha, 0x0093,
7902 "Unable to verify integrity of firmware image (%zd).\n",
7903 blob->fw->size);
7904 ql_log(ql_log_fatal, vha, 0x0095,
7905 "Firmware data: %08x %08x %08x %08x.\n",
7906 dcode[0], dcode[1], dcode[2], dcode[3]);
7907 return QLA_FUNCTION_FAILED;
7908 }
7909
7910 dcode = (void *)req->ring;
7911 *srisc_addr = 0;
7912 segments = FA_RISC_CODE_SEGMENTS;
7913 for (j = 0; j < segments; j++) {
7914 ql_dbg(ql_dbg_init, vha, 0x0096,
7915 "-> Loading segment %u...\n", j);
7916 risc_addr = be32_to_cpu(fwcode[2]);
7917 risc_size = be32_to_cpu(fwcode[3]);
7918
7919 if (!*srisc_addr) {
7920 *srisc_addr = risc_addr;
7921 risc_attr = be32_to_cpu(fwcode[9]);
7922 }
7923
7924 dlen = ha->fw_transfer_size >> 2;
7925 for (fragment = 0; risc_size; fragment++) {
7926 if (dlen > risc_size)
7927 dlen = risc_size;
7928
7929 ql_dbg(ql_dbg_init, vha, 0x0097,
7930 "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
7931 fragment, risc_addr,
7932 (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
7933 dlen);
7934
7935 for (i = 0; i < dlen; i++)
7936 dcode[i] = swab32(fwcode[i]);
7937
7938 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
7939 if (rval) {
7940 ql_log(ql_log_fatal, vha, 0x0098,
7941 "-> Failed load firmware fragment %u.\n",
7942 fragment);
7943 return QLA_FUNCTION_FAILED;
7944 }
7945
7946 fwcode += dlen;
7947 risc_addr += dlen;
7948 risc_size -= dlen;
7949 }
7950 }
7951
7952 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
7953 return QLA_SUCCESS;
7954
7955 templates = (risc_attr & BIT_9) ? 2 : 1;
7956 ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
7957 for (j = 0; j < templates; j++, fwdt++) {
7958 if (fwdt->template)
7959 vfree(fwdt->template);
7960 fwdt->template = NULL;
7961 fwdt->length = 0;
7962
7963 risc_size = be32_to_cpu(fwcode[2]);
7964 ql_dbg(ql_dbg_init, vha, 0x0171,
7965 "-> fwdt%u template array at %#x (%#x dwords)\n",
7966 j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
7967 risc_size);
7968 if (!risc_size || !~risc_size) {
7969 ql_dbg(ql_dbg_init, vha, 0x0172,
7970 "-> fwdt%u failed to read array\n", j);
7971 goto failed;
7972 }
7973
7974
7975 fwcode += 7;
7976 risc_size -= 8;
7977
7978 ql_dbg(ql_dbg_init, vha, 0x0173,
7979 "-> fwdt%u template allocate template %#x words...\n",
7980 j, risc_size);
7981 fwdt->template = vmalloc(risc_size * sizeof(*dcode));
7982 if (!fwdt->template) {
7983 ql_log(ql_log_warn, vha, 0x0174,
7984 "-> fwdt%u failed allocate template.\n", j);
7985 goto failed;
7986 }
7987
7988 dcode = fwdt->template;
7989 for (i = 0; i < risc_size; i++)
7990 dcode[i] = fwcode[i];
7991
7992 if (!qla27xx_fwdt_template_valid(dcode)) {
7993 ql_log(ql_log_warn, vha, 0x0175,
7994 "-> fwdt%u failed template validate\n", j);
7995 goto failed;
7996 }
7997
7998 dlen = qla27xx_fwdt_template_size(dcode);
7999 ql_dbg(ql_dbg_init, vha, 0x0176,
8000 "-> fwdt%u template size %#lx bytes (%#lx words)\n",
8001 j, dlen, dlen / sizeof(*dcode));
8002 if (dlen > risc_size * sizeof(*dcode)) {
8003 ql_log(ql_log_warn, vha, 0x0177,
8004 "-> fwdt%u template exceeds array (%-lu bytes)\n",
8005 j, dlen - risc_size * sizeof(*dcode));
8006 goto failed;
8007 }
8008
8009 fwdt->length = dlen;
8010 ql_dbg(ql_dbg_init, vha, 0x0178,
8011 "-> fwdt%u loaded template ok\n", j);
8012
8013 fwcode += risc_size + 1;
8014 }
8015
8016 return QLA_SUCCESS;
8017
8018 failed:
8019 if (fwdt->template)
8020 vfree(fwdt->template);
8021 fwdt->template = NULL;
8022 fwdt->length = 0;
8023
8024 return QLA_SUCCESS;
8025 }
8026
8027 int
8028 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8029 {
8030 int rval;
8031
8032 if (ql2xfwloadbin == 1)
8033 return qla81xx_load_risc(vha, srisc_addr);
8034
8035
8036
8037
8038
8039
8040 rval = qla24xx_load_risc_blob(vha, srisc_addr);
8041 if (rval == QLA_SUCCESS)
8042 return rval;
8043
8044 return qla24xx_load_risc_flash(vha, srisc_addr,
8045 vha->hw->flt_region_fw);
8046 }
8047
/*
 * qla81xx_load_risc - firmware load with full fallback ladder.
 *
 * Default order (ql2xfwloadbin != 2):
 *   1. secondary flash image (27xx/28xx only, when flagged active),
 *   2. primary flash image,
 *   3. request_firmware blob,
 *   4. "golden" fallback flash firmware (sets running_gold_fw).
 * With ql2xfwloadbin == 2 the blob is tried first (then golden).
 */
int
qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct active_regions active_regions = { };

	if (ql2xfwloadbin == 2)
		goto try_blob_fw;

	/* Only 27xx/28xx parts carry a secondary flash firmware image. */
	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto try_primary_fw;

	qla27xx_get_active_image(vha, &active_regions);

	if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
		goto try_primary_fw;

	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading secondary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
	if (!rval)
		return rval;

try_primary_fw:
	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading primary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
	if (!rval)
		return rval;

try_blob_fw:
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	/* No golden region configured -> nothing further to try. */
	if (!rval || !ha->flt_region_gold_fw)
		return rval;

	ql_log(ql_log_info, vha, 0x0099,
	    "Attempting to fallback to golden firmware.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
	if (rval)
		return rval;

	/* Running on fallback firmware; flag it so a flash update is urged. */
	ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
	ha->flags.running_gold_fw = 1;
	return rval;
}
8100
/*
 * qla2x00_try_to_stop_firmware - best-effort firmware stop.
 *
 * No-op when PCI access is permanently dead, the adapter is not
 * FWI2-capable, no firmware version was ever read, or firmware was
 * never started.  If the stop-firmware mailbox command fails with a
 * retryable status, the chip is reset and re-initialized up to 5 times
 * before the command is retried.  State flags are cleared regardless
 * of the final command outcome.
 */
void
qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
{
	int ret, retries;
	struct qla_hw_data *ha = vha->hw;

	if (ha->flags.pci_channel_io_perm_failure)
		return;
	if (!IS_FWI2_CAPABLE(ha))
		return;
	if (!ha->fw_major_version)
		return;
	if (!ha->flags.fw_started)
		return;

	ret = qla2x00_stop_firmware(vha);
	/* Timeout and invalid-command are treated as final; other
	 * failures trigger a chip reset + re-setup before the retry. */
	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
	    ret != QLA_INVALID_COMMAND && retries ; retries--) {
		ha->isp_ops->reset_chip(vha);
		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
			continue;
		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
			continue;
		ql_log(ql_log_info, vha, 0x8015,
		    "Attempting retry of stop-firmware command.\n");
		ret = qla2x00_stop_firmware(vha);
	}

	QLA_FW_STOPPED(ha);
	ha->flags.fw_init_done = 0;
}
8132
/*
 * qla24xx_configure_vhba - bring an NPIV virtual port into operation.
 *
 * Waits for the base (physical) port's firmware to become ready, issues
 * a sync marker, logs the vport into the SNS fabric name server and
 * kicks off a loop resync on the base port.
 *
 * NOTE(review): mixed error conventions — returns -EINVAL when invoked
 * on the physical port (vp_idx == 0), otherwise QLA_* status codes.
 */
int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int rval2;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	if (!vha->vp_idx)
		return -EINVAL;

	rval = qla2x00_fw_ready(base_vha);

	if (rval == QLA_SUCCESS) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
	}

	vha->flags.management_server_logged_in = 0;

	/* Login to SNS (loop id 0xfc) first. */
	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
	    BIT_1);
	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
			ql_dbg(ql_dbg_init, vha, 0x0120,
			    "Failed SNS login: loop_id=%x, rval2=%d\n",
			    NPH_SNS, rval2);
		else
			ql_dbg(ql_dbg_init, vha, 0x0103,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
		return (QLA_FUNCTION_FAILED);
	}

	atomic_set(&vha->loop_down_timer, 0);
	atomic_set(&vha->loop_state, LOOP_UP);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	rval = qla2x00_loop_resync(base_vha);

	return rval;
}
8178
8179
8180
/* Global registry of per-bus ISP84xx chip-state objects, protected by
 * qla_cs84xx_mutex; both PCI functions of an 84xx share one entry. */
static LIST_HEAD(qla_cs84xx_list);
static DEFINE_MUTEX(qla_cs84xx_mutex);
8183
/*
 * qla84xx_get_chip - find or create the shared 84xx chip state for this
 * adapter's PCI bus.
 *
 * The state object is keyed by pdev->bus and refcounted; an existing
 * entry is returned with an extra reference.  Returns NULL when a new
 * entry cannot be allocated.  The 'done' label serves both the found
 * path and the allocation-failure path.
 */
static struct qla_chip_state_84xx *
qla84xx_get_chip(struct scsi_qla_host *vha)
{
	struct qla_chip_state_84xx *cs84xx;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&qla_cs84xx_mutex);

	/* Reuse an existing per-bus entry if one is registered. */
	list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
		if (cs84xx->bus == ha->pdev->bus) {
			kref_get(&cs84xx->kref);
			goto done;
		}
	}

	cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
	if (!cs84xx)
		goto done;

	/* kref_init leaves the refcount at 1 for this caller. */
	kref_init(&cs84xx->kref);
	spin_lock_init(&cs84xx->access_lock);
	mutex_init(&cs84xx->fw_update_mutex);
	cs84xx->bus = ha->pdev->bus;

	list_add_tail(&cs84xx->list, &qla_cs84xx_list);
done:
	mutex_unlock(&qla_cs84xx_mutex);
	return cs84xx;
}
8214
/* kref release callback: unlink the 84xx chip state from the global
 * registry (under qla_cs84xx_mutex) and free it. */
static void
__qla84xx_chip_release(struct kref *kref)
{
	struct qla_chip_state_84xx *cs84xx =
	    container_of(kref, struct qla_chip_state_84xx, kref);

	mutex_lock(&qla_cs84xx_mutex);
	list_del(&cs84xx->list);
	mutex_unlock(&qla_cs84xx_mutex);
	kfree(cs84xx);
}
8226
8227 void
8228 qla84xx_put_chip(struct scsi_qla_host *vha)
8229 {
8230 struct qla_hw_data *ha = vha->hw;
8231
8232 if (ha->cs84xx)
8233 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
8234 }
8235
8236 static int
8237 qla84xx_init_chip(scsi_qla_host_t *vha)
8238 {
8239 int rval;
8240 uint16_t status[2];
8241 struct qla_hw_data *ha = vha->hw;
8242
8243 mutex_lock(&ha->cs84xx->fw_update_mutex);
8244
8245 rval = qla84xx_verify_chip(vha, status);
8246
8247 mutex_unlock(&ha->cs84xx->fw_update_mutex);
8248
8249 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
8250 QLA_SUCCESS;
8251 }
8252
8253
8254
/*
 * qla81xx_nvram_config() - configure adapter from ISP81xx-family NVRAM.
 * @vha: HA context
 *
 * Reads the VPD and NVRAM images from flash (choosing the secondary image
 * on ISP28xx when the aux region selects it), validates the NVRAM checksum
 * and version, falls back to safe defaults on corruption, then builds the
 * firmware initialization control block (ICB) and derives the driver's
 * host-adapter parameters (timeouts, retry counts, ZIO mode, ...).
 *
 * Returns 0 on success (rval stays QLA_SUCCESS); a non-zero rval indicates
 * the NVRAM was invalid and defaults were used.
 */
int
qla81xx_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	struct init_cb_81xx *icb;
	struct nvram_81xx *nv;
	uint32_t *dptr;
	uint8_t *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;
	uint32_t faddr;
	struct active_regions active_regions = { };

	rval = QLA_SUCCESS;
	icb = (struct init_cb_81xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;
	if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
		ha->vpd_size = FA_VPD_SIZE_82XX;

	/* On 27xx/28xx, discover which aux flash regions are active. */
	if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
		qla28xx_get_aux_images(vha, &active_regions);

	/* Get VPD data into cache. */
	ha->vpd = ha->nvram + VPD_OFFSET;

	faddr = ha->flt_region_vpd;
	if (IS_QLA28XX(ha)) {
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_vpd_sec;
		ql_dbg(ql_dbg_init, vha, 0x0110,
		    "Loading %s nvram image.\n",
		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
		    "primary" : "secondary");
	}
	/* faddr is a dword index into flash, hence the << 2. */
	ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);

	/* Get NVRAM data into cache and calculate checksum. */
	faddr = ha->flt_region_nvram;
	if (IS_QLA28XX(ha)) {
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_nvram_sec;
	}
	/*
	 * NOTE(review): unlike the VPD message above, this one prints for all
	 * ISP types; for non-28xx parts active_regions stays zero-initialized
	 * so the primary/secondary wording may be misleading — confirm intent.
	 */
	ql_dbg(ql_dbg_init, vha, 0x0110,
	    "Loading %s nvram image.\n",
	    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
	    "primary" : "secondary");
	ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);

	/* Sum the image as little-endian dwords; valid NVRAM sums to 0. */
	dptr = (uint32_t *)nv;
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
	    "Contents of NVRAM:\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
		/* Reset NVRAM data. */
		ql_log(ql_log_info, vha, 0x0073,
		    "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, le16_to_cpu(nv->nvram_version));
		ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
		ql_log(ql_log_info, vha, 0x0074,
		    "Falling back to functioning (yet invalid -- WWPN) "
		    "defaults.\n");

		/*
		 * Set default initialization control block. The WWPN/WWNN
		 * below are placeholders; port_no keeps functions distinct.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = cpu_to_le16(ICB_VERSION);
		nv->version = cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = 2048;
		nv->execution_throttle = cpu_to_le16(0xFFFF);
		nv->exchange_count = cpu_to_le16(0);
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + ha->port_no + 1;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		nv->login_retry_count = cpu_to_le16(8);
		nv->interrupt_delay_timer = cpu_to_le16(0);
		nv->login_timeout = cpu_to_le16(0);
		nv->firmware_options_1 =
		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		nv->firmware_options_3 = cpu_to_le32(2 << 13);
		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = cpu_to_le16(128);
		nv->port_down_retry_count = cpu_to_le16(30);
		nv->link_down_timeout = cpu_to_le16(180);
		/* Default FCoE enode MAC; last octet varies per function. */
		nv->enode_mac[0] = 0x00;
		nv->enode_mac[1] = 0xC0;
		nv->enode_mac[2] = 0xDD;
		nv->enode_mac[3] = 0x04;
		nv->enode_mac[4] = 0x05;
		nv->enode_mac[5] = 0x06 + ha->port_no + 1;

		rval = 1;
	}

	/* T10-PI needs the frame payload rounded down to a multiple of 8. */
	if (IS_T10_PI_CAPABLE(ha))
		nv->frame_payload_size &= ~7;

	qlt_81xx_config_nvram_stage1(vha, nv);

	/* Reset Initialization control block. */
	memset(icb, 0, ha->init_cb_size);

	/* Copy 1st segment (version .. response_q_inpointer). */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;

	/* Copy 2nd segment (interrupt_delay_timer .. reserved_5). */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_5 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;

	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
		icb->enode_mac[0] = 0x00;
		icb->enode_mac[1] = 0xC0;
		icb->enode_mac[2] = 0xDD;
		icb->enode_mac[3] = 0x04;
		icb->enode_mac[4] = 0x05;
		icb->enode_mac[5] = 0x06 + ha->port_no + 1;
	}

	/* Use extended-initialization control block. */
	memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);

	/* Setup driver NVRAM options. */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLE8XXX");

	qlt_81xx_config_nvram_stage2(vha, icb);

	/* Use alternate WWN? */
	if (nv->host_p & cpu_to_le32(BIT_15)) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename. */
	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename
		 * was not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;

	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back. */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* if not running MSI-X we need handshaking on interrupts */
	if (!vha->hw->flags.msix_enabled &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
		icb->firmware_options_2 |= cpu_to_le32(BIT_22);

	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	vha->flags.process_response_queue = 0;
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x0075,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode,
		    ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
		vha->flags.process_response_queue = 1;
	}

	/* enable RIDA Format2 */
	icb->firmware_options_3 |= BIT_0;

	/* N2N: driver will initiate Login instead of FW */
	icb->firmware_options_3 |= BIT_8;

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0076,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
8553
/*
 * qla82xx_restart_isp() - restart the ISP82xx after a reset.
 * @vha: HA context
 *
 * Re-initializes the rings, waits for firmware readiness, re-enables
 * interrupts and the offload traces (FCE/EFT), then propagates the abort
 * to every vport.  Returns 0 on success.
 */
int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
			vha->flags.online = 1;
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going to
			 * start the I/O.
			 */
			vha->marker_needed = 1;
		}

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Update the firmware version */
		status = qla82xx_check_md_needed(vha);

		/* Re-arm the FCoE trace buffer if it was in use. */
		if (ha->fce) {
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8001,
				    "Unable to reinitialize FCE (%d).\n",
				    rval);
				ha->flags.fce_enabled = 0;
			}
		}

		/* Re-arm the extended firmware trace buffer if allocated. */
		if (ha->eft) {
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8010,
				    "Unable to reinitialize EFT (%d).\n",
				    rval);
			}
		}
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8011,
		    "qla82xx_restart_isp succeeded.\n");

		/*
		 * Abort ISP on each vport.  vref_count is bumped and the
		 * vport_slock dropped around the call so the vport cannot
		 * be torn down while we operate on it.
		 */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		ql_log(ql_log_warn, vha, 0x8016,
		    "qla82xx_restart_isp **** FAILED ****.\n");
	}

	return status;
}
8651
8652 void
8653 qla81xx_update_fw_options(scsi_qla_host_t *vha)
8654 {
8655 struct qla_hw_data *ha = vha->hw;
8656
8657
8658 if (ql2xfwholdabts)
8659 ha->fw_options[3] |= BIT_12;
8660
8661
8662 if (ha->operating_mode == P2P) {
8663 ha->fw_options[2] |= BIT_3;
8664 ql_dbg(ql_dbg_disc, vha, 0x2103,
8665 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
8666 __func__, ha->fw_options[2]);
8667 }
8668
8669
8670 if (ql2xmvasynctoatio) {
8671 if (qla_tgt_mode_enabled(vha) ||
8672 qla_dual_mode_enabled(vha))
8673 ha->fw_options[2] |= BIT_11;
8674 else
8675 ha->fw_options[2] &= ~BIT_11;
8676 }
8677
8678 if (qla_tgt_mode_enabled(vha) ||
8679 qla_dual_mode_enabled(vha)) {
8680
8681 ha->fw_options[1] |= BIT_8;
8682 ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;
8683
8684
8685 ha->fw_options[2] |= BIT_4;
8686 } else {
8687 ha->fw_options[1] &= ~BIT_8;
8688 ha->fw_options[10] &= 0x00ff;
8689
8690 ha->fw_options[2] &= ~BIT_4;
8691 }
8692
8693 if (ql2xetsenable) {
8694
8695 memset(ha->fw_options, 0, sizeof(ha->fw_options));
8696 ha->fw_options[2] |= BIT_9;
8697 }
8698
8699 ql_dbg(ql_dbg_init, vha, 0x00e9,
8700 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
8701 __func__, ha->fw_options[1], ha->fw_options[2],
8702 ha->fw_options[3], vha->host->active_mode);
8703
8704 qla2x00_set_fw_options(vha, ha->fw_options);
8705 }
8706
8707
8708
8709
8710
8711
8712
8713
8714
8715
8716
8717
8718
8719
8720
8721
8722
8723
8724
8725 static int
8726 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
8727 {
8728 int i, entries;
8729 uint8_t pid_match, wwn_match;
8730 int priority;
8731 uint32_t pid1, pid2;
8732 uint64_t wwn1, wwn2;
8733 struct qla_fcp_prio_entry *pri_entry;
8734 struct qla_hw_data *ha = vha->hw;
8735
8736 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
8737 return -1;
8738
8739 priority = -1;
8740 entries = ha->fcp_prio_cfg->num_entries;
8741 pri_entry = &ha->fcp_prio_cfg->entry[0];
8742
8743 for (i = 0; i < entries; i++) {
8744 pid_match = wwn_match = 0;
8745
8746 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
8747 pri_entry++;
8748 continue;
8749 }
8750
8751
8752 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
8753 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
8754 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
8755 if (pid1 == INVALID_PORT_ID)
8756 pid_match++;
8757 else if (pid1 == pid2)
8758 pid_match++;
8759 }
8760
8761
8762 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
8763 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
8764 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
8765 if (pid1 == INVALID_PORT_ID)
8766 pid_match++;
8767 else if (pid1 == pid2)
8768 pid_match++;
8769 }
8770
8771
8772 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
8773 wwn1 = wwn_to_u64(vha->port_name);
8774 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
8775 if (wwn2 == (uint64_t)-1)
8776 wwn_match++;
8777 else if (wwn1 == wwn2)
8778 wwn_match++;
8779 }
8780
8781
8782 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
8783 wwn1 = wwn_to_u64(fcport->port_name);
8784 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
8785 if (wwn2 == (uint64_t)-1)
8786 wwn_match++;
8787 else if (wwn1 == wwn2)
8788 wwn_match++;
8789 }
8790
8791 if (pid_match == 2 || wwn_match == 2) {
8792
8793 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
8794 priority = pri_entry->tag;
8795 break;
8796 }
8797
8798 pri_entry++;
8799 }
8800
8801 return priority;
8802 }
8803
8804
8805
8806
8807
8808
8809
8810
8811
8812
8813
8814
8815
8816
8817
8818 int
8819 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
8820 {
8821 int ret;
8822 int priority;
8823 uint16_t mb[5];
8824
8825 if (fcport->port_type != FCT_TARGET ||
8826 fcport->loop_id == FC_NO_LOOP_ID)
8827 return QLA_FUNCTION_FAILED;
8828
8829 priority = qla24xx_get_fcp_prio(vha, fcport);
8830 if (priority < 0)
8831 return QLA_FUNCTION_FAILED;
8832
8833 if (IS_P3P_TYPE(vha->hw)) {
8834 fcport->fcp_prio = priority & 0xf;
8835 return QLA_SUCCESS;
8836 }
8837
8838 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
8839 if (ret == QLA_SUCCESS) {
8840 if (fcport->fcp_prio != priority)
8841 ql_dbg(ql_dbg_user, vha, 0x709e,
8842 "Updated FCP_CMND priority - value=%d loop_id=%d "
8843 "port_id=%02x%02x%02x.\n", priority,
8844 fcport->loop_id, fcport->d_id.b.domain,
8845 fcport->d_id.b.area, fcport->d_id.b.al_pa);
8846 fcport->fcp_prio = priority & 0xf;
8847 } else
8848 ql_dbg(ql_dbg_user, vha, 0x704f,
8849 "Unable to update FCP_CMND priority - ret=0x%x for "
8850 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
8851 fcport->d_id.b.domain, fcport->d_id.b.area,
8852 fcport->d_id.b.al_pa);
8853 return ret;
8854 }
8855
8856
8857
8858
8859
8860
8861
8862
8863
8864
8865
8866
8867
8868
8869 int
8870 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
8871 {
8872 int ret;
8873 fc_port_t *fcport;
8874
8875 ret = QLA_FUNCTION_FAILED;
8876
8877 list_for_each_entry(fcport, &vha->vp_fcports, list)
8878 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
8879
8880 return ret;
8881 }
8882
/*
 * qla2xxx_create_qpair() - allocate and start a request/response queue pair.
 * @vha: HA context
 * @qos: QoS value passed to the request-queue creation
 * @vp_idx: virtual-port index the qpair belongs to
 * @startqp: whether to start the queues immediately
 *
 * Allocates a qla_qpair, claims a qpair id and an unused MSI-X vector under
 * mq_lock, then creates the response and request queues and the per-qpair
 * srb mempool.  Returns the new qpair or NULL on failure; on failure all
 * claimed resources are unwound via the goto chain at the bottom.
 */
struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
	int vp_idx, bool startqp)
{
	int rsp_id = 0;
	int req_id = 0;
	int i;
	struct qla_hw_data *ha = vha->hw;
	uint16_t qpair_id = 0;
	struct qla_qpair *qpair = NULL;
	struct qla_msix_entry *msix;

	if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
		ql_log(ql_log_warn, vha, 0x00181,
		    "FW/Driver is not multi-queue capable.\n");
		return NULL;
	}

	if (ql2xmqsupport || ql2xnvmeenable) {
		qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
		if (qpair == NULL) {
			ql_log(ql_log_warn, vha, 0x0182,
			    "Failed to allocate memory for queue pair.\n");
			return NULL;
		}

		qpair->hw = vha->hw;
		qpair->vha = vha;
		qpair->qp_lock_ptr = &qpair->qp_lock;
		spin_lock_init(&qpair->qp_lock);
		qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;

		/* Assign available que pair id */
		mutex_lock(&ha->mq_lock);
		qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
		if (ha->num_qpairs >= ha->max_qpairs) {
			mutex_unlock(&ha->mq_lock);
			ql_log(ql_log_warn, vha, 0x0183,
			    "No resources to create additional q pair.\n");
			goto fail_qid_map;
		}
		ha->num_qpairs++;
		set_bit(qpair_id, ha->qpair_qid_map);
		ha->queue_pair_map[qpair_id] = qpair;
		qpair->id = qpair_id;
		qpair->vp_idx = vp_idx;
		qpair->fw_started = ha->flags.fw_started;
		INIT_LIST_HEAD(&qpair->hints_list);
		qpair->chip_reset = ha->base_qpair->chip_reset;
		qpair->enable_class_2 = ha->base_qpair->enable_class_2;
		qpair->enable_explicit_conf =
		    ha->base_qpair->enable_explicit_conf;

		/* Claim the first MSI-X vector not already in use. */
		for (i = 0; i < ha->msix_count; i++) {
			msix = &ha->msix_entries[i];
			if (msix->in_use)
				continue;
			qpair->msix = msix;
			ql_dbg(ql_dbg_multiq, vha, 0xc00f,
			    "Vector %x selected for qpair\n", msix->vector);
			break;
		}
		if (!qpair->msix) {
			ql_log(ql_log_warn, vha, 0x0184,
			    "Out of MSI-X vectors!.\n");
			goto fail_msix;
		}

		qpair->msix->in_use = 1;
		list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
		qpair->pdev = ha->pdev;
		if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
			qpair->reqq_start_iocbs = qla_83xx_start_iocbs;

		mutex_unlock(&ha->mq_lock);

		/* Create response queue first */
		rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
		if (!rsp_id) {
			ql_log(ql_log_warn, vha, 0x0185,
			    "Failed to create response queue.\n");
			goto fail_rsp;
		}

		qpair->rsp = ha->rsp_q_map[rsp_id];

		/* Create request queue */
		req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
		    startqp);
		if (!req_id) {
			ql_log(ql_log_warn, vha, 0x0186,
			    "Failed to create request queue.\n");
			goto fail_req;
		}

		qpair->req = ha->req_q_map[req_id];
		qpair->rsp->req = qpair->req;
		qpair->rsp->qpair = qpair;

		/* Bind interrupt affinity to the current CPU. */
		qla_cpu_update(qpair, smp_processor_id());

		if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
			if (ha->fw_attributes & BIT_4)
				qpair->difdix_supported = 1;
		}

		qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
		if (!qpair->srb_mempool) {
			ql_log(ql_log_warn, vha, 0xd036,
			    "Failed to create srb mempool for qpair %d\n",
			    qpair->id);
			goto fail_mempool;
		}

		/* Mark as online */
		qpair->online = 1;

		if (!vha->flags.qpairs_available)
			vha->flags.qpairs_available = 1;

		ql_dbg(ql_dbg_multiq, vha, 0xc00d,
		    "Request/Response queue pair created, id %d\n",
		    qpair->id);
		ql_dbg(ql_dbg_init, vha, 0x0187,
		    "Request/Response queue pair created, id %d\n",
		    qpair->id);
	}
	return qpair;

/*
 * Unwind in reverse order of acquisition.
 * NOTE(review): the fail_mempool path falls through to fail_req, which only
 * deletes the response queue — the request queue created just above appears
 * to be leaked on mempool allocation failure.  Confirm against
 * qla25xx_delete_req_que() semantics.
 */
fail_mempool:
fail_req:
	qla25xx_delete_rsp_que(vha, qpair->rsp);
fail_rsp:
	mutex_lock(&ha->mq_lock);
	qpair->msix->in_use = 0;
	list_del(&qpair->qp_list_elem);
	if (list_empty(&vha->qp_list))
		vha->flags.qpairs_available = 0;
fail_msix:
	ha->queue_pair_map[qpair_id] = NULL;
	clear_bit(qpair_id, ha->qpair_qid_map);
	ha->num_qpairs--;
	mutex_unlock(&ha->mq_lock);
fail_qid_map:
	kfree(qpair);
	return NULL;
}
9029
9030 int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
9031 {
9032 int ret = QLA_FUNCTION_FAILED;
9033 struct qla_hw_data *ha = qpair->hw;
9034
9035 qpair->delete_in_progress = 1;
9036
9037 ret = qla25xx_delete_req_que(vha, qpair->req);
9038 if (ret != QLA_SUCCESS)
9039 goto fail;
9040
9041 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
9042 if (ret != QLA_SUCCESS)
9043 goto fail;
9044
9045 mutex_lock(&ha->mq_lock);
9046 ha->queue_pair_map[qpair->id] = NULL;
9047 clear_bit(qpair->id, ha->qpair_qid_map);
9048 ha->num_qpairs--;
9049 list_del(&qpair->qp_list_elem);
9050 if (list_empty(&vha->qp_list)) {
9051 vha->flags.qpairs_available = 0;
9052 vha->flags.qpairs_req_created = 0;
9053 vha->flags.qpairs_rsp_created = 0;
9054 }
9055 mempool_destroy(qpair->srb_mempool);
9056 kfree(qpair);
9057 mutex_unlock(&ha->mq_lock);
9058
9059 return QLA_SUCCESS;
9060 fail:
9061 return ret;
9062 }