This source file includes the following definitions.
- to_fc_ctrl
- localport_to_lport
- remoteport_to_rport
- ls_req_to_lsop
- fcp_req_to_fcp_op
- nvme_fc_free_lport
- nvme_fc_lport_put
- nvme_fc_lport_get
- nvme_fc_attach_to_unreg_lport
- nvme_fc_register_localport
- nvme_fc_unregister_localport
- nvme_fc_signal_discovery_scan
- nvme_fc_free_rport
- nvme_fc_rport_put
- nvme_fc_rport_get
- nvme_fc_resume_controller
- nvme_fc_attach_to_suspended_rport
- __nvme_fc_set_dev_loss_tmo
- nvme_fc_register_remoteport
- nvme_fc_abort_lsops
- nvme_fc_ctrl_connectivity_loss
- nvme_fc_unregister_remoteport
- nvme_fc_rescan_remoteport
- nvme_fc_set_remoteport_devloss
- fc_dma_map_single
- fc_dma_mapping_error
- fc_dma_unmap_single
- fc_dma_sync_single_for_cpu
- fc_dma_sync_single_for_device
- fc_map_sg
- fc_dma_map_sg
- fc_dma_unmap_sg
- __nvme_fc_finish_ls_req
- __nvme_fc_send_ls_req
- nvme_fc_send_ls_req_done
- nvme_fc_send_ls_req
- nvme_fc_send_ls_req_async
- nvme_fc_connect_admin_queue
- nvme_fc_connect_queue
- nvme_fc_disconnect_assoc_done
- nvme_fc_xmt_disconnect_assoc
- __nvme_fc_exit_request
- nvme_fc_exit_request
- __nvme_fc_abort_op
- nvme_fc_abort_aen_ops
- __nvme_fc_fcpop_chk_teardowns
- nvme_fc_fcpio_done
- __nvme_fc_init_request
- nvme_fc_init_request
- nvme_fc_init_aen_ops
- nvme_fc_term_aen_ops
- __nvme_fc_init_hctx
- nvme_fc_init_hctx
- nvme_fc_init_admin_hctx
- nvme_fc_init_queue
- nvme_fc_free_queue
- __nvme_fc_delete_hw_queue
- nvme_fc_free_io_queues
- __nvme_fc_create_hw_queue
- nvme_fc_delete_hw_io_queues
- nvme_fc_create_hw_io_queues
- nvme_fc_connect_io_queues
- nvme_fc_init_io_queues
- nvme_fc_ctrl_free
- nvme_fc_ctrl_put
- nvme_fc_ctrl_get
- nvme_fc_nvme_ctrl_freed
- nvme_fc_error_recovery
- nvme_fc_timeout
- nvme_fc_map_data
- nvme_fc_unmap_data
- nvme_fc_start_fcp_op
- nvme_fc_queue_rq
- nvme_fc_submit_async_event
- nvme_fc_complete_rq
- nvme_fc_terminate_exchange
- nvme_fc_create_io_queues
- nvme_fc_recreate_io_queues
- nvme_fc_rport_active_on_lport
- nvme_fc_rport_inactive_on_lport
- nvme_fc_ctlr_active_on_rport
- nvme_fc_ctlr_inactive_on_rport
- nvme_fc_create_association
- nvme_fc_delete_association
- nvme_fc_delete_ctrl
- nvme_fc_reconnect_or_delete
- __nvme_fc_terminate_io
- nvme_fc_reset_ctrl_work
- nvme_fc_connect_err_work
- nvme_fc_connect_ctrl_work
- nvme_fc_existing_controller
- nvme_fc_init_ctrl
- __nvme_fc_parse_u64
- nvme_fc_parse_traddr
- nvme_fc_create_ctrl
- nvme_fc_nvme_discovery_store
- nvme_fc_init_module
- nvme_fc_delete_controllers
- nvme_fc_cleanup_for_unload
- nvme_fc_exit_module
1
2
3
4
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/parser.h>
8 #include <uapi/scsi/fc/fc_fs.h>
9 #include <uapi/scsi/fc/fc_els.h>
10 #include <linux/delay.h>
11 #include <linux/overflow.h>
12
13 #include "nvme.h"
14 #include "fabrics.h"
15 #include <linux/nvme-fc-driver.h>
16 #include <linux/nvme-fc.h>
17 #include <scsi/scsi_transport_fc.h>
18
19
20
21
22 enum nvme_fc_queue_flags {
23 NVME_FC_Q_CONNECTED = 0,
24 NVME_FC_Q_LIVE,
25 };
26
27 #define NVME_FC_DEFAULT_DEV_LOSS_TMO 60
28
29 struct nvme_fc_queue {
30 struct nvme_fc_ctrl *ctrl;
31 struct device *dev;
32 struct blk_mq_hw_ctx *hctx;
33 void *lldd_handle;
34 size_t cmnd_capsule_len;
35 u32 qnum;
36 u32 rqcnt;
37 u32 seqno;
38
39 u64 connection_id;
40 atomic_t csn;
41
42 unsigned long flags;
43 } __aligned(sizeof(u64));
44
45 enum nvme_fcop_flags {
46 FCOP_FLAGS_TERMIO = (1 << 0),
47 FCOP_FLAGS_AEN = (1 << 1),
48 };
49
50 struct nvmefc_ls_req_op {
51 struct nvmefc_ls_req ls_req;
52
53 struct nvme_fc_rport *rport;
54 struct nvme_fc_queue *queue;
55 struct request *rq;
56 u32 flags;
57
58 int ls_error;
59 struct completion ls_done;
60 struct list_head lsreq_list;
61 bool req_queued;
62 };
63
64 enum nvme_fcpop_state {
65 FCPOP_STATE_UNINIT = 0,
66 FCPOP_STATE_IDLE = 1,
67 FCPOP_STATE_ACTIVE = 2,
68 FCPOP_STATE_ABORTED = 3,
69 FCPOP_STATE_COMPLETE = 4,
70 };
71
72 struct nvme_fc_fcp_op {
73 struct nvme_request nreq;
74
75
76
77
78
79
80
81 struct nvmefc_fcp_req fcp_req;
82
83 struct nvme_fc_ctrl *ctrl;
84 struct nvme_fc_queue *queue;
85 struct request *rq;
86
87 atomic_t state;
88 u32 flags;
89 u32 rqno;
90 u32 nents;
91
92 struct nvme_fc_cmd_iu cmd_iu;
93 struct nvme_fc_ersp_iu rsp_iu;
94 };
95
96 struct nvme_fcp_op_w_sgl {
97 struct nvme_fc_fcp_op op;
98 struct scatterlist sgl[SG_CHUNK_SIZE];
99 uint8_t priv[0];
100 };
101
102 struct nvme_fc_lport {
103 struct nvme_fc_local_port localport;
104
105 struct ida endp_cnt;
106 struct list_head port_list;
107 struct list_head endp_list;
108 struct device *dev;
109 struct nvme_fc_port_template *ops;
110 struct kref ref;
111 atomic_t act_rport_cnt;
112 } __aligned(sizeof(u64));
113
114 struct nvme_fc_rport {
115 struct nvme_fc_remote_port remoteport;
116
117 struct list_head endp_list;
118 struct list_head ctrl_list;
119 struct list_head ls_req_list;
120 struct list_head disc_list;
121 struct device *dev;
122 struct nvme_fc_lport *lport;
123 spinlock_t lock;
124 struct kref ref;
125 atomic_t act_ctrl_cnt;
126 unsigned long dev_loss_end;
127 } __aligned(sizeof(u64));
128
129 enum nvme_fcctrl_flags {
130 FCCTRL_TERMIO = (1 << 0),
131 };
132
133 struct nvme_fc_ctrl {
134 spinlock_t lock;
135 struct nvme_fc_queue *queues;
136 struct device *dev;
137 struct nvme_fc_lport *lport;
138 struct nvme_fc_rport *rport;
139 u32 cnum;
140
141 bool ioq_live;
142 bool assoc_active;
143 atomic_t err_work_active;
144 u64 association_id;
145
146 struct list_head ctrl_list;
147
148 struct blk_mq_tag_set admin_tag_set;
149 struct blk_mq_tag_set tag_set;
150
151 struct delayed_work connect_work;
152 struct work_struct err_work;
153
154 struct kref ref;
155 u32 flags;
156 u32 iocnt;
157 wait_queue_head_t ioabort_wait;
158
159 struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS];
160
161 struct nvme_ctrl ctrl;
162 };
163
164 static inline struct nvme_fc_ctrl *
165 to_fc_ctrl(struct nvme_ctrl *ctrl)
166 {
167 return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
168 }
169
170 static inline struct nvme_fc_lport *
171 localport_to_lport(struct nvme_fc_local_port *portptr)
172 {
173 return container_of(portptr, struct nvme_fc_lport, localport);
174 }
175
176 static inline struct nvme_fc_rport *
177 remoteport_to_rport(struct nvme_fc_remote_port *portptr)
178 {
179 return container_of(portptr, struct nvme_fc_rport, remoteport);
180 }
181
182 static inline struct nvmefc_ls_req_op *
183 ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
184 {
185 return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
186 }
187
188 static inline struct nvme_fc_fcp_op *
189 fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
190 {
191 return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
192 }
193
194
195
196
197
198
199 static DEFINE_SPINLOCK(nvme_fc_lock);
200
201 static LIST_HEAD(nvme_fc_lport_list);
202 static DEFINE_IDA(nvme_fc_local_port_cnt);
203 static DEFINE_IDA(nvme_fc_ctrl_cnt);
204
205 static struct workqueue_struct *nvme_fc_wq;
206
207 static bool nvme_fc_waiting_to_unload;
208 static DECLARE_COMPLETION(nvme_fc_unload_proceed);
209
210
211
212
213
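/*
 * fc_udev_device: pseudo-device whose kobject is used to post the
 * "nvmediscovery" change uevents generated by
 * nvme_fc_signal_discovery_scan().
 */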
214 static struct device *fc_udev_device;
215
216
217
218
219 static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
220 struct nvme_fc_queue *, unsigned int);
221
222 static void
223 nvme_fc_free_lport(struct kref *ref)
224 {
225 struct nvme_fc_lport *lport =
226 container_of(ref, struct nvme_fc_lport, ref);
227 unsigned long flags;
228
229 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
230 WARN_ON(!list_empty(&lport->endp_list));
231
232
233 spin_lock_irqsave(&nvme_fc_lock, flags);
234 list_del(&lport->port_list);
235 if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
236 complete(&nvme_fc_unload_proceed);
237 spin_unlock_irqrestore(&nvme_fc_lock, flags);
238
239 ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
240 ida_destroy(&lport->endp_cnt);
241
242 put_device(lport->dev);
243
244 kfree(lport);
245 }
246
247 static void
248 nvme_fc_lport_put(struct nvme_fc_lport *lport)
249 {
250 kref_put(&lport->ref, nvme_fc_free_lport);
251 }
252
253 static int
254 nvme_fc_lport_get(struct nvme_fc_lport *lport)
255 {
256 return kref_get_unless_zero(&lport->ref);
257 }
258
259
260 static struct nvme_fc_lport *
261 nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
262 struct nvme_fc_port_template *ops,
263 struct device *dev)
264 {
265 struct nvme_fc_lport *lport;
266 unsigned long flags;
267
268 spin_lock_irqsave(&nvme_fc_lock, flags);
269
270 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
271 if (lport->localport.node_name != pinfo->node_name ||
272 lport->localport.port_name != pinfo->port_name)
273 continue;
274
275 if (lport->dev != dev) {
276 lport = ERR_PTR(-EXDEV);
277 goto out_done;
278 }
279
280 if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
281 lport = ERR_PTR(-EEXIST);
282 goto out_done;
283 }
284
285 if (!nvme_fc_lport_get(lport)) {
286
287
288
289
290 lport = NULL;
291 goto out_done;
292 }
293
294
295
296 lport->ops = ops;
297 lport->localport.port_role = pinfo->port_role;
298 lport->localport.port_id = pinfo->port_id;
299 lport->localport.port_state = FC_OBJSTATE_ONLINE;
300
301 spin_unlock_irqrestore(&nvme_fc_lock, flags);
302
303 return lport;
304 }
305
306 lport = NULL;
307
308 out_done:
309 spin_unlock_irqrestore(&nvme_fc_lock, flags);
310
311 return lport;
312 }
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
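/*
 * nvme_fc_register_localport - register an NVMe host FC port (localport)
 *   with the FC-NVME transport.
 *
 * Validates the mandatory template entry points and limits, then either
 * re-attaches to a previously deleted localport with the same WWNN/WWPN
 * (reusing its port_num) or allocates a new one.  On success *portptr is
 * set to the registered localport and 0 is returned; on failure *portptr
 * is set to NULL and a negative errno is returned.
 */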
331 int
332 nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
333 struct nvme_fc_port_template *template,
334 struct device *dev,
335 struct nvme_fc_local_port **portptr)
336 {
337 struct nvme_fc_lport *newrec;
338 unsigned long flags;
339 int ret, idx;
340
341 if (!template->localport_delete || !template->remoteport_delete ||
342 !template->ls_req || !template->fcp_io ||
343 !template->ls_abort || !template->fcp_abort ||
344 !template->max_hw_queues || !template->max_sgl_segments ||
345 !template->max_dif_sgl_segments || !template->dma_boundary) {
346 ret = -EINVAL;
347 goto out_reghost_failed;
348 }
349
350
351
352
353
354
355
356
357 newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);
358
359
360 if (IS_ERR(newrec)) {
361 ret = PTR_ERR(newrec);
362 goto out_reghost_failed;
363
364
365 } else if (newrec) {
366 *portptr = &newrec->localport;
367 return 0;
368 }
369
370
371
372 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
373 GFP_KERNEL);
374 if (!newrec) {
375 ret = -ENOMEM;
376 goto out_reghost_failed;
377 }
378
379 idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
380 if (idx < 0) {
381 ret = -ENOSPC;
382 goto out_fail_kfree;
383 }
384
385 if (!get_device(dev) && dev) {
386 ret = -ENODEV;
387 goto out_ida_put;
388 }
389
390 INIT_LIST_HEAD(&newrec->port_list);
391 INIT_LIST_HEAD(&newrec->endp_list);
392 kref_init(&newrec->ref);
393 atomic_set(&newrec->act_rport_cnt, 0);
394 newrec->ops = template;
395 newrec->dev = dev;
396 ida_init(&newrec->endp_cnt);
397 newrec->localport.private = &newrec[1];
398 newrec->localport.node_name = pinfo->node_name;
399 newrec->localport.port_name = pinfo->port_name;
400 newrec->localport.port_role = pinfo->port_role;
401 newrec->localport.port_id = pinfo->port_id;
402 newrec->localport.port_state = FC_OBJSTATE_ONLINE;
403 newrec->localport.port_num = idx;
404
405 spin_lock_irqsave(&nvme_fc_lock, flags);
406 list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
407 spin_unlock_irqrestore(&nvme_fc_lock, flags);
408
409 if (dev)
410 dma_set_seg_boundary(dev, template->dma_boundary);
411
412 *portptr = &newrec->localport;
413 return 0;
414
415 out_ida_put:
416 ida_simple_remove(&nvme_fc_local_port_cnt, idx);
417 out_fail_kfree:
418 kfree(newrec);
419 out_reghost_failed:
420 *portptr = NULL;
421
422 return ret;
423 }
424 EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
425
426
427
428
429
430
431
432
433
434
435
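/*
 * nvme_fc_unregister_localport - mark a localport as deleted and release
 *   the transport's reference to it.
 *
 * If no remote ports are still active on the localport, the LLDD's
 * localport_delete() callback is invoked before the reference is dropped.
 */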
436 int
437 nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
438 {
439 struct nvme_fc_lport *lport = localport_to_lport(portptr);
440 unsigned long flags;
441
442 if (!portptr)
443 return -EINVAL;
444
445 spin_lock_irqsave(&nvme_fc_lock, flags);
446
447 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
448 spin_unlock_irqrestore(&nvme_fc_lock, flags);
449 return -EINVAL;
450 }
451 portptr->port_state = FC_OBJSTATE_DELETED;
452
453 spin_unlock_irqrestore(&nvme_fc_lock, flags);
454
455 if (atomic_read(&lport->act_rport_cnt) == 0)
456 lport->ops->localport_delete(&lport->localport);
457
458 nvme_fc_lport_put(lport);
459
460 return 0;
461 }
462 EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
463
464
465
466
467
468
469
470
471
472 #define FCNVME_TRADDR_LENGTH 64
473
474 static void
475 nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
476 struct nvme_fc_rport *rport)
477 {
478 char hostaddr[FCNVME_TRADDR_LENGTH];
479 char tgtaddr[FCNVME_TRADDR_LENGTH];
480 char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };
481
482 if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
483 return;
484
485 snprintf(hostaddr, sizeof(hostaddr),
486 "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
487 lport->localport.node_name, lport->localport.port_name);
488 snprintf(tgtaddr, sizeof(tgtaddr),
489 "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
490 rport->remoteport.node_name, rport->remoteport.port_name);
491 kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
492 }
493
494 static void
495 nvme_fc_free_rport(struct kref *ref)
496 {
497 struct nvme_fc_rport *rport =
498 container_of(ref, struct nvme_fc_rport, ref);
499 struct nvme_fc_lport *lport =
500 localport_to_lport(rport->remoteport.localport);
501 unsigned long flags;
502
503 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
504 WARN_ON(!list_empty(&rport->ctrl_list));
505
506
507 spin_lock_irqsave(&nvme_fc_lock, flags);
508 list_del(&rport->endp_list);
509 spin_unlock_irqrestore(&nvme_fc_lock, flags);
510
511 WARN_ON(!list_empty(&rport->disc_list));
512 ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
513
514 kfree(rport);
515
516 nvme_fc_lport_put(lport);
517 }
518
519 static void
520 nvme_fc_rport_put(struct nvme_fc_rport *rport)
521 {
522 kref_put(&rport->ref, nvme_fc_free_rport);
523 }
524
525 static int
526 nvme_fc_rport_get(struct nvme_fc_rport *rport)
527 {
528 return kref_get_unless_zero(&rport->ref);
529 }
530
531 static void
532 nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
533 {
534 switch (ctrl->ctrl.state) {
535 case NVME_CTRL_NEW:
536 case NVME_CTRL_CONNECTING:
537
538
539
540
541 dev_info(ctrl->ctrl.device,
542 "NVME-FC{%d}: connectivity re-established. "
543 "Attempting reconnect\n", ctrl->cnum);
544
545 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
546 break;
547
548 case NVME_CTRL_RESETTING:
549
550
551
552
553
554 break;
555
556 default:
557
558 break;
559 }
560 }
561
562 static struct nvme_fc_rport *
563 nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
564 struct nvme_fc_port_info *pinfo)
565 {
566 struct nvme_fc_rport *rport;
567 struct nvme_fc_ctrl *ctrl;
568 unsigned long flags;
569
570 spin_lock_irqsave(&nvme_fc_lock, flags);
571
572 list_for_each_entry(rport, &lport->endp_list, endp_list) {
573 if (rport->remoteport.node_name != pinfo->node_name ||
574 rport->remoteport.port_name != pinfo->port_name)
575 continue;
576
577 if (!nvme_fc_rport_get(rport)) {
578 rport = ERR_PTR(-ENOLCK);
579 goto out_done;
580 }
581
582 spin_unlock_irqrestore(&nvme_fc_lock, flags);
583
584 spin_lock_irqsave(&rport->lock, flags);
585
586
587 if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
588
589 spin_unlock_irqrestore(&rport->lock, flags);
590 nvme_fc_rport_put(rport);
591 return ERR_PTR(-ESTALE);
592 }
593
594 rport->remoteport.port_role = pinfo->port_role;
595 rport->remoteport.port_id = pinfo->port_id;
596 rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
597 rport->dev_loss_end = 0;
598
599
600
601
602
603 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
604 nvme_fc_resume_controller(ctrl);
605
606 spin_unlock_irqrestore(&rport->lock, flags);
607
608 return rport;
609 }
610
611 rport = NULL;
612
613 out_done:
614 spin_unlock_irqrestore(&nvme_fc_lock, flags);
615
616 return rport;
617 }
618
619 static inline void
620 __nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
621 struct nvme_fc_port_info *pinfo)
622 {
623 if (pinfo->dev_loss_tmo)
624 rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
625 else
626 rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
627 }
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
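/*
 * nvme_fc_register_remoteport - register an NVMe subsystem FC port
 *   (remoteport) reachable via the given localport.
 *
 * If a previously deleted remoteport with the same WWNN/WWPN still exists,
 * it is reactivated and any controllers attached to it are prompted to
 * reconnect; otherwise a new remoteport is allocated.  A discovery-scan
 * uevent is signalled in either case and *portptr returns the registered
 * port.
 */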
645 int
646 nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
647 struct nvme_fc_port_info *pinfo,
648 struct nvme_fc_remote_port **portptr)
649 {
650 struct nvme_fc_lport *lport = localport_to_lport(localport);
651 struct nvme_fc_rport *newrec;
652 unsigned long flags;
653 int ret, idx;
654
655 if (!nvme_fc_lport_get(lport)) {
656 ret = -ESHUTDOWN;
657 goto out_reghost_failed;
658 }
659
660
661
662
663
664
665 newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);
666
667
668 if (IS_ERR(newrec)) {
669 ret = PTR_ERR(newrec);
670 goto out_lport_put;
671
672
673 } else if (newrec) {
674 nvme_fc_lport_put(lport);
675 __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
676 nvme_fc_signal_discovery_scan(lport, newrec);
677 *portptr = &newrec->remoteport;
678 return 0;
679 }
680
681
682
683 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
684 GFP_KERNEL);
685 if (!newrec) {
686 ret = -ENOMEM;
687 goto out_lport_put;
688 }
689
690 idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
691 if (idx < 0) {
692 ret = -ENOSPC;
693 goto out_kfree_rport;
694 }
695
696 INIT_LIST_HEAD(&newrec->endp_list);
697 INIT_LIST_HEAD(&newrec->ctrl_list);
698 INIT_LIST_HEAD(&newrec->ls_req_list);
699 INIT_LIST_HEAD(&newrec->disc_list);
700 kref_init(&newrec->ref);
701 atomic_set(&newrec->act_ctrl_cnt, 0);
702 spin_lock_init(&newrec->lock);
703 newrec->remoteport.localport = &lport->localport;
704 newrec->dev = lport->dev;
705 newrec->lport = lport;
706 newrec->remoteport.private = &newrec[1];
707 newrec->remoteport.port_role = pinfo->port_role;
708 newrec->remoteport.node_name = pinfo->node_name;
709 newrec->remoteport.port_name = pinfo->port_name;
710 newrec->remoteport.port_id = pinfo->port_id;
711 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
712 newrec->remoteport.port_num = idx;
713 __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
714
715 spin_lock_irqsave(&nvme_fc_lock, flags);
716 list_add_tail(&newrec->endp_list, &lport->endp_list);
717 spin_unlock_irqrestore(&nvme_fc_lock, flags);
718
719 nvme_fc_signal_discovery_scan(lport, newrec);
720
721 *portptr = &newrec->remoteport;
722 return 0;
723
724 out_kfree_rport:
725 kfree(newrec);
726 out_lport_put:
727 nvme_fc_lport_put(lport);
728 out_reghost_failed:
729 *portptr = NULL;
730 return ret;
731 }
732 EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
733
734 static int
735 nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
736 {
737 struct nvmefc_ls_req_op *lsop;
738 unsigned long flags;
739
740 restart:
741 spin_lock_irqsave(&rport->lock, flags);
742
743 list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
744 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
745 lsop->flags |= FCOP_FLAGS_TERMIO;
746 spin_unlock_irqrestore(&rport->lock, flags);
747 rport->lport->ops->ls_abort(&rport->lport->localport,
748 &rport->remoteport,
749 &lsop->ls_req);
750 goto restart;
751 }
752 }
753 spin_unlock_irqrestore(&rport->lock, flags);
754
755 return 0;
756 }
757
758 static void
759 nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
760 {
761 dev_info(ctrl->ctrl.device,
762 "NVME-FC{%d}: controller connectivity lost. Awaiting "
763 "Reconnect", ctrl->cnum);
764
765 switch (ctrl->ctrl.state) {
766 case NVME_CTRL_NEW:
767 case NVME_CTRL_LIVE:
768
769
770
771
772
773
774
775 if (nvme_reset_ctrl(&ctrl->ctrl)) {
776 dev_warn(ctrl->ctrl.device,
777 "NVME-FC{%d}: Couldn't schedule reset.\n",
778 ctrl->cnum);
779 nvme_delete_ctrl(&ctrl->ctrl);
780 }
781 break;
782
783 case NVME_CTRL_CONNECTING:
784
785
786
787
788
789
790
791 break;
792
793 case NVME_CTRL_RESETTING:
794
795
796
797
798
799
800 break;
801
802 case NVME_CTRL_DELETING:
803 default:
804
805 break;
806 }
807 }
808
809
810
811
812
813
814
815
816
817
818
819
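/*
 * nvme_fc_unregister_remoteport - mark a remoteport as deleted and begin
 *   connectivity-loss handling.
 *
 * Records the dev_loss_tmo deadline, then either deletes the attached
 * controllers (dev_loss_tmo == 0) or lets them attempt reconnects via
 * nvme_fc_ctrl_connectivity_loss().  Outstanding LS requests are aborted
 * and, if no controllers are active on the port, the LLDD's
 * remoteport_delete() callback is invoked before the reference is dropped.
 */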
820 int
821 nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
822 {
823 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
824 struct nvme_fc_ctrl *ctrl;
825 unsigned long flags;
826
827 if (!portptr)
828 return -EINVAL;
829
830 spin_lock_irqsave(&rport->lock, flags);
831
832 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
833 spin_unlock_irqrestore(&rport->lock, flags);
834 return -EINVAL;
835 }
836 portptr->port_state = FC_OBJSTATE_DELETED;
837
838 rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);
839
840 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
841
842 if (!portptr->dev_loss_tmo) {
843 dev_warn(ctrl->ctrl.device,
844 "NVME-FC{%d}: controller connectivity lost.\n",
845 ctrl->cnum);
846 nvme_delete_ctrl(&ctrl->ctrl);
847 } else
848 nvme_fc_ctrl_connectivity_loss(ctrl);
849 }
850
851 spin_unlock_irqrestore(&rport->lock, flags);
852
853 nvme_fc_abort_lsops(rport);
854
855 if (atomic_read(&rport->act_ctrl_cnt) == 0)
856 rport->lport->ops->remoteport_delete(portptr);
857
858
859
860
861
862
863 nvme_fc_rport_put(rport);
864
865 return 0;
866 }
867 EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
868
869
870
871
872
873
874
875
876
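/*
 * nvme_fc_rescan_remoteport - request a rescan of the remote port by
 * re-signalling the nvmediscovery uevent for the lport/rport pair.
 */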
877 void
878 nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
879 {
880 struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);
881
882 nvme_fc_signal_discovery_scan(rport->lport, rport);
883 }
884 EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
885
886 int
887 nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
888 u32 dev_loss_tmo)
889 {
890 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
891 unsigned long flags;
892
893 spin_lock_irqsave(&rport->lock, flags);
894
895 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
896 spin_unlock_irqrestore(&rport->lock, flags);
897 return -EINVAL;
898 }
899
900
901 rport->remoteport.dev_loss_tmo = dev_loss_tmo;
902
903 spin_unlock_irqrestore(&rport->lock, flags);
904
905 return 0;
906 }
907 EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
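/*
 * DMA helpers: an LLDD may register with a NULL struct device when it
 * performs no hardware DMA.  These wrappers turn the dma_* calls into
 * no-ops (or an identity scatterlist mapping in fc_map_sg()) in that case.
 */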
928 static inline dma_addr_t
929 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
930 enum dma_data_direction dir)
931 {
932 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
933 }
934
935 static inline int
936 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
937 {
938 return dev ? dma_mapping_error(dev, dma_addr) : 0;
939 }
940
941 static inline void
942 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
943 enum dma_data_direction dir)
944 {
945 if (dev)
946 dma_unmap_single(dev, addr, size, dir);
947 }
948
949 static inline void
950 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
951 enum dma_data_direction dir)
952 {
953 if (dev)
954 dma_sync_single_for_cpu(dev, addr, size, dir);
955 }
956
957 static inline void
958 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
959 enum dma_data_direction dir)
960 {
961 if (dev)
962 dma_sync_single_for_device(dev, addr, size, dir);
963 }
964
965
966 static int
967 fc_map_sg(struct scatterlist *sg, int nents)
968 {
969 struct scatterlist *s;
970 int i;
971
972 WARN_ON(nents == 0 || sg[0].length == 0);
973
974 for_each_sg(sg, s, nents, i) {
975 s->dma_address = 0L;
976 #ifdef CONFIG_NEED_SG_DMA_LENGTH
977 s->dma_length = s->length;
978 #endif
979 }
980 return nents;
981 }
982
983 static inline int
984 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
985 enum dma_data_direction dir)
986 {
987 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
988 }
989
990 static inline void
991 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
992 enum dma_data_direction dir)
993 {
994 if (dev)
995 dma_unmap_sg(dev, sg, nents, dir);
996 }
997
998
999
1000 static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
1001 static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
1002
1003
1004 static void
1005 __nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
1006 {
1007 struct nvme_fc_rport *rport = lsop->rport;
1008 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1009 unsigned long flags;
1010
1011 spin_lock_irqsave(&rport->lock, flags);
1012
1013 if (!lsop->req_queued) {
1014 spin_unlock_irqrestore(&rport->lock, flags);
1015 return;
1016 }
1017
1018 list_del(&lsop->lsreq_list);
1019
1020 lsop->req_queued = false;
1021
1022 spin_unlock_irqrestore(&rport->lock, flags);
1023
1024 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
1025 (lsreq->rqstlen + lsreq->rsplen),
1026 DMA_BIDIRECTIONAL);
1027
1028 nvme_fc_rport_put(rport);
1029 }
1030
1031 static int
1032 __nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
1033 struct nvmefc_ls_req_op *lsop,
1034 void (*done)(struct nvmefc_ls_req *req, int status))
1035 {
1036 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1037 unsigned long flags;
1038 int ret = 0;
1039
1040 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
1041 return -ECONNREFUSED;
1042
1043 if (!nvme_fc_rport_get(rport))
1044 return -ESHUTDOWN;
1045
1046 lsreq->done = done;
1047 lsop->rport = rport;
1048 lsop->req_queued = false;
1049 INIT_LIST_HEAD(&lsop->lsreq_list);
1050 init_completion(&lsop->ls_done);
1051
1052 lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
1053 lsreq->rqstlen + lsreq->rsplen,
1054 DMA_BIDIRECTIONAL);
1055 if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
1056 ret = -EFAULT;
1057 goto out_putrport;
1058 }
1059 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
1060
1061 spin_lock_irqsave(&rport->lock, flags);
1062
1063 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);
1064
1065 lsop->req_queued = true;
1066
1067 spin_unlock_irqrestore(&rport->lock, flags);
1068
1069 ret = rport->lport->ops->ls_req(&rport->lport->localport,
1070 &rport->remoteport, lsreq);
1071 if (ret)
1072 goto out_unlink;
1073
1074 return 0;
1075
1076 out_unlink:
1077 lsop->ls_error = ret;
1078 spin_lock_irqsave(&rport->lock, flags);
1079 lsop->req_queued = false;
1080 list_del(&lsop->lsreq_list);
1081 spin_unlock_irqrestore(&rport->lock, flags);
1082 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
1083 (lsreq->rqstlen + lsreq->rsplen),
1084 DMA_BIDIRECTIONAL);
1085 out_putrport:
1086 nvme_fc_rport_put(rport);
1087
1088 return ret;
1089 }
1090
1091 static void
1092 nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
1093 {
1094 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1095
1096 lsop->ls_error = status;
1097 complete(&lsop->ls_done);
1098 }
1099
1100 static int
1101 nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
1102 {
1103 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1104 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
1105 int ret;
1106
1107 ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
1108
1109 if (!ret) {
1110
1111
1112
1113
1114
1115
1116 wait_for_completion(&lsop->ls_done);
1117
1118 __nvme_fc_finish_ls_req(lsop);
1119
1120 ret = lsop->ls_error;
1121 }
1122
1123 if (ret)
1124 return ret;
1125
1126
1127 if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
1128 return -ENXIO;
1129
1130 return 0;
1131 }
1132
1133 static int
1134 nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
1135 struct nvmefc_ls_req_op *lsop,
1136 void (*done)(struct nvmefc_ls_req *req, int status))
1137 {
1138
1139
1140 return __nvme_fc_send_ls_req(rport, lsop, done);
1141 }
1142
1143
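/* Validation errors for LS responses; indexes into validation_errors[]. */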
1144 enum {
1145 VERR_NO_ERROR = 0,
1146 VERR_LSACC = 1,
1147 VERR_LSDESC_RQST = 2,
1148 VERR_LSDESC_RQST_LEN = 3,
1149 VERR_ASSOC_ID = 4,
1150 VERR_ASSOC_ID_LEN = 5,
1151 VERR_CONN_ID = 6,
1152 VERR_CONN_ID_LEN = 7,
1153 VERR_CR_ASSOC = 8,
1154 VERR_CR_ASSOC_ACC_LEN = 9,
1155 VERR_CR_CONN = 10,
1156 VERR_CR_CONN_ACC_LEN = 11,
1157 VERR_DISCONN = 12,
1158 VERR_DISCONN_ACC_LEN = 13,
1159 };
1160
1161 static char *validation_errors[] = {
1162 "OK",
1163 "Not LS_ACC",
1164 "Not LSDESC_RQST",
1165 "Bad LSDESC_RQST Length",
1166 "Not Association ID",
1167 "Bad Association ID Length",
1168 "Not Connection ID",
1169 "Bad Connection ID Length",
1170 "Not CR_ASSOC Rqst",
1171 "Bad CR_ASSOC ACC Length",
1172 "Not CR_CONN Rqst",
1173 "Bad CR_CONN ACC Length",
1174 "Not Disconnect Rqst",
1175 "Bad Disconnect ACC Length",
1176 };
1177
1178 static int
1179 nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
1180 struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
1181 {
1182 struct nvmefc_ls_req_op *lsop;
1183 struct nvmefc_ls_req *lsreq;
1184 struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
1185 struct fcnvme_ls_cr_assoc_acc *assoc_acc;
1186 int ret, fcret = 0;
1187
1188 lsop = kzalloc((sizeof(*lsop) +
1189 ctrl->lport->ops->lsrqst_priv_sz +
1190 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
1191 if (!lsop) {
1192 ret = -ENOMEM;
1193 goto out_no_memory;
1194 }
1195 lsreq = &lsop->ls_req;
1196
1197 lsreq->private = (void *)&lsop[1];
1198 assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
1199 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1200 assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
1201
1202 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
1203 assoc_rqst->desc_list_len =
1204 cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1205
1206 assoc_rqst->assoc_cmd.desc_tag =
1207 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
1208 assoc_rqst->assoc_cmd.desc_len =
1209 fcnvme_lsdesc_len(
1210 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1211
1212 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1213 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
1214
1215 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
1216 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
1217 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
1218 min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
1219 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
1220 min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
1221
1222 lsop->queue = queue;
1223 lsreq->rqstaddr = assoc_rqst;
1224 lsreq->rqstlen = sizeof(*assoc_rqst);
1225 lsreq->rspaddr = assoc_acc;
1226 lsreq->rsplen = sizeof(*assoc_acc);
1227 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1228
1229 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1230 if (ret)
1231 goto out_free_buffer;
1232
1233
1234
1235
1236 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1237 fcret = VERR_LSACC;
1238 else if (assoc_acc->hdr.desc_list_len !=
1239 fcnvme_lsdesc_len(
1240 sizeof(struct fcnvme_ls_cr_assoc_acc)))
1241 fcret = VERR_CR_ASSOC_ACC_LEN;
1242 else if (assoc_acc->hdr.rqst.desc_tag !=
1243 cpu_to_be32(FCNVME_LSDESC_RQST))
1244 fcret = VERR_LSDESC_RQST;
1245 else if (assoc_acc->hdr.rqst.desc_len !=
1246 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1247 fcret = VERR_LSDESC_RQST_LEN;
1248 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
1249 fcret = VERR_CR_ASSOC;
1250 else if (assoc_acc->associd.desc_tag !=
1251 cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1252 fcret = VERR_ASSOC_ID;
1253 else if (assoc_acc->associd.desc_len !=
1254 fcnvme_lsdesc_len(
1255 sizeof(struct fcnvme_lsdesc_assoc_id)))
1256 fcret = VERR_ASSOC_ID_LEN;
1257 else if (assoc_acc->connectid.desc_tag !=
1258 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1259 fcret = VERR_CONN_ID;
1260 else if (assoc_acc->connectid.desc_len !=
1261 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1262 fcret = VERR_CONN_ID_LEN;
1263
1264 if (fcret) {
1265 ret = -EBADF;
1266 dev_err(ctrl->dev,
1267 "q %d connect failed: %s\n",
1268 queue->qnum, validation_errors[fcret]);
1269 } else {
1270 ctrl->association_id =
1271 be64_to_cpu(assoc_acc->associd.association_id);
1272 queue->connection_id =
1273 be64_to_cpu(assoc_acc->connectid.connection_id);
1274 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1275 }
1276
1277 out_free_buffer:
1278 kfree(lsop);
1279 out_no_memory:
1280 if (ret)
1281 dev_err(ctrl->dev,
1282 "queue %d connect admin queue failed (%d).\n",
1283 queue->qnum, ret);
1284 return ret;
1285 }
1286
1287 static int
1288 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1289 u16 qsize, u16 ersp_ratio)
1290 {
1291 struct nvmefc_ls_req_op *lsop;
1292 struct nvmefc_ls_req *lsreq;
1293 struct fcnvme_ls_cr_conn_rqst *conn_rqst;
1294 struct fcnvme_ls_cr_conn_acc *conn_acc;
1295 int ret, fcret = 0;
1296
1297 lsop = kzalloc((sizeof(*lsop) +
1298 ctrl->lport->ops->lsrqst_priv_sz +
1299 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
1300 if (!lsop) {
1301 ret = -ENOMEM;
1302 goto out_no_memory;
1303 }
1304 lsreq = &lsop->ls_req;
1305
1306 lsreq->private = (void *)&lsop[1];
1307 conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
1308 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1309 conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
1310
1311 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
1312 conn_rqst->desc_list_len = cpu_to_be32(
1313 sizeof(struct fcnvme_lsdesc_assoc_id) +
1314 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1315
1316 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1317 conn_rqst->associd.desc_len =
1318 fcnvme_lsdesc_len(
1319 sizeof(struct fcnvme_lsdesc_assoc_id));
1320 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1321 conn_rqst->connect_cmd.desc_tag =
1322 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
1323 conn_rqst->connect_cmd.desc_len =
1324 fcnvme_lsdesc_len(
1325 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1326 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1327 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
1328 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
1329
1330 lsop->queue = queue;
1331 lsreq->rqstaddr = conn_rqst;
1332 lsreq->rqstlen = sizeof(*conn_rqst);
1333 lsreq->rspaddr = conn_acc;
1334 lsreq->rsplen = sizeof(*conn_acc);
1335 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1336
1337 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1338 if (ret)
1339 goto out_free_buffer;
1340
1341
1342
1343
1344 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1345 fcret = VERR_LSACC;
1346 else if (conn_acc->hdr.desc_list_len !=
1347 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
1348 fcret = VERR_CR_CONN_ACC_LEN;
1349 else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
1350 fcret = VERR_LSDESC_RQST;
1351 else if (conn_acc->hdr.rqst.desc_len !=
1352 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1353 fcret = VERR_LSDESC_RQST_LEN;
1354 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
1355 fcret = VERR_CR_CONN;
1356 else if (conn_acc->connectid.desc_tag !=
1357 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1358 fcret = VERR_CONN_ID;
1359 else if (conn_acc->connectid.desc_len !=
1360 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1361 fcret = VERR_CONN_ID_LEN;
1362
1363 if (fcret) {
1364 ret = -EBADF;
1365 dev_err(ctrl->dev,
1366 "q %d connect failed: %s\n",
1367 queue->qnum, validation_errors[fcret]);
1368 } else {
1369 queue->connection_id =
1370 be64_to_cpu(conn_acc->connectid.connection_id);
1371 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1372 }
1373
1374 out_free_buffer:
1375 kfree(lsop);
1376 out_no_memory:
1377 if (ret)
1378 dev_err(ctrl->dev,
1379 "queue %d connect command failed (%d).\n",
1380 queue->qnum, ret);
1381 return ret;
1382 }
1383
1384 static void
1385 nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
1386 {
1387 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1388
1389 __nvme_fc_finish_ls_req(lsop);
1390
1391
1392
1393 kfree(lsop);
1394 }
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
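/*
 * nvme_fc_xmt_disconnect_assoc - send a Disconnect Association LS for the
 *   controller's current association.
 *
 * The LS is sent asynchronously and the lsop is freed by the completion
 * handler.  If the lsop cannot be allocated the routine simply returns;
 * if the send itself fails the lsop is freed here and, as with a
 * successful send, ctrl->association_id is cleared.
 */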
1413 static void
1414 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1415 {
1416 struct fcnvme_ls_disconnect_rqst *discon_rqst;
1417 struct fcnvme_ls_disconnect_acc *discon_acc;
1418 struct nvmefc_ls_req_op *lsop;
1419 struct nvmefc_ls_req *lsreq;
1420 int ret;
1421
1422 lsop = kzalloc((sizeof(*lsop) +
1423 ctrl->lport->ops->lsrqst_priv_sz +
1424 sizeof(*discon_rqst) + sizeof(*discon_acc)),
1425 GFP_KERNEL);
1426 if (!lsop)
1427
1428 return;
1429
1430 lsreq = &lsop->ls_req;
1431
1432 lsreq->private = (void *)&lsop[1];
1433 discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
1434 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1435 discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
1436
1437 discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
1438 discon_rqst->desc_list_len = cpu_to_be32(
1439 sizeof(struct fcnvme_lsdesc_assoc_id) +
1440 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1441
1442 discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1443 discon_rqst->associd.desc_len =
1444 fcnvme_lsdesc_len(
1445 sizeof(struct fcnvme_lsdesc_assoc_id));
1446
1447 discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1448
1449 discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
1450 FCNVME_LSDESC_DISCONN_CMD);
1451 discon_rqst->discon_cmd.desc_len =
1452 fcnvme_lsdesc_len(
1453 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1454 discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
1455 discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
1456
1457 lsreq->rqstaddr = discon_rqst;
1458 lsreq->rqstlen = sizeof(*discon_rqst);
1459 lsreq->rspaddr = discon_acc;
1460 lsreq->rsplen = sizeof(*discon_acc);
1461 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1462
1463 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1464 nvme_fc_disconnect_assoc_done);
1465 if (ret)
1466 kfree(lsop);
1467
1468
1469 ctrl->association_id = 0;
1470 }
1471
1472
1473
1474
1475 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
1476
1477 static void
1478 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1479 struct nvme_fc_fcp_op *op)
1480 {
1481 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1482 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1483 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1484 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1485
1486 atomic_set(&op->state, FCPOP_STATE_UNINIT);
1487 }
1488
1489 static void
1490 nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1491 unsigned int hctx_idx)
1492 {
1493 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1494
1495 return __nvme_fc_exit_request(set->driver_data, op);
1496 }
1497
1498 static int
1499 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1500 {
1501 unsigned long flags;
1502 int opstate;
1503
1504 spin_lock_irqsave(&ctrl->lock, flags);
1505 opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1506 if (opstate != FCPOP_STATE_ACTIVE)
1507 atomic_set(&op->state, opstate);
1508 else if (ctrl->flags & FCCTRL_TERMIO)
1509 ctrl->iocnt++;
1510 spin_unlock_irqrestore(&ctrl->lock, flags);
1511
1512 if (opstate != FCPOP_STATE_ACTIVE)
1513 return -ECANCELED;
1514
1515 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1516 &ctrl->rport->remoteport,
1517 op->queue->lldd_handle,
1518 &op->fcp_req);
1519
1520 return 0;
1521 }
1522
1523 static void
1524 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1525 {
1526 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1527 int i;
1528
1529
1530 if (!(aen_op->flags & FCOP_FLAGS_AEN))
1531 return;
1532
1533 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
1534 __nvme_fc_abort_op(ctrl, aen_op);
1535 }
1536
1537 static inline void
1538 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
1539 struct nvme_fc_fcp_op *op, int opstate)
1540 {
1541 unsigned long flags;
1542
1543 if (opstate == FCPOP_STATE_ABORTED) {
1544 spin_lock_irqsave(&ctrl->lock, flags);
1545 if (ctrl->flags & FCCTRL_TERMIO) {
1546 if (!--ctrl->iocnt)
1547 wake_up(&ctrl->ioabort_wait);
1548 }
1549 spin_unlock_irqrestore(&ctrl->lock, flags);
1550 }
1551 }
1552
1553 static void
1554 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1555 {
1556 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1557 struct request *rq = op->rq;
1558 struct nvmefc_fcp_req *freq = &op->fcp_req;
1559 struct nvme_fc_ctrl *ctrl = op->ctrl;
1560 struct nvme_fc_queue *queue = op->queue;
1561 struct nvme_completion *cqe = &op->rsp_iu.cqe;
1562 struct nvme_command *sqe = &op->cmd_iu.sqe;
1563 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1564 union nvme_result result;
1565 bool terminate_assoc = true;
1566 int opstate;
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
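	/*
	 * Response handling: an LLDD error or an aborted op is failed with
	 * NVME_SC_HOST_PATH_ERROR.  Otherwise the response must be either a
	 * zero-length/all-zero ERSP (whose transferred length must match the
	 * command's data_len) or a full NVMe_ERSP IU whose length, transfer
	 * count and command id are validated against the SQE/CQE.  Any
	 * mismatch fails the request and triggers error recovery on the
	 * association.
	 */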
1605 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
1606
1607 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1608 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1609
1610 if (opstate == FCPOP_STATE_ABORTED)
1611 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1612 else if (freq->status) {
1613 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1614 dev_info(ctrl->ctrl.device,
1615 "NVME-FC{%d}: io failed due to lldd error %d\n",
1616 ctrl->cnum, freq->status);
1617 }
1618
1619
1620
1621
1622
1623
1624 if (status)
1625 goto done;
1626
1627
1628
1629
1630
1631
1632
1633
1634 switch (freq->rcv_rsplen) {
1635
1636 case 0:
1637 case NVME_FC_SIZEOF_ZEROS_RSP:
1638
1639
1640
1641
1642
1643 if (freq->transferred_length !=
1644 be32_to_cpu(op->cmd_iu.data_len)) {
1645 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1646 dev_info(ctrl->ctrl.device,
1647 "NVME-FC{%d}: io failed due to bad transfer "
1648 "length: %d vs expected %d\n",
1649 ctrl->cnum, freq->transferred_length,
1650 be32_to_cpu(op->cmd_iu.data_len));
1651 goto done;
1652 }
1653 result.u64 = 0;
1654 break;
1655
1656 case sizeof(struct nvme_fc_ersp_iu):
1657
1658
1659
1660
1661 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
1662 (freq->rcv_rsplen / 4) ||
1663 be32_to_cpu(op->rsp_iu.xfrd_len) !=
1664 freq->transferred_length ||
1665 op->rsp_iu.status_code ||
1666 sqe->common.command_id != cqe->command_id)) {
1667 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1668 dev_info(ctrl->ctrl.device,
1669 "NVME-FC{%d}: io failed due to bad NVMe_ERSP: "
1670 "iu len %d, xfr len %d vs %d, status code "
1671 "%d, cmdid %d vs %d\n",
1672 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
1673 be32_to_cpu(op->rsp_iu.xfrd_len),
1674 freq->transferred_length,
1675 op->rsp_iu.status_code,
1676 sqe->common.command_id,
1677 cqe->command_id);
1678 goto done;
1679 }
1680 result = cqe->result;
1681 status = cqe->status;
1682 break;
1683
1684 default:
1685 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1686 dev_info(ctrl->ctrl.device,
1687 "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu "
1688 "len %d\n",
1689 ctrl->cnum, freq->rcv_rsplen);
1690 goto done;
1691 }
1692
1693 terminate_assoc = false;
1694
1695 done:
1696 if (op->flags & FCOP_FLAGS_AEN) {
1697 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
1698 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
1699 atomic_set(&op->state, FCPOP_STATE_IDLE);
1700 op->flags = FCOP_FLAGS_AEN;
1701 nvme_fc_ctrl_put(ctrl);
1702 goto check_error;
1703 }
1704
1705 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
1706 nvme_end_request(rq, status, result);
1707
1708 check_error:
1709 if (terminate_assoc)
1710 nvme_fc_error_recovery(ctrl, "transport detected io error");
1711 }
1712
1713 static int
1714 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
1715 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
1716 struct request *rq, u32 rqno)
1717 {
1718 struct nvme_fcp_op_w_sgl *op_w_sgl =
1719 container_of(op, typeof(*op_w_sgl), op);
1720 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1721 int ret = 0;
1722
1723 memset(op, 0, sizeof(*op));
1724 op->fcp_req.cmdaddr = &op->cmd_iu;
1725 op->fcp_req.cmdlen = sizeof(op->cmd_iu);
1726 op->fcp_req.rspaddr = &op->rsp_iu;
1727 op->fcp_req.rsplen = sizeof(op->rsp_iu);
1728 op->fcp_req.done = nvme_fc_fcpio_done;
1729 op->ctrl = ctrl;
1730 op->queue = queue;
1731 op->rq = rq;
1732 op->rqno = rqno;
1733
1734 cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1735 cmdiu->fc_id = NVME_CMD_FC_ID;
1736 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1737
1738 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
1739 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
1740 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
1741 dev_err(ctrl->dev,
1742 "FCP Op failed - cmdiu dma mapping failed.\n");
1743 ret = -EFAULT;
1744 goto out_on_error;
1745 }
1746
1747 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
1748 &op->rsp_iu, sizeof(op->rsp_iu),
1749 DMA_FROM_DEVICE);
1750 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
1751 dev_err(ctrl->dev,
1752 "FCP Op failed - rspiu dma mapping failed.\n");
1753 ret = -EFAULT;
1754 }
1755
1756 atomic_set(&op->state, FCPOP_STATE_IDLE);
1757 out_on_error:
1758 return ret;
1759 }
1760
1761 static int
1762 nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
1763 unsigned int hctx_idx, unsigned int numa_node)
1764 {
1765 struct nvme_fc_ctrl *ctrl = set->driver_data;
1766 struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
1767 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
1768 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
1769 int res;
1770
1771 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
1772 if (res)
1773 return res;
1774 op->op.fcp_req.first_sgl = &op->sgl[0];
1775 op->op.fcp_req.private = &op->priv[0];
1776 nvme_req(rq)->ctrl = &ctrl->ctrl;
1777 return res;
1778 }
1779
1780 static int
1781 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
1782 {
1783 struct nvme_fc_fcp_op *aen_op;
1784 struct nvme_fc_cmd_iu *cmdiu;
1785 struct nvme_command *sqe;
1786 void *private;
1787 int i, ret;
1788
1789 aen_op = ctrl->aen_ops;
1790 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
1791 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
1792 GFP_KERNEL);
1793 if (!private)
1794 return -ENOMEM;
1795
1796 cmdiu = &aen_op->cmd_iu;
1797 sqe = &cmdiu->sqe;
1798 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
1799 aen_op, (struct request *)NULL,
1800 (NVME_AQ_BLK_MQ_DEPTH + i));
1801 if (ret) {
1802 kfree(private);
1803 return ret;
1804 }
1805
1806 aen_op->flags = FCOP_FLAGS_AEN;
1807 aen_op->fcp_req.private = private;
1808
1809 memset(sqe, 0, sizeof(*sqe));
1810 sqe->common.opcode = nvme_admin_async_event;
1811
1812 sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
1813 }
1814 return 0;
1815 }
1816
1817 static void
1818 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
1819 {
1820 struct nvme_fc_fcp_op *aen_op;
1821 int i;
1822
1823 aen_op = ctrl->aen_ops;
1824 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
1825 if (!aen_op->fcp_req.private)
1826 continue;
1827
1828 __nvme_fc_exit_request(ctrl, aen_op);
1829
1830 kfree(aen_op->fcp_req.private);
1831 aen_op->fcp_req.private = NULL;
1832 }
1833 }
1834
1835 static inline void
1836 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
1837 unsigned int qidx)
1838 {
1839 struct nvme_fc_queue *queue = &ctrl->queues[qidx];
1840
1841 hctx->driver_data = queue;
1842 queue->hctx = hctx;
1843 }
1844
1845 static int
1846 nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1847 unsigned int hctx_idx)
1848 {
1849 struct nvme_fc_ctrl *ctrl = data;
1850
1851 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
1852
1853 return 0;
1854 }
1855
1856 static int
1857 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1858 unsigned int hctx_idx)
1859 {
1860 struct nvme_fc_ctrl *ctrl = data;
1861
1862 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
1863
1864 return 0;
1865 }
1866
1867 static void
1868 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
1869 {
1870 struct nvme_fc_queue *queue;
1871
1872 queue = &ctrl->queues[idx];
1873 memset(queue, 0, sizeof(*queue));
1874 queue->ctrl = ctrl;
1875 queue->qnum = idx;
1876 atomic_set(&queue->csn, 0);
1877 queue->dev = ctrl->dev;
1878
1879 if (idx > 0)
1880 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1881 else
1882 queue->cmnd_capsule_len = sizeof(struct nvme_command);
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894 }
1895
1896
1897
1898
1899
1900
1901
1902
1903
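/*
 * nvme_fc_free_queue - mark a transport queue disconnected and reset its
 * connection id and CSN.  No-op if the queue was never connected.
 */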
1904 static void
1905 nvme_fc_free_queue(struct nvme_fc_queue *queue)
1906 {
1907 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
1908 return;
1909
1910 clear_bit(NVME_FC_Q_LIVE, &queue->flags);
1911
1912
1913
1914
1915
1916
1917 queue->connection_id = 0;
1918 atomic_set(&queue->csn, 0);
1919 }
1920
1921 static void
1922 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
1923 struct nvme_fc_queue *queue, unsigned int qidx)
1924 {
1925 if (ctrl->lport->ops->delete_queue)
1926 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
1927 queue->lldd_handle);
1928 queue->lldd_handle = NULL;
1929 }
1930
1931 static void
1932 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
1933 {
1934 int i;
1935
1936 for (i = 1; i < ctrl->ctrl.queue_count; i++)
1937 nvme_fc_free_queue(&ctrl->queues[i]);
1938 }
1939
1940 static int
1941 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
1942 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
1943 {
1944 int ret = 0;
1945
1946 queue->lldd_handle = NULL;
1947 if (ctrl->lport->ops->create_queue)
1948 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
1949 qidx, qsize, &queue->lldd_handle);
1950
1951 return ret;
1952 }
1953
1954 static void
1955 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
1956 {
1957 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
1958 int i;
1959
1960 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
1961 __nvme_fc_delete_hw_queue(ctrl, queue, i);
1962 }
1963
1964 static int
1965 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1966 {
1967 struct nvme_fc_queue *queue = &ctrl->queues[1];
1968 int i, ret;
1969
1970 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
1971 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
1972 if (ret)
1973 goto delete_queues;
1974 }
1975
1976 return 0;
1977
1978 delete_queues:
1979 for (; i >= 0; i--)
1980 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
1981 return ret;
1982 }
1983
1984 static int
1985 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1986 {
1987 int i, ret = 0;
1988
1989 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
1990 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
1991 (qsize / 5));
1992 if (ret)
1993 break;
1994 ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
1995 if (ret)
1996 break;
1997
1998 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
1999 }
2000
2001 return ret;
2002 }
2003
2004 static void
2005 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
2006 {
2007 int i;
2008
2009 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2010 nvme_fc_init_queue(ctrl, i);
2011 }
2012
2013 static void
2014 nvme_fc_ctrl_free(struct kref *ref)
2015 {
2016 struct nvme_fc_ctrl *ctrl =
2017 container_of(ref, struct nvme_fc_ctrl, ref);
2018 unsigned long flags;
2019
2020 if (ctrl->ctrl.tagset) {
2021 blk_cleanup_queue(ctrl->ctrl.connect_q);
2022 blk_mq_free_tag_set(&ctrl->tag_set);
2023 }
2024
2025
2026 spin_lock_irqsave(&ctrl->rport->lock, flags);
2027 list_del(&ctrl->ctrl_list);
2028 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
2029
2030 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2031 blk_cleanup_queue(ctrl->ctrl.admin_q);
2032 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
2033 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2034
2035 kfree(ctrl->queues);
2036
2037 put_device(ctrl->dev);
2038 nvme_fc_rport_put(ctrl->rport);
2039
2040 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2041 if (ctrl->ctrl.opts)
2042 nvmf_free_options(ctrl->ctrl.opts);
2043 kfree(ctrl);
2044 }
2045
2046 static void
2047 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2048 {
2049 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2050 }
2051
2052 static int
2053 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2054 {
2055 return kref_get_unless_zero(&ctrl->ref);
2056 }
2057
2058
2059
2060
2061
2062 static void
2063 nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
2064 {
2065 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2066
2067 WARN_ON(nctrl != &ctrl->ctrl);
2068
2069 nvme_fc_ctrl_put(ctrl);
2070 }
2071
2072 static void
2073 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2074 {
2075 int active;
2076
2077
2078
2079
2080
2081
2082
2083
2084 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
2085 active = atomic_xchg(&ctrl->err_work_active, 1);
2086 if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
2087 atomic_set(&ctrl->err_work_active, 0);
2088 WARN_ON(1);
2089 }
2090 return;
2091 }
2092
2093
2094 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2095 return;
2096
2097 dev_warn(ctrl->ctrl.device,
2098 "NVME-FC{%d}: transport association error detected: %s\n",
2099 ctrl->cnum, errmsg);
2100 dev_warn(ctrl->ctrl.device,
2101 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
2102
2103 nvme_reset_ctrl(&ctrl->ctrl);
2104 }
2105
2106 static enum blk_eh_timer_return
2107 nvme_fc_timeout(struct request *rq, bool reserved)
2108 {
2109 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2110 struct nvme_fc_ctrl *ctrl = op->ctrl;
2111
2112
2113
2114
2115
2116
2117
2118
2119 nvme_fc_error_recovery(ctrl, "io timeout error");
2120
2121
2122
2123
2124
2125
2126 return BLK_EH_RESET_TIMER;
2127 }
2128
2129 static int
2130 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2131 struct nvme_fc_fcp_op *op)
2132 {
2133 struct nvmefc_fcp_req *freq = &op->fcp_req;
2134 int ret;
2135
2136 freq->sg_cnt = 0;
2137
2138 if (!blk_rq_nr_phys_segments(rq))
2139 return 0;
2140
2141 freq->sg_table.sgl = freq->first_sgl;
2142 ret = sg_alloc_table_chained(&freq->sg_table,
2143 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
2144 SG_CHUNK_SIZE);
2145 if (ret)
2146 return -ENOMEM;
2147
2148 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
2149 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
2150 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2151 op->nents, rq_dma_dir(rq));
2152 if (unlikely(freq->sg_cnt <= 0)) {
2153 sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
2154 freq->sg_cnt = 0;
2155 return -EFAULT;
2156 }
2157
2158
2159
2160
2161 return 0;
2162 }
2163
2164 static void
2165 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2166 struct nvme_fc_fcp_op *op)
2167 {
2168 struct nvmefc_fcp_req *freq = &op->fcp_req;
2169
2170 if (!freq->sg_cnt)
2171 return;
2172
2173 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2174 rq_dma_dir(rq));
2175
2176 nvme_cleanup_cmd(rq);
2177
2178 sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
2179
2180 freq->sg_cnt = 0;
2181 }
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
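/*
 * nvme_fc_start_fcp_op - build and issue one FCP operation.
 *
 * Fills in the FC-NVME CMD IU (connection id, data length, direction and
 * transport SGL), maps the request data, assigns the next CSN and hands
 * the op to the LLDD via ops->fcp_io().  On failure the data mapping is
 * undone and BLK_STS_RESOURCE or BLK_STS_IOERR is returned so blk-mq can
 * retry or fail the request.
 */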
2206 static blk_status_t
2207 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2208 struct nvme_fc_fcp_op *op, u32 data_len,
2209 enum nvmefc_fcp_datadir io_dir)
2210 {
2211 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2212 struct nvme_command *sqe = &cmdiu->sqe;
2213 int ret, opstate;
2214
2215
2216
2217
2218
2219 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2220 return BLK_STS_RESOURCE;
2221
2222 if (!nvme_fc_ctrl_get(ctrl))
2223 return BLK_STS_IOERR;
2224
2225
2226 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
2227 cmdiu->data_len = cpu_to_be32(data_len);
2228 switch (io_dir) {
2229 case NVMEFC_FCP_WRITE:
2230 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
2231 break;
2232 case NVMEFC_FCP_READ:
2233 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
2234 break;
2235 case NVMEFC_FCP_NODATA:
2236 cmdiu->flags = 0;
2237 break;
2238 }
2239 op->fcp_req.payload_length = data_len;
2240 op->fcp_req.io_dir = io_dir;
2241 op->fcp_req.transferred_length = 0;
2242 op->fcp_req.rcv_rsplen = 0;
2243 op->fcp_req.status = NVME_SC_SUCCESS;
2244 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
2245
2246
2247
2248
2249
2250 WARN_ON_ONCE(sqe->common.metadata);
2251 sqe->common.flags |= NVME_CMD_SGL_METABUF;
2252
2253
2254
2255
2256
2257
2258
2259
2260 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2261 NVME_SGL_FMT_TRANSPORT_A;
2262 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2263 sqe->rw.dptr.sgl.addr = 0;
2264
2265 if (!(op->flags & FCOP_FLAGS_AEN)) {
2266 ret = nvme_fc_map_data(ctrl, op->rq, op);
2267 if (ret < 0) {
2268 nvme_cleanup_cmd(op->rq);
2269 nvme_fc_ctrl_put(ctrl);
2270 if (ret == -ENOMEM || ret == -EAGAIN)
2271 return BLK_STS_RESOURCE;
2272 return BLK_STS_IOERR;
2273 }
2274 }
2275
2276 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2277 sizeof(op->cmd_iu), DMA_TO_DEVICE);
2278
2279 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
2280
2281 if (!(op->flags & FCOP_FLAGS_AEN))
2282 blk_mq_start_request(op->rq);
2283
2284 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
2285 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2286 &ctrl->rport->remoteport,
2287 queue->lldd_handle, &op->fcp_req);
2288
2289 if (ret) {
2290 /*
2291  * The LLDD rejected the command.  Unwind: force the op state to
2292  * COMPLETE (accounting for a pending TERMIO via
2293  * __nvme_fc_fcpop_chk_teardowns()), unmap the data and drop the
2294  * controller reference.  If the remote port is still online and the
2295  * error is not a transient -EBUSY, fail the io; otherwise let blk-mq
2296  * requeue it.
2297  */
2298 
2299 
2300 
2301 
2302 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
2303 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2304
2305 if (!(op->flags & FCOP_FLAGS_AEN))
2306 nvme_fc_unmap_data(ctrl, op->rq, op);
2307
2308 nvme_fc_ctrl_put(ctrl);
2309
2310 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2311 ret != -EBUSY)
2312 return BLK_STS_IOERR;
2313
2314 return BLK_STS_RESOURCE;
2315 }
2316
2317 return BLK_STS_OK;
2318 }
2319
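/*
 * blk-mq .queue_rq entry point: build the NVMe SQE via nvme_setup_cmd(),
 * derive the transfer direction and length from the request's physical
 * segments, and hand the command to nvme_fc_start_fcp_op().
 */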
2320 static blk_status_t
2321 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2322 const struct blk_mq_queue_data *bd)
2323 {
2324 struct nvme_ns *ns = hctx->queue->queuedata;
2325 struct nvme_fc_queue *queue = hctx->driver_data;
2326 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2327 struct request *rq = bd->rq;
2328 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2329 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2330 struct nvme_command *sqe = &cmdiu->sqe;
2331 enum nvmefc_fcp_datadir io_dir;
2332 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
2333 u32 data_len;
2334 blk_status_t ret;
2335
2336 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2337 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2338 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2339
2340 ret = nvme_setup_cmd(ns, rq, sqe);
2341 if (ret)
2342 return ret;
2343
2344 /*
2345  * The nvme core can report a non-zero payload length for commands
2346  * (e.g. Write Zeroes) that carry no actual data, so key the transfer
2347  * direction and length off the number of physical segments in the
2348  * request rather than off the payload size alone.
2349  */
2350 
2351 
2352 if (blk_rq_nr_phys_segments(rq)) {
2353 data_len = blk_rq_payload_bytes(rq);
2354 io_dir = ((rq_data_dir(rq) == WRITE) ?
2355 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2356 } else {
2357 data_len = 0;
2358 io_dir = NVMEFC_FCP_NODATA;
2359 }
2360
2361
2362 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2363 }
2364
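/*
 * Submit the Async Event Request command.  AEN ops use a reserved fcp
 * op with no struct request attached, and are skipped while the
 * controller is terminating io.
 */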
2365 static void
2366 nvme_fc_submit_async_event(struct nvme_ctrl *arg)
2367 {
2368 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2369 struct nvme_fc_fcp_op *aen_op;
2370 unsigned long flags;
2371 bool terminating = false;
2372 blk_status_t ret;
2373
2374 spin_lock_irqsave(&ctrl->lock, flags);
2375 if (ctrl->flags & FCCTRL_TERMIO)
2376 terminating = true;
2377 spin_unlock_irqrestore(&ctrl->lock, flags);
2378
2379 if (terminating)
2380 return;
2381
2382 aen_op = &ctrl->aen_ops[0];
2383
2384 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2385 NVMEFC_FCP_NODATA);
2386 if (ret)
2387 dev_err(ctrl->ctrl.device,
2388 "failed async event work\n");
2389 }
2390
2391 static void
2392 nvme_fc_complete_rq(struct request *rq)
2393 {
2394 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2395 struct nvme_fc_ctrl *ctrl = op->ctrl;
2396
2397 atomic_set(&op->state, FCPOP_STATE_IDLE);
2398
2399 nvme_fc_unmap_data(ctrl, rq, op);
2400 nvme_complete_rq(rq);
2401 nvme_fc_ctrl_put(ctrl);
2402 }
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417 static bool
2418 nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2419 {
2420 struct nvme_ctrl *nctrl = data;
2421 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2422 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2423
2424 __nvme_fc_abort_op(ctrl, op);
2425 return true;
2426 }
2427
2428
2429 static const struct blk_mq_ops nvme_fc_mq_ops = {
2430 .queue_rq = nvme_fc_queue_rq,
2431 .complete = nvme_fc_complete_rq,
2432 .init_request = nvme_fc_init_request,
2433 .exit_request = nvme_fc_exit_request,
2434 .init_hctx = nvme_fc_init_hctx,
2435 .timeout = nvme_fc_timeout,
2436 };
2437
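/*
 * First-time io queue setup: negotiate the queue count with the
 * controller, allocate the io tag set and connect_q, then create the
 * hardware queues and connect each io queue to the controller.
 */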
2438 static int
2439 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2440 {
2441 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2442 unsigned int nr_io_queues;
2443 int ret;
2444
2445 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2446 ctrl->lport->ops->max_hw_queues);
2447 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2448 if (ret) {
2449 dev_info(ctrl->ctrl.device,
2450 "set_queue_count failed: %d\n", ret);
2451 return ret;
2452 }
2453
2454 ctrl->ctrl.queue_count = nr_io_queues + 1;
2455 if (!nr_io_queues)
2456 return 0;
2457
2458 nvme_fc_init_io_queues(ctrl);
2459
2460 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2461 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2462 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2463 ctrl->tag_set.reserved_tags = 1;
2464 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
2465 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2466 ctrl->tag_set.cmd_size =
2467 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
2468 ctrl->lport->ops->fcprqst_priv_sz);
2469 ctrl->tag_set.driver_data = ctrl;
2470 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
2471 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2472
2473 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2474 if (ret)
2475 return ret;
2476
2477 ctrl->ctrl.tagset = &ctrl->tag_set;
2478
2479 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2480 if (IS_ERR(ctrl->ctrl.connect_q)) {
2481 ret = PTR_ERR(ctrl->ctrl.connect_q);
2482 goto out_free_tag_set;
2483 }
2484
2485 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2486 if (ret)
2487 goto out_cleanup_blk_queue;
2488
2489 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2490 if (ret)
2491 goto out_delete_hw_queues;
2492
2493 ctrl->ioq_live = true;
2494
2495 return 0;
2496
2497 out_delete_hw_queues:
2498 nvme_fc_delete_hw_io_queues(ctrl);
2499 out_cleanup_blk_queue:
2500 blk_cleanup_queue(ctrl->ctrl.connect_q);
2501 out_free_tag_set:
2502 blk_mq_free_tag_set(&ctrl->tag_set);
2503 nvme_fc_free_io_queues(ctrl);
2504 
2505 /* force the ctrl free path to ignore io queues */
2506 ctrl->ctrl.tagset = NULL;
2507
2508 return ret;
2509 }
2510
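/*
 * Reconnect-time io queue setup: the tag set already exists, so only
 * re-create and re-connect the hardware queues, and resize the tag set
 * if the controller now reports a different queue count.
 */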
2511 static int
2512 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2513 {
2514 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2515 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
2516 unsigned int nr_io_queues;
2517 int ret;
2518
2519 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2520 ctrl->lport->ops->max_hw_queues);
2521 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2522 if (ret) {
2523 dev_info(ctrl->ctrl.device,
2524 "set_queue_count failed: %d\n", ret);
2525 return ret;
2526 }
2527
2528 if (!nr_io_queues && prior_ioq_cnt) {
2529 dev_info(ctrl->ctrl.device,
2530 "Fail Reconnect: At least 1 io queue "
2531 "required (was %d)\n", prior_ioq_cnt);
2532 return -ENOSPC;
2533 }
2534
2535 ctrl->ctrl.queue_count = nr_io_queues + 1;
2536
2537 if (ctrl->ctrl.queue_count == 1)
2538 return 0;
2539
2540 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2541 if (ret)
2542 goto out_free_io_queues;
2543
2544 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2545 if (ret)
2546 goto out_delete_hw_queues;
2547
2548 if (prior_ioq_cnt != nr_io_queues)
2549 dev_info(ctrl->ctrl.device,
2550 "reconnect: revising io queue count from %d to %d\n",
2551 prior_ioq_cnt, nr_io_queues);
2552 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2553
2554 return 0;
2555
2556 out_delete_hw_queues:
2557 nvme_fc_delete_hw_io_queues(ctrl);
2558 out_free_io_queues:
2559 nvme_fc_free_io_queues(ctrl);
2560 return ret;
2561 }
2562
2563 static void
2564 nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
2565 {
2566 struct nvme_fc_lport *lport = rport->lport;
2567
2568 atomic_inc(&lport->act_rport_cnt);
2569 }
2570
2571 static void
2572 nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
2573 {
2574 struct nvme_fc_lport *lport = rport->lport;
2575 u32 cnt;
2576
2577 cnt = atomic_dec_return(&lport->act_rport_cnt);
2578 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
2579 lport->ops->localport_delete(&lport->localport);
2580 }
2581
2582 static int
2583 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
2584 {
2585 struct nvme_fc_rport *rport = ctrl->rport;
2586 u32 cnt;
2587
2588 if (ctrl->assoc_active)
2589 return 1;
2590
2591 ctrl->assoc_active = true;
2592 cnt = atomic_inc_return(&rport->act_ctrl_cnt);
2593 if (cnt == 1)
2594 nvme_fc_rport_active_on_lport(rport);
2595
2596 return 0;
2597 }
2598
2599 static int
2600 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
2601 {
2602 struct nvme_fc_rport *rport = ctrl->rport;
2603 struct nvme_fc_lport *lport = rport->lport;
2604 u32 cnt;
2605
2606 /* the caller clears ctrl->assoc_active before calling here */
2607 
2608 cnt = atomic_dec_return(&rport->act_ctrl_cnt);
2609 if (cnt == 0) {
2610 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
2611 lport->ops->remoteport_delete(&rport->remoteport);
2612 nvme_fc_rport_inactive_on_lport(rport);
2613 }
2614
2615 return 0;
2616 }
2617
2618 /*
2619  * Bring up a controller association: create and connect the admin
2620  * queue, initialize the controller, then create the io queues.
2621  */
2622 static int
2623 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2624 {
2625 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2626 int ret;
2627 bool changed;
2628
2629 ++ctrl->ctrl.nr_reconnects;
2630
2631 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2632 return -ENODEV;
2633
2634 if (nvme_fc_ctlr_active_on_rport(ctrl))
2635 return -ENOTUNIQ;
2636
2637 dev_info(ctrl->ctrl.device,
2638 "NVME-FC{%d}: create association : host wwpn 0x%016llx "
2639 " rport wwpn 0x%016llx: NQN \"%s\"\n",
2640 ctrl->cnum, ctrl->lport->localport.port_name,
2641 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
2642
2643 
2644 /*
2645  * Create and connect the admin queue.
2646  */
2647 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2648 NVME_AQ_DEPTH);
2649 if (ret)
2650 goto out_free_queue;
2651
2652 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
2653 NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
2654 if (ret)
2655 goto out_delete_hw_queue;
2656
2657 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
2658 if (ret)
2659 goto out_disconnect_admin_queue;
2660
2661 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
2662
2663 /*
2664  * Read the controller capabilities and enable the controller before
2665  * issuing any further admin commands.
2666  */
2667 
2668 
2669 
2670 ret = nvme_enable_ctrl(&ctrl->ctrl);
2671 if (ret)
2672 goto out_disconnect_admin_queue;
2673
2674 ctrl->ctrl.max_hw_sectors =
2675 (ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);
2676
2677 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2678
2679 ret = nvme_init_identify(&ctrl->ctrl);
2680 if (ret)
2681 goto out_disconnect_admin_queue;
2682
2683 
2684 /* FC-NVME does not support a non-zero icdoff (in-capsule data offset) */
2685 
2686 if (ctrl->ctrl.icdoff) {
2687 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
2688 ctrl->ctrl.icdoff);
2689 goto out_disconnect_admin_queue;
2690 }
2691
2692
2693
2694 if (opts->queue_size > ctrl->ctrl.maxcmd) {
2695
2696 dev_warn(ctrl->ctrl.device,
2697 "queue_size %zu > ctrl maxcmd %u, reducing "
2698 "to queue_size\n",
2699 opts->queue_size, ctrl->ctrl.maxcmd);
2700 opts->queue_size = ctrl->ctrl.maxcmd;
2701 }
2702
2703 if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
2704
2705 dev_warn(ctrl->ctrl.device,
2706 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2707 opts->queue_size, ctrl->ctrl.sqsize + 1);
2708 opts->queue_size = ctrl->ctrl.sqsize + 1;
2709 }
2710
2711 ret = nvme_fc_init_aen_ops(ctrl);
2712 if (ret)
2713 goto out_term_aen_ops;
2714
2715 
2716 /*
2717  * Create the io queues (first association) or re-connect them.
2718  */
2719 if (ctrl->ctrl.queue_count > 1) {
2720 if (!ctrl->ioq_live)
2721 ret = nvme_fc_create_io_queues(ctrl);
2722 else
2723 ret = nvme_fc_recreate_io_queues(ctrl);
2724 if (ret)
2725 goto out_term_aen_ops;
2726 }
2727
2728 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
2729
2730 ctrl->ctrl.nr_reconnects = 0;
2731
2732 if (changed)
2733 nvme_start_ctrl(&ctrl->ctrl);
2734
2735 return 0;
2736
2737 out_term_aen_ops:
2738 nvme_fc_term_aen_ops(ctrl);
2739 out_disconnect_admin_queue:
2740
2741 nvme_fc_xmt_disconnect_assoc(ctrl);
2742 out_delete_hw_queue:
2743 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2744 out_free_queue:
2745 nvme_fc_free_queue(&ctrl->queues[0]);
2746 ctrl->assoc_active = false;
2747 nvme_fc_ctlr_inactive_on_rport(ctrl);
2748
2749 return ret;
2750 }
2751
2752 /*
2753  * Tear down a live association: quiesce the queues, terminate all
2754  * outstanding FC exchanges, send a Disconnect Association LS if one
2755  * was established, and free the hardware queues.
2756  */
2757 
2758 static void
2759 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
2760 {
2761 unsigned long flags;
2762
2763 if (!ctrl->assoc_active)
2764 return;
2765 ctrl->assoc_active = false;
2766
2767 spin_lock_irqsave(&ctrl->lock, flags);
2768 ctrl->flags |= FCCTRL_TERMIO;
2769 ctrl->iocnt = 0;
2770 spin_unlock_irqrestore(&ctrl->lock, flags);
2771
2772 /*
2773  * If io queues exist, stop them and terminate all outstanding ios on
2774  * them.  Each io owns an FC exchange, so the exchange must be aborted
2775  * through the LLDD; the abort completions are waited on further below
2776  * before the association is dismantled.
2777  */
2778 
2779 
2780 
2781 
2782 
2783 
2784 if (ctrl->ctrl.queue_count > 1) {
2785 nvme_stop_queues(&ctrl->ctrl);
2786 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2787 nvme_fc_terminate_exchange, &ctrl->ctrl);
2788 blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
2789 }
2790
2791 /*
2792  * Quiesce the admin queue and terminate any admin io still
2793  * outstanding, then abort the AEN ops and wait (under FCCTRL_TERMIO)
2794  * for every initiated abort to complete.  The admin queue remains
2795  * quiesced while the rest of the association is dismantled and is
2796  * unquiesced again at the end of this routine.
2797  */
2798 
2799 
2800 
2801 
2802 
2803 
2804 
2805 
2806 
2807 
2808 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
2809 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2810 nvme_fc_terminate_exchange, &ctrl->ctrl);
2811 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
2812
2813
2814 nvme_fc_abort_aen_ops(ctrl);
2815
2816
2817 spin_lock_irq(&ctrl->lock);
2818 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
2819 ctrl->flags &= ~FCCTRL_TERMIO;
2820 spin_unlock_irq(&ctrl->lock);
2821
2822 nvme_fc_term_aen_ops(ctrl);
2823
2824 /*
2825  * Send a Disconnect Association LS to the target, but only if an
2826  * association was actually established (association_id is set).
2827  */
2828 
2829 
2830 if (ctrl->association_id)
2831 nvme_fc_xmt_disconnect_assoc(ctrl);
2832
2833 if (ctrl->ctrl.tagset) {
2834 nvme_fc_delete_hw_io_queues(ctrl);
2835 nvme_fc_free_io_queues(ctrl);
2836 }
2837
2838 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2839 nvme_fc_free_queue(&ctrl->queues[0]);
2840
2841 /* re-enable the admin_q so anything new can fast fail */
2842 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2843 
2844 /* resume the io queues so anything new will also fast fail */
2845 nvme_start_queues(&ctrl->ctrl);
2846
2847 nvme_fc_ctlr_inactive_on_rport(ctrl);
2848 }
2849
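/*
 * .delete_ctrl entry point: cancel any pending error/connect work and
 * tear down the association on the link.
 */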
2850 static void
2851 nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
2852 {
2853 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2854
2855 cancel_work_sync(&ctrl->err_work);
2856 cancel_delayed_work_sync(&ctrl->connect_work);
2857 
2858 /*
2859  * Kill the association on the link side; this blocks all io.
2860  */
2861 nvme_fc_delete_association(ctrl);
2862 }
2863
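/*
 * Decide what to do after a failed association attempt: schedule
 * another reconnect while within the reconnect policy and the
 * dev_loss_tmo window, otherwise delete the controller.
 */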
2864 static void
2865 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
2866 {
2867 struct nvme_fc_rport *rport = ctrl->rport;
2868 struct nvme_fc_remote_port *portptr = &rport->remoteport;
2869 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
2870 bool recon = true;
2871
2872 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
2873 return;
2874
2875 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2876 dev_info(ctrl->ctrl.device,
2877 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
2878 ctrl->cnum, status);
2879 else if (time_after_eq(jiffies, rport->dev_loss_end))
2880 recon = false;
2881
2882 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
2883 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2884 dev_info(ctrl->ctrl.device,
2885 "NVME-FC{%d}: Reconnect attempt in %ld "
2886 "seconds\n",
2887 ctrl->cnum, recon_delay / HZ);
2888 else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
2889 recon_delay = rport->dev_loss_end - jiffies;
2890
2891 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
2892 } else {
2893 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2894 dev_warn(ctrl->ctrl.device,
2895 "NVME-FC{%d}: Max reconnect attempts (%d) "
2896 "reached.\n",
2897 ctrl->cnum, ctrl->ctrl.nr_reconnects);
2898 else
2899 dev_warn(ctrl->ctrl.device,
2900 "NVME-FC{%d}: dev_loss_tmo (%d) expired "
2901 "while waiting for remoteport connectivity.\n",
2902 ctrl->cnum, portptr->dev_loss_tmo);
2903 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
2904 }
2905 }
2906
2907 static void
2908 __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
2909 {
2910 /*
2911  * If the controller is in CONNECTING, the failure happened during a
2912  * (re)connect attempt and the create_association error paths already
2913  * cleaned up any outstanding io.  Otherwise stop keep-alive and tear
2914  * down the association, terminating all outstanding io, and then move
2915  * the controller to CONNECTING so a reconnect can be attempted.
2916  */
2917 
2918 
2919 
2920 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
2921 nvme_stop_keep_alive(&ctrl->ctrl);
2922
2923
2924 nvme_fc_delete_association(ctrl);
2925 }
2926
2927 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
2928 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
2929 dev_err(ctrl->ctrl.device,
2930 "NVME-FC{%d}: error_recovery: Couldn't change state "
2931 "to CONNECTING\n", ctrl->cnum);
2932 }
2933
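/*
 * Controller reset work: terminate outstanding io and the existing
 * association, then attempt a new association if the remote port is
 * still online.
 */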
2934 static void
2935 nvme_fc_reset_ctrl_work(struct work_struct *work)
2936 {
2937 struct nvme_fc_ctrl *ctrl =
2938 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
2939 int ret;
2940
2941 __nvme_fc_terminate_io(ctrl);
2942
2943 nvme_stop_ctrl(&ctrl->ctrl);
2944
2945 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
2946 ret = nvme_fc_create_association(ctrl);
2947 else
2948 ret = -ENOTCONN;
2949
2950 if (ret)
2951 nvme_fc_reconnect_or_delete(ctrl, ret);
2952 else
2953 dev_info(ctrl->ctrl.device,
2954 "NVME-FC{%d}: controller reset complete\n",
2955 ctrl->cnum);
2956 }
2957
2958 static void
2959 nvme_fc_connect_err_work(struct work_struct *work)
2960 {
2961 struct nvme_fc_ctrl *ctrl =
2962 container_of(work, struct nvme_fc_ctrl, err_work);
2963
2964 __nvme_fc_terminate_io(ctrl);
2965
2966 atomic_set(&ctrl->err_work_active, 0);
2967 
2968 /*
2969  * Rescheduling the connect attempt after this error is left to the
2970  * connect/reconnect work item that was stalled waiting on the io
2971  * that failed.
2972  */
2973 
2974 }
2975
2976 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2977 .name = "fc",
2978 .module = THIS_MODULE,
2979 .flags = NVME_F_FABRICS,
2980 .reg_read32 = nvmf_reg_read32,
2981 .reg_read64 = nvmf_reg_read64,
2982 .reg_write32 = nvmf_reg_write32,
2983 .free_ctrl = nvme_fc_nvme_ctrl_freed,
2984 .submit_async_event = nvme_fc_submit_async_event,
2985 .delete_ctrl = nvme_fc_delete_ctrl,
2986 .get_address = nvmf_get_address,
2987 };
2988
2989 static void
2990 nvme_fc_connect_ctrl_work(struct work_struct *work)
2991 {
2992 int ret;
2993
2994 struct nvme_fc_ctrl *ctrl =
2995 container_of(to_delayed_work(work),
2996 struct nvme_fc_ctrl, connect_work);
2997
2998 ret = nvme_fc_create_association(ctrl);
2999 if (ret)
3000 nvme_fc_reconnect_or_delete(ctrl, ret);
3001 else
3002 dev_info(ctrl->ctrl.device,
3003 "NVME-FC{%d}: controller connect complete\n",
3004 ctrl->cnum);
3005 }
3006
3007
3008 static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
3009 .queue_rq = nvme_fc_queue_rq,
3010 .complete = nvme_fc_complete_rq,
3011 .init_request = nvme_fc_init_request,
3012 .exit_request = nvme_fc_exit_request,
3013 .init_hctx = nvme_fc_init_admin_hctx,
3014 .timeout = nvme_fc_timeout,
3015 };
3016
3017 /*
3018  * Return true if a controller matching the base connect options
3019  * (host NQN, host ID, subsystem NQN) already exists on this remote
3020  * port, so that duplicate connect requests can be rejected.
3021  */
3022 
3023 
3024 
3025 
3026 static bool
3027 nvme_fc_existing_controller(struct nvme_fc_rport *rport,
3028 struct nvmf_ctrl_options *opts)
3029 {
3030 struct nvme_fc_ctrl *ctrl;
3031 unsigned long flags;
3032 bool found = false;
3033
3034 spin_lock_irqsave(&rport->lock, flags);
3035 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3036 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
3037 if (found)
3038 break;
3039 }
3040 spin_unlock_irqrestore(&rport->lock, flags);
3041
3042 return found;
3043 }
3044
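/*
 * Allocate and initialize an FC controller instance for the given
 * lport/rport pair, set up the admin tag set and queues, register it
 * with the nvme core, and kick off the initial connect.
 */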
3045 static struct nvme_ctrl *
3046 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3047 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
3048 {
3049 struct nvme_fc_ctrl *ctrl;
3050 unsigned long flags;
3051 int ret, idx;
3052
3053 if (!(rport->remoteport.port_role &
3054 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
3055 ret = -EBADR;
3056 goto out_fail;
3057 }
3058
3059 if (!opts->duplicate_connect &&
3060 nvme_fc_existing_controller(rport, opts)) {
3061 ret = -EALREADY;
3062 goto out_fail;
3063 }
3064
3065 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
3066 if (!ctrl) {
3067 ret = -ENOMEM;
3068 goto out_fail;
3069 }
3070
3071 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
3072 if (idx < 0) {
3073 ret = -ENOSPC;
3074 goto out_free_ctrl;
3075 }
3076
3077 ctrl->ctrl.opts = opts;
3078 ctrl->ctrl.nr_reconnects = 0;
3079 if (lport->dev)
3080 ctrl->ctrl.numa_node = dev_to_node(lport->dev);
3081 else
3082 ctrl->ctrl.numa_node = NUMA_NO_NODE;
3083 INIT_LIST_HEAD(&ctrl->ctrl_list);
3084 ctrl->lport = lport;
3085 ctrl->rport = rport;
3086 ctrl->dev = lport->dev;
3087 ctrl->cnum = idx;
3088 ctrl->ioq_live = false;
3089 ctrl->assoc_active = false;
3090 atomic_set(&ctrl->err_work_active, 0);
3091 init_waitqueue_head(&ctrl->ioabort_wait);
3092
3093 get_device(ctrl->dev);
3094 kref_init(&ctrl->ref);
3095
3096 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3097 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3098 INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
3099 spin_lock_init(&ctrl->lock);
3100
3101
3102 ctrl->ctrl.queue_count = min_t(unsigned int,
3103 opts->nr_io_queues,
3104 lport->ops->max_hw_queues);
3105 ctrl->ctrl.queue_count++;
3106
3107 ctrl->ctrl.sqsize = opts->queue_size - 1;
3108 ctrl->ctrl.kato = opts->kato;
3109 ctrl->ctrl.cntlid = 0xffff;
3110
3111 ret = -ENOMEM;
3112 ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
3113 sizeof(struct nvme_fc_queue), GFP_KERNEL);
3114 if (!ctrl->queues)
3115 goto out_free_ida;
3116
3117 nvme_fc_init_queue(ctrl, 0);
3118
3119 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
3120 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
3121 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
3122 ctrl->admin_tag_set.reserved_tags = 2;
3123 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
3124 ctrl->admin_tag_set.cmd_size =
3125 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
3126 ctrl->lport->ops->fcprqst_priv_sz);
3127 ctrl->admin_tag_set.driver_data = ctrl;
3128 ctrl->admin_tag_set.nr_hw_queues = 1;
3129 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
3130 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
3131
3132 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
3133 if (ret)
3134 goto out_free_queues;
3135 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
3136
3137 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3138 if (IS_ERR(ctrl->ctrl.fabrics_q)) {
3139 ret = PTR_ERR(ctrl->ctrl.fabrics_q);
3140 goto out_free_admin_tag_set;
3141 }
3142
3143 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3144 if (IS_ERR(ctrl->ctrl.admin_q)) {
3145 ret = PTR_ERR(ctrl->ctrl.admin_q);
3146 goto out_cleanup_fabrics_q;
3147 }
3148
3149 /*
3150  * The io queue tag set cannot be allocated yet: the supported io
3151  * queue count must first be negotiated with the controller, so that
3152  * work is deferred to the association/connect path.
3153  */
3154 
3155 
3156 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
3157 if (ret)
3158 goto out_cleanup_admin_q;
3159
3160
3161
3162 spin_lock_irqsave(&rport->lock, flags);
3163 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
3164 spin_unlock_irqrestore(&rport->lock, flags);
3165
3166 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
3167 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
3168 dev_err(ctrl->ctrl.device,
3169 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
3170 goto fail_ctrl;
3171 }
3172
3173 nvme_get_ctrl(&ctrl->ctrl);
3174
3175 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3176 nvme_put_ctrl(&ctrl->ctrl);
3177 dev_err(ctrl->ctrl.device,
3178 "NVME-FC{%d}: failed to schedule initial connect\n",
3179 ctrl->cnum);
3180 goto fail_ctrl;
3181 }
3182
3183 flush_delayed_work(&ctrl->connect_work);
3184
3185 dev_info(ctrl->ctrl.device,
3186 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
3187 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
3188
3189 return &ctrl->ctrl;
3190
3191 fail_ctrl:
3192 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3193 cancel_work_sync(&ctrl->ctrl.reset_work);
3194 cancel_work_sync(&ctrl->err_work);
3195 cancel_delayed_work_sync(&ctrl->connect_work);
3196
3197 ctrl->ctrl.opts = NULL;
3198
3199
3200 nvme_uninit_ctrl(&ctrl->ctrl);
3201
3202
3203 nvme_put_ctrl(&ctrl->ctrl);
3204 
3205 /*
3206  * The controller teardown above consumed the rport reference the
3207  * caller (nvme_fc_create_ctrl) obtained, yet the caller will do
3208  * another rport put when it sees the error return.  Take an extra
3209  * reference here so the reference counts stay balanced.
3210  */
3211 
3212 nvme_fc_rport_get(rport);
3213
3214 return ERR_PTR(-EIO);
3215
3216 out_cleanup_admin_q:
3217 blk_cleanup_queue(ctrl->ctrl.admin_q);
3218 out_cleanup_fabrics_q:
3219 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
3220 out_free_admin_tag_set:
3221 blk_mq_free_tag_set(&ctrl->admin_tag_set);
3222 out_free_queues:
3223 kfree(ctrl->queues);
3224 out_free_ida:
3225 put_device(ctrl->dev);
3226 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
3227 out_free_ctrl:
3228 kfree(ctrl);
3229 out_fail:
3230
3231 return ERR_PTR(ret);
3232 }
3233
3234
3235 struct nvmet_fc_traddr {
3236 u64 nn;
3237 u64 pn;
3238 };
3239
3240 static int
3241 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
3242 {
3243 u64 token64;
3244
3245 if (match_u64(sstr, &token64))
3246 return -EINVAL;
3247 *val = token64;
3248
3249 return 0;
3250 }
3251
3252 /*
3253  * Parse an FC transport address of the form
3254  * "nn-0x<16 hex digits>:pn-0x<16 hex digits>" (the "0x" prefixes
3255  * are optional) into node name and port name values.
3256  */
3257 static int
3258 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
3259 {
3260 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
3261 substring_t wwn = { name, &name[sizeof(name)-1] };
3262 int nnoffset, pnoffset;
3263
3264
3265 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
3266 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
3267 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
3268 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
3269 nnoffset = NVME_FC_TRADDR_OXNNLEN;
3270 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
3271 NVME_FC_TRADDR_OXNNLEN;
3272 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
3273 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
3274 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
3275 "pn-", NVME_FC_TRADDR_NNLEN))) {
3276 nnoffset = NVME_FC_TRADDR_NNLEN;
3277 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
3278 } else
3279 goto out_einval;
3280
3281 name[0] = '0';
3282 name[1] = 'x';
3283 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
3284
3285 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3286 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
3287 goto out_einval;
3288
3289 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3290 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
3291 goto out_einval;
3292
3293 return 0;
3294
3295 out_einval:
3296 pr_warn("%s: bad traddr string\n", __func__);
3297 return -EINVAL;
3298 }
3299
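/*
 * "connect" entry point from the fabrics layer: parse the remote and
 * host transport addresses, locate the matching registered lport/rport
 * pair, and create a controller on it.
 */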
3300 static struct nvme_ctrl *
3301 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
3302 {
3303 struct nvme_fc_lport *lport;
3304 struct nvme_fc_rport *rport;
3305 struct nvme_ctrl *ctrl;
3306 struct nvmet_fc_traddr laddr = { 0L, 0L };
3307 struct nvmet_fc_traddr raddr = { 0L, 0L };
3308 unsigned long flags;
3309 int ret;
3310
3311 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
3312 if (ret || !raddr.nn || !raddr.pn)
3313 return ERR_PTR(-EINVAL);
3314
3315 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
3316 if (ret || !laddr.nn || !laddr.pn)
3317 return ERR_PTR(-EINVAL);
3318
3319 /* find the host (local) port and remote port to bind together */
3320 spin_lock_irqsave(&nvme_fc_lock, flags);
3321 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3322 if (lport->localport.node_name != laddr.nn ||
3323 lport->localport.port_name != laddr.pn)
3324 continue;
3325
3326 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3327 if (rport->remoteport.node_name != raddr.nn ||
3328 rport->remoteport.port_name != raddr.pn)
3329 continue;
3330
3331
3332 if (!nvme_fc_rport_get(rport))
3333 break;
3334
3335 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3336
3337 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
3338 if (IS_ERR(ctrl))
3339 nvme_fc_rport_put(rport);
3340 return ctrl;
3341 }
3342 }
3343 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3344
3345 pr_warn("%s: %s - %s combination not found\n",
3346 __func__, opts->traddr, opts->host_traddr);
3347 return ERR_PTR(-ENOENT);
3348 }
3349
3350
3351 static struct nvmf_transport_ops nvme_fc_transport = {
3352 .name = "fc",
3353 .module = THIS_MODULE,
3354 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
3355 .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
3356 .create_ctrl = nvme_fc_create_ctrl,
3357 };
3358
3359
3360 #define DISCOVERY_MAX_FAIL 20
3361
3362 static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
3363 struct device_attribute *attr, const char *buf, size_t count)
3364 {
3365 unsigned long flags;
3366 LIST_HEAD(local_disc_list);
3367 struct nvme_fc_lport *lport;
3368 struct nvme_fc_rport *rport;
3369 int failcnt = 0;
3370
3371 spin_lock_irqsave(&nvme_fc_lock, flags);
3372 restart:
3373 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3374 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3375 if (!nvme_fc_lport_get(lport))
3376 continue;
3377 if (!nvme_fc_rport_get(rport)) {
3378 /*
3379  * The rport is going away; drop the lport reference taken above
3380  * and restart the scan.  Rports already moved onto local_disc_list
3381  * are skipped on the restart because their disc_list is no longer
3382  * empty.
3383  */
3384 
3385 
3386 
3387 nvme_fc_lport_put(lport);
3388
3389 if (failcnt++ < DISCOVERY_MAX_FAIL)
3390 goto restart;
3391
3392 pr_err("nvme_discovery: too many reference "
3393 "failures\n");
3394 goto process_local_list;
3395 }
3396 if (list_empty(&rport->disc_list))
3397 list_add_tail(&rport->disc_list,
3398 &local_disc_list);
3399 }
3400 }
3401
3402 process_local_list:
3403 while (!list_empty(&local_disc_list)) {
3404 rport = list_first_entry(&local_disc_list,
3405 struct nvme_fc_rport, disc_list);
3406 list_del_init(&rport->disc_list);
3407 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3408
3409 lport = rport->lport;
3410
3411 nvme_fc_signal_discovery_scan(lport, rport);
3412 nvme_fc_rport_put(rport);
3413 nvme_fc_lport_put(lport);
3414
3415 spin_lock_irqsave(&nvme_fc_lock, flags);
3416 }
3417 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3418
3419 return count;
3420 }
3421 static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
3422
3423 static struct attribute *nvme_fc_attrs[] = {
3424 &dev_attr_nvme_discovery.attr,
3425 NULL
3426 };
3427
3428 static struct attribute_group nvme_fc_attr_group = {
3429 .attrs = nvme_fc_attrs,
3430 };
3431
3432 static const struct attribute_group *nvme_fc_attr_groups[] = {
3433 &nvme_fc_attr_group,
3434 NULL
3435 };
3436
3437 static struct class fc_class = {
3438 .name = "fc",
3439 .dev_groups = nvme_fc_attr_groups,
3440 .owner = THIS_MODULE,
3441 };
3442
3443 static int __init nvme_fc_init_module(void)
3444 {
3445 int ret;
3446
3447 nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
3448 if (!nvme_fc_wq)
3449 return -ENOMEM;
3450 /*
3451  * Register a local "fc" class and (below) a single fc_udev_device
3452  * node that exposes the nvme_discovery attribute.  Writing to that
3453  * attribute lets userspace trigger a connect/discovery rescan across
3454  * every registered local/remote port pair.
3455  */
3456 
3457 
3458 
3459 
3460 
3461 
3462 
3463 
3464
3465 ret = class_register(&fc_class);
3466 if (ret) {
3467 pr_err("couldn't register class fc\n");
3468 goto out_destroy_wq;
3469 }
3470
3471
3472
3473
3474 fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
3475 "fc_udev_device");
3476 if (IS_ERR(fc_udev_device)) {
3477 pr_err("couldn't create fc_udev device!\n");
3478 ret = PTR_ERR(fc_udev_device);
3479 goto out_destroy_class;
3480 }
3481
3482 ret = nvmf_register_transport(&nvme_fc_transport);
3483 if (ret)
3484 goto out_destroy_device;
3485
3486 return 0;
3487
3488 out_destroy_device:
3489 device_destroy(&fc_class, MKDEV(0, 0));
3490 out_destroy_class:
3491 class_unregister(&fc_class);
3492 out_destroy_wq:
3493 destroy_workqueue(nvme_fc_wq);
3494
3495 return ret;
3496 }
3497
3498 static void
3499 nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
3500 {
3501 struct nvme_fc_ctrl *ctrl;
3502
3503 spin_lock(&rport->lock);
3504 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3505 dev_warn(ctrl->ctrl.device,
3506 "NVME-FC{%d}: transport unloading: deleting ctrl\n",
3507 ctrl->cnum);
3508 nvme_delete_ctrl(&ctrl->ctrl);
3509 }
3510 spin_unlock(&rport->lock);
3511 }
3512
3513 static void
3514 nvme_fc_cleanup_for_unload(void)
3515 {
3516 struct nvme_fc_lport *lport;
3517 struct nvme_fc_rport *rport;
3518
3519 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3520 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3521 nvme_fc_delete_controllers(rport);
3522 }
3523 }
3524 }
3525
3526 static void __exit nvme_fc_exit_module(void)
3527 {
3528 unsigned long flags;
3529 bool need_cleanup = false;
3530
3531 spin_lock_irqsave(&nvme_fc_lock, flags);
3532 nvme_fc_waiting_to_unload = true;
3533 if (!list_empty(&nvme_fc_lport_list)) {
3534 need_cleanup = true;
3535 nvme_fc_cleanup_for_unload();
3536 }
3537 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3538 if (need_cleanup) {
3539 pr_info("%s: waiting for ctlr deletes\n", __func__);
3540 wait_for_completion(&nvme_fc_unload_proceed);
3541 pr_info("%s: ctrl deletes complete\n", __func__);
3542 }
3543
3544 nvmf_unregister_transport(&nvme_fc_transport);
3545
3546 ida_destroy(&nvme_fc_local_port_cnt);
3547 ida_destroy(&nvme_fc_ctrl_cnt);
3548
3549 device_destroy(&fc_class, MKDEV(0, 0));
3550 class_unregister(&fc_class);
3551 destroy_workqueue(nvme_fc_wq);
3552 }
3553
3554 module_init(nvme_fc_init_module);
3555 module_exit(nvme_fc_exit_module);
3556
3557 MODULE_LICENSE("GPL v2");