This source file includes the following definitions:
- nvmet_fc_iodnum
- nvmet_fc_fodnum
- nvmet_fc_makeconnid
- nvmet_fc_getassociationid
- nvmet_fc_getqueueid
- targetport_to_tgtport
- nvmet_req_to_fod
- fc_dma_map_single
- fc_dma_mapping_error
- fc_dma_unmap_single
- fc_dma_sync_single_for_cpu
- fc_dma_sync_single_for_device
- fc_map_sg
- fc_dma_map_sg
- fc_dma_unmap_sg
- nvmet_fc_alloc_ls_iodlist
- nvmet_fc_free_ls_iodlist
- nvmet_fc_alloc_ls_iod
- nvmet_fc_free_ls_iod
- nvmet_fc_prep_fcp_iodlist
- nvmet_fc_destroy_fcp_iodlist
- nvmet_fc_alloc_fcp_iod
- nvmet_fc_queue_fcp_req
- nvmet_fc_fcp_rqst_op_defer_work
- nvmet_fc_free_fcp_iod
- nvmet_fc_alloc_target_queue
- nvmet_fc_tgt_queue_free
- nvmet_fc_tgt_q_put
- nvmet_fc_tgt_q_get
- nvmet_fc_delete_target_queue
- nvmet_fc_find_target_queue
- nvmet_fc_delete_assoc
- nvmet_fc_alloc_target_assoc
- nvmet_fc_target_assoc_free
- nvmet_fc_tgt_a_put
- nvmet_fc_tgt_a_get
- nvmet_fc_delete_target_assoc
- nvmet_fc_find_target_assoc
- nvmet_fc_portentry_bind
- nvmet_fc_portentry_unbind
- nvmet_fc_portentry_unbind_tgt
- nvmet_fc_portentry_rebind_tgt
- nvmet_fc_register_targetport
- nvmet_fc_free_tgtport
- nvmet_fc_tgtport_put
- nvmet_fc_tgtport_get
- __nvmet_fc_free_assocs
- nvmet_fc_delete_ctrl
- nvmet_fc_unregister_targetport
- nvmet_fc_format_rsp_hdr
- nvmet_fc_format_rjt
- nvmet_fc_ls_create_association
- nvmet_fc_ls_create_connection
- nvmet_fc_ls_disconnect
- nvmet_fc_xmt_ls_rsp_done
- nvmet_fc_xmt_ls_rsp
- nvmet_fc_handle_ls_rqst
- nvmet_fc_handle_ls_rqst_work
- nvmet_fc_rcv_ls_req
- nvmet_fc_alloc_tgt_pgs
- nvmet_fc_free_tgt_pgs
- queue_90percent_full
- nvmet_fc_prep_fcp_rsp
- nvmet_fc_abort_op
- nvmet_fc_xmt_fcp_rsp
- nvmet_fc_transfer_fcp_data
- __nvmet_fc_fod_op_abort
- nvmet_fc_fod_op_done
- nvmet_fc_xmt_fcp_op_done
- __nvmet_fc_fcp_nvme_cmd_done
- nvmet_fc_fcp_nvme_cmd_done
- nvmet_fc_handle_fcp_rqst
- nvmet_fc_rcv_fcp_req
- nvmet_fc_rcv_fcp_abort
- __nvme_fc_parse_u64
- nvme_fc_parse_traddr
- nvmet_fc_add_port
- nvmet_fc_remove_port
- nvmet_fc_discovery_chg
- nvmet_fc_init_module
- nvmet_fc_exit_module
1
2
3
4
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/slab.h>
8 #include <linux/blk-mq.h>
9 #include <linux/parser.h>
10 #include <linux/random.h>
11 #include <uapi/scsi/fc/fc_fs.h>
12 #include <uapi/scsi/fc/fc_els.h>
13
14 #include "nvmet.h"
15 #include <linux/nvme-fc-driver.h>
16 #include <linux/nvme-fc.h>
17
18
19
20
21
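/* number of LS request/response contexts allocated per target port */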
22 #define NVMET_LS_CTX_COUNT 256
23
24
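/* maximum size, in bytes, of an LS request or response payload */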
25 #define NVME_FC_MAX_LS_BUFFER_SIZE 2048
26
27 struct nvmet_fc_tgtport;
28 struct nvmet_fc_tgt_assoc;
29
30 struct nvmet_fc_ls_iod {
31 struct nvmefc_tgt_ls_req *lsreq;
32 struct nvmefc_tgt_fcp_req *fcpreq;
33
34 struct list_head ls_list;
35
36 struct nvmet_fc_tgtport *tgtport;
37 struct nvmet_fc_tgt_assoc *assoc;
38
39 u8 *rqstbuf;
40 u8 *rspbuf;
41 u16 rqstdatalen;
42 dma_addr_t rspdma;
43
44 struct scatterlist sg[2];
45
46 struct work_struct work;
47 } __aligned(sizeof(unsigned long long));
48
49
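/* upper bound on the amount of data moved by a single FCP data operation */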
50 #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
51
52 enum nvmet_fcp_datadir {
53 NVMET_FCP_NODATA,
54 NVMET_FCP_WRITE,
55 NVMET_FCP_READ,
56 NVMET_FCP_ABORTED,
57 };
58
59 struct nvmet_fc_fcp_iod {
60 struct nvmefc_tgt_fcp_req *fcpreq;
61
62 struct nvme_fc_cmd_iu cmdiubuf;
63 struct nvme_fc_ersp_iu rspiubuf;
64 dma_addr_t rspdma;
65 struct scatterlist *next_sg;
66 struct scatterlist *data_sg;
67 int data_sg_cnt;
68 u32 offset;
69 enum nvmet_fcp_datadir io_dir;
70 bool active;
71 bool abort;
72 bool aborted;
73 bool writedataactive;
74 spinlock_t flock;
75
76 struct nvmet_req req;
77 struct work_struct defer_work;
78
79 struct nvmet_fc_tgtport *tgtport;
80 struct nvmet_fc_tgt_queue *queue;
81
82 struct list_head fcp_list;
83 };
84
85 struct nvmet_fc_tgtport {
86
87 struct nvmet_fc_target_port fc_target_port;
88
89 struct list_head tgt_list;
90 struct device *dev;
91 struct nvmet_fc_target_template *ops;
92
93 struct nvmet_fc_ls_iod *iod;
94 spinlock_t lock;
95 struct list_head ls_list;
96 struct list_head ls_busylist;
97 struct list_head assoc_list;
98 struct ida assoc_cnt;
99 struct nvmet_fc_port_entry *pe;
100 struct kref ref;
101 u32 max_sg_cnt;
102 };
103
104 struct nvmet_fc_port_entry {
105 struct nvmet_fc_tgtport *tgtport;
106 struct nvmet_port *port;
107 u64 node_name;
108 u64 port_name;
109 struct list_head pe_list;
110 };
111
112 struct nvmet_fc_defer_fcp_req {
113 struct list_head req_list;
114 struct nvmefc_tgt_fcp_req *fcp_req;
115 };
116
117 struct nvmet_fc_tgt_queue {
118 bool ninetypercent;
119 u16 qid;
120 u16 sqsize;
121 u16 ersp_ratio;
122 __le16 sqhd;
123 atomic_t connected;
124 atomic_t sqtail;
125 atomic_t zrspcnt;
126 atomic_t rsn;
127 spinlock_t qlock;
128 struct nvmet_cq nvme_cq;
129 struct nvmet_sq nvme_sq;
130 struct nvmet_fc_tgt_assoc *assoc;
131 struct list_head fod_list;
132 struct list_head pending_cmd_list;
133 struct list_head avail_defer_list;
134 struct workqueue_struct *work_q;
135 struct kref ref;
136 struct nvmet_fc_fcp_iod fod[];
137 } __aligned(sizeof(unsigned long long));
138
139 struct nvmet_fc_tgt_assoc {
140 u64 association_id;
141 u32 a_id;
142 struct nvmet_fc_tgtport *tgtport;
143 struct list_head a_list;
144 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
145 struct kref ref;
146 struct work_struct del_work;
147 };
148
149
150 static inline int
151 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
152 {
153 return (iodptr - iodptr->tgtport->iod);
154 }
155
156 static inline int
157 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
158 {
159 return (fodptr - fodptr->queue->fod);
160 }
161
162
163
164
165
166
167
168
169
170
171
172
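/*
 * Connection IDs combine the association ID with a queue ID: the low
 * BYTES_FOR_QID (2) bytes hold the queue id and the upper bytes hold the
 * association id. Queue 0 (the admin queue) therefore has a connection id
 * equal to the association id itself.
 */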
173 #define BYTES_FOR_QID sizeof(u16)
174 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
175 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
176
177 static inline u64
178 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
179 {
180 return (assoc->association_id | qid);
181 }
182
183 static inline u64
184 nvmet_fc_getassociationid(u64 connectionid)
185 {
186 return connectionid & ~NVMET_FC_QUEUEID_MASK;
187 }
188
189 static inline u16
190 nvmet_fc_getqueueid(u64 connectionid)
191 {
192 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
193 }
194
195 static inline struct nvmet_fc_tgtport *
196 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
197 {
198 return container_of(targetport, struct nvmet_fc_tgtport,
199 fc_target_port);
200 }
201
202 static inline struct nvmet_fc_fcp_iod *
203 nvmet_req_to_fod(struct nvmet_req *nvme_req)
204 {
205 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
206 }
207
208
209
210
211
212 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
213
214 static LIST_HEAD(nvmet_fc_target_list);
215 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
216 static LIST_HEAD(nvmet_fc_portentry_list);
217
218
219 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
220 static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
221 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
222 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
223 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
224 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
225 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
226 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
227 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
228 struct nvmet_fc_fcp_iod *fod);
229 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
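/*
 * DMA helpers: an LLDD that performs no real DMA may register with a NULL
 * struct device. These wrappers skip the dma_* calls when dev is NULL so
 * such drivers can still use the transport.
 */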
250 static inline dma_addr_t
251 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
252 enum dma_data_direction dir)
253 {
254 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
255 }
256
257 static inline int
258 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
259 {
260 return dev ? dma_mapping_error(dev, dma_addr) : 0;
261 }
262
263 static inline void
264 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
265 enum dma_data_direction dir)
266 {
267 if (dev)
268 dma_unmap_single(dev, addr, size, dir);
269 }
270
271 static inline void
272 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
273 enum dma_data_direction dir)
274 {
275 if (dev)
276 dma_sync_single_for_cpu(dev, addr, size, dir);
277 }
278
279 static inline void
280 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
281 enum dma_data_direction dir)
282 {
283 if (dev)
284 dma_sync_single_for_device(dev, addr, size, dir);
285 }
286
287
288 static int
289 fc_map_sg(struct scatterlist *sg, int nents)
290 {
291 struct scatterlist *s;
292 int i;
293
294 WARN_ON(nents == 0 || sg[0].length == 0);
295
296 for_each_sg(sg, s, nents, i) {
297 s->dma_address = 0L;
298 #ifdef CONFIG_NEED_SG_DMA_LENGTH
299 s->dma_length = s->length;
300 #endif
301 }
302 return nents;
303 }
304
305 static inline int
306 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
307 enum dma_data_direction dir)
308 {
309 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
310 }
311
312 static inline void
313 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
314 enum dma_data_direction dir)
315 {
316 if (dev)
317 dma_unmap_sg(dev, sg, nents, dir);
318 }
319
320
321
322
323
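/* -------------- LS request context (iod) pool management -------------- */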
324 static int
325 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
326 {
327 struct nvmet_fc_ls_iod *iod;
328 int i;
329
330 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
331 GFP_KERNEL);
332 if (!iod)
333 return -ENOMEM;
334
335 tgtport->iod = iod;
336
337 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
338 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
339 iod->tgtport = tgtport;
340 list_add_tail(&iod->ls_list, &tgtport->ls_list);
341
342 iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
343 GFP_KERNEL);
344 if (!iod->rqstbuf)
345 goto out_fail;
346
347 iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
348
349 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
350 NVME_FC_MAX_LS_BUFFER_SIZE,
351 DMA_TO_DEVICE);
352 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
353 goto out_fail;
354 }
355
356 return 0;
357
358 out_fail:
359 kfree(iod->rqstbuf);
360 list_del(&iod->ls_list);
361 for (iod--, i--; i >= 0; iod--, i--) {
362 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
363 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
364 kfree(iod->rqstbuf);
365 list_del(&iod->ls_list);
366 }
367
368 kfree(iod);
369
370 return -EFAULT;
371 }
372
373 static void
374 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
375 {
376 struct nvmet_fc_ls_iod *iod = tgtport->iod;
377 int i;
378
379 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
380 fc_dma_unmap_single(tgtport->dev,
381 iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
382 DMA_TO_DEVICE);
383 kfree(iod->rqstbuf);
384 list_del(&iod->ls_list);
385 }
386 kfree(tgtport->iod);
387 }
388
389 static struct nvmet_fc_ls_iod *
390 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
391 {
392 struct nvmet_fc_ls_iod *iod;
393 unsigned long flags;
394
395 spin_lock_irqsave(&tgtport->lock, flags);
396 iod = list_first_entry_or_null(&tgtport->ls_list,
397 struct nvmet_fc_ls_iod, ls_list);
398 if (iod)
399 list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
400 spin_unlock_irqrestore(&tgtport->lock, flags);
401 return iod;
402 }
403
404
405 static void
406 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
407 struct nvmet_fc_ls_iod *iod)
408 {
409 unsigned long flags;
410
411 spin_lock_irqsave(&tgtport->lock, flags);
412 list_move(&iod->ls_list, &tgtport->ls_list);
413 spin_unlock_irqrestore(&tgtport->lock, flags);
414 }
415
416 static void
417 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
418 struct nvmet_fc_tgt_queue *queue)
419 {
420 struct nvmet_fc_fcp_iod *fod = queue->fod;
421 int i;
422
423 for (i = 0; i < queue->sqsize; fod++, i++) {
424 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
425 fod->tgtport = tgtport;
426 fod->queue = queue;
427 fod->active = false;
428 fod->abort = false;
429 fod->aborted = false;
430 fod->fcpreq = NULL;
431 list_add_tail(&fod->fcp_list, &queue->fod_list);
432 spin_lock_init(&fod->flock);
433
434 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
435 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
436 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
437 list_del(&fod->fcp_list);
438 for (fod--, i--; i >= 0; fod--, i--) {
439 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
440 sizeof(fod->rspiubuf),
441 DMA_TO_DEVICE);
442 fod->rspdma = 0L;
443 list_del(&fod->fcp_list);
444 }
445
446 return;
447 }
448 }
449 }
450
451 static void
452 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
453 struct nvmet_fc_tgt_queue *queue)
454 {
455 struct nvmet_fc_fcp_iod *fod = queue->fod;
456 int i;
457
458 for (i = 0; i < queue->sqsize; fod++, i++) {
459 if (fod->rspdma)
460 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
461 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
462 }
463 }
464
465 static struct nvmet_fc_fcp_iod *
466 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
467 {
468 struct nvmet_fc_fcp_iod *fod;
469
470 lockdep_assert_held(&queue->qlock);
471
472 fod = list_first_entry_or_null(&queue->fod_list,
473 struct nvmet_fc_fcp_iod, fcp_list);
474 if (fod) {
475 list_del(&fod->fcp_list);
476 fod->active = true;
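/*
 * no additional queue reference is taken here; the reference obtained by
 * the caller's queue lookup is carried by the iod and released in
 * nvmet_fc_free_fcp_iod().
 */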
477
478
479
480
481
482 }
483 return fod;
484 }
485
486
487 static void
488 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
489 struct nvmet_fc_tgt_queue *queue,
490 struct nvmefc_tgt_fcp_req *fcpreq)
491 {
492 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
493
494
495
496
497
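/*
 * admin queue (qid 0) commands use hw queue 0; io queue commands are
 * spread across the LLDD's hardware queues on a modulo basis.
 */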
498 fcpreq->hwqid = queue->qid ?
499 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
500
501 nvmet_fc_handle_fcp_rqst(tgtport, fod);
502 }
503
504 static void
505 nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
506 {
507 struct nvmet_fc_fcp_iod *fod =
508 container_of(work, struct nvmet_fc_fcp_iod, defer_work);
509
510
511 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
512
513 }
514
515 static void
516 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
517 struct nvmet_fc_fcp_iod *fod)
518 {
519 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
520 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
521 struct nvmet_fc_defer_fcp_req *deferfcp;
522 unsigned long flags;
523
524 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
525 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
526
527 fcpreq->nvmet_fc_private = NULL;
528
529 fod->active = false;
530 fod->abort = false;
531 fod->aborted = false;
532 fod->writedataactive = false;
533 fod->fcpreq = NULL;
534
535 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
536
537
538 nvmet_fc_tgt_q_put(queue);
539
540 spin_lock_irqsave(&queue->qlock, flags);
541 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
542 struct nvmet_fc_defer_fcp_req, req_list);
543 if (!deferfcp) {
544 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
545 spin_unlock_irqrestore(&queue->qlock, flags);
546 return;
547 }
548
549
550 list_del(&deferfcp->req_list);
551
552 fcpreq = deferfcp->fcp_req;
553
554
555 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
556
557 spin_unlock_irqrestore(&queue->qlock, flags);
558
559
560 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
561
562
563 fcpreq->rspaddr = NULL;
564 fcpreq->rsplen = 0;
565 fcpreq->nvmet_fc_private = fod;
566 fod->fcpreq = fcpreq;
567 fod->active = true;
568
569
570 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
571
572
573
574
575
576
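/*
 * hand the deferred command to the queue's workqueue;
 * nvmet_fc_fcp_rqst_op_defer_work() will run it through
 * nvmet_fc_queue_fcp_req() in work context.
 */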
577 queue_work(queue->work_q, &fod->defer_work);
578 }
579
580 static struct nvmet_fc_tgt_queue *
581 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
582 u16 qid, u16 sqsize)
583 {
584 struct nvmet_fc_tgt_queue *queue;
585 unsigned long flags;
586 int ret;
587
588 if (qid > NVMET_NR_QUEUES)
589 return NULL;
590
591 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
592 if (!queue)
593 return NULL;
594
595 if (!nvmet_fc_tgt_a_get(assoc))
596 goto out_free_queue;
597
598 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
599 assoc->tgtport->fc_target_port.port_num,
600 assoc->a_id, qid);
601 if (!queue->work_q)
602 goto out_a_put;
603
604 queue->qid = qid;
605 queue->sqsize = sqsize;
606 queue->assoc = assoc;
607 INIT_LIST_HEAD(&queue->fod_list);
608 INIT_LIST_HEAD(&queue->avail_defer_list);
609 INIT_LIST_HEAD(&queue->pending_cmd_list);
610 atomic_set(&queue->connected, 0);
611 atomic_set(&queue->sqtail, 0);
612 atomic_set(&queue->rsn, 1);
613 atomic_set(&queue->zrspcnt, 0);
614 spin_lock_init(&queue->qlock);
615 kref_init(&queue->ref);
616
617 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
618
619 ret = nvmet_sq_init(&queue->nvme_sq);
620 if (ret)
621 goto out_fail_iodlist;
622
623 WARN_ON(assoc->queues[qid]);
624 spin_lock_irqsave(&assoc->tgtport->lock, flags);
625 assoc->queues[qid] = queue;
626 spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
627
628 return queue;
629
630 out_fail_iodlist:
631 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
632 destroy_workqueue(queue->work_q);
633 out_a_put:
634 nvmet_fc_tgt_a_put(assoc);
635 out_free_queue:
636 kfree(queue);
637 return NULL;
638 }
639
640
641 static void
642 nvmet_fc_tgt_queue_free(struct kref *ref)
643 {
644 struct nvmet_fc_tgt_queue *queue =
645 container_of(ref, struct nvmet_fc_tgt_queue, ref);
646 unsigned long flags;
647
648 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
649 queue->assoc->queues[queue->qid] = NULL;
650 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
651
652 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
653
654 nvmet_fc_tgt_a_put(queue->assoc);
655
656 destroy_workqueue(queue->work_q);
657
658 kfree(queue);
659 }
660
661 static void
662 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
663 {
664 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
665 }
666
667 static int
668 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
669 {
670 return kref_get_unless_zero(&queue->ref);
671 }
672
673
674 static void
675 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
676 {
677 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
678 struct nvmet_fc_fcp_iod *fod = queue->fod;
679 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
680 unsigned long flags;
681 int i, writedataactive;
682 bool disconnect;
683
684 disconnect = atomic_xchg(&queue->connected, 0);
685
686 spin_lock_irqsave(&queue->qlock, flags);
687
688 for (i = 0; i < queue->sqsize; fod++, i++) {
689 if (fod->active) {
690 spin_lock(&fod->flock);
691 fod->abort = true;
692 writedataactive = fod->writedataactive;
693 spin_unlock(&fod->flock);
694
695
696
697
698
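/*
 * only call the LLDD abort routine when a write data transfer is
 * outstanding; other active operations clean up through their normal
 * completion paths once fod->abort is set.
 */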
699 if (writedataactive) {
700 spin_lock(&fod->flock);
701 fod->aborted = true;
702 spin_unlock(&fod->flock);
703 tgtport->ops->fcp_abort(
704 &tgtport->fc_target_port, fod->fcpreq);
705 }
706 }
707 }
708
709
710 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
711 req_list) {
712 list_del(&deferfcp->req_list);
713 kfree(deferfcp);
714 }
715
716 for (;;) {
717 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
718 struct nvmet_fc_defer_fcp_req, req_list);
719 if (!deferfcp)
720 break;
721
722 list_del(&deferfcp->req_list);
723 spin_unlock_irqrestore(&queue->qlock, flags);
724
725 tgtport->ops->defer_rcv(&tgtport->fc_target_port,
726 deferfcp->fcp_req);
727
728 tgtport->ops->fcp_abort(&tgtport->fc_target_port,
729 deferfcp->fcp_req);
730
731 tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
732 deferfcp->fcp_req);
733
734
735 nvmet_fc_tgt_q_put(queue);
736
737 kfree(deferfcp);
738
739 spin_lock_irqsave(&queue->qlock, flags);
740 }
741 spin_unlock_irqrestore(&queue->qlock, flags);
742
743 flush_workqueue(queue->work_q);
744
745 if (disconnect)
746 nvmet_sq_destroy(&queue->nvme_sq);
747
748 nvmet_fc_tgt_q_put(queue);
749 }
750
751 static struct nvmet_fc_tgt_queue *
752 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
753 u64 connection_id)
754 {
755 struct nvmet_fc_tgt_assoc *assoc;
756 struct nvmet_fc_tgt_queue *queue;
757 u64 association_id = nvmet_fc_getassociationid(connection_id);
758 u16 qid = nvmet_fc_getqueueid(connection_id);
759 unsigned long flags;
760
761 if (qid > NVMET_NR_QUEUES)
762 return NULL;
763
764 spin_lock_irqsave(&tgtport->lock, flags);
765 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
766 if (association_id == assoc->association_id) {
767 queue = assoc->queues[qid];
768 if (queue &&
769 (!atomic_read(&queue->connected) ||
770 !nvmet_fc_tgt_q_get(queue)))
771 queue = NULL;
772 spin_unlock_irqrestore(&tgtport->lock, flags);
773 return queue;
774 }
775 }
776 spin_unlock_irqrestore(&tgtport->lock, flags);
777 return NULL;
778 }
779
780 static void
781 nvmet_fc_delete_assoc(struct work_struct *work)
782 {
783 struct nvmet_fc_tgt_assoc *assoc =
784 container_of(work, struct nvmet_fc_tgt_assoc, del_work);
785
786 nvmet_fc_delete_target_assoc(assoc);
787 nvmet_fc_tgt_a_put(assoc);
788 }
789
790 static struct nvmet_fc_tgt_assoc *
791 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
792 {
793 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
794 unsigned long flags;
795 u64 ran;
796 int idx;
797 bool needrandom = true;
798
799 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
800 if (!assoc)
801 return NULL;
802
803 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
804 if (idx < 0)
805 goto out_free_assoc;
806
807 if (!nvmet_fc_tgtport_get(tgtport))
808 goto out_ida_put;
809
810 assoc->tgtport = tgtport;
811 assoc->a_id = idx;
812 INIT_LIST_HEAD(&assoc->a_list);
813 kref_init(&assoc->ref);
814 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
815
816 while (needrandom) {
817 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
818 ran = ran << BYTES_FOR_QID_SHIFT;
819
820 spin_lock_irqsave(&tgtport->lock, flags);
821 needrandom = false;
822 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
823 if (ran == tmpassoc->association_id) {
824 needrandom = true;
825 break;
826 }
827 if (!needrandom) {
828 assoc->association_id = ran;
829 list_add_tail(&assoc->a_list, &tgtport->assoc_list);
830 }
831 spin_unlock_irqrestore(&tgtport->lock, flags);
832 }
833
834 return assoc;
835
836 out_ida_put:
837 ida_simple_remove(&tgtport->assoc_cnt, idx);
838 out_free_assoc:
839 kfree(assoc);
840 return NULL;
841 }
842
843 static void
844 nvmet_fc_target_assoc_free(struct kref *ref)
845 {
846 struct nvmet_fc_tgt_assoc *assoc =
847 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
848 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
849 unsigned long flags;
850
851 spin_lock_irqsave(&tgtport->lock, flags);
852 list_del(&assoc->a_list);
853 spin_unlock_irqrestore(&tgtport->lock, flags);
854 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
855 kfree(assoc);
856 nvmet_fc_tgtport_put(tgtport);
857 }
858
859 static void
860 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
861 {
862 kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
863 }
864
865 static int
866 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
867 {
868 return kref_get_unless_zero(&assoc->ref);
869 }
870
871 static void
872 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
873 {
874 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
875 struct nvmet_fc_tgt_queue *queue;
876 unsigned long flags;
877 int i;
878
879 spin_lock_irqsave(&tgtport->lock, flags);
880 for (i = NVMET_NR_QUEUES; i >= 0; i--) {
881 queue = assoc->queues[i];
882 if (queue) {
883 if (!nvmet_fc_tgt_q_get(queue))
884 continue;
885 spin_unlock_irqrestore(&tgtport->lock, flags);
886 nvmet_fc_delete_target_queue(queue);
887 nvmet_fc_tgt_q_put(queue);
888 spin_lock_irqsave(&tgtport->lock, flags);
889 }
890 }
891 spin_unlock_irqrestore(&tgtport->lock, flags);
892
893 nvmet_fc_tgt_a_put(assoc);
894 }
895
896 static struct nvmet_fc_tgt_assoc *
897 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
898 u64 association_id)
899 {
900 struct nvmet_fc_tgt_assoc *assoc;
901 struct nvmet_fc_tgt_assoc *ret = NULL;
902 unsigned long flags;
903
904 spin_lock_irqsave(&tgtport->lock, flags);
905 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
906 if (association_id == assoc->association_id) {
907 ret = assoc;
908 nvmet_fc_tgt_a_get(assoc);
909 break;
910 }
911 }
912 spin_unlock_irqrestore(&tgtport->lock, flags);
913
914 return ret;
915 }
916
917 static void
918 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
919 struct nvmet_fc_port_entry *pe,
920 struct nvmet_port *port)
921 {
922 lockdep_assert_held(&nvmet_fc_tgtlock);
923
924 pe->tgtport = tgtport;
925 tgtport->pe = pe;
926
927 pe->port = port;
928 port->priv = pe;
929
930 pe->node_name = tgtport->fc_target_port.node_name;
931 pe->port_name = tgtport->fc_target_port.port_name;
932 INIT_LIST_HEAD(&pe->pe_list);
933
934 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
935 }
936
937 static void
938 nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
939 {
940 unsigned long flags;
941
942 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
943 if (pe->tgtport)
944 pe->tgtport->pe = NULL;
945 list_del(&pe->pe_list);
946 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
947 }
948
949
950
951
952
953
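/*
 * called when a targetport deregisters: break the links between the
 * targetport and its port entry but leave the entry on the global list so
 * a later re-registration can rebind to it.
 */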
954 static void
955 nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
956 {
957 struct nvmet_fc_port_entry *pe;
958 unsigned long flags;
959
960 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
961 pe = tgtport->pe;
962 if (pe)
963 pe->tgtport = NULL;
964 tgtport->pe = NULL;
965 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
966 }
967
968
969
970
971
972
973
974
975
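/*
 * called on targetport registration: if a port entry with a matching
 * node_name/port_name was left behind by a prior targetport, rebind it to
 * the newly registered targetport.
 */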
976 static void
977 nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
978 {
979 struct nvmet_fc_port_entry *pe;
980 unsigned long flags;
981
982 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
983 list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
984 if (tgtport->fc_target_port.node_name == pe->node_name &&
985 tgtport->fc_target_port.port_name == pe->port_name) {
986 WARN_ON(pe->tgtport);
987 tgtport->pe = pe;
988 pe->tgtport = tgtport;
989 break;
990 }
991 }
992 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
993 }
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
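/*
 * nvmet_fc_register_targetport - called by an LLDD to register a local
 * FC NVME subsystem (target) port with the nvmet-fc transport.
 *
 * Validates the ops template, allocates and initializes the tgtport
 * (including its LS iod pool), rebinds any pre-existing port entry, and
 * returns the new port in *portptr.
 */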
1012 int
1013 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
1014 struct nvmet_fc_target_template *template,
1015 struct device *dev,
1016 struct nvmet_fc_target_port **portptr)
1017 {
1018 struct nvmet_fc_tgtport *newrec;
1019 unsigned long flags;
1020 int ret, idx;
1021
1022 if (!template->xmt_ls_rsp || !template->fcp_op ||
1023 !template->fcp_abort ||
1024 !template->fcp_req_release || !template->targetport_delete ||
1025 !template->max_hw_queues || !template->max_sgl_segments ||
1026 !template->max_dif_sgl_segments || !template->dma_boundary) {
1027 ret = -EINVAL;
1028 goto out_regtgt_failed;
1029 }
1030
1031 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
1032 GFP_KERNEL);
1033 if (!newrec) {
1034 ret = -ENOMEM;
1035 goto out_regtgt_failed;
1036 }
1037
1038 idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
1039 if (idx < 0) {
1040 ret = -ENOSPC;
1041 goto out_fail_kfree;
1042 }
1043
1044 if (!get_device(dev) && dev) {
1045 ret = -ENODEV;
1046 goto out_ida_put;
1047 }
1048
1049 newrec->fc_target_port.node_name = pinfo->node_name;
1050 newrec->fc_target_port.port_name = pinfo->port_name;
1051 newrec->fc_target_port.private = &newrec[1];
1052 newrec->fc_target_port.port_id = pinfo->port_id;
1053 newrec->fc_target_port.port_num = idx;
1054 INIT_LIST_HEAD(&newrec->tgt_list);
1055 newrec->dev = dev;
1056 newrec->ops = template;
1057 spin_lock_init(&newrec->lock);
1058 INIT_LIST_HEAD(&newrec->ls_list);
1059 INIT_LIST_HEAD(&newrec->ls_busylist);
1060 INIT_LIST_HEAD(&newrec->assoc_list);
1061 kref_init(&newrec->ref);
1062 ida_init(&newrec->assoc_cnt);
1063 newrec->max_sg_cnt = template->max_sgl_segments;
1064
1065 ret = nvmet_fc_alloc_ls_iodlist(newrec);
1066 if (ret) {
1067 ret = -ENOMEM;
1068 goto out_free_newrec;
1069 }
1070
1071 nvmet_fc_portentry_rebind_tgt(newrec);
1072
1073 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1074 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
1075 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1076
1077 *portptr = &newrec->fc_target_port;
1078 return 0;
1079
1080 out_free_newrec:
1081 put_device(dev);
1082 out_ida_put:
1083 ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
1084 out_fail_kfree:
1085 kfree(newrec);
1086 out_regtgt_failed:
1087 *portptr = NULL;
1088 return ret;
1089 }
1090 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
1091
1092
1093 static void
1094 nvmet_fc_free_tgtport(struct kref *ref)
1095 {
1096 struct nvmet_fc_tgtport *tgtport =
1097 container_of(ref, struct nvmet_fc_tgtport, ref);
1098 struct device *dev = tgtport->dev;
1099 unsigned long flags;
1100
1101 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1102 list_del(&tgtport->tgt_list);
1103 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1104
1105 nvmet_fc_free_ls_iodlist(tgtport);
1106
1107
1108 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
1109
1110 ida_simple_remove(&nvmet_fc_tgtport_cnt,
1111 tgtport->fc_target_port.port_num);
1112
1113 ida_destroy(&tgtport->assoc_cnt);
1114
1115 kfree(tgtport);
1116
1117 put_device(dev);
1118 }
1119
1120 static void
1121 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
1122 {
1123 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1124 }
1125
1126 static int
1127 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
1128 {
1129 return kref_get_unless_zero(&tgtport->ref);
1130 }
1131
1132 static void
1133 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1134 {
1135 struct nvmet_fc_tgt_assoc *assoc, *next;
1136 unsigned long flags;
1137
1138 spin_lock_irqsave(&tgtport->lock, flags);
1139 list_for_each_entry_safe(assoc, next,
1140 &tgtport->assoc_list, a_list) {
1141 if (!nvmet_fc_tgt_a_get(assoc))
1142 continue;
1143 if (!schedule_work(&assoc->del_work))
1144 nvmet_fc_tgt_a_put(assoc);
1145 }
1146 spin_unlock_irqrestore(&tgtport->lock, flags);
1147 }
1148
1149
1150
1151
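/*
 * called by the nvmet core when a controller is being deleted: locate the
 * association whose admin queue (queue 0) owns the controller and schedule
 * that association's deletion.
 */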
1152 static void
1153 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
1154 {
1155 struct nvmet_fc_tgtport *tgtport, *next;
1156 struct nvmet_fc_tgt_assoc *assoc;
1157 struct nvmet_fc_tgt_queue *queue;
1158 unsigned long flags;
1159 bool found_ctrl = false;
1160
1161
1162 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1163 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1164 tgt_list) {
1165 if (!nvmet_fc_tgtport_get(tgtport))
1166 continue;
1167 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1168
1169 spin_lock_irqsave(&tgtport->lock, flags);
1170 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1171 queue = assoc->queues[0];
1172 if (queue && queue->nvme_sq.ctrl == ctrl) {
1173 if (nvmet_fc_tgt_a_get(assoc))
1174 found_ctrl = true;
1175 break;
1176 }
1177 }
1178 spin_unlock_irqrestore(&tgtport->lock, flags);
1179
1180 nvmet_fc_tgtport_put(tgtport);
1181
1182 if (found_ctrl) {
1183 if (!schedule_work(&assoc->del_work))
1184 nvmet_fc_tgt_a_put(assoc);
1185 return;
1186 }
1187
1188 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1189 }
1190 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1191 }
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
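/*
 * nvmet_fc_unregister_targetport - called by an LLDD to remove a previously
 * registered target port: unbind any port entry, tear down all active
 * associations, and drop the registration reference so the port can be freed.
 */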
1204 int
1205 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1206 {
1207 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1208
1209 nvmet_fc_portentry_unbind_tgt(tgtport);
1210
1211
1212 __nvmet_fc_free_assocs(tgtport);
1213
1214 nvmet_fc_tgtport_put(tgtport);
1215
1216 return 0;
1217 }
1218 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1219
1220
1221
1222
1223
1224 static void
1225 nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
1226 {
1227 struct fcnvme_ls_acc_hdr *acc = buf;
1228
1229 acc->w0.ls_cmd = ls_cmd;
1230 acc->desc_list_len = desc_len;
1231 acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1232 acc->rqst.desc_len =
1233 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1234 acc->rqst.w0.ls_cmd = rqst_ls_cmd;
1235 }
1236
1237 static int
1238 nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
1239 u8 reason, u8 explanation, u8 vendor)
1240 {
1241 struct fcnvme_ls_rjt *rjt = buf;
1242
1243 nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
1244 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
1245 ls_cmd);
1246 rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1247 rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1248 rjt->rjt.reason_code = reason;
1249 rjt->rjt.reason_explanation = explanation;
1250 rjt->rjt.vendor = vendor;
1251
1252 return sizeof(struct fcnvme_ls_rjt);
1253 }
1254
1255
1256 enum {
1257 VERR_NO_ERROR = 0,
1258 VERR_CR_ASSOC_LEN = 1,
1259 VERR_CR_ASSOC_RQST_LEN = 2,
1260 VERR_CR_ASSOC_CMD = 3,
1261 VERR_CR_ASSOC_CMD_LEN = 4,
1262 VERR_ERSP_RATIO = 5,
1263 VERR_ASSOC_ALLOC_FAIL = 6,
1264 VERR_QUEUE_ALLOC_FAIL = 7,
1265 VERR_CR_CONN_LEN = 8,
1266 VERR_CR_CONN_RQST_LEN = 9,
1267 VERR_ASSOC_ID = 10,
1268 VERR_ASSOC_ID_LEN = 11,
1269 VERR_NO_ASSOC = 12,
1270 VERR_CONN_ID = 13,
1271 VERR_CONN_ID_LEN = 14,
1272 VERR_NO_CONN = 15,
1273 VERR_CR_CONN_CMD = 16,
1274 VERR_CR_CONN_CMD_LEN = 17,
1275 VERR_DISCONN_LEN = 18,
1276 VERR_DISCONN_RQST_LEN = 19,
1277 VERR_DISCONN_CMD = 20,
1278 VERR_DISCONN_CMD_LEN = 21,
1279 VERR_DISCONN_SCOPE = 22,
1280 VERR_RS_LEN = 23,
1281 VERR_RS_RQST_LEN = 24,
1282 VERR_RS_CMD = 25,
1283 VERR_RS_CMD_LEN = 26,
1284 VERR_RS_RCTL = 27,
1285 VERR_RS_RO = 28,
1286 };
1287
1288 static char *validation_errors[] = {
1289 "OK",
1290 "Bad CR_ASSOC Length",
1291 "Bad CR_ASSOC Rqst Length",
1292 "Not CR_ASSOC Cmd",
1293 "Bad CR_ASSOC Cmd Length",
1294 "Bad Ersp Ratio",
1295 "Association Allocation Failed",
1296 "Queue Allocation Failed",
1297 "Bad CR_CONN Length",
1298 "Bad CR_CONN Rqst Length",
1299 "Not Association ID",
1300 "Bad Association ID Length",
1301 "No Association",
1302 "Not Connection ID",
1303 "Bad Connection ID Length",
1304 "No Connection",
1305 "Not CR_CONN Cmd",
1306 "Bad CR_CONN Cmd Length",
1307 "Bad DISCONN Length",
1308 "Bad DISCONN Rqst Length",
1309 "Not DISCONN Cmd",
1310 "Bad DISCONN Cmd Length",
1311 "Bad Disconnect Scope",
1312 "Bad RS Length",
1313 "Bad RS Rqst Length",
1314 "Not RS Cmd",
1315 "Bad RS Cmd Length",
1316 "Bad RS R_CTL",
1317 "Bad RS Relative Offset",
1318 };
1319
1320 static void
1321 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1322 struct nvmet_fc_ls_iod *iod)
1323 {
1324 struct fcnvme_ls_cr_assoc_rqst *rqst =
1325 (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1326 struct fcnvme_ls_cr_assoc_acc *acc =
1327 (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1328 struct nvmet_fc_tgt_queue *queue;
1329 int ret = 0;
1330
1331 memset(acc, 0, sizeof(*acc));
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1342 ret = VERR_CR_ASSOC_LEN;
1343 else if (be32_to_cpu(rqst->desc_list_len) <
1344 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
1345 ret = VERR_CR_ASSOC_RQST_LEN;
1346 else if (rqst->assoc_cmd.desc_tag !=
1347 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1348 ret = VERR_CR_ASSOC_CMD;
1349 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
1350 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
1351 ret = VERR_CR_ASSOC_CMD_LEN;
1352 else if (!rqst->assoc_cmd.ersp_ratio ||
1353 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1354 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1355 ret = VERR_ERSP_RATIO;
1356
1357 else {
1358
1359 iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1360 if (!iod->assoc)
1361 ret = VERR_ASSOC_ALLOC_FAIL;
1362 else {
1363 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1364 be16_to_cpu(rqst->assoc_cmd.sqsize));
1365 if (!queue)
1366 ret = VERR_QUEUE_ALLOC_FAIL;
1367 }
1368 }
1369
1370 if (ret) {
1371 dev_err(tgtport->dev,
1372 "Create Association LS failed: %s\n",
1373 validation_errors[ret]);
1374 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1375 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1376 FCNVME_RJT_RC_LOGIC,
1377 FCNVME_RJT_EXP_NONE, 0);
1378 return;
1379 }
1380
1381 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1382 atomic_set(&queue->connected, 1);
1383 queue->sqhd = 0;
1384
1385
1386
1387 iod->lsreq->rsplen = sizeof(*acc);
1388
1389 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1390 fcnvme_lsdesc_len(
1391 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1392 FCNVME_LS_CREATE_ASSOCIATION);
1393 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1394 acc->associd.desc_len =
1395 fcnvme_lsdesc_len(
1396 sizeof(struct fcnvme_lsdesc_assoc_id));
1397 acc->associd.association_id =
1398 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1399 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1400 acc->connectid.desc_len =
1401 fcnvme_lsdesc_len(
1402 sizeof(struct fcnvme_lsdesc_conn_id));
1403 acc->connectid.connection_id = acc->associd.association_id;
1404 }
1405
1406 static void
1407 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1408 struct nvmet_fc_ls_iod *iod)
1409 {
1410 struct fcnvme_ls_cr_conn_rqst *rqst =
1411 (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1412 struct fcnvme_ls_cr_conn_acc *acc =
1413 (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1414 struct nvmet_fc_tgt_queue *queue;
1415 int ret = 0;
1416
1417 memset(acc, 0, sizeof(*acc));
1418
1419 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1420 ret = VERR_CR_CONN_LEN;
1421 else if (rqst->desc_list_len !=
1422 fcnvme_lsdesc_len(
1423 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1424 ret = VERR_CR_CONN_RQST_LEN;
1425 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1426 ret = VERR_ASSOC_ID;
1427 else if (rqst->associd.desc_len !=
1428 fcnvme_lsdesc_len(
1429 sizeof(struct fcnvme_lsdesc_assoc_id)))
1430 ret = VERR_ASSOC_ID_LEN;
1431 else if (rqst->connect_cmd.desc_tag !=
1432 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1433 ret = VERR_CR_CONN_CMD;
1434 else if (rqst->connect_cmd.desc_len !=
1435 fcnvme_lsdesc_len(
1436 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1437 ret = VERR_CR_CONN_CMD_LEN;
1438 else if (!rqst->connect_cmd.ersp_ratio ||
1439 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1440 be16_to_cpu(rqst->connect_cmd.sqsize)))
1441 ret = VERR_ERSP_RATIO;
1442
1443 else {
1444
1445 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1446 be64_to_cpu(rqst->associd.association_id));
1447 if (!iod->assoc)
1448 ret = VERR_NO_ASSOC;
1449 else {
1450 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1451 be16_to_cpu(rqst->connect_cmd.qid),
1452 be16_to_cpu(rqst->connect_cmd.sqsize));
1453 if (!queue)
1454 ret = VERR_QUEUE_ALLOC_FAIL;
1455
1456
1457 nvmet_fc_tgt_a_put(iod->assoc);
1458 }
1459 }
1460
1461 if (ret) {
1462 dev_err(tgtport->dev,
1463 "Create Connection LS failed: %s\n",
1464 validation_errors[ret]);
1465 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1466 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1467 (ret == VERR_NO_ASSOC) ?
1468 FCNVME_RJT_RC_INV_ASSOC :
1469 FCNVME_RJT_RC_LOGIC,
1470 FCNVME_RJT_EXP_NONE, 0);
1471 return;
1472 }
1473
1474 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1475 atomic_set(&queue->connected, 1);
1476 queue->sqhd = 0;
1477
1478
1479
1480 iod->lsreq->rsplen = sizeof(*acc);
1481
1482 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1483 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1484 FCNVME_LS_CREATE_CONNECTION);
1485 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1486 acc->connectid.desc_len =
1487 fcnvme_lsdesc_len(
1488 sizeof(struct fcnvme_lsdesc_conn_id));
1489 acc->connectid.connection_id =
1490 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1491 be16_to_cpu(rqst->connect_cmd.qid)));
1492 }
1493
1494 static void
1495 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1496 struct nvmet_fc_ls_iod *iod)
1497 {
1498 struct fcnvme_ls_disconnect_rqst *rqst =
1499 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1500 struct fcnvme_ls_disconnect_acc *acc =
1501 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1502 struct nvmet_fc_tgt_assoc *assoc;
1503 int ret = 0;
1504
1505 memset(acc, 0, sizeof(*acc));
1506
1507 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1508 ret = VERR_DISCONN_LEN;
1509 else if (rqst->desc_list_len !=
1510 fcnvme_lsdesc_len(
1511 sizeof(struct fcnvme_ls_disconnect_rqst)))
1512 ret = VERR_DISCONN_RQST_LEN;
1513 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1514 ret = VERR_ASSOC_ID;
1515 else if (rqst->associd.desc_len !=
1516 fcnvme_lsdesc_len(
1517 sizeof(struct fcnvme_lsdesc_assoc_id)))
1518 ret = VERR_ASSOC_ID_LEN;
1519 else if (rqst->discon_cmd.desc_tag !=
1520 cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1521 ret = VERR_DISCONN_CMD;
1522 else if (rqst->discon_cmd.desc_len !=
1523 fcnvme_lsdesc_len(
1524 sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1525 ret = VERR_DISCONN_CMD_LEN;
1526 else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1527 (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1528 ret = VERR_DISCONN_SCOPE;
1529 else {
1530
1531 assoc = nvmet_fc_find_target_assoc(tgtport,
1532 be64_to_cpu(rqst->associd.association_id));
1533 iod->assoc = assoc;
1534 if (!assoc)
1535 ret = VERR_NO_ASSOC;
1536 }
1537
1538 if (ret) {
1539 dev_err(tgtport->dev,
1540 "Disconnect LS failed: %s\n",
1541 validation_errors[ret]);
1542 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1543 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1544 (ret == VERR_NO_ASSOC) ?
1545 FCNVME_RJT_RC_INV_ASSOC :
1546 (ret == VERR_NO_CONN) ?
1547 FCNVME_RJT_RC_INV_CONN :
1548 FCNVME_RJT_RC_LOGIC,
1549 FCNVME_RJT_EXP_NONE, 0);
1550 return;
1551 }
1552
1553
1554
1555 iod->lsreq->rsplen = sizeof(*acc);
1556
1557 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1558 fcnvme_lsdesc_len(
1559 sizeof(struct fcnvme_ls_disconnect_acc)),
1560 FCNVME_LS_DISCONNECT);
1561
1562
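/* release the reference taken by nvmet_fc_find_target_assoc() */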
1563 nvmet_fc_tgt_a_put(iod->assoc);
1564
1565 nvmet_fc_delete_target_assoc(iod->assoc);
1566 }
1567
1568
1569
1570
1571
1572 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1573
1574 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1575
1576 static void
1577 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1578 {
1579 struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1580 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1581
1582 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1583 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1584 nvmet_fc_free_ls_iod(tgtport, iod);
1585 nvmet_fc_tgtport_put(tgtport);
1586 }
1587
1588 static void
1589 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1590 struct nvmet_fc_ls_iod *iod)
1591 {
1592 int ret;
1593
1594 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1595 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1596
1597 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1598 if (ret)
1599 nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1600 }
1601
1602
1603
1604
1605 static void
1606 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1607 struct nvmet_fc_ls_iod *iod)
1608 {
1609 struct fcnvme_ls_rqst_w0 *w0 =
1610 (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1611
1612 iod->lsreq->nvmet_fc_private = iod;
1613 iod->lsreq->rspbuf = iod->rspbuf;
1614 iod->lsreq->rspdma = iod->rspdma;
1615 iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
1616
1617 iod->lsreq->rsplen = 0;
1618
1619 iod->assoc = NULL;
1620
1621
1622
1623
1624
1625
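/*
 * dispatch on the LS command. Each handler validates the request and
 * formats either an accept or a reject response in iod->rspbuf.
 */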
1626 switch (w0->ls_cmd) {
1627 case FCNVME_LS_CREATE_ASSOCIATION:
1628
1629 nvmet_fc_ls_create_association(tgtport, iod);
1630 break;
1631 case FCNVME_LS_CREATE_CONNECTION:
1632
1633 nvmet_fc_ls_create_connection(tgtport, iod);
1634 break;
1635 case FCNVME_LS_DISCONNECT:
1636
1637 nvmet_fc_ls_disconnect(tgtport, iod);
1638 break;
1639 default:
1640 iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1641 NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1642 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1643 }
1644
1645 nvmet_fc_xmt_ls_rsp(tgtport, iod);
1646 }
1647
1648
1649
1650
1651 static void
1652 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1653 {
1654 struct nvmet_fc_ls_iod *iod =
1655 container_of(work, struct nvmet_fc_ls_iod, work);
1656 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1657
1658 nvmet_fc_handle_ls_rqst(tgtport, iod);
1659 }
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
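/*
 * nvmet_fc_rcv_ls_req - called by an LLDD when an NVME LS request is
 * received: copies the LS payload into a free iod and schedules
 * nvmet_fc_handle_ls_rqst_work() to process it and send the response.
 */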
1679 int
1680 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1681 struct nvmefc_tgt_ls_req *lsreq,
1682 void *lsreqbuf, u32 lsreqbuf_len)
1683 {
1684 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1685 struct nvmet_fc_ls_iod *iod;
1686
1687 if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1688 return -E2BIG;
1689
1690 if (!nvmet_fc_tgtport_get(tgtport))
1691 return -ESHUTDOWN;
1692
1693 iod = nvmet_fc_alloc_ls_iod(tgtport);
1694 if (!iod) {
1695 nvmet_fc_tgtport_put(tgtport);
1696 return -ENOENT;
1697 }
1698
1699 iod->lsreq = lsreq;
1700 iod->fcpreq = NULL;
1701 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1702 iod->rqstdatalen = lsreqbuf_len;
1703
1704 schedule_work(&iod->work);
1705
1706 return 0;
1707 }
1708 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
1709
1710
1711
1712
1713
1714
1715
1716
1717 static int
1718 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1719 {
1720 struct scatterlist *sg;
1721 unsigned int nent;
1722
1723 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
1724 if (!sg)
1725 goto out;
1726
1727 fod->data_sg = sg;
1728 fod->data_sg_cnt = nent;
1729 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1730 ((fod->io_dir == NVMET_FCP_WRITE) ?
1731 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1732
1733 fod->next_sg = fod->data_sg;
1734
1735 return 0;
1736
1737 out:
1738 return NVME_SC_INTERNAL;
1739 }
1740
1741 static void
1742 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1743 {
1744 if (!fod->data_sg || !fod->data_sg_cnt)
1745 return;
1746
1747 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1748 ((fod->io_dir == NVMET_FCP_WRITE) ?
1749 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1750 sgl_free(fod->data_sg);
1751 fod->data_sg = NULL;
1752 fod->data_sg_cnt = 0;
1753 }
1754
1755
1756 static bool
1757 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1758 {
1759 u32 sqtail, used;
1760
1761
1762 sqtail = atomic_read(&q->sqtail) % q->sqsize;
1763
1764 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1765 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
1766 }
1767
1768
1769
1770
1771
1772 static void
1773 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1774 struct nvmet_fc_fcp_iod *fod)
1775 {
1776 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1777 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1778 struct nvme_completion *cqe = &ersp->cqe;
1779 u32 *cqewd = (u32 *)cqe;
1780 bool send_ersp = false;
1781 u32 rsn, rspcnt, xfr_length;
1782
1783 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1784 xfr_length = fod->req.transfer_len;
1785 else
1786 xfr_length = fod->offset;
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
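/*
 * send a full ERSP (rather than the abbreviated zero-filled response)
 * whenever the ersp_ratio interval is hit, for fabrics commands, when the
 * transferred length differs from the requested length, on any error status
 * or non-zero result, for fused commands, or when the queue is nearly full
 * and the host needs an up-to-date sq_head.
 */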
1807 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1808 if (!(rspcnt % fod->queue->ersp_ratio) ||
1809 nvme_is_fabrics((struct nvme_command *) sqe) ||
1810 xfr_length != fod->req.transfer_len ||
1811 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1812 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1813 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
1814 send_ersp = true;
1815
1816
1817 fod->fcpreq->rspaddr = ersp;
1818 fod->fcpreq->rspdma = fod->rspdma;
1819
1820 if (!send_ersp) {
1821 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1822 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1823 } else {
1824 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1825 rsn = atomic_inc_return(&fod->queue->rsn);
1826 ersp->rsn = cpu_to_be32(rsn);
1827 ersp->xfrd_len = cpu_to_be32(xfr_length);
1828 fod->fcpreq->rsplen = sizeof(*ersp);
1829 }
1830
1831 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1832 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1833 }
1834
1835 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1836
1837 static void
1838 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
1839 struct nvmet_fc_fcp_iod *fod)
1840 {
1841 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1842
1843
1844 nvmet_fc_free_tgt_pgs(fod);
1845
1846
1847
1848
1849
1850
1851 if (!fod->aborted)
1852 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
1853
1854 nvmet_fc_free_fcp_iod(fod->queue, fod);
1855 }
1856
1857 static void
1858 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1859 struct nvmet_fc_fcp_iod *fod)
1860 {
1861 int ret;
1862
1863 fod->fcpreq->op = NVMET_FCOP_RSP;
1864 fod->fcpreq->timeout = 0;
1865
1866 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1867
1868 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1869 if (ret)
1870 nvmet_fc_abort_op(tgtport, fod);
1871 }
1872
1873 static void
1874 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1875 struct nvmet_fc_fcp_iod *fod, u8 op)
1876 {
1877 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1878 struct scatterlist *sg = fod->next_sg;
1879 unsigned long flags;
1880 u32 remaininglen = fod->req.transfer_len - fod->offset;
1881 u32 tlen = 0;
1882 int ret;
1883
1884 fcpreq->op = op;
1885 fcpreq->offset = fod->offset;
1886 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
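/*
 * build the sg list for this operation, bounded by the port's max_sg_cnt
 * and NVMET_FC_MAX_SEQ_LENGTH. Any remaining data is carried by follow-on
 * operations starting at fod->next_sg.
 */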
1897 fcpreq->sg = sg;
1898 fcpreq->sg_cnt = 0;
1899 while (tlen < remaininglen &&
1900 fcpreq->sg_cnt < tgtport->max_sg_cnt &&
1901 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
1902 fcpreq->sg_cnt++;
1903 tlen += sg_dma_len(sg);
1904 sg = sg_next(sg);
1905 }
1906 if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
1907 fcpreq->sg_cnt++;
1908 tlen += min_t(u32, sg_dma_len(sg), remaininglen);
1909 sg = sg_next(sg);
1910 }
1911 if (tlen < remaininglen)
1912 fod->next_sg = sg;
1913 else
1914 fod->next_sg = NULL;
1915
1916 fcpreq->transfer_length = tlen;
1917 fcpreq->transferred_length = 0;
1918 fcpreq->fcp_error = 0;
1919 fcpreq->rsplen = 0;
1920
1921
1922
1923
1924
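/*
 * if this read data operation completes the transfer and the LLDD supports
 * it, piggy-back the response on the final data sequence.
 */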
1925 if ((op == NVMET_FCOP_READDATA) &&
1926 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
1927 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1928 fcpreq->op = NVMET_FCOP_READDATA_RSP;
1929 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1930 }
1931
1932 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1933 if (ret) {
1934
1935
1936
1937
1938
1939 fod->abort = true;
1940
1941 if (op == NVMET_FCOP_WRITEDATA) {
1942 spin_lock_irqsave(&fod->flock, flags);
1943 fod->writedataactive = false;
1944 spin_unlock_irqrestore(&fod->flock, flags);
1945 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1946 } else {
1947 fcpreq->fcp_error = ret;
1948 fcpreq->transferred_length = 0;
1949 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1950 }
1951 }
1952 }
1953
1954 static inline bool
1955 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
1956 {
1957 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1958 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1959
1960
1961 if (abort) {
1962 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
1963 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1964 return true;
1965 }
1966
1967 nvmet_fc_abort_op(tgtport, fod);
1968 return true;
1969 }
1970
1971 return false;
1972 }
1973
1974
1975
1976
1977 static void
1978 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
1979 {
1980 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1981 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1982 unsigned long flags;
1983 bool abort;
1984
1985 spin_lock_irqsave(&fod->flock, flags);
1986 abort = fod->abort;
1987 fod->writedataactive = false;
1988 spin_unlock_irqrestore(&fod->flock, flags);
1989
1990 switch (fcpreq->op) {
1991
1992 case NVMET_FCOP_WRITEDATA:
1993 if (__nvmet_fc_fod_op_abort(fod, abort))
1994 return;
1995 if (fcpreq->fcp_error ||
1996 fcpreq->transferred_length != fcpreq->transfer_length) {
1997 spin_lock(&fod->flock);
1998 fod->abort = true;
1999 spin_unlock(&fod->flock);
2000
2001 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2002 return;
2003 }
2004
2005 fod->offset += fcpreq->transferred_length;
2006 if (fod->offset != fod->req.transfer_len) {
2007 spin_lock_irqsave(&fod->flock, flags);
2008 fod->writedataactive = true;
2009 spin_unlock_irqrestore(&fod->flock, flags);
2010
2011
2012 nvmet_fc_transfer_fcp_data(tgtport, fod,
2013 NVMET_FCOP_WRITEDATA);
2014 return;
2015 }
2016
2017
2018 nvmet_req_execute(&fod->req);
2019 break;
2020
2021 case NVMET_FCOP_READDATA:
2022 case NVMET_FCOP_READDATA_RSP:
2023 if (__nvmet_fc_fod_op_abort(fod, abort))
2024 return;
2025 if (fcpreq->fcp_error ||
2026 fcpreq->transferred_length != fcpreq->transfer_length) {
2027 nvmet_fc_abort_op(tgtport, fod);
2028 return;
2029 }
2030
2031
2032
2033 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
2034
2035 nvmet_fc_free_tgt_pgs(fod);
2036 nvmet_fc_free_fcp_iod(fod->queue, fod);
2037 return;
2038 }
2039
2040 fod->offset += fcpreq->transferred_length;
2041 if (fod->offset != fod->req.transfer_len) {
2042
2043 nvmet_fc_transfer_fcp_data(tgtport, fod,
2044 NVMET_FCOP_READDATA);
2045 return;
2046 }
2047
2048
2049
2050
2051 nvmet_fc_free_tgt_pgs(fod);
2052
2053 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2054
2055 break;
2056
2057 case NVMET_FCOP_RSP:
2058 if (__nvmet_fc_fod_op_abort(fod, abort))
2059 return;
2060 nvmet_fc_free_fcp_iod(fod->queue, fod);
2061 break;
2062
2063 default:
2064 break;
2065 }
2066 }
2067
2068 static void
2069 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
2070 {
2071 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2072
2073 nvmet_fc_fod_op_done(fod);
2074 }
2075
2076
2077
2078
2079 static void
2080 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2081 struct nvmet_fc_fcp_iod *fod, int status)
2082 {
2083 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2084 struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2085 unsigned long flags;
2086 bool abort;
2087
2088 spin_lock_irqsave(&fod->flock, flags);
2089 abort = fod->abort;
2090 spin_unlock_irqrestore(&fod->flock, flags);
2091
2092
2093 if (!status)
2094 fod->queue->sqhd = cqe->sq_head;
2095
2096 if (abort) {
2097 nvmet_fc_abort_op(tgtport, fod);
2098 return;
2099 }
2100
2101
2102 if (status) {
2103
2104 memset(cqe, 0, sizeof(*cqe));
2105 cqe->sq_head = fod->queue->sqhd;
2106 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2107 cqe->command_id = sqe->command_id;
2108 cqe->status = cpu_to_le16(status);
2109 } else {
2110
2111
2112
2113
2114
2115
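/* a successful read with data must move the data before the response */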
2116 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2117
2118 nvmet_fc_transfer_fcp_data(tgtport, fod,
2119 NVMET_FCOP_READDATA);
2120 return;
2121 }
2122
2123
2124 }
2125
2126
2127 nvmet_fc_free_tgt_pgs(fod);
2128
2129 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2130 }
2131
2132
2133 static void
2134 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2135 {
2136 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2137 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2138
2139 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2140 }
2141
2142
2143
2144
2145
2146 static void
2147 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2148 struct nvmet_fc_fcp_iod *fod)
2149 {
2150 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2151 u32 xfrlen = be32_to_cpu(cmdiu->data_len);
2152 int ret;
2153
2154
2155
2156
2157
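/* no nvmet port is bound to this targetport; terminate the command */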
2158 if (!tgtport->pe)
2159 goto transport_error;
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2171
2172 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2173 fod->io_dir = NVMET_FCP_WRITE;
2174 if (!nvme_is_write(&cmdiu->sqe))
2175 goto transport_error;
2176 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2177 fod->io_dir = NVMET_FCP_READ;
2178 if (nvme_is_write(&cmdiu->sqe))
2179 goto transport_error;
2180 } else {
2181 fod->io_dir = NVMET_FCP_NODATA;
2182 if (xfrlen)
2183 goto transport_error;
2184 }
2185
2186 fod->req.cmd = &fod->cmdiubuf.sqe;
2187 fod->req.cqe = &fod->rspiubuf.cqe;
2188 fod->req.port = tgtport->pe->port;
2189
2190
2191 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2192
2193 fod->data_sg = NULL;
2194 fod->data_sg_cnt = 0;
2195
2196 ret = nvmet_req_init(&fod->req,
2197 &fod->queue->nvme_cq,
2198 &fod->queue->nvme_sq,
2199 &nvmet_fc_tgt_fcp_ops);
2200 if (!ret) {
2201
2202
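/*
 * bad IU content or invalid state: nvmet_req_init() has already
 * completed the request with an error status.
 */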
2203 return;
2204 }
2205
2206 fod->req.transfer_len = xfrlen;
2207
2208
2209 atomic_inc(&fod->queue->sqtail);
2210
2211 if (fod->req.transfer_len) {
2212 ret = nvmet_fc_alloc_tgt_pgs(fod);
2213 if (ret) {
2214 nvmet_req_complete(&fod->req, ret);
2215 return;
2216 }
2217 }
2218 fod->req.sg = fod->data_sg;
2219 fod->req.sg_cnt = fod->data_sg_cnt;
2220 fod->offset = 0;
2221
2222 if (fod->io_dir == NVMET_FCP_WRITE) {
2223 /* pull the data over before invoking nvmet layer */
2224 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2225 return;
2226 }
2227
2228 /*
2229 * Reads or no data:
2230 *
2231 * can invoke the nvmet layer now. If read data, cmd completion will
2232 * push the data
2233 */
2234 nvmet_req_execute(&fod->req);
2235 return;
2236
2237 transport_error:
2238 nvmet_fc_abort_op(tgtport, fod);
2239 }
2240
2241
2242 /**
2243 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2244 * when an FCP CMD IU is received from the FC link.
2245 *
2246 * The nvmet-fc layer copies the CMD IU payload into an internal
2247 * structure for processing, so when this routine returns 0 the LLDD
2248 * may immediately free/reuse the CMD IU buffer passed in the call.
2249 *
2250 * The routine validates the CMD IU (length, FC-NVME identifiers,
2251 * iu_len) and uses the connection id carried in the IU to locate
2252 * the queue the command was issued on.
2253 *
2254 * Due to the packetized nature of FC, a new command may arrive
2255 * before the transport has recycled the job context
2256 * (struct nvmet_fc_fcp_iod) of a prior command on the same queue.
2257 * If no free job context is available and the LLDD supports the
2258 * defer_rcv() callback, the command is placed on a temporary
2259 * pending list for the queue: the CMD IU buffer pointer and length
2260 * are remembered (via fcpreq->rspaddr and fcpreq->rsplen) and the
2261 * routine returns -EOVERFLOW. In that case the LLDD must keep the
2262 * CMD IU buffer valid; once a job context frees up, the deferred
2263 * command is restarted and the LLDD is notified through defer_rcv()
2264 * that the buffer may be released.
2265 *
2266 * If this routine returns an error other than -EOVERFLOW, the LLDD
2267 * should abort/reject the exchange.
2268 *
2269 * @target_port: pointer to the (registered) target port the FCP
2270 * CMD IU was received on.
2271 * @fcpreq: pointer to a fcpreq request structure used to reference
2272 * the exchange corresponding to the FCP Exchange.
2273 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
2274 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2275 *
2276 * Return values:
2277 * 0 - command accepted and queued; the CMD IU buffer may be
2278 * reused immediately.
2279 * -EIO - the CMD IU failed validation.
2280 * -ENOTCONN - no queue matches the connection id in the CMD IU.
2281 * -EOVERFLOW - command deferred until a job context is free; the
2282 * CMD IU buffer must remain valid until defer_rcv() is called.
2283 * -ENOENT - no free job context and the LLDD does not support
2284 * deferred receipt.
2285 * -ENOMEM - no memory available to defer the command.
2286 *
2287 */
2288 int
2289 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2290 struct nvmefc_tgt_fcp_req *fcpreq,
2291 void *cmdiubuf, u32 cmdiubuf_len)
2292 {
2293 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2294 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2295 struct nvmet_fc_tgt_queue *queue;
2296 struct nvmet_fc_fcp_iod *fod;
2297 struct nvmet_fc_defer_fcp_req *deferfcp;
2298 unsigned long flags;
2299
2300 /* validate iu, so the connection id can be used to find the queue */
2301 if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2302 (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2303 (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2304 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2305 return -EIO;
2306
2307 queue = nvmet_fc_find_target_queue(tgtport,
2308 be64_to_cpu(cmdiu->connection_id));
2309 if (!queue)
2310 return -ENOTCONN;
2311
2312 /*
2313 * note: a reference to the queue was taken by
2314 * nvmet_fc_find_target_queue(). On successful fod allocation the
2315 * fod inherits that reference and drops it when the fod is freed;
2316 * on the failure paths below it is dropped explicitly via
2317 * nvmet_fc_tgt_q_put().
2318 */
2319 spin_lock_irqsave(&queue->qlock, flags);
2320
2321 fod = nvmet_fc_alloc_fcp_iod(queue);
2322 if (fod) {
2323 spin_unlock_irqrestore(&queue->qlock, flags);
2324
2325 fcpreq->nvmet_fc_private = fod;
2326 fod->fcpreq = fcpreq;
2327
2328 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2329
2330 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2331
2332 return 0;
2333 }
2334
2335 if (!tgtport->ops->defer_rcv) {
2336 spin_unlock_irqrestore(&queue->qlock, flags);
2337 /* release the queue lookup reference */
2338 nvmet_fc_tgt_q_put(queue);
2339 return -ENOENT;
2340 }
2341
2342 deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2343 struct nvmet_fc_defer_fcp_req, req_list);
2344 if (deferfcp) {
2345 /* Just re-use one that was previously allocated */
2346 list_del(&deferfcp->req_list);
2347 } else {
2348 spin_unlock_irqrestore(&queue->qlock, flags);
2349
2350 /* Now we need to dynamically allocate one */
2351 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2352 if (!deferfcp) {
2353 /* release the queue lookup reference */
2354 nvmet_fc_tgt_q_put(queue);
2355 return -ENOMEM;
2356 }
2357 spin_lock_irqsave(&queue->qlock, flags);
2358 }
2359
2360 /* use rspaddr / rsplen to save the CMD IU payload information */
2361 fcpreq->rspaddr = cmdiubuf;
2362 fcpreq->rsplen = cmdiubuf_len;
2363 deferfcp->fcp_req = fcpreq;
2364
2365 /* defer processing till a fod becomes available */
2366 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2367
2368
2369 /* NOTE: the queue lookup reference is still valid */
2370 spin_unlock_irqrestore(&queue->qlock, flags);
2371
2372 return -EOVERFLOW;
2373 }
2374 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
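/*
 * Illustrative sketch only: the struct and helper names (my_lport,
 * my_exchange, my_lldd_abort_exchange) are hypothetical; only
 * nvmet_fc_rcv_fcp_req() and the return values documented above are
 * assumed. It shows how an LLDD might hand a received FCP CMD IU to
 * the transport and react to the result.
 *
 *	static void my_lldd_handle_fcp_cmd(struct my_lport *lp,
 *					   struct my_exchange *xchg,
 *					   void *cmdiu, u32 cmdiu_len)
 *	{
 *		int ret;
 *
 *		ret = nvmet_fc_rcv_fcp_req(lp->targetport,
 *					   &xchg->tgt_fcp_req,
 *					   cmdiu, cmdiu_len);
 *		if (ret == 0)
 *			return;		// cmdiu buffer may be reused now
 *		if (ret == -EOVERFLOW)
 *			return;		// deferred: keep cmdiu buffer valid
 *					// until the defer_rcv() callback
 *		my_lldd_abort_exchange(lp, xchg);	// reject on any error
 *	}
 */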
2375
2376
2377 /**
2378 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2379 * upon receipt of an ABTS for an FCP command.
2380 *
2381 * Notify the transport that an ABTS has been received for an FCP
2382 * command previously handed to the transport via
2383 * nvmet_fc_rcv_fcp_req() and whose i/o context has not yet been
2384 * returned to the LLDD (fcp_req_release() has not been called).
2385 *
2386 * If the command is still active, the transport marks the job
2387 * context as aborted. Any outstanding op to the LLDD, or the
2388 * completion from the nvmet layer, will then detect the aborted
2389 * state, stop processing, and return the i/o context to the LLDD
2390 * through the fcp_req_release() callback. The LLDD may send the
2391 * BA_ACC for the ABTS either after this call returns or once that
2392 * callback has been invoked.
2393 *
2394 * @target_port: pointer to the (registered) target port the FCP
2395 * CMD IU was received on.
2396 * @fcpreq: pointer to the fcpreq request structure corresponding
2397 * to the exchange that received the ABTS.
2398 */
2399 void
2400 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2401 struct nvmefc_tgt_fcp_req *fcpreq)
2402 {
2403 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2404 struct nvmet_fc_tgt_queue *queue;
2405 unsigned long flags;
2406
2407 if (!fod || fod->fcpreq != fcpreq)
2408 /* job appears to have already completed, ignore abort */
2409 return;
2410
2411 queue = fod->queue;
2412
2413 spin_lock_irqsave(&queue->qlock, flags);
2414 if (fod->active) {
2415 /*
2416 * mark as abort. The abort handler, invoked upon completion
2417 * of any work, will detect the aborted status and do the
2418 * callback.
2419 */
2420 spin_lock(&fod->flock);
2421 fod->abort = true;
2422 fod->aborted = true;
2423 spin_unlock(&fod->flock);
2424 }
2425 spin_unlock_irqrestore(&queue->qlock, flags);
2426 }
2427 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
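/*
 * Illustrative sketch only (my_lport and my_exchange are hypothetical):
 * an LLDD's ABTS handler notifies the transport and keeps the exchange
 * resources until the transport returns the i/o context through the
 * fcp_req_release() template callback.
 *
 *	static void my_lldd_handle_abts(struct my_lport *lp,
 *					struct my_exchange *xchg)
 *	{
 *		nvmet_fc_rcv_fcp_abort(lp->targetport, &xchg->tgt_fcp_req);
 *		// the exchange is freed later, from the fcp_req_release()
 *		// callback, once the transport is done with the command
 *	}
 */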
2428
2429
2430 struct nvmet_fc_traddr {
2431 u64 nn;
2432 u64 pn;
2433 };
2434
2435 static int
2436 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2437 {
2438 u64 token64;
2439
2440 if (match_u64(sstr, &token64))
2441 return -EINVAL;
2442 *val = token64;
2443
2444 return 0;
2445 }
2446
2447 /*
2448 * Validate and extract the WWNs from the TRADDR string. Kernel parsers
2449 * need a 0x prefix to determine the number base, so the name strings are
2450 * rebuilt with an "0x" prefix before being parsed.
2451 */
2452 static int
2453 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2454 {
2455 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2456 substring_t wwn = { name, &name[sizeof(name)-1] };
2457 int nnoffset, pnoffset;
2458
2459
2460 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2461 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2462 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2463 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2464 nnoffset = NVME_FC_TRADDR_OXNNLEN;
2465 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2466 NVME_FC_TRADDR_OXNNLEN;
2467 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2468 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2469 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2470 "pn-", NVME_FC_TRADDR_NNLEN))) {
2471 nnoffset = NVME_FC_TRADDR_NNLEN;
2472 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2473 } else
2474 goto out_einval;
2475
2476 name[0] = '0';
2477 name[1] = 'x';
2478 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2479
2480 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2481 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2482 goto out_einval;
2483
2484 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2485 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2486 goto out_einval;
2487
2488 return 0;
2489
2490 out_einval:
2491 pr_warn("%s: bad traddr string\n", __func__);
2492 return -EINVAL;
2493 }
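/*
 * Example (the WWN values below are arbitrary): the transport address is
 * expected as "nn-0x<16 hex digits>:pn-0x<16 hex digits>", e.g.
 *	nn-0x200000109b1c1a2b:pn-0x100000109b1c1a2b
 * or the shorter form without the "0x" prefixes. On success,
 * nvme_fc_parse_traddr() fills traddr->nn and traddr->pn with the
 * corresponding 64-bit node and port names.
 */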
2494
2495 static int
2496 nvmet_fc_add_port(struct nvmet_port *port)
2497 {
2498 struct nvmet_fc_tgtport *tgtport;
2499 struct nvmet_fc_port_entry *pe;
2500 struct nvmet_fc_traddr traddr = { 0L, 0L };
2501 unsigned long flags;
2502 int ret;
2503
2504 /* validate the address info */
2505 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2506 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2507 return -EINVAL;
2508
2509 /* map the traddr address info to a target port */
2510
2511 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
2512 sizeof(port->disc_addr.traddr));
2513 if (ret)
2514 return ret;
2515
2516 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2517 if (!pe)
2518 return -ENOMEM;
2519
2520 ret = -ENXIO;
2521 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2522 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2523 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2524 (tgtport->fc_target_port.port_name == traddr.pn)) {
2525 /* a targetport can only be bound to one nvmet port */
2526 if (!tgtport->pe) {
2527 nvmet_fc_portentry_bind(tgtport, pe, port);
2528 ret = 0;
2529 } else
2530 ret = -EALREADY;
2531 break;
2532 }
2533 }
2534 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2535
2536 if (ret)
2537 kfree(pe);
2538
2539 return ret;
2540 }
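/*
 * Note: nvmet_fc_add_port() is reached when an nvmet port whose
 * addr_trtype/addr_adrfam are "fc" is enabled through the nvmet configfs
 * interface; the port's addr_traddr ("nn-0x...:pn-0x...") must name an
 * already-registered targetport, otherwise the bind fails with -ENXIO.
 * (The configfs attribute names reflect common nvmet usage and are not
 * defined in this file.)
 */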
2541
2542 static void
2543 nvmet_fc_remove_port(struct nvmet_port *port)
2544 {
2545 struct nvmet_fc_port_entry *pe = port->priv;
2546
2547 nvmet_fc_portentry_unbind(pe);
2548
2549 kfree(pe);
2550 }
2551
2552 static void
2553 nvmet_fc_discovery_chg(struct nvmet_port *port)
2554 {
2555 struct nvmet_fc_port_entry *pe = port->priv;
2556 struct nvmet_fc_tgtport *tgtport = pe->tgtport;
2557
2558 if (tgtport && tgtport->ops->discovery_event)
2559 tgtport->ops->discovery_event(&tgtport->fc_target_port);
2560 }
2561
2562 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2563 .owner = THIS_MODULE,
2564 .type = NVMF_TRTYPE_FC,
2565 .msdbd = 1,
2566 .add_port = nvmet_fc_add_port,
2567 .remove_port = nvmet_fc_remove_port,
2568 .queue_response = nvmet_fc_fcp_nvme_cmd_done,
2569 .delete_ctrl = nvmet_fc_delete_ctrl,
2570 .discovery_chg = nvmet_fc_discovery_chg,
2571 };
2572
2573 static int __init nvmet_fc_init_module(void)
2574 {
2575 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2576 }
2577
2578 static void __exit nvmet_fc_exit_module(void)
2579 {
2580 /* sanity check - all targetports should have been unregistered */
2581 if (!list_empty(&nvmet_fc_target_list))
2582 pr_warn("%s: targetport list not empty\n", __func__);
2583
2584 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2585
2586 ida_destroy(&nvmet_fc_tgtport_cnt);
2587 }
2588
2589 module_init(nvmet_fc_init_module);
2590 module_exit(nvmet_fc_exit_module);
2591
2592 MODULE_LICENSE("GPL v2");