This source file includes following definitions.
- fc_exch_name_lookup
- fc_exch_rctl_name
- fc_exch_hold
- fc_exch_setup_hdr
- fc_exch_release
- fc_exch_timer_cancel
- fc_exch_timer_set_locked
- fc_exch_timer_set
- fc_exch_done_locked
- fc_exch_ptr_get
- fc_exch_ptr_set
- fc_exch_delete
- fc_seq_send_locked
- fc_seq_send
- fc_seq_alloc
- fc_seq_start_next_locked
- fc_seq_start_next
- fc_seq_set_resp
- fc_exch_abort_locked
- fc_seq_exch_abort
- fc_invoke_resp
- fc_exch_timeout
- fc_exch_em_alloc
- fc_exch_alloc
- fc_exch_find
- fc_exch_done
- fc_exch_resp
- fc_seq_lookup_recip
- fc_seq_lookup_orig
- fc_exch_set_addr
- fc_seq_els_rsp_send
- fc_seq_send_last
- fc_seq_send_ack
- fc_exch_send_ba_rjt
- fc_exch_recv_abts
- fc_seq_assign
- fc_seq_release
- fc_exch_recv_req
- fc_exch_recv_seq_resp
- fc_exch_recv_resp
- fc_exch_abts_resp
- fc_exch_recv_bls
- fc_seq_ls_acc
- fc_seq_ls_rjt
- fc_exch_reset
- fc_exch_pool_reset
- fc_exch_mgr_reset
- fc_exch_lookup
- fc_exch_els_rec
- fc_exch_rrq_resp
- fc_exch_seq_send
- fc_exch_rrq
- fc_exch_els_rrq
- fc_exch_update_stats
- fc_exch_mgr_add
- fc_exch_mgr_destroy
- fc_exch_mgr_del
- fc_exch_mgr_list_clone
- fc_exch_mgr_alloc
- fc_exch_mgr_free
- fc_find_ema
- fc_exch_recv
- fc_exch_init
- fc_setup_exch_mgr
- fc_destroy_exch_mgr
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/timer.h>
15 #include <linux/slab.h>
16 #include <linux/err.h>
17 #include <linux/export.h>
18 #include <linux/log2.h>
19
20 #include <scsi/fc/fc_fc2.h>
21
22 #include <scsi/libfc.h>
23 #include <scsi/fc_encode.h>
24
25 #include "fc_libfc.h"
26
27 u16 fc_cpu_mask;
28 EXPORT_SYMBOL(fc_cpu_mask);
29 static u16 fc_cpu_order;
30 static struct kmem_cache *fc_em_cachep;
31 static struct workqueue_struct *fc_exch_workqueue;
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
/*
 * struct fc_exch_pool - per-CPU pool of exchanges
 * @lock:	  protects the pool's exchange-pointer array and counters
 * @ex_list:	  list of exchanges currently allocated from this pool
 * @next_index:	  next slot index to try on allocation (round-robin cursor)
 * @total_exches: number of exchanges allocated from this pool
 * @left:	  cached index of a recently-freed slot, or FC_XID_UNKNOWN
 * @right:	  second cached recently-freed slot, or FC_XID_UNKNOWN
 *
 * The array of exchange pointers is stored immediately after this
 * structure in memory; see fc_exch_ptr_get()/fc_exch_ptr_set().
 * @left and @right are filled by fc_exch_delete() and consumed first by
 * fc_exch_em_alloc() so recently-freed XIDs are reused quickly.
 */
struct fc_exch_pool {
	spinlock_t lock;
	struct list_head ex_list;
	u16 next_index;
	u16 total_exches;

	u16 left;
	u16 right;
} ____cacheline_aligned_in_smp;
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
/*
 * struct fc_exch_mgr - exchange manager
 * @pool:	    per-CPU exchange pools
 * @ep_pool:	    mempool backing the fc_exch allocations
 * @lport:	    local port owning this manager
 * @class:	    default FC class for new exchanges
 * @kref:	    reference count for the manager
 * @min_xid:	    lowest exchange ID handled by this manager
 * @max_xid:	    highest exchange ID handled by this manager
 * @pool_max_index: highest valid slot index within a per-CPU pool
 * @stats:	    error/drop counters, bumped from the receive paths
 */
struct fc_exch_mgr {
	struct fc_exch_pool __percpu *pool;
	mempool_t *ep_pool;
	struct fc_lport *lport;
	enum fc_class class;
	struct kref kref;
	u16 min_xid;
	u16 max_xid;
	u16 pool_max_index;

	struct {
		atomic_t no_free_exch;		/* mempool_alloc failures */
		atomic_t no_free_exch_xid;	/* pool full, no free slot */
		atomic_t xid_not_found;		/* lookup by XID failed */
		atomic_t xid_busy;		/* new exch on in-use XID */
		atomic_t seq_not_found;		/* sequence ID mismatch */
		atomic_t non_bls_resp;		/* unexpected non-BLS resp */
	} stats;
};
101
102
103
104
105
106
107
108
109
110
111
112
113
/*
 * struct fc_exch_mgr_anchor - ties an exchange manager to a local port
 * @ema_list: entry on the lport's ema_list
 * @mp:	      the exchange manager
 * @match:    optional predicate; when set, the manager only handles frames
 *	      for which match(fp) returns true (see fc_exch_alloc()).
 */
struct fc_exch_mgr_anchor {
	struct list_head ema_list;
	struct fc_exch_mgr *mp;
	bool (*match)(struct fc_frame *);
};
119
120 static void fc_exch_rrq(struct fc_exch *);
121 static void fc_seq_ls_acc(struct fc_frame *);
122 static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason,
123 enum fc_els_rjt_explan);
124 static void fc_exch_els_rec(struct fc_frame *);
125 static void fc_exch_els_rrq(struct fc_frame *);
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209 static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
210
211
212
213
214
215
216
217
218
219
/**
 * fc_exch_name_lookup() - Map an opcode onto its name from a table
 * @op:	       the opcode to be looked up
 * @table:     the opcode/name table, entries may be NULL
 * @max_index: number of entries in @table
 *
 * Returns "unknown" for out-of-range opcodes and NULL table entries.
 */
static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
					      unsigned int max_index)
{
	const char *name = (op < max_index) ? table[op] : NULL;

	return name ? name : "unknown";
}
231
232
233
234
235
/**
 * fc_exch_rctl_name() - Return a printable name for an R_CTL value
 * @op: the R_CTL value from a frame header
 *
 * Falls back to "unknown" for values outside fc_exch_rctl_names[].
 */
static const char *fc_exch_rctl_name(unsigned int op)
{
	return fc_exch_name_lookup(op, fc_exch_rctl_names,
				   ARRAY_SIZE(fc_exch_rctl_names));
}
241
242
243
244
245
/**
 * fc_exch_hold() - Take a reference on an exchange
 * @ep: the exchange to reference
 *
 * Balanced by fc_exch_release().
 */
static inline void fc_exch_hold(struct fc_exch *ep)
{
	atomic_inc(&ep->ex_refcnt);
}
250
251
252
253
254
255
256
257
258
259
260
/**
 * fc_exch_setup_hdr() - Initialize a frame header from exchange state
 * @ep:	   the exchange owning the frame
 * @fp:	   the frame whose header is to be filled in
 * @f_ctl: F_CTL bits that will be set in the header
 *
 * Sets the frame's SOF/EOF, pads a sequence-ending frame to a 4-byte
 * boundary (recording the fill count in the low F_CTL bits), and copies
 * the exchange's OX_ID/RX_ID and current sequence ID/count into the
 * header.
 */
static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
			      u32 f_ctl)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	u16 fill;

	/* First frame of an exchange uses the initiating SOF for the class */
	fr_sof(fp) = ep->class;
	if (ep->seq.cnt)
		fr_sof(fp) = fc_sof_normal(ep->class);

	if (f_ctl & FC_FC_END_SEQ) {
		fr_eof(fp) = FC_EOF_T;
		if (fc_sof_needs_ack(ep->class))
			fr_eof(fp) = FC_EOF_N;

		/*
		 * Pad the payload to a 4-byte boundary; the number of fill
		 * bytes is encoded into the low bits of F_CTL so the peer
		 * can strip them.
		 */
		fill = fr_len(fp) & 3;
		if (fill) {
			fill = 4 - fill;

			skb_put(fp_skb(fp), fill);
			hton24(fh->fh_f_ctl, f_ctl | fill);
		}
	} else {
		/* Non-final frames must already be 4-byte aligned */
		WARN_ON(fr_len(fp) % 4 != 0);
		fr_eof(fp) = FC_EOF_N;
	}

	/* Copy exchange/sequence identifiers into the header */
	fh->fh_ox_id = htons(ep->oxid);
	fh->fh_rx_id = htons(ep->rxid);
	fh->fh_seq_id = ep->seq.id;
	fh->fh_seq_cnt = htons(ep->seq.cnt);
}
302
303
304
305
306
307
308
309
/**
 * fc_exch_release() - Drop a reference on an exchange
 * @ep: exchange to release
 *
 * When the last reference is dropped, the destructor (if any) is called
 * and the exchange is returned to the manager's mempool.  The exchange
 * is expected to be complete by then (WARN otherwise).
 */
static void fc_exch_release(struct fc_exch *ep)
{
	struct fc_exch_mgr *mp;

	if (atomic_dec_and_test(&ep->ex_refcnt)) {
		mp = ep->em;
		if (ep->destructor)
			ep->destructor(&ep->seq, ep->arg);
		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
		mempool_free(ep, mp->ep_pool);
	}
}
322
323
324
325
326
/**
 * fc_exch_timer_cancel() - Cancel the exchange timer if it is pending
 * @ep: the exchange whose timer to cancel
 *
 * Drops the reference taken when the timer was armed, but only when the
 * cancel actually prevented the work from running.
 */
static inline void fc_exch_timer_cancel(struct fc_exch *ep)
{
	if (cancel_delayed_work(&ep->timeout_work)) {
		FC_EXCH_DBG(ep, "Exchange timer canceled\n");
		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */
	}
}
334
335
336
337
338
339
340
341
342
343
/**
 * fc_exch_timer_set_locked() - Start the exchange timer with the lock held
 * @ep:	        the exchange whose timer to arm
 * @timer_msec: timeout in milliseconds
 *
 * Caller holds ep->ex_lock (see fc_exch_timer_set()).  A reference is
 * taken for the queued work; if the work was already queued the extra
 * reference is dropped again.  No-op once the exchange is done or being
 * reset.
 */
static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
					    unsigned int timer_msec)
{
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
		return;

	FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);

	fc_exch_hold(ep);		/* hold for timer */
	if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
				msecs_to_jiffies(timer_msec))) {
		FC_EXCH_DBG(ep, "Exchange already queued\n");
		fc_exch_release(ep);
	}
}
359
360
361
362
363
364
/**
 * fc_exch_timer_set() - Lock the exchange and start its timer
 * @ep:	        the exchange whose timer to arm
 * @timer_msec: timeout in milliseconds
 */
static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
{
	spin_lock_bh(&ep->ex_lock);
	fc_exch_timer_set_locked(ep, timer_msec);
	spin_unlock_bh(&ep->ex_lock);
}
371
372
373
374
375
376
377
/**
 * fc_exch_done_locked() - Complete an exchange with the lock held
 * @ep: the exchange to complete
 *
 * Caller holds ep->ex_lock.  Marks the exchange complete; if no recovery
 * qualifier is outstanding (ESB_ST_REC_QUAL) the exchange is fully done
 * and its timer is cancelled.
 *
 * Return: 0 if the caller should delete the exchange (fc_exch_delete()),
 *	   1 if it must stay around (already done, or REC qualifier held).
 */
static int fc_exch_done_locked(struct fc_exch *ep)
{
	int rc = 1;

	if (ep->state & FC_EX_DONE)
		return rc;
	ep->esb_stat |= ESB_ST_COMPLETE;

	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
		ep->state |= FC_EX_DONE;
		fc_exch_timer_cancel(ep);
		rc = 0;
	}
	return rc;
}
399
400 static struct fc_exch fc_quarantine_exch;
401
402
403
404
405
406
407
408
409
410
411 static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
412 u16 index)
413 {
414 struct fc_exch **exches = (struct fc_exch **)(pool + 1);
415 return exches[index];
416 }
417
418
419
420
421
422
423
424 static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
425 struct fc_exch *ep)
426 {
427 ((struct fc_exch **)(pool + 1))[index] = ep;
428 }
429
430
431
432
433
/**
 * fc_exch_delete() - Remove an exchange from its per-CPU pool
 * @ep: the exchange to remove
 *
 * Frees the pool slot (caching the index in pool->left/right/next_index
 * so it is preferred for reuse) unless the exchange is quarantined, in
 * which case the slot is poisoned with &fc_quarantine_exch so the XID is
 * never reused.  Drops the pool's reference on the exchange.
 */
static void fc_exch_delete(struct fc_exch *ep)
{
	struct fc_exch_pool *pool;
	u16 index;

	pool = ep->pool;
	spin_lock_bh(&pool->lock);
	WARN_ON(pool->total_exches <= 0);
	pool->total_exches--;

	/* slot index within this pool, derived from the XID */
	index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
	if (!(ep->state & FC_EX_QUARANTINE)) {
		if (pool->left == FC_XID_UNKNOWN)
			pool->left = index;
		else if (pool->right == FC_XID_UNKNOWN)
			pool->right = index;
		else
			pool->next_index = index;
		fc_exch_ptr_set(pool, index, NULL);
	} else {
		/* keep the XID out of circulation */
		fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
	}
	list_del(&ep->ex_list);
	spin_unlock_bh(&pool->lock);
	fc_exch_release(ep);	/* drop hold for exch in mp */
}
461
/**
 * fc_seq_send_locked() - Send a frame on a sequence, ex_lock held
 * @lport: the local port the sequence is on
 * @sp:	   the sequence to send on
 * @fp:	   the frame to send (consumed in all cases)
 *
 * Caller holds ep->ex_lock.  Fills in the frame header from the
 * exchange, advances the sequence count, and hands the frame to the
 * transport.  For non-BLS frames the exchange F_CTL and sequence-
 * initiative state are updated after the send.
 *
 * Return: 0 on success, -ENXIO if the exchange is already complete or
 *	   abnormal, or the transport's error code.
 */
static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
			      struct fc_frame *fp)
{
	struct fc_exch *ep;
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	int error = -ENXIO;
	u32 f_ctl;
	u8 fh_type = fh->fh_type;

	ep = fc_seq_exch(sp);

	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) {
		fc_frame_free(fp);
		goto out;
	}

	/* we should own sequence initiative when sending */
	WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));

	f_ctl = ntoh24(fh->fh_f_ctl);
	fc_exch_setup_hdr(ep, fp, f_ctl);
	fr_encaps(fp) = ep->encaps;

	/*
	 * Advance the sequence count by the number of frames the
	 * transport will fragment this into, or by one if no maximum
	 * payload is set.
	 */
	if (fr_max_payload(fp))
		sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
					fr_max_payload(fp));
	else
		sp->cnt++;

	error = lport->tt.frame_send(lport, fp);

	if (fh_type == FC_TYPE_BLS)
		goto out;

	/*
	 * Update the exchange and sequence flags, assuming all frames
	 * for the sequence have been sent.
	 */
	ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;	/* not first seq */
	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
out:
	return error;
}
514
515
516
517
518
519
520
521
522
523
524 int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
525 {
526 struct fc_exch *ep;
527 int error;
528 ep = fc_seq_exch(sp);
529 spin_lock_bh(&ep->ex_lock);
530 error = fc_seq_send_locked(lport, sp, fp);
531 spin_unlock_bh(&ep->ex_lock);
532 return error;
533 }
534 EXPORT_SYMBOL(fc_seq_send);
535
536
537
538
539
540
541
542
543
544
545 static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
546 {
547 struct fc_seq *sp;
548
549 sp = &ep->seq;
550 sp->ssb_stat = 0;
551 sp->cnt = 0;
552 sp->id = seq_id;
553 return sp;
554 }
555
556
557
558
559
560
/**
 * fc_seq_start_next_locked() - Start the next sequence, ex_lock held
 * @sp: the current sequence on the exchange
 *
 * Caller holds ep->ex_lock.  Reinitializes the exchange's sequence with
 * the next sequence ID and returns it.
 */
static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	sp = fc_seq_alloc(ep, ep->seq_id++);
	FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
		    ep->f_ctl, sp->id);
	return sp;
}
570
571
572
573
574
575
576 struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
577 {
578 struct fc_exch *ep = fc_seq_exch(sp);
579
580 spin_lock_bh(&ep->ex_lock);
581 sp = fc_seq_start_next_locked(sp);
582 spin_unlock_bh(&ep->ex_lock);
583
584 return sp;
585 }
586 EXPORT_SYMBOL(fc_seq_start_next);
587
588
589
590
591
592
/**
 * fc_seq_set_resp() - Set the response handler for an exchange
 * @sp:	  the sequence whose exchange's handler is set
 * @resp: the new response handler (may be NULL)
 * @arg:  argument passed to the handler
 *
 * Waits (uninterruptibly) until no response handler invocation from
 * another task is in flight (ep->resp_active, see fc_invoke_resp())
 * before installing the new handler, so a handler is never replaced
 * while a different task is still running the old one.  A task may
 * replace the handler from within its own handler invocation
 * (resp_task == current).
 */
void fc_seq_set_resp(struct fc_seq *sp,
		     void (*resp)(struct fc_seq *, struct fc_frame *, void *),
		     void *arg)
{
	struct fc_exch *ep = fc_seq_exch(sp);
	DEFINE_WAIT(wait);

	spin_lock_bh(&ep->ex_lock);
	while (ep->resp_active && ep->resp_task != current) {
		prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&ep->ex_lock);

		schedule();

		spin_lock_bh(&ep->ex_lock);
	}
	finish_wait(&ep->resp_wq, &wait);
	ep->resp = resp;
	ep->arg = arg;
	spin_unlock_bh(&ep->ex_lock);
}
614 EXPORT_SYMBOL(fc_seq_set_resp);
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
/**
 * fc_exch_abort_locked() - Abort an exchange, ex_lock held
 * @ep:	        the exchange to abort
 * @timer_msec: timer to arm before sending the abort, 0 for none
 *
 * Caller holds ep->ex_lock.  Starts a new sequence, optionally arms the
 * exchange timer, and sends an ABTS if the exchange has a source ID
 * (i.e. at least one frame was sent).  The exchange is marked abnormal
 * in all cases.
 *
 * Return: 0 on success, -ENXIO if the exchange is already complete,
 *	   -ENOMEM if no sequence could be started, -ENOBUFS if no frame
 *	   could be allocated for the ABTS.
 */
static int fc_exch_abort_locked(struct fc_exch *ep,
				unsigned int timer_msec)
{
	struct fc_seq *sp;
	struct fc_frame *fp;
	int error;

	FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
		FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
			    ep->esb_stat, ep->state);
		return -ENXIO;
	}

	/* start a new sequence for the ABTS */
	sp = fc_seq_start_next_locked(&ep->seq);
	if (!sp)
		return -ENOMEM;

	if (timer_msec)
		fc_exch_timer_set_locked(ep, timer_msec);

	if (ep->sid) {
		/*
		 * Send an ABTS only if a frame has been sent on this
		 * exchange (ep->sid is set once addresses are known).
		 */
		fp = fc_frame_alloc(ep->lp, 0);
		if (fp) {
			ep->esb_stat |= ESB_ST_SEQ_INIT;
			fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
				       FC_TYPE_BLS, FC_FC_END_SEQ |
				       FC_FC_SEQ_INIT, 0);
			error = fc_seq_send_locked(ep->lp, sp, fp);
		} else {
			error = -ENOBUFS;
		}
	} else {
		/* nothing was ever sent; nothing to abort on the wire */
		error = 0;
	}
	ep->esb_stat |= ESB_ST_ABNORMAL;
	return error;
}
681
682
683
684
685
686
687
688
689
690
691 int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
692 {
693 struct fc_exch *ep;
694 int error;
695
696 ep = fc_seq_exch(req_sp);
697 spin_lock_bh(&ep->ex_lock);
698 error = fc_exch_abort_locked(ep, timer_msec);
699 spin_unlock_bh(&ep->ex_lock);
700 return error;
701 }
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
/**
 * fc_invoke_resp() - Invoke the exchange's response handler
 * @ep: the exchange
 * @sp: the sequence passed to the handler
 * @fp: the frame (or ERR_PTR) passed to the handler
 *
 * Snapshots ep->resp/ep->arg under the lock and calls the handler
 * without it.  resp_active counts concurrent invocations and resp_task
 * records the first invoking task (or NULL when several tasks overlap);
 * fc_seq_set_resp() waits on resp_wq until resp_active drops to zero.
 *
 * Return: true if a handler was installed and called, false otherwise
 *	   (in which case the caller still owns @fp).
 */
static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
			   struct fc_frame *fp)
{
	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
	void *arg;
	bool res = false;

	spin_lock_bh(&ep->ex_lock);
	ep->resp_active++;
	if (ep->resp_task != current)
		ep->resp_task = !ep->resp_task ? current : NULL;
	resp = ep->resp;
	arg = ep->arg;
	spin_unlock_bh(&ep->ex_lock);

	if (resp) {
		resp(sp, fp, arg);
		res = true;
	}

	spin_lock_bh(&ep->ex_lock);
	if (--ep->resp_active == 0)
		ep->resp_task = NULL;
	spin_unlock_bh(&ep->ex_lock);

	if (ep->resp_active == 0)
		wake_up(&ep->resp_wq);

	return res;
}
755
756
757
758
759
/**
 * fc_exch_timeout() - Handle an exchange timer expiry
 * @work: the timeout_work delayed work within the exchange
 *
 * If the exchange completed but still held a recovery qualifier, send
 * an RRQ to release it.  Otherwise notify the upper layer with
 * -FC_EX_TIMEOUT, clear the handler, and abort the exchange.  Always
 * drops the reference taken when the timer was armed.
 */
static void fc_exch_timeout(struct work_struct *work)
{
	struct fc_exch *ep = container_of(work, struct fc_exch,
					  timeout_work.work);
	struct fc_seq *sp = &ep->seq;
	u32 e_stat;
	int rc = 1;

	FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);

	spin_lock_bh(&ep->ex_lock);
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
		goto unlock;

	e_stat = ep->esb_stat;
	if (e_stat & ESB_ST_COMPLETE) {
		/* exchange done; release any REC qualifier via RRQ */
		ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
		spin_unlock_bh(&ep->ex_lock);
		if (e_stat & ESB_ST_REC_QUAL)
			fc_exch_rrq(ep);
		goto done;
	} else {
		if (e_stat & ESB_ST_ABNORMAL)
			rc = fc_exch_done_locked(ep);
		spin_unlock_bh(&ep->ex_lock);
		if (!rc)
			fc_exch_delete(ep);
		fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT));
		fc_seq_set_resp(sp, NULL, ep->arg);
		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
		goto done;
	}
unlock:
	spin_unlock_bh(&ep->ex_lock);
done:
	/* drop the hold taken when the timer was armed */
	fc_exch_release(ep);
}
800
801
802
803
804
805
806
807
/**
 * fc_exch_em_alloc() - Allocate an exchange from a given exchange manager
 * @lport: the local port the exchange is for
 * @mp:	   the exchange manager to allocate from
 *
 * Allocates an fc_exch from the manager's mempool, then finds a free
 * slot in the current CPU's pool: recently-freed slots cached in
 * pool->left/right are preferred, otherwise slots are scanned from
 * pool->next_index.  The new exchange is returned held once and with
 * its ex_lock held (the caller must unlock it).
 *
 * Return: the new exchange, or NULL if the mempool or the pool's XID
 *	   space is exhausted.
 */
static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
					struct fc_exch_mgr *mp)
{
	struct fc_exch *ep;
	unsigned int cpu;
	u16 index;
	struct fc_exch_pool *pool;

	/* allocate memory for exchange */
	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
	if (!ep) {
		atomic_inc(&mp->stats.no_free_exch);
		goto out;
	}
	memset(ep, 0, sizeof(*ep));

	cpu = get_cpu();
	pool = per_cpu_ptr(mp->pool, cpu);
	spin_lock_bh(&pool->lock);
	put_cpu();

	/* prefer the two cached recently-freed slots */
	if (pool->left != FC_XID_UNKNOWN) {
		if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
			index = pool->left;
			pool->left = FC_XID_UNKNOWN;
			goto hit;
		}
	}
	if (pool->right != FC_XID_UNKNOWN) {
		if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
			index = pool->right;
			pool->right = FC_XID_UNKNOWN;
			goto hit;
		}
	}

	index = pool->next_index;
	/* scan for a free slot; give up after one full wrap */
	while (fc_exch_ptr_get(pool, index)) {
		index = index == mp->pool_max_index ? 0 : index + 1;
		if (index == pool->next_index)
			goto err;
	}
	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
hit:
	fc_exch_hold(ep);	/* hold for exch in mp */
	spin_lock_init(&ep->ex_lock);

	/*
	 * Hold exch lock for caller to prevent fc_exch_reset()
	 * from modifying the exchange until the caller is done with it.
	 */
	spin_lock_bh(&ep->ex_lock);

	fc_exch_ptr_set(pool, index, ep);
	list_add_tail(&ep->ex_list, &pool->ex_list);
	fc_seq_alloc(ep, ep->seq_id++);
	pool->total_exches++;
	spin_unlock_bh(&pool->lock);

	/* initialize exchange fields; XID encodes pool slot and CPU */
	ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
	ep->em = mp;
	ep->pool = pool;
	ep->lp = lport;
	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
	ep->rxid = FC_XID_UNKNOWN;
	ep->class = mp->class;
	ep->resp_active = 0;
	init_waitqueue_head(&ep->resp_wq);
	INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
out:
	return ep;
err:
	spin_unlock_bh(&pool->lock);
	atomic_inc(&mp->stats.no_free_exch_xid);
	mempool_free(ep, mp->ep_pool);
	return NULL;
}
890
891
892
893
894
895
896
897
898
899
900
901
/**
 * fc_exch_alloc() - Allocate an exchange from a suitable exchange manager
 * @lport: the local port to allocate the exchange on
 * @fp:	   the frame used to select an exchange manager via its match()
 *
 * Walks the lport's exchange-manager anchors in order and returns the
 * first successful allocation from a manager whose match() accepts the
 * frame (or that has no match()).
 *
 * Return: the new exchange (locked and held, per fc_exch_em_alloc()),
 *	   or NULL if no manager could provide one.
 */
static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
				     struct fc_frame *fp)
{
	struct fc_exch_mgr_anchor *ema;
	struct fc_exch *ep;

	list_for_each_entry(ema, &lport->ema_list, ema_list) {
		if (!ema->match || ema->match(fp)) {
			ep = fc_exch_em_alloc(lport, ema->mp);
			if (ep)
				return ep;
		}
	}
	return NULL;
}
917
918
919
920
921
922
923 static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
924 {
925 struct fc_lport *lport = mp->lport;
926 struct fc_exch_pool *pool;
927 struct fc_exch *ep = NULL;
928 u16 cpu = xid & fc_cpu_mask;
929
930 if (xid == FC_XID_UNKNOWN)
931 return NULL;
932
933 if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
934 pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n:",
935 lport->host->host_no, lport->port_id, xid, cpu);
936 return NULL;
937 }
938
939 if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
940 pool = per_cpu_ptr(mp->pool, cpu);
941 spin_lock_bh(&pool->lock);
942 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
943 if (ep == &fc_quarantine_exch) {
944 FC_LPORT_DBG(lport, "xid %x quarantined\n", xid);
945 ep = NULL;
946 }
947 if (ep) {
948 WARN_ON(ep->xid != xid);
949 fc_exch_hold(ep);
950 }
951 spin_unlock_bh(&pool->lock);
952 }
953 return ep;
954 }
955
956
957
958
959
960
961
962
963
/**
 * fc_exch_done() - Complete an exchange and clear its response handler
 * @sp: the sequence of the exchange to complete
 *
 * Marks the exchange done, uninstalls the response handler (waiting for
 * any in-flight invocation, see fc_seq_set_resp()), and removes the
 * exchange from its pool when fc_exch_done_locked() says it may go.
 */
void fc_exch_done(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);
	int rc;

	spin_lock_bh(&ep->ex_lock);
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);

	fc_seq_set_resp(sp, NULL, ep->arg);
	if (!rc)
		fc_exch_delete(ep);
}
977 EXPORT_SYMBOL(fc_exch_done);
978
979
980
981
982
983
984
985
986
/**
 * fc_exch_resp() - Allocate a new exchange for a received request
 * @lport: the local port the exchange was received on
 * @mp:	   the exchange manager (unused here; kept for the call signature)
 * @fp:	   the received request frame
 *
 * Sets up the responder side: source/destination IDs swapped relative
 * to the frame, RX_ID set to our XID, OX_ID copied from the initiator,
 * and sequence initiative taken unless the frame's F_CTL withholds it.
 * The exchange is returned held (the hold taken here is in addition to
 * the allocation hold) and unlocked.
 *
 * Return: the new exchange, or NULL if allocation failed.
 */
static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
				    struct fc_exch_mgr *mp,
				    struct fc_frame *fp)
{
	struct fc_exch *ep;
	struct fc_frame_header *fh;

	ep = fc_exch_alloc(lport, fp);
	if (ep) {
		ep->class = fc_frame_class(fp);

		/* we are the responder: exchange-context set, not first seq */
		ep->f_ctl |= FC_FC_EX_CTX;	/* we're responder */
		ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not new */
		fh = fc_frame_header_get(fp);
		ep->sid = ntoh24(fh->fh_d_id);
		ep->did = ntoh24(fh->fh_s_id);
		ep->oid = ep->did;

		/* our XID becomes the RX_ID; the sender's OX_ID is kept */
		ep->rxid = ep->xid;
		ep->oxid = ntohs(fh->fh_ox_id);
		ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
		if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
			ep->esb_stat &= ~ESB_ST_SEQ_INIT;

		fc_exch_hold(ep);	/* hold for caller */
		spin_unlock_bh(&ep->ex_lock);	/* lock from fc_exch_alloc */
	}
	return ep;
}
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
/**
 * fc_seq_lookup_recip() - Find or create a sequence for a received frame
 * @lport: the local port the frame was received on
 * @mp:	   the exchange manager to look in
 * @fp:	   the received frame (we are the sequence recipient)
 *
 * For exchange-responder frames (EX_CTX set) the exchange is looked up
 * by OX_ID; for originator frames by RX_ID, creating a new responder
 * exchange for a first frame with an initial SOF.  The frame's sequence
 * ID is adopted or validated, and sequence initiative is recorded.  On
 * success fr_seq(fp) is set and the exchange hold from the lookup is
 * kept for the caller.
 *
 * Return: FC_RJT_NONE on success, otherwise the P_RJT reason code (the
 *	   exchange is completed and released on the rel: path).
 */
static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
						 struct fc_exch_mgr *mp,
						 struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch *ep = NULL;
	struct fc_seq *sp = NULL;
	enum fc_pf_rjt_reason reject = FC_RJT_NONE;
	u32 f_ctl;
	u16 xid;

	f_ctl = ntoh24(fh->fh_f_ctl);
	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);

	/*
	 * Lookup or create the exchange if we will be the responder.
	 */
	if (f_ctl & FC_FC_EX_CTX) {
		xid = ntohs(fh->fh_ox_id);	/* we originated exch */
		ep = fc_exch_find(mp, xid);
		if (!ep) {
			atomic_inc(&mp->stats.xid_not_found);
			reject = FC_RJT_OX_ID;
			goto out;
		}
		if (ep->rxid == FC_XID_UNKNOWN)
			ep->rxid = ntohs(fh->fh_rx_id);
		else if (ep->rxid != ntohs(fh->fh_rx_id)) {
			reject = FC_RJT_OX_ID;
			goto rel;
		}
	} else {
		xid = ntohs(fh->fh_rx_id);	/* we are restarting exch */

		/*
		 * Special case for MDS issuing an ELS TEST with a
		 * bad rxid of 0.
		 */
		if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
		    fc_frame_payload_op(fp) == ELS_TEST) {
			fh->fh_rx_id = htons(FC_XID_UNKNOWN);
			xid = FC_XID_UNKNOWN;
		}

		/*
		 * new sequence - find the exchange
		 */
		ep = fc_exch_find(mp, xid);
		if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
			if (ep) {
				atomic_inc(&mp->stats.xid_busy);
				reject = FC_RJT_RX_ID;
				goto rel;
			}
			ep = fc_exch_resp(lport, mp, fp);
			if (!ep) {
				reject = FC_RJT_EXCH_EST;	/* XXX */
				goto out;
			}
			xid = ep->xid;	/* get our XID */
		} else if (!ep) {
			atomic_inc(&mp->stats.xid_not_found);
			reject = FC_RJT_RX_ID;	/* XID not found */
			goto out;
		}
	}

	spin_lock_bh(&ep->ex_lock);
	/*
	 * At this point, we have the exchange held.
	 * Find or create the sequence.
	 */
	if (fc_sof_is_init(fr_sof(fp))) {
		sp = &ep->seq;
		sp->ssb_stat |= SSB_ST_RESP;
		sp->id = fh->fh_seq_id;
	} else {
		sp = &ep->seq;
		if (sp->id != fh->fh_seq_id) {
			atomic_inc(&mp->stats.seq_not_found);
			if (f_ctl & FC_FC_END_SEQ) {
				/*
				 * Update sequence_id based on incoming last
				 * frame of sequence exchange. This is needed
				 * for FC target where DDP has been used
				 * on target where, stack is indicated only
				 * about last frame's (payload _header) header.
				 * Whereas "seq_id" which is part of
				 * frame_header is allocated by initiator
				 * which is totally different from "seq_id"
				 * allocated when XFER_RDY was sent by target.
				 * To avoid false -ve which results into not
				 * sending RSP, hence write request on other
				 * end never finishes.
				 */
				sp->ssb_stat |= SSB_ST_RESP;
				sp->id = fh->fh_seq_id;
			} else {
				spin_unlock_bh(&ep->ex_lock);

				/* sequence/exch should exist */
				reject = FC_RJT_SEQ_ID;
				goto rel;
			}
		}
	}
	WARN_ON(ep != fc_seq_exch(sp));

	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat |= ESB_ST_SEQ_INIT;
	spin_unlock_bh(&ep->ex_lock);

	fr_seq(fp) = sp;
out:
	return reject;
rel:
	fc_exch_done(&ep->seq);
	fc_exch_release(ep);	/* hold from fc_exch_find/fc_exch_resp */
	return reject;
}
1156
1157
1158
1159
1160
1161
1162
1163
1164
/**
 * fc_seq_lookup_orig() - Find a sequence we originated for a received frame
 * @mp: the exchange manager to look in
 * @fp: the received frame (we hold sequence context per its F_CTL)
 *
 * Looks up the exchange by OX_ID or RX_ID depending on exchange
 * context, and matches the frame's sequence ID against the exchange's
 * current sequence.  The exchange hold from the lookup is dropped
 * before returning.
 *
 * Return: the matching sequence, or NULL if the exchange or sequence ID
 *	   was not found.
 */
static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
					 struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch *ep;
	struct fc_seq *sp = NULL;
	u32 f_ctl;
	u16 xid;

	f_ctl = ntoh24(fh->fh_f_ctl);
	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
	xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
	ep = fc_exch_find(mp, xid);
	if (!ep)
		return NULL;
	if (ep->seq.id == fh->fh_seq_id) {
		/*
		 * Save the RX_ID if we didn't previously know it.
		 */
		sp = &ep->seq;
		if ((f_ctl & FC_FC_EX_CTX) != 0 &&
		    ep->rxid == FC_XID_UNKNOWN) {
			ep->rxid = ntohs(fh->fh_rx_id);
		}
	}
	fc_exch_release(ep);
	return sp;
}
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202 static void fc_exch_set_addr(struct fc_exch *ep,
1203 u32 orig_id, u32 resp_id)
1204 {
1205 ep->oid = orig_id;
1206 if (ep->esb_stat & ESB_ST_RESP) {
1207 ep->sid = resp_id;
1208 ep->did = orig_id;
1209 } else {
1210 ep->sid = orig_id;
1211 ep->did = resp_id;
1212 }
1213 }
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
/**
 * fc_seq_els_rsp_send() - Dispatch an ELS response or request handler
 * @fp:	      the received frame being responded to
 * @els_cmd:  the ELS command to handle
 * @els_data: reason/explanation codes used for LS_RJT
 *
 * Routes to the appropriate handler; unknown commands are only logged.
 */
void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
			 struct fc_seq_els_data *els_data)
{
	switch (els_cmd) {
	case ELS_LS_RJT:
		fc_seq_ls_rjt(fp, els_data->reason, els_data->explan);
		break;
	case ELS_LS_ACC:
		fc_seq_ls_acc(fp);
		break;
	case ELS_RRQ:
		fc_exch_els_rrq(fp);
		break;
	case ELS_REC:
		fc_exch_els_rec(fp);
		break;
	default:
		FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
	}
}
1244 EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send);
1245
1246
1247
1248
1249
1250
1251
1252
/**
 * fc_seq_send_last() - Send the last frame of a sequence, ex_lock held
 * @sp:	     the sequence to send on
 * @fp:	     the frame to send
 * @rctl:    the R_CTL value for the frame
 * @fh_type: the frame type
 *
 * Caller holds ep->ex_lock (goes through fc_seq_send_locked()).  Marks
 * the frame as ending the sequence and transferring sequence initiative.
 */
static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
			     enum fc_rctl rctl, enum fc_fh_type fh_type)
{
	u32 f_ctl;
	struct fc_exch *ep = fc_seq_exch(sp);

	f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
	f_ctl |= ep->f_ctl;
	fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
	fc_seq_send_locked(ep->lp, sp, fp);
}
1264
1265
1266
1267
1268
1269
1270
1271
/**
 * fc_seq_send_ack() - Send an ACK_1 for a received frame
 * @sp:	   the sequence to ACK on
 * @rx_fp: the received frame being acknowledged
 *
 * Only sends an ACK if the received frame's SOF class requires one
 * (dropped silently on allocation failure).  The ACK's F_CTL is derived
 * from the received frame with the exchange/sequence context bits
 * inverted, and the sequence ID/count echoed back.
 */
static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_frame_header *rx_fh;
	struct fc_frame_header *fh;
	struct fc_exch *ep = fc_seq_exch(sp);
	struct fc_lport *lport = ep->lp;
	unsigned int f_ctl;

	/* ACK only needed for class-F/class-2-style SOFs */
	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
		fp = fc_frame_alloc(lport, 0);
		if (!fp) {
			FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n");
			return;
		}

		fh = fc_frame_header_get(fp);
		fh->fh_r_ctl = FC_RCTL_ACK_1;
		fh->fh_type = FC_TYPE_BLS;

		/*
		 * Build F_CTL from the received frame's bits, flipping
		 * the context bits since the ACK travels the other way.
		 */
		rx_fh = fc_frame_header_get(rx_fp);
		f_ctl = ntoh24(rx_fh->fh_f_ctl);
		f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
			FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
			FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
			FC_FC_RETX_SEQ | FC_FC_UNI_TX;
		f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
		hton24(fh->fh_f_ctl, f_ctl);

		fc_exch_setup_hdr(ep, fp, f_ctl);
		fh->fh_seq_id = rx_fh->fh_seq_id;
		fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
		fh->fh_parm_offset = htonl(1);	/* ack single frame */

		fr_sof(fp) = fr_sof(rx_fp);
		if (f_ctl & FC_FC_END_SEQ)
			fr_eof(fp) = FC_EOF_T;
		else
			fr_eof(fp) = FC_EOF_N;

		lport->tt.frame_send(lport, fp);
	}
}
1325
1326
1327
1328
1329
1330
1331
1332
1333
/**
 * fc_exch_send_ba_rjt() - Send a BA_RJT for a received BLS request
 * @rx_fp:  the rejected frame
 * @reason: the reason code for the reject
 * @explan: the explanation code for the reject
 *
 * Builds the reject with addresses and exchange IDs taken from the
 * received frame (source/destination swapped), F_CTL derived from the
 * received frame with context bits inverted, and sends it.  Dropped
 * silently if no frame can be allocated.
 */
static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
				enum fc_ba_rjt_reason reason,
				enum fc_ba_rjt_explan explan)
{
	struct fc_frame *fp;
	struct fc_frame_header *rx_fh;
	struct fc_frame_header *fh;
	struct fc_ba_rjt *rp;
	struct fc_seq *sp;
	struct fc_lport *lport;
	unsigned int f_ctl;

	lport = fr_dev(rx_fp);
	sp = fr_seq(rx_fp);
	fp = fc_frame_alloc(lport, sizeof(*rp));
	if (!fp) {
		FC_EXCH_DBG(fc_seq_exch(sp),
			     "Drop BA_RJT request, out of memory\n");
		return;
	}
	fh = fc_frame_header_get(fp);
	rx_fh = fc_frame_header_get(rx_fp);

	memset(fh, 0, sizeof(*fh) + sizeof(*rp));

	rp = fc_frame_payload_get(fp, sizeof(*rp));
	rp->br_reason = reason;
	rp->br_explan = explan;

	/*
	 * Swap the source and destination addresses; echo the IDs.
	 */
	memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
	memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
	fh->fh_ox_id = rx_fh->fh_ox_id;
	fh->fh_rx_id = rx_fh->fh_rx_id;
	fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
	fh->fh_r_ctl = FC_RCTL_BA_RJT;
	fh->fh_type = FC_TYPE_BLS;

	/*
	 * Form F_CTL from the received frame: keep the relevant bits,
	 * invert the context bits, mark last/end of sequence, and clear
	 * first-sequence.
	 */
	f_ctl = ntoh24(rx_fh->fh_f_ctl);
	f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
		FC_FC_END_CONN | FC_FC_SEQ_INIT |
		FC_FC_RETX_SEQ | FC_FC_UNI_TX;
	f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
	f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
	f_ctl &= ~FC_FC_FIRST_SEQ;
	hton24(fh->fh_f_ctl, f_ctl);

	fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
	fr_eof(fp) = FC_EOF_T;
	if (fc_sof_needs_ack(fr_sof(fp)))
		fr_eof(fp) = FC_EOF_N;

	lport->tt.frame_send(lport, fp);
}
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
/**
 * fc_exch_recv_abts() - Handle a received ABTS
 * @ep:	   the exchange the ABTS is for, or NULL if not found
 * @rx_fp: the received ABTS frame (consumed)
 *
 * Replies with a BA_ACC carrying the sequence information, takes a
 * recovery-qualifier hold (ESB_ST_REC_QUAL) and arms the R_A_TOV timer,
 * and marks the exchange abnormal.  Replies with BA_RJT if the exchange
 * is unknown or already complete.
 */
static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_ba_acc *ap;
	struct fc_frame_header *fh;
	struct fc_seq *sp;

	if (!ep)
		goto reject;

	FC_EXCH_DBG(ep, "exch: ABTS received\n");
	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
	if (!fp) {
		FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n");
		goto free;
	}

	spin_lock_bh(&ep->ex_lock);
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		spin_unlock_bh(&ep->ex_lock);
		FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n");
		fc_frame_free(fp);
		goto reject;
	}
	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
		ep->esb_stat |= ESB_ST_REC_QUAL;
		fc_exch_hold(ep);	/* hold for REC_QUAL */
	}
	fc_exch_timer_set_locked(ep, ep->r_a_tov);
	fh = fc_frame_header_get(fp);
	ap = fc_frame_payload_get(fp, sizeof(*ap));
	memset(ap, 0, sizeof(*ap));
	sp = &ep->seq;
	ap->ba_high_seq_cnt = htons(0xffff);
	if (sp->ssb_stat & SSB_ST_RESP) {
		/* we received frames on this sequence; report its state */
		ap->ba_seq_id = sp->id;
		ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
		ap->ba_high_seq_cnt = fh->fh_seq_cnt;
		ap->ba_low_seq_cnt = htons(sp->cnt);
	}
	sp = fc_seq_start_next_locked(sp);
	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
	ep->esb_stat |= ESB_ST_ABNORMAL;
	spin_unlock_bh(&ep->ex_lock);

free:
	fc_frame_free(rx_fp);
	return;

reject:
	fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
	goto free;
}
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
/**
 * fc_seq_assign() - Assign a sequence to a received frame
 * @lport: the local port the frame was received on
 * @fp:	   the frame (must not already have a sequence)
 *
 * Tries each exchange-manager anchor whose match() accepts the frame
 * until fc_seq_lookup_recip() succeeds and sets fr_seq(fp).
 *
 * Return: the assigned sequence, or NULL if none could be found/created.
 */
struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_exch_mgr_anchor *ema;

	WARN_ON(lport != fr_dev(fp));
	WARN_ON(fr_seq(fp));
	fr_seq(fp) = NULL;

	list_for_each_entry(ema, &lport->ema_list, ema_list)
		if ((!ema->match || ema->match(fp)) &&
		    fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
			break;
	return fr_seq(fp);
}
1485 EXPORT_SYMBOL(fc_seq_assign);
1486
1487
1488
1489
1490
/**
 * fc_seq_release() - Release the hold on a sequence's exchange
 * @sp: the sequence whose exchange hold is dropped
 */
void fc_seq_release(struct fc_seq *sp)
{
	fc_exch_release(fc_seq_exch(sp));
}
1495 EXPORT_SYMBOL(fc_seq_release);
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
/**
 * fc_exch_recv_req() - Handle a received request frame
 * @lport: the local port that received the request (possibly a base
 *	   port; the real destination is looked up by D_ID)
 * @mp:	   the exchange manager for the request
 * @fp:	   the request frame (consumed)
 *
 * Resolves the destination (v)port, passes frames with an unknown RX_ID
 * straight to the lport, otherwise finds/creates the exchange, ACKs the
 * frame if needed, and delivers it via the response handler or, if none
 * is set, to the lport's receive path.
 */
static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
			     struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp = NULL;
	struct fc_exch *ep = NULL;
	enum fc_pf_rjt_reason reject;

	/* direct the frame to the NPIV port matching the D_ID */
	lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
	if (!lport) {
		fc_frame_free(fp);
		return;
	}
	fr_dev(fp) = lport;

	BUG_ON(fr_seq(fp));		/* XXX remove later */

	/*
	 * If the RX_ID is still unknown this is a new exchange from the
	 * remote end; let the lport's receive path assign a sequence.
	 */
	if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
		return fc_lport_recv(lport, fp);

	reject = fc_seq_lookup_recip(lport, mp, fp);
	if (reject == FC_RJT_NONE) {
		sp = fr_seq(fp);	/* sequence will be held */
		ep = fc_seq_exch(sp);
		fc_seq_send_ack(sp, fp);
		ep->encaps = fr_encaps(fp);

		/*
		 * Deliver via the installed response handler; if none,
		 * fall back to the lport receive path.  The hold from
		 * fc_seq_lookup_recip() is dropped afterwards.
		 */
		if (!fc_invoke_resp(ep, sp, fp))
			fc_lport_recv(lport, fp);
		fc_exch_release(ep);	/* release from lookup */
	} else {
		FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
			     reject);
		fc_frame_free(fp);
	}
}
1560
1561
1562
1563
1564
1565
1566
1567
/**
 * fc_exch_recv_seq_resp() - Handle a response frame for an exchange we
 *			     originated
 * @mp: the exchange manager to look in
 * @fp: the response frame (consumed)
 *
 * Looks up the exchange by OX_ID and validates its state and addresses.
 * Records the peer's RX_ID, updates sequence state and initiative, ACKs
 * if the SOF requires it, completes the exchange on a final non-FCP
 * frame, and delivers the frame to the response handler.
 */
static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp;
	struct fc_exch *ep;
	enum fc_sof sof;
	u32 f_ctl;
	int rc;

	ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
	if (!ep) {
		atomic_inc(&mp->stats.xid_not_found);
		goto out;
	}
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	if (ep->rxid == FC_XID_UNKNOWN)
		ep->rxid = ntohs(fh->fh_rx_id);
	/* sanity-check the frame's addresses against the exchange */
	if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	if (ep->did != ntoh24(fh->fh_s_id) &&
	    ep->did != FC_FID_FLOGI) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	sof = fr_sof(fp);
	sp = &ep->seq;
	if (fc_sof_is_init(sof)) {
		/* new sequence from the responder */
		sp->ssb_stat |= SSB_ST_RESP;
		sp->id = fh->fh_seq_id;
	}

	f_ctl = ntoh24(fh->fh_f_ctl);
	fr_seq(fp) = sp;

	spin_lock_bh(&ep->ex_lock);
	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat |= ESB_ST_SEQ_INIT;
	spin_unlock_bh(&ep->ex_lock);

	if (fc_sof_needs_ack(sof))
		fc_seq_send_ack(sp, fp);

	/* final frame of a non-FCP exchange: complete it now */
	if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
	    (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
		spin_lock_bh(&ep->ex_lock);
		rc = fc_exch_done_locked(ep);
		WARN_ON(fc_seq_exch(sp) != ep);
		spin_unlock_bh(&ep->ex_lock);
		if (!rc)
			fc_exch_delete(ep);
	}

	/*
	 * Hand the frame to the response handler; free it ourselves if
	 * no handler is installed.
	 */
	if (!fc_invoke_resp(ep, sp, fp))
		fc_frame_free(fp);

	fc_exch_release(ep);	/* release hold from fc_exch_find */
	return;
rel:
	fc_exch_release(ep);
out:
	fc_frame_free(fp);
}
1649
1650
1651
1652
1653
1654
1655
1656 static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1657 {
1658 struct fc_seq *sp;
1659
1660 sp = fc_seq_lookup_orig(mp, fp);
1661
1662 if (!sp)
1663 atomic_inc(&mp->stats.xid_not_found);
1664 else
1665 atomic_inc(&mp->stats.non_bls_resp);
1666
1667 fc_frame_free(fp);
1668 }
1669
1670
1671
1672
1673
1674
1675
1676
1677
/**
 * fc_exch_abts_resp() - Handle a BA_ACC or BA_RJT response to our ABTS
 * @ep: the exchange the response is for
 * @fp: the BLS response frame (consumed via fc_invoke_resp() or freed)
 *
 * Cancels the exchange timer, and for a BA_ACC whose sequence
 * information indicates an incomplete sequence takes a recovery
 * qualifier (re-arming the timer for R_A_TOV).  Completes the exchange
 * for non-FCP types when the response ends the last sequence, then
 * delivers the frame to the response handler.
 */
static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_ba_acc *ap;
	struct fc_seq *sp;
	u16 low;
	u16 high;
	int rc = 1, has_rec = 0;

	fh = fc_frame_header_get(fp);
	FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
		    fc_exch_rctl_name(fh->fh_r_ctl));

	if (cancel_delayed_work_sync(&ep->timeout_work)) {
		FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n");
		fc_exch_release(ep);	/* release from pending timer hold */
	}

	spin_lock_bh(&ep->ex_lock);
	switch (fh->fh_r_ctl) {
	case FC_RCTL_BA_ACC:
		ap = fc_frame_payload_get(fp, sizeof(*ap));
		if (!ap)
			break;

		/*
		 * A low/high sequence-count mismatch in the BA_ACC means
		 * the aborted sequence may be incomplete; hold a recovery
		 * qualifier for it.
		 */
		low = ntohs(ap->ba_low_seq_cnt);
		high = ntohs(ap->ba_high_seq_cnt);
		if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
		    (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
		     ap->ba_seq_id == ep->seq_id) && low != high) {
			ep->esb_stat |= ESB_ST_REC_QUAL;
			fc_exch_hold(ep);	/* hold for recovery qualifier */
			has_rec = 1;
		}
		break;
	case FC_RCTL_BA_RJT:
		break;
	default:
		break;
	}

	/* do we need to do some other checks here? */
	sp = &ep->seq;

	/*
	 * Decide whether to establish or continue, depending on
	 * whether this is an FCP or non-FCP exchange.
	 */
	if (ep->fh_type != FC_TYPE_FCP &&
	    ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
		rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);

	fc_exch_hold(ep);	/* keep ep alive across the callbacks below */
	if (!rc)
		fc_exch_delete(ep);
	if (!fc_invoke_resp(ep, sp, fp))
		fc_frame_free(fp);
	if (has_rec)
		fc_exch_timer_set(ep, ep->r_a_tov);
	fc_exch_release(ep);	/* drop the local hold */
}
1745
1746
1747
1748
1749
1750
1751
1752
1753
/**
 * fc_exch_recv_bls() - Handler for a received basic link service frame
 * @mp: The exchange manager the frame belongs to
 * @fp: The BLS frame (consumed here or by the handlers called)
 */
static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_exch *ep;
	u32 f_ctl;

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);
	fr_seq(fp) = NULL;

	/* Look up by OX_ID or RX_ID depending on which context we hold. */
	ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
			  ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
	if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
		spin_lock_bh(&ep->ex_lock);
		ep->esb_stat |= ESB_ST_SEQ_INIT;
		spin_unlock_bh(&ep->ex_lock);
	}
	if (f_ctl & FC_FC_SEQ_CTX) {
		/*
		 * Response to a sequence we initiated: only ACKs are
		 * expected here; anything else is logged and dropped.
		 */
		switch (fh->fh_r_ctl) {
		case FC_RCTL_ACK_1:
		case FC_RCTL_ACK_0:
			break;
		default:
			if (ep)
				FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n",
					    fh->fh_r_ctl,
					    fc_exch_rctl_name(fh->fh_r_ctl));
			break;
		}
		fc_frame_free(fp);
	} else {
		/* BLS request, or ABTS response for a sequence we hold. */
		switch (fh->fh_r_ctl) {
		case FC_RCTL_BA_RJT:
		case FC_RCTL_BA_ACC:
			if (ep)
				fc_exch_abts_resp(ep, fp);
			else
				fc_frame_free(fp);
			break;
		case FC_RCTL_BA_ABTS:
			if (ep)
				fc_exch_recv_abts(ep, fp);
			else
				fc_frame_free(fp);
			break;
		default:	/* ignore anything else */
			fc_frame_free(fp);
			break;
		}
	}
	if (ep)
		fc_exch_release(ep);	/* drop hold from fc_exch_find */
}
1811
1812
1813
1814
1815
1816
1817
1818
1819 static void fc_seq_ls_acc(struct fc_frame *rx_fp)
1820 {
1821 struct fc_lport *lport;
1822 struct fc_els_ls_acc *acc;
1823 struct fc_frame *fp;
1824 struct fc_seq *sp;
1825
1826 lport = fr_dev(rx_fp);
1827 sp = fr_seq(rx_fp);
1828 fp = fc_frame_alloc(lport, sizeof(*acc));
1829 if (!fp) {
1830 FC_EXCH_DBG(fc_seq_exch(sp),
1831 "exch: drop LS_ACC, out of memory\n");
1832 return;
1833 }
1834 acc = fc_frame_payload_get(fp, sizeof(*acc));
1835 memset(acc, 0, sizeof(*acc));
1836 acc->la_cmd = ELS_LS_ACC;
1837 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1838 lport->tt.frame_send(lport, fp);
1839 }
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850 static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
1851 enum fc_els_rjt_explan explan)
1852 {
1853 struct fc_lport *lport;
1854 struct fc_els_ls_rjt *rjt;
1855 struct fc_frame *fp;
1856 struct fc_seq *sp;
1857
1858 lport = fr_dev(rx_fp);
1859 sp = fr_seq(rx_fp);
1860 fp = fc_frame_alloc(lport, sizeof(*rjt));
1861 if (!fp) {
1862 FC_EXCH_DBG(fc_seq_exch(sp),
1863 "exch: drop LS_ACC, out of memory\n");
1864 return;
1865 }
1866 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1867 memset(rjt, 0, sizeof(*rjt));
1868 rjt->er_cmd = ELS_LS_RJT;
1869 rjt->er_reason = reason;
1870 rjt->er_explan = explan;
1871 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1872 lport->tt.frame_send(lport, fp);
1873 }
1874
1875
1876
1877
1878
1879
1880
/**
 * fc_exch_reset() - Reset an exchange
 * @ep: The exchange to be reset
 *
 * Cancels any pending timer, drops a Recovery Qualifier reference if one
 * was held, completes the exchange, and reports -FC_EX_CLOSED to the
 * response handler.
 */
static void fc_exch_reset(struct fc_exch *ep)
{
	struct fc_seq *sp;
	int rc = 1;

	spin_lock_bh(&ep->ex_lock);
	ep->state |= FC_EX_RST_CLEANUP;
	fc_exch_timer_cancel(ep);
	/* Drop the extra reference held for the Recovery Qualifier. */
	if (ep->esb_stat & ESB_ST_REC_QUAL)
		atomic_dec(&ep->ex_refcnt);
	ep->esb_stat &= ~ESB_ST_REC_QUAL;
	sp = &ep->seq;
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);

	/* Keep the exchange alive across delete and the callback below. */
	fc_exch_hold(ep);

	if (!rc)
		fc_exch_delete(ep);

	fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
	/* Clear the response handler so no further callbacks occur. */
	fc_seq_set_resp(sp, NULL, ep->arg);
	fc_exch_release(ep);
}
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
/**
 * fc_exch_pool_reset() - Reset a per-CPU exchange pool
 * @lport: The local port whose exchanges should be reset
 * @pool:  The per-CPU pool to be reset
 * @sid:   The source ID to match, or 0 to match any source ID
 * @did:   The destination ID to match, or 0 to match any destination ID
 *
 * Resets every matching exchange in the pool.
 */
static void fc_exch_pool_reset(struct fc_lport *lport,
			       struct fc_exch_pool *pool,
			       u32 sid, u32 did)
{
	struct fc_exch *ep;
	struct fc_exch *next;

	spin_lock_bh(&pool->lock);
restart:
	list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
		if ((lport == ep->lp) &&
		    (sid == 0 || sid == ep->sid) &&
		    (did == 0 || did == ep->did)) {
			fc_exch_hold(ep);
			/* fc_exch_reset() takes ep->ex_lock; drop ours first. */
			spin_unlock_bh(&pool->lock);

			fc_exch_reset(ep);

			fc_exch_release(ep);
			spin_lock_bh(&pool->lock);

			/*
			 * The list may have changed while the pool lock was
			 * dropped, so restart the scan from the beginning.
			 */
			goto restart;
		}
	}
	pool->next_index = 0;
	pool->left = FC_XID_UNKNOWN;
	pool->right = FC_XID_UNKNOWN;
	spin_unlock_bh(&pool->lock);
}
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963 void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
1964 {
1965 struct fc_exch_mgr_anchor *ema;
1966 unsigned int cpu;
1967
1968 list_for_each_entry(ema, &lport->ema_list, ema_list) {
1969 for_each_possible_cpu(cpu)
1970 fc_exch_pool_reset(lport,
1971 per_cpu_ptr(ema->mp->pool, cpu),
1972 sid, did);
1973 }
1974 }
1975 EXPORT_SYMBOL(fc_exch_mgr_reset);
1976
1977
1978
1979
1980
1981
1982
1983
1984 static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid)
1985 {
1986 struct fc_exch_mgr_anchor *ema;
1987
1988 list_for_each_entry(ema, &lport->ema_list, ema_list)
1989 if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid)
1990 return fc_exch_find(ema->mp, xid);
1991 return NULL;
1992 }
1993
1994
1995
1996
1997
1998
1999
/**
 * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
 * @rfp: The REC frame; the reply is sent on the same exchange
 *
 * Looks up the referenced exchange and answers with an LS_ACC carrying
 * the exchange status, or with an LS_RJT when the exchange cannot be
 * found or the IDs do not match.
 */
static void fc_exch_els_rec(struct fc_frame *rfp)
{
	struct fc_lport *lport;
	struct fc_frame *fp;
	struct fc_exch *ep;
	struct fc_els_rec *rp;
	struct fc_els_rec_acc *acc;
	enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
	enum fc_els_rjt_explan explan;
	u32 sid;
	u16 xid, rxid, oxid;

	lport = fr_dev(rfp);
	rp = fc_frame_payload_get(rfp, sizeof(*rp));
	explan = ELS_EXPL_INV_LEN;
	if (!rp)
		goto reject;
	sid = ntoh24(rp->rec_s_id);
	rxid = ntohs(rp->rec_rx_id);
	oxid = ntohs(rp->rec_ox_id);

	explan = ELS_EXPL_OXID_RXID;
	/* If the requester originated the exchange, look up by OX_ID. */
	if (sid == fc_host_port_id(lport->host))
		xid = oxid;
	else
		xid = rxid;
	if (xid == FC_XID_UNKNOWN) {
		FC_LPORT_DBG(lport,
			     "REC request from %x: invalid rxid %x oxid %x\n",
			     sid, rxid, oxid);
		goto reject;
	}
	ep = fc_exch_lookup(lport, xid);
	if (!ep) {
		FC_LPORT_DBG(lport,
			     "REC request from %x: rxid %x oxid %x not found\n",
			     sid, rxid, oxid);
		goto reject;
	}
	FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n",
		    sid, rxid, oxid);
	/* Reject when the found exchange does not match the request's IDs. */
	if (ep->oid != sid || oxid != ep->oxid)
		goto rel;
	if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
		goto rel;
	fp = fc_frame_alloc(lport, sizeof(*acc));
	if (!fp) {
		FC_EXCH_DBG(ep, "Drop REC request, out of memory\n");
		goto out;
	}

	/* Build the REC LS_ACC payload from the exchange state. */
	acc = fc_frame_payload_get(fp, sizeof(*acc));
	memset(acc, 0, sizeof(*acc));
	acc->reca_cmd = ELS_LS_ACC;
	acc->reca_ox_id = rp->rec_ox_id;
	memcpy(acc->reca_ofid, rp->rec_s_id, 3);
	acc->reca_rx_id = htons(ep->rxid);
	if (ep->sid == ep->oid)
		hton24(acc->reca_rfid, ep->did);
	else
		hton24(acc->reca_rfid, ep->sid);
	acc->reca_fc4value = htonl(ep->seq.rec_data);
	acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
						 ESB_ST_SEQ_INIT |
						 ESB_ST_COMPLETE));
	fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
out:
	fc_exch_release(ep);	/* drop hold from fc_exch_lookup */
	return;

rel:
	fc_exch_release(ep);
reject:
	fc_seq_ls_rjt(rfp, reason, explan);
}
2076
2077
2078
2079
2080
2081
2082
2083
2084
/**
 * fc_exch_rrq_resp() - Handler for RRQ responses
 * @sp:  The sequence the RRQ is on
 * @fp:  The RRQ response frame, or an ERR_PTR() error code
 * @arg: The exchange the RRQ was sent for (held by the RRQ sender)
 *
 * On any terminal outcome the aborted exchange is completed and the
 * hold taken when the RRQ was sent is released.
 */
static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	struct fc_exch *aborted_ep = arg;
	unsigned int op;

	if (IS_ERR(fp)) {
		int err = PTR_ERR(fp);

		/* Exchange closed or timed out: nothing more to wait for. */
		if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
			goto cleanup;
		FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
			    "frame error %d\n", err);
		return;
	}

	op = fc_frame_payload_op(fp);
	fc_frame_free(fp);

	switch (op) {
	case ELS_LS_RJT:
		FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
		/* fall through */
	case ELS_LS_ACC:
		goto cleanup;
	default:
		FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n",
			    op);
		return;
	}

cleanup:
	fc_exch_done(&aborted_ep->seq);
	/* Drop the hold the RRQ sender took on the aborted exchange. */
	fc_exch_release(aborted_ep);
}
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
/**
 * fc_exch_seq_send() - Send a frame using a new exchange and sequence
 * @lport:	The local port to send the frame on
 * @fp:		The frame to be sent (consumed on both success and failure)
 * @resp:	The response handler for the new exchange
 * @destructor:	The destructor for the exchange
 * @arg:	The argument to be passed to the response handler
 * @timer_msec:	The timeout period for the exchange, or 0 for no timeout
 *
 * Returns the new sequence, or NULL on allocation or send failure.
 *
 * NOTE(review): there is no visible spin_lock_bh() before the unlocks
 * below - this relies on fc_exch_alloc() returning the new exchange
 * with ep->ex_lock already held; confirm against fc_exch_em_alloc().
 */
struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
				struct fc_frame *fp,
				void (*resp)(struct fc_seq *,
					     struct fc_frame *fp,
					     void *arg),
				void (*destructor)(struct fc_seq *, void *),
				void *arg, u32 timer_msec)
{
	struct fc_exch *ep;
	struct fc_seq *sp = NULL;
	struct fc_frame_header *fh;
	struct fc_fcp_pkt *fsp = NULL;
	int rc = 1;

	ep = fc_exch_alloc(lport, fp);
	if (!ep) {
		fc_frame_free(fp);
		return NULL;
	}
	ep->esb_stat |= ESB_ST_SEQ_INIT;
	fh = fc_frame_header_get(fp);
	fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
	ep->resp = resp;
	ep->destructor = destructor;
	ep->arg = arg;
	ep->r_a_tov = lport->r_a_tov;
	ep->lp = lport;
	sp = &ep->seq;

	ep->fh_type = fh->fh_type;	/* save for possible timeout handling */
	ep->f_ctl = ntoh24(fh->fh_f_ctl);
	fc_exch_setup_hdr(ep, fp, ep->f_ctl);
	sp->cnt++;

	/* Set up direct data placement for offload-capable xids. */
	if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
		fsp = fr_fsp(fp);
		fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
	}

	if (unlikely(lport->tt.frame_send(lport, fp)))
		goto err;

	if (timer_msec)
		fc_exch_timer_set_locked(ep, timer_msec);
	ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not first seq */

	if (ep->f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
	spin_unlock_bh(&ep->ex_lock);
	return sp;
err:
	/* Undo DDP setup and tear the new exchange back down. */
	if (fsp)
		fc_fcp_ddp_done(fsp);
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);
	if (!rc)
		fc_exch_delete(ep);
	return NULL;
}
EXPORT_SYMBOL(fc_exch_seq_send);
2219
2220
2221
2222
2223
2224
2225
2226
/**
 * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
 * @ep: The exchange to send the RRQ for (caller holds a reference)
 *
 * On successful send the caller's reference is consumed by
 * fc_exch_rrq_resp().  On failure the recovery qualifier is kept and
 * the send is retried from the exchange timer, unless the exchange is
 * already done or being reset, in which case the reference is dropped.
 */
static void fc_exch_rrq(struct fc_exch *ep)
{
	struct fc_lport *lport;
	struct fc_els_rrq *rrq;
	struct fc_frame *fp;
	u32 did;

	lport = ep->lp;

	fp = fc_frame_alloc(lport, sizeof(*rrq));
	if (!fp)
		goto retry;

	rrq = fc_frame_payload_get(fp, sizeof(*rrq));
	memset(rrq, 0, sizeof(*rrq));
	rrq->rrq_cmd = ELS_RRQ;
	hton24(rrq->rrq_s_id, ep->sid);
	rrq->rrq_ox_id = htons(ep->oxid);
	rrq->rrq_rx_id = htons(ep->rxid);

	/* Send to the exchange originator if we were the responder. */
	did = ep->did;
	if (ep->esb_stat & ESB_ST_RESP)
		did = ep->sid;

	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
		       lport->port_id, FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
			     lport->e_d_tov))
		return;	/* fc_exch_rrq_resp() releases ep */

retry:
	FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
	spin_lock_bh(&ep->ex_lock);
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
		spin_unlock_bh(&ep->ex_lock);
		/* Exchange is going away; drop the reference held for RRQ. */
		fc_exch_release(ep);
		return;
	}
	ep->esb_stat |= ESB_ST_REC_QUAL;
	fc_exch_timer_set_locked(ep, ep->r_a_tov);	/* retry on timeout */
	spin_unlock_bh(&ep->ex_lock);
}
2272
2273
2274
2275
2276
/**
 * fc_exch_els_rrq() - Handler for ELS RRQ (Reset Recovery Qualifier) requests
 * @fp: The RRQ frame; the LS_ACC or LS_RJT reply is sent on its exchange
 */
static void fc_exch_els_rrq(struct fc_frame *fp)
{
	struct fc_lport *lport;
	struct fc_exch *ep = NULL;	/* request exchange */
	struct fc_els_rrq *rp;
	u32 sid;
	u16 xid;
	enum fc_els_rjt_explan explan;

	lport = fr_dev(fp);
	rp = fc_frame_payload_get(fp, sizeof(*rp));
	explan = ELS_EXPL_INV_LEN;
	if (!rp)
		goto reject;

	/*
	 * Look up the referenced exchange: by OX_ID if the requester
	 * originated it, otherwise by RX_ID.
	 */
	sid = ntoh24(rp->rrq_s_id);
	xid = fc_host_port_id(lport->host) == sid ?
			ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
	ep = fc_exch_lookup(lport, xid);
	explan = ELS_EXPL_OXID_RXID;
	if (!ep)
		goto reject;
	spin_lock_bh(&ep->ex_lock);
	FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
		    sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
	if (ep->oxid != ntohs(rp->rrq_ox_id))
		goto unlock_reject;
	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
	    ep->rxid != FC_XID_UNKNOWN)
		goto unlock_reject;
	explan = ELS_EXPL_SID;
	if (ep->sid != sid)
		goto unlock_reject;

	/*
	 * Clear Recovery Qualifier state in the exchange, dropping the
	 * reference that was held while the qualifier was active.
	 */
	if (ep->esb_stat & ESB_ST_REC_QUAL) {
		ep->esb_stat &= ~ESB_ST_REC_QUAL;
		atomic_dec(&ep->ex_refcnt);
	}
	if (ep->esb_stat & ESB_ST_COMPLETE)
		fc_exch_timer_cancel(ep);

	spin_unlock_bh(&ep->ex_lock);

	/*
	 * Send LS_ACC.
	 */
	fc_seq_ls_acc(fp);
	goto out;

unlock_reject:
	spin_unlock_bh(&ep->ex_lock);
reject:
	fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan);
out:
	if (ep)
		fc_exch_release(ep);	/* drop hold from fc_exch_lookup */
}
2340
2341
2342
2343
2344
2345 void fc_exch_update_stats(struct fc_lport *lport)
2346 {
2347 struct fc_host_statistics *st;
2348 struct fc_exch_mgr_anchor *ema;
2349 struct fc_exch_mgr *mp;
2350
2351 st = &lport->host_stats;
2352
2353 list_for_each_entry(ema, &lport->ema_list, ema_list) {
2354 mp = ema->mp;
2355 st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
2356 st->fc_no_free_exch_xid +=
2357 atomic_read(&mp->stats.no_free_exch_xid);
2358 st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
2359 st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
2360 st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
2361 st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
2362 }
2363 }
2364 EXPORT_SYMBOL(fc_exch_update_stats);
2365
2366
2367
2368
2369
2370
2371
2372 struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
2373 struct fc_exch_mgr *mp,
2374 bool (*match)(struct fc_frame *))
2375 {
2376 struct fc_exch_mgr_anchor *ema;
2377
2378 ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
2379 if (!ema)
2380 return ema;
2381
2382 ema->mp = mp;
2383 ema->match = match;
2384
2385 list_add_tail(&ema->ema_list, &lport->ema_list);
2386 kref_get(&mp->kref);
2387 return ema;
2388 }
2389 EXPORT_SYMBOL(fc_exch_mgr_add);
2390
2391
2392
2393
2394
2395 static void fc_exch_mgr_destroy(struct kref *kref)
2396 {
2397 struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
2398
2399 mempool_destroy(mp->ep_pool);
2400 free_percpu(mp->pool);
2401 kfree(mp);
2402 }
2403
2404
2405
2406
2407
/**
 * fc_exch_mgr_del() - Delete an EM anchor from a local port's list
 * @ema: The exchange manager anchor to be deleted
 *
 * Drops the anchor's reference on the EM; the EM itself is destroyed
 * once its last reference goes away.
 */
void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
{
	/* remove the EM anchor from the local port's anchor list */
	list_del(&ema->ema_list);
	kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
	kfree(ema);
}
EXPORT_SYMBOL(fc_exch_mgr_del);
2416
2417
2418
2419
2420
2421
2422 int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
2423 {
2424 struct fc_exch_mgr_anchor *ema, *tmp;
2425
2426 list_for_each_entry(ema, &src->ema_list, ema_list) {
2427 if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
2428 goto err;
2429 }
2430 return 0;
2431 err:
2432 list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
2433 fc_exch_mgr_del(ema);
2434 return -ENOMEM;
2435 }
2436 EXPORT_SYMBOL(fc_exch_mgr_list_clone);
2437
2438
2439
2440
2441
2442
2443
2444
2445
/**
 * fc_exch_mgr_alloc() - Allocate an exchange manager
 * @lport:   The local port the new EM will be anchored to
 * @class:   The default FC class for new exchanges
 * @min_xid: The minimum XID handled by the new EM
 * @max_xid: The maximum XID handled by the new EM
 * @match:   The match routine for the new EM
 *
 * The XID range is divided evenly across the per-CPU pools; @min_xid
 * must be aligned on fc_cpu_mask.  Returns the new EM (referenced by
 * the anchor added to @lport) or NULL on failure.
 */
struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
				      enum fc_class class,
				      u16 min_xid, u16 max_xid,
				      bool (*match)(struct fc_frame *))
{
	struct fc_exch_mgr *mp;
	u16 pool_exch_range;
	size_t pool_size;
	unsigned int cpu;
	struct fc_exch_pool *pool;

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
	    (min_xid & fc_cpu_mask) != 0) {
		FC_LPORT_DBG(lport, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
			     min_xid, max_xid);
		return NULL;
	}

	/*
	 * Allocate the exchange manager itself.
	 */
	mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
	if (!mp)
		return NULL;

	mp->class = class;
	mp->lport = lport;

	mp->min_xid = min_xid;

	/*
	 * Shrink the per-pool exchange count (and thus the overall XID
	 * range) so each per-CPU pool fits in PCPU_MIN_UNIT_SIZE.
	 */
	pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
		sizeof(struct fc_exch *);
	if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
		mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
			min_xid - 1;
	} else {
		mp->max_xid = max_xid;
		pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
			(fc_cpu_mask + 1);
	}

	mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
	if (!mp->ep_pool)
		goto free_mp;

	/*
	 * The XID range is divided equally across the per-CPU pools;
	 * each pool indexes exchanges 0..pool_max_index.
	 */
	mp->pool_max_index = pool_exch_range - 1;

	/*
	 * Allocate and initialize the per-CPU exchange pools, each with
	 * room for its slice of exchange pointers after the header.
	 */
	pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
	mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
	if (!mp->pool)
		goto free_mempool;
	for_each_possible_cpu(cpu) {
		pool = per_cpu_ptr(mp->pool, cpu);
		pool->next_index = 0;
		pool->left = FC_XID_UNKNOWN;
		pool->right = FC_XID_UNKNOWN;
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->ex_list);
	}

	kref_init(&mp->kref);
	if (!fc_exch_mgr_add(lport, mp, match)) {
		free_percpu(mp->pool);
		goto free_mempool;
	}

	/*
	 * kref_init() set mp->kref to 1 and fc_exch_mgr_add()
	 * incremented it again; drop the extra reference so the
	 * anchor holds the only one.
	 */
	kref_put(&mp->kref, fc_exch_mgr_destroy);
	return mp;

free_mempool:
	mempool_destroy(mp->ep_pool);
free_mp:
	kfree(mp);
	return NULL;
}
EXPORT_SYMBOL(fc_exch_mgr_alloc);
2536
2537
2538
2539
2540
2541 void fc_exch_mgr_free(struct fc_lport *lport)
2542 {
2543 struct fc_exch_mgr_anchor *ema, *next;
2544
2545 flush_workqueue(fc_exch_workqueue);
2546 list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
2547 fc_exch_mgr_del(ema);
2548 }
2549 EXPORT_SYMBOL(fc_exch_mgr_free);
2550
2551
2552
2553
2554
2555
2556
2557
2558 static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
2559 struct fc_lport *lport,
2560 struct fc_frame_header *fh)
2561 {
2562 struct fc_exch_mgr_anchor *ema;
2563 u16 xid;
2564
2565 if (f_ctl & FC_FC_EX_CTX)
2566 xid = ntohs(fh->fh_ox_id);
2567 else {
2568 xid = ntohs(fh->fh_rx_id);
2569 if (xid == FC_XID_UNKNOWN)
2570 return list_entry(lport->ema_list.prev,
2571 typeof(*ema), ema_list);
2572 }
2573
2574 list_for_each_entry(ema, &lport->ema_list, ema_list) {
2575 if ((xid >= ema->mp->min_xid) &&
2576 (xid <= ema->mp->max_xid))
2577 return ema;
2578 }
2579 return NULL;
2580 }
2581
2582
2583
2584
2585
/**
 * fc_exch_recv() - Handler for received frames
 * @lport: The local port the frame was received on
 * @fp:	   The received frame; always consumed (handled or freed)
 */
void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch_mgr_anchor *ema;
	u32 f_ctl;

	/* Drop frames for an lport that is missing or not yet enabled. */
	if (!lport || lport->state == LPORT_ST_DISABLED) {
		FC_LIBFC_DBG("Receiving frames for an lport that "
			     "has not been initialized correctly\n");
		fc_frame_free(fp);
		return;
	}

	f_ctl = ntoh24(fh->fh_f_ctl);
	ema = fc_find_ema(f_ctl, lport, fh);
	if (!ema) {
		FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor,"
			     "fc_ctl <0x%x>, xid <0x%x>\n",
			     f_ctl,
			     (f_ctl & FC_FC_EX_CTX) ?
			     ntohs(fh->fh_ox_id) :
			     ntohs(fh->fh_rx_id));
		fc_frame_free(fp);
		return;
	}

	/*
	 * Dispatch by end-of-frame delimiter; invalid EOFs are dropped.
	 */
	switch (fr_eof(fp)) {
	case FC_EOF_T:
		/* Strip any fill bytes off the end of the last frame. */
		if (f_ctl & FC_FC_END_SEQ)
			skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
		/* fall through */
	case FC_EOF_N:
		if (fh->fh_type == FC_TYPE_BLS)
			fc_exch_recv_bls(ema->mp, fp);
		else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
			 FC_FC_EX_CTX)
			fc_exch_recv_seq_resp(ema->mp, fp);
		else if (f_ctl & FC_FC_SEQ_CTX)
			fc_exch_recv_resp(ema->mp, fp);
		else	/* no EX_CTX and no SEQ_CTX: a new request */
			fc_exch_recv_req(lport, ema->mp, fp);
		break;
	default:
		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)",
			     fr_eof(fp));
		fc_frame_free(fp);
	}
}
EXPORT_SYMBOL(fc_exch_recv);
2639
2640
2641
2642
2643
2644 int fc_exch_init(struct fc_lport *lport)
2645 {
2646 if (!lport->tt.exch_mgr_reset)
2647 lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
2648
2649 return 0;
2650 }
2651 EXPORT_SYMBOL(fc_exch_init);
2652
2653
2654
2655
2656 int fc_setup_exch_mgr(void)
2657 {
2658 fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
2659 0, SLAB_HWCACHE_ALIGN, NULL);
2660 if (!fc_em_cachep)
2661 return -ENOMEM;
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677 fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
2678 fc_cpu_mask = (1 << fc_cpu_order) - 1;
2679
2680 fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
2681 if (!fc_exch_workqueue)
2682 goto err;
2683 return 0;
2684 err:
2685 kmem_cache_destroy(fc_em_cachep);
2686 return -ENOMEM;
2687 }
2688
2689
2690
2691
/**
 * fc_destroy_exch_mgr() - Tear down module-wide exchange manager state
 *
 * Destroys the exchange workqueue and slab cache created by
 * fc_setup_exch_mgr().
 */
void fc_destroy_exch_mgr(void)
{
	destroy_workqueue(fc_exch_workqueue);
	kmem_cache_destroy(fc_em_cachep);
}