This source file includes the following definitions:
- __core_tpg_get_initiator_node_acl
- core_tpg_get_initiator_node_acl
- core_allocate_nexus_loss_ua
- core_tpg_add_node_to_devs
- target_set_nacl_queue_depth
- target_alloc_node_acl
- target_add_node_acl
- target_tpg_has_node_acl
- core_tpg_check_initiator_node_acl
- core_tpg_wait_for_nacl_pr_ref
- core_tpg_add_initiator_node_acl
- target_shutdown_sessions
- core_tpg_del_initiator_node_acl
- core_tpg_set_initiator_node_queue_depth
- core_tpg_set_initiator_node_tag
- core_tpg_lun_ref_release
- core_tpg_register
- core_tpg_deregister
- core_tpg_alloc_lun
- core_tpg_add_lun
- core_tpg_remove_lun
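
/*
 * Generic Target Portal Group (TPG) handling for the SCSI target core.
 */
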
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);
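
/*
 * __core_tpg_get_initiator_node_acl():
 *
 * The caller must hold tpg->acl_node_mutex.
 */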
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
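
/*
 * core_tpg_get_initiator_node_acl():
 *
 * Look up a node ACL by initiator name, taking a reference on acl_kref.
 */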
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;
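	/*
	 * Obtain acl_kref using the fabric driver provided initiatorname[]
	 * during node ACL endpoint lookup driven by a new se_session login.
	 * The reference is held until se_session shutdown, when it is
	 * released via transport_deregister_session() or
	 * transport_free_session().
	 */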
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
			ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

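/*
 * core_tpg_add_node_to_devs():
 *
 * Create demo-mode MappedLUN entries on the TPG's LUNs for a node ACL.
 */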
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
				lockdep_is_held(&tpg->tpg_lun_mutex));
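		/*
		 * By default, demo_mode_write_protect is ON, meaning
		 * READ-ONLY access to the LUN.
		 */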
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access_ro = false;
		} else {
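			/*
			 * Allow only optical drives to issue R/W in default
			 * RO demo mode.
			 */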
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access_ro = true;
			else
				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->fabric_name,
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access_ro, acl, tpg);
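		/*
		 * Check to see if there are any existing persistent
		 * reservation APTPL pre-registrations that need to be
		 * enabled for this dynamic LUN ACL now.
		 */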
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
				lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}

static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
		struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}

static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name,
		acl->initiatorname);
}

bool target_tpg_has_node_acl(struct se_portal_group *tpg,
		const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);

struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
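	/*
	 * When allocating a dynamically generated node ACL, take an extra
	 * kref now before returning to the fabric driver caller. This
	 * reference is released at session shutdown time within
	 * transport_free_session().
	 */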
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;
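	/*
	 * Only create demo-mode MappedLUNs from the active TPG LUNs if the
	 * fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */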
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->fabric_name,
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->fabric_name,
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}

static void target_shutdown_sessions(struct se_node_acl *acl)
{
	struct se_session *sess;
	unsigned long flags;

restart:
	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
		if (sess->sess_tearing_down)
			continue;

		list_del_init(&sess->sess_acl_list);
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		if (acl->se_tpg->se_tpg_tfo->close_session)
			acl->se_tpg->se_tpg_tfo->close_session(sess);
		goto restart;
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del_init(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	target_shutdown_sessions(acl);

	target_put_nacl(acl);
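	/*
	 * Wait for the last target_put_nacl() to complete in
	 * target_complete_nacl() for the active fabric session
	 * transport_deregister_session() callbacks.
	 */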
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name, acl->initiatorname);

	kfree(acl);
}

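/*
 * core_tpg_set_initiator_node_queue_depth():
 *
 * Change the queue depth for a node ACL, forcing session reinstatement
 * so that active sessions pick up the new value.
 */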
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	struct se_portal_group *tpg = acl->se_tpg;

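	/*
	 * Allow the setting of the queue depth to be idempotent, and do not
	 * force a session shutdown event if the value does not change.
	 */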
	if (acl->queue_depth == queue_depth)
		return 0;
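	/*
	 * Update the queue depth in the node ACL; sessions created after
	 * the shutdown below will use the new value.
	 */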
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

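	/*
	 * Shut down all pending sessions to force session reinstatement.
	 */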
	target_shutdown_sessions(acl);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

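/*
 * core_tpg_set_initiator_node_tag():
 *
 * Initiator node ACL tags are not used internally, but may be used by
 * userspace to emulate aliases or groups. Returns the length of the
 * newly-set tag, or -EINVAL.
 */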
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_shutdown_comp);
}

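/* Does not change se_wwn->priv. */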
int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;

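	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */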
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				true, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
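	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */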
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del_init(&nacl->acl_list);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}

int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	bool lun_access_ro,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	ret = core_alloc_rtpi(lun, dev);
	if (ret)
		goto out_kill_ref;

	if (!(dev->transport->transport_flags &
	     TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access_ro = true;
	else
		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out_kill_ref:
	percpu_ref_exit(&lun->lun_ref);
out:
	return ret;
}

void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
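	/*
	 * rcu_dereference_raw() is protected by the se_lun->lun_group
	 * symlink reference to se_device->dev_group.
	 */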
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lun->lun_shutdown = true;

	core_clear_lun_from_tpg(lun, tpg);
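	/*
	 * Wait for any active I/O references to the percpu se_lun->lun_ref
	 * to be released; lun_ref is also used by the PR and ALUA logic
	 * when referencing a remote target port during ALL_TG_PT=1 and
	 * PR APTPL=1 operation.
	 */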
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);

	lun->lun_shutdown = false;
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}