/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * (c) Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_loop.h"

#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)

static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);

/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	/*
	 * Do not release struct se_cmd's containing a valid TMR pointer.
	 * These will be released directly in tcm_loop_issue_tmr() with
	 * transport_generic_free_cmd().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		return 0;
	/*
	 * Release the struct se_cmd, which will make a callback to release
	 * struct tcm_loop_cmd * in tcm_loop_release_cmd()
	 */
	transport_generic_free_cmd(se_cmd, 0);
	return 1;
}

static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}

static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_puts(m, "tcm_loop_proc_info()\n");
	return 0;
}

static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type tcm_loop_lld_bus = {
	.name			= "tcm_loop_bus",
	.match			= pseudo_lld_bus_match,
	.probe			= tcm_loop_driver_probe,
	.remove			= tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
	.name			= "tcm_loop",
	.bus			= &tcm_loop_lld_bus,
};

/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
static struct device *tcm_loop_primary;

static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc,
			    "TCM_Loop I_T Nexus does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * The loopback transport doesn't support WRITE_GENERATE or
		 * READ_STRIP protection information operations, so go ahead
		 * unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	se_cmd->tag = tl_cmd->sc_cmd_tag;
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, TCM_SIMPLE_TAG,
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
}

/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
		 sc->device->host->host_no, sc->device->id,
		 sc->device->channel, sc->device->lun,
		 sc->cmnd[0], scsi_bufflen(sc));

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		pr_err("Unable to allocate struct tcm_loop_cmd\n");
		set_host_byte(sc, DID_ERROR);
		sc->scsi_done(sc);
		return 0;
	}

	tl_cmd->sc = sc;
	tl_cmd->sc_cmd_tag = sc->request->tag;
	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
	queue_work(tcm_loop_workqueue, &tl_cmd->work);
	return 0;
}

/*
 * Called from SCSI EH process context to issue a TMR (ABORT_TASK or
 * LUN_RESET) to the underlying struct se_lun.
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      u64 lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd = NULL;
	struct se_session *se_sess;
	struct se_portal_group *se_tpg;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_cmd *tl_cmd = NULL;
	struct tcm_loop_tmr *tl_tmr = NULL;
	int ret = TMR_FUNCTION_FAILED, rc;

	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to issue TMR without an active I_T Nexus\n");
		return ret;
	}

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd) {
		pr_err("Unable to allocate memory for tl_cmd\n");
		return ret;
	}

	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
	if (!tl_tmr) {
		pr_err("Unable to allocate memory for tl_tmr\n");
		goto release;
	}
	init_waitqueue_head(&tl_tmr->tl_tmr_wait);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_tpg = &tl_tpg->tl_se_tpg;
	se_sess = tl_tpg->tl_nexus->se_sess;
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
				DMA_NONE, TCM_SIMPLE_TAG,
				&tl_cmd->tl_sense_buf[0]);

	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
	if (rc < 0)
		goto release;

	if (tmr == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = task;

	/*
	 * Locate the underlying TCM struct se_lun
	 */
	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
		ret = TMR_LUN_DOES_NOT_EXIST;
		goto release;
	}
	/*
	 * Queue the TMR to TCM Core and sleep waiting for
	 * tcm_loop_queue_tm_rsp() to wake us up.
	 */
	transport_generic_handle_tmr(se_cmd);
	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
	/*
	 * The TMR has completed, check the response status and
	 * then release allocations.
	 */
	ret = se_cmd->se_tmr_req->response;
release:
	if (se_cmd)
		transport_generic_free_cmd(se_cmd, 1);
	else
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	kfree(tl_tmr);
	return ret;
}

static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 sc->request->tag, TMR_ABORT_TASK);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	if (!tl_hba) {
		pr_err("Unable to perform target reset without an active HBA\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	if (tl_tpg) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return SUCCESS;
	}
	return FAILED;
}
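
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * the TMR path above is normally exercised through the SCSI error handler
 * callbacks registered in tcm_loop_driver_template below. From userspace,
 * assuming sg3_utils is installed and /dev/sdX is a tcm_loop LUN, a LUN
 * reset can be requested with something like:
 *
 *	sg_reset --device /dev/sdX
 *
 * which reaches tcm_loop_device_reset() -> tcm_loop_issue_tmr() with
 * TMR_LUN_RESET. The device node name and tool invocation are assumptions
 * for illustration only.
 */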

static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
	return 0;
}

static struct scsi_host_template tcm_loop_driver_template = {
	.show_info		= tcm_loop_show_info,
	.proc_name		= "tcm_loopback",
	.name			= "TCM_Loopback",
	.queuecommand		= tcm_loop_queuecommand,
	.change_queue_depth	= scsi_change_queue_depth,
	.eh_abort_handler	= tcm_loop_abort_task,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.eh_target_reset_handler = tcm_loop_target_reset,
	.can_queue		= 1024,
	.this_id		= -1,
	.sg_tablesize		= 256,
	.cmd_per_lun		= 1024,
	.max_sectors		= 0xFFFF,
	.use_clustering		= DISABLE_CLUSTERING,
	.slave_alloc		= tcm_loop_slave_alloc,
	.module			= THIS_MODULE,
	.track_queue_depth	= 1,
};

static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}

static int tcm_loop_driver_remove(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;

	tl_hba = to_tcm_loop_hba(dev);
	sh = tl_hba->sh;

	scsi_remove_host(sh);
	scsi_host_put(sh);
	return 0;
}

static void tcm_loop_release_adapter(struct device *dev)
{
	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

	kfree(tl_hba);
}

/*
 * Called from tcm_loop_make_scsi_hba() below
 */
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
	int ret;

	tl_hba->dev.bus = &tcm_loop_lld_bus;
	tl_hba->dev.parent = tcm_loop_primary;
	tl_hba->dev.release = &tcm_loop_release_adapter;
	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

	ret = device_register(&tl_hba->dev);
	if (ret) {
		pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
		return -ENODEV;
	}

	return 0;
}

/*
 * Called from tcm_loop_fabric_init() below to load the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
	int ret;

	tcm_loop_primary = root_device_register("tcm_loop_0");
	if (IS_ERR(tcm_loop_primary)) {
		pr_err("Unable to allocate tcm_loop_primary\n");
		return PTR_ERR(tcm_loop_primary);
	}

	ret = bus_register(&tcm_loop_lld_bus);
	if (ret) {
		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
		goto dev_unreg;
	}

	ret = driver_register(&tcm_loop_driverfs);
	if (ret) {
		pr_err("driver_register() failed for tcm_loop_driverfs\n");
		goto bus_unreg;
	}

	pr_debug("Initialized TCM Loop Core Bus\n");
	return ret;

bus_unreg:
	bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
	root_device_unregister(tcm_loop_primary);
	return ret;
}

static void tcm_loop_release_core_bus(void)
{
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core BUS\n");
}

static char *tcm_loop_get_fabric_name(void)
{
	return "loopback";
}

static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}

static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
	/*
	 * Return the passed NAA identifier for the Target Port
	 */
	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
	/*
	 * This Tag is used when forming the SCSI Name identifier in the
	 * EVPD=1 VPD page 0x83 to represent the SCSI Target Port.
	 */
	return tl_tpg(se_tpg)->tl_tpgt;
}

/*
 * Returning (1) here allows a target_core_mod struct se_node_acl to be
 * generated based upon the incoming fabric-dependent SCSI Initiator Port
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs
 * for local virtual Linux/SCSI LLD passthrough into a VM hypervisor guest
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	return tl_tpg->tl_fabric_prot_type;
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}

static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
			struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

static int tcm_loop_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void tcm_loop_close_session(struct se_session *se_sess)
{
}

static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd with
	 * sc->sc_data_direction of DMA_TO_DEVICE and a struct scatterlist
	 * array, and that memory has already been mapped to the struct se_cmd
	 * by target_submit_cmd_map_sgls() in tcm_loop_submission_work().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 sc, sc->cmnd[0]);

	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = se_cmd->scsi_status;

	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
	/*
	 * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, go ahead
	 * and wake up the wait_queue_head_t in tcm_loop_issue_tmr()
	 */
	atomic_set(&tl_tmr->tmr_complete, 1);
	wake_up(&tl_tmr->tl_tmr_wait);
}

static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
}

static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

/* Start items for tcm_loop_port_cit */

static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}

static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
		       0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);

	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */
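
/*
 * Illustrative configfs usage sketch (editorial addition, not part of the
 * driver): tcm_loop_port_link()/tcm_loop_port_unlink() above are driven by
 * symlinking a target_core_mod backstore into a TPG LUN directory. Assuming
 * an existing iblock backstore and WWN, something like:
 *
 *	mkdir -p /sys/kernel/config/target/loopback/naa.6001405c3e9046bb/tpgt_0/lun/lun_0
 *	ln -s /sys/kernel/config/target/core/iblock_0/my_dev \
 *		/sys/kernel/config/target/loopback/naa.6001405c3e9046bb/tpgt_0/lun/lun_0/virtual_scsi_port
 *
 * triggers tcm_loop_port_link(), and removing the symlink triggers
 * tcm_loop_port_unlink(). The backstore name, WWN and link name here are
 * assumptions for illustration only.
 */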

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);

	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid tcm_loop fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tl_tpg->tl_fabric_prot_type = val;

	return count;
}

CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
	NULL,
};
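
/*
 * Illustrative configfs usage sketch (editorial addition, not part of the
 * driver): with a TPG already created, T10-PI fabric protection can be
 * toggled through the attribute registered above, e.g.:
 *
 *	echo 1 > /sys/kernel/config/target/loopback/naa.6001405c3e9046bb/tpgt_0/attrib/fabric_prot_type
 *	cat /sys/kernel/config/target/loopback/naa.6001405c3e9046bb/tpgt_0/attrib/fabric_prot_type
 *
 * Accepted values are 0, 1 and 3, matching the check in
 * tcm_loop_tpg_attrib_fabric_prot_type_store(). The WWN path is an
 * assumption for illustration.
 */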

/* Start items for tcm_loop_nexus_cit */

static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct se_portal_group *se_tpg;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret = -ENOMEM;

	if (tl_tpg->tl_nexus) {
		pr_debug("tl_tpg->tl_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tl_tpg->tl_se_tpg;

	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
	if (!tl_nexus) {
		pr_err("Unable to allocate struct tcm_loop_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer
	 */
	tl_nexus->se_sess = transport_init_session(
				TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		goto out;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the tcm_loop struct se_portal_group with the
	 * SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tl_nexus->se_sess->se_node_acl) {
		transport_free_session(tl_nexus->se_sess);
		goto out;
	}
	/* Now, register the I_T Nexus as active. */
	transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
			tl_nexus->se_sess, tl_nexus);
	tl_tpg->tl_nexus = tl_nexus;
	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tl_hba), name);
	return 0;

out:
	kfree(tl_nexus);
	return ret;
}

static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;

	tl_nexus = tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
		       atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tpg->tl_hba),
		 tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated Target Port
	 */
	transport_deregister_session(tl_nexus->se_sess);
	tpg->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}

/* End items for tcm_loop_nexus_cit */

static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;

	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	ret = snprintf(page, PAGE_SIZE, "%s\n",
		tl_nexus->se_sess->se_node_acl->initiatorname);

	return ret;
}

static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds max: %d\n",
		       page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
	       i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}
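
/*
 * Illustrative configfs usage sketch (editorial addition, not part of the
 * driver): the nexus attribute registered below establishes or tears down
 * the emulated I_T Nexus handled by tcm_loop_tpg_nexus_store(), e.g. for a
 * SAS-protocol WWN:
 *
 *	echo naa.60014051deadbeef > /sys/kernel/config/target/loopback/naa.6001405c3e9046bb/tpgt_0/nexus
 *	echo NULL > /sys/kernel/config/target/loopback/naa.6001405c3e9046bb/tpgt_0/nexus
 *
 * The initiator and target WWNs above are assumptions for illustration; the
 * prefix (naa., fc., iqn.) must match the protocol chosen when the WWN
 * directory was created.
 */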

static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	const char *status = NULL;
	ssize_t ret = -EINVAL;

	switch (tl_tpg->tl_transport_status) {
	case TCM_TRANSPORT_ONLINE:
		status = "online";
		break;
	case TCM_TRANSPORT_OFFLINE:
		status = "offline";
		break;
	default:
		break;
	}

	if (status)
		ret = snprintf(page, PAGE_SIZE, "%s\n", status);

	return ret;
}

static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);

	if (!strncmp(page, "online", 6)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return count;
	}
	if (!strncmp(page, "offline", 7)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
		if (tl_tpg->tl_nexus) {
			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;

			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
		}
		return count;
	}
	return -EINVAL;
}
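
/*
 * Illustrative configfs usage sketch (editorial addition, not part of the
 * driver): the transport_status attribute registered below lets userspace
 * simulate a transport outage, which tcm_loop_submission_work() then reports
 * as DID_TRANSPORT_DISRUPTED:
 *
 *	echo offline > /sys/kernel/config/target/loopback/naa.6001405c3e9046bb/tpgt_0/transport_status
 *	echo online  > /sys/kernel/config/target/loopback/naa.6001405c3e9046bb/tpgt_0/transport_status
 *
 * The WWN path is an assumption for illustration.
 */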

CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_attr_nexus,
	&tcm_loop_tpg_attr_transport_status,
	NULL,
};

/* Start items for tcm_loop_naa_cit */

static struct se_portal_group *tcm_loop_make_naa_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
			struct tcm_loop_hba, tl_hba_wwn);
	struct tcm_loop_tpg *tl_tpg;
	int ret;
	unsigned long tpgt;

	if (strstr(name, "tpgt_") != name) {
		pr_err("Unable to locate \"tpgt_#\" directory group\n");
		return ERR_PTR(-EINVAL);
	}
	if (kstrtoul(name+5, 10, &tpgt))
		return ERR_PTR(-EINVAL);

	if (tpgt >= TL_TPGS_PER_HBA) {
		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
		       tpgt, TL_TPGS_PER_HBA);
		return ERR_PTR(-EINVAL);
	}
	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
	tl_tpg->tl_hba = tl_hba;
	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as an emulated TCM Target Endpoint
	 */
	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);

	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);

	return &tl_tpg->tl_se_tpg;
}

static void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;

	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual target link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);

	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;

	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
}
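
/*
 * Illustrative configfs usage sketch (editorial addition, not part of the
 * driver): TPGs are created and removed by mkdir/rmdir of "tpgt_#"
 * directories beneath an existing WWN directory, which reaches
 * tcm_loop_make_naa_tpg() and tcm_loop_drop_naa_tpg() above:
 *
 *	mkdir /sys/kernel/config/target/loopback/naa.6001405c3e9046bb/tpgt_0
 *	rmdir /sys/kernel/config/target/loopback/naa.6001405c3e9046bb/tpgt_0
 *
 * The WWN is an assumption for illustration; the tpgt number must be below
 * TL_TPGS_PER_HBA.
 */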

/* End items for tcm_loop_naa_cit */

/* Start items for tcm_loop_cit */

static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
	if (!tl_hba) {
		pr_err("Unable to allocate struct tcm_loop_hba\n");
		return ERR_PTR(-ENOMEM);
	}
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target Port: %s\n",
		       name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
		       tcm_loop_dump_proto_id(tl_hba), name,
		       TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * tcm_loop_setup_hba_bus() calls device_register(&tl_hba->dev), whose
	 * tcm_loop_driver_probe() callback registers the emulated Linux/SCSI
	 * LLD of type struct Scsi_Host at tl_hba->sh.
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		goto out;

	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);

	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}
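
/*
 * Illustrative configfs usage sketch (editorial addition, not part of the
 * driver): an emulated SCSI host is created by mkdir of a WWN directory
 * under the loopback fabric, with the prefix selecting the emulated
 * protocol in tcm_loop_make_scsi_hba() above:
 *
 *	mkdir /sys/kernel/config/target/loopback/naa.6001405c3e9046bb          # SAS
 *	mkdir /sys/kernel/config/target/loopback/fc.20000000c9a1b2c3           # FCP
 *	mkdir /sys/kernel/config/target/loopback/iqn.2003-01.org.example:hba0  # iSCSI
 *
 * The WWNs shown are assumptions for illustration only.
 */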

static void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);

	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
		 tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev.
	 * tcm_loop_release_adapter() will release *tl_hba.
	 */
	device_unregister(&tl_hba->dev);
}

/* Start items for tcm_loop_cit */
static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_attr_version,
	NULL,
};

/* End items for tcm_loop_cit */

static const struct target_core_fabric_ops loop_ops = {
	.module				= THIS_MODULE,
	.name				= "loopback",
	.get_fabric_name		= tcm_loop_get_fabric_name,
	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
	.tpg_get_tag			= tcm_loop_get_tag,
	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
				tcm_loop_check_demo_mode_write_protect,
	.tpg_check_prod_mode_write_protect =
				tcm_loop_check_prod_mode_write_protect,
	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
	.tpg_get_inst_index		= tcm_loop_get_inst_index,
	.check_stop_free		= tcm_loop_check_stop_free,
	.release_cmd			= tcm_loop_release_cmd,
	.shutdown_session		= tcm_loop_shutdown_session,
	.close_session			= tcm_loop_close_session,
	.sess_get_index			= tcm_loop_sess_get_index,
	.write_pending			= tcm_loop_write_pending,
	.write_pending_status		= tcm_loop_write_pending_status,
	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
	.get_cmd_state			= tcm_loop_get_cmd_state,
	.queue_data_in			= tcm_loop_queue_data_in,
	.queue_status			= tcm_loop_queue_status,
	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
	.aborted_task			= tcm_loop_aborted_task,
	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
	.fabric_post_link		= tcm_loop_port_link,
	.fabric_pre_unlink		= tcm_loop_port_unlink,
	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
};

static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;

	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		goto out;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
				sizeof(struct tcm_loop_cmd),
				__alignof__(struct tcm_loop_cmd),
				0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
		goto out_destroy_workqueue;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	ret = target_register_template(&loop_ops);
	if (ret)
		goto out_release_core_bus;

	return 0;

out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
	destroy_workqueue(tcm_loop_workqueue);
out:
	return ret;
}
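
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * once target_core_mod and this module are available, the loopback fabric
 * becomes reachable through configfs and the examples in the comments above
 * apply. A rough manual sequence, normally handled by targetcli, might be:
 *
 *	mount -t configfs none /sys/kernel/config   # if not already mounted
 *	modprobe tcm_loop
 *	mkdir -p /sys/kernel/config/target/loopback # registers the fabric dir
 *
 * The exact steps depend on the distribution's configfs/target setup and are
 * assumptions for illustration only.
 */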

static void __exit tcm_loop_fabric_exit(void)
{
	target_unregister_template(&loop_ops);
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
	destroy_workqueue(tcm_loop_workqueue);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);