1/*
2 * SBP2 target driver (SCSI over IEEE1394 in target mode)
3 *
4 * Copyright (C) 2011  Chris Boot <bootc@bootc.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#define KMSG_COMPONENT "sbp_target"
22#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/types.h>
28#include <linux/string.h>
29#include <linux/configfs.h>
30#include <linux/ctype.h>
31#include <linux/firewire.h>
32#include <linux/firewire-constants.h>
33#include <scsi/scsi.h>
34#include <scsi/scsi_tcq.h>
35#include <target/target_core_base.h>
36#include <target/target_core_backend.h>
37#include <target/target_core_fabric.h>
38#include <target/target_core_fabric_configfs.h>
39#include <target/target_core_configfs.h>
40#include <target/configfs_macros.h>
41#include <asm/unaligned.h>
42
43#include "sbp_target.h"
44
45static const struct target_core_fabric_ops sbp_ops;
46
47/* FireWire address region for management and command block address handlers */
48static const struct fw_address_region sbp_register_region = {
49	.start	= CSR_REGISTER_BASE + 0x10000,
50	.end	= 0x1000000000000ULL,
51};
52
53static const u32 sbp_unit_directory_template[] = {
54	0x1200609e, /* unit_specifier_id: NCITS/T10 */
55	0x13010483, /* unit_sw_version: 1155D Rev 4 */
56	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
57	0x390104d8, /* command_set: SPC-2 */
58	0x3b000000, /* command_set_revision: 0 */
59	0x3c000001, /* firmware_revision: 1 */
60};
61
62#define SESSION_MAINTENANCE_INTERVAL HZ
63
64static atomic_t login_id = ATOMIC_INIT(0);
65
66static void session_maintenance_work(struct work_struct *);
67static int sbp_run_transaction(struct fw_card *, int, int, int, int,
68		unsigned long long, void *, size_t);
69
70static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
71{
72	int ret;
73	__be32 high, low;
74
75	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
76			req->node_addr, req->generation, req->speed,
77			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
78			&high, sizeof(high));
79	if (ret != RCODE_COMPLETE)
80		return ret;
81
82	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
83			req->node_addr, req->generation, req->speed,
84			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
85			&low, sizeof(low));
86	if (ret != RCODE_COMPLETE)
87		return ret;
88
89	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);
90
91	return RCODE_COMPLETE;
92}
93
94static struct sbp_session *sbp_session_find_by_guid(
95	struct sbp_tpg *tpg, u64 guid)
96{
97	struct se_session *se_sess;
98	struct sbp_session *sess, *found = NULL;
99
100	spin_lock_bh(&tpg->se_tpg.session_lock);
101	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
102		sess = se_sess->fabric_sess_ptr;
103		if (sess->guid == guid)
104			found = sess;
105	}
106	spin_unlock_bh(&tpg->se_tpg.session_lock);
107
108	return found;
109}
110
111static struct sbp_login_descriptor *sbp_login_find_by_lun(
112		struct sbp_session *session, struct se_lun *lun)
113{
114	struct sbp_login_descriptor *login, *found = NULL;
115
116	spin_lock_bh(&session->lock);
117	list_for_each_entry(login, &session->login_list, link) {
118		if (login->lun == lun)
119			found = login;
120	}
121	spin_unlock_bh(&session->lock);
122
123	return found;
124}
125
126static int sbp_login_count_all_by_lun(
127		struct sbp_tpg *tpg,
128		struct se_lun *lun,
129		int exclusive)
130{
131	struct se_session *se_sess;
132	struct sbp_session *sess;
133	struct sbp_login_descriptor *login;
134	int count = 0;
135
136	spin_lock_bh(&tpg->se_tpg.session_lock);
137	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
138		sess = se_sess->fabric_sess_ptr;
139
140		spin_lock_bh(&sess->lock);
141		list_for_each_entry(login, &sess->login_list, link) {
142			if (login->lun != lun)
143				continue;
144
145			if (!exclusive || login->exclusive)
146				count++;
147		}
148		spin_unlock_bh(&sess->lock);
149	}
150	spin_unlock_bh(&tpg->se_tpg.session_lock);
151
152	return count;
153}
154
155static struct sbp_login_descriptor *sbp_login_find_by_id(
156	struct sbp_tpg *tpg, int login_id)
157{
158	struct se_session *se_sess;
159	struct sbp_session *sess;
160	struct sbp_login_descriptor *login, *found = NULL;
161
162	spin_lock_bh(&tpg->se_tpg.session_lock);
163	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
164		sess = se_sess->fabric_sess_ptr;
165
166		spin_lock_bh(&sess->lock);
167		list_for_each_entry(login, &sess->login_list, link) {
168			if (login->login_id == login_id)
169				found = login;
170		}
171		spin_unlock_bh(&sess->lock);
172	}
173	spin_unlock_bh(&tpg->se_tpg.session_lock);
174
175	return found;
176}
177
178static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun)
179{
180	struct se_portal_group *se_tpg = &tpg->se_tpg;
181	struct se_lun *se_lun;
182
183	if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
184		return ERR_PTR(-EINVAL);
185
186	spin_lock(&se_tpg->tpg_lun_lock);
187	se_lun = se_tpg->tpg_lun_list[lun];
188
189	if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
190		se_lun = ERR_PTR(-ENODEV);
191
192	spin_unlock(&se_tpg->tpg_lun_lock);
193
194	return se_lun;
195}
196
197static struct sbp_session *sbp_session_create(
198		struct sbp_tpg *tpg,
199		u64 guid)
200{
201	struct sbp_session *sess;
202	int ret;
203	char guid_str[17];
204	struct se_node_acl *se_nacl;
205
206	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
207	if (!sess) {
208		pr_err("failed to allocate session descriptor\n");
209		return ERR_PTR(-ENOMEM);
210	}
211
212	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
213	if (IS_ERR(sess->se_sess)) {
214		pr_err("failed to init se_session\n");
215
216		ret = PTR_ERR(sess->se_sess);
217		kfree(sess);
218		return ERR_PTR(ret);
219	}
220
221	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
222
223	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
224	if (!se_nacl) {
225		pr_warn("Node ACL not found for %s\n", guid_str);
226
227		transport_free_session(sess->se_sess);
228		kfree(sess);
229
230		return ERR_PTR(-EPERM);
231	}
232
233	sess->se_sess->se_node_acl = se_nacl;
234
235	spin_lock_init(&sess->lock);
236	INIT_LIST_HEAD(&sess->login_list);
237	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
238
239	sess->guid = guid;
240
241	transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);
242
243	return sess;
244}
245
246static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
247{
248	spin_lock_bh(&sess->lock);
249	if (!list_empty(&sess->login_list)) {
250		spin_unlock_bh(&sess->lock);
251		return;
252	}
253	spin_unlock_bh(&sess->lock);
254
255	if (cancel_work)
256		cancel_delayed_work_sync(&sess->maint_work);
257
258	transport_deregister_session_configfs(sess->se_sess);
259	transport_deregister_session(sess->se_sess);
260
261	if (sess->card)
262		fw_card_put(sess->card);
263
264	kfree(sess);
265}
266
267static void sbp_target_agent_unregister(struct sbp_target_agent *);
268
269static void sbp_login_release(struct sbp_login_descriptor *login,
270	bool cancel_work)
271{
272	struct sbp_session *sess = login->sess;
273
274	/* FIXME: abort/wait on tasks */
275
276	sbp_target_agent_unregister(login->tgt_agt);
277
278	if (sess) {
279		spin_lock_bh(&sess->lock);
280		list_del(&login->link);
281		spin_unlock_bh(&sess->lock);
282
283		sbp_session_release(sess, cancel_work);
284	}
285
286	kfree(login);
287}
288
289static struct sbp_target_agent *sbp_target_agent_register(
290	struct sbp_login_descriptor *);
291
292static void sbp_management_request_login(
293	struct sbp_management_agent *agent, struct sbp_management_request *req,
294	int *status_data_size)
295{
296	struct sbp_tport *tport = agent->tport;
297	struct sbp_tpg *tpg = tport->tpg;
298	struct se_lun *se_lun;
299	int ret;
300	u64 guid;
301	struct sbp_session *sess;
302	struct sbp_login_descriptor *login;
303	struct sbp_login_response_block *response;
304	int login_response_len;
305
306	se_lun = sbp_get_lun_from_tpg(tpg,
307			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
308	if (IS_ERR(se_lun)) {
309		pr_notice("login to unknown LUN: %d\n",
310			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
311
312		req->status.status = cpu_to_be32(
313			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
314			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
315		return;
316	}
317
318	ret = read_peer_guid(&guid, req);
319	if (ret != RCODE_COMPLETE) {
320		pr_warn("failed to read peer GUID: %d\n", ret);
321
322		req->status.status = cpu_to_be32(
323			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
324			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
325		return;
326	}
327
328	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
329		se_lun->unpacked_lun, guid);
330
331	sess = sbp_session_find_by_guid(tpg, guid);
332	if (sess) {
333		login = sbp_login_find_by_lun(sess, se_lun);
334		if (login) {
335			pr_notice("initiator already logged-in\n");
336
337			/*
338			 * SBP-2 R4 says we should return access denied, but
339			 * that can confuse initiators. Instead we need to
340			 * treat this like a reconnect, but send the login
341			 * response block like a fresh login.
342			 *
343			 * This is required particularly in the case of Apple
344			 * devices booting off the FireWire target, where
345			 * the firmware has an active login to the target. When
346			 * the OS takes control of the session it issues its own
347			 * LOGIN rather than a RECONNECT. To avoid the machine
348			 * waiting until the reconnect_hold expires, we can skip
349			 * the ACCESS_DENIED errors to speed things up.
350			 */
351
352			goto already_logged_in;
353		}
354	}
355
356	/*
357	 * check exclusive bit in login request
358	 * reject with access_denied if any logins present
359	 */
360	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
361			sbp_login_count_all_by_lun(tpg, se_lun, 0)) {
362		pr_warn("refusing exclusive login with other active logins\n");
363
364		req->status.status = cpu_to_be32(
365			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
366			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
367		return;
368	}
369
370	/*
371	 * check exclusive bit in any existing login descriptor
372	 * reject with access_denied if any exclusive logins present
373	 */
374	if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) {
375		pr_warn("refusing login while another exclusive login present\n");
376
377		req->status.status = cpu_to_be32(
378			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
379			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
380		return;
381	}
382
383	/*
384	 * check we haven't exceeded the number of allowed logins
385	 * reject with resources_unavailable if we have
386	 */
387	if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >=
388			tport->max_logins_per_lun) {
389		pr_warn("max number of logins reached\n");
390
391		req->status.status = cpu_to_be32(
392			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
393			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
394		return;
395	}
396
397	if (!sess) {
398		sess = sbp_session_create(tpg, guid);
399		if (IS_ERR(sess)) {
400			switch (PTR_ERR(sess)) {
401			case -EPERM:
402				ret = SBP_STATUS_ACCESS_DENIED;
403				break;
404			default:
405				ret = SBP_STATUS_RESOURCES_UNAVAIL;
406				break;
407			}
408
409			req->status.status = cpu_to_be32(
410				STATUS_BLOCK_RESP(
411					STATUS_RESP_REQUEST_COMPLETE) |
412				STATUS_BLOCK_SBP_STATUS(ret));
413			return;
414		}
415
416		sess->node_id = req->node_addr;
417		sess->card = fw_card_get(req->card);
418		sess->generation = req->generation;
419		sess->speed = req->speed;
420
421		schedule_delayed_work(&sess->maint_work,
422				SESSION_MAINTENANCE_INTERVAL);
423	}
424
425	/* only take the latest reconnect_hold into account */
426	sess->reconnect_hold = min(
427		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
428		tport->max_reconnect_timeout) - 1;
429
430	login = kmalloc(sizeof(*login), GFP_KERNEL);
431	if (!login) {
432		pr_err("failed to allocate login descriptor\n");
433
434		sbp_session_release(sess, true);
435
436		req->status.status = cpu_to_be32(
437			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
438			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
439		return;
440	}
441
442	login->sess = sess;
443	login->lun = se_lun;
444	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
445	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
446	login->login_id = atomic_inc_return(&login_id);
447
448	login->tgt_agt = sbp_target_agent_register(login);
449	if (IS_ERR(login->tgt_agt)) {
450		ret = PTR_ERR(login->tgt_agt);
451		pr_err("failed to map command block handler: %d\n", ret);
452
453		sbp_session_release(sess, true);
454		kfree(login);
455
456		req->status.status = cpu_to_be32(
457			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
458			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
459		return;
460	}
461
462	spin_lock_bh(&sess->lock);
463	list_add_tail(&login->link, &sess->login_list);
464	spin_unlock_bh(&sess->lock);
465
466already_logged_in:
467	response = kzalloc(sizeof(*response), GFP_KERNEL);
468	if (!response) {
469		pr_err("failed to allocate login response block\n");
470
471		sbp_login_release(login, true);
472
473		req->status.status = cpu_to_be32(
474			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
475			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
476		return;
477	}
478
479	login_response_len = clamp_val(
480			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
481			12, sizeof(*response));
482	response->misc = cpu_to_be32(
483		((login_response_len & 0xffff) << 16) |
484		(login->login_id & 0xffff));
485	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
486	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
487		&response->command_block_agent);
488
489	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
490		sess->node_id, sess->generation, sess->speed,
491		sbp2_pointer_to_addr(&req->orb.ptr2), response,
492		login_response_len);
493	if (ret != RCODE_COMPLETE) {
494		pr_debug("failed to write login response block: %x\n", ret);
495
496		kfree(response);
497		sbp_login_release(login, true);
498
499		req->status.status = cpu_to_be32(
500			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
501			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
502		return;
503	}
504
505	kfree(response);
506
507	req->status.status = cpu_to_be32(
508		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
509		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
510}
511
512static void sbp_management_request_query_logins(
513	struct sbp_management_agent *agent, struct sbp_management_request *req,
514	int *status_data_size)
515{
516	pr_notice("QUERY LOGINS not implemented\n");
517	/* FIXME: implement */
518
519	req->status.status = cpu_to_be32(
520		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
521		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
522}
523
524static void sbp_management_request_reconnect(
525	struct sbp_management_agent *agent, struct sbp_management_request *req,
526	int *status_data_size)
527{
528	struct sbp_tport *tport = agent->tport;
529	struct sbp_tpg *tpg = tport->tpg;
530	int ret;
531	u64 guid;
532	struct sbp_login_descriptor *login;
533
534	ret = read_peer_guid(&guid, req);
535	if (ret != RCODE_COMPLETE) {
536		pr_warn("failed to read peer GUID: %d\n", ret);
537
538		req->status.status = cpu_to_be32(
539			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
540			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
541		return;
542	}
543
544	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);
545
546	login = sbp_login_find_by_id(tpg,
547		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
548
549	if (!login) {
550		pr_err("mgt_agent RECONNECT unknown login ID\n");
551
552		req->status.status = cpu_to_be32(
553			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
554			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
555		return;
556	}
557
558	if (login->sess->guid != guid) {
559		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");
560
561		req->status.status = cpu_to_be32(
562			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
563			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
564		return;
565	}
566
567	spin_lock_bh(&login->sess->lock);
568	if (login->sess->card)
569		fw_card_put(login->sess->card);
570
571	/* update the node details */
572	login->sess->generation = req->generation;
573	login->sess->node_id = req->node_addr;
574	login->sess->card = fw_card_get(req->card);
575	login->sess->speed = req->speed;
576	spin_unlock_bh(&login->sess->lock);
577
578	req->status.status = cpu_to_be32(
579		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
580		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
581}
582
583static void sbp_management_request_logout(
584	struct sbp_management_agent *agent, struct sbp_management_request *req,
585	int *status_data_size)
586{
587	struct sbp_tport *tport = agent->tport;
588	struct sbp_tpg *tpg = tport->tpg;
589	int id;
590	struct sbp_login_descriptor *login;
591
592	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
593
594	login = sbp_login_find_by_id(tpg, id);
595	if (!login) {
596		pr_warn("cannot find login: %d\n", id);
597
598		req->status.status = cpu_to_be32(
599			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
600			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
601		return;
602	}
603
604	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
605		login->lun->unpacked_lun, login->login_id);
606
607	if (req->node_addr != login->sess->node_id) {
608		pr_warn("logout from different node ID\n");
609
610		req->status.status = cpu_to_be32(
611			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
612			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
613		return;
614	}
615
616	sbp_login_release(login, true);
617
618	req->status.status = cpu_to_be32(
619		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
620		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
621}
622
623static void session_check_for_reset(struct sbp_session *sess)
624{
625	bool card_valid = false;
626
627	spin_lock_bh(&sess->lock);
628
629	if (sess->card) {
630		spin_lock_irq(&sess->card->lock);
631		card_valid = (sess->card->local_node != NULL);
632		spin_unlock_irq(&sess->card->lock);
633
634		if (!card_valid) {
635			fw_card_put(sess->card);
636			sess->card = NULL;
637		}
638	}
639
640	if (!card_valid || (sess->generation != sess->card->generation)) {
641		pr_info("Waiting for reconnect from node: %016llx\n",
642				sess->guid);
643
644		sess->node_id = -1;
645		sess->reconnect_expires = get_jiffies_64() +
646			((sess->reconnect_hold + 1) * HZ);
647	}
648
649	spin_unlock_bh(&sess->lock);
650}
651
652static void session_reconnect_expired(struct sbp_session *sess)
653{
654	struct sbp_login_descriptor *login, *temp;
655	LIST_HEAD(login_list);
656
657	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);
658
659	spin_lock_bh(&sess->lock);
660	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
661		login->sess = NULL;
662		list_move_tail(&login->link, &login_list);
663	}
664	spin_unlock_bh(&sess->lock);
665
666	list_for_each_entry_safe(login, temp, &login_list, link) {
667		list_del(&login->link);
668		sbp_login_release(login, false);
669	}
670
671	sbp_session_release(sess, false);
672}
673
674static void session_maintenance_work(struct work_struct *work)
675{
676	struct sbp_session *sess = container_of(work, struct sbp_session,
677			maint_work.work);
678
679	/* could be called while tearing down the session */
680	spin_lock_bh(&sess->lock);
681	if (list_empty(&sess->login_list)) {
682		spin_unlock_bh(&sess->lock);
683		return;
684	}
685	spin_unlock_bh(&sess->lock);
686
687	if (sess->node_id != -1) {
688		/* check for bus reset and make node_id invalid */
689		session_check_for_reset(sess);
690
691		schedule_delayed_work(&sess->maint_work,
692				SESSION_MAINTENANCE_INTERVAL);
693	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
694		/* still waiting for reconnect */
695		schedule_delayed_work(&sess->maint_work,
696				SESSION_MAINTENANCE_INTERVAL);
697	} else {
698		/* reconnect timeout has expired */
699		session_reconnect_expired(sess);
700	}
701}
702
703static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
704		struct sbp_target_agent *agent)
705{
706	int state;
707
708	switch (tcode) {
709	case TCODE_READ_QUADLET_REQUEST:
710		pr_debug("tgt_agent AGENT_STATE READ\n");
711
712		spin_lock_bh(&agent->lock);
713		state = agent->state;
714		spin_unlock_bh(&agent->lock);
715
716		*(__be32 *)data = cpu_to_be32(state);
717
718		return RCODE_COMPLETE;
719
720	case TCODE_WRITE_QUADLET_REQUEST:
721		/* ignored */
722		return RCODE_COMPLETE;
723
724	default:
725		return RCODE_TYPE_ERROR;
726	}
727}
728
729static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
730		struct sbp_target_agent *agent)
731{
732	switch (tcode) {
733	case TCODE_WRITE_QUADLET_REQUEST:
734		pr_debug("tgt_agent AGENT_RESET\n");
735		spin_lock_bh(&agent->lock);
736		agent->state = AGENT_STATE_RESET;
737		spin_unlock_bh(&agent->lock);
738		return RCODE_COMPLETE;
739
740	default:
741		return RCODE_TYPE_ERROR;
742	}
743}
744
745static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
746		struct sbp_target_agent *agent)
747{
748	struct sbp2_pointer *ptr = data;
749
750	switch (tcode) {
751	case TCODE_WRITE_BLOCK_REQUEST:
752		spin_lock_bh(&agent->lock);
753		if (agent->state != AGENT_STATE_SUSPENDED &&
754				agent->state != AGENT_STATE_RESET) {
755			spin_unlock_bh(&agent->lock);
756			pr_notice("Ignoring ORB_POINTER write while active.\n");
757			return RCODE_CONFLICT_ERROR;
758		}
759		agent->state = AGENT_STATE_ACTIVE;
760		spin_unlock_bh(&agent->lock);
761
762		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
763		agent->doorbell = false;
764
765		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
766				agent->orb_pointer);
767
768		queue_work(system_unbound_wq, &agent->work);
769
770		return RCODE_COMPLETE;
771
772	case TCODE_READ_BLOCK_REQUEST:
773		pr_debug("tgt_agent ORB_POINTER READ\n");
774		spin_lock_bh(&agent->lock);
775		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
776		spin_unlock_bh(&agent->lock);
777		return RCODE_COMPLETE;
778
779	default:
780		return RCODE_TYPE_ERROR;
781	}
782}
783
784static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
785		struct sbp_target_agent *agent)
786{
787	switch (tcode) {
788	case TCODE_WRITE_QUADLET_REQUEST:
789		spin_lock_bh(&agent->lock);
790		if (agent->state != AGENT_STATE_SUSPENDED) {
791			spin_unlock_bh(&agent->lock);
792			pr_debug("Ignoring DOORBELL while active.\n");
793			return RCODE_CONFLICT_ERROR;
794		}
795		agent->state = AGENT_STATE_ACTIVE;
796		spin_unlock_bh(&agent->lock);
797
798		agent->doorbell = true;
799
800		pr_debug("tgt_agent DOORBELL\n");
801
802		queue_work(system_unbound_wq, &agent->work);
803
804		return RCODE_COMPLETE;
805
806	case TCODE_READ_QUADLET_REQUEST:
807		return RCODE_COMPLETE;
808
809	default:
810		return RCODE_TYPE_ERROR;
811	}
812}
813
814static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
815		int tcode, void *data, struct sbp_target_agent *agent)
816{
817	switch (tcode) {
818	case TCODE_WRITE_QUADLET_REQUEST:
819		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
820		/* ignored as we don't send unsolicited status */
821		return RCODE_COMPLETE;
822
823	case TCODE_READ_QUADLET_REQUEST:
824		return RCODE_COMPLETE;
825
826	default:
827		return RCODE_TYPE_ERROR;
828	}
829}
830
831static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
832		int tcode, int destination, int source, int generation,
833		unsigned long long offset, void *data, size_t length,
834		void *callback_data)
835{
836	struct sbp_target_agent *agent = callback_data;
837	struct sbp_session *sess = agent->login->sess;
838	int sess_gen, sess_node, rcode;
839
840	spin_lock_bh(&sess->lock);
841	sess_gen = sess->generation;
842	sess_node = sess->node_id;
843	spin_unlock_bh(&sess->lock);
844
845	if (generation != sess_gen) {
846		pr_notice("ignoring request with wrong generation\n");
847		rcode = RCODE_TYPE_ERROR;
848		goto out;
849	}
850
851	if (source != sess_node) {
852		pr_notice("ignoring request from foreign node (%x != %x)\n",
853				source, sess_node);
854		rcode = RCODE_TYPE_ERROR;
855		goto out;
856	}
857
858	/* turn offset into the offset from the start of the block */
859	offset -= agent->handler.offset;
860
861	if (offset == 0x00 && length == 4) {
862		/* AGENT_STATE */
863		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
864	} else if (offset == 0x04 && length == 4) {
865		/* AGENT_RESET */
866		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
867	} else if (offset == 0x08 && length == 8) {
868		/* ORB_POINTER */
869		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
870	} else if (offset == 0x10 && length == 4) {
871		/* DOORBELL */
872		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
873	} else if (offset == 0x14 && length == 4) {
874		/* UNSOLICITED_STATUS_ENABLE */
875		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
876				data, agent);
877	} else {
878		rcode = RCODE_ADDRESS_ERROR;
879	}
880
881out:
882	fw_send_response(card, request, rcode);
883}
884
885static void sbp_handle_command(struct sbp_target_request *);
886static int sbp_send_status(struct sbp_target_request *);
887static void sbp_free_request(struct sbp_target_request *);
888
889static void tgt_agent_process_work(struct work_struct *work)
890{
891	struct sbp_target_request *req =
892		container_of(work, struct sbp_target_request, work);
893
894	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
895			req->orb_pointer,
896			sbp2_pointer_to_addr(&req->orb.next_orb),
897			sbp2_pointer_to_addr(&req->orb.data_descriptor),
898			be32_to_cpu(req->orb.misc));
899
900	if (req->orb_pointer >> 32)
901		pr_debug("ORB with high bits set\n");
902
903	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
904		case 0:/* Format specified by this standard */
905			sbp_handle_command(req);
906			return;
907		case 1: /* Reserved for future standardization */
908		case 2: /* Vendor-dependent */
909			req->status.status |= cpu_to_be32(
910					STATUS_BLOCK_RESP(
911						STATUS_RESP_REQUEST_COMPLETE) |
912					STATUS_BLOCK_DEAD(0) |
913					STATUS_BLOCK_LEN(1) |
914					STATUS_BLOCK_SBP_STATUS(
915						SBP_STATUS_REQ_TYPE_NOTSUPP));
916			sbp_send_status(req);
917			sbp_free_request(req);
918			return;
919		case 3: /* Dummy ORB */
920			req->status.status |= cpu_to_be32(
921					STATUS_BLOCK_RESP(
922						STATUS_RESP_REQUEST_COMPLETE) |
923					STATUS_BLOCK_DEAD(0) |
924					STATUS_BLOCK_LEN(1) |
925					STATUS_BLOCK_SBP_STATUS(
926						SBP_STATUS_DUMMY_ORB_COMPLETE));
927			sbp_send_status(req);
928			sbp_free_request(req);
929			return;
930		default:
931			BUG();
932	}
933}
934
935/* used to double-check we haven't been issued an AGENT_RESET */
936static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
937{
938	bool active;
939
940	spin_lock_bh(&agent->lock);
941	active = (agent->state == AGENT_STATE_ACTIVE);
942	spin_unlock_bh(&agent->lock);
943
944	return active;
945}
946
947static void tgt_agent_fetch_work(struct work_struct *work)
948{
949	struct sbp_target_agent *agent =
950		container_of(work, struct sbp_target_agent, work);
951	struct sbp_session *sess = agent->login->sess;
952	struct sbp_target_request *req;
953	int ret;
954	bool doorbell = agent->doorbell;
955	u64 next_orb = agent->orb_pointer;
956
957	while (next_orb && tgt_agent_check_active(agent)) {
958		req = kzalloc(sizeof(*req), GFP_KERNEL);
959		if (!req) {
960			spin_lock_bh(&agent->lock);
961			agent->state = AGENT_STATE_DEAD;
962			spin_unlock_bh(&agent->lock);
963			return;
964		}
965
966		req->login = agent->login;
967		req->orb_pointer = next_orb;
968
969		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
970					req->orb_pointer >> 32));
971		req->status.orb_low = cpu_to_be32(
972				req->orb_pointer & 0xfffffffc);
973
974		/* read in the ORB */
975		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
976				sess->node_id, sess->generation, sess->speed,
977				req->orb_pointer, &req->orb, sizeof(req->orb));
978		if (ret != RCODE_COMPLETE) {
979			pr_debug("tgt_orb fetch failed: %x\n", ret);
980			req->status.status |= cpu_to_be32(
981					STATUS_BLOCK_SRC(
982						STATUS_SRC_ORB_FINISHED) |
983					STATUS_BLOCK_RESP(
984						STATUS_RESP_TRANSPORT_FAILURE) |
985					STATUS_BLOCK_DEAD(1) |
986					STATUS_BLOCK_LEN(1) |
987					STATUS_BLOCK_SBP_STATUS(
988						SBP_STATUS_UNSPECIFIED_ERROR));
989			spin_lock_bh(&agent->lock);
990			agent->state = AGENT_STATE_DEAD;
991			spin_unlock_bh(&agent->lock);
992
993			sbp_send_status(req);
994			sbp_free_request(req);
995			return;
996		}
997
998		/* check the next_ORB field */
999		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
1000			next_orb = 0;
1001			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
1002						STATUS_SRC_ORB_FINISHED));
1003		} else {
1004			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
1005			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
1006						STATUS_SRC_ORB_CONTINUING));
1007		}
1008
1009		if (tgt_agent_check_active(agent) && !doorbell) {
1010			INIT_WORK(&req->work, tgt_agent_process_work);
1011			queue_work(system_unbound_wq, &req->work);
1012		} else {
1013			/* don't process this request, just check next_ORB */
1014			sbp_free_request(req);
1015		}
1016
1017		spin_lock_bh(&agent->lock);
1018		doorbell = agent->doorbell = false;
1019
1020		/* check if we should carry on processing */
1021		if (next_orb)
1022			agent->orb_pointer = next_orb;
1023		else
1024			agent->state = AGENT_STATE_SUSPENDED;
1025
1026		spin_unlock_bh(&agent->lock);
1027	};
1028}
1029
1030static struct sbp_target_agent *sbp_target_agent_register(
1031		struct sbp_login_descriptor *login)
1032{
1033	struct sbp_target_agent *agent;
1034	int ret;
1035
1036	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1037	if (!agent)
1038		return ERR_PTR(-ENOMEM);
1039
1040	spin_lock_init(&agent->lock);
1041
1042	agent->handler.length = 0x20;
1043	agent->handler.address_callback = tgt_agent_rw;
1044	agent->handler.callback_data = agent;
1045
1046	agent->login = login;
1047	agent->state = AGENT_STATE_RESET;
1048	INIT_WORK(&agent->work, tgt_agent_fetch_work);
1049	agent->orb_pointer = 0;
1050	agent->doorbell = false;
1051
1052	ret = fw_core_add_address_handler(&agent->handler,
1053			&sbp_register_region);
1054	if (ret < 0) {
1055		kfree(agent);
1056		return ERR_PTR(ret);
1057	}
1058
1059	return agent;
1060}
1061
1062static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
1063{
1064	fw_core_remove_address_handler(&agent->handler);
1065	cancel_work_sync(&agent->work);
1066	kfree(agent);
1067}
1068
1069/*
1070 * Simple wrapper around fw_run_transaction that retries the transaction several
1071 * times in case of failure, with an exponential backoff.
1072 */
1073static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
1074		int generation, int speed, unsigned long long offset,
1075		void *payload, size_t length)
1076{
1077	int attempt, ret, delay;
1078
1079	for (attempt = 1; attempt <= 5; attempt++) {
1080		ret = fw_run_transaction(card, tcode, destination_id,
1081				generation, speed, offset, payload, length);
1082
1083		switch (ret) {
1084		case RCODE_COMPLETE:
1085		case RCODE_TYPE_ERROR:
1086		case RCODE_ADDRESS_ERROR:
1087		case RCODE_GENERATION:
1088			return ret;
1089
1090		default:
1091			delay = 5 * attempt * attempt;
1092			usleep_range(delay, delay * 2);
1093		}
1094	}
1095
1096	return ret;
1097}
1098
1099/*
1100 * Wrapper around sbp_run_transaction that gets the card, destination,
1101 * generation and speed out of the request's session.
1102 */
1103static int sbp_run_request_transaction(struct sbp_target_request *req,
1104		int tcode, unsigned long long offset, void *payload,
1105		size_t length)
1106{
1107	struct sbp_login_descriptor *login = req->login;
1108	struct sbp_session *sess = login->sess;
1109	struct fw_card *card;
1110	int node_id, generation, speed, ret;
1111
1112	spin_lock_bh(&sess->lock);
1113	card = fw_card_get(sess->card);
1114	node_id = sess->node_id;
1115	generation = sess->generation;
1116	speed = sess->speed;
1117	spin_unlock_bh(&sess->lock);
1118
1119	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
1120			offset, payload, length);
1121
1122	fw_card_put(card);
1123
1124	return ret;
1125}
1126
1127static int sbp_fetch_command(struct sbp_target_request *req)
1128{
1129	int ret, cmd_len, copy_len;
1130
1131	cmd_len = scsi_command_size(req->orb.command_block);
1132
1133	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
1134	if (!req->cmd_buf)
1135		return -ENOMEM;
1136
1137	memcpy(req->cmd_buf, req->orb.command_block,
1138		min_t(int, cmd_len, sizeof(req->orb.command_block)));
1139
1140	if (cmd_len > sizeof(req->orb.command_block)) {
1141		pr_debug("sbp_fetch_command: filling in long command\n");
1142		copy_len = cmd_len - sizeof(req->orb.command_block);
1143
1144		ret = sbp_run_request_transaction(req,
1145				TCODE_READ_BLOCK_REQUEST,
1146				req->orb_pointer + sizeof(req->orb),
1147				req->cmd_buf + sizeof(req->orb.command_block),
1148				copy_len);
1149		if (ret != RCODE_COMPLETE)
1150			return -EIO;
1151	}
1152
1153	return 0;
1154}
1155
1156static int sbp_fetch_page_table(struct sbp_target_request *req)
1157{
1158	int pg_tbl_sz, ret;
1159	struct sbp_page_table_entry *pg_tbl;
1160
1161	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
1162		return 0;
1163
1164	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
1165		sizeof(struct sbp_page_table_entry);
1166
1167	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
1168	if (!pg_tbl)
1169		return -ENOMEM;
1170
1171	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
1172			sbp2_pointer_to_addr(&req->orb.data_descriptor),
1173			pg_tbl, pg_tbl_sz);
1174	if (ret != RCODE_COMPLETE) {
1175		kfree(pg_tbl);
1176		return -EIO;
1177	}
1178
1179	req->pg_tbl = pg_tbl;
1180	return 0;
1181}
1182
1183static void sbp_calc_data_length_direction(struct sbp_target_request *req,
1184	u32 *data_len, enum dma_data_direction *data_dir)
1185{
1186	int data_size, direction, idx;
1187
1188	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1189	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
1190
1191	if (!data_size) {
1192		*data_len = 0;
1193		*data_dir = DMA_NONE;
1194		return;
1195	}
1196
1197	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1198
1199	if (req->pg_tbl) {
1200		*data_len = 0;
1201		for (idx = 0; idx < data_size; idx++) {
1202			*data_len += be16_to_cpu(
1203					req->pg_tbl[idx].segment_length);
1204		}
1205	} else {
1206		*data_len = data_size;
1207	}
1208}
1209
1210static void sbp_handle_command(struct sbp_target_request *req)
1211{
1212	struct sbp_login_descriptor *login = req->login;
1213	struct sbp_session *sess = login->sess;
1214	int ret, unpacked_lun;
1215	u32 data_length;
1216	enum dma_data_direction data_dir;
1217
1218	ret = sbp_fetch_command(req);
1219	if (ret) {
1220		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
1221		goto err;
1222	}
1223
1224	ret = sbp_fetch_page_table(req);
1225	if (ret) {
1226		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
1227			ret);
1228		goto err;
1229	}
1230
1231	unpacked_lun = req->login->lun->unpacked_lun;
1232	sbp_calc_data_length_direction(req, &data_length, &data_dir);
1233
1234	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
1235			req->orb_pointer, unpacked_lun, data_length, data_dir);
1236
1237	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
1238			      req->sense_buf, unpacked_lun, data_length,
1239			      TCM_SIMPLE_TAG, data_dir, 0))
1240		goto err;
1241
1242	return;
1243
1244err:
1245	req->status.status |= cpu_to_be32(
1246		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1247		STATUS_BLOCK_DEAD(0) |
1248		STATUS_BLOCK_LEN(1) |
1249		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1250	sbp_send_status(req);
1251	sbp_free_request(req);
1252}
1253
1254/*
1255 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
1256 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
1257 */
1258static int sbp_rw_data(struct sbp_target_request *req)
1259{
1260	struct sbp_session *sess = req->login->sess;
1261	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
1262		generation, num_pte, length, tfr_length,
1263		rcode = RCODE_COMPLETE;
1264	struct sbp_page_table_entry *pte;
1265	unsigned long long offset;
1266	struct fw_card *card;
1267	struct sg_mapping_iter iter;
1268
1269	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
1270		tcode = TCODE_WRITE_BLOCK_REQUEST;
1271		sg_miter_flags = SG_MITER_FROM_SG;
1272	} else {
1273		tcode = TCODE_READ_BLOCK_REQUEST;
1274		sg_miter_flags = SG_MITER_TO_SG;
1275	}
1276
1277	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
1278	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
1279
1280	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
1281	if (pg_size) {
1282		pr_err("sbp_run_transaction: page size ignored\n");
1283		pg_size = 0x100 << pg_size;
1284	}
1285
1286	spin_lock_bh(&sess->lock);
1287	card = fw_card_get(sess->card);
1288	node_id = sess->node_id;
1289	generation = sess->generation;
1290	spin_unlock_bh(&sess->lock);
1291
1292	if (req->pg_tbl) {
1293		pte = req->pg_tbl;
1294		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1295
1296		offset = 0;
1297		length = 0;
1298	} else {
1299		pte = NULL;
1300		num_pte = 0;
1301
1302		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
1303		length = req->se_cmd.data_length;
1304	}
1305
1306	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
1307		sg_miter_flags);
1308
1309	while (length || num_pte) {
1310		if (!length) {
1311			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
1312				be32_to_cpu(pte->segment_base_lo);
1313			length = be16_to_cpu(pte->segment_length);
1314
1315			pte++;
1316			num_pte--;
1317		}
1318
1319		sg_miter_next(&iter);
1320
1321		tfr_length = min3(length, max_payload, (int)iter.length);
1322
1323		/* FIXME: take page_size into account */
1324
1325		rcode = sbp_run_transaction(card, tcode, node_id,
1326				generation, speed,
1327				offset, iter.addr, tfr_length);
1328
1329		if (rcode != RCODE_COMPLETE)
1330			break;
1331
1332		length -= tfr_length;
1333		offset += tfr_length;
1334		iter.consumed = tfr_length;
1335	}
1336
1337	sg_miter_stop(&iter);
1338	fw_card_put(card);
1339
1340	if (rcode == RCODE_COMPLETE) {
1341		WARN_ON(length != 0);
1342		return 0;
1343	} else {
1344		return -EIO;
1345	}
1346}
1347
1348static int sbp_send_status(struct sbp_target_request *req)
1349{
1350	int ret, length;
1351	struct sbp_login_descriptor *login = req->login;
1352
1353	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
1354
1355	ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
1356			login->status_fifo_addr, &req->status, length);
1357	if (ret != RCODE_COMPLETE) {
1358		pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
1359		return -EIO;
1360	}
1361
1362	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
1363			req->orb_pointer);
1364
1365	return 0;
1366}
1367
1368static void sbp_sense_mangle(struct sbp_target_request *req)
1369{
1370	struct se_cmd *se_cmd = &req->se_cmd;
1371	u8 *sense = req->sense_buf;
1372	u8 *status = req->status.data;
1373
1374	WARN_ON(se_cmd->scsi_sense_length < 18);
1375
1376	switch (sense[0] & 0x7f) { 		/* sfmt */
1377	case 0x70: /* current, fixed */
1378		status[0] = 0 << 6;
1379		break;
1380	case 0x71: /* deferred, fixed */
1381		status[0] = 1 << 6;
1382		break;
1383	case 0x72: /* current, descriptor */
1384	case 0x73: /* deferred, descriptor */
1385	default:
1386		/*
1387		 * TODO: SBP-3 specifies what we should do with descriptor
1388		 * format sense data
1389		 */
1390		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
1391			sense[0]);
1392		req->status.status |= cpu_to_be32(
1393			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1394			STATUS_BLOCK_DEAD(0) |
1395			STATUS_BLOCK_LEN(1) |
1396			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
1397		return;
1398	}
1399
1400	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
1401	status[1] =
1402		(sense[0] & 0x80) |		/* valid */
1403		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
1404		(sense[2] & 0x0f);		/* sense_key */
1405	status[2] = se_cmd->scsi_asc;		/* sense_code */
1406	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */
1407
1408	/* information */
1409	status[4] = sense[3];
1410	status[5] = sense[4];
1411	status[6] = sense[5];
1412	status[7] = sense[6];
1413
1414	/* CDB-dependent */
1415	status[8] = sense[8];
1416	status[9] = sense[9];
1417	status[10] = sense[10];
1418	status[11] = sense[11];
1419
1420	/* fru */
1421	status[12] = sense[14];
1422
1423	/* sense_key-dependent */
1424	status[13] = sense[15];
1425	status[14] = sense[16];
1426	status[15] = sense[17];
1427
1428	req->status.status |= cpu_to_be32(
1429		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1430		STATUS_BLOCK_DEAD(0) |
1431		STATUS_BLOCK_LEN(5) |
1432		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1433}
1434
1435static int sbp_send_sense(struct sbp_target_request *req)
1436{
1437	struct se_cmd *se_cmd = &req->se_cmd;
1438
1439	if (se_cmd->scsi_sense_length) {
1440		sbp_sense_mangle(req);
1441	} else {
1442		req->status.status |= cpu_to_be32(
1443			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1444			STATUS_BLOCK_DEAD(0) |
1445			STATUS_BLOCK_LEN(1) |
1446			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1447	}
1448
1449	return sbp_send_status(req);
1450}
1451
1452static void sbp_free_request(struct sbp_target_request *req)
1453{
1454	kfree(req->pg_tbl);
1455	kfree(req->cmd_buf);
1456	kfree(req);
1457}
1458
1459static void sbp_mgt_agent_process(struct work_struct *work)
1460{
1461	struct sbp_management_agent *agent =
1462		container_of(work, struct sbp_management_agent, work);
1463	struct sbp_management_request *req = agent->request;
1464	int ret;
1465	int status_data_len = 0;
1466
1467	/* fetch the ORB from the initiator */
1468	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
1469		req->node_addr, req->generation, req->speed,
1470		agent->orb_offset, &req->orb, sizeof(req->orb));
1471	if (ret != RCODE_COMPLETE) {
1472		pr_debug("mgt_orb fetch failed: %x\n", ret);
1473		goto out;
1474	}
1475
1476	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
1477		sbp2_pointer_to_addr(&req->orb.ptr1),
1478		sbp2_pointer_to_addr(&req->orb.ptr2),
1479		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
1480		sbp2_pointer_to_addr(&req->orb.status_fifo));
1481
1482	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
1483		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
1484		pr_err("mgt_orb bad request\n");
1485		goto out;
1486	}
1487
1488	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
1489	case MANAGEMENT_ORB_FUNCTION_LOGIN:
1490		sbp_management_request_login(agent, req, &status_data_len);
1491		break;
1492
1493	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
1494		sbp_management_request_query_logins(agent, req,
1495				&status_data_len);
1496		break;
1497
1498	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
1499		sbp_management_request_reconnect(agent, req, &status_data_len);
1500		break;
1501
1502	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
1503		pr_notice("SET PASSWORD not implemented\n");
1504
1505		req->status.status = cpu_to_be32(
1506			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1507			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1508
1509		break;
1510
1511	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
1512		sbp_management_request_logout(agent, req, &status_data_len);
1513		break;
1514
1515	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
1516		pr_notice("ABORT TASK not implemented\n");
1517
1518		req->status.status = cpu_to_be32(
1519			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1520			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1521
1522		break;
1523
1524	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
1525		pr_notice("ABORT TASK SET not implemented\n");
1526
1527		req->status.status = cpu_to_be32(
1528			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1529			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1530
1531		break;
1532
1533	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
1534		pr_notice("LOGICAL UNIT RESET not implemented\n");
1535
1536		req->status.status = cpu_to_be32(
1537			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1538			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1539
1540		break;
1541
1542	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
1543		pr_notice("TARGET RESET not implemented\n");
1544
1545		req->status.status = cpu_to_be32(
1546			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1547			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1548
1549		break;
1550
1551	default:
1552		pr_notice("unknown management function 0x%x\n",
1553			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
1554
1555		req->status.status = cpu_to_be32(
1556			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1557			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1558
1559		break;
1560	}
1561
1562	req->status.status |= cpu_to_be32(
1563		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
1564		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
1565		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
1566	req->status.orb_low = cpu_to_be32(agent->orb_offset);
1567
1568	/* write the status block back to the initiator */
1569	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
1570		req->node_addr, req->generation, req->speed,
1571		sbp2_pointer_to_addr(&req->orb.status_fifo),
1572		&req->status, 8 + status_data_len);
1573	if (ret != RCODE_COMPLETE) {
1574		pr_debug("mgt_orb status write failed: %x\n", ret);
1575		goto out;
1576	}
1577
1578out:
1579	fw_card_put(req->card);
1580	kfree(req);
1581
1582	spin_lock_bh(&agent->lock);
1583	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1584	spin_unlock_bh(&agent->lock);
1585}
1586
1587static void sbp_mgt_agent_rw(struct fw_card *card,
1588	struct fw_request *request, int tcode, int destination, int source,
1589	int generation, unsigned long long offset, void *data, size_t length,
1590	void *callback_data)
1591{
1592	struct sbp_management_agent *agent = callback_data;
1593	struct sbp2_pointer *ptr = data;
1594	int rcode = RCODE_ADDRESS_ERROR;
1595
1596	if (!agent->tport->enable)
1597		goto out;
1598
1599	if ((offset != agent->handler.offset) || (length != 8))
1600		goto out;
1601
1602	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
1603		struct sbp_management_request *req;
1604		int prev_state;
1605
1606		spin_lock_bh(&agent->lock);
1607		prev_state = agent->state;
1608		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
1609		spin_unlock_bh(&agent->lock);
1610
1611		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
1612			pr_notice("ignoring management request while busy\n");
1613			rcode = RCODE_CONFLICT_ERROR;
1614			goto out;
1615		}
1616
1617		req = kzalloc(sizeof(*req), GFP_ATOMIC);
1618		if (!req) {
1619			rcode = RCODE_CONFLICT_ERROR;
1620			goto out;
1621		}
1622
1623		req->card = fw_card_get(card);
1624		req->generation = generation;
1625		req->node_addr = source;
1626		req->speed = fw_get_request_speed(request);
1627
1628		agent->orb_offset = sbp2_pointer_to_addr(ptr);
1629		agent->request = req;
1630
1631		queue_work(system_unbound_wq, &agent->work);
1632		rcode = RCODE_COMPLETE;
1633	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
1634		addr_to_sbp2_pointer(agent->orb_offset, ptr);
1635		rcode = RCODE_COMPLETE;
1636	} else {
1637		rcode = RCODE_TYPE_ERROR;
1638	}
1639
1640out:
1641	fw_send_response(card, request, rcode);
1642}
1643
1644static struct sbp_management_agent *sbp_management_agent_register(
1645		struct sbp_tport *tport)
1646{
1647	int ret;
1648	struct sbp_management_agent *agent;
1649
1650	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1651	if (!agent)
1652		return ERR_PTR(-ENOMEM);
1653
1654	spin_lock_init(&agent->lock);
1655	agent->tport = tport;
1656	agent->handler.length = 0x08;
1657	agent->handler.address_callback = sbp_mgt_agent_rw;
1658	agent->handler.callback_data = agent;
1659	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1660	INIT_WORK(&agent->work, sbp_mgt_agent_process);
1661	agent->orb_offset = 0;
1662	agent->request = NULL;
1663
1664	ret = fw_core_add_address_handler(&agent->handler,
1665			&sbp_register_region);
1666	if (ret < 0) {
1667		kfree(agent);
1668		return ERR_PTR(ret);
1669	}
1670
1671	return agent;
1672}
1673
1674static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
1675{
1676	fw_core_remove_address_handler(&agent->handler);
1677	cancel_work_sync(&agent->work);
1678	kfree(agent);
1679}
1680
1681static int sbp_check_true(struct se_portal_group *se_tpg)
1682{
1683	return 1;
1684}
1685
1686static int sbp_check_false(struct se_portal_group *se_tpg)
1687{
1688	return 0;
1689}
1690
1691static char *sbp_get_fabric_name(void)
1692{
1693	return "sbp";
1694}
1695
1696static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
1697{
1698	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1699	struct sbp_tport *tport = tpg->tport;
1700
1701	return &tport->tport_name[0];
1702}
1703
1704static u16 sbp_get_tag(struct se_portal_group *se_tpg)
1705{
1706	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1707	return tpg->tport_tpgt;
1708}
1709
1710static u32 sbp_get_default_depth(struct se_portal_group *se_tpg)
1711{
1712	return 1;
1713}
1714
1715static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg)
1716{
1717	struct sbp_nacl *nacl;
1718
1719	nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL);
1720	if (!nacl) {
1721		pr_err("Unable to allocate struct sbp_nacl\n");
1722		return NULL;
1723	}
1724
1725	return &nacl->se_node_acl;
1726}
1727
1728static void sbp_release_fabric_acl(
1729	struct se_portal_group *se_tpg,
1730	struct se_node_acl *se_nacl)
1731{
1732	struct sbp_nacl *nacl =
1733		container_of(se_nacl, struct sbp_nacl, se_node_acl);
1734	kfree(nacl);
1735}
1736
1737static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
1738{
1739	return 1;
1740}
1741
1742static void sbp_release_cmd(struct se_cmd *se_cmd)
1743{
1744	struct sbp_target_request *req = container_of(se_cmd,
1745			struct sbp_target_request, se_cmd);
1746
1747	sbp_free_request(req);
1748}
1749
1750static int sbp_shutdown_session(struct se_session *se_sess)
1751{
1752	return 0;
1753}
1754
1755static void sbp_close_session(struct se_session *se_sess)
1756{
1757	return;
1758}
1759
1760static u32 sbp_sess_get_index(struct se_session *se_sess)
1761{
1762	return 0;
1763}
1764
1765static int sbp_write_pending(struct se_cmd *se_cmd)
1766{
1767	struct sbp_target_request *req = container_of(se_cmd,
1768			struct sbp_target_request, se_cmd);
1769	int ret;
1770
1771	ret = sbp_rw_data(req);
1772	if (ret) {
1773		req->status.status |= cpu_to_be32(
1774			STATUS_BLOCK_RESP(
1775				STATUS_RESP_TRANSPORT_FAILURE) |
1776			STATUS_BLOCK_DEAD(0) |
1777			STATUS_BLOCK_LEN(1) |
1778			STATUS_BLOCK_SBP_STATUS(
1779				SBP_STATUS_UNSPECIFIED_ERROR));
1780		sbp_send_status(req);
1781		return ret;
1782	}
1783
1784	target_execute_cmd(se_cmd);
1785	return 0;
1786}
1787
1788static int sbp_write_pending_status(struct se_cmd *se_cmd)
1789{
1790	return 0;
1791}
1792
1793static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
1794{
1795	return;
1796}
1797
1798static u32 sbp_get_task_tag(struct se_cmd *se_cmd)
1799{
1800	struct sbp_target_request *req = container_of(se_cmd,
1801			struct sbp_target_request, se_cmd);
1802
1803	/* only used for printk until we do TMRs */
1804	return (u32)req->orb_pointer;
1805}
1806
1807static int sbp_get_cmd_state(struct se_cmd *se_cmd)
1808{
1809	return 0;
1810}
1811
1812static int sbp_queue_data_in(struct se_cmd *se_cmd)
1813{
1814	struct sbp_target_request *req = container_of(se_cmd,
1815			struct sbp_target_request, se_cmd);
1816	int ret;
1817
1818	ret = sbp_rw_data(req);
1819	if (ret) {
1820		req->status.status |= cpu_to_be32(
1821			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1822			STATUS_BLOCK_DEAD(0) |
1823			STATUS_BLOCK_LEN(1) |
1824			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1825		sbp_send_status(req);
1826		return ret;
1827	}
1828
1829	return sbp_send_sense(req);
1830}
1831
1832/*
1833 * Called after command (no data transfer) or after the write (to device)
1834 * operation is completed
1835 */
1836static int sbp_queue_status(struct se_cmd *se_cmd)
1837{
1838	struct sbp_target_request *req = container_of(se_cmd,
1839			struct sbp_target_request, se_cmd);
1840
1841	return sbp_send_sense(req);
1842}
1843
1844static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
1845{
1846}
1847
1848static void sbp_aborted_task(struct se_cmd *se_cmd)
1849{
1850	return;
1851}
1852
1853static int sbp_check_stop_free(struct se_cmd *se_cmd)
1854{
1855	struct sbp_target_request *req = container_of(se_cmd,
1856			struct sbp_target_request, se_cmd);
1857
1858	transport_generic_free_cmd(&req->se_cmd, 0);
1859	return 1;
1860}
1861
1862/*
1863 * Handlers for Serial Bus Protocol 2/3 (SBP-2 / SBP-3)
1864 */
1865static u8 sbp_get_fabric_proto_ident(struct se_portal_group *se_tpg)
1866{
1867	/*
1868	 * Return a IEEE 1394 SCSI Protocol identifier for loopback operations
1869	 * This is defined in section 7.5.1 Table 362 in spc4r17
1870	 */
1871	return SCSI_PROTOCOL_SBP;
1872}
1873
1874static u32 sbp_get_pr_transport_id(
1875	struct se_portal_group *se_tpg,
1876	struct se_node_acl *se_nacl,
1877	struct t10_pr_registration *pr_reg,
1878	int *format_code,
1879	unsigned char *buf)
1880{
1881	int ret;
1882
1883	/*
1884	 * Set PROTOCOL IDENTIFIER to 3h for SBP
1885	 */
1886	buf[0] = SCSI_PROTOCOL_SBP;
1887	/*
1888	 * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
1889	 * over IEEE 1394
1890	 */
1891	ret = hex2bin(&buf[8], se_nacl->initiatorname, 8);
1892	if (ret < 0)
1893		pr_debug("sbp transport_id: invalid hex string\n");
1894
1895	/*
1896	 * The IEEE 1394 Transport ID is a hardcoded 24-byte length
1897	 */
1898	return 24;
1899}
1900
1901static u32 sbp_get_pr_transport_id_len(
1902	struct se_portal_group *se_tpg,
1903	struct se_node_acl *se_nacl,
1904	struct t10_pr_registration *pr_reg,
1905	int *format_code)
1906{
1907	*format_code = 0;
1908	/*
1909	 * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
1910	 * over IEEE 1394
1911	 *
1912	 * The SBP Transport ID is a hardcoded 24-byte length
1913	 */
1914	return 24;
1915}
1916
1917/*
1918 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
1919 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
1920 */
1921static char *sbp_parse_pr_out_transport_id(
1922	struct se_portal_group *se_tpg,
1923	const char *buf,
1924	u32 *out_tid_len,
1925	char **port_nexus_ptr)
1926{
1927	/*
1928	 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.4 TransportID
1929	 * for initiator ports using SCSI over SBP Serial SCSI Protocol
1930	 *
1931	 * The TransportID for a IEEE 1394 Initiator Port is of fixed size of
1932	 * 24 bytes, and IEEE 1394 does not contain a I_T nexus identifier,
1933	 * so we return the **port_nexus_ptr set to NULL.
1934	 */
1935	*port_nexus_ptr = NULL;
1936	*out_tid_len = 24;
1937
1938	return (char *)&buf[8];
1939}
1940
1941static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
1942{
1943	int i, count = 0;
1944
1945	spin_lock(&tpg->tpg_lun_lock);
1946	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
1947		struct se_lun *se_lun = tpg->tpg_lun_list[i];
1948
1949		if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
1950			continue;
1951
1952		count++;
1953	}
1954	spin_unlock(&tpg->tpg_lun_lock);
1955
1956	return count;
1957}
1958
1959static int sbp_update_unit_directory(struct sbp_tport *tport)
1960{
1961	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i;
1962	u32 *data;
1963
1964	if (tport->unit_directory.data) {
1965		fw_core_remove_descriptor(&tport->unit_directory);
1966		kfree(tport->unit_directory.data);
1967		tport->unit_directory.data = NULL;
1968	}
1969
1970	if (!tport->enable || !tport->tpg)
1971		return 0;
1972
1973	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);
1974
1975	/*
1976	 * Number of entries in the final unit directory:
1977	 *  - all of those in the template
1978	 *  - management_agent
1979	 *  - unit_characteristics
1980	 *  - reconnect_timeout
1981	 *  - unit unique ID
1982	 *  - one for each LUN
1983	 *
1984	 *  MUST NOT include leaf or sub-directory entries
1985	 */
1986	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;
1987
1988	if (tport->directory_id != -1)
1989		num_entries++;
1990
1991	/* allocate num_entries + 4 for the header and unique ID leaf */
1992	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
1993	if (!data)
1994		return -ENOMEM;
1995
1996	/* directory_length */
1997	data[idx++] = num_entries << 16;
1998
1999	/* directory_id */
2000	if (tport->directory_id != -1)
2001		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;
2002
2003	/* unit directory template */
2004	memcpy(&data[idx], sbp_unit_directory_template,
2005			sizeof(sbp_unit_directory_template));
2006	idx += ARRAY_SIZE(sbp_unit_directory_template);
2007
2008	/* management_agent */
2009	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
2010	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);
2011
2012	/* unit_characteristics */
2013	data[idx++] = 0x3a000000 |
2014		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
2015		SBP_ORB_FETCH_SIZE;
2016
2017	/* reconnect_timeout */
2018	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);
2019
2020	/* unit unique ID (leaf is just after LUNs) */
2021	data[idx++] = 0x8d000000 | (num_luns + 1);
2022
2023	spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
2024	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
2025		struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i];
2026		struct se_device *dev;
2027		int type;
2028
2029		if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
2030			continue;
2031
2032		spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
2033
2034		dev = se_lun->lun_se_dev;
2035		type = dev->transport->get_device_type(dev);
2036
2037		/* logical_unit_number */
2038		data[idx++] = 0x14000000 |
2039			((type << 16) & 0x1f0000) |
2040			(se_lun->unpacked_lun & 0xffff);
2041
2042		spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
2043	}
2044	spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
2045
2046	/* unit unique ID leaf */
2047	data[idx++] = 2 << 16;
2048	data[idx++] = tport->guid >> 32;
2049	data[idx++] = tport->guid;
2050
2051	tport->unit_directory.length = idx;
2052	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
2053	tport->unit_directory.data = data;
2054
2055	ret = fw_core_add_descriptor(&tport->unit_directory);
2056	if (ret < 0) {
2057		kfree(tport->unit_directory.data);
2058		tport->unit_directory.data = NULL;
2059	}
2060
2061	return ret;
2062}
2063
2064static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
2065{
2066	const char *cp;
2067	char c, nibble;
2068	int pos = 0, err;
2069
2070	*wwn = 0;
2071	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
2072		c = *cp;
2073		if (c == '\n' && cp[1] == '\0')
2074			continue;
2075		if (c == '\0') {
2076			err = 2;
2077			if (pos != 16)
2078				goto fail;
2079			return cp - name;
2080		}
2081		err = 3;
2082		if (isdigit(c))
2083			nibble = c - '0';
2084		else if (isxdigit(c))
2085			nibble = tolower(c) - 'a' + 10;
2086		else
2087			goto fail;
2088		*wwn = (*wwn << 4) | nibble;
2089		pos++;
2090	}
2091	err = 4;
2092fail:
2093	printk(KERN_INFO "err %u len %zu pos %u\n",
2094			err, cp - name, pos);
2095	return -1;
2096}
2097
2098static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
2099{
2100	return snprintf(buf, len, "%016llx", wwn);
2101}
2102
2103static struct se_node_acl *sbp_make_nodeacl(
2104		struct se_portal_group *se_tpg,
2105		struct config_group *group,
2106		const char *name)
2107{
2108	struct se_node_acl *se_nacl, *se_nacl_new;
2109	struct sbp_nacl *nacl;
2110	u64 guid = 0;
2111	u32 nexus_depth = 1;
2112
2113	if (sbp_parse_wwn(name, &guid) < 0)
2114		return ERR_PTR(-EINVAL);
2115
2116	se_nacl_new = sbp_alloc_fabric_acl(se_tpg);
2117	if (!se_nacl_new)
2118		return ERR_PTR(-ENOMEM);
2119
2120	/*
2121	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
2122	 * when converting a NodeACL from demo mode -> explict
2123	 */
2124	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
2125			name, nexus_depth);
2126	if (IS_ERR(se_nacl)) {
2127		sbp_release_fabric_acl(se_tpg, se_nacl_new);
2128		return se_nacl;
2129	}
2130
2131	nacl = container_of(se_nacl, struct sbp_nacl, se_node_acl);
2132	nacl->guid = guid;
2133	sbp_format_wwn(nacl->iport_name, SBP_NAMELEN, guid);
2134
2135	return se_nacl;
2136}
2137
2138static void sbp_drop_nodeacl(struct se_node_acl *se_acl)
2139{
2140	struct sbp_nacl *nacl =
2141		container_of(se_acl, struct sbp_nacl, se_node_acl);
2142
2143	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
2144	kfree(nacl);
2145}
2146
2147static int sbp_post_link_lun(
2148		struct se_portal_group *se_tpg,
2149		struct se_lun *se_lun)
2150{
2151	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2152
2153	return sbp_update_unit_directory(tpg->tport);
2154}
2155
2156static void sbp_pre_unlink_lun(
2157		struct se_portal_group *se_tpg,
2158		struct se_lun *se_lun)
2159{
2160	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2161	struct sbp_tport *tport = tpg->tport;
2162	int ret;
2163
2164	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
2165		tport->enable = 0;
2166
2167	ret = sbp_update_unit_directory(tport);
2168	if (ret < 0)
2169		pr_err("unlink LUN: failed to update unit directory\n");
2170}
2171
2172static struct se_portal_group *sbp_make_tpg(
2173		struct se_wwn *wwn,
2174		struct config_group *group,
2175		const char *name)
2176{
2177	struct sbp_tport *tport =
2178		container_of(wwn, struct sbp_tport, tport_wwn);
2179
2180	struct sbp_tpg *tpg;
2181	unsigned long tpgt;
2182	int ret;
2183
2184	if (strstr(name, "tpgt_") != name)
2185		return ERR_PTR(-EINVAL);
2186	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
2187		return ERR_PTR(-EINVAL);
2188
2189	if (tport->tpg) {
2190		pr_err("Only one TPG per Unit is possible.\n");
2191		return ERR_PTR(-EBUSY);
2192	}
2193
2194	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2195	if (!tpg) {
2196		pr_err("Unable to allocate struct sbp_tpg\n");
2197		return ERR_PTR(-ENOMEM);
2198	}
2199
2200	tpg->tport = tport;
2201	tpg->tport_tpgt = tpgt;
2202	tport->tpg = tpg;
2203
2204	/* default attribute values */
2205	tport->enable = 0;
2206	tport->directory_id = -1;
2207	tport->mgt_orb_timeout = 15;
2208	tport->max_reconnect_timeout = 5;
2209	tport->max_logins_per_lun = 1;
2210
2211	tport->mgt_agt = sbp_management_agent_register(tport);
2212	if (IS_ERR(tport->mgt_agt)) {
2213		ret = PTR_ERR(tport->mgt_agt);
2214		goto out_free_tpg;
2215	}
2216
2217	ret = core_tpg_register(&sbp_ops, wwn, &tpg->se_tpg, tpg,
2218			TRANSPORT_TPG_TYPE_NORMAL);
2219	if (ret < 0)
2220		goto out_unreg_mgt_agt;
2221
2222	return &tpg->se_tpg;
2223
2224out_unreg_mgt_agt:
2225	sbp_management_agent_unregister(tport->mgt_agt);
2226out_free_tpg:
2227	tport->tpg = NULL;
2228	kfree(tpg);
2229	return ERR_PTR(ret);
2230}
2231
2232static void sbp_drop_tpg(struct se_portal_group *se_tpg)
2233{
2234	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2235	struct sbp_tport *tport = tpg->tport;
2236
2237	core_tpg_deregister(se_tpg);
2238	sbp_management_agent_unregister(tport->mgt_agt);
2239	tport->tpg = NULL;
2240	kfree(tpg);
2241}
2242
2243static struct se_wwn *sbp_make_tport(
2244		struct target_fabric_configfs *tf,
2245		struct config_group *group,
2246		const char *name)
2247{
2248	struct sbp_tport *tport;
2249	u64 guid = 0;
2250
2251	if (sbp_parse_wwn(name, &guid) < 0)
2252		return ERR_PTR(-EINVAL);
2253
2254	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2255	if (!tport) {
2256		pr_err("Unable to allocate struct sbp_tport\n");
2257		return ERR_PTR(-ENOMEM);
2258	}
2259
2260	tport->guid = guid;
2261	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
2262
2263	return &tport->tport_wwn;
2264}
2265
2266static void sbp_drop_tport(struct se_wwn *wwn)
2267{
2268	struct sbp_tport *tport =
2269		container_of(wwn, struct sbp_tport, tport_wwn);
2270
2271	kfree(tport);
2272}
2273
2274static ssize_t sbp_wwn_show_attr_version(
2275		struct target_fabric_configfs *tf,
2276		char *page)
2277{
2278	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
2279}
2280
2281TF_WWN_ATTR_RO(sbp, version);
2282
2283static struct configfs_attribute *sbp_wwn_attrs[] = {
2284	&sbp_wwn_version.attr,
2285	NULL,
2286};
2287
2288static ssize_t sbp_tpg_show_directory_id(
2289		struct se_portal_group *se_tpg,
2290		char *page)
2291{
2292	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2293	struct sbp_tport *tport = tpg->tport;
2294
2295	if (tport->directory_id == -1)
2296		return sprintf(page, "implicit\n");
2297	else
2298		return sprintf(page, "%06x\n", tport->directory_id);
2299}
2300
2301static ssize_t sbp_tpg_store_directory_id(
2302		struct se_portal_group *se_tpg,
2303		const char *page,
2304		size_t count)
2305{
2306	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2307	struct sbp_tport *tport = tpg->tport;
2308	unsigned long val;
2309
2310	if (tport->enable) {
2311		pr_err("Cannot change the directory_id on an active target.\n");
2312		return -EBUSY;
2313	}
2314
2315	if (strstr(page, "implicit") == page) {
2316		tport->directory_id = -1;
2317	} else {
2318		if (kstrtoul(page, 16, &val) < 0)
2319			return -EINVAL;
2320		if (val > 0xffffff)
2321			return -EINVAL;
2322
2323		tport->directory_id = val;
2324	}
2325
2326	return count;
2327}
2328
2329static ssize_t sbp_tpg_show_enable(
2330		struct se_portal_group *se_tpg,
2331		char *page)
2332{
2333	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2334	struct sbp_tport *tport = tpg->tport;
2335	return sprintf(page, "%d\n", tport->enable);
2336}
2337
2338static ssize_t sbp_tpg_store_enable(
2339		struct se_portal_group *se_tpg,
2340		const char *page,
2341		size_t count)
2342{
2343	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2344	struct sbp_tport *tport = tpg->tport;
2345	unsigned long val;
2346	int ret;
2347
2348	if (kstrtoul(page, 0, &val) < 0)
2349		return -EINVAL;
2350	if ((val != 0) && (val != 1))
2351		return -EINVAL;
2352
2353	if (tport->enable == val)
2354		return count;
2355
2356	if (val) {
2357		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
2358			pr_err("Cannot enable a target with no LUNs!\n");
2359			return -EINVAL;
2360		}
2361	} else {
2362		/* XXX: force-shutdown sessions instead? */
2363		spin_lock_bh(&se_tpg->session_lock);
2364		if (!list_empty(&se_tpg->tpg_sess_list)) {
2365			spin_unlock_bh(&se_tpg->session_lock);
2366			return -EBUSY;
2367		}
2368		spin_unlock_bh(&se_tpg->session_lock);
2369	}
2370
2371	tport->enable = val;
2372
2373	ret = sbp_update_unit_directory(tport);
2374	if (ret < 0) {
2375		pr_err("Could not update Config ROM\n");
2376		return ret;
2377	}
2378
2379	return count;
2380}
2381
2382TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR);
2383TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR);
2384
2385static struct configfs_attribute *sbp_tpg_base_attrs[] = {
2386	&sbp_tpg_directory_id.attr,
2387	&sbp_tpg_enable.attr,
2388	NULL,
2389};
2390
2391static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout(
2392		struct se_portal_group *se_tpg,
2393		char *page)
2394{
2395	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2396	struct sbp_tport *tport = tpg->tport;
2397	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
2398}
2399
2400static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout(
2401		struct se_portal_group *se_tpg,
2402		const char *page,
2403		size_t count)
2404{
2405	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2406	struct sbp_tport *tport = tpg->tport;
2407	unsigned long val;
2408	int ret;
2409
2410	if (kstrtoul(page, 0, &val) < 0)
2411		return -EINVAL;
2412	if ((val < 1) || (val > 127))
2413		return -EINVAL;
2414
2415	if (tport->mgt_orb_timeout == val)
2416		return count;
2417
2418	tport->mgt_orb_timeout = val;
2419
2420	ret = sbp_update_unit_directory(tport);
2421	if (ret < 0)
2422		return ret;
2423
2424	return count;
2425}
2426
2427static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout(
2428		struct se_portal_group *se_tpg,
2429		char *page)
2430{
2431	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2432	struct sbp_tport *tport = tpg->tport;
2433	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
2434}
2435
2436static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout(
2437		struct se_portal_group *se_tpg,
2438		const char *page,
2439		size_t count)
2440{
2441	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2442	struct sbp_tport *tport = tpg->tport;
2443	unsigned long val;
2444	int ret;
2445
2446	if (kstrtoul(page, 0, &val) < 0)
2447		return -EINVAL;
2448	if ((val < 1) || (val > 32767))
2449		return -EINVAL;
2450
2451	if (tport->max_reconnect_timeout == val)
2452		return count;
2453
2454	tport->max_reconnect_timeout = val;
2455
2456	ret = sbp_update_unit_directory(tport);
2457	if (ret < 0)
2458		return ret;
2459
2460	return count;
2461}
2462
2463static ssize_t sbp_tpg_attrib_show_max_logins_per_lun(
2464		struct se_portal_group *se_tpg,
2465		char *page)
2466{
2467	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2468	struct sbp_tport *tport = tpg->tport;
2469	return sprintf(page, "%d\n", tport->max_logins_per_lun);
2470}
2471
2472static ssize_t sbp_tpg_attrib_store_max_logins_per_lun(
2473		struct se_portal_group *se_tpg,
2474		const char *page,
2475		size_t count)
2476{
2477	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2478	struct sbp_tport *tport = tpg->tport;
2479	unsigned long val;
2480
2481	if (kstrtoul(page, 0, &val) < 0)
2482		return -EINVAL;
2483	if ((val < 1) || (val > 127))
2484		return -EINVAL;
2485
2486	/* XXX: also check against current count? */
2487
2488	tport->max_logins_per_lun = val;
2489
2490	return count;
2491}
2492
2493TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR);
2494TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR);
2495TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR);
2496
2497static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
2498	&sbp_tpg_attrib_mgt_orb_timeout.attr,
2499	&sbp_tpg_attrib_max_reconnect_timeout.attr,
2500	&sbp_tpg_attrib_max_logins_per_lun.attr,
2501	NULL,
2502};
2503
2504static const struct target_core_fabric_ops sbp_ops = {
2505	.module				= THIS_MODULE,
2506	.name				= "sbp",
2507	.get_fabric_name		= sbp_get_fabric_name,
2508	.get_fabric_proto_ident		= sbp_get_fabric_proto_ident,
2509	.tpg_get_wwn			= sbp_get_fabric_wwn,
2510	.tpg_get_tag			= sbp_get_tag,
2511	.tpg_get_default_depth		= sbp_get_default_depth,
2512	.tpg_get_pr_transport_id	= sbp_get_pr_transport_id,
2513	.tpg_get_pr_transport_id_len	= sbp_get_pr_transport_id_len,
2514	.tpg_parse_pr_out_transport_id	= sbp_parse_pr_out_transport_id,
2515	.tpg_check_demo_mode		= sbp_check_true,
2516	.tpg_check_demo_mode_cache	= sbp_check_true,
2517	.tpg_check_demo_mode_write_protect = sbp_check_false,
2518	.tpg_check_prod_mode_write_protect = sbp_check_false,
2519	.tpg_alloc_fabric_acl		= sbp_alloc_fabric_acl,
2520	.tpg_release_fabric_acl		= sbp_release_fabric_acl,
2521	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
2522	.release_cmd			= sbp_release_cmd,
2523	.shutdown_session		= sbp_shutdown_session,
2524	.close_session			= sbp_close_session,
2525	.sess_get_index			= sbp_sess_get_index,
2526	.write_pending			= sbp_write_pending,
2527	.write_pending_status		= sbp_write_pending_status,
2528	.set_default_node_attributes	= sbp_set_default_node_attrs,
2529	.get_task_tag			= sbp_get_task_tag,
2530	.get_cmd_state			= sbp_get_cmd_state,
2531	.queue_data_in			= sbp_queue_data_in,
2532	.queue_status			= sbp_queue_status,
2533	.queue_tm_rsp			= sbp_queue_tm_rsp,
2534	.aborted_task			= sbp_aborted_task,
2535	.check_stop_free		= sbp_check_stop_free,
2536
2537	.fabric_make_wwn		= sbp_make_tport,
2538	.fabric_drop_wwn		= sbp_drop_tport,
2539	.fabric_make_tpg		= sbp_make_tpg,
2540	.fabric_drop_tpg		= sbp_drop_tpg,
2541	.fabric_post_link		= sbp_post_link_lun,
2542	.fabric_pre_unlink		= sbp_pre_unlink_lun,
2543	.fabric_make_np			= NULL,
2544	.fabric_drop_np			= NULL,
2545	.fabric_make_nodeacl		= sbp_make_nodeacl,
2546	.fabric_drop_nodeacl		= sbp_drop_nodeacl,
2547
2548	.tfc_wwn_attrs			= sbp_wwn_attrs,
2549	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
2550	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
2551};
2552
2553static int __init sbp_init(void)
2554{
2555	return target_register_template(&sbp_ops);
2556};
2557
2558static void __exit sbp_exit(void)
2559{
2560	target_unregister_template(&sbp_ops);
2561};
2562
2563MODULE_DESCRIPTION("FireWire SBP fabric driver");
2564MODULE_LICENSE("GPL");
2565module_init(sbp_init);
2566module_exit(sbp_exit);
2567