This source file includes the following definitions.
- init_se_kmem_caches
- release_se_kmem_caches
- scsi_get_new_index
- transport_subsystem_check_init
- target_release_sess_cmd_refcnt
- transport_init_session
- transport_alloc_session
- transport_alloc_session_tags
- transport_init_session_tags
- __transport_register_session
- transport_register_session
- target_setup_session
- target_show_dynamic_sessions
- target_complete_nacl
- target_put_nacl
- transport_deregister_session_configfs
- transport_free_session
- target_release_res
- transport_deregister_session
- target_remove_session
- target_remove_from_state_list
- transport_cmd_check_stop_to_fabric
- transport_lun_remove_cmd
- target_complete_failure_work
- transport_get_sense_buffer
- transport_copy_sense_to_cmd
- target_handle_abort
- target_abort_work
- target_cmd_interrupted
- target_complete_cmd
- target_complete_cmd_with_length
- target_add_to_state_list
- target_qf_do_work
- transport_dump_cmd_direction
- transport_dump_dev_state
- transport_dump_vpd_proto_id
- transport_set_vpd_proto_id
- transport_dump_vpd_assoc
- transport_set_vpd_assoc
- transport_dump_vpd_ident_type
- transport_set_vpd_ident_type
- transport_dump_vpd_ident
- transport_set_vpd_ident
- target_check_max_data_sg_nents
- target_cmd_size_check
- transport_init_se_cmd
- transport_check_alloc_task_attr
- target_setup_cmd_from_cdb
- transport_handle_cdb_direct
- transport_generic_map_mem_to_cmd
- target_submit_cmd_map_sgls
- target_submit_cmd
- target_complete_tmr_failure
- target_lookup_lun_from_tag
- target_submit_tmr
- transport_generic_request_failure
- __target_execute_cmd
- target_write_prot_action
- target_handle_task_attr
- target_execute_cmd
- target_restart_delayed_cmds
- transport_complete_task_attr
- transport_complete_qf
- transport_handle_queue_full
- target_read_prot_action
- target_complete_ok_work
- target_free_sgl
- transport_reset_sgl_orig
- transport_free_pages
- transport_kmap_data_sg
- transport_kunmap_data_sg
- target_alloc_sgl
- transport_generic_new_cmd
- transport_write_pending_qf
- target_wait_free_cmd
- target_put_cmd_and_wait
- transport_generic_free_cmd
- target_get_sess_cmd
- target_free_cmd_mem
- target_release_cmd_kref
- target_put_sess_cmd
- data_dir_name
- cmd_state_name
- target_append_str
- target_ts_to_str
- target_tmf_name
- target_show_cmd
- target_sess_cmd_list_set_waiting
- target_wait_for_sess_cmds
- transport_clear_lun_ref
- __transport_wait_for_tasks
- transport_wait_for_tasks
- translate_sense_reason
- transport_send_check_condition_and_sense
- target_send_busy
- target_tmr_work
- transport_generic_handle_tmr
- target_check_wce
- target_check_fua
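/*
 * Core SCSI target (target_core) transport engine: session setup and
 * teardown, CDB parsing, data buffer handling, SAM task-attribute ordering,
 * command completion and queue-full retry logic.
 */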
13 #include <linux/net.h>
14 #include <linux/delay.h>
15 #include <linux/string.h>
16 #include <linux/timer.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/kthread.h>
20 #include <linux/in.h>
21 #include <linux/cdrom.h>
22 #include <linux/module.h>
23 #include <linux/ratelimit.h>
24 #include <linux/vmalloc.h>
25 #include <asm/unaligned.h>
26 #include <net/sock.h>
27 #include <net/tcp.h>
28 #include <scsi/scsi_proto.h>
29 #include <scsi/scsi_common.h>
30
31 #include <target/target_core_base.h>
32 #include <target/target_core_backend.h>
33 #include <target/target_core_fabric.h>
34
35 #include "target_core_internal.h"
36 #include "target_core_alua.h"
37 #include "target_core_pr.h"
38 #include "target_core_ua.h"
39
40 #define CREATE_TRACE_POINTS
41 #include <trace/events/target.h>
42
43 static struct workqueue_struct *target_completion_wq;
44 static struct kmem_cache *se_sess_cache;
45 struct kmem_cache *se_ua_cache;
46 struct kmem_cache *t10_pr_reg_cache;
47 struct kmem_cache *t10_alua_lu_gp_cache;
48 struct kmem_cache *t10_alua_lu_gp_mem_cache;
49 struct kmem_cache *t10_alua_tg_pt_gp_cache;
50 struct kmem_cache *t10_alua_lba_map_cache;
51 struct kmem_cache *t10_alua_lba_map_mem_cache;
52
53 static void transport_complete_task_attr(struct se_cmd *cmd);
54 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
55 static void transport_handle_queue_full(struct se_cmd *cmd,
56 struct se_device *dev, int err, bool write_pending);
57 static void target_complete_ok_work(struct work_struct *work);
58
59 int init_se_kmem_caches(void)
60 {
61 se_sess_cache = kmem_cache_create("se_sess_cache",
62 sizeof(struct se_session), __alignof__(struct se_session),
63 0, NULL);
64 if (!se_sess_cache) {
65 pr_err("kmem_cache_create() for struct se_session"
66 " failed\n");
67 goto out;
68 }
69 se_ua_cache = kmem_cache_create("se_ua_cache",
70 sizeof(struct se_ua), __alignof__(struct se_ua),
71 0, NULL);
72 if (!se_ua_cache) {
73 pr_err("kmem_cache_create() for struct se_ua failed\n");
74 goto out_free_sess_cache;
75 }
76 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
77 sizeof(struct t10_pr_registration),
78 __alignof__(struct t10_pr_registration), 0, NULL);
79 if (!t10_pr_reg_cache) {
80 pr_err("kmem_cache_create() for struct t10_pr_registration"
81 " failed\n");
82 goto out_free_ua_cache;
83 }
84 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
85 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
86 0, NULL);
87 if (!t10_alua_lu_gp_cache) {
88 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
89 " failed\n");
90 goto out_free_pr_reg_cache;
91 }
92 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
93 sizeof(struct t10_alua_lu_gp_member),
94 __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
95 if (!t10_alua_lu_gp_mem_cache) {
96 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
97 "cache failed\n");
98 goto out_free_lu_gp_cache;
99 }
100 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
101 sizeof(struct t10_alua_tg_pt_gp),
102 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
103 if (!t10_alua_tg_pt_gp_cache) {
104 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
105 "cache failed\n");
106 goto out_free_lu_gp_mem_cache;
107 }
108 t10_alua_lba_map_cache = kmem_cache_create(
109 "t10_alua_lba_map_cache",
110 sizeof(struct t10_alua_lba_map),
111 __alignof__(struct t10_alua_lba_map), 0, NULL);
112 if (!t10_alua_lba_map_cache) {
113 pr_err("kmem_cache_create() for t10_alua_lba_map_"
114 "cache failed\n");
115 goto out_free_tg_pt_gp_cache;
116 }
117 t10_alua_lba_map_mem_cache = kmem_cache_create(
118 "t10_alua_lba_map_mem_cache",
119 sizeof(struct t10_alua_lba_map_member),
120 __alignof__(struct t10_alua_lba_map_member), 0, NULL);
121 if (!t10_alua_lba_map_mem_cache) {
122 pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
123 "cache failed\n");
124 goto out_free_lba_map_cache;
125 }
126
127 target_completion_wq = alloc_workqueue("target_completion",
128 WQ_MEM_RECLAIM, 0);
129 if (!target_completion_wq)
130 goto out_free_lba_map_mem_cache;
131
132 return 0;
133
134 out_free_lba_map_mem_cache:
135 kmem_cache_destroy(t10_alua_lba_map_mem_cache);
136 out_free_lba_map_cache:
137 kmem_cache_destroy(t10_alua_lba_map_cache);
138 out_free_tg_pt_gp_cache:
139 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
140 out_free_lu_gp_mem_cache:
141 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
142 out_free_lu_gp_cache:
143 kmem_cache_destroy(t10_alua_lu_gp_cache);
144 out_free_pr_reg_cache:
145 kmem_cache_destroy(t10_pr_reg_cache);
146 out_free_ua_cache:
147 kmem_cache_destroy(se_ua_cache);
148 out_free_sess_cache:
149 kmem_cache_destroy(se_sess_cache);
150 out:
151 return -ENOMEM;
152 }
153
154 void release_se_kmem_caches(void)
155 {
156 destroy_workqueue(target_completion_wq);
157 kmem_cache_destroy(se_sess_cache);
158 kmem_cache_destroy(se_ua_cache);
159 kmem_cache_destroy(t10_pr_reg_cache);
160 kmem_cache_destroy(t10_alua_lu_gp_cache);
161 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
162 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
163 kmem_cache_destroy(t10_alua_lba_map_cache);
164 kmem_cache_destroy(t10_alua_lba_map_mem_cache);
165 }
166
167
168 static DEFINE_SPINLOCK(scsi_mib_index_lock);
169 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
170
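/* Hand out the next monotonically increasing index for the given SCSI MIB table type. */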
174 u32 scsi_get_new_index(scsi_index_t type)
175 {
176 u32 new_index;
177
178 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
179
180 spin_lock(&scsi_mib_index_lock);
181 new_index = ++scsi_mib_index[type];
182 spin_unlock(&scsi_mib_index_lock);
183
184 return new_index;
185 }
186
187 void transport_subsystem_check_init(void)
188 {
189 int ret;
190 static int sub_api_initialized;
191
192 if (sub_api_initialized)
193 return;
194
195 ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
196 if (ret != 0)
197 pr_err("Unable to load target_core_iblock\n");
198
199 ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
200 if (ret != 0)
201 pr_err("Unable to load target_core_file\n");
202
203 ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
204 if (ret != 0)
205 pr_err("Unable to load target_core_pscsi\n");
206
207 ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
208 if (ret != 0)
209 pr_err("Unable to load target_core_user\n");
210
211 sub_api_initialized = 1;
212 }
213
214 static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
215 {
216 struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
217
218 wake_up(&sess->cmd_list_wq);
219 }
220
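/*
 * Initialise the list heads, command-list lock, wait queue and per-cpu
 * command reference count of a (zeroed) struct se_session.
 */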
227 int transport_init_session(struct se_session *se_sess)
228 {
229 INIT_LIST_HEAD(&se_sess->sess_list);
230 INIT_LIST_HEAD(&se_sess->sess_acl_list);
231 INIT_LIST_HEAD(&se_sess->sess_cmd_list);
232 spin_lock_init(&se_sess->sess_cmd_lock);
233 init_waitqueue_head(&se_sess->cmd_list_wq);
234 return percpu_ref_init(&se_sess->cmd_count,
235 target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
236 }
237 EXPORT_SYMBOL(transport_init_session);
238
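/*
 * Allocate a struct se_session from the session cache and initialise it.
 * @sup_prot_ops records the T10-PI operations the fabric supports.
 */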
243 struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
244 {
245 struct se_session *se_sess;
246 int ret;
247
248 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
249 if (!se_sess) {
250 pr_err("Unable to allocate struct se_session from"
251 " se_sess_cache\n");
252 return ERR_PTR(-ENOMEM);
253 }
254 ret = transport_init_session(se_sess);
255 if (ret < 0) {
256 kmem_cache_free(se_sess_cache, se_sess);
257 return ERR_PTR(ret);
258 }
259 se_sess->sup_prot_ops = sup_prot_ops;
260
261 return se_sess;
262 }
263 EXPORT_SYMBOL(transport_alloc_session);
264
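/*
 * Allocate the per-session command map and tag pool: @tag_num in-flight
 * commands, each with @tag_size bytes of fabric-private descriptor space.
 */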
272 int transport_alloc_session_tags(struct se_session *se_sess,
273 unsigned int tag_num, unsigned int tag_size)
274 {
275 int rc;
276
277 se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
278 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
279 if (!se_sess->sess_cmd_map) {
280 pr_err("Unable to allocate se_sess->sess_cmd_map\n");
281 return -ENOMEM;
282 }
283
284 rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
285 false, GFP_KERNEL, NUMA_NO_NODE);
286 if (rc < 0) {
287 pr_err("Unable to init se_sess->sess_tag_pool,"
288 " tag_num: %u\n", tag_num);
289 kvfree(se_sess->sess_cmd_map);
290 se_sess->sess_cmd_map = NULL;
291 return -ENOMEM;
292 }
293
294 return 0;
295 }
296 EXPORT_SYMBOL(transport_alloc_session_tags);
297
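/*
 * Allocate a session and its tag pool in one call; @tag_num and @tag_size
 * must either both be zero or both be non-zero.
 */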
305 static struct se_session *
306 transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
307 enum target_prot_op sup_prot_ops)
308 {
309 struct se_session *se_sess;
310 int rc;
311
312 if (tag_num != 0 && !tag_size) {
313 pr_err("init_session_tags called with percpu-ida tag_num:"
314 " %u, but zero tag_size\n", tag_num);
315 return ERR_PTR(-EINVAL);
316 }
317 if (!tag_num && tag_size) {
318 pr_err("init_session_tags called with percpu-ida tag_size:"
319 " %u, but zero tag_num\n", tag_size);
320 return ERR_PTR(-EINVAL);
321 }
322
323 se_sess = transport_alloc_session(sup_prot_ops);
324 if (IS_ERR(se_sess))
325 return se_sess;
326
327 rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
328 if (rc < 0) {
329 transport_free_session(se_sess);
330 return ERR_PTR(-ENOMEM);
331 }
332
333 return se_sess;
334 }
335
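/* Caller must hold se_tpg->session_lock; see transport_register_session(). */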
339 void __transport_register_session(
340 struct se_portal_group *se_tpg,
341 struct se_node_acl *se_nacl,
342 struct se_session *se_sess,
343 void *fabric_sess_ptr)
344 {
345 const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
346 unsigned char buf[PR_REG_ISID_LEN];
347 unsigned long flags;
348
349 se_sess->se_tpg = se_tpg;
350 se_sess->fabric_sess_ptr = fabric_sess_ptr;
351
352
353
354
355
356
357 if (se_nacl) {
358
359
360
361
362
363
364
365
366
367
368 if (se_nacl->saved_prot_type)
369 se_sess->sess_prot_type = se_nacl->saved_prot_type;
370 else if (tfo->tpg_check_prot_fabric_only)
371 se_sess->sess_prot_type = se_nacl->saved_prot_type =
372 tfo->tpg_check_prot_fabric_only(se_tpg);
373
374
375
376
377 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
378 memset(&buf[0], 0, PR_REG_ISID_LEN);
379 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
380 &buf[0], PR_REG_ISID_LEN);
381 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
382 }
383
384 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
385
386
387
388
389 se_nacl->nacl_sess = se_sess;
390
391 list_add_tail(&se_sess->sess_acl_list,
392 &se_nacl->acl_sess_list);
393 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
394 }
395 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
396
397 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
398 se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
399 }
400 EXPORT_SYMBOL(__transport_register_session);
401
402 void transport_register_session(
403 struct se_portal_group *se_tpg,
404 struct se_node_acl *se_nacl,
405 struct se_session *se_sess,
406 void *fabric_sess_ptr)
407 {
408 unsigned long flags;
409
410 spin_lock_irqsave(&se_tpg->session_lock, flags);
411 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
412 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
413 }
414 EXPORT_SYMBOL(transport_register_session);
415
416 struct se_session *
417 target_setup_session(struct se_portal_group *tpg,
418 unsigned int tag_num, unsigned int tag_size,
419 enum target_prot_op prot_op,
420 const char *initiatorname, void *private,
421 int (*callback)(struct se_portal_group *,
422 struct se_session *, void *))
423 {
424 struct se_session *sess;
425
426
427
428
429
430 if (tag_num != 0)
431 sess = transport_init_session_tags(tag_num, tag_size, prot_op);
432 else
433 sess = transport_alloc_session(prot_op);
434
435 if (IS_ERR(sess))
436 return sess;
437
438 sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
439 (unsigned char *)initiatorname);
440 if (!sess->se_node_acl) {
441 transport_free_session(sess);
442 return ERR_PTR(-EACCES);
443 }
444
445
446
447
448 if (callback != NULL) {
449 int rc = callback(tpg, sess, private);
450 if (rc) {
451 transport_free_session(sess);
452 return ERR_PTR(rc);
453 }
454 }
455
456 transport_register_session(tpg, sess->se_node_acl, sess, private);
457 return sess;
458 }
459 EXPORT_SYMBOL(target_setup_session);
460
461 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
462 {
463 struct se_session *se_sess;
464 ssize_t len = 0;
465
466 spin_lock_bh(&se_tpg->session_lock);
467 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
468 if (!se_sess->se_node_acl)
469 continue;
470 if (!se_sess->se_node_acl->dynamic_node_acl)
471 continue;
472 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
473 break;
474
475 len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
476 se_sess->se_node_acl->initiatorname);
477 len += 1;
478 }
479 spin_unlock_bh(&se_tpg->session_lock);
480
481 return len;
482 }
483 EXPORT_SYMBOL(target_show_dynamic_sessions);
484
485 static void target_complete_nacl(struct kref *kref)
486 {
487 struct se_node_acl *nacl = container_of(kref,
488 struct se_node_acl, acl_kref);
489 struct se_portal_group *se_tpg = nacl->se_tpg;
490
491 if (!nacl->dynamic_stop) {
492 complete(&nacl->acl_free_comp);
493 return;
494 }
495
496 mutex_lock(&se_tpg->acl_node_mutex);
497 list_del_init(&nacl->acl_list);
498 mutex_unlock(&se_tpg->acl_node_mutex);
499
500 core_tpg_wait_for_nacl_pr_ref(nacl);
501 core_free_device_list_for_node(nacl, se_tpg);
502 kfree(nacl);
503 }
504
505 void target_put_nacl(struct se_node_acl *nacl)
506 {
507 kref_put(&nacl->acl_kref, target_complete_nacl);
508 }
509 EXPORT_SYMBOL(target_put_nacl);
510
511 void transport_deregister_session_configfs(struct se_session *se_sess)
512 {
513 struct se_node_acl *se_nacl;
514 unsigned long flags;
515
516
517
518 se_nacl = se_sess->se_node_acl;
519 if (se_nacl) {
520 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
521 if (!list_empty(&se_sess->sess_acl_list))
522 list_del_init(&se_sess->sess_acl_list);
523
524
525
526
527
528 if (list_empty(&se_nacl->acl_sess_list))
529 se_nacl->nacl_sess = NULL;
530 else {
531 se_nacl->nacl_sess = container_of(
532 se_nacl->acl_sess_list.prev,
533 struct se_session, sess_acl_list);
534 }
535 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
536 }
537 }
538 EXPORT_SYMBOL(transport_deregister_session_configfs);
539
540 void transport_free_session(struct se_session *se_sess)
541 {
542 struct se_node_acl *se_nacl = se_sess->se_node_acl;
543
544
545
546
547
548 if (se_nacl) {
549 struct se_portal_group *se_tpg = se_nacl->se_tpg;
550 const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
551 unsigned long flags;
552
553 se_sess->se_node_acl = NULL;
554
555
556
557
558
559
560 mutex_lock(&se_tpg->acl_node_mutex);
561 if (se_nacl->dynamic_node_acl &&
562 !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
563 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
564 if (list_empty(&se_nacl->acl_sess_list))
565 se_nacl->dynamic_stop = true;
566 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
567
568 if (se_nacl->dynamic_stop)
569 list_del_init(&se_nacl->acl_list);
570 }
571 mutex_unlock(&se_tpg->acl_node_mutex);
572
573 if (se_nacl->dynamic_stop)
574 target_put_nacl(se_nacl);
575
576 target_put_nacl(se_nacl);
577 }
578 if (se_sess->sess_cmd_map) {
579 sbitmap_queue_free(&se_sess->sess_tag_pool);
580 kvfree(se_sess->sess_cmd_map);
581 }
582 percpu_ref_exit(&se_sess->cmd_count);
583 kmem_cache_free(se_sess_cache, se_sess);
584 }
585 EXPORT_SYMBOL(transport_free_session);
586
587 static int target_release_res(struct se_device *dev, void *data)
588 {
589 struct se_session *sess = data;
590
591 if (dev->reservation_holder == sess)
592 target_release_reservation(dev);
593 return 0;
594 }
595
596 void transport_deregister_session(struct se_session *se_sess)
597 {
598 struct se_portal_group *se_tpg = se_sess->se_tpg;
599 unsigned long flags;
600
601 if (!se_tpg) {
602 transport_free_session(se_sess);
603 return;
604 }
605
606 spin_lock_irqsave(&se_tpg->session_lock, flags);
607 list_del(&se_sess->sess_list);
608 se_sess->se_tpg = NULL;
609 se_sess->fabric_sess_ptr = NULL;
610 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
611
612
613
614
615
616 target_for_each_device(target_release_res, se_sess);
617
618 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
619 se_tpg->se_tpg_tfo->fabric_name);
620
621
622
623
624
625
626
627
628
629 transport_free_session(se_sess);
630 }
631 EXPORT_SYMBOL(transport_deregister_session);
632
633 void target_remove_session(struct se_session *se_sess)
634 {
635 transport_deregister_session_configfs(se_sess);
636 transport_deregister_session(se_sess);
637 }
638 EXPORT_SYMBOL(target_remove_session);
639
640 static void target_remove_from_state_list(struct se_cmd *cmd)
641 {
642 struct se_device *dev = cmd->se_dev;
643 unsigned long flags;
644
645 if (!dev)
646 return;
647
648 spin_lock_irqsave(&dev->execute_task_lock, flags);
649 if (cmd->state_active) {
650 list_del(&cmd->state_list);
651 cmd->state_active = false;
652 }
653 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
654 }
655
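/*
 * Drop the command from the device state list and hand it back to the fabric.
 * Returns 1 if a CMD_T_STOP waiter was woken instead, otherwise the result of
 * the fabric's ->check_stop_free() callback.
 */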
663 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
664 {
665 unsigned long flags;
666
667 target_remove_from_state_list(cmd);
668
669
670
671
672 cmd->se_lun = NULL;
673
674 spin_lock_irqsave(&cmd->t_state_lock, flags);
675
676
677
678
679 if (cmd->transport_state & CMD_T_STOP) {
680 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
681 __func__, __LINE__, cmd->tag);
682
683 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
684
685 complete_all(&cmd->t_transport_stop_comp);
686 return 1;
687 }
688 cmd->transport_state &= ~CMD_T_ACTIVE;
689 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
690
691
692
693
694
695
696
697
698 return cmd->se_tfo->check_stop_free(cmd);
699 }
700
701 static void transport_lun_remove_cmd(struct se_cmd *cmd)
702 {
703 struct se_lun *lun = cmd->se_lun;
704
705 if (!lun)
706 return;
707
708 if (cmpxchg(&cmd->lun_ref_active, true, false))
709 percpu_ref_put(&lun->lun_ref);
710 }
711
712 static void target_complete_failure_work(struct work_struct *work)
713 {
714 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
715
716 transport_generic_request_failure(cmd,
717 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
718 }
719
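/*
 * Return the command's sense buffer (recording its length) unless a CHECK
 * CONDITION has already been sent for this command.
 */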
724 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
725 {
726 struct se_device *dev = cmd->se_dev;
727
728 WARN_ON(!cmd->se_lun);
729
730 if (!dev)
731 return NULL;
732
733 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
734 return NULL;
735
736 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
737
738 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
739 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
740 return cmd->sense_buffer;
741 }
742
743 void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
744 {
745 unsigned char *cmd_sense_buf;
746 unsigned long flags;
747
748 spin_lock_irqsave(&cmd->t_state_lock, flags);
749 cmd_sense_buf = transport_get_sense_buffer(cmd);
750 if (!cmd_sense_buf) {
751 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
752 return;
753 }
754
755 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
756 memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
757 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
758 }
759 EXPORT_SYMBOL(transport_copy_sense_to_cmd);
760
761 static void target_handle_abort(struct se_cmd *cmd)
762 {
763 bool tas = cmd->transport_state & CMD_T_TAS;
764 bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
765 int ret;
766
767 pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);
768
769 if (tas) {
770 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
771 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
772 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
773 cmd->t_task_cdb[0], cmd->tag);
774 trace_target_cmd_complete(cmd);
775 ret = cmd->se_tfo->queue_status(cmd);
776 if (ret) {
777 transport_handle_queue_full(cmd, cmd->se_dev,
778 ret, false);
779 return;
780 }
781 } else {
782 cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
783 cmd->se_tfo->queue_tm_rsp(cmd);
784 }
785 } else {
786
787
788
789
790 cmd->se_tfo->aborted_task(cmd);
791 if (ack_kref)
792 WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
793
794
795
796
797
798 }
799
800 WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
801
802 transport_lun_remove_cmd(cmd);
803
804 transport_cmd_check_stop_to_fabric(cmd);
805 }
806
807 static void target_abort_work(struct work_struct *work)
808 {
809 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
810
811 target_handle_abort(cmd);
812 }
813
814 static bool target_cmd_interrupted(struct se_cmd *cmd)
815 {
816 int post_ret;
817
818 if (cmd->transport_state & CMD_T_ABORTED) {
819 if (cmd->transport_complete_callback)
820 cmd->transport_complete_callback(cmd, false, &post_ret);
821 INIT_WORK(&cmd->work, target_abort_work);
822 queue_work(target_completion_wq, &cmd->work);
823 return true;
824 } else if (cmd->transport_state & CMD_T_STOP) {
825 if (cmd->transport_complete_callback)
826 cmd->transport_complete_callback(cmd, false, &post_ret);
827 complete_all(&cmd->t_transport_stop_comp);
828 return true;
829 }
830
831 return false;
832 }
833
834
835 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
836 {
837 int success;
838 unsigned long flags;
839
840 if (target_cmd_interrupted(cmd))
841 return;
842
843 cmd->scsi_status = scsi_status;
844
845 spin_lock_irqsave(&cmd->t_state_lock, flags);
846 switch (cmd->scsi_status) {
847 case SAM_STAT_CHECK_CONDITION:
848 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
849 success = 1;
850 else
851 success = 0;
852 break;
853 default:
854 success = 1;
855 break;
856 }
857
858 cmd->t_state = TRANSPORT_COMPLETE;
859 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
860 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
861
862 INIT_WORK(&cmd->work, success ? target_complete_ok_work :
863 target_complete_failure_work);
864 if (cmd->se_cmd_flags & SCF_USE_CPUID)
865 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
866 else
867 queue_work(target_completion_wq, &cmd->work);
868 }
869 EXPORT_SYMBOL(target_complete_cmd);
870
871 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
872 {
873 if ((scsi_status == SAM_STAT_GOOD ||
874 cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
875 length < cmd->data_length) {
876 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
877 cmd->residual_count += cmd->data_length - length;
878 } else {
879 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
880 cmd->residual_count = cmd->data_length - length;
881 }
882
883 cmd->data_length = length;
884 }
885
886 target_complete_cmd(cmd, scsi_status);
887 }
888 EXPORT_SYMBOL(target_complete_cmd_with_length);
889
890 static void target_add_to_state_list(struct se_cmd *cmd)
891 {
892 struct se_device *dev = cmd->se_dev;
893 unsigned long flags;
894
895 spin_lock_irqsave(&dev->execute_task_lock, flags);
896 if (!cmd->state_active) {
897 list_add_tail(&cmd->state_list, &dev->state_list);
898 cmd->state_active = true;
899 }
900 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
901 }
902
903
904
905
906 static void transport_write_pending_qf(struct se_cmd *cmd);
907 static void transport_complete_qf(struct se_cmd *cmd);
908
909 void target_qf_do_work(struct work_struct *work)
910 {
911 struct se_device *dev = container_of(work, struct se_device,
912 qf_work_queue);
913 LIST_HEAD(qf_cmd_list);
914 struct se_cmd *cmd, *cmd_tmp;
915
916 spin_lock_irq(&dev->qf_cmd_lock);
917 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
918 spin_unlock_irq(&dev->qf_cmd_lock);
919
920 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
921 list_del(&cmd->se_qf_node);
922 atomic_dec_mb(&dev->dev_qf_count);
923
924 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
925 " context: %s\n", cmd->se_tfo->fabric_name, cmd,
926 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
927 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
928 : "UNKNOWN");
929
930 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
931 transport_write_pending_qf(cmd);
932 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
933 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
934 transport_complete_qf(cmd);
935 }
936 }
937
938 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
939 {
940 switch (cmd->data_direction) {
941 case DMA_NONE:
942 return "NONE";
943 case DMA_FROM_DEVICE:
944 return "READ";
945 case DMA_TO_DEVICE:
946 return "WRITE";
947 case DMA_BIDIRECTIONAL:
948 return "BIDI";
949 default:
950 break;
951 }
952
953 return "UNKNOWN";
954 }
955
956 void transport_dump_dev_state(
957 struct se_device *dev,
958 char *b,
959 int *bl)
960 {
961 *bl += sprintf(b + *bl, "Status: ");
962 if (dev->export_count)
963 *bl += sprintf(b + *bl, "ACTIVATED");
964 else
965 *bl += sprintf(b + *bl, "DEACTIVATED");
966
967 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
968 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
969 dev->dev_attrib.block_size,
970 dev->dev_attrib.hw_max_sectors);
971 *bl += sprintf(b + *bl, " ");
972 }
973
974 void transport_dump_vpd_proto_id(
975 struct t10_vpd *vpd,
976 unsigned char *p_buf,
977 int p_buf_len)
978 {
979 unsigned char buf[VPD_TMP_BUF_SIZE];
980 int len;
981
982 memset(buf, 0, VPD_TMP_BUF_SIZE);
983 len = sprintf(buf, "T10 VPD Protocol Identifier: ");
984
985 switch (vpd->protocol_identifier) {
986 case 0x00:
987 sprintf(buf+len, "Fibre Channel\n");
988 break;
989 case 0x10:
990 sprintf(buf+len, "Parallel SCSI\n");
991 break;
992 case 0x20:
993 sprintf(buf+len, "SSA\n");
994 break;
995 case 0x30:
996 sprintf(buf+len, "IEEE 1394\n");
997 break;
998 case 0x40:
999 sprintf(buf+len, "SCSI Remote Direct Memory Access"
1000 " Protocol\n");
1001 break;
1002 case 0x50:
1003 sprintf(buf+len, "Internet SCSI (iSCSI)\n");
1004 break;
1005 case 0x60:
1006 sprintf(buf+len, "SAS Serial SCSI Protocol\n");
1007 break;
1008 case 0x70:
1009 sprintf(buf+len, "Automation/Drive Interface Transport"
1010 " Protocol\n");
1011 break;
1012 case 0x80:
1013 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
1014 break;
1015 default:
1016 sprintf(buf+len, "Unknown 0x%02x\n",
1017 vpd->protocol_identifier);
1018 break;
1019 }
1020
1021 if (p_buf)
1022 strncpy(p_buf, buf, p_buf_len);
1023 else
1024 pr_debug("%s", buf);
1025 }
1026
1027 void
1028 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1029 {
1030
1031
1032
1033
1034
1035 if (page_83[1] & 0x80) {
1036 vpd->protocol_identifier = (page_83[0] & 0xf0);
1037 vpd->protocol_identifier_set = 1;
1038 transport_dump_vpd_proto_id(vpd, NULL, 0);
1039 }
1040 }
1041 EXPORT_SYMBOL(transport_set_vpd_proto_id);
1042
1043 int transport_dump_vpd_assoc(
1044 struct t10_vpd *vpd,
1045 unsigned char *p_buf,
1046 int p_buf_len)
1047 {
1048 unsigned char buf[VPD_TMP_BUF_SIZE];
1049 int ret = 0;
1050 int len;
1051
1052 memset(buf, 0, VPD_TMP_BUF_SIZE);
1053 len = sprintf(buf, "T10 VPD Identifier Association: ");
1054
1055 switch (vpd->association) {
1056 case 0x00:
1057 sprintf(buf+len, "addressed logical unit\n");
1058 break;
1059 case 0x10:
1060 sprintf(buf+len, "target port\n");
1061 break;
1062 case 0x20:
1063 sprintf(buf+len, "SCSI target device\n");
1064 break;
1065 default:
1066 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
1067 ret = -EINVAL;
1068 break;
1069 }
1070
1071 if (p_buf)
1072 strncpy(p_buf, buf, p_buf_len);
1073 else
1074 pr_debug("%s", buf);
1075
1076 return ret;
1077 }
1078
1079 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1080 {
1081
1082
1083
1084
1085
1086 vpd->association = (page_83[1] & 0x30);
1087 return transport_dump_vpd_assoc(vpd, NULL, 0);
1088 }
1089 EXPORT_SYMBOL(transport_set_vpd_assoc);
1090
1091 int transport_dump_vpd_ident_type(
1092 struct t10_vpd *vpd,
1093 unsigned char *p_buf,
1094 int p_buf_len)
1095 {
1096 unsigned char buf[VPD_TMP_BUF_SIZE];
1097 int ret = 0;
1098 int len;
1099
1100 memset(buf, 0, VPD_TMP_BUF_SIZE);
1101 len = sprintf(buf, "T10 VPD Identifier Type: ");
1102
1103 switch (vpd->device_identifier_type) {
1104 case 0x00:
1105 sprintf(buf+len, "Vendor specific\n");
1106 break;
1107 case 0x01:
1108 sprintf(buf+len, "T10 Vendor ID based\n");
1109 break;
1110 case 0x02:
1111 sprintf(buf+len, "EUI-64 based\n");
1112 break;
1113 case 0x03:
1114 sprintf(buf+len, "NAA\n");
1115 break;
1116 case 0x04:
1117 sprintf(buf+len, "Relative target port identifier\n");
1118 break;
1119 case 0x08:
1120 sprintf(buf+len, "SCSI name string\n");
1121 break;
1122 default:
1123 sprintf(buf+len, "Unsupported: 0x%02x\n",
1124 vpd->device_identifier_type);
1125 ret = -EINVAL;
1126 break;
1127 }
1128
1129 if (p_buf) {
1130 if (p_buf_len < strlen(buf)+1)
1131 return -EINVAL;
1132 strncpy(p_buf, buf, p_buf_len);
1133 } else {
1134 pr_debug("%s", buf);
1135 }
1136
1137 return ret;
1138 }
1139
1140 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1141 {
1142
1143
1144
1145
1146
1147 vpd->device_identifier_type = (page_83[1] & 0x0f);
1148 return transport_dump_vpd_ident_type(vpd, NULL, 0);
1149 }
1150 EXPORT_SYMBOL(transport_set_vpd_ident_type);
1151
1152 int transport_dump_vpd_ident(
1153 struct t10_vpd *vpd,
1154 unsigned char *p_buf,
1155 int p_buf_len)
1156 {
1157 unsigned char buf[VPD_TMP_BUF_SIZE];
1158 int ret = 0;
1159
1160 memset(buf, 0, VPD_TMP_BUF_SIZE);
1161
1162 switch (vpd->device_identifier_code_set) {
1163 case 0x01:
1164 snprintf(buf, sizeof(buf),
1165 "T10 VPD Binary Device Identifier: %s\n",
1166 &vpd->device_identifier[0]);
1167 break;
1168 case 0x02:
1169 snprintf(buf, sizeof(buf),
1170 "T10 VPD ASCII Device Identifier: %s\n",
1171 &vpd->device_identifier[0]);
1172 break;
1173 case 0x03:
1174 snprintf(buf, sizeof(buf),
1175 "T10 VPD UTF-8 Device Identifier: %s\n",
1176 &vpd->device_identifier[0]);
1177 break;
1178 default:
1179 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1180 " 0x%02x", vpd->device_identifier_code_set);
1181 ret = -EINVAL;
1182 break;
1183 }
1184
1185 if (p_buf)
1186 strncpy(p_buf, buf, p_buf_len);
1187 else
1188 pr_debug("%s", buf);
1189
1190 return ret;
1191 }
1192
1193 int
1194 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1195 {
1196 static const char hex_str[] = "0123456789abcdef";
1197 int j = 0, i = 4;
1198
1199
1200
1201
1202
1203
1204 vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1205 switch (vpd->device_identifier_code_set) {
1206 case 0x01:
1207 vpd->device_identifier[j++] =
1208 hex_str[vpd->device_identifier_type];
1209 while (i < (4 + page_83[3])) {
1210 vpd->device_identifier[j++] =
1211 hex_str[(page_83[i] & 0xf0) >> 4];
1212 vpd->device_identifier[j++] =
1213 hex_str[page_83[i] & 0x0f];
1214 i++;
1215 }
1216 break;
1217 case 0x02:
1218 case 0x03:
1219 while (i < (4 + page_83[3]))
1220 vpd->device_identifier[j++] = page_83[i++];
1221 break;
1222 default:
1223 break;
1224 }
1225
1226 return transport_dump_vpd_ident(vpd, NULL, 0);
1227 }
1228 EXPORT_SYMBOL(transport_set_vpd_ident);
1229
1230 static sense_reason_t
1231 target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
1232 unsigned int size)
1233 {
1234 u32 mtl;
1235
1236 if (!cmd->se_tfo->max_data_sg_nents)
1237 return TCM_NO_SENSE;
1238
1239
1240
1241
1242
1243
1244 mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
1245 if (cmd->data_length > mtl) {
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1258 cmd->residual_count = (size - mtl);
1259 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1260 u32 orig_dl = size + cmd->residual_count;
1261 cmd->residual_count = (orig_dl - mtl);
1262 } else {
1263 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1264 cmd->residual_count = (cmd->data_length - mtl);
1265 }
1266 cmd->data_length = mtl;
1267
1268
1269
1270
1271 if (cmd->prot_length) {
1272 u32 sectors = (mtl / dev->dev_attrib.block_size);
1273 cmd->prot_length = dev->prot_length * sectors;
1274 }
1275 }
1276 return TCM_NO_SENSE;
1277 }
1278
1279 sense_reason_t
1280 target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1281 {
1282 struct se_device *dev = cmd->se_dev;
1283
1284 if (cmd->unknown_data_length) {
1285 cmd->data_length = size;
1286 } else if (size != cmd->data_length) {
1287 pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
1288 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
1289 " 0x%02x\n", cmd->se_tfo->fabric_name,
1290 cmd->data_length, size, cmd->t_task_cdb[0]);
1291
1292 if (cmd->data_direction == DMA_TO_DEVICE) {
1293 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
1294 pr_err_ratelimited("Rejecting underflow/overflow"
1295 " for WRITE data CDB\n");
1296 return TCM_INVALID_CDB_FIELD;
1297 }
1298
1299
1300
1301
1302
1303
1304 if (size > cmd->data_length) {
1305 pr_err_ratelimited("Rejecting overflow for"
1306 " WRITE control CDB\n");
1307 return TCM_INVALID_CDB_FIELD;
1308 }
1309 }
1310
1311
1312
1313
1314 if (dev->dev_attrib.block_size != 512) {
1315 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
1316 " CDB on non 512-byte sector setup subsystem"
1317 " plugin: %s\n", dev->transport->name);
1318
1319 return TCM_INVALID_CDB_FIELD;
1320 }
1321
1322
1323
1324
1325
1326
1327 if (size > cmd->data_length) {
1328 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
1329 cmd->residual_count = (size - cmd->data_length);
1330 } else {
1331 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1332 cmd->residual_count = (cmd->data_length - size);
1333 cmd->data_length = size;
1334 }
1335 }
1336
1337 return target_check_max_data_sg_nents(cmd, dev, size);
1338
1339 }
1340
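/*
 * Initialise the fabric-independent fields of a struct se_cmd (lists,
 * completion, kref, fabric ops, session, data length, direction, task
 * attribute and sense buffer) before it is set up from a CDB and submitted.
 */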
1347 void transport_init_se_cmd(
1348 struct se_cmd *cmd,
1349 const struct target_core_fabric_ops *tfo,
1350 struct se_session *se_sess,
1351 u32 data_length,
1352 int data_direction,
1353 int task_attr,
1354 unsigned char *sense_buffer)
1355 {
1356 INIT_LIST_HEAD(&cmd->se_delayed_node);
1357 INIT_LIST_HEAD(&cmd->se_qf_node);
1358 INIT_LIST_HEAD(&cmd->se_cmd_list);
1359 INIT_LIST_HEAD(&cmd->state_list);
1360 init_completion(&cmd->t_transport_stop_comp);
1361 cmd->free_compl = NULL;
1362 cmd->abrt_compl = NULL;
1363 spin_lock_init(&cmd->t_state_lock);
1364 INIT_WORK(&cmd->work, NULL);
1365 kref_init(&cmd->cmd_kref);
1366
1367 cmd->se_tfo = tfo;
1368 cmd->se_sess = se_sess;
1369 cmd->data_length = data_length;
1370 cmd->data_direction = data_direction;
1371 cmd->sam_task_attr = task_attr;
1372 cmd->sense_buffer = sense_buffer;
1373
1374 cmd->state_active = false;
1375 }
1376 EXPORT_SYMBOL(transport_init_se_cmd);
1377
1378 static sense_reason_t
1379 transport_check_alloc_task_attr(struct se_cmd *cmd)
1380 {
1381 struct se_device *dev = cmd->se_dev;
1382
1383
1384
1385
1386
1387 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1388 return 0;
1389
1390 if (cmd->sam_task_attr == TCM_ACA_TAG) {
1391 pr_debug("SAM Task Attribute ACA"
1392 " emulation is not supported\n");
1393 return TCM_INVALID_CDB_FIELD;
1394 }
1395
1396 return 0;
1397 }
1398
1399 sense_reason_t
1400 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
1401 {
1402 struct se_device *dev = cmd->se_dev;
1403 sense_reason_t ret;
1404
1405
1406
1407
1408
1409 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1410 pr_err("Received SCSI CDB with command_size: %d that"
1411 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1412 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1413 return TCM_INVALID_CDB_FIELD;
1414 }
1415
1416
1417
1418
1419
1420 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1421 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1422 GFP_KERNEL);
1423 if (!cmd->t_task_cdb) {
1424 pr_err("Unable to allocate cmd->t_task_cdb"
1425 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1426 scsi_command_size(cdb),
1427 (unsigned long)sizeof(cmd->__t_task_cdb));
1428 return TCM_OUT_OF_RESOURCES;
1429 }
1430 } else
1431 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1432
1433
1434
1435 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1436
1437 trace_target_sequencer_start(cmd);
1438
1439 ret = dev->transport->parse_cdb(cmd);
1440 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
1441 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
1442 cmd->se_tfo->fabric_name,
1443 cmd->se_sess->se_node_acl->initiatorname,
1444 cmd->t_task_cdb[0]);
1445 if (ret)
1446 return ret;
1447
1448 ret = transport_check_alloc_task_attr(cmd);
1449 if (ret)
1450 return ret;
1451
1452 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1453 atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
1454 return 0;
1455 }
1456 EXPORT_SYMBOL(target_setup_cmd_from_cdb);
1457
1458
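/*
 * Dispatch an already initialised command to the backend from process
 * context.  Setup failures are reported to the initiator as CHECK CONDITION;
 * the function itself only fails for a missing se_lun or interrupt context.
 */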
1462 int transport_handle_cdb_direct(
1463 struct se_cmd *cmd)
1464 {
1465 sense_reason_t ret;
1466
1467 if (!cmd->se_lun) {
1468 dump_stack();
1469 pr_err("cmd->se_lun is NULL\n");
1470 return -EINVAL;
1471 }
1472 if (in_interrupt()) {
1473 dump_stack();
1474 pr_err("transport_generic_handle_cdb cannot be called"
1475 " from interrupt context\n");
1476 return -EINVAL;
1477 }
1478
1479
1480
1481
1482
1483
1484
1485
1486 cmd->t_state = TRANSPORT_NEW_CMD;
1487 cmd->transport_state |= CMD_T_ACTIVE;
1488
1489
1490
1491
1492
1493
1494 ret = transport_generic_new_cmd(cmd);
1495 if (ret)
1496 transport_generic_request_failure(cmd, ret);
1497 return 0;
1498 }
1499 EXPORT_SYMBOL(transport_handle_cdb_direct);
1500
1501 sense_reason_t
1502 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
1503 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
1504 {
1505 if (!sgl || !sgl_count)
1506 return 0;
1507
1508
1509
1510
1511
1512
1513 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1514 pr_warn("Rejecting SCSI DATA overflow for fabric using"
1515 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
1516 return TCM_INVALID_CDB_FIELD;
1517 }
1518
1519 cmd->t_data_sg = sgl;
1520 cmd->t_data_nents = sgl_count;
1521 cmd->t_bidi_data_sg = sgl_bidi;
1522 cmd->t_bidi_data_nents = sgl_bidi_count;
1523
1524 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1525 return 0;
1526 }
1527
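/*
 * Initialise @se_cmd and submit it for the given LUN, using the caller's
 * scatterlists for data, BIDI and protection information instead of having
 * target-core allocate them.  Must be called from process context.  A
 * non-zero return signals active I/O shutdown; all other setup errors are
 * reported to the initiator as CHECK CONDITION and zero is returned.
 */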
1557 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
1558 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1559 u32 data_length, int task_attr, int data_dir, int flags,
1560 struct scatterlist *sgl, u32 sgl_count,
1561 struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
1562 struct scatterlist *sgl_prot, u32 sgl_prot_count)
1563 {
1564 struct se_portal_group *se_tpg;
1565 sense_reason_t rc;
1566 int ret;
1567
1568 se_tpg = se_sess->se_tpg;
1569 BUG_ON(!se_tpg);
1570 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1571 BUG_ON(in_interrupt());
1572
1573
1574
1575
1576
1577 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1578 data_length, data_dir, task_attr, sense);
1579
1580 if (flags & TARGET_SCF_USE_CPUID)
1581 se_cmd->se_cmd_flags |= SCF_USE_CPUID;
1582 else
1583 se_cmd->cpuid = WORK_CPU_UNBOUND;
1584
1585 if (flags & TARGET_SCF_UNKNOWN_SIZE)
1586 se_cmd->unknown_data_length = 1;
1587
1588
1589
1590
1591
1592
1593 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1594 if (ret)
1595 return ret;
1596
1597
1598
1599 if (flags & TARGET_SCF_BIDI_OP)
1600 se_cmd->se_cmd_flags |= SCF_BIDI;
1601
1602
1603
1604 rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
1605 if (rc) {
1606 transport_send_check_condition_and_sense(se_cmd, rc, 0);
1607 target_put_sess_cmd(se_cmd);
1608 return 0;
1609 }
1610
1611 rc = target_setup_cmd_from_cdb(se_cmd, cdb);
1612 if (rc != 0) {
1613 transport_generic_request_failure(se_cmd, rc);
1614 return 0;
1615 }
1616
1617
1618
1619
1620
1621 if (sgl_prot_count) {
1622 se_cmd->t_prot_sg = sgl_prot;
1623 se_cmd->t_prot_nents = sgl_prot_count;
1624 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
1625 }
1626
1627
1628
1629
1630
1631
1632 if (sgl_count != 0) {
1633 BUG_ON(!sgl);
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
1644 se_cmd->data_direction == DMA_FROM_DEVICE) {
1645 unsigned char *buf = NULL;
1646
1647 if (sgl)
1648 buf = kmap(sg_page(sgl)) + sgl->offset;
1649
1650 if (buf) {
1651 memset(buf, 0, sgl->length);
1652 kunmap(sg_page(sgl));
1653 }
1654 }
1655
1656 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
1657 sgl_bidi, sgl_bidi_count);
1658 if (rc != 0) {
1659 transport_generic_request_failure(se_cmd, rc);
1660 return 0;
1661 }
1662 }
1663
1664
1665
1666
1667
1668 core_alua_check_nonop_delay(se_cmd);
1669
1670 transport_handle_cdb_direct(se_cmd);
1671 return 0;
1672 }
1673 EXPORT_SYMBOL(target_submit_cmd_map_sgls);
1674
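/*
 * Convenience wrapper around target_submit_cmd_map_sgls() for fabrics that
 * let target-core allocate the data buffers: same semantics, but with no
 * caller-provided scatterlists.
 */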
1699 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1700 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1701 u32 data_length, int task_attr, int data_dir, int flags)
1702 {
1703 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
1704 unpacked_lun, data_length, task_attr, data_dir,
1705 flags, NULL, 0, NULL, 0, NULL, 0);
1706 }
1707 EXPORT_SYMBOL(target_submit_cmd);
1708
1709 static void target_complete_tmr_failure(struct work_struct *work)
1710 {
1711 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1712
1713 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1714 se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1715
1716 transport_lun_remove_cmd(se_cmd);
1717 transport_cmd_check_stop_to_fabric(se_cmd);
1718 }
1719
1720 static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
1721 u64 *unpacked_lun)
1722 {
1723 struct se_cmd *se_cmd;
1724 unsigned long flags;
1725 bool ret = false;
1726
1727 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
1728 list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
1729 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
1730 continue;
1731
1732 if (se_cmd->tag == tag) {
1733 *unpacked_lun = se_cmd->orig_fe_lun;
1734 ret = true;
1735 break;
1736 }
1737 }
1738 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
1739
1740 return ret;
1741 }
1742
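/*
 * Initialise @se_cmd as a task management request and submit it.  If the LUN
 * (or, for ABORT_TASK, the referenced tag) cannot be resolved, the TMR is
 * completed with an error response from a workqueue and zero is returned.
 */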
1760 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1761 unsigned char *sense, u64 unpacked_lun,
1762 void *fabric_tmr_ptr, unsigned char tm_type,
1763 gfp_t gfp, u64 tag, int flags)
1764 {
1765 struct se_portal_group *se_tpg;
1766 int ret;
1767
1768 se_tpg = se_sess->se_tpg;
1769 BUG_ON(!se_tpg);
1770
1771 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1772 0, DMA_NONE, TCM_SIMPLE_TAG, sense);
1773
1774
1775
1776
1777 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1778 if (ret < 0)
1779 return -ENOMEM;
1780
1781 if (tm_type == TMR_ABORT_TASK)
1782 se_cmd->se_tmr_req->ref_task_tag = tag;
1783
1784
1785 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1786 if (ret) {
1787 core_tmr_release_req(se_cmd->se_tmr_req);
1788 return ret;
1789 }
1790
1791
1792
1793
1794
1795 if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
1796 if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun))
1797 goto failure;
1798 }
1799
1800 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
1801 if (ret)
1802 goto failure;
1803
1804 transport_generic_handle_tmr(se_cmd);
1805 return 0;
1806
1807
1808
1809
1810
1811 failure:
1812 INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1813 schedule_work(&se_cmd->work);
1814 return 0;
1815 }
1816 EXPORT_SYMBOL(target_submit_tmr);
1817
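/*
 * Turn an internal sense_reason_t failure into SCSI status and sense data for
 * the initiator, handling aborted commands and queue-full retries.
 */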
1821 void transport_generic_request_failure(struct se_cmd *cmd,
1822 sense_reason_t sense_reason)
1823 {
1824 int ret = 0, post_ret;
1825
1826 pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
1827 sense_reason);
1828 target_show_cmd("-----[ ", cmd);
1829
1830
1831
1832
1833 transport_complete_task_attr(cmd);
1834
1835 if (cmd->transport_complete_callback)
1836 cmd->transport_complete_callback(cmd, false, &post_ret);
1837
1838 if (cmd->transport_state & CMD_T_ABORTED) {
1839 INIT_WORK(&cmd->work, target_abort_work);
1840 queue_work(target_completion_wq, &cmd->work);
1841 return;
1842 }
1843
1844 switch (sense_reason) {
1845 case TCM_NON_EXISTENT_LUN:
1846 case TCM_UNSUPPORTED_SCSI_OPCODE:
1847 case TCM_INVALID_CDB_FIELD:
1848 case TCM_INVALID_PARAMETER_LIST:
1849 case TCM_PARAMETER_LIST_LENGTH_ERROR:
1850 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1851 case TCM_UNKNOWN_MODE_PAGE:
1852 case TCM_WRITE_PROTECTED:
1853 case TCM_ADDRESS_OUT_OF_RANGE:
1854 case TCM_CHECK_CONDITION_ABORT_CMD:
1855 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1856 case TCM_CHECK_CONDITION_NOT_READY:
1857 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
1858 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
1859 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
1860 case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
1861 case TCM_TOO_MANY_TARGET_DESCS:
1862 case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
1863 case TCM_TOO_MANY_SEGMENT_DESCS:
1864 case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
1865 break;
1866 case TCM_OUT_OF_RESOURCES:
1867 cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
1868 goto queue_status;
1869 case TCM_LUN_BUSY:
1870 cmd->scsi_status = SAM_STAT_BUSY;
1871 goto queue_status;
1872 case TCM_RESERVATION_CONFLICT:
1873
1874
1875
1876
1877
1878
1879 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1880
1881
1882
1883
1884
1885
1886
1887 if (cmd->se_sess &&
1888 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
1889 target_ua_allocate_lun(cmd->se_sess->se_node_acl,
1890 cmd->orig_fe_lun, 0x2C,
1891 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1892 }
1893
1894 goto queue_status;
1895 default:
1896 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1897 cmd->t_task_cdb[0], sense_reason);
1898 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1899 break;
1900 }
1901
1902 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1903 if (ret)
1904 goto queue_full;
1905
1906 check_stop:
1907 transport_lun_remove_cmd(cmd);
1908 transport_cmd_check_stop_to_fabric(cmd);
1909 return;
1910
1911 queue_status:
1912 trace_target_cmd_complete(cmd);
1913 ret = cmd->se_tfo->queue_status(cmd);
1914 if (!ret)
1915 goto check_stop;
1916 queue_full:
1917 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
1918 }
1919 EXPORT_SYMBOL(transport_generic_request_failure);
1920
1921 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
1922 {
1923 sense_reason_t ret;
1924
1925 if (!cmd->execute_cmd) {
1926 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1927 goto err;
1928 }
1929 if (do_checks) {
1930
1931
1932
1933
1934
1935
1936 ret = target_scsi3_ua_check(cmd);
1937 if (ret)
1938 goto err;
1939
1940 ret = target_alua_state_check(cmd);
1941 if (ret)
1942 goto err;
1943
1944 ret = target_check_reservation(cmd);
1945 if (ret) {
1946 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1947 goto err;
1948 }
1949 }
1950
1951 ret = cmd->execute_cmd(cmd);
1952 if (!ret)
1953 return;
1954 err:
1955 spin_lock_irq(&cmd->t_state_lock);
1956 cmd->transport_state &= ~CMD_T_SENT;
1957 spin_unlock_irq(&cmd->t_state_lock);
1958
1959 transport_generic_request_failure(cmd, ret);
1960 }
1961
1962 static int target_write_prot_action(struct se_cmd *cmd)
1963 {
1964 u32 sectors;
1965
1966
1967
1968
1969
1970 switch (cmd->prot_op) {
1971 case TARGET_PROT_DOUT_INSERT:
1972 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
1973 sbc_dif_generate(cmd);
1974 break;
1975 case TARGET_PROT_DOUT_STRIP:
1976 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
1977 break;
1978
1979 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
1980 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
1981 sectors, 0, cmd->t_prot_sg, 0);
1982 if (unlikely(cmd->pi_err)) {
1983 spin_lock_irq(&cmd->t_state_lock);
1984 cmd->transport_state &= ~CMD_T_SENT;
1985 spin_unlock_irq(&cmd->t_state_lock);
1986 transport_generic_request_failure(cmd, cmd->pi_err);
1987 return -1;
1988 }
1989 break;
1990 default:
1991 break;
1992 }
1993
1994 return 0;
1995 }
1996
1997 static bool target_handle_task_attr(struct se_cmd *cmd)
1998 {
1999 struct se_device *dev = cmd->se_dev;
2000
2001 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2002 return false;
2003
2004 cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
2005
2006
2007
2008
2009
2010 switch (cmd->sam_task_attr) {
2011 case TCM_HEAD_TAG:
2012 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
2013 cmd->t_task_cdb[0]);
2014 return false;
2015 case TCM_ORDERED_TAG:
2016 atomic_inc_mb(&dev->dev_ordered_sync);
2017
2018 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
2019 cmd->t_task_cdb[0]);
2020
2021
2022
2023
2024
2025 if (!atomic_read(&dev->simple_cmds))
2026 return false;
2027 break;
2028 default:
2029
2030
2031
2032 atomic_inc_mb(&dev->simple_cmds);
2033 break;
2034 }
2035
2036 if (atomic_read(&dev->dev_ordered_sync) == 0)
2037 return false;
2038
2039 spin_lock(&dev->delayed_cmd_lock);
2040 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
2041 spin_unlock(&dev->delayed_cmd_lock);
2042
2043 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
2044 cmd->t_task_cdb[0], cmd->sam_task_attr);
2045 return true;
2046 }
2047
2048 void target_execute_cmd(struct se_cmd *cmd)
2049 {
2050
2051
2052
2053
2054
2055
2056 if (target_cmd_interrupted(cmd))
2057 return;
2058
2059 spin_lock_irq(&cmd->t_state_lock);
2060 cmd->t_state = TRANSPORT_PROCESSING;
2061 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
2062 spin_unlock_irq(&cmd->t_state_lock);
2063
2064 if (target_write_prot_action(cmd))
2065 return;
2066
2067 if (target_handle_task_attr(cmd)) {
2068 spin_lock_irq(&cmd->t_state_lock);
2069 cmd->transport_state &= ~CMD_T_SENT;
2070 spin_unlock_irq(&cmd->t_state_lock);
2071 return;
2072 }
2073
2074 __target_execute_cmd(cmd, true);
2075 }
2076 EXPORT_SYMBOL(target_execute_cmd);
2077
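/*
 * Re-issue commands that were held back for SAM task-attribute ordering,
 * stopping after the next ORDERED command (which forms a new barrier).
 */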
2082 static void target_restart_delayed_cmds(struct se_device *dev)
2083 {
2084 for (;;) {
2085 struct se_cmd *cmd;
2086
2087 spin_lock(&dev->delayed_cmd_lock);
2088 if (list_empty(&dev->delayed_cmd_list)) {
2089 spin_unlock(&dev->delayed_cmd_lock);
2090 break;
2091 }
2092
2093 cmd = list_entry(dev->delayed_cmd_list.next,
2094 struct se_cmd, se_delayed_node);
2095 list_del(&cmd->se_delayed_node);
2096 spin_unlock(&dev->delayed_cmd_lock);
2097
2098 cmd->transport_state |= CMD_T_SENT;
2099
2100 __target_execute_cmd(cmd, true);
2101
2102 if (cmd->sam_task_attr == TCM_ORDERED_TAG)
2103 break;
2104 }
2105 }
2106
2107
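/*
 * Called on completion to update the SIMPLE/HEAD/ORDERED bookkeeping and
 * kick any commands that were delayed behind this one.
 */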
2111 static void transport_complete_task_attr(struct se_cmd *cmd)
2112 {
2113 struct se_device *dev = cmd->se_dev;
2114
2115 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2116 return;
2117
2118 if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
2119 goto restart;
2120
2121 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
2122 atomic_dec_mb(&dev->simple_cmds);
2123 dev->dev_cur_ordered_id++;
2124 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
2125 dev->dev_cur_ordered_id++;
2126 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
2127 dev->dev_cur_ordered_id);
2128 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
2129 atomic_dec_mb(&dev->dev_ordered_sync);
2130
2131 dev->dev_cur_ordered_id++;
2132 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
2133 dev->dev_cur_ordered_id);
2134 }
2135 cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
2136
2137 restart:
2138 target_restart_delayed_cmds(dev);
2139 }
2140
2141 static void transport_complete_qf(struct se_cmd *cmd)
2142 {
2143 int ret = 0;
2144
2145 transport_complete_task_attr(cmd);
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155 if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
2156 if (cmd->scsi_status)
2157 goto queue_status;
2158
2159 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
2160 goto queue_status;
2161 }
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2173 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
2174 goto queue_status;
2175
2176 switch (cmd->data_direction) {
2177 case DMA_FROM_DEVICE:
2178
2179 if (cmd->scsi_status &&
2180 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2181 goto queue_status;
2182
2183 trace_target_cmd_complete(cmd);
2184 ret = cmd->se_tfo->queue_data_in(cmd);
2185 break;
2186 case DMA_TO_DEVICE:
2187 if (cmd->se_cmd_flags & SCF_BIDI) {
2188 ret = cmd->se_tfo->queue_data_in(cmd);
2189 break;
2190 }
2191
2192 case DMA_NONE:
2193 queue_status:
2194 trace_target_cmd_complete(cmd);
2195 ret = cmd->se_tfo->queue_status(cmd);
2196 break;
2197 default:
2198 break;
2199 }
2200
2201 if (ret < 0) {
2202 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2203 return;
2204 }
2205 transport_lun_remove_cmd(cmd);
2206 transport_cmd_check_stop_to_fabric(cmd);
2207 }
2208
2209 static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
2210 int err, bool write_pending)
2211 {
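/*
 * -EAGAIN or -ENOMEM from the fabric callback means "retry later from process
 * context"; any other error makes transport_complete_qf() send CHECK
 * CONDITION instead of retrying the data/status callbacks.
 */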
2220 if (err == -EAGAIN || err == -ENOMEM) {
2221 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
2222 TRANSPORT_COMPLETE_QF_OK;
2223 } else {
2224 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
2225 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
2226 }
2227
2228 spin_lock_irq(&dev->qf_cmd_lock);
2229 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
2230 atomic_inc_mb(&dev->dev_qf_count);
2231 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
2232
2233 schedule_work(&cmd->se_dev->qf_work_queue);
2234 }
2235
2236 static bool target_read_prot_action(struct se_cmd *cmd)
2237 {
2238 switch (cmd->prot_op) {
2239 case TARGET_PROT_DIN_STRIP:
2240 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
2241 u32 sectors = cmd->data_length >>
2242 ilog2(cmd->se_dev->dev_attrib.block_size);
2243
2244 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2245 sectors, 0, cmd->t_prot_sg,
2246 0);
2247 if (cmd->pi_err)
2248 return true;
2249 }
2250 break;
2251 case TARGET_PROT_DIN_INSERT:
2252 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
2253 break;
2254
2255 sbc_dif_generate(cmd);
2256 break;
2257 default:
2258 break;
2259 }
2260
2261 return false;
2262 }
2263
2264 static void target_complete_ok_work(struct work_struct *work)
2265 {
2266 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2267 int ret;
2268
2269
2270
2271
2272
2273
2274 transport_complete_task_attr(cmd);
2275
2276
2277
2278
2279
2280 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2281 schedule_work(&cmd->se_dev->qf_work_queue);
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2293 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2294 WARN_ON(!cmd->scsi_status);
2295 ret = transport_send_check_condition_and_sense(
2296 cmd, 0, 1);
2297 if (ret)
2298 goto queue_full;
2299
2300 transport_lun_remove_cmd(cmd);
2301 transport_cmd_check_stop_to_fabric(cmd);
2302 return;
2303 }
2304
2305
2306
2307
2308 if (cmd->transport_complete_callback) {
2309 sense_reason_t rc;
2310 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
2311 bool zero_dl = !(cmd->data_length);
2312 int post_ret = 0;
2313
2314 rc = cmd->transport_complete_callback(cmd, true, &post_ret);
2315 if (!rc && !post_ret) {
2316 if (caw && zero_dl)
2317 goto queue_rsp;
2318
2319 return;
2320 } else if (rc) {
2321 ret = transport_send_check_condition_and_sense(cmd,
2322 rc, 0);
2323 if (ret)
2324 goto queue_full;
2325
2326 transport_lun_remove_cmd(cmd);
2327 transport_cmd_check_stop_to_fabric(cmd);
2328 return;
2329 }
2330 }
2331
2332 queue_rsp:
2333 switch (cmd->data_direction) {
2334 case DMA_FROM_DEVICE:
2335 /*
2336  * For a READ that completed with a non-GOOD SCSI status, skip the
2337  * data transfer and only return the status, unless the command is
2338  * flagged SCF_TREAT_READ_AS_NORMAL, in which case the data is
2339  * returned anyway.
2340  */
2341
2342
2343
2344
2345 if (cmd->scsi_status &&
2346 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2347 goto queue_status;
2348
2349 atomic_long_add(cmd->data_length,
2350 &cmd->se_lun->lun_stats.tx_data_octets);
2351
2352
2353
2354
2355
2356 if (target_read_prot_action(cmd)) {
2357 ret = transport_send_check_condition_and_sense(cmd,
2358 cmd->pi_err, 0);
2359 if (ret)
2360 goto queue_full;
2361
2362 transport_lun_remove_cmd(cmd);
2363 transport_cmd_check_stop_to_fabric(cmd);
2364 return;
2365 }
2366
2367 trace_target_cmd_complete(cmd);
2368 ret = cmd->se_tfo->queue_data_in(cmd);
2369 if (ret)
2370 goto queue_full;
2371 break;
2372 case DMA_TO_DEVICE:
2373 atomic_long_add(cmd->data_length,
2374 &cmd->se_lun->lun_stats.rx_data_octets);
2375
2376
2377
2378 if (cmd->se_cmd_flags & SCF_BIDI) {
2379 atomic_long_add(cmd->data_length,
2380 &cmd->se_lun->lun_stats.tx_data_octets);
2381 ret = cmd->se_tfo->queue_data_in(cmd);
2382 if (ret)
2383 goto queue_full;
2384 break;
2385 }
2386 /* fall through */
2387 case DMA_NONE:
2388 queue_status:
2389 trace_target_cmd_complete(cmd);
2390 ret = cmd->se_tfo->queue_status(cmd);
2391 if (ret)
2392 goto queue_full;
2393 break;
2394 default:
2395 break;
2396 }
2397
2398 transport_lun_remove_cmd(cmd);
2399 transport_cmd_check_stop_to_fabric(cmd);
2400 return;
2401
2402 queue_full:
2403 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2404 " data_direction: %d\n", cmd, cmd->data_direction);
2405
2406 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2407 }
2408
2409 void target_free_sgl(struct scatterlist *sgl, int nents)
2410 {
2411 sgl_free_n_order(sgl, nents, 0);
2412 }
2413 EXPORT_SYMBOL(target_free_sgl);
2414
2415 static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
2416 {
2417
2418
2419
2420
2421 if (!cmd->t_data_sg_orig)
2422 return;
2423
2424 kfree(cmd->t_data_sg);
2425 cmd->t_data_sg = cmd->t_data_sg_orig;
2426 cmd->t_data_sg_orig = NULL;
2427 cmd->t_data_nents = cmd->t_data_nents_orig;
2428 cmd->t_data_nents_orig = 0;
2429 }
2430
2431 static inline void transport_free_pages(struct se_cmd *cmd)
2432 {
2433 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2434 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
2435 cmd->t_prot_sg = NULL;
2436 cmd->t_prot_nents = 0;
2437 }
2438
2439 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
2440
2441
2442
2443
2444 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
2445 target_free_sgl(cmd->t_bidi_data_sg,
2446 cmd->t_bidi_data_nents);
2447 cmd->t_bidi_data_sg = NULL;
2448 cmd->t_bidi_data_nents = 0;
2449 }
2450 transport_reset_sgl_orig(cmd);
2451 return;
2452 }
2453 transport_reset_sgl_orig(cmd);
2454
2455 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
2456 cmd->t_data_sg = NULL;
2457 cmd->t_data_nents = 0;
2458
2459 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
2460 cmd->t_bidi_data_sg = NULL;
2461 cmd->t_bidi_data_nents = 0;
2462 }
2463
2464 void *transport_kmap_data_sg(struct se_cmd *cmd)
2465 {
2466 struct scatterlist *sg = cmd->t_data_sg;
2467 struct page **pages;
2468 int i;
2469
2470
2471
2472
2473
2474
2475 if (!cmd->t_data_nents)
2476 return NULL;
2477
2478 BUG_ON(!sg);
2479 if (cmd->t_data_nents == 1)
2480 return kmap(sg_page(sg)) + sg->offset;
2481
2482
2483 pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL);
2484 if (!pages)
2485 return NULL;
2486
2487
2488 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2489 pages[i] = sg_page(sg);
2490 }
2491
2492 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
2493 kfree(pages);
2494 if (!cmd->t_data_vmap)
2495 return NULL;
2496
2497 return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
2498 }
2499 EXPORT_SYMBOL(transport_kmap_data_sg);
2500
2501 void transport_kunmap_data_sg(struct se_cmd *cmd)
2502 {
2503 if (!cmd->t_data_nents) {
2504 return;
2505 } else if (cmd->t_data_nents == 1) {
2506 kunmap(sg_page(cmd->t_data_sg));
2507 return;
2508 }
2509
2510 vunmap(cmd->t_data_vmap);
2511 cmd->t_data_vmap = NULL;
2512 }
2513 EXPORT_SYMBOL(transport_kunmap_data_sg);
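
/*
 * Illustrative sketch, not part of this file: typical pairing of
 * transport_kmap_data_sg()/transport_kunmap_data_sg() by emulation code
 * that needs a linear view of the command's data buffer.  example_*
 * names are hypothetical.
 */
static sense_reason_t example_fill_response(struct se_cmd *cmd)
{
	unsigned char *buf;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	buf[0] = 0x00;	/* example payload byte */

	transport_kunmap_data_sg(cmd);
	return TCM_NO_SENSE;
}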
2514
2515 int
2516 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
2517 bool zero_page, bool chainable)
2518 {
2519 gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
2520
2521 *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
2522 return *sgl ? 0 : -ENOMEM;
2523 }
2524 EXPORT_SYMBOL(target_alloc_sgl);
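
/*
 * Illustrative sketch, not part of this file: pairing target_alloc_sgl()
 * with target_free_sgl() for a privately owned buffer.  The 64 KiB length
 * and the example_* name are arbitrary.
 */
static int example_with_private_sgl(void)
{
	struct scatterlist *sgl;
	unsigned int nents;
	int ret;

	/* Zeroed, non-chainable SGL covering 64 KiB. */
	ret = target_alloc_sgl(&sgl, &nents, 65536, true, false);
	if (ret < 0)
		return ret;	/* -ENOMEM */

	/* ... fill the pages or hand the SGL to a backend ... */

	target_free_sgl(sgl, nents);
	return 0;
}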
2525
2526
2527
2528
2529
2530
2531 sense_reason_t
2532 transport_generic_new_cmd(struct se_cmd *cmd)
2533 {
2534 unsigned long flags;
2535 int ret = 0;
2536 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
2537
2538 if (cmd->prot_op != TARGET_PROT_NORMAL &&
2539 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2540 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
2541 cmd->prot_length, true, false);
2542 if (ret < 0)
2543 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2544 }
2545
2546
2547 /*
2548  * Allocate the data SGL here unless the fabric already provided the
2549  * memory via transport_generic_map_mem_to_cmd() (the NOALLOC case).
2550  */
2551 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2552 cmd->data_length) {
2553
2554 if ((cmd->se_cmd_flags & SCF_BIDI) ||
2555 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
2556 u32 bidi_length;
2557
2558 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
2559 bidi_length = cmd->t_task_nolb *
2560 cmd->se_dev->dev_attrib.block_size;
2561 else
2562 bidi_length = cmd->data_length;
2563
2564 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2565 &cmd->t_bidi_data_nents,
2566 bidi_length, zero_flag, false);
2567 if (ret < 0)
2568 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2569 }
2570
2571 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
2572 cmd->data_length, zero_flag, false);
2573 if (ret < 0)
2574 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2575 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2576 cmd->data_length) {
2577
2578
2579
2580
2581 u32 caw_length = cmd->t_task_nolb *
2582 cmd->se_dev->dev_attrib.block_size;
2583
2584 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2585 &cmd->t_bidi_data_nents,
2586 caw_length, zero_flag, false);
2587 if (ret < 0)
2588 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2589 }
2590
2591 /*
2592  * Commands carrying no WRITE payload can execute immediately; WRITEs
2593  * must first fetch their data from the fabric via ->write_pending().
2594  */
2595 target_add_to_state_list(cmd);
2596 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
2597 target_execute_cmd(cmd);
2598 return 0;
2599 }
2600
2601 spin_lock_irqsave(&cmd->t_state_lock, flags);
2602 cmd->t_state = TRANSPORT_WRITE_PENDING;
2603
2604
2605
2606
2607 if (cmd->transport_state & CMD_T_STOP &&
2608 !cmd->se_tfo->write_pending_must_be_called) {
2609 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
2610 __func__, __LINE__, cmd->tag);
2611
2612 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2613
2614 complete_all(&cmd->t_transport_stop_comp);
2615 return 0;
2616 }
2617 cmd->transport_state &= ~CMD_T_ACTIVE;
2618 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2619
2620 ret = cmd->se_tfo->write_pending(cmd);
2621 if (ret)
2622 goto queue_full;
2623
2624 return 0;
2625
2626 queue_full:
2627 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
2628 transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2629 return 0;
2630 }
2631 EXPORT_SYMBOL(transport_generic_new_cmd);
2632
2633 static void transport_write_pending_qf(struct se_cmd *cmd)
2634 {
2635 unsigned long flags;
2636 int ret;
2637 bool stop;
2638
2639 spin_lock_irqsave(&cmd->t_state_lock, flags);
2640 stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
2641 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2642
2643 if (stop) {
2644 pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
2645 __func__, __LINE__, cmd->tag);
2646 complete_all(&cmd->t_transport_stop_comp);
2647 return;
2648 }
2649
2650 ret = cmd->se_tfo->write_pending(cmd);
2651 if (ret) {
2652 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
2653 cmd);
2654 transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2655 }
2656 }
2657
2658 static bool
2659 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2660 unsigned long *flags);
2661
2662 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2663 {
2664 unsigned long flags;
2665
2666 spin_lock_irqsave(&cmd->t_state_lock, flags);
2667 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2668 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2669 }
2670
2671
2672 /*
2673  * Drop a command reference and wait until target_release_cmd_kref() has run.
2674  */
2675 void target_put_cmd_and_wait(struct se_cmd *cmd)
2676 {
2677 DECLARE_COMPLETION_ONSTACK(compl);
2678
2679 WARN_ON_ONCE(cmd->abrt_compl);
2680 cmd->abrt_compl = &compl;
2681 target_put_sess_cmd(cmd);
2682 wait_for_completion(&compl);
2683 }
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699 /*
2700  * transport_generic_free_cmd - release a command once the fabric is done
2701  * @cmd: command to release
2702  * @wait_for_tasks: if true, wait for the command to stop executing first
2703  *
2704  * Drop the fabric's reference on @cmd.  If the command was aborted, also
2705  * wait until the abort path has dropped its reference.  Returns 1 if this
2706  * call waited for an aborted command, else target_put_sess_cmd()'s result.
2707  */
2708 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2709 {
2710 DECLARE_COMPLETION_ONSTACK(compl);
2711 int ret = 0;
2712 bool aborted = false, tas = false;
2713
2714 if (wait_for_tasks)
2715 target_wait_free_cmd(cmd, &aborted, &tas);
2716
2717 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
2718
2719
2720
2721
2722
2723 if (cmd->state_active)
2724 target_remove_from_state_list(cmd);
2725
2726 if (cmd->se_lun)
2727 transport_lun_remove_cmd(cmd);
2728 }
2729 if (aborted)
2730 cmd->free_compl = &compl;
2731 ret = target_put_sess_cmd(cmd);
2732 if (aborted) {
2733 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2734 wait_for_completion(&compl);
2735 ret = 1;
2736 }
2737 return ret;
2738 }
2739 EXPORT_SYMBOL(transport_generic_free_cmd);
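
/*
 * Illustrative sketch, not part of this file: a fabric driver's
 * response-acknowledged path dropping its command reference.  Passing
 * wait_for_tasks = false is the common case; true additionally waits for
 * the command to stop executing first.  example_* is hypothetical.
 */
static void example_response_acked(struct se_cmd *cmd)
{
	transport_generic_free_cmd(cmd, false);
}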
2740
2741
2742 /*
2743  * target_get_sess_cmd - track @se_cmd on the session command list; with
2744  * @ack_kref set, take an extra reference for the fabric to drop later.
2745  */
2746 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2747 {
2748 struct se_session *se_sess = se_cmd->se_sess;
2749 unsigned long flags;
2750 int ret = 0;
2751
2752
2753
2754
2755
2756
2757 if (ack_kref) {
2758 if (!kref_get_unless_zero(&se_cmd->cmd_kref))
2759 return -EINVAL;
2760
2761 se_cmd->se_cmd_flags |= SCF_ACK_KREF;
2762 }
2763
2764 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2765 if (se_sess->sess_tearing_down) {
2766 ret = -ESHUTDOWN;
2767 goto out;
2768 }
2769 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2770 percpu_ref_get(&se_sess->cmd_count);
2771 out:
2772 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2773
2774 if (ret && ack_kref)
2775 target_put_sess_cmd(se_cmd);
2776
2777 return ret;
2778 }
2779 EXPORT_SYMBOL(target_get_sess_cmd);
2780
2781 static void target_free_cmd_mem(struct se_cmd *cmd)
2782 {
2783 transport_free_pages(cmd);
2784
2785 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2786 core_tmr_release_req(cmd->se_tmr_req);
2787 if (cmd->t_task_cdb != cmd->__t_task_cdb)
2788 kfree(cmd->t_task_cdb);
2789 }
2790
2791 static void target_release_cmd_kref(struct kref *kref)
2792 {
2793 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2794 struct se_session *se_sess = se_cmd->se_sess;
2795 struct completion *free_compl = se_cmd->free_compl;
2796 struct completion *abrt_compl = se_cmd->abrt_compl;
2797 unsigned long flags;
2798
2799 if (se_sess) {
2800 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2801 list_del_init(&se_cmd->se_cmd_list);
2802 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2803 }
2804
2805 target_free_cmd_mem(se_cmd);
2806 se_cmd->se_tfo->release_cmd(se_cmd);
2807 if (free_compl)
2808 complete(free_compl);
2809 if (abrt_compl)
2810 complete(abrt_compl);
2811 if (se_sess)
2812 percpu_ref_put(&se_sess->cmd_count);
2813 }
2814
2815
2816 /*
2817  * target_put_sess_cmd - drop a reference on a session command
2818  * @se_cmd: command to drop a reference from
2819  *
2820  * Returns 1 if this call released the command, 0 otherwise.
2821  */
2822 int target_put_sess_cmd(struct se_cmd *se_cmd)
2823 {
2824 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
2825 }
2826 EXPORT_SYMBOL(target_put_sess_cmd);
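
/*
 * Illustrative sketch, not part of this file: the reference pairing that
 * fabric code relies on.  target_get_sess_cmd(..., true) takes the extra
 * SCF_ACK_KREF reference, which the fabric drops later through
 * target_put_sess_cmd() once it no longer needs the command.  example_*
 * is hypothetical.
 */
static int example_track_cmd(struct se_cmd *se_cmd)
{
	int ret;

	ret = target_get_sess_cmd(se_cmd, true);
	if (ret)	/* -EINVAL or -ESHUTDOWN: session is going away */
		return ret;

	/* ... submit the command to the core ... */

	/* Later, when the fabric has finished with se_cmd: */
	target_put_sess_cmd(se_cmd);
	return 0;
}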
2827
2828 static const char *data_dir_name(enum dma_data_direction d)
2829 {
2830 switch (d) {
2831 case DMA_BIDIRECTIONAL: return "BIDI";
2832 case DMA_TO_DEVICE: return "WRITE";
2833 case DMA_FROM_DEVICE: return "READ";
2834 case DMA_NONE: return "NONE";
2835 }
2836
2837 return "(?)";
2838 }
2839
2840 static const char *cmd_state_name(enum transport_state_table t)
2841 {
2842 switch (t) {
2843 case TRANSPORT_NO_STATE: return "NO_STATE";
2844 case TRANSPORT_NEW_CMD: return "NEW_CMD";
2845 case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING";
2846 case TRANSPORT_PROCESSING: return "PROCESSING";
2847 case TRANSPORT_COMPLETE: return "COMPLETE";
2848 case TRANSPORT_ISTATE_PROCESSING:
2849 return "ISTATE_PROCESSING";
2850 case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP";
2851 case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK";
2852 case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR";
2853 }
2854
2855 return "(?)";
2856 }
2857
2858 static void target_append_str(char **str, const char *txt)
2859 {
2860 char *prev = *str;
2861
2862 *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
2863 kstrdup(txt, GFP_ATOMIC);
2864 kfree(prev);
2865 }
2866
2867
2868
2869
2870
2871 static char *target_ts_to_str(u32 ts)
2872 {
2873 char *str = NULL;
2874
2875 if (ts & CMD_T_ABORTED)
2876 target_append_str(&str, "aborted");
2877 if (ts & CMD_T_ACTIVE)
2878 target_append_str(&str, "active");
2879 if (ts & CMD_T_COMPLETE)
2880 target_append_str(&str, "complete");
2881 if (ts & CMD_T_SENT)
2882 target_append_str(&str, "sent");
2883 if (ts & CMD_T_STOP)
2884 target_append_str(&str, "stop");
2885 if (ts & CMD_T_FABRIC_STOP)
2886 target_append_str(&str, "fabric_stop");
2887
2888 return str;
2889 }
2890
2891 static const char *target_tmf_name(enum tcm_tmreq_table tmf)
2892 {
2893 switch (tmf) {
2894 case TMR_ABORT_TASK: return "ABORT_TASK";
2895 case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET";
2896 case TMR_CLEAR_ACA: return "CLEAR_ACA";
2897 case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET";
2898 case TMR_LUN_RESET: return "LUN_RESET";
2899 case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET";
2900 case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET";
2901 case TMR_UNKNOWN: break;
2902 }
2903 return "(?)";
2904 }
2905
2906 void target_show_cmd(const char *pfx, struct se_cmd *cmd)
2907 {
2908 char *ts_str = target_ts_to_str(cmd->transport_state);
2909 const u8 *cdb = cmd->t_task_cdb;
2910 struct se_tmr_req *tmf = cmd->se_tmr_req;
2911
2912 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
2913 pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
2914 pfx, cdb[0], cdb[1], cmd->tag,
2915 data_dir_name(cmd->data_direction),
2916 cmd->se_tfo->get_cmd_state(cmd),
2917 cmd_state_name(cmd->t_state), cmd->data_length,
2918 kref_read(&cmd->cmd_kref), ts_str);
2919 } else {
2920 pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
2921 pfx, target_tmf_name(tmf->function), cmd->tag,
2922 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
2923 cmd_state_name(cmd->t_state),
2924 kref_read(&cmd->cmd_kref), ts_str);
2925 }
2926 kfree(ts_str);
2927 }
2928 EXPORT_SYMBOL(target_show_cmd);
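
/*
 * Illustrative sketch, not part of this file: target_show_cmd() as a
 * debugging aid, e.g. from a fabric timeout handler.  Output goes through
 * pr_debug(), so dynamic debug must be enabled to see it.  example_* is
 * hypothetical.
 */
static void example_dump_stuck_cmd(struct se_cmd *cmd)
{
	target_show_cmd("example_fabric timeout: ", cmd);
}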
2929
2930 /*
2931  * Mark @se_sess as tearing down and kill its cmd_count percpu ref so that
2932  * target_wait_for_sess_cmds() can drain the outstanding commands.
2933  */
2934 void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2935 {
2936 unsigned long flags;
2937
2938 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2939 se_sess->sess_tearing_down = 1;
2940 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2941
2942 percpu_ref_kill(&se_sess->cmd_count);
2943 }
2944 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
2945
2946 /*
2947  * Wait until all commands tracked on @se_sess have been released,
2948  * dumping any stragglers every 180 seconds while waiting.
2949  */
2950 void target_wait_for_sess_cmds(struct se_session *se_sess)
2951 {
2952 struct se_cmd *cmd;
2953 int ret;
2954
2955 WARN_ON_ONCE(!se_sess->sess_tearing_down);
2956
2957 do {
2958 ret = wait_event_timeout(se_sess->cmd_list_wq,
2959 percpu_ref_is_zero(&se_sess->cmd_count),
2960 180 * HZ);
2961 list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
2962 target_show_cmd("session shutdown: still waiting for ",
2963 cmd);
2964 } while (ret <= 0);
2965 }
2966 EXPORT_SYMBOL(target_wait_for_sess_cmds);
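
/*
 * Illustrative sketch, not part of this file: the teardown sequence a
 * fabric driver follows for a session it set up itself.  Drivers that used
 * target_setup_session() can rely on target_remove_session(), which
 * performs a similar sequence.  example_* is hypothetical.
 */
static void example_shutdown_session(struct se_session *se_sess)
{
	/* Stop tracking new commands and kill the cmd_count percpu ref. */
	target_sess_cmd_list_set_waiting(se_sess);

	/* Block until every outstanding se_cmd reference has been dropped. */
	target_wait_for_sess_cmds(se_sess);

	/* Now it is safe to deregister and free the session. */
	transport_deregister_session(se_sess);
}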
2967
2968
2969
2970
2971
2972 void transport_clear_lun_ref(struct se_lun *lun)
2973 {
2974 percpu_ref_kill(&lun->lun_ref);
2975 wait_for_completion(&lun->lun_shutdown_comp);
2976 }
2977
2978 static bool
2979 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
2980 bool *aborted, bool *tas, unsigned long *flags)
2981 __releases(&cmd->t_state_lock)
2982 __acquires(&cmd->t_state_lock)
2983 {
2984
2985 assert_spin_locked(&cmd->t_state_lock);
2986 WARN_ON_ONCE(!irqs_disabled());
2987
2988 if (fabric_stop)
2989 cmd->transport_state |= CMD_T_FABRIC_STOP;
2990
2991 if (cmd->transport_state & CMD_T_ABORTED)
2992 *aborted = true;
2993
2994 if (cmd->transport_state & CMD_T_TAS)
2995 *tas = true;
2996
2997 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
2998 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2999 return false;
3000
3001 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
3002 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
3003 return false;
3004
3005 if (!(cmd->transport_state & CMD_T_ACTIVE))
3006 return false;
3007
3008 if (fabric_stop && *aborted)
3009 return false;
3010
3011 cmd->transport_state |= CMD_T_STOP;
3012
3013 target_show_cmd("wait_for_tasks: Stopping ", cmd);
3014
3015 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
3016
3017 while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
3018 180 * HZ))
3019 target_show_cmd("wait for tasks: ", cmd);
3020
3021 spin_lock_irqsave(&cmd->t_state_lock, *flags);
3022 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
3023
3024 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
3025 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
3026
3027 return true;
3028 }
3029
3030 /*
3031  * transport_wait_for_tasks - set CMD_T_STOP and wait until @cmd stops
3032  * executing.  Returns true if this call actually had to wait.
3033  */
3034 bool transport_wait_for_tasks(struct se_cmd *cmd)
3035 {
3036 unsigned long flags;
3037 bool ret, aborted = false, tas = false;
3038
3039 spin_lock_irqsave(&cmd->t_state_lock, flags);
3040 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
3041 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3042
3043 return ret;
3044 }
3045 EXPORT_SYMBOL(transport_wait_for_tasks);
3046
3047 struct sense_info {
3048 u8 key;
3049 u8 asc;
3050 u8 ascq;
3051 bool add_sector_info;
3052 };
3053
3054 static const struct sense_info sense_info_table[] = {
3055 [TCM_NO_SENSE] = {
3056 .key = NOT_READY
3057 },
3058 [TCM_NON_EXISTENT_LUN] = {
3059 .key = ILLEGAL_REQUEST,
3060 .asc = 0x25
3061 },
3062 [TCM_UNSUPPORTED_SCSI_OPCODE] = {
3063 .key = ILLEGAL_REQUEST,
3064 .asc = 0x20,
3065 },
3066 [TCM_SECTOR_COUNT_TOO_MANY] = {
3067 .key = ILLEGAL_REQUEST,
3068 .asc = 0x20,
3069 },
3070 [TCM_UNKNOWN_MODE_PAGE] = {
3071 .key = ILLEGAL_REQUEST,
3072 .asc = 0x24,
3073 },
3074 [TCM_CHECK_CONDITION_ABORT_CMD] = {
3075 .key = ABORTED_COMMAND,
3076 .asc = 0x29,
3077 .ascq = 0x03,
3078 },
3079 [TCM_INCORRECT_AMOUNT_OF_DATA] = {
3080 .key = ABORTED_COMMAND,
3081 .asc = 0x0c,
3082 .ascq = 0x0d,
3083 },
3084 [TCM_INVALID_CDB_FIELD] = {
3085 .key = ILLEGAL_REQUEST,
3086 .asc = 0x24,
3087 },
3088 [TCM_INVALID_PARAMETER_LIST] = {
3089 .key = ILLEGAL_REQUEST,
3090 .asc = 0x26,
3091 },
3092 [TCM_TOO_MANY_TARGET_DESCS] = {
3093 .key = ILLEGAL_REQUEST,
3094 .asc = 0x26,
3095 .ascq = 0x06,
3096 },
3097 [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
3098 .key = ILLEGAL_REQUEST,
3099 .asc = 0x26,
3100 .ascq = 0x07,
3101 },
3102 [TCM_TOO_MANY_SEGMENT_DESCS] = {
3103 .key = ILLEGAL_REQUEST,
3104 .asc = 0x26,
3105 .ascq = 0x08,
3106 },
3107 [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
3108 .key = ILLEGAL_REQUEST,
3109 .asc = 0x26,
3110 .ascq = 0x09,
3111 },
3112 [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
3113 .key = ILLEGAL_REQUEST,
3114 .asc = 0x1a,
3115 },
3116 [TCM_UNEXPECTED_UNSOLICITED_DATA] = {
3117 .key = ILLEGAL_REQUEST,
3118 .asc = 0x0c,
3119 .ascq = 0x0c,
3120 },
3121 [TCM_SERVICE_CRC_ERROR] = {
3122 .key = ABORTED_COMMAND,
3123 .asc = 0x47,
3124 .ascq = 0x05,
3125 },
3126 [TCM_SNACK_REJECTED] = {
3127 .key = ABORTED_COMMAND,
3128 .asc = 0x11,
3129 .ascq = 0x13,
3130 },
3131 [TCM_WRITE_PROTECTED] = {
3132 .key = DATA_PROTECT,
3133 .asc = 0x27,
3134 },
3135 [TCM_ADDRESS_OUT_OF_RANGE] = {
3136 .key = ILLEGAL_REQUEST,
3137 .asc = 0x21,
3138 },
3139 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
3140 .key = UNIT_ATTENTION,
3141 },
3142 [TCM_CHECK_CONDITION_NOT_READY] = {
3143 .key = NOT_READY,
3144 },
3145 [TCM_MISCOMPARE_VERIFY] = {
3146 .key = MISCOMPARE,
3147 .asc = 0x1d,
3148 .ascq = 0x00,
3149 },
3150 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
3151 .key = ABORTED_COMMAND,
3152 .asc = 0x10,
3153 .ascq = 0x01,
3154 .add_sector_info = true,
3155 },
3156 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
3157 .key = ABORTED_COMMAND,
3158 .asc = 0x10,
3159 .ascq = 0x02,
3160 .add_sector_info = true,
3161 },
3162 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
3163 .key = ABORTED_COMMAND,
3164 .asc = 0x10,
3165 .ascq = 0x03,
3166 .add_sector_info = true,
3167 },
3168 [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
3169 .key = COPY_ABORTED,
3170 .asc = 0x0d,
3171 .ascq = 0x02,
3172
3173 },
3174 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
3175
3176
3177
3178
3179
3180
3181 .key = NOT_READY,
3182 .asc = 0x08,
3183 },
3184 [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195 .key = ILLEGAL_REQUEST,
3196 .asc = 0x55,
3197 .ascq = 0x04,
3198 },
3199 };
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
3213 {
3214 const struct sense_info *si;
3215 u8 *buffer = cmd->sense_buffer;
3216 int r = (__force int)reason;
3217 u8 key, asc, ascq;
3218 bool desc_format = target_sense_desc_format(cmd->se_dev);
3219
3220 if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
3221 si = &sense_info_table[r];
3222 else
3223 si = &sense_info_table[(__force int)
3224 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
3225
3226 key = si->key;
3227 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
3228 if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
3229 &ascq)) {
3230 cmd->scsi_status = SAM_STAT_BUSY;
3231 return;
3232 }
3233 } else if (si->asc == 0) {
3234 WARN_ON_ONCE(cmd->scsi_asc == 0);
3235 asc = cmd->scsi_asc;
3236 ascq = cmd->scsi_ascq;
3237 } else {
3238 asc = si->asc;
3239 ascq = si->ascq;
3240 }
3241
3242 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
3243 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
3244 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
3245 scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
3246 if (si->add_sector_info)
3247 WARN_ON_ONCE(scsi_set_sense_information(buffer,
3248 cmd->scsi_sense_length,
3249 cmd->bad_sector) < 0);
3250 }
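
/*
 * Worked example (informational, not part of this file): for
 * TCM_NON_EXISTENT_LUN with fixed-format sense (desc_format == false),
 * scsi_build_sense_buffer() is expected to produce, assuming the standard
 * fixed-format layout:
 *
 *	buffer[0]  = 0x70	current, fixed-format sense
 *	buffer[2]  = 0x05	ILLEGAL REQUEST
 *	buffer[7]  = 0x0a	additional sense length
 *	buffer[12] = 0x25	ASC: LOGICAL UNIT NOT SUPPORTED
 *	buffer[13] = 0x00	ASCQ
 *
 * and translate_sense_reason() then sets SAM_STAT_CHECK_CONDITION.
 */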
3251
3252 int
3253 transport_send_check_condition_and_sense(struct se_cmd *cmd,
3254 sense_reason_t reason, int from_transport)
3255 {
3256 unsigned long flags;
3257
3258 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
3259
3260 spin_lock_irqsave(&cmd->t_state_lock, flags);
3261 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
3262 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3263 return 0;
3264 }
3265 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
3266 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3267
3268 if (!from_transport)
3269 translate_sense_reason(cmd, reason);
3270
3271 trace_target_cmd_complete(cmd);
3272 return cmd->se_tfo->queue_status(cmd);
3273 }
3274 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
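
/*
 * Illustrative sketch, not part of this file: how completion code in this
 * file turns a sense_reason_t into a CHECK CONDITION, mirroring the
 * pattern used in target_complete_ok_work() above.  example_* is
 * hypothetical.
 */
static void example_fail_cmd(struct se_cmd *cmd)
{
	int ret;

	/* from_transport = 0: build sense data from sense_info_table. */
	ret = transport_send_check_condition_and_sense(cmd,
						TCM_INVALID_CDB_FIELD, 0);
	if (ret)
		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}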
3275
3276
3277 /*
3278  * target_send_busy - send SAM_STAT_BUSY back to the initiator for @cmd,
3279  * e.g. when a fabric driver cannot accept the command for processing.
3280  * Must not be used for task management requests.
3281  */
3282 int target_send_busy(struct se_cmd *cmd)
3283 {
3284 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
3285
3286 cmd->scsi_status = SAM_STAT_BUSY;
3287 trace_target_cmd_complete(cmd);
3288 return cmd->se_tfo->queue_status(cmd);
3289 }
3290 EXPORT_SYMBOL(target_send_busy);
3291
3292 static void target_tmr_work(struct work_struct *work)
3293 {
3294 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3295 struct se_device *dev = cmd->se_dev;
3296 struct se_tmr_req *tmr = cmd->se_tmr_req;
3297 int ret;
3298
3299 if (cmd->transport_state & CMD_T_ABORTED)
3300 goto aborted;
3301
3302 switch (tmr->function) {
3303 case TMR_ABORT_TASK:
3304 core_tmr_abort_task(dev, tmr, cmd->se_sess);
3305 break;
3306 case TMR_ABORT_TASK_SET:
3307 case TMR_CLEAR_ACA:
3308 case TMR_CLEAR_TASK_SET:
3309 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3310 break;
3311 case TMR_LUN_RESET:
3312 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
3313 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
3314 TMR_FUNCTION_REJECTED;
3315 if (tmr->response == TMR_FUNCTION_COMPLETE) {
3316 target_ua_allocate_lun(cmd->se_sess->se_node_acl,
3317 cmd->orig_fe_lun, 0x29,
3318 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
3319 }
3320 break;
3321 case TMR_TARGET_WARM_RESET:
3322 tmr->response = TMR_FUNCTION_REJECTED;
3323 break;
3324 case TMR_TARGET_COLD_RESET:
3325 tmr->response = TMR_FUNCTION_REJECTED;
3326 break;
3327 default:
3328 pr_err("Unknown TMR function: 0x%02x.\n",
3329 tmr->function);
3330 tmr->response = TMR_FUNCTION_REJECTED;
3331 break;
3332 }
3333
3334 if (cmd->transport_state & CMD_T_ABORTED)
3335 goto aborted;
3336
3337 cmd->se_tfo->queue_tm_rsp(cmd);
3338
3339 transport_lun_remove_cmd(cmd);
3340 transport_cmd_check_stop_to_fabric(cmd);
3341 return;
3342
3343 aborted:
3344 target_handle_abort(cmd);
3345 }
3346
3347 int transport_generic_handle_tmr(
3348 struct se_cmd *cmd)
3349 {
3350 unsigned long flags;
3351 bool aborted = false;
3352
3353 spin_lock_irqsave(&cmd->t_state_lock, flags);
3354 if (cmd->transport_state & CMD_T_ABORTED) {
3355 aborted = true;
3356 } else {
3357 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3358 cmd->transport_state |= CMD_T_ACTIVE;
3359 }
3360 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3361
3362 if (aborted) {
3363 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
3364 cmd->se_tmr_req->function,
3365 cmd->se_tmr_req->ref_task_tag, cmd->tag);
3366 target_handle_abort(cmd);
3367 return 0;
3368 }
3369
3370 INIT_WORK(&cmd->work, target_tmr_work);
3371 schedule_work(&cmd->work);
3372 return 0;
3373 }
3374 EXPORT_SYMBOL(transport_generic_handle_tmr);
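
/*
 * Illustrative sketch, not part of this file: a minimal fabric
 * ->queue_tm_rsp() callback.  target_tmr_work() above fills in
 * tmr->response before invoking it; the fabric only translates that into
 * its wire-level response.  The example_ and my_fabric_ prefixed names
 * are hypothetical.
 */
static void example_queue_tm_rsp(struct se_cmd *cmd)
{
	struct se_tmr_req *tmr = cmd->se_tmr_req;

	switch (tmr->response) {
	case TMR_FUNCTION_COMPLETE:
		/* my_fabric_send_tm_success(cmd); */
		break;
	default:
		/* my_fabric_send_tm_failure(cmd, tmr->response); */
		break;
	}
}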
3375
3376 bool
3377 target_check_wce(struct se_device *dev)
3378 {
3379 bool wce = false;
3380
3381 if (dev->transport->get_write_cache)
3382 wce = dev->transport->get_write_cache(dev);
3383 else if (dev->dev_attrib.emulate_write_cache > 0)
3384 wce = true;
3385
3386 return wce;
3387 }
3388
3389 bool
3390 target_check_fua(struct se_device *dev)
3391 {
3392 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
3393 }
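
/*
 * Illustrative sketch, not part of this file: how backend or emulation
 * code might combine target_check_wce() and target_check_fua() to decide
 * whether a WRITE needs to reach stable media before it is completed.
 * example_* is hypothetical.
 */
static bool example_write_needs_fua(struct se_cmd *cmd, bool fua_bit_set)
{
	struct se_device *dev = cmd->se_dev;

	/* No volatile write cache: data is stable once the write completes. */
	if (!target_check_wce(dev))
		return false;

	/* Cache present and the initiator's FUA bit can be honoured. */
	return fua_bit_set && target_check_fua(dev);
}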