Lines matching refs: sdev
122 struct scsi_device *sdev = cmd->device; in scsi_mq_requeue_cmd() local
127 put_device(&sdev->sdev_gendev); in scsi_mq_requeue_cmd()
216 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, in scsi_execute() argument
225 req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM); in scsi_execute()
230 if (bufflen && blk_rq_map_kern(sdev->request_queue, req, in scsi_execute()
266 int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd, in scsi_execute_req_flags() argument
279 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, in scsi_execute_req_flags()
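scsi_execute() and its scsi_execute_req_flags() wrapper are the synchronous entry points for issuing internal (non-filesystem) commands; the listing itself calls the scsi_execute_req() convenience form at lines 2397, 2462 and 2548. A minimal caller sketch, assuming a device reference is already held; the function name example_inquiry and all numeric values are illustrative, not from the listing:

    #include <linux/dma-direction.h>
    #include <scsi/scsi.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_eh.h>

    /* Sketch: fetch INQUIRY data through scsi_execute_req(), the
     * sense-header wrapper around scsi_execute(). Timeout and retry
     * counts are illustrative choices. */
    static int example_inquiry(struct scsi_device *sdev, void *buf,
                               unsigned char len)
    {
            unsigned char cmd[6] = { INQUIRY, 0, 0, 0, len, 0 };
            struct scsi_sense_hdr sshdr;

            return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, len,
                                    &sshdr, 30 * HZ, 3, NULL);
    }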
309 void scsi_device_unbusy(struct scsi_device *sdev) in scsi_device_unbusy() argument
311 struct Scsi_Host *shost = sdev->host; in scsi_device_unbusy()
312 struct scsi_target *starget = scsi_target(sdev); in scsi_device_unbusy()
326 atomic_dec(&sdev->device_busy); in scsi_device_unbusy()
347 struct scsi_device *sdev, *tmp; in scsi_single_lun_run() local
366 list_for_each_entry_safe(sdev, tmp, &starget->devices, in scsi_single_lun_run()
368 if (sdev == current_sdev) in scsi_single_lun_run()
370 if (scsi_device_get(sdev)) in scsi_single_lun_run()
374 scsi_kick_queue(sdev->request_queue); in scsi_single_lun_run()
377 scsi_device_put(sdev); in scsi_single_lun_run()
383 static inline bool scsi_device_is_busy(struct scsi_device *sdev) in scsi_device_is_busy() argument
385 if (atomic_read(&sdev->device_busy) >= sdev->queue_depth) in scsi_device_is_busy()
387 if (atomic_read(&sdev->device_blocked) > 0) in scsi_device_is_busy()
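The busy test at line 385 admits a command only while device_busy stays below queue_depth. Drivers tune that limit with scsi_change_queue_depth(); a hedged sketch of a slave_configure-style hook, where the name example_slave_configure and the depth of 32 are assumptions for illustration:

    #include <scsi/scsi_device.h>

    /* Sketch: cap how many commands scsi_dev_queue_ready() will
     * admit by lowering sdev->queue_depth. The value 32 is purely
     * illustrative. */
    static int example_slave_configure(struct scsi_device *sdev)
    {
            scsi_change_queue_depth(sdev, 32);
            return 0;
    }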
418 struct scsi_device *sdev; in scsi_starved_list_run() local
440 sdev = list_entry(starved_list.next, in scsi_starved_list_run()
442 list_del_init(&sdev->starved_entry); in scsi_starved_list_run()
443 if (scsi_target_is_busy(scsi_target(sdev))) { in scsi_starved_list_run()
444 list_move_tail(&sdev->starved_entry, in scsi_starved_list_run()
459 slq = sdev->request_queue; in scsi_starved_list_run()
488 struct scsi_device *sdev = q->queuedata; in scsi_run_queue() local
490 if (scsi_target(sdev)->single_lun) in scsi_run_queue()
491 scsi_single_lun_run(sdev); in scsi_run_queue()
492 if (!list_empty(&sdev->host->starved_list)) in scsi_run_queue()
493 scsi_starved_list_run(sdev->host); in scsi_run_queue()
503 struct scsi_device *sdev; in scsi_requeue_run_queue() local
506 sdev = container_of(work, struct scsi_device, requeue_work); in scsi_requeue_run_queue()
507 q = sdev->request_queue; in scsi_requeue_run_queue()
531 struct scsi_device *sdev = cmd->device; in scsi_requeue_command() local
544 put_device(&sdev->sdev_gendev); in scsi_requeue_command()
549 struct scsi_device *sdev; in scsi_run_host_queues() local
551 shost_for_each_device(sdev, shost) in scsi_run_host_queues()
552 scsi_run_queue(sdev->request_queue); in scsi_run_host_queues()
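scsi_run_host_queues() at lines 551-552 shows the canonical iteration idiom: shost_for_each_device() takes and drops a reference on each device as it walks the host's list. A hedged sketch using the same macro; the counting body and the name example_count_devices are illustrative:

    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>

    /* Sketch: visit every scsi_device on @shost the same way
     * scsi_run_host_queues() does; the macro pairs scsi_device_get()
     * and scsi_device_put() internally. */
    static int example_count_devices(struct Scsi_Host *shost)
    {
            struct scsi_device *sdev;
            int count = 0;

            shost_for_each_device(sdev, shost)
                    count++;
            return count;
    }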
637 struct scsi_device *sdev = cmd->device; in scsi_mq_uninit_cmd() local
638 struct Scsi_Host *shost = sdev->host; in scsi_mq_uninit_cmd()
646 spin_lock_irqsave(&sdev->list_lock, flags); in scsi_mq_uninit_cmd()
648 spin_unlock_irqrestore(&sdev->list_lock, flags); in scsi_mq_uninit_cmd()
692 struct scsi_device *sdev = cmd->device; in scsi_end_request() local
693 struct request_queue *q = sdev->request_queue; in scsi_end_request()
718 if (scsi_target(sdev)->single_lun || in scsi_end_request()
719 !list_empty(&sdev->host->starved_list)) in scsi_end_request()
720 kblockd_schedule_work(&sdev->requeue_work); in scsi_end_request()
739 put_device(&sdev->sdev_gendev); in scsi_end_request()
1118 struct scsi_device *sdev = cmd->device; in scsi_init_io() local
1185 put_device(&sdev->sdev_gendev); in scsi_init_io()
1191 static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, in scsi_get_cmd_from_req() argument
1198 if (!get_device(&sdev->sdev_gendev)) in scsi_get_cmd_from_req()
1201 cmd = scsi_get_command(sdev, GFP_ATOMIC); in scsi_get_cmd_from_req()
1203 put_device(&sdev->sdev_gendev); in scsi_get_cmd_from_req()
1221 static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) in scsi_setup_blk_pc_cmnd() argument
1251 static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) in scsi_setup_fs_cmnd() argument
1255 if (unlikely(sdev->handler && sdev->handler->prep_fn)) { in scsi_setup_fs_cmnd()
1256 int ret = sdev->handler->prep_fn(sdev, req); in scsi_setup_fs_cmnd()
1265 static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req) in scsi_setup_cmnd() argument
1278 return scsi_setup_fs_cmnd(sdev, req); in scsi_setup_cmnd()
1280 return scsi_setup_blk_pc_cmnd(sdev, req); in scsi_setup_cmnd()
1287 scsi_prep_state_check(struct scsi_device *sdev, struct request *req) in scsi_prep_state_check() argument
1295 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { in scsi_prep_state_check()
1296 switch (sdev->sdev_state) { in scsi_prep_state_check()
1304 sdev_printk(KERN_ERR, sdev, in scsi_prep_state_check()
1313 sdev_printk(KERN_ERR, sdev, in scsi_prep_state_check()
1345 struct scsi_device *sdev = q->queuedata; in scsi_prep_return() local
1355 put_device(&sdev->sdev_gendev); in scsi_prep_return()
1365 if (atomic_read(&sdev->device_busy) == 0) in scsi_prep_return()
1377 struct scsi_device *sdev = q->queuedata; in scsi_prep_fn() local
1381 ret = scsi_prep_state_check(sdev, req); in scsi_prep_fn()
1385 cmd = scsi_get_cmd_from_req(sdev, req); in scsi_prep_fn()
1391 ret = scsi_setup_cmnd(sdev, req); in scsi_prep_fn()
1408 struct scsi_device *sdev) in scsi_dev_queue_ready() argument
1412 busy = atomic_inc_return(&sdev->device_busy) - 1; in scsi_dev_queue_ready()
1413 if (atomic_read(&sdev->device_blocked)) { in scsi_dev_queue_ready()
1420 if (atomic_dec_return(&sdev->device_blocked) > 0) { in scsi_dev_queue_ready()
1428 SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev, in scsi_dev_queue_ready()
1432 if (busy >= sdev->queue_depth) in scsi_dev_queue_ready()
1437 atomic_dec(&sdev->device_busy); in scsi_dev_queue_ready()
1446 struct scsi_device *sdev) in scsi_target_queue_ready() argument
1448 struct scsi_target *starget = scsi_target(sdev); in scsi_target_queue_ready()
1454 starget->starget_sdev_user != sdev) { in scsi_target_queue_ready()
1458 starget->starget_sdev_user = sdev; in scsi_target_queue_ready()
1487 list_move_tail(&sdev->starved_entry, &shost->starved_list); in scsi_target_queue_ready()
1502 struct scsi_device *sdev) in scsi_host_queue_ready() argument
1531 if (!list_empty(&sdev->starved_entry)) { in scsi_host_queue_ready()
1533 if (!list_empty(&sdev->starved_entry)) in scsi_host_queue_ready()
1534 list_del_init(&sdev->starved_entry); in scsi_host_queue_ready()
1542 if (list_empty(&sdev->starved_entry)) in scsi_host_queue_ready()
1543 list_add_tail(&sdev->starved_entry, &shost->starved_list); in scsi_host_queue_ready()
1564 struct scsi_device *sdev = q->queuedata; in scsi_lld_busy() local
1570 shost = sdev->host; in scsi_lld_busy()
1578 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev)) in scsi_lld_busy()
1590 struct scsi_device *sdev; in scsi_kill_request() local
1598 sdev = cmd->device; in scsi_kill_request()
1599 starget = scsi_target(sdev); in scsi_kill_request()
1600 shost = sdev->host; in scsi_kill_request()
1610 atomic_inc(&sdev->device_busy); in scsi_kill_request()
1770 struct scsi_device *sdev = q->queuedata; in scsi_request_fn() local
1779 shost = sdev->host; in scsi_request_fn()
1791 if (unlikely(!scsi_device_online(sdev))) { in scsi_request_fn()
1792 sdev_printk(KERN_ERR, sdev, in scsi_request_fn()
1798 if (!scsi_dev_queue_ready(q, sdev)) in scsi_request_fn()
1828 if (list_empty(&sdev->starved_entry)) in scsi_request_fn()
1829 list_add_tail(&sdev->starved_entry, in scsi_request_fn()
1835 if (!scsi_target_queue_ready(shost, sdev)) in scsi_request_fn()
1838 if (!scsi_host_queue_ready(q, shost, sdev)) in scsi_request_fn()
1841 if (sdev->simple_tags) in scsi_request_fn()
1868 if (scsi_target(sdev)->can_queue > 0) in scsi_request_fn()
1869 atomic_dec(&scsi_target(sdev)->target_busy); in scsi_request_fn()
1881 atomic_dec(&sdev->device_busy); in scsi_request_fn()
1883 if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev)) in scsi_request_fn()
1902 struct scsi_device *sdev = req->q->queuedata; in scsi_mq_prep_fn() local
1903 struct Scsi_Host *shost = sdev->host; in scsi_mq_prep_fn()
1912 cmd->device = sdev; in scsi_mq_prep_fn()
1925 spin_lock_irq(&sdev->list_lock); in scsi_mq_prep_fn()
1926 list_add_tail(&cmd->list, &sdev->cmd_list); in scsi_mq_prep_fn()
1927 spin_unlock_irq(&sdev->list_lock); in scsi_mq_prep_fn()
1957 return scsi_setup_cmnd(sdev, req); in scsi_mq_prep_fn()
1971 struct scsi_device *sdev = q->queuedata; in scsi_queue_rq() local
1972 struct Scsi_Host *shost = sdev->host; in scsi_queue_rq()
1977 ret = prep_to_mq(scsi_prep_state_check(sdev, req)); in scsi_queue_rq()
1982 if (!get_device(&sdev->sdev_gendev)) in scsi_queue_rq()
1985 if (!scsi_dev_queue_ready(q, sdev)) in scsi_queue_rq()
1987 if (!scsi_target_queue_ready(shost, sdev)) in scsi_queue_rq()
1989 if (!scsi_host_queue_ready(q, shost, sdev)) in scsi_queue_rq()
2002 if (sdev->simple_tags) in scsi_queue_rq()
2022 if (scsi_target(sdev)->can_queue > 0) in scsi_queue_rq()
2023 atomic_dec(&scsi_target(sdev)->target_busy); in scsi_queue_rq()
2025 atomic_dec(&sdev->device_busy); in scsi_queue_rq()
2027 put_device(&sdev->sdev_gendev); in scsi_queue_rq()
2032 if (atomic_read(&sdev->device_busy) == 0 && in scsi_queue_rq()
2033 !scsi_device_blocked(sdev)) in scsi_queue_rq()
2150 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) in scsi_alloc_queue() argument
2154 q = __scsi_alloc_queue(sdev->host, scsi_request_fn); in scsi_alloc_queue()
2175 struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev) in scsi_mq_alloc_queue() argument
2177 sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set); in scsi_mq_alloc_queue()
2178 if (IS_ERR(sdev->request_queue)) in scsi_mq_alloc_queue()
2181 sdev->request_queue->queuedata = sdev; in scsi_mq_alloc_queue()
2182 __scsi_init_queue(sdev->host, sdev->request_queue); in scsi_mq_alloc_queue()
2183 return sdev->request_queue; in scsi_mq_alloc_queue()
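blk_mq_init_queue() returns an ERR_PTR-encoded error rather than NULL, which is why scsi_mq_alloc_queue() tests IS_ERR() at line 2178. The usual unwrap pattern, as a generic hedged sketch (example_init_queue is a hypothetical name):

    #include <linux/blk-mq.h>
    #include <linux/err.h>
    #include <scsi/scsi_host.h>

    /* Sketch of the ERR_PTR idiom: a failed blk_mq_init_queue()
     * encodes -errno in the returned pointer itself. */
    static int example_init_queue(struct Scsi_Host *shost,
                                  struct request_queue **qp)
    {
            struct request_queue *q = blk_mq_init_queue(&shost->tag_set);

            if (IS_ERR(q))
                    return PTR_ERR(q);
            *qp = q;
            return 0;
    }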
2346 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, in scsi_mode_select() argument
2357 if (sdev->use_10_for_ms) { in scsi_mode_select()
2397 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, in scsi_mode_select()
2422 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, in scsi_mode_sense() argument
2442 use_10_for_ms = sdev->use_10_for_ms; in scsi_mode_sense()
2462 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, in scsi_mode_sense()
2478 sdev->use_10_for_ms = 0; in scsi_mode_sense()
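scsi_mode_sense() honors use_10_for_ms and falls back to MODE SENSE(6) when the 10-byte form is rejected, clearing the flag at line 2478. A hedged caller sketch fetching the caching mode page; the name example_read_caching_page, the buffer size, timeout, and retry count are illustrative (page code 0x08 is the standard caching page):

    #include <scsi/scsi_device.h>
    #include <scsi/scsi_eh.h>

    /* Sketch: read mode page 0x08 (caching). Numeric parameters
     * are illustrative choices, not values from the listing. */
    static int example_read_caching_page(struct scsi_device *sdev)
    {
            unsigned char buf[64];
            struct scsi_mode_data data;
            struct scsi_sense_hdr sshdr;

            return scsi_mode_sense(sdev, 0 /* dbd */, 0x08, buf,
                                   sizeof(buf), 30 * HZ, 3, &data,
                                   &sshdr);
    }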
2532 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, in scsi_test_unit_ready() argument
2548 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, in scsi_test_unit_ready()
2550 if (sdev->removable && scsi_sense_valid(sshdr) && in scsi_test_unit_ready()
2552 sdev->changed = 1; in scsi_test_unit_ready()
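Beyond reporting readiness, scsi_test_unit_ready() latches media changes for removable devices by setting sdev->changed at line 2552. A hedged polling sketch; example_device_ready and the timeout/retry values are assumptions:

    #include <scsi/scsi_device.h>
    #include <scsi/scsi_eh.h>

    /* Sketch: probe readiness; a zero result means the device
     * returned GOOD status for TEST UNIT READY. */
    static bool example_device_ready(struct scsi_device *sdev)
    {
            struct scsi_sense_hdr sshdr;

            return scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr) == 0;
    }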
2571 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) in scsi_device_set_state() argument
2573 enum scsi_device_state oldstate = sdev->sdev_state; in scsi_device_set_state()
2673 sdev->sdev_state = state; in scsi_device_set_state()
2678 sdev_printk(KERN_ERR, sdev, in scsi_device_set_state()
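scsi_device_set_state() validates every transition against the legal state machine and refuses illegal ones; the sdev_printk at line 2678 reports the rejected old/new pair. A hedged sketch (example_offline is a hypothetical name):

    #include <scsi/scsi_device.h>

    /* Sketch: attempt an offline transition; an illegal transition
     * returns an error without touching sdev_state. */
    static void example_offline(struct scsi_device *sdev)
    {
            if (scsi_device_set_state(sdev, SDEV_OFFLINE))
                    sdev_printk(KERN_WARNING, sdev,
                                "offline transition rejected\n");
    }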
2694 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) in scsi_evt_emit() argument
2728 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp); in scsi_evt_emit()
2740 struct scsi_device *sdev; in scsi_evt_thread() local
2744 sdev = container_of(work, struct scsi_device, event_work); in scsi_evt_thread()
2747 if (test_and_clear_bit(evt_type, sdev->pending_events)) in scsi_evt_thread()
2748 sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); in scsi_evt_thread()
2755 spin_lock_irqsave(&sdev->list_lock, flags); in scsi_evt_thread()
2756 list_splice_init(&sdev->event_list, &event_list); in scsi_evt_thread()
2757 spin_unlock_irqrestore(&sdev->list_lock, flags); in scsi_evt_thread()
2765 scsi_evt_emit(sdev, evt); in scsi_evt_thread()
2778 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) in sdev_evt_send() argument
2786 if (!test_bit(evt->evt_type, sdev->supported_events)) { in sdev_evt_send()
2792 spin_lock_irqsave(&sdev->list_lock, flags); in sdev_evt_send()
2793 list_add_tail(&evt->node, &sdev->event_list); in sdev_evt_send()
2794 schedule_work(&sdev->event_work); in sdev_evt_send()
2795 spin_unlock_irqrestore(&sdev->list_lock, flags); in sdev_evt_send()
2842 void sdev_evt_send_simple(struct scsi_device *sdev, in sdev_evt_send_simple() argument
2847 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n", in sdev_evt_send_simple()
2852 sdev_evt_send(sdev, evt); in sdev_evt_send_simple()
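sdev_evt_send_simple() allocates an event (logging "event %d eaten due to OOM" on failure, line 2847) and passes it to sdev_evt_send(), which queues it on event_list for scsi_evt_thread() to emit as a KOBJ_CHANGE uevent. A hedged sketch raising a media-change event (example_report_media_change is a hypothetical name):

    #include <scsi/scsi_device.h>

    /* Sketch: notify user space of a media change; the event is
     * delivered as a uevent by scsi_evt_thread(). */
    static void example_report_media_change(struct scsi_device *sdev)
    {
            sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
    }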
2872 scsi_device_quiesce(struct scsi_device *sdev) in scsi_device_quiesce() argument
2874 int err = scsi_device_set_state(sdev, SDEV_QUIESCE); in scsi_device_quiesce()
2878 scsi_run_queue(sdev->request_queue); in scsi_device_quiesce()
2879 while (atomic_read(&sdev->device_busy)) { in scsi_device_quiesce()
2881 scsi_run_queue(sdev->request_queue); in scsi_device_quiesce()
2896 void scsi_device_resume(struct scsi_device *sdev) in scsi_device_resume() argument
2902 if (sdev->sdev_state != SDEV_QUIESCE || in scsi_device_resume()
2903 scsi_device_set_state(sdev, SDEV_RUNNING)) in scsi_device_resume()
2905 scsi_run_queue(sdev->request_queue); in scsi_device_resume()
2910 device_quiesce_fn(struct scsi_device *sdev, void *data) in device_quiesce_fn() argument
2912 scsi_device_quiesce(sdev); in device_quiesce_fn()
2923 device_resume_fn(struct scsi_device *sdev, void *data) in device_resume_fn() argument
2925 scsi_device_resume(sdev); in device_resume_fn()
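scsi_device_quiesce() moves the device to SDEV_QUIESCE, where only internally issued requests are processed, then waits at line 2879 for device_busy to drain; scsi_device_resume() undoes it. The canonical pairing, as a hedged sketch (example_maintenance is a hypothetical name):

    #include <scsi/scsi_device.h>

    /* Sketch: quiesce around a maintenance step. While quiesced,
     * filesystem requests wait; only internal commands (e.g. via
     * scsi_execute_req()) are dispatched. */
    static int example_maintenance(struct scsi_device *sdev)
    {
            int err = scsi_device_quiesce(sdev);

            if (err)
                    return err;
            /* ... issue internal commands here ... */
            scsi_device_resume(sdev);
            return 0;
    }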
2952 scsi_internal_device_block(struct scsi_device *sdev) in scsi_internal_device_block() argument
2954 struct request_queue *q = sdev->request_queue; in scsi_internal_device_block()
2958 err = scsi_device_set_state(sdev, SDEV_BLOCK); in scsi_internal_device_block()
2960 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); in scsi_internal_device_block()
3000 scsi_internal_device_unblock(struct scsi_device *sdev, in scsi_internal_device_unblock() argument
3003 struct request_queue *q = sdev->request_queue; in scsi_internal_device_unblock()
3010 if ((sdev->sdev_state == SDEV_BLOCK) || in scsi_internal_device_unblock()
3011 (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE)) in scsi_internal_device_unblock()
3012 sdev->sdev_state = new_state; in scsi_internal_device_unblock()
3013 else if (sdev->sdev_state == SDEV_CREATED_BLOCK) { in scsi_internal_device_unblock()
3016 sdev->sdev_state = new_state; in scsi_internal_device_unblock()
3018 sdev->sdev_state = SDEV_CREATED; in scsi_internal_device_unblock()
3019 } else if (sdev->sdev_state != SDEV_CANCEL && in scsi_internal_device_unblock()
3020 sdev->sdev_state != SDEV_OFFLINE) in scsi_internal_device_unblock()
3036 device_block(struct scsi_device *sdev, void *data) in device_block() argument
3038 scsi_internal_device_block(sdev); in device_block()
3062 device_unblock(struct scsi_device *sdev, void *data) in device_unblock() argument
3064 scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data); in device_unblock()
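scsi_internal_device_block() parks the device in SDEV_BLOCK (or SDEV_CREATED_BLOCK) so no commands reach the low-level driver, and scsi_internal_device_unblock() restores the state passed in at line 3012; transport classes use this pattern across temporary link loss. A hedged sketch (example_pause_io is a hypothetical name):

    #include <scsi/scsi_device.h>

    /* Sketch: suspend command delivery across a transient outage,
     * then return the device to SDEV_RUNNING. */
    static int example_pause_io(struct scsi_device *sdev)
    {
            int err = scsi_internal_device_block(sdev);

            if (err)
                    return err;
            /* ... wait for the transport to recover ... */
            return scsi_internal_device_unblock(sdev, SDEV_RUNNING);
    }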
3147 void sdev_disable_disk_events(struct scsi_device *sdev) in sdev_disable_disk_events() argument
3149 atomic_inc(&sdev->disk_events_disable_depth); in sdev_disable_disk_events()
3153 void sdev_enable_disk_events(struct scsi_device *sdev) in sdev_enable_disk_events() argument
3155 if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0)) in sdev_enable_disk_events()
3157 atomic_dec(&sdev->disk_events_disable_depth); in sdev_enable_disk_events()
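The disable/enable pair is a nesting depth counter: sdev_disable_disk_events() increments disk_events_disable_depth and sdev_enable_disk_events() decrements it, warning on underflow at line 3155. Calls must therefore balance, as in this hedged sketch (example_no_events is a hypothetical name):

    #include <scsi/scsi_device.h>

    /* Sketch: suppress disk-events polling around an operation that
     * would otherwise trigger spurious media-change checks. The
     * disable/enable calls nest and must balance. */
    static void example_no_events(struct scsi_device *sdev)
    {
            sdev_disable_disk_events(sdev);
            /* ... operation that must not race with event polling ... */
            sdev_enable_disk_events(sdev);
    }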