Lines matching refs: bd (uses of the struct bsg_device pointer in block/bsg.c; the leading number on each entry is the source line, and the trailing tag records whether bd is a member, an argument, or a local in that function)

81 	struct bsg_device *bd;  member
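Line 81 is the back-pointer from a queued command to its owning device. Reconstructed from the fields the rest of this listing touches, the two structures look roughly like the sketch below; field order, the name[] size, and any members not referenced in the listing are assumptions.

	struct bsg_device {
		struct request_queue *queue;	/* lines 211, 611: underlying queue */
		spinlock_t lock;		/* guards the counters and lists below */
		struct list_head busy_list;	/* submitted, not yet completed */
		struct list_head done_list;	/* completed, awaiting read(2) */
		struct hlist_node dev_list;	/* per-minor hash linkage, line 770 */
		atomic_t ref_count;		/* open references, lines 716, 788 */
		int queued_cmds;		/* slots in flight, capped by max_queue */
		int done_cmds;			/* entries on done_list */
		int max_queue;			/* SG_SET_COMMAND_Q limit, line 886 */
		wait_queue_head_t wq_done;	/* completion wakeups, line 318 */
		wait_queue_head_t wq_free;	/* free-slot wakeups, line 102 */
		unsigned long flags;		/* BSG_F_BLOCK bit, lines 376, 465, 558 */
		char name[20];			/* for dprintk(); size assumed */
		struct block_device *bdev;	/* ioctl forwarding target, line 938 */
	};

	struct bsg_command {
		struct bsg_device *bd;		/* line 81: owning device */
		struct list_head list;		/* busy_list/done_list linkage */
		struct request *rq;
		struct bio *bio;
		struct sg_io_v4 hdr;		/* the userspace request header */
		char sense[SCSI_SENSE_BUFFERSIZE];	/* line 628 passes bc->sense */
	};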
93 struct bsg_device *bd = bc->bd; in bsg_free_command() local
98 spin_lock_irqsave(&bd->lock, flags); in bsg_free_command()
99 bd->queued_cmds--; in bsg_free_command()
100 spin_unlock_irqrestore(&bd->lock, flags); in bsg_free_command()
102 wake_up(&bd->wq_free); in bsg_free_command()
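Lines 93-102 are the release half of the slot accounting: drop queued_cmds under bd->lock, then wake any writer blocked waiting for a free slot. A sketch of the whole function under that reading; the slab cache name is an assumption.

	static void bsg_free_command(struct bsg_command *bc)
	{
		struct bsg_device *bd = bc->bd;			/* line 93 */
		unsigned long flags;

		kmem_cache_free(bsg_cmd_cachep, bc);	/* assumed per-command slab */

		spin_lock_irqsave(&bd->lock, flags);		/* lines 98-100 */
		bd->queued_cmds--;
		spin_unlock_irqrestore(&bd->lock, flags);

		wake_up(&bd->wq_free);		/* line 102: a slot just opened up */
	}

The irqsave variant here, unlike the plain spin_lock_irq() in the allocation path, suggests this can run with interrupts already disabled.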
105 static struct bsg_command *bsg_alloc_command(struct bsg_device *bd) in bsg_alloc_command() argument
109 spin_lock_irq(&bd->lock); in bsg_alloc_command()
111 if (bd->queued_cmds >= bd->max_queue) in bsg_alloc_command()
114 bd->queued_cmds++; in bsg_alloc_command()
115 spin_unlock_irq(&bd->lock); in bsg_alloc_command()
119 spin_lock_irq(&bd->lock); in bsg_alloc_command()
120 bd->queued_cmds--; in bsg_alloc_command()
125 bc->bd = bd; in bsg_alloc_command()
127 dprintk("%s: returning free cmd %p\n", bd->name, bc); in bsg_alloc_command()
130 spin_unlock_irq(&bd->lock); in bsg_alloc_command()
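Lines 105-130 show the allocation half: line 111 enforces the max_queue throttle, the slot is reserved at line 114 before the lock is dropped so the GFP_KERNEL allocation can sleep, and lines 119-120 roll the counter back if that allocation fails. A sketch; the slab call and error codes are assumptions.

	static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
	{
		struct bsg_command *bc = ERR_PTR(-EINVAL);

		spin_lock_irq(&bd->lock);			/* line 109 */
		if (bd->queued_cmds >= bd->max_queue)	/* line 111: throttle */
			goto out;
		bd->queued_cmds++;			/* line 114: reserve the slot */
		spin_unlock_irq(&bd->lock);		/* line 115 */

		bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);	/* assumed */
		if (unlikely(!bc)) {
			spin_lock_irq(&bd->lock);	/* lines 119-120: roll back */
			bd->queued_cmds--;
			bc = ERR_PTR(-ENOMEM);
			goto out;
		}

		bc->bd = bd;				/* line 125 */
		INIT_LIST_HEAD(&bc->list);
		dprintk("%s: returning free cmd %p\n", bd->name, bc);	/* line 127 */
		return bc;
	out:
		spin_unlock_irq(&bd->lock);		/* line 130 */
		return bc;
	}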
140 struct sg_io_v4 *hdr, struct bsg_device *bd, in blk_fill_sgv4_hdr_rq() argument
208 bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm, in bsg_map_hdr() argument
211 struct request_queue *q = bd->queue; in bsg_map_hdr()
241 ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm); in bsg_map_hdr()
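Lines 208-241 translate a userspace sg_io_v4 header into a request on bd->queue, with blk_fill_sgv4_hdr_rq() (which, per line 140, takes bd and the caller's write permission) copying the CDB and protocol fields. A heavily condensed sketch, assuming a blk_get_request(queue, op, gfp) era and eliding the bidirectional and data-buffer mapping the real function performs.

	static struct request *
	bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr,
		    fmode_t has_write_perm, u8 *sense)
	{
		struct request_queue *q = bd->queue;		/* line 211 */
		struct request *rq;
		int ret;

		rq = blk_get_request(q, hdr->dout_xfer_len ?
				REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
		if (IS_ERR(rq))
			return rq;

		ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm); /* line 241 */
		if (ret) {
			blk_put_request(rq);
			return ERR_PTR(ret);
		}

		/* blk_rq_map_user() of hdr->dout/din buffers and sense setup elided */
		return rq;
	}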
305 struct bsg_device *bd = bc->bd; in bsg_rq_end_io() local
309 bd->name, rq, bc, bc->bio, uptodate); in bsg_rq_end_io()
313 spin_lock_irqsave(&bd->lock, flags); in bsg_rq_end_io()
314 list_move_tail(&bc->list, &bd->done_list); in bsg_rq_end_io()
315 bd->done_cmds++; in bsg_rq_end_io()
316 spin_unlock_irqrestore(&bd->lock, flags); in bsg_rq_end_io()
318 wake_up(&bd->wq_done); in bsg_rq_end_io()
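Lines 305-318 are the request completion callback: the command migrates from busy_list to done_list and readers are woken. The irqsave locking at lines 313-316 is what you would expect of a completion that may fire in interrupt context. A sketch; the end_io_data linkage and the int uptodate signature are assumptions consistent with the dprintk at line 309.

	static void bsg_rq_end_io(struct request *rq, int uptodate)
	{
		struct bsg_command *bc = rq->end_io_data;	/* assumed linkage */
		struct bsg_device *bd = bc->bd;			/* line 305 */
		unsigned long flags;

		dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
			bd->name, rq, bc, bc->bio, uptodate);	/* line 309 */

		spin_lock_irqsave(&bd->lock, flags);		/* lines 313-316 */
		list_move_tail(&bc->list, &bd->done_list);	/* busy -> done */
		bd->done_cmds++;
		spin_unlock_irqrestore(&bd->lock, flags);

		wake_up(&bd->wq_done);		/* line 318: readers and poll() */
	}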
325 static void bsg_add_command(struct bsg_device *bd, struct request_queue *q, in bsg_add_command() argument
338 spin_lock_irq(&bd->lock); in bsg_add_command()
339 list_add_tail(&bc->list, &bd->busy_list); in bsg_add_command()
340 spin_unlock_irq(&bd->lock); in bsg_add_command()
342 dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc); in bsg_add_command()
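Lines 325-342 queue the mapped request asynchronously: the command is parked on busy_list first (lines 338-340), then handed to the block layer with bsg_rq_end_io() as the completion. A sketch; the bookkeeping, at_head computation, and blk_execute_rq_nowait() call are assumptions implied by the completion path above.

	static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
				    struct bsg_command *bc, struct request *rq)
	{
		/* assumed: honour BSG_FLAG_Q_AT_TAIL from the userspace header */
		int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

		bc->rq = rq;			/* assumed: so read(2) can find them */
		bc->bio = rq->bio;

		/* park on busy_list before submission so completion finds it */
		spin_lock_irq(&bd->lock);		/* lines 338-340 */
		list_add_tail(&bc->list, &bd->busy_list);
		spin_unlock_irq(&bd->lock);

		dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc); /* line 342 */

		rq->end_io_data = bc;
		blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
	}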
348 static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd) in bsg_next_done_cmd() argument
352 spin_lock_irq(&bd->lock); in bsg_next_done_cmd()
353 if (bd->done_cmds) { in bsg_next_done_cmd()
354 bc = list_first_entry(&bd->done_list, struct bsg_command, list); in bsg_next_done_cmd()
356 bd->done_cmds--; in bsg_next_done_cmd()
358 spin_unlock_irq(&bd->lock); in bsg_next_done_cmd()
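Lines 348-358 are the non-blocking fetch: pop the oldest entry off done_list, or return NULL if nothing has completed. The list_del() is an assumption; something must unlink the entry before the lock drops.

	static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
	{
		struct bsg_command *bc = NULL;

		spin_lock_irq(&bd->lock);			/* line 352 */
		if (bd->done_cmds) {				/* line 353 */
			bc = list_first_entry(&bd->done_list,
					      struct bsg_command, list); /* line 354 */
			list_del(&bc->list);			/* assumed unlink */
			bd->done_cmds--;			/* line 356 */
		}
		spin_unlock_irq(&bd->lock);			/* line 358 */

		return bc;
	}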
366 static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd) in bsg_get_done_cmd() argument
372 bc = bsg_next_done_cmd(bd); in bsg_get_done_cmd()
376 if (!test_bit(BSG_F_BLOCK, &bd->flags)) { in bsg_get_done_cmd()
381 ret = wait_event_interruptible(bd->wq_done, bd->done_cmds); in bsg_get_done_cmd()
388 dprintk("%s: returning done %p\n", bd->name, bc); in bsg_get_done_cmd()
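Lines 366-388 wrap that fetch in the blocking policy: non-blocking opens bail out immediately (line 376), blocking ones sleep on wq_done until done_cmds goes nonzero (line 381) and retry. A sketch of the loop; the errno values are assumptions.

	static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
	{
		struct bsg_command *bc;
		int ret;

		do {
			bc = bsg_next_done_cmd(bd);		/* line 372 */
			if (bc)
				break;

			if (!test_bit(BSG_F_BLOCK, &bd->flags)) { /* line 376 */
				bc = ERR_PTR(-EAGAIN);		/* assumed errno */
				break;
			}

			ret = wait_event_interruptible(bd->wq_done,
						       bd->done_cmds); /* line 381 */
			if (ret) {
				bc = ERR_PTR(-ERESTARTSYS);
				break;
			}
		} while (1);

		dprintk("%s: returning done %p\n", bd->name, bc);	/* line 388 */
		return bc;
	}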
449 static bool bsg_complete(struct bsg_device *bd) in bsg_complete() argument
455 spin_lock_irq(&bd->lock); in bsg_complete()
457 BUG_ON(bd->done_cmds > bd->queued_cmds); in bsg_complete()
462 if (bd->done_cmds == bd->queued_cmds) in bsg_complete()
465 spin = !test_bit(BSG_F_BLOCK, &bd->flags); in bsg_complete()
467 spin_unlock_irq(&bd->lock); in bsg_complete()
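Lines 449-467 define the drain condition used at teardown: everything is done once done_cmds has caught up with queued_cmds (line 462). The sketch below assumes that for non-blocking opens (line 465) the function busy-loops on the condition rather than returning, which is what a variable named spin implies.

	static bool bsg_complete(struct bsg_device *bd)
	{
		bool ret = false;
		bool spin;

		do {
			spin_lock_irq(&bd->lock);			/* line 455 */

			BUG_ON(bd->done_cmds > bd->queued_cmds);	/* line 457 */

			if (bd->done_cmds == bd->queued_cmds)	/* line 462 */
				ret = true;

			spin = !test_bit(BSG_F_BLOCK, &bd->flags);	/* line 465 */

			spin_unlock_irq(&bd->lock);			/* line 467 */
		} while (!ret && spin);

		return ret;
	}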
473 static int bsg_complete_all_commands(struct bsg_device *bd) in bsg_complete_all_commands() argument
478 dprintk("%s: entered\n", bd->name); in bsg_complete_all_commands()
483 io_wait_event(bd->wq_done, bsg_complete(bd)); in bsg_complete_all_commands()
490 spin_lock_irq(&bd->lock); in bsg_complete_all_commands()
491 if (!bd->queued_cmds) { in bsg_complete_all_commands()
492 spin_unlock_irq(&bd->lock); in bsg_complete_all_commands()
495 spin_unlock_irq(&bd->lock); in bsg_complete_all_commands()
497 bc = bsg_get_done_cmd(bd); in bsg_complete_all_commands()
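Lines 473-497 drain the device: io_wait_event() (line 483) sleeps uninterruptibly in I/O-wait until bsg_complete() reports everything finished, then each completed command is fetched and retired until queued_cmds reaches zero. A sketch; the copy-back of per-command status to userspace is elided.

	static int bsg_complete_all_commands(struct bsg_device *bd)
	{
		struct bsg_command *bc;
		int ret = 0;

		dprintk("%s: entered\n", bd->name);		/* line 478 */

		/* line 483: wait until every outstanding command has completed */
		io_wait_event(bd->wq_done, bsg_complete(bd));

		/* then retire each completed command in turn */
		do {
			spin_lock_irq(&bd->lock);		/* lines 490-495 */
			if (!bd->queued_cmds) {
				spin_unlock_irq(&bd->lock);
				break;
			}
			spin_unlock_irq(&bd->lock);

			bc = bsg_get_done_cmd(bd);		/* line 497 */
			if (IS_ERR(bc))
				break;

			/* status copy-back to the sg_io_v4 header elided */
			bsg_free_command(bc);	/* drops queued_cmds toward zero */
		} while (1);

		return ret;
	}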
513 __bsg_read(char __user *buf, size_t count, struct bsg_device *bd, in __bsg_read() argument
525 bc = bsg_get_done_cmd(bd); in __bsg_read()
555 static inline void bsg_set_block(struct bsg_device *bd, struct file *file) in bsg_set_block() argument
558 clear_bit(BSG_F_BLOCK, &bd->flags); in bsg_set_block()
560 set_bit(BSG_F_BLOCK, &bd->flags); in bsg_set_block()
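Lines 555-560 derive the blocking flag from the file; a plausible reconstruction tests O_NONBLOCK in file->f_flags. Since it is called at the top of every read and write (lines 583, 658), an fcntl(F_SETFL) change takes effect on the next call.

	static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
	{
		if (file->f_flags & O_NONBLOCK)		/* assumed test */
			clear_bit(BSG_F_BLOCK, &bd->flags);	/* line 558 */
		else
			set_bit(BSG_F_BLOCK, &bd->flags);	/* line 560 */
	}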
577 struct bsg_device *bd = file->private_data; in bsg_read() local
581 dprintk("%s: read %Zd bytes\n", bd->name, count); in bsg_read()
583 bsg_set_block(bd, file); in bsg_read()
586 ret = __bsg_read(buf, count, bd, NULL, &bytes_read); in bsg_read()
595 static int __bsg_write(struct bsg_device *bd, const char __user *buf, in __bsg_write() argument
611 struct request_queue *q = bd->queue; in __bsg_write()
613 bc = bsg_alloc_command(bd); in __bsg_write()
628 rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense); in __bsg_write()
635 bsg_add_command(bd, q, bc, rq); in __bsg_write()
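Lines 595-635 are the submission loop behind write(2): each sg_io_v4-sized chunk of the user buffer becomes one command, allocated (line 613, where the max_queue throttle may block), mapped (line 628), and queued (line 635). A sketch; the copy_from_user() step and the cleanup ordering are assumptions.

	static int __bsg_write(struct bsg_device *bd, const char __user *buf,
			       size_t count, size_t *bytes_written,
			       fmode_t has_write_perm)
	{
		struct bsg_command *bc = NULL;
		struct request *rq;
		int ret = 0, nr_commands;

		if (count % sizeof(struct sg_io_v4))	/* whole headers only */
			return -EINVAL;

		nr_commands = count / sizeof(struct sg_io_v4);
		while (nr_commands) {
			struct request_queue *q = bd->queue;	/* line 611 */

			bc = bsg_alloc_command(bd);		/* line 613 */
			if (IS_ERR(bc)) {
				ret = PTR_ERR(bc);
				bc = NULL;
				break;
			}

			if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
				ret = -EFAULT;
				break;
			}

			rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm,
					 bc->sense);		/* line 628 */
			if (IS_ERR(rq)) {
				ret = PTR_ERR(rq);
				break;
			}

			bsg_add_command(bd, q, bc, rq);		/* line 635 */
			bc = NULL;			/* ownership passed on */
			buf += sizeof(struct sg_io_v4);
			*bytes_written += sizeof(struct sg_io_v4);
			nr_commands--;
		}

		if (bc)			/* allocated but never queued: give it back */
			bsg_free_command(bc);
		return ret;
	}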
652 struct bsg_device *bd = file->private_data; in bsg_write() local
656 dprintk("%s: write %Zd bytes\n", bd->name, count); in bsg_write()
658 bsg_set_block(bd, file); in bsg_write()
661 ret = __bsg_write(bd, buf, count, &bytes_written, in bsg_write()
672 dprintk("%s: returning %Zd\n", bd->name, bytes_written); in bsg_write()
678 struct bsg_device *bd; in bsg_alloc_device() local
680 bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL); in bsg_alloc_device()
681 if (unlikely(!bd)) in bsg_alloc_device()
684 spin_lock_init(&bd->lock); in bsg_alloc_device()
686 bd->max_queue = BSG_DEFAULT_CMDS; in bsg_alloc_device()
688 INIT_LIST_HEAD(&bd->busy_list); in bsg_alloc_device()
689 INIT_LIST_HEAD(&bd->done_list); in bsg_alloc_device()
690 INIT_HLIST_NODE(&bd->dev_list); in bsg_alloc_device()
692 init_waitqueue_head(&bd->wq_free); in bsg_alloc_device()
693 init_waitqueue_head(&bd->wq_done); in bsg_alloc_device()
694 return bd; in bsg_alloc_device()
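Lines 678-694 are almost fully recoverable from the listing alone: zero the structure, initialize the lock, both lists, the hash node, and both wait queues, and set the default queue depth. Only the error-path detail is assumed.

	static struct bsg_device *bsg_alloc_device(void)
	{
		struct bsg_device *bd;

		bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);	/* line 680 */
		if (unlikely(!bd))					/* line 681 */
			return NULL;

		spin_lock_init(&bd->lock);				/* line 684 */

		bd->max_queue = BSG_DEFAULT_CMDS;	/* line 686: default depth */

		INIT_LIST_HEAD(&bd->busy_list);			/* lines 688-690 */
		INIT_LIST_HEAD(&bd->done_list);
		INIT_HLIST_NODE(&bd->dev_list);

		init_waitqueue_head(&bd->wq_free);		/* lines 692-693 */
		init_waitqueue_head(&bd->wq_done);

		return bd;					/* line 694 */
	}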
709 static int bsg_put_device(struct bsg_device *bd) in bsg_put_device() argument
712 struct request_queue *q = bd->queue; in bsg_put_device()
716 do_free = atomic_dec_and_test(&bd->ref_count); in bsg_put_device()
722 hlist_del(&bd->dev_list); in bsg_put_device()
725 dprintk("%s: tearing down\n", bd->name); in bsg_put_device()
730 set_bit(BSG_F_BLOCK, &bd->flags); in bsg_put_device()
737 ret = bsg_complete_all_commands(bd); in bsg_put_device()
739 kfree(bd); in bsg_put_device()
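Lines 709-739 tear the device down on the last put: drop the reference (line 716), unhash it (line 722), force BSG_F_BLOCK so the drain can sleep (line 730), reap all outstanding commands (line 737), and free. The mutex guarding the hash and the final queue-reference drop are assumptions.

	static int bsg_put_device(struct bsg_device *bd)
	{
		int ret = 0, do_free;
		struct request_queue *q = bd->queue;		/* line 712 */

		mutex_lock(&bsg_mutex);			/* assumed hash lock */

		do_free = atomic_dec_and_test(&bd->ref_count);	/* line 716 */
		if (!do_free) {
			mutex_unlock(&bsg_mutex);
			return ret;
		}

		hlist_del(&bd->dev_list);	/* line 722: no new lookups find us */
		mutex_unlock(&bsg_mutex);

		dprintk("%s: tearing down\n", bd->name);	/* line 725 */

		/* line 730: switch to blocking mode so the drain below can sleep */
		set_bit(BSG_F_BLOCK, &bd->flags);

		ret = bsg_complete_all_commands(bd);		/* line 737 */

		kfree(bd);					/* line 739 */
		blk_put_queue(q);	/* assumed: release the queue pinned at open */
		return ret;
	}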
751 struct bsg_device *bd; in bsg_add_device() local
758 bd = bsg_alloc_device(); in bsg_add_device()
759 if (!bd) { in bsg_add_device()
764 bd->queue = rq; in bsg_add_device()
766 bsg_set_block(bd, file); in bsg_add_device()
768 atomic_set(&bd->ref_count, 1); in bsg_add_device()
770 hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); in bsg_add_device()
772 strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); in bsg_add_device()
774 format_dev_t(buf, inode->i_rdev), bd->max_queue); in bsg_add_device()
777 return bd; in bsg_add_device()
782 struct bsg_device *bd; in __bsg_get_device() local
786 hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { in __bsg_get_device()
787 if (bd->queue == q) { in __bsg_get_device()
788 atomic_inc(&bd->ref_count); in __bsg_get_device()
792 bd = NULL; in __bsg_get_device()
795 return bd; in __bsg_get_device()
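Lines 782-795 look up an already-open device by walking the per-minor hash bucket and matching on the queue, taking a new reference on a hit. A sketch; whatever lock protects the hash is assumed taken here or by the caller.

	static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
	{
		struct bsg_device *bd;

		hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { /* line 786 */
			if (bd->queue == q) {			/* line 787 */
				atomic_inc(&bd->ref_count);	/* line 788 */
				return bd;
			}
		}

		bd = NULL;					/* line 792: no match */
		return bd;					/* line 795 */
	}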
800 struct bsg_device *bd; in bsg_get_device() local
815 bd = __bsg_get_device(iminor(inode), bcd->queue); in bsg_get_device()
816 if (bd) in bsg_get_device()
817 return bd; in bsg_get_device()
819 bd = bsg_add_device(inode, bcd->queue, file); in bsg_get_device()
820 if (IS_ERR(bd)) in bsg_get_device()
823 return bd; in bsg_get_device()
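Lines 800-823 implement open-time lookup-or-create: reuse an existing bsg_device for this minor and queue if one is already open, otherwise build one via bsg_add_device(), which per lines 751-777 allocates, points bd->queue at the queue, takes the initial reference, hashes the device by minor, and copies the class-device name. A sketch; the bcd lookup preceding line 815 and its locking are assumptions, and the error unwinding behind line 820 is elided.

	static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
	{
		struct bsg_device *bd;
		struct bsg_class_device *bcd;

		/* assumed: resolve the minor to its registered class device */
		mutex_lock(&bsg_mutex);
		bcd = idr_find(&bsg_minor_idr, iminor(inode));
		mutex_unlock(&bsg_mutex);

		if (!bcd)
			return ERR_PTR(-ENODEV);

		bd = __bsg_get_device(iminor(inode), bcd->queue);	/* line 815 */
		if (bd)						/* lines 816-817 */
			return bd;

		/* otherwise create, hash, and name a fresh device (lines 751-777) */
		bd = bsg_add_device(inode, bcd->queue, file);	/* lines 819-820 */
		return bd;					/* line 823 */
	}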
828 struct bsg_device *bd; in bsg_open() local
830 bd = bsg_get_device(inode, file); in bsg_open()
832 if (IS_ERR(bd)) in bsg_open()
833 return PTR_ERR(bd); in bsg_open()
835 file->private_data = bd; in bsg_open()
841 struct bsg_device *bd = file->private_data; in bsg_release() local
844 return bsg_put_device(bd); in bsg_release()
849 struct bsg_device *bd = file->private_data; in bsg_poll() local
852 poll_wait(file, &bd->wq_done, wait); in bsg_poll()
853 poll_wait(file, &bd->wq_free, wait); in bsg_poll()
855 spin_lock_irq(&bd->lock); in bsg_poll()
856 if (!list_empty(&bd->done_list)) in bsg_poll()
858 if (bd->queued_cmds < bd->max_queue) in bsg_poll()
860 spin_unlock_irq(&bd->lock); in bsg_poll()
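Lines 849-860 wire both wait queues into poll(): readable when done_list is non-empty, writable while there is still room under max_queue. A sketch; the specific mask bits are assumptions.

	static unsigned int bsg_poll(struct file *file, poll_table *wait)
	{
		struct bsg_device *bd = file->private_data;	/* line 849 */
		unsigned int mask = 0;

		poll_wait(file, &bd->wq_done, wait);		/* line 852 */
		poll_wait(file, &bd->wq_free, wait);		/* line 853 */

		spin_lock_irq(&bd->lock);			/* line 855 */
		if (!list_empty(&bd->done_list))		/* line 856 */
			mask |= POLLIN | POLLRDNORM;	/* a completion can be read */
		if (bd->queued_cmds < bd->max_queue)		/* line 858 */
			mask |= POLLOUT;		/* room to queue another */
		spin_unlock_irq(&bd->lock);			/* line 860 */

		return mask;
	}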
867 struct bsg_device *bd = file->private_data; in bsg_ioctl() local
876 return put_user(bd->max_queue, uarg); in bsg_ioctl()
885 spin_lock_irq(&bd->lock); in bsg_ioctl()
886 bd->max_queue = queue; in bsg_ioctl()
887 spin_unlock_irq(&bd->lock); in bsg_ioctl()
904 return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg); in bsg_ioctl()
916 rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense); in bsg_ioctl()
925 blk_execute_rq(bd->queue, NULL, rq, at_head); in bsg_ioctl()
938 return ioctl_by_bdev(bd->bdev, cmd, arg); in bsg_ioctl()
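Lines 867-938 round out the ioctl surface: get and set of max_queue (the set at lines 885-887 takes bd->lock so concurrent readers of the limit never see a torn value), a synchronous SG_IO path that reuses bsg_map_hdr() (line 916) with a blocking blk_execute_rq() (line 925), and forwarding of the remaining commands to scsi_cmd_ioctl() (line 904) or ioctl_by_bdev() (line 938). A condensed sketch; the validation bounds, case dispatch for the forwarded commands, and copy-back step are assumptions.

	static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	{
		struct bsg_device *bd = file->private_data;	/* line 867 */
		int __user *uarg = (int __user *) arg;

		switch (cmd) {
		case SG_GET_COMMAND_Q:
			return put_user(bd->max_queue, uarg);	/* line 876 */
		case SG_SET_COMMAND_Q: {
			int queue;

			if (get_user(queue, uarg))
				return -EFAULT;
			if (queue < 1)			/* assumed lower bound */
				return -EINVAL;

			spin_lock_irq(&bd->lock);		/* lines 885-887 */
			bd->max_queue = queue;
			spin_unlock_irq(&bd->lock);
			return 0;
		}
		case SG_IO: {
			struct sg_io_v4 hdr;
			struct request *rq;
			u8 sense[SCSI_SENSE_BUFFERSIZE];
			int at_head;

			if (copy_from_user(&hdr, uarg, sizeof(hdr)))
				return -EFAULT;

			rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE,
					 sense);		/* line 916 */
			if (IS_ERR(rq))
				return PTR_ERR(rq);

			/* assumed: honour BSG_FLAG_Q_AT_TAIL from the header */
			at_head = !(hdr.flags & BSG_FLAG_Q_AT_TAIL);
			blk_execute_rq(bd->queue, NULL, rq, at_head); /* line 925 */

			/* status/sense copy-back and request teardown elided */
			if (copy_to_user(uarg, &hdr, sizeof(hdr)))
				return -EFAULT;
			return 0;
		}
		case SCSI_IOCTL_SEND_COMMAND:	/* assumed dispatch for line 904 */
			return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode,
					      cmd, uarg);	/* line 904 */
		default:
			return ioctl_by_bdev(bd->bdev, cmd, arg); /* line 938 */
		}
	}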