Lines matching refs: q (the struct request_queue argument used throughout the block-layer SCSI ioctl code, block/scsi_ioctl.c)

60 static int scsi_get_idlun(struct request_queue *q, int __user *p)  in scsi_get_idlun()  argument
65 static int scsi_get_bus(struct request_queue *q, int __user *p) in scsi_get_bus() argument
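
The two identification helpers above only store an integer back through the user pointer (in this block-layer emulation they report a fixed 0). A minimal userspace sketch of the matching ioctls; the fd is assumed to be an open block device, as in the later examples, and the header locations for the macros can vary between libc versions:

/*
 * Hedged sketch: query SCSI id/lun and bus number. The macros are normally
 * available via the userspace scsi headers (values 0x5382 and 0x5386).
 * The block-layer emulation above writes a single int of 0 for each; the
 * real sg driver fills idlun[0] with the packed id/lun/channel/host value.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>

static void show_scsi_id(int fd)
{
	int idlun[2] = { 0, 0 };	/* second int is host_unique_id in the sg driver */
	int bus = 0;

	if (ioctl(fd, SCSI_IOCTL_GET_IDLUN, idlun) == 0)
		printf("idlun: 0x%x\n", idlun[0]);
	if (ioctl(fd, SCSI_IOCTL_GET_BUS_NUMBER, &bus) == 0)
		printf("bus: %d\n", bus);
}
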
70 static int sg_get_timeout(struct request_queue *q) in sg_get_timeout() argument
72 return jiffies_to_clock_t(q->sg_timeout); in sg_get_timeout()
75 static int sg_set_timeout(struct request_queue *q, int __user *p) in sg_set_timeout() argument
80 q->sg_timeout = clock_t_to_jiffies(timeout); in sg_set_timeout()
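
A hedged userspace sketch of the two timeout ioctls served above: SG_SET_TIMEOUT takes a pointer to a value in USER_HZ clock ticks (hence the clock_t_to_jiffies()/jiffies_to_clock_t() conversions), and SG_GET_TIMEOUT reports the current value as the ioctl return code. The /dev/sdb path is only an illustrative assumption:

/*
 * Set the per-queue SG timeout to roughly 60 seconds, then read it back.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	int fd = open("/dev/sdb", O_RDONLY | O_NONBLOCK);	/* device path is an assumption */
	if (fd < 0)
		return 1;

	int ticks = 60 * sysconf(_SC_CLK_TCK);			/* USER_HZ ticks, ~60 s */
	if (ioctl(fd, SG_SET_TIMEOUT, &ticks) < 0)
		perror("SG_SET_TIMEOUT");

	int cur = ioctl(fd, SG_GET_TIMEOUT);			/* value comes back as the return code */
	printf("sg timeout: %d ticks\n", cur);

	close(fd);
	return 0;
}
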
85 static int max_sectors_bytes(struct request_queue *q) in max_sectors_bytes() argument
87 unsigned int max_sectors = queue_max_sectors(q); in max_sectors_bytes()
94 static int sg_get_reserved_size(struct request_queue *q, int __user *p) in sg_get_reserved_size() argument
96 int val = min_t(int, q->sg_reserved_size, max_sectors_bytes(q)); in sg_get_reserved_size()
101 static int sg_set_reserved_size(struct request_queue *q, int __user *p) in sg_set_reserved_size() argument
111 q->sg_reserved_size = min(size, max_sectors_bytes(q)); in sg_set_reserved_size()
119 static int sg_emulated_host(struct request_queue *q, int __user *p) in sg_emulated_host() argument
227 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, in blk_fill_sghdr_rq() argument
242 rq->timeout = q->sg_timeout; in blk_fill_sghdr_rq()
286 static int sg_io(struct request_queue *q, struct gendisk *bd_disk, in sg_io() argument
300 if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9)) in sg_io()
318 rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL); in sg_io()
330 if (blk_fill_sghdr_rq(q, rq, hdr, mode)) in sg_io()
347 ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL); in sg_io()
350 ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len, in sg_io()
368 blk_execute_rq(q, bd_disk, rq, at_head); in sg_io()
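
The sg_io() lines above are the kernel side of the SG_IO ioctl: the header is validated against queue_max_hw_sectors(), mapped with blk_rq_map_user()/blk_rq_map_user_iov(), and executed with blk_execute_rq(). A minimal, assumption-laden userspace counterpart (a 6-byte INQUIRY on a plain block device node; device path and buffer sizes are illustrative):

/*
 * Issue INQUIRY through SG_IO and print the vendor/product strings.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, 96-byte allocation length */
	unsigned char data[96], sense[32];
	struct sg_io_hdr hdr;

	int fd = open("/dev/sdb", O_RDONLY | O_NONBLOCK);	/* device path is an assumption */
	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxferp = data;
	hdr.dxfer_len = sizeof(data);		/* checked against queue_max_hw_sectors() above */
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 20000;			/* milliseconds; 0 falls back to q->sg_timeout */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else if (hdr.status == 0)
		printf("vendor/product: %.24s\n", (char *)data + 8);

	close(fd);
	return 0;
}
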
416 int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, in sg_scsi_ioctl() argument
441 buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN); in sg_scsi_ioctl()
447 rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT); in sg_scsi_ioctl()
498 if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) { in sg_scsi_ioctl()
507 blk_execute_rq(q, disk, rq, 0); in sg_scsi_ioctl()
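
sg_scsi_ioctl() above serves the older SCSI_IOCTL_SEND_COMMAND interface, whose argument is a packed buffer of two unsigned ints (input length, output length) followed by the CDB and any data bytes, mirroring the get_user()/copy_from_user() sequence in the handler. A hedged TEST UNIT READY sketch; the local struct is a hypothetical mirror of that layout, not a header-provided type:

/*
 * TEST UNIT READY via SCSI_IOCTL_SEND_COMMAND: opcode 0x00, no data in
 * either direction, so inlen and outlen stay 0.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/scsi_ioctl.h>		/* SCSI_IOCTL_SEND_COMMAND */

struct send_cmd {			/* assumed local mirror of the kernel-side layout */
	unsigned int inlen;
	unsigned int outlen;
	unsigned char cdb[6];		/* TEST UNIT READY is a 6-byte CDB */
};

int main(void)
{
	struct send_cmd cmd;
	int fd = open("/dev/sdb", O_RDONLY | O_NONBLOCK);	/* write-class commands need write access */

	if (fd < 0)
		return 1;

	memset(&cmd, 0, sizeof(cmd));	/* zeroed CDB == TEST UNIT READY */
	if (ioctl(fd, SCSI_IOCTL_SEND_COMMAND, &cmd) == 0)
		printf("unit ready\n");
	else
		perror("SCSI_IOCTL_SEND_COMMAND");

	close(fd);
	return 0;
}
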
533 static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk, in __blk_send_generic() argument
539 rq = blk_get_request(q, WRITE, __GFP_WAIT); in __blk_send_generic()
547 err = blk_execute_rq(q, bd_disk, rq, 0); in __blk_send_generic()
553 static inline int blk_send_start_stop(struct request_queue *q, in blk_send_start_stop() argument
556 return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data); in blk_send_start_stop()
559 int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode, in scsi_cmd_ioctl() argument
564 if (!q) in scsi_cmd_ioctl()
575 err = scsi_get_idlun(q, arg); in scsi_cmd_ioctl()
578 err = scsi_get_bus(q, arg); in scsi_cmd_ioctl()
581 err = sg_set_timeout(q, arg); in scsi_cmd_ioctl()
584 err = sg_get_timeout(q); in scsi_cmd_ioctl()
587 err = sg_get_reserved_size(q, arg); in scsi_cmd_ioctl()
590 err = sg_set_reserved_size(q, arg); in scsi_cmd_ioctl()
593 err = sg_emulated_host(q, arg); in scsi_cmd_ioctl()
601 err = sg_io(q, bd_disk, &hdr, mode); in scsi_cmd_ioctl()
649 err = sg_io(q, bd_disk, &hdr, mode); in scsi_cmd_ioctl()
673 err = sg_scsi_ioctl(q, bd_disk, mode, arg); in scsi_cmd_ioctl()
676 err = blk_send_start_stop(q, bd_disk, 0x03); in scsi_cmd_ioctl()
679 err = blk_send_start_stop(q, bd_disk, 0x02); in scsi_cmd_ioctl()
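
Two of the simpler cases dispatched above, sketched from userspace: SG_EMULATED_HOST (sg_emulated_host() reports 1 through the pointer for devices whose sg ioctls are handled by this emulation) and CDROMSTART/CDROMSTOP, which end up in blk_send_start_stop() with 0x03/0x02. Again, 'fd' is assumed to be an open block device as in the earlier examples:

/*
 * Probe the sg emulation flag, then ask the unit to spin up.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>
#include <linux/cdrom.h>

static void probe_emulation(int fd)
{
	int emulated = 0;

	if (ioctl(fd, SG_EMULATED_HOST, &emulated) == 0)
		printf("sg emulated host: %d\n", emulated);

	if (ioctl(fd, CDROMSTART, 0) < 0)	/* routed to GPCMD_START_STOP_UNIT above */
		perror("CDROMSTART");
}
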