Lines matching references to q (the struct request_queue argument) in block/blk-map.c:
12 int blk_rq_append_bio(struct request_queue *q, struct request *rq, in blk_rq_append_bio() (q declared as argument)
16 blk_rq_bio_prep(q, rq, bio); in blk_rq_append_bio()
17 else if (!ll_back_merge_fn(q, rq, bio)) in blk_rq_append_bio()
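These three hits cover blk_rq_append_bio(): q is handed to blk_rq_bio_prep() when the request gets its first bio, and to ll_back_merge_fn() to ask the merge code whether a later bio fits. A minimal sketch of that control flow, reconstructed around the matched lines (the tail-chaining branch and the error value are assumptions inferred from the merge semantics, not part of the listing):

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
        if (!rq->bio)
                /* first bio: initialize the request from it */
                blk_rq_bio_prep(q, rq, bio);
        else if (!ll_back_merge_fn(q, rq, bio))
                /* later bios must be back-mergeable or the request is full */
                return -EINVAL;
        else {
                /* assumed tail-chaining: append and grow the byte count */
                rq->biotail->bi_next = bio;
                rq->biotail = bio;
                rq->__data_len += bio->bi_iter.bi_size;
        }
        return 0;
}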
63 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, in blk_rq_map_user_iov() (q declared as argument)
84 if (uaddr & queue_dma_alignment(q)) in blk_rq_map_user_iov()
88 if (unaligned || (q->dma_pad_mask & iter->count) || map_data) in blk_rq_map_user_iov()
89 bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); in blk_rq_map_user_iov()
91 bio = bio_map_user_iov(q, iter, gfp_mask); in blk_rq_map_user_iov()
114 blk_queue_bounce(q, &bio); in blk_rq_map_user_iov()
116 blk_rq_bio_prep(q, rq, bio); in blk_rq_map_user_iov()
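Inside blk_rq_map_user_iov(), q decides between the copy and the zero-copy path: any user segment failing queue_dma_alignment(q), a length that collides with q->dma_pad_mask, or an explicit map_data forces a bounce copy via bio_copy_user_iov(); otherwise the user pages are pinned and mapped directly. A sketch of that decision, with the locals and the alignment loop reconstructed as assumptions around the matched lines:

        struct bio *bio;
        int unaligned = 0;
        struct iovec iov;
        struct iov_iter i;

        /* one misaligned user segment is enough to force the copy path */
        iov_for_each(iov, i, *iter) {
                unsigned long uaddr = (unsigned long) iov.iov_base;

                if (uaddr & queue_dma_alignment(q))
                        unaligned = 1;
        }

        if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
                bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);  /* bounce copy */
        else
                bio = bio_map_user_iov(q, iter, gfp_mask);             /* zero-copy map */

        /* error checks and bio ownership handling omitted */

        blk_queue_bounce(q, &bio);      /* swap in pages the device can DMA to */
        blk_rq_bio_prep(q, rq, bio);    /* attach the mapped bio to the request */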
121 int blk_rq_map_user(struct request_queue *q, struct request *rq, in blk_rq_map_user() (q declared as argument)
132 return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask); in blk_rq_map_user()
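blk_rq_map_user() only touches q to delegate: it wraps the single user buffer in a one-segment iov_iter and calls blk_rq_map_user_iov(). A sketch assuming the iterator is built with import_single_range() (only the final call appears in the listing; the import step is an assumption):

int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        struct iovec iov;
        struct iov_iter i;
        int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

        if (unlikely(ret < 0))
                return ret;

        /* the heavy lifting happens in the iov_iter variant */
        return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}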
181 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, in blk_rq_map_kern() (q declared as argument)
190 if (len > (queue_max_hw_sectors(q) << 9)) in blk_rq_map_kern()
195 do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf); in blk_rq_map_kern()
197 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); in blk_rq_map_kern()
199 bio = bio_map_kern(q, kbuf, len, gfp_mask); in blk_rq_map_kern()
210 ret = blk_rq_append_bio(q, rq, bio); in blk_rq_map_kern()
217 blk_queue_bounce(q, &rq->bio); in blk_rq_map_kern()
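blk_rq_map_kern() uses q four times: queue_max_hw_sectors(q) caps the transfer at what one request can carry, blk_rq_aligned(q, ...) chooses between copying (bio_copy_kern()) and direct mapping (bio_map_kern()), blk_rq_append_bio() attaches the resulting bio, and blk_queue_bounce() substitutes pages the device can actually DMA to. A sketch of the whole path; the error handling and the omitted flag setup are reconstructions around the matched lines, not part of the listing:

int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
        struct bio *bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL; /* more than one request can carry */
        if (!len || !kbuf)
                return -EINVAL;

        /* stack or misaligned buffers cannot be mapped directly */
        do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        /* (write direction and copy-user flags are set here; omitted) */

        ret = blk_rq_append_bio(q, rq, bio);
        if (unlikely(ret)) {
                bio_put(bio);
                return ret;
        }

        blk_queue_bounce(q, &rq->bio);
        return 0;
}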