Lines matching refs:fq
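
All of the references below touch fields of struct blk_flush_queue, the per-queue state behind the kernel's flush machinery (the function names suggest block/blk-flush.c). Reconstructed only from the accesses listed here, the structure looks roughly like the sketch below; the field names match the references, but the types are guesses and the kernel types (struct list_head, spinlock_t, struct request) are assumed to be in scope.

/*
 * Inferred shape of struct blk_flush_queue, pieced together from the
 * field accesses in this listing; not the kernel's actual definition.
 */
struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;	/* a flush kick was held back */
	unsigned int		flush_pending_idx:1;	/* staging list currently filling */
	unsigned int		flush_running_idx:1;	/* staging list whose flush is in flight */
	unsigned long		flush_pending_since;	/* jiffies stamp of the oldest pending rq */
	struct list_head	flush_queue[2];		/* double-buffered pending/running lists */
	struct list_head	flush_data_in_flight;	/* data writes issued, not yet completed */
	struct request		*flush_rq;		/* preallocated flush request */
	struct request		*orig_rq;		/* original request, for mq tag bookkeeping */
	spinlock_t		mq_flush_lock;		/* serializes the state above on blk-mq */
};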

96 			   struct blk_flush_queue *fq);
167 struct blk_flush_queue *fq, in blk_flush_complete_seq() argument
171 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; in blk_flush_complete_seq()
187 fq->flush_pending_since = jiffies; in blk_flush_complete_seq()
192 list_move_tail(&rq->flush.list, &fq->flush_data_in_flight); in blk_flush_complete_seq()
216 kicked = blk_kick_flush(q, fq); in blk_flush_complete_seq()
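
blk_flush_complete_seq() is the stepper these lines come from: when a request finishes one step of its flush sequence, its next step decides where it is parked, onto the currently-pending staging list for a pre/post-flush (stamping flush_pending_since for the first one), onto flush_data_in_flight once its data write is issued, and out of the machinery when it is done, after which blk_kick_flush() is asked whether a new flush should be sent. A compressed, stand-alone rendering of that routing (list handling, locking and error propagation omitted; the step names are illustrative):

/* Illustrative step names; the kernel uses the REQ_FSEQ_* flags that
 * appear further down in this listing. */
enum flush_step { STEP_PREFLUSH, STEP_DATA, STEP_POSTFLUSH, STEP_DONE };

/* Where a request is parked while it waits for its next step. */
enum rq_destination { TO_PENDING_LIST, TO_DATA_IN_FLIGHT, TO_COMPLETION };

static enum rq_destination route_by_next_step(enum flush_step next)
{
	switch (next) {
	case STEP_PREFLUSH:
	case STEP_POSTFLUSH:
		return TO_PENDING_LIST;		/* fq->flush_queue[flush_pending_idx] */
	case STEP_DATA:
		return TO_DATA_IN_FLIGHT;	/* fq->flush_data_in_flight */
	default:
		return TO_COMPLETION;		/* sequence finished */
	}
}
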
227 struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx); in flush_end_io() local
233 spin_lock_irqsave(&fq->mq_flush_lock, flags); in flush_end_io()
235 blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq); in flush_end_io()
239 running = &fq->flush_queue[fq->flush_running_idx]; in flush_end_io()
240 BUG_ON(fq->flush_pending_idx == fq->flush_running_idx); in flush_end_io()
243 fq->flush_running_idx ^= 1; in flush_end_io()
253 queued |= blk_flush_complete_seq(rq, fq, seq, error); in flush_end_io()
267 if (queued || fq->flush_queue_delayed) { in flush_end_io()
271 fq->flush_queue_delayed = 0; in flush_end_io()
273 spin_unlock_irqrestore(&fq->mq_flush_lock, flags); in flush_end_io()
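
The flush_queue[2] plus flush_pending_idx/flush_running_idx references above form a two-slot double buffer: new requests collect on the pending list while at most one flush covering the other list is in flight, and each index flips when its list changes role (pending_idx in blk_kick_flush(), running_idx in flush_end_io()). The stand-alone model below is a plain userspace sketch of just that toggle; all names are invented for illustration.

#include <assert.h>
#include <stdio.h>

struct toggle_model {
	int		items[2];	/* stand-in for fq->flush_queue[0]/[1] */
	unsigned int	pending_idx:1;	/* list currently collecting requests */
	unsigned int	running_idx:1;	/* list whose flush is in flight */
};

/* Loosely mirrors blk_kick_flush(): only issue a flush when none is in
 * flight (the two indexes are equal) and the pending list is not empty. */
static void kick_flush(struct toggle_model *m)
{
	if (m->pending_idx != m->running_idx || !m->items[m->pending_idx])
		return;
	m->pending_idx ^= 1;		/* cf. fq->flush_pending_idx ^= 1 */
}

/* Loosely mirrors flush_end_io(): retire the running batch, then flip. */
static void flush_end(struct toggle_model *m)
{
	assert(m->pending_idx != m->running_idx);	/* cf. the BUG_ON above */
	printf("retired %d request(s) from list %u\n",
	       m->items[m->running_idx], (unsigned int)m->running_idx);
	m->items[m->running_idx] = 0;
	m->running_idx ^= 1;		/* cf. fq->flush_running_idx ^= 1 */
}

int main(void)
{
	struct toggle_model m = { .items = { 3, 0 } };

	kick_flush(&m);			/* list 0 starts running, list 1 collects */
	m.items[m.pending_idx] = 2;	/* more flushes arrive meanwhile */
	flush_end(&m);			/* list 0 retired, roles swap */
	kick_flush(&m);
	flush_end(&m);
	return 0;
}
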
290 static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq) in blk_kick_flush() argument
292 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; in blk_kick_flush()
295 struct request *flush_rq = fq->flush_rq; in blk_kick_flush()
298 if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending)) in blk_kick_flush()
302 if (!list_empty(&fq->flush_data_in_flight) && in blk_kick_flush()
304 fq->flush_pending_since + FLUSH_PENDING_TIMEOUT)) in blk_kick_flush()
311 fq->flush_pending_idx ^= 1; in blk_kick_flush()
325 fq->orig_rq = first_rq; in blk_kick_flush()
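
Lines 302-304 are the batching heuristic inside blk_kick_flush(): if data writes are still in flight and the oldest pending flush has not been waiting long, the flush is held back so later requests can share it (flush_queue_delayed, seen in flush_end_io() above, records that such a kick was skipped). A minimal expression of that test; the timeout value is a placeholder, since FLUSH_PENDING_TIMEOUT's definition is not part of this listing.

#include <stdbool.h>

#define FLUSH_PENDING_TIMEOUT_TICKS	5	/* placeholder value */

/*
 * Hold the flush back while data writes are in flight, unless the oldest
 * pending flush has already waited past the timeout (cf. the
 * !list_empty(&fq->flush_data_in_flight) && time check above).
 */
bool hold_off_flush(bool data_in_flight, unsigned long now,
		    unsigned long pending_since)
{
	return data_in_flight &&
	       now - pending_since < FLUSH_PENDING_TIMEOUT_TICKS;
}
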
342 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); in flush_data_end_io() local
348 if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error)) in flush_data_end_io()
358 struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx); in mq_flush_data_end_io() local
366 spin_lock_irqsave(&fq->mq_flush_lock, flags); in mq_flush_data_end_io()
367 if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error)) in mq_flush_data_end_io()
369 spin_unlock_irqrestore(&fq->mq_flush_lock, flags); in mq_flush_data_end_io()
389 struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx); in blk_insert_flush() local
440 spin_lock_irq(&fq->mq_flush_lock); in blk_insert_flush()
441 blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); in blk_insert_flush()
442 spin_unlock_irq(&fq->mq_flush_lock); in blk_insert_flush()
447 blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); in blk_insert_flush()
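
blk_insert_flush() (lines 440-447) pre-completes the steps a request does not need by passing REQ_FSEQ_ACTIONS & ~policy to blk_flush_complete_seq(), so the state machine only walks the remaining pre-flush/data/post-flush steps; the blk-mq path does this under mq_flush_lock, the other path without it. The flag arithmetic looks roughly like the sketch below; the values are illustrative, as the REQ_FSEQ_* definitions are not part of this listing.

/* Illustrative values; the real REQ_FSEQ_* flags live in the block layer. */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0),
	REQ_FSEQ_DATA		= (1 << 1),
	REQ_FSEQ_POSTFLUSH	= (1 << 2),
	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,
};

/*
 * Example: a request whose policy is PREFLUSH | DATA has its POSTFLUSH
 * step marked done immediately, since REQ_FSEQ_ACTIONS & ~policy
 * evaluates to REQ_FSEQ_POSTFLUSH.
 */
static inline unsigned int steps_completed_up_front(unsigned int policy)
{
	return REQ_FSEQ_ACTIONS & ~policy;
}
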
506 struct blk_flush_queue *fq; in blk_alloc_flush_queue() local
509 fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node); in blk_alloc_flush_queue()
510 if (!fq) in blk_alloc_flush_queue()
514 spin_lock_init(&fq->mq_flush_lock); in blk_alloc_flush_queue()
518 fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node); in blk_alloc_flush_queue()
519 if (!fq->flush_rq) in blk_alloc_flush_queue()
522 INIT_LIST_HEAD(&fq->flush_queue[0]); in blk_alloc_flush_queue()
523 INIT_LIST_HEAD(&fq->flush_queue[1]); in blk_alloc_flush_queue()
524 INIT_LIST_HEAD(&fq->flush_data_in_flight); in blk_alloc_flush_queue()
526 return fq; in blk_alloc_flush_queue()
529 kfree(fq); in blk_alloc_flush_queue()
534 void blk_free_flush_queue(struct blk_flush_queue *fq) in blk_free_flush_queue() argument
537 if (!fq) in blk_free_flush_queue()
540 kfree(fq->flush_rq); in blk_free_flush_queue()
541 kfree(fq); in blk_free_flush_queue()
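
blk_alloc_flush_queue() and blk_free_flush_queue() pair two allocations, the queue itself and its preallocated flush request, and the failure path at line 529 frees the outer allocation when the inner one cannot be made. A stand-alone userspace model of that pairing, with the types simplified to the bare minimum:

#include <stdlib.h>

struct flush_queue_model {
	void *flush_rq;			/* stands in for fq->flush_rq */
	/* lock and list heads omitted from the model */
};

/* Mirrors the shape of blk_alloc_flush_queue(): outer allocation, inner
 * allocation, and an unwind of the outer one if the inner fails. */
struct flush_queue_model *alloc_flush_queue(size_t rq_sz)
{
	struct flush_queue_model *fq = calloc(1, sizeof(*fq));

	if (!fq)
		return NULL;

	fq->flush_rq = calloc(1, rq_sz);	/* cf. kzalloc_node(rq_sz, ...) */
	if (!fq->flush_rq) {
		free(fq);			/* cf. the kfree(fq) failure path */
		return NULL;
	}
	return fq;
}

/* Mirrors blk_free_flush_queue(): tolerate NULL, free inner then outer. */
void free_flush_queue(struct flush_queue_model *fq)
{
	if (!fq)
		return;
	free(fq->flush_rq);
	free(fq);
}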