Lines Matching refs:fq
95 struct blk_flush_queue *fq);
166 struct blk_flush_queue *fq, in blk_flush_complete_seq() argument
170 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; in blk_flush_complete_seq()
186 fq->flush_pending_since = jiffies; in blk_flush_complete_seq()
191 list_move_tail(&rq->flush.list, &fq->flush_data_in_flight); in blk_flush_complete_seq()
215 kicked = blk_kick_flush(q, fq); in blk_flush_complete_seq()
226 struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx); in flush_end_io() local
229 spin_lock_irqsave(&fq->mq_flush_lock, flags); in flush_end_io()
233 running = &fq->flush_queue[fq->flush_running_idx]; in flush_end_io()
234 BUG_ON(fq->flush_pending_idx == fq->flush_running_idx); in flush_end_io()
237 fq->flush_running_idx ^= 1; in flush_end_io()
247 queued |= blk_flush_complete_seq(rq, fq, seq, error); in flush_end_io()
261 if (queued || fq->flush_queue_delayed) { in flush_end_io()
265 fq->flush_queue_delayed = 0; in flush_end_io()
267 spin_unlock_irqrestore(&fq->mq_flush_lock, flags); in flush_end_io()
284 static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq) in blk_kick_flush() argument
286 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; in blk_kick_flush()
289 struct request *flush_rq = fq->flush_rq; in blk_kick_flush()
292 if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending)) in blk_kick_flush()
296 if (!list_empty(&fq->flush_data_in_flight) && in blk_kick_flush()
298 fq->flush_pending_since + FLUSH_PENDING_TIMEOUT)) in blk_kick_flush()
305 fq->flush_pending_idx ^= 1; in blk_kick_flush()
329 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); in flush_data_end_io() local
335 if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error)) in flush_data_end_io()
345 struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx); in mq_flush_data_end_io() local
353 spin_lock_irqsave(&fq->mq_flush_lock, flags); in mq_flush_data_end_io()
354 if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error)) in mq_flush_data_end_io()
356 spin_unlock_irqrestore(&fq->mq_flush_lock, flags); in mq_flush_data_end_io()
376 struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx); in blk_insert_flush() local
427 spin_lock_irq(&fq->mq_flush_lock); in blk_insert_flush()
428 blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); in blk_insert_flush()
429 spin_unlock_irq(&fq->mq_flush_lock); in blk_insert_flush()
434 blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); in blk_insert_flush()
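
The two blk_insert_flush() references above pass REQ_FSEQ_ACTIONS & ~policy, i.e. they mark as already complete every sequence step the request's flush policy does not need, so blk_flush_complete_seq() only has to walk the remaining steps. For orientation, the sequence bits look roughly like the sketch below; it is reconstructed from memory of blk-flush.c of this vintage rather than copied from the file, so treat the exact comments and values as an approximation.

    /* Sketch of the flush sequence bits used in the listing above. */
    enum {
            REQ_FSEQ_PREFLUSH  = (1 << 0),  /* pre-flush in progress */
            REQ_FSEQ_DATA      = (1 << 1),  /* data write in progress */
            REQ_FSEQ_POSTFLUSH = (1 << 2),  /* post-flush in progress */
            REQ_FSEQ_DONE      = (1 << 3),  /* sequence finished */

            /* every step a flush request may have to go through */
            REQ_FSEQ_ACTIONS   = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                                 REQ_FSEQ_POSTFLUSH,
    };
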
493 struct blk_flush_queue *fq; in blk_alloc_flush_queue() local
496 fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node); in blk_alloc_flush_queue()
497 if (!fq) in blk_alloc_flush_queue()
501 spin_lock_init(&fq->mq_flush_lock); in blk_alloc_flush_queue()
505 fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node); in blk_alloc_flush_queue()
506 if (!fq->flush_rq) in blk_alloc_flush_queue()
509 INIT_LIST_HEAD(&fq->flush_queue[0]); in blk_alloc_flush_queue()
510 INIT_LIST_HEAD(&fq->flush_queue[1]); in blk_alloc_flush_queue()
511 INIT_LIST_HEAD(&fq->flush_data_in_flight); in blk_alloc_flush_queue()
513 return fq; in blk_alloc_flush_queue()
516 kfree(fq); in blk_alloc_flush_queue()
521 void blk_free_flush_queue(struct blk_flush_queue *fq) in blk_free_flush_queue() argument
524 if (!fq) in blk_free_flush_queue()
527 kfree(fq->flush_rq); in blk_free_flush_queue()
528 kfree(fq); in blk_free_flush_queue()
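
Every fq-> access in this listing goes through struct blk_flush_queue. Below is a sketch of that structure reconstructed only from the fields referenced above; the authoritative definition lives in block/blk.h and may differ in member order and exact layout.

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct request;  /* defined in <linux/blkdev.h> */

    /* Sketch of struct blk_flush_queue as implied by the references above. */
    struct blk_flush_queue {
            unsigned int      flush_queue_delayed:1;  /* a flush was held back while data was in flight */
            unsigned int      flush_pending_idx:1;    /* index of the list collecting new flush requests */
            unsigned int      flush_running_idx:1;    /* index of the list whose flush is in flight */
            unsigned long     flush_pending_since;    /* jiffies when the oldest pending flush arrived */
            struct list_head  flush_queue[2];         /* double-buffered pending/running lists */
            struct list_head  flush_data_in_flight;   /* data writes issued but not yet completed */
            struct request    *flush_rq;              /* single preallocated flush request per queue */
            spinlock_t        mq_flush_lock;          /* protects the above on the blk-mq path */
    };

The one-bit indices explain the ^= 1 toggles seen in flush_end_io() and blk_kick_flush(): each time a flush is issued, the pending and running lists swap roles, so new requests keep collecting on one list while the other drains.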