Lines matching refs:blkif (xen-blkback backend, drivers/block/xen-blkback/blkback.c)
109 static inline int get_free_page(struct xen_blkif *blkif, struct page **page) in get_free_page() argument
113 spin_lock_irqsave(&blkif->free_pages_lock, flags); in get_free_page()
114 if (list_empty(&blkif->free_pages)) { in get_free_page()
115 BUG_ON(blkif->free_pages_num != 0); in get_free_page()
116 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); in get_free_page()
119 BUG_ON(blkif->free_pages_num == 0); in get_free_page()
120 page[0] = list_first_entry(&blkif->free_pages, struct page, lru); in get_free_page()
122 blkif->free_pages_num--; in get_free_page()
123 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); in get_free_page()
128 static inline void put_free_pages(struct xen_blkif *blkif, struct page **page, in put_free_pages() argument
134 spin_lock_irqsave(&blkif->free_pages_lock, flags); in put_free_pages()
136 list_add(&page[i]->lru, &blkif->free_pages); in put_free_pages()
137 blkif->free_pages_num += num; in put_free_pages()
138 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); in put_free_pages()
141 static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num) in shrink_free_pagepool() argument
148 spin_lock_irqsave(&blkif->free_pages_lock, flags); in shrink_free_pagepool()
149 while (blkif->free_pages_num > num) { in shrink_free_pagepool()
150 BUG_ON(list_empty(&blkif->free_pages)); in shrink_free_pagepool()
151 page[num_pages] = list_first_entry(&blkif->free_pages, in shrink_free_pagepool()
154 blkif->free_pages_num--; in shrink_free_pagepool()
156 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); in shrink_free_pagepool()
158 spin_lock_irqsave(&blkif->free_pages_lock, flags); in shrink_free_pagepool()
162 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); in shrink_free_pagepool()
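
The three helpers above (get_free_page, put_free_pages, shrink_free_pagepool) share one pattern: a per-blkif pool of spare pages on a list guarded by free_pages_lock, with free_pages_num tracking its depth; the trim pass pops pages under the lock and frees them outside it. The sketch below shows that pattern in isolation; the struct and function names are simplified stand-ins, not the driver's own.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

struct page_pool {
	spinlock_t		lock;	/* free_pages_lock */
	struct list_head	pages;	/* free_pages      */
	unsigned int		num;	/* free_pages_num  */
};

/* Take one page from the pool, or fall back to a fresh allocation. */
static int pool_get_page(struct page_pool *pool, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (list_empty(&pool->pages)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		*page = alloc_page(GFP_KERNEL);
		return *page ? 0 : -ENOMEM;
	}
	*page = list_first_entry(&pool->pages, struct page, lru);
	list_del(&(*page)->lru);
	pool->num--;
	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
}

/* Return pages to the pool; a later trim pass caps its size. */
static void pool_put_pages(struct page_pool *pool, struct page **page, int num)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < num; i++)
		list_add(&page[i]->lru, &pool->pages);
	pool->num += num;
	spin_unlock_irqrestore(&pool->lock, flags);
}
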
169 static int do_block_io_op(struct xen_blkif *blkif);
170 static int dispatch_rw_block_io(struct xen_blkif *blkif,
173 static void make_response(struct xen_blkif *blkif, u64 id,
194 static int add_persistent_gnt(struct xen_blkif *blkif, in add_persistent_gnt() argument
200 if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) { in add_persistent_gnt()
201 if (!blkif->vbd.overflow_max_grants) in add_persistent_gnt()
202 blkif->vbd.overflow_max_grants = 1; in add_persistent_gnt()
206 new = &blkif->persistent_gnts.rb_node; in add_persistent_gnt()
225 rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts); in add_persistent_gnt()
226 blkif->persistent_gnt_c++; in add_persistent_gnt()
227 atomic_inc(&blkif->persistent_gnt_in_use); in add_persistent_gnt()
231 static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif, in get_persistent_gnt() argument
237 node = blkif->persistent_gnts.rb_node; in get_persistent_gnt()
251 atomic_inc(&blkif->persistent_gnt_in_use); in get_persistent_gnt()
258 static void put_persistent_gnt(struct xen_blkif *blkif, in put_persistent_gnt() argument
265 atomic_dec(&blkif->persistent_gnt_in_use); in put_persistent_gnt()
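
add_persistent_gnt() and get_persistent_gnt() above keep the cached grants in an rb-tree rooted at blkif->persistent_gnts and keyed by grant reference; persistent_gnt_c counts entries against xen_blkif_max_pgrants and persistent_gnt_in_use counts lookups not yet put back. A minimal sketch of the keyed insert/search walk, with simplified type and field names (assumptions, not the driver's):

#include <linux/rbtree.h>
#include <linux/types.h>
#include <linux/errno.h>

struct pgnt {
	struct rb_node	node;
	u32		gnt;	/* grant reference, the search key */
};

static int pgnt_add(struct rb_root *root, struct pgnt *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct pgnt *this;

	while (*link) {
		parent = *link;
		this = rb_entry(parent, struct pgnt, node);
		if (new->gnt < this->gnt)
			link = &parent->rb_left;
		else if (new->gnt > this->gnt)
			link = &parent->rb_right;
		else
			return -EEXIST;	/* grant already cached */
	}
	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct pgnt *pgnt_find(struct rb_root *root, u32 gref)
{
	struct rb_node *node = root->rb_node;
	struct pgnt *this;

	while (node) {
		this = rb_entry(node, struct pgnt, node);
		if (gref < this->gnt)
			node = node->rb_left;
		else if (gref > this->gnt)
			node = node->rb_right;
		else
			return this;
	}
	return NULL;
}
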
268 static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, in free_persistent_gnts() argument
299 put_free_pages(blkif, pages, segs_to_unmap); in free_persistent_gnts()
316 struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work); in xen_blkbk_unmap_purged_grants() local
323 while(!list_empty(&blkif->persistent_purge_list)) { in xen_blkbk_unmap_purged_grants()
324 persistent_gnt = list_first_entry(&blkif->persistent_purge_list, in xen_blkbk_unmap_purged_grants()
339 put_free_pages(blkif, pages, segs_to_unmap); in xen_blkbk_unmap_purged_grants()
347 put_free_pages(blkif, pages, segs_to_unmap); in xen_blkbk_unmap_purged_grants()
351 static void purge_persistent_gnt(struct xen_blkif *blkif) in purge_persistent_gnt() argument
359 if (blkif->persistent_gnt_c < xen_blkif_max_pgrants || in purge_persistent_gnt()
360 (blkif->persistent_gnt_c == xen_blkif_max_pgrants && in purge_persistent_gnt()
361 !blkif->vbd.overflow_max_grants)) { in purge_persistent_gnt()
365 if (work_busy(&blkif->persistent_purge_work)) { in purge_persistent_gnt()
371 num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean; in purge_persistent_gnt()
372 num_clean = min(blkif->persistent_gnt_c, num_clean); in purge_persistent_gnt()
374 (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use)))) in purge_persistent_gnt()
390 BUG_ON(!list_empty(&blkif->persistent_purge_list)); in purge_persistent_gnt()
391 root = &blkif->persistent_gnts; in purge_persistent_gnt()
410 &blkif->persistent_purge_list); in purge_persistent_gnt()
431 blkif->persistent_gnt_c -= (total - num_clean); in purge_persistent_gnt()
432 blkif->vbd.overflow_max_grants = 0; in purge_persistent_gnt()
435 schedule_work(&blkif->persistent_purge_work); in purge_persistent_gnt()
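
purge_persistent_gnt() above only does work once the cache is at or over xen_blkif_max_pgrants (or the overflow flag is set); it then decides how many grants to drop, moves victims that are not in use onto persistent_purge_list, and defers the actual unmapping to persistent_purge_work. Below is a sketch of just the sizing arithmetic seen around lines 359-374; the base batch size is an assumption passed in as a parameter, and the overflow-flag special case is folded into the plain threshold test.

/* How many cached grants should this purge pass try to drop? */
static unsigned int purge_count(unsigned int cached,     /* persistent_gnt_c      */
				unsigned int max_cached, /* xen_blkif_max_pgrants */
				unsigned int in_use,     /* persistent_gnt_in_use */
				unsigned int batch)      /* base clean batch      */
{
	unsigned int num_clean;

	if (cached < max_cached)
		return 0;			/* under the cap: nothing to do */

	/* drop the overshoot plus one batch, but never more than we cache ... */
	num_clean = cached - max_cached + batch;
	if (num_clean > cached)
		num_clean = cached;

	/* ... and never touch grants currently mapped into a request */
	if (num_clean > cached - in_use)
		return 0;			/* skip this pass entirely */

	return num_clean;
}
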
443 static struct pending_req *alloc_req(struct xen_blkif *blkif) in alloc_req() argument
448 spin_lock_irqsave(&blkif->pending_free_lock, flags); in alloc_req()
449 if (!list_empty(&blkif->pending_free)) { in alloc_req()
450 req = list_entry(blkif->pending_free.next, struct pending_req, in alloc_req()
454 spin_unlock_irqrestore(&blkif->pending_free_lock, flags); in alloc_req()
462 static void free_req(struct xen_blkif *blkif, struct pending_req *req) in free_req() argument
467 spin_lock_irqsave(&blkif->pending_free_lock, flags); in free_req()
468 was_empty = list_empty(&blkif->pending_free); in free_req()
469 list_add(&req->free_list, &blkif->pending_free); in free_req()
470 spin_unlock_irqrestore(&blkif->pending_free_lock, flags); in free_req()
472 wake_up(&blkif->pending_free_wq); in free_req()
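
alloc_req() and free_req() above implement a fixed pool of request descriptors: alloc pops from pending_free under pending_free_lock and returns NULL when the pool is empty (the ring loop then counts an st_oo_req and retries later), while free pushes the descriptor back and wakes pending_free_wq only if the list was empty, so completions do not wake the worker needlessly. A self-contained sketch of that pattern with simplified names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct req_pool {
	spinlock_t		lock;	/* pending_free_lock */
	struct list_head	free;	/* pending_free      */
	wait_queue_head_t	wq;	/* pending_free_wq   */
};

struct pool_req {
	struct list_head	free_list;
};

static struct pool_req *req_alloc(struct req_pool *pool)
{
	struct pool_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free)) {
		req = list_first_entry(&pool->free, struct pool_req, free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
	return req;	/* NULL means "out of descriptors, retry later" */
}

static void req_free(struct req_pool *pool, struct pool_req *req)
{
	unsigned long flags;
	bool was_empty;

	spin_lock_irqsave(&pool->lock, flags);
	was_empty = list_empty(&pool->free);
	list_add(&req->free_list, &pool->free);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (was_empty)
		wake_up(&pool->wq);	/* worker may be waiting for a free slot */
}
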
478 static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif, in xen_vbd_translate() argument
481 struct xen_vbd *vbd = &blkif->vbd; in xen_vbd_translate()
504 static void xen_vbd_resize(struct xen_blkif *blkif) in xen_vbd_resize() argument
506 struct xen_vbd *vbd = &blkif->vbd; in xen_vbd_resize()
509 struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be); in xen_vbd_resize()
513 blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice)); in xen_vbd_resize()
552 static void blkif_notify_work(struct xen_blkif *blkif) in blkif_notify_work() argument
554 blkif->waiting_reqs = 1; in blkif_notify_work()
555 wake_up(&blkif->wq); in blkif_notify_work()
568 static void print_stats(struct xen_blkif *blkif) in print_stats() argument
572 current->comm, blkif->st_oo_req, in print_stats()
573 blkif->st_rd_req, blkif->st_wr_req, in print_stats()
574 blkif->st_f_req, blkif->st_ds_req, in print_stats()
575 blkif->persistent_gnt_c, in print_stats()
577 blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000); in print_stats()
578 blkif->st_rd_req = 0; in print_stats()
579 blkif->st_wr_req = 0; in print_stats()
580 blkif->st_oo_req = 0; in print_stats()
581 blkif->st_ds_req = 0; in print_stats()
586 struct xen_blkif *blkif = arg; in xen_blkif_schedule() local
587 struct xen_vbd *vbd = &blkif->vbd; in xen_blkif_schedule()
591 xen_blkif_get(blkif); in xen_blkif_schedule()
597 xen_vbd_resize(blkif); in xen_blkif_schedule()
602 blkif->wq, in xen_blkif_schedule()
603 blkif->waiting_reqs || kthread_should_stop(), in xen_blkif_schedule()
608 blkif->pending_free_wq, in xen_blkif_schedule()
609 !list_empty(&blkif->pending_free) || in xen_blkif_schedule()
615 blkif->waiting_reqs = 0; in xen_blkif_schedule()
618 ret = do_block_io_op(blkif); in xen_blkif_schedule()
620 blkif->waiting_reqs = 1; in xen_blkif_schedule()
622 wait_event_interruptible(blkif->shutdown_wq, in xen_blkif_schedule()
626 if (blkif->vbd.feature_gnt_persistent && in xen_blkif_schedule()
627 time_after(jiffies, blkif->next_lru)) { in xen_blkif_schedule()
628 purge_persistent_gnt(blkif); in xen_blkif_schedule()
629 blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL); in xen_blkif_schedule()
633 shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages); in xen_blkif_schedule()
635 if (log_stats && time_after(jiffies, blkif->st_print)) in xen_blkif_schedule()
636 print_stats(blkif); in xen_blkif_schedule()
640 flush_work(&blkif->persistent_purge_work); in xen_blkif_schedule()
643 print_stats(blkif); in xen_blkif_schedule()
645 blkif->xenblkd = NULL; in xen_blkif_schedule()
646 xen_blkif_put(blkif); in xen_blkif_schedule()
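
xen_blkif_schedule() is the per-device worker thread: it sleeps until the frontend signals work and a free descriptor exists, drains the ring through do_block_io_op(), and between passes runs the LRU purge and trims the page pool. The sketch below compresses that loop shape, reusing field and helper names from the listing and assuming the driver's struct xen_blkif from common.h; stats printing, the resize check, and the shutdown path are elided, and it is not the exact function body.

/* assumes the driver's common.h plus the static helpers above */
static int worker_loop(void *arg)
{
	struct xen_blkif *blkif = arg;

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;

		/* sleep until the frontend kicks us ... */
		wait_event_interruptible(blkif->wq,
				blkif->waiting_reqs || kthread_should_stop());
		/* ... and until at least one pending_req is free */
		wait_event_interruptible(blkif->pending_free_wq,
				!list_empty(&blkif->pending_free) ||
				kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb();	/* clear the flag before re-reading the ring */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;	/* ring not empty: loop again */

		/* periodic housekeeping between passes */
		if (blkif->vbd.feature_gnt_persistent &&
		    time_after(jiffies, blkif->next_lru)) {
			purge_persistent_gnt(blkif);
			blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
		}
		shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);
	}
	return 0;
}
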
654 void xen_blkbk_free_caches(struct xen_blkif *blkif) in xen_blkbk_free_caches() argument
657 if (!RB_EMPTY_ROOT(&blkif->persistent_gnts)) in xen_blkbk_free_caches()
658 free_persistent_gnts(blkif, &blkif->persistent_gnts, in xen_blkbk_free_caches()
659 blkif->persistent_gnt_c); in xen_blkbk_free_caches()
661 BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); in xen_blkbk_free_caches()
662 blkif->persistent_gnt_c = 0; in xen_blkbk_free_caches()
665 shrink_free_pagepool(blkif, 0 /* All */); in xen_blkbk_free_caches()
669 struct xen_blkif *blkif, in xen_blkbk_unmap_prepare() argument
679 put_persistent_gnt(blkif, pages[i]->persistent_gnt); in xen_blkbk_unmap_prepare()
697 struct xen_blkif *blkif = pending_req->blkif; in xen_blkbk_unmap_and_respond_callback() local
703 put_free_pages(blkif, data->pages, data->count); in xen_blkbk_unmap_and_respond_callback()
704 make_response(blkif, pending_req->id, in xen_blkbk_unmap_and_respond_callback()
706 free_req(blkif, pending_req); in xen_blkbk_unmap_and_respond_callback()
719 if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) { in xen_blkbk_unmap_and_respond_callback()
720 complete(&blkif->drain_complete); in xen_blkbk_unmap_and_respond_callback()
722 xen_blkif_put(blkif); in xen_blkbk_unmap_and_respond_callback()
728 struct xen_blkif *blkif = req->blkif; in xen_blkbk_unmap_and_respond() local
732 invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_pages, in xen_blkbk_unmap_and_respond()
753 static void xen_blkbk_unmap(struct xen_blkif *blkif, in xen_blkbk_unmap() argument
765 invcount = xen_blkbk_unmap_prepare(blkif, pages, batch, in xen_blkbk_unmap()
770 put_free_pages(blkif, unmap_pages, invcount); in xen_blkbk_unmap()
777 static int xen_blkbk_map(struct xen_blkif *blkif, in xen_blkbk_map() argument
791 use_persistent_gnts = (blkif->vbd.feature_gnt_persistent); in xen_blkbk_map()
804 blkif, in xen_blkbk_map()
815 if (get_free_page(blkif, &pages[i]->page)) in xen_blkbk_map()
825 blkif->domid); in xen_blkbk_map()
848 put_free_pages(blkif, &pages[seg_idx]->page, 1); in xen_blkbk_map()
858 blkif->persistent_gnt_c < xen_blkif_max_pgrants) { in xen_blkbk_map()
876 if (add_persistent_gnt(blkif, in xen_blkbk_map()
884 persistent_gnt->gnt, blkif->persistent_gnt_c, in xen_blkbk_map()
888 if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) { in xen_blkbk_map()
889 blkif->vbd.overflow_max_grants = 1; in xen_blkbk_map()
891 blkif->domid, blkif->vbd.handle); in xen_blkbk_map()
909 put_free_pages(blkif, pages_to_gnt, segs_to_map); in xen_blkbk_map()
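
xen_blkbk_map() above first tries each segment against the persistent-grant cache; only the misses are collected into a batch of grant-map operations and mapped in one go with gnttab_map_refs(), each onto a page taken from the free-page pool. The sketch below shows only that batched map step; the persistent-grant promotion that follows, the read-only flag selection, and full unwinding of partially mapped batches are elided, and the helper name and readonly parameter are illustrative.

#include <linux/mm.h>
#include <xen/grant_table.h>
#include <xen/interface/io/blkif.h>

/* n <= BLKIF_MAX_SEGMENTS_PER_REQUEST assumed */
static int map_batch(struct xen_blkif *blkif, grant_ref_t *grefs,
		     struct page **pages, int n, bool readonly)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	uint32_t flags = GNTMAP_host_map | (readonly ? GNTMAP_readonly : 0);
	int i, ret;

	for (i = 0; i < n; i++) {
		/* back each cache miss with a page from the free-page pool */
		if (get_free_page(blkif, &pages[i])) {
			put_free_pages(blkif, pages, i);	/* give back what we took */
			return -ENOMEM;
		}
		gnttab_set_map_op(&map[i],
				  (unsigned long)pfn_to_kaddr(page_to_pfn(pages[i])),
				  flags, grefs[i], blkif->domid);
	}

	ret = gnttab_map_refs(map, NULL, pages, n);
	BUG_ON(ret);

	for (i = 0; i < n; i++) {
		if (map[i].status != GNTST_okay) {
			/* bad grant from the frontend; cleanup of the rest of
			 * the batch is elided in this sketch */
			put_free_pages(blkif, &pages[i], 1);
			return -EINVAL;
		}
	}
	return 0;
}
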
917 rc = xen_blkbk_map(pending_req->blkif, pending_req->segments, in xen_blkbk_map_seg()
930 struct xen_blkif *blkif = pending_req->blkif; in xen_blkbk_parse_indirect() local
941 rc = xen_blkbk_map(blkif, pages, indirect_grefs, true); in xen_blkbk_parse_indirect()
968 xen_blkbk_unmap(blkif, pages, indirect_grefs); in xen_blkbk_parse_indirect()
972 static int dispatch_discard_io(struct xen_blkif *blkif, in dispatch_discard_io() argument
977 struct block_device *bdev = blkif->vbd.bdev; in dispatch_discard_io()
981 xen_blkif_get(blkif); in dispatch_discard_io()
986 err = xen_vbd_translate(&preq, blkif, WRITE); in dispatch_discard_io()
990 preq.sector_number + preq.nr_sects, blkif->vbd.pdevice); in dispatch_discard_io()
993 blkif->st_ds_req++; in dispatch_discard_io()
995 secure = (blkif->vbd.discard_secure && in dispatch_discard_io()
1009 make_response(blkif, req->u.discard.id, req->operation, status); in dispatch_discard_io()
1010 xen_blkif_put(blkif); in dispatch_discard_io()
1014 static int dispatch_other_io(struct xen_blkif *blkif, in dispatch_other_io() argument
1018 free_req(blkif, pending_req); in dispatch_other_io()
1019 make_response(blkif, req->u.other.id, req->operation, in dispatch_other_io()
1024 static void xen_blk_drain_io(struct xen_blkif *blkif) in xen_blk_drain_io() argument
1026 atomic_set(&blkif->drain, 1); in xen_blk_drain_io()
1028 if (atomic_read(&blkif->inflight) == 0) in xen_blk_drain_io()
1031 &blkif->drain_complete, HZ); in xen_blk_drain_io()
1033 if (!atomic_read(&blkif->drain)) in xen_blk_drain_io()
1036 atomic_set(&blkif->drain, 0); in xen_blk_drain_io()
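
xen_blk_drain_io() above pairs with the completion callback at lines 719-720: dispatch bumps blkif->inflight, every completion drops it, and while the drain flag is set the last completion fires drain_complete so the waiting thread knows nothing is in flight (used before flush/barrier-style requests). A self-contained sketch of that handshake with simplified names:

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/kthread.h>

struct drain_ctx {
	atomic_t		inflight;
	atomic_t		drain;
	struct completion	drain_complete;
};

/* completion side (cf. xen_blkbk_unmap_and_respond_callback) */
static void request_done(struct drain_ctx *ctx)
{
	/* last in-flight request wakes a pending drain, if any */
	if (atomic_dec_and_test(&ctx->inflight) && atomic_read(&ctx->drain))
		complete(&ctx->drain_complete);
}

/* dispatch side (cf. xen_blk_drain_io): wait until nothing is in flight */
static void drain_io(struct drain_ctx *ctx)
{
	atomic_set(&ctx->drain, 1);
	do {
		if (atomic_read(&ctx->inflight) == 0)
			break;
		wait_for_completion_interruptible_timeout(&ctx->drain_complete, HZ);
		if (!atomic_read(&ctx->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&ctx->drain, 0);
}
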
1049 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0); in __end_block_io_op()
1054 xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0); in __end_block_io_op()
1088 __do_block_io_op(struct xen_blkif *blkif) in __do_block_io_op() argument
1090 union blkif_back_rings *blk_rings = &blkif->blk_rings; in __do_block_io_op()
1103 rp, rc, rp - rc, blkif->vbd.pdevice); in __do_block_io_op()
1116 pending_req = alloc_req(blkif); in __do_block_io_op()
1118 blkif->st_oo_req++; in __do_block_io_op()
1123 switch (blkif->blk_protocol) { in __do_block_io_op()
1147 if (dispatch_rw_block_io(blkif, &req, pending_req)) in __do_block_io_op()
1151 free_req(blkif, pending_req); in __do_block_io_op()
1152 if (dispatch_discard_io(blkif, &req)) in __do_block_io_op()
1156 if (dispatch_other_io(blkif, &req, pending_req)) in __do_block_io_op()
1169 do_block_io_op(struct xen_blkif *blkif) in do_block_io_op() argument
1171 union blkif_back_rings *blk_rings = &blkif->blk_rings; in do_block_io_op()
1175 more_to_do = __do_block_io_op(blkif); in do_block_io_op()
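
do_block_io_op() above is a thin wrapper around __do_block_io_op(): after draining what was on the ring it uses RING_FINAL_CHECK_FOR_REQUESTS() to close the race with a frontend that queued more work just as the pass finished. A sketch of that consume-then-recheck shape, assuming the driver's union blkif_back_rings from common.h:

/* assumes the driver's common.h and __do_block_io_op() above */
static int consume_ring(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(blkif);
		if (more_to_do)
			break;		/* out of descriptors: worker will rerun */
		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}
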
1188 static int dispatch_rw_block_io(struct xen_blkif *blkif, in dispatch_rw_block_io() argument
1215 blkif->st_rd_req++; in dispatch_rw_block_io()
1219 blkif->st_wr_req++; in dispatch_rw_block_io()
1225 blkif->st_f_req++; in dispatch_rw_block_io()
1250 pending_req->blkif = blkif; in dispatch_rw_block_io()
1277 if (xen_vbd_translate(&preq, blkif, operation) != 0) { in dispatch_rw_block_io()
1282 blkif->vbd.pdevice); in dispatch_rw_block_io()
1294 blkif->domid); in dispatch_rw_block_io()
1303 xen_blk_drain_io(pending_req->blkif); in dispatch_rw_block_io()
1318 xen_blkif_get(blkif); in dispatch_rw_block_io()
1319 atomic_inc(&blkif->inflight); in dispatch_rw_block_io()
1367 blkif->st_rd_sect += preq.nr_sects; in dispatch_rw_block_io()
1369 blkif->st_wr_sect += preq.nr_sects; in dispatch_rw_block_io()
1374 xen_blkbk_unmap(blkif, pending_req->segments, in dispatch_rw_block_io()
1378 make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR); in dispatch_rw_block_io()
1379 free_req(blkif, pending_req); in dispatch_rw_block_io()
1397 static void make_response(struct xen_blkif *blkif, u64 id, in make_response() argument
1402 union blkif_back_rings *blk_rings = &blkif->blk_rings; in make_response()
1409 spin_lock_irqsave(&blkif->blk_ring_lock, flags); in make_response()
1411 switch (blkif->blk_protocol) { in make_response()
1429 spin_unlock_irqrestore(&blkif->blk_ring_lock, flags); in make_response()
1431 notify_remote_via_irq(blkif->irq); in make_response()
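
make_response() above builds the reply, copies it into the shared ring under blk_ring_lock (switching on blk_protocol to pick the native, x86_32, or x86_64 layout), and kicks the frontend's event channel only when RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() says a notification is needed. Below is a sketch of the native-protocol arm only, assuming the driver's struct xen_blkif and ring union from common.h; the compat arms differ just in which ring member they index.

#include <linux/string.h>
#include <xen/events.h>
#include <xen/interface/io/blkif.h>

static void respond(struct xen_blkif *blkif, u64 id, unsigned short op, int st)
{
	struct blkif_response resp;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	unsigned long flags;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* copy the response into the shared ring at the private producer index */
	memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
	       &resp, sizeof(resp));
	blk_rings->native.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->native, notify);
	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

	if (notify)
		notify_remote_via_irq(blkif->irq);	/* kick the frontend */
}
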