Lines Matching refs:blkif (drivers/block/xen-blkback/blkback.c)

116 static inline int get_free_page(struct xen_blkif *blkif, struct page **page)  in get_free_page()  argument
120 spin_lock_irqsave(&blkif->free_pages_lock, flags); in get_free_page()
121 if (list_empty(&blkif->free_pages)) { in get_free_page()
122 BUG_ON(blkif->free_pages_num != 0); in get_free_page()
123 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); in get_free_page()
126 BUG_ON(blkif->free_pages_num == 0); in get_free_page()
127 page[0] = list_first_entry(&blkif->free_pages, struct page, lru); in get_free_page()
129 blkif->free_pages_num--; in get_free_page()
130 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); in get_free_page()
135 static inline void put_free_pages(struct xen_blkif *blkif, struct page **page, in put_free_pages() argument
141 spin_lock_irqsave(&blkif->free_pages_lock, flags); in put_free_pages()
143 list_add(&page[i]->lru, &blkif->free_pages); in put_free_pages()
144 blkif->free_pages_num += num; in put_free_pages()
145 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); in put_free_pages()
148 static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num) in shrink_free_pagepool() argument
155 spin_lock_irqsave(&blkif->free_pages_lock, flags); in shrink_free_pagepool()
156 while (blkif->free_pages_num > num) { in shrink_free_pagepool()
157 BUG_ON(list_empty(&blkif->free_pages)); in shrink_free_pagepool()
158 page[num_pages] = list_first_entry(&blkif->free_pages, in shrink_free_pagepool()
161 blkif->free_pages_num--; in shrink_free_pagepool()
163 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); in shrink_free_pagepool()
165 spin_lock_irqsave(&blkif->free_pages_lock, flags); in shrink_free_pagepool()
169 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); in shrink_free_pagepool()
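
The three helpers above (get_free_page, put_free_pages, shrink_free_pagepool) manage a per-interface pool of pages used for grant mappings: a list of struct page protected by free_pages_lock, with free_pages_num kept in sync so the shrinker knows how many pages can be released. The sketch below, assuming only the fields visible in the listing, shows the core locking pattern; the real get_free_page() falls back to allocating a fresh page from Xen when the pool is empty, which is elided here.

    /* Minimal sketch of the free-page pool; needs <linux/list.h>,
     * <linux/spinlock.h> and <linux/mm_types.h>. Not the verbatim driver code. */
    static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
    {
        unsigned long flags;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        if (list_empty(&blkif->free_pages)) {
            spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
            return 1;                       /* pool empty: caller must allocate */
        }
        page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
        list_del(&page[0]->lru);
        blkif->free_pages_num--;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
        return 0;
    }

    static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
                                      int num)
    {
        unsigned long flags;
        int i;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        for (i = 0; i < num; i++)
            list_add(&page[i]->lru, &blkif->free_pages);    /* pages reuse page->lru */
        blkif->free_pages_num += num;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
    }
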
176 static int do_block_io_op(struct xen_blkif *blkif);
177 static int dispatch_rw_block_io(struct xen_blkif *blkif,
180 static void make_response(struct xen_blkif *blkif, u64 id,
201 static int add_persistent_gnt(struct xen_blkif *blkif, in add_persistent_gnt() argument
207 if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) { in add_persistent_gnt()
208 if (!blkif->vbd.overflow_max_grants) in add_persistent_gnt()
209 blkif->vbd.overflow_max_grants = 1; in add_persistent_gnt()
213 new = &blkif->persistent_gnts.rb_node; in add_persistent_gnt()
232 rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts); in add_persistent_gnt()
233 blkif->persistent_gnt_c++; in add_persistent_gnt()
234 atomic_inc(&blkif->persistent_gnt_in_use); in add_persistent_gnt()
238 static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif, in get_persistent_gnt() argument
244 node = blkif->persistent_gnts.rb_node; in get_persistent_gnt()
258 atomic_inc(&blkif->persistent_gnt_in_use); in get_persistent_gnt()
265 static void put_persistent_gnt(struct xen_blkif *blkif, in put_persistent_gnt() argument
272 atomic_dec(&blkif->persistent_gnt_in_use); in put_persistent_gnt()
275 static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, in free_persistent_gnts() argument
306 put_free_pages(blkif, pages, segs_to_unmap); in free_persistent_gnts()
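
add_persistent_gnt()/get_persistent_gnt() keep persistently mapped grants in a red-black tree rooted at blkif->persistent_gnts, keyed by grant reference, with persistent_gnt_c and persistent_gnt_in_use counting how many exist and how many are currently handed out. A minimal sketch of that rb-tree bookkeeping follows, using a reduced struct persistent_gnt (only the key and the tree node) for illustration.

    /* Reduced illustration of the rb-tree keyed by grant reference;
     * needs <linux/rbtree.h> and <xen/interface/grant_table.h>. */
    struct persistent_gnt {
        grant_ref_t gnt;            /* key: the frontend's grant reference */
        struct rb_node node;
    };

    static int add_persistent_gnt(struct rb_root *root, struct persistent_gnt *pg)
    {
        struct rb_node **new = &root->rb_node, *parent = NULL;

        while (*new) {
            struct persistent_gnt *this =
                rb_entry(*new, struct persistent_gnt, node);

            parent = *new;
            if (pg->gnt < this->gnt)
                new = &(*new)->rb_left;
            else if (pg->gnt > this->gnt)
                new = &(*new)->rb_right;
            else
                return -EEXIST;     /* grant already tracked */
        }

        rb_link_node(&pg->node, parent, new);
        rb_insert_color(&pg->node, root);
        return 0;
    }

    static struct persistent_gnt *get_persistent_gnt(struct rb_root *root,
                                                     grant_ref_t gref)
    {
        struct rb_node *node = root->rb_node;

        while (node) {
            struct persistent_gnt *pg =
                rb_entry(node, struct persistent_gnt, node);

            if (gref < pg->gnt)
                node = node->rb_left;
            else if (gref > pg->gnt)
                node = node->rb_right;
            else
                return pg;
        }
        return NULL;
    }
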
323 struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work); in xen_blkbk_unmap_purged_grants() local
330 while(!list_empty(&blkif->persistent_purge_list)) { in xen_blkbk_unmap_purged_grants()
331 persistent_gnt = list_first_entry(&blkif->persistent_purge_list, in xen_blkbk_unmap_purged_grants()
346 put_free_pages(blkif, pages, segs_to_unmap); in xen_blkbk_unmap_purged_grants()
354 put_free_pages(blkif, pages, segs_to_unmap); in xen_blkbk_unmap_purged_grants()
358 static void purge_persistent_gnt(struct xen_blkif *blkif) in purge_persistent_gnt() argument
366 if (blkif->persistent_gnt_c < xen_blkif_max_pgrants || in purge_persistent_gnt()
367 (blkif->persistent_gnt_c == xen_blkif_max_pgrants && in purge_persistent_gnt()
368 !blkif->vbd.overflow_max_grants)) { in purge_persistent_gnt()
372 if (work_busy(&blkif->persistent_purge_work)) { in purge_persistent_gnt()
378 num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean; in purge_persistent_gnt()
379 num_clean = min(blkif->persistent_gnt_c, num_clean); in purge_persistent_gnt()
381 (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use)))) in purge_persistent_gnt()
397 BUG_ON(!list_empty(&blkif->persistent_purge_list)); in purge_persistent_gnt()
398 root = &blkif->persistent_gnts; in purge_persistent_gnt()
417 &blkif->persistent_purge_list); in purge_persistent_gnt()
438 blkif->persistent_gnt_c -= (total - num_clean); in purge_persistent_gnt()
439 blkif->vbd.overflow_max_grants = 0; in purge_persistent_gnt()
442 schedule_work(&blkif->persistent_purge_work); in purge_persistent_gnt()
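
The purge path is split across two contexts: purge_persistent_gnt() runs in the blkback kthread, decides how many grants to evict when the tree is at or over xen_blkif_max_pgrants, moves the victims onto blkif->persistent_purge_list and schedules persistent_purge_work; xen_blkbk_unmap_purged_grants() then runs from the workqueue and unmaps them in batches. The wiring is sketched below, assuming a remove_node list hook in struct persistent_gnt and a hypothetical setup helper; the batched grant-table unmap itself is elided.

    /* Workqueue side of the purge: recover the blkif from the embedded
     * work_struct and drain the purge list. Sketch only. */
    static void xen_blkbk_unmap_purged_grants(struct work_struct *work)
    {
        struct xen_blkif *blkif =
            container_of(work, typeof(*blkif), persistent_purge_work);
        struct persistent_gnt *persistent_gnt;

        while (!list_empty(&blkif->persistent_purge_list)) {
            persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
                                              struct persistent_gnt, remove_node);
            list_del(&persistent_gnt->remove_node);
            /* ... queue a gnttab unmap op for this grant, flushing in batches,
             * and recycle its page via put_free_pages() ... */
            kfree(persistent_gnt);
        }
    }

    /* Hypothetical one-time setup, e.g. when the interface is created. */
    static void blkif_init_purge(struct xen_blkif *blkif)
    {
        INIT_LIST_HEAD(&blkif->persistent_purge_list);
        INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
    }
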
450 static struct pending_req *alloc_req(struct xen_blkif *blkif) in alloc_req() argument
455 spin_lock_irqsave(&blkif->pending_free_lock, flags); in alloc_req()
456 if (!list_empty(&blkif->pending_free)) { in alloc_req()
457 req = list_entry(blkif->pending_free.next, struct pending_req, in alloc_req()
461 spin_unlock_irqrestore(&blkif->pending_free_lock, flags); in alloc_req()
469 static void free_req(struct xen_blkif *blkif, struct pending_req *req) in free_req() argument
474 spin_lock_irqsave(&blkif->pending_free_lock, flags); in free_req()
475 was_empty = list_empty(&blkif->pending_free); in free_req()
476 list_add(&req->free_list, &blkif->pending_free); in free_req()
477 spin_unlock_irqrestore(&blkif->pending_free_lock, flags); in free_req()
479 wake_up(&blkif->pending_free_wq); in free_req()
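
alloc_req()/free_req() recycle request descriptors through a spinlock-protected free list; note that free_req() only wakes pending_free_wq when the list was previously empty, since that is the only case in which the scheduler thread can be sleeping for lack of a descriptor. A sketch of the pair, reconstructed from the lines above:

    static struct pending_req *alloc_req(struct xen_blkif *blkif)
    {
        struct pending_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&blkif->pending_free_lock, flags);
        if (!list_empty(&blkif->pending_free)) {
            req = list_first_entry(&blkif->pending_free,
                                   struct pending_req, free_list);
            list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
        return req;             /* NULL means "no descriptor free, retry later" */
    }

    static void free_req(struct xen_blkif *blkif, struct pending_req *req)
    {
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&blkif->pending_free_lock, flags);
        was_empty = list_empty(&blkif->pending_free);
        list_add(&req->free_list, &blkif->pending_free);
        spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
        if (was_empty)
            wake_up(&blkif->pending_free_wq);   /* someone may be waiting */
    }
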
485 static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif, in xen_vbd_translate() argument
488 struct xen_vbd *vbd = &blkif->vbd; in xen_vbd_translate()
511 static void xen_vbd_resize(struct xen_blkif *blkif) in xen_vbd_resize() argument
513 struct xen_vbd *vbd = &blkif->vbd; in xen_vbd_resize()
516 struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be); in xen_vbd_resize()
520 blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice)); in xen_vbd_resize()
559 static void blkif_notify_work(struct xen_blkif *blkif) in blkif_notify_work() argument
561 blkif->waiting_reqs = 1; in blkif_notify_work()
562 wake_up(&blkif->wq); in blkif_notify_work()
575 static void print_stats(struct xen_blkif *blkif) in print_stats() argument
579 current->comm, blkif->st_oo_req, in print_stats()
580 blkif->st_rd_req, blkif->st_wr_req, in print_stats()
581 blkif->st_f_req, blkif->st_ds_req, in print_stats()
582 blkif->persistent_gnt_c, in print_stats()
584 blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000); in print_stats()
585 blkif->st_rd_req = 0; in print_stats()
586 blkif->st_wr_req = 0; in print_stats()
587 blkif->st_oo_req = 0; in print_stats()
588 blkif->st_ds_req = 0; in print_stats()
593 struct xen_blkif *blkif = arg; in xen_blkif_schedule() local
594 struct xen_vbd *vbd = &blkif->vbd; in xen_blkif_schedule()
598 xen_blkif_get(blkif); in xen_blkif_schedule()
604 xen_vbd_resize(blkif); in xen_blkif_schedule()
609 blkif->wq, in xen_blkif_schedule()
610 blkif->waiting_reqs || kthread_should_stop(), in xen_blkif_schedule()
615 blkif->pending_free_wq, in xen_blkif_schedule()
616 !list_empty(&blkif->pending_free) || in xen_blkif_schedule()
622 blkif->waiting_reqs = 0; in xen_blkif_schedule()
625 ret = do_block_io_op(blkif); in xen_blkif_schedule()
627 blkif->waiting_reqs = 1; in xen_blkif_schedule()
629 wait_event_interruptible(blkif->shutdown_wq, in xen_blkif_schedule()
633 if (blkif->vbd.feature_gnt_persistent && in xen_blkif_schedule()
634 time_after(jiffies, blkif->next_lru)) { in xen_blkif_schedule()
635 purge_persistent_gnt(blkif); in xen_blkif_schedule()
636 blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL); in xen_blkif_schedule()
640 shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages); in xen_blkif_schedule()
642 if (log_stats && time_after(jiffies, blkif->st_print)) in xen_blkif_schedule()
643 print_stats(blkif); in xen_blkif_schedule()
647 flush_work(&blkif->persistent_purge_work); in xen_blkif_schedule()
650 print_stats(blkif); in xen_blkif_schedule()
652 blkif->xenblkd = NULL; in xen_blkif_schedule()
653 xen_blkif_put(blkif); in xen_blkif_schedule()
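
xen_blkif_schedule() is the per-device kthread: it holds a reference on the blkif for its lifetime, sleeps until the frontend kicks blkif->wq (and a free pending_req exists), processes the ring via do_block_io_op(), and then does periodic housekeeping: LRU purge of persistent grants, shrinking the free-page pool, and optional stats printing. A simplified skeleton, with freezing, timeouts, resizing and error handling elided:

    int xen_blkif_schedule(void *arg)
    {
        struct xen_blkif *blkif = arg;

        xen_blkif_get(blkif);

        while (!kthread_should_stop()) {
            wait_event_interruptible(blkif->wq,
                    blkif->waiting_reqs || kthread_should_stop());
            wait_event_interruptible(blkif->pending_free_wq,
                    !list_empty(&blkif->pending_free) ||
                    kthread_should_stop());

            blkif->waiting_reqs = 0;
            smp_mb();                   /* clear flag before checking for work */

            if (do_block_io_op(blkif))
                blkif->waiting_reqs = 1;        /* ring not empty, go again */

            if (blkif->vbd.feature_gnt_persistent &&
                time_after(jiffies, blkif->next_lru)) {
                purge_persistent_gnt(blkif);
                blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
            }

            /* keep at most xen_blkif_max_buffer_pages pages cached */
            shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);
        }

        flush_work(&blkif->persistent_purge_work);  /* finish any purge in flight */
        blkif->xenblkd = NULL;
        xen_blkif_put(blkif);
        return 0;
    }
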
661 void xen_blkbk_free_caches(struct xen_blkif *blkif) in xen_blkbk_free_caches() argument
664 if (!RB_EMPTY_ROOT(&blkif->persistent_gnts)) in xen_blkbk_free_caches()
665 free_persistent_gnts(blkif, &blkif->persistent_gnts, in xen_blkbk_free_caches()
666 blkif->persistent_gnt_c); in xen_blkbk_free_caches()
668 BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); in xen_blkbk_free_caches()
669 blkif->persistent_gnt_c = 0; in xen_blkbk_free_caches()
672 shrink_free_pagepool(blkif, 0 /* All */); in xen_blkbk_free_caches()
676 struct xen_blkif *blkif, in xen_blkbk_unmap_prepare() argument
686 put_persistent_gnt(blkif, pages[i]->persistent_gnt); in xen_blkbk_unmap_prepare()
704 struct xen_blkif *blkif = pending_req->blkif; in xen_blkbk_unmap_and_respond_callback() local
710 put_free_pages(blkif, data->pages, data->count); in xen_blkbk_unmap_and_respond_callback()
711 make_response(blkif, pending_req->id, in xen_blkbk_unmap_and_respond_callback()
713 free_req(blkif, pending_req); in xen_blkbk_unmap_and_respond_callback()
726 if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) { in xen_blkbk_unmap_and_respond_callback()
727 complete(&blkif->drain_complete); in xen_blkbk_unmap_and_respond_callback()
729 xen_blkif_put(blkif); in xen_blkbk_unmap_and_respond_callback()
735 struct xen_blkif *blkif = req->blkif; in xen_blkbk_unmap_and_respond() local
739 invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_segs, in xen_blkbk_unmap_and_respond()
760 static void xen_blkbk_unmap(struct xen_blkif *blkif, in xen_blkbk_unmap() argument
772 invcount = xen_blkbk_unmap_prepare(blkif, pages, batch, in xen_blkbk_unmap()
777 put_free_pages(blkif, unmap_pages, invcount); in xen_blkbk_unmap()
784 static int xen_blkbk_map(struct xen_blkif *blkif, in xen_blkbk_map() argument
798 use_persistent_gnts = (blkif->vbd.feature_gnt_persistent); in xen_blkbk_map()
811 blkif, in xen_blkbk_map()
822 if (get_free_page(blkif, &pages[i]->page)) in xen_blkbk_map()
832 blkif->domid); in xen_blkbk_map()
855 put_free_pages(blkif, &pages[seg_idx]->page, 1); in xen_blkbk_map()
865 blkif->persistent_gnt_c < xen_blkif_max_pgrants) { in xen_blkbk_map()
883 if (add_persistent_gnt(blkif, in xen_blkbk_map()
891 persistent_gnt->gnt, blkif->persistent_gnt_c, in xen_blkbk_map()
895 if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) { in xen_blkbk_map()
896 blkif->vbd.overflow_max_grants = 1; in xen_blkbk_map()
898 blkif->domid, blkif->vbd.handle); in xen_blkbk_map()
916 put_free_pages(blkif, pages_to_gnt, segs_to_map); in xen_blkbk_map()
924 rc = xen_blkbk_map(pending_req->blkif, pending_req->segments, in xen_blkbk_map_seg()
937 struct xen_blkif *blkif = pending_req->blkif; in xen_blkbk_parse_indirect() local
948 rc = xen_blkbk_map(blkif, pages, indirect_grefs, true); in xen_blkbk_parse_indirect()
980 xen_blkbk_unmap(blkif, pages, indirect_grefs); in xen_blkbk_parse_indirect()
984 static int dispatch_discard_io(struct xen_blkif *blkif, in dispatch_discard_io() argument
989 struct block_device *bdev = blkif->vbd.bdev; in dispatch_discard_io()
993 xen_blkif_get(blkif); in dispatch_discard_io()
998 err = xen_vbd_translate(&preq, blkif, WRITE); in dispatch_discard_io()
1002 preq.sector_number + preq.nr_sects, blkif->vbd.pdevice); in dispatch_discard_io()
1005 blkif->st_ds_req++; in dispatch_discard_io()
1007 secure = (blkif->vbd.discard_secure && in dispatch_discard_io()
1021 make_response(blkif, req->u.discard.id, req->operation, status); in dispatch_discard_io()
1022 xen_blkif_put(blkif); in dispatch_discard_io()
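
dispatch_discard_io() never touches the grant-mapping path: the request is translated against the vbd, counted in st_ds_req, and handed to the block layer as a (possibly secure) discard before the usual response is written back. A hypothetical helper showing the core call, for the kernel generation this listing comes from (where blkdev_issue_discard() still takes a flags argument):

    /* Hypothetical wrapper around the block-layer call used by
     * dispatch_discard_io(); secure mirrors blkif->vbd.discard_secure
     * combined with the frontend's request flag. */
    static int blkif_issue_discard(struct xen_blkif *blkif, sector_t start,
                                   sector_t nr_sects, bool secure)
    {
        return blkdev_issue_discard(blkif->vbd.bdev, start, nr_sects,
                                    GFP_KERNEL,
                                    secure ? BLKDEV_DISCARD_SECURE : 0);
    }
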
1026 static int dispatch_other_io(struct xen_blkif *blkif, in dispatch_other_io() argument
1030 free_req(blkif, pending_req); in dispatch_other_io()
1031 make_response(blkif, req->u.other.id, req->operation, in dispatch_other_io()
1036 static void xen_blk_drain_io(struct xen_blkif *blkif) in xen_blk_drain_io() argument
1038 atomic_set(&blkif->drain, 1); in xen_blk_drain_io()
1040 if (atomic_read(&blkif->inflight) == 0) in xen_blk_drain_io()
1043 &blkif->drain_complete, HZ); in xen_blk_drain_io()
1045 if (!atomic_read(&blkif->drain)) in xen_blk_drain_io()
1048 atomic_set(&blkif->drain, 0); in xen_blk_drain_io()
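
xen_blk_drain_io() implements a simple drain barrier used around flush/barrier-style requests: the drain flag is raised, the thread waits (with a one-second timeout per iteration) on drain_complete, and the completion callback shown earlier signals it once atomic_dec_and_test(&blkif->inflight) reaches zero while drain is set. Reconstructed sketch of the waiting side:

    static void xen_blk_drain_io(struct xen_blkif *blkif)
    {
        atomic_set(&blkif->drain, 1);
        do {
            if (atomic_read(&blkif->inflight) == 0)
                break;                          /* nothing left to wait for */
            wait_for_completion_interruptible_timeout(
                    &blkif->drain_complete, HZ);
            if (!atomic_read(&blkif->drain))
                break;                          /* drain no longer requested */
        } while (!kthread_should_stop());
        atomic_set(&blkif->drain, 0);
    }
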
1061 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0); in __end_block_io_op()
1066 xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0); in __end_block_io_op()
1100 __do_block_io_op(struct xen_blkif *blkif) in __do_block_io_op() argument
1102 union blkif_back_rings *blk_rings = &blkif->blk_rings; in __do_block_io_op()
1115 rp, rc, rp - rc, blkif->vbd.pdevice); in __do_block_io_op()
1128 pending_req = alloc_req(blkif); in __do_block_io_op()
1130 blkif->st_oo_req++; in __do_block_io_op()
1135 switch (blkif->blk_protocol) { in __do_block_io_op()
1159 if (dispatch_rw_block_io(blkif, &req, pending_req)) in __do_block_io_op()
1163 free_req(blkif, pending_req); in __do_block_io_op()
1164 if (dispatch_discard_io(blkif, &req)) in __do_block_io_op()
1168 if (dispatch_other_io(blkif, &req, pending_req)) in __do_block_io_op()
1181 do_block_io_op(struct xen_blkif *blkif) in do_block_io_op() argument
1183 union blkif_back_rings *blk_rings = &blkif->blk_rings; in do_block_io_op()
1187 more_to_do = __do_block_io_op(blkif); in do_block_io_op()
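
do_block_io_op() is a thin wrapper around __do_block_io_op() that closes the race between the backend finishing its sweep and the frontend posting new requests: after a sweep that found nothing more, RING_FINAL_CHECK_FOR_REQUESTS() re-enables notifications and re-checks the shared ring. Sketch of that loop:

    static int do_block_io_op(struct xen_blkif *blkif)
    {
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int more_to_do;

        do {
            more_to_do = __do_block_io_op(blkif);
            if (more_to_do)
                break;          /* stopped early (e.g. no free pending_req) */

            /* re-arm notifications, then look once more for late requests */
            RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
        } while (more_to_do);

        return more_to_do;
    }
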
1200 static int dispatch_rw_block_io(struct xen_blkif *blkif, in dispatch_rw_block_io() argument
1228 blkif->st_rd_req++; in dispatch_rw_block_io()
1232 blkif->st_wr_req++; in dispatch_rw_block_io()
1238 blkif->st_f_req++; in dispatch_rw_block_io()
1263 pending_req->blkif = blkif; in dispatch_rw_block_io()
1290 if (xen_vbd_translate(&preq, blkif, operation) != 0) { in dispatch_rw_block_io()
1295 blkif->vbd.pdevice); in dispatch_rw_block_io()
1307 blkif->domid); in dispatch_rw_block_io()
1316 xen_blk_drain_io(pending_req->blkif); in dispatch_rw_block_io()
1331 xen_blkif_get(blkif); in dispatch_rw_block_io()
1332 atomic_inc(&blkif->inflight); in dispatch_rw_block_io()
1380 blkif->st_rd_sect += preq.nr_sects; in dispatch_rw_block_io()
1382 blkif->st_wr_sect += preq.nr_sects; in dispatch_rw_block_io()
1387 xen_blkbk_unmap(blkif, pending_req->segments, in dispatch_rw_block_io()
1391 make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR); in dispatch_rw_block_io()
1392 free_req(blkif, pending_req); in dispatch_rw_block_io()
1410 static void make_response(struct xen_blkif *blkif, u64 id, in make_response() argument
1415 union blkif_back_rings *blk_rings = &blkif->blk_rings; in make_response()
1422 spin_lock_irqsave(&blkif->blk_ring_lock, flags); in make_response()
1424 switch (blkif->blk_protocol) { in make_response()
1442 spin_unlock_irqrestore(&blkif->blk_ring_lock, flags); in make_response()
1444 notify_remote_via_irq(blkif->irq); in make_response()
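
make_response() is the tail of every request: the reply is copied onto the shared ring under blk_ring_lock, the producer index is pushed, and the frontend is kicked through the event-channel irq only when the ring macro reports that a notification is actually needed. A sketch simplified to the native ring layout (the real function switches on blkif->blk_protocol to also handle the 32-bit and 64-bit x86 ABIs):

    static void make_response(struct xen_blkif *blkif, u64 id,
                              unsigned short op, int st)
    {
        struct blkif_response resp;
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        unsigned long flags;
        int notify;

        resp.id        = id;
        resp.operation = op;
        resp.status    = st;

        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
        /* native protocol only; the real code picks the ring by blk_protocol */
        memcpy(RING_GET_RESPONSE(&blk_rings->native,
                                 blk_rings->native.rsp_prod_pvt),
               &resp, sizeof(resp));
        blk_rings->native.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

        if (notify)
            notify_remote_via_irq(blkif->irq);
    }
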