Lines Matching refs:dp in drivers/md/dm-io.c (the device-mapper I/O library)

160 void (*get_page)(struct dpages *dp,
162 void (*next_page)(struct dpages *dp);
174 static void list_get_page(struct dpages *dp, in list_get_page() argument
177 unsigned o = dp->context_u; in list_get_page()
178 struct page_list *pl = (struct page_list *) dp->context_ptr; in list_get_page()
185 static void list_next_page(struct dpages *dp) in list_next_page() argument
187 struct page_list *pl = (struct page_list *) dp->context_ptr; in list_next_page()
188 dp->context_ptr = pl->next; in list_next_page()
189 dp->context_u = 0; in list_next_page()
192 static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset) in list_dp_init() argument
194 dp->get_page = list_get_page; in list_dp_init()
195 dp->next_page = list_next_page; in list_dp_init()
196 dp->context_u = offset; in list_dp_init()
197 dp->context_ptr = pl; in list_dp_init()
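
The matches above (lines 160-197) outline the struct dpages helper: a tiny iterator whose get_page()/next_page() callbacks are swapped depending on where the data lives, with context_u and context_ptr as its only state. Below is a minimal user-space sketch of that pattern using the page_list backend; MY_PAGE_SIZE, the plain void * page type and the demo main() are assumptions made for illustration, not the kernel definitions.

#include <stdio.h>
#include <stdlib.h>

#define MY_PAGE_SIZE 4096u

struct page_list {                  /* stand-in for the kernel's struct page_list */
    struct page_list *next;
    void *page;
};

struct dpages {
    void (*get_page)(struct dpages *dp, void **p,
                     unsigned long *len, unsigned *offset);
    void (*next_page)(struct dpages *dp);
    unsigned context_u;             /* offset into the current page */
    void *context_ptr;              /* backend-specific cursor */
};

/* page_list backend: context_ptr walks the list, context_u holds the offset */
static void list_get_page(struct dpages *dp, void **p,
                          unsigned long *len, unsigned *offset)
{
    unsigned o = dp->context_u;
    struct page_list *pl = dp->context_ptr;

    *p = pl->page;
    *offset = o;
    *len = MY_PAGE_SIZE - o;        /* usable bytes left in this page */
}

static void list_next_page(struct dpages *dp)
{
    struct page_list *pl = dp->context_ptr;

    dp->context_ptr = pl->next;
    dp->context_u = 0;              /* only the first page keeps an offset */
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
    dp->get_page = list_get_page;
    dp->next_page = list_next_page;
    dp->context_u = offset;
    dp->context_ptr = pl;
}

int main(void)
{
    struct page_list second = { NULL, malloc(MY_PAGE_SIZE) };
    struct page_list first = { &second, malloc(MY_PAGE_SIZE) };
    struct dpages dp;
    void *p;
    unsigned long len;
    unsigned offset;

    list_dp_init(&dp, &first, 128);
    while (dp.context_ptr) {
        dp.get_page(&dp, &p, &len, &offset);
        printf("page %p, offset %u, len %lu\n", p, offset, len);
        dp.next_page(&dp);
    }
    free(first.page);
    free(second.page);
    return 0;
}

The design point this preserves: only the first page can carry a non-zero offset; next_page() always resets context_u to 0.
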
203 static void bio_get_page(struct dpages *dp, struct page **p, in bio_get_page() argument
206 struct bio_vec *bvec = dp->context_ptr; in bio_get_page()
208 *len = bvec->bv_len - dp->context_u; in bio_get_page()
209 *offset = bvec->bv_offset + dp->context_u; in bio_get_page()
212 static void bio_next_page(struct dpages *dp) in bio_next_page() argument
214 struct bio_vec *bvec = dp->context_ptr; in bio_next_page()
215 dp->context_ptr = bvec + 1; in bio_next_page()
216 dp->context_u = 0; in bio_next_page()
219 static void bio_dp_init(struct dpages *dp, struct bio *bio) in bio_dp_init() argument
221 dp->get_page = bio_get_page; in bio_dp_init()
222 dp->next_page = bio_next_page; in bio_dp_init()
223 dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); in bio_dp_init()
224 dp->context_u = bio->bi_iter.bi_bvec_done; in bio_dp_init()
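
Lines 203-224 show the same two callbacks implemented over a bio's bio_vec array: context_ptr points at the current bvec and context_u is seeded from bi_iter.bi_bvec_done, i.e. how much of that bvec has already been transferred. A hedged sketch of that bookkeeping, with struct seg standing in for struct bio_vec and the demo data invented for illustration:

#include <stdio.h>

struct seg {                    /* stand-in for the kernel's struct bio_vec */
    char *buf;                  /* bv_page */
    unsigned len;               /* bv_len */
    unsigned offset;            /* bv_offset */
};

struct dpages {
    unsigned context_u;         /* bytes of the current segment already consumed */
    void *context_ptr;          /* points at the current segment */
};

static void bio_get_page(struct dpages *dp, void **p,
                         unsigned long *len, unsigned *offset)
{
    struct seg *s = dp->context_ptr;

    *p = s->buf;
    *len = s->len - dp->context_u;          /* what is left of this segment */
    *offset = s->offset + dp->context_u;    /* skip the part already done */
}

static void bio_next_page(struct dpages *dp)
{
    struct seg *s = dp->context_ptr;

    dp->context_ptr = s + 1;                /* advance to the next segment */
    dp->context_u = 0;
}

int main(void)
{
    static char a[4096], b[4096];
    struct seg segs[] = { { a, 4096, 0 }, { b, 2048, 512 } };
    /* context_u = 1024 plays the role of bi_iter.bi_bvec_done:
     * part of the first segment was already transferred */
    struct dpages dp = { 1024, segs };
    void *p;
    unsigned long len;
    unsigned off;

    for (int i = 0; i < 2; i++) {
        bio_get_page(&dp, &p, &len, &off);
        printf("segment %d: offset %u, len %lu\n", i, off, len);
        bio_next_page(&dp);
    }
    return 0;
}

Note how get_page() subtracts context_u from the remaining length and adds it to the offset, while next_page() simply steps to the next descriptor and clears it.
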
230 static void vm_get_page(struct dpages *dp, in vm_get_page() argument
233 *p = vmalloc_to_page(dp->context_ptr); in vm_get_page()
234 *offset = dp->context_u; in vm_get_page()
235 *len = PAGE_SIZE - dp->context_u; in vm_get_page()
238 static void vm_next_page(struct dpages *dp) in vm_next_page() argument
240 dp->context_ptr += PAGE_SIZE - dp->context_u; in vm_next_page()
241 dp->context_u = 0; in vm_next_page()
244 static void vm_dp_init(struct dpages *dp, void *data) in vm_dp_init() argument
246 dp->get_page = vm_get_page; in vm_dp_init()
247 dp->next_page = vm_next_page; in vm_dp_init()
248 dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1); in vm_dp_init()
249 dp->context_ptr = data; in vm_dp_init()
255 static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len, in km_get_page() argument
258 *p = virt_to_page(dp->context_ptr); in km_get_page()
259 *offset = dp->context_u; in km_get_page()
260 *len = PAGE_SIZE - dp->context_u; in km_get_page()
263 static void km_next_page(struct dpages *dp) in km_next_page() argument
265 dp->context_ptr += PAGE_SIZE - dp->context_u; in km_next_page()
266 dp->context_u = 0; in km_next_page()
269 static void km_dp_init(struct dpages *dp, void *data) in km_dp_init() argument
271 dp->get_page = km_get_page; in km_dp_init()
272 dp->next_page = km_next_page; in km_dp_init()
273 dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1); in km_dp_init()
274 dp->context_ptr = data; in km_dp_init()
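
The vm_* (lines 230-249) and km_* (lines 255-274) backends are near-identical: context_ptr is a bare virtual address, context_u is its offset within the current page (seeded in *_dp_init as addr & (PAGE_SIZE - 1)), and next_page() advances by PAGE_SIZE - context_u. They differ only in how the address becomes a struct page (vmalloc_to_page() vs virt_to_page()). A user-space sketch covering both, where the page lookup is faked by rounding the address down; that fake, the addr_* names and MY_PAGE_SIZE are assumptions of the sketch:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define MY_PAGE_SIZE 4096ul

struct dpages {
    unsigned context_u;         /* offset of context_ptr within its page */
    void *context_ptr;          /* current virtual address */
};

static void addr_get_page(struct dpages *dp, void **p,
                          unsigned long *len, unsigned *offset)
{
    /* stands in for vmalloc_to_page()/virt_to_page(): round the
     * address down to its page boundary */
    *p = (void *)((uintptr_t)dp->context_ptr & ~(MY_PAGE_SIZE - 1));
    *offset = dp->context_u;
    *len = MY_PAGE_SIZE - dp->context_u;
}

static void addr_next_page(struct dpages *dp)
{
    /* jump over whatever is left of the current page */
    dp->context_ptr = (char *)dp->context_ptr + (MY_PAGE_SIZE - dp->context_u);
    dp->context_u = 0;
}

static void addr_dp_init(struct dpages *dp, void *data)
{
    dp->context_u = (unsigned)((uintptr_t)data & (MY_PAGE_SIZE - 1));
    dp->context_ptr = data;
}

int main(void)
{
    char *buf = malloc(3 * MY_PAGE_SIZE);
    struct dpages dp;
    void *p;
    unsigned long len;
    unsigned off;

    addr_dp_init(&dp, buf + 100);           /* deliberately unaligned start */
    for (int i = 0; i < 3; i++) {
        addr_get_page(&dp, &p, &len, &off);
        printf("page %p, offset %u, len %lu\n", p, off, len);
        addr_next_page(&dp);
    }
    free(buf);
    return 0;
}
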
281 struct dpages *dp, struct io *io) in do_region() argument
334 dp->get_page(dp, &page, &len, &offset); in do_region()
341 dp->next_page(dp); in do_region()
346 dp->get_page(dp, &page, &len, &offset); in do_region()
353 dp->next_page(dp); in do_region()
362 struct dm_io_region *where, struct dpages *dp, in dispatch_io() argument
366 struct dpages old_pages = *dp; in dispatch_io()
378 *dp = old_pages; in dispatch_io()
380 do_region(rw, i, where + i, dp, io); in dispatch_io()
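
Lines 281-380 show the consumer side: do_region() repeatedly calls dp->get_page()/dp->next_page() to carve the source into page-sized pieces, and dispatch_io() snapshots the iterator (old_pages = *dp, line 366) and rewinds it before each region (line 378) so that every region is fed the same data, which is what a mirrored write needs. A self-contained sketch of that loop and the rewind trick; fake_do_region, fake_dispatch_io and the toy_* backend are invented names, and the real do_region of course builds and submits bios rather than printing:

#include <stdio.h>
#include <stdlib.h>

#define MY_PAGE_SIZE 4096ul

struct dpages {
    void (*get_page)(struct dpages *dp, void **p,
                     unsigned long *len, unsigned *offset);
    void (*next_page)(struct dpages *dp);
    unsigned context_u;
    void *context_ptr;
};

/* toy backend: context_ptr is the base buffer and context_u simply
 * counts how many whole pages have been handed out */
static void toy_get_page(struct dpages *dp, void **p,
                         unsigned long *len, unsigned *offset)
{
    *p = (char *)dp->context_ptr + dp->context_u * MY_PAGE_SIZE;
    *offset = 0;
    *len = MY_PAGE_SIZE;
}

static void toy_next_page(struct dpages *dp)
{
    dp->context_u++;
}

/* do_region-style consumer: keep asking for pages until the region is full */
static void fake_do_region(unsigned region, unsigned long nbytes, struct dpages *dp)
{
    void *p;
    unsigned long len;
    unsigned offset;

    while (nbytes) {
        dp->get_page(dp, &p, &len, &offset);
        if (len > nbytes)
            len = nbytes;               /* clip to what the region still needs */
        printf("region %u: %lu bytes from %p+%u\n", region, len, p, offset);
        nbytes -= len;
        dp->next_page(dp);
    }
}

/* dispatch_io-style driver: snapshot the iterator and rewind it per region */
static void fake_dispatch_io(unsigned num_regions, unsigned long nbytes, struct dpages *dp)
{
    struct dpages old_pages = *dp;          /* corresponds to old_pages = *dp */

    for (unsigned i = 0; i < num_regions; i++) {
        *dp = old_pages;                    /* corresponds to *dp = old_pages */
        fake_do_region(i, nbytes, dp);
    }
}

int main(void)
{
    char *buf = malloc(2 * MY_PAGE_SIZE);
    struct dpages dp = { toy_get_page, toy_next_page, 0, buf };

    fake_dispatch_io(2, 5000, &dp);
    free(buf);
    return 0;
}

Because all of the iterator's state lives inside struct dpages, a plain structure copy is enough to rewind it for the next region.
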
404 struct dm_io_region *where, int rw, struct dpages *dp, in sync_io() argument
424 io->vma_invalidate_address = dp->vma_invalidate_address; in sync_io()
425 io->vma_invalidate_size = dp->vma_invalidate_size; in sync_io()
427 dispatch_io(rw, num_regions, where, dp, io, 1); in sync_io()
438 struct dm_io_region *where, int rw, struct dpages *dp, in async_io() argument
456 io->vma_invalidate_address = dp->vma_invalidate_address; in async_io()
457 io->vma_invalidate_size = dp->vma_invalidate_size; in async_io()
459 dispatch_io(rw, num_regions, where, dp, io, 0); in async_io()
463 static int dp_init(struct dm_io_request *io_req, struct dpages *dp, in dp_init() argument
468 dp->vma_invalidate_address = NULL; in dp_init()
469 dp->vma_invalidate_size = 0; in dp_init()
473 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset); in dp_init()
477 bio_dp_init(dp, io_req->mem.ptr.bio); in dp_init()
483 dp->vma_invalidate_address = io_req->mem.ptr.vma; in dp_init()
484 dp->vma_invalidate_size = size; in dp_init()
486 vm_dp_init(dp, io_req->mem.ptr.vma); in dp_init()
490 km_dp_init(dp, io_req->mem.ptr.addr); in dp_init()
512 struct dpages dp; in dm_io() local
514 r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT); in dm_io()
520 io_req->bi_rw, &dp, sync_error_bits); in dm_io()
523 &dp, io_req->notify.fn, io_req->notify.context); in dm_io()
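
The remaining matches sketch the top level: dp_init() (lines 463-490) picks the right *_dp_init() for the request's memory type and, for vmalloc'd buffers, records vma_invalidate_address/size so the mapping can be invalidated after the transfer; dm_io() (lines 512-523) then builds the dpages on its stack and dispatches through sync_io() or async_io() depending on whether a notify callback was supplied. A rough user-space sketch of that flow; the io_request/mem_type/stub_dp_init names, the error value and the read-only gating of the invalidation record are assumptions modelled on the listing, not the kernel definitions:

#include <stdio.h>
#include <errno.h>

struct dpages {
    unsigned context_u;
    void *context_ptr;
    void *vma_invalidate_address;       /* recorded for vmalloc'd buffers */
    unsigned long vma_invalidate_size;
};

enum mem_type { MEM_PAGE_LIST, MEM_BIO, MEM_VMA, MEM_KMEM };

struct io_request {                     /* loose stand-in for struct dm_io_request */
    enum mem_type type;
    void *ptr;                          /* page list, bio, vma or kmem address */
    unsigned offset;
    int is_read;
    void (*notify_fn)(unsigned long error, void *context);  /* NULL => synchronous */
    void *notify_context;
};

/* stand-in for list_dp_init/bio_dp_init/vm_dp_init/km_dp_init */
static void stub_dp_init(struct dpages *dp, void *ptr, unsigned offset)
{
    dp->context_ptr = ptr;
    dp->context_u = offset;
}

static int dp_init(struct io_request *req, struct dpages *dp, unsigned long size)
{
    dp->vma_invalidate_address = NULL;
    dp->vma_invalidate_size = 0;

    switch (req->type) {
    case MEM_PAGE_LIST:
    case MEM_BIO:
    case MEM_KMEM:
        stub_dp_init(dp, req->ptr, req->offset);
        return 0;
    case MEM_VMA:
        /* remember the range so a read can be invalidated afterwards */
        if (req->is_read) {
            dp->vma_invalidate_address = req->ptr;
            dp->vma_invalidate_size = size;
        }
        stub_dp_init(dp, req->ptr, 0);
        return 0;
    }
    return -EINVAL;
}

static int dm_io_like(struct io_request *req, unsigned long nbytes)
{
    struct dpages dp;
    int r = dp_init(req, &dp, nbytes);

    if (r)
        return r;

    if (!req->notify_fn)
        printf("no notify callback: dispatch and wait (sync_io path)\n");
    else
        printf("notify callback set: dispatch and return (async_io path)\n");
    return 0;
}

int main(void)
{
    static char vma_buf[8192];
    struct io_request req = { MEM_VMA, vma_buf, 0, 1, NULL, NULL };

    return dm_io_like(&req, sizeof(vma_buf));
}

In the kernel, the synchronous path additionally waits for completion and copies out the error bits, while the asynchronous path returns as soon as the I/O has been queued.
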