Lines matching references to size in drivers/xen/swiotlb-xen.c (the Linux kernel's Xen SWIOTLB bounce-buffer DMA layer). Each entry gives the line number in that file, the matching source line, and the enclosing function; "argument" marks a match inside a parameter list.

127 static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)  in range_straddles_page_boundary()  argument
132 if (offset + size <= XEN_PAGE_SIZE) in range_straddles_page_boundary()
134 if (check_pages_physically_contiguous(xen_pfn, offset, size)) in range_straddles_page_boundary()
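The three matches above cover the whole boundary test: a buffer is safe if offset + size fits inside a single Xen page, or if the pages it spans are physically contiguous. A sketch of the full helper, reconstructed from these fragments and the 4.x-era source (approximate, not authoritative; XEN_PFN_DOWN, XEN_PAGE_MASK and check_pages_physically_contiguous are the kernel's own):

    static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
    {
            unsigned long xen_pfn = XEN_PFN_DOWN(p);
            unsigned int offset = p & ~XEN_PAGE_MASK;

            if (offset + size <= XEN_PAGE_SIZE)
                    return 0;       /* fits entirely within one Xen page */
            if (check_pages_physically_contiguous(xen_pfn, offset, size))
                    return 0;       /* spans pages, but they are machine-contiguous */
            return 1;               /* straddles a discontiguous page boundary */
    }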
159 xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs) in xen_swiotlb_fixup() argument
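xen_swiotlb_fixup() runs at init time: it walks the bounce buffer in IO_TLB_SEGSIZE-slab chunks and asks Xen to make each chunk contiguous in machine memory, since swiotlb assumes the segments it hands out are contiguous. A sketch under that reading, reconstructed from memory of the 4.x source (max_dma_bits is a file-static starting at 32; treat constants and the retry loop as approximate):

    static int xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
    {
            int i = 0, rc, dma_bits;
            dma_addr_t dma_handle;
            phys_addr_t p = virt_to_phys(buf);

            dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
            do {
                    int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

                    /* Retry with a wider address width until Xen accepts. */
                    do {
                            rc = xen_create_contiguous_region(
                                    p + (i << IO_TLB_SHIFT),
                                    get_order(slabs << IO_TLB_SHIFT),
                                    dma_bits, &dma_handle);
                    } while (rc && dma_bits++ < max_dma_bits);
                    if (rc)
                            return rc;
                    i += slabs;
            } while (i < nslabs);
            return 0;
    }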
295 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, in xen_swiotlb_alloc_coherent() argument
300 int order = get_order(size); in xen_swiotlb_alloc_coherent()
318 ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs); in xen_swiotlb_alloc_coherent()
332 if (((dev_addr + size - 1 <= dma_mask)) && in xen_swiotlb_alloc_coherent()
333 !range_straddles_page_boundary(phys, size)) in xen_swiotlb_alloc_coherent()
338 xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs); in xen_swiotlb_alloc_coherent()
342 memset(ret, 0, size); in xen_swiotlb_alloc_coherent()
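These matches trace the coherent-allocation path: allocate pages, and if the resulting bus address already fits the device's DMA mask and does not straddle a Xen page boundary, use it as-is; otherwise exchange it with Xen for a machine-contiguous region, freeing the pages on failure, then zero the buffer. A condensed sketch (error handling and the ARM-specific commentary trimmed; the mask setup is simplified from the original):

    void *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                                     dma_addr_t *dma_handle, gfp_t flags,
                                     unsigned long attrs)
    {
            int order = get_order(size);
            u64 dma_mask = DMA_BIT_MASK(32);
            phys_addr_t phys;
            dma_addr_t dev_addr;
            void *ret;

            flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
            ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
            if (!ret)
                    return NULL;

            if (hwdev && hwdev->coherent_dma_mask)
                    dma_mask = hwdev->coherent_dma_mask;

            phys = *dma_handle;             /* still a physical address here */
            dev_addr = xen_phys_to_bus(phys);
            if ((dev_addr + size - 1 <= dma_mask) &&
                !range_straddles_page_boundary(phys, size)) {
                    *dma_handle = dev_addr; /* usable as-is */
            } else if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
                                                    dma_handle) != 0) {
                    xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
                    return NULL;
            }
            memset(ret, 0, size);
            return ret;
    }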
348 xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, in xen_swiotlb_free_coherent() argument
351 int order = get_order(size); in xen_swiotlb_free_coherent()
362 if (((dev_addr + size - 1 > dma_mask)) || in xen_swiotlb_free_coherent()
363 range_straddles_page_boundary(phys, size)) in xen_swiotlb_free_coherent()
366 xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs); in xen_swiotlb_free_coherent()
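The free path applies the allocation test with the condition inverted: if the buffer could not have been used as-is (it exceeds the mask or straddles a Xen page boundary), it must have been exchanged with Xen at allocation time, so it is handed back via xen_destroy_contiguous_region() before the pages are freed. Sketch under the same caveats:

    void xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                                   dma_addr_t dev_addr, unsigned long attrs)
    {
            int order = get_order(size);
            u64 dma_mask = DMA_BIT_MASK(32);
            phys_addr_t phys;

            if (hwdev && hwdev->coherent_dma_mask)
                    dma_mask = hwdev->coherent_dma_mask;

            phys = xen_bus_to_phys(dev_addr);       /* not virt_to_phys(): ARM */
            if ((dev_addr + size - 1 > dma_mask) ||
                range_straddles_page_boundary(phys, size))
                    xen_destroy_contiguous_region(phys, order);

            xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
    }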
379 unsigned long offset, size_t size, in xen_swiotlb_map_page() argument
392 if (dma_capable(dev, dev_addr, size) && in xen_swiotlb_map_page()
393 !range_straddles_page_boundary(phys, size) && in xen_swiotlb_map_page()
399 xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs); in xen_swiotlb_map_page()
406 trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); in xen_swiotlb_map_page()
408 map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir); in xen_swiotlb_map_page()
413 dev_addr, map & ~PAGE_MASK, size, dir, attrs); in xen_swiotlb_map_page()
419 if (!dma_capable(dev, dev_addr, size)) { in xen_swiotlb_map_page()
420 swiotlb_tbl_unmap_single(dev, map, size, dir); in xen_swiotlb_map_page()
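The streaming-map path has a fast case and a bounce case: if the device can reach dev_addr for the whole size and the range does not straddle a Xen page boundary, the address is returned directly (xen_dma_map_page() is still called for its cache maintenance); otherwise the buffer is bounced through the swiotlb, and the mapping is torn down again if even the bounce address is not DMA-capable. Condensed sketch (the xen_arch_need_swiotlb() test from the original is omitted; start_dma_addr is a file-static bus address of the swiotlb set at init):

    dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                    unsigned long offset, size_t size,
                                    enum dma_data_direction dir,
                                    unsigned long attrs)
    {
            phys_addr_t map, phys = page_to_phys(page) + offset;
            dma_addr_t dev_addr = xen_phys_to_bus(phys);

            /* Fast path: the device can reach the buffer directly. */
            if (dma_capable(dev, dev_addr, size) &&
                !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
                    xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
                    return dev_addr;
            }

            /* Slow path: bounce through the swiotlb. */
            trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
            map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
            if (map == SWIOTLB_MAP_ERROR)
                    return DMA_ERROR_CODE;

            dev_addr = xen_phys_to_bus(map);
            xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
                             dev_addr, map & ~PAGE_MASK, size, dir, attrs);

            /* Ensure even the bounce address is reachable by the device. */
            if (!dma_capable(dev, dev_addr, size)) {
                    swiotlb_tbl_unmap_single(dev, map, size, dir);
                    dev_addr = 0;
            }
            return dev_addr;
    }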
436 size_t size, enum dma_data_direction dir, in xen_unmap_single() argument
443 xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs); in xen_unmap_single()
447 swiotlb_tbl_unmap_single(hwdev, paddr, size, dir); in xen_unmap_single()
460 dma_mark_clean(phys_to_virt(paddr), size); in xen_unmap_single()
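Unmapping reverses the above: after the arch cache hook, a bounce buffer (identified by is_xen_swiotlb_buffer(dev_addr)) is released through swiotlb_tbl_unmap_single(), which also copies data back for DMA_FROM_DEVICE; a direct mapping only needs dma_mark_clean() on that path. xen_swiotlb_unmap_page(), listed next, simply forwards to this helper. Sketch:

    static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
                                 unsigned long attrs)
    {
            phys_addr_t paddr = xen_bus_to_phys(dev_addr);

            xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

            /* Note: the bounce-buffer test uses dev_addr, not paddr. */
            if (is_xen_swiotlb_buffer(dev_addr)) {
                    swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
                    return;
            }

            if (dir != DMA_FROM_DEVICE)
                    return;

            dma_mark_clean(phys_to_virt(paddr), size);
    }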
464 size_t size, enum dma_data_direction dir, in xen_swiotlb_unmap_page() argument
467 xen_unmap_single(hwdev, dev_addr, size, dir, attrs); in xen_swiotlb_unmap_page()
483 size_t size, enum dma_data_direction dir, in xen_swiotlb_sync_single() argument
491 xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir); in xen_swiotlb_sync_single()
495 swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target); in xen_swiotlb_sync_single()
498 xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir); in xen_swiotlb_sync_single()
503 dma_mark_clean(phys_to_virt(paddr), size); in xen_swiotlb_sync_single()
508 size_t size, enum dma_data_direction dir) in xen_swiotlb_sync_single_for_cpu() argument
510 xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); in xen_swiotlb_sync_single_for_cpu()
516 size_t size, enum dma_data_direction dir) in xen_swiotlb_sync_single_for_device() argument
518 xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); in xen_swiotlb_sync_single_for_device()
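The sync helper and its two exported wrappers close out the matches: the arch for-CPU hook runs before the bounce-buffer copy and the for-device hook after it, so whichever side is about to touch the data sees the freshly synced copy. Sketch, with one wrapper shown (the _for_device variant is identical apart from passing SYNC_FOR_DEVICE):

    static void xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                                        size_t size, enum dma_data_direction dir,
                                        enum dma_sync_target target)
    {
            phys_addr_t paddr = xen_bus_to_phys(dev_addr);

            if (target == SYNC_FOR_CPU)
                    xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

            /* As in xen_unmap_single(), the test uses dev_addr, not paddr. */
            if (is_xen_swiotlb_buffer(dev_addr))
                    swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

            if (target == SYNC_FOR_DEVICE)
                    xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

            if (dir == DMA_FROM_DEVICE)
                    dma_mark_clean(phys_to_virt(paddr), size);
    }

    void xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                         size_t size, enum dma_data_direction dir)
    {
            xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
    }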