Lines matching refs:size (references to the size parameter throughout drivers/xen/swiotlb-xen.c)

127 static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)  in range_straddles_page_boundary()  argument
132 if (offset + size <= PAGE_SIZE) in range_straddles_page_boundary()
134 if (check_pages_physically_contiguous(pfn, offset, size)) in range_straddles_page_boundary()
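This predicate gates every fast path below: under Xen, two pseudo-physically adjacent pages may be backed by non-adjacent machine frames, so any buffer that crosses a page boundary is suspect unless the frames are verified contiguous. A minimal user-space approximation of the arithmetic on lines 127-134 (PAGE_SIZE hard-coded here; the in-kernel version also consults check_pages_physically_contiguous() before declaring a straddle):

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

/* Sketch of range_straddles_page_boundary(): nonzero when [p, p + size)
 * spills past the end of the page containing p. The kernel version
 * additionally returns 0 when the spanned pages turn out to be
 * physically contiguous (line 134), since contiguous machine frames are
 * safe to hand to a device in one go. */
static inline int straddles_page_boundary(uint64_t p, size_t size)
{
	unsigned long offset = p & (PAGE_SIZE - 1);

	return !(offset + size <= PAGE_SIZE);
}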
160 xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs) in xen_swiotlb_fixup() argument
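xen_swiotlb_fixup() is why the bounce buffer works at all: the swiotlb is allocated as ordinary kernel memory, then exchanged, one IO_TLB_SEGSIZE slab group at a time, for machine-contiguous memory the device can address. The following kernel-context sketch assumes the phys_addr_t-based signature of xen_create_contiguous_region() and trims the upstream retry that widens dma_bits when an exchange fails:

/* Hedged sketch, not the verbatim kernel function. */
static int xen_swiotlb_fixup_sketch(void *buf, size_t size,
				    unsigned long nslabs)
{
	int dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
	unsigned long i = 0;
	dma_addr_t dma_handle;

	while (i < nslabs) {
		unsigned long slabs = min(nslabs - i,
					  (unsigned long)IO_TLB_SEGSIZE);
		/* Swap this slab group's frames for a machine-contiguous,
		 * DMA-addressable region. */
		int rc = xen_create_contiguous_region(
				virt_to_phys(buf) + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		if (rc)
			return rc;
		i += slabs;
	}
	return 0;
}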
296 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, in xen_swiotlb_alloc_coherent() argument
301 int order = get_order(size); in xen_swiotlb_alloc_coherent()
314 if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret)) in xen_swiotlb_alloc_coherent()
322 ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs); in xen_swiotlb_alloc_coherent()
336 if (((dev_addr + size - 1 <= dma_mask)) && in xen_swiotlb_alloc_coherent()
337 !range_straddles_page_boundary(phys, size)) in xen_swiotlb_alloc_coherent()
342 xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs); in xen_swiotlb_alloc_coherent()
346 memset(ret, 0, size); in xen_swiotlb_alloc_coherent()
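The allocation path allocates first and validates afterwards: xen_alloc_coherent_pages() hands back a buffer (line 322), the predicate on lines 336-337 decides whether it is directly usable, and only on failure are the frames exchanged for machine-contiguous ones (with line 342 freeing the buffer if even that fails). The decision itself is plain arithmetic; a standalone rendering, where dma_mask stands for the device's coherent DMA mask:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Lines 336-337: the buffer is usable as allocated iff its last byte
 * (dev_addr + size - 1) is reachable under the device mask and the range
 * does not straddle a page boundary. The memset on line 346 runs either
 * way, since callers expect zeroed coherent memory. */
static bool coherent_alloc_ok(uint64_t dev_addr, size_t size,
			      uint64_t dma_mask, bool straddles)
{
	return (dev_addr + size - 1 <= dma_mask) && !straddles;
}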
352 xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, in xen_swiotlb_free_coherent() argument
355 int order = get_order(size); in xen_swiotlb_free_coherent()
369 if (((dev_addr + size - 1 > dma_mask)) || in xen_swiotlb_free_coherent()
370 range_straddles_page_boundary(phys, size)) in xen_swiotlb_free_coherent()
373 xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs); in xen_swiotlb_free_coherent()
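Freeing mirrors that check with the comparison flipped (lines 369-370): if the buffer as originally allocated would not have been usable, its frames must have been exchanged, so the contiguous region has to be torn down before the pages are returned on line 373. Rendered as the exact negation of the helper above:

/* Lines 369-370: true when the alloc path must have exchanged frames,
 * i.e. precisely !coherent_alloc_ok(). */
static bool must_destroy_contiguous(uint64_t dev_addr, size_t size,
				    uint64_t dma_mask, bool straddles)
{
	return (dev_addr + size - 1 > dma_mask) || straddles;
}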
386 unsigned long offset, size_t size, in xen_swiotlb_map_page() argument
399 if (dma_capable(dev, dev_addr, size) && in xen_swiotlb_map_page()
400 !range_straddles_page_boundary(phys, size) && in xen_swiotlb_map_page()
406 xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs); in xen_swiotlb_map_page()
413 trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); in xen_swiotlb_map_page()
415 map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir); in xen_swiotlb_map_page()
420 dev_addr, map & ~PAGE_MASK, size, dir, attrs); in xen_swiotlb_map_page()
426 if (!dma_capable(dev, dev_addr, size)) { in xen_swiotlb_map_page()
427 swiotlb_tbl_unmap_single(dev, map, size, dir); in xen_swiotlb_map_page()
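xen_swiotlb_map_page() tries the zero-copy path first and bounces only when it must: if the device can reach the buffer and it does not straddle a boundary (lines 399-400), the page is mapped in place (line 406); otherwise the data goes through the swiotlb (lines 413-420), and even the bounce address is re-checked against the mask, with the slot released on failure (lines 426-427). A condensed kernel-context sketch, assuming the era's DMA_ERROR_CODE error cookie and an unsigned long attrs (older trees pass struct dma_attrs *):

/* Hedged sketch of the map decision; BUG_ON checks and comments trimmed. */
static dma_addr_t map_page_sketch(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  unsigned long attrs,
				  dma_addr_t start_dma_addr)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);
	phys_addr_t map;

	/* Fast path: reachable, machine-safe, and bouncing not forced. */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
		return dev_addr;
	}

	/* Slow path: copy through the machine-contiguous bounce buffer. */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
	if (map == SWIOTLB_MAP_ERROR)
		return DMA_ERROR_CODE;

	dev_addr = xen_phys_to_bus(map);
	xen_dma_map_page(dev, pfn_to_page(PFN_DOWN(map)), dev_addr,
			 map & ~PAGE_MASK, size, dir, attrs);

	/* The bounce slot itself must be reachable too; otherwise back out. */
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, map, size, dir);
		return DMA_ERROR_CODE;
	}
	return dev_addr;
}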
443 size_t size, enum dma_data_direction dir, in xen_unmap_single() argument
450 xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs); in xen_unmap_single()
454 swiotlb_tbl_unmap_single(hwdev, paddr, size, dir); in xen_unmap_single()
467 dma_mark_clean(phys_to_virt(paddr), size); in xen_unmap_single()
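The unmap side must distinguish the two map outcomes. The arch hook runs first (line 450); then a bounced buffer, identified by its bus address, is released via swiotlb_tbl_unmap_single() on line 454, which also copies DMA_FROM_DEVICE data back to the original page. A zero-copy mapping needs only the dma_mark_clean() on line 467, and only on architectures where that is not a no-op. A hedged sketch of the branch, using the file's own is_xen_swiotlb_buffer() and xen_bus_to_phys() helpers:

/* Hedged sketch of xen_unmap_single(); BUG_ON(dir == DMA_NONE) trimmed. */
static void unmap_single_sketch(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir,
				unsigned long attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	/* Arch hook: cache maintenance on ARM, a no-op on x86. */
	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

	/* Bounced mapping? Release the slot (copies data back as needed). */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
		return;
	}

	/* Zero-copy mapping: after the device wrote, mark the pages clean
	 * (guard condition follows lib/swiotlb's pattern). */
	if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(phys_to_virt(paddr), size);
}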
471 size_t size, enum dma_data_direction dir, in xen_swiotlb_unmap_page() argument
474 xen_unmap_single(hwdev, dev_addr, size, dir, attrs); in xen_swiotlb_unmap_page()
490 size_t size, enum dma_data_direction dir, in xen_swiotlb_sync_single() argument
498 xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir); in xen_swiotlb_sync_single()
502 swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target); in xen_swiotlb_sync_single()
505 xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir); in xen_swiotlb_sync_single()
510 dma_mark_clean(phys_to_virt(paddr), size); in xen_swiotlb_sync_single()
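xen_swiotlb_sync_single() runs the same machinery in both directions, and the ordering is the point: syncing for the CPU performs the arch maintenance (line 498) before any copy out of the bounce buffer (line 502), while syncing for the device copies into the bounce buffer before the arch hook (line 505), so neither side ever observes a half-synced buffer. The wrappers on lines 515-525 merely pin the target, as the sketch's last parameter shows:

/* Hedged, condensed sketch of the target-ordered sync. */
static void sync_single_sketch(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	if (target == SYNC_FOR_CPU)
		xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

	/* Bounce-buffered mappings copy between slot and original here. */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

	if (target == SYNC_FOR_DEVICE)
		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

	if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(phys_to_virt(paddr), size);
}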
515 size_t size, enum dma_data_direction dir) in xen_swiotlb_sync_single_for_cpu() argument
517 xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); in xen_swiotlb_sync_single_for_cpu()
523 size_t size, enum dma_data_direction dir) in xen_swiotlb_sync_single_for_device() argument
525 xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); in xen_swiotlb_sync_single_for_device()