/linux-4.1.27/net/sunrpc/xprtrdma/
D | svc_rdma_recvfrom.c |
    132  u32 *page_offset, in rdma_read_chunk_lcl() argument
    139  int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT; in rdma_read_chunk_lcl()
    142  u32 pg_off = *page_offset; in rdma_read_chunk_lcl()
    149  read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset, in rdma_read_chunk_lcl()
    211  *page_offset = pg_off; in rdma_read_chunk_lcl()
    226  u32 *page_offset, in rdma_read_chunk_frmr() argument
    236  int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT; in rdma_read_chunk_frmr()
    240  u32 pg_off = *page_offset; in rdma_read_chunk_frmr()
    249  read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset, in rdma_read_chunk_frmr()
    296  ctxt->sge[0].addr = (unsigned long)frmr->kva + *page_offset; in rdma_read_chunk_frmr()
    [all …]
|
/linux-4.1.27/drivers/scsi/fnic/ |
D | fnic_trace.c |
    70   fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx]; in fnic_trace_get_buf()
    123  fnic_trace_entries.page_offset[rd_idx]; in fnic_get_trace_data()
    165  fnic_trace_entries.page_offset[rd_idx]; in fnic_get_trace_data()
    414  fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries * in fnic_trace_buf_init()
    416  if (!fnic_trace_entries.page_offset) { in fnic_trace_buf_init()
    426  memset((void *)fnic_trace_entries.page_offset, 0, in fnic_trace_buf_init()
    437  fnic_trace_entries.page_offset[i] = fnic_buf_head; in fnic_trace_buf_init()
    460  if (fnic_trace_entries.page_offset) { in fnic_trace_free()
    461  vfree((void *)fnic_trace_entries.page_offset); in fnic_trace_free()
    462  fnic_trace_entries.page_offset = NULL; in fnic_trace_free()
    [all …]
|
D | fnic_trace.h | 51 unsigned long *page_offset; member
|
/linux-4.1.27/drivers/gpu/drm/ttm/ |
D | ttm_bo_vm.c |
    91   unsigned long page_offset; in ttm_bo_vm_fault() local
    177  page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + in ttm_bo_vm_fault()
    182  if (unlikely(page_offset >= bo->num_pages)) { in ttm_bo_vm_fault()
    216  pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset; in ttm_bo_vm_fault()
    218  page = ttm->pages[page_offset]; in ttm_bo_vm_fault()
    227  page_offset; in ttm_bo_vm_fault()
    250  if (unlikely(++page_offset >= page_last)) in ttm_bo_vm_fault()
|
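The ttm_bo_vm.c hits above (and the similar exynos, gma500, vgem, udl and i915 fault handlers later in this listing) turn the faulting user address into a page index within the mapped object. Below is a minimal userspace sketch of that arithmetic; the function and variable names are invented for illustration, a 4 KiB page size is assumed, and this is not the driver code itself.

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Illustrative only: mimics the "(address - vma->vm_start) >> PAGE_SHIFT"
     * pattern used by the DRM/TTM fault handlers in this listing; the extra
     * terms stand in for vma->vm_pgoff and the object's first page offset. */
    static unsigned long fault_page_index(unsigned long fault_addr,
                                          unsigned long vma_start,
                                          unsigned long vm_pgoff,
                                          unsigned long first_obj_pgoff)
    {
        return ((fault_addr - vma_start) >> PAGE_SHIFT) + vm_pgoff - first_obj_pgoff;
    }

    int main(void)
    {
        unsigned long vma_start = 0x700000000000UL;
        unsigned long addr = vma_start + 2 * PAGE_SIZE + 0x123;

        /* Fault lands in the third page of the mapping: index 2. */
        printf("page index = %lu\n", fault_page_index(addr, vma_start, 0, 0));
        return 0;
    }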
/linux-4.1.27/fs/hfs/ |
D | bnode.c |
    22   off += node->page_offset; in hfs_bnode_read()
    64   off += node->page_offset; in hfs_bnode_write()
    89   off += node->page_offset; in hfs_bnode_clear()
    107  src += src_node->page_offset; in hfs_bnode_copy()
    108  dst += dst_node->page_offset; in hfs_bnode_copy()
    126  src += node->page_offset; in hfs_bnode_move()
    127  dst += node->page_offset; in hfs_bnode_move()
    282  node->page_offset = off & ~PAGE_CACHE_MASK; in __hfs_bnode_create()
    341  desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset); in hfs_bnode_find()
    431  memset(kmap(*pagep) + node->page_offset, 0, in hfs_bnode_create()
|
D | btree.c | 259 off += node->page_offset; in hfs_bmap_alloc() 304 off += node->page_offset; in hfs_bmap_alloc() 350 off += node->page_offset + nidx / 8; in hfs_bmap_free()
|
D | btree.h | 61 unsigned int page_offset; member
|
/linux-4.1.27/drivers/gpu/drm/exynos/ |
D | exynos_drm_gem.c |
    82   pgoff_t page_offset) in exynos_drm_gem_map_buf() argument
    93   if (page_offset >= (buf->size >> PAGE_SHIFT)) { in exynos_drm_gem_map_buf()
    100  if (page_offset < (sgl->length >> PAGE_SHIFT)) in exynos_drm_gem_map_buf()
    102  page_offset -= (sgl->length >> PAGE_SHIFT); in exynos_drm_gem_map_buf()
    105  pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset; in exynos_drm_gem_map_buf()
    600  pgoff_t page_offset; in exynos_drm_gem_fault() local
    603  page_offset = ((unsigned long)vmf->virtual_address - in exynos_drm_gem_fault()
    609  ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset); in exynos_drm_gem_fault()
|
/linux-4.1.27/drivers/mtd/tests/ |
D | nandbiterrs.c | 58 static unsigned page_offset; variable 59 module_param(page_offset, uint, S_IRUGO); 60 MODULE_PARM_DESC(page_offset, "Page number relative to dev start"); 371 offset = (loff_t)page_offset * mtd->writesize; in mtd_nandbiterrs_init() 375 page_offset, offset, eraseblock); in mtd_nandbiterrs_init()
|
/linux-4.1.27/drivers/gpu/drm/qxl/ |
D | qxl_image.c | 165 unsigned page_base, page_offset, out_offset; in qxl_image_init_helper() local 173 page_offset = offset_in_page(out_offset); in qxl_image_init_helper() 174 size = min((int)(PAGE_SIZE - page_offset), remain); in qxl_image_init_helper() 177 k_data = ptr + page_offset; in qxl_image_init_helper()
|
D | qxl_object.c | 145 struct qxl_bo *bo, int page_offset) in qxl_bo_kmap_atomic_page() argument 163 return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset); in qxl_bo_kmap_atomic_page() 166 rptr = bo->kptr + (page_offset * PAGE_SIZE); in qxl_bo_kmap_atomic_page() 174 rptr += page_offset * PAGE_SIZE; in qxl_bo_kmap_atomic_page()
|
D | qxl_object.h | 94 void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
|
/linux-4.1.27/net/ceph/ |
D | messenger.c |
    538  int page_offset, size_t length) in ceph_tcp_recvpage() argument
    543  BUG_ON(page_offset + length > PAGE_SIZE); in ceph_tcp_recvpage()
    547  ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length); in ceph_tcp_recvpage()
    847  size_t *page_offset, in ceph_msg_data_bio_next() argument
    861  *page_offset = (size_t) bio_vec.bv_offset; in ceph_msg_data_bio_next()
    862  BUG_ON(*page_offset >= PAGE_SIZE); in ceph_msg_data_bio_next()
    868  BUG_ON(*page_offset + *length > PAGE_SIZE); in ceph_msg_data_bio_next()
    937  cursor->page_offset = data->alignment & ~PAGE_MASK; in ceph_msg_data_pages_cursor_init()
    941  BUG_ON(length > SIZE_MAX - cursor->page_offset); in ceph_msg_data_pages_cursor_init()
    942  cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE; in ceph_msg_data_pages_cursor_init()
    [all …]
|
/linux-4.1.27/drivers/gpu/drm/gma500/ |
D | gem.c | 180 pgoff_t page_offset; in psb_gem_fault() local 207 page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start) in psb_gem_fault() 214 pfn = page_to_pfn(r->pages[page_offset]); in psb_gem_fault()
|
/linux-4.1.27/fs/hfsplus/ |
D | bnode.c |
    26   off += node->page_offset; in hfs_bnode_read()
    79   off += node->page_offset; in hfs_bnode_write()
    109  off += node->page_offset; in hfs_bnode_clear()
    137  src += src_node->page_offset; in hfs_bnode_copy()
    138  dst += dst_node->page_offset; in hfs_bnode_copy()
    194  src += node->page_offset; in hfs_bnode_move()
    195  dst += node->page_offset; in hfs_bnode_move()
    448  node->page_offset = off & ~PAGE_CACHE_MASK; in __hfs_bnode_create()
    509  node->page_offset); in hfs_bnode_find()
    599  memset(kmap(*pagep) + node->page_offset, 0, in hfs_bnode_create()
|
D | wrapper.c | 73 unsigned int page_offset = offset_in_page(buf); in hfsplus_submit_bio() local 74 unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset, in hfsplus_submit_bio() 77 ret = bio_add_page(bio, virt_to_page(buf), len, page_offset); in hfsplus_submit_bio()
|
D | btree.c | 382 off += node->page_offset; in hfs_bmap_alloc() 428 off += node->page_offset; in hfs_bmap_alloc() 477 off += node->page_offset + nidx / 8; in hfs_bmap_free()
|
D | hfsplus_fs.h | 119 unsigned int page_offset; member
|
/linux-4.1.27/drivers/staging/android/ion/ |
D | ion_test.c | 104 unsigned long page_offset = offset >> PAGE_SHIFT; in ion_handle_test_kernel() local 118 void *vaddr = dma_buf_kmap(dma_buf, page_offset); in ion_handle_test_kernel() 130 dma_buf_kunmap(dma_buf, page_offset, vaddr); in ion_handle_test_kernel() 138 page_offset++; in ion_handle_test_kernel()
|
/linux-4.1.27/drivers/net/ethernet/sfc/ |
D | rx.c |
    62   return page_address(buf->page) + buf->page_offset; in efx_rx_buf_va()
    157  unsigned int page_offset; in efx_init_rx_buffers() local
    188  page_offset = sizeof(struct efx_rx_page_state); in efx_init_rx_buffers()
    195  rx_buf->page_offset = page_offset + efx->rx_ip_align; in efx_init_rx_buffers()
    201  page_offset += efx->rx_page_buf_step; in efx_init_rx_buffers()
    202  } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE); in efx_init_rx_buffers()
    451  rx_buf->page, rx_buf->page_offset, in efx_rx_packet_gro()
    499  rx_buf->page_offset += hdr_len; in efx_rx_mk_skb()
    504  rx_buf->page, rx_buf->page_offset, in efx_rx_mk_skb()
    586  rx_buf->page_offset += efx->rx_prefix_size; in efx_rx_packet()
|
D | net_driver.h | 279 u16 page_offset; member
|
D | tx.c | 262 efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset, in efx_skb_copy_bits_to_pio()
|
/linux-4.1.27/drivers/gpu/drm/vgem/ |
D | vgem_drv.c | 94 pgoff_t page_offset; in vgem_gem_fault() local 98 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> in vgem_gem_fault() 103 if (page_offset > num_pages) in vgem_gem_fault() 109 obj->pages[page_offset]); in vgem_gem_fault()
|
/linux-4.1.27/tools/testing/selftests/powerpc/primitives/ |
D | load_unaligned_zeropad.c | 102 static int do_one_test(char *p, int page_offset) in do_one_test() argument 114 …printf("offset %u load_unaligned_zeropad returned 0x%lx, should be 0x%lx\n", page_offset, got, sho… in do_one_test()
|
/linux-4.1.27/drivers/gpu/drm/udl/ |
D | udl_gem.c | 107 unsigned int page_offset; in udl_gem_fault() local 110 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> in udl_gem_fault() 116 page = obj->pages[page_offset]; in udl_gem_fault()
|
/linux-4.1.27/net/core/ |
D | tso.c | 51 tso->data = page_address(frag->page.p) + frag->page_offset; in tso_build_data() 74 tso->data = page_address(frag->page.p) + frag->page_offset; in tso_start()
|
D | skbuff.c |
    948   vaddr + f->page_offset, skb_frag_size(f)); in skb_copy_ubufs()
    1677  skb_shinfo(skb)->frags[k].page_offset += eat; in __pskb_pull_tail()
    1743  vaddr + f->page_offset + offset - start, in skb_copy_bits()
    1915  f->page_offset, skb_frag_size(f), in __skb_splice_bits()
    2030  memcpy(vaddr + frag->page_offset + offset - start, in skb_store_bits()
    2103  csum2 = ops->update(vaddr + frag->page_offset + in __skb_checksum()
    2191  frag->page_offset + in skb_copy_and_csum_bits()
    2567  skb_shinfo(skb1)->frags[0].page_offset += len - pos; in skb_split_no_header()
    2643  fragfrom->page_offset)) { in skb_shift()
    2660  fragfrom->page_offset += shiftlen; in skb_shift()
    [all …]
|
D | datagram.c | 387 frag->page_offset + offset - in skb_copy_datagram_iter() 476 frag->page_offset + offset - start, in skb_copy_datagram_from_iter() 606 n = csum_and_copy_to_iter(vaddr + frag->page_offset + in skb_copy_and_csum_datagram()
|
D | pktgen.c | 2692 skb_shinfo(skb)->frags[i].page_offset = 0; in pktgen_finalize_skb()
|
D | dev.c | 4024 pinfo->frags[0].page_offset += grow; in gro_pull_from_frag0()
|
/linux-4.1.27/fs/ocfs2/ |
D | mmap.c | 67 loff_t pos = page_offset(page); in __ocfs2_page_mkwrite() 91 (page_offset(page) >= size)) in __ocfs2_page_mkwrite()
|
D | aops.c | 1047 u64 offset = page_offset(page) + block_start; in ocfs2_should_read_blk() 1396 new = new | ((i_size_read(inode) <= page_offset(page)) && in ocfs2_prepare_page_for_write() 1397 (page_offset(page) <= user_pos)); in ocfs2_prepare_page_for_write()
|
/linux-4.1.27/mm/ |
D | readahead.c | 173 pgoff_t page_offset = offset + page_idx; in __do_page_cache_readahead() local 175 if (page_offset > end_index) in __do_page_cache_readahead() 179 page = radix_tree_lookup(&mapping->page_tree, page_offset); in __do_page_cache_readahead() 187 page->index = page_offset; in __do_page_cache_readahead()
|
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/ |
D | en_rx.c |
    81   page_alloc->page_offset = 0; in mlx4_alloc_pages()
    105  page_alloc[i].page_offset += frag_info->frag_stride; in mlx4_en_alloc_frags()
    107  if (page_alloc[i].page_offset + frag_info->frag_stride <= in mlx4_en_alloc_frags()
    117  dma = ring_alloc[i].dma + ring_alloc[i].page_offset; in mlx4_en_alloc_frags()
    142  u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride; in mlx4_en_free_frag()
    202  while (page_alloc->page_offset + frag_info->frag_stride < in mlx4_en_destroy_allocator()
    205  page_alloc->page_offset += frag_info->frag_stride; in mlx4_en_destroy_allocator()
    570  skb_frags_rx[nr].page_offset = frags[nr].page_offset; in mlx4_en_complete_rx_desc()
    609  va = page_address(frags[0].page) + frags[0].page_offset; in mlx4_en_rx_skb()
    637  skb_shinfo(skb)->frags[0].page_offset += pull_len; in mlx4_en_rx_skb()
    [all …]
|
D | mlx4.h | 316 __be16 page_offset; member 337 __be16 page_offset; member
|
D | mlx4_en.h | 253 u32 page_offset; member
|
D | resource_tracker.c | 2490 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f; in qp_get_mtt_size() local 2496 roundup_pow_of_two((total_mem + (page_offset << 6)) >> in qp_get_mtt_size()
|
/linux-4.1.27/drivers/mtd/devices/ |
D | mtd_dataflash.c |
    90   unsigned short page_offset; /* offset in flash address */ member
    186  pageaddr = pageaddr << priv->page_offset; in dataflash_erase()
    248  addr = (((unsigned)from / priv->page_size) << priv->page_offset) in dataflash_read()
    349  addr = pageaddr << priv->page_offset; in dataflash_write()
    394  addr = pageaddr << priv->page_offset; in dataflash_write()
    638  priv->page_offset = pageoffset; in add_dataflash_otp()
|
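Note that in mtd_dataflash.c the page_offset member is a shift count (the number of address bits reserved for the offset within a page), not a byte offset, which is why the hits above shift the page address left by it. A hypothetical, self-contained sketch of that addressing, with made-up names and example values:

    #include <stdio.h>

    /* Hypothetical sketch: compose a DataFlash byte address from a page
     * number, an offset inside the page, and the number of offset bits
     * (the quantity mtd_dataflash.c keeps in priv->page_offset). */
    static unsigned int dataflash_addr(unsigned int pageaddr,
                                       unsigned int byte_in_page,
                                       unsigned int page_offset_bits)
    {
        return (pageaddr << page_offset_bits) | byte_in_page;
    }

    int main(void)
    {
        /* e.g. a part with 528-byte pages addressed with 10 offset bits */
        printf("0x%06x\n", dataflash_addr(3, 17, 10));  /* prints 0x000c11 */
        return 0;
    }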
D | spear_smi.c | 647 u32 page_offset, page_size; in spear_mtd_write() local 662 page_offset = (u32)to % flash->page_size; in spear_mtd_write() 665 if (page_offset + len <= flash->page_size) { in spear_mtd_write() 673 page_size = flash->page_size - page_offset; in spear_mtd_write()
|
/linux-4.1.27/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_main.c |
    119  bi->page_offset = 0; in fm10k_alloc_mapped_page()
    150  rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in fm10k_alloc_rx_buffers()
    212  old_buff->page_offset, in fm10k_reuse_rx_page()
    236  rx_buffer->page_offset ^= FM10K_RX_BUFSZ; in fm10k_can_reuse_rx_page()
    239  rx_buffer->page_offset += truesize; in fm10k_can_reuse_rx_page()
    241  if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ)) in fm10k_can_reuse_rx_page()
    280  unsigned char *va = page_address(page) + rx_buffer->page_offset; in fm10k_add_rx_frag()
    294  rx_buffer->page_offset, size, truesize); in fm10k_add_rx_frag()
    312  rx_buffer->page_offset; in fm10k_fetch_rx_buffer()
    338  rx_buffer->page_offset, in fm10k_fetch_rx_buffer()
    [all …]
|
D | fm10k.h | 91 u32 page_offset; member
|
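The fm10k_main.c hits above (and the matching igb, ixgbe and ixgbevf receive paths later in this listing) recycle a receive page by keeping two buffers in it and XOR-ing page_offset with the buffer size to alternate between the halves. A small standalone sketch of just that alternation, assuming a 4 KiB page split into two 2 KiB buffers:

    #include <stdio.h>

    #define RX_BUFSZ 2048u   /* half of an assumed 4 KiB page */

    int main(void)
    {
        unsigned int page_offset = 0;

        /* Alternate between the two halves of a recycled receive page,
         * as the "^= FM10K_RX_BUFSZ" / "^= IGB_RX_BUFSZ" hits do. */
        for (int i = 0; i < 4; i++) {
            printf("buffer %d starts at page_offset %u\n", i, page_offset);
            page_offset ^= RX_BUFSZ;   /* 0 -> 2048 -> 0 -> ... */
        }
        return 0;
    }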
/linux-4.1.27/drivers/gpu/drm/i915/ |
D | i915_gem_execbuffer.c |
    263  uint32_t page_offset = offset_in_page(reloc->offset); in relocate_entry_cpu() local
    274  *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta); in relocate_entry_cpu()
    277  page_offset = offset_in_page(page_offset + sizeof(uint32_t)); in relocate_entry_cpu()
    279  if (page_offset == 0) { in relocate_entry_cpu()
    285  *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta); in relocate_entry_cpu()
    354  uint32_t page_offset = offset_in_page(reloc->offset); in relocate_entry_clflush() local
    365  clflush_write32(vaddr + page_offset, lower_32_bits(delta)); in relocate_entry_clflush()
    368  page_offset = offset_in_page(page_offset + sizeof(uint32_t)); in relocate_entry_clflush()
    370  if (page_offset == 0) { in relocate_entry_clflush()
    376  clflush_write32(vaddr + page_offset, upper_32_bits(delta)); in relocate_entry_clflush()
|
D | i915_gem.c |
    759   loff_t page_base, int page_offset, in fast_user_write() argument
    769   vaddr = (void __force*)vaddr_atomic + page_offset; in fast_user_write()
    790   int page_offset, page_length, ret; in i915_gem_gtt_pwrite_fast() local
    819   page_offset = offset_in_page(offset); in i915_gem_gtt_pwrite_fast()
    821   if ((page_offset + remain) > PAGE_SIZE) in i915_gem_gtt_pwrite_fast()
    822   page_length = PAGE_SIZE - page_offset; in i915_gem_gtt_pwrite_fast()
    829   page_offset, user_data, page_length)) { in i915_gem_gtt_pwrite_fast()
    1619  pgoff_t page_offset; in i915_gem_fault() local
    1627  page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> in i915_gem_fault()
    1634  trace_i915_gem_object_fault(obj, page_offset, true, write); in i915_gem_fault()
    [all …]
|
/linux-4.1.27/drivers/video/fbdev/ |
D | ssd1307fb.c | 53 u32 page_offset; member 417 par->page_offset + (par->height / 8) - 1); in ssd1307fb_ssd1306_init() 487 if (of_property_read_u32(node, "solomon,page-offset", &par->page_offset)) in ssd1307fb_probe() 488 par->page_offset = 1; in ssd1307fb_probe()
|
/linux-4.1.27/arch/powerpc/perf/ |
D | hv-24x7.c |
    897  loff_t page_offset = 0; in catalog_read() local
    917  page_offset = offset / 4096; in catalog_read()
    920  if (page_offset >= catalog_page_len) in catalog_read()
    923  if (page_offset != 0) { in catalog_read()
    925  page_offset); in catalog_read()
    943  catalog_version_num, page_offset, hret); in catalog_read()
    947  "catalog_len=%zu(%zu) => %zd\n", offset, page_offset, in catalog_read()
|
/linux-4.1.27/drivers/gpu/drm/ |
D | drm_vma_manager.c | 86 unsigned long page_offset, unsigned long size) in drm_vma_offset_manager_init() argument 90 drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size); in drm_vma_offset_manager_init()
|
D | drm_vm.c | 332 unsigned long page_offset; in drm_do_vm_sg_fault() local 342 page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); in drm_do_vm_sg_fault() 343 page = entry->pagelist[page_offset]; in drm_do_vm_sg_fault()
|
/linux-4.1.27/fs/sysv/ |
D | dir.c | 213 pos = page_offset(page) + in sysv_add_link() 238 loff_t pos = page_offset(page) + (char *)de - kaddr; in sysv_delete_entry() 335 loff_t pos = page_offset(page) + in sysv_set_link()
|
/linux-4.1.27/fs/jfs/ |
D | jfs_metapage.c | 596 unsigned long page_offset; in __get_metapage() local 604 page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize; in __get_metapage() 605 if ((page_offset + size) > PAGE_CACHE_SIZE) { in __get_metapage() 640 mp = page_to_mp(page, page_offset); in __get_metapage() 670 mp->data = page_address(page) + page_offset; in __get_metapage()
|
D | jfs_dtree.c |
    255  int page_offset; in find_index() local
    280  page_offset = offset & (PSIZE - 1); in find_index()
    299  page_offset); in find_index()
    346  uint page_offset; in add_index() local
    446  page_offset = offset & (PSIZE - 1); in add_index()
    448  if (page_offset == 0) { in add_index()
    474  (struct dir_table_slot *) ((char *) mp->data + page_offset); in add_index()
|
/linux-4.1.27/drivers/target/ |
D | target_core_rd.c | 134 u32 i = 0, j, page_offset = 0, sg_per_table; in rd_allocate_sgl_table() local 178 sg_table[i].page_start_offset = page_offset; in rd_allocate_sgl_table() 179 sg_table[i++].page_end_offset = (page_offset + sg_per_table) in rd_allocate_sgl_table() 197 page_offset += sg_per_table; in rd_allocate_sgl_table()
|
/linux-4.1.27/fs/9p/ |
D | vfs_addr.c | 69 retval = p9_client_read(fid, page_offset(page), &to, &err); in v9fs_fid_readpage() 184 p9_client_write(v9inode->writeback_fid, page_offset(page), &from, &err); in v9fs_vfs_writepage_locked()
|
/linux-4.1.27/drivers/net/ethernet/myricom/myri10ge/ |
D | myri10ge.c |
    107   int page_offset; member
    131   int page_offset; member
    1308  if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) { in myri10ge_alloc_rx_pages()
    1333  rx->page_offset = 0; in myri10ge_alloc_rx_pages()
    1338  rx->info[idx].page_offset = rx->page_offset; in myri10ge_alloc_rx_pages()
    1343  htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset); in myri10ge_alloc_rx_pages()
    1348  rx->page_offset += SKB_DATA_ALIGN(bytes); in myri10ge_alloc_rx_pages()
    1352  end_offset = rx->page_offset + bytes - 1; in myri10ge_alloc_rx_pages()
    1353  if ((unsigned)(rx->page_offset ^ end_offset) > 4095) in myri10ge_alloc_rx_pages()
    1354  rx->page_offset = end_offset & ~4095; in myri10ge_alloc_rx_pages()
    [all …]
|
/linux-4.1.27/include/drm/ |
D | drm_vma_manager.h | 54 unsigned long page_offset, unsigned long size);
|
/linux-4.1.27/fs/xfs/ |
D | xfs_buf.c |
    1460  int page_index, page_offset, csize; in xfs_buf_iomove() local
    1463  page_offset = (boff + bp->b_offset) & ~PAGE_MASK; in xfs_buf_iomove()
    1465  csize = min_t(size_t, PAGE_SIZE - page_offset, in xfs_buf_iomove()
    1468  ASSERT((csize + page_offset) <= PAGE_SIZE); in xfs_buf_iomove()
    1472  memset(page_address(page) + page_offset, 0, csize); in xfs_buf_iomove()
    1475  memcpy(data, page_address(page) + page_offset, csize); in xfs_buf_iomove()
    1478  memcpy(page_address(page) + page_offset, data, csize); in xfs_buf_iomove()
|
D | xfs_aops.c | 697 xfs_off_t offset = page_offset(page); in xfs_convert_page() 887 loff_t offset = page_offset(page); in xfs_aops_discard_page() 1050 offset = page_offset(page); in xfs_vm_writepage() 1882 offset = page_offset(page); in xfs_vm_set_page_dirty()
|
D | xfs_file.c | 1099 loff_t lastoff = page_offset(page); in xfs_lookup_buffer_offset() 1206 lastoff < page_offset(pvec.pages[0])) { in xfs_find_get_desired_pgoff() 1269 lastoff = page_offset(page) + PAGE_SIZE; in xfs_find_get_desired_pgoff()
|
D | xfs_trace.h | 1148 __entry->pgoff = page_offset(page);
|
/linux-4.1.27/include/linux/mlx5/ |
D | cq.h | 106 u32 page_offset; member
|
D | device.h | 681 __be16 page_offset; member
|
/linux-4.1.27/fs/ext4/ |
D | file.c | 378 lastoff < page_offset(pvec.pages[0])) { in ext4_find_unwritten_pgoff() 411 lastoff = page_offset(page); in ext4_find_unwritten_pgoff() 433 lastoff = page_offset(page) + PAGE_SIZE; in ext4_find_unwritten_pgoff()
|
D | inode.c | 5311 if (page->mapping != mapping || page_offset(page) > size) { in ext4_page_mkwrite()
|
/linux-4.1.27/fs/minix/ |
D | dir.c | 269 pos = page_offset(page) + p - (char *)page_address(page); in minix_add_link() 297 loff_t pos = page_offset(page) + (char*)de - kaddr; in minix_delete_entry() 421 loff_t pos = page_offset(page) + in minix_set_link()
|
/linux-4.1.27/drivers/iommu/ |
D | rockchip-iommu.c |
    413  u32 dte_index, pte_index, page_offset; in log_iova() local
    426  page_offset = rk_iova_page_offset(iova); in log_iova()
    445  page_addr_phys = rk_pte_page_address(pte) + page_offset; in log_iova()
    450  &iova, dte_index, pte_index, page_offset); in log_iova()
|
/linux-4.1.27/fs/nilfs2/ |
D | file.c | 72 page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) { in nilfs_page_mkwrite()
|
D | dir.c | 85 loff_t pos = page_offset(page) + from; in nilfs_prepare_chunk() 94 loff_t pos = page_offset(page) + from; in nilfs_commit_chunk()
|
D | page.c | 414 page_offset(page), inode->i_ino); in nilfs_clear_dirty_page()
|
/linux-4.1.27/fs/ceph/ |
D | addr.c |
    195   u64 off = page_offset(page); in readpage_nounlock()
    323   off = (u64) page_offset(page); in start_read()
    478   loff_t page_off = page_offset(page); in writepage_nounlock()
    831   if (page_offset(page) >= snap_size) { in ceph_writepages_start()
    869   offset = (u64)page_offset(page); in ceph_writepages_start()
    948   offset = page_offset(pages[0]); in ceph_writepages_start()
    1319  loff_t off = page_offset(page); in ceph_page_mkwrite()
|
/linux-4.1.27/drivers/misc/vmw_vmci/ |
D | vmci_queue_pair.c |
    363  const size_t page_offset = in __qp_memcpy_to_queue() local
    374  if (size - bytes_copied > PAGE_SIZE - page_offset) in __qp_memcpy_to_queue()
    376  to_copy = PAGE_SIZE - page_offset; in __qp_memcpy_to_queue()
    385  err = memcpy_from_msg((u8 *)va + page_offset, in __qp_memcpy_to_queue()
    393  memcpy((u8 *)va + page_offset, in __qp_memcpy_to_queue()
    423  const size_t page_offset = in __qp_memcpy_from_queue() local
    434  if (size - bytes_copied > PAGE_SIZE - page_offset) in __qp_memcpy_from_queue()
    436  to_copy = PAGE_SIZE - page_offset; in __qp_memcpy_from_queue()
    445  err = memcpy_to_msg(msg, (u8 *)va + page_offset, in __qp_memcpy_from_queue()
    454  (u8 *)va + page_offset, to_copy); in __qp_memcpy_from_queue()
|
/linux-4.1.27/fs/reiserfs/ |
D | xattr.c | 529 size_t page_offset = (file_pos & (PAGE_CACHE_SIZE - 1)); in reiserfs_xattr_set_handle() local 557 err = __reiserfs_write_begin(page, page_offset, chunk + skip); in reiserfs_xattr_set_handle() 561 err = reiserfs_commit_write(NULL, page, page_offset, in reiserfs_xattr_set_handle() 562 page_offset + chunk + in reiserfs_xattr_set_handle()
|
/linux-4.1.27/fs/btrfs/ |
D | compression.c | 463 last_offset = (page_offset(page) + PAGE_CACHE_SIZE); in add_ra_bio_pages() 595 page_offset(bio->bi_io_vec->bv_page), in btrfs_submit_compressed_read() 995 start_byte = page_offset(page_out) - disk_start; in btrfs_decompress_buf2page() 1036 start_byte = page_offset(page_out) - disk_start; in btrfs_decompress_buf2page()
|
D | extent_io.c |
    1987  u64 start = page_offset(page); in check_page_uptodate()
    2099  start - page_offset(p), mirror_num); in repair_eb_io_failure()
    2427  start - page_offset(page), in bio_readpage_error()
    2511  start = page_offset(page); in end_bio_extent_writepage()
    2591  start = page_offset(page); in end_bio_extent_readpage()
    2758  start = page_offset(page) + bvec->bv_offset; in submit_one_bio()
    2915  u64 start = page_offset(page); in __do_readpage()
    3172  page_start = page_offset(pages[index]); in __extent_readpages()
    3207  u64 start = page_offset(page); in __extent_read_full_page()
    3372  u64 start = page_offset(page); in __extent_writepage_io()
    [all …]
|
D | file-item.c | 225 offset = page_offset(bvec->bv_page) + bvec->bv_offset; in __btrfs_lookup_bio_sums() 451 offset = page_offset(bvec->bv_page) + bvec->bv_offset; in btrfs_csum_one_bio() 460 offset = page_offset(bvec->bv_page) + bvec->bv_offset; in btrfs_csum_one_bio()
|
D | inode.c |
    623   if (page_offset(locked_page) >= start && in compress_file_range()
    624   page_offset(locked_page) <= end) { in compress_file_range()
    1974  page_start = page_offset(page); in btrfs_writepage_fixup_worker()
    1975  page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; in btrfs_writepage_fixup_worker()
    3058  size_t offset = start - page_offset(page); in btrfs_readpage_end_io_hook()
    4608  page_start = page_offset(page); in btrfs_truncate_page()
    6817  extent_offset = page_offset(page) + pg_offset - extent_start; in btrfs_get_extent()
    8494  u64 page_start = page_offset(page); in btrfs_invalidatepage()
    8626  page_start = page_offset(page); in btrfs_page_mkwrite()
|
D | ioctl.c | 1137 page_start = page_offset(page); in cluster_pages_for_defrag() 1197 page_start = page_offset(pages[0]); in cluster_pages_for_defrag() 1198 page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; in cluster_pages_for_defrag()
|
D | disk-io.c | 501 u64 start = page_offset(page); in csum_dirty_buffer() 1039 (unsigned long long)page_offset(page)); in btree_invalidatepage()
|
D | scrub.c | 745 offset - page_offset(page), in scrub_fixup_readpage()
|
D | relocation.c | 3166 page_start = page_offset(page);
|
/linux-4.1.27/drivers/net/ethernet/intel/ixgbevf/ |
D | ixgbevf_main.c |
    583  bi->page_offset = 0; in ixgbevf_alloc_mapped_page()
    615  rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in ixgbevf_alloc_rx_buffers()
    686  frag->page_offset += pull_len; in ixgbevf_pull_tail()
    757  new_buff->page_offset = old_buff->page_offset; in ixgbevf_reuse_rx_page()
    761  new_buff->page_offset, in ixgbevf_reuse_rx_page()
    800  unsigned char *va = page_address(page) + rx_buffer->page_offset; in ixgbevf_add_rx_frag()
    814  rx_buffer->page_offset, size, truesize); in ixgbevf_add_rx_frag()
    826  rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ; in ixgbevf_add_rx_frag()
    830  rx_buffer->page_offset += truesize; in ixgbevf_add_rx_frag()
    832  if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ)) in ixgbevf_add_rx_frag()
    [all …]
|
D | ixgbevf.h | 70 unsigned int page_offset; member
|
/linux-4.1.27/fs/exofs/ |
D | dir.c | 408 loff_t pos = page_offset(page) + in exofs_set_link() 494 pos = page_offset(page) + in exofs_add_link() 551 pos = page_offset(page) + from; in exofs_delete_entry()
|
/linux-4.1.27/fs/ufs/ |
D | dir.c | 92 loff_t pos = page_offset(page) + in ufs_set_link() 381 pos = page_offset(page) + in ufs_add_link() 543 pos = page_offset(page) + from; in ufs_delete_entry()
|
/linux-4.1.27/fs/ext2/ |
D | dir.c | 466 loff_t pos = page_offset(page) + in ext2_set_link() 553 pos = page_offset(page) + in ext2_add_link() 610 pos = page_offset(page) + from; in ext2_delete_entry()
|
/linux-4.1.27/tools/perf/util/ |
D | session.c |
    1313  u64 head, page_offset, file_offset, file_pos, size; in __perf_session__process_events() local
    1323  page_offset = page_size * (data_offset / page_size); in __perf_session__process_events()
    1324  file_offset = page_offset; in __perf_session__process_events()
    1325  head = data_offset - page_offset; in __perf_session__process_events()
    1371  page_offset = page_size * (head / page_size); in __perf_session__process_events()
    1372  file_offset += page_offset; in __perf_session__process_events()
    1373  head -= page_offset; in __perf_session__process_events()
|
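The session.c hits above align the perf.data read window down to a page boundary before mapping it and keep the remainder in head. A tiny userspace illustration of that split; the values and variable names are chosen only for the example:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long page_size = 4096, data_offset = 10300;

        /* Align the mmap window down to a page boundary; "head" is how far
         * into the window the interesting data starts. */
        unsigned long long page_offset = page_size * (data_offset / page_size);
        unsigned long long head = data_offset - page_offset;

        printf("mmap at %llu, start reading %llu bytes in\n", page_offset, head);
        return 0;   /* prints: mmap at 8192, start reading 2108 bytes in */
    }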
/linux-4.1.27/drivers/mtd/spi-nor/ |
D | spi-nor.c |
    810  u32 page_offset, page_size, i; in spi_nor_write() local
    821  page_offset = to & (nor->page_size - 1); in spi_nor_write()
    824  if (page_offset + len <= nor->page_size) { in spi_nor_write()
    828  page_size = nor->page_size - page_offset; in spi_nor_write()
|
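The spi_nor_write() hits above split a write so that no chunk crosses a flash page boundary: the first chunk runs from page_offset to the end of the page, and later chunks start page-aligned. A rough standalone sketch of that loop, assuming a power-of-two page size and using an invented helper name:

    #include <stdio.h>

    /* Invented helper name; page_size must be a power of two here. */
    static void split_write(unsigned int to, unsigned int len, unsigned int page_size)
    {
        unsigned int page_offset = to & (page_size - 1);

        while (len) {
            unsigned int chunk = page_size - page_offset;

            if (chunk > len)
                chunk = len;
            printf("program %u bytes at 0x%x\n", chunk, to);
            to += chunk;
            len -= chunk;
            page_offset = 0;   /* later chunks start page-aligned */
        }
    }

    int main(void)
    {
        split_write(0x1f0, 600, 256);   /* 16 + 256 + 256 + 72 bytes */
        return 0;
    }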
/linux-4.1.27/drivers/net/ethernet/intel/igbvf/ |
D | igbvf.h | 135 unsigned int page_offset; member
|
D | netdev.c | 185 buffer_info->page_offset = 0; in igbvf_alloc_rx_buffers() 187 buffer_info->page_offset ^= PAGE_SIZE / 2; in igbvf_alloc_rx_buffers() 191 buffer_info->page_offset, in igbvf_alloc_rx_buffers() 333 buffer_info->page_offset, in igbvf_clean_rx_irq() 609 buffer_info->page_offset = 0; in igbvf_clean_rx_ring()
|
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/ |
D | i40e_txrx.c | 576 rx_bi->page_offset = 0; in i40evf_clean_rx_ring() 730 bi->page_offset ^= PAGE_SIZE / 2; in i40evf_alloc_rx_buffers_ps() 733 bi->page_offset, in i40evf_alloc_rx_buffers_ps() 1087 rx_bi->page + rx_bi->page_offset, in i40e_clean_rx_irq_ps() 1089 rx_bi->page_offset += len; in i40e_clean_rx_irq_ps() 1097 rx_bi->page_offset, in i40e_clean_rx_irq_ps()
|
D | i40e_txrx.h | 166 unsigned int page_offset; member
|
/linux-4.1.27/include/linux/ceph/ |
D | messenger.h | 132 unsigned int page_offset; /* offset in page */ member
|
/linux-4.1.27/include/linux/ |
D | skbuff.h |
    215   __u32 page_offset; member
    218   __u16 page_offset; member
    1599  frag->page_offset = off; in __skb_fill_page_desc()
    2320  return page_address(skb_frag_page(frag)) + frag->page_offset; in skb_frag_address()
    2336  return ptr + frag->page_offset; in skb_frag_address_safe()
    2384  frag->page_offset + offset, size, dir); in skb_frag_dma_map()
    2528  off == frag->page_offset + skb_frag_size(frag); in skb_can_coalesce()
|
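In skbuff.h, page_offset locates a fragment's data inside its backing page, and skb_frag_address() (hit 2320 above) is essentially the page's address plus page_offset. Below is a userspace analogue, with a made-up struct standing in for skb_frag_t, meant only to show the address arithmetic:

    #include <stdio.h>
    #include <string.h>

    /* Made-up stand-in for skb_frag_t: the fragment's data lives at the
     * page's address plus page_offset. */
    struct fake_frag {
        unsigned char *page;        /* stands in for page_address(page) */
        unsigned int page_offset;
        unsigned int size;
    };

    static void *frag_address(const struct fake_frag *f)
    {
        return f->page + f->page_offset;
    }

    int main(void)
    {
        static unsigned char page[4096];
        struct fake_frag f = { .page = page, .page_offset = 100, .size = 5 };

        memcpy(frag_address(&f), "hello", f.size);
        printf("%.5s at offset %u\n", (char *)frag_address(&f), f.page_offset);
        return 0;
    }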
D | pagemap.h | 399 static inline loff_t page_offset(struct page *page) in page_offset() function
|
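page_offset() in pagemap.h (hit 399 above) converts a page-cache page's index into its byte position in the file, which is why the filesystem entries throughout this listing use it as "where this page starts". A standalone sketch of that conversion, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_CACHE_SHIFT 12   /* 4 KiB pages assumed for the sketch */

    /* Illustrative stand-in for page_offset(page): the page's index in the
     * file shifted up to a byte offset. */
    static long long fake_page_offset(unsigned long page_index)
    {
        return (long long)page_index << PAGE_CACHE_SHIFT;
    }

    int main(void)
    {
        printf("page 3 of a file starts at byte %lld\n", fake_page_offset(3));
        return 0;   /* 12288 */
    }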
/linux-4.1.27/net/sunrpc/ |
D | xdr.c | 1454 unsigned int page_len, thislen, page_offset; in xdr_process_buf() local 1482 page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1); in xdr_process_buf() 1484 thislen = PAGE_CACHE_SIZE - page_offset; in xdr_process_buf() 1488 sg_set_page(sg, buf->pages[i], thislen, page_offset); in xdr_process_buf() 1494 page_offset = 0; in xdr_process_buf()
|
/linux-4.1.27/fs/ubifs/ |
D | file.c | 780 pgoff_t page_offset = offset + page_idx; in ubifs_do_bulk_read() local 783 if (page_offset > end_index) in ubifs_do_bulk_read() 785 page = find_or_create_page(mapping, page_offset, in ubifs_do_bulk_read() 1495 page_offset(page) > i_size_read(inode))) { in ubifs_vm_page_mkwrite()
|
/linux-4.1.27/drivers/net/ethernet/emulex/benet/ |
D | be_main.c |
    1797  start = page_address(page_info->page) + page_info->page_offset; in skb_fill_rx_data()
    1815  skb_shinfo(skb)->frags[0].page_offset = in skb_fill_rx_data()
    1816  page_info->page_offset + hdr_len; in skb_fill_rx_data()
    1837  if (page_info->page_offset == 0) { in skb_fill_rx_data()
    1841  skb_shinfo(skb)->frags[j].page_offset = in skb_fill_rx_data()
    1842  page_info->page_offset; in skb_fill_rx_data()
    1919  if (i == 0 || page_info->page_offset == 0) { in be_rx_compl_process_gro()
    1923  skb_shinfo(skb)->frags[j].page_offset = in be_rx_compl_process_gro()
    1924  page_info->page_offset; in be_rx_compl_process_gro()
    2065  u32 posted, page_offset = 0, notify = 0; in be_post_rx_frags() local
    [all …]
|
D | be.h | 269 u16 page_offset; member
|
/linux-4.1.27/drivers/net/ethernet/intel/i40e/ |
D | i40e_txrx.h | 167 unsigned int page_offset; member
|
D | i40e_txrx.c | 1093 rx_bi->page_offset = 0; in i40e_clean_rx_ring() 1247 bi->page_offset ^= PAGE_SIZE / 2; in i40e_alloc_rx_buffers_ps() 1250 bi->page_offset, in i40e_alloc_rx_buffers_ps() 1612 rx_bi->page + rx_bi->page_offset, in i40e_clean_rx_irq_ps() 1614 rx_bi->page_offset += len; in i40e_clean_rx_irq_ps() 1622 rx_bi->page_offset, in i40e_clean_rx_irq_ps()
|
/linux-4.1.27/drivers/net/ethernet/brocade/bna/ |
D | bnad.c |
    371  u32 page_offset, alloc_size; in bnad_rxq_refill_page() local
    386  page_offset = 0; in bnad_rxq_refill_page()
    390  page_offset = prev->page_offset + unmap_q->map_size; in bnad_rxq_refill_page()
    400  dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, in bnad_rxq_refill_page()
    404  unmap->page_offset = page_offset; in bnad_rxq_refill_page()
    407  page_offset += unmap_q->map_size; in bnad_rxq_refill_page()
    409  if (page_offset < alloc_size) in bnad_rxq_refill_page()
    544  unmap_q->unmap[sop_ci].page_offset); in bnad_cq_setup_skb_frags()
    560  unmap->page, unmap->page_offset, len); in bnad_cq_setup_skb_frags()
|
D | bnad.h | 244 u32 page_offset; member
|
/linux-4.1.27/drivers/scsi/be2iscsi/ |
D | be_cmds.c | 1336 u32 page_offset, u32 num_pages) in be_cmd_iscsi_post_sgl_pages() argument 1359 req->page_offset = page_offset; in be_cmd_iscsi_post_sgl_pages() 1363 page_offset += req->num_pages; in be_cmd_iscsi_post_sgl_pages()
|
D | be_cmds.h | 759 struct be_dma_mem *q_mem, u32 page_offset, 849 u16 page_offset; member
|
D | be_main.c | 3501 unsigned int page_offset, i; in beiscsi_post_pages() local 3513 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * in beiscsi_post_pages() 3518 page_offset, in beiscsi_post_pages() 3520 page_offset += pm_arr->size / PAGE_SIZE; in beiscsi_post_pages()
|
/linux-4.1.27/fs/nfs/ |
D | file.c | 552 inode->i_ino, (long long)page_offset(page)); in nfs_launder_page() 621 (long long)page_offset(page)); in nfs_vm_page_mkwrite()
|
D | write.c | 183 nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset) in nfs_page_group_search_locked() argument 192 if (page_offset >= req->wb_pgbase && in nfs_page_group_search_locked() 193 page_offset < (req->wb_pgbase + req->wb_bytes)) in nfs_page_group_search_locked()
|
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/ |
D | o2iblnd.c | 1249 int page_offset; in kiblnd_map_tx_pool() local 1265 for (ipage = page_offset = i = 0; i < pool->po_size; i++) { in kiblnd_map_tx_pool() 1270 page_offset); in kiblnd_map_tx_pool() 1281 page_offset += IBLND_MSG_SIZE; in kiblnd_map_tx_pool() 1282 LASSERT(page_offset <= PAGE_SIZE); in kiblnd_map_tx_pool() 1284 if (page_offset == PAGE_SIZE) { in kiblnd_map_tx_pool() 1285 page_offset = 0; in kiblnd_map_tx_pool()
|
D | o2iblnd_cb.c | 707 int page_offset; in kiblnd_setup_rd_iov() local 725 page_offset = vaddr & (PAGE_SIZE - 1); in kiblnd_setup_rd_iov() 733 fragnob = min(fragnob, (int)PAGE_SIZE - page_offset); in kiblnd_setup_rd_iov() 735 sg_set_page(sg, page, fragnob, page_offset); in kiblnd_setup_rd_iov()
|
/linux-4.1.27/fs/isofs/ |
D | compress.c | 222 start_off = page_offset(pages[full_page]); in zisofs_fill_pages()
|
/linux-4.1.27/drivers/net/ethernet/ibm/ehea/ |
D | ehea_qmr.h | 174 u16 page_offset; member
|
/linux-4.1.27/net/xfrm/ |
D | xfrm_ipcomp.c | 92 frag->page_offset = 0; in ipcomp_decompress()
|
/linux-4.1.27/drivers/net/hyperv/ |
D | netvsc_drv.c | 309 frag->page_offset, in init_page_array() 323 unsigned long offset = frag->page_offset; in count_skb_frag_slots()
|
/linux-4.1.27/drivers/net/xen-netback/ |
D | netback.c | 139 return (u16)frag->page_offset; in frag_get_pending_idx() 144 frag->page_offset = pending_idx; in frag_set_pending_idx() 433 skb_shinfo(skb)->frags[i].page_offset, in xenvif_gop_skb() 1383 frags[i].page_offset = 0; in xenvif_handle_frag_list()
|
/linux-4.1.27/drivers/net/ethernet/intel/igb/ |
D | igb.h | 206 unsigned int page_offset; member
|
D | igb_main.c |
    551   buffer_info->page_offset, in igb_dump()
    6592  old_buff->page_offset, in igb_reuse_rx_page()
    6616  rx_buffer->page_offset ^= IGB_RX_BUFSZ; in igb_can_reuse_rx_page()
    6619  rx_buffer->page_offset += truesize; in igb_can_reuse_rx_page()
    6621  if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) in igb_can_reuse_rx_page()
    6662  unsigned char *va = page_address(page) + rx_buffer->page_offset; in igb_add_rx_frag()
    6682  rx_buffer->page_offset, size, truesize); in igb_add_rx_frag()
    6700  rx_buffer->page_offset; in igb_fetch_rx_buffer()
    6725  rx_buffer->page_offset, in igb_fetch_rx_buffer()
    6856  frag->page_offset += IGB_TS_HDR_LEN; in igb_pull_tail()
    [all …]
|
/linux-4.1.27/fs/hostfs/ |
D | hostfs_kern.c | 412 loff_t base = page_offset(page); in hostfs_writepage() 445 loff_t start = page_offset(page); in hostfs_readpage()
|
/linux-4.1.27/arch/x86/kvm/ |
D | mmu.c |
    4135  unsigned page_offset, quadrant; in get_written_sptes() local
    4139  page_offset = offset_in_page(gpa); in get_written_sptes()
    4143  page_offset <<= 1; /* 32->64 */ in get_written_sptes()
    4150  page_offset &= ~7; /* kill rounding error */ in get_written_sptes()
    4151  page_offset <<= 1; in get_written_sptes()
    4154  quadrant = page_offset >> PAGE_SHIFT; in get_written_sptes()
    4155  page_offset &= ~PAGE_MASK; in get_written_sptes()
    4160  spte = &sp->spt[page_offset / sizeof(*spte)]; in get_written_sptes()
|
/linux-4.1.27/drivers/memstick/core/ |
D | ms_block.c | 1259 int data_size, data_offset, page, page_offset, size_to_read; in msb_read_bad_block_table() local 1276 page_offset = data_offset % msb->page_size; in msb_read_bad_block_table() 1278 DIV_ROUND_UP(data_size + page_offset, msb->page_size) * in msb_read_bad_block_table() 1307 for (i = page_offset; i < data_size / sizeof(u16); i++) { in msb_read_bad_block_table()
|
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_main.c |
    792   rx_buffer_info->page_offset, in ixgbe_dump()
    1489  bi->page_offset = 0; in ixgbe_alloc_mapped_page()
    1521  rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in ixgbe_alloc_rx_buffers()
    1717  frag->page_offset += pull_len; in ixgbe_pull_tail()
    1745  frag->page_offset, in ixgbe_dma_sync_frag()
    1825  new_buff->page_offset, in ixgbe_reuse_rx_page()
    1866  unsigned char *va = page_address(page) + rx_buffer->page_offset; in ixgbe_add_rx_frag()
    1880  rx_buffer->page_offset, size, truesize); in ixgbe_add_rx_frag()
    1892  rx_buffer->page_offset ^= truesize; in ixgbe_add_rx_frag()
    1895  rx_buffer->page_offset += truesize; in ixgbe_add_rx_frag()
    [all …]
|
D | ixgbe.h | 191 unsigned int page_offset; member
|
D | ixgbe_ethtool.c | 1818 data = kmap(rx_buffer->page) + rx_buffer->page_offset; in ixgbe_check_lbtest_frame()
|
/linux-4.1.27/fs/romfs/ |
D | super.c | 114 offset = page_offset(page); in romfs_readpage()
|
/linux-4.1.27/arch/um/drivers/ |
D | ubd_kern.c | 1214 unsigned long long offset, int page_offset, in prepare_request() argument 1233 io_req->buffer = page_address(page) + page_offset; in prepare_request()
|
/linux-4.1.27/fs/fuse/ |
D | file.c | 694 loff_t pos = page_offset(req->pages[0]) + num_read; in fuse_short_read() 706 loff_t pos = page_offset(page); in fuse_do_readpage() 802 loff_t pos = page_offset(req->pages[0]); in fuse_send_readpages() 1631 fuse_write_fill(req, req->ff, page_offset(page), 0); in fuse_writepage_locked() 1836 fuse_write_fill(req, data->ff, page_offset(page), 0); in fuse_writepages_fill()
|
/linux-4.1.27/drivers/scsi/cxgbi/ |
D | libcxgbi.h | 100 u32 page_offset; member
|
D | libcxgbi.c | 1526 hdr.page_offset = htonl(gl->offset); in ddp_tag_reserve()
|
/linux-4.1.27/drivers/net/ |
D | xen-netfront.c | 485 unsigned long offset = frag->page_offset; in xennet_count_skb_slots() 609 skb_frag_page(frag), frag->page_offset, in xennet_start_xmit() 970 skb_shinfo(skb)->frags[0].page_offset = rx->offset; in xennet_poll()
|
/linux-4.1.27/drivers/md/ |
D | raid5.c |
    1111  int page_offset; in async_copy_data() local
    1116  page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; in async_copy_data()
    1118  page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; in async_copy_data()
    1129  if (page_offset < 0) { in async_copy_data()
    1130  b_offset = -page_offset; in async_copy_data()
    1131  page_offset += b_offset; in async_copy_data()
    1135  if (len > 0 && page_offset + len > STRIPE_SIZE) in async_copy_data()
    1136  clen = STRIPE_SIZE - page_offset; in async_copy_data()
    1145  b_offset == 0 && page_offset == 0 && in async_copy_data()
    1149  tx = async_memcpy(*page, bio_page, page_offset, in async_copy_data()
    [all …]
|
/linux-4.1.27/drivers/staging/octeon/ |
D | ethernet-tx.c | 304 fs->page_offset)); in cvm_oct_xmit()
|
/linux-4.1.27/drivers/net/ethernet/tile/ |
D | tilepro.c | 1635 void *va = pfn_to_kaddr(pfn) + f->page_offset; in tile_net_tx_frags() 1640 cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; in tile_net_tx_frags()
|
D | tilegx.c | 1633 return pfn_to_kaddr(pfn) + f->page_offset; in tile_net_frag_buf()
|
/linux-4.1.27/drivers/net/ethernet/ti/ |
D | netcp_core.c | 1040 u32 page_offset = frag->page_offset; in netcp_tx_map_skb() local 1045 dma_addr = dma_map_page(dev, page, page_offset, buf_len, in netcp_tx_map_skb()
|
/linux-4.1.27/drivers/scsi/fcoe/ |
D | fcoe_transport.c | 310 off = frag->page_offset; in fcoe_fc_crc()
|
D | fcoe.c | 1647 + frag->page_offset; in fcoe_xmit()
|
/linux-4.1.27/fs/cifs/ |
D | file.c | 1976 if (page_offset(page) >= i_size_read(mapping->host)) { in wdata_prepare_pages() 2011 wdata->offset = page_offset(wdata->pages[0]); in wdata_send_pages() 2014 page_offset(wdata->pages[nr_pages - 1]), in wdata_send_pages() 3774 loff_t range_start = page_offset(page); in cifs_launder_page()
|
D | cifssmb.c | 1956 wdata2->offset = page_offset(wdata2->pages[0]); in cifs_writev_requeue()
|
/linux-4.1.27/drivers/net/ethernet/ |
D | jme.c | 1996 u32 page_offset, in jme_fill_tx_map() argument 2004 page_offset, in jme_fill_tx_map() 2070 frag->page_offset, skb_frag_size(frag), hidma); in jme_map_tx_skb()
|
/linux-4.1.27/fs/afs/ |
D | dir.c | 152 latter = dir->i_size - page_offset(page); in afs_dir_check_page()
|
/linux-4.1.27/drivers/net/ethernet/sun/ |
D | sunvnet.c | 1028 err = ldc_map_single(lp, vaddr + f->page_offset, in vnet_skb_map() 1064 docopy |= f->page_offset & 7; in vnet_skb_shape()
|
D | cassini.c | 2048 frag->page_offset = off; in cas_rx_process_pkt() 2072 frag->page_offset = 0; in cas_rx_process_pkt() 2830 tabort = cas_calc_tabort(cp, fragp->page_offset, len); in cas_xmit_tx_ringN() 2841 addr + fragp->page_offset + len - tabort, in cas_xmit_tx_ringN()
|
D | niu.c | 6722 frag->page_offset, len, in niu_start_xmit()
|
/linux-4.1.27/arch/sparc/kernel/ |
D | ldc.c | 1941 static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset) in make_cookie() argument 1945 page_offset); in make_cookie()
|
/linux-4.1.27/drivers/block/ |
D | rbd.c | 1286 size_t page_offset; in zero_pages() local 1291 page_offset = offset & ~PAGE_MASK; in zero_pages() 1292 length = min_t(size_t, PAGE_SIZE - page_offset, end - offset); in zero_pages() 1295 memset(kaddr + page_offset, 0, length); in zero_pages()
|
/linux-4.1.27/drivers/infiniband/ulp/ipoib/ |
D | ipoib_ib.c | 288 frag->page_offset, skb_frag_size(frag), in ipoib_dma_map_tx()
|
/linux-4.1.27/fs/f2fs/ |
D | file.c | 61 page_offset(page) > i_size_read(inode) || in f2fs_vm_page_mkwrite()
|
/linux-4.1.27/drivers/hsi/clients/ |
D | ssi_protocol.c | 194 sg_set_page(sg, frag->page.p, frag->size, frag->page_offset); in ssip_skb_to_msg()
|
/linux-4.1.27/drivers/staging/lustre/lustre/llite/ |
D | vvp_io.c | 697 (page_offset(vmpage) > size))) { in vvp_io_fault_start()
|
/linux-4.1.27/drivers/infiniband/hw/mlx5/ |
D | cq.c | 1128 in->ctx.page_offset = 0; in mlx5_ib_resize_cq()
|
/linux-4.1.27/drivers/scsi/ |
D | ipr.c | 2904 if (ioa_dump->page_offset >= PAGE_SIZE || in ipr_sdt_copy() 2905 ioa_dump->page_offset == 0) { in ipr_sdt_copy() 2913 ioa_dump->page_offset = 0; in ipr_sdt_copy() 2920 rem_page_len = PAGE_SIZE - ioa_dump->page_offset; in ipr_sdt_copy() 2929 &page[ioa_dump->page_offset / 4], in ipr_sdt_copy() 2935 ioa_dump->page_offset += cur_len; in ipr_sdt_copy()
|
D | ipr.h | 1701 u32 page_offset; member
|
/linux-4.1.27/drivers/net/ethernet/ibm/ |
D | ibmveth.c | 911 #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1)) macro
|
/linux-4.1.27/net/appletalk/ |
D | ddp.c | 962 sum = atalk_sum_partial(vaddr + frag->page_offset + in atalk_sum_skb()
|
/linux-4.1.27/drivers/net/usb/ |
D | usbnet.c | 1278 f->page_offset); in build_dma_sg()
|
/linux-4.1.27/drivers/net/ethernet/natsemi/ |
D | ns83820.c | 1165 frag->page_offset);
|
/linux-4.1.27/drivers/scsi/bnx2fc/ |
D | bnx2fc_fcoe.c | 330 cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset; in bnx2fc_xmit()
|
/linux-4.1.27/net/ipv4/ |
D | tcp.c | 2966 unsigned int offset = f->page_offset; in tcp_md5_hash_skb_data()
|
D | tcp_output.c | 1258 shinfo->frags[k].page_offset += eat; in __pskb_trim_head()
|
/linux-4.1.27/drivers/atm/ |
D | eni.c | 1137 skb_shinfo(skb)->frags[i].page_offset, in do_tx()
|
D | he.c | 2586 (void *) page_address(frag->page) + frag->page_offset, in he_send()
|
/linux-4.1.27/fs/ |
D | buffer.c | 2414 (page_offset(page) > size)) { in __block_page_mkwrite()
|
/linux-4.1.27/drivers/net/ethernet/marvell/ |
D | mv643xx_eth.c | 678 if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7) in has_tiny_unaligned_frags()
|
D | mvneta.c | 1700 void *addr = page_address(frag->page.p) + frag->page_offset; in mvneta_tx_frag_process()
|
D | mvpp2.c | 5127 void *addr = page_address(frag->page.p) + frag->page_offset; in mvpp2_tx_frag_process()
|
/linux-4.1.27/drivers/s390/net/ |
D | qeth_core_main.c | 3872 frag->page_offset; in qeth_get_elements_for_frags() 3978 frag->page_offset; in __qeth_fill_buffer()
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb3/ |
D | sge.c | 2120 rx_frag->page_offset = sd->pg_chunk.offset + offset; in lro_add_page()
|
/linux-4.1.27/drivers/net/ethernet/freescale/ |
D | fec_main.c | 412 bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; in fec_enet_txq_submit_frag_skb()
|
/linux-4.1.27/drivers/net/vmxnet3/ |
D | vmxnet3_drv.c | 650 frag->page_offset = 0; in vmxnet3_append_frag()
|