Searched refs: page_offset (Results 1 – 176 of 176), sorted by relevance

/linux-4.4.14/net/sunrpc/xprtrdma/
Dsvc_rdma_recvfrom.c123 u32 *page_offset, in rdma_read_chunk_lcl() argument
130 int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT; in rdma_read_chunk_lcl()
133 u32 pg_off = *page_offset; in rdma_read_chunk_lcl()
139 read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset, in rdma_read_chunk_lcl()
201 *page_offset = pg_off; in rdma_read_chunk_lcl()
216 u32 *page_offset, in rdma_read_chunk_frmr() argument
226 int nents = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT; in rdma_read_chunk_frmr()
230 u32 pg_off = *page_offset; in rdma_read_chunk_frmr()
239 read = min_t(int, (nents << PAGE_SHIFT) - *page_offset, rs_length); in rdma_read_chunk_frmr()
344 *page_offset = pg_off; in rdma_read_chunk_frmr()
[all …]
/linux-4.4.14/drivers/scsi/fnic/
Dfnic_trace.c71 fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx]; in fnic_trace_get_buf()
124 fnic_trace_entries.page_offset[rd_idx]; in fnic_get_trace_data()
166 fnic_trace_entries.page_offset[rd_idx]; in fnic_get_trace_data()
415 fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries * in fnic_trace_buf_init()
417 if (!fnic_trace_entries.page_offset) { in fnic_trace_buf_init()
427 memset((void *)fnic_trace_entries.page_offset, 0, in fnic_trace_buf_init()
438 fnic_trace_entries.page_offset[i] = fnic_buf_head; in fnic_trace_buf_init()
461 if (fnic_trace_entries.page_offset) { in fnic_trace_free()
462 vfree((void *)fnic_trace_entries.page_offset); in fnic_trace_free()
463 fnic_trace_entries.page_offset = NULL; in fnic_trace_free()
[all …]
Dfnic_trace.h51 unsigned long *page_offset; member
/linux-4.4.14/drivers/gpu/drm/ttm/
Dttm_bo_vm.c91 unsigned long page_offset; in ttm_bo_vm_fault() local
177 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + in ttm_bo_vm_fault()
182 if (unlikely(page_offset >= bo->num_pages)) { in ttm_bo_vm_fault()
216 pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset; in ttm_bo_vm_fault()
218 page = ttm->pages[page_offset]; in ttm_bo_vm_fault()
227 page_offset; in ttm_bo_vm_fault()
250 if (unlikely(++page_offset >= page_last)) in ttm_bo_vm_fault()
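The TTM fault handler above, like the GEM fault handlers further down this listing (vgem, gma500, udl, exynos, i915), derives page_offset from the faulting address: subtract vma->vm_start, shift right by PAGE_SHIFT, add the index of the object's first backing page, and reject the fault if the result is past the object's page count. A minimal user-space sketch of that arithmetic, assuming a 4 KiB page size (the function and variable names are illustrative, not taken from the kernel):

#include <stdio.h>

#define PAGE_SHIFT 12UL

/* Stand-ins for vmf->virtual_address, vma->vm_start, the object's first
 * backing page and its total page count. */
static long fault_page_index(unsigned long address, unsigned long vm_start,
                             unsigned long first_page, unsigned long num_pages)
{
    unsigned long page_offset;

    page_offset = ((address - vm_start) >> PAGE_SHIFT) + first_page;
    if (page_offset >= num_pages)
        return -1;              /* out of range: the real handlers return SIGBUS */
    return (long)page_offset;
}

int main(void)
{
    /* A fault 0x7000 bytes into a mapping backed by a 16-page object. */
    printf("page index = %ld\n",
           fault_page_index(0x40007000UL, 0x40000000UL, 0, 16));
    return 0;
}
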
/linux-4.4.14/fs/hfs/
Dbnode.c22 off += node->page_offset; in hfs_bnode_read()
64 off += node->page_offset; in hfs_bnode_write()
89 off += node->page_offset; in hfs_bnode_clear()
107 src += src_node->page_offset; in hfs_bnode_copy()
108 dst += dst_node->page_offset; in hfs_bnode_copy()
126 src += node->page_offset; in hfs_bnode_move()
127 dst += node->page_offset; in hfs_bnode_move()
282 node->page_offset = off & ~PAGE_CACHE_MASK; in __hfs_bnode_create()
341 desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset); in hfs_bnode_find()
431 memset(kmap(*pagep) + node->page_offset, 0, in hfs_bnode_create()
Dbtree.c259 off += node->page_offset; in hfs_bmap_alloc()
304 off += node->page_offset; in hfs_bmap_alloc()
350 off += node->page_offset + nidx / 8; in hfs_bmap_free()
Dbtree.h61 unsigned int page_offset; member
/linux-4.4.14/drivers/mtd/tests/
Dnandbiterrs.c58 static unsigned page_offset; variable
59 module_param(page_offset, uint, S_IRUGO);
60 MODULE_PARM_DESC(page_offset, "Page number relative to dev start");
371 offset = (loff_t)page_offset * mtd->writesize; in mtd_nandbiterrs_init()
375 page_offset, offset, eraseblock); in mtd_nandbiterrs_init()
/linux-4.4.14/drivers/gpu/drm/vgem/
Dvgem_drv.c94 pgoff_t page_offset; in vgem_gem_fault() local
98 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> in vgem_gem_fault()
103 if (page_offset > num_pages) in vgem_gem_fault()
109 obj->pages[page_offset]); in vgem_gem_fault()
/linux-4.4.14/drivers/gpu/drm/qxl/
Dqxl_image.c165 unsigned page_base, page_offset, out_offset; in qxl_image_init_helper() local
173 page_offset = offset_in_page(out_offset); in qxl_image_init_helper()
174 size = min((int)(PAGE_SIZE - page_offset), remain); in qxl_image_init_helper()
177 k_data = ptr + page_offset; in qxl_image_init_helper()
Dqxl_object.c145 struct qxl_bo *bo, int page_offset) in qxl_bo_kmap_atomic_page() argument
163 return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset); in qxl_bo_kmap_atomic_page()
166 rptr = bo->kptr + (page_offset * PAGE_SIZE); in qxl_bo_kmap_atomic_page()
174 rptr += page_offset * PAGE_SIZE; in qxl_bo_kmap_atomic_page()
Dqxl_object.h94 void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
/linux-4.4.14/net/ceph/
Dmessenger.c539 int page_offset, size_t length) in ceph_tcp_recvpage() argument
544 BUG_ON(page_offset + length > PAGE_SIZE); in ceph_tcp_recvpage()
548 ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length); in ceph_tcp_recvpage()
844 size_t *page_offset, in ceph_msg_data_bio_next() argument
858 *page_offset = (size_t) bio_vec.bv_offset; in ceph_msg_data_bio_next()
859 BUG_ON(*page_offset >= PAGE_SIZE); in ceph_msg_data_bio_next()
865 BUG_ON(*page_offset + *length > PAGE_SIZE); in ceph_msg_data_bio_next()
934 cursor->page_offset = data->alignment & ~PAGE_MASK; in ceph_msg_data_pages_cursor_init()
938 BUG_ON(length > SIZE_MAX - cursor->page_offset); in ceph_msg_data_pages_cursor_init()
939 cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE; in ceph_msg_data_pages_cursor_init()
[all …]
/linux-4.4.14/drivers/gpu/drm/gma500/
Dgem.c180 pgoff_t page_offset; in psb_gem_fault() local
207 page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start) in psb_gem_fault()
214 pfn = page_to_pfn(r->pages[page_offset]); in psb_gem_fault()
/linux-4.4.14/fs/hfsplus/
Dbnode.c26 off += node->page_offset; in hfs_bnode_read()
79 off += node->page_offset; in hfs_bnode_write()
109 off += node->page_offset; in hfs_bnode_clear()
137 src += src_node->page_offset; in hfs_bnode_copy()
138 dst += dst_node->page_offset; in hfs_bnode_copy()
194 src += node->page_offset; in hfs_bnode_move()
195 dst += node->page_offset; in hfs_bnode_move()
448 node->page_offset = off & ~PAGE_CACHE_MASK; in __hfs_bnode_create()
509 node->page_offset); in hfs_bnode_find()
599 memset(kmap(*pagep) + node->page_offset, 0, in hfs_bnode_create()
Dwrapper.c73 unsigned int page_offset = offset_in_page(buf); in hfsplus_submit_bio() local
74 unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset, in hfsplus_submit_bio()
77 ret = bio_add_page(bio, virt_to_page(buf), len, page_offset); in hfsplus_submit_bio()
Dbtree.c382 off += node->page_offset; in hfs_bmap_alloc()
428 off += node->page_offset; in hfs_bmap_alloc()
477 off += node->page_offset + nidx / 8; in hfs_bmap_free()
Dhfsplus_fs.h119 unsigned int page_offset; member
/linux-4.4.14/drivers/staging/android/ion/
Dion_test.c104 unsigned long page_offset = offset >> PAGE_SHIFT; in ion_handle_test_kernel() local
118 void *vaddr = dma_buf_kmap(dma_buf, page_offset); in ion_handle_test_kernel()
130 dma_buf_kunmap(dma_buf, page_offset, vaddr); in ion_handle_test_kernel()
138 page_offset++; in ion_handle_test_kernel()
/linux-4.4.14/drivers/net/ethernet/sfc/
Drx.c62 return page_address(buf->page) + buf->page_offset; in efx_rx_buf_va()
157 unsigned int page_offset; in efx_init_rx_buffers() local
188 page_offset = sizeof(struct efx_rx_page_state); in efx_init_rx_buffers()
195 rx_buf->page_offset = page_offset + efx->rx_ip_align; in efx_init_rx_buffers()
201 page_offset += efx->rx_page_buf_step; in efx_init_rx_buffers()
202 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE); in efx_init_rx_buffers()
451 rx_buf->page, rx_buf->page_offset, in efx_rx_packet_gro()
499 rx_buf->page_offset += hdr_len; in efx_rx_mk_skb()
504 rx_buf->page, rx_buf->page_offset, in efx_rx_mk_skb()
586 rx_buf->page_offset += efx->rx_prefix_size; in efx_rx_packet()
Dnet_driver.h282 u16 page_offset; member
Dtx.c262 efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset, in efx_skb_copy_bits_to_pio()
/linux-4.4.14/tools/testing/selftests/powerpc/primitives/
Dload_unaligned_zeropad.c111 static int do_one_test(char *p, int page_offset) in do_one_test() argument
123 …printf("offset %u load_unaligned_zeropad returned 0x%lx, should be 0x%lx\n", page_offset, got, sho… in do_one_test()
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/
Dsrq.c71 u32 page_offset = MLX5_GET(srqc, srqc, page_offset); in get_pas_size() local
75 u32 rq_sz_po = rq_sz + (page_offset * po_quanta); in get_pas_size()
103 MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset)); in rmpc_srqc_reformat()
127 MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset)); in rmpc_srqc_reformat()
/linux-4.4.14/drivers/gpu/drm/udl/
Dudl_gem.c107 unsigned int page_offset; in udl_gem_fault() local
110 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> in udl_gem_fault()
116 page = obj->pages[page_offset]; in udl_gem_fault()
/linux-4.4.14/net/core/
Dtso.c58 tso->data = page_address(frag->page.p) + frag->page_offset; in tso_build_data()
82 tso->data = page_address(frag->page.p) + frag->page_offset; in tso_start()
Dskbuff.c900 vaddr + f->page_offset, skb_frag_size(f)); in skb_copy_ubufs()
1629 skb_shinfo(skb)->frags[k].page_offset += eat; in __pskb_pull_tail()
1695 vaddr + f->page_offset + offset - start, in skb_copy_bits()
1867 f->page_offset, skb_frag_size(f), in __skb_splice_bits()
1994 memcpy(vaddr + frag->page_offset + offset - start, in skb_store_bits()
2067 csum2 = ops->update(vaddr + frag->page_offset + in __skb_checksum()
2155 frag->page_offset + in skb_copy_and_csum_bits()
2531 skb_shinfo(skb1)->frags[0].page_offset += len - pos; in skb_split_no_header()
2607 fragfrom->page_offset)) { in skb_shift()
2624 fragfrom->page_offset += shiftlen; in skb_shift()
[all …]
Ddatagram.c387 frag->page_offset + offset - in skb_copy_datagram_iter()
476 frag->page_offset + offset - start, in skb_copy_datagram_from_iter()
606 n = csum_and_copy_to_iter(vaddr + frag->page_offset + in skb_copy_and_csum_datagram()
Dpktgen.c2740 skb_shinfo(skb)->frags[i].page_offset = 0; in pktgen_finalize_skb()
Ddev.c4188 pinfo->frags[0].page_offset += grow; in gro_pull_from_frag0()
/linux-4.4.14/fs/ocfs2/
Dmmap.c67 loff_t pos = page_offset(page); in __ocfs2_page_mkwrite()
91 (page_offset(page) >= size)) in __ocfs2_page_mkwrite()
Daops.c1081 u64 offset = page_offset(page) + block_start; in ocfs2_should_read_blk()
1430 new = new | ((i_size_read(inode) <= page_offset(page)) && in ocfs2_prepare_page_for_write()
1431 (page_offset(page) <= user_pos)); in ocfs2_prepare_page_for_write()
/linux-4.4.14/mm/
Dreadahead.c173 pgoff_t page_offset = offset + page_idx; in __do_page_cache_readahead() local
175 if (page_offset > end_index) in __do_page_cache_readahead()
179 page = radix_tree_lookup(&mapping->page_tree, page_offset); in __do_page_cache_readahead()
187 page->index = page_offset; in __do_page_cache_readahead()
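The readahead loop above steps through page indices: end_index is the index of the last page the file size still covers, page_offset = offset + page_idx is the candidate page, and the walk stops once it passes end_index. A small runnable sketch of the same index arithmetic, with a 4 KiB page size and made-up file size and window:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    unsigned long long isize = 10000;                 /* file size in bytes */
    unsigned long end_index = (unsigned long)((isize - 1) >> PAGE_SHIFT);
    unsigned long offset = 1;                         /* first page index to read */
    unsigned long nr_to_read = 8;                     /* readahead window in pages */
    unsigned long page_idx;

    for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
        unsigned long page_offset = offset + page_idx;

        if (page_offset > end_index)                  /* would be past EOF */
            break;
        printf("would read page index %lu\n", page_offset);
    }
    return 0;
}
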
/linux-4.4.14/drivers/mtd/devices/
Dmtd_dataflash.c90 unsigned short page_offset; /* offset in flash address */ member
187 pageaddr = pageaddr << priv->page_offset; in dataflash_erase()
249 addr = (((unsigned)from / priv->page_size) << priv->page_offset) in dataflash_read()
350 addr = pageaddr << priv->page_offset; in dataflash_write()
395 addr = pageaddr << priv->page_offset; in dataflash_write()
639 priv->page_offset = pageoffset; in add_dataflash_otp()
Dspear_smi.c647 u32 page_offset, page_size; in spear_mtd_write() local
662 page_offset = (u32)to % flash->page_size; in spear_mtd_write()
665 if (page_offset + len <= flash->page_size) { in spear_mtd_write()
673 page_size = flash->page_size - page_offset; in spear_mtd_write()
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
Den_rx.c81 page_alloc->page_offset = 0; in mlx4_alloc_pages()
105 page_alloc[i].page_offset += frag_info->frag_stride; in mlx4_en_alloc_frags()
107 if (page_alloc[i].page_offset + frag_info->frag_stride <= in mlx4_en_alloc_frags()
117 dma = ring_alloc[i].dma + ring_alloc[i].page_offset; in mlx4_en_alloc_frags()
142 u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride; in mlx4_en_free_frag()
202 while (page_alloc->page_offset + frag_info->frag_stride < in mlx4_en_destroy_allocator()
205 page_alloc->page_offset += frag_info->frag_stride; in mlx4_en_destroy_allocator()
564 skb_frags_rx[nr].page_offset = frags[nr].page_offset; in mlx4_en_complete_rx_desc()
603 va = page_address(frags[0].page) + frags[0].page_offset; in mlx4_en_rx_skb()
631 skb_shinfo(skb)->frags[0].page_offset += pull_len; in mlx4_en_rx_skb()
[all …]
Dmlx4.h324 __be16 page_offset; member
345 __be16 page_offset; member
Dmlx4_en.h254 u32 page_offset; member
Dresource_tracker.c2654 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f; in qp_get_mtt_size() local
2660 roundup_pow_of_two((total_mem + (page_offset << 6)) >> in qp_get_mtt_size()
/linux-4.4.14/drivers/gpu/drm/i915/
Di915_gem_execbuffer.c258 uint32_t page_offset = offset_in_page(reloc->offset); in relocate_entry_cpu() local
269 *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta); in relocate_entry_cpu()
272 page_offset = offset_in_page(page_offset + sizeof(uint32_t)); in relocate_entry_cpu()
274 if (page_offset == 0) { in relocate_entry_cpu()
280 *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta); in relocate_entry_cpu()
349 uint32_t page_offset = offset_in_page(reloc->offset); in relocate_entry_clflush() local
360 clflush_write32(vaddr + page_offset, lower_32_bits(delta)); in relocate_entry_clflush()
363 page_offset = offset_in_page(page_offset + sizeof(uint32_t)); in relocate_entry_clflush()
365 if (page_offset == 0) { in relocate_entry_clflush()
371 clflush_write32(vaddr + page_offset, upper_32_bits(delta)); in relocate_entry_clflush()
Di915_gem.c741 loff_t page_base, int page_offset, in fast_user_write() argument
751 vaddr = (void __force*)vaddr_atomic + page_offset; in fast_user_write()
772 int page_offset, page_length, ret; in i915_gem_gtt_pwrite_fast() local
801 page_offset = offset_in_page(offset); in i915_gem_gtt_pwrite_fast()
803 if ((page_offset + remain) > PAGE_SIZE) in i915_gem_gtt_pwrite_fast()
804 page_length = PAGE_SIZE - page_offset; in i915_gem_gtt_pwrite_fast()
811 page_offset, user_data, page_length)) { in i915_gem_gtt_pwrite_fast()
1795 pgoff_t page_offset; in i915_gem_fault() local
1803 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> in i915_gem_fault()
1810 trace_i915_gem_object_fault(obj, page_offset, true, write); in i915_gem_fault()
[all …]
/linux-4.4.14/drivers/gpu/drm/exynos/
Dexynos_drm_gem.c480 pgoff_t page_offset; in exynos_drm_gem_fault() local
483 page_offset = ((unsigned long)vmf->virtual_address - in exynos_drm_gem_fault()
486 if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) { in exynos_drm_gem_fault()
492 pfn = page_to_pfn(exynos_gem->pages[page_offset]); in exynos_drm_gem_fault()
/linux-4.4.14/drivers/gpu/drm/
Ddrm_vma_manager.c86 unsigned long page_offset, unsigned long size) in drm_vma_offset_manager_init() argument
90 drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size); in drm_vma_offset_manager_init()
Ddrm_vm.c332 unsigned long page_offset; in drm_do_vm_sg_fault() local
342 page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); in drm_do_vm_sg_fault()
343 page = entry->pagelist[page_offset]; in drm_do_vm_sg_fault()
/linux-4.4.14/drivers/net/ethernet/intel/fm10k/
Dfm10k_main.c119 bi->page_offset = 0; in fm10k_alloc_mapped_page()
150 rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in fm10k_alloc_rx_buffers()
212 old_buff->page_offset, in fm10k_reuse_rx_page()
236 rx_buffer->page_offset ^= FM10K_RX_BUFSZ; in fm10k_can_reuse_rx_page()
239 rx_buffer->page_offset += truesize; in fm10k_can_reuse_rx_page()
241 if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ)) in fm10k_can_reuse_rx_page()
272 unsigned char *va = page_address(page) + rx_buffer->page_offset; in fm10k_add_rx_frag()
328 rx_buffer->page_offset; in fm10k_fetch_rx_buffer()
354 rx_buffer->page_offset, in fm10k_fetch_rx_buffer()
Dfm10k.h91 u32 page_offset; member
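fm10k (and igb, ixgbevf and i40e further down) reuses one page for two receive buffers: with a half-page buffer size it flips page_offset between the two halves with an XOR, otherwise it advances page_offset by the buffer's true size and stops reusing the page once another buffer would no longer fit. A hedged sketch of that decision; the constants and struct here are illustrative, not the driver's:

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE  4096u
#define RX_BUFSZ   2048u                /* illustrative half-page RX buffer */

struct rx_buffer {
    unsigned int page_offset;
};

/* Returns true if the page can go back on the ring for another buffer. */
static bool flip_or_advance(struct rx_buffer *rx)
{
#if RX_BUFSZ == PAGE_SIZE / 2
    rx->page_offset ^= RX_BUFSZ;        /* flip to the other half of the page */
    return true;
#else
    rx->page_offset += RX_BUFSZ;        /* move past the data just handed to the stack */
    return rx->page_offset <= PAGE_SIZE - RX_BUFSZ;
#endif
}

int main(void)
{
    struct rx_buffer rx = { .page_offset = 0 };

    flip_or_advance(&rx);
    printf("next buffer at offset %u\n", rx.page_offset);   /* 2048 */
    flip_or_advance(&rx);
    printf("then back at offset %u\n", rx.page_offset);     /* 0 */
    return 0;
}
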
/linux-4.4.14/fs/sysv/
Ddir.c208 pos = page_offset(page) + in sysv_add_link()
233 loff_t pos = page_offset(page) + (char *)de - kaddr; in sysv_delete_entry()
330 loff_t pos = page_offset(page) + in sysv_set_link()
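The sysv directory code above (and minix, ext2, ufs, exofs and nilfs below) converts a pointer into a kmapped directory page back into a file position: pos = page_offset(page) + (entry pointer - page start), where page_offset(page) is the page's byte offset in the file (page->index shifted by the page shift; the pagemap.h definition appears later in this listing). A small sketch of that pointer arithmetic with made-up values:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    unsigned long page_index = 5;       /* page->index of the directory page */
    char page_data[1 << PAGE_SHIFT];    /* contents of that page */
    char *kaddr = page_data;            /* what kmap() would hand back */
    char *de = kaddr + 0x1a0;           /* pointer to a directory entry in the page */

    /* pos = page_offset(page) + (char *)de - kaddr */
    long long pos = ((long long)page_index << PAGE_SHIFT) + (de - kaddr);

    printf("entry sits at file offset %lld\n", pos);    /* 5 * 4096 + 0x1a0 */
    return 0;
}
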
/linux-4.4.14/drivers/target/
Dtarget_core_rd.c128 u32 i = 0, j, page_offset = 0, sg_per_table; in rd_allocate_sgl_table() local
164 sg_table[i].page_start_offset = page_offset; in rd_allocate_sgl_table()
165 sg_table[i++].page_end_offset = (page_offset + sg_per_table) in rd_allocate_sgl_table()
183 page_offset += sg_per_table; in rd_allocate_sgl_table()
/linux-4.4.14/fs/jfs/
Djfs_metapage.c596 unsigned long page_offset; in __get_metapage() local
604 page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize; in __get_metapage()
605 if ((page_offset + size) > PAGE_CACHE_SIZE) { in __get_metapage()
640 mp = page_to_mp(page, page_offset); in __get_metapage()
670 mp->data = page_address(page) + page_offset; in __get_metapage()
Djfs_dtree.c255 int page_offset; in find_index() local
280 page_offset = offset & (PSIZE - 1); in find_index()
299 page_offset); in find_index()
346 uint page_offset; in add_index() local
446 page_offset = offset & (PSIZE - 1); in add_index()
448 if (page_offset == 0) { in add_index()
474 (struct dir_table_slot *) ((char *) mp->data + page_offset); in add_index()
/linux-4.4.14/arch/powerpc/perf/
Dhv-24x7.c908 loff_t page_offset = 0; in catalog_read() local
928 page_offset = offset / 4096; in catalog_read()
931 if (page_offset >= catalog_page_len) in catalog_read()
934 if (page_offset != 0) { in catalog_read()
936 page_offset); in catalog_read()
954 catalog_version_num, page_offset, hret); in catalog_read()
958 "catalog_len=%zu(%zu) => %zd\n", offset, page_offset, in catalog_read()
/linux-4.4.14/drivers/video/fbdev/
Dssd1307fb.c71 u32 page_offset; member
429 par->page_offset + (par->height / 8) - 1); in ssd1307fb_init()
569 if (of_property_read_u32(node, "solomon,page-offset", &par->page_offset)) in ssd1307fb_probe()
570 par->page_offset = 1; in ssd1307fb_probe()
/linux-4.4.14/fs/9p/
Dvfs_addr.c69 retval = p9_client_read(fid, page_offset(page), &to, &err); in v9fs_fid_readpage()
184 p9_client_write(v9inode->writeback_fid, page_offset(page), &from, &err); in v9fs_vfs_writepage_locked()
/linux-4.4.14/drivers/net/ethernet/myricom/myri10ge/
Dmyri10ge.c107 int page_offset; member
131 int page_offset; member
1308 if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) { in myri10ge_alloc_rx_pages()
1333 rx->page_offset = 0; in myri10ge_alloc_rx_pages()
1338 rx->info[idx].page_offset = rx->page_offset; in myri10ge_alloc_rx_pages()
1343 htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset); in myri10ge_alloc_rx_pages()
1348 rx->page_offset += SKB_DATA_ALIGN(bytes); in myri10ge_alloc_rx_pages()
1352 end_offset = rx->page_offset + bytes - 1; in myri10ge_alloc_rx_pages()
1353 if ((unsigned)(rx->page_offset ^ end_offset) > 4095) in myri10ge_alloc_rx_pages()
1354 rx->page_offset = end_offset & ~4095; in myri10ge_alloc_rx_pages()
[all …]
/linux-4.4.14/include/drm/
Ddrm_vma_manager.h54 unsigned long page_offset, unsigned long size);
/linux-4.4.14/fs/minix/
Ddir.c264 pos = page_offset(page) + p - (char *)page_address(page); in minix_add_link()
292 loff_t pos = page_offset(page) + (char*)de - kaddr; in minix_delete_entry()
416 loff_t pos = page_offset(page) + in minix_set_link()
/linux-4.4.14/fs/xfs/
Dxfs_buf.c1458 int page_index, page_offset, csize; in xfs_buf_iomove() local
1461 page_offset = (boff + bp->b_offset) & ~PAGE_MASK; in xfs_buf_iomove()
1463 csize = min_t(size_t, PAGE_SIZE - page_offset, in xfs_buf_iomove()
1466 ASSERT((csize + page_offset) <= PAGE_SIZE); in xfs_buf_iomove()
1470 memset(page_address(page) + page_offset, 0, csize); in xfs_buf_iomove()
1473 memcpy(data, page_address(page) + page_offset, csize); in xfs_buf_iomove()
1476 memcpy(page_address(page) + page_offset, data, csize); in xfs_buf_iomove()
Dxfs_aops.c702 xfs_off_t offset = page_offset(page); in xfs_convert_page()
892 loff_t offset = page_offset(page); in xfs_aops_discard_page()
1055 offset = page_offset(page); in xfs_vm_writepage()
1960 offset = page_offset(page); in xfs_vm_set_page_dirty()
Dxfs_file.c1137 loff_t lastoff = page_offset(page); in xfs_lookup_buffer_offset()
1244 lastoff < page_offset(pvec.pages[0])) { in xfs_find_get_desired_pgoff()
1307 lastoff = page_offset(page) + PAGE_SIZE; in xfs_find_get_desired_pgoff()
Dxfs_trace.h1197 __entry->pgoff = page_offset(page);
/linux-4.4.14/drivers/iommu/
Drockchip-iommu.c413 u32 dte_index, pte_index, page_offset; in log_iova() local
426 page_offset = rk_iova_page_offset(iova); in log_iova()
445 page_addr_phys = rk_pte_page_address(pte) + page_offset; in log_iova()
450 &iova, dte_index, pte_index, page_offset); in log_iova()
/linux-4.4.14/fs/nilfs2/
Dfile.c72 page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) { in nilfs_page_mkwrite()
Ddir.c80 loff_t pos = page_offset(page) + from; in nilfs_prepare_chunk()
89 loff_t pos = page_offset(page) + from; in nilfs_commit_chunk()
Dpage.c414 page_offset(page), inode->i_ino); in nilfs_clear_dirty_page()
/linux-4.4.14/include/linux/mlx5/
Dcq.h106 u32 page_offset; member
Dmlx5_ifc.h864 u8 page_offset[0x5]; member
1762 u8 page_offset[0x6]; member
1882 u8 page_offset[0x6]; member
2007 u8 page_offset[0x6]; member
2288 u8 page_offset[0x6]; member
2430 u8 page_offset[0x6]; member
Ddevice.h755 __be16 page_offset; member
/linux-4.4.14/drivers/misc/vmw_vmci/
Dvmci_queue_pair.c363 const size_t page_offset = in __qp_memcpy_to_queue() local
374 if (size - bytes_copied > PAGE_SIZE - page_offset) in __qp_memcpy_to_queue()
376 to_copy = PAGE_SIZE - page_offset; in __qp_memcpy_to_queue()
385 err = memcpy_from_msg((u8 *)va + page_offset, in __qp_memcpy_to_queue()
393 memcpy((u8 *)va + page_offset, in __qp_memcpy_to_queue()
423 const size_t page_offset = in __qp_memcpy_from_queue() local
434 if (size - bytes_copied > PAGE_SIZE - page_offset) in __qp_memcpy_from_queue()
436 to_copy = PAGE_SIZE - page_offset; in __qp_memcpy_from_queue()
445 err = memcpy_to_msg(msg, (u8 *)va + page_offset, in __qp_memcpy_from_queue()
454 (u8 *)va + page_offset, to_copy); in __qp_memcpy_from_queue()
/linux-4.4.14/fs/ceph/
Daddr.c199 u64 off = page_offset(page); in readpage_nounlock()
327 off = (u64) page_offset(page); in start_read()
482 loff_t page_off = page_offset(page); in writepage_nounlock()
836 if (page_offset(page) >= in ceph_writepages_start()
876 offset = (u64)page_offset(page); in ceph_writepages_start()
955 offset = page_offset(pages[0]); in ceph_writepages_start()
1324 loff_t off = page_offset(page); in ceph_page_mkwrite()
/linux-4.4.14/fs/reiserfs/
Dxattr.c529 size_t page_offset = (file_pos & (PAGE_CACHE_SIZE - 1)); in reiserfs_xattr_set_handle() local
557 err = __reiserfs_write_begin(page, page_offset, chunk + skip); in reiserfs_xattr_set_handle()
561 err = reiserfs_commit_write(NULL, page, page_offset, in reiserfs_xattr_set_handle()
562 page_offset + chunk + in reiserfs_xattr_set_handle()
/linux-4.4.14/fs/btrfs/
Dcompression.c460 last_offset = (page_offset(page) + PAGE_CACHE_SIZE); in add_ra_bio_pages()
591 page_offset(bio->bi_io_vec->bv_page), in btrfs_submit_compressed_read()
1000 start_byte = page_offset(page_out) - disk_start; in btrfs_decompress_buf2page()
1041 start_byte = page_offset(page_out) - disk_start; in btrfs_decompress_buf2page()
Dfile-item.c225 offset = page_offset(bvec->bv_page) + bvec->bv_offset; in __btrfs_lookup_bio_sums()
451 offset = page_offset(bvec->bv_page) + bvec->bv_offset; in btrfs_csum_one_bio()
460 offset = page_offset(bvec->bv_page) + bvec->bv_offset; in btrfs_csum_one_bio()
Dextent_io.c2055 u64 start = page_offset(page); in check_page_uptodate()
2167 start - page_offset(p), mirror_num); in repair_eb_io_failure()
2495 start - page_offset(page), in bio_readpage_error()
2579 start = page_offset(page); in end_bio_extent_writepage()
2656 start = page_offset(page); in end_bio_extent_readpage()
2825 start = page_offset(page) + bvec->bv_offset; in submit_one_bio()
2982 u64 start = page_offset(page); in __do_readpage()
3243 page_start = page_offset(pages[index]); in __extent_readpages()
3278 u64 start = page_offset(page); in __extent_read_full_page()
3443 u64 start = page_offset(page); in __extent_writepage_io()
[all …]
Dinode.c630 if (page_offset(locked_page) >= start && in compress_file_range()
631 page_offset(locked_page) <= end) { in compress_file_range()
1989 page_start = page_offset(page); in btrfs_writepage_fixup_worker()
1990 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; in btrfs_writepage_fixup_worker()
3084 size_t offset = start - page_offset(page); in btrfs_readpage_end_io_hook()
4649 page_start = page_offset(page); in btrfs_truncate_page()
6885 extent_offset = page_offset(page) + pg_offset - extent_start; in btrfs_get_extent()
8622 u64 page_start = page_offset(page); in btrfs_invalidatepage()
8747 page_start = page_offset(page); in btrfs_page_mkwrite()
Dioctl.c1143 page_start = page_offset(page); in cluster_pages_for_defrag()
1203 page_start = page_offset(pages[0]); in cluster_pages_for_defrag()
1204 page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; in cluster_pages_for_defrag()
Ddisk-io.c501 u64 start = page_offset(page); in csum_dirty_buffer()
1047 (unsigned long long)page_offset(page)); in btree_invalidatepage()
Dscrub.c737 offset - page_offset(page), in scrub_fixup_readpage()
Drelocation.c3172 page_start = page_offset(page);
/linux-4.4.14/fs/ext4/
Dfile.c487 lastoff < page_offset(pvec.pages[0])) { in ext4_find_unwritten_pgoff()
520 lastoff = page_offset(page); in ext4_find_unwritten_pgoff()
542 lastoff = page_offset(page) + PAGE_SIZE; in ext4_find_unwritten_pgoff()
Dinode.c5362 if (page->mapping != mapping || page_offset(page) > size) { in ext4_page_mkwrite()
/linux-4.4.14/fs/ufs/
Ddir.c88 loff_t pos = page_offset(page) + in ufs_set_link()
378 pos = page_offset(page) + in ufs_add_link()
540 pos = page_offset(page) + from; in ufs_delete_entry()
/linux-4.4.14/fs/ext2/
Ddir.c461 loff_t pos = page_offset(page) + in ext2_set_link()
548 pos = page_offset(page) + in ext2_add_link()
605 pos = page_offset(page) + from; in ext2_delete_entry()
/linux-4.4.14/fs/exofs/
Ddir.c402 loff_t pos = page_offset(page) + in exofs_set_link()
488 pos = page_offset(page) + in exofs_add_link()
545 pos = page_offset(page) + from; in exofs_delete_entry()
/linux-4.4.14/drivers/net/ethernet/intel/igbvf/
Digbvf.h135 unsigned int page_offset; member
Dnetdev.c185 buffer_info->page_offset = 0; in igbvf_alloc_rx_buffers()
187 buffer_info->page_offset ^= PAGE_SIZE / 2; in igbvf_alloc_rx_buffers()
191 buffer_info->page_offset, in igbvf_alloc_rx_buffers()
334 buffer_info->page_offset, in igbvf_clean_rx_irq()
610 buffer_info->page_offset = 0; in igbvf_clean_rx_ring()
/linux-4.4.14/drivers/net/ethernet/hisilicon/hns/
Dhnae.h174 u16 page_offset; member
567 + ring->desc_cb[i].page_offset); in hnae_reuse_buffer()
Dhns_enet.c322 desc_cb->page_offset += tsize; in hns_nic_reuse_page()
324 if (desc_cb->page_offset <= last_offset) { in hns_nic_reuse_page()
352 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; in hns_nic_poll_rx_skb()
386 desc_cb->page_offset + pull_len, in hns_nic_poll_rx_skb()
402 desc_cb->page_offset, in hns_nic_poll_rx_skb()
Dhnae.c49 cb->page_offset = 0; in hnae_alloc_buffer()
/linux-4.4.14/drivers/mtd/spi-nor/
Dspi-nor.c968 u32 page_offset, page_size, i; in spi_nor_write() local
979 page_offset = to & (nor->page_size - 1); in spi_nor_write()
982 if (page_offset + len <= nor->page_size) { in spi_nor_write()
986 page_size = nor->page_size - page_offset; in spi_nor_write()
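spi_nor_write() above (and spear_mtd_write() and mtd_dataflash earlier) splits a write at flash page boundaries: page_offset is the starting offset within the current page, the first chunk runs to the end of that page, and the remainder is issued page by page. A runnable sketch of the split, assuming a power-of-two 256-byte flash page (the real driver uses nor->page_size):

#include <stdio.h>

#define FLASH_PAGE_SIZE 256u            /* illustrative; nor->page_size in the driver */

static void write_chunked(unsigned int to, unsigned int len)
{
    unsigned int page_offset = to & (FLASH_PAGE_SIZE - 1);
    unsigned int first, done, chunk;

    if (page_offset + len <= FLASH_PAGE_SIZE) {
        printf("write %u bytes at %u\n", len, to);    /* fits in the current page */
        return;
    }

    first = FLASH_PAGE_SIZE - page_offset;            /* up to the page boundary */
    printf("write %u bytes at %u\n", first, to);

    for (done = first; done < len; done += chunk) {
        chunk = len - done;
        if (chunk > FLASH_PAGE_SIZE)
            chunk = FLASH_PAGE_SIZE;
        printf("write %u bytes at %u\n", chunk, to + done);
    }
}

int main(void)
{
    write_chunked(300, 600);            /* starts mid-page, crosses two boundaries */
    return 0;
}
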
/linux-4.4.14/drivers/net/ethernet/intel/i40evf/
Di40e_txrx.c518 rx_bi->page_offset = 0; in i40evf_clean_rx_ring()
674 bi->page_offset ^= PAGE_SIZE / 2; in i40evf_alloc_rx_buffers_ps()
677 bi->page_offset, in i40evf_alloc_rx_buffers_ps()
1027 rx_bi->page + rx_bi->page_offset, in i40e_clean_rx_irq_ps()
1029 rx_bi->page_offset += len; in i40e_clean_rx_irq_ps()
1037 rx_bi->page_offset, in i40e_clean_rx_irq_ps()
Di40e_txrx.h191 unsigned int page_offset; member
/linux-4.4.14/include/linux/ceph/
Dmessenger.h128 unsigned int page_offset; /* offset in page */ member
/linux-4.4.14/drivers/net/ethernet/intel/ixgbevf/
Dixgbevf_main.c610 bi->page_offset = 0; in ixgbevf_alloc_mapped_page()
642 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in ixgbevf_alloc_rx_buffers()
740 new_buff->page_offset = old_buff->page_offset; in ixgbevf_reuse_rx_page()
744 new_buff->page_offset, in ixgbevf_reuse_rx_page()
775 unsigned char *va = page_address(page) + rx_buffer->page_offset; in ixgbevf_add_rx_frag()
825 rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ; in ixgbevf_add_rx_frag()
829 rx_buffer->page_offset += truesize; in ixgbevf_add_rx_frag()
831 if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ)) in ixgbevf_add_rx_frag()
856 rx_buffer->page_offset; in ixgbevf_fetch_rx_buffer()
882 rx_buffer->page_offset, in ixgbevf_fetch_rx_buffer()
Dixgbevf.h70 unsigned int page_offset; member
/linux-4.4.14/tools/perf/util/
Dsession.c1564 u64 head, page_offset, file_offset, file_pos, size; in __perf_session__process_events() local
1574 page_offset = page_size * (data_offset / page_size); in __perf_session__process_events()
1575 file_offset = page_offset; in __perf_session__process_events()
1576 head = data_offset - page_offset; in __perf_session__process_events()
1625 page_offset = page_size * (head / page_size); in __perf_session__process_events()
1626 file_offset += page_offset; in __perf_session__process_events()
1627 head -= page_offset; in __perf_session__process_events()
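perf mmaps its data file in page-sized windows, so __perf_session__process_events() first rounds the wanted file offset down to a multiple of the page size and keeps the remainder as head, the offset of the next event inside the mapping. A short sketch of that alignment step (the 4096 stands in for the runtime page size perf queries):

#include <stdio.h>

int main(void)
{
    unsigned long long page_size = 4096;        /* stand-in for the runtime page size */
    unsigned long long data_offset = 10000;     /* file offset where event data starts */

    unsigned long long page_offset = page_size * (data_offset / page_size);
    unsigned long long file_offset = page_offset;              /* page-aligned mmap offset */
    unsigned long long head = data_offset - page_offset;       /* position inside the map */

    printf("mmap at %llu, first event at head %llu\n", file_offset, head);
    return 0;
}
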
/linux-4.4.14/net/sunrpc/
Dxdr.c1454 unsigned int page_len, thislen, page_offset; in xdr_process_buf() local
1482 page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1); in xdr_process_buf()
1484 thislen = PAGE_CACHE_SIZE - page_offset; in xdr_process_buf()
1488 sg_set_page(sg, buf->pages[i], thislen, page_offset); in xdr_process_buf()
1494 page_offset = 0; in xdr_process_buf()
/linux-4.4.14/fs/ubifs/
Dfile.c779 pgoff_t page_offset = offset + page_idx; in ubifs_do_bulk_read() local
782 if (page_offset > end_index) in ubifs_do_bulk_read()
784 page = find_or_create_page(mapping, page_offset, in ubifs_do_bulk_read()
1527 page_offset(page) > i_size_read(inode))) { in ubifs_vm_page_mkwrite()
/linux-4.4.14/include/linux/
Dskbuff.h231 __u32 page_offset; member
234 __u16 page_offset; member
1726 frag->page_offset = off; in __skb_fill_page_desc()
2472 return page_address(skb_frag_page(frag)) + frag->page_offset; in skb_frag_address()
2488 return ptr + frag->page_offset; in skb_frag_address_safe()
2536 frag->page_offset + offset, size, dir); in skb_frag_dma_map()
2680 off == frag->page_offset + skb_frag_size(frag); in skb_can_coalesce()
Dpagemap.h406 static inline loff_t page_offset(struct page *page) in page_offset() function
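These two include/linux hits are the two distinct meanings the identifier carries throughout this listing: page_offset(page) in pagemap.h is the byte offset of a page-cache page within its file, computed from page->index (shifted by PAGE_CACHE_SHIFT in 4.4), while skb_frag_t.page_offset in skbuff.h is a byte offset inside a page, added to the page's address to reach the fragment data (cf. skb_frag_address() above). A user-space sketch of both calculations, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* pagemap.h meaning: file position of a page-cache page. */
static long long file_pos_of_page(unsigned long index)
{
    return (long long)index << PAGE_SHIFT;
}

/* skbuff.h meaning: where a fragment's data starts inside its backing page,
 * as in skb_frag_address(): page_address(page) + frag->page_offset. */
static char *frag_data(char *page_base, unsigned int page_offset)
{
    return page_base + page_offset;
}

int main(void)
{
    static char page[PAGE_SIZE];

    printf("page index 3 starts at file offset %lld\n", file_pos_of_page(3));
    printf("frag data at %p (page base %p + 128)\n",
           (void *)frag_data(page, 128), (void *)page);
    return 0;
}
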
/linux-4.4.14/drivers/net/ethernet/emulex/benet/
Dbe_main.c2039 start = page_address(page_info->page) + page_info->page_offset; in skb_fill_rx_data()
2057 skb_shinfo(skb)->frags[0].page_offset = in skb_fill_rx_data()
2058 page_info->page_offset + hdr_len; in skb_fill_rx_data()
2079 if (page_info->page_offset == 0) { in skb_fill_rx_data()
2083 skb_shinfo(skb)->frags[j].page_offset = in skb_fill_rx_data()
2084 page_info->page_offset; in skb_fill_rx_data()
2161 if (i == 0 || page_info->page_offset == 0) { in be_rx_compl_process_gro()
2165 skb_shinfo(skb)->frags[j].page_offset = in be_rx_compl_process_gro()
2166 page_info->page_offset; in be_rx_compl_process_gro()
2307 u32 posted, page_offset = 0, notify = 0; in be_post_rx_frags() local
[all …]
Dbe.h274 u16 page_offset; member
/linux-4.4.14/drivers/net/ethernet/brocade/bna/
Dbnad.c372 u32 page_offset, alloc_size; in bnad_rxq_refill_page() local
387 page_offset = 0; in bnad_rxq_refill_page()
391 page_offset = prev->page_offset + unmap_q->map_size; in bnad_rxq_refill_page()
401 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, in bnad_rxq_refill_page()
411 unmap->page_offset = page_offset; in bnad_rxq_refill_page()
414 page_offset += unmap_q->map_size; in bnad_rxq_refill_page()
416 if (page_offset < alloc_size) in bnad_rxq_refill_page()
558 unmap_q->unmap[sop_ci].page_offset); in bnad_cq_setup_skb_frags()
574 unmap->page, unmap->page_offset, len); in bnad_cq_setup_skb_frags()
Dbnad.h246 u32 page_offset; member
/linux-4.4.14/drivers/scsi/be2iscsi/
Dbe_cmds.c1340 u32 page_offset, u32 num_pages) in be_cmd_iscsi_post_sgl_pages() argument
1363 req->page_offset = page_offset; in be_cmd_iscsi_post_sgl_pages()
1367 page_offset += req->num_pages; in be_cmd_iscsi_post_sgl_pages()
Dbe_cmds.h770 struct be_dma_mem *q_mem, u32 page_offset,
860 u16 page_offset; member
Dbe_main.c3522 unsigned int page_offset, i; in beiscsi_post_pages() local
3534 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * in beiscsi_post_pages()
3539 page_offset, in beiscsi_post_pages()
3541 page_offset += pm_arr->size / PAGE_SIZE; in beiscsi_post_pages()
/linux-4.4.14/drivers/net/ethernet/intel/i40e/
Di40e_txrx.h192 unsigned int page_offset; member
Di40e_txrx.c1044 rx_bi->page_offset = 0; in i40e_clean_rx_ring()
1200 bi->page_offset ^= PAGE_SIZE / 2; in i40e_alloc_rx_buffers_ps()
1203 bi->page_offset, in i40e_alloc_rx_buffers_ps()
1562 rx_bi->page + rx_bi->page_offset, in i40e_clean_rx_irq_ps()
1564 rx_bi->page_offset += len; in i40e_clean_rx_irq_ps()
1572 rx_bi->page_offset, in i40e_clean_rx_irq_ps()
/linux-4.4.14/fs/nfs/
Dfile.c545 inode->i_ino, (long long)page_offset(page)); in nfs_launder_page()
603 (long long)page_offset(page)); in nfs_vm_page_mkwrite()
Dwrite.c183 nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset) in nfs_page_group_search_locked() argument
192 if (page_offset >= req->wb_pgbase && in nfs_page_group_search_locked()
193 page_offset < (req->wb_pgbase + req->wb_bytes)) in nfs_page_group_search_locked()
/linux-4.4.14/drivers/staging/lustre/lnet/klnds/o2iblnd/
Do2iblnd.c1251 int page_offset; in kiblnd_map_tx_pool() local
1267 for (ipage = page_offset = i = 0; i < pool->po_size; i++) { in kiblnd_map_tx_pool()
1272 page_offset); in kiblnd_map_tx_pool()
1283 page_offset += IBLND_MSG_SIZE; in kiblnd_map_tx_pool()
1284 LASSERT(page_offset <= PAGE_SIZE); in kiblnd_map_tx_pool()
1286 if (page_offset == PAGE_SIZE) { in kiblnd_map_tx_pool()
1287 page_offset = 0; in kiblnd_map_tx_pool()
Do2iblnd_cb.c667 int page_offset; in kiblnd_setup_rd_iov() local
685 page_offset = vaddr & (PAGE_SIZE - 1); in kiblnd_setup_rd_iov()
693 fragnob = min(fragnob, (int)PAGE_SIZE - page_offset); in kiblnd_setup_rd_iov()
695 sg_set_page(sg, page, fragnob, page_offset); in kiblnd_setup_rd_iov()
/linux-4.4.14/fs/isofs/
Dcompress.c222 start_off = page_offset(pages[full_page]); in zisofs_fill_pages()
/linux-4.4.14/net/xfrm/
Dxfrm_ipcomp.c92 frag->page_offset = 0; in ipcomp_decompress()
/linux-4.4.14/drivers/net/ethernet/ibm/ehea/
Dehea_qmr.h174 u16 page_offset; member
/linux-4.4.14/drivers/net/ethernet/intel/igb/
Digb.h206 unsigned int page_offset; member
Digb_main.c553 buffer_info->page_offset, in igb_dump()
6589 old_buff->page_offset, in igb_reuse_rx_page()
6613 rx_buffer->page_offset ^= IGB_RX_BUFSZ; in igb_can_reuse_rx_page()
6616 rx_buffer->page_offset += truesize; in igb_can_reuse_rx_page()
6618 if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) in igb_can_reuse_rx_page()
6651 unsigned char *va = page_address(page) + rx_buffer->page_offset; in igb_add_rx_frag()
6713 rx_buffer->page_offset; in igb_fetch_rx_buffer()
6738 rx_buffer->page_offset, in igb_fetch_rx_buffer()
7021 bi->page_offset = 0; in igb_alloc_mapped_page()
7051 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in igb_alloc_rx_buffers()
/linux-4.4.14/fs/hostfs/
Dhostfs_kern.c412 loff_t base = page_offset(page); in hostfs_writepage()
445 loff_t start = page_offset(page); in hostfs_readpage()
/linux-4.4.14/drivers/net/xen-netback/
Dnetback.c139 return (u16)frag->page_offset; in frag_get_pending_idx()
144 frag->page_offset = pending_idx; in frag_set_pending_idx()
487 skb_shinfo(skb)->frags[i].page_offset, in xenvif_gop_skb()
1532 frags[i].page_offset = 0; in xenvif_handle_frag_list()
/linux-4.4.14/drivers/net/ethernet/freescale/
Dgianfar.c2780 rxb->page_offset = 0; in gfar_new_page()
2816 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT); in gfar_alloc_rx_buffs()
2938 rxb->page_offset + RXBUF_ALIGNMENT, in gfar_add_rx_frag()
2946 rxb->page_offset ^= GFAR_RXB_TRUESIZE; in gfar_add_rx_frag()
2970 old_rxb->page_offset, in gfar_reuse_rx_page()
2982 void *buff_addr = page_address(page) + rxb->page_offset; in gfar_get_next_rxbuff()
2993 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, in gfar_get_next_rxbuff()
Dgianfar.h1025 unsigned int page_offset; member
Dfec_main.c416 bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; in fec_enet_txq_submit_frag_skb()
/linux-4.4.14/drivers/memstick/core/
Dms_block.c1259 int data_size, data_offset, page, page_offset, size_to_read; in msb_read_bad_block_table() local
1276 page_offset = data_offset % msb->page_size; in msb_read_bad_block_table()
1278 DIV_ROUND_UP(data_size + page_offset, msb->page_size) * in msb_read_bad_block_table()
1307 for (i = page_offset; i < data_size / sizeof(u16); i++) { in msb_read_bad_block_table()
/linux-4.4.14/arch/x86/kvm/
Dmmu.c4255 unsigned page_offset, quadrant; in get_written_sptes() local
4259 page_offset = offset_in_page(gpa); in get_written_sptes()
4263 page_offset <<= 1; /* 32->64 */ in get_written_sptes()
4270 page_offset &= ~7; /* kill rounding error */ in get_written_sptes()
4271 page_offset <<= 1; in get_written_sptes()
4274 quadrant = page_offset >> PAGE_SHIFT; in get_written_sptes()
4275 page_offset &= ~PAGE_MASK; in get_written_sptes()
4280 spte = &sp->spt[page_offset / sizeof(*spte)]; in get_written_sptes()
/linux-4.4.14/drivers/net/hyperv/
Dnetvsc_drv.c352 frag->page_offset, in init_page_array()
366 unsigned long offset = frag->page_offset; in count_skb_frag_slots()
/linux-4.4.14/drivers/net/ethernet/intel/ixgbe/
Dixgbe_main.c806 rx_buffer_info->page_offset, in ixgbe_dump()
1529 bi->page_offset = 0; in ixgbe_alloc_mapped_page()
1561 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in ixgbe_alloc_rx_buffers()
1757 frag->page_offset += pull_len; in ixgbe_pull_tail()
1785 frag->page_offset, in ixgbe_dma_sync_frag()
1865 new_buff->page_offset, in ixgbe_reuse_rx_page()
1906 unsigned char *va = page_address(page) + rx_buffer->page_offset; in ixgbe_add_rx_frag()
1920 rx_buffer->page_offset, size, truesize); in ixgbe_add_rx_frag()
1932 rx_buffer->page_offset ^= truesize; in ixgbe_add_rx_frag()
1935 rx_buffer->page_offset += truesize; in ixgbe_add_rx_frag()
[all …]
Dixgbe.h199 unsigned int page_offset; member
Dixgbe_ethtool.c1823 data = kmap(rx_buffer->page) + rx_buffer->page_offset; in ixgbe_check_lbtest_frame()
/linux-4.4.14/fs/romfs/
Dsuper.c114 offset = page_offset(page); in romfs_readpage()
/linux-4.4.14/arch/um/drivers/
Dubd_kern.c1214 unsigned long long offset, int page_offset, in prepare_request() argument
1233 io_req->buffer = page_address(page) + page_offset; in prepare_request()
/linux-4.4.14/fs/fuse/
Dfile.c694 loff_t pos = page_offset(req->pages[0]) + num_read; in fuse_short_read()
706 loff_t pos = page_offset(page); in fuse_do_readpage()
802 loff_t pos = page_offset(req->pages[0]); in fuse_send_readpages()
1632 fuse_write_fill(req, req->ff, page_offset(page), 0); in fuse_writepage_locked()
1836 fuse_write_fill(req, data->ff, page_offset(page), 0); in fuse_writepages_fill()
/linux-4.4.14/drivers/scsi/cxgbi/
Dlibcxgbi.h100 u32 page_offset; member
Dlibcxgbi.c1530 hdr.page_offset = htonl(gl->offset); in ddp_tag_reserve()
/linux-4.4.14/drivers/net/
Dxen-netfront.c531 unsigned long offset = frag->page_offset; in xennet_count_skb_slots()
660 skb_frag_page(frag), frag->page_offset, in xennet_start_xmit()
1021 skb_shinfo(skb)->frags[0].page_offset = rx->offset; in xennet_poll()
/linux-4.4.14/drivers/md/
Draid5.c1115 int page_offset; in async_copy_data() local
1120 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; in async_copy_data()
1122 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; in async_copy_data()
1133 if (page_offset < 0) { in async_copy_data()
1134 b_offset = -page_offset; in async_copy_data()
1135 page_offset += b_offset; in async_copy_data()
1139 if (len > 0 && page_offset + len > STRIPE_SIZE) in async_copy_data()
1140 clen = STRIPE_SIZE - page_offset; in async_copy_data()
1149 b_offset == 0 && page_offset == 0 && in async_copy_data()
1153 tx = async_memcpy(*page, bio_page, page_offset, in async_copy_data()
[all …]
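In raid5's async_copy_data() above, page_offset is the signed byte distance from the stripe page's starting sector to the bio's sector: a negative value means the bio begins before the page, so b_offset skips that leading part, and the copy length is clamped so page_offset + len never runs past STRIPE_SIZE. A simplified, runnable sketch of that overlap arithmetic (STRIPE_SIZE of 4096 as on a 4 KiB-page build; the bio handling is reduced to plain byte counts):

#include <stdio.h>

#define STRIPE_SIZE 4096L               /* one stripe page on a 4 KiB-page build */

int main(void)
{
    long bio_sector = 16, stripe_sector = 24;   /* 512-byte sectors */
    long bio_len = 10000;                       /* bytes carried by this bio */

    long page_offset = (bio_sector - stripe_sector) * 512;  /* may be negative */
    long b_offset = 0;
    long clen;

    if (page_offset < 0) {              /* bio starts before the stripe page */
        b_offset = -page_offset;        /* skip this many bio bytes */
        page_offset += b_offset;        /* i.e. start copying at page offset 0 */
        bio_len -= b_offset;
    }

    clen = bio_len;
    if (clen > 0 && page_offset + clen > STRIPE_SIZE)
        clen = STRIPE_SIZE - page_offset;       /* clamp to the stripe page */

    printf("skip %ld bio bytes, copy %ld bytes at page offset %ld\n",
           b_offset, clen, page_offset);
    return 0;
}
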
/linux-4.4.14/drivers/staging/octeon/
Dethernet-tx.c288 fs->page_offset)); in cvm_oct_xmit()
/linux-4.4.14/drivers/net/ethernet/tile/
Dtilepro.c1632 void *va = pfn_to_kaddr(pfn) + f->page_offset; in tile_net_tx_frags()
1637 cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; in tile_net_tx_frags()
Dtilegx.c1634 return pfn_to_kaddr(pfn) + f->page_offset; in tile_net_frag_buf()
/linux-4.4.14/drivers/net/ethernet/ti/
Dnetcp_core.c1059 u32 page_offset = frag->page_offset; in netcp_tx_map_skb() local
1064 dma_addr = dma_map_page(dev, page, page_offset, buf_len, in netcp_tx_map_skb()
/linux-4.4.14/drivers/scsi/fcoe/
Dfcoe_transport.c310 off = frag->page_offset; in fcoe_fc_crc()
Dfcoe.c1646 + frag->page_offset; in fcoe_xmit()
/linux-4.4.14/fs/cifs/
Dfile.c1976 if (page_offset(page) >= i_size_read(mapping->host)) { in wdata_prepare_pages()
2011 wdata->offset = page_offset(wdata->pages[0]); in wdata_send_pages()
2014 page_offset(wdata->pages[nr_pages - 1]), in wdata_send_pages()
3774 loff_t range_start = page_offset(page); in cifs_launder_page()
Dcifssmb.c1959 wdata2->offset = page_offset(wdata2->pages[0]); in cifs_writev_requeue()
/linux-4.4.14/drivers/net/ethernet/
Djme.c1998 u32 page_offset, in jme_fill_tx_map() argument
2006 page_offset, in jme_fill_tx_map()
2072 frag->page_offset, skb_frag_size(frag), hidma); in jme_map_tx_skb()
/linux-4.4.14/drivers/net/ethernet/freescale/fs_enet/
Dfs_enet-main.c531 if (!IS_ALIGNED(frag->page_offset, 4)) { in fs_enet_start_xmit()
/linux-4.4.14/fs/afs/
Ddir.c152 latter = dir->i_size - page_offset(page); in afs_dir_check_page()
/linux-4.4.14/drivers/net/ethernet/sun/
Dsunvnet.c1028 err = ldc_map_single(lp, vaddr + f->page_offset, in vnet_skb_map()
1064 docopy |= f->page_offset & 7; in vnet_skb_shape()
Dcassini.c2048 frag->page_offset = off; in cas_rx_process_pkt()
2072 frag->page_offset = 0; in cas_rx_process_pkt()
2830 tabort = cas_calc_tabort(cp, fragp->page_offset, len); in cas_xmit_tx_ringN()
2841 addr + fragp->page_offset + len - tabort, in cas_xmit_tx_ringN()
Dniu.c6720 frag->page_offset, len, in niu_start_xmit()
/linux-4.4.14/arch/sparc/kernel/
Dldc.c1941 static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset) in make_cookie() argument
1945 page_offset); in make_cookie()
/linux-4.4.14/drivers/block/
Drbd.c1288 size_t page_offset; in zero_pages() local
1293 page_offset = offset & ~PAGE_MASK; in zero_pages()
1294 length = min_t(size_t, PAGE_SIZE - page_offset, end - offset); in zero_pages()
1297 memset(kaddr + page_offset, 0, length); in zero_pages()
/linux-4.4.14/drivers/infiniband/ulp/ipoib/
Dipoib_ib.c287 frag->page_offset, skb_frag_size(frag), in ipoib_dma_map_tx()
/linux-4.4.14/drivers/hsi/clients/
Dssi_protocol.c194 sg_set_page(sg, frag->page.p, frag->size, frag->page_offset); in ssip_skb_to_msg()
/linux-4.4.14/drivers/staging/lustre/lustre/llite/
Dvvp_io.c694 (page_offset(vmpage) > size))) { in vvp_io_fault_start()
/linux-4.4.14/drivers/infiniband/hw/mlx5/
Dcq.c1141 in->ctx.page_offset = 0; in mlx5_ib_resize_cq()
/linux-4.4.14/drivers/scsi/
Dipr.c2905 if (ioa_dump->page_offset >= PAGE_SIZE || in ipr_sdt_copy()
2906 ioa_dump->page_offset == 0) { in ipr_sdt_copy()
2914 ioa_dump->page_offset = 0; in ipr_sdt_copy()
2921 rem_page_len = PAGE_SIZE - ioa_dump->page_offset; in ipr_sdt_copy()
2930 &page[ioa_dump->page_offset / 4], in ipr_sdt_copy()
2936 ioa_dump->page_offset += cur_len; in ipr_sdt_copy()
Dipr.h1718 u32 page_offset; member
/linux-4.4.14/net/appletalk/
Dddp.c962 sum = atalk_sum_partial(vaddr + frag->page_offset + in atalk_sum_skb()
/linux-4.4.14/drivers/staging/unisys/visornic/
Dvisornic_main.c265 page_offset, in visor_copy_fragsinfo_from_skb()
/linux-4.4.14/drivers/net/ethernet/ibm/
Dibmveth.c989 #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1)) macro
/linux-4.4.14/fs/f2fs/
Dfile.c63 page_offset(page) > i_size_read(inode) || in f2fs_vm_page_mkwrite()
/linux-4.4.14/drivers/net/usb/
Dusbnet.c1296 f->page_offset); in build_dma_sg()
/linux-4.4.14/drivers/net/ethernet/natsemi/
Dns83820.c1165 frag->page_offset);
/linux-4.4.14/drivers/scsi/bnx2fc/
Dbnx2fc_fcoe.c330 cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset; in bnx2fc_xmit()
/linux-4.4.14/net/ipv4/
Dtcp.c3038 unsigned int offset = f->page_offset; in tcp_md5_hash_skb_data()
Dtcp_output.c1247 shinfo->frags[k].page_offset += eat; in __pskb_trim_head()
/linux-4.4.14/fs/
Dbuffer.c2437 (page_offset(page) > size)) { in block_page_mkwrite()
/linux-4.4.14/drivers/atm/
Deni.c1137 skb_shinfo(skb)->frags[i].page_offset, in do_tx()
Dhe.c2583 (void *) page_address(frag->page) + frag->page_offset, in he_send()
/linux-4.4.14/drivers/net/ethernet/marvell/
Dmv643xx_eth.c678 if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7) in has_tiny_unaligned_frags()
Dmvneta.c1752 void *addr = page_address(frag->page.p) + frag->page_offset; in mvneta_tx_frag_process()
Dmvpp2.c5209 void *addr = page_address(frag->page.p) + frag->page_offset; in mvpp2_tx_frag_process()
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb3/
Dsge.c2120 rx_frag->page_offset = sd->pg_chunk.offset + offset; in lro_add_page()
/linux-4.4.14/drivers/net/ethernet/cavium/liquidio/
Dlio_main.c2854 frag->page_offset, in liquidio_xmit()
/linux-4.4.14/drivers/s390/net/
Dqeth_core_main.c3824 frag->page_offset; in qeth_get_elements_for_frags()
3930 frag->page_offset; in __qeth_fill_buffer()
/linux-4.4.14/drivers/net/vmxnet3/
Dvmxnet3_drv.c661 frag->page_offset = 0; in vmxnet3_append_frag()