Lines Matching refs:ofs

89 int ofs; in ntfs_end_buffer_async_read() local
92 ofs = 0; in ntfs_end_buffer_async_read()
94 ofs = init_size - file_ofs; in ntfs_end_buffer_async_read()
97 memset(kaddr + bh_offset(bh) + ofs, 0, in ntfs_end_buffer_async_read()
98 bh->b_size - ofs); in ntfs_end_buffer_async_read()
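
In ntfs_end_buffer_async_read(), ofs is the length of the valid prefix of the buffer: data from ofs to the end of the buffer lies beyond the attribute's initialized size and is zeroed so stale bytes never reach the reader. Below is a minimal userspace sketch of just that arithmetic; zero_uninitialized_tail() is a hypothetical helper operating on a plain byte array, whereas the kernel works on a mapped page and a buffer_head.

#include <stdio.h>
#include <string.h>

/* Sketch: zero the tail of a buffer that extends past the initialized
 * size, mirroring the ofs computation shown above.  file_ofs is the
 * file offset the buffer starts at, init_size is how much of the file
 * holds valid data. */
static void zero_uninitialized_tail(char *buf, size_t bh_size,
				    long long file_ofs, long long init_size)
{
	size_t ofs = 0;

	if (file_ofs + (long long)bh_size <= init_size)
		return;				/* buffer fully initialized */
	if (file_ofs < init_size)
		ofs = init_size - file_ofs;	/* length of valid prefix */
	memset(buf + ofs, 0, bh_size - ofs);	/* wipe the stale tail */
}

int main(void)
{
	char buf[512];

	memset(buf, 0xAA, sizeof(buf));
	/* Buffer starts at file offset 1024, only 1200 bytes are
	 * initialized: the first 176 bytes stay, the rest is zeroed. */
	zero_uninitialized_tail(buf, sizeof(buf), 1024, 1200);
	printf("buf[175]=%d buf[176]=%d\n",
	       (unsigned char)buf[175], (unsigned char)buf[176]);
	return 0;
}
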
1124 unsigned int ofs; in ntfs_write_mst_block() local
1130 ofs = bh_offset(tbh); in ntfs_write_mst_block()
1136 mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) in ntfs_write_mst_block()
1141 (MFT_RECORD*)(kaddr + ofs), &tni)) { in ntfs_write_mst_block()
1166 err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs), in ntfs_write_mst_block()
1175 ni->type, page->index, ofs); in ntfs_write_mst_block()
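
Here ofs is the byte offset of the target buffer within its page, and the MFT record number falls out of the page index plus that offset, shifted down by the record size. A hedged sketch of the index arithmetic follows; the 4 KiB page and 1 KiB record sizes are illustrative assumptions, whereas the real code reads the shifts from the volume.

#include <stdio.h>

/* Sketch of the mft_no computation in ntfs_write_mst_block(): the
 * record number is the absolute byte position of the record divided
 * by the record size. */
#define PAGE_SHIFT_SKETCH	12	/* 4 KiB pages (assumed) */
#define REC_SIZE_BITS_SKETCH	10	/* 1 KiB MFT records (assumed) */

static long long mft_no_for(long long page_index, unsigned int ofs)
{
	return ((page_index << PAGE_SHIFT_SKETCH) + ofs)
			>> REC_SIZE_BITS_SKETCH;
}

int main(void)
{
	/* Page 3, record starting 2048 bytes into the page:
	 * absolute offset 14336, i.e. MFT record 14. */
	printf("mft_no = %lld\n", mft_no_for(3, 2048));
	return 0;
}
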
1238 unsigned int ofs; in ntfs_write_mst_block() local
1250 ofs = bh_offset(tbh); in ntfs_write_mst_block()
1252 mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) in ntfs_write_mst_block()
1256 (MFT_RECORD*)(kaddr + ofs), in ntfs_write_mst_block()
1419 unsigned int ofs = i_size & ~PAGE_CACHE_MASK; in ntfs_writepage() local
1420 zero_user_segment(page, ofs, PAGE_CACHE_SIZE); in ntfs_writepage()
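
In ntfs_writepage(), when the page being written is the last one of the file, ofs = i_size & ~PAGE_CACHE_MASK is the offset of EOF within that page, and zero_user_segment() clears everything from there to the page end. A small sketch of the same mask arithmetic, with a local stand-in for PAGE_CACHE_MASK and an assumed 4 KiB page size:

#include <stdio.h>

#define PAGE_CACHE_SIZE_SKETCH	4096UL
#define PAGE_CACHE_MASK_SKETCH	(~(PAGE_CACHE_SIZE_SKETCH - 1))

int main(void)
{
	unsigned long long i_size = 10000;	/* example file size */
	/* Offset of EOF inside its last page: bytes [ofs, 4096) of that
	 * page lie beyond i_size and are the ones zeroed. */
	unsigned int ofs = i_size & ~PAGE_CACHE_MASK_SKETCH;

	printf("zero bytes [%u, %lu) of the last page\n",
	       ofs, PAGE_CACHE_SIZE_SKETCH);
	return 0;
}
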
1565 s64 ofs, size; in ntfs_bmap() local
1589 ofs = (s64)block << blocksize_bits; in ntfs_bmap()
1599 if (unlikely(ofs >= size || (ofs + blocksize > size && size < i_size))) in ntfs_bmap()
1603 lcn = ntfs_attr_vcn_to_lcn_nolock(ni, ofs >> cluster_size_shift, false); in ntfs_bmap()
1645 delta = ofs & vol->cluster_size_mask; in ntfs_bmap()
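
ntfs_bmap() turns the block number into a byte offset, then splits that offset into a virtual cluster number for the LCN lookup and a delta within the cluster. The sketch below shows that split with assumed geometry (512-byte blocks, 4 KiB clusters); the kernel takes the shifts and mask from the ntfs_volume, and ntfs_attr_vcn_to_lcn_nolock() is kernel-internal so it is only referenced in a comment.

#include <stdio.h>

#define BLOCKSIZE_BITS		9	/* 512-byte blocks (assumed) */
#define CLUSTER_SIZE_SHIFT	12	/* 4 KiB clusters (assumed) */
#define CLUSTER_SIZE_MASK	((1UL << CLUSTER_SIZE_SHIFT) - 1)

int main(void)
{
	unsigned long block = 100;			/* logical block */
	long long ofs = (long long)block << BLOCKSIZE_BITS;	/* byte offset */
	long long vcn = ofs >> CLUSTER_SIZE_SHIFT;	/* virtual cluster */
	unsigned long delta = ofs & CLUSTER_SIZE_MASK;	/* offset in cluster */

	/* The driver would now map vcn to an lcn via
	 * ntfs_attr_vcn_to_lcn_nolock() and add delta back in. */
	printf("ofs=%lld vcn=%lld delta=%lu\n", ofs, vcn, delta);
	return 0;
}
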
1729 void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) { in mark_ntfs_record_dirty() argument
1736 end = ofs + ni->itype.index.block_size; in mark_ntfs_record_dirty()
1760 if (bh_ofs + bh_size <= ofs) in mark_ntfs_record_dirty()
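
In mark_ntfs_record_dirty(), ofs arrives as an argument: the byte offset of the NTFS record inside the page. Only buffers overlapping [ofs, ofs + record size) are dirtied; a buffer ending at or before ofs is skipped. A userspace sketch of that range test over an array of fixed-size slots, with assumed sizes, stands in for the kernel's walk over the page's buffer_head ring:

#include <stdio.h>

#define BH_SIZE		512U	/* per-buffer size (assumed) */
#define PAGE_SIZE_SK	4096U	/* page size (assumed) */
#define REC_SIZE	1024U	/* index block size (assumed) */

int main(void)
{
	unsigned int ofs = 2048;		/* record offset in the page */
	unsigned int end = ofs + REC_SIZE;
	unsigned int bh_ofs;

	for (bh_ofs = 0; bh_ofs < PAGE_SIZE_SK; bh_ofs += BH_SIZE) {
		if (bh_ofs + BH_SIZE <= ofs)
			continue;		/* entirely before the record */
		if (bh_ofs >= end)
			break;			/* past the record: done */
		printf("dirty buffer at offset %u\n", bh_ofs);
	}
	return 0;
}
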