/linux-4.1.27/fs/ |
stack.c
  12: loff_t i_size;  [fsstack_copy_inode_size(), local]
  19: * i_size and i_blocks in sync together.  [fsstack_copy_inode_size()]
  21: i_size = i_size_read(src);  [fsstack_copy_inode_size()]
  53: if (sizeof(i_size) > sizeof(long) || sizeof(i_blocks) > sizeof(long))  [fsstack_copy_inode_size()]
  55: i_size_write(dst, i_size);  [fsstack_copy_inode_size()]
  57: if (sizeof(i_size) > sizeof(long) || sizeof(i_blocks) > sizeof(long))  [fsstack_copy_inode_size()]
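The guarded reads and writes above exist because a 64-bit i_size cannot be loaded or stored atomically on a 32-bit machine. Below is a minimal userspace sketch of the seqcount idea behind i_size_read()/i_size_write(); all names are illustrative, and the memory barriers real kernel code needs are omitted.

```c
/* Minimal userspace sketch of the seqcount idea behind i_size_read()/
 * i_size_write(): on 32-bit SMP a 64-bit size cannot be read atomically,
 * so readers retry while a writer holds an odd sequence number.
 * Names are illustrative; real code also needs memory barriers. */
#include <stdint.h>
#include <stdio.h>

struct sketch_inode {
    volatile unsigned seq;   /* even: stable, odd: write in progress */
    volatile int64_t  size;  /* the "i_size" being protected */
};

static int64_t sketch_size_read(struct sketch_inode *ino)
{
    unsigned s;
    int64_t v;
    do {
        s = ino->seq;                 /* sample the sequence before reading */
        v = ino->size;
    } while (s & 1 || s != ino->seq); /* retry if a writer raced with us */
    return v;
}

static void sketch_size_write(struct sketch_inode *ino, int64_t new_size)
{
    ino->seq++;             /* odd: readers will retry */
    ino->size = new_size;
    ino->seq++;             /* even again: value is stable */
}

int main(void)
{
    struct sketch_inode ino = { .seq = 0, .size = 0 };
    sketch_size_write(&ino, 4096);
    printf("size = %lld\n", (long long)sketch_size_read(&ino));
    return 0;
}
```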
|
mpage.c
  483: loff_t i_size = i_size_read(inode);  [__mpage_writepage(), local]
  541: last_block = (i_size - 1) >> blkbits;  [__mpage_writepage()]
  572: end_index = i_size >> PAGE_CACHE_SHIFT;  [__mpage_writepage()]
  575: * The page straddles i_size. It must be zeroed out on each  [__mpage_writepage()]
  582: unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);  [__mpage_writepage()]
|
read_write.c
  121: * offset isn't i_size or larger, return i_size.  [generic_file_llseek_size()]
  219: if (offset >= inode->i_size) {  [default_llseek()]
  227: * as long as offset isn't i_size or larger, return  [default_llseek()]
  228: * i_size.  [default_llseek()]
  230: if (offset >= inode->i_size) {  [default_llseek()]
  234: offset = inode->i_size;  [default_llseek()]
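These default_llseek() lines are the generic answer to SEEK_DATA/SEEK_HOLE: the whole file is data and the only hole is the implicit one at i_size. A hedged userspace sketch of that bookkeeping, with hard-coded whence values, might look like this:

```c
/* Userspace sketch of default_llseek()-style bookkeeping: SEEK_END is relative
 * to i_size, and generic SEEK_DATA/SEEK_HOLE probes are answered from i_size
 * alone. Illustrative only; not the kernel's function. */
#include <stdio.h>
#include <errno.h>

typedef long long loff_t_sketch;

static loff_t_sketch sketch_llseek(loff_t_sketch i_size, loff_t_sketch pos,
                                   loff_t_sketch offset, int whence)
{
    switch (whence) {
    case 0: /* SEEK_SET */ return offset;
    case 1: /* SEEK_CUR */ return pos + offset;
    case 2: /* SEEK_END */ return i_size + offset;
    case 3: /* SEEK_DATA: everything below i_size is data */
        return offset >= i_size ? -ENXIO : offset;
    case 4: /* SEEK_HOLE: the only hole is the virtual one at EOF */
        return offset >= i_size ? -ENXIO : i_size;
    default:
        return -EINVAL;
    }
}

int main(void)
{
    printf("%lld\n", sketch_llseek(1000, 0, -100, 2)); /* prints 900 */
    return 0;
}
```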
|
direct-io.c
  113: loff_t i_size; /* i_size when submitted */  [member of struct dio]
  242: if ((dio->rw == READ) && ((offset + transferred) > dio->i_size))  [dio_complete()]
  243: transferred = dio->i_size - offset;  [dio_complete()]
  614: * For writes inside i_size on a DIO_SKIP_HOLES filesystem we  [get_more_blocks()]
  1162: /* Once we sampled i_size check for reads beyond EOF */  [do_blockdev_direct_IO()]
  1163: dio->i_size = i_size_read(inode);  [do_blockdev_direct_IO()]
  1164: if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {  [do_blockdev_direct_IO()]
  1173: * For file extending writes updating i_size before data writeouts  [do_blockdev_direct_IO()]
|
buffer.c
  1699: * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;  [__block_write_full_page()]
  1717: * mapped buffers outside i_size will occur, because  [__block_write_full_page()]
  1718: * this page can be outside i_size when there is a  [__block_write_full_page()]
  2072: loff_t old_size = inode->i_size;  [generic_write_end()]
  2078: * No need to use i_size_read() here, the i_size  [generic_write_end()]
  2081: * But it's important to update i_size while still holding page lock:  [generic_write_end()]
  2082: * page writeout could otherwise come in and zero beyond i_size.  [generic_write_end()]
  2084: if (pos+copied > inode->i_size) {  [generic_write_end()]
  2658: if (pos+copied > inode->i_size) {  [nobh_write_end()]
  2685: loff_t i_size = i_size_read(inode);  [nobh_writepage(), local]
  2686: const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;  [nobh_writepage()]
  2690: /* Is the page fully inside i_size? */  [nobh_writepage()]
  2694: /* Is the page fully outside i_size? (truncate in progress) */  [nobh_writepage()]
  2695: offset = i_size & (PAGE_CACHE_SIZE-1);  [nobh_writepage()]
  2712: * The page straddles i_size. It must be zeroed out on each and every  [nobh_writepage()]
  2889: loff_t i_size = i_size_read(inode);  [block_write_full_page(), local]
  2890: const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;  [block_write_full_page()]
  2893: /* Is the page fully inside i_size? */  [block_write_full_page()]
  2898: /* Is the page fully outside i_size? (truncate in progress) */  [block_write_full_page()]
  2899: offset = i_size & (PAGE_CACHE_SIZE-1);  [block_write_full_page()]
  2912: * The page straddles i_size. It must be zeroed out on each and every  [block_write_full_page()]
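The nobh_writepage()/block_write_full_page() lines above implement the classic writepage test: a page is either fully inside i_size, fully outside it (truncate in progress), or straddles it and must have its tail zeroed. A small userspace sketch of just that arithmetic, with PAGE_SIZE_SKETCH standing in for PAGE_CACHE_SIZE:

```c
/* Userspace sketch of the "fully inside / fully outside / straddling i_size"
 * classification done before writeback. Illustrative names only. */
#include <stdio.h>

#define PAGE_SHIFT_SKETCH 12
#define PAGE_SIZE_SKETCH  (1UL << PAGE_SHIFT_SKETCH)

enum page_vs_isize { PAGE_INSIDE, PAGE_OUTSIDE, PAGE_STRADDLES };

static enum page_vs_isize classify_page(unsigned long index, long long i_size,
                                        unsigned *tail_to_zero)
{
    unsigned long end_index = i_size >> PAGE_SHIFT_SKETCH;
    unsigned offset = i_size & (PAGE_SIZE_SKETCH - 1);

    if (index < end_index)
        return PAGE_INSIDE;                 /* write the whole page */
    if (index > end_index || offset == 0)
        return PAGE_OUTSIDE;                /* truncate in progress: skip it */
    *tail_to_zero = PAGE_SIZE_SKETCH - offset; /* zero [offset, PAGE_SIZE) */
    return PAGE_STRADDLES;
}

int main(void)
{
    unsigned tail = 0;
    long long i_size = 3 * PAGE_SIZE_SKETCH + 100;  /* EOF inside page 3 */
    printf("page 3: %d, zero %u bytes\n", classify_page(3, i_size, &tail), tail);
    return 0;
}
```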
|
libfs.c
  436: * NOTE: Even though i_size might get updated by this function, mark_inode_dirty
  439: * case that i_size has changed.
  458: * No need to use i_size_read() here, the i_size  [simple_write_end()]
  461: if (last_pos > inode->i_size)  [simple_write_end()]
  1179: inode->i_size = 0;  [make_empty_dir_inode()]
|
attr.c
  101: if (inode->i_size < offset) {  [inode_newsize_ok()]
|
ioctl.c
  242: * If it is possible to have data blocks beyond a hole past @inode->i_size, then
  244: * beyond i_size.
|
/linux-4.1.27/fs/udf/ |
H A D | truncate.c | 64 * Truncate the last extent to match i_size. This function assumes 78 inode->i_size == iinfo->i_lenExtents) udf_truncate_tail_extent() 95 if (lbcount > inode->i_size) { udf_truncate_tail_extent() 96 if (lbcount - inode->i_size >= inode->i_sb->s_blocksize) udf_truncate_tail_extent() 98 "Too long extent after EOF in inode %u: i_size: %lld lbcount: %lld extent %u+%u\n", udf_truncate_tail_extent() 100 (long long)inode->i_size, udf_truncate_tail_extent() 104 nelen = elen - (lbcount - inode->i_size); udf_truncate_tail_extent() 117 iinfo->i_lenExtents = inode->i_size; udf_truncate_tail_extent() 132 inode->i_size == iinfo->i_lenExtents) udf_discard_prealloc() 198 * Truncate extents of inode to inode->i_size. This function can be used only 209 sector_t first_block = inode->i_size >> sb->s_blocksize_bits, offset; udf_truncate_extents() 223 (inode->i_size & (sb->s_blocksize - 1)); udf_truncate_extents() 283 iinfo->i_lenExtents = inode->i_size; udf_truncate_extents()
|
H A D | file.c | 48 memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size); __udf_adinicb_readpage() 49 memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size); __udf_adinicb_readpage() 74 memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr, inode->i_size); udf_adinicb_writepage() 144 iinfo->i_lenAlloc = max(end, inode->i_size); udf_file_write_iter() 225 * Grab i_mutex to avoid races with writes changing i_size udf_release_file()
|
H A D | symlink.c | 112 if (inode->i_size > inode->i_sb->s_blocksize) { udf_symlink_filler() 134 err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE); udf_symlink_filler()
|
H A D | lowlevel.c | 61 lblock = bdev->bd_inode->i_size >> sb->s_blocksize_bits; udf_get_last_block()
|
H A D | namei.c | 163 size = udf_ext0_offset(dir) + dir->i_size; udf_find_entry() 304 loff_t size = udf_ext0_offset(dir) + dir->i_size; udf_add_entry() 504 dir->i_size += nfidlen; udf_add_entry() 512 elen -= dinfo->i_lenExtents - dir->i_size; udf_add_entry() 518 dinfo->i_lenExtents = dir->i_size; udf_add_entry() 703 loff_t size = udf_ext0_offset(dir) + dir->i_size; empty_dir() 794 inode->i_size = 0; udf_rmdir() 978 inode->i_size = elen; udf_symlink() 980 iinfo->i_lenAlloc = inode->i_size; udf_symlink()
|
H A D | inode.c | 152 inode->i_size != iinfo->i_lenExtents) { udf_evict_inode() 155 (unsigned long long)inode->i_size, udf_evict_inode() 170 loff_t isize = inode->i_size; udf_write_failed() 315 inode->i_size); udf_expand_file_adinicb() 339 int size = udf_ext0_offset(inode) + inode->i_size; udf_expand_dir_adinicb() 348 if (!inode->i_size) { udf_expand_dir_adinicb() 408 iinfo->i_lenExtents = inode->i_size; udf_expand_dir_adinicb() 412 udf_add_aext(inode, &epos, &eloc, inode->i_size, 0); udf_expand_dir_adinicb() 1218 if (newsize > inode->i_size) { udf_setsize() 1443 inode->i_size = le64_to_cpu(fe->informationLength); udf_read_inode() 1444 iinfo->i_lenExtents = inode->i_size; udf_read_inode() 1503 if (iinfo->i_lenAlloc != inode->i_size) udf_read_inode() 1506 if (inode->i_size > bs - udf_file_entry_alloc_offset(inode)) udf_read_inode() 1693 fe->informationLength = cpu_to_le64(inode->i_size); udf_update_inode() 1751 efe->objectSize = cpu_to_le64(inode->i_size); udf_update_inode()
|
H A D | dir.c | 52 loff_t size = udf_ext0_offset(dir) + dir->i_size; udf_readdir()
|
/linux-4.1.27/fs/efs/ |
H A D | file.c | 26 __func__, block, inode->i_blocks, inode->i_size); efs_get_block() 50 __func__, block, inode->i_blocks, inode->i_size); efs_bmap()
|
H A D | symlink.c | 19 efs_block_t size = inode->i_size; efs_symlink_readpage()
|
H A D | dir.c | 28 if (inode->i_size & (EFS_DIRBSIZE-1)) efs_readdir()
|
H A D | namei.c | 26 if (inode->i_size & (EFS_DIRBSIZE-1)) efs_find_entry()
|
inode.c
  102: inode->i_size = be32_to_cpu(efs_inode->di_size);  [efs_iget()]
  109: if (inode->i_size == 0) {  [efs_iget()]
  112: inode->i_blocks = ((inode->i_size - 1) >> EFS_BLOCKSIZE_BITS) + 1;  [efs_iget()]
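The efs_iget() lines derive i_blocks from i_size with a ceiling division, with the zero-length case split out so the "-1" cannot underflow. A sketch of just that arithmetic, assuming 512-byte EFS blocks (an assumption of this sketch):

```c
/* Userspace sketch of the block-count arithmetic in efs_iget(): ceiling
 * division of i_size by the block size, with empty files handled first. */
#include <stdio.h>

#define EFS_BLOCKSIZE_BITS_SKETCH 9   /* 512-byte blocks (assumption) */

static unsigned long blocks_for_size(long long i_size)
{
    if (i_size == 0)
        return 0;        /* the driver special-cases empty files separately */
    return ((i_size - 1) >> EFS_BLOCKSIZE_BITS_SKETCH) + 1;
}

int main(void)
{
    printf("%lu %lu %lu\n", blocks_for_size(0), blocks_for_size(512),
           blocks_for_size(513));   /* prints: 0 1 2 */
    return 0;
}
```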
|
/linux-4.1.27/fs/qnx4/ |
H A D | dir.c | 28 QNX4DEBUG((KERN_INFO "qnx4_readdir:i_size = %ld\n", (long) inode->i_size)); qnx4_readdir() 31 while (ctx->pos < inode->i_size) { qnx4_readdir()
|
H A D | inode.c | 300 inode->i_size = le32_to_cpu(raw_inode->di_size); qnx4_iget() 313 qnx4_i(inode)->mmu_private = inode->i_size; qnx4_iget() 320 qnx4_i(inode)->mmu_private = inode->i_size; qnx4_iget()
|
H A D | namei.c | 65 while (blkofs * QNX4_BLOCK_SIZE + offset < dir->i_size) { qnx4_find_entry()
|
/linux-4.1.27/fs/ubifs/ |
H A D | dir.c | 36 * @i_size of the parent inode and writes the parent inode together with the 121 inode->i_size = ui->ui_size = UBIFS_INO_NODE_SZ; ubifs_new_inode() 278 dir->i_size += sz_change; ubifs_create() 279 dir_ui->ui_size = dir->i_size; ubifs_create() 292 dir->i_size -= sz_change; ubifs_create() 293 dir_ui->ui_size = dir->i_size; ubifs_create() 532 dir->i_size += sz_change; ubifs_link() 533 dir_ui->ui_size = dir->i_size; ubifs_link() 545 dir->i_size -= sz_change; ubifs_link() 546 dir_ui->ui_size = dir->i_size; ubifs_link() 590 dir->i_size -= sz_change; ubifs_unlink() 591 dir_ui->ui_size = dir->i_size; ubifs_unlink() 608 dir->i_size += sz_change; ubifs_unlink() 609 dir_ui->ui_size = dir->i_size; ubifs_unlink() 680 dir->i_size -= sz_change; ubifs_rmdir() 681 dir_ui->ui_size = dir->i_size; ubifs_rmdir() 698 dir->i_size += sz_change; ubifs_rmdir() 699 dir_ui->ui_size = dir->i_size; ubifs_rmdir() 742 dir->i_size += sz_change; ubifs_mkdir() 743 dir_ui->ui_size = dir->i_size; ubifs_mkdir() 757 dir->i_size -= sz_change; ubifs_mkdir() 758 dir_ui->ui_size = dir->i_size; ubifs_mkdir() 814 inode->i_size = ubifs_inode(inode)->ui_size = devlen; ubifs_mknod() 824 dir->i_size += sz_change; ubifs_mknod() 825 dir_ui->ui_size = dir->i_size; ubifs_mknod() 838 dir->i_size -= sz_change; ubifs_mknod() 839 dir_ui->ui_size = dir->i_size; ubifs_mknod() 898 inode->i_size = ubifs_inode(inode)->ui_size = len; ubifs_symlink() 905 dir->i_size += sz_change; ubifs_symlink() 906 dir_ui->ui_size = dir->i_size; ubifs_symlink() 919 dir->i_size -= sz_change; ubifs_symlink() 920 dir_ui->ui_size = dir->i_size; ubifs_symlink() 1055 old_dir->i_size -= old_sz; ubifs_rename() 1056 ubifs_inode(old_dir)->ui_size = old_dir->i_size; ubifs_rename() 1077 new_dir->i_size += new_sz; ubifs_rename() 1078 ubifs_inode(new_dir)->ui_size = new_dir->i_size; ubifs_rename() 1114 new_dir->i_size -= new_sz; ubifs_rename() 1115 ubifs_inode(new_dir)->ui_size = new_dir->i_size; ubifs_rename() 1117 old_dir->i_size += old_sz; ubifs_rename() 1118 ubifs_inode(old_dir)->ui_size = old_dir->i_size; ubifs_rename()
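A pattern runs through these ubifs_create()/ubifs_link()/ubifs_mkdir() lines: the parent directory's i_size is grown by sz_change before the journalled update, the fs-private ui_size is kept in lockstep, and both are rolled back if the update fails. A userspace sketch of that optimistic-update-with-rollback pattern, with illustrative names:

```c
/* Sketch of the directory-size bookkeeping visible in the snippets above:
 * grow optimistically, mirror into the shadow field, undo on failure. */
#include <stdio.h>

struct sketch_dir {
    long long i_size;   /* what stat() would report */
    long long ui_size;  /* fs-private shadow copy of i_size */
};

/* stand-in for the journal write; flip 'fail' to exercise the rollback */
static int sketch_jnl_update(int fail) { return fail ? -1 : 0; }

static int sketch_add_entry(struct sketch_dir *dir, long long sz_change, int fail)
{
    dir->i_size += sz_change;          /* grow the directory up front */
    dir->ui_size = dir->i_size;

    if (sketch_jnl_update(fail)) {
        dir->i_size -= sz_change;      /* undo on journal failure */
        dir->ui_size = dir->i_size;
        return -1;
    }
    return 0;
}

int main(void)
{
    struct sketch_dir dir = { 160, 160 };
    sketch_add_entry(&dir, 32, 1);     /* failing path leaves the size at 160 */
    printf("i_size=%lld ui_size=%lld\n", dir.i_size, dir.ui_size);
    return 0;
}
```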
|
H A D | file.c | 111 loff_t i_size = i_size_read(inode); do_readpage() local 113 dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx", do_readpage() 114 inode->i_ino, page->index, i_size, page->flags); do_readpage() 121 beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT; do_readpage() 151 int ilen = i_size & (UBIFS_BLOCK_SIZE - 1); do_readpage() 229 int uninitialized_var(err), appending = !!(pos + len > inode->i_size); write_begin_slow() 232 dbg_gen("ino %lu, pos %llu, len %u, i_size %lld", write_begin_slow() 233 inode->i_ino, pos, len, inode->i_size); write_begin_slow() 433 int uninitialized_var(err), appending = !!(pos + len > inode->i_size); ubifs_write_begin() 437 ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size); ubifs_write_begin() 548 int appending = !!(end_pos > inode->i_size); ubifs_write_end() 550 dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld", ubifs_write_end() 551 inode->i_ino, pos, page->index, len, copied, inode->i_size); ubifs_write_end() 615 loff_t i_size = i_size_read(inode); populate_page() local 620 dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx", populate_page() 621 inode->i_ino, page->index, i_size, page->flags); populate_page() 625 end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; populate_page() 626 if (!i_size || page->index > end_index) { populate_page() 677 int len = i_size & (PAGE_CACHE_SIZE - 1); populate_page() 977 * we have to call 'truncate_setsize()', which first changes @inode->i_size, 981 * This means that @inode->i_size is changed while @ui_mutex is unlocked. 988 * inode size. How do we do this if @inode->i_size may became smaller while we 990 * @ui->ui_isize "shadow" field which UBIFS uses instead of @inode->i_size 1004 loff_t i_size = i_size_read(inode), synced_i_size; ubifs_writepage() local 1005 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; ubifs_writepage() 1006 int err, len = i_size & (PAGE_CACHE_SIZE - 1); ubifs_writepage() 1013 /* Is the page fully outside @i_size? (truncate in progress) */ ubifs_writepage() 1023 /* Is the page fully inside @i_size? */ ubifs_writepage() 1042 * The page straddles @i_size. It must be zeroed out on each and every ubifs_writepage() 1053 if (i_size > synced_i_size) { ubifs_writepage() 1110 loff_t old_size = inode->i_size, new_size = attr->ia_size; do_truncation() 1183 ui->ui_size = inode->i_size; do_truncation() 1225 dbg_gen("size %lld -> %lld", inode->i_size, new_size); do_setattr() 1233 /* 'truncate_setsize()' changed @i_size, update @ui_size */ do_setattr() 1234 ui->ui_size = inode->i_size; do_setattr() 1273 if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size) ubifs_setattr() 1452 dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index, ubifs_vm_page_mkwrite()
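The ubifs_writepage() comments quoted above (lines 977-990) explain why UBIFS keeps a ui_size shadow: truncate_setsize() may change i_size while ui_mutex is not held, so size-based decisions must use the shadow field that only changes under the mutex. A hedged, purely illustrative userspace sketch of that idea:

```c
/* Sketch of the "shadow size" idea: decide based on a private ui_size that
 * only changes under ui_mutex, because i_size can move underneath us. */
#include <pthread.h>
#include <stdio.h>

struct sketch_inode {
    long long i_size;          /* may change outside our control */
    long long ui_size;         /* shadow: only written under ui_mutex */
    pthread_mutex_t ui_mutex;
};

static int sketch_is_appending(struct sketch_inode *ino, long long pos, long long len)
{
    int appending;

    pthread_mutex_lock(&ino->ui_mutex);
    appending = (pos + len > ino->ui_size);   /* trust the shadow, not i_size */
    pthread_mutex_unlock(&ino->ui_mutex);
    return appending;
}

static void sketch_set_size(struct sketch_inode *ino, long long new_size)
{
    pthread_mutex_lock(&ino->ui_mutex);
    ino->ui_size = new_size;                  /* shadow and i_size move together */
    ino->i_size = new_size;
    pthread_mutex_unlock(&ino->ui_mutex);
}

int main(void)
{
    struct sketch_inode ino = { 0, 0, PTHREAD_MUTEX_INITIALIZER };
    sketch_set_size(&ino, 4096);
    printf("appending: %d\n", sketch_is_appending(&ino, 4096, 1)); /* 1 */
    return 0;
}
```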
|
H A D | recovery.c | 1236 * @i_size: size on inode 1244 loff_t i_size; member in struct:size_entry 1254 * @i_size: size on inode 1258 static int add_ino(struct ubifs_info *c, ino_t inum, loff_t i_size, add_ino() argument 1278 e->i_size = i_size; add_ino() 1381 e->i_size = new_size; ubifs_recover_size_accum() 1421 loff_t i_size; fix_size_in_place() local 1433 i_size = le64_to_cpu(ino->size); fix_size_in_place() 1434 if (i_size >= e->d_size) fix_size_in_place() 1457 (unsigned long)e->inum, lnum, offs, i_size, e->d_size); fix_size_in_place() 1462 (unsigned long)e->inum, e->i_size, e->d_size, err); fix_size_in_place() 1502 e->i_size = le64_to_cpu(ino->size); ubifs_recover_size() 1506 if (e->exists && e->i_size < e->d_size) { ubifs_recover_size() 1519 if (inode->i_size < e->d_size) { ubifs_recover_size() 1522 inode->i_size, e->d_size); ubifs_recover_size() 1523 inode->i_size = e->d_size; ubifs_recover_size()
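The recovery.c entries track two sizes per inode: i_size as recorded in the inode node and d_size as implied by the furthest data node seen during replay; after an unclean unmount, an inode whose recorded size is smaller than its data extent is grown. A small sketch of that rule, illustrative only:

```c
/* Sketch of the size-recovery rule suggested by the snippets above:
 * if data nodes reach past the recorded inode size, grow the size. */
#include <stdio.h>

struct size_entry_sketch {
    unsigned long inum;
    long long i_size;   /* size stored in the inode node */
    long long d_size;   /* size implied by data nodes seen during replay */
    int exists;
};

static long long recovered_size(const struct size_entry_sketch *e)
{
    if (e->exists && e->i_size < e->d_size)
        return e->d_size;    /* data reaches past the recorded size: grow it */
    return e->i_size;
}

int main(void)
{
    struct size_entry_sketch e = { .inum = 42, .i_size = 4096,
                                   .d_size = 8192, .exists = 1 };
    printf("ino %lu recovered size %lld\n", e.inum, recovered_size(&e));
    return 0;
}
```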
|
H A D | xattr.c | 152 inode->i_size = ui->ui_size = size; create_xattr() 206 ubifs_assert(ui->data_len == inode->i_size); change_xattr() 217 inode->i_size = ui->ui_size = size; change_xattr() 409 ubifs_assert(inode->i_size == ui->data_len); ubifs_getxattr() 505 ubifs_assert(ui->data_len == inode->i_size); remove_xattr()
|
/linux-4.1.27/fs/hpfs/ |
H A D | inode.c | 21 i->i_size = -1; hpfs_init_inode() 82 i->i_size = ea_size; hpfs_read_inode() 106 i->i_size = 0; hpfs_read_inode() 128 i->i_size = 2048 * n_dnodes; hpfs_read_inode() 136 i->i_size = le32_to_cpu(fnode->file_size); hpfs_read_inode() 137 i->i_blocks = ((i->i_size + 511) >> 9) + 1; hpfs_read_inode() 139 hpfs_i(i)->mmu_private = i->i_size; hpfs_read_inode() 223 fnode->file_size = cpu_to_le32(i->i_size); hpfs_write_inode_nolock() 224 if (de) de->file_size = cpu_to_le32(i->i_size); hpfs_write_inode_nolock() 272 if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) hpfs_setattr()
|
H A D | file.c | 69 i->i_blocks = 1 + ((i->i_size + 511) >> 9); hpfs_truncate() 70 hpfs_i(i)->mmu_private = i->i_size; hpfs_truncate() 71 hpfs_truncate_btree(i->i_sb, i->i_ino, 1, ((i->i_size + 511) >> 9)); hpfs_truncate() 140 if (to > inode->i_size) { hpfs_write_failed() 141 truncate_pagecache(inode, inode->i_size); hpfs_write_failed()
|
H A D | dir.c | 283 if (result->i_size == -1) { hpfs_lookup() 284 result->i_size = le32_to_cpu(de->file_size); hpfs_lookup() 286 hpfs_i(result)->mmu_private = result->i_size; hpfs_lookup() 293 result->i_blocks = 1 + ((result->i_size + 511) >> 9); hpfs_lookup()
|
H A D | namei.c | 69 result->i_size = 2048; hpfs_mkdir() 175 result->i_size = 0; hpfs_create() 259 result->i_size = 0; hpfs_mknod() 336 result->i_size = strlen(symlink); hpfs_symlink()
|
H A D | dnode.c | 307 i->i_size += 2048; hpfs_add_to_dnode() 345 i->i_size += 2048; hpfs_add_to_dnode() 468 i->i_size -= 2048; move_to_top() 540 i->i_size -= 2048; delete_empty_dnode()
|
H A D | super.c | 724 if (root->i_size == -1) hpfs_fill_super() 725 root->i_size = 2048; hpfs_fill_super()
|
/linux-4.1.27/fs/jffs2/ |
H A D | file.c | 151 if (pageofs > inode->i_size) { jffs2_write_begin() 159 (unsigned int)inode->i_size, pageofs); jffs2_write_begin() 179 ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs)); jffs2_write_begin() 181 ri.offset = cpu_to_je32(inode->i_size); jffs2_write_begin() 182 ri.dsize = cpu_to_je32(pageofs - inode->i_size); jffs2_write_begin() 212 inode->i_size = pageofs; jffs2_write_begin() 287 ri->isize = cpu_to_je32((uint32_t)inode->i_size); jffs2_write_end() 309 if (inode->i_size < pos + writtenlen) { jffs2_write_end() 310 inode->i_size = pos + writtenlen; jffs2_write_end() 311 inode->i_blocks = (inode->i_size + 511) >> 9; jffs2_write_end()
|
H A D | fs.c | 113 ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size); jffs2_do_setattr() 121 if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { jffs2_do_setattr() 124 ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size); jffs2_do_setattr() 125 ri->offset = cpu_to_je32(inode->i_size); jffs2_do_setattr() 158 if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) jffs2_do_setattr() 161 if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { jffs2_do_setattr() 163 inode->i_size = iattr->ia_size; jffs2_do_setattr() 164 inode->i_blocks = (inode->i_size + 511) >> 9; jffs2_do_setattr() 180 We are protected from a simultaneous write() extending i_size jffs2_do_setattr() 183 if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) { jffs2_do_setattr() 185 inode->i_blocks = (inode->i_size + 511) >> 9; jffs2_do_setattr() 284 inode->i_size = je32_to_cpu(latest_node.isize); jffs2_iget() 291 inode->i_blocks = (inode->i_size + 511) >> 9; jffs2_iget() 481 inode->i_size = 0; jffs2_new_inode()
|
H A D | os-linux.h | 28 #define JFFS2_F_I_SIZE(f) (OFNI_EDONI_2SFFJ(f)->i_size)
|
H A D | dir.c | 327 inode->i_size = targetlen; jffs2_symlink() 328 ri->isize = ri->dsize = ri->csize = cpu_to_je32(inode->i_size); jffs2_symlink() 329 ri->totlen = cpu_to_je32(sizeof(*ri) + inode->i_size); jffs2_symlink()
|
/linux-4.1.27/security/integrity/ima/ |
H A D | ima_crypto.c | 231 loff_t i_size, offset; ima_calc_file_hash_atfm() local 254 i_size = i_size_read(file_inode(file)); ima_calc_file_hash_atfm() 256 if (i_size == 0) ima_calc_file_hash_atfm() 263 rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1); ima_calc_file_hash_atfm() 270 if (i_size > rbuf_size[0]) { ima_calc_file_hash_atfm() 276 rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0], ima_calc_file_hash_atfm() 285 for (offset = 0; offset < i_size; offset += rbuf_len) { ima_calc_file_hash_atfm() 296 rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]); ima_calc_file_hash_atfm() 357 loff_t i_size, offset = 0; ima_calc_file_hash_tfm() local 371 i_size = i_size_read(file_inode(file)); ima_calc_file_hash_tfm() 373 if (i_size == 0) ima_calc_file_hash_tfm() 385 while (offset < i_size) { ima_calc_file_hash_tfm() 441 loff_t i_size; ima_calc_file_hash() local 444 i_size = i_size_read(file_inode(file)); ima_calc_file_hash() 446 if (ima_ahash_minsize && i_size >= ima_ahash_minsize) { ima_calc_file_hash()
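The ima_calc_file_hash_tfm() lines above walk a file from 0 to i_size in buffer-sized chunks, trimming the last chunk with min(i_size - offset, bufsize). Below is a self-contained userspace sketch of that loop; the hash update (crypto_shash_update() in the kernel) is replaced by a byte sum so the sketch stays runnable:

```c
/* Sketch of the chunked read loop: iterate up to i_size, trim the tail. */
#include <stdio.h>

#define CHUNK_SKETCH 4096

static unsigned long sum_file(FILE *f, long long i_size)
{
    unsigned char buf[CHUNK_SKETCH];
    unsigned long sum = 0;
    long long offset;

    if (i_size == 0)
        return 0;                        /* empty file: nothing to hash */

    for (offset = 0; offset < i_size; ) {
        size_t want = (size_t)((i_size - offset < CHUNK_SKETCH)
                               ? (i_size - offset) : CHUNK_SKETCH);
        size_t got = fread(buf, 1, want, f);
        if (got == 0)
            break;                       /* short read: stop, like an I/O error */
        for (size_t i = 0; i < got; i++)
            sum += buf[i];               /* stand-in for the hash update */
        offset += got;
    }
    return sum;
}

int main(int argc, char **argv)
{
    FILE *f = fopen(argc > 1 ? argv[1] : "/etc/hostname", "rb");
    if (!f)
        return 1;
    fseek(f, 0, SEEK_END);
    long long i_size = ftell(f);         /* stand-in for i_size_read() */
    rewind(f);
    printf("byte sum: %lu\n", sum_file(f, i_size));
    fclose(f);
    return 0;
}
```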
|
/linux-4.1.27/include/uapi/linux/ |
H A D | minix_fs.h | 37 __u32 i_size; member in struct:minix_inode 55 __u32 i_size; member in struct:minix2_inode
|
/linux-4.1.27/fs/logfs/ |
H A D | file.c | 31 /* Reading beyond i_size is simple: memset to zero */ logfs_write_begin() 120 loff_t i_size = i_size_read(inode); logfs_writepage() local 121 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; logfs_writepage() 140 /* Is the page fully inside i_size? */ logfs_writepage() 144 /* Is the page fully outside i_size? (truncate in progress) */ logfs_writepage() 145 offset = i_size & (PAGE_CACHE_SIZE-1); logfs_writepage() 152 * The page straddles i_size. It must be zeroed out on each and every logfs_writepage()
|
H A D | dev_bdev.c | 264 u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000; bdev_find_last_sb()
|
/linux-4.1.27/block/partitions/ |
H A D | ibm.c | 200 loff_t i_size, find_lnx1_partitions() 215 * 'size based on geo == size based on i_size' is true, then find_lnx1_partitions() 222 size = i_size >> 9; find_lnx1_partitions() 231 /* else keep size based on i_size */ find_lnx1_partitions() 293 loff_t i_size, offset, size; ibm_partition() local 305 i_size = i_size_read(bdev->bd_inode); ibm_partition() 306 if (i_size == 0) ibm_partition() 331 label, labelsect, i_size, ibm_partition() 348 size = i_size >> 9; ibm_partition() 194 find_lnx1_partitions(struct parsed_partitions *state, struct hd_geometry *geo, int blocksize, char name[], union label_t *label, sector_t labelsect, loff_t i_size, dasd_information2_t *info) find_lnx1_partitions() argument
|
H A D | atari.c | 50 hd_size = state->bdev->bd_inode->i_size >> 9; atari_partition()
|
aix.c
  82: return (bdev->bd_inode->i_size >> 9) - 1ULL;  [last_lba()]
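The partition parsers in this directory all derive geometry from the block device's byte size (bd_inode->i_size): shifting by 9 gives the 512-byte sector count, and one less than that is the last addressable LBA. A trivial sketch of that arithmetic:

```c
/* Sketch of the sector arithmetic used by the partition parsers above. */
#include <stdio.h>

static unsigned long long sectors_of(long long i_size)  { return i_size >> 9; }
static unsigned long long last_lba_of(long long i_size) { return (i_size >> 9) - 1ULL; }

int main(void)
{
    long long i_size = 8LL * 1024 * 1024 * 1024;   /* an 8 GiB device */
    printf("%llu sectors, last LBA %llu\n", sectors_of(i_size), last_lba_of(i_size));
    return 0;
}
```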
|
H A D | acorn.c | 284 nr_sects = (state->bdev->bd_inode->i_size >> 9) - start_sect; adfspart_check_ADFS()
|
/linux-4.1.27/fs/squashfs/ |
H A D | inode.c | 78 inode->i_size = 0; squashfs_new_inode() 167 inode->i_size = le32_to_cpu(sqsh_ino->file_size); squashfs_read_inode() 170 inode->i_blocks = ((inode->i_size - 1) >> 9) + 1; squashfs_read_inode() 211 inode->i_size = le64_to_cpu(sqsh_ino->file_size); squashfs_read_inode() 215 inode->i_blocks = (inode->i_size - squashfs_read_inode() 240 inode->i_size = le16_to_cpu(sqsh_ino->file_size); squashfs_read_inode() 265 inode->i_size = le32_to_cpu(sqsh_ino->file_size); squashfs_read_inode() 292 inode->i_size = le32_to_cpu(sqsh_ino->symlink_size); squashfs_read_inode() 303 &offset, inode->i_size); squashfs_read_inode()
|
/linux-4.1.27/fs/jfs/ |
H A D | inode.c | 61 if (inode->i_size >= IDATASIZE) { jfs_iget() 70 JFS_IP(inode)->i_inline[inode->i_size] = '\0'; jfs_iget() 214 if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) && jfs_get_block() 308 if (to > inode->i_size) { jfs_write_failed() 309 truncate_pagecache(inode, inode->i_size); jfs_write_failed() 346 * blocks outside i_size. Trim these off again. jfs_direct_IO() 416 jfs_info("jfs_truncate: size = 0x%lx", (ulong) ip->i_size); jfs_truncate() 418 nobh_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block); jfs_truncate() 421 jfs_truncate_nolock(ip, ip->i_size); jfs_truncate()
|
H A D | resize.c | 101 VolumeSize = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits; jfs_extendfs() 214 sbi->direct_inode->i_size = sb->s_bdev->bd_inode->i_size; jfs_extendfs() 355 nPages = ipbmap->i_size >> L2PSIZE; jfs_extendfs() 399 ipbmap->i_size += xlen << sbi->l2bsize; jfs_extendfs() 480 ipbmap2->i_size = ipbmap->i_size; jfs_extendfs()
|
super.c
  286: *newLVSize = sb->s_bdev->bd_inode->i_size >>  [parse_options()]
  555: inode->i_size = sb->s_bdev->bd_inode->i_size;  [jfs_fill_super()]
  752: loff_t i_size = i_size_read(inode);  [jfs_quota_read(), local]
  754: if (off > i_size)  [jfs_quota_read()]
  756: if (off+len > i_size)  [jfs_quota_read()]
  757: len = i_size-off;  [jfs_quota_read()]
  833: if (inode->i_size < off+len-towrite)  [jfs_quota_write()]
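jfs_quota_read() clamps every read to the quota file's i_size: a read that starts past EOF returns nothing, and one that crosses EOF is trimmed. A minimal sketch of that clamp, names illustrative:

```c
/* Sketch of the bounds clamping in jfs_quota_read(): never hand back bytes
 * beyond the end of the quota file. */
#include <stdio.h>

static long clamp_read_len(long long i_size, long long off, long len)
{
    if (off > i_size)
        return 0;                        /* entirely past EOF */
    if (off + len > i_size)
        len = (long)(i_size - off);      /* trim the tail to EOF */
    return len;
}

int main(void)
{
    printf("%ld %ld %ld\n",
           clamp_read_len(1000, 2000, 100),   /* 0 */
           clamp_read_len(1000, 950, 100),    /* 50 */
           clamp_read_len(1000, 0, 100));     /* 100 */
    return 0;
}
```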
|
H A D | namei.c | 437 if (dip->i_size > 1) jfs_rmdir() 589 if (dip->i_size > 1) jfs_unlink() 639 if (ip->i_size < IDATASIZE) { commitZeroLink() 640 ip->i_size = 0; commitZeroLink() 675 if (ip->i_size) commitZeroLink() 708 if (ip->i_size < IDATASIZE) jfs_free_zero_link() 761 if (ip->i_size) jfs_free_zero_link() 951 ip->i_size = ssize - 1; jfs_symlink() 986 ip->i_size = ssize - 1; jfs_symlink() 1324 if (old_dir->i_size > 1) jfs_rename()
|
H A D | file.c | 73 (inode->i_size == 0)) { jfs_open()
|
H A D | jfs_metapage.c | 253 sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >> metapage_get_blocks() 619 if ((lblock << inode->i_blkbits) >= inode->i_size) __get_metapage()
|
/linux-4.1.27/fs/hfsplus/ |
H A D | inode.c | 38 if (to > inode->i_size) { hfsplus_write_failed() 39 truncate_pagecache(inode, inode->i_size); hfsplus_write_failed() 138 * blocks outside i_size. Trim these off again. hfsplus_direct_IO() 256 if (attr->ia_size > inode->i_size) { hfsplus_setattr() 393 inode->i_size = 2; hfsplus_new_inode() 429 inode->i_size = 0; hfsplus_delete_inode() 433 inode->i_size = 0; hfsplus_delete_inode() 456 hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size); hfsplus_inode_read_fork() 458 (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; hfsplus_inode_read_fork() 474 fork->total_size = cpu_to_be64(inode->i_size); hfsplus_inode_write_fork() 496 inode->i_size = 2 + be32_to_cpu(folder->valence); hfsplus_cat_read_inode() 577 folder->valence = cpu_to_be32(inode->i_size - 2); hfsplus_cat_write_inode()
|
H A D | extents.c | 436 if (sbi->alloc_file->i_size * 8 < hfsplus_file_extend() 440 sbi->alloc_file->i_size * 8, hfsplus_file_extend() 540 inode->i_ino, (long long)hip->phys_size, inode->i_size); hfsplus_file_truncate() 542 if (inode->i_size > hip->phys_size) { hfsplus_file_truncate() 546 loff_t size = inode->i_size; hfsplus_file_truncate() 559 } else if (inode->i_size == hip->phys_size) hfsplus_file_truncate() 562 blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >> hfsplus_file_truncate() 606 hip->phys_size = inode->i_size; hfsplus_file_truncate() 607 hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> hfsplus_file_truncate()
|
H A D | btree.c | 364 hip->phys_size = inode->i_size = hfs_bmap_alloc() 369 inode_set_bytes(inode, inode->i_size); hfs_bmap_alloc() 370 count = inode->i_size >> tree->node_size_shift; hfs_bmap_alloc()
|
H A D | catalog.c | 303 dir->i_size++; hfsplus_create_cat() 397 dir->i_size--; hfsplus_delete_cat() 466 dst_dir->i_size++; hfsplus_rename_cat() 483 src_dir->i_size--; hfsplus_rename_cat()
|
H A D | dir.c | 142 if (file->f_pos >= inode->i_size) hfsplus_readdir() 187 if (ctx->pos >= inode->i_size) hfsplus_readdir() 259 if (ctx->pos >= inode->i_size) hfsplus_readdir() 416 if (inode->i_size != 2) hfsplus_rmdir()
|
H A D | wrapper.c | 133 *size = sb->s_bdev->bd_inode->i_size >> 9; hfsplus_get_last_session()
|
H A D | xattr.c | 204 hip->phys_size = attr_file->i_size = hfsplus_create_attributes_file() 207 inode_set_bytes(attr_file, attr_file->i_size); hfsplus_create_attributes_file()
|
/linux-4.1.27/fs/sysv/ |
H A D | inode.c | 170 nd_terminate_link(SYSV_I(inode)->i_data, inode->i_size, sysv_set_inode() 209 inode->i_size = fs32_to_cpu(sbi, raw_inode->i_size); sysv_iget() 263 raw_inode->i_size = cpu_to_fs32(sbi, inode->i_size); __sysv_write_inode() 301 inode->i_size = 0; sysv_evict_inode()
|
H A D | itree.c | 379 iblock = (inode->i_size + blocksize-1) sysv_truncate() 382 block_truncate_page(inode->i_mapping, inode->i_size, get_block); sysv_truncate() 471 if (to > inode->i_size) { sysv_write_failed() 472 truncate_pagecache(inode, inode->i_size); sysv_write_failed()
|
H A D | dir.c | 38 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; dir_pages() 48 if (pos+len > dir->i_size) { dir_commit_chunk() 78 if (pos >= inode->i_size) sysv_readdir()
|
H A D | super.c | 461 (fs32_to_cpu(sbi, v7i->i_size) == 0) || v7_sanity_check() 462 (fs32_to_cpu(sbi, v7i->i_size) & 017) || v7_sanity_check() 463 (fs32_to_cpu(sbi, v7i->i_size) > V7_NFILES * v7_sanity_check()
|
H A D | namei.c | 196 inode->i_size = 0; sysv_rmdir()
|
/linux-4.1.27/fs/9p/ |
H A D | vfs_addr.c | 257 loff_t i_size = i_size_read(inode); v9fs_direct_IO() local 258 if (pos + n > i_size) v9fs_direct_IO() 259 inode_add_bytes(inode, pos + n - i_size); v9fs_direct_IO() 325 * No need to use i_size_read() here, the i_size v9fs_write_end() 328 if (last_pos > inode->i_size) { v9fs_write_end() 329 inode_add_bytes(inode, last_pos - inode->i_size); v9fs_write_end()
|
H A D | vfs_file.c | 421 loff_t i_size; v9fs_file_write_iter() local 429 i_size = i_size_read(inode); v9fs_file_write_iter() 430 if (iocb->ki_pos > i_size) { v9fs_file_write_iter() 431 inode_add_bytes(inode, iocb->ki_pos - i_size); v9fs_file_write_iter()
|
H A D | vfs_inode.c | 1465 loff_t i_size; v9fs_refresh_inode() local 1482 * We don't want to refresh inode->i_size, v9fs_refresh_inode() 1485 i_size = inode->i_size; v9fs_refresh_inode() 1488 inode->i_size = i_size; v9fs_refresh_inode()
|
H A D | vfs_inode_dotl.c | 946 loff_t i_size; v9fs_refresh_inode_dotl() local 962 * We don't want to refresh inode->i_size, v9fs_refresh_inode_dotl() 965 i_size = inode->i_size; v9fs_refresh_inode_dotl() 968 inode->i_size = i_size; v9fs_refresh_inode_dotl()
|
/linux-4.1.27/fs/ntfs/ |
H A D | aops.c | 72 loff_t i_size; ntfs_end_buffer_async_read() local 81 i_size = i_size_read(vi); ntfs_end_buffer_async_read() 83 if (unlikely(init_size > i_size)) { ntfs_end_buffer_async_read() 85 init_size = i_size; ntfs_end_buffer_async_read() 177 * We only enforce allocated_size limit because i_size is checked for in 186 loff_t i_size; ntfs_read_block() local 236 i_size = i_size_read(vi); ntfs_read_block() 238 if (unlikely(init_size > i_size)) { ntfs_read_block() 240 init_size = i_size; ntfs_read_block() 400 loff_t i_size; ntfs_readpage() local 413 i_size = i_size_read(vi); ntfs_readpage() 414 /* Is the page fully outside i_size? (truncate in progress) */ ntfs_readpage() 415 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> ntfs_readpage() 418 ntfs_debug("Read outside i_size - truncated?"); ntfs_readpage() 500 i_size = i_size_read(vi); ntfs_readpage() 502 if (unlikely(attr_len > i_size)) { ntfs_readpage() 504 attr_len = i_size; ntfs_readpage() 555 loff_t i_size; ntfs_write_block() local 605 i_size = i_size_read(vi); ntfs_write_block() 610 dblock = (i_size + blocksize - 1) >> blocksize_bits; ntfs_write_block() 621 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers; ntfs_write_block() 637 * Mapped buffers outside i_size will occur, because ntfs_write_block() 638 * this page can be outside i_size when there is a ntfs_write_block() 644 * the page was within i_size but before we get here, ntfs_write_block() 645 * vmtruncate() modifies i_size? ntfs_write_block() 658 (initialized_size < i_size))) { ntfs_write_block() 692 // Zero any non-uptodate buffers up to i_size. ntfs_write_block() 697 // inode (up to i_size). ntfs_write_block() 1355 loff_t i_size; ntfs_writepage() local 1366 i_size = i_size_read(vi); ntfs_writepage() 1367 /* Is the page fully outside i_size? (truncate in progress) */ ntfs_writepage() 1368 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> ntfs_writepage() 1376 ntfs_debug("Write outside i_size - truncated?"); ntfs_writepage() 1417 if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) { ntfs_writepage() 1418 /* The page straddles i_size. */ ntfs_writepage() 1419 unsigned int ofs = i_size & ~PAGE_CACHE_MASK; ntfs_writepage() 1484 i_size = i_size_read(vi); ntfs_writepage() 1485 if (unlikely(attr_len > i_size)) { ntfs_writepage() 1487 attr_len = i_size; ntfs_writepage() 1566 loff_t i_size; ntfs_bmap() local 1592 i_size = i_size_read(VFS_I(ni)); ntfs_bmap() 1599 if (unlikely(ofs >= size || (ofs + blocksize > size && size < i_size))) ntfs_bmap()
|
H A D | compress.c | 124 const loff_t i_size, const s64 initialized_size) handle_bounds_compressed_page() 127 (initialized_size < i_size)) handle_bounds_compressed_page() 143 * @i_size: file size when we started the read (IN) 172 const u32 cb_size, const loff_t i_size, ntfs_decompress() 235 handle_bounds_compressed_page(dp, i_size, ntfs_decompress() 484 loff_t i_size; ntfs_read_compressed_block() local 560 i_size = i_size_read(VFS_I(ni)); ntfs_read_compressed_block() 563 max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - ntfs_read_compressed_block() 565 /* Is the page fully outside i_size? (truncate in progress) */ ntfs_read_compressed_block() 570 ntfs_debug("Compressed read outside i_size - truncated?"); ntfs_read_compressed_block() 844 handle_bounds_compressed_page(page, i_size, ntfs_read_compressed_block() 868 cb_pos, cb_size - (cb_pos - cb), i_size, ntfs_read_compressed_block() 123 handle_bounds_compressed_page(struct page *page, const loff_t i_size, const s64 initialized_size) handle_bounds_compressed_page() argument 169 ntfs_decompress(struct page *dest_pages[], int *dest_index, int *dest_ofs, const int dest_max_index, const int dest_max_ofs, const int xpage, char *xpage_done, u8 *const cb_start, const u32 cb_size, const loff_t i_size, const s64 initialized_size) ntfs_decompress() argument
|
H A D | inode.c | 908 vi->i_size = ni->initialized_size = ntfs_read_locked_inode() 972 vi->i_size = sle64_to_cpu(a->data.non_resident.data_size); ntfs_read_locked_inode() 1001 if ((bvi_size << 3) < (vi->i_size >> ntfs_read_locked_inode() 1005 bvi_size << 3, vi->i_size); ntfs_read_locked_inode() 1027 vi->i_size = ni->initialized_size = ntfs_read_locked_inode() 1135 vi->i_size = sle64_to_cpu( ntfs_read_locked_inode() 1142 vi->i_size = ni->initialized_size = le32_to_cpu( ntfs_read_locked_inode() 1147 if (vi->i_size > ni->allocated_size) { ntfs_read_locked_inode() 1355 vi->i_size = ni->initialized_size = le32_to_cpu( ntfs_read_locked_attr_inode() 1359 if (vi->i_size > ni->allocated_size) { ntfs_read_locked_attr_inode() 1412 vi->i_size = sle64_to_cpu(a->data.non_resident.data_size); ntfs_read_locked_attr_inode() 1614 vi->i_size = ni->initialized_size = ni->allocated_size = 0; ntfs_read_locked_index_inode() 1672 vi->i_size = sle64_to_cpu(a->data.non_resident.data_size); ntfs_read_locked_index_inode() 1700 if ((bvi_size << 3) < (vi->i_size >> ni->itype.index.block_size_bits)) { ntfs_read_locked_index_inode() 1703 vi->i_size); ntfs_read_locked_index_inode() 2075 vi->i_size = sle64_to_cpu( ntfs_read_inode_mount() 2085 if ((vi->i_size >> vol->mft_record_size_bits) >= ntfs_read_inode_mount() 2346 * ntfs_truncate - called when the i_size of an ntfs inode is changed 2347 * @vi: inode for which the i_size was changed 2349 * We only support i_size changes for normal files at present, i.e. not 2373 const char *te = " Leaving file length out of sync with i_size."; ntfs_truncate() 2426 * The i_size of the vfs inode is the new size for the attribute value. ntfs_truncate() 2523 * beyond i_size in which case there is nothing to do or in the ntfs_truncate() 2647 * Make the valid size smaller (i_size is already up-to-date). ntfs_truncate() 2864 * @vi: inode for which the i_size was changed 2882 * soon as possible, because we do not implement changes in i_size yet. So we 2883 * abort all i_size changes here.
|
H A D | file.c | 84 * As a side-effect, the file size (vfs inode->i_size) may be incremented as, 133 "new_initialized_size 0x%llx, i_size 0x%llx.", ntfs_attr_extend_initialized() 185 * size (vfs inode->i_size), we need to extend the file size to the ntfs_attr_extend_initialized() 315 ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.", ntfs_attr_extend_initialized() 468 * increments the vfs inode->i_size to keep it above or equal to the ntfs_prepare_file_for_write() 579 * i_size is not to be modified yet. 1447 * Finally, if we do not need to update initialized_size or i_size we ntfs_commit_pages_after_non_resident_write() 1458 * Update initialized_size/i_size as appropriate, both in the inode and ntfs_commit_pages_after_non_resident_write() 1510 ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error " ntfs_commit_pages_after_non_resident_write() 1537 * Finally, we need to update i_size and initialized_size as appropriate both 1542 * page are uptodate, and updates i_size if the end of io is beyond i_size. In 1557 loff_t i_size; ntfs_commit_pages_after_write() local 1616 i_size = i_size_read(vi); ntfs_commit_pages_after_write() 1617 BUG_ON(attr_len != i_size); ntfs_commit_pages_after_write() 1646 /* Update initialized_size/i_size if necessary. */ ntfs_commit_pages_after_write() 1651 BUG_ON(initialized_size != i_size); ntfs_commit_pages_after_write()
|
H A D | lcnalloc.c | 154 loff_t i_size; ntfs_cluster_alloc() local 256 i_size = i_size_read(lcnbmp_vi); ntfs_cluster_alloc() 271 if (last_read_pos > i_size) { ntfs_cluster_alloc() 295 if (unlikely(last_read_pos + buf_size > i_size)) ntfs_cluster_alloc() 296 buf_size = i_size - last_read_pos; ntfs_cluster_alloc()
|
H A D | super.c | 1594 loff_t i_size; load_and_init_attrdef() local 1611 i_size = i_size_read(ino); load_and_init_attrdef() 1612 if (i_size <= 0 || i_size > 0x7fffffff) load_and_init_attrdef() 1614 vol->attrdef = (ATTR_DEF*)ntfs_malloc_nofs(i_size); load_and_init_attrdef() 1618 max_index = i_size >> PAGE_CACHE_SHIFT; load_and_init_attrdef() 1631 size = i_size & ~PAGE_CACHE_MASK; load_and_init_attrdef() 1635 vol->attrdef_size = i_size; load_and_init_attrdef() 1636 ntfs_debug("Read %llu bytes from $AttrDef.", i_size); load_and_init_attrdef() 1659 loff_t i_size; load_and_init_upcase() local 1679 i_size = i_size_read(ino); load_and_init_upcase() 1680 if (!i_size || i_size & (sizeof(ntfschar) - 1) || load_and_init_upcase() 1681 i_size > 64ULL * 1024 * sizeof(ntfschar)) load_and_init_upcase() 1683 vol->upcase = (ntfschar*)ntfs_malloc_nofs(i_size); load_and_init_upcase() 1687 max_index = i_size >> PAGE_CACHE_SHIFT; load_and_init_upcase() 1700 size = i_size & ~PAGE_CACHE_MASK; load_and_init_upcase() 1704 vol->upcase_len = i_size >> UCHAR_T_SIZE_BITS; load_and_init_upcase() 1706 i_size, 64 * 1024 * sizeof(ntfschar)); load_and_init_upcase()
|
H A D | dir.c | 1088 * parts (e.g. ->f_pos and ->i_size, and it also protects against directory 1102 loff_t i_size; ntfs_readdir() local 1122 i_size = i_size_read(vdir); ntfs_readdir() 1123 if (actor->pos >= i_size + vol->mft_record_size) ntfs_readdir() 1280 if (unlikely(((bmp_pos + cur_bmp_pos) >> 3) >= i_size)) ntfs_readdir() 1422 actor->pos = i_size + vol->mft_record_size; ntfs_readdir()
|
/linux-4.1.27/fs/minix/ |
dir.c
  43: if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))  [minix_last_byte()]
  44: last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);  [minix_last_byte()]
  50: return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;  [dir_pages()]
  60: if (pos+len > dir->i_size) {  [dir_commit_chunk()]
  97: if (pos >= inode->i_size)  [minix_readdir()]
  224: * This code plays outside i_size, so it locks the page  [minix_add_link()]
  249: /* We hit i_size */  [minix_add_link()]
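Two tiny helpers recur here: dir_pages() rounds the directory's i_size up to whole page-cache pages, and minix_last_byte() reports how much of a given page is valid (a full page everywhere except the page containing EOF). A runnable sketch of both, with PAGE_SIZE_SKETCH standing in for PAGE_CACHE_SIZE:

```c
/* Sketch of the dir_pages()/minix_last_byte() arithmetic quoted above. */
#include <stdio.h>

#define PAGE_SHIFT_SKETCH 12
#define PAGE_SIZE_SKETCH  (1UL << PAGE_SHIFT_SKETCH)

static unsigned long dir_pages_sketch(long long i_size)
{
    return (i_size + PAGE_SIZE_SKETCH - 1) >> PAGE_SHIFT_SKETCH;
}

static unsigned long last_byte_sketch(long long i_size, unsigned long page_nr)
{
    unsigned long last = PAGE_SIZE_SKETCH;

    if (page_nr == (unsigned long)(i_size >> PAGE_SHIFT_SKETCH))
        last = i_size & (PAGE_SIZE_SKETCH - 1);   /* partial page at EOF */
    return last;
}

int main(void)
{
    long long i_size = 5000;   /* a directory a bit over one page long */
    printf("pages=%lu, valid in page 1: %lu\n",
           dir_pages_sketch(i_size), last_byte_sketch(i_size, 1)); /* 2, 904 */
    return 0;
}
```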
|
H A D | inode.c | 31 inode->i_size = 0; minix_evict_inode() 403 if (to > inode->i_size) { minix_write_failed() 404 truncate_pagecache(inode, inode->i_size); minix_write_failed() 479 inode->i_size = raw_inode->i_size; V1_minix_iget() 512 inode->i_size = raw_inode->i_size; V2_minix_iget() 564 raw_inode->i_size = inode->i_size; V1_minix_update_inode() 591 raw_inode->i_size = inode->i_size; V2_minix_update_inode()
|
H A D | itree_common.c | 305 iblock = (inode->i_size + sb->s_blocksize -1) >> sb->s_blocksize_bits; truncate() 306 block_truncate_page(inode->i_mapping, inode->i_size, get_block); truncate()
|
/linux-4.1.27/fs/ceph/ |
H A D | file.c | 344 * If we get a short result from the OSD, check against i_size; we need to 393 if (was_short && (pos + ret < inode->i_size)) { striped_read() 395 inode->i_size - pos - ret); striped_read() 412 if (left && hit_stripe && pos < inode->i_size) striped_read() 419 if (pos + left > inode->i_size) striped_read() 861 loff_t i_size; ceph_read_iter() local 879 i_size = i_size_read(inode); ceph_read_iter() 882 if (iocb->ki_pos < i_size && ceph_read_iter() 884 loff_t end = min_t(loff_t, i_size, ceph_read_iter() 895 if (iocb->ki_pos < i_size && read < len) { ceph_read_iter() 897 i_size - iocb->ki_pos); ceph_read_iter() 907 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size && ceph_read_iter() 911 inode->i_size); ceph_read_iter() 982 dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n", ceph_write_iter() 983 inode, ceph_vinop(inode), pos, count, inode->i_size); ceph_write_iter() 1018 loff_t old_size = inode->i_size; ceph_write_iter() 1029 if (inode->i_size > old_size) ceph_write_iter() 1086 offset += inode->i_size; ceph_llseek() 1102 if (offset >= inode->i_size) { ceph_llseek() 1108 if (offset >= inode->i_size) { ceph_llseek() 1112 offset = inode->i_size; ceph_llseek()
|
H A D | cache.c | 109 aux.size = inode->i_size; ceph_fscache_inode_get_aux() 122 *size = inode->i_size; ceph_fscache_inode_get_attr() 137 aux.size = inode->i_size; ceph_fscache_inode_check_aux()
|
inode.c
  549: (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {  [ceph_fill_file_size()]
  550: dout("size %lld -> %llu\n", inode->i_size, size);  [ceph_fill_file_size()]
  551: inode->i_size = size;  [ceph_fill_file_size()]
  808: if (WARN_ON(symlen != inode->i_size))  [fill_inode()]
  899: /* queue truncate if we saw i_size decrease */  [fill_inode()]
  1488: dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);  [ceph_inode_set_size()]
  1489: inode->i_size = size;  [ceph_inode_set_size()]
  1835: inode->i_size, attr->ia_size);  [ceph_setattr()]
  1837: attr->ia_size > inode->i_size) {  [ceph_setattr()]
  1838: inode->i_size = attr->ia_size;  [ceph_setattr()]
  1845: attr->ia_size != inode->i_size) {  [ceph_setattr()]
  1848: cpu_to_le64(inode->i_size);  [ceph_setattr()]
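The ceph_fill_file_size() lines suggest an acceptance rule for sizes reported by the metadata server: a reported size only replaces the local i_size if it belongs to a newer truncation epoch, or to the same epoch and is larger, so a stale smaller size cannot undo local growth. A hedged userspace sketch of that rule; the wraparound epoch comparison is an assumption of this sketch, not Ceph's exact API:

```c
/* Sketch of accepting a server-reported size only when it is not stale. */
#include <stdio.h>

struct sketch_ci {
    unsigned truncate_seq;  /* local truncation epoch */
    long long i_size;
};

static void sketch_fill_file_size(struct sketch_ci *ci,
                                  unsigned truncate_seq, long long size)
{
    if ((int)(truncate_seq - ci->truncate_seq) > 0 ||          /* newer epoch */
        (truncate_seq == ci->truncate_seq && size > ci->i_size)) {
        ci->i_size = size;
        ci->truncate_seq = truncate_seq;
    }
}

int main(void)
{
    struct sketch_ci ci = { .truncate_seq = 5, .i_size = 8192 };
    sketch_fill_file_size(&ci, 5, 4096);   /* same epoch, smaller: ignored */
    sketch_fill_file_size(&ci, 6, 4096);   /* newer epoch: accepted */
    printf("seq=%u size=%lld\n", ci.truncate_seq, ci.i_size);
    return 0;
}
```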
|
H A D | addr.c | 1039 loff_t i_size; ceph_update_writeable_page() local 1101 i_size = inode->i_size; /* caller holds i_mutex */ ceph_update_writeable_page() 1103 if (page_off >= i_size || ceph_update_writeable_page() 1104 (pos_in_page == 0 && (pos+len) >= i_size && ceph_update_writeable_page() 1182 if (pos+copied > inode->i_size) ceph_write_end() 1342 dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n", ceph_page_mkwrite()
|
/linux-4.1.27/fs/affs/ |
H A D | file.c | 37 if (inode->i_size != AFFS_I(inode)->mmu_private) affs_file_release() 385 if (to > inode->i_size) { affs_write_failed() 386 truncate_pagecache(inode, inode->i_size); affs_write_failed() 601 inode->i_size = AFFS_I(inode)->mmu_private = newsize; affs_extent_file_ofs() 605 inode->i_size = AFFS_I(inode)->mmu_private = newsize; affs_extent_file_ofs() 618 if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) { affs_readpage_ofs() 619 to = inode->i_size & ~PAGE_CACHE_MASK; affs_readpage_ofs() 642 /* XXX: this probably leaves a too-big i_size in case of affs_write_begin_ofs() 643 * failure. Should really be updating i_size at write_end time affs_write_begin_ofs() 793 if (tmp > inode->i_size) affs_write_end_ofs() 794 inode->i_size = AFFS_I(inode)->mmu_private = tmp; affs_write_end_ofs() 844 inode->i_ino, AFFS_I(inode)->mmu_private, inode->i_size); affs_truncate() 848 if (inode->i_size) { affs_truncate() 849 last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize; affs_truncate() 853 if (inode->i_size > AFFS_I(inode)->mmu_private) { affs_truncate() 857 loff_t isize = inode->i_size; affs_truncate() 864 inode->i_size = AFFS_I(inode)->mmu_private; affs_truncate() 867 } else if (inode->i_size == AFFS_I(inode)->mmu_private) affs_truncate() 896 if (inode->i_size) { affs_truncate() 914 if (inode->i_size) { affs_truncate() 935 AFFS_I(inode)->mmu_private = inode->i_size; affs_truncate()
|
H A D | inode.c | 51 inode->i_size = 0; affs_iget() 127 AFFS_I(inode)->mmu_private = inode->i_size = size; affs_iget() 128 if (inode->i_size) { affs_iget() 189 tail->size = cpu_to_be32(inode->i_size); affs_write_inode() 265 inode->i_size = 0; affs_evict_inode()
|
/linux-4.1.27/fs/bfs/ |
H A D | dir.c | 44 while (ctx->pos < dir->i_size) { bfs_readdir() 65 } while ((offset < BFS_BSIZE) && (ctx->pos < dir->i_size)); bfs_readdir() 301 if (pos >= dir->i_size) { bfs_add_entry() 302 dir->i_size += BFS_DIRENT_SIZE; bfs_add_entry() 341 while (block * BFS_BSIZE + offset < dir->i_size) { bfs_find_entry()
|
H A D | file.c | 166 if (to > inode->i_size) bfs_write_failed() 167 truncate_pagecache(inode, inode->i_size); bfs_write_failed()
|
H A D | inode.c | 81 inode->i_size = BFS_FILESIZE(di); bfs_iget() 150 di->i_eoffset = cpu_to_le32(i_sblock * BFS_BSIZE + inode->i_size - 1); bfs_write_inode()
|
/linux-4.1.27/fs/ocfs2/ |
H A D | mmap.c | 83 * The i_size check doesn't catch the case where nodes truncated and __ocfs2_page_mkwrite() 97 * length of the whole page (chopped to i_size) to make sure __ocfs2_page_mkwrite() 155 * ocfs2_truncate_file() changing i_size as well as any thread ocfs2_page_mkwrite()
|
H A D | file.c | 417 * Do this before setting i_size. ocfs2_orphan_for_truncate() 431 di->i_size = cpu_to_le64(new_i_size); ocfs2_orphan_for_truncate() 457 (unsigned long long)le64_to_cpu(fe->i_size), ocfs2_truncate_file() 460 mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode), ocfs2_truncate_file() 461 "Inode %llu, inode i_size = %lld != di " ocfs2_truncate_file() 462 "i_size = %llu, i_flags = 0x%x\n", ocfs2_truncate_file() 465 (unsigned long long)le64_to_cpu(fe->i_size), ocfs2_truncate_file() 468 if (new_i_size > le64_to_cpu(fe->i_size)) { ocfs2_truncate_file() 470 (unsigned long long)le64_to_cpu(fe->i_size), ocfs2_truncate_file() 504 * i_size. */ ocfs2_truncate_file() 625 * update i_size. */ __ocfs2_extend_allocation() 682 (unsigned long long)le64_to_cpu(fe->i_size), __ocfs2_extend_allocation() 822 /* must not update i_size! */ ocfs2_write_zero_page() 838 di->i_size = cpu_to_le64((u64)i_size_read(inode)); ocfs2_write_zero_page() 1049 * still need to zero the area between the old i_size and the ocfs2_extend_no_holes() 1050 * new i_size. ocfs2_extend_no_holes() 2141 * inode. There's also the dinode i_size state which ocfs2_prepare_inode_for_write() 2143 * set inode->i_size at the end of a write. */ ocfs2_prepare_inode_for_write() 2199 * i_size changes wouldn't be synchronized, so ocfs2_prepare_inode_for_write() 2372 * necessary, we sample i_size, and cluster count here. ocfs2_file_write_iter() 2519 * like i_size. This allows the checks down below ocfs2_file_read_iter()
|
H A D | resize.c | 156 le64_add_cpu(&fe->i_size, (u64)new_clusters << osb->s_clustersize_bits); ocfs2_update_last_group_and_inode() 158 i_size_write(bm_inode, le64_to_cpu(fe->i_size)); ocfs2_update_last_group_and_inode() 566 le64_add_cpu(&fe->i_size, (u64)input->clusters << osb->s_clustersize_bits); ocfs2_group_add() 568 i_size_write(main_bm_inode, le64_to_cpu(fe->i_size)); ocfs2_group_add()
|
H A D | aops.c | 173 * need to use BH_New is when we're extending i_size on a file ocfs2_get_block() 313 * i_size might have just been updated as we grabed the meta lock. We ocfs2_readpage() 317 * (generic_file_read, vm_ops->fault) are clever enough to check i_size ocfs2_readpage() 814 * when final_size > inode->i_size, inode->i_size will be ocfs2_direct_IO_write() 862 loff_t i_size = i_size_read(inode); ocfs2_direct_IO_write() local 864 if (offset + count > i_size) { ocfs2_direct_IO_write() 871 if (i_size == i_size_read(inode)) { ocfs2_direct_IO_write() 873 i_size); ocfs2_direct_IO_write() 1042 * testing i_size against each block offset. 1480 * writing past i_size, we only need enough pages to cover the ocfs2_grab_pages_for_write() 1489 * i_size, whichever is greater. ocfs2_grab_pages_for_write() 2396 di->i_size = cpu_to_le64((u64)i_size_read(inode)); ocfs2_write_end_nolock()
|
H A D | ocfs2_trace.h | 1183 long long i_size, unsigned int i_clusters, 1187 TP_ARGS(ino, i_size, i_clusters, pos, len, flags, 1191 __field(long long, i_size) 1202 __entry->i_size = i_size; 1212 __entry->ino, __entry->i_size, __entry->i_clusters, 1354 unsigned int ip_clusters, unsigned long long i_size), 1355 TP_ARGS(ino, di_clusters, di_size, ip_clusters, i_size), 1361 __field(unsigned long long, i_size) 1368 __entry->i_size = i_size; 1371 __entry->di_size, __entry->ip_clusters, __entry->i_size)
|
H A D | inode.c | 302 /* Fast symlinks will have i_size but no allocated clusters. */ ocfs2_populate_inode() 351 i_size_write(inode, le64_to_cpu(fe->i_size)); ocfs2_populate_inode() 359 i_size_write(inode, le64_to_cpu(fe->i_size)); ocfs2_populate_inode() 364 i_size_write(inode, le64_to_cpu(fe->i_size)); ocfs2_populate_inode() 1273 fe->i_size = cpu_to_le64(i_size_read(inode)); ocfs2_mark_inode_dirty() 1305 i_size_write(inode, le64_to_cpu(fe->i_size)); ocfs2_refresh_inode()
|
H A D | quota_global.c | 170 loff_t i_size = i_size_read(gqinode); ocfs2_quota_read() local 178 if (off > i_size) ocfs2_quota_read() 180 if (off + len > i_size) ocfs2_quota_read() 181 len = i_size - off; ocfs2_quota_read()
|
/linux-4.1.27/fs/reiserfs/ |
H A D | ioctl.c | 180 if (inode->i_size == 0) { reiserfs_unpack() 194 write_from = inode->i_size & (blocksize - 1); reiserfs_unpack() 206 index = inode->i_size >> PAGE_CACHE_SHIFT; reiserfs_unpack()
|
H A D | inode.c | 409 * the file. This can happen in odd cases where i_size isn't _get_block_create_0() 413 if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size) _get_block_create_0() 415 if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) { _get_block_create_0() 417 inode->i_size - (le_ih_k_offset(ih) - 1) - _get_block_create_0() 434 * neighbor or rely on i_size _get_block_create_0() 722 && inode->i_size < i_block_size(inode) * 4) reiserfs_get_block() 724 && inode->i_size < i_block_size(inode))) reiserfs_get_block() 729 if ((new_offset + inode->i_sb->s_blocksize - 1) > inode->i_size) { reiserfs_get_block() 1095 inode->i_size += reiserfs_get_block() 1191 ((inode->i_size + real_space_diff() 1201 return inode->i_size + to_real_used_space() 1267 inode->i_size = sd_v1_size(sd); init_inode() 1277 blocks = (inode->i_size + 511) >> 9; init_inode() 1321 inode->i_size = sd_v2_size(sd); init_inode() 1930 loff_t i_size, struct dentry *dentry, reiserfs_new_inode() 2008 inode->i_size = i_size; reiserfs_new_inode() 2048 inode2sd_v1(&sd, inode, inode->i_size); reiserfs_new_inode() 2050 inode2sd(&sd, inode, inode->i_size); reiserfs_new_inode() 2092 i_size = ROUND_UP(i_size); reiserfs_new_inode() 2095 i_size); reiserfs_new_inode() 2191 unsigned long index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; grab_tail_page() 2195 unsigned long offset = (inode->i_size) & (PAGE_CACHE_SIZE - 1); grab_tail_page() 2202 * we know that we are only called with inode->i_size > 0. grab_tail_page() 2204 * If i_size % blocksize == 0, our file is currently block aligned grab_tail_page() 2267 unsigned long offset = inode->i_size & (PAGE_CACHE_SIZE - 1); reiserfs_truncate_file() 2277 if (inode->i_size > 0) { reiserfs_truncate_file() 2466 (byte_offset + bytes_copied) < inode->i_size) { map_block_for_writepage() 2527 unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT; reiserfs_write_full_page() 2566 last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1); reiserfs_write_full_page() 2753 truncate_inode_pages(inode->i_mapping, inode->i_size); reiserfs_truncate_failed_write() 2925 * to do the i_size updates here. reiserfs_write_end() 2927 if (pos + copied > inode->i_size) { reiserfs_write_end() 2937 && inode->i_size > i_block_size(inode) * 4) reiserfs_write_end() 2939 && inode->i_size > i_block_size(inode))) reiserfs_write_end() 2947 inode->i_size = pos + copied; reiserfs_write_end() 2978 if (pos + len > inode->i_size) reiserfs_write_end() 3016 * to do the i_size updates here. reiserfs_commit_write() 3018 if (pos > inode->i_size) { reiserfs_commit_write() 3026 && inode->i_size > i_block_size(inode) * 4) reiserfs_commit_write() 3028 && inode->i_size > i_block_size(inode))) reiserfs_commit_write() 3036 inode->i_size = pos; reiserfs_commit_write() 3150 * reiserfs_setattr updates i_size in the on disk invalidatepage_can_drop() 3155 * or an older one already has updated i_size on disk, invalidatepage_can_drop() 3294 * blocks outside i_size. Trim these off again. reiserfs_direct_IO() 3340 if (attr->ia_size > inode->i_size) { reiserfs_setattr() 1926 reiserfs_new_inode(struct reiserfs_transaction_handle *th, struct inode *dir, umode_t mode, const char *symname, loff_t i_size, struct dentry *dentry, struct inode *inode, struct reiserfs_security_handle *security) reiserfs_new_inode() argument
|
H A D | namei.c | 572 dir->i_size += paste_size; reiserfs_add_entry() 659 reiserfs_new_inode(&th, dir, mode, NULL, 0 /*i_size */ , dentry, reiserfs_create() 736 reiserfs_new_inode(&th, dir, mode, NULL, 0 /*i_size */ , dentry, reiserfs_mknod() 876 if (inode->i_size != EMPTY_DIR_SIZE && reiserfs_empty_dir() 877 inode->i_size != EMPTY_DIR_SIZE_V1) { reiserfs_empty_dir() 954 dir->i_size -= (DEH_SIZE + de.de_entrylen); reiserfs_rmdir() 1055 dir->i_size -= (de.de_entrylen + DEH_SIZE); reiserfs_unlink() 1592 old_dir->i_size -= DEH_SIZE + old_de.de_entrylen; reiserfs_rename()
|
H A D | file.c | 183 unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT; reiserfs_commit_page()
|
H A D | super.c | 339 INODE_PKEY(inode), inode->i_size); finish_unfinished() 1157 s->s_bdev->bd_inode->i_size >> s-> reiserfs_parse_options() 2411 loff_t i_size = i_size_read(inode); reiserfs_quota_read() local 2413 if (off > i_size) reiserfs_quota_read() 2415 if (off + len > i_size) reiserfs_quota_read() 2416 len = i_size - off; reiserfs_quota_read() 2505 if (inode->i_size < off + len - towrite) reiserfs_quota_write()
|
H A D | procfs.c | 290 loff_t size = file_inode(sb_info->oidmap.mapf)->i_size; show_oidmap()
|
H A D | tail_conversion.c | 289 * unformatted node. For now i_size is considered as guard for indirect2direct()
|
/linux-4.1.27/fs/coda/ |
H A D | file.c | 76 coda_inode->i_size = file_inode(host_file)->i_size; coda_file_write_iter() 77 coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9; coda_file_write_iter()
|
H A D | coda_linux.c | 103 inode->i_size = attr->va_size; coda_vattr_to_iattr()
|
/linux-4.1.27/fs/ufs/ |
H A D | truncate.c | 62 #define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift) 63 #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift) 450 UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n", ufs_truncate() 467 block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block); ufs_truncate() 503 if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) { ufs_setattr() 504 loff_t old_i_size = inode->i_size; ufs_setattr()
|
H A D | dir.c | 51 if (pos+len > dir->i_size) { ufs_commit_chunk() 70 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; ufs_dir_pages() 122 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { ufs_check_page() 123 limit = dir->i_size & ~PAGE_CACHE_MASK; ufs_check_page() 215 unsigned last_byte = inode->i_size; ufs_last_byte() 333 * This code plays outside i_size, so it locks the page ufs_add_link() 350 /* We hit i_size */ ufs_add_link() 447 if (pos > inode->i_size - UFS_DIR_REC_LEN(1)) ufs_readdir()
|
H A D | inode.c | 533 if (to > inode->i_size) ufs_write_failed() 534 truncate_pagecache(inode, inode->i_size); ufs_write_failed() 608 inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); ufs1_read_inode() 656 inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size); ufs2_read_inode() 728 (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; ufs_iget() 756 ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); ufs1_update_inode() 800 ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); ufs2_update_inode() 895 old_i_size = inode->i_size; ufs_evict_inode() 896 inode->i_size = 0; ufs_evict_inode()
|
H A D | namei.c | 148 inode->i_size = l-1; ufs_symlink() 262 inode->i_size = 0; ufs_rmdir()
|
/linux-4.1.27/fs/adfs/ |
H A D | inode.c | 52 if (to > inode->i_size) adfs_write_failed() 53 truncate_pagecache(inode, inode->i_size); adfs_write_failed() 254 inode->i_size = obj->size; adfs_iget() 256 inode->i_blocks = (inode->i_size + sb->s_blocksize - 1) >> adfs_iget() 284 ADFS_I(inode)->mmu_private = inode->i_size; adfs_iget() 367 obj.size = inode->i_size; adfs_write_inode()
|
H A D | dir.c | 32 ret = ops->read(sb, inode->i_ino, inode->i_size, &dir); adfs_readdir() 136 ret = ops->read(sb, inode->i_ino, inode->i_size, &dir); adfs_dir_lookup_byname()
|
/linux-4.1.27/fs/omfs/ |
H A D | inode.c | 57 inode->i_size = sbi->s_sys_blocksize; omfs_new_inode() 63 inode->i_size = 0; omfs_new_inode() 133 oi->i_size = cpu_to_be64(inode->i_size); __omfs_write_inode() 193 inode->i_size = 0; omfs_evict_inode() 245 inode->i_size = sbi->s_sys_blocksize; omfs_iget() 251 inode->i_size = be64_to_cpu(oi->i_size); omfs_iget()
|
H A D | file.c | 43 * than inode->i_size; omfs_shrink_inode() 49 if (inode->i_size != 0) omfs_shrink_inode() 313 if (to > inode->i_size) { omfs_write_failed() 314 truncate_pagecache(inode, inode->i_size); omfs_write_failed()
|
H A D | omfs_fs.h | 67 __be64 i_size; /* size of file, in bytes */ member in struct:omfs_inode
|
H A D | dir.c | 27 int nbuckets = (dir->i_size - OMFS_DIR_START)/8; omfs_get_bucket() 218 int nbuckets = (inode->i_size - OMFS_DIR_START) / 8; omfs_dir_is_empty() 421 nbuckets = (dir->i_size - OMFS_DIR_START) / 8; omfs_readdir()
|
/linux-4.1.27/fs/hfs/ |
H A D | extent.c | 480 inode->i_size); hfs_file_truncate() 481 if (inode->i_size > HFS_I(inode)->phys_size) { hfs_file_truncate() 487 size = inode->i_size - 1; hfs_file_truncate() 495 inode->i_size = HFS_I(inode)->phys_size; hfs_file_truncate() 497 } else if (inode->i_size == HFS_I(inode)->phys_size) hfs_file_truncate() 499 size = inode->i_size + HFS_SB(sb)->alloc_blksz - 1; hfs_file_truncate() 541 HFS_I(inode)->phys_size = inode->i_size; hfs_file_truncate() 542 HFS_I(inode)->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; hfs_file_truncate()
|
H A D | btree.c | 241 HFS_I(inode)->phys_size = inode->i_size = hfs_bmap_alloc() 244 HFS_I(inode)->fs_blocks = inode->i_size >> hfs_bmap_alloc() 246 inode_set_bytes(inode, inode->i_size); hfs_bmap_alloc() 247 count = inode->i_size >> tree->node_size_shift; hfs_bmap_alloc()
|
H A D | dir.c | 65 if (ctx->pos >= inode->i_size) hfs_readdir() 104 if (ctx->pos >= inode->i_size) hfs_readdir() 149 if (ctx->pos >= inode->i_size) hfs_readdir() 259 if (S_ISDIR(inode->i_mode) && inode->i_size != 2) hfs_remove()
|
H A D | catalog.c | 92 if (dir->i_size >= HFS_MAX_VALENCE) hfs_cat_create() 127 dir->i_size++; hfs_cat_create() 262 dir->i_size--; hfs_cat_delete() 322 dst_dir->i_size++; hfs_cat_move() 334 src_dir->i_size--; hfs_cat_move()
|
H A D | inode.c | 43 if (to > inode->i_size) { hfs_write_failed() 44 truncate_pagecache(inode, inode->i_size); hfs_write_failed() 140 * blocks outside i_size. Trim these off again. hfs_direct_IO() 201 inode->i_size = 2; hfs_new_inode() 255 inode->i_size = 0; hfs_delete_inode() 276 inode->i_size = HFS_I(inode)->phys_size = log_size; hfs_inode_read_fork() 358 inode->i_size = be16_to_cpu(rec->dir.Val) + 2; hfs_read_inode() 409 *log_size = cpu_to_be32(inode->i_size); hfs_inode_write_fork() 468 rec.dir.Val = cpu_to_be16(inode->i_size - 2); hfs_write_inode()
|
/linux-4.1.27/fs/exofs/ |
H A D | inode.c | 385 loff_t i_size = i_size_read(inode); readpage_strip() local 386 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; readpage_strip() 402 len = i_size & ~PAGE_CACHE_MASK; readpage_strip() 576 loff_t i_size = i_size_read(pcol->inode); __r4w_get_page() local 578 if (offset >= i_size) { __r4w_get_page() 580 EXOFS_DBGMSG2("offset >= i_size index=0x%lx\n", index); __r4w_get_page() 701 loff_t i_size = i_size_read(inode); writepage_strip() local 702 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; writepage_strip() 716 len = i_size & ~PAGE_CACHE_MASK; writepage_strip() 860 /* i_mutex held using inode->i_size directly */ _write_failed() 863 if (to > inode->i_size) _write_failed() 864 truncate_pagecache(inode, inode->i_size); _write_failed() 888 loff_t i_size = i_size_read(mapping->host); exofs_write_begin() local 889 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; exofs_write_begin() 895 rlen = i_size & ~PAGE_CACHE_MASK; exofs_write_begin() 936 loff_t i_size = inode->i_size; exofs_write_end() local 944 if (i_size != inode->i_size) exofs_write_end() 1193 oi->i_commit_size = le64_to_cpu(fcb.i_size); exofs_iget() 1319 oi->i_commit_size = inode->i_size = 0; exofs_new_inode() 1403 fcb->i_size = cpu_to_le64(oi->i_commit_size); exofs_update_inode() 1492 inode->i_size = 0; exofs_evict_inode()
|
H A D | dir.c | 47 /* Accesses dir's inode->i_size must be called under inode lock */ dir_pages() 50 return (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; dir_pages() 55 loff_t last_byte = inode->i_size; exofs_last_byte() 74 if (pos+len > dir->i_size) { exofs_commit_chunk() 99 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { exofs_check_page() 100 limit = dir->i_size & ~PAGE_CACHE_MASK; exofs_check_page() 252 if (pos > inode->i_size - EXOFS_DIR_REC_LEN(1)) exofs_readdir()
|
H A D | namei.c | 127 inode->i_size = l-1; exofs_symlink() 222 inode->i_size = 0; exofs_rmdir()
|
H A D | common.h | 170 __le64 i_size; /* Size of the file */ member in struct:exofs_fcb
|
/linux-4.1.27/fs/isofs/ |
H A D | rock.c | 454 inode->i_size = symlink_len; parse_rock_ridge_inode_internal() 459 inode->i_size += parse_rock_ridge_inode_internal() 463 inode->i_size += 1; parse_rock_ridge_inode_internal() 466 inode->i_size += 2; parse_rock_ridge_inode_internal() 470 inode->i_size += 1; parse_rock_ridge_inode_internal() 487 inode->i_size += parse_rock_ridge_inode_internal() 498 inode->i_size += 1; parse_rock_ridge_inode_internal() 501 symlink_len = inode->i_size; parse_rock_ridge_inode_internal() 533 inode->i_size = reloc->i_size; parse_rock_ridge_inode_internal() 571 inode->i_size = parse_rock_ridge_inode_internal()
|
H A D | compress.c | 223 end_off = min_t(loff_t, start_off + PAGE_CACHE_SIZE, inode->i_size); zisofs_fill_pages() 310 end_index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; zisofs_readpage() 312 * If this page is wholly outside i_size we just return zero; zisofs_readpage()
|
H A D | namei.c | 52 while (f_pos < dir->i_size) { isofs_find_entry()
|
H A D | inode.c | 1024 if (b_off > ((inode->i_size + PAGE_CACHE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) { isofs_get_blocks() 1027 (unsigned long long)inode->i_size); isofs_get_blocks() 1150 inode->i_size = 0; isofs_read_level3_size() 1207 inode->i_size += isonum_733(de->size); isofs_read_level3_size() 1334 inode->i_size = isonum_733(de->size); isofs_read_inode() 1344 inode->i_size &= 0x00ffffff; isofs_read_inode() 1348 inode->i_size = 0; isofs_read_inode() 1379 inode->i_blocks = (inode->i_size + 511) >> 9; isofs_read_inode()
|
H A D | dir.c | 100 while (ctx->pos < inode->i_size) { do_isofs_readdir()
|
/linux-4.1.27/fs/btrfs/ |
H A D | ordered-data.c | 873 * i_size. i_size is updated to cover any fully written part of the file. 881 u64 i_size = i_size_read(inode); btrfs_ordered_update_i_size() local 900 if (disk_i_size > i_size) { btrfs_ordered_update_i_size() 901 BTRFS_I(inode)->disk_i_size = i_size; btrfs_ordered_update_i_size() 907 * if the disk i_size is already at the inode->i_size, or btrfs_ordered_update_i_size() 908 * this ordered extent is inside the disk i_size, we're done btrfs_ordered_update_i_size() 910 if (disk_i_size == i_size) btrfs_ordered_update_i_size() 923 * if we find an ordered extent then we can't update disk i_size btrfs_ordered_update_i_size() 949 if (test->file_offset >= i_size) btrfs_ordered_update_i_size() 954 * undealt i_size. Or we will not know the real btrfs_ordered_update_i_size() 955 * i_size. btrfs_ordered_update_i_size() 967 new_i_size = min_t(u64, offset, i_size); btrfs_ordered_update_i_size() 971 * we hold the real i_size in ->outstanding_isize. btrfs_ordered_update_i_size() 974 new_i_size = min_t(u64, ordered->outstanding_isize, i_size); btrfs_ordered_update_i_size()
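The btrfs_ordered_update_i_size() comments above describe keeping disk_i_size, the size that is safe to expose after a crash, behind the in-memory i_size: it only advances across ranges whose ordered extents have completed, and it is clamped back if a truncate has already shrunk i_size. A heavily simplified model of that clamping, with plain parameters standing in for the btrfs structures (an assumed sketch, not the real function):

#include <linux/types.h>
#include <linux/kernel.h>

/* Simplified model, not the btrfs code: advance the persisted size to the
 * end of a just-completed contiguous write, but never past i_size, and only
 * when no earlier ordered extent is still outstanding. */
static u64 advance_disk_i_size(u64 disk_i_size, u64 ordered_end,
			       u64 i_size, bool earlier_io_pending)
{
	if (disk_i_size > i_size)	/* truncate already shrank i_size */
		return i_size;
	if (earlier_io_pending)		/* unwritten data still precedes us */
		return disk_i_size;
	return min_t(u64, ordered_end, i_size);
}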
|
H A D | tree-log.c | 425 * If this is a directory we need to reset the i_size to overwrite_item() 445 * If this is a directory we need to reset the i_size to 0 so overwrite_item() 481 * Also, don't overwrite i_size on directories during replay. overwrite_item() 483 * state of the tree found in the subvolume, and i_size is modified overwrite_item() 1706 btrfs_i_size_write(dir, dir->i_size + name_len * 2); replay_one_name() 3031 u64 i_size; btrfs_del_dir_entries_in_log() local 3035 i_size = btrfs_inode_size(path->nodes[0], item); btrfs_del_dir_entries_in_log() 3036 if (i_size > bytes_del) btrfs_del_dir_entries_in_log() 3037 i_size -= bytes_del; btrfs_del_dir_entries_in_log() 3039 i_size = 0; btrfs_del_dir_entries_in_log() 3040 btrfs_set_inode_size(path->nodes[0], item, i_size); btrfs_del_dir_entries_in_log() 3253 * its i_size could never decrement to the value log_dir_items() 3432 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token); fill_inode_item() 4208 * last extent and the i_size of our inode is explicitly marked in the log. This 4242 const u64 i_size = i_size_read(inode); btrfs_log_trailing_hole() local 4264 hole_size = i_size; btrfs_log_trailing_hole() 4270 * If there's an extent beyond i_size, an explicit hole was btrfs_log_trailing_hole() 4273 if (key.offset >= i_size) btrfs_log_trailing_hole() 4284 ASSERT(len == i_size); btrfs_log_trailing_hole() 4289 /* Last extent goes beyond i_size, no need to log a hole. */ btrfs_log_trailing_hole() 4290 if (key.offset + len > i_size) btrfs_log_trailing_hole() 4293 hole_size = i_size - hole_start; btrfs_log_trailing_hole() 4297 /* Last extent ends at i_size. */ btrfs_log_trailing_hole() 4536 * we use the inode's current i_size, after log replay btrfs_log_inode() 4918 * The directory's inode item with a wrong i_size is not a problem as well, 4919 * since we don't use it at log replay time to set the i_size in the inode 5143 * directory index entries and a wrong directory inode's i_size. btrfs_log_inode_parent()
|
H A D | file.c | 518 * we've only changed i_size in ram, and we haven't updated btrfs_dirty_pages() 519 * the disk i_size. There is no need to log the inode btrfs_dirty_pages() 1384 if (start_pos < inode->i_size) { lock_and_cleanup_extent_if_need() 2286 ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE); btrfs_punch_hole() 2587 if (alloc_start > inode->i_size) { btrfs_fallocate() 2595 * need to zero out the end of the page if i_size lands in the btrfs_fallocate() 2598 ret = btrfs_truncate_page(inode, inode->i_size, 0, 0); btrfs_fallocate() 2663 (cur_offset >= inode->i_size && btrfs_fallocate() 2670 } else if (actual_end > inode->i_size && btrfs_fallocate() 2678 * update i_size and the inode item. btrfs_fallocate() 2726 if (inode->i_size == 0) find_desired_extent() 2745 while (start < inode->i_size) { find_desired_extent() 2769 if (whence == SEEK_DATA && start >= inode->i_size) find_desired_extent() 2772 *offset = min_t(loff_t, start, inode->i_size); find_desired_extent()
|
H A D | extent_io.c | 2639 loff_t i_size = i_size_read(inode); bio_for_each_segment_all() local 2640 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; bio_for_each_segment_all() 2643 /* Zero out the end if this page straddles i_size */ bio_for_each_segment_all() 2644 off = i_size & (PAGE_CACHE_SIZE-1); bio_for_each_segment_all() 3367 loff_t i_size, __extent_writepage_io() 3413 if (i_size <= start) { __extent_writepage_io() 3424 if (cur >= i_size) { __extent_writepage_io() 3488 unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1; __extent_writepage_io() 3535 loff_t i_size = i_size_read(inode); __extent_writepage() local 3536 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT; __extent_writepage() 3551 pg_offset = i_size & (PAGE_CACHE_SIZE - 1); __extent_writepage() 3580 i_size, nr_written, write_flags, &nr); __extent_writepage() 4318 page->mapping->host->i_size > 16 * 1024 * 1024) { try_release_extent_mapping() 4426 * lookup the last file extent. We're not using i_size here extent_fiemap() 4427 * because there might be preallocation past i_size extent_fiemap() 4443 /* have to trust i_size as the end */ extent_fiemap() 3363 __extent_writepage_io(struct inode *inode, struct page *page, struct writeback_control *wbc, struct extent_page_data *epd, loff_t i_size, unsigned long nr_written, int write_flags, int *nr_ret) __extent_writepage_io() argument
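The end_index and offset computation in __extent_writepage() above is the stock writepage guard that also appears in the exofs, f2fs and gfs2 entries in this listing: pages wholly beyond i_size are skipped, and the page that straddles i_size has its tail zeroed so stale bytes are never written out. A generic sketch of that guard, assuming only the 4.1-era page cache helpers and not tied to btrfs:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

/* Returns 0 if the page should be written, 1 if it lies wholly beyond
 * i_size and can be skipped; zeroes the tail of a straddling page. */
static int clamp_page_to_i_size(struct inode *inode, struct page *page)
{
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);

	if (page->index < end_index)		/* fully inside i_size */
		return 0;
	if (page->index > end_index || !offset)	/* fully outside (truncate racing) */
		return 1;
	zero_user_segment(page, offset, PAGE_CACHE_SIZE); /* straddles i_size */
	return 0;
}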
|
H A D | ioctl.c | 288 if (inode->i_size == 0) btrfs_ioctl_setflags() 299 if (inode->i_size == 0) btrfs_ioctl_setflags() 591 btrfs_i_size_write(dir, dir->i_size + namelen * 2); create_subvol() 1532 new_size = device->bdev->bd_inode->i_size; btrfs_ioctl_resize() 1573 if (new_size > device->bdev->bd_inode->i_size) { btrfs_ioctl_resize() 2888 if (off + len > inode->i_size || off + len < off) extent_same_check_offsets() 3102 if (endoff > inode->i_size) clone_finish_inode_update() 3184 * inode's i_size because of that (i_size updates are done while holding the 3761 if (off + len > src->i_size || off + len < off) btrfs_ioctl_clone() 3764 olen = len = src->i_size - off; btrfs_ioctl_clone() 3766 if (off + len == src->i_size) btrfs_ioctl_clone() 3767 len = ALIGN(src->i_size, bs) - off; btrfs_ioctl_clone() 3785 if (destoff > inode->i_size) { btrfs_ioctl_clone() 3786 ret = btrfs_cont_expand(inode, inode->i_size, destoff); btrfs_ioctl_clone()
|
H A D | inode.c | 218 * extend the file past i_size without locking insert_inline_extent() 225 BTRFS_I(inode)->disk_i_size = inode->i_size; insert_inline_extent() 429 * we don't want to send crud past the end of i_size through compress_file_range() 3951 btrfs_i_size_write(dir, dir->i_size - name_len * 2); __btrfs_unlink_inode() 4117 btrfs_i_size_write(dir, dir->i_size - name_len * 2); btrfs_unlink_subvol() 4135 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) btrfs_rmdir() 4229 * csum items that cross the new i_size are truncated to the new size 4743 * rest of the page before we expand the i_size, otherwise we could btrfs_cont_expand() 6234 btrfs_i_size_write(parent_inode, parent_inode->i_size + btrfs_add_link() 7577 * Need to update the i_size under the extent lock so buffered btrfs_get_blocks_direct() 7578 * readers will get the updated i_size when we unlock. btrfs_get_blocks_direct() 8352 if (offset + count <= inode->i_size) { btrfs_direct_IO() 8720 ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask), btrfs_truncate() 8794 inode->i_size, btrfs_truncate() 9133 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) btrfs_rename() 9160 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) btrfs_rename() 9631 u64 i_size; __btrfs_prealloc_file_range() local 9712 (actual_len > inode->i_size) && __btrfs_prealloc_file_range() 9713 (cur_offset > inode->i_size)) { __btrfs_prealloc_file_range() 9715 i_size = actual_len; __btrfs_prealloc_file_range() 9717 i_size = cur_offset; __btrfs_prealloc_file_range() 9718 i_size_write(inode, i_size); __btrfs_prealloc_file_range() 9719 btrfs_ordered_update_i_size(inode, i_size, NULL); __btrfs_prealloc_file_range()
|
/linux-4.1.27/fs/ext4/ |
H A D | truncate.h | 14 truncate_inode_pages(inode->i_mapping, inode->i_size); ext4_truncate_failed_write()
|
H A D | inline.c | 939 * No need to use i_size_read() here, the i_size ext4_da_write_inline_data_end() 942 * But it's important to update i_size while still holding page lock: ext4_da_write_inline_data_end() 943 * page writeout could otherwise come in and zero beyond i_size. ext4_da_write_inline_data_end() 945 if (pos+copied > inode->i_size) { ext4_da_write_inline_data_end() 1101 dir->i_size = EXT4_I(dir)->i_disksize = EXT4_I(dir)->i_inline_size; ext4_update_inline_dir() 1141 inode->i_size = inode->i_sb->s_blocksize; ext4_finish_convert_inline_dir() 1607 inode->i_size = EXT4_I(inode)->i_disksize = inline_size; ext4_try_create_inline_dir() 1778 while (offset < dir->i_size) { empty_inline_dir() 1899 size_t i_size; ext4_inline_data_truncate() local 1929 i_size = inode->i_size; ext4_inline_data_truncate() 1931 EXT4_I(inode)->i_disksize = i_size; ext4_inline_data_truncate() 1933 if (i_size < inline_size) { ext4_inline_data_truncate() 1951 i.value_len = i_size > EXT4_MIN_INLINE_DATA_SIZE ? ext4_inline_data_truncate() 1952 i_size - EXT4_MIN_INLINE_DATA_SIZE : 0; ext4_inline_data_truncate() 1958 if (i_size < EXT4_MIN_INLINE_DATA_SIZE) { ext4_inline_data_truncate() 1960 memset(p + i_size, 0, ext4_inline_data_truncate() 1961 EXT4_MIN_INLINE_DATA_SIZE - i_size); ext4_inline_data_truncate() 1964 EXT4_I(inode)->i_inline_size = i_size < ext4_inline_data_truncate() 1966 EXT4_MIN_INLINE_DATA_SIZE : i_size; ext4_inline_data_truncate()
|
H A D | inode.c | 164 * moment, get_block can be called only for blocks inside i_size since ext4_truncate_restart_trans() 253 inode->i_size = 0; ext4_evict_inode() 1085 * outside i_size. Trim these off again. Don't need ext4_write_begin() 1091 if (pos + len > inode->i_size && ext4_can_truncate(inode)) ext4_write_begin() 1095 if (pos + len > inode->i_size) { ext4_write_begin() 1144 loff_t old_size = inode->i_size; ext4_write_end() 1168 * it's important to update i_size while still holding page lock: ext4_write_end() 1169 * page writeout could otherwise come in and zero beyond i_size. ext4_write_end() 1186 if (pos + len > inode->i_size && ext4_can_truncate(inode)) ext4_write_end() 1189 * inode->i_size. So truncate them ext4_write_end() 1197 if (pos + len > inode->i_size) { ext4_write_end() 1218 loff_t old_size = inode->i_size; ext4_journalled_write_end() 1260 if (pos + len > inode->i_size && ext4_can_truncate(inode)) ext4_journalled_write_end() 1263 * inode->i_size. So truncate them ext4_journalled_write_end() 1270 if (pos + len > inode->i_size) { ext4_journalled_write_end() 2251 * truncate are avoided by checking i_size under i_data_sem. mpage_map_and_submit_extent() 2256 loff_t i_size; mpage_map_and_submit_extent() local 2259 i_size = i_size_read(inode); mpage_map_and_submit_extent() 2260 if (disksize > i_size) mpage_map_and_submit_extent() 2261 disksize = i_size; mpage_map_and_submit_extent() 2731 * outside i_size. Trim these off again. Don't need ext4_da_write_begin() 2734 if (pos + len > inode->i_size) ext4_da_write_begin() 2793 * generic_write_end() will run mark_inode_dirty() if i_size ext4_da_write_end() 2803 * new_i_size is less that inode->i_size ext4_da_write_end() 3115 /* Use the old path for reads and writes beyond i_size. */ ext4_ext_direct_IO() 3116 if (iov_iter_rw(iter) != WRITE || final_size > inode->i_size) ext4_ext_direct_IO() 3615 /* No need to punch hole beyond i_size */ ext4_punch_hole() 3616 if (offset >= inode->i_size) ext4_punch_hole() 3620 * If the hole extends beyond i_size, set the hole ext4_punch_hole() 3621 * to end after the page that contains i_size ext4_punch_hole() 3623 if (offset + length > inode->i_size) { ext4_punch_hole() 3624 length = inode->i_size + ext4_punch_hole() 3625 PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) - ext4_punch_hole() 3764 * The committed inode has the new, desired i_size (which is the same as 3793 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) ext4_truncate() 3805 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { ext4_truncate() 3821 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) ext4_truncate() 3822 ext4_block_truncate_page(handle, mapping, inode->i_size); ext4_truncate() 4198 inode->i_size = ext4_isize(raw_inode); ext4_iget() 4199 ei->i_disksize = inode->i_size; ext4_iget() 4298 nd_terminate_link(ei->i_data, inode->i_size, ext4_iget() 4607 * inode->i_size = expr; 4610 * and the new i_size will be lost. Plus the inode will no longer be on the 4660 * buffers that are attached to a page stradding i_size and are undergoing 4671 offset = inode->i_size & (PAGE_CACHE_SIZE - 1); ext4_wait_for_tail_page_commit() 4681 inode->i_size >> PAGE_CACHE_SHIFT); ext4_wait_for_tail_page_commit() 4707 * shrinks i_size, we put the inode on the orphan list and modify 4765 if (attr->ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) { ext4_setattr() 4775 if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size) ext4_setattr() 4779 (attr->ia_size < inode->i_size)) { ext4_setattr() 4801 * We have to update i_size under i_data_sem together ext4_setattr() 4814 loff_t oldsize = inode->i_size; ext4_setattr() 4817 pagecache_isize_extended(inode, oldsize, inode->i_size); ext4_setattr() 4838 truncate_pagecache(inode, inode->i_size); ext4_setattr() 4843 * inode->i_size for cases like truncation of fallocated space ext4_setattr() 5136 * i_size has been changed by generic_commit_write() and we thus need
|
H A D | move_extent.c | 299 ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) { move_extent_per_page() 301 tmp_data_size = orig_inode->i_size & (blocksize - 1); move_extent_per_page() 386 * but keeping in mind that i_size will not change */ move_extent_per_page() 508 if ((!orig_inode->i_size) || (!donor_inode->i_size)) { mext_check_arguments()
|
H A D | dir.c | 46 ((inode->i_size >> sb->s_blocksize_bits) == 1) || is_dx_dir() 151 while (ctx->pos < inode->i_size) { ext4_readdir() 226 while (ctx->pos < inode->i_size ext4_readdir() 265 if ((ctx->pos < inode->i_size) && !dir_relax(inode)) ext4_readdir()
|
H A D | indirect.c | 452 * the new i_size. But that is not done here - it is done in ext4_splice_branch() 641 * If the O_DIRECT write is intantiating holes inside i_size and the machine 660 if (final_size > inode->i_size) { ext4_ind_direct_IO() 673 ei->i_disksize = inode->i_size; ext4_ind_direct_IO() 728 * but cannot extend i_size. Bail out and pretend ext4_ind_direct_IO() 740 if (end > inode->i_size) { ext4_ind_direct_IO() 849 * partially truncated if some data below the new i_size is referred 1216 last_block = (inode->i_size + blocksize-1) ext4_ind_truncate() 1232 * the new, shorter inode size (held for now in i_size) into the ext4_ind_truncate() 1236 ei->i_disksize = inode->i_size; ext4_ind_truncate()
|
H A D | namei.c | 58 ((inode->i_size >> 10) >= ext4_append() 62 *block = inode->i_size >> inode->i_sb->s_blocksize_bits; ext4_append() 67 inode->i_size += inode->i_sb->s_blocksize; ext4_append() 68 EXT4_I(inode)->i_disksize = inode->i_size; ext4_append() 1414 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); ext4_find_entry() 1493 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); ext4_find_entry() 2182 blocks = dir->i_size >> sb->s_blocksize_bits; ext4_add_entry() 2665 inode->i_size = 0; ext4_init_new_dir() 2771 if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) { ext4_empty_dir() 2794 while (offset < inode->i_size) { ext4_empty_dir() 3048 inode->i_size = 0; ext4_rmdir() 3257 inode->i_size = disk_link.len - 1; ext4_symlink() 3259 EXT4_I(inode)->i_disksize = inode->i_size; ext4_symlink()
|
/linux-4.1.27/fs/nilfs2/ |
H A D | dir.c | 66 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; dir_pages() 75 unsigned last_byte = inode->i_size; nilfs_last_byte() 101 if (pos + copied > dir->i_size) nilfs_commit_chunk() 121 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { nilfs_check_page() 122 limit = dir->i_size & ~PAGE_CACHE_MASK; nilfs_check_page() 269 if (pos > inode->i_size - NILFS_DIR_REC_LEN(1)) nilfs_readdir() 372 dir->i_ino, dir->i_size, nilfs_find_entry() 454 * This code plays outside i_size, so it locks the page nilfs_add_link() 471 /* We hit i_size */ nilfs_add_link()
|
H A D | inode.c | 263 if (to > inode->i_size) { nilfs_write_failed() 264 truncate_pagecache(inode, inode->i_size); nilfs_write_failed() 324 * blocks outside i_size. Trim these off again. nilfs_direct_IO() 470 inode->i_size = le64_to_cpu(raw_inode->i_size); nilfs_read_inode_common() 661 raw_inode->i_size = cpu_to_le64(inode->i_size); nilfs_write_inode_common() 758 blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits; nilfs_truncate() 761 block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block); nilfs_truncate()
|
/linux-4.1.27/fs/afs/ |
H A D | write.c | 89 loff_t i_size; afs_fill_page() local 95 i_size = i_size_read(&vnode->vfs_inode); afs_fill_page() 96 if (pos + PAGE_CACHE_SIZE > i_size) afs_fill_page() 97 len = i_size - pos; afs_fill_page() 249 loff_t i_size, maybe_i_size; afs_write_end() local 256 i_size = i_size_read(&vnode->vfs_inode); afs_write_end() 257 if (maybe_i_size > i_size) { afs_write_end() 259 i_size = i_size_read(&vnode->vfs_inode); afs_write_end() 260 if (maybe_i_size > i_size) afs_write_end()
|
H A D | fsclient.c | 1159 loff_t size, loff_t pos, loff_t i_size, afs_fs_store_data64() 1206 *bp++ = htonl(i_size >> 32); afs_fs_store_data64() 1207 *bp++ = htonl((u32) i_size); afs_fs_store_data64() 1222 loff_t size, pos, i_size; afs_fs_store_data() local 1234 i_size = i_size_read(&vnode->vfs_inode); afs_fs_store_data() 1235 if (pos + size > i_size) afs_fs_store_data() 1236 i_size = size + pos; afs_fs_store_data() 1238 _debug("size %llx, at %llx, i_size %llx", afs_fs_store_data() 1240 (unsigned long long) i_size); afs_fs_store_data() 1242 if (pos >> 32 || i_size >> 32 || size >> 32 || (pos + size) >> 32) afs_fs_store_data() 1244 size, pos, i_size, wait_mode); afs_fs_store_data() 1281 *bp++ = htonl(i_size); afs_fs_store_data() 1155 afs_fs_store_data64(struct afs_server *server, struct afs_writeback *wb, pgoff_t first, pgoff_t last, unsigned offset, unsigned to, loff_t size, loff_t pos, loff_t i_size, const struct afs_wait_mode *wait_mode) afs_fs_store_data64() argument
|
H A D | inode.c | 66 if (vnode->status.size != inode->i_size) afs_inode_map_status() 73 inode->i_size = vnode->status.size; afs_inode_map_status() 174 inode->i_size = 0; afs_iget_autocell() 258 inode->i_size = vnode->status.size; afs_iget()
|
/linux-4.1.27/fs/ext2/ |
H A D | dir.c | 75 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; dir_pages() 85 unsigned last_byte = inode->i_size; ext2_last_byte() 102 if (pos+len > dir->i_size) { ext2_commit_chunk() 130 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { ext2_check_page() 131 limit = dir->i_size & ~PAGE_CACHE_MASK; ext2_check_page() 302 if (pos > inode->i_size - EXT2_DIR_REC_LEN(1)) ext2_readdir() 417 dir->i_ino, dir->i_size, ext2_find_entry() 505 * This code plays outside i_size, so it locks the page ext2_add_link() 522 /* We hit i_size */ ext2_add_link()
|
H A D | inode.c | 59 if (to > inode->i_size) { ext2_write_failed() 60 truncate_pagecache(inode, inode->i_size); ext2_write_failed() 61 ext2_truncate_blocks(inode, inode->i_size); ext2_write_failed() 89 inode->i_size = 0; ext2_evict_inode() 931 * truncated if some data below the new i_size is referred from it (and 1174 * IS_APPEND inode to have blocks-past-i_size trimmed off. ext2_truncate_blocks() 1347 inode->i_size = le32_to_cpu(raw_inode->i_size); ext2_iget() 1372 inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32; ext2_iget() 1407 nd_terminate_link(ei->i_data, inode->i_size, ext2_iget() 1478 raw_inode->i_size = cpu_to_le32(inode->i_size); __ext2_write_inode() 1493 raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32); __ext2_write_inode() 1494 if (inode->i_size > 0x7fffffffULL) { __ext2_write_inode() 1562 if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) { ext2_setattr()
|
H A D | namei.c | 193 inode->i_size = l-1; ext2_symlink() 308 inode->i_size = 0; ext2_rmdir()
|
H A D | super.c | 1119 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { ext2_fill_super() 1453 loff_t i_size = i_size_read(inode); ext2_quota_read() local 1455 if (off > i_size) ext2_quota_read() 1457 if (off+len > i_size) ext2_quota_read() 1458 len = i_size-off; ext2_quota_read() 1531 if (inode->i_size < off+len-towrite) ext2_quota_write()
|
/linux-4.1.27/fs/cramfs/ |
H A D | inode.c | 116 inode->i_size = cramfs_inode->size; get_cramfs_inode() 193 devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT; cramfs_read() 378 if (ctx->pos >= inode->i_size) cramfs_readdir() 389 while (offset < inode->i_size) { cramfs_readdir() 441 while (offset < dir->i_size) { cramfs_lookup() 498 maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; cramfs_readpage()
|
/linux-4.1.27/mm/ |
H A D | truncate.c | 656 * inode's new i_size must already be written before truncate_pagecache 691 * truncate_setsize updates i_size and performs pagecache truncation (if 701 loff_t oldsize = inode->i_size; truncate_setsize() 711 * pagecache_isize_extended - update pagecache after extension of i_size 712 * @inode: inode for which i_size was extended 717 * write starting after current i_size. We mark the page straddling current 718 * i_size RO so that page_mkwrite() is called on the nearest write access to 720 * the page before user writes to the page via mmap after the i_size has been 723 * The function must be called after i_size is updated so that page fault 724 * coming after we unlock the page will already see the new i_size. 726 * makes sure i_size is stable but also that userspace cannot observe new 727 * i_size value before we are prepared to store mmap writes at new inode size. 736 WARN_ON(to > inode->i_size); pagecache_isize_extended()
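The truncate.c comments above amount to an ordering contract: when shrinking, i_size must be updated before the pagecache is truncated, and when a setattr grows the file, pagecache_isize_extended() must run after the new i_size is visible so a racing mmap write takes a fault and sees it. A hedged sketch of a filesystem size-change path that follows those rules, using only the generic helpers and assuming the caller holds i_mutex:

#include <linux/fs.h>
#include <linux/mm.h>

/* Illustrative only: grow or shrink a file following the ordering
 * described above.  The caller is assumed to hold i_mutex. */
static void example_change_size(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = i_size_read(inode);

	if (newsize > oldsize) {
		i_size_write(inode, newsize);
		/* Once the new i_size is visible, write-protect the page
		 * straddling the old EOF so mmap writes beyond it go
		 * through page_mkwrite(). */
		pagecache_isize_extended(inode, oldsize, newsize);
	} else {
		/* Sets i_size first, then drops pagecache beyond it. */
		truncate_setsize(inode, newsize);
	}
	mark_inode_dirty(inode);
}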
|
H A D | shmem.c | 79 /* Pretend that each entry is of this size in directory's i_size */ 100 SGP_READ, /* don't exceed i_size, don't allocate page */ 101 SGP_CACHE, /* don't exceed i_size, may allocate page */ 103 SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */ 556 loff_t oldsize = inode->i_size; shmem_setattr() 592 shmem_unacct_size(info->flags, inode->i_size); shmem_evict_inode() 593 inode->i_size = 0; shmem_evict_inode() 1374 if (!user_shm_lock(inode->i_size, user)) shmem_lock() 1380 user_shm_unlock(inode->i_size, user); shmem_lock() 1439 inode->i_size = 2 * BOGO_DIRENT_SIZE; shmem_get_inode() 1487 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) shmem_write_begin() 1501 if (pos + copied > inode->i_size) shmem_write_end() 1546 loff_t i_size = i_size_read(inode); shmem_file_read_iter() local 1548 end_index = i_size >> PAGE_CACHE_SHIFT; shmem_file_read_iter() 1552 nr = i_size & ~PAGE_CACHE_MASK; shmem_file_read_iter() 1571 i_size = i_size_read(inode); shmem_file_read_iter() 1572 end_index = i_size >> PAGE_CACHE_SHIFT; shmem_file_read_iter() 1574 nr = i_size & ~PAGE_CACHE_MASK; shmem_file_read_iter() 1801 /* We're holding i_mutex so we can access i_size directly */ shmem_file_llseek() 1805 else if (offset >= inode->i_size) shmem_file_llseek() 1809 end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; shmem_file_llseek() 1813 if (new_offset < inode->i_size) shmem_file_llseek() 1818 offset = inode->i_size; shmem_file_llseek() 2102 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { shmem_fallocate() 2167 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) shmem_fallocate() 2221 dir->i_size += BOGO_DIRENT_SIZE; shmem_mknod() 2289 dir->i_size += BOGO_DIRENT_SIZE; shmem_link() 2306 dir->i_size -= BOGO_DIRENT_SIZE; shmem_unlink() 2410 old_dir->i_size -= BOGO_DIRENT_SIZE; shmem_rename2() 2411 new_dir->i_size += BOGO_DIRENT_SIZE; shmem_rename2() 2446 inode->i_size = len-1; shmem_symlink() 2470 dir->i_size += BOGO_DIRENT_SIZE; shmem_symlink() 3348 inode->i_size = size; __shmem_file_setup()
|
/linux-4.1.27/fs/hostfs/ |
H A D | hostfs_kern.c | 414 int end_index = inode->i_size >> PAGE_CACHE_SHIFT; hostfs_writepage() 418 count = inode->i_size & (PAGE_CACHE_SIZE-1); hostfs_writepage() 428 if (base > inode->i_size) hostfs_writepage() 429 inode->i_size = base; hostfs_writepage() 500 * i_size against the last byte written. hostfs_write_end() 502 if (err > 0 && (pos > inode->i_size)) hostfs_write_end() 503 inode->i_size = pos; hostfs_write_end() 561 ino->i_size = st.size; read_name()
|
/linux-4.1.27/fs/nfs/ |
H A D | fscache-index.c | 191 *size = nfsi->vfs_inode.i_size; nfs_fscache_inode_get_attr() 209 auxdata.size = nfsi->vfs_inode.i_size; nfs_fscache_inode_get_aux() 241 auxdata.size = nfsi->vfs_inode.i_size; nfs_fscache_inode_check_aux() 322 * Coherency is managed by comparing the copies of i_size, i_mtime and i_ctime
|
H A D | internal.h | 620 loff_t i_size = i_size_read(page_file_mapping(page)->host); nfs_page_length() local 622 if (i_size > 0) { nfs_page_length() 624 pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; nfs_page_length() 628 return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1; nfs_page_length()
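nfs_page_length() above turns i_size into the number of valid bytes in a given page: pages before end_index are full, the page containing EOF gets the remainder, later pages get zero. For instance, with 4096-byte pages and an i_size of 10000, end_index is 2, pages 0 and 1 report 4096 bytes and page 2 reports 1808. A small userspace restatement of the arithmetic (hypothetical page size, not the NFS code itself):

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12                       /* assume 4 KiB pages */
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)
#define PAGE_CACHE_MASK  (~(PAGE_CACHE_SIZE - 1))

/* Mirrors the nfs_page_length() arithmetic for the page at 'index'. */
static unsigned int page_length(long long i_size, unsigned long index)
{
	unsigned long end_index;

	if (i_size <= 0)
		return 0;
	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
	if (index < end_index)
		return PAGE_CACHE_SIZE;
	if (index == end_index)
		return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1;
	return 0;
}

int main(void)
{
	/* i_size = 10000: pages 0-1 are full, page 2 holds the last 1808 bytes. */
	printf("%u %u %u %u\n", page_length(10000, 0), page_length(10000, 1),
	       page_length(10000, 2), page_length(10000, 3));
	return 0;
}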
|
H A D | inode.c | 420 inode->i_size = 0; nfs_fhget() 448 inode->i_size = nfs_size_to_loff_t(fattr->size); nfs_fhget() 515 loff_t i_size; nfs_setattr() local 519 i_size = i_size_read(inode); nfs_setattr() 520 if (attr->ia_size == i_size) nfs_setattr() 522 else if (attr->ia_size < i_size && IS_SWAPFILE(inode)) nfs_setattr() 562 * inode->i_size to be updated under the inode->i_lock.
|
/linux-4.1.27/fs/fat/ |
H A D | file.c | 184 loff_t start = inode->i_size, count = size - inode->i_size; fat_cont_expand() 400 * Expand the file. Since inode_setattr() updates ->i_size fat_setattr() 408 if (attr->ia_size > inode->i_size) { fat_setattr()
|
H A D | inode.c | 202 if (to > inode->i_size) { fat_write_failed() 203 truncate_pagecache(inode, inode->i_size); fat_write_failed() 204 fat_truncate_blocks(inode, inode->i_size); fat_write_failed() 439 inode->i_size = 0; fat_calc_dir_size() 446 inode->i_size = (fclus + 1) << sbi->cluster_bits; fat_calc_dir_size() 474 MSDOS_I(inode)->mmu_private = inode->i_size; fat_fill_inode() 485 inode->i_size = le32_to_cpu(de->size); fat_fill_inode() 489 MSDOS_I(inode)->mmu_private = inode->i_size; fat_fill_inode() 497 inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1)) fat_fill_inode() 559 inode->i_size = 0; fat_evict_inode() 777 raw_entry->size = cpu_to_le32(inode->i_size); __fat_write_inode() 1291 inode->i_size = sbi->dir_entries * sizeof(struct msdos_dir_entry); fat_read_root() 1293 inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1)) fat_read_root() 1296 MSDOS_I(inode)->mmu_private = inode->i_size; fat_read_root()
|
H A D | dir.c | 1375 if (dir->i_size & (sbi->cluster_size - 1)) { fat_add_entries() 1377 dir->i_size = (dir->i_size + sbi->cluster_size - 1) fat_add_entries() 1380 dir->i_size += nr_cluster << sbi->cluster_bits; fat_add_entries()
|
/linux-4.1.27/fs/qnx6/ |
H A D | dir.c | 37 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; dir_pages() 42 unsigned long last_byte = inode->i_size; last_entry() 128 if (ctx->pos >= inode->i_size) qnx6_readdir()
|
H A D | inode.c | 510 inode->i_size = fs64_to_cpu(sbi, p->size); qnx6_private_inode() 561 inode->i_size = fs64_to_cpu(sbi, raw_inode->di_size); qnx6_iget() 570 inode->i_blocks = (inode->i_size + 511) >> 9; qnx6_iget()
|
/linux-4.1.27/fs/freevxfs/ |
H A D | vxfs_lookup.c | 68 return (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; dir_pages() 75 return (ip->i_size + bsize - 1) & ~(bsize - 1); dir_blocks() 258 if (pos > VXFS_DIRROUND(ip->i_size)) vxfs_readdir()
|
H A D | vxfs_inode.c | 231 ip->i_size = vip->vii_size; vxfs_iinit() 331 vip->vii_immed.vi_immed[ip->i_size] = '\0'; vxfs_iget()
|
/linux-4.1.27/fs/ext3/ |
H A D | inode.c | 176 * i_size since page cache has been already dropped and writes are truncate_restart_transaction() 255 inode->i_size = 0; ext3_evict_inode() 810 * the new i_size. But that is not done here - it is done in ext3_splice_branch() 1236 truncate_inode_pages(inode->i_mapping, inode->i_size); ext3_truncate_failed_write() 1246 ext3_block_truncate_page(inode, inode->i_size); ext3_truncate_failed_direct_write() 1296 * outside i_size. Trim these off again. Don't need ext3_write_begin() 1303 if (pos + len > inode->i_size && ext3_can_truncate(inode)) ext3_write_begin() 1308 if (pos + len > inode->i_size) ext3_write_begin() 1356 /* What matters to us is i_disksize. We don't write i_size anywhere */ update_file_sizes() 1357 if (pos + copied > inode->i_size) update_file_sizes() 1393 * There may be allocated blocks outside of i_size because ext3_ordered_write_end() 1396 if (pos + len > inode->i_size && ext3_can_truncate(inode)) ext3_ordered_write_end() 1404 if (pos + len > inode->i_size) ext3_ordered_write_end() 1422 * There may be allocated blocks outside of i_size because ext3_writeback_write_end() 1425 if (pos + len > inode->i_size && ext3_can_truncate(inode)) ext3_writeback_write_end() 1431 if (pos + len > inode->i_size) ext3_writeback_write_end() 1464 if (pos + copied > inode->i_size) ext3_journalled_write_end() 1467 * There may be allocated blocks outside of i_size because ext3_journalled_write_end() 1470 if (pos + len > inode->i_size && ext3_can_truncate(inode)) ext3_journalled_write_end() 1474 if (inode->i_size > ei->i_disksize) { ext3_journalled_write_end() 1475 ei->i_disksize = inode->i_size; ext3_journalled_write_end() 1487 if (pos + len > inode->i_size) ext3_journalled_write_end() 1819 * If the O_DIRECT write is intantiating holes inside i_size and the machine 1840 if (final_size > inode->i_size) { ext3_direct_IO() 1853 ei->i_disksize = inode->i_size; ext3_direct_IO() 1862 * blocks outside i_size. Trim these off again. ext3_direct_IO() 1881 * but cannot extend i_size. Truncate allocated blocks ext3_direct_IO() 1893 if (end > inode->i_size) { ext3_direct_IO() 2120 * partially truncated if some data below the new i_size is referred 2485 * The committed inode has the new, desired i_size (which is the same as 2512 if (inode->i_size == 0 && ext3_should_writeback_data(inode)) ext3_truncate() 2519 last_block = (inode->i_size + blocksize-1) ext3_truncate() 2540 * the new, shorter inode size (held for now in i_size) into the ext3_truncate() 2544 ei->i_disksize = inode->i_size; ext3_truncate() 2886 inode->i_size = le32_to_cpu(raw_inode->i_size); ext3_iget() 2924 inode->i_size |= ext3_iget() 2927 ei->i_disksize = inode->i_size; ext3_iget() 3000 nd_terminate_link(ei->i_data, inode->i_size, ext3_iget() 3084 if (disksize != raw_inode->i_size) { ext3_do_update_inode() 3086 raw_inode->i_size = disksize; ext3_do_update_inode() 3196 * inode->i_size = expr; 3199 * and the new i_size will be lost. Plus the inode will no longer be on the 3231 * shrinks i_size, we put the inode on the orphan list and modify 3284 attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { ext3_setattr() 3460 * i_size has been changed by generic_commit_write() and we thus need
|
H A D | dir.c | 56 ((inode->i_size >> sb->s_blocksize_bits) == 1))) is_dx_dir() 115 while (ctx->pos < inode->i_size) { ext3_readdir() 177 while (ctx->pos < inode->i_size ext3_readdir() 201 if (ctx->pos < inode->i_size) ext3_readdir()
|
H A D | namei.c | 46 *block = inode->i_size >> inode->i_sb->s_blocksize_bits; ext3_append() 49 inode->i_size += inode->i_sb->s_blocksize; ext3_append() 50 EXT3_I(inode)->i_disksize = inode->i_size; ext3_append() 885 nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb); ext3_find_entry() 951 nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb); ext3_find_entry() 1459 blocks = dir->i_size >> sb->s_blocksize_bits; ext3_add_entry() 1830 inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize; ext3_mkdir() 1898 if (inode->i_size < EXT3_DIR_REC_LEN(1) + EXT3_DIR_REC_LEN(2) || empty_dir() 1925 while (offset < inode->i_size ) { empty_dir() 2150 inode->i_size = 0; ext3_rmdir() 2312 inode->i_size = l-1; ext3_symlink() 2314 EXT3_I(inode)->i_disksize = inode->i_size; ext3_symlink()
|
H A D | super.c | 1587 __func__, inode->i_ino, inode->i_size); ext3_orphan_cleanup() 1589 inode->i_ino, inode->i_size); ext3_orphan_cleanup() 2100 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { ext3_fill_super() 2209 journal_inode, journal_inode->i_size); ext3_get_journal() 3026 loff_t i_size = i_size_read(inode); ext3_quota_read() local 3028 if (off > i_size) ext3_quota_read() 3030 if (off+len > i_size) ext3_quota_read() 3031 len = i_size-off; ext3_quota_read() 3108 if (inode->i_size < off + len) { ext3_quota_write() 3110 EXT3_I(inode)->i_disksize = inode->i_size; ext3_quota_write()
|
/linux-4.1.27/include/linux/ |
H A D | fscache-cache.h | 275 * i_size) have changed */ 279 int (*reserve_space)(struct fscache_object *object, loff_t i_size); 453 * @i_size: The limit to set in bytes 462 void fscache_set_store_limit(struct fscache_object *object, loff_t i_size) fscache_set_store_limit() argument 464 object->store_limit_l = i_size; fscache_set_store_limit() 465 object->store_limit = i_size >> PAGE_SHIFT; fscache_set_store_limit() 466 if (i_size & ~PAGE_MASK) fscache_set_store_limit()
|
H A D | fs.h | 561 * Use sequence counter to get consistent i_size on 32-bit processors. 617 loff_t i_size; member in struct:inode 621 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ 719 loff_t i_size; i_size_read() local 724 i_size = inode->i_size; i_size_read() 726 return i_size; i_size_read() 728 loff_t i_size; i_size_read() 731 i_size = inode->i_size; i_size_read() 733 return i_size; i_size_read() 735 return inode->i_size; i_size_read() 744 static inline void i_size_write(struct inode *inode, loff_t i_size) i_size_write() argument 749 inode->i_size = i_size; i_size_write() 754 inode->i_size = i_size; i_size_write() 757 inode->i_size = i_size; i_size_write() 2056 size < inode->i_size ? size : inode->i_size, locks_verify_truncate() 2057 (size < inode->i_size ? inode->i_size - size locks_verify_truncate() 2058 : size - inode->i_size) locks_verify_truncate() 2655 /* filesystem can handle aio writes beyond i_size */
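The fs.h fragments above exist because loff_t is 64 bits wide while a 32-bit CPU cannot load it atomically, so a plain read of inode->i_size can tear against a concurrent update; i_size_read() retries under a sequence counter (or with preemption disabled) and i_size_write() publishes the new value, with writers expected to serialize externally, typically under i_mutex. A short illustration of the reader/writer pairing as filesystem code normally uses it (an assumed sketch, not taken from the header):

#include <linux/fs.h>

/* Reader side: safe on 32-bit SMP because i_size_read() retries if it
 * races with a concurrent i_size_write(). */
static bool offset_is_beyond_eof(struct inode *inode, loff_t pos)
{
	return pos >= i_size_read(inode);
}

/* Writer side: callers are expected to serialize (e.g. hold i_mutex)
 * so that only one i_size_write() runs at a time. */
static void extend_eof(struct inode *inode, loff_t new_size)
{
	if (new_size > i_size_read(inode))
		i_size_write(inode, new_size);
	mark_inode_dirty(inode);
}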
|
H A D | nilfs2_fs.h | 51 * @i_size: size in bytes 68 __le64 i_size; member in struct:nilfs_inode
|
/linux-4.1.27/fs/f2fs/ |
H A D | inode.c | 123 inode->i_size = le64_to_cpu(ri->i_size); do_read_inode() 234 ri->i_size = cpu_to_le64(i_size_read(inode)); update_inode()
|
H A D | data.c | 1148 /* update i_size */ __allocate_data_block() 1421 loff_t i_size = i_size_read(inode); f2fs_write_data_page() local 1422 const pgoff_t end_index = ((unsigned long long) i_size) f2fs_write_data_page() 1441 offset = i_size & (PAGE_CACHE_SIZE - 1); f2fs_write_data_page() 1561 if (to > inode->i_size) { f2fs_write_failed() 1562 truncate_pagecache(inode, inode->i_size); f2fs_write_failed() 1563 truncate_blocks(inode, inode->i_size, true); f2fs_write_failed() 1639 /* Reading beyond i_size is simple: memset to zero */ f2fs_write_begin()
|
/linux-4.1.27/fs/xfs/ |
H A D | xfs_iomap.c | 815 xfs_fsize_t i_size; xfs_iomap_write_unwritten() local 879 i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb); xfs_iomap_write_unwritten() 880 if (i_size > offset + count) xfs_iomap_write_unwritten() 881 i_size = offset + count; xfs_iomap_write_unwritten() 883 i_size = xfs_new_eof(ip, i_size); xfs_iomap_write_unwritten() 884 if (i_size) { xfs_iomap_write_unwritten() 885 ip->i_d.di_size = i_size; xfs_iomap_write_unwritten()
|
H A D | xfs_inode.h | 103 xfs_fsize_t i_size = i_size_read(VFS_I(ip)); xfs_new_eof() local 105 if (new_size > i_size || new_size < 0) xfs_new_eof() 106 new_size = i_size; xfs_new_eof()
|
/linux-4.1.27/include/trace/events/ |
H A D | f2fs.h | 139 __entry->size = inode->i_size; 145 "i_size = %lld, i_nlink = %u, i_blocks = %llu, i_advise = 0x%x", 284 __entry->size = dir->i_size; 289 TP_printk("dev = (%d,%d), dir ino = %lu, i_size = %lld, " 357 __entry->size = inode->i_size; 362 TP_printk("dev = (%d,%d), ino = %lu, i_size = %lld, i_blocks = %llu, " 584 __entry->size = inode->i_size; 590 "len = %lld, i_size = %lld, i_blocks = %llu, ret = %d",
|
/linux-4.1.27/fs/cifs/ |
H A D | ioctl.c | 108 if (off + len > src_inode->i_size || off + len < off) cifs_ioctl_clone() 111 len = src_inode->i_size - off; cifs_ioctl_clone()
|
H A D | cache.c | 250 *size = cifsi->vfs_inode.i_size; cifs_fscache_inode_get_attr()
|
/linux-4.1.27/drivers/staging/lustre/lustre/lclient/ |
H A D | lcommon_cl.c | 672 /* vmtruncate() sets the i_size ccc_lock_state() 676 * reset i_size. generic_file_write can then trust the ccc_lock_state() 677 * stale i_size when doing appending writes and effectively ccc_lock_state() 773 * Helper function that if necessary adjusts file size (inode->i_size), when 806 * DLM lock conflicting with our lock. Also, any updates to ->i_size ccc_prep_size() 842 * size (A). We need to increase i_size to cover the ccc_prep_size() 853 DFID" updating i_size %llu\n", ccc_prep_size()
|
/linux-4.1.27/drivers/block/ |
H A D | nbd.c | 639 bdev->bd_inode->i_size = nbd->bytesize; __nbd_ioctl() 646 bdev->bd_inode->i_size = nbd->bytesize; __nbd_ioctl() 661 bdev->bd_inode->i_size = nbd->bytesize; __nbd_ioctl() 713 bdev->bd_inode->i_size = 0; __nbd_ioctl()
|
/linux-4.1.27/drivers/mtd/devices/ |
H A D | block2mtd.c | 276 if ((long)dev->blkdev->bd_inode->i_size % erase_size) { add_device() 291 dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; add_device()
|
/linux-4.1.27/fs/gfs2/ |
H A D | aops.c | 103 loff_t i_size = i_size_read(inode); gfs2_writepage_common() local 104 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; gfs2_writepage_common() 111 /* Is the page fully outside i_size? (truncate in progress) */ gfs2_writepage_common() 112 offset = i_size & (PAGE_CACHE_SIZE-1); gfs2_writepage_common() 729 if (pos + len > ip->i_inode.i_size) gfs2_write_begin() 829 if (inode->i_size < to) gfs2_stuffed_write_end()
|
/linux-4.1.27/fs/ecryptfs/ |
H A D | inode.c | 65 /* i_size will be overwritten for encrypted regular files */ ecryptfs_inode_set() 743 loff_t i_size = i_size_read(inode); truncate_upper() local 747 if (unlikely((ia->ia_size == i_size))) { truncate_upper() 756 if (ia->ia_size > i_size) { truncate_upper() 809 upper_size_to_lower_size(crypt_stat, i_size); truncate_upper()
|
/linux-4.1.27/fs/fscache/ |
H A D | cookie.c | 195 uint64_t i_size; fscache_acquire_non_index_cookie() local 235 cookie->def->get_attr(cookie->netfs_data, &i_size); fscache_acquire_non_index_cookie() 246 fscache_set_store_limit(object, i_size); fscache_acquire_non_index_cookie()
|
/linux-4.1.27/fs/fuse/ |
H A D | file.c | 665 if (attr_ver == fi->attr_version && size < inode->i_size && fuse_read_update_size() 912 * i_size is up to date). fuse_file_read_iter() 978 if (pos > inode->i_size) { fuse_write_update_size() 1107 if (inode->i_size < pos + iov_iter_count(ii)) fuse_perform_write() 2793 loff_t i_size; fuse_direct_IO() local 2800 i_size = i_size_read(inode); fuse_direct_IO() 2802 if ((iov_iter_rw(iter) == READ) && (offset > i_size)) fuse_direct_IO() 2806 if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) { fuse_direct_IO() 2807 if (offset >= i_size) fuse_direct_IO() 2809 iov_iter_truncate(iter, fuse_round_up(i_size - offset)); fuse_direct_IO() 2837 if (!is_sync && (offset + count > i_size) && fuse_direct_IO() 2873 else if (ret < 0 && offset + count > i_size) fuse_direct_IO()
|
/linux-4.1.27/fs/romfs/ |
H A D | super.c | 340 i->i_size = be32_to_cpu(ri.size); romfs_iget() 349 i->i_size = ROMFS_I(i)->i_metasize; romfs_iget()
|
/linux-4.1.27/fs/sysfs/ |
H A D | file.c | 87 loff_t size = file_inode(of->file)->i_size; sysfs_kf_bin_read() 140 loff_t size = file_inode(of->file)->i_size; sysfs_kf_bin_write()
|
/linux-4.1.27/arch/s390/hypfs/ |
H A D | inode.c | 354 inode->i_size = strlen(data); hypfs_create_file() 356 inode->i_size = 0; hypfs_create_file()
|
/linux-4.1.27/fs/ocfs2/cluster/ |
H A D | tcp_internal.h | 75 * - full 64 bit i_size in the metadata lock lvbs
|
/linux-4.1.27/fs/openpromfs/ |
H A D | inode.c | 255 inode->i_size = ent_oi->u.prop->length; openpromfs_lookup()
|
/linux-4.1.27/fs/proc/ |
H A D | fd.c | 195 inode->i_size = 64; proc_fd_instantiate()
|
H A D | inode.c | 436 inode->i_size = de->size; proc_get_inode()
|
/linux-4.1.27/fs/ramfs/ |
H A D | file-nommu.c | 177 loff_t size = inode->i_size; ramfs_nommu_setattr()
|
/linux-4.1.27/fs/kernfs/ |
H A D | inode.c | 303 inode->i_size = kn->attr.size; kernfs_init_inode()
|
/linux-4.1.27/fs/nfsd/ |
H A D | nfsfh.h | 232 fhp->fh_pre_size = inode->i_size; fill_pre_wcc()
|
/linux-4.1.27/ipc/ |
H A D | mqueue.c | 236 inode->i_size = FILENT_SIZE; mqueue_get_inode() 294 inode->i_size = 2 * DIRENT_SIZE; mqueue_get_inode() 451 dir->i_size += DIRENT_SIZE; mqueue_create() 469 dir->i_size -= DIRENT_SIZE; mqueue_unlink()
|
/linux-4.1.27/fs/hppfs/ |
H A D | hppfs.c | 702 inode->i_size = proc_ino->i_size; get_inode()
|
/linux-4.1.27/fs/nfs/blocklayout/ |
H A D | blocklayout.c | 306 if ((isect << SECTOR_SHIFT) >= header->inode->i_size) { bl_read_pagelist() 308 header->res.count = header->inode->i_size - header->args.offset; bl_read_pagelist()
|
/linux-4.1.27/fs/nfs/objlayout/ |
H A D | objio_osd.c | 461 loff_t i_size = i_size_read(hdr->inode); __r4w_get_page() local 463 if (offset >= i_size) { __r4w_get_page()
|
/linux-4.1.27/arch/powerpc/platforms/cell/spufs/ |
H A D | inode.c | 117 (attr->ia_size != inode->i_size)) spufs_setattr() 144 inode->i_size = size; spufs_new_file()
|
/linux-4.1.27/fs/pstore/ |
H A D | inode.c | 386 inode->i_size = private->size = size; pstore_mkfile()
|