Lines matching refs:inode in mm/shmem.c. Each entry gives the source line number, the matched line, and the enclosing function; a trailing "argument" or "local" marks lines where inode is declared as a function parameter or as a local variable.
124 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
127 static inline int shmem_getpage(struct inode *inode, pgoff_t index, in shmem_getpage() argument
130 return shmem_getpage_gfp(inode, index, pagep, sgp, in shmem_getpage()
131 mapping_gfp_mask(inode->i_mapping), fault_type); in shmem_getpage()
236 static void shmem_recalc_inode(struct inode *inode) in shmem_recalc_inode() argument
238 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_recalc_inode()
241 freed = info->alloced - info->swapped - inode->i_mapping->nrpages; in shmem_recalc_inode()
243 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_recalc_inode()
247 inode->i_blocks -= freed * BLOCKS_PER_PAGE; in shmem_recalc_inode()
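The matches at file lines 236-247 are the deferred block accounting in shmem_recalc_inode(): whatever was accounted to the file but is no longer in the page cache or on swap has been transparently reclaimed and is given back. A minimal sketch of how those fragments fit together; the per-superblock counter update and the shmem_unacct_blocks() call are assumed from context rather than quoted:

static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        /* pages accounted to the file but gone from page cache and swap */
        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

                if (sbinfo->max_blocks)
                        percpu_counter_add(&sbinfo->used_blocks, -freed);
                info->alloced -= freed;
                inode->i_blocks -= freed * BLOCKS_PER_PAGE;
                shmem_unacct_blocks(info->flags, freed);
        }
}

Callers are expected to hold info->lock around this.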
396 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, in shmem_undo_range() argument
399 struct address_space *mapping = inode->i_mapping; in shmem_undo_range()
400 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_undo_range()
455 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL); in shmem_undo_range()
470 shmem_getpage(inode, end, &page, SGP_READ, NULL); in shmem_undo_range()
536 shmem_recalc_inode(inode); in shmem_undo_range()
540 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) in shmem_truncate_range() argument
542 shmem_undo_range(inode, lstart, lend, false); in shmem_truncate_range()
543 inode->i_ctime = inode->i_mtime = CURRENT_TIME; in shmem_truncate_range()
550 struct inode *inode = dentry->d_inode; in shmem_getattr() local
551 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_getattr()
553 if (info->alloced - info->swapped != inode->i_mapping->nrpages) { in shmem_getattr()
555 shmem_recalc_inode(inode); in shmem_getattr()
558 generic_fillattr(inode, stat); in shmem_getattr()
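shmem_getattr() (lines 550-558) only locks and recalculates when the cheap unlocked comparison says the counters are stale, then falls through to generic_fillattr(). A sketch under that reading; the spin_lock on info->lock and the return value are assumptions, not quoted lines:

static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry,
                         struct kstat *stat)
{
        struct inode *inode = dentry->d_inode;
        struct shmem_inode_info *info = SHMEM_I(inode);

        /* recalculate only when the cached accounting looks stale */
        if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
                spin_lock(&info->lock);
                shmem_recalc_inode(inode);
                spin_unlock(&info->lock);
        }
        generic_fillattr(inode, stat);
        return 0;
}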
564 struct inode *inode = d_inode(dentry); in shmem_setattr() local
565 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_setattr()
568 error = inode_change_ok(inode, attr); in shmem_setattr()
572 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { in shmem_setattr()
573 loff_t oldsize = inode->i_size; in shmem_setattr()
582 error = shmem_reacct_size(SHMEM_I(inode)->flags, in shmem_setattr()
586 i_size_write(inode, newsize); in shmem_setattr()
587 inode->i_ctime = inode->i_mtime = CURRENT_TIME; in shmem_setattr()
592 unmap_mapping_range(inode->i_mapping, in shmem_setattr()
595 shmem_truncate_range(inode, in shmem_setattr()
599 unmap_mapping_range(inode->i_mapping, in shmem_setattr()
604 setattr_copy(inode, attr); in shmem_setattr()
606 error = posix_acl_chmod(inode, inode->i_mode); in shmem_setattr()
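The size-change branch of shmem_setattr() (lines 572-599) publishes the new size first, then unmaps the affected mappings, truncates the page cache, and unmaps once more to catch private pages COWed while the truncate was in flight. A simplified sketch of the shrink path only; the seal checks and some guards are elided, so treat this as an outline rather than the exact body:

        /* inside shmem_setattr(), i_mutex held, shrinking to newsize */
        if (newsize != oldsize) {
                error = shmem_reacct_size(SHMEM_I(inode)->flags,
                                          oldsize, newsize);
                if (error)
                        return error;
                i_size_write(inode, newsize);
                inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        }
        if (newsize <= oldsize) {
                loff_t holebegin = round_up(newsize, PAGE_SIZE);

                unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
                shmem_truncate_range(inode, newsize, (loff_t)-1);
                /* unmap again to drop privately COWed pages created
                 * while the truncate was running */
                unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
        }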
610 static void shmem_evict_inode(struct inode *inode) in shmem_evict_inode() argument
612 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_evict_inode()
614 if (inode->i_mapping->a_ops == &shmem_aops) { in shmem_evict_inode()
615 shmem_unacct_size(info->flags, inode->i_size); in shmem_evict_inode()
616 inode->i_size = 0; in shmem_evict_inode()
617 shmem_truncate_range(inode, 0, (loff_t)-1); in shmem_evict_inode()
626 WARN_ON(inode->i_blocks); in shmem_evict_inode()
627 shmem_free_inode(inode->i_sb); in shmem_evict_inode()
628 clear_inode(inode); in shmem_evict_inode()
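shmem_evict_inode() (lines 610-628) tears down in a fixed order: undo the size accounting, zero i_size, drop every remaining page, then release the reserved inode and clear it. A sketch with the swaplist and xattr cleanup elided; those elisions are assumptions about what sits between the matched lines:

static void shmem_evict_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);

        if (inode->i_mapping->a_ops == &shmem_aops) {
                /* a real shmem file: give back its size/block accounting */
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate_range(inode, 0, (loff_t)-1);
                /* ... detach from the shmem swaplist if present ... */
        }
        /* ... free extended attributes ... */
        WARN_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
        clear_inode(inode);
}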
770 struct inode *inode; in shmem_writepage() local
777 inode = mapping->host; in shmem_writepage()
778 info = SHMEM_I(inode); in shmem_writepage()
808 if (inode->i_private) { in shmem_writepage()
810 spin_lock(&inode->i_lock); in shmem_writepage()
811 shmem_falloc = inode->i_private; in shmem_writepage()
819 spin_unlock(&inode->i_lock); in shmem_writepage()
846 shmem_recalc_inode(inode); in shmem_writepage()
1054 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, in shmem_getpage_gfp() argument
1057 struct address_space *mapping = inode->i_mapping; in shmem_getpage_gfp()
1078 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { in shmem_getpage_gfp()
1103 info = SHMEM_I(inode); in shmem_getpage_gfp()
1104 sbinfo = SHMEM_SB(inode->i_sb); in shmem_getpage_gfp()
1167 shmem_recalc_inode(inode); in shmem_getpage_gfp()
1220 inode->i_blocks += BLOCKS_PER_PAGE; in shmem_getpage_gfp()
1221 shmem_recalc_inode(inode); in shmem_getpage_gfp()
1247 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { in shmem_getpage_gfp()
1252 shmem_recalc_inode(inode); in shmem_getpage_gfp()
1278 info = SHMEM_I(inode); in shmem_getpage_gfp()
1280 shmem_recalc_inode(inode); in shmem_getpage_gfp()
1291 struct inode *inode = file_inode(vma->vm_file); in shmem_fault() local
1312 if (unlikely(inode->i_private)) { in shmem_fault()
1315 spin_lock(&inode->i_lock); in shmem_fault()
1316 shmem_falloc = inode->i_private; in shmem_fault()
1335 spin_unlock(&inode->i_lock); in shmem_fault()
1345 spin_lock(&inode->i_lock); in shmem_fault()
1347 spin_unlock(&inode->i_lock); in shmem_fault()
1350 spin_unlock(&inode->i_lock); in shmem_fault()
1353 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); in shmem_fault()
1367 struct inode *inode = file_inode(vma->vm_file); in shmem_set_policy() local
1368 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); in shmem_set_policy()
1374 struct inode *inode = file_inode(vma->vm_file); in shmem_get_policy() local
1378 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); in shmem_get_policy()
1384 struct inode *inode = file_inode(file); in shmem_lock() local
1385 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_lock()
1390 if (!user_shm_lock(inode->i_size, user)) in shmem_lock()
1396 user_shm_unlock(inode->i_size, user); in shmem_lock()
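shmem_lock() (lines 1384-1396) charges or uncharges the whole file size against the caller's locked-shm limit and flips the mapping's evictability to match. A sketch of both directions, assuming the usual info->lock protection and the mapping_set/clear_unevictable() calls that normally accompany user_shm_lock():

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
        struct inode *inode = file_inode(file);
        struct shmem_inode_info *info = SHMEM_I(inode);
        int retval = -ENOMEM;

        spin_lock(&info->lock);
        if (lock && !(info->flags & VM_LOCKED)) {
                if (!user_shm_lock(inode->i_size, user))
                        goto out_nomem;
                info->flags |= VM_LOCKED;
                mapping_set_unevictable(file->f_mapping);
        }
        if (!lock && (info->flags & VM_LOCKED) && user) {
                user_shm_unlock(inode->i_size, user);
                info->flags &= ~VM_LOCKED;
                mapping_clear_unevictable(file->f_mapping);
        }
        retval = 0;
out_nomem:
        spin_unlock(&info->lock);
        return retval;
}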
1414 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, in shmem_get_inode()
1417 struct inode *inode; in shmem_get_inode() local
1424 inode = new_inode(sb); in shmem_get_inode()
1425 if (inode) { in shmem_get_inode()
1426 inode->i_ino = get_next_ino(); in shmem_get_inode()
1427 inode_init_owner(inode, dir, mode); in shmem_get_inode()
1428 inode->i_blocks = 0; in shmem_get_inode()
1429 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; in shmem_get_inode()
1430 inode->i_generation = get_seconds(); in shmem_get_inode()
1431 info = SHMEM_I(inode); in shmem_get_inode()
1432 memset(info, 0, (char *)inode - (char *)info); in shmem_get_inode()
1438 cache_no_acl(inode); in shmem_get_inode()
1442 inode->i_op = &shmem_special_inode_operations; in shmem_get_inode()
1443 init_special_inode(inode, mode, dev); in shmem_get_inode()
1446 inode->i_mapping->a_ops = &shmem_aops; in shmem_get_inode()
1447 inode->i_op = &shmem_inode_operations; in shmem_get_inode()
1448 inode->i_fop = &shmem_file_operations; in shmem_get_inode()
1453 inc_nlink(inode); in shmem_get_inode()
1455 inode->i_size = 2 * BOGO_DIRENT_SIZE; in shmem_get_inode()
1456 inode->i_op = &shmem_dir_inode_operations; in shmem_get_inode()
1457 inode->i_fop = &simple_dir_operations; in shmem_get_inode()
1469 return inode; in shmem_get_inode()
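The memset at line 1432, memset(info, 0, (char *)inode - (char *)info), only works because the VFS inode is embedded as the last member of struct shmem_inode_info: everything in front of it can be zeroed in one call without touching the freshly initialised inode. A sketch of that layout and of SHMEM_I(); the field list is abridged and should not be read as complete:

struct shmem_inode_info {
        spinlock_t              lock;
        unsigned int            seals;          /* shmem seals (F_SEAL_*) */
        unsigned long           flags;
        unsigned long           alloced;        /* data pages allocated to file */
        unsigned long           swapped;        /* subtotal assigned to swap */
        /* ... policy, swaplist and xattr fields elided ... */
        struct inode            vfs_inode;      /* must stay the last member */
};

static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
{
        return container_of(inode, struct shmem_inode_info, vfs_inode);
}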
1485 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
1495 struct inode *inode = mapping->host; in shmem_write_begin() local
1496 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_write_begin()
1503 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) in shmem_write_begin()
1507 return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL); in shmem_write_begin()
1515 struct inode *inode = mapping->host; in shmem_write_end() local
1517 if (pos + copied > inode->i_size) in shmem_write_end()
1518 i_size_write(inode, pos + copied); in shmem_write_end()
1538 struct inode *inode = file_inode(file); in shmem_file_read_iter() local
1539 struct address_space *mapping = inode->i_mapping; in shmem_file_read_iter()
1562 loff_t i_size = i_size_read(inode); in shmem_file_read_iter()
1573 error = shmem_getpage(inode, index, &page, sgp, NULL); in shmem_file_read_iter()
1587 i_size = i_size_read(inode); in shmem_file_read_iter()
1647 struct inode *inode = mapping->host; in shmem_file_splice_read() local
1664 isize = i_size_read(inode); in shmem_file_splice_read()
1686 error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL); in shmem_file_splice_read()
1708 error = shmem_getpage(inode, index, &page, in shmem_file_splice_read()
1717 isize = i_size_read(inode); in shmem_file_splice_read()
1809 struct inode *inode = mapping->host; in shmem_file_llseek() local
1815 MAX_LFS_FILESIZE, i_size_read(inode)); in shmem_file_llseek()
1816 mutex_lock(&inode->i_mutex); in shmem_file_llseek()
1821 else if (offset >= inode->i_size) in shmem_file_llseek()
1825 end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in shmem_file_llseek()
1829 if (new_offset < inode->i_size) in shmem_file_llseek()
1834 offset = inode->i_size; in shmem_file_llseek()
1840 mutex_unlock(&inode->i_mutex); in shmem_file_llseek()
1964 struct inode *inode = file_inode(file); in shmem_add_seals() local
1965 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_add_seals()
2005 mutex_lock(&inode->i_mutex); in shmem_add_seals()
2028 mutex_unlock(&inode->i_mutex); in shmem_add_seals()
2068 struct inode *inode = file_inode(file); in shmem_fallocate() local
2069 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_fallocate()
2070 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_fallocate()
2078 mutex_lock(&inode->i_mutex); in shmem_fallocate()
2095 spin_lock(&inode->i_lock); in shmem_fallocate()
2096 inode->i_private = &shmem_falloc; in shmem_fallocate()
2097 spin_unlock(&inode->i_lock); in shmem_fallocate()
2102 shmem_truncate_range(inode, offset, offset + len - 1); in shmem_fallocate()
2105 spin_lock(&inode->i_lock); in shmem_fallocate()
2106 inode->i_private = NULL; in shmem_fallocate()
2108 spin_unlock(&inode->i_lock); in shmem_fallocate()
2114 error = inode_newsize_ok(inode, offset + len); in shmem_fallocate()
2118 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { in shmem_fallocate()
2136 spin_lock(&inode->i_lock); in shmem_fallocate()
2137 inode->i_private = &shmem_falloc; in shmem_fallocate()
2138 spin_unlock(&inode->i_lock); in shmem_fallocate()
2152 error = shmem_getpage(inode, index, &page, SGP_FALLOC, in shmem_fallocate()
2156 shmem_undo_range(inode, in shmem_fallocate()
2183 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) in shmem_fallocate()
2184 i_size_write(inode, offset + len); in shmem_fallocate()
2185 inode->i_ctime = CURRENT_TIME; in shmem_fallocate()
2187 spin_lock(&inode->i_lock); in shmem_fallocate()
2188 inode->i_private = NULL; in shmem_fallocate()
2189 spin_unlock(&inode->i_lock); in shmem_fallocate()
2191 mutex_unlock(&inode->i_mutex); in shmem_fallocate()
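The recurring inode->i_private and inode->i_lock references in shmem_writepage() (lines 808-819), shmem_fault() (lines 1312-1350) and shmem_fallocate() (lines 2095-2189) are one protocol: fallocate publishes the range it is currently filling or punching through i_private, and concurrent faults or writepage inspect it under i_lock to back off, wait, or report progress. A sketch of the on-stack structure being published, with comments paraphrased from this era of the file:

struct shmem_falloc {
        wait_queue_head_t *waitq;  /* faults into hole wait for punch to end */
        pgoff_t start;             /* start of range currently being fallocated */
        pgoff_t next;              /* the next page offset to be fallocated */
        pgoff_t nr_falloced;       /* how many new pages have been fallocated */
        pgoff_t nr_unswapped;      /* how often writepage refused to swap out */
};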
2220 shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) in shmem_mknod()
2222 struct inode *inode; in shmem_mknod() local
2225 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); in shmem_mknod()
2226 if (inode) { in shmem_mknod()
2227 error = simple_acl_create(dir, inode); in shmem_mknod()
2230 error = security_inode_init_security(inode, dir, in shmem_mknod()
2239 d_instantiate(dentry, inode); in shmem_mknod()
2244 iput(inode); in shmem_mknod()
2249 shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) in shmem_tmpfile()
2251 struct inode *inode; in shmem_tmpfile() local
2254 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); in shmem_tmpfile()
2255 if (inode) { in shmem_tmpfile()
2256 error = security_inode_init_security(inode, dir, in shmem_tmpfile()
2261 error = simple_acl_create(dir, inode); in shmem_tmpfile()
2264 d_tmpfile(dentry, inode); in shmem_tmpfile()
2268 iput(inode); in shmem_tmpfile()
2272 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) in shmem_mkdir()
2282 static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, in shmem_create()
2291 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) in shmem_link()
2293 struct inode *inode = d_inode(old_dentry); in shmem_link() local
2301 ret = shmem_reserve_inode(inode->i_sb); in shmem_link()
2306 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; in shmem_link()
2307 inc_nlink(inode); in shmem_link()
2308 ihold(inode); /* New dentry reference */ in shmem_link()
2310 d_instantiate(dentry, inode); in shmem_link()
2315 static int shmem_unlink(struct inode *dir, struct dentry *dentry) in shmem_unlink()
2317 struct inode *inode = d_inode(dentry); in shmem_unlink() local
2319 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) in shmem_unlink()
2320 shmem_free_inode(inode->i_sb); in shmem_unlink()
2323 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; in shmem_unlink()
2324 drop_nlink(inode); in shmem_unlink()
2329 static int shmem_rmdir(struct inode *dir, struct dentry *dentry) in shmem_rmdir()
2339 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, … in shmem_exchange()
2361 static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry) in shmem_whiteout()
2393 static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, s… in shmem_rename2()
2395 struct inode *inode = d_inode(old_dentry); in shmem_rename2() local
2396 int they_are_dirs = S_ISDIR(inode->i_mode); in shmem_rename2()
2430 inode->i_ctime = CURRENT_TIME; in shmem_rename2()
2434 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) in shmem_symlink()
2438 struct inode *inode; in shmem_symlink() local
2447 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); in shmem_symlink()
2448 if (!inode) in shmem_symlink()
2451 error = security_inode_init_security(inode, dir, &dentry->d_name, in shmem_symlink()
2455 iput(inode); in shmem_symlink()
2461 info = SHMEM_I(inode); in shmem_symlink()
2462 inode->i_size = len-1; in shmem_symlink()
2464 inode->i_link = kmemdup(symname, len, GFP_KERNEL); in shmem_symlink()
2465 if (!inode->i_link) { in shmem_symlink()
2466 iput(inode); in shmem_symlink()
2469 inode->i_op = &shmem_short_symlink_operations; in shmem_symlink()
2471 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); in shmem_symlink()
2473 iput(inode); in shmem_symlink()
2476 inode->i_mapping->a_ops = &shmem_aops; in shmem_symlink()
2477 inode->i_op = &shmem_symlink_inode_operations; in shmem_symlink()
2488 d_instantiate(dentry, inode); in shmem_symlink()
2504 static void shmem_put_link(struct inode *unused, void *cookie) in shmem_put_link()
2523 static int shmem_initxattrs(struct inode *inode, in shmem_initxattrs() argument
2527 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_initxattrs()
2680 static int shmem_match(struct inode *ino, void *vfh) in shmem_match()
2691 struct inode *inode; in shmem_fh_to_dentry() local
2701 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), in shmem_fh_to_dentry()
2703 if (inode) { in shmem_fh_to_dentry()
2704 dentry = d_find_alias(inode); in shmem_fh_to_dentry()
2705 iput(inode); in shmem_fh_to_dentry()
2711 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, in shmem_encode_fh() argument
2712 struct inode *parent) in shmem_encode_fh()
2719 if (inode_unhashed(inode)) { in shmem_encode_fh()
2727 if (inode_unhashed(inode)) in shmem_encode_fh()
2728 __insert_inode_hash(inode, in shmem_encode_fh()
2729 inode->i_ino + inode->i_generation); in shmem_encode_fh()
2733 fh[0] = inode->i_generation; in shmem_encode_fh()
2734 fh[1] = inode->i_ino; in shmem_encode_fh()
2735 fh[2] = ((__u64)inode->i_ino) >> 32; in shmem_encode_fh()
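shmem_encode_fh() packs the generation into fh[0] and the 64-bit inode number into fh[1] (low half) and fh[2] (high half), after making sure the inode is hashed on i_ino + i_generation so it can be found again. The lookup side reverses this, which is why shmem_fh_to_dentry() at line 2701 passes inum + fid->raw[0] to ilookup5(). A reconstructed sketch of that decode path; the fh_len check and the shmem_match() comparator are assumptions consistent with the matches above:

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
                struct fid *fid, int fh_len, int fh_type)
{
        struct inode *inode;
        struct dentry *dentry = NULL;
        u64 inum;

        if (fh_len < 3)
                return NULL;

        /* rebuild the 64-bit inode number from the low and high words */
        inum = fid->raw[2];
        inum = (inum << 32) | fid->raw[1];

        /* inode was hashed on i_ino + i_generation when the fh was encoded */
        inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
                        shmem_match, fid->raw);
        if (inode) {
                dentry = d_find_alias(inode);
                iput(inode);
        }
        return dentry;
}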
3000 struct inode *inode; in shmem_fill_super() local
3055 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); in shmem_fill_super()
3056 if (!inode) in shmem_fill_super()
3058 inode->i_uid = sbinfo->uid; in shmem_fill_super()
3059 inode->i_gid = sbinfo->gid; in shmem_fill_super()
3060 sb->s_root = d_make_root(inode); in shmem_fill_super()
3072 static struct inode *shmem_alloc_inode(struct super_block *sb) in shmem_alloc_inode()
3083 struct inode *inode = container_of(head, struct inode, i_rcu); in shmem_destroy_callback() local
3084 kfree(inode->i_link); in shmem_destroy_callback()
3085 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); in shmem_destroy_callback()
3088 static void shmem_destroy_inode(struct inode *inode) in shmem_destroy_inode() argument
3090 if (S_ISREG(inode->i_mode)) in shmem_destroy_inode()
3091 mpol_free_shared_policy(&SHMEM_I(inode)->policy); in shmem_destroy_inode()
3092 call_rcu(&inode->i_rcu, shmem_destroy_callback); in shmem_destroy_inode()
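Allocation and teardown mirror each other: each inode is carved out of a shmem_inode_info slab object, and shmem_destroy_inode() defers the actual free to RCU (shmem_destroy_callback at line 3083) so lockless path walks that may still dereference the inode stay safe until the grace period ends. A sketch of the allocation side, assuming a shmem_inode_cachep slab cache as the listing's kmem_cache_free() call implies:

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
        struct shmem_inode_info *info;

        info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
        if (!info)
                return NULL;
        return &info->vfs_inode;        /* VFS sees only the embedded inode */
}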
3303 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) in shmem_truncate_range() argument
3305 truncate_inode_pages_range(inode->i_mapping, lstart, lend); in shmem_truncate_range()
3327 struct inode *inode; in __shmem_file_setup() local
3353 inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); in __shmem_file_setup()
3354 if (!inode) in __shmem_file_setup()
3357 inode->i_flags |= i_flags; in __shmem_file_setup()
3358 d_instantiate(path.dentry, inode); in __shmem_file_setup()
3359 inode->i_size = size; in __shmem_file_setup()
3360 clear_nlink(inode); /* It is unlinked */ in __shmem_file_setup()
3361 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); in __shmem_file_setup()
3451 struct inode *inode = mapping->host; in shmem_read_mapping_page_gfp() local
3456 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL); in shmem_read_mapping_page_gfp()
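shmem_read_mapping_page_gfp() exists for callers such as drivers that need a specific gfp mask; everyone else goes through a thin wrapper that simply reuses the mapping's own mask. A sketch of that wrapper as it appears in the shmem header, quoted from memory and therefore best treated as illustrative:

static inline struct page *shmem_read_mapping_page(
                        struct address_space *mapping, pgoff_t index)
{
        return shmem_read_mapping_page_gfp(mapping, index,
                                        mapping_gfp_mask(mapping));
}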