This source file includes the following definitions:
- f2fs_filemap_fault
- f2fs_vm_page_mkwrite
- get_parent_ino
- need_do_checkpoint
- need_inode_page_update
- try_to_fix_pino
- f2fs_do_sync_file
- f2fs_sync_file
- __get_first_dirty_index
- __found_offset
- f2fs_seek_block
- f2fs_llseek
- f2fs_file_mmap
- f2fs_file_open
- f2fs_truncate_data_blocks_range
- f2fs_truncate_data_blocks
- truncate_partial_data_page
- f2fs_truncate_blocks
- f2fs_truncate
- f2fs_getattr
- __setattr_copy
- f2fs_setattr
- fill_zero
- f2fs_truncate_hole
- punch_hole
- __read_out_blkaddrs
- __roll_back_blkaddrs
- __clone_blkaddrs
- __exchange_data_block
- f2fs_do_collapse
- f2fs_collapse_range
- f2fs_do_zero_range
- f2fs_zero_range
- f2fs_insert_range
- expand_inode_data
- f2fs_fallocate
- f2fs_release_file
- f2fs_file_flush
- f2fs_setflags_common
- f2fs_iflags_to_fsflags
- f2fs_fsflags_to_iflags
- f2fs_ioc_getflags
- f2fs_ioc_setflags
- f2fs_ioc_getversion
- f2fs_ioc_start_atomic_write
- f2fs_ioc_commit_atomic_write
- f2fs_ioc_start_volatile_write
- f2fs_ioc_release_volatile_write
- f2fs_ioc_abort_volatile_write
- f2fs_ioc_shutdown
- f2fs_ioc_fitrim
- uuid_is_nonzero
- f2fs_ioc_set_encryption_policy
- f2fs_ioc_get_encryption_policy
- f2fs_ioc_get_encryption_pwsalt
- f2fs_ioc_get_encryption_policy_ex
- f2fs_ioc_add_encryption_key
- f2fs_ioc_remove_encryption_key
- f2fs_ioc_remove_encryption_key_all_users
- f2fs_ioc_get_encryption_key_status
- f2fs_ioc_gc
- f2fs_ioc_gc_range
- f2fs_ioc_write_checkpoint
- f2fs_defragment_range
- f2fs_ioc_defragment
- f2fs_move_file_range
- f2fs_ioc_move_range
- f2fs_ioc_flush_device
- f2fs_ioc_get_features
- f2fs_transfer_project_quota
- f2fs_ioc_setproject
- f2fs_iflags_to_xflags
- f2fs_xflags_to_iflags
- f2fs_fill_fsxattr
- f2fs_ioc_fsgetxattr
- f2fs_ioc_fssetxattr
- f2fs_pin_file_control
- f2fs_ioc_set_pin_file
- f2fs_ioc_get_pin_file
- f2fs_precache_extents
- f2fs_ioc_precache_extents
- f2fs_ioc_resize_fs
- f2fs_ioc_enable_verity
- f2fs_ioc_measure_verity
- f2fs_get_volume_name
- f2fs_set_volume_name
- f2fs_ioctl
- f2fs_file_write_iter
- f2fs_compat_ioctl
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/file.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/stat.h>
11 #include <linux/buffer_head.h>
12 #include <linux/writeback.h>
13 #include <linux/blkdev.h>
14 #include <linux/falloc.h>
15 #include <linux/types.h>
16 #include <linux/compat.h>
17 #include <linux/uaccess.h>
18 #include <linux/mount.h>
19 #include <linux/pagevec.h>
20 #include <linux/uio.h>
21 #include <linux/uuid.h>
22 #include <linux/file.h>
23 #include <linux/nls.h>
24
25 #include "f2fs.h"
26 #include "node.h"
27 #include "segment.h"
28 #include "xattr.h"
29 #include "acl.h"
30 #include "gc.h"
31 #include "trace.h"
32 #include <trace/events/f2fs.h>
33
34 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
35 {
36 struct inode *inode = file_inode(vmf->vma->vm_file);
37 vm_fault_t ret;
38
39 down_read(&F2FS_I(inode)->i_mmap_sem);
40 ret = filemap_fault(vmf);
41 up_read(&F2FS_I(inode)->i_mmap_sem);
42
43 trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
44
45 return ret;
46 }
47
48 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
49 {
50 struct page *page = vmf->page;
51 struct inode *inode = file_inode(vmf->vma->vm_file);
52 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
53 struct dnode_of_data dn;
54 int err;
55
56 if (unlikely(f2fs_cp_error(sbi))) {
57 err = -EIO;
58 goto err;
59 }
60
61 if (!f2fs_is_checkpoint_ready(sbi)) {
62 err = -ENOSPC;
63 goto err;
64 }
65
66 /* should do out of any locked page */
67 f2fs_balance_fs(sbi, true);
68
69 sb_start_pagefault(inode->i_sb);
70
71 f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
72
73 file_update_time(vmf->vma->vm_file);
74 down_read(&F2FS_I(inode)->i_mmap_sem);
75 lock_page(page);
76 if (unlikely(page->mapping != inode->i_mapping ||
77 page_offset(page) > i_size_read(inode) ||
78 !PageUptodate(page))) {
79 unlock_page(page);
80 err = -EFAULT;
81 goto out_sem;
82 }
83
84 /* block allocation */
85 __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
86 set_new_dnode(&dn, inode, NULL, NULL, 0);
87 err = f2fs_get_block(&dn, page->index);
88 f2fs_put_dnode(&dn);
89 __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
90 if (err) {
91 unlock_page(page);
92 goto out_sem;
93 }
94
95 /* fill the page */
96 f2fs_wait_on_page_writeback(page, DATA, false, true);
97
98 /* wait for GCed page writeback via META_MAPPING */
99 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
100
101 /*
102  * check to see if the page is mapped already (no holes)
103  */
104 if (PageMappedToDisk(page))
105 goto out_sem;
106
107 /* page is wholly or partially inside EOF */
108 if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
109 i_size_read(inode)) {
110 loff_t offset;
111
112 offset = i_size_read(inode) & ~PAGE_MASK;
113 zero_user_segment(page, offset, PAGE_SIZE);
114 }
115 set_page_dirty(page);
116 if (!PageUptodate(page))
117 SetPageUptodate(page);
118
119 f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
120 f2fs_update_time(sbi, REQ_TIME);
121
122 trace_f2fs_vm_page_mkwrite(page, DATA);
123 out_sem:
124 up_read(&F2FS_I(inode)->i_mmap_sem);
125
126 sb_end_pagefault(inode->i_sb);
127 err:
128 return block_page_mkwrite_return(err);
129 }
130
131 static const struct vm_operations_struct f2fs_file_vm_ops = {
132 .fault = f2fs_filemap_fault,
133 .map_pages = filemap_map_pages,
134 .page_mkwrite = f2fs_vm_page_mkwrite,
135 };
136
137 static int get_parent_ino(struct inode *inode, nid_t *pino)
138 {
139 struct dentry *dentry;
140
141 inode = igrab(inode);
142 dentry = d_find_any_alias(inode);
143 iput(inode);
144 if (!dentry)
145 return 0;
146
147 *pino = parent_ino(dentry);
148 dput(dentry);
149 return 1;
150 }
151
152 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
153 {
154 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
155 enum cp_reason_type cp_reason = CP_NO_NEEDED;
156
157 if (!S_ISREG(inode->i_mode))
158 cp_reason = CP_NON_REGULAR;
159 else if (inode->i_nlink != 1)
160 cp_reason = CP_HARDLINK;
161 else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
162 cp_reason = CP_SB_NEED_CP;
163 else if (file_wrong_pino(inode))
164 cp_reason = CP_WRONG_PINO;
165 else if (!f2fs_space_for_roll_forward(sbi))
166 cp_reason = CP_NO_SPC_ROLL;
167 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
168 cp_reason = CP_NODE_NEED_CP;
169 else if (test_opt(sbi, FASTBOOT))
170 cp_reason = CP_FASTBOOT_MODE;
171 else if (F2FS_OPTION(sbi).active_logs == 2)
172 cp_reason = CP_SPEC_LOG_NUM;
173 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
174 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
175 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
176 TRANS_DIR_INO))
177 cp_reason = CP_RECOVER_DIR;
178
179 return cp_reason;
180 }
181
182 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
183 {
184 struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
185 bool ret = false;
186
187 if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
188 ret = true;
189 f2fs_put_page(i, 0);
190 return ret;
191 }
192
193 static void try_to_fix_pino(struct inode *inode)
194 {
195 struct f2fs_inode_info *fi = F2FS_I(inode);
196 nid_t pino;
197
198 down_write(&fi->i_sem);
199 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
200 get_parent_ino(inode, &pino)) {
201 f2fs_i_pino_write(inode, pino);
202 file_got_pino(inode);
203 }
204 up_write(&fi->i_sem);
205 }
206
207 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
208 int datasync, bool atomic)
209 {
210 struct inode *inode = file->f_mapping->host;
211 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
212 nid_t ino = inode->i_ino;
213 int ret = 0;
214 enum cp_reason_type cp_reason = 0;
215 struct writeback_control wbc = {
216 .sync_mode = WB_SYNC_ALL,
217 .nr_to_write = LONG_MAX,
218 .for_reclaim = 0,
219 };
220 unsigned int seq_id = 0;
221
222 if (unlikely(f2fs_readonly(inode->i_sb) ||
223 is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
224 return 0;
225
226 trace_f2fs_sync_file_enter(inode);
227
228 if (S_ISDIR(inode->i_mode))
229 goto go_write;
230
231 /* if fdatasync is triggered, let's do in-place-update */
232 if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
233 set_inode_flag(inode, FI_NEED_IPU);
234 ret = file_write_and_wait_range(file, start, end);
235 clear_inode_flag(inode, FI_NEED_IPU);
236
237 if (ret) {
238 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
239 return ret;
240 }
241
242 /* if the inode is dirty, let's recover all the time */
243 if (!f2fs_skip_inode_update(inode, datasync)) {
244 f2fs_write_inode(inode, NULL);
245 goto go_write;
246 }
247
248 /*
249  * if there is no written data, don't waste time writing recovery info
250  */
251 if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
252 !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
253
254 /* it may call write_inode just prior to fsync */
255 if (need_inode_page_update(sbi, ino))
256 goto go_write;
257
258 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
259 f2fs_exist_written_data(sbi, ino, UPDATE_INO))
260 goto flush_out;
261 goto out;
262 }
263 go_write:
264 /*
265  * Both fdatasync() and fsync() are able to be recovered from
266  * sudden power-off.
267  */
268 down_read(&F2FS_I(inode)->i_sem);
269 cp_reason = need_do_checkpoint(inode);
270 up_read(&F2FS_I(inode)->i_sem);
271
272 if (cp_reason) {
273 /* all the dirty node pages should be flushed for POR */
274 ret = f2fs_sync_fs(inode->i_sb, 1);
275
276 /*
277  * We've secured consistency through sync_fs. The following pino
278  * will be used only for fsynced inodes after checkpoint.
279  */
280 try_to_fix_pino(inode);
281 clear_inode_flag(inode, FI_APPEND_WRITE);
282 clear_inode_flag(inode, FI_UPDATE_WRITE);
283 goto out;
284 }
285 sync_nodes:
286 atomic_inc(&sbi->wb_sync_req[NODE]);
287 ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
288 atomic_dec(&sbi->wb_sync_req[NODE]);
289 if (ret)
290 goto out;
291
292 /* if cp_error was enabled, we should avoid infinite loop */
293 if (unlikely(f2fs_cp_error(sbi))) {
294 ret = -EIO;
295 goto out;
296 }
297
298 if (f2fs_need_inode_block_update(sbi, ino)) {
299 f2fs_mark_inode_dirty_sync(inode, true);
300 f2fs_write_inode(inode, NULL);
301 goto sync_nodes;
302 }
303
304 /*
305  * If it's an atomic_write, it's just fine to keep write ordering. So
306  * here we don't need to wait for node write completion, since we use
307  * the node chain which serializes node blocks. If one of the node
308  * writes is reordered, we can simply see a broken chain, such that
309  * stopping the write IOs to the next node block happens after flushing
310  * the previous one. This does not guarantee recovery of the node chain.
311  */
312 if (!atomic) {
313 ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
314 if (ret)
315 goto out;
316 }
317
318 /* once recovery info is written, don't need to track this */
319 f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
320 clear_inode_flag(inode, FI_APPEND_WRITE);
321 flush_out:
322 if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
323 ret = f2fs_issue_flush(sbi, inode->i_ino);
324 if (!ret) {
325 f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
326 clear_inode_flag(inode, FI_UPDATE_WRITE);
327 f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
328 }
329 f2fs_update_time(sbi, REQ_TIME);
330 out:
331 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
332 f2fs_trace_ios(NULL, 1);
333 return ret;
334 }
335
336 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
337 {
338 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
339 return -EIO;
340 return f2fs_do_sync_file(file, start, end, datasync, false);
341 }
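
/*
 * A minimal user-space sketch (illustrative path and data only): both
 * fsync(2) and fdatasync(2) land in f2fs_sync_file() above via ->fsync;
 * fdatasync() passes datasync=1, which lets the fast path prefer in-place
 * updates (FI_NEED_IPU) and skip a clean inode update.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/f2fs/testfile", O_WRONLY | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	if (write(fd, "data", 4) != 4)
		perror("write");
	if (fdatasync(fd))	/* data, plus only the metadata needed to read it */
		perror("fdatasync");
	if (fsync(fd))		/* data plus all inode metadata */
		perror("fsync");
	close(fd);
	return 0;
}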
342
343 static pgoff_t __get_first_dirty_index(struct address_space *mapping,
344 pgoff_t pgofs, int whence)
345 {
346 struct page *page;
347 int nr_pages;
348
349 if (whence != SEEK_DATA)
350 return 0;
351
352 /* find first dirty page index */
353 nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
354 1, &page);
355 if (!nr_pages)
356 return ULONG_MAX;
357 pgofs = page->index;
358 put_page(page);
359 return pgofs;
360 }
361
362 static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
363 pgoff_t dirty, pgoff_t pgofs, int whence)
364 {
365 switch (whence) {
366 case SEEK_DATA:
367 if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
368 __is_valid_data_blkaddr(blkaddr))
369 return true;
370 break;
371 case SEEK_HOLE:
372 if (blkaddr == NULL_ADDR)
373 return true;
374 break;
375 }
376 return false;
377 }
378
379 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
380 {
381 struct inode *inode = file->f_mapping->host;
382 loff_t maxbytes = inode->i_sb->s_maxbytes;
383 struct dnode_of_data dn;
384 pgoff_t pgofs, end_offset, dirty;
385 loff_t data_ofs = offset;
386 loff_t isize;
387 int err = 0;
388
389 inode_lock(inode);
390
391 isize = i_size_read(inode);
392 if (offset >= isize)
393 goto fail;
394
395 /* handle inline data case */
396 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
397 if (whence == SEEK_HOLE)
398 data_ofs = isize;
399 goto found;
400 }
401
402 pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
403
404 dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
405
406 for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
407 set_new_dnode(&dn, inode, NULL, NULL, 0);
408 err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
409 if (err && err != -ENOENT) {
410 goto fail;
411 } else if (err == -ENOENT) {
412 /* direct node does not exist */
413 if (whence == SEEK_DATA) {
414 pgofs = f2fs_get_next_page_offset(&dn, pgofs);
415 continue;
416 } else {
417 goto found;
418 }
419 }
420
421 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
422
423 /* find data/hole in dnode block */
424 for (; dn.ofs_in_node < end_offset;
425 dn.ofs_in_node++, pgofs++,
426 data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
427 block_t blkaddr;
428
429 blkaddr = datablock_addr(dn.inode,
430 dn.node_page, dn.ofs_in_node);
431
432 if (__is_valid_data_blkaddr(blkaddr) &&
433 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
434 blkaddr, DATA_GENERIC_ENHANCE)) {
435 f2fs_put_dnode(&dn);
436 goto fail;
437 }
438
439 if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
440 pgofs, whence)) {
441 f2fs_put_dnode(&dn);
442 goto found;
443 }
444 }
445 f2fs_put_dnode(&dn);
446 }
447
448 if (whence == SEEK_DATA)
449 goto fail;
450 found:
451 if (whence == SEEK_HOLE && data_ofs > isize)
452 data_ofs = isize;
453 inode_unlock(inode);
454 return vfs_setpos(file, data_ofs, maxbytes);
455 fail:
456 inode_unlock(inode);
457 return -ENXIO;
458 }
459
460 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
461 {
462 struct inode *inode = file->f_mapping->host;
463 loff_t maxbytes = inode->i_sb->s_maxbytes;
464
465 switch (whence) {
466 case SEEK_SET:
467 case SEEK_CUR:
468 case SEEK_END:
469 return generic_file_llseek_size(file, offset, whence,
470 maxbytes, i_size_read(inode));
471 case SEEK_DATA:
472 case SEEK_HOLE:
473 if (offset < 0)
474 return -ENXIO;
475 return f2fs_seek_block(file, offset, whence);
476 }
477
478 return -EINVAL;
479 }
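
/*
 * Sketch of driving f2fs_seek_block() from user space (illustrative path):
 * SEEK_DATA/SEEK_HOLE are the standard lseek(2) extensions, visible with
 * _GNU_SOURCE. The loop prints each data extent of a sparse file.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/f2fs/sparsefile", O_RDONLY);
	off_t off = 0;

	if (fd < 0)
		return 1;
	while ((off = lseek(fd, off, SEEK_DATA)) != (off_t)-1) {
		off_t hole = lseek(fd, off, SEEK_HOLE);

		printf("data: [%lld, %lld)\n", (long long)off, (long long)hole);
		off = hole;	/* resume the scan after this extent */
	}
	close(fd);		/* lseek() fails with ENXIO past the last extent */
	return 0;
}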
480
481 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
482 {
483 struct inode *inode = file_inode(file);
484 int err;
485
486 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
487 return -EIO;
488
489 /* we don't need to use inline_data strictly */
490 err = f2fs_convert_inline_inode(inode);
491 if (err)
492 return err;
493
494 file_accessed(file);
495 vma->vm_ops = &f2fs_file_vm_ops;
496 return 0;
497 }
498
499 static int f2fs_file_open(struct inode *inode, struct file *filp)
500 {
501 int err = fscrypt_file_open(inode, filp);
502
503 if (err)
504 return err;
505
506 err = fsverity_file_open(inode, filp);
507 if (err)
508 return err;
509
510 filp->f_mode |= FMODE_NOWAIT;
511
512 return dquot_file_open(inode, filp);
513 }
514
515 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
516 {
517 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
518 struct f2fs_node *raw_node;
519 int nr_free = 0, ofs = dn->ofs_in_node, len = count;
520 __le32 *addr;
521 int base = 0;
522
523 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
524 base = get_extra_isize(dn->inode);
525
526 raw_node = F2FS_NODE(dn->node_page);
527 addr = blkaddr_in_node(raw_node) + base + ofs;
528
529 for (; count > 0; count--, addr++, dn->ofs_in_node++) {
530 block_t blkaddr = le32_to_cpu(*addr);
531
532 if (blkaddr == NULL_ADDR)
533 continue;
534
535 dn->data_blkaddr = NULL_ADDR;
536 f2fs_set_data_blkaddr(dn);
537
538 if (__is_valid_data_blkaddr(blkaddr) &&
539 !f2fs_is_valid_blkaddr(sbi, blkaddr,
540 DATA_GENERIC_ENHANCE))
541 continue;
542
543 f2fs_invalidate_blocks(sbi, blkaddr);
544 if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
545 clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
546 nr_free++;
547 }
548
549 if (nr_free) {
550 pgoff_t fofs;
551 /*
552  * once we invalidate a valid blkaddr in range [ofs, ofs + count],
553  * we will invalidate all blkaddrs in the whole range.
554  */
555 fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
556 dn->inode) + ofs;
557 f2fs_update_extent_cache_range(dn, fofs, 0, len);
558 dec_valid_block_count(sbi, dn->inode, nr_free);
559 }
560 dn->ofs_in_node = ofs;
561
562 f2fs_update_time(sbi, REQ_TIME);
563 trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
564 dn->ofs_in_node, nr_free);
565 }
566
567 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
568 {
569 f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
570 }
571
572 static int truncate_partial_data_page(struct inode *inode, u64 from,
573 bool cache_only)
574 {
575 loff_t offset = from & (PAGE_SIZE - 1);
576 pgoff_t index = from >> PAGE_SHIFT;
577 struct address_space *mapping = inode->i_mapping;
578 struct page *page;
579
580 if (!offset && !cache_only)
581 return 0;
582
583 if (cache_only) {
584 page = find_lock_page(mapping, index);
585 if (page && PageUptodate(page))
586 goto truncate_out;
587 f2fs_put_page(page, 1);
588 return 0;
589 }
590
591 page = f2fs_get_lock_data_page(inode, index, true);
592 if (IS_ERR(page))
593 return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
594 truncate_out:
595 f2fs_wait_on_page_writeback(page, DATA, true, true);
596 zero_user(page, offset, PAGE_SIZE - offset);
597
598 /* An encrypted inode should have a key and truncate the last page. */
599 f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
600 if (!cache_only)
601 set_page_dirty(page);
602 f2fs_put_page(page, 1);
603 return 0;
604 }
605
606 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
607 {
608 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
609 struct dnode_of_data dn;
610 pgoff_t free_from;
611 int count = 0, err = 0;
612 struct page *ipage;
613 bool truncate_page = false;
614
615 trace_f2fs_truncate_blocks_enter(inode, from);
616
617 free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
618
619 if (free_from >= sbi->max_file_blocks)
620 goto free_partial;
621
622 if (lock)
623 f2fs_lock_op(sbi);
624
625 ipage = f2fs_get_node_page(sbi, inode->i_ino);
626 if (IS_ERR(ipage)) {
627 err = PTR_ERR(ipage);
628 goto out;
629 }
630
631 if (f2fs_has_inline_data(inode)) {
632 f2fs_truncate_inline_inode(inode, ipage, from);
633 f2fs_put_page(ipage, 1);
634 truncate_page = true;
635 goto out;
636 }
637
638 set_new_dnode(&dn, inode, ipage, NULL, 0);
639 err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
640 if (err) {
641 if (err == -ENOENT)
642 goto free_next;
643 goto out;
644 }
645
646 count = ADDRS_PER_PAGE(dn.node_page, inode);
647
648 count -= dn.ofs_in_node;
649 f2fs_bug_on(sbi, count < 0);
650
651 if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
652 f2fs_truncate_data_blocks_range(&dn, count);
653 free_from += count;
654 }
655
656 f2fs_put_dnode(&dn);
657 free_next:
658 err = f2fs_truncate_inode_blocks(inode, free_from);
659 out:
660 if (lock)
661 f2fs_unlock_op(sbi);
662 free_partial:
663 /* lastly zero out the first data page */
664 if (!err)
665 err = truncate_partial_data_page(inode, from, truncate_page);
666
667 trace_f2fs_truncate_blocks_exit(inode, err);
668 return err;
669 }
670
671 int f2fs_truncate(struct inode *inode)
672 {
673 int err;
674
675 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
676 return -EIO;
677
678 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
679 S_ISLNK(inode->i_mode)))
680 return 0;
681
682 trace_f2fs_truncate(inode);
683
684 if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
685 f2fs_show_injection_info(FAULT_TRUNCATE);
686 return -EIO;
687 }
688
689 /* we should check inline_data size */
690 if (!f2fs_may_inline_data(inode)) {
691 err = f2fs_convert_inline_inode(inode);
692 if (err)
693 return err;
694 }
695
696 err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
697 if (err)
698 return err;
699
700 inode->i_mtime = inode->i_ctime = current_time(inode);
701 f2fs_mark_inode_dirty_sync(inode, false);
702 return 0;
703 }
704
705 int f2fs_getattr(const struct path *path, struct kstat *stat,
706 u32 request_mask, unsigned int query_flags)
707 {
708 struct inode *inode = d_inode(path->dentry);
709 struct f2fs_inode_info *fi = F2FS_I(inode);
710 struct f2fs_inode *ri;
711 unsigned int flags;
712
713 if (f2fs_has_extra_attr(inode) &&
714 f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
715 F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
716 stat->result_mask |= STATX_BTIME;
717 stat->btime.tv_sec = fi->i_crtime.tv_sec;
718 stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
719 }
720
721 flags = fi->i_flags;
722 if (flags & F2FS_APPEND_FL)
723 stat->attributes |= STATX_ATTR_APPEND;
724 if (IS_ENCRYPTED(inode))
725 stat->attributes |= STATX_ATTR_ENCRYPTED;
726 if (flags & F2FS_IMMUTABLE_FL)
727 stat->attributes |= STATX_ATTR_IMMUTABLE;
728 if (flags & F2FS_NODUMP_FL)
729 stat->attributes |= STATX_ATTR_NODUMP;
730
731 stat->attributes_mask |= (STATX_ATTR_APPEND |
732 STATX_ATTR_ENCRYPTED |
733 STATX_ATTR_IMMUTABLE |
734 STATX_ATTR_NODUMP);
735
736 generic_fillattr(inode, stat);
737
738 /* we need to show initial sectors used for inline_data/dentries */
739 if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
740 f2fs_has_inline_dentry(inode))
741 stat->blocks += (stat->size + 511) >> 9;
742
743 return 0;
744 }
745
746 #ifdef CONFIG_F2FS_FS_POSIX_ACL
747 static void __setattr_copy(struct inode *inode, const struct iattr *attr)
748 {
749 unsigned int ia_valid = attr->ia_valid;
750
751 if (ia_valid & ATTR_UID)
752 inode->i_uid = attr->ia_uid;
753 if (ia_valid & ATTR_GID)
754 inode->i_gid = attr->ia_gid;
755 if (ia_valid & ATTR_ATIME)
756 inode->i_atime = attr->ia_atime;
757 if (ia_valid & ATTR_MTIME)
758 inode->i_mtime = attr->ia_mtime;
759 if (ia_valid & ATTR_CTIME)
760 inode->i_ctime = attr->ia_ctime;
761 if (ia_valid & ATTR_MODE) {
762 umode_t mode = attr->ia_mode;
763
764 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
765 mode &= ~S_ISGID;
766 set_acl_inode(inode, mode);
767 }
768 }
769 #else
770 #define __setattr_copy setattr_copy
771 #endif
772
773 int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
774 {
775 struct inode *inode = d_inode(dentry);
776 int err;
777
778 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
779 return -EIO;
780
781 err = setattr_prepare(dentry, attr);
782 if (err)
783 return err;
784
785 err = fscrypt_prepare_setattr(dentry, attr);
786 if (err)
787 return err;
788
789 err = fsverity_prepare_setattr(dentry, attr);
790 if (err)
791 return err;
792
793 if (is_quota_modification(inode, attr)) {
794 err = dquot_initialize(inode);
795 if (err)
796 return err;
797 }
798 if ((attr->ia_valid & ATTR_UID &&
799 !uid_eq(attr->ia_uid, inode->i_uid)) ||
800 (attr->ia_valid & ATTR_GID &&
801 !gid_eq(attr->ia_gid, inode->i_gid))) {
802 f2fs_lock_op(F2FS_I_SB(inode));
803 err = dquot_transfer(inode, attr);
804 if (err) {
805 set_sbi_flag(F2FS_I_SB(inode),
806 SBI_QUOTA_NEED_REPAIR);
807 f2fs_unlock_op(F2FS_I_SB(inode));
808 return err;
809 }
810 /*
811  * update uid/gid under lock_op(), so that dquot and inode can
812  * be updated atomically.
813  */
814 if (attr->ia_valid & ATTR_UID)
815 inode->i_uid = attr->ia_uid;
816 if (attr->ia_valid & ATTR_GID)
817 inode->i_gid = attr->ia_gid;
818 f2fs_mark_inode_dirty_sync(inode, true);
819 f2fs_unlock_op(F2FS_I_SB(inode));
820 }
821
822 if (attr->ia_valid & ATTR_SIZE) {
823 loff_t old_size = i_size_read(inode);
824
825 if (attr->ia_size > MAX_INLINE_DATA(inode)) {
826 /*
827  * should convert inline inode before i_size_write to
828  * keep it smaller than the inline_data size with the inline flag
829  */
830 err = f2fs_convert_inline_inode(inode);
831 if (err)
832 return err;
833 }
834
835 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
836 down_write(&F2FS_I(inode)->i_mmap_sem);
837
838 truncate_setsize(inode, attr->ia_size);
839
840 if (attr->ia_size <= old_size)
841 err = f2fs_truncate(inode);
842
843 /*
844  * do not trim all blocks after i_size if target size is larger than i_size
845  */
846 up_write(&F2FS_I(inode)->i_mmap_sem);
847 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
848 if (err)
849 return err;
850
851 down_write(&F2FS_I(inode)->i_sem);
852 inode->i_mtime = inode->i_ctime = current_time(inode);
853 F2FS_I(inode)->last_disk_size = i_size_read(inode);
854 up_write(&F2FS_I(inode)->i_sem);
855 }
856
857 __setattr_copy(inode, attr);
858
859 if (attr->ia_valid & ATTR_MODE) {
860 err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
861 if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
862 inode->i_mode = F2FS_I(inode)->i_acl_mode;
863 clear_inode_flag(inode, FI_ACL_MODE);
864 }
865 }
866
867 /* file size may have changed here */
868 f2fs_mark_inode_dirty_sync(inode, true);
869
870 /* inode change will produce dirty node pages flushed by checkpoint */
871 f2fs_balance_fs(F2FS_I_SB(inode), true);
872
873 return err;
874 }
875
876 const struct inode_operations f2fs_file_inode_operations = {
877 .getattr = f2fs_getattr,
878 .setattr = f2fs_setattr,
879 .get_acl = f2fs_get_acl,
880 .set_acl = f2fs_set_acl,
881 #ifdef CONFIG_F2FS_FS_XATTR
882 .listxattr = f2fs_listxattr,
883 #endif
884 .fiemap = f2fs_fiemap,
885 };
886
887 static int fill_zero(struct inode *inode, pgoff_t index,
888 loff_t start, loff_t len)
889 {
890 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
891 struct page *page;
892
893 if (!len)
894 return 0;
895
896 f2fs_balance_fs(sbi, true);
897
898 f2fs_lock_op(sbi);
899 page = f2fs_get_new_data_page(inode, NULL, index, false);
900 f2fs_unlock_op(sbi);
901
902 if (IS_ERR(page))
903 return PTR_ERR(page);
904
905 f2fs_wait_on_page_writeback(page, DATA, true, true);
906 zero_user(page, start, len);
907 set_page_dirty(page);
908 f2fs_put_page(page, 1);
909 return 0;
910 }
911
912 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
913 {
914 int err;
915
916 while (pg_start < pg_end) {
917 struct dnode_of_data dn;
918 pgoff_t end_offset, count;
919
920 set_new_dnode(&dn, inode, NULL, NULL, 0);
921 err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
922 if (err) {
923 if (err == -ENOENT) {
924 pg_start = f2fs_get_next_page_offset(&dn,
925 pg_start);
926 continue;
927 }
928 return err;
929 }
930
931 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
932 count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
933
934 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
935
936 f2fs_truncate_data_blocks_range(&dn, count);
937 f2fs_put_dnode(&dn);
938
939 pg_start += count;
940 }
941 return 0;
942 }
943
944 static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
945 {
946 pgoff_t pg_start, pg_end;
947 loff_t off_start, off_end;
948 int ret;
949
950 ret = f2fs_convert_inline_inode(inode);
951 if (ret)
952 return ret;
953
954 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
955 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
956
957 off_start = offset & (PAGE_SIZE - 1);
958 off_end = (offset + len) & (PAGE_SIZE - 1);
959
960 if (pg_start == pg_end) {
961 ret = fill_zero(inode, pg_start, off_start,
962 off_end - off_start);
963 if (ret)
964 return ret;
965 } else {
966 if (off_start) {
967 ret = fill_zero(inode, pg_start++, off_start,
968 PAGE_SIZE - off_start);
969 if (ret)
970 return ret;
971 }
972 if (off_end) {
973 ret = fill_zero(inode, pg_end, 0, off_end);
974 if (ret)
975 return ret;
976 }
977
978 if (pg_start < pg_end) {
979 struct address_space *mapping = inode->i_mapping;
980 loff_t blk_start, blk_end;
981 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
982
983 f2fs_balance_fs(sbi, true);
984
985 blk_start = (loff_t)pg_start << PAGE_SHIFT;
986 blk_end = (loff_t)pg_end << PAGE_SHIFT;
987
988 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
989 down_write(&F2FS_I(inode)->i_mmap_sem);
990
991 truncate_inode_pages_range(mapping, blk_start,
992 blk_end - 1);
993
994 f2fs_lock_op(sbi);
995 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
996 f2fs_unlock_op(sbi);
997
998 up_write(&F2FS_I(inode)->i_mmap_sem);
999 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1000 }
1001 }
1002
1003 return ret;
1004 }
1005
1006 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1007 int *do_replace, pgoff_t off, pgoff_t len)
1008 {
1009 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1010 struct dnode_of_data dn;
1011 int ret, done, i;
1012
1013 next_dnode:
1014 set_new_dnode(&dn, inode, NULL, NULL, 0);
1015 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1016 if (ret && ret != -ENOENT) {
1017 return ret;
1018 } else if (ret == -ENOENT) {
1019 if (dn.max_level == 0)
1020 return -ENOENT;
1021 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) - dn.ofs_in_node,
1022 len);
1023 blkaddr += done;
1024 do_replace += done;
1025 goto next;
1026 }
1027
1028 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1029 dn.ofs_in_node, len);
1030 for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1031 *blkaddr = datablock_addr(dn.inode,
1032 dn.node_page, dn.ofs_in_node);
1033
1034 if (__is_valid_data_blkaddr(*blkaddr) &&
1035 !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1036 DATA_GENERIC_ENHANCE)) {
1037 f2fs_put_dnode(&dn);
1038 return -EFSCORRUPTED;
1039 }
1040
1041 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1042
1043 if (test_opt(sbi, LFS)) {
1044 f2fs_put_dnode(&dn);
1045 return -EOPNOTSUPP;
1046 }
1047
1048 /* do not invalidate this block address */
1049 f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1050 *do_replace = 1;
1051 }
1052 }
1053 f2fs_put_dnode(&dn);
1054 next:
1055 len -= done;
1056 off += done;
1057 if (len)
1058 goto next_dnode;
1059 return 0;
1060 }
1061
1062 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1063 int *do_replace, pgoff_t off, int len)
1064 {
1065 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1066 struct dnode_of_data dn;
1067 int ret, i;
1068
1069 for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1070 if (*do_replace == 0)
1071 continue;
1072
1073 set_new_dnode(&dn, inode, NULL, NULL, 0);
1074 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1075 if (ret) {
1076 dec_valid_block_count(sbi, inode, 1);
1077 f2fs_invalidate_blocks(sbi, *blkaddr);
1078 } else {
1079 f2fs_update_data_blkaddr(&dn, *blkaddr);
1080 }
1081 f2fs_put_dnode(&dn);
1082 }
1083 return 0;
1084 }
1085
1086 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1087 block_t *blkaddr, int *do_replace,
1088 pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1089 {
1090 struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1091 pgoff_t i = 0;
1092 int ret;
1093
1094 while (i < len) {
1095 if (blkaddr[i] == NULL_ADDR && !full) {
1096 i++;
1097 continue;
1098 }
1099
1100 if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1101 struct dnode_of_data dn;
1102 struct node_info ni;
1103 size_t new_size;
1104 pgoff_t ilen;
1105
1106 set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1107 ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1108 if (ret)
1109 return ret;
1110
1111 ret = f2fs_get_node_info(sbi, dn.nid, &ni);
1112 if (ret) {
1113 f2fs_put_dnode(&dn);
1114 return ret;
1115 }
1116
1117 ilen = min((pgoff_t)
1118 ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1119 dn.ofs_in_node, len - i);
1120 do {
1121 dn.data_blkaddr = datablock_addr(dn.inode,
1122 dn.node_page, dn.ofs_in_node);
1123 f2fs_truncate_data_blocks_range(&dn, 1);
1124
1125 if (do_replace[i]) {
1126 f2fs_i_blocks_write(src_inode,
1127 1, false, false);
1128 f2fs_i_blocks_write(dst_inode,
1129 1, true, false);
1130 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1131 blkaddr[i], ni.version, true, false);
1132
1133 do_replace[i] = 0;
1134 }
1135 dn.ofs_in_node++;
1136 i++;
1137 new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1138 if (dst_inode->i_size < new_size)
1139 f2fs_i_size_write(dst_inode, new_size);
1140 } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1141
1142 f2fs_put_dnode(&dn);
1143 } else {
1144 struct page *psrc, *pdst;
1145
1146 psrc = f2fs_get_lock_data_page(src_inode,
1147 src + i, true);
1148 if (IS_ERR(psrc))
1149 return PTR_ERR(psrc);
1150 pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1151 true);
1152 if (IS_ERR(pdst)) {
1153 f2fs_put_page(psrc, 1);
1154 return PTR_ERR(pdst);
1155 }
1156 f2fs_copy_page(psrc, pdst);
1157 set_page_dirty(pdst);
1158 f2fs_put_page(pdst, 1);
1159 f2fs_put_page(psrc, 1);
1160
1161 ret = f2fs_truncate_hole(src_inode,
1162 src + i, src + i + 1);
1163 if (ret)
1164 return ret;
1165 i++;
1166 }
1167 }
1168 return 0;
1169 }
1170
1171 static int __exchange_data_block(struct inode *src_inode,
1172 struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1173 pgoff_t len, bool full)
1174 {
1175 block_t *src_blkaddr;
1176 int *do_replace;
1177 pgoff_t olen;
1178 int ret;
1179
1180 while (len) {
1181 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1182
1183 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1184 array_size(olen, sizeof(block_t)),
1185 GFP_KERNEL);
1186 if (!src_blkaddr)
1187 return -ENOMEM;
1188
1189 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1190 array_size(olen, sizeof(int)),
1191 GFP_KERNEL);
1192 if (!do_replace) {
1193 kvfree(src_blkaddr);
1194 return -ENOMEM;
1195 }
1196
1197 ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1198 do_replace, src, olen);
1199 if (ret)
1200 goto roll_back;
1201
1202 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1203 do_replace, src, dst, olen, full);
1204 if (ret)
1205 goto roll_back;
1206
1207 src += olen;
1208 dst += olen;
1209 len -= olen;
1210
1211 kvfree(src_blkaddr);
1212 kvfree(do_replace);
1213 }
1214 return 0;
1215
1216 roll_back:
1217 __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1218 kvfree(src_blkaddr);
1219 kvfree(do_replace);
1220 return ret;
1221 }
1222
1223 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1224 {
1225 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1226 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1227 pgoff_t start = offset >> PAGE_SHIFT;
1228 pgoff_t end = (offset + len) >> PAGE_SHIFT;
1229 int ret;
1230
1231 f2fs_balance_fs(sbi, true);
1232
1233 /* avoid gc operation during block exchange */
1234 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1235 down_write(&F2FS_I(inode)->i_mmap_sem);
1236
1237 f2fs_lock_op(sbi);
1238 f2fs_drop_extent_tree(inode);
1239 truncate_pagecache(inode, offset);
1240 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1241 f2fs_unlock_op(sbi);
1242
1243 up_write(&F2FS_I(inode)->i_mmap_sem);
1244 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1245 return ret;
1246 }
1247
1248 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1249 {
1250 loff_t new_size;
1251 int ret;
1252
1253 if (offset + len >= i_size_read(inode))
1254 return -EINVAL;
1255
1256 /* collapse range should be aligned to the block size of f2fs */
1257 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1258 return -EINVAL;
1259
1260 ret = f2fs_convert_inline_inode(inode);
1261 if (ret)
1262 return ret;
1263
1264 /* write out all dirty pages from offset */
1265 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1266 if (ret)
1267 return ret;
1268
1269 ret = f2fs_do_collapse(inode, offset, len);
1270 if (ret)
1271 return ret;
1272
1273 /* write out all moved pages, if possible */
1274 down_write(&F2FS_I(inode)->i_mmap_sem);
1275 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1276 truncate_pagecache(inode, offset);
1277
1278 new_size = i_size_read(inode) - len;
1279 truncate_pagecache(inode, new_size);
1280
1281 ret = f2fs_truncate_blocks(inode, new_size, true);
1282 up_write(&F2FS_I(inode)->i_mmap_sem);
1283 if (!ret)
1284 f2fs_i_size_write(inode, new_size);
1285 return ret;
1286 }
1287
1288 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1289 pgoff_t end)
1290 {
1291 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1292 pgoff_t index = start;
1293 unsigned int ofs_in_node = dn->ofs_in_node;
1294 blkcnt_t count = 0;
1295 int ret;
1296
1297 for (; index < end; index++, dn->ofs_in_node++) {
1298 if (datablock_addr(dn->inode, dn->node_page,
1299 dn->ofs_in_node) == NULL_ADDR)
1300 count++;
1301 }
1302
1303 dn->ofs_in_node = ofs_in_node;
1304 ret = f2fs_reserve_new_blocks(dn, count);
1305 if (ret)
1306 return ret;
1307
1308 dn->ofs_in_node = ofs_in_node;
1309 for (index = start; index < end; index++, dn->ofs_in_node++) {
1310 dn->data_blkaddr = datablock_addr(dn->inode,
1311 dn->node_page, dn->ofs_in_node);
1312 /*
1313  * f2fs_reserve_new_blocks will not guarantee entire block
1314  * allocation.
1315  */
1316 if (dn->data_blkaddr == NULL_ADDR) {
1317 ret = -ENOSPC;
1318 break;
1319 }
1320 if (dn->data_blkaddr != NEW_ADDR) {
1321 f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1322 dn->data_blkaddr = NEW_ADDR;
1323 f2fs_set_data_blkaddr(dn);
1324 }
1325 }
1326
1327 f2fs_update_extent_cache_range(dn, start, 0, index - start);
1328
1329 return ret;
1330 }
1331
1332 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1333 int mode)
1334 {
1335 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1336 struct address_space *mapping = inode->i_mapping;
1337 pgoff_t index, pg_start, pg_end;
1338 loff_t new_size = i_size_read(inode);
1339 loff_t off_start, off_end;
1340 int ret = 0;
1341
1342 ret = inode_newsize_ok(inode, (len + offset));
1343 if (ret)
1344 return ret;
1345
1346 ret = f2fs_convert_inline_inode(inode);
1347 if (ret)
1348 return ret;
1349
1350 ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1351 if (ret)
1352 return ret;
1353
1354 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1355 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1356
1357 off_start = offset & (PAGE_SIZE - 1);
1358 off_end = (offset + len) & (PAGE_SIZE - 1);
1359
1360 if (pg_start == pg_end) {
1361 ret = fill_zero(inode, pg_start, off_start,
1362 off_end - off_start);
1363 if (ret)
1364 return ret;
1365
1366 new_size = max_t(loff_t, new_size, offset + len);
1367 } else {
1368 if (off_start) {
1369 ret = fill_zero(inode, pg_start++, off_start,
1370 PAGE_SIZE - off_start);
1371 if (ret)
1372 return ret;
1373
1374 new_size = max_t(loff_t, new_size,
1375 (loff_t)pg_start << PAGE_SHIFT);
1376 }
1377
1378 for (index = pg_start; index < pg_end;) {
1379 struct dnode_of_data dn;
1380 unsigned int end_offset;
1381 pgoff_t end;
1382
1383 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1384 down_write(&F2FS_I(inode)->i_mmap_sem);
1385
1386 truncate_pagecache_range(inode,
1387 (loff_t)index << PAGE_SHIFT,
1388 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1389
1390 f2fs_lock_op(sbi);
1391
1392 set_new_dnode(&dn, inode, NULL, NULL, 0);
1393 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1394 if (ret) {
1395 f2fs_unlock_op(sbi);
1396 up_write(&F2FS_I(inode)->i_mmap_sem);
1397 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1398 goto out;
1399 }
1400
1401 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1402 end = min(pg_end, end_offset - dn.ofs_in_node + index);
1403
1404 ret = f2fs_do_zero_range(&dn, index, end);
1405 f2fs_put_dnode(&dn);
1406
1407 f2fs_unlock_op(sbi);
1408 up_write(&F2FS_I(inode)->i_mmap_sem);
1409 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1410
1411 f2fs_balance_fs(sbi, dn.node_changed);
1412
1413 if (ret)
1414 goto out;
1415
1416 index = end;
1417 new_size = max_t(loff_t, new_size,
1418 (loff_t)index << PAGE_SHIFT);
1419 }
1420
1421 if (off_end) {
1422 ret = fill_zero(inode, pg_end, 0, off_end);
1423 if (ret)
1424 goto out;
1425
1426 new_size = max_t(loff_t, new_size, offset + len);
1427 }
1428 }
1429
1430 out:
1431 if (new_size > i_size_read(inode)) {
1432 if (mode & FALLOC_FL_KEEP_SIZE)
1433 file_set_keep_isize(inode);
1434 else
1435 f2fs_i_size_write(inode, new_size);
1436 }
1437 return ret;
1438 }
1439
1440 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1441 {
1442 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1443 pgoff_t nr, pg_start, pg_end, delta, idx;
1444 loff_t new_size;
1445 int ret = 0;
1446
1447 new_size = i_size_read(inode) + len;
1448 ret = inode_newsize_ok(inode, new_size);
1449 if (ret)
1450 return ret;
1451
1452 if (offset >= i_size_read(inode))
1453 return -EINVAL;
1454
1455 /* insert range should be aligned to the block size of f2fs */
1456 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1457 return -EINVAL;
1458
1459 ret = f2fs_convert_inline_inode(inode);
1460 if (ret)
1461 return ret;
1462
1463 f2fs_balance_fs(sbi, true);
1464
1465 down_write(&F2FS_I(inode)->i_mmap_sem);
1466 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1467 up_write(&F2FS_I(inode)->i_mmap_sem);
1468 if (ret)
1469 return ret;
1470
1471 /* write out all dirty pages from offset */
1472 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1473 if (ret)
1474 return ret;
1475
1476 pg_start = offset >> PAGE_SHIFT;
1477 pg_end = (offset + len) >> PAGE_SHIFT;
1478 delta = pg_end - pg_start;
1479 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1480
1481 /* avoid gc operation during block exchange */
1482 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1483 down_write(&F2FS_I(inode)->i_mmap_sem);
1484 truncate_pagecache(inode, offset);
1485
1486 while (!ret && idx > pg_start) {
1487 nr = idx - pg_start;
1488 if (nr > delta)
1489 nr = delta;
1490 idx -= nr;
1491
1492 f2fs_lock_op(sbi);
1493 f2fs_drop_extent_tree(inode);
1494
1495 ret = __exchange_data_block(inode, inode, idx,
1496 idx + delta, nr, false);
1497 f2fs_unlock_op(sbi);
1498 }
1499 up_write(&F2FS_I(inode)->i_mmap_sem);
1500 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1501
1502 /* write out all moved pages, if possible */
1503 down_write(&F2FS_I(inode)->i_mmap_sem);
1504 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1505 truncate_pagecache(inode, offset);
1506 up_write(&F2FS_I(inode)->i_mmap_sem);
1507
1508 if (!ret)
1509 f2fs_i_size_write(inode, new_size);
1510 return ret;
1511 }
1512
1513 static int expand_inode_data(struct inode *inode, loff_t offset,
1514 loff_t len, int mode)
1515 {
1516 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1517 struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1518 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1519 .m_may_create = true };
1520 pgoff_t pg_end;
1521 loff_t new_size = i_size_read(inode);
1522 loff_t off_end;
1523 int err;
1524
1525 err = inode_newsize_ok(inode, (len + offset));
1526 if (err)
1527 return err;
1528
1529 err = f2fs_convert_inline_inode(inode);
1530 if (err)
1531 return err;
1532
1533 f2fs_balance_fs(sbi, true);
1534
1535 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1536 off_end = (offset + len) & (PAGE_SIZE - 1);
1537
1538 map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
1539 map.m_len = pg_end - map.m_lblk;
1540 if (off_end)
1541 map.m_len++;
1542
1543 if (f2fs_is_pinned_file(inode))
1544 map.m_seg_type = CURSEG_COLD_DATA;
1545
1546 err = f2fs_map_blocks(inode, &map, 1, (f2fs_is_pinned_file(inode) ?
1547 F2FS_GET_BLOCK_PRE_DIO :
1548 F2FS_GET_BLOCK_PRE_AIO));
1549 if (err) {
1550 pgoff_t last_off;
1551
1552 if (!map.m_len)
1553 return err;
1554
1555 last_off = map.m_lblk + map.m_len - 1;
1556
1557 /* update new size to the failed position */
1558 new_size = (last_off == pg_end) ? offset + len :
1559 (loff_t)(last_off + 1) << PAGE_SHIFT;
1560 } else {
1561 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1562 }
1563
1564 if (new_size > i_size_read(inode)) {
1565 if (mode & FALLOC_FL_KEEP_SIZE)
1566 file_set_keep_isize(inode);
1567 else
1568 f2fs_i_size_write(inode, new_size);
1569 }
1570
1571 return err;
1572 }
1573
1574 static long f2fs_fallocate(struct file *file, int mode,
1575 loff_t offset, loff_t len)
1576 {
1577 struct inode *inode = file_inode(file);
1578 long ret = 0;
1579
1580 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1581 return -EIO;
1582 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1583 return -ENOSPC;
1584
1585 /* f2fs only supports ->fallocate for regular files */
1586 if (!S_ISREG(inode->i_mode))
1587 return -EINVAL;
1588
1589 if (IS_ENCRYPTED(inode) &&
1590 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1591 return -EOPNOTSUPP;
1592
1593 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1594 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1595 FALLOC_FL_INSERT_RANGE))
1596 return -EOPNOTSUPP;
1597
1598 inode_lock(inode);
1599
1600 if (mode & FALLOC_FL_PUNCH_HOLE) {
1601 if (offset >= inode->i_size)
1602 goto out;
1603
1604 ret = punch_hole(inode, offset, len);
1605 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1606 ret = f2fs_collapse_range(inode, offset, len);
1607 } else if (mode & FALLOC_FL_ZERO_RANGE) {
1608 ret = f2fs_zero_range(inode, offset, len, mode);
1609 } else if (mode & FALLOC_FL_INSERT_RANGE) {
1610 ret = f2fs_insert_range(inode, offset, len);
1611 } else {
1612 ret = expand_inode_data(inode, offset, len, mode);
1613 }
1614
1615 if (!ret) {
1616 inode->i_mtime = inode->i_ctime = current_time(inode);
1617 f2fs_mark_inode_dirty_sync(inode, false);
1618 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1619 }
1620
1621 out:
1622 inode_unlock(inode);
1623
1624 trace_f2fs_fallocate(inode, mode, offset, len, ret);
1625 return ret;
1626 }
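
/*
 * Sketch of reaching the FALLOC_FL_PUNCH_HOLE branch above (illustrative
 * path and offsets). Per the fallocate(2) contract, PUNCH_HOLE must be
 * paired with KEEP_SIZE, matching the mode mask checked above.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/f2fs/bigfile", O_RDWR);

	if (fd < 0)
		return 1;
	/* deallocate 1MiB starting at 4KiB; i_size stays unchanged */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 1024 * 1024))
		perror("fallocate");
	close(fd);
	return 0;
}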
1627
1628 static int f2fs_release_file(struct inode *inode, struct file *filp)
1629 {
1630 /*
1631  * f2fs_release_file is called on every close. So we should not drop
1632  * any in-memory pages on a close issued by another process.
1633  */
1634 if (!(filp->f_mode & FMODE_WRITE) ||
1635 atomic_read(&inode->i_writecount) != 1)
1636 return 0;
1637
1638 /* some remaining atomic pages should be discarded */
1639 if (f2fs_is_atomic_file(inode))
1640 f2fs_drop_inmem_pages(inode);
1641 if (f2fs_is_volatile_file(inode)) {
1642 set_inode_flag(inode, FI_DROP_CACHE);
1643 filemap_fdatawrite(inode->i_mapping);
1644 clear_inode_flag(inode, FI_DROP_CACHE);
1645 clear_inode_flag(inode, FI_VOLATILE_FILE);
1646 stat_dec_volatile_write(inode);
1647 }
1648 return 0;
1649 }
1650
1651 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1652 {
1653 struct inode *inode = file_inode(file);
1654
1655 /*
1656  * If the process doing a transaction crashed, we should roll back.
1657  * Otherwise, other readers/writers can see a corrupted database until
1658  * all the writers close the file. Since this should be done before
1659  * dropping the file lock, it needs to be done in ->flush.
1660  */
1661 if (f2fs_is_atomic_file(inode) &&
1662 F2FS_I(inode)->inmem_task == current)
1663 f2fs_drop_inmem_pages(inode);
1664 return 0;
1665 }
1666
1667 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1668 {
1669 struct f2fs_inode_info *fi = F2FS_I(inode);
1670
1671 /* Is it a quota file? Do not allow the user to mess with it */
1672 if (IS_NOQUOTA(inode))
1673 return -EPERM;
1674
1675 if ((iflags ^ fi->i_flags) & F2FS_CASEFOLD_FL) {
1676 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1677 return -EOPNOTSUPP;
1678 if (!f2fs_empty_dir(inode))
1679 return -ENOTEMPTY;
1680 }
1681
1682 fi->i_flags = iflags | (fi->i_flags & ~mask);
1683
1684 if (fi->i_flags & F2FS_PROJINHERIT_FL)
1685 set_inode_flag(inode, FI_PROJ_INHERIT);
1686 else
1687 clear_inode_flag(inode, FI_PROJ_INHERIT);
1688
1689 inode->i_ctime = current_time(inode);
1690 f2fs_set_inode_flags(inode);
1691 f2fs_mark_inode_dirty_sync(inode, true);
1692 return 0;
1693 }
1694
1695 /*
1696  * FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support.
1697  *
1698  * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an
1699  * entry for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1700  * F2FS_GETTABLE_FS_FL. To make it settable via FS_IOC_SETFLAGS, also add
1701  * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL. Translation to the
1702  * fsx_xflags used by FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR is done by the VFS.
1703  */
1704 static const struct {
1705 u32 iflag;
1706 u32 fsflag;
1707 } f2fs_fsflags_map[] = {
1708 { F2FS_SYNC_FL, FS_SYNC_FL },
1709 { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL },
1710 { F2FS_APPEND_FL, FS_APPEND_FL },
1711 { F2FS_NODUMP_FL, FS_NODUMP_FL },
1712 { F2FS_NOATIME_FL, FS_NOATIME_FL },
1713 { F2FS_INDEX_FL, FS_INDEX_FL },
1714 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
1715 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
1716 { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL },
1717 };
1718
1719 #define F2FS_GETTABLE_FS_FL ( \
1720 FS_SYNC_FL | \
1721 FS_IMMUTABLE_FL | \
1722 FS_APPEND_FL | \
1723 FS_NODUMP_FL | \
1724 FS_NOATIME_FL | \
1725 FS_INDEX_FL | \
1726 FS_DIRSYNC_FL | \
1727 FS_PROJINHERIT_FL | \
1728 FS_ENCRYPT_FL | \
1729 FS_INLINE_DATA_FL | \
1730 FS_NOCOW_FL | \
1731 FS_VERITY_FL | \
1732 FS_CASEFOLD_FL)
1733
1734 #define F2FS_SETTABLE_FS_FL ( \
1735 FS_SYNC_FL | \
1736 FS_IMMUTABLE_FL | \
1737 FS_APPEND_FL | \
1738 FS_NODUMP_FL | \
1739 FS_NOATIME_FL | \
1740 FS_DIRSYNC_FL | \
1741 FS_PROJINHERIT_FL | \
1742 FS_CASEFOLD_FL)
1743
1744 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1745 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1746 {
1747 u32 fsflags = 0;
1748 int i;
1749
1750 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1751 if (iflags & f2fs_fsflags_map[i].iflag)
1752 fsflags |= f2fs_fsflags_map[i].fsflag;
1753
1754 return fsflags;
1755 }
1756
1757 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1758 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1759 {
1760 u32 iflags = 0;
1761 int i;
1762
1763 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1764 if (fsflags & f2fs_fsflags_map[i].fsflag)
1765 iflags |= f2fs_fsflags_map[i].iflag;
1766
1767 return iflags;
1768 }
1769
1770 static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1771 {
1772 struct inode *inode = file_inode(filp);
1773 struct f2fs_inode_info *fi = F2FS_I(inode);
1774 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1775
1776 if (IS_ENCRYPTED(inode))
1777 fsflags |= FS_ENCRYPT_FL;
1778 if (IS_VERITY(inode))
1779 fsflags |= FS_VERITY_FL;
1780 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1781 fsflags |= FS_INLINE_DATA_FL;
1782 if (is_inode_flag_set(inode, FI_PIN_FILE))
1783 fsflags |= FS_NOCOW_FL;
1784
1785 fsflags &= F2FS_GETTABLE_FS_FL;
1786
1787 return put_user(fsflags, (int __user *)arg);
1788 }
1789
1790 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1791 {
1792 struct inode *inode = file_inode(filp);
1793 struct f2fs_inode_info *fi = F2FS_I(inode);
1794 u32 fsflags, old_fsflags;
1795 u32 iflags;
1796 int ret;
1797
1798 if (!inode_owner_or_capable(inode))
1799 return -EACCES;
1800
1801 if (get_user(fsflags, (int __user *)arg))
1802 return -EFAULT;
1803
1804 if (fsflags & ~F2FS_GETTABLE_FS_FL)
1805 return -EOPNOTSUPP;
1806 fsflags &= F2FS_SETTABLE_FS_FL;
1807
1808 iflags = f2fs_fsflags_to_iflags(fsflags);
1809 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
1810 return -EOPNOTSUPP;
1811
1812 ret = mnt_want_write_file(filp);
1813 if (ret)
1814 return ret;
1815
1816 inode_lock(inode);
1817
1818 old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1819 ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
1820 if (ret)
1821 goto out;
1822
1823 ret = f2fs_setflags_common(inode, iflags,
1824 f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
1825 out:
1826 inode_unlock(inode);
1827 mnt_drop_write_file(filp);
1828 return ret;
1829 }
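
/*
 * Sketch of exercising the getflags/setflags pair above (illustrative
 * path). FS_IOC_GETFLAGS/FS_IOC_SETFLAGS and the FS_*_FL bits are the
 * standard definitions from <linux/fs.h>; bits outside F2FS_SETTABLE_FS_FL
 * are rejected with -EOPNOTSUPP by the handler above.
 */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/f2fs/testfile", O_RDONLY);
	int flags;

	if (fd < 0)
		return 1;
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags))
		perror("FS_IOC_GETFLAGS");
	flags |= FS_NODUMP_FL;	/* stored as F2FS_NODUMP_FL on disk */
	if (ioctl(fd, FS_IOC_SETFLAGS, &flags))
		perror("FS_IOC_SETFLAGS");
	close(fd);
	return 0;
}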
1830
1831 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
1832 {
1833 struct inode *inode = file_inode(filp);
1834
1835 return put_user(inode->i_generation, (int __user *)arg);
1836 }
1837
1838 static int f2fs_ioc_start_atomic_write(struct file *filp)
1839 {
1840 struct inode *inode = file_inode(filp);
1841 struct f2fs_inode_info *fi = F2FS_I(inode);
1842 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1843 int ret;
1844
1845 if (!inode_owner_or_capable(inode))
1846 return -EACCES;
1847
1848 if (!S_ISREG(inode->i_mode))
1849 return -EINVAL;
1850
1851 if (filp->f_flags & O_DIRECT)
1852 return -EINVAL;
1853
1854 ret = mnt_want_write_file(filp);
1855 if (ret)
1856 return ret;
1857
1858 inode_lock(inode);
1859
1860 if (f2fs_is_atomic_file(inode)) {
1861 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
1862 ret = -EINVAL;
1863 goto out;
1864 }
1865
1866 ret = f2fs_convert_inline_inode(inode);
1867 if (ret)
1868 goto out;
1869
1870 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1871
1872 /*
1873  * Should wait for end_io to count F2FS_WB_CP_DATA correctly by
1874  * f2fs_is_atomic_file.
1875  */
1876 if (get_dirty_pages(inode))
1877 f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
1878 inode->i_ino, get_dirty_pages(inode));
1879 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
1880 if (ret) {
1881 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1882 goto out;
1883 }
1884
1885 spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
1886 if (list_empty(&fi->inmem_ilist))
1887 list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
1888 sbi->atomic_files++;
1889 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
1890
1891 /* add inode to inmem_list first and then set atomic_file */
1892 set_inode_flag(inode, FI_ATOMIC_FILE);
1893 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
1894 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1895
1896 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1897 F2FS_I(inode)->inmem_task = current;
1898 stat_inc_atomic_write(inode);
1899 stat_update_max_atomic_write(inode);
1900 out:
1901 inode_unlock(inode);
1902 mnt_drop_write_file(filp);
1903 return ret;
1904 }
1905
1906 static int f2fs_ioc_commit_atomic_write(struct file *filp)
1907 {
1908 struct inode *inode = file_inode(filp);
1909 int ret;
1910
1911 if (!inode_owner_or_capable(inode))
1912 return -EACCES;
1913
1914 ret = mnt_want_write_file(filp);
1915 if (ret)
1916 return ret;
1917
1918 f2fs_balance_fs(F2FS_I_SB(inode), true);
1919
1920 inode_lock(inode);
1921
1922 if (f2fs_is_volatile_file(inode)) {
1923 ret = -EINVAL;
1924 goto err_out;
1925 }
1926
1927 if (f2fs_is_atomic_file(inode)) {
1928 ret = f2fs_commit_inmem_pages(inode);
1929 if (ret)
1930 goto err_out;
1931
1932 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
1933 if (!ret)
1934 f2fs_drop_inmem_pages(inode);
1935 } else {
1936 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
1937 }
1938 err_out:
1939 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
1940 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
1941 ret = -EINVAL;
1942 }
1943 inode_unlock(inode);
1944 mnt_drop_write_file(filp);
1945 return ret;
1946 }
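
/*
 * Sketch of the start/commit pairing above, as used by database-style
 * transaction code. The request codes below are assumed to match the
 * f2fs.h definitions (_IO(0xf5, 1) and _IO(0xf5, 2)); verify against the
 * kernel headers in use before relying on them.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define F2FS_IOCTL_MAGIC		0xf5	/* assumed, from f2fs.h */
#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)

int main(void)
{
	int fd = open("/mnt/f2fs/db", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE))
		perror("start atomic write");
	/* buffered writes are now staged in memory until commit */
	if (write(fd, "journal", 7) != 7)
		perror("write");
	if (ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE))
		perror("commit atomic write");
	close(fd);
	return 0;
}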
1947
1948 static int f2fs_ioc_start_volatile_write(struct file *filp)
1949 {
1950 struct inode *inode = file_inode(filp);
1951 int ret;
1952
1953 if (!inode_owner_or_capable(inode))
1954 return -EACCES;
1955
1956 if (!S_ISREG(inode->i_mode))
1957 return -EINVAL;
1958
1959 ret = mnt_want_write_file(filp);
1960 if (ret)
1961 return ret;
1962
1963 inode_lock(inode);
1964
1965 if (f2fs_is_volatile_file(inode))
1966 goto out;
1967
1968 ret = f2fs_convert_inline_inode(inode);
1969 if (ret)
1970 goto out;
1971
1972 stat_inc_volatile_write(inode);
1973 stat_update_max_volatile_write(inode);
1974
1975 set_inode_flag(inode, FI_VOLATILE_FILE);
1976 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1977 out:
1978 inode_unlock(inode);
1979 mnt_drop_write_file(filp);
1980 return ret;
1981 }
1982
1983 static int f2fs_ioc_release_volatile_write(struct file *filp)
1984 {
1985 struct inode *inode = file_inode(filp);
1986 int ret;
1987
1988 if (!inode_owner_or_capable(inode))
1989 return -EACCES;
1990
1991 ret = mnt_want_write_file(filp);
1992 if (ret)
1993 return ret;
1994
1995 inode_lock(inode);
1996
1997 if (!f2fs_is_volatile_file(inode))
1998 goto out;
1999
2000 if (!f2fs_is_first_block_written(inode)) {
2001 ret = truncate_partial_data_page(inode, 0, true);
2002 goto out;
2003 }
2004
2005 ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2006 out:
2007 inode_unlock(inode);
2008 mnt_drop_write_file(filp);
2009 return ret;
2010 }
2011
2012 static int f2fs_ioc_abort_volatile_write(struct file *filp)
2013 {
2014 struct inode *inode = file_inode(filp);
2015 int ret;
2016
2017 if (!inode_owner_or_capable(inode))
2018 return -EACCES;
2019
2020 ret = mnt_want_write_file(filp);
2021 if (ret)
2022 return ret;
2023
2024 inode_lock(inode);
2025
2026 if (f2fs_is_atomic_file(inode))
2027 f2fs_drop_inmem_pages(inode);
2028 if (f2fs_is_volatile_file(inode)) {
2029 clear_inode_flag(inode, FI_VOLATILE_FILE);
2030 stat_dec_volatile_write(inode);
2031 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2032 }
2033
2034 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2035
2036 inode_unlock(inode);
2037
2038 mnt_drop_write_file(filp);
2039 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2040 return ret;
2041 }
2042
2043 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2044 {
2045 struct inode *inode = file_inode(filp);
2046 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2047 struct super_block *sb = sbi->sb;
2048 __u32 in;
2049 int ret = 0;
2050
2051 if (!capable(CAP_SYS_ADMIN))
2052 return -EPERM;
2053
2054 if (get_user(in, (__u32 __user *)arg))
2055 return -EFAULT;
2056
2057 if (in != F2FS_GOING_DOWN_FULLSYNC) {
2058 ret = mnt_want_write_file(filp);
2059 if (ret)
2060 return ret;
2061 }
2062
2063 switch (in) {
2064 case F2FS_GOING_DOWN_FULLSYNC:
2065 sb = freeze_bdev(sb->s_bdev);
2066 if (IS_ERR(sb)) {
2067 ret = PTR_ERR(sb);
2068 goto out;
2069 }
2070 if (sb) {
2071 f2fs_stop_checkpoint(sbi, false);
2072 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2073 thaw_bdev(sb->s_bdev, sb);
2074 }
2075 break;
2076 case F2FS_GOING_DOWN_METASYNC:
2077 /* do checkpoint only */
2078 ret = f2fs_sync_fs(sb, 1);
2079 if (ret)
2080 goto out;
2081 f2fs_stop_checkpoint(sbi, false);
2082 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2083 break;
2084 case F2FS_GOING_DOWN_NOSYNC:
2085 f2fs_stop_checkpoint(sbi, false);
2086 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2087 break;
2088 case F2FS_GOING_DOWN_METAFLUSH:
2089 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2090 f2fs_stop_checkpoint(sbi, false);
2091 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2092 break;
2093 case F2FS_GOING_DOWN_NEED_FSCK:
2094 set_sbi_flag(sbi, SBI_NEED_FSCK);
2095 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2096 set_sbi_flag(sbi, SBI_IS_DIRTY);
2097 /* do checkpoint only */
2098 ret = f2fs_sync_fs(sb, 1);
2099 goto out;
2100 default:
2101 ret = -EINVAL;
2102 goto out;
2103 }
2104
2105 f2fs_stop_gc_thread(sbi);
2106 f2fs_stop_discard_thread(sbi);
2107
2108 f2fs_drop_discard_cmd(sbi);
2109 clear_opt(sbi, DISCARD);
2110
2111 f2fs_update_time(sbi, REQ_TIME);
2112 out:
2113 if (in != F2FS_GOING_DOWN_FULLSYNC)
2114 mnt_drop_write_file(filp);
2115
2116 trace_f2fs_shutdown(sbi, in, ret);
2117
2118 return ret;
2119 }
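
/*
 * Sketch of the shutdown ioctl above (illustrative mount point). F2FS
 * reuses the XFS "goingdown" request; the define below assumes the usual
 * _IOR('X', 125, __u32) value from f2fs.h, and METASYNC corresponds to the
 * checkpoint-then-stop case handled above.
 */
#include <fcntl.h>
#include <linux/types.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define F2FS_IOC_SHUTDOWN		_IOR('X', 125, __u32)	/* assumed */
#define F2FS_GOING_DOWN_METASYNC	0x1			/* assumed */

int main(void)
{
	int fd = open("/mnt/f2fs", O_RDONLY);	/* any file on the fs works */
	__u32 mode = F2FS_GOING_DOWN_METASYNC;

	if (fd < 0)
		return 1;
	if (ioctl(fd, F2FS_IOC_SHUTDOWN, &mode))	/* needs CAP_SYS_ADMIN */
		perror("F2FS_IOC_SHUTDOWN");
	close(fd);
	return 0;
}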
2120
2121 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2122 {
2123 struct inode *inode = file_inode(filp);
2124 struct super_block *sb = inode->i_sb;
2125 struct request_queue *q = bdev_get_queue(sb->s_bdev);
2126 struct fstrim_range range;
2127 int ret;
2128
2129 if (!capable(CAP_SYS_ADMIN))
2130 return -EPERM;
2131
2132 if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2133 return -EOPNOTSUPP;
2134
2135 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2136 sizeof(range)))
2137 return -EFAULT;
2138
2139 ret = mnt_want_write_file(filp);
2140 if (ret)
2141 return ret;
2142
2143 range.minlen = max((unsigned int)range.minlen,
2144 q->limits.discard_granularity);
2145 ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2146 mnt_drop_write_file(filp);
2147 if (ret < 0)
2148 return ret;
2149
2150 if (copy_to_user((struct fstrim_range __user *)arg, &range,
2151 sizeof(range)))
2152 return -EFAULT;
2153 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2154 return 0;
2155 }
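
/*
 * Sketch of the FITRIM path above (illustrative mount point). FITRIM and
 * struct fstrim_range are the standard VFS definitions in <linux/fs.h>;
 * minlen is raised to the device discard granularity by the handler above,
 * and len is updated to the number of bytes actually trimmed.
 */
#include <fcntl.h>
#include <limits.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/f2fs", O_RDONLY);
	struct fstrim_range range = {
		.start = 0,
		.len = ULLONG_MAX,	/* trim the whole filesystem */
		.minlen = 0,
	};

	if (fd < 0)
		return 1;
	if (ioctl(fd, FITRIM, &range))		/* needs CAP_SYS_ADMIN */
		perror("FITRIM");
	else
		printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}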
2156
2157 static bool uuid_is_nonzero(__u8 u[16])
2158 {
2159 int i;
2160
2161 for (i = 0; i < 16; i++)
2162 if (u[i])
2163 return true;
2164 return false;
2165 }
2166
2167 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2168 {
2169 struct inode *inode = file_inode(filp);
2170
2171 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2172 return -EOPNOTSUPP;
2173
2174 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2175
2176 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2177 }
2178
2179 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2180 {
2181 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2182 return -EOPNOTSUPP;
2183 return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2184 }
2185
2186 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2187 {
2188 struct inode *inode = file_inode(filp);
2189 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2190 int err;
2191
2192 if (!f2fs_sb_has_encrypt(sbi))
2193 return -EOPNOTSUPP;
2194
2195 err = mnt_want_write_file(filp);
2196 if (err)
2197 return err;
2198
2199 down_write(&sbi->sb_lock);
2200
2201 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2202 goto got_it;
2203
2204 /* update superblock with uuid */
2205 generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2206
2207 err = f2fs_commit_super(sbi, false);
2208 if (err) {
2209 /* undo new data */
2210 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2211 goto out_err;
2212 }
2213 got_it:
2214 if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2215 16))
2216 err = -EFAULT;
2217 out_err:
2218 up_write(&sbi->sb_lock);
2219 mnt_drop_write_file(filp);
2220 return err;
2221 }
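The salt is generated once, persisted through f2fs_commit_super(), and then handed out verbatim; the 16-byte buffer below matches the handler's copy_to_user() size. FS_IOC_GET_ENCRYPTION_PWSALT comes from the fscrypt uapi in <linux/fs.h> (F2FS aliases it as F2FS_IOC_GET_ENCRYPTION_PWSALT); this is a sketch, not a hardened key-derivation tool.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FS_IOC_GET_ENCRYPTION_PWSALT */

/* Read the per-filesystem password salt; returns 0 on success. */
int read_pwsalt(const char *path, unsigned char salt[16])
{
	int fd = open(path, O_RDONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, FS_IOC_GET_ENCRYPTION_PWSALT, salt);
	close(fd);
	return ret;
}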
2222
2223 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2224 unsigned long arg)
2225 {
2226 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2227 return -EOPNOTSUPP;
2228
2229 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2230 }
2231
2232 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2233 {
2234 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2235 return -EOPNOTSUPP;
2236
2237 return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2238 }
2239
2240 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2241 {
2242 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2243 return -EOPNOTSUPP;
2244
2245 return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2246 }
2247
2248 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2249 unsigned long arg)
2250 {
2251 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2252 return -EOPNOTSUPP;
2253
2254 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2255 }
2256
2257 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2258 unsigned long arg)
2259 {
2260 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2261 return -EOPNOTSUPP;
2262
2263 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2264 }
2265
2266 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2267 {
2268 struct inode *inode = file_inode(filp);
2269 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2270 __u32 sync;
2271 int ret;
2272
2273 if (!capable(CAP_SYS_ADMIN))
2274 return -EPERM;
2275
2276 if (get_user(sync, (__u32 __user *)arg))
2277 return -EFAULT;
2278
2279 if (f2fs_readonly(sbi->sb))
2280 return -EROFS;
2281
2282 ret = mnt_want_write_file(filp);
2283 if (ret)
2284 return ret;
2285
2286 if (!sync) {
2287 if (!mutex_trylock(&sbi->gc_mutex)) {
2288 ret = -EBUSY;
2289 goto out;
2290 }
2291 } else {
2292 mutex_lock(&sbi->gc_mutex);
2293 }
2294
2295 ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
2296 out:
2297 mnt_drop_write_file(filp);
2298 return ret;
2299 }
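A sketch of driving the GC handler from userspace. F2FS_IOCTL_MAGIC (0xf5) and the request number are assumptions mirroring f2fs.h; note from the code above that sync=0 only trylocks gc_mutex and fails with EBUSY, while sync=1 waits for the lock.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

#define F2FS_IOCTL_MAGIC		0xf5	/* assumed, mirrors f2fs.h */
#define F2FS_IOC_GARBAGE_COLLECT	_IOW(F2FS_IOCTL_MAGIC, 6, __u32)

/* Trigger one GC pass; pass wait=1 to block until the GC lock is taken. */
int f2fs_gc_once(const char *path, int wait)
{
	__u32 sync = wait ? 1 : 0;
	int fd = open(path, O_RDONLY), ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
	close(fd);
	return ret;
}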
2300
2301 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2302 {
2303 struct inode *inode = file_inode(filp);
2304 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2305 struct f2fs_gc_range range;
2306 u64 end;
2307 int ret;
2308
2309 if (!capable(CAP_SYS_ADMIN))
2310 return -EPERM;
2311
2312 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2313 sizeof(range)))
2314 return -EFAULT;
2315
2316 if (f2fs_readonly(sbi->sb))
2317 return -EROFS;
2318
2319 end = range.start + range.len;
2320 if (end < range.start || range.start < MAIN_BLKADDR(sbi) ||
2321 end >= MAX_BLKADDR(sbi))
2322 return -EINVAL;
2323
2324 ret = mnt_want_write_file(filp);
2325 if (ret)
2326 return ret;
2327
2328 do_more:
2329 if (!range.sync) {
2330 if (!mutex_trylock(&sbi->gc_mutex)) {
2331 ret = -EBUSY;
2332 goto out;
2333 }
2334 } else {
2335 mutex_lock(&sbi->gc_mutex);
2336 }
2337
2338 ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
2339 range.start += BLKS_PER_SEC(sbi);
2340 if (range.start <= end)
2341 goto do_more;
2342 out:
2343 mnt_drop_write_file(filp);
2344 return ret;
2345 }
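The range variant garbage-collects one section at a time, advancing range.start by BLKS_PER_SEC() per pass. The struct layout below is an assumption mirroring struct f2fs_gc_range in f2fs.h; as the validation above shows, start/len are absolute block addresses inside [MAIN_BLKADDR, MAX_BLKADDR), not file offsets.

#include <sys/ioctl.h>
#include <linux/types.h>

#define F2FS_IOCTL_MAGIC	0xf5	/* assumed, mirrors f2fs.h */

struct f2fs_gc_range {			/* assumed layout, mirrors f2fs.h */
	__u32 sync;			/* 1: wait for gc_mutex, 0: EBUSY if busy */
	__u64 start;			/* block address in the main area */
	__u64 len;			/* length in blocks */
};

#define F2FS_IOC_GARBAGE_COLLECT_RANGE \
	_IOW(F2FS_IOCTL_MAGIC, 11, struct f2fs_gc_range)

int f2fs_gc_blocks(int fd, __u64 start_blk, __u64 nr_blks)
{
	struct f2fs_gc_range range = {
		.sync = 1,
		.start = start_blk,
		.len = nr_blks,
	};

	return ioctl(fd, F2FS_IOC_GARBAGE_COLLECT_RANGE, &range);
}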
2346
2347 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2348 {
2349 struct inode *inode = file_inode(filp);
2350 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2351 int ret;
2352
2353 if (!capable(CAP_SYS_ADMIN))
2354 return -EPERM;
2355
2356 if (f2fs_readonly(sbi->sb))
2357 return -EROFS;
2358
2359 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2360 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2361 return -EINVAL;
2362 }
2363
2364 ret = mnt_want_write_file(filp);
2365 if (ret)
2366 return ret;
2367
2368 ret = f2fs_sync_fs(sbi->sb, 1);
2369
2370 mnt_drop_write_file(filp);
2371 return ret;
2372 }
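Forcing a checkpoint needs no payload; it is just f2fs_sync_fs() with sync=1 behind a capability and writability check. The request code below (_IO(0xf5, 7)) is an assumption mirroring f2fs.h.

#include <sys/ioctl.h>

#define F2FS_IOCTL_MAGIC		0xf5	/* assumed, mirrors f2fs.h */
#define F2FS_IOC_WRITE_CHECKPOINT	_IO(F2FS_IOCTL_MAGIC, 7)

/* Ask f2fs to write a checkpoint now; @fd is any file on the mount. */
int f2fs_checkpoint(int fd)
{
	return ioctl(fd, F2FS_IOC_WRITE_CHECKPOINT, 0);
}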
2373
2374 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2375 struct file *filp,
2376 struct f2fs_defragment *range)
2377 {
2378 struct inode *inode = file_inode(filp);
2379 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2380 .m_seg_type = NO_CHECK_TYPE,
2381 .m_may_create = false };
2382 struct extent_info ei = {0, 0, 0};
2383 pgoff_t pg_start, pg_end, next_pgofs;
2384 unsigned int blk_per_seg = sbi->blocks_per_seg;
2385 unsigned int total = 0, sec_num;
2386 block_t blk_end = 0;
2387 bool fragmented = false;
2388 int err;
2389
2390 /* if in-place-update policy is enabled, don't waste time here */
2391 if (f2fs_should_update_inplace(inode, NULL))
2392 return -EINVAL;
2393
2394 pg_start = range->start >> PAGE_SHIFT;
2395 pg_end = (range->start + range->len) >> PAGE_SHIFT;
2396
2397 f2fs_balance_fs(sbi, true);
2398
2399 inode_lock(inode);
2400
2401 /* writeback all dirty pages in the range */
2402 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2403 range->start + range->len - 1);
2404 if (err)
2405 goto out;
2406
2407 /*
2408  * lookup mapping info in extent cache, skip defragmenting if physical
2409  * block addresses are continuous.
2410  */
2411 if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2412 if (ei.fofs + ei.len >= pg_end)
2413 goto out;
2414 }
2415
2416 map.m_lblk = pg_start;
2417 map.m_next_pgofs = &next_pgofs;
2418
2419 /*
2420  * lookup mapping info in dnode page cache, skip defragmenting if all
2421  * physical block addresses are continuous even if there are hole(s)
2422  * in logical blocks.
2423  */
2424 while (map.m_lblk < pg_end) {
2425 map.m_len = pg_end - map.m_lblk;
2426 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2427 if (err)
2428 goto out;
2429
2430 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2431 map.m_lblk = next_pgofs;
2432 continue;
2433 }
2434
2435 if (blk_end && blk_end != map.m_pblk)
2436 fragmented = true;
2437
2438 /* record total count of block that we're going to move */
2439 total += map.m_len;
2440
2441 blk_end = map.m_pblk + map.m_len;
2442
2443 map.m_lblk += map.m_len;
2444 }
2445
2446 if (!fragmented) {
2447 total = 0;
2448 goto out;
2449 }
2450
2451 sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2452
2453 /*
2454  * make sure there are enough free section for LFS allocation, this can
2455  * avoid defragment running in SSR mode when free section are allocated
2456  * intensively
2457  */
2458 if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2459 err = -EAGAIN;
2460 goto out;
2461 }
2462
2463 map.m_lblk = pg_start;
2464 map.m_len = pg_end - pg_start;
2465 total = 0;
2466
2467 while (map.m_lblk < pg_end) {
2468 pgoff_t idx;
2469 int cnt = 0;
2470
2471 do_map:
2472 map.m_len = pg_end - map.m_lblk;
2473 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2474 if (err)
2475 goto clear_out;
2476
2477 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2478 map.m_lblk = next_pgofs;
2479 goto check;
2480 }
2481
2482 set_inode_flag(inode, FI_DO_DEFRAG);
2483
2484 idx = map.m_lblk;
2485 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2486 struct page *page;
2487
2488 page = f2fs_get_lock_data_page(inode, idx, true);
2489 if (IS_ERR(page)) {
2490 err = PTR_ERR(page);
2491 goto clear_out;
2492 }
2493
2494 set_page_dirty(page);
2495 f2fs_put_page(page, 1);
2496
2497 idx++;
2498 cnt++;
2499 total++;
2500 }
2501
2502 map.m_lblk = idx;
2503 check:
2504 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2505 goto do_map;
2506
2507 clear_inode_flag(inode, FI_DO_DEFRAG);
2508
2509 err = filemap_fdatawrite(inode->i_mapping);
2510 if (err)
2511 goto out;
2512 }
2513 clear_out:
2514 clear_inode_flag(inode, FI_DO_DEFRAG);
2515 out:
2516 inode_unlock(inode);
2517 if (!err)
2518 range->len = (u64)total << PAGE_SHIFT;
2519 return err;
2520 }
2521
2522 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2523 {
2524 struct inode *inode = file_inode(filp);
2525 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2526 struct f2fs_defragment range;
2527 int err;
2528
2529 if (!capable(CAP_SYS_ADMIN))
2530 return -EPERM;
2531
2532 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2533 return -EINVAL;
2534
2535 if (f2fs_readonly(sbi->sb))
2536 return -EROFS;
2537
2538 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2539 sizeof(range)))
2540 return -EFAULT;
2541
2542 /* verify alignment of offset & size */
2543 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2544 return -EINVAL;
2545
2546 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2547 sbi->max_file_blocks))
2548 return -EINVAL;
2549
2550 err = mnt_want_write_file(filp);
2551 if (err)
2552 return err;
2553
2554 err = f2fs_defragment_range(sbi, filp, &range);
2555 mnt_drop_write_file(filp);
2556
2557 f2fs_update_time(sbi, REQ_TIME);
2558 if (err < 0)
2559 return err;
2560
2561 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2562 sizeof(range)))
2563 return -EFAULT;
2564
2565 return 0;
2566 }
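Defragmentation takes a byte range that must be F2FS_BLKSIZE (4KiB) aligned, and on success the kernel rewrites range.len with the number of bytes it marked dirty for relocation. The struct layout and request code below are assumptions mirroring f2fs.h.

#include <sys/ioctl.h>
#include <linux/types.h>

#define F2FS_IOCTL_MAGIC	0xf5	/* assumed, mirrors f2fs.h */

struct f2fs_defragment {		/* assumed layout, mirrors f2fs.h */
	__u64 start;			/* byte offset, 4KiB aligned */
	__u64 len;			/* byte length, 4KiB aligned */
};

#define F2FS_IOC_DEFRAGMENT	_IOWR(F2FS_IOCTL_MAGIC, 8, struct f2fs_defragment)

/* Defragment [start, start+len) of the open regular file behind @fd. */
int f2fs_defrag(int fd, __u64 start, __u64 len)
{
	struct f2fs_defragment range = { .start = start, .len = len };
	int ret = ioctl(fd, F2FS_IOC_DEFRAGMENT, &range);

	/* on success, range.len now holds the bytes queued for relocation */
	return ret;
}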
2567
2568 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2569 struct file *file_out, loff_t pos_out, size_t len)
2570 {
2571 struct inode *src = file_inode(file_in);
2572 struct inode *dst = file_inode(file_out);
2573 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2574 size_t olen = len, dst_max_i_size = 0;
2575 size_t dst_osize;
2576 int ret;
2577
2578 if (file_in->f_path.mnt != file_out->f_path.mnt ||
2579 src->i_sb != dst->i_sb)
2580 return -EXDEV;
2581
2582 if (unlikely(f2fs_readonly(src->i_sb)))
2583 return -EROFS;
2584
2585 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2586 return -EINVAL;
2587
2588 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2589 return -EOPNOTSUPP;
2590
2591 if (src == dst) {
2592 if (pos_in == pos_out)
2593 return 0;
2594 if (pos_out > pos_in && pos_out < pos_in + len)
2595 return -EINVAL;
2596 }
2597
2598 inode_lock(src);
2599 if (src != dst) {
2600 ret = -EBUSY;
2601 if (!inode_trylock(dst))
2602 goto out;
2603 }
2604
2605 ret = -EINVAL;
2606 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2607 goto out_unlock;
2608 if (len == 0)
2609 olen = len = src->i_size - pos_in;
2610 if (pos_in + len == src->i_size)
2611 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2612 if (len == 0) {
2613 ret = 0;
2614 goto out_unlock;
2615 }
2616
2617 dst_osize = dst->i_size;
2618 if (pos_out + olen > dst->i_size)
2619 dst_max_i_size = pos_out + olen;
2620
2621 /* verify range is byte aligned */
2622 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2623 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2624 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2625 goto out_unlock;
2626
2627 ret = f2fs_convert_inline_inode(src);
2628 if (ret)
2629 goto out_unlock;
2630
2631 ret = f2fs_convert_inline_inode(dst);
2632 if (ret)
2633 goto out_unlock;
2634
2635 /* write out all dirty pages from offset */
2636 ret = filemap_write_and_wait_range(src->i_mapping,
2637 pos_in, pos_in + len);
2638 if (ret)
2639 goto out_unlock;
2640
2641 ret = filemap_write_and_wait_range(dst->i_mapping,
2642 pos_out, pos_out + len);
2643 if (ret)
2644 goto out_unlock;
2645
2646 f2fs_balance_fs(sbi, true);
2647
2648 down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2649 if (src != dst) {
2650 ret = -EBUSY;
2651 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2652 goto out_src;
2653 }
2654
2655 f2fs_lock_op(sbi);
2656 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2657 pos_out >> F2FS_BLKSIZE_BITS,
2658 len >> F2FS_BLKSIZE_BITS, false);
2659
2660 if (!ret) {
2661 if (dst_max_i_size)
2662 f2fs_i_size_write(dst, dst_max_i_size);
2663 else if (dst_osize != dst->i_size)
2664 f2fs_i_size_write(dst, dst_osize);
2665 }
2666 f2fs_unlock_op(sbi);
2667
2668 if (src != dst)
2669 up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2670 out_src:
2671 up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2672 out_unlock:
2673 if (src != dst)
2674 inode_unlock(dst);
2675 out:
2676 inode_unlock(src);
2677 return ret;
2678 }
2679
2680 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2681 {
2682 struct f2fs_move_range range;
2683 struct fd dst;
2684 int err;
2685
2686 if (!(filp->f_mode & FMODE_READ) ||
2687 !(filp->f_mode & FMODE_WRITE))
2688 return -EBADF;
2689
2690 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2691 sizeof(range)))
2692 return -EFAULT;
2693
2694 dst = fdget(range.dst_fd);
2695 if (!dst.file)
2696 return -EBADF;
2697
2698 if (!(dst.file->f_mode & FMODE_WRITE)) {
2699 err = -EBADF;
2700 goto err_out;
2701 }
2702
2703 err = mnt_want_write_file(filp);
2704 if (err)
2705 goto err_out;
2706
2707 err = f2fs_move_file_range(filp, range.pos_in, dst.file,
2708 range.pos_out, range.len);
2709
2710 mnt_drop_write_file(filp);
2711 if (err)
2712 goto err_out;
2713
2714 if (copy_to_user((struct f2fs_move_range __user *)arg,
2715 &range, sizeof(range)))
2716 err = -EFAULT;
2717 err_out:
2718 fdput(dst);
2719 return err;
2720 }
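As the FMODE checks above show, the move-range ioctl is issued on the source fd (which must be open read+write) and names the destination through dst_fd. Offsets and length are in bytes and must be block-aligned unless the tail reaches EOF; len == 0 means "to the end of the source". The struct below is an assumed mirror of f2fs.h.

#include <sys/ioctl.h>
#include <linux/types.h>

#define F2FS_IOCTL_MAGIC	0xf5	/* assumed, mirrors f2fs.h */

struct f2fs_move_range {		/* assumed layout, mirrors f2fs.h */
	__u32 dst_fd;			/* destination fd, open for write */
	__u64 pos_in;			/* source byte offset */
	__u64 pos_out;			/* destination byte offset */
	__u64 len;			/* byte length, 0 means to EOF */
};

#define F2FS_IOC_MOVE_RANGE	_IOWR(F2FS_IOCTL_MAGIC, 9, struct f2fs_move_range)

int f2fs_move(int src_fd, int dst_fd, __u64 pos_in, __u64 pos_out, __u64 len)
{
	struct f2fs_move_range range = {
		.dst_fd = dst_fd,
		.pos_in = pos_in,
		.pos_out = pos_out,
		.len = len,
	};

	/* src_fd must be open for both read and write, per the handler */
	return ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &range);
}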
2721
2722 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2723 {
2724 struct inode *inode = file_inode(filp);
2725 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2726 struct sit_info *sm = SIT_I(sbi);
2727 unsigned int start_segno = 0, end_segno = 0;
2728 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2729 struct f2fs_flush_device range;
2730 int ret;
2731
2732 if (!capable(CAP_SYS_ADMIN))
2733 return -EPERM;
2734
2735 if (f2fs_readonly(sbi->sb))
2736 return -EROFS;
2737
2738 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2739 return -EINVAL;
2740
2741 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2742 sizeof(range)))
2743 return -EFAULT;
2744
2745 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2746 __is_large_section(sbi)) {
2747 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2748 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2749 return -EINVAL;
2750 }
2751
2752 ret = mnt_want_write_file(filp);
2753 if (ret)
2754 return ret;
2755
2756 if (range.dev_num != 0)
2757 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2758 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2759
2760 start_segno = sm->last_victim[FLUSH_DEVICE];
2761 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2762 start_segno = dev_start_segno;
2763 end_segno = min(start_segno + range.segments, dev_end_segno);
2764
2765 while (start_segno < end_segno) {
2766 if (!mutex_trylock(&sbi->gc_mutex)) {
2767 ret = -EBUSY;
2768 goto out;
2769 }
2770 sm->last_victim[GC_CB] = end_segno + 1;
2771 sm->last_victim[GC_GREEDY] = end_segno + 1;
2772 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2773 ret = f2fs_gc(sbi, true, true, start_segno);
2774 if (ret == -EAGAIN)
2775 ret = 0;
2776 else if (ret < 0)
2777 break;
2778 start_segno++;
2779 }
2780 out:
2781 mnt_drop_write_file(filp);
2782 return ret;
2783 }
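Device flushing only applies to multi-device filesystems without large sections, and per the dev_num validation above the last device cannot be evacuated. Each call migrates at most range.segments segments off the device via foreground GC. Struct layout and request code below are assumed mirrors of f2fs.h.

#include <sys/ioctl.h>
#include <linux/types.h>

#define F2FS_IOCTL_MAGIC	0xf5	/* assumed, mirrors f2fs.h */

struct f2fs_flush_device {		/* assumed layout, mirrors f2fs.h */
	__u32 dev_num;			/* device to evacuate (not the last one) */
	__u32 segments;			/* max segments to migrate per call */
};

#define F2FS_IOC_FLUSH_DEVICE	_IOW(F2FS_IOCTL_MAGIC, 10, struct f2fs_flush_device)

int f2fs_flush_device(int fd, __u32 dev, __u32 segments)
{
	struct f2fs_flush_device range = { .dev_num = dev, .segments = segments };

	return ioctl(fd, F2FS_IOC_FLUSH_DEVICE, &range);
}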
2784
2785 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2786 {
2787 struct inode *inode = file_inode(filp);
2788 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2789
2790 /* Must validate to set it with SQLite behavior in Android. */
2791 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2792
2793 return put_user(sb_feature, (u32 __user *)arg);
2794 }
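The feature query returns the superblock feature word with ATOMIC_WRITE forced on, as the handler shows. The request code and feature bit values below are assumptions mirroring f2fs.h; confirm them against your headers.

#include <sys/ioctl.h>
#include <linux/types.h>

#define F2FS_IOCTL_MAGIC	0xf5		/* assumed, mirrors f2fs.h */
#define F2FS_IOC_GET_FEATURES	_IOR(F2FS_IOCTL_MAGIC, 12, __u32)
#define F2FS_FEATURE_ENCRYPT		0x0001	/* assumed bit value */
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004	/* assumed bit value */

/* Returns 1 if the fs advertises encryption support, 0 if not, -1 on error. */
int f2fs_has_encrypt(int fd)
{
	__u32 feats = 0;

	if (ioctl(fd, F2FS_IOC_GET_FEATURES, &feats) < 0)
		return -1;
	return !!(feats & F2FS_FEATURE_ENCRYPT);
}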
2795
2796 #ifdef CONFIG_QUOTA
2797 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2798 {
2799 struct dquot *transfer_to[MAXQUOTAS] = {};
2800 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2801 struct super_block *sb = sbi->sb;
2802 int err = 0;
2803
2804 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2805 if (!IS_ERR(transfer_to[PRJQUOTA])) {
2806 err = __dquot_transfer(inode, transfer_to);
2807 if (err)
2808 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2809 dqput(transfer_to[PRJQUOTA]);
2810 }
2811 return err;
2812 }
2813
2814 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
2815 {
2816 struct inode *inode = file_inode(filp);
2817 struct f2fs_inode_info *fi = F2FS_I(inode);
2818 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2819 struct page *ipage;
2820 kprojid_t kprojid;
2821 int err;
2822
2823 if (!f2fs_sb_has_project_quota(sbi)) {
2824 if (projid != F2FS_DEF_PROJID)
2825 return -EOPNOTSUPP;
2826 else
2827 return 0;
2828 }
2829
2830 if (!f2fs_has_extra_attr(inode))
2831 return -EOPNOTSUPP;
2832
2833 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
2834
2835 if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
2836 return 0;
2837
2838 err = -EPERM;
2839 /* Is it quota file? Do not allow user to mess with it */
2840 if (IS_NOQUOTA(inode))
2841 return err;
2842
2843 ipage = f2fs_get_node_page(sbi, inode->i_ino);
2844 if (IS_ERR(ipage))
2845 return PTR_ERR(ipage);
2846
2847 if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
2848 i_projid)) {
2849 err = -EOVERFLOW;
2850 f2fs_put_page(ipage, 1);
2851 return err;
2852 }
2853 f2fs_put_page(ipage, 1);
2854
2855 err = dquot_initialize(inode);
2856 if (err)
2857 return err;
2858
2859 f2fs_lock_op(sbi);
2860 err = f2fs_transfer_project_quota(inode, kprojid);
2861 if (err)
2862 goto out_unlock;
2863
2864 F2FS_I(inode)->i_projid = kprojid;
2865 inode->i_ctime = current_time(inode);
2866 f2fs_mark_inode_dirty_sync(inode, true);
2867 out_unlock:
2868 f2fs_unlock_op(sbi);
2869 return err;
2870 }
2871 #else
2872 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2873 {
2874 return 0;
2875 }
2876
2877 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
2878 {
2879 if (projid != F2FS_DEF_PROJID)
2880 return -EOPNOTSUPP;
2881 return 0;
2882 }
2883 #endif
2884
2885 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
2886
2887 /*
2888  * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and
2889  * settable via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[],
2890  * and add its FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.  Also make sure
2891  * its FS_XFLAG_* equivalent is supported in f2fs_fill_fsxattr().
2892  */
2893 static const struct {
2894 u32 iflag;
2895 u32 xflag;
2896 } f2fs_xflags_map[] = {
2897 { F2FS_SYNC_FL, FS_XFLAG_SYNC },
2898 { F2FS_IMMUTABLE_FL, FS_XFLAG_IMMUTABLE },
2899 { F2FS_APPEND_FL, FS_XFLAG_APPEND },
2900 { F2FS_NODUMP_FL, FS_XFLAG_NODUMP },
2901 { F2FS_NOATIME_FL, FS_XFLAG_NOATIME },
2902 { F2FS_PROJINHERIT_FL, FS_XFLAG_PROJINHERIT },
2903 };
2904
2905 #define F2FS_SUPPORTED_XFLAGS ( \
2906 FS_XFLAG_SYNC | \
2907 FS_XFLAG_IMMUTABLE | \
2908 FS_XFLAG_APPEND | \
2909 FS_XFLAG_NODUMP | \
2910 FS_XFLAG_NOATIME | \
2911 FS_XFLAG_PROJINHERIT)
2912
2913 /* Transfer internal flags to xflags */
2914 static inline u32 f2fs_iflags_to_xflags(u32 iflags)
2915 {
2916 u32 xflags = 0;
2917 int i;
2918
2919 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
2920 if (iflags & f2fs_xflags_map[i].iflag)
2921 xflags |= f2fs_xflags_map[i].xflag;
2922
2923 return xflags;
2924 }
2925
2926 /* Transfer xflags flags to internal */
2927 static inline u32 f2fs_xflags_to_iflags(u32 xflags)
2928 {
2929 u32 iflags = 0;
2930 int i;
2931
2932 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
2933 if (xflags & f2fs_xflags_map[i].xflag)
2934 iflags |= f2fs_xflags_map[i].iflag;
2935
2936 return iflags;
2937 }
2938
2939 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
2940 {
2941 struct f2fs_inode_info *fi = F2FS_I(inode);
2942
2943 simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
2944
2945 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
2946 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
2947 }
2948
2949 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
2950 {
2951 struct inode *inode = file_inode(filp);
2952 struct fsxattr fa;
2953
2954 f2fs_fill_fsxattr(inode, &fa);
2955
2956 if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
2957 return -EFAULT;
2958 return 0;
2959 }
2960
2961 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
2962 {
2963 struct inode *inode = file_inode(filp);
2964 struct fsxattr fa, old_fa;
2965 u32 iflags;
2966 int err;
2967
2968 if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
2969 return -EFAULT;
2970
2971 /* Make sure caller has proper permission */
2972 if (!inode_owner_or_capable(inode))
2973 return -EACCES;
2974
2975 if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
2976 return -EOPNOTSUPP;
2977
2978 iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
2979 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
2980 return -EOPNOTSUPP;
2981
2982 err = mnt_want_write_file(filp);
2983 if (err)
2984 return err;
2985
2986 inode_lock(inode);
2987
2988 f2fs_fill_fsxattr(inode, &old_fa);
2989 err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
2990 if (err)
2991 goto out;
2992
2993 err = f2fs_setflags_common(inode, iflags,
2994 f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
2995 if (err)
2996 goto out;
2997
2998 err = f2fs_ioc_setproject(filp, fa.fsx_projid);
2999 out:
3000 inode_unlock(inode);
3001 mnt_drop_write_file(filp);
3002 return err;
3003 }
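FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR and struct fsxattr are generic uapi from <linux/fs.h>, so the sketch below uses real definitions; only the read-modify-write policy is illustrative. Per f2fs_ioc_setproject() above, a nonzero fsx_projid only succeeds when the project quota feature is enabled.

#include <sys/ioctl.h>
#include <linux/fs.h>	/* FS_IOC_FSGETXATTR/FSSETXATTR, struct fsxattr */

/* Assign a project ID and set PROJINHERIT, in the spirit of chattr -p. */
int set_project(int fd, __u32 projid)
{
	struct fsxattr fa;

	if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) < 0)
		return -1;
	fa.fsx_projid = projid;
	fa.fsx_xflags |= FS_XFLAG_PROJINHERIT;
	return ioctl(fd, FS_IOC_FSSETXATTR, &fa);
}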
3004
3005 int f2fs_pin_file_control(struct inode *inode, bool inc)
3006 {
3007 struct f2fs_inode_info *fi = F2FS_I(inode);
3008 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3009
3010 /* Use i_gc_failures for normal file as a risk signal. */
3011 if (inc)
3012 f2fs_i_gc_failures_write(inode,
3013 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3014
3015 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3016 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3017 __func__, inode->i_ino,
3018 fi->i_gc_failures[GC_FAILURE_PIN]);
3019 clear_inode_flag(inode, FI_PIN_FILE);
3020 return -EAGAIN;
3021 }
3022 return 0;
3023 }
3024
3025 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3026 {
3027 struct inode *inode = file_inode(filp);
3028 __u32 pin;
3029 int ret = 0;
3030
3031 if (get_user(pin, (__u32 __user *)arg))
3032 return -EFAULT;
3033
3034 if (!S_ISREG(inode->i_mode))
3035 return -EINVAL;
3036
3037 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3038 return -EROFS;
3039
3040 ret = mnt_want_write_file(filp);
3041 if (ret)
3042 return ret;
3043
3044 inode_lock(inode);
3045
3046 if (f2fs_should_update_outplace(inode, NULL)) {
3047 ret = -EINVAL;
3048 goto out;
3049 }
3050
3051 if (!pin) {
3052 clear_inode_flag(inode, FI_PIN_FILE);
3053 f2fs_i_gc_failures_write(inode, 0);
3054 goto done;
3055 }
3056
3057 if (f2fs_pin_file_control(inode, false)) {
3058 ret = -EAGAIN;
3059 goto out;
3060 }
3061 ret = f2fs_convert_inline_inode(inode);
3062 if (ret)
3063 goto out;
3064
3065 set_inode_flag(inode, FI_PIN_FILE);
3066 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3067 done:
3068 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3069 out:
3070 inode_unlock(inode);
3071 mnt_drop_write_file(filp);
3072 return ret;
3073 }
3074
3075 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3076 {
3077 struct inode *inode = file_inode(filp);
3078 __u32 pin = 0;
3079
3080 if (is_inode_flag_set(inode, FI_PIN_FILE))
3081 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3082 return put_user(pin, (u32 __user *)arg);
3083 }
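Pinning takes a __u32 (0 unpins), and the get variant reports the pinned file's GC-failure count. The request codes below are assumptions mirroring f2fs.h; note the set path converts inline data and can return EAGAIN once the failure threshold is exceeded.

#include <sys/ioctl.h>
#include <linux/types.h>

#define F2FS_IOCTL_MAGIC	0xf5	/* assumed, mirrors f2fs.h */
#define F2FS_IOC_SET_PIN_FILE	_IOW(F2FS_IOCTL_MAGIC, 13, __u32)
#define F2FS_IOC_GET_PIN_FILE	_IOR(F2FS_IOCTL_MAGIC, 14, __u32)

/* Pin @fd so GC won't relocate its blocks; pass pin=0 to release. */
int f2fs_pin(int fd, int pin)
{
	__u32 val = pin ? 1 : 0;

	return ioctl(fd, F2FS_IOC_SET_PIN_FILE, &val);
}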
3084
3085 int f2fs_precache_extents(struct inode *inode)
3086 {
3087 struct f2fs_inode_info *fi = F2FS_I(inode);
3088 struct f2fs_map_blocks map;
3089 pgoff_t m_next_extent;
3090 loff_t end;
3091 int err;
3092
3093 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3094 return -EOPNOTSUPP;
3095
3096 map.m_lblk = 0;
3097 map.m_next_pgofs = NULL;
3098 map.m_next_extent = &m_next_extent;
3099 map.m_seg_type = NO_CHECK_TYPE;
3100 map.m_may_create = false;
3101 end = F2FS_I_SB(inode)->max_file_blocks;
3102
3103 while (map.m_lblk < end) {
3104 map.m_len = end - map.m_lblk;
3105
3106 down_write(&fi->i_gc_rwsem[WRITE]);
3107 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3108 up_write(&fi->i_gc_rwsem[WRITE]);
3109 if (err)
3110 return err;
3111
3112 map.m_lblk = m_next_extent;
3113 }
3114
3115 return err;
3116 }
3117
3118 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3119 {
3120 return f2fs_precache_extents(file_inode(filp));
3121 }
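Precaching takes no payload; it walks the whole file with F2FS_GET_BLOCK_PRECACHE to warm the extent cache. The request code below (_IO(0xf5, 15)) is an assumption mirroring f2fs.h.

#include <sys/ioctl.h>

#define F2FS_IOCTL_MAGIC		0xf5	/* assumed, mirrors f2fs.h */
#define F2FS_IOC_PRECACHE_EXTENTS	_IO(F2FS_IOCTL_MAGIC, 15)

/* Populate the extent cache for the whole file behind @fd. */
int f2fs_precache(int fd)
{
	return ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS, 0);
}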
3122
3123 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3124 {
3125 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3126 __u64 block_count;
3127 int ret;
3128
3129 if (!capable(CAP_SYS_ADMIN))
3130 return -EPERM;
3131
3132 if (f2fs_readonly(sbi->sb))
3133 return -EROFS;
3134
3135 if (copy_from_user(&block_count, (void __user *)arg,
3136 sizeof(block_count)))
3137 return -EFAULT;
3138
3139 ret = f2fs_resize_fs(sbi, block_count);
3140
3141 return ret;
3142 }
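Resizing passes a __u64 target block count straight to f2fs_resize_fs(); it needs CAP_SYS_ADMIN and a writable filesystem. The request code below is an assumption mirroring f2fs.h.

#include <sys/ioctl.h>
#include <linux/types.h>

#define F2FS_IOCTL_MAGIC	0xf5	/* assumed, mirrors f2fs.h */
#define F2FS_IOC_RESIZE_FS	_IOW(F2FS_IOCTL_MAGIC, 16, __u64)

/* Resize the fs to @blocks total blocks (see f2fs_resize_fs()). */
int f2fs_resize(int fd, __u64 blocks)
{
	return ioctl(fd, F2FS_IOC_RESIZE_FS, &blocks);
}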
3143
3144 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3145 {
3146 struct inode *inode = file_inode(filp);
3147
3148 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3149
3150 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3151 f2fs_warn(F2FS_I_SB(inode),
3152 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
3153 inode->i_ino);
3154 return -EOPNOTSUPP;
3155 }
3156
3157 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3158 }
3159
3160 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3161 {
3162 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3163 return -EOPNOTSUPP;
3164
3165 return fsverity_ioctl_measure(filp, (void __user *)arg);
3166 }
3167
3168 static int f2fs_get_volume_name(struct file *filp, unsigned long arg)
3169 {
3170 struct inode *inode = file_inode(filp);
3171 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3172 char *vbuf;
3173 int count;
3174 int err = 0;
3175
3176 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3177 if (!vbuf)
3178 return -ENOMEM;
3179
3180 down_read(&sbi->sb_lock);
3181 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3182 ARRAY_SIZE(sbi->raw_super->volume_name),
3183 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3184 up_read(&sbi->sb_lock);
3185
3186 if (copy_to_user((char __user *)arg, vbuf,
3187 min(FSLABEL_MAX, count)))
3188 err = -EFAULT;
3189
3190 kvfree(vbuf);
3191 return err;
3192 }
3193
3194 static int f2fs_set_volume_name(struct file *filp, unsigned long arg)
3195 {
3196 struct inode *inode = file_inode(filp);
3197 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3198 char *vbuf;
3199 int err = 0;
3200
3201 if (!capable(CAP_SYS_ADMIN))
3202 return -EPERM;
3203
3204 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3205 if (IS_ERR(vbuf))
3206 return PTR_ERR(vbuf);
3207
3208 err = mnt_want_write_file(filp);
3209 if (err)
3210 goto out;
3211
3212 down_write(&sbi->sb_lock);
3213
3214 memset(sbi->raw_super->volume_name, 0,
3215 sizeof(sbi->raw_super->volume_name));
3216 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3217 sbi->raw_super->volume_name,
3218 ARRAY_SIZE(sbi->raw_super->volume_name));
3219
3220 err = f2fs_commit_super(sbi, false);
3221
3222 up_write(&sbi->sb_lock);
3223
3224 mnt_drop_write_file(filp);
3225 out:
3226 kfree(vbuf);
3227 return err;
3228 }
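The volume-name pair maps onto the generic FS_IOC_GETFSLABEL/FS_IOC_SETFSLABEL interface in <linux/fs.h> (F2FS_IOC_GET/SET_VOLUME_NAME alias them), with the kernel converting to and from the UTF-16LE name stored in the superblock. FSLABEL_MAX (256) bounds the buffer.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* FS_IOC_GETFSLABEL/SETFSLABEL, FSLABEL_MAX */

int print_label(const char *path)
{
	char label[FSLABEL_MAX] = "";
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, FS_IOC_GETFSLABEL, label) < 0) {
		close(fd);
		return -1;
	}
	printf("label: %s\n", label);
	close(fd);
	return 0;
}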
3229
3230 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3231 {
3232 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
3233 return -EIO;
3234 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
3235 return -ENOSPC;
3236
3237 switch (cmd) {
3238 case F2FS_IOC_GETFLAGS:
3239 return f2fs_ioc_getflags(filp, arg);
3240 case F2FS_IOC_SETFLAGS:
3241 return f2fs_ioc_setflags(filp, arg);
3242 case F2FS_IOC_GETVERSION:
3243 return f2fs_ioc_getversion(filp, arg);
3244 case F2FS_IOC_START_ATOMIC_WRITE:
3245 return f2fs_ioc_start_atomic_write(filp);
3246 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
3247 return f2fs_ioc_commit_atomic_write(filp);
3248 case F2FS_IOC_START_VOLATILE_WRITE:
3249 return f2fs_ioc_start_volatile_write(filp);
3250 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
3251 return f2fs_ioc_release_volatile_write(filp);
3252 case F2FS_IOC_ABORT_VOLATILE_WRITE:
3253 return f2fs_ioc_abort_volatile_write(filp);
3254 case F2FS_IOC_SHUTDOWN:
3255 return f2fs_ioc_shutdown(filp, arg);
3256 case FITRIM:
3257 return f2fs_ioc_fitrim(filp, arg);
3258 case F2FS_IOC_SET_ENCRYPTION_POLICY:
3259 return f2fs_ioc_set_encryption_policy(filp, arg);
3260 case F2FS_IOC_GET_ENCRYPTION_POLICY:
3261 return f2fs_ioc_get_encryption_policy(filp, arg);
3262 case F2FS_IOC_GET_ENCRYPTION_PWSALT:
3263 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
3264 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
3265 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
3266 case FS_IOC_ADD_ENCRYPTION_KEY:
3267 return f2fs_ioc_add_encryption_key(filp, arg);
3268 case FS_IOC_REMOVE_ENCRYPTION_KEY:
3269 return f2fs_ioc_remove_encryption_key(filp, arg);
3270 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
3271 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
3272 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
3273 return f2fs_ioc_get_encryption_key_status(filp, arg);
3274 case F2FS_IOC_GARBAGE_COLLECT:
3275 return f2fs_ioc_gc(filp, arg);
3276 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
3277 return f2fs_ioc_gc_range(filp, arg);
3278 case F2FS_IOC_WRITE_CHECKPOINT:
3279 return f2fs_ioc_write_checkpoint(filp, arg);
3280 case F2FS_IOC_DEFRAGMENT:
3281 return f2fs_ioc_defragment(filp, arg);
3282 case F2FS_IOC_MOVE_RANGE:
3283 return f2fs_ioc_move_range(filp, arg);
3284 case F2FS_IOC_FLUSH_DEVICE:
3285 return f2fs_ioc_flush_device(filp, arg);
3286 case F2FS_IOC_GET_FEATURES:
3287 return f2fs_ioc_get_features(filp, arg);
3288 case F2FS_IOC_FSGETXATTR:
3289 return f2fs_ioc_fsgetxattr(filp, arg);
3290 case F2FS_IOC_FSSETXATTR:
3291 return f2fs_ioc_fssetxattr(filp, arg);
3292 case F2FS_IOC_GET_PIN_FILE:
3293 return f2fs_ioc_get_pin_file(filp, arg);
3294 case F2FS_IOC_SET_PIN_FILE:
3295 return f2fs_ioc_set_pin_file(filp, arg);
3296 case F2FS_IOC_PRECACHE_EXTENTS:
3297 return f2fs_ioc_precache_extents(filp, arg);
3298 case F2FS_IOC_RESIZE_FS:
3299 return f2fs_ioc_resize_fs(filp, arg);
3300 case FS_IOC_ENABLE_VERITY:
3301 return f2fs_ioc_enable_verity(filp, arg);
3302 case FS_IOC_MEASURE_VERITY:
3303 return f2fs_ioc_measure_verity(filp, arg);
3304 case F2FS_IOC_GET_VOLUME_NAME:
3305 return f2fs_get_volume_name(filp, arg);
3306 case F2FS_IOC_SET_VOLUME_NAME:
3307 return f2fs_set_volume_name(filp, arg);
3308 default:
3309 return -ENOTTY;
3310 }
3311 }
3312
3313 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3314 {
3315 struct file *file = iocb->ki_filp;
3316 struct inode *inode = file_inode(file);
3317 ssize_t ret;
3318
3319 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
3320 ret = -EIO;
3321 goto out;
3322 }
3323
3324 if (iocb->ki_flags & IOCB_NOWAIT) {
3325 if (!inode_trylock(inode)) {
3326 ret = -EAGAIN;
3327 goto out;
3328 }
3329 } else {
3330 inode_lock(inode);
3331 }
3332
3333 ret = generic_write_checks(iocb, from);
3334 if (ret > 0) {
3335 bool preallocated = false;
3336 size_t target_size = 0;
3337 int err;
3338
3339 if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
3340 set_inode_flag(inode, FI_NO_PREALLOC);
3341
3342 if ((iocb->ki_flags & IOCB_NOWAIT)) {
3343 if (!f2fs_overwrite_io(inode, iocb->ki_pos,
3344 iov_iter_count(from)) ||
3345 f2fs_has_inline_data(inode) ||
3346 f2fs_force_buffered_io(inode, iocb, from)) {
3347 clear_inode_flag(inode, FI_NO_PREALLOC);
3348 inode_unlock(inode);
3349 ret = -EAGAIN;
3350 goto out;
3351 }
3352 goto write;
3353 }
3354
3355 if (is_inode_flag_set(inode, FI_NO_PREALLOC))
3356 goto write;
3357
3358 if (iocb->ki_flags & IOCB_DIRECT) {
3359 /*
3360  * Convert inline data for Direct I/O before entering
3361  * f2fs_direct_IO().
3362  */
3363 err = f2fs_convert_inline_inode(inode);
3364 if (err)
3365 goto out_err;
3366
3367 /*
3368  * If force_buffered_io() is true, we have to allocate blocks all the
3369  * time, since f2fs_direct_IO will fall back to buffered IO.
3370  */
3371 if (!f2fs_force_buffered_io(inode, iocb, from) &&
3372 allow_outplace_dio(inode, iocb, from))
3373 goto write;
3374 }
3375 preallocated = true;
3376 target_size = iocb->ki_pos + iov_iter_count(from);
3377
3378 err = f2fs_preallocate_blocks(iocb, from);
3379 if (err) {
3380 out_err:
3381 clear_inode_flag(inode, FI_NO_PREALLOC);
3382 inode_unlock(inode);
3383 ret = err;
3384 goto out;
3385 }
3386 write:
3387 ret = __generic_file_write_iter(iocb, from);
3388 clear_inode_flag(inode, FI_NO_PREALLOC);
3389
3390 /* if we couldn't write data, we should deallocate blocks. */
3391 if (preallocated && i_size_read(inode) < target_size)
3392 f2fs_truncate(inode);
3393
3394 if (ret > 0)
3395 f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
3396 }
3397 inode_unlock(inode);
3398 out:
3399 trace_f2fs_file_write_iter(inode, iocb->ki_pos,
3400 iov_iter_count(from), ret);
3401 if (ret > 0)
3402 ret = generic_write_sync(iocb, ret);
3403 return ret;
3404 }
3405
3406 #ifdef CONFIG_COMPAT
3407 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3408 {
3409 switch (cmd) {
3410 case F2FS_IOC32_GETFLAGS:
3411 cmd = F2FS_IOC_GETFLAGS;
3412 break;
3413 case F2FS_IOC32_SETFLAGS:
3414 cmd = F2FS_IOC_SETFLAGS;
3415 break;
3416 case F2FS_IOC32_GETVERSION:
3417 cmd = F2FS_IOC_GETVERSION;
3418 break;
3419 case F2FS_IOC_START_ATOMIC_WRITE:
3420 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
3421 case F2FS_IOC_START_VOLATILE_WRITE:
3422 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
3423 case F2FS_IOC_ABORT_VOLATILE_WRITE:
3424 case F2FS_IOC_SHUTDOWN:
3425 case F2FS_IOC_SET_ENCRYPTION_POLICY:
3426 case F2FS_IOC_GET_ENCRYPTION_PWSALT:
3427 case F2FS_IOC_GET_ENCRYPTION_POLICY:
3428 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
3429 case FS_IOC_ADD_ENCRYPTION_KEY:
3430 case FS_IOC_REMOVE_ENCRYPTION_KEY:
3431 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
3432 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
3433 case F2FS_IOC_GARBAGE_COLLECT:
3434 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
3435 case F2FS_IOC_WRITE_CHECKPOINT:
3436 case F2FS_IOC_DEFRAGMENT:
3437 case F2FS_IOC_MOVE_RANGE:
3438 case F2FS_IOC_FLUSH_DEVICE:
3439 case F2FS_IOC_GET_FEATURES:
3440 case F2FS_IOC_FSGETXATTR:
3441 case F2FS_IOC_FSSETXATTR:
3442 case F2FS_IOC_GET_PIN_FILE:
3443 case F2FS_IOC_SET_PIN_FILE:
3444 case F2FS_IOC_PRECACHE_EXTENTS:
3445 case F2FS_IOC_RESIZE_FS:
3446 case FS_IOC_ENABLE_VERITY:
3447 case FS_IOC_MEASURE_VERITY:
3448 case F2FS_IOC_GET_VOLUME_NAME:
3449 case F2FS_IOC_SET_VOLUME_NAME:
3450 break;
3451 default:
3452 return -ENOIOCTLCMD;
3453 }
3454 return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
3455 }
3456 #endif
3457
3458 const struct file_operations f2fs_file_operations = {
3459 .llseek = f2fs_llseek,
3460 .read_iter = generic_file_read_iter,
3461 .write_iter = f2fs_file_write_iter,
3462 .open = f2fs_file_open,
3463 .release = f2fs_release_file,
3464 .mmap = f2fs_file_mmap,
3465 .flush = f2fs_file_flush,
3466 .fsync = f2fs_sync_file,
3467 .fallocate = f2fs_fallocate,
3468 .unlocked_ioctl = f2fs_ioctl,
3469 #ifdef CONFIG_COMPAT
3470 .compat_ioctl = f2fs_compat_ioctl,
3471 #endif
3472 .splice_read = generic_file_splice_read,
3473 .splice_write = iter_file_splice_write,
3474 };