This source file includes following definitions.
- ntfs_file_open
- ntfs_attr_extend_initialized
- ntfs_prepare_file_for_write
- __ntfs_grab_cache_pages
- ntfs_submit_bh_for_read
- ntfs_prepare_pages_for_non_resident_write
- ntfs_flush_dcache_pages
- ntfs_commit_pages_after_non_resident_write
- ntfs_commit_pages_after_write
- ntfs_copy_from_user_iter
- ntfs_perform_write
- ntfs_file_write_iter
- ntfs_file_fsync
1
2
3
4
5
6
7
8 #include <linux/backing-dev.h>
9 #include <linux/buffer_head.h>
10 #include <linux/gfp.h>
11 #include <linux/pagemap.h>
12 #include <linux/pagevec.h>
13 #include <linux/sched/signal.h>
14 #include <linux/swap.h>
15 #include <linux/uio.h>
16 #include <linux/writeback.h>
17
18 #include <asm/page.h>
19 #include <linux/uaccess.h>
20
21 #include "attrib.h"
22 #include "bitmap.h"
23 #include "inode.h"
24 #include "debug.h"
25 #include "lcnalloc.h"
26 #include "malloc.h"
27 #include "mft.h"
28 #include "ntfs.h"
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48 static int ntfs_file_open(struct inode *vi, struct file *filp)
49 {
50 if (sizeof(unsigned long) < 8) {
51 if (i_size_read(vi) > MAX_LFS_FILESIZE)
52 return -EOVERFLOW;
53 }
54 return generic_file_open(vi, filp);
55 }
56
57 #ifdef NTFS_RW
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
/**
 * ntfs_attr_extend_initialized - extend the initialized size of an attribute
 * @ni:			ntfs inode of the attribute to extend
 * @new_init_size:	requested new initialized size in bytes
 *
 * Extend the initialized size of the attribute described by the ntfs inode
 * @ni to @new_init_size bytes, zeroing the newly initialized region.
 *
 * Resident case: the attribute value inside the mft record is zero-extended
 * in place and both i_size and initialized_size are set to @new_init_size.
 *
 * Non-resident case: if needed, data_size in the attribute record and
 * i_size are first bumped to @new_init_size; then every page covering the
 * old-to-new initialized region is read in (reading zeroes the regions
 * beyond the old initialized size) and marked dirty, with ni->initialized_size
 * advanced page by page under size_lock; finally the on-disk
 * initialized_size in the attribute record is updated.
 *
 * Return 0 on success and -errno on error.  On error in the non-resident
 * page loop the in-memory initialized_size is rolled back to its old value.
 */
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size)
{
	s64 old_init_size;
	loff_t old_i_size;
	pgoff_t index, end_index;
	unsigned long flags;
	struct inode *vi = VFS_I(ni);
	ntfs_inode *base_ni;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a;
	ntfs_attr_search_ctx *ctx = NULL;
	struct address_space *mapping;
	struct page *page = NULL;
	u8 *kattr;
	int err;
	u32 attr_len;

	/* Snapshot the current sizes under the size lock. */
	read_lock_irqsave(&ni->size_lock, flags);
	old_init_size = ni->initialized_size;
	old_i_size = i_size_read(vi);
	BUG_ON(new_init_size > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
			"old_initialized_size 0x%llx, "
			"new_initialized_size 0x%llx, i_size 0x%llx.",
			vi->i_ino, (unsigned)le32_to_cpu(ni->type),
			(unsigned long long)old_init_size,
			(unsigned long long)new_init_size, old_i_size);
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Non-resident attributes are handled further below. */
	if (NInoNonResident(ni))
		goto do_non_resident_extend;
	/* Resident attribute: initialized size always equals i_size. */
	BUG_ON(old_init_size != old_i_size);
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	BUG_ON(old_i_size != (loff_t)attr_len);
	/*
	 * Zero-extend the value in the mft record itself and update the
	 * value length in the attribute record.
	 */
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	memset(kattr + attr_len, 0, new_init_size - attr_len);
	a->data.resident.value_length = cpu_to_le32((u32)new_init_size);
	/* Finally, update the sizes in the vfs and ntfs inodes. */
	write_lock_irqsave(&ni->size_lock, flags);
	i_size_write(vi, new_init_size);
	ni->initialized_size = new_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
	goto done;
do_non_resident_extend:
	/*
	 * If the new initialized size exceeds the current file size, first
	 * grow data_size in the attribute record and i_size to match.
	 */
	if (new_init_size > old_i_size) {
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			m = NULL;
			goto err_out;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			goto err_out;
		}
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, 0, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			goto err_out;
		}
		m = ctx->mrec;
		a = ctx->attr;
		BUG_ON(!a->non_resident);
		BUG_ON(old_i_size != (loff_t)
				sle64_to_cpu(a->data.non_resident.data_size));
		a->data.non_resident.data_size = cpu_to_sle64(new_init_size);
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		/* Update the file size in the vfs inode. */
		i_size_write(vi, new_init_size);
		ntfs_attr_put_search_ctx(ctx);
		ctx = NULL;
		unmap_mft_record(base_ni);
		m = NULL;
	}
	mapping = vi->i_mapping;
	index = old_init_size >> PAGE_SHIFT;
	end_index = (new_init_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	do {
		/*
		 * Read the page in; reading zeroes any region beyond the
		 * (old) initialized size for us.
		 */
		page = read_mapping_page(mapping, index, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto init_err_out;
		}
		if (unlikely(PageError(page))) {
			put_page(page);
			err = -EIO;
			goto init_err_out;
		}
		/*
		 * Advance the in-memory initialized size to cover this page,
		 * capped at @new_init_size.
		 */
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = (s64)(index + 1) << PAGE_SHIFT;
		if (ni->initialized_size > new_init_size)
			ni->initialized_size = new_init_size;
		write_unlock_irqrestore(&ni->size_lock, flags);
		/* Set the page dirty so it gets written out. */
		set_page_dirty(page);
		put_page(page);
		/*
		 * Play nice with the vm and the rest of the system: we may be
		 * dirtying a huge number of pages here (e.g. after a seek far
		 * beyond the old initialized size), so throttle ourselves and
		 * allow rescheduling between pages.
		 */
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (++index < end_index);
	read_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(ni->initialized_size != new_init_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	/* Now bring the on-disk initialized_size in sync. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto init_err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto init_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto init_err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size);
done:
	/* Ensure the attribute record changes make it to disk. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.",
			(unsigned long long)new_init_size, i_size_read(vi));
	return 0;
init_err_out:
	/* Roll back the in-memory initialized size. */
	write_lock_irqsave(&ni->size_lock, flags);
	ni->initialized_size = old_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Failed. Returning error code %i.", err);
	return err;
}
316
317 static ssize_t ntfs_prepare_file_for_write(struct kiocb *iocb,
318 struct iov_iter *from)
319 {
320 loff_t pos;
321 s64 end, ll;
322 ssize_t err;
323 unsigned long flags;
324 struct file *file = iocb->ki_filp;
325 struct inode *vi = file_inode(file);
326 ntfs_inode *base_ni, *ni = NTFS_I(vi);
327 ntfs_volume *vol = ni->vol;
328
329 ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
330 "0x%llx, count 0x%zx.", vi->i_ino,
331 (unsigned)le32_to_cpu(ni->type),
332 (unsigned long long)iocb->ki_pos,
333 iov_iter_count(from));
334 err = generic_write_checks(iocb, from);
335 if (unlikely(err <= 0))
336 goto out;
337
338
339
340
341 BUG_ON(NInoMstProtected(ni));
342 BUG_ON(ni->type != AT_DATA);
343
344 if (NInoEncrypted(ni)) {
345
346
347
348
349
350 ntfs_debug("Denying write access to encrypted file.");
351 err = -EACCES;
352 goto out;
353 }
354 if (NInoCompressed(ni)) {
355
356 BUG_ON(ni->name_len);
357
358
359
360
361
362
363 ntfs_error(vi->i_sb, "Writing to compressed files is not "
364 "implemented yet. Sorry.");
365 err = -EOPNOTSUPP;
366 goto out;
367 }
368 base_ni = ni;
369 if (NInoAttr(ni))
370 base_ni = ni->ext.base_ntfs_ino;
371 err = file_remove_privs(file);
372 if (unlikely(err))
373 goto out;
374
375
376
377
378 file_update_time(file);
379 pos = iocb->ki_pos;
380
381 end = (pos + iov_iter_count(from) + vol->cluster_size_mask) &
382 ~(u64)vol->cluster_size_mask;
383
384
385
386
387 read_lock_irqsave(&ni->size_lock, flags);
388 ll = ni->allocated_size;
389 read_unlock_irqrestore(&ni->size_lock, flags);
390 if (end > ll) {
391
392
393
394
395
396
397
398 ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
399 if (likely(ll >= 0)) {
400 BUG_ON(pos >= ll);
401
402 if (end > ll) {
403 ntfs_debug("Truncating write to inode 0x%lx, "
404 "attribute type 0x%x, because "
405 "the allocation was only "
406 "partially extended.",
407 vi->i_ino, (unsigned)
408 le32_to_cpu(ni->type));
409 iov_iter_truncate(from, ll - pos);
410 }
411 } else {
412 err = ll;
413 read_lock_irqsave(&ni->size_lock, flags);
414 ll = ni->allocated_size;
415 read_unlock_irqrestore(&ni->size_lock, flags);
416
417 if (pos < ll) {
418 ntfs_debug("Truncating write to inode 0x%lx "
419 "attribute type 0x%x, because "
420 "extending the allocation "
421 "failed (error %d).",
422 vi->i_ino, (unsigned)
423 le32_to_cpu(ni->type),
424 (int)-err);
425 iov_iter_truncate(from, ll - pos);
426 } else {
427 if (err != -ENOSPC)
428 ntfs_error(vi->i_sb, "Cannot perform "
429 "write to inode "
430 "0x%lx, attribute "
431 "type 0x%x, because "
432 "extending the "
433 "allocation failed "
434 "(error %ld).",
435 vi->i_ino, (unsigned)
436 le32_to_cpu(ni->type),
437 (long)-err);
438 else
439 ntfs_debug("Cannot perform write to "
440 "inode 0x%lx, "
441 "attribute type 0x%x, "
442 "because there is not "
443 "space left.",
444 vi->i_ino, (unsigned)
445 le32_to_cpu(ni->type));
446 goto out;
447 }
448 }
449 }
450
451
452
453
454
455
456
457 read_lock_irqsave(&ni->size_lock, flags);
458 ll = ni->initialized_size;
459 read_unlock_irqrestore(&ni->size_lock, flags);
460 if (pos > ll) {
461
462
463
464
465 inode_dio_wait(vi);
466 err = ntfs_attr_extend_initialized(ni, pos);
467 if (unlikely(err < 0))
468 ntfs_error(vi->i_sb, "Cannot perform write to inode "
469 "0x%lx, attribute type 0x%x, because "
470 "extending the initialized size "
471 "failed (error %d).", vi->i_ino,
472 (unsigned)le32_to_cpu(ni->type),
473 (int)-err);
474 }
475 out:
476 return err;
477 }
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494 static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
495 pgoff_t index, const unsigned nr_pages, struct page **pages,
496 struct page **cached_page)
497 {
498 int err, nr;
499
500 BUG_ON(!nr_pages);
501 err = nr = 0;
502 do {
503 pages[nr] = find_get_page_flags(mapping, index, FGP_LOCK |
504 FGP_ACCESSED);
505 if (!pages[nr]) {
506 if (!*cached_page) {
507 *cached_page = page_cache_alloc(mapping);
508 if (unlikely(!*cached_page)) {
509 err = -ENOMEM;
510 goto err_out;
511 }
512 }
513 err = add_to_page_cache_lru(*cached_page, mapping,
514 index,
515 mapping_gfp_constraint(mapping, GFP_KERNEL));
516 if (unlikely(err)) {
517 if (err == -EEXIST)
518 continue;
519 goto err_out;
520 }
521 pages[nr] = *cached_page;
522 *cached_page = NULL;
523 }
524 index++;
525 nr++;
526 } while (nr < nr_pages);
527 out:
528 return err;
529 err_out:
530 while (nr > 0) {
531 unlock_page(pages[--nr]);
532 put_page(pages[nr]);
533 }
534 goto out;
535 }
536
/*
 * ntfs_submit_bh_for_read - submit a buffer head for asynchronous read i/o
 *
 * Lock @bh, take an extra reference on it, attach the standard read
 * completion handler and submit the read request.  The caller is expected
 * to wait_on_buffer() later; end_buffer_read_sync() handles completion
 * (presumably unlocking and releasing the extra reference — standard
 * buffer_head completion behaviour).
 */
static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	return submit_bh(REQ_OP_READ, 0, bh);
}
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
/**
 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
 * @pages:	array of destination pages (locked, with @pages[0] first)
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in the file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * Prepare the buffers of each page for a write of @bytes bytes at @pos:
 * map every buffer to its on-disk location (allocating a cluster and
 * rebuilding the attribute's mapping pairs array when the buffer falls in a
 * sparse hole), read in buffers that are only partially overwritten and lie
 * inside the initialized size, and zero buffers outside it.  A one-entry
 * "map cache" (@vcn/@vcn_len/@lcn/@lcn_block) avoids repeated runlist
 * lookups for consecutive buffers in the same run.
 *
 * Return 0 on success and -errno on error.  On error, a multi-stage
 * rollback (tracked in @status) undoes the runlist merge, frees the
 * allocated cluster, restores the attribute record, and dirties any
 * instantiated buffers so already-zeroed data is not lost.
 */
static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
		unsigned nr_pages, s64 pos, size_t bytes)
{
	VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;
	LCN lcn;
	s64 bh_pos, vcn_len, end, initialized_size;
	sector_t lcn_block;
	struct page *page;
	struct inode *vi;
	ntfs_inode *ni, *base_ni = NULL;
	ntfs_volume *vol;
	runlist_element *rl, *rl2;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	ntfs_attr_search_ctx *ctx = NULL;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a = NULL;
	unsigned long flags;
	u32 attr_rec_len = 0;
	unsigned blocksize, u;
	int err, mp_size;
	bool rl_write_locked, was_hole, is_retry;
	unsigned char blocksize_bits;
	struct {
		/* Rollback state for the error path at the bottom. */
		u8 runlist_merged:1;
		u8 mft_attr_mapped:1;
		u8 mp_rebuilt:1;
		u8 attr_switched:1;
	} status = { 0, 0, 0, 0 };

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	BUG_ON(!*pages);
	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, pages[0]->index, nr_pages,
			(long long)pos, bytes);
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	u = 0;
	do {
		page = pages[u];
		BUG_ON(!page);
		/*
		 * Ensure every page has buffers; create_empty_buffers() makes
		 * uptodate/dirty buffers if the page is uptodate/dirty.
		 */
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, blocksize, 0);
			if (unlikely(!page_has_buffers(page)))
				return -ENOMEM;
		}
	} while (++u < nr_pages);
	rl_write_locked = false;
	rl = NULL;
	err = 0;
	/* Invalidate the map cache. */
	vcn = lcn = -1;
	vcn_len = 0;
	lcn_block = -1;
	was_hole = false;
	cpos = pos >> vol->cluster_size_bits;
	end = pos + bytes;
	cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
	/*
	 * Loop over each buffer in each page.  goto is used to keep the
	 * indentation manageable.
	 */
	u = 0;
do_next_page:
	page = pages[u];
	bh_pos = (s64)page->index << PAGE_SHIFT;
	bh = head = page_buffers(page);
	do {
		VCN cdelta;
		s64 bh_end;
		unsigned bh_cofs;

		/* Clear buffer_new on all buffers to reinitialise state. */
		if (buffer_new(bh))
			clear_buffer_new(bh);
		bh_end = bh_pos + blocksize;
		bh_cpos = bh_pos >> vol->cluster_size_bits;
		bh_cofs = bh_pos & vol->cluster_size_mask;
		if (buffer_mapped(bh)) {
			/* Already mapped and uptodate: nothing to do. */
			if (buffer_uptodate(bh))
				continue;
			/*
			 * Not uptodate, but if the page is, the buffer can
			 * simply inherit that state.
			 */
			if (PageUptodate(page)) {
				set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * Neither page nor buffer is uptodate.  If the buffer
			 * straddles a write boundary (is only partially
			 * overwritten), its old contents must be brought in
			 * now: read it if it lies inside the initialized
			 * size, zero it otherwise.
			 */
			if ((bh_pos < pos && bh_end > pos) ||
					(bh_pos < end && bh_end > end)) {
				read_lock_irqsave(&ni->size_lock, flags);
				initialized_size = ni->initialized_size;
				read_unlock_irqrestore(&ni->size_lock, flags);
				if (bh_pos < initialized_size) {
					ntfs_submit_bh_for_read(bh);
					*wait_bh++ = bh;
				} else {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
			}
			continue;
		}
		/* Unmapped buffer.  Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;
		/*
		 * If the buffer's cluster is covered by the map cache
		 * (@vcn..@vcn+@vcn_len), compute the block number directly
		 * from the cached @lcn_block.
		 */
		cdelta = bh_cpos - vcn;
		if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) {
map_buffer_cached:
			BUG_ON(lcn < 0);
			bh->b_blocknr = lcn_block +
					(cdelta << (vol->cluster_size_bits -
					blocksize_bits)) +
					(bh_cofs >> blocksize_bits);
			set_buffer_mapped(bh);
			/*
			 * If the page is uptodate so is the buffer.  When the
			 * cluster was just allocated (@was_hole), a buffer
			 * fully outside the write is dirtied immediately so
			 * it gets written out; one inside the write is only
			 * marked buffer_new for error recovery.
			 */
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
				if (unlikely(was_hole)) {
					/* We allocated this buffer. */
					clean_bdev_bh_alias(bh);
					if (bh_end <= pos || bh_pos >= end)
						mark_buffer_dirty(bh);
					else
						set_buffer_new(bh);
				}
				continue;
			}
			/* Page is _not_ uptodate. */
			if (likely(!was_hole)) {
				/*
				 * Buffer was already allocated: if it is not
				 * uptodate and only partially overwritten,
				 * read it in (inside initialized size) or
				 * zero it (outside).
				 */
				if (!buffer_uptodate(bh) && bh_pos < end &&
						bh_end > pos &&
						(bh_pos < pos ||
						bh_end > end)) {
					read_lock_irqsave(&ni->size_lock,
							flags);
					initialized_size = ni->initialized_size;
					read_unlock_irqrestore(&ni->size_lock,
							flags);
					if (bh_pos < initialized_size) {
						ntfs_submit_bh_for_read(bh);
						*wait_bh++ = bh;
					} else {
						zero_user(page, bh_offset(bh),
								blocksize);
						set_buffer_uptodate(bh);
					}
				}
				continue;
			}
			/* We allocated this buffer (it was a hole). */
			clean_bdev_bh_alias(bh);
			/*
			 * Fully outside the write: zero, mark uptodate and
			 * dirty so it is written out.  Fully inside: nothing
			 * to do.  Partially inside: zero only the part
			 * outside the write; commit time makes it uptodate.
			 */
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh)) {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
				mark_buffer_dirty(bh);
				continue;
			}
			set_buffer_new(bh);
			if (!buffer_uptodate(bh) &&
					(bh_pos < pos || bh_end > end)) {
				u8 *kaddr;
				unsigned pofs;

				kaddr = kmap_atomic(page);
				if (bh_pos < pos) {
					pofs = bh_pos & ~PAGE_MASK;
					memset(kaddr + pofs, 0, pos - bh_pos);
				}
				if (bh_end > end) {
					pofs = end & ~PAGE_MASK;
					memset(kaddr + pofs, 0, bh_end - end);
				}
				kunmap_atomic(kaddr);
				flush_dcache_page(page);
			}
			continue;
		}
		/*
		 * Map cache miss.  A buffer beyond the allocated size (note
		 * the local is reused to hold allocated_size here) need not
		 * be mapped at all; just make it uptodate, zeroing if the
		 * page is not uptodate.
		 */
		read_lock_irqsave(&ni->size_lock, flags);
		initialized_size = ni->allocated_size;
		read_unlock_irqrestore(&ni->size_lock, flags);
		if (bh_pos > initialized_size) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			} else if (!buffer_uptodate(bh)) {
				zero_user(page, bh_offset(bh), blocksize);
				set_buffer_uptodate(bh);
			}
			continue;
		}
		is_retry = false;
		if (!rl) {
			down_read(&ni->runlist.lock);
retry_remap:
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target cluster. */
			while (rl->length && rl[1].vcn <= bh_cpos)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos);
			if (likely(lcn >= 0)) {
				/*
				 * Successful remap: load the map cache and
				 * use it to map the buffer.
				 */
				was_hole = false;
				vcn = bh_cpos;
				vcn_len = rl[1].vcn - vcn;
				lcn_block = lcn << (vol->cluster_size_bits -
						blocksize_bits);
				cdelta = 0;
				/*
				 * If the cached run covers all remaining
				 * clusters of the write, the runlist lock is
				 * no longer needed; drop it.
				 */
				if (likely(vcn + vcn_len >= cend)) {
					if (rl_write_locked) {
						up_write(&ni->runlist.lock);
						rl_write_locked = false;
					} else
						up_read(&ni->runlist.lock);
					rl = NULL;
				}
				goto map_buffer_cached;
			}
		} else
			lcn = LCN_RL_NOT_MAPPED;
		/*
		 * Not a hole and not out of bounds: the runlist fragment is
		 * probably unmapped, so try to map it now.
		 */
		if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) {
			if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) {
				/* Attempt to map the runlist. */
				if (!rl_write_locked) {
					/*
					 * Mapping needs the runlist locked
					 * for writing; upgrade the lock and
					 * retry in case it changed while the
					 * lock was dropped.
					 */
					up_read(&ni->runlist.lock);
					down_write(&ni->runlist.lock);
					rl_write_locked = true;
					goto retry_remap;
				}
				err = ntfs_map_runlist_nolock(ni, bh_cpos,
						NULL);
				if (likely(!err)) {
					is_retry = true;
					goto retry_remap;
				}
				/*
				 * Out-of-bounds vcn: treat as LCN_ENOENT and
				 * let the code below deal with it.
				 */
				if (err == -ENOENT) {
					lcn = LCN_ENOENT;
					err = 0;
					goto rl_not_mapped_enoent;
				}
			} else
				err = -EIO;
			/* Failed to map the buffer, even after retrying. */
			bh->b_blocknr = -1;
			ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
					"attribute type 0x%x, vcn 0x%llx, "
					"vcn offset 0x%x, because its "
					"location on disk could not be "
					"determined%s (error code %i).",
					ni->mft_no, ni->type,
					(unsigned long long)bh_cpos,
					(unsigned)bh_pos &
					vol->cluster_size_mask,
					is_retry ? " even after retrying" : "",
					err);
			break;
		}
rl_not_mapped_enoent:
		/*
		 * The buffer is in a hole or out of bounds.  If its cluster
		 * is not touched by the write (only possible when the
		 * cluster size is smaller than the page size), leave the
		 * buffer unmapped.
		 */
		if (unlikely(vol->cluster_size < PAGE_SIZE)) {
			bh_cend = (bh_end + vol->cluster_size - 1) >>
					vol->cluster_size_bits;
			if ((bh_cend <= cpos || bh_cpos >= cend)) {
				bh->b_blocknr = -1;
				/*
				 * Make the buffer uptodate where cheap: from
				 * the page if it is uptodate, by zeroing
				 * otherwise.
				 */
				if (PageUptodate(page)) {
					if (!buffer_uptodate(bh))
						set_buffer_uptodate(bh);
				} else if (!buffer_uptodate(bh)) {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
				continue;
			}
		}
		/*
		 * An out-of-bounds buffer would have been handled above, so
		 * this must be a hole that needs filling.
		 */
		BUG_ON(lcn != LCN_HOLE);
		/*
		 * Hole filling modifies the runlist, so the write lock is
		 * required; upgrade and retry if still only read-locked.
		 */
		BUG_ON(!rl);
		if (!rl_write_locked) {
			up_read(&ni->runlist.lock);
			down_write(&ni->runlist.lock);
			rl_write_locked = true;
			goto retry_remap;
		}
		/* Find the previous last allocated cluster as an lcn hint. */
		BUG_ON(rl->lcn != LCN_HOLE);
		lcn = -1;
		rl2 = rl;
		while (--rl2 >= ni->runlist.rl) {
			if (rl2->lcn >= 0) {
				lcn = rl2->lcn + rl2->length;
				break;
			}
		}
		rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
				false);
		if (IS_ERR(rl2)) {
			err = PTR_ERR(rl2);
			ntfs_debug("Failed to allocate cluster, error code %i.",
					err);
			break;
		}
		lcn = rl2->lcn;
		/* Merge the new cluster's run into the inode's runlist. */
		rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
		if (IS_ERR(rl)) {
			err = PTR_ERR(rl);
			if (err != -ENOMEM)
				err = -EIO;
			/* Merge failed; give the cluster back. */
			if (ntfs_cluster_free_from_rl(vol, rl2)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path. Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			ntfs_free(rl2);
			break;
		}
		ni->runlist.rl = rl;
		status.runlist_merged = 1;
		ntfs_debug("Allocated cluster, lcn 0x%llx.",
				(unsigned long long)lcn);
		/* Map and lock the mft record and get the attribute record. */
		if (!NInoAttr(ni))
			base_ni = ni;
		else
			base_ni = ni->ext.base_ntfs_ino;
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			break;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			unmap_mft_record(base_ni);
			break;
		}
		status.mft_attr_mapped = 1;
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			break;
		}
		m = ctx->mrec;
		a = ctx->attr;
		/*
		 * Find the runlist element at which this attribute extent
		 * starts.  The fragment must already be mapped to have got
		 * here, so the nolock _rl_ lookup is sufficient.
		 */
		vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);
		rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);
		BUG_ON(!rl2);
		BUG_ON(!rl2->length);
		BUG_ON(rl2->lcn < LCN_HOLE);
		highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
		/*
		 * A zero highest_vcn means it was not stored; derive the real
		 * value (which can genuinely be zero) from allocated_size.
		 */
		if (!highest_vcn)
			highest_vcn = (sle64_to_cpu(
					a->data.non_resident.allocated_size) >>
					vol->cluster_size_bits) - 1;
		/*
		 * Determine the size of the mapping pairs array for the new
		 * extent, i.e. the old extent with the hole filled.
		 */
		mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,
				highest_vcn);
		if (unlikely(mp_size <= 0)) {
			if (!(err = mp_size))
				err = -EIO;
			ntfs_debug("Failed to get size for mapping pairs "
					"array, error code %i.", err);
			break;
		}
		/*
		 * Resize the attribute record to fit the new mapping pairs
		 * array, remembering the old length for error recovery.
		 */
		attr_rec_len = le32_to_cpu(a->length);
		err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset));
		if (unlikely(err)) {
			BUG_ON(err != -ENOSPC);
			/*
			 * Growing into another extent / making the attribute
			 * fit by moving things around is not implemented.
			 */
			ntfs_error(vol->sb, "Not enough space in the mft "
					"record for the extended attribute "
					"record. This case is not "
					"implemented yet.");
			err = -EOPNOTSUPP;
			break ;
		}
		status.mp_rebuilt = 1;
		/*
		 * Generate the mapping pairs array directly into the
		 * attribute record.
		 */
		err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset),
				mp_size, rl2, vcn, highest_vcn, NULL);
		if (unlikely(err)) {
			ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, "
					"attribute type 0x%x, because building "
					"the mapping pairs failed with error "
					"code %i.", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			err = -EIO;
			break;
		}
		/* Update highest_vcn, but only if it was not stored before. */
		if (unlikely(!a->data.non_resident.highest_vcn))
			a->data.non_resident.highest_vcn =
					cpu_to_sle64(highest_vcn);
		/*
		 * Sparse/compressed attribute: the compressed size grows by
		 * one cluster and must be updated in both the ntfs inode and
		 * the base (first) attribute extent.
		 */
		if (likely(NInoSparse(ni) || NInoCompressed(ni))) {
			/*
			 * If this is not the first extent, commit the current
			 * one to disk and switch to the first extent.
			 */
			if (a->data.non_resident.lowest_vcn) {
				flush_dcache_mft_record_page(ctx->ntfs_ino);
				mark_mft_record_dirty(ctx->ntfs_ino);
				ntfs_attr_reinit_search_ctx(ctx);
				err = ntfs_attr_lookup(ni->type, ni->name,
						ni->name_len, CASE_SENSITIVE,
						0, NULL, 0, ctx);
				if (unlikely(err)) {
					status.attr_switched = 1;
					break;
				}
				/* @m is not used any more so it is not set. */
				a = ctx->attr;
			}
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			a->data.non_resident.compressed_size =
					cpu_to_sle64(ni->itype.compressed.size);
			write_unlock_irqrestore(&ni->size_lock, flags);
		}
		/* Ensure the changes make it to disk. */
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
		/* Hole filled successfully: clear the rollback state. */
		status.runlist_merged = 0;
		status.mft_attr_mapped = 0;
		status.mp_rebuilt = 0;
		/* Load the map cache with the new cluster and map via it. */
		was_hole = true;
		vcn = bh_cpos;
		vcn_len = 1;
		lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
		cdelta = 0;
		/*
		 * Drop the runlist lock if the cached cluster covers all
		 * remaining clusters of the write.
		 */
		if (likely(vcn + vcn_len >= cend)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
			rl = NULL;
		}
		goto map_buffer_cached;
	} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
	/* If there are no errors, do the next page. */
	if (likely(!err && ++u < nr_pages))
		goto do_next_page;
	/* If there are no errors, release the runlist lock if still held. */
	if (likely(!err)) {
		if (unlikely(rl_write_locked)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
		} else if (unlikely(rl))
			up_read(&ni->runlist.lock);
		rl = NULL;
	}
	/* Wait for any reads issued above to complete. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	while (wait_bh > wait) {
		bh = *--wait_bh;
		wait_on_buffer(bh);
		if (likely(buffer_uptodate(bh))) {
			page = bh->b_page;
			bh_pos = ((s64)page->index << PAGE_SHIFT) +
					bh_offset(bh);
			/*
			 * Zero the part of the buffer that overflows the
			 * initialized size, if any.
			 */
			if (unlikely(bh_pos + blocksize > initialized_size)) {
				int ofs = 0;

				if (likely(bh_pos < initialized_size))
					ofs = initialized_size - bh_pos;
				zero_user_segment(page, bh_offset(bh) + ofs,
						blocksize);
			}
		} else /* read failed */
			err = -EIO;
	}
	if (likely(!err)) {
		/* Success: clear buffer_new on all buffers and return. */
		u = 0;
		do {
			bh = head = page_buffers(pages[u]);
			do {
				if (buffer_new(bh))
					clear_buffer_new(bh);
			} while ((bh = bh->b_this_page) != head);
		} while (++u < nr_pages);
		ntfs_debug("Done.");
		return err;
	}
	/* Error path starts here. */
	if (status.attr_switched) {
		/* Get back to the attribute extent we modified. */
		ntfs_attr_reinit_search_ctx(ctx);
		if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) {
			ntfs_error(vol->sb, "Failed to find required "
					"attribute extent of attribute in "
					"error code path. Run chkdsk to "
					"recover.");
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			write_unlock_irqrestore(&ni->size_lock, flags);
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
			/*
			 * Only the compressed size of the base attribute
			 * extent is now wrong; chkdsk can fix that.
			 */
			NVolSetErrors(vol);
		} else {
			m = ctx->mrec;
			a = ctx->attr;
			status.attr_switched = 0;
		}
	}
	/*
	 * Undo the runlist merge by punching the allocated cluster back out
	 * as a hole, then free the on-disk cluster.  This is only done when
	 * the mapped attribute extent was not switched away.
	 */
	if (status.runlist_merged && !status.attr_switched) {
		BUG_ON(!rl_write_locked);
		/* Make the file cluster we allocated sparse again. */
		if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) {
			ntfs_error(vol->sb, "Failed to punch hole into "
					"attribute runlist in error code "
					"path. Run chkdsk to recover the "
					"lost cluster.");
			NVolSetErrors(vol);
		} else {
			status.runlist_merged = 0;
			/*
			 * Deallocate the on-disk cluster, but only after the
			 * vcn was successfully punched out of the runlist.
			 */
			down_write(&vol->lcnbmp_lock);
			if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path. Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			up_write(&vol->lcnbmp_lock);
		}
	}
	/*
	 * Restore the attribute record to its old size and rebuild the old
	 * mapping pairs array — only possible once the runlist has been
	 * restored above.
	 */
	if (status.mp_rebuilt && !status.runlist_merged) {
		if (ntfs_attr_record_resize(m, a, attr_rec_len)) {
			ntfs_error(vol->sb, "Failed to restore attribute "
					"record in error code path. Run "
					"chkdsk to recover.");
			NVolSetErrors(vol);
		} else /* resize succeeded */ {
			if (ntfs_mapping_pairs_build(vol, (u8*)a +
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), attr_rec_len -
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), ni->runlist.rl,
					vcn, highest_vcn, NULL)) {
				ntfs_error(vol->sb, "Failed to restore "
						"mapping pairs array in error "
						"code path. Run chkdsk to "
						"recover.");
				NVolSetErrors(vol);
			}
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
		}
	}
	/* Release the mft record and the attribute search context. */
	if (status.mft_attr_mapped) {
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
	}
	/* Release the runlist lock. */
	if (rl_write_locked)
		up_write(&ni->runlist.lock);
	else if (rl)
		up_read(&ni->runlist.lock);
	/*
	 * Finally, make any buffers that were instantiated (buffer_new still
	 * set) uptodate and dirty so the zeroed data is not lost; stop at the
	 * buffer where the error occurred.
	 */
	nr_pages = u;
	u = 0;
	end = bh_cpos << vol->cluster_size_bits;
	do {
		page = pages[u];
		bh = head = page_buffers(page);
		do {
			if (u == nr_pages &&
					((s64)page->index << PAGE_SHIFT) +
					bh_offset(bh) >= end)
				break;
			if (!buffer_new(bh))
				continue;
			clear_buffer_new(bh);
			if (!buffer_uptodate(bh)) {
				if (PageUptodate(page))
					set_buffer_uptodate(bh);
				else {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
			}
			mark_buffer_dirty(bh);
		} while ((bh = bh->b_this_page) != head);
	} while (++u <= nr_pages);
	ntfs_error(vol->sb, "Failed. Returning error code %i.", err);
	return err;
}
1359
/*
 * ntfs_flush_dcache_pages - flush the data cache for an array of pages
 * @pages:	array of pages to flush
 * @nr_pages:	number of pages in @pages (must be at least one)
 *
 * Call flush_dcache_page() on every page in @pages, walking the array from
 * the last entry down to the first.
 */
static inline void ntfs_flush_dcache_pages(struct page **pages,
		unsigned nr_pages)
{
	BUG_ON(!nr_pages);
	/* Post-decrement so the final iteration flushes pages[0]. */
	while (nr_pages--)
		flush_dcache_page(pages[nr_pages]);
}
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
/**
 * ntfs_commit_pages_after_non_resident_write - commit a non-resident write
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in the file at which the write begins
 * @bytes:	number of bytes that were written
 *
 * Mark the buffers covering [@pos, @pos + @bytes) uptodate and dirty in all
 * @nr_pages pages (setting each page uptodate when all of its buffers are),
 * and, if the write extended past the initialized size, update
 * initialized_size — and i_size/data_size if also exceeded — in both the
 * ntfs/vfs inodes and the attribute record.
 *
 * Return 0 on success and -errno on error; on non-ENOMEM errors the volume
 * is flagged as having errors.
 */
static inline int ntfs_commit_pages_after_non_resident_write(
		struct page **pages, const unsigned nr_pages,
		s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct buffer_head *bh, *head;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	unsigned long flags;
	unsigned blocksize, u;
	int err;

	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	blocksize = vi->i_sb->s_blocksize;
	end = pos + bytes;
	u = 0;
	do {
		s64 bh_pos;
		struct page *page;
		bool partial;

		page = pages[u];
		bh_pos = (s64)page->index << PAGE_SHIFT;
		bh = head = page_buffers(page);
		partial = false;
		do {
			s64 bh_end;

			/*
			 * Buffers inside the write become uptodate and dirty;
			 * a non-uptodate buffer outside it keeps the page
			 * from being marked uptodate below.
			 */
			bh_end = bh_pos + blocksize;
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh))
					partial = true;
			} else {
				set_buffer_uptodate(bh);
				mark_buffer_dirty(bh);
			}
		} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
		/*
		 * If all buffers are now uptodate but the page was not, set
		 * the page uptodate.
		 */
		if (!partial && !PageUptodate(page))
			SetPageUptodate(page);
	} while (++u < nr_pages);
	/*
	 * If the write stayed inside the initialized size, no size updates
	 * are needed and we are done.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end <= initialized_size) {
		ntfs_debug("Done.");
		return 0;
	}
	/*
	 * The write extended past the initialized size: update
	 * initialized_size (and possibly i_size) in both the inode and the
	 * attribute record in the mft.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	BUG_ON(!NInoNonResident(ni));
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	write_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(end > ni->allocated_size);
	ni->initialized_size = end;
	a->data.non_resident.initialized_size = cpu_to_sle64(end);
	if (end > i_size_read(vi)) {
		i_size_write(vi, end);
		a->data.non_resident.data_size =
				a->data.non_resident.initialized_size;
	}
	write_unlock_irqrestore(&ni->size_lock, flags);
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
			"code %i).", err);
	if (err != -ENOMEM)
		NVolSetErrors(ni->vol);
	return err;
}
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
/**
 * ntfs_commit_pages_after_write - commit the received data to the mft record
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called from ntfs_perform_write() with i_mutex held on the inode
 * (@pages[0]->mapping->host).  Non-resident attributes are handed off to
 * ntfs_commit_pages_after_non_resident_write(); for resident attributes the
 * data is copied from the (single) page directly into the attribute value
 * inside the mft record, and the initialized size / i_size are updated if the
 * write extended the attribute.
 *
 * Return 0 on success and -errno on error.
 */
static int ntfs_commit_pages_after_write(struct page **pages,
		const unsigned nr_pages, s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	loff_t i_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct page *page;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	char *kattr, *kaddr;
	unsigned long flags;
	u32 attr_len;
	int err;

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	page = pages[0];
	BUG_ON(!page);
	vi = page->mapping->host;
	ni = NTFS_I(vi);
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, page->index, nr_pages,
			(long long)pos, bytes);
	if (NInoNonResident(ni))
		return ntfs_commit_pages_after_non_resident_write(pages,
				nr_pages, pos, bytes);
	BUG_ON(nr_pages > 1);
	/*
	 * Attribute is resident, so the write fits inside a single page and
	 * the data must be copied into the attribute value in the mft record.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	BUG_ON(NInoNonResident(ni));
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		/* A missing attribute here means on-disk corruption. */
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	i_size = i_size_read(vi);
	BUG_ON(attr_len != i_size);
	BUG_ON(pos > attr_len);
	end = pos + bytes;
	/* The write must fit inside the allocated attribute record space. */
	BUG_ON(end > le32_to_cpu(a->length) -
			le16_to_cpu(a->data.resident.value_offset));
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	kaddr = kmap_atomic(page);
	/* Copy the received data from the page to the mft record. */
	memcpy(kattr + pos, kaddr + pos, bytes);
	/* Update the attribute length if necessary. */
	if (end > attr_len) {
		attr_len = end;
		a->data.resident.value_length = cpu_to_le32(attr_len);
	}
	/*
	 * If the page is not uptodate, bring the regions outside the written
	 * range uptodate by copying data from the mft record to the page and
	 * zeroing beyond the attribute value, then mark the page uptodate.
	 */
	if (!PageUptodate(page)) {
		if (pos > 0)
			memcpy(kaddr, kattr, pos);
		if (end < attr_len)
			memcpy(kaddr + end, kattr + end, attr_len - end);
		/* Zero the region outside the end of the attribute value. */
		memset(kaddr + attr_len, 0, PAGE_SIZE - attr_len);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	kunmap_atomic(kaddr);
	/* Update initialized_size/i_size if necessary. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	BUG_ON(end > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	BUG_ON(initialized_size != i_size);
	if (end > initialized_size) {
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = end;
		i_size_write(vi, end);
		write_unlock_irqrestore(&ni->size_lock, flags);
	}
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (err == -ENOMEM) {
		ntfs_warning(vi->i_sb, "Error allocating memory required to "
				"commit the write.");
		if (PageUptodate(page)) {
			ntfs_warning(vi->i_sb, "Page is uptodate, setting "
					"dirty so the write will be retried "
					"later on by the VM.");
			/*
			 * The page is uptodate, so redirty it without touching
			 * its buffers and let writeback retry the commit.
			 */
			__set_page_dirty_nobuffers(page);
			err = 0;
		} else
			ntfs_error(vi->i_sb, "Page is not uptodate. Written "
					"data has been lost.");
	} else {
		ntfs_error(vi->i_sb, "Resident attribute commit write failed "
				"with error %i.", err);
		NVolSetErrors(ni->vol);
	}
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
}
1679
1680
1681
1682
1683
1684
/*
 * Copy as much as we can into the pages and return the number of bytes which
 * were successfully copied.  If a fault is encountered then zero out the
 * remainder of the destination pages (from the fault position onwards) and
 * return the number of bytes which were copied before the fault.
 *
 * Works on a private copy of the iterator, so the caller's @i is only
 * advanced by the caller (by the returned byte count) afterwards.
 */
static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
		unsigned ofs, struct iov_iter *i, size_t bytes)
{
	struct page **last_page = pages + nr_pages;
	size_t total = 0;
	struct iov_iter data = *i;
	unsigned len, copied;

	do {
		/* Copy at most up to the end of the current page. */
		len = PAGE_SIZE - ofs;
		if (len > bytes)
			len = bytes;
		copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
				len);
		total += copied;
		bytes -= copied;
		if (!bytes)
			break;
		iov_iter_advance(&data, copied);
		/* A short copy means we hit a fault in user space. */
		if (copied < len)
			goto err;
		/* Subsequent pages are written from their start. */
		ofs = 0;
	} while (++pages < last_page);
out:
	return total;
err:
	/* Zero the rest of the target like __copy_from_user(). */
	len = PAGE_SIZE - copied;
	do {
		if (len > bytes)
			len = bytes;
		zero_user(*pages, copied, len);
		bytes -= len;
		copied = 0;
		len = PAGE_SIZE;
	} while (++pages < last_page);
	goto out;
}
1723
1724
1725
1726
1727
1728
1729
/**
 * ntfs_perform_write - perform buffered write to a file
 * @file:	file to write to
 * @i:		iov_iter describing the source data
 * @pos:	byte position in the file at which to begin the write
 *
 * Grabs and locks the page cache pages covering each chunk of the write,
 * prepares them (allocating clusters / mapping buffers for non-resident
 * attributes), copies the user data in, and commits the result via
 * ntfs_commit_pages_after_write().  Called with i_mutex held.
 *
 * Return the number of bytes written or -errno if nothing was written.
 */
static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
		loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *vi = mapping->host;
	ntfs_inode *ni = NTFS_I(vi);
	ntfs_volume *vol = ni->vol;
	struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
	struct page *cached_page = NULL;
	VCN last_vcn;
	LCN lcn;
	size_t bytes;
	ssize_t status, written = 0;
	unsigned nr_pages;

	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
			"0x%llx, count 0x%lx.", vi->i_ino,
			(unsigned)le32_to_cpu(ni->type),
			(unsigned long long)pos,
			(unsigned long)iov_iter_count(i));
	/*
	 * If a previous ntfs_truncate() failed, repeat it and abort if it
	 * fails again.
	 */
	if (unlikely(NInoTruncateFailed(ni))) {
		int err;

		inode_dio_wait(vi);
		err = ntfs_truncate(vi);
		if (err || NInoTruncateFailed(ni)) {
			if (!err)
				err = -EIO;
			ntfs_error(vol->sb, "Cannot perform write to inode "
					"0x%lx, attribute type 0x%x, because "
					"ntfs_truncate() failed (error code "
					"%i).", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			return err;
		}
	}
	/*
	 * Determine the number of pages per cluster for non-resident
	 * attributes (one page otherwise).
	 */
	nr_pages = 1;
	if (vol->cluster_size > PAGE_SIZE && NInoNonResident(ni))
		nr_pages = vol->cluster_size >> PAGE_SHIFT;
	last_vcn = -1;
	do {
		VCN vcn;
		pgoff_t idx, start_idx;
		unsigned ofs, do_pages, u;
		size_t copied;

		start_idx = idx = pos >> PAGE_SHIFT;
		ofs = pos & ~PAGE_MASK;
		bytes = PAGE_SIZE - ofs;
		do_pages = 1;
		if (nr_pages > 1) {
			vcn = pos >> vol->cluster_size_bits;
			if (vcn != last_vcn) {
				last_vcn = vcn;
				/*
				 * Look up the lcn of the vcn the write is in.
				 * If it is a hole the whole cluster needs to
				 * be locked down, so widen the chunk below.
				 */
				down_read(&ni->runlist.lock);
				lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
						vol->cluster_size_bits, false);
				up_read(&ni->runlist.lock);
				if (unlikely(lcn < LCN_HOLE)) {
					if (lcn == LCN_ENOMEM)
						status = -ENOMEM;
					else {
						status = -EIO;
						ntfs_error(vol->sb, "Cannot "
							"perform write to "
							"inode 0x%lx, "
							"attribute type 0x%x, "
							"because the attribute "
							"is corrupt.",
							vi->i_ino, (unsigned)
							le32_to_cpu(ni->type));
					}
					break;
				}
				if (lcn == LCN_HOLE) {
					start_idx = (pos & ~(s64)
							vol->cluster_size_mask)
							>> PAGE_SHIFT;
					bytes = vol->cluster_size - (pos &
							vol->cluster_size_mask);
					do_pages = nr_pages;
				}
			}
		}
		if (bytes > iov_iter_count(i))
			bytes = iov_iter_count(i);
again:
		/*
		 * Bring in the user page(s) that we will copy from _first_.
		 * Otherwise there is a nasty deadlock on copying from the same
		 * page(s) as we are writing to, without it/them being marked
		 * uptodate.  Note, at present there is nothing to stop the
		 * pages being swapped out between us bringing them into
		 * memory and doing the actual copying.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}
		/* Get and lock @do_pages starting at index @start_idx. */
		status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
				pages, &cached_page);
		if (unlikely(status))
			break;
		/*
		 * For non-resident attributes, we need to fill any holes with
		 * actual clusters and ensure all buffers are mapped.  We also
		 * need to bring uptodate any buffers that are only partially
		 * being written to.
		 */
		if (NInoNonResident(ni)) {
			status = ntfs_prepare_pages_for_non_resident_write(
					pages, do_pages, pos, bytes);
			if (unlikely(status)) {
				do {
					unlock_page(pages[--do_pages]);
					put_page(pages[do_pages]);
				} while (do_pages);
				break;
			}
		}
		/* Index within @pages of the first page actually written. */
		u = (pos >> PAGE_SHIFT) - pages[0]->index;
		copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
					i, bytes);
		ntfs_flush_dcache_pages(pages + u, do_pages - u);
		status = 0;
		if (likely(copied == bytes)) {
			status = ntfs_commit_pages_after_write(pages, do_pages,
					pos, bytes);
			if (!status)
				status = bytes;
		}
		do {
			unlock_page(pages[--do_pages]);
			put_page(pages[do_pages]);
		} while (do_pages);
		if (unlikely(status < 0))
			break;
		copied = status;
		cond_resched();
		if (unlikely(!copied)) {
			size_t sc;

			/*
			 * We failed to copy anything.  Fall back to a single
			 * segment length write.
			 *
			 * This is needed to avoid a livelock when not all
			 * segments in the iov can be copied at once without a
			 * pagefault.
			 */
			sc = iov_iter_single_seg_count(i);
			if (bytes > sc)
				bytes = sc;
			goto again;
		}
		iov_iter_advance(i, copied);
		pos += copied;
		written += copied;
		balance_dirty_pages_ratelimited(mapping);
		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}
	} while (iov_iter_count(i));
	if (cached_page)
		put_page(cached_page);
	ntfs_debug("Done. Returning %s (written 0x%lx, status %li).",
			written ? "written" : "status", (unsigned long)written,
			(long)status);
	return written ? written : status;
}
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925 static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1926 {
1927 struct file *file = iocb->ki_filp;
1928 struct inode *vi = file_inode(file);
1929 ssize_t written = 0;
1930 ssize_t err;
1931
1932 inode_lock(vi);
1933
1934 current->backing_dev_info = inode_to_bdi(vi);
1935 err = ntfs_prepare_file_for_write(iocb, from);
1936 if (iov_iter_count(from) && !err)
1937 written = ntfs_perform_write(file, from, iocb->ki_pos);
1938 current->backing_dev_info = NULL;
1939 inode_unlock(vi);
1940 iocb->ki_pos += written;
1941 if (likely(written > 0))
1942 written = generic_write_sync(iocb, written);
1943 return written ? written : err;
1944 }
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970 static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end,
1971 int datasync)
1972 {
1973 struct inode *vi = filp->f_mapping->host;
1974 int err, ret = 0;
1975
1976 ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
1977
1978 err = file_write_and_wait_range(filp, start, end);
1979 if (err)
1980 return err;
1981 inode_lock(vi);
1982
1983 BUG_ON(S_ISDIR(vi->i_mode));
1984 if (!datasync || !NInoNonResident(NTFS_I(vi)))
1985 ret = __ntfs_write_inode(vi, 1);
1986 write_inode_now(vi, !datasync);
1987
1988
1989
1990
1991
1992 err = sync_blockdev(vi->i_sb->s_bdev);
1993 if (unlikely(err && !ret))
1994 ret = err;
1995 if (likely(!ret))
1996 ntfs_debug("Done.");
1997 else
1998 ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx. Error "
1999 "%u.", datasync ? "data" : "", vi->i_ino, -ret);
2000 inode_unlock(vi);
2001 return ret;
2002 }
2003
2004 #endif
2005
/* File operations for regular NTFS files; write support only with NTFS_RW. */
const struct file_operations ntfs_file_ops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
#ifdef NTFS_RW
	.write_iter	= ntfs_file_write_iter,
	.fsync		= ntfs_file_fsync,
#endif
	.mmap		= generic_file_mmap,
	.open		= ntfs_file_open,
	.splice_read	= generic_file_splice_read,
};

/* Inode operations for regular NTFS files. */
const struct inode_operations ntfs_file_inode_ops = {
#ifdef NTFS_RW
	.setattr	= ntfs_setattr,
#endif
};

/* Empty operations tables used for inodes with unsupported content. */
const struct file_operations ntfs_empty_file_ops = {};

const struct inode_operations ntfs_empty_inode_ops = {};