This source file includes the following definitions:
- ntfs_end_buffer_async_read
- ntfs_read_block
- ntfs_readpage
- ntfs_write_block
- ntfs_write_mst_block
- ntfs_writepage
- ntfs_bmap
- mark_ntfs_record_dirty

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/bio.h>

#include "aops.h"
#include "attrib.h"
#include "debug.h"
#include "inode.h"
#include "mft.h"
#include "runlist.h"
#include "types.h"
#include "ntfs.h"

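/**
 * ntfs_end_buffer_async_read - async io completion for reading attributes
 * @bh:		buffer head on which io is completed
 * @uptodate:	whether @bh is now uptodate or not
 *
 * Asynchronous i/o completion handler for reading pages belonging to the
 * attribute address space of an inode.  The pages can be either standard
 * pages or, if the inode is mst protected, pages containing mst protected
 * records, in which case the mst fixups are removed (post read) before the
 * page is marked uptodate.  Buffers beyond the initialized size are zeroed.
 */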
static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first, *tmp;
	struct page *page;
	struct inode *vi;
	ntfs_inode *ni;
	int page_uptodate = 1;

	page = bh->b_page;
	vi = page->mapping->host;
	ni = NTFS_I(vi);

	if (likely(uptodate)) {
		loff_t i_size;
		s64 file_ofs, init_size;

		set_buffer_uptodate(bh);

		file_ofs = ((s64)page->index << PAGE_SHIFT) +
				bh_offset(bh);
		read_lock_irqsave(&ni->size_lock, flags);
		init_size = ni->initialized_size;
		i_size = i_size_read(vi);
		read_unlock_irqrestore(&ni->size_lock, flags);
		if (unlikely(init_size > i_size)) {
			/* Race with shrinking truncate. */
			init_size = i_size;
		}
		/* Check for the current buffer head overflowing. */
		if (unlikely(file_ofs + bh->b_size > init_size)) {
			int ofs;
			void *kaddr;

			ofs = 0;
			if (file_ofs < init_size)
				ofs = init_size - file_ofs;
			kaddr = kmap_atomic(page);
			memset(kaddr + bh_offset(bh) + ofs, 0,
					bh->b_size - ofs);
			flush_dcache_page(page);
			kunmap_atomic(kaddr);
		}
	} else {
		clear_buffer_uptodate(bh);
		SetPageError(page);
		ntfs_error(ni->vol->sb, "Buffer I/O error, logical block "
				"0x%llx.", (unsigned long long)bh->b_blocknr);
	}
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			if (likely(buffer_locked(tmp)))
				goto still_busy;
			/* Async buffers must be locked. */
			BUG();
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	/*
	 * If none of the buffers had errors then we can set the page uptodate,
	 * but we first have to perform the post read mst fixups, if the
	 * attribute is mst protected, i.e. if NInoMstProtected(ni) is true.
	 */
	if (!NInoMstProtected(ni)) {
		if (likely(page_uptodate && !PageError(page)))
			SetPageUptodate(page);
	} else {
		u8 *kaddr;
		unsigned int i, recs;
		u32 rec_size;

		rec_size = ni->itype.index.block_size;
		recs = PAGE_SIZE / rec_size;
		/* Should have been verified before i/o was submitted. */
		BUG_ON(!recs);
		kaddr = kmap_atomic(page);
		for (i = 0; i < recs; i++)
			post_read_mst_fixup((NTFS_RECORD*)(kaddr +
					i * rec_size), rec_size);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
		if (likely(page_uptodate && !PageError(page)))
			SetPageUptodate(page);
	}
	unlock_page(page);
	return;
still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}
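/**
 * ntfs_read_block - fill a @page of an address space with data
 * @page:	page cache page to fill with data
 *
 * We read each buffer asynchronously and when all buffers are read in, our
 * io completion handler ntfs_end_buffer_async_read(), if required,
 * automatically applies the mst fixups to the page before finally marking it
 * uptodate and unlocking it.
 *
 * We only enforce the allocated_size limit because i_size is checked for in
 * ntfs_readpage().
 *
 * Return 0 on success and -errno on error.
 *
 * Contains an adapted version of fs/buffer.c::block_read_full_page().
 */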
static int ntfs_read_block(struct page *page)
{
	loff_t i_size;
	VCN vcn;
	LCN lcn;
	s64 init_size;
	struct inode *vi;
	ntfs_inode *ni;
	ntfs_volume *vol;
	runlist_element *rl;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	sector_t iblock, lblock, zblock;
	unsigned long flags;
	unsigned int blocksize, vcn_ofs;
	int i, nr;
	unsigned char blocksize_bits;

	vi = page->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;

	/* $MFT/$DATA must have its complete runlist in memory at all times. */
	BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));

	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, blocksize, 0);
		if (unlikely(!page_has_buffers(page))) {
			unlock_page(page);
			return -ENOMEM;
		}
	}
	bh = head = page_buffers(page);
	BUG_ON(!bh);

	/*
	 * We may be racing with truncate.  To avoid some of the problems we
	 * now take a snapshot of the various sizes and use those for the whole
	 * of the function.  In case of an extending truncate it just means we
	 * may leave some buffers unmapped which are now allocated.  This is
	 * not a problem since these buffers will just get mapped when a write
	 * occurs.  In case of a shrinking truncate, we will detect this later
	 * on due to the runlist being incomplete and if the page is being
	 * fully truncated, truncate will throw it away as soon as we unlock
	 * it so no need to worry what we do with it.
	 */
	iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
	read_lock_irqsave(&ni->size_lock, flags);
	lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
	init_size = ni->initialized_size;
	i_size = i_size_read(vi);
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (unlikely(init_size > i_size)) {
		/* Race with shrinking truncate. */
		init_size = i_size;
	}
	zblock = (init_size + blocksize - 1) >> blocksize_bits;

	/* Loop through all the buffers in the page. */
	rl = NULL;
	nr = i = 0;
	do {
		int err = 0;

		if (unlikely(buffer_uptodate(bh)))
			continue;
		if (unlikely(buffer_mapped(bh))) {
			arr[nr++] = bh;
			continue;
		}
		bh->b_bdev = vol->sb->s_bdev;
		/* Is the block within the allowed limits? */
		if (iblock < lblock) {
			bool is_retry = false;

			/* Convert iblock into corresponding vcn and offset. */
			vcn = (VCN)iblock << blocksize_bits >>
					vol->cluster_size_bits;
			vcn_ofs = ((VCN)iblock << blocksize_bits) &
					vol->cluster_size_mask;
			if (!rl) {
lock_retry_remap:
				down_read(&ni->runlist.lock);
				rl = ni->runlist.rl;
			}
			if (likely(rl != NULL)) {
				/* Seek to element containing target vcn. */
				while (rl->length && rl[1].vcn <= vcn)
					rl++;
				lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
			} else
				lcn = LCN_RL_NOT_MAPPED;
			/* Successful remap. */
			if (lcn >= 0) {
				/* Setup buffer head to correct block. */
				bh->b_blocknr = ((lcn << vol->cluster_size_bits)
						+ vcn_ofs) >> blocksize_bits;
				set_buffer_mapped(bh);
				/* Only read initialized data blocks. */
				if (iblock < zblock) {
					arr[nr++] = bh;
					continue;
				}
				/* Fully non-initialized data block, zero it. */
				goto handle_zblock;
			}
			/* It is a hole, need to zero it. */
			if (lcn == LCN_HOLE)
				goto handle_hole;
			/* If first try and runlist unmapped, map and retry. */
			if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
				is_retry = true;
				/*
				 * Attempt to map runlist, dropping lock for
				 * the duration.
				 */
				up_read(&ni->runlist.lock);
				err = ntfs_map_runlist(ni, vcn);
				if (likely(!err))
					goto lock_retry_remap;
				rl = NULL;
			} else if (!rl)
				up_read(&ni->runlist.lock);
			/*
			 * If buffer is outside the runlist, treat it as a
			 * hole.  This can happen due to concurrent truncate
			 * for example.
			 */
			if (err == -ENOENT || lcn == LCN_ENOENT) {
				err = 0;
				goto handle_hole;
			}
			/* Hard error, zero out region. */
			if (!err)
				err = -EIO;
			bh->b_blocknr = -1;
			SetPageError(page);
			ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
					"attribute type 0x%x, vcn 0x%llx, "
					"offset 0x%x because its location on "
					"disk could not be determined%s "
					"(error code %i).", ni->mft_no,
					ni->type, (unsigned long long)vcn,
					vcn_ofs, is_retry ? " even after "
					"retrying" : "", err);
		}
		/*
		 * Either iblock was outside lblock limits or
		 * ntfs_rl_vcn_to_lcn() returned error.  Just zero that portion
		 * of the page and set the buffer uptodate.
		 */
handle_hole:
		bh->b_blocknr = -1UL;
		clear_buffer_mapped(bh);
handle_zblock:
		zero_user(page, i * blocksize, blocksize);
		if (likely(!err))
			set_buffer_uptodate(bh);
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* Check we have at least one buffer ready for i/o. */
	if (nr) {
		struct buffer_head *tbh;

		/* Lock the buffers. */
		for (i = 0; i < nr; i++) {
			tbh = arr[i];
			lock_buffer(tbh);
			tbh->b_end_io = ntfs_end_buffer_async_read;
			set_buffer_async_read(tbh);
		}
		/* Finally, start i/o on the buffers. */
		for (i = 0; i < nr; i++) {
			tbh = arr[i];
			if (likely(!buffer_uptodate(tbh)))
				submit_bh(REQ_OP_READ, 0, tbh);
			else
				ntfs_end_buffer_async_read(tbh, 1);
		}
		return 0;
	}
	/* No i/o was scheduled on any of the buffers. */
	if (likely(!PageError(page)))
		SetPageUptodate(page);
	else /* Signal synchronous i/o error. */
		nr = -EIO;
	unlock_page(page);
	return nr;
}
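/**
 * ntfs_readpage - fill a @page of a @file with data from the device
 * @file:	open file to which the page @page belongs or NULL
 * @page:	page cache page to fill with data
 *
 * For non-resident attributes, ntfs_readpage() fills the @page of the open
 * file @file by calling the ntfs version of the generic
 * block_read_full_page() function, ntfs_read_block(), which in turn creates
 * and reads in the buffers associated with the page asynchronously.
 *
 * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the
 * data from the mft record (which at this stage is most likely in memory)
 * and fills the remainder with zeroes.  Thus, in this case, i/o is
 * synchronous, as even if the mft record is not cached at this point in
 * time, we need to wait for it to be read in before we can do the copy.
 *
 * Return 0 on success and -errno on error.
 */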
static int ntfs_readpage(struct file *file, struct page *page)
{
	loff_t i_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	u8 *addr;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *mrec;
	unsigned long flags;
	u32 attr_len;
	int err = 0;

retry_readpage:
	BUG_ON(!PageLocked(page));
	vi = page->mapping->host;
	i_size = i_size_read(vi);
	/* Is the page fully outside i_size? (truncate in progress) */
	if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
			PAGE_SHIFT)) {
		zero_user(page, 0, PAGE_SIZE);
		ntfs_debug("Read outside i_size - truncated?");
		goto done;
	}
	/*
	 * This can potentially happen because we clear PageUptodate() during
	 * ntfs_writepage() of MstProtected() attributes.
	 */
	if (PageUptodate(page)) {
		unlock_page(page);
		return 0;
	}
	ni = NTFS_I(vi);
	/*
	 * Only $DATA attributes can be encrypted and only unnamed $DATA
	 * attributes can be compressed.  Index root can have the flags set but
	 * this means to create compressed/encrypted files, not that the
	 * attribute is compressed/encrypted.  Note we need to check for
	 * AT_INDEX_ALLOCATION since this is the type of both directory and
	 * index inodes.
	 */
	if (ni->type != AT_INDEX_ALLOCATION) {
		/* If attribute is encrypted, deny access, just like NT4. */
		if (NInoEncrypted(ni)) {
			BUG_ON(ni->type != AT_DATA);
			err = -EACCES;
			goto err_out;
		}
		/* Compressed data streams are handled in compress.c. */
		if (NInoNonResident(ni) && NInoCompressed(ni)) {
			BUG_ON(ni->type != AT_DATA);
			BUG_ON(ni->name_len);
			return ntfs_read_compressed_block(page);
		}
	}
	/* NInoNonResident() == NInoIndexAllocPresent() */
	if (NInoNonResident(ni)) {
		/* Normal, non-resident data stream. */
		return ntfs_read_block(page);
	}
	/*
	 * Attribute is resident, implying it is not compressed or encrypted.
	 * This also means the attribute is smaller than an mft record and
	 * hence smaller than a page, so can simply zero out any pages with
	 * index above 0.
	 */
	if (unlikely(page->index > 0)) {
		zero_user(page, 0, PAGE_SIZE);
		goto done;
	}
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	mrec = map_mft_record(base_ni);
	if (IS_ERR(mrec)) {
		err = PTR_ERR(mrec);
		goto err_out;
	}
	/*
	 * If a parallel write made the attribute non-resident, drop the mft
	 * record and retry the readpage.
	 */
	if (unlikely(NInoNonResident(ni))) {
		unmap_mft_record(base_ni);
		goto retry_readpage;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, mrec);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto unm_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err))
		goto put_unm_err_out;
	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
	read_lock_irqsave(&ni->size_lock, flags);
	if (unlikely(attr_len > ni->initialized_size))
		attr_len = ni->initialized_size;
	i_size = i_size_read(vi);
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (unlikely(attr_len > i_size)) {
		/* Race with shrinking truncate. */
		attr_len = i_size;
	}
	addr = kmap_atomic(page);
	/* Copy the data to the page. */
	memcpy(addr, (u8*)ctx->attr +
			le16_to_cpu(ctx->attr->data.resident.value_offset),
			attr_len);
	/* Zero the remainder of the page. */
	memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
	flush_dcache_page(page);
	kunmap_atomic(addr);
put_unm_err_out:
	ntfs_attr_put_search_ctx(ctx);
unm_err_out:
	unmap_mft_record(base_ni);
done:
	SetPageUptodate(page);
err_out:
	unlock_page(page);
	return err;
}

#ifdef NTFS_RW

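/**
 * ntfs_write_block - write a @page to the backing store
 * @page:	page cache page to write out
 * @wbc:	writeback control structure
 *
 * This function is for writing pages belonging to non-resident, non-mst
 * protected attributes to their backing store.
 *
 * For a page with buffers, map and write the dirty buffers asynchronously
 * under page writeback.  For a page without buffers, create buffers for the
 * page, then proceed as above.
 *
 * If a page does not have buffers the page dirty state is definitive.  If a
 * page does have buffers, the page dirty state is just a hint, and the
 * buffer dirty state is definitive.
 *
 * Return 0 on success and -errno on error.
 *
 * Based on ntfs_read_block() and __block_write_full_page().
 */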
static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
{
	VCN vcn;
	LCN lcn;
	s64 initialized_size;
	loff_t i_size;
	sector_t block, dblock, iblock;
	struct inode *vi;
	ntfs_inode *ni;
	ntfs_volume *vol;
	runlist_element *rl;
	struct buffer_head *bh, *head;
	unsigned long flags;
	unsigned int blocksize, vcn_ofs;
	int err;
	bool need_end_writeback;
	unsigned char blocksize_bits;

	vi = page->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;

	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
			"0x%lx.", ni->mft_no, ni->type, page->index);

	BUG_ON(!NInoNonResident(ni));
	BUG_ON(NInoMstProtected(ni));
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	if (!page_has_buffers(page)) {
		BUG_ON(!PageUptodate(page));
		create_empty_buffers(page, blocksize,
				(1 << BH_Uptodate) | (1 << BH_Dirty));
		if (unlikely(!page_has_buffers(page))) {
			ntfs_warning(vol->sb, "Error allocating page "
					"buffers. Redirtying page so we try "
					"again later.");
			/*
			 * Put the page back on mapping->dirty_pages, but
			 * leave its buffers' dirty state as-is.
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
	}
	bh = head = page_buffers(page);
	BUG_ON(!bh);

	/* NOTE: Different naming scheme to ntfs_read_block()! */

	/* The first block in the page. */
	block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);

	read_lock_irqsave(&ni->size_lock, flags);
	i_size = i_size_read(vi);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);

	/* The first out of bounds block for the data size. */
	dblock = (i_size + blocksize - 1) >> blocksize_bits;

	/* The last (fully or partially) initialized block. */
	iblock = initialized_size >> blocksize_bits;

	/*
	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we have inspected
	 * it, then we just miss that fact, and the page stays dirty.
	 *
	 * Loop through all the buffers in the page, mapping all the dirty
	 * buffers to disk addresses and handling any aliases from the
	 * underlying block device's mapping.
	 */
	rl = NULL;
	err = 0;
	do {
		bool is_retry = false;

		if (unlikely(block >= dblock)) {
			/*
			 * Mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.  Just clean them.
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}

		/* Clean buffers are not written out, so no need to map them. */
		if (!buffer_dirty(bh))
			continue;

		/* Make sure we have enough initialized size. */
		if (unlikely((block >= iblock) &&
				(initialized_size < i_size))) {
			/*
			 * TODO: If this page is fully outside the initialized
			 * size, zero out all pages between the current
			 * initialized size and the current page, then update
			 * the initialized size.
			 */
			if (block > iblock) {
				/* Not supported yet, see error path below. */
			}
			/*
			 * TODO: The current page straddles the initialized
			 * size.  Zero all non-uptodate buffers, set them
			 * uptodate, and update the initialized size.  There
			 * are no non-uptodate buffers if the page is uptodate.
			 */
			if (!PageUptodate(page)) {
				/* Not supported yet, see error path below. */
			}
			ntfs_error(vol->sb, "Writing beyond initialized size "
					"is not supported yet. Sorry.");
			err = -EOPNOTSUPP;
			break;
		}

		/* No need to map buffers that are already mapped. */
		if (buffer_mapped(bh))
			continue;

		/* Unmapped, dirty buffer.  Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;

		/* Convert block into corresponding vcn and offset. */
		vcn = (VCN)block << blocksize_bits;
		vcn_ofs = vcn & vol->cluster_size_mask;
		vcn >>= vol->cluster_size_bits;
		if (!rl) {
lock_retry_remap:
			down_read(&ni->runlist.lock);
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
		} else
			lcn = LCN_RL_NOT_MAPPED;
		/* Successful remap. */
		if (lcn >= 0) {
			/* Setup buffer head to point to correct block. */
			bh->b_blocknr = ((lcn << vol->cluster_size_bits) +
					vcn_ofs) >> blocksize_bits;
			set_buffer_mapped(bh);
			continue;
		}
		/* It is a hole, need to instantiate it. */
		if (lcn == LCN_HOLE) {
			u8 *kaddr;
			unsigned long *bpos, *bend;

			/* Check if the buffer is zero. */
			kaddr = kmap_atomic(page);
			bpos = (unsigned long *)(kaddr + bh_offset(bh));
			bend = (unsigned long *)((u8*)bpos + blocksize);
			do {
				if (unlikely(*bpos))
					break;
			} while (likely(++bpos < bend));
			kunmap_atomic(kaddr);
			if (bpos == bend) {
				/*
				 * Buffer is zero and sparse, no need to write
				 * it.
				 */
				bh->b_blocknr = -1;
				clear_buffer_dirty(bh);
				continue;
			}
			/* TODO: Instantiate the hole. */
			ntfs_error(vol->sb, "Writing into sparse regions is "
					"not supported yet. Sorry.");
			err = -EOPNOTSUPP;
			break;
		}
		/* If first try and runlist unmapped, map and retry. */
		if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
			is_retry = true;
			/*
			 * Attempt to map runlist, dropping lock for
			 * the duration.
			 */
			up_read(&ni->runlist.lock);
			err = ntfs_map_runlist(ni, vcn);
			if (likely(!err))
				goto lock_retry_remap;
			rl = NULL;
		} else if (!rl)
			up_read(&ni->runlist.lock);
		/*
		 * If buffer is outside the runlist, truncate has cut it out
		 * of the runlist.  Just clean and clear the buffer and set it
		 * uptodate so it can get discarded by the VM.
		 */
		if (err == -ENOENT || lcn == LCN_ENOENT) {
			bh->b_blocknr = -1;
			clear_buffer_dirty(bh);
			zero_user(page, bh_offset(bh), blocksize);
			set_buffer_uptodate(bh);
			err = 0;
			continue;
		}
		/* Failed to map the buffer, even after retrying. */
		if (!err)
			err = -EIO;
		bh->b_blocknr = -1;
		ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
				"attribute type 0x%x, vcn 0x%llx, offset 0x%x "
				"because its location on disk could not be "
				"determined%s (error code %i).", ni->mft_no,
				ni->type, (unsigned long long)vcn,
				vcn_ofs, is_retry ? " even after "
				"retrying" : "", err);
		break;
	} while (block++, (bh = bh->b_this_page) != head);

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* For the error case, need to reset bh to the beginning. */
	bh = head;

	/* Just an optimization, so ->writepage() is not called again. */
	if (unlikely(!PageUptodate(page))) {
		int uptodate = 1;
		do {
			if (!buffer_uptodate(bh)) {
				uptodate = 0;
				bh = head;
				break;
			}
		} while ((bh = bh->b_this_page) != head);
		if (uptodate)
			SetPageUptodate(page);
	}

	/* Setup all mapped, dirty buffers for async write i/o. */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh)) {
			lock_buffer(bh);
			if (test_clear_buffer_dirty(bh)) {
				BUG_ON(!buffer_uptodate(bh));
				mark_buffer_async_write(bh);
			} else
				unlock_buffer(bh);
		} else if (unlikely(err)) {
			/*
			 * For the error case.  The buffer may have been set
			 * dirty during the writepage of the previous page, so
			 * clear everything but ENOMEM errors.
			 */
			if (err != -ENOMEM)
				clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	if (unlikely(err)) {
		/*
		 * TODO: Remove the -EOPNOTSUPP check here once the missing
		 * functionality is implemented.
		 */
		if (unlikely(err == -EOPNOTSUPP))
			err = 0;
		else if (err == -ENOMEM) {
			ntfs_warning(vol->sb, "Error allocating memory. "
					"Redirtying page so we try again "
					"later.");
			/*
			 * Put the page back on mapping->dirty_pages, but
			 * leave its buffers' dirty state as-is.
			 */
			redirty_page_for_writepage(wbc, page);
			err = 0;
		} else
			SetPageError(page);
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);	/* Keeps try_to_free_buffers() away. */

	/* Submit the prepared buffers for i/o. */
	need_end_writeback = true;
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(REQ_OP_WRITE, 0, bh);
			need_end_writeback = false;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	/* If no i/o was started, need to end writeback here. */
	if (unlikely(need_end_writeback))
		end_page_writeback(page);

	ntfs_debug("Done.");
	return err;
}
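/**
 * ntfs_write_mst_block - write a @page to the backing store
 * @page:	page cache page to write out
 * @wbc:	writeback control structure
 *
 * This function is for writing pages belonging to non-resident, mst
 * protected attributes to their backing store.  The only supported
 * attributes are index allocation and $MFT/$DATA.  Both directory inodes and
 * index inodes are supported for the index allocation case.
 *
 * The page must remain locked for the duration of the write because we apply
 * the mst fixups, write, and then undo the fixups, so if we were to unlock
 * the page before undoing the fixups, any other user of the page would see
 * the page contents as corrupt.
 *
 * Return 0 on success and -errno on error.
 */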
static int ntfs_write_mst_block(struct page *page,
		struct writeback_control *wbc)
{
	sector_t block, dblock, rec_block;
	struct inode *vi = page->mapping->host;
	ntfs_inode *ni = NTFS_I(vi);
	ntfs_volume *vol = ni->vol;
	u8 *kaddr;
	unsigned int rec_size = ni->itype.index.block_size;
	ntfs_inode *locked_nis[PAGE_SIZE / NTFS_BLOCK_SIZE];
	struct buffer_head *bh, *head, *tbh, *rec_start_bh;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	runlist_element *rl;
	int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
	unsigned bh_size, rec_size_bits;
	bool sync, is_mft, page_is_dirty, rec_is_dirty;
	unsigned char bh_size_bits;

	if (WARN_ON(rec_size < NTFS_BLOCK_SIZE))
		return -EINVAL;

	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
			"0x%lx.", vi->i_ino, ni->type, page->index);
	BUG_ON(!NInoNonResident(ni));
	BUG_ON(!NInoMstProtected(ni));
	is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino);
	/*
	 * NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page
	 * in its page cache were to be marked dirty.  However this should
	 * never happen with the current driver and considering we do not
	 * handle this case here we do want to BUG(), at least for now.
	 */
	BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
			(NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
	bh_size = vol->sb->s_blocksize;
	bh_size_bits = vol->sb->s_blocksize_bits;
	max_bhs = PAGE_SIZE / bh_size;
	BUG_ON(!max_bhs);
	BUG_ON(max_bhs > MAX_BUF_PER_PAGE);

	/* Were we called for sync purposes? */
	sync = (wbc->sync_mode == WB_SYNC_ALL);

	/* Make sure we have mapped buffers. */
	bh = head = page_buffers(page);
	BUG_ON(!bh);

	rec_size_bits = ni->itype.index.block_size_bits;
	BUG_ON(!(PAGE_SIZE >> rec_size_bits));
	bhs_per_rec = rec_size >> bh_size_bits;
	BUG_ON(!bhs_per_rec);

	/* The first block in the page. */
	rec_block = block = (sector_t)page->index <<
			(PAGE_SHIFT - bh_size_bits);

	/* The first out of bounds block for the data size. */
	dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;

	rl = NULL;
	err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
	page_is_dirty = rec_is_dirty = false;
	rec_start_bh = NULL;
	do {
		bool is_retry = false;

		if (likely(block < rec_block)) {
			if (unlikely(block >= dblock)) {
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * This block is not the first one in the record.  We
			 * ignore the buffer's dirty state because we could
			 * have raced with a parallel mark_ntfs_record_dirty().
			 */
			if (!rec_is_dirty)
				continue;
			if (unlikely(err2)) {
				if (err2 != -ENOMEM)
					clear_buffer_dirty(bh);
				continue;
			}
		} else /* if (block == rec_block) */ {
			BUG_ON(block > rec_block);
			/* This block is the first one in the record. */
			rec_block += bhs_per_rec;
			err2 = 0;
			if (unlikely(block >= dblock)) {
				clear_buffer_dirty(bh);
				continue;
			}
			if (!buffer_dirty(bh)) {
				/* Clean records are not written out. */
				rec_is_dirty = false;
				continue;
			}
			rec_is_dirty = true;
			rec_start_bh = bh;
		}
		/* Need to map the buffer if it is not mapped already. */
		if (unlikely(!buffer_mapped(bh))) {
			VCN vcn;
			LCN lcn;
			unsigned int vcn_ofs;

			bh->b_bdev = vol->sb->s_bdev;
			/* Obtain the vcn and offset of the current block. */
			vcn = (VCN)block << bh_size_bits;
			vcn_ofs = vcn & vol->cluster_size_mask;
			vcn >>= vol->cluster_size_bits;
			if (!rl) {
lock_retry_remap:
				down_read(&ni->runlist.lock);
				rl = ni->runlist.rl;
			}
			if (likely(rl != NULL)) {
				/* Seek to element containing target vcn. */
				while (rl->length && rl[1].vcn <= vcn)
					rl++;
				lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
			} else
				lcn = LCN_RL_NOT_MAPPED;
			/* Successful remap. */
			if (likely(lcn >= 0)) {
				/* Setup buffer head to correct block. */
				bh->b_blocknr = ((lcn <<
						vol->cluster_size_bits) +
						vcn_ofs) >> bh_size_bits;
				set_buffer_mapped(bh);
			} else {
				/*
				 * Remap failed.  Retry to map the runlist once
				 * unless we are working on $MFT which always
				 * has the whole of its runlist in memory.
				 */
				if (!is_mft && !is_retry &&
						lcn == LCN_RL_NOT_MAPPED) {
					is_retry = true;
					/*
					 * Attempt to map runlist, dropping
					 * lock for the duration.
					 */
					up_read(&ni->runlist.lock);
					err2 = ntfs_map_runlist(ni, vcn);
					if (likely(!err2))
						goto lock_retry_remap;
					if (err2 == -ENOMEM)
						page_is_dirty = true;
					lcn = err2;
				} else {
					err2 = -EIO;
					if (!rl)
						up_read(&ni->runlist.lock);
				}
				/* Hard error.  Abort writing this record. */
				if (!err || err == -ENOMEM)
					err = err2;
				bh->b_blocknr = -1;
				ntfs_error(vol->sb, "Cannot write ntfs record "
						"0x%llx (inode 0x%lx, "
						"attribute type 0x%x) because "
						"its location on disk could "
						"not be determined (error "
						"code %lli).",
						(long long)block <<
						bh_size_bits >>
						vol->mft_record_size_bits,
						ni->mft_no, ni->type,
						(long long)lcn);
				/*
				 * If this is not the first buffer, remove the
				 * buffers in this record from the list of
				 * buffers to write and clear their dirty bit
				 * if not error -ENOMEM.
				 */
				if (rec_start_bh != bh) {
					while (bhs[--nr_bhs] != rec_start_bh)
						;
					if (err2 != -ENOMEM) {
						do {
							clear_buffer_dirty(
								rec_start_bh);
						} while ((rec_start_bh =
								rec_start_bh->
								b_this_page) !=
								bh);
					}
				}
				continue;
			}
		}
		BUG_ON(!buffer_uptodate(bh));
		BUG_ON(nr_bhs >= max_bhs);
		bhs[nr_bhs++] = bh;
	} while (block++, (bh = bh->b_this_page) != head);
	if (unlikely(rl))
		up_read(&ni->runlist.lock);
	/* If there were no dirty buffers, we are done. */
	if (!nr_bhs)
		goto done;
	/* Map the page so we can access its contents. */
	kaddr = kmap(page);
	/* Clear the page uptodate flag whilst the mst fixups are applied. */
	BUG_ON(!PageUptodate(page));
	ClearPageUptodate(page);
	for (i = 0; i < nr_bhs; i++) {
		unsigned int ofs;

		/* Skip buffers which are not at the beginning of records. */
		if (i % bhs_per_rec)
			continue;
		tbh = bhs[i];
		ofs = bh_offset(tbh);
		if (is_mft) {
			ntfs_inode *tni;
			unsigned long mft_no;

			/* Get the mft record number. */
			mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
					>> rec_size_bits;
			/* Check whether to write this mft record. */
			tni = NULL;
			if (!ntfs_may_write_mft_record(vol, mft_no,
					(MFT_RECORD*)(kaddr + ofs), &tni)) {
				/*
				 * The record should not be written.  This
				 * means we need to redirty the page before
				 * returning it.
				 */
				page_is_dirty = true;
				/*
				 * Remove the buffers in this mft record from
				 * the list of buffers to write.
				 */
				do {
					bhs[i] = NULL;
				} while (++i % bhs_per_rec);
				continue;
			}
			/*
			 * The record should be written.  If a locked ntfs
			 * inode was returned, add it to the array of locked
			 * ntfs inodes.
			 */
			if (tni)
				locked_nis[nr_locked_nis++] = tni;
		}
		/* Apply the mst protection fixups. */
		err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs),
				rec_size);
		if (unlikely(err2)) {
			if (!err || err == -ENOMEM)
				err = -EIO;
			ntfs_error(vol->sb, "Failed to apply mst fixups "
					"(inode 0x%lx, attribute type 0x%x, "
					"page index 0x%lx, page offset 0x%x)!"
					" Unmount and run chkdsk.", vi->i_ino,
					ni->type, page->index, ofs);
			/*
			 * Mark all the buffers in this record clean as we do
			 * not want to write corrupt data to disk.
			 */
			do {
				clear_buffer_dirty(bhs[i]);
				bhs[i] = NULL;
			} while (++i % bhs_per_rec);
			continue;
		}
		nr_recs++;
	}
	/* If no records are ready for writing, we are done. */
	if (!nr_recs)
		goto unm_done;
	flush_dcache_page(page);
	/* Lock buffers and start synchronous write i/o on them. */
	for (i = 0; i < nr_bhs; i++) {
		tbh = bhs[i];
		if (!tbh)
			continue;
		if (!trylock_buffer(tbh))
			BUG();
		/* The buffer dirty state is now irrelevant, just clean it. */
		clear_buffer_dirty(tbh);
		BUG_ON(!buffer_uptodate(tbh));
		BUG_ON(!buffer_mapped(tbh));
		get_bh(tbh);
		tbh->b_end_io = end_buffer_write_sync;
		submit_bh(REQ_OP_WRITE, 0, tbh);
	}
	/* Synchronize the mft mirror now if not @sync. */
	if (is_mft && !sync)
		goto do_mirror;
do_wait:
	/* Wait on i/o completion of buffers. */
	for (i = 0; i < nr_bhs; i++) {
		tbh = bhs[i];
		if (!tbh)
			continue;
		wait_on_buffer(tbh);
		if (unlikely(!buffer_uptodate(tbh))) {
			ntfs_error(vol->sb, "I/O error while writing ntfs "
					"record buffer (inode 0x%lx, "
					"attribute type 0x%x, page index "
					"0x%lx, page offset 0x%lx)! Unmount "
					"and run chkdsk.", vi->i_ino, ni->type,
					page->index, bh_offset(tbh));
			if (!err || err == -ENOMEM)
				err = -EIO;
			/*
			 * Set the buffer uptodate so the page and buffer
			 * states do not become out of sync.
			 */
			set_buffer_uptodate(tbh);
		}
	}
	/* If @sync, now synchronize the mft mirror. */
	if (is_mft && sync) {
do_mirror:
		for (i = 0; i < nr_bhs; i++) {
			unsigned long mft_no;
			unsigned int ofs;

			/*
			 * Skip buffers which are not at the beginning of
			 * records.
			 */
			if (i % bhs_per_rec)
				continue;
			tbh = bhs[i];
			/* Skip removed buffers (and hence records). */
			if (!tbh)
				continue;
			ofs = bh_offset(tbh);
			/* Get the mft record number. */
			mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
					>> rec_size_bits;
			if (mft_no < vol->mftmirr_size)
				ntfs_sync_mft_mirror(vol, mft_no,
						(MFT_RECORD*)(kaddr + ofs),
						sync);
		}
		if (!sync)
			goto do_wait;
	}
	/* Remove the mst protection fixups again. */
	for (i = 0; i < nr_bhs; i++) {
		if (!(i % bhs_per_rec)) {
			tbh = bhs[i];
			if (!tbh)
				continue;
			post_write_mst_fixup((NTFS_RECORD*)(kaddr +
					bh_offset(tbh)));
		}
	}
	flush_dcache_page(page);
unm_done:
	/* Unlock any locked inodes. */
	while (nr_locked_nis-- > 0) {
		ntfs_inode *tni, *base_tni;

		tni = locked_nis[nr_locked_nis];
		/* Get the base inode. */
		mutex_lock(&tni->extent_lock);
		if (tni->nr_extents >= 0)
			base_tni = tni;
		else {
			base_tni = tni->ext.base_ntfs_ino;
			BUG_ON(!base_tni);
		}
		mutex_unlock(&tni->extent_lock);
		ntfs_debug("Unlocking %s inode 0x%lx.",
				tni == base_tni ? "base" : "extent",
				tni->mft_no);
		mutex_unlock(&tni->mrec_lock);
		atomic_dec(&tni->count);
		iput(VFS_I(base_tni));
	}
	SetPageUptodate(page);
	kunmap(page);
done:
	if (unlikely(err && err != -ENOMEM)) {
		/*
		 * Set page error if there is only one ntfs record in the
		 * page.  Otherwise we would lose per-record granularity.
		 */
		if (ni->itype.index.block_size == PAGE_SIZE)
			SetPageError(page);
		NVolSetErrors(vol);
	}
	if (page_is_dirty) {
		ntfs_debug("Page still contains one or more dirty ntfs "
				"records. Redirtying the page starting at "
				"record 0x%lx.", page->index <<
				(PAGE_SHIFT - rec_size_bits));
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
	} else {
		/*
		 * Keep the VM happy.  This must be done otherwise the
		 * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
		 * the page is clean.
		 */
		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
	}
	if (likely(!err))
		ntfs_debug("Done.");
	return err;
}
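/**
 * ntfs_writepage - write a @page to the backing store
 * @page:	page cache page to write out
 * @wbc:	writeback control structure
 *
 * This is called from the VM when it wants to have a dirty ntfs page cache
 * page cleaned.  The VM has already locked the page and marked it clean.
 *
 * For non-resident attributes, ntfs_writepage() writes the @page by calling
 * the ntfs version of the generic block_write_full_page() function,
 * ntfs_write_block(), which in turn if necessary creates and writes the
 * buffers associated with the page asynchronously.
 *
 * For resident attributes, OTOH, ntfs_writepage() writes the @page by
 * copying the data to the mft record (which at this stage is most likely in
 * memory).  The mft record is then marked dirty and written out
 * asynchronously via the vfs inode dirty code path for the inode the mft
 * record belongs to or via the vm page dirty code path for the page the mft
 * record is in.
 *
 * Return 0 on success and -errno on error.
 */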
static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
{
	loff_t i_size;
	struct inode *vi = page->mapping->host;
	ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
	char *addr;
	ntfs_attr_search_ctx *ctx = NULL;
	MFT_RECORD *m = NULL;
	u32 attr_len;
	int err;

retry_writepage:
	BUG_ON(!PageLocked(page));
	i_size = i_size_read(vi);
	/* Is the page fully outside i_size? (truncate in progress) */
	if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
			PAGE_SHIFT)) {
		/*
		 * The page may have dirty, unmapped buffers.  Make them
		 * freeable here, so the page does not leak.
		 */
		block_invalidatepage(page, 0, PAGE_SIZE);
		unlock_page(page);
		ntfs_debug("Write outside i_size - truncated?");
		return 0;
	}
	/*
	 * Only $DATA attributes can be encrypted and only unnamed $DATA
	 * attributes can be compressed.  Index root can have the flags set but
	 * this means to create compressed/encrypted files, not that the
	 * attribute is compressed/encrypted.  Note we need to check for
	 * AT_INDEX_ALLOCATION since this is the type of both directory and
	 * index inodes.
	 */
	if (ni->type != AT_INDEX_ALLOCATION) {
		/* If file is encrypted, deny access, just like NT4. */
		if (NInoEncrypted(ni)) {
			unlock_page(page);
			BUG_ON(ni->type != AT_DATA);
			ntfs_debug("Denying write access to encrypted file.");
			return -EACCES;
		}
		/* Compressed data streams are handled in compress.c. */
		if (NInoNonResident(ni) && NInoCompressed(ni)) {
			BUG_ON(ni->type != AT_DATA);
			BUG_ON(ni->name_len);
			/* TODO: Implement writing compressed attributes. */
			unlock_page(page);
			ntfs_error(vi->i_sb, "Writing to compressed files is "
					"not supported yet. Sorry.");
			return -EOPNOTSUPP;
		}
		/* TODO: Implement writing sparse attributes. */
		if (NInoNonResident(ni) && NInoSparse(ni)) {
			unlock_page(page);
			ntfs_error(vi->i_sb, "Writing to sparse files is not "
					"supported yet. Sorry.");
			return -EOPNOTSUPP;
		}
	}
	/* NInoNonResident() == NInoIndexAllocPresent() */
	if (NInoNonResident(ni)) {
		/* We have to zero every time due to mmap-at-end-of-file. */
		if (page->index >= (i_size >> PAGE_SHIFT)) {
			/* The page straddles i_size. */
			unsigned int ofs = i_size & ~PAGE_MASK;
			zero_user_segment(page, ofs, PAGE_SIZE);
		}
		/* Handle mst protected attributes. */
		if (NInoMstProtected(ni))
			return ntfs_write_mst_block(page, wbc);
		/* Normal, non-resident data stream. */
		return ntfs_write_block(page, wbc);
	}
	/*
	 * Attribute is resident, implying it is not compressed, encrypted, or
	 * mst protected.  This also means the attribute is smaller than an
	 * mft record and hence smaller than a page, so can simply return
	 * error on any pages with index above 0.
	 */
	BUG_ON(page_has_buffers(page));
	BUG_ON(!PageUptodate(page));
	if (unlikely(page->index > 0)) {
		ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0. "
				"Aborting write.", page->index);
		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		return -EIO;
	}
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	/*
	 * If a parallel write made the attribute non-resident, drop the mft
	 * record and retry the writepage.
	 */
	if (unlikely(NInoNonResident(ni))) {
		unmap_mft_record(base_ni);
		goto retry_writepage;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err))
		goto err_out;
	/*
	 * Keep the VM happy.  This must be done otherwise the radix-tree tag
	 * PAGECACHE_TAG_DIRTY remains set even though the page is clean.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
	i_size = i_size_read(vi);
	if (unlikely(attr_len > i_size)) {
		/* Race with shrinking truncate or a failed write. */
		attr_len = i_size;
		/*
		 * If the truncate failed, fix it up now.  If a concurrent
		 * truncate, we do its job, so it does not have to do anything.
		 */
		err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr,
				attr_len);
		/* Shrinking cannot fail. */
		BUG_ON(err);
	}
	addr = kmap_atomic(page);
	/* Copy the data from the page to the mft record. */
	memcpy((u8*)ctx->attr +
			le16_to_cpu(ctx->attr->data.resident.value_offset),
			addr, attr_len);
	/* Zero out of bounds area in the page cache page. */
	memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
	kunmap_atomic(addr);
	flush_dcache_page(page);
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	/* We are done with the page. */
	end_page_writeback(page);
	/* Finally, mark the mft record dirty, so it gets written back. */
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	return 0;
err_out:
	if (err == -ENOMEM) {
		ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying "
				"page so we try again later.");
		/*
		 * Put the page back on mapping->dirty_pages, but leave its
		 * buffers' dirty state as-is.
		 */
		redirty_page_for_writepage(wbc, page);
		err = 0;
	} else {
		ntfs_error(vi->i_sb, "Resident attribute write failed with "
				"error %i.", err);
		SetPageError(page);
		NVolSetErrors(ni->vol);
	}
	unlock_page(page);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
}

#endif /* NTFS_RW */

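/**
 * ntfs_bmap - map logical file block to physical device block
 * @mapping:	address space mapping to which the block to be mapped belongs
 * @block:	logical block to map to its physical device block
 *
 * For regular, non-resident files (i.e. not compressed and not encrypted),
 * map the logical @block belonging to the file described by the address
 * space mapping @mapping to its physical device block.
 *
 * The size of the block is equal to the @s_blocksize field of the super
 * block of the mounted file system which is guaranteed to be smaller than or
 * equal to the cluster size thus the block is guaranteed to fit entirely
 * inside the cluster which means we do not need to care how many contiguous
 * bytes are available after the beginning of the block.
 *
 * Return the physical device block if the mapping succeeded or 0 if the
 * block is sparse or there was an error.
 */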
static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
{
	s64 ofs, size;
	loff_t i_size;
	LCN lcn;
	unsigned long blocksize, flags;
	ntfs_inode *ni = NTFS_I(mapping->host);
	ntfs_volume *vol = ni->vol;
	unsigned delta;
	unsigned char blocksize_bits, cluster_size_shift;

	ntfs_debug("Entering for mft_no 0x%lx, logical block 0x%llx.",
			ni->mft_no, (unsigned long long)block);
	if (ni->type != AT_DATA || !NInoNonResident(ni) || NInoEncrypted(ni)) {
		ntfs_error(vol->sb, "BMAP does not make sense for %s "
				"attributes, returning 0.",
				(ni->type != AT_DATA) ? "non-data" :
				(!NInoNonResident(ni) ? "resident" :
				"encrypted"));
		return 0;
	}
	/* None of these can happen. */
	BUG_ON(NInoCompressed(ni));
	BUG_ON(NInoMstProtected(ni));
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	ofs = (s64)block << blocksize_bits;
	read_lock_irqsave(&ni->size_lock, flags);
	size = ni->initialized_size;
	i_size = i_size_read(VFS_I(ni));
	read_unlock_irqrestore(&ni->size_lock, flags);
	/*
	 * If the offset is outside the initialized size or the block straddles
	 * the initialized size then pretend it is a hole unless the
	 * initialized size equals the file size.
	 */
	if (unlikely(ofs >= size || (ofs + blocksize > size && size < i_size)))
		goto hole;
	cluster_size_shift = vol->cluster_size_bits;
	down_read(&ni->runlist.lock);
	lcn = ntfs_attr_vcn_to_lcn_nolock(ni, ofs >> cluster_size_shift, false);
	up_read(&ni->runlist.lock);
	if (unlikely(lcn < LCN_HOLE)) {
		/*
		 * Step down to an integer to avoid gcc doing a long long
		 * comparison in the switch when we know @lcn is between
		 * LCN_HOLE and LCN_EIO (i.e. -1 to -5).
		 */
		switch ((int)lcn) {
		case LCN_ENOENT:
			/*
			 * If the offset is out of bounds then pretend it is a
			 * hole.
			 */
			goto hole;
		case LCN_ENOMEM:
			ntfs_error(vol->sb, "Not enough memory to complete "
					"mapping for inode 0x%lx. "
					"Returning 0.", ni->mft_no);
			break;
		default:
			ntfs_error(vol->sb, "Failed to complete mapping for "
					"inode 0x%lx. Run chkdsk. "
					"Returning 0.", ni->mft_no);
			break;
		}
		return 0;
	}
	if (lcn < 0) {
		/* It is a hole. */
hole:
		ntfs_debug("Done (returning hole).");
		return 0;
	}
	/*
	 * The block is really allocated and fulfils all our criteria.
	 * Convert the cluster to units of block size and return the result.
	 */
	delta = ofs & vol->cluster_size_mask;
	if (unlikely(sizeof(block) < sizeof(lcn))) {
		block = lcn = ((lcn << cluster_size_shift) + delta) >>
				blocksize_bits;
		/* If the block number was truncated return 0. */
		if (unlikely(block != lcn)) {
			ntfs_error(vol->sb, "Physical block 0x%llx is too "
					"large to be returned, returning 0.",
					(long long)lcn);
			return 0;
		}
	} else
		block = ((lcn << cluster_size_shift) + delta) >>
				blocksize_bits;
	ntfs_debug("Done (returning block 0x%llx).", (unsigned long long)lcn);
	return block;
}
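/**
 * ntfs_normal_aops - address space operations for normal inodes and attributes
 *
 * Note these are not used for compressed or mst protected inodes and
 * attributes.
 */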
const struct address_space_operations ntfs_normal_aops = {
	.readpage	= ntfs_readpage,
#ifdef NTFS_RW
	.writepage	= ntfs_writepage,
	.set_page_dirty	= __set_page_dirty_buffers,
#endif /* NTFS_RW */
	.bmap		= ntfs_bmap,
	.migratepage	= buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
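/**
 * ntfs_compressed_aops - address space operations for compressed inodes
 */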
const struct address_space_operations ntfs_compressed_aops = {
	.readpage	= ntfs_readpage,
#ifdef NTFS_RW
	.writepage	= ntfs_writepage,
	.set_page_dirty	= __set_page_dirty_buffers,
#endif /* NTFS_RW */
	.migratepage	= buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
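/**
 * ntfs_mst_aops - general address space operations for mst protected inodes
 *		   and attributes
 */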
const struct address_space_operations ntfs_mst_aops = {
	.readpage	= ntfs_readpage,	/* Fill page with data. */
#ifdef NTFS_RW
	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */
	.set_page_dirty	= __set_page_dirty_nobuffers,	/* Set the page dirty
						   without touching the buffers
						   belonging to the page. */
#endif /* NTFS_RW */
	.migratepage	= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

#ifdef NTFS_RW

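/**
 * mark_ntfs_record_dirty - mark an ntfs record dirty
 * @page:	page containing the ntfs record to mark dirty
 * @ofs:	byte offset within @page at which the ntfs record begins
 *
 * Set the buffers and the page in which the ntfs record is located dirty.
 *
 * The latter also marks the vfs inode the ntfs record belongs to dirty
 * (I_DIRTY_PAGES only).
 *
 * If the page does not have buffers, we create them and set them uptodate.
 * The page may not be locked which is why we need to handle the buffers
 * under the mapping->private_lock.  Once the buffers are marked dirty we no
 * longer need the lock as try_to_free_buffers() does not free dirty buffers.
 */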
void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs)
{
	struct address_space *mapping = page->mapping;
	ntfs_inode *ni = NTFS_I(mapping->host);
	struct buffer_head *bh, *head, *buffers_to_free = NULL;
	unsigned int end, bh_size, bh_ofs;

	BUG_ON(!PageUptodate(page));
	end = ofs + ni->itype.index.block_size;
	bh_size = VFS_I(ni)->i_sb->s_blocksize;
	spin_lock(&mapping->private_lock);
	if (unlikely(!page_has_buffers(page))) {
		spin_unlock(&mapping->private_lock);
		bh = head = alloc_page_buffers(page, bh_size, true);
		spin_lock(&mapping->private_lock);
		if (likely(!page_has_buffers(page))) {
			struct buffer_head *tail;

			do {
				set_buffer_uptodate(bh);
				tail = bh;
				bh = bh->b_this_page;
			} while (bh);
			tail->b_this_page = head;
			attach_page_buffers(page, head);
		} else
			buffers_to_free = bh;
	}
	bh = head = page_buffers(page);
	BUG_ON(!bh);
	do {
		bh_ofs = bh_offset(bh);
		if (bh_ofs + bh_size <= ofs)
			continue;
		if (unlikely(bh_ofs >= end))
			break;
		set_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);
	spin_unlock(&mapping->private_lock);
	__set_page_dirty_nobuffers(page);
	if (unlikely(buffers_to_free)) {
		do {
			bh = buffers_to_free->b_this_page;
			free_buffer_head(buffers_to_free);
			buffers_to_free = bh;
		} while (buffers_to_free);
	}
}

#endif /* NTFS_RW */