This source file includes the following definitions:
- xfs_inode_alloc
- xfs_inode_free_callback
- __xfs_inode_free
- xfs_inode_free
- xfs_reclaim_work_queue
- xfs_reclaim_worker
- xfs_perag_set_reclaim_tag
- xfs_perag_clear_reclaim_tag
- xfs_inode_set_reclaim_tag
- xfs_inode_clear_reclaim_tag
- xfs_inew_wait
- xfs_reinit_inode
- xfs_iget_check_free_state
- xfs_iget_cache_hit
- xfs_iget_cache_miss
- xfs_iget
- xfs_icache_inode_is_allocated
- xfs_inode_ag_walk_grab
- xfs_inode_ag_walk
- xfs_queue_eofblocks
- xfs_eofblocks_worker
- xfs_queue_cowblocks
- xfs_cowblocks_worker
- xfs_inode_ag_iterator_flags
- xfs_inode_ag_iterator
- xfs_inode_ag_iterator_tag
- xfs_reclaim_inode_grab
- xfs_reclaim_inode
- xfs_reclaim_inodes_ag
- xfs_reclaim_inodes
- xfs_reclaim_inodes_nr
- xfs_reclaim_inodes_count
- xfs_inode_match_id
- xfs_inode_match_id_union
- xfs_inode_free_eofblocks
- __xfs_icache_free_eofblocks
- xfs_icache_free_eofblocks
- __xfs_inode_free_quota_eofblocks
- xfs_inode_free_quota_eofblocks
- xfs_iflag_for_tag
- __xfs_inode_set_blocks_tag
- xfs_inode_set_eofblocks_tag
- __xfs_inode_clear_blocks_tag
- xfs_inode_clear_eofblocks_tag
- xfs_prep_free_cowblocks
- xfs_inode_free_cowblocks
- xfs_icache_free_cowblocks
- xfs_inode_free_quota_cowblocks
- xfs_inode_set_cowblocks_tag
- xfs_inode_clear_cowblocks_tag
- xfs_stop_block_reaping
- xfs_start_block_reaping
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

#include <linux/iversion.h>

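/*
 * Allocate and initialise an xfs_inode, including the embedded VFS inode.
 * Returns NULL if either the slab allocation or the VFS inode
 * initialisation fails.
 */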
struct xfs_inode *
xfs_inode_alloc(
        struct xfs_mount        *mp,
        xfs_ino_t               ino)
{
        struct xfs_inode        *ip;

        ip = kmem_zone_alloc(xfs_inode_zone, 0);
        if (!ip)
                return NULL;
        if (inode_init_always(mp->m_super, VFS_I(ip))) {
                kmem_zone_free(xfs_inode_zone, ip);
                return NULL;
        }

        /* VFS doesn't initialise i_mode! */
        VFS_I(ip)->i_mode = 0;

        XFS_STATS_INC(mp, vn_active);
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(!xfs_isiflocked(ip));
        ASSERT(ip->i_ino == 0);

        /* initialise the xfs inode */
        ip->i_ino = ino;
        ip->i_mount = mp;
        memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
        ip->i_afp = NULL;
        ip->i_cowfp = NULL;
        ip->i_cnextents = 0;
        ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
        memset(&ip->i_df, 0, sizeof(ip->i_df));
        ip->i_flags = 0;
        ip->i_delayed_blks = 0;
        memset(&ip->i_d, 0, sizeof(ip->i_d));
        ip->i_sick = 0;
        ip->i_checked = 0;
        INIT_WORK(&ip->i_ioend_work, xfs_end_io);
        INIT_LIST_HEAD(&ip->i_ioend_list);
        spin_lock_init(&ip->i_ioend_lock);

        return ip;
}

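/*
 * RCU callback that actually frees the inode once the grace period has
 * expired: tear down the remaining forks and the inode log item, then
 * return the structure to the inode zone.
 */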
STATIC void
xfs_inode_free_callback(
        struct rcu_head         *head)
{
        struct inode            *inode = container_of(head, struct inode, i_rcu);
        struct xfs_inode        *ip = XFS_I(inode);

        switch (VFS_I(ip)->i_mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
        case S_IFLNK:
                xfs_idestroy_fork(ip, XFS_DATA_FORK);
                break;
        }

        if (ip->i_afp)
                xfs_idestroy_fork(ip, XFS_ATTR_FORK);
        if (ip->i_cowfp)
                xfs_idestroy_fork(ip, XFS_COW_FORK);

        if (ip->i_itemp) {
                ASSERT(!test_bit(XFS_LI_IN_AIL,
                                 &ip->i_itemp->ili_item.li_flags));
                xfs_inode_item_destroy(ip);
                ip->i_itemp = NULL;
        }

        kmem_zone_free(xfs_inode_zone, ip);
}

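/*
 * Free the inode via RCU so that RCU-protected radix tree lookups that may
 * still be walking over it never touch freed memory.
 */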
static void
__xfs_inode_free(
        struct xfs_inode        *ip)
{
        /* asserts to verify all state is correct here */
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        XFS_STATS_DEC(ip->i_mount, vn_active);

        call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
        struct xfs_inode        *ip)
{
        ASSERT(!xfs_isiflocked(ip));

        /*
         * Because we use RCU freeing we need to ensure the inode always
         * appears to be reclaimed with an invalid inode number when in the
         * free state. The ip->i_flags_lock provides the barrier against
         * lookup races.
         */
        spin_lock(&ip->i_flags_lock);
        ip->i_flags = XFS_IRECLAIM;
        ip->i_ino = 0;
        spin_unlock(&ip->i_flags_lock);

        __xfs_inode_free(ip);
}

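/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. The scheduling delay is derived
 * from the xfs_syncd_centisecs tunable.
 */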
static void
xfs_reclaim_work_queue(
        struct xfs_mount        *mp)
{
        rcu_read_lock();
        if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
                queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
                        msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
        }
        rcu_read_unlock();
}

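/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every
 * few seconds, as well as being kicked by the inode cache shrinker when
 * memory goes low.
 */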
void
xfs_reclaim_worker(
        struct work_struct *work)
{
        struct xfs_mount *mp = container_of(to_delayed_work(work),
                                        struct xfs_mount, m_reclaim_work);

        xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
        xfs_reclaim_work_queue(mp);
}

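/*
 * Account a newly reclaimable inode in the perag and, on the 0 -> 1
 * transition, tag the AG in the per-mount radix tree and kick background
 * reclaim. Caller must hold pag_ici_lock.
 */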
static void
xfs_perag_set_reclaim_tag(
        struct xfs_perag        *pag)
{
        struct xfs_mount        *mp = pag->pag_mount;

        lockdep_assert_held(&pag->pag_ici_lock);
        if (pag->pag_ici_reclaimable++)
                return;

        /* propagate the reclaim tag up into the perag radix tree */
        spin_lock(&mp->m_perag_lock);
        radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
                           XFS_ICI_RECLAIM_TAG);
        spin_unlock(&mp->m_perag_lock);

        /* schedule periodic background inode reclaim */
        xfs_reclaim_work_queue(mp);

        trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

static void
xfs_perag_clear_reclaim_tag(
        struct xfs_perag        *pag)
{
        struct xfs_mount        *mp = pag->pag_mount;

        lockdep_assert_held(&pag->pag_ici_lock);
        if (--pag->pag_ici_reclaimable)
                return;

        /* clear the reclaim tag from the perag radix tree */
        spin_lock(&mp->m_perag_lock);
        radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
                             XFS_ICI_RECLAIM_TAG);
        spin_unlock(&mp->m_perag_lock);
        trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

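/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */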
void
xfs_inode_set_reclaim_tag(
        struct xfs_inode        *ip)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_perag        *pag;

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);
        spin_lock(&ip->i_flags_lock);

        radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
                           XFS_ICI_RECLAIM_TAG);
        xfs_perag_set_reclaim_tag(pag);
        __xfs_iflags_set(ip, XFS_IRECLAIMABLE);

        spin_unlock(&ip->i_flags_lock);
        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}

STATIC void
xfs_inode_clear_reclaim_tag(
        struct xfs_perag        *pag,
        xfs_ino_t               ino)
{
        radix_tree_tag_clear(&pag->pag_ici_root,
                             XFS_INO_TO_AGINO(pag->pag_mount, ino),
                             XFS_ICI_RECLAIM_TAG);
        xfs_perag_clear_reclaim_tag(pag);
}

static void
xfs_inew_wait(
        struct xfs_inode        *ip)
{
        wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
        DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);

        do {
                prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
                if (!xfs_iflags_test(ip, XFS_INEW))
                        break;
                schedule();
        } while (true);
        finish_wait(wq, &wait.wq_entry);
}

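/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * as well. inode_init_always() wipes fields that carry state we must keep, so
 * save those values across the reinitialisation and restore them afterwards,
 * even if it fails.
 */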
static int
xfs_reinit_inode(
        struct xfs_mount        *mp,
        struct inode            *inode)
{
        int             error;
        uint32_t        nlink = inode->i_nlink;
        uint32_t        generation = inode->i_generation;
        uint64_t        version = inode_peek_iversion(inode);
        umode_t         mode = inode->i_mode;
        dev_t           dev = inode->i_rdev;

        error = inode_init_always(mp->m_super, inode);

        set_nlink(inode, nlink);
        inode->i_generation = generation;
        inode_set_iversion_queried(inode, version);
        inode->i_mode = mode;
        inode->i_rdev = dev;
        return error;
}

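/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *      0               if the inode free state matches the lookup context
 *      -ENOENT         if the inode is free and we are not allocating
 *      -EFSCORRUPTED   if there is any state mismatch at all
 */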
static int
xfs_iget_check_free_state(
        struct xfs_inode        *ip,
        int                     flags)
{
        if (flags & XFS_IGET_CREATE) {
                /* should be a free inode */
                if (VFS_I(ip)->i_mode != 0) {
                        xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
                                ip->i_ino, VFS_I(ip)->i_mode);
                        return -EFSCORRUPTED;
                }

                if (ip->i_d.di_nblocks != 0) {
                        xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
                                ip->i_ino);
                        return -EFSCORRUPTED;
                }
                return 0;
        }

        /* should be an allocated inode */
        if (VFS_I(ip)->i_mode == 0)
                return -ENOENT;

        return 0;
}

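/*
 * Check the validity of the inode we just found in the cache.
 */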
static int
xfs_iget_cache_hit(
        struct xfs_perag        *pag,
        struct xfs_inode        *ip,
        xfs_ino_t               ino,
        int                     flags,
        int                     lock_flags) __releases(RCU)
{
        struct inode            *inode = VFS_I(ip);
        struct xfs_mount        *mp = ip->i_mount;
        int                     error;

        /*
         * check for re-use of an inode within an RCU grace period due to the
         * radix tree nodes not being updated yet. We monitor for this by
         * setting the inode number to zero before freeing the inode structure.
         * If the inode has been reallocated and set up, the inode number will
         * not match, so check for that, too.
         */
        spin_lock(&ip->i_flags_lock);
        if (ip->i_ino != ino) {
                trace_xfs_iget_skip(ip);
                XFS_STATS_INC(mp, xs_ig_frecycle);
                error = -EAGAIN;
                goto out_error;
        }

        /*
         * If we are racing with another cache hit that is currently
         * instantiating this inode or currently recycling it out of
         * reclaimable state, wait for the initialisation to complete
         * before continuing.
         */
        if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
                trace_xfs_iget_skip(ip);
                XFS_STATS_INC(mp, xs_ig_frecycle);
                error = -EAGAIN;
                goto out_error;
        }

        /*
         * Check the inode free state is valid. This also detects lookup
         * racing with unlinks.
         */
        error = xfs_iget_check_free_state(ip, flags);
        if (error)
                goto out_error;

        /*
         * If IRECLAIMABLE is set, we've torn down the VFS inode already.
         * Need to carefully get it back into useable state.
         */
        if (ip->i_flags & XFS_IRECLAIMABLE) {
                trace_xfs_iget_reclaim(ip);

                if (flags & XFS_IGET_INCORE) {
                        error = -EAGAIN;
                        goto out_error;
                }

                /*
                 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
                 * from stomping over us while we recycle the inode.  We can't
                 * clear the radix tree reclaimable tag yet as it requires
                 * pag_ici_lock to be held exclusive.
                 */
                ip->i_flags |= XFS_IRECLAIM;

                spin_unlock(&ip->i_flags_lock);
                rcu_read_unlock();

                error = xfs_reinit_inode(mp, inode);
                if (error) {
                        bool    wake;

                        /*
                         * Re-initializing the inode failed, and we are in deep
                         * trouble.  Try to re-add it to the reclaim list.
                         */
                        rcu_read_lock();
                        spin_lock(&ip->i_flags_lock);
                        wake = !!__xfs_iflags_test(ip, XFS_INEW);
                        ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
                        if (wake)
                                wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
                        ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
                        trace_xfs_iget_reclaim_fail(ip);
                        goto out_error;
                }

                spin_lock(&pag->pag_ici_lock);
                spin_lock(&ip->i_flags_lock);

                /*
                 * Clear the per-lifetime state in the inode as we are now
                 * effectively a new inode and need to return to the initial
                 * state before reuse occurs.
                 */
                ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
                ip->i_flags |= XFS_INEW;
                xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
                inode->i_state = I_NEW;
                ip->i_sick = 0;
                ip->i_checked = 0;

                ASSERT(!rwsem_is_locked(&inode->i_rwsem));
                init_rwsem(&inode->i_rwsem);

                spin_unlock(&ip->i_flags_lock);
                spin_unlock(&pag->pag_ici_lock);
        } else {
                /* If the VFS inode is being torn down, pause and try again */
                if (!igrab(inode)) {
                        trace_xfs_iget_skip(ip);
                        error = -EAGAIN;
                        goto out_error;
                }

                /* We've got a live one. */
                spin_unlock(&ip->i_flags_lock);
                rcu_read_unlock();
                trace_xfs_iget_hit(ip);
        }

        if (lock_flags != 0)
                xfs_ilock(ip, lock_flags);

        if (!(flags & XFS_IGET_INCORE))
                xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
        XFS_STATS_INC(mp, xs_ig_found);

        return 0;

out_error:
        spin_unlock(&ip->i_flags_lock);
        rcu_read_unlock();
        return error;
}

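/*
 * Cache miss: allocate a new in-core inode, read it from disk, verify its
 * forks, and insert it into the per-AG inode radix tree.
 */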
static int
xfs_iget_cache_miss(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag,
        xfs_trans_t             *tp,
        xfs_ino_t               ino,
        struct xfs_inode        **ipp,
        int                     flags,
        int                     lock_flags)
{
        struct xfs_inode        *ip;
        int                     error;
        xfs_agino_t             agino = XFS_INO_TO_AGINO(mp, ino);
        int                     iflags;

        ip = xfs_inode_alloc(mp, ino);
        if (!ip)
                return -ENOMEM;

        error = xfs_iread(mp, tp, ip, flags);
        if (error)
                goto out_destroy;

        if (!xfs_inode_verify_forks(ip)) {
                error = -EFSCORRUPTED;
                goto out_destroy;
        }

        trace_xfs_iget_miss(ip);

        /*
         * Check the inode free state is valid. This also detects lookup
         * racing with unlinks.
         */
        error = xfs_iget_check_free_state(ip, flags);
        if (error)
                goto out_destroy;

        /*
         * Preload the radix tree so we can insert safely under the
         * write spinlock. Note that we cannot sleep inside the preload
         * region.
         */
        if (radix_tree_preload(GFP_NOFS)) {
                error = -EAGAIN;
                goto out_destroy;
        }

        /*
         * Because the inode hasn't been added to the radix-tree yet it can't
         * be found by another thread, so we can do the non-sleeping lock here.
         */
        if (lock_flags) {
                if (!xfs_ilock_nowait(ip, lock_flags))
                        BUG();
        }

        /*
         * These values must be set before inserting the inode into the radix
         * tree as the moment it is inserted a concurrent lookup (allowed by
         * the RCU locking mechanism) can find it and that lookup must see
         * that this is an inode currently under construction (i.e. that
         * XFS_INEW is set). The ip->i_flags_lock that protects the XFS_INEW
         * flag forms the memory barrier that ensures this detection works
         * correctly.
         */
        iflags = XFS_INEW;
        if (flags & XFS_IGET_DONTCACHE)
                iflags |= XFS_IDONTCACHE;
        ip->i_udquot = NULL;
        ip->i_gdquot = NULL;
        ip->i_pdquot = NULL;
        xfs_iflags_set(ip, iflags);

        /* insert the new inode */
        spin_lock(&pag->pag_ici_lock);
        error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
        if (unlikely(error)) {
                WARN_ON(error != -EEXIST);
                XFS_STATS_INC(mp, xs_ig_dup);
                error = -EAGAIN;
                goto out_preload_end;
        }
        spin_unlock(&pag->pag_ici_lock);
        radix_tree_preload_end();

        *ipp = ip;
        return 0;

out_preload_end:
        spin_unlock(&pag->pag_ici_lock);
        radix_tree_preload_end();
        if (lock_flags)
                xfs_iunlock(ip, lock_flags);
out_destroy:
        __destroy_inode(VFS_I(ip));
        xfs_inode_free(ip);
        return error;
}

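/*
 * Look up an inode by number in the given file system.  The inode is looked
 * up in the cache held in each AG.  If the inode is found in the cache,
 * initialise the VFS inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to
 * the cache and initialise the VFS inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of
 * the data IO path.
 */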
int
xfs_iget(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_ino_t       ino,
        uint            flags,
        uint            lock_flags,
        xfs_inode_t     **ipp)
{
        xfs_inode_t     *ip;
        int             error;
        xfs_perag_t     *pag;
        xfs_agino_t     agino;

        ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

        /* reject inode numbers outside existing AGs */
        if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
                return -EINVAL;

        XFS_STATS_INC(mp, xs_ig_attempts);

        /* get the perag structure and ensure that it's round trip safe */
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
        agino = XFS_INO_TO_AGINO(mp, ino);

again:
        error = 0;
        rcu_read_lock();
        ip = radix_tree_lookup(&pag->pag_ici_root, agino);

        if (ip) {
                error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
                if (error)
                        goto out_error_or_again;
        } else {
                rcu_read_unlock();
                if (flags & XFS_IGET_INCORE) {
                        error = -ENODATA;
                        goto out_error_or_again;
                }
                XFS_STATS_INC(mp, xs_ig_missed);

                error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
                                            flags, lock_flags);
                if (error)
                        goto out_error_or_again;
        }
        xfs_perag_put(pag);

        *ipp = ip;

        /*
         * If we have a real type for an on-disk inode, we can setup the inode
         * now.  If it's a new inode being created, xfs_ialloc will handle it.
         */
        if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
                xfs_setup_existing_inode(ip);
        return 0;

out_error_or_again:
        if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
                delay(1);
                goto again;
        }
        xfs_perag_put(pag);
        return error;
}

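/*
 * "Is this a cached inode that's also allocated?"
 *
 * Look up an inode by number in the given file system.  If the inode is
 * in cache and isn't in purgatory, return 1 if the inode is allocated
 * and 0 if it is not.  For all other cases (not in cache, being torn
 * down, etc.), return a negative error code.
 *
 * Callers should hold the AGI buffer lock if they want a stable answer,
 * as nothing here prevents the inode's allocation state changing right
 * after we drop our references.
 */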
int
xfs_icache_inode_is_allocated(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_ino_t               ino,
        bool                    *inuse)
{
        struct xfs_inode        *ip;
        int                     error;

        error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
        if (error)
                return error;

        *inuse = !!(VFS_I(ip)->i_mode);
        xfs_irele(ip);
        return 0;
}

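/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */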
#define XFS_LOOKUP_BATCH        32

STATIC int
xfs_inode_ag_walk_grab(
        struct xfs_inode        *ip,
        int                     flags)
{
        struct inode            *inode = VFS_I(ip);
        bool                    newinos = !!(flags & XFS_AGITER_INEW_WAIT);

        ASSERT(rcu_read_lock_held());

        /*
         * check for stale RCU freed inode
         *
         * If the inode has been reallocated, it doesn't matter if it's not in
         * the AG we are walking - we are walking for writeback, so if it
         * passes all the "valid inode" checks and is dirty, then we'll write
         * it back anyway.  If it has been reallocated and still being
         * initialised, the XFS_INEW check below will catch it.
         */
        spin_lock(&ip->i_flags_lock);
        if (!ip->i_ino)
                goto out_unlock_noent;

        /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
        if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
            __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
                goto out_unlock_noent;
        spin_unlock(&ip->i_flags_lock);

        /* nothing to sync during shutdown */
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EFSCORRUPTED;

        /* If we can't grab the inode, it must be on its way to reclaim. */
        if (!igrab(inode))
                return -ENOENT;

        /* inode is valid */
        return 0;

out_unlock_noent:
        spin_unlock(&ip->i_flags_lock);
        return -ENOENT;
}

STATIC int
xfs_inode_ag_walk(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag,
        int                     (*execute)(struct xfs_inode *ip, int flags,
                                           void *args),
        int                     flags,
        void                    *args,
        int                     tag,
        int                     iter_flags)
{
        uint32_t                first_index;
        int                     last_error = 0;
        int                     skipped;
        int                     done;
        int                     nr_found;

restart:
        done = 0;
        skipped = 0;
        first_index = 0;
        nr_found = 0;
        do {
                struct xfs_inode *batch[XFS_LOOKUP_BATCH];
                int             error = 0;
                int             i;

                rcu_read_lock();

                if (tag == -1)
                        nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
                                        (void **)batch, first_index,
                                        XFS_LOOKUP_BATCH);
                else
                        nr_found = radix_tree_gang_lookup_tag(
                                        &pag->pag_ici_root,
                                        (void **) batch, first_index,
                                        XFS_LOOKUP_BATCH, tag);

                if (!nr_found) {
                        rcu_read_unlock();
                        break;
                }

                /*
                 * Grab the inodes before we drop the lock. if we found
                 * nothing, nr == 0 and the loop will be skipped.
                 */
                for (i = 0; i < nr_found; i++) {
                        struct xfs_inode *ip = batch[i];

                        if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
                                batch[i] = NULL;

                        /*
                         * Update the index for the next lookup. Catch
                         * overflows into the next AG range which can occur if
                         * we have inodes in the last block of the AG and we
                         * are currently pointing to the last inode.
                         *
                         * Because we may see inodes that are from the wrong AG
                         * due to RCU freeing and reallocation, only update the
                         * index if it lies in this AG. It was a race that lead
                         * us to see this inode, so another lookup from the
                         * same index will not find it again.
                         */
                        if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
                                continue;
                        first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
                        if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
                                done = 1;
                }

                /* unlock now we've grabbed the inodes. */
                rcu_read_unlock();

                for (i = 0; i < nr_found; i++) {
                        if (!batch[i])
                                continue;
                        if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
                            xfs_iflags_test(batch[i], XFS_INEW))
                                xfs_inew_wait(batch[i]);
                        error = execute(batch[i], flags, args);
                        xfs_irele(batch[i]);
                        if (error == -EAGAIN) {
                                skipped++;
                                continue;
                        }
                        if (error && last_error != -EFSCORRUPTED)
                                last_error = error;
                }

                /* bail out if the filesystem is corrupted. */
                if (error == -EFSCORRUPTED)
                        break;

                cond_resched();

        } while (nr_found && !done);

        if (skipped) {
                delay(1);
                goto restart;
        }
        return last_error;
}

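/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 */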
void
xfs_queue_eofblocks(
        struct xfs_mount *mp)
{
        rcu_read_lock();
        if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
                queue_delayed_work(mp->m_eofblocks_workqueue,
                                   &mp->m_eofblocks_work,
                                   msecs_to_jiffies(xfs_eofb_secs * 1000));
        rcu_read_unlock();
}

void
xfs_eofblocks_worker(
        struct work_struct *work)
{
        struct xfs_mount *mp = container_of(to_delayed_work(work),
                                struct xfs_mount, m_eofblocks_work);

        if (!sb_start_write_trylock(mp->m_super))
                return;
        xfs_icache_free_eofblocks(mp, NULL);
        sb_end_write(mp->m_super);

        xfs_queue_eofblocks(mp);
}

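/*
 * Background scanning to trim preallocated CoW space. This is queued
 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
 */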
void
xfs_queue_cowblocks(
        struct xfs_mount *mp)
{
        rcu_read_lock();
        if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
                queue_delayed_work(mp->m_eofblocks_workqueue,
                                   &mp->m_cowblocks_work,
                                   msecs_to_jiffies(xfs_cowb_secs * 1000));
        rcu_read_unlock();
}

void
xfs_cowblocks_worker(
        struct work_struct *work)
{
        struct xfs_mount *mp = container_of(to_delayed_work(work),
                                struct xfs_mount, m_cowblocks_work);

        if (!sb_start_write_trylock(mp->m_super))
                return;
        xfs_icache_free_cowblocks(mp, NULL);
        sb_end_write(mp->m_super);

        xfs_queue_cowblocks(mp);
}

int
xfs_inode_ag_iterator_flags(
        struct xfs_mount        *mp,
        int                     (*execute)(struct xfs_inode *ip, int flags,
                                           void *args),
        int                     flags,
        void                    *args,
        int                     iter_flags)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;

        ag = 0;
        while ((pag = xfs_perag_get(mp, ag))) {
                ag = pag->pag_agno + 1;
                error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
                                          iter_flags);
                xfs_perag_put(pag);
                if (error) {
                        last_error = error;
                        if (error == -EFSCORRUPTED)
                                break;
                }
        }
        return last_error;
}

int
xfs_inode_ag_iterator(
        struct xfs_mount        *mp,
        int                     (*execute)(struct xfs_inode *ip, int flags,
                                           void *args),
        int                     flags,
        void                    *args)
{
        return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
}

int
xfs_inode_ag_iterator_tag(
        struct xfs_mount        *mp,
        int                     (*execute)(struct xfs_inode *ip, int flags,
                                           void *args),
        int                     flags,
        void                    *args,
        int                     tag)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;

        ag = 0;
        while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
                ag = pag->pag_agno + 1;
                error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
                                          0);
                xfs_perag_put(pag);
                if (error) {
                        last_error = error;
                        if (error == -EFSCORRUPTED)
                                break;
                }
        }
        return last_error;
}

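/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */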
STATIC int
xfs_reclaim_inode_grab(
        struct xfs_inode        *ip,
        int                     flags)
{
        ASSERT(rcu_read_lock_held());

        /* quick check for stale RCU freed inode */
        if (!ip->i_ino)
                return 1;

        /*
         * If we are asked for non-blocking operation, do unlocked checks to
         * see if the inode already is being flushed or in reclaim to avoid
         * lock traffic.
         */
        if ((flags & SYNC_TRYLOCK) &&
            __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
                return 1;

        /*
         * The radix tree lock here protects a thread in xfs_iget from racing
         * with us starting reclaim on the inode.  Once we have the
         * XFS_IRECLAIM flag set it will not touch us.
         *
         * Due to RCU lookup, we may find inodes that have been freed and only
         * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
         * aren't candidates for reclaim at all, so we must check the
         * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
         */
        spin_lock(&ip->i_flags_lock);
        if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
            __xfs_iflags_test(ip, XFS_IRECLAIM)) {
                /* not a reclaim candidate. */
                spin_unlock(&ip->i_flags_lock);
                return 1;
        }
        __xfs_iflags_set(ip, XFS_IRECLAIM);
        spin_unlock(&ip->i_flags_lock);
        return 0;
}

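/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *      inode state          iflush ret         required action
 *      ---------------      ----------         ---------------
 *      bad                     -               reclaim
 *      shutdown                EIO             unpin and reclaim
 *      clean, unpinned         0               reclaim
 *      stale, unpinned         0               reclaim
 *      clean, pinned(*)        0               requeue
 *      stale, pinned           EAGAIN          requeue
 *      dirty, async            -               requeue
 *      dirty, sync             0               reclaim
 *
 * (*) dirty inodes that are pinned are flushed once unpinned; during a
 * non-blocking scan we don't wait for the unpin and requeue them instead.
 *
 * Hence the order of actions after gaining the locks should be:
 *      bad             => reclaim
 *      shutdown        => unpin and reclaim
 *      pinned, async   => requeue
 *      pinned, sync    => unpin
 *      stale           => reclaim
 *      clean           => reclaim
 *      dirty, async    => requeue
 *      dirty, sync     => flush, wait and reclaim
 */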
STATIC int
xfs_reclaim_inode(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     sync_mode)
{
        struct xfs_buf          *bp = NULL;
        xfs_ino_t               ino = ip->i_ino; /* for radix_tree_delete */
        int                     error;

restart:
        error = 0;
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (!xfs_iflock_nowait(ip)) {
                if (!(sync_mode & SYNC_WAIT))
                        goto out;
                xfs_iflock(ip);
        }

        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                xfs_iunpin_wait(ip);
                /* xfs_iflush_abort() drops the flush lock */
                xfs_iflush_abort(ip, false);
                goto reclaim;
        }
        if (xfs_ipincount(ip)) {
                if (!(sync_mode & SYNC_WAIT))
                        goto out_ifunlock;
                xfs_iunpin_wait(ip);
        }
        if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
                xfs_ifunlock(ip);
                goto reclaim;
        }

        /*
         * Never flush out dirty data during non-blocking reclaim, as it would
         * just contend with AIL pushing trying to do the same job.
         */
        if (!(sync_mode & SYNC_WAIT))
                goto out_ifunlock;

        /*
         * Now we have an inode that needs flushing.
         *
         * Note that xfs_iflush will never block on the inode buffer lock, as
         * xfs_ifree_cluster() can lock the inode buffer before it locks the
         * ip->i_lock, and we are doing the exact opposite here.  As a result,
         * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
         * result in an ABBA deadlock with xfs_ifree_cluster().
         *
         * As xfs_ifree_cluster() must gather all inodes that are active in
         * the cache to mark them stale, if we hit this case we don't actually
         * want to do IO here - we want the inode marked stale so we can
         * simply reclaim it.  Hence if we get an EAGAIN error here, just
         * unlock the inode, back off and try again.  Hopefully the next pass
         * through will see the stale flag set on the inode.
         */
        error = xfs_iflush(ip, &bp);
        if (error == -EAGAIN) {
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                /* backoff longer than in xfs_ifree_cluster */
                delay(2);
                goto restart;
        }

        if (!error) {
                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
        }

reclaim:
        ASSERT(!xfs_isiflocked(ip));

        /*
         * Because we use RCU freeing we need to ensure the inode always
         * appears to be reclaimed with an invalid inode number when in the
         * free state. We do this as early as possible under the ILOCK so that
         * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
         * detect races with us here. By doing this, we guarantee that once
         * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK
         * that it will see either a valid inode that will serialise
         * correctly, or it will see an invalid inode that it can skip.
         */
        spin_lock(&ip->i_flags_lock);
        ip->i_flags = XFS_IRECLAIM;
        ip->i_ino = 0;
        spin_unlock(&ip->i_flags_lock);

        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);

        /*
         * Remove the inode from the per-AG radix tree.
         *
         * Because radix_tree_delete won't complain even if the item was never
         * added to the tree assert that it's been there before to catch
         * problems with the inode life time early on.
         */
        spin_lock(&pag->pag_ici_lock);
        if (!radix_tree_delete(&pag->pag_ici_root,
                               XFS_INO_TO_AGINO(ip->i_mount, ino)))
                ASSERT(0);
        xfs_perag_clear_reclaim_tag(pag);
        spin_unlock(&pag->pag_ici_lock);

        /*
         * Here we do an (almost) spurious inode lock in order to coordinate
         * with inode cache radix tree lookups.  This is because the lookup
         * can reference the inodes in the cache without taking references.
         *
         * We make that OK here by ensuring that we wait until the inode is
         * unlocked after the lookup before we go ahead and free it.
         */
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_qm_dqdetach(ip);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        __xfs_inode_free(ip);
        return error;

out_ifunlock:
        xfs_ifunlock(ip);
out:
        xfs_iflags_clear(ip, XFS_IRECLAIM);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        /*
         * We could return -EAGAIN here to make reclaim rescan the inode tree
         * in a short while. However, this just burns CPU time scanning the
         * tree waiting for IO to complete and the reclaim work never goes
         * back to the idle state. Instead, return 0 to let the next scheduled
         * background reclaim attempt to reclaim the inode again.
         */
        return 0;
}

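/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shut down during filesystem unmount reclaim walk leak can lead to
 * unreclaimable inodes being left behind.
 */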
STATIC int
xfs_reclaim_inodes_ag(
        struct xfs_mount        *mp,
        int                     flags,
        int                     *nr_to_scan)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;
        int                     trylock = flags & SYNC_TRYLOCK;
        int                     skipped;

restart:
        ag = 0;
        skipped = 0;
        while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
                unsigned long   first_index = 0;
                int             done = 0;
                int             nr_found = 0;

                ag = pag->pag_agno + 1;

                if (trylock) {
                        if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
                                skipped++;
                                xfs_perag_put(pag);
                                continue;
                        }
                        first_index = pag->pag_ici_reclaim_cursor;
                } else
                        mutex_lock(&pag->pag_ici_reclaim_lock);

                do {
                        struct xfs_inode *batch[XFS_LOOKUP_BATCH];
                        int     i;

                        rcu_read_lock();
                        nr_found = radix_tree_gang_lookup_tag(
                                        &pag->pag_ici_root,
                                        (void **)batch, first_index,
                                        XFS_LOOKUP_BATCH,
                                        XFS_ICI_RECLAIM_TAG);
                        if (!nr_found) {
                                done = 1;
                                rcu_read_unlock();
                                break;
                        }

                        /*
                         * Grab the inodes before we drop the lock. if we found
                         * nothing, nr == 0 and the loop will be skipped.
                         */
                        for (i = 0; i < nr_found; i++) {
                                struct xfs_inode *ip = batch[i];

                                if (done || xfs_reclaim_inode_grab(ip, flags))
                                        batch[i] = NULL;

                                /*
                                 * Update the index for the next lookup. Catch
                                 * overflows into the next AG range which can
                                 * occur if we have inodes in the last block
                                 * of the AG and we are currently pointing to
                                 * the last inode.
                                 *
                                 * Because we may see inodes that are from the
                                 * wrong AG due to RCU freeing and
                                 * reallocation, only update the index if it
                                 * lies in this AG. It was a race that lead us
                                 * to see this inode, so another lookup from
                                 * the same index will not find it again.
                                 */
                                if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
                                                pag->pag_agno)
                                        continue;
                                first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
                                if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
                                        done = 1;
                        }

                        /* unlock now we've grabbed the inodes. */
                        rcu_read_unlock();

                        for (i = 0; i < nr_found; i++) {
                                if (!batch[i])
                                        continue;
                                error = xfs_reclaim_inode(batch[i], pag, flags);
                                if (error && last_error != -EFSCORRUPTED)
                                        last_error = error;
                        }

                        *nr_to_scan -= XFS_LOOKUP_BATCH;

                        cond_resched();

                } while (nr_found && !done && *nr_to_scan > 0);

                if (trylock && !done)
                        pag->pag_ici_reclaim_cursor = first_index;
                else
                        pag->pag_ici_reclaim_cursor = 0;
                mutex_unlock(&pag->pag_ici_reclaim_lock);
                xfs_perag_put(pag);
        }

        /*
         * if we skipped any AG, and we still have scan count remaining, do
         * another pass this time using blocking reclaim semantics (i.e
         * waiting on the reclaim locks and ignoring the reclaim cursors). This
         * ensures that when we get more reclaimers than AGs we block rather
         * than spin trying to execute reclaim.
         */
        if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
                trylock = 0;
                goto restart;
        }
        return last_error;
}

int
xfs_reclaim_inodes(
        xfs_mount_t     *mp,
        int             mode)
{
        int             nr_to_scan = INT_MAX;

        return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

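/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim via doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */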
long
xfs_reclaim_inodes_nr(
        struct xfs_mount        *mp,
        int                     nr_to_scan)
{
        /* kick background reclaimer and push the AIL */
        xfs_reclaim_work_queue(mp);
        xfs_ail_push_all(mp->m_ail);

        return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

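/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */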
int
xfs_reclaim_inodes_count(
        struct xfs_mount        *mp)
{
        struct xfs_perag        *pag;
        xfs_agnumber_t          ag = 0;
        int                     reclaimable = 0;

        while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
                ag = pag->pag_agno + 1;
                reclaimable += pag->pag_ici_reclaimable;
                xfs_perag_put(pag);
        }
        return reclaimable;
}

STATIC int
xfs_inode_match_id(
        struct xfs_inode        *ip,
        struct xfs_eofblocks    *eofb)
{
        if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
            !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
                return 0;

        if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
            !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
                return 0;

        if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
            xfs_get_projid(ip) != eofb->eof_prid)
                return 0;

        return 1;
}

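/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */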
STATIC int
xfs_inode_match_id_union(
        struct xfs_inode        *ip,
        struct xfs_eofblocks    *eofb)
{
        if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
            uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
                return 1;

        if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
            gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
                return 1;

        if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
            xfs_get_projid(ip) == eofb->eof_prid)
                return 1;

        return 0;
}

STATIC int
xfs_inode_free_eofblocks(
        struct xfs_inode        *ip,
        int                     flags,
        void                    *args)
{
        int ret = 0;
        struct xfs_eofblocks *eofb = args;
        int match;

        if (!xfs_can_free_eofblocks(ip, false)) {
                /* inode could be preallocated or append-only */
                trace_xfs_inode_free_eofblocks_invalid(ip);
                xfs_inode_clear_eofblocks_tag(ip);
                return 0;
        }

        /*
         * If the mapping is dirty the operation can block and wait for some
         * time. Unless we are waiting, skip it.
         */
        if (!(flags & SYNC_WAIT) &&
            mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
                return 0;

        if (eofb) {
                if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
                        match = xfs_inode_match_id_union(ip, eofb);
                else
                        match = xfs_inode_match_id(ip, eofb);
                if (!match)
                        return 0;

                /* skip the inode if the file size is below the min threshold */
                if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
                    XFS_ISIZE(ip) < eofb->eof_min_file_size)
                        return 0;
        }

        /*
         * If the caller is waiting, return -EAGAIN to keep the background
         * scanner moving and revisit the inode in a subsequent pass.
         */
        if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
                if (flags & SYNC_WAIT)
                        ret = -EAGAIN;
                return ret;
        }
        ret = xfs_free_eofblocks(ip);
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);

        return ret;
}

static int
__xfs_icache_free_eofblocks(
        struct xfs_mount        *mp,
        struct xfs_eofblocks    *eofb,
        int                     (*execute)(struct xfs_inode *ip, int flags,
                                           void *args),
        int                     tag)
{
        int flags = SYNC_TRYLOCK;

        if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
                flags = SYNC_WAIT;

        return xfs_inode_ag_iterator_tag(mp, execute, flags,
                                         eofb, tag);
}

int
xfs_icache_free_eofblocks(
        struct xfs_mount        *mp,
        struct xfs_eofblocks    *eofb)
{
        return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks,
                                           XFS_ICI_EOFBLOCKS_TAG);
}

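/*
 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
 * multiple quotas, we don't know exactly which quota caused an allocation
 * failure. We make a best effort by including each quota under low free
 * space conditions (less than 1% free space) in the scan.
 */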
static int
__xfs_inode_free_quota_eofblocks(
        struct xfs_inode        *ip,
        int                     (*execute)(struct xfs_mount *mp,
                                           struct xfs_eofblocks *eofb))
{
        int scan = 0;
        struct xfs_eofblocks eofb = {0};
        struct xfs_dquot *dq;

        /*
         * Run a sync scan to increase effectiveness and use the union filter
         * to cover all applicable quotas in a single scan.
         */
        eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

        if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
                dq = xfs_inode_dquot(ip, XFS_DQ_USER);
                if (dq && xfs_dquot_lowsp(dq)) {
                        eofb.eof_uid = VFS_I(ip)->i_uid;
                        eofb.eof_flags |= XFS_EOF_FLAGS_UID;
                        scan = 1;
                }
        }

        if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
                dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
                if (dq && xfs_dquot_lowsp(dq)) {
                        eofb.eof_gid = VFS_I(ip)->i_gid;
                        eofb.eof_flags |= XFS_EOF_FLAGS_GID;
                        scan = 1;
                }
        }

        if (scan)
                execute(ip->i_mount, &eofb);

        return scan;
}

int
xfs_inode_free_quota_eofblocks(
        struct xfs_inode *ip)
{
        return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
}

static inline unsigned long
xfs_iflag_for_tag(
        int             tag)
{
        switch (tag) {
        case XFS_ICI_EOFBLOCKS_TAG:
                return XFS_IEOFBLOCKS;
        case XFS_ICI_COWBLOCKS_TAG:
                return XFS_ICOWBLOCKS;
        default:
                ASSERT(0);
                return 0;
        }
}

static void
__xfs_inode_set_blocks_tag(
        xfs_inode_t     *ip,
        void            (*execute)(struct xfs_mount *mp),
        void            (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
                                  int error, unsigned long caller_ip),
        int             tag)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_perag *pag;
        int tagged;

        /*
         * Don't bother locking the AG and looking up in the radix trees
         * if we already know that we have the tag set.
         */
        if (ip->i_flags & xfs_iflag_for_tag(tag))
                return;
        spin_lock(&ip->i_flags_lock);
        ip->i_flags |= xfs_iflag_for_tag(tag);
        spin_unlock(&ip->i_flags_lock);

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);

        tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
        radix_tree_tag_set(&pag->pag_ici_root,
                           XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
        if (!tagged) {
                /* propagate the tag up into the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_set(&ip->i_mount->m_perag_tree,
                                   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                   tag);
                spin_unlock(&ip->i_mount->m_perag_lock);

                /* kick off background trimming */
                execute(ip->i_mount);

                set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
        }

        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
        xfs_inode_t     *ip)
{
        trace_xfs_inode_set_eofblocks_tag(ip);
        return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
                        trace_xfs_perag_set_eofblocks,
                        XFS_ICI_EOFBLOCKS_TAG);
}

static void
__xfs_inode_clear_blocks_tag(
        xfs_inode_t     *ip,
        void            (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
                                    int error, unsigned long caller_ip),
        int             tag)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_perag *pag;

        spin_lock(&ip->i_flags_lock);
        ip->i_flags &= ~xfs_iflag_for_tag(tag);
        spin_unlock(&ip->i_flags_lock);

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);

        radix_tree_tag_clear(&pag->pag_ici_root,
                             XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
        if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
                /* clear the tag from the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
                                     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                     tag);
                spin_unlock(&ip->i_mount->m_perag_lock);
                clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
        }

        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
        xfs_inode_t     *ip)
{
        trace_xfs_inode_clear_eofblocks_tag(ip);
        return __xfs_inode_clear_blocks_tag(ip,
                        trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
}

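/*
 * Decide whether the CoW fork of this inode can be garbage collected right
 * now; clears the cowblocks tag if the fork turns out to be empty.
 */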
static bool
xfs_prep_free_cowblocks(
        struct xfs_inode        *ip)
{
        /*
         * Just clear the tag if we have an empty cow fork or none at all. It's
         * possible the inode was fully unshared since it was originally
         * tagged.
         */
        if (!xfs_inode_has_cow_data(ip)) {
                trace_xfs_inode_free_cowblocks_invalid(ip);
                xfs_inode_clear_cowblocks_tag(ip);
                return false;
        }

        /*
         * If the mapping is dirty or under writeback we cannot touch the
         * CoW fork.  Leave it alone if we're in the midst of a directio.
         */
        if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
            mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
            mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
            atomic_read(&VFS_I(ip)->i_dio_count))
                return false;

        return true;
}

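/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long. If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */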
STATIC int
xfs_inode_free_cowblocks(
        struct xfs_inode        *ip,
        int                     flags,
        void                    *args)
{
        struct xfs_eofblocks    *eofb = args;
        int                     match;
        int                     ret = 0;

        if (!xfs_prep_free_cowblocks(ip))
                return 0;

        if (eofb) {
                if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
                        match = xfs_inode_match_id_union(ip, eofb);
                else
                        match = xfs_inode_match_id(ip, eofb);
                if (!match)
                        return 0;

                /* skip the inode if the file size is below the min threshold */
                if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
                    XFS_ISIZE(ip) < eofb->eof_min_file_size)
                        return 0;
        }

        /* Free the CoW blocks */
        xfs_ilock(ip, XFS_IOLOCK_EXCL);
        xfs_ilock(ip, XFS_MMAPLOCK_EXCL);

        /*
         * Check again, nobody else should be able to dirty blocks or change
         * the reflink iflag now that we have the first two locks held.
         */
        if (xfs_prep_free_cowblocks(ip))
                ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);

        xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);

        return ret;
}

int
xfs_icache_free_cowblocks(
        struct xfs_mount        *mp,
        struct xfs_eofblocks    *eofb)
{
        return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks,
                                           XFS_ICI_COWBLOCKS_TAG);
}

int
xfs_inode_free_quota_cowblocks(
        struct xfs_inode *ip)
{
        return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
}

void
xfs_inode_set_cowblocks_tag(
        xfs_inode_t     *ip)
{
        trace_xfs_inode_set_cowblocks_tag(ip);
        return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
                        trace_xfs_perag_set_cowblocks,
                        XFS_ICI_COWBLOCKS_TAG);
}

void
xfs_inode_clear_cowblocks_tag(
        xfs_inode_t     *ip)
{
        trace_xfs_inode_clear_cowblocks_tag(ip);
        return __xfs_inode_clear_blocks_tag(ip,
                        trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
}

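/* Disable post-EOF and CoW block auto-reclamation. */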
void
xfs_stop_block_reaping(
        struct xfs_mount        *mp)
{
        cancel_delayed_work_sync(&mp->m_eofblocks_work);
        cancel_delayed_work_sync(&mp->m_cowblocks_work);
}

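/* Enable post-EOF and CoW block auto-reclamation. */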
void
xfs_start_block_reaping(
        struct xfs_mount        *mp)
{
        xfs_queue_eofblocks(mp);
        xfs_queue_cowblocks(mp);
}