This source file includes the following definitions:
- xfs_get_extsz_hint
- xfs_get_cowextsz_hint
- xfs_ilock_data_map_shared
- xfs_ilock_attr_map_shared
- xfs_ilock
- xfs_ilock_nowait
- xfs_iunlock
- xfs_ilock_demote
- xfs_isilocked
- xfs_lockdep_subclass_ok
- xfs_lock_inumorder
- xfs_lock_inodes
- xfs_lock_two_inodes
- __xfs_iflock
- _xfs_dic2xflags
- xfs_ip2xflags
- xfs_lookup
- xfs_ialloc
- xfs_dir_ialloc
- xfs_droplink
- xfs_bumplink
- xfs_create
- xfs_create_tmpfile
- xfs_link
- xfs_itruncate_clear_reflink_flags
- xfs_itruncate_extents_flags
- xfs_release
- xfs_inactive_truncate
- xfs_inactive_ifree
- xfs_inactive
- xfs_iunlink_obj_cmpfn
- xfs_iunlink_lookup_backref
- xfs_iunlink_insert_backref
- xfs_iunlink_add_backref
- xfs_iunlink_change_backref
- xfs_iunlink_init
- xfs_iunlink_free_item
- xfs_iunlink_destroy
- xfs_iunlink_update_bucket
- xfs_iunlink_update_dinode
- xfs_iunlink_update_inode
- xfs_iunlink
- xfs_iunlink_map_ino
- xfs_iunlink_map_prev
- xfs_iunlink_remove
- xfs_ifree_cluster
- xfs_ifree_local_data
- xfs_ifree
- xfs_iunpin
- __xfs_iunpin_wait
- xfs_iunpin_wait
- xfs_remove
- xfs_sort_for_rename
- xfs_finish_rename
- xfs_cross_rename
- xfs_rename_alloc_whiteout
- xfs_rename
- xfs_iflush_cluster
- xfs_iflush
- xfs_inode_verify_forks
- xfs_iflush_int
- xfs_irele
1
2
3
4
5
6 #include <linux/iversion.h>
7
8 #include "xfs.h"
9 #include "xfs_fs.h"
10 #include "xfs_shared.h"
11 #include "xfs_format.h"
12 #include "xfs_log_format.h"
13 #include "xfs_trans_resv.h"
14 #include "xfs_sb.h"
15 #include "xfs_mount.h"
16 #include "xfs_defer.h"
17 #include "xfs_inode.h"
18 #include "xfs_dir2.h"
19 #include "xfs_attr.h"
20 #include "xfs_trans_space.h"
21 #include "xfs_trans.h"
22 #include "xfs_buf_item.h"
23 #include "xfs_inode_item.h"
24 #include "xfs_ialloc.h"
25 #include "xfs_bmap.h"
26 #include "xfs_bmap_util.h"
27 #include "xfs_errortag.h"
28 #include "xfs_error.h"
29 #include "xfs_quota.h"
30 #include "xfs_filestream.h"
31 #include "xfs_trace.h"
32 #include "xfs_icache.h"
33 #include "xfs_symlink.h"
34 #include "xfs_trans_priv.h"
35 #include "xfs_log.h"
36 #include "xfs_bmap_btree.h"
37 #include "xfs_reflink.h"
38
39 kmem_zone_t *xfs_inode_zone;
40
41
42
43
44
45 #define XFS_ITRUNC_MAX_EXTENTS 2
46
47 STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *);
48 STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
49 STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);
50
51
52
53
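/*
 * Return the preferred extent size hint for allocations to this inode:
 * the per-inode extent size if one is set, the realtime extent size for
 * realtime files, or zero when there is no hint.
 */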
54 xfs_extlen_t
55 xfs_get_extsz_hint(
56 struct xfs_inode *ip)
57 {
58 if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
59 return ip->i_d.di_extsize;
60 if (XFS_IS_REALTIME_INODE(ip))
61 return ip->i_mount->m_sb.sb_rextsize;
62 return 0;
63 }
64
65
66
67
68
69
70
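/*
 * Return the copy-on-write extent size hint: the larger of the CoW
 * extent size and the regular extent size hint, falling back to the
 * default CoW hint when both are zero.
 */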
71 xfs_extlen_t
72 xfs_get_cowextsz_hint(
73 struct xfs_inode *ip)
74 {
75 xfs_extlen_t a, b;
76
77 a = 0;
78 if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
79 a = ip->i_d.di_cowextsize;
80 b = xfs_get_extsz_hint(ip);
81
82 a = max(a, b);
83 if (a == 0)
84 return XFS_DEFAULT_COWEXTSZ_HINT;
85 return a;
86 }
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
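/*
 * Lock the inode for reading the data fork extent map.  A shared ILOCK
 * normally suffices, but if the fork is in btree format and the extent
 * list has not been read in yet, the lock is taken exclusively so the
 * extents can be brought in-core.
 */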
103 uint
104 xfs_ilock_data_map_shared(
105 struct xfs_inode *ip)
106 {
107 uint lock_mode = XFS_ILOCK_SHARED;
108
109 if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
110 (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
111 lock_mode = XFS_ILOCK_EXCL;
112 xfs_ilock(ip, lock_mode);
113 return lock_mode;
114 }
115
116 uint
117 xfs_ilock_attr_map_shared(
118 struct xfs_inode *ip)
119 {
120 uint lock_mode = XFS_ILOCK_SHARED;
121
122 if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
123 (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
124 lock_mode = XFS_ILOCK_EXCL;
125 xfs_ilock(ip, lock_mode);
126 return lock_mode;
127 }
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
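/*
 * Acquire the combination of inode locks requested in lock_flags.  The
 * i_rwsem (IOLOCK), the MMAPLOCK and the ILOCK are always taken in that
 * order, with the lockdep subclass encoded in lock_flags applied to each.
 */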
159 void
160 xfs_ilock(
161 xfs_inode_t *ip,
162 uint lock_flags)
163 {
164 trace_xfs_ilock(ip, lock_flags, _RET_IP_);
165
166
167
168
169
170
171 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
172 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
173 ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
174 (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
175 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
176 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
177 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
178
179 if (lock_flags & XFS_IOLOCK_EXCL) {
180 down_write_nested(&VFS_I(ip)->i_rwsem,
181 XFS_IOLOCK_DEP(lock_flags));
182 } else if (lock_flags & XFS_IOLOCK_SHARED) {
183 down_read_nested(&VFS_I(ip)->i_rwsem,
184 XFS_IOLOCK_DEP(lock_flags));
185 }
186
187 if (lock_flags & XFS_MMAPLOCK_EXCL)
188 mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
189 else if (lock_flags & XFS_MMAPLOCK_SHARED)
190 mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
191
192 if (lock_flags & XFS_ILOCK_EXCL)
193 mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
194 else if (lock_flags & XFS_ILOCK_SHARED)
195 mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
196 }
197
198
199
200
201
202
203
204
205
206
207
208
209
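/*
 * Trylock variant of xfs_ilock().  Returns 1 if every requested lock was
 * obtained; otherwise any locks already taken are dropped and 0 is
 * returned.
 */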
210 int
211 xfs_ilock_nowait(
212 xfs_inode_t *ip,
213 uint lock_flags)
214 {
215 trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
216
217
218
219
220
221
222 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
223 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
224 ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
225 (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
226 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
227 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
228 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
229
230 if (lock_flags & XFS_IOLOCK_EXCL) {
231 if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
232 goto out;
233 } else if (lock_flags & XFS_IOLOCK_SHARED) {
234 if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
235 goto out;
236 }
237
238 if (lock_flags & XFS_MMAPLOCK_EXCL) {
239 if (!mrtryupdate(&ip->i_mmaplock))
240 goto out_undo_iolock;
241 } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
242 if (!mrtryaccess(&ip->i_mmaplock))
243 goto out_undo_iolock;
244 }
245
246 if (lock_flags & XFS_ILOCK_EXCL) {
247 if (!mrtryupdate(&ip->i_lock))
248 goto out_undo_mmaplock;
249 } else if (lock_flags & XFS_ILOCK_SHARED) {
250 if (!mrtryaccess(&ip->i_lock))
251 goto out_undo_mmaplock;
252 }
253 return 1;
254
255 out_undo_mmaplock:
256 if (lock_flags & XFS_MMAPLOCK_EXCL)
257 mrunlock_excl(&ip->i_mmaplock);
258 else if (lock_flags & XFS_MMAPLOCK_SHARED)
259 mrunlock_shared(&ip->i_mmaplock);
260 out_undo_iolock:
261 if (lock_flags & XFS_IOLOCK_EXCL)
262 up_write(&VFS_I(ip)->i_rwsem);
263 else if (lock_flags & XFS_IOLOCK_SHARED)
264 up_read(&VFS_I(ip)->i_rwsem);
265 out:
266 return 0;
267 }
268
269
270
271
272
273
274
275
276
277
278
279
280
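/*
 * Release the inode locks named in lock_flags; the caller must hold each
 * of them in the mode being released.
 */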
281 void
282 xfs_iunlock(
283 xfs_inode_t *ip,
284 uint lock_flags)
285 {
286
287
288
289
290
291 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
292 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
293 ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
294 (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
295 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
296 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
297 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
298 ASSERT(lock_flags != 0);
299
300 if (lock_flags & XFS_IOLOCK_EXCL)
301 up_write(&VFS_I(ip)->i_rwsem);
302 else if (lock_flags & XFS_IOLOCK_SHARED)
303 up_read(&VFS_I(ip)->i_rwsem);
304
305 if (lock_flags & XFS_MMAPLOCK_EXCL)
306 mrunlock_excl(&ip->i_mmaplock);
307 else if (lock_flags & XFS_MMAPLOCK_SHARED)
308 mrunlock_shared(&ip->i_mmaplock);
309
310 if (lock_flags & XFS_ILOCK_EXCL)
311 mrunlock_excl(&ip->i_lock);
312 else if (lock_flags & XFS_ILOCK_SHARED)
313 mrunlock_shared(&ip->i_lock);
314
315 trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
316 }
317
318
319
320
321
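/*
 * Demote the exclusive locks named in lock_flags to their shared
 * counterparts without dropping them.
 */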
322 void
323 xfs_ilock_demote(
324 xfs_inode_t *ip,
325 uint lock_flags)
326 {
327 ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
328 ASSERT((lock_flags &
329 ~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
330
331 if (lock_flags & XFS_ILOCK_EXCL)
332 mrdemote(&ip->i_lock);
333 if (lock_flags & XFS_MMAPLOCK_EXCL)
334 mrdemote(&ip->i_mmaplock);
335 if (lock_flags & XFS_IOLOCK_EXCL)
336 downgrade_write(&VFS_I(ip)->i_rwsem);
337
338 trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
339 }
340
341 #if defined(DEBUG) || defined(XFS_WARN)
342 int
343 xfs_isilocked(
344 xfs_inode_t *ip,
345 uint lock_flags)
346 {
347 if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
348 if (!(lock_flags & XFS_ILOCK_SHARED))
349 return !!ip->i_lock.mr_writer;
350 return rwsem_is_locked(&ip->i_lock.mr_lock);
351 }
352
353 if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
354 if (!(lock_flags & XFS_MMAPLOCK_SHARED))
355 return !!ip->i_mmaplock.mr_writer;
356 return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
357 }
358
359 if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
360 if (!(lock_flags & XFS_IOLOCK_SHARED))
361 return !debug_locks ||
362 lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
363 return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
364 }
365
366 ASSERT(0);
367 return 0;
368 }
369 #endif
370
371
372
373
374
375
376
377 #if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
378 static bool
379 xfs_lockdep_subclass_ok(
380 int subclass)
381 {
382 return subclass < MAX_LOCKDEP_SUBCLASSES;
383 }
384 #else
385 #define xfs_lockdep_subclass_ok(subclass) (true)
386 #endif
387
388
389
390
391
392
393
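/*
 * Fold the lockdep subclass into the lock mode so that taking the
 * IOLOCK, MMAPLOCK or ILOCK on several inodes in one operation gets a
 * distinct lockdep annotation for each inode.
 */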
394 static inline int
395 xfs_lock_inumorder(int lock_mode, int subclass)
396 {
397 int class = 0;
398
399 ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
400 XFS_ILOCK_RTSUM)));
401 ASSERT(xfs_lockdep_subclass_ok(subclass));
402
403 if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
404 ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
405 class += subclass << XFS_IOLOCK_SHIFT;
406 }
407
408 if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
409 ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
410 class += subclass << XFS_MMAPLOCK_SHIFT;
411 }
412
413 if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
414 ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
415 class += subclass << XFS_ILOCK_SHIFT;
416 }
417
418 return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
419 }
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
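/*
 * Lock between two and five inodes, which the caller has sorted into
 * ascending inode number order.  Once a previously locked inode is seen
 * with its log item in the AIL, the remaining inodes are only trylocked;
 * on failure everything is unlocked and the whole sequence is retried.
 */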
436 static void
437 xfs_lock_inodes(
438 struct xfs_inode **ips,
439 int inodes,
440 uint lock_mode)
441 {
442 int attempts = 0, i, j, try_lock;
443 struct xfs_log_item *lp;
444
445
446
447
448
449
450
451
452 ASSERT(ips && inodes >= 2 && inodes <= 5);
453 ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
454 XFS_ILOCK_EXCL));
455 ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
456 XFS_ILOCK_SHARED)));
457 ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
458 inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
459 ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
460 inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
461
462 if (lock_mode & XFS_IOLOCK_EXCL) {
463 ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
464 } else if (lock_mode & XFS_MMAPLOCK_EXCL)
465 ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
466
467 try_lock = 0;
468 i = 0;
469 again:
470 for (; i < inodes; i++) {
471 ASSERT(ips[i]);
472
473 if (i && (ips[i] == ips[i - 1]))
474 continue;
475
476
477
478
479
480 if (!try_lock) {
481 for (j = (i - 1); j >= 0 && !try_lock; j--) {
482 lp = &ips[j]->i_itemp->ili_item;
483 if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
484 try_lock++;
485 }
486 }
487
488
489
490
491
492
493
494 if (!try_lock) {
495 xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
496 continue;
497 }
498
499
500 ASSERT(i != 0);
501 if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
502 continue;
503
504
505
506
507
508 attempts++;
509 for (j = i - 1; j >= 0; j--) {
510
511
512
513
514
515 if (j != (i - 1) && ips[j] == ips[j + 1])
516 continue;
517
518 xfs_iunlock(ips[j], lock_mode);
519 }
520
521 if ((attempts % 5) == 0) {
522 delay(1);
523 }
524 i = 0;
525 try_lock = 0;
526 goto again;
527 }
528 }
529
530
531
532
533
534
535
536
537
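/*
 * Lock two inodes in ascending inode number order.  As in
 * xfs_lock_inodes(), if the first inode has a log item in the AIL the
 * second is only trylocked, backing off and retrying on failure.
 */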
538 void
539 xfs_lock_two_inodes(
540 struct xfs_inode *ip0,
541 uint ip0_mode,
542 struct xfs_inode *ip1,
543 uint ip1_mode)
544 {
545 struct xfs_inode *temp;
546 uint mode_temp;
547 int attempts = 0;
548 struct xfs_log_item *lp;
549
550 ASSERT(hweight32(ip0_mode) == 1);
551 ASSERT(hweight32(ip1_mode) == 1);
552 ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
553 ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
554 ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
555 !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
556 ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
557 !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
558 ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
559 !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
560 ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
561 !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
562
563 ASSERT(ip0->i_ino != ip1->i_ino);
564
565 if (ip0->i_ino > ip1->i_ino) {
566 temp = ip0;
567 ip0 = ip1;
568 ip1 = temp;
569 mode_temp = ip0_mode;
570 ip0_mode = ip1_mode;
571 ip1_mode = mode_temp;
572 }
573
574 again:
575 xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
576
577
578
579
580
581
582 lp = &ip0->i_itemp->ili_item;
583 if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
584 if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
585 xfs_iunlock(ip0, ip0_mode);
586 if ((++attempts % 5) == 0)
587 delay(1);
588 goto again;
589 }
590 } else {
591 xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
592 }
593 }
594
595 void
596 __xfs_iflock(
597 struct xfs_inode *ip)
598 {
599 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
600 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
601
602 do {
603 prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
604 if (xfs_isiflocked(ip))
605 io_schedule();
606 } while (!xfs_iflock_nowait(ip));
607
608 finish_wait(wq, &wait.wq_entry);
609 }
610
611 STATIC uint
612 _xfs_dic2xflags(
613 uint16_t di_flags,
614 uint64_t di_flags2,
615 bool has_attr)
616 {
617 uint flags = 0;
618
619 if (di_flags & XFS_DIFLAG_ANY) {
620 if (di_flags & XFS_DIFLAG_REALTIME)
621 flags |= FS_XFLAG_REALTIME;
622 if (di_flags & XFS_DIFLAG_PREALLOC)
623 flags |= FS_XFLAG_PREALLOC;
624 if (di_flags & XFS_DIFLAG_IMMUTABLE)
625 flags |= FS_XFLAG_IMMUTABLE;
626 if (di_flags & XFS_DIFLAG_APPEND)
627 flags |= FS_XFLAG_APPEND;
628 if (di_flags & XFS_DIFLAG_SYNC)
629 flags |= FS_XFLAG_SYNC;
630 if (di_flags & XFS_DIFLAG_NOATIME)
631 flags |= FS_XFLAG_NOATIME;
632 if (di_flags & XFS_DIFLAG_NODUMP)
633 flags |= FS_XFLAG_NODUMP;
634 if (di_flags & XFS_DIFLAG_RTINHERIT)
635 flags |= FS_XFLAG_RTINHERIT;
636 if (di_flags & XFS_DIFLAG_PROJINHERIT)
637 flags |= FS_XFLAG_PROJINHERIT;
638 if (di_flags & XFS_DIFLAG_NOSYMLINKS)
639 flags |= FS_XFLAG_NOSYMLINKS;
640 if (di_flags & XFS_DIFLAG_EXTSIZE)
641 flags |= FS_XFLAG_EXTSIZE;
642 if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
643 flags |= FS_XFLAG_EXTSZINHERIT;
644 if (di_flags & XFS_DIFLAG_NODEFRAG)
645 flags |= FS_XFLAG_NODEFRAG;
646 if (di_flags & XFS_DIFLAG_FILESTREAM)
647 flags |= FS_XFLAG_FILESTREAM;
648 }
649
650 if (di_flags2 & XFS_DIFLAG2_ANY) {
651 if (di_flags2 & XFS_DIFLAG2_DAX)
652 flags |= FS_XFLAG_DAX;
653 if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
654 flags |= FS_XFLAG_COWEXTSIZE;
655 }
656
657 if (has_attr)
658 flags |= FS_XFLAG_HASATTR;
659
660 return flags;
661 }
662
663 uint
664 xfs_ip2xflags(
665 struct xfs_inode *ip)
666 {
667 struct xfs_icdinode *dic = &ip->i_d;
668
669 return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
670 }
671
672
673
674
675
676
677
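/*
 * Look up 'name' in directory dp and return the referenced in-core inode
 * in *ipp.  When ci_name is supplied the lookup is case-insensitive and
 * the name actually found is returned in ci_name.
 */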
678 int
679 xfs_lookup(
680 xfs_inode_t *dp,
681 struct xfs_name *name,
682 xfs_inode_t **ipp,
683 struct xfs_name *ci_name)
684 {
685 xfs_ino_t inum;
686 int error;
687
688 trace_xfs_lookup(dp, name);
689
690 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
691 return -EIO;
692
693 error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
694 if (error)
695 goto out_unlock;
696
697 error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
698 if (error)
699 goto out_free_name;
700
701 return 0;
702
703 out_free_name:
704 if (ci_name)
705 kmem_free(ci_name->name);
706 out_unlock:
707 *ipp = NULL;
708 return error;
709 }
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
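/*
 * Allocate an on-disk inode and initialize the in-core inode from the
 * parent and the supplied mode, link count, device and project id.  If
 * xfs_dialloc() cannot complete the allocation in this transaction it
 * returns with *ialloc_context set; the caller must then roll the
 * transaction and call xfs_ialloc() again (see xfs_dir_ialloc()).
 */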
742 static int
743 xfs_ialloc(
744 xfs_trans_t *tp,
745 xfs_inode_t *pip,
746 umode_t mode,
747 xfs_nlink_t nlink,
748 dev_t rdev,
749 prid_t prid,
750 xfs_buf_t **ialloc_context,
751 xfs_inode_t **ipp)
752 {
753 struct xfs_mount *mp = tp->t_mountp;
754 xfs_ino_t ino;
755 xfs_inode_t *ip;
756 uint flags;
757 int error;
758 struct timespec64 tv;
759 struct inode *inode;
760
761
762
763
764
765 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
766 ialloc_context, &ino);
767 if (error)
768 return error;
769 if (*ialloc_context || ino == NULLFSINO) {
770 *ipp = NULL;
771 return 0;
772 }
773 ASSERT(*ialloc_context == NULL);
774
775
776
777
778
779
780
781
782 if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
783 xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
784 return -EFSCORRUPTED;
785 }
786
787
788
789
790
791
792 error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
793 XFS_ILOCK_EXCL, &ip);
794 if (error)
795 return error;
796 ASSERT(ip != NULL);
797 inode = VFS_I(ip);
798
799
800
801
802
803
804 if (ip->i_d.di_version == 1)
805 ip->i_d.di_version = 2;
806
807 inode->i_mode = mode;
808 set_nlink(inode, nlink);
809 ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
810 ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
811 inode->i_rdev = rdev;
812 xfs_set_projid(ip, prid);
813
814 if (pip && XFS_INHERIT_GID(pip)) {
815 ip->i_d.di_gid = pip->i_d.di_gid;
816 if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
817 inode->i_mode |= S_ISGID;
818 }
819
820
821
822
823
824
825 if ((irix_sgid_inherit) &&
826 (inode->i_mode & S_ISGID) &&
827 (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
828 inode->i_mode &= ~S_ISGID;
829
830 ip->i_d.di_size = 0;
831 ip->i_d.di_nextents = 0;
832 ASSERT(ip->i_d.di_nblocks == 0);
833
834 tv = current_time(inode);
835 inode->i_mtime = tv;
836 inode->i_atime = tv;
837 inode->i_ctime = tv;
838
839 ip->i_d.di_extsize = 0;
840 ip->i_d.di_dmevmask = 0;
841 ip->i_d.di_dmstate = 0;
842 ip->i_d.di_flags = 0;
843
844 if (ip->i_d.di_version == 3) {
845 inode_set_iversion(inode, 1);
846 ip->i_d.di_flags2 = 0;
847 ip->i_d.di_cowextsize = 0;
848 ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
849 ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec;
850 }
851
852
853 flags = XFS_ILOG_CORE;
854 switch (mode & S_IFMT) {
855 case S_IFIFO:
856 case S_IFCHR:
857 case S_IFBLK:
858 case S_IFSOCK:
859 ip->i_d.di_format = XFS_DINODE_FMT_DEV;
860 ip->i_df.if_flags = 0;
861 flags |= XFS_ILOG_DEV;
862 break;
863 case S_IFREG:
864 case S_IFDIR:
865 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
866 uint di_flags = 0;
867
868 if (S_ISDIR(mode)) {
869 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
870 di_flags |= XFS_DIFLAG_RTINHERIT;
871 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
872 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
873 ip->i_d.di_extsize = pip->i_d.di_extsize;
874 }
875 if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
876 di_flags |= XFS_DIFLAG_PROJINHERIT;
877 } else if (S_ISREG(mode)) {
878 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
879 di_flags |= XFS_DIFLAG_REALTIME;
880 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
881 di_flags |= XFS_DIFLAG_EXTSIZE;
882 ip->i_d.di_extsize = pip->i_d.di_extsize;
883 }
884 }
885 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
886 xfs_inherit_noatime)
887 di_flags |= XFS_DIFLAG_NOATIME;
888 if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
889 xfs_inherit_nodump)
890 di_flags |= XFS_DIFLAG_NODUMP;
891 if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
892 xfs_inherit_sync)
893 di_flags |= XFS_DIFLAG_SYNC;
894 if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
895 xfs_inherit_nosymlinks)
896 di_flags |= XFS_DIFLAG_NOSYMLINKS;
897 if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
898 xfs_inherit_nodefrag)
899 di_flags |= XFS_DIFLAG_NODEFRAG;
900 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
901 di_flags |= XFS_DIFLAG_FILESTREAM;
902
903 ip->i_d.di_flags |= di_flags;
904 }
905 if (pip &&
906 (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
907 pip->i_d.di_version == 3 &&
908 ip->i_d.di_version == 3) {
909 uint64_t di_flags2 = 0;
910
911 if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
912 di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
913 ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
914 }
915 if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
916 di_flags2 |= XFS_DIFLAG2_DAX;
917
918 ip->i_d.di_flags2 |= di_flags2;
919 }
920
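/* S_IFREG and S_IFDIR fall through to the extents initialization. */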
921 case S_IFLNK:
922 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
923 ip->i_df.if_flags = XFS_IFEXTENTS;
924 ip->i_df.if_bytes = 0;
925 ip->i_df.if_u1.if_root = NULL;
926 break;
927 default:
928 ASSERT(0);
929 }
930
931
932
933 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
934 ip->i_d.di_anextents = 0;
935
936
937
938
939 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
940 xfs_trans_log_inode(tp, ip, flags);
941
942
943 xfs_setup_inode(ip);
944
945 *ipp = ip;
946 return 0;
947 }
948
949
950
951
952
953
954
955
956
957
958
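/*
 * Allocate an inode on behalf of a directory operation, handling the
 * case where xfs_ialloc() returns a held AGI buffer in ialloc_context:
 * the transaction is rolled (with quota state carried over) and the
 * allocation is retried.  The new, locked inode is returned in *ipp and
 * the current transaction in *tpp.
 */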
959 int
960 xfs_dir_ialloc(
961 xfs_trans_t **tpp,
962
963 xfs_inode_t *dp,
964
965 umode_t mode,
966 xfs_nlink_t nlink,
967 dev_t rdev,
968 prid_t prid,
969 xfs_inode_t **ipp)
970
971 {
972 xfs_trans_t *tp;
973 xfs_inode_t *ip;
974 xfs_buf_t *ialloc_context = NULL;
975 int code;
976 void *dqinfo;
977 uint tflags;
978
979 tp = *tpp;
980 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
998 &ip);
999
1000
1001
1002
1003
1004
1005 if (code) {
1006 *ipp = NULL;
1007 return code;
1008 }
1009 if (!ialloc_context && !ip) {
1010 *ipp = NULL;
1011 return -ENOSPC;
1012 }
1013
1014
1015
1016
1017
1018
1019
1020 if (ialloc_context) {
1021
1022
1023
1024
1025
1026
1027
1028 xfs_trans_bhold(tp, ialloc_context);
1029
1030
1031
1032
1033
1034
1035 dqinfo = NULL;
1036 tflags = 0;
1037 if (tp->t_dqinfo) {
1038 dqinfo = (void *)tp->t_dqinfo;
1039 tp->t_dqinfo = NULL;
1040 tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
1041 tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
1042 }
1043
1044 code = xfs_trans_roll(&tp);
1045
1046
1047
1048
1049 if (dqinfo) {
1050 tp->t_dqinfo = dqinfo;
1051 tp->t_flags |= tflags;
1052 }
1053
1054 if (code) {
1055 xfs_buf_relse(ialloc_context);
1056 *tpp = tp;
1057 *ipp = NULL;
1058 return code;
1059 }
1060 xfs_trans_bjoin(tp, ialloc_context);
1061
1062
1063
1064
1065
1066
1067 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
1068 &ialloc_context, &ip);
1069
1070
1071
1072
1073
1074 if (code) {
1075 *tpp = tp;
1076 *ipp = NULL;
1077 return code;
1078 }
1079 ASSERT(!ialloc_context && ip);
1080
1081 }
1082
1083 *ipp = ip;
1084 *tpp = tp;
1085
1086 return 0;
1087 }
1088
1089
1090
1091
1092
1093
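/*
 * Drop one link from the inode and log the change.  If the link count
 * reaches zero the inode is added to the AGI unlinked list.
 */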
1094 static int
1095 xfs_droplink(
1096 xfs_trans_t *tp,
1097 xfs_inode_t *ip)
1098 {
1099 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1100
1101 drop_nlink(VFS_I(ip));
1102 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1103
1104 if (VFS_I(ip)->i_nlink)
1105 return 0;
1106
1107 return xfs_iunlink(tp, ip);
1108 }
1109
1110
1111
1112
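/*
 * Add one link to the inode and log the inode core.
 */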
1113 static void
1114 xfs_bumplink(
1115 xfs_trans_t *tp,
1116 xfs_inode_t *ip)
1117 {
1118 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1119
1120 ASSERT(ip->i_d.di_version > 1);
1121 inc_nlink(VFS_I(ip));
1122 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1123 }
1124
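/*
 * Create a new file, directory or special file 'name' in directory dp:
 * reserve quota and transaction space, allocate the inode, add the
 * directory entry (and initialize "."/".." for directories) and commit.
 */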
1125 int
1126 xfs_create(
1127 xfs_inode_t *dp,
1128 struct xfs_name *name,
1129 umode_t mode,
1130 dev_t rdev,
1131 xfs_inode_t **ipp)
1132 {
1133 int is_dir = S_ISDIR(mode);
1134 struct xfs_mount *mp = dp->i_mount;
1135 struct xfs_inode *ip = NULL;
1136 struct xfs_trans *tp = NULL;
1137 int error;
1138 bool unlock_dp_on_error = false;
1139 prid_t prid;
1140 struct xfs_dquot *udqp = NULL;
1141 struct xfs_dquot *gdqp = NULL;
1142 struct xfs_dquot *pdqp = NULL;
1143 struct xfs_trans_res *tres;
1144 uint resblks;
1145
1146 trace_xfs_create(dp, name);
1147
1148 if (XFS_FORCED_SHUTDOWN(mp))
1149 return -EIO;
1150
1151 prid = xfs_get_initial_prid(dp);
1152
1153
1154
1155
1156 error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1157 xfs_kgid_to_gid(current_fsgid()), prid,
1158 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1159 &udqp, &gdqp, &pdqp);
1160 if (error)
1161 return error;
1162
1163 if (is_dir) {
1164 resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
1165 tres = &M_RES(mp)->tr_mkdir;
1166 } else {
1167 resblks = XFS_CREATE_SPACE_RES(mp, name->len);
1168 tres = &M_RES(mp)->tr_create;
1169 }
1170
1171
1172
1173
1174
1175
1176
1177 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1178 if (error == -ENOSPC) {
1179
1180 xfs_flush_inodes(mp);
1181 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1182 }
1183 if (error)
1184 goto out_release_inode;
1185
1186 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1187 unlock_dp_on_error = true;
1188
1189
1190
1191
1192 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1193 pdqp, resblks, 1, 0);
1194 if (error)
1195 goto out_trans_cancel;
1196
1197
1198
1199
1200
1201
1202 error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
1203 if (error)
1204 goto out_trans_cancel;
1205
1206
1207
1208
1209
1210
1211
1212
1213 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1214 unlock_dp_on_error = false;
1215
1216 error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1217 resblks ?
1218 resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
1219 if (error) {
1220 ASSERT(error != -ENOSPC);
1221 goto out_trans_cancel;
1222 }
1223 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1224 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1225
1226 if (is_dir) {
1227 error = xfs_dir_init(tp, ip, dp);
1228 if (error)
1229 goto out_trans_cancel;
1230
1231 xfs_bumplink(tp, dp);
1232 }
1233
1234
1235
1236
1237
1238
1239 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1240 xfs_trans_set_sync(tp);
1241
1242
1243
1244
1245
1246
1247 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1248
1249 error = xfs_trans_commit(tp);
1250 if (error)
1251 goto out_release_inode;
1252
1253 xfs_qm_dqrele(udqp);
1254 xfs_qm_dqrele(gdqp);
1255 xfs_qm_dqrele(pdqp);
1256
1257 *ipp = ip;
1258 return 0;
1259
1260 out_trans_cancel:
1261 xfs_trans_cancel(tp);
1262 out_release_inode:
1263
1264
1265
1266
1267
1268 if (ip) {
1269 xfs_finish_inode_setup(ip);
1270 xfs_irele(ip);
1271 }
1272
1273 xfs_qm_dqrele(udqp);
1274 xfs_qm_dqrele(gdqp);
1275 xfs_qm_dqrele(pdqp);
1276
1277 if (unlock_dp_on_error)
1278 xfs_iunlock(dp, XFS_ILOCK_EXCL);
1279 return error;
1280 }
1281
1282 int
1283 xfs_create_tmpfile(
1284 struct xfs_inode *dp,
1285 umode_t mode,
1286 struct xfs_inode **ipp)
1287 {
1288 struct xfs_mount *mp = dp->i_mount;
1289 struct xfs_inode *ip = NULL;
1290 struct xfs_trans *tp = NULL;
1291 int error;
1292 prid_t prid;
1293 struct xfs_dquot *udqp = NULL;
1294 struct xfs_dquot *gdqp = NULL;
1295 struct xfs_dquot *pdqp = NULL;
1296 struct xfs_trans_res *tres;
1297 uint resblks;
1298
1299 if (XFS_FORCED_SHUTDOWN(mp))
1300 return -EIO;
1301
1302 prid = xfs_get_initial_prid(dp);
1303
1304
1305
1306
1307 error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1308 xfs_kgid_to_gid(current_fsgid()), prid,
1309 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1310 &udqp, &gdqp, &pdqp);
1311 if (error)
1312 return error;
1313
1314 resblks = XFS_IALLOC_SPACE_RES(mp);
1315 tres = &M_RES(mp)->tr_create_tmpfile;
1316
1317 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1318 if (error)
1319 goto out_release_inode;
1320
1321 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1322 pdqp, resblks, 1, 0);
1323 if (error)
1324 goto out_trans_cancel;
1325
1326 error = xfs_dir_ialloc(&tp, dp, mode, 0, 0, prid, &ip);
1327 if (error)
1328 goto out_trans_cancel;
1329
1330 if (mp->m_flags & XFS_MOUNT_WSYNC)
1331 xfs_trans_set_sync(tp);
1332
1333
1334
1335
1336
1337
1338 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1339
1340 error = xfs_iunlink(tp, ip);
1341 if (error)
1342 goto out_trans_cancel;
1343
1344 error = xfs_trans_commit(tp);
1345 if (error)
1346 goto out_release_inode;
1347
1348 xfs_qm_dqrele(udqp);
1349 xfs_qm_dqrele(gdqp);
1350 xfs_qm_dqrele(pdqp);
1351
1352 *ipp = ip;
1353 return 0;
1354
1355 out_trans_cancel:
1356 xfs_trans_cancel(tp);
1357 out_release_inode:
1358
1359
1360
1361
1362
1363 if (ip) {
1364 xfs_finish_inode_setup(ip);
1365 xfs_irele(ip);
1366 }
1367
1368 xfs_qm_dqrele(udqp);
1369 xfs_qm_dqrele(gdqp);
1370 xfs_qm_dqrele(pdqp);
1371
1372 return error;
1373 }
1374
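/*
 * Create the hard link target_name in directory tdp pointing at sip and
 * bump sip's link count.  A zero-linked inode (e.g. a tmpfile being
 * linked back in) is first removed from the unlinked list.
 */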
1375 int
1376 xfs_link(
1377 xfs_inode_t *tdp,
1378 xfs_inode_t *sip,
1379 struct xfs_name *target_name)
1380 {
1381 xfs_mount_t *mp = tdp->i_mount;
1382 xfs_trans_t *tp;
1383 int error;
1384 int resblks;
1385
1386 trace_xfs_link(tdp, target_name);
1387
1388 ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1389
1390 if (XFS_FORCED_SHUTDOWN(mp))
1391 return -EIO;
1392
1393 error = xfs_qm_dqattach(sip);
1394 if (error)
1395 goto std_return;
1396
1397 error = xfs_qm_dqattach(tdp);
1398 if (error)
1399 goto std_return;
1400
1401 resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1402 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
1403 if (error == -ENOSPC) {
1404 resblks = 0;
1405 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
1406 }
1407 if (error)
1408 goto std_return;
1409
1410 xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
1411
1412 xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
1413 xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1414
1415
1416
1417
1418
1419
1420 if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1421 (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
1422 error = -EXDEV;
1423 goto error_return;
1424 }
1425
1426 if (!resblks) {
1427 error = xfs_dir_canenter(tp, tdp, target_name);
1428 if (error)
1429 goto error_return;
1430 }
1431
1432
1433
1434
1435 if (VFS_I(sip)->i_nlink == 0) {
1436 error = xfs_iunlink_remove(tp, sip);
1437 if (error)
1438 goto error_return;
1439 }
1440
1441 error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1442 resblks);
1443 if (error)
1444 goto error_return;
1445 xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1446 xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1447
1448 xfs_bumplink(tp, sip);
1449
1450
1451
1452
1453
1454
1455 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1456 xfs_trans_set_sync(tp);
1457
1458 return xfs_trans_commit(tp);
1459
1460 error_return:
1461 xfs_trans_cancel(tp);
1462 std_return:
1463 return error;
1464 }
1465
1466
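/*
 * After a truncate, clear the inode reflink flag once both the data and
 * CoW forks are empty, and drop the cowblocks tag once the CoW fork is
 * empty.
 */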
1467 static void
1468 xfs_itruncate_clear_reflink_flags(
1469 struct xfs_inode *ip)
1470 {
1471 struct xfs_ifork *dfork;
1472 struct xfs_ifork *cfork;
1473
1474 if (!xfs_is_reflink_inode(ip))
1475 return;
1476 dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1477 cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1478 if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1479 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1480 if (cfork->if_bytes == 0)
1481 xfs_inode_clear_cowblocks_tag(ip);
1482 }
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
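/*
 * Free the extents of the given fork from new_size to EOF, unmapping a
 * bounded number of extents per transaction and rolling the transaction
 * in between.  For the data fork any CoW reservations beyond the new
 * size are cancelled as well.  *tpp is returned pointing at the current
 * transaction.
 */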
1505 int
1506 xfs_itruncate_extents_flags(
1507 struct xfs_trans **tpp,
1508 struct xfs_inode *ip,
1509 int whichfork,
1510 xfs_fsize_t new_size,
1511 int flags)
1512 {
1513 struct xfs_mount *mp = ip->i_mount;
1514 struct xfs_trans *tp = *tpp;
1515 xfs_fileoff_t first_unmap_block;
1516 xfs_fileoff_t last_block;
1517 xfs_filblks_t unmap_len;
1518 int error = 0;
1519 int done = 0;
1520
1521 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1522 ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1523 xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1524 ASSERT(new_size <= XFS_ISIZE(ip));
1525 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1526 ASSERT(ip->i_itemp != NULL);
1527 ASSERT(ip->i_itemp->ili_lock_flags == 0);
1528 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1529
1530 trace_xfs_itruncate_extents_start(ip, new_size);
1531
1532 flags |= xfs_bmapi_aflag(whichfork);
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1544 last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1545 if (first_unmap_block == last_block)
1546 return 0;
1547
1548 ASSERT(first_unmap_block < last_block);
1549 unmap_len = last_block - first_unmap_block + 1;
1550 while (!done) {
1551 ASSERT(tp->t_firstblock == NULLFSBLOCK);
1552 error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
1553 XFS_ITRUNC_MAX_EXTENTS, &done);
1554 if (error)
1555 goto out;
1556
1557
1558
1559
1560
1561 error = xfs_defer_finish(&tp);
1562 if (error)
1563 goto out;
1564
1565 error = xfs_trans_roll_inode(&tp, ip);
1566 if (error)
1567 goto out;
1568 }
1569
1570 if (whichfork == XFS_DATA_FORK) {
1571
1572 error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1573 first_unmap_block, last_block, true);
1574 if (error)
1575 goto out;
1576
1577 xfs_itruncate_clear_reflink_flags(ip);
1578 }
1579
1580
1581
1582
1583
1584 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1585
1586 trace_xfs_itruncate_extents_end(ip, new_size);
1587
1588 out:
1589 *tpp = tp;
1590 return error;
1591 }
1592
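/*
 * Called when the last reference to an open file goes away: flush dirty
 * data left over from a truncate and, when it is safe and worthwhile,
 * free speculative preallocations beyond EOF.
 */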
1593 int
1594 xfs_release(
1595 xfs_inode_t *ip)
1596 {
1597 xfs_mount_t *mp = ip->i_mount;
1598 int error;
1599
1600 if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1601 return 0;
1602
1603
1604 if (mp->m_flags & XFS_MOUNT_RDONLY)
1605 return 0;
1606
1607 if (!XFS_FORCED_SHUTDOWN(mp)) {
1608 int truncated;
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1621 if (truncated) {
1622 xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1623 if (ip->i_delayed_blks > 0) {
1624 error = filemap_flush(VFS_I(ip)->i_mapping);
1625 if (error)
1626 return error;
1627 }
1628 }
1629 }
1630
1631 if (VFS_I(ip)->i_nlink == 0)
1632 return 0;
1633
1634 if (xfs_can_free_eofblocks(ip, false)) {
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650 if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1651 return 0;
1652
1653
1654
1655
1656
1657
1658
1659 if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1660 error = xfs_free_eofblocks(ip);
1661 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1662 if (error)
1663 return error;
1664 }
1665
1666
1667 if (ip->i_delayed_blks)
1668 xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1669 }
1670 return 0;
1671 }
1672
1673
1674
1675
1676
1677
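/*
 * Truncate an inode that is being inactivated down to zero length,
 * freeing all of its data fork extents.
 */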
1678 STATIC int
1679 xfs_inactive_truncate(
1680 struct xfs_inode *ip)
1681 {
1682 struct xfs_mount *mp = ip->i_mount;
1683 struct xfs_trans *tp;
1684 int error;
1685
1686 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1687 if (error) {
1688 ASSERT(XFS_FORCED_SHUTDOWN(mp));
1689 return error;
1690 }
1691 xfs_ilock(ip, XFS_ILOCK_EXCL);
1692 xfs_trans_ijoin(tp, ip, 0);
1693
1694
1695
1696
1697
1698
1699 ip->i_d.di_size = 0;
1700 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1701
1702 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1703 if (error)
1704 goto error_trans_cancel;
1705
1706 ASSERT(ip->i_d.di_nextents == 0);
1707
1708 error = xfs_trans_commit(tp);
1709 if (error)
1710 goto error_unlock;
1711
1712 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1713 return 0;
1714
1715 error_trans_cancel:
1716 xfs_trans_cancel(tp);
1717 error_unlock:
1718 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1719 return error;
1720 }
1721
1722
1723
1724
1725
1726
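/*
 * Free an inactivated inode whose link count has reached zero: allocate
 * an ifree transaction (dipping into reserves if the finobt has no
 * reservation), call xfs_ifree() and account the quota inode count.
 */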
1727 STATIC int
1728 xfs_inactive_ifree(
1729 struct xfs_inode *ip)
1730 {
1731 struct xfs_mount *mp = ip->i_mount;
1732 struct xfs_trans *tp;
1733 int error;
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746 if (unlikely(mp->m_finobt_nores)) {
1747 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1748 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1749 &tp);
1750 } else {
1751 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1752 }
1753 if (error) {
1754 if (error == -ENOSPC) {
1755 xfs_warn_ratelimited(mp,
1756 "Failed to remove inode(s) from unlinked list. "
1757 "Please free space, unmount and run xfs_repair.");
1758 } else {
1759 ASSERT(XFS_FORCED_SHUTDOWN(mp));
1760 }
1761 return error;
1762 }
1763
1764 xfs_ilock(ip, XFS_ILOCK_EXCL);
1765 xfs_trans_ijoin(tp, ip, 0);
1766
1767 error = xfs_ifree(tp, ip);
1768 if (error) {
1769
1770
1771
1772
1773
1774 if (!XFS_FORCED_SHUTDOWN(mp)) {
1775 xfs_notice(mp, "%s: xfs_ifree returned error %d",
1776 __func__, error);
1777 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1778 }
1779 xfs_trans_cancel(tp);
1780 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1781 return error;
1782 }
1783
1784
1785
1786
1787 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1788
1789
1790
1791
1792
1793 error = xfs_trans_commit(tp);
1794 if (error)
1795 xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
1796 __func__, error);
1797
1798 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1799 return 0;
1800 }
1801
1802
1803
1804
1805
1806
1807
1808
1809
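/*
 * Called when the last in-core reference to an inode goes away.  If the
 * link count is zero the inode is truncated, its attr fork torn down and
 * the inode freed; otherwise only speculative post-EOF blocks are
 * trimmed.
 */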
1810 void
1811 xfs_inactive(
1812 xfs_inode_t *ip)
1813 {
1814 struct xfs_mount *mp;
1815 int error;
1816 int truncate = 0;
1817
1818
1819
1820
1821
1822 if (VFS_I(ip)->i_mode == 0) {
1823 ASSERT(ip->i_df.if_broot_bytes == 0);
1824 return;
1825 }
1826
1827 mp = ip->i_mount;
1828 ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1829
1830
1831 if (mp->m_flags & XFS_MOUNT_RDONLY)
1832 return;
1833
1834
1835 if (xfs_inode_has_cow_data(ip))
1836 xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1837
1838 if (VFS_I(ip)->i_nlink != 0) {
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848 if (xfs_can_free_eofblocks(ip, true))
1849 xfs_free_eofblocks(ip);
1850
1851 return;
1852 }
1853
1854 if (S_ISREG(VFS_I(ip)->i_mode) &&
1855 (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
1856 ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
1857 truncate = 1;
1858
1859 error = xfs_qm_dqattach(ip);
1860 if (error)
1861 return;
1862
1863 if (S_ISLNK(VFS_I(ip)->i_mode))
1864 error = xfs_inactive_symlink(ip);
1865 else if (truncate)
1866 error = xfs_inactive_truncate(ip);
1867 if (error)
1868 return;
1869
1870
1871
1872
1873
1874
1875 if (XFS_IFORK_Q(ip)) {
1876 error = xfs_attr_inactive(ip);
1877 if (error)
1878 return;
1879 }
1880
1881 ASSERT(!ip->i_afp);
1882 ASSERT(ip->i_d.di_anextents == 0);
1883 ASSERT(ip->i_d.di_forkoff == 0);
1884
1885
1886
1887
1888 error = xfs_inactive_ifree(ip);
1889 if (error)
1890 return;
1891
1892
1893
1894
1895 xfs_qm_dqdetach(ip);
1896 }
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
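/*
 * In-core cache of "backref" entries for the AGI unlinked lists.  Each
 * entry records which inode points at a given unlinked inode, so that
 * removing an inode from the middle of a long unlinked chain does not
 * require walking the whole chain from the bucket head.
 */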
1930 struct xfs_iunlink {
1931 struct rhash_head iu_rhash_head;
1932 xfs_agino_t iu_agino;
1933 xfs_agino_t iu_next_unlinked;
1934 };
1935
1936
1937 static int
1938 xfs_iunlink_obj_cmpfn(
1939 struct rhashtable_compare_arg *arg,
1940 const void *obj)
1941 {
1942 const xfs_agino_t *key = arg->key;
1943 const struct xfs_iunlink *iu = obj;
1944
1945 if (iu->iu_next_unlinked != *key)
1946 return 1;
1947 return 0;
1948 }
1949
1950 static const struct rhashtable_params xfs_iunlink_hash_params = {
1951 .min_size = XFS_AGI_UNLINKED_BUCKETS,
1952 .key_len = sizeof(xfs_agino_t),
1953 .key_offset = offsetof(struct xfs_iunlink,
1954 iu_next_unlinked),
1955 .head_offset = offsetof(struct xfs_iunlink, iu_rhash_head),
1956 .automatic_shrinking = true,
1957 .obj_cmpfn = xfs_iunlink_obj_cmpfn,
1958 };
1959
1960
1961
1962
1963
1964 static xfs_agino_t
1965 xfs_iunlink_lookup_backref(
1966 struct xfs_perag *pag,
1967 xfs_agino_t agino)
1968 {
1969 struct xfs_iunlink *iu;
1970
1971 iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
1972 xfs_iunlink_hash_params);
1973 return iu ? iu->iu_agino : NULLAGINO;
1974 }
1975
1976
1977
1978
1979
1980
1981 static int
1982 xfs_iunlink_insert_backref(
1983 struct xfs_perag *pag,
1984 struct xfs_iunlink *iu)
1985 {
1986 int error;
1987
1988 error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
1989 &iu->iu_rhash_head, xfs_iunlink_hash_params);
1990
1991
1992
1993
1994
1995
1996 if (error) {
1997 WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
1998 kmem_free(iu);
1999 }
2000
2001
2002
2003
2004 if (error != 0 && error != -EEXIST)
2005 error = 0;
2006 return error;
2007 }
2008
2009
2010 static int
2011 xfs_iunlink_add_backref(
2012 struct xfs_perag *pag,
2013 xfs_agino_t prev_agino,
2014 xfs_agino_t this_agino)
2015 {
2016 struct xfs_iunlink *iu;
2017
2018 if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
2019 return 0;
2020
2021 iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
2022 iu->iu_agino = prev_agino;
2023 iu->iu_next_unlinked = this_agino;
2024
2025 return xfs_iunlink_insert_backref(pag, iu);
2026 }
2027
2028
2029
2030
2031
2032
2033 static int
2034 xfs_iunlink_change_backref(
2035 struct xfs_perag *pag,
2036 xfs_agino_t agino,
2037 xfs_agino_t next_unlinked)
2038 {
2039 struct xfs_iunlink *iu;
2040 int error;
2041
2042
2043 iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
2044 xfs_iunlink_hash_params);
2045 if (!iu)
2046 return 0;
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056 error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
2057 &iu->iu_rhash_head, xfs_iunlink_hash_params);
2058 if (error)
2059 return error;
2060
2061
2062 if (next_unlinked == NULLAGINO) {
2063 kmem_free(iu);
2064 return 0;
2065 }
2066
2067
2068 iu->iu_next_unlinked = next_unlinked;
2069 return xfs_iunlink_insert_backref(pag, iu);
2070 }
2071
2072
2073 int
2074 xfs_iunlink_init(
2075 struct xfs_perag *pag)
2076 {
2077 return rhashtable_init(&pag->pagi_unlinked_hash,
2078 &xfs_iunlink_hash_params);
2079 }
2080
2081
2082 static void
2083 xfs_iunlink_free_item(
2084 void *ptr,
2085 void *arg)
2086 {
2087 struct xfs_iunlink *iu = ptr;
2088 bool *freed_anything = arg;
2089
2090 *freed_anything = true;
2091 kmem_free(iu);
2092 }
2093
2094 void
2095 xfs_iunlink_destroy(
2096 struct xfs_perag *pag)
2097 {
2098 bool freed_anything = false;
2099
2100 rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
2101 xfs_iunlink_free_item, &freed_anything);
2102
2103 ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
2104 }
2105
2106
2107
2108
2109
2110 STATIC int
2111 xfs_iunlink_update_bucket(
2112 struct xfs_trans *tp,
2113 xfs_agnumber_t agno,
2114 struct xfs_buf *agibp,
2115 unsigned int bucket_index,
2116 xfs_agino_t new_agino)
2117 {
2118 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
2119 xfs_agino_t old_value;
2120 int offset;
2121
2122 ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));
2123
2124 old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2125 trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
2126 old_value, new_agino);
2127
2128
2129
2130
2131
2132
2133 if (old_value == new_agino)
2134 return -EFSCORRUPTED;
2135
2136 agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
2137 offset = offsetof(struct xfs_agi, agi_unlinked) +
2138 (sizeof(xfs_agino_t) * bucket_index);
2139 xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
2140 return 0;
2141 }
2142
2143
2144 STATIC void
2145 xfs_iunlink_update_dinode(
2146 struct xfs_trans *tp,
2147 xfs_agnumber_t agno,
2148 xfs_agino_t agino,
2149 struct xfs_buf *ibp,
2150 struct xfs_dinode *dip,
2151 struct xfs_imap *imap,
2152 xfs_agino_t next_agino)
2153 {
2154 struct xfs_mount *mp = tp->t_mountp;
2155 int offset;
2156
2157 ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2158
2159 trace_xfs_iunlink_update_dinode(mp, agno, agino,
2160 be32_to_cpu(dip->di_next_unlinked), next_agino);
2161
2162 dip->di_next_unlinked = cpu_to_be32(next_agino);
2163 offset = imap->im_boffset +
2164 offsetof(struct xfs_dinode, di_next_unlinked);
2165
2166
2167 xfs_dinode_calc_crc(mp, dip);
2168 xfs_trans_inode_buf(tp, ibp);
2169 xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
2170 xfs_inobp_check(mp, ibp);
2171 }
2172
2173
2174 STATIC int
2175 xfs_iunlink_update_inode(
2176 struct xfs_trans *tp,
2177 struct xfs_inode *ip,
2178 xfs_agnumber_t agno,
2179 xfs_agino_t next_agino,
2180 xfs_agino_t *old_next_agino)
2181 {
2182 struct xfs_mount *mp = tp->t_mountp;
2183 struct xfs_dinode *dip;
2184 struct xfs_buf *ibp;
2185 xfs_agino_t old_value;
2186 int error;
2187
2188 ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2189
2190 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0, 0);
2191 if (error)
2192 return error;
2193
2194
2195 old_value = be32_to_cpu(dip->di_next_unlinked);
2196 if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
2197 error = -EFSCORRUPTED;
2198 goto out;
2199 }
2200
2201
2202
2203
2204
2205
2206 *old_next_agino = old_value;
2207 if (old_value == next_agino) {
2208 if (next_agino != NULLAGINO)
2209 error = -EFSCORRUPTED;
2210 goto out;
2211 }
2212
2213
2214 xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
2215 ibp, dip, &ip->i_imap, next_agino);
2216 return 0;
2217 out:
2218 xfs_trans_brelse(tp, ibp);
2219 return error;
2220 }
2221
2222
2223
2224
2225
2226
2227
2228
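/*
 * Insert the inode at the head of its AGI unlinked-list bucket: point
 * the inode at the old bucket head, record the backref, and make the
 * bucket point at this inode.
 */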
2229 STATIC int
2230 xfs_iunlink(
2231 struct xfs_trans *tp,
2232 struct xfs_inode *ip)
2233 {
2234 struct xfs_mount *mp = tp->t_mountp;
2235 struct xfs_agi *agi;
2236 struct xfs_buf *agibp;
2237 xfs_agino_t next_agino;
2238 xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2239 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2240 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2241 int error;
2242
2243 ASSERT(VFS_I(ip)->i_nlink == 0);
2244 ASSERT(VFS_I(ip)->i_mode != 0);
2245 trace_xfs_iunlink(ip);
2246
2247
2248 error = xfs_read_agi(mp, tp, agno, &agibp);
2249 if (error)
2250 return error;
2251 agi = XFS_BUF_TO_AGI(agibp);
2252
2253
2254
2255
2256
2257
2258 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2259 if (next_agino == agino ||
2260 !xfs_verify_agino_or_null(mp, agno, next_agino))
2261 return -EFSCORRUPTED;
2262
2263 if (next_agino != NULLAGINO) {
2264 struct xfs_perag *pag;
2265 xfs_agino_t old_agino;
2266
2267
2268
2269
2270
2271 error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
2272 &old_agino);
2273 if (error)
2274 return error;
2275 ASSERT(old_agino == NULLAGINO);
2276
2277
2278
2279
2280
2281 pag = xfs_perag_get(mp, agno);
2282 error = xfs_iunlink_add_backref(pag, agino, next_agino);
2283 xfs_perag_put(pag);
2284 if (error)
2285 return error;
2286 }
2287
2288
2289 return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
2290 }
2291
2292
2293 STATIC int
2294 xfs_iunlink_map_ino(
2295 struct xfs_trans *tp,
2296 xfs_agnumber_t agno,
2297 xfs_agino_t agino,
2298 struct xfs_imap *imap,
2299 struct xfs_dinode **dipp,
2300 struct xfs_buf **bpp)
2301 {
2302 struct xfs_mount *mp = tp->t_mountp;
2303 int error;
2304
2305 imap->im_blkno = 0;
2306 error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
2307 if (error) {
2308 xfs_warn(mp, "%s: xfs_imap returned error %d.",
2309 __func__, error);
2310 return error;
2311 }
2312
2313 error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0, 0);
2314 if (error) {
2315 xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2316 __func__, error);
2317 return error;
2318 }
2319
2320 return 0;
2321 }
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333 STATIC int
2334 xfs_iunlink_map_prev(
2335 struct xfs_trans *tp,
2336 xfs_agnumber_t agno,
2337 xfs_agino_t head_agino,
2338 xfs_agino_t target_agino,
2339 xfs_agino_t *agino,
2340 struct xfs_imap *imap,
2341 struct xfs_dinode **dipp,
2342 struct xfs_buf **bpp,
2343 struct xfs_perag *pag)
2344 {
2345 struct xfs_mount *mp = tp->t_mountp;
2346 xfs_agino_t next_agino;
2347 int error;
2348
2349 ASSERT(head_agino != target_agino);
2350 *bpp = NULL;
2351
2352
2353 *agino = xfs_iunlink_lookup_backref(pag, target_agino);
2354 if (*agino != NULLAGINO) {
2355 error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp);
2356 if (error)
2357 return error;
2358
2359 if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
2360 return 0;
2361
2362
2363
2364
2365
2366 xfs_trans_brelse(tp, *bpp);
2367 *bpp = NULL;
2368 WARN_ON_ONCE(1);
2369 }
2370
2371 trace_xfs_iunlink_map_prev_fallback(mp, agno);
2372
2373
2374 next_agino = head_agino;
2375 while (next_agino != target_agino) {
2376 xfs_agino_t unlinked_agino;
2377
2378 if (*bpp)
2379 xfs_trans_brelse(tp, *bpp);
2380
2381 *agino = next_agino;
2382 error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
2383 bpp);
2384 if (error)
2385 return error;
2386
2387 unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
2388
2389
2390
2391
2392 if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
2393 next_agino == unlinked_agino) {
2394 XFS_CORRUPTION_ERROR(__func__,
2395 XFS_ERRLEVEL_LOW, mp,
2396 *dipp, sizeof(**dipp));
2397 error = -EFSCORRUPTED;
2398 return error;
2399 }
2400 next_agino = unlinked_agino;
2401 }
2402
2403 return 0;
2404 }
2405
2406
2407
2408
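/*
 * Remove the inode from its AGI unlinked-list bucket, updating either
 * the bucket head or the previous inode's next_unlinked pointer, and
 * keeping the in-core backref cache in sync.
 */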
2409 STATIC int
2410 xfs_iunlink_remove(
2411 struct xfs_trans *tp,
2412 struct xfs_inode *ip)
2413 {
2414 struct xfs_mount *mp = tp->t_mountp;
2415 struct xfs_agi *agi;
2416 struct xfs_buf *agibp;
2417 struct xfs_buf *last_ibp;
2418 struct xfs_dinode *last_dip = NULL;
2419 struct xfs_perag *pag = NULL;
2420 xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2421 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2422 xfs_agino_t next_agino;
2423 xfs_agino_t head_agino;
2424 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2425 int error;
2426
2427 trace_xfs_iunlink_remove(ip);
2428
2429
2430 error = xfs_read_agi(mp, tp, agno, &agibp);
2431 if (error)
2432 return error;
2433 agi = XFS_BUF_TO_AGI(agibp);
2434
2435
2436
2437
2438
2439 head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2440 if (!xfs_verify_agino(mp, agno, head_agino)) {
2441 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2442 agi, sizeof(*agi));
2443 return -EFSCORRUPTED;
2444 }
2445
2446
2447
2448
2449
2450
2451 error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino);
2452 if (error)
2453 return error;
2454
2455
2456
2457
2458
2459
2460
2461
2462 if (next_agino != NULLAGINO) {
2463 pag = xfs_perag_get(mp, agno);
2464 error = xfs_iunlink_change_backref(pag, next_agino,
2465 NULLAGINO);
2466 if (error)
2467 goto out;
2468 }
2469
2470 if (head_agino == agino) {
2471
2472 error = xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
2473 next_agino);
2474 if (error)
2475 goto out;
2476 } else {
2477 struct xfs_imap imap;
2478 xfs_agino_t prev_agino;
2479
2480 if (!pag)
2481 pag = xfs_perag_get(mp, agno);
2482
2483
2484 error = xfs_iunlink_map_prev(tp, agno, head_agino, agino,
2485 &prev_agino, &imap, &last_dip, &last_ibp,
2486 pag);
2487 if (error)
2488 goto out;
2489
2490
2491 xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
2492 last_dip, &imap, next_agino);
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502 error = xfs_iunlink_change_backref(pag, agino, next_agino);
2503 if (error)
2504 goto out;
2505 }
2506
2507 out:
2508 if (pag)
2509 xfs_perag_put(pag);
2510 return error;
2511 }
2512
2513
2514
2515
2516
2517
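/*
 * Free an entire inode cluster: mark every in-core inode and logged
 * inode item in the cluster stale and invalidate the cluster buffers so
 * nothing gets written back over the freed inodes.
 */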
2518 STATIC int
2519 xfs_ifree_cluster(
2520 xfs_inode_t *free_ip,
2521 xfs_trans_t *tp,
2522 struct xfs_icluster *xic)
2523 {
2524 xfs_mount_t *mp = free_ip->i_mount;
2525 int nbufs;
2526 int i, j;
2527 int ioffset;
2528 xfs_daddr_t blkno;
2529 xfs_buf_t *bp;
2530 xfs_inode_t *ip;
2531 xfs_inode_log_item_t *iip;
2532 struct xfs_log_item *lip;
2533 struct xfs_perag *pag;
2534 struct xfs_ino_geometry *igeo = M_IGEO(mp);
2535 xfs_ino_t inum;
2536
2537 inum = xic->first_ino;
2538 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
2539 nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2540
2541 for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2542
2543
2544
2545
2546
2547 ioffset = inum - xic->first_ino;
2548 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2549 ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2550 continue;
2551 }
2552
2553 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2554 XFS_INO_TO_AGBNO(mp, inum));
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2565 mp->m_bsize * igeo->blocks_per_cluster,
2566 XBF_UNMAPPED);
2567
2568 if (!bp)
2569 return -ENOMEM;
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580 bp->b_ops = &xfs_inode_buf_ops;
2581
2582
2583
2584
2585
2586
2587
2588
2589 list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
2590 if (lip->li_type == XFS_LI_INODE) {
2591 iip = (xfs_inode_log_item_t *)lip;
2592 ASSERT(iip->ili_logged == 1);
2593 lip->li_cb = xfs_istale_done;
2594 xfs_trans_ail_copy_lsn(mp->m_ail,
2595 &iip->ili_flush_lsn,
2596 &iip->ili_item.li_lsn);
2597 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2598 }
2599 }
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612 for (i = 0; i < igeo->inodes_per_cluster; i++) {
2613 retry:
2614 rcu_read_lock();
2615 ip = radix_tree_lookup(&pag->pag_ici_root,
2616 XFS_INO_TO_AGINO(mp, (inum + i)));
2617
2618
2619 if (!ip) {
2620 rcu_read_unlock();
2621 continue;
2622 }
2623
2624
2625
2626
2627
2628
2629
2630
2631 spin_lock(&ip->i_flags_lock);
2632 if (ip->i_ino != inum + i ||
2633 __xfs_iflags_test(ip, XFS_ISTALE)) {
2634 spin_unlock(&ip->i_flags_lock);
2635 rcu_read_unlock();
2636 continue;
2637 }
2638 spin_unlock(&ip->i_flags_lock);
2639
2640
2641
2642
2643
2644
2645
2646
2647 if (ip != free_ip) {
2648 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2649 rcu_read_unlock();
2650 delay(1);
2651 goto retry;
2652 }
2653
2654
2655
2656
2657
2658
2659
2660
2661 if (ip->i_ino != inum + i) {
2662 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2663 rcu_read_unlock();
2664 continue;
2665 }
2666 }
2667 rcu_read_unlock();
2668
2669 xfs_iflock(ip);
2670 xfs_iflags_set(ip, XFS_ISTALE);
2671
2672
2673
2674
2675
2676 iip = ip->i_itemp;
2677 if (!iip || xfs_inode_clean(ip)) {
2678 ASSERT(ip != free_ip);
2679 xfs_ifunlock(ip);
2680 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2681 continue;
2682 }
2683
2684 iip->ili_last_fields = iip->ili_fields;
2685 iip->ili_fields = 0;
2686 iip->ili_fsync_fields = 0;
2687 iip->ili_logged = 1;
2688 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2689 &iip->ili_item.li_lsn);
2690
2691 xfs_buf_attach_iodone(bp, xfs_istale_done,
2692 &iip->ili_item);
2693
2694 if (ip != free_ip)
2695 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2696 }
2697
2698 xfs_trans_stale_inode_buf(tp, bp);
2699 xfs_trans_binval(tp, bp);
2700 }
2701
2702 xfs_perag_put(pag);
2703 return 0;
2704 }
2705
2706
2707
2708
2709
2710 static inline void
2711 xfs_ifree_local_data(
2712 struct xfs_inode *ip,
2713 int whichfork)
2714 {
2715 struct xfs_ifork *ifp;
2716
2717 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
2718 return;
2719
2720 ifp = XFS_IFORK_PTR(ip, whichfork);
2721 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
2722 }
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
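/*
 * Free the inode: pull it off the unlinked list, return it to the inode
 * btrees via xfs_difree(), discard any local-format fork data and reset
 * the in-core inode to a zero-mode, empty state.
 */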
2734 int
2735 xfs_ifree(
2736 struct xfs_trans *tp,
2737 struct xfs_inode *ip)
2738 {
2739 int error;
2740 struct xfs_icluster xic = { 0 };
2741
2742 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2743 ASSERT(VFS_I(ip)->i_nlink == 0);
2744 ASSERT(ip->i_d.di_nextents == 0);
2745 ASSERT(ip->i_d.di_anextents == 0);
2746 ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2747 ASSERT(ip->i_d.di_nblocks == 0);
2748
2749
2750
2751
2752 error = xfs_iunlink_remove(tp, ip);
2753 if (error)
2754 return error;
2755
2756 error = xfs_difree(tp, ip->i_ino, &xic);
2757 if (error)
2758 return error;
2759
2760 xfs_ifree_local_data(ip, XFS_DATA_FORK);
2761 xfs_ifree_local_data(ip, XFS_ATTR_FORK);
2762
2763 VFS_I(ip)->i_mode = 0;
2764 ip->i_d.di_flags = 0;
2765 ip->i_d.di_flags2 = 0;
2766 ip->i_d.di_dmevmask = 0;
2767 ip->i_d.di_forkoff = 0;
2768 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2769 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2770
2771
2772 ip->i_itemp->ili_fields &= ~(XFS_ILOG_AOWNER|XFS_ILOG_DOWNER);
2773
2774
2775
2776
2777
2778 VFS_I(ip)->i_generation++;
2779 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2780
2781 if (xic.deleted)
2782 error = xfs_ifree_cluster(ip, tp, &xic);
2783
2784 return error;
2785 }
2786
2787
2788
2789
2790
2791
2792 static void
2793 xfs_iunpin(
2794 struct xfs_inode *ip)
2795 {
2796 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2797
2798 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2799
2800
2801 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);
2802
2803 }
2804
2805 static void
2806 __xfs_iunpin_wait(
2807 struct xfs_inode *ip)
2808 {
2809 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2810 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2811
2812 xfs_iunpin(ip);
2813
2814 do {
2815 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2816 if (xfs_ipincount(ip))
2817 io_schedule();
2818 } while (xfs_ipincount(ip));
2819 finish_wait(wq, &wait.wq_entry);
2820 }
2821
2822 void
2823 xfs_iunpin_wait(
2824 struct xfs_inode *ip)
2825 {
2826 if (xfs_ipincount(ip))
2827 __xfs_iunpin_wait(ip);
2828 }
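/*
 * Typical usage of the unpin helpers above (illustrative): code that is
 * about to flush or reclaim an inode calls
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_iunpin_wait(ip);
 *	... now safe to write the inode back ...
 *
 * The fast path is a single pin-count check; the slow path forces the log
 * up to ili_last_lsn and sleeps on __XFS_IPINNED_BIT until the pin count
 * drops to zero.
 */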
2829
2830 /*
2831  * Removing an inode from the namespace involves removing the directory
2832  * entry and dropping the link count on the inode.  Removing the directory
2833  * entry can result in locking an AGF (directory blocks were freed) and
2834  * removing a link count can result in placing the inode on an unlinked
2835  * list which results in locking an AGI.
2836  *
2837  * The big problem here is that we have an ordering constraint on AGF and
2838  * AGI locking - inode allocation locks the AGI, then can allocate a new
2839  * extent for new inodes, locking the AGF after the AGI.  Similarly,
2840  * freeing the inode removes the inode from the unlinked list, requiring
2841  * that we lock the AGI first, and then freeing the inode can result in an
2842  * inode chunk being freed and hence freeing disk space requiring an AGF.
2843  *
2844  * Hence the ordering that is imposed by other parts of the code is AGI
2845  * before AGF.  This means we cannot remove the directory entry before we
2846  * drop the inode reference count and put it on the unlinked list, as that
2847  * results in a lock order of AGF then AGI, and this can deadlock against
2848  * inode allocation and freeing.  Therefore we must drop the link counts
2849  * before we remove the directory entry.
2850  *
2851  * This is still safe from a transactional point of view - it is not until
2852  * we get to xfs_defer_finish() that we have the possibility of multiple
2853  * transactions in this operation.  Hence as long as we remove the
2854  * directory entry and drop the link count in the first transaction of the
2855  * remove operation, there are no transactional constraints on ordering.
2856  */
2857 int
2858 xfs_remove(
2859 xfs_inode_t *dp,
2860 struct xfs_name *name,
2861 xfs_inode_t *ip)
2862 {
2863 xfs_mount_t *mp = dp->i_mount;
2864 xfs_trans_t *tp = NULL;
2865 int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2866 int error = 0;
2867 uint resblks;
2868
2869 trace_xfs_remove(dp, name);
2870
2871 if (XFS_FORCED_SHUTDOWN(mp))
2872 return -EIO;
2873
2874 error = xfs_qm_dqattach(dp);
2875 if (error)
2876 goto std_return;
2877
2878 error = xfs_qm_dqattach(ip);
2879 if (error)
2880 goto std_return;
2881
2882
2883
2884 /*
2885  * We try to get the real space reservation first, allowing for
2886  * directory btree deletion(s) implying possible bmap insert(s).  If we
2887  * can't get the space reservation then we use 0 instead, and avoid the
2888  * bmap btree insert(s) in the directory code by, if the bmap insert
2889  * tries to happen, instead trimming the LAST block from the directory.
2890  */
2891 resblks = XFS_REMOVE_SPACE_RES(mp);
2892 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
2893 if (error == -ENOSPC) {
2894 resblks = 0;
2895 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2896 &tp);
2897 }
2898 if (error) {
2899 ASSERT(error != -ENOSPC);
2900 goto std_return;
2901 }
2902
2903 xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
2904
2905 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2906 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2907
2908 /*
2909  * If we're removing a directory perform some additional validation.
2910  */
2911 if (is_dir) {
2912 ASSERT(VFS_I(ip)->i_nlink >= 2);
2913 if (VFS_I(ip)->i_nlink != 2) {
2914 error = -ENOTEMPTY;
2915 goto out_trans_cancel;
2916 }
2917 if (!xfs_dir_isempty(ip)) {
2918 error = -ENOTEMPTY;
2919 goto out_trans_cancel;
2920 }
2921
2922 /* Drop the link from ip's ".." to the parent directory. */
2923 error = xfs_droplink(tp, dp);
2924 if (error)
2925 goto out_trans_cancel;
2926
2927 /* Drop the "." link from ip to itself. */
2928 error = xfs_droplink(tp, ip);
2929 if (error)
2930 goto out_trans_cancel;
2931 } else {
2932 /*
2933  * When removing a non-directory we need to log the parent
2934  * inode here.  For a directory this is done implicitly
2935  * by the xfs_droplink call for the ".." entry.
2936  */
2937 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2938 }
2939 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2940
2941 /* Drop the link from dp to ip. */
2942 error = xfs_droplink(tp, ip);
2943 if (error)
2944 goto out_trans_cancel;
2945
2946 error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2947 if (error) {
2948 ASSERT(error != -ENOENT);
2949 goto out_trans_cancel;
2950 }
2951
2952 /*
2953  * If this is a synchronous mount, make sure that the
2954  * remove transaction goes to disk before returning to
2955  * the user.
2956  */
2957 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2958 xfs_trans_set_sync(tp);
2959
2960 error = xfs_trans_commit(tp);
2961 if (error)
2962 goto std_return;
2963
2964 if (is_dir && xfs_inode_is_filestream(ip))
2965 xfs_filestream_deassociate(ip);
2966
2967 return 0;
2968
2969 out_trans_cancel:
2970 xfs_trans_cancel(tp);
2971 std_return:
2972 return error;
2973 }
2974
2975 /*
2976  * Enter all inodes for a rename transaction into a sorted array.
2977  */
2978 #define __XFS_SORT_INODES 5
2979 STATIC void
2980 xfs_sort_for_rename(
2981 struct xfs_inode *dp1,
2982 struct xfs_inode *dp2,
2983 struct xfs_inode *ip1,
2984 struct xfs_inode *ip2,
2985 struct xfs_inode *wip,
2986 struct xfs_inode **i_tab,
2987 int *num_inodes)
2988 {
2989 int i, j;
2990
2991 ASSERT(*num_inodes == __XFS_SORT_INODES);
2992 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2993
2994
2995 /*
2996  * i_tab contains a list of pointers to inodes.  We initialize the
2997  * table here and then sort it by inode number; xfs_rename() uses
2998  * the sorted array to attach dquots and to take the inode locks in
2999  * ascending inode-number order.
3000  */
3001 i = 0;
3002 i_tab[i++] = dp1;
3003 i_tab[i++] = dp2;
3004 i_tab[i++] = ip1;
3005 if (ip2)
3006 i_tab[i++] = ip2;
3007 if (wip)
3008 i_tab[i++] = wip;
3009 *num_inodes = i;
3010
3011 /*
3012  * Sort the elements via bubble sort.  (Remember, there are at
3013  * most 5 elements to sort, so this is adequate.)
3014  */
3015 for (i = 0; i < *num_inodes; i++) {
3016 for (j = 1; j < *num_inodes; j++) {
3017 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
3018 struct xfs_inode *temp = i_tab[j];
3019 i_tab[j] = i_tab[j-1];
3020 i_tab[j-1] = temp;
3021 }
3022 }
3023 }
3024 }
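/*
 * Example (illustrative): for a cross-directory rename with a whiteout,
 * xfs_rename() below passes { src_dp, target_dp, src_ip, target_ip, wip }
 * into this function; the array comes back sorted by ascending i_ino so
 * that xfs_lock_inodes() can take all the ILOCKs in a single, consistent
 * order and avoid ABBA deadlocks between concurrent renames.
 */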
3025
3026 static int
3027 xfs_finish_rename(
3028 struct xfs_trans *tp)
3029 {
3030 /*
3031  * If this is a synchronous mount, make sure that the rename
3032  * transaction goes to disk before returning to the user.
3033  */
3034 if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
3035 xfs_trans_set_sync(tp);
3036
3037 return xfs_trans_commit(tp);
3038 }
3039
3040 /*
3041  * xfs_cross_rename()
3042  *
3043  * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall
3044  */
3045 STATIC int
3046 xfs_cross_rename(
3047 struct xfs_trans *tp,
3048 struct xfs_inode *dp1,
3049 struct xfs_name *name1,
3050 struct xfs_inode *ip1,
3051 struct xfs_inode *dp2,
3052 struct xfs_name *name2,
3053 struct xfs_inode *ip2,
3054 int spaceres)
3055 {
3056 int error = 0;
3057 int ip1_flags = 0;
3058 int ip2_flags = 0;
3059 int dp2_flags = 0;
3060
3061 /* Swap inode number for dirent in first parent */
3062 error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
3063 if (error)
3064 goto out_trans_abort;
3065
3066 /* Swap inode number for dirent in second parent */
3067 error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
3068 if (error)
3069 goto out_trans_abort;
3070
3071 /*
3072  * If we're renaming one or more directories across different parents,
3073  * update the respective ".." entries (and link counts) to match the
3074  * new parents.
3075  */
3076 if (dp1 != dp2) {
3077 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3078
3079 if (S_ISDIR(VFS_I(ip2)->i_mode)) {
3080 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
3081 dp1->i_ino, spaceres);
3082 if (error)
3083 goto out_trans_abort;
3084
3085 /* transfer ip2's ".." reference from dp2 to dp1 */
3086 if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
3087 error = xfs_droplink(tp, dp2);
3088 if (error)
3089 goto out_trans_abort;
3090 xfs_bumplink(tp, dp1);
3091 }
3092
3093 /*
3094  * Although ip1 isn't changed here, userspace needs to be
3095  * notified about the change so that applications relying on
3096  * timestamps (such as backup tools) see that something
3097  * happened; hence the ctime update below.
3098  */
3099 ip1_flags |= XFS_ICHGTIME_CHG;
3100 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3101 }
3102
3103 if (S_ISDIR(VFS_I(ip1)->i_mode)) {
3104 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
3105 dp2->i_ino, spaceres);
3106 if (error)
3107 goto out_trans_abort;
3108
3109 /* transfer ip1's ".." reference from dp1 to dp2 */
3110 if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
3111 error = xfs_droplink(tp, dp1);
3112 if (error)
3113 goto out_trans_abort;
3114 xfs_bumplink(tp, dp2);
3115 }
3116
3117 /*
3118  * Although ip2 isn't changed here, userspace needs to be
3119  * notified about the change so that applications relying on
3120  * timestamps (such as backup tools) see that something
3121  * happened; hence the ctime update below.
3122  */
3123 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3124 ip2_flags |= XFS_ICHGTIME_CHG;
3125 }
3126 }
3127
3128 if (ip1_flags) {
3129 xfs_trans_ichgtime(tp, ip1, ip1_flags);
3130 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
3131 }
3132 if (ip2_flags) {
3133 xfs_trans_ichgtime(tp, ip2, ip2_flags);
3134 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
3135 }
3136 if (dp2_flags) {
3137 xfs_trans_ichgtime(tp, dp2, dp2_flags);
3138 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
3139 }
3140 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3141 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
3142 return xfs_finish_rename(tp);
3143
3144 out_trans_abort:
3145 xfs_trans_cancel(tp);
3146 return error;
3147 }
3148
3149 /*
3150  * xfs_rename_alloc_whiteout()
3151  *
3152  * Return a referenced, unlinked, unlocked inode that can be used as a
3153  * whiteout in a rename transaction.  We use a tmpfile inode here so
3154  * that if we crash before the whiteout gets linked into the rename,
3155  * log recovery will free it and return the space to the filesystem.
3156  */
3157 static int
3158 xfs_rename_alloc_whiteout(
3159 struct xfs_inode *dp,
3160 struct xfs_inode **wip)
3161 {
3162 struct xfs_inode *tmpfile;
3163 int error;
3164
3165 error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile);
3166 if (error)
3167 return error;
3168
3169 /*
3170  * Prepare the tmpfile inode as if it were created through the VFS.
3171  * Complete the inode setup and flag it as linkable.  nlink is already
3172  * zero, so we can skip the drop_nlink.
3173  */
3174 xfs_setup_iops(tmpfile);
3175 xfs_finish_inode_setup(tmpfile);
3176 VFS_I(tmpfile)->i_state |= I_LINKABLE;
3177
3178 *wip = tmpfile;
3179 return 0;
3180 }
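/*
 * Lifecycle of the whiteout inode during RENAME_WHITEOUT (illustrative
 * summary of the code in xfs_rename() below): the tmpfile starts with a
 * zero link count and sits on the AGI unlinked list; xfs_rename() pulls
 * it off that list, bumps its link count and clears I_LINKABLE, then
 * rewrites the source directory entry to point at it instead of removing
 * that entry.
 */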
3181
3182 /*
3183  * xfs_rename
3184  */
3185 int
3186 xfs_rename(
3187 struct xfs_inode *src_dp,
3188 struct xfs_name *src_name,
3189 struct xfs_inode *src_ip,
3190 struct xfs_inode *target_dp,
3191 struct xfs_name *target_name,
3192 struct xfs_inode *target_ip,
3193 unsigned int flags)
3194 {
3195 struct xfs_mount *mp = src_dp->i_mount;
3196 struct xfs_trans *tp;
3197 struct xfs_inode *wip = NULL;
3198 struct xfs_inode *inodes[__XFS_SORT_INODES];
3199 int num_inodes = __XFS_SORT_INODES;
3200 bool new_parent = (src_dp != target_dp);
3201 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
3202 int spaceres;
3203 int error;
3204
3205 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3206
3207 if ((flags & RENAME_EXCHANGE) && !target_ip)
3208 return -EINVAL;
3209
3210 /*
3211  * If we are doing a whiteout operation, allocate the whiteout inode
3212  * that will replace the source directory entry at the end of the
3213  * rename, and mark the dirent type as a char device to match it.
3214  */
3215 if (flags & RENAME_WHITEOUT) {
3216 ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
3217 error = xfs_rename_alloc_whiteout(target_dp, &wip);
3218 if (error)
3219 return error;
3220
3221 /* set up the source dirent to carry the whiteout's file type */
3222 src_name->type = XFS_DIR3_FT_CHRDEV;
3223 }
3224
3225 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
3226 inodes, &num_inodes);
3227
3228 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
3229 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
3230 if (error == -ENOSPC) {
3231 spaceres = 0;
3232 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3233 &tp);
3234 }
3235 if (error)
3236 goto out_release_wip;
3237
3238 /*
3239  * Attach the dquots to the inodes
3240  */
3241 error = xfs_qm_vop_rename_dqattach(inodes);
3242 if (error)
3243 goto out_trans_cancel;
3244
3245 /*
3246  * Lock all the participating inodes.  Depending upon whether
3247  * the target_name exists in the target directory, whether the
3248  * target directory is the same as the source directory, and
3249  * whether a whiteout is in use, we can lock from two to five inodes.
3250  */
3251 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3252
3253 /*
3254  * Join all the inodes to the transaction.  From this point on,
3255  * we can rely on either trans_commit or trans_cancel to unlock
3256  * them.
3257  */
3258 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3259 if (new_parent)
3260 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3261 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3262 if (target_ip)
3263 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
3264 if (wip)
3265 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3266
3267 /*
3268  * If we are using project inheritance, we only allow renames
3269  * into our tree when the project IDs are the same; else the
3270  * tree quota mechanism would be circumvented.
3271  */
3272 if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
3273 (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
3274 error = -EXDEV;
3275 goto out_trans_cancel;
3276 }
3277
3278 /* RENAME_EXCHANGE is handled entirely by xfs_cross_rename(). */
3279 if (flags & RENAME_EXCHANGE)
3280 return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3281 target_dp, target_name, target_ip,
3282 spaceres);
3283
3284 /*
3285  * Check for expected errors before we dirty the transaction
3286  * so we can return an error without a transaction abort.
3287  */
3288 if (target_ip == NULL) {
3289 /*
3290  * If there's no space reservation, check the entry will
3291  * fit before actually inserting it.
3292  */
3293 if (!spaceres) {
3294 error = xfs_dir_canenter(tp, target_dp, target_name);
3295 if (error)
3296 goto out_trans_cancel;
3297 }
3298 } else {
3299 /*
3300  * If target exists and it's a directory, check that it can
3301  * be destroyed (i.e. it is empty and not linked elsewhere).
3302  */
3303 if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3304 (!xfs_dir_isempty(target_ip) ||
3305 (VFS_I(target_ip)->i_nlink > 2))) {
3306 error = -EEXIST;
3307 goto out_trans_cancel;
3308 }
3309 }
3310
3311 /*
3312  * Directory entry creation below may acquire the AGF.  Remove the
3313  * whiteout from the unlinked list first to preserve correct AGI/AGF
3314  * locking order.  This dirties the transaction, so failures after
3315  * this point will abort and log recovery will clean up the mess.
3316  *
3317  * For whiteouts, we need to bump the link count on the whiteout
3318  * inode.  After this point we have a real link, so clear the
3319  * tmpfile state flag from the inode so it doesn't accidentally get
3320  * misused in future.  The whiteout remains unlocked only in the
3321  * VFS sense; its ILOCK is held for the whole transaction.
3322  */
3323 if (wip) {
3324 ASSERT(VFS_I(wip)->i_nlink == 0);
3325 error = xfs_iunlink_remove(tp, wip);
3326 if (error)
3327 goto out_trans_cancel;
3328
3329 xfs_bumplink(tp, wip);
3330 xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
3331 VFS_I(wip)->i_state &= ~I_LINKABLE;
3332 }
3333
3334 /*
3335  * Set up the target.
3336  */
3337 if (target_ip == NULL) {
3338 /*
3339  * If target does not exist, add the new entry.  If the rename moves
3340  * a directory into a new parent, also bump the target directory's
3341  * link count to account for the ".." reference from the new entry.
3342  */
3343 error = xfs_dir_createname(tp, target_dp, target_name,
3344 src_ip->i_ino, spaceres);
3345 if (error)
3346 goto out_trans_cancel;
3347
3348 xfs_trans_ichgtime(tp, target_dp,
3349 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3350
3351 if (new_parent && src_is_directory) {
3352 xfs_bumplink(tp, target_dp);
3353 }
3354 } else {
3355 /*
3356  * Link the source inode under the target name.
3357  * If the source inode is a directory and we are moving
3358  * it across directories, its ".." entry will be
3359  * inconsistent until we replace that down below.
3360  *
3361  * The existing target entry is replaced in place; the old
3362  * target inode's link count is dropped just below.
3363  */
3364 error = xfs_dir_replace(tp, target_dp, target_name,
3365 src_ip->i_ino, spaceres);
3366 if (error)
3367 goto out_trans_cancel;
3368
3369 xfs_trans_ichgtime(tp, target_dp,
3370 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3371
3372 /*
3373  * Decrement the link count on the target since the target
3374  * dir no longer points to it.
3375  */
3376 error = xfs_droplink(tp, target_ip);
3377 if (error)
3378 goto out_trans_cancel;
3379
3380 if (src_is_directory) {
3381 /*
3382  * Drop the link from the old "." entry.
3383  */
3384 error = xfs_droplink(tp, target_ip);
3385 if (error)
3386 goto out_trans_cancel;
3387 }
3388 }
3389
3390 /*
3391  * Now fix up the source side of the rename.
3392  */
3393 if (new_parent && src_is_directory) {
3394 /*
3395  * Rewrite the ".." entry to point to the new
3396  * directory.
3397  */
3398 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3399 target_dp->i_ino, spaceres);
3400 ASSERT(error != -EEXIST);
3401 if (error)
3402 goto out_trans_cancel;
3403 }
3404
3405 /*
3406  * We always want to hit the ctime on the source inode.
3407  *
3408  * This isn't strictly required by the standards since the source
3409  * inode isn't really being changed, but old unix file systems did
3410  * it and some incremental backup programs won't work without it.
3411  */
3412 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3413 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3414
3415 /*
3416  * Adjust the link count on src_dp.  This is necessary when
3417  * renaming a directory, either within one parent when
3418  * the target existed, or across two parent directories.
3419  */
3420 if (src_is_directory && (new_parent || target_ip != NULL)) {
3421
3422 /*
3423  * Decrement link count on src_dp since the
3424  * entry that's moved no longer points to it.
3425  */
3426 error = xfs_droplink(tp, src_dp);
3427 if (error)
3428 goto out_trans_cancel;
3429 }
3430
3431 /*
3432  * For whiteouts, we only need to update the source dirent with the
3433  * inode number of the whiteout inode rather than removing it
3434  * altogether.
3435  */
3436 if (wip) {
3437 error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3438 spaceres);
3439 } else
3440 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3441 spaceres);
3442 if (error)
3443 goto out_trans_cancel;
3444
3445 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3446 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3447 if (new_parent)
3448 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3449
3450 error = xfs_finish_rename(tp);
3451 if (wip)
3452 xfs_irele(wip);
3453 return error;
3454
3455 out_trans_cancel:
3456 xfs_trans_cancel(tp);
3457 out_release_wip:
3458 if (wip)
3459 xfs_irele(wip);
3460 return error;
3461 }
3462
3463 STATIC int
3464 xfs_iflush_cluster(
3465 struct xfs_inode *ip,
3466 struct xfs_buf *bp)
3467 {
3468 struct xfs_mount *mp = ip->i_mount;
3469 struct xfs_perag *pag;
3470 unsigned long first_index, mask;
3471 int cilist_size;
3472 struct xfs_inode **cilist;
3473 struct xfs_inode *cip;
3474 struct xfs_ino_geometry *igeo = M_IGEO(mp);
3475 int nr_found;
3476 int clcount = 0;
3477 int i;
3478
3479 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
3480
3481 cilist_size = igeo->inodes_per_cluster * sizeof(struct xfs_inode *);
3482 cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS);
3483 if (!cilist)
3484 goto out_put;
3485
3486 mask = ~(igeo->inodes_per_cluster - 1);
3487 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
3488 rcu_read_lock();
3489
3490 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist,
3491 first_index, igeo->inodes_per_cluster);
3492 if (nr_found == 0)
3493 goto out_free;
3494
3495 for (i = 0; i < nr_found; i++) {
3496 cip = cilist[i];
3497 if (cip == ip)
3498 continue;
3499
3500 /*
3501  * Because this is an RCU protected lookup, we could find a recently
3502  * freed or even reallocated inode during the lookup.  We need to
3503  * check that it is valid under the i_flags_lock here; skip it if it
3504  * is stale or has no inode number.
3505  */
3506 spin_lock(&cip->i_flags_lock);
3507 if (!cip->i_ino ||
3508 __xfs_iflags_test(cip, XFS_ISTALE)) {
3509 spin_unlock(&cip->i_flags_lock);
3510 continue;
3511 }
3512
3513 /*
3514  * Once we fall off the end of the cluster, no point checking
3515  * any more inodes in the list because they will also all be
3516  * outside the cluster.
3517  */
3518 if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) {
3519 spin_unlock(&cip->i_flags_lock);
3520 break;
3521 }
3522 spin_unlock(&cip->i_flags_lock);
3523
3524 /*
3525  * Do an un-protected check to see if the inode is dirty and
3526  * is a candidate for flushing.  These checks will be repeated
3527  * later after the appropriate locks are acquired.
3528  */
3529 if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0)
3530 continue;
3531
3532
3533 /*
3534  * Try to get the locks.  If any are unavailable or the inode is
3535  * pinned, it cannot be flushed and is skipped.
3536  */
3537 if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED))
3538 continue;
3539 if (!xfs_iflock_nowait(cip)) {
3540 xfs_iunlock(cip, XFS_ILOCK_SHARED);
3541 continue;
3542 }
3543 if (xfs_ipincount(cip)) {
3544 xfs_ifunlock(cip);
3545 xfs_iunlock(cip, XFS_ILOCK_SHARED);
3546 continue;
3547 }
3548
3549
3550
3551 /*
3552  * Re-check the inode number now that we hold the locks: a zeroed
3553  * i_ino means the inode is being freed by reclaim, so skip it
3554  * (the unprotected check done earlier is not sufficient).
3555  */
3556 if (!cip->i_ino) {
3557 xfs_ifunlock(cip);
3558 xfs_iunlock(cip, XFS_ILOCK_SHARED);
3559 continue;
3560 }
3561
3562 /*
3563  * Arriving here means that this inode can be flushed.  First
3564  * re-check that it's dirty before flushing.
3565  */
3566 if (!xfs_inode_clean(cip)) {
3567 int error;
3568 error = xfs_iflush_int(cip, bp);
3569 if (error) {
3570 xfs_iunlock(cip, XFS_ILOCK_SHARED);
3571 goto cluster_corrupt_out;
3572 }
3573 clcount++;
3574 } else {
3575 xfs_ifunlock(cip);
3576 }
3577 xfs_iunlock(cip, XFS_ILOCK_SHARED);
3578 }
3579
3580 if (clcount) {
3581 XFS_STATS_INC(mp, xs_icluster_flushcnt);
3582 XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3583 }
3584
3585 out_free:
3586 rcu_read_unlock();
3587 kmem_free(cilist);
3588 out_put:
3589 xfs_perag_put(pag);
3590 return 0;
3591
3592
3593 cluster_corrupt_out:
3594 /*
3595  * Corruption detected in the clustering loop.  Invalidate the
3596  * inode buffer and shut down the filesystem.
3597  */
3598 rcu_read_unlock();
3599
3600 /*
3601  * We'll always have an inode attached to the buffer for completion
3602  * processing by the time we are called from xfs_iflush().  Hence we
3603  * always need to run I/O completion to abort the inodes attached to
3604  * the buffer; handle them just like the shutdown case in
3605  * xfs_buf_submit().
3606  */
3607 ASSERT(bp->b_iodone);
3608 bp->b_flags |= XBF_ASYNC;
3609 bp->b_flags &= ~XBF_DONE;
3610 xfs_buf_stale(bp);
3611 xfs_buf_ioerror(bp, -EIO);
3612 xfs_buf_ioend(bp);
3613
3614 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3615
3616
3617 xfs_iflush_abort(cip, false);
3618 kmem_free(cilist);
3619 xfs_perag_put(pag);
3620 return -EFSCORRUPTED;
3621 }
3622
3623 /*
3624  * Flush dirty inode metadata into the backing buffer.
3625  *
3626  * The caller must have the inode lock and the inode flush lock held.  The
3627  * inode lock will still be held upon return to the caller, and the inode
3628  * flush lock will be released after the inode has reached the disk.
3629  *
3630  * The caller must write out the buffer returned in *bpp and release it.
3631  */
3632 int
3633 xfs_iflush(
3634 struct xfs_inode *ip,
3635 struct xfs_buf **bpp)
3636 {
3637 struct xfs_mount *mp = ip->i_mount;
3638 struct xfs_buf *bp = NULL;
3639 struct xfs_dinode *dip;
3640 int error;
3641
3642 XFS_STATS_INC(mp, xs_iflush_count);
3643
3644 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3645 ASSERT(xfs_isiflocked(ip));
3646 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3647 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3648
3649 *bpp = NULL;
3650
3651 xfs_iunpin_wait(ip);
3652
3653 /*
3654  * For stale inodes we cannot rely on the backing buffer remaining
3655  * stale in cache for the remaining life of the stale inode and so
3656  * xfs_imap_to_bp() below may give us a buffer that no longer contains
3657  * inodes.  We have to check this after ensuring the inode is
3658  * unpinned so that it is safe to reclaim the stale inode after the
3659  * flush call.
3660  */
3661 if (xfs_iflags_test(ip, XFS_ISTALE)) {
3662 xfs_ifunlock(ip);
3663 return 0;
3664 }
3665
3666
3667
3668 /*
3669  * This may have been unpinned because the filesystem was shut down.
3670  * In that case there is no point in flushing the inode any further:
3671  * abort the flush so the inode log item is cleaned up and the inode
3672  * can be reclaimed.
3673  */
3674 if (XFS_FORCED_SHUTDOWN(mp)) {
3675 error = -EIO;
3676 goto abort_out;
3677 }
3678
3679
3680 /*
3681  * Get the buffer containing the on-disk inode.  We are doing a
3682  * try-lock operation here, so we may get -EAGAIN.  In that case we
3683  * simply unlock the flush lock and return, leaving the inode dirty.
3684  *
3685  * If we get any other error, we effectively have a corruption
3686  * situation and we cannot flush the inode: abort and shut down.
3687  */
3688 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
3689 0);
3690 if (error == -EAGAIN) {
3691 xfs_ifunlock(ip);
3692 return error;
3693 }
3694 if (error)
3695 goto corrupt_out;
3696
3697 /*
3698  * First flush out the inode that xfs_iflush was called with.
3699  */
3700 error = xfs_iflush_int(ip, bp);
3701 if (error)
3702 goto corrupt_out;
3703
3704 /*
3705  * If the buffer is pinned then push on the log now so we won't
3706  * get stuck waiting in the write for too long.
3707  */
3708 if (xfs_buf_ispinned(bp))
3709 xfs_log_force(mp, 0);
3710
3711
3712 /*
3713  * Inode clustering: try to gather other dirty inodes in the same
3714  * cluster into this write.
3715  *
3716  * Note: if xfs_iflush_cluster() detects corruption it has already
3717  * shut the filesystem down and released the buffer, so all we have
3718  * to do here is return the error.
3719  */
3720 error = xfs_iflush_cluster(ip, bp);
3721 if (error)
3722 return error;
3723
3724 *bpp = bp;
3725 return 0;
3726
3727 corrupt_out:
3728 if (bp)
3729 xfs_buf_relse(bp);
3730 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3731 abort_out:
3732
3733 xfs_iflush_abort(ip, false);
3734 return error;
3735 }
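/*
 * Typical caller pattern for xfs_iflush() (an illustrative sketch, not a
 * verbatim caller from this file):
 *
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);
 *	if (xfs_iflock_nowait(ip)) {
 *		error = xfs_iflush(ip, &bp);
 *		if (!error) {
 *			error = xfs_bwrite(bp);
 *			xfs_buf_relse(bp);
 *		}
 *	}
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *
 * i.e. the caller owns the buffer returned in *bpp and is responsible for
 * submitting the write and dropping the buffer reference; the flush lock
 * is released by buffer I/O completion via xfs_iflush_done().
 */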
3736
3737 /*
3738  * If there are inline format data / attr forks attached to this inode,
3739  * make sure they're not corrupt.
3740  */
3741 bool
3742 xfs_inode_verify_forks(
3743 struct xfs_inode *ip)
3744 {
3745 struct xfs_ifork *ifp;
3746 xfs_failaddr_t fa;
3747
3748 fa = xfs_ifork_verify_data(ip, &xfs_default_ifork_ops);
3749 if (fa) {
3750 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
3751 xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork",
3752 ifp->if_u1.if_data, ifp->if_bytes, fa);
3753 return false;
3754 }
3755
3756 fa = xfs_ifork_verify_attr(ip, &xfs_default_ifork_ops);
3757 if (fa) {
3758 ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
3759 xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork",
3760 ifp ? ifp->if_u1.if_data : NULL,
3761 ifp ? ifp->if_bytes : 0, fa);
3762 return false;
3763 }
3764 return true;
3765 }
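/*
 * A minimal sketch of where this verifier sits in the writeback path
 * (illustrative): xfs_iflush_int() below calls it immediately before
 * copying the in-core inode to the on-disk buffer, so a corrupt inline
 * data or attr fork turns the flush into an -EFSCORRUPTED failure instead
 * of writing garbage to disk.
 */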
3766
3767 STATIC int
3768 xfs_iflush_int(
3769 struct xfs_inode *ip,
3770 struct xfs_buf *bp)
3771 {
3772 struct xfs_inode_log_item *iip = ip->i_itemp;
3773 struct xfs_dinode *dip;
3774 struct xfs_mount *mp = ip->i_mount;
3775
3776 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3777 ASSERT(xfs_isiflocked(ip));
3778 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3779 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3780 ASSERT(iip != NULL && iip->ili_fields != 0);
3781 ASSERT(ip->i_d.di_version > 1);
3782
3783
3784 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3785
3786 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3787 mp, XFS_ERRTAG_IFLUSH_1)) {
3788 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3789 "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
3790 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3791 goto corrupt_out;
3792 }
3793 if (S_ISREG(VFS_I(ip)->i_mode)) {
3794 if (XFS_TEST_ERROR(
3795 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3796 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3797 mp, XFS_ERRTAG_IFLUSH_3)) {
3798 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3799 "%s: Bad regular inode %Lu, ptr "PTR_FMT,
3800 __func__, ip->i_ino, ip);
3801 goto corrupt_out;
3802 }
3803 } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3804 if (XFS_TEST_ERROR(
3805 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3806 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3807 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3808 mp, XFS_ERRTAG_IFLUSH_4)) {
3809 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3810 "%s: Bad directory inode %Lu, ptr "PTR_FMT,
3811 __func__, ip->i_ino, ip);
3812 goto corrupt_out;
3813 }
3814 }
3815 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3816 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3817 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3818 "%s: detected corrupt incore inode %Lu, "
3819 "total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
3820 __func__, ip->i_ino,
3821 ip->i_d.di_nextents + ip->i_d.di_anextents,
3822 ip->i_d.di_nblocks, ip);
3823 goto corrupt_out;
3824 }
3825 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3826 mp, XFS_ERRTAG_IFLUSH_6)) {
3827 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3828 "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
3829 __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
3830 goto corrupt_out;
3831 }
3832
3833
3834 /*
3835  * Inode item log recovery for v2 inodes depends on the di_flushiter
3836  * count for correct sequencing.  We bump the flush iteration count so
3837  * we can detect flushes which postdate a log record during recovery.
3838  * This is redundant as we now log every change and hence this can't
3839  * happen, but we still need to do it to ensure backwards compatibility
3840  * with old kernels that predate logging all inode changes.
3841  */
3842 if (ip->i_d.di_version < 3)
3843 ip->i_d.di_flushiter++;
3844
3845 /* Check the inline fork data before we write out. */
3846 if (!xfs_inode_verify_forks(ip))
3847 goto corrupt_out;
3848
3849 /*
3850  * Copy the dirty parts of the inode into the on-disk inode.  We always
3851  * copy out the core of the inode, because if the inode is dirty at all
3852  * the core must be.
3853  */
3854 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3855
3856 /* Wrap, we never let the log see DI_MAX_FLUSH */
3857 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3858 ip->i_d.di_flushiter = 0;
3859
3860 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3861 if (XFS_IFORK_Q(ip))
3862 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3863 xfs_inobp_check(mp, bp);
3864
3865 /*
3866  * We've recorded everything logged in the inode, so we'd like to clear
3867  * the ili_fields bits so we don't log and flush things unnecessarily.
3868  * However, we can't stop logging all this information until the data
3869  * we've copied into the disk buffer is written to disk.  If we did we
3870  * might overwrite the copy of the inode in the log with all the data
3871  * after re-logging only part of it, and in the face of a crash we
3872  * wouldn't have all the data we need to recover.
3873  *
3874  * What we do is move the bits to the ili_last_fields field.  When
3875  * logging the inode, these bits are moved back to the ili_fields field.
3876  * In xfs_iflush_done() we clear ili_last_fields, since we know that the
3877  * information those bits represent is permanently on disk.  As long as
3878  * the flush completes before the inode is logged again, then both
3879  * ili_fields and ili_last_fields will be cleared.
3880  *
3881  * We can play with the ili_fields bits here because the inode lock
3882  * must be held exclusively in order to set bits there and the flush
3883  * lock protects the ili_last_fields bits.  Set ili_logged so the flush
3884  * done routine can tell whether or not to look in the AIL.  Also, store
3885  * the current LSN of the inode so that we can tell whether the item has
3886  * moved in the AIL from xfs_iflush_done().  In order to read the lsn we
3887  * need the AIL lock, because it is a 64 bit value that cannot be read
3888  * atomically.
3889  */
3890 iip->ili_last_fields = iip->ili_fields;
3891 iip->ili_fields = 0;
3892 iip->ili_fsync_fields = 0;
3893 iip->ili_logged = 1;
3894
3895 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3896 &iip->ili_item.li_lsn);
3897
3898 /*
3899  * Attach the function xfs_iflush_done to the inode's
3900  * buffer.  This will remove the inode from the AIL
3901  * and unlock the inode's flush lock when the inode is
3902  * completely written to disk.
3903  */
3904 xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
3905
3906 /* generate the checksum. */
3907 xfs_dinode_calc_crc(mp, dip);
3908
3909 ASSERT(!list_empty(&bp->b_li_list));
3910 ASSERT(bp->b_iodone != NULL);
3911 return 0;
3912
3913 corrupt_out:
3914 return -EFSCORRUPTED;
3915 }
3916
3917 /* Release a reference to an XFS inode by dropping the VFS reference. */
3918 void
3919 xfs_irele(
3920 struct xfs_inode *ip)
3921 {
3922 trace_xfs_irele(ip, _RET_IP_);
3923 iput(VFS_I(ip));
3924 }