This source file includes the following definitions:
- iget_test
- iget_set
- gfs2_iget
- gfs2_set_iop
- gfs2_inode_lookup
- gfs2_lookup_by_inum
- gfs2_lookup_simple
- gfs2_lookupi
- create_ok
- munge_mode_uid_gid
- alloc_dinode
- gfs2_init_dir
- gfs2_init_xattr
- init_dinode
- gfs2_trans_da_blks
- link_dinode
- gfs2_initxattrs
- gfs2_create_inode
- gfs2_create
- __gfs2_lookup
- gfs2_lookup
- gfs2_link
- gfs2_unlink_ok
- gfs2_unlink_inode
- gfs2_unlink
- gfs2_symlink
- gfs2_mkdir
- gfs2_mknod
- gfs2_atomic_open
- gfs2_ok_to_move
- update_moved_ino
- gfs2_rename
- gfs2_exchange
- gfs2_rename2
- gfs2_get_link
- gfs2_permission
- __gfs2_setattr_simple
- gfs2_setattr_simple
- setattr_chown
- gfs2_setattr
- gfs2_getattr
- gfs2_fiemap
- gfs2_seek_data
- gfs2_seek_hole
7 #include <linux/slab.h>
8 #include <linux/spinlock.h>
9 #include <linux/completion.h>
10 #include <linux/buffer_head.h>
11 #include <linux/namei.h>
12 #include <linux/mm.h>
13 #include <linux/cred.h>
14 #include <linux/xattr.h>
15 #include <linux/posix_acl.h>
16 #include <linux/gfs2_ondisk.h>
17 #include <linux/crc32.h>
18 #include <linux/iomap.h>
19 #include <linux/security.h>
20 #include <linux/uaccess.h>
21
22 #include "gfs2.h"
23 #include "incore.h"
24 #include "acl.h"
25 #include "bmap.h"
26 #include "dir.h"
27 #include "xattr.h"
28 #include "glock.h"
29 #include "inode.h"
30 #include "meta_io.h"
31 #include "quota.h"
32 #include "rgrp.h"
33 #include "trans.h"
34 #include "util.h"
35 #include "super.h"
36 #include "glops.h"
37
38 static int iget_test(struct inode *inode, void *opaque)
39 {
40 u64 no_addr = *(u64 *)opaque;
41
42 return GFS2_I(inode)->i_no_addr == no_addr;
43 }
44
45 static int iget_set(struct inode *inode, void *opaque)
46 {
47 u64 no_addr = *(u64 *)opaque;
48
49 GFS2_I(inode)->i_no_addr = no_addr;
50 inode->i_ino = no_addr;
51 return 0;
52 }
53
54 static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
55 {
56 struct inode *inode;
57
58 repeat:
59 inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
60 if (!inode)
61 return inode;
62 if (is_bad_inode(inode)) {
63 iput(inode);
64 goto repeat;
65 }
66 return inode;
67 }
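
For reference, gfs2_iget() builds on iget5_locked(), which searches the inode hash using the iget_test() callback above and initialises a freshly allocated inode with iget_set(). The following is a minimal user-space sketch of that lookup-or-create pattern (illustration only, not GFS2 code; a plain array stands in for the inode hash):

#include <stdio.h>
#include <stdint.h>

struct fake_inode { uint64_t no_addr; int in_use; };

static struct fake_inode cache[8];

static int test(struct fake_inode *in, void *opaque)
{
	return in->no_addr == *(uint64_t *)opaque;
}

static void set(struct fake_inode *in, void *opaque)
{
	in->no_addr = *(uint64_t *)opaque;
}

/* Find an inode matching @no_addr, or claim a free slot and initialise it. */
static struct fake_inode *iget(uint64_t no_addr)
{
	for (int i = 0; i < 8; i++)
		if (cache[i].in_use && test(&cache[i], &no_addr))
			return &cache[i];
	for (int i = 0; i < 8; i++)
		if (!cache[i].in_use) {
			cache[i].in_use = 1;
			set(&cache[i], &no_addr);
			return &cache[i];
		}
	return NULL;
}

int main(void)
{
	struct fake_inode *a = iget(1234), *b = iget(1234);

	printf("same object: %s\n", a == b ? "yes" : "no");	/* yes */
	return 0;
}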
68
69 /**
70  * gfs2_set_iop - Set inode operations
71  * @inode: The inode with correct i_mode filled in
72  *
73  * GFS2 lookup code fills in the VFS inode contents based on info
74  * obtained from the directory entry inside gfs2_inode_lookup().
75  */
76 
77 static void gfs2_set_iop(struct inode *inode)
78 {
79 struct gfs2_sbd *sdp = GFS2_SB(inode);
80 umode_t mode = inode->i_mode;
81
82 if (S_ISREG(mode)) {
83 inode->i_op = &gfs2_file_iops;
84 if (gfs2_localflocks(sdp))
85 inode->i_fop = &gfs2_file_fops_nolock;
86 else
87 inode->i_fop = &gfs2_file_fops;
88 } else if (S_ISDIR(mode)) {
89 inode->i_op = &gfs2_dir_iops;
90 if (gfs2_localflocks(sdp))
91 inode->i_fop = &gfs2_dir_fops_nolock;
92 else
93 inode->i_fop = &gfs2_dir_fops;
94 } else if (S_ISLNK(mode)) {
95 inode->i_op = &gfs2_symlink_iops;
96 } else {
97 inode->i_op = &gfs2_file_iops;
98 init_special_inode(inode, inode->i_mode, inode->i_rdev);
99 }
100 }
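
The dispatch in gfs2_set_iop() is driven purely by i_mode. The same S_ISxxx tests are available in user space, so a small stand-alone sketch (illustration only) of the classification looks like this:

#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path>\n", argv[0]);
		return 1;
	}
	if (lstat(argv[1], &st) != 0) {
		perror("lstat");
		return 1;
	}
	/* Same dispatch order as gfs2_set_iop(): regular, directory, symlink, special */
	if (S_ISREG(st.st_mode))
		puts("regular file  -> gfs2_file_iops + gfs2_file_fops");
	else if (S_ISDIR(st.st_mode))
		puts("directory     -> gfs2_dir_iops + gfs2_dir_fops");
	else if (S_ISLNK(st.st_mode))
		puts("symlink       -> gfs2_symlink_iops");
	else
		puts("special file  -> gfs2_file_iops + init_special_inode()");
	return 0;
}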
101 /**
102  * gfs2_inode_lookup - Look up or create a GFS2 inode
103  * @sb: The super block
104  * @type: The type of the inode (DT_UNKNOWN if not known)
105  * @no_addr: The inode number (disk address of the dinode block)
106  * @no_formal_ino: The inode generation number
107  * @blktype: Requested block type (GFS2_BLKST_DINODE or GFS2_BLKST_UNLINKED;
108  *           GFS2_BLKST_FREE means "do not verify the block type")
109  *
110  * If @type is DT_UNKNOWN, the inode mode is read from disk via
111  * gfs2_inode_refresh(); otherwise i_mode is derived from @type and the
112  * dinode is read in later.
113  *
114  * If @blktype is anything other than GFS2_BLKST_FREE, the on-disk block
115  * is verified to be of that type before the inode is set up.
116  *
117  * Returns: A VFS inode, or an ERR_PTR() on failure
118  */
119 
120 struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
121 u64 no_addr, u64 no_formal_ino,
122 unsigned int blktype)
123 {
124 struct inode *inode;
125 struct gfs2_inode *ip;
126 struct gfs2_glock *io_gl = NULL;
127 struct gfs2_holder i_gh;
128 int error;
129
130 gfs2_holder_mark_uninitialized(&i_gh);
131 inode = gfs2_iget(sb, no_addr);
132 if (!inode)
133 return ERR_PTR(-ENOMEM);
134
135 ip = GFS2_I(inode);
136
137 if (inode->i_state & I_NEW) {
138 struct gfs2_sbd *sdp = GFS2_SB(inode);
139 ip->i_no_formal_ino = no_formal_ino;
140
141 error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
142 if (unlikely(error))
143 goto fail;
144 flush_delayed_work(&ip->i_gl->gl_work);
145
146 error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
147 if (unlikely(error))
148 goto fail_put;
149
150 if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
151 /*
152  * The GL_SKIP flag indicates to skip reading the inode
153  * block.  We read the inode with gfs2_inode_refresh
154  * after possibly checking the block type.
155  */
156 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
157 GL_SKIP, &i_gh);
158 if (error)
159 goto fail_put;
160
161 if (blktype != GFS2_BLKST_FREE) {
162 error = gfs2_check_blk_type(sdp, no_addr,
163 blktype);
164 if (error)
165 goto fail_put;
166 }
167 }
168
169 glock_set_object(ip->i_gl, ip);
170 set_bit(GIF_INVALID, &ip->i_flags);
171 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
172 if (unlikely(error))
173 goto fail_put;
174 glock_set_object(ip->i_iopen_gh.gh_gl, ip);
175 gfs2_glock_put(io_gl);
176 io_gl = NULL;
177
178 if (type == DT_UNKNOWN) {
179 /* Type unknown: read the dinode in now to fill in i_mode etc. */
180 error = gfs2_inode_refresh(GFS2_I(inode));
181 if (error)
182 goto fail_refresh;
183 } else {
184 inode->i_mode = DT2IF(type);
185 }
186
187 gfs2_set_iop(inode);
188
189 /* Set the oldest possible atime so that any real atime will update it */
190 inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
191 inode->i_atime.tv_nsec = 0;
192
193 unlock_new_inode(inode);
194 }
195
196 if (gfs2_holder_initialized(&i_gh))
197 gfs2_glock_dq_uninit(&i_gh);
198 return inode;
199
200 fail_refresh:
201 ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
202 glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
203 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
204 fail_put:
205 if (io_gl)
206 gfs2_glock_put(io_gl);
207 glock_clear_object(ip->i_gl, ip);
208 if (gfs2_holder_initialized(&i_gh))
209 gfs2_glock_dq_uninit(&i_gh);
210 fail:
211 iget_failed(inode);
212 return ERR_PTR(error);
213 }
214
215 struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
216 u64 *no_formal_ino, unsigned int blktype)
217 {
218 struct super_block *sb = sdp->sd_vfs;
219 struct inode *inode;
220 int error;
221
222 inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0, blktype);
223 if (IS_ERR(inode))
224 return inode;
225
226 /* Two extra checks for NFS only */
227 if (no_formal_ino) {
228 error = -ESTALE;
229 if (GFS2_I(inode)->i_no_formal_ino != *no_formal_ino)
230 goto fail_iput;
231
232 error = -EIO;
233 if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM)
234 goto fail_iput;
235 }
236 return inode;
237
238 fail_iput:
239 iput(inode);
240 return ERR_PTR(error);
241 }
242
243
244 struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
245 {
246 struct qstr qstr;
247 struct inode *inode;
248 gfs2_str2qstr(&qstr, name);
249 inode = gfs2_lookupi(dip, &qstr, 1);
250 /* gfs2_lookupi has inconsistent callers: vfs
251  * related routines expect NULL for no entry found,
252  * gfs2_lookup_simple callers expect ENOENT
253  * and do not check for NULL.
254  */
255 if (inode == NULL)
256 return ERR_PTR(-ENOENT);
257 else
258 return inode;
259 }
260
261 /**
262  * gfs2_lookupi - Look up a filename in a directory and return its inode
263  * @dir: The directory inode
264  * @name: The name of the inode to look for
265  * @is_root: If 1, ignore the caller's permissions
266  *
267  * This can be called via the VFS filldir function when NFS is doing
268  * a readdirplus and the inode which it intends to stat isn't already
269  * in cache. In that case we must not take the directory glock again,
270  * since the readdir call will have already taken that lock.
271  *
272  * The directory glock is otherwise taken shared for the duration of
273  * the directory search.
274  *
275  * Returns: The inode, NULL if the entry was not found, or an ERR_PTR()
276  */
277 struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
278 int is_root)
279 {
280 struct super_block *sb = dir->i_sb;
281 struct gfs2_inode *dip = GFS2_I(dir);
282 struct gfs2_holder d_gh;
283 int error = 0;
284 struct inode *inode = NULL;
285
286 gfs2_holder_mark_uninitialized(&d_gh);
287 if (!name->len || name->len > GFS2_FNAMESIZE)
288 return ERR_PTR(-ENAMETOOLONG);
289
290 if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
291 (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
292 dir == d_inode(sb->s_root))) {
293 igrab(dir);
294 return dir;
295 }
296
297 if (gfs2_glock_is_locked_by_me(dip->i_gl) == NULL) {
298 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
299 if (error)
300 return ERR_PTR(error);
301 }
302
303 if (!is_root) {
304 error = gfs2_permission(dir, MAY_EXEC);
305 if (error)
306 goto out;
307 }
308
309 inode = gfs2_dir_search(dir, name, false);
310 if (IS_ERR(inode))
311 error = PTR_ERR(inode);
312 out:
313 if (gfs2_holder_initialized(&d_gh))
314 gfs2_glock_dq_uninit(&d_gh);
315 if (error == -ENOENT)
316 return NULL;
317 return inode ? inode : ERR_PTR(error);
318 }
319
320 /**
321  * create_ok - OK to create a new on-disk inode here?
322  * @dip: Directory in which the inode is to be created
323  * @name: Name of the new entry
324  * @mode: Mode of the new inode
325  *
326  * Returns: errno
327  */
328 
329 static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
330 umode_t mode)
331 {
332 int error;
333
334 error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
335 if (error)
336 return error;
337
338 /* Don't create entries in an unlinked directory */
339 if (!dip->i_inode.i_nlink)
340 return -ENOENT;
341
342 if (dip->i_entries == (u32)-1)
343 return -EFBIG;
344 if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
345 return -EMLINK;
346
347 return 0;
348 }
349
350 static void munge_mode_uid_gid(const struct gfs2_inode *dip,
351 struct inode *inode)
352 {
353 if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
354 (dip->i_inode.i_mode & S_ISUID) &&
355 !uid_eq(dip->i_inode.i_uid, GLOBAL_ROOT_UID)) {
356 if (S_ISDIR(inode->i_mode))
357 inode->i_mode |= S_ISUID;
358 else if (!uid_eq(dip->i_inode.i_uid, current_fsuid()))
359 inode->i_mode &= ~07111;
360 inode->i_uid = dip->i_inode.i_uid;
361 } else
362 inode->i_uid = current_fsuid();
363
364 if (dip->i_inode.i_mode & S_ISGID) {
365 if (S_ISDIR(inode->i_mode))
366 inode->i_mode |= S_ISGID;
367 inode->i_gid = dip->i_inode.i_gid;
368 } else
369 inode->i_gid = current_fsgid();
370 }
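
munge_mode_uid_gid() implements the usual setgid-directory inheritance plus the optional "suiddir" mount behaviour. A hedged user-space sketch of just the group-inheritance decision follows (illustration only; the suiddir and mount-option parts are omitted and all names here are made up):

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/*
 * Decide the group of a new inode the way munge_mode_uid_gid() does for the
 * S_ISGID case: a setgid directory passes its gid (and, for new directories,
 * the setgid bit) down to the child; otherwise the caller's fsgid is used.
 */
static void inherit_gid(mode_t dir_mode, gid_t dir_gid, gid_t fsgid,
			mode_t *new_mode, gid_t *new_gid)
{
	if (dir_mode & S_ISGID) {
		if (S_ISDIR(*new_mode))
			*new_mode |= S_ISGID;
		*new_gid = dir_gid;
	} else {
		*new_gid = fsgid;
	}
}

int main(void)
{
	mode_t mode = S_IFDIR | 0755;
	gid_t gid = 0;

	inherit_gid(S_IFDIR | S_ISGID | 0775, 1000, 500, &mode, &gid);
	printf("new dir gid=%u setgid=%s\n", (unsigned)gid,
	       (mode & S_ISGID) ? "yes" : "no");	/* gid=1000 setgid=yes */
	return 0;
}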
371
372 static int alloc_dinode(struct gfs2_inode *ip, u32 flags, unsigned *dblocks)
373 {
374 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
375 struct gfs2_alloc_parms ap = { .target = *dblocks, .aflags = flags, };
376 int error;
377
378 error = gfs2_quota_lock_check(ip, &ap);
379 if (error)
380 goto out;
381
382 error = gfs2_inplace_reserve(ip, &ap);
383 if (error)
384 goto out_quota;
385
386 error = gfs2_trans_begin(sdp, (*dblocks * RES_RG_BIT) + RES_STATFS + RES_QUOTA, 0);
387 if (error)
388 goto out_ipreserv;
389
390 error = gfs2_alloc_blocks(ip, &ip->i_no_addr, dblocks, 1, &ip->i_generation);
391 ip->i_no_formal_ino = ip->i_generation;
392 ip->i_inode.i_ino = ip->i_no_addr;
393 ip->i_goal = ip->i_no_addr;
394
395 gfs2_trans_end(sdp);
396
397 out_ipreserv:
398 gfs2_inplace_release(ip);
399 out_quota:
400 gfs2_quota_unlock(ip);
401 out:
402 return error;
403 }
404
405 static void gfs2_init_dir(struct buffer_head *dibh,
406 const struct gfs2_inode *parent)
407 {
408 struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
409 struct gfs2_dirent *dent = (struct gfs2_dirent *)(di+1);
410
411 gfs2_qstr2dirent(&gfs2_qdot, GFS2_DIRENT_SIZE(gfs2_qdot.len), dent);
412 dent->de_inum = di->di_num;
413 dent->de_type = cpu_to_be16(DT_DIR);
414
415 dent = (struct gfs2_dirent *)((char*)dent + GFS2_DIRENT_SIZE(1));
416 gfs2_qstr2dirent(&gfs2_qdotdot, dibh->b_size - GFS2_DIRENT_SIZE(1) - sizeof(struct gfs2_dinode), dent);
417 gfs2_inum_out(parent, dent);
418 dent->de_type = cpu_to_be16(DT_DIR);
419
420 }
421
422 /**
423  * gfs2_init_xattr - Initialise an xattr block for a new inode
424  * @ip: The inode in question
425  *
426  * This sets up an empty xattr block for a new inode, ready to
427  * take any ACLs, LSM xattrs, etc.
428  */
429 
430 static void gfs2_init_xattr(struct gfs2_inode *ip)
431 {
432 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
433 struct buffer_head *bh;
434 struct gfs2_ea_header *ea;
435
436 bh = gfs2_meta_new(ip->i_gl, ip->i_eattr);
437 gfs2_trans_add_meta(ip->i_gl, bh);
438 gfs2_metatype_set(bh, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
439 gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
440
441 ea = GFS2_EA_BH2FIRST(bh);
442 ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
443 ea->ea_type = GFS2_EATYPE_UNUSED;
444 ea->ea_flags = GFS2_EAFLAG_LAST;
445
446 brelse(bh);
447 }
448
449 /**
450  * init_dinode - Fill in a new dinode structure
451  * @dip: The directory this inode is being created in
452  * @ip: The inode
453  * @symname: The symlink destination, if this is a symlink
454  *
455  * Writes the initial on-disk dinode, including the "." and ".."
456  * entries for a directory and the target string for a symlink.
457  */
458 static void init_dinode(struct gfs2_inode *dip, struct gfs2_inode *ip,
459 const char *symname)
460 {
461 struct gfs2_dinode *di;
462 struct buffer_head *dibh;
463
464 dibh = gfs2_meta_new(ip->i_gl, ip->i_no_addr);
465 gfs2_trans_add_meta(ip->i_gl, dibh);
466 di = (struct gfs2_dinode *)dibh->b_data;
467 gfs2_dinode_out(ip, di);
468
469 di->di_major = cpu_to_be32(MAJOR(ip->i_inode.i_rdev));
470 di->di_minor = cpu_to_be32(MINOR(ip->i_inode.i_rdev));
471 di->__pad1 = 0;
472 di->__pad2 = 0;
473 di->__pad3 = 0;
474 memset(&di->__pad4, 0, sizeof(di->__pad4));
475 memset(&di->di_reserved, 0, sizeof(di->di_reserved));
476 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
477
478 switch(ip->i_inode.i_mode & S_IFMT) {
479 case S_IFDIR:
480 gfs2_init_dir(dibh, dip);
481 break;
482 case S_IFLNK:
483 memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname, ip->i_inode.i_size);
484 break;
485 }
486
487 set_buffer_uptodate(dibh);
488 brelse(dibh);
489 }
490
491 /**
492  * gfs2_trans_da_blks - Calculate the blocks to reserve to link an inode
493  * @dip: The directory we are linking into
494  * @da: The dir add information
495  * @nr_inodes: The number of inodes involved
496  *
497  * This calculates the number of blocks we need to reserve in a
498  * transaction to link @nr_inodes into a directory. In most cases
499  * @nr_inodes will be 2 (the directory plus the inode being linked in)
500  * but in case of rename, 4 may be required.
501  *
502  * Returns: Number of blocks
503  */
504 
505 static unsigned gfs2_trans_da_blks(const struct gfs2_inode *dip,
506 const struct gfs2_diradd *da,
507 unsigned nr_inodes)
508 {
509 return da->nr_blocks + gfs2_rg_blocks(dip, da->nr_blocks) +
510 (nr_inodes * RES_DINODE) + RES_QUOTA + RES_STATFS;
511 }
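
gfs2_trans_da_blks() sizes the transaction for linking inodes into a directory: the blocks the directory add itself needs, the resource-group bitmap blocks covering them, one dinode's worth of metadata per inode touched, plus quota and statfs blocks. A stand-alone restatement of the formula (illustration only; the RES_* constants are parameters here because their real values live in the GFS2 headers):

#include <stdio.h>

static unsigned trans_da_blks(unsigned da_blocks, unsigned rg_blocks,
			      unsigned nr_inodes, unsigned res_dinode,
			      unsigned res_quota, unsigned res_statfs)
{
	/* Same shape as the kernel formula: dir-add blocks + rgrp bitmap
	 * blocks + per-inode dinode reservation + quota + statfs. */
	return da_blocks + rg_blocks + nr_inodes * res_dinode +
	       res_quota + res_statfs;
}

int main(void)
{
	/* Purely illustrative numbers, not the real reservation constants. */
	printf("%u\n", trans_da_blks(1, 1, 2, 1, 1, 1));
	return 0;
}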
512
513 static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
514 struct gfs2_inode *ip, struct gfs2_diradd *da)
515 {
516 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
517 struct gfs2_alloc_parms ap = { .target = da->nr_blocks, };
518 int error;
519
520 if (da->nr_blocks) {
521 error = gfs2_quota_lock_check(dip, &ap);
522 if (error)
523 goto fail_quota_locks;
524
525 error = gfs2_inplace_reserve(dip, &ap);
526 if (error)
527 goto fail_quota_locks;
528
529 error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(dip, da, 2), 0);
530 if (error)
531 goto fail_ipreserv;
532 } else {
533 error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
534 if (error)
535 goto fail_quota_locks;
536 }
537
538 error = gfs2_dir_add(&dip->i_inode, name, ip, da);
539
540 gfs2_trans_end(sdp);
541 fail_ipreserv:
542 gfs2_inplace_release(dip);
543 fail_quota_locks:
544 gfs2_quota_unlock(dip);
545 return error;
546 }
547
548 static int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
549 void *fs_info)
550 {
551 const struct xattr *xattr;
552 int err = 0;
553
554 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
555 err = __gfs2_xattr_set(inode, xattr->name, xattr->value,
556 xattr->value_len, 0,
557 GFS2_EATYPE_SECURITY);
558 if (err < 0)
559 break;
560 }
561 return err;
562 }
563
564 /**
565  * gfs2_create_inode - Create a new inode
566  * @dir: The parent directory
567  * @dentry: The new dentry
568  * @file: If non-NULL, the file which is being opened
569  * @mode: The permissions on the new inode
570  * @dev: For device nodes, this is the device number
571  * @symname: For symlinks, this is the link destination
572  * @size: The initial size of the inode (ignored for directories)
573  * @excl: Force fail if inode exists
574  *
575  * Returns: errno
576  */
577 static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
578 struct file *file,
579 umode_t mode, dev_t dev, const char *symname,
580 unsigned int size, int excl)
581 {
582 const struct qstr *name = &dentry->d_name;
583 struct posix_acl *default_acl, *acl;
584 struct gfs2_holder ghs[2];
585 struct inode *inode = NULL;
586 struct gfs2_inode *dip = GFS2_I(dir), *ip;
587 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
588 struct gfs2_glock *io_gl = NULL;
589 int error, free_vfs_inode = 1;
590 u32 aflags = 0;
591 unsigned blocks = 1;
592 struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, };
593
594 if (!name->len || name->len > GFS2_FNAMESIZE)
595 return -ENAMETOOLONG;
596
597 error = gfs2_rsqa_alloc(dip);
598 if (error)
599 return error;
600
601 error = gfs2_rindex_update(sdp);
602 if (error)
603 return error;
604
605 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
606 if (error)
607 goto fail;
608 gfs2_holder_mark_uninitialized(ghs + 1);
609
610 error = create_ok(dip, name, mode);
611 if (error)
612 goto fail_gunlock;
613
614 inode = gfs2_dir_search(dir, &dentry->d_name, !S_ISREG(mode) || excl);
615 error = PTR_ERR(inode);
616 if (!IS_ERR(inode)) {
617 if (S_ISDIR(inode->i_mode)) {
618 iput(inode);
619 inode = ERR_PTR(-EISDIR);
620 goto fail_gunlock;
621 }
622 d_instantiate(dentry, inode);
623 error = 0;
624 if (file) {
625 if (S_ISREG(inode->i_mode))
626 error = finish_open(file, dentry, gfs2_open_common);
627 else
628 error = finish_no_open(file, NULL);
629 }
630 gfs2_glock_dq_uninit(ghs);
631 return error;
632 } else if (error != -ENOENT) {
633 goto fail_gunlock;
634 }
635
636 error = gfs2_diradd_alloc_required(dir, name, &da);
637 if (error < 0)
638 goto fail_gunlock;
639
640 inode = new_inode(sdp->sd_vfs);
641 error = -ENOMEM;
642 if (!inode)
643 goto fail_gunlock;
644
645 error = posix_acl_create(dir, &mode, &default_acl, &acl);
646 if (error)
647 goto fail_gunlock;
648
649 ip = GFS2_I(inode);
650 error = gfs2_rsqa_alloc(ip);
651 if (error)
652 goto fail_free_acls;
653
654 inode->i_mode = mode;
655 set_nlink(inode, S_ISDIR(mode) ? 2 : 1);
656 inode->i_rdev = dev;
657 inode->i_size = size;
658 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
659 gfs2_set_inode_blocks(inode, 1);
660 munge_mode_uid_gid(dip, inode);
661 check_and_update_goal(dip);
662 ip->i_goal = dip->i_goal;
663 ip->i_diskflags = 0;
664 ip->i_eattr = 0;
665 ip->i_height = 0;
666 ip->i_depth = 0;
667 ip->i_entries = 0;
668 ip->i_no_addr = 0;
669
670 switch(mode & S_IFMT) {
671 case S_IFREG:
672 if ((dip->i_diskflags & GFS2_DIF_INHERIT_JDATA) ||
673 gfs2_tune_get(sdp, gt_new_files_jdata))
674 ip->i_diskflags |= GFS2_DIF_JDATA;
675 gfs2_set_aops(inode);
676 break;
677 case S_IFDIR:
678 ip->i_diskflags |= (dip->i_diskflags & GFS2_DIF_INHERIT_JDATA);
679 ip->i_diskflags |= GFS2_DIF_JDATA;
680 ip->i_entries = 2;
681 break;
682 }
683
684 /* New inodes inherit the "system" flag from their parent directory */
685 if (dip->i_diskflags & GFS2_DIF_SYSTEM)
686 ip->i_diskflags |= GFS2_DIF_SYSTEM;
687
688 gfs2_set_inode_flags(inode);
689
690 if ((GFS2_I(d_inode(sdp->sd_root_dir)) == dip) ||
691 (dip->i_diskflags & GFS2_DIF_TOPDIR))
692 aflags |= GFS2_AF_ORLOV;
693
694 if (default_acl || acl)
695 blocks++;
696
697 error = alloc_dinode(ip, aflags, &blocks);
698 if (error)
699 goto fail_free_inode;
700
701 gfs2_set_inode_blocks(inode, blocks);
702
703 error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
704 if (error)
705 goto fail_free_inode;
706 flush_delayed_work(&ip->i_gl->gl_work);
707 glock_set_object(ip->i_gl, ip);
708
709 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
710 if (error)
711 goto fail_free_inode;
712
713 error = gfs2_trans_begin(sdp, blocks, 0);
714 if (error)
715 goto fail_gunlock2;
716
717 if (blocks > 1) {
718 ip->i_eattr = ip->i_no_addr + 1;
719 gfs2_init_xattr(ip);
720 }
721 init_dinode(dip, ip, symname);
722 gfs2_trans_end(sdp);
723
724 error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
725 if (error)
726 goto fail_gunlock2;
727
728 BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags));
729
730 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
731 if (error)
732 goto fail_gunlock2;
733
734 glock_set_object(ip->i_iopen_gh.gh_gl, ip);
735 gfs2_glock_put(io_gl);
736 gfs2_set_iop(inode);
737 insert_inode_hash(inode);
738
739 free_vfs_inode = 0;
740
741
742 if (default_acl) {
743 error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
744 if (error)
745 goto fail_gunlock3;
746 posix_acl_release(default_acl);
747 default_acl = NULL;
748 }
749 if (acl) {
750 error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
751 if (error)
752 goto fail_gunlock3;
753 posix_acl_release(acl);
754 acl = NULL;
755 }
756
757 error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
758 &gfs2_initxattrs, NULL);
759 if (error)
760 goto fail_gunlock3;
761
762 error = link_dinode(dip, name, ip, &da);
763 if (error)
764 goto fail_gunlock3;
765
766 mark_inode_dirty(inode);
767 d_instantiate(dentry, inode);
768 if (file) {
769 file->f_mode |= FMODE_CREATED;
770 error = finish_open(file, dentry, gfs2_open_common);
771 }
772 gfs2_glock_dq_uninit(ghs);
773 gfs2_glock_dq_uninit(ghs + 1);
774 clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
775 return error;
776
777 fail_gunlock3:
778 glock_clear_object(io_gl, ip);
779 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
780 gfs2_glock_put(io_gl);
781 fail_gunlock2:
782 if (io_gl)
783 clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
784 fail_free_inode:
785 if (ip->i_gl) {
786 glock_clear_object(ip->i_gl, ip);
787 gfs2_glock_put(ip->i_gl);
788 }
789 gfs2_rsqa_delete(ip, NULL);
790 fail_free_acls:
791 posix_acl_release(default_acl);
792 posix_acl_release(acl);
793 fail_gunlock:
794 gfs2_dir_no_add(&da);
795 gfs2_glock_dq_uninit(ghs);
796 if (!IS_ERR_OR_NULL(inode)) {
797 clear_nlink(inode);
798 if (!free_vfs_inode)
799 mark_inode_dirty(inode);
800 set_bit(free_vfs_inode ? GIF_FREE_VFS_INODE : GIF_ALLOC_FAILED,
801 &GFS2_I(inode)->i_flags);
802 iput(inode);
803 }
804 if (gfs2_holder_initialized(ghs + 1))
805 gfs2_glock_dq_uninit(ghs + 1);
806 fail:
807 return error;
808 }
809
810 /**
811  * gfs2_create - Create a file
812  * @dir: The directory in which to create the file
813  * @dentry: The dentry of the new file
814  * @mode: The mode of the new file
815  * @excl: Force fail if inode exists
816  *
817  * Returns: errno
818  */
819 static int gfs2_create(struct inode *dir, struct dentry *dentry,
820 umode_t mode, bool excl)
821 {
822 return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl);
823 }
824
825 /**
826  * __gfs2_lookup - Look up a filename in a directory and return its inode
827  * @dir: The directory inode
828  * @dentry: The dentry of the new inode
829  * @file: File to be opened (or NULL for a plain lookup)
830  *
831  * The inode glock is held shared across d_splice_alias() and any
832  * finish_open() call.
833  * Returns: the new dentry, NULL, or an ERR_PTR()
834  */
835 static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
836 struct file *file)
837 {
838 struct inode *inode;
839 struct dentry *d;
840 struct gfs2_holder gh;
841 struct gfs2_glock *gl;
842 int error;
843
844 inode = gfs2_lookupi(dir, &dentry->d_name, 0);
845 if (inode == NULL) {
846 d_add(dentry, NULL);
847 return NULL;
848 }
849 if (IS_ERR(inode))
850 return ERR_CAST(inode);
851
852 gl = GFS2_I(inode)->i_gl;
853 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
854 if (error) {
855 iput(inode);
856 return ERR_PTR(error);
857 }
858
859 d = d_splice_alias(inode, dentry);
860 if (IS_ERR(d)) {
861 gfs2_glock_dq_uninit(&gh);
862 return d;
863 }
864 if (file && S_ISREG(inode->i_mode))
865 error = finish_open(file, dentry, gfs2_open_common);
866
867 gfs2_glock_dq_uninit(&gh);
868 if (error) {
869 dput(d);
870 return ERR_PTR(error);
871 }
872 return d;
873 }
874
875 static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
876 unsigned flags)
877 {
878 return __gfs2_lookup(dir, dentry, NULL);
879 }
880
881 /**
882  * gfs2_link - Link to a file
883  * @old_dentry: The inode to link
884  * @dir: Add link to this directory
885  * @dentry: The name of the link
886  *
887  * Link the inode in "old_dentry" into the directory "dir", using the
888  * name in "dentry".
889  *
890  * Returns: errno
891  */
892 
893 static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
894 struct dentry *dentry)
895 {
896 struct gfs2_inode *dip = GFS2_I(dir);
897 struct gfs2_sbd *sdp = GFS2_SB(dir);
898 struct inode *inode = d_inode(old_dentry);
899 struct gfs2_inode *ip = GFS2_I(inode);
900 struct gfs2_holder ghs[2];
901 struct buffer_head *dibh;
902 struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, };
903 int error;
904
905 if (S_ISDIR(inode->i_mode))
906 return -EPERM;
907
908 error = gfs2_rsqa_alloc(dip);
909 if (error)
910 return error;
911
912 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
913 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
914
915 error = gfs2_glock_nq(ghs);
916 if (error)
917 goto out_parent;
918
919 error = gfs2_glock_nq(ghs + 1);
920 if (error)
921 goto out_child;
922
923 error = -ENOENT;
924 if (inode->i_nlink == 0)
925 goto out_gunlock;
926
927 error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC);
928 if (error)
929 goto out_gunlock;
930
931 error = gfs2_dir_check(dir, &dentry->d_name, NULL);
932 switch (error) {
933 case -ENOENT:
934 break;
935 case 0:
936 error = -EEXIST;
937 default:
938 goto out_gunlock;
939 }
940
941 error = -EINVAL;
942 if (!dip->i_inode.i_nlink)
943 goto out_gunlock;
944 error = -EFBIG;
945 if (dip->i_entries == (u32)-1)
946 goto out_gunlock;
947 error = -EPERM;
948 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
949 goto out_gunlock;
950 error = -EINVAL;
951 if (!ip->i_inode.i_nlink)
952 goto out_gunlock;
953 error = -EMLINK;
954 if (ip->i_inode.i_nlink == (u32)-1)
955 goto out_gunlock;
956
957 error = gfs2_diradd_alloc_required(dir, &dentry->d_name, &da);
958 if (error < 0)
959 goto out_gunlock;
960
961 if (da.nr_blocks) {
962 struct gfs2_alloc_parms ap = { .target = da.nr_blocks, };
963 error = gfs2_quota_lock_check(dip, &ap);
964 if (error)
965 goto out_gunlock;
966
967 error = gfs2_inplace_reserve(dip, &ap);
968 if (error)
969 goto out_gunlock_q;
970
971 error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(dip, &da, 2), 0);
972 if (error)
973 goto out_ipres;
974 } else {
975 error = gfs2_trans_begin(sdp, 2 * RES_DINODE + RES_LEAF, 0);
976 if (error)
977 goto out_ipres;
978 }
979
980 error = gfs2_meta_inode_buffer(ip, &dibh);
981 if (error)
982 goto out_end_trans;
983
984 error = gfs2_dir_add(dir, &dentry->d_name, ip, &da);
985 if (error)
986 goto out_brelse;
987
988 gfs2_trans_add_meta(ip->i_gl, dibh);
989 inc_nlink(&ip->i_inode);
990 ip->i_inode.i_ctime = current_time(&ip->i_inode);
991 ihold(inode);
992 d_instantiate(dentry, inode);
993 mark_inode_dirty(inode);
994
995 out_brelse:
996 brelse(dibh);
997 out_end_trans:
998 gfs2_trans_end(sdp);
999 out_ipres:
1000 if (da.nr_blocks)
1001 gfs2_inplace_release(dip);
1002 out_gunlock_q:
1003 if (da.nr_blocks)
1004 gfs2_quota_unlock(dip);
1005 out_gunlock:
1006 gfs2_dir_no_add(&da);
1007 gfs2_glock_dq(ghs + 1);
1008 out_child:
1009 gfs2_glock_dq(ghs);
1010 out_parent:
1011 gfs2_holder_uninit(ghs);
1012 gfs2_holder_uninit(ghs + 1);
1013 return error;
1014 }
1015
1016 /*
1017  * gfs2_unlink_ok - check to see that an inode is still in a directory
1018  * @dip: the directory
1019  * @name: the name of the file
1020  * @ip: the inode
1021  *
1022  * Assumes that the lock on (at least) @dip is held.
1023  *
1024  * Returns: 0 if the parent/child relationship is correct, errno if it isn't
1025  */
1026 
1027 static int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
1028 const struct gfs2_inode *ip)
1029 {
1030 int error;
1031
1032 if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
1033 return -EPERM;
1034
1035 if ((dip->i_inode.i_mode & S_ISVTX) &&
1036 !uid_eq(dip->i_inode.i_uid, current_fsuid()) &&
1037 !uid_eq(ip->i_inode.i_uid, current_fsuid()) && !capable(CAP_FOWNER))
1038 return -EPERM;
1039
1040 if (IS_APPEND(&dip->i_inode))
1041 return -EPERM;
1042
1043 error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
1044 if (error)
1045 return error;
1046
1047 return gfs2_dir_check(&dip->i_inode, name, ip);
1048 }
1049
1050 /**
1051  * gfs2_unlink_inode - Removes an inode from its parent dir and unlinks it
1052  * @dip: The parent directory
1053  * @dentry: The dentry to unlink
1054  *
1055  * Called with all the locks held and in a transaction. This will only be
1056  * called for a directory after it has been checked to ensure it is empty.
1057  *
1058  * Returns: 0 on success, or an error
1059  */
1060 
1061 
1062 static int gfs2_unlink_inode(struct gfs2_inode *dip,
1063 const struct dentry *dentry)
1064 {
1065 struct inode *inode = d_inode(dentry);
1066 struct gfs2_inode *ip = GFS2_I(inode);
1067 int error;
1068
1069 error = gfs2_dir_del(dip, dentry);
1070 if (error)
1071 return error;
1072
1073 ip->i_entries = 0;
1074 inode->i_ctime = current_time(inode);
1075 if (S_ISDIR(inode->i_mode))
1076 clear_nlink(inode);
1077 else
1078 drop_nlink(inode);
1079 mark_inode_dirty(inode);
1080 if (inode->i_nlink == 0)
1081 gfs2_unlink_di(inode);
1082 return 0;
1083 }
1084
1085 /**
1086  * gfs2_unlink - Unlink an inode (this does rmdir as well)
1087  * @dir: The inode of the directory containing the inode to unlink
1088  * @dentry: The file itself
1089  *
1090  * This routine uses the type of the inode as a flag to figure out
1091  * whether this is an unlink or an rmdir.
1092  *
1093  * Returns: errno
1094  */
1095 
1096 
1097 static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
1098 {
1099 struct gfs2_inode *dip = GFS2_I(dir);
1100 struct gfs2_sbd *sdp = GFS2_SB(dir);
1101 struct inode *inode = d_inode(dentry);
1102 struct gfs2_inode *ip = GFS2_I(inode);
1103 struct gfs2_holder ghs[3];
1104 struct gfs2_rgrpd *rgd;
1105 int error;
1106
1107 error = gfs2_rindex_update(sdp);
1108 if (error)
1109 return error;
1110
1111 error = -EROFS;
1112
1113 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
1114 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
1115
1116 rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
1117 if (!rgd)
1118 goto out_inodes;
1119
1120 gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
1121
1122
1123 error = gfs2_glock_nq(ghs);
1124 if (error)
1125 goto out_parent;
1126
1127 error = gfs2_glock_nq(ghs + 1);
1128 if (error)
1129 goto out_child;
1130
1131 error = -ENOENT;
1132 if (inode->i_nlink == 0)
1133 goto out_rgrp;
1134
1135 if (S_ISDIR(inode->i_mode)) {
1136 error = -ENOTEMPTY;
1137 if (ip->i_entries > 2 || inode->i_nlink > 2)
1138 goto out_rgrp;
1139 }
1140
1141 error = gfs2_glock_nq(ghs + 2);
1142 if (error)
1143 goto out_rgrp;
1144
1145 error = gfs2_unlink_ok(dip, &dentry->d_name, ip);
1146 if (error)
1147 goto out_gunlock;
1148
1149 error = gfs2_trans_begin(sdp, 2*RES_DINODE + 3*RES_LEAF + RES_RG_BIT, 0);
1150 if (error)
1151 goto out_gunlock;
1152
1153 error = gfs2_unlink_inode(dip, dentry);
1154 gfs2_trans_end(sdp);
1155
1156 out_gunlock:
1157 gfs2_glock_dq(ghs + 2);
1158 out_rgrp:
1159 gfs2_glock_dq(ghs + 1);
1160 out_child:
1161 gfs2_glock_dq(ghs);
1162 out_parent:
1163 gfs2_holder_uninit(ghs + 2);
1164 out_inodes:
1165 gfs2_holder_uninit(ghs + 1);
1166 gfs2_holder_uninit(ghs);
1167 return error;
1168 }
1169
1170 /**
1171  * gfs2_symlink - Create a symlink
1172  * @dir: The directory to create the symlink in
1173  * @dentry: The dentry to put the symlink in
1174  * @symname: The thing which the link points to
1175  *
1176  * Returns: errno
1177  */
1178 
1179 static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
1180 const char *symname)
1181 {
1182 unsigned int size;
1183
1184 size = strlen(symname);
1185 if (size >= gfs2_max_stuffed_size(GFS2_I(dir)))
1186 return -ENAMETOOLONG;
1187
1188 return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0);
1189 }
1190
1191 /**
1192  * gfs2_mkdir - Make a directory
1193  * @dir: The parent directory of the new one
1194  * @dentry: The dentry of the new directory
1195  * @mode: The mode of the new directory
1196  *
1197  * Returns: errno
1198  */
1199 
1200 static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
1201 {
1202 unsigned dsize = gfs2_max_stuffed_size(GFS2_I(dir));
1203 return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0);
1204 }
1205
1206 /**
1207  * gfs2_mknod - Make a special file
1208  * @dir: The directory in which the special file will reside
1209  * @dentry: The dentry of the special file
1210  * @mode: The mode of the special file
1211  * @dev: The device specification of the special file
1212  *
1213  * Returns: errno
1214  */
1215 static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
1216 dev_t dev)
1217 {
1218 return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0);
1219 }
1220
1221 /**
1222  * gfs2_atomic_open - Atomically open a file
1223  * @dir: The directory
1224  * @dentry: The proposed new entry
1225  * @file: The proposed new struct file
1226  * @flags: open flags
1227  * @mode: File mode
1228  *
1229  * Returns: error code or 0 for success
1230  */
1231 
1232 static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
1233 struct file *file, unsigned flags,
1234 umode_t mode)
1235 {
1236 struct dentry *d;
1237 bool excl = !!(flags & O_EXCL);
1238
1239 if (!d_in_lookup(dentry))
1240 goto skip_lookup;
1241
1242 d = __gfs2_lookup(dir, dentry, file);
1243 if (IS_ERR(d))
1244 return PTR_ERR(d);
1245 if (d != NULL)
1246 dentry = d;
1247 if (d_really_is_positive(dentry)) {
1248 if (!(file->f_mode & FMODE_OPENED))
1249 return finish_no_open(file, d);
1250 dput(d);
1251 return excl && (flags & O_CREAT) ? -EEXIST : 0;
1252 }
1253
1254 BUG_ON(d != NULL);
1255
1256 skip_lookup:
1257 if (!(flags & O_CREAT))
1258 return -ENOENT;
1259
1260 return gfs2_create_inode(dir, dentry, file, S_IFREG | mode, 0, NULL, 0, excl);
1261 }
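
When the dentry turns out to be positive and the caller passed O_CREAT|O_EXCL, the code above reports -EEXIST. From user space this is the familiar exclusive-create pattern; a small stand-alone demonstration (illustration only, the path name is arbitrary):

#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>

int main(void)
{
	/* The first exclusive create succeeds; the second one fails with
	 * EEXIST, which is what the excl/O_CREAT branch above reports. */
	int fd = open("/tmp/gfs2_excl_demo", O_CREAT | O_EXCL | O_WRONLY, 0600);

	if (fd < 0) {
		perror("first open");
		return 1;
	}
	close(fd);

	fd = open("/tmp/gfs2_excl_demo", O_CREAT | O_EXCL | O_WRONLY, 0600);
	if (fd < 0 && errno == EEXIST)
		puts("second O_CREAT|O_EXCL open failed with EEXIST as expected");
	unlink("/tmp/gfs2_excl_demo");
	return 0;
}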
1262
1263 /*
1264  * gfs2_ok_to_move - check if it's ok to move a pathname to another pathname
1265  * @this: move this
1266  * @to: to here
1267  *
1268  * Follow @to back to the root and make sure we don't encounter @this.
1269  * Assumes we already hold the rename lock.
1270  *
1271  * Returns: errno
1272  */
1273
1274 static int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
1275 {
1276 struct inode *dir = &to->i_inode;
1277 struct super_block *sb = dir->i_sb;
1278 struct inode *tmp;
1279 int error = 0;
1280
1281 igrab(dir);
1282
1283 for (;;) {
1284 if (dir == &this->i_inode) {
1285 error = -EINVAL;
1286 break;
1287 }
1288 if (dir == d_inode(sb->s_root)) {
1289 error = 0;
1290 break;
1291 }
1292
1293 tmp = gfs2_lookupi(dir, &gfs2_qdotdot, 1);
1294 if (!tmp) {
1295 error = -ENOENT;
1296 break;
1297 }
1298 if (IS_ERR(tmp)) {
1299 error = PTR_ERR(tmp);
1300 break;
1301 }
1302
1303 iput(dir);
1304 dir = tmp;
1305 }
1306
1307 iput(dir);
1308
1309 return error;
1310 }
1311
1312 /**
1313  * update_moved_ino - Update an inode that is being moved
1314  * @ip: The inode being moved
1315  * @ndip: The parent directory of the new filename
1316  * @dir_rename: True if @ip is a directory whose ".." entry must be updated
1317  *
1318  * Returns: errno
1319  */
1320
1321 static int update_moved_ino(struct gfs2_inode *ip, struct gfs2_inode *ndip,
1322 int dir_rename)
1323 {
1324 if (dir_rename)
1325 return gfs2_dir_mvino(ip, &gfs2_qdotdot, ndip, DT_DIR);
1326
1327 ip->i_inode.i_ctime = current_time(&ip->i_inode);
1328 mark_inode_dirty_sync(&ip->i_inode);
1329 return 0;
1330 }
1331
1332 /**
1333  * gfs2_rename - Rename a file
1334  * @odir: Parent directory of old file name
1335  * @odentry: The old dentry of the file
1336  * @ndir: Parent directory of new file name
1337  * @ndentry: The new dentry of the file
1338  *
1339  * Returns: errno
1340  */
1341 
1342 
1343 static int gfs2_rename(struct inode *odir, struct dentry *odentry,
1344 struct inode *ndir, struct dentry *ndentry)
1345 {
1346 struct gfs2_inode *odip = GFS2_I(odir);
1347 struct gfs2_inode *ndip = GFS2_I(ndir);
1348 struct gfs2_inode *ip = GFS2_I(d_inode(odentry));
1349 struct gfs2_inode *nip = NULL;
1350 struct gfs2_sbd *sdp = GFS2_SB(odir);
1351 struct gfs2_holder ghs[4], r_gh, rd_gh;
1352 struct gfs2_rgrpd *nrgd;
1353 unsigned int num_gh;
1354 int dir_rename = 0;
1355 struct gfs2_diradd da = { .nr_blocks = 0, .save_loc = 0, };
1356 unsigned int x;
1357 int error;
1358
1359 gfs2_holder_mark_uninitialized(&r_gh);
1360 gfs2_holder_mark_uninitialized(&rd_gh);
1361 if (d_really_is_positive(ndentry)) {
1362 nip = GFS2_I(d_inode(ndentry));
1363 if (ip == nip)
1364 return 0;
1365 }
1366
1367 error = gfs2_rindex_update(sdp);
1368 if (error)
1369 return error;
1370
1371 error = gfs2_rsqa_alloc(ndip);
1372 if (error)
1373 return error;
1374
1375 if (odip != ndip) {
1376 error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE,
1377 0, &r_gh);
1378 if (error)
1379 goto out;
1380
1381 if (S_ISDIR(ip->i_inode.i_mode)) {
1382 dir_rename = 1;
1383
1384 error = gfs2_ok_to_move(ip, ndip);
1385 if (error)
1386 goto out_gunlock_r;
1387 }
1388 }
1389
1390 num_gh = 1;
1391 gfs2_holder_init(odip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs);
1392 if (odip != ndip) {
1393 gfs2_holder_init(ndip->i_gl, LM_ST_EXCLUSIVE,GL_ASYNC,
1394 ghs + num_gh);
1395 num_gh++;
1396 }
1397 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs + num_gh);
1398 num_gh++;
1399
1400 if (nip) {
1401 gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC,
1402 ghs + num_gh);
1403 num_gh++;
1404 }
1405
1406 for (x = 0; x < num_gh; x++) {
1407 error = gfs2_glock_nq(ghs + x);
1408 if (error)
1409 goto out_gunlock;
1410 }
1411 error = gfs2_glock_async_wait(num_gh, ghs);
1412 if (error)
1413 goto out_gunlock;
1414
1415 if (nip) {
1416 /* Grab the resource group glock for unlink flag twiddling.
1417  * This is the case where the target dinode already exists
1418  * so we unlink before doing the rename.
1419  */
1420 nrgd = gfs2_blk2rgrpd(sdp, nip->i_no_addr, 1);
1421 if (!nrgd) {
1422 error = -ENOENT;
1423 goto out_gunlock;
1424 }
1425 error = gfs2_glock_nq_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0,
1426 &rd_gh);
1427 if (error)
1428 goto out_gunlock;
1429 }
1430
1431 error = -ENOENT;
1432 if (ip->i_inode.i_nlink == 0)
1433 goto out_gunlock;
1434
1435 /* Check out the old directory */
1436 
1437 error = gfs2_unlink_ok(odip, &odentry->d_name, ip);
1438 if (error)
1439 goto out_gunlock;
1440
1441 /* Check out the new directory */
1442 
1443 if (nip) {
1444 error = gfs2_unlink_ok(ndip, &ndentry->d_name, nip);
1445 if (error)
1446 goto out_gunlock;
1447
1448 if (nip->i_inode.i_nlink == 0) {
1449 error = -EAGAIN;
1450 goto out_gunlock;
1451 }
1452
1453 if (S_ISDIR(nip->i_inode.i_mode)) {
1454 if (nip->i_entries < 2) {
1455 gfs2_consist_inode(nip);
1456 error = -EIO;
1457 goto out_gunlock;
1458 }
1459 if (nip->i_entries > 2) {
1460 error = -ENOTEMPTY;
1461 goto out_gunlock;
1462 }
1463 }
1464 } else {
1465 error = gfs2_permission(ndir, MAY_WRITE | MAY_EXEC);
1466 if (error)
1467 goto out_gunlock;
1468
1469 error = gfs2_dir_check(ndir, &ndentry->d_name, NULL);
1470 switch (error) {
1471 case -ENOENT:
1472 error = 0;
1473 break;
1474 case 0:
1475 error = -EEXIST;
1476 default:
1477 goto out_gunlock;
1478 }
1479
1480 if (odip != ndip) {
1481 if (!ndip->i_inode.i_nlink) {
1482 error = -ENOENT;
1483 goto out_gunlock;
1484 }
1485 if (ndip->i_entries == (u32)-1) {
1486 error = -EFBIG;
1487 goto out_gunlock;
1488 }
1489 if (S_ISDIR(ip->i_inode.i_mode) &&
1490 ndip->i_inode.i_nlink == (u32)-1) {
1491 error = -EMLINK;
1492 goto out_gunlock;
1493 }
1494 }
1495 }
1496
1497 /* Check out the dir to be renamed */
1498 
1499 if (dir_rename) {
1500 error = gfs2_permission(d_inode(odentry), MAY_WRITE);
1501 if (error)
1502 goto out_gunlock;
1503 }
1504
1505 if (nip == NULL) {
1506 error = gfs2_diradd_alloc_required(ndir, &ndentry->d_name, &da);
1507 if (error)
1508 goto out_gunlock;
1509 }
1510
1511 if (da.nr_blocks) {
1512 struct gfs2_alloc_parms ap = { .target = da.nr_blocks, };
1513 error = gfs2_quota_lock_check(ndip, &ap);
1514 if (error)
1515 goto out_gunlock;
1516
1517 error = gfs2_inplace_reserve(ndip, &ap);
1518 if (error)
1519 goto out_gunlock_q;
1520
1521 error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(ndip, &da, 4) +
1522 4 * RES_LEAF + 4, 0);
1523 if (error)
1524 goto out_ipreserv;
1525 } else {
1526 error = gfs2_trans_begin(sdp, 4 * RES_DINODE +
1527 5 * RES_LEAF + 4, 0);
1528 if (error)
1529 goto out_gunlock;
1530 }
1531
1532 /* Remove the target file, if it exists */
1533 
1534 if (nip)
1535 error = gfs2_unlink_inode(ndip, ndentry);
1536
1537 error = update_moved_ino(ip, ndip, dir_rename);
1538 if (error)
1539 goto out_end_trans;
1540
1541 error = gfs2_dir_del(odip, odentry);
1542 if (error)
1543 goto out_end_trans;
1544
1545 error = gfs2_dir_add(ndir, &ndentry->d_name, ip, &da);
1546 if (error)
1547 goto out_end_trans;
1548
1549 out_end_trans:
1550 gfs2_trans_end(sdp);
1551 out_ipreserv:
1552 if (da.nr_blocks)
1553 gfs2_inplace_release(ndip);
1554 out_gunlock_q:
1555 if (da.nr_blocks)
1556 gfs2_quota_unlock(ndip);
1557 out_gunlock:
1558 gfs2_dir_no_add(&da);
1559 if (gfs2_holder_initialized(&rd_gh))
1560 gfs2_glock_dq_uninit(&rd_gh);
1561
1562 while (x--) {
1563 if (gfs2_holder_queued(ghs + x))
1564 gfs2_glock_dq(ghs + x);
1565 gfs2_holder_uninit(ghs + x);
1566 }
1567 out_gunlock_r:
1568 if (gfs2_holder_initialized(&r_gh))
1569 gfs2_glock_dq_uninit(&r_gh);
1570 out:
1571 return error;
1572 }
1573
1574 /**
1575  * gfs2_exchange - exchange two files
1576  * @odir: Parent directory of old file name
1577  * @odentry: The old dentry of the file
1578  * @ndir: Parent directory of new file name
1579  * @ndentry: The new dentry of the file
1580  * @flags: The rename flags
1581  *
1582  * Returns: errno
1583  */
1584 
1585 static int gfs2_exchange(struct inode *odir, struct dentry *odentry,
1586 struct inode *ndir, struct dentry *ndentry,
1587 unsigned int flags)
1588 {
1589 struct gfs2_inode *odip = GFS2_I(odir);
1590 struct gfs2_inode *ndip = GFS2_I(ndir);
1591 struct gfs2_inode *oip = GFS2_I(odentry->d_inode);
1592 struct gfs2_inode *nip = GFS2_I(ndentry->d_inode);
1593 struct gfs2_sbd *sdp = GFS2_SB(odir);
1594 struct gfs2_holder ghs[4], r_gh;
1595 unsigned int num_gh;
1596 unsigned int x;
1597 umode_t old_mode = oip->i_inode.i_mode;
1598 umode_t new_mode = nip->i_inode.i_mode;
1599 int error;
1600
1601 gfs2_holder_mark_uninitialized(&r_gh);
1602 error = gfs2_rindex_update(sdp);
1603 if (error)
1604 return error;
1605
1606 if (odip != ndip) {
1607 error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE,
1608 0, &r_gh);
1609 if (error)
1610 goto out;
1611
1612 if (S_ISDIR(old_mode)) {
1613 /* don't move a directory into its subdir */
1614 error = gfs2_ok_to_move(oip, ndip);
1615 if (error)
1616 goto out_gunlock_r;
1617 }
1618
1619 if (S_ISDIR(new_mode)) {
1620 /* don't move a directory into its subdir */
1621 error = gfs2_ok_to_move(nip, odip);
1622 if (error)
1623 goto out_gunlock_r;
1624 }
1625 }
1626
1627 num_gh = 1;
1628 gfs2_holder_init(odip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs);
1629 if (odip != ndip) {
1630 gfs2_holder_init(ndip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC,
1631 ghs + num_gh);
1632 num_gh++;
1633 }
1634 gfs2_holder_init(oip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs + num_gh);
1635 num_gh++;
1636
1637 gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs + num_gh);
1638 num_gh++;
1639
1640 for (x = 0; x < num_gh; x++) {
1641 error = gfs2_glock_nq(ghs + x);
1642 if (error)
1643 goto out_gunlock;
1644 }
1645
1646 error = gfs2_glock_async_wait(num_gh, ghs);
1647 if (error)
1648 goto out_gunlock;
1649
1650 error = -ENOENT;
1651 if (oip->i_inode.i_nlink == 0 || nip->i_inode.i_nlink == 0)
1652 goto out_gunlock;
1653
1654 error = gfs2_unlink_ok(odip, &odentry->d_name, oip);
1655 if (error)
1656 goto out_gunlock;
1657 error = gfs2_unlink_ok(ndip, &ndentry->d_name, nip);
1658 if (error)
1659 goto out_gunlock;
1660
1661 if (S_ISDIR(old_mode)) {
1662 error = gfs2_permission(odentry->d_inode, MAY_WRITE);
1663 if (error)
1664 goto out_gunlock;
1665 }
1666 if (S_ISDIR(new_mode)) {
1667 error = gfs2_permission(ndentry->d_inode, MAY_WRITE);
1668 if (error)
1669 goto out_gunlock;
1670 }
1671 error = gfs2_trans_begin(sdp, 4 * RES_DINODE + 4 * RES_LEAF, 0);
1672 if (error)
1673 goto out_gunlock;
1674
1675 error = update_moved_ino(oip, ndip, S_ISDIR(old_mode));
1676 if (error)
1677 goto out_end_trans;
1678
1679 error = update_moved_ino(nip, odip, S_ISDIR(new_mode));
1680 if (error)
1681 goto out_end_trans;
1682
1683 error = gfs2_dir_mvino(ndip, &ndentry->d_name, oip,
1684 IF2DT(old_mode));
1685 if (error)
1686 goto out_end_trans;
1687
1688 error = gfs2_dir_mvino(odip, &odentry->d_name, nip,
1689 IF2DT(new_mode));
1690 if (error)
1691 goto out_end_trans;
1692
1693 if (odip != ndip) {
1694 if (S_ISDIR(new_mode) && !S_ISDIR(old_mode)) {
1695 inc_nlink(&odip->i_inode);
1696 drop_nlink(&ndip->i_inode);
1697 } else if (S_ISDIR(old_mode) && !S_ISDIR(new_mode)) {
1698 inc_nlink(&ndip->i_inode);
1699 drop_nlink(&odip->i_inode);
1700 }
1701 }
1702 mark_inode_dirty(&ndip->i_inode);
1703 if (odip != ndip)
1704 mark_inode_dirty(&odip->i_inode);
1705
1706 out_end_trans:
1707 gfs2_trans_end(sdp);
1708 out_gunlock:
1709 while (x--) {
1710 if (gfs2_holder_queued(ghs + x))
1711 gfs2_glock_dq(ghs + x);
1712 gfs2_holder_uninit(ghs + x);
1713 }
1714 out_gunlock_r:
1715 if (gfs2_holder_initialized(&r_gh))
1716 gfs2_glock_dq_uninit(&r_gh);
1717 out:
1718 return error;
1719 }
1720
1721 static int gfs2_rename2(struct inode *odir, struct dentry *odentry,
1722 struct inode *ndir, struct dentry *ndentry,
1723 unsigned int flags)
1724 {
1725 flags &= ~RENAME_NOREPLACE;
1726
1727 if (flags & ~RENAME_EXCHANGE)
1728 return -EINVAL;
1729
1730 if (flags & RENAME_EXCHANGE)
1731 return gfs2_exchange(odir, odentry, ndir, ndentry, flags);
1732
1733 return gfs2_rename(odir, odentry, ndir, ndentry);
1734 }
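
gfs2_rename2() masks off RENAME_NOREPLACE because the VFS enforces it before the filesystem is called, and it supports RENAME_EXCHANGE by dispatching to gfs2_exchange(). A user-space sketch exercising both flags follows (illustration only; it assumes glibc 2.28 or later, where renameat2() and the RENAME_* constants are exposed via <stdio.h> with _GNU_SOURCE):

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>

int main(void)
{
	close(open("old", O_CREAT | O_WRONLY, 0600));
	close(open("new", O_CREAT | O_WRONLY, 0600));

	/* RENAME_EXCHANGE atomically swaps two existing entries
	 * (routed to gfs2_exchange() on GFS2). */
	if (renameat2(AT_FDCWD, "old", AT_FDCWD, "new", RENAME_EXCHANGE) != 0)
		perror("RENAME_EXCHANGE");

	/* RENAME_NOREPLACE is enforced by the VFS before the filesystem
	 * sees the call, which is why gfs2_rename2() can just mask it off. */
	if (renameat2(AT_FDCWD, "old", AT_FDCWD, "new", RENAME_NOREPLACE) != 0 &&
	    errno == EEXIST)
		puts("RENAME_NOREPLACE: target exists, EEXIST as expected");

	unlink("old");
	unlink("new");
	return 0;
}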
1735
1736 /**
1737  * gfs2_get_link - Follow a symbolic link
1738  * @dentry: The dentry of the link
1739  * @inode: The inode of the link
1740  * @done: destructor for the returned buffer
1741  *
1742  * This can handle symlinks of any size.
1743  *
1744  * Returns: the link target, or an ERR_PTR() on failure
1745  */
1746 
1747 static const char *gfs2_get_link(struct dentry *dentry,
1748 struct inode *inode,
1749 struct delayed_call *done)
1750 {
1751 struct gfs2_inode *ip = GFS2_I(inode);
1752 struct gfs2_holder i_gh;
1753 struct buffer_head *dibh;
1754 unsigned int size;
1755 char *buf;
1756 int error;
1757
1758 if (!dentry)
1759 return ERR_PTR(-ECHILD);
1760
1761 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
1762 error = gfs2_glock_nq(&i_gh);
1763 if (error) {
1764 gfs2_holder_uninit(&i_gh);
1765 return ERR_PTR(error);
1766 }
1767
1768 size = (unsigned int)i_size_read(&ip->i_inode);
1769 if (size == 0) {
1770 gfs2_consist_inode(ip);
1771 buf = ERR_PTR(-EIO);
1772 goto out;
1773 }
1774
1775 error = gfs2_meta_inode_buffer(ip, &dibh);
1776 if (error) {
1777 buf = ERR_PTR(error);
1778 goto out;
1779 }
1780
1781 buf = kzalloc(size + 1, GFP_NOFS);
1782 if (!buf)
1783 buf = ERR_PTR(-ENOMEM);
1784 else
1785 memcpy(buf, dibh->b_data + sizeof(struct gfs2_dinode), size);
1786 brelse(dibh);
1787 out:
1788 gfs2_glock_dq_uninit(&i_gh);
1789 if (!IS_ERR(buf))
1790 set_delayed_call(done, kfree_link, buf);
1791 return buf;
1792 }
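
Path resolution and readlink(2) both end up in ->get_link for a GFS2 symlink, which copies the target out of the dinode as above. A minimal user-space reader (illustration only):

#include <stdio.h>
#include <unistd.h>
#include <limits.h>

int main(int argc, char **argv)
{
	char target[PATH_MAX + 1];
	ssize_t n;

	if (argc < 2)
		return 1;
	/* readlink() resolves the symlink target without following it. */
	n = readlink(argv[1], target, PATH_MAX);
	if (n < 0) {
		perror("readlink");
		return 1;
	}
	target[n] = '\0';
	printf("%s -> %s\n", argv[1], target);
	return 0;
}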
1793
1794 /**
1795  * gfs2_permission - Check permissions on an inode
1796  * @inode: The inode
1797  * @mask: The mask to be tested
1798  *
1799  * This may be called from the VFS directly, or from within GFS2 with the
1800  * inode locked, so we look to see if the glock is already locked and only
1801  * lock the glock if it has not already been done.
1802  *
1803  * Returns: errno
1804  */
1805 
1806 
1807 int gfs2_permission(struct inode *inode, int mask)
1808 {
1809 struct gfs2_inode *ip;
1810 struct gfs2_holder i_gh;
1811 int error;
1812
1813 gfs2_holder_mark_uninitialized(&i_gh);
1814 ip = GFS2_I(inode);
1815 if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
1816 if (mask & MAY_NOT_BLOCK)
1817 return -ECHILD;
1818 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
1819 if (error)
1820 return error;
1821 }
1822
1823 if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
1824 error = -EPERM;
1825 else
1826 error = generic_permission(inode, mask);
1827 if (gfs2_holder_initialized(&i_gh))
1828 gfs2_glock_dq_uninit(&i_gh);
1829
1830 return error;
1831 }
1832
1833 static int __gfs2_setattr_simple(struct inode *inode, struct iattr *attr)
1834 {
1835 setattr_copy(inode, attr);
1836 mark_inode_dirty(inode);
1837 return 0;
1838 }
1839
1840 /*
1841  * gfs2_setattr_simple - Apply attribute changes to an inode
1842  * @inode: The inode
1843  * @attr: The new attributes
1844  *
1845  * If a transaction is already in progress the change is applied directly;
1846  * otherwise a one-dinode transaction is started around it.  Returns: errno
1847  */
1848 int gfs2_setattr_simple(struct inode *inode, struct iattr *attr)
1849 {
1850 int error;
1851
1852 if (current->journal_info)
1853 return __gfs2_setattr_simple(inode, attr);
1854
1855 error = gfs2_trans_begin(GFS2_SB(inode), RES_DINODE, 0);
1856 if (error)
1857 return error;
1858
1859 error = __gfs2_setattr_simple(inode, attr);
1860 gfs2_trans_end(GFS2_SB(inode));
1861 return error;
1862 }
1863
1864 static int setattr_chown(struct inode *inode, struct iattr *attr)
1865 {
1866 struct gfs2_inode *ip = GFS2_I(inode);
1867 struct gfs2_sbd *sdp = GFS2_SB(inode);
1868 kuid_t ouid, nuid;
1869 kgid_t ogid, ngid;
1870 int error;
1871 struct gfs2_alloc_parms ap;
1872
1873 ouid = inode->i_uid;
1874 ogid = inode->i_gid;
1875 nuid = attr->ia_uid;
1876 ngid = attr->ia_gid;
1877
1878 if (!(attr->ia_valid & ATTR_UID) || uid_eq(ouid, nuid))
1879 ouid = nuid = NO_UID_QUOTA_CHANGE;
1880 if (!(attr->ia_valid & ATTR_GID) || gid_eq(ogid, ngid))
1881 ogid = ngid = NO_GID_QUOTA_CHANGE;
1882
1883 error = gfs2_rsqa_alloc(ip);
1884 if (error)
1885 goto out;
1886
1887 error = gfs2_rindex_update(sdp);
1888 if (error)
1889 goto out;
1890
1891 error = gfs2_quota_lock(ip, nuid, ngid);
1892 if (error)
1893 goto out;
1894
1895 ap.target = gfs2_get_inode_blocks(&ip->i_inode);
1896
1897 if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) ||
1898 !gid_eq(ogid, NO_GID_QUOTA_CHANGE)) {
1899 error = gfs2_quota_check(ip, nuid, ngid, &ap);
1900 if (error)
1901 goto out_gunlock_q;
1902 }
1903
1904 error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_QUOTA, 0);
1905 if (error)
1906 goto out_gunlock_q;
1907
1908 error = gfs2_setattr_simple(inode, attr);
1909 if (error)
1910 goto out_end_trans;
1911
1912 if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) ||
1913 !gid_eq(ogid, NO_GID_QUOTA_CHANGE)) {
1914 gfs2_quota_change(ip, -(s64)ap.target, ouid, ogid);
1915 gfs2_quota_change(ip, ap.target, nuid, ngid);
1916 }
1917
1918 out_end_trans:
1919 gfs2_trans_end(sdp);
1920 out_gunlock_q:
1921 gfs2_quota_unlock(ip);
1922 out:
1923 return error;
1924 }
1925
1926 /**
1927  * gfs2_setattr - Change attributes on an inode
1928  * @dentry: The dentry which is changing
1929  * @attr: The structure describing the change
1930  *
1931  * The VFS layer wants to change one or more of an inode's attributes.  Write
1932  * that change out to disk.
1933  *
1934  * Returns: errno
1935  */
1936 
1937 static int gfs2_setattr(struct dentry *dentry, struct iattr *attr)
1938 {
1939 struct inode *inode = d_inode(dentry);
1940 struct gfs2_inode *ip = GFS2_I(inode);
1941 struct gfs2_holder i_gh;
1942 int error;
1943
1944 error = gfs2_rsqa_alloc(ip);
1945 if (error)
1946 return error;
1947
1948 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1949 if (error)
1950 return error;
1951
1952 error = -EPERM;
1953 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
1954 goto out;
1955
1956 error = setattr_prepare(dentry, attr);
1957 if (error)
1958 goto out;
1959
1960 if (attr->ia_valid & ATTR_SIZE)
1961 error = gfs2_setattr_size(inode, attr->ia_size);
1962 else if (attr->ia_valid & (ATTR_UID | ATTR_GID))
1963 error = setattr_chown(inode, attr);
1964 else {
1965 error = gfs2_setattr_simple(inode, attr);
1966 if (!error && attr->ia_valid & ATTR_MODE)
1967 error = posix_acl_chmod(inode, inode->i_mode);
1968 }
1969
1970 out:
1971 if (!error)
1972 mark_inode_dirty(inode);
1973 gfs2_glock_dq_uninit(&i_gh);
1974 return error;
1975 }
1976
1977 /**
1978  * gfs2_getattr - Read out an inode's attributes
1979  * @path: Object to query
1980  * @stat: The inode's stats
1981  * @request_mask: Mask of STATX_xxx flags indicating the caller's interests
1982  * @flags: AT_STATX_xxx setting
1983  *
1984  * This may be called from the VFS directly, or from within GFS2 with the
1985  * inode locked, so we look to see if the glock is already locked and only
1986  * lock the glock if it has not already been done. Note that it is the NFS
1987  * readdirplus operation which causes this to be called (from filldir)
1988  * with the glock already held.
1989  *
1990  * Returns: errno
1991  */
1992 
1993 static int gfs2_getattr(const struct path *path, struct kstat *stat,
1994 u32 request_mask, unsigned int flags)
1995 {
1996 struct inode *inode = d_inode(path->dentry);
1997 struct gfs2_inode *ip = GFS2_I(inode);
1998 struct gfs2_holder gh;
1999 u32 gfsflags;
2000 int error;
2001
2002 gfs2_holder_mark_uninitialized(&gh);
2003 if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
2004 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
2005 if (error)
2006 return error;
2007 }
2008
2009 gfsflags = ip->i_diskflags;
2010 if (gfsflags & GFS2_DIF_APPENDONLY)
2011 stat->attributes |= STATX_ATTR_APPEND;
2012 if (gfsflags & GFS2_DIF_IMMUTABLE)
2013 stat->attributes |= STATX_ATTR_IMMUTABLE;
2014
2015 stat->attributes_mask |= (STATX_ATTR_APPEND |
2016 STATX_ATTR_COMPRESSED |
2017 STATX_ATTR_ENCRYPTED |
2018 STATX_ATTR_IMMUTABLE |
2019 STATX_ATTR_NODUMP);
2020
2021 generic_fillattr(inode, stat);
2022
2023 if (gfs2_holder_initialized(&gh))
2024 gfs2_glock_dq_uninit(&gh);
2025
2026 return 0;
2027 }
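
The STATX_ATTR_APPEND and STATX_ATTR_IMMUTABLE bits set above from the GFS2 disk flags can be read back with statx(2). A small sketch (illustration only; assumes a glibc new enough to provide the statx() wrapper, 2.28 or later):

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2)
		return 1;
	if (statx(AT_FDCWD, argv[1], 0, STATX_BASIC_STATS, &stx) != 0) {
		perror("statx");
		return 1;
	}
	/* These bits are what gfs2_getattr() sets from GFS2_DIF_IMMUTABLE
	 * and GFS2_DIF_APPENDONLY. */
	printf("immutable: %s\n",
	       (stx.stx_attributes & STATX_ATTR_IMMUTABLE) ? "yes" : "no");
	printf("append-only: %s\n",
	       (stx.stx_attributes & STATX_ATTR_APPEND) ? "yes" : "no");
	return 0;
}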
2028
2029 static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2030 u64 start, u64 len)
2031 {
2032 struct gfs2_inode *ip = GFS2_I(inode);
2033 struct gfs2_holder gh;
2034 int ret;
2035
2036 inode_lock_shared(inode);
2037
2038 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
2039 if (ret)
2040 goto out;
2041
2042 ret = iomap_fiemap(inode, fieinfo, start, len, &gfs2_iomap_ops);
2043
2044 gfs2_glock_dq_uninit(&gh);
2045
2046 out:
2047 inode_unlock_shared(inode);
2048 return ret;
2049 }
2050
2051 loff_t gfs2_seek_data(struct file *file, loff_t offset)
2052 {
2053 struct inode *inode = file->f_mapping->host;
2054 struct gfs2_inode *ip = GFS2_I(inode);
2055 struct gfs2_holder gh;
2056 loff_t ret;
2057
2058 inode_lock_shared(inode);
2059 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
2060 if (!ret)
2061 ret = iomap_seek_data(inode, offset, &gfs2_iomap_ops);
2062 gfs2_glock_dq_uninit(&gh);
2063 inode_unlock_shared(inode);
2064
2065 if (ret < 0)
2066 return ret;
2067 return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
2068 }
2069
2070 loff_t gfs2_seek_hole(struct file *file, loff_t offset)
2071 {
2072 struct inode *inode = file->f_mapping->host;
2073 struct gfs2_inode *ip = GFS2_I(inode);
2074 struct gfs2_holder gh;
2075 loff_t ret;
2076
2077 inode_lock_shared(inode);
2078 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
2079 if (!ret)
2080 ret = iomap_seek_hole(inode, offset, &gfs2_iomap_ops);
2081 gfs2_glock_dq_uninit(&gh);
2082 inode_unlock_shared(inode);
2083
2084 if (ret < 0)
2085 return ret;
2086 return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
2087 }
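
gfs2_seek_data() and gfs2_seek_hole() back the SEEK_DATA/SEEK_HOLE whence values of lseek(2), walking the extent map under a shared inode glock. A user-space sketch (illustration only; SEEK_DATA and SEEK_HOLE require _GNU_SOURCE):

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	off_t data, hole;
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;
	/* Find the first data extent and the first hole from offset 0. */
	data = lseek(fd, 0, SEEK_DATA);
	hole = lseek(fd, 0, SEEK_HOLE);
	printf("first data at %lld, first hole at %lld\n",
	       (long long)data, (long long)hole);
	close(fd);
	return 0;
}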
2088
2089 const struct inode_operations gfs2_file_iops = {
2090 .permission = gfs2_permission,
2091 .setattr = gfs2_setattr,
2092 .getattr = gfs2_getattr,
2093 .listxattr = gfs2_listxattr,
2094 .fiemap = gfs2_fiemap,
2095 .get_acl = gfs2_get_acl,
2096 .set_acl = gfs2_set_acl,
2097 };
2098
2099 const struct inode_operations gfs2_dir_iops = {
2100 .create = gfs2_create,
2101 .lookup = gfs2_lookup,
2102 .link = gfs2_link,
2103 .unlink = gfs2_unlink,
2104 .symlink = gfs2_symlink,
2105 .mkdir = gfs2_mkdir,
2106 .rmdir = gfs2_unlink,
2107 .mknod = gfs2_mknod,
2108 .rename = gfs2_rename2,
2109 .permission = gfs2_permission,
2110 .setattr = gfs2_setattr,
2111 .getattr = gfs2_getattr,
2112 .listxattr = gfs2_listxattr,
2113 .fiemap = gfs2_fiemap,
2114 .get_acl = gfs2_get_acl,
2115 .set_acl = gfs2_set_acl,
2116 .atomic_open = gfs2_atomic_open,
2117 };
2118
2119 const struct inode_operations gfs2_symlink_iops = {
2120 .get_link = gfs2_get_link,
2121 .permission = gfs2_permission,
2122 .setattr = gfs2_setattr,
2123 .getattr = gfs2_getattr,
2124 .listxattr = gfs2_listxattr,
2125 .fiemap = gfs2_fiemap,
2126 };
2127