root/fs/gfs2/glops.c

DEFINITIONS

This source file includes the following definitions.
  1. gfs2_ail_error
  2. __gfs2_ail_flush
  3. gfs2_ail_empty_gl
  4. gfs2_ail_flush
  5. rgrp_go_sync
  6. rgrp_go_inval
  7. gfs2_glock2inode
  8. gfs2_glock2rgrp
  9. gfs2_clear_glop_pending
  10. inode_go_sync
  11. inode_go_inval
  12. inode_go_demote_ok
  13. gfs2_dinode_in
  14. gfs2_inode_refresh
  15. inode_go_lock
  16. inode_go_dump
  17. freeze_go_sync
  18. freeze_go_xmote_bh
  19. freeze_go_demote_ok
  20. iopen_go_callback

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

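/* Workqueue on which the freeze/thaw work (sd_freeze_work) is run */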
struct workqueue_struct *gfs2_freeze_wq;

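/*
 * gfs2_ail_error - report an AIL buffer in an inconsistent state and
 * withdraw from the filesystem
 */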
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
        fs_err(gl->gl_name.ln_sbd,
               "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
               "state 0x%lx\n",
               bh, (unsigned long long)bh->b_blocknr, bh->b_state,
               bh->b_page->mapping, bh->b_page->flags);
        fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
               gl->gl_name.ln_type, gl->gl_name.ln_number,
               gfs2_glock2aspace(gl));
        gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of revokes to issue
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
                             unsigned int nr_revokes)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct list_head *head = &gl->gl_ail_list;
        struct gfs2_bufdata *bd, *tmp;
        struct buffer_head *bh;
        const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
                if (nr_revokes == 0)
                        break;
                bh = bd->bd_bh;
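                /*
                 * Dirty, pinned, or locked buffers are skipped when called
                 * from fsync; otherwise they indicate an AIL inconsistency.
                 */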
                if (bh->b_state & b_state) {
                        if (fsync)
                                continue;
                        gfs2_ail_error(gl, bh);
                }
                gfs2_trans_add_revoke(sdp, bd);
                nr_revokes--;
        }
        GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
}

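/*
 * gfs2_ail_empty_gl - issue revokes for all remaining AIL buffers of a
 * glock and flush the log so that the revokes reach the disk
 */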
static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_trans tr;

        memset(&tr, 0, sizeof(tr));
        INIT_LIST_HEAD(&tr.tr_buf);
        INIT_LIST_HEAD(&tr.tr_databuf);
        tr.tr_revokes = atomic_read(&gl->gl_ail_count);

        if (!tr.tr_revokes) {
                bool have_revokes;
                bool log_in_flight;

                /*
                 * We have nothing on the ail, but there could be revokes on
                 * the sdp revoke queue, in which case, we still want to flush
                 * the log and wait for it to finish.
                 *
                 * If the sdp revoke list is empty too, we might still have an
                 * io outstanding for writing revokes, so we should wait for
                 * it before returning.
                 *
                 * If none of these conditions are true, our revokes are all
                 * flushed and we can return.
                 */
                gfs2_log_lock(sdp);
                have_revokes = !list_empty(&sdp->sd_log_revokes);
                log_in_flight = atomic_read(&sdp->sd_log_in_flight);
                gfs2_log_unlock(sdp);
                if (have_revokes)
                        goto flush;
                if (log_in_flight)
                        log_flush_wait(sdp);
                return;
        }

        /* A shortened, inline version of gfs2_trans_begin()
         * tr->alloced is not set since the transaction structure is
         * on the stack */
        tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
        tr.tr_ip = _RET_IP_;
        if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
                return;
        WARN_ON_ONCE(current->journal_info);
        current->journal_info = &tr;

        __gfs2_ail_flush(gl, false, tr.tr_revokes);

        gfs2_trans_end(sdp);
flush:
        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_AIL_EMPTY_GL);
}

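/*
 * gfs2_ail_flush - issue revokes for the AIL buffers of a glock and
 * flush the log
 */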
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        unsigned int revokes = atomic_read(&gl->gl_ail_count);
        unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
        int ret;

        if (!revokes)
                return;

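        /*
         * max_revokes starts at the number of revokes that fit in one log
         * descriptor block; grow it by one continuation block's worth at a
         * time until all outstanding revokes fit.
         */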
        while (revokes > max_revokes)
                max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

        ret = gfs2_trans_begin(sdp, 0, max_revokes);
        if (ret)
                return;
        __gfs2_ail_flush(gl, fsync, max_revokes);
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_AIL_FLUSH);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *mapping = &sdp->sd_aspace;
        struct gfs2_rgrpd *rgd;
        int error;

        spin_lock(&gl->gl_lockref.lock);
        rgd = gl->gl_object;
        if (rgd)
                gfs2_rgrp_brelse(rgd);
        spin_unlock(&gl->gl_lockref.lock);

        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                return;
        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

        gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_RGRP_GO_SYNC);
        filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
        error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
        mapping_set_error(mapping, error);
        gfs2_ail_empty_gl(gl);

        spin_lock(&gl->gl_lockref.lock);
        rgd = gl->gl_object;
        if (rgd)
                gfs2_free_clones(rgd);
        spin_unlock(&gl->gl_lockref.lock);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* invalidation flags
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *mapping = &sdp->sd_aspace;
        struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);

        if (rgd)
                gfs2_rgrp_brelse(rgd);

        WARN_ON_ONCE(!(flags & DIO_METADATA));
        gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
        truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

        if (rgd)
                rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

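/*
 * gfs2_glock2inode - safely dereference gl->gl_object as an inode and
 * mark it as having glock operations pending (GIF_GLOP_PENDING)
 */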
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip;

        spin_lock(&gl->gl_lockref.lock);
        ip = gl->gl_object;
        if (ip)
                set_bit(GIF_GLOP_PENDING, &ip->i_flags);
        spin_unlock(&gl->gl_lockref.lock);
        return ip;
}

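/* gfs2_glock2rgrp - safely dereference gl->gl_object as a resource group */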
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
        struct gfs2_rgrpd *rgd;

        spin_lock(&gl->gl_lockref.lock);
        rgd = gl->gl_object;
        spin_unlock(&gl->gl_lockref.lock);

        return rgd;
}

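/*
 * gfs2_clear_glop_pending - clear GIF_GLOP_PENDING and wake up any
 * waiters; pairs with gfs2_glock2inode() above
 */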
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
        if (!ip)
                return;

        clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
        wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip = gfs2_glock2inode(gl);
        int isreg = ip && S_ISREG(ip->i_inode.i_mode);
        struct address_space *metamapping = gfs2_glock2aspace(gl);
        int error;

        if (isreg) {
                if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
                        unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
                inode_dio_wait(&ip->i_inode);
        }
        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                goto out;

        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

        gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_INODE_GO_SYNC);
        filemap_fdatawrite(metamapping);
        if (isreg) {
                struct address_space *mapping = ip->i_inode.i_mapping;
                filemap_fdatawrite(mapping);
                error = filemap_fdatawait(mapping);
                mapping_set_error(mapping, error);
        }
        error = filemap_fdatawait(metamapping);
        mapping_set_error(metamapping, error);
        gfs2_ail_empty_gl(gl);
        /*
         * Writeback of the data mapping may cause the dirty flag to be set
         * so we have to clear it again here.
         */
        smp_mb__before_atomic();
        clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
        gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* invalidation flags
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
        struct gfs2_inode *ip = gfs2_glock2inode(gl);

        gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

        if (flags & DIO_METADATA) {
                struct address_space *mapping = gfs2_glock2aspace(gl);
                truncate_inode_pages(mapping, 0);
                if (ip) {
                        set_bit(GIF_INVALID, &ip->i_flags);
                        forget_all_cached_acls(&ip->i_inode);
                        security_inode_invalidate_secctx(&ip->i_inode);
                        gfs2_dir_hash_inval(ip);
                }
        }

        if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
                gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
                               GFS2_LOG_HEAD_FLUSH_NORMAL |
                               GFS2_LFC_INODE_GO_INVAL);
                gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
        }
        if (ip && S_ISREG(ip->i_inode.i_mode))
                truncate_inode_pages(ip->i_inode.i_mapping, 0);

        gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
                return 0;

        return 1;
}

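/*
 * gfs2_dinode_in - copy an on-disk dinode into the incore inode
 * @ip: the incore inode
 * @buf: the buffer containing the on-disk dinode
 *
 * Returns: 0 on success, -EIO if the dinode is inconsistent
 */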
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
        const struct gfs2_dinode *str = buf;
        struct timespec64 atime;
        u16 height, depth;

        if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
                goto corrupt;
        ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
        ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
        ip->i_inode.i_rdev = 0;
        switch (ip->i_inode.i_mode & S_IFMT) {
        case S_IFBLK:
        case S_IFCHR:
                ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
                                           be32_to_cpu(str->di_minor));
                break;
        }

        i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
        i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
        set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
        i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
        gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
        atime.tv_sec = be64_to_cpu(str->di_atime);
        atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
        if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
                ip->i_inode.i_atime = atime;
        ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
        ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
        ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
        ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

        ip->i_goal = be64_to_cpu(str->di_goal_meta);
        ip->i_generation = be64_to_cpu(str->di_generation);

        ip->i_diskflags = be32_to_cpu(str->di_flags);
        ip->i_eattr = be64_to_cpu(str->di_eattr);
        /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
        gfs2_set_inode_flags(&ip->i_inode);
        height = be16_to_cpu(str->di_height);
        if (unlikely(height > GFS2_MAX_META_HEIGHT))
                goto corrupt;
        ip->i_height = (u8)height;

        depth = be16_to_cpu(str->di_depth);
        if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
                goto corrupt;
        ip->i_depth = (u8)depth;
        ip->i_entries = be32_to_cpu(str->di_entries);

        if (S_ISREG(ip->i_inode.i_mode))
                gfs2_set_aops(&ip->i_inode);

        return 0;
corrupt:
        gfs2_consist_inode(ip);
        return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
        struct buffer_head *dibh;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        error = gfs2_dinode_in(ip, dibh->b_data);
        brelse(dibh);
        clear_bit(GIF_INVALID, &ip->i_flags);

        return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = gl->gl_object;
        int error = 0;

        if (!ip || (gh->gh_flags & GL_SKIP))
                return 0;

        if (test_bit(GIF_INVALID, &ip->i_flags)) {
                error = gfs2_inode_refresh(ip);
                if (error)
                        return error;
        }

        if (gh->gh_state != LM_ST_DEFERRED)
                inode_dio_wait(&ip->i_inode);

        if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
            (gl->gl_state == LM_ST_EXCLUSIVE) &&
            (gh->gh_state == LM_ST_EXCLUSIVE)) {
                spin_lock(&sdp->sd_trunc_lock);
                if (list_empty(&ip->i_trunc_list))
                        list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
                spin_unlock(&sdp->sd_trunc_lock);
                wake_up(&sdp->sd_quota_wait);
                return 1;
        }

        return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: the glock protecting the inode
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
                          const char *fs_id_buf)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct inode *inode = &ip->i_inode;
        unsigned long nrpages;

        if (ip == NULL)
                return;

        xa_lock_irq(&inode->i_data.i_pages);
        nrpages = inode->i_data.nrpages;
        xa_unlock_irq(&inode->i_data.i_pages);

        gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
                       "p:%lu\n", fs_id_buf,
                  (unsigned long long)ip->i_no_formal_ino,
                  (unsigned long long)ip->i_no_addr,
                  IF2DT(ip->i_inode.i_mode), ip->i_flags,
                  (unsigned int)ip->i_diskflags,
                  (unsigned long long)i_size_read(inode), nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */

static void freeze_go_sync(struct gfs2_glock *gl)
{
        int error = 0;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

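        /*
         * When the freeze glock is demoted to shared on a node with a live
         * journal, freeze the VFS-level superblock and flush the log.
         */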
        if (gl->gl_state == LM_ST_SHARED &&
            test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
                error = freeze_super(sdp->sd_vfs);
                if (error) {
                        fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
                                error);
                        gfs2_assert_withdraw(sdp, 0);
                }
                queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
                gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
                               GFS2_LFC_FREEZE_GO_SYNC);
        }
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the glock holder
 *
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
        struct gfs2_glock *j_gl = ip->i_gl;
        struct gfs2_log_header_host head;
        int error;

        if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

                error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
                if (error)
                        gfs2_consist(sdp);
                if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
                        gfs2_consist(sdp);

                /* Initialize the head of the log */
                if (!test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) {
                        sdp->sd_log_sequence = head.lh_sequence + 1;
                        gfs2_log_pointers_init(sdp, head.lh_blkno);
                }
        }
        return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
        return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (!remote || sb_rdonly(sdp->sd_vfs))
                return;

        if (gl->gl_demote_state == LM_ST_UNLOCKED &&
            gl->gl_state == LM_ST_SHARED && ip) {
                gl->gl_lockref.count++;
                if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
                        gl->gl_lockref.count--;
        }
}

const struct gfs2_glock_operations gfs2_meta_glops = {
        .go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
        .go_sync = inode_go_sync,
        .go_inval = inode_go_inval,
        .go_demote_ok = inode_go_demote_ok,
        .go_lock = inode_go_lock,
        .go_dump = inode_go_dump,
        .go_type = LM_TYPE_INODE,
        .go_flags = GLOF_ASPACE | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
        .go_sync = rgrp_go_sync,
        .go_inval = rgrp_go_inval,
        .go_lock = gfs2_rgrp_go_lock,
        .go_unlock = gfs2_rgrp_go_unlock,
        .go_dump = gfs2_rgrp_dump,
        .go_type = LM_TYPE_RGRP,
        .go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
        .go_sync = freeze_go_sync,
        .go_xmote_bh = freeze_go_xmote_bh,
        .go_demote_ok = freeze_go_demote_ok,
        .go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
        .go_type = LM_TYPE_IOPEN,
        .go_callback = iopen_go_callback,
        .go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
        .go_type = LM_TYPE_FLOCK,
        .go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
        .go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
        .go_type = LM_TYPE_QUOTA,
        .go_flags = GLOF_LVB | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
        .go_type = LM_TYPE_JOURNAL,
};

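/* Mapping from lock type (LM_TYPE_*) to its glock operations */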
const struct gfs2_glock_operations *gfs2_glops_list[] = {
        [LM_TYPE_META] = &gfs2_meta_glops,
        [LM_TYPE_INODE] = &gfs2_inode_glops,
        [LM_TYPE_RGRP] = &gfs2_rgrp_glops,
        [LM_TYPE_IOPEN] = &gfs2_iopen_glops,
        [LM_TYPE_FLOCK] = &gfs2_flock_glops,
        [LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
        [LM_TYPE_QUOTA] = &gfs2_quota_glops,
        [LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};