root/fs/xfs/xfs_dquot.c


DEFINITIONS

This source file includes the following definitions.
  1. xfs_qm_dqdestroy
  2. xfs_qm_adjust_dqlimits
  3. xfs_qm_adjust_dqtimers
  4. xfs_qm_init_dquot_blk
  5. xfs_dquot_set_prealloc_limits
  6. xfs_dquot_disk_alloc
  7. xfs_dquot_disk_read
  8. xfs_dquot_alloc
  9. xfs_dquot_from_disk
  10. xfs_qm_dqread_alloc
  11. xfs_qm_dqread
  12. xfs_dq_get_next_id
  13. xfs_qm_dqget_cache_lookup
  14. xfs_qm_dqget_cache_insert
  15. xfs_qm_dqget_checks
  16. xfs_qm_dqget
  17. xfs_qm_dqget_uncached
  18. xfs_qm_id_for_quotatype
  19. xfs_qm_dqget_inode
  20. xfs_qm_dqget_next
  21. xfs_qm_dqput
  22. xfs_qm_dqrele
  23. xfs_qm_dqflush_done
  24. xfs_qm_dqflush
  25. xfs_dqlock2
  26. xfs_qm_init
  27. xfs_qm_exit
  28. xfs_qm_dqiterate

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Copyright (c) 2000-2003 Silicon Graphics, Inc.
   4  * All Rights Reserved.
   5  */
   6 #include "xfs.h"
   7 #include "xfs_fs.h"
   8 #include "xfs_format.h"
   9 #include "xfs_log_format.h"
  10 #include "xfs_shared.h"
  11 #include "xfs_trans_resv.h"
  12 #include "xfs_bit.h"
  13 #include "xfs_mount.h"
  14 #include "xfs_defer.h"
  15 #include "xfs_inode.h"
  16 #include "xfs_bmap.h"
  17 #include "xfs_quota.h"
  18 #include "xfs_trans.h"
  19 #include "xfs_buf_item.h"
  20 #include "xfs_trans_space.h"
  21 #include "xfs_trans_priv.h"
  22 #include "xfs_qm.h"
  23 #include "xfs_trace.h"
  24 #include "xfs_log.h"
  25 #include "xfs_bmap_btree.h"
  26 
  27 /*
  28  * Lock order:
  29  *
  30  * ip->i_lock
  31  *   qi->qi_tree_lock
  32  *     dquot->q_qlock (xfs_dqlock() and friends)
  33  *       dquot->q_flush (xfs_dqflock() and friends)
  34  *       qi->qi_lru_lock
  35  *
  36  * If two dquots need to be locked the order is user before group/project,
  37  * otherwise by the lowest id first, see xfs_dqlock2.
  38  */
  39 
  40 struct kmem_zone                *xfs_qm_dqtrxzone;
  41 static struct kmem_zone         *xfs_qm_dqzone;
  42 
  43 static struct lock_class_key xfs_dquot_group_class;
  44 static struct lock_class_key xfs_dquot_project_class;
  45 
  46 /*
  47  * This is called to free all the memory associated with a dquot
  48  */
  49 void
  50 xfs_qm_dqdestroy(
  51         xfs_dquot_t     *dqp)
  52 {
  53         ASSERT(list_empty(&dqp->q_lru));
  54 
  55         kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
  56         mutex_destroy(&dqp->q_qlock);
  57 
  58         XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
  59         kmem_zone_free(xfs_qm_dqzone, dqp);
  60 }
  61 
  62 /*
  63  * If default limits are in force, push them into the dquot now.
  64  * We overwrite the dquot limits only if they are zero and this
  65  * is not the root dquot.
  66  */
  67 void
  68 xfs_qm_adjust_dqlimits(
  69         struct xfs_mount        *mp,
  70         struct xfs_dquot        *dq)
  71 {
  72         struct xfs_quotainfo    *q = mp->m_quotainfo;
  73         struct xfs_disk_dquot   *d = &dq->q_core;
  74         struct xfs_def_quota    *defq;
  75         int                     prealloc = 0;
  76 
  77         ASSERT(d->d_id);
  78         defq = xfs_get_defquota(dq, q);
  79 
  80         if (defq->bsoftlimit && !d->d_blk_softlimit) {
  81                 d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
  82                 prealloc = 1;
  83         }
  84         if (defq->bhardlimit && !d->d_blk_hardlimit) {
  85                 d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
  86                 prealloc = 1;
  87         }
  88         if (defq->isoftlimit && !d->d_ino_softlimit)
  89                 d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
  90         if (defq->ihardlimit && !d->d_ino_hardlimit)
  91                 d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
  92         if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
  93                 d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
  94         if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
  95                 d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);
  96 
  97         if (prealloc)
  98                 xfs_dquot_set_prealloc_limits(dq);
  99 }
 100 
 101 /*
 102  * Check the limits and timers of a dquot and start or reset timers
 103  * if necessary.
  104  * This gets called even when quota enforcement is OFF, which makes our
  105  * life a little less complicated. (We just don't reject any quota
  106  * reservations in that case.)
  107  * We also return 0 as the timer values in Q_GETQUOTA calls when
  108  * enforcement is off.
 109  * In contrast, warnings are a little different in that they don't
 110  * 'automatically' get started when limits get exceeded.  They do
 111  * get reset to zero, however, when we find the count to be under
 112  * the soft limit (they are only ever set non-zero via userspace).
 113  */
 114 void
 115 xfs_qm_adjust_dqtimers(
 116         xfs_mount_t             *mp,
 117         xfs_disk_dquot_t        *d)
 118 {
 119         ASSERT(d->d_id);
 120 
 121 #ifdef DEBUG
 122         if (d->d_blk_hardlimit)
 123                 ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
 124                        be64_to_cpu(d->d_blk_hardlimit));
 125         if (d->d_ino_hardlimit)
 126                 ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
 127                        be64_to_cpu(d->d_ino_hardlimit));
 128         if (d->d_rtb_hardlimit)
 129                 ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
 130                        be64_to_cpu(d->d_rtb_hardlimit));
 131 #endif
 132 
 133         if (!d->d_btimer) {
 134                 if ((d->d_blk_softlimit &&
 135                      (be64_to_cpu(d->d_bcount) >
 136                       be64_to_cpu(d->d_blk_softlimit))) ||
 137                     (d->d_blk_hardlimit &&
 138                      (be64_to_cpu(d->d_bcount) >
 139                       be64_to_cpu(d->d_blk_hardlimit)))) {
 140                         d->d_btimer = cpu_to_be32(get_seconds() +
 141                                         mp->m_quotainfo->qi_btimelimit);
 142                 } else {
 143                         d->d_bwarns = 0;
 144                 }
 145         } else {
 146                 if ((!d->d_blk_softlimit ||
 147                      (be64_to_cpu(d->d_bcount) <=
 148                       be64_to_cpu(d->d_blk_softlimit))) &&
 149                     (!d->d_blk_hardlimit ||
 150                     (be64_to_cpu(d->d_bcount) <=
 151                      be64_to_cpu(d->d_blk_hardlimit)))) {
 152                         d->d_btimer = 0;
 153                 }
 154         }
 155 
 156         if (!d->d_itimer) {
 157                 if ((d->d_ino_softlimit &&
 158                      (be64_to_cpu(d->d_icount) >
 159                       be64_to_cpu(d->d_ino_softlimit))) ||
 160                     (d->d_ino_hardlimit &&
 161                      (be64_to_cpu(d->d_icount) >
 162                       be64_to_cpu(d->d_ino_hardlimit)))) {
 163                         d->d_itimer = cpu_to_be32(get_seconds() +
 164                                         mp->m_quotainfo->qi_itimelimit);
 165                 } else {
 166                         d->d_iwarns = 0;
 167                 }
 168         } else {
 169                 if ((!d->d_ino_softlimit ||
 170                      (be64_to_cpu(d->d_icount) <=
 171                       be64_to_cpu(d->d_ino_softlimit)))  &&
 172                     (!d->d_ino_hardlimit ||
 173                      (be64_to_cpu(d->d_icount) <=
 174                       be64_to_cpu(d->d_ino_hardlimit)))) {
 175                         d->d_itimer = 0;
 176                 }
 177         }
 178 
 179         if (!d->d_rtbtimer) {
 180                 if ((d->d_rtb_softlimit &&
 181                      (be64_to_cpu(d->d_rtbcount) >
 182                       be64_to_cpu(d->d_rtb_softlimit))) ||
 183                     (d->d_rtb_hardlimit &&
 184                      (be64_to_cpu(d->d_rtbcount) >
 185                       be64_to_cpu(d->d_rtb_hardlimit)))) {
 186                         d->d_rtbtimer = cpu_to_be32(get_seconds() +
 187                                         mp->m_quotainfo->qi_rtbtimelimit);
 188                 } else {
 189                         d->d_rtbwarns = 0;
 190                 }
 191         } else {
 192                 if ((!d->d_rtb_softlimit ||
 193                      (be64_to_cpu(d->d_rtbcount) <=
 194                       be64_to_cpu(d->d_rtb_softlimit))) &&
 195                     (!d->d_rtb_hardlimit ||
 196                      (be64_to_cpu(d->d_rtbcount) <=
 197                       be64_to_cpu(d->d_rtb_hardlimit)))) {
 198                         d->d_rtbtimer = 0;
 199                 }
 200         }
 201 }
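
/*
 * Editor's note -- a worked example with assumed values, not part of the
 * original file: suppose d_blk_softlimit == 100 (no hard limit) and
 * d_bcount == 150.  With no timer running, the first block above arms
 * d_btimer to get_seconds() + qi_btimelimit.  Once usage drops back to
 * <= 100, a later call takes the else branch and clears d_btimer; a call
 * that finds usage under the limits with no timer armed clears d_bwarns
 * instead.
 */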
 202 
 203 /*
 204  * initialize a buffer full of dquots and log the whole thing
 205  */
 206 STATIC void
 207 xfs_qm_init_dquot_blk(
 208         xfs_trans_t     *tp,
 209         xfs_mount_t     *mp,
 210         xfs_dqid_t      id,
 211         uint            type,
 212         xfs_buf_t       *bp)
 213 {
 214         struct xfs_quotainfo    *q = mp->m_quotainfo;
 215         xfs_dqblk_t     *d;
 216         xfs_dqid_t      curid;
 217         int             i;
 218 
 219         ASSERT(tp);
 220         ASSERT(xfs_buf_islocked(bp));
 221 
 222         d = bp->b_addr;
 223 
 224         /*
  225          * ID of the first dquot in the block; IDs are zero-based.
 226          */
 227         curid = id - (id % q->qi_dqperchunk);
 228         memset(d, 0, BBTOB(q->qi_dqchunklen));
 229         for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
 230                 d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
 231                 d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
 232                 d->dd_diskdq.d_id = cpu_to_be32(curid);
 233                 d->dd_diskdq.d_flags = type;
 234                 if (xfs_sb_version_hascrc(&mp->m_sb)) {
 235                         uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
 236                         xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
 237                                          XFS_DQUOT_CRC_OFF);
 238                 }
 239         }
 240 
 241         xfs_trans_dquot_buf(tp, bp,
 242                             (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
 243                             ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
 244                              XFS_BLF_GDQUOT_BUF)));
 245         xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
 246 }
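
/*
 * Editor's note -- an illustrative calculation with an assumed chunk size,
 * not part of the original file: with qi_dqperchunk == 30, initializing the
 * block that holds id 45 computes curid = 45 - (45 % 30) = 30, so the loop
 * above stamps ids 30..59 into the 30 dquots of the buffer.
 */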
 247 
 248 /*
 249  * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 250  * watermarks correspond to the soft and hard limits by default. If a soft limit
 251  * is not specified, we use 95% of the hard limit.
 252  */
 253 void
 254 xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
 255 {
 256         uint64_t space;
 257 
 258         dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
 259         dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
 260         if (!dqp->q_prealloc_lo_wmark) {
 261                 dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
 262                 do_div(dqp->q_prealloc_lo_wmark, 100);
 263                 dqp->q_prealloc_lo_wmark *= 95;
 264         }
 265 
 266         space = dqp->q_prealloc_hi_wmark;
 267 
 268         do_div(space, 100);
 269         dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
 270         dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
 271         dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
 272 }
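
/*
 * Editor's note -- a worked example with assumed limits, not part of the
 * original file: for a hard limit of 1000 blocks and no soft limit,
 *
 *	q_prealloc_hi_wmark = 1000
 *	q_prealloc_lo_wmark = 1000 / 100 * 95 = 950
 *	q_low_space[XFS_QLOWSP_1_PCNT] = 10
 *	q_low_space[XFS_QLOWSP_3_PCNT] = 30
 *	q_low_space[XFS_QLOWSP_5_PCNT] = 50
 */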
 273 
 274 /*
 275  * Ensure that the given in-core dquot has a buffer on disk backing it, and
 276  * return the buffer locked and held. This is called when the bmapi finds a
 277  * hole.
 278  */
 279 STATIC int
 280 xfs_dquot_disk_alloc(
 281         struct xfs_trans        **tpp,
 282         struct xfs_dquot        *dqp,
 283         struct xfs_buf          **bpp)
 284 {
 285         struct xfs_bmbt_irec    map;
 286         struct xfs_trans        *tp = *tpp;
 287         struct xfs_mount        *mp = tp->t_mountp;
 288         struct xfs_buf          *bp;
 289         struct xfs_inode        *quotip = xfs_quota_inode(mp, dqp->dq_flags);
 290         int                     nmaps = 1;
 291         int                     error;
 292 
 293         trace_xfs_dqalloc(dqp);
 294 
 295         xfs_ilock(quotip, XFS_ILOCK_EXCL);
 296         if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
 297                 /*
  298                  * Return if this type of quota was turned off while we
  299                  * didn't hold the inode lock.
 300                  */
 301                 xfs_iunlock(quotip, XFS_ILOCK_EXCL);
 302                 return -ESRCH;
 303         }
 304 
 305         /* Create the block mapping. */
 306         xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
 307         error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
 308                         XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
 309                         XFS_QM_DQALLOC_SPACE_RES(mp), &map, &nmaps);
 310         if (error)
 311                 return error;
 312         ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
 313         ASSERT(nmaps == 1);
 314         ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
 315                (map.br_startblock != HOLESTARTBLOCK));
 316 
 317         /*
 318          * Keep track of the blkno to save a lookup later
 319          */
 320         dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
 321 
 322         /* now we can just get the buffer (there's nothing to read yet) */
 323         bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
 324                         mp->m_quotainfo->qi_dqchunklen, 0);
 325         if (!bp)
 326                 return -ENOMEM;
 327         bp->b_ops = &xfs_dquot_buf_ops;
 328 
 329         /*
 330          * Make a chunk of dquots out of this buffer and log
 331          * the entire thing.
 332          */
 333         xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
 334                               dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
 335         xfs_buf_set_ref(bp, XFS_DQUOT_REF);
 336 
 337         /*
 338          * Hold the buffer and join it to the dfops so that we'll still own
 339          * the buffer when we return to the caller.  The buffer disposal on
 340          * error must be paid attention to very carefully, as it has been
 341          * broken since commit efa092f3d4c6 "[XFS] Fixes a bug in the quota
 342          * code when allocating a new dquot record" in 2005, and the later
 343          * conversion to xfs_defer_ops in commit 310a75a3c6c747 failed to keep
 344          * the buffer locked across the _defer_finish call.  We can now do
 345          * this correctly with xfs_defer_bjoin.
 346          *
 347          * Above, we allocated a disk block for the dquot information and used
 348          * get_buf to initialize the dquot. If the _defer_finish fails, the old
 349          * transaction is gone but the new buffer is not joined or held to any
 350          * transaction, so we must _buf_relse it.
 351          *
 352          * If everything succeeds, the caller of this function is returned a
 353          * buffer that is locked and held to the transaction.  The caller
 354          * is responsible for unlocking any buffer passed back, either
 355          * manually or by committing the transaction.  On error, the buffer is
 356          * released and not passed back.
 357          */
 358         xfs_trans_bhold(tp, bp);
 359         error = xfs_defer_finish(tpp);
 360         if (error) {
 361                 xfs_trans_bhold_release(*tpp, bp);
 362                 xfs_trans_brelse(*tpp, bp);
 363                 return error;
 364         }
 365         *bpp = bp;
 366         return 0;
 367 }
 368 
 369 /*
 370  * Read in the in-core dquot's on-disk metadata and return the buffer.
 371  * Returns ENOENT to signal a hole.
 372  */
 373 STATIC int
 374 xfs_dquot_disk_read(
 375         struct xfs_mount        *mp,
 376         struct xfs_dquot        *dqp,
 377         struct xfs_buf          **bpp)
 378 {
 379         struct xfs_bmbt_irec    map;
 380         struct xfs_buf          *bp;
 381         struct xfs_inode        *quotip = xfs_quota_inode(mp, dqp->dq_flags);
 382         uint                    lock_mode;
 383         int                     nmaps = 1;
 384         int                     error;
 385 
 386         lock_mode = xfs_ilock_data_map_shared(quotip);
 387         if (!xfs_this_quota_on(mp, dqp->dq_flags)) {
 388                 /*
  389                  * Return if this type of quota was turned off while we
 390                  * didn't have the quota inode lock.
 391                  */
 392                 xfs_iunlock(quotip, lock_mode);
 393                 return -ESRCH;
 394         }
 395 
 396         /*
 397          * Find the block map; no allocations yet
 398          */
 399         error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
 400                         XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
 401         xfs_iunlock(quotip, lock_mode);
 402         if (error)
 403                 return error;
 404 
 405         ASSERT(nmaps == 1);
 406         ASSERT(map.br_blockcount >= 1);
 407         ASSERT(map.br_startblock != DELAYSTARTBLOCK);
 408         if (map.br_startblock == HOLESTARTBLOCK)
 409                 return -ENOENT;
 410 
 411         trace_xfs_dqtobp_read(dqp);
 412 
 413         /*
 414          * store the blkno etc so that we don't have to do the
 415          * mapping all the time
 416          */
 417         dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
 418 
 419         error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
 420                         mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 421                         &xfs_dquot_buf_ops);
 422         if (error) {
 423                 ASSERT(bp == NULL);
 424                 return error;
 425         }
 426 
 427         ASSERT(xfs_buf_islocked(bp));
 428         xfs_buf_set_ref(bp, XFS_DQUOT_REF);
 429         *bpp = bp;
 430 
 431         return 0;
 432 }
 433 
 434 /* Allocate and initialize everything we need for an incore dquot. */
 435 STATIC struct xfs_dquot *
 436 xfs_dquot_alloc(
 437         struct xfs_mount        *mp,
 438         xfs_dqid_t              id,
 439         uint                    type)
 440 {
 441         struct xfs_dquot        *dqp;
 442 
 443         dqp = kmem_zone_zalloc(xfs_qm_dqzone, 0);
 444 
 445         dqp->dq_flags = type;
 446         dqp->q_core.d_id = cpu_to_be32(id);
 447         dqp->q_mount = mp;
 448         INIT_LIST_HEAD(&dqp->q_lru);
 449         mutex_init(&dqp->q_qlock);
 450         init_waitqueue_head(&dqp->q_pinwait);
 451         dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
 452         /*
 453          * Offset of dquot in the (fixed sized) dquot chunk.
 454          */
 455         dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
 456                         sizeof(xfs_dqblk_t);
 457 
 458         /*
 459          * Because we want to use a counting completion, complete
 460          * the flush completion once to allow a single access to
 461          * the flush completion without blocking.
 462          */
 463         init_completion(&dqp->q_flush);
 464         complete(&dqp->q_flush);
 465 
 466         /*
 467          * Make sure group quotas have a different lock class than user
 468          * quotas.
 469          */
 470         switch (type) {
 471         case XFS_DQ_USER:
 472                 /* uses the default lock class */
 473                 break;
 474         case XFS_DQ_GROUP:
 475                 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
 476                 break;
 477         case XFS_DQ_PROJ:
 478                 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
 479                 break;
 480         default:
 481                 ASSERT(0);
 482                 break;
 483         }
 484 
 485         xfs_qm_dquot_logitem_init(dqp);
 486 
 487         XFS_STATS_INC(mp, xs_qm_dquot);
 488         return dqp;
 489 }
 490 
 491 /* Copy the in-core quota fields in from the on-disk buffer. */
 492 STATIC void
 493 xfs_dquot_from_disk(
 494         struct xfs_dquot        *dqp,
 495         struct xfs_buf          *bp)
 496 {
 497         struct xfs_disk_dquot   *ddqp = bp->b_addr + dqp->q_bufoffset;
 498 
 499         /* copy everything from disk dquot to the incore dquot */
 500         memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
 501 
 502         /*
 503          * Reservation counters are defined as reservation plus current usage
 504          * to avoid having to add every time.
 505          */
 506         dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
 507         dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
 508         dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
 509 
 510         /* initialize the dquot speculative prealloc thresholds */
 511         xfs_dquot_set_prealloc_limits(dqp);
 512 }
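
/*
 * Editor's note -- illustrative numbers, not part of the original file: if
 * the on-disk dquot records d_bcount == 100, then q_res_bcount starts at
 * 100 as well; a later in-core reservation of 8 blocks raises q_res_bcount
 * to 108 while d_bcount stays at 100 until the blocks are really allocated.
 */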
 513 
 514 /* Allocate and initialize the dquot buffer for this in-core dquot. */
 515 static int
 516 xfs_qm_dqread_alloc(
 517         struct xfs_mount        *mp,
 518         struct xfs_dquot        *dqp,
 519         struct xfs_buf          **bpp)
 520 {
 521         struct xfs_trans        *tp;
 522         int                     error;
 523 
 524         error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
 525                         XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
 526         if (error)
 527                 goto err;
 528 
 529         error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
 530         if (error)
 531                 goto err_cancel;
 532 
 533         error = xfs_trans_commit(tp);
 534         if (error) {
 535                 /*
 536                  * Buffer was held to the transaction, so we have to unlock it
 537                  * manually here because we're not passing it back.
 538                  */
 539                 xfs_buf_relse(*bpp);
 540                 *bpp = NULL;
 541                 goto err;
 542         }
 543         return 0;
 544 
 545 err_cancel:
 546         xfs_trans_cancel(tp);
 547 err:
 548         return error;
 549 }
 550 
 551 /*
  552  * Read in the on-disk dquot, copy it to an incore version, and release the
  553  * buffer immediately.  If @can_alloc is true, fill any
 554  * holes in the on-disk metadata.
 555  */
 556 static int
 557 xfs_qm_dqread(
 558         struct xfs_mount        *mp,
 559         xfs_dqid_t              id,
 560         uint                    type,
 561         bool                    can_alloc,
 562         struct xfs_dquot        **dqpp)
 563 {
 564         struct xfs_dquot        *dqp;
 565         struct xfs_buf          *bp;
 566         int                     error;
 567 
 568         dqp = xfs_dquot_alloc(mp, id, type);
 569         trace_xfs_dqread(dqp);
 570 
 571         /* Try to read the buffer, allocating if necessary. */
 572         error = xfs_dquot_disk_read(mp, dqp, &bp);
 573         if (error == -ENOENT && can_alloc)
 574                 error = xfs_qm_dqread_alloc(mp, dqp, &bp);
 575         if (error)
 576                 goto err;
 577 
 578         /*
 579          * At this point we should have a clean locked buffer.  Copy the data
 580          * to the incore dquot and release the buffer since the incore dquot
 581          * has its own locking protocol so we needn't tie up the buffer any
 582          * further.
 583          */
 584         ASSERT(xfs_buf_islocked(bp));
 585         xfs_dquot_from_disk(dqp, bp);
 586 
 587         xfs_buf_relse(bp);
 588         *dqpp = dqp;
 589         return error;
 590 
 591 err:
 592         trace_xfs_dqread_fail(dqp);
 593         xfs_qm_dqdestroy(dqp);
 594         *dqpp = NULL;
 595         return error;
 596 }
 597 
 598 /*
 599  * Advance to the next id in the current chunk, or if at the
 600  * end of the chunk, skip ahead to first id in next allocated chunk
 601  * using the SEEK_DATA interface.
 602  */
 603 static int
 604 xfs_dq_get_next_id(
 605         struct xfs_mount        *mp,
 606         uint                    type,
 607         xfs_dqid_t              *id)
 608 {
 609         struct xfs_inode        *quotip = xfs_quota_inode(mp, type);
 610         xfs_dqid_t              next_id = *id + 1; /* simple advance */
 611         uint                    lock_flags;
 612         struct xfs_bmbt_irec    got;
 613         struct xfs_iext_cursor  cur;
 614         xfs_fsblock_t           start;
 615         int                     error = 0;
 616 
 617         /* If we'd wrap past the max ID, stop */
 618         if (next_id < *id)
 619                 return -ENOENT;
 620 
 621         /* If new ID is within the current chunk, advancing it sufficed */
 622         if (next_id % mp->m_quotainfo->qi_dqperchunk) {
 623                 *id = next_id;
 624                 return 0;
 625         }
 626 
 627         /* Nope, next_id is now past the current chunk, so find the next one */
 628         start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
 629 
 630         lock_flags = xfs_ilock_data_map_shared(quotip);
 631         if (!(quotip->i_df.if_flags & XFS_IFEXTENTS)) {
 632                 error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
 633                 if (error)
  634                         goto out_unlock;
 635         }
 636 
 637         if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
 638                 /* contiguous chunk, bump startoff for the id calculation */
 639                 if (got.br_startoff < start)
 640                         got.br_startoff = start;
 641                 *id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
 642         } else {
 643                 error = -ENOENT;
 644         }
 645 
  646 out_unlock:
  647         xfs_iunlock(quotip, lock_flags);
  648         return error;
 649 }
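
/*
 * Editor's note -- an illustrative walk-through with an assumed chunk size,
 * not part of the original file: with qi_dqperchunk == 30, advancing from
 * id 29 gives next_id == 30, and 30 % 30 == 0 means we crossed a chunk
 * boundary.  We then look for data at file block start = 30 / 30 = 1; if
 * the lookup finds an extent whose br_startoff is 4, the next id returned
 * is 4 * 30 = 120.
 */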
 650 
 651 /*
 652  * Look up the dquot in the in-core cache.  If found, the dquot is returned
 653  * locked and ready to go.
 654  */
 655 static struct xfs_dquot *
 656 xfs_qm_dqget_cache_lookup(
 657         struct xfs_mount        *mp,
 658         struct xfs_quotainfo    *qi,
 659         struct radix_tree_root  *tree,
 660         xfs_dqid_t              id)
 661 {
 662         struct xfs_dquot        *dqp;
 663 
 664 restart:
 665         mutex_lock(&qi->qi_tree_lock);
 666         dqp = radix_tree_lookup(tree, id);
 667         if (!dqp) {
 668                 mutex_unlock(&qi->qi_tree_lock);
 669                 XFS_STATS_INC(mp, xs_qm_dqcachemisses);
 670                 return NULL;
 671         }
 672 
 673         xfs_dqlock(dqp);
 674         if (dqp->dq_flags & XFS_DQ_FREEING) {
 675                 xfs_dqunlock(dqp);
 676                 mutex_unlock(&qi->qi_tree_lock);
 677                 trace_xfs_dqget_freeing(dqp);
 678                 delay(1);
 679                 goto restart;
 680         }
 681 
 682         dqp->q_nrefs++;
 683         mutex_unlock(&qi->qi_tree_lock);
 684 
 685         trace_xfs_dqget_hit(dqp);
 686         XFS_STATS_INC(mp, xs_qm_dqcachehits);
 687         return dqp;
 688 }
 689 
 690 /*
 691  * Try to insert a new dquot into the in-core cache.  If an error occurs the
 692  * caller should throw away the dquot and start over.  Otherwise, the dquot
 693  * is returned locked (and held by the cache) as if there had been a cache
 694  * hit.
 695  */
 696 static int
 697 xfs_qm_dqget_cache_insert(
 698         struct xfs_mount        *mp,
 699         struct xfs_quotainfo    *qi,
 700         struct radix_tree_root  *tree,
 701         xfs_dqid_t              id,
 702         struct xfs_dquot        *dqp)
 703 {
 704         int                     error;
 705 
 706         mutex_lock(&qi->qi_tree_lock);
 707         error = radix_tree_insert(tree, id, dqp);
 708         if (unlikely(error)) {
 709                 /* Duplicate found!  Caller must try again. */
 710                 WARN_ON(error != -EEXIST);
 711                 mutex_unlock(&qi->qi_tree_lock);
 712                 trace_xfs_dqget_dup(dqp);
 713                 return error;
 714         }
 715 
 716         /* Return a locked dquot to the caller, with a reference taken. */
 717         xfs_dqlock(dqp);
 718         dqp->q_nrefs = 1;
 719 
 720         qi->qi_dquots++;
 721         mutex_unlock(&qi->qi_tree_lock);
 722 
 723         return 0;
 724 }
 725 
 726 /* Check our input parameters. */
 727 static int
 728 xfs_qm_dqget_checks(
 729         struct xfs_mount        *mp,
 730         uint                    type)
 731 {
 732         if (WARN_ON_ONCE(!XFS_IS_QUOTA_RUNNING(mp)))
 733                 return -ESRCH;
 734 
 735         switch (type) {
 736         case XFS_DQ_USER:
 737                 if (!XFS_IS_UQUOTA_ON(mp))
 738                         return -ESRCH;
 739                 return 0;
 740         case XFS_DQ_GROUP:
 741                 if (!XFS_IS_GQUOTA_ON(mp))
 742                         return -ESRCH;
 743                 return 0;
 744         case XFS_DQ_PROJ:
 745                 if (!XFS_IS_PQUOTA_ON(mp))
 746                         return -ESRCH;
 747                 return 0;
 748         default:
 749                 WARN_ON_ONCE(0);
 750                 return -EINVAL;
 751         }
 752 }
 753 
 754 /*
  755  * Given the file system, id, and type (UDQUOT/GDQUOT/PDQUOT), return a locked
 756  * dquot, doing an allocation (if requested) as needed.
 757  */
 758 int
 759 xfs_qm_dqget(
 760         struct xfs_mount        *mp,
 761         xfs_dqid_t              id,
 762         uint                    type,
 763         bool                    can_alloc,
 764         struct xfs_dquot        **O_dqpp)
 765 {
 766         struct xfs_quotainfo    *qi = mp->m_quotainfo;
 767         struct radix_tree_root  *tree = xfs_dquot_tree(qi, type);
 768         struct xfs_dquot        *dqp;
 769         int                     error;
 770 
 771         error = xfs_qm_dqget_checks(mp, type);
 772         if (error)
 773                 return error;
 774 
 775 restart:
 776         dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
 777         if (dqp) {
 778                 *O_dqpp = dqp;
 779                 return 0;
 780         }
 781 
 782         error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
 783         if (error)
 784                 return error;
 785 
 786         error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
 787         if (error) {
 788                 /*
 789                  * Duplicate found. Just throw away the new dquot and start
 790                  * over.
 791                  */
 792                 xfs_qm_dqdestroy(dqp);
 793                 XFS_STATS_INC(mp, xs_qm_dquot_dups);
 794                 goto restart;
 795         }
 796 
 797         trace_xfs_dqget_miss(dqp);
 798         *O_dqpp = dqp;
 799         return 0;
 800 }
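
/*
 * Editor's note: a minimal caller sketch, not part of the original file.
 * It assumes user quotas are enabled on @mp and that @uid holds a valid
 * user id.  xfs_qm_dqget() hands back the dquot locked with a reference
 * held; xfs_qm_dqput() drops both.
 *
 *	struct xfs_dquot	*dqp;
 *	int			error;
 *
 *	error = xfs_qm_dqget(mp, uid, XFS_DQ_USER, false, &dqp);
 *	if (error)
 *		return error;
 *	(examine or update dqp->q_core while holding the dquot lock)
 *	xfs_qm_dqput(dqp);
 */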
 801 
 802 /*
 803  * Given a dquot id and type, read and initialize a dquot from the on-disk
 804  * metadata.  This function is only for use during quota initialization so
  805  * it ignores the dquot cache, assuming that the dquot shrinker isn't set up.
 806  * The caller is responsible for _qm_dqdestroy'ing the returned dquot.
 807  */
 808 int
 809 xfs_qm_dqget_uncached(
 810         struct xfs_mount        *mp,
 811         xfs_dqid_t              id,
 812         uint                    type,
 813         struct xfs_dquot        **dqpp)
 814 {
 815         int                     error;
 816 
 817         error = xfs_qm_dqget_checks(mp, type);
 818         if (error)
 819                 return error;
 820 
 821         return xfs_qm_dqread(mp, id, type, 0, dqpp);
 822 }
 823 
 824 /* Return the quota id for a given inode and type. */
 825 xfs_dqid_t
 826 xfs_qm_id_for_quotatype(
 827         struct xfs_inode        *ip,
 828         uint                    type)
 829 {
 830         switch (type) {
 831         case XFS_DQ_USER:
 832                 return ip->i_d.di_uid;
 833         case XFS_DQ_GROUP:
 834                 return ip->i_d.di_gid;
 835         case XFS_DQ_PROJ:
 836                 return xfs_get_projid(ip);
 837         }
 838         ASSERT(0);
 839         return 0;
 840 }
 841 
 842 /*
 843  * Return the dquot for a given inode and type.  If @can_alloc is true, then
 844  * allocate blocks if needed.  The inode's ILOCK must be held and it must not
  845  * already have a dquot of this type attached.
 846  */
 847 int
 848 xfs_qm_dqget_inode(
 849         struct xfs_inode        *ip,
 850         uint                    type,
 851         bool                    can_alloc,
 852         struct xfs_dquot        **O_dqpp)
 853 {
 854         struct xfs_mount        *mp = ip->i_mount;
 855         struct xfs_quotainfo    *qi = mp->m_quotainfo;
 856         struct radix_tree_root  *tree = xfs_dquot_tree(qi, type);
 857         struct xfs_dquot        *dqp;
 858         xfs_dqid_t              id;
 859         int                     error;
 860 
 861         error = xfs_qm_dqget_checks(mp, type);
 862         if (error)
 863                 return error;
 864 
 865         ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 866         ASSERT(xfs_inode_dquot(ip, type) == NULL);
 867 
 868         id = xfs_qm_id_for_quotatype(ip, type);
 869 
 870 restart:
 871         dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
 872         if (dqp) {
 873                 *O_dqpp = dqp;
 874                 return 0;
 875         }
 876 
 877         /*
 878          * Dquot cache miss. We don't want to keep the inode lock across
 879          * a (potential) disk read. Also we don't want to deal with the lock
 880          * ordering between quotainode and this inode. OTOH, dropping the inode
 881          * lock here means dealing with a chown that can happen before
 882          * we re-acquire the lock.
 883          */
 884         xfs_iunlock(ip, XFS_ILOCK_EXCL);
 885         error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
 886         xfs_ilock(ip, XFS_ILOCK_EXCL);
 887         if (error)
 888                 return error;
 889 
 890         /*
 891          * A dquot could be attached to this inode by now, since we had
 892          * dropped the ilock.
 893          */
 894         if (xfs_this_quota_on(mp, type)) {
 895                 struct xfs_dquot        *dqp1;
 896 
 897                 dqp1 = xfs_inode_dquot(ip, type);
 898                 if (dqp1) {
 899                         xfs_qm_dqdestroy(dqp);
 900                         dqp = dqp1;
 901                         xfs_dqlock(dqp);
 902                         goto dqret;
 903                 }
 904         } else {
 905                 /* inode stays locked on return */
 906                 xfs_qm_dqdestroy(dqp);
 907                 return -ESRCH;
 908         }
 909 
 910         error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
 911         if (error) {
 912                 /*
 913                  * Duplicate found. Just throw away the new dquot and start
 914                  * over.
 915                  */
 916                 xfs_qm_dqdestroy(dqp);
 917                 XFS_STATS_INC(mp, xs_qm_dquot_dups);
 918                 goto restart;
 919         }
 920 
 921 dqret:
 922         ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 923         trace_xfs_dqget_miss(dqp);
 924         *O_dqpp = dqp;
 925         return 0;
 926 }
 927 
 928 /*
 929  * Starting at @id and progressing upwards, look for an initialized incore
 930  * dquot, lock it, and return it.
 931  */
 932 int
 933 xfs_qm_dqget_next(
 934         struct xfs_mount        *mp,
 935         xfs_dqid_t              id,
 936         uint                    type,
 937         struct xfs_dquot        **dqpp)
 938 {
 939         struct xfs_dquot        *dqp;
 940         int                     error = 0;
 941 
 942         *dqpp = NULL;
 943         for (; !error; error = xfs_dq_get_next_id(mp, type, &id)) {
 944                 error = xfs_qm_dqget(mp, id, type, false, &dqp);
 945                 if (error == -ENOENT)
 946                         continue;
 947                 else if (error != 0)
 948                         break;
 949 
 950                 if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
 951                         *dqpp = dqp;
 952                         return 0;
 953                 }
 954 
 955                 xfs_qm_dqput(dqp);
 956         }
 957 
 958         return error;
 959 }
 960 
 961 /*
 962  * Release a reference to the dquot (decrement ref-count) and unlock it.
 963  *
  964  * If the reference count drops to zero, the dquot is added to the LRU list
  965  * so that it can be reclaimed by the shrinker later.
 966  */
 967 void
 968 xfs_qm_dqput(
 969         struct xfs_dquot        *dqp)
 970 {
 971         ASSERT(dqp->q_nrefs > 0);
 972         ASSERT(XFS_DQ_IS_LOCKED(dqp));
 973 
 974         trace_xfs_dqput(dqp);
 975 
 976         if (--dqp->q_nrefs == 0) {
 977                 struct xfs_quotainfo    *qi = dqp->q_mount->m_quotainfo;
 978                 trace_xfs_dqput_free(dqp);
 979 
 980                 if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
 981                         XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
 982         }
 983         xfs_dqunlock(dqp);
 984 }
 985 
 986 /*
  987  * Release a dquot reference via dqput(); the dquot is not flushed here.
  988  * The dquot must not be locked on entry.
 989  */
 990 void
 991 xfs_qm_dqrele(
 992         xfs_dquot_t     *dqp)
 993 {
 994         if (!dqp)
 995                 return;
 996 
 997         trace_xfs_dqrele(dqp);
 998 
 999         xfs_dqlock(dqp);
1000         /*
 1001          * We don't bother flushing the dquot here even if it is dirty;
 1002          * that would create stutters that we want to avoid.
 1003          * Instead we do a delayed write when we try to reclaim
 1004          * a dirty dquot, and xfsaild writeback takes part of the burden.
1005          */
1006         xfs_qm_dqput(dqp);
1007 }
1008 
1009 /*
1010  * This is the dquot flushing I/O completion routine.  It is called
1011  * from interrupt level when the buffer containing the dquot is
1012  * flushed to disk.  It is responsible for removing the dquot logitem
1013  * from the AIL if it has not been re-logged, and unlocking the dquot's
 1015  * flush lock. This behavior is very similar to that of inodes.
1015  */
1016 STATIC void
1017 xfs_qm_dqflush_done(
1018         struct xfs_buf          *bp,
1019         struct xfs_log_item     *lip)
1020 {
1021         xfs_dq_logitem_t        *qip = (struct xfs_dq_logitem *)lip;
1022         xfs_dquot_t             *dqp = qip->qli_dquot;
1023         struct xfs_ail          *ailp = lip->li_ailp;
1024 
1025         /*
 1026          * We only want to pull the item from the AIL if its location
 1027          * in the log has not changed since we started the flush, i.e.
 1028          * only if the dquot's lsn is unchanged.  First we check the lsn
 1029          * outside the lock since it's cheaper, and then we recheck
 1030          * while holding the AIL lock before removing the dquot from
 1031          * the AIL.
1032          */
1033         if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
1034             ((lip->li_lsn == qip->qli_flush_lsn) ||
1035              test_bit(XFS_LI_FAILED, &lip->li_flags))) {
1036 
1037                 /* xfs_trans_ail_delete() drops the AIL lock. */
1038                 spin_lock(&ailp->ail_lock);
1039                 if (lip->li_lsn == qip->qli_flush_lsn) {
1040                         xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
1041                 } else {
1042                         /*
1043                          * Clear the failed state since we are about to drop the
1044                          * flush lock
1045                          */
1046                         xfs_clear_li_failed(lip);
1047                         spin_unlock(&ailp->ail_lock);
1048                 }
1049         }
1050 
1051         /*
1052          * Release the dq's flush lock since we're done with it.
1053          */
1054         xfs_dqfunlock(dqp);
1055 }
1056 
1057 /*
1058  * Write a modified dquot to disk.
 1059  * The caller must hold both the dquot lock and the flush lock.
1060  * The flush lock will not be unlocked until the dquot reaches the disk,
1061  * but the dquot is free to be unlocked and modified by the caller
1062  * in the interim. Dquot is still locked on return. This behavior is
1063  * identical to that of inodes.
1064  */
1065 int
1066 xfs_qm_dqflush(
1067         struct xfs_dquot        *dqp,
1068         struct xfs_buf          **bpp)
1069 {
1070         struct xfs_mount        *mp = dqp->q_mount;
1071         struct xfs_buf          *bp;
1072         struct xfs_dqblk        *dqb;
1073         struct xfs_disk_dquot   *ddqp;
1074         xfs_failaddr_t          fa;
1075         int                     error;
1076 
1077         ASSERT(XFS_DQ_IS_LOCKED(dqp));
1078         ASSERT(!completion_done(&dqp->q_flush));
1079 
1080         trace_xfs_dqflush(dqp);
1081 
1082         *bpp = NULL;
1083 
1084         xfs_qm_dqunpin_wait(dqp);
1085 
1086         /*
1087          * This may have been unpinned because the filesystem is shutting
1088          * down forcibly. If that's the case we must not write this dquot
1089          * to disk, because the log record didn't make it to disk.
1090          *
1091          * We also have to remove the log item from the AIL in this case,
 1092  * as we wait for an empty AIL as part of the unmount process.
1093          */
1094         if (XFS_FORCED_SHUTDOWN(mp)) {
1095                 struct xfs_log_item     *lip = &dqp->q_logitem.qli_item;
1096                 dqp->dq_flags &= ~XFS_DQ_DIRTY;
1097 
1098                 xfs_trans_ail_remove(lip, SHUTDOWN_CORRUPT_INCORE);
1099 
1100                 error = -EIO;
1101                 goto out_unlock;
1102         }
1103 
1104         /*
1105          * Get the buffer containing the on-disk dquot
1106          */
1107         error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
1108                                    mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1109                                    &xfs_dquot_buf_ops);
1110         if (error)
1111                 goto out_unlock;
1112 
1113         /*
1114          * Calculate the location of the dquot inside the buffer.
1115          */
1116         dqb = bp->b_addr + dqp->q_bufoffset;
1117         ddqp = &dqb->dd_diskdq;
1118 
1119         /*
1120          * A simple sanity check in case we got a corrupted dquot.
1121          */
1122         fa = xfs_dqblk_verify(mp, dqb, be32_to_cpu(ddqp->d_id), 0);
1123         if (fa) {
1124                 xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
1125                                 be32_to_cpu(ddqp->d_id), fa);
1126                 xfs_buf_relse(bp);
1127                 xfs_dqfunlock(dqp);
1128                 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1129                 return -EIO;
1130         }
1131 
1132         /* This is the only portion of data that needs to persist */
1133         memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
1134 
1135         /*
1136          * Clear the dirty field and remember the flush lsn for later use.
1137          */
1138         dqp->dq_flags &= ~XFS_DQ_DIRTY;
1139 
1140         xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
1141                                         &dqp->q_logitem.qli_item.li_lsn);
1142 
1143         /*
1144          * copy the lsn into the on-disk dquot now while we have the in memory
1145          * dquot here. This can't be done later in the write verifier as we
1146          * can't get access to the log item at that point in time.
1147          *
1148          * We also calculate the CRC here so that the on-disk dquot in the
1149          * buffer always has a valid CRC. This ensures there is no possibility
1150          * of a dquot without an up-to-date CRC getting to disk.
1151          */
1152         if (xfs_sb_version_hascrc(&mp->m_sb)) {
1153                 dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
1154                 xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
1155                                  XFS_DQUOT_CRC_OFF);
1156         }
1157 
1158         /*
1159          * Attach an iodone routine so that we can remove this dquot from the
1160          * AIL and release the flush lock once the dquot is synced to disk.
1161          */
1162         xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
1163                                   &dqp->q_logitem.qli_item);
1164 
1165         /*
1166          * If the buffer is pinned then push on the log so we won't
1167          * get stuck waiting in the write for too long.
1168          */
1169         if (xfs_buf_ispinned(bp)) {
1170                 trace_xfs_dqflush_force(dqp);
1171                 xfs_log_force(mp, 0);
1172         }
1173 
1174         trace_xfs_dqflush_done(dqp);
1175         *bpp = bp;
1176         return 0;
1177 
1178 out_unlock:
1179         xfs_dqfunlock(dqp);
 1180         return error;
1181 }
1182 
1183 /*
1184  * Lock two xfs_dquot structures.
1185  *
1186  * To avoid deadlocks we always lock the quota structure with
 1187  * the lower id first.
1188  */
1189 void
1190 xfs_dqlock2(
1191         xfs_dquot_t     *d1,
1192         xfs_dquot_t     *d2)
1193 {
1194         if (d1 && d2) {
1195                 ASSERT(d1 != d2);
1196                 if (be32_to_cpu(d1->q_core.d_id) >
1197                     be32_to_cpu(d2->q_core.d_id)) {
1198                         mutex_lock(&d2->q_qlock);
1199                         mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1200                 } else {
1201                         mutex_lock(&d1->q_qlock);
1202                         mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1203                 }
1204         } else if (d1) {
1205                 mutex_lock(&d1->q_qlock);
1206         } else if (d2) {
1207                 mutex_lock(&d2->q_qlock);
1208         }
1209 }
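
/*
 * Editor's note: a usage sketch, not part of the original file, assuming
 * @udqp and @gdqp are two distinct referenced dquots.  Because the pair is
 * ordered by id inside xfs_dqlock2(), two racing callers cannot deadlock.
 * There is no unlock counterpart, so each lock is dropped individually:
 *
 *	xfs_dqlock2(udqp, gdqp);
 *	(move counts between the two dquots)
 *	xfs_dqunlock(udqp);
 *	xfs_dqunlock(gdqp);
 */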
1210 
1211 int __init
1212 xfs_qm_init(void)
1213 {
1214         xfs_qm_dqzone =
1215                 kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
1216         if (!xfs_qm_dqzone)
1217                 goto out;
1218 
1219         xfs_qm_dqtrxzone =
1220                 kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
1221         if (!xfs_qm_dqtrxzone)
1222                 goto out_free_dqzone;
1223 
1224         return 0;
1225 
1226 out_free_dqzone:
1227         kmem_zone_destroy(xfs_qm_dqzone);
1228 out:
1229         return -ENOMEM;
1230 }
1231 
1232 void
1233 xfs_qm_exit(void)
1234 {
1235         kmem_zone_destroy(xfs_qm_dqtrxzone);
1236         kmem_zone_destroy(xfs_qm_dqzone);
1237 }
1238 
1239 /*
1240  * Iterate every dquot of a particular type.  The caller must ensure that the
1241  * particular quota type is active.  iter_fn can return negative error codes,
1242  * or -ECANCELED to indicate that it wants to stop iterating.
1243  */
1244 int
1245 xfs_qm_dqiterate(
1246         struct xfs_mount        *mp,
1247         uint                    dqtype,
1248         xfs_qm_dqiterate_fn     iter_fn,
1249         void                    *priv)
1250 {
1251         struct xfs_dquot        *dq;
1252         xfs_dqid_t              id = 0;
1253         int                     error;
1254 
1255         do {
1256                 error = xfs_qm_dqget_next(mp, id, dqtype, &dq);
1257                 if (error == -ENOENT)
1258                         return 0;
1259                 if (error)
1260                         return error;
1261 
1262                 error = iter_fn(dq, dqtype, priv);
1263                 id = be32_to_cpu(dq->q_core.d_id);
1264                 xfs_qm_dqput(dq);
1265                 id++;
1266         } while (error == 0 && id != 0);
1267 
1268         return error;
1269 }
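
/*
 * Editor's note: an illustrative callback sketch, not part of the original
 * file.  The function name and counter are hypothetical; the signature is
 * the xfs_qm_dqiterate_fn shape called above.  Returning -ECANCELED from
 * the callback stops the iteration early.
 *
 *	static int
 *	xfs_count_dquot(
 *		struct xfs_dquot	*dq,
 *		uint			dqtype,
 *		void			*priv)
 *	{
 *		unsigned int		*count = priv;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned int		count = 0;
 *	int			error;
 *
 *	error = xfs_qm_dqiterate(mp, XFS_DQ_USER, xfs_count_dquot, &count);
 */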
