rgd              1499 fs/gfs2/bmap.c 	struct gfs2_rgrpd *rgd;
rgd              1509 fs/gfs2/bmap.c 	rgd = NULL;
rgd              1511 fs/gfs2/bmap.c 		rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
rgd              1524 fs/gfs2/bmap.c 		if (rgd) {
rgd              1525 fs/gfs2/bmap.c 			if (!rgrp_contains_block(rgd, bn)) {
rgd              1530 fs/gfs2/bmap.c 			rgd = gfs2_blk2rgrpd(sdp, bn, true);
rgd              1531 fs/gfs2/bmap.c 			if (unlikely(!rgd)) {
rgd              1535 fs/gfs2/bmap.c 			ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
rgd              1542 fs/gfs2/bmap.c 			    rgd == ip->i_res.rs_rbm.rgd)
rgd              1554 fs/gfs2/bmap.c 			jblocks_rqsted = rgd->rd_length + RES_DINODE +
rgd              1605 fs/gfs2/bmap.c 			__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
rgd              1613 fs/gfs2/bmap.c 		__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
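
The fs/gfs2/bmap.c hits above come from punch_hole(): before a metadata block can be
returned to the filesystem, the resource group covering it must be looked up and its
glock acquired exclusively. A minimal sketch of that lookup-and-lock idiom, with the
transaction setup and error unwinding omitted (illustrative, not the verbatim function;
the -EIO return is an assumption):

	/* If a cached rgd no longer covers block bn, drop it and find the right one. */
	if (rgd && !rgrp_contains_block(rgd, bn)) {
		gfs2_glock_dq_uninit(rd_gh);
		rgd = NULL;
	}
	if (!rgd) {
		rgd = gfs2_blk2rgrpd(sdp, bn, true);	/* rgrp containing bn */
		if (unlikely(!rgd))
			return -EIO;			/* assumed error path */
		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, rd_gh);
		if (ret)
			return ret;
	}
	/* rgrp glock held: blocks belonging to this rgrp may now be freed */
	__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
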
rgd              2023 fs/gfs2/dir.c  		struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
rgd              2025 fs/gfs2/dir.c  		rg_blocks += rgd->rd_length;
rgd              2041 fs/gfs2/dir.c  		struct gfs2_rgrpd *rgd;
rgd              2053 fs/gfs2/dir.c  		rgd = gfs2_blk2rgrpd(sdp, blk, true);
rgd              2054 fs/gfs2/dir.c  		gfs2_free_meta(dip, rgd, blk, 1);
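
The fs/gfs2/dir.c lines are from leaf_dealloc(): it sizes the transaction by summing
rd_length over every resource group whose glock it holds, then frees each leaf block
back to whichever rgrp contains it. A short illustrative sketch of the sizing loop,
using the names visible in the lines above:

	/* Sum bitmap blocks over all rgrps held in the glock list. */
	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);

		rg_blocks += rgd->rd_length;
	}
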
rgd               172 fs/gfs2/glops.c 	struct gfs2_rgrpd *rgd;
rgd               176 fs/gfs2/glops.c 	rgd = gl->gl_object;
rgd               177 fs/gfs2/glops.c 	if (rgd)
rgd               178 fs/gfs2/glops.c 		gfs2_rgrp_brelse(rgd);
rgd               193 fs/gfs2/glops.c 	rgd = gl->gl_object;
rgd               194 fs/gfs2/glops.c 	if (rgd)
rgd               195 fs/gfs2/glops.c 		gfs2_free_clones(rgd);
rgd               213 fs/gfs2/glops.c 	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
rgd               215 fs/gfs2/glops.c 	if (rgd)
rgd               216 fs/gfs2/glops.c 		gfs2_rgrp_brelse(rgd);
rgd               222 fs/gfs2/glops.c 	if (rgd)
rgd               223 fs/gfs2/glops.c 		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
rgd               240 fs/gfs2/glops.c 	struct gfs2_rgrpd *rgd;
rgd               243 fs/gfs2/glops.c 	rgd = gl->gl_object;
rgd               246 fs/gfs2/glops.c 	return rgd;
rgd               125 fs/gfs2/incore.h 	struct gfs2_rgrpd *rgd;
rgd               132 fs/gfs2/incore.h 	return rbm->rgd->rd_bits + rbm->bii;
rgd               137 fs/gfs2/incore.h 	BUG_ON(rbm->offset >= rbm->rgd->rd_data);
rgd               138 fs/gfs2/incore.h 	return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
rgd               145 fs/gfs2/incore.h 	return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
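
The fs/gfs2/incore.h fragments belong to struct gfs2_rbm, a small cursor that names a
block as "resource group + bitmap index + offset", and to its inline helpers. A hedged
reconstruction from the lines above; the struct layout and the elided parts of the
bodies are filled in from context and should be read as a sketch, not a verbatim copy:

	struct gfs2_rbm {
		struct gfs2_rgrpd *rgd;	/* resource group the cursor points into */
		u32 offset;		/* block offset within the bitmaps */
		int bii;		/* index into rgd->rd_bits[] */
	};

	static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
	{
		return rbm->rgd->rd_bits + rbm->bii;
	}

	static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
	{
		BUG_ON(rbm->offset >= rbm->rgd->rd_data);
		return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
		       rbm->offset;
	}

	static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
				       const struct gfs2_rbm *rbm2)
	{
		return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
		       (rbm1->offset == rbm2->offset);
	}
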
rgd              1104 fs/gfs2/inode.c 	struct gfs2_rgrpd *rgd;
rgd              1116 fs/gfs2/inode.c 	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
rgd              1117 fs/gfs2/inode.c 	if (!rgd)
rgd              1120 fs/gfs2/inode.c 	gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
rgd                74 fs/gfs2/lops.c 	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
rgd                76 fs/gfs2/lops.c 	struct gfs2_bitmap *bi = rgd->rd_bits + index;
rgd                81 fs/gfs2/lops.c 		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
rgd                85 fs/gfs2/lops.c 	rgd->rd_free_clone = rgd->rd_free;
rgd                86 fs/gfs2/lops.c 	rgd->rd_extfail_pt = rgd->rd_free;
rgd               787 fs/gfs2/lops.c 				struct gfs2_rgrpd *rgd;
rgd               789 fs/gfs2/lops.c 				rgd = gfs2_blk2rgrpd(sdp, blkno, false);
rgd               790 fs/gfs2/lops.c 				if (rgd && rgd->rd_addr == blkno &&
rgd               791 fs/gfs2/lops.c 				    rgd->rd_bits && rgd->rd_bits->bi_bh) {
rgd               796 fs/gfs2/lops.c 						buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
rgd               797 fs/gfs2/lops.c 						buffer_pinned(rgd->rd_bits->bi_bh));
rgd               798 fs/gfs2/lops.c 					gfs2_dump_glock(NULL, rgd->rd_gl, true);
rgd               101 fs/gfs2/rgrp.c 		struct gfs2_sbd *sdp = rbm->rgd->rd_sbd;
rgd               106 fs/gfs2/rgrp.c 			(unsigned long long)rbm->rgd->rd_addr, bi->bi_start,
rgd               112 fs/gfs2/rgrp.c 		gfs2_consist_rgrpd(rbm->rgd);
rgd               273 fs/gfs2/rgrp.c 	if (!rgrp_contains_block(rbm->rgd, block))
rgd               276 fs/gfs2/rgrp.c 	rbm->offset = block - rbm->rgd->rd_data0;
rgd               284 fs/gfs2/rgrp.c 	rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
rgd               285 fs/gfs2/rgrp.c 	rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
rgd               307 fs/gfs2/rgrp.c 	if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
rgd               419 fs/gfs2/rgrp.c static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
rgd               449 fs/gfs2/rgrp.c void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
rgd               451 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
rgd               453 fs/gfs2/rgrp.c 	u32 length = rgd->rd_length;
rgd               461 fs/gfs2/rgrp.c 		bi = rgd->rd_bits + buf;
rgd               463 fs/gfs2/rgrp.c 			count[x] += gfs2_bitcount(rgd,
rgd               469 fs/gfs2/rgrp.c 	if (count[0] != rgd->rd_free) {
rgd               470 fs/gfs2/rgrp.c 		if (gfs2_consist_rgrpd(rgd))
rgd               472 fs/gfs2/rgrp.c 			       count[0], rgd->rd_free);
rgd               476 fs/gfs2/rgrp.c 	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
rgd               478 fs/gfs2/rgrp.c 		if (gfs2_consist_rgrpd(rgd))
rgd               484 fs/gfs2/rgrp.c 	if (count[2] + count[3] != rgd->rd_dinodes) {
rgd               485 fs/gfs2/rgrp.c 		if (gfs2_consist_rgrpd(rgd))
rgd               487 fs/gfs2/rgrp.c 			       count[2] + count[3], rgd->rd_dinodes);
rgd               549 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd;
rgd               553 fs/gfs2/rgrp.c 	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
rgd               556 fs/gfs2/rgrp.c 	return rgd;
rgd               566 fs/gfs2/rgrp.c struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
rgd               568 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
rgd               572 fs/gfs2/rgrp.c 	n = rb_next(&rgd->rd_node);
rgd               576 fs/gfs2/rgrp.c 	if (unlikely(&rgd->rd_node == n)) {
rgd               580 fs/gfs2/rgrp.c 	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
rgd               582 fs/gfs2/rgrp.c 	return rgd;
rgd               592 fs/gfs2/rgrp.c void gfs2_free_clones(struct gfs2_rgrpd *rgd)
rgd               596 fs/gfs2/rgrp.c 	for (x = 0; x < rgd->rd_length; x++) {
rgd               597 fs/gfs2/rgrp.c 		struct gfs2_bitmap *bi = rgd->rd_bits + x;
rgd               631 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd;
rgd               636 fs/gfs2/rgrp.c 	rgd = rs->rs_rbm.rgd;
rgd               638 fs/gfs2/rgrp.c 	rb_erase(&rs->rs_node, &rgd->rd_rstree);
rgd               644 fs/gfs2/rgrp.c 		struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
rgd               648 fs/gfs2/rgrp.c 		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
rgd               649 fs/gfs2/rgrp.c 		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
rgd               654 fs/gfs2/rgrp.c 		rgd->rd_extfail_pt += rs->rs_free;
rgd               673 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd;
rgd               675 fs/gfs2/rgrp.c 	rgd = rs->rs_rbm.rgd;
rgd               676 fs/gfs2/rgrp.c 	if (rgd) {
rgd               677 fs/gfs2/rgrp.c 		spin_lock(&rgd->rd_rsspin);
rgd               680 fs/gfs2/rgrp.c 		spin_unlock(&rgd->rd_rsspin);
rgd               707 fs/gfs2/rgrp.c static void return_all_reservations(struct gfs2_rgrpd *rgd)
rgd               712 fs/gfs2/rgrp.c 	spin_lock(&rgd->rd_rsspin);
rgd               713 fs/gfs2/rgrp.c 	while ((n = rb_first(&rgd->rd_rstree))) {
rgd               717 fs/gfs2/rgrp.c 	spin_unlock(&rgd->rd_rsspin);
rgd               723 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd;
rgd               727 fs/gfs2/rgrp.c 		rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
rgd               728 fs/gfs2/rgrp.c 		gl = rgd->rd_gl;
rgd               733 fs/gfs2/rgrp.c 			glock_clear_object(gl, rgd);
rgd               734 fs/gfs2/rgrp.c 			gfs2_rgrp_brelse(rgd);
rgd               738 fs/gfs2/rgrp.c 		gfs2_free_clones(rgd);
rgd               739 fs/gfs2/rgrp.c 		kfree(rgd->rd_bits);
rgd               740 fs/gfs2/rgrp.c 		rgd->rd_bits = NULL;
rgd               741 fs/gfs2/rgrp.c 		return_all_reservations(rgd);
rgd               742 fs/gfs2/rgrp.c 		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
rgd               746 fs/gfs2/rgrp.c static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
rgd               748 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
rgd               750 fs/gfs2/rgrp.c 	fs_info(sdp, "ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
rgd               751 fs/gfs2/rgrp.c 	fs_info(sdp, "ri_length = %u\n", rgd->rd_length);
rgd               752 fs/gfs2/rgrp.c 	fs_info(sdp, "ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
rgd               753 fs/gfs2/rgrp.c 	fs_info(sdp, "ri_data = %u\n", rgd->rd_data);
rgd               754 fs/gfs2/rgrp.c 	fs_info(sdp, "ri_bitbytes = %u\n", rgd->rd_bitbytes);
rgd               766 fs/gfs2/rgrp.c static int compute_bitstructs(struct gfs2_rgrpd *rgd)
rgd               768 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
rgd               770 fs/gfs2/rgrp.c 	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
rgd               777 fs/gfs2/rgrp.c 	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
rgd               778 fs/gfs2/rgrp.c 	if (!rgd->rd_bits)
rgd               781 fs/gfs2/rgrp.c 	bytes_left = rgd->rd_bitbytes;
rgd               784 fs/gfs2/rgrp.c 		bi = rgd->rd_bits + x;
rgd               805 fs/gfs2/rgrp.c 			bi->bi_start = rgd->rd_bitbytes - bytes_left;
rgd               813 fs/gfs2/rgrp.c 			bi->bi_start = rgd->rd_bitbytes - bytes_left;
rgd               822 fs/gfs2/rgrp.c 		gfs2_consist_rgrpd(rgd);
rgd               825 fs/gfs2/rgrp.c 	bi = rgd->rd_bits + (length - 1);
rgd               826 fs/gfs2/rgrp.c 	if ((bi->bi_start + bi->bi_bytes) * GFS2_NBBY != rgd->rd_data) {
rgd               827 fs/gfs2/rgrp.c 		if (gfs2_consist_rgrpd(rgd)) {
rgd               828 fs/gfs2/rgrp.c 			gfs2_rindex_print(rgd);
rgd               865 fs/gfs2/rgrp.c static int rgd_insert(struct gfs2_rgrpd *rgd)
rgd               867 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
rgd               876 fs/gfs2/rgrp.c 		if (rgd->rd_addr < cur->rd_addr)
rgd               878 fs/gfs2/rgrp.c 		else if (rgd->rd_addr > cur->rd_addr)
rgd               884 fs/gfs2/rgrp.c 	rb_link_node(&rgd->rd_node, parent, newn);
rgd               885 fs/gfs2/rgrp.c 	rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
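
rgd_insert() above places a newly read rindex entry into the per-superblock rbtree keyed
by rd_addr; gfs2_blk2rgrpd() later walks the same tree to map a block number back to its
resource group. A hedged reconstruction of the insertion (the -EEXIST duplicate handling
is how the elided "else" branch is assumed to end):

	static int rgd_insert(struct gfs2_rgrpd *rgd)
	{
		struct gfs2_sbd *sdp = rgd->rd_sbd;
		struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;

		while (*newn) {
			struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
							  rd_node);

			parent = *newn;
			if (rgd->rd_addr < cur->rd_addr)
				newn = &((*newn)->rb_left);
			else if (rgd->rd_addr > cur->rd_addr)
				newn = &((*newn)->rb_right);
			else
				return -EEXIST;	/* assumed: duplicate rd_addr */
		}

		rb_link_node(&rgd->rd_node, parent, newn);
		rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
		return 0;
	}
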
rgd               904 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd;
rgd               915 fs/gfs2/rgrp.c 	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
rgd               917 fs/gfs2/rgrp.c 	if (!rgd)
rgd               920 fs/gfs2/rgrp.c 	rgd->rd_sbd = sdp;
rgd               921 fs/gfs2/rgrp.c 	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
rgd               922 fs/gfs2/rgrp.c 	rgd->rd_length = be32_to_cpu(buf.ri_length);
rgd               923 fs/gfs2/rgrp.c 	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
rgd               924 fs/gfs2/rgrp.c 	rgd->rd_data = be32_to_cpu(buf.ri_data);
rgd               925 fs/gfs2/rgrp.c 	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
rgd               926 fs/gfs2/rgrp.c 	spin_lock_init(&rgd->rd_rsspin);
rgd               928 fs/gfs2/rgrp.c 	error = compute_bitstructs(rgd);
rgd               932 fs/gfs2/rgrp.c 	error = gfs2_glock_get(sdp, rgd->rd_addr,
rgd               933 fs/gfs2/rgrp.c 			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
rgd               937 fs/gfs2/rgrp.c 	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
rgd               938 fs/gfs2/rgrp.c 	rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
rgd               939 fs/gfs2/rgrp.c 	if (rgd->rd_data > sdp->sd_max_rg_data)
rgd               940 fs/gfs2/rgrp.c 		sdp->sd_max_rg_data = rgd->rd_data;
rgd               942 fs/gfs2/rgrp.c 	error = rgd_insert(rgd);
rgd               945 fs/gfs2/rgrp.c 		glock_set_object(rgd->rd_gl, rgd);
rgd               946 fs/gfs2/rgrp.c 		rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
rgd               947 fs/gfs2/rgrp.c 		rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
rgd               948 fs/gfs2/rgrp.c 						    rgd->rd_length) * bsize) - 1;
rgd               953 fs/gfs2/rgrp.c 	gfs2_glock_put(rgd->rd_gl);
rgd               956 fs/gfs2/rgrp.c 	kfree(rgd->rd_bits);
rgd               957 fs/gfs2/rgrp.c 	rgd->rd_bits = NULL;
rgd               958 fs/gfs2/rgrp.c 	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
rgd               972 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd, *first;
rgd               977 fs/gfs2/rgrp.c 	rgd = gfs2_rgrpd_get_first(sdp);
rgd               979 fs/gfs2/rgrp.c 		rgd = gfs2_rgrpd_get_next(rgd);
rgd               980 fs/gfs2/rgrp.c 	first = rgd;
rgd               983 fs/gfs2/rgrp.c 		rgd->rd_flags |= GFS2_RDF_PREFERRED;
rgd               985 fs/gfs2/rgrp.c 			rgd = gfs2_rgrpd_get_next(rgd);
rgd               986 fs/gfs2/rgrp.c 			if (!rgd || rgd == first)
rgd               989 fs/gfs2/rgrp.c 	} while (rgd && rgd != first);
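
Note that gfs2_rgrpd_get_next() (see the lines around fs/gfs2/rgrp.c line 566 above)
wraps around to the first resource group rather than returning NULL at the end of the
tree, so callers such as set_rgrp_preferences() must remember where they started.
Illustrative traversal idiom, with the per-rgrp work left as a hypothetical helper:

	/* Visit each resource group exactly once, in rd_addr order. */
	struct gfs2_rgrpd *rgd, *first;

	first = rgd = gfs2_rgrpd_get_first(sdp);
	do {
		do_something_with(rgd);		/* hypothetical per-rgrp work */
		rgd = gfs2_rgrpd_get_next(rgd);
	} while (rgd && rgd != first);		/* stop on wrap-around */
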
rgd              1059 fs/gfs2/rgrp.c static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
rgd              1066 fs/gfs2/rgrp.c 	rgd->rd_flags &= GFS2_RDF_MASK;
rgd              1067 fs/gfs2/rgrp.c 	rgd->rd_flags |= rg_flags;
rgd              1068 fs/gfs2/rgrp.c 	rgd->rd_free = be32_to_cpu(str->rg_free);
rgd              1069 fs/gfs2/rgrp.c 	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
rgd              1070 fs/gfs2/rgrp.c 	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
rgd              1086 fs/gfs2/rgrp.c static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
rgd              1088 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *next = gfs2_rgrpd_get_next(rgd);
rgd              1092 fs/gfs2/rgrp.c 	str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
rgd              1093 fs/gfs2/rgrp.c 	str->rg_free = cpu_to_be32(rgd->rd_free);
rgd              1094 fs/gfs2/rgrp.c 	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
rgd              1097 fs/gfs2/rgrp.c 	else if (next->rd_addr > rgd->rd_addr)
rgd              1098 fs/gfs2/rgrp.c 		str->rg_skip = cpu_to_be32(next->rd_addr - rgd->rd_addr);
rgd              1099 fs/gfs2/rgrp.c 	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
rgd              1100 fs/gfs2/rgrp.c 	str->rg_data0 = cpu_to_be64(rgd->rd_data0);
rgd              1101 fs/gfs2/rgrp.c 	str->rg_data = cpu_to_be32(rgd->rd_data);
rgd              1102 fs/gfs2/rgrp.c 	str->rg_bitbytes = cpu_to_be32(rgd->rd_bitbytes);
rgd              1108 fs/gfs2/rgrp.c 	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, buf);
rgd              1111 fs/gfs2/rgrp.c static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
rgd              1113 fs/gfs2/rgrp.c 	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
rgd              1114 fs/gfs2/rgrp.c 	struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
rgd              1115 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
rgd              1120 fs/gfs2/rgrp.c 			(unsigned long long)rgd->rd_addr,
rgd              1126 fs/gfs2/rgrp.c 			(unsigned long long)rgd->rd_addr,
rgd              1132 fs/gfs2/rgrp.c 			(unsigned long long)rgd->rd_addr,
rgd              1139 fs/gfs2/rgrp.c 			(unsigned long long)rgd->rd_addr,
rgd              1147 fs/gfs2/rgrp.c static u32 count_unlinked(struct gfs2_rgrpd *rgd)
rgd              1150 fs/gfs2/rgrp.c 	const u32 length = rgd->rd_length;
rgd              1154 fs/gfs2/rgrp.c 	for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
rgd              1182 fs/gfs2/rgrp.c static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
rgd              1184 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
rgd              1185 fs/gfs2/rgrp.c 	struct gfs2_glock *gl = rgd->rd_gl;
rgd              1186 fs/gfs2/rgrp.c 	unsigned int length = rgd->rd_length;
rgd              1191 fs/gfs2/rgrp.c 	if (rgd->rd_bits[0].bi_bh != NULL)
rgd              1195 fs/gfs2/rgrp.c 		bi = rgd->rd_bits + x;
rgd              1196 fs/gfs2/rgrp.c 		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
rgd              1202 fs/gfs2/rgrp.c 		bi = rgd->rd_bits + y;
rgd              1213 fs/gfs2/rgrp.c 	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
rgd              1215 fs/gfs2/rgrp.c 			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
rgd              1216 fs/gfs2/rgrp.c 		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
rgd              1217 fs/gfs2/rgrp.c 		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
rgd              1218 fs/gfs2/rgrp.c 		rgd->rd_free_clone = rgd->rd_free;
rgd              1220 fs/gfs2/rgrp.c 		rgd->rd_extfail_pt = rgd->rd_free;
rgd              1222 fs/gfs2/rgrp.c 	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
rgd              1223 fs/gfs2/rgrp.c 		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
rgd              1224 fs/gfs2/rgrp.c 		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
rgd              1225 fs/gfs2/rgrp.c 				     rgd->rd_bits[0].bi_bh->b_data);
rgd              1228 fs/gfs2/rgrp.c 		if (!gfs2_rgrp_lvb_valid(rgd)){
rgd              1229 fs/gfs2/rgrp.c 			gfs2_consist_rgrpd(rgd);
rgd              1233 fs/gfs2/rgrp.c 		if (rgd->rd_rgl->rl_unlinked == 0)
rgd              1234 fs/gfs2/rgrp.c 			rgd->rd_flags &= ~GFS2_RDF_CHECK;
rgd              1240 fs/gfs2/rgrp.c 		bi = rgd->rd_bits + x;
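
gfs2_rgrp_bh_get() above pulls the rgrp header and bitmap blocks into rd_bits[x].bi_bh
and, when the in-core state is stale, refreshes it from the on-disk header via
gfs2_rgrp_in(). A trimmed sketch of the read path (the wait-for-I/O pass, the error
unwinding, and the LVB handling visible above are omitted; illustrative only):

	/* Read the header + bitmap blocks of one resource group. */
	unsigned int x, length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	int error;

	if (rgd->rd_bits[0].bi_bh != NULL)	/* already cached */
		return 0;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		/* bitmap block x of this rgrp sits at disk block rd_addr + x */
		error = gfs2_meta_read(rgd->rd_gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
		if (error)
			return error;		/* real code releases buffers read so far */
	}

	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		gfs2_rgrp_in(rgd, rgd->rd_bits[0].bi_bh->b_data);
		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
		rgd->rd_free_clone = rgd->rd_free;
	}
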
rgd              1249 fs/gfs2/rgrp.c static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
rgd              1253 fs/gfs2/rgrp.c 	if (rgd->rd_flags & GFS2_RDF_UPTODATE)
rgd              1256 fs/gfs2/rgrp.c 	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
rgd              1257 fs/gfs2/rgrp.c 		return gfs2_rgrp_bh_get(rgd);
rgd              1259 fs/gfs2/rgrp.c 	rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
rgd              1261 fs/gfs2/rgrp.c 	rgd->rd_flags &= GFS2_RDF_MASK;
rgd              1262 fs/gfs2/rgrp.c 	rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK);
rgd              1263 fs/gfs2/rgrp.c 	if (rgd->rd_rgl->rl_unlinked == 0)
rgd              1264 fs/gfs2/rgrp.c 		rgd->rd_flags &= ~GFS2_RDF_CHECK;
rgd              1265 fs/gfs2/rgrp.c 	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
rgd              1266 fs/gfs2/rgrp.c 	rgd->rd_free_clone = rgd->rd_free;
rgd              1267 fs/gfs2/rgrp.c 	rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
rgd              1268 fs/gfs2/rgrp.c 	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
rgd              1274 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
rgd              1275 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
rgd              1279 fs/gfs2/rgrp.c 	return gfs2_rgrp_bh_get(rgd);
rgd              1288 fs/gfs2/rgrp.c void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
rgd              1290 fs/gfs2/rgrp.c 	int x, length = rgd->rd_length;
rgd              1293 fs/gfs2/rgrp.c 		struct gfs2_bitmap *bi = rgd->rd_bits + x;
rgd              1310 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
rgd              1314 fs/gfs2/rgrp.c 	if (rgd && demote_requested)
rgd              1315 fs/gfs2/rgrp.c 		gfs2_rgrp_brelse(rgd);
rgd              1399 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd;
rgd              1431 fs/gfs2/rgrp.c 	rgd = gfs2_blk2rgrpd(sdp, start, 0);
rgd              1440 fs/gfs2/rgrp.c 		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
rgd              1444 fs/gfs2/rgrp.c 		if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
rgd              1446 fs/gfs2/rgrp.c 			for (x = 0; x < rgd->rd_length; x++) {
rgd              1447 fs/gfs2/rgrp.c 				struct gfs2_bitmap *bi = rgd->rd_bits + x;
rgd              1449 fs/gfs2/rgrp.c 						rgd->rd_data0, NULL, bi, minlen,
rgd              1461 fs/gfs2/rgrp.c 				bh = rgd->rd_bits[0].bi_bh;
rgd              1462 fs/gfs2/rgrp.c 				rgd->rd_flags |= GFS2_RGF_TRIMMED;
rgd              1463 fs/gfs2/rgrp.c 				gfs2_trans_add_meta(rgd->rd_gl, bh);
rgd              1464 fs/gfs2/rgrp.c 				gfs2_rgrp_out(rgd, bh->b_data);
rgd              1470 fs/gfs2/rgrp.c 		if (rgd == rgd_end)
rgd              1473 fs/gfs2/rgrp.c 		rgd = gfs2_rgrpd_get_next(rgd);
rgd              1494 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
rgd              1499 fs/gfs2/rgrp.c 	spin_lock(&rgd->rd_rsspin);
rgd              1500 fs/gfs2/rgrp.c 	newn = &rgd->rd_rstree.rb_node;
rgd              1512 fs/gfs2/rgrp.c 			spin_unlock(&rgd->rd_rsspin);
rgd              1519 fs/gfs2/rgrp.c 	rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
rgd              1522 fs/gfs2/rgrp.c 	rgd->rd_reserved += rs->rs_free; /* blocks reserved */
rgd              1523 fs/gfs2/rgrp.c 	spin_unlock(&rgd->rd_rsspin);
rgd              1539 fs/gfs2/rgrp.c static inline u32 rgd_free(struct gfs2_rgrpd *rgd, struct gfs2_blkreserv *rs)
rgd              1543 fs/gfs2/rgrp.c 	if (WARN_ON_ONCE(rgd->rd_reserved < rs->rs_free))
rgd              1545 fs/gfs2/rgrp.c 	tot_reserved = rgd->rd_reserved - rs->rs_free;
rgd              1547 fs/gfs2/rgrp.c 	if (rgd->rd_free_clone < tot_reserved)
rgd              1550 fs/gfs2/rgrp.c 	tot_free = rgd->rd_free_clone - tot_reserved;
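
rgd_free() above computes how many blocks of the rgrp a given reservation may still
count on: the clone of the free-block counter minus whatever all other reservations in
the same rgrp have claimed. A hedged reconstruction from the lines shown (the early
return hands back 0 when the counters are obviously inconsistent):

	static inline u32 rgd_free(struct gfs2_rgrpd *rgd, struct gfs2_blkreserv *rs)
	{
		u32 tot_reserved, tot_free;

		if (WARN_ON_ONCE(rgd->rd_reserved < rs->rs_free))
			return 0;
		/* blocks reserved by everyone except rs itself */
		tot_reserved = rgd->rd_reserved - rs->rs_free;
		if (rgd->rd_free_clone < tot_reserved)
			tot_reserved = 0;
		tot_free = rgd->rd_free_clone - tot_reserved;
		return tot_free;
	}
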
rgd              1563 fs/gfs2/rgrp.c static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
rgd              1566 fs/gfs2/rgrp.c 	struct gfs2_rbm rbm = { .rgd = rgd, };
rgd              1570 fs/gfs2/rgrp.c 	u32 free_blocks = rgd_free(rgd, rs);
rgd              1580 fs/gfs2/rgrp.c 	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
rgd              1584 fs/gfs2/rgrp.c 	if (rgrp_contains_block(rgd, ip->i_goal))
rgd              1587 fs/gfs2/rgrp.c 		goal = rgd->rd_last_alloc + rgd->rd_data0;
rgd              1598 fs/gfs2/rgrp.c 		if (goal == rgd->rd_last_alloc + rgd->rd_data0)
rgd              1599 fs/gfs2/rgrp.c 			rgd->rd_last_alloc = 0;
rgd              1616 fs/gfs2/rgrp.c static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
rgd              1624 fs/gfs2/rgrp.c 	spin_lock(&rgd->rd_rsspin);
rgd              1625 fs/gfs2/rgrp.c 	n = rgd->rd_rstree.rb_node;
rgd              1647 fs/gfs2/rgrp.c 	spin_unlock(&rgd->rd_rsspin);
rgd              1691 fs/gfs2/rgrp.c 	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
rgd              1739 fs/gfs2/rgrp.c 	struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };
rgd              1787 fs/gfs2/rgrp.c 		if (rbm->bii == rbm->rgd->rd_length)
rgd              1810 fs/gfs2/rgrp.c 	    *minext < rbm->rgd->rd_extfail_pt)
rgd              1811 fs/gfs2/rgrp.c 		rbm->rgd->rd_extfail_pt = *minext;
rgd              1834 fs/gfs2/rgrp.c static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
rgd              1837 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
rgd              1842 fs/gfs2/rgrp.c 	struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
rgd              1886 fs/gfs2/rgrp.c 	rgd->rd_flags &= ~GFS2_RDF_CHECK;
rgd              1917 fs/gfs2/rgrp.c static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
rgd              1919 fs/gfs2/rgrp.c 	const struct gfs2_glock *gl = rgd->rd_gl;
rgd              1976 fs/gfs2/rgrp.c                             rs->rs_rbm.rgd->rd_gl->gl_dstamp));
rgd              1992 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd = *pos;
rgd              1993 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
rgd              1995 fs/gfs2/rgrp.c 	rgd = gfs2_rgrpd_get_next(rgd);
rgd              1996 fs/gfs2/rgrp.c 	if (rgd == NULL)
rgd              1997 fs/gfs2/rgrp.c 		rgd = gfs2_rgrpd_get_first(sdp);
rgd              1998 fs/gfs2/rgrp.c 	*pos = rgd;
rgd              1999 fs/gfs2/rgrp.c 	if (rgd != begin) /* If we didn't wrap */
rgd              2010 fs/gfs2/rgrp.c static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
rgd              2012 fs/gfs2/rgrp.c 	struct gfs2_glock *gl = rgd->rd_gl;
rgd              2018 fs/gfs2/rgrp.c 	if (rgd->rd_flags & GFS2_RDF_PREFERRED)
rgd              2054 fs/gfs2/rgrp.c 		begin = rs->rs_rbm.rgd;
rgd              2055 fs/gfs2/rgrp.c 	} else if (rs->rs_rbm.rgd &&
rgd              2056 fs/gfs2/rgrp.c 		   rgrp_contains_block(rs->rs_rbm.rgd, ip->i_goal)) {
rgd              2057 fs/gfs2/rgrp.c 		begin = rs->rs_rbm.rgd;
rgd              2060 fs/gfs2/rgrp.c 		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
rgd              2064 fs/gfs2/rgrp.c 	if (rs->rs_rbm.rgd == NULL)
rgd              2070 fs/gfs2/rgrp.c 		if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
rgd              2076 fs/gfs2/rgrp.c 				    !fast_to_acquire(rs->rs_rbm.rgd))
rgd              2080 fs/gfs2/rgrp.c 				    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
rgd              2083 fs/gfs2/rgrp.c 			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
rgd              2089 fs/gfs2/rgrp.c 			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
rgd              2092 fs/gfs2/rgrp.c 				error = update_rgrp_lvb(rs->rs_rbm.rgd);
rgd              2101 fs/gfs2/rgrp.c 		if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
rgd              2103 fs/gfs2/rgrp.c 		    (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
rgd              2107 fs/gfs2/rgrp.c 			gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
rgd              2111 fs/gfs2/rgrp.c 			rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
rgd              2118 fs/gfs2/rgrp.c 		free_blocks = rgd_free(rs->rs_rbm.rgd, rs);
rgd              2127 fs/gfs2/rgrp.c 		if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
rgd              2128 fs/gfs2/rgrp.c 			try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
rgd              2140 fs/gfs2/rgrp.c 		if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
rgd              2190 fs/gfs2/rgrp.c 	struct gfs2_rbm pos = { .rgd = rbm->rgd, };
rgd              2197 fs/gfs2/rgrp.c 	gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
rgd              2204 fs/gfs2/rgrp.c 		gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
rgd              2220 fs/gfs2/rgrp.c static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd,
rgd              2226 fs/gfs2/rgrp.c 	rbm.rgd = rgd;
rgd              2239 fs/gfs2/rgrp.c 			gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
rgd              2258 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd = gl->gl_object;
rgd              2262 fs/gfs2/rgrp.c 	if (rgd == NULL)
rgd              2266 fs/gfs2/rgrp.c 		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
rgd              2267 fs/gfs2/rgrp.c 		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
rgd              2268 fs/gfs2/rgrp.c 		       rgd->rd_reserved, rgd->rd_extfail_pt);
rgd              2269 fs/gfs2/rgrp.c 	if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
rgd              2270 fs/gfs2/rgrp.c 		struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
rgd              2277 fs/gfs2/rgrp.c 	spin_lock(&rgd->rd_rsspin);
rgd              2278 fs/gfs2/rgrp.c 	for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
rgd              2282 fs/gfs2/rgrp.c 	spin_unlock(&rgd->rd_rsspin);
rgd              2285 fs/gfs2/rgrp.c static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
rgd              2287 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
rgd              2291 fs/gfs2/rgrp.c 		(unsigned long long)rgd->rd_addr);
rgd              2294 fs/gfs2/rgrp.c 	gfs2_rgrp_dump(NULL, rgd->rd_gl, fs_id_buf);
rgd              2295 fs/gfs2/rgrp.c 	rgd->rd_flags |= GFS2_RDF_ERROR;
rgd              2313 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd = rbm->rgd;
rgd              2318 fs/gfs2/rgrp.c 	spin_lock(&rgd->rd_rsspin);
rgd              2325 fs/gfs2/rgrp.c 			rgd->rd_reserved -= rlen;
rgd              2336 fs/gfs2/rgrp.c 	spin_unlock(&rgd->rd_rsspin);
rgd              2360 fs/gfs2/rgrp.c 	if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
rgd              2363 fs/gfs2/rgrp.c 		goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
rgd              2387 fs/gfs2/rgrp.c 	struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rbm.rgd, };
rgd              2404 fs/gfs2/rgrp.c 			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
rgd              2405 fs/gfs2/rgrp.c 			rbm.rgd->rd_extfail_pt);
rgd              2411 fs/gfs2/rgrp.c 	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
rgd              2430 fs/gfs2/rgrp.c 	if (rbm.rgd->rd_free < *nblocks) {
rgd              2435 fs/gfs2/rgrp.c 	rbm.rgd->rd_free -= *nblocks;
rgd              2437 fs/gfs2/rgrp.c 		rbm.rgd->rd_dinodes++;
rgd              2438 fs/gfs2/rgrp.c 		*generation = rbm.rgd->rd_igeneration++;
rgd              2440 fs/gfs2/rgrp.c 			*generation = rbm.rgd->rd_igeneration++;
rgd              2443 fs/gfs2/rgrp.c 	gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
rgd              2444 fs/gfs2/rgrp.c 	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
rgd              2452 fs/gfs2/rgrp.c 	rbm.rgd->rd_free_clone -= *nblocks;
rgd              2453 fs/gfs2/rgrp.c 	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
rgd              2459 fs/gfs2/rgrp.c 	gfs2_rgrp_error(rbm.rgd);
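
gfs2_alloc_blocks() above finishes an allocation by recording the new position in
rd_last_alloc, adjusting the rgrp counters (rd_free, plus rd_dinodes/rd_igeneration for
a dinode), and journaling the updated header with gfs2_trans_add_meta() +
gfs2_rgrp_out(). A condensed sketch of that accounting step, with the reservation
trimming and the statfs/quota updates left out (illustrative only; the -EIO path mirrors
the gfs2_rgrp_error() call visible above):

	/* Post-allocation accounting on the resource group. */
	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;

	if (rbm.rgd->rd_free < *nblocks) {	/* counters out of sync with bitmap */
		gfs2_rgrp_error(rbm.rgd);
		return -EIO;
	}
	rbm.rgd->rd_free -= *nblocks;
	if (dinode) {
		rbm.rgd->rd_dinodes++;
		*generation = rbm.rgd->rd_igeneration++;
	}

	gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
	rbm.rgd->rd_free_clone -= *nblocks;
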
rgd              2473 fs/gfs2/rgrp.c void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
rgd              2478 fs/gfs2/rgrp.c 	rgblk_free(sdp, rgd, bstart, blen, GFS2_BLKST_FREE);
rgd              2479 fs/gfs2/rgrp.c 	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
rgd              2480 fs/gfs2/rgrp.c 	rgd->rd_free += blen;
rgd              2481 fs/gfs2/rgrp.c 	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
rgd              2482 fs/gfs2/rgrp.c 	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
rgd              2483 fs/gfs2/rgrp.c 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
rgd              2499 fs/gfs2/rgrp.c void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
rgd              2504 fs/gfs2/rgrp.c 	__gfs2_free_blocks(ip, rgd, bstart, blen, 1);
rgd              2513 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd;
rgd              2516 fs/gfs2/rgrp.c 	rgd = gfs2_blk2rgrpd(sdp, blkno, true);
rgd              2517 fs/gfs2/rgrp.c 	if (!rgd)
rgd              2519 fs/gfs2/rgrp.c 	rgblk_free(sdp, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
rgd              2520 fs/gfs2/rgrp.c 	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
rgd              2521 fs/gfs2/rgrp.c 	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
rgd              2522 fs/gfs2/rgrp.c 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
rgd              2523 fs/gfs2/rgrp.c 	be32_add_cpu(&rgd->rd_rgl->rl_unlinked, 1);
rgd              2526 fs/gfs2/rgrp.c void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
rgd              2528 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
rgd              2530 fs/gfs2/rgrp.c 	rgblk_free(sdp, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
rgd              2531 fs/gfs2/rgrp.c 	if (!rgd->rd_dinodes)
rgd              2532 fs/gfs2/rgrp.c 		gfs2_consist_rgrpd(rgd);
rgd              2533 fs/gfs2/rgrp.c 	rgd->rd_dinodes--;
rgd              2534 fs/gfs2/rgrp.c 	rgd->rd_free++;
rgd              2536 fs/gfs2/rgrp.c 	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
rgd              2537 fs/gfs2/rgrp.c 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
rgd              2538 fs/gfs2/rgrp.c 	be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1);
rgd              2541 fs/gfs2/rgrp.c 	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
rgd              2559 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd;
rgd              2564 fs/gfs2/rgrp.c 	rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
rgd              2565 fs/gfs2/rgrp.c 	if (!rgd)
rgd              2568 fs/gfs2/rgrp.c 	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
rgd              2572 fs/gfs2/rgrp.c 	rbm.rgd = rgd;
rgd              2601 fs/gfs2/rgrp.c 	struct gfs2_rgrpd *rgd;
rgd              2614 fs/gfs2/rgrp.c 		rgd = rlist->rl_rgd[rlist->rl_rgrps - 1];
rgd              2615 fs/gfs2/rgrp.c 		if (rgrp_contains_block(rgd, block))
rgd              2617 fs/gfs2/rgrp.c 		rgd = gfs2_blk2rgrpd(sdp, block, 1);
rgd              2619 fs/gfs2/rgrp.c 		rgd = ip->i_res.rs_rbm.rgd;
rgd              2620 fs/gfs2/rgrp.c 		if (!rgd || !rgrp_contains_block(rgd, block))
rgd              2621 fs/gfs2/rgrp.c 			rgd = gfs2_blk2rgrpd(sdp, block, 1);
rgd              2624 fs/gfs2/rgrp.c 	if (!rgd) {
rgd              2631 fs/gfs2/rgrp.c 		if (rlist->rl_rgd[x] == rgd) {
rgd              2654 fs/gfs2/rgrp.c 	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
rgd                25 fs/gfs2/rgrp.h extern void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd);
rgd                29 fs/gfs2/rgrp.h extern struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);
rgd                33 fs/gfs2/rgrp.h extern void gfs2_free_clones(struct gfs2_rgrpd *rgd);
rgd                35 fs/gfs2/rgrp.h extern void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd);
rgd                51 fs/gfs2/rgrp.h extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
rgd                53 fs/gfs2/rgrp.h extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
rgd                55 fs/gfs2/rgrp.h extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
rgd                85 fs/gfs2/rgrp.h static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
rgd                87 fs/gfs2/rgrp.h 	u64 first = rgd->rd_data0;
rgd                88 fs/gfs2/rgrp.h 	u64 last = first + rgd->rd_data;
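
The fs/gfs2/rgrp.h excerpt cuts rgrp_contains_block() off after the two bounds it
computes; the rest is a simple half-open range check against
[rd_data0, rd_data0 + rd_data). A hedged completion:

	static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
	{
		u64 first = rgd->rd_data0;		/* first data block of the rgrp */
		u64 last = first + rgd->rd_data;	/* one past the last data block */

		return first <= block && block < last;
	}
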
rgd               818 fs/gfs2/super.c static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
rgd               821 fs/gfs2/super.c 	gfs2_rgrp_verify(rgd);
rgd               822 fs/gfs2/super.c 	sc->sc_total += rgd->rd_data;
rgd               823 fs/gfs2/super.c 	sc->sc_free += rgd->rd_free;
rgd               824 fs/gfs2/super.c 	sc->sc_dinodes += rgd->rd_dinodes;
rgd               872 fs/gfs2/super.c 						struct gfs2_rgrpd *rgd =
rgd               875 fs/gfs2/super.c 						error = statfs_slow_fill(rgd, sc);
rgd              1157 fs/gfs2/super.c 	struct gfs2_rgrpd *rgd;
rgd              1174 fs/gfs2/super.c 	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
rgd              1175 fs/gfs2/super.c 	if (!rgd) {
rgd              1181 fs/gfs2/super.c 	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
rgd              1190 fs/gfs2/super.c 	gfs2_free_di(rgd, ip);
rgd               549 fs/gfs2/trace_gfs2.h 	TP_PROTO(const struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
rgd               552 fs/gfs2/trace_gfs2.h 	TP_ARGS(ip, rgd, block, len, block_state),
rgd               566 fs/gfs2/trace_gfs2.h 		__entry->dev		= rgd->rd_gl->gl_name.ln_sbd->sd_vfs->s_dev;
rgd               571 fs/gfs2/trace_gfs2.h 		__entry->rd_addr	= rgd->rd_addr;
rgd               572 fs/gfs2/trace_gfs2.h 		__entry->rd_free_clone	= rgd->rd_free_clone;
rgd               573 fs/gfs2/trace_gfs2.h 		__entry->rd_reserved	= rgd->rd_reserved;
rgd               605 fs/gfs2/trace_gfs2.h 		__entry->dev		= rs->rs_rbm.rgd->rd_sbd->sd_vfs->s_dev;
rgd               606 fs/gfs2/trace_gfs2.h 		__entry->rd_addr	= rs->rs_rbm.rgd->rd_addr;
rgd               607 fs/gfs2/trace_gfs2.h 		__entry->rd_free_clone	= rs->rs_rbm.rgd->rd_free_clone;
rgd               608 fs/gfs2/trace_gfs2.h 		__entry->rd_reserved	= rs->rs_rbm.rgd->rd_reserved;
rgd                30 fs/gfs2/trans.h 	struct gfs2_rgrpd *rgd = ip->i_res.rs_rbm.rgd;
rgd                32 fs/gfs2/trans.h 	if (requested < rgd->rd_length)
rgd                34 fs/gfs2/trans.h 	return rgd->rd_length;
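
The fs/gfs2/trans.h fragment is gfs2_rg_blocks(), which bounds how many rgrp bitmap
blocks a transaction needs to reserve: never more than the rgrp's rd_length. A hedged
sketch of its likely shape; the exact expression returned in the first branch is an
assumption:

	static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip,
						  unsigned requested)
	{
		struct gfs2_rgrpd *rgd = ip->i_res.rs_rbm.rgd;

		if (requested < rgd->rd_length)
			return requested + 1;	/* assumed: one bitmap block per block */
		return rgd->rd_length;
	}
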
rgd               177 fs/gfs2/util.c int gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd, int cluster_wide,
rgd               180 fs/gfs2/util.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
rgd               185 fs/gfs2/util.c 	gfs2_rgrp_dump(NULL, rgd->rd_gl, fs_id_buf);
rgd               190 fs/gfs2/util.c 			      (unsigned long long)rgd->rd_addr,
rgd                69 fs/gfs2/util.h int gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd, int cluster_wide,
rgd                72 fs/gfs2/util.h #define gfs2_consist_rgrpd(rgd) \
rgd                73 fs/gfs2/util.h gfs2_consist_rgrpd_i((rgd), 0, __func__, __FILE__, __LINE__)
rgd               229 fs/gfs2/xattr.c 	struct gfs2_rgrpd *rgd;
rgd               256 fs/gfs2/xattr.c 	rgd = gfs2_blk2rgrpd(sdp, bn, 1);
rgd               257 fs/gfs2/xattr.c 	if (!rgd) {
rgd               262 fs/gfs2/xattr.c 	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
rgd               266 fs/gfs2/xattr.c 	error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
rgd               283 fs/gfs2/xattr.c 				gfs2_free_meta(ip, rgd, bstart, blen);
rgd               292 fs/gfs2/xattr.c 		gfs2_free_meta(ip, rgd, bstart, blen);
rgd              1250 fs/gfs2/xattr.c 	struct gfs2_rgrpd *rgd;
rgd              1303 fs/gfs2/xattr.c 		rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
rgd              1304 fs/gfs2/xattr.c 		rg_blocks += rgd->rd_length;
rgd              1320 fs/gfs2/xattr.c 	rgd = NULL;
rgd              1334 fs/gfs2/xattr.c 				gfs2_free_meta(ip, rgd, bstart, blen);
rgd              1336 fs/gfs2/xattr.c 			rgd = gfs2_blk2rgrpd(sdp, bstart, true);
rgd              1344 fs/gfs2/xattr.c 		gfs2_free_meta(ip, rgd, bstart, blen);
rgd              1369 fs/gfs2/xattr.c 	struct gfs2_rgrpd *rgd;
rgd              1378 fs/gfs2/xattr.c 	rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1);
rgd              1379 fs/gfs2/xattr.c 	if (!rgd) {
rgd              1384 fs/gfs2/xattr.c 	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
rgd              1393 fs/gfs2/xattr.c 	gfs2_free_meta(ip, rgd, ip->i_eattr, 1);