rbm 920 drivers/block/drbd/drbd_req.c enum drbd_read_balancing rbm)
rbm 925 drivers/block/drbd/drbd_req.c switch (rbm) {
rbm 938 drivers/block/drbd/drbd_req.c stripe_shift = (rbm - RB_32K_STRIPING + 15);
rbm 1050 drivers/block/drbd/drbd_req.c enum drbd_read_balancing rbm;
rbm 1071 drivers/block/drbd/drbd_req.c rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
rbm 1074 drivers/block/drbd/drbd_req.c if (rbm == RB_PREFER_LOCAL && req->private_bio)
rbm 1077 drivers/block/drbd/drbd_req.c if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
rbm 1238 drivers/net/wireless/ath/wil6210/debugfs.c struct wil_rx_buff_mgmt *rbm = &wil->rx_buff_mgmt;
rbm 1242 drivers/net/wireless/ath/wil6210/debugfs.c if (!rbm->buff_arr)
rbm 1245 drivers/net/wireless/ath/wil6210/debugfs.c seq_printf(s, " size = %zu\n", rbm->size);
rbm 1247 drivers/net/wireless/ath/wil6210/debugfs.c rbm->free_list_empty_cnt);
rbm 1251 drivers/net/wireless/ath/wil6210/debugfs.c num_active = wil_print_rx_buff(s, &rbm->active);
rbm 1253 drivers/net/wireless/ath/wil6210/debugfs.c num_free = wil_print_rx_buff(s, &rbm->free);
rbm 130 fs/gfs2/incore.h static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
rbm 132 fs/gfs2/incore.h return rbm->rgd->rd_bits + rbm->bii;
rbm 135 fs/gfs2/incore.h static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
rbm 137 fs/gfs2/incore.h BUG_ON(rbm->offset >= rbm->rgd->rd_data);
rbm 138 fs/gfs2/incore.h return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
rbm 139 fs/gfs2/incore.h rbm->offset;
rbm 61 fs/gfs2/rgrp.c struct gfs2_rbm rbm;
rbm 73 fs/gfs2/rgrp.c static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
rbm 85 fs/gfs2/rgrp.c static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
rbm 89 fs/gfs2/rgrp.c struct gfs2_bitmap *bi = rbm_bi(rbm);
rbm 91 fs/gfs2/rgrp.c const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
rbm 93 fs/gfs2/rgrp.c byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
rbm 101 fs/gfs2/rgrp.c struct gfs2_sbd *sdp = rbm->rgd->rd_sbd;
rbm 104 fs/gfs2/rgrp.c rbm->offset, cur_state, new_state);
rbm 106 fs/gfs2/rgrp.c (unsigned long long)rbm->rgd->rd_addr, bi->bi_start,
rbm 110 fs/gfs2/rgrp.c (unsigned long long)gfs2_rbm_to_block(rbm));
rbm 112 fs/gfs2/rgrp.c gfs2_consist_rgrpd(rbm->rgd);
rbm 118 fs/gfs2/rgrp.c byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
rbm 135 fs/gfs2/rgrp.c static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm, bool use_clone)
rbm 137 fs/gfs2/rgrp.c struct gfs2_bitmap *bi = rbm_bi(rbm);
rbm 147 fs/gfs2/rgrp.c byte = buffer + (rbm->offset / GFS2_NBBY);
rbm 148 fs/gfs2/rgrp.c bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
rbm 271 fs/gfs2/rgrp.c static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
rbm 273 fs/gfs2/rgrp.c if (!rgrp_contains_block(rbm->rgd, block))
rbm 275 fs/gfs2/rgrp.c rbm->bii = 0;
rbm 276 fs/gfs2/rgrp.c rbm->offset = block - rbm->rgd->rd_data0;
rbm 278 fs/gfs2/rgrp.c if (rbm->offset < rbm_bi(rbm)->bi_blocks)
rbm 282 fs/gfs2/rgrp.c rbm->offset += (sizeof(struct gfs2_rgrp) -
rbm 284 fs/gfs2/rgrp.c rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
rbm 285 fs/gfs2/rgrp.c rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
rbm 301 fs/gfs2/rgrp.c static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
rbm 303 fs/gfs2/rgrp.c if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
rbm 304 fs/gfs2/rgrp.c rbm->offset++;
rbm 307 fs/gfs2/rgrp.c if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
rbm 310 fs/gfs2/rgrp.c rbm->offset = 0;
rbm 311 fs/gfs2/rgrp.c rbm->bii++;
rbm 324 fs/gfs2/rgrp.c static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
rbm 330 fs/gfs2/rgrp.c res = gfs2_testbit(rbm, true);
rbm 336 fs/gfs2/rgrp.c if (gfs2_rbm_incr(rbm))
rbm 360 fs/gfs2/rgrp.c struct gfs2_rbm rbm = *rrbm;
rbm 361 fs/gfs2/rgrp.c u32 n_unaligned = rbm.offset & 3;
rbm 370 fs/gfs2/rgrp.c gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
rbm 376 fs/gfs2/rgrp.c bi = rbm_bi(&rbm);
rbm 382 fs/gfs2/rgrp.c BUG_ON(rbm.offset & 3);
rbm 383 fs/gfs2/rgrp.c start += (rbm.offset / GFS2_NBBY);
rbm 390 fs/gfs2/rgrp.c block = gfs2_rbm_to_block(&rbm);
rbm 391 fs/gfs2/rgrp.c if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
rbm 404 fs/gfs2/rgrp.c gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
rbm 1566 fs/gfs2/rgrp.c struct gfs2_rbm rbm = { .rgd = rgd, };
rbm 1589 fs/gfs2/rgrp.c if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
rbm 1592 fs/gfs2/rgrp.c ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true);
rbm 1594 fs/gfs2/rgrp.c rs->rs_rbm = rbm;
rbm 1667 fs/gfs2/rgrp.c static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
rbm 1672 fs/gfs2/rgrp.c u64 block = gfs2_rbm_to_block(rbm);
rbm 1682 fs/gfs2/rgrp.c extlen = gfs2_free_extlen(rbm, minext);
rbm 1691 fs/gfs2/rgrp.c nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
rbm 1698 fs/gfs2/rgrp.c maxext->rbm = *rbm;
rbm 1703 fs/gfs2/rgrp.c ret = gfs2_rbm_from_block(rbm, nblock);
rbm 1728 fs/gfs2/rgrp.c static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
rbm 1731 fs/gfs2/rgrp.c bool scan_from_start = rbm->bii == 0 && rbm->offset == 0;
rbm 1739 fs/gfs2/rgrp.c struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };
rbm 1746 fs/gfs2/rgrp.c last_bii = rbm->bii - (rbm->offset == 0);
rbm 1749 fs/gfs2/rgrp.c bi = rbm_bi(rbm);
rbm 1760 fs/gfs2/rgrp.c offset = gfs2_bitfit(buffer, bi->bi_bytes, rbm->offset, state);
rbm 1762 fs/gfs2/rgrp.c if (state == GFS2_BLKST_FREE && rbm->offset == 0)
rbm 1766 fs/gfs2/rgrp.c rbm->offset = offset;
rbm 1770 fs/gfs2/rgrp.c ret = gfs2_reservation_check_and_update(rbm, ip,
rbm 1778 fs/gfs2/rgrp.c rbm->bii = 0;
rbm 1779 fs/gfs2/rgrp.c rbm->offset = 0;
rbm 1785 fs/gfs2/rgrp.c rbm->offset = 0;
rbm 1786 fs/gfs2/rgrp.c rbm->bii++;
rbm 1787 fs/gfs2/rgrp.c if (rbm->bii == rbm->rgd->rd_length)
rbm 1788 fs/gfs2/rgrp.c rbm->bii = 0;
rbm 1790 fs/gfs2/rgrp.c if (rbm->bii == 0) {
rbm 1799 fs/gfs2/rgrp.c if (wrapped && rbm->bii > last_bii)
rbm 1809 fs/gfs2/rgrp.c if (wrapped && (scan_from_start || rbm->bii > last_bii) &&
rbm 1810 fs/gfs2/rgrp.c *minext < rbm->rgd->rd_extfail_pt)
rbm 1811 fs/gfs2/rgrp.c rbm->rgd->rd_extfail_pt = *minext;
rbm 1816 fs/gfs2/rgrp.c *rbm = maxext.rbm;
rbm 1842 fs/gfs2/rgrp.c struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
rbm 1846 fs/gfs2/rgrp.c error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
rbm 1854 fs/gfs2/rgrp.c block = gfs2_rbm_to_block(&rbm);
rbm 1855 fs/gfs2/rgrp.c if (gfs2_rbm_from_block(&rbm, block + 1))
rbm 2187 fs/gfs2/rgrp.c static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
rbm 2190 fs/gfs2/rgrp.c struct gfs2_rbm pos = { .rgd = rbm->rgd, };
rbm 2196 fs/gfs2/rgrp.c block = gfs2_rbm_to_block(rbm);
rbm 2197 fs/gfs2/rgrp.c gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
rbm 2198 fs/gfs2/rgrp.c gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
rbm 2223 fs/gfs2/rgrp.c struct gfs2_rbm rbm;
rbm 2226 fs/gfs2/rgrp.c rbm.rgd = rgd;
rbm 2227 fs/gfs2/rgrp.c if (WARN_ON_ONCE(gfs2_rbm_from_block(&rbm, bstart)))
rbm 2230 fs/gfs2/rgrp.c bi = rbm_bi(&rbm);
rbm 2239 fs/gfs2/rgrp.c gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
rbm 2242 fs/gfs2/rgrp.c gfs2_setbit(&rbm, false, new_state);
rbm 2243 fs/gfs2/rgrp.c gfs2_rbm_incr(&rbm);
rbm 2310 fs/gfs2/rgrp.c const struct gfs2_rbm *rbm, unsigned len)
rbm 2313 fs/gfs2/rgrp.c struct gfs2_rgrpd *rgd = rbm->rgd;
rbm 2320 fs/gfs2/rgrp.c if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
rbm 2321 fs/gfs2/rgrp.c block = gfs2_rbm_to_block(rbm);
rbm 2350 fs/gfs2/rgrp.c static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
rbm 2356 fs/gfs2/rgrp.c *rbm = ip->i_res.rs_rbm;
rbm 2360 fs/gfs2/rgrp.c if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
rbm 2363 fs/gfs2/rgrp.c goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
rbm 2365 fs/gfs2/rgrp.c if (WARN_ON_ONCE(gfs2_rbm_from_block(rbm, goal))) {
rbm 2366 fs/gfs2/rgrp.c rbm->bii = 0;
rbm 2367 fs/gfs2/rgrp.c rbm->offset = 0;
rbm 2387 fs/gfs2/rgrp.c struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rbm.rgd, };
rbm 2392 fs/gfs2/rgrp.c gfs2_set_alloc_start(&rbm, ip, dinode);
rbm 2393 fs/gfs2/rgrp.c error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false);
rbm 2396 fs/gfs2/rgrp.c gfs2_set_alloc_start(&rbm, ip, dinode);
rbm 2397 fs/gfs2/rgrp.c error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false);
rbm 2404 fs/gfs2/rgrp.c test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
rbm 2405 fs/gfs2/rgrp.c rbm.rgd->rd_extfail_pt);
rbm 2409 fs/gfs2/rgrp.c gfs2_alloc_extent(&rbm, dinode, nblocks);
rbm 2410 fs/gfs2/rgrp.c block = gfs2_rbm_to_block(&rbm);
rbm 2411 fs/gfs2/rgrp.c rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
rbm 2413 fs/gfs2/rgrp.c gfs2_adjust_reservation(ip, &rbm, *nblocks);
rbm 2430 fs/gfs2/rgrp.c if (rbm.rgd->rd_free < *nblocks) {
rbm 2435 fs/gfs2/rgrp.c rbm.rgd->rd_free -= *nblocks;
rbm 2437 fs/gfs2/rgrp.c rbm.rgd->rd_dinodes++;
rbm 2438 fs/gfs2/rgrp.c *generation = rbm.rgd->rd_igeneration++;
rbm 2440 fs/gfs2/rgrp.c *generation = rbm.rgd->rd_igeneration++;
rbm 2443 fs/gfs2/rgrp.c gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
rbm 2444 fs/gfs2/rgrp.c gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
rbm 2452 fs/gfs2/rgrp.c rbm.rgd->rd_free_clone -= *nblocks;
rbm 2453 fs/gfs2/rgrp.c trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
rbm 2459 fs/gfs2/rgrp.c gfs2_rgrp_error(rbm.rgd);
rbm 2561 fs/gfs2/rgrp.c struct gfs2_rbm rbm;
rbm 2572 fs/gfs2/rgrp.c rbm.rgd = rgd;
rbm 2573 fs/gfs2/rgrp.c error = gfs2_rbm_from_block(&rbm, no_addr);
rbm 2577 fs/gfs2/rgrp.c if (gfs2_testbit(&rbm, false) != type)
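
Most of the hits above are the GFS2 struct gfs2_rbm, which names a block by resource group (rgd), bitmap index (bii), and offset within that bitmap; the snippets from fs/gfs2/incore.h show the conversion back to a filesystem block number, and fs/gfs2/rgrp.c shows the increment across bitmaps. The following is a minimal standalone sketch of that arithmetic, not kernel code: the simplified structs, the two-bitmap geometry, and main() are assumptions made only to illustrate the mapping seen in the listing.

/* Illustrative sketch of the rbm <-> block arithmetic (cf. gfs2_rbm_to_block
 * and gfs2_rbm_incr above); simplified structs and example geometry are
 * assumptions, not the kernel definitions. */
#include <stdint.h>
#include <stdio.h>

#define GFS2_NBBY 4 /* blocks represented per bitmap byte (2 state bits each) */

struct bitmap { uint32_t bi_start; uint32_t bi_blocks; };       /* cf. gfs2_bitmap */
struct rgrp   { uint64_t rd_data0; struct bitmap rd_bits[2]; }; /* cf. gfs2_rgrpd */
struct rbm    { struct rgrp *rgd; uint32_t bii; uint32_t offset; }; /* cf. gfs2_rbm */

/* Filesystem block = start of the rgrp's data + blocks covered by the
 * preceding bitmaps (bi_start bytes * GFS2_NBBY) + offset in this bitmap. */
static uint64_t rbm_to_block(const struct rbm *rbm)
{
	const struct bitmap *bi = &rbm->rgd->rd_bits[rbm->bii];
	return rbm->rgd->rd_data0 + (uint64_t)bi->bi_start * GFS2_NBBY + rbm->offset;
}

/* Step to the next block, moving into the next bitmap when the current one
 * is exhausted; returns nonzero at the end of the last bitmap. */
static int rbm_incr(struct rbm *rbm, unsigned nbitmaps)
{
	if (rbm->offset + 1 < rbm->rgd->rd_bits[rbm->bii].bi_blocks) {
		rbm->offset++;
		return 0;
	}
	if (rbm->bii == nbitmaps - 1)   /* at the last bitmap */
		return 1;
	rbm->offset = 0;
	rbm->bii++;
	return 0;
}

int main(void)
{
	/* Assumed geometry: two bitmaps of 8 blocks each (2 bytes apart),
	 * with the rgrp's data starting at block 100. */
	struct rgrp rgd = { .rd_data0 = 100,
			    .rd_bits = { { .bi_start = 0, .bi_blocks = 8 },
					 { .bi_start = 2, .bi_blocks = 8 } } };
	struct rbm rbm = { .rgd = &rgd, .bii = 0, .offset = 7 };

	printf("block %llu\n", (unsigned long long)rbm_to_block(&rbm)); /* 107 */
	rbm_incr(&rbm, 2); /* crosses into the second bitmap */
	printf("block %llu\n", (unsigned long long)rbm_to_block(&rbm)); /* 108 */
	return 0;
}

The in-kernel gfs2_rbm_from_block() shown at rgrp.c line 271 performs the inverse of this mapping, additionally accounting for the on-disk rgrp and meta headers when the offset falls beyond the first bitmap.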