rl                 73 arch/arm/vfp/vfp.h 	u64 rh, rma, rmb, rl;
rl                 77 arch/arm/vfp/vfp.h 	rl = (u64)nl * ml;
rl                 90 arch/arm/vfp/vfp.h 	rl += rma;
rl                 91 arch/arm/vfp/vfp.h 	rh += (rl < rma);
rl                 93 arch/arm/vfp/vfp.h 	*resl = rl;
rl                105 arch/arm/vfp/vfp.h 	u64 rh, rl;
rl                106 arch/arm/vfp/vfp.h 	mul64to128(&rh, &rl, n, m);
rl                107 arch/arm/vfp/vfp.h 	return rh | (rl != 0);
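
The vfp.h hits above implement a schoolbook 64x64 -> 128-bit multiply. The following is a minimal plain-C sketch of the same structure (not the kernel's exact code): split each operand into 32-bit halves, form the four partial products, and detect carry out of the low word with the "sum < addend" comparison seen in the rl/rh updates.

#include <stdint.h>

/* Illustrative sketch of a mul64to128()-style widening multiply. */
static void mul64to128_sketch(uint64_t *resh, uint64_t *resl,
			      uint64_t n, uint64_t m)
{
	uint32_t nh = n >> 32, nl = n, mh = m >> 32, ml = m;
	uint64_t rh  = (uint64_t)nh * mh;	/* high * high */
	uint64_t rma = (uint64_t)nh * ml;	/* middle products */
	uint64_t rmb = (uint64_t)nl * mh;
	uint64_t rl  = (uint64_t)nl * ml;	/* low * low */

	/* fold the two middle products into the high/low halves */
	rh += rma >> 32;
	rh += rmb >> 32;
	rma <<= 32;
	rmb <<= 32;

	rl += rma;
	rh += (rl < rma);	/* carry out of the low 64 bits */
	rl += rmb;
	rh += (rl < rmb);

	*resh = rh;
	*resl = rl;
}
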
rl                197 arch/mips/include/asm/octeon/cvmx-pciercx-defs.h 		__BITFIELD_FIELD(uint32_t rl:1,
rl                171 arch/riscv/include/asm/bitops.h 	__op_bit_ord(and, __NOT, nr, addr, .rl);
rl                211 arch/riscv/net/bpf_jit_comp.c static u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1,
rl                214 arch/riscv/net/bpf_jit_comp.c 	u8 funct7 = (funct5 << 2) | (aq << 1) | rl;
rl                454 arch/riscv/net/bpf_jit_comp.c static u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
rl                456 arch/riscv/net/bpf_jit_comp.c 	return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f);
rl                459 arch/riscv/net/bpf_jit_comp.c static u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
rl                461 arch/riscv/net/bpf_jit_comp.c 	return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f);
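
rv_amo_insn() above packs the acquire/release ordering bits next to the 5-bit AMO function code before emitting a standard R-type instruction word. A small illustration of that encoding follows; the field layout is the standard RISC-V R-type format and the helper names are hypothetical, not taken from bpf_jit_comp.c.

#include <stdint.h>

/* R-type word: funct7[31:25] rs2[24:20] rs1[19:15] funct3[14:12] rd[11:7] opcode[6:0] */
static uint32_t rv_r_insn_sketch(uint8_t funct7, uint8_t rs2, uint8_t rs1,
				 uint8_t funct3, uint8_t rd, uint8_t opcode)
{
	return ((uint32_t)funct7 << 25) | ((uint32_t)rs2 << 20) |
	       ((uint32_t)rs1 << 15) | ((uint32_t)funct3 << 12) |
	       ((uint32_t)rd << 7) | opcode;
}

/* amoadd.w rd, rs2, (rs1) with aq/rl ordering bits, mirroring rv_amoadd_w() above */
static uint32_t rv_amoadd_w_sketch(uint8_t rd, uint8_t rs2, uint8_t rs1,
				   uint8_t aq, uint8_t rl)
{
	uint8_t funct7 = (0 << 2) | (aq << 1) | rl;	/* funct5 = 0 for AMOADD */

	return rv_r_insn_sketch(funct7, rs2, rs1, 2, rd, 0x2f);
}
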
rl                475 arch/s390/kvm/guestdbg.c 			s32 rl = *((s32 *) (opcode + 1));
rl                478 arch/s390/kvm/guestdbg.c 			*addr += (u64)(s64) rl * 2;
rl               1226 block/sed-opal.c 				     bool rl, bool wl)
rl               1248 block/sed-opal.c 	add_token_u8(&err, dev, rl);
rl                327 crypto/camellia_generic.c #define ROLDQ(ll, lr, rl, rr, w0, w1, bits) ({		\
rl                330 crypto/camellia_generic.c 	lr = (lr << bits) + (rl >> (32 - bits));	\
rl                331 crypto/camellia_generic.c 	rl = (rl << bits) + (rr >> (32 - bits));	\
rl                335 crypto/camellia_generic.c #define ROLDQo32(ll, lr, rl, rr, w0, w1, bits) ({	\
rl                338 crypto/camellia_generic.c 	ll = (lr << (bits - 32)) + (rl >> (64 - bits));	\
rl                339 crypto/camellia_generic.c 	lr = (rl << (bits - 32)) + (rr >> (64 - bits));	\
rl                340 crypto/camellia_generic.c 	rl = (rr << (bits - 32)) + (w0 >> (64 - bits));	\
rl                819 crypto/camellia_generic.c #define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, t1, t2, t3) ({ \
rl                824 crypto/camellia_generic.c 	rl ^= t2;							\
rl                828 crypto/camellia_generic.c 	t3 &= rl;							\
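
ROLDQ()/ROLDQo32() above rotate a 128-bit quantity held in four 32-bit words. Below is a self-contained sketch of the small-shift case (0 < bits < 32), equivalent in effect to ROLDQ() though written as a function rather than a macro; the name is hypothetical.

#include <stdint.h>

/* Rotate the 128-bit value (ll:lr:rl:rr, most significant word first) left by bits. */
static void rol128_sketch(uint32_t *ll, uint32_t *lr, uint32_t *rl, uint32_t *rr,
			  unsigned int bits)
{
	uint32_t w = *ll;	/* the bits that wrap around to the low end */

	*ll = (*ll << bits) | (*lr >> (32 - bits));
	*lr = (*lr << bits) | (*rl >> (32 - bits));
	*rl = (*rl << bits) | (*rr >> (32 - bits));
	*rr = (*rr << bits) | (w   >> (32 - bits));
}
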
rl                118 crypto/crypto_user_base.c 		struct crypto_report_larval rl;
rl                120 crypto/crypto_user_base.c 		memset(&rl, 0, sizeof(rl));
rl                121 crypto/crypto_user_base.c 		strscpy(rl.type, "larval", sizeof(rl.type));
rl                122 crypto/crypto_user_base.c 		if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(rl), &rl))
rl                198 crypto/crypto_user_stat.c 		struct crypto_stat_larval rl;
rl                200 crypto/crypto_user_stat.c 		memset(&rl, 0, sizeof(rl));
rl                201 crypto/crypto_user_stat.c 		strscpy(rl.type, "larval", sizeof(rl.type));
rl                202 crypto/crypto_user_stat.c 		if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl))
rl                103 crypto/vmac.c  #define ADD128(rh, rl, ih, il)						\
rl                106 crypto/vmac.c  		(rl) += (_il);						\
rl                107 crypto/vmac.c  		if ((rl) < (_il))					\
rl                114 crypto/vmac.c  #define PMUL64(rh, rl, i1, i2)	/* Assumes m doesn't overflow */	\
rl                119 crypto/vmac.c  		rl = MUL32(_i1, _i2);					\
rl                120 crypto/vmac.c  		ADD128(rh, rl, (m >> 32), (m << 32));			\
rl                123 crypto/vmac.c  #define MUL64(rh, rl, i1, i2)						\
rl                129 crypto/vmac.c  		rl = MUL32(_i1, _i2);					\
rl                130 crypto/vmac.c  		ADD128(rh, rl, (m1 >> 32), (m1 << 32));			\
rl                131 crypto/vmac.c  		ADD128(rh, rl, (m2 >> 32), (m2 << 32));			\
rl                148 crypto/vmac.c  #define nh_16(mp, kp, nw, rh, rl)					\
rl                151 crypto/vmac.c  		rh = rl = 0;						\
rl                155 crypto/vmac.c  			ADD128(rh, rl, th, tl);				\
rl                159 crypto/vmac.c  #define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1)				\
rl                162 crypto/vmac.c  		rh1 = rl1 = rh = rl = 0;				\
rl                166 crypto/vmac.c  			ADD128(rh, rl, th, tl);				\
rl                174 crypto/vmac.c  #define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
rl                177 crypto/vmac.c  		rh = rl = 0;						\
rl                181 crypto/vmac.c  			ADD128(rh, rl, th, tl);				\
rl                184 crypto/vmac.c  			ADD128(rh, rl, th, tl);				\
rl                187 crypto/vmac.c  			ADD128(rh, rl, th, tl);				\
rl                190 crypto/vmac.c  			ADD128(rh, rl, th, tl);				\
rl                194 crypto/vmac.c  #define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1)			\
rl                197 crypto/vmac.c  		rh1 = rl1 = rh = rl = 0;				\
rl                201 crypto/vmac.c  			ADD128(rh, rl, th, tl);				\
rl                207 crypto/vmac.c  			ADD128(rh, rl, th, tl);				\
rl                213 crypto/vmac.c  			ADD128(rh, rl, th, tl);				\
rl                219 crypto/vmac.c  			ADD128(rh, rl, th, tl);				\
rl                253 crypto/vmac.c  #define nh_16(mp, kp, nw, rh, rl)					\
rl                257 crypto/vmac.c  		rh = rl = t = 0;					\
rl                263 crypto/vmac.c  			ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32),	\
rl                269 crypto/vmac.c  		ADD128(rh, rl, (t >> 32), (t << 32));			\
rl                340 crypto/vmac.c  #define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2)				\
rl                342 crypto/vmac.c  		nh_16(mp, kp, nw, rh, rl);				\
rl                347 crypto/vmac.c  #define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
rl                348 crypto/vmac.c  	nh_16(mp, kp, nw, rh, rl)
rl                351 crypto/vmac.c  #define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2)			\
rl                353 crypto/vmac.c  		nh_vmac_nhbytes(mp, kp, nw, rh, rl);			\
rl                360 crypto/vmac.c  	u64 rh, rl, t, z = 0;
rl                385 crypto/vmac.c  	MUL64(rh, rl, p1, p2);
rl                387 crypto/vmac.c  	ADD128(t, rl, z, rh);
rl                389 crypto/vmac.c  	ADD128(t, rl, z, rh);
rl                391 crypto/vmac.c  	rl += t;
rl                392 crypto/vmac.c  	rl += (0 - (rl < t)) & 257;
rl                393 crypto/vmac.c  	rl += (0 - (rl > p64-1)) & 257;
rl                394 crypto/vmac.c  	return rl;
rl                407 crypto/vmac.c  	u64 rh, rl;
rl                411 crypto/vmac.c  		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rl                413 crypto/vmac.c  		ADD128(ch, cl, rh, rl);
rl                419 crypto/vmac.c  		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rl                421 crypto/vmac.c  		poly_step(ch, cl, pkh, pkl, rh, rl);
rl                547 crypto/vmac.c  		u64 rh, rl;
rl                550 crypto/vmac.c  		nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
rl                554 crypto/vmac.c  				  rh, rl);
rl                556 crypto/vmac.c  			ADD128(ch, cl, rh, rl);
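
Nearly every vmac.c hit above funnels through ADD128(), which accumulates 128-bit values in an (rh, rl) pair of u64s. A standalone sketch of that carry-propagating add (illustrative, not the macro itself):

#include <stdint.h>

/* Add the 128-bit value (ih:il) into the accumulator (*rh:*rl). */
static void add128_sketch(uint64_t *rh, uint64_t *rl, uint64_t ih, uint64_t il)
{
	*rl += il;
	if (*rl < il)		/* unsigned wrap means carry into the high word */
		(*rh)++;
	*rh += ih;
}
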
rl               1116 drivers/block/drbd/drbd_main.c 	unsigned long rl;
rl               1149 drivers/block/drbd/drbd_main.c 		rl = tmp - c->bit_offset;
rl               1152 drivers/block/drbd/drbd_main.c 			if (rl == 0) {
rl               1165 drivers/block/drbd/drbd_main.c 		if (rl == 0) {
rl               1171 drivers/block/drbd/drbd_main.c 		bits = vli_encode_bits(&bs, rl);
rl               1180 drivers/block/drbd/drbd_main.c 		plain_bits += rl;
rl               4738 drivers/block/drbd/drbd_receiver.c 	u64 rl;
rl               4752 drivers/block/drbd/drbd_receiver.c 	for (have = bits; have > 0; s += rl, toggle = !toggle) {
rl               4753 drivers/block/drbd/drbd_receiver.c 		bits = vli_decode_bits(&rl, look_ahead);
rl               4758 drivers/block/drbd/drbd_receiver.c 			e = s + rl -1;
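
The drbd lines above exchange the sync bitmap as run lengths of alternating clear/set bits, each run additionally variable-length coded (the VLI part is omitted here). A toy encoder showing just the run-length idea; by this sketch's convention the first run counts clear bits and may have length zero.

#include <stdint.h>
#include <stddef.h>

static int bit_at(const uint8_t *map, size_t i)
{
	return (map[i >> 3] >> (i & 7)) & 1;
}

/* Returns the number of runs written to runs[]. */
static size_t rle_encode_sketch(const uint8_t *map, size_t nbits,
				uint64_t *runs, size_t max_runs)
{
	size_t i = 0, n = 0;
	int cur = 0;		/* first run counts 0-bits, possibly empty */

	while (i < nbits && n < max_runs) {
		uint64_t rl = 0;

		while (i < nbits && bit_at(map, i) == cur) {
			rl++;
			i++;
		}
		runs[n++] = rl;
		cur = !cur;
	}
	return n;
}
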
rl                799 drivers/hwmon/asc7621.c #define PREAD(name, n, pri, rm, rl, m, s, r) \
rl                801 drivers/hwmon/asc7621.c 	  .priority = pri, .msb[0] = rm, .lsb[0] = rl, .mask[0] = m, \
rl                804 drivers/hwmon/asc7621.c #define PWRITE(name, n, pri, rm, rl, m, s, r) \
rl                806 drivers/hwmon/asc7621.c 	  .priority = pri, .msb[0] = rm, .lsb[0] = rl, .mask[0] = m, \
rl                813 drivers/hwmon/asc7621.c #define PWRITEM(name, n, pri, rm, rl, m, s, r) \
rl                815 drivers/hwmon/asc7621.c 	  .priority = pri, .msb = rm, .lsb = rl, .mask = m, .shift = s,}
rl                439 drivers/infiniband/hw/mlx5/mlx5_ib.h 	struct mlx5_rate_limit	rl;
rl                 93 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_rate_limit rl;
rl               3196 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_rate_limit old_rl = ibqp->rl;
rl               3221 drivers/infiniband/hw/mlx5/qp.c 			new_rl = raw_qp_param->rl;
rl               3259 drivers/infiniband/hw/mlx5/qp.c 	ibqp->rl = new_rl;
rl               3663 drivers/infiniband/hw/mlx5/qp.c 			raw_qp_param.rl.rate = attr->rate_limit;
rl               3668 drivers/infiniband/hw/mlx5/qp.c 					raw_qp_param.rl.max_burst_sz =
rl               3679 drivers/infiniband/hw/mlx5/qp.c 					raw_qp_param.rl.typical_pkt_sz =
rl                 79 drivers/lightnvm/pblk-cache.c 	pblk_rl_inserted(&pblk->rl, nr_entries);
rl                422 drivers/lightnvm/pblk-core.c 			pblk_rl_werr_line_in(&pblk->rl);
rl               1235 drivers/lightnvm/pblk-core.c 	pblk_rl_free_lines_dec(&pblk->rl, line, true);
rl               1356 drivers/lightnvm/pblk-core.c 	pblk_rl_free_lines_dec(&pblk->rl, line, false);
rl               1366 drivers/lightnvm/pblk-core.c 	struct pblk_rl *rl = &pblk->rl;
rl               1368 drivers/lightnvm/pblk-core.c 	atomic_set(&rl->rb_space, 0);
rl               1431 drivers/lightnvm/pblk-core.c 	pblk_rl_free_lines_dec(&pblk->rl, line, true);
rl               1593 drivers/lightnvm/pblk-core.c 	pblk_rl_free_lines_dec(&pblk->rl, new, true);
rl               1637 drivers/lightnvm/pblk-core.c 		pblk_rl_werr_line_out(&pblk->rl);
rl               1649 drivers/lightnvm/pblk-core.c 	pblk_rl_free_lines_inc(&pblk->rl, line);
rl               1738 drivers/lightnvm/pblk-core.c 	if (pblk_rl_is_limit(&pblk->rl))
rl                394 drivers/lightnvm/pblk-gc.c static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
rl                397 drivers/lightnvm/pblk-gc.c 	unsigned int werr_lines = atomic_read(&rl->werr_lines);
rl                399 drivers/lightnvm/pblk-gc.c 	nr_blocks_need = pblk_rl_high_thrs(rl);
rl                400 drivers/lightnvm/pblk-gc.c 	nr_blocks_free = pblk_rl_nr_free_blks(rl);
rl                455 drivers/lightnvm/pblk-gc.c 	run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
rl                491 drivers/lightnvm/pblk-gc.c 		run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
rl                496 drivers/lightnvm/pblk-gc.c 	if (!prev_group && pblk->rl.rb_state > gc_group &&
rl                588 drivers/lightnvm/pblk-gc.c 	pblk_rl_update_rates(&pblk->rl);
rl                 73 drivers/lightnvm/pblk-init.c 		if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
rl                689 drivers/lightnvm/pblk-init.c 	pblk->rl.total_blocks = nr_free_chks;
rl                698 drivers/lightnvm/pblk-init.c 	atomic_set(&pblk->rl.free_blocks, nr_free_chks);
rl                699 drivers/lightnvm/pblk-init.c 	atomic_set(&pblk->rl.free_user_blocks, nr_free_chks);
rl               1118 drivers/lightnvm/pblk-init.c 	pblk_rl_free(&pblk->rl);
rl                165 drivers/lightnvm/pblk-rb.c 	pblk_rl_init(&pblk->rl, rb->nr_entries, threshold);
rl                269 drivers/lightnvm/pblk-rb.c 	pblk_rl_out(&pblk->rl, user_io, gc_io);
rl                501 drivers/lightnvm/pblk-rb.c 	io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
rl                512 drivers/lightnvm/pblk-rb.c 	pblk_rl_user_in(&pblk->rl, nr_entries);
rl                527 drivers/lightnvm/pblk-rb.c 	if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) {
rl                537 drivers/lightnvm/pblk-rb.c 	pblk_rl_gc_in(&pblk->rl, nr_entries);
rl                 22 drivers/lightnvm/pblk-rl.c static void pblk_rl_kick_u_timer(struct pblk_rl *rl)
rl                 24 drivers/lightnvm/pblk-rl.c 	mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000));
rl                 27 drivers/lightnvm/pblk-rl.c int pblk_rl_is_limit(struct pblk_rl *rl)
rl                 31 drivers/lightnvm/pblk-rl.c 	rb_space = atomic_read(&rl->rb_space);
rl                 36 drivers/lightnvm/pblk-rl.c int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
rl                 38 drivers/lightnvm/pblk-rl.c 	int rb_user_cnt = atomic_read(&rl->rb_user_cnt);
rl                 39 drivers/lightnvm/pblk-rl.c 	int rb_space = atomic_read(&rl->rb_space);
rl                 44 drivers/lightnvm/pblk-rl.c 	if (rb_user_cnt >= rl->rb_user_max)
rl                 50 drivers/lightnvm/pblk-rl.c void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)
rl                 52 drivers/lightnvm/pblk-rl.c 	int rb_space = atomic_read(&rl->rb_space);
rl                 55 drivers/lightnvm/pblk-rl.c 		atomic_sub(nr_entries, &rl->rb_space);
rl                 58 drivers/lightnvm/pblk-rl.c int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
rl                 60 drivers/lightnvm/pblk-rl.c 	int rb_gc_cnt = atomic_read(&rl->rb_gc_cnt);
rl                 64 drivers/lightnvm/pblk-rl.c 	rb_user_active = READ_ONCE(rl->rb_user_active);
rl                 65 drivers/lightnvm/pblk-rl.c 	return (!(rb_gc_cnt >= rl->rb_gc_max && rb_user_active));
rl                 68 drivers/lightnvm/pblk-rl.c void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
rl                 70 drivers/lightnvm/pblk-rl.c 	atomic_add(nr_entries, &rl->rb_user_cnt);
rl                 73 drivers/lightnvm/pblk-rl.c 	smp_store_release(&rl->rb_user_active, 1);
rl                 74 drivers/lightnvm/pblk-rl.c 	pblk_rl_kick_u_timer(rl);
rl                 77 drivers/lightnvm/pblk-rl.c void pblk_rl_werr_line_in(struct pblk_rl *rl)
rl                 79 drivers/lightnvm/pblk-rl.c 	atomic_inc(&rl->werr_lines);
rl                 82 drivers/lightnvm/pblk-rl.c void pblk_rl_werr_line_out(struct pblk_rl *rl)
rl                 84 drivers/lightnvm/pblk-rl.c 	atomic_dec(&rl->werr_lines);
rl                 87 drivers/lightnvm/pblk-rl.c void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)
rl                 89 drivers/lightnvm/pblk-rl.c 	atomic_add(nr_entries, &rl->rb_gc_cnt);
rl                 92 drivers/lightnvm/pblk-rl.c void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc)
rl                 94 drivers/lightnvm/pblk-rl.c 	atomic_sub(nr_user, &rl->rb_user_cnt);
rl                 95 drivers/lightnvm/pblk-rl.c 	atomic_sub(nr_gc, &rl->rb_gc_cnt);
rl                 98 drivers/lightnvm/pblk-rl.c unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
rl                100 drivers/lightnvm/pblk-rl.c 	return atomic_read(&rl->free_blocks);
rl                103 drivers/lightnvm/pblk-rl.c unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
rl                105 drivers/lightnvm/pblk-rl.c 	return atomic_read(&rl->free_user_blocks);
rl                108 drivers/lightnvm/pblk-rl.c static void __pblk_rl_update_rates(struct pblk_rl *rl,
rl                111 drivers/lightnvm/pblk-rl.c 	struct pblk *pblk = container_of(rl, struct pblk, rl);
rl                112 drivers/lightnvm/pblk-rl.c 	int max = rl->rb_budget;
rl                113 drivers/lightnvm/pblk-rl.c 	int werr_gc_needed = atomic_read(&rl->werr_lines);
rl                115 drivers/lightnvm/pblk-rl.c 	if (free_blocks >= rl->high) {
rl                120 drivers/lightnvm/pblk-rl.c 			rl->rb_gc_max = 1 << rl->rb_windows_pw;
rl                121 drivers/lightnvm/pblk-rl.c 			rl->rb_user_max = max - rl->rb_gc_max;
rl                122 drivers/lightnvm/pblk-rl.c 			rl->rb_state = PBLK_RL_WERR;
rl                124 drivers/lightnvm/pblk-rl.c 			rl->rb_user_max = max;
rl                125 drivers/lightnvm/pblk-rl.c 			rl->rb_gc_max = 0;
rl                126 drivers/lightnvm/pblk-rl.c 			rl->rb_state = PBLK_RL_OFF;
rl                128 drivers/lightnvm/pblk-rl.c 	} else if (free_blocks < rl->high) {
rl                129 drivers/lightnvm/pblk-rl.c 		int shift = rl->high_pw - rl->rb_windows_pw;
rl                133 drivers/lightnvm/pblk-rl.c 		rl->rb_user_max = user_max;
rl                134 drivers/lightnvm/pblk-rl.c 		rl->rb_gc_max = max - user_max;
rl                136 drivers/lightnvm/pblk-rl.c 		if (free_blocks <= rl->rsv_blocks) {
rl                137 drivers/lightnvm/pblk-rl.c 			rl->rb_user_max = 0;
rl                138 drivers/lightnvm/pblk-rl.c 			rl->rb_gc_max = max;
rl                145 drivers/lightnvm/pblk-rl.c 		rl->rb_state = PBLK_RL_LOW;
rl                148 drivers/lightnvm/pblk-rl.c 	if (rl->rb_state != PBLK_RL_OFF)
rl                154 drivers/lightnvm/pblk-rl.c void pblk_rl_update_rates(struct pblk_rl *rl)
rl                156 drivers/lightnvm/pblk-rl.c 	__pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
rl                159 drivers/lightnvm/pblk-rl.c void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
rl                164 drivers/lightnvm/pblk-rl.c 	atomic_add(blk_in_line, &rl->free_blocks);
rl                165 drivers/lightnvm/pblk-rl.c 	free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);
rl                167 drivers/lightnvm/pblk-rl.c 	__pblk_rl_update_rates(rl, free_blocks);
rl                170 drivers/lightnvm/pblk-rl.c void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
rl                176 drivers/lightnvm/pblk-rl.c 	atomic_sub(blk_in_line, &rl->free_blocks);
rl                180 drivers/lightnvm/pblk-rl.c 							&rl->free_user_blocks);
rl                182 drivers/lightnvm/pblk-rl.c 		free_blocks = atomic_read(&rl->free_user_blocks);
rl                184 drivers/lightnvm/pblk-rl.c 	__pblk_rl_update_rates(rl, free_blocks);
rl                187 drivers/lightnvm/pblk-rl.c int pblk_rl_high_thrs(struct pblk_rl *rl)
rl                189 drivers/lightnvm/pblk-rl.c 	return rl->high;
rl                192 drivers/lightnvm/pblk-rl.c int pblk_rl_max_io(struct pblk_rl *rl)
rl                194 drivers/lightnvm/pblk-rl.c 	return rl->rb_max_io;
rl                199 drivers/lightnvm/pblk-rl.c 	struct pblk_rl *rl = from_timer(rl, t, u_timer);
rl                202 drivers/lightnvm/pblk-rl.c 	smp_store_release(&rl->rb_user_active, 0);
rl                205 drivers/lightnvm/pblk-rl.c void pblk_rl_free(struct pblk_rl *rl)
rl                207 drivers/lightnvm/pblk-rl.c 	del_timer(&rl->u_timer);
rl                210 drivers/lightnvm/pblk-rl.c void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold)
rl                212 drivers/lightnvm/pblk-rl.c 	struct pblk *pblk = container_of(rl, struct pblk, rl);
rl                224 drivers/lightnvm/pblk-rl.c 	rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
rl                225 drivers/lightnvm/pblk-rl.c 	rl->high_pw = get_count_order(rl->high);
rl                227 drivers/lightnvm/pblk-rl.c 	rl->rsv_blocks = pblk_get_min_chks(pblk);
rl                231 drivers/lightnvm/pblk-rl.c 	rl->rb_windows_pw = get_count_order(rb_windows);
rl                234 drivers/lightnvm/pblk-rl.c 	rl->rb_budget = budget;
rl                235 drivers/lightnvm/pblk-rl.c 	rl->rb_user_max = budget;
rl                236 drivers/lightnvm/pblk-rl.c 	rl->rb_gc_max = 0;
rl                237 drivers/lightnvm/pblk-rl.c 	rl->rb_state = PBLK_RL_HIGH;
rl                241 drivers/lightnvm/pblk-rl.c 		rl->rb_max_io = budget - pblk->min_write_pgs_data - threshold;
rl                243 drivers/lightnvm/pblk-rl.c 		rl->rb_max_io = budget - pblk->min_write_pgs_data - 1;
rl                245 drivers/lightnvm/pblk-rl.c 	atomic_set(&rl->rb_user_cnt, 0);
rl                246 drivers/lightnvm/pblk-rl.c 	atomic_set(&rl->rb_gc_cnt, 0);
rl                247 drivers/lightnvm/pblk-rl.c 	atomic_set(&rl->rb_space, -1);
rl                248 drivers/lightnvm/pblk-rl.c 	atomic_set(&rl->werr_lines, 0);
rl                250 drivers/lightnvm/pblk-rl.c 	timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);
rl                252 drivers/lightnvm/pblk-rl.c 	rl->rb_user_active = 0;
rl                253 drivers/lightnvm/pblk-rl.c 	rl->rb_gc_active = 0;
rl                 57 drivers/lightnvm/pblk-sysfs.c 	free_blocks = pblk_rl_nr_free_blks(&pblk->rl);
rl                 58 drivers/lightnvm/pblk-sysfs.c 	free_user_blocks = pblk_rl_nr_user_free_blks(&pblk->rl);
rl                 59 drivers/lightnvm/pblk-sysfs.c 	rb_user_max = pblk->rl.rb_user_max;
rl                 60 drivers/lightnvm/pblk-sysfs.c 	rb_user_cnt = atomic_read(&pblk->rl.rb_user_cnt);
rl                 61 drivers/lightnvm/pblk-sysfs.c 	rb_gc_max = pblk->rl.rb_gc_max;
rl                 62 drivers/lightnvm/pblk-sysfs.c 	rb_gc_cnt = atomic_read(&pblk->rl.rb_gc_cnt);
rl                 63 drivers/lightnvm/pblk-sysfs.c 	rb_budget = pblk->rl.rb_budget;
rl                 64 drivers/lightnvm/pblk-sysfs.c 	rb_state = pblk->rl.rb_state;
rl                 66 drivers/lightnvm/pblk-sysfs.c 	total_blocks = pblk->rl.total_blocks;
rl                 76 drivers/lightnvm/pblk-sysfs.c 				pblk->rl.high,
rl                 80 drivers/lightnvm/pblk-sysfs.c 				READ_ONCE(pblk->rl.rb_user_active));
rl                623 drivers/lightnvm/pblk.h 	struct pblk_rl rl;
rl                902 drivers/lightnvm/pblk.h void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold);
rl                903 drivers/lightnvm/pblk.h void pblk_rl_free(struct pblk_rl *rl);
rl                904 drivers/lightnvm/pblk.h void pblk_rl_update_rates(struct pblk_rl *rl);
rl                905 drivers/lightnvm/pblk.h int pblk_rl_high_thrs(struct pblk_rl *rl);
rl                906 drivers/lightnvm/pblk.h unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
rl                907 drivers/lightnvm/pblk.h unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
rl                908 drivers/lightnvm/pblk.h int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
rl                909 drivers/lightnvm/pblk.h void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
rl                910 drivers/lightnvm/pblk.h void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
rl                911 drivers/lightnvm/pblk.h int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
rl                912 drivers/lightnvm/pblk.h void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
rl                913 drivers/lightnvm/pblk.h void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
rl                914 drivers/lightnvm/pblk.h int pblk_rl_max_io(struct pblk_rl *rl);
rl                915 drivers/lightnvm/pblk.h void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
rl                916 drivers/lightnvm/pblk.h void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
rl                918 drivers/lightnvm/pblk.h int pblk_rl_is_limit(struct pblk_rl *rl);
rl                920 drivers/lightnvm/pblk.h void pblk_rl_werr_line_in(struct pblk_rl *rl);
rl                921 drivers/lightnvm/pblk.h void pblk_rl_werr_line_out(struct pblk_rl *rl);
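
The pblk rate limiter referenced throughout the hits above splits a fixed ring-buffer budget between user writes and garbage collection according to how many free blocks remain. The toy model below captures that policy only in outline (it is not pblk's exact window/power-of-two arithmetic) and uses hypothetical names.

struct toy_rl {
	int budget;	/* ring-buffer entries available per window */
	int high;	/* free-block count above which GC is off */
	int rsv;	/* reserve below which user writes stop */
	int user_max;
	int gc_max;
};

static void toy_rl_update(struct toy_rl *rl, int free_blocks)
{
	if (free_blocks >= rl->high) {
		rl->user_max = rl->budget;	/* plenty of space: all user */
		rl->gc_max = 0;
	} else if (free_blocks <= rl->rsv) {
		rl->user_max = 0;		/* reserve reached: GC only */
		rl->gc_max = rl->budget;
	} else {
		/* linear split: user share proportional to free blocks */
		rl->user_max = (long long)rl->budget * free_blocks / rl->high;
		rl->gc_max = rl->budget - rl->user_max;
	}
}
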
rl               1163 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	u32 rl;
rl               1178 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
rl               1179 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	if (rl != cmd->rx_coalesce_usecs_high) {
rl               1182 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 			    cmd->rx_coalesce_usecs_high, rl);
rl               1393 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_rate_limit rl = {0};
rl               1399 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rl.rate = sq->rate_limit;
rl               1400 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_rl_remove_rate(mdev, &rl);
rl               1750 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_rate_limit rl = {0};
rl               1759 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rl.rate = sq->rate_limit;
rl               1761 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_rl_remove_rate(mdev, &rl);
rl               1767 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rl.rate = rate;
rl               1768 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
rl               1786 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			mlx5_rl_remove_rate(mdev, &rl);
rl                110 drivers/net/ethernet/mellanox/mlx5/core/rl.c 					   struct mlx5_rate_limit *rl)
rl                117 drivers/net/ethernet/mellanox/mlx5/core/rl.c 		if (mlx5_rl_are_equal(&table->rl_entry[i].rl, rl))
rl                119 drivers/net/ethernet/mellanox/mlx5/core/rl.c 		if (!empty_found && !table->rl_entry[i].rl.rate) {
rl                130 drivers/net/ethernet/mellanox/mlx5/core/rl.c 				      struct mlx5_rate_limit *rl)
rl                138 drivers/net/ethernet/mellanox/mlx5/core/rl.c 	MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rl->rate);
rl                139 drivers/net/ethernet/mellanox/mlx5/core/rl.c 	MLX5_SET(set_pp_rate_limit_in, in, burst_upper_bound, rl->max_burst_sz);
rl                140 drivers/net/ethernet/mellanox/mlx5/core/rl.c 	MLX5_SET(set_pp_rate_limit_in, in, typical_packet_size, rl->typical_pkt_sz);
rl                162 drivers/net/ethernet/mellanox/mlx5/core/rl.c 		     struct mlx5_rate_limit *rl)
rl                170 drivers/net/ethernet/mellanox/mlx5/core/rl.c 	if (!rl->rate || !mlx5_rl_is_in_range(dev, rl->rate)) {
rl                172 drivers/net/ethernet/mellanox/mlx5/core/rl.c 			      rl->rate, table->min_rate, table->max_rate);
rl                177 drivers/net/ethernet/mellanox/mlx5/core/rl.c 	entry = find_rl_entry(table, rl);
rl                189 drivers/net/ethernet/mellanox/mlx5/core/rl.c 		err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl);
rl                192 drivers/net/ethernet/mellanox/mlx5/core/rl.c 				      err, rl->rate, rl->max_burst_sz,
rl                193 drivers/net/ethernet/mellanox/mlx5/core/rl.c 				      rl->typical_pkt_sz);
rl                196 drivers/net/ethernet/mellanox/mlx5/core/rl.c 		entry->rl = *rl;
rl                207 drivers/net/ethernet/mellanox/mlx5/core/rl.c void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
rl                214 drivers/net/ethernet/mellanox/mlx5/core/rl.c 	if (rl->rate == 0)
rl                218 drivers/net/ethernet/mellanox/mlx5/core/rl.c 	entry = find_rl_entry(table, rl);
rl                221 drivers/net/ethernet/mellanox/mlx5/core/rl.c 			       rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
rl                229 drivers/net/ethernet/mellanox/mlx5/core/rl.c 		entry->rl = reset_rl;
rl                276 drivers/net/ethernet/mellanox/mlx5/core/rl.c 	struct mlx5_rate_limit rl = {0};
rl                281 drivers/net/ethernet/mellanox/mlx5/core/rl.c 		if (table->rl_entry[i].rl.rate)
rl                283 drivers/net/ethernet/mellanox/mlx5/core/rl.c 						   &rl);
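
find_rl_entry() above reuses hardware rate-limit slots: identical rates share one slot via a reference count, and a zero rate marks a free slot. A sketch of that lookup with hypothetical names; the real table is locked and programs the firmware when a slot is first used.

#include <stddef.h>

struct toy_rl_entry {
	unsigned int rate;	/* 0 means the slot is free */
	unsigned int refcount;
};

static struct toy_rl_entry *toy_find_rl(struct toy_rl_entry *tab, int n,
					unsigned int rate)
{
	struct toy_rl_entry *empty = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (tab[i].rate == rate)
			return &tab[i];		/* reuse the matching slot */
		if (!empty && !tab[i].rate)
			empty = &tab[i];	/* remember the first free slot */
	}
	return empty;				/* NULL if the table is full */
}
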
rl                293 drivers/net/ethernet/microchip/enc28j60.c 	int rl, rh;
rl                296 drivers/net/ethernet/microchip/enc28j60.c 	rl = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address);
rl                299 drivers/net/ethernet/microchip/enc28j60.c 	return (rh << 8) | rl;
rl                201 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c #define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
rl                203 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c 	((rl_valid) << 22) | ((rl) << 24))
rl                 95 drivers/nvme/host/trace.c 	u16 rl = get_unaligned_le16(cdw10 + 12);
rl                 99 drivers/nvme/host/trace.c 			slba, mndw, rl, atype);
rl                 42 drivers/nvme/target/trace.c 	u16 rl = get_unaligned_le16(cdw10 + 12);
rl                 46 drivers/nvme/target/trace.c 			slba, mndw, rl, atype);
rl                 41 drivers/s390/scsi/zfcp_reqlist.h 	struct zfcp_reqlist *rl;
rl                 43 drivers/s390/scsi/zfcp_reqlist.h 	rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL);
rl                 44 drivers/s390/scsi/zfcp_reqlist.h 	if (!rl)
rl                 47 drivers/s390/scsi/zfcp_reqlist.h 	spin_lock_init(&rl->lock);
rl                 50 drivers/s390/scsi/zfcp_reqlist.h 		INIT_LIST_HEAD(&rl->buckets[i]);
rl                 52 drivers/s390/scsi/zfcp_reqlist.h 	return rl;
rl                 61 drivers/s390/scsi/zfcp_reqlist.h static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl)
rl                 66 drivers/s390/scsi/zfcp_reqlist.h 		if (!list_empty(&rl->buckets[i]))
rl                 75 drivers/s390/scsi/zfcp_reqlist.h static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl)
rl                 78 drivers/s390/scsi/zfcp_reqlist.h 	BUG_ON(!zfcp_reqlist_isempty(rl));
rl                 80 drivers/s390/scsi/zfcp_reqlist.h 	kfree(rl);
rl                 84 drivers/s390/scsi/zfcp_reqlist.h _zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
rl                 90 drivers/s390/scsi/zfcp_reqlist.h 	list_for_each_entry(req, &rl->buckets[i], list)
rl                105 drivers/s390/scsi/zfcp_reqlist.h zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
rl                110 drivers/s390/scsi/zfcp_reqlist.h 	spin_lock_irqsave(&rl->lock, flags);
rl                111 drivers/s390/scsi/zfcp_reqlist.h 	req = _zfcp_reqlist_find(rl, req_id);
rl                112 drivers/s390/scsi/zfcp_reqlist.h 	spin_unlock_irqrestore(&rl->lock, flags);
rl                130 drivers/s390/scsi/zfcp_reqlist.h zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
rl                135 drivers/s390/scsi/zfcp_reqlist.h 	spin_lock_irqsave(&rl->lock, flags);
rl                136 drivers/s390/scsi/zfcp_reqlist.h 	req = _zfcp_reqlist_find(rl, req_id);
rl                139 drivers/s390/scsi/zfcp_reqlist.h 	spin_unlock_irqrestore(&rl->lock, flags);
rl                154 drivers/s390/scsi/zfcp_reqlist.h static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
rl                162 drivers/s390/scsi/zfcp_reqlist.h 	spin_lock_irqsave(&rl->lock, flags);
rl                163 drivers/s390/scsi/zfcp_reqlist.h 	list_add_tail(&req->list, &rl->buckets[i]);
rl                164 drivers/s390/scsi/zfcp_reqlist.h 	spin_unlock_irqrestore(&rl->lock, flags);
rl                172 drivers/s390/scsi/zfcp_reqlist.h static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
rl                178 drivers/s390/scsi/zfcp_reqlist.h 	spin_lock_irqsave(&rl->lock, flags);
rl                180 drivers/s390/scsi/zfcp_reqlist.h 		list_splice_init(&rl->buckets[i], list);
rl                181 drivers/s390/scsi/zfcp_reqlist.h 	spin_unlock_irqrestore(&rl->lock, flags);
rl                198 drivers/s390/scsi/zfcp_reqlist.h zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl,
rl                205 drivers/s390/scsi/zfcp_reqlist.h 	spin_lock_irqsave(&rl->lock, flags);
rl                207 drivers/s390/scsi/zfcp_reqlist.h 		list_for_each_entry(req, &rl->buckets[i], list)
rl                209 drivers/s390/scsi/zfcp_reqlist.h 	spin_unlock_irqrestore(&rl->lock, flags);
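
The zfcp_reqlist helpers above keep outstanding requests hashed by request id into a small array of lists, so a lookup by id only scans one bucket. A minimal single-threaded sketch of that structure (locking omitted; the real code holds rl->lock around every access, and the names below are hypothetical):

#include <stddef.h>

#define SKETCH_BUCKETS 16

struct sketch_req {
	unsigned long req_id;
	struct sketch_req *next;
};

struct sketch_reqlist {
	struct sketch_req *buckets[SKETCH_BUCKETS];
};

static struct sketch_req *sketch_reqlist_find(struct sketch_reqlist *rl,
					      unsigned long req_id)
{
	struct sketch_req *req;

	for (req = rl->buckets[req_id % SKETCH_BUCKETS]; req; req = req->next)
		if (req->req_id == req_id)
			return req;
	return NULL;
}
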
rl                151 drivers/slimbus/messaging.c 		unsigned long ms = txn->rl + HZ;
rl                260 drivers/slimbus/messaging.c 		txn->rl += msg->num_bytes;
rl                266 drivers/slimbus/messaging.c 		txn->rl++;
rl                331 drivers/slimbus/qcom-ctrl.c 	unsigned long ms = txn->rl + HZ;
rl                337 drivers/slimbus/qcom-ctrl.c 	txn->rl--;
rl                356 drivers/slimbus/qcom-ctrl.c 		*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
rl                360 drivers/slimbus/qcom-ctrl.c 		*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
rl                376 drivers/slimbus/qcom-ctrl.c 	qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
rl                789 drivers/slimbus/qcom-ngd-ctrl.c 			txn->rl > SLIM_MSGQ_BUF_LEN) {
rl                794 drivers/slimbus/qcom-ngd-ctrl.c 	pbuf = qcom_slim_ngd_tx_msg_get(ctrl, txn->rl, &tx_sent);
rl                839 drivers/slimbus/qcom-ngd-ctrl.c 		txn->rl = txn->msg->num_bytes + 4;
rl                843 drivers/slimbus/qcom-ngd-ctrl.c 	txn->rl--;
rl                847 drivers/slimbus/qcom-ngd-ctrl.c 		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 0,
rl                851 drivers/slimbus/qcom-ngd-ctrl.c 		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 1,
rl                867 drivers/slimbus/qcom-ngd-ctrl.c 	ret = qcom_slim_ngd_tx_msg_post(ctrl, pbuf, txn->rl);
rl                969 drivers/slimbus/qcom-ngd-ctrl.c 	txn.rl = txn.msg->num_bytes + 4;
rl                981 drivers/slimbus/qcom-ngd-ctrl.c 	txn.rl = txn.msg->num_bytes + 4;
rl               1016 drivers/slimbus/qcom-ngd-ctrl.c 	txn.rl = 11;
rl               1171 drivers/slimbus/qcom-ngd-ctrl.c 	txn.rl = 8;
rl                 97 drivers/slimbus/sched.c 	txn.rl = 4;
rl                105 drivers/slimbus/sched.c 	txn.rl = 3;
rl                128 drivers/slimbus/slimbus.h 	u8			rl;
rl                140 drivers/slimbus/slimbus.h #define DEFINE_SLIM_LDEST_TXN(name, mc, rl, la, msg) \
rl                141 drivers/slimbus/slimbus.h 	struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_LOGICALADDR, 0,\
rl                144 drivers/slimbus/slimbus.h #define DEFINE_SLIM_BCAST_TXN(name, mc, rl, la, msg) \
rl                145 drivers/slimbus/slimbus.h 	struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_BROADCAST, 0,\
rl                148 drivers/slimbus/slimbus.h #define DEFINE_SLIM_EDEST_TXN(name, mc, rl, la, msg) \
rl                149 drivers/slimbus/slimbus.h 	struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_ENUMADDR, 0,\
rl                480 drivers/staging/olpc_dcon/olpc_dcon.c 	unsigned short rl;
rl                483 drivers/staging/olpc_dcon/olpc_dcon.c 	rc = kstrtou16(buf, 10, &rl);
rl                487 drivers/staging/olpc_dcon/olpc_dcon.c 	resumeline = rl;
rl                540 drivers/usb/isp1760/isp1760-hcd.c 	u32 rl = RL_COUNTER;
rl                582 drivers/usb/isp1760/isp1760-hcd.c 		rl = 0;
rl                593 drivers/usb/isp1760/isp1760-hcd.c 	ptd->dw2 |= TO_DW2_RL(rl);
rl                276 drivers/video/fbdev/metronomefb.c 		unsigned char rl;
rl                289 drivers/video/fbdev/metronomefb.c 		rl = mem[wfm_idx++];
rl                290 drivers/video/fbdev/metronomefb.c 		for (i = 0; i <= rl; i++)
rl                384 fs/binfmt_flat.c static void old_reloc(unsigned long rl)
rl                391 fs/binfmt_flat.c 	r.value = rl;
rl               5578 fs/dlm/lock.c  	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
rl               5581 fs/dlm/lock.c  	lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
rl               5582 fs/dlm/lock.c  	lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
rl               5583 fs/dlm/lock.c  	lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
rl               5584 fs/dlm/lock.c  	lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
rl               5586 fs/dlm/lock.c  	lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
rl               5587 fs/dlm/lock.c  	lkb->lkb_rqmode = rl->rl_rqmode;
rl               5588 fs/dlm/lock.c  	lkb->lkb_grmode = rl->rl_grmode;
rl               5591 fs/dlm/lock.c  	lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
rl               5592 fs/dlm/lock.c  	lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
rl               5602 fs/dlm/lock.c  		memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
rl               5609 fs/dlm/lock.c  	if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
rl               5611 fs/dlm/lock.c  		rl->rl_status = DLM_LKSTS_CONVERT;
rl               5628 fs/dlm/lock.c  	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
rl               5635 fs/dlm/lock.c  	if (rl->rl_parent_lkid) {
rl               5640 fs/dlm/lock.c  	remid = le32_to_cpu(rl->rl_lkid);
rl               5650 fs/dlm/lock.c  	error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
rl               5681 fs/dlm/lock.c  	add_lkb(r, lkb, rl->rl_status);
rl               5691 fs/dlm/lock.c  	rl->rl_remid = cpu_to_le32(lkb->lkb_id);
rl               5702 fs/dlm/lock.c  	rl->rl_result = cpu_to_le32(error);
rl               5709 fs/dlm/lock.c  	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
rl               5715 fs/dlm/lock.c  	lkid = le32_to_cpu(rl->rl_lkid);
rl               5716 fs/dlm/lock.c  	remid = le32_to_cpu(rl->rl_remid);
rl               5717 fs/dlm/lock.c  	result = le32_to_cpu(rl->rl_result);
rl                374 fs/dlm/rcom.c  			   struct rcom_lock *rl)
rl                376 fs/dlm/rcom.c  	memset(rl, 0, sizeof(*rl));
rl                378 fs/dlm/rcom.c  	rl->rl_ownpid = cpu_to_le32(lkb->lkb_ownpid);
rl                379 fs/dlm/rcom.c  	rl->rl_lkid = cpu_to_le32(lkb->lkb_id);
rl                380 fs/dlm/rcom.c  	rl->rl_exflags = cpu_to_le32(lkb->lkb_exflags);
rl                381 fs/dlm/rcom.c  	rl->rl_flags = cpu_to_le32(lkb->lkb_flags);
rl                382 fs/dlm/rcom.c  	rl->rl_lvbseq = cpu_to_le32(lkb->lkb_lvbseq);
rl                383 fs/dlm/rcom.c  	rl->rl_rqmode = lkb->lkb_rqmode;
rl                384 fs/dlm/rcom.c  	rl->rl_grmode = lkb->lkb_grmode;
rl                385 fs/dlm/rcom.c  	rl->rl_status = lkb->lkb_status;
rl                386 fs/dlm/rcom.c  	rl->rl_wait_type = cpu_to_le16(lkb->lkb_wait_type);
rl                389 fs/dlm/rcom.c  		rl->rl_asts |= DLM_CB_BAST;
rl                391 fs/dlm/rcom.c  		rl->rl_asts |= DLM_CB_CAST;
rl                393 fs/dlm/rcom.c  	rl->rl_namelen = cpu_to_le16(r->res_length);
rl                394 fs/dlm/rcom.c  	memcpy(rl->rl_name, r->res_name, r->res_length);
rl                400 fs/dlm/rcom.c  		memcpy(rl->rl_lvb, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
rl                408 fs/dlm/rcom.c  	struct rcom_lock *rl;
rl                418 fs/dlm/rcom.c  	rl = (struct rcom_lock *) rc->rc_buf;
rl                419 fs/dlm/rcom.c  	pack_rcom_lock(r, lkb, rl);
rl                176 fs/ntfs/aops.c 	runlist_element *rl;
rl                189 fs/ntfs/aops.c 	BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));
rl                228 fs/ntfs/aops.c 	rl = NULL;
rl                249 fs/ntfs/aops.c 			if (!rl) {
rl                252 fs/ntfs/aops.c 				rl = ni->runlist.rl;
rl                254 fs/ntfs/aops.c 			if (likely(rl != NULL)) {
rl                256 fs/ntfs/aops.c 				while (rl->length && rl[1].vcn <= vcn)
rl                257 fs/ntfs/aops.c 					rl++;
rl                258 fs/ntfs/aops.c 				lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
rl                289 fs/ntfs/aops.c 				rl = NULL;
rl                290 fs/ntfs/aops.c 			} else if (!rl)
rl                330 fs/ntfs/aops.c 	if (rl)
rl                543 fs/ntfs/aops.c 	runlist_element *rl;
rl                613 fs/ntfs/aops.c 	rl = NULL;
rl                705 fs/ntfs/aops.c 		if (!rl) {
rl                708 fs/ntfs/aops.c 			rl = ni->runlist.rl;
rl                710 fs/ntfs/aops.c 		if (likely(rl != NULL)) {
rl                712 fs/ntfs/aops.c 			while (rl->length && rl[1].vcn <= vcn)
rl                713 fs/ntfs/aops.c 				rl++;
rl                714 fs/ntfs/aops.c 			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
rl                767 fs/ntfs/aops.c 			rl = NULL;
rl                768 fs/ntfs/aops.c 		} else if (!rl)
rl                798 fs/ntfs/aops.c 	if (rl)
rl                914 fs/ntfs/aops.c 	runlist_element *rl;
rl                961 fs/ntfs/aops.c 	rl = NULL;
rl               1014 fs/ntfs/aops.c 			if (!rl) {
rl               1017 fs/ntfs/aops.c 				rl = ni->runlist.rl;
rl               1019 fs/ntfs/aops.c 			if (likely(rl != NULL)) {
rl               1021 fs/ntfs/aops.c 				while (rl->length && rl[1].vcn <= vcn)
rl               1022 fs/ntfs/aops.c 					rl++;
rl               1023 fs/ntfs/aops.c 				lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
rl               1055 fs/ntfs/aops.c 					if (!rl)
rl               1099 fs/ntfs/aops.c 	if (unlikely(rl))
rl                 77 fs/ntfs/attrib.c 	runlist_element *rl;
rl                173 fs/ntfs/attrib.c 	rl = ntfs_mapping_pairs_decompress(ni->vol, a, ni->runlist.rl);
rl                174 fs/ntfs/attrib.c 	if (IS_ERR(rl))
rl                175 fs/ntfs/attrib.c 		err = PTR_ERR(rl);
rl                177 fs/ntfs/attrib.c 		ni->runlist.rl = rl;
rl                290 fs/ntfs/attrib.c 	if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
rl                340 fs/ntfs/attrib.c 	if (!ni->runlist.rl) {
rl                350 fs/ntfs/attrib.c 	lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn);
rl                364 fs/ntfs/attrib.c 			if (unlikely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) !=
rl                454 fs/ntfs/attrib.c 	runlist_element *rl;
rl                463 fs/ntfs/attrib.c 	if (!ni->runlist.rl) {
rl                472 fs/ntfs/attrib.c 	rl = ni->runlist.rl;
rl                473 fs/ntfs/attrib.c 	if (likely(rl && vcn >= rl[0].vcn)) {
rl                474 fs/ntfs/attrib.c 		while (likely(rl->length)) {
rl                475 fs/ntfs/attrib.c 			if (unlikely(vcn < rl[1].vcn)) {
rl                476 fs/ntfs/attrib.c 				if (likely(rl->lcn >= LCN_HOLE)) {
rl                478 fs/ntfs/attrib.c 					return rl;
rl                482 fs/ntfs/attrib.c 			rl++;
rl                484 fs/ntfs/attrib.c 		if (likely(rl->lcn != LCN_RL_NOT_MAPPED)) {
rl                485 fs/ntfs/attrib.c 			if (likely(rl->lcn == LCN_ENOENT))
rl                702 fs/ntfs/attrib.c 	runlist_element *rl;
rl                722 fs/ntfs/attrib.c 	rl = runlist->rl;
rl                723 fs/ntfs/attrib.c 	if (!rl) {
rl                729 fs/ntfs/attrib.c 	while (rl->length) {
rl                730 fs/ntfs/attrib.c 		lcn = ntfs_rl_vcn_to_lcn(rl, rl->vcn);
rl                732 fs/ntfs/attrib.c 				(unsigned long long)rl->vcn,
rl                742 fs/ntfs/attrib.c 		max_block = block + (rl->length << vol->cluster_size_bits >>
rl                759 fs/ntfs/attrib.c 		rl++;
rl               1531 fs/ntfs/attrib.c 	runlist_element *rl;
rl               1571 fs/ntfs/attrib.c 		rl = ntfs_cluster_alloc(vol, 0, new_size >>
rl               1573 fs/ntfs/attrib.c 		if (IS_ERR(rl)) {
rl               1574 fs/ntfs/attrib.c 			err = PTR_ERR(rl);
rl               1582 fs/ntfs/attrib.c 		rl = NULL;
rl               1586 fs/ntfs/attrib.c 	mp_size = ntfs_get_size_for_mapping_pairs(vol, rl, 0, -1);
rl               1691 fs/ntfs/attrib.c 			arec_size - mp_ofs, rl, 0, -1, NULL);
rl               1698 fs/ntfs/attrib.c 	ni->runlist.rl = rl;
rl               1810 fs/ntfs/attrib.c 	ni->runlist.rl = NULL;
rl               1813 fs/ntfs/attrib.c 	if (rl) {
rl               1814 fs/ntfs/attrib.c 		if (ntfs_cluster_free_from_rl(vol, rl) < 0) {
rl               1821 fs/ntfs/attrib.c 		ntfs_free(rl);
rl               1901 fs/ntfs/attrib.c 	runlist_element *rl, *rl2;
rl               2158 fs/ntfs/attrib.c 	rl = ni->runlist.rl;
rl               2159 fs/ntfs/attrib.c 	if (likely(rl)) {
rl               2161 fs/ntfs/attrib.c 		while (rl->length)
rl               2162 fs/ntfs/attrib.c 			rl++;
rl               2165 fs/ntfs/attrib.c 	if (unlikely(!rl || rl->lcn == LCN_RL_NOT_MAPPED ||
rl               2166 fs/ntfs/attrib.c 			(rl->lcn == LCN_ENOENT && rl > ni->runlist.rl &&
rl               2167 fs/ntfs/attrib.c 			(rl-1)->lcn == LCN_RL_NOT_MAPPED))) {
rl               2168 fs/ntfs/attrib.c 		if (!rl && !allocated_size)
rl               2170 fs/ntfs/attrib.c 		rl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl);
rl               2171 fs/ntfs/attrib.c 		if (IS_ERR(rl)) {
rl               2172 fs/ntfs/attrib.c 			err = PTR_ERR(rl);
rl               2186 fs/ntfs/attrib.c 		ni->runlist.rl = rl;
rl               2188 fs/ntfs/attrib.c 		while (rl->length)
rl               2189 fs/ntfs/attrib.c 			rl++;
rl               2199 fs/ntfs/attrib.c 	while (rl->lcn < 0 && rl > ni->runlist.rl)
rl               2200 fs/ntfs/attrib.c 		rl--;
rl               2207 fs/ntfs/attrib.c 			vol->cluster_size_bits, (rl && (rl->lcn >= 0)) ?
rl               2208 fs/ntfs/attrib.c 			rl->lcn + rl->length : -1, DATA_ZONE, true);
rl               2221 fs/ntfs/attrib.c 	rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
rl               2222 fs/ntfs/attrib.c 	if (IS_ERR(rl)) {
rl               2223 fs/ntfs/attrib.c 		err = PTR_ERR(rl);
rl               2242 fs/ntfs/attrib.c 	ni->runlist.rl = rl;
rl               2247 fs/ntfs/attrib.c 	rl2 = ntfs_rl_find_vcn_nolock(rl, ll);
rl                 64 fs/ntfs/attrib.h extern int load_attribute_list(ntfs_volume *vol, runlist *rl, u8 *al_start,
rl                470 fs/ntfs/compress.c 	runlist_element *rl;
rl                595 fs/ntfs/compress.c 	rl = NULL;
rl                600 fs/ntfs/compress.c 		if (!rl) {
rl                603 fs/ntfs/compress.c 			rl = ni->runlist.rl;
rl                605 fs/ntfs/compress.c 		if (likely(rl != NULL)) {
rl                607 fs/ntfs/compress.c 			while (rl->length && rl[1].vcn <= vcn)
rl                608 fs/ntfs/compress.c 				rl++;
rl                609 fs/ntfs/compress.c 			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
rl                646 fs/ntfs/compress.c 	if (rl)
rl                120 fs/ntfs/debug.c void ntfs_debug_dump_runlist(const runlist_element *rl)
rl                129 fs/ntfs/debug.c 	if (!rl) {
rl                135 fs/ntfs/debug.c 		LCN lcn = (rl + i)->lcn;
rl                143 fs/ntfs/debug.c 					(long long)(rl + i)->vcn, lcn_str[index],
rl                144 fs/ntfs/debug.c 					(long long)(rl + i)->length,
rl                145 fs/ntfs/debug.c 					(rl + i)->length ? "" :
rl                149 fs/ntfs/debug.c 					(long long)(rl + i)->vcn,
rl                150 fs/ntfs/debug.c 					(long long)(rl + i)->lcn,
rl                151 fs/ntfs/debug.c 					(long long)(rl + i)->length,
rl                152 fs/ntfs/debug.c 					(rl + i)->length ? "" :
rl                154 fs/ntfs/debug.c 		if (!(rl + i)->length)
rl                 33 fs/ntfs/debug.h extern void ntfs_debug_dump_runlist(const runlist_element *rl);
rl                 43 fs/ntfs/debug.h #define ntfs_debug_dump_runlist(rl)	do {} while (0)
rl                581 fs/ntfs/file.c 	runlist_element *rl, *rl2;
rl                626 fs/ntfs/file.c 	rl = NULL;
rl                829 fs/ntfs/file.c 		if (!rl) {
rl                832 fs/ntfs/file.c 			rl = ni->runlist.rl;
rl                834 fs/ntfs/file.c 		if (likely(rl != NULL)) {
rl                836 fs/ntfs/file.c 			while (rl->length && rl[1].vcn <= bh_cpos)
rl                837 fs/ntfs/file.c 				rl++;
rl                838 fs/ntfs/file.c 			lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos);
rl                846 fs/ntfs/file.c 				vcn_len = rl[1].vcn - vcn;
rl                863 fs/ntfs/file.c 					rl = NULL;
rl                964 fs/ntfs/file.c 		BUG_ON(!rl);
rl                972 fs/ntfs/file.c 		BUG_ON(rl->lcn != LCN_HOLE);
rl                974 fs/ntfs/file.c 		rl2 = rl;
rl                975 fs/ntfs/file.c 		while (--rl2 >= ni->runlist.rl) {
rl                990 fs/ntfs/file.c 		rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
rl                991 fs/ntfs/file.c 		if (IS_ERR(rl)) {
rl                992 fs/ntfs/file.c 			err = PTR_ERR(rl);
rl               1005 fs/ntfs/file.c 		ni->runlist.rl = rl;
rl               1043 fs/ntfs/file.c 		rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);
rl               1171 fs/ntfs/file.c 			rl = NULL;
rl               1183 fs/ntfs/file.c 		} else if (unlikely(rl))
rl               1185 fs/ntfs/file.c 		rl = NULL;
rl               1303 fs/ntfs/file.c 					mapping_pairs_offset), ni->runlist.rl,
rl               1323 fs/ntfs/file.c 	else if (rl)
rl                714 fs/ntfs/inode.c 			ni->attr_list_rl.rl = ntfs_mapping_pairs_decompress(vol,
rl                716 fs/ntfs/inode.c 			if (IS_ERR(ni->attr_list_rl.rl)) {
rl                717 fs/ntfs/inode.c 				err = PTR_ERR(ni->attr_list_rl.rl);
rl                718 fs/ntfs/inode.c 				ni->attr_list_rl.rl = NULL;
rl               1886 fs/ntfs/inode.c 			ni->attr_list_rl.rl = ntfs_mapping_pairs_decompress(vol,
rl               1888 fs/ntfs/inode.c 			if (IS_ERR(ni->attr_list_rl.rl)) {
rl               1889 fs/ntfs/inode.c 				err = PTR_ERR(ni->attr_list_rl.rl);
rl               1890 fs/ntfs/inode.c 				ni->attr_list_rl.rl = NULL;
rl               2017 fs/ntfs/inode.c 		nrl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl);
rl               2024 fs/ntfs/inode.c 		ni->runlist.rl = nrl;
rl               2166 fs/ntfs/inode.c 	if (ni->runlist.rl) {
rl               2167 fs/ntfs/inode.c 		ntfs_free(ni->runlist.rl);
rl               2168 fs/ntfs/inode.c 		ni->runlist.rl = NULL;
rl               2178 fs/ntfs/inode.c 	if (ni->attr_list_rl.rl) {
rl               2179 fs/ntfs/inode.c 		ntfs_free(ni->attr_list_rl.rl);
rl               2180 fs/ntfs/inode.c 		ni->attr_list_rl.rl = NULL;
rl               2702 fs/ntfs/inode.c 	mp_size = ntfs_get_size_for_mapping_pairs(vol, ni->runlist.rl, 0, -1);
rl               2726 fs/ntfs/inode.c 			mp_size, ni->runlist.rl, 0, -1, NULL);
rl                 37 fs/ntfs/lcnalloc.c 		const runlist_element *rl)
rl                 43 fs/ntfs/lcnalloc.c 	if (!rl)
rl                 45 fs/ntfs/lcnalloc.c 	for (; rl->length; rl++) {
rl                 48 fs/ntfs/lcnalloc.c 		if (rl->lcn < 0)
rl                 50 fs/ntfs/lcnalloc.c 		err = ntfs_bitmap_clear_run(lcnbmp_vi, rl->lcn, rl->length);
rl                142 fs/ntfs/lcnalloc.c 	runlist_element *rl = NULL;
rl                320 fs/ntfs/lcnalloc.c 			if ((rlpos + 2) * sizeof(*rl) > rlsize) {
rl                324 fs/ntfs/lcnalloc.c 				if (!rl)
rl                336 fs/ntfs/lcnalloc.c 				memcpy(rl2, rl, rlsize);
rl                337 fs/ntfs/lcnalloc.c 				ntfs_free(rl);
rl                338 fs/ntfs/lcnalloc.c 				rl = rl2;
rl                367 fs/ntfs/lcnalloc.c 						rl[rlpos - 1].lcn,
rl                369 fs/ntfs/lcnalloc.c 						rl[rlpos - 1].length);
rl                370 fs/ntfs/lcnalloc.c 				rl[rlpos - 1].length = ++prev_run_len;
rl                374 fs/ntfs/lcnalloc.c 						rl[rlpos - 1].lcn,
rl                376 fs/ntfs/lcnalloc.c 						rl[rlpos - 1].length,
rl                385 fs/ntfs/lcnalloc.c 							rl[rlpos - 1].lcn,
rl                387 fs/ntfs/lcnalloc.c 							rl[rlpos - 1].length);
rl                388 fs/ntfs/lcnalloc.c 					rl[rlpos].vcn = rl[rlpos - 1].vcn +
rl                393 fs/ntfs/lcnalloc.c 					rl[rlpos].vcn = start_vcn;
rl                395 fs/ntfs/lcnalloc.c 				rl[rlpos].lcn = prev_lcn = lcn + bmp_pos;
rl                396 fs/ntfs/lcnalloc.c 				rl[rlpos].length = prev_run_len = 1;
rl                551 fs/ntfs/lcnalloc.c 					tc = rl[rlpos - 1].lcn +
rl                552 fs/ntfs/lcnalloc.c 							rl[rlpos - 1].length;
rl                594 fs/ntfs/lcnalloc.c 					tc = rl[rlpos - 1].lcn +
rl                595 fs/ntfs/lcnalloc.c 							rl[rlpos - 1].length;
rl                635 fs/ntfs/lcnalloc.c 					tc = rl[rlpos - 1].lcn +
rl                636 fs/ntfs/lcnalloc.c 							rl[rlpos - 1].length;
rl                725 fs/ntfs/lcnalloc.c 	if (likely(rl)) {
rl                726 fs/ntfs/lcnalloc.c 		rl[rlpos].vcn = rl[rlpos - 1].vcn + rl[rlpos - 1].length;
rl                727 fs/ntfs/lcnalloc.c 		rl[rlpos].lcn = is_extension ? LCN_ENOENT : LCN_RL_NOT_MAPPED;
rl                728 fs/ntfs/lcnalloc.c 		rl[rlpos].length = 0;
rl                742 fs/ntfs/lcnalloc.c 		return rl;
rl                746 fs/ntfs/lcnalloc.c 	if (rl) {
rl                754 fs/ntfs/lcnalloc.c 					(unsigned long long)rl[0].lcn,
rl                758 fs/ntfs/lcnalloc.c 		err2 = ntfs_cluster_free_from_rl_nolock(vol, rl);
rl                766 fs/ntfs/lcnalloc.c 		ntfs_free(rl);
rl                841 fs/ntfs/lcnalloc.c 	runlist_element *rl;
rl                866 fs/ntfs/lcnalloc.c 	rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, ctx);
rl                867 fs/ntfs/lcnalloc.c 	if (IS_ERR(rl)) {
rl                871 fs/ntfs/lcnalloc.c 					PTR_ERR(rl));
rl                872 fs/ntfs/lcnalloc.c 		err = PTR_ERR(rl);
rl                875 fs/ntfs/lcnalloc.c 	if (unlikely(rl->lcn < LCN_HOLE)) {
rl                883 fs/ntfs/lcnalloc.c 	delta = start_vcn - rl->vcn;
rl                886 fs/ntfs/lcnalloc.c 	to_free = rl->length - delta;
rl                890 fs/ntfs/lcnalloc.c 	if (likely(rl->lcn >= 0)) {
rl                892 fs/ntfs/lcnalloc.c 		err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn + delta,
rl                904 fs/ntfs/lcnalloc.c 	++rl;
rl                914 fs/ntfs/lcnalloc.c 	for (; rl->length && count != 0; ++rl) {
rl                915 fs/ntfs/lcnalloc.c 		if (unlikely(rl->lcn < LCN_HOLE)) {
rl                919 fs/ntfs/lcnalloc.c 			vcn = rl->vcn;
rl                920 fs/ntfs/lcnalloc.c 			rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx);
rl                921 fs/ntfs/lcnalloc.c 			if (IS_ERR(rl)) {
rl                922 fs/ntfs/lcnalloc.c 				err = PTR_ERR(rl);
rl                931 fs/ntfs/lcnalloc.c 			if (unlikely(rl->lcn < LCN_HOLE)) {
rl                937 fs/ntfs/lcnalloc.c 							rl->lcn);
rl                943 fs/ntfs/lcnalloc.c 		to_free = rl->length;
rl                947 fs/ntfs/lcnalloc.c 		if (likely(rl->lcn >= 0)) {
rl                949 fs/ntfs/lcnalloc.c 			err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn,
rl                100 fs/ntfs/lcnalloc.h 		const runlist_element *rl);
rl                119 fs/ntfs/lcnalloc.h 		const runlist_element *rl)
rl                124 fs/ntfs/lcnalloc.h 	ret = ntfs_cluster_free_from_rl_nolock(vol, rl);
rl                719 fs/ntfs/logfile.c 	runlist_element *rl;
rl                746 fs/ntfs/logfile.c 	rl = log_ni->runlist.rl;
rl                747 fs/ntfs/logfile.c 	if (unlikely(!rl || vcn < rl->vcn || !rl->length)) {
rl                755 fs/ntfs/logfile.c 		rl = log_ni->runlist.rl;
rl                756 fs/ntfs/logfile.c 		BUG_ON(!rl || vcn < rl->vcn || !rl->length);
rl                759 fs/ntfs/logfile.c 	while (rl->length && vcn >= rl[1].vcn)
rl                760 fs/ntfs/logfile.c 		rl++;
rl                770 fs/ntfs/logfile.c 		lcn = rl->lcn;
rl                772 fs/ntfs/logfile.c 			vcn = rl->vcn;
rl                776 fs/ntfs/logfile.c 		if (unlikely(!rl->length || lcn < LCN_HOLE))
rl                782 fs/ntfs/logfile.c 		len = rl->length;
rl                783 fs/ntfs/logfile.c 		if (rl[1].vcn > end_vcn)
rl                784 fs/ntfs/logfile.c 			len = end_vcn - rl->vcn;
rl                820 fs/ntfs/logfile.c 	} while ((++rl)->vcn < end_vcn);
rl                463 fs/ntfs/mft.c  	runlist_element *rl;
rl                511 fs/ntfs/mft.c  	rl = NULL;
rl                535 fs/ntfs/mft.c  			if (!rl) {
rl                538 fs/ntfs/mft.c  				rl = NTFS_I(vol->mftmirr_ino)->runlist.rl;
rl                543 fs/ntfs/mft.c  				BUG_ON(!rl);
rl                546 fs/ntfs/mft.c  			while (rl->length && rl[1].vcn <= vcn)
rl                547 fs/ntfs/mft.c  				rl++;
rl                548 fs/ntfs/mft.c  			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
rl                573 fs/ntfs/mft.c  	if (unlikely(rl))
rl                669 fs/ntfs/mft.c  	runlist_element *rl;
rl                691 fs/ntfs/mft.c  	rl = NULL;
rl                728 fs/ntfs/mft.c  			if (!rl) {
rl                730 fs/ntfs/mft.c  				rl = NTFS_I(vol->mft_ino)->runlist.rl;
rl                731 fs/ntfs/mft.c  				BUG_ON(!rl);
rl                734 fs/ntfs/mft.c  			while (rl->length && rl[1].vcn <= vcn)
rl                735 fs/ntfs/mft.c  				rl++;
rl                736 fs/ntfs/mft.c  			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
rl                760 fs/ntfs/mft.c  	if (unlikely(rl))
rl               1280 fs/ntfs/mft.c  	runlist_element *rl, *rl2 = NULL;
rl               1304 fs/ntfs/mft.c  	rl = ntfs_attr_find_vcn_nolock(mftbmp_ni,
rl               1306 fs/ntfs/mft.c  	if (IS_ERR(rl) || unlikely(!rl->length || rl->lcn < 0)) {
rl               1310 fs/ntfs/mft.c  		if (!IS_ERR(rl))
rl               1313 fs/ntfs/mft.c  			ret = PTR_ERR(rl);
rl               1316 fs/ntfs/mft.c  	lcn = rl->lcn + rl->length;
rl               1343 fs/ntfs/mft.c  		rl->length++;
rl               1344 fs/ntfs/mft.c  		rl[1].vcn++;
rl               1351 fs/ntfs/mft.c  		rl2 = ntfs_cluster_alloc(vol, rl[1].vcn, 1, lcn, DATA_ZONE,
rl               1359 fs/ntfs/mft.c  		rl = ntfs_runlists_merge(mftbmp_ni->runlist.rl, rl2);
rl               1360 fs/ntfs/mft.c  		if (IS_ERR(rl)) {
rl               1370 fs/ntfs/mft.c  			return PTR_ERR(rl);
rl               1372 fs/ntfs/mft.c  		mftbmp_ni->runlist.rl = rl;
rl               1376 fs/ntfs/mft.c  		for (; rl[1].length; rl++)
rl               1396 fs/ntfs/mft.c  			mftbmp_ni->name_len, CASE_SENSITIVE, rl[1].vcn, NULL,
rl               1408 fs/ntfs/mft.c  	for (rl2 = rl; rl2 > mftbmp_ni->runlist.rl; rl2--) {
rl               1456 fs/ntfs/mft.c  	a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 1);
rl               1495 fs/ntfs/mft.c  			mftbmp_ni->name_len, CASE_SENSITIVE, rl[1].vcn, NULL,
rl               1513 fs/ntfs/mft.c  	a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 2);
rl               1517 fs/ntfs/mft.c  		rl->length--;
rl               1518 fs/ntfs/mft.c  		rl[1].vcn--;
rl               1520 fs/ntfs/mft.c  		lcn = rl->lcn;
rl               1522 fs/ntfs/mft.c  		rl->lcn = rl[1].lcn;
rl               1523 fs/ntfs/mft.c  		rl->length = 0;
rl               1716 fs/ntfs/mft.c  	runlist_element *rl, *rl2;
rl               1735 fs/ntfs/mft.c  	rl = ntfs_attr_find_vcn_nolock(mft_ni,
rl               1737 fs/ntfs/mft.c  	if (IS_ERR(rl) || unlikely(!rl->length || rl->lcn < 0)) {
rl               1741 fs/ntfs/mft.c  		if (!IS_ERR(rl))
rl               1744 fs/ntfs/mft.c  			ret = PTR_ERR(rl);
rl               1747 fs/ntfs/mft.c  	lcn = rl->lcn + rl->length;
rl               1775 fs/ntfs/mft.c  	old_last_vcn = rl[1].vcn;
rl               1797 fs/ntfs/mft.c  	rl = ntfs_runlists_merge(mft_ni->runlist.rl, rl2);
rl               1798 fs/ntfs/mft.c  	if (IS_ERR(rl)) {
rl               1808 fs/ntfs/mft.c  		return PTR_ERR(rl);
rl               1810 fs/ntfs/mft.c  	mft_ni->runlist.rl = rl;
rl               1813 fs/ntfs/mft.c  	for (; rl[1].length; rl++)
rl               1829 fs/ntfs/mft.c  			CASE_SENSITIVE, rl[1].vcn, NULL, 0, ctx);
rl               1840 fs/ntfs/mft.c  	for (rl2 = rl; rl2 > mft_ni->runlist.rl; rl2--) {
rl               1893 fs/ntfs/mft.c  	a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 1);
rl               1934 fs/ntfs/mft.c  			CASE_SENSITIVE, rl[1].vcn, NULL, 0, ctx)) {
rl                 60 fs/ntfs/runlist.c static inline runlist_element *ntfs_rl_realloc(runlist_element *rl,
rl                 65 fs/ntfs/runlist.c 	old_size = PAGE_ALIGN(old_size * sizeof(*rl));
rl                 66 fs/ntfs/runlist.c 	new_size = PAGE_ALIGN(new_size * sizeof(*rl));
rl                 68 fs/ntfs/runlist.c 		return rl;
rl                 74 fs/ntfs/runlist.c 	if (likely(rl != NULL)) {
rl                 77 fs/ntfs/runlist.c 		memcpy(new_rl, rl, old_size);
rl                 78 fs/ntfs/runlist.c 		ntfs_free(rl);
rl                106 fs/ntfs/runlist.c static inline runlist_element *ntfs_rl_realloc_nofail(runlist_element *rl,
rl                111 fs/ntfs/runlist.c 	old_size = PAGE_ALIGN(old_size * sizeof(*rl));
rl                112 fs/ntfs/runlist.c 	new_size = PAGE_ALIGN(new_size * sizeof(*rl));
rl                114 fs/ntfs/runlist.c 		return rl;
rl                119 fs/ntfs/runlist.c 	if (likely(rl != NULL)) {
rl                122 fs/ntfs/runlist.c 		memcpy(new_rl, rl, old_size);
rl                123 fs/ntfs/runlist.c 		ntfs_free(rl);
rl                741 fs/ntfs/runlist.c 	runlist_element *rl;	/* The output runlist. */
rl                774 fs/ntfs/runlist.c 	rl = ntfs_malloc_nofs(rlsize = PAGE_SIZE);
rl                775 fs/ntfs/runlist.c 	if (unlikely(!rl))
rl                779 fs/ntfs/runlist.c 		rl->vcn = 0;
rl                780 fs/ntfs/runlist.c 		rl->lcn = LCN_RL_NOT_MAPPED;
rl                781 fs/ntfs/runlist.c 		rl->length = vcn;
rl                795 fs/ntfs/runlist.c 				ntfs_free(rl);
rl                798 fs/ntfs/runlist.c 			memcpy(rl2, rl, rlsize);
rl                799 fs/ntfs/runlist.c 			ntfs_free(rl);
rl                800 fs/ntfs/runlist.c 			rl = rl2;
rl                804 fs/ntfs/runlist.c 		rl[rlpos].vcn = vcn;
rl                836 fs/ntfs/runlist.c 		rl[rlpos].length = deltaxcn;
rl                845 fs/ntfs/runlist.c 			rl[rlpos].lcn = LCN_HOLE;
rl                878 fs/ntfs/runlist.c 			rl[rlpos].lcn = lcn;
rl                924 fs/ntfs/runlist.c 				rl[rlpos].vcn = vcn;
rl                925 fs/ntfs/runlist.c 				vcn += rl[rlpos].length = max_cluster -
rl                927 fs/ntfs/runlist.c 				rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
rl                939 fs/ntfs/runlist.c 		rl[rlpos].lcn = LCN_ENOENT;
rl                941 fs/ntfs/runlist.c 		rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
rl                944 fs/ntfs/runlist.c 	rl[rlpos].vcn = vcn;
rl                945 fs/ntfs/runlist.c 	rl[rlpos].length = (s64)0;
rl                949 fs/ntfs/runlist.c 		ntfs_debug_dump_runlist(rl);
rl                950 fs/ntfs/runlist.c 		return rl;
rl                953 fs/ntfs/runlist.c 	old_rl = ntfs_runlists_merge(old_rl, rl);
rl                956 fs/ntfs/runlist.c 	ntfs_free(rl);
rl                962 fs/ntfs/runlist.c 	ntfs_free(rl);
rl                990 fs/ntfs/runlist.c LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn)
rl               1000 fs/ntfs/runlist.c 	if (unlikely(!rl))
rl               1004 fs/ntfs/runlist.c 	if (unlikely(vcn < rl[0].vcn))
rl               1007 fs/ntfs/runlist.c 	for (i = 0; likely(rl[i].length); i++) {
rl               1008 fs/ntfs/runlist.c 		if (unlikely(vcn < rl[i+1].vcn)) {
rl               1009 fs/ntfs/runlist.c 			if (likely(rl[i].lcn >= (LCN)0))
rl               1010 fs/ntfs/runlist.c 				return rl[i].lcn + (vcn - rl[i].vcn);
rl               1011 fs/ntfs/runlist.c 			return rl[i].lcn;
rl               1018 fs/ntfs/runlist.c 	if (likely(rl[i].lcn < (LCN)0))
rl               1019 fs/ntfs/runlist.c 		return rl[i].lcn;
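
The fs/ntfs/runlist.c entries for ntfs_rl_vcn_to_lcn() above walk a zero-length-terminated array of runs until the run containing the requested virtual cluster is found, returning either a real logical cluster number or a negative sentinel. A minimal standalone sketch of that lookup pattern follows; the struct layout, the sentinel values and the example runlist are simplified stand-ins defined only for this sketch, not the driver's actual headers.

/* Minimal sketch of a runlist VCN-to-LCN lookup; the types and the negative
 * sentinels below are simplified stand-ins for the NTFS driver's definitions. */
#include <stdint.h>
#include <stdio.h>

typedef int64_t VCN;
typedef int64_t LCN;

#define LCN_HOLE          ((LCN)-1)   /* sparse run, no clusters allocated */
#define LCN_RL_NOT_MAPPED ((LCN)-2)   /* this part of the runlist is not loaded */
#define LCN_ENOENT        ((LCN)-3)   /* past the end of the attribute */

struct runlist_element {
	VCN vcn;        /* first virtual cluster of this run */
	LCN lcn;        /* first logical cluster, or a negative sentinel */
	int64_t length; /* run length in clusters; 0 terminates the array */
};

/* Walk the terminated array until the run containing @vcn is found. */
static LCN rl_vcn_to_lcn(const struct runlist_element *rl, VCN vcn)
{
	size_t i;

	if (!rl || vcn < rl[0].vcn)
		return LCN_ENOENT;
	for (i = 0; rl[i].length; i++) {
		if (vcn < rl[i + 1].vcn) {
			if (rl[i].lcn >= 0)
				return rl[i].lcn + (vcn - rl[i].vcn);
			return rl[i].lcn;  /* hole or not-mapped sentinel */
		}
	}
	return LCN_ENOENT;  /* simplified: the driver also distinguishes sentinels here */
}

int main(void)
{
	const struct runlist_element rl[] = {
		{ .vcn = 0,  .lcn = 100,        .length = 8 },
		{ .vcn = 8,  .lcn = LCN_HOLE,   .length = 4 },
		{ .vcn = 12, .lcn = 500,        .length = 4 },
		{ .vcn = 16, .lcn = LCN_ENOENT, .length = 0 }, /* terminator */
	};

	printf("vcn 10 -> lcn %lld\n", (long long)rl_vcn_to_lcn(rl, 10)); /* hole (-1) */
	printf("vcn 13 -> lcn %lld\n", (long long)rl_vcn_to_lcn(rl, 13)); /* 501 */
	return 0;
}
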
rl               1039 fs/ntfs/runlist.c runlist_element *ntfs_rl_find_vcn_nolock(runlist_element *rl, const VCN vcn)
rl               1042 fs/ntfs/runlist.c 	if (unlikely(!rl || vcn < rl[0].vcn))
rl               1044 fs/ntfs/runlist.c 	while (likely(rl->length)) {
rl               1045 fs/ntfs/runlist.c 		if (unlikely(vcn < rl[1].vcn)) {
rl               1046 fs/ntfs/runlist.c 			if (likely(rl->lcn >= LCN_HOLE))
rl               1047 fs/ntfs/runlist.c 				return rl;
rl               1050 fs/ntfs/runlist.c 		rl++;
rl               1052 fs/ntfs/runlist.c 	if (likely(rl->lcn == LCN_ENOENT))
rl               1053 fs/ntfs/runlist.c 		return rl;
rl               1118 fs/ntfs/runlist.c 		const runlist_element *rl, const VCN first_vcn,
rl               1128 fs/ntfs/runlist.c 	if (!rl) {
rl               1134 fs/ntfs/runlist.c 	while (rl->length && first_vcn >= rl[1].vcn)
rl               1135 fs/ntfs/runlist.c 		rl++;
rl               1136 fs/ntfs/runlist.c 	if (unlikely((!rl->length && first_vcn > rl->vcn) ||
rl               1137 fs/ntfs/runlist.c 			first_vcn < rl->vcn))
rl               1143 fs/ntfs/runlist.c 	if (first_vcn > rl->vcn) {
rl               1144 fs/ntfs/runlist.c 		s64 delta, length = rl->length;
rl               1147 fs/ntfs/runlist.c 		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
rl               1153 fs/ntfs/runlist.c 		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
rl               1155 fs/ntfs/runlist.c 			if (unlikely(rl[1].vcn > s1))
rl               1156 fs/ntfs/runlist.c 				length = s1 - rl->vcn;
rl               1159 fs/ntfs/runlist.c 		delta = first_vcn - rl->vcn;
rl               1169 fs/ntfs/runlist.c 		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
rl               1170 fs/ntfs/runlist.c 			prev_lcn = rl->lcn;
rl               1171 fs/ntfs/runlist.c 			if (likely(rl->lcn >= 0))
rl               1177 fs/ntfs/runlist.c 		rl++;
rl               1180 fs/ntfs/runlist.c 	for (; rl->length && !the_end; rl++) {
rl               1181 fs/ntfs/runlist.c 		s64 length = rl->length;
rl               1183 fs/ntfs/runlist.c 		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
rl               1189 fs/ntfs/runlist.c 		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
rl               1191 fs/ntfs/runlist.c 			if (unlikely(rl[1].vcn > s1))
rl               1192 fs/ntfs/runlist.c 				length = s1 - rl->vcn;
rl               1204 fs/ntfs/runlist.c 		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
rl               1206 fs/ntfs/runlist.c 			rls += ntfs_get_nr_significant_bytes(rl->lcn -
rl               1208 fs/ntfs/runlist.c 			prev_lcn = rl->lcn;
rl               1213 fs/ntfs/runlist.c 	if (rl->lcn == LCN_RL_NOT_MAPPED)
rl               1310 fs/ntfs/runlist.c 		const int dst_len, const runlist_element *rl,
rl               1323 fs/ntfs/runlist.c 	if (!rl) {
rl               1333 fs/ntfs/runlist.c 	while (rl->length && first_vcn >= rl[1].vcn)
rl               1334 fs/ntfs/runlist.c 		rl++;
rl               1335 fs/ntfs/runlist.c 	if (unlikely((!rl->length && first_vcn > rl->vcn) ||
rl               1336 fs/ntfs/runlist.c 			first_vcn < rl->vcn))
rl               1345 fs/ntfs/runlist.c 	if (first_vcn > rl->vcn) {
rl               1346 fs/ntfs/runlist.c 		s64 delta, length = rl->length;
rl               1349 fs/ntfs/runlist.c 		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
rl               1355 fs/ntfs/runlist.c 		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
rl               1357 fs/ntfs/runlist.c 			if (unlikely(rl[1].vcn > s1))
rl               1358 fs/ntfs/runlist.c 				length = s1 - rl->vcn;
rl               1361 fs/ntfs/runlist.c 		delta = first_vcn - rl->vcn;
rl               1376 fs/ntfs/runlist.c 		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
rl               1377 fs/ntfs/runlist.c 			prev_lcn = rl->lcn;
rl               1378 fs/ntfs/runlist.c 			if (likely(rl->lcn >= 0))
rl               1395 fs/ntfs/runlist.c 		rl++;
rl               1398 fs/ntfs/runlist.c 	for (; rl->length && !the_end; rl++) {
rl               1399 fs/ntfs/runlist.c 		s64 length = rl->length;
rl               1401 fs/ntfs/runlist.c 		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
rl               1407 fs/ntfs/runlist.c 		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
rl               1409 fs/ntfs/runlist.c 			if (unlikely(rl[1].vcn > s1))
rl               1410 fs/ntfs/runlist.c 				length = s1 - rl->vcn;
rl               1427 fs/ntfs/runlist.c 		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
rl               1430 fs/ntfs/runlist.c 					len_len, dst_max, rl->lcn - prev_lcn);
rl               1433 fs/ntfs/runlist.c 			prev_lcn = rl->lcn;
rl               1449 fs/ntfs/runlist.c 		*stop_vcn = rl->vcn;
rl               1454 fs/ntfs/runlist.c 	if (rl->lcn == LCN_RL_NOT_MAPPED)
rl               1488 fs/ntfs/runlist.c 	runlist_element *rl;
rl               1494 fs/ntfs/runlist.c 	rl = runlist->rl;
rl               1497 fs/ntfs/runlist.c 		runlist->rl = NULL;
rl               1498 fs/ntfs/runlist.c 		if (rl)
rl               1499 fs/ntfs/runlist.c 			ntfs_free(rl);
rl               1502 fs/ntfs/runlist.c 	if (unlikely(!rl)) {
rl               1507 fs/ntfs/runlist.c 		rl = ntfs_malloc_nofs(PAGE_SIZE);
rl               1508 fs/ntfs/runlist.c 		if (unlikely(!rl)) {
rl               1513 fs/ntfs/runlist.c 		runlist->rl = rl;
rl               1514 fs/ntfs/runlist.c 		rl[1].length = rl->vcn = 0;
rl               1515 fs/ntfs/runlist.c 		rl->lcn = LCN_HOLE;
rl               1516 fs/ntfs/runlist.c 		rl[1].vcn = rl->length = new_length;
rl               1517 fs/ntfs/runlist.c 		rl[1].lcn = LCN_ENOENT;
rl               1520 fs/ntfs/runlist.c 	BUG_ON(new_length < rl->vcn);
rl               1522 fs/ntfs/runlist.c 	while (likely(rl->length && new_length >= rl[1].vcn))
rl               1523 fs/ntfs/runlist.c 		rl++;
rl               1528 fs/ntfs/runlist.c 	if (rl->length) {
rl               1534 fs/ntfs/runlist.c 		trl = rl + 1;
rl               1537 fs/ntfs/runlist.c 		old_size = trl - runlist->rl + 1;
rl               1539 fs/ntfs/runlist.c 		rl->length = new_length - rl->vcn;
rl               1545 fs/ntfs/runlist.c 		if (rl->length) {
rl               1546 fs/ntfs/runlist.c 			rl++;
rl               1547 fs/ntfs/runlist.c 			if (!rl->length)
rl               1549 fs/ntfs/runlist.c 			rl->vcn = new_length;
rl               1550 fs/ntfs/runlist.c 			rl->length = 0;
rl               1552 fs/ntfs/runlist.c 		rl->lcn = LCN_ENOENT;
rl               1555 fs/ntfs/runlist.c 			int new_size = rl - runlist->rl + 1;
rl               1556 fs/ntfs/runlist.c 			rl = ntfs_rl_realloc(runlist->rl, old_size, new_size);
rl               1557 fs/ntfs/runlist.c 			if (IS_ERR(rl))
rl               1564 fs/ntfs/runlist.c 				runlist->rl = rl;
rl               1566 fs/ntfs/runlist.c 	} else if (likely(/* !rl->length && */ new_length > rl->vcn)) {
rl               1573 fs/ntfs/runlist.c 		if ((rl > runlist->rl) && ((rl - 1)->lcn == LCN_HOLE))
rl               1574 fs/ntfs/runlist.c 			(rl - 1)->length = new_length - (rl - 1)->vcn;
rl               1577 fs/ntfs/runlist.c 			old_size = rl - runlist->rl + 1;
rl               1579 fs/ntfs/runlist.c 			rl = ntfs_rl_realloc(runlist->rl, old_size,
rl               1581 fs/ntfs/runlist.c 			if (IS_ERR(rl)) {
rl               1584 fs/ntfs/runlist.c 				return PTR_ERR(rl);
rl               1586 fs/ntfs/runlist.c 			runlist->rl = rl;
rl               1591 fs/ntfs/runlist.c 			rl += old_size - 1;
rl               1593 fs/ntfs/runlist.c 			rl->lcn = LCN_HOLE;
rl               1594 fs/ntfs/runlist.c 			rl->length = new_length - rl->vcn;
rl               1596 fs/ntfs/runlist.c 			rl++;
rl               1597 fs/ntfs/runlist.c 			rl->length = 0;
rl               1599 fs/ntfs/runlist.c 		rl->vcn = new_length;
rl               1600 fs/ntfs/runlist.c 		rl->lcn = LCN_ENOENT;
rl               1603 fs/ntfs/runlist.c 		rl->lcn = LCN_ENOENT;
rl               1635 fs/ntfs/runlist.c 	runlist_element *rl, *rl_end, *rl_real_end, *trl;
rl               1645 fs/ntfs/runlist.c 	rl = runlist->rl;
rl               1646 fs/ntfs/runlist.c 	if (unlikely(!rl)) {
rl               1652 fs/ntfs/runlist.c 	while (likely(rl->length && start >= rl[1].vcn))
rl               1653 fs/ntfs/runlist.c 		rl++;
rl               1654 fs/ntfs/runlist.c 	rl_end = rl;
rl               1670 fs/ntfs/runlist.c 	if (!rl->length)
rl               1676 fs/ntfs/runlist.c 	old_size = rl_real_end - runlist->rl + 1;
rl               1678 fs/ntfs/runlist.c 	if (rl->lcn == LCN_HOLE) {
rl               1683 fs/ntfs/runlist.c 		if (end <= rl[1].vcn) {
rl               1689 fs/ntfs/runlist.c 		rl->length = end - rl->vcn;
rl               1693 fs/ntfs/runlist.c 			rl->length = rl_end->vcn - rl->vcn;
rl               1696 fs/ntfs/runlist.c 		rl++;
rl               1698 fs/ntfs/runlist.c 		if (rl < rl_end)
rl               1699 fs/ntfs/runlist.c 			memmove(rl, rl_end, (rl_real_end - rl_end + 1) *
rl               1700 fs/ntfs/runlist.c 					sizeof(*rl));
rl               1702 fs/ntfs/runlist.c 		if (end > rl->vcn) {
rl               1703 fs/ntfs/runlist.c 			delta = end - rl->vcn;
rl               1704 fs/ntfs/runlist.c 			rl->vcn = end;
rl               1705 fs/ntfs/runlist.c 			rl->length -= delta;
rl               1707 fs/ntfs/runlist.c 			if (rl->lcn >= 0)
rl               1708 fs/ntfs/runlist.c 				rl->lcn += delta;
rl               1712 fs/ntfs/runlist.c 		if (rl < rl_end) {
rl               1713 fs/ntfs/runlist.c 			rl = ntfs_rl_realloc(runlist->rl, old_size,
rl               1714 fs/ntfs/runlist.c 					old_size - (rl_end - rl));
rl               1715 fs/ntfs/runlist.c 			if (IS_ERR(rl))
rl               1722 fs/ntfs/runlist.c 				runlist->rl = rl;
rl               1731 fs/ntfs/runlist.c 	if (start == rl->vcn) {
rl               1743 fs/ntfs/runlist.c 		if (rl > runlist->rl && (rl - 1)->lcn == LCN_HOLE) {
rl               1744 fs/ntfs/runlist.c 			rl--;
rl               1747 fs/ntfs/runlist.c 		if (end >= rl[1].vcn) {
rl               1748 fs/ntfs/runlist.c 			rl->lcn = LCN_HOLE;
rl               1758 fs/ntfs/runlist.c 		trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 1);
rl               1762 fs/ntfs/runlist.c 		if (runlist->rl != trl) {
rl               1763 fs/ntfs/runlist.c 			rl = trl + (rl - runlist->rl);
rl               1764 fs/ntfs/runlist.c 			rl_end = trl + (rl_end - runlist->rl);
rl               1765 fs/ntfs/runlist.c 			rl_real_end = trl + (rl_real_end - runlist->rl);
rl               1766 fs/ntfs/runlist.c 			runlist->rl = trl;
rl               1770 fs/ntfs/runlist.c 		memmove(rl + 1, rl, (rl_real_end - rl + 1) * sizeof(*rl));
rl               1772 fs/ntfs/runlist.c 		rl->lcn = LCN_HOLE;
rl               1773 fs/ntfs/runlist.c 		rl->length = length;
rl               1774 fs/ntfs/runlist.c 		rl++;
rl               1775 fs/ntfs/runlist.c 		rl->vcn += length;
rl               1777 fs/ntfs/runlist.c 		if (rl->lcn >= 0 || lcn_fixup)
rl               1778 fs/ntfs/runlist.c 			rl->lcn += length;
rl               1779 fs/ntfs/runlist.c 		rl->length -= length;
rl               1793 fs/ntfs/runlist.c 		rl->length = start - rl->vcn;
rl               1794 fs/ntfs/runlist.c 		rl++;
rl               1796 fs/ntfs/runlist.c 		if (rl < rl_end)
rl               1797 fs/ntfs/runlist.c 			memmove(rl, rl_end, (rl_real_end - rl_end + 1) *
rl               1798 fs/ntfs/runlist.c 					sizeof(*rl));
rl               1800 fs/ntfs/runlist.c 		rl->vcn = start;
rl               1801 fs/ntfs/runlist.c 		rl->length = rl[1].vcn - start;
rl               1815 fs/ntfs/runlist.c 	if (end >= rl[1].vcn) {
rl               1820 fs/ntfs/runlist.c 		if (rl[1].length && end >= rl[2].vcn) {
rl               1822 fs/ntfs/runlist.c 			rl->length = start - rl->vcn;
rl               1823 fs/ntfs/runlist.c 			rl++;
rl               1824 fs/ntfs/runlist.c 			rl->vcn = start;
rl               1825 fs/ntfs/runlist.c 			rl->lcn = LCN_HOLE;
rl               1828 fs/ntfs/runlist.c 		trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 1);
rl               1832 fs/ntfs/runlist.c 		if (runlist->rl != trl) {
rl               1833 fs/ntfs/runlist.c 			rl = trl + (rl - runlist->rl);
rl               1834 fs/ntfs/runlist.c 			rl_end = trl + (rl_end - runlist->rl);
rl               1835 fs/ntfs/runlist.c 			rl_real_end = trl + (rl_real_end - runlist->rl);
rl               1836 fs/ntfs/runlist.c 			runlist->rl = trl;
rl               1839 fs/ntfs/runlist.c 		rl->length = start - rl->vcn;
rl               1840 fs/ntfs/runlist.c 		rl++;
rl               1846 fs/ntfs/runlist.c 		delta = rl->vcn - start;
rl               1847 fs/ntfs/runlist.c 		rl->vcn = start;
rl               1848 fs/ntfs/runlist.c 		if (rl->lcn >= 0) {
rl               1849 fs/ntfs/runlist.c 			rl->lcn -= delta;
rl               1853 fs/ntfs/runlist.c 		rl->length += delta;
rl               1863 fs/ntfs/runlist.c 	trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 2);
rl               1867 fs/ntfs/runlist.c 	if (runlist->rl != trl) {
rl               1868 fs/ntfs/runlist.c 		rl = trl + (rl - runlist->rl);
rl               1869 fs/ntfs/runlist.c 		rl_end = trl + (rl_end - runlist->rl);
rl               1870 fs/ntfs/runlist.c 		rl_real_end = trl + (rl_real_end - runlist->rl);
rl               1871 fs/ntfs/runlist.c 		runlist->rl = trl;
rl               1874 fs/ntfs/runlist.c 	memmove(rl + 2, rl, (rl_real_end - rl + 1) * sizeof(*rl));
rl               1876 fs/ntfs/runlist.c 	rl->length = start - rl->vcn;
rl               1877 fs/ntfs/runlist.c 	rl++;
rl               1878 fs/ntfs/runlist.c 	rl->vcn = start;
rl               1879 fs/ntfs/runlist.c 	rl->lcn = LCN_HOLE;
rl               1880 fs/ntfs/runlist.c 	rl->length = length;
rl               1881 fs/ntfs/runlist.c 	rl++;
rl               1882 fs/ntfs/runlist.c 	delta = end - rl->vcn;
rl               1883 fs/ntfs/runlist.c 	rl->vcn = end;
rl               1884 fs/ntfs/runlist.c 	rl->lcn += delta;
rl               1885 fs/ntfs/runlist.c 	rl->length -= delta;
rl                 41 fs/ntfs/runlist.h 	runlist_element *rl;
rl                 45 fs/ntfs/runlist.h static inline void ntfs_init_runlist(runlist *rl)
rl                 47 fs/ntfs/runlist.h 	rl->rl = NULL;
rl                 48 fs/ntfs/runlist.h 	init_rwsem(&rl->lock);
rl                 65 fs/ntfs/runlist.h extern LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn);
rl                 69 fs/ntfs/runlist.h extern runlist_element *ntfs_rl_find_vcn_nolock(runlist_element *rl,
rl                 73 fs/ntfs/runlist.h 		const runlist_element *rl, const VCN first_vcn,
rl                 77 fs/ntfs/runlist.h 		const int dst_len, const runlist_element *rl,
rl               1079 fs/ntfs/super.c 	runlist_element *rl, rl2[2];
rl               1179 fs/ntfs/super.c 	rl = mirr_ni->runlist.rl;
rl               1183 fs/ntfs/super.c 		if (rl2[i].vcn != rl[i].vcn || rl2[i].lcn != rl[i].lcn ||
rl               1184 fs/ntfs/super.c 				rl2[i].length != rl[i].length) {
rl               1241 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_list *rl = &rb->rf_records;
rl               1242 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];
rl               1257 fs/ocfs2/refcounttree.c 		if (index != le16_to_cpu(rl->rl_used) - 1) {
rl               1259 fs/ocfs2/refcounttree.c 				(le16_to_cpu(rl->rl_used) - index - 1) *
rl               1261 fs/ocfs2/refcounttree.c 			memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
rl               1265 fs/ocfs2/refcounttree.c 		le16_add_cpu(&rl->rl_used, -1);
rl               1411 fs/ocfs2/refcounttree.c static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
rl               1414 fs/ocfs2/refcounttree.c 	int num_used = le16_to_cpu(rl->rl_used);
rl               1420 fs/ocfs2/refcounttree.c 					&rl->rl_recs[middle - delta - 1],
rl               1421 fs/ocfs2/refcounttree.c 					&rl->rl_recs[middle - delta])) {
rl               1432 fs/ocfs2/refcounttree.c 					&rl->rl_recs[middle + delta],
rl               1433 fs/ocfs2/refcounttree.c 					&rl->rl_recs[middle + delta + 1])) {
rl               1442 fs/ocfs2/refcounttree.c 	*split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
rl               1454 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_list *rl = &rb->rf_records;
rl               1461 fs/ocfs2/refcounttree.c 		le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
rl               1475 fs/ocfs2/refcounttree.c 	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
rl               1479 fs/ocfs2/refcounttree.c 	ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
rl               1488 fs/ocfs2/refcounttree.c 	num_moved = le16_to_cpu(rl->rl_used) - split_index;
rl               1489 fs/ocfs2/refcounttree.c 	memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
rl               1493 fs/ocfs2/refcounttree.c 	memset(&rl->rl_recs[split_index], 0,
rl               1497 fs/ocfs2/refcounttree.c 	le16_add_cpu(&rl->rl_used, -num_moved);
rl               1500 fs/ocfs2/refcounttree.c 	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
rl                156 fs/qnx4/inode.c 	int rd, rl;
rl                163 fs/qnx4/inode.c 	rl = le32_to_cpu(s->RootDir.di_first_xtnt.xtnt_size);
rl                164 fs/qnx4/inode.c 	for (j = 0; j < rl; j++) {
rl                872 include/linux/blk-cgroup.h #define blk_queue_for_each_rl(rl, q)	\
rl                873 include/linux/blk-cgroup.h 	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
rl                 41 include/linux/jump_label_ratelimit.h jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
rl                 45 include/linux/jump_label_ratelimit.h #define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl)			\
rl                 48 include/linux/jump_label_ratelimit.h 		.timeout =	(rl),					\
rl                 54 include/linux/jump_label_ratelimit.h #define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl)			\
rl                 57 include/linux/jump_label_ratelimit.h 		.timeout =	(rl),					\
rl                 73 include/linux/jump_label_ratelimit.h #define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl)	\
rl                 75 include/linux/jump_label_ratelimit.h #define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl)	\
rl                 91 include/linux/jump_label_ratelimit.h 		unsigned long rl)
rl                221 include/linux/math64.h 	} rl, rm, rn, rh, a0, b0;
rl                227 include/linux/math64.h 	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
rl                237 include/linux/math64.h 	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
rl                246 include/linux/math64.h 		return rl.ll;
rl                248 include/linux/math64.h 		return (rl.ll >> shift) | (rh.ll << (64 - shift));
rl                267 include/linux/math64.h 	} u, rl, rh;
rl                270 include/linux/math64.h 	rl.ll = mul_u32_u32(u.l.low, mul);
rl                271 include/linux/math64.h 	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;
rl                274 include/linux/math64.h 	rl.l.high = do_div(rh.ll, divisor);
rl                277 include/linux/math64.h 	do_div(rl.ll, divisor);
rl                279 include/linux/math64.h 	rl.l.high = rh.l.low;
rl                280 include/linux/math64.h 	return rl.ll;
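
The include/linux/math64.h entries above build a 64 x 64 -> 128-bit product out of 32 x 32 -> 64-bit partial products (mul_u32_u32) and then propagate the carries between the rl/rm/rn/rh words. A hedged sketch of that decomposition in plain C follows; mul_128() and its output parameters are illustrative names for this sketch, not the kernel helpers.

/* Sketch of a 64x64 -> 128-bit multiply built from 32x32 -> 64-bit partial
 * products, the same decomposition the math64.h hits above rely on. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static void mul_128(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
{
	uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;
	uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;

	uint64_t p_ll = a_lo * b_lo;   /* contributes to bits   0..63  */
	uint64_t p_lh = a_lo * b_hi;   /* contributes to bits  32..95  */
	uint64_t p_hl = a_hi * b_lo;   /* contributes to bits  32..95  */
	uint64_t p_hh = a_hi * b_hi;   /* contributes to bits  64..127 */

	/* Sum the pieces that land in bits 32..63; the upper half of this
	 * sum is the carry into the high word. */
	uint64_t mid = (p_ll >> 32) + (uint32_t)p_lh + (uint32_t)p_hl;

	*lo = (mid << 32) | (uint32_t)p_ll;
	*hi = p_hh + (p_lh >> 32) + (p_hl >> 32) + (mid >> 32);
}

int main(void)
{
	uint64_t hi, lo;

	mul_128(UINT64_MAX, UINT64_MAX, &hi, &lo);
	/* (2^64 - 1)^2 = 2^128 - 2^65 + 1 -> hi = 0xfffffffffffffffe, lo = 1 */
	printf("hi=%" PRIx64 " lo=%" PRIx64 "\n", hi, lo);
	return 0;
}
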
rl                514 include/linux/mlx5/driver.h 	struct mlx5_rate_limit	rl;
rl               1007 include/linux/mlx5/driver.h 		     struct mlx5_rate_limit *rl);
rl               1008 include/linux/mlx5/driver.h void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
rl                147 include/math-emu/op-2.h #define __FP_FRAC_ADD_2(rh, rl, xh, xl, yh, yl)	\
rl                148 include/math-emu/op-2.h   (rh = xh + yh + ((rl = xl + yl) < xl))
rl                151 include/math-emu/op-2.h #define __FP_FRAC_SUB_2(rh, rl, xh, xl, yh, yl)	\
rl                152 include/math-emu/op-2.h   (rh = xh - yh - ((rl = xl - yl) > xl))
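
The __FP_FRAC_ADD_2 entry above adds two two-word fractions and detects the carry out of the low word through unsigned wraparound: if (rl = xl + yl) ends up smaller than xl, the low-word addition overflowed. A self-contained sketch of the same idiom, assuming 64-bit words and an illustrative add_u128() name, is below.

/* Sketch of a two-word add with wraparound carry detection, the idiom used
 * by __FP_FRAC_ADD_2 above; add_u128() is an illustrative name only. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static void add_u128(uint64_t xh, uint64_t xl, uint64_t yh, uint64_t yl,
		     uint64_t *rh, uint64_t *rl)
{
	*rl = xl + yl;
	*rh = xh + yh + (*rl < xl);   /* wraparound of the low word means carry */
}

int main(void)
{
	uint64_t rh, rl;

	/* 0x1_ffffffffffffffff + 1 = 0x2_0000000000000000 */
	add_u128(1, UINT64_MAX, 0, 1, &rh, &rl);
	printf("rh=%" PRIx64 " rl=%" PRIx64 "\n", rh, rl);
	return 0;
}
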
rl                301 kernel/jump_label.c 		unsigned long rl)
rl                304 kernel/jump_label.c 	key->timeout = rl;
rl                996 sound/soc/codecs/rt1305.c 	unsigned int rh, rl, rhl, r0ohm;
rl               1061 sound/soc/codecs/rt1305.c 	regmap_read(rt1305->regmap, RT1305_PR_BASE + 0x56, &rl);
rl               1062 sound/soc/codecs/rt1305.c 	rhl = (rh << 16) | rl;
rl               1065 sound/soc/codecs/rt1305.c 	pr_debug("Left_rhl = 0x%x rh=0x%x rl=0x%x\n", rhl, rh, rl);
rl               1078 sound/soc/codecs/rt1305.c 	regmap_read(rt1305->regmap, RT1305_PR_BASE + 0x56, &rl);
rl               1079 sound/soc/codecs/rt1305.c 	rhl = (rh << 16) | rl;
rl               1082 sound/soc/codecs/rt1305.c 	pr_debug("Right_rhl = 0x%x rh=0x%x rl=0x%x\n", rhl, rh, rl);
rl                307 tools/perf/bench/epoll-ctl.c 	struct rlimit rl, prevrl;
rl                345 tools/perf/bench/epoll-ctl.c 	rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;
rl                347 tools/perf/bench/epoll-ctl.c 		  (uint64_t)prevrl.rlim_max, (uint64_t)rl.rlim_max);
rl                348 tools/perf/bench/epoll-ctl.c 	if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
rl                422 tools/perf/bench/epoll-wait.c 	struct rlimit rl, prevrl;
rl                465 tools/perf/bench/epoll-wait.c 	rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;
rl                467 tools/perf/bench/epoll-wait.c 		  (uint64_t)prevrl.rlim_max, (uint64_t)rl.rlim_max);
rl                468 tools/perf/bench/epoll-wait.c 	if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
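
Both epoll benchmark entries above raise RLIMIT_NOFILE with setrlimit() before creating nfds * nthreads file descriptors. A minimal userspace sketch of that resource-limit bump follows; the target value of 4096 is an arbitrary placeholder rather than the benchmarks' formula.

/* Sketch of raising the open-file limit before opening many descriptors. */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit prev, rl;

	if (getrlimit(RLIMIT_NOFILE, &prev) < 0) {
		perror("getrlimit");
		return 1;
	}

	rl.rlim_cur = rl.rlim_max = 4096;  /* placeholder for nfds * nthreads * 2 + slack */
	if (setrlimit(RLIMIT_NOFILE, &rl) < 0) {
		perror("setrlimit");       /* raising rlim_max may require privilege */
		return 1;
	}

	printf("RLIMIT_NOFILE: %llu -> %llu\n",
	       (unsigned long long)prev.rlim_max,
	       (unsigned long long)rl.rlim_max);
	return 0;
}
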
rl                235 tools/perf/util/metricgroup.c static struct rb_node *mep_new(struct rblist *rl __maybe_unused,
rl                273 tools/perf/util/metricgroup.c static void mep_delete(struct rblist *rl __maybe_unused,