Searched refs:rl (Results 1 - 66 of 66) sorted by relevance

/linux-4.1.27/drivers/s390/scsi/
zfcp_reqlist.h
40 struct zfcp_reqlist *rl; zfcp_reqlist_alloc() local
42 rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL); zfcp_reqlist_alloc()
43 if (!rl) zfcp_reqlist_alloc()
46 spin_lock_init(&rl->lock); zfcp_reqlist_alloc()
49 INIT_LIST_HEAD(&rl->buckets[i]); zfcp_reqlist_alloc()
51 return rl; zfcp_reqlist_alloc()
56 * @rl: pointer to reqlist
60 static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl) zfcp_reqlist_isempty() argument
65 if (!list_empty(&rl->buckets[i])) zfcp_reqlist_isempty()
72 * @rl: The reqlist where to free memory
74 static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl) zfcp_reqlist_free() argument
77 BUG_ON(!zfcp_reqlist_isempty(rl)); zfcp_reqlist_free()
79 kfree(rl); zfcp_reqlist_free()
83 _zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id) _zfcp_reqlist_find() argument
89 list_for_each_entry(req, &rl->buckets[i], list) _zfcp_reqlist_find()
97 * @rl: The reqlist where to lookup the FSF request
104 zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id) zfcp_reqlist_find() argument
109 spin_lock_irqsave(&rl->lock, flags); zfcp_reqlist_find()
110 req = _zfcp_reqlist_find(rl, req_id); zfcp_reqlist_find()
111 spin_unlock_irqrestore(&rl->lock, flags); zfcp_reqlist_find()
118 * @rl: reqlist where to search and remove entry
129 zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id) zfcp_reqlist_find_rm() argument
134 spin_lock_irqsave(&rl->lock, flags); zfcp_reqlist_find_rm()
135 req = _zfcp_reqlist_find(rl, req_id); zfcp_reqlist_find_rm()
138 spin_unlock_irqrestore(&rl->lock, flags); zfcp_reqlist_find_rm()
145 * @rl: reqlist where to add the entry
153 static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl, zfcp_reqlist_add() argument
161 spin_lock_irqsave(&rl->lock, flags); zfcp_reqlist_add()
162 list_add_tail(&req->list, &rl->buckets[i]); zfcp_reqlist_add()
163 spin_unlock_irqrestore(&rl->lock, flags); zfcp_reqlist_add()
168 * @rl: The zfcp_reqlist where to remove all entries
171 static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl, zfcp_reqlist_move() argument
177 spin_lock_irqsave(&rl->lock, flags); zfcp_reqlist_move()
179 list_splice_init(&rl->buckets[i], list); zfcp_reqlist_move()
180 spin_unlock_irqrestore(&rl->lock, flags); zfcp_reqlist_move()
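The zfcp_reqlist.h hits above describe a small hashed-bucket lookup table for outstanding FSF requests: allocate, add under the list's spinlock, look up (optionally removing) an entry by request id, and free only once the list is empty again. The fragment below is only an illustrative sketch of how a caller might drive that API; it assumes zfcp_reqlist_add() takes the request pointer and that struct zfcp_fsf_req carries a req_id member, and it is not code taken from the zfcp driver.

/* Sketch only -- details beyond the hits above are assumptions. */
static int example_track_request(struct zfcp_reqlist *rl,
				 struct zfcp_fsf_req *req,
				 unsigned long req_id)
{
	req->req_id = req_id;			/* assumed field, see lead-in */
	zfcp_reqlist_add(rl, req);		/* bucket picked from req_id, under rl->lock */

	/* ... later, on completion ... */

	if (zfcp_reqlist_find_rm(rl, req_id) != req)	/* lookup and unlink in one step */
		return -ENOENT;
	return 0;
}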
/linux-4.1.27/fs/ntfs/
runlist.c
56 * @rl: original runlist
57 * @old_size: number of runlist elements in the original runlist @rl
64 * It is up to the caller to serialize access to the runlist @rl.
74 static inline runlist_element *ntfs_rl_realloc(runlist_element *rl, ntfs_rl_realloc() argument
79 old_size = PAGE_ALIGN(old_size * sizeof(*rl)); ntfs_rl_realloc()
80 new_size = PAGE_ALIGN(new_size * sizeof(*rl)); ntfs_rl_realloc()
82 return rl; ntfs_rl_realloc()
88 if (likely(rl != NULL)) { ntfs_rl_realloc()
91 memcpy(new_rl, rl, old_size); ntfs_rl_realloc()
92 ntfs_free(rl); ntfs_rl_realloc()
99 * @rl: original runlist
100 * @old_size: number of runlist elements in the original runlist @rl
110 * It is up to the caller to serialize access to the runlist @rl.
120 static inline runlist_element *ntfs_rl_realloc_nofail(runlist_element *rl, ntfs_rl_realloc_nofail() argument
125 old_size = PAGE_ALIGN(old_size * sizeof(*rl)); ntfs_rl_realloc_nofail()
126 new_size = PAGE_ALIGN(new_size * sizeof(*rl)); ntfs_rl_realloc_nofail()
128 return rl; ntfs_rl_realloc_nofail()
133 if (likely(rl != NULL)) { ntfs_rl_realloc_nofail()
136 memcpy(new_rl, rl, old_size); ntfs_rl_realloc_nofail()
137 ntfs_free(rl); ntfs_rl_realloc_nofail()
534 int di, si; /* Current index into @[ds]rl. */ ntfs_runlists_merge()
537 int dend, send; /* Last index into @[ds]rl. */ ntfs_runlists_merge()
538 int dfinal, sfinal; /* The last index into @[ds]rl with ntfs_runlists_merge()
755 runlist_element *rl; /* The output runlist. */ ntfs_mapping_pairs_decompress() local
788 rl = ntfs_malloc_nofs(rlsize = PAGE_SIZE); ntfs_mapping_pairs_decompress()
789 if (unlikely(!rl)) ntfs_mapping_pairs_decompress()
793 rl->vcn = 0; ntfs_mapping_pairs_decompress()
794 rl->lcn = LCN_RL_NOT_MAPPED; ntfs_mapping_pairs_decompress()
795 rl->length = vcn; ntfs_mapping_pairs_decompress()
809 ntfs_free(rl); ntfs_mapping_pairs_decompress()
812 memcpy(rl2, rl, rlsize); ntfs_mapping_pairs_decompress()
813 ntfs_free(rl); ntfs_mapping_pairs_decompress()
814 rl = rl2; ntfs_mapping_pairs_decompress()
818 rl[rlpos].vcn = vcn; ntfs_mapping_pairs_decompress()
850 rl[rlpos].length = deltaxcn; ntfs_mapping_pairs_decompress()
859 rl[rlpos].lcn = LCN_HOLE; ntfs_mapping_pairs_decompress()
892 rl[rlpos].lcn = lcn; ntfs_mapping_pairs_decompress()
938 rl[rlpos].vcn = vcn; ntfs_mapping_pairs_decompress()
939 vcn += rl[rlpos].length = max_cluster - ntfs_mapping_pairs_decompress()
941 rl[rlpos].lcn = LCN_RL_NOT_MAPPED; ntfs_mapping_pairs_decompress()
953 rl[rlpos].lcn = LCN_ENOENT; ntfs_mapping_pairs_decompress()
955 rl[rlpos].lcn = LCN_RL_NOT_MAPPED; ntfs_mapping_pairs_decompress()
958 rl[rlpos].vcn = vcn; ntfs_mapping_pairs_decompress()
959 rl[rlpos].length = (s64)0; ntfs_mapping_pairs_decompress()
963 ntfs_debug_dump_runlist(rl); ntfs_mapping_pairs_decompress()
964 return rl; ntfs_mapping_pairs_decompress()
967 old_rl = ntfs_runlists_merge(old_rl, rl); ntfs_mapping_pairs_decompress()
970 ntfs_free(rl); ntfs_mapping_pairs_decompress()
976 ntfs_free(rl); ntfs_mapping_pairs_decompress()
982 * @rl: runlist to use for conversion
986 * cluster number (lcn) of a device using the runlist @rl to map vcns to their
989 * It is up to the caller to serialize access to the runlist @rl.
1004 LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn) ntfs_rl_vcn_to_lcn() argument
1010 * If rl is NULL, assume that we have found an unmapped runlist. The ntfs_rl_vcn_to_lcn()
1014 if (unlikely(!rl)) ntfs_rl_vcn_to_lcn()
1018 if (unlikely(vcn < rl[0].vcn)) ntfs_rl_vcn_to_lcn()
1021 for (i = 0; likely(rl[i].length); i++) { ntfs_rl_vcn_to_lcn()
1022 if (unlikely(vcn < rl[i+1].vcn)) { ntfs_rl_vcn_to_lcn()
1023 if (likely(rl[i].lcn >= (LCN)0)) ntfs_rl_vcn_to_lcn()
1024 return rl[i].lcn + (vcn - rl[i].vcn); ntfs_rl_vcn_to_lcn()
1025 return rl[i].lcn; ntfs_rl_vcn_to_lcn()
1032 if (likely(rl[i].lcn < (LCN)0)) ntfs_rl_vcn_to_lcn()
1033 return rl[i].lcn; ntfs_rl_vcn_to_lcn()
1042 * @rl: runlist to search
1045 * Find the virtual cluster number @vcn in the runlist @rl and return the
1048 * Return NULL if @rl is NULL or @vcn is in an unmapped part/out of bounds of
1053 runlist_element *ntfs_rl_find_vcn_nolock(runlist_element *rl, const VCN vcn) ntfs_rl_find_vcn_nolock() argument
1056 if (unlikely(!rl || vcn < rl[0].vcn)) ntfs_rl_find_vcn_nolock()
1058 while (likely(rl->length)) { ntfs_rl_find_vcn_nolock()
1059 if (unlikely(vcn < rl[1].vcn)) { ntfs_rl_find_vcn_nolock()
1060 if (likely(rl->lcn >= LCN_HOLE)) ntfs_rl_find_vcn_nolock()
1061 return rl; ntfs_rl_find_vcn_nolock()
1064 rl++; ntfs_rl_find_vcn_nolock()
1066 if (likely(rl->lcn == LCN_ENOENT)) ntfs_rl_find_vcn_nolock()
1067 return rl; ntfs_rl_find_vcn_nolock()
1105 * @rl: locked runlist to determine the size of the mapping pairs of
1109 * Walk the locked runlist @rl and calculate the size in bytes of the mapping
1110 * pairs array corresponding to the runlist @rl, starting at vcn @first_vcn and
1120 * If @rl is NULL, just return 1 (for the single terminator byte).
1128 * Locking: @rl must be locked on entry (either for reading or writing), it
1132 const runlist_element *rl, const VCN first_vcn, ntfs_get_size_for_mapping_pairs()
1142 if (!rl) { ntfs_get_size_for_mapping_pairs()
1148 while (rl->length && first_vcn >= rl[1].vcn) ntfs_get_size_for_mapping_pairs()
1149 rl++; ntfs_get_size_for_mapping_pairs()
1150 if (unlikely((!rl->length && first_vcn > rl->vcn) || ntfs_get_size_for_mapping_pairs()
1151 first_vcn < rl->vcn)) ntfs_get_size_for_mapping_pairs()
1157 if (first_vcn > rl->vcn) { ntfs_get_size_for_mapping_pairs()
1158 s64 delta, length = rl->length; ntfs_get_size_for_mapping_pairs()
1160 /* We know rl->length != 0 already. */ ntfs_get_size_for_mapping_pairs()
1161 if (unlikely(length < 0 || rl->lcn < LCN_HOLE)) ntfs_get_size_for_mapping_pairs()
1167 if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) { ntfs_get_size_for_mapping_pairs()
1169 if (unlikely(rl[1].vcn > s1)) ntfs_get_size_for_mapping_pairs()
1170 length = s1 - rl->vcn; ntfs_get_size_for_mapping_pairs()
1173 delta = first_vcn - rl->vcn; ntfs_get_size_for_mapping_pairs()
1183 if (likely(rl->lcn >= 0 || vol->major_ver < 3)) { ntfs_get_size_for_mapping_pairs()
1184 prev_lcn = rl->lcn; ntfs_get_size_for_mapping_pairs()
1185 if (likely(rl->lcn >= 0)) ntfs_get_size_for_mapping_pairs()
1191 rl++; ntfs_get_size_for_mapping_pairs()
1194 for (; rl->length && !the_end; rl++) { ntfs_get_size_for_mapping_pairs()
1195 s64 length = rl->length; ntfs_get_size_for_mapping_pairs()
1197 if (unlikely(length < 0 || rl->lcn < LCN_HOLE)) ntfs_get_size_for_mapping_pairs()
1203 if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) { ntfs_get_size_for_mapping_pairs()
1205 if (unlikely(rl[1].vcn > s1)) ntfs_get_size_for_mapping_pairs()
1206 length = s1 - rl->vcn; ntfs_get_size_for_mapping_pairs()
1218 if (likely(rl->lcn >= 0 || vol->major_ver < 3)) { ntfs_get_size_for_mapping_pairs()
1220 rls += ntfs_get_nr_significant_bytes(rl->lcn - ntfs_get_size_for_mapping_pairs()
1222 prev_lcn = rl->lcn; ntfs_get_size_for_mapping_pairs()
1227 if (rl->lcn == LCN_RL_NOT_MAPPED) ntfs_get_size_for_mapping_pairs()
1290 * @rl: locked runlist for which to build the mapping pairs array
1295 * Create the mapping pairs array from the locked runlist @rl, starting at vcn
1304 * If @rl is NULL, just write a single terminator byte to @dst.
1320 * Locking: @rl must be locked on entry (either for reading or writing), it
1324 const int dst_len, const runlist_element *rl, ntfs_mapping_pairs_build()
1337 if (!rl) { ntfs_mapping_pairs_build()
1347 while (rl->length && first_vcn >= rl[1].vcn) ntfs_mapping_pairs_build()
1348 rl++; ntfs_mapping_pairs_build()
1349 if (unlikely((!rl->length && first_vcn > rl->vcn) || ntfs_mapping_pairs_build()
1350 first_vcn < rl->vcn)) ntfs_mapping_pairs_build()
1359 if (first_vcn > rl->vcn) { ntfs_mapping_pairs_build()
1360 s64 delta, length = rl->length; ntfs_mapping_pairs_build()
1362 /* We know rl->length != 0 already. */ ntfs_mapping_pairs_build()
1363 if (unlikely(length < 0 || rl->lcn < LCN_HOLE)) ntfs_mapping_pairs_build()
1369 if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) { ntfs_mapping_pairs_build()
1371 if (unlikely(rl[1].vcn > s1)) ntfs_mapping_pairs_build()
1372 length = s1 - rl->vcn; ntfs_mapping_pairs_build()
1375 delta = first_vcn - rl->vcn; ntfs_mapping_pairs_build()
1390 if (likely(rl->lcn >= 0 || vol->major_ver < 3)) { ntfs_mapping_pairs_build()
1391 prev_lcn = rl->lcn; ntfs_mapping_pairs_build()
1392 if (likely(rl->lcn >= 0)) ntfs_mapping_pairs_build()
1409 rl++; ntfs_mapping_pairs_build()
1412 for (; rl->length && !the_end; rl++) { ntfs_mapping_pairs_build()
1413 s64 length = rl->length; ntfs_mapping_pairs_build()
1415 if (unlikely(length < 0 || rl->lcn < LCN_HOLE)) ntfs_mapping_pairs_build()
1421 if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) { ntfs_mapping_pairs_build()
1423 if (unlikely(rl[1].vcn > s1)) ntfs_mapping_pairs_build()
1424 length = s1 - rl->vcn; ntfs_mapping_pairs_build()
1441 if (likely(rl->lcn >= 0 || vol->major_ver < 3)) { ntfs_mapping_pairs_build()
1444 len_len, dst_max, rl->lcn - prev_lcn); ntfs_mapping_pairs_build()
1447 prev_lcn = rl->lcn; ntfs_mapping_pairs_build()
1463 *stop_vcn = rl->vcn; ntfs_mapping_pairs_build()
1468 if (rl->lcn == LCN_RL_NOT_MAPPED) ntfs_mapping_pairs_build()
1502 runlist_element *rl; ntfs_rl_truncate_nolock() local
1508 rl = runlist->rl; ntfs_rl_truncate_nolock()
1511 runlist->rl = NULL; ntfs_rl_truncate_nolock()
1512 if (rl) ntfs_rl_truncate_nolock()
1513 ntfs_free(rl); ntfs_rl_truncate_nolock()
1516 if (unlikely(!rl)) { ntfs_rl_truncate_nolock()
1521 rl = ntfs_malloc_nofs(PAGE_SIZE); ntfs_rl_truncate_nolock()
1522 if (unlikely(!rl)) { ntfs_rl_truncate_nolock()
1527 runlist->rl = rl; ntfs_rl_truncate_nolock()
1528 rl[1].length = rl->vcn = 0; ntfs_rl_truncate_nolock()
1529 rl->lcn = LCN_HOLE; ntfs_rl_truncate_nolock()
1530 rl[1].vcn = rl->length = new_length; ntfs_rl_truncate_nolock()
1531 rl[1].lcn = LCN_ENOENT; ntfs_rl_truncate_nolock()
1534 BUG_ON(new_length < rl->vcn); ntfs_rl_truncate_nolock()
1536 while (likely(rl->length && new_length >= rl[1].vcn)) ntfs_rl_truncate_nolock()
1537 rl++; ntfs_rl_truncate_nolock()
1542 if (rl->length) { ntfs_rl_truncate_nolock()
1548 trl = rl + 1; ntfs_rl_truncate_nolock()
1551 old_size = trl - runlist->rl + 1; ntfs_rl_truncate_nolock()
1553 rl->length = new_length - rl->vcn; ntfs_rl_truncate_nolock()
1559 if (rl->length) { ntfs_rl_truncate_nolock()
1560 rl++; ntfs_rl_truncate_nolock()
1561 if (!rl->length) ntfs_rl_truncate_nolock()
1563 rl->vcn = new_length; ntfs_rl_truncate_nolock()
1564 rl->length = 0; ntfs_rl_truncate_nolock()
1566 rl->lcn = LCN_ENOENT; ntfs_rl_truncate_nolock()
1569 int new_size = rl - runlist->rl + 1; ntfs_rl_truncate_nolock()
1570 rl = ntfs_rl_realloc(runlist->rl, old_size, new_size); ntfs_rl_truncate_nolock()
1571 if (IS_ERR(rl)) ntfs_rl_truncate_nolock()
1578 runlist->rl = rl; ntfs_rl_truncate_nolock()
1580 } else if (likely(/* !rl->length && */ new_length > rl->vcn)) { ntfs_rl_truncate_nolock()
1587 if ((rl > runlist->rl) && ((rl - 1)->lcn == LCN_HOLE)) ntfs_rl_truncate_nolock()
1588 (rl - 1)->length = new_length - (rl - 1)->vcn; ntfs_rl_truncate_nolock()
1591 old_size = rl - runlist->rl + 1; ntfs_rl_truncate_nolock()
1593 rl = ntfs_rl_realloc(runlist->rl, old_size, ntfs_rl_truncate_nolock()
1595 if (IS_ERR(rl)) { ntfs_rl_truncate_nolock()
1598 return PTR_ERR(rl); ntfs_rl_truncate_nolock()
1600 runlist->rl = rl; ntfs_rl_truncate_nolock()
1602 * Set @rl to the same runlist element in the new ntfs_rl_truncate_nolock()
1605 rl += old_size - 1; ntfs_rl_truncate_nolock()
1607 rl->lcn = LCN_HOLE; ntfs_rl_truncate_nolock()
1608 rl->length = new_length - rl->vcn; ntfs_rl_truncate_nolock()
1610 rl++; ntfs_rl_truncate_nolock()
1611 rl->length = 0; ntfs_rl_truncate_nolock()
1613 rl->vcn = new_length; ntfs_rl_truncate_nolock()
1614 rl->lcn = LCN_ENOENT; ntfs_rl_truncate_nolock()
1615 } else /* if (unlikely(!rl->length && new_length == rl->vcn)) */ { ntfs_rl_truncate_nolock()
1617 rl->lcn = LCN_ENOENT; ntfs_rl_truncate_nolock()
1649 runlist_element *rl, *rl_end, *rl_real_end, *trl; ntfs_rl_punch_nolock() local
1659 rl = runlist->rl; ntfs_rl_punch_nolock()
1660 if (unlikely(!rl)) { ntfs_rl_punch_nolock()
1666 while (likely(rl->length && start >= rl[1].vcn)) ntfs_rl_punch_nolock()
1667 rl++; ntfs_rl_punch_nolock()
1668 rl_end = rl; ntfs_rl_punch_nolock()
1684 if (!rl->length) ntfs_rl_punch_nolock()
1690 old_size = rl_real_end - runlist->rl + 1; ntfs_rl_punch_nolock()
1692 if (rl->lcn == LCN_HOLE) { ntfs_rl_punch_nolock()
1697 if (end <= rl[1].vcn) { ntfs_rl_punch_nolock()
1703 rl->length = end - rl->vcn; ntfs_rl_punch_nolock()
1707 rl->length = rl_end->vcn - rl->vcn; ntfs_rl_punch_nolock()
1710 rl++; ntfs_rl_punch_nolock()
1712 if (rl < rl_end) ntfs_rl_punch_nolock()
1713 memmove(rl, rl_end, (rl_real_end - rl_end + 1) * ntfs_rl_punch_nolock()
1714 sizeof(*rl)); ntfs_rl_punch_nolock()
1716 if (end > rl->vcn) { ntfs_rl_punch_nolock()
1717 delta = end - rl->vcn; ntfs_rl_punch_nolock()
1718 rl->vcn = end; ntfs_rl_punch_nolock()
1719 rl->length -= delta; ntfs_rl_punch_nolock()
1721 if (rl->lcn >= 0) ntfs_rl_punch_nolock()
1722 rl->lcn += delta; ntfs_rl_punch_nolock()
1726 if (rl < rl_end) { ntfs_rl_punch_nolock()
1727 rl = ntfs_rl_realloc(runlist->rl, old_size, ntfs_rl_punch_nolock()
1728 old_size - (rl_end - rl)); ntfs_rl_punch_nolock()
1729 if (IS_ERR(rl)) ntfs_rl_punch_nolock()
1736 runlist->rl = rl; ntfs_rl_punch_nolock()
1745 if (start == rl->vcn) { ntfs_rl_punch_nolock()
1757 if (rl > runlist->rl && (rl - 1)->lcn == LCN_HOLE) { ntfs_rl_punch_nolock()
1758 rl--; ntfs_rl_punch_nolock()
1761 if (end >= rl[1].vcn) { ntfs_rl_punch_nolock()
1762 rl->lcn = LCN_HOLE; ntfs_rl_punch_nolock()
1772 trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 1); ntfs_rl_punch_nolock()
1776 if (runlist->rl != trl) { ntfs_rl_punch_nolock()
1777 rl = trl + (rl - runlist->rl); ntfs_rl_punch_nolock()
1778 rl_end = trl + (rl_end - runlist->rl); ntfs_rl_punch_nolock()
1779 rl_real_end = trl + (rl_real_end - runlist->rl); ntfs_rl_punch_nolock()
1780 runlist->rl = trl; ntfs_rl_punch_nolock()
1784 memmove(rl + 1, rl, (rl_real_end - rl + 1) * sizeof(*rl)); ntfs_rl_punch_nolock()
1786 rl->lcn = LCN_HOLE; ntfs_rl_punch_nolock()
1787 rl->length = length; ntfs_rl_punch_nolock()
1788 rl++; ntfs_rl_punch_nolock()
1789 rl->vcn += length; ntfs_rl_punch_nolock()
1791 if (rl->lcn >= 0 || lcn_fixup) ntfs_rl_punch_nolock()
1792 rl->lcn += length; ntfs_rl_punch_nolock()
1793 rl->length -= length; ntfs_rl_punch_nolock()
1807 rl->length = start - rl->vcn; ntfs_rl_punch_nolock()
1808 rl++; ntfs_rl_punch_nolock()
1810 if (rl < rl_end) ntfs_rl_punch_nolock()
1811 memmove(rl, rl_end, (rl_real_end - rl_end + 1) * ntfs_rl_punch_nolock()
1812 sizeof(*rl)); ntfs_rl_punch_nolock()
1814 rl->vcn = start; ntfs_rl_punch_nolock()
1815 rl->length = rl[1].vcn - start; ntfs_rl_punch_nolock()
1829 if (end >= rl[1].vcn) { ntfs_rl_punch_nolock()
1834 if (rl[1].length && end >= rl[2].vcn) { ntfs_rl_punch_nolock()
1836 rl->length = start - rl->vcn; ntfs_rl_punch_nolock()
1837 rl++; ntfs_rl_punch_nolock()
1838 rl->vcn = start; ntfs_rl_punch_nolock()
1839 rl->lcn = LCN_HOLE; ntfs_rl_punch_nolock()
1842 trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 1); ntfs_rl_punch_nolock()
1846 if (runlist->rl != trl) { ntfs_rl_punch_nolock()
1847 rl = trl + (rl - runlist->rl); ntfs_rl_punch_nolock()
1848 rl_end = trl + (rl_end - runlist->rl); ntfs_rl_punch_nolock()
1849 rl_real_end = trl + (rl_real_end - runlist->rl); ntfs_rl_punch_nolock()
1850 runlist->rl = trl; ntfs_rl_punch_nolock()
1853 rl->length = start - rl->vcn; ntfs_rl_punch_nolock()
1854 rl++; ntfs_rl_punch_nolock()
1860 delta = rl->vcn - start; ntfs_rl_punch_nolock()
1861 rl->vcn = start; ntfs_rl_punch_nolock()
1862 if (rl->lcn >= 0) { ntfs_rl_punch_nolock()
1863 rl->lcn -= delta; ntfs_rl_punch_nolock()
1867 rl->length += delta; ntfs_rl_punch_nolock()
1877 trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 2); ntfs_rl_punch_nolock()
1881 if (runlist->rl != trl) { ntfs_rl_punch_nolock()
1882 rl = trl + (rl - runlist->rl); ntfs_rl_punch_nolock()
1883 rl_end = trl + (rl_end - runlist->rl); ntfs_rl_punch_nolock()
1884 rl_real_end = trl + (rl_real_end - runlist->rl); ntfs_rl_punch_nolock()
1885 runlist->rl = trl; ntfs_rl_punch_nolock()
1888 memmove(rl + 2, rl, (rl_real_end - rl + 1) * sizeof(*rl)); ntfs_rl_punch_nolock()
1890 rl->length = start - rl->vcn; ntfs_rl_punch_nolock()
1891 rl++; ntfs_rl_punch_nolock()
1892 rl->vcn = start; ntfs_rl_punch_nolock()
1893 rl->lcn = LCN_HOLE; ntfs_rl_punch_nolock()
1894 rl->length = length; ntfs_rl_punch_nolock()
1895 rl++; ntfs_rl_punch_nolock()
1896 delta = end - rl->vcn; ntfs_rl_punch_nolock()
1897 rl->vcn = end; ntfs_rl_punch_nolock()
1898 rl->lcn += delta; ntfs_rl_punch_nolock()
1899 rl->length -= delta; ntfs_rl_punch_nolock()
1131 ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol, const runlist_element *rl, const VCN first_vcn, const VCN last_vcn) ntfs_get_size_for_mapping_pairs() argument
1323 ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst, const int dst_len, const runlist_element *rl, const VCN first_vcn, const VCN last_vcn, VCN *const stop_vcn) ntfs_mapping_pairs_build() argument
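Every runlist.c hit above operates on the same structure: an array of (vcn, lcn, length) runs terminated by a zero-length element, with negative lcn values serving as sentinels (LCN_HOLE, LCN_RL_NOT_MAPPED, LCN_ENOENT). The stand-alone program below mirrors the lookup that ntfs_rl_vcn_to_lcn() performs, but with simplified local types and sentinel values chosen only for illustration; it is not the kernel's runlist_element.

#include <stdio.h>

/* Simplified stand-ins for the NTFS types; values are illustrative only. */
typedef long long VCN;
typedef long long LCN;
enum { LCN_HOLE = -1, LCN_RL_NOT_MAPPED = -2, LCN_ENOENT = -3 };

struct run { VCN vcn; LCN lcn; VCN length; };

/* Mirror of the ntfs_rl_vcn_to_lcn() walk: find the run containing @vcn and
 * add the offset into it, or hand back the run's negative sentinel. */
static LCN vcn_to_lcn(const struct run *rl, VCN vcn)
{
	if (!rl)
		return LCN_RL_NOT_MAPPED;
	if (vcn < rl[0].vcn)
		return LCN_ENOENT;
	for (int i = 0; rl[i].length; i++) {
		if (vcn < rl[i + 1].vcn)
			return rl[i].lcn >= 0 ? rl[i].lcn + (vcn - rl[i].vcn)
					      : rl[i].lcn;
	}
	return LCN_ENOENT;
}

int main(void)
{
	/* Two mapped runs with a sparse hole between them, then the terminator. */
	struct run rl[] = {
		{ 0, 1000,       4 },	/* vcn 0..3 -> lcn 1000..1003 */
		{ 4, LCN_HOLE,   2 },	/* vcn 4..5 -> hole            */
		{ 6, 2000,       3 },	/* vcn 6..8 -> lcn 2000..2002  */
		{ 9, LCN_ENOENT, 0 },	/* terminator                  */
	};
	printf("%lld %lld %lld\n", vcn_to_lcn(rl, 2), vcn_to_lcn(rl, 5),
	       vcn_to_lcn(rl, 7));	/* prints: 1002 -1 2001 */
	return 0;
}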
runlist.h
50 * @rl: pointer to an array of runlist elements
51 * @lock: read/write spinlock for serializing access to @rl
55 runlist_element *rl; member in struct:__anon11241
59 static inline void ntfs_init_runlist(runlist *rl) ntfs_init_runlist() argument
61 rl->rl = NULL; ntfs_init_runlist()
62 init_rwsem(&rl->lock); ntfs_init_runlist()
79 extern LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn);
83 extern runlist_element *ntfs_rl_find_vcn_nolock(runlist_element *rl,
87 const runlist_element *rl, const VCN first_vcn,
91 const int dst_len, const runlist_element *rl,
debug.c
133 /* Dump a runlist. Caller has to provide synchronisation for @rl. */ ntfs_debug_dump_runlist()
134 void ntfs_debug_dump_runlist(const runlist_element *rl) ntfs_debug_dump_runlist() argument
143 if (!rl) { ntfs_debug_dump_runlist()
149 LCN lcn = (rl + i)->lcn; ntfs_debug_dump_runlist()
157 (long long)(rl + i)->vcn, lcn_str[index], ntfs_debug_dump_runlist()
158 (long long)(rl + i)->length, ntfs_debug_dump_runlist()
159 (rl + i)->length ? "" : ntfs_debug_dump_runlist()
163 (long long)(rl + i)->vcn, ntfs_debug_dump_runlist()
164 (long long)(rl + i)->lcn, ntfs_debug_dump_runlist()
165 (long long)(rl + i)->length, ntfs_debug_dump_runlist()
166 (rl + i)->length ? "" : ntfs_debug_dump_runlist()
168 if (!(rl + i)->length) ntfs_debug_dump_runlist()
lcnalloc.c
39 * @rl: runlist describing the clusters to free
41 * Free all the clusters described by the runlist @rl on the volume @vol. In
51 const runlist_element *rl) ntfs_cluster_free_from_rl_nolock()
57 if (!rl) ntfs_cluster_free_from_rl_nolock()
59 for (; rl->length; rl++) { ntfs_cluster_free_from_rl_nolock()
62 if (rl->lcn < 0) ntfs_cluster_free_from_rl_nolock()
64 err = ntfs_bitmap_clear_run(lcnbmp_vi, rl->lcn, rl->length); ntfs_cluster_free_from_rl_nolock()
156 runlist_element *rl = NULL; ntfs_cluster_alloc() local
334 if ((rlpos + 2) * sizeof(*rl) > rlsize) { ntfs_cluster_alloc()
338 if (!rl) ntfs_cluster_alloc()
350 memcpy(rl2, rl, rlsize); ntfs_cluster_alloc()
351 ntfs_free(rl); ntfs_cluster_alloc()
352 rl = rl2; ntfs_cluster_alloc()
381 rl[rlpos - 1].lcn, ntfs_cluster_alloc()
383 rl[rlpos - 1].length); ntfs_cluster_alloc()
384 rl[rlpos - 1].length = ++prev_run_len; ntfs_cluster_alloc()
388 rl[rlpos - 1].lcn, ntfs_cluster_alloc()
390 rl[rlpos - 1].length, ntfs_cluster_alloc()
399 rl[rlpos - 1].lcn, ntfs_cluster_alloc()
401 rl[rlpos - 1].length); ntfs_cluster_alloc()
402 rl[rlpos].vcn = rl[rlpos - 1].vcn + ntfs_cluster_alloc()
407 rl[rlpos].vcn = start_vcn; ntfs_cluster_alloc()
409 rl[rlpos].lcn = prev_lcn = lcn + bmp_pos; ntfs_cluster_alloc()
410 rl[rlpos].length = prev_run_len = 1; ntfs_cluster_alloc()
565 tc = rl[rlpos - 1].lcn + ntfs_cluster_alloc()
566 rl[rlpos - 1].length; ntfs_cluster_alloc()
608 tc = rl[rlpos - 1].lcn + ntfs_cluster_alloc()
609 rl[rlpos - 1].length; ntfs_cluster_alloc()
649 tc = rl[rlpos - 1].lcn + ntfs_cluster_alloc()
650 rl[rlpos - 1].length; ntfs_cluster_alloc()
739 if (likely(rl)) { ntfs_cluster_alloc()
740 rl[rlpos].vcn = rl[rlpos - 1].vcn + rl[rlpos - 1].length; ntfs_cluster_alloc()
741 rl[rlpos].lcn = is_extension ? LCN_ENOENT : LCN_RL_NOT_MAPPED; ntfs_cluster_alloc()
742 rl[rlpos].length = 0; ntfs_cluster_alloc()
756 return rl; ntfs_cluster_alloc()
760 if (rl) { ntfs_cluster_alloc()
768 (unsigned long long)rl[0].lcn, ntfs_cluster_alloc()
772 err2 = ntfs_cluster_free_from_rl_nolock(vol, rl); ntfs_cluster_alloc()
780 ntfs_free(rl); ntfs_cluster_alloc()
855 runlist_element *rl; __ntfs_cluster_free() local
880 rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, ctx); __ntfs_cluster_free()
881 if (IS_ERR(rl)) { __ntfs_cluster_free()
885 PTR_ERR(rl)); __ntfs_cluster_free()
886 err = PTR_ERR(rl); __ntfs_cluster_free()
889 if (unlikely(rl->lcn < LCN_HOLE)) { __ntfs_cluster_free()
897 delta = start_vcn - rl->vcn; __ntfs_cluster_free()
900 to_free = rl->length - delta; __ntfs_cluster_free()
904 if (likely(rl->lcn >= 0)) { __ntfs_cluster_free()
906 err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn + delta, __ntfs_cluster_free()
918 ++rl; __ntfs_cluster_free()
928 for (; rl->length && count != 0; ++rl) { __ntfs_cluster_free()
929 if (unlikely(rl->lcn < LCN_HOLE)) { __ntfs_cluster_free()
933 vcn = rl->vcn; __ntfs_cluster_free()
934 rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx); __ntfs_cluster_free()
935 if (IS_ERR(rl)) { __ntfs_cluster_free()
936 err = PTR_ERR(rl); __ntfs_cluster_free()
945 if (unlikely(rl->lcn < LCN_HOLE)) { __ntfs_cluster_free()
951 rl->lcn); __ntfs_cluster_free()
957 to_free = rl->length; __ntfs_cluster_free()
961 if (likely(rl->lcn >= 0)) { __ntfs_cluster_free()
963 err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn, __ntfs_cluster_free()
50 ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol, const runlist_element *rl) ntfs_cluster_free_from_rl_nolock() argument
debug.h
47 extern void ntfs_debug_dump_runlist(const runlist_element *rl);
57 #define ntfs_debug_dump_runlist(rl) do {} while (0)
lcnalloc.h
114 const runlist_element *rl);
119 * @rl: runlist describing the clusters to free
121 * Free all the clusters described by the runlist @rl on the volume @vol. In
129 * - The caller must have locked the runlist @rl for reading or
133 const runlist_element *rl) ntfs_cluster_free_from_rl()
138 ret = ntfs_cluster_free_from_rl_nolock(vol, rl); ntfs_cluster_free_from_rl()
132 ntfs_cluster_free_from_rl(ntfs_volume *vol, const runlist_element *rl) ntfs_cluster_free_from_rl() argument
aops.c
193 runlist_element *rl; ntfs_read_block() local
206 BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni)); ntfs_read_block()
245 rl = NULL; ntfs_read_block()
266 if (!rl) { ntfs_read_block()
269 rl = ni->runlist.rl; ntfs_read_block()
271 if (likely(rl != NULL)) { ntfs_read_block()
273 while (rl->length && rl[1].vcn <= vcn) ntfs_read_block()
274 rl++; ntfs_read_block()
275 lcn = ntfs_rl_vcn_to_lcn(rl, vcn); ntfs_read_block()
306 rl = NULL; ntfs_read_block()
307 } else if (!rl) ntfs_read_block()
347 if (rl) ntfs_read_block()
560 runlist_element *rl; ntfs_write_block() local
630 rl = NULL; ntfs_write_block()
722 if (!rl) { ntfs_write_block()
725 rl = ni->runlist.rl; ntfs_write_block()
727 if (likely(rl != NULL)) { ntfs_write_block()
729 while (rl->length && rl[1].vcn <= vcn) ntfs_write_block()
730 rl++; ntfs_write_block()
731 lcn = ntfs_rl_vcn_to_lcn(rl, vcn); ntfs_write_block()
784 rl = NULL; ntfs_write_block()
785 } else if (!rl) ntfs_write_block()
815 if (rl) ntfs_write_block()
931 runlist_element *rl; ntfs_write_mst_block() local
975 rl = NULL; ntfs_write_mst_block()
1028 if (!rl) { ntfs_write_mst_block()
1031 rl = ni->runlist.rl; ntfs_write_mst_block()
1033 if (likely(rl != NULL)) { ntfs_write_mst_block()
1035 while (rl->length && rl[1].vcn <= vcn) ntfs_write_mst_block()
1036 rl++; ntfs_write_mst_block()
1037 lcn = ntfs_rl_vcn_to_lcn(rl, vcn); ntfs_write_mst_block()
1069 if (!rl) ntfs_write_mst_block()
1113 if (unlikely(rl)) ntfs_write_mst_block()
attrib.c
91 runlist_element *rl; ntfs_map_runlist_nolock() local
187 rl = ntfs_mapping_pairs_decompress(ni->vol, a, ni->runlist.rl); ntfs_map_runlist_nolock()
188 if (IS_ERR(rl)) ntfs_map_runlist_nolock()
189 err = PTR_ERR(rl); ntfs_map_runlist_nolock()
191 ni->runlist.rl = rl; ntfs_map_runlist_nolock()
304 if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <= ntfs_map_runlist()
354 if (!ni->runlist.rl) { ntfs_attr_vcn_to_lcn_nolock()
364 lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn); ntfs_attr_vcn_to_lcn_nolock()
378 if (unlikely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) != ntfs_attr_vcn_to_lcn_nolock()
468 runlist_element *rl; ntfs_attr_find_vcn_nolock() local
477 if (!ni->runlist.rl) { ntfs_attr_find_vcn_nolock()
486 rl = ni->runlist.rl; ntfs_attr_find_vcn_nolock()
487 if (likely(rl && vcn >= rl[0].vcn)) { ntfs_attr_find_vcn_nolock()
488 while (likely(rl->length)) { ntfs_attr_find_vcn_nolock()
489 if (unlikely(vcn < rl[1].vcn)) { ntfs_attr_find_vcn_nolock()
490 if (likely(rl->lcn >= LCN_HOLE)) { ntfs_attr_find_vcn_nolock()
492 return rl; ntfs_attr_find_vcn_nolock()
496 rl++; ntfs_attr_find_vcn_nolock()
498 if (likely(rl->lcn != LCN_RL_NOT_MAPPED)) { ntfs_attr_find_vcn_nolock()
499 if (likely(rl->lcn == LCN_ENOENT)) ntfs_attr_find_vcn_nolock()
716 runlist_element *rl; load_attribute_list() local
736 rl = runlist->rl; load_attribute_list()
737 if (!rl) { load_attribute_list()
743 while (rl->length) { load_attribute_list()
744 lcn = ntfs_rl_vcn_to_lcn(rl, rl->vcn); load_attribute_list()
746 (unsigned long long)rl->vcn, load_attribute_list()
756 max_block = block + (rl->length << vol->cluster_size_bits >> load_attribute_list()
773 rl++; load_attribute_list()
1545 runlist_element *rl; ntfs_attr_make_non_resident() local
1585 rl = ntfs_cluster_alloc(vol, 0, new_size >> ntfs_attr_make_non_resident()
1587 if (IS_ERR(rl)) { ntfs_attr_make_non_resident()
1588 err = PTR_ERR(rl); ntfs_attr_make_non_resident()
1596 rl = NULL; ntfs_attr_make_non_resident()
1600 mp_size = ntfs_get_size_for_mapping_pairs(vol, rl, 0, -1); ntfs_attr_make_non_resident()
1705 arec_size - mp_ofs, rl, 0, -1, NULL); ntfs_attr_make_non_resident()
1712 ni->runlist.rl = rl; ntfs_attr_make_non_resident()
1824 ni->runlist.rl = NULL; ntfs_attr_make_non_resident()
1827 if (rl) { ntfs_attr_make_non_resident()
1828 if (ntfs_cluster_free_from_rl(vol, rl) < 0) { ntfs_attr_make_non_resident()
1835 ntfs_free(rl); ntfs_attr_make_non_resident()
1915 runlist_element *rl, *rl2; ntfs_attr_extend_allocation() local
2172 rl = ni->runlist.rl; ntfs_attr_extend_allocation()
2173 if (likely(rl)) { ntfs_attr_extend_allocation()
2175 while (rl->length) ntfs_attr_extend_allocation()
2176 rl++; ntfs_attr_extend_allocation()
2179 if (unlikely(!rl || rl->lcn == LCN_RL_NOT_MAPPED || ntfs_attr_extend_allocation()
2180 (rl->lcn == LCN_ENOENT && rl > ni->runlist.rl && ntfs_attr_extend_allocation()
2181 (rl-1)->lcn == LCN_RL_NOT_MAPPED))) { ntfs_attr_extend_allocation()
2182 if (!rl && !allocated_size) ntfs_attr_extend_allocation()
2184 rl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl); ntfs_attr_extend_allocation()
2185 if (IS_ERR(rl)) { ntfs_attr_extend_allocation()
2186 err = PTR_ERR(rl); ntfs_attr_extend_allocation()
2200 ni->runlist.rl = rl; ntfs_attr_extend_allocation()
2202 while (rl->length) ntfs_attr_extend_allocation()
2203 rl++; ntfs_attr_extend_allocation()
2206 * We now know the runlist of the last extent is mapped and @rl is at ntfs_attr_extend_allocation()
2213 while (rl->lcn < 0 && rl > ni->runlist.rl) ntfs_attr_extend_allocation()
2214 rl--; ntfs_attr_extend_allocation()
2221 vol->cluster_size_bits, (rl && (rl->lcn >= 0)) ? ntfs_attr_extend_allocation()
2222 rl->lcn + rl->length : -1, DATA_ZONE, true); ntfs_attr_extend_allocation()
2235 rl = ntfs_runlists_merge(ni->runlist.rl, rl2); ntfs_attr_extend_allocation()
2236 if (IS_ERR(rl)) { ntfs_attr_extend_allocation()
2237 err = PTR_ERR(rl); ntfs_attr_extend_allocation()
2256 ni->runlist.rl = rl; ntfs_attr_extend_allocation()
2261 rl2 = ntfs_rl_find_vcn_nolock(rl, ll); ntfs_attr_extend_allocation()
logfile.c
732 runlist_element *rl; ntfs_empty_logfile() local
759 rl = log_ni->runlist.rl; ntfs_empty_logfile()
760 if (unlikely(!rl || vcn < rl->vcn || !rl->length)) { ntfs_empty_logfile()
768 rl = log_ni->runlist.rl; ntfs_empty_logfile()
769 BUG_ON(!rl || vcn < rl->vcn || !rl->length); ntfs_empty_logfile()
772 while (rl->length && vcn >= rl[1].vcn) ntfs_empty_logfile()
773 rl++; ntfs_empty_logfile()
783 lcn = rl->lcn; ntfs_empty_logfile()
785 vcn = rl->vcn; ntfs_empty_logfile()
789 if (unlikely(!rl->length || lcn < LCN_HOLE)) ntfs_empty_logfile()
795 len = rl->length; ntfs_empty_logfile()
796 if (rl[1].vcn > end_vcn) ntfs_empty_logfile()
797 len = end_vcn - rl->vcn; ntfs_empty_logfile()
833 } while ((++rl)->vcn < end_vcn); ntfs_empty_logfile()
mft.c
474 runlist_element *rl; ntfs_sync_mft_mirror() local
520 rl = NULL; ntfs_sync_mft_mirror()
544 if (!rl) { ntfs_sync_mft_mirror()
547 rl = NTFS_I(vol->mftmirr_ino)->runlist.rl; ntfs_sync_mft_mirror()
552 BUG_ON(!rl); ntfs_sync_mft_mirror()
555 while (rl->length && rl[1].vcn <= vcn) ntfs_sync_mft_mirror()
556 rl++; ntfs_sync_mft_mirror()
557 lcn = ntfs_rl_vcn_to_lcn(rl, vcn); ntfs_sync_mft_mirror()
582 if (unlikely(rl)) ntfs_sync_mft_mirror()
678 runlist_element *rl; write_mft_record_nolock() local
696 rl = NULL; write_mft_record_nolock()
733 if (!rl) { write_mft_record_nolock()
735 rl = NTFS_I(vol->mft_ino)->runlist.rl; write_mft_record_nolock()
736 BUG_ON(!rl); write_mft_record_nolock()
739 while (rl->length && rl[1].vcn <= vcn) write_mft_record_nolock()
740 rl++; write_mft_record_nolock()
741 lcn = ntfs_rl_vcn_to_lcn(rl, vcn); write_mft_record_nolock()
765 if (unlikely(rl)) write_mft_record_nolock()
1285 runlist_element *rl, *rl2 = NULL; ntfs_mft_bitmap_extend_allocation_nolock() local
1309 rl = ntfs_attr_find_vcn_nolock(mftbmp_ni, ntfs_mft_bitmap_extend_allocation_nolock()
1311 if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) { ntfs_mft_bitmap_extend_allocation_nolock()
1315 if (!IS_ERR(rl)) ntfs_mft_bitmap_extend_allocation_nolock()
1318 ret = PTR_ERR(rl); ntfs_mft_bitmap_extend_allocation_nolock()
1321 lcn = rl->lcn + rl->length; ntfs_mft_bitmap_extend_allocation_nolock()
1348 rl->length++; ntfs_mft_bitmap_extend_allocation_nolock()
1349 rl[1].vcn++; ntfs_mft_bitmap_extend_allocation_nolock()
1356 rl2 = ntfs_cluster_alloc(vol, rl[1].vcn, 1, lcn, DATA_ZONE, ntfs_mft_bitmap_extend_allocation_nolock()
1364 rl = ntfs_runlists_merge(mftbmp_ni->runlist.rl, rl2); ntfs_mft_bitmap_extend_allocation_nolock()
1365 if (IS_ERR(rl)) { ntfs_mft_bitmap_extend_allocation_nolock()
1375 return PTR_ERR(rl); ntfs_mft_bitmap_extend_allocation_nolock()
1377 mftbmp_ni->runlist.rl = rl; ntfs_mft_bitmap_extend_allocation_nolock()
1381 for (; rl[1].length; rl++) ntfs_mft_bitmap_extend_allocation_nolock()
1385 * Update the attribute record as well. Note: @rl is the last ntfs_mft_bitmap_extend_allocation_nolock()
1401 mftbmp_ni->name_len, CASE_SENSITIVE, rl[1].vcn, NULL, ntfs_mft_bitmap_extend_allocation_nolock()
1413 for (rl2 = rl; rl2 > mftbmp_ni->runlist.rl; rl2--) { ntfs_mft_bitmap_extend_allocation_nolock()
1461 a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 1); ntfs_mft_bitmap_extend_allocation_nolock()
1500 mftbmp_ni->name_len, CASE_SENSITIVE, rl[1].vcn, NULL, ntfs_mft_bitmap_extend_allocation_nolock()
1518 a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 2); ntfs_mft_bitmap_extend_allocation_nolock()
1522 rl->length--; ntfs_mft_bitmap_extend_allocation_nolock()
1523 rl[1].vcn--; ntfs_mft_bitmap_extend_allocation_nolock()
1525 lcn = rl->lcn; ntfs_mft_bitmap_extend_allocation_nolock()
1527 rl->lcn = rl[1].lcn; ntfs_mft_bitmap_extend_allocation_nolock()
1528 rl->length = 0; ntfs_mft_bitmap_extend_allocation_nolock()
1721 runlist_element *rl, *rl2; ntfs_mft_data_extend_allocation_nolock() local
1740 rl = ntfs_attr_find_vcn_nolock(mft_ni, ntfs_mft_data_extend_allocation_nolock()
1742 if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) { ntfs_mft_data_extend_allocation_nolock()
1746 if (!IS_ERR(rl)) ntfs_mft_data_extend_allocation_nolock()
1749 ret = PTR_ERR(rl); ntfs_mft_data_extend_allocation_nolock()
1752 lcn = rl->lcn + rl->length; ntfs_mft_data_extend_allocation_nolock()
1780 old_last_vcn = rl[1].vcn; ntfs_mft_data_extend_allocation_nolock()
1802 rl = ntfs_runlists_merge(mft_ni->runlist.rl, rl2); ntfs_mft_data_extend_allocation_nolock()
1803 if (IS_ERR(rl)) { ntfs_mft_data_extend_allocation_nolock()
1813 return PTR_ERR(rl); ntfs_mft_data_extend_allocation_nolock()
1815 mft_ni->runlist.rl = rl; ntfs_mft_data_extend_allocation_nolock()
1818 for (; rl[1].length; rl++) ntfs_mft_data_extend_allocation_nolock()
1834 CASE_SENSITIVE, rl[1].vcn, NULL, 0, ctx); ntfs_mft_data_extend_allocation_nolock()
1845 for (rl2 = rl; rl2 > mft_ni->runlist.rl; rl2--) { ntfs_mft_data_extend_allocation_nolock()
1898 a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 1); ntfs_mft_data_extend_allocation_nolock()
1902 * @rl is the last (non-terminator) runlist element of mft data ntfs_mft_data_extend_allocation_nolock()
1939 CASE_SENSITIVE, rl[1].vcn, NULL, 0, ctx)) { ntfs_mft_data_extend_allocation_nolock()
compress.c
490 runlist_element *rl; ntfs_read_compressed_block() local
611 rl = NULL; ntfs_read_compressed_block()
616 if (!rl) { ntfs_read_compressed_block()
619 rl = ni->runlist.rl; ntfs_read_compressed_block()
621 if (likely(rl != NULL)) { ntfs_read_compressed_block()
623 while (rl->length && rl[1].vcn <= vcn) ntfs_read_compressed_block()
624 rl++; ntfs_read_compressed_block()
625 lcn = ntfs_rl_vcn_to_lcn(rl, vcn); ntfs_read_compressed_block()
662 if (rl) ntfs_read_compressed_block()
file.c
594 runlist_element *rl, *rl2; ntfs_prepare_pages_for_non_resident_write() local
639 rl = NULL; ntfs_prepare_pages_for_non_resident_write()
843 if (!rl) { ntfs_prepare_pages_for_non_resident_write()
846 rl = ni->runlist.rl; ntfs_prepare_pages_for_non_resident_write()
848 if (likely(rl != NULL)) { ntfs_prepare_pages_for_non_resident_write()
850 while (rl->length && rl[1].vcn <= bh_cpos) ntfs_prepare_pages_for_non_resident_write()
851 rl++; ntfs_prepare_pages_for_non_resident_write()
852 lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos); ntfs_prepare_pages_for_non_resident_write()
860 vcn_len = rl[1].vcn - vcn; ntfs_prepare_pages_for_non_resident_write()
877 rl = NULL; ntfs_prepare_pages_for_non_resident_write()
978 BUG_ON(!rl); ntfs_prepare_pages_for_non_resident_write()
986 BUG_ON(rl->lcn != LCN_HOLE); ntfs_prepare_pages_for_non_resident_write()
988 rl2 = rl; ntfs_prepare_pages_for_non_resident_write()
989 while (--rl2 >= ni->runlist.rl) { ntfs_prepare_pages_for_non_resident_write()
1004 rl = ntfs_runlists_merge(ni->runlist.rl, rl2); ntfs_prepare_pages_for_non_resident_write()
1005 if (IS_ERR(rl)) { ntfs_prepare_pages_for_non_resident_write()
1006 err = PTR_ERR(rl); ntfs_prepare_pages_for_non_resident_write()
1019 ni->runlist.rl = rl; ntfs_prepare_pages_for_non_resident_write()
1057 rl2 = ntfs_rl_find_vcn_nolock(rl, vcn); ntfs_prepare_pages_for_non_resident_write()
1185 rl = NULL; ntfs_prepare_pages_for_non_resident_write()
1197 } else if (unlikely(rl)) ntfs_prepare_pages_for_non_resident_write()
1199 rl = NULL; ntfs_prepare_pages_for_non_resident_write()
1317 mapping_pairs_offset), ni->runlist.rl, ntfs_prepare_pages_for_non_resident_write()
1337 else if (rl) ntfs_prepare_pages_for_non_resident_write()
attrib.h
78 extern int load_attribute_list(ntfs_volume *vol, runlist *rl, u8 *al_start,
inode.h
74 If runlist.rl is NULL, the runlist has not
80 runlist.rl is always NULL.*/
inode.c
747 ni->attr_list_rl.rl = ntfs_mapping_pairs_decompress(vol, ntfs_read_locked_inode()
749 if (IS_ERR(ni->attr_list_rl.rl)) { ntfs_read_locked_inode()
750 err = PTR_ERR(ni->attr_list_rl.rl); ntfs_read_locked_inode()
751 ni->attr_list_rl.rl = NULL; ntfs_read_locked_inode()
1921 ni->attr_list_rl.rl = ntfs_mapping_pairs_decompress(vol, ntfs_read_inode_mount()
1923 if (IS_ERR(ni->attr_list_rl.rl)) { ntfs_read_inode_mount()
1924 err = PTR_ERR(ni->attr_list_rl.rl); ntfs_read_inode_mount()
1925 ni->attr_list_rl.rl = NULL; ntfs_read_inode_mount()
2052 nrl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl); ntfs_read_inode_mount()
2059 ni->runlist.rl = nrl; ntfs_read_inode_mount()
2201 if (ni->runlist.rl) { __ntfs_clear_inode()
2202 ntfs_free(ni->runlist.rl); __ntfs_clear_inode()
2203 ni->runlist.rl = NULL; __ntfs_clear_inode()
2213 if (ni->attr_list_rl.rl) { __ntfs_clear_inode()
2214 ntfs_free(ni->attr_list_rl.rl); __ntfs_clear_inode()
2215 ni->attr_list_rl.rl = NULL; __ntfs_clear_inode()
2734 mp_size = ntfs_get_size_for_mapping_pairs(vol, ni->runlist.rl, 0, -1); ntfs_truncate()
2758 mp_size, ni->runlist.rl, 0, -1, NULL); ntfs_truncate()
super.c
1093 runlist_element *rl, rl2[2]; check_mft_mirror() local
1193 rl = mirr_ni->runlist.rl; check_mft_mirror()
1197 if (rl2[i].vcn != rl[i].vcn || rl2[i].lcn != rl[i].lcn || check_mft_mirror()
1198 rl2[i].length != rl[i].length) { check_mft_mirror()
/linux-4.1.27/include/linux/
jump_label_ratelimit.h
18 jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
31 unsigned long rl) jump_label_rate_limit()
30 jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl) jump_label_rate_limit() argument
blkdev.h
57 struct request_queue *q; /* the queue this rl belongs to */
168 struct request_list *rl; /* rl this rq is alloced from */ member in struct:request
325 * blkgs from their own blkg->rl. Which one to use should be
653 static inline bool blk_rl_full(struct request_list *rl, bool sync) blk_rl_full() argument
657 return rl->flags & flag; blk_rl_full()
660 static inline void blk_set_rl_full(struct request_list *rl, bool sync) blk_set_rl_full() argument
664 rl->flags |= flag; blk_set_rl_full()
667 static inline void blk_clear_rl_full(struct request_list *rl, bool sync) blk_clear_rl_full() argument
671 rl->flags &= ~flag; blk_clear_rl_full()
/linux-4.1.27/crypto/
vmac.c
66 #define ADD128(rh, rl, ih, il) \
69 (rl) += (_il); \
70 if ((rl) < (_il)) \
77 #define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \
82 rl = MUL32(_i1, _i2); \
83 ADD128(rh, rl, (m >> 32), (m << 32)); \
86 #define MUL64(rh, rl, i1, i2) \
92 rl = MUL32(_i1, _i2); \
93 ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
94 ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
111 #define nh_16(mp, kp, nw, rh, rl) \
114 rh = rl = 0; \
118 ADD128(rh, rl, th, tl); \
122 #define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \
125 rh1 = rl1 = rh = rl = 0; \
129 ADD128(rh, rl, th, tl); \
137 #define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
140 rh = rl = 0; \
144 ADD128(rh, rl, th, tl); \
147 ADD128(rh, rl, th, tl); \
150 ADD128(rh, rl, th, tl); \
153 ADD128(rh, rl, th, tl); \
157 #define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \
160 rh1 = rl1 = rh = rl = 0; \
164 ADD128(rh, rl, th, tl); \
170 ADD128(rh, rl, th, tl); \
176 ADD128(rh, rl, th, tl); \
182 ADD128(rh, rl, th, tl); \
216 #define nh_16(mp, kp, nw, rh, rl) \
220 rh = rl = t = 0; \
226 ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
232 ADD128(rh, rl, (t >> 32), (t << 32)); \
303 #define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \
305 nh_16(mp, kp, nw, rh, rl); \
310 #define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
311 nh_16(mp, kp, nw, rh, rl)
314 #define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \
316 nh_vmac_nhbytes(mp, kp, nw, rh, rl); \
330 u64 rh, rl, t, z = 0; l3hash() local
355 MUL64(rh, rl, p1, p2); l3hash()
357 ADD128(t, rl, z, rh); l3hash()
359 ADD128(t, rl, z, rh); l3hash()
361 rl += t; l3hash()
362 rl += (0 - (rl < t)) & 257; l3hash()
363 rl += (0 - (rl > p64-1)) & 257; l3hash()
364 return rl; l3hash()
371 u64 rh, rl, *mptr; vhash_update() local
391 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); vhash_update()
393 ADD128(ch, cl, rh, rl); vhash_update()
399 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); vhash_update()
401 poly_step(ch, cl, pkh, pkl, rh, rl); vhash_update()
412 u64 rh, rl, *mptr; vhash() local
444 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); vhash()
446 poly_step(ch, cl, pkh, pkl, rh, rl); vhash()
450 nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl); vhash()
452 poly_step(ch, cl, pkh, pkl, rh, rl); vhash()
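Most of the vmac.c hits are the portable 128-bit helper macros: ADD128 detects a carry out of the low 64-bit half by checking for unsigned wraparound, and MUL64 assembles a 128-bit product from four 32x32-bit partial products. The program below restates that idea as plain functions with made-up names, purely as an illustration; it is not the kernel macro and it skips the PMUL64 "no overflow" shortcut.

#include <stdint.h>
#include <stdio.h>

/* rh:rl together hold one 128-bit value. */
static void add128(uint64_t *rh, uint64_t *rl, uint64_t ih, uint64_t il)
{
	*rl += il;
	if (*rl < il)		/* unsigned wraparound means a carry out of the low half */
		(*rh)++;
	*rh += ih;
}

static void mul64(uint64_t *rh, uint64_t *rl, uint64_t a, uint64_t b)
{
	uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;
	uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;
	uint64_t m1 = a_hi * b_lo;	/* the two "middle" partial products */
	uint64_t m2 = a_lo * b_hi;

	*rh = a_hi * b_hi;
	*rl = a_lo * b_lo;
	add128(rh, rl, m1 >> 32, m1 << 32);
	add128(rh, rl, m2 >> 32, m2 << 32);
}

int main(void)
{
	uint64_t rh, rl;

	mul64(&rh, &rl, ~0ULL, ~0ULL);	/* (2^64-1)^2 */
	printf("%016llx%016llx\n", (unsigned long long)rh,
	       (unsigned long long)rl);	/* fffffffffffffffe0000000000000001 */
	return 0;
}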
crypto_user.c
130 struct crypto_report_larval rl; crypto_report_one() local
132 strncpy(rl.type, "larval", sizeof(rl.type)); crypto_report_one()
134 sizeof(struct crypto_report_larval), &rl)) crypto_report_one()
camellia_generic.c
340 #define ROLDQ(ll, lr, rl, rr, w0, w1, bits) ({ \
343 lr = (lr << bits) + (rl >> (32 - bits)); \
344 rl = (rl << bits) + (rr >> (32 - bits)); \
348 #define ROLDQo32(ll, lr, rl, rr, w0, w1, bits) ({ \
351 ll = (lr << (bits - 32)) + (rl >> (64 - bits)); \
352 lr = (rl << (bits - 32)) + (rr >> (64 - bits)); \
353 rl = (rr << (bits - 32)) + (w0 >> (64 - bits)); \
832 #define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, t1, t2, t3) ({ \
837 rl ^= t2; \
841 t3 &= rl; \
/linux-4.1.27/fs/dlm/
rcom.c
388 struct rcom_lock *rl) pack_rcom_lock()
390 memset(rl, 0, sizeof(*rl)); pack_rcom_lock()
392 rl->rl_ownpid = cpu_to_le32(lkb->lkb_ownpid); pack_rcom_lock()
393 rl->rl_lkid = cpu_to_le32(lkb->lkb_id); pack_rcom_lock()
394 rl->rl_exflags = cpu_to_le32(lkb->lkb_exflags); pack_rcom_lock()
395 rl->rl_flags = cpu_to_le32(lkb->lkb_flags); pack_rcom_lock()
396 rl->rl_lvbseq = cpu_to_le32(lkb->lkb_lvbseq); pack_rcom_lock()
397 rl->rl_rqmode = lkb->lkb_rqmode; pack_rcom_lock()
398 rl->rl_grmode = lkb->lkb_grmode; pack_rcom_lock()
399 rl->rl_status = lkb->lkb_status; pack_rcom_lock()
400 rl->rl_wait_type = cpu_to_le16(lkb->lkb_wait_type); pack_rcom_lock()
403 rl->rl_asts |= DLM_CB_BAST; pack_rcom_lock()
405 rl->rl_asts |= DLM_CB_CAST; pack_rcom_lock()
407 rl->rl_namelen = cpu_to_le16(r->res_length); pack_rcom_lock()
408 memcpy(rl->rl_name, r->res_name, r->res_length); pack_rcom_lock()
414 memcpy(rl->rl_lvb, lkb->lkb_lvbptr, r->res_ls->ls_lvblen); pack_rcom_lock()
422 struct rcom_lock *rl; dlm_send_rcom_lock() local
432 rl = (struct rcom_lock *) rc->rc_buf; dlm_send_rcom_lock()
433 pack_rcom_lock(r, lkb, rl); dlm_send_rcom_lock()
387 pack_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb, struct rcom_lock *rl) pack_rcom_lock() argument
lock.c
5577 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; receive_rcom_lock_args() local
5580 lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid); receive_rcom_lock_args()
5581 lkb->lkb_remid = le32_to_cpu(rl->rl_lkid); receive_rcom_lock_args()
5582 lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags); receive_rcom_lock_args()
5583 lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF; receive_rcom_lock_args()
5585 lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq); receive_rcom_lock_args()
5586 lkb->lkb_rqmode = rl->rl_rqmode; receive_rcom_lock_args()
5587 lkb->lkb_grmode = rl->rl_grmode; receive_rcom_lock_args()
5590 lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL; receive_rcom_lock_args()
5591 lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL; receive_rcom_lock_args()
5601 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen); receive_rcom_lock_args()
5608 if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) && receive_rcom_lock_args()
5610 rl->rl_status = DLM_LKSTS_CONVERT; receive_rcom_lock_args()
5627 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; dlm_recover_master_copy() local
5634 if (rl->rl_parent_lkid) { dlm_recover_master_copy()
5639 remid = le32_to_cpu(rl->rl_lkid); dlm_recover_master_copy()
5649 error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen), dlm_recover_master_copy()
5680 add_lkb(r, lkb, rl->rl_status); dlm_recover_master_copy()
5690 rl->rl_remid = cpu_to_le32(lkb->lkb_id); dlm_recover_master_copy()
5701 rl->rl_result = cpu_to_le32(error); dlm_recover_master_copy()
5708 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; dlm_recover_process_copy() local
5714 lkid = le32_to_cpu(rl->rl_lkid); dlm_recover_process_copy()
5715 remid = le32_to_cpu(rl->rl_remid); dlm_recover_process_copy()
5716 result = le32_to_cpu(rl->rl_result); dlm_recover_process_copy()
/linux-4.1.27/block/
blk-cgroup.h
102 struct request_list rl; member in struct:blkcg_gq
339 * Try to use blkg->rl. blkg lookup may fail under memory pressure blk_get_rl()
349 return &blkg->rl; blk_get_rl()
357 * @rl: request_list to put
362 static inline void blk_put_rl(struct request_list *rl) blk_put_rl() argument
365 if (rl->blkg && rl->blkg->blkcg != &blkcg_root) blk_put_rl()
366 blkg_put(rl->blkg); blk_put_rl()
372 * @rl: target request_list
374 * Associate @rq with @rl so that accounting and freeing can know the
377 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) blk_rq_set_rl() argument
379 rq->rl = rl; blk_rq_set_rl()
390 return rq->rl; blk_rq_rl()
393 struct request_list *__blk_queue_next_rl(struct request_list *rl,
400 #define blk_queue_for_each_rl(rl, q) \
401 for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
595 static inline void blk_put_rl(struct request_list *rl) { } blk_rq_set_rl() argument
596 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } blk_rq_rl() argument
599 #define blk_queue_for_each_rl(rl, q) \
600 for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
blk-core.c
419 struct request_list *rl; variable in typeref:struct:request_list
421 blk_queue_for_each_rl(rl, q)
422 for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
423 wake_up_all(&rl->wait[i]);
483 struct request_list *rl; blk_set_queue_dying() local
485 blk_queue_for_each_rl(rl, q) { blk_queue_for_each_rl()
486 if (rl->rq_pool) { blk_queue_for_each_rl()
487 wake_up(&rl->wait[BLK_RW_SYNC]); blk_queue_for_each_rl()
488 wake_up(&rl->wait[BLK_RW_ASYNC]); blk_queue_for_each_rl()
574 int blk_init_rl(struct request_list *rl, struct request_queue *q, blk_init_rl() argument
577 if (unlikely(rl->rq_pool)) blk_init_rl()
580 rl->q = q; blk_init_rl()
581 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; blk_init_rl()
582 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; blk_init_rl()
583 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); blk_init_rl()
584 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); blk_init_rl()
586 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct, blk_init_rl()
590 if (!rl->rq_pool) blk_init_rl()
596 void blk_exit_rl(struct request_list *rl) blk_exit_rl() argument
598 if (rl->rq_pool) blk_exit_rl()
599 mempool_destroy(rl->rq_pool); blk_exit_rl()
799 static inline void blk_free_request(struct request_list *rl, struct request *rq) blk_free_request() argument
802 elv_put_request(rl->q, rq); blk_free_request()
807 mempool_free(rq, rl->rq_pool); blk_free_request()
844 static void __freed_request(struct request_list *rl, int sync) __freed_request() argument
846 struct request_queue *q = rl->q; __freed_request()
852 if (rl == &q->root_rl && __freed_request()
853 rl->count[sync] < queue_congestion_off_threshold(q)) __freed_request()
856 if (rl->count[sync] + 1 <= q->nr_requests) { __freed_request()
857 if (waitqueue_active(&rl->wait[sync])) __freed_request()
858 wake_up(&rl->wait[sync]); __freed_request()
860 blk_clear_rl_full(rl, sync); __freed_request()
868 static void freed_request(struct request_list *rl, unsigned int flags) freed_request() argument
870 struct request_queue *q = rl->q; freed_request()
874 rl->count[sync]--; freed_request()
878 __freed_request(rl, sync); freed_request()
880 if (unlikely(rl->starved[sync ^ 1])) freed_request()
881 __freed_request(rl, sync ^ 1); freed_request()
886 struct request_list *rl; blk_update_nr_requests() local
893 rl = &q->root_rl; blk_update_nr_requests()
895 if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) blk_update_nr_requests()
897 else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) blk_update_nr_requests()
900 if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q)) blk_update_nr_requests()
902 else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) blk_update_nr_requests()
905 blk_queue_for_each_rl(rl, q) { blk_queue_for_each_rl()
906 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { blk_queue_for_each_rl()
907 blk_set_rl_full(rl, BLK_RW_SYNC); blk_queue_for_each_rl()
909 blk_clear_rl_full(rl, BLK_RW_SYNC); blk_queue_for_each_rl()
910 wake_up(&rl->wait[BLK_RW_SYNC]); blk_queue_for_each_rl()
913 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { blk_queue_for_each_rl()
914 blk_set_rl_full(rl, BLK_RW_ASYNC); blk_queue_for_each_rl()
916 blk_clear_rl_full(rl, BLK_RW_ASYNC); blk_queue_for_each_rl()
917 wake_up(&rl->wait[BLK_RW_ASYNC]); blk_queue_for_each_rl()
962 * @rl: request list to allocate from
974 static struct request *__get_request(struct request_list *rl, int rw_flags, __get_request() argument
977 struct request_queue *q = rl->q; __get_request()
992 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { __get_request()
993 if (rl->count[is_sync]+1 >= q->nr_requests) { __get_request()
1000 if (!blk_rl_full(rl, is_sync)) { __get_request()
1002 blk_set_rl_full(rl, is_sync); __get_request()
1019 if (rl == &q->root_rl) __get_request()
1028 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) __get_request()
1032 rl->count[is_sync]++; __get_request()
1033 rl->starved[is_sync] = 0; __get_request()
1057 rq = mempool_alloc(rl->rq_pool, gfp_mask); __get_request()
1062 blk_rq_set_rl(rq, rl); __get_request()
1122 freed_request(rl, rw_flags); __get_request()
1132 if (unlikely(rl->count[is_sync] == 0)) __get_request()
1133 rl->starved[is_sync] = 1; __get_request()
1156 struct request_list *rl; get_request() local
1159 rl = blk_get_rl(q, bio); /* transferred to @rq on success */ get_request()
1161 rq = __get_request(rl, rw_flags, bio, gfp_mask); get_request()
1166 blk_put_rl(rl); get_request()
1170 /* wait on @rl and retry */ get_request()
1171 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, get_request()
1187 finish_wait(&rl->wait[is_sync], &wait); get_request()
1403 struct request_list *rl = blk_rq_rl(req); __blk_put_request() local
1408 blk_free_request(rl, req); __blk_put_request()
1409 freed_request(rl, flags); __blk_put_request()
1410 blk_put_rl(rl); __blk_put_request()
blk-cgroup.c
57 blk_exit_rl(&blkg->rl); blkg_free()
85 /* root blkg uses @q->root_rl, init rl only for !root blkgs */ blkg_alloc()
87 if (blk_init_rl(&blkg->rl, q, gfp_mask)) blkg_alloc()
89 blkg->rl.blkg = blkg; blkg_alloc()
411 * because the root blkg uses @q->root_rl instead of its own rl.
413 struct request_list *__blk_queue_next_rl(struct request_list *rl, __blk_queue_next_rl() argument
423 if (rl == &q->root_rl) { __blk_queue_next_rl()
429 blkg = container_of(rl, struct blkcg_gq, rl); __blk_queue_next_rl()
441 return &blkg->rl; __blk_queue_next_rl()
blk.h
55 int blk_init_rl(struct request_list *rl, struct request_queue *q,
57 void blk_exit_rl(struct request_list *rl);
blk-mq.c
201 rq->rl = NULL; blk_mq_rq_ctx_init()
/linux-4.1.27/arch/arm/mm/
proc-v7-3level.S
69 #define rl r3 define
72 #define rl r2 define
85 tst rl, #L_PTE_VALID
88 bicne rl, #L_PTE_VALID
94 orrne rl, #PTE_AP2
95 biceq rl, #PTE_AP2
/linux-4.1.27/fs/f2fs/
gc.h
108 struct request_list *rl = &q->root_rl; is_idle() local
109 return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]); is_idle()
/linux-4.1.27/arch/arm/vfp/
vfp.h
76 u64 rh, rma, rmb, rl; mul64to128() local
80 rl = (u64)nl * ml; mul64to128()
93 rl += rma; mul64to128()
94 rh += (rl < rma); mul64to128()
96 *resl = rl; mul64to128()
108 u64 rh, rl; vfp_hi64multiply64() local
109 mul64to128(&rh, &rl, n, m); vfp_hi64multiply64()
110 return rh | (rl != 0); vfp_hi64multiply64()
/linux-4.1.27/net/ipv4/
inetpeer.c
301 struct inet_peer *rr, *rl, *rlr, *rll; peer_avl_rebalance() local
304 rl = rcu_deref_locked(r->avl_left, base); peer_avl_rebalance()
305 rlh = node_height(rl); peer_avl_rebalance()
307 RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */ peer_avl_rebalance()
314 } else { /* rr: RH, rl: RH+1 */ peer_avl_rebalance()
315 rlr = rcu_deref_locked(rl->avl_right, base);/* rlr: LH or LH-1 */ peer_avl_rebalance()
316 rll = rcu_deref_locked(rl->avl_left, base);/* rll: LH or LH-1 */ peer_avl_rebalance()
323 RCU_INIT_POINTER(rl->avl_right, r); /* r: LH+1 */ peer_avl_rebalance()
324 RCU_INIT_POINTER(rl->avl_left, node); /* node: LH+1 */ peer_avl_rebalance()
325 rl->avl_height = lh + 2; peer_avl_rebalance()
326 RCU_INIT_POINTER(*nodep, rl); peer_avl_rebalance()
/linux-4.1.27/sound/oss/
swarm_cs4297a.c
1187 unsigned char l, r, rl, rr, vidx; mixer_ioctl() local
1348 rl = 63; mixer_ioctl()
1351 rl = attentbl[(10 * l) / 100]; // Convert 0-100 vol to 63-0 atten. mixer_ioctl()
1362 if ((rl > 60) && (rr > 60)) // If both l & r are 'low', mixer_ioctl()
1367 temp1 |= (rl << 8) | rr; mixer_ioctl()
1386 rl = 0; mixer_ioctl()
1389 rl = (l * 2 - 5) / 13; // Convert 0-100 range to 0-15. mixer_ioctl()
1390 l = (rl * 13 + 5) / 2; mixer_ioctl()
1393 if (rl < 3) { mixer_ioctl()
1395 rl = 0; mixer_ioctl()
1398 rl = 15 - rl; // Convert volume to attenuation. mixer_ioctl()
1399 temp1 |= rl << 1; mixer_ioctl()
1418 rl = (l * 2 - 5) / 13; // Convert 0-100 scale to 0-15. mixer_ioctl()
1420 if (rl < 3 && rr < 3) mixer_ioctl()
1425 temp1 = temp1 | (rl << 8) | rr; mixer_ioctl()
1443 rl = 0; mixer_ioctl()
1445 rl = ((unsigned) l * 5 - 4) / 16; // Convert 0-100 range to 0-31. mixer_ioctl()
1446 l = (rl * 16 + 4) / 5; mixer_ioctl()
1450 if (rl < 3) { mixer_ioctl()
1452 rl = 0; mixer_ioctl()
1454 rl = 31 - rl; // Convert volume to attenuation. mixer_ioctl()
1455 temp1 |= rl; mixer_ioctl()
1477 rl = (l * 2 - 11) / 3; // Convert 0-100 range to 0-63. mixer_ioctl()
1479 if (rl < 3) // If l is low, turn on mixer_ioctl()
1484 rl = 63 - rl; // Convert vol to attenuation. mixer_ioctl()
1485 // writel(temp1 | rl, s->pBA0 + FMLVC); mixer_ioctl()
1515 rl = 31; mixer_ioctl()
1517 rl = (attentbl[(l * 10) / 100]) >> 1; mixer_ioctl()
1527 if ((rl > 30) && (rr > 30)) mixer_ioctl()
1531 temp1 = temp1 | (rl << 8) | rr; mixer_ioctl()
aedsp16.c
134 From: Mr S J Greenaway <sjg95@unixfe.rl.ac.uk>
/linux-4.1.27/drivers/gpu/drm/udl/
udl_transfer.c
114 * Rather than 256 pixel commands which are either rl or raw encoded,
115 * the rlx command simply assumes alternating raw and rl spans within one cmd.
120 * But for very rl friendly data, will compress not quite as well.
/linux-4.1.27/fs/qnx4/
H A Dinode.c155 int rd, rl; qnx4_checkroot() local
162 rl = le32_to_cpu(s->RootDir.di_first_xtnt.xtnt_size); qnx4_checkroot()
163 for (j = 0; j < rl; j++) { qnx4_checkroot()
/linux-4.1.27/include/math-emu/
H A Dop-2.h152 #define __FP_FRAC_ADD_2(rh, rl, xh, xl, yh, yl) \
153 (rh = xh + yh + ((rl = xl + yl) < xl))
156 #define __FP_FRAC_SUB_2(rh, rl, xh, xl, yh, yl) \
157 (rh = xh - yh - ((rl = xl - yl) > xl))
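Note: these op-2.h macros do double-word arithmetic on two limbs; the carry out of the low limb is detected by the unsigned compare (rl = xl + yl) < xl, and the borrow by (rl = xl - yl) > xl. Expanded into plain functions with fixed 64-bit limbs (the real macros are word-size generic; names here are illustrative), the same idea reads:

#include <stdint.h>

static void frac_add_2(uint64_t *rh, uint64_t *rl,
                       uint64_t xh, uint64_t xl,
                       uint64_t yh, uint64_t yl)
{
        *rl = xl + yl;
        *rh = xh + yh + (*rl < xl);     /* wrapped sum < addend => carry */
}

static void frac_sub_2(uint64_t *rh, uint64_t *rl,
                       uint64_t xh, uint64_t xl,
                       uint64_t yh, uint64_t yl)
{
        *rl = xl - yl;
        *rh = xh - yh - (*rl > xl);     /* wrapped diff > minuend => borrow */
}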
/linux-4.1.27/fs/ocfs2/
H A Drefcounttree.c1253 struct ocfs2_refcount_list *rl = &rb->rf_records; ocfs2_change_refcount_rec() local
1254 struct ocfs2_refcount_rec *rec = &rl->rl_recs[index]; ocfs2_change_refcount_rec()
1269 if (index != le16_to_cpu(rl->rl_used) - 1) { ocfs2_change_refcount_rec()
1271 (le16_to_cpu(rl->rl_used) - index - 1) * ocfs2_change_refcount_rec()
1273 memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1], ocfs2_change_refcount_rec()
1277 le16_add_cpu(&rl->rl_used, -1); ocfs2_change_refcount_rec()
1425 static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl, ocfs2_find_refcount_split_pos() argument
1428 int num_used = le16_to_cpu(rl->rl_used); ocfs2_find_refcount_split_pos()
1434 &rl->rl_recs[middle - delta - 1], ocfs2_find_refcount_split_pos()
1435 &rl->rl_recs[middle - delta])) { ocfs2_find_refcount_split_pos()
1446 &rl->rl_recs[middle + delta], ocfs2_find_refcount_split_pos()
1447 &rl->rl_recs[middle + delta + 1])) { ocfs2_find_refcount_split_pos()
1456 *split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]); ocfs2_find_refcount_split_pos()
1468 struct ocfs2_refcount_list *rl = &rb->rf_records; ocfs2_divide_leaf_refcount_block() local
1475 le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used)); ocfs2_divide_leaf_refcount_block()
1489 sort(&rl->rl_recs, le16_to_cpu(rl->rl_used), ocfs2_divide_leaf_refcount_block()
1493 ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index); ocfs2_divide_leaf_refcount_block()
1502 num_moved = le16_to_cpu(rl->rl_used) - split_index; ocfs2_divide_leaf_refcount_block()
1503 memcpy(new_rl->rl_recs, &rl->rl_recs[split_index], ocfs2_divide_leaf_refcount_block()
1507 memset(&rl->rl_recs[split_index], 0, ocfs2_divide_leaf_refcount_block()
1511 le16_add_cpu(&rl->rl_used, -num_moved); ocfs2_divide_leaf_refcount_block()
1514 sort(&rl->rl_recs, le16_to_cpu(rl->rl_used), ocfs2_divide_leaf_refcount_block()
/linux-4.1.27/arch/x86/kvm/
H A Di8254.c62 u64 rl, rh; muldiv64() local
65 rl = (u64)u.l.low * (u64)b; muldiv64()
67 rh += (rl >> 32); muldiv64()
69 res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c); muldiv64()
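Note: muldiv64() above scales a 64-bit value by b/c without a 128-bit intermediate: a is split into 32-bit halves, each multiplied by b, and the division is done high word first with the remainder folded into the low word. A plain-C sketch of the same idea (no div64_u64/mod_64 helpers; assumes the final quotient and both partial quotients fit their 32-bit slots, as the kernel version also does):

#include <stdint.h>

static uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
        uint64_t rl = (uint64_t)(uint32_t)a * b;   /* low  half of a, times b */
        uint64_t rh = (a >> 32) * b;               /* high half of a, times b */

        rh += rl >> 32;                            /* fold low product's upper bits */

        uint64_t hi = rh / c;                      /* upper 32 bits of the quotient */
        uint64_t lo = (((rh % c) << 32) | (rl & 0xffffffffu)) / c;

        return (hi << 32) | (uint32_t)lo;
}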
/linux-4.1.27/kernel/
H A Djump_label.c120 unsigned long rl) jump_label_rate_limit()
123 key->timeout = rl; jump_label_rate_limit()
119 jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl) jump_label_rate_limit() argument
/linux-4.1.27/drivers/hwmon/
H A Dasc7621.c813 #define PREAD(name, n, pri, rm, rl, m, s, r) \
815 .priority = pri, .msb[0] = rm, .lsb[0] = rl, .mask[0] = m, \
818 #define PWRITE(name, n, pri, rm, rl, m, s, r) \
820 .priority = pri, .msb[0] = rm, .lsb[0] = rl, .mask[0] = m, \
827 #define PWRITEM(name, n, pri, rm, rl, m, s, r) \
829 .priority = pri, .msb = rm, .lsb = rl, .mask = m, .shift = s,}
/linux-4.1.27/net/can/
H A Daf_can.c474 struct hlist_head *rl; can_rx_register() local
491 rl = find_rcv_list(&can_id, &mask, d); can_rx_register()
500 hlist_add_head_rcu(&r->list, rl); can_rx_register()
542 struct hlist_head *rl; can_rx_unregister() local
558 rl = find_rcv_list(&can_id, &mask, d); can_rx_unregister()
566 hlist_for_each_entry_rcu(r, rl, list) { hlist_for_each_entry_rcu()
/linux-4.1.27/drivers/staging/olpc_dcon/
H A Dolpc_dcon.c484 unsigned short rl; dcon_resumeline_store() local
487 rc = kstrtou16(buf, 10, &rl); dcon_resumeline_store()
491 resumeline = rl; dcon_resumeline_store()
/linux-4.1.27/drivers/video/fbdev/
H A Dmetronomefb.c276 unsigned char rl; load_waveform() local
289 rl = mem[wfm_idx++]; load_waveform()
290 for (i = 0; i <= rl; i++) load_waveform()
H A Dcyber2000fb.c317 * n rl cyber2000fb_setcolreg()
323 * n = bpp, rl = red length, gl = green length, bl = blue length cyber2000fb_setcolreg()
392 * n rl cyber2000fb_setcolreg()
398 * n = bpp, rl = red length, gl = green length, bl = blue length cyber2000fb_setcolreg()
H A Dudlfb.c415 * Rather than 256 pixel commands which are either rl or raw encoded,
416 * the rlx command simply assumes alternating raw and rl spans within one cmd.
421 * But for very rl friendly data, will compress not quite as well.
/linux-4.1.27/drivers/net/ethernet/microchip/
H A Denc28j60.c301 int rl, rh; nolock_regw_read() local
304 rl = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address); nolock_regw_read()
307 return (rh << 8) | rl; nolock_regw_read()
/linux-4.1.27/drivers/mmc/host/
H A Dsdricoh_cs.c106 dev_vdbg(host->dev, "rl %x 0x%x\n", reg, value); sdricoh_readl()
/linux-4.1.27/net/dccp/
H A Dackvec.c81 dccp_pr_debug("Added Vector, ack_seqno=%llu, ack_ackno=%llu (rl=%u)\n", dccp_ackvec_update_records()
/linux-4.1.27/arch/x86/crypto/
H A Dcamellia-aesni-avx-asm_64.S333 * rl ^= t2; \
361 * t2 &= rl; \
H A Dcamellia-aesni-avx2-asm_64.S373 * rl ^= t2; \
401 * t2 &= rl; \
/linux-4.1.27/fs/
H A Dbinfmt_flat.c383 static void old_reloc(unsigned long rl) old_reloc() argument
391 r.value = rl; old_reloc()
/linux-4.1.27/drivers/usb/isp1760/
H A Disp1760-hcd.c541 u32 rl = RL_COUNTER; create_ptd_atl() local
583 rl = 0; create_ptd_atl()
594 ptd->dw2 |= TO_DW2_RL(rl); create_ptd_atl()
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
H A Drw.c540 CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n", \
670 LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n", ll_read_ahead_pages()
/linux-4.1.27/drivers/block/drbd/
H A Ddrbd_main.c1089 unsigned long rl; fill_bitmap_rle_bits() local
1122 rl = tmp - c->bit_offset; fill_bitmap_rle_bits()
1125 if (rl == 0) { fill_bitmap_rle_bits()
1138 if (rl == 0) { fill_bitmap_rle_bits()
1144 bits = vli_encode_bits(&bs, rl); fill_bitmap_rle_bits()
1153 plain_bits += rl; fill_bitmap_rle_bits()
H A Ddrbd_receiver.c4309 u64 rl; recv_bm_rle_bits() local
4323 for (have = bits; have > 0; s += rl, toggle = !toggle) { recv_bm_rle_bits()
4324 bits = vli_decode_bits(&rl, look_ahead); recv_bm_rle_bits()
4329 e = s + rl -1; recv_bm_rle_bits()
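Note: taken together, fill_bitmap_rle_bits() and recv_bm_rle_bits() above exchange the bitmap as run lengths of alternating clear/set bits, VLI-coded on the wire; the receiver's for (have = bits; ...; s += rl, toggle = !toggle) loop walks those runs back. A stripped-down sketch of just the run-length part (boolean array in, plain 64-bit run lengths out, no VLI coding; names are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Encode a bitmap as lengths of alternating clear/set runs.  The first
 * run counts clear bits, so it may legitimately be 0 when the bitmap
 * starts with a set bit (the rl == 0 checks above handle that case).
 * Returns the number of runs written. */
static size_t bitmap_to_runs(const bool *bits, size_t nbits,
                             uint64_t *runs, size_t max_runs)
{
        bool toggle = false;            /* value the current run counts */
        size_t i = 0, nruns = 0;

        while (i < nbits && nruns < max_runs) {
                uint64_t rl = 0;

                while (i < nbits && bits[i] == toggle) {
                        rl++;
                        i++;
                }
                runs[nruns++] = rl;
                toggle = !toggle;       /* next run counts the other value */
        }
        return nruns;
}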
/linux-4.1.27/arch/tile/kernel/
H A Dtile-desc_32.c769 { "rl", TILEPRO_OPC_RL, 0xf, 3, TREG_ZERO, 1,
772 { "rl.sn", TILEPRO_OPC_RL_SN, 0x3, 3, TREG_SN, 1,
/linux-4.1.27/arch/mips/include/asm/octeon/
H A Dcvmx-pciercx-defs.h1040 uint32_t rl:1; member in struct:cvmx_pciercx_cfg032::cvmx_pciercx_cfg032_s
1050 uint32_t rl:1;
/linux-4.1.27/drivers/s390/char/
H A Dvmur.c399 TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf, urfile_alloc()
/linux-4.1.27/include/linux/mlx4/
H A Ddevice.h458 enum { /* rl */
/linux-4.1.27/firmware/keyspan_pda/
H A Dkeyspan_pda.S658 rl a ; a = index*2
H A Dxircom_pgs.S696 rl a ; a = index*2
/linux-4.1.27/fs/btrfs/
H A Dcheck-integrity.c2474 "rl=%d, %c @%llu (%s/%llu/%d)" btrfsic_check_all_ref_blocks()
2586 "rl=%d, %c @%llu (%s/%llu/%d)" btrfsic_is_block_ref_by_superblock()
/linux-4.1.27/drivers/net/ethernet/via/
H A Dvia-rhine.c5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
/linux-4.1.27/drivers/net/ethernet/dec/tulip/
H A Dde4x5.c250 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
