Searched refs:cur (Results 1 - 200 of 480) sorted by relevance


/linux-4.1.27/security/selinux/ss/
hashtab.c
42 struct hashtab_node *prev, *cur, *newnode; hashtab_insert() local
51 cur = h->htable[hvalue]; hashtab_insert()
52 while (cur && h->keycmp(h, key, cur->key) > 0) { hashtab_insert()
53 prev = cur; hashtab_insert()
54 cur = cur->next; hashtab_insert()
57 if (cur && (h->keycmp(h, key, cur->key) == 0)) hashtab_insert()
80 struct hashtab_node *cur; hashtab_search() local
86 cur = h->htable[hvalue]; hashtab_search()
87 while (cur && h->keycmp(h, key, cur->key) > 0) hashtab_search()
88 cur = cur->next; hashtab_search()
90 if (cur == NULL || (h->keycmp(h, key, cur->key) != 0)) hashtab_search()
93 return cur->datum; hashtab_search()
99 struct hashtab_node *cur, *temp; hashtab_destroy() local
105 cur = h->htable[i]; hashtab_destroy()
106 while (cur) { hashtab_destroy()
107 temp = cur; hashtab_destroy()
108 cur = cur->next; hashtab_destroy()
126 struct hashtab_node *cur; hashtab_map() local
132 cur = h->htable[i]; hashtab_map()
133 while (cur) { hashtab_map()
134 ret = apply(cur->key, cur->datum, args); hashtab_map()
137 cur = cur->next; hashtab_map()
147 struct hashtab_node *cur; hashtab_stat() local
152 cur = h->htable[i]; hashtab_stat()
153 if (cur) { hashtab_stat()
156 while (cur) { hashtab_stat()
158 cur = cur->next; hashtab_stat()
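
Every hashtab.c hit above is the same idiom: walk a bucket chain kept sorted by keycmp() with a prev/cur pair, then either bail on a duplicate or splice a new node in front of cur. A minimal user-space sketch of that insert loop, assuming string keys and strcmp() ordering (struct node and bucket_insert are illustrative names, not the SELinux types):

#include <stdlib.h>
#include <string.h>

struct node {
        char *key;
        void *datum;
        struct node *next;
};

/* Insert into a bucket kept sorted by key; fails on duplicates. */
static int bucket_insert(struct node **bucket, char *key, void *datum)
{
        struct node *prev = NULL, *cur = *bucket, *newnode;

        while (cur && strcmp(key, cur->key) > 0) {
                prev = cur;
                cur = cur->next;
        }
        if (cur && strcmp(key, cur->key) == 0)
                return -1;                      /* duplicate key */

        newnode = calloc(1, sizeof(*newnode));
        if (!newnode)
                return -1;
        newnode->key = key;
        newnode->datum = datum;
        newnode->next = cur;                    /* splice in front of cur */
        if (prev)
                prev->next = newnode;
        else
                *bucket = newnode;
        return 0;
}

Keeping the chains sorted is what lets hashtab_search() above stop early as soon as keycmp() reports the probe key sorts before cur.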
sidtab.c
36 struct sidtab_node *prev, *cur, *newnode; sidtab_insert() local
45 cur = s->htable[hvalue]; sidtab_insert()
46 while (cur && sid > cur->sid) { sidtab_insert()
47 prev = cur; sidtab_insert()
48 cur = cur->next; sidtab_insert()
51 if (cur && sid == cur->sid) { sidtab_insert()
88 struct sidtab_node *cur; sidtab_search_core() local
94 cur = s->htable[hvalue]; sidtab_search_core()
95 while (cur && sid > cur->sid) sidtab_search_core()
96 cur = cur->next; sidtab_search_core()
98 if (force && cur && sid == cur->sid && cur->context.len) sidtab_search_core()
99 return &cur->context; sidtab_search_core()
101 if (cur == NULL || sid != cur->sid || cur->context.len) { sidtab_search_core()
105 cur = s->htable[hvalue]; sidtab_search_core()
106 while (cur && sid > cur->sid) sidtab_search_core()
107 cur = cur->next; sidtab_search_core()
108 if (!cur || sid != cur->sid) sidtab_search_core()
112 return &cur->context; sidtab_search_core()
132 struct sidtab_node *cur; sidtab_map() local
138 cur = s->htable[i]; sidtab_map()
139 while (cur) { sidtab_map()
140 rc = apply(cur->sid, &cur->context, args); sidtab_map()
143 cur = cur->next; sidtab_map()
165 struct sidtab_node *cur; sidtab_search_context() local
168 cur = s->htable[i]; sidtab_search_context()
169 while (cur) { sidtab_search_context()
170 if (context_cmp(&cur->context, context)) { sidtab_search_context()
171 sidtab_update_cache(s, cur, SIDTAB_CACHE_LEN - 1); sidtab_search_context()
172 return cur->sid; sidtab_search_context()
174 cur = cur->next; sidtab_search_context()
243 struct sidtab_node *cur; sidtab_hash_eval() local
248 cur = h->htable[i]; sidtab_hash_eval()
249 if (cur) { sidtab_hash_eval()
252 while (cur) { sidtab_hash_eval()
254 cur = cur->next; sidtab_hash_eval()
270 struct sidtab_node *cur, *temp; sidtab_destroy() local
276 cur = s->htable[i]; sidtab_destroy()
277 while (cur) { sidtab_destroy()
278 temp = cur; sidtab_destroy()
279 cur = cur->next; sidtab_destroy()
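
sidtab_destroy() and hashtab_destroy() above both free a chain with the temp/cur two-step: remember the node, advance first, then free, so the loop never reads freed memory. A minimal sketch of that teardown (snode is a hypothetical stand-in for the table node types):

#include <stdlib.h>

struct snode {
        struct snode *next;
};

static void chain_destroy(struct snode *head)
{
        struct snode *cur = head, *temp;

        while (cur) {
                temp = cur;
                cur = cur->next;        /* advance before freeing */
                free(temp);
        }
}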
avtab.c
69 struct avtab_node *prev, struct avtab_node *cur, avtab_insert_node()
97 struct avtab_node *prev, *cur, *newnode; avtab_insert() local
104 for (prev = NULL, cur = flex_array_get_ptr(h->htable, hvalue); avtab_insert()
105 cur; avtab_insert()
106 prev = cur, cur = cur->next) { avtab_insert()
107 if (key->source_type == cur->key.source_type && avtab_insert()
108 key->target_type == cur->key.target_type && avtab_insert()
109 key->target_class == cur->key.target_class && avtab_insert()
110 (specified & cur->key.specified)) avtab_insert()
112 if (key->source_type < cur->key.source_type) avtab_insert()
114 if (key->source_type == cur->key.source_type && avtab_insert()
115 key->target_type < cur->key.target_type) avtab_insert()
117 if (key->source_type == cur->key.source_type && avtab_insert()
118 key->target_type == cur->key.target_type && avtab_insert()
119 key->target_class < cur->key.target_class) avtab_insert()
123 newnode = avtab_insert_node(h, hvalue, prev, cur, key, datum); avtab_insert()
138 struct avtab_node *prev, *cur; avtab_insert_nonunique() local
144 for (prev = NULL, cur = flex_array_get_ptr(h->htable, hvalue); avtab_insert_nonunique()
145 cur; avtab_insert_nonunique()
146 prev = cur, cur = cur->next) { avtab_insert_nonunique()
147 if (key->source_type == cur->key.source_type && avtab_insert_nonunique()
148 key->target_type == cur->key.target_type && avtab_insert_nonunique()
149 key->target_class == cur->key.target_class && avtab_insert_nonunique()
150 (specified & cur->key.specified)) avtab_insert_nonunique()
152 if (key->source_type < cur->key.source_type) avtab_insert_nonunique()
154 if (key->source_type == cur->key.source_type && avtab_insert_nonunique()
155 key->target_type < cur->key.target_type) avtab_insert_nonunique()
157 if (key->source_type == cur->key.source_type && avtab_insert_nonunique()
158 key->target_type == cur->key.target_type && avtab_insert_nonunique()
159 key->target_class < cur->key.target_class) avtab_insert_nonunique()
162 return avtab_insert_node(h, hvalue, prev, cur, key, datum); avtab_insert_nonunique()
168 struct avtab_node *cur; avtab_search() local
175 for (cur = flex_array_get_ptr(h->htable, hvalue); cur; avtab_search()
176 cur = cur->next) { avtab_search()
177 if (key->source_type == cur->key.source_type && avtab_search()
178 key->target_type == cur->key.target_type && avtab_search()
179 key->target_class == cur->key.target_class && avtab_search()
180 (specified & cur->key.specified)) avtab_search()
181 return &cur->datum; avtab_search()
183 if (key->source_type < cur->key.source_type) avtab_search()
185 if (key->source_type == cur->key.source_type && avtab_search()
186 key->target_type < cur->key.target_type) avtab_search()
188 if (key->source_type == cur->key.source_type && avtab_search()
189 key->target_type == cur->key.target_type && avtab_search()
190 key->target_class < cur->key.target_class) avtab_search()
204 struct avtab_node *cur; avtab_search_node() local
211 for (cur = flex_array_get_ptr(h->htable, hvalue); cur; avtab_search_node()
212 cur = cur->next) { avtab_search_node()
213 if (key->source_type == cur->key.source_type && avtab_search_node()
214 key->target_type == cur->key.target_type && avtab_search_node()
215 key->target_class == cur->key.target_class && avtab_search_node()
216 (specified & cur->key.specified)) avtab_search_node()
217 return cur; avtab_search_node()
219 if (key->source_type < cur->key.source_type) avtab_search_node()
221 if (key->source_type == cur->key.source_type && avtab_search_node()
222 key->target_type < cur->key.target_type) avtab_search_node()
224 if (key->source_type == cur->key.source_type && avtab_search_node()
225 key->target_type == cur->key.target_type && avtab_search_node()
226 key->target_class < cur->key.target_class) avtab_search_node()
235 struct avtab_node *cur; avtab_search_node_next() local
241 for (cur = node->next; cur; cur = cur->next) { avtab_search_node_next()
242 if (node->key.source_type == cur->key.source_type && avtab_search_node_next()
243 node->key.target_type == cur->key.target_type && avtab_search_node_next()
244 node->key.target_class == cur->key.target_class && avtab_search_node_next()
245 (specified & cur->key.specified)) avtab_search_node_next()
246 return cur; avtab_search_node_next()
248 if (node->key.source_type < cur->key.source_type) avtab_search_node_next()
250 if (node->key.source_type == cur->key.source_type && avtab_search_node_next()
251 node->key.target_type < cur->key.target_type) avtab_search_node_next()
253 if (node->key.source_type == cur->key.source_type && avtab_search_node_next()
254 node->key.target_type == cur->key.target_type && avtab_search_node_next()
255 node->key.target_class < cur->key.target_class) avtab_search_node_next()
264 struct avtab_node *cur, *temp; avtab_destroy() local
270 cur = flex_array_get_ptr(h->htable, i); avtab_destroy()
271 while (cur) { avtab_destroy()
272 temp = cur; avtab_destroy()
273 cur = cur->next; avtab_destroy()
329 struct avtab_node *cur; avtab_hash_eval() local
335 cur = flex_array_get_ptr(h->htable, i); avtab_hash_eval()
336 if (cur) { avtab_hash_eval()
339 while (cur) { avtab_hash_eval()
341 cur = cur->next; avtab_hash_eval()
543 int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp) avtab_write_item() argument
549 buf16[0] = cpu_to_le16(cur->key.source_type); avtab_write_item()
550 buf16[1] = cpu_to_le16(cur->key.target_type); avtab_write_item()
551 buf16[2] = cpu_to_le16(cur->key.target_class); avtab_write_item()
552 buf16[3] = cpu_to_le16(cur->key.specified); avtab_write_item()
556 buf32[0] = cpu_to_le32(cur->datum.data); avtab_write_item()
567 struct avtab_node *cur; avtab_write() local
576 for (cur = flex_array_get_ptr(a->htable, i); cur; avtab_write()
577 cur = cur->next) { avtab_write()
578 rc = avtab_write_item(p, cur, fp); avtab_write()
68 avtab_insert_node(struct avtab *h, int hvalue, struct avtab_node *prev, struct avtab_node *cur, struct avtab_key *key, struct avtab_datum *datum) avtab_insert_node() argument
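
All of the avtab.c walks above use the same three-field lexicographic order on (source_type, target_type, target_class), which lets a chain walk terminate as soon as the probe key sorts before the current node. A sketch of that comparison, with an illustrative avkey type rather than the real struct avtab_key:

struct avkey {
        unsigned short source_type, target_type, target_class;
};

/* <0, 0, >0 for the lexicographic order the avtab chains are kept in. */
static int avkey_cmp(const struct avkey *a, const struct avkey *b)
{
        if (a->source_type != b->source_type)
                return a->source_type < b->source_type ? -1 : 1;
        if (a->target_type != b->target_type)
                return a->target_type < b->target_type ? -1 : 1;
        if (a->target_class != b->target_class)
                return a->target_class < b->target_class ? -1 : 1;
        return 0;
}

The open-coded if-chains in avtab_search() and friends are this comparison inlined, plus the (specified & cur->key.specified) mask test for a match.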
conditional.c
28 struct cond_expr *cur; cond_evaluate_expr() local
32 for (cur = expr; cur; cur = cur->next) { cond_evaluate_expr()
33 switch (cur->expr_type) { cond_evaluate_expr()
38 s[sp] = p->bool_val_to_struct[cur->bool - 1]->state; cond_evaluate_expr()
92 struct cond_av_list *cur; evaluate_cond_node() local
100 for (cur = node->true_list; cur; cur = cur->next) { evaluate_cond_node()
102 cur->node->key.specified &= ~AVTAB_ENABLED; evaluate_cond_node()
104 cur->node->key.specified |= AVTAB_ENABLED; evaluate_cond_node()
107 for (cur = node->false_list; cur; cur = cur->next) { evaluate_cond_node()
110 cur->node->key.specified &= ~AVTAB_ENABLED; evaluate_cond_node()
112 cur->node->key.specified |= AVTAB_ENABLED; evaluate_cond_node()
134 struct cond_av_list *cur, *next; cond_av_list_destroy() local
135 for (cur = list; cur; cur = next) { cond_av_list_destroy()
136 next = cur->next; cond_av_list_destroy()
138 kfree(cur); cond_av_list_destroy()
157 struct cond_node *next, *cur; cond_list_destroy() local
162 for (cur = list; cur; cur = next) { cond_list_destroy()
163 next = cur->next; cond_list_destroy()
164 cond_node_destroy(cur); cond_list_destroy()
274 struct cond_av_list *other = data->other, *list, *cur; cond_insertf() local
305 for (cur = other; cur; cur = cur->next) { cond_insertf()
306 if (cur->node == node_ptr) { cond_insertf()
594 struct cond_node *cur; cond_write_list() local
600 for (cur = list; cur != NULL; cur = cur->next) cond_write_list()
607 for (cur = list; cur != NULL; cur = cur->next) { cond_write_list()
608 rc = cond_write_node(p, cur, fp); cond_write_list()
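
cond_evaluate_expr() above is a small stack machine: the expression list is in postfix order, so a bool node pushes its current state and each operator pops its operands. A hedged user-space sketch of that evaluation (the expr type, EXPR_* names, and fixed stack depth are illustrative, not the policydb types):

enum expr_type { EXPR_BOOL, EXPR_NOT, EXPR_AND, EXPR_OR };

struct expr {
        enum expr_type type;
        int bool_val;                   /* used when type == EXPR_BOOL */
        struct expr *next;
};

#define EXPR_DEPTH 32

/* Returns 0 or 1, or -1 on a malformed (under/overflowing) expression. */
static int eval_expr(const struct expr *e)
{
        int s[EXPR_DEPTH], sp = 0;

        for (; e; e = e->next) {
                switch (e->type) {
                case EXPR_BOOL:
                        if (sp == EXPR_DEPTH)
                                return -1;
                        s[sp++] = e->bool_val;
                        break;
                case EXPR_NOT:
                        if (sp < 1)
                                return -1;
                        s[sp - 1] = !s[sp - 1];
                        break;
                case EXPR_AND:
                        if (sp < 2)
                                return -1;
                        sp--;
                        s[sp - 1] = s[sp - 1] && s[sp];
                        break;
                case EXPR_OR:
                        if (sp < 2)
                                return -1;
                        sp--;
                        s[sp - 1] = s[sp - 1] || s[sp];
                        break;
                }
        }
        return sp == 1 ? s[0] : -1;
}

evaluate_cond_node() then uses the 0/1 result to set or clear AVTAB_ENABLED on the rules in the node's true and false lists, as the hits above show.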
avtab.h
76 int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp);
/linux-4.1.27/tools/perf/util/
strfilter.c
79 struct strfilter_node root, *cur, *last_op; strfilter_node__new() local
86 last_op = cur = &root; strfilter_node__new()
92 if (!cur->r || !last_op->r) strfilter_node__new()
94 cur = strfilter_node__alloc(OP_and, last_op->r, NULL); strfilter_node__new()
95 if (!cur) strfilter_node__new()
97 last_op->r = cur; strfilter_node__new()
98 last_op = cur; strfilter_node__new()
101 if (!cur->r || !root.r) strfilter_node__new()
103 cur = strfilter_node__alloc(OP_or, root.r, NULL); strfilter_node__new()
104 if (!cur) strfilter_node__new()
106 root.r = cur; strfilter_node__new()
107 last_op = cur; strfilter_node__new()
110 if (cur->r) strfilter_node__new()
112 cur->r = strfilter_node__alloc(OP_not, NULL, NULL); strfilter_node__new()
113 if (!cur->r) strfilter_node__new()
115 cur = cur->r; strfilter_node__new()
118 if (cur->r) strfilter_node__new()
120 cur->r = strfilter_node__new(s + 1, &s); strfilter_node__new()
123 if (!cur->r || *s != ')') strfilter_node__new()
128 if (cur->r) strfilter_node__new()
130 cur->r = strfilter_node__alloc(NULL, NULL, NULL); strfilter_node__new()
131 if (!cur->r) strfilter_node__new()
133 cur->r->p = strndup(s, e - s); strfilter_node__new()
134 if (!cur->r->p) strfilter_node__new()
139 if (!cur->r) strfilter_node__new()
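
strfilter_node__new() above parses the filter string into a binary tree of OP_or, OP_and, OP_not, and leaf-pattern nodes; note how '&' attaches under last_op->r while '|' attaches at root.r, which is what gives AND higher precedence than OR. Evaluating such a tree is a short recursion; this sketch uses strstr() where the real code glob-matches the leaf pattern, and the filter_node type is illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct filter_node {
        enum { OP_LEAF, OP_NOT, OP_AND, OP_OR } op;
        const char *pat;                /* leaf pattern */
        struct filter_node *l, *r;      /* OP_NOT keeps its operand in r */
};

static bool filter_match(const struct filter_node *n, const char *str)
{
        if (!n)
                return false;
        switch (n->op) {
        case OP_LEAF:
                return strstr(str, n->pat) != NULL;
        case OP_NOT:
                return !filter_match(n->r, str);
        case OP_AND:
                return filter_match(n->l, str) && filter_match(n->r, str);
        case OP_OR:
                return filter_match(n->l, str) || filter_match(n->r, str);
        }
        return false;
}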
unwind-libunwind.c
106 u8 *cur = *p; __dw_read_encoded_value() local
114 *val = dw_read(cur, unsigned long, end); __dw_read_encoded_value()
124 *val = (unsigned long) cur; __dw_read_encoded_value()
135 *val += dw_read(cur, s32, end); __dw_read_encoded_value()
138 *val += dw_read(cur, u32, end); __dw_read_encoded_value()
141 *val += dw_read(cur, s64, end); __dw_read_encoded_value()
144 *val += dw_read(cur, u64, end); __dw_read_encoded_value()
151 *p = cur; __dw_read_encoded_value()
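
__dw_read_encoded_value() above copies the caller's cursor into a local (u8 *cur = *p), pulls fixed-width values off it with the bounds-checked dw_read() macro, and only writes the advanced cursor back through *p at the end. A minimal sketch of one such read in function form (read_u32 is an illustrative name, not perf's API):

#include <stdint.h>
#include <string.h>

/* Read a u32 at *p, advancing the cursor; -1 if it would pass end. */
static int read_u32(const uint8_t **p, const uint8_t *end, uint32_t *val)
{
        if (*p + sizeof(*val) > end)
                return -1;
        memcpy(val, *p, sizeof(*val));  /* alignment-safe copy */
        *p += sizeof(*val);
        return 0;
}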
/linux-4.1.27/fs/xfs/libxfs/
xfs_btree.c
50 #define xfs_btree_magic(cur) \
51 xfs_magics[!!((cur)->bc_flags & XFS_BTREE_CRC_BLOCKS)][cur->bc_btnum]
56 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_check_lblock()
64 mp = cur->bc_mp; xfs_btree_check_lblock()
74 be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) && xfs_btree_check_lblock()
77 cur->bc_ops->get_maxrecs(cur, level) && xfs_btree_check_lblock()
100 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_check_sblock()
111 mp = cur->bc_mp; xfs_btree_check_sblock()
112 agbp = cur->bc_private.a.agbp; xfs_btree_check_sblock()
124 be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) && xfs_btree_check_sblock()
127 cur->bc_ops->get_maxrecs(cur, level) && xfs_btree_check_sblock()
151 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_check_block()
156 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) xfs_btree_check_block()
157 return xfs_btree_check_lblock(cur, block, level, bp); xfs_btree_check_block()
159 return xfs_btree_check_sblock(cur, block, level, bp); xfs_btree_check_block()
167 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_check_lptr()
171 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, xfs_btree_check_lptr()
174 XFS_FSB_SANITY_CHECK(cur->bc_mp, bno)); xfs_btree_check_lptr()
184 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_check_sptr()
188 xfs_agblock_t agblocks = cur->bc_mp->m_sb.sb_agblocks; xfs_btree_check_sptr()
190 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, xfs_btree_check_sptr()
203 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_check_ptr()
208 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { xfs_btree_check_ptr()
209 return xfs_btree_check_lptr(cur, xfs_btree_check_ptr()
212 return xfs_btree_check_sptr(cur, xfs_btree_check_ptr()
287 xfs_btree_cur_t *cur, /* btree cursor */ xfs_btree_del_cursor()
302 for (i = 0; i < cur->bc_nlevels; i++) { xfs_btree_del_cursor()
303 if (cur->bc_bufs[i]) xfs_btree_del_cursor()
304 xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]); xfs_btree_del_cursor()
312 ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || xfs_btree_del_cursor()
313 cur->bc_private.b.allocated == 0); xfs_btree_del_cursor()
317 kmem_zone_free(xfs_btree_cur_zone, cur); xfs_btree_del_cursor()
326 xfs_btree_cur_t *cur, /* input cursor */ xfs_btree_dup_cursor()
336 tp = cur->bc_tp; xfs_btree_dup_cursor()
337 mp = cur->bc_mp; xfs_btree_dup_cursor()
342 new = cur->bc_ops->dup_cursor(cur); xfs_btree_dup_cursor()
347 new->bc_rec = cur->bc_rec; xfs_btree_dup_cursor()
353 new->bc_ptrs[i] = cur->bc_ptrs[i]; xfs_btree_dup_cursor()
354 new->bc_ra[i] = cur->bc_ra[i]; xfs_btree_dup_cursor()
355 bp = cur->bc_bufs[i]; xfs_btree_dup_cursor()
360 cur->bc_ops->buf_ops); xfs_btree_dup_cursor()
406 static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur) xfs_btree_block_len() argument
408 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { xfs_btree_block_len()
409 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) xfs_btree_block_len()
413 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) xfs_btree_block_len()
421 static inline size_t xfs_btree_ptr_len(struct xfs_btree_cur *cur) xfs_btree_ptr_len() argument
423 return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ? xfs_btree_ptr_len()
432 struct xfs_btree_cur *cur, xfs_btree_rec_offset()
435 return xfs_btree_block_len(cur) + xfs_btree_rec_offset()
436 (n - 1) * cur->bc_ops->rec_len; xfs_btree_rec_offset()
444 struct xfs_btree_cur *cur, xfs_btree_key_offset()
447 return xfs_btree_block_len(cur) + xfs_btree_key_offset()
448 (n - 1) * cur->bc_ops->key_len; xfs_btree_key_offset()
456 struct xfs_btree_cur *cur, xfs_btree_ptr_offset()
460 return xfs_btree_block_len(cur) + xfs_btree_ptr_offset()
461 cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len + xfs_btree_ptr_offset()
462 (n - 1) * xfs_btree_ptr_len(cur); xfs_btree_ptr_offset()
470 struct xfs_btree_cur *cur, xfs_btree_rec_addr()
475 ((char *)block + xfs_btree_rec_offset(cur, n)); xfs_btree_rec_addr()
483 struct xfs_btree_cur *cur, xfs_btree_key_addr()
488 ((char *)block + xfs_btree_key_offset(cur, n)); xfs_btree_key_addr()
496 struct xfs_btree_cur *cur, xfs_btree_ptr_addr()
505 ((char *)block + xfs_btree_ptr_offset(cur, n, level)); xfs_btree_ptr_addr()
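
The offset helpers above encode the btree block layout: a fixed header, then for leaf blocks an array of records, and for interior blocks all keys followed by all pointers, everything indexed from 1. A sketch of that arithmetic with placeholder sizes (the real lengths come from cur->bc_ops and depend on the CRC and long-pointer flags):

#include <stddef.h>

#define HDR_LEN 16                      /* assumed header size */
#define KEY_LEN 8
#define PTR_LEN 4

static inline size_t rec_offset(size_t rec_len, int n)
{
        return HDR_LEN + (n - 1) * rec_len;     /* n is 1-based */
}

/* Pointers live after maxrecs keys, so a pointer's offset needs the
 * block's maximum key count, as in xfs_btree_ptr_offset(). */
static inline size_t ptr_offset(int n, int maxrecs)
{
        return HDR_LEN + maxrecs * KEY_LEN + (n - 1) * PTR_LEN;
}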
516 struct xfs_btree_cur *cur) xfs_btree_get_iroot()
520 ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork); xfs_btree_get_iroot()
530 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_get_block()
534 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && xfs_btree_get_block()
535 (level == cur->bc_nlevels - 1)) { xfs_btree_get_block()
537 return xfs_btree_get_iroot(cur); xfs_btree_get_block()
540 *bpp = cur->bc_bufs[level]; xfs_btree_get_block()
587 xfs_btree_cur_t *cur, /* btree cursor */ xfs_btree_islastblock()
593 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_islastblock()
594 xfs_btree_check_block(cur, block, level, bp); xfs_btree_islastblock()
595 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) xfs_btree_islastblock()
607 xfs_btree_cur_t *cur, /* btree cursor */ xfs_btree_firstrec()
616 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_firstrec()
617 xfs_btree_check_block(cur, block, level, bp); xfs_btree_firstrec()
626 cur->bc_ptrs[level] = 1; xfs_btree_firstrec()
636 xfs_btree_cur_t *cur, /* btree cursor */ xfs_btree_lastrec()
645 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_lastrec()
646 xfs_btree_check_block(cur, block, level, bp); xfs_btree_lastrec()
655 cur->bc_ptrs[level] = be16_to_cpu(block->bb_numrecs); xfs_btree_lastrec()
767 struct xfs_btree_cur *cur, xfs_btree_readahead_lblock()
776 xfs_btree_reada_bufl(cur->bc_mp, left, 1, xfs_btree_readahead_lblock()
777 cur->bc_ops->buf_ops); xfs_btree_readahead_lblock()
782 xfs_btree_reada_bufl(cur->bc_mp, right, 1, xfs_btree_readahead_lblock()
783 cur->bc_ops->buf_ops); xfs_btree_readahead_lblock()
792 struct xfs_btree_cur *cur, xfs_btree_readahead_sblock()
802 xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, xfs_btree_readahead_sblock()
803 left, 1, cur->bc_ops->buf_ops); xfs_btree_readahead_sblock()
808 xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, xfs_btree_readahead_sblock()
809 right, 1, cur->bc_ops->buf_ops); xfs_btree_readahead_sblock()
822 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_readahead()
832 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && xfs_btree_readahead()
833 (lev == cur->bc_nlevels - 1)) xfs_btree_readahead()
836 if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev]) xfs_btree_readahead()
839 cur->bc_ra[lev] |= lr; xfs_btree_readahead()
840 block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]); xfs_btree_readahead()
842 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) xfs_btree_readahead()
843 return xfs_btree_readahead_lblock(cur, lr, block); xfs_btree_readahead()
844 return xfs_btree_readahead_sblock(cur, lr, block); xfs_btree_readahead()
849 struct xfs_btree_cur *cur, xfs_btree_ptr_to_daddr()
852 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { xfs_btree_ptr_to_daddr()
855 return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l)); xfs_btree_ptr_to_daddr()
857 ASSERT(cur->bc_private.a.agno != NULLAGNUMBER); xfs_btree_ptr_to_daddr()
860 return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno, xfs_btree_ptr_to_daddr()
873 struct xfs_btree_cur *cur, xfs_btree_readahead_ptr()
877 xfs_buf_readahead(cur->bc_mp->m_ddev_targp, xfs_btree_readahead_ptr()
878 xfs_btree_ptr_to_daddr(cur, ptr), xfs_btree_readahead_ptr()
879 cur->bc_mp->m_bsize * count, cur->bc_ops->buf_ops); xfs_btree_readahead_ptr()
888 xfs_btree_cur_t *cur, /* btree cursor */ xfs_btree_setbuf()
894 if (cur->bc_bufs[lev]) xfs_btree_setbuf()
895 xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[lev]); xfs_btree_setbuf()
896 cur->bc_bufs[lev] = bp; xfs_btree_setbuf()
897 cur->bc_ra[lev] = 0; xfs_btree_setbuf()
900 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { xfs_btree_setbuf()
902 cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; xfs_btree_setbuf()
904 cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; xfs_btree_setbuf()
907 cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; xfs_btree_setbuf()
909 cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; xfs_btree_setbuf()
915 struct xfs_btree_cur *cur, xfs_btree_ptr_is_null()
918 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) xfs_btree_ptr_is_null()
926 struct xfs_btree_cur *cur, xfs_btree_set_ptr_null()
929 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) xfs_btree_set_ptr_null()
940 struct xfs_btree_cur *cur, xfs_btree_get_sibling()
947 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { xfs_btree_get_sibling()
962 struct xfs_btree_cur *cur, xfs_btree_set_sibling()
969 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { xfs_btree_set_sibling()
1038 struct xfs_btree_cur *cur, xfs_btree_init_block_cur()
1051 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) xfs_btree_init_block_cur()
1052 owner = cur->bc_private.b.ip->i_ino; xfs_btree_init_block_cur()
1054 owner = cur->bc_private.a.agno; xfs_btree_init_block_cur()
1056 xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn, xfs_btree_init_block_cur()
1057 xfs_btree_magic(cur), level, numrecs, xfs_btree_init_block_cur()
1058 owner, cur->bc_flags); xfs_btree_init_block_cur()
1068 struct xfs_btree_cur *cur, xfs_btree_is_lastrec()
1076 if (!(cur->bc_flags & XFS_BTREE_LASTREC_UPDATE)) xfs_btree_is_lastrec()
1079 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB); xfs_btree_is_lastrec()
1080 if (!xfs_btree_ptr_is_null(cur, &ptr)) xfs_btree_is_lastrec()
1087 struct xfs_btree_cur *cur, xfs_btree_buf_to_ptr()
1091 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) xfs_btree_buf_to_ptr()
1092 ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp, xfs_btree_buf_to_ptr()
1095 ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp, xfs_btree_buf_to_ptr()
1102 struct xfs_btree_cur *cur, xfs_btree_set_refs()
1105 switch (cur->bc_btnum) { xfs_btree_set_refs()
1124 struct xfs_btree_cur *cur, xfs_btree_get_buf_block()
1130 struct xfs_mount *mp = cur->bc_mp; xfs_btree_get_buf_block()
1136 d = xfs_btree_ptr_to_daddr(cur, ptr); xfs_btree_get_buf_block()
1137 *bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d, xfs_btree_get_buf_block()
1143 (*bpp)->b_ops = cur->bc_ops->buf_ops; xfs_btree_get_buf_block()
1154 struct xfs_btree_cur *cur, xfs_btree_read_buf_block()
1160 struct xfs_mount *mp = cur->bc_mp; xfs_btree_read_buf_block()
1167 d = xfs_btree_ptr_to_daddr(cur, ptr); xfs_btree_read_buf_block()
1168 error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d, xfs_btree_read_buf_block()
1170 cur->bc_ops->buf_ops); xfs_btree_read_buf_block()
1174 xfs_btree_set_refs(cur, *bpp); xfs_btree_read_buf_block()
1184 struct xfs_btree_cur *cur, xfs_btree_copy_keys()
1190 memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len); xfs_btree_copy_keys()
1198 struct xfs_btree_cur *cur, xfs_btree_copy_recs()
1204 memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len); xfs_btree_copy_recs()
1212 struct xfs_btree_cur *cur, xfs_btree_copy_ptrs()
1218 memcpy(dst_ptr, src_ptr, numptrs * xfs_btree_ptr_len(cur)); xfs_btree_copy_ptrs()
1226 struct xfs_btree_cur *cur, xfs_btree_shift_keys()
1236 dst_key = (char *)key + (dir * cur->bc_ops->key_len); xfs_btree_shift_keys()
1237 memmove(dst_key, key, numkeys * cur->bc_ops->key_len); xfs_btree_shift_keys()
1245 struct xfs_btree_cur *cur, xfs_btree_shift_recs()
1255 dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len); xfs_btree_shift_recs()
1256 memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len); xfs_btree_shift_recs()
1264 struct xfs_btree_cur *cur, xfs_btree_shift_ptrs()
1274 dst_ptr = (char *)ptr + (dir * xfs_btree_ptr_len(cur)); xfs_btree_shift_ptrs()
1275 memmove(dst_ptr, ptr, numptrs * xfs_btree_ptr_len(cur)); xfs_btree_shift_ptrs()
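
The three shift helpers above are one memmove() each: dir is +1 or -1, so the destination sits one entry to the right or left of the source, and memmove() rather than memcpy() is required because the ranges overlap. The same idiom, reduced to a single function:

#include <stddef.h>
#include <string.h>

/* Shift n fixed-size entries one slot in direction dir (+1/-1);
 * the caller guarantees the block has room on that side. */
static void shift_entries(void *base, size_t entry_len, int n, int dir)
{
        memmove((char *)base + dir * (ptrdiff_t)entry_len, base,
                n * entry_len);
}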
1283 struct xfs_btree_cur *cur, xfs_btree_log_keys()
1288 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_log_keys()
1289 XFS_BTREE_TRACE_ARGBII(cur, bp, first, last); xfs_btree_log_keys()
1292 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); xfs_btree_log_keys()
1293 xfs_trans_log_buf(cur->bc_tp, bp, xfs_btree_log_keys()
1294 xfs_btree_key_offset(cur, first), xfs_btree_log_keys()
1295 xfs_btree_key_offset(cur, last + 1) - 1); xfs_btree_log_keys()
1297 xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, xfs_btree_log_keys()
1298 xfs_ilog_fbroot(cur->bc_private.b.whichfork)); xfs_btree_log_keys()
1301 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_log_keys()
1309 struct xfs_btree_cur *cur, xfs_btree_log_recs()
1314 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_log_recs()
1315 XFS_BTREE_TRACE_ARGBII(cur, bp, first, last); xfs_btree_log_recs()
1317 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); xfs_btree_log_recs()
1318 xfs_trans_log_buf(cur->bc_tp, bp, xfs_btree_log_recs()
1319 xfs_btree_rec_offset(cur, first), xfs_btree_log_recs()
1320 xfs_btree_rec_offset(cur, last + 1) - 1); xfs_btree_log_recs()
1322 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_log_recs()
1330 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_log_ptrs()
1335 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_log_ptrs()
1336 XFS_BTREE_TRACE_ARGBII(cur, bp, first, last); xfs_btree_log_ptrs()
1342 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); xfs_btree_log_ptrs()
1343 xfs_trans_log_buf(cur->bc_tp, bp, xfs_btree_log_ptrs()
1344 xfs_btree_ptr_offset(cur, first, level), xfs_btree_log_ptrs()
1345 xfs_btree_ptr_offset(cur, last + 1, level) - 1); xfs_btree_log_ptrs()
1347 xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, xfs_btree_log_ptrs()
1348 xfs_ilog_fbroot(cur->bc_private.b.whichfork)); xfs_btree_log_ptrs()
1351 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_log_ptrs()
1359 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_log_block()
1393 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_log_block()
1394 XFS_BTREE_TRACE_ARGBI(cur, bp, fields); xfs_btree_log_block()
1399 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) { xfs_btree_log_block()
1414 (cur->bc_flags & XFS_BTREE_LONG_PTRS) ? xfs_btree_log_block()
1417 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); xfs_btree_log_block()
1418 xfs_trans_log_buf(cur->bc_tp, bp, first, last); xfs_btree_log_block()
1420 xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, xfs_btree_log_block()
1421 xfs_ilog_fbroot(cur->bc_private.b.whichfork)); xfs_btree_log_block()
1424 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_log_block()
1433 struct xfs_btree_cur *cur, xfs_btree_increment()
1443 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_increment()
1444 XFS_BTREE_TRACE_ARGI(cur, level); xfs_btree_increment()
1446 ASSERT(level < cur->bc_nlevels); xfs_btree_increment()
1449 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); xfs_btree_increment()
1452 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_increment()
1455 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_increment()
1461 if (++cur->bc_ptrs[level] <= xfs_btree_get_numrecs(block)) xfs_btree_increment()
1465 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB); xfs_btree_increment()
1466 if (xfs_btree_ptr_is_null(cur, &ptr)) xfs_btree_increment()
1469 XFS_BTREE_STATS_INC(cur, increment); xfs_btree_increment()
1475 for (lev = level + 1; lev < cur->bc_nlevels; lev++) { xfs_btree_increment()
1476 block = xfs_btree_get_block(cur, lev, &bp); xfs_btree_increment()
1479 error = xfs_btree_check_block(cur, block, lev, bp); xfs_btree_increment()
1484 if (++cur->bc_ptrs[lev] <= xfs_btree_get_numrecs(block)) xfs_btree_increment()
1488 xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); xfs_btree_increment()
1495 if (lev == cur->bc_nlevels) { xfs_btree_increment()
1496 if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) xfs_btree_increment()
1502 ASSERT(lev < cur->bc_nlevels); xfs_btree_increment()
1508 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { xfs_btree_increment()
1511 ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block); xfs_btree_increment()
1513 error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp); xfs_btree_increment()
1517 xfs_btree_setbuf(cur, lev, bp); xfs_btree_increment()
1518 cur->bc_ptrs[lev] = 1; xfs_btree_increment()
1521 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_increment()
1526 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_increment()
1531 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_btree_increment()
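
xfs_btree_increment() has a three-phase shape: bump the slot in the current block; if that runs off the end, climb until some ancestor still has a further entry; then walk back down taking the first entry at each level. A heavily simplified in-memory sketch (a real cursor rereads each child block on the way down instead of keeping a numrecs[] array):

struct cursor {
        int nlevels;
        int ptrs[8];            /* 1-based entry index per level */
        int numrecs[8];         /* entries in the block at each level */
};

/* Returns 1 after moving to the next leaf entry, 0 at end of tree. */
static int cursor_increment(struct cursor *c)
{
        int lev;

        if (++c->ptrs[0] <= c->numrecs[0])
                return 1;               /* still inside this leaf */

        for (lev = 1; lev < c->nlevels; lev++)  /* climb */
                if (++c->ptrs[lev] <= c->numrecs[lev])
                        break;
        if (lev == c->nlevels)
                return 0;               /* ran off the root: done */

        while (--lev >= 0)              /* descend along first entries */
                c->ptrs[lev] = 1;
        return 1;
}

xfs_btree_decrement() below is the mirror image, except its descent sets each level's slot to the child block's last record rather than to 1.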
1541 struct xfs_btree_cur *cur, xfs_btree_decrement()
1551 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_decrement()
1552 XFS_BTREE_TRACE_ARGI(cur, level); xfs_btree_decrement()
1554 ASSERT(level < cur->bc_nlevels); xfs_btree_decrement()
1557 xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); xfs_btree_decrement()
1560 if (--cur->bc_ptrs[level] > 0) xfs_btree_decrement()
1564 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_decrement()
1567 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_decrement()
1573 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB); xfs_btree_decrement()
1574 if (xfs_btree_ptr_is_null(cur, &ptr)) xfs_btree_decrement()
1577 XFS_BTREE_STATS_INC(cur, decrement); xfs_btree_decrement()
1583 for (lev = level + 1; lev < cur->bc_nlevels; lev++) { xfs_btree_decrement()
1584 if (--cur->bc_ptrs[lev] > 0) xfs_btree_decrement()
1587 xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA); xfs_btree_decrement()
1594 if (lev == cur->bc_nlevels) { xfs_btree_decrement()
1595 if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) xfs_btree_decrement()
1601 ASSERT(lev < cur->bc_nlevels); xfs_btree_decrement()
1607 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { xfs_btree_decrement()
1610 ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block); xfs_btree_decrement()
1612 error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp); xfs_btree_decrement()
1615 xfs_btree_setbuf(cur, lev, bp); xfs_btree_decrement()
1616 cur->bc_ptrs[lev] = xfs_btree_get_numrecs(block); xfs_btree_decrement()
1619 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_decrement()
1624 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_decrement()
1629 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_btree_decrement()
1635 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_lookup_get_block()
1644 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && xfs_btree_lookup_get_block()
1645 (level == cur->bc_nlevels - 1)) { xfs_btree_lookup_get_block()
1646 *blkp = xfs_btree_get_iroot(cur); xfs_btree_lookup_get_block()
1656 bp = cur->bc_bufs[level]; xfs_btree_lookup_get_block()
1657 if (bp && XFS_BUF_ADDR(bp) == xfs_btree_ptr_to_daddr(cur, pp)) { xfs_btree_lookup_get_block()
1662 error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp); xfs_btree_lookup_get_block()
1666 xfs_btree_setbuf(cur, level, bp); xfs_btree_lookup_get_block()
1677 struct xfs_btree_cur *cur, xfs_lookup_get_search_key()
1684 cur->bc_ops->init_key_from_rec(kp, xfs_lookup_get_search_key()
1685 xfs_btree_rec_addr(cur, keyno, block)); xfs_lookup_get_search_key()
1689 return xfs_btree_key_addr(cur, keyno, block); xfs_lookup_get_search_key()
1698 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_lookup()
1710 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_lookup()
1711 XFS_BTREE_TRACE_ARGI(cur, dir); xfs_btree_lookup()
1713 XFS_BTREE_STATS_INC(cur, lookup); xfs_btree_lookup()
1719 cur->bc_ops->init_ptr_from_cur(cur, &ptr); xfs_btree_lookup()
1728 for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) { xfs_btree_lookup()
1730 error = xfs_btree_lookup_get_block(cur, level, pp, &block); xfs_btree_lookup()
1751 ASSERT(level == 0 && cur->bc_nlevels == 1); xfs_btree_lookup()
1753 cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE; xfs_btree_lookup()
1754 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_lookup()
1764 XFS_BTREE_STATS_INC(cur, compare); xfs_btree_lookup()
1770 kp = xfs_lookup_get_search_key(cur, level, xfs_btree_lookup()
1779 diff = cur->bc_ops->key_diff(cur, kp); xfs_btree_lookup()
1800 pp = xfs_btree_ptr_addr(cur, keyno, block); xfs_btree_lookup()
1803 error = xfs_btree_check_ptr(cur, pp, 0, level); xfs_btree_lookup()
1807 cur->bc_ptrs[level] = keyno; xfs_btree_lookup()
1818 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB); xfs_btree_lookup()
1821 !xfs_btree_ptr_is_null(cur, &ptr)) { xfs_btree_lookup()
1824 cur->bc_ptrs[0] = keyno; xfs_btree_lookup()
1825 error = xfs_btree_increment(cur, 0, &i); xfs_btree_lookup()
1828 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); xfs_btree_lookup()
1829 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_lookup()
1835 cur->bc_ptrs[0] = keyno; xfs_btree_lookup()
1844 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_lookup()
1848 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_btree_lookup()
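
The middle of xfs_btree_lookup(), between the compare-stats increment and the keyno/pp lines above, is a per-block binary search driven by cur->bc_ops->key_diff(). A sketch of that search with XFS_LOOKUP_LE semantics, over a plain sorted array instead of block keys:

#include <stdint.h>

/* Last 1-based index whose key is <= want; 0 if all keys are greater. */
static int lookup_le(const uint64_t *keys, int nr, uint64_t want)
{
        int low = 1, high = nr, keyno = 0;

        while (low <= high) {
                int mid = (low + high) / 2;

                if (keys[mid - 1] <= want) {
                        keyno = mid;
                        low = mid + 1;
                } else {
                        high = mid - 1;
                }
        }
        return keyno;
}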
1857 struct xfs_btree_cur *cur, xfs_btree_updkey()
1866 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_updkey()
1867 XFS_BTREE_TRACE_ARGIK(cur, level, keyp); xfs_btree_updkey()
1869 ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) || level >= 1); xfs_btree_updkey()
1877 for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { xfs_btree_updkey()
1881 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_updkey()
1883 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_updkey()
1885 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_btree_updkey()
1889 ptr = cur->bc_ptrs[level]; xfs_btree_updkey()
1890 kp = xfs_btree_key_addr(cur, ptr, block); xfs_btree_updkey()
1891 xfs_btree_copy_keys(cur, kp, keyp, 1); xfs_btree_updkey()
1892 xfs_btree_log_keys(cur, bp, ptr, ptr); xfs_btree_updkey()
1895 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_updkey()
1900 * Update the record referred to by cur to the value in the
1906 struct xfs_btree_cur *cur, xfs_btree_update()
1915 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_update()
1916 XFS_BTREE_TRACE_ARGR(cur, rec); xfs_btree_update()
1919 block = xfs_btree_get_block(cur, 0, &bp); xfs_btree_update()
1922 error = xfs_btree_check_block(cur, block, 0, bp); xfs_btree_update()
1927 ptr = cur->bc_ptrs[0]; xfs_btree_update()
1928 rp = xfs_btree_rec_addr(cur, ptr, block); xfs_btree_update()
1931 xfs_btree_copy_recs(cur, rp, rec, 1); xfs_btree_update()
1932 xfs_btree_log_recs(cur, bp, ptr, ptr); xfs_btree_update()
1938 if (xfs_btree_is_lastrec(cur, block, 0)) { xfs_btree_update()
1939 cur->bc_ops->update_lastrec(cur, block, rec, xfs_btree_update()
1947 cur->bc_ops->init_key_from_rec(&key, rec); xfs_btree_update()
1948 error = xfs_btree_updkey(cur, &key, 1); xfs_btree_update()
1953 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_update()
1957 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_btree_update()
1962 * Move 1 record left from cur/level if possible.
1963 * Update cur to reflect the new path.
1967 struct xfs_btree_cur *cur, xfs_btree_lshift()
1984 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_lshift()
1985 XFS_BTREE_TRACE_ARGI(cur, level); xfs_btree_lshift()
1987 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && xfs_btree_lshift()
1988 level == cur->bc_nlevels - 1) xfs_btree_lshift()
1992 right = xfs_btree_get_block(cur, level, &rbp); xfs_btree_lshift()
1995 error = xfs_btree_check_block(cur, right, level, rbp); xfs_btree_lshift()
2001 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB); xfs_btree_lshift()
2002 if (xfs_btree_ptr_is_null(cur, &lptr)) xfs_btree_lshift()
2009 if (cur->bc_ptrs[level] <= 1) xfs_btree_lshift()
2013 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp); xfs_btree_lshift()
2019 if (lrecs == cur->bc_ops->get_maxrecs(cur, level)) xfs_btree_lshift()
2032 XFS_BTREE_STATS_INC(cur, lshift); xfs_btree_lshift()
2033 XFS_BTREE_STATS_ADD(cur, moves, 1); xfs_btree_lshift()
2044 lkp = xfs_btree_key_addr(cur, lrecs, left); xfs_btree_lshift()
2045 rkp = xfs_btree_key_addr(cur, 1, right); xfs_btree_lshift()
2047 lpp = xfs_btree_ptr_addr(cur, lrecs, left); xfs_btree_lshift()
2048 rpp = xfs_btree_ptr_addr(cur, 1, right); xfs_btree_lshift()
2050 error = xfs_btree_check_ptr(cur, rpp, 0, level); xfs_btree_lshift()
2054 xfs_btree_copy_keys(cur, lkp, rkp, 1); xfs_btree_lshift()
2055 xfs_btree_copy_ptrs(cur, lpp, rpp, 1); xfs_btree_lshift()
2057 xfs_btree_log_keys(cur, lbp, lrecs, lrecs); xfs_btree_lshift()
2058 xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs); xfs_btree_lshift()
2060 ASSERT(cur->bc_ops->keys_inorder(cur, xfs_btree_lshift()
2061 xfs_btree_key_addr(cur, lrecs - 1, left), lkp)); xfs_btree_lshift()
2066 lrp = xfs_btree_rec_addr(cur, lrecs, left); xfs_btree_lshift()
2067 rrp = xfs_btree_rec_addr(cur, 1, right); xfs_btree_lshift()
2069 xfs_btree_copy_recs(cur, lrp, rrp, 1); xfs_btree_lshift()
2070 xfs_btree_log_recs(cur, lbp, lrecs, lrecs); xfs_btree_lshift()
2072 ASSERT(cur->bc_ops->recs_inorder(cur, xfs_btree_lshift()
2073 xfs_btree_rec_addr(cur, lrecs - 1, left), lrp)); xfs_btree_lshift()
2077 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS); xfs_btree_lshift()
2080 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS); xfs_btree_lshift()
2085 XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1); xfs_btree_lshift()
2092 error = xfs_btree_check_ptr(cur, rpp, i + 1, level); xfs_btree_lshift()
2097 xfs_btree_shift_keys(cur, xfs_btree_lshift()
2098 xfs_btree_key_addr(cur, 2, right), xfs_btree_lshift()
2100 xfs_btree_shift_ptrs(cur, xfs_btree_lshift()
2101 xfs_btree_ptr_addr(cur, 2, right), xfs_btree_lshift()
2104 xfs_btree_log_keys(cur, rbp, 1, rrecs); xfs_btree_lshift()
2105 xfs_btree_log_ptrs(cur, rbp, 1, rrecs); xfs_btree_lshift()
2108 xfs_btree_shift_recs(cur, xfs_btree_lshift()
2109 xfs_btree_rec_addr(cur, 2, right), xfs_btree_lshift()
2111 xfs_btree_log_recs(cur, rbp, 1, rrecs); xfs_btree_lshift()
2117 cur->bc_ops->init_key_from_rec(&key, xfs_btree_lshift()
2118 xfs_btree_rec_addr(cur, 1, right)); xfs_btree_lshift()
2123 error = xfs_btree_updkey(cur, rkp, level + 1); xfs_btree_lshift()
2128 cur->bc_ptrs[level]--; xfs_btree_lshift()
2130 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_lshift()
2135 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_lshift()
2140 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_btree_lshift()
2145 * Move 1 record right from cur/level if possible.
2146 * Update cur to reflect the new path.
2150 struct xfs_btree_cur *cur, xfs_btree_rshift()
2167 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_rshift()
2168 XFS_BTREE_TRACE_ARGI(cur, level); xfs_btree_rshift()
2170 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && xfs_btree_rshift()
2171 (level == cur->bc_nlevels - 1)) xfs_btree_rshift()
2175 left = xfs_btree_get_block(cur, level, &lbp); xfs_btree_rshift()
2178 error = xfs_btree_check_block(cur, left, level, lbp); xfs_btree_rshift()
2184 xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB); xfs_btree_rshift()
2185 if (xfs_btree_ptr_is_null(cur, &rptr)) xfs_btree_rshift()
2193 if (cur->bc_ptrs[level] >= lrecs) xfs_btree_rshift()
2197 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp); xfs_btree_rshift()
2203 if (rrecs == cur->bc_ops->get_maxrecs(cur, level)) xfs_btree_rshift()
2206 XFS_BTREE_STATS_INC(cur, rshift); xfs_btree_rshift()
2207 XFS_BTREE_STATS_ADD(cur, moves, rrecs); xfs_btree_rshift()
2219 lkp = xfs_btree_key_addr(cur, lrecs, left); xfs_btree_rshift()
2220 lpp = xfs_btree_ptr_addr(cur, lrecs, left); xfs_btree_rshift()
2221 rkp = xfs_btree_key_addr(cur, 1, right); xfs_btree_rshift()
2222 rpp = xfs_btree_ptr_addr(cur, 1, right); xfs_btree_rshift()
2226 error = xfs_btree_check_ptr(cur, rpp, i, level); xfs_btree_rshift()
2232 xfs_btree_shift_keys(cur, rkp, 1, rrecs); xfs_btree_rshift()
2233 xfs_btree_shift_ptrs(cur, rpp, 1, rrecs); xfs_btree_rshift()
2236 error = xfs_btree_check_ptr(cur, lpp, 0, level); xfs_btree_rshift()
2242 xfs_btree_copy_keys(cur, rkp, lkp, 1); xfs_btree_rshift()
2243 xfs_btree_copy_ptrs(cur, rpp, lpp, 1); xfs_btree_rshift()
2245 xfs_btree_log_keys(cur, rbp, 1, rrecs + 1); xfs_btree_rshift()
2246 xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1); xfs_btree_rshift()
2248 ASSERT(cur->bc_ops->keys_inorder(cur, rkp, xfs_btree_rshift()
2249 xfs_btree_key_addr(cur, 2, right))); xfs_btree_rshift()
2255 lrp = xfs_btree_rec_addr(cur, lrecs, left); xfs_btree_rshift()
2256 rrp = xfs_btree_rec_addr(cur, 1, right); xfs_btree_rshift()
2258 xfs_btree_shift_recs(cur, rrp, 1, rrecs); xfs_btree_rshift()
2261 xfs_btree_copy_recs(cur, rrp, lrp, 1); xfs_btree_rshift()
2262 xfs_btree_log_recs(cur, rbp, 1, rrecs + 1); xfs_btree_rshift()
2264 cur->bc_ops->init_key_from_rec(&key, rrp); xfs_btree_rshift()
2267 ASSERT(cur->bc_ops->recs_inorder(cur, rrp, xfs_btree_rshift()
2268 xfs_btree_rec_addr(cur, 2, right))); xfs_btree_rshift()
2275 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS); xfs_btree_rshift()
2278 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS); xfs_btree_rshift()
2284 error = xfs_btree_dup_cursor(cur, &tcur); xfs_btree_rshift()
2288 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); xfs_btree_rshift()
2300 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_rshift()
2305 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_rshift()
2310 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_btree_rshift()
2320 * Split cur/level block in half.
2326 struct xfs_btree_cur *cur, __xfs_btree_split()
2350 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); __xfs_btree_split()
2351 XFS_BTREE_TRACE_ARGIPK(cur, level, *ptrp, key); __xfs_btree_split()
2353 XFS_BTREE_STATS_INC(cur, split); __xfs_btree_split()
2356 left = xfs_btree_get_block(cur, level, &lbp); __xfs_btree_split()
2359 error = xfs_btree_check_block(cur, left, level, lbp); __xfs_btree_split()
2364 xfs_btree_buf_to_ptr(cur, lbp, &lptr); __xfs_btree_split()
2367 error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, stat); __xfs_btree_split()
2372 XFS_BTREE_STATS_INC(cur, alloc); __xfs_btree_split()
2375 error = xfs_btree_get_buf_block(cur, &rptr, 0, &right, &rbp); __xfs_btree_split()
2380 xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0); __xfs_btree_split()
2389 if ((lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1) __xfs_btree_split()
2393 XFS_BTREE_STATS_ADD(cur, moves, rrecs); __xfs_btree_split()
2407 lkp = xfs_btree_key_addr(cur, src_index, left); __xfs_btree_split()
2408 lpp = xfs_btree_ptr_addr(cur, src_index, left); __xfs_btree_split()
2409 rkp = xfs_btree_key_addr(cur, 1, right); __xfs_btree_split()
2410 rpp = xfs_btree_ptr_addr(cur, 1, right); __xfs_btree_split()
2414 error = xfs_btree_check_ptr(cur, lpp, i, level); __xfs_btree_split()
2420 xfs_btree_copy_keys(cur, rkp, lkp, rrecs); __xfs_btree_split()
2421 xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs); __xfs_btree_split()
2423 xfs_btree_log_keys(cur, rbp, 1, rrecs); __xfs_btree_split()
2424 xfs_btree_log_ptrs(cur, rbp, 1, rrecs); __xfs_btree_split()
2427 xfs_btree_copy_keys(cur, key, rkp, 1); __xfs_btree_split()
2433 lrp = xfs_btree_rec_addr(cur, src_index, left); __xfs_btree_split()
2434 rrp = xfs_btree_rec_addr(cur, 1, right); __xfs_btree_split()
2436 xfs_btree_copy_recs(cur, rrp, lrp, rrecs); __xfs_btree_split()
2437 xfs_btree_log_recs(cur, rbp, 1, rrecs); __xfs_btree_split()
2439 cur->bc_ops->init_key_from_rec(key, __xfs_btree_split()
2440 xfs_btree_rec_addr(cur, 1, right)); __xfs_btree_split()
2448 xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB); __xfs_btree_split()
2449 xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB); __xfs_btree_split()
2450 xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB); __xfs_btree_split()
2451 xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB); __xfs_btree_split()
2457 xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS); __xfs_btree_split()
2458 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); __xfs_btree_split()
2464 if (!xfs_btree_ptr_is_null(cur, &rrptr)) { __xfs_btree_split()
2465 error = xfs_btree_read_buf_block(cur, &rrptr, __xfs_btree_split()
2469 xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB); __xfs_btree_split()
2470 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB); __xfs_btree_split()
2477 if (cur->bc_ptrs[level] > lrecs + 1) { __xfs_btree_split()
2478 xfs_btree_setbuf(cur, level, rbp); __xfs_btree_split()
2479 cur->bc_ptrs[level] -= lrecs; __xfs_btree_split()
2485 if (level + 1 < cur->bc_nlevels) { __xfs_btree_split()
2486 error = xfs_btree_dup_cursor(cur, curp); __xfs_btree_split()
2492 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); __xfs_btree_split()
2496 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); __xfs_btree_split()
2501 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); __xfs_btree_split()
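
__xfs_btree_split() gives the new right block roughly half the records; the (lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1 test above is the odd-count tiebreak, and in the full source its body bumps rrecs so the extra record lands opposite the cursor, leaving room for the pending insert. The count math, as a hedged sketch:

/* Split lrecs entries between left and right; curptr is the cursor's
 * 1-based slot. When the count is odd and the cursor stays in the
 * left block, the right block takes the extra record. */
static void split_counts(int lrecs, int curptr, int *nleft, int *nright)
{
        int rrecs = lrecs / 2;

        if ((lrecs & 1) && curptr <= rrecs + 1)
                rrecs++;
        *nright = rrecs;
        *nleft = lrecs - rrecs;
}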
2506 struct xfs_btree_cur *cur; member in struct:xfs_btree_split_args
2541 args->result = __xfs_btree_split(args->cur, args->level, args->ptrp, xfs_btree_split_worker()
2555 struct xfs_btree_cur *cur, xfs_btree_split()
2565 if (cur->bc_btnum != XFS_BTNUM_BMAP) xfs_btree_split()
2566 return __xfs_btree_split(cur, level, ptrp, key, curp, stat); xfs_btree_split()
2568 args.cur = cur; xfs_btree_split()
2590 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_new_iroot()
2608 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_new_iroot()
2609 XFS_BTREE_STATS_INC(cur, newroot); xfs_btree_new_iroot()
2611 ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE); xfs_btree_new_iroot()
2613 level = cur->bc_nlevels - 1; xfs_btree_new_iroot()
2615 block = xfs_btree_get_iroot(cur); xfs_btree_new_iroot()
2616 pp = xfs_btree_ptr_addr(cur, 1, block); xfs_btree_new_iroot()
2619 error = cur->bc_ops->alloc_block(cur, pp, &nptr, stat); xfs_btree_new_iroot()
2623 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_new_iroot()
2626 XFS_BTREE_STATS_INC(cur, alloc); xfs_btree_new_iroot()
2629 error = xfs_btree_get_buf_block(cur, &nptr, 0, &cblock, &cbp); xfs_btree_new_iroot()
2637 memcpy(cblock, block, xfs_btree_block_len(cur)); xfs_btree_new_iroot()
2638 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) { xfs_btree_new_iroot()
2639 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) xfs_btree_new_iroot()
2647 cur->bc_nlevels++; xfs_btree_new_iroot()
2648 cur->bc_ptrs[level + 1] = 1; xfs_btree_new_iroot()
2650 kp = xfs_btree_key_addr(cur, 1, block); xfs_btree_new_iroot()
2651 ckp = xfs_btree_key_addr(cur, 1, cblock); xfs_btree_new_iroot()
2652 xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock)); xfs_btree_new_iroot()
2654 cpp = xfs_btree_ptr_addr(cur, 1, cblock); xfs_btree_new_iroot()
2657 error = xfs_btree_check_ptr(cur, pp, i, level); xfs_btree_new_iroot()
2662 xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock)); xfs_btree_new_iroot()
2665 error = xfs_btree_check_ptr(cur, &nptr, 0, level); xfs_btree_new_iroot()
2669 xfs_btree_copy_ptrs(cur, pp, &nptr, 1); xfs_btree_new_iroot()
2671 xfs_iroot_realloc(cur->bc_private.b.ip, xfs_btree_new_iroot()
2673 cur->bc_private.b.whichfork); xfs_btree_new_iroot()
2675 xfs_btree_setbuf(cur, level, cbp); xfs_btree_new_iroot()
2681 xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS); xfs_btree_new_iroot()
2682 xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs)); xfs_btree_new_iroot()
2683 xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs)); xfs_btree_new_iroot()
2686 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork); xfs_btree_new_iroot()
2688 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_new_iroot()
2691 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_btree_new_iroot()
2700 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_new_root()
2716 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_new_root()
2717 XFS_BTREE_STATS_INC(cur, newroot); xfs_btree_new_root()
2720 cur->bc_ops->init_ptr_from_cur(cur, &rptr); xfs_btree_new_root()
2723 error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, stat); xfs_btree_new_root()
2728 XFS_BTREE_STATS_INC(cur, alloc); xfs_btree_new_root()
2731 error = xfs_btree_get_buf_block(cur, &lptr, 0, &new, &nbp); xfs_btree_new_root()
2736 cur->bc_ops->set_root(cur, &lptr, 1); xfs_btree_new_root()
2744 block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp); xfs_btree_new_root()
2747 error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp); xfs_btree_new_root()
2752 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); xfs_btree_new_root()
2753 if (!xfs_btree_ptr_is_null(cur, &rptr)) { xfs_btree_new_root()
2756 xfs_btree_buf_to_ptr(cur, lbp, &lptr); xfs_btree_new_root()
2758 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp); xfs_btree_new_root()
2766 xfs_btree_buf_to_ptr(cur, rbp, &rptr); xfs_btree_new_root()
2768 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB); xfs_btree_new_root()
2769 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp); xfs_btree_new_root()
2776 xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2); xfs_btree_new_root()
2777 xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS); xfs_btree_new_root()
2778 ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) && xfs_btree_new_root()
2779 !xfs_btree_ptr_is_null(cur, &rptr)); xfs_btree_new_root()
2783 xfs_btree_copy_keys(cur, xfs_btree_new_root()
2784 xfs_btree_key_addr(cur, 1, new), xfs_btree_new_root()
2785 xfs_btree_key_addr(cur, 1, left), 1); xfs_btree_new_root()
2786 xfs_btree_copy_keys(cur, xfs_btree_new_root()
2787 xfs_btree_key_addr(cur, 2, new), xfs_btree_new_root()
2788 xfs_btree_key_addr(cur, 1, right), 1); xfs_btree_new_root()
2790 cur->bc_ops->init_key_from_rec( xfs_btree_new_root()
2791 xfs_btree_key_addr(cur, 1, new), xfs_btree_new_root()
2792 xfs_btree_rec_addr(cur, 1, left)); xfs_btree_new_root()
2793 cur->bc_ops->init_key_from_rec( xfs_btree_new_root()
2794 xfs_btree_key_addr(cur, 2, new), xfs_btree_new_root()
2795 xfs_btree_rec_addr(cur, 1, right)); xfs_btree_new_root()
2797 xfs_btree_log_keys(cur, nbp, 1, 2); xfs_btree_new_root()
2800 xfs_btree_copy_ptrs(cur, xfs_btree_new_root()
2801 xfs_btree_ptr_addr(cur, 1, new), &lptr, 1); xfs_btree_new_root()
2802 xfs_btree_copy_ptrs(cur, xfs_btree_new_root()
2803 xfs_btree_ptr_addr(cur, 2, new), &rptr, 1); xfs_btree_new_root()
2804 xfs_btree_log_ptrs(cur, nbp, 1, 2); xfs_btree_new_root()
2807 xfs_btree_setbuf(cur, cur->bc_nlevels, nbp); xfs_btree_new_root()
2808 cur->bc_ptrs[cur->bc_nlevels] = nptr; xfs_btree_new_root()
2809 cur->bc_nlevels++; xfs_btree_new_root()
2810 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_new_root()
2814 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_btree_new_root()
2817 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_new_root()
2824 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_make_block_unfull()
2837 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && xfs_btree_make_block_unfull()
2838 level == cur->bc_nlevels - 1) { xfs_btree_make_block_unfull()
2839 struct xfs_inode *ip = cur->bc_private.b.ip; xfs_btree_make_block_unfull()
2841 if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) { xfs_btree_make_block_unfull()
2843 xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork); xfs_btree_make_block_unfull()
2848 error = xfs_btree_new_iroot(cur, &logflags, stat); xfs_btree_make_block_unfull()
2852 xfs_trans_log_inode(cur->bc_tp, ip, logflags); xfs_btree_make_block_unfull()
2859 error = xfs_btree_rshift(cur, level, stat); xfs_btree_make_block_unfull()
2864 error = xfs_btree_lshift(cur, level, stat); xfs_btree_make_block_unfull()
2869 *oindex = *index = cur->bc_ptrs[level]; xfs_btree_make_block_unfull()
2879 error = xfs_btree_split(cur, level, nptr, &key, ncur, stat); xfs_btree_make_block_unfull()
2884 *index = cur->bc_ptrs[level]; xfs_btree_make_block_unfull()
2885 cur->bc_ops->init_rec_from_key(&key, nrec); xfs_btree_make_block_unfull()
2895 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_insrec()
2899 struct xfs_btree_cur **curp, /* output: new cursor replacing cur */ xfs_btree_insrec()
2916 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_insrec()
2917 XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, recp); xfs_btree_insrec()
2925 if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && xfs_btree_insrec()
2926 (level >= cur->bc_nlevels)) { xfs_btree_insrec()
2927 error = xfs_btree_new_root(cur, stat); xfs_btree_insrec()
2928 xfs_btree_set_ptr_null(cur, ptrp); xfs_btree_insrec()
2930 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_insrec()
2935 ptr = cur->bc_ptrs[level]; xfs_btree_insrec()
2937 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_insrec()
2943 cur->bc_ops->init_key_from_rec(&key, recp); xfs_btree_insrec()
2947 XFS_BTREE_STATS_INC(cur, insrec); xfs_btree_insrec()
2950 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_insrec()
2954 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_insrec()
2961 ASSERT(cur->bc_ops->recs_inorder(cur, recp, xfs_btree_insrec()
2962 xfs_btree_rec_addr(cur, ptr, block))); xfs_btree_insrec()
2964 ASSERT(cur->bc_ops->keys_inorder(cur, &key, xfs_btree_insrec()
2965 xfs_btree_key_addr(cur, ptr, block))); xfs_btree_insrec()
2974 xfs_btree_set_ptr_null(cur, &nptr); xfs_btree_insrec()
2975 if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) { xfs_btree_insrec()
2976 error = xfs_btree_make_block_unfull(cur, level, numrecs, xfs_btree_insrec()
2986 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_insrec()
2990 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_insrec()
2999 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1); xfs_btree_insrec()
3006 kp = xfs_btree_key_addr(cur, ptr, block); xfs_btree_insrec()
3007 pp = xfs_btree_ptr_addr(cur, ptr, block); xfs_btree_insrec()
3011 error = xfs_btree_check_ptr(cur, pp, i, level); xfs_btree_insrec()
3017 xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1); xfs_btree_insrec()
3018 xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1); xfs_btree_insrec()
3021 error = xfs_btree_check_ptr(cur, ptrp, 0, level); xfs_btree_insrec()
3027 xfs_btree_copy_keys(cur, kp, &key, 1); xfs_btree_insrec()
3028 xfs_btree_copy_ptrs(cur, pp, ptrp, 1); xfs_btree_insrec()
3031 xfs_btree_log_ptrs(cur, bp, ptr, numrecs); xfs_btree_insrec()
3032 xfs_btree_log_keys(cur, bp, ptr, numrecs); xfs_btree_insrec()
3035 ASSERT(cur->bc_ops->keys_inorder(cur, kp, xfs_btree_insrec()
3036 xfs_btree_key_addr(cur, ptr + 1, block))); xfs_btree_insrec()
3043 rp = xfs_btree_rec_addr(cur, ptr, block); xfs_btree_insrec()
3045 xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1); xfs_btree_insrec()
3048 xfs_btree_copy_recs(cur, rp, recp, 1); xfs_btree_insrec()
3050 xfs_btree_log_recs(cur, bp, ptr, numrecs); xfs_btree_insrec()
3053 ASSERT(cur->bc_ops->recs_inorder(cur, rp, xfs_btree_insrec()
3054 xfs_btree_rec_addr(cur, ptr + 1, block))); xfs_btree_insrec()
3060 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS); xfs_btree_insrec()
3064 error = xfs_btree_updkey(cur, &key, level + 1); xfs_btree_insrec()
3073 if (xfs_btree_is_lastrec(cur, block, level)) { xfs_btree_insrec()
3074 cur->bc_ops->update_lastrec(cur, block, recp, xfs_btree_insrec()
3083 if (!xfs_btree_ptr_is_null(cur, &nptr)) { xfs_btree_insrec()
3088 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_insrec()
3093 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_btree_insrec()
3098 * Insert the record at the point referenced by cur.
3106 struct xfs_btree_cur *cur, xfs_btree_insert()
3119 pcur = cur; xfs_btree_insert()
3121 xfs_btree_set_ptr_null(cur, &nptr); xfs_btree_insert()
3122 cur->bc_ops->init_rec_from_cur(cur, &rec); xfs_btree_insert()
3136 if (pcur != cur) xfs_btree_insert()
3141 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); xfs_btree_insert()
3149 if (pcur != cur && xfs_btree_insert()
3150 (ncur || xfs_btree_ptr_is_null(cur, &nptr))) { xfs_btree_insert()
3152 if (cur->bc_ops->update_cursor) xfs_btree_insert()
3153 cur->bc_ops->update_cursor(pcur, cur); xfs_btree_insert()
3154 cur->bc_nlevels = pcur->bc_nlevels; xfs_btree_insert()
3162 } while (!xfs_btree_ptr_is_null(cur, &nptr)); xfs_btree_insert()
3164 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_insert()
3168 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_btree_insert()
3182 struct xfs_btree_cur *cur) xfs_btree_kill_iroot()
3184 int whichfork = cur->bc_private.b.whichfork; xfs_btree_kill_iroot()
3185 struct xfs_inode *ip = cur->bc_private.b.ip; xfs_btree_kill_iroot()
3202 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_kill_iroot()
3204 ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE); xfs_btree_kill_iroot()
3205 ASSERT(cur->bc_nlevels > 1); xfs_btree_kill_iroot()
3211 level = cur->bc_nlevels - 1; xfs_btree_kill_iroot()
3218 block = xfs_btree_get_iroot(cur); xfs_btree_kill_iroot()
3222 cblock = xfs_btree_get_block(cur, level - 1, &cbp); xfs_btree_kill_iroot()
3230 if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level)) xfs_btree_kill_iroot()
3233 XFS_BTREE_STATS_INC(cur, killroot); xfs_btree_kill_iroot()
3236 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB); xfs_btree_kill_iroot()
3237 ASSERT(xfs_btree_ptr_is_null(cur, &ptr)); xfs_btree_kill_iroot()
3238 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB); xfs_btree_kill_iroot()
3239 ASSERT(xfs_btree_ptr_is_null(cur, &ptr)); xfs_btree_kill_iroot()
3242 index = numrecs - cur->bc_ops->get_maxrecs(cur, level); xfs_btree_kill_iroot()
3244 xfs_iroot_realloc(cur->bc_private.b.ip, index, xfs_btree_kill_iroot()
3245 cur->bc_private.b.whichfork); xfs_btree_kill_iroot()
3252 kp = xfs_btree_key_addr(cur, 1, block); xfs_btree_kill_iroot()
3253 ckp = xfs_btree_key_addr(cur, 1, cblock); xfs_btree_kill_iroot()
3254 xfs_btree_copy_keys(cur, kp, ckp, numrecs); xfs_btree_kill_iroot()
3256 pp = xfs_btree_ptr_addr(cur, 1, block); xfs_btree_kill_iroot()
3257 cpp = xfs_btree_ptr_addr(cur, 1, cblock); xfs_btree_kill_iroot()
3262 error = xfs_btree_check_ptr(cur, cpp, i, level - 1); xfs_btree_kill_iroot()
3264 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_btree_kill_iroot()
3269 xfs_btree_copy_ptrs(cur, pp, cpp, numrecs); xfs_btree_kill_iroot()
3271 cur->bc_ops->free_block(cur, cbp); xfs_btree_kill_iroot()
3272 XFS_BTREE_STATS_INC(cur, free); xfs_btree_kill_iroot()
3274 cur->bc_bufs[level - 1] = NULL; xfs_btree_kill_iroot()
3276 xfs_trans_log_inode(cur->bc_tp, ip, xfs_btree_kill_iroot()
3277 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork)); xfs_btree_kill_iroot()
3278 cur->bc_nlevels--; xfs_btree_kill_iroot()
3280 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_kill_iroot()
3289 struct xfs_btree_cur *cur, xfs_btree_kill_root()
3296 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_kill_root()
3297 XFS_BTREE_STATS_INC(cur, killroot); xfs_btree_kill_root()
3303 cur->bc_ops->set_root(cur, newroot, -1); xfs_btree_kill_root()
3305 error = cur->bc_ops->free_block(cur, bp); xfs_btree_kill_root()
3307 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_btree_kill_root()
3311 XFS_BTREE_STATS_INC(cur, free); xfs_btree_kill_root()
3313 cur->bc_bufs[level] = NULL; xfs_btree_kill_root()
3314 cur->bc_ra[level] = 0; xfs_btree_kill_root()
3315 cur->bc_nlevels--; xfs_btree_kill_root()
3317 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_kill_root()
3323 struct xfs_btree_cur *cur, xfs_btree_dec_cursor()
3331 error = xfs_btree_decrement(cur, level, &i); xfs_btree_dec_cursor()
3336 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_dec_cursor()
3343 * Delete record pointed to by cur/level.
3349 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_delrec()
3374 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_delrec()
3375 XFS_BTREE_TRACE_ARGI(cur, level); xfs_btree_delrec()
3380 ptr = cur->bc_ptrs[level]; xfs_btree_delrec()
3382 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_delrec()
3388 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_delrec()
3392 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_delrec()
3399 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_delrec()
3404 XFS_BTREE_STATS_INC(cur, delrec); xfs_btree_delrec()
3405 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr); xfs_btree_delrec()
3413 lkp = xfs_btree_key_addr(cur, ptr + 1, block); xfs_btree_delrec()
3414 lpp = xfs_btree_ptr_addr(cur, ptr + 1, block); xfs_btree_delrec()
3418 error = xfs_btree_check_ptr(cur, lpp, i, level); xfs_btree_delrec()
3425 xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr); xfs_btree_delrec()
3426 xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr); xfs_btree_delrec()
3427 xfs_btree_log_keys(cur, bp, ptr, numrecs - 1); xfs_btree_delrec()
3428 xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1); xfs_btree_delrec()
3436 keyp = xfs_btree_key_addr(cur, 1, block); xfs_btree_delrec()
3440 xfs_btree_shift_recs(cur, xfs_btree_delrec()
3441 xfs_btree_rec_addr(cur, ptr + 1, block), xfs_btree_delrec()
3443 xfs_btree_log_recs(cur, bp, ptr, numrecs - 1); xfs_btree_delrec()
3451 cur->bc_ops->init_key_from_rec(&key, xfs_btree_delrec()
3452 xfs_btree_rec_addr(cur, 1, block)); xfs_btree_delrec()
3461 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS); xfs_btree_delrec()
3467 if (xfs_btree_is_lastrec(cur, block, level)) { xfs_btree_delrec()
3468 cur->bc_ops->update_lastrec(cur, block, NULL, xfs_btree_delrec()
3477 if (level == cur->bc_nlevels - 1) { xfs_btree_delrec()
3478 if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) { xfs_btree_delrec()
3479 xfs_iroot_realloc(cur->bc_private.b.ip, -1, xfs_btree_delrec()
3480 cur->bc_private.b.whichfork); xfs_btree_delrec()
3482 error = xfs_btree_kill_iroot(cur); xfs_btree_delrec()
3486 error = xfs_btree_dec_cursor(cur, level, stat); xfs_btree_delrec()
3504 pp = xfs_btree_ptr_addr(cur, 1, block); xfs_btree_delrec()
3505 error = xfs_btree_kill_root(cur, bp, level, pp); xfs_btree_delrec()
3509 error = xfs_btree_dec_cursor(cur, level, stat); xfs_btree_delrec()
3522 error = xfs_btree_updkey(cur, keyp, level + 1); xfs_btree_delrec()
3531 if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) { xfs_btree_delrec()
3532 error = xfs_btree_dec_cursor(cur, level, stat); xfs_btree_delrec()
3543 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); xfs_btree_delrec()
3544 xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB); xfs_btree_delrec()
3546 if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) { xfs_btree_delrec()
3552 if (xfs_btree_ptr_is_null(cur, &rptr) && xfs_btree_delrec()
3553 xfs_btree_ptr_is_null(cur, &lptr) && xfs_btree_delrec()
3554 level == cur->bc_nlevels - 2) { xfs_btree_delrec()
3555 error = xfs_btree_kill_iroot(cur); xfs_btree_delrec()
3557 error = xfs_btree_dec_cursor(cur, level, stat); xfs_btree_delrec()
3564 ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) || xfs_btree_delrec()
3565 !xfs_btree_ptr_is_null(cur, &lptr)); xfs_btree_delrec()
3571 error = xfs_btree_dup_cursor(cur, &tcur); xfs_btree_delrec()
3579 if (!xfs_btree_ptr_is_null(cur, &rptr)) { xfs_btree_delrec()
3585 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); xfs_btree_delrec()
3590 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); xfs_btree_delrec()
3593 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); xfs_btree_delrec()
3611 cur->bc_ops->get_minrecs(tcur, level)) { xfs_btree_delrec()
3617 cur->bc_ops->get_minrecs(tcur, level)); xfs_btree_delrec()
3622 error = xfs_btree_dec_cursor(cur, level, stat); xfs_btree_delrec()
3635 if (!xfs_btree_ptr_is_null(cur, &lptr)) { xfs_btree_delrec()
3637 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); xfs_btree_delrec()
3642 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); xfs_btree_delrec()
3650 if (!xfs_btree_ptr_is_null(cur, &lptr)) { xfs_btree_delrec()
3656 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); xfs_btree_delrec()
3662 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); xfs_btree_delrec()
3667 error = xfs_btree_check_block(cur, left, level, lbp); xfs_btree_delrec()
3680 cur->bc_ops->get_minrecs(tcur, level)) { xfs_btree_delrec()
3686 cur->bc_ops->get_minrecs(tcur, level)); xfs_btree_delrec()
3690 cur->bc_ptrs[0]++; xfs_btree_delrec()
3691 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_delrec()
3709 ASSERT(!xfs_btree_ptr_is_null(cur, &cptr)); xfs_btree_delrec()
3711 if (!xfs_btree_ptr_is_null(cur, &lptr) && xfs_btree_delrec()
3713 cur->bc_ops->get_maxrecs(cur, level)) { xfs_btree_delrec()
3721 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp); xfs_btree_delrec()
3728 } else if (!xfs_btree_ptr_is_null(cur, &rptr) && xfs_btree_delrec()
3730 cur->bc_ops->get_maxrecs(cur, level)) { xfs_btree_delrec()
3738 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp); xfs_btree_delrec()
3747 error = xfs_btree_dec_cursor(cur, level, stat); xfs_btree_delrec()
3760 XFS_BTREE_STATS_ADD(cur, moves, rrecs); xfs_btree_delrec()
3768 lkp = xfs_btree_key_addr(cur, lrecs + 1, left); xfs_btree_delrec()
3769 lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left); xfs_btree_delrec()
3770 rkp = xfs_btree_key_addr(cur, 1, right); xfs_btree_delrec()
3771 rpp = xfs_btree_ptr_addr(cur, 1, right); xfs_btree_delrec()
3774 error = xfs_btree_check_ptr(cur, rpp, i, level); xfs_btree_delrec()
3779 xfs_btree_copy_keys(cur, lkp, rkp, rrecs); xfs_btree_delrec()
3780 xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs); xfs_btree_delrec()
3782 xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs); xfs_btree_delrec()
3783 xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs); xfs_btree_delrec()
3789 lrp = xfs_btree_rec_addr(cur, lrecs + 1, left); xfs_btree_delrec()
3790 rrp = xfs_btree_rec_addr(cur, 1, right); xfs_btree_delrec()
3792 xfs_btree_copy_recs(cur, lrp, rrp, rrecs); xfs_btree_delrec()
3793 xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs); xfs_btree_delrec()
3796 XFS_BTREE_STATS_INC(cur, join); xfs_btree_delrec()
3803 xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB); xfs_btree_delrec()
3804 xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB); xfs_btree_delrec()
3805 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); xfs_btree_delrec()
3808 xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB); xfs_btree_delrec()
3809 if (!xfs_btree_ptr_is_null(cur, &cptr)) { xfs_btree_delrec()
3810 error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp); xfs_btree_delrec()
3813 xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB); xfs_btree_delrec()
3814 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB); xfs_btree_delrec()
3818 error = cur->bc_ops->free_block(cur, rbp); xfs_btree_delrec()
3821 XFS_BTREE_STATS_INC(cur, free); xfs_btree_delrec()
3828 cur->bc_bufs[level] = lbp; xfs_btree_delrec()
3829 cur->bc_ptrs[level] += lrecs; xfs_btree_delrec()
3830 cur->bc_ra[level] = 0; xfs_btree_delrec()
3836 else if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) || xfs_btree_delrec()
3837 (level + 1 < cur->bc_nlevels)) { xfs_btree_delrec()
3838 error = xfs_btree_increment(cur, level + 1, &i); xfs_btree_delrec()
3850 cur->bc_ptrs[level]--; xfs_btree_delrec()
3852 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_delrec()
3858 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_btree_delrec()
3865 * Delete the record pointed to by cur.
3871 struct xfs_btree_cur *cur, xfs_btree_delete()
3878 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_btree_delete()
3887 error = xfs_btree_delrec(cur, level, &i); xfs_btree_delete()
3893 for (level = 1; level < cur->bc_nlevels; level++) { xfs_btree_delete()
3894 if (cur->bc_ptrs[level] == 0) { xfs_btree_delete()
3895 error = xfs_btree_decrement(cur, level, &i); xfs_btree_delete()
3903 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_btree_delete()
3907 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_btree_delete()
3916 struct xfs_btree_cur *cur, /* btree cursor */ xfs_btree_get_rec()
3927 ptr = cur->bc_ptrs[0]; xfs_btree_get_rec()
3928 block = xfs_btree_get_block(cur, 0, &bp); xfs_btree_get_rec()
3931 error = xfs_btree_check_block(cur, block, 0, bp); xfs_btree_get_rec()
3947 *recp = xfs_btree_rec_addr(cur, ptr, block); xfs_btree_get_rec()
3978 struct xfs_btree_cur *cur, xfs_btree_block_change_owner()
3988 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); xfs_btree_block_change_owner()
3991 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_block_change_owner()
3992 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) xfs_btree_block_change_owner()
4005 if (cur->bc_tp) { xfs_btree_block_change_owner()
4006 xfs_trans_ordered_buf(cur->bc_tp, bp); xfs_btree_block_change_owner()
4007 xfs_btree_log_block(cur, bp, XFS_BB_OWNER); xfs_btree_block_change_owner()
4012 ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE); xfs_btree_block_change_owner()
4013 ASSERT(level == cur->bc_nlevels - 1); xfs_btree_block_change_owner()
4017 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); xfs_btree_block_change_owner()
4018 if (xfs_btree_ptr_is_null(cur, &rptr)) xfs_btree_block_change_owner()
4021 return xfs_btree_lookup_get_block(cur, level, &rptr, &block); xfs_btree_block_change_owner()
4026 struct xfs_btree_cur *cur, xfs_btree_change_owner()
4035 cur->bc_ops->init_ptr_from_cur(cur, &lptr); xfs_btree_change_owner()
4038 for (level = cur->bc_nlevels - 1; level >= 0; level--) { xfs_btree_change_owner()
4040 error = xfs_btree_lookup_get_block(cur, level, &lptr, &block); xfs_btree_change_owner()
4048 ptr = xfs_btree_ptr_addr(cur, 1, block); xfs_btree_change_owner()
4049 xfs_btree_readahead_ptr(cur, ptr, 1); xfs_btree_change_owner()
4057 error = xfs_btree_block_change_owner(cur, level, xfs_btree_change_owner()
55 xfs_btree_check_lblock( struct xfs_btree_cur *cur, struct xfs_btree_block *block, int level, struct xfs_buf *bp) xfs_btree_check_lblock() argument
99 xfs_btree_check_sblock( struct xfs_btree_cur *cur, struct xfs_btree_block *block, int level, struct xfs_buf *bp) xfs_btree_check_sblock() argument
150 xfs_btree_check_block( struct xfs_btree_cur *cur, struct xfs_btree_block *block, int level, struct xfs_buf *bp) xfs_btree_check_block() argument
166 xfs_btree_check_lptr( struct xfs_btree_cur *cur, xfs_fsblock_t bno, int level) xfs_btree_check_lptr() argument
183 xfs_btree_check_sptr( struct xfs_btree_cur *cur, xfs_agblock_t bno, int level) xfs_btree_check_sptr() argument
202 xfs_btree_check_ptr( struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr, int index, int level) xfs_btree_check_ptr() argument
286 xfs_btree_del_cursor( xfs_btree_cur_t *cur, int error) xfs_btree_del_cursor() argument
325 xfs_btree_dup_cursor( xfs_btree_cur_t *cur, xfs_btree_cur_t **ncur) xfs_btree_dup_cursor() argument
431 xfs_btree_rec_offset( struct xfs_btree_cur *cur, int n) xfs_btree_rec_offset() argument
443 xfs_btree_key_offset( struct xfs_btree_cur *cur, int n) xfs_btree_key_offset() argument
455 xfs_btree_ptr_offset( struct xfs_btree_cur *cur, int n, int level) xfs_btree_ptr_offset() argument
469 xfs_btree_rec_addr( struct xfs_btree_cur *cur, int n, struct xfs_btree_block *block) xfs_btree_rec_addr() argument
482 xfs_btree_key_addr( struct xfs_btree_cur *cur, int n, struct xfs_btree_block *block) xfs_btree_key_addr() argument
495 xfs_btree_ptr_addr( struct xfs_btree_cur *cur, int n, struct xfs_btree_block *block) xfs_btree_ptr_addr() argument
515 xfs_btree_get_iroot( struct xfs_btree_cur *cur) xfs_btree_get_iroot() argument
529 xfs_btree_get_block( struct xfs_btree_cur *cur, int level, struct xfs_buf **bpp) xfs_btree_get_block() argument
586 xfs_btree_islastblock( xfs_btree_cur_t *cur, int level) xfs_btree_islastblock() argument
606 xfs_btree_firstrec( xfs_btree_cur_t *cur, int level) xfs_btree_firstrec() argument
635 xfs_btree_lastrec( xfs_btree_cur_t *cur, int level) xfs_btree_lastrec() argument
766 xfs_btree_readahead_lblock( struct xfs_btree_cur *cur, int lr, struct xfs_btree_block *block) xfs_btree_readahead_lblock() argument
791 xfs_btree_readahead_sblock( struct xfs_btree_cur *cur, int lr, struct xfs_btree_block *block) xfs_btree_readahead_sblock() argument
821 xfs_btree_readahead( struct xfs_btree_cur *cur, int lev, int lr) xfs_btree_readahead() argument
848 xfs_btree_ptr_to_daddr( struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr) xfs_btree_ptr_to_daddr() argument
872 xfs_btree_readahead_ptr( struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr, xfs_extlen_t count) xfs_btree_readahead_ptr() argument
887 xfs_btree_setbuf( xfs_btree_cur_t *cur, int lev, xfs_buf_t *bp) xfs_btree_setbuf() argument
914 xfs_btree_ptr_is_null( struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr) xfs_btree_ptr_is_null() argument
925 xfs_btree_set_ptr_null( struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr) xfs_btree_set_ptr_null() argument
939 xfs_btree_get_sibling( struct xfs_btree_cur *cur, struct xfs_btree_block *block, union xfs_btree_ptr *ptr, int lr) xfs_btree_get_sibling() argument
961 xfs_btree_set_sibling( struct xfs_btree_cur *cur, struct xfs_btree_block *block, union xfs_btree_ptr *ptr, int lr) xfs_btree_set_sibling() argument
1037 xfs_btree_init_block_cur( struct xfs_btree_cur *cur, struct xfs_buf *bp, int level, int numrecs) xfs_btree_init_block_cur() argument
1067 xfs_btree_is_lastrec( struct xfs_btree_cur *cur, struct xfs_btree_block *block, int level) xfs_btree_is_lastrec() argument
1086 xfs_btree_buf_to_ptr( struct xfs_btree_cur *cur, struct xfs_buf *bp, union xfs_btree_ptr *ptr) xfs_btree_buf_to_ptr() argument
1101 xfs_btree_set_refs( struct xfs_btree_cur *cur, struct xfs_buf *bp) xfs_btree_set_refs() argument
1123 xfs_btree_get_buf_block( struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr, int flags, struct xfs_btree_block **block, struct xfs_buf **bpp) xfs_btree_get_buf_block() argument
1153 xfs_btree_read_buf_block( struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr, int flags, struct xfs_btree_block **block, struct xfs_buf **bpp) xfs_btree_read_buf_block() argument
1183 xfs_btree_copy_keys( struct xfs_btree_cur *cur, union xfs_btree_key *dst_key, union xfs_btree_key *src_key, int numkeys) xfs_btree_copy_keys() argument
1197 xfs_btree_copy_recs( struct xfs_btree_cur *cur, union xfs_btree_rec *dst_rec, union xfs_btree_rec *src_rec, int numrecs) xfs_btree_copy_recs() argument
1211 xfs_btree_copy_ptrs( struct xfs_btree_cur *cur, union xfs_btree_ptr *dst_ptr, union xfs_btree_ptr *src_ptr, int numptrs) xfs_btree_copy_ptrs() argument
1225 xfs_btree_shift_keys( struct xfs_btree_cur *cur, union xfs_btree_key *key, int dir, int numkeys) xfs_btree_shift_keys() argument
1244 xfs_btree_shift_recs( struct xfs_btree_cur *cur, union xfs_btree_rec *rec, int dir, int numrecs) xfs_btree_shift_recs() argument
1263 xfs_btree_shift_ptrs( struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr, int dir, int numptrs) xfs_btree_shift_ptrs() argument
1282 xfs_btree_log_keys( struct xfs_btree_cur *cur, struct xfs_buf *bp, int first, int last) xfs_btree_log_keys() argument
1308 xfs_btree_log_recs( struct xfs_btree_cur *cur, struct xfs_buf *bp, int first, int last) xfs_btree_log_recs() argument
1329 xfs_btree_log_ptrs( struct xfs_btree_cur *cur, struct xfs_buf *bp, int first, int last) xfs_btree_log_ptrs() argument
1358 xfs_btree_log_block( struct xfs_btree_cur *cur, struct xfs_buf *bp, int fields) xfs_btree_log_block() argument
1432 xfs_btree_increment( struct xfs_btree_cur *cur, int level, int *stat) xfs_btree_increment() argument
1540 xfs_btree_decrement( struct xfs_btree_cur *cur, int level, int *stat) xfs_btree_decrement() argument
1634 xfs_btree_lookup_get_block( struct xfs_btree_cur *cur, int level, union xfs_btree_ptr *pp, struct xfs_btree_block **blkp) xfs_btree_lookup_get_block() argument
1676 xfs_lookup_get_search_key( struct xfs_btree_cur *cur, int level, int keyno, struct xfs_btree_block *block, union xfs_btree_key *kp) xfs_lookup_get_search_key() argument
1697 xfs_btree_lookup( struct xfs_btree_cur *cur, xfs_lookup_t dir, int *stat) xfs_btree_lookup() argument
1856 xfs_btree_updkey( struct xfs_btree_cur *cur, union xfs_btree_key *keyp, int level) xfs_btree_updkey() argument
1905 xfs_btree_update( struct xfs_btree_cur *cur, union xfs_btree_rec *rec) xfs_btree_update() argument
1966 xfs_btree_lshift( struct xfs_btree_cur *cur, int level, int *stat) xfs_btree_lshift() argument
2149 xfs_btree_rshift( struct xfs_btree_cur *cur, int level, int *stat) xfs_btree_rshift() argument
2325 __xfs_btree_split( struct xfs_btree_cur *cur, int level, union xfs_btree_ptr *ptrp, union xfs_btree_key *key, struct xfs_btree_cur **curp, int *stat) __xfs_btree_split() argument
2554 xfs_btree_split( struct xfs_btree_cur *cur, int level, union xfs_btree_ptr *ptrp, union xfs_btree_key *key, struct xfs_btree_cur **curp, int *stat) xfs_btree_split() argument
2589 xfs_btree_new_iroot( struct xfs_btree_cur *cur, int *logflags, int *stat) xfs_btree_new_iroot() argument
2699 xfs_btree_new_root( struct xfs_btree_cur *cur, int *stat) xfs_btree_new_root() argument
2823 xfs_btree_make_block_unfull( struct xfs_btree_cur *cur, int level, int numrecs, int *oindex, int *index, union xfs_btree_ptr *nptr, struct xfs_btree_cur **ncur, union xfs_btree_rec *nrec, int *stat) xfs_btree_make_block_unfull() argument
2894 xfs_btree_insrec( struct xfs_btree_cur *cur, int level, union xfs_btree_ptr *ptrp, union xfs_btree_rec *recp, struct xfs_btree_cur **curp, int *stat) xfs_btree_insrec() argument
3105 xfs_btree_insert( struct xfs_btree_cur *cur, int *stat) xfs_btree_insert() argument
3181 xfs_btree_kill_iroot( struct xfs_btree_cur *cur) xfs_btree_kill_iroot() argument
3288 xfs_btree_kill_root( struct xfs_btree_cur *cur, struct xfs_buf *bp, int level, union xfs_btree_ptr *newroot) xfs_btree_kill_root() argument
3322 xfs_btree_dec_cursor( struct xfs_btree_cur *cur, int level, int *stat) xfs_btree_dec_cursor() argument
3348 xfs_btree_delrec( struct xfs_btree_cur *cur, int level, int *stat) xfs_btree_delrec() argument
3870 xfs_btree_delete( struct xfs_btree_cur *cur, int *stat) xfs_btree_delete() argument
3915 xfs_btree_get_rec( struct xfs_btree_cur *cur, union xfs_btree_rec **recp, int *stat) xfs_btree_get_rec() argument
3977 xfs_btree_block_change_owner( struct xfs_btree_cur *cur, int level, __uint64_t new_owner, struct list_head *buffer_list) xfs_btree_block_change_owner() argument
4025 xfs_btree_change_owner( struct xfs_btree_cur *cur, __uint64_t new_owner, struct list_head *buffer_list) xfs_btree_change_owner() argument
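Read together, the xfs_btree.c hits above show one calling convention: every operation takes the cursor as its first argument and reports hit/miss through an int *stat out-parameter, while errors travel in the return value. A minimal sketch of how a caller strings these together — the wrapper name example_replace_rec and its abbreviated error handling are illustrative, not from the kernel:

    /*
     * Sketch: delete-then-reinsert the record keyed by cur->bc_rec,
     * assuming "cur" came from one of the *_init_cursor() helpers.
     */
    static int
    example_replace_rec(struct xfs_btree_cur *cur)
    {
            int     stat;   /* 1 = found/done, 0 = not found/no room */
            int     error;

            error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &stat);    /* position cursor */
            if (error || !stat)
                    goto out;
            error = xfs_btree_delete(cur, &stat);   /* remove record at cursor */
            if (error || !stat)
                    goto out;
            error = xfs_btree_insert(cur, &stat);   /* re-insert from cur->bc_rec */
    out:
            xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
            return error;
    }

Note the teardown: xfs_btree_del_cursor() must be told whether the transaction is being aborted, which is why the callers in the listings below pass XFS_BTREE_ERROR on their failure paths and XFS_BTREE_NOERROR otherwise.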
H A Dxfs_alloc_btree.c
38 struct xfs_btree_cur *cur) xfs_allocbt_dup_cursor()
40 return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp, xfs_allocbt_dup_cursor()
41 cur->bc_private.a.agbp, cur->bc_private.a.agno, xfs_allocbt_dup_cursor()
42 cur->bc_btnum); xfs_allocbt_dup_cursor()
47 struct xfs_btree_cur *cur, xfs_allocbt_set_root()
51 struct xfs_buf *agbp = cur->bc_private.a.agbp; xfs_allocbt_set_root()
54 int btnum = cur->bc_btnum; xfs_allocbt_set_root()
55 struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno); xfs_allocbt_set_root()
64 xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS); xfs_allocbt_set_root()
69 struct xfs_btree_cur *cur, xfs_allocbt_alloc_block()
77 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_allocbt_alloc_block()
80 error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp, xfs_allocbt_alloc_block()
83 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_allocbt_alloc_block()
88 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_allocbt_alloc_block()
93 xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false); xfs_allocbt_alloc_block()
95 xfs_trans_agbtree_delta(cur->bc_tp, 1); xfs_allocbt_alloc_block()
98 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_allocbt_alloc_block()
105 struct xfs_btree_cur *cur, xfs_allocbt_free_block()
108 struct xfs_buf *agbp = cur->bc_private.a.agbp; xfs_allocbt_free_block()
113 bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp)); xfs_allocbt_free_block()
114 error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1); xfs_allocbt_free_block()
118 xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1, xfs_allocbt_free_block()
120 xfs_trans_agbtree_delta(cur->bc_tp, -1); xfs_allocbt_free_block()
122 xfs_trans_binval(cur->bc_tp, bp); xfs_allocbt_free_block()
131 struct xfs_btree_cur *cur, xfs_allocbt_update_lastrec()
137 struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); xfs_allocbt_update_lastrec()
143 ASSERT(cur->bc_btnum == XFS_BTNUM_CNT); xfs_allocbt_update_lastrec()
170 rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs); xfs_allocbt_update_lastrec()
183 pag = xfs_perag_get(cur->bc_mp, seqno); xfs_allocbt_update_lastrec()
186 xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST); xfs_allocbt_update_lastrec()
191 struct xfs_btree_cur *cur, xfs_allocbt_get_minrecs()
194 return cur->bc_mp->m_alloc_mnr[level != 0]; xfs_allocbt_get_minrecs()
199 struct xfs_btree_cur *cur, xfs_allocbt_get_maxrecs()
202 return cur->bc_mp->m_alloc_mxr[level != 0]; xfs_allocbt_get_maxrecs()
229 struct xfs_btree_cur *cur, xfs_allocbt_init_rec_from_cur()
232 ASSERT(cur->bc_rec.a.ar_startblock != 0); xfs_allocbt_init_rec_from_cur()
234 rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock); xfs_allocbt_init_rec_from_cur()
235 rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount); xfs_allocbt_init_rec_from_cur()
240 struct xfs_btree_cur *cur, xfs_allocbt_init_ptr_from_cur()
243 struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); xfs_allocbt_init_ptr_from_cur()
245 ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno)); xfs_allocbt_init_ptr_from_cur()
246 ASSERT(agf->agf_roots[cur->bc_btnum] != 0); xfs_allocbt_init_ptr_from_cur()
248 ptr->s = agf->agf_roots[cur->bc_btnum]; xfs_allocbt_init_ptr_from_cur()
253 struct xfs_btree_cur *cur, xfs_allocbt_key_diff()
256 xfs_alloc_rec_incore_t *rec = &cur->bc_rec.a; xfs_allocbt_key_diff()
260 if (cur->bc_btnum == XFS_BTNUM_BNO) { xfs_allocbt_key_diff()
390 struct xfs_btree_cur *cur, xfs_allocbt_keys_inorder()
394 if (cur->bc_btnum == XFS_BTNUM_BNO) { xfs_allocbt_keys_inorder()
408 struct xfs_btree_cur *cur, xfs_allocbt_recs_inorder()
412 if (cur->bc_btnum == XFS_BTNUM_BNO) { xfs_allocbt_recs_inorder()
461 struct xfs_btree_cur *cur; xfs_allocbt_init_cursor() local
465 cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP); xfs_allocbt_init_cursor()
467 cur->bc_tp = tp; xfs_allocbt_init_cursor()
468 cur->bc_mp = mp; xfs_allocbt_init_cursor()
469 cur->bc_btnum = btnum; xfs_allocbt_init_cursor()
470 cur->bc_blocklog = mp->m_sb.sb_blocklog; xfs_allocbt_init_cursor()
471 cur->bc_ops = &xfs_allocbt_ops; xfs_allocbt_init_cursor()
474 cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]); xfs_allocbt_init_cursor()
475 cur->bc_flags = XFS_BTREE_LASTREC_UPDATE; xfs_allocbt_init_cursor()
477 cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]); xfs_allocbt_init_cursor()
480 cur->bc_private.a.agbp = agbp; xfs_allocbt_init_cursor()
481 cur->bc_private.a.agno = agno; xfs_allocbt_init_cursor()
484 cur->bc_flags |= XFS_BTREE_CRC_BLOCKS; xfs_allocbt_init_cursor()
486 return cur; xfs_allocbt_init_cursor()
37 xfs_allocbt_dup_cursor( struct xfs_btree_cur *cur) xfs_allocbt_dup_cursor() argument
46 xfs_allocbt_set_root( struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr, int inc) xfs_allocbt_set_root() argument
68 xfs_allocbt_alloc_block( struct xfs_btree_cur *cur, union xfs_btree_ptr *start, union xfs_btree_ptr *new, int *stat) xfs_allocbt_alloc_block() argument
104 xfs_allocbt_free_block( struct xfs_btree_cur *cur, struct xfs_buf *bp) xfs_allocbt_free_block() argument
130 xfs_allocbt_update_lastrec( struct xfs_btree_cur *cur, struct xfs_btree_block *block, union xfs_btree_rec *rec, int ptr, int reason) xfs_allocbt_update_lastrec() argument
190 xfs_allocbt_get_minrecs( struct xfs_btree_cur *cur, int level) xfs_allocbt_get_minrecs() argument
198 xfs_allocbt_get_maxrecs( struct xfs_btree_cur *cur, int level) xfs_allocbt_get_maxrecs() argument
228 xfs_allocbt_init_rec_from_cur( struct xfs_btree_cur *cur, union xfs_btree_rec *rec) xfs_allocbt_init_rec_from_cur() argument
239 xfs_allocbt_init_ptr_from_cur( struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr) xfs_allocbt_init_ptr_from_cur() argument
252 xfs_allocbt_key_diff( struct xfs_btree_cur *cur, union xfs_btree_key *key) xfs_allocbt_key_diff() argument
389 xfs_allocbt_keys_inorder( struct xfs_btree_cur *cur, union xfs_btree_key *k1, union xfs_btree_key *k2) xfs_allocbt_keys_inorder() argument
407 xfs_allocbt_recs_inorder( struct xfs_btree_cur *cur, union xfs_btree_rec *r1, union xfs_btree_rec *r2) xfs_allocbt_recs_inorder() argument
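The lifecycle around these callbacks is symmetric: xfs_allocbt_init_cursor() allocates and fills the cursor, xfs_btree_del_cursor() releases it. A short usage sketch — mp, tp, agbp and agno (mount, transaction, locked AGF buffer, AG number) are assumed to come from the caller:

    struct xfs_btree_cur    *cur;

    /* by-count (cntbt) cursor; use XFS_BTNUM_BNO for the by-block tree */
    cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
    /* ... lookups/updates through the generic xfs_btree_* entry points ... */
    xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);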
H A Dxfs_ialloc_btree.c
39 struct xfs_btree_cur *cur, xfs_inobt_get_minrecs()
42 return cur->bc_mp->m_inobt_mnr[level != 0]; xfs_inobt_get_minrecs()
47 struct xfs_btree_cur *cur) xfs_inobt_dup_cursor()
49 return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp, xfs_inobt_dup_cursor()
50 cur->bc_private.a.agbp, cur->bc_private.a.agno, xfs_inobt_dup_cursor()
51 cur->bc_btnum); xfs_inobt_dup_cursor()
56 struct xfs_btree_cur *cur, xfs_inobt_set_root()
60 struct xfs_buf *agbp = cur->bc_private.a.agbp; xfs_inobt_set_root()
65 xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL); xfs_inobt_set_root()
70 struct xfs_btree_cur *cur, xfs_finobt_set_root()
74 struct xfs_buf *agbp = cur->bc_private.a.agbp; xfs_finobt_set_root()
79 xfs_ialloc_log_agi(cur->bc_tp, agbp, xfs_finobt_set_root()
85 struct xfs_btree_cur *cur, xfs_inobt_alloc_block()
94 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); xfs_inobt_alloc_block()
97 args.tp = cur->bc_tp; xfs_inobt_alloc_block()
98 args.mp = cur->bc_mp; xfs_inobt_alloc_block()
99 args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno); xfs_inobt_alloc_block()
107 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_inobt_alloc_block()
111 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_inobt_alloc_block()
116 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_inobt_alloc_block()
125 struct xfs_btree_cur *cur, xfs_inobt_free_block()
131 fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)); xfs_inobt_free_block()
132 error = xfs_free_extent(cur->bc_tp, fsbno, 1); xfs_inobt_free_block()
136 xfs_trans_binval(cur->bc_tp, bp); xfs_inobt_free_block()
142 struct xfs_btree_cur *cur, xfs_inobt_get_maxrecs()
145 return cur->bc_mp->m_inobt_mxr[level != 0]; xfs_inobt_get_maxrecs()
166 struct xfs_btree_cur *cur, xfs_inobt_init_rec_from_cur()
169 rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino); xfs_inobt_init_rec_from_cur()
170 rec->inobt.ir_freecount = cpu_to_be32(cur->bc_rec.i.ir_freecount); xfs_inobt_init_rec_from_cur()
171 rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free); xfs_inobt_init_rec_from_cur()
179 struct xfs_btree_cur *cur, xfs_inobt_init_ptr_from_cur()
182 struct xfs_agi *agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp); xfs_inobt_init_ptr_from_cur()
184 ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno)); xfs_inobt_init_ptr_from_cur()
191 struct xfs_btree_cur *cur, xfs_finobt_init_ptr_from_cur()
194 struct xfs_agi *agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp); xfs_finobt_init_ptr_from_cur()
196 ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno)); xfs_finobt_init_ptr_from_cur()
202 struct xfs_btree_cur *cur, xfs_inobt_key_diff()
206 cur->bc_rec.i.ir_startino; xfs_inobt_key_diff()
305 struct xfs_btree_cur *cur, xfs_inobt_keys_inorder()
315 struct xfs_btree_cur *cur, xfs_inobt_recs_inorder()
380 struct xfs_btree_cur *cur; xfs_inobt_init_cursor() local
382 cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP); xfs_inobt_init_cursor()
384 cur->bc_tp = tp; xfs_inobt_init_cursor()
385 cur->bc_mp = mp; xfs_inobt_init_cursor()
386 cur->bc_btnum = btnum; xfs_inobt_init_cursor()
388 cur->bc_nlevels = be32_to_cpu(agi->agi_level); xfs_inobt_init_cursor()
389 cur->bc_ops = &xfs_inobt_ops; xfs_inobt_init_cursor()
391 cur->bc_nlevels = be32_to_cpu(agi->agi_free_level); xfs_inobt_init_cursor()
392 cur->bc_ops = &xfs_finobt_ops; xfs_inobt_init_cursor()
395 cur->bc_blocklog = mp->m_sb.sb_blocklog; xfs_inobt_init_cursor()
398 cur->bc_flags |= XFS_BTREE_CRC_BLOCKS; xfs_inobt_init_cursor()
400 cur->bc_private.a.agbp = agbp; xfs_inobt_init_cursor()
401 cur->bc_private.a.agno = agno; xfs_inobt_init_cursor()
403 return cur; xfs_inobt_init_cursor()
38 xfs_inobt_get_minrecs( struct xfs_btree_cur *cur, int level) xfs_inobt_get_minrecs() argument
46 xfs_inobt_dup_cursor( struct xfs_btree_cur *cur) xfs_inobt_dup_cursor() argument
55 xfs_inobt_set_root( struct xfs_btree_cur *cur, union xfs_btree_ptr *nptr, int inc) xfs_inobt_set_root() argument
69 xfs_finobt_set_root( struct xfs_btree_cur *cur, union xfs_btree_ptr *nptr, int inc) xfs_finobt_set_root() argument
84 xfs_inobt_alloc_block( struct xfs_btree_cur *cur, union xfs_btree_ptr *start, union xfs_btree_ptr *new, int *stat) xfs_inobt_alloc_block() argument
124 xfs_inobt_free_block( struct xfs_btree_cur *cur, struct xfs_buf *bp) xfs_inobt_free_block() argument
141 xfs_inobt_get_maxrecs( struct xfs_btree_cur *cur, int level) xfs_inobt_get_maxrecs() argument
165 xfs_inobt_init_rec_from_cur( struct xfs_btree_cur *cur, union xfs_btree_rec *rec) xfs_inobt_init_rec_from_cur() argument
178 xfs_inobt_init_ptr_from_cur( struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr) xfs_inobt_init_ptr_from_cur() argument
190 xfs_finobt_init_ptr_from_cur( struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr) xfs_finobt_init_ptr_from_cur() argument
201 xfs_inobt_key_diff( struct xfs_btree_cur *cur, union xfs_btree_key *key) xfs_inobt_key_diff() argument
304 xfs_inobt_keys_inorder( struct xfs_btree_cur *cur, union xfs_btree_key *k1, union xfs_btree_key *k2) xfs_inobt_keys_inorder() argument
314 xfs_inobt_recs_inorder( struct xfs_btree_cur *cur, union xfs_btree_rec *r1, union xfs_btree_rec *r2) xfs_inobt_recs_inorder() argument
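A note on the [level != 0] indexing used by the get_minrecs/get_maxrecs callbacks above: m_inobt_mnr and m_inobt_mxr are two-element arrays, slot 0 holding the leaf-block limit and slot 1 the interior-node limit, so the boolean expression selects the right one. Illustrative fragment, given a cursor cur:

    int min_leaf = cur->bc_mp->m_inobt_mnr[0];      /* level == 0: leaf */
    int min_node = cur->bc_mp->m_inobt_mnr[1];      /* level >  0: node */

The alloc and bmap variants (m_alloc_mnr/m_alloc_mxr, m_bmap_dmnr/m_bmap_dmxr) follow the same convention.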
H A Dxfs_bmap_btree.c
400 struct xfs_btree_cur *cur) xfs_bmbt_dup_cursor()
404 new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp, xfs_bmbt_dup_cursor()
405 cur->bc_private.b.ip, cur->bc_private.b.whichfork); xfs_bmbt_dup_cursor()
411 new->bc_private.b.firstblock = cur->bc_private.b.firstblock; xfs_bmbt_dup_cursor()
412 new->bc_private.b.flist = cur->bc_private.b.flist; xfs_bmbt_dup_cursor()
413 new->bc_private.b.flags = cur->bc_private.b.flags; xfs_bmbt_dup_cursor()
435 struct xfs_btree_cur *cur, xfs_bmbt_alloc_block()
444 args.tp = cur->bc_tp; xfs_bmbt_alloc_block()
445 args.mp = cur->bc_mp; xfs_bmbt_alloc_block()
446 args.fsbno = cur->bc_private.b.firstblock; xfs_bmbt_alloc_block()
464 } else if (cur->bc_private.b.flist->xbf_low) { xfs_bmbt_alloc_block()
471 args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL; xfs_bmbt_alloc_block()
492 cur->bc_private.b.flist->xbf_low = 1; xfs_bmbt_alloc_block()
495 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_bmbt_alloc_block()
500 cur->bc_private.b.firstblock = args.fsbno; xfs_bmbt_alloc_block()
501 cur->bc_private.b.allocated++; xfs_bmbt_alloc_block()
502 cur->bc_private.b.ip->i_d.di_nblocks++; xfs_bmbt_alloc_block()
503 xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE); xfs_bmbt_alloc_block()
504 xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip, xfs_bmbt_alloc_block()
509 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); xfs_bmbt_alloc_block()
514 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); xfs_bmbt_alloc_block()
520 struct xfs_btree_cur *cur, xfs_bmbt_free_block()
523 struct xfs_mount *mp = cur->bc_mp; xfs_bmbt_free_block()
524 struct xfs_inode *ip = cur->bc_private.b.ip; xfs_bmbt_free_block()
525 struct xfs_trans *tp = cur->bc_tp; xfs_bmbt_free_block()
528 xfs_bmap_add_free(fsbno, 1, cur->bc_private.b.flist, mp); xfs_bmbt_free_block()
539 struct xfs_btree_cur *cur, xfs_bmbt_get_minrecs()
542 if (level == cur->bc_nlevels - 1) { xfs_bmbt_get_minrecs()
545 ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, xfs_bmbt_get_minrecs()
546 cur->bc_private.b.whichfork); xfs_bmbt_get_minrecs()
548 return xfs_bmbt_maxrecs(cur->bc_mp, xfs_bmbt_get_minrecs()
552 return cur->bc_mp->m_bmap_dmnr[level != 0]; xfs_bmbt_get_minrecs()
557 struct xfs_btree_cur *cur, xfs_bmbt_get_maxrecs()
560 if (level == cur->bc_nlevels - 1) { xfs_bmbt_get_maxrecs()
563 ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, xfs_bmbt_get_maxrecs()
564 cur->bc_private.b.whichfork); xfs_bmbt_get_maxrecs()
566 return xfs_bmbt_maxrecs(cur->bc_mp, xfs_bmbt_get_maxrecs()
570 return cur->bc_mp->m_bmap_dmxr[level != 0]; xfs_bmbt_get_maxrecs()
585 struct xfs_btree_cur *cur, xfs_bmbt_get_dmaxrecs()
588 if (level != cur->bc_nlevels - 1) xfs_bmbt_get_dmaxrecs()
589 return cur->bc_mp->m_bmap_dmxr[level != 0]; xfs_bmbt_get_dmaxrecs()
590 return xfs_bmdr_maxrecs(cur->bc_private.b.forksize, level == 0); xfs_bmbt_get_dmaxrecs()
615 struct xfs_btree_cur *cur, xfs_bmbt_init_rec_from_cur()
618 xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b); xfs_bmbt_init_rec_from_cur()
623 struct xfs_btree_cur *cur, xfs_bmbt_init_ptr_from_cur()
631 struct xfs_btree_cur *cur, xfs_bmbt_key_diff()
635 cur->bc_rec.b.br_startoff; xfs_bmbt_key_diff()
730 struct xfs_btree_cur *cur, xfs_bmbt_keys_inorder()
740 struct xfs_btree_cur *cur, xfs_bmbt_recs_inorder()
784 struct xfs_btree_cur *cur; xfs_bmbt_init_cursor() local
786 cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP); xfs_bmbt_init_cursor()
788 cur->bc_tp = tp; xfs_bmbt_init_cursor()
789 cur->bc_mp = mp; xfs_bmbt_init_cursor()
790 cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1; xfs_bmbt_init_cursor()
791 cur->bc_btnum = XFS_BTNUM_BMAP; xfs_bmbt_init_cursor()
792 cur->bc_blocklog = mp->m_sb.sb_blocklog; xfs_bmbt_init_cursor()
794 cur->bc_ops = &xfs_bmbt_ops; xfs_bmbt_init_cursor()
795 cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE; xfs_bmbt_init_cursor()
797 cur->bc_flags |= XFS_BTREE_CRC_BLOCKS; xfs_bmbt_init_cursor()
799 cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork); xfs_bmbt_init_cursor()
800 cur->bc_private.b.ip = ip; xfs_bmbt_init_cursor()
801 cur->bc_private.b.firstblock = NULLFSBLOCK; xfs_bmbt_init_cursor()
802 cur->bc_private.b.flist = NULL; xfs_bmbt_init_cursor()
803 cur->bc_private.b.allocated = 0; xfs_bmbt_init_cursor()
804 cur->bc_private.b.flags = 0; xfs_bmbt_init_cursor()
805 cur->bc_private.b.whichfork = whichfork; xfs_bmbt_init_cursor()
807 return cur; xfs_bmbt_init_cursor()
866 struct xfs_btree_cur *cur; xfs_bmbt_change_owner() local
876 cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork); xfs_bmbt_change_owner()
877 if (!cur) xfs_bmbt_change_owner()
880 error = xfs_btree_change_owner(cur, new_owner, buffer_list); xfs_bmbt_change_owner()
881 xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); xfs_bmbt_change_owner()
399 xfs_bmbt_dup_cursor( struct xfs_btree_cur *cur) xfs_bmbt_dup_cursor() argument
434 xfs_bmbt_alloc_block( struct xfs_btree_cur *cur, union xfs_btree_ptr *start, union xfs_btree_ptr *new, int *stat) xfs_bmbt_alloc_block() argument
519 xfs_bmbt_free_block( struct xfs_btree_cur *cur, struct xfs_buf *bp) xfs_bmbt_free_block() argument
538 xfs_bmbt_get_minrecs( struct xfs_btree_cur *cur, int level) xfs_bmbt_get_minrecs() argument
556 xfs_bmbt_get_maxrecs( struct xfs_btree_cur *cur, int level) xfs_bmbt_get_maxrecs() argument
584 xfs_bmbt_get_dmaxrecs( struct xfs_btree_cur *cur, int level) xfs_bmbt_get_dmaxrecs() argument
614 xfs_bmbt_init_rec_from_cur( struct xfs_btree_cur *cur, union xfs_btree_rec *rec) xfs_bmbt_init_rec_from_cur() argument
622 xfs_bmbt_init_ptr_from_cur( struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr) xfs_bmbt_init_ptr_from_cur() argument
630 xfs_bmbt_key_diff( struct xfs_btree_cur *cur, union xfs_btree_key *key) xfs_bmbt_key_diff() argument
729 xfs_bmbt_keys_inorder( struct xfs_btree_cur *cur, union xfs_btree_key *k1, union xfs_btree_key *k2) xfs_bmbt_keys_inorder() argument
739 xfs_bmbt_recs_inorder( struct xfs_btree_cur *cur, union xfs_btree_rec *r1, union xfs_btree_rec *r2) xfs_bmbt_recs_inorder() argument
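xfs_bmbt_change_owner() above is the cleanest end-to-end example of the cursor pattern. Condensed, and with one assumption flagged (the listing elides what the !cur branch returns), it reads:

    cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
    if (!cur)
            return -ENOMEM;         /* assumption: elided branch returns an error */
    error = xfs_btree_change_owner(cur, new_owner, buffer_list);
    xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);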
H A Dxfs_ialloc.c
58 * Lookup a record by ino in the btree given by cur.
62 struct xfs_btree_cur *cur, /* btree cursor */ xfs_inobt_lookup()
67 cur->bc_rec.i.ir_startino = ino; xfs_inobt_lookup()
68 cur->bc_rec.i.ir_freecount = 0; xfs_inobt_lookup()
69 cur->bc_rec.i.ir_free = 0; xfs_inobt_lookup()
70 return xfs_btree_lookup(cur, dir, stat); xfs_inobt_lookup()
74 * Update the record referred to by cur to the value given.
79 struct xfs_btree_cur *cur, /* btree cursor */ xfs_inobt_update()
87 return xfs_btree_update(cur, &rec); xfs_inobt_update()
95 struct xfs_btree_cur *cur, /* btree cursor */ xfs_inobt_get_rec()
102 error = xfs_btree_get_rec(cur, &rec, stat); xfs_inobt_get_rec()
116 struct xfs_btree_cur *cur, xfs_inobt_insert_rec()
121 cur->bc_rec.i.ir_freecount = freecount; xfs_inobt_insert_rec()
122 cur->bc_rec.i.ir_free = free; xfs_inobt_insert_rec()
123 return xfs_btree_insert(cur, stat); xfs_inobt_insert_rec()
138 struct xfs_btree_cur *cur; xfs_inobt_insert() local
145 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum); xfs_inobt_insert()
150 error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i); xfs_inobt_insert()
152 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); xfs_inobt_insert()
157 error = xfs_inobt_insert_rec(cur, XFS_INODES_PER_CHUNK, xfs_inobt_insert()
160 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); xfs_inobt_insert()
166 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_inobt_insert()
177 struct xfs_btree_cur *cur, xfs_check_agi_freecount()
180 if (cur->bc_nlevels == 1) { xfs_check_agi_freecount()
186 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); xfs_check_agi_freecount()
191 error = xfs_inobt_get_rec(cur, &rec, &i); xfs_check_agi_freecount()
197 error = xfs_btree_increment(cur, 0, &i); xfs_check_agi_freecount()
203 if (!XFS_FORCED_SHUTDOWN(cur->bc_mp)) xfs_check_agi_freecount()
209 #define xfs_check_agi_freecount(cur, agi) 0
684 struct xfs_btree_cur *cur, xfs_ialloc_next_rec()
693 error = xfs_btree_decrement(cur, 0, &i); xfs_ialloc_next_rec()
695 error = xfs_btree_increment(cur, 0, &i); xfs_ialloc_next_rec()
701 error = xfs_inobt_get_rec(cur, rec, &i); xfs_ialloc_next_rec()
704 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); xfs_ialloc_next_rec()
712 struct xfs_btree_cur *cur, xfs_ialloc_get_rec()
720 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i); xfs_ialloc_get_rec()
725 error = xfs_inobt_get_rec(cur, rec, &i); xfs_ialloc_get_rec()
728 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); xfs_ialloc_get_rec()
750 struct xfs_btree_cur *cur, *tcur; xfs_dialloc_ag_inobt() local
764 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO); xfs_dialloc_ag_inobt()
772 error = xfs_check_agi_freecount(cur, agi); xfs_dialloc_ag_inobt()
784 error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i); xfs_dialloc_ag_inobt()
789 error = xfs_inobt_get_rec(cur, &rec, &j); xfs_dialloc_ag_inobt()
808 error = xfs_btree_dup_cursor(cur, &tcur); xfs_dialloc_ag_inobt()
824 error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec, xfs_dialloc_ag_inobt()
834 /* search right with cur, go forward 1 record. */ xfs_dialloc_ag_inobt()
835 error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0); xfs_dialloc_ag_inobt()
870 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_dialloc_ag_inobt()
871 cur = tcur; xfs_dialloc_ag_inobt()
894 error = xfs_ialloc_next_rec(cur, &rec, xfs_dialloc_ag_inobt()
912 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_dialloc_ag_inobt()
922 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), xfs_dialloc_ag_inobt()
928 error = xfs_inobt_get_rec(cur, &rec, &j); xfs_dialloc_ag_inobt()
945 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); xfs_dialloc_ag_inobt()
951 error = xfs_inobt_get_rec(cur, &rec, &i); xfs_dialloc_ag_inobt()
957 error = xfs_btree_increment(cur, 0, &i); xfs_dialloc_ag_inobt()
972 error = xfs_inobt_update(cur, &rec); xfs_dialloc_ag_inobt()
979 error = xfs_check_agi_freecount(cur, agi); xfs_dialloc_ag_inobt()
983 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_dialloc_ag_inobt()
991 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); xfs_dialloc_ag_inobt()
1084 struct xfs_btree_cur *cur, xfs_dialloc_ag_finobt_newino()
1091 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), xfs_dialloc_ag_finobt_newino()
1096 error = xfs_inobt_get_rec(cur, rec, &i); xfs_dialloc_ag_finobt_newino()
1099 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); xfs_dialloc_ag_finobt_newino()
1107 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); xfs_dialloc_ag_finobt_newino()
1110 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); xfs_dialloc_ag_finobt_newino()
1112 error = xfs_inobt_get_rec(cur, rec, &i); xfs_dialloc_ag_finobt_newino()
1115 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); xfs_dialloc_ag_finobt_newino()
1126 struct xfs_btree_cur *cur, /* inobt cursor */ xfs_dialloc_ag_update_inobt()
1134 error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i); xfs_dialloc_ag_update_inobt()
1137 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); xfs_dialloc_ag_update_inobt()
1139 error = xfs_inobt_get_rec(cur, &rec, &i); xfs_dialloc_ag_update_inobt()
1142 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1); xfs_dialloc_ag_update_inobt()
1143 ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) % xfs_dialloc_ag_update_inobt()
1149 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, (rec.ir_free == frec->ir_free) && xfs_dialloc_ag_update_inobt()
1152 return xfs_inobt_update(cur, &rec); xfs_dialloc_ag_update_inobt()
1175 struct xfs_btree_cur *cur; /* finobt cursor */ xfs_dialloc_ag() local
1195 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO); xfs_dialloc_ag()
1197 error = xfs_check_agi_freecount(cur, agi); xfs_dialloc_ag()
1207 error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec); xfs_dialloc_ag()
1209 error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec); xfs_dialloc_ag()
1226 error = xfs_inobt_update(cur, &rec); xfs_dialloc_ag()
1228 error = xfs_btree_delete(cur, &i); xfs_dialloc_ag()
1261 error = xfs_check_agi_freecount(cur, agi); xfs_dialloc_ag()
1266 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_dialloc_ag()
1274 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); xfs_dialloc_ag()
1456 struct xfs_btree_cur *cur; xfs_difree_inobt() local
1469 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO); xfs_difree_inobt()
1471 error = xfs_check_agi_freecount(cur, agi); xfs_difree_inobt()
1478 if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) { xfs_difree_inobt()
1484 error = xfs_inobt_get_rec(cur, &rec, &i); xfs_difree_inobt()
1527 if ((error = xfs_btree_delete(cur, &i))) { xfs_difree_inobt()
1539 error = xfs_inobt_update(cur, &rec); xfs_difree_inobt()
1557 error = xfs_check_agi_freecount(cur, agi); xfs_difree_inobt()
1562 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_difree_inobt()
1566 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); xfs_difree_inobt()
1583 struct xfs_btree_cur *cur; xfs_difree_finobt() local
1589 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO); xfs_difree_finobt()
1591 error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i); xfs_difree_finobt()
1602 error = xfs_inobt_insert_rec(cur, ibtrec->ir_freecount, xfs_difree_finobt()
1618 error = xfs_inobt_get_rec(cur, &rec, &i); xfs_difree_finobt()
1640 error = xfs_btree_delete(cur, &i); xfs_difree_finobt()
1645 error = xfs_inobt_update(cur, &rec); xfs_difree_finobt()
1651 error = xfs_check_agi_freecount(cur, agi); xfs_difree_finobt()
1655 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_difree_finobt()
1659 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); xfs_difree_finobt()
1758 struct xfs_btree_cur *cur; xfs_imap_lookup() local
1777 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO); xfs_imap_lookup()
1778 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i); xfs_imap_lookup()
1781 error = xfs_inobt_get_rec(cur, &rec, &i); xfs_imap_lookup()
1787 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_imap_lookup()
61 xfs_inobt_lookup( struct xfs_btree_cur *cur, xfs_agino_t ino, xfs_lookup_t dir, int *stat) xfs_inobt_lookup() argument
78 xfs_inobt_update( struct xfs_btree_cur *cur, xfs_inobt_rec_incore_t *irec) xfs_inobt_update() argument
94 xfs_inobt_get_rec( struct xfs_btree_cur *cur, xfs_inobt_rec_incore_t *irec, int *stat) xfs_inobt_get_rec() argument
115 xfs_inobt_insert_rec( struct xfs_btree_cur *cur, __int32_t freecount, xfs_inofree_t free, int *stat) xfs_inobt_insert_rec() argument
176 xfs_check_agi_freecount( struct xfs_btree_cur *cur, struct xfs_agi *agi) xfs_check_agi_freecount() argument
683 xfs_ialloc_next_rec( struct xfs_btree_cur *cur, xfs_inobt_rec_incore_t *rec, int *done, int left) xfs_ialloc_next_rec() argument
711 xfs_ialloc_get_rec( struct xfs_btree_cur *cur, xfs_agino_t agino, xfs_inobt_rec_incore_t *rec, int *done) xfs_ialloc_get_rec() argument
1082 xfs_dialloc_ag_finobt_newino( struct xfs_agi *agi, struct xfs_btree_cur *cur, struct xfs_inobt_rec_incore *rec) xfs_dialloc_ag_finobt_newino() argument
1125 xfs_dialloc_ag_update_inobt( struct xfs_btree_cur *cur, struct xfs_inobt_rec_incore *frec, int offset) xfs_dialloc_ag_update_inobt() argument
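The helpers at the top of this file compose into a standard full-tree walk, visible in xfs_check_agi_freecount() above: a lookup-GE lands on the first record, then get_rec/increment repeat until stat drops to zero. A condensed sketch, assuming cur is an inobt cursor from xfs_inobt_init_cursor() and with error paths abbreviated:

    xfs_inobt_rec_incore_t  rec;
    int                     i, error;

    error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);    /* first record */
    while (!error && i) {
            error = xfs_inobt_get_rec(cur, &rec, &i);       /* copy record out */
            if (error || !i)
                    break;
            /* ... consume rec.ir_startino / ir_freecount / ir_free ... */
            error = xfs_btree_increment(cur, 0, &i);        /* advance at leaf level */
    }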
H A Dxfs_bmap.c
109 struct xfs_btree_cur *cur, xfs_bmbt_lookup_eq()
115 cur->bc_rec.b.br_startoff = off; xfs_bmbt_lookup_eq()
116 cur->bc_rec.b.br_startblock = bno; xfs_bmbt_lookup_eq()
117 cur->bc_rec.b.br_blockcount = len; xfs_bmbt_lookup_eq()
118 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat); xfs_bmbt_lookup_eq()
123 struct xfs_btree_cur *cur, xfs_bmbt_lookup_ge()
129 cur->bc_rec.b.br_startoff = off; xfs_bmbt_lookup_ge()
130 cur->bc_rec.b.br_startblock = bno; xfs_bmbt_lookup_ge()
131 cur->bc_rec.b.br_blockcount = len; xfs_bmbt_lookup_ge()
132 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); xfs_bmbt_lookup_ge()
156 * Update the record referred to by cur to the value given
162 struct xfs_btree_cur *cur, xfs_bmbt_update()
171 return xfs_btree_update(cur, &rec); xfs_bmbt_update()
250 struct xfs_btree_cur *cur, xfs_bmap_get_bp()
256 if (!cur) xfs_bmap_get_bp()
260 if (!cur->bc_bufs[i]) xfs_bmap_get_bp()
262 if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno) xfs_bmap_get_bp()
263 return cur->bc_bufs[i]; xfs_bmap_get_bp()
267 list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) { xfs_bmap_get_bp()
333 xfs_btree_cur_t *cur, /* btree cursor or null */ xfs_bmap_check_leaf_extents()
377 /* See if buf is in cur first */ xfs_bmap_check_leaf_extents()
379 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno)); xfs_bmap_check_leaf_extents()
463 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno)); xfs_bmap_check_leaf_extents()
556 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
575 xfs_bmap_free_item_t *cur; /* current (next) element */ xfs_bmap_add_free() local
597 for (prev = NULL, cur = flist->xbf_first; xfs_bmap_add_free()
598 cur != NULL; xfs_bmap_add_free()
599 prev = cur, cur = cur->xbfi_next) { xfs_bmap_add_free()
600 if (cur->xbfi_startblock >= bno) xfs_bmap_add_free()
607 new->xbfi_next = cur; xfs_bmap_add_free()
663 xfs_btree_cur_t *cur, /* btree cursor */ xfs_bmap_btree_to_extents()
689 if ((error = xfs_btree_check_lptr(cur, cbno, 1))) xfs_bmap_btree_to_extents()
697 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp))) xfs_bmap_btree_to_extents()
699 xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp); xfs_bmap_btree_to_extents()
703 if (cur->bc_bufs[0] == cbp) xfs_bmap_btree_to_extents()
704 cur->bc_bufs[0] = NULL; xfs_bmap_btree_to_extents()
733 xfs_btree_cur_t *cur; /* bmap btree cursor */ xfs_bmap_extents_to_btree() local
769 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); xfs_bmap_extents_to_btree()
770 cur->bc_private.b.firstblock = *firstblock; xfs_bmap_extents_to_btree()
771 cur->bc_private.b.flist = flist; xfs_bmap_extents_to_btree()
772 cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; xfs_bmap_extents_to_btree()
796 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); xfs_bmap_extents_to_btree()
807 *firstblock = cur->bc_private.b.firstblock = args.fsbno; xfs_bmap_extents_to_btree()
808 cur->bc_private.b.allocated++; xfs_bmap_extents_to_btree()
845 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur, xfs_bmap_extents_to_btree()
853 xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS); xfs_bmap_extents_to_btree()
854 xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs)); xfs_bmap_extents_to_btree()
856 *curp = cur; xfs_bmap_extents_to_btree()
991 xfs_btree_cur_t *cur; /* btree cursor */ xfs_bmap_add_attrfork_btree() local
1000 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK); xfs_bmap_add_attrfork_btree()
1001 cur->bc_private.b.flist = flist; xfs_bmap_add_attrfork_btree()
1002 cur->bc_private.b.firstblock = *firstblock; xfs_bmap_add_attrfork_btree()
1003 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat))) xfs_bmap_add_attrfork_btree()
1007 if ((error = xfs_btree_new_iroot(cur, flags, &stat))) xfs_bmap_add_attrfork_btree()
1010 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_bmap_add_attrfork_btree()
1013 *firstblock = cur->bc_private.b.firstblock; xfs_bmap_add_attrfork_btree()
1014 cur->bc_private.b.allocated = 0; xfs_bmap_add_attrfork_btree()
1015 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_bmap_add_attrfork_btree()
1019 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); xfs_bmap_add_attrfork_btree()
1034 xfs_btree_cur_t *cur; /* bmap btree cursor */ xfs_bmap_add_attrfork_extents() local
1039 cur = NULL; xfs_bmap_add_attrfork_extents()
1040 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0, xfs_bmap_add_attrfork_extents()
1042 if (cur) { xfs_bmap_add_attrfork_extents()
1043 cur->bc_private.b.allocated = 0; xfs_bmap_add_attrfork_extents()
1044 xfs_btree_del_cursor(cur, xfs_bmap_add_attrfork_extents()
1735 ASSERT(!bma->cur || xfs_bmap_add_extent_delay_real()
1736 (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); xfs_bmap_add_extent_delay_real()
1831 if (bma->cur == NULL) xfs_bmap_add_extent_delay_real()
1835 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, xfs_bmap_add_extent_delay_real()
1841 error = xfs_btree_delete(bma->cur, &i); xfs_bmap_add_extent_delay_real()
1845 error = xfs_btree_decrement(bma->cur, 0, &i); xfs_bmap_add_extent_delay_real()
1849 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, xfs_bmap_add_extent_delay_real()
1872 if (bma->cur == NULL) xfs_bmap_add_extent_delay_real()
1876 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff, xfs_bmap_add_extent_delay_real()
1882 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, xfs_bmap_add_extent_delay_real()
1903 if (bma->cur == NULL) xfs_bmap_add_extent_delay_real()
1907 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, xfs_bmap_add_extent_delay_real()
1913 error = xfs_bmbt_update(bma->cur, PREV.br_startoff, xfs_bmap_add_extent_delay_real()
1933 if (bma->cur == NULL) xfs_bmap_add_extent_delay_real()
1937 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, xfs_bmap_add_extent_delay_real()
1943 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; xfs_bmap_add_extent_delay_real()
1944 error = xfs_btree_insert(bma->cur, &i); xfs_bmap_add_extent_delay_real()
1966 if (bma->cur == NULL) xfs_bmap_add_extent_delay_real()
1970 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff, xfs_bmap_add_extent_delay_real()
1976 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, xfs_bmap_add_extent_delay_real()
2003 if (bma->cur == NULL) xfs_bmap_add_extent_delay_real()
2007 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, xfs_bmap_add_extent_delay_real()
2013 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; xfs_bmap_add_extent_delay_real()
2014 error = xfs_btree_insert(bma->cur, &i); xfs_bmap_add_extent_delay_real()
2023 &bma->cur, 1, &tmp_rval, XFS_DATA_FORK); xfs_bmap_add_extent_delay_real()
2030 (bma->cur ? bma->cur->bc_private.b.allocated : 0)); xfs_bmap_add_extent_delay_real()
2049 if (bma->cur == NULL) xfs_bmap_add_extent_delay_real()
2053 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, xfs_bmap_add_extent_delay_real()
2059 error = xfs_bmbt_update(bma->cur, new->br_startoff, xfs_bmap_add_extent_delay_real()
2087 if (bma->cur == NULL) xfs_bmap_add_extent_delay_real()
2091 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, xfs_bmap_add_extent_delay_real()
2097 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; xfs_bmap_add_extent_delay_real()
2098 error = xfs_btree_insert(bma->cur, &i); xfs_bmap_add_extent_delay_real()
2106 bma->firstblock, bma->flist, &bma->cur, 1, xfs_bmap_add_extent_delay_real()
2114 (bma->cur ? bma->cur->bc_private.b.allocated : 0)); xfs_bmap_add_extent_delay_real()
2156 if (bma->cur == NULL) xfs_bmap_add_extent_delay_real()
2160 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, xfs_bmap_add_extent_delay_real()
2166 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; xfs_bmap_add_extent_delay_real()
2167 error = xfs_btree_insert(bma->cur, &i); xfs_bmap_add_extent_delay_real()
2175 bma->firstblock, bma->flist, &bma->cur, xfs_bmap_add_extent_delay_real()
2184 (bma->cur ? bma->cur->bc_private.b.allocated : 0)); xfs_bmap_add_extent_delay_real()
2222 ASSERT(bma->cur == NULL); xfs_bmap_add_extent_delay_real()
2224 bma->firstblock, bma->flist, &bma->cur, xfs_bmap_add_extent_delay_real()
2234 if (bma->cur) xfs_bmap_add_extent_delay_real()
2235 temp += bma->cur->bc_private.b.allocated; xfs_bmap_add_extent_delay_real()
2243 if (bma->cur) xfs_bmap_add_extent_delay_real()
2244 bma->cur->bc_private.b.allocated = 0; xfs_bmap_add_extent_delay_real()
2246 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, XFS_DATA_FORK); xfs_bmap_add_extent_delay_real()
2269 xfs_btree_cur_t *cur; /* btree cursor */ xfs_bmap_add_extent_unwritten_real() local
2285 cur = *curp; xfs_bmap_add_extent_unwritten_real()
2386 if (cur == NULL) xfs_bmap_add_extent_unwritten_real()
2390 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, xfs_bmap_add_extent_unwritten_real()
2395 if ((error = xfs_btree_delete(cur, &i))) xfs_bmap_add_extent_unwritten_real()
2398 if ((error = xfs_btree_decrement(cur, 0, &i))) xfs_bmap_add_extent_unwritten_real()
2401 if ((error = xfs_btree_delete(cur, &i))) xfs_bmap_add_extent_unwritten_real()
2404 if ((error = xfs_btree_decrement(cur, 0, &i))) xfs_bmap_add_extent_unwritten_real()
2407 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, xfs_bmap_add_extent_unwritten_real()
2429 if (cur == NULL) xfs_bmap_add_extent_unwritten_real()
2433 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, xfs_bmap_add_extent_unwritten_real()
2438 if ((error = xfs_btree_delete(cur, &i))) xfs_bmap_add_extent_unwritten_real()
2441 if ((error = xfs_btree_decrement(cur, 0, &i))) xfs_bmap_add_extent_unwritten_real()
2444 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, xfs_bmap_add_extent_unwritten_real()
2464 if (cur == NULL) xfs_bmap_add_extent_unwritten_real()
2468 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, xfs_bmap_add_extent_unwritten_real()
2473 if ((error = xfs_btree_delete(cur, &i))) xfs_bmap_add_extent_unwritten_real()
2476 if ((error = xfs_btree_decrement(cur, 0, &i))) xfs_bmap_add_extent_unwritten_real()
2479 if ((error = xfs_bmbt_update(cur, new->br_startoff, xfs_bmap_add_extent_unwritten_real()
2497 if (cur == NULL) xfs_bmap_add_extent_unwritten_real()
2501 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, xfs_bmap_add_extent_unwritten_real()
2506 if ((error = xfs_bmbt_update(cur, new->br_startoff, xfs_bmap_add_extent_unwritten_real()
2534 if (cur == NULL) xfs_bmap_add_extent_unwritten_real()
2538 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, xfs_bmap_add_extent_unwritten_real()
2543 if ((error = xfs_bmbt_update(cur, xfs_bmap_add_extent_unwritten_real()
2549 if ((error = xfs_btree_decrement(cur, 0, &i))) xfs_bmap_add_extent_unwritten_real()
2551 error = xfs_bmbt_update(cur, LEFT.br_startoff, xfs_bmap_add_extent_unwritten_real()
2576 if (cur == NULL) xfs_bmap_add_extent_unwritten_real()
2580 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, xfs_bmap_add_extent_unwritten_real()
2585 if ((error = xfs_bmbt_update(cur, xfs_bmap_add_extent_unwritten_real()
2591 cur->bc_rec.b = *new; xfs_bmap_add_extent_unwritten_real()
2592 if ((error = xfs_btree_insert(cur, &i))) xfs_bmap_add_extent_unwritten_real()
2616 if (cur == NULL) xfs_bmap_add_extent_unwritten_real()
2620 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, xfs_bmap_add_extent_unwritten_real()
2625 if ((error = xfs_bmbt_update(cur, PREV.br_startoff, xfs_bmap_add_extent_unwritten_real()
2630 if ((error = xfs_btree_increment(cur, 0, &i))) xfs_bmap_add_extent_unwritten_real()
2632 if ((error = xfs_bmbt_update(cur, new->br_startoff, xfs_bmap_add_extent_unwritten_real()
2654 if (cur == NULL) xfs_bmap_add_extent_unwritten_real()
2658 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, xfs_bmap_add_extent_unwritten_real()
2663 if ((error = xfs_bmbt_update(cur, PREV.br_startoff, xfs_bmap_add_extent_unwritten_real()
2668 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, xfs_bmap_add_extent_unwritten_real()
2673 cur->bc_rec.b.br_state = XFS_EXT_NORM; xfs_bmap_add_extent_unwritten_real()
2674 if ((error = xfs_btree_insert(cur, &i))) xfs_bmap_add_extent_unwritten_real()
2702 if (cur == NULL) xfs_bmap_add_extent_unwritten_real()
2706 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, xfs_bmap_add_extent_unwritten_real()
2712 if ((error = xfs_bmbt_update(cur, r[1].br_startoff, xfs_bmap_add_extent_unwritten_real()
2717 cur->bc_rec.b = PREV; xfs_bmap_add_extent_unwritten_real()
2718 cur->bc_rec.b.br_blockcount = xfs_bmap_add_extent_unwritten_real()
2720 if ((error = xfs_btree_insert(cur, &i))) xfs_bmap_add_extent_unwritten_real()
2728 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, xfs_bmap_add_extent_unwritten_real()
2734 cur->bc_rec.b.br_state = new->br_state; xfs_bmap_add_extent_unwritten_real()
2735 if ((error = xfs_btree_insert(cur, &i))) xfs_bmap_add_extent_unwritten_real()
2758 ASSERT(cur == NULL); xfs_bmap_add_extent_unwritten_real()
2759 error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur, xfs_bmap_add_extent_unwritten_real()
2767 if (cur) { xfs_bmap_add_extent_unwritten_real()
2768 cur->bc_private.b.allocated = 0; xfs_bmap_add_extent_unwritten_real()
2769 *curp = cur; xfs_bmap_add_extent_unwritten_real()
2949 ASSERT(!bma->cur || xfs_bmap_add_extent_hole_real()
2950 !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); xfs_bmap_add_extent_hole_real()
3022 if (bma->cur == NULL) { xfs_bmap_add_extent_hole_real()
3026 error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff, xfs_bmap_add_extent_hole_real()
3032 error = xfs_btree_delete(bma->cur, &i); xfs_bmap_add_extent_hole_real()
3036 error = xfs_btree_decrement(bma->cur, 0, &i); xfs_bmap_add_extent_hole_real()
3040 error = xfs_bmbt_update(bma->cur, left.br_startoff, xfs_bmap_add_extent_hole_real()
3063 if (bma->cur == NULL) { xfs_bmap_add_extent_hole_real()
3067 error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff, xfs_bmap_add_extent_hole_real()
3073 error = xfs_bmbt_update(bma->cur, left.br_startoff, xfs_bmap_add_extent_hole_real()
3096 if (bma->cur == NULL) { xfs_bmap_add_extent_hole_real()
3100 error = xfs_bmbt_lookup_eq(bma->cur, xfs_bmap_add_extent_hole_real()
3107 error = xfs_bmbt_update(bma->cur, new->br_startoff, xfs_bmap_add_extent_hole_real()
3126 if (bma->cur == NULL) { xfs_bmap_add_extent_hole_real()
3130 error = xfs_bmbt_lookup_eq(bma->cur, xfs_bmap_add_extent_hole_real()
3137 bma->cur->bc_rec.b.br_state = new->br_state; xfs_bmap_add_extent_hole_real()
3138 error = xfs_btree_insert(bma->cur, &i); xfs_bmap_add_extent_hole_real()
3150 ASSERT(bma->cur == NULL); xfs_bmap_add_extent_hole_real()
3152 bma->firstblock, bma->flist, &bma->cur, xfs_bmap_add_extent_hole_real()
3160 if (bma->cur) xfs_bmap_add_extent_hole_real()
3161 bma->cur->bc_private.b.allocated = 0; xfs_bmap_add_extent_hole_real()
3163 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); xfs_bmap_add_extent_hole_real()
4331 if (bma->cur) xfs_bmapi_allocate()
4332 bma->cur->bc_private.b.firstblock = *bma->firstblock; xfs_bmapi_allocate()
4335 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { xfs_bmapi_allocate()
4336 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); xfs_bmapi_allocate()
4337 bma->cur->bc_private.b.firstblock = *bma->firstblock; xfs_bmapi_allocate()
4338 bma->cur->bc_private.b.flist = bma->flist; xfs_bmapi_allocate()
4346 if (bma->cur) xfs_bmapi_allocate()
4347 bma->cur->bc_private.b.flags = xfs_bmapi_allocate()
4415 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { xfs_bmapi_convert_unwritten()
4416 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp, xfs_bmapi_convert_unwritten()
4418 bma->cur->bc_private.b.firstblock = *bma->firstblock; xfs_bmapi_convert_unwritten()
4419 bma->cur->bc_private.b.flist = bma->flist; xfs_bmapi_convert_unwritten()
4425 &bma->cur, mval, bma->firstblock, bma->flist, xfs_bmapi_convert_unwritten()
4621 * Transform from btree to extents, give it cur. xfs_bmapi_write()
4626 ASSERT(bma.cur); xfs_bmapi_write()
4627 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, xfs_bmapi_write()
4657 if (bma.cur) { xfs_bmapi_write()
4662 bma.cur->bc_private.b.firstblock) || xfs_bmapi_write()
4666 bma.cur->bc_private.b.firstblock))); xfs_bmapi_write()
4667 *firstblock = bma.cur->bc_private.b.firstblock; xfs_bmapi_write()
4669 xfs_btree_del_cursor(bma.cur, xfs_bmapi_write()
4688 xfs_btree_cur_t *cur, /* if null, not a btree */ xfs_bmap_del_extent()
4772 * Set up del_endblock and cur for later. xfs_bmap_del_extent()
4775 if (cur) { xfs_bmap_del_extent()
4776 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff, xfs_bmap_del_extent()
4808 if (!cur) { xfs_bmap_del_extent()
4812 if ((error = xfs_btree_delete(cur, &i))) xfs_bmap_del_extent()
4835 if (!cur) { xfs_bmap_del_extent()
4839 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock, xfs_bmap_del_extent()
4861 if (!cur) { xfs_bmap_del_extent()
4865 if ((error = xfs_bmbt_update(cur, got.br_startoff, xfs_bmap_del_extent()
4886 if (cur) { xfs_bmap_del_extent()
4887 if ((error = xfs_bmbt_update(cur, xfs_bmap_del_extent()
4892 if ((error = xfs_btree_increment(cur, 0, &i))) xfs_bmap_del_extent()
4894 cur->bc_rec.b = new; xfs_bmap_del_extent()
4895 error = xfs_btree_insert(cur, &i); xfs_bmap_del_extent()
4909 if ((error = xfs_bmbt_lookup_eq(cur, xfs_bmap_del_extent()
4920 if ((error = xfs_bmbt_update(cur, xfs_bmap_del_extent()
5018 xfs_btree_cur_t *cur; /* bmap btree cursor */ xfs_bunmapi() local
5086 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); xfs_bunmapi()
5087 cur->bc_private.b.firstblock = *firstblock; xfs_bunmapi()
5088 cur->bc_private.b.flist = flist; xfs_bunmapi()
5089 cur->bc_private.b.flags = 0; xfs_bunmapi()
5091 cur = NULL; xfs_bunmapi()
5180 &lastx, &cur, &del, firstblock, flist, xfs_bunmapi()
5238 ip, &lastx, &cur, &prev, xfs_bunmapi()
5247 ip, &lastx, &cur, &del, xfs_bunmapi()
5274 if (cur) xfs_bunmapi()
5275 cur->bc_private.b.flags |= xfs_bunmapi()
5277 } else if (cur) xfs_bunmapi()
5278 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL; xfs_bunmapi()
5300 error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del, xfs_bunmapi()
5329 ASSERT(cur == NULL); xfs_bunmapi()
5331 &cur, 0, &tmp_logflags, whichfork); xfs_bunmapi()
5337 * transform from btree to extents, give it cur xfs_bunmapi()
5340 ASSERT(cur != NULL); xfs_bunmapi()
5341 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags, xfs_bunmapi()
5368 if (cur) { xfs_bunmapi()
5370 *firstblock = cur->bc_private.b.firstblock; xfs_bunmapi()
5371 cur->bc_private.b.allocated = 0; xfs_bunmapi()
5373 xfs_btree_del_cursor(cur, xfs_bunmapi()
5423 struct xfs_btree_cur *cur, xfs_bmse_merge()
5455 if (!cur) { xfs_bmse_merge()
5461 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock, xfs_bmse_merge()
5467 error = xfs_btree_delete(cur, &i); xfs_bmse_merge()
5473 error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock, xfs_bmse_merge()
5481 return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock, xfs_bmse_merge()
5495 struct xfs_btree_cur *cur, xfs_bmse_shift_one()
5547 cur, logflags); xfs_bmse_shift_one()
5584 if (!cur) { xfs_bmse_shift_one()
5589 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock, xfs_bmse_shift_one()
5596 return xfs_bmbt_update(cur, got.br_startoff, got.br_startblock, xfs_bmse_shift_one()
5623 struct xfs_btree_cur *cur = NULL; xfs_bmap_shift_extents() local
5662 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); xfs_bmap_shift_extents()
5663 cur->bc_private.b.firstblock = *firstblock; xfs_bmap_shift_extents()
5664 cur->bc_private.b.flist = flist; xfs_bmap_shift_extents()
5665 cur->bc_private.b.flags = 0; xfs_bmap_shift_extents()
5724 &current_ext, gotp, cur, &logflags, xfs_bmap_shift_extents()
5751 if (cur) xfs_bmap_shift_extents()
5752 xfs_btree_del_cursor(cur, xfs_bmap_shift_extents()
5776 struct xfs_btree_cur *cur = NULL; xfs_bmap_split_extent_at() local
5833 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); xfs_bmap_split_extent_at()
5834 cur->bc_private.b.firstblock = *firstfsb; xfs_bmap_split_extent_at()
5835 cur->bc_private.b.flist = free_list; xfs_bmap_split_extent_at()
5836 cur->bc_private.b.flags = 0; xfs_bmap_split_extent_at()
5837 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, xfs_bmap_split_extent_at()
5850 if (cur) { xfs_bmap_split_extent_at()
5851 error = xfs_bmbt_update(cur, got.br_startoff, xfs_bmap_split_extent_at()
5866 if (cur) { xfs_bmap_split_extent_at()
5867 error = xfs_bmbt_lookup_eq(cur, new.br_startoff, xfs_bmap_split_extent_at()
5873 cur->bc_rec.b.br_state = new.br_state; xfs_bmap_split_extent_at()
5875 error = xfs_btree_insert(cur, &i); xfs_bmap_split_extent_at()
5887 ASSERT(cur == NULL); xfs_bmap_split_extent_at()
5889 &cur, 0, &tmp_logflags, whichfork); xfs_bmap_split_extent_at()
5894 if (cur) { xfs_bmap_split_extent_at()
5895 cur->bc_private.b.allocated = 0; xfs_bmap_split_extent_at()
5896 xfs_btree_del_cursor(cur, xfs_bmap_split_extent_at()
108 xfs_bmbt_lookup_eq( struct xfs_btree_cur *cur, xfs_fileoff_t off, xfs_fsblock_t bno, xfs_filblks_t len, int *stat) xfs_bmbt_lookup_eq() argument
122 xfs_bmbt_lookup_ge( struct xfs_btree_cur *cur, xfs_fileoff_t off, xfs_fsblock_t bno, xfs_filblks_t len, int *stat) xfs_bmbt_lookup_ge() argument
161 xfs_bmbt_update( struct xfs_btree_cur *cur, xfs_fileoff_t off, xfs_fsblock_t bno, xfs_filblks_t len, xfs_exntst_t state) xfs_bmbt_update() argument
249 xfs_bmap_get_bp( struct xfs_btree_cur *cur, xfs_fsblock_t bno) xfs_bmap_get_bp() argument
332 xfs_bmap_check_leaf_extents( xfs_btree_cur_t *cur, xfs_inode_t *ip, int whichfork) xfs_bmap_check_leaf_extents() argument
660 xfs_bmap_btree_to_extents( xfs_trans_t *tp, xfs_inode_t *ip, xfs_btree_cur_t *cur, int *logflagsp, int whichfork) xfs_bmap_btree_to_extents() argument
4683 xfs_bmap_del_extent( xfs_inode_t *ip, xfs_trans_t *tp, xfs_extnum_t *idx, xfs_bmap_free_t *flist, xfs_btree_cur_t *cur, xfs_bmbt_irec_t *del, int *logflagsp, int whichfork) xfs_bmap_del_extent() argument
5416 xfs_bmse_merge( struct xfs_inode *ip, int whichfork, xfs_fileoff_t shift, int current_ext, struct xfs_bmbt_rec_host *gotp, struct xfs_bmbt_rec_host *leftp, struct xfs_btree_cur *cur, int *logflags) xfs_bmse_merge() argument
5489 xfs_bmse_shift_one( struct xfs_inode *ip, int whichfork, xfs_fileoff_t offset_shift_fsb, int *current_ext, struct xfs_bmbt_rec_host *gotp, struct xfs_btree_cur *cur, int *logflags, enum shift_direction direction) xfs_bmse_shift_one() argument
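The xfs_bmap.c hits above all follow one cursor lifecycle: obtain a cursor (xfs_bmbt_init_cursor), position it with xfs_bmbt_lookup_eq() and check the returned stat, mutate the positioned record via xfs_bmbt_update()/xfs_btree_insert()/xfs_btree_delete(), then release it with xfs_btree_del_cursor(). A minimal userspace sketch of that lookup-then-mutate discipline, with a hypothetical array-backed "cursor" standing in for the real on-disk btree:

#include <stdio.h>

/* Hypothetical stand-ins for xfs_bmbt_irec_t and xfs_btree_cur_t. */
struct rec { unsigned long startoff, blockcount; };

struct cur {
        struct rec recs[16];    /* kept sorted by startoff */
        int nrecs;
        int idx;                /* position set by the last lookup */
};

/* Position the cursor on the record with this startoff; *stat = 1 on hit. */
static int lookup_eq(struct cur *c, unsigned long startoff, int *stat)
{
        for (c->idx = 0; c->idx < c->nrecs; c->idx++) {
                if (c->recs[c->idx].startoff == startoff) {
                        *stat = 1;
                        return 0;
                }
        }
        *stat = 0;
        return 0;               /* "not found" is not an error, as in XFS */
}

/* Rewrite the record under the cursor (xfs_bmbt_update analogue). */
static int update(struct cur *c, unsigned long startoff, unsigned long len)
{
        c->recs[c->idx].startoff = startoff;
        c->recs[c->idx].blockcount = len;
        return 0;
}

int main(void)
{
        struct cur c = { .recs = { {0, 10}, {20, 5} }, .nrecs = 2 };
        int stat;

        lookup_eq(&c, 20, &stat);       /* position first, then verify stat */
        if (stat)
                update(&c, 20, 8);      /* mutate through the cursor */
        printf("rec1: off=%lu len=%lu\n", c.recs[1].startoff,
               c.recs[1].blockcount);
        return 0;
}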
H A Dxfs_btree.h89 #define XFS_BTREE_STATS_INC(cur, stat) \
91 switch (cur->bc_btnum) { \
103 #define XFS_BTREE_STATS_ADD(cur, stat, val) \
105 switch (cur->bc_btnum) { \
128 void (*set_root)(struct xfs_btree_cur *cur,
132 int (*alloc_block)(struct xfs_btree_cur *cur,
136 int (*free_block)(struct xfs_btree_cur *cur, struct xfs_buf *bp);
139 void (*update_lastrec)(struct xfs_btree_cur *cur,
145 int (*get_minrecs)(struct xfs_btree_cur *cur, int level);
146 int (*get_maxrecs)(struct xfs_btree_cur *cur, int level);
149 int (*get_dmaxrecs)(struct xfs_btree_cur *cur, int level);
156 void (*init_rec_from_cur)(struct xfs_btree_cur *cur,
158 void (*init_ptr_from_cur)(struct xfs_btree_cur *cur,
162 __int64_t (*key_diff)(struct xfs_btree_cur *cur,
169 int (*keys_inorder)(struct xfs_btree_cur *cur,
174 int (*recs_inorder)(struct xfs_btree_cur *cur,
250 struct xfs_btree_cur *cur, /* btree cursor */
260 struct xfs_btree_cur *cur, /* btree cursor */
269 xfs_btree_cur_t *cur, /* btree cursor */
278 xfs_btree_cur_t *cur, /* input cursor */
309 xfs_btree_cur_t *cur, /* btree cursor */
396 int xfs_btree_change_owner(struct xfs_btree_cur *cur, __uint64_t new_owner,
H A Dxfs_ialloc.h144 * Lookup a record by ino in the btree given by cur.
146 int xfs_inobt_lookup(struct xfs_btree_cur *cur, xfs_agino_t ino,
152 int xfs_inobt_get_rec(struct xfs_btree_cur *cur,
H A Dxfs_alloc.h215 struct xfs_btree_cur *cur, /* btree cursor */
222 struct xfs_btree_cur *cur, /* btree cursor */
229 struct xfs_btree_cur *cur, /* btree cursor */
/linux-4.1.27/drivers/char/
H A Dbsr.c168 struct bsr_dev *cur, *n; bsr_cleanup_devs() local
170 list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) { bsr_cleanup_devs()
171 if (cur->bsr_device) { bsr_cleanup_devs()
172 cdev_del(&cur->bsr_cdev); bsr_cleanup_devs()
173 device_del(cur->bsr_device); bsr_cleanup_devs()
175 list_del(&cur->bsr_list); bsr_cleanup_devs()
176 kfree(cur); bsr_cleanup_devs()
200 struct bsr_dev *cur = kzalloc(sizeof(struct bsr_dev), bsr_add_node() local
205 if (!cur) { bsr_add_node()
214 kfree(cur); bsr_add_node()
218 cur->bsr_minor = i + total_bsr_devs; bsr_add_node()
219 cur->bsr_addr = res.start; bsr_add_node()
220 cur->bsr_len = resource_size(&res); bsr_add_node()
221 cur->bsr_bytes = bsr_bytes[i]; bsr_add_node()
222 cur->bsr_stride = bsr_stride[i]; bsr_add_node()
223 cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs); bsr_add_node()
227 if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE) bsr_add_node()
228 cur->bsr_len = 4096; bsr_add_node()
230 switch(cur->bsr_bytes) { bsr_add_node()
232 cur->bsr_type = BSR_8; bsr_add_node()
235 cur->bsr_type = BSR_16; bsr_add_node()
238 cur->bsr_type = BSR_64; bsr_add_node()
241 cur->bsr_type = BSR_128; bsr_add_node()
244 cur->bsr_type = BSR_4096; bsr_add_node()
247 cur->bsr_type = BSR_UNKNOWN; bsr_add_node()
250 cur->bsr_num = bsr_types[cur->bsr_type]; bsr_add_node()
251 snprintf(cur->bsr_name, 32, "bsr%d_%d", bsr_add_node()
252 cur->bsr_bytes, cur->bsr_num); bsr_add_node()
254 cdev_init(&cur->bsr_cdev, &bsr_fops); bsr_add_node()
255 result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1); bsr_add_node()
257 kfree(cur); bsr_add_node()
261 cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev, bsr_add_node()
262 cur, "%s", cur->bsr_name); bsr_add_node()
263 if (IS_ERR(cur->bsr_device)) { bsr_add_node()
265 cur->bsr_name); bsr_add_node()
266 cdev_del(&cur->bsr_cdev); bsr_add_node()
267 kfree(cur); bsr_add_node()
271 bsr_types[cur->bsr_type] = cur->bsr_num + 1; bsr_add_node()
272 list_add_tail(&cur->bsr_list, &bsr_devs); bsr_add_node()
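bsr_cleanup_devs() relies on list_for_each_entry_safe()'s second cursor (n) so the current node can be freed mid-walk. The same discipline on a plain singly linked list, as a self-contained sketch:

#include <stdlib.h>
#include <stdio.h>

/* Hypothetical device node, standing in for struct bsr_dev. */
struct dev_node {
        int minor;
        struct dev_node *next;
};

/*
 * Teardown must grab the successor before freeing 'cur'; that is exactly
 * what list_for_each_entry_safe()'s extra cursor does in bsr_cleanup_devs().
 */
static void cleanup(struct dev_node *head)
{
        struct dev_node *cur = head, *n;

        while (cur) {
                n = cur->next;          /* save successor before freeing */
                printf("removing minor %d\n", cur->minor);
                free(cur);
                cur = n;
        }
}

int main(void)
{
        struct dev_node *head = NULL;

        for (int i = 2; i >= 0; i--) {  /* build minors 0..2 */
                struct dev_node *d = malloc(sizeof(*d));
                d->minor = i;
                d->next = head;
                head = d;
        }
        cleanup(head);
        return 0;
}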
/linux-4.1.27/arch/arm64/kernel/
H A Dcpuinfo.c92 static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu) check_reg_mask() argument
94 if ((boot & mask) == (cur & mask)) check_reg_mask()
98 name, (unsigned long)boot, cpu, (unsigned long)cur); check_reg_mask()
103 #define CHECK_MASK(field, mask, boot, cur, cpu) \
104 check_reg_mask(#field, mask, (boot)->reg_ ## field, (cur)->reg_ ## field, cpu)
106 #define CHECK(field, boot, cur, cpu) \
107 CHECK_MASK(field, ~0ULL, boot, cur, cpu)
112 static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur) cpuinfo_sanity_check() argument
123 diff |= CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu); cpuinfo_sanity_check()
130 diff |= CHECK(dczid, boot, cur, cpu); cpuinfo_sanity_check()
133 diff |= CHECK(cntfrq, boot, cur, cpu); cpuinfo_sanity_check()
141 diff |= CHECK(id_aa64dfr0, boot, cur, cpu); cpuinfo_sanity_check()
142 diff |= CHECK(id_aa64dfr1, boot, cur, cpu); cpuinfo_sanity_check()
148 diff |= CHECK(id_aa64isar0, boot, cur, cpu); cpuinfo_sanity_check()
149 diff |= CHECK(id_aa64isar1, boot, cur, cpu); cpuinfo_sanity_check()
157 diff |= CHECK_MASK(id_aa64mmfr0, 0xffffffffffff0ff0, boot, cur, cpu); cpuinfo_sanity_check()
158 diff |= CHECK(id_aa64mmfr1, boot, cur, cpu); cpuinfo_sanity_check()
164 diff |= CHECK_MASK(id_aa64pfr0, 0xffffffffffff0fff, boot, cur, cpu); cpuinfo_sanity_check()
165 diff |= CHECK(id_aa64pfr1, boot, cur, cpu); cpuinfo_sanity_check()
171 diff |= CHECK(id_dfr0, boot, cur, cpu); cpuinfo_sanity_check()
172 diff |= CHECK(id_isar0, boot, cur, cpu); cpuinfo_sanity_check()
173 diff |= CHECK(id_isar1, boot, cur, cpu); cpuinfo_sanity_check()
174 diff |= CHECK(id_isar2, boot, cur, cpu); cpuinfo_sanity_check()
175 diff |= CHECK(id_isar3, boot, cur, cpu); cpuinfo_sanity_check()
176 diff |= CHECK(id_isar4, boot, cur, cpu); cpuinfo_sanity_check()
177 diff |= CHECK(id_isar5, boot, cur, cpu); cpuinfo_sanity_check()
183 diff |= CHECK_MASK(id_mmfr0, 0xff0fffff, boot, cur, cpu); cpuinfo_sanity_check()
184 diff |= CHECK(id_mmfr1, boot, cur, cpu); cpuinfo_sanity_check()
185 diff |= CHECK(id_mmfr2, boot, cur, cpu); cpuinfo_sanity_check()
186 diff |= CHECK(id_mmfr3, boot, cur, cpu); cpuinfo_sanity_check()
187 diff |= CHECK(id_pfr0, boot, cur, cpu); cpuinfo_sanity_check()
188 diff |= CHECK(id_pfr1, boot, cur, cpu); cpuinfo_sanity_check()
190 diff |= CHECK(mvfr0, boot, cur, cpu); cpuinfo_sanity_check()
191 diff |= CHECK(mvfr1, boot, cur, cpu); cpuinfo_sanity_check()
192 diff |= CHECK(mvfr2, boot, cur, cpu); cpuinfo_sanity_check()
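cpuinfo_sanity_check() compares boot-CPU and secondary-CPU registers only under a mask, so fields that may legitimately differ between cores are ignored. A standalone re-creation of check_reg_mask(), using a made-up mask purely for demonstration:

#include <stdio.h>
#include <stdint.h>

/* Only bits covered by 'mask' participate in the comparison. */
static int check_reg_mask(const char *name, uint64_t mask,
                          uint64_t boot, uint64_t cur, int cpu)
{
        if ((boot & mask) == (cur & mask))
                return 0;       /* identical where it matters */
        printf("mismatch in %s: boot=%llx cpu%d=%llx\n", name,
               (unsigned long long)boot, cpu, (unsigned long long)cur);
        return 1;
}

int main(void)
{
        /* Low nibble differs, but the mask excludes it: no mismatch. */
        int diff = check_reg_mask("demo_reg", ~0xfULL,
                                  0x8444c004, 0x8444c005, 1);
        printf("diff=%d\n", diff);
        return 0;
}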
/linux-4.1.27/arch/mips/cavium-octeon/
H A Dcsrc-octeon.c140 u64 cur, end, inc; __udelay() local
142 cur = read_c0_cvmcount(); __udelay()
145 end = cur + inc; __udelay()
147 while (end > cur) __udelay()
148 cur = read_c0_cvmcount(); __udelay()
154 u64 cur, end, inc; __ndelay() local
156 cur = read_c0_cvmcount(); __ndelay()
159 end = cur + inc; __ndelay()
161 while (end > cur) __ndelay()
162 cur = read_c0_cvmcount(); __ndelay()
168 u64 cur, end; __delay() local
170 cur = read_c0_cvmcount(); __delay()
171 end = cur + loops; __delay()
173 while (end > cur) __delay()
174 cur = read_c0_cvmcount(); __delay()
189 u64 cur, end; octeon_io_clk_delay() local
191 cur = read_c0_cvmcount(); octeon_io_clk_delay()
201 end = cur + end; octeon_io_clk_delay()
203 end = cur + count; octeon_io_clk_delay()
205 while (end > cur) octeon_io_clk_delay()
206 cur = read_c0_cvmcount(); octeon_io_clk_delay()
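All the octeon delay routines share one shape: snapshot the free-running cycle counter, compute an end mark, and spin re-reading until the counter passes it. A userspace sketch with a fake counter standing in for read_c0_cvmcount():

#include <stdio.h>
#include <stdint.h>

/* Fake monotonically increasing cycle counter; real code reads hardware. */
static uint64_t fake_cycles;
static uint64_t read_cycles(void) { return fake_cycles += 100; }

/* The __delay() pattern: snapshot, mark the end, spin until caught up. */
static void delay_cycles(uint64_t loops)
{
        uint64_t cur = read_cycles();
        uint64_t end = cur + loops;

        while (end > cur)
                cur = read_cycles();
}

int main(void)
{
        uint64_t before = fake_cycles;

        delay_cycles(1000);
        printf("spun for %llu fake cycles\n",
               (unsigned long long)(fake_cycles - before));
        return 0;
}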
/linux-4.1.27/net/dccp/ccids/lib/
H A Dloss_interval.c90 struct tfrc_loss_interval *cur = tfrc_lh_peek(lh); tfrc_lh_update_i_mean() local
94 if (cur == NULL) /* not initialised */ tfrc_lh_update_i_mean()
97 len = dccp_delta_seqno(cur->li_seqno, DCCP_SKB_CB(skb)->dccpd_seq) + 1; tfrc_lh_update_i_mean()
99 if (len - (s64)cur->li_length <= 0) /* duplicate or reordered */ tfrc_lh_update_i_mean()
102 if (SUB16(dccp_hdr(skb)->dccph_ccval, cur->li_ccval) > 4) tfrc_lh_update_i_mean()
106 * starting the current loss interval (cur) and if the modulo-16 tfrc_lh_update_i_mean()
107 * distance from C(cur) to C(S) is greater than 4, consider all tfrc_lh_update_i_mean()
111 cur->li_is_closed = 1; tfrc_lh_update_i_mean()
116 cur->li_length = len; tfrc_lh_update_i_mean()
123 static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur, tfrc_lh_is_new_loss() argument
126 return dccp_delta_seqno(cur->li_seqno, new_loss->tfrchrx_seqno) > 0 && tfrc_lh_is_new_loss()
127 (cur->li_is_closed || SUB16(new_loss->tfrchrx_ccval, cur->li_ccval) > 4); tfrc_lh_is_new_loss()
142 struct tfrc_loss_interval *cur = tfrc_lh_peek(lh), *new; tfrc_lh_interval_add() local
144 if (cur != NULL && !tfrc_lh_is_new_loss(cur, tfrc_rx_hist_loss_prev(rh))) tfrc_lh_interval_add()
160 cur->li_length = dccp_delta_seqno(cur->li_seqno, new->li_seqno); tfrc_lh_interval_add()
/linux-4.1.27/tools/perf/
H A Dperf-completion.sh70 cur)
71 cur=$cur_
117 __ltrim_colon_completions $cur
119 __perf__ltrim_colon_completions $cur
155 if [[ $cur == --* ]]; then
160 __perfcomp "$cmds" "$cur"
165 __perfcomp_colon "$evts" "$cur"
171 __perfcomp_colon "$subcmds" "$cur"
174 if [[ $cur == --* ]]; then
179 __perfcomp "$opts" "$cur"
210 local cur_="${2-$cur}"
232 local _ret=1 cur cword prev
233 cur=${words[CURRENT]}
248 local cur words cword prev
250 _get_comp_words_by_ref -n =: cur words cword prev
252 __perf_get_comp_words_by_ref -n =: cur words cword prev
/linux-4.1.27/net/netfilter/
H A Dnf_conntrack_h323_asn1.c98 unsigned char *cur; member in struct:__anon14170
103 #define INC_BIT(bs) if((++(bs)->bit)>7){(bs)->cur++;(bs)->bit=0;}
104 #define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;}
105 #define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;}
106 #define CHECK_BOUND(bs,n) if((bs)->cur+(n)>(bs)->end)return(H323_ERROR_BOUND)
157 v = *bs->cur++; get_len()
162 v += *bs->cur++; get_len()
171 unsigned int b = (*bs->cur) & (0x80 >> bs->bit); get_bit()
184 v = (*bs->cur) & (0xffU >> bs->bit); get_bits()
191 bs->cur++; get_bits()
196 v += *(++bs->cur); get_bits()
216 v = (unsigned int)(*bs->cur) << (bs->bit + 24); get_bitmap()
219 v = (unsigned int)(*bs->cur++) << (bs->bit + 24); get_bitmap()
224 v |= (unsigned int)(*bs->cur++) << shift; get_bitmap()
227 v |= (unsigned int)(*bs->cur) << shift; get_bitmap()
231 v |= (*bs->cur) >> (8 - bs->bit); get_bitmap()
251 v |= *bs->cur++; get_uint()
254 v |= *bs->cur++; get_uint()
257 v |= *bs->cur++; get_uint()
260 v |= *bs->cur++; get_uint()
297 len = *bs->cur++; decode_oid()
298 bs->cur += len; decode_oid()
315 bs->cur++; decode_int()
319 bs->cur += 2; decode_int()
329 bs->cur += len; decode_int()
335 bs->cur += len; decode_int()
379 len = (*bs->cur++) << 8; decode_bitstr()
380 len += (*bs->cur++) + f->lb; decode_bitstr()
391 bs->cur += len >> 3; decode_bitstr()
432 bs->cur[0], bs->cur[1], decode_octstr()
433 bs->cur[2], bs->cur[3], decode_octstr()
434 bs->cur[4] * 256 + bs->cur[5])); decode_octstr()
436 bs->cur - bs->buf; decode_octstr()
444 len = (*bs->cur++) + f->lb; decode_octstr()
457 bs->cur += len; decode_octstr()
477 len = (*bs->cur++) + f->lb; decode_bmpstr()
485 bs->cur += len << 1; decode_bmpstr()
534 bs->cur += len; decode_seq()
537 beg = bs->cur; decode_seq()
545 bs->cur = beg + len; decode_seq()
573 bs->cur += len; decode_seq()
592 bs->cur += len; decode_seq()
595 beg = bs->cur; decode_seq()
602 bs->cur = beg + len; decode_seq()
627 count = *bs->cur++; decode_seqof()
632 count = *bs->cur++; decode_seqof()
634 count += *bs->cur++; decode_seqof()
666 bs->cur += len; decode_seqof()
669 beg = bs->cur; decode_seqof()
679 bs->cur = beg + len; decode_seqof()
732 bs->cur += len; decode_choice()
750 bs->cur += len; decode_choice()
753 beg = bs->cur; decode_choice()
759 bs->cur = beg + len; decode_choice()
777 bs.buf = bs.beg = bs.cur = buf; DecodeRasMessage()
795 bs.beg = bs.cur = beg; DecodeH323_UserInformation()
813 bs.buf = bs.beg = bs.cur = buf; DecodeMultimediaSystemControlMessage()
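The H.323 decoder walks its PER-encoded buffer with a byte pointer (cur) plus a bit offset, and every read is preceded by CHECK_BOUND so a truncated packet fails cleanly instead of overrunning. A simplified, self-contained get_bits() in the same spirit; the field width is capped at 8 bits here, whereas the real decoder handles wider fields and more cases:

#include <stdio.h>

/* Cut-down bitstream after struct bitstr above: 'cur' walks the buffer,
 * 'bit' (0..7) is the offset within the current byte. */
struct bitstr {
        const unsigned char *cur, *end;
        unsigned int bit;
};

#define CHECK_BOUND(bs, n) \
        do { if ((bs)->cur + (n) > (bs)->end) return -1; } while (0)

/* Pull b (1..8) bits, MSB first, advancing cur/bit as INC_BITS does. */
static int get_bits(struct bitstr *bs, unsigned int b, unsigned int *out)
{
        unsigned int v;

        if (b == 0 || b > 8)
                return -1;
        CHECK_BOUND(bs, (bs->bit + b + 7) / 8);
        v = (*bs->cur) & (0xffu >> bs->bit);    /* mask consumed bits */
        if (bs->bit + b <= 8) {
                v >>= 8 - bs->bit - b;          /* field fits this byte */
        } else {
                v <<= bs->bit + b - 8;          /* high part: this byte */
                v |= bs->cur[1] >> (16 - bs->bit - b); /* low part: next */
        }
        bs->bit += b;                           /* INC_BITS equivalent */
        bs->cur += bs->bit >> 3;
        bs->bit &= 7;
        *out = v;
        return 0;
}

int main(void)
{
        const unsigned char buf[] = { 0xb4, 0xc0 };
        struct bitstr bs = { buf, buf + sizeof(buf), 0 };
        unsigned int v;

        get_bits(&bs, 3, &v);
        printf("first 3 bits: %u\n", v);        /* 0b101 = 5 */
        get_bits(&bs, 7, &v);
        printf("next 7 bits: %u\n", v);         /* crosses the byte boundary */
        return 0;
}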
H A Dnfnetlink_acct.c188 struct nf_acct *cur, *last; nfnl_acct_dump() local
199 list_for_each_entry_rcu(cur, &nfnl_acct_list, head) { nfnl_acct_dump()
201 if (cur != last) nfnl_acct_dump()
207 if (filter && (cur->flags & filter->mask) != filter->value) nfnl_acct_dump()
213 NFNL_MSG_ACCT_NEW, cur) < 0) { nfnl_acct_dump()
214 cb->args[1] = (unsigned long)cur; nfnl_acct_dump()
261 struct nf_acct *cur; nfnl_acct_get() local
286 list_for_each_entry(cur, &nfnl_acct_list, head) { nfnl_acct_get()
289 if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX)!= 0) nfnl_acct_get()
301 NFNL_MSG_ACCT_NEW, cur); nfnl_acct_get()
318 static int nfnl_acct_try_del(struct nf_acct *cur) nfnl_acct_try_del() argument
323 if (atomic_dec_and_test(&cur->refcnt)) { nfnl_acct_try_del()
325 list_del_rcu(&cur->head); nfnl_acct_try_del()
326 kfree_rcu(cur, rcu_head); nfnl_acct_try_del()
329 atomic_inc(&cur->refcnt); nfnl_acct_try_del()
340 struct nf_acct *cur; nfnl_acct_del() local
344 list_for_each_entry(cur, &nfnl_acct_list, head) nfnl_acct_del()
345 nfnl_acct_try_del(cur); nfnl_acct_del()
351 list_for_each_entry(cur, &nfnl_acct_list, head) { nfnl_acct_del()
352 if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0) nfnl_acct_del()
355 ret = nfnl_acct_try_del(cur); nfnl_acct_del()
399 struct nf_acct *cur, *acct = NULL; nfnl_acct_find_get() local
402 list_for_each_entry_rcu(cur, &nfnl_acct_list, head) { nfnl_acct_find_get()
403 if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX)!= 0) nfnl_acct_find_get()
409 if (!atomic_inc_not_zero(&cur->refcnt)) { nfnl_acct_find_get()
414 acct = cur; nfnl_acct_find_get()
498 struct nf_acct *cur, *tmp; nfnl_acct_exit() local
503 list_for_each_entry_safe(cur, tmp, &nfnl_acct_list, head) { nfnl_acct_exit()
504 list_del_rcu(&cur->head); nfnl_acct_exit()
507 kfree_rcu(cur, rcu_head); nfnl_acct_exit()
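nfnl_acct_try_del() deletes an object only if the caller held the last reference: decrement-and-test, and on failure re-increment and report busy. A single-threaded sketch of that dance; the kernel uses atomic_dec_and_test() plus RCU-deferred freeing, both elided here:

#include <stdio.h>
#include <errno.h>

struct obj {
        int refcnt;
        int unlinked;
};

static int try_del(struct obj *o)
{
        if (--o->refcnt == 0) {         /* we held the last reference */
                o->unlinked = 1;        /* list_del_rcu + kfree_rcu in kernel */
                return 0;
        }
        o->refcnt++;                    /* still in use: undo, report busy */
        return -EBUSY;
}

int main(void)
{
        struct obj o = { .refcnt = 2 };

        printf("first try: %d\n", try_del(&o));   /* -EBUSY: still referenced */
        o.refcnt = 1;
        printf("second try: %d\n", try_del(&o));  /* 0: deleted */
        return 0;
}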
H A Dnfnetlink_cthelper.c294 struct nf_conntrack_helper *cur, *helper = NULL; nfnl_cthelper_new() local
309 hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) { nfnl_cthelper_new()
312 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) nfnl_cthelper_new()
315 if (strncmp(cur->name, helper_name, nfnl_cthelper_new()
319 if ((tuple.src.l3num != cur->tuple.src.l3num || nfnl_cthelper_new()
320 tuple.dst.protonum != cur->tuple.dst.protonum)) nfnl_cthelper_new()
327 helper = cur; nfnl_cthelper_new()
464 struct nf_conntrack_helper *cur, *last; nfnl_cthelper_dump_table() local
470 hlist_for_each_entry_rcu(cur, nfnl_cthelper_dump_table()
474 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) nfnl_cthelper_dump_table()
478 if (cur != last) nfnl_cthelper_dump_table()
486 NFNL_MSG_CTHELPER_NEW, cur) < 0) { nfnl_cthelper_dump_table()
487 cb->args[1] = (unsigned long)cur; nfnl_cthelper_dump_table()
506 struct nf_conntrack_helper *cur; nfnl_cthelper_get() local
531 hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) { nfnl_cthelper_get()
534 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) nfnl_cthelper_get()
537 if (helper_name && strncmp(cur->name, helper_name, nfnl_cthelper_get()
542 (tuple.src.l3num != cur->tuple.src.l3num || nfnl_cthelper_get()
543 tuple.dst.protonum != cur->tuple.dst.protonum)) nfnl_cthelper_get()
555 NFNL_MSG_CTHELPER_NEW, cur); nfnl_cthelper_get()
578 struct nf_conntrack_helper *cur; nfnl_cthelper_del() local
596 hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], nfnl_cthelper_del()
599 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) nfnl_cthelper_del()
604 if (helper_name && strncmp(cur->name, helper_name, nfnl_cthelper_del()
609 (tuple.src.l3num != cur->tuple.src.l3num || nfnl_cthelper_del()
610 tuple.dst.protonum != cur->tuple.dst.protonum)) nfnl_cthelper_del()
614 nf_conntrack_helper_unregister(cur); nfnl_cthelper_del()
664 struct nf_conntrack_helper *cur; nfnl_cthelper_exit() local
671 hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], nfnl_cthelper_exit()
674 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) nfnl_cthelper_exit()
677 nf_conntrack_helper_unregister(cur); nfnl_cthelper_exit()
H A Dnfnetlink_cttimeout.c212 struct ctnl_timeout *cur, *last; ctnl_timeout_dump() local
222 list_for_each_entry_rcu(cur, &cttimeout_list, head) { ctnl_timeout_dump()
224 if (cur != last) ctnl_timeout_dump()
232 IPCTNL_MSG_TIMEOUT_NEW, cur) < 0) { ctnl_timeout_dump()
233 cb->args[1] = (unsigned long)cur; ctnl_timeout_dump()
250 struct ctnl_timeout *cur; cttimeout_get_timeout() local
263 list_for_each_entry(cur, &cttimeout_list, head) { cttimeout_get_timeout()
266 if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0) cttimeout_get_timeout()
278 IPCTNL_MSG_TIMEOUT_NEW, cur); cttimeout_get_timeout()
319 struct ctnl_timeout *cur; cttimeout_del_timeout() local
323 list_for_each_entry(cur, &cttimeout_list, head) cttimeout_del_timeout()
324 ctnl_timeout_try_del(cur); cttimeout_del_timeout()
330 list_for_each_entry(cur, &cttimeout_list, head) { cttimeout_del_timeout()
331 if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0) cttimeout_del_timeout()
334 ret = ctnl_timeout_try_del(cur); cttimeout_del_timeout()
565 struct ctnl_timeout *cur, *tmp; cttimeout_exit() local
570 list_for_each_entry_safe(cur, tmp, &cttimeout_list, head) { cttimeout_exit()
571 list_del_rcu(&cur->head); cttimeout_exit()
575 nf_ct_l4proto_put(cur->l4proto); cttimeout_exit()
576 kfree_rcu(cur, rcu_head); cttimeout_exit()
H A Dnf_conntrack_helper.c302 struct nf_ct_helper_expectfn *cur; nf_ct_helper_expectfn_find_by_name() local
306 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { nf_ct_helper_expectfn_find_by_name()
307 if (!strcmp(cur->name, name)) { nf_ct_helper_expectfn_find_by_name()
313 return found ? cur : NULL; nf_ct_helper_expectfn_find_by_name()
320 struct nf_ct_helper_expectfn *cur; nf_ct_helper_expectfn_find_by_symbol() local
324 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { nf_ct_helper_expectfn_find_by_symbol()
325 if (cur->expectfn == symbol) { nf_ct_helper_expectfn_find_by_symbol()
331 return found ? cur : NULL; nf_ct_helper_expectfn_find_by_symbol()
365 struct nf_conntrack_helper *cur; nf_conntrack_helper_register() local
373 hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) { nf_conntrack_helper_register()
374 if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 && nf_conntrack_helper_register()
375 cur->tuple.src.l3num == me->tuple.src.l3num && nf_conntrack_helper_register()
376 cur->tuple.dst.protonum == me->tuple.dst.protonum) { nf_conntrack_helper_register()
H A Dx_tables.c56 unsigned int cur; /* number of used slots in compat_tab[] */ member in struct:xt_af
432 xp->cur = 0; xt_compat_add_offset()
435 if (xp->cur >= xp->number) xt_compat_add_offset()
438 if (xp->cur) xt_compat_add_offset()
439 delta += xp->compat_tab[xp->cur - 1].delta; xt_compat_add_offset()
440 xp->compat_tab[xp->cur].offset = offset; xt_compat_add_offset()
441 xp->compat_tab[xp->cur].delta = delta; xt_compat_add_offset()
442 xp->cur++; xt_compat_add_offset()
453 xt[af].cur = 0; xt_compat_flush_offsets()
461 int mid, left = 0, right = xt[af].cur - 1; xt_compat_calc_jump()
479 xt[af].cur = 0; xt_compat_init_offsets()
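x_tables keeps (offset, delta) pairs sorted by offset so xt_compat_calc_jump() can binary-search the cumulative layout shift for a rule. A standalone sketch of that search, with the kernel's WARN on a miss replaced by returning 0:

#include <stdio.h>

/* Mirror of struct compat_delta: a rule at 'offset' in the compat blob
 * is shifted by cumulative 'delta' bytes in the native layout. */
struct compat_delta {
        unsigned int offset;
        int delta;
};

static int calc_jump(const struct compat_delta *tab, int n,
                     unsigned int offset)
{
        int left = 0, right = n - 1;

        while (left <= right) {
                int mid = (left + right) / 2;

                if (offset > tab[mid].offset)
                        left = mid + 1;
                else if (offset < tab[mid].offset)
                        right = mid - 1;
                else
                        return tab[mid].delta;
        }
        return 0;       /* not found; the kernel warns here */
}

int main(void)
{
        struct compat_delta tab[] = { { 0, 0 }, { 96, 8 }, { 200, 24 } };

        printf("delta at offset 96: %d\n", calc_jump(tab, 3, 96));
        return 0;
}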
/linux-4.1.27/drivers/video/backlight/
H A Dcr_bllcd.c78 u32 cur = inl(addr); cr_backlight_set_intensity() local
90 cur &= ~CRVML_BACKLIGHT_OFF; cr_backlight_set_intensity()
91 outl(cur, addr); cr_backlight_set_intensity()
93 cur |= CRVML_BACKLIGHT_OFF; cr_backlight_set_intensity()
94 outl(cur, addr); cr_backlight_set_intensity()
103 u32 cur = inl(addr); cr_backlight_get_intensity() local
106 if (cur & CRVML_BACKLIGHT_OFF) cr_backlight_get_intensity()
122 u32 cur = inl(addr); cr_panel_on() local
124 if (!(cur & CRVML_PANEL_ON)) { cr_panel_on()
126 if (cur & 0x00000001) { cr_panel_on()
127 cur &= ~CRVML_LVDS_ON; cr_panel_on()
128 outl(cur, addr); cr_panel_on()
132 cur |= CRVML_PANEL_ON; cr_panel_on()
133 outl(cur, addr); cr_panel_on()
138 if (!(cur & CRVML_LVDS_ON)) { cr_panel_on()
140 outl(cur | CRVML_LVDS_ON, addr); cr_panel_on()
147 u32 cur = inl(addr); cr_panel_off() local
150 if (cur & CRVML_LVDS_ON) { cr_panel_off()
151 cur &= ~CRVML_LVDS_ON; cr_panel_off()
152 outl(cur, addr); cr_panel_off()
154 if (cur & CRVML_PANEL_ON) { cr_panel_off()
156 outl(cur & ~CRVML_PANEL_ON, addr); cr_panel_off()
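cr_bllcd drives the panel and backlight with read-modify-write cycles on one I/O port: inl(), flip a bit in the copy, outl() it back. The same shape against a fake register, with a hypothetical bit position standing in for CRVML_BACKLIGHT_OFF:

#include <stdio.h>
#include <stdint.h>

#define BACKLIGHT_OFF (1u << 2)   /* hypothetical bit, for illustration */

/* Fake register; the driver uses inl()/outl() on a PCI I/O port. */
static uint32_t fake_reg = 0xff;

static uint32_t reg_read(void) { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

/* The cr_backlight_set_intensity() shape: read, flip one bit, write back. */
static void set_backlight(int on)
{
        uint32_t cur = reg_read();

        if (on)
                cur &= ~BACKLIGHT_OFF;  /* clear the OFF bit */
        else
                cur |= BACKLIGHT_OFF;   /* set the OFF bit */
        reg_write(cur);
}

int main(void)
{
        set_backlight(1);
        printf("reg after on:  0x%x\n", fake_reg);
        set_backlight(0);
        printf("reg after off: 0x%x\n", fake_reg);
        return 0;
}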
/linux-4.1.27/arch/arm/mach-s3c24xx/
H A Dmach-rx1950.c170 { .volt = 4100, .cur = 156, .level = 100},
171 { .volt = 4050, .cur = 156, .level = 95},
172 { .volt = 4025, .cur = 141, .level = 90},
173 { .volt = 3995, .cur = 144, .level = 85},
174 { .volt = 3957, .cur = 162, .level = 80},
175 { .volt = 3931, .cur = 147, .level = 75},
176 { .volt = 3902, .cur = 147, .level = 70},
177 { .volt = 3863, .cur = 153, .level = 65},
178 { .volt = 3838, .cur = 150, .level = 60},
179 { .volt = 3800, .cur = 153, .level = 55},
180 { .volt = 3765, .cur = 153, .level = 50},
181 { .volt = 3748, .cur = 172, .level = 45},
182 { .volt = 3740, .cur = 153, .level = 40},
183 { .volt = 3714, .cur = 175, .level = 35},
184 { .volt = 3710, .cur = 156, .level = 30},
185 { .volt = 3963, .cur = 156, .level = 25},
186 { .volt = 3672, .cur = 178, .level = 20},
187 { .volt = 3651, .cur = 178, .level = 15},
188 { .volt = 3629, .cur = 178, .level = 10},
189 { .volt = 3612, .cur = 162, .level = 5},
190 { .volt = 3605, .cur = 162, .level = 0},
194 { .volt = 4200, .cur = 0, .level = 100},
195 { .volt = 4190, .cur = 0, .level = 99},
196 { .volt = 4178, .cur = 0, .level = 95},
197 { .volt = 4110, .cur = 0, .level = 70},
198 { .volt = 4076, .cur = 0, .level = 65},
199 { .volt = 4046, .cur = 0, .level = 60},
200 { .volt = 4021, .cur = 0, .level = 55},
201 { .volt = 3999, .cur = 0, .level = 50},
202 { .volt = 3982, .cur = 0, .level = 45},
203 { .volt = 3965, .cur = 0, .level = 40},
204 { .volt = 3957, .cur = 0, .level = 35},
205 { .volt = 3948, .cur = 0, .level = 30},
206 { .volt = 3936, .cur = 0, .level = 25},
207 { .volt = 3927, .cur = 0, .level = 20},
208 { .volt = 3906, .cur = 0, .level = 15},
209 { .volt = 3880, .cur = 0, .level = 10},
210 { .volt = 3829, .cur = 0, .level = 5},
211 { .volt = 3820, .cur = 0, .level = 0},
H A Dmach-h1940.c270 { .volt = 4070, .cur = 162, .level = 100},
271 { .volt = 4040, .cur = 165, .level = 95},
272 { .volt = 4016, .cur = 164, .level = 90},
273 { .volt = 3996, .cur = 166, .level = 85},
274 { .volt = 3971, .cur = 168, .level = 80},
275 { .volt = 3951, .cur = 168, .level = 75},
276 { .volt = 3931, .cur = 170, .level = 70},
277 { .volt = 3903, .cur = 172, .level = 65},
278 { .volt = 3886, .cur = 172, .level = 60},
279 { .volt = 3858, .cur = 176, .level = 55},
280 { .volt = 3842, .cur = 176, .level = 50},
281 { .volt = 3818, .cur = 176, .level = 45},
282 { .volt = 3789, .cur = 180, .level = 40},
283 { .volt = 3769, .cur = 180, .level = 35},
284 { .volt = 3749, .cur = 184, .level = 30},
285 { .volt = 3732, .cur = 184, .level = 25},
286 { .volt = 3716, .cur = 184, .level = 20},
287 { .volt = 3708, .cur = 184, .level = 15},
288 { .volt = 3716, .cur = 96, .level = 10},
289 { .volt = 3700, .cur = 96, .level = 5},
290 { .volt = 3684, .cur = 96, .level = 0},
294 { .volt = 4130, .cur = 0, .level = 100},
295 { .volt = 3982, .cur = 0, .level = 50},
296 { .volt = 3854, .cur = 0, .level = 10},
297 { .volt = 3841, .cur = 0, .level = 0},
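The rx1950/h1940 tables map a measured battery voltage (plus charge current) to a percentage; the rows descend so a consumer can scan to the first threshold at or below the reading. A rough re-creation of that lookup; the actual consumer is the s3c_adc_battery driver, whose exact interpolation is not shown in these hits:

#include <stdio.h>

/* Same shape as struct s3c_adc_bat_thresh in <linux/s3c_adc_battery.h>. */
struct bat_thresh {
        int volt;       /* mV */
        int cur;        /* mA */
        int level;      /* percent */
};

static const struct bat_thresh lut[] = {
        { 4100, 156, 100 },
        { 3800, 153,  55 },
        { 3605, 162,   0 },
};

/* Walk the descending table to the first row at or below the reading. */
static int volt_to_level(int mv)
{
        unsigned int i;

        for (i = 0; i < sizeof(lut) / sizeof(lut[0]); i++)
                if (mv >= lut[i].volt)
                        return lut[i].level;
        return 0;       /* below the last threshold: treat as empty */
}

int main(void)
{
        printf("3900 mV -> %d%%\n", volt_to_level(3900));
        return 0;
}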
/linux-4.1.27/drivers/block/drbd/
H A Ddrbd_vli.h211 static inline void bitstream_cursor_reset(struct bitstream_cursor *cur, void *s) bitstream_cursor_reset() argument
213 cur->b = s; bitstream_cursor_reset()
214 cur->bit = 0; bitstream_cursor_reset()
219 static inline void bitstream_cursor_advance(struct bitstream_cursor *cur, unsigned int bits) bitstream_cursor_advance() argument
221 bits += cur->bit; bitstream_cursor_advance()
222 cur->b = cur->b + (bits >> 3); bitstream_cursor_advance()
223 cur->bit = bits & 7; bitstream_cursor_advance()
228 struct bitstream_cursor cur; member in struct:bitstream
243 bitstream_cursor_reset(&bs->cur, bs->buf); bitstream_init()
248 bitstream_cursor_reset(&bs->cur, bs->buf); bitstream_rewind()
262 unsigned char *b = bs->cur.b; bitstream_put_bits()
268 if ((bs->cur.b + ((bs->cur.bit + bits -1) >> 3)) - bs->buf >= bs->buf_len) bitstream_put_bits()
275 *b++ |= (val & 0xff) << bs->cur.bit; bitstream_put_bits()
277 for (tmp = 8 - bs->cur.bit; tmp < bits; tmp += 8) bitstream_put_bits()
280 bitstream_cursor_advance(&bs->cur, bits); bitstream_put_bits()
301 if (bs->cur.b + ((bs->cur.bit + bs->pad_bits + bits -1) >> 3) - bs->buf >= bs->buf_len) bitstream_get_bits()
302 bits = ((bs->buf_len - (bs->cur.b - bs->buf)) << 3) bitstream_get_bits()
303 - bs->cur.bit - bs->pad_bits; bitstream_get_bits()
312 n = (bs->cur.bit + bits + 7) >> 3; bitstream_get_bits()
313 /* n may be at most 9, if cur.bit + bits > 64 */ bitstream_get_bits()
316 memcpy(&val, bs->cur.b+1, n - 1); bitstream_get_bits()
317 val = le64_to_cpu(val) << (8 - bs->cur.bit); bitstream_get_bits()
321 val |= bs->cur.b[0] >> bs->cur.bit; bitstream_get_bits()
326 bitstream_cursor_advance(&bs->cur, bits); bitstream_get_bits()
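bitstream_cursor_advance() is the core of drbd's bit cursor: fold the new bits into the current bit offset, move the byte pointer by the whole bytes, and keep the remainder as the new bit offset. Extracted as a runnable snippet:

#include <stdio.h>

/* After struct bitstream_cursor in drbd_vli.h: a byte pointer plus the
 * bit offset within that byte. */
struct bitstream_cursor {
        unsigned char *b;
        unsigned int bit;       /* 0..7 */
};

static void cursor_advance(struct bitstream_cursor *cur, unsigned int bits)
{
        bits += cur->bit;       /* total bits past cur->b */
        cur->b += bits >> 3;    /* whole bytes */
        cur->bit = bits & 7;    /* leftover bits */
}

int main(void)
{
        unsigned char buf[8];
        struct bitstream_cursor cur = { buf, 0 };

        cursor_advance(&cur, 13);       /* 13 bits = 1 byte + 5 bits */
        printf("byte offset %ld, bit %u\n", (long)(cur.b - buf), cur.bit);
        return 0;
}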
/linux-4.1.27/drivers/char/agp/
H A Disoch.c24 struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list); agp_3_5_dev_list_insert() local
28 cur = list_entry(pos, struct agp_3_5_dev, list); list_for_each()
29 if (cur->maxbw > n->maxbw) list_for_each()
37 struct agp_3_5_dev *cur; agp_3_5_dev_list_sort() local
45 cur = list_entry(pos, struct agp_3_5_dev, list); agp_3_5_dev_list_sort()
46 dev = cur->dev; agp_3_5_dev_list_sort()
48 pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat); agp_3_5_dev_list_sort()
49 cur->maxbw = (nistat >> 16) & 0xff; agp_3_5_dev_list_sort()
81 struct agp_3_5_dev *cur; agp_3_5_isochronous_node_enable() local
136 cur = list_entry(pos, struct agp_3_5_dev, list); list_for_each()
137 dev = cur->dev; list_for_each()
139 mcapndx = cur->capndx; list_for_each()
141 pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat); list_for_each()
146 master[cdev].dev = cur; list_for_each()
249 cur = master[cdev].dev;
250 dev = cur->dev;
252 mcapndx = cur->capndx;
257 pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd);
258 pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd);
268 pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd);
269 pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd);
289 struct agp_3_5_dev *cur; agp_3_5_nonisochronous_node_enable() local
303 cur = list_entry(pos, struct agp_3_5_dev, list); agp_3_5_nonisochronous_node_enable()
305 pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd); agp_3_5_nonisochronous_node_enable()
308 pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd); agp_3_5_nonisochronous_node_enable()
324 struct agp_3_5_dev *dev_list, *cur; agp_3_5_enable() local
370 if ((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) { for_each_pci_dev()
374 cur->dev = dev; for_each_pci_dev()
376 pos = &cur->list; for_each_pci_dev()
393 cur = list_entry(pos, struct agp_3_5_dev, list); list_for_each()
394 dev = cur->dev; list_for_each()
427 cur->capndx = mcapndx; list_for_each()
429 pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus); list_for_each()
461 cur = list_entry(pos, struct agp_3_5_dev, list);
464 kfree(cur);
/linux-4.1.27/drivers/misc/vmw_vmci/
H A Dvmci_event.c56 struct vmci_subscription *cur, *p2; vmci_event_exit() local
57 list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) { vmci_event_exit()
65 list_del(&cur->node); vmci_event_exit()
66 kfree(cur); vmci_event_exit()
79 struct vmci_subscription *cur; event_find() local
80 list_for_each_entry(cur, &subscriber_array[e], node) { event_find()
81 if (cur->id == sub_id) event_find()
82 return cur; event_find()
94 struct vmci_subscription *cur; event_deliver() local
99 list_for_each_entry_rcu(cur, subscriber_list, node) { list_for_each_entry_rcu()
100 cur->callback(cur->id, &event_msg->event_data, list_for_each_entry_rcu()
101 cur->callback_data); list_for_each_entry_rcu()
/linux-4.1.27/drivers/mtd/nand/
H A Dnand_ecc.c165 uint32_t cur; /* current value in buffer */ __nand_calculate_ecc() local
195 cur = *bp++; __nand_calculate_ecc()
196 tmppar = cur; __nand_calculate_ecc()
197 rp4 ^= cur; __nand_calculate_ecc()
198 cur = *bp++; __nand_calculate_ecc()
199 tmppar ^= cur; __nand_calculate_ecc()
201 cur = *bp++; __nand_calculate_ecc()
202 tmppar ^= cur; __nand_calculate_ecc()
203 rp4 ^= cur; __nand_calculate_ecc()
204 cur = *bp++; __nand_calculate_ecc()
205 tmppar ^= cur; __nand_calculate_ecc()
208 cur = *bp++; __nand_calculate_ecc()
209 tmppar ^= cur; __nand_calculate_ecc()
210 rp4 ^= cur; __nand_calculate_ecc()
211 rp6 ^= cur; __nand_calculate_ecc()
212 cur = *bp++; __nand_calculate_ecc()
213 tmppar ^= cur; __nand_calculate_ecc()
214 rp6 ^= cur; __nand_calculate_ecc()
215 cur = *bp++; __nand_calculate_ecc()
216 tmppar ^= cur; __nand_calculate_ecc()
217 rp4 ^= cur; __nand_calculate_ecc()
218 cur = *bp++; __nand_calculate_ecc()
219 tmppar ^= cur; __nand_calculate_ecc()
222 cur = *bp++; __nand_calculate_ecc()
223 tmppar ^= cur; __nand_calculate_ecc()
224 rp4 ^= cur; __nand_calculate_ecc()
225 rp6 ^= cur; __nand_calculate_ecc()
226 rp8 ^= cur; __nand_calculate_ecc()
227 cur = *bp++; __nand_calculate_ecc()
228 tmppar ^= cur; __nand_calculate_ecc()
229 rp6 ^= cur; __nand_calculate_ecc()
230 rp8 ^= cur; __nand_calculate_ecc()
231 cur = *bp++; __nand_calculate_ecc()
232 tmppar ^= cur; __nand_calculate_ecc()
233 rp4 ^= cur; __nand_calculate_ecc()
234 rp8 ^= cur; __nand_calculate_ecc()
235 cur = *bp++; __nand_calculate_ecc()
236 tmppar ^= cur; __nand_calculate_ecc()
237 rp8 ^= cur; __nand_calculate_ecc()
239 cur = *bp++; __nand_calculate_ecc()
240 tmppar ^= cur; __nand_calculate_ecc()
241 rp4 ^= cur; __nand_calculate_ecc()
242 rp6 ^= cur; __nand_calculate_ecc()
243 cur = *bp++; __nand_calculate_ecc()
244 tmppar ^= cur; __nand_calculate_ecc()
245 rp6 ^= cur; __nand_calculate_ecc()
246 cur = *bp++; __nand_calculate_ecc()
247 tmppar ^= cur; __nand_calculate_ecc()
248 rp4 ^= cur; __nand_calculate_ecc()
249 cur = *bp++; __nand_calculate_ecc()
250 tmppar ^= cur; __nand_calculate_ecc()
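The xref listing only shows the __nand_calculate_ecc() lines that mention cur, so the interleaved rp/tmppar folds between them are partly hidden. As a rough, simplified illustration of the underlying idea (not the driver's exact rp assignment): XOR each word into several parity accumulators, selected by bits of the word index.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t buf[16];
        uint32_t par = 0, rp4 = 0, rp6 = 0, rp8 = 0;
        unsigned int i;

        for (i = 0; i < 16; i++)
                buf[i] = 0x01010101u * i;       /* demo data */

        for (i = 0; i < 16; i++) {
                uint32_t cur = buf[i];

                par ^= cur;                     /* parity over everything */
                if (!(i & 1))
                        rp4 ^= cur;             /* index bit 0 clear */
                if (!(i & 2))
                        rp6 ^= cur;             /* index bit 1 clear */
                if (!(i & 4))
                        rp8 ^= cur;             /* index bit 2 clear */
        }
        printf("par=%08x rp4=%08x rp6=%08x rp8=%08x\n", par, rp4, rp6, rp8);
        return 0;
}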
/linux-4.1.27/drivers/infiniband/core/
H A Dnetlink.c55 struct ibnl_client *cur; ibnl_add_client() local
68 list_for_each_entry(cur, &client_list, list) { ibnl_add_client()
69 if (cur->index == index) { ibnl_add_client()
87 struct ibnl_client *cur, *next; ibnl_remove_client() local
90 list_for_each_entry_safe(cur, next, &client_list, list) { ibnl_remove_client()
91 if (cur->index == index) { ibnl_remove_client()
92 list_del(&(cur->list)); ibnl_remove_client()
94 kfree(cur); ibnl_remove_client()
206 struct ibnl_client *cur, *next; ibnl_cleanup() local
209 list_for_each_entry_safe(cur, next, &client_list, list) { ibnl_cleanup()
210 list_del(&(cur->list)); ibnl_cleanup()
211 kfree(cur); ibnl_cleanup()
/linux-4.1.27/drivers/cpufreq/
H A Damd_freq_sensitivity.c65 freq_next = policy->cur; amd_powersave_bias_target()
74 freq_next = policy->cur; amd_powersave_bias_target()
85 if (data->freq_prev == policy->cur) amd_powersave_bias_target()
86 freq_next = policy->cur; amd_powersave_bias_target()
88 if (freq_next > policy->cur) amd_powersave_bias_target()
89 freq_next = policy->cur; amd_powersave_bias_target()
90 else if (freq_next < policy->cur) amd_powersave_bias_target()
96 od_info->freq_table, policy->cur - 1, amd_powersave_bias_target()
H A Dcpufreq_userspace.c49 return sprintf(buf, "%u\n", policy->cur); show_speed()
60 BUG_ON(!policy->cur); cpufreq_governor_userspace()
78 policy->cur); cpufreq_governor_userspace()
80 if (policy->max < policy->cur) cpufreq_governor_userspace()
83 else if (policy->min > policy->cur) cpufreq_governor_userspace()
H A Dsa1100-cpufreq.c182 unsigned int cur = sa11x0_getspeed(0); sa1100_target() local
187 if (new_freq > cur) sa1100_target()
188 sa1100_update_dram_timings(cur, new_freq); sa1100_target()
192 if (new_freq < cur) sa1100_target()
193 sa1100_update_dram_timings(cur, new_freq); sa1100_target()
H A Dcpufreq.c304 (policy->cur) && (policy->cur != freqs->old)) { __cpufreq_notify_transition()
306 freqs->old, policy->cur); __cpufreq_notify_transition()
307 freqs->old = policy->cur; __cpufreq_notify_transition()
323 policy->cur = freqs->new; __cpufreq_notify_transition()
525 ret = sprintf(buf, "%u\n", policy->cur); show_scaling_cur_freq()
1200 policy->cur = cpufreq_driver->get(policy->cpu);
1201 if (!policy->cur) {
1217 * for the next freq which is >= policy->cur ('cur' must be set by now,
1218 * otherwise we will end up setting freq to lowest of the table as 'cur'
1221 * We are passing target-freq as "policy->cur - 1" otherwise
1222 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1228 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1232 __func__, policy->cpu, policy->cur);
1233 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1243 __func__, policy->cpu, policy->cur);
1507 policy->cur, new_freq); cpufreq_out_of_sync()
1509 freqs.old = policy->cur; cpufreq_out_of_sync()
1517 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1533 ret_freq = policy->cur; cpufreq_quick_get()
1570 if (ret_freq && policy->cur && __cpufreq_get()
1574 if (unlikely(ret_freq != policy->cur)) { __cpufreq_get()
1856 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; __target_index()
1932 if (target_freq == policy->cur) __cpufreq_driver_target()
1936 policy->restore_freq = policy->cur; __cpufreq_driver_target()
1957 if (freq_table[index].frequency == policy->cur) { __cpufreq_driver_target()
2272 new_policy.cur = cpufreq_driver->get(cpu); cpufreq_update_policy()
2273 if (WARN_ON(!new_policy.cur)) { cpufreq_update_policy()
2278 if (!policy->cur) { cpufreq_update_policy()
2280 policy->cur = new_policy.cur; cpufreq_update_policy()
2282 if (policy->cur != new_policy.cur && has_target()) cpufreq_update_policy()
2283 cpufreq_out_of_sync(policy, new_policy.cur); cpufreq_update_policy()
H A Dunicore2-cpufreq.c44 freqs.old = policy->cur; ucv2_target()
H A Dtegra-cpufreq.c57 * - we are already at it, i.e. policy->cur == ifreq tegra_get_intermediate()
60 if ((freq_table[index].frequency == ifreq) || (policy->cur == ifreq)) tegra_get_intermediate()
H A Dat32ap-cpufreq.c33 old_freq = policy->cur; at32_set_target()
H A Dcpufreq_governor.c367 if (!policy->cur) cpufreq_governor_dbs()
399 cs_dbs_info->requested_freq = policy->cur; cpufreq_governor_dbs()
436 if (policy->max < cpu_cdbs->cur_policy->cur) cpufreq_governor_dbs()
439 else if (policy->min > cpu_cdbs->cur_policy->cur) cpufreq_governor_dbs()
H A Dppc_cbe_cpufreq.c118 policy->cur = cbe_freqs[cur_pmode].frequency;
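Several of the cpufreq hits implement the same limit-enforcement rule: when policy->min/max change, a cur that fell outside the range is retargeted to the violated bound. Condensed into a standalone clamp; the kernel calls __cpufreq_driver_target() where this sketch simply assigns:

#include <stdio.h>

/* Simplified policy, after struct cpufreq_policy's min/max/cur (kHz). */
struct policy {
        unsigned int min, max, cur;
};

static void enforce_limits(struct policy *p)
{
        if (p->max < p->cur)
                p->cur = p->max;        /* target the new maximum */
        else if (p->min > p->cur)
                p->cur = p->min;        /* target the new minimum */
}

int main(void)
{
        struct policy p = { .min = 800000, .max = 2000000, .cur = 2400000 };

        enforce_limits(&p);
        printf("cur after clamp: %u kHz\n", p.cur);
        return 0;
}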
/linux-4.1.27/lib/
H A Dlist_sort.c123 struct list_head *cur = list; list_sort() local
125 cur->next = NULL; list_sort()
128 cur = merge(priv, cmp, part[lev], cur); list_sort()
138 part[lev] = cur; list_sort()
216 struct list_head *cur; list_sort_test() local
245 for (cur = head.next; cur->next != &head; cur = cur->next) { list_sort_test()
249 if (cur->next->prev != cur) { list_sort_test()
254 cmp_result = cmp(NULL, cur, cur->next); list_sort_test()
260 el = container_of(cur, struct debug_el, list); list_sort_test()
261 el1 = container_of(cur->next, struct debug_el, list); list_sort_test()
274 if (head.prev != cur) { list_sort_test()
H A Dcmdline.c54 char *cur = *str; get_option() local
56 if (!cur || !(*cur)) get_option()
58 *pint = simple_strtol(cur, str, 0); get_option()
59 if (cur == *str) get_option()
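get_option() detects "not a number" the strtol way: if the parser consumed nothing, the updated string pointer still equals the saved cur. A userspace equivalent built on strtol():

#include <stdio.h>
#include <stdlib.h>

static int get_int_option(char **str, int *pint)
{
        char *cur = *str;

        if (!cur || !*cur)
                return 0;               /* nothing to parse */
        *pint = (int)strtol(cur, str, 0);
        if (cur == *str)
                return 0;               /* no progress: not a number */
        return 1;
}

int main(void)
{
        char input[] = "42,foo";
        char *p = input;
        int v;

        if (get_int_option(&p, &v))
                printf("parsed %d, rest \"%s\"\n", v, p);
        return 0;
}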
H A Dts_fsm.c141 struct ts_fsm_token *cur = NULL, *next; fsm_find() local
170 cur = &fsm->tokens[tok_idx]; fsm_find()
177 switch (cur->recur) { fsm_find()
182 if (!match_token(cur, data[block_idx])) fsm_find()
188 !match_token(cur, data[block_idx])) fsm_find()
196 if (!match_token(cur, data[block_idx])) fsm_find()
210 if (!match_token(cur, data[block_idx])) fsm_find()
233 if (!match_token(cur, data[block_idx])) fsm_find()
H A Drbtree_test.c119 struct test_node *cur, *n; check_postorder_foreach() local
121 rbtree_postorder_for_each_entry_safe(cur, n, &root, rb) check_postorder_foreach()
/linux-4.1.27/drivers/net/team/
H A Dteam_mode_broadcast.c21 struct team_port *cur; bc_transmit() local
27 list_for_each_entry_rcu(cur, &team->port_list, list) { bc_transmit()
28 if (team_port_txable(cur)) { bc_transmit()
38 last = cur; bc_transmit()
/linux-4.1.27/drivers/gpu/drm/msm/
H A Dmsm_ringbuffer.h27 uint32_t *start, *end, *cur; member in struct:msm_ringbuffer
38 if (ring->cur == ring->end) OUT_RING()
39 ring->cur = ring->start; OUT_RING()
40 *(ring->cur++) = data; OUT_RING()
H A Dmsm_ringbuffer.c44 ring->cur = ring->start; msm_ringbuffer_new()
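OUT_RING() wraps the write cursor back to start when it reaches end, then stores. Note there is no read pointer in this snippet; old entries are simply overwritten, and GPU-side consumption is tracked elsewhere in the driver. As a tiny runnable ring:

#include <stdio.h>
#include <stdint.h>

/* Simplified ring after struct msm_ringbuffer: start/end bound the
 * buffer, cur is the write cursor. */
struct ring {
        uint32_t buf[4];
        uint32_t *start, *end, *cur;
};

static void out_ring(struct ring *r, uint32_t data)
{
        if (r->cur == r->end)
                r->cur = r->start;      /* wrap before writing */
        *(r->cur++) = data;
}

int main(void)
{
        struct ring r;

        r.start = r.cur = r.buf;
        r.end = r.buf + 4;
        for (uint32_t i = 0; i < 6; i++)        /* 6 writes, 4 slots: wraps */
                out_ring(&r, i);
        printf("buf: %u %u %u %u\n", r.buf[0], r.buf[1], r.buf[2], r.buf[3]);
        return 0;
}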
/linux-4.1.27/arch/x86/kernel/
H A Dreboot_fixups_32.c83 const struct device_fixup *cur; mach_reboot_fixups() local
93 cur = &(fixups_table[i]); mach_reboot_fixups()
94 dev = pci_get_device(cur->vendor, cur->device, NULL); mach_reboot_fixups()
98 cur->reboot_fixup(dev); mach_reboot_fixups()
H A Dirq_64.c67 WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n", stack_overflow_check()
/linux-4.1.27/drivers/media/platform/vivid/
H A Dvivid-radio-common.c82 rds->picode = dev->radio_tx_rds_pi->cur.val; vivid_radio_rds_init()
83 rds->pty = dev->radio_tx_rds_pty->cur.val; vivid_radio_rds_init()
84 rds->mono_stereo = dev->radio_tx_rds_mono_stereo->cur.val; vivid_radio_rds_init()
85 rds->art_head = dev->radio_tx_rds_art_head->cur.val; vivid_radio_rds_init()
86 rds->compressed = dev->radio_tx_rds_compressed->cur.val; vivid_radio_rds_init()
87 rds->dyn_pty = dev->radio_tx_rds_dyn_pty->cur.val; vivid_radio_rds_init()
88 rds->ta = dev->radio_tx_rds_ta->cur.val; vivid_radio_rds_init()
89 rds->tp = dev->radio_tx_rds_tp->cur.val; vivid_radio_rds_init()
90 rds->ms = dev->radio_tx_rds_ms->cur.val; vivid_radio_rds_init()
H A Dvivid-kthread-cap.c512 dev->brightness->cur.val, vivid_fillbuff()
513 dev->contrast->cur.val, vivid_fillbuff()
514 dev->saturation->cur.val, vivid_fillbuff()
515 dev->hue->cur.val); vivid_fillbuff()
519 dev->autogain->cur.val, gain, dev->alpha->cur.val); vivid_fillbuff()
525 dev->volume->cur.val, dev->mute->cur.val); vivid_fillbuff()
530 dev->int32->cur.val, vivid_fillbuff()
532 dev->bitmask->cur.val); vivid_fillbuff()
535 dev->boolean->cur.val, vivid_fillbuff()
536 dev->menu->qmenu[dev->menu->cur.val], vivid_fillbuff()
540 dev->int_menu->qmenu_int[dev->int_menu->cur.val], vivid_fillbuff()
541 dev->int_menu->cur.val); vivid_fillbuff()
/linux-4.1.27/arch/m68k/sun3/
H A Dsun3dvma.c88 struct list_head *cur; print_holes() local
92 list_for_each(cur, holes) { list_for_each()
93 hole = list_entry(cur, struct hole, list); list_for_each()
111 struct list_head *cur; refill() local
114 list_for_each(cur, &hole_list) { refill()
115 hole = list_entry(cur, struct hole, list); refill()
155 struct list_head *cur; get_baddr() local
167 list_for_each(cur, &hole_list) { get_baddr()
170 hole = list_entry(cur, struct hole, list); get_baddr()
208 struct list_head *cur; free_baddr() local
222 list_for_each(cur, &hole_list) { free_baddr()
223 hole = list_entry(cur, struct hole, list); free_baddr()
243 // list_add_tail(&(hole->list), cur); free_baddr()
244 list_add(&(hole->list), cur); free_baddr()
/linux-4.1.27/arch/mn10300/kernel/
H A Dkgdb.c159 u8 *pc = (u8 *)regs->pc, *sp = (u8 *)(regs + 1), cur; kgdb_arch_do_singlestep() local
163 ret = probe_kernel_read(&cur, pc, 1); kgdb_arch_do_singlestep()
167 size = mn10300_kgdb_insn_sizes[cur]; kgdb_arch_do_singlestep()
173 switch (cur) { kgdb_arch_do_singlestep()
233 ret = probe_kernel_read(&cur, pc + 1, 1); kgdb_arch_do_singlestep()
237 if (cur >= 0xf0 && cur <= 0xf7) { kgdb_arch_do_singlestep()
239 switch (cur & 3) { kgdb_arch_do_singlestep()
246 } else if (cur == 0xfc) { kgdb_arch_do_singlestep()
252 } else if (cur == 0xfd) { kgdb_arch_do_singlestep()
266 ret = probe_kernel_read(&cur, pc + 1, 1); kgdb_arch_do_singlestep()
271 if (cur >= 0xe8 && cur <= 0xeb) { kgdb_arch_do_singlestep()
283 ret = probe_kernel_read(&cur, pc + 1, 1); kgdb_arch_do_singlestep()
287 if (cur == 0xff) { kgdb_arch_do_singlestep()
300 ret = probe_kernel_read(&cur, pc + 1, 1); kgdb_arch_do_singlestep()
304 if (cur == 0xff) { kgdb_arch_do_singlestep()
H A Dgdb-stub.c495 uint8_t cur, *pc, *sp; gdbstub_single_step() local
507 if (gdbstub_read_byte(pc, &cur) < 0) gdbstub_single_step()
510 gdbstub_bkpt("Single Step from %p { %02x }\n", pc, cur); gdbstub_single_step()
514 size = gdbstub_insn_sizes[cur]; gdbstub_single_step()
519 switch (cur) { gdbstub_single_step()
590 if (gdbstub_read_byte(pc + 1, &cur) < 0) gdbstub_single_step()
593 if (cur >= 0xf0 && cur <= 0xf7) { gdbstub_single_step()
595 switch (cur & 3) { gdbstub_single_step()
603 } else if (cur == 0xfc) { gdbstub_single_step()
616 } else if (cur == 0xfd) { gdbstub_single_step()
638 if (gdbstub_read_byte(pc + 1, &cur) < 0) gdbstub_single_step()
643 if (cur >= 0xe8 && cur <= 0xeb) { gdbstub_single_step()
654 if (gdbstub_read_byte(pc + 1, &cur) < 0) gdbstub_single_step()
657 if (cur == 0xff) { gdbstub_single_step()
673 if (gdbstub_read_byte(pc + 1, &cur) < 0) gdbstub_single_step()
675 if (cur == 0xff) { gdbstub_single_step()
/linux-4.1.27/arch/avr32/kernel/
H A Dkprobes.c167 struct kprobe *cur = kprobe_running(); post_kprobe_handler() local
169 pr_debug("post_kprobe_handler, cur=%p\n", cur); post_kprobe_handler()
171 if (!cur) post_kprobe_handler()
174 if (cur->post_handler) { post_kprobe_handler()
176 cur->post_handler(cur, regs, 0); post_kprobe_handler()
179 resume_execution(cur, regs); post_kprobe_handler()
188 struct kprobe *cur = kprobe_running(); kprobe_fault_handler() local
192 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) kprobe_fault_handler()
196 resume_execution(cur, regs); kprobe_fault_handler()
/linux-4.1.27/fs/cifs/
H A Dsmb2file.c96 struct smb2_lock_element *buf, *cur; smb2_unlock_range() local
118 cur = buf; smb2_unlock_range()
138 cur->Length = cpu_to_le64(li->length); smb2_unlock_range()
139 cur->Offset = cpu_to_le64(li->offset); smb2_unlock_range()
140 cur->Flags = cpu_to_le32(SMB2_LOCKFLAG_UNLOCK); smb2_unlock_range()
166 cur = buf; smb2_unlock_range()
169 cur++; smb2_unlock_range()
195 struct smb2_lock_element *cur = buf; smb2_push_mand_fdlocks() local
199 cur->Length = cpu_to_le64(li->length); smb2_push_mand_fdlocks()
200 cur->Offset = cpu_to_le64(li->offset); smb2_push_mand_fdlocks()
201 cur->Flags = cpu_to_le32(li->type | smb2_push_mand_fdlocks()
210 cur = buf; smb2_push_mand_fdlocks()
213 cur++; smb2_push_mand_fdlocks()
/linux-4.1.27/fs/efs/
H A Dinode.c202 int cur, last, first = 1; efs_map_block() local
227 cur = (last + dirext) % in->numextents; efs_map_block()
228 if ((result = efs_extent_check(&in->extents[cur], block, sb))) { efs_map_block()
229 in->lastextent = cur; efs_map_block()
244 cur = (last + indext) % indexts; efs_map_block()
247 * work out which direct extent contains `cur'. efs_map_block()
250 * indirect extent contained within direct extent `cur'. efs_map_block()
254 for(dirext = 0; cur < ibase && dirext < direxts; dirext++) { efs_map_block()
262 cur, block); efs_map_block()
269 (cur - ibase) / efs_map_block()
271 ioffset = (cur - ibase) % efs_map_block()
295 cur, iblock); efs_map_block()
302 in->lastextent = cur; efs_map_block()
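efs_map_block() probes extents starting from lastextent and wraps with a modulo: file access is mostly sequential, so the cached extent usually matches on the first probe. A toy version over integer block ranges, where the second lookup below hits immediately thanks to the cache:

#include <stdio.h>

#define NEXTENTS 4

struct extent { int first, last; };

static int find_extent(const struct extent *ext, int n, int *lastextent,
                       int block)
{
        int i;

        for (i = 0; i < n; i++) {
                int cur = (*lastextent + i) % n;        /* start at the cache */

                if (block >= ext[cur].first && block <= ext[cur].last) {
                        *lastextent = cur;              /* remember the hit */
                        return cur;
                }
        }
        return -1;
}

int main(void)
{
        struct extent ext[NEXTENTS] = { {0,9}, {10,19}, {20,29}, {30,39} };
        int last = 0;

        printf("block 25 in extent %d\n", find_extent(ext, NEXTENTS, &last, 25));
        printf("block 27 in extent %d\n", find_extent(ext, NEXTENTS, &last, 27));
        return 0;
}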
/linux-4.1.27/drivers/gpu/drm/savage/
H A Dsavage_bci.c364 unsigned int cur = dev_priv->current_dma_page; savage_dma_alloc() local
366 dev_priv->dma_pages[cur].used; savage_dma_alloc()
372 DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n", savage_dma_alloc()
373 cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages); savage_dma_alloc()
375 if (cur + nr_pages < dev_priv->nr_dma_pages) { savage_dma_alloc()
377 cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; savage_dma_alloc()
380 dev_priv->dma_pages[cur].used += rest; savage_dma_alloc()
382 cur++; savage_dma_alloc()
387 for (i = cur; i < dev_priv->nr_dma_pages; ++i) { savage_dma_alloc()
393 dev_priv->first_dma_page = cur = 0; savage_dma_alloc()
395 for (i = cur; nr_pages > 0; ++i, --nr_pages) { savage_dma_alloc()
410 DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n", savage_dma_alloc()
421 unsigned int cur = dev_priv->current_dma_page; savage_dma_flush() local
427 if (first == cur && savage_dma_flush()
428 dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed) savage_dma_flush()
433 pad = -dev_priv->dma_pages[cur].used & 1; savage_dma_flush()
434 align = -(dev_priv->dma_pages[cur].used + pad) & 7; savage_dma_flush()
436 DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, " savage_dma_flush()
438 first, cur, dev_priv->dma_pages[first].flushed, savage_dma_flush()
439 dev_priv->dma_pages[cur].used, pad, align); savage_dma_flush()
444 cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; savage_dma_flush()
445 dev_priv->dma_pages[cur].used += pad; savage_dma_flush()
458 len = (cur - first) * SAVAGE_DMA_PAGE_SIZE + savage_dma_flush()
459 dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed; savage_dma_flush()
470 dev_priv->dma_pages[cur].used += align; savage_dma_flush()
475 for (i = first; i < cur; ++i) { savage_dma_flush()
481 if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) { savage_dma_flush()
482 SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap); savage_dma_flush()
483 dev_priv->dma_pages[cur].used = 0; savage_dma_flush()
484 dev_priv->dma_pages[cur].flushed = 0; savage_dma_flush()
486 cur++; savage_dma_flush()
487 if (cur == dev_priv->nr_dma_pages) savage_dma_flush()
488 cur = 0; savage_dma_flush()
489 dev_priv->first_dma_page = dev_priv->current_dma_page = cur; savage_dma_flush()
491 dev_priv->first_dma_page = cur; savage_dma_flush()
492 dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used; savage_dma_flush()
496 DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur, savage_dma_flush()
497 dev_priv->dma_pages[cur].used, savage_dma_flush()
498 dev_priv->dma_pages[cur].flushed); savage_dma_flush()
510 DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n", savage_fake_dma_flush()
H A Dsavage_drv.h512 unsigned int cur = dev_priv->current_dma_page; \
514 dev_priv->dma_pages[cur].used; \
519 cur * SAVAGE_DMA_PAGE_SIZE + \
520 dev_priv->dma_pages[cur].used; \
521 if (dev_priv->dma_pages[cur].used == 0) \
522 savage_dma_wait(dev_priv, cur); \
523 dev_priv->dma_pages[cur].used += (n); \
536 unsigned int cur = dev_priv->current_dma_page; \
538 cur * SAVAGE_DMA_PAGE_SIZE + \
539 dev_priv->dma_pages[cur].used; \
/linux-4.1.27/include/linux/
H A Dmdio-mux.h15 int (*switch_fn) (int cur, int desired, void *data),
H A Ds3c_adc_battery.h6 int cur; /* mA */ member in struct:s3c_adc_bat_thresh
H A Dif_team.h270 struct team_port *cur; team_get_first_port_txable_rcu() local
274 cur = port; team_get_first_port_txable_rcu()
275 list_for_each_entry_continue_rcu(cur, &team->port_list, list) team_get_first_port_txable_rcu()
276 if (team_port_txable(cur)) team_get_first_port_txable_rcu()
277 return cur; list_for_each_entry_rcu()
278 list_for_each_entry_rcu(cur, &team->port_list, list) { list_for_each_entry_rcu()
279 if (cur == port) list_for_each_entry_rcu()
281 if (team_port_txable(cur)) list_for_each_entry_rcu()
282 return cur; list_for_each_entry_rcu()
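
team_get_first_port_txable_rcu() is a round-robin scan: try the current port, continue to the tail of the port list, then wrap from the head back up to the starting point. The same control flow over a plain array, minus the RCU list machinery:

#include <stddef.h>

struct port { int txable; };

static struct port *first_txable(struct port *ports, int n, int start)
{
        int i;

        if (ports[start].txable)        /* common case: current port works */
                return &ports[start];
        for (i = start + 1; i < n; i++) /* continue past the current port */
                if (ports[i].txable)
                        return &ports[i];
        for (i = 0; i < start; i++)     /* wrap around from the head */
                if (ports[i].txable)
                        return &ports[i];
        return NULL;
}
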
/linux-4.1.27/net/core/
H A Dnetpoll.c513 char *cur=opt, *delim; netpoll_parse_options() local
517 if (*cur != '@') { netpoll_parse_options()
518 if ((delim = strchr(cur, '@')) == NULL) netpoll_parse_options()
521 if (kstrtou16(cur, 10, &np->local_port)) netpoll_parse_options()
523 cur = delim; netpoll_parse_options()
525 cur++; netpoll_parse_options()
527 if (*cur != '/') { netpoll_parse_options()
529 if ((delim = strchr(cur, '/')) == NULL) netpoll_parse_options()
532 ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip); netpoll_parse_options()
537 cur = delim; netpoll_parse_options()
539 cur++; netpoll_parse_options()
541 if (*cur != ',') { netpoll_parse_options()
543 if ((delim = strchr(cur, ',')) == NULL) netpoll_parse_options()
546 strlcpy(np->dev_name, cur, sizeof(np->dev_name)); netpoll_parse_options()
547 cur = delim; netpoll_parse_options()
549 cur++; netpoll_parse_options()
551 if (*cur != '@') { netpoll_parse_options()
553 if ((delim = strchr(cur, '@')) == NULL) netpoll_parse_options()
556 if (*cur == ' ' || *cur == '\t') netpoll_parse_options()
558 if (kstrtou16(cur, 10, &np->remote_port)) netpoll_parse_options()
560 cur = delim; netpoll_parse_options()
562 cur++; netpoll_parse_options()
565 if ((delim = strchr(cur, '/')) == NULL) netpoll_parse_options()
568 ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip); netpoll_parse_options()
575 cur = delim + 1; netpoll_parse_options()
577 if (*cur != 0) { netpoll_parse_options()
579 if (!mac_pton(cur, np->remote_mac)) netpoll_parse_options()
588 np_info(np, "couldn't parse config at '%s'!\n", cur); netpoll_parse_options()
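
netpoll_parse_options() walks its "port@ip/dev,port@ip/mac" config string with a cur/delim pair: find the next delimiter, NUL-terminate the token in place, parse it, then advance cur past the delimiter. A reduced userspace sketch that parses only the leading "port@host" piece:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_port_at_host(char *opt, int *port, char **host)
{
        char *cur = opt, *delim;

        if (*cur != '@') {              /* optional port before the '@' */
                delim = strchr(cur, '@');
                if (!delim)
                        return -1;
                *delim = '\0';          /* terminate the port token */
                *port = atoi(cur);
                cur = delim;
        }
        cur++;                          /* skip the '@' itself */
        *host = cur;
        return 0;
}

int main(void)
{
        char buf[] = "6665@192.168.0.1";
        int port = 0;
        char *host;

        if (!parse_port_at_host(buf, &port, &host))
                printf("port=%d host=%s\n", port, host);
        return 0;
}
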
H A Dsysctl_net_core.c105 struct sd_flow_limit *cur; flow_limit_cpu_sysctl() local
119 len = sizeof(*cur) + netdev_flow_limit_table_len; for_each_possible_cpu()
122 cur = rcu_dereference_protected(sd->flow_limit, for_each_possible_cpu()
124 if (cur && !cpumask_test_cpu(i, mask)) { for_each_possible_cpu()
127 kfree(cur); for_each_possible_cpu()
128 } else if (!cur && cpumask_test_cpu(i, mask)) { for_each_possible_cpu()
129 cur = kzalloc_node(len, GFP_KERNEL, for_each_possible_cpu()
131 if (!cur) { for_each_possible_cpu()
136 cur->num_buckets = netdev_flow_limit_table_len; for_each_possible_cpu()
137 rcu_assign_pointer(sd->flow_limit, cur); for_each_possible_cpu()
/linux-4.1.27/drivers/firmware/google/
H A Dmemconsole.c103 size_t length, cur; found_memconsole() local
119 for (cur = 0; cur < length; cur++) { found_memconsole()
120 struct biosmemcon_ebda *hdr = phys_to_virt(address + cur); found_memconsole()
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
H A Dresource.c115 rdev->stats.qid.cur += rdev->qpmask + 1; c4iw_get_cqid()
146 if (rdev->stats.qid.cur > rdev->stats.qid.max) c4iw_get_cqid()
147 rdev->stats.qid.max = rdev->stats.qid.cur; c4iw_get_cqid()
189 rdev->stats.qid.cur += rdev->qpmask + 1; c4iw_get_qpid()
220 if (rdev->stats.qid.cur > rdev->stats.qid.max) c4iw_get_qpid()
221 rdev->stats.qid.max = rdev->stats.qid.cur; c4iw_get_qpid()
260 rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT); c4iw_pblpool_alloc()
261 if (rdev->stats.pbl.cur > rdev->stats.pbl.max) c4iw_pblpool_alloc()
262 rdev->stats.pbl.max = rdev->stats.pbl.cur; c4iw_pblpool_alloc()
273 rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT); c4iw_pblpool_free()
333 rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT); c4iw_rqtpool_alloc()
334 if (rdev->stats.rqt.cur > rdev->stats.rqt.max) c4iw_rqtpool_alloc()
335 rdev->stats.rqt.max = rdev->stats.rqt.cur; c4iw_rqtpool_alloc()
346 rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT); c4iw_rqtpool_free()
400 rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT); c4iw_ocqp_pool_alloc()
401 if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max) c4iw_ocqp_pool_alloc()
402 rdev->stats.ocqp.max = rdev->stats.ocqp.cur; c4iw_ocqp_pool_alloc()
412 rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT); c4iw_ocqp_pool_free()
/linux-4.1.27/drivers/gpu/drm/nouveau/dispnv04/
H A Doverlay.c41 struct nouveau_bo *cur; member in struct:nouveau_plane
104 struct nouveau_bo *cur = nv_plane->cur; nv10_update_plane() local
133 nv_plane->cur = nv_fb->nvbo; nv10_update_plane()
167 if (cur) nv10_update_plane()
168 nouveau_bo_unpin(cur); nv10_update_plane()
181 if (nv_plane->cur) { nv10_disable_plane()
182 nouveau_bo_unpin(nv_plane->cur); nv10_disable_plane()
183 nv_plane->cur = NULL; nv10_disable_plane()
212 if (plane->cur) { nv10_set_params()
353 struct nouveau_bo *cur = nv_plane->cur; nv04_update_plane() local
380 nv_plane->cur = nv_fb->nvbo; nv04_update_plane()
420 if (cur) nv04_update_plane()
421 nouveau_bo_unpin(cur); nv04_update_plane()
437 if (nv_plane->cur) { nv04_disable_plane()
438 nouveau_bo_unpin(nv_plane->cur); nv04_disable_plane()
439 nv_plane->cur = NULL; nv04_disable_plane()
/linux-4.1.27/drivers/gpu/drm/nouveau/
H A Dnouveau_dma.c35 mem = &mem[chan->dma.cur]; OUT_RINGp()
40 chan->dma.cur += nr_dwords; OUT_RINGp()
155 if (get <= chan->dma.cur) { nv50_dma_wait()
156 chan->dma.free = chan->dma.max - chan->dma.cur; nv50_dma_wait()
169 chan->dma.cur = 0; nv50_dma_wait()
173 chan->dma.free = get - chan->dma.cur - 1; nv50_dma_wait()
205 if (get <= chan->dma.cur) { nouveau_dma_wait()
219 chan->dma.free = chan->dma.max - chan->dma.cur; nouveau_dma_wait()
246 chan->dma.cur = nouveau_dma_wait()
256 chan->dma.free = get - chan->dma.cur - 1; nouveau_dma_wait()
H A Dnouveau_chan.h29 int cur; member in struct:nouveau_channel::__anon4153
H A Dnouveau_dma.h104 nouveau_bo_wr32(chan->push.buffer, chan->dma.cur++, data); OUT_RING()
149 if (chan->dma.cur == chan->dma.put) FIRE_RING()
155 (chan->dma.cur - chan->dma.put) << 2); FIRE_RING()
157 WRITE_PUT(chan->dma.cur); FIRE_RING()
160 chan->dma.put = chan->dma.cur; FIRE_RING()
166 chan->dma.cur = chan->dma.put; WIND_RING()
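
The nouveau fragments keep two ring positions: dma.cur is the writer's private cursor and dma.put is the position last published to the GPU; FIRE_RING publishes staged words by copying cur into put, and WIND_RING rolls cur back. A toy sketch of that split (no wrap or free-space tracking, which the real code handles via dma.max and dma.free):

struct ring {
        unsigned int cur;               /* next free slot, writer-private */
        unsigned int put;               /* last slot published to device */
        unsigned int data[1024];
};

static void out_ring(struct ring *r, unsigned int word)
{
        r->data[r->cur++] = word;       /* stage only; not yet visible */
}

static void fire_ring(struct ring *r)
{
        if (r->cur == r->put)           /* nothing staged */
                return;
        /* real hardware needs a memory barrier and an MMIO write here */
        r->put = r->cur;
}
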
/linux-4.1.27/drivers/video/fbdev/core/
H A Dfb_defio.c100 struct page *cur; fb_deferred_io_mkwrite() local
129 list_for_each_entry(cur, &fbdefio->pagelist, lru) { fb_deferred_io_mkwrite()
135 if (unlikely(cur == page)) fb_deferred_io_mkwrite()
137 else if (cur->index > page->index) fb_deferred_io_mkwrite()
141 list_add_tail(&page->lru, &cur->lru); fb_deferred_io_mkwrite()
183 struct page *cur; fb_deferred_io_work() local
188 list_for_each_entry(cur, &fbdefio->pagelist, lru) { fb_deferred_io_work()
189 lock_page(cur); fb_deferred_io_work()
190 page_mkclean(cur); fb_deferred_io_work()
191 unlock_page(cur); fb_deferred_io_work()
/linux-4.1.27/arch/ia64/kernel/
H A Dperfmon_default_smpl.c106 void *cur, *last; default_handler() local
118 cur = buf+hdr->hdr_cur_offs; default_handler()
126 if ((last - cur) < PFM_DEFAULT_MAX_ENTRY_SIZE) goto full; default_handler()
130 ent = (pfm_default_smpl_entry_t *)cur; default_handler()
141 DPRINT_ovfl(("[%d] count=%lu cur=%p last=%p free_bytes=%lu ovfl_pmd=%d ovfl_notify=%d npmds=%u\n", default_handler()
144 cur, last, default_handler()
145 last-cur, default_handler()
187 cur += entry_size; default_handler()
192 if ((last - cur) < PFM_DEFAULT_MAX_ENTRY_SIZE) goto full; default_handler()
204 DPRINT_ovfl(("sampling buffer full free=%lu, count=%lu, ovfl_notify=%d\n", last-cur, hdr->hdr_count, ovfl_notify)); default_handler()
H A Dkprobes.c897 struct kprobe *cur = kprobe_running(); post_kprobes_handler() local
900 if (!cur) post_kprobes_handler()
903 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { post_kprobes_handler()
905 cur->post_handler(cur, regs, 0); post_kprobes_handler()
908 resume_execution(cur, regs); post_kprobes_handler()
924 struct kprobe *cur = kprobe_running(); kprobe_fault_handler() local
938 regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL; kprobe_fault_handler()
939 ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf; kprobe_fault_handler()
953 kprobes_inc_nmissed_count(cur); kprobe_fault_handler()
962 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) kprobe_fault_handler()
/linux-4.1.27/tools/power/cpupower/utils/helpers/
H A Dbitmask.c277 /* current bit is 'cur', most recently seen range is [rbot, rtop] */ bitmask_displaylist()
278 unsigned int cur, rbot, rtop; bitmask_displaylist() local
282 rbot = cur = bitmask_first(bmp); bitmask_displaylist()
283 while (cur < bmp->size) { bitmask_displaylist()
284 rtop = cur; bitmask_displaylist()
285 cur = bitmask_next(bmp, cur+1); bitmask_displaylist()
286 if (cur >= bmp->size || cur > rtop + 1) { bitmask_displaylist()
288 rbot = cur; bitmask_displaylist()
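
bitmask_displaylist() folds set bits into "a,b-c" ranges by tracking the current bit (cur) and the most recent run [rbot, rtop], emitting a range whenever the next set bit is not adjacent. A standalone version over a plain bool array:

#include <stdio.h>
#include <stdbool.h>

static void display_list(const bool *bit, unsigned int size)
{
        unsigned int cur = 0, rbot, rtop;
        const char *sep = "";

        while (cur < size && !bit[cur])         /* first set bit */
                cur++;
        rbot = cur;
        while (cur < size) {
                rtop = cur;
                for (cur++; cur < size && !bit[cur]; cur++)
                        ;                       /* advance to next set bit */
                if (cur >= size || cur > rtop + 1) {
                        if (rbot == rtop)
                                printf("%s%u", sep, rbot);
                        else
                                printf("%s%u-%u", sep, rbot, rtop);
                        sep = ",";
                        rbot = cur;             /* start a new run */
                }
        }
        printf("\n");
}

int main(void)
{
        bool bits[10] = { [0] = 1, [1] = 1, [2] = 1,
                          [5] = 1, [7] = 1, [8] = 1 };
        display_list(bits, 10);                 /* prints 0-2,5,7-8 */
        return 0;
}
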
/linux-4.1.27/drivers/scsi/sym53c8xx_2/
H A Dsym_fw.c368 u32 *end, *cur; sym_fw_bind_script() local
371 cur = start; sym_fw_bind_script()
374 while (cur < end) { sym_fw_bind_script()
376 opcode = *cur; sym_fw_bind_script()
386 sym_name(np), (int) (cur-start)); sym_fw_bind_script()
387 ++cur; sym_fw_bind_script()
396 *cur++ = 0; sym_fw_bind_script()
401 printf ("%d: <%x>\n", (int) (cur-start), sym_fw_bind_script()
425 tmp1 = cur[1]; sym_fw_bind_script()
426 tmp2 = cur[2]; sym_fw_bind_script()
429 sym_name(np), (int) (cur-start)); sym_fw_bind_script()
502 *cur++ = cpu_to_scr(opcode); sym_fw_bind_script()
509 *cur = cpu_to_scr(*cur); sym_fw_bind_script()
510 ++cur; sym_fw_bind_script()
518 old = *cur; sym_fw_bind_script()
551 *cur++ = cpu_to_scr(new); sym_fw_bind_script()
/linux-4.1.27/Documentation/networking/timestamping/
H A Dtxtimestamp.c80 static void __print_timestamp(const char *name, struct timespec *cur, __print_timestamp() argument
83 if (!(cur->tv_sec | cur->tv_nsec)) __print_timestamp()
87 name, cur->tv_sec, cur->tv_nsec / 1000, __print_timestamp()
93 cur_ms = (long) cur->tv_sec * 1000 * 1000; __print_timestamp()
94 cur_ms += cur->tv_nsec / 1000; __print_timestamp()
102 ts_prev = *cur; __print_timestamp()
471 struct addrinfo *addrs, *cur; resolve_hostname() local
477 cur = addrs; resolve_hostname()
478 while (cur && !have_ipv4 && !have_ipv6) { resolve_hostname()
479 if (!have_ipv4 && cur->ai_family == AF_INET) { resolve_hostname()
480 memcpy(&daddr, cur->ai_addr, sizeof(daddr)); resolve_hostname()
484 else if (!have_ipv6 && cur->ai_family == AF_INET6) { resolve_hostname()
485 memcpy(&daddr6, cur->ai_addr, sizeof(daddr6)); resolve_hostname()
489 cur = cur->ai_next; resolve_hostname()
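
resolve_hostname() walks the list returned by getaddrinfo() through cur->ai_next, remembering the first address of the families it cares about. A compact variant of the same walk (this one collects the first IPv4 and the first IPv6 hit, rather than stopping at the first of either as the original does):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>

static int resolve(const char *name, struct sockaddr_in *v4,
                   struct sockaddr_in6 *v6)
{
        struct addrinfo *addrs, *cur;
        int have4 = 0, have6 = 0;

        if (getaddrinfo(name, NULL, NULL, &addrs))
                return -1;
        for (cur = addrs; cur && !(have4 && have6); cur = cur->ai_next) {
                if (!have4 && cur->ai_family == AF_INET) {
                        memcpy(v4, cur->ai_addr, sizeof(*v4));
                        have4 = 1;
                } else if (!have6 && cur->ai_family == AF_INET6) {
                        memcpy(v6, cur->ai_addr, sizeof(*v6));
                        have6 = 1;
                }
        }
        freeaddrinfo(addrs);
        return (have4 || have6) ? 0 : -1;
}
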
/linux-4.1.27/drivers/media/pci/mantis/
H A Dmantis_ioc.c81 u32 cur; mantis_gpio_set_bits() local
84 cur = mmread(MANTIS_GPIF_ADDR); mantis_gpio_set_bits()
86 mantis->gpio_status = cur | (1 << bitpos); mantis_gpio_set_bits()
88 mantis->gpio_status = cur & (~(1 << bitpos)); mantis_gpio_set_bits()
H A Dmantis_core.c180 u32 cur; gpio_set_bits() local
182 cur = mmread(MANTIS_GPIF_ADDR); gpio_set_bits()
184 mantis->gpio_status = cur | (1 << bitpos); gpio_set_bits()
186 mantis->gpio_status = cur & (~(1 << bitpos)); gpio_set_bits()
/linux-4.1.27/drivers/scsi/
H A Dsun3_scsi.h55 #define UDC_CURA_HI 0x1a /* cur reg A high */
56 #define UDC_CURA_LO 0x0a /* cur reg A low */
57 #define UDC_CURB_HI 0x12 /* cur reg B high */
58 #define UDC_CURB_LO 0x02 /* cur reg B low */
H A Du14-34f.c1035 char *cur = str, *pc; internal_setup() local
1047 while (cur && (pc = strchr(cur, ':'))) { internal_setup()
1054 if (!strncmp(cur, "lc:", 3)) linked_comm = val; internal_setup()
1055 else if (!strncmp(cur, "of:", 3)) have_old_firmware = val; internal_setup()
1056 else if (!strncmp(cur, "tm:", 3)) tag_mode = val; internal_setup()
1057 else if (!strncmp(cur, "tc:", 3)) tag_mode = val; internal_setup()
1058 else if (!strncmp(cur, "mq:", 3)) max_queue_depth = val; internal_setup()
1059 else if (!strncmp(cur, "ls:", 3)) link_statistics = val; internal_setup()
1060 else if (!strncmp(cur, "et:", 3)) ext_tran = val; internal_setup()
1062 if ((cur = strchr(cur, ','))) ++cur; internal_setup()
1070 char *cur = str; option_setup() local
1073 while (cur && isdigit(*cur) && i < MAX_INT_PARAM) { option_setup()
1074 ints[i++] = simple_strtoul(cur, NULL, 0); option_setup()
1076 if ((cur = strchr(cur, ',')) != NULL) cur++; option_setup()
1080 internal_setup(cur, ints); option_setup()
1667 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", reorder()
H A Deata.c1443 char *cur = str, *pc; internal_setup() local
1456 while (cur && (pc = strchr(cur, ':'))) { internal_setup()
1466 if (!strncmp(cur, "lc:", 3)) internal_setup()
1468 else if (!strncmp(cur, "tm:", 3)) internal_setup()
1470 else if (!strncmp(cur, "tc:", 3)) internal_setup()
1472 else if (!strncmp(cur, "mq:", 3)) internal_setup()
1474 else if (!strncmp(cur, "ls:", 3)) internal_setup()
1476 else if (!strncmp(cur, "et:", 3)) internal_setup()
1478 else if (!strncmp(cur, "rs:", 3)) internal_setup()
1480 else if (!strncmp(cur, "ip:", 3)) internal_setup()
1482 else if (!strncmp(cur, "ep:", 3)) internal_setup()
1484 else if (!strncmp(cur, "pp:", 3)) internal_setup()
1487 if ((cur = strchr(cur, ','))) internal_setup()
1488 ++cur; internal_setup()
1497 char *cur = str; option_setup() local
1500 while (cur && isdigit(*cur) && i < MAX_INT_PARAM) { option_setup()
1501 ints[i++] = simple_strtoul(cur, NULL, 0); option_setup()
1503 if ((cur = strchr(cur, ',')) != NULL) option_setup()
1504 cur++; option_setup()
1508 internal_setup(cur, ints); option_setup()
2226 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", reorder()
/linux-4.1.27/drivers/power/
H A Dapm_power.c122 union power_supply_propval cur; do_calculate_time() local
182 if (_MPSY_PROP(cur_avg_prop, &cur)) { do_calculate_time()
184 if (_MPSY_PROP(cur_now_prop, &cur)) do_calculate_time()
189 return ((cur.intval - full.intval) * 60L) / I.intval; do_calculate_time()
191 return -((cur.intval - empty.intval) * 60L) / I.intval; do_calculate_time()
218 union power_supply_propval empty, full, cur; calculate_capacity() local
257 if (_MPSY_PROP(avg_prop, &cur)) { calculate_capacity()
259 if (_MPSY_PROP(now_prop, &cur)) calculate_capacity()
270 ret = ((cur.intval - empty.intval) * 100L) / calculate_capacity()
H A Ds3c_adc_battery.c182 if (full_volt < calc_full_volt(lut->volt, lut->cur, s3c_adc_bat_get_property()
189 lut_volt1 = calc_full_volt(lut[0].volt, lut[0].cur, s3c_adc_bat_get_property()
191 lut_volt2 = calc_full_volt(lut[1].volt, lut[1].cur, s3c_adc_bat_get_property()
/linux-4.1.27/fs/xfs/
H A Dxfs_trans_ail.c145 struct xfs_ail_cursor *cur) xfs_trans_ail_cursor_init()
147 cur->item = NULL; xfs_trans_ail_cursor_init()
148 list_add_tail(&cur->list, &ailp->xa_cursors); xfs_trans_ail_cursor_init()
158 struct xfs_ail_cursor *cur) xfs_trans_ail_cursor_next()
160 struct xfs_log_item *lip = cur->item; xfs_trans_ail_cursor_next()
165 cur->item = xfs_ail_next(ailp, lip); xfs_trans_ail_cursor_next()
175 struct xfs_ail_cursor *cur) xfs_trans_ail_cursor_done()
177 cur->item = NULL; xfs_trans_ail_cursor_done()
178 list_del_init(&cur->list); xfs_trans_ail_cursor_done()
194 struct xfs_ail_cursor *cur; xfs_trans_ail_cursor_clear() local
196 list_for_each_entry(cur, &ailp->xa_cursors, list) { xfs_trans_ail_cursor_clear()
197 if (cur->item == lip) xfs_trans_ail_cursor_clear()
198 cur->item = (struct xfs_log_item *) xfs_trans_ail_cursor_clear()
199 ((__psint_t)cur->item | 1); xfs_trans_ail_cursor_clear()
212 struct xfs_ail_cursor *cur, xfs_trans_ail_cursor_first()
217 xfs_trans_ail_cursor_init(ailp, cur); xfs_trans_ail_cursor_first()
232 cur->item = xfs_ail_next(ailp, lip); xfs_trans_ail_cursor_first()
259 struct xfs_ail_cursor *cur, xfs_trans_ail_cursor_last()
262 xfs_trans_ail_cursor_init(ailp, cur); xfs_trans_ail_cursor_last()
263 cur->item = __xfs_trans_ail_cursor_last(ailp, lsn); xfs_trans_ail_cursor_last()
264 return cur->item; xfs_trans_ail_cursor_last()
276 struct xfs_ail_cursor *cur, xfs_ail_splice()
289 lip = cur ? cur->item : NULL; xfs_ail_splice()
300 if (cur) xfs_ail_splice()
301 cur->item = list_entry(list->prev, struct xfs_log_item, li_ail); xfs_ail_splice()
333 struct xfs_ail_cursor cur; xfsaild_push() local
363 lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn); xfsaild_push()
369 xfs_trans_ail_cursor_done(&cur); xfsaild_push()
449 lip = xfs_trans_ail_cursor_next(ailp, &cur); xfsaild_push()
454 xfs_trans_ail_cursor_done(&cur); xfsaild_push()
640 struct xfs_ail_cursor *cur,
673 xfs_ail_splice(ailp, cur, &tmp, lsn);
143 xfs_trans_ail_cursor_init( struct xfs_ail *ailp, struct xfs_ail_cursor *cur) xfs_trans_ail_cursor_init() argument
156 xfs_trans_ail_cursor_next( struct xfs_ail *ailp, struct xfs_ail_cursor *cur) xfs_trans_ail_cursor_next() argument
174 xfs_trans_ail_cursor_done( struct xfs_ail_cursor *cur) xfs_trans_ail_cursor_done() argument
210 xfs_trans_ail_cursor_first( struct xfs_ail *ailp, struct xfs_ail_cursor *cur, xfs_lsn_t lsn) xfs_trans_ail_cursor_first() argument
257 xfs_trans_ail_cursor_last( struct xfs_ail *ailp, struct xfs_ail_cursor *cur, xfs_lsn_t lsn) xfs_trans_ail_cursor_last() argument
274 xfs_ail_splice( struct xfs_ail *ailp, struct xfs_ail_cursor *cur, struct list_head *list, xfs_lsn_t lsn) xfs_ail_splice() argument
H A Dxfs_itable.c210 struct xfs_btree_cur *cur, /* btree cursor */ xfs_bulkstat_grab_ichunk()
220 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat); xfs_bulkstat_grab_ichunk()
229 error = xfs_inobt_get_rec(cur, irec, &stat); xfs_bulkstat_grab_ichunk()
232 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1); xfs_bulkstat_grab_ichunk()
353 xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */ xfs_bulkstat() local
404 cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, xfs_bulkstat()
413 error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r); xfs_bulkstat()
423 error = xfs_btree_increment(cur, 0, &stat); xfs_bulkstat()
426 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat); xfs_bulkstat()
440 error = xfs_inobt_get_rec(cur, &r, &stat); xfs_bulkstat()
458 error = xfs_btree_increment(cur, 0, &stat); xfs_bulkstat()
472 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_bulkstat()
559 struct xfs_btree_cur *cur = NULL; xfs_inumbers() local
583 cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, xfs_inumbers()
585 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE, xfs_inumbers()
593 error = xfs_inobt_get_rec(cur, &r, &stat); xfs_inumbers()
618 error = xfs_btree_increment(cur, 0, &stat); xfs_inumbers()
625 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); xfs_inumbers()
626 cur = NULL; xfs_inumbers()
645 if (cur) xfs_inumbers()
646 xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR : xfs_inumbers()
209 xfs_bulkstat_grab_ichunk( struct xfs_btree_cur *cur, xfs_agino_t agino, int *icount, struct xfs_inobt_rec_incore *irec) xfs_bulkstat_grab_ichunk() argument
H A Dxfs_trans_priv.h85 struct xfs_ail_cursor *cur,
129 struct xfs_ail_cursor *cur,
132 struct xfs_ail_cursor *cur,
135 struct xfs_ail_cursor *cur);
136 void xfs_trans_ail_cursor_done(struct xfs_ail_cursor *cur);
H A Dxfs_discard.c45 struct xfs_btree_cur *cur; xfs_trim_extents() local
57 cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT); xfs_trim_extents()
69 error = xfs_alloc_lookup_ge(cur, 0, xfs_trim_extents()
84 error = xfs_alloc_get_rec(cur, &fbno, &flen, &i); xfs_trim_extents()
132 error = xfs_btree_decrement(cur, 0, &i); xfs_trim_extents()
138 xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); xfs_trim_extents()
/linux-4.1.27/scripts/genksyms/
H A Dgenksyms.c563 struct string_list *cur; expand_and_crc_sym() local
566 cur = *(b++); expand_and_crc_sym()
567 switch (cur->tag) { expand_and_crc_sym()
570 fprintf(debugfile, "%s ", cur->string); expand_and_crc_sym()
571 crc = partial_crc32(cur->string, crc); expand_and_crc_sym()
577 subsym = find_symbol(cur->string, cur->tag, 0); expand_and_crc_sym()
581 fprintf(debugfile, "%s ", cur->string); expand_and_crc_sym()
582 crc = partial_crc32(cur->string, crc); expand_and_crc_sym()
594 subsym = find_symbol(cur->string, cur->tag, 0); expand_and_crc_sym()
599 symbol_types[cur->tag].name, expand_and_crc_sym()
600 cur->string); expand_and_crc_sym()
602 (symbol_types[cur->tag].name), expand_and_crc_sym()
603 mk_node(cur->string), expand_and_crc_sym()
608 add_symbol(cur->string, cur->tag, n, 0); expand_and_crc_sym()
613 symbol_types[cur->tag].name, expand_and_crc_sym()
614 cur->string); expand_and_crc_sym()
617 crc = partial_crc32(symbol_types[cur->tag].name, expand_and_crc_sym()
620 crc = partial_crc32(cur->string, crc); expand_and_crc_sym()
/linux-4.1.27/arch/arc/kernel/
H A Dkprobes.c256 struct kprobe *cur = kprobe_running(); arc_post_kprobe_handler() local
259 if (!cur) arc_post_kprobe_handler()
262 resume_execution(cur, addr, regs); arc_post_kprobe_handler()
265 arch_arm_kprobe(cur); arc_post_kprobe_handler()
274 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { arc_post_kprobe_handler()
276 cur->post_handler(cur, regs, 0); arc_post_kprobe_handler()
300 struct kprobe *cur = kprobe_running(); kprobe_fault_handler() local
312 resume_execution(cur, (unsigned long)cur->addr, regs); kprobe_fault_handler()
333 kprobes_inc_nmissed_count(cur); kprobe_fault_handler()
342 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) kprobe_fault_handler()
H A Dunwind.c449 const u8 *cur = *pcur; get_uleb128() local
453 for (shift = 0, value = 0; cur < end; shift += 7) { get_uleb128()
455 && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { get_uleb128()
456 cur = end + 1; get_uleb128()
459 value |= (uleb128_t) (*cur & 0x7f) << shift; get_uleb128()
460 if (!(*cur++ & 0x80)) get_uleb128()
463 *pcur = cur; get_uleb128()
470 const u8 *cur = *pcur; get_sleb128() local
474 for (shift = 0, value = 0; cur < end; shift += 7) { get_sleb128()
476 && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { get_sleb128()
477 cur = end + 1; get_sleb128()
480 value |= (sleb128_t) (*cur & 0x7f) << shift; get_sleb128()
481 if (!(*cur & 0x80)) { get_sleb128()
482 value |= -(*cur++ & 0x40) << shift; get_sleb128()
486 *pcur = cur; get_sleb128()
946 const u8 *cur = arc_unwind() local
949 startLoc = read_pointer(&cur, arc_unwind()
950 cur + tableSize, arc_unwind()
955 ptr = cur - tableSize; arc_unwind()
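
get_uleb128() above decodes DWARF's unsigned LEB128 encoding: seven payload bits per byte, least-significant group first, with the high bit set on every byte except the last. The same decoder without the kernel version's overflow guard:

#include <stdint.h>

static uint64_t get_uleb128(const uint8_t **pcur, const uint8_t *end)
{
        const uint8_t *cur = *pcur;
        uint64_t value = 0;
        unsigned int shift;

        for (shift = 0; cur < end; shift += 7) {
                value |= (uint64_t)(*cur & 0x7f) << shift;
                if (!(*cur++ & 0x80))   /* high bit clear: last byte */
                        break;
        }
        *pcur = cur;                    /* report how far we consumed */
        return value;
}
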
/linux-4.1.27/arch/alpha/kernel/
H A Dsrmcons.c96 char *cur; srmcons_do_write() local
99 for (cur = (char *)buf; remaining > 0; ) { srmcons_do_write()
106 if (cur[c] == '\n') srmcons_do_write()
110 result.as_long = callback_puts(0, cur, c); srmcons_do_write()
113 cur += result.bits.c; srmcons_do_write()
/linux-4.1.27/drivers/net/ethernet/qlogic/qlcnic/
H A Dqlcnic_hw.c470 struct qlcnic_mac_vlan_list *cur; qlcnic_nic_del_mac() local
476 cur = list_entry(head, struct qlcnic_mac_vlan_list, list); qlcnic_nic_del_mac()
477 if (ether_addr_equal(addr, cur->mac_addr)) { qlcnic_nic_del_mac()
478 err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr, qlcnic_nic_del_mac()
482 list_del(&cur->list); qlcnic_nic_del_mac()
483 kfree(cur); qlcnic_nic_del_mac()
493 struct qlcnic_mac_vlan_list *cur; qlcnic_nic_add_mac() local
498 cur = list_entry(head, struct qlcnic_mac_vlan_list, list); qlcnic_nic_add_mac()
499 if (ether_addr_equal(addr, cur->mac_addr) && qlcnic_nic_add_mac()
500 cur->vlan_id == vlan) qlcnic_nic_add_mac()
504 cur = kzalloc(sizeof(*cur), GFP_ATOMIC); qlcnic_nic_add_mac()
505 if (cur == NULL) qlcnic_nic_add_mac()
508 memcpy(cur->mac_addr, addr, ETH_ALEN); qlcnic_nic_add_mac()
511 cur->mac_addr, vlan, QLCNIC_MAC_ADD)) { qlcnic_nic_add_mac()
512 kfree(cur); qlcnic_nic_add_mac()
516 cur->vlan_id = vlan; qlcnic_nic_add_mac()
517 cur->mac_type = mac_type; qlcnic_nic_add_mac()
519 list_add_tail(&cur->list, &adapter->mac_list); qlcnic_nic_add_mac()
525 struct qlcnic_mac_vlan_list *cur; qlcnic_flush_mcast_mac() local
529 cur = list_entry(head, struct qlcnic_mac_vlan_list, list); qlcnic_flush_mcast_mac()
530 if (cur->mac_type != QLCNIC_MULTICAST_MAC) qlcnic_flush_mcast_mac()
533 qlcnic_sre_macaddr_change(adapter, cur->mac_addr, qlcnic_flush_mcast_mac()
534 cur->vlan_id, QLCNIC_MAC_DEL); qlcnic_flush_mcast_mac()
535 list_del(&cur->list); qlcnic_flush_mcast_mac()
536 kfree(cur); qlcnic_flush_mcast_mac()
630 struct qlcnic_mac_vlan_list *cur; qlcnic_82xx_free_mac_list() local
633 cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list); qlcnic_82xx_free_mac_list()
635 cur->mac_addr, 0, QLCNIC_MAC_DEL); qlcnic_82xx_free_mac_list()
636 list_del(&cur->list); qlcnic_82xx_free_mac_list()
637 kfree(cur); qlcnic_82xx_free_mac_list()
/linux-4.1.27/sound/core/seq/
H A Dseq_prioq.c149 struct snd_seq_event_cell *cur, *prev; snd_seq_prioq_cell_in() local
180 cur = f->head; /* cursor */ snd_seq_prioq_cell_in()
183 while (cur != NULL) { snd_seq_prioq_cell_in()
185 int rel = compare_timestamp_rel(&cell->event, &cur->event); snd_seq_prioq_cell_in()
194 prev = cur; snd_seq_prioq_cell_in()
195 cur = cur->next; snd_seq_prioq_cell_in()
206 cell->next = cur; snd_seq_prioq_cell_in()
208 if (f->head == cur) /* this is the first cell, set head to it */ snd_seq_prioq_cell_in()
210 if (cur == NULL) /* reached end of the list */ snd_seq_prioq_cell_in()
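
snd_seq_prioq_cell_in() is the textbook prev/cur insertion into a sorted singly-linked list: advance while the new element sorts after cur, then splice in between prev and cur (ties go after existing entries to keep FIFO order). A generic sketch with an integer key in place of the event timestamp comparison:

#include <stddef.h>

struct cell {
        int key;
        struct cell *next;
};

static void sorted_insert(struct cell **head, struct cell *cell)
{
        struct cell *prev = NULL, *cur = *head;

        while (cur && cur->key <= cell->key) {  /* '<=' keeps FIFO on ties */
                prev = cur;
                cur = cur->next;
        }
        cell->next = cur;
        if (prev)
                prev->next = cell;
        else
                *head = cell;                   /* new first element */
}
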
H A Dseq_clientmgr.h75 int cur; member in struct:snd_seq_usage
/linux-4.1.27/mm/
H A Dslob.c219 slob_t *prev, *cur, *aligned = NULL; slob_page_alloc() local
222 for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) { slob_page_alloc()
223 slobidx_t avail = slob_units(cur); slob_page_alloc()
226 aligned = (slob_t *)ALIGN((unsigned long)cur, align); slob_page_alloc()
227 delta = aligned - cur; slob_page_alloc()
233 next = slob_next(cur); slob_page_alloc()
235 set_slob(cur, delta, aligned); slob_page_alloc()
236 prev = cur; slob_page_alloc()
237 cur = aligned; slob_page_alloc()
238 avail = slob_units(cur); slob_page_alloc()
241 next = slob_next(cur); slob_page_alloc()
249 set_slob(prev, slob_units(prev), cur + units); slob_page_alloc()
251 sp->freelist = cur + units; slob_page_alloc()
252 set_slob(cur + units, avail - units, next); slob_page_alloc()
258 return cur; slob_page_alloc()
260 if (slob_last(cur)) slob_page_alloc()
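
slob_page_alloc() walks an intrusive free list with a prev/cur pair, carving the request out of the first block big enough and leaving the split-off remainder on the list. A heavily simplified first-fit sketch, counting sizes in sizeof(struct fblock) granules and eliding slob's alignment handling and page bookkeeping:

#include <stddef.h>

struct fblock {
        size_t units;                   /* block size in fblock granules */
        struct fblock *next;
};

static struct fblock *first_fit(struct fblock **freelist, size_t units)
{
        struct fblock *prev = NULL, *cur;

        for (cur = *freelist; cur; prev = cur, cur = cur->next) {
                if (cur->units < units)
                        continue;
                if (cur->units == units) {      /* exact fit: unlink */
                        if (prev)
                                prev->next = cur->next;
                        else
                                *freelist = cur->next;
                } else {                        /* split: tail stays free */
                        struct fblock *rest = cur + units;

                        rest->units = cur->units - units;
                        rest->next = cur->next;
                        if (prev)
                                prev->next = rest;
                        else
                                *freelist = rest;
                        cur->units = units;
                }
                return cur;
        }
        return NULL;                            /* nothing fits */
}
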
H A Dbootmem.c175 unsigned long *map, start, end, pages, cur, count = 0; free_all_bootmem_core() local
217 cur = start; free_all_bootmem_core()
220 while (vec && cur != start) { free_all_bootmem_core()
222 page = pfn_to_page(cur); free_all_bootmem_core()
223 __free_pages_bootmem(page, cur, 0); free_all_bootmem_core()
227 ++cur; free_all_bootmem_core()
232 cur = bdata->node_min_pfn; free_all_bootmem_core()
238 __free_pages_bootmem(page++, cur++, 0); free_all_bootmem_core()
/linux-4.1.27/crypto/
H A Dalgif_aead.c27 unsigned int cur; member in struct:aead_sg_list
85 for (i = 0; i < sgl->cur; i++) { aead_put_sgl()
92 sgl->cur = 0; aead_put_sgl()
219 sg = sgl->sg + sgl->cur - 1; aead_sendmsg()
250 if (sgl->cur >= ALG_MAX_PAGES) { aead_sendmsg()
256 sg = sgl->sg + sgl->cur; aead_sendmsg()
277 sgl->cur++; aead_sendmsg()
310 if (sgl->cur >= ALG_MAX_PAGES) aead_sendpage()
330 sg_set_page(sgl->sg + sgl->cur, page, size, offset); aead_sendpage()
331 sgl->cur++; aead_sendpage()
462 for (i = 0; i < ctx->tsgl.cur; i++) { aead_recvmsg()
469 if (i >= ctx->tsgl.cur) aead_recvmsg()
621 ctx->tsgl.cur = 0; aead_accept_parent()
H A Dalgif_skcipher.c29 int cur; member in struct:skcipher_sg_list
142 if (!sg || sgl->cur >= MAX_SGL_ENTS) { skcipher_alloc_sgl()
150 sgl->cur = 0; skcipher_alloc_sgl()
174 for (i = 0; i < sgl->cur; i++) { skcipher_pull_sgl()
361 sg = sgl->sg + sgl->cur - 1; skcipher_sendmsg()
395 if (sgl->cur) skcipher_sendmsg()
396 sg_unmark_end(sg + sgl->cur - 1); skcipher_sendmsg()
398 i = sgl->cur; skcipher_sendmsg()
419 sgl->cur++; skcipher_sendmsg()
420 } while (len && sgl->cur < MAX_SGL_ENTS); skcipher_sendmsg()
423 sg_mark_end(sg + sgl->cur - 1); skcipher_sendmsg()
471 if (sgl->cur) skcipher_sendpage()
472 sg_unmark_end(sgl->sg + sgl->cur - 1); skcipher_sendpage()
474 sg_mark_end(sgl->sg + sgl->cur); skcipher_sendpage()
476 sg_set_page(sgl->sg + sgl->cur, page, size, offset); skcipher_sendpage()
477 sgl->cur++; skcipher_sendpage()
/linux-4.1.27/arch/powerpc/kernel/
H A Dkprobes.c365 struct kprobe *cur = kprobe_running(); post_kprobe_handler() local
368 if (!cur) post_kprobe_handler()
372 if (((unsigned long)cur->ainsn.insn + 4) != regs->nip) post_kprobe_handler()
375 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { post_kprobe_handler()
377 cur->post_handler(cur, regs, 0); post_kprobe_handler()
381 regs->nip = (unsigned long)cur->addr + 4; post_kprobe_handler()
406 struct kprobe *cur = kprobe_running(); kprobe_fault_handler() local
420 regs->nip = (unsigned long)cur->addr; kprobe_fault_handler()
436 kprobes_inc_nmissed_count(cur); kprobe_fault_handler()
445 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) kprobe_fault_handler()
/linux-4.1.27/drivers/media/tuners/
H A Dmsi001.c263 ret = msi001_set_gain(s, s->lna_gain->cur.val, s->mixer_gain->cur.val, msi001_set_tuner()
264 s->if_gain->cur.val); msi001_set_tuner()
400 s->mixer_gain->cur.val, s->if_gain->cur.val); msi001_s_ctrl()
403 ret = msi001_set_gain(s, s->lna_gain->cur.val, msi001_s_ctrl()
404 s->mixer_gain->val, s->if_gain->cur.val); msi001_s_ctrl()
407 ret = msi001_set_gain(s, s->lna_gain->cur.val, msi001_s_ctrl()
408 s->mixer_gain->cur.val, s->if_gain->val); msi001_s_ctrl()
H A De4000.c278 s->lna_gain_auto->cur.val, s->lna_gain_auto->val, e4000_set_lna_gain()
279 s->lna_gain->cur.val, s->lna_gain->val); e4000_set_lna_gain()
281 if (s->lna_gain_auto->val && s->if_gain_auto->cur.val) e4000_set_lna_gain()
285 else if (s->if_gain_auto->cur.val) e4000_set_lna_gain()
313 s->mixer_gain_auto->cur.val, s->mixer_gain_auto->val, e4000_set_mixer_gain()
314 s->mixer_gain->cur.val, s->mixer_gain->val); e4000_set_mixer_gain()
345 s->if_gain_auto->cur.val, s->if_gain_auto->val, e4000_set_if_gain()
346 s->if_gain->cur.val, s->if_gain->val); e4000_set_if_gain()
348 if (s->if_gain_auto->val && s->lna_gain_auto->cur.val) e4000_set_if_gain()
350 else if (s->lna_gain_auto->cur.val) e4000_set_if_gain()
/linux-4.1.27/kernel/
H A Dsmpboot.c207 struct smp_hotplug_thread *cur; smpboot_create_threads() local
211 list_for_each_entry(cur, &hotplug_threads, list) { smpboot_create_threads()
212 ret = __smpboot_create_thread(cur, cpu); smpboot_create_threads()
231 struct smp_hotplug_thread *cur; smpboot_unpark_threads() local
234 list_for_each_entry(cur, &hotplug_threads, list) smpboot_unpark_threads()
235 smpboot_unpark_thread(cur, cpu); smpboot_unpark_threads()
249 struct smp_hotplug_thread *cur; smpboot_park_threads() local
252 list_for_each_entry_reverse(cur, &hotplug_threads, list) smpboot_park_threads()
253 smpboot_park_thread(cur, cpu); smpboot_park_threads()
H A Dkexec.c1641 char *cur = cmdline, *tmp; parse_crashkernel_mem() local
1648 start = memparse(cur, &tmp); parse_crashkernel_mem()
1649 if (cur == tmp) { parse_crashkernel_mem()
1653 cur = tmp; parse_crashkernel_mem()
1654 if (*cur != '-') { parse_crashkernel_mem()
1658 cur++; parse_crashkernel_mem()
1661 if (*cur != ':') { parse_crashkernel_mem()
1662 end = memparse(cur, &tmp); parse_crashkernel_mem()
1663 if (cur == tmp) { parse_crashkernel_mem()
1667 cur = tmp; parse_crashkernel_mem()
1674 if (*cur != ':') { parse_crashkernel_mem()
1678 cur++; parse_crashkernel_mem()
1680 size = memparse(cur, &tmp); parse_crashkernel_mem()
1681 if (cur == tmp) { parse_crashkernel_mem()
1685 cur = tmp; parse_crashkernel_mem()
1696 } while (*cur++ == ','); parse_crashkernel_mem()
1699 while (*cur && *cur != ' ' && *cur != '@') parse_crashkernel_mem()
1700 cur++; parse_crashkernel_mem()
1701 if (*cur == '@') { parse_crashkernel_mem()
1702 cur++; parse_crashkernel_mem()
1703 *crash_base = memparse(cur, &tmp); parse_crashkernel_mem()
1704 if (cur == tmp) { parse_crashkernel_mem()
1725 char *cur = cmdline; parse_crashkernel_simple() local
1727 *crash_size = memparse(cmdline, &cur); parse_crashkernel_simple()
1728 if (cmdline == cur) { parse_crashkernel_simple()
1733 if (*cur == '@') parse_crashkernel_simple()
1734 *crash_base = memparse(cur+1, &cur); parse_crashkernel_simple()
1735 else if (*cur != ' ' && *cur != '\0') { parse_crashkernel_simple()
1763 char *cur = cmdline; parse_crashkernel_suffix() local
1765 *crash_size = memparse(cmdline, &cur); parse_crashkernel_suffix()
1766 if (cmdline == cur) { parse_crashkernel_suffix()
1772 if (strncmp(cur, suffix, strlen(suffix))) { parse_crashkernel_suffix()
1776 cur += strlen(suffix); parse_crashkernel_suffix()
1777 if (*cur != ' ' && *cur != '\0') { parse_crashkernel_suffix()
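
All three crashkernel parsers lean on memparse()'s endptr convention: parse a number, verify that cur actually advanced, then dispatch on the delimiter left under cur. A strtoull equivalent for the simple "size[@offset]" form (memparse's K/M/G suffix handling omitted):

#include <stdio.h>
#include <stdlib.h>

static int parse_simple(const char *cmdline,
                        unsigned long long *size, unsigned long long *base)
{
        char *cur;

        *size = strtoull(cmdline, &cur, 0);
        if (cur == cmdline)             /* nothing consumed: parse error */
                return -1;
        if (*cur == '@')
                *base = strtoull(cur + 1, &cur, 0);
        else if (*cur != ' ' && *cur != '\0')
                return -1;              /* trailing junk */
        return 0;
}

int main(void)
{
        unsigned long long size, base = 0;

        if (!parse_simple("0x4000000@0x1000000", &size, &base))
                printf("size=%llx base=%llx\n", size, base);
        return 0;
}
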
/linux-4.1.27/drivers/gpu/drm/atmel-hlcdc/
H A Datmel_hlcdc_layer.c199 flip = dma->queue ? dma->queue : dma->cur; atmel_hlcdc_layer_irq()
265 atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur); atmel_hlcdc_layer_irq()
266 dma->cur = dma->queue; atmel_hlcdc_layer_irq()
271 atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur); atmel_hlcdc_layer_irq()
272 dma->cur = NULL; atmel_hlcdc_layer_irq()
283 if (dma->cur) atmel_hlcdc_layer_irq()
285 dma->cur); atmel_hlcdc_layer_irq()
287 dma->cur = NULL; atmel_hlcdc_layer_irq()
294 if (!dma->cur) atmel_hlcdc_layer_irq()
321 if (dma->cur) { atmel_hlcdc_layer_disable()
322 atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur); atmel_hlcdc_layer_disable()
323 dma->cur = NULL; atmel_hlcdc_layer_disable()
/linux-4.1.27/arch/x86/kernel/cpu/
H A Dintel_pt.h92 * @cur: current topa table
94 * @cur_idx: current output region's index within @cur table
108 struct topa *first, *last, *cur; member in struct:pt_buffer
H A Dperf_event_intel_pt.c336 buf->first = buf->last = buf->cur = topa; topa_insert_table()
449 if (buf->cur_idx == buf->cur->last) { pt_buffer_advance()
450 if (buf->cur == buf->last) pt_buffer_advance()
451 buf->cur = buf->first; pt_buffer_advance()
453 buf->cur = list_entry(buf->cur->list.next, struct topa, pt_buffer_advance()
471 base = buf->cur->offset + buf->output_off; pt_update_head()
475 base += sizes(buf->cur->table[topa_idx].size); pt_update_head()
495 return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT); pt_buffer_region()
504 return sizes(buf->cur->table[buf->cur_idx].size); pt_buffer_region_size()
534 buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) { pt_handle_status()
572 buf->cur = phys_to_virt(base_topa); pt_read_offset()
626 sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) pt_buffer_reset_markers()
676 struct topa *cur = buf->first, *prev = buf->last; pt_buffer_setup_topa_index() local
677 struct topa_entry *te_cur = TOPA_ENTRY(cur, 0), pt_buffer_setup_topa_index()
690 if (idx == cur->last - 1) { pt_buffer_setup_topa_index()
693 cur = list_entry(cur->list.next, struct topa, list); pt_buffer_setup_topa_index()
697 te_cur = TOPA_ENTRY(cur, idx); pt_buffer_setup_topa_index()
720 buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK); pt_buffer_reset_offsets()
722 (unsigned long)buf->cur) / sizeof(struct topa_entry); pt_buffer_reset_offsets()
723 buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1); pt_buffer_reset_offsets()
912 pt_config_buffer(buf->cur->table, buf->cur_idx, intel_pt_interrupt()
936 pt_config_buffer(buf->cur->table, buf->cur_idx, pt_event_start()
/linux-4.1.27/arch/sparc/kernel/
H A Dkprobes.c298 struct kprobe *cur = kprobe_running(); post_kprobe_handler() local
301 if (!cur) post_kprobe_handler()
304 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { post_kprobe_handler()
306 cur->post_handler(cur, regs, 0); post_kprobe_handler()
309 resume_execution(cur, regs, kcb); post_kprobe_handler()
325 struct kprobe *cur = kprobe_running(); kprobe_fault_handler() local
339 regs->tpc = (unsigned long)cur->addr; kprobe_fault_handler()
356 kprobes_inc_nmissed_count(cur); kprobe_fault_handler()
365 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) kprobe_fault_handler()
/linux-4.1.27/arch/arm/probes/kprobes/
H A Dcore.c247 struct kprobe *p, *cur; kprobe_handler() local
251 cur = kprobe_running(); kprobe_handler()
268 if (cur) { kprobe_handler()
314 } else if (cur) { kprobe_handler()
316 if (cur->break_handler && cur->break_handler(cur, regs)) { kprobe_handler()
318 singlestep(cur, regs, kcb); kprobe_handler()
319 if (cur->post_handler) { kprobe_handler()
321 cur->post_handler(cur, regs, 0); kprobe_handler()
346 struct kprobe *cur = kprobe_running(); kprobe_fault_handler() local
359 regs->ARM_pc = (long)cur->addr; kprobe_fault_handler()
374 kprobes_inc_nmissed_count(cur); kprobe_fault_handler()
383 if (cur->fault_handler && cur->fault_handler(cur, regs, fsr)) kprobe_fault_handler()
/linux-4.1.27/fs/btrfs/
H A Dextent_io.c2918 u64 cur = start; __do_readpage() local
2958 while (cur <= end) { __do_readpage()
2962 if (cur >= last_byte) { __do_readpage()
2971 set_extent_uptodate(tree, cur, cur + iosize - 1, __do_readpage()
2974 unlock_extent_cached(tree, cur, __do_readpage()
2975 cur + iosize - 1, __do_readpage()
2979 em = __get_extent_map(inode, page, pg_offset, cur, __do_readpage()
2980 end - cur + 1, get_extent, em_cached); __do_readpage()
2984 unlock_extent(tree, cur, end); __do_readpage()
2987 extent_offset = cur - em->start; __do_readpage()
2988 BUG_ON(extent_map_end(em) <= cur); __do_readpage()
2989 BUG_ON(end < cur); __do_readpage()
2997 iosize = min(extent_map_end(em) - cur, end - cur + 1); __do_readpage()
3067 set_extent_uptodate(tree, cur, cur + iosize - 1, __do_readpage()
3069 unlock_extent_cached(tree, cur, cur + iosize - 1, __do_readpage()
3071 cur = cur + iosize; __do_readpage()
3076 if (test_range_bit(tree, cur, cur_end, __do_readpage()
3080 unlock_extent(tree, cur, cur + iosize - 1); __do_readpage()
3081 cur = cur + iosize; __do_readpage()
3091 unlock_extent(tree, cur, cur + iosize - 1); __do_readpage()
3092 cur = cur + iosize; __do_readpage()
3111 unlock_extent(tree, cur, cur + iosize - 1); __do_readpage()
3113 cur = cur + iosize; __do_readpage()
3375 u64 cur = start; __extent_writepage_io() local
3422 while (cur <= end) { __extent_writepage_io()
3424 if (cur >= i_size) { __extent_writepage_io()
3426 tree->ops->writepage_end_io_hook(page, cur, __extent_writepage_io()
3430 em = epd->get_extent(inode, page, pg_offset, cur, __extent_writepage_io()
3431 end - cur + 1, 1); __extent_writepage_io()
3438 extent_offset = cur - em->start; __extent_writepage_io()
3440 BUG_ON(em_end <= cur); __extent_writepage_io()
3441 BUG_ON(end < cur); __extent_writepage_io()
3442 iosize = min(em_end - cur, end - cur + 1); __extent_writepage_io()
3463 tree->ops->writepage_end_io_hook(page, cur, __extent_writepage_io()
3464 cur + iosize - 1, __extent_writepage_io()
3474 cur += iosize; __extent_writepage_io()
3480 ret = tree->ops->writepage_io_hook(page, cur, __extent_writepage_io()
3481 cur + iosize - 1); __extent_writepage_io()
3490 set_range_writeback(tree, cur, cur + iosize - 1); __extent_writepage_io()
3493 "page %lu not writeback, cur %llu end %llu", __extent_writepage_io()
3494 page->index, cur, end); __extent_writepage_io()
3505 cur = cur + iosize; __extent_writepage_io()
5294 size_t cur; read_extent_buffer() local
5310 cur = min(len, (PAGE_CACHE_SIZE - offset)); read_extent_buffer()
5312 memcpy(dst, kaddr + offset, cur); read_extent_buffer()
5314 dst += cur; read_extent_buffer()
5315 len -= cur; read_extent_buffer()
5325 size_t cur; read_extent_buffer_to_user() local
5342 cur = min(len, (PAGE_CACHE_SIZE - offset)); read_extent_buffer_to_user()
5344 if (copy_to_user(dst, kaddr + offset, cur)) { read_extent_buffer_to_user()
5349 dst += cur; read_extent_buffer_to_user()
5350 len -= cur; read_extent_buffer_to_user()
5400 size_t cur; memcmp_extent_buffer() local
5417 cur = min(len, (PAGE_CACHE_SIZE - offset)); memcmp_extent_buffer()
5420 ret = memcmp(ptr, kaddr + offset, cur); memcmp_extent_buffer()
5424 ptr += cur; memcmp_extent_buffer()
5425 len -= cur; memcmp_extent_buffer()
5435 size_t cur; write_extent_buffer() local
5452 cur = min(len, PAGE_CACHE_SIZE - offset); write_extent_buffer()
5454 memcpy(kaddr + offset, src, cur); write_extent_buffer()
5456 src += cur; write_extent_buffer()
5457 len -= cur; write_extent_buffer()
5466 size_t cur; memset_extent_buffer() local
5482 cur = min(len, PAGE_CACHE_SIZE - offset); memset_extent_buffer()
5484 memset(kaddr + offset, c, cur); memset_extent_buffer()
5486 len -= cur; memset_extent_buffer()
5497 size_t cur; copy_extent_buffer() local
5513 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset)); copy_extent_buffer()
5516 read_extent_buffer(src, kaddr + offset, src_offset, cur); copy_extent_buffer()
5518 src_offset += cur; copy_extent_buffer()
5519 len -= cur; copy_extent_buffer()
5556 size_t cur; memcpy_extent_buffer() local
5583 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - memcpy_extent_buffer()
5585 cur = min_t(unsigned long, cur, memcpy_extent_buffer()
5589 dst_off_in_page, src_off_in_page, cur); memcpy_extent_buffer()
5591 src_offset += cur; memcpy_extent_buffer()
5592 dst_offset += cur; memcpy_extent_buffer()
5593 len -= cur; memcpy_extent_buffer()
5600 size_t cur; memmove_extent_buffer() local
5632 cur = min_t(unsigned long, len, src_off_in_page + 1); memmove_extent_buffer()
5633 cur = min(cur, dst_off_in_page + 1); memmove_extent_buffer()
5635 dst_off_in_page - cur + 1, memmove_extent_buffer()
5636 src_off_in_page - cur + 1, cur); memmove_extent_buffer()
5638 dst_end -= cur; memmove_extent_buffer()
5639 src_end -= cur; memmove_extent_buffer()
5640 len -= cur; memmove_extent_buffer()
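
read_extent_buffer(), write_extent_buffer() and the memcmp/memset/copy variants above all share one loop shape: clamp each step to what remains in the current page (cur = min(len, PAGE_CACHE_SIZE - offset)), operate on that much, then move to the next page with the offset reset to zero. The skeleton over a plain array of page pointers:

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static void read_buffer(char *dst, char *const *pages,
                        size_t start, size_t len)
{
        size_t i = start / PAGE_SIZE;
        size_t offset = start % PAGE_SIZE;

        while (len > 0) {
                size_t cur = len < PAGE_SIZE - offset ?
                             len : PAGE_SIZE - offset;  /* clamp to page */

                memcpy(dst, pages[i] + offset, cur);
                dst += cur;
                len -= cur;
                offset = 0;             /* later pages start at byte 0 */
                i++;
        }
}
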
H A Dsend.c860 u32 cur = 0; iterate_inode_ref() local
896 while (cur < total) { iterate_inode_ref()
900 iref = (struct btrfs_inode_ref *)(ptr + cur); iterate_inode_ref()
906 extref = (struct btrfs_inode_extref *)(ptr + cur); iterate_inode_ref()
945 cur += elem_size + name_len; iterate_inode_ref()
983 u32 cur; iterate_dir_item() local
1007 cur = 0; iterate_dir_item()
1012 while (cur < total) { iterate_dir_item()
1064 cur += len; iterate_dir_item()
2024 struct name_cache_entry *cur; name_cache_search() local
2030 list_for_each_entry(cur, nce_head, radix_list) { list_for_each_entry()
2031 if (cur->ino == ino && cur->gen == gen) list_for_each_entry()
2032 return cur; list_for_each_entry()
2742 struct recorded_ref *cur; __free_recorded_refs() local
2745 cur = list_entry(head->next, struct recorded_ref, list); __free_recorded_refs()
2746 fs_path_free(cur->full_path); __free_recorded_refs()
2747 list_del(&cur->list); __free_recorded_refs()
2748 kfree(cur); __free_recorded_refs()
3008 struct recorded_ref *cur; add_pending_dir_move() local
3036 list_for_each_entry(cur, deleted_refs, list) { list_for_each_entry()
3037 ret = dup_ref(cur, &pm->update_refs); list_for_each_entry()
3041 list_for_each_entry(cur, new_refs, list) { list_for_each_entry()
3042 ret = dup_ref(cur, &pm->update_refs); list_for_each_entry()
3090 struct recorded_ref *cur; apply_dir_move() local
3174 list_for_each_entry(cur, &pm->update_refs, list) { apply_dir_move()
3175 if (cur->dir == rmdir_ino) apply_dir_move()
3177 ret = send_utimes(sctx, cur->dir, cur->dir_gen); apply_dir_move()
3451 struct recorded_ref *cur; process_recorded_refs() local
3509 list_for_each_entry(cur, &sctx->new_refs, list) { process_recorded_refs()
3517 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); process_recorded_refs()
3527 if (cur == cur2) process_recorded_refs()
3529 if (cur2->dir == cur->dir) { process_recorded_refs()
3540 ret = did_create_dir(sctx, cur->dir); process_recorded_refs()
3544 ret = send_create_inode(sctx, cur->dir); process_recorded_refs()
3556 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen, process_recorded_refs()
3557 cur->name, cur->name_len, process_recorded_refs()
3563 ow_inode, cur->dir, cur->name, process_recorded_refs()
3564 cur->name_len); process_recorded_refs()
3571 cur->full_path); process_recorded_refs()
3590 ret = send_unlink(sctx, cur->full_path); process_recorded_refs()
3597 ret = wait_for_dest_dir_move(sctx, cur, is_orphan); process_recorded_refs()
3612 ret = send_rename(sctx, valid_path, cur->full_path); process_recorded_refs()
3616 ret = fs_path_copy(valid_path, cur->full_path); process_recorded_refs()
3626 ret = wait_for_parent_move(sctx, cur); process_recorded_refs()
3633 cur->full_path); process_recorded_refs()
3636 cur->full_path); process_recorded_refs()
3641 ret = send_link(sctx, cur->full_path, process_recorded_refs()
3647 ret = dup_ref(cur, &check_dirs); process_recorded_refs()
3675 list_for_each_entry(cur, &sctx->deleted_refs, list) { process_recorded_refs()
3676 ret = dup_ref(cur, &check_dirs); process_recorded_refs()
3685 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref, process_recorded_refs()
3687 ret = dup_ref(cur, &check_dirs); process_recorded_refs()
3696 list_for_each_entry(cur, &sctx->deleted_refs, list) { process_recorded_refs()
3697 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen, process_recorded_refs()
3699 cur->name, cur->name_len); process_recorded_refs()
3703 ret = send_unlink(sctx, cur->full_path); process_recorded_refs()
3707 ret = dup_ref(cur, &check_dirs); process_recorded_refs()
3732 list_for_each_entry(cur, &check_dirs, list) { process_recorded_refs()
3738 if (cur->dir > sctx->cur_ino) process_recorded_refs()
3741 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); process_recorded_refs()
3748 ret = send_utimes(sctx, cur->dir, cur->dir_gen); process_recorded_refs()
3752 cur->dir != last_dir_ino_rm) { process_recorded_refs()
3753 ret = can_rmdir(sctx, cur->dir, cur->dir_gen, process_recorded_refs()
3758 ret = get_cur_path(sctx, cur->dir, process_recorded_refs()
3759 cur->dir_gen, valid_path); process_recorded_refs()
3765 last_dir_ino_rm = cur->dir; process_recorded_refs()
H A Draid56.c203 struct btrfs_stripe_hash *cur; btrfs_alloc_stripe_hash_table() local
233 cur = h + i; btrfs_alloc_stripe_hash_table()
234 INIT_LIST_HEAD(&cur->hash_list); btrfs_alloc_stripe_hash_table()
235 spin_lock_init(&cur->lock); btrfs_alloc_stripe_hash_table()
236 init_waitqueue_head(&cur->wait); btrfs_alloc_stripe_hash_table()
569 struct btrfs_raid_bio *cur) rbio_can_merge()
572 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) rbio_can_merge()
583 test_bit(RBIO_CACHE_BIT, &cur->flags)) rbio_can_merge()
587 cur->bbio->raid_map[0]) rbio_can_merge()
591 if (last->operation != cur->operation) rbio_can_merge()
602 cur->operation == BTRFS_RBIO_PARITY_SCRUB) rbio_can_merge()
657 struct btrfs_raid_bio *cur; lock_stripe_add() local
667 list_for_each_entry(cur, &h->hash_list, hash_list) { lock_stripe_add()
669 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) { lock_stripe_add()
670 spin_lock(&cur->bio_list_lock); lock_stripe_add()
673 if (bio_list_empty(&cur->bio_list) && lock_stripe_add()
674 list_empty(&cur->plug_list) && lock_stripe_add()
675 test_bit(RBIO_CACHE_BIT, &cur->flags) && lock_stripe_add()
676 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) { lock_stripe_add()
677 list_del_init(&cur->hash_list); lock_stripe_add()
678 atomic_dec(&cur->refs); lock_stripe_add()
680 steal_rbio(cur, rbio); lock_stripe_add()
681 cache_drop = cur; lock_stripe_add()
682 spin_unlock(&cur->bio_list_lock); lock_stripe_add()
688 if (rbio_can_merge(cur, rbio)) { lock_stripe_add()
689 merge_rbio(cur, rbio); lock_stripe_add()
690 spin_unlock(&cur->bio_list_lock); lock_stripe_add()
705 list_for_each_entry(pending, &cur->plug_list, lock_stripe_add()
709 spin_unlock(&cur->bio_list_lock); lock_stripe_add()
720 list_add_tail(&rbio->plug_list, &cur->plug_list); lock_stripe_add()
721 spin_unlock(&cur->bio_list_lock); lock_stripe_add()
856 struct bio *cur = bio_list_get(&rbio->bio_list); rbio_orig_end_io() local
864 while (cur) { rbio_orig_end_io()
865 next = cur->bi_next; rbio_orig_end_io()
866 cur->bi_next = NULL; rbio_orig_end_io()
868 set_bit(BIO_UPTODATE, &cur->bi_flags); rbio_orig_end_io()
869 bio_endio(cur, err); rbio_orig_end_io()
870 cur = next; rbio_orig_end_io()
1674 struct btrfs_raid_bio *cur; run_plug() local
1684 cur = list_entry(plug->rbio_list.next, run_plug()
1686 list_del_init(&cur->plug_list); run_plug()
1688 if (rbio_is_full(cur)) { run_plug()
1690 full_stripe_write(cur); run_plug()
1694 if (rbio_can_merge(last, cur)) { run_plug()
1695 merge_rbio(last, cur); run_plug()
1696 __free_raid_bio(cur); run_plug()
1702 last = cur; run_plug()
568 rbio_can_merge(struct btrfs_raid_bio *last, struct btrfs_raid_bio *cur) rbio_can_merge() argument
H A Dprops.c178 u32 total_len, cur, this_len; iterate_object_props() local
205 cur = 0; iterate_object_props()
208 while (cur < total_len) { iterate_object_props()
253 cur += this_len; iterate_object_props()
H A Dulist.c139 struct ulist_node *cur = NULL; ulist_rbtree_insert() local
143 cur = rb_entry(parent, struct ulist_node, rb_node); ulist_rbtree_insert()
145 if (cur->val < ins->val) ulist_rbtree_insert()
147 else if (cur->val > ins->val) ulist_rbtree_insert()
H A Dextent_map.c443 struct extent_map *cur, replace_extent_mapping()
447 WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags)); replace_extent_mapping()
448 ASSERT(extent_map_in_tree(cur)); replace_extent_mapping()
449 if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags)) replace_extent_mapping()
450 list_del_init(&cur->list); replace_extent_mapping()
451 rb_replace_node(&cur->rb_node, &new->rb_node, &tree->map); replace_extent_mapping()
452 RB_CLEAR_NODE(&cur->rb_node); replace_extent_mapping()
442 replace_extent_mapping(struct extent_map_tree *tree, struct extent_map *cur, struct extent_map *new, int modified) replace_extent_mapping() argument
H A Dctree.c452 struct tree_mod_elem *cur; __tree_mod_log_insert() local
461 cur = container_of(*new, struct tree_mod_elem, node); __tree_mod_log_insert()
463 if (cur->index < tm->index) __tree_mod_log_insert()
465 else if (cur->index > tm->index) __tree_mod_log_insert()
467 else if (cur->seq < tm->seq) __tree_mod_log_insert()
469 else if (cur->seq > tm->seq) __tree_mod_log_insert()
740 struct tree_mod_elem *cur = NULL; __tree_mod_log_search() local
748 cur = container_of(node, struct tree_mod_elem, node); __tree_mod_log_search()
749 if (cur->index < index) { __tree_mod_log_search()
751 } else if (cur->index > index) { __tree_mod_log_search()
753 } else if (cur->seq < min_seq) { __tree_mod_log_search()
758 BUG_ON(found->seq > cur->seq); __tree_mod_log_search()
759 found = cur; __tree_mod_log_search()
761 } else if (cur->seq > min_seq) { __tree_mod_log_search()
764 BUG_ON(found->seq < cur->seq); __tree_mod_log_search()
765 found = cur; __tree_mod_log_search()
768 found = cur; __tree_mod_log_search()
1624 struct extent_buffer *cur; btrfs_realloc_node() local
1680 cur = btrfs_find_tree_block(root->fs_info, blocknr); btrfs_realloc_node()
1681 if (cur) btrfs_realloc_node()
1682 uptodate = btrfs_buffer_uptodate(cur, gen, 0); btrfs_realloc_node()
1685 if (!cur || !uptodate) { btrfs_realloc_node()
1686 if (!cur) { btrfs_realloc_node()
1687 cur = read_tree_block(root, blocknr, gen); btrfs_realloc_node()
1688 if (!cur || !extent_buffer_uptodate(cur)) { btrfs_realloc_node()
1689 free_extent_buffer(cur); btrfs_realloc_node()
1693 err = btrfs_read_buffer(cur, gen); btrfs_realloc_node()
1695 free_extent_buffer(cur); btrfs_realloc_node()
1703 btrfs_tree_lock(cur); btrfs_realloc_node()
1704 btrfs_set_lock_blocking(cur); btrfs_realloc_node()
1705 err = __btrfs_cow_block(trans, root, cur, parent, i, btrfs_realloc_node()
1706 &cur, search_start, btrfs_realloc_node()
1710 btrfs_tree_unlock(cur); btrfs_realloc_node()
1711 free_extent_buffer(cur); btrfs_realloc_node()
1714 search_start = cur->start; btrfs_realloc_node()
1715 last_block = cur->start; btrfs_realloc_node()
1717 btrfs_tree_unlock(cur); btrfs_realloc_node()
1718 free_extent_buffer(cur); btrfs_realloc_node()
5119 struct extent_buffer *cur; btrfs_search_forward() local
5130 cur = btrfs_read_lock_root_node(root); btrfs_search_forward()
5131 level = btrfs_header_level(cur); btrfs_search_forward()
5133 path->nodes[level] = cur; btrfs_search_forward()
5136 if (btrfs_header_generation(cur) < min_trans) { btrfs_search_forward()
5141 nritems = btrfs_header_nritems(cur); btrfs_search_forward()
5142 level = btrfs_header_level(cur); btrfs_search_forward()
5143 sret = bin_search(cur, min_key, level, &slot); btrfs_search_forward()
5151 btrfs_item_key_to_cpu(cur, &found_key, slot); btrfs_search_forward()
5163 gen = btrfs_node_ptr_generation(cur, slot); btrfs_search_forward()
5188 btrfs_node_key_to_cpu(cur, &found_key, slot); btrfs_search_forward()
5195 cur = read_node_slot(root, cur, slot); btrfs_search_forward()
5196 BUG_ON(!cur); /* -ENOMEM */ btrfs_search_forward()
5198 btrfs_tree_read_lock(cur); btrfs_search_forward()
5201 path->nodes[level - 1] = cur; btrfs_search_forward()
H A Dextent_map.h73 struct extent_map *cur,
H A Dfree-space-cache.c347 if (io_ctl->cur) { io_ctl_unmap_page()
348 io_ctl->cur = NULL; io_ctl_unmap_page()
357 io_ctl->cur = page_address(io_ctl->page); io_ctl_map_page()
358 io_ctl->orig = io_ctl->cur; io_ctl_map_page()
361 memset(io_ctl->cur, 0, PAGE_CACHE_SIZE); io_ctl_map_page()
424 io_ctl->cur += (sizeof(u32) * io_ctl->num_pages); io_ctl_set_generation()
427 io_ctl->cur += sizeof(u64); io_ctl_set_generation()
431 val = io_ctl->cur; io_ctl_set_generation()
433 io_ctl->cur += sizeof(u64); io_ctl_set_generation()
445 io_ctl->cur += sizeof(u32) * io_ctl->num_pages; io_ctl_check_generation()
449 io_ctl->cur += sizeof(u64); io_ctl_check_generation()
453 gen = io_ctl->cur; io_ctl_check_generation()
461 io_ctl->cur += sizeof(u64); io_ctl_check_generation()
525 if (!io_ctl->cur) io_ctl_add_entry()
528 entry = io_ctl->cur; io_ctl_add_entry()
533 io_ctl->cur += sizeof(struct btrfs_free_space_entry); io_ctl_add_entry()
552 if (!io_ctl->cur) io_ctl_add_bitmap()
559 if (io_ctl->cur != io_ctl->orig) { io_ctl_add_bitmap()
566 memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE); io_ctl_add_bitmap()
579 if (io_ctl->cur != io_ctl->orig) io_ctl_zero_remaining_pages()
596 if (!io_ctl->cur) { io_ctl_read_entry()
602 e = io_ctl->cur; io_ctl_read_entry()
606 io_ctl->cur += sizeof(struct btrfs_free_space_entry); io_ctl_read_entry()
626 memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); io_ctl_read_bitmap()
/linux-4.1.27/drivers/char/hw_random/
H A Dtimeriomem-rng.c69 unsigned long cur; timeriomem_rng_data_read() local
74 cur = jiffies; timeriomem_rng_data_read()
76 delay = cur - priv->expires; timeriomem_rng_data_read()
79 priv->expires = cur + delay; timeriomem_rng_data_read()
/linux-4.1.27/drivers/gpu/drm/via/
H A Dvia_dmablit.c319 int cur; via_dmablit_handler() local
336 cur = blitq->cur; via_dmablit_handler()
339 blitq->blits[cur]->aborted = blitq->aborting; via_dmablit_handler()
341 wake_up(blitq->blit_queue + cur); via_dmablit_handler()
343 cur++; via_dmablit_handler()
344 if (cur >= VIA_NUM_BLIT_SLOTS) via_dmablit_handler()
345 cur = 0; via_dmablit_handler()
346 blitq->cur = cur; via_dmablit_handler()
371 via_fire_dmablit(dev, blitq->blits[cur], engine); via_dmablit_handler()
373 blitq->cur = cur; via_dmablit_handler()
414 slot = handle - blitq->done_blit_handle + blitq->cur - 1; via_dmablit_active()
510 while (blitq->serviced != blitq->cur) { via_dmablit_workqueue()
556 blitq->cur = 0; via_init_dmablit()
/linux-4.1.27/arch/sh/kernel/
H A Dkprobes.c373 struct kprobe *cur = kprobe_running(); post_kprobe_handler() local
378 if (!cur) post_kprobe_handler()
381 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { post_kprobe_handler()
383 cur->post_handler(cur, regs, 0); post_kprobe_handler()
422 struct kprobe *cur = kprobe_running(); kprobe_fault_handler() local
436 regs->pc = (unsigned long)cur->addr; kprobe_fault_handler()
450 kprobes_inc_nmissed_count(cur); kprobe_fault_handler()
459 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) kprobe_fault_handler()
/linux-4.1.27/arch/tile/kernel/
H A Dkprobes.c291 struct kprobe *cur = kprobe_running(); post_kprobe_handler() local
294 if (!cur) post_kprobe_handler()
297 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { post_kprobe_handler()
299 cur->post_handler(cur, regs, 0); post_kprobe_handler()
302 resume_execution(cur, regs, kcb); post_kprobe_handler()
318 struct kprobe *cur = kprobe_running(); kprobe_fault_handler() local
321 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) kprobe_fault_handler()
332 resume_execution(cur, regs, kcb); kprobe_fault_handler()
/linux-4.1.27/arch/arm/plat-samsung/
H A Dadc.c70 struct s3c_adc_client *cur; member in struct:adc_device
142 adc->cur = next; s3c_adc_try()
175 if (!adc->cur) s3c_adc_start()
263 if (adc_dev->cur == client) s3c_adc_release()
264 adc_dev->cur = NULL; s3c_adc_release()
278 if (adc_dev->cur == NULL) s3c_adc_release()
289 struct s3c_adc_client *client = adc->cur; s3c_adc_irq()
324 adc->cur = NULL; s3c_adc_irq()
/linux-4.1.27/drivers/md/
H A Ddm-cache-policy-cleaner.c144 struct wb_cache_entry *cur; lookup_cache_entry() local
147 hlist_for_each_entry(cur, bucket, hlist) { hlist_for_each_entry()
148 if (cur->oblock == oblock) { hlist_for_each_entry()
150 hlist_del(&cur->hlist); hlist_for_each_entry()
151 hlist_add_head(&cur->hlist, bucket); hlist_for_each_entry()
152 return cur; hlist_for_each_entry()
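
lookup_cache_entry() re-inserts a hit at the head of its hash bucket, so recently used entries are found first on the next lookup. A singly-linked userspace version of that move-to-front lookup (entry type hypothetical):

    struct entry {
        unsigned long key;
        struct entry *next;
    };

    /* Find key in one bucket; on a hit, unlink the entry and relink
     * it at the head, as the hlist_del()/hlist_add_head() pair does. */
    static struct entry *lookup_mtf(struct entry **bucket, unsigned long key)
    {
        struct entry **pp, *e;

        for (pp = bucket; (e = *pp) != NULL; pp = &e->next) {
            if (e->key == key) {
                *pp = e->next;
                e->next = *bucket;
                *bucket = e;
                return e;
            }
        }
        return NULL;
    }
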
/linux-4.1.27/drivers/parisc/
H A Dled.c183 char *cur, lbuf[32]; led_proc_write() local
196 cur = lbuf; led_proc_write()
201 d = *cur++ - '0'; led_proc_write()
205 if (*cur++ != ' ') goto parse_error; led_proc_write()
207 d = *cur++ - '0'; led_proc_write()
211 if (*cur++ != ' ') goto parse_error; led_proc_write()
213 d = *cur++ - '0'; led_proc_write()
219 if (*cur && cur[strlen(cur)-1] == '\n') led_proc_write()
220 cur[strlen(cur)-1] = 0; led_proc_write()
221 if (*cur == 0) led_proc_write()
222 cur = lcd_text_default; led_proc_write()
223 lcd_print(cur); led_proc_write()
H A Deisa.c427 char *cur = str; eisa_irq_setup() local
431 while (cur != NULL) { eisa_irq_setup()
434 val = (int) simple_strtoul(cur, &pe, 0); eisa_irq_setup()
445 if ((cur = strchr(cur, ','))) { eisa_irq_setup()
446 cur++; eisa_irq_setup()
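
eisa_irq_setup() walks a comma-separated parameter string with simple_strtoul() and strchr(), stepping cur past each comma. A userspace sketch of the same loop (function name hypothetical):

    #include <stdlib.h>
    #include <string.h>

    /* Parse up to n comma-separated numbers from str; returns the
     * count actually parsed. */
    static int parse_int_list(char *str, unsigned long *out, int n)
    {
        char *cur = str;
        int i = 0;

        while (cur != NULL && i < n) {
            out[i++] = strtoul(cur, NULL, 0);
            if ((cur = strchr(cur, ',')))
                cur++;            /* skip the comma itself */
        }
        return i;
    }
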
/linux-4.1.27/kernel/power/
H A Dsnapshot.c311 struct bm_position cur; /* most recently used bit position */ member in struct:memory_bitmap
478 bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree, memory_bm_position_reset()
480 bm->cur.node = list_entry(bm->cur.zone->leaves.next, memory_bm_position_reset()
482 bm->cur.node_pfn = 0; memory_bm_position_reset()
483 bm->cur.node_bit = 0; memory_bm_position_reset()
522 struct mem_extent *ext, *cur, *aux; for_each_populated_zone() local
553 cur = ext; list_for_each_entry_safe_continue()
554 list_for_each_entry_safe_continue(cur, aux, list, hook) { list_for_each_entry_safe_continue()
555 if (zone_end < cur->start) list_for_each_entry_safe_continue()
557 if (zone_end < cur->end) list_for_each_entry_safe_continue()
558 ext->end = cur->end; list_for_each_entry_safe_continue()
559 list_del(&cur->hook); list_for_each_entry_safe_continue()
560 kfree(cur); list_for_each_entry_safe_continue()
629 * The cur.zone, cur.block and cur.node_pfn member of @bm are
641 zone = bm->cur.zone; memory_bm_find_bit()
665 node = bm->cur.node; memory_bm_find_bit()
666 if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn) memory_bm_find_bit()
683 bm->cur.zone = zone; memory_bm_find_bit()
684 bm->cur.node = node; memory_bm_find_bit()
685 bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK; memory_bm_find_bit()
733 bit = max(bm->cur.node_bit - 1, 0); memory_bm_clear_current()
734 clear_bit(bit, bm->cur.node->data); memory_bm_clear_current()
768 bm->cur.node = list_entry(bm->cur.node->list.next, rtree_next_node()
770 if (&bm->cur.node->list != &bm->cur.zone->leaves) { rtree_next_node()
771 bm->cur.node_pfn += BM_BITS_PER_BLOCK; rtree_next_node()
772 bm->cur.node_bit = 0; rtree_next_node()
778 bm->cur.zone = list_entry(bm->cur.zone->list.next, rtree_next_node()
780 if (&bm->cur.zone->list != &bm->zones) { rtree_next_node()
781 bm->cur.node = list_entry(bm->cur.zone->leaves.next, rtree_next_node()
783 bm->cur.node_pfn = 0; rtree_next_node()
784 bm->cur.node_bit = 0; rtree_next_node()
808 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn; memory_bm_next_pfn()
809 bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK); memory_bm_next_pfn()
810 bit = find_next_bit(bm->cur.node->data, bits, memory_bm_next_pfn()
811 bm->cur.node_bit); memory_bm_next_pfn()
813 pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit; memory_bm_next_pfn()
814 bm->cur.node_bit = bit + 1; memory_bm_next_pfn()
1958 if (handle->cur > nr_meta_pages + nr_copy_pages) snapshot_read_next()
1967 if (!handle->cur) { snapshot_read_next()
1976 } else if (handle->cur <= nr_meta_pages) { snapshot_read_next()
1998 handle->cur++; snapshot_read_next()
2475 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) snapshot_write_next()
2480 if (!handle->cur) { snapshot_write_next()
2489 } else if (handle->cur == 1) { snapshot_write_next()
2503 } else if (handle->cur <= nr_meta_pages + 1) { snapshot_write_next()
2508 if (handle->cur == nr_meta_pages + 1) { snapshot_write_next()
2531 handle->cur++; snapshot_write_next()
2550 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) { snapshot_write_finalize()
2559 handle->cur <= nr_meta_pages + nr_copy_pages); snapshot_image_loaded()
H A Dswap.c88 struct swap_map_page *cur; member in struct:swap_map_handle
313 if (handle->cur) release_swap_writer()
314 free_page((unsigned long)handle->cur); release_swap_writer()
315 handle->cur = NULL; release_swap_writer()
329 handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); get_swap_writer()
330 if (!handle->cur) { get_swap_writer()
356 if (!handle->cur) swap_write_page()
362 handle->cur->entries[handle->k++] = offset; swap_write_page()
367 handle->cur->next_swap = offset; swap_write_page()
368 error = write_page(handle->cur, handle->cur_swap, bio_chain); swap_write_page()
371 clear_page(handle->cur); swap_write_page()
392 if (handle->cur && handle->cur_swap) flush_swap_writer()
393 return write_page(handle->cur, handle->cur_swap, NULL); flush_swap_writer()
871 handle->cur = NULL; release_swap_reader()
886 handle->cur = NULL; get_swap_reader()
917 handle->cur = handle->maps->map; get_swap_reader()
928 if (!handle->cur) swap_read_page()
930 offset = handle->cur->entries[handle->k]; swap_read_page()
945 handle->cur = handle->maps->map; swap_read_page()
1208 if (handle->cur && load_image_lzo()
1209 handle->cur->entries[handle->k]) { load_image_lzo()
/linux-4.1.27/fs/reiserfs/
H A Dibalance.c122 * Insert count node pointers into buffer cur before position to + 1.
123 * Insert count items into buffer cur before position to.
131 struct buffer_head *cur = cur_bi->bi_bh; internal_insert_childs() local
142 blkh = B_BLK_HEAD(cur); internal_insert_childs()
146 RFALSE(B_FREE_SPACE(cur) < count * (KEY_SIZE + DC_SIZE), internal_insert_childs()
148 B_FREE_SPACE(cur), count * (KEY_SIZE + DC_SIZE)); internal_insert_childs()
151 dc = B_N_CHILD(cur, to + 1); internal_insert_childs()
164 ih = internal_key(cur, ((to == -1) ? 0 : to)); internal_insert_childs()
180 do_balance_mark_internal_dirty(cur_bi->tb, cur, 0); internal_insert_childs()
183 check_internal(cur); internal_insert_childs()
202 * Delete del_num items and node pointers from buffer cur starting from
209 struct buffer_head *cur = cur_bi->bi_bh; internal_delete_pointers_items() local
215 RFALSE(cur == NULL, "buffer is 0"); internal_delete_pointers_items()
218 RFALSE(first_p < 0 || first_p + del_num > B_NR_ITEMS(cur) + 1 internal_delete_pointers_items()
223 B_NR_ITEMS(cur) + 1, first_i); internal_delete_pointers_items()
227 blkh = B_BLK_HEAD(cur); internal_delete_pointers_items()
237 RFALSE(first_i + del_num > B_NR_ITEMS(cur), internal_delete_pointers_items()
240 first_i, del_num, first_i + del_num, cur, cur); internal_delete_pointers_items()
243 dc = B_N_CHILD(cur, first_p); internal_delete_pointers_items()
246 key = internal_key(cur, first_i); internal_delete_pointers_items()
257 do_balance_mark_internal_dirty(cur_bi->tb, cur, 0); internal_delete_pointers_items()
259 check_internal(cur); internal_delete_pointers_items()
H A Djournal.c1850 struct reiserfs_journal_cnode *cur; remove_journal_hash() local
1857 cur = *head; remove_journal_hash()
1858 while (cur) { remove_journal_hash()
1859 if (cur->blocknr == block && cur->sb == sb remove_journal_hash()
1860 && (jl == NULL || jl == cur->jlist) remove_journal_hash()
1861 && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) { remove_journal_hash()
1862 if (cur->hnext) { remove_journal_hash()
1863 cur->hnext->hprev = cur->hprev; remove_journal_hash()
1865 if (cur->hprev) { remove_journal_hash()
1866 cur->hprev->hnext = cur->hnext; remove_journal_hash()
1868 *head = cur->hnext; remove_journal_hash()
1870 cur->blocknr = 0; remove_journal_hash()
1871 cur->sb = NULL; remove_journal_hash()
1872 cur->state = 0; remove_journal_hash()
1874 * anybody who clears the cur->bh will also remove_journal_hash()
1877 if (cur->bh && cur->jlist) remove_journal_hash()
1878 atomic_dec(&cur->jlist->j_nonzerolen); remove_journal_hash()
1879 cur->bh = NULL; remove_journal_hash()
1880 cur->jlist = NULL; remove_journal_hash()
1882 cur = cur->hnext; remove_journal_hash()
3484 struct reiserfs_journal_cnode *cur = cn->hprev; can_dirty() local
3492 while (cur && can_dirty) { can_dirty()
3493 if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb && can_dirty()
3494 cur->blocknr == blocknr) { can_dirty()
3497 cur = cur->hprev; can_dirty()
3503 cur = cn->hnext; can_dirty()
3504 while (cur && can_dirty) { can_dirty()
3505 if (cur->jlist && cur->jlist->j_len > 0 && can_dirty()
3506 atomic_read(&cur->jlist->j_commit_left) > 0 && cur->bh && can_dirty()
3507 cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) { can_dirty()
3510 cur = cur->hnext; can_dirty()
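
remove_journal_hash() unlinks a node from a doubly-linked hash chain, updating the bucket head when the node has no predecessor. The unlink step on its own (node type hypothetical):

    struct cnode {
        struct cnode *hnext, *hprev;
    };

    /* Unlink cur from its chain; *head is the bucket's first pointer. */
    static void chain_unlink(struct cnode **head, struct cnode *cur)
    {
        if (cur->hnext)
            cur->hnext->hprev = cur->hprev;
        if (cur->hprev)
            cur->hprev->hnext = cur->hnext;
        else
            *head = cur->hnext;
        cur->hnext = cur->hprev = NULL;
    }
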
/linux-4.1.27/drivers/dma-buf/
H A Dfence.c68 struct fence_cb *cur, *tmp; fence_signal_locked() local
89 list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { fence_signal_locked()
90 list_del_init(&cur->node); fence_signal_locked()
91 cur->func(fence, cur); fence_signal_locked()
125 struct fence_cb *cur, *tmp; fence_signal() local
128 list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { fence_signal()
129 list_del_init(&cur->node); fence_signal()
130 cur->func(fence, cur); fence_signal()
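
fence_signal() drains its callback list with list_for_each_entry_safe(), detaching each node before invoking it so the callback is free to requeue or release itself. A sketch of the same safe drain over a plain singly-linked list:

    struct cb {
        struct cb *next;
        void (*func)(struct cb *cb);
    };

    /* Detach and invoke every callback; the successor is saved
     * before func() runs, since func() may free the node. */
    static void drain_callbacks(struct cb **list)
    {
        struct cb *cur, *tmp;

        for (cur = *list; cur != NULL; cur = tmp) {
            tmp = cur->next;
            cur->next = NULL;
            cur->func(cur);
        }
        *list = NULL;
    }
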
/linux-4.1.27/drivers/staging/lustre/lustre/osc/
H A Dosc_cache.c523 * if @cur and @victim are contiguous at chunk level.
525 static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur, osc_extent_merge() argument
528 struct osc_object *obj = cur->oe_obj; osc_extent_merge()
533 LASSERT(cur->oe_state == OES_CACHE); osc_extent_merge()
541 if (cur->oe_max_end != victim->oe_max_end) osc_extent_merge()
544 LASSERT(cur->oe_osclock == victim->oe_osclock); osc_extent_merge()
546 chunk_start = cur->oe_start >> ppc_bits; osc_extent_merge()
547 chunk_end = cur->oe_end >> ppc_bits; osc_extent_merge()
552 OSC_EXTENT_DUMP(D_CACHE, victim, "will be merged by %p.\n", cur); osc_extent_merge()
554 cur->oe_start = min(cur->oe_start, victim->oe_start); osc_extent_merge()
555 cur->oe_end = max(cur->oe_end, victim->oe_end); osc_extent_merge()
556 cur->oe_grants += victim->oe_grants; osc_extent_merge()
557 cur->oe_nr_pages += victim->oe_nr_pages; osc_extent_merge()
559 cur->oe_urgent |= victim->oe_urgent; osc_extent_merge()
560 cur->oe_memalloc |= victim->oe_memalloc; osc_extent_merge()
561 list_splice_init(&victim->oe_pages, &cur->oe_pages); osc_extent_merge()
569 OSC_EXTENT_DUMP(D_CACHE, cur, "after merging %p.\n", victim); osc_extent_merge()
628 struct osc_extent *cur; osc_extent_find() local
640 cur = osc_extent_alloc(obj); osc_extent_find()
641 if (cur == NULL) osc_extent_find()
661 cur->oe_max_end = max_end; osc_extent_find()
662 cur->oe_start = index & chunk_mask; osc_extent_find()
663 cur->oe_end = ((index + ~chunk_mask + 1) & chunk_mask) - 1; osc_extent_find()
664 if (cur->oe_start < lock->cll_descr.cld_start) osc_extent_find()
665 cur->oe_start = lock->cll_descr.cld_start; osc_extent_find()
666 if (cur->oe_end > max_end) osc_extent_find()
667 cur->oe_end = max_end; osc_extent_find()
668 cur->oe_osclock = lock; osc_extent_find()
669 cur->oe_grants = 0; osc_extent_find()
670 cur->oe_mppr = max_pages; osc_extent_find()
675 LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR, EXTPARA(cur)); osc_extent_find()
679 ext = osc_extent_search(obj, cur->oe_start); osc_extent_find()
692 EASSERTF(!overlapped(ext, cur), ext, osc_extent_find()
693 EXTSTR, EXTPARA(cur)); osc_extent_find()
705 /* ok, from now on, ext and cur have these attrs: osc_extent_find()
709 if (overlapped(ext, cur)) { osc_extent_find()
710 /* cur is the minimum unit, so overlapping means osc_extent_find()
712 EASSERTF((ext->oe_start <= cur->oe_start && osc_extent_find()
713 ext->oe_end >= cur->oe_end), osc_extent_find()
714 ext, EXTSTR, EXTPARA(cur)); osc_extent_find()
757 /* pull ext's start back to cover cur */ osc_extent_find()
758 ext->oe_start = cur->oe_start; osc_extent_find()
765 ext->oe_end = cur->oe_end; osc_extent_find()
787 LASSERT(found->oe_osclock == cur->oe_osclock); osc_extent_find()
793 EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur); osc_extent_find() local
794 cur->oe_grants = chunksize + cli->cl_extent_tax; osc_extent_find()
795 *grants -= cur->oe_grants; osc_extent_find()
798 cur->oe_state = OES_CACHE; osc_extent_find()
799 found = osc_extent_hold(cur); osc_extent_find()
800 osc_extent_insert(obj, cur); osc_extent_find()
801 OSC_EXTENT_DUMP(D_CACHE, cur, "add into tree %lu/%lu.\n", osc_extent_find()
823 osc_extent_put(env, cur); osc_extent_find()
/linux-4.1.27/drivers/hwmon/
H A Dlm77.c255 int i, cur, conf, hyst, crit, min, max; lm77_detect() local
276 cur = i2c_smbus_read_word_data(client, 0); lm77_detect()
292 if (((cur & 0x00f0) != 0xf0 && (cur & 0x00f0) != 0x0) lm77_detect()
304 cur = i2c_smbus_read_word_data(client, 0); lm77_detect()
305 if (i2c_smbus_read_word_data(client, 6) != cur lm77_detect()
306 || i2c_smbus_read_word_data(client, 7) != cur) lm77_detect()
/linux-4.1.27/drivers/pwm/
H A Dpwm-sti.c59 struct pwm_device *cur; member in struct:sti_pwm_chip
120 struct pwm_device *cur = pc->cur; sti_pwm_config() local
129 period_same = (period_ns == pwm_get_period(cur)); sti_pwm_config()
143 ((ncfg == 1) && (pwm->hwpwm == cur->hwpwm)) || sti_pwm_config()
144 ((ncfg == 1) && (pwm->hwpwm != cur->hwpwm) && period_same) || sti_pwm_config()
184 pc->cur = pwm; sti_pwm_config()
/linux-4.1.27/arch/mips/kvm/
H A Dtrap_emul.c543 unsigned int cur, change; kvm_trap_emul_set_one_reg() local
576 cur = kvm_read_c0_guest_config1(cop0); kvm_trap_emul_set_one_reg()
577 change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu); kvm_trap_emul_set_one_reg()
579 v = cur ^ change; kvm_trap_emul_set_one_reg()
587 cur = kvm_read_c0_guest_config3(cop0); kvm_trap_emul_set_one_reg()
588 change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu); kvm_trap_emul_set_one_reg()
590 v = cur ^ change; kvm_trap_emul_set_one_reg()
595 cur = kvm_read_c0_guest_config4(cop0); kvm_trap_emul_set_one_reg()
596 change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu); kvm_trap_emul_set_one_reg()
598 v = cur ^ change; kvm_trap_emul_set_one_reg()
603 cur = kvm_read_c0_guest_config5(cop0); kvm_trap_emul_set_one_reg()
604 change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu); kvm_trap_emul_set_one_reg()
606 v = cur ^ change; kvm_trap_emul_set_one_reg()
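
kvm_trap_emul_set_one_reg() computes change = (cur ^ v) & wrmask and then applies v = cur ^ change, so only the architecturally writable bits ever change. The same masked read-modify-write as a standalone helper:

    #include <stdint.h>

    /* XOR picks out the bits that differ between the current and the
     * requested value; the mask keeps only the writable ones. */
    static uint32_t masked_write(uint32_t cur, uint32_t req, uint32_t wrmask)
    {
        uint32_t change = (cur ^ req) & wrmask;
        return cur ^ change;
    }
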
/linux-4.1.27/drivers/watchdog/
H A Dbcm_kona_wdt.c119 int ctl, cur, ctl_sec, cur_sec, res; bcm_kona_wdt_dbg_show() local
123 cur = cur_val & SECWDOG_COUNT_MASK; bcm_kona_wdt_dbg_show()
125 cur_sec = TICKS_TO_SECS(cur, wdt); bcm_kona_wdt_dbg_show()
133 cur_sec, cur, cur, bcm_kona_wdt_dbg_show()
H A Dbcm2835_wdt.c48 uint32_t cur; bcm2835_wdt_start() local
55 cur = readl_relaxed(wdt->base + PM_RSTC); bcm2835_wdt_start()
56 writel_relaxed(PM_PASSWORD | (cur & PM_RSTC_WRCFG_CLR) | bcm2835_wdt_start()
/linux-4.1.27/arch/s390/mm/
H A Ddump_pagetables.c62 unsigned int prot, cur; note_page() local
71 cur = st->current_prot; note_page()
79 } else if (prot != cur || level != st->level || note_page()
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Drv6xx_dpm.c195 struct rv6xx_sclk_stepping *cur, rv6xx_next_vco_step()
200 next.post_divider = cur->post_divider; rv6xx_next_vco_step()
203 next.vco_frequency = (cur->vco_frequency * (100 + step_size)) / 100; rv6xx_next_vco_step()
205 next.vco_frequency = (cur->vco_frequency * 100 + 99 + step_size) / (100 + step_size); rv6xx_next_vco_step()
211 struct rv6xx_sclk_stepping *cur, rv6xx_can_step_post_div()
214 return (cur->post_divider > target->post_divider) && rv6xx_can_step_post_div()
215 ((cur->vco_frequency * target->post_divider) <= rv6xx_can_step_post_div()
216 (target->vco_frequency * (cur->post_divider - 1))); rv6xx_can_step_post_div()
220 struct rv6xx_sclk_stepping *cur, rv6xx_next_post_div_step()
223 struct rv6xx_sclk_stepping next = *cur; rv6xx_next_post_div_step()
232 struct rv6xx_sclk_stepping *cur, rv6xx_reached_stepping_target()
236 return (increasing_vco && (cur->vco_frequency >= target->vco_frequency)) || rv6xx_reached_stepping_target()
237 (!increasing_vco && (cur->vco_frequency <= target->vco_frequency)); rv6xx_reached_stepping_target()
244 struct rv6xx_sclk_stepping cur; rv6xx_generate_steps() local
249 rv6xx_convert_clock_to_stepping(rdev, low, &cur); rv6xx_generate_steps()
252 rv6xx_output_stepping(rdev, step_index++, &cur); rv6xx_generate_steps()
254 increasing_vco = (target.vco_frequency >= cur.vco_frequency); rv6xx_generate_steps()
256 if (target.post_divider > cur.post_divider) rv6xx_generate_steps()
257 cur.post_divider = target.post_divider; rv6xx_generate_steps()
262 if (rv6xx_can_step_post_div(rdev, &cur, &target)) rv6xx_generate_steps()
263 next = rv6xx_next_post_div_step(rdev, &cur, &target); rv6xx_generate_steps()
265 next = rv6xx_next_vco_step(rdev, &cur, increasing_vco, R600_VCOSTEPPCT_DFLT); rv6xx_generate_steps()
272 if (!rv6xx_reached_stepping_target(rdev, &tiny, &cur, !increasing_vco)) rv6xx_generate_steps()
290 cur = next; rv6xx_generate_steps()
194 rv6xx_next_vco_step(struct radeon_device *rdev, struct rv6xx_sclk_stepping *cur, bool increasing_vco, u32 step_size) rv6xx_next_vco_step() argument
210 rv6xx_can_step_post_div(struct radeon_device *rdev, struct rv6xx_sclk_stepping *cur, struct rv6xx_sclk_stepping *target) rv6xx_can_step_post_div() argument
219 rv6xx_next_post_div_step(struct radeon_device *rdev, struct rv6xx_sclk_stepping *cur, struct rv6xx_sclk_stepping *target) rv6xx_next_post_div_step() argument
231 rv6xx_reached_stepping_target(struct radeon_device *rdev, struct rv6xx_sclk_stepping *cur, struct rv6xx_sclk_stepping *target, bool increasing_vco) rv6xx_reached_stepping_target() argument
/linux-4.1.27/sound/synth/emux/
H A Dsoundfont.c64 static void add_preset(struct snd_sf_list *sflist, struct snd_sf_zone *cur);
1149 struct snd_sf_zone *cur; rebuild_presets() local
1156 for (cur = sf->zones; cur; cur = cur->next) { rebuild_presets()
1157 if (! cur->mapped && cur->sample == NULL) { rebuild_presets()
1159 cur->sample = set_sample(sf, &cur->v); rebuild_presets()
1160 if (cur->sample == NULL) rebuild_presets()
1164 add_preset(sflist, cur); rebuild_presets()
1174 add_preset(struct snd_sf_list *sflist, struct snd_sf_zone *cur) add_preset() argument
1179 zone = search_first_zone(sflist, cur->bank, cur->instr, cur->v.low); add_preset()
1180 if (zone && zone->v.sf_id != cur->v.sf_id) { add_preset()
1185 if (p->counter > cur->counter) add_preset()
1195 if ((index = get_index(cur->bank, cur->instr, cur->v.low)) < 0) add_preset()
1197 cur->next_zone = zone; /* zone link */ add_preset()
1198 cur->next_instr = sflist->presets[index]; /* preset table link */ add_preset()
1199 sflist->presets[index] = cur; add_preset()
/linux-4.1.27/drivers/staging/lustre/lustre/include/linux/
H A Dobd.h72 unsigned long cur = jiffies; __client_obd_list_lock() local
83 if (time_before(cur + 5 * HZ, jiffies) && __client_obd_list_lock()
/linux-4.1.27/drivers/clk/
H A Dclk-conf.c81 const __be32 *cur; __set_clk_rates() local
86 of_property_for_each_u32(node, "assigned-clock-rates", prop, cur, rate) { __set_clk_rates()
/linux-4.1.27/net/ipv6/
H A Dndisc.c173 static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur, ndisc_next_option() argument
177 if (!cur || !end || cur >= end) ndisc_next_option()
179 type = cur->nd_opt_type; ndisc_next_option()
181 cur = ((void *)cur) + (cur->nd_opt_len << 3); ndisc_next_option()
182 } while (cur < end && cur->nd_opt_type != type); ndisc_next_option()
183 return cur <= end && cur->nd_opt_type == type ? cur : NULL; ndisc_next_option()
192 static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur, ndisc_next_useropt() argument
195 if (!cur || !end || cur >= end) ndisc_next_useropt()
198 cur = ((void *)cur) + (cur->nd_opt_len << 3); ndisc_next_useropt()
199 } while (cur < end && !ndisc_is_useropt(cur)); ndisc_next_useropt()
200 return cur <= end && ndisc_is_useropt(cur) ? cur : NULL; ndisc_next_useropt()
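
ndisc_next_option() steps through ICMPv6 TLV options, advancing by nd_opt_len << 3 octets until it meets another option of the same type. A hedged sketch of that walk (struct layout simplified; a zero length field must be rejected before calling this, or the loop never advances):

    #include <stdint.h>

    struct nd_opt {               /* 8-octet-granular TLV header */
        uint8_t type;
        uint8_t len;              /* length in units of 8 octets */
    };

    /* Next option of the same type, or NULL when none remains. */
    static struct nd_opt *next_option(struct nd_opt *cur, struct nd_opt *end)
    {
        uint8_t type;

        if (!cur || !end || cur >= end)
            return NULL;
        type = cur->type;
        do {
            cur = (struct nd_opt *)((uint8_t *)cur + (cur->len << 3));
        } while (cur < end && cur->type != type);
        return (cur < end && cur->type == type) ? cur : NULL;
    }
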
/linux-4.1.27/drivers/md/bcache/
H A Djournal.c530 j->cur = (j->cur == j->w) bch_journal_next()
541 j->cur->data->seq = ++j->seq; bch_journal_next()
542 j->cur->dirty = false; bch_journal_next()
543 j->cur->need_write = false; bch_journal_next()
544 j->cur->data->keys = 0; bch_journal_next()
563 struct journal_write *w = (j->cur == j->w) journal_write_done()
584 struct journal_write *w = c->journal.cur;
666 struct journal_write *w = c->journal.cur;
690 struct journal_write *w = c->journal.cur; journal_wait_for_write()
738 if (c->journal.cur->dirty) journal_write_work()
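
bcache flips journal.cur between two preallocated journal_write buffers, so one buffer fills while the other is being written out. A sketch of that double-buffer toggle (fields trimmed to the ones visible above):

    struct jwrite {
        int dirty;
        int need_write;
        /* ... buffered keys ... */
    };

    struct journal {
        struct jwrite w[2];       /* the two in-flight buffers */
        struct jwrite *cur;       /* the one currently being filled */
    };

    /* Switch to the other buffer and reset its per-write state. */
    static void journal_next(struct journal *j)
    {
        j->cur = (j->cur == &j->w[0]) ? &j->w[1] : &j->w[0];
        j->cur->dirty = 0;
        j->cur->need_write = 0;
    }
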
/linux-4.1.27/drivers/media/i2c/
H A Das3645a.c337 ctrl->cur.val = 0; as3645a_get_ctrl()
339 ctrl->cur.val |= V4L2_FLASH_FAULT_SHORT_CIRCUIT; as3645a_get_ctrl()
341 ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE; as3645a_get_ctrl()
343 ctrl->cur.val |= V4L2_FLASH_FAULT_TIMEOUT; as3645a_get_ctrl()
345 ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_VOLTAGE; as3645a_get_ctrl()
347 ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_CURRENT; as3645a_get_ctrl()
349 ctrl->cur.val |= V4L2_FLASH_FAULT_INDICATOR; as3645a_get_ctrl()
354 ctrl->cur.val = 0; as3645a_get_ctrl()
362 ctrl->cur.val = value; as3645a_get_ctrl()
366 dev_dbg(&client->dev, "G_CTRL %08x:%d\n", ctrl->id, ctrl->cur.val); as3645a_get_ctrl()
461 if ((ctrl->val == 0) == (ctrl->cur.val == 0)) as3645a_set_ctrl()
H A Dadp1653.c161 ctrl->cur.val = 0; adp1653_get_ctrl()
164 ctrl->cur.val |= V4L2_FLASH_FAULT_SHORT_CIRCUIT; adp1653_get_ctrl()
166 ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE; adp1653_get_ctrl()
168 ctrl->cur.val |= V4L2_FLASH_FAULT_TIMEOUT; adp1653_get_ctrl()
170 ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_VOLTAGE; adp1653_get_ctrl()
/linux-4.1.27/drivers/net/wireless/ath/wcn36xx/
H A Ddxe.c239 struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl; wcn36xx_dxe_init_tx_bd() local
245 cur->bd_phy_addr = bd_phy_addr; wcn36xx_dxe_init_tx_bd()
246 cur->bd_cpu_addr = bd_cpu_addr; wcn36xx_dxe_init_tx_bd()
250 cur->bd_phy_addr = 0; wcn36xx_dxe_init_tx_bd()
251 cur->bd_cpu_addr = NULL; wcn36xx_dxe_init_tx_bd()
253 cur = cur->next; wcn36xx_dxe_init_tx_bd()
310 struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl; wcn36xx_dxe_ch_free_skbs() local
314 kfree_skb(cur->skb); wcn36xx_dxe_ch_free_skbs()
315 cur = cur->next; wcn36xx_dxe_ch_free_skbs()
/linux-4.1.27/arch/mips/kernel/
H A Dkprobes.c440 struct kprobe *cur = kprobe_running(); post_kprobe_handler() local
443 if (!cur) post_kprobe_handler()
446 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { post_kprobe_handler()
448 cur->post_handler(cur, regs, 0); post_kprobe_handler()
451 resume_execution(cur, regs, kcb); post_kprobe_handler()
469 struct kprobe *cur = kprobe_running(); kprobe_fault_handler() local
472 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) kprobe_fault_handler()
476 resume_execution(cur, regs, kcb); kprobe_fault_handler()
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
H A Dmcg.c903 struct mlx4_spec_list *cur; mlx4_err_rule() local
912 list_for_each_entry(cur, &rule->list, list) { mlx4_err_rule()
913 switch (cur->id) { mlx4_err_rule()
916 "dmac = %pM ", &cur->eth.dst_mac); mlx4_err_rule()
917 if (cur->eth.ether_type) mlx4_err_rule()
920 be16_to_cpu(cur->eth.ether_type)); mlx4_err_rule()
921 if (cur->eth.vlan_id) mlx4_err_rule()
924 be16_to_cpu(cur->eth.vlan_id)); mlx4_err_rule()
928 if (cur->ipv4.src_ip) mlx4_err_rule()
931 &cur->ipv4.src_ip); mlx4_err_rule()
932 if (cur->ipv4.dst_ip) mlx4_err_rule()
935 &cur->ipv4.dst_ip); mlx4_err_rule()
940 if (cur->tcp_udp.src_port) mlx4_err_rule()
943 be16_to_cpu(cur->tcp_udp.src_port)); mlx4_err_rule()
944 if (cur->tcp_udp.dst_port) mlx4_err_rule()
947 be16_to_cpu(cur->tcp_udp.dst_port)); mlx4_err_rule()
952 "dst-gid = %pI6\n", cur->ib.dst_gid); mlx4_err_rule()
955 cur->ib.dst_gid_msk); mlx4_err_rule()
960 "VNID = %d ", be32_to_cpu(cur->vxlan.vni)); mlx4_err_rule()
980 struct mlx4_spec_list *cur; mlx4_flow_attach() local
992 list_for_each_entry(cur, &rule->list, list) { mlx4_flow_attach()
993 ret = parse_trans_rule(dev, cur, mailbox->buf + size); mlx4_flow_attach()
/linux-4.1.27/drivers/infiniband/hw/mlx5/
H A Dmem.c158 u64 cur = 0; __mlx5_ib_populate_pas() local
185 cur = base + (k << umem_page_shift); __mlx5_ib_populate_pas()
186 cur |= access_flags; __mlx5_ib_populate_pas()
188 pas[i >> shift] = cpu_to_be64(cur); __mlx5_ib_populate_pas()
H A Dmr.c122 ent->cur++; reg_mr_callback()
204 ent->cur--; remove_keys()
305 if (ent->cur < ent->limit) { limit_write()
306 err = add_keys(dev, c, 2 * ent->limit - ent->cur); limit_write()
348 if (cache->ent[i].cur < cache->ent[i].limit) someone_adding()
366 if (ent->cur < 2 * ent->limit && !dev->fill_delay) { __cache_work_func()
368 if (ent->cur < 2 * ent->limit) { __cache_work_func()
383 } else if (ent->cur > 2 * ent->limit) { __cache_work_func()
387 if (ent->cur > ent->limit) __cache_work_func()
435 ent->cur--; alloc_cached_mr()
437 if (ent->cur < ent->limit) alloc_cached_mr()
470 ent->cur++; free_cached_mr()
471 if (ent->cur > 2 * ent->limit) free_cached_mr()
495 ent->cur--; clean_keys()
536 ent->fcur = debugfs_create_u32("cur", 0400, ent->dir, mlx5_mr_cache_debugfs_init()
537 &ent->cur); mlx5_mr_cache_debugfs_init()
/linux-4.1.27/drivers/crypto/qat/qat_common/
H A Dqat_crypto.c127 unsigned long cur; local
130 cur = atomic_read(&inst->refctr);
131 if (best > cur) {
133 best = cur;
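
The qat_crypto lookup above keeps whichever instance has the smallest reference count as it scans, so new requests land on the least-busy engine. The same least-loaded pick over a plain array (signature hypothetical):

    #include <limits.h>

    /* Index of the smallest counter, or -1 for an empty array. */
    static int pick_least_loaded(const unsigned long *refctr, int n)
    {
        unsigned long best = ULONG_MAX;
        int i, best_idx = -1;

        for (i = 0; i < n; i++) {
            unsigned long cur = refctr[i];
            if (cur < best) {
                best = cur;
                best_idx = i;
            }
        }
        return best_idx;
    }
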
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/therm/
H A Dfan.c131 u32 cycles, cur, prev; nvkm_therm_fan_sense() local
147 cur = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line); nvkm_therm_fan_sense()
148 if (prev != cur) { nvkm_therm_fan_sense()
152 prev = cur; nvkm_therm_fan_sense()
H A Dg84.c102 int temp, cur; g84_therm_threshold_hyst_emulation() local
117 cur = therm->temp_get(therm); g84_therm_threshold_hyst_emulation()
118 if (new_state == NVKM_THERM_THRS_LOWER && cur > thrs->temp) g84_therm_threshold_hyst_emulation()
121 cur < thrs->temp - thrs->hysteresis) g84_therm_threshold_hyst_emulation()
/linux-4.1.27/net/rfkill/
H A Dcore.c120 bool cur, sav; member in struct:__anon14215
344 rfkill_global_states[type].cur = blocked; __rfkill_switch_all()
400 rfkill_global_states[i].sav = rfkill_global_states[i].cur; rfkill_epo()
401 rfkill_global_states[i].cur = true; rfkill_epo()
468 return rfkill_global_states[type].cur; rfkill_get_global_sw_state()
809 bool cur; rfkill_resume() local
812 cur = !!(rfkill->state & RFKILL_BLOCK_SW); rfkill_resume()
813 rfkill_set_block(rfkill, cur); rfkill_resume()
917 bool cur; rfkill_sync_work() local
922 cur = rfkill_global_states[rfkill->type].cur; rfkill_sync_work()
923 rfkill_set_block(rfkill, cur); rfkill_sync_work()
1157 rfkill_global_states[i].cur = ev.soft; rfkill_fop_write()
1159 rfkill_global_states[ev.type].cur = ev.soft; rfkill_fop_write()
1253 rfkill_global_states[i].cur = !rfkill_default_state; rfkill_init()
/linux-4.1.27/arch/mips/boot/
H A Delf2ecoff.c69 int remaining, cur, count; copy() local
79 cur = remaining; copy()
80 if (cur > sizeof ibuf) copy()
81 cur = sizeof ibuf; copy()
82 remaining -= cur; copy()
83 if ((count = read(in, ibuf, cur)) != cur) { copy()
89 if ((count = write(out, ibuf, cur)) != cur) { copy()
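
elf2ecoff's copy() moves data in bounded chunks, clamping each transfer to the scratch buffer and bailing out on any short read or write. A userspace sketch of the same loop:

    #include <unistd.h>

    /* Copy `remaining` bytes from fd `in` to fd `out` in chunks;
     * returns 0 on success, -1 on a short transfer. */
    static int copy_bytes(int out, int in, long remaining)
    {
        char buf[4096];

        while (remaining > 0) {
            long cur = remaining;

            if (cur > (long)sizeof(buf))
                cur = sizeof(buf);
            if (read(in, buf, cur) != cur)
                return -1;
            if (write(out, buf, cur) != cur)
                return -1;
            remaining -= cur;
        }
        return 0;
    }
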
/linux-4.1.27/drivers/crypto/
H A Dpicoxcell_crypto.c296 struct scatterlist *cur; spacc_sg_to_ddt() local
310 for_each_sg(payload, cur, mapped_ents, i) spacc_sg_to_ddt()
311 ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur)); spacc_sg_to_ddt()
330 struct scatterlist *cur; spacc_aead_make_ddts() local
372 for_each_sg(areq->assoc, cur, assoc_ents, i) { spacc_aead_make_ddts()
373 ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur)); spacc_aead_make_ddts()
375 ddt_set(dst_ddt++, sg_dma_address(cur), spacc_aead_make_ddts()
376 sg_dma_len(cur)); spacc_aead_make_ddts()
387 for_each_sg(areq->src, cur, src_ents, i) { spacc_aead_make_ddts()
388 ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur)); spacc_aead_make_ddts()
390 ddt_set(dst_ddt++, sg_dma_address(cur), spacc_aead_make_ddts()
391 sg_dma_len(cur)); spacc_aead_make_ddts()
394 for_each_sg(areq->dst, cur, dst_ents, i) spacc_aead_make_ddts()
395 ddt_set(dst_ddt++, sg_dma_address(cur), spacc_aead_make_ddts()
396 sg_dma_len(cur)); spacc_aead_make_ddts()
/linux-4.1.27/drivers/macintosh/
H A Dvia-macii.c124 struct adb_request *cur; request_is_queued() local
127 cur = current_req; request_is_queued()
128 while (cur) { request_is_queued()
129 if (cur == req) { request_is_queued()
133 cur = cur->next; request_is_queued()
/linux-4.1.27/net/dccp/
H A Dackvec.c33 struct dccp_ackvec_record *cur, *next; dccp_ackvec_purge_records() local
35 list_for_each_entry_safe(cur, next, &av->av_records, avr_node) dccp_ackvec_purge_records()
36 kmem_cache_free(dccp_ackvec_record_slab, cur); dccp_ackvec_purge_records()
367 struct dccp_ackvec_parsed *cur, *next; dccp_ackvec_parsed_cleanup() local
369 list_for_each_entry_safe(cur, next, parsed_chunks, node) dccp_ackvec_parsed_cleanup()
370 kfree(cur); dccp_ackvec_parsed_cleanup()
H A Dfeat.c1497 struct dccp_feat_entry *cur, *next; dccp_feat_activate_values() local
1503 list_for_each_entry(cur, fn_list, node) { list_for_each_entry()
1509 if (cur->empty_confirm) list_for_each_entry()
1512 idx = dccp_feat_index(cur->feat_num); list_for_each_entry()
1514 DCCP_BUG("Unknown feature %u", cur->feat_num); list_for_each_entry()
1517 if (cur->state != FEAT_STABLE) { list_for_each_entry()
1519 cur->is_local ? "local" : "remote", list_for_each_entry()
1520 dccp_feat_fname(cur->feat_num), list_for_each_entry()
1521 dccp_feat_sname[cur->state]); list_for_each_entry()
1524 fvals[idx][cur->is_local] = &cur->val; list_for_each_entry()
1541 list_for_each_entry_safe(cur, next, fn_list, node)
1542 if (!cur->needs_confirm)
1543 dccp_feat_list_pop(cur);
/linux-4.1.27/drivers/net/wireless/ath/ath6kl/
H A Dtxrx.c1118 u16 idx, st, cur, end; aggr_process_recv_frm() local
1141 cur = seq_no; aggr_process_recv_frm()
1144 if (((st < end) && (cur < st || cur > end)) || aggr_process_recv_frm()
1145 ((st > end) && (cur > end) && (cur < st))) { aggr_process_recv_frm()
1150 (cur < end || cur > extended_end)) || aggr_process_recv_frm()
1151 ((end > extended_end) && (cur > extended_end) && aggr_process_recv_frm()
1152 (cur < end))) { aggr_process_recv_frm()
1155 if (cur >= rxtid->hold_q_sz - 1) aggr_process_recv_frm()
1156 rxtid->seq_next = cur - (rxtid->hold_q_sz - 1); aggr_process_recv_frm()
1159 (rxtid->hold_q_sz - 2 - cur); aggr_process_recv_frm()
1166 if (cur >= rxtid->hold_q_sz - 1) aggr_process_recv_frm()
1167 st = cur - (rxtid->hold_q_sz - 1); aggr_process_recv_frm()
1170 (rxtid->hold_q_sz - 2 - cur); aggr_process_recv_frm()
1185 * Is the cur frame duplicate or something beyond our window(hold_q aggr_process_recv_frm()
/linux-4.1.27/drivers/gpu/drm/gma500/
H A Dcdv_intel_dp.c2067 struct edp_power_seq cur; cdv_intel_dp_init() local
2086 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> cdv_intel_dp_init()
2089 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> cdv_intel_dp_init()
2092 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> cdv_intel_dp_init()
2095 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> cdv_intel_dp_init()
2098 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> cdv_intel_dp_init()
2101 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", cdv_intel_dp_init()
2102 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); cdv_intel_dp_init()
2105 intel_dp->panel_power_up_delay = cur.t1_t3 / 10; cdv_intel_dp_init()
2106 intel_dp->backlight_on_delay = cur.t8 / 10; cdv_intel_dp_init()
2107 intel_dp->backlight_off_delay = cur.t9 / 10; cdv_intel_dp_init()
2108 intel_dp->panel_power_down_delay = cur.t10 / 10; cdv_intel_dp_init()
2109 intel_dp->panel_power_cycle_delay = (cur.t11_t12 - 1) * 100; cdv_intel_dp_init()
/linux-4.1.27/arch/x86/kernel/kprobes/
H A Dcore.c910 struct kprobe *cur = kprobe_running(); kprobe_debug_handler() local
913 if (!cur) kprobe_debug_handler()
916 resume_execution(cur, regs, kcb); kprobe_debug_handler()
919 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { kprobe_debug_handler()
921 cur->post_handler(cur, regs, 0); kprobe_debug_handler()
947 struct kprobe *cur = kprobe_running(); kprobe_fault_handler() local
950 if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) { kprobe_fault_handler()
961 regs->ip = (unsigned long)cur->addr; kprobe_fault_handler()
975 kprobes_inc_nmissed_count(cur); kprobe_fault_handler()
984 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) kprobe_fault_handler()
/linux-4.1.27/drivers/base/
H A Ddevres.c412 struct list_head *cur; remove_nodes() local
417 cur = first; remove_nodes()
418 while (cur != end) { remove_nodes()
422 node = list_entry(cur, struct devres_node, entry); remove_nodes()
423 cur = cur->next; remove_nodes()
444 * [cur, end). That is, for a closed group, both opening and remove_nodes()
448 cur = first; remove_nodes()
449 while (cur != end) { remove_nodes()
453 node = list_entry(cur, struct devres_node, entry); remove_nodes()
454 cur = cur->next; remove_nodes()
465 /* No need to update cur or end. The removed remove_nodes()
/linux-4.1.27/drivers/net/wan/
H A Ddscc4.c591 int cur = dpriv->iqtx_current%IRQ_RING_SIZE; dscc4_xpr_ack() local
596 (dpriv->iqtx[cur] & cpu_to_le32(Xpr))) dscc4_xpr_ack()
1537 int cur, loop = 0; dscc4_tx_irq() local
1540 cur = dpriv->iqtx_current%IRQ_RING_SIZE; dscc4_tx_irq()
1541 state = le32_to_cpu(dpriv->iqtx[cur]); dscc4_tx_irq()
1558 dpriv->iqtx[cur] = 0; dscc4_tx_irq()
1575 cur = dpriv->tx_dirty%TX_RING_SIZE; dscc4_tx_irq()
1576 tx_fd = dpriv->tx_fd + cur; dscc4_tx_irq()
1577 skb = dpriv->tx_skbuff[cur]; dscc4_tx_irq()
1586 dpriv->tx_skbuff[cur] = NULL; dscc4_tx_irq()
1591 cur); dscc4_tx_irq()
1706 int cur; dscc4_rx_irq() local
1709 cur = dpriv->iqrx_current%IRQ_RING_SIZE; dscc4_rx_irq()
1710 state = le32_to_cpu(dpriv->iqrx[cur]); dscc4_rx_irq()
1713 dpriv->iqrx[cur] = 0; dscc4_rx_irq()
1728 cur = dpriv->rx_current%RX_RING_SIZE; dscc4_rx_irq()
1729 rx_fd = dpriv->rx_fd + cur; dscc4_rx_irq()
1744 cur++; dscc4_rx_irq()
1745 if (!(cur = cur%RX_RING_SIZE)) dscc4_rx_irq()
1804 int cur; dscc4_rx_irq() local
1825 cur = dpriv->rx_current++%RX_RING_SIZE; dscc4_rx_irq()
1826 rx_fd = dpriv->rx_fd + cur; dscc4_rx_irq()
/linux-4.1.27/drivers/pnp/
H A Dcore.c71 struct pnp_protocol *cur = to_pnp_protocol(pos); pnp_register_protocol() local
72 if (cur->number == nodenum) { pnp_register_protocol()
/linux-4.1.27/drivers/net/ethernet/
H A Dfealnx.c1381 struct fealnx_desc *cur; reset_tx_descriptors() local
1391 cur = &np->tx_ring[i]; reset_tx_descriptors()
1392 if (cur->skbuff) { reset_tx_descriptors()
1393 pci_unmap_single(np->pci_dev, cur->buffer, reset_tx_descriptors()
1394 cur->skbuff->len, PCI_DMA_TODEVICE); reset_tx_descriptors()
1395 dev_kfree_skb_any(cur->skbuff); reset_tx_descriptors()
1396 cur->skbuff = NULL; reset_tx_descriptors()
1398 cur->status = 0; reset_tx_descriptors()
1399 cur->control = 0; /* needed? */ reset_tx_descriptors()
1401 cur->next_desc = np->tx_ring_dma + reset_tx_descriptors()
1403 cur->next_desc_logical = &np->tx_ring[i + 1]; reset_tx_descriptors()
1415 struct fealnx_desc *cur = np->cur_rx; reset_rx_descriptors() local
1421 if (cur->skbuff) reset_rx_descriptors()
1422 cur->status = RXOWN; reset_rx_descriptors()
1423 cur = cur->next_desc_logical; reset_rx_descriptors()
1647 struct fealnx_desc *cur; netdev_rx() local
1650 cur = np->cur_rx; netdev_rx()
1653 if ((!(cur->status & RXOWN)) && netdev_rx()
1654 (cur->status & RXLSD)) netdev_rx()
1657 cur = cur->next_desc_logical; netdev_rx()
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/
H A Dllog_swab.c198 struct llog_changelog_user_rec *cur = lustre_swab_llog_rec() local
201 __swab32s(&cur->cur_id); lustre_swab_llog_rec()
202 __swab64s(&cur->cur_endrec); lustre_swab_llog_rec()
203 tail = &cur->cur_tail; lustre_swab_llog_rec()
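
lustre_swab_llog_rec() byte-swaps each record field in place via __swab32s()/__swab64s(). An equivalent userspace sketch using GCC-style bswap builtins, with the record trimmed to the two fields visible above:

    #include <stdint.h>

    struct changelog_user_rec {   /* trimmed stand-in */
        uint32_t cur_id;
        uint64_t cur_endrec;
    };

    /* Swap every multi-byte field in place, as the llog code does
     * when reading a log written with the other endianness. */
    static void swab_user_rec(struct changelog_user_rec *cur)
    {
        cur->cur_id = __builtin_bswap32(cur->cur_id);
        cur->cur_endrec = __builtin_bswap64(cur->cur_endrec);
    }
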
/linux-4.1.27/drivers/net/wireless/b43legacy/
H A Ddebugfs.c473 struct b43legacy_txstatus *cur; b43legacy_debugfs_log_txstat() local
485 cur = &(log->log[i]); b43legacy_debugfs_log_txstat()
486 memcpy(cur, status, sizeof(*cur)); b43legacy_debugfs_log_txstat()
/linux-4.1.27/drivers/video/fbdev/via/
H A Dhw.c1391 struct via_pll_config cur, up, down, best = {0, 1, 0}; get_pll_config() local
1396 cur.rshift = limits[i].rshift; get_pll_config()
1397 cur.divisor = limits[i].divisor; get_pll_config()
1398 cur.multiplier = clk / ((f0 / cur.divisor)>>cur.rshift); get_pll_config()
1399 f = abs(get_pll_output_frequency(f0, cur) - clk); get_pll_config()
1400 up = down = cur; get_pll_config()
1404 cur = up; get_pll_config()
1406 cur = down; get_pll_config()
1408 if (cur.multiplier < limits[i].multiplier_min) get_pll_config()
1409 cur.multiplier = limits[i].multiplier_min; get_pll_config()
1410 else if (cur.multiplier > limits[i].multiplier_max) get_pll_config()
1411 cur.multiplier = limits[i].multiplier_max; get_pll_config()
1413 f = abs(get_pll_output_frequency(f0, cur) - clk); get_pll_config()
1415 best = cur; get_pll_config()
/linux-4.1.27/drivers/platform/x86/
H A Dmsi-wmi.c209 ktime_t cur = ktime_get_real(); msi_wmi_notify() local
210 ktime_t diff = ktime_sub(cur, last_pressed); msi_wmi_notify()
219 last_pressed = cur; msi_wmi_notify()
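
msi_wmi_notify() debounces key events by subtracting the previous timestamp from the current one and dropping events that arrive too soon. A userspace sketch of the same check built on clock_gettime() (threshold parameter hypothetical):

    #include <stdbool.h>
    #include <time.h>

    /* True when the event lands within min_ns of the previous one;
     * otherwise records it as the new reference point. */
    static bool debounced(struct timespec *last, long long min_ns)
    {
        struct timespec cur;
        long long diff;

        clock_gettime(CLOCK_MONOTONIC, &cur);
        diff = (cur.tv_sec - last->tv_sec) * 1000000000LL +
               (cur.tv_nsec - last->tv_nsec);
        if (diff < min_ns)
            return true;          /* too soon: drop the event */
        *last = cur;
        return false;
    }
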
/linux-4.1.27/fs/configfs/
H A Dsymlink.c63 int cur = strlen(config_item_name(p)); fill_item_path() local
66 length -= cur; fill_item_path()
67 strncpy(buffer + length,config_item_name(p),cur); fill_item_path()
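
fill_item_path() builds the path backwards: it starts at the end of the buffer and copies each ancestor's name in front of the one written before it, which avoids a separate pass to measure the total length. A sketch of that tail-first fill (item type hypothetical; the root item is not named, matching the snippet's parent walk):

    #include <string.h>

    struct item {
        const char *name;
        struct item *parent;      /* NULL at the root */
    };

    /* Fill buf from the tail with "/a/b/c"; returns the offset of
     * the first character, or -1 if the buffer is too small. */
    static int fill_path(const struct item *p, char *buf, int length)
    {
        buf[--length] = '\0';
        for (; p && p->parent; p = p->parent) {
            int cur = (int)strlen(p->name);

            length -= cur;
            if (length < 1)
                return -1;
            memcpy(buf + length, p->name, cur);
            buf[--length] = '/';
        }
        return length;
    }
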
/linux-4.1.27/net/sunrpc/
H A Dxdr.c907 unsigned int cur = xdr_stream_pos(xdr); xdr_align_pages() local
913 if (iov->iov_len > cur) { xdr_align_pages()
914 xdr_shrink_bufhead(buf, iov->iov_len - cur); xdr_align_pages()
915 xdr->nwords = XDR_QUADLEN(buf->len - cur); xdr_align_pages()
927 xdr->nwords = XDR_QUADLEN(buf->len - cur); xdr_align_pages()
1079 size_t cur; xdr_buf_trim() local
1083 cur = min_t(size_t, buf->tail[0].iov_len, trim); xdr_buf_trim()
1084 buf->tail[0].iov_len -= cur; xdr_buf_trim()
1085 trim -= cur; xdr_buf_trim()
1091 cur = min_t(unsigned int, buf->page_len, trim); xdr_buf_trim()
1092 buf->page_len -= cur; xdr_buf_trim()
1093 trim -= cur; xdr_buf_trim()
1099 cur = min_t(size_t, buf->head[0].iov_len, trim); xdr_buf_trim()
1100 buf->head[0].iov_len -= cur; xdr_buf_trim()
1101 trim -= cur; xdr_buf_trim()
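
xdr_buf_trim() shaves bytes off the end of a three-part buffer, consuming the tail, then the page data, then the head, each step clamped with min_t(). The same cascade over plain segment lengths:

    #include <stddef.h>

    struct buf3 {                 /* head / pages / tail, as in xdr_buf */
        size_t head, pages, tail;
    };

    /* Trim from the end: tail first, then pages, then head. */
    static void buf3_trim(struct buf3 *b, size_t trim)
    {
        size_t cur;

        cur = b->tail < trim ? b->tail : trim;
        b->tail -= cur;
        trim -= cur;

        cur = b->pages < trim ? b->pages : trim;
        b->pages -= cur;
        trim -= cur;

        cur = b->head < trim ? b->head : trim;
        b->head -= cur;
    }
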
/linux-4.1.27/drivers/gpu/drm/amd/amdkfd/
H A Dkfd_packet_manager.c143 struct queue *cur; pm_create_map_process() local
164 list_for_each_entry(cur, &qpd->queues_list, list) pm_create_map_process()
244 struct device_process_node *cur; pm_create_runlist_ib() local
266 list_for_each_entry(cur, queues, list) { list_for_each_entry()
267 qpd = cur->qpd; list_for_each_entry()
/linux-4.1.27/fs/ext4/
H A Dmballoc.c567 struct list_head *cur; __mb_check_buddy() local
639 list_for_each(cur, &grp->bb_prealloc_list) { __mb_check_buddy()
642 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); __mb_check_buddy()
1271 static void mb_clear_bits(void *bm, int cur, int len) mb_clear_bits() argument
1275 len = cur + len; mb_clear_bits()
1276 while (cur < len) { mb_clear_bits()
1277 if ((cur & 31) == 0 && (len - cur) >= 32) { mb_clear_bits()
1279 addr = bm + (cur >> 3); mb_clear_bits()
1281 cur += 32; mb_clear_bits()
1284 mb_clear_bit(cur, bm); mb_clear_bits()
1285 cur++; mb_clear_bits()
1292 static int mb_test_and_clear_bits(void *bm, int cur, int len) mb_test_and_clear_bits() argument
1297 len = cur + len; mb_test_and_clear_bits()
1298 while (cur < len) { mb_test_and_clear_bits()
1299 if ((cur & 31) == 0 && (len - cur) >= 32) { mb_test_and_clear_bits()
1301 addr = bm + (cur >> 3); mb_test_and_clear_bits()
1303 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); mb_test_and_clear_bits()
1305 cur += 32; mb_test_and_clear_bits()
1308 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) mb_test_and_clear_bits()
1309 zero_bit = cur; mb_test_and_clear_bits()
1310 cur++; mb_test_and_clear_bits()
1316 void ext4_set_bits(void *bm, int cur, int len) ext4_set_bits() argument
1320 len = cur + len; ext4_set_bits()
1321 while (cur < len) { ext4_set_bits()
1322 if ((cur & 31) == 0 && (len - cur) >= 32) { ext4_set_bits()
1324 addr = bm + (cur >> 3); ext4_set_bits()
1326 cur += 32; ext4_set_bits()
1329 mb_set_bit(cur, bm); ext4_set_bits()
1330 cur++; ext4_set_bits()
1550 int cur; mb_mark_used() local
1605 cur = (start >> ord) & ~1U; mb_mark_used()
1607 mb_clear_bit(cur, buddy); mb_mark_used()
1608 mb_clear_bit(cur + 1, buddy); mb_mark_used()
2673 struct list_head *cur, *tmp; ext4_mb_cleanup_pa() local
2676 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { ext4_mb_cleanup_pa()
2677 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); ext4_mb_cleanup_pa()
3458 struct list_head *cur; ext4_mb_generate_from_pa() local
3472 list_for_each(cur, &grp->bb_prealloc_list) { ext4_mb_generate_from_pa()
3473 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); ext4_mb_generate_from_pa()
4063 struct list_head *cur; ext4_mb_show_ac() local
4065 list_for_each(cur, &grp->bb_prealloc_list) { ext4_mb_show_ac()
4066 pa = list_entry(cur, struct ext4_prealloc_space, ext4_mb_show_ac()
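
mb_clear_bits(), mb_test_and_clear_bits() and ext4_set_bits() above all share one shape: when cur is 32-bit aligned and at least 32 bits remain, they touch a whole word at once, falling back to single-bit operations at the ragged edges. A sketch of that shape over a word-indexed bitmap:

    #include <stdint.h>

    /* Clear `len` bits starting at bit `cur`: whole words when
     * aligned, single bits at the edges. */
    static void clear_bits(uint32_t *bm, int cur, int len)
    {
        int end = cur + len;

        while (cur < end) {
            if ((cur & 31) == 0 && end - cur >= 32) {
                bm[cur >> 5] = 0; /* fast path: full word */
                cur += 32;
                continue;
            }
            bm[cur >> 5] &= ~(UINT32_C(1) << (cur & 31));
            cur++;
        }
    }
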
H A Dpage-io.c191 struct list_head *cur, *before, *after; dump_completed_IO() local
199 cur = &io->list; list_for_each_entry()
200 before = cur->prev; list_for_each_entry()
202 after = cur->next; list_for_each_entry()
/linux-4.1.27/drivers/net/ethernet/qlogic/netxen/
H A Dnetxen_nic_hw.c658 nx_mac_list_t *cur; nx_p3_nic_add_mac() local
662 cur = list_entry(head, nx_mac_list_t, list); list_for_each()
664 if (ether_addr_equal(addr, cur->mac_addr)) { list_for_each()
670 cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC);
671 if (cur == NULL)
674 memcpy(cur->mac_addr, addr, ETH_ALEN);
675 list_add_tail(&cur->list, &adapter->mac_list);
677 cur->mac_addr, NETXEN_MAC_ADD);
690 nx_mac_list_t *cur; netxen_p3_nic_set_multi() local
720 cur = list_entry(head->next, nx_mac_list_t, list); netxen_p3_nic_set_multi()
723 cur->mac_addr, NETXEN_MAC_DEL); netxen_p3_nic_set_multi()
724 list_del(&cur->list); netxen_p3_nic_set_multi()
725 kfree(cur); netxen_p3_nic_set_multi()
750 nx_mac_list_t *cur; netxen_p3_free_mac_list() local
754 cur = list_entry(head->next, nx_mac_list_t, list); netxen_p3_free_mac_list()
756 cur->mac_addr, NETXEN_MAC_DEL); netxen_p3_free_mac_list()
757 list_del(&cur->list); netxen_p3_free_mac_list()
758 kfree(cur); netxen_p3_free_mac_list()
H A Dnetxen_nic_main.c3217 struct nx_ip_list *cur, *tmp_cur; netxen_free_ip_list() local
3219 list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) { netxen_free_ip_list()
3221 if (cur->master) { netxen_free_ip_list()
3222 netxen_config_ipaddr(adapter, cur->ip_addr, netxen_free_ip_list()
3224 list_del(&cur->list); netxen_free_ip_list()
3225 kfree(cur); netxen_free_ip_list()
3228 netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); netxen_free_ip_list()
3229 list_del(&cur->list); netxen_free_ip_list()
3230 kfree(cur); netxen_free_ip_list()
3240 struct nx_ip_list *cur, *tmp_cur; netxen_list_config_ip() local
3252 cur = list_entry(head, struct nx_ip_list, list); netxen_list_config_ip()
3254 if (cur->ip_addr == ifa->ifa_address) netxen_list_config_ip()
3258 cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC); netxen_list_config_ip()
3259 if (cur == NULL) netxen_list_config_ip()
3263 cur->master = !!netif_is_bond_master(dev); netxen_list_config_ip()
3264 cur->ip_addr = ifa->ifa_address; netxen_list_config_ip()
3265 list_add_tail(&cur->list, &adapter->ip_list); netxen_list_config_ip()
3270 list_for_each_entry_safe(cur, tmp_cur, netxen_list_config_ip()
3272 if (cur->ip_addr == ifa->ifa_address) { netxen_list_config_ip()
3273 list_del(&cur->list); netxen_list_config_ip()
3274 kfree(cur); netxen_list_config_ip()
/linux-4.1.27/drivers/net/ethernet/amd/xgbe/
H A Dxgbe-desc.c379 ring->cur = 0; xgbe_wrapper_tx_descriptor_init()
423 ring->cur = 0; xgbe_wrapper_rx_descriptor_init()
499 DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur); xgbe_map_tx_skb()
502 start_index = ring->cur; xgbe_map_tx_skb()
503 cur_index = ring->cur; xgbe_map_tx_skb()
/linux-4.1.27/drivers/hv/
H A Dchannel_mgmt.c786 struct list_head *cur, *tmp; vmbus_get_outgoing_channel() local
805 list_for_each_safe(cur, tmp, &primary->sc_list) { vmbus_get_outgoing_channel()
806 cur_channel = list_entry(cur, struct vmbus_channel, sc_list); vmbus_get_outgoing_channel()
825 struct list_head *cur, *tmp; invoke_sc_cb() local
831 list_for_each_safe(cur, tmp, &primary_channel->sc_list) { invoke_sc_cb()
832 cur_channel = list_entry(cur, struct vmbus_channel, sc_list); invoke_sc_cb()
H A Dconnection.c278 struct list_head *cur, *tmp; relid2channel() local
290 list_for_each_safe(cur, tmp, &channel->sc_list) { relid2channel()
291 cur_sc = list_entry(cur, struct vmbus_channel, relid2channel()
H A Dhv_balloon.c674 struct list_head *cur; hv_online_page() local
679 list_for_each(cur, &dm_device.ha_region_list) { hv_online_page()
680 has = list_entry(cur, struct hv_hotadd_state, list); hv_online_page()
699 struct list_head *cur; pfn_covered() local
706 list_for_each(cur, &dm_device.ha_region_list) { pfn_covered()
707 has = list_entry(cur, struct hv_hotadd_state, list); pfn_covered()
752 struct list_head *cur; handle_pg_range() local
760 list_for_each(cur, &dm_device.ha_region_list) { handle_pg_range()
761 has = list_entry(cur, struct hv_hotadd_state, list); handle_pg_range()
1566 struct list_head *cur, *tmp; balloon_remove() local
1582 list_for_each_safe(cur, tmp, &dm->ha_region_list) { balloon_remove()
1583 has = list_entry(cur, struct hv_hotadd_state, list); balloon_remove()
/linux-4.1.27/drivers/media/i2c/s5c73m3/
H A Ds5c73m3-ctrls.c198 if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_EXPOSURE) { s5c73m3_3a_lock()
205 if (((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_WHITE_BALANCE) s5c73m3_3a_lock()
213 if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_FOCUS) s5c73m3_3a_lock()
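
s5c73m3_3a_lock() tests (ctrl->val ^ ctrl->cur.val) & flag so it issues hardware commands only for the lock bits that actually changed. The same change test as a helper:

    #include <stdbool.h>
    #include <stdint.h>

    /* True when `flag` differs between the requested and the
     * currently applied bitmask. */
    static bool flag_changed(uint32_t val, uint32_t cur, uint32_t flag)
    {
        return ((val ^ cur) & flag) != 0;
    }
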
/linux-4.1.27/arch/x86/mm/
H A Ddump_pagetables.c193 pgprotval_t prot, cur; note_page() local
202 cur = pgprot_val(st->current_prot) & PTE_FLAGS_MASK; note_page()
212 } else if (prot != cur || level != st->level || note_page()
/linux-4.1.27/arch/arm/mach-integrator/
H A Dimpd1.c48 u32 cur; impd1_tweak_control() local
51 cur = readl(impd1->base + IMPD1_CTRL) & ~mask; impd1_tweak_control()
52 writel(cur | val, impd1->base + IMPD1_CTRL); impd1_tweak_control()
/linux-4.1.27/sound/usb/
H A Dclock.c202 int ret, i, cur; __uac_clock_find_source() local
220 cur = ret; __uac_clock_find_source()
230 if (i == cur) __uac_clock_find_source()
/linux-4.1.27/drivers/scsi/ibmvscsi/
H A Dibmvscsi.h58 int size, cur; member in struct:crq_queue
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
H A Dlproc_llite.c1331 int i, cur = -1; ll_rw_stats_tally() local
1347 cur = i; ll_rw_stats_tally()
1352 if (cur == -1) { ll_rw_stats_tally()
1356 cur = sbi->ll_extent_process_count; ll_rw_stats_tally()
1357 io_extents->pp_extents[cur].pid = pid; ll_rw_stats_tally()
1358 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist); ll_rw_stats_tally()
1359 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist); ll_rw_stats_tally()
1365 io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++; ll_rw_stats_tally()
1368 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++; ll_rw_stats_tally()
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/engine/fifo/
H A Dnv50.c45 struct nvkm_gpuobj *cur; nv50_fifo_playlist_update_locked() local
48 cur = priv->playlist[priv->cur_playlist]; nv50_fifo_playlist_update_locked()
53 nv_wo32(cur, p++ * 4, i); nv50_fifo_playlist_update_locked()
58 nv_wr32(priv, 0x0032f4, cur->addr >> 12); nv50_fifo_playlist_update_locked()
/linux-4.1.27/drivers/gpu/drm/qxl/
H A Dqxl_release.c65 unsigned long cur, end = jiffies + timeout; qxl_fence_wait() local
112 cur = jiffies; qxl_fence_wait()
113 if (time_after(cur, end)) qxl_fence_wait()
115 return end - cur; qxl_fence_wait()
/linux-4.1.27/drivers/gpu/drm/udl/
H A Dudl_fb.c90 struct page *cur; udlfb_dpy_deferred_io() local
114 list_for_each_entry(cur, &fbdefio->pagelist, lru) { udlfb_dpy_deferred_io()
118 &cmd, cur->index << PAGE_SHIFT, udlfb_dpy_deferred_io()
119 cur->index << PAGE_SHIFT, udlfb_dpy_deferred_io()
/linux-4.1.27/drivers/video/fbdev/riva/
H A Driva_hw.c173 char cur; member in struct:__anon10611
251 int last, next, cur; nv3_iterate() local
260 cur = ainfo->cur; nv3_iterate()
328 last = cur; nv3_iterate()
329 cur = next; nv3_iterate()
331 switch (cur) nv3_iterate()
334 if (last==cur) misses = 0; nv3_iterate()
338 if (last!=cur) nv3_iterate()
350 if (last==cur) misses = 0; nv3_iterate()
354 if (last!=cur) nv3_iterate()
366 if (last==cur) misses = 0; nv3_iterate()
455 ainfo->cur = ENGINE; nv3_arb()
467 ainfo->cur = MPORT; nv3_arb()
482 ainfo->cur = GRAPHICS; nv3_arb()
494 ainfo->cur = VIDEO; nv3_arb()
/linux-4.1.27/scripts/
H A Dcheckpatch.pl1385 my $cur = $stream;
1389 while (length($cur)) {
1393 if ($cur =~ /^(\s+)/o) {
1400 } elsif ($cur =~ /^(\(\s*$Type\s*)\)/ && $av_pending eq '_') {
1405 } elsif ($cur =~ /^($Type)\s*(?:$Ident|,|\)|\(|\s*$)/) {
1409 } elsif ($cur =~ /^($Modifier)\s*/) {
1413 } elsif ($cur =~ /^(\#\s*define\s*$Ident)(\(?)/o) {
1422 } elsif ($cur =~ /^(\#\s*(?:undef\s*$Ident|include\b))/o) {
1427 } elsif ($cur =~ /^(\#\s*(?:ifdef|ifndef|if))/o) {
1435 } elsif ($cur =~ /^(\#\s*(?:else|elif))/o) {
1443 } elsif ($cur =~ /^(\#\s*(?:endif))/o) {
1454 } elsif ($cur =~ /^(\\\n)/o) {
1457 } elsif ($cur =~ /^(__attribute__)\s*\(?/o) {
1462 } elsif ($cur =~ /^(sizeof)\s*(\()?/o) {
1469 } elsif ($cur =~ /^(if|while|for)\b/o) {
1474 } elsif ($cur =~/^(case)/o) {
1479 } elsif ($cur =~/^(return|else|goto|typeof|__typeof__)\b/o) {
1483 } elsif ($cur =~ /^(\()/o) {
1489 } elsif ($cur =~ /^(\))/o) {
1499 } elsif ($cur =~ /^($Ident)\s*\(/o) {
1504 } elsif ($cur =~ /^($Ident\s*):(?:\s*\d+\s*(,|=|;))?/) {
1513 } elsif ($cur =~ /^($Ident|$Constant)/o) {
1517 } elsif ($cur =~ /^($Assignment)/o) {
1521 } elsif ($cur =~/^(;|{|})/) {
1526 } elsif ($cur =~/^(,)/) {
1530 } elsif ($cur =~ /^(\?)/o) {
1534 } elsif ($cur =~ /^(:)/o) {
1545 } elsif ($cur =~ /^(\[)/o) {
1549 } elsif ($cur =~ /^(-(?![->])|\+(?!\+)|\*|\&\&|\&)/o) {
1562 } elsif ($cur =~ /^($Operators)/o) {
1568 } elsif ($cur =~ /(^.)/o) {
1572 $cur = substr($cur, length($1));
/linux-4.1.27/drivers/media/radio/
H A Dradio-sf16fmr2.c157 balance = fmr2->balance->cur.val; fmr2_s_ctrl()
161 volume = fmr2->volume->cur.val; fmr2_s_ctrl()
/linux-4.1.27/drivers/mfd/
H A Dti_am335x_tscadc.c144 const __be32 *cur; ti_tscadc_probe() local
161 of_property_for_each_u32(node, "ti,adc-channels", prop, cur, val) { ti_tscadc_probe()
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/linux/
H A Dlinux-obdo.c154 "valid %#llx, cur time %lu/%lu, new %llu/%llu\n", obdo_refresh_inode()
193 "valid %#llx, cur time %lu/%lu, new %llu/%llu\n", obdo_to_inode()
/linux-4.1.27/drivers/net/ethernet/atheros/alx/
H A Dmain.c79 u16 cur, next, count = 0; alx_refill_rx_ring() local
81 next = cur = rxq->write_idx; alx_refill_rx_ring()
84 cur_buf = &rxq->bufs[cur]; alx_refill_rx_ring()
87 struct alx_rfd *rfd = &rxq->rfd[cur]; alx_refill_rx_ring()
113 cur = next; alx_refill_rx_ring()
116 cur_buf = &rxq->bufs[cur]; alx_refill_rx_ring()
123 rxq->write_idx = cur; alx_refill_rx_ring()
124 alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); alx_refill_rx_ring()
