Searched refs: node (results 1 - 200 of 3595, sorted by relevance)


/linux-4.4.14/fs/hfs/
bnode.c
8 * Handle basic btree node operations
17 void hfs_bnode_read(struct hfs_bnode *node, void *buf, hfs_bnode_read() argument
22 off += node->page_offset; hfs_bnode_read()
23 page = node->page[0]; hfs_bnode_read()
29 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off) hfs_bnode_read_u16() argument
33 hfs_bnode_read(node, &data, off, 2); hfs_bnode_read_u16()
37 u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off) hfs_bnode_read_u8() argument
41 hfs_bnode_read(node, &data, off, 1); hfs_bnode_read_u8()
45 void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off) hfs_bnode_read_key() argument
50 tree = node->tree; hfs_bnode_read_key()
51 if (node->type == HFS_NODE_LEAF || hfs_bnode_read_key()
53 key_len = hfs_bnode_read_u8(node, off) + 1; hfs_bnode_read_key()
57 hfs_bnode_read(node, key, off, key_len); hfs_bnode_read_key()
60 void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len) hfs_bnode_write() argument
64 off += node->page_offset; hfs_bnode_write()
65 page = node->page[0]; hfs_bnode_write()
72 void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data) hfs_bnode_write_u16() argument
76 hfs_bnode_write(node, &v, off, 2); hfs_bnode_write_u16()
79 void hfs_bnode_write_u8(struct hfs_bnode *node, int off, u8 data) hfs_bnode_write_u8() argument
82 hfs_bnode_write(node, &data, off, 1); hfs_bnode_write_u8()
85 void hfs_bnode_clear(struct hfs_bnode *node, int off, int len) hfs_bnode_clear() argument
89 off += node->page_offset; hfs_bnode_clear()
90 page = node->page[0]; hfs_bnode_clear()
118 void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) hfs_bnode_move() argument
126 src += node->page_offset; hfs_bnode_move()
127 dst += node->page_offset; hfs_bnode_move()
128 page = node->page[0]; hfs_bnode_move()
135 void hfs_bnode_dump(struct hfs_bnode *node) hfs_bnode_dump() argument
141 hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this); hfs_bnode_dump()
142 hfs_bnode_read(node, &desc, 0, sizeof(desc)); hfs_bnode_dump()
147 off = node->tree->node_size - 2; hfs_bnode_dump()
149 key_off = hfs_bnode_read_u16(node, off); hfs_bnode_dump()
151 if (i && node->type == HFS_NODE_INDEX) { hfs_bnode_dump()
154 if (node->tree->attributes & HFS_TREE_VARIDXKEYS) hfs_bnode_dump()
155 tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1; hfs_bnode_dump()
157 tmp = node->tree->max_key_len + 1; hfs_bnode_dump()
159 tmp, hfs_bnode_read_u8(node, key_off)); hfs_bnode_dump()
160 hfs_bnode_read(node, &cnid, key_off + tmp, 4); hfs_bnode_dump()
162 } else if (i && node->type == HFS_NODE_LEAF) { hfs_bnode_dump()
165 tmp = hfs_bnode_read_u8(node, key_off); hfs_bnode_dump()
172 void hfs_bnode_unlink(struct hfs_bnode *node) hfs_bnode_unlink() argument
178 tree = node->tree; hfs_bnode_unlink()
179 if (node->prev) { hfs_bnode_unlink()
180 tmp = hfs_bnode_find(tree, node->prev); hfs_bnode_unlink()
183 tmp->next = node->next; hfs_bnode_unlink()
187 } else if (node->type == HFS_NODE_LEAF) hfs_bnode_unlink()
188 tree->leaf_head = node->next; hfs_bnode_unlink()
190 if (node->next) { hfs_bnode_unlink()
191 tmp = hfs_bnode_find(tree, node->next); hfs_bnode_unlink()
194 tmp->prev = node->prev; hfs_bnode_unlink()
198 } else if (node->type == HFS_NODE_LEAF) hfs_bnode_unlink()
199 tree->leaf_tail = node->prev; hfs_bnode_unlink()
202 if (!node->prev && !node->next) { hfs_bnode_unlink()
205 if (!node->parent) { hfs_bnode_unlink()
209 set_bit(HFS_BNODE_DELETED, &node->flags); hfs_bnode_unlink()
221 struct hfs_bnode *node; hfs_bnode_findhash() local
224 pr_err("request for non-existent node %d in B*Tree\n", cnid); hfs_bnode_findhash()
228 for (node = tree->node_hash[hfs_bnode_hash(cnid)]; hfs_bnode_findhash()
229 node; node = node->next_hash) { hfs_bnode_findhash()
230 if (node->this == cnid) { hfs_bnode_findhash()
231 return node; hfs_bnode_findhash()
240 struct hfs_bnode *node, *node2; __hfs_bnode_create() local
247 pr_err("request for non-existent node %d in B*Tree\n", cnid); __hfs_bnode_create()
254 node = kzalloc(size, GFP_KERNEL); __hfs_bnode_create()
255 if (!node) __hfs_bnode_create()
257 node->tree = tree; __hfs_bnode_create()
258 node->this = cnid; __hfs_bnode_create()
259 set_bit(HFS_BNODE_NEW, &node->flags); __hfs_bnode_create()
260 atomic_set(&node->refcnt, 1); __hfs_bnode_create()
262 node->tree->cnid, node->this); __hfs_bnode_create()
263 init_waitqueue_head(&node->lock_wq); __hfs_bnode_create()
268 node->next_hash = tree->node_hash[hash]; __hfs_bnode_create()
269 tree->node_hash[hash] = node; __hfs_bnode_create()
273 kfree(node); __hfs_bnode_create()
282 node->page_offset = off & ~PAGE_CACHE_MASK; __hfs_bnode_create()
291 node->page[i] = page; __hfs_bnode_create()
294 return node; __hfs_bnode_create()
296 set_bit(HFS_BNODE_ERROR, &node->flags); __hfs_bnode_create()
297 return node; __hfs_bnode_create()
300 void hfs_bnode_unhash(struct hfs_bnode *node) hfs_bnode_unhash() argument
305 node->tree->cnid, node->this, atomic_read(&node->refcnt)); hfs_bnode_unhash()
306 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)]; hfs_bnode_unhash()
307 *p && *p != node; p = &(*p)->next_hash) hfs_bnode_unhash()
310 *p = node->next_hash; hfs_bnode_unhash()
311 node->tree->node_hash_cnt--; hfs_bnode_unhash()
314 /* Load a particular node out of a tree */ hfs_bnode_find()
317 struct hfs_bnode *node; hfs_bnode_find() local
323 node = hfs_bnode_findhash(tree, num); hfs_bnode_find()
324 if (node) { hfs_bnode_find()
325 hfs_bnode_get(node); hfs_bnode_find()
327 wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags)); hfs_bnode_find()
328 if (test_bit(HFS_BNODE_ERROR, &node->flags)) hfs_bnode_find()
330 return node; hfs_bnode_find()
333 node = __hfs_bnode_create(tree, num); hfs_bnode_find()
334 if (!node) hfs_bnode_find()
336 if (test_bit(HFS_BNODE_ERROR, &node->flags)) hfs_bnode_find()
338 if (!test_bit(HFS_BNODE_NEW, &node->flags)) hfs_bnode_find()
339 return node; hfs_bnode_find()
341 desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset); hfs_bnode_find()
342 node->prev = be32_to_cpu(desc->prev); hfs_bnode_find()
343 node->next = be32_to_cpu(desc->next); hfs_bnode_find()
344 node->num_recs = be16_to_cpu(desc->num_recs); hfs_bnode_find()
345 node->type = desc->type; hfs_bnode_find()
346 node->height = desc->height; hfs_bnode_find()
347 kunmap(node->page[0]); hfs_bnode_find()
349 switch (node->type) { hfs_bnode_find()
352 if (node->height != 0) hfs_bnode_find()
356 if (node->height != 1) hfs_bnode_find()
360 if (node->height <= 1 || node->height > tree->depth) hfs_bnode_find()
368 off = hfs_bnode_read_u16(node, rec_off); hfs_bnode_find()
371 for (i = 1; i <= node->num_recs; off = next_off, i++) { hfs_bnode_find()
373 next_off = hfs_bnode_read_u16(node, rec_off); hfs_bnode_find()
379 if (node->type != HFS_NODE_INDEX && hfs_bnode_find()
380 node->type != HFS_NODE_LEAF) hfs_bnode_find()
382 key_size = hfs_bnode_read_u8(node, off) + 1; hfs_bnode_find()
386 clear_bit(HFS_BNODE_NEW, &node->flags); hfs_bnode_find()
387 wake_up(&node->lock_wq); hfs_bnode_find()
388 return node; hfs_bnode_find()
391 set_bit(HFS_BNODE_ERROR, &node->flags); hfs_bnode_find()
392 clear_bit(HFS_BNODE_NEW, &node->flags); hfs_bnode_find()
393 wake_up(&node->lock_wq); hfs_bnode_find()
394 hfs_bnode_put(node); hfs_bnode_find()
398 void hfs_bnode_free(struct hfs_bnode *node) hfs_bnode_free() argument
402 for (i = 0; i < node->tree->pages_per_bnode; i++) hfs_bnode_free()
403 if (node->page[i]) hfs_bnode_free()
404 page_cache_release(node->page[i]); hfs_bnode_free()
405 kfree(node); hfs_bnode_free()
410 struct hfs_bnode *node; hfs_bnode_create() local
415 node = hfs_bnode_findhash(tree, num); hfs_bnode_create()
417 if (node) { hfs_bnode_create()
418 pr_crit("new node %u already hashed?\n", num); hfs_bnode_create()
420 return node; hfs_bnode_create()
422 node = __hfs_bnode_create(tree, num); hfs_bnode_create()
423 if (!node) hfs_bnode_create()
425 if (test_bit(HFS_BNODE_ERROR, &node->flags)) { hfs_bnode_create()
426 hfs_bnode_put(node); hfs_bnode_create()
430 pagep = node->page; hfs_bnode_create()
431 memset(kmap(*pagep) + node->page_offset, 0, hfs_bnode_create()
440 clear_bit(HFS_BNODE_NEW, &node->flags); hfs_bnode_create()
441 wake_up(&node->lock_wq); hfs_bnode_create()
443 return node; hfs_bnode_create()
446 void hfs_bnode_get(struct hfs_bnode *node) hfs_bnode_get() argument
448 if (node) { hfs_bnode_get()
449 atomic_inc(&node->refcnt); hfs_bnode_get()
451 node->tree->cnid, node->this, hfs_bnode_get()
452 atomic_read(&node->refcnt)); hfs_bnode_get()
456 /* Dispose of resources used by a node */ hfs_bnode_put()
457 void hfs_bnode_put(struct hfs_bnode *node) hfs_bnode_put() argument
459 if (node) { hfs_bnode_put()
460 struct hfs_btree *tree = node->tree; hfs_bnode_put()
464 node->tree->cnid, node->this, hfs_bnode_put()
465 atomic_read(&node->refcnt)); hfs_bnode_put()
466 BUG_ON(!atomic_read(&node->refcnt)); hfs_bnode_put()
467 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) hfs_bnode_put()
470 if (!node->page[i]) hfs_bnode_put()
472 mark_page_accessed(node->page[i]); hfs_bnode_put()
475 if (test_bit(HFS_BNODE_DELETED, &node->flags)) { hfs_bnode_put()
476 hfs_bnode_unhash(node); hfs_bnode_put()
478 hfs_bmap_free(node); hfs_bnode_put()
479 hfs_bnode_free(node); hfs_bnode_put()
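The bnode helpers above layer typed accessors over one raw byte-copy primitive, converting HFS's big-endian on-disk fields at the boundary. Below is a minimal userspace sketch of the same layering; demo_bnode and the function names are hypothetical, not kernel types.

/* Sketch of the bnode accessor layering: one raw read primitive plus
 * a typed wrapper that converts from the big-endian on-disk layout.
 * Standalone userspace demo with hypothetical names, not kernel code. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>   /* ntohs() for 16-bit big-endian */

struct demo_bnode { const uint8_t *data; size_t size; };

static void bnode_read(const struct demo_bnode *n, void *buf, int off, int len)
{
	memcpy(buf, n->data + off, len);
}

static uint16_t bnode_read_u16(const struct demo_bnode *n, int off)
{
	uint16_t be;

	bnode_read(n, &be, off, 2);
	return ntohs(be);            /* HFS stores fields big-endian */
}

int main(void)
{
	const uint8_t raw[] = { 0x01, 0x02 };     /* 0x0102 big-endian */
	struct demo_bnode n = { raw, sizeof(raw) };

	printf("%#x\n", bnode_read_u16(&n, 0));   /* prints 0x102 */
	return 0;
}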
btree.c
138 struct hfs_bnode *node; hfs_btree_close() local
145 while ((node = tree->node_hash[i])) { hfs_btree_close()
146 tree->node_hash[i] = node->next_hash; hfs_btree_close()
147 if (atomic_read(&node->refcnt)) hfs_btree_close()
148 pr_err("node %d:%d still has %d user(s)!\n", hfs_btree_close()
149 node->tree->cnid, node->this, hfs_btree_close()
150 atomic_read(&node->refcnt)); hfs_btree_close()
151 hfs_bnode_free(node); hfs_btree_close()
162 struct hfs_bnode *node; hfs_btree_write() local
165 node = hfs_bnode_find(tree, 0); hfs_btree_write()
166 if (IS_ERR(node)) hfs_btree_write()
170 page = node->page[0]; hfs_btree_write()
184 hfs_bnode_put(node); hfs_btree_write()
190 struct hfs_bnode *node; hfs_bmap_new_bmap() local
194 node = hfs_bnode_create(tree, idx); hfs_bmap_new_bmap()
195 if (IS_ERR(node)) hfs_bmap_new_bmap()
196 return node; hfs_bmap_new_bmap()
205 node->type = HFS_NODE_MAP; hfs_bmap_new_bmap()
206 node->num_recs = 1; hfs_bmap_new_bmap()
207 hfs_bnode_clear(node, 0, tree->node_size); hfs_bmap_new_bmap()
214 hfs_bnode_write(node, &desc, 0, sizeof(desc)); hfs_bmap_new_bmap()
215 hfs_bnode_write_u16(node, 14, 0x8000); hfs_bmap_new_bmap()
216 hfs_bnode_write_u16(node, tree->node_size - 2, 14); hfs_bmap_new_bmap()
217 hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6); hfs_bmap_new_bmap()
219 return node; hfs_bmap_new_bmap()
224 struct hfs_bnode *node, *next_node; hfs_bmap_alloc() local
253 node = hfs_bnode_find(tree, nidx); hfs_bmap_alloc()
254 if (IS_ERR(node)) hfs_bmap_alloc()
255 return node; hfs_bmap_alloc()
256 len = hfs_brec_lenoff(node, 2, &off16); hfs_bmap_alloc()
259 off += node->page_offset; hfs_bmap_alloc()
260 pagep = node->page + (off >> PAGE_CACHE_SHIFT); hfs_bmap_alloc()
277 hfs_bnode_put(node); hfs_bmap_alloc()
291 nidx = node->next; hfs_bmap_alloc()
293 printk(KERN_DEBUG "create new bmap node...\n"); hfs_bmap_alloc()
294 next_node = hfs_bmap_new_bmap(node, idx); hfs_bmap_alloc()
297 hfs_bnode_put(node); hfs_bmap_alloc()
300 node = next_node; hfs_bmap_alloc()
302 len = hfs_brec_lenoff(node, 0, &off16); hfs_bmap_alloc()
304 off += node->page_offset; hfs_bmap_alloc()
305 pagep = node->page + (off >> PAGE_CACHE_SHIFT); hfs_bmap_alloc()
311 void hfs_bmap_free(struct hfs_bnode *node) hfs_bmap_free() argument
319 hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this); hfs_bmap_free()
320 tree = node->tree; hfs_bmap_free()
321 nidx = node->this; hfs_bmap_free()
322 node = hfs_bnode_find(tree, 0); hfs_bmap_free()
323 if (IS_ERR(node)) hfs_bmap_free()
325 len = hfs_brec_lenoff(node, 2, &off); hfs_bmap_free()
330 i = node->next; hfs_bmap_free()
331 hfs_bnode_put(node); hfs_bmap_free()
335 node->this); hfs_bmap_free()
338 node = hfs_bnode_find(tree, i); hfs_bmap_free()
339 if (IS_ERR(node)) hfs_bmap_free()
341 if (node->type != HFS_NODE_MAP) { hfs_bmap_free()
344 node->this, node->type); hfs_bmap_free()
345 hfs_bnode_put(node); hfs_bmap_free()
348 len = hfs_brec_lenoff(node, 0, &off); hfs_bmap_free()
350 off += node->page_offset + nidx / 8; hfs_bmap_free()
351 page = node->page[off >> PAGE_CACHE_SHIFT]; hfs_bmap_free()
358 node->this, node->type); hfs_bmap_free()
360 hfs_bnode_put(node); hfs_bmap_free()
366 hfs_bnode_put(node); hfs_bmap_free()
brec.c
17 /* Get the length and offset of the given record in the given node */ hfs_brec_lenoff()
18 u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off) hfs_brec_lenoff() argument
23 dataoff = node->tree->node_size - (rec + 2) * 2; hfs_brec_lenoff()
24 hfs_bnode_read(node, retval, dataoff, 4); hfs_brec_lenoff()
30 u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec) hfs_brec_keylen() argument
34 if (node->type != HFS_NODE_INDEX && node->type != HFS_NODE_LEAF) hfs_brec_keylen()
37 if ((node->type == HFS_NODE_INDEX) && hfs_brec_keylen()
38 !(node->tree->attributes & HFS_TREE_VARIDXKEYS)) { hfs_brec_keylen()
39 if (node->tree->attributes & HFS_TREE_BIGKEYS) hfs_brec_keylen()
40 retval = node->tree->max_key_len + 2; hfs_brec_keylen()
42 retval = node->tree->max_key_len + 1; hfs_brec_keylen()
44 recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2); hfs_brec_keylen()
47 if (node->tree->attributes & HFS_TREE_BIGKEYS) { hfs_brec_keylen()
48 retval = hfs_bnode_read_u16(node, recoff) + 2; hfs_brec_keylen()
49 if (retval > node->tree->max_key_len + 2) { hfs_brec_keylen()
54 retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1; hfs_brec_keylen()
55 if (retval > node->tree->max_key_len + 1) { hfs_brec_keylen()
67 struct hfs_bnode *node, *new_node; hfs_brec_insert() local
89 node = fd->bnode; hfs_brec_insert()
90 hfs_bnode_dump(node); hfs_brec_insert()
92 end_rec_off = tree->node_size - (node->num_recs + 1) * 2; hfs_brec_insert()
93 end_off = hfs_bnode_read_u16(node, end_rec_off); hfs_brec_insert()
105 if (node->type == HFS_NODE_LEAF) { hfs_brec_insert()
109 node->num_recs++; hfs_brec_insert()
111 hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs); hfs_brec_insert()
112 hfs_bnode_write_u16(node, end_rec_off, end_off + size); hfs_brec_insert()
120 data_off = hfs_bnode_read_u16(node, data_rec_off + 2); hfs_brec_insert()
121 hfs_bnode_write_u16(node, data_rec_off, data_off + size); hfs_brec_insert()
126 hfs_bnode_move(node, data_off + size, data_off, hfs_brec_insert()
130 hfs_bnode_write(node, fd->search_key, data_off, key_len); hfs_brec_insert()
131 hfs_bnode_write(node, entry, data_off + key_len, entry_len); hfs_brec_insert()
132 hfs_bnode_dump(node); hfs_brec_insert()
136 * at the start of the node and it is not the new node hfs_brec_insert()
138 if (!rec && new_node != node) { hfs_brec_insert()
139 hfs_bnode_read_key(node, fd->search_key, data_off + size); hfs_brec_insert()
178 struct hfs_bnode *node, *parent; hfs_brec_remove() local
182 node = fd->bnode; hfs_brec_remove()
185 end_off = tree->node_size - (node->num_recs + 1) * 2; hfs_brec_remove()
187 if (node->type == HFS_NODE_LEAF) { hfs_brec_remove()
191 hfs_bnode_dump(node); hfs_brec_remove()
194 if (!--node->num_recs) { hfs_brec_remove()
195 hfs_bnode_unlink(node); hfs_brec_remove()
196 if (!node->parent) hfs_brec_remove()
198 parent = hfs_bnode_find(tree, node->parent); hfs_brec_remove()
201 hfs_bnode_put(node); hfs_brec_remove()
202 node = fd->bnode = parent; hfs_brec_remove()
204 __hfs_brec_find(node, fd); hfs_brec_remove()
207 hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs); hfs_brec_remove()
214 data_off = hfs_bnode_read_u16(node, rec_off); hfs_brec_remove()
215 hfs_bnode_write_u16(node, rec_off + 2, data_off - size); hfs_brec_remove()
220 hfs_bnode_move(node, fd->keyoffset, fd->keyoffset + size, hfs_brec_remove()
223 hfs_bnode_dump(node); hfs_brec_remove()
232 struct hfs_bnode *node, *new_node, *next_node; hfs_bnode_split() local
238 node = fd->bnode; hfs_bnode_split()
242 hfs_bnode_get(node); hfs_bnode_split()
244 node->this, new_node->this, node->next); hfs_bnode_split()
245 new_node->next = node->next; hfs_bnode_split()
246 new_node->prev = node->this; hfs_bnode_split()
247 new_node->parent = node->parent; hfs_bnode_split()
248 new_node->type = node->type; hfs_bnode_split()
249 new_node->height = node->height; hfs_bnode_split()
251 if (node->next) hfs_bnode_split()
252 next_node = hfs_bnode_find(tree, node->next); hfs_bnode_split()
257 hfs_bnode_put(node); hfs_bnode_split()
262 size = tree->node_size / 2 - node->num_recs * 2 - 14; hfs_bnode_split()
266 data_start = hfs_bnode_read_u16(node, old_rec_off); hfs_bnode_split()
270 if (++num_recs < node->num_recs) hfs_bnode_split()
273 hfs_bnode_put(node); hfs_bnode_split()
286 data_start = hfs_bnode_read_u16(node, old_rec_off); hfs_bnode_split()
288 hfs_bnode_put(node); hfs_bnode_split()
295 new_node->num_recs = node->num_recs - num_recs; hfs_bnode_split()
296 node->num_recs = num_recs; hfs_bnode_split()
307 data_end = hfs_bnode_read_u16(node, old_rec_off); hfs_bnode_split()
312 hfs_bnode_copy(new_node, 14, node, data_start, data_end - data_start); hfs_bnode_split()
324 node->next = new_node->this; hfs_bnode_split()
325 hfs_bnode_read(node, &node_desc, 0, sizeof(node_desc)); hfs_bnode_split()
326 node_desc.next = cpu_to_be32(node->next); hfs_bnode_split()
327 node_desc.num_recs = cpu_to_be16(node->num_recs); hfs_bnode_split()
328 hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc)); hfs_bnode_split()
337 } else if (node->this == tree->leaf_tail) { hfs_bnode_split()
338 /* if there is no next node, this might be the new tail */ hfs_bnode_split()
343 hfs_bnode_dump(node); hfs_bnode_split()
345 hfs_bnode_put(node); hfs_bnode_split()
353 struct hfs_bnode *node, *new_node, *parent; hfs_brec_update_parent() local
359 node = fd->bnode; hfs_brec_update_parent()
361 if (!node->parent) hfs_brec_update_parent()
365 parent = hfs_bnode_find(tree, node->parent); hfs_brec_update_parent()
376 newkeylen = (hfs_bnode_read_u8(node, 14) | 1) + 1; hfs_brec_update_parent()
391 printk(KERN_DEBUG "splitting index node...\n"); hfs_brec_update_parent()
415 hfs_bnode_copy(parent, fd->keyoffset, node, 14, newkeylen); hfs_brec_update_parent()
420 hfs_bnode_put(node); hfs_brec_update_parent()
421 node = parent; hfs_brec_update_parent()
437 if (new_node == node) hfs_brec_update_parent()
440 hfs_bnode_read_key(node, fd->search_key, 14); hfs_brec_update_parent()
444 if (!rec && node->parent) hfs_brec_update_parent()
447 fd->bnode = node; hfs_brec_update_parent()
453 struct hfs_bnode *node, *new_node; hfs_btree_inc_height() local
458 node = NULL; hfs_btree_inc_height()
460 node = hfs_bnode_find(tree, tree->root); hfs_btree_inc_height()
461 if (IS_ERR(node)) hfs_btree_inc_height()
462 return PTR_ERR(node); hfs_btree_inc_height()
466 hfs_bnode_put(node); hfs_btree_inc_height()
495 if (node) { hfs_btree_inc_height()
497 node->parent = tree->root; hfs_btree_inc_height()
498 if (node->type == HFS_NODE_LEAF || hfs_btree_inc_height()
500 key_size = hfs_bnode_read_u8(node, 14) + 1; hfs_btree_inc_height()
503 hfs_bnode_copy(new_node, 14, node, 14, key_size); hfs_btree_inc_height()
510 cnid = cpu_to_be32(node->this); hfs_btree_inc_height()
516 hfs_bnode_put(node); hfs_btree_inc_height()
btree.h
44 /* A HFS BTree node in memory */
85 extern void hfs_bmap_free(struct hfs_bnode *node);
125 __be32 next; /* (V) Number of the next node at this level */
126 __be32 prev; /* (V) Number of the prev node at this level */
127 u8 type; /* (F) The type of node */
128 u8 height; /* (F) The level of this node (leaves=1) */
129 __be16 num_recs; /* (V) The number of records in this node */
133 #define HFS_NODE_INDEX 0x00 /* An internal (index) node */
134 #define HFS_NODE_HEADER 0x01 /* The tree header node (node 0) */
136 #define HFS_NODE_LEAF 0xFF /* A leaf (ndNHeight==1) node */
140 __be32 root; /* (V) The node number of the root node */
142 __be32 leaf_head; /* (V) The number of the first leaf node */
143 __be32 leaf_tail; /* (V) The number of the last leaf node */
144 __be16 node_size; /* (F) The number of bytes in a node (=512) */
145 __be16 max_key_len; /* (F) The length of a key in an index node */
/linux-4.4.14/arch/mips/include/asm/netlogic/xlp-hal/
iomap.h
50 #define XLP_IO_DEV(node, dev) ((dev) + (node) * 8)
53 #define XLP_HDR_OFFSET(node, bus, dev, fn) \
54 XLP_IO_PCI_OFFSET(bus, XLP_IO_DEV(node, dev), fn)
56 #define XLP_IO_BRIDGE_OFFSET(node) XLP_HDR_OFFSET(node, 0, 0, 0)
58 #define XLP_IO_CIC0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 0, 1)
59 #define XLP_IO_CIC1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 0, 2)
60 #define XLP_IO_CIC2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 0, 3)
61 #define XLP_IO_PIC_OFFSET(node) XLP_HDR_OFFSET(node, 0, 0, 4)
63 #define XLP_IO_PCIE_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 1, i)
64 #define XLP_IO_PCIE0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 1, 0)
65 #define XLP_IO_PCIE1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 1, 1)
66 #define XLP_IO_PCIE2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 1, 2)
67 #define XLP_IO_PCIE3_OFFSET(node) XLP_HDR_OFFSET(node, 0, 1, 3)
69 #define XLP_IO_USB_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 2, i)
70 #define XLP_IO_USB_EHCI0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 0)
71 #define XLP_IO_USB_OHCI0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 1)
72 #define XLP_IO_USB_OHCI1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 2)
73 #define XLP_IO_USB_EHCI1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 3)
74 #define XLP_IO_USB_OHCI2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 4)
75 #define XLP_IO_USB_OHCI3_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 5)
77 #define XLP_IO_SATA_OFFSET(node) XLP_HDR_OFFSET(node, 0, 3, 2)
80 #define XLP2XX_IO_USB_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 4, i)
81 #define XLP2XX_IO_USB_XHCI0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 4, 1)
82 #define XLP2XX_IO_USB_XHCI1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 4, 2)
83 #define XLP2XX_IO_USB_XHCI2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 4, 3)
85 #define XLP_IO_NAE_OFFSET(node) XLP_HDR_OFFSET(node, 0, 3, 0)
86 #define XLP_IO_POE_OFFSET(node) XLP_HDR_OFFSET(node, 0, 3, 1)
88 #define XLP_IO_CMS_OFFSET(node) XLP_HDR_OFFSET(node, 0, 4, 0)
90 #define XLP_IO_DMA_OFFSET(node) XLP_HDR_OFFSET(node, 0, 5, 1)
91 #define XLP_IO_SEC_OFFSET(node) XLP_HDR_OFFSET(node, 0, 5, 2)
92 #define XLP_IO_CMP_OFFSET(node) XLP_HDR_OFFSET(node, 0, 5, 3)
94 #define XLP_IO_UART_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 6, i)
95 #define XLP_IO_UART0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 0)
96 #define XLP_IO_UART1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 1)
97 #define XLP_IO_I2C_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 6, 2 + i)
98 #define XLP_IO_I2C0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 2)
99 #define XLP_IO_I2C1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 3)
100 #define XLP_IO_GPIO_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 4)
102 #define XLP2XX_IO_I2C_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 7)
105 #define XLP_IO_SYS_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 5)
106 #define XLP_IO_JTAG_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 6)
109 #define XLP_IO_NOR_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 0)
110 #define XLP_IO_NAND_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 1)
111 #define XLP_IO_SPI_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 2)
112 #define XLP_IO_MMC_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 3)
118 #define XLP9XX_IO_BRIDGE_OFFSET(node) XLP_IO_PCI_OFFSET(0, 0, node)
119 #define XLP9XX_IO_PIC_OFFSET(node) XLP9XX_HDR_OFFSET(node, 2, 0)
120 #define XLP9XX_IO_UART_OFFSET(node) XLP9XX_HDR_OFFSET(node, 2, 2)
121 #define XLP9XX_IO_SYS_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 0)
122 #define XLP9XX_IO_FUSE_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 1)
123 #define XLP9XX_IO_CLOCK_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 2)
124 #define XLP9XX_IO_POWER_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 3)
125 #define XLP9XX_IO_JTAG_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 4)
127 #define XLP9XX_IO_PCIE_OFFSET(node, i) XLP9XX_HDR_OFFSET(node, 1, i)
128 #define XLP9XX_IO_PCIE0_OFFSET(node) XLP9XX_HDR_OFFSET(node, 1, 0)
129 #define XLP9XX_IO_PCIE2_OFFSET(node) XLP9XX_HDR_OFFSET(node, 1, 2)
130 #define XLP9XX_IO_PCIE3_OFFSET(node) XLP9XX_HDR_OFFSET(node, 1, 3)
133 #define XLP9XX_IO_USB_OFFSET(node, i) XLP9XX_HDR_OFFSET(node, 4, i)
134 #define XLP9XX_IO_USB_XHCI0_OFFSET(node) XLP9XX_HDR_OFFSET(node, 4, 1)
135 #define XLP9XX_IO_USB_XHCI1_OFFSET(node) XLP9XX_HDR_OFFSET(node, 4, 2)
138 #define XLP9XX_IO_SATA_OFFSET(node) XLP9XX_HDR_OFFSET(node, 3, 2)
141 #define XLP9XX_IO_NOR_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 0)
142 #define XLP9XX_IO_NAND_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 1)
143 #define XLP9XX_IO_SPI_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 2)
144 #define XLP9XX_IO_MMC_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 3)
203 static inline int xlp9xx_get_socbus(int node) xlp9xx_get_socbus() argument
207 if (node == 0) xlp9xx_get_socbus()
209 socbridge = nlm_pcicfg_base(XLP9XX_IO_BRIDGE_OFFSET(node)); xlp9xx_get_socbus()
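XLP_HDR_OFFSET composes a node-relative device number (eight devices per node) into a PCI configuration offset. The sketch below shows the composition; note that the XLP_IO_PCI_OFFSET encoding used here is an assumption in ECAM style (bus<<20 | dev<<15 | fn<<12), since its definition is not among the matches above.

/* How XLP_HDR_OFFSET composes node-local device numbers.
 * XLP_IO_PCI_OFFSET's exact encoding is not shown in the results above;
 * the ECAM-style shifts below are an assumption. Userspace demo. */
#include <stdio.h>

#define XLP_IO_PCI_OFFSET(b, d, f) (((b) << 20) | ((d) << 15) | ((f) << 12))
#define XLP_IO_DEV(node, dev)      ((dev) + (node) * 8)
#define XLP_HDR_OFFSET(node, bus, dev, fn) \
	XLP_IO_PCI_OFFSET(bus, XLP_IO_DEV(node, dev), fn)

int main(void)
{
	/* UART on node 1: bus 0, device 6 (+8 per node), function 0 */
	printf("%#x\n", XLP_HDR_OFFSET(1, 0, 6, 0));
	return 0;
}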
/linux-4.4.14/arch/powerpc/sysdev/
mpc5xxx_clocks.c
3 * @node: device node
14 unsigned long mpc5xxx_get_bus_frequency(struct device_node *node) mpc5xxx_get_bus_frequency() argument
18 of_node_get(node); mpc5xxx_get_bus_frequency()
19 while (node) { mpc5xxx_get_bus_frequency()
20 p_bus_freq = of_get_property(node, "bus-frequency", NULL); mpc5xxx_get_bus_frequency()
24 node = of_get_next_parent(node); mpc5xxx_get_bus_frequency()
26 of_node_put(node); mpc5xxx_get_bus_frequency()
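mpc5xxx_get_bus_frequency() walks up the device tree until an ancestor supplies "bus-frequency"; of_get_next_parent() puts the reference on the node it was handed and returns its parent with a fresh reference, so the loop stays refcount-balanced. A condensed kernel-context sketch of that pattern, with a hypothetical helper name and of_property_read_u32() swapped in for the raw property lookup:

/* Condensed form of the parent-walk above: of_get_next_parent() puts
 * np and returns its parent with a new reference, so no device-node
 * reference is leaked. Hypothetical helper; kernel context assumed. */
#include <linux/of.h>

static u32 find_inherited_u32(struct device_node *np, const char *name)
{
	u32 val = 0;

	of_node_get(np);		/* balance the final of_node_put() */
	while (np) {
		if (!of_property_read_u32(np, name, &val))
			break;
		np = of_get_next_parent(np);	/* puts np, gets parent */
	}
	of_node_put(np);		/* no-op if np became NULL */
	return val;
}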
fsl_soc.c
112 struct device_node *node; get_brgfreq() local
119 node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg"); get_brgfreq()
120 if (node) { get_brgfreq()
121 prop = of_get_property(node, "clock-frequency", &size); get_brgfreq()
125 of_node_put(node); get_brgfreq()
130 node = of_find_node_by_type(NULL, "cpm"); get_brgfreq()
131 if (!node) get_brgfreq()
132 node = of_find_compatible_node(NULL, NULL, "fsl,qe"); get_brgfreq()
133 if (!node) get_brgfreq()
134 node = of_find_node_by_type(NULL, "qe"); get_brgfreq()
136 if (node) { get_brgfreq()
137 prop = of_get_property(node, "brg-frequency", &size); get_brgfreq()
142 prop = of_get_property(node, "bus-frequency", &size); get_brgfreq()
146 of_node_put(node); get_brgfreq()
158 struct device_node *node; get_baudrate() local
163 node = of_find_node_by_type(NULL, "serial"); get_baudrate()
164 if (node) { get_baudrate()
166 const unsigned int *prop = of_get_property(node, get_baudrate()
171 of_node_put(node); get_baudrate()
of_rtc.c
27 struct device_node *node; of_instantiate_rtc() local
34 for_each_compatible_node(node, NULL, for_each_compatible_node()
42 node->full_name); for_each_compatible_node()
46 err = of_address_to_resource(node, 0, res); for_each_compatible_node()
50 node->full_name); for_each_compatible_node()
55 node->full_name, plat_name, for_each_compatible_node()
/linux-4.4.14/lib/
interval_tree.c
6 #define START(node) ((node)->start)
7 #define LAST(node) ((node)->last)
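lib/interval_tree.c is only an instantiation of the INTERVAL_TREE_DEFINE template; START()/LAST() tell the template where a node's closed-interval endpoints live. A hedged sketch of instantiating the same template for a caller-defined node type follows (kernel context assumed; my_range and its fields are hypothetical).

/* Sketch: instantiating the interval-tree template for your own node
 * type, the same way lib/interval_tree.c does for interval_tree_node.
 * Kernel context assumed (include/linux/interval_tree_generic.h). */
#include <linux/rbtree.h>
#include <linux/interval_tree_generic.h>

struct my_range {
	struct rb_node rb;
	unsigned long start, last;	/* closed interval [start, last] */
	unsigned long __subtree_last;	/* maintained by the template */
};

#define MY_START(n) ((n)->start)
#define MY_LAST(n)  ((n)->last)

/* Generates my_range_insert(), my_range_remove(),
 * my_range_iter_first() and my_range_iter_next(). */
INTERVAL_TREE_DEFINE(struct my_range, rb, unsigned long, __subtree_last,
		     MY_START, MY_LAST, static, my_range)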
timerqueue.c
34 * @node: timer node to be added
36 * Adds the timer node to the timerqueue, sorted by the
37 * node's expires value.
39 bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) timerqueue_add() argument
46 WARN_ON_ONCE(!RB_EMPTY_NODE(&node->node)); timerqueue_add()
50 ptr = rb_entry(parent, struct timerqueue_node, node); timerqueue_add()
51 if (node->expires.tv64 < ptr->expires.tv64) timerqueue_add()
56 rb_link_node(&node->node, parent, p); timerqueue_add()
57 rb_insert_color(&node->node, &head->head); timerqueue_add()
59 if (!head->next || node->expires.tv64 < head->next->expires.tv64) { timerqueue_add()
60 head->next = node; timerqueue_add()
71 * @node: timer node to be removed
73 * Removes the timer node from the timerqueue.
75 bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) timerqueue_del() argument
77 WARN_ON_ONCE(RB_EMPTY_NODE(&node->node)); timerqueue_del()
80 if (head->next == node) { timerqueue_del()
81 struct rb_node *rbn = rb_next(&node->node); timerqueue_del()
84 rb_entry(rbn, struct timerqueue_node, node) : NULL; timerqueue_del()
86 rb_erase(&node->node, &head->head); timerqueue_del()
87 RB_CLEAR_NODE(&node->node); timerqueue_del()
95 * @node: Pointer to a timer.
97 * Provides the timer that is after the given node. This is used, when
101 struct timerqueue_node *timerqueue_iterate_next(struct timerqueue_node *node) timerqueue_iterate_next() argument
105 if (!node) timerqueue_iterate_next()
107 next = rb_next(&node->node); timerqueue_iterate_next()
110 return container_of(next, struct timerqueue_node, node); timerqueue_iterate_next()
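timerqueue keeps nodes rbtree-sorted by expiry and caches the earliest node in head->next, which is why timerqueue_add()/timerqueue_del() above maintain head->next and why peeking is O(1). A minimal kernel-context usage sketch, assuming the 4.4-era API shown above:

/* Minimal timerqueue usage sketch (kernel context, 4.4-era API).
 * timerqueue_getnext() is O(1) because the head caches the
 * earliest-expiring node, as maintained by timerqueue_add() above. */
#include <linux/timerqueue.h>
#include <linux/ktime.h>

static struct timerqueue_head queue;
static struct timerqueue_node timer_a, timer_b;

static void demo(void)
{
	timerqueue_init_head(&queue);
	timerqueue_init(&timer_a);
	timerqueue_init(&timer_b);

	timer_a.expires = ns_to_ktime(2000);
	timer_b.expires = ns_to_ktime(1000);
	timerqueue_add(&queue, &timer_a);
	timerqueue_add(&queue, &timer_b);

	/* timer_b expires first, so it is the cached head->next */
	WARN_ON(timerqueue_getnext(&queue) != &timer_b);

	timerqueue_del(&queue, &timer_b);	/* next becomes timer_a */
}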
btree.c
18 * well is that access to a random tree node is much faster than a large number
19 * of operations within each node.
37 * values are to the right, not to the left. All used slots within a node
95 unsigned long *node; btree_node_alloc() local
97 node = mempool_alloc(head->mempool, gfp); btree_node_alloc()
98 if (likely(node)) btree_node_alloc()
99 memset(node, 0, NODESIZE); btree_node_alloc()
100 return node; btree_node_alloc()
148 static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n) bkey() argument
150 return &node[n * geo->keylen]; bkey()
153 static void *bval(struct btree_geo *geo, unsigned long *node, int n) bval() argument
155 return (void *)node[geo->no_longs + n]; bval()
158 static void setkey(struct btree_geo *geo, unsigned long *node, int n, setkey() argument
161 longcpy(bkey(geo, node, n), key, geo->keylen); setkey()
164 static void setval(struct btree_geo *geo, unsigned long *node, int n, setval() argument
167 node[geo->no_longs + n] = (unsigned long) val; setval()
170 static void clearpair(struct btree_geo *geo, unsigned long *node, int n) clearpair() argument
172 longset(bkey(geo, node, n), 0, geo->keylen); clearpair()
173 node[geo->no_longs + n] = 0; clearpair()
178 head->node = NULL; __btree_init()
201 mempool_free(head->node, head->mempool); btree_destroy()
211 unsigned long *node = head->node; btree_last() local
217 node = bval(geo, node, 0); btree_last()
219 longcpy(key, bkey(geo, node, 0), geo->keylen); btree_last()
220 return bval(geo, node, 0); btree_last()
224 static int keycmp(struct btree_geo *geo, unsigned long *node, int pos, keycmp() argument
227 return longcmp(bkey(geo, node, pos), key, geo->keylen); keycmp()
245 unsigned long *node = head->node; btree_lookup() local
252 if (keycmp(geo, node, i, key) <= 0) btree_lookup()
256 node = bval(geo, node, i); btree_lookup()
257 if (!node) btree_lookup()
261 if (!node) btree_lookup()
265 if (keycmp(geo, node, i, key) == 0) btree_lookup()
266 return bval(geo, node, i); btree_lookup()
275 unsigned long *node = head->node; btree_update() local
282 if (keycmp(geo, node, i, key) <= 0) btree_update()
286 node = bval(geo, node, i); btree_update()
287 if (!node) btree_update()
291 if (!node) btree_update()
295 if (keycmp(geo, node, i, key) == 0) { btree_update()
296 setval(geo, node, i, val); btree_update()
305 * a parent node may be smaller than the smallest key of all its siblings.
315 unsigned long *node, *oldnode; btree_get_prev() local
327 node = head->node; btree_get_prev()
330 if (keycmp(geo, node, i, key) <= 0) btree_get_prev()
334 oldnode = node; btree_get_prev()
335 node = bval(geo, node, i); btree_get_prev()
336 if (!node) btree_get_prev()
341 if (!node) btree_get_prev()
345 if (keycmp(geo, node, i, key) <= 0) { btree_get_prev()
346 if (bval(geo, node, i)) { btree_get_prev()
347 longcpy(__key, bkey(geo, node, i), geo->keylen); btree_get_prev()
348 return bval(geo, node, i); btree_get_prev()
363 static int getpos(struct btree_geo *geo, unsigned long *node, getpos() argument
369 if (keycmp(geo, node, i, key) <= 0) getpos()
375 static int getfill(struct btree_geo *geo, unsigned long *node, int start) getfill() argument
380 if (!bval(geo, node, i)) getfill()
386 * locate the correct leaf node in the btree
391 unsigned long *node = head->node; find_level() local
396 if (keycmp(geo, node, i, key) <= 0) find_level()
399 if ((i == geo->no_pairs) || !bval(geo, node, i)) { find_level()
404 setkey(geo, node, i, key); find_level()
407 node = bval(geo, node, i); find_level()
409 BUG_ON(!node); find_level()
410 return node; find_level()
416 unsigned long *node; btree_grow() local
419 node = btree_node_alloc(head, gfp); btree_grow()
420 if (!node) btree_grow()
422 if (head->node) { btree_grow()
423 fill = getfill(geo, head->node, 0); btree_grow()
424 setkey(geo, node, 0, bkey(geo, head->node, fill - 1)); btree_grow()
425 setval(geo, node, 0, head->node); btree_grow()
427 head->node = node; btree_grow()
434 unsigned long *node; btree_shrink() local
440 node = head->node; btree_shrink()
441 fill = getfill(geo, node, 0); btree_shrink()
443 head->node = bval(geo, node, 0); btree_shrink()
445 mempool_free(node, head->mempool); btree_shrink()
452 unsigned long *node; btree_insert_level() local
463 node = find_level(head, geo, key, level); btree_insert_level()
464 pos = getpos(geo, node, key); btree_insert_level()
465 fill = getfill(geo, node, pos); btree_insert_level()
467 BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0); btree_insert_level()
470 /* need to split node */ btree_insert_level()
477 bkey(geo, node, fill / 2 - 1), btree_insert_level()
484 setkey(geo, new, i, bkey(geo, node, i)); btree_insert_level()
485 setval(geo, new, i, bval(geo, node, i)); btree_insert_level()
486 setkey(geo, node, i, bkey(geo, node, i + fill / 2)); btree_insert_level()
487 setval(geo, node, i, bval(geo, node, i + fill / 2)); btree_insert_level()
488 clearpair(geo, node, i + fill / 2); btree_insert_level()
491 setkey(geo, node, i, bkey(geo, node, fill - 1)); btree_insert_level()
492 setval(geo, node, i, bval(geo, node, fill - 1)); btree_insert_level()
493 clearpair(geo, node, fill - 1); btree_insert_level()
501 setkey(geo, node, i, bkey(geo, node, i - 1)); btree_insert_level()
502 setval(geo, node, i, bval(geo, node, i - 1)); btree_insert_level()
504 setkey(geo, node, pos, key); btree_insert_level()
505 setval(geo, node, pos, val); btree_insert_level()
548 * can happen. Parent node contains a single child, this rebalance()
549 * node, so merging with a sibling never happens. rebalance()
594 unsigned long *node; btree_remove_level() local
601 head->node = NULL; btree_remove_level()
605 node = find_level(head, geo, key, level); btree_remove_level()
606 pos = getpos(geo, node, key); btree_remove_level()
607 fill = getfill(geo, node, pos); btree_remove_level()
608 if ((level == 1) && (keycmp(geo, node, pos, key) != 0)) btree_remove_level()
610 ret = bval(geo, node, pos); btree_remove_level()
614 setkey(geo, node, i, bkey(geo, node, i + 1)); btree_remove_level()
615 setval(geo, node, i, bval(geo, node, i + 1)); btree_remove_level()
617 clearpair(geo, node, fill - 1); btree_remove_level()
621 rebalance(head, geo, key, level, node, fill - 1); btree_remove_level()
649 if (!(target->node)) { btree_merge()
651 target->node = victim->node; btree_merge()
677 unsigned long *node, unsigned long opaque, __btree_for_each()
687 child = bval(geo, node, i); __btree_for_each()
694 func(child, opaque, bkey(geo, node, i), count++, __btree_for_each()
698 mempool_free(node, head->mempool); __btree_for_each()
757 if (head->node) btree_visitor()
758 count = __btree_for_each(head, geo, head->node, opaque, func, btree_visitor()
775 if (head->node) btree_grim_visitor()
776 count = __btree_for_each(head, geo, head->node, opaque, func, btree_grim_visitor()
676 __btree_for_each(struct btree_head *head, struct btree_geo *geo, unsigned long *node, unsigned long opaque, void (*func)(void *elem, unsigned long opaque, unsigned long *key, size_t index, void *func2), void *func2, int reap, int height, size_t count) __btree_for_each() argument
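The lib/btree implementation stores keys as arrays of unsigned long and values as pointers; a geometry object (btree_geo32/btree_geo64/btree_geo128) fixes the key width. A hedged sketch of the public API as declared in include/linux/btree.h (kernel context assumed; on a 64-bit build a single unsigned long carries a geo64 key):

/* Sketch of the lib/btree public API (kernel context assumed).
 * Keys are passed by pointer; btree_geo64 selects 64-bit keys. */
#include <linux/btree.h>
#include <linux/gfp.h>

static struct btree_head head;

static int demo(void)
{
	unsigned long key = 42;
	static int value = 1;
	int err;

	err = btree_init(&head);	/* sets up the node mempool */
	if (err)
		return err;

	err = btree_insert(&head, &btree_geo64, &key, &value, GFP_KERNEL);
	if (err)
		goto out;

	/* returns the value stored under key 42, or NULL */
	WARN_ON(btree_lookup(&head, &btree_geo64, &key) != &value);

	btree_remove(&head, &btree_geo64, &key);
out:
	btree_destroy(&head);
	return err;
}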
plist.c
69 * plist_add - add @node to @head
71 * @node: &struct plist_node pointer
74 void plist_add(struct plist_node *node, struct plist_head *head) plist_add() argument
80 WARN_ON(!plist_node_empty(node)); plist_add()
81 WARN_ON(!list_empty(&node->prio_list)); plist_add()
89 if (node->prio < iter->prio) { plist_add()
99 if (!prev || prev->prio != node->prio) plist_add()
100 list_add_tail(&node->prio_list, &iter->prio_list); plist_add()
102 list_add_tail(&node->node_list, node_next); plist_add()
108 * plist_del - Remove a @node from plist.
110 * @node: &struct plist_node pointer - entry to be removed
113 void plist_del(struct plist_node *node, struct plist_head *head) plist_del() argument
117 if (!list_empty(&node->prio_list)) { plist_del()
118 if (node->node_list.next != &head->node_list) { plist_del()
121 next = list_entry(node->node_list.next, plist_del()
126 list_add(&next->prio_list, &node->prio_list); plist_del()
128 list_del_init(&node->prio_list); plist_del()
131 list_del_init(&node->node_list); plist_del()
137 * plist_requeue - Requeue @node at end of same-prio entries.
143 * @node: &struct plist_node pointer - entry to be moved
146 void plist_requeue(struct plist_node *node, struct plist_head *head) plist_requeue() argument
153 BUG_ON(plist_node_empty(node)); plist_requeue()
155 if (node == plist_last(head)) plist_requeue()
158 iter = plist_next(node); plist_requeue()
160 if (node->prio != iter->prio) plist_requeue()
163 plist_del(node, head); plist_requeue()
166 if (node->prio != iter->prio) { plist_for_each_continue()
171 list_add_tail(&node->node_list, node_next);
212 static void __init plist_test_requeue(struct plist_node *node) plist_test_requeue() argument
214 plist_requeue(node, &test_head); plist_test_requeue()
216 if (node != plist_last(&test_head)) plist_test_requeue()
217 BUG_ON(node->prio == plist_next(node)->prio); plist_test_requeue()
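A plist is a priority-sorted doubly linked list with a second list threading the first node of each distinct priority, so plist_add() above can skip whole same-priority runs. A minimal kernel-context sketch of the plist.h API (demo names are hypothetical):

/* Minimal plist usage sketch (kernel context assumed).
 * Lower ->prio sorts earlier; plist_first() returns the node at the
 * head of the numerically lowest priority run. */
#include <linux/plist.h>

static PLIST_HEAD(demo_head);
static struct plist_node low, high;

static void demo(void)
{
	plist_node_init(&low, 10);	/* prio 10 */
	plist_node_init(&high, 1);	/* prio 1: sorts first */

	plist_add(&low, &demo_head);
	plist_add(&high, &demo_head);

	WARN_ON(plist_first(&demo_head) != &high);

	plist_del(&high, &demo_head);	/* &low becomes first */
}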
rbtree_test.c
24 static void insert(struct test_node *node, struct rb_root *root) insert() argument
27 u32 key = node->key; insert()
37 rb_link_node(&node->rb, parent, new); insert()
38 rb_insert_color(&node->rb, root); insert()
41 static inline void erase(struct test_node *node, struct rb_root *root) erase() argument
43 rb_erase(&node->rb, root); erase()
46 static inline u32 augment_recompute(struct test_node *node) augment_recompute() argument
48 u32 max = node->val, child_augmented; augment_recompute()
49 if (node->rb.rb_left) { augment_recompute()
50 child_augmented = rb_entry(node->rb.rb_left, struct test_node, augment_recompute()
55 if (node->rb.rb_right) { augment_recompute()
56 child_augmented = rb_entry(node->rb.rb_right, struct test_node, augment_recompute()
67 static void insert_augmented(struct test_node *node, struct rb_root *root) insert_augmented() argument
70 u32 key = node->key; insert_augmented()
71 u32 val = node->val; insert_augmented()
85 node->augmented = val; insert_augmented()
86 rb_link_node(&node->rb, rb_parent, new); insert_augmented()
87 rb_insert_augmented(&node->rb, root, &augment_callbacks); insert_augmented()
90 static void erase_augmented(struct test_node *node, struct rb_root *root) erase_augmented() argument
92 rb_erase_augmented(&node->rb, root, &augment_callbacks); erase_augmented()
144 struct test_node *node = rb_entry(rb, struct test_node, rb); check() local
145 WARN_ON_ONCE(node->key < prev_key); check()
153 prev_key = node->key; check()
170 struct test_node *node = rb_entry(rb, struct test_node, rb); check_augmented() local
171 WARN_ON_ONCE(node->augmented != augment_recompute(node)); check_augmented()
rbtree.c
30 * 1) A node is either red or black
33 * 4) Both children of every red node are black
38 * consecutive red nodes in a path and every red node is therefore followed by
97 __rb_insert(struct rb_node *node, struct rb_root *root, __rb_insert() argument
100 struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; __rb_insert()
104 * Loop invariant: node is red __rb_insert()
111 rb_set_parent_color(node, NULL, RB_BLACK); __rb_insert()
136 node = gparent; __rb_insert()
137 parent = rb_parent(node); __rb_insert()
138 rb_set_parent_color(node, parent, RB_RED); __rb_insert()
143 if (node == tmp) { __rb_insert()
156 tmp = node->rb_left; __rb_insert()
158 WRITE_ONCE(node->rb_left, parent); __rb_insert()
162 rb_set_parent_color(parent, node, RB_RED); __rb_insert()
163 augment_rotate(parent, node); __rb_insert()
164 parent = node; __rb_insert()
165 tmp = node->rb_right; __rb_insert()
190 node = gparent; __rb_insert()
191 parent = rb_parent(node); __rb_insert()
192 rb_set_parent_color(node, parent, RB_RED); __rb_insert()
197 if (node == tmp) { __rb_insert()
199 tmp = node->rb_right; __rb_insert()
201 WRITE_ONCE(node->rb_right, parent); __rb_insert()
205 rb_set_parent_color(parent, node, RB_RED); __rb_insert()
206 augment_rotate(parent, node); __rb_insert()
207 parent = node; __rb_insert()
208 tmp = node->rb_left; __rb_insert()
231 struct rb_node *node = NULL, *sibling, *tmp1, *tmp2; ____rb_erase_color() local
236 * - node is black (or NULL on first iteration) ____rb_erase_color()
237 * - node is not the root (parent is not NULL) ____rb_erase_color()
238 * - All leaf paths going through parent and node have a ____rb_erase_color()
239 * black node count that is 1 lower than other leaf paths. ____rb_erase_color()
242 if (node != sibling) { /* node == parent->rb_left */ ____rb_erase_color()
286 node = parent; ____rb_erase_color()
287 parent = rb_parent(node); ____rb_erase_color()
361 node = parent; ____rb_erase_color()
362 parent = rb_parent(node); ____rb_erase_color()
410 static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {} dummy_copy() argument
418 void rb_insert_color(struct rb_node *node, struct rb_root *root) rb_insert_color() argument
420 __rb_insert(node, root, dummy_rotate); rb_insert_color()
424 void rb_erase(struct rb_node *node, struct rb_root *root) rb_erase() argument
427 rebalance = __rb_erase_augmented(node, root, &dummy_callbacks); rb_erase()
440 void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, __rb_insert_augmented() argument
443 __rb_insert(node, root, augment_rotate); __rb_insert_augmented()
448 * This function returns the first node (in sort order) of the tree.
476 struct rb_node *rb_next(const struct rb_node *node) rb_next() argument
480 if (RB_EMPTY_NODE(node)) rb_next()
487 if (node->rb_right) { rb_next()
488 node = node->rb_right; rb_next()
489 while (node->rb_left) rb_next()
490 node=node->rb_left; rb_next()
491 return (struct rb_node *)node; rb_next()
496 * so any 'next' node must be in the general direction of our parent. rb_next()
499 * parent, said parent is our 'next' node. rb_next()
501 while ((parent = rb_parent(node)) && node == parent->rb_right) rb_next()
502 node = parent; rb_next()
508 struct rb_node *rb_prev(const struct rb_node *node) rb_prev() argument
512 if (RB_EMPTY_NODE(node)) rb_prev()
519 if (node->rb_left) { rb_prev()
520 node = node->rb_left; rb_prev()
521 while (node->rb_right) rb_prev()
522 node=node->rb_right; rb_prev()
523 return (struct rb_node *)node; rb_prev()
530 while ((parent = rb_parent(node)) && node == parent->rb_left) rb_prev()
531 node = parent; rb_prev()
554 static struct rb_node *rb_left_deepest_node(const struct rb_node *node) rb_left_deepest_node() argument
557 if (node->rb_left) rb_left_deepest_node()
558 node = node->rb_left; rb_left_deepest_node()
559 else if (node->rb_right) rb_left_deepest_node()
560 node = node->rb_right; rb_left_deepest_node()
562 return (struct rb_node *)node; rb_left_deepest_node()
566 struct rb_node *rb_next_postorder(const struct rb_node *node) rb_next_postorder() argument
569 if (!node) rb_next_postorder()
571 parent = rb_parent(node); rb_next_postorder()
573 /* If we're sitting on node, we've already seen our children */ rb_next_postorder()
574 if (parent && node == parent->rb_left && parent->rb_right) { rb_next_postorder()
575 /* If we are the parent's left node, go to the parent's right rb_next_postorder()
576 * node then all the way down to the left */ rb_next_postorder()
579 /* Otherwise we are the parent's right node, and the parent rb_next_postorder()
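rb_insert_color() only rebalances; finding the insertion point and comparing keys is the caller's job, as rbtree_test.c's insert() above illustrates. Here is the canonical caller-side pattern from the kernel's rbtree documentation, sketched with a hypothetical node type:

/* The canonical caller-side rbtree insert (kernel context,
 * hypothetical struct mynode): descend to the NULL child link, then
 * rb_link_node() + rb_insert_color() rebalance as analysed above. */
#include <linux/rbtree.h>

struct mynode {
	struct rb_node rb;
	unsigned long key;
};

static bool mynode_insert(struct rb_root *root, struct mynode *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct mynode *cur = rb_entry(*link, struct mynode, rb);

		parent = *link;
		if (new->key < cur->key)
			link = &(*link)->rb_left;
		else if (new->key > cur->key)
			link = &(*link)->rb_right;
		else
			return false;		/* duplicate key: reject */
	}

	rb_link_node(&new->rb, parent, link);
	rb_insert_color(&new->rb, root);	/* fix red-red violations */
	return true;
}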
radix-tree.c
46 * Radix tree node cache.
58 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
68 /* nodes->private_data points to next preallocated node */
88 static inline void tag_set(struct radix_tree_node *node, unsigned int tag, tag_set() argument
91 __set_bit(offset, node->tags[tag]); tag_set()
94 static inline void tag_clear(struct radix_tree_node *node, unsigned int tag, tag_clear() argument
97 __clear_bit(offset, node->tags[tag]); tag_clear()
100 static inline int tag_get(struct radix_tree_node *node, unsigned int tag, tag_get() argument
103 return test_bit(offset, node->tags[tag]); tag_get()
127 * Returns 1 if any slot in the node has this tag set.
130 static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) any_tag_set() argument
134 if (node->tags[tag][idx]) any_tag_set()
196 * succeed in getting a node here (and never reach radix_tree_node_alloc()
221 struct radix_tree_node *node = radix_tree_node_rcu_free() local
231 tag_clear(node, i, 0); radix_tree_node_rcu_free()
233 node->slots[0] = NULL; radix_tree_node_rcu_free()
234 node->count = 0; radix_tree_node_rcu_free()
236 kmem_cache_free(radix_tree_node_cachep, node); radix_tree_node_rcu_free()
240 radix_tree_node_free(struct radix_tree_node *node) radix_tree_node_free() argument
242 call_rcu(&node->rcu_head, radix_tree_node_rcu_free); radix_tree_node_free()
257 struct radix_tree_node *node; __radix_tree_preload() local
264 node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); __radix_tree_preload()
265 if (node == NULL) __radix_tree_preload()
270 node->private_data = rtp->nodes; __radix_tree_preload()
271 rtp->nodes = node; __radix_tree_preload()
274 kmem_cache_free(radix_tree_node_cachep, node); __radix_tree_preload()
328 struct radix_tree_node *node; radix_tree_extend() local
345 if (!(node = radix_tree_node_alloc(root))) radix_tree_extend()
351 tag_set(node, tag, 0); radix_tree_extend()
357 node->path = newheight; radix_tree_extend()
358 node->count = 1; radix_tree_extend()
359 node->parent = NULL; radix_tree_extend()
363 slot->parent = node; radix_tree_extend()
365 node->slots[0] = slot; radix_tree_extend()
366 node = ptr_to_indirect(node); radix_tree_extend()
367 rcu_assign_pointer(root->rnode, node); radix_tree_extend()
378 * @nodep: returns node
381 * Create, if necessary, and return the node and slot for an item
386 * pointing to a node, in which case *@nodep will be NULL.
393 struct radix_tree_node *node = NULL, *slot; __radix_tree_create() local
412 /* Have to add a child node. */ __radix_tree_create()
416 slot->parent = node; __radix_tree_create()
417 if (node) { __radix_tree_create()
418 rcu_assign_pointer(node->slots[offset], slot); __radix_tree_create()
419 node->count++; __radix_tree_create()
427 node = slot; __radix_tree_create()
428 slot = node->slots[offset]; __radix_tree_create()
434 *nodep = node; __radix_tree_create()
436 *slotp = node ? node->slots + offset : (void **)&root->rnode; __radix_tree_create()
451 struct radix_tree_node *node; radix_tree_insert() local
457 error = __radix_tree_create(root, index, &node, &slot); radix_tree_insert()
464 if (node) { radix_tree_insert()
465 node->count++; radix_tree_insert()
466 BUG_ON(tag_get(node, 0, index & RADIX_TREE_MAP_MASK)); radix_tree_insert()
467 BUG_ON(tag_get(node, 1, index & RADIX_TREE_MAP_MASK)); radix_tree_insert()
481 * @nodep: returns node
489 * pointing to a node, in which case *@nodep will be NULL.
494 struct radix_tree_node *node, *parent; __radix_tree_lookup() local
498 node = rcu_dereference_raw(root->rnode); __radix_tree_lookup()
499 if (node == NULL) __radix_tree_lookup()
502 if (!radix_tree_is_indirect_ptr(node)) { __radix_tree_lookup()
510 return node; __radix_tree_lookup()
512 node = indirect_to_ptr(node); __radix_tree_lookup()
514 height = node->path & RADIX_TREE_HEIGHT_MASK; __radix_tree_lookup()
521 parent = node; __radix_tree_lookup()
522 slot = node->slots + ((index >> shift) & RADIX_TREE_MAP_MASK); __radix_tree_lookup()
523 node = rcu_dereference_raw(*slot); __radix_tree_lookup()
524 if (node == NULL) __radix_tree_lookup()
535 return node; __radix_tree_lookup()
580 * radix_tree_tag_set - set a tag on a radix tree node
587 * the root all the way down to the leaf node.
625 * radix_tree_tag_clear - clear a tag on a radix tree node
632 * this causes the leaf node to have no tags set then clear the tag in the
633 * next-to-leaf node, etc.
641 struct radix_tree_node *node = NULL; radix_tree_tag_clear() local
659 node = slot; radix_tree_tag_clear()
666 while (node) { radix_tree_tag_clear()
667 if (!tag_get(node, tag, offset)) radix_tree_tag_clear()
669 tag_clear(node, tag, offset); radix_tree_tag_clear()
670 if (any_tag_set(node, tag)) radix_tree_tag_clear()
675 node = node->parent; radix_tree_tag_clear()
688 * radix_tree_tag_get - get a tag on a radix tree node
699 * the RCU lock is held, unless tag modification and node deletion are excluded
706 struct radix_tree_node *node; radix_tree_tag_get() local
712 node = rcu_dereference_raw(root->rnode); radix_tree_tag_get()
713 if (node == NULL) radix_tree_tag_get()
716 if (!radix_tree_is_indirect_ptr(node)) radix_tree_tag_get()
718 node = indirect_to_ptr(node); radix_tree_tag_get()
720 height = node->path & RADIX_TREE_HEIGHT_MASK; radix_tree_tag_get()
729 if (node == NULL) radix_tree_tag_get()
733 if (!tag_get(node, tag, offset)) radix_tree_tag_get()
737 node = rcu_dereference_raw(node->slots[offset]); radix_tree_tag_get()
756 struct radix_tree_node *rnode, *node; radix_tree_next_chunk() local
796 node = rnode; radix_tree_next_chunk()
799 !test_bit(offset, node->tags[tag]) : radix_tree_next_chunk()
800 !node->slots[offset]) { radix_tree_next_chunk()
807 node->tags[tag], radix_tree_next_chunk()
812 if (node->slots[offset]) radix_tree_next_chunk()
824 /* This is leaf-node */ radix_tree_next_chunk()
828 node = rcu_dereference_raw(node->slots[offset]); radix_tree_next_chunk()
829 if (node == NULL) radix_tree_next_chunk()
839 /* Construct iter->tags bit-mask from node->tags[tag] array */ radix_tree_next_chunk()
845 iter->tags = node->tags[tag][tag_long] >> tag_bit; radix_tree_next_chunk()
850 iter->tags |= node->tags[tag][tag_long + 1] << radix_tree_next_chunk()
857 return node->slots + offset; radix_tree_next_chunk()
879 * we can get to the leaf node and find that the index that has the iftag
894 struct radix_tree_node *node = NULL; radix_tree_range_tag_if_tagged() local
930 node = slot; radix_tree_range_tag_if_tagged()
941 while (node) { radix_tree_range_tag_if_tagged()
945 /* stop if we find a node with the tag already set */ radix_tree_range_tag_if_tagged()
946 if (tag_get(node, settag, offset)) radix_tree_range_tag_if_tagged()
948 tag_set(node, settag, offset); radix_tree_range_tag_if_tagged()
949 node = node->parent; radix_tree_range_tag_if_tagged()
953 * Small optimization: now clear that node pointer. radix_tree_range_tag_if_tagged()
959 node = NULL; radix_tree_range_tag_if_tagged()
971 * We've fully scanned this node. Go up. Because radix_tree_range_tag_if_tagged()
1214 struct radix_tree_node *node; radix_tree_locate_item() local
1221 node = rcu_dereference_raw(root->rnode); radix_tree_locate_item()
1222 if (!radix_tree_is_indirect_ptr(node)) { radix_tree_locate_item()
1224 if (node == item) radix_tree_locate_item()
1229 node = indirect_to_ptr(node); radix_tree_locate_item()
1230 max_index = radix_tree_maxindex(node->path & radix_tree_locate_item()
1237 cur_index = __locate(node, item, cur_index, &found_index); radix_tree_locate_item()
1266 * The candidate node has more than one child, or its child radix_tree_shrink()
1276 * moving the node from one part of the tree to another: if it radix_tree_shrink()
1290 * We have a dilemma here. The node's slot[0] must not be radix_tree_shrink()
1292 * find the item. However if this was a bottom-level node, radix_tree_shrink()
1303 * problem (replacing direct root node with an indirect pointer radix_tree_shrink()
1316 * __radix_tree_delete_node - try to free node after clearing a slot
1318 * @node: node containing @index
1320 * After clearing the slot at @index in @node from radix tree
1322 * node and shrinking the tree.
1324 * Returns %true if @node was freed, %false otherwise.
1327 struct radix_tree_node *node) __radix_tree_delete_node()
1334 if (node->count) { __radix_tree_delete_node()
1335 if (node == indirect_to_ptr(root->rnode)) { __radix_tree_delete_node()
1343 parent = node->parent; __radix_tree_delete_node()
1347 offset = node->path >> RADIX_TREE_HEIGHT_SHIFT; __radix_tree_delete_node()
1356 radix_tree_node_free(node); __radix_tree_delete_node()
1359 node = parent; __radix_tree_delete_node()
1360 } while (node); __radix_tree_delete_node()
1379 struct radix_tree_node *node; radix_tree_delete_item() local
1385 entry = __radix_tree_lookup(root, index, &node, &slot); radix_tree_delete_item()
1392 if (!node) { radix_tree_delete_item()
1405 if (tag_get(node, tag, offset)) radix_tree_delete_item()
1409 node->slots[offset] = NULL; radix_tree_delete_item()
1410 node->count--; radix_tree_delete_item()
1412 __radix_tree_delete_node(root, node); radix_tree_delete_item()
1447 struct radix_tree_node *node = arg; radix_tree_node_ctor() local
1449 memset(node, 0, sizeof(*node)); radix_tree_node_ctor()
1450 INIT_LIST_HEAD(&node->private_list); radix_tree_node_ctor()
1479 struct radix_tree_node *node; radix_tree_callback() local
1485 node = rtp->nodes; radix_tree_callback()
1486 rtp->nodes = node->private_data; radix_tree_callback()
1487 kmem_cache_free(radix_tree_node_cachep, node); radix_tree_callback()
1326 __radix_tree_delete_node(struct radix_tree_root *root, struct radix_tree_node *node) __radix_tree_delete_node() argument
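The radix tree maps unsigned long indices to pointers, allocating intermediate nodes on insert (hence the gfp mask) and freeing and shrinking on delete, as __radix_tree_delete_node() above shows. A minimal sketch of the 4.4-era public API (kernel context assumed):

/* Minimal radix tree usage sketch (kernel context, 4.4-era API).
 * RADIX_TREE() declares and initialises a tree; inserts may allocate
 * intermediate nodes, hence the gfp mask in the initialiser. */
#include <linux/radix-tree.h>

static RADIX_TREE(demo_tree, GFP_KERNEL);

static int demo(void *item)
{
	int err;

	err = radix_tree_insert(&demo_tree, 42, item);
	if (err)
		return err;			/* -ENOMEM or -EEXIST */

	WARN_ON(radix_tree_lookup(&demo_tree, 42) != item);

	radix_tree_delete(&demo_tree, 42);	/* may shrink the tree */
	return 0;
}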
assoc_array.c
30 const struct assoc_array_node *node; assoc_array_subtree_iterate() local
45 node = assoc_array_ptr_to_node(cursor); assoc_array_subtree_iterate()
49 /* We perform two passes of each node. assoc_array_subtree_iterate()
51 * The first pass does all the leaves in this node. This means we assoc_array_subtree_iterate()
52 * don't miss any leaves if the node is split up by insertion whilst assoc_array_subtree_iterate()
58 ptr = ACCESS_ONCE(node->slots[slot]); assoc_array_subtree_iterate()
77 * back to a replacement node with the leaves in a different layout. assoc_array_subtree_iterate()
88 node = assoc_array_ptr_to_node(cursor); assoc_array_subtree_iterate()
92 ptr = ACCESS_ONCE(node->slots[slot]); assoc_array_subtree_iterate()
101 parent = ACCESS_ONCE(node->back_pointer); assoc_array_subtree_iterate()
102 slot = node->parent_slot; assoc_array_subtree_iterate()
116 /* Ascend to next slot in parent node */ assoc_array_subtree_iterate()
165 struct assoc_array_node *node; /* Node in which leaf might be found */ member in struct:assoc_array_walk_result::__anon14848
179 * Navigate through the internal tree looking for the closest node to the key.
188 struct assoc_array_node *node; assoc_array_walk() local
206 * either empty or contains a leaf at which point we've found a node in assoc_array_walk()
218 node = assoc_array_ptr_to_node(cursor); assoc_array_walk()
223 ptr = ACCESS_ONCE(node->slots[slot]); assoc_array_walk()
229 /* The node doesn't have a node/shortcut pointer in the slot assoc_array_walk()
232 result->terminal_node.node = node; assoc_array_walk()
240 /* There is a pointer to a node in the slot corresponding to assoc_array_walk()
314 * to the node that should contain the object and then searching the leaves
324 const struct assoc_array_node *node; assoc_array_find() local
333 node = result.terminal_node.node; assoc_array_find()
337 * the terminal node. assoc_array_find()
340 ptr = ACCESS_ONCE(node->slots[slot]); assoc_array_find()
364 struct assoc_array_node *node; assoc_array_destroy_subtree() local
390 pr_devel("[%d] node\n", slot); assoc_array_destroy_subtree()
391 node = assoc_array_ptr_to_node(cursor); assoc_array_destroy_subtree()
392 BUG_ON(node->back_pointer != parent); assoc_array_destroy_subtree()
393 BUG_ON(slot != -1 && node->parent_slot != slot); assoc_array_destroy_subtree()
397 pr_devel("Node %p [back=%p]\n", node, node->back_pointer); assoc_array_destroy_subtree()
399 struct assoc_array_ptr *ptr = node->slots[slot]; assoc_array_destroy_subtree()
414 parent = node->back_pointer; assoc_array_destroy_subtree()
415 slot = node->parent_slot; assoc_array_destroy_subtree()
416 pr_devel("free node\n"); assoc_array_destroy_subtree()
417 kfree(node); assoc_array_destroy_subtree()
437 /* Ascend to next slot in parent node */ assoc_array_destroy_subtree()
440 node = assoc_array_ptr_to_node(cursor); assoc_array_destroy_subtree()
489 * Handle insertion into a terminal node.
497 struct assoc_array_node *node, *new_n0, *new_n1, *side; assoc_array_insert_into_terminal_node() local
505 node = result->terminal_node.node; assoc_array_insert_into_terminal_node()
511 /* We arrived at a node which doesn't have an onward node or shortcut assoc_array_insert_into_terminal_node()
514 * need to split this node and insert in one of the fragments. assoc_array_insert_into_terminal_node()
518 /* Firstly, we have to check the leaves in this node to see if there's assoc_array_insert_into_terminal_node()
522 ptr = node->slots[i]; assoc_array_insert_into_terminal_node()
531 edit->leaf_p = &node->slots[i]; assoc_array_insert_into_terminal_node()
532 edit->dead_leaf = node->slots[i]; assoc_array_insert_into_terminal_node()
538 /* If there is a free slot in this node then we can just insert the assoc_array_insert_into_terminal_node()
543 edit->leaf_p = &node->slots[free_slot]; assoc_array_insert_into_terminal_node()
544 edit->adjust_count_on = node; assoc_array_insert_into_terminal_node()
549 /* The node has no spare slots - so we're either going to have to split assoc_array_insert_into_terminal_node()
550 * it or insert another node before it. assoc_array_insert_into_terminal_node()
569 ptr = node->slots[i]; assoc_array_insert_into_terminal_node()
586 /* The node contains only leaves */ assoc_array_insert_into_terminal_node()
596 * to insert a shortcut if the new node wants to cluster with them. assoc_array_insert_into_terminal_node()
601 /* Otherwise we can just insert a new node ahead of the old assoc_array_insert_into_terminal_node()
608 pr_devel("split node\n"); assoc_array_insert_into_terminal_node()
610 /* We need to split the current node; we know that the node doesn't assoc_array_insert_into_terminal_node()
615 * leaves in the node and the new leaf. assoc_array_insert_into_terminal_node()
617 * We need a new node (n0) to replace the current one and a new node to assoc_array_insert_into_terminal_node()
621 new_n0->back_pointer = node->back_pointer; assoc_array_insert_into_terminal_node()
622 new_n0->parent_slot = node->parent_slot; assoc_array_insert_into_terminal_node()
629 new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch; assoc_array_insert_into_terminal_node()
655 if (assoc_array_ptr_is_meta(node->slots[i])) assoc_array_insert_into_terminal_node()
656 new_n0->slots[i] = node->slots[i]; assoc_array_insert_into_terminal_node()
666 if (assoc_array_ptr_is_meta(node->slots[i])) assoc_array_insert_into_terminal_node()
669 new_n1->slots[next_slot++] = node->slots[i]; assoc_array_insert_into_terminal_node()
675 new_n0->slots[free_slot] = node->slots[i]; assoc_array_insert_into_terminal_node()
697 ptr = node->slots[i]; assoc_array_insert_into_terminal_node()
709 ptr = node->back_pointer; assoc_array_insert_into_terminal_node()
713 edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot]; assoc_array_insert_into_terminal_node()
716 edit->excised_meta[0] = assoc_array_node_to_ptr(node); assoc_array_insert_into_terminal_node()
717 pr_devel("<--%s() = ok [split node]\n", __func__); assoc_array_insert_into_terminal_node()
722 * to go into a different slot, so we create a new node to hold the new assoc_array_insert_into_terminal_node()
723 * leaf and a pointer to a new node holding all the old leaves. assoc_array_insert_into_terminal_node()
727 new_n0->back_pointer = node->back_pointer; assoc_array_insert_into_terminal_node()
728 new_n0->parent_slot = node->parent_slot; assoc_array_insert_into_terminal_node()
729 new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch; assoc_array_insert_into_terminal_node()
732 new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch; assoc_array_insert_into_terminal_node()
736 new_n1->slots[i] = node->slots[i]; assoc_array_insert_into_terminal_node()
741 edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot]; assoc_array_insert_into_terminal_node()
743 edit->excised_meta[0] = assoc_array_node_to_ptr(node); assoc_array_insert_into_terminal_node()
744 pr_devel("<--%s() = ok [insert node before]\n", __func__); assoc_array_insert_into_terminal_node()
748 /* All the leaves, new and old, want to cluster together in this node assoc_array_insert_into_terminal_node()
749 * in the same slot, so we have to replace this node with a shortcut to assoc_array_insert_into_terminal_node()
765 int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]), assoc_array_insert_into_terminal_node()
785 new_s0->back_pointer = node->back_pointer; assoc_array_insert_into_terminal_node()
786 new_s0->parent_slot = node->parent_slot; assoc_array_insert_into_terminal_node()
805 /* This now reduces to a node splitting exercise for which we'll need assoc_array_insert_into_terminal_node()
809 ptr = node->slots[i]; assoc_array_insert_into_terminal_node()
830 struct assoc_array_node *node, *new_n0, *side; assoc_array_insert_mid_shortcut() local
845 /* We need to split a shortcut and insert a node between the two assoc_array_insert_mid_shortcut()
859 node = assoc_array_ptr_to_node(shortcut->back_pointer); assoc_array_insert_mid_shortcut()
860 edit->set[0].ptr = &node->slots[shortcut->parent_slot]; assoc_array_insert_mid_shortcut()
867 /* Create a new node now since we're going to need it anyway */ assoc_array_insert_mid_shortcut()
874 /* Insert a new shortcut before the new node if this segment isn't of assoc_array_insert_mid_shortcut()
875 * zero length - otherwise we just connect the new node directly to the assoc_array_insert_mid_shortcut()
914 /* We need to know which slot in the new node is going to take a assoc_array_insert_mid_shortcut()
923 /* Determine whether we need to follow the new node with a replacement assoc_array_insert_mid_shortcut()
955 /* We don't have to replace the pointed-to node as long as we assoc_array_insert_mid_shortcut()
967 /* Install the new leaf in a spare slot in the new node. */ assoc_array_insert_mid_shortcut()
1023 /* Allocate a root node if there isn't one yet */ assoc_array_insert()
1029 /* We found a node that doesn't have a node/shortcut pointer in assoc_array_insert()
1070 struct assoc_array_node *node; member in struct:assoc_array_delete_collapse_context
1076 * Subtree collapse to node iterator.
1088 collapse->node->slots[collapse->slot++] = assoc_array_leaf_to_ptr(leaf); assoc_array_delete_collapse_iterator()
1117 struct assoc_array_node *node, *new_n0; assoc_array_delete() local
1134 /* We found a node that should contain the leaf we've been assoc_array_delete()
1138 node = result.terminal_node.node; assoc_array_delete()
1141 ptr = node->slots[slot]; assoc_array_delete()
1162 edit->dead_leaf = node->slots[slot]; assoc_array_delete()
1163 edit->set[0].ptr = &node->slots[slot]; assoc_array_delete()
1165 edit->adjust_count_on = node; assoc_array_delete()
1182 * We go for a simple algorithm of: if this node has FAN_OUT or fewer assoc_array_delete()
1187 * up space in this node. assoc_array_delete()
1189 if (node->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) { assoc_array_delete()
1193 /* First of all, we need to know if this node has metadata so assoc_array_delete()
1199 ptr = node->slots[i]; assoc_array_delete()
1207 node->nr_leaves_on_branch - 1, has_meta); assoc_array_delete()
1209 /* Look further up the tree to see if we can collapse this node assoc_array_delete()
1210 * into a more proximal node too. assoc_array_delete()
1212 parent = node; assoc_array_delete()
1233 /* There's no point collapsing if the original node has no meta assoc_array_delete()
1235 * node's ancestry. assoc_array_delete()
1237 if (has_meta || parent != node) { assoc_array_delete()
1238 node = parent; assoc_array_delete()
1240 /* Create a new node to collapse into */ assoc_array_delete()
1246 new_n0->back_pointer = node->back_pointer; assoc_array_delete()
1247 new_n0->parent_slot = node->parent_slot; assoc_array_delete()
1248 new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch; assoc_array_delete()
1251 collapse.node = new_n0; assoc_array_delete()
1254 assoc_array_subtree_iterate(assoc_array_node_to_ptr(node), assoc_array_delete()
1255 node->back_pointer, assoc_array_delete()
1261 if (!node->back_pointer) { assoc_array_delete()
1263 } else if (assoc_array_ptr_is_leaf(node->back_pointer)) { assoc_array_delete()
1265 } else if (assoc_array_ptr_is_node(node->back_pointer)) { assoc_array_delete()
1267 assoc_array_ptr_to_node(node->back_pointer); assoc_array_delete()
1268 edit->set[1].ptr = &p->slots[node->parent_slot]; assoc_array_delete()
1269 } else if (assoc_array_ptr_is_shortcut(node->back_pointer)) { assoc_array_delete()
1271 assoc_array_ptr_to_shortcut(node->back_pointer); assoc_array_delete()
1275 edit->excised_subtree = assoc_array_node_to_ptr(node); assoc_array_delete()
1380 struct assoc_array_node *node; assoc_array_apply_edit() local
1408 node = edit->adjust_count_on; assoc_array_apply_edit()
1410 node->nr_leaves_on_branch += edit->adjust_count_by; assoc_array_apply_edit()
1412 ptr = node->back_pointer; assoc_array_apply_edit()
1422 node = assoc_array_ptr_to_node(ptr); assoc_array_apply_edit()
1491 struct assoc_array_node *node, *new_n; assoc_array_gc() local
1538 /* Duplicate the node at this position */ assoc_array_gc()
1539 node = assoc_array_ptr_to_node(cursor); assoc_array_gc()
1543 pr_devel("dup node %p -> %p\n", node, new_n); assoc_array_gc()
1545 new_n->parent_slot = node->parent_slot; assoc_array_gc()
1553 ptr = node->slots[slot]; assoc_array_gc()
1572 pr_devel("-- compress node %p --\n", new_n); assoc_array_gc()
1574 /* Count up the number of empty slots in this node and work out the assoc_array_gc()
1608 /* Fold the child node into this one */ assoc_array_gc()
1609 pr_devel("[%d] fold node %lu/%d [nx %d]\n", assoc_array_gc()
1635 pr_devel("[%d] retain node %lu/%d [nx %d]\n", assoc_array_gc()
1645 /* Excise this node if it is singly occupied by a shortcut */ assoc_array_gc()
1653 pr_devel("excise node %p with 1 shortcut\n", new_n); assoc_array_gc()
1724 ptr = node->back_pointer; assoc_array_gc()
1732 slot = node->parent_slot; assoc_array_gc()
1736 node = assoc_array_ptr_to_node(cursor); assoc_array_gc()
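
The assoc_array snippets above lean on two idioms: slot pointers whose low bit says whether they reference a leaf or internal metadata (assoc_array_ptr_is_meta()/assoc_array_ptr_to_node()), and a two-pass walk of each node that visits leaves before descending into metadata. A minimal userspace sketch of both follows; the fan-out of 16, the single tag bit, and every toy_* name are illustrative assumptions, not the kernel's exact encoding (which also tags shortcut pointers and relies on RCU for the concurrent-iteration guarantee the comments describe).

#include <stdio.h>
#include <stdint.h>

#define FAN_OUT 16

struct toy_node {
	void *slots[FAN_OUT];	/* leaf pointer or tagged child pointer */
};

/* Bit 0 tags metadata; allocation alignment keeps it free in real
 * pointers, which is the same trick assoc_array uses. */
static int ptr_is_meta(const void *p)
{
	return (uintptr_t)p & 1;
}

static struct toy_node *ptr_to_node(void *p)
{
	return (struct toy_node *)((uintptr_t)p & ~(uintptr_t)1);
}

static void *node_to_ptr(struct toy_node *n)
{
	return (void *)((uintptr_t)n | 1);
}

/* Two passes per node, echoing assoc_array_subtree_iterate(): all
 * leaves in this node first, then a descent into metadata slots. */
static void iterate(struct toy_node *node, void (*fn)(const void *leaf))
{
	int slot;

	for (slot = 0; slot < FAN_OUT; slot++)
		if (node->slots[slot] && !ptr_is_meta(node->slots[slot]))
			fn(node->slots[slot]);
	for (slot = 0; slot < FAN_OUT; slot++)
		if (node->slots[slot] && ptr_is_meta(node->slots[slot]))
			iterate(ptr_to_node(node->slots[slot]), fn);
}

static void print_leaf(const void *leaf)
{
	printf("%d\n", *(const int *)leaf);
}

int main(void)
{
	static int a = 1, b = 2, c = 3;	/* int alignment keeps bit 0 clear */
	struct toy_node root = { { 0 } }, child = { { 0 } };

	child.slots[0] = &c;
	root.slots[0] = &a;
	root.slots[1] = node_to_ptr(&child);
	root.slots[5] = &b;
	iterate(&root, print_leaf);	/* prints 1 and 2, then 3 */
	return 0;
}
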
H A Dklist.c10 * "node" (struct klist_node) objects. For struct klist, a spinlock is
13 * reference count that indicates the number of current users of that node
19 * current node on the list.
30 * Only when the count goes to 0 is the node removed from the list.
31 * klist_remove() will try to delete the node from the list and block until
120 * @n: node we're adding.
132 * @n: node we're adding.
143 * klist_add_behind - Init a klist_node and add it after an existing node
144 * @n: node we're adding.
145 * @pos: node to put @n after
159 * klist_add_before - Init a klist_node and add it before an existing node
160 * @n: node we're adding.
161 * @pos: node to put @n before
176 struct klist_node *node; member in struct:klist_waiter
193 if (waiter->node != n) klist_release()
226 * klist_del - Decrement the reference count of node and try to remove.
227 * @n: node we're deleting.
236 * klist_remove - Decrement the refcount of node and wait for it to go away.
237 * @n: node we're removing.
243 waiter.node = n; klist_remove()
263 * klist_node_attached - Say whether a node is bound to a list or not.
276 * @n: node to start with.
309 * refcount of the current node. Necessary in case iteration exited before
327 * klist_prev - Ante up prev node in list.
331 * node, if there was one. Grab the prev node, increment its reference
332 * count, drop the lock, and return that prev node.
368 * klist_next - Ante up next node in list.
372 * node, if there was one. Grab the next node, increment its reference
373 * count, drop the lock, and return that next node.
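
The klist comments above promise that a node leaves the list only when its reference count reaches zero; klist_del() merely marks the node and drops the list's own reference. A single-threaded sketch of that remove-on-last-put pattern, with made-up names; the real klist also takes a spinlock around list surgery and parks klist_remove() callers on a waiter list, both omitted here.

#include <stdio.h>

struct toy_knode {
	struct toy_knode *prev, *next;
	int refcnt;	/* number of current users of this node */
	int dead;	/* removal requested; iterators should skip it */
};

struct toy_klist {
	struct toy_knode head;	/* circular list sentinel */
};

static void list_init(struct toy_klist *l)
{
	l->head.prev = l->head.next = &l->head;
}

static void list_add_tail(struct toy_klist *l, struct toy_knode *n)
{
	n->refcnt = 1;	/* the list itself holds one reference */
	n->dead = 0;
	n->prev = l->head.prev;
	n->next = &l->head;
	n->prev->next = n;
	n->next->prev = n;
}

static void knode_get(struct toy_knode *n)
{
	n->refcnt++;
}

/* Only when the count goes to 0 is the node actually unlinked. */
static void knode_put(struct toy_knode *n)
{
	if (--n->refcnt == 0) {
		n->prev->next = n->next;
		n->next->prev = n->prev;
	}
}

/* klist_del() analogue: mark dead and drop the list's reference. */
static void knode_del(struct toy_knode *n)
{
	n->dead = 1;
	knode_put(n);
}

int main(void)
{
	struct toy_klist list;
	struct toy_knode n = { 0 };

	list_init(&list);
	list_add_tail(&list, &n);

	knode_get(&n);	/* an iterator is using the node */
	knode_del(&n);	/* deletion requested: node stays linked */
	printf("linked: %d\n", list.head.next == &n);	/* 1 */
	knode_put(&n);	/* last user done: node unlinks here */
	printf("linked: %d\n", list.head.next == &n);	/* 0 */
	return 0;
}
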
/linux-4.4.14/fs/hfsplus/
H A Dbnode.c8 * Handle basic btree node operations
20 /* Copy a specified range of bytes from the raw data of a node */ hfs_bnode_read()
21 void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) hfs_bnode_read() argument
26 off += node->page_offset; hfs_bnode_read()
27 pagep = node->page + (off >> PAGE_CACHE_SHIFT); hfs_bnode_read()
42 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off) hfs_bnode_read_u16() argument
46 hfs_bnode_read(node, &data, off, 2); hfs_bnode_read_u16()
50 u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off) hfs_bnode_read_u8() argument
54 hfs_bnode_read(node, &data, off, 1); hfs_bnode_read_u8()
58 void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off) hfs_bnode_read_key() argument
63 tree = node->tree; hfs_bnode_read_key()
64 if (node->type == HFS_NODE_LEAF || hfs_bnode_read_key()
66 node->tree->cnid == HFSPLUS_ATTR_CNID) hfs_bnode_read_key()
67 key_len = hfs_bnode_read_u16(node, off) + 2; hfs_bnode_read_key()
71 hfs_bnode_read(node, key, off, key_len); hfs_bnode_read_key()
74 void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len) hfs_bnode_write() argument
79 off += node->page_offset; hfs_bnode_write()
80 pagep = node->page + (off >> PAGE_CACHE_SHIFT); hfs_bnode_write()
97 void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data) hfs_bnode_write_u16() argument
101 hfs_bnode_write(node, &v, off, 2); hfs_bnode_write_u16()
104 void hfs_bnode_clear(struct hfs_bnode *node, int off, int len) hfs_bnode_clear() argument
109 off += node->page_offset; hfs_bnode_clear()
110 pagep = node->page + (off >> PAGE_CACHE_SHIFT); hfs_bnode_clear()
186 void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) hfs_bnode_move() argument
194 src += node->page_offset; hfs_bnode_move()
195 dst += node->page_offset; hfs_bnode_move()
198 src_page = node->page + (src >> PAGE_CACHE_SHIFT); hfs_bnode_move()
201 dst_page = node->page + (dst >> PAGE_CACHE_SHIFT); hfs_bnode_move()
248 src_page = node->page + (src >> PAGE_CACHE_SHIFT); hfs_bnode_move()
250 dst_page = node->page + (dst >> PAGE_CACHE_SHIFT); hfs_bnode_move()
299 void hfs_bnode_dump(struct hfs_bnode *node) hfs_bnode_dump() argument
305 hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this); hfs_bnode_dump()
306 hfs_bnode_read(node, &desc, 0, sizeof(desc)); hfs_bnode_dump()
311 off = node->tree->node_size - 2; hfs_bnode_dump()
313 key_off = hfs_bnode_read_u16(node, off); hfs_bnode_dump()
315 if (i && node->type == HFS_NODE_INDEX) { hfs_bnode_dump()
318 if (node->tree->attributes & HFS_TREE_VARIDXKEYS || hfs_bnode_dump()
319 node->tree->cnid == HFSPLUS_ATTR_CNID) hfs_bnode_dump()
320 tmp = hfs_bnode_read_u16(node, key_off) + 2; hfs_bnode_dump()
322 tmp = node->tree->max_key_len + 2; hfs_bnode_dump()
324 hfs_bnode_read(node, &cnid, key_off + tmp, 4); hfs_bnode_dump()
326 } else if (i && node->type == HFS_NODE_LEAF) { hfs_bnode_dump()
329 tmp = hfs_bnode_read_u16(node, key_off); hfs_bnode_dump()
336 void hfs_bnode_unlink(struct hfs_bnode *node) hfs_bnode_unlink() argument
342 tree = node->tree; hfs_bnode_unlink()
343 if (node->prev) { hfs_bnode_unlink()
344 tmp = hfs_bnode_find(tree, node->prev); hfs_bnode_unlink()
347 tmp->next = node->next; hfs_bnode_unlink()
352 } else if (node->type == HFS_NODE_LEAF) hfs_bnode_unlink()
353 tree->leaf_head = node->next; hfs_bnode_unlink()
355 if (node->next) { hfs_bnode_unlink()
356 tmp = hfs_bnode_find(tree, node->next); hfs_bnode_unlink()
359 tmp->prev = node->prev; hfs_bnode_unlink()
364 } else if (node->type == HFS_NODE_LEAF) hfs_bnode_unlink()
365 tree->leaf_tail = node->prev; hfs_bnode_unlink()
368 if (!node->prev && !node->next) hfs_bnode_unlink()
370 if (!node->parent) { hfs_bnode_unlink()
374 set_bit(HFS_BNODE_DELETED, &node->flags); hfs_bnode_unlink()
386 struct hfs_bnode *node; hfs_bnode_findhash() local
389 pr_err("request for non-existent node %d in B*Tree\n", hfs_bnode_findhash()
394 for (node = tree->node_hash[hfs_bnode_hash(cnid)]; hfs_bnode_findhash()
395 node; node = node->next_hash) hfs_bnode_findhash()
396 if (node->this == cnid) hfs_bnode_findhash()
397 return node; hfs_bnode_findhash()
404 struct hfs_bnode *node, *node2; __hfs_bnode_create() local
411 pr_err("request for non-existent node %d in B*Tree\n", __hfs_bnode_create()
419 node = kzalloc(size, GFP_KERNEL); __hfs_bnode_create()
420 if (!node) __hfs_bnode_create()
422 node->tree = tree; __hfs_bnode_create()
423 node->this = cnid; __hfs_bnode_create()
424 set_bit(HFS_BNODE_NEW, &node->flags); __hfs_bnode_create()
425 atomic_set(&node->refcnt, 1); __hfs_bnode_create()
427 node->tree->cnid, node->this); __hfs_bnode_create()
428 init_waitqueue_head(&node->lock_wq); __hfs_bnode_create()
433 node->next_hash = tree->node_hash[hash]; __hfs_bnode_create()
434 tree->node_hash[hash] = node; __hfs_bnode_create()
438 kfree(node); __hfs_bnode_create()
448 node->page_offset = off & ~PAGE_CACHE_MASK; __hfs_bnode_create()
457 node->page[i] = page; __hfs_bnode_create()
460 return node; __hfs_bnode_create()
462 set_bit(HFS_BNODE_ERROR, &node->flags); __hfs_bnode_create()
463 return node; __hfs_bnode_create()
466 void hfs_bnode_unhash(struct hfs_bnode *node) hfs_bnode_unhash() argument
471 node->tree->cnid, node->this, atomic_read(&node->refcnt)); hfs_bnode_unhash()
472 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)]; hfs_bnode_unhash()
473 *p && *p != node; p = &(*p)->next_hash) hfs_bnode_unhash()
476 *p = node->next_hash; hfs_bnode_unhash()
477 node->tree->node_hash_cnt--; hfs_bnode_unhash()
480 /* Load a particular node out of a tree */ hfs_bnode_find()
483 struct hfs_bnode *node; hfs_bnode_find() local
489 node = hfs_bnode_findhash(tree, num); hfs_bnode_find()
490 if (node) { hfs_bnode_find()
491 hfs_bnode_get(node); hfs_bnode_find()
493 wait_event(node->lock_wq, hfs_bnode_find()
494 !test_bit(HFS_BNODE_NEW, &node->flags)); hfs_bnode_find()
495 if (test_bit(HFS_BNODE_ERROR, &node->flags)) hfs_bnode_find()
497 return node; hfs_bnode_find()
500 node = __hfs_bnode_create(tree, num); hfs_bnode_find()
501 if (!node) hfs_bnode_find()
503 if (test_bit(HFS_BNODE_ERROR, &node->flags)) hfs_bnode_find()
505 if (!test_bit(HFS_BNODE_NEW, &node->flags)) hfs_bnode_find()
506 return node; hfs_bnode_find()
508 desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + hfs_bnode_find()
509 node->page_offset); hfs_bnode_find()
510 node->prev = be32_to_cpu(desc->prev); hfs_bnode_find()
511 node->next = be32_to_cpu(desc->next); hfs_bnode_find()
512 node->num_recs = be16_to_cpu(desc->num_recs); hfs_bnode_find()
513 node->type = desc->type; hfs_bnode_find()
514 node->height = desc->height; hfs_bnode_find()
515 kunmap(node->page[0]); hfs_bnode_find()
517 switch (node->type) { hfs_bnode_find()
520 if (node->height != 0) hfs_bnode_find()
524 if (node->height != 1) hfs_bnode_find()
528 if (node->height <= 1 || node->height > tree->depth) hfs_bnode_find()
536 off = hfs_bnode_read_u16(node, rec_off); hfs_bnode_find()
539 for (i = 1; i <= node->num_recs; off = next_off, i++) { hfs_bnode_find()
541 next_off = hfs_bnode_read_u16(node, rec_off); hfs_bnode_find()
547 if (node->type != HFS_NODE_INDEX && hfs_bnode_find()
548 node->type != HFS_NODE_LEAF) hfs_bnode_find()
550 key_size = hfs_bnode_read_u16(node, off) + 2; hfs_bnode_find()
554 clear_bit(HFS_BNODE_NEW, &node->flags); hfs_bnode_find()
555 wake_up(&node->lock_wq); hfs_bnode_find()
556 return node; hfs_bnode_find()
559 set_bit(HFS_BNODE_ERROR, &node->flags); hfs_bnode_find()
560 clear_bit(HFS_BNODE_NEW, &node->flags); hfs_bnode_find()
561 wake_up(&node->lock_wq); hfs_bnode_find()
562 hfs_bnode_put(node); hfs_bnode_find()
566 void hfs_bnode_free(struct hfs_bnode *node) hfs_bnode_free() argument
570 for (i = 0; i < node->tree->pages_per_bnode; i++) hfs_bnode_free()
571 if (node->page[i]) hfs_bnode_free()
572 page_cache_release(node->page[i]); hfs_bnode_free()
573 kfree(node); hfs_bnode_free()
578 struct hfs_bnode *node; hfs_bnode_create() local
583 node = hfs_bnode_findhash(tree, num); hfs_bnode_create()
585 if (node) { hfs_bnode_create()
586 pr_crit("new node %u already hashed?\n", num); hfs_bnode_create()
588 return node; hfs_bnode_create()
590 node = __hfs_bnode_create(tree, num); hfs_bnode_create()
591 if (!node) hfs_bnode_create()
593 if (test_bit(HFS_BNODE_ERROR, &node->flags)) { hfs_bnode_create()
594 hfs_bnode_put(node); hfs_bnode_create()
598 pagep = node->page; hfs_bnode_create()
599 memset(kmap(*pagep) + node->page_offset, 0, hfs_bnode_create()
608 clear_bit(HFS_BNODE_NEW, &node->flags); hfs_bnode_create()
609 wake_up(&node->lock_wq); hfs_bnode_create()
611 return node; hfs_bnode_create()
614 void hfs_bnode_get(struct hfs_bnode *node) hfs_bnode_get() argument
616 if (node) { hfs_bnode_get()
617 atomic_inc(&node->refcnt); hfs_bnode_get()
619 node->tree->cnid, node->this, hfs_bnode_get()
620 atomic_read(&node->refcnt)); hfs_bnode_get()
624 /* Dispose of resources used by a node */ hfs_bnode_put()
625 void hfs_bnode_put(struct hfs_bnode *node) hfs_bnode_put() argument
627 if (node) { hfs_bnode_put()
628 struct hfs_btree *tree = node->tree; hfs_bnode_put()
632 node->tree->cnid, node->this, hfs_bnode_put()
633 atomic_read(&node->refcnt)); hfs_bnode_put()
634 BUG_ON(!atomic_read(&node->refcnt)); hfs_bnode_put()
635 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) hfs_bnode_put()
638 if (!node->page[i]) hfs_bnode_put()
640 mark_page_accessed(node->page[i]); hfs_bnode_put()
643 if (test_bit(HFS_BNODE_DELETED, &node->flags)) { hfs_bnode_put()
644 hfs_bnode_unhash(node); hfs_bnode_put()
647 hfs_bnode_clear(node, 0, tree->node_size); hfs_bnode_put()
648 hfs_bmap_free(node); hfs_bnode_put()
649 hfs_bnode_free(node); hfs_bnode_put()
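
Unlike the single-page HFS variant at the top of this listing, the hfsplus hfs_bnode_read() above must cope with a node whose bytes span several pages: it folds in page_offset, picks the first page with off >> PAGE_CACHE_SHIFT, and copies chunk by chunk. A userspace analogue over malloc'd buffers; the 4 KiB page size and all toy_* names are assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TOY_PAGE_SHIFT	12
#define TOY_PAGE_SIZE	(1u << TOY_PAGE_SHIFT)
#define TOY_PAGE_MASK	(TOY_PAGE_SIZE - 1)

struct toy_bnode {
	unsigned char *page[4];	/* backing pages of this node */
	unsigned int page_offset;	/* node start within page[0] */
};

/* Copy a byte range out of the node, crossing page boundaries the
 * way hfsplus's hfs_bnode_read() does (len is assumed non-zero). */
static void toy_bnode_read(struct toy_bnode *node, void *buf,
			   unsigned int off, unsigned int len)
{
	unsigned char **pagep;
	unsigned int l;

	off += node->page_offset;
	pagep = node->page + (off >> TOY_PAGE_SHIFT);
	off &= TOY_PAGE_MASK;

	l = TOY_PAGE_SIZE - off;
	if (l > len)
		l = len;
	memcpy(buf, *pagep + off, l);

	while ((len -= l) != 0) {
		buf = (unsigned char *)buf + l;
		pagep++;
		l = len > TOY_PAGE_SIZE ? TOY_PAGE_SIZE : len;
		memcpy(buf, *pagep, l);
	}
}

int main(void)
{
	struct toy_bnode node;
	unsigned char out[9] = { 0 };

	node.page[0] = malloc(TOY_PAGE_SIZE);
	node.page[1] = malloc(TOY_PAGE_SIZE);
	node.page_offset = TOY_PAGE_SIZE - 4;	/* node starts near page end */
	memcpy(node.page[0] + node.page_offset, "abcd", 4);
	memcpy(node.page[1], "efgh", 4);

	toy_bnode_read(&node, out, 0, 8);	/* read crosses the boundary */
	printf("%s\n", out);	/* abcdefgh */
	free(node.page[0]);
	free(node.page[1]);
	return 0;
}
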
H A Dbrec.c18 /* Get the length and offset of the given record in the given node */ hfs_brec_lenoff()
19 u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off) hfs_brec_lenoff() argument
24 dataoff = node->tree->node_size - (rec + 2) * 2; hfs_brec_lenoff()
25 hfs_bnode_read(node, retval, dataoff, 4); hfs_brec_lenoff()
31 u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec) hfs_brec_keylen() argument
35 if (node->type != HFS_NODE_INDEX && node->type != HFS_NODE_LEAF) hfs_brec_keylen()
38 if ((node->type == HFS_NODE_INDEX) && hfs_brec_keylen()
39 !(node->tree->attributes & HFS_TREE_VARIDXKEYS) && hfs_brec_keylen()
40 (node->tree->cnid != HFSPLUS_ATTR_CNID)) { hfs_brec_keylen()
41 retval = node->tree->max_key_len + 2; hfs_brec_keylen()
43 recoff = hfs_bnode_read_u16(node, hfs_brec_keylen()
44 node->tree->node_size - (rec + 1) * 2); hfs_brec_keylen()
47 if (recoff > node->tree->node_size - 2) { hfs_brec_keylen()
52 retval = hfs_bnode_read_u16(node, recoff) + 2; hfs_brec_keylen()
53 if (retval > node->tree->max_key_len + 2) { hfs_brec_keylen()
65 struct hfs_bnode *node, *new_node; hfs_brec_insert() local
87 node = fd->bnode; hfs_brec_insert()
88 hfs_bnode_dump(node); hfs_brec_insert()
90 end_rec_off = tree->node_size - (node->num_recs + 1) * 2; hfs_brec_insert()
91 end_off = hfs_bnode_read_u16(node, end_rec_off); hfs_brec_insert()
103 if (node->type == HFS_NODE_LEAF) { hfs_brec_insert()
107 node->num_recs++; hfs_brec_insert()
109 hfs_bnode_write_u16(node, hfs_brec_insert()
111 node->num_recs); hfs_brec_insert()
112 hfs_bnode_write_u16(node, end_rec_off, end_off + size); hfs_brec_insert()
120 data_off = hfs_bnode_read_u16(node, data_rec_off + 2); hfs_brec_insert()
121 hfs_bnode_write_u16(node, data_rec_off, data_off + size); hfs_brec_insert()
126 hfs_bnode_move(node, data_off + size, data_off, hfs_brec_insert()
130 hfs_bnode_write(node, fd->search_key, data_off, key_len); hfs_brec_insert()
131 hfs_bnode_write(node, entry, data_off + key_len, entry_len); hfs_brec_insert()
132 hfs_bnode_dump(node); hfs_brec_insert()
136 * at the start of the node and it is not the new node hfs_brec_insert()
138 if (!rec && new_node != node) { hfs_brec_insert()
139 hfs_bnode_read_key(node, fd->search_key, data_off + size); hfs_brec_insert()
180 struct hfs_bnode *node, *parent; hfs_brec_remove() local
184 node = fd->bnode; hfs_brec_remove()
187 end_off = tree->node_size - (node->num_recs + 1) * 2; hfs_brec_remove()
189 if (node->type == HFS_NODE_LEAF) { hfs_brec_remove()
193 hfs_bnode_dump(node); hfs_brec_remove()
196 if (!--node->num_recs) { hfs_brec_remove()
197 hfs_bnode_unlink(node); hfs_brec_remove()
198 if (!node->parent) hfs_brec_remove()
200 parent = hfs_bnode_find(tree, node->parent); hfs_brec_remove()
203 hfs_bnode_put(node); hfs_brec_remove()
204 node = fd->bnode = parent; hfs_brec_remove()
206 __hfs_brec_find(node, fd, hfs_find_rec_by_key); hfs_brec_remove()
209 hfs_bnode_write_u16(node, hfs_brec_remove()
211 node->num_recs); hfs_brec_remove()
218 data_off = hfs_bnode_read_u16(node, rec_off); hfs_brec_remove()
219 hfs_bnode_write_u16(node, rec_off + 2, data_off - size); hfs_brec_remove()
224 hfs_bnode_move(node, fd->keyoffset, fd->keyoffset + size, hfs_brec_remove()
227 hfs_bnode_dump(node); hfs_brec_remove()
236 struct hfs_bnode *node, *new_node, *next_node; hfs_bnode_split() local
242 node = fd->bnode; hfs_bnode_split()
246 hfs_bnode_get(node); hfs_bnode_split()
248 node->this, new_node->this, node->next); hfs_bnode_split()
249 new_node->next = node->next; hfs_bnode_split()
250 new_node->prev = node->this; hfs_bnode_split()
251 new_node->parent = node->parent; hfs_bnode_split()
252 new_node->type = node->type; hfs_bnode_split()
253 new_node->height = node->height; hfs_bnode_split()
255 if (node->next) hfs_bnode_split()
256 next_node = hfs_bnode_find(tree, node->next); hfs_bnode_split()
261 hfs_bnode_put(node); hfs_bnode_split()
266 size = tree->node_size / 2 - node->num_recs * 2 - 14; hfs_bnode_split()
270 data_start = hfs_bnode_read_u16(node, old_rec_off); hfs_bnode_split()
274 if (++num_recs < node->num_recs) hfs_bnode_split()
277 hfs_bnode_put(node); hfs_bnode_split()
290 data_start = hfs_bnode_read_u16(node, old_rec_off); hfs_bnode_split()
292 hfs_bnode_put(node); hfs_bnode_split()
299 new_node->num_recs = node->num_recs - num_recs; hfs_bnode_split()
300 node->num_recs = num_recs; hfs_bnode_split()
311 data_end = hfs_bnode_read_u16(node, old_rec_off); hfs_bnode_split()
316 hfs_bnode_copy(new_node, 14, node, data_start, data_end - data_start); hfs_bnode_split()
328 node->next = new_node->this; hfs_bnode_split()
329 hfs_bnode_read(node, &node_desc, 0, sizeof(node_desc)); hfs_bnode_split()
330 node_desc.next = cpu_to_be32(node->next); hfs_bnode_split()
331 node_desc.num_recs = cpu_to_be16(node->num_recs); hfs_bnode_split()
332 hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc)); hfs_bnode_split()
341 } else if (node->this == tree->leaf_tail) { hfs_bnode_split()
342 /* if there is no next node, this might be the new tail */ hfs_bnode_split()
347 hfs_bnode_dump(node); hfs_bnode_split()
349 hfs_bnode_put(node); hfs_bnode_split()
357 struct hfs_bnode *node, *new_node, *parent; hfs_brec_update_parent() local
363 node = fd->bnode; hfs_brec_update_parent()
365 if (!node->parent) hfs_brec_update_parent()
369 parent = hfs_bnode_find(tree, node->parent); hfs_brec_update_parent()
381 newkeylen = hfs_bnode_read_u16(node, 14) + 2; hfs_brec_update_parent()
396 hfs_dbg(BNODE_MOD, "splitting index node\n"); hfs_brec_update_parent()
421 hfs_bnode_copy(parent, fd->keyoffset, node, 14, newkeylen); hfs_brec_update_parent()
424 hfs_bnode_put(node); hfs_brec_update_parent()
425 node = parent; hfs_brec_update_parent()
441 if (new_node == node) hfs_brec_update_parent()
444 hfs_bnode_read_key(node, fd->search_key, 14); hfs_brec_update_parent()
448 if (!rec && node->parent) hfs_brec_update_parent()
451 fd->bnode = node; hfs_brec_update_parent()
457 struct hfs_bnode *node, *new_node; hfs_btree_inc_height() local
462 node = NULL; hfs_btree_inc_height()
464 node = hfs_bnode_find(tree, tree->root); hfs_btree_inc_height()
465 if (IS_ERR(node)) hfs_btree_inc_height()
466 return PTR_ERR(node); hfs_btree_inc_height()
470 hfs_bnode_put(node); hfs_btree_inc_height()
499 if (node) { hfs_btree_inc_height()
501 node->parent = tree->root; hfs_btree_inc_height()
502 if (node->type == HFS_NODE_LEAF || hfs_btree_inc_height()
505 key_size = hfs_bnode_read_u16(node, 14) + 2; hfs_btree_inc_height()
508 hfs_bnode_copy(new_node, 14, node, 14, key_size); hfs_btree_inc_height()
515 cnid = cpu_to_be32(node->this); hfs_btree_inc_height()
521 hfs_bnode_put(node); hfs_btree_inc_height()
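
hfs_brec_lenoff() above is driven by the offset table that grows backwards from the end of every B-tree node: the 16-bit entry for record r lives at node_size - (r + 1) * 2, so one 4-byte read at node_size - (rec + 2) * 2 yields the next record's start and this record's start, and their difference is the record length. A self-contained sketch, assuming big-endian on-disk order as in HFS; the names are invented.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Read a big-endian u16 at byte offset 'off' in the node buffer. */
static uint16_t node_read_u16(const unsigned char *node, size_t off)
{
	return (uint16_t)(node[off] << 8 | node[off + 1]);
}

/* hfs_brec_lenoff() analogue: return the length of record 'rec' and
 * store its offset, using the table at the tail of the node. */
static uint16_t brec_lenoff(const unsigned char *node, size_t node_size,
			    unsigned int rec, uint16_t *off)
{
	size_t dataoff = node_size - (rec + 2) * 2;
	uint16_t next = node_read_u16(node, dataoff);	/* start of rec + 1 */

	*off = node_read_u16(node, dataoff + 2);	/* start of rec */
	return next - *off;
}

int main(void)
{
	unsigned char node[32] = { 0 };
	uint16_t off, len;

	/* Offset table from the end: rec 0 starts at 14, rec 1 at 20. */
	node[31] = 14;	/* entry for rec 0 at node_size - 2 */
	node[29] = 20;	/* entry for rec 1 at node_size - 4 */

	len = brec_lenoff(node, sizeof(node), 0, &off);
	printf("rec 0: off %u len %u\n", off, len);	/* off 14 len 6 */
	return 0;
}
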
H A Dbtree.c97 * it must also be a multiple of the node and block size. hfsplus_calc_btree_clump_size()
115 * Round the clump size to a multiple of node and block size. hfsplus_calc_btree_clump_size()
123 * greater than the clump size. If so, just use one block or node. hfsplus_calc_btree_clump_size()
259 struct hfs_bnode *node; hfs_btree_close() local
266 while ((node = tree->node_hash[i])) { hfs_btree_close()
267 tree->node_hash[i] = node->next_hash; hfs_btree_close()
268 if (atomic_read(&node->refcnt)) hfs_btree_close()
269 pr_crit("node %d:%d " hfs_btree_close()
271 node->tree->cnid, node->this, hfs_btree_close()
272 atomic_read(&node->refcnt)); hfs_btree_close()
273 hfs_bnode_free(node); hfs_btree_close()
284 struct hfs_bnode *node; hfs_btree_write() local
287 node = hfs_bnode_find(tree, 0); hfs_btree_write()
288 if (IS_ERR(node)) hfs_btree_write()
292 page = node->page[0]; hfs_btree_write()
307 hfs_bnode_put(node); hfs_btree_write()
314 struct hfs_bnode *node; hfs_bmap_new_bmap() local
318 node = hfs_bnode_create(tree, idx); hfs_bmap_new_bmap()
319 if (IS_ERR(node)) hfs_bmap_new_bmap()
320 return node; hfs_bmap_new_bmap()
327 node->type = HFS_NODE_MAP; hfs_bmap_new_bmap()
328 node->num_recs = 1; hfs_bmap_new_bmap()
329 hfs_bnode_clear(node, 0, tree->node_size); hfs_bmap_new_bmap()
336 hfs_bnode_write(node, &desc, 0, sizeof(desc)); hfs_bmap_new_bmap()
337 hfs_bnode_write_u16(node, 14, 0x8000); hfs_bmap_new_bmap()
338 hfs_bnode_write_u16(node, tree->node_size - 2, 14); hfs_bmap_new_bmap()
339 hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6); hfs_bmap_new_bmap()
341 return node; hfs_bmap_new_bmap()
346 struct hfs_bnode *node, *next_node; hfs_bmap_alloc() local
376 node = hfs_bnode_find(tree, nidx); hfs_bmap_alloc()
377 if (IS_ERR(node)) hfs_bmap_alloc()
378 return node; hfs_bmap_alloc()
379 len = hfs_brec_lenoff(node, 2, &off16); hfs_bmap_alloc()
382 off += node->page_offset; hfs_bmap_alloc()
383 pagep = node->page + (off >> PAGE_CACHE_SHIFT); hfs_bmap_alloc()
400 hfs_bnode_put(node); hfs_bmap_alloc()
415 nidx = node->next; hfs_bmap_alloc()
417 hfs_dbg(BNODE_MOD, "create new bmap node\n"); hfs_bmap_alloc()
418 next_node = hfs_bmap_new_bmap(node, idx); hfs_bmap_alloc()
421 hfs_bnode_put(node); hfs_bmap_alloc()
424 node = next_node; hfs_bmap_alloc()
426 len = hfs_brec_lenoff(node, 0, &off16); hfs_bmap_alloc()
428 off += node->page_offset; hfs_bmap_alloc()
429 pagep = node->page + (off >> PAGE_CACHE_SHIFT); hfs_bmap_alloc()
435 void hfs_bmap_free(struct hfs_bnode *node) hfs_bmap_free() argument
443 hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this); hfs_bmap_free()
444 BUG_ON(!node->this); hfs_bmap_free()
445 tree = node->tree; hfs_bmap_free()
446 nidx = node->this; hfs_bmap_free()
447 node = hfs_bnode_find(tree, 0); hfs_bmap_free()
448 if (IS_ERR(node)) hfs_bmap_free()
450 len = hfs_brec_lenoff(node, 2, &off); hfs_bmap_free()
455 i = node->next; hfs_bmap_free()
456 hfs_bnode_put(node); hfs_bmap_free()
461 node->this); hfs_bmap_free()
464 node = hfs_bnode_find(tree, i); hfs_bmap_free()
465 if (IS_ERR(node)) hfs_bmap_free()
467 if (node->type != HFS_NODE_MAP) { hfs_bmap_free()
471 node->this, node->type); hfs_bmap_free()
472 hfs_bnode_put(node); hfs_bmap_free()
475 len = hfs_brec_lenoff(node, 0, &off); hfs_bmap_free()
477 off += node->page_offset + nidx / 8; hfs_bmap_free()
478 page = node->page[off >> PAGE_CACHE_SHIFT]; hfs_bmap_free()
486 node->this, node->type); hfs_bmap_free()
488 hfs_bnode_put(node); hfs_bmap_free()
494 hfs_bnode_put(node); hfs_bmap_free()
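
hfs_bmap_alloc()/hfs_bmap_free() above track B-tree node numbers in a bitmap carried by the map nodes, searching for the first clear bit and setting it (note the 0x8000 written in hfs_bmap_new_bmap(), which marks bit 0 MSB-first). A first-fit sketch over a plain byte array; the function names are invented.

#include <stdio.h>
#include <stdint.h>

/* Find the first clear bit in an MSB-first bitmap, set it, and
 * return its index, or -1 if the map is full. */
static int bmap_alloc(uint8_t *map, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++) {
		uint8_t bit = 0x80 >> (i & 7);

		if (!(map[i >> 3] & bit)) {
			map[i >> 3] |= bit;
			return (int)i;
		}
	}
	return -1;
}

/* hfs_bmap_free() analogue: clear the bit for a freed node. */
static void bmap_free(uint8_t *map, unsigned int nidx)
{
	map[nidx >> 3] &= ~(0x80 >> (nidx & 7));
}

int main(void)
{
	uint8_t map[2] = { 0xff, 0xbf };	/* bit 9 is the first clear bit */

	printf("allocated node %d\n", bmap_alloc(map, 16));	/* 9 */
	bmap_free(map, 9);
	printf("allocated node %d\n", bmap_alloc(map, 16));	/* 9 again */
	return 0;
}
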
/linux-4.4.14/arch/alpha/include/asm/
H A Dtopology.h11 int node; cpu_to_node() local
16 node = alpha_mv.cpuid_to_nid(cpu); cpu_to_node()
19 BUG_ON(node < 0); cpu_to_node()
22 return node; cpu_to_node()
27 static const struct cpumask *cpumask_of_node(int node)
31 if (node == -1)
34 cpumask_clear(&node_to_cpumask_map[node]);
37 if (cpu_to_node(cpu) == node)
38 cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
41 return &node_to_cpumask_map[node];
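
cpumask_of_node() in the Alpha header above simply rebuilds a node's CPU mask by scanning every CPU and comparing cpu_to_node(). The same shape in portable C, with a 64-bit word standing in for struct cpumask; the CPU-to-node table is made up.

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 8

/* Toy cpu -> node table standing in for alpha_mv.cpuid_to_nid(). */
static const int cpu_to_node[NR_CPUS] = { 0, 0, 1, 1, 0, 1, 0, 1 };

/* cpumask_of_node() analogue: test every CPU's node, as the Alpha
 * header does with cpumask_set_cpu(). */
static uint64_t cpumask_of_node(int node)
{
	uint64_t mask = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_to_node[cpu] == node)
			mask |= UINT64_C(1) << cpu;
	return mask;
}

int main(void)
{
	printf("node 1 cpus: %#llx\n",
	       (unsigned long long)cpumask_of_node(1));	/* 0xac */
	return 0;
}
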
/linux-4.4.14/drivers/staging/lustre/lustre/ldlm/
H A Dinterval_tree.c50 static inline int node_is_left_child(struct interval_node *node) node_is_left_child() argument
52 LASSERT(node->in_parent != NULL); node_is_left_child()
53 return node == node->in_parent->in_left; node_is_left_child()
56 static inline int node_is_right_child(struct interval_node *node) node_is_right_child() argument
58 LASSERT(node->in_parent != NULL); node_is_right_child()
59 return node == node->in_parent->in_right; node_is_right_child()
62 static inline int node_is_red(struct interval_node *node) node_is_red() argument
64 return node->in_color == INTERVAL_RED; node_is_red()
67 static inline int node_is_black(struct interval_node *node) node_is_black() argument
69 return node->in_color == INTERVAL_BLACK; node_is_black()
116 static struct interval_node *interval_first(struct interval_node *node) interval_first() argument
118 if (!node) interval_first()
120 while (node->in_left) interval_first()
121 node = node->in_left; interval_first()
122 return node; interval_first()
125 static struct interval_node *interval_next(struct interval_node *node) interval_next() argument
127 if (!node) interval_next()
129 if (node->in_right) interval_next()
130 return interval_first(node->in_right); interval_next()
131 while (node->in_parent && node_is_right_child(node)) interval_next()
132 node = node->in_parent; interval_next()
133 return node->in_parent; interval_next()
136 static void __rotate_change_maxhigh(struct interval_node *node, __rotate_change_maxhigh() argument
141 rotate->in_max_high = node->in_max_high; __rotate_change_maxhigh()
142 left_max = node->in_left ? node->in_left->in_max_high : 0; __rotate_change_maxhigh()
143 right_max = node->in_right ? node->in_right->in_max_high : 0; __rotate_change_maxhigh()
144 node->in_max_high = max_u64(interval_high(node), __rotate_change_maxhigh()
148 /* The left rotation "pivots" around the link from node to node->right, and
149 * - node will be linked to node->right's left child, and
150 * - node->right's left child will be linked to node's right child. */ __rotate_left()
151 static void __rotate_left(struct interval_node *node, __rotate_left() argument
154 struct interval_node *right = node->in_right; __rotate_left()
155 struct interval_node *parent = node->in_parent; __rotate_left()
157 node->in_right = right->in_left; __rotate_left()
158 if (node->in_right) __rotate_left()
159 right->in_left->in_parent = node; __rotate_left()
161 right->in_left = node; __rotate_left()
164 if (node_is_left_child(node)) __rotate_left()
171 node->in_parent = right; __rotate_left()
173 /* update max_high for node and right */ __rotate_left()
174 __rotate_change_maxhigh(node, right); __rotate_left()
177 /* The right rotation "pivots" around the link from node to node->left, and
178 * - node will be linked to node->left's right child, and
179 * - node->left's right child will be linked to node's left child. */ __rotate_right()
180 static void __rotate_right(struct interval_node *node, __rotate_right() argument
183 struct interval_node *left = node->in_left; __rotate_right()
184 struct interval_node *parent = node->in_parent; __rotate_right()
186 node->in_left = left->in_right; __rotate_right()
187 if (node->in_left) __rotate_right()
188 left->in_right->in_parent = node; __rotate_right()
189 left->in_right = node; __rotate_right()
193 if (node_is_right_child(node)) __rotate_right()
200 node->in_parent = left; __rotate_right()
202 /* update max_high for node and left */ __rotate_right()
203 __rotate_change_maxhigh(node, left); __rotate_right()
217 static void interval_insert_color(struct interval_node *node, interval_insert_color() argument
222 while ((parent = node->in_parent) && node_is_red(parent)) { interval_insert_color()
233 node = gparent; interval_insert_color()
237 if (parent->in_right == node) { interval_insert_color()
239 interval_swap(node, parent); interval_insert_color()
253 node = gparent; interval_insert_color()
257 if (node_is_left_child(node)) { interval_insert_color()
259 interval_swap(node, parent); interval_insert_color()
271 struct interval_node *interval_insert(struct interval_node *node, interval_insert() argument
277 LASSERT(!interval_is_intree(node)); interval_insert()
281 if (node_equal(parent, node)) interval_insert()
285 if (parent->in_max_high < interval_high(node)) interval_insert()
286 parent->in_max_high = interval_high(node); interval_insert()
288 if (node_compare(node, parent) < 0) interval_insert()
294 /* link node into the tree */ interval_insert()
295 node->in_parent = parent; interval_insert()
296 node->in_color = INTERVAL_RED; interval_insert()
297 node->in_left = NULL; interval_insert()
298 node->in_right = NULL; interval_insert()
299 *p = node; interval_insert()
301 interval_insert_color(node, root); interval_insert()
302 node->in_intree = 1; interval_insert()
308 static inline int node_is_black_or_0(struct interval_node *node) node_is_black_or_0() argument
310 return !node || node_is_black(node); node_is_black_or_0()
313 static void interval_erase_color(struct interval_node *node, interval_erase_color() argument
319 while (node_is_black_or_0(node) && node != *root) { interval_erase_color()
320 if (parent->in_left == node) { interval_erase_color()
331 node = parent; interval_erase_color()
332 parent = node->in_parent; interval_erase_color()
349 node = *root; interval_erase_color()
363 node = parent; interval_erase_color()
364 parent = node->in_parent; interval_erase_color()
381 node = *root; interval_erase_color()
386 if (node) interval_erase_color()
387 node->in_color = INTERVAL_BLACK; interval_erase_color()
391 * if the @max_high value of @node is changed, this function traverses a path
392 * from node up to the root to update max_high for the whole tree.
394 static void update_maxhigh(struct interval_node *node, update_maxhigh() argument
399 while (node) { update_maxhigh()
400 left_max = node->in_left ? node->in_left->in_max_high : 0; update_maxhigh()
401 right_max = node->in_right ? node->in_right->in_max_high : 0; update_maxhigh()
402 node->in_max_high = max_u64(interval_high(node), update_maxhigh()
405 if (node->in_max_high >= old_maxhigh) update_maxhigh()
407 node = node->in_parent; update_maxhigh()
411 void interval_erase(struct interval_node *node, interval_erase() argument
417 LASSERT(interval_is_intree(node)); interval_erase()
418 node->in_intree = 0; interval_erase()
419 if (!node->in_left) { interval_erase()
420 child = node->in_right; interval_erase()
421 } else if (!node->in_right) { interval_erase()
422 child = node->in_left; interval_erase()
424 struct interval_node *old = node; interval_erase()
426 node = interval_next(node); interval_erase()
427 child = node->in_right; interval_erase()
428 parent = node->in_parent; interval_erase()
429 color = node->in_color; interval_erase()
438 node->in_color = old->in_color; interval_erase()
439 node->in_right = old->in_right; interval_erase()
440 node->in_left = old->in_left; interval_erase()
441 node->in_parent = old->in_parent; interval_erase()
445 old->in_parent->in_left = node; interval_erase()
447 old->in_parent->in_right = node; interval_erase()
449 *root = node; interval_erase()
452 old->in_left->in_parent = node; interval_erase()
454 old->in_right->in_parent = node; interval_erase()
455 update_maxhigh(child ? : parent, node->in_max_high); interval_erase()
456 update_maxhigh(node, old->in_max_high); interval_erase()
458 parent = node; interval_erase()
461 parent = node->in_parent; interval_erase()
462 color = node->in_color; interval_erase()
467 if (node_is_left_child(node)) interval_erase()
475 update_maxhigh(child ? : parent, node->in_max_high); interval_erase()
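
The Lustre tree above is a red-black tree augmented with in_max_high, the largest interval end anywhere in a node's subtree; rotations (__rotate_change_maxhigh) and erases (update_maxhigh) recompute it from the node's own high value plus the children's cached maxima. The payoff is pruning: a subtree whose max_high is below the query's low bound cannot contain an overlap. A compact sketch of both the recompute step and the pruned overlap search on a plain, unbalanced tree; field names echo the snippet, but the struct is illustrative.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct toy_interval {
	uint64_t lo, hi;	/* inclusive interval [lo, hi] */
	uint64_t max_high;	/* max hi over this whole subtree */
	struct toy_interval *in_left, *in_right;
};

/* update_maxhigh()/__rotate_change_maxhigh() analogue: recompute a
 * node's cached maximum from its own hi and the children's caches. */
static void recompute_maxhigh(struct toy_interval *node)
{
	uint64_t m = node->hi;

	if (node->in_left && node->in_left->max_high > m)
		m = node->in_left->max_high;
	if (node->in_right && node->in_right->max_high > m)
		m = node->in_right->max_high;
	node->max_high = m;
}

/* Find any node overlapping [lo, hi] in a tree ordered by lo.  The
 * max_high cache lets us skip a left subtree that cannot reach lo. */
static struct toy_interval *
interval_search(struct toy_interval *node, uint64_t lo, uint64_t hi)
{
	while (node) {
		if (node->lo <= hi && lo <= node->hi)
			return node;	/* overlap found */
		if (node->in_left && node->in_left->max_high >= lo)
			node = node->in_left;	/* left side may overlap */
		else if (node->lo <= hi)
			node = node->in_right;	/* only right can overlap */
		else
			return NULL;	/* everything right starts past hi */
	}
	return NULL;
}

int main(void)
{
	struct toy_interval a = { 5, 10, 10, NULL, NULL };
	struct toy_interval b = { 15, 25, 25, NULL, NULL };
	struct toy_interval root = { 12, 14, 0, &a, &b };
	struct toy_interval *hit;

	recompute_maxhigh(&root);	/* root.max_high becomes 25 */
	hit = interval_search(&root, 8, 9);
	if (hit)
		printf("overlaps [%llu, %llu]\n",
		       (unsigned long long)hit->lo,
		       (unsigned long long)hit->hi);	/* [5, 10] */
	return 0;
}
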
H A Dldlm_extent.c112 struct ldlm_interval *node; ldlm_interval_alloc() local
115 node = kmem_cache_alloc(ldlm_interval_slab, GFP_NOFS | __GFP_ZERO); ldlm_interval_alloc()
116 if (node == NULL) ldlm_interval_alloc()
119 INIT_LIST_HEAD(&node->li_group); ldlm_interval_alloc()
120 ldlm_interval_attach(node, lock); ldlm_interval_alloc()
121 return node; ldlm_interval_alloc()
124 void ldlm_interval_free(struct ldlm_interval *node) ldlm_interval_free() argument
126 if (node) { ldlm_interval_free()
127 LASSERT(list_empty(&node->li_group)); ldlm_interval_free()
128 LASSERT(!interval_is_intree(&node->li_node)); ldlm_interval_free()
129 kmem_cache_free(ldlm_interval_slab, node); ldlm_interval_free()
164 struct ldlm_interval *node; ldlm_extent_add_lock() local
170 node = lock->l_tree_node; ldlm_extent_add_lock()
171 LASSERT(node != NULL); ldlm_extent_add_lock()
172 LASSERT(!interval_is_intree(&node->li_node)); ldlm_extent_add_lock()
178 /* initialize the node's extent */ ldlm_extent_add_lock()
180 interval_set(&node->li_node, extent->start, extent->end); ldlm_extent_add_lock()
183 found = interval_insert(&node->li_node, root); ldlm_extent_add_lock()
203 struct ldlm_interval *node = lock->l_tree_node; ldlm_extent_unlink_lock() local
207 if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */ ldlm_extent_unlink_lock()
217 node = ldlm_interval_detach(lock); ldlm_extent_unlink_lock()
218 if (node) { ldlm_extent_unlink_lock()
219 interval_erase(&node->li_node, &tree->lit_root); ldlm_extent_unlink_lock()
220 ldlm_interval_free(node); ldlm_extent_unlink_lock()
/linux-4.4.14/scripts/dtc/
H A Dlivetree.c100 struct node *build_node(struct property *proplist, struct node *children) build_node()
102 struct node *new = xmalloc(sizeof(*new)); build_node()
103 struct node *child; build_node()
117 struct node *build_node_delete(void) build_node_delete()
119 struct node *new = xmalloc(sizeof(*new)); build_node_delete()
128 struct node *name_node(struct node *node, char *name) name_node() argument
130 assert(node->name == NULL); name_node()
132 node->name = name; name_node()
134 return node; name_node()
137 struct node *merge_nodes(struct node *old_node, struct node *new_node) merge_nodes()
140 struct node *new_child, *old_child; merge_nodes()
145 /* Add new node labels to old node */ merge_nodes()
149 /* Move properties from the new node to the old node. If there merge_nodes()
178 /* if no collision occurred, add property to the old node. */
183 /* Move the override child nodes into the primary node. If
186 /* Pop the child node off the list */
207 /* if no collision occurred, add child to the old node. */
212 /* The new node contents are now merged into the old node. Free
213 * the new node. */
219 struct node *chain_node(struct node *first, struct node *list) chain_node()
227 void add_property(struct node *node, struct property *prop) add_property() argument
233 p = &node->proplist; add_property()
240 void delete_property_by_name(struct node *node, char *name) delete_property_by_name() argument
242 struct property *prop = node->proplist; delete_property_by_name()
259 void add_child(struct node *parent, struct node *child) add_child()
261 struct node **p; add_child()
273 void delete_node_by_name(struct node *parent, char *name) delete_node_by_name()
275 struct node *node = parent->children; delete_node_by_name() local
277 while (node) { delete_node_by_name()
278 if (!strcmp(node->name, name)) { delete_node_by_name()
279 delete_node(node); delete_node_by_name()
282 node = node->next_sibling; delete_node_by_name()
286 void delete_node(struct node *node) delete_node() argument
289 struct node *child; delete_node()
291 node->deleted = 1; delete_node()
292 for_each_child(node, child) delete_node()
294 for_each_property(node, prop) delete_node()
296 delete_labels(&node->labels); delete_node()
339 struct node *tree, uint32_t boot_cpuid_phys) build_boot_info()
355 const char *get_unitname(struct node *node) get_unitname() argument
357 if (node->name[node->basenamelen] == '\0') get_unitname()
360 return node->name + node->basenamelen + 1; get_unitname()
363 struct property *get_property(struct node *node, const char *propname) get_property() argument
367 for_each_property(node, prop) get_property()
380 struct property *get_property_by_label(struct node *tree, const char *label, get_property_by_label() argument
381 struct node **node) get_property_by_label()
384 struct node *c; get_property_by_label()
386 *node = tree; get_property_by_label()
397 prop = get_property_by_label(c, label, node); for_each_child()
402 *node = NULL;
406 struct marker *get_marker_label(struct node *tree, const char *label, get_marker_label() argument
407 struct node **node, struct property **prop) get_marker_label()
411 struct node *c; get_marker_label()
413 *node = tree; get_marker_label()
424 m = get_marker_label(c, label, node, prop); for_each_child()
430 *node = NULL;
434 struct node *get_subnode(struct node *node, const char *nodename) get_subnode() argument
436 struct node *child; get_subnode()
438 for_each_child(node, child) get_subnode()
445 struct node *get_node_by_path(struct node *tree, const char *path) get_node_by_path()
448 struct node *child; get_node_by_path()
471 struct node *get_node_by_label(struct node *tree, const char *label) get_node_by_label()
473 struct node *child, *node; get_node_by_label() local
483 node = get_node_by_label(child, label); for_each_child()
484 if (node) for_each_child()
485 return node; for_each_child()
491 struct node *get_node_by_phandle(struct node *tree, cell_t phandle) get_node_by_phandle()
493 struct node *child, *node; get_node_by_phandle() local
504 node = get_node_by_phandle(child, phandle); for_each_child()
505 if (node) for_each_child()
506 return node; for_each_child()
512 struct node *get_node_by_ref(struct node *tree, const char *ref) get_node_by_ref()
522 cell_t get_node_phandle(struct node *root, struct node *node) get_node_phandle() argument
526 if ((node->phandle != 0) && (node->phandle != -1)) get_node_phandle()
527 return node->phandle; get_node_phandle()
532 node->phandle = phandle; get_node_phandle()
534 if (!get_property(node, "linux,phandle") get_node_phandle()
536 add_property(node, get_node_phandle()
540 if (!get_property(node, "phandle") get_node_phandle()
542 add_property(node, get_node_phandle()
546 /* If the node *does* have a phandle property, we must get_node_phandle()
550 return node->phandle; get_node_phandle()
553 uint32_t guess_boot_cpuid(struct node *tree) guess_boot_cpuid()
555 struct node *cpus, *bootcpu; guess_boot_cpuid()
571 /* FIXME: Sanity check node? */ guess_boot_cpuid()
635 static void sort_properties(struct node *node) sort_properties() argument
640 for_each_property_withdel(node, prop) sort_properties()
648 for_each_property_withdel(node, prop) sort_properties()
653 node->proplist = tbl[0]; sort_properties()
663 const struct node *a, *b; cmp_subnode()
665 a = *((const struct node * const *)ax); cmp_subnode()
666 b = *((const struct node * const *)bx); cmp_subnode()
671 static void sort_subnodes(struct node *node) sort_subnodes() argument
674 struct node *subnode, **tbl; sort_subnodes()
676 for_each_child_withdel(node, subnode) sort_subnodes()
684 for_each_child_withdel(node, subnode) sort_subnodes()
689 node->children = tbl[0]; sort_subnodes()
697 static void sort_node(struct node *node) sort_node() argument
699 struct node *c; sort_node()
701 sort_properties(node); sort_node()
702 sort_subnodes(node); sort_node()
703 for_each_child_withdel(node, c) sort_node()
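
get_node_by_path() above resolves a slash-separated path by matching one component against the node's children and recursing on the remainder. A standalone sketch with the node struct trimmed to the fields the walk needs; the toy tree in main() is arbitrary.

#include <stdio.h>
#include <string.h>

struct toy_node {
	const char *name;
	struct toy_node *children;
	struct toy_node *next_sibling;
};

/* get_node_by_path() analogue: peel one component off the path,
 * find the matching child, recurse on the rest. */
static struct toy_node *get_node_by_path(struct toy_node *tree,
					 const char *path)
{
	struct toy_node *child;
	const char *p;

	while (path[0] == '/')
		path++;
	if (path[0] == '\0')	/* empty remainder: this is the node */
		return tree;

	p = strchr(path, '/');
	for (child = tree->children; child; child = child->next_sibling) {
		size_t n = p ? (size_t)(p - path) : strlen(path);

		if (strlen(child->name) == n && !strncmp(child->name, path, n))
			return p ? get_node_by_path(child, p + 1) : child;
	}
	return NULL;
}

int main(void)
{
	struct toy_node cpu0 = { "cpu@0", NULL, NULL };
	struct toy_node cpus = { "cpus", &cpu0, NULL };
	struct toy_node root = { "", &cpus, NULL };
	struct toy_node *n = get_node_by_path(&root, "/cpus/cpu@0");

	printf("%s\n", n ? n->name : "(not found)");	/* cpu@0 */
	return 0;
}
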
H A Dchecks.c43 typedef void (*tree_check_fn)(struct check *c, struct node *dt);
44 typedef void (*node_check_fn)(struct check *c, struct node *dt, struct node *node);
45 typedef void (*prop_check_fn)(struct check *c, struct node *dt,
46 struct node *node, struct property *prop);
126 static void check_nodes_props(struct check *c, struct node *dt, struct node *node) check_nodes_props() argument
128 struct node *child; check_nodes_props()
131 TRACE(c, "%s", node->fullpath); check_nodes_props()
133 c->node_fn(c, dt, node); check_nodes_props()
136 for_each_property(node, prop) { for_each_property()
137 TRACE(c, "%s\t'%s'", node->fullpath, prop->name); for_each_property()
138 c->prop_fn(c, dt, node, prop); for_each_property()
141 for_each_child(node, child)
145 static bool run_check(struct check *c, struct node *dt) run_check()
192 static inline void check_always_fail(struct check *c, struct node *dt) check_always_fail()
198 static void check_is_string(struct check *c, struct node *root, check_is_string() argument
199 struct node *node) check_is_string()
204 prop = get_property(node, propname); check_is_string()
210 propname, node->fullpath); check_is_string()
217 static void check_is_cell(struct check *c, struct node *root, check_is_cell() argument
218 struct node *node) check_is_cell()
223 prop = get_property(node, propname); check_is_cell()
229 propname, node->fullpath); check_is_cell()
240 static void check_duplicate_node_names(struct check *c, struct node *dt, check_duplicate_node_names() argument
241 struct node *node) check_duplicate_node_names()
243 struct node *child, *child2; check_duplicate_node_names()
245 for_each_child(node, child) check_duplicate_node_names()
250 FAIL(c, "Duplicate node name %s", check_duplicate_node_names()
255 static void check_duplicate_property_names(struct check *c, struct node *dt, check_duplicate_property_names() argument
256 struct node *node) check_duplicate_property_names()
260 for_each_property(node, prop) { for_each_property()
266 prop->name, node->fullpath); for_each_property()
277 static void check_node_name_chars(struct check *c, struct node *dt, check_node_name_chars() argument
278 struct node *node) check_node_name_chars()
280 int n = strspn(node->name, c->data); check_node_name_chars()
282 if (n < strlen(node->name)) check_node_name_chars()
283 FAIL(c, "Bad character '%c' in node %s", check_node_name_chars()
284 node->name[n], node->fullpath); check_node_name_chars()
288 static void check_node_name_format(struct check *c, struct node *dt, check_node_name_format() argument
289 struct node *node) check_node_name_format()
291 if (strchr(get_unitname(node), '@')) check_node_name_format()
293 node->fullpath); check_node_name_format()
297 static void check_property_name_chars(struct check *c, struct node *dt, check_property_name_chars() argument
298 struct node *node, struct property *prop) check_property_name_chars()
303 FAIL(c, "Bad character '%c' in property name \"%s\", node %s", check_property_name_chars()
304 prop->name[n], prop->name, node->fullpath); check_property_name_chars()
309 #define DESCLABEL_ARGS(node,prop,mark) \
313 ((prop) ? "' in " : ""), (node)->fullpath
315 static void check_duplicate_label(struct check *c, struct node *dt, check_duplicate_label() argument
316 const char *label, struct node *node, check_duplicate_label()
319 struct node *othernode = NULL; check_duplicate_label()
334 if ((othernode != node) || (otherprop != prop) || (othermark != mark)) check_duplicate_label()
337 label, DESCLABEL_ARGS(node, prop, mark), check_duplicate_label()
341 static void check_duplicate_label_node(struct check *c, struct node *dt, check_duplicate_label_node() argument
342 struct node *node) check_duplicate_label_node()
346 for_each_label(node->labels, l) check_duplicate_label_node()
347 check_duplicate_label(c, dt, l->label, node, NULL, NULL); check_duplicate_label_node()
349 static void check_duplicate_label_prop(struct check *c, struct node *dt, check_duplicate_label_prop() argument
350 struct node *node, struct property *prop) check_duplicate_label_prop()
356 check_duplicate_label(c, dt, l->label, node, prop, NULL); check_duplicate_label_prop()
359 check_duplicate_label(c, dt, m->ref, node, prop, m); check_duplicate_label_prop()
364 static void check_explicit_phandles(struct check *c, struct node *root, check_explicit_phandles() argument
365 struct node *node, struct property *prop) check_explicit_phandles()
368 struct node *other; check_explicit_phandles()
377 node->fullpath, prop->val.len, prop->name); check_explicit_phandles()
384 if (node != get_node_by_ref(root, m->ref)) for_each_marker_of_type()
385 /* "Set this node's phandle equal to some for_each_marker_of_type()
386 * other node's phandle". That's nonsensical for_each_marker_of_type()
388 FAIL(c, "%s in %s is a reference to another node", for_each_marker_of_type()
389 prop->name, node->fullpath); for_each_marker_of_type()
392 /* But setting this node's phandle equal to its own for_each_marker_of_type()
394 * phandle for this node, even if it's not otherwise for_each_marker_of_type()
404 node->fullpath, phandle, prop->name);
408 if (node->phandle && (node->phandle != phandle))
410 node->fullpath, prop->name);
413 if (other && (other != node)) {
415 node->fullpath, phandle, other->fullpath);
419 node->phandle = phandle;
423 static void check_name_properties(struct check *c, struct node *root, check_name_properties() argument
424 struct node *node) check_name_properties()
428 for (pp = &node->proplist; *pp; pp = &((*pp)->next)) check_name_properties()
437 if ((prop->val.len != node->basenamelen+1) check_name_properties()
438 || (memcmp(prop->val.val, node->name, node->basenamelen) != 0)) { check_name_properties()
440 " of base node name)", node->fullpath, prop->val.val); check_name_properties()
457 static void fixup_phandle_references(struct check *c, struct node *dt, fixup_phandle_references() argument
458 struct node *node, struct property *prop) fixup_phandle_references()
461 struct node *refnode; fixup_phandle_references()
469 FAIL(c, "Reference to non-existent node or label \"%s\"\n", for_each_marker_of_type()
481 static void fixup_path_references(struct check *c, struct node *dt, fixup_path_references() argument
482 struct node *node, struct property *prop) fixup_path_references()
485 struct node *refnode; fixup_path_references()
493 FAIL(c, "Reference to non-existent node or label \"%s\"\n", for_each_marker_of_type()
517 static void fixup_addr_size_cells(struct check *c, struct node *dt, fixup_addr_size_cells() argument
518 struct node *node) fixup_addr_size_cells()
522 node->addr_cells = -1; fixup_addr_size_cells()
523 node->size_cells = -1; fixup_addr_size_cells()
525 prop = get_property(node, "#address-cells"); fixup_addr_size_cells()
527 node->addr_cells = propval_cell(prop); fixup_addr_size_cells()
529 prop = get_property(node, "#size-cells"); fixup_addr_size_cells()
531 node->size_cells = propval_cell(prop); fixup_addr_size_cells()
541 static void check_reg_format(struct check *c, struct node *dt, check_reg_format() argument
542 struct node *node) check_reg_format()
547 prop = get_property(node, "reg"); check_reg_format()
551 if (!node->parent) { check_reg_format()
552 FAIL(c, "Root node has a \"reg\" property"); check_reg_format()
557 FAIL(c, "\"reg\" property in %s is empty", node->fullpath); check_reg_format()
559 addr_cells = node_addr_cells(node->parent); check_reg_format()
560 size_cells = node_size_cells(node->parent); check_reg_format()
566 node->fullpath, prop->val.len, addr_cells, size_cells); check_reg_format()
570 static void check_ranges_format(struct check *c, struct node *dt, check_ranges_format() argument
571 struct node *node) check_ranges_format()
576 prop = get_property(node, "ranges"); check_ranges_format()
580 if (!node->parent) { check_ranges_format()
581 FAIL(c, "Root node has a \"ranges\" property"); check_ranges_format()
585 p_addr_cells = node_addr_cells(node->parent); check_ranges_format()
586 p_size_cells = node_size_cells(node->parent); check_ranges_format()
587 c_addr_cells = node_addr_cells(node); check_ranges_format()
588 c_size_cells = node_size_cells(node); check_ranges_format()
595 node->fullpath, c_addr_cells, node->parent->fullpath, check_ranges_format()
600 node->fullpath, c_size_cells, node->parent->fullpath, check_ranges_format()
605 "#size-cells == %d)", node->fullpath, prop->val.len, check_ranges_format()
614 static void check_avoid_default_addr_size(struct check *c, struct node *dt, check_avoid_default_addr_size() argument
615 struct node *node) check_avoid_default_addr_size()
619 if (!node->parent) check_avoid_default_addr_size()
620 return; /* Ignore root node */ check_avoid_default_addr_size()
622 reg = get_property(node, "reg"); check_avoid_default_addr_size()
623 ranges = get_property(node, "ranges"); check_avoid_default_addr_size()
628 if (node->parent->addr_cells == -1) check_avoid_default_addr_size()
630 node->fullpath); check_avoid_default_addr_size()
632 if (node->parent->size_cells == -1) check_avoid_default_addr_size()
634 node->fullpath); check_avoid_default_addr_size()
639 struct node *dt) check_obsolete_chosen_interrupt_controller()
641 struct node *chosen; check_obsolete_chosen_interrupt_controller()
739 struct node *dt = bi->dt; process_checks()
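
check_nodes_props() above is the entire traversal engine behind dtc's checks: run the check's node callback on the current node, its property callback on each property, then recurse into every child. The skeleton below reduces it to the fields and callbacks the walk actually touches; the demo check just prints what it visits.

#include <stdio.h>

struct toy_prop {
	const char *name;
	struct toy_prop *next;
};

struct toy_node {
	const char *fullpath;
	struct toy_prop *proplist;
	struct toy_node *children, *next_sibling;
};

struct toy_check {
	void (*node_fn)(struct toy_check *c, struct toy_node *node);
	void (*prop_fn)(struct toy_check *c, struct toy_node *node,
			struct toy_prop *prop);
};

/* check_nodes_props() analogue: node callback, then each property,
 * then depth-first into the children. */
static void check_nodes_props(struct toy_check *c, struct toy_node *node)
{
	struct toy_prop *prop;
	struct toy_node *child;

	if (c->node_fn)
		c->node_fn(c, node);
	for (prop = node->proplist; prop; prop = prop->next)
		if (c->prop_fn)
			c->prop_fn(c, node, prop);
	for (child = node->children; child; child = child->next_sibling)
		check_nodes_props(c, child);
}

static void show_node(struct toy_check *c, struct toy_node *node)
{
	(void)c;
	printf("node %s\n", node->fullpath);
}

static void show_prop(struct toy_check *c, struct toy_node *node,
		      struct toy_prop *prop)
{
	(void)c;
	printf("  %s: %s\n", node->fullpath, prop->name);
}

int main(void)
{
	struct toy_prop compat = { "compatible", NULL };
	struct toy_node cpu = { "/cpus/cpu@0", &compat, NULL, NULL };
	struct toy_node root = { "/", NULL, &cpu, NULL };
	struct toy_check c = { show_node, show_prop };

	check_nodes_props(&c, &root);
	return 0;
}
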
H A Ddtc.h145 struct node { struct
149 struct node *children;
151 struct node *parent;
152 struct node *next_sibling;
192 struct node *build_node(struct property *proplist, struct node *children);
193 struct node *build_node_delete(void);
194 struct node *name_node(struct node *node, char *name);
195 struct node *chain_node(struct node *first, struct node *list);
196 struct node *merge_nodes(struct node *old_node, struct node *new_node);
198 void add_property(struct node *node, struct property *prop);
199 void delete_property_by_name(struct node *node, char *name);
201 void add_child(struct node *parent, struct node *child);
202 void delete_node_by_name(struct node *parent, char *name);
203 void delete_node(struct node *node);
205 const char *get_unitname(struct node *node);
206 struct property *get_property(struct node *node, const char *propname);
208 struct property *get_property_by_label(struct node *tree, const char *label,
209 struct node **node);
210 struct marker *get_marker_label(struct node *tree, const char *label,
211 struct node **node, struct property **prop);
212 struct node *get_subnode(struct node *node, const char *nodename);
213 struct node *get_node_by_path(struct node *tree, const char *path);
214 struct node *get_node_by_label(struct node *tree, const char *label);
215 struct node *get_node_by_phandle(struct node *tree, cell_t phandle);
216 struct node *get_node_by_ref(struct node *tree, const char *ref);
217 cell_t get_node_phandle(struct node *root, struct node *node);
219 uint32_t guess_boot_cpuid(struct node *tree);
240 struct node *dt; /* the device tree */
245 struct node *tree, uint32_t boot_cpuid_phys);
H A Dfdtput.c33 OPER_WRITE_PROP, /* Write a property in a node */
34 OPER_CREATE_NODE, /* Create a new node */
47 * Report an error with a particular node.
50 * @param namelen Length of node name, or -1 to use entire string
137 int node; store_key_value() local
140 node = fdt_path_offset(blob, node_name); store_key_value()
141 if (node < 0) { store_key_value()
142 report_error(node_name, -1, node); store_key_value()
146 err = fdt_setprop(blob, node, property, buf, len); store_key_value()
168 int node, offset = 0; create_paths() local
174 for (sep = path; *sep; path = sep + 1, offset = node) { create_paths()
180 node = fdt_subnode_offset_namelen(blob, offset, path, create_paths()
182 if (node == -FDT_ERR_NOTFOUND) { create_paths()
183 node = fdt_add_subnode_namelen(blob, offset, path, create_paths()
186 if (node < 0) { create_paths()
187 report_error(path, sep - path, node); create_paths()
196 * Create a new node in the fdt.
203 * @param node_name Name of node to create
204 * @return new node offset if found, or -1 on failure
208 int node = 0; create_node() local
219 node = fdt_path_offset(blob, node_name); create_node()
220 if (node < 0) { create_node()
221 report_error(node_name, -1, node); create_node()
226 node = fdt_add_subnode(blob, node, p + 1); create_node()
227 if (node < 0) { create_node()
228 report_error(p + 1, -1, node); create_node()
281 " fdtput <options> <dt file> <node> <property> [<value>...]\n"
282 " fdtput -c <options> <dt file> [<node>...]\n"
285 "\t-p\t\tAutomatically create nodes as needed for the node path\n"
316 * - delete node (optionally recursively) main()
317 * - rename node main()
354 usage("Missing node"); main()
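
The fdtput flow above is "resolve the node path, then write the property". A minimal standalone sketch of that pattern against libfdt, assuming the caller already holds a valid FDT blob in writable memory (blob I/O and the report_error() helper are omitted; set_u32_prop is an invented name):

    #include <stdint.h>
    #include <libfdt.h>

    /* Write a 32-bit property on the node at `path` inside `blob`.
     * Returns 0 on success or a negative libfdt error code. */
    static int set_u32_prop(void *blob, const char *path,
                            const char *prop, uint32_t value)
    {
            int node = fdt_path_offset(blob, path);  /* path -> node offset */

            if (node < 0)
                    return node;                     /* e.g. -FDT_ERR_NOTFOUND */
            return fdt_setprop_u32(blob, node, prop, value);
    }
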
H A Dfdtget.c38 MODE_SHOW_VALUE, /* show values for node properties */
39 MODE_LIST_PROPS, /* list the properties for a node */
40 MODE_LIST_SUBNODES, /* list the subnodes of a node */
48 const char *default_val; /* default value if node/property not found */
116 * List all properties in a node, one per line.
119 * @param node Node to display
122 static int list_properties(const void *blob, int node) list_properties() argument
128 prop = fdt_first_property_offset(blob, node); list_properties()
144 * List all subnodes in a node, one per line
147 * @param node Node to display
150 static int list_subnodes(const void *blob, int node) list_subnodes() argument
152 int nextoffset; /* next node offset from libfdt */ list_subnodes()
156 int depth = 1; /* the assumed depth of this node */ list_subnodes()
159 tag = fdt_next_tag(blob, node, &nextoffset); list_subnodes()
162 pathp = fdt_get_name(blob, node, NULL); list_subnodes()
191 node = nextoffset; list_subnodes()
197 * Show the data for a given node (and perhaps property) according to the
202 * @param node Node to display
207 int node, const char *property) show_data_for_item()
214 err = list_properties(blob, node); show_data_for_item()
218 err = list_subnodes(blob, node); show_data_for_item()
223 value = fdt_getprop(blob, node, property, &len); show_data_for_item()
255 int i, node; do_fdtget() local
262 node = fdt_path_offset(blob, arg[i]); do_fdtget()
263 if (node < 0) { do_fdtget()
268 report_error(arg[i], node); do_fdtget()
274 if (show_data_for_item(blob, disp, node, prop)) do_fdtget()
285 " fdtget <options> <dt file> [<node> <property>]...\n"
286 " fdtget -p <options> <dt file> [<node> ]...\n"
289 "\t-p\t\tList properties for each node\n"
290 "\t-l\t\tList subnodes for each node\n"
359 /* Check for node, property arguments */ main()
206 show_data_for_item(const void *blob, struct display_info *disp, int node, const char *property) show_data_for_item() argument
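
fdtget's read path mirrors it: resolve the node, then fdt_getprop(). A hedged sketch that prints a property assumed to hold a NUL-terminated string (print_string_prop is an invented name):

    #include <stdio.h>
    #include <libfdt.h>

    static int print_string_prop(const void *blob, const char *path,
                                 const char *prop)
    {
            int len, node = fdt_path_offset(blob, path);
            const void *value;

            if (node < 0)
                    return node;
            value = fdt_getprop(blob, node, prop, &len); /* NULL => len holds error */
            if (!value)
                    return len;
            printf("%s\n", (const char *)value);
            return 0;
    }
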
/linux-4.4.14/include/linux/
H A Dtimerqueue.h9 struct rb_node node; member in struct:timerqueue_node
20 struct timerqueue_node *node);
22 struct timerqueue_node *node);
24 struct timerqueue_node *node);
31 * Returns a pointer to the timer node that has the
40 static inline void timerqueue_init(struct timerqueue_node *node) timerqueue_init() argument
42 RB_CLEAR_NODE(&node->node); timerqueue_init()
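
A timerqueue_node is designed to be embedded in a larger structure and initialized before insertion. A minimal in-kernel sketch under that assumption (struct my_timer and my_timer_start() are invented; the queue must have been set up once with timerqueue_init_head()):

    #include <linux/timerqueue.h>

    struct my_timer {                       /* hypothetical embedding struct */
            struct timerqueue_node node;
            void (*fn)(struct my_timer *);
    };

    static struct timerqueue_head queue;    /* timerqueue_init_head(&queue) first */

    static void my_timer_start(struct my_timer *t, ktime_t expires)
    {
            timerqueue_init(&t->node);        /* clear the rb_node linkage */
            t->node.expires = expires;
            timerqueue_add(&queue, &t->node); /* queue stays sorted by expires */
    }
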
H A Dinterval_tree_generic.h31 * ITSTART(n): start endpoint of ITSTRUCT node n
32 * ITLAST(n): last endpoint of ITSTRUCT node n
45 static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \
47 ITTYPE max = ITLAST(node), subtree_last; \
48 if (node->ITRB.rb_left) { \
49 subtree_last = rb_entry(node->ITRB.rb_left, \
54 if (node->ITRB.rb_right) { \
55 subtree_last = rb_entry(node->ITRB.rb_right, \
68 ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \
71 ITTYPE start = ITSTART(node), last = ITLAST(node); \
85 node->ITSUBTREE = last; \
86 rb_link_node(&node->ITRB, rb_parent, link); \
87 rb_insert_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
90 ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, struct rb_root *root) \
92 rb_erase_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
98 * Note that a node's interval intersects [start;last] iff: \
99 * Cond1: ITSTART(node) <= last \
101 * Cond2: start <= ITLAST(node) \
105 ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
109 * Loop invariant: start <= node->ITSUBTREE \
112 if (node->ITRB.rb_left) { \
113 ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \
118 * Iterate to find the leftmost such node N. \
124 node = left; \
128 if (ITSTART(node) <= last) { /* Cond1 */ \
129 if (start <= ITLAST(node)) /* Cond2 */ \
130 return node; /* node is leftmost match */ \
131 if (node->ITRB.rb_right) { \
132 node = rb_entry(node->ITRB.rb_right, \
134 if (start <= node->ITSUBTREE) \
145 ITSTRUCT *node; \
149 node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \
150 if (node->ITSUBTREE < start) \
152 return ITPREFIX ## _subtree_search(node, start, last); \
156 ITPREFIX ## _iter_next(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
158 struct rb_node *rb = node->ITRB.rb_right, *prev; \
163 * Cond1: ITSTART(node) <= last \
164 * rb == node->ITRB.rb_right \
175 /* Move up the tree until we come from a node's left child */ \
177 rb = rb_parent(&node->ITRB); \
180 prev = &node->ITRB; \
181 node = rb_entry(rb, ITSTRUCT, ITRB); \
182 rb = node->ITRB.rb_right; \
185 /* Check if the node intersects [start;last] */ \
186 if (last < ITSTART(node)) /* !Cond1 */ \
188 else if (start <= ITLAST(node)) /* Cond2 */ \
189 return node; \
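
INTERVAL_TREE_DEFINE() stamps out the functions above for a caller-supplied node type. A sketch of one instantiation, assuming a node that stores the closed interval [start, last] directly (struct my_interval and the my_interval_tree prefix are invented; argument order as in the macro definition):

    #include <linux/interval_tree_generic.h>

    struct my_interval {
            struct rb_node rb;
            unsigned long start, last;      /* closed interval [start, last] */
            unsigned long __subtree_last;   /* maintained by generated code */
    };

    #define START(n) ((n)->start)
    #define LAST(n)  ((n)->last)

    INTERVAL_TREE_DEFINE(struct my_interval, rb, unsigned long, __subtree_last,
                         START, LAST, static, my_interval_tree)

    /* Generated: my_interval_tree_insert(), my_interval_tree_remove(),
     * my_interval_tree_iter_first(), my_interval_tree_iter_next(). */
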
H A Dnode.h2 * include/linux/node.h - generic node definition
5 * basic 'struct node' here, which can be embedded in per-arch
8 * Basic handling of the devices is done in drivers/base/node.c
11 * Nodes are exported via driverfs in the class/node/devices/
21 struct node { struct
30 extern struct node *node_devices[];
31 typedef void (*node_registration_func_t)(struct node *);
33 extern void unregister_node(struct node *node);
82 #define to_node(device) container_of(device, struct node, dev)
H A Drbtree_latch.h39 struct rb_node node[2]; member in struct:latch_tree_node
68 __lt_from_rb(struct rb_node *node, int idx) __lt_from_rb() argument
70 return container_of(node, struct latch_tree_node, node[idx]); __lt_from_rb()
79 struct rb_node *node = &ltn->node[idx]; __lt_insert() local
93 rb_link_node_rcu(node, parent, link); __lt_insert()
94 rb_insert_color(node, root); __lt_insert()
100 rb_erase(&ltn->node[idx], &ltr->tree[idx]); __lt_erase()
105 int (*comp)(void *key, struct latch_tree_node *node)) __lt_find()
107 struct rb_node *node = rcu_dereference_raw(ltr->tree[idx].rb_node); __lt_find() local
111 while (node) { __lt_find()
112 ltn = __lt_from_rb(node, idx); __lt_find()
116 node = rcu_dereference_raw(node->rb_left); __lt_find()
118 node = rcu_dereference_raw(node->rb_right); __lt_find()
127 * latch_tree_insert() - insert @node into the trees @root
128 * @node: nodes to insert
129 * @root: trees to insert @node into
130 * @ops: operators defining the node order
132 * It inserts @node into @root in an ordered fashion such that we can always
136 * tree structure is stored before we can observe the new @node.
142 latch_tree_insert(struct latch_tree_node *node, latch_tree_insert() argument
147 __lt_insert(node, root, 0, ops->less); latch_tree_insert()
149 __lt_insert(node, root, 1, ops->less); latch_tree_insert()
153 * latch_tree_erase() - removes @node from the trees @root
154 * @node: nodes to remove latch_tree_erase()
155 * @root: trees to remove @node from
156 * @ops: operators defining the node order
158 * Removes @node from the trees @root in an ordered fashion such that we can
162 * It is assumed that @node will observe one RCU quiescent state before being
169 latch_tree_erase(struct latch_tree_node *node, latch_tree_erase() argument
174 __lt_erase(node, root, 0); latch_tree_erase()
176 __lt_erase(node, root, 1); latch_tree_erase()
180 * latch_tree_find() - find the node matching @key in the trees @root
183 * @ops: operators defining the node order
185 * Does a lockless lookup in the trees @root for the node matching @key.
195 * Returns: a pointer to the node matching @key or NULL.
201 struct latch_tree_node *node; latch_tree_find() local
206 node = __lt_find(key, root, seq & 1, ops->comp); latch_tree_find()
209 return node; latch_tree_find()
104 __lt_find(void *key, struct latch_tree_root *ltr, int idx, int (*comp)(void *key, struct latch_tree_node *node)) __lt_find() argument
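
Using the latch tree needs a latch_tree_ops with the less()/comp() pair referenced above; writers still serialize among themselves, readers run under RCU. A hedged sketch with an invented key-bearing object:

    #include <linux/rbtree_latch.h>

    struct my_obj {                         /* hypothetical object */
            unsigned long addr;
            struct latch_tree_node lt;
    };

    static bool my_less(struct latch_tree_node *a, struct latch_tree_node *b)
    {
            return container_of(a, struct my_obj, lt)->addr <
                   container_of(b, struct my_obj, lt)->addr;
    }

    static int my_comp(void *key, struct latch_tree_node *n)
    {
            unsigned long k = *(unsigned long *)key;
            unsigned long a = container_of(n, struct my_obj, lt)->addr;

            return k < a ? -1 : k > a ? 1 : 0;
    }

    static const struct latch_tree_ops my_ops = { .less = my_less, .comp = my_comp };
    static struct latch_tree_root my_root;

    /* Writer (externally serialized): latch_tree_insert(&obj->lt, &my_root, &my_ops);
     * Reader (RCU):                   latch_tree_find(&key, &my_root, &my_ops);  */
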
H A Dinterval_tree.h14 interval_tree_insert(struct interval_tree_node *node, struct rb_root *root);
17 interval_tree_remove(struct interval_tree_node *node, struct rb_root *root);
24 interval_tree_iter_next(struct interval_tree_node *node,
H A Dnodemask.h21 * void node_set(node, mask) turn on bit 'node' in mask
22 * void node_clear(node, mask) turn off bit 'node' in mask
25 * int node_isset(node, mask) true iff bit 'node' set in mask
26 * int node_test_and_set(node, mask) test and set bit 'node' in mask
45 * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
46 * int first_unset_node(mask) First node not set in mask, or
49 * nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set
61 * for_each_node_mask(node, mask) for-loop node over mask
66 * int node_random(mask) Random node with set bit in mask
68 * int node_online(node) Is some node online?
69 * int node_possible(node) Is some node possible?
71 * node_set_online(node) set bit 'node' in node_online_map
72 * node_set_offline(node) clear bit 'node' in node_online_map
74 * for_each_node(node) for-loop node over node_possible_map
75 * for_each_online_node(node) for-loop node over node_online_map
115 #define node_set(node, dst) __node_set((node), &(dst)) __node_set()
116 static __always_inline void __node_set(int node, volatile nodemask_t *dstp) __node_set() argument
118 set_bit(node, dstp->bits); __node_set()
121 #define node_clear(node, dst) __node_clear((node), &(dst)) __node_clear()
122 static inline void __node_clear(int node, volatile nodemask_t *dstp) __node_clear() argument
124 clear_bit(node, dstp->bits); __node_clear()
140 #define node_isset(node, nodemask) test_bit((node), (nodemask).bits)
142 #define node_test_and_set(node, nodemask) \
143 __node_test_and_set((node), &(nodemask)) __node_test_and_set()
144 static inline int __node_test_and_set(int node, nodemask_t *addr) __node_test_and_set() argument
146 return test_and_set_bit(node, addr->bits); __node_test_and_set()
262 static inline void init_nodemask_of_node(nodemask_t *mask, int node) init_nodemask_of_node() argument
265 node_set(node, *mask); init_nodemask_of_node()
268 #define nodemask_of_node(node) \
272 m.bits[0] = 1UL << (node); \
274 init_nodemask_of_node(&m, (node)); \
359 #define for_each_node_mask(node, mask) \
360 for ((node) = first_node(mask); \
361 (node) < MAX_NUMNODES; \
362 (node) = next_node((node), (mask)))
364 #define for_each_node_mask(node, mask) \
366 for ((node) = 0; (node) < 1; (node)++)
373 N_POSSIBLE, /* The node could become online at some point */
374 N_ONLINE, /* The node is online */
375 N_NORMAL_MEMORY, /* The node has regular memory */
377 N_HIGH_MEMORY, /* The node has regular or high memory */
382 N_MEMORY, /* The node has memory (regular, high, movable) */
386 N_CPU, /* The node has one or more cpus */
398 static inline int node_state(int node, enum node_states state) node_state() argument
400 return node_isset(node, node_states[state]); node_state()
403 static inline void node_set_state(int node, enum node_states state) node_set_state() argument
405 __node_set(node, &node_states[state]); node_set_state()
408 static inline void node_clear_state(int node, enum node_states state) node_clear_state() argument
410 __node_clear(node, &node_states[state]); node_clear_state()
449 static inline int node_state(int node, enum node_states state) node_state() argument
451 return node == 0; node_state()
454 static inline void node_set_state(int node, enum node_states state) node_set_state() argument
458 static inline void node_clear_state(int node, enum node_states state) node_clear_state() argument
467 #define for_each_node_state(node, __state) \
468 for ( (node) = 0; (node) == 0; (node) = 1)
476 #define node_set_online(node) node_set_state((node), N_ONLINE)
477 #define node_set_offline(node) node_clear_state((node), N_ONLINE)
495 #define node_online(node) node_state((node), N_ONLINE)
496 #define node_possible(node) node_state((node), N_POSSIBLE)
498 #define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
499 #define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
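
The nodemask API composes much like cpumask. A short in-kernel sketch that collects the online nodes with memory and walks the result (walk_memory_nodes() is invented):

    #include <linux/nodemask.h>

    static void walk_memory_nodes(void)
    {
            nodemask_t mask = NODE_MASK_NONE;
            int node;

            for_each_online_node(node)              /* over node_online_map */
                    if (node_state(node, N_MEMORY)) /* nodes that have memory */
                            node_set(node, mask);

            for_each_node_mask(node, mask)
                    pr_info("node %d has memory\n", node);
    }
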
H A Dof_graph.h32 * for_each_endpoint_of_node - iterate over every endpoint in a device node
33 * @parent: parent device node containing ports and endpoints
34 * @child: loop variable pointing to the current endpoint node
43 int of_graph_parse_endpoint(const struct device_node *node,
45 struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id);
51 const struct device_node *node);
52 struct device_node *of_graph_get_remote_port(const struct device_node *node);
55 static inline int of_graph_parse_endpoint(const struct device_node *node, of_graph_parse_endpoint() argument
62 struct device_node *node, u32 id) of_graph_get_port_by_id()
81 const struct device_node *node) of_graph_get_remote_port_parent()
87 const struct device_node *node) of_graph_get_remote_port()
61 of_graph_get_port_by_id( struct device_node *node, u32 id) of_graph_get_port_by_id() argument
80 of_graph_get_remote_port_parent( const struct device_node *node) of_graph_get_remote_port_parent() argument
86 of_graph_get_remote_port( const struct device_node *node) of_graph_get_remote_port() argument
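
for_each_endpoint_of_node() pairs with of_graph_parse_endpoint() when a driver walks its ports. A sketch that just logs each endpoint, assuming the 4.4-era signatures declared above (log_endpoints() is invented):

    #include <linux/of.h>
    #include <linux/of_graph.h>

    static void log_endpoints(struct device_node *parent)
    {
            struct device_node *ep = NULL;
            struct of_endpoint endpoint;

            for_each_endpoint_of_node(parent, ep) { /* handles of_node refcounts */
                    if (of_graph_parse_endpoint(ep, &endpoint))
                            continue;
                    pr_info("port %u endpoint %u\n", endpoint.port, endpoint.id);
            }
    }
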
H A Drbtree_augmented.h39 void (*propagate)(struct rb_node *node, struct rb_node *stop);
44 extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
50 * leading to the inserted node, then call rb_link_node() as usual and
57 rb_insert_augmented(struct rb_node *node, struct rb_root *root, rb_insert_augmented() argument
60 __rb_insert_augmented(node, root, augment->rotate); rb_insert_augmented()
69 rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
70 rbtype augmented = rbcompute(node); \
71 if (node->rbaugmented == augmented) \
73 node->rbaugmented = augmented; \
74 rb = rb_parent(&node->rbfield); \
137 __rb_erase_augmented(struct rb_node *node, struct rb_root *root, __rb_erase_augmented() argument
140 struct rb_node *child = node->rb_right; __rb_erase_augmented()
141 struct rb_node *tmp = node->rb_left; __rb_erase_augmented()
147 * Case 1: node to erase has no more than 1 child (easy!) __rb_erase_augmented()
150 * and node must be black due to 4). We adjust colors locally __rb_erase_augmented()
153 pc = node->__rb_parent_color; __rb_erase_augmented()
155 __rb_change_child(node, child, parent, root); __rb_erase_augmented()
163 /* Still case 1, but this time the child is node->rb_left */ __rb_erase_augmented()
164 tmp->__rb_parent_color = pc = node->__rb_parent_color; __rb_erase_augmented()
166 __rb_change_child(node, tmp, parent, root); __rb_erase_augmented()
175 * Case 2: node's successor is its right child __rb_erase_augmented()
186 augment->copy(node, successor); __rb_erase_augmented()
189 * Case 3: node's successor is leftmost under __rb_erase_augmented()
190 * node's right child subtree __rb_erase_augmented()
212 augment->copy(node, successor); __rb_erase_augmented()
216 tmp = node->rb_left; __rb_erase_augmented()
220 pc = node->__rb_parent_color; __rb_erase_augmented()
222 __rb_change_child(node, successor, tmp, root); __rb_erase_augmented()
241 rb_erase_augmented(struct rb_node *node, struct rb_root *root, rb_erase_augmented() argument
244 struct rb_node *rebalance = __rb_erase_augmented(node, root, augment); rb_erase_augmented()
H A Dtopology.h42 #define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
45 #define for_each_node_with_cpus(node) \
46 for_each_online_node(node) \
47 if (nr_cpus_node(node))
89 static inline void set_numa_node(int node) set_numa_node() argument
91 this_cpu_write(numa_node, node); set_numa_node()
96 static inline void set_cpu_numa_node(int cpu, int node) set_cpu_numa_node() argument
98 per_cpu(numa_node, cpu) = node; set_cpu_numa_node()
125 static inline void set_numa_mem(int node) set_numa_mem() argument
127 this_cpu_write(_numa_mem_, node); set_numa_mem()
128 _node_numa_mem_[numa_node_id()] = node; set_numa_mem()
133 static inline int node_to_mem_node(int node) node_to_mem_node() argument
135 return _node_numa_mem_[node]; node_to_mem_node()
155 static inline void set_cpu_numa_mem(int cpu, int node) set_cpu_numa_mem() argument
157 per_cpu(_numa_mem_, cpu) = node; set_cpu_numa_mem()
158 _node_numa_mem_[cpu_to_node(cpu)] = node; set_cpu_numa_mem()
173 static inline int node_to_mem_node(int node) node_to_mem_node() argument
175 return node; node_to_mem_node()
H A Drbtree.h56 #define RB_EMPTY_NODE(node) \
57 ((node)->__rb_parent_color == (unsigned long)(node))
58 #define RB_CLEAR_NODE(node) \
59 ((node)->__rb_parent_color = (unsigned long)(node))
76 /* Fast replacement of a single node without remove/rebalance/add/rebalance */
80 static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, rb_link_node() argument
83 node->__rb_parent_color = (unsigned long)parent; rb_link_node()
84 node->rb_left = node->rb_right = NULL; rb_link_node()
86 *rb_link = node; rb_link_node()
89 static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, rb_link_node_rcu() argument
92 node->__rb_parent_color = (unsigned long)parent; rb_link_node_rcu()
93 node->rb_left = node->rb_right = NULL; rb_link_node_rcu()
95 rcu_assign_pointer(*rb_link, node); rb_link_node_rcu()
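
rb_link_node()/rb_insert_color() are always used in the same descend-then-link pattern. The canonical insertion loop, specialized to a hypothetical key-bearing node (struct my_node and my_insert() are invented; duplicate keys go right here by choice):

    #include <linux/rbtree.h>

    struct my_node {
            struct rb_node rb;
            unsigned long key;
    };

    static void my_insert(struct rb_root *root, struct my_node *new)
    {
            struct rb_node **link = &root->rb_node, *parent = NULL;

            while (*link) {                         /* descend to insertion point */
                    struct my_node *cur = rb_entry(*link, struct my_node, rb);

                    parent = *link;
                    link = new->key < cur->key ? &(*link)->rb_left
                                               : &(*link)->rb_right;
            }
            rb_link_node(&new->rb, parent, link);   /* splice in as a red leaf */
            rb_insert_color(&new->rb, root);        /* rebalance */
    }
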
H A Dof_fdt.h30 unsigned long node,
34 unsigned long node,
37 unsigned long node);
38 extern int of_fdt_match(const void *blob, unsigned long node,
52 extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
55 extern const void *of_get_flat_dt_prop(unsigned long node, const char *name,
57 extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
58 extern int of_flat_dt_match(unsigned long node, const char *const *matches);
62 extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
64 extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
75 extern int early_init_dt_scan_root(unsigned long node, const char *uname,
H A Dresource_ext.h32 struct list_head node; member in struct:resource_entry
45 list_add(&entry->node, head); resource_list_add()
51 list_add_tail(&entry->node, head); resource_list_add_tail()
56 list_del(&entry->node); resource_list_del()
72 list_for_each_entry((entry), (list), node)
75 list_for_each_entry_safe((entry), (tmp), (list), node)
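
resource_entry lists are plain list_heads underneath, so iteration is just the wrappers above. A small sketch that prints each entry of an already-populated list (dump_resources() is invented; entry->res points at the underlying struct resource):

    #include <linux/resource_ext.h>

    static void dump_resources(struct list_head *list)
    {
            struct resource_entry *entry;

            resource_list_for_each_entry(entry, list)
                    pr_info("%pR\n", entry->res);   /* %pR formats a resource */
    }
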
/linux-4.4.14/mm/
H A Dinterval_tree.c28 /* Insert node immediately after prev in the interval tree */ vma_interval_tree_insert_after()
29 void vma_interval_tree_insert_after(struct vm_area_struct *node, vma_interval_tree_insert_after() argument
35 unsigned long last = vma_last_pgoff(node); vma_interval_tree_insert_after()
37 VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node); vma_interval_tree_insert_after() local
56 node->shared.rb_subtree_last = last; vma_interval_tree_insert_after()
57 rb_link_node(&node->shared.rb, &parent->shared.rb, link); vma_interval_tree_insert_after()
58 rb_insert_augmented(&node->shared.rb, root, vma_interval_tree_insert_after()
76 void anon_vma_interval_tree_insert(struct anon_vma_chain *node, anon_vma_interval_tree_insert() argument
80 node->cached_vma_start = avc_start_pgoff(node); anon_vma_interval_tree_insert()
81 node->cached_vma_last = avc_last_pgoff(node); anon_vma_interval_tree_insert()
83 __anon_vma_interval_tree_insert(node, root); anon_vma_interval_tree_insert()
86 void anon_vma_interval_tree_remove(struct anon_vma_chain *node, anon_vma_interval_tree_remove() argument
89 __anon_vma_interval_tree_remove(node, root); anon_vma_interval_tree_remove()
100 anon_vma_interval_tree_iter_next(struct anon_vma_chain *node, anon_vma_interval_tree_iter_next() argument
103 return __anon_vma_interval_tree_iter_next(node, first, last); anon_vma_interval_tree_iter_next()
107 void anon_vma_interval_tree_verify(struct anon_vma_chain *node) anon_vma_interval_tree_verify() argument
109 WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node)); anon_vma_interval_tree_verify()
110 WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node)); anon_vma_interval_tree_verify()
H A Dsparse-vmemmap.c38 static void * __init_refok __earlyonly_bootmem_alloc(int node, __earlyonly_bootmem_alloc() argument
44 BOOTMEM_ALLOC_ACCESSIBLE, node); __earlyonly_bootmem_alloc()
50 void * __meminit vmemmap_alloc_block(unsigned long size, int node) vmemmap_alloc_block() argument
56 if (node_state(node, N_HIGH_MEMORY)) vmemmap_alloc_block()
58 node, GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT, vmemmap_alloc_block()
68 return __earlyonly_bootmem_alloc(node, size, size, vmemmap_alloc_block()
73 void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node) vmemmap_alloc_block_buf() argument
78 return vmemmap_alloc_block(size, node); vmemmap_alloc_block_buf()
83 return vmemmap_alloc_block(size, node); vmemmap_alloc_block_buf()
90 void __meminit vmemmap_verify(pte_t *pte, int node, vmemmap_verify() argument
96 if (node_distance(actual_node, node) > LOCAL_DISTANCE) vmemmap_verify()
101 pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node) vmemmap_pte_populate() argument
106 void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node); vmemmap_pte_populate()
115 pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node) vmemmap_pmd_populate() argument
119 void *p = vmemmap_alloc_block(PAGE_SIZE, node); vmemmap_pmd_populate()
127 pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node) vmemmap_pud_populate() argument
131 void *p = vmemmap_alloc_block(PAGE_SIZE, node); vmemmap_pud_populate()
139 pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node) vmemmap_pgd_populate() argument
143 void *p = vmemmap_alloc_block(PAGE_SIZE, node); vmemmap_pgd_populate()
152 unsigned long end, int node) vmemmap_populate_basepages()
161 pgd = vmemmap_pgd_populate(addr, node); vmemmap_populate_basepages()
164 pud = vmemmap_pud_populate(pgd, addr, node); vmemmap_populate_basepages()
167 pmd = vmemmap_pmd_populate(pud, addr, node); vmemmap_populate_basepages()
170 pte = vmemmap_pte_populate(pmd, addr, node); vmemmap_populate_basepages()
173 vmemmap_verify(pte, node, addr, addr + PAGE_SIZE); vmemmap_populate_basepages()
151 vmemmap_populate_basepages(unsigned long start, unsigned long end, int node) vmemmap_populate_basepages() argument
/linux-4.4.14/arch/sparc/prom/
H A Dtree_64.c19 static phandle prom_node_to_node(const char *type, phandle node) prom_node_to_node() argument
26 args[3] = (unsigned int) node; prom_node_to_node()
34 /* Return the child of node 'node' or zero if this node has no
37 inline phandle __prom_getchild(phandle node) __prom_getchild() argument
39 return prom_node_to_node("child", node); __prom_getchild()
42 phandle prom_getchild(phandle node) prom_getchild() argument
46 if ((s32)node == -1) prom_getchild()
48 cnode = __prom_getchild(node); prom_getchild()
55 inline phandle prom_getparent(phandle node) prom_getparent() argument
59 if ((s32)node == -1) prom_getparent()
61 cnode = prom_node_to_node("parent", node); prom_getparent()
67 /* Return the next sibling of node 'node' or zero if no more siblings
70 inline phandle __prom_getsibling(phandle node) __prom_getsibling() argument
72 return prom_node_to_node(prom_peer_name, node); __prom_getsibling()
75 phandle prom_getsibling(phandle node) prom_getsibling() argument
79 if ((s32)node == -1) prom_getsibling()
81 sibnode = __prom_getsibling(node); prom_getsibling()
89 /* Return the length in bytes of property 'prop' at node 'node'.
92 int prom_getproplen(phandle node, const char *prop) prom_getproplen() argument
96 if (!node || !prop) prom_getproplen()
102 args[3] = (unsigned int) node; prom_getproplen()
112 /* Acquire a property 'prop' at node 'node' and place it in
116 int prom_getproperty(phandle node, const char *prop, prom_getproperty() argument
122 plen = prom_getproplen(node, prop); prom_getproperty()
129 args[3] = (unsigned int) node; prom_getproperty()
144 int prom_getint(phandle node, const char *prop) prom_getint() argument
148 if (prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1) prom_getint()
159 int prom_getintdefault(phandle node, const char *property, int deflt) prom_getintdefault() argument
163 retval = prom_getint(node, property); prom_getintdefault()
172 int prom_getbool(phandle node, const char *prop) prom_getbool() argument
176 retval = prom_getproplen(node, prop); prom_getbool()
187 void prom_getstring(phandle node, const char *prop, char *user_buf, prom_getstring() argument
192 len = prom_getproperty(node, prop, user_buf, ubuf_size); prom_getstring()
199 /* Does the device at node 'node' have name 'name'?
202 int prom_nodematch(phandle node, const char *name) prom_nodematch() argument
205 prom_getproperty(node, "name", namebuf, sizeof(namebuf)); prom_nodematch()
211 /* Search siblings at 'node_start' for a node with name
212 * 'nodename'. Return node if successful, zero if not.
235 /* Return the first property type for node 'node'.
238 char *prom_firstprop(phandle node, char *buffer) prom_firstprop() argument
243 if ((s32)node == -1) prom_firstprop()
249 args[3] = (unsigned int) node; prom_firstprop()
261 * at node 'node'. Returns a NULL string if no more
262 * property types for this node.
264 char *prom_nextprop(phandle node, const char *oprop, char *buffer) prom_nextprop() argument
269 if ((s32)node == -1) { prom_nextprop()
281 args[3] = (unsigned int) node; prom_nextprop()
310 int prom_node_has_property(phandle node, const char *prop) prom_node_has_property() argument
316 prom_nextprop(node, buf, buf); prom_node_has_property()
324 /* Set property 'pname' at node 'node' to value 'value' which has a length
328 prom_setprop(phandle node, const char *pname, char *value, int size) prom_setprop() argument
346 args[3] = (unsigned int) node; prom_setprop()
361 phandle node; prom_inst2pkg() local
371 node = (int) args[4]; prom_inst2pkg()
372 if ((s32)node == -1) prom_inst2pkg()
374 return node; prom_inst2pkg()
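
The calls above compose into the usual "find device, read property" idiom on sparc64. A hedged sketch mirroring the init_64.c probe further down (print_prom_version() is invented; assumes the firmware exposes an "/openprom" node):

    #include <asm/oplib.h>

    static void print_prom_version(void)
    {
            char version[80];
            phandle node = prom_finddevice("/openprom");

            if (!node || (s32)node == -1)
                    return;
            prom_getstring(node, "version", version, sizeof(version));
            version[sizeof(version) - 1] = '\0';
            prom_printf("PROM version: %s\n", version);
    }
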
H A Dtree_32.c23 static phandle __prom_getchild(phandle node) __prom_getchild() argument
29 cnode = prom_nodeops->no_child(node); __prom_getchild()
36 /* Return the child of node 'node' or zero if this node has no
39 phandle prom_getchild(phandle node) prom_getchild() argument
43 if ((s32)node == -1) prom_getchild()
46 cnode = __prom_getchild(node); prom_getchild()
55 static phandle __prom_getsibling(phandle node) __prom_getsibling() argument
61 cnode = prom_nodeops->no_nextnode(node); __prom_getsibling()
68 /* Return the next sibling of node 'node' or zero if no more siblings
71 phandle prom_getsibling(phandle node) prom_getsibling() argument
75 if ((s32)node == -1) prom_getsibling()
78 sibnode = __prom_getsibling(node); prom_getsibling()
86 /* Return the length in bytes of property 'prop' at node 'node'.
89 int prom_getproplen(phandle node, const char *prop) prom_getproplen() argument
94 if((!node) || (!prop)) prom_getproplen()
98 ret = prom_nodeops->no_proplen(node, prop); prom_getproplen()
105 /* Acquire a property 'prop' at node 'node' and place it in
109 int prom_getproperty(phandle node, const char *prop, char *buffer, int bufsize) prom_getproperty() argument
114 plen = prom_getproplen(node, prop); prom_getproperty()
119 ret = prom_nodeops->no_getprop(node, prop, buffer); prom_getproperty()
129 int prom_getint(phandle node, char *prop) prom_getint() argument
133 if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1) prom_getint()
143 int prom_getintdefault(phandle node, char *property, int deflt) prom_getintdefault() argument
147 retval = prom_getint(node, property); prom_getintdefault()
155 int prom_getbool(phandle node, char *prop) prom_getbool() argument
159 retval = prom_getproplen(node, prop); prom_getbool()
169 void prom_getstring(phandle node, char *prop, char *user_buf, int ubuf_size) prom_getstring() argument
173 len = prom_getproperty(node, prop, user_buf, ubuf_size); prom_getstring()
180 /* Search siblings at 'node_start' for a node with name
181 * 'nodename'. Return node if successful, zero if not.
203 static char *__prom_nextprop(phandle node, char * oprop) __prom_nextprop() argument
209 prop = prom_nodeops->no_nextprop(node, oprop); __prom_nextprop()
217 * at node 'node'. Returns an empty string if no more
218 * property types for this node.
220 char *prom_nextprop(phandle node, char *oprop, char *buffer) prom_nextprop() argument
222 if (node == 0 || (s32)node == -1) prom_nextprop()
225 return __prom_nextprop(node, oprop); prom_nextprop()
233 phandle node = prom_root_node, node2; prom_finddevice() local
238 if (!*s) return node; /* path '.../' is legal */ prom_finddevice()
239 node = prom_getchild(node); prom_finddevice()
245 node = prom_searchsiblings(node, nbuf); prom_finddevice()
246 if (!node) prom_finddevice()
255 node2 = node; prom_finddevice()
259 node = node2; prom_finddevice()
273 return node; prom_finddevice()
277 /* Set property 'pname' at node 'node' to value 'value' which has a length
280 int prom_setprop(phandle node, const char *pname, char *value, int size) prom_setprop() argument
290 ret = prom_nodeops->no_setprop(node, pname, value, size); prom_setprop()
299 phandle node; prom_inst2pkg() local
303 node = (*romvec->pv_v2devops.v2_inst2pkg)(inst); prom_inst2pkg()
306 if ((s32)node == -1) prom_inst2pkg()
308 return node; prom_inst2pkg()
H A Dinit_64.c20 /* The root node of the prom device tree. */
33 phandle node; prom_init() local
43 node = prom_finddevice("/openprom"); prom_init()
44 if (!node || (s32)node == -1) prom_init()
47 prom_getstring(node, "version", prom_version, sizeof(prom_version)); prom_init()
55 printk("PROMLIB: Root node compatible: %s\n", prom_root_compatible); prom_init_report()
/linux-4.4.14/tools/perf/util/
H A Dstrfilter.c13 static void strfilter_node__delete(struct strfilter_node *node) strfilter_node__delete() argument
15 if (node) { strfilter_node__delete()
16 if (node->p && !is_operator(*node->p)) strfilter_node__delete()
17 zfree((char **)&node->p); strfilter_node__delete()
18 strfilter_node__delete(node->l); strfilter_node__delete()
19 strfilter_node__delete(node->r); strfilter_node__delete()
20 free(node); strfilter_node__delete()
65 struct strfilter_node *node = zalloc(sizeof(*node)); strfilter_node__alloc() local
67 if (node) { strfilter_node__alloc()
68 node->p = op; strfilter_node__alloc()
69 node->l = l; strfilter_node__alloc()
70 node->r = r; strfilter_node__alloc()
73 return node; strfilter_node__alloc()
109 case '!': /* Add NOT as a leaf node */ strfilter_node__new()
213 static bool strfilter_node__compare(struct strfilter_node *node, strfilter_node__compare() argument
216 if (!node || !node->p) strfilter_node__compare()
219 switch (*node->p) { strfilter_node__compare()
221 return strfilter_node__compare(node->l, str) || strfilter_node__compare()
222 strfilter_node__compare(node->r, str); strfilter_node__compare()
224 return strfilter_node__compare(node->l, str) && strfilter_node__compare()
225 strfilter_node__compare(node->r, str); strfilter_node__compare()
227 return !strfilter_node__compare(node->r, str); strfilter_node__compare()
229 return strglobmatch(str, node->p); strfilter_node__compare()
241 static int strfilter_node__sprint(struct strfilter_node *node, char *buf);
243 /* sprint node in parenthesis if needed */ strfilter_node__sprint_pt()
244 static int strfilter_node__sprint_pt(struct strfilter_node *node, char *buf) strfilter_node__sprint_pt() argument
247 int pt = node->r ? 2 : 0; /* don't need to check node->l */ strfilter_node__sprint_pt()
251 len = strfilter_node__sprint(node, buf); strfilter_node__sprint_pt()
259 static int strfilter_node__sprint(struct strfilter_node *node, char *buf) strfilter_node__sprint() argument
263 if (!node || !node->p) strfilter_node__sprint()
266 switch (*node->p) { strfilter_node__sprint()
269 len = strfilter_node__sprint_pt(node->l, buf); strfilter_node__sprint()
274 *(buf + len++) = *node->p; strfilter_node__sprint()
278 rlen = strfilter_node__sprint_pt(node->r, buf); strfilter_node__sprint()
284 len = strlen(node->p); strfilter_node__sprint()
286 strcpy(buf, node->p); strfilter_node__sprint()
H A Dintlist.c19 struct int_node *node = malloc(sizeof(*node)); intlist__node_new() local
21 if (node != NULL) { intlist__node_new()
22 node->i = i; intlist__node_new()
23 node->priv = NULL; intlist__node_new()
24 rc = &node->rb_node; intlist__node_new()
38 struct int_node *node = container_of(rb_node, struct int_node, rb_node); intlist__node_delete() local
40 int_node__delete(node); intlist__node_delete()
46 struct int_node *node = container_of(rb_node, struct int_node, rb_node); intlist__node_cmp() local
48 return node->i - i; intlist__node_cmp()
56 void intlist__remove(struct intlist *ilist, struct int_node *node) intlist__remove() argument
58 rblist__remove_node(&ilist->rblist, &node->rb_node); intlist__remove()
64 struct int_node *node = NULL; __intlist__findnew() local
76 node = container_of(rb_node, struct int_node, rb_node); __intlist__findnew()
78 return node; __intlist__findnew()
138 struct int_node *node = NULL; intlist__entry() local
143 node = container_of(rb_node, struct int_node, rb_node); intlist__entry()
145 return node; intlist__entry()
H A Dcallchain.c244 __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node, __sort_chain_flat() argument
250 n = rb_first(&node->rb_root_in); __sort_chain_flat()
258 if (node->hit && node->hit >= min_hit) __sort_chain_flat()
259 rb_insert_callchain(rb_root, node, CHAIN_FLAT); __sort_chain_flat()
270 __sort_chain_flat(rb_root, &root->node, min_hit); sort_chain_flat()
273 static void __sort_chain_graph_abs(struct callchain_node *node, __sort_chain_graph_abs() argument
279 node->rb_root = RB_ROOT; __sort_chain_graph_abs()
280 n = rb_first(&node->rb_root_in); __sort_chain_graph_abs()
288 rb_insert_callchain(&node->rb_root, child, __sort_chain_graph_abs()
297 __sort_chain_graph_abs(&chain_root->node, min_hit); sort_chain_graph_abs()
298 rb_root->rb_node = chain_root->node.rb_root.rb_node; sort_chain_graph_abs()
301 static void __sort_chain_graph_rel(struct callchain_node *node, __sort_chain_graph_rel() argument
308 node->rb_root = RB_ROOT; __sort_chain_graph_rel()
309 min_hit = ceil(node->children_hit * min_percent); __sort_chain_graph_rel()
311 n = rb_first(&node->rb_root_in); __sort_chain_graph_rel()
318 rb_insert_callchain(&node->rb_root, child, __sort_chain_graph_rel()
327 __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0); sort_chain_graph_rel()
328 rb_root->rb_node = chain_root->node.rb_root.rb_node; sort_chain_graph_rel()
391 * Fill the node with callchain values
394 fill_node(struct callchain_node *node, struct callchain_cursor *cursor) fill_node() argument
398 node->val_nr = cursor->nr - cursor->pos; fill_node()
399 if (!node->val_nr) fill_node()
400 pr_warning("Warning: empty node in callchain tree\n"); fill_node()
415 list_add_tail(&call->list, &node->val); fill_node()
437 static s64 match_chain(struct callchain_cursor_node *node, match_chain() argument
440 struct symbol *sym = node->sym; match_chain()
446 return cnode->ip - node->ip; match_chain()
486 struct callchain_cursor_node *node; split_add_child() local
492 node = callchain_cursor_current(cursor); split_add_child()
504 if (match_chain(node, cnode) < 0) split_add_child()
527 struct callchain_cursor_node *node; append_chain_children() local
531 node = callchain_cursor_current(cursor); append_chain_children()
532 if (!node) append_chain_children()
552 /* nothing in children, add to the current node */ append_chain_children()
573 * Lookup in the current node append_chain()
579 struct callchain_cursor_node *node; append_chain() local
581 node = callchain_cursor_current(cursor); append_chain()
582 if (!node) append_chain()
585 cmp = match_chain(node, cnode); append_chain()
602 /* we match only a part of the node. Split it and add the new chain */ append_chain()
614 /* We match the node and still have a part remaining */ append_chain()
629 append_chain_children(&root->node, cursor, period); callchain_append()
682 return merge_chain_branch(cursor, &dst->node, &src->node); callchain_merge()
688 struct callchain_cursor_node *node = *cursor->last; callchain_cursor_append() local
690 if (!node) { callchain_cursor_append()
691 node = calloc(1, sizeof(*node)); callchain_cursor_append()
692 if (!node) callchain_cursor_append()
695 *cursor->last = node; callchain_cursor_append()
698 node->ip = ip; callchain_cursor_append()
699 node->map = map; callchain_cursor_append()
700 node->sym = sym; callchain_cursor_append()
704 cursor->last = &node->next; callchain_cursor_append()
731 int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node, fill_callchain_info() argument
734 al->map = node->map; fill_callchain_info()
735 al->sym = node->sym; fill_callchain_info()
736 if (node->map) fill_callchain_info()
737 al->addr = node->map->map_ip(node->map, node->ip); fill_callchain_info()
739 al->addr = node->ip; fill_callchain_info()
802 static void free_callchain_node(struct callchain_node *node) free_callchain_node() argument
808 list_for_each_entry_safe(list, tmp, &node->val, list) { free_callchain_node()
813 n = rb_first(&node->rb_root_in); free_callchain_node()
817 rb_erase(&child->rb_node_in, &node->rb_root_in); free_callchain_node()
829 free_callchain_node(&root->node); free_callchain()
/linux-4.4.14/arch/x86/entry/vdso/
H A Dvgetcpu.c14 __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused) __vdso_getcpu() argument
22 if (node) __vdso_getcpu()
23 *node = p >> 12; __vdso_getcpu()
27 long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
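
__vdso_getcpu() packs both answers into one per-CPU word: the low 12 bits hold the CPU number and the upper bits the node, hence `p >> 12`. Userspace can reach the same data through the getcpu syscall; a minimal sketch (Linux-specific; glibc's sched_getcpu() typically routes through the vDSO on x86-64):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
            unsigned cpu, node;

            if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
                    printf("cpu=%u node=%u\n", cpu, node);
            return 0;
    }
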
/linux-4.4.14/arch/mips/include/asm/mach-loongson64/
H A Dtopology.h7 #define parent_node(node) (node)
8 #define cpumask_of_node(node) (&__node_data[(node)]->cpumask)
/linux-4.4.14/arch/sh/include/asm/
H A Dtopology.h7 #define parent_node(node) ((void)(node),0)
9 #define cpumask_of_node(node) ((void)node, cpu_online_mask)
/linux-4.4.14/arch/metag/include/asm/
H A Dtopology.h7 #define parent_node(node) ((void)(node), 0)
9 #define cpumask_of_node(node) ((void)node, cpu_online_mask)
/linux-4.4.14/kernel/gcov/
H A Dfs.c32 * @list: list head for child node list
35 * @parent: parent node
141 * Return a profiling data set associated with the given node. This is
145 static struct gcov_info *get_node_info(struct gcov_node *node) get_node_info() argument
147 if (node->num_loaded > 0) get_node_info()
148 return node->loaded_info[0]; get_node_info()
150 return node->unloaded_info; get_node_info()
155 * all profiling data associated with the given node.
157 static struct gcov_info *get_accumulated_info(struct gcov_node *node) get_accumulated_info() argument
162 if (node->unloaded_info) get_accumulated_info()
163 info = gcov_info_dup(node->unloaded_info); get_accumulated_info()
165 info = gcov_info_dup(node->loaded_info[i++]); get_accumulated_info()
168 for (; i < node->num_loaded; i++) get_accumulated_info()
169 gcov_info_add(info, node->loaded_info[i]); get_accumulated_info()
180 struct gcov_node *node = inode->i_private; gcov_seq_open() local
190 * profiling data sets associated with one node simple. gcov_seq_open()
192 info = get_accumulated_info(node); gcov_seq_open()
235 * Find a node by the associated data file name. Needs to be called with
240 struct gcov_node *node; get_node_by_name() local
243 list_for_each_entry(node, &all_head, all) { get_node_by_name()
244 info = get_node_info(node); get_node_by_name()
246 return node; get_node_by_name()
253 * Reset all profiling data associated with the specified node.
255 static void reset_node(struct gcov_node *node) reset_node() argument
259 if (node->unloaded_info) reset_node()
260 gcov_info_reset(node->unloaded_info); reset_node()
261 for (i = 0; i < node->num_loaded; i++) reset_node()
262 gcov_info_reset(node->loaded_info[i]); reset_node()
265 static void remove_node(struct gcov_node *node);
270 * remove the debugfs node as well.
277 struct gcov_node *node; gcov_seq_write() local
282 node = get_node_by_name(gcov_info_filename(info)); gcov_seq_write()
283 if (node) { gcov_seq_write()
284 /* Reset counts or remove node for unloaded modules. */ gcov_seq_write()
285 if (node->num_loaded == 0) gcov_seq_write()
286 remove_node(node); gcov_seq_write()
288 reset_node(node); gcov_seq_write()
366 static void add_links(struct gcov_node *node, struct dentry *parent) add_links() argument
375 node->links = kcalloc(num, sizeof(struct dentry *), GFP_KERNEL); add_links()
376 if (!node->links) add_links()
380 gcov_info_filename(get_node_info(node)), add_links()
387 node->links[i] = debugfs_create_symlink(deskew(basename), add_links()
389 if (!node->links[i]) add_links()
398 debugfs_remove(node->links[i]); add_links()
399 kfree(node->links); add_links()
400 node->links = NULL; add_links()
411 /* Basic initialization of a new node. */ init_node()
412 static void init_node(struct gcov_node *node, struct gcov_info *info, init_node() argument
415 INIT_LIST_HEAD(&node->list); init_node()
416 INIT_LIST_HEAD(&node->children); init_node()
417 INIT_LIST_HEAD(&node->all); init_node()
418 if (node->loaded_info) { init_node()
419 node->loaded_info[0] = info; init_node()
420 node->num_loaded = 1; init_node()
422 node->parent = parent; init_node()
424 strcpy(node->name, name); init_node()
428 * Create a new node and associated debugfs entry. Needs to be called with
434 struct gcov_node *node; new_node() local
436 node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); new_node()
437 if (!node) new_node()
440 node->loaded_info = kcalloc(1, sizeof(struct gcov_info *), new_node()
442 if (!node->loaded_info) new_node()
445 init_node(node, info, name, parent); new_node()
448 node->dentry = debugfs_create_file(deskew(node->name), 0600, new_node()
449 parent->dentry, node, &gcov_data_fops); new_node()
451 node->dentry = debugfs_create_dir(node->name, parent->dentry); new_node()
452 if (!node->dentry) { new_node()
454 kfree(node); new_node()
458 add_links(node, parent->dentry); new_node()
459 list_add(&node->list, &parent->children); new_node()
460 list_add(&node->all, &all_head); new_node()
462 return node; new_node()
465 kfree(node); new_node()
470 /* Remove symbolic links associated with node. */ remove_links()
471 static void remove_links(struct gcov_node *node) remove_links() argument
475 if (!node->links) remove_links()
478 debugfs_remove(node->links[i]); remove_links()
479 kfree(node->links); remove_links()
480 node->links = NULL; remove_links()
484 * Remove node from all lists and debugfs and release associated resources.
487 static void release_node(struct gcov_node *node) release_node() argument
489 list_del(&node->list); release_node()
490 list_del(&node->all); release_node()
491 debugfs_remove(node->dentry); release_node()
492 remove_links(node); release_node()
493 kfree(node->loaded_info); release_node()
494 if (node->unloaded_info) release_node()
495 gcov_info_free(node->unloaded_info); release_node()
496 kfree(node); release_node()
499 /* Release node and empty parents. Needs to be called with node_lock held. */ remove_node()
500 static void remove_node(struct gcov_node *node) remove_node() argument
504 while ((node != &root_node) && list_empty(&node->children)) { remove_node()
505 parent = node->parent; remove_node()
506 release_node(node); remove_node()
507 node = parent; remove_node()
512 * Find child node with given basename. Needs to be called with node_lock
518 struct gcov_node *node; get_child_by_name() local
520 list_for_each_entry(node, &parent->children, list) { get_child_by_name()
521 if (strcmp(node->name, name) == 0) get_child_by_name()
522 return node; get_child_by_name()
535 struct gcov_node *node; reset_write() local
539 list_for_each_entry(node, &all_head, all) { reset_write()
540 if (node->num_loaded > 0) reset_write()
541 reset_node(node); reset_write()
542 else if (list_empty(&node->children)) { reset_write()
543 remove_node(node); reset_write()
568 * Create a node for a given profiling data set and add it to all lists and
577 struct gcov_node *node; add_node() local
596 node = get_child_by_name(parent, curr); add_node()
597 if (!node) { add_node()
598 node = new_node(parent, NULL, curr); add_node()
599 if (!node) add_node()
602 parent = node; add_node()
604 /* Create file node. */ add_node()
605 node = new_node(parent, info, curr); add_node()
606 if (!node) add_node()
618 * Associate a profiling data set with an existing node. Needs to be called
621 static void add_info(struct gcov_node *node, struct gcov_info *info) add_info() argument
624 int num = node->num_loaded; add_info()
628 * case the new data set is incompatible, the node only contains add_info()
637 memcpy(loaded_info, node->loaded_info, add_info()
646 if (!gcov_info_is_compatible(node->unloaded_info, info)) { add_info()
650 gcov_info_free(node->unloaded_info); add_info()
651 node->unloaded_info = NULL; add_info()
658 if (!gcov_info_is_compatible(node->loaded_info[0], info)) { add_info()
666 kfree(node->loaded_info); add_info()
667 node->loaded_info = loaded_info; add_info()
668 node->num_loaded = num + 1; add_info()
672 * Return the index of a profiling data set associated with a node.
674 static int get_info_index(struct gcov_node *node, struct gcov_info *info) get_info_index() argument
678 for (i = 0; i < node->num_loaded; i++) { get_info_index()
679 if (node->loaded_info[i] == info) get_info_index()
688 static void save_info(struct gcov_node *node, struct gcov_info *info) save_info() argument
690 if (node->unloaded_info) save_info()
691 gcov_info_add(node->unloaded_info, info); save_info()
693 node->unloaded_info = gcov_info_dup(info); save_info()
694 if (!node->unloaded_info) { save_info()
703 * Disassociate a profiling data set from a node. Needs to be called with
706 static void remove_info(struct gcov_node *node, struct gcov_info *info) remove_info() argument
710 i = get_info_index(node, info); remove_info()
717 save_info(node, info); remove_info()
719 node->loaded_info[i] = node->loaded_info[node->num_loaded - 1]; remove_info()
720 node->num_loaded--; remove_info()
721 if (node->num_loaded > 0) remove_info()
724 kfree(node->loaded_info); remove_info()
725 node->loaded_info = NULL; remove_info()
726 node->num_loaded = 0; remove_info()
727 if (!node->unloaded_info) remove_info()
728 remove_node(node); remove_info()
737 struct gcov_node *node; gcov_event() local
740 node = get_node_by_name(gcov_info_filename(info)); gcov_event()
743 if (node) gcov_event()
744 add_info(node, info); gcov_event()
749 if (node) gcov_event()
750 remove_info(node, info); gcov_event()
/linux-4.4.14/drivers/clk/
H A Dclk-moxart.c19 static void __init moxart_of_pll_clk_init(struct device_node *node) moxart_of_pll_clk_init() argument
24 const char *name = node->name; moxart_of_pll_clk_init()
27 of_property_read_string(node, "clock-output-names", &name); moxart_of_pll_clk_init()
28 parent_name = of_clk_get_parent_name(node, 0); moxart_of_pll_clk_init()
30 base = of_iomap(node, 0); moxart_of_pll_clk_init()
32 pr_err("%s: of_iomap failed\n", node->full_name); moxart_of_pll_clk_init()
39 ref_clk = of_clk_get(node, 0); moxart_of_pll_clk_init()
41 pr_err("%s: of_clk_get failed\n", node->full_name); moxart_of_pll_clk_init()
47 pr_err("%s: failed to register clock\n", node->full_name); moxart_of_pll_clk_init()
52 of_clk_add_provider(node, of_clk_src_simple_get, clk); moxart_of_pll_clk_init()
57 static void __init moxart_of_apb_clk_init(struct device_node *node) moxart_of_apb_clk_init() argument
63 const char *name = node->name; moxart_of_apb_clk_init()
66 of_property_read_string(node, "clock-output-names", &name); moxart_of_apb_clk_init()
67 parent_name = of_clk_get_parent_name(node, 0); moxart_of_apb_clk_init()
69 base = of_iomap(node, 0); moxart_of_apb_clk_init()
71 pr_err("%s: of_iomap failed\n", node->full_name); moxart_of_apb_clk_init()
82 pll_clk = of_clk_get(node, 0); moxart_of_apb_clk_init()
84 pr_err("%s: of_clk_get failed\n", node->full_name); moxart_of_apb_clk_init()
90 pr_err("%s: failed to register clock\n", node->full_name); moxart_of_apb_clk_init()
95 of_clk_add_provider(node, of_clk_src_simple_get, clk); moxart_of_apb_clk_init()
H A Dclk-conf.c17 static int __set_clk_parents(struct device_node *node, bool clk_supplier) __set_clk_parents() argument
23 num_parents = of_count_phandle_with_args(node, "assigned-clock-parents", __set_clk_parents()
27 node->full_name); __set_clk_parents()
30 rc = of_parse_phandle_with_args(node, "assigned-clock-parents", __set_clk_parents()
39 if (clkspec.np == node && !clk_supplier) __set_clk_parents()
44 index, node->full_name); __set_clk_parents()
48 rc = of_parse_phandle_with_args(node, "assigned-clocks", __set_clk_parents()
52 if (clkspec.np == node && !clk_supplier) { __set_clk_parents()
59 index, node->full_name); __set_clk_parents()
77 static int __set_clk_rates(struct device_node *node, bool clk_supplier) __set_clk_rates() argument
86 of_property_for_each_u32(node, "assigned-clock-rates", prop, cur, rate) { __set_clk_rates()
88 rc = of_parse_phandle_with_args(node, "assigned-clocks", __set_clk_rates()
97 if (clkspec.np == node && !clk_supplier) __set_clk_rates()
103 index, node->full_name); __set_clk_rates()
121 * @node: device node to apply clock settings for
122 * @clk_supplier: true if clocks supplied by @node should also be considered
126 * should be set to true if @node may be also a clock supplier of any clock
129 * determines the @node is also a supplier of any of the clocks.
131 int of_clk_set_defaults(struct device_node *node, bool clk_supplier) of_clk_set_defaults() argument
135 if (!node) of_clk_set_defaults()
138 rc = __set_clk_parents(node, clk_supplier); of_clk_set_defaults()
142 return __set_clk_rates(node, clk_supplier); of_clk_set_defaults()
H A Dclk-nspire.c67 static void __init nspire_ahbdiv_setup(struct device_node *node, nspire_ahbdiv_setup() argument
73 const char *clk_name = node->name; nspire_ahbdiv_setup()
77 io = of_iomap(node, 0); nspire_ahbdiv_setup()
85 of_property_read_string(node, "clock-output-names", &clk_name); nspire_ahbdiv_setup()
86 parent_name = of_clk_get_parent_name(node, 0); nspire_ahbdiv_setup()
91 of_clk_add_provider(node, of_clk_src_simple_get, clk); nspire_ahbdiv_setup()
94 static void __init nspire_ahbdiv_setup_cx(struct device_node *node) nspire_ahbdiv_setup_cx() argument
96 nspire_ahbdiv_setup(node, nspire_clkinfo_cx); nspire_ahbdiv_setup_cx()
99 static void __init nspire_ahbdiv_setup_classic(struct device_node *node) nspire_ahbdiv_setup_classic() argument
101 nspire_ahbdiv_setup(node, nspire_clkinfo_classic); nspire_ahbdiv_setup_classic()
109 static void __init nspire_clk_setup(struct device_node *node, nspire_clk_setup() argument
115 const char *clk_name = node->name; nspire_clk_setup()
118 io = of_iomap(node, 0); nspire_clk_setup()
126 of_property_read_string(node, "clock-output-names", &clk_name); nspire_clk_setup()
131 of_clk_add_provider(node, of_clk_src_simple_get, clk); nspire_clk_setup()
141 static void __init nspire_clk_setup_cx(struct device_node *node) nspire_clk_setup_cx() argument
143 nspire_clk_setup(node, nspire_clkinfo_cx); nspire_clk_setup_cx()
146 static void __init nspire_clk_setup_classic(struct device_node *node) nspire_clk_setup_classic() argument
148 nspire_clk_setup(node, nspire_clkinfo_classic); nspire_clk_setup_classic()
/linux-4.4.14/include/drm/
H A Ddrm_vma_manager.h61 struct drm_vma_offset_node *node, unsigned long pages);
63 struct drm_vma_offset_node *node);
65 int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
66 void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
67 bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
71 * drm_vma_offset_exact_lookup_locked() - Look up node by exact address
76 * Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node.
87 struct drm_vma_offset_node *node; drm_vma_offset_exact_lookup_locked() local
89 node = drm_vma_offset_lookup_locked(mgr, start, pages); drm_vma_offset_exact_lookup_locked()
90 return (node && node->vm_node.start == start) ? node : NULL; drm_vma_offset_exact_lookup_locked()
126 * drm_vma_node_reset() - Initialize or reset node object
127 * @node: Node to initialize or reset
129 * Reset a node to its initial state. This must be called before using it with
132 * This must not be called on an already allocated node, or you will leak
135 static inline void drm_vma_node_reset(struct drm_vma_offset_node *node) drm_vma_node_reset() argument
137 memset(node, 0, sizeof(*node)); drm_vma_node_reset()
138 node->vm_files = RB_ROOT; drm_vma_node_reset()
139 rwlock_init(&node->vm_lock); drm_vma_node_reset()
144 * @node: Node to inspect
146 * Return the start address of the given node. This can be used as offset into
153 * Start address of @node for page-based addressing. 0 if the node does not
156 static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node) drm_vma_node_start() argument
158 return node->vm_node.start; drm_vma_node_start()
163 * @node: Node to inspect
165 * Return the size as number of pages for the given node. This is the same size
167 * node, this is 0.
170 * Size of @node as number of pages. 0 if the node does not have an offset
173 static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node) drm_vma_node_size() argument
175 return node->vm_node.size; drm_vma_node_size()
179 * drm_vma_node_has_offset() - Check whether node is added to offset manager
180 * @node: Node to be checked
183 * true iff the node was previously allocated an offset and added to
186 static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node) drm_vma_node_has_offset() argument
188 return drm_mm_node_allocated(&node->vm_node); drm_vma_node_has_offset()
193 * @node: Linked offset node
200 * Offset of @node for byte-based addressing. 0 if the node does not have an
203 static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node) drm_vma_node_offset_addr() argument
205 return ((__u64)node->vm_node.start) << PAGE_SHIFT; drm_vma_node_offset_addr()
209 * drm_vma_node_unmap() - Unmap offset node
210 * @node: Offset node
211 * @file_mapping: Address space to unmap @node from
213 * Unmap all userspace mappings for a given offset node. The mappings must be
218 * is not called on this node concurrently.
220 static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node, drm_vma_node_unmap() argument
223 if (drm_vma_node_has_offset(node)) drm_vma_node_unmap()
225 drm_vma_node_offset_addr(node), drm_vma_node_unmap()
226 drm_vma_node_size(node) << PAGE_SHIFT, 1); drm_vma_node_unmap()
231 * @node: Offset node
234 * This checks whether @filp is granted access to @node. It is the same as
241 static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node, drm_vma_node_verify_access() argument
244 return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES; drm_vma_node_verify_access()
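The drm_vma_manager.h helpers above translate a node's page-based placement into the byte-based fake offset that userspace passes to mmap(), and the exact-lookup variant rejects any hit that lands merely inside the node. A minimal standalone model of those two calculations (the struct and names are illustrative, not the DRM API):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct vma_node { uint64_t start; uint64_t pages; }; /* page-based placement */

    /* byte offset handed to userspace, as drm_vma_node_offset_addr() computes */
    static uint64_t node_offset_addr(const struct vma_node *n)
    {
        return n->start << PAGE_SHIFT;
    }

    /* exact lookup: accept only a hit whose start matches exactly,
     * mirroring drm_vma_offset_exact_lookup_locked() */
    static const struct vma_node *exact_lookup(const struct vma_node *hit,
                                               uint64_t start)
    {
        return (hit && hit->start == start) ? hit : NULL;
    }

    int main(void)
    {
        struct vma_node n = { .start = 0x10000, .pages = 16 };
        printf("mmap offset: %#llx\n", (unsigned long long)node_offset_addr(&n));
        return 0;
    }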
H A Ddrm_mm.h80 * according to the (increasing) start address of the memory node. */
93 void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
98 * drm_mm_node_allocated - checks whether a node is allocated
99 * @node: drm_mm_node to check
105 * True if the @node is allocated.
107 static inline bool drm_mm_node_allocated(struct drm_mm_node *node) drm_mm_node_allocated() argument
109 return node->allocated; drm_mm_node_allocated()
133 * drm_mm_hole_node_start - computes the start of the hole following @node
138 * follows by looking at node->hole_follows.
156 * drm_mm_hole_node_end - computes the end of the hole following @node
161 * follows by looking at node->hole_follows.
221 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
224 struct drm_mm_node *node,
231 * drm_mm_insert_node - search for space and insert @node
233 * @node: preallocate node to insert
241 * The preallocated node must be cleared to 0.
247 struct drm_mm_node *node, drm_mm_insert_node()
252 return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags, drm_mm_insert_node()
257 struct drm_mm_node *node,
266 * drm_mm_insert_node_in_range - ranged search for space and insert @node
268 * @node: preallocate node to insert
271 * @start: start of the allowed range for this node
272 * @end: end of the allowed range for this node
278 * The preallocated node must be cleared to 0.
284 struct drm_mm_node *node, drm_mm_insert_node_in_range()
291 return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, drm_mm_insert_node_in_range()
296 void drm_mm_remove_node(struct drm_mm_node *node);
314 bool drm_mm_scan_add_block(struct drm_mm_node *node);
315 bool drm_mm_scan_remove_block(struct drm_mm_node *node);
246 drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node, u64 size, unsigned alignment, enum drm_mm_search_flags flags) drm_mm_insert_node() argument
283 drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node, u64 size, unsigned alignment, u64 start, u64 end, enum drm_mm_search_flags flags) drm_mm_insert_node_in_range() argument
/linux-4.4.14/arch/ia64/mm/
H A Ddiscontig.c33 * Track per-node information needed to setup the boot memory allocator, the
34 * per-node areas, and the real VM.
53 * To prevent cache aliasing effects, align per-node structures so that they
54 * start at addresses that are strided by node number.
57 #define NODEDATA_ALIGN(addr, node) \
59 (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
62 * build_node_maps - callback to setup bootmem structs for each node
65 * @node: node where this range resides
68 * treat as a virtually contiguous block (i.e. each node). Each such block
75 int node) build_node_maps()
78 struct bootmem_data *bdp = &bootmem_node_data[node]; build_node_maps()
95 * early_nr_cpus_node - return number of cpus on a given node
96 * @node: node to check
98 * Count the number of cpus on @node. We can't use nr_cpus_node() yet because
100 * called yet. Note that node 0 will also count all non-existent cpus.
102 static int __meminit early_nr_cpus_node(int node) early_nr_cpus_node() argument
107 if (node == node_cpuid[cpu].nid) early_nr_cpus_node()
115 * @node: the node id.
117 static unsigned long __meminit compute_pernodesize(int node) compute_pernodesize() argument
121 cpus = early_nr_cpus_node(node); compute_pernodesize()
123 pernodesize += node * L1_CACHE_BYTES; compute_pernodesize()
132 * per_cpu_node_setup - setup per-cpu areas on each node
133 * @cpu_data: per-cpu area on this node
134 * @node: node to setup
137 * setup __per_cpu_offset for each CPU on this node. Return a pointer to
140 static void *per_cpu_node_setup(void *cpu_data, int node) per_cpu_node_setup() argument
148 if (node != node_cpuid[cpu].nid) for_each_possible_early_cpu()
159 * area for cpu0 is on the correct node and its for_each_possible_early_cpu()
193 int node, prev_node, unit, nr_units, rc; setup_per_cpu_areas() local
207 /* build cpu_map, units are grouped by node */ setup_per_cpu_areas()
209 for_each_node(node) setup_per_cpu_areas()
211 if (node == node_cpuid[cpu].nid) setup_per_cpu_areas()
231 * CPUs are put into groups according to node. Walk cpu_map setup_per_cpu_areas()
232 * and create new groups at node boundaries. setup_per_cpu_areas()
238 node = node_cpuid[cpu].nid; setup_per_cpu_areas()
240 if (node == prev_node) { setup_per_cpu_areas()
244 prev_node = node; setup_per_cpu_areas()
262 * @node: the node id.
266 static void __init fill_pernode(int node, unsigned long pernode, fill_pernode() argument
270 int cpus = early_nr_cpus_node(node); fill_pernode()
271 struct bootmem_data *bdp = &bootmem_node_data[node]; fill_pernode()
273 mem_data[node].pernode_addr = pernode; fill_pernode()
274 mem_data[node].pernode_size = pernodesize; fill_pernode()
279 pernode += node * L1_CACHE_BYTES; fill_pernode()
281 pgdat_list[node] = __va(pernode); fill_pernode()
284 mem_data[node].node_data = __va(pernode); fill_pernode()
287 pgdat_list[node]->bdata = bdp; fill_pernode()
290 cpu_data = per_cpu_node_setup(cpu_data, node); fill_pernode()
296 * find_pernode_space - allocate memory for memory map and per-node structures
299 * @node: node where this range resides
302 * pg_data_ts and the per-node data struct. Each node will have something like
307 * |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
324 int node) find_pernode_space()
328 struct bootmem_data *bdp = &bootmem_node_data[node]; find_pernode_space()
337 * Make sure this memory falls within this node's usable memory find_pernode_space()
343 /* Don't setup this node's local space twice... */ find_pernode_space()
344 if (mem_data[node].pernode_addr) find_pernode_space()
351 pernodesize = compute_pernodesize(node); find_pernode_space()
352 pernode = NODEDATA_ALIGN(start, node); find_pernode_space()
356 fill_pernode(node, pernode, pernodesize); find_pernode_space()
365 * @node: node where this range resides
373 int node) free_node_bootmem()
375 free_bootmem_node(pgdat_list[node], start, len); free_node_bootmem()
381 * reserve_pernode_space - reserve memory for per-node space
383 * Reserve the space used by the bootmem maps & per-node space in the boot
391 int node; reserve_pernode_space() local
393 for_each_online_node(node) { for_each_online_node()
394 pg_data_t *pdp = pgdat_list[node]; for_each_online_node()
396 if (node_isset(node, memory_less_mask)) for_each_online_node()
407 /* Now the per-node space */ for_each_online_node()
408 size = mem_data[node].pernode_size; for_each_online_node()
409 base = __pa(mem_data[node].pernode_addr); for_each_online_node()
417 int node; scatter_node_data() local
422 * because we are halfway through initialization of the new node's scatter_node_data()
423 * structures. If for_each_online_node() is used, a new node's scatter_node_data()
427 for_each_node(node) { for_each_node()
428 if (pgdat_list[node]) { for_each_node()
429 dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs; for_each_node()
436 * initialize_pernode_data - fixup per-cpu & per-node pointers
438 * Each node's per-node area has a copy of the global pg_data_t list, so
439 * we copy that to each node here, as well as setting the per-cpu pointer
440 * to the local node data structure. The active_cpus field of the per-node
445 int cpu, node; initialize_pernode_data() local
452 node = node_cpuid[cpu].nid; for_each_possible_early_cpu()
454 mem_data[node].node_data; for_each_possible_early_cpu()
460 node = node_cpuid[cpu].nid;
463 cpu0_cpu_info->node_data = mem_data[node].node_data;
470 * node but fall back to any other node when __alloc_bootmem_node fails
472 * @nid: node id
473 * @pernodesize: size of this node's pernode data
479 int bestnode = -1, node, anynode = 0; memory_less_node_alloc() local
481 for_each_online_node(node) { for_each_online_node()
482 if (node_isset(node, memory_less_mask)) for_each_online_node()
484 else if (node_distance(nid, node) < best) { for_each_online_node()
485 best = node_distance(nid, node); for_each_online_node()
486 bestnode = node; for_each_online_node()
488 anynode = node; for_each_online_node()
508 int node; memory_less_nodes() local
510 for_each_node_mask(node, memory_less_mask) { for_each_node_mask()
511 pernodesize = compute_pernodesize(node); for_each_node_mask()
512 pernode = memory_less_node_alloc(node, pernodesize); for_each_node_mask()
513 fill_pernode(node, __pa(pernode), pernodesize); for_each_node_mask()
523 * allocate the per-cpu and per-node structures.
527 int node; find_memory() local
532 printk(KERN_ERR "node info missing!\n"); find_memory()
545 for_each_online_node(node) for_each_online_node()
546 if (bootmem_node_data[node].node_low_pfn) { for_each_online_node()
547 node_clear(node, memory_less_mask); for_each_online_node()
548 mem_data[node].min_pfn = ~0UL; for_each_online_node()
557 for (node = MAX_NUMNODES - 1; node >= 0; node--) {
561 if (!node_online(node))
563 else if (node_isset(node, memory_less_mask))
566 bdp = &bootmem_node_data[node];
567 pernode = mem_data[node].pernode_addr;
568 pernodesize = mem_data[node].pernode_size;
571 init_bootmem_node(pgdat_list[node],
611 * call_pernode_memory - use SRAT to call callback functions with node info
617 * out to which node a block of memory belongs. Ignore memory that we cannot
637 /* No SRAT table, so assume one node (node 0) */ call_pernode_memory()
657 * count_node_pages - callback to build per-node memory info structures
660 * @node: node where this range resides
662 * Each node has its own number of physical pages, DMAable pages, start, and
664 * for each piece of usable memory and will setup these values for each node.
667 static __init int count_node_pages(unsigned long start, unsigned long len, int node) count_node_pages() argument
673 mem_data[node].num_dma_physpages += count_node_pages()
678 mem_data[node].max_pfn = max(mem_data[node].max_pfn, count_node_pages()
680 mem_data[node].min_pfn = min(mem_data[node].min_pfn, count_node_pages()
689 * paging_init() sets up the page tables for each node of the system and frees
697 int node; paging_init() local
715 for_each_online_node(node) { for_each_online_node()
716 pfn_offset = mem_data[node].min_pfn; for_each_online_node()
719 NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset; for_each_online_node()
721 if (mem_data[node].max_pfn > max_pfn) for_each_online_node()
722 max_pfn = mem_data[node].max_pfn; for_each_online_node()
756 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) vmemmap_populate() argument
758 return vmemmap_populate_basepages(start, end, node); vmemmap_populate()
74 build_node_maps(unsigned long start, unsigned long len, int node) build_node_maps() argument
323 find_pernode_space(unsigned long start, unsigned long len, int node) find_pernode_space() argument
372 free_node_bootmem(unsigned long start, unsigned long len, int node) free_node_bootmem() argument
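discontig.c stripes each node's control structures by node number so that per-node data does not collide on the same cache lines across nodes. The NODEDATA_ALIGN macro is only partially visible above, so the following is an assumed reconstruction of the striding arithmetic, with placeholder constants standing in for PERCPU_PAGE_SIZE and MAX_NODE_ALIGN_OFFSET:

    #include <stdint.h>

    #define PERCPU_PAGE_SIZE      (64 * 1024)        /* placeholder value */
    #define MAX_NODE_ALIGN_OFFSET (32 * 1024 * 1024) /* placeholder value */

    /* Round @addr up to the alignment window, then add a per-node stride
     * so node 0, node 1, ... begin at different cache colors. */
    static uint64_t nodedata_align(uint64_t addr, int node)
    {
        uint64_t base = (addr + MAX_NODE_ALIGN_OFFSET - 1) &
                        ~((uint64_t)MAX_NODE_ALIGN_OFFSET - 1);

        return base + (((uint64_t)node * PERCPU_PAGE_SIZE) &
                       (MAX_NODE_ALIGN_OFFSET - 1));
    }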
/linux-4.4.14/fs/ocfs2/cluster/
H A Dquorum.c28 * that a node is broken and should be recovered. They can't both recover each
33 * So we declare that a node which has given up on connecting to a majority
36 * There are huge opportunities for races here. After we give up on a node's
38 * to declare the node as truly dead. We also need to be careful with the
39 * race between when we see a node start heartbeating and when we connect
98 * go away as our node would be fenced externally before other nodes
130 mlog(ML_ERROR, "fencing this node because it is " o2quo_make_decision()
141 * the lowest numbered node */ o2quo_make_decision()
144 mlog(ML_ERROR, "fencing this node because it is " o2quo_make_decision()
153 mlog(ML_ERROR, "fencing this node because it is " o2quo_make_decision()
156 "node %u\n", quorum, qs->qs_heartbeating, o2quo_make_decision()
167 mlog(ML_NOTICE, "not fencing this node, heartbeating: %d, " o2quo_make_decision()
177 static void o2quo_set_hold(struct o2quo_state *qs, u8 node) o2quo_set_hold() argument
181 if (!test_and_set_bit(node, qs->qs_hold_bm)) { o2quo_set_hold()
184 "node %u\n", node); o2quo_set_hold()
185 mlog(0, "node %u, %d total\n", node, qs->qs_holds); o2quo_set_hold()
189 static void o2quo_clear_hold(struct o2quo_state *qs, u8 node) o2quo_clear_hold() argument
193 if (test_and_clear_bit(node, qs->qs_hold_bm)) { o2quo_clear_hold()
194 mlog(0, "node %u, %d total\n", node, qs->qs_holds - 1); o2quo_clear_hold()
201 mlog_bug_on_msg(qs->qs_holds < 0, "node %u, holds %d\n", o2quo_clear_hold()
202 node, qs->qs_holds); o2quo_clear_hold()
206 /* as a node comes up we delay the quorum decision until we know the fate of
210 void o2quo_hb_up(u8 node) o2quo_hb_up() argument
218 "node %u\n", node); o2quo_hb_up()
219 mlog_bug_on_msg(test_bit(node, qs->qs_hb_bm), "node %u\n", node); o2quo_hb_up()
220 set_bit(node, qs->qs_hb_bm); o2quo_hb_up()
222 mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating); o2quo_hb_up()
224 if (!test_bit(node, qs->qs_conn_bm)) o2quo_hb_up()
225 o2quo_set_hold(qs, node); o2quo_hb_up()
227 o2quo_clear_hold(qs, node); o2quo_hb_up()
232 /* hb going down releases any holds we might have had due to this node from
234 void o2quo_hb_down(u8 node) o2quo_hb_down() argument
242 "node %u, %d heartbeating\n", o2quo_hb_down()
243 node, qs->qs_heartbeating); o2quo_hb_down()
244 mlog_bug_on_msg(!test_bit(node, qs->qs_hb_bm), "node %u\n", node); o2quo_hb_down()
245 clear_bit(node, qs->qs_hb_bm); o2quo_hb_down()
247 mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating); o2quo_hb_down()
249 o2quo_clear_hold(qs, node); o2quo_hb_down()
254 /* this tells us that we've decided that the node is still heartbeating
259 void o2quo_hb_still_up(u8 node) o2quo_hb_still_up() argument
265 mlog(0, "node %u\n", node); o2quo_hb_still_up()
268 o2quo_clear_hold(qs, node); o2quo_hb_still_up()
273 /* This is analogous to hb_up. as a node's connection comes up we delay the
278 void o2quo_conn_up(u8 node) o2quo_conn_up() argument
286 "node %u\n", node); o2quo_conn_up()
287 mlog_bug_on_msg(test_bit(node, qs->qs_conn_bm), "node %u\n", node); o2quo_conn_up()
288 set_bit(node, qs->qs_conn_bm); o2quo_conn_up()
290 mlog(0, "node %u, %d total\n", node, qs->qs_connected); o2quo_conn_up()
292 if (!test_bit(node, qs->qs_hb_bm)) o2quo_conn_up()
293 o2quo_set_hold(qs, node); o2quo_conn_up()
295 o2quo_clear_hold(qs, node); o2quo_conn_up()
300 /* we've decided that we won't ever be connecting to the node again. if it's
302 * node stops heartbeating from hb_down or the caller decides that the node is
304 void o2quo_conn_err(u8 node) o2quo_conn_err() argument
310 if (test_bit(node, qs->qs_conn_bm)) { o2quo_conn_err()
313 "node %u, connected %d\n", o2quo_conn_err()
314 node, qs->qs_connected); o2quo_conn_err()
316 clear_bit(node, qs->qs_conn_bm); o2quo_conn_err()
319 mlog(0, "node %u, %d total\n", node, qs->qs_connected); o2quo_conn_err()
321 if (test_bit(node, qs->qs_hb_bm)) o2quo_conn_err()
322 o2quo_set_hold(qs, node); o2quo_conn_err()
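The comments in quorum.c describe the fencing rule: a node that can no longer reach a majority of the heartbeating nodes fences itself, and an exact 50/50 split survives only on the side that can reach the lowest-numbered node. A simplified standalone sketch of that rule (the real o2quo code also defers the decision through the hold bookkeeping shown above):

    #include <stdbool.h>

    /* Decide whether the local node should fence itself.
     * @connected:    heartbeating nodes we hold connections to (incl. self)
     * @heartbeating: nodes currently seen heartbeating
     * @have_lowest:  true if the lowest-numbered heartbeating node is
     *                reachable from here (the tie-breaker) */
    static bool should_fence(int connected, int heartbeating, bool have_lowest)
    {
        int quorum = heartbeating / 2 + 1;

        if (connected >= quorum)
            return false;           /* we can reach a strict majority */
        if ((heartbeating % 2 == 0) &&
            connected == heartbeating / 2 && have_lowest)
            return false;           /* even split: lowest node's side wins */
        return true;                /* minority partition: fence ourselves */
    }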
H A Dnodemanager.c45 struct o2nm_node *node = NULL; o2nm_get_node_by_num() local
51 node = o2nm_single_cluster->cl_nodes[node_num]; o2nm_get_node_by_num()
52 if (node) o2nm_get_node_by_num()
53 config_item_get(&node->nd_item); o2nm_get_node_by_num()
56 return node; o2nm_get_node_by_num()
84 struct o2nm_node *node, *ret = NULL; o2nm_node_ip_tree_lookup() local
90 node = rb_entry(parent, struct o2nm_node, nd_ip_node); o2nm_node_ip_tree_lookup()
92 cmp = memcmp(&ip_needle, &node->nd_ipv4_address, o2nm_node_ip_tree_lookup()
99 ret = node; o2nm_node_ip_tree_lookup()
114 struct o2nm_node *node = NULL; o2nm_get_node_by_ip() local
121 node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL); o2nm_get_node_by_ip()
122 if (node) o2nm_get_node_by_ip()
123 config_item_get(&node->nd_item); o2nm_get_node_by_ip()
127 return node; o2nm_get_node_by_ip()
131 void o2nm_node_put(struct o2nm_node *node) o2nm_node_put() argument
133 config_item_put(&node->nd_item); o2nm_node_put()
137 void o2nm_node_get(struct o2nm_node *node) o2nm_node_get() argument
139 config_item_get(&node->nd_item); o2nm_node_get()
154 /* node configfs bits */
171 struct o2nm_node *node = to_o2nm_node(item); o2nm_node_release() local
172 kfree(node); o2nm_node_release()
180 static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) to_o2nm_cluster_from_node() argument
184 return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); to_o2nm_cluster_from_node()
196 struct o2nm_node *node = to_o2nm_node(item); o2nm_node_num_store() local
197 struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); o2nm_node_num_store()
210 * node number and try to use our address and port attributes o2nm_node_num_store()
211 * to connect to this node.. make sure that they've been set o2nm_node_num_store()
212 * before writing the node attribute? */ o2nm_node_num_store()
213 if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || o2nm_node_num_store()
214 !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) o2nm_node_num_store()
221 &node->nd_set_attributes)) o2nm_node_num_store()
224 cluster->cl_nodes[tmp] = node; o2nm_node_num_store()
225 node->nd_num = tmp; o2nm_node_num_store()
242 struct o2nm_node *node = to_o2nm_node(item); o2nm_node_ipv4_port_store() local
255 if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) o2nm_node_ipv4_port_store()
257 node->nd_ipv4_port = htons(tmp); o2nm_node_ipv4_port_store()
271 struct o2nm_node *node = to_o2nm_node(item); o2nm_node_ipv4_address_store() local
272 struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); o2nm_node_ipv4_address_store()
294 &node->nd_set_attributes)) o2nm_node_ipv4_address_store()
297 rb_link_node(&node->nd_ip_node, parent, p); o2nm_node_ipv4_address_store()
298 rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); o2nm_node_ipv4_address_store()
304 memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); o2nm_node_ipv4_address_store()
317 struct o2nm_node *node = to_o2nm_node(item); o2nm_node_local_store() local
318 struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); o2nm_node_local_store()
327 tmp = !!tmp; /* boolean of whether this node wants to be local */ o2nm_node_local_store()
331 if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || o2nm_node_local_store()
332 !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || o2nm_node_local_store()
333 !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) o2nm_node_local_store()
336 /* the only failure case is trying to set a new local node o2nm_node_local_store()
339 cluster->cl_local_node != node->nd_num) o2nm_node_local_store()
342 /* bring up the rx thread if we're setting the new local node. */ o2nm_node_local_store()
344 ret = o2net_start_listening(node); o2nm_node_local_store()
350 cluster->cl_local_node == node->nd_num) { o2nm_node_local_store()
351 o2net_stop_listening(node); o2nm_node_local_store()
355 node->nd_local = tmp; o2nm_node_local_store()
356 if (node->nd_local) { o2nm_node_local_store()
358 cluster->cl_local_node = node->nd_num; o2nm_node_local_store()
387 /* node set */
563 struct o2nm_node *node = NULL; o2nm_node_group_make_item() local
568 node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); o2nm_node_group_make_item()
569 if (node == NULL) o2nm_node_group_make_item()
572 strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ o2nm_node_group_make_item()
573 config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); o2nm_node_group_make_item()
574 spin_lock_init(&node->nd_lock); o2nm_node_group_make_item()
576 mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name); o2nm_node_group_make_item()
578 return &node->nd_item; o2nm_node_group_make_item()
584 struct o2nm_node *node = to_o2nm_node(item); o2nm_node_group_drop_item() local
587 o2net_disconnect_node(node); o2nm_node_group_drop_item()
590 (cluster->cl_local_node == node->nd_num)) { o2nm_node_group_drop_item()
593 o2net_stop_listening(node); o2nm_node_group_drop_item()
596 /* XXX call into net to stop this node from trading messages */ o2nm_node_group_drop_item()
601 if (node->nd_ipv4_address) o2nm_node_group_drop_item()
602 rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree); o2nm_node_group_drop_item()
604 /* nd_num might be 0 if the node number hasn't been set.. */ o2nm_node_group_drop_item()
605 if (cluster->cl_nodes[node->nd_num] == node) { o2nm_node_group_drop_item()
606 cluster->cl_nodes[node->nd_num] = NULL; o2nm_node_group_drop_item()
607 clear_bit(node->nd_num, cluster->cl_nodes_bitmap); o2nm_node_group_drop_item()
611 mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n", o2nm_node_group_drop_item()
612 config_item_name(&node->nd_item)); o2nm_node_group_drop_item()
685 config_group_init_type_name(&ns->ns_group, "node", o2nm_cluster_group_make_group()
H A Dquorum.h29 void o2quo_hb_up(u8 node);
30 void o2quo_hb_down(u8 node);
31 void o2quo_hb_still_up(u8 node);
32 void o2quo_conn_up(u8 node);
33 void o2quo_conn_err(u8 node);
/linux-4.4.14/drivers/block/drbd/
H A Ddrbd_interval.c6 * interval_end - return end of @node
9 sector_t interval_end(struct rb_node *node) interval_end() argument
11 struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb); interval_end()
16 * compute_subtree_last - compute end of @node
19 * node and of its children. Called for @node and its parents whenever the end
23 compute_subtree_last(struct drbd_interval *node) compute_subtree_last() argument
25 sector_t max = node->sector + (node->size >> 9); compute_subtree_last()
27 if (node->rb.rb_left) { compute_subtree_last()
28 sector_t left = interval_end(node->rb.rb_left); compute_subtree_last()
32 if (node->rb.rb_right) { compute_subtree_last()
33 sector_t right = interval_end(node->rb.rb_right); compute_subtree_last()
84 * Returns whether the tree contains the node @interval with start sector @start.
93 struct rb_node *node = root->rb_node; drbd_contains_interval() local
95 while (node) { drbd_contains_interval()
97 rb_entry(node, struct drbd_interval, rb); drbd_contains_interval()
100 node = node->rb_left; drbd_contains_interval()
102 node = node->rb_right; drbd_contains_interval()
104 node = node->rb_left; drbd_contains_interval()
106 node = node->rb_right; drbd_contains_interval()
136 struct rb_node *node = root->rb_node; drbd_find_overlap() local
142 while (node) { drbd_find_overlap()
144 rb_entry(node, struct drbd_interval, rb); drbd_find_overlap()
146 if (node->rb_left && drbd_find_overlap()
147 sector < interval_end(node->rb_left)) { drbd_find_overlap()
149 node = node->rb_left; drbd_find_overlap()
156 node = node->rb_right; drbd_find_overlap()
167 struct rb_node *node; drbd_next_overlap() local
170 node = rb_next(&i->rb); drbd_next_overlap()
171 if (!node) drbd_next_overlap()
173 i = rb_entry(node, struct drbd_interval, rb); drbd_next_overlap()
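drbd_interval.c keeps an augmented rbtree: every node caches the maximum interval end in its subtree, which is what lets drbd_find_overlap() prune whole subtrees (descend left only while the left child's cached end exceeds the query sector). A minimal standalone version of the augmented value, assuming drbd's units (start in 512-byte sectors, size in bytes):

    #include <stdint.h>

    typedef uint64_t sector_t;

    struct ival {
        sector_t sector;            /* start, in 512-byte sectors */
        unsigned int size;          /* length in bytes */
        sector_t end;               /* cached max end over this subtree */
        struct ival *left, *right;
    };

    /* Mirror of compute_subtree_last(): own end, then children's maxima. */
    static sector_t subtree_last(const struct ival *n)
    {
        sector_t max = n->sector + (n->size >> 9);

        if (n->left && n->left->end > max)
            max = n->left->end;
        if (n->right && n->right->end > max)
            max = n->right->end;
        return max;
    }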
/linux-4.4.14/arch/alpha/kernel/
H A Dgct.c13 gct6_find_nodes(gct6_node *node, gct6_search_struct *search) gct6_find_nodes() argument
19 if (node->magic != GCT_NODE_MAGIC) { gct6_find_nodes()
28 if (node->type != wanted->type) gct6_find_nodes()
30 if (node->subtype != wanted->subtype) gct6_find_nodes()
35 wanted->callout(node); gct6_find_nodes()
39 if (node->next) gct6_find_nodes()
40 status |= gct6_find_nodes(GCT_NODE_PTR(node->next), search); gct6_find_nodes()
43 if (node->child) gct6_find_nodes()
44 status |= gct6_find_nodes(GCT_NODE_PTR(node->child), search); gct6_find_nodes()
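gct6_find_nodes() is a depth-first walk of a sibling/child tree: validate the node's magic, fire the callout on type/subtype matches, then recurse over the sibling chain and the child chain, OR-ing status bits together. A standalone skeleton of the same shape (types and the magic value are illustrative):

    struct gnode {
        int magic, type, subtype;
        struct gnode *next;     /* sibling list */
        struct gnode *child;    /* first child */
    };

    #define GNODE_MAGIC 0x47435436  /* illustrative */

    static int find_nodes(struct gnode *node, int type, int subtype,
                          void (*callout)(struct gnode *))
    {
        int status = 0;

        if (node->magic != GNODE_MAGIC)
            return 1;                       /* corrupt node: flag and stop */
        if (node->type == type && node->subtype == subtype)
            callout(node);
        if (node->next)
            status |= find_nodes(node->next, type, subtype, callout);
        if (node->child)
            status |= find_nodes(node->child, type, subtype, callout);
        return status;
    }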
/linux-4.4.14/drivers/gpu/drm/nouveau/
H A Dnouveau_sgdma.c12 struct nvkm_mem *node; member in struct:nouveau_sgdma_be
30 struct nvkm_mem *node = mem->mm_node; nv04_sgdma_bind() local
33 node->sg = ttm->sg; nv04_sgdma_bind()
34 node->pages = NULL; nv04_sgdma_bind()
36 node->sg = NULL; nv04_sgdma_bind()
37 node->pages = nvbe->ttm.dma_address; nv04_sgdma_bind()
39 node->size = (mem->num_pages << PAGE_SHIFT) >> 12; nv04_sgdma_bind()
41 nvkm_vm_map(&node->vma[0], node); nv04_sgdma_bind()
42 nvbe->node = node; nv04_sgdma_bind()
50 nvkm_vm_unmap(&nvbe->node->vma[0]); nv04_sgdma_unbind()
64 struct nvkm_mem *node = mem->mm_node; nv50_sgdma_bind() local
68 node->sg = ttm->sg; nv50_sgdma_bind()
69 node->pages = NULL; nv50_sgdma_bind()
71 node->sg = NULL; nv50_sgdma_bind()
72 node->pages = nvbe->ttm.dma_address; nv50_sgdma_bind()
74 node->size = (mem->num_pages << PAGE_SHIFT) >> 12; nv50_sgdma_bind()
H A Dnouveau_ttm.c52 nvkm_mem_node_cleanup(struct nvkm_mem *node) nvkm_mem_node_cleanup() argument
54 if (node->vma[0].node) { nvkm_mem_node_cleanup()
55 nvkm_vm_unmap(&node->vma[0]); nvkm_mem_node_cleanup()
56 nvkm_vm_put(&node->vma[0]); nvkm_mem_node_cleanup()
59 if (node->vma[1].node) { nvkm_mem_node_cleanup()
60 nvkm_vm_unmap(&node->vma[1]); nvkm_mem_node_cleanup()
61 nvkm_vm_put(&node->vma[1]); nvkm_mem_node_cleanup()
84 struct nvkm_mem *node; nouveau_vram_manager_new() local
96 (nvbo->tile_flags >> 8) & 0x3ff, &node); nouveau_vram_manager_new()
102 node->page_shift = nvbo->page_shift; nouveau_vram_manager_new()
104 mem->mm_node = node; nouveau_vram_manager_new()
105 mem->start = node->offset >> PAGE_SHIFT; nouveau_vram_manager_new()
145 struct nvkm_mem *node; nouveau_gart_manager_new() local
147 node = kzalloc(sizeof(*node), GFP_KERNEL); nouveau_gart_manager_new()
148 if (!node) nouveau_gart_manager_new()
151 node->page_shift = 12; nouveau_gart_manager_new()
162 node->memtype = (nvbo->tile_flags & 0x7f00) >> 8; nouveau_gart_manager_new()
167 node->memtype = (nvbo->tile_flags & 0xff00) >> 8; nouveau_gart_manager_new()
175 mem->mm_node = node; nouveau_gart_manager_new()
219 struct nvkm_mem *node = mem->mm_node; nv04_gart_manager_del() local
220 if (node->vma[0].node) nv04_gart_manager_del()
221 nvkm_vm_put(&node->vma[0]); nv04_gart_manager_del()
232 struct nvkm_mem *node; nv04_gart_manager_new() local
235 node = kzalloc(sizeof(*node), GFP_KERNEL); nv04_gart_manager_new()
236 if (!node) nv04_gart_manager_new()
239 node->page_shift = 12; nv04_gart_manager_new()
241 ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift, nv04_gart_manager_new()
242 NV_MEM_ACCESS_RW, &node->vma[0]); nv04_gart_manager_new()
244 kfree(node); nv04_gart_manager_new()
248 mem->mm_node = node; nv04_gart_manager_new()
249 mem->start = node->vma[0].offset >> PAGE_SHIFT; nv04_gart_manager_new()
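Both sgdma bind paths above size the mapping with (num_pages << PAGE_SHIFT) >> 12: CPU pages are first converted to bytes, then to the GPU's fixed 4 KiB page units (note node->page_shift = 12 elsewhere in this file). The conversion in isolation:

    #include <stdint.h>

    #define PAGE_SHIFT 12   /* assuming 4 KiB CPU pages for the example */

    /* CPU pages -> bytes -> 4 KiB GPU pages, as in nv04/nv50_sgdma_bind() */
    static uint64_t gpu_pages(uint64_t num_cpu_pages)
    {
        return (num_cpu_pages << PAGE_SHIFT) >> 12;
    }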
/linux-4.4.14/kernel/locking/
H A Dosq_lock.c9 * Using a single mcs node per CPU is safe because sleeping locks should not be
32 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
37 struct optimistic_spin_node *node, osq_wait_next()
45 * If there is a prev node in queue, then the 'old' value will be osq_wait_next()
46 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if osq_wait_next()
63 * We must xchg() the @node->next value, because if we were to osq_wait_next()
65 * @node->next might complete Step-A and think its @prev is osq_wait_next()
70 * wait for a new @node->next from its Step-C. osq_wait_next()
72 if (node->next) { osq_wait_next()
73 next = xchg(&node->next, NULL); osq_wait_next()
86 struct optimistic_spin_node *node = this_cpu_ptr(&osq_node); osq_lock() local
91 node->locked = 0; osq_lock()
92 node->next = NULL; osq_lock()
93 node->cpu = curr; osq_lock()
98 * the node fields we just initialised) semantics when updating osq_lock()
106 node->prev = prev; osq_lock()
107 WRITE_ONCE(prev->next, node); osq_lock()
111 * moment unlock can proceed and wipe the node element from stack. osq_lock()
118 while (!READ_ONCE(node->locked)) { osq_lock()
139 if (prev->next == node && osq_lock()
140 cmpxchg(&prev->next, node, NULL) == node) osq_lock()
145 * in which case we should observe @node->locked becoming osq_lock()
148 if (smp_load_acquire(&node->locked)) osq_lock()
155 * case its step-C will write us a new @node->prev pointer. osq_lock()
157 prev = READ_ONCE(node->prev); osq_lock()
163 * Similar to unlock(), wait for @node->next or move @lock from @node osq_lock()
167 next = osq_wait_next(lock, node, prev); osq_lock()
175 * pointer, @next is stable because our @node->next pointer is NULL and osq_lock()
187 struct optimistic_spin_node *node, *next; osq_unlock() local
200 node = this_cpu_ptr(&osq_node); osq_unlock()
201 next = xchg(&node->next, NULL); osq_unlock()
207 next = osq_wait_next(lock, node, NULL); osq_unlock()
36 osq_wait_next(struct optimistic_spin_queue *lock, struct optimistic_spin_node *node, struct optimistic_spin_node *prev) osq_wait_next() argument
H A Dmcs_spinlock.h55 * In order to acquire the lock, the caller should declare a local node and
56 * pass a reference of the node to this function in addition to the lock.
58 * on this node->locked until the previous lock holder sets the node->locked
62 void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node) mcs_spin_lock() argument
66 /* Init node */ mcs_spin_lock()
67 node->locked = 0; mcs_spin_lock()
68 node->next = NULL; mcs_spin_lock()
73 * observation of @node. And to provide the ACQUIRE ordering associated mcs_spin_lock()
76 prev = xchg(lock, node); mcs_spin_lock()
79 * Lock acquired, don't need to set node->locked to 1. Threads mcs_spin_lock()
80 * only spin on its own node->locked value for lock acquisition. mcs_spin_lock()
82 * and does not proceed to spin on its own node->locked, this mcs_spin_lock()
84 * audit lock status, then set node->locked value here. mcs_spin_lock()
88 WRITE_ONCE(prev->next, node); mcs_spin_lock()
91 arch_mcs_spin_lock_contended(&node->locked); mcs_spin_lock()
95 * Releases the lock. The caller should pass in the corresponding node that
99 void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node) mcs_spin_unlock() argument
101 struct mcs_spinlock *next = READ_ONCE(node->next); mcs_spin_unlock()
107 if (likely(cmpxchg_release(lock, node, NULL) == node)) mcs_spin_unlock()
110 while (!(next = READ_ONCE(node->next))) mcs_spin_unlock()
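mcs_spinlock.h and osq_lock.c are two members of the same queued-lock family: each waiter enqueues a per-CPU node, spins only on its own node->locked flag, and the unlocker either swings the tail pointer back to empty or hands the lock to node->next, waiting out a successor that is mid-enqueue. A self-contained userspace model with C11 atomics, not the kernel API:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct mcs_node {
        _Atomic(struct mcs_node *) next;
        atomic_bool locked;
    };

    typedef _Atomic(struct mcs_node *) mcs_lock_t;   /* tail of the queue */

    static void mcs_lock(mcs_lock_t *lock, struct mcs_node *node)
    {
        struct mcs_node *prev;

        atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
        atomic_store_explicit(&node->locked, false, memory_order_relaxed);

        /* publish ourselves as the new tail */
        prev = atomic_exchange_explicit(lock, node, memory_order_acq_rel);
        if (!prev)
            return;                      /* queue was empty: lock acquired */

        /* link behind the old tail, then spin on our own flag only */
        atomic_store_explicit(&prev->next, node, memory_order_release);
        while (!atomic_load_explicit(&node->locked, memory_order_acquire))
            ;
    }

    static void mcs_unlock(mcs_lock_t *lock, struct mcs_node *node)
    {
        struct mcs_node *next =
            atomic_load_explicit(&node->next, memory_order_acquire);

        if (!next) {
            struct mcs_node *expected = node;

            /* no visible successor: try to swing the tail back to empty */
            if (atomic_compare_exchange_strong_explicit(lock, &expected,
                    NULL, memory_order_release, memory_order_relaxed))
                return;
            /* a successor is mid-enqueue; wait for it to link in */
            while (!(next = atomic_load_explicit(&node->next,
                                                 memory_order_acquire)))
                ;
        }
        atomic_store_explicit(&next->locked, true, memory_order_release);
    }

Unlike this sketch, osq_lock() above can also give up while queued (unqueue), which is what the prev->next cmpxchg dance and osq_wait_next() handle.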
/linux-4.4.14/net/hsr/
H A Dhsr_framereg.c30 /* Local slave through which AddrB frames are received from this node */
62 struct hsr_node *node; hsr_addr_is_self() local
64 node = list_first_or_null_rcu(&hsr->self_node_db, struct hsr_node, hsr_addr_is_self()
66 if (!node) { hsr_addr_is_self()
67 WARN_ONCE(1, "HSR: No self node\n"); hsr_addr_is_self()
71 if (ether_addr_equal(addr, node->MacAddressA)) hsr_addr_is_self()
73 if (ether_addr_equal(addr, node->MacAddressB)) hsr_addr_is_self()
84 struct hsr_node *node; find_node_by_AddrA() local
86 list_for_each_entry_rcu(node, node_db, mac_list) { list_for_each_entry_rcu()
87 if (ether_addr_equal(node->MacAddressA, addr)) list_for_each_entry_rcu()
88 return node; list_for_each_entry_rcu()
102 struct hsr_node *node, *oldnode; hsr_create_self_node() local
104 node = kmalloc(sizeof(*node), GFP_KERNEL); hsr_create_self_node()
105 if (!node) hsr_create_self_node()
108 ether_addr_copy(node->MacAddressA, addr_a); hsr_create_self_node()
109 ether_addr_copy(node->MacAddressB, addr_b); hsr_create_self_node()
115 list_replace_rcu(&oldnode->mac_list, &node->mac_list); hsr_create_self_node()
121 list_add_tail_rcu(&node->mac_list, self_node_db); hsr_create_self_node()
128 /* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
130 * originating from the newly added node.
135 struct hsr_node *node; hsr_add_node() local
139 node = kzalloc(sizeof(*node), GFP_ATOMIC); hsr_add_node()
140 if (!node) hsr_add_node()
143 ether_addr_copy(node->MacAddressA, addr); hsr_add_node()
150 node->time_in[i] = now; hsr_add_node()
152 node->seq_out[i] = seq_out; hsr_add_node()
154 list_add_tail_rcu(&node->mac_list, node_db); hsr_add_node()
156 return node; hsr_add_node()
164 struct hsr_node *node; hsr_get_node() local
173 list_for_each_entry_rcu(node, node_db, mac_list) { list_for_each_entry_rcu()
174 if (ether_addr_equal(node->MacAddressA, ethhdr->h_source)) list_for_each_entry_rcu()
175 return node; list_for_each_entry_rcu()
176 if (ether_addr_equal(node->MacAddressB, ethhdr->h_source)) list_for_each_entry_rcu()
177 return node; list_for_each_entry_rcu()
181 return NULL; /* Only supervision frame may create node entry */
198 * node.
212 /* Not sent from MacAddressB of a PICS_SUBS capable node */ hsr_handle_sup_frame()
219 /* No frame received from AddrA of this node yet */ hsr_handle_sup_frame()
250 * If the frame was sent by a node's B interface, replace the source
251 * address with that node's "official" address (MacAddressA) so that upper
254 void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb) hsr_addr_subst_source() argument
261 memcpy(&eth_hdr(skb)->h_source, node->MacAddressA, ETH_ALEN); hsr_addr_subst_source()
288 WARN_ONCE(1, "%s: Unknown node\n", __func__); hsr_addr_subst_dest()
298 void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port, hsr_register_frame_in() argument
305 if (seq_nr_before(sequence_nr, node->seq_out[port->type])) hsr_register_frame_in()
308 node->time_in[port->type] = jiffies; hsr_register_frame_in()
309 node->time_in_stale[port->type] = false; hsr_register_frame_in()
320 int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node, hsr_register_frame_out() argument
323 if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type])) hsr_register_frame_out()
326 node->seq_out[port->type] = sequence_nr; hsr_register_frame_out()
332 struct hsr_node *node) get_late_port()
334 if (node->time_in_stale[HSR_PT_SLAVE_A]) get_late_port()
336 if (node->time_in_stale[HSR_PT_SLAVE_B]) get_late_port()
339 if (time_after(node->time_in[HSR_PT_SLAVE_B], get_late_port()
340 node->time_in[HSR_PT_SLAVE_A] + get_late_port()
343 if (time_after(node->time_in[HSR_PT_SLAVE_A], get_late_port()
344 node->time_in[HSR_PT_SLAVE_B] + get_late_port()
358 struct hsr_node *node; hsr_prune_nodes() local
366 list_for_each_entry_rcu(node, &hsr->node_db, mac_list) { hsr_prune_nodes()
368 time_a = node->time_in[HSR_PT_SLAVE_A]; hsr_prune_nodes()
369 time_b = node->time_in[HSR_PT_SLAVE_B]; hsr_prune_nodes()
373 node->time_in_stale[HSR_PT_SLAVE_A] = true; hsr_prune_nodes()
375 node->time_in_stale[HSR_PT_SLAVE_B] = true; hsr_prune_nodes()
377 /* Get age of newest frame from node. hsr_prune_nodes()
382 if (node->time_in_stale[HSR_PT_SLAVE_A] || hsr_prune_nodes()
383 (!node->time_in_stale[HSR_PT_SLAVE_B] && hsr_prune_nodes()
391 port = get_late_port(hsr, node); hsr_prune_nodes()
393 hsr_nl_ringerror(hsr, node->MacAddressA, port); hsr_prune_nodes()
400 hsr_nl_nodedown(hsr, node->MacAddressA); hsr_prune_nodes()
401 list_del_rcu(&node->mac_list); hsr_prune_nodes()
403 kfree_rcu(node, rcu_head); hsr_prune_nodes()
413 struct hsr_node *node; hsr_get_next_node() local
416 node = list_first_or_null_rcu(&hsr->node_db, hsr_get_next_node()
418 if (node) hsr_get_next_node()
419 ether_addr_copy(addr, node->MacAddressA); hsr_get_next_node()
420 return node; hsr_get_next_node()
423 node = _pos; hsr_get_next_node()
424 list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) { hsr_get_next_node()
425 ether_addr_copy(addr, node->MacAddressA); hsr_get_next_node()
426 return node; hsr_get_next_node()
442 struct hsr_node *node; hsr_get_node_data() local
448 node = find_node_by_AddrA(&hsr->node_db, addr); hsr_get_node_data()
449 if (!node) { hsr_get_node_data()
454 ether_addr_copy(addr_b, node->MacAddressB); hsr_get_node_data()
456 tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A]; hsr_get_node_data()
457 if (node->time_in_stale[HSR_PT_SLAVE_A]) hsr_get_node_data()
466 tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B]; hsr_get_node_data()
467 if (node->time_in_stale[HSR_PT_SLAVE_B]) hsr_get_node_data()
477 *if1_seq = node->seq_out[HSR_PT_SLAVE_B]; hsr_get_node_data()
478 *if2_seq = node->seq_out[HSR_PT_SLAVE_A]; hsr_get_node_data()
480 if (node->AddrB_port != HSR_PT_NONE) { hsr_get_node_data()
481 port = hsr_port_get_hsr(hsr, node->AddrB_port); hsr_get_node_data()
331 get_late_port(struct hsr_priv *hsr, struct hsr_node *node) get_late_port() argument
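hsr_register_frame_in()/..._out() above discard late and duplicate frames by comparing 16-bit sequence numbers under wraparound. The standard serial-number trick, which is a plausible reading of the seq_nr_before()/seq_nr_before_or_eq() helpers used above, subtracts and tests the sign of the 16-bit difference:

    #include <stdbool.h>
    #include <stdint.h>

    /* true if @a precedes @b in 16-bit wraparound order */
    static bool seq_nr_before(uint16_t a, uint16_t b)
    {
        return (int16_t)(a - b) < 0;
    }

    static bool seq_nr_before_or_eq(uint16_t a, uint16_t b)
    {
        return (int16_t)(a - b) <= 0;
    }

With this ordering, 0xfffe precedes 0x0001 even though it is numerically larger.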
/linux-4.4.14/arch/mips/include/asm/mach-ip27/
H A Dtopology.h10 cnodeid_t p_nodeid; /* my node ID in compact-id-space */
11 nasid_t p_nasid; /* my node ID in numa-as-id-space */
12 unsigned char p_slice; /* Physical position on node board */
26 #define parent_node(node) (node)
27 #define cpumask_of_node(node) ((node) == -1 ? \
29 &hub_data(node)->h_cpus)
/linux-4.4.14/arch/m32r/include/asm/
H A Dmmzone.h33 * generic node memory support, the following assumptions apply:
38 int node; pfn_to_nid() local
40 for (node = 0 ; node < MAX_NUMNODES ; node++) pfn_to_nid()
41 if (pfn >= node_start_pfn(node) && pfn < node_end_pfn(node)) pfn_to_nid()
44 return node; pfn_to_nid()
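This pfn_to_nid() is just a linear scan over each node's [start, end) pfn range. Standalone, with illustrative ranges:

    #define MAX_NUMNODES 4

    /* illustrative per-node pfn ranges */
    static unsigned long node_start_pfn[MAX_NUMNODES] = { 0x0000, 0x1000, 0x2000, 0x3000 };
    static unsigned long node_end_pfn[MAX_NUMNODES]   = { 0x1000, 0x2000, 0x3000, 0x4000 };

    static int pfn_to_nid(unsigned long pfn)
    {
        int node;

        for (node = 0; node < MAX_NUMNODES; node++)
            if (pfn >= node_start_pfn[node] && pfn < node_end_pfn[node])
                return node;
        return -1;      /* pfn not covered by any node */
    }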
/linux-4.4.14/include/linux/irqchip/
H A Dversatile-fpga.h9 struct device_node *node);
10 int fpga_irq_of_init(struct device_node *node,
/linux-4.4.14/arch/sparc/kernel/
H A Dauxio_32.c31 phandle node, auxio_nd; auxio_probe() local
42 node = prom_getchild(prom_root_node); auxio_probe()
43 auxio_nd = prom_searchsiblings(node, "auxiliary-io"); auxio_probe()
45 node = prom_searchsiblings(node, "obio"); auxio_probe()
46 node = prom_getchild(node); auxio_probe()
47 auxio_nd = prom_searchsiblings(node, "auxio"); auxio_probe()
53 if(prom_searchsiblings(node, "leds")) { auxio_probe()
57 prom_printf("Cannot find auxio node, cannot continue...\n"); auxio_probe()
114 phandle node; auxio_power_probe() local
117 /* Attempt to find the sun4m power control node. */ auxio_power_probe()
118 node = prom_getchild(prom_root_node); auxio_power_probe()
119 node = prom_searchsiblings(node, "obio"); auxio_power_probe()
120 node = prom_getchild(node); auxio_power_probe()
121 node = prom_searchsiblings(node, "power"); auxio_power_probe()
122 if (node == 0 || (s32)node == -1) auxio_power_probe()
126 if (prom_getproperty(node, "reg", (char *)&regs, sizeof(regs)) <= 0) auxio_power_probe()
H A Dcpumap.c36 int child_start; /* Array index of the first child node */
37 int child_end; /* Array index of the last child node */
38 int rover; /* Child node iterator */
42 int start_index; /* Index of first node of a level in a cpuinfo tree */
43 int end_index; /* Index of last node of a level in a cpuinfo tree */
69 * Go to next NUMA node when all cores are used.
135 num_nodes = 1; /* Include the root node */ enumerate_cpuinfo_nodes()
181 * assumed to be sorted in ascending order based on node, core_id, and
187 struct cpuinfo_node *node; build_cpuinfo_tree() local
211 node = &new_tree->nodes[n]; build_cpuinfo_tree()
218 node->id = id; build_cpuinfo_tree()
219 node->level = level; build_cpuinfo_tree()
220 node->num_cpus = 1; build_cpuinfo_tree()
222 node->parent_index = (level > CPUINFO_LVL_ROOT) build_cpuinfo_tree()
225 node->child_start = node->child_end = node->rover = build_cpuinfo_tree()
229 prev_id[level] = node->id; build_cpuinfo_tree()
252 node = &new_tree->nodes[level_rover[level]]; build_cpuinfo_tree()
253 node->num_cpus = num_cpus[level]; build_cpuinfo_tree()
257 node->num_cpus++; build_cpuinfo_tree()
259 /* Connect tree node to parent */ build_cpuinfo_tree()
261 node->parent_index = -1; build_cpuinfo_tree()
263 node->parent_index = build_cpuinfo_tree()
267 node->child_end = build_cpuinfo_tree()
270 node->child_end = build_cpuinfo_tree()
274 /* Initialize the next node in the same level */ build_cpuinfo_tree()
277 node = &new_tree->nodes[n]; build_cpuinfo_tree()
278 node->id = id; build_cpuinfo_tree()
279 node->level = level; build_cpuinfo_tree()
281 /* Connect node to child */ build_cpuinfo_tree()
282 node->child_start = node->child_end = build_cpuinfo_tree()
283 node->rover = build_cpuinfo_tree()
299 struct cpuinfo_node *node = &t->nodes[node_index]; increment_rover() local
303 for (level = node->level; level >= top_level; level--) { increment_rover()
304 node->rover++; increment_rover()
305 if (node->rover <= node->child_end) increment_rover()
308 node->rover = node->child_start; increment_rover()
314 node = &t->nodes[node->parent_index]; increment_rover()
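increment_rover() advances a per-node round-robin cursor and carries the wrap upward: bump the rover, and if it runs past child_end, reset it to child_start and repeat one level closer to top_level. A simplified standalone version of that loop:

    struct tnode {
        int level;                    /* depth of this node in the tree */
        int parent_index;             /* index of the parent, -1 at root */
        int child_start, child_end;   /* index range of this node's children */
        int rover;                    /* round-robin cursor over children */
    };

    static void increment_rover(struct tnode *nodes, int idx, int top_level)
    {
        struct tnode *node = &nodes[idx];
        int level;

        for (level = node->level; level >= top_level; level--) {
            node->rover++;
            if (node->rover <= node->child_end)
                return;                     /* no wrap at this level */
            node->rover = node->child_start;
            if (node->parent_index < 0)
                return;
            node = &nodes[node->parent_index];
        }
    }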
/linux-4.4.14/tools/lib/
H A Drbtree.c29 * 1) A node is either red or black
32 * 4) Both children of every red node are black
37 * consecutive red nodes in a path and every red node is therefore followed by
72 __rb_insert(struct rb_node *node, struct rb_root *root, __rb_insert() argument
75 struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; __rb_insert()
79 * Loop invariant: node is red __rb_insert()
86 rb_set_parent_color(node, NULL, RB_BLACK); __rb_insert()
111 node = gparent; __rb_insert()
112 parent = rb_parent(node); __rb_insert()
113 rb_set_parent_color(node, parent, RB_RED); __rb_insert()
118 if (node == tmp) { __rb_insert()
131 parent->rb_right = tmp = node->rb_left; __rb_insert()
132 node->rb_left = parent; __rb_insert()
136 rb_set_parent_color(parent, node, RB_RED); __rb_insert()
137 augment_rotate(parent, node); __rb_insert()
138 parent = node; __rb_insert()
139 tmp = node->rb_right; __rb_insert()
164 node = gparent; __rb_insert()
165 parent = rb_parent(node); __rb_insert()
166 rb_set_parent_color(node, parent, RB_RED); __rb_insert()
171 if (node == tmp) { __rb_insert()
173 parent->rb_left = tmp = node->rb_right; __rb_insert()
174 node->rb_right = parent; __rb_insert()
178 rb_set_parent_color(parent, node, RB_RED); __rb_insert()
179 augment_rotate(parent, node); __rb_insert()
180 parent = node; __rb_insert()
181 tmp = node->rb_left; __rb_insert()
204 struct rb_node *node = NULL, *sibling, *tmp1, *tmp2; ____rb_erase_color() local
209 * - node is black (or NULL on first iteration) ____rb_erase_color()
210 * - node is not the root (parent is not NULL) ____rb_erase_color()
211 * - All leaf paths going through parent and node have a ____rb_erase_color()
212 * black node count that is 1 lower than other leaf paths. ____rb_erase_color()
215 if (node != sibling) { /* node == parent->rb_left */ ____rb_erase_color()
258 node = parent; ____rb_erase_color()
259 parent = rb_parent(node); ____rb_erase_color()
330 node = parent; ____rb_erase_color()
331 parent = rb_parent(node); ____rb_erase_color()
376 static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {} dummy_copy() argument
384 void rb_insert_color(struct rb_node *node, struct rb_root *root) rb_insert_color() argument
386 __rb_insert(node, root, dummy_rotate); rb_insert_color()
389 void rb_erase(struct rb_node *node, struct rb_root *root) rb_erase() argument
392 rebalance = __rb_erase_augmented(node, root, &dummy_callbacks); rb_erase()
404 void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, __rb_insert_augmented() argument
407 __rb_insert(node, root, augment_rotate); __rb_insert_augmented()
411 * This function returns the first node (in sort order) of the tree.
437 struct rb_node *rb_next(const struct rb_node *node) rb_next() argument
441 if (RB_EMPTY_NODE(node)) rb_next()
448 if (node->rb_right) { rb_next()
449 node = node->rb_right; rb_next()
450 while (node->rb_left) rb_next()
451 node=node->rb_left; rb_next()
452 return (struct rb_node *)node; rb_next()
457 * so any 'next' node must be in the general direction of our parent. rb_next()
460 * parent, said parent is our 'next' node. rb_next()
462 while ((parent = rb_parent(node)) && node == parent->rb_right) rb_next()
463 node = parent; rb_next()
468 struct rb_node *rb_prev(const struct rb_node *node) rb_prev() argument
472 if (RB_EMPTY_NODE(node)) rb_prev()
479 if (node->rb_left) { rb_prev()
480 node = node->rb_left; rb_prev()
481 while (node->rb_right) rb_prev()
482 node=node->rb_right; rb_prev()
483 return (struct rb_node *)node; rb_prev()
490 while ((parent = rb_parent(node)) && node == parent->rb_left) rb_prev()
491 node = parent; rb_prev()
512 static struct rb_node *rb_left_deepest_node(const struct rb_node *node) rb_left_deepest_node() argument
515 if (node->rb_left) rb_left_deepest_node()
516 node = node->rb_left; rb_left_deepest_node()
517 else if (node->rb_right) rb_left_deepest_node()
518 node = node->rb_right; rb_left_deepest_node()
520 return (struct rb_node *)node; rb_left_deepest_node()
524 struct rb_node *rb_next_postorder(const struct rb_node *node) rb_next_postorder() argument
527 if (!node) rb_next_postorder()
529 parent = rb_parent(node); rb_next_postorder()
531 /* If we're sitting on node, we've already seen our children */ rb_next_postorder()
532 if (parent && node == parent->rb_left && parent->rb_right) { rb_next_postorder()
533 /* If we are the parent's left node, go to the parent's right rb_next_postorder()
534 * node then all the way down to the left */ rb_next_postorder()
537 /* Otherwise we are the parent's right node, and the parent rb_next_postorder()
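rb_next() implements the classic in-order successor: if a right subtree exists, take its leftmost node; otherwise climb until arriving at a parent from its left side. The same logic over a plain parent-linked binary search tree:

    struct bst {
        struct bst *left, *right, *parent;
    };

    static struct bst *bst_next(struct bst *node)
    {
        struct bst *parent;

        if (node->right) {                  /* leftmost of right subtree */
            node = node->right;
            while (node->left)
                node = node->left;
            return node;
        }
        /* climb until we come up from a left child; NULL means no successor */
        while ((parent = node->parent) && node == parent->right)
            node = parent;
        return parent;
    }

rb_prev() is the mirror image, and rb_next_postorder() applies the analogous parent-climbing logic to postorder traversal.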
/linux-4.4.14/arch/sparc/include/asm/
H A Dfb.h20 struct device_node *node; fb_is_primary_device() local
25 node = dev->of_node; fb_is_primary_device()
26 if (node && fb_is_primary_device()
27 node == of_console_device) fb_is_primary_device()
H A Dtopology_64.h13 #define parent_node(node) (node)
15 #define cpumask_of_node(node) ((node) == -1 ? \
17 &numa_cpumask_lookup_table[node])
H A Doplib_32.h30 /* Root node of the prom device tree, this stays constant after
78 /* Acquire the IDPROM of the root node in the prom device tree. This
102 /* Start the CPU with the given device tree node, context table, and context
113 /* Get the child node of the given node, or zero if no child exists. */
116 /* Get the next sibling node of the given node, or zero if no further
119 phandle prom_getsibling(phandle node);
121 /* Get the length, at the passed node, of the given property type.
122 * Returns -1 on error (ie. no such property at this node).
133 int prom_getint(phandle node, char *property);
136 int prom_getintdefault(phandle node, char *property, int defval);
139 int prom_getbool(phandle node, char *prop);
142 void prom_getstring(phandle node, char *prop, char *buf, int bufsize);
144 /* Search all siblings starting at the passed node for "name" matching
145 * the given string. Returns the node on success, zero on failure.
150 * node. Returns null string on failure.
152 char *prom_nextprop(phandle node, char *prev_property, char *buffer);
157 /* Set the indicated property at the given node with the passed value.
160 int prom_setprop(phandle node, const char *prop_name, char *prop_value,
170 /* Apply ranges of any prom node (and optionally parent node as well) to registers. */
171 void prom_apply_generic_ranges(phandle node, phandle parent,
/linux-4.4.14/drivers/clk/ti/
H A Dfixed-factor.c32 * @node: device node for this clock
36 static void __init of_ti_fixed_factor_clk_setup(struct device_node *node) of_ti_fixed_factor_clk_setup() argument
39 const char *clk_name = node->name; of_ti_fixed_factor_clk_setup()
44 if (of_property_read_u32(node, "ti,clock-div", &div)) { of_ti_fixed_factor_clk_setup()
45 pr_err("%s must have a clock-div property\n", node->name); of_ti_fixed_factor_clk_setup()
49 if (of_property_read_u32(node, "ti,clock-mult", &mult)) { of_ti_fixed_factor_clk_setup()
50 pr_err("%s must have a clock-mult property\n", node->name); of_ti_fixed_factor_clk_setup()
54 if (of_property_read_bool(node, "ti,set-rate-parent")) of_ti_fixed_factor_clk_setup()
57 parent_name = of_clk_get_parent_name(node, 0); of_ti_fixed_factor_clk_setup()
63 of_clk_add_provider(node, of_clk_src_simple_get, clk); of_ti_fixed_factor_clk_setup()
64 of_ti_clk_autoidle_setup(node); of_ti_fixed_factor_clk_setup()
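A fixed-factor clock only scales its parent: rate = parent_rate * mult / div, with the two factors read from the ti,clock-mult and ti,clock-div properties above. The arithmetic in isolation:

    #include <stdint.h>

    /* rate of a fixed-factor clock, given its parent's rate */
    static uint64_t fixed_factor_rate(uint64_t parent_rate,
                                      uint32_t mult, uint32_t div)
    {
        return parent_rate * mult / div;
    }

For example, a 26 MHz parent with mult = 2 and div = 1 yields 52 MHz.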
H A Ddpll.c137 * @node: device node for the clock
144 struct device_node *node) _register_dpll()
150 dd->clk_ref = of_clk_get(node, 0); _register_dpll()
151 dd->clk_bypass = of_clk_get(node, 1); _register_dpll()
155 node->name); _register_dpll()
156 if (!ti_clk_retry_init(node, hw, _register_dpll)) _register_dpll()
167 of_clk_add_provider(node, of_clk_src_simple_get, clk); _register_dpll()
283 * @node: device node for this clock
289 static void _register_dpll_x2(struct device_node *node, _register_dpll_x2() argument
296 const char *name = node->name; _register_dpll_x2()
299 parent_name = of_clk_get_parent_name(node, 0); _register_dpll_x2()
301 pr_err("%s must have parent\n", node->name); _register_dpll_x2()
324 of_clk_add_provider(node, of_clk_src_simple_get, clk); _register_dpll_x2()
331 * @node: device node containing the DPLL info
337 static void __init of_ti_dpll_setup(struct device_node *node, of_ti_dpll_setup() argument
360 init->name = node->name; of_ti_dpll_setup()
363 init->num_parents = of_clk_get_parent_count(node); of_ti_dpll_setup()
365 pr_err("%s must have parent(s)\n", node->name); of_ti_dpll_setup()
373 of_clk_parent_fill(node, parent_names, init->num_parents); of_ti_dpll_setup()
377 dd->control_reg = ti_clk_get_reg_addr(node, 0); of_ti_dpll_setup()
385 dd->mult_div1_reg = ti_clk_get_reg_addr(node, 1); of_ti_dpll_setup()
391 dd->idlest_reg = ti_clk_get_reg_addr(node, 1); of_ti_dpll_setup()
395 dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2); of_ti_dpll_setup()
402 dd->autoidle_reg = ti_clk_get_reg_addr(node, 3); of_ti_dpll_setup()
407 if (of_property_read_bool(node, "ti,low-power-stop")) of_ti_dpll_setup()
410 if (of_property_read_bool(node, "ti,low-power-bypass")) of_ti_dpll_setup()
413 if (of_property_read_bool(node, "ti,lock")) of_ti_dpll_setup()
419 _register_dpll(&clk_hw->hw, node); of_ti_dpll_setup()
431 static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node) of_ti_omap4_dpll_x2_setup() argument
433 _register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx); of_ti_omap4_dpll_x2_setup()
440 static void __init of_ti_am3_dpll_x2_setup(struct device_node *node) of_ti_am3_dpll_x2_setup() argument
442 _register_dpll_x2(node, &dpll_x2_ck_ops, NULL); of_ti_am3_dpll_x2_setup()
449 static void __init of_ti_omap3_dpll_setup(struct device_node *node) of_ti_omap3_dpll_setup() argument
464 of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd); of_ti_omap3_dpll_setup()
469 static void __init of_ti_omap3_core_dpll_setup(struct device_node *node) of_ti_omap3_core_dpll_setup() argument
483 of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd); of_ti_omap3_core_dpll_setup()
488 static void __init of_ti_omap3_per_dpll_setup(struct device_node *node) of_ti_omap3_per_dpll_setup() argument
503 of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd); of_ti_omap3_per_dpll_setup()
508 static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node) of_ti_omap3_per_jtype_dpll_setup() argument
525 of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd); of_ti_omap3_per_jtype_dpll_setup()
531 static void __init of_ti_omap4_dpll_setup(struct device_node *node) of_ti_omap4_dpll_setup() argument
545 of_ti_dpll_setup(node, &dpll_ck_ops, &dd); of_ti_omap4_dpll_setup()
550 static void __init of_ti_omap5_mpu_dpll_setup(struct device_node *node) of_ti_omap5_mpu_dpll_setup() argument
566 of_ti_dpll_setup(node, &dpll_ck_ops, &dd); of_ti_omap5_mpu_dpll_setup()
571 static void __init of_ti_omap4_core_dpll_setup(struct device_node *node) of_ti_omap4_core_dpll_setup() argument
585 of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd); of_ti_omap4_core_dpll_setup()
592 static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node) of_ti_omap4_m4xen_dpll_setup() argument
608 of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd); of_ti_omap4_m4xen_dpll_setup()
613 static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node) of_ti_omap4_jtype_dpll_setup() argument
629 of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd); of_ti_omap4_jtype_dpll_setup()
635 static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node) of_ti_am3_no_gate_dpll_setup() argument
648 of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd); of_ti_am3_no_gate_dpll_setup()
653 static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node) of_ti_am3_jtype_dpll_setup() argument
667 of_ti_dpll_setup(node, &dpll_ck_ops, &dd); of_ti_am3_jtype_dpll_setup()
672 static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node) of_ti_am3_no_gate_jtype_dpll_setup() argument
686 of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd); of_ti_am3_no_gate_jtype_dpll_setup()
692 static void __init of_ti_am3_dpll_setup(struct device_node *node) of_ti_am3_dpll_setup() argument
705 of_ti_dpll_setup(node, &dpll_ck_ops, &dd); of_ti_am3_dpll_setup()
709 static void __init of_ti_am3_core_dpll_setup(struct device_node *node) of_ti_am3_core_dpll_setup() argument
722 of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd); of_ti_am3_core_dpll_setup()
727 static void __init of_ti_omap2_core_dpll_setup(struct device_node *node) of_ti_omap2_core_dpll_setup() argument
737 of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd); of_ti_omap2_core_dpll_setup()
143 _register_dpll(struct clk_hw *hw, struct device_node *node) _register_dpll() argument
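of_ti_dpll_setup() translates boolean devicetree properties (ti,low-power-stop, ti,low-power-bypass, ti,lock) into a bitmask of supported DPLL modes. A hedged sketch of that accumulation, with the flag values chosen for illustration:

    #include <stdbool.h>

    enum {                            /* illustrative flag values */
        DPLL_LOW_POWER_STOP   = 1 << 0,
        DPLL_LOW_POWER_BYPASS = 1 << 1,
        DPLL_LOCK             = 1 << 2,
    };

    /* mirror the of_property_read_bool() checks in of_ti_dpll_setup() */
    static unsigned int dpll_modes(bool lp_stop, bool lp_bypass, bool lock)
    {
        unsigned int modes = 0;

        if (lp_stop)
            modes |= DPLL_LOW_POWER_STOP;
        if (lp_bypass)
            modes |= DPLL_LOW_POWER_BYPASS;
        if (lock)
            modes |= DPLL_LOCK;
        return modes;
    }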
H A Dgate.c75 /* Parent is the x2 node, get parent of parent for the m2 div */ omap36xx_gate_clk_enable_with_hsdiv_restore()
214 static void __init _of_ti_gate_clk_setup(struct device_node *node, _of_ti_gate_clk_setup() argument
227 reg = ti_clk_get_reg_addr(node, 0); _of_ti_gate_clk_setup()
231 if (!of_property_read_u32(node, "ti,bit-shift", &val)) _of_ti_gate_clk_setup()
235 if (of_clk_get_parent_count(node) != 1) { _of_ti_gate_clk_setup()
236 pr_err("%s must have 1 parent\n", node->name); _of_ti_gate_clk_setup()
240 parent_name = of_clk_get_parent_name(node, 0); _of_ti_gate_clk_setup()
242 if (of_property_read_bool(node, "ti,set-rate-parent")) _of_ti_gate_clk_setup()
245 if (of_property_read_bool(node, "ti,set-bit-to-disable")) _of_ti_gate_clk_setup()
248 clk = _register_gate(NULL, node->name, parent_name, flags, reg, _of_ti_gate_clk_setup()
252 of_clk_add_provider(node, of_clk_src_simple_get, clk); _of_ti_gate_clk_setup()
256 _of_ti_composite_gate_clk_setup(struct device_node *node, _of_ti_composite_gate_clk_setup() argument
266 gate->enable_reg = ti_clk_get_reg_addr(node, 0); _of_ti_composite_gate_clk_setup()
270 of_property_read_u32(node, "ti,bit-shift", &val); _of_ti_composite_gate_clk_setup()
276 if (!ti_clk_add_component(node, &gate->hw, CLK_COMPONENT_TYPE_GATE)) _of_ti_composite_gate_clk_setup()
284 of_ti_composite_no_wait_gate_clk_setup(struct device_node *node) of_ti_composite_no_wait_gate_clk_setup() argument
286 _of_ti_composite_gate_clk_setup(node, NULL); of_ti_composite_no_wait_gate_clk_setup()
292 static void __init of_ti_composite_interface_clk_setup(struct device_node *node) of_ti_composite_interface_clk_setup() argument
294 _of_ti_composite_gate_clk_setup(node, &clkhwops_iclk_wait); of_ti_composite_interface_clk_setup()
300 static void __init of_ti_composite_gate_clk_setup(struct device_node *node) of_ti_composite_gate_clk_setup() argument
302 _of_ti_composite_gate_clk_setup(node, &clkhwops_wait); of_ti_composite_gate_clk_setup()
308 static void __init of_ti_clkdm_gate_clk_setup(struct device_node *node) of_ti_clkdm_gate_clk_setup() argument
310 _of_ti_gate_clk_setup(node, &omap_gate_clkdm_clk_ops, NULL); of_ti_clkdm_gate_clk_setup()
315 static void __init of_ti_hsdiv_gate_clk_setup(struct device_node *node) of_ti_hsdiv_gate_clk_setup() argument
317 _of_ti_gate_clk_setup(node, &omap_gate_clk_hsdiv_restore_ops, of_ti_hsdiv_gate_clk_setup()
323 static void __init of_ti_gate_clk_setup(struct device_node *node) of_ti_gate_clk_setup() argument
325 _of_ti_gate_clk_setup(node, &omap_gate_clk_ops, NULL); of_ti_gate_clk_setup()
329 static void __init of_ti_wait_gate_clk_setup(struct device_node *node) of_ti_wait_gate_clk_setup() argument
331 _of_ti_gate_clk_setup(node, &omap_gate_clk_ops, &clkhwops_wait); of_ti_wait_gate_clk_setup()
337 static void __init of_ti_am35xx_gate_clk_setup(struct device_node *node) of_ti_am35xx_gate_clk_setup() argument
339 _of_ti_gate_clk_setup(node, &omap_gate_clk_ops, of_ti_am35xx_gate_clk_setup()
345 static void __init of_ti_dss_gate_clk_setup(struct device_node *node) of_ti_dss_gate_clk_setup() argument
347 _of_ti_gate_clk_setup(node, &omap_gate_clk_ops, of_ti_dss_gate_clk_setup()
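A gate clock toggles a single bit in a control register, and the ti,set-bit-to-disable property inverts the sense of that bit. Modeling the register as a plain word:

    #include <stdbool.h>
    #include <stdint.h>

    /* Enable or disable a gate at @bit of @reg; @invert models the
     * ti,set-bit-to-disable property (set the bit to gate the clock off). */
    static void gate_set(uint32_t *reg, unsigned int bit, bool invert,
                         bool enable)
    {
        bool set = enable ^ invert;

        if (set)
            *reg |= 1u << bit;
        else
            *reg &= ~(1u << bit);
    }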
H A Dinterface.c104 static void __init _of_ti_interface_clk_setup(struct device_node *node, _of_ti_interface_clk_setup() argument
113 reg = ti_clk_get_reg_addr(node, 0); _of_ti_interface_clk_setup()
117 if (!of_property_read_u32(node, "ti,bit-shift", &val)) _of_ti_interface_clk_setup()
120 parent_name = of_clk_get_parent_name(node, 0); _of_ti_interface_clk_setup()
122 pr_err("%s must have a parent\n", node->name); _of_ti_interface_clk_setup()
126 clk = _register_interface(NULL, node->name, parent_name, reg, _of_ti_interface_clk_setup()
130 of_clk_add_provider(node, of_clk_src_simple_get, clk); _of_ti_interface_clk_setup()
133 static void __init of_ti_interface_clk_setup(struct device_node *node) of_ti_interface_clk_setup() argument
135 _of_ti_interface_clk_setup(node, &clkhwops_iclk_wait); of_ti_interface_clk_setup()
140 static void __init of_ti_no_wait_interface_clk_setup(struct device_node *node) of_ti_no_wait_interface_clk_setup() argument
142 _of_ti_interface_clk_setup(node, &clkhwops_iclk); of_ti_no_wait_interface_clk_setup()
148 static void __init of_ti_hsotgusb_interface_clk_setup(struct device_node *node) of_ti_hsotgusb_interface_clk_setup() argument
150 _of_ti_interface_clk_setup(node, of_ti_hsotgusb_interface_clk_setup()
156 static void __init of_ti_dss_interface_clk_setup(struct device_node *node) of_ti_dss_interface_clk_setup() argument
158 _of_ti_interface_clk_setup(node, of_ti_dss_interface_clk_setup()
164 static void __init of_ti_ssi_interface_clk_setup(struct device_node *node) of_ti_ssi_interface_clk_setup() argument
166 _of_ti_interface_clk_setup(node, &clkhwops_omap3430es2_iclk_ssi_wait); of_ti_ssi_interface_clk_setup()
171 static void __init of_ti_am35xx_interface_clk_setup(struct device_node *node) of_ti_am35xx_interface_clk_setup() argument
173 _of_ti_interface_clk_setup(node, &clkhwops_am35xx_ipss_wait); of_ti_am35xx_interface_clk_setup()
180 static void __init of_ti_omap2430_interface_clk_setup(struct device_node *node) of_ti_omap2430_interface_clk_setup() argument
182 _of_ti_interface_clk_setup(node, &clkhwops_omap2430_i2chs_wait); of_ti_omap2430_interface_clk_setup()
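Note: every *_clk_setup() variant above funnels into the same device-tree registration pattern: map the register, read the optional ti,bit-shift, fetch the single parent, register the clock, publish it via of_clk_add_provider(). A minimal sketch of that pattern for a hypothetical gate (my_gate_clk_setup and the compatible string are illustrative; of_iomap(), of_clk_get_parent_name(), clk_register_gate() and of_clk_add_provider() are the stock 4.4 APIs):

    #include <linux/clk-provider.h>
    #include <linux/of.h>
    #include <linux/of_address.h>

    static void __init my_gate_clk_setup(struct device_node *node)
    {
            struct clk *clk;
            void __iomem *reg;
            const char *parent_name;
            u32 shift = 0;

            reg = of_iomap(node, 0);        /* first "reg" entry: enable register */
            if (!reg)
                    return;

            /* optional bit position inside the register, defaults to 0 */
            of_property_read_u32(node, "ti,bit-shift", &shift);

            /* exactly one parent, as in _of_ti_interface_clk_setup() above */
            parent_name = of_clk_get_parent_name(node, 0);
            if (!parent_name) {
                    pr_err("%s must have a parent\n", node->name);
                    return;
            }

            clk = clk_register_gate(NULL, node->name, parent_name, 0,
                                    reg, shift, 0, NULL);
            if (!IS_ERR(clk))
                    of_clk_add_provider(node, of_clk_src_simple_get, clk);
    }
    CLK_OF_DECLARE(my_gate, "vendor,my-gate-clock", my_gate_clk_setup);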
H A Dapll.c137 struct device_node *node) omap_clk_register_apll()
143 ad->clk_ref = of_clk_get(node, 0); omap_clk_register_apll()
144 ad->clk_bypass = of_clk_get(node, 1); omap_clk_register_apll()
148 node->name); omap_clk_register_apll()
149 if (!ti_clk_retry_init(node, hw, omap_clk_register_apll)) omap_clk_register_apll()
157 of_clk_add_provider(node, of_clk_src_simple_get, clk); omap_clk_register_apll()
170 static void __init of_dra7_apll_setup(struct device_node *node) of_dra7_apll_setup() argument
187 init->name = node->name; of_dra7_apll_setup()
190 init->num_parents = of_clk_get_parent_count(node); of_dra7_apll_setup()
192 pr_err("dra7 apll %s must have parent(s)\n", node->name); of_dra7_apll_setup()
200 of_clk_parent_fill(node, parent_names, init->num_parents); of_dra7_apll_setup()
204 ad->control_reg = ti_clk_get_reg_addr(node, 0); of_dra7_apll_setup()
205 ad->idlest_reg = ti_clk_get_reg_addr(node, 1); of_dra7_apll_setup()
213 omap_clk_register_apll(&clk_hw->hw, node); of_dra7_apll_setup()
331 static void __init of_omap2_apll_setup(struct device_node *node) of_omap2_apll_setup() argument
350 init->name = node->name; of_omap2_apll_setup()
353 init->num_parents = of_clk_get_parent_count(node); of_omap2_apll_setup()
355 pr_err("%s must have one parent\n", node->name); of_omap2_apll_setup()
359 parent_name = of_clk_get_parent_name(node, 0); of_omap2_apll_setup()
362 if (of_property_read_u32(node, "ti,clock-frequency", &val)) { of_omap2_apll_setup()
363 pr_err("%s missing clock-frequency\n", node->name); of_omap2_apll_setup()
368 if (of_property_read_u32(node, "ti,bit-shift", &val)) { of_omap2_apll_setup()
369 pr_err("%s missing bit-shift\n", node->name); of_omap2_apll_setup()
377 if (of_property_read_u32(node, "ti,idlest-shift", &val)) { of_omap2_apll_setup()
378 pr_err("%s missing idlest-shift\n", node->name); of_omap2_apll_setup()
384 ad->control_reg = ti_clk_get_reg_addr(node, 0); of_omap2_apll_setup()
385 ad->autoidle_reg = ti_clk_get_reg_addr(node, 1); of_omap2_apll_setup()
386 ad->idlest_reg = ti_clk_get_reg_addr(node, 2); of_omap2_apll_setup()
394 of_clk_add_provider(node, of_clk_src_simple_get, clk); of_omap2_apll_setup()
136 omap_clk_register_apll(struct clk_hw *hw, struct device_node *node) omap_clk_register_apll() argument
H A Dcomposite.c66 struct device_node *node; member in struct:component_clk
78 static struct device_node *_get_component_node(struct device_node *node, int i) _get_component_node() argument
83 rc = of_parse_phandle_with_args(node, "clocks", "#clock-cells", i, _get_component_node()
91 static struct component_clk *_lookup_component(struct device_node *node) _lookup_component() argument
96 if (comp->node == node) _lookup_component()
160 struct device_node *node) _register_composite()
177 cclk->comp_nodes[i]->name, node->name); _register_composite()
178 if (!ti_clk_retry_init(node, hw, _register_composite()
186 node->name, component_clk_types[comp->type]); _register_composite()
192 /* Mark this node as found */ _register_composite()
209 pr_err("%s: no parents found for %s!\n", __func__, node->name); _register_composite()
213 clk = clk_register_composite(NULL, node->name, _register_composite()
223 of_clk_add_provider(node, of_clk_src_simple_get, clk); _register_composite()
237 static void __init of_ti_composite_clk_setup(struct device_node *node) of_ti_composite_clk_setup() argument
244 num_clks = of_clk_get_parent_count(node); of_ti_composite_clk_setup()
247 pr_err("composite clk %s must have component(s)\n", node->name); of_ti_composite_clk_setup()
255 /* Get device node pointers for each component clock */ of_ti_composite_clk_setup()
257 cclk->comp_nodes[i] = _get_component_node(node, i); of_ti_composite_clk_setup()
259 _register_composite(&cclk->hw, node); of_ti_composite_clk_setup()
266 * @node: device node of the component clock
273 int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw, ti_clk_add_component() argument
280 num_parents = of_clk_get_parent_count(node); ti_clk_add_component()
283 pr_err("component-clock %s must have parent(s)\n", node->name); ti_clk_add_component()
291 of_clk_parent_fill(node, parent_names, num_parents); ti_clk_add_component()
302 clk->node = node; ti_clk_add_component()
159 _register_composite(struct clk_hw *hw, struct device_node *node) _register_composite() argument
H A Dautoidle.c32 struct list_head node; member in struct:clk_ti_autoidle
110 list_for_each_entry(c, &autoidle_clks, node) _clk_generic_allow_autoidle_all()
124 list_for_each_entry(c, &autoidle_clks, node) _clk_generic_deny_autoidle_all()
130 * @node: pointer to the clock device node
134 * node) and sets up the hardware autoidle feature for the clock
139 int __init of_ti_clk_autoidle_setup(struct device_node *node) of_ti_clk_autoidle_setup() argument
145 if (of_property_read_u32(node, "ti,autoidle-shift", &shift)) of_ti_clk_autoidle_setup()
154 clk->name = node->name; of_ti_clk_autoidle_setup()
155 clk->reg = ti_clk_get_reg_addr(node, 0); of_ti_clk_autoidle_setup()
162 if (of_property_read_bool(node, "ti,invert-autoidle-bit")) of_ti_clk_autoidle_setup()
165 list_add(&clk->node, &autoidle_clks); of_ti_clk_autoidle_setup()
187 list_add(&c->node, &clk_hw_omap_clocks); omap2_init_clk_hw_omap_clocks()
203 list_for_each_entry(c, &clk_hw_omap_clocks, node) omap2_clk_enable_autoidle_all()
225 list_for_each_entry(c, &clk_hw_omap_clocks, node) omap2_clk_disable_autoidle_all()
/linux-4.4.14/drivers/acpi/acpica/
H A Dnsobject.c56 * PARAMETERS: node - Parent Node
73 acpi_ns_attach_object(struct acpi_namespace_node *node, acpi_ns_attach_object() argument
85 if (!node) { acpi_ns_attach_object()
102 if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) { acpi_ns_attach_object()
107 node, acpi_ut_get_descriptor_name(node))); acpi_ns_attach_object()
113 if (node->object == object) { acpi_ns_attach_object()
116 object, node)); acpi_ns_attach_object()
155 obj_desc, node, acpi_ut_get_node_name(node))); acpi_ns_attach_object()
159 if (node->object) { acpi_ns_attach_object()
160 acpi_ns_detach_object(node); acpi_ns_attach_object()
181 last_obj_desc->common.next_object = node->object; acpi_ns_attach_object()
184 node->type = (u8) object_type; acpi_ns_attach_object()
185 node->object = obj_desc; acpi_ns_attach_object()
194 * PARAMETERS: node - A Namespace node whose object will be detached
198 * DESCRIPTION: Detach/delete an object associated with a namespace node.
204 void acpi_ns_detach_object(struct acpi_namespace_node *node) acpi_ns_detach_object() argument
210 obj_desc = node->object; acpi_ns_detach_object()
216 if (node->flags & ANOBJ_ALLOCATED_BUFFER) { acpi_ns_detach_object()
227 node->object = NULL; acpi_ns_detach_object()
232 node->object = obj_desc->common.next_object; acpi_ns_detach_object()
236 if (node->object && acpi_ns_detach_object()
237 (node->object->common.type != ACPI_TYPE_LOCAL_DATA)) { acpi_ns_detach_object()
238 node->object = node->object->common.next_object; acpi_ns_detach_object()
243 * the namespace node) acpi_ns_detach_object()
252 /* Reset the node type to untyped */ acpi_ns_detach_object()
254 node->type = ACPI_TYPE_ANY; acpi_ns_detach_object()
257 node, acpi_ut_get_node_name(node), obj_desc)); acpi_ns_detach_object()
269 * PARAMETERS: node - Namespace node
274 * DESCRIPTION: Obtain the object attached to a namespace node.
280 *node) acpi_ns_get_attached_object()
282 ACPI_FUNCTION_TRACE_PTR(ns_get_attached_object, node); acpi_ns_get_attached_object()
284 if (!node) { acpi_ns_get_attached_object()
289 if (!node->object || acpi_ns_get_attached_object()
290 ((ACPI_GET_DESCRIPTOR_TYPE(node->object) != ACPI_DESC_TYPE_OPERAND) acpi_ns_get_attached_object()
291 && (ACPI_GET_DESCRIPTOR_TYPE(node->object) != acpi_ns_get_attached_object()
293 || ((node->object)->common.type == ACPI_TYPE_LOCAL_DATA)) { acpi_ns_get_attached_object()
297 return_PTR(node->object); acpi_ns_get_attached_object()
304 * PARAMETERS: node - Namespace node
309 * DESCRIPTION: Obtain a secondary object associated with a namespace node.
334 * PARAMETERS: node - Namespace node
345 acpi_ns_attach_data(struct acpi_namespace_node *node, acpi_ns_attach_data() argument
355 obj_desc = node->object; acpi_ns_attach_data()
381 node->object = data_desc; acpi_ns_attach_data()
391 * PARAMETERS: node - Namespace node
396 * DESCRIPTION: Low-level detach data. Delete the data node, but the caller
402 acpi_ns_detach_data(struct acpi_namespace_node * node, acpi_ns_detach_data() argument
409 obj_desc = node->object; acpi_ns_detach_data()
417 node->object = obj_desc->common.next_object; acpi_ns_detach_data()
435 * PARAMETERS: node - Namespace node
442 * a namespace node.
447 acpi_ns_get_attached_data(struct acpi_namespace_node * node, acpi_ns_get_attached_data() argument
452 obj_desc = node->object; acpi_ns_get_attached_data()
278 acpi_ns_get_attached_object(struct acpi_namespace_node *node) acpi_ns_get_attached_object() argument
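Note: acpi_ns_attach_object()/acpi_ns_detach_object() above maintain a singly linked chain hanging off node->object, with data entries (ACPI_TYPE_LOCAL_DATA) threaded onto the same list. A reduced sketch of that head-insert/head-remove discipline, using generic types rather than the real ACPICA structures:

    #include <stddef.h>

    #define OBJ_DATA 1              /* stands in for ACPI_TYPE_LOCAL_DATA */

    struct obj {
            int type;
            struct obj *next;       /* plays the role of common.next_object */
    };

    /* attach: the new object becomes the head, the old chain stays behind it */
    static void attach_object(struct obj **head, struct obj *o)
    {
            o->next = *head;
            *head = o;
    }

    /* detach: pop the primary object; like acpi_ns_detach_object(), also
     * skip one trailing secondary object unless it is a data entry */
    static struct obj *detach_object(struct obj **head)
    {
            struct obj *o = *head;

            if (!o)
                    return NULL;
            *head = o->next;
            if (*head && (*head)->type != OBJ_DATA)
                    *head = (*head)->next;
            return o;
    }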
H A Ddsargs.c57 acpi_ds_execute_arguments(struct acpi_namespace_node *node,
65 * PARAMETERS: node - Object NS node
66 * scope_node - Parent NS node
77 acpi_ds_execute_arguments(struct acpi_namespace_node *node, acpi_ds_execute_arguments() argument
96 op->common.node = scope_node; acpi_ds_execute_arguments()
116 walk_state->deferred_node = node; acpi_ds_execute_arguments()
127 op->common.node = node; acpi_ds_execute_arguments()
137 op->common.node = scope_node; acpi_ds_execute_arguments()
158 walk_state->deferred_node = node; acpi_ds_execute_arguments()
183 struct acpi_namespace_node *node; acpi_ds_get_buffer_field_arguments() local
192 /* Get the AML pointer (method object) and buffer_field node */ acpi_ds_get_buffer_field_arguments()
195 node = obj_desc->buffer_field.node; acpi_ds_get_buffer_field_arguments()
198 node, NULL)); acpi_ds_get_buffer_field_arguments()
201 acpi_ut_get_node_name(node))); acpi_ds_get_buffer_field_arguments()
205 status = acpi_ds_execute_arguments(node, node->parent, acpi_ds_get_buffer_field_arguments()
228 struct acpi_namespace_node *node; acpi_ds_get_bank_field_arguments() local
237 /* Get the AML pointer (method object) and bank_field node */ acpi_ds_get_bank_field_arguments()
240 node = obj_desc->bank_field.node; acpi_ds_get_bank_field_arguments()
243 (ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL)); acpi_ds_get_bank_field_arguments()
246 acpi_ut_get_node_name(node))); acpi_ds_get_bank_field_arguments()
250 status = acpi_ds_execute_arguments(node, node->parent, acpi_ds_get_bank_field_arguments()
259 obj_desc->region.length, node); acpi_ds_get_bank_field_arguments()
278 struct acpi_namespace_node *node; acpi_ds_get_buffer_arguments() local
287 /* Get the Buffer node */ acpi_ds_get_buffer_arguments()
289 node = obj_desc->buffer.node; acpi_ds_get_buffer_arguments()
290 if (!node) { acpi_ds_get_buffer_arguments()
292 "No pointer back to namespace node in buffer object %p", acpi_ds_get_buffer_arguments()
301 status = acpi_ds_execute_arguments(node, node, acpi_ds_get_buffer_arguments()
322 struct acpi_namespace_node *node; acpi_ds_get_package_arguments() local
331 /* Get the Package node */ acpi_ds_get_package_arguments()
333 node = obj_desc->package.node; acpi_ds_get_package_arguments()
334 if (!node) { acpi_ds_get_package_arguments()
336 "No pointer back to namespace node in package %p", acpi_ds_get_package_arguments()
345 status = acpi_ds_execute_arguments(node, node, acpi_ds_get_package_arguments()
366 struct acpi_namespace_node *node; acpi_ds_get_region_arguments() local
381 /* Get the Region node */ acpi_ds_get_region_arguments()
383 node = obj_desc->region.node; acpi_ds_get_region_arguments()
386 (ACPI_TYPE_REGION, node, NULL)); acpi_ds_get_region_arguments()
389 acpi_ut_get_node_name(node), acpi_ds_get_region_arguments()
394 status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node, acpi_ds_get_region_arguments()
403 obj_desc->region.length, node); acpi_ds_get_region_arguments()
H A Dnsalloc.c55 * PARAMETERS: name - Name of the new node (4 char ACPI name)
57 * RETURN: New namespace node (Null on failure)
59 * DESCRIPTION: Create a namespace node
64 struct acpi_namespace_node *node; acpi_ns_create_node() local
71 node = acpi_os_acquire_object(acpi_gbl_namespace_cache); acpi_ns_create_node()
72 if (!node) { acpi_ns_create_node()
86 node->name.integer = name; acpi_ns_create_node()
87 ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED); acpi_ns_create_node()
88 return_PTR(node); acpi_ns_create_node()
95 * PARAMETERS: node - Node to be deleted
99 * DESCRIPTION: Delete a namespace node. All node deletions must come through
102 * invoked before the node is deleted.
106 void acpi_ns_delete_node(struct acpi_namespace_node *node) acpi_ns_delete_node() argument
115 acpi_ns_detach_object(node); acpi_ns_delete_node()
123 obj_desc = node->object; acpi_ns_delete_node()
129 obj_desc->data.handler(node, obj_desc->data.pointer); acpi_ns_delete_node()
137 /* Special case for the statically allocated root node */ acpi_ns_delete_node()
139 if (node == acpi_gbl_root_node) { acpi_ns_delete_node()
143 /* Now we can delete the node */ acpi_ns_delete_node()
145 (void)acpi_os_release_object(acpi_gbl_namespace_cache, node); acpi_ns_delete_node()
149 node, acpi_gbl_current_node_count)); acpi_ns_delete_node()
156 * PARAMETERS: node - Node to be removed/deleted
160 * DESCRIPTION: Remove (unlink) and delete a namespace node
164 void acpi_ns_remove_node(struct acpi_namespace_node *node) acpi_ns_remove_node() argument
170 ACPI_FUNCTION_TRACE_PTR(ns_remove_node, node); acpi_ns_remove_node()
172 parent_node = node->parent; acpi_ns_remove_node()
177 /* Find the node that is the previous peer in the parent's child list */ acpi_ns_remove_node()
179 while (next_node != node) { acpi_ns_remove_node()
188 prev_node->peer = node->peer; acpi_ns_remove_node()
194 parent_node->child = node->peer; acpi_ns_remove_node()
197 /* Delete the node and any attached objects */ acpi_ns_remove_node()
199 acpi_ns_delete_node(node); acpi_ns_remove_node()
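Note: acpi_ns_remove_node() above unlinks a node from its parent's singly linked child list, in which siblings are chained through their peer pointers. The same walk in isolation, with generic names standing in for the ACPICA fields:

    struct ns_node {
            struct ns_node *parent;
            struct ns_node *child;          /* first child */
            struct ns_node *peer;           /* next sibling */
    };

    static void ns_unlink(struct ns_node *node)
    {
            struct ns_node *prev = NULL;
            struct ns_node *walk;

            if (!node->parent)
                    return;                 /* the root has no parent list */

            /* find the previous peer; the node is assumed to be present */
            walk = node->parent->child;
            while (walk != node) {
                    prev = walk;
                    walk = walk->peer;
            }

            if (prev)
                    prev->peer = node->peer;          /* middle or tail */
            else
                    node->parent->child = node->peer; /* node was first child */
    }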
209 * node - The new Node to install
214 * DESCRIPTION: Initialize a new namespace node and install it amongst
224 struct acpi_namespace_node *node, /* New Child */ acpi_ns_install_node()
242 * A method is creating a new node that is not a child of the acpi_ns_install_node()
254 node->peer = NULL; acpi_ns_install_node()
255 node->parent = parent_node; acpi_ns_install_node()
259 parent_node->child = node; acpi_ns_install_node()
261 /* Add node to the end of the peer list */ acpi_ns_install_node()
267 child_node->peer = node; acpi_ns_install_node()
272 node->owner_id = owner_id; acpi_ns_install_node()
273 node->type = (u8) type; acpi_ns_install_node()
277 acpi_ut_get_node_name(node), acpi_ns_install_node()
278 acpi_ut_get_type_name(node->type), node, owner_id, acpi_ns_install_node()
323 * Delete this child node and move on to the next child in the list. acpi_ns_delete_children()
324 * No need to unlink the node since we are deleting the entire branch. acpi_ns_delete_children()
375 /* Get the next node in this scope (NULL if none) */ acpi_ns_delete_namespace_subtree()
380 /* Found a child node - detach any attached object */ acpi_ns_delete_namespace_subtree()
384 /* Check if this node has any children */ acpi_ns_delete_namespace_subtree()
388 * There is at least one child of this node, acpi_ns_delete_namespace_subtree()
389 * visit the node acpi_ns_delete_namespace_subtree()
397 * No more children of this parent node. acpi_ns_delete_namespace_subtree()
408 /* New "last child" is this parent node */ acpi_ns_delete_namespace_subtree()
470 * Get the next child of this parent node. When child_node is NULL, acpi_ns_delete_namespace_by_owner()
484 /* Found a matching child node - detach any attached object */ acpi_ns_delete_namespace_by_owner()
489 /* Check if this node has any children */ acpi_ns_delete_namespace_by_owner()
493 * There is at least one child of this node, acpi_ns_delete_namespace_by_owner()
494 * visit the node acpi_ns_delete_namespace_by_owner()
504 * No more children of this parent node. acpi_ns_delete_namespace_by_owner()
514 /* New "last child" is this parent node */ acpi_ns_delete_namespace_by_owner()
223 acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namespace_node *parent_node, struct acpi_namespace_node *node, acpi_object_type type) acpi_ns_install_node() argument
H A Dnsnames.c56 * PARAMETERS: node - Namespace node whose pathname is needed
59 * the node, In external format (name segments separated by path
62 * DESCRIPTION: Used to obtain the full pathname to a namespace node, usually
66 char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) acpi_ns_get_external_pathname() argument
70 ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node); acpi_ns_get_external_pathname()
72 name_buffer = acpi_ns_get_normalized_pathname(node, FALSE); acpi_ns_get_external_pathname()
81 * PARAMETERS: node - Namespace node
85 * DESCRIPTION: Get the length of the pathname string for this node
89 acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node) acpi_ns_get_pathname_length() argument
95 size = acpi_ns_build_normalized_path(node, NULL, 0, FALSE); acpi_ns_get_pathname_length()
121 struct acpi_namespace_node *node; acpi_ns_handle_to_pathname() local
126 node = acpi_ns_validate_handle(target_handle); acpi_ns_handle_to_pathname()
127 if (!node) { acpi_ns_handle_to_pathname()
134 acpi_ns_build_normalized_path(node, NULL, 0, no_trailing); acpi_ns_handle_to_pathname()
148 (void)acpi_ns_build_normalized_path(node, buffer->pointer, acpi_ns_handle_to_pathname()
163 * PARAMETERS: node - Namespace node
174 * contain the namespace node's path name, the actual required
182 acpi_ns_build_normalized_path(struct acpi_namespace_node *node, acpi_ns_build_normalized_path() argument
191 ACPI_FUNCTION_TRACE_PTR(ns_build_normalized_path, node); acpi_ns_build_normalized_path()
210 if (!node) { acpi_ns_build_normalized_path()
214 next_node = node; acpi_ns_build_normalized_path()
216 if (next_node != node) { acpi_ns_build_normalized_path()
261 * PARAMETERS: node - Namespace node whose pathname is needed
265 * the node, In external format (name segments separated by path
268 * DESCRIPTION: Used to obtain the full pathname to a namespace node, usually
274 char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node, acpi_ns_get_normalized_pathname() argument
280 ACPI_FUNCTION_TRACE_PTR(ns_get_normalized_pathname, node); acpi_ns_get_normalized_pathname()
284 size = acpi_ns_build_normalized_path(node, NULL, 0, no_trailing); acpi_ns_get_normalized_pathname()
299 (void)acpi_ns_build_normalized_path(node, name_buffer, size, acpi_ns_get_normalized_pathname()
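Note: acpi_ns_get_normalized_pathname() above uses the common two-pass sizing pattern: call acpi_ns_build_normalized_path() with a NULL buffer to learn the required length, allocate, then call again to fill. The shape of that idiom as a standalone sketch (build_path() is a hypothetical stand-in):

    #include <stdlib.h>

    /* returns the bytes the path needs; writes it when buf is non-NULL */
    extern size_t build_path(const void *node, char *buf, size_t size);

    static char *get_pathname(const void *node)
    {
            size_t size = build_path(node, NULL, 0);    /* pass 1: measure */
            char *buf;

            if (!size)
                    return NULL;
            buf = calloc(1, size);      /* ACPICA uses ACPI_ALLOCATE_ZEROED */
            if (!buf)
                    return NULL;
            (void)build_path(node, buf, size);          /* pass 2: fill */
            return buf;
    }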
H A Ddswload2.c73 struct acpi_namespace_node *node; acpi_ds_load2_begin_op() local
142 node = NULL; acpi_ds_load2_begin_op()
155 ACPI_NS_SEARCH_PARENT, walk_state, &(node)); acpi_ds_load2_begin_op()
160 /* Special case for Scope(\) -> refers to the Root node */ acpi_ds_load2_begin_op()
162 if (op && (op->named.node == acpi_gbl_root_node)) { acpi_ds_load2_begin_op()
163 node = op->named.node; acpi_ds_load2_begin_op()
166 acpi_ds_scope_stack_push(node, object_type, acpi_ds_load2_begin_op()
181 &(node)); acpi_ds_load2_begin_op()
201 switch (node->type) { acpi_ds_load2_begin_op()
226 acpi_ut_get_node_name(node), acpi_ds_load2_begin_op()
227 acpi_ut_get_type_name(node->type))); acpi_ds_load2_begin_op()
229 node->type = ACPI_TYPE_ANY; acpi_ds_load2_begin_op()
239 if ((node == acpi_gbl_root_node) && acpi_ds_load2_begin_op()
254 acpi_ut_get_type_name(node->type), acpi_ds_load2_begin_op()
255 acpi_ut_get_node_name(node))); acpi_ds_load2_begin_op()
265 if (op && op->common.node) { acpi_ds_load2_begin_op()
267 /* This op/node was previously entered into the namespace */ acpi_ds_load2_begin_op()
269 node = op->common.node; acpi_ds_load2_begin_op()
273 acpi_ds_scope_stack_push(node, object_type, acpi_ds_load2_begin_op()
293 /* This name is already in the namespace, get the node */ acpi_ds_load2_begin_op()
295 node = walk_state->deferred_node; acpi_ds_load2_begin_op()
303 /* Execution mode, node cannot already exist, node is temporary */ acpi_ds_load2_begin_op()
319 walk_state, &node); acpi_ds_load2_begin_op()
324 acpi_ut_get_node_name(node), node)); acpi_ds_load2_begin_op()
345 if (node) { acpi_ds_load2_begin_op()
346 op->named.name = node->name.integer; acpi_ds_load2_begin_op()
355 op->common.node = node; acpi_ds_load2_begin_op()
377 struct acpi_namespace_node *node; acpi_ds_load2_end_op() local
409 node = op->common.node; acpi_ds_load2_end_op()
415 walk_state->operands[0] = (void *)node; acpi_ds_load2_end_op()
463 walk_state, op, node)); acpi_ds_load2_end_op()
494 common.node, walk_state); acpi_ds_load2_end_op()
500 acpi_ds_create_bank_field(op, arg->common.node, acpi_ds_load2_end_op()
507 acpi_ds_create_field(op, arg->common.node, acpi_ds_load2_end_op()
615 (acpi_ns_get_attached_object(node), FALSE); acpi_ds_load2_end_op()
634 status = acpi_ds_create_node(walk_state, node, op); acpi_ds_load2_end_op()
641 * Note: We must create the method node/object pair as soon as we acpi_ds_load2_end_op()
648 walk_state, op, op->named.node)); acpi_ds_load2_end_op()
650 if (!acpi_ns_get_attached_object(op->named.node)) { acpi_ds_load2_end_op()
652 ACPI_CAST_PTR(void, op->named.node); acpi_ds_load2_end_op()
694 walk_state, op, node)); acpi_ds_load2_end_op()
720 op->common.node = new_node; acpi_ds_load2_end_op()
H A Ddbcmds.c63 acpi_dm_test_resource_conversion(struct acpi_namespace_node *node, char *name);
82 * RETURN: Pointer to a NS node
91 struct acpi_namespace_node *node; acpi_db_convert_to_node() local
99 node = ACPI_TO_POINTER(address); acpi_db_convert_to_node()
100 if (!acpi_os_readable(node, sizeof(struct acpi_namespace_node))) { acpi_db_convert_to_node()
101 acpi_os_printf("Address %p is invalid", node); acpi_db_convert_to_node()
105 /* Make sure pointer is valid NS node */ acpi_db_convert_to_node()
107 if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) { acpi_db_convert_to_node()
109 ("Address %p is not a valid namespace node [%s]\n", acpi_db_convert_to_node()
110 node, acpi_ut_get_descriptor_name(node)); acpi_db_convert_to_node()
118 node = acpi_db_local_ns_lookup(in_string); acpi_db_convert_to_node()
119 if (!node) { acpi_db_convert_to_node()
121 ("Could not find [%s] in namespace, defaulting to root node\n", acpi_db_convert_to_node()
123 node = acpi_gbl_root_node; acpi_db_convert_to_node()
127 return (node); acpi_db_convert_to_node()
366 * DESCRIPTION: Unload an ACPI table, via any namespace node that is owned
373 struct acpi_namespace_node *node; acpi_db_unload_acpi_table() local
378 node = acpi_db_convert_to_node(object_name); acpi_db_unload_acpi_table()
379 if (!node) { acpi_db_unload_acpi_table()
383 status = acpi_unload_parent_table(ACPI_CAST_PTR(acpi_handle, node)); acpi_db_unload_acpi_table()
386 object_name, node); acpi_db_unload_acpi_table()
409 struct acpi_namespace_node *node; acpi_db_send_notify() local
414 node = acpi_db_convert_to_node(name); acpi_db_send_notify()
415 if (!node) { acpi_db_send_notify()
421 if (acpi_ev_is_notify_object(node)) { acpi_db_send_notify()
422 status = acpi_ev_queue_notify_request(node, value); acpi_db_send_notify()
429 acpi_ut_get_node_name(node), acpi_db_send_notify()
430 acpi_ut_get_type_name(node->type)); acpi_db_send_notify()
529 struct acpi_namespace_node *node; acpi_db_display_template() local
535 node = acpi_db_convert_to_node(buffer_arg); acpi_db_display_template()
536 if (!node || (node == acpi_gbl_root_node)) { acpi_db_display_template()
543 if (node->type != ACPI_TYPE_BUFFER) { acpi_db_display_template()
555 status = acpi_rs_create_resource_list(node->object, &return_buffer); acpi_db_display_template()
574 acpi_ut_debug_dump_buffer((u8 *)node->object->buffer.pointer, acpi_db_display_template()
575 node->object->buffer.length, acpi_db_display_template()
684 * PARAMETERS: node - Parent device node
695 acpi_dm_test_resource_conversion(struct acpi_namespace_node *node, char *name) acpi_dm_test_resource_conversion() argument
711 status = acpi_evaluate_object(node, name, NULL, &return_buffer); acpi_dm_test_resource_conversion()
720 status = acpi_get_current_resources(node, &resource_buffer); acpi_dm_test_resource_conversion()
791 struct acpi_namespace_node *node; acpi_db_device_resources() local
800 node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle); acpi_db_device_resources()
801 parent_path = acpi_ns_get_external_pathname(node); acpi_db_device_resources()
808 (void)acpi_get_handle(node, METHOD_NAME__PRT, acpi_db_device_resources()
810 (void)acpi_get_handle(node, METHOD_NAME__CRS, acpi_db_device_resources()
812 (void)acpi_get_handle(node, METHOD_NAME__PRS, acpi_db_device_resources()
814 (void)acpi_get_handle(node, METHOD_NAME__AEI, acpi_db_device_resources()
844 status = acpi_get_irq_routing_table(node, &return_buffer); acpi_db_device_resources()
873 status = acpi_walk_resources(node, METHOD_NAME__CRS, acpi_db_device_resources()
886 status = acpi_get_current_resources(node, &return_buffer); acpi_db_device_resources()
915 (void)acpi_dm_test_resource_conversion(node, METHOD_NAME__CRS); acpi_db_device_resources()
921 status = acpi_set_current_resources(node, &return_buffer); acpi_db_device_resources()
952 status = acpi_get_possible_resources(node, &return_buffer); acpi_db_device_resources()
984 status = acpi_get_event_resources(node, &return_buffer); acpi_db_device_resources()
1017 struct acpi_namespace_node *node; acpi_db_display_resources() local
1032 node = acpi_db_convert_to_node(object_arg); acpi_db_display_resources()
1033 if (node) { acpi_db_display_resources()
1034 if (node->type != ACPI_TYPE_DEVICE) { acpi_db_display_resources()
1037 node->name.ascii, acpi_db_display_resources()
1038 acpi_ut_get_type_name(node->type)); acpi_db_display_resources()
1040 (void)acpi_db_device_resources(node, 0, NULL, acpi_db_display_resources()
/linux-4.4.14/drivers/misc/mic/scif/
H A Dscif_nm.c27 static void scif_invalidate_ep(int node) scif_invalidate_ep() argument
36 if (ep->remote_dev->node == node) { scif_invalidate_ep()
45 if (ep->remote_dev->node == node) { scif_invalidate_ep()
96 msg.src.node = scif_info.nodeid; scif_send_acks()
97 msg.dst.node = SCIF_MGMT_NODE; scif_send_acks()
98 msg.payload[0] = dev->node; scif_send_acks()
104 msg.src.node = scif_info.nodeid; scif_send_acks()
105 msg.dst.node = dev->node; scif_send_acks()
132 scif_invalidate_ep(dev->node); scif_cleanup_scifdev()
133 scif_zap_mmaps(dev->node); scif_cleanup_scifdev()
134 scif_cleanup_rma_for_zombies(dev->node); scif_cleanup_scifdev()
137 if (!dev->node && scif_info.card_initiated_exit) { scif_cleanup_scifdev()
151 * @node: Node to remove
153 void scif_handle_remove_node(int node) scif_handle_remove_node() argument
155 struct scif_dev *scifdev = &scif_dev[node]; scif_handle_remove_node()
161 static int scif_send_rmnode_msg(int node, int remove_node) scif_send_rmnode_msg() argument
164 struct scif_dev *dev = &scif_dev[node]; scif_send_rmnode_msg()
167 notif_msg.src.node = scif_info.nodeid; scif_send_rmnode_msg()
168 notif_msg.dst.node = node; scif_send_rmnode_msg()
176 * @node_id[in]: source node id.
177 * @mgmt_initiated: Disconnection initiated from the mgmt node
179 * Disconnect a node from the scif network.
213 /* Tell the mgmt node to clean up */ scif_disconnect_node()
216 /* Wait for mgmt node to respond with SCIF_NODE_REMOVE_ACK */ scif_disconnect_node()
228 msg.src.node = scif_info.nodeid; scif_get_node_info()
229 msg.dst.node = SCIF_MGMT_NODE; scif_get_node_info()
H A Dscif_nodeqp.c26 * SCIF node Queue Pair (QP) setup flow:
32 * 3) The local node updates the device page with the DMA address of the QP
34 * the peer node has updated its QP DMA address
36 * in the device page, the local node maps the remote node's QP,
38 * 6) The SCIF_INIT message is received by the peer node QP interrupt bottom
40 * 7) scif_init(..) registers a new SCIF peer node by calling
42 * SCIF node
43 * 8) On the mgmt node, P2P network setup/teardown is initiated if all the
48 * QPs and send a SCIF_NODE_ADD_ACK to the mgmt node
49 * 11) As part of scif_node_add_ack(..) the mgmt node forwards the
52 * outbound QPs, make sure they can access memory on the remote node
53 * and then add a new SCIF peer node by calling
55 * SCIF node.
59 * SCIF node QP teardown flow (initiated by non mgmt node):
63 * 3) A non mgmt node now cleans up all local data structures and sends a
68 * 6) As part of scif_node_remove(..) a remote node unregisters the peer
69 * node from the SCIF network and sends a SCIF_NODE_REMOVE_ACK
70 * 7) When the mgmt node has received all the SCIF_NODE_REMOVE_ACKs
71 * it sends itself a node remove message whose handling cleans up local
72 * data structures and unregisters the peer node from the SCIF network
73 * 8) The mgmt node sends a SCIF_EXIT_ACK
74 * 9) Upon receipt of the SCIF_EXIT_ACK the node initiating the teardown
76 * 10) The SCIF network is now torn down for the node initiating the
80 * SCIF node QP teardown flow (initiated by mgmt node):
84 * 3) The mgmt node calls scif_disconnect_node(..)
87 * 5) As part of scif_node_remove(..) a remote node unregisters the peer
88 * node from the SCIF network and sends a SCIF_NODE_REMOVE_ACK
89 * 6) When the mgmt node has received all the SCIF_NODE_REMOVE_ACKs
90 * it unregisters the peer node from the SCIF network
91 * 7) The mgmt node sends a SCIF_EXIT message and waits for a SCIF_EXIT_ACK.
92 * 8) A non mgmt node upon receipt of a SCIF_EXIT message calls scif_stop(..)
94 * then send a SCIF_EXIT_ACK back to the mgmt node
95 9) Upon receipt of the SCIF_EXIT_ACK the mgmt node sends itself a node
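Note: every step in the setup/teardown flows above rides on the same fixed-size node QP message. Judging from the field accesses in this file (uop, src.node, dst.node, payload[]), a node-remove notification is assembled roughly the way scif_send_rmnode_msg() does it; a sketch, with the payload convention inferred from the surrounding code:

    static int send_rmnode(struct scif_dev *dev, int remove_node)
    {
            struct scifmsg msg;

            msg.uop = SCIF_NODE_REMOVE;      /* opcode dispatched by the QP handler */
            msg.src.node = scif_info.nodeid; /* this node */
            msg.dst.node = dev->node;        /* peer the message is routed to */
            msg.payload[0] = remove_node;    /* node being taken down */

            return scif_nodeqp_send(dev, &msg);
    }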
248 scif_dev[scif_info.nodeid].node, scifdev->node); scif_setup_qp_connect_response()
267 * Because the node QP may already be processing an INIT message, set scif_setup_qp_connect_response()
307 msg.dst.node = scifdev->node; scif_qp_response()
320 msg.src.node = scif_info.nodeid; scif_send_exit()
321 msg.dst.node = scifdev->node; scif_send_exit()
425 p2p->ppi_peer_id = peerdev->node; scif_init_p2p_info()
455 * @dst: Destination node
457 * Connect the src and dst node by setting up the p2p connection
458 * between them. Management node here acts like a proxy.
481 * up then just ignore this request. The requested node will get scif_node_connect()
487 if (p2p->ppi_peer_id == dev_j->node) scif_node_connect()
507 msg.src.node = dev_j->node; scif_node_connect()
508 msg.dst.node = dev_i->node; scif_node_connect()
524 msg.src.node = dev_i->node; scif_node_connect()
525 msg.dst.node = dev_j->node; scif_node_connect()
613 label, message_types[msg->uop], msg->src.node, msg->src.port, scif_display_message()
614 msg->dst.node, msg->dst.port, msg->payload[0], msg->payload[1], scif_display_message()
646 * queuing work for the queue handling real node _scif_nodeqp_send()
662 * scif_nodeqp_send - Send a message on the node queue pair
707 * @scifdev: Remote SCIF device node
730 * @scifdev: Remote SCIF device node
733 * This function stops the SCIF interface for the node which sent
734 * the SCIF_EXIT message and starts waiting for that node to
742 scif_disconnect_node(scifdev->node, false); scif_exit()
751 * @scifdev: Remote SCIF device node
764 * @scifdev: Remote SCIF device node
767 * When the mgmt node driver has finished initializing a MIC node queue pair it
768 * marks the node as online. It then looks for all currently online MIC cards
772 * The local node allocates its incoming queue and sends its address in the
773 * SCIF_NODE_ADD_ACK message back to the mgmt node, the mgmt node "reflects"
774 * this message to the new node
785 "Scifdev %d:%d received NODE_ADD msg for node %d\n", scif_node_add()
786 scifdev->node, msg->dst.node, msg->src.node); scif_node_add()
788 "Remote address for this node's aperture %llx\n", scif_node_add()
790 newdev = &scif_dev[msg->src.node]; scif_node_add()
791 newdev->node = msg->src.node; scif_node_add()
797 "failed to setup interrupts for %d\n", msg->src.node); scif_node_add()
803 "failed to map mmio for %d\n", msg->src.node); scif_node_add()
810 * Set the base address of the remote node's memory since it gets scif_node_add()
833 msg->dst.node = msg->src.node; scif_node_add()
834 msg->src.node = scif_info.nodeid; scif_node_add()
848 "node add failed for node %d\n", msg->src.node); scif_node_add()
850 msg->dst.node = msg->src.node; scif_node_add()
851 msg->src.node = scif_info.nodeid; scif_node_add()
878 "%s %d remote node %d offline, state = 0x%x\n", scif_poll_qp_state()
879 __func__, __LINE__, peerdev->node, qp->qp_state); scif_poll_qp_state()
887 * @scifdev: Remote SCIF device node
890 * After a MIC node receives the SCIF_NODE_ADD_ACK message it send this
891 * message to the mgmt node to confirm the sequence is finished.
899 struct scif_dev *dst_dev = &scif_dev[msg->dst.node]; scif_node_add_ack()
903 scifdev->node, msg->src.node, msg->dst.node); scif_node_add_ack()
909 * the lock serializes with scif_qp_response_ack. The mgmt node scif_node_add_ack()
920 peerdev = &scif_dev[msg->src.node]; scif_node_add_ack()
922 peerdev->node = msg->src.node; scif_node_add_ack()
950 struct scif_dev *dst_dev = &scif_dev[msg->dst.node]; scif_node_add_nack()
953 "SCIF_NODE_ADD_NACK received from %d\n", scifdev->node); scif_node_add_nack()
962 * Handle node removal.
967 int node = msg->payload[0]; scif_node_remove() local
968 struct scif_dev *scdev = &scif_dev[node]; scif_node_remove()
971 scif_handle_remove_node(node); scif_node_remove()
993 * Retrieve node info i.e maxid and total from the mgmt node.
999 swap(msg->dst.node, msg->src.node); scif_get_node_info_resp()
1022 "Unknown message 0x%x scifdev->node 0x%x\n", scif_msg_unknown()
1023 msg->uop, scifdev->node); scif_msg_unknown()
1071 * scif_nodeqp_msg_handler() - Common handler for node messages
1090 "Unknown message 0x%x scifdev->node 0x%x\n", scif_nodeqp_msg_handler()
1091 msg->uop, scifdev->node); scif_nodeqp_msg_handler()
1099 * scif_nodeqp_intrhandler() - Interrupt handler for node messages
1104 * messages from the node queue RB and calls the Node QP Message handling
1118 * The node queue pair is unmapped so skip the read pointer scif_nodeqp_intrhandler()
1133 * of the loopback message list, calls the node QP message handler,
1236 "SCIF LOOPB %d", scifdev->node); scif_setup_loopback_qp()
1275 scif_info.nodeid = scifdev->node; scif_setup_loopback_qp()
1317 /* Free P2P mappings in the given node for all its peer nodes */ scif_destroy_p2p()
1332 /* Free P2P mapping created in the peer nodes for the given node */ scif_destroy_p2p()
1337 if (p2p->ppi_peer_id == scifdev->node) { scif_destroy_p2p()
/linux-4.4.14/arch/x86/kernel/cpu/mcheck/
H A Dmce-genpool.c32 struct mce_evt_llist *node, *tmp; mce_gen_pool_process() local
40 llist_for_each_entry_safe(node, tmp, head, llnode) { llist_for_each_entry_safe()
41 mce = &node->mce; llist_for_each_entry_safe()
43 gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node)); llist_for_each_entry_safe()
54 struct mce_evt_llist *node; mce_gen_pool_add() local
59 node = (void *)gen_pool_alloc(mce_evt_pool, sizeof(*node)); mce_gen_pool_add()
60 if (!node) { mce_gen_pool_add()
65 memcpy(&node->mce, mce, sizeof(*mce)); mce_gen_pool_add()
66 llist_add(&node->llnode, &mce_event_llist); mce_gen_pool_add()
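Note: mce_gen_pool_add()/mce_gen_pool_process() above rely on a gen_pool created elsewhere in this file. The missing initialization follows the usual genalloc recipe; a sketch close to what the rest of mce-genpool.c does (the buffer size is illustrative):

    #include <linux/genalloc.h>
    #include <linux/log2.h>
    #include <linux/mm.h>

    static char gen_pool_buf[2 * PAGE_SIZE];

    static int mce_gen_pool_create(void)
    {
            struct gen_pool *pool;

            /* smallest allocation unit must hold one mce_evt_llist node */
            pool = gen_pool_create(ilog2(sizeof(struct mce_evt_llist)), -1);
            if (!pool)
                    return -ENOMEM;

            /* hand the static buffer to the pool; nid -1 = no NUMA preference */
            if (gen_pool_add(pool, (unsigned long)gen_pool_buf,
                             sizeof(gen_pool_buf), -1)) {
                    gen_pool_destroy(pool);
                    return -ENOMEM;
            }

            mce_evt_pool = pool;
            return 0;
    }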
/linux-4.4.14/drivers/clk/h8300/
H A Dclk-div.c14 static void __init h8300_div_clk_setup(struct device_node *node) h8300_div_clk_setup() argument
18 const char *clk_name = node->name; h8300_div_clk_setup()
24 num_parents = of_clk_get_parent_count(node); h8300_div_clk_setup()
30 divcr = of_iomap(node, 0); h8300_div_clk_setup()
39 parent_name = of_clk_get_parent_name(node, 0); h8300_div_clk_setup()
40 of_property_read_u32(node, "renesas,width", &width); h8300_div_clk_setup()
45 of_clk_add_provider(node, of_clk_src_simple_get, clk); h8300_div_clk_setup()
/linux-4.4.14/drivers/clk/sunxi/
H A Dclk-a10-codec.c23 static void __init sun4i_codec_clk_setup(struct device_node *node) sun4i_codec_clk_setup() argument
26 const char *clk_name = node->name, *parent_name; sun4i_codec_clk_setup()
29 reg = of_io_request_and_map(node, 0, of_node_full_name(node)); sun4i_codec_clk_setup()
33 of_property_read_string(node, "clock-output-names", &clk_name); sun4i_codec_clk_setup()
34 parent_name = of_clk_get_parent_name(node, 0); sun4i_codec_clk_setup()
41 of_clk_add_provider(node, of_clk_src_simple_get, clk); sun4i_codec_clk_setup()
H A Dclk-a10-mod1.c29 static void __init sun4i_mod1_clk_setup(struct device_node *node) sun4i_mod1_clk_setup() argument
35 const char *clk_name = node->name; sun4i_mod1_clk_setup()
39 reg = of_io_request_and_map(node, 0, of_node_full_name(node)); sun4i_mod1_clk_setup()
51 of_property_read_string(node, "clock-output-names", &clk_name); sun4i_mod1_clk_setup()
52 i = of_clk_parent_fill(node, parents, SUN4I_MOD1_MAX_PARENTS); sun4i_mod1_clk_setup()
69 of_clk_add_provider(node, of_clk_src_simple_get, clk); sun4i_mod1_clk_setup()
H A Dclk-sun9i-core.c91 static void __init sun9i_a80_pll4_setup(struct device_node *node) sun9i_a80_pll4_setup() argument
95 reg = of_io_request_and_map(node, 0, of_node_full_name(node)); sun9i_a80_pll4_setup()
98 node->name); sun9i_a80_pll4_setup()
102 sunxi_factors_register(node, &sun9i_a80_pll4_data, sun9i_a80_pll4_setup()
151 static void __init sun9i_a80_gt_setup(struct device_node *node) sun9i_a80_gt_setup() argument
156 reg = of_io_request_and_map(node, 0, of_node_full_name(node)); sun9i_a80_gt_setup()
159 node->name); sun9i_a80_gt_setup()
163 gt = sunxi_factors_register(node, &sun9i_a80_gt_data, sun9i_a80_gt_setup()
216 static void __init sun9i_a80_ahb_setup(struct device_node *node) sun9i_a80_ahb_setup() argument
220 reg = of_io_request_and_map(node, 0, of_node_full_name(node)); sun9i_a80_ahb_setup()
223 node->name); sun9i_a80_ahb_setup()
227 sunxi_factors_register(node, &sun9i_a80_ahb_data, sun9i_a80_ahb_setup()
242 static void __init sun9i_a80_apb0_setup(struct device_node *node) sun9i_a80_apb0_setup() argument
246 reg = of_io_request_and_map(node, 0, of_node_full_name(node)); sun9i_a80_apb0_setup()
249 node->name); sun9i_a80_apb0_setup()
253 sunxi_factors_register(node, &sun9i_a80_apb0_data, sun9i_a80_apb0_setup()
308 static void __init sun9i_a80_apb1_setup(struct device_node *node) sun9i_a80_apb1_setup() argument
312 reg = of_io_request_and_map(node, 0, of_node_full_name(node)); sun9i_a80_apb1_setup()
315 node->name); sun9i_a80_apb1_setup()
319 sunxi_factors_register(node, &sun9i_a80_apb1_data, sun9i_a80_apb1_setup()
H A Dclk-a10-hosc.c26 static void __init sun4i_osc_clk_setup(struct device_node *node) sun4i_osc_clk_setup() argument
31 const char *clk_name = node->name; sun4i_osc_clk_setup()
34 if (of_property_read_u32(node, "clock-frequency", &rate)) sun4i_osc_clk_setup()
45 of_property_read_string(node, "clock-output-names", &clk_name); sun4i_osc_clk_setup()
48 gate->reg = of_iomap(node, 0); sun4i_osc_clk_setup()
63 of_clk_add_provider(node, of_clk_src_simple_get, clk); sun4i_osc_clk_setup()
/linux-4.4.14/arch/powerpc/boot/
H A Dcuboot-8xx.c25 void *node; platform_fixups() local
31 node = finddevice("/soc/cpm"); platform_fixups()
32 if (node) platform_fixups()
33 setprop(node, "clock-frequency", &bd.bi_busfreq, 4); platform_fixups()
35 node = finddevice("/soc/cpm/brg"); platform_fixups()
36 if (node) platform_fixups()
37 setprop(node, "clock-frequency", &bd.bi_busfreq, 4); platform_fixups()
H A Dsimpleboot.c33 int node, size, i; platform_init() local
40 node = fdt_path_offset(_dtb_start, "/"); platform_init()
41 if (node < 0) platform_init()
42 fatal("Cannot find root node\n"); platform_init()
43 na = fdt_getprop(_dtb_start, node, "#address-cells", &size); platform_init()
46 ns = fdt_getprop(_dtb_start, node, "#size-cells", &size); platform_init()
51 node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type", platform_init()
53 if (node < 0) platform_init()
54 fatal("Cannot find memory node\n"); platform_init()
55 reg = fdt_getprop(_dtb_start, node, "reg", &size); platform_init()
72 node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type", platform_init()
74 if (!node) platform_init()
75 fatal("Cannot find cpu node\n"); platform_init()
76 timebase = fdt_getprop(_dtb_start, node, "timebase-frequency", &size); platform_init()
H A Dmpc8xx.c61 void *node; mpc8xx_set_clocks() local
65 node = finddevice("/soc/cpm"); mpc8xx_set_clocks()
66 if (node) mpc8xx_set_clocks()
67 setprop(node, "clock-frequency", &sysclk, 4); mpc8xx_set_clocks()
69 node = finddevice("/soc/cpm/brg"); mpc8xx_set_clocks()
70 if (node) mpc8xx_set_clocks()
71 setprop(node, "clock-frequency", &sysclk, 4); mpc8xx_set_clocks()
H A Dredboot-83xx.c27 void *node; platform_fixups() local
33 node = finddevice("/soc/cpm/brg"); platform_fixups()
34 if (node) { platform_fixups()
37 setprop(node, "clock-frequency", &bd.bi_busfreq, 4); platform_fixups()
H A Dredboot-8xx.c26 void *node; platform_fixups() local
32 node = finddevice("/soc/cpm/brg"); platform_fixups()
33 if (node) { platform_fixups()
36 setprop(node, "clock-frequency", &bd.bi_busfreq, 4); platform_fixups()
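Note: cuboot-8xx.c, mpc8xx.c and the two redboot fixups above all patch clock-frequency with the same two bootwrapper calls. The idiom condensed (finddevice()/setprop() are the wrapper's flat-device-tree helpers; the path and value are illustrative):

    static void fixup_clock(const char *path, u32 freq)
    {
            void *node = finddevice(path);  /* NULL when the path is absent */

            if (node)
                    setprop(node, "clock-frequency", &freq, sizeof(freq));
    }

    /* e.g. from platform_fixups():
     *      fixup_clock("/soc/cpm/brg", bd.bi_busfreq);
     */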
/linux-4.4.14/arch/ia64/include/asm/sn/
H A Dnodepda.h19 * In particular, this is the location of the node PDA.
20 * A pointer to the right node PDA is saved in each CPU PDA.
26 * One of these structures is allocated on each node of a NUMA system.
29 * all per-node data structures.
38 void *pdinfo; /* Platform-dependent per-node info */
41 * The BTEs on this node are shared by the local cpus
48 * Array of pointers to the nodepdas for each node.
62 * Access Functions for node PDA.
63 * Since there is one nodepda for each node, we need a convenient mechanism
68 * sn_nodepda - to access node PDA for the node on which code is running
69 * NODEPDA(cnodeid) - to access node PDA for cnodeid
77 * Check whether, given a compact node id, the corresponding node has all the
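Note: the comments above describe the access scheme rather than showing it: each node owns one nodepda, every nodepda carries an array of pointers to all of them, and a CPU reaches its own through a pointer cached in its CPU PDA. A generic sketch of that double indirection (names and sizes are illustrative, not the sn2 definitions):

    #define MAX_NODES 64

    struct nodepda_s {
            void *pdinfo;                   /* platform-dependent per-node info */
            /* pointers to every node's pda, replicated into each nodepda */
            struct nodepda_s *pernode_pdaindr[MAX_NODES];
    };

    /* each CPU's PDA caches a pointer to its own node's nodepda */
    extern struct nodepda_s *sn_nodepda;

    /* reach any node's pda without touching a shared global array */
    #define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid])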
/linux-4.4.14/arch/ia64/kernel/
H A Dnuma.c45 /* we don't have cpu-driven node hot add yet... map_cpu_to_node()
46 In the usual case, a node is created from SRAT at boot time. */ map_cpu_to_node()
64 * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
66 * Build cpu to node mapping and initialize the per node cpu masks using
71 int cpu, i, node; build_cpu_to_node_map() local
73 for(node=0; node < MAX_NUMNODES; node++) build_cpu_to_node_map()
74 cpumask_clear(&node_to_cpu_mask[node]); build_cpu_to_node_map()
77 node = -1; for_each_possible_early_cpu()
80 node = node_cpuid[i].nid; for_each_possible_early_cpu()
83 map_cpu_to_node(cpu, node); for_each_possible_early_cpu()
/linux-4.4.14/drivers/firewire/
H A Dcore-topology.c113 struct fw_node *node; fw_node_create() local
115 node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]), fw_node_create()
117 if (node == NULL) fw_node_create()
120 node->color = color; fw_node_create()
121 node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid); fw_node_create()
122 node->link_on = SELF_ID_LINK_ON(sid); fw_node_create()
123 node->phy_speed = SELF_ID_PHY_SPEED(sid); fw_node_create()
124 node->initiated_reset = SELF_ID_PHY_INITIATOR(sid); fw_node_create()
125 node->port_count = port_count; fw_node_create()
127 atomic_set(&node->ref_count, 1); fw_node_create()
128 INIT_LIST_HEAD(&node->link); fw_node_create()
130 return node; fw_node_create()
134 * Compute the maximum hop count for this node and its children. The
136 * two nodes in the subtree rooted at this node. We need this for
138 * build_tree() below, this is fairly easy to do: for each node we
141 * two cases: either the path goes through this node, in which case
147 static void update_hop_count(struct fw_node *node) update_hop_count() argument
153 for (i = 0; i < node->port_count; i++) { update_hop_count()
154 if (node->ports[i] == NULL) update_hop_count()
157 if (node->ports[i]->max_hops > max_child_hops) update_hop_count()
158 max_child_hops = node->ports[i]->max_hops; update_hop_count()
160 if (node->ports[i]->max_depth > depths[0]) { update_hop_count()
162 depths[0] = node->ports[i]->max_depth; update_hop_count()
163 } else if (node->ports[i]->max_depth > depths[1]) update_hop_count()
164 depths[1] = node->ports[i]->max_depth; update_hop_count()
167 node->max_depth = depths[0] + 1; update_hop_count()
168 node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2); update_hop_count()
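Note: update_hop_count() above is the classic bottom-up tree-diameter step: a node's max_depth is one more than its deepest child's, and its max_hops is the larger of any child's max_hops and the sum of the two deepest child depths plus two (a path peaking at this node). A standalone worked version over a two-leaf tree:

    #include <stdio.h>

    struct tnode {
            int nports;
            struct tnode *port[4];
            int max_depth, max_hops;
    };

    static void update(struct tnode *n)
    {
            int d0 = -1, d1 = -1, child_hops = 0; /* two deepest child depths */
            int i;

            for (i = 0; i < n->nports; i++) {
                    struct tnode *c = n->port[i];

                    if (!c)
                            continue;
                    if (c->max_hops > child_hops)
                            child_hops = c->max_hops;
                    if (c->max_depth > d0) {
                            d1 = d0;
                            d0 = c->max_depth;
                    } else if (c->max_depth > d1) {
                            d1 = c->max_depth;
                    }
            }
            n->max_depth = d0 + 1;          /* a leaf ends up with depth 0 */
            n->max_hops = child_hops;
            if (d0 + d1 + 2 > n->max_hops)
                    n->max_hops = d0 + d1 + 2; /* path through this node */
    }

    int main(void)
    {
            struct tnode a = { 0 }, b = { 0 }, root = { 2, { &a, &b } };

            update(&a);     /* children first, as build_tree() pops them */
            update(&b);
            update(&root);
            printf("depth=%d hops=%d\n", root.max_depth, root.max_hops); /* 1, 2 */
            return 0;
    }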
186 struct fw_node *node, *child, *local_node, *irm_node; build_tree() local
194 node = NULL; build_tree()
225 * start of the child nodes for this node. build_tree()
235 node = fw_node_create(q, port_count, card->color); build_tree()
236 if (node == NULL) { build_tree()
242 local_node = node; build_tree()
245 irm_node = node; build_tree()
254 * parent node at this time, so we build_tree()
255 * temporarily abuse node->color for build_tree()
257 * node->ports array where the parent build_tree()
258 * node should be. Later, when we build_tree()
259 * handle the parent node, we fix up build_tree()
263 node->color = i; build_tree()
267 node->ports[i] = child; build_tree()
270 * child node. build_tree()
272 child->ports[child->color] = node; build_tree()
280 * Check that the node reports exactly one parent build_tree()
286 fw_err(card, "parent port inconsistency for node %d: " build_tree()
291 /* Pop the child nodes off the stack and push the new node. */ build_tree()
293 list_add_tail(&node->link, &stack); build_tree()
296 if (node->phy_speed == SCODE_BETA && build_tree()
307 update_hop_count(node); build_tree()
313 card->root_node = node; build_tree()
322 struct fw_node * node,
329 struct fw_node *node, *next, *child, *parent; for_each_fw_node() local
337 list_for_each_entry(node, &list, link) { for_each_fw_node()
338 node->color = card->color; for_each_fw_node()
340 for (i = 0; i < node->port_count; i++) { for_each_fw_node()
341 child = node->ports[i]; for_each_fw_node()
352 callback(card, node, parent); for_each_fw_node()
355 list_for_each_entry_safe(node, next, &list, link) for_each_fw_node()
356 fw_node_put(node); for_each_fw_node()
360 struct fw_node *node, struct fw_node *parent) report_lost_node()
362 fw_node_event(card, node, FW_NODE_DESTROYED); report_lost_node()
363 fw_node_put(node); report_lost_node()
370 struct fw_node *node, struct fw_node *parent) report_found_node()
372 int b_path = (node->phy_speed == SCODE_BETA); report_found_node()
376 node->max_speed = parent->max_speed < node->phy_speed ? report_found_node()
377 parent->max_speed : node->phy_speed; report_found_node()
378 node->b_path = parent->b_path && b_path; report_found_node()
380 node->max_speed = node->phy_speed; report_found_node()
381 node->b_path = b_path; report_found_node()
384 fw_node_event(card, node, FW_NODE_CREATED); report_found_node()
465 * connected node for further update_tree()
485 * One or more node were connected to update_tree()
359 report_lost_node(struct fw_card *card, struct fw_node *node, struct fw_node *parent) report_lost_node() argument
369 report_found_node(struct fw_card *card, struct fw_node *node, struct fw_node *parent) report_found_node() argument
/linux-4.4.14/tools/perf/tests/
H A Dhists_output.c98 struct rb_node *node; del_hist_entries() local
108 node = rb_first(root_out); del_hist_entries()
110 he = rb_entry(node, struct hist_entry, rb_node); del_hist_entries()
111 rb_erase(node, root_out); del_hist_entries()
132 struct rb_node *node; test1() local
167 node = rb_first(root); test1()
168 he = rb_entry(node, struct hist_entry, rb_node); test1()
173 node = rb_next(node); test1()
174 he = rb_entry(node, struct hist_entry, rb_node); test1()
179 node = rb_next(node); test1()
180 he = rb_entry(node, struct hist_entry, rb_node); test1()
185 node = rb_next(node); test1()
186 he = rb_entry(node, struct hist_entry, rb_node); test1()
191 node = rb_next(node); test1()
192 he = rb_entry(node, struct hist_entry, rb_node); test1()
197 node = rb_next(node); test1()
198 he = rb_entry(node, struct hist_entry, rb_node); test1()
203 node = rb_next(node); test1()
204 he = rb_entry(node, struct hist_entry, rb_node); test1()
209 node = rb_next(node); test1()
210 he = rb_entry(node, struct hist_entry, rb_node); test1()
215 node = rb_next(node); test1()
216 he = rb_entry(node, struct hist_entry, rb_node); test1()
234 struct rb_node *node; test2() local
267 node = rb_first(root); test2()
268 he = rb_entry(node, struct hist_entry, rb_node); test2()
272 node = rb_next(node); test2()
273 he = rb_entry(node, struct hist_entry, rb_node); test2()
290 struct rb_node *node; test3() local
321 node = rb_first(root); test3()
322 he = rb_entry(node, struct hist_entry, rb_node); test3()
327 node = rb_next(node); test3()
328 he = rb_entry(node, struct hist_entry, rb_node); test3()
333 node = rb_next(node); test3()
334 he = rb_entry(node, struct hist_entry, rb_node); test3()
339 node = rb_next(node); test3()
340 he = rb_entry(node, struct hist_entry, rb_node); test3()
345 node = rb_next(node); test3()
346 he = rb_entry(node, struct hist_entry, rb_node); test3()
364 struct rb_node *node; test4() local
399 node = rb_first(root); test4()
400 he = rb_entry(node, struct hist_entry, rb_node); test4()
405 node = rb_next(node); test4()
406 he = rb_entry(node, struct hist_entry, rb_node); test4()
411 node = rb_next(node); test4()
412 he = rb_entry(node, struct hist_entry, rb_node); test4()
417 node = rb_next(node); test4()
418 he = rb_entry(node, struct hist_entry, rb_node); test4()
423 node = rb_next(node); test4()
424 he = rb_entry(node, struct hist_entry, rb_node); test4()
429 node = rb_next(node); test4()
430 he = rb_entry(node, struct hist_entry, rb_node); test4()
435 node = rb_next(node); test4()
436 he = rb_entry(node, struct hist_entry, rb_node); test4()
441 node = rb_next(node); test4()
442 he = rb_entry(node, struct hist_entry, rb_node); test4()
447 node = rb_next(node); test4()
448 he = rb_entry(node, struct hist_entry, rb_node); test4()
466 struct rb_node *node; test5() local
502 node = rb_first(root); test5()
503 he = rb_entry(node, struct hist_entry, rb_node); test5()
510 node = rb_next(node); test5()
511 he = rb_entry(node, struct hist_entry, rb_node); test5()
517 node = rb_next(node); test5()
518 he = rb_entry(node, struct hist_entry, rb_node); test5()
524 node = rb_next(node); test5()
525 he = rb_entry(node, struct hist_entry, rb_node); test5()
531 node = rb_next(node); test5()
532 he = rb_entry(node, struct hist_entry, rb_node); test5()
538 node = rb_next(node); test5()
539 he = rb_entry(node, struct hist_entry, rb_node); test5()
545 node = rb_next(node); test5()
546 he = rb_entry(node, struct hist_entry, rb_node); test5()
552 node = rb_next(node); test5()
553 he = rb_entry(node, struct hist_entry, rb_node); test5()
559 node = rb_next(node); test5()
560 he = rb_entry(node, struct hist_entry, rb_node); test5()
566 node = rb_next(node); test5()
567 he = rb_entry(node, struct hist_entry, rb_node); test5()
H A Dhists_common.c161 struct rb_node *node; print_hists_in() local
169 node = rb_first(root); print_hists_in()
170 while (node) { print_hists_in()
173 he = rb_entry(node, struct hist_entry, rb_node_in); print_hists_in()
183 node = rb_next(node); print_hists_in()
191 struct rb_node *node; print_hists_out() local
196 node = rb_first(root); print_hists_out()
197 while (node) { print_hists_out()
200 he = rb_entry(node, struct hist_entry, rb_node); print_hists_out()
211 node = rb_next(node); print_hists_out()
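Note: both test files above walk sorted hist entries with the same three-call rbtree idiom: rb_first() for the leftmost node, rb_entry() to recover the containing struct, rb_next() to advance in sort order. The bare pattern as a kernel-side sketch (struct hist_entry and its rb_node member are as used above):

    #include <linux/rbtree.h>

    static void walk_entries(struct rb_root *root)
    {
            struct rb_node *node;

            for (node = rb_first(root); node; node = rb_next(node)) {
                    struct hist_entry *he;

                    /* container_of() in disguise: rb_node -> enclosing entry */
                    he = rb_entry(node, struct hist_entry, rb_node);
                    /* ... inspect he here ... */
            }
    }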
/linux-4.4.14/drivers/clk/keystone/
H A Dpll.c158 * @node: device tree node for this clock
162 static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl) _of_pll_clk_init() argument
175 parent_name = of_clk_get_parent_name(node, 0); _of_pll_clk_init()
176 if (of_property_read_u32(node, "fixed-postdiv", &pll_data->postdiv)) { _of_pll_clk_init()
185 i = of_property_match_string(node, "reg-names", _of_pll_clk_init()
187 pll_data->pllod = of_iomap(node, i); _of_pll_clk_init()
190 i = of_property_match_string(node, "reg-names", "control"); _of_pll_clk_init()
191 pll_data->pll_ctl0 = of_iomap(node, i); _of_pll_clk_init()
206 i = of_property_match_string(node, "reg-names", "multiplier"); _of_pll_clk_init()
207 pll_data->pllm = of_iomap(node, i); _of_pll_clk_init()
215 clk = clk_register_pll(NULL, node->name, parent_name, pll_data); _of_pll_clk_init()
217 of_clk_add_provider(node, of_clk_src_simple_get, clk); _of_pll_clk_init()
222 pr_err("%s: error initializing pll %s\n", __func__, node->name); _of_pll_clk_init()
228 * @node: device tree node for this clock
230 static void __init of_keystone_pll_clk_init(struct device_node *node) of_keystone_pll_clk_init() argument
232 _of_pll_clk_init(node, false); of_keystone_pll_clk_init()
239 * @node: device tree node for this clock
241 static void __init of_keystone_main_pll_clk_init(struct device_node *node) of_keystone_main_pll_clk_init() argument
243 _of_pll_clk_init(node, true); of_keystone_main_pll_clk_init()
250 * @node: device tree node for this clock
252 static void __init of_pll_div_clk_init(struct device_node *node) of_pll_div_clk_init() argument
258 const char *clk_name = node->name; of_pll_div_clk_init()
260 of_property_read_string(node, "clock-output-names", &clk_name); of_pll_div_clk_init()
261 reg = of_iomap(node, 0); of_pll_div_clk_init()
267 parent_name = of_clk_get_parent_name(node, 0); of_pll_div_clk_init()
273 if (of_property_read_u32(node, "bit-shift", &shift)) { of_pll_div_clk_init()
278 if (of_property_read_u32(node, "bit-mask", &mask)) { of_pll_div_clk_init()
286 of_clk_add_provider(node, of_clk_src_simple_get, clk); of_pll_div_clk_init()
294 * @node: device tree node for this clock
296 static void __init of_pll_mux_clk_init(struct device_node *node) of_pll_mux_clk_init() argument
302 const char *clk_name = node->name; of_pll_mux_clk_init()
304 of_property_read_string(node, "clock-output-names", &clk_name); of_pll_mux_clk_init()
305 reg = of_iomap(node, 0); of_pll_mux_clk_init()
311 of_clk_parent_fill(node, parents, 2); of_pll_mux_clk_init()
317 if (of_property_read_u32(node, "bit-shift", &shift)) { of_pll_mux_clk_init()
322 if (of_property_read_u32(node, "bit-mask", &mask)) { of_pll_mux_clk_init()
331 of_clk_add_provider(node, of_clk_src_simple_get, clk); of_pll_mux_clk_init()
/linux-4.4.14/drivers/media/v4l2-core/
H A Dv4l2-of.c23 static int v4l2_of_parse_csi_bus(const struct device_node *node, v4l2_of_parse_csi_bus() argument
32 prop = of_find_property(node, "data-lanes", NULL); v4l2_of_parse_csi_bus()
46 prop = of_find_property(node, "lane-polarities", NULL); v4l2_of_parse_csi_bus()
60 node->full_name, 1 + bus->num_data_lanes, i); v4l2_of_parse_csi_bus()
65 if (!of_property_read_u32(node, "clock-lanes", &v)) { v4l2_of_parse_csi_bus()
70 if (of_get_property(node, "clock-noncontinuous", &v)) v4l2_of_parse_csi_bus()
81 static void v4l2_of_parse_parallel_bus(const struct device_node *node, v4l2_of_parse_parallel_bus() argument
88 if (!of_property_read_u32(node, "hsync-active", &v)) v4l2_of_parse_parallel_bus()
92 if (!of_property_read_u32(node, "vsync-active", &v)) v4l2_of_parse_parallel_bus()
96 if (!of_property_read_u32(node, "field-even-active", &v)) v4l2_of_parse_parallel_bus()
104 if (!of_property_read_u32(node, "pclk-sample", &v)) v4l2_of_parse_parallel_bus()
108 if (!of_property_read_u32(node, "data-active", &v)) v4l2_of_parse_parallel_bus()
112 if (of_get_property(node, "slave-mode", &v)) v4l2_of_parse_parallel_bus()
117 if (!of_property_read_u32(node, "bus-width", &v)) v4l2_of_parse_parallel_bus()
120 if (!of_property_read_u32(node, "data-shift", &v)) v4l2_of_parse_parallel_bus()
123 if (!of_property_read_u32(node, "sync-on-green-active", &v)) v4l2_of_parse_parallel_bus()
132 * v4l2_of_parse_endpoint() - parse all endpoint node properties
133 * @node: pointer to endpoint device_node
143 * The caller should hold a reference to @node.
151 int v4l2_of_parse_endpoint(const struct device_node *node, v4l2_of_parse_endpoint() argument
156 of_graph_parse_endpoint(node, &endpoint->base); v4l2_of_parse_endpoint()
161 rval = v4l2_of_parse_csi_bus(node, endpoint); v4l2_of_parse_endpoint()
169 v4l2_of_parse_parallel_bus(node, endpoint); v4l2_of_parse_endpoint()
194 * v4l2_of_alloc_parse_endpoint() - parse all endpoint node properties
195 * @node: pointer to endpoint device_node
204 * The caller should hold a reference to @node.
218 const struct device_node *node) v4l2_of_alloc_parse_endpoint()
228 rval = v4l2_of_parse_endpoint(node, endpoint); v4l2_of_alloc_parse_endpoint()
232 if (of_get_property(node, "link-frequencies", &len)) { v4l2_of_alloc_parse_endpoint()
243 node, "link-frequencies", endpoint->link_frequencies, v4l2_of_alloc_parse_endpoint()
259 * @node: pointer to the endpoint at the local end of the link
264 * remote port's parent nodes respectively (the port parent node being the
265 * parent node of the port node if that node isn't a 'ports' node, or the
266 * grand-parent node of the port node otherwise).
273 int v4l2_of_parse_link(const struct device_node *node, v4l2_of_parse_link() argument
280 np = of_get_parent(node); v4l2_of_parse_link()
287 np = of_parse_phandle(node, "remote-endpoint", 0); v4l2_of_parse_link()
217 v4l2_of_alloc_parse_endpoint( const struct device_node *node) v4l2_of_alloc_parse_endpoint() argument
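v4l2_of_parse_endpoint() fills a struct v4l2_of_endpoint from a single endpoint node, so a typical caller first walks the OF graph to obtain that node. A hedged probe-time sketch, assuming the driver only cares about its first endpoint (helper name hypothetical):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <media/v4l2-of.h>

/* Hypothetical probe helper: parse the first endpoint under @dev_node. */
static int example_parse_first_endpoint(struct device_node *dev_node,
					struct v4l2_of_endpoint *ep)
{
	struct device_node *node;
	int ret;

	node = of_graph_get_next_endpoint(dev_node, NULL);
	if (!node)
		return -ENODEV;

	/* we hold the reference, as v4l2_of_parse_endpoint() expects */
	ret = v4l2_of_parse_endpoint(node, ep);
	of_node_put(node);
	if (ret)
		return ret;

	/* ep->bus_type now says whether the bus is CSI-2 or parallel,
	 * and the matching union member carries the parsed properties */
	return 0;
}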
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/core/
mm.c 26 #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \ macro
32 struct nvkm_mm_node *node; nvkm_mm_dump() local
35 printk(KERN_ERR "nvkm: node list:\n"); nvkm_mm_dump()
36 list_for_each_entry(node, &mm->nodes, nl_entry) { nvkm_mm_dump()
38 node->offset, node->length, node->type); nvkm_mm_dump()
41 list_for_each_entry(node, &mm->free, fl_entry) { nvkm_mm_dump()
43 node->offset, node->length, node->type); nvkm_mm_dump()
53 struct nvkm_mm_node *prev = node(this, prev); nvkm_mm_free()
54 struct nvkm_mm_node *next = node(this, next); nvkm_mm_free()
129 prev = node(this, prev); nvkm_mm_head()
133 next = node(this, next); nvkm_mm_head()
202 prev = node(this, prev); nvkm_mm_tail()
206 next = node(this, next); nvkm_mm_tail()
240 struct nvkm_mm_node *node, *prev; nvkm_mm_init() local
244 prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry); nvkm_mm_init()
248 if (!(node = kzalloc(sizeof(*node), GFP_KERNEL))) nvkm_mm_init()
250 node->type = NVKM_MM_TYPE_HOLE; nvkm_mm_init()
251 node->offset = next; nvkm_mm_init()
252 node->length = offset - next; nvkm_mm_init()
253 list_add_tail(&node->nl_entry, &mm->nodes); nvkm_mm_init()
263 node = kzalloc(sizeof(*node), GFP_KERNEL); nvkm_mm_init()
264 if (!node) nvkm_mm_init()
268 node->offset = roundup(offset, mm->block_size); nvkm_mm_init()
269 node->length = rounddown(offset + length, mm->block_size); nvkm_mm_init()
270 node->length -= node->offset; nvkm_mm_init()
273 list_add_tail(&node->nl_entry, &mm->nodes); nvkm_mm_init()
274 list_add_tail(&node->fl_entry, &mm->free); nvkm_mm_init()
275 node->heap = ++mm->heap_nodes; nvkm_mm_init()
282 struct nvkm_mm_node *node, *temp; nvkm_mm_fini() local
288 list_for_each_entry(node, &mm->nodes, nl_entry) { nvkm_mm_fini()
289 if (node->type != NVKM_MM_TYPE_HOLE) { nvkm_mm_fini()
297 list_for_each_entry_safe(node, temp, &mm->nodes, nl_entry) { nvkm_mm_fini()
298 list_del(&node->nl_entry); nvkm_mm_fini()
299 kfree(node); nvkm_mm_fini()
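The node(root, dir) macro above fetches a node's list neighbour (or NULL at the list head), and nvkm_mm_free() uses it to coalesce a freed block with adjacent holes. A standalone plain-C sketch of that free-with-merge idea, with illustrative types and node recycling elided:

#include <stddef.h>

struct mm_node {
	struct mm_node *prev, *next;
	unsigned long offset, length;
	int hole;				/* 1 = free space */
};

static void mm_free(struct mm_node *this)
{
	struct mm_node *prev = this->prev, *next = this->next;

	this->hole = 1;
	if (next && next->hole) {		/* absorb the following hole */
		this->length += next->length;
		this->next = next->next;
		if (next->next)
			next->next->prev = this;
	}
	if (prev && prev->hole) {		/* let the preceding hole absorb us */
		prev->length += this->length;
		prev->next = this->next;
		if (this->next)
			this->next->prev = prev;
	}
	/* a real allocator would recycle the absorbed nodes here */
}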
/linux-4.4.14/arch/mips/loongson64/loongson-3/
numa.c 124 static void __init szmem(unsigned int node) szmem() argument
133 if (node_id != node) szmem()
153 PFN_PHYS(end_pfn - start_pfn), node); szmem()
167 PFN_PHYS(end_pfn - start_pfn), node); szmem()
181 static void __init node_mem_init(unsigned int node) node_mem_init() argument
187 node_addrspace_offset = nid_to_addroffset(node); node_mem_init()
189 node, node_addrspace_offset); node_mem_init()
191 get_pfn_range_for_nid(node, &start_pfn, &end_pfn); node_mem_init()
193 if (node == 0) node_mem_init()
196 node, start_pfn, end_pfn, freepfn); node_mem_init()
198 __node_data[node] = prealloc__node_data + node; node_mem_init()
200 NODE_DATA(node)->bdata = &bootmem_node_data[node]; node_mem_init()
201 NODE_DATA(node)->node_start_pfn = start_pfn; node_mem_init()
202 NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; node_mem_init()
204 bootmap_size = init_bootmem_node(NODE_DATA(node), freepfn, node_mem_init()
206 free_bootmem_with_active_regions(node, end_pfn); node_mem_init()
207 if (node == 0) /* used by finalize_initrd() */ node_mem_init()
211 reserve_bootmem_node(NODE_DATA(node), start_pfn << PAGE_SHIFT, node_mem_init()
215 if (node == 0 && node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT)) { node_mem_init()
217 reserve_bootmem_node(NODE_DATA(node), node_mem_init()
222 sparse_memory_present_with_active_regions(node); node_mem_init()
227 unsigned int node, cpu, active_cpu = 0; prom_meminit() local
232 for (node = 0; node < loongson_sysconf.nr_nodes; node++) { prom_meminit()
233 if (node_online(node)) { prom_meminit()
234 szmem(node); prom_meminit()
235 node_mem_init(node); prom_meminit()
236 cpumask_clear(&__node_data[(node)]->cpumask); prom_meminit()
240 node = cpu / loongson_sysconf.cores_per_node; prom_meminit()
241 if (node >= num_online_nodes()) prom_meminit()
242 node = 0; prom_meminit()
247 cpumask_set_cpu(active_cpu, &__node_data[(node)]->cpumask); prom_meminit()
248 pr_info("NUMA: set cpumask cpu %d on node %d\n", active_cpu, node); prom_meminit()
256 unsigned node; paging_init() local
261 for_each_online_node(node) { for_each_online_node()
264 get_pfn_range_for_nid(node, &start_pfn, &end_pfn); for_each_online_node()
280 setup_zero_pages(); /* This comes from node 0 */ mem_init()
/linux-4.4.14/arch/tile/include/asm/
topology.h 22 /* Mappings between logical cpu number and node number. */
26 /* Returns the number of the node containing CPU 'cpu'. */ cpu_to_node()
33 * Returns the number of the node containing Node 'node'.
36 #define parent_node(node) (node)
38 /* Returns a bitmask of CPUs on Node 'node'. */ cpumask_of_node()
39 static inline const struct cpumask *cpumask_of_node(int node) cpumask_of_node() argument
41 return &node_2_cpu_mask[node]; cpumask_of_node()
44 /* For now, use numa node -1 for global allocation. */
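cpumask_of_node() turns a node id into the set of CPUs living on that node, which composes directly with the cpumask iterators. An illustrative sketch, not taken from the tree:

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/topology.h>

/* Illustrative only: walk every CPU that topology places on @nid. */
static void example_walk_node_cpus(int nid)
{
	int cpu;

	for_each_cpu(cpu, cpumask_of_node(nid))
		pr_info("cpu %d sits on node %d\n", cpu, nid);
}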
/linux-4.4.14/arch/x86/platform/olpc/
olpc_dt.c 27 static phandle __init olpc_dt_getsibling(phandle node) olpc_dt_getsibling() argument
29 const void *args[] = { (void *)node }; olpc_dt_getsibling()
30 void *res[] = { &node }; olpc_dt_getsibling()
32 if ((s32)node == -1) olpc_dt_getsibling()
35 if (olpc_ofw("peer", args, res) || (s32)node == -1) olpc_dt_getsibling()
38 return node; olpc_dt_getsibling()
41 static phandle __init olpc_dt_getchild(phandle node) olpc_dt_getchild() argument
43 const void *args[] = { (void *)node }; olpc_dt_getchild()
44 void *res[] = { &node }; olpc_dt_getchild()
46 if ((s32)node == -1) olpc_dt_getchild()
49 if (olpc_ofw("child", args, res) || (s32)node == -1) { olpc_dt_getchild()
54 return node; olpc_dt_getchild()
57 static int __init olpc_dt_getproplen(phandle node, const char *prop) olpc_dt_getproplen() argument
59 const void *args[] = { (void *)node, prop }; olpc_dt_getproplen()
63 if ((s32)node == -1) olpc_dt_getproplen()
74 static int __init olpc_dt_getproperty(phandle node, const char *prop, olpc_dt_getproperty() argument
79 plen = olpc_dt_getproplen(node, prop); olpc_dt_getproperty()
83 const void *args[] = { (void *)node, prop, buf, (void *)plen }; olpc_dt_getproperty()
95 static int __init olpc_dt_nextprop(phandle node, char *prev, char *buf) olpc_dt_nextprop() argument
97 const void *args[] = { (void *)node, prev, buf }; olpc_dt_nextprop()
103 if ((s32)node == -1) olpc_dt_nextprop()
112 static int __init olpc_dt_pkg2path(phandle node, char *buf, olpc_dt_pkg2path() argument
115 const void *args[] = { (void *)node, buf, (void *)buflen }; olpc_dt_pkg2path()
118 if ((s32)node == -1) olpc_dt_pkg2path()
170 phandle node; olpc_dt_finddevice() local
172 void *res[] = { &node }; olpc_dt_finddevice()
179 if ((s32) node == -1) olpc_dt_finddevice()
182 return node; olpc_dt_finddevice()
205 phandle node; olpc_dt_get_board_revision() local
209 node = olpc_dt_finddevice("/"); olpc_dt_get_board_revision()
210 if (!node) olpc_dt_get_board_revision()
213 r = olpc_dt_getproperty(node, "board-revision-int", olpc_dt_get_board_revision()
225 phandle node; olpc_dt_fixup() local
228 node = olpc_dt_finddevice("/battery@0"); olpc_dt_fixup()
229 if (!node) olpc_dt_fixup()
233 * If the battery node has a compatible property, we are running a new olpc_dt_fixup()
236 r = olpc_dt_getproperty(node, "compatible", buf, sizeof(buf)); olpc_dt_fixup()
242 /* Add olpc,xo1-battery compatible marker to battery node */ olpc_dt_fixup()
280 pr_err("PROM: unable to get root node from OFW!\n"); olpc_dt_build_devicetree()
289 /* A list of DT node/bus matches that we want to expose as platform devices */
/linux-4.4.14/arch/mips/netlogic/common/
irq.c 81 struct nlm_soc_info *node; member in struct:nlm_pic_irq
93 spin_lock_irqsave(&pd->node->piclock, flags); xlp_pic_enable()
94 nlm_pic_enable_irt(pd->node->picbase, pd->irt); xlp_pic_enable()
95 spin_unlock_irqrestore(&pd->node->piclock, flags); xlp_pic_enable()
104 spin_lock_irqsave(&pd->node->piclock, flags); xlp_pic_disable()
105 nlm_pic_disable_irt(pd->node->picbase, pd->irt); xlp_pic_disable()
106 spin_unlock_irqrestore(&pd->node->piclock, flags); xlp_pic_disable()
130 nlm_pic_ack(pd->node->picbase, pd->irt); xlp_pic_unmask()
184 void nlm_setup_pic_irq(int node, int picirq, int irq, int irt) nlm_setup_pic_irq() argument
189 xirq = nlm_irq_to_xirq(node, irq); nlm_setup_pic_irq()
194 pic_data->node = nlm_get_node(node); nlm_setup_pic_irq()
199 void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *)) nlm_set_pic_extra_ack() argument
204 xirq = nlm_irq_to_xirq(node, irq); nlm_set_pic_extra_ack()
211 static void nlm_init_node_irqs(int node) nlm_init_node_irqs() argument
216 pr_info("Init IRQ for node %d\n", node); nlm_init_node_irqs()
217 nodep = nlm_get_node(node); nlm_init_node_irqs()
228 node * nlm_threads_per_node(), 0); nlm_init_node_irqs()
229 nlm_setup_pic_irq(node, i, i, irt); nlm_init_node_irqs()
235 int cpu, node; nlm_smp_irq_init() local
238 node = hwtid / nlm_threads_per_node(); nlm_smp_irq_init()
240 if (cpu == 0 && node != 0) nlm_smp_irq_init()
241 nlm_init_node_irqs(node); nlm_smp_irq_init()
242 write_c0_eimr(nlm_get_node(node)->irqmask); nlm_smp_irq_init()
248 int i, node; plat_irq_dispatch() local
250 node = nlm_nodeid(); plat_irq_dispatch()
265 nlm_dispatch_msi(node, i); plat_irq_dispatch()
269 nlm_dispatch_msix(node, i); plat_irq_dispatch()
275 do_IRQ(nlm_irq_to_xirq(node, i)); plat_irq_dispatch()
283 static int __init xlp_of_pic_init(struct device_node *node, xlp_of_pic_init() argument
292 ret = of_address_to_resource(node, 0, &res); xlp_of_pic_init()
294 pr_err("PIC %s: reg property not found!\n", node->name); xlp_of_pic_init()
308 node->name, bus); xlp_of_pic_init()
314 pr_err("PIC %s: node %d does not exist!\n", xlp_of_pic_init()
315 node->name, socid); xlp_of_pic_init()
321 pr_err("PIC %s: node %d does not exist!\n", node->name, socid); xlp_of_pic_init()
325 xlp_pic_domain = irq_domain_add_legacy(node, n_picirqs, xlp_of_pic_init()
329 pr_err("PIC %s: Creating legacy domain failed!\n", node->name); xlp_of_pic_init()
/linux-4.4.14/drivers/media/platform/sti/bdisp/
bdisp-hw.c 13 /* Max width of the source frame in a single node */
121 * Free node memory
128 if (ctx && ctx->node[0]) { bdisp_hw_free_nodes()
134 ctx->node[0], ctx->node_paddr[0], &attrs); bdisp_hw_free_nodes()
167 ctx->node[i] = base; bdisp_hw_alloc_nodes()
169 dev_dbg(dev, "node[%d]=0x%p (paddr=%pad)\n", i, ctx->node[i], bdisp_hw_alloc_nodes()
453 * @node: node to be set
454 * @t_plan: whether the node refers to a RGB/Y or a CbCr plane
457 * Build a node
464 struct bdisp_node *node, bdisp_hw_build_node()
479 memset(node, 0, sizeof(*node)); bdisp_hw_build_node()
494 node->nip = 0; bdisp_hw_build_node()
495 node->cic = BLT_CIC_ALL_GRP; bdisp_hw_build_node()
496 node->ack = BLT_ACK_BYPASS_S2S3; bdisp_hw_build_node()
501 node->ins = BLT_INS_S1_OFF | BLT_INS_S2_MEM | BLT_INS_S3_OFF; bdisp_hw_build_node()
507 node->ins = BLT_INS_S1_OFF | BLT_INS_S3_MEM; bdisp_hw_build_node()
509 node->ins |= BLT_INS_S2_CF; bdisp_hw_build_node()
511 node->ins |= BLT_INS_S2_MEM; bdisp_hw_build_node()
518 node->ins = BLT_INS_S3_MEM; bdisp_hw_build_node()
520 node->ins |= BLT_INS_S2_CF | BLT_INS_S1_CF; bdisp_hw_build_node()
522 node->ins |= BLT_INS_S2_MEM | BLT_INS_S1_MEM; bdisp_hw_build_node()
527 node->ins |= cfg->cconv ? BLT_INS_IVMX : 0; bdisp_hw_build_node()
529 node->ins |= (cfg->scale || cfg->src_420 || cfg->dst_420) ? bdisp_hw_build_node()
533 node->tba = (t_plan == BDISP_CBCR) ? dst->paddr[1] : dst->paddr[0]; bdisp_hw_build_node()
535 node->tty = dst->bytesperline; bdisp_hw_build_node()
536 node->tty |= bdisp_hw_color_format(dst_fmt); bdisp_hw_build_node()
537 node->tty |= BLT_TTY_DITHER; bdisp_hw_build_node()
538 node->tty |= (t_plan == BDISP_CBCR) ? BLT_TTY_CHROMA : 0; bdisp_hw_build_node()
539 node->tty |= cfg->hflip ? BLT_TTY_HSO : 0; bdisp_hw_build_node()
540 node->tty |= cfg->vflip ? BLT_TTY_VSO : 0; bdisp_hw_build_node()
552 node->txy = cfg->vflip ? (dst_rect.height - 1) : dst_rect.top; bdisp_hw_build_node()
553 node->txy <<= 16; bdisp_hw_build_node()
554 node->txy |= cfg->hflip ? (dst_width - dst_x_offset - 1) : bdisp_hw_build_node()
557 node->tsz = dst_rect.height << 16 | dst_rect.width; bdisp_hw_build_node()
567 node->s2ba = src->paddr[0]; bdisp_hw_build_node()
569 node->s2ty = src->bytesperline; bdisp_hw_build_node()
571 node->s2ty *= 2; bdisp_hw_build_node()
573 node->s2ty |= bdisp_hw_color_format(src_fmt); bdisp_hw_build_node()
575 node->s2xy = src_rect.top << 16 | src_rect.left; bdisp_hw_build_node()
576 node->s2sz = src_rect.height << 16 | src_rect.width; bdisp_hw_build_node()
587 node->s2ba = src->paddr[1]; bdisp_hw_build_node()
589 node->s2ty = src->bytesperline; bdisp_hw_build_node()
591 node->s2ty /= 2; bdisp_hw_build_node()
593 node->s2ty *= 2; bdisp_hw_build_node()
595 node->s2ty |= bdisp_hw_color_format(src_fmt); bdisp_hw_build_node()
597 node->s2xy = src_rect.top << 16 | src_rect.left; bdisp_hw_build_node()
598 node->s2sz = src_rect.height << 16 | src_rect.width; bdisp_hw_build_node()
602 node->s1ba = src->paddr[2]; bdisp_hw_build_node()
604 node->s1ty = node->s2ty; bdisp_hw_build_node()
605 node->s1xy = node->s2xy; bdisp_hw_build_node()
609 node->s3ba = src->paddr[0]; bdisp_hw_build_node()
611 node->s3ty = src->bytesperline; bdisp_hw_build_node()
613 node->s3ty *= 2; bdisp_hw_build_node()
614 node->s3ty |= bdisp_hw_color_format(src_fmt); bdisp_hw_build_node()
618 node->s3xy = node->s2xy * 2; bdisp_hw_build_node()
619 node->s3sz = node->s2sz * 2; bdisp_hw_build_node()
622 node->s3ty |= BLT_S3TY_BLANK_ACC; bdisp_hw_build_node()
623 node->s3xy = node->s2xy; bdisp_hw_build_node()
624 node->s3sz = node->s2sz; bdisp_hw_build_node()
629 if (node->ins & BLT_INS_SCALE) { bdisp_hw_build_node()
635 node->fctl = BLT_FCTL_HV_SCALE; bdisp_hw_build_node()
637 node->fctl |= BLT_FCTL_Y_HV_SCALE; bdisp_hw_build_node()
639 node->fctl = BLT_FCTL_HV_SAMPLE; bdisp_hw_build_node()
641 node->fctl |= BLT_FCTL_Y_HV_SAMPLE; bdisp_hw_build_node()
656 node->rsf = v_inc << 16 | h_inc; bdisp_hw_build_node()
659 node->rzi = BLT_RZI_DEFAULT; bdisp_hw_build_node()
662 node->hfp = bdisp_hw_get_hf_addr(h_inc); bdisp_hw_build_node()
663 node->vfp = bdisp_hw_get_vf_addr(v_inc); bdisp_hw_build_node()
670 node->y_rsf = yv_inc << 16 | yh_inc; bdisp_hw_build_node()
671 node->y_rzi = BLT_RZI_DEFAULT; bdisp_hw_build_node()
672 node->y_hfp = bdisp_hw_get_hf_addr(yh_inc); bdisp_hw_build_node()
673 node->y_vfp = bdisp_hw_get_vf_addr(yv_inc); bdisp_hw_build_node()
681 node->ivmx0 = ivmx[0]; bdisp_hw_build_node()
682 node->ivmx1 = ivmx[1]; bdisp_hw_build_node()
683 node->ivmx2 = ivmx[2]; bdisp_hw_build_node()
684 node->ivmx3 = ivmx[3]; bdisp_hw_build_node()
704 if (!ctx->node[i]) { bdisp_hw_build_all_nodes()
705 dev_err(ctx->bdisp_dev->dev, "node %d is null\n", i); bdisp_hw_build_all_nodes()
715 /* Build RGB/Y node and link it to the previous node */ bdisp_hw_build_all_nodes()
716 bdisp_hw_build_node(ctx, &cfg, ctx->node[nid], bdisp_hw_build_all_nodes()
720 ctx->node[nid - 1]->nip = ctx->node_paddr[nid]; bdisp_hw_build_all_nodes()
723 /* Build additional Cb(Cr) node, link it to the previous one */ bdisp_hw_build_all_nodes()
725 bdisp_hw_build_node(ctx, &cfg, ctx->node[nid], bdisp_hw_build_all_nodes()
727 ctx->node[nid - 1]->nip = ctx->node_paddr[nid]; bdisp_hw_build_all_nodes()
737 /* Mark last node as the last */ bdisp_hw_build_all_nodes()
738 ctx->node[nid - 1]->nip = 0; bdisp_hw_build_all_nodes()
756 struct bdisp_node **node = ctx->node; bdisp_hw_save_request() local
776 *copy_node[i] = *node[i]; bdisp_hw_save_request()
812 /* Write first node addr */ bdisp_hw_update()
815 /* Find and write last node addr : this starts the HW processing */ bdisp_hw_update()
817 if (!ctx->node[node_id]->nip) bdisp_hw_update()
462 bdisp_hw_build_node(struct bdisp_ctx *ctx, struct bdisp_op_cfg *cfg, struct bdisp_node *node, enum bdisp_target_plan t_plan, int src_x_offset) bdisp_hw_build_node() argument
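The nip writes near the end of bdisp_hw_build_all_nodes() reveal the hardware contract: each node's next-instruction pointer holds the bus address of its successor, and a nip of 0 terminates the chain the blitter walks. A hedged sketch of that linking step; names mirror the listing, but the types are illustrative rather than the driver's own:

#include <linux/types.h>

struct example_node {
	u32 nip;		/* bus address of the next node, 0 = last */
	/* ... register image for one blit operation ... */
};

static void example_link_nodes(struct example_node **node,
			       dma_addr_t *paddr, int count)
{
	int i;

	for (i = 0; i < count - 1; i++)
		node[i]->nip = paddr[i + 1];	/* point at the successor */
	node[count - 1]->nip = 0;		/* terminate the chain */
}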
bdisp-debug.c 322 struct bdisp_node *node; bdisp_dbg_last_nodes() local
326 seq_puts(s, "No node built yet\n"); bdisp_dbg_last_nodes()
331 node = bdisp->dbg.copy_node[i]; bdisp_dbg_last_nodes()
332 if (!node) bdisp_dbg_last_nodes()
336 seq_printf(s, "NIP\t0x%08X\n", node->nip); bdisp_dbg_last_nodes()
337 seq_printf(s, "CIC\t0x%08X\n", node->cic); bdisp_dbg_last_nodes()
338 bdisp_dbg_dump_ins(s, node->ins); bdisp_dbg_last_nodes()
339 seq_printf(s, "ACK\t0x%08X\n", node->ack); bdisp_dbg_last_nodes()
341 seq_printf(s, "TBA\t0x%08X\n", node->tba); bdisp_dbg_last_nodes()
342 bdisp_dbg_dump_tty(s, node->tty); bdisp_dbg_last_nodes()
343 bdisp_dbg_dump_xy(s, node->txy, "TXY"); bdisp_dbg_last_nodes()
344 bdisp_dbg_dump_sz(s, node->tsz, "TSZ"); bdisp_dbg_last_nodes()
347 seq_printf(s, "S1BA\t0x%08X\n", node->s1ba); bdisp_dbg_last_nodes()
348 bdisp_dbg_dump_sty(s, node->s1ty, node->s1ba, "S1TY"); bdisp_dbg_last_nodes()
349 bdisp_dbg_dump_xy(s, node->s1xy, "S1XY"); bdisp_dbg_last_nodes()
351 seq_printf(s, "S2BA\t0x%08X\n", node->s2ba); bdisp_dbg_last_nodes()
352 bdisp_dbg_dump_sty(s, node->s2ty, node->s2ba, "S2TY"); bdisp_dbg_last_nodes()
353 bdisp_dbg_dump_xy(s, node->s2xy, "S2XY"); bdisp_dbg_last_nodes()
354 bdisp_dbg_dump_sz(s, node->s2sz, "S2SZ"); bdisp_dbg_last_nodes()
356 seq_printf(s, "S3BA\t0x%08X\n", node->s3ba); bdisp_dbg_last_nodes()
357 bdisp_dbg_dump_sty(s, node->s3ty, node->s3ba, "S3TY"); bdisp_dbg_last_nodes()
358 bdisp_dbg_dump_xy(s, node->s3xy, "S3XY"); bdisp_dbg_last_nodes()
359 bdisp_dbg_dump_sz(s, node->s3sz, "S3SZ"); bdisp_dbg_last_nodes()
363 bdisp_dbg_dump_fctl(s, node->fctl); bdisp_dbg_last_nodes()
366 bdisp_dbg_dump_rsf(s, node->rsf, "RSF"); bdisp_dbg_last_nodes()
367 bdisp_dbg_dump_rzi(s, node->rzi, "RZI"); bdisp_dbg_last_nodes()
368 seq_printf(s, "HFP\t0x%08X\n", node->hfp); bdisp_dbg_last_nodes()
369 seq_printf(s, "VFP\t0x%08X\n", node->vfp); bdisp_dbg_last_nodes()
371 bdisp_dbg_dump_rsf(s, node->y_rsf, "Y_RSF"); bdisp_dbg_last_nodes()
372 bdisp_dbg_dump_rzi(s, node->y_rzi, "Y_RZI"); bdisp_dbg_last_nodes()
373 seq_printf(s, "Y_HFP\t0x%08X\n", node->y_hfp); bdisp_dbg_last_nodes()
374 seq_printf(s, "Y_VFP\t0x%08X\n", node->y_vfp); bdisp_dbg_last_nodes()
380 bdisp_dbg_dump_ivmx(s, node->ivmx0, node->ivmx1, bdisp_dbg_last_nodes()
381 node->ivmx2, node->ivmx3); bdisp_dbg_last_nodes()
386 } while ((++i < MAX_NB_NODE) && node->nip); bdisp_dbg_last_nodes()
394 struct bdisp_node *node; bdisp_dbg_last_nodes_raw() local
399 seq_puts(s, "No node built yet\n"); bdisp_dbg_last_nodes_raw()
404 node = bdisp->dbg.copy_node[i]; bdisp_dbg_last_nodes_raw()
405 if (!node) bdisp_dbg_last_nodes_raw()
409 val = (u32 *)node; bdisp_dbg_last_nodes_raw()
412 } while ((++i < MAX_NB_NODE) && node->nip); bdisp_dbg_last_nodes_raw()
/linux-4.4.14/drivers/of/
pdt.c 90 static struct property * __init of_pdt_build_one_prop(phandle node, char *prev, of_pdt_build_one_prop() argument
115 err = of_pdt_prom_ops->nextprop(node, prev, p->name); of_pdt_build_one_prop()
120 p->length = of_pdt_prom_ops->getproplen(node, p->name); of_pdt_build_one_prop()
127 len = of_pdt_prom_ops->getproperty(node, p->name, of_pdt_build_one_prop()
137 static struct property * __init of_pdt_build_prop_list(phandle node) of_pdt_build_prop_list() argument
141 head = tail = of_pdt_build_one_prop(node, NULL, of_pdt_build_prop_list()
142 ".node", &node, sizeof(node)); of_pdt_build_prop_list()
144 tail->next = of_pdt_build_one_prop(node, NULL, NULL, NULL, 0); of_pdt_build_prop_list()
147 tail->next = of_pdt_build_one_prop(node, tail->name, of_pdt_build_prop_list()
155 static char * __init of_pdt_get_one_property(phandle node, const char *name) of_pdt_get_one_property() argument
160 len = of_pdt_prom_ops->getproplen(node, name); of_pdt_get_one_property()
163 len = of_pdt_prom_ops->getproperty(node, name, buf, len); of_pdt_get_one_property()
169 static struct device_node * __init of_pdt_create_node(phandle node, of_pdt_create_node() argument
174 if (!node) of_pdt_create_node()
182 dp->name = of_pdt_get_one_property(node, "name"); of_pdt_create_node()
183 dp->type = of_pdt_get_one_property(node, "device_type"); of_pdt_create_node()
184 dp->phandle = node; of_pdt_create_node()
186 dp->properties = of_pdt_build_prop_list(node); of_pdt_create_node()
194 phandle node) of_pdt_build_tree()
200 dp = of_pdt_create_node(node, parent); of_pdt_build_tree()
213 dp->child = of_pdt_build_tree(dp, of_pdt_prom_ops->getchild(node)); of_pdt_build_tree()
218 node = of_pdt_prom_ops->getsibling(node); of_pdt_build_tree()
193 of_pdt_build_tree(struct device_node *parent, phandle node) of_pdt_build_tree() argument
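of_pdt_build_tree() shows the classic PROM walk: create a node, recurse into its first child, then advance through siblings until the phandle runs out. A standalone sketch of that traversal shape, with the PROM callbacks declared as assumed externs standing in for of_pdt_prom_ops->getchild/getsibling:

struct ex_node {
	struct ex_node *parent, *child, *sibling;
	unsigned int phandle;
};

extern unsigned int prom_getchild(unsigned int node);	/* assumed */
extern unsigned int prom_getsibling(unsigned int node);	/* assumed */
extern struct ex_node *ex_create_node(unsigned int node,
				      struct ex_node *parent);

static struct ex_node *ex_build_tree(struct ex_node *parent,
				     unsigned int node)
{
	struct ex_node *head = NULL, **nextp = &head;

	while (node) {
		struct ex_node *dp = ex_create_node(node, parent);

		if (!dp)
			break;
		*nextp = dp;			/* append to sibling list */
		nextp = &dp->sibling;
		dp->child = ex_build_tree(dp, prom_getchild(node));
		node = prom_getsibling(node);	/* 0 ends the walk */
	}
	return head;
}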
resolver.c 26 * Find a node with the given full name by recursively following any of
27 * the child node links.
29 static struct device_node *__of_find_node_by_full_name(struct device_node *node, __of_find_node_by_full_name() argument
34 if (node == NULL) __of_find_node_by_full_name()
38 if (of_node_cmp(node->full_name, full_name) == 0) __of_find_node_by_full_name()
39 return node; __of_find_node_by_full_name()
41 for_each_child_of_node(node, child) { for_each_child_of_node()
55 struct device_node *node; of_get_tree_max_phandle() local
62 for_each_of_allnodes(node) { for_each_of_allnodes()
63 if (node->phandle != OF_PHANDLE_ILLEGAL && for_each_of_allnodes()
64 node->phandle > phandle) for_each_of_allnodes()
65 phandle = node->phandle; for_each_of_allnodes()
74 * Makes sure not to just adjust the device node's phandle value,
77 static void __of_adjust_tree_phandles(struct device_node *node, __of_adjust_tree_phandles() argument
84 /* first adjust the node's phandle direct value */ __of_adjust_tree_phandles()
85 if (node->phandle != 0 && node->phandle != OF_PHANDLE_ILLEGAL) __of_adjust_tree_phandles()
86 node->phandle += phandle_delta; __of_adjust_tree_phandles()
89 for_each_property_of_node(node, prop) { for_each_property_of_node()
106 *(uint32_t *)prop->value = cpu_to_be32(node->phandle); for_each_property_of_node()
110 for_each_child_of_node(node, child)
114 static int __of_adjust_phandle_ref(struct device_node *node, __of_adjust_phandle_ref() argument
164 /* look into the resolve node for the full path */ __of_adjust_phandle_ref()
165 refnode = __of_find_node_by_full_name(node, nodestr); __of_adjust_phandle_ref()
206 * Assumes the existence of a __local_fixups__ node at the root.
211 static int __of_adjust_tree_phandle_references(struct device_node *node, __of_adjust_tree_phandle_references() argument
220 if (node == NULL) __of_adjust_tree_phandle_references()
223 for_each_property_of_node(node, rprop) { for_each_property_of_node()
233 __func__, rprop->name, node->full_name); for_each_property_of_node()
246 __func__, rprop->name, node->full_name);
257 node->full_name);
270 for_each_child_of_node(node, child) { for_each_child_of_node()
278 __func__, child->name, node->full_name); for_each_child_of_node()
292 * of_resolve - Resolve the given node against the live tree.
297 * to the given node to resolve. This depends on the live tree
298 * having a __symbols__ node, and the resolve node the __fixups__ &
300 * The result of the operation is a resolve node whose contents
313 /* the resolve node must exist, and be detached */ of_resolve_phandles()
389 pr_err("%s: Could not find node by path '%s'\n", for_each_property_of_node()
of_pci.c 10 static inline int __of_pci_pci_compare(struct device_node *node, __of_pci_pci_compare() argument
15 devfn = of_pci_get_devfn(node); __of_pci_pci_compare()
25 struct device_node *node, *node2; of_pci_find_child_device() local
27 for_each_child_of_node(parent, node) { for_each_child_of_node()
28 if (__of_pci_pci_compare(node, devfn)) for_each_child_of_node()
29 return node; for_each_child_of_node()
31 * Some OFs create a parent node "multifunc-device" as for_each_child_of_node()
35 if (!strcmp(node->name, "multifunc-device")) { for_each_child_of_node()
36 for_each_child_of_node(node, node2) { for_each_child_of_node()
38 of_node_put(node); for_each_child_of_node()
49 * of_pci_get_devfn() - Get device and function numbers for a device node
50 * @np: device node
73 * @node: device node
78 int of_pci_parse_bus_range(struct device_node *node, struct resource *res) of_pci_parse_bus_range() argument
83 values = of_get_property(node, "bus-range", &len); of_pci_parse_bus_range()
87 res->name = node->name; of_pci_parse_bus_range()
98 * finding a property called "linux,pci-domain" of the given device node.
100 * @node: device tree node with the domain information
105 int of_get_pci_domain_nr(struct device_node *node) of_get_pci_domain_nr() argument
111 value = of_get_property(node, "linux,pci-domain", &len); of_get_pci_domain_nr()
149 * @dev: device node of the host bridge having the range property
160 * node and setup the resource mapping based on its content. It is expected
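of_pci_parse_bus_range() returns 0 and fills the resource only when a valid "bus-range" property exists, so host controller drivers usually pair it with a full-range fallback. A hedged fragment showing that pairing (helper name hypothetical):

#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_pci.h>

/* Hypothetical host-bridge probe fragment: recover the bus range,
 * falling back to 0x00-0xff when the property is absent or invalid.
 */
static void example_get_bus_range(struct device_node *node,
				  struct resource *bus_range)
{
	if (of_pci_parse_bus_range(node, bus_range)) {
		bus_range->start = 0;
		bus_range->end = 0xff;
		bus_range->flags = IORESOURCE_BUS;
	}
}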
of_reserved_mem.c 70 * res_mem_save_node() - save fdt node for second pass initialization
72 void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname, fdt_reserved_mem_save_node() argument
82 rmem->fdt_node = node; fdt_reserved_mem_save_node()
95 static int __init __reserved_mem_alloc_size(unsigned long node, __reserved_mem_alloc_size() argument
106 prop = of_get_flat_dt_prop(node, "size", &len); __reserved_mem_alloc_size()
111 pr_err("Reserved memory: invalid size property in '%s' node.\n", __reserved_mem_alloc_size()
117 nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; __reserved_mem_alloc_size()
119 prop = of_get_flat_dt_prop(node, "alignment", &len); __reserved_mem_alloc_size()
122 pr_err("Reserved memory: invalid alignment property in '%s' node.\n", __reserved_mem_alloc_size()
130 if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool")) __reserved_mem_alloc_size()
133 prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); __reserved_mem_alloc_size()
137 pr_err("Reserved memory: invalid alloc-ranges property in '%s', skipping node.\n", __reserved_mem_alloc_size()
152 pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n", __reserved_mem_alloc_size()
164 pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n", __reserved_mem_alloc_size()
169 pr_info("Reserved memory: failed to allocate memory for node '%s'\n", __reserved_mem_alloc_size()
199 pr_info("Reserved memory: initialized node %s, compatible id %s\n", __reserved_mem_init_node()
260 unsigned long node = rmem->fdt_node; fdt_init_reserved_mem() local
265 prop = of_get_flat_dt_prop(node, "phandle", &len); fdt_init_reserved_mem()
267 prop = of_get_flat_dt_prop(node, "linux,phandle", &len); fdt_init_reserved_mem()
272 err = __reserved_mem_alloc_size(node, rmem->name, fdt_init_reserved_mem()
279 static inline struct reserved_mem *__find_rmem(struct device_node *node) __find_rmem() argument
283 if (!node->phandle) __find_rmem()
287 if (reserved_mem[i].phandle == node->phandle) __find_rmem()
316 dev_info(dev, "assigned reserved memory node %s\n", rmem->name); of_reserved_mem_device_init()
dynamic.c 18 * of_node_get() - Increment refcount of a node
19 * @node: Node to inc refcount, NULL is supported to simplify writing of
22 * Returns node.
24 struct device_node *of_node_get(struct device_node *node) of_node_get() argument
26 if (node) of_node_get()
27 kobject_get(&node->kobj); of_node_get()
28 return node; of_node_get()
33 * of_node_put() - Decrement refcount of a node
34 * @node: Node to dec refcount, NULL is supported to simplify writing of
37 void of_node_put(struct device_node *node) of_node_put() argument
39 if (node) of_node_put()
40 kobject_put(&node->kobj); of_node_put()
207 /* only call notifiers if the node is attached */ of_property_notify()
239 * of_attach_node() - Plug a device node into the tree and global list.
288 * of_detach_node() - "Unplug" a node from the device tree.
290 * The caller must hold a reference to the node. The memory associated with
291 * the node is not freed until its refcount goes to zero.
316 * of_node_release() - release a dynamically allocated node
317 * @kref: kref element of the node to be released
323 struct device_node *node = kobj_to_device_node(kobj); of_node_release() local
324 struct property *prop = node->properties; of_node_release()
327 if (!of_node_check_flag(node, OF_DETACHED)) { of_node_release()
328 pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name); of_node_release()
333 if (!of_node_check_flag(node, OF_DYNAMIC)) of_node_release()
344 prop = node->deadprops; of_node_release()
345 node->deadprops = NULL; of_node_release()
348 kfree(node->full_name); of_node_release()
349 kfree(node->data); of_node_release()
350 kfree(node); of_node_release()
397 * __of_node_dup() - Duplicate or create an empty device node dynamically.
398 * @fmt: Format string (plus vargs) for new full name of the device node
400 * Create a device tree node, either by duplicating an empty node or by allocating
401 * an empty one suitable for further modification. The node data are
402 * dynamically allocated and all the node flags have the OF_DYNAMIC &
403 * OF_DETACHED bits set. Returns the newly allocated node or NULL on out of
409 struct device_node *node; __of_node_dup() local
411 node = kzalloc(sizeof(*node), GFP_KERNEL); __of_node_dup()
412 if (!node) __of_node_dup()
415 node->full_name = kvasprintf(GFP_KERNEL, fmt, vargs); __of_node_dup()
417 if (!node->full_name) { __of_node_dup()
418 kfree(node); __of_node_dup()
422 of_node_set_flag(node, OF_DYNAMIC); __of_node_dup()
423 of_node_set_flag(node, OF_DETACHED); __of_node_dup()
424 of_node_init(node); __of_node_dup()
433 if (__of_add_property(node, new_pp)) { for_each_property_of_node()
441 return node;
444 of_node_put(node); /* Frees the node and properties */
451 list_del(&ce->node); __of_changeset_entry_destroy()
662 list_for_each_entry_safe_reverse(ce, cen, &ocs->entries, node) of_changeset_destroy()
685 list_for_each_entry(ce, &ocs->entries, node) { of_changeset_apply()
689 list_for_each_entry_continue_reverse(ce, &ocs->entries, node) of_changeset_apply()
698 list_for_each_entry(ce, &ocs->entries, node) of_changeset_apply()
723 list_for_each_entry_reverse(ce, &ocs->entries, node) { of_changeset_revert()
727 list_for_each_entry_continue(ce, &ocs->entries, node) of_changeset_revert()
736 list_for_each_entry_reverse(ce, &ocs->entries, node) of_changeset_revert()
749 * @np: Pointer to device node
770 /* get a reference to the node */ of_changeset_action()
779 list_add_tail(&ce->node, &ocs->entries); of_changeset_action()
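The refcount rules documented for of_node_get()/of_node_put() matter most inside the iteration helpers: for_each_child_of_node() takes a reference on each child and drops the previous one, so finishing the loop leaves nothing to clean up, while an early exit hands the live reference to the caller. An illustrative sketch of that discipline:

#include <linux/of.h>

/* Illustrative only: find a child by name, returning it with a
 * reference held; the caller must of_node_put() the result.
 */
static struct device_node *example_find_child(struct device_node *parent,
					      const char *name)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (of_node_cmp(child->name, name) == 0)
			return child;	/* caller now owns this reference */
	}
	return NULL;			/* loop dropped all references */
}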
/linux-4.4.14/arch/x86/mm/
pat_rbtree.c 39 static int is_node_overlap(struct memtype *node, u64 start, u64 end) is_node_overlap() argument
41 if (node->start >= end || node->end <= start) is_node_overlap()
47 static u64 get_subtree_max_end(struct rb_node *node) get_subtree_max_end() argument
50 if (node) { get_subtree_max_end()
51 struct memtype *data = container_of(node, struct memtype, rb); get_subtree_max_end()
79 struct rb_node *node = root->rb_node; memtype_rb_lowest_match() local
82 while (node) { memtype_rb_lowest_match()
83 struct memtype *data = container_of(node, struct memtype, rb); memtype_rb_lowest_match()
85 if (get_subtree_max_end(node->rb_left) > start) { memtype_rb_lowest_match()
87 node = node->rb_left; memtype_rb_lowest_match()
93 node = node->rb_right; memtype_rb_lowest_match()
108 struct rb_node *node; memtype_rb_exact_match() local
113 node = rb_next(&match->rb); memtype_rb_exact_match()
114 if (node) memtype_rb_exact_match()
115 match = container_of(node, struct memtype, rb); memtype_rb_exact_match()
128 struct rb_node *node; memtype_rb_check_conflict() local
142 node = rb_next(&match->rb); memtype_rb_check_conflict()
143 while (node) { memtype_rb_check_conflict()
144 match = container_of(node, struct memtype, rb); memtype_rb_check_conflict()
154 node = rb_next(&match->rb); memtype_rb_check_conflict()
171 struct rb_node **node = &(root->rb_node); memtype_rb_insert() local
174 while (*node) { memtype_rb_insert()
175 struct memtype *data = container_of(*node, struct memtype, rb); memtype_rb_insert()
177 parent = *node; memtype_rb_insert()
181 node = &((*node)->rb_left); memtype_rb_insert()
183 node = &((*node)->rb_right); memtype_rb_insert()
187 rb_link_node(&newdata->rb, parent, node); memtype_rb_insert()
232 struct rb_node *node; rbt_memtype_copy_nth_element() local
235 node = rb_first(&memtype_rbroot); rbt_memtype_copy_nth_element()
236 while (node && pos != i) { rbt_memtype_copy_nth_element()
237 node = rb_next(node); rbt_memtype_copy_nth_element()
241 if (node) { /* pos == i */ rbt_memtype_copy_nth_element()
242 struct memtype *this = container_of(node, struct memtype, rb); rbt_memtype_copy_nth_element()
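get_subtree_max_end() is the augmentation that makes memtype_rb_lowest_match() work: every rb node caches the maximum interval end of its subtree, so the search can prune any left subtree whose cached end cannot reach past start. A standalone plain-C sketch of that lowest-overlap descent, with illustrative types in place of the kernel's rbtree:

#include <stddef.h>

struct ival {
	struct ival *left, *right;
	unsigned long long start, end;
	unsigned long long subtree_max_end;	/* augmentation */
};

static struct ival *lowest_overlap(struct ival *node,
				   unsigned long long start,
				   unsigned long long end)
{
	while (node) {
		/* descend left while an earlier overlap is still possible */
		if (node->left && node->left->subtree_max_end > start)
			node = node->left;
		else if (node->start < end && node->end > start)
			return node;		/* lowest overlapping node */
		else if (start >= node->start)
			node = node->right;	/* left side ends too early */
		else
			break;			/* no overlap exists */
	}
	return NULL;
}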
srat.c 9 * are in one chunk. Holes between them will be included in the node.
76 int pxm, node; acpi_numa_x2apic_affinity_init() local
94 node = setup_node(pxm); acpi_numa_x2apic_affinity_init()
95 if (node < 0) { acpi_numa_x2apic_affinity_init()
102 printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node); acpi_numa_x2apic_affinity_init()
105 set_apicid_to_node(apic_id, node); acpi_numa_x2apic_affinity_init()
106 node_set(node, numa_nodes_parsed); acpi_numa_x2apic_affinity_init()
109 pxm, apic_id, node); acpi_numa_x2apic_affinity_init()
116 int pxm, node; acpi_numa_processor_affinity_init() local
130 node = setup_node(pxm); acpi_numa_processor_affinity_init()
131 if (node < 0) { acpi_numa_processor_affinity_init()
143 printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node); acpi_numa_processor_affinity_init()
147 set_apicid_to_node(apic_id, node); acpi_numa_processor_affinity_init()
148 node_set(node, numa_nodes_parsed); acpi_numa_processor_affinity_init()
151 pxm, apic_id, node); acpi_numa_processor_affinity_init()
166 int node, pxm; acpi_numa_memory_affinity_init() local
184 node = setup_node(pxm); acpi_numa_memory_affinity_init()
185 if (node < 0) { acpi_numa_memory_affinity_init()
190 if (numa_add_memblk(node, start, end) < 0) acpi_numa_memory_affinity_init()
193 node_set(node, numa_nodes_parsed); acpi_numa_memory_affinity_init()
196 node, pxm, acpi_numa_memory_affinity_init()
numa.c 57 * apicid, cpu, node mappings
76 * Map cpu index to node index
81 void numa_set_node(int cpu, int node) numa_set_node() argument
87 cpu_to_node_map[cpu] = node; numa_set_node()
98 per_cpu(x86_cpu_to_node_map, cpu) = node; numa_set_node()
100 set_cpu_numa_node(cpu, node); numa_set_node()
117 unsigned int node; setup_node_to_cpumask_map() local
124 for (node = 0; node < nr_node_ids; node++) setup_node_to_cpumask_map()
125 alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); setup_node_to_cpumask_map()
140 pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n", numa_add_memblk_to()
174 * @nid: NUMA node ID of the new memblk
188 /* Allocate NODE_DATA for a node on the local memory */ alloc_node_data()
197 * Allocate node data. Try node-local memory and then any node. alloc_node_data()
205 pr_err("Cannot find %zu bytes in node %d\n", alloc_node_data()
217 printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid); alloc_node_data()
271 pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n", numa_cleanup_meminfo()
276 pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n", numa_cleanup_meminfo()
282 * Join together blocks on the same node, holes numa_cleanup_meminfo()
390 * @from: the 'from' node to set distance
391 * @to: the 'to' node to set distance
394 * Set the distance from node @from to @to to @distance. If distance table
402 * If @from or @to is higher than the highest known node or lower than zero
414 pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n", numa_set_distance()
531 * node the kernel resides in should be un-hotpluggable. numa_register_memblks()
533 * And when we come here, alloc node data won't fail. numa_register_memblks()
569 * Don't confuse VM with a node that doesn't have the for_each_node_mask()
578 /* Dump memblock with node info and return. */
585 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
668 * Must online at least one node and add memory blocks that cover all
675 printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n", dummy_numa_init()
688 * last fallback is a dummy single node config encompassing whole memory and
707 static __init int find_near_online_node(int node) find_near_online_node() argument
714 val = node_distance(node, n); for_each_online_node()
731 * emulation and faking node case (when running a kernel compiled
747 int node = numa_cpu_node(cpu); for_each_possible_cpu() local
749 if (node == NUMA_NO_NODE) for_each_possible_cpu()
751 if (!node_online(node)) for_each_possible_cpu()
752 node = find_near_online_node(node); for_each_possible_cpu()
753 numa_set_node(cpu, node); for_each_possible_cpu()
803 void debug_cpumask_set_cpu(int cpu, int node, bool enable) debug_cpumask_set_cpu() argument
807 if (node == NUMA_NO_NODE) { debug_cpumask_set_cpu()
811 mask = node_to_cpumask_map[node]; debug_cpumask_set_cpu()
813 pr_err("node_to_cpumask_map[%i] NULL\n", node); debug_cpumask_set_cpu()
823 printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n", debug_cpumask_set_cpu()
825 cpu, node, cpumask_pr_args(mask)); debug_cpumask_set_cpu()
847 * Returns a pointer to the bitmask of CPUs on Node 'node'.
849 const struct cpumask *cpumask_of_node(int node) cpumask_of_node() argument
851 if (node >= nr_node_ids) { cpumask_of_node()
853 "cpumask_of_node(%d): node > nr_node_ids(%d)\n", cpumask_of_node()
854 node, nr_node_ids); cpumask_of_node()
858 if (node_to_cpumask_map[node] == NULL) { cpumask_of_node()
861 node); cpumask_of_node()
865 return node_to_cpumask_map[node]; cpumask_of_node()
/linux-4.4.14/arch/x86/kernel/
kdebugfs.c 33 struct setup_data_node *node = file->private_data; setup_data_read() local
43 if (pos >= node->len) setup_data_read()
46 if (count > node->len - pos) setup_data_read()
47 count = node->len - pos; setup_data_read()
49 pa = node->paddr + sizeof(struct setup_data) + pos; setup_data_read()
79 struct setup_data_node *node) create_setup_data_node()
89 type = debugfs_create_x32("type", S_IRUGO, d, &node->type); create_setup_data_node()
93 data = debugfs_create_file("data", S_IRUGO, d, node, &fops_setup_data); create_setup_data_node()
108 struct setup_data_node *node; create_setup_data_nodes() local
123 node = kmalloc(sizeof(*node), GFP_KERNEL); create_setup_data_nodes()
124 if (!node) { create_setup_data_nodes()
133 kfree(node); create_setup_data_nodes()
140 node->paddr = pa_data; create_setup_data_nodes()
141 node->type = data->type; create_setup_data_nodes()
142 node->len = data->len; create_setup_data_nodes()
143 error = create_setup_data_node(d, no, node); create_setup_data_nodes()
78 create_setup_data_node(struct dentry *parent, int no, struct setup_data_node *node) create_setup_data_node() argument
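create_setup_data_nodes() is the usual debugfs shape: one directory per object, plus files bound to per-node private data through file_operations. A hedged, self-contained sketch of that shape (names hypothetical, error handling trimmed):

#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/stat.h>

static int example_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "hello from example node\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* inode->i_private carries the per-node data pointer */
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_debugfs_init(void)
{
	struct dentry *d = debugfs_create_dir("example", NULL);

	if (!d)
		return -ENOMEM;
	debugfs_create_file("data", S_IRUGO, d, NULL, &example_fops);
	return 0;
}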
/linux-4.4.14/security/selinux/
netnode.c 2 * Network node table
72 * sel_netnode_hashfn_ipv4 - IPv4 hashing function for the node table
76 * This is the IPv4 hashing function for the node interface table, it returns
88 * sel_netnode_hashfn_ipv6 - IPv6 hashing function for the node table
92 * This is the IPv6 hashing function for the node interface table, it returns
105 * sel_netnode_find - Search for a node record
110 * Search the network node table and return the record matching @addr. If an
117 struct sel_netnode *node; sel_netnode_find() local
131 list_for_each_entry_rcu(node, &sel_netnode_hash[idx].list, list) sel_netnode_find()
132 if (node->nsec.family == family) sel_netnode_find()
135 if (node->nsec.addr.ipv4 == *(__be32 *)addr) sel_netnode_find()
136 return node; sel_netnode_find()
139 if (ipv6_addr_equal(&node->nsec.addr.ipv6, sel_netnode_find()
141 return node; sel_netnode_find()
149 * sel_netnode_insert - Insert a new node into the table
150 * @node: the new node record
153 * Add a new node record to the network address hash table.
156 static void sel_netnode_insert(struct sel_netnode *node) sel_netnode_insert() argument
160 switch (node->nsec.family) { sel_netnode_insert()
162 idx = sel_netnode_hashfn_ipv4(node->nsec.addr.ipv4); sel_netnode_insert()
165 idx = sel_netnode_hashfn_ipv6(&node->nsec.addr.ipv6); sel_netnode_insert()
174 list_add_rcu(&node->list, &sel_netnode_hash[idx].list); sel_netnode_insert()
191 * @sid: node SID
203 struct sel_netnode *node; sel_netnode_sid_slow() local
207 node = sel_netnode_find(addr, family); sel_netnode_sid_slow()
208 if (node != NULL) { sel_netnode_sid_slow()
209 *sid = node->nsec.sid; sel_netnode_sid_slow()
243 " unable to determine network node label\n"); sel_netnode_sid_slow()
253 * @sid: node SID
265 struct sel_netnode *node; sel_netnode_sid() local
268 node = sel_netnode_find(addr, family); sel_netnode_sid()
269 if (node != NULL) { sel_netnode_sid()
270 *sid = node->nsec.sid; sel_netnode_sid()
289 struct sel_netnode *node, *node_tmp; sel_netnode_flush() local
293 list_for_each_entry_safe(node, node_tmp, sel_netnode_flush()
295 list_del_rcu(&node->list); sel_netnode_flush()
296 kfree_rcu(node, rcu); sel_netnode_flush()
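sel_netnode_find() is a textbook RCU hash lookup: hash the key to a bucket, then walk that bucket's list under rcu_read_lock(). A reduced plain sketch of the same pattern, with illustrative types and a power-of-two bucket count assumed:

#include <linux/list.h>
#include <linux/rculist.h>

#define EX_HASH_SIZE 256		/* assumed power of two */

struct ex_node {
	struct list_head list;
	unsigned int key;
};

static struct list_head ex_hash[EX_HASH_SIZE];

/* Caller must hold rcu_read_lock(), as with sel_netnode_find(). */
static struct ex_node *ex_find(unsigned int key)
{
	struct ex_node *node;
	unsigned int idx = key & (EX_HASH_SIZE - 1);

	list_for_each_entry_rcu(node, &ex_hash[idx], list)
		if (node->key == key)
			return node;
	return NULL;
}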
/linux-4.4.14/drivers/gpu/drm/
drm_vma_manager.c 64 * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
65 * open-file with the offset of the node will fail with -EACCES. To revoke
81 * against concurrent access. However, node allocation and destruction are left
82 * for the caller. While calling into the vma-manager, a given node must
115 * drm_vma_offset_lookup_locked() - Find node in offset space
120 * Find a node given a start address and object size. This returns the _best_
121 * match for the given node. That is, @start may point somewhere into a valid
122 * region and the given node will be returned, as long as the node spans the
131 * node = drm_vma_offset_lookup_locked(mgr);
132 * if (node)
133 * kref_get_unless_zero(container_of(node, sth, entr));
137 * Returns NULL if no suitable node can be found. Otherwise, the best match
138 * is returned. It's the caller's responsibility to make sure the node doesn't
145 struct drm_vma_offset_node *node, *best; drm_vma_offset_lookup_locked() local
153 node = rb_entry(iter, struct drm_vma_offset_node, vm_rb); drm_vma_offset_lookup_locked()
154 offset = node->vm_node.start; drm_vma_offset_lookup_locked()
157 best = node; drm_vma_offset_lookup_locked()
165 /* verify that the node spans the requested area */ drm_vma_offset_lookup_locked()
176 /* internal helper to link @node into the rb-tree */ _drm_vma_offset_add_rb()
178 struct drm_vma_offset_node *node) _drm_vma_offset_add_rb()
188 if (node->vm_node.start < iter_node->vm_node.start) _drm_vma_offset_add_rb()
190 else if (node->vm_node.start > iter_node->vm_node.start) _drm_vma_offset_add_rb()
196 rb_link_node(&node->vm_rb, parent, iter); _drm_vma_offset_add_rb()
197 rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb); _drm_vma_offset_add_rb()
201 * drm_vma_offset_add() - Add offset node to manager
203 * @node: Node to be added
206 * Add a node to the offset-manager. If the node was already added, this does
209 * After this call succeeds, you can access the offset of the node until it
224 struct drm_vma_offset_node *node, unsigned long pages) drm_vma_offset_add()
230 if (drm_mm_node_allocated(&node->vm_node)) { drm_vma_offset_add()
235 ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node, drm_vma_offset_add()
240 _drm_vma_offset_add_rb(mgr, node); drm_vma_offset_add()
249 * drm_vma_offset_remove() - Remove offset node from manager
251 * @node: Node to be removed
253 * Remove a node from the offset manager. If the node wasn't added before, this
260 struct drm_vma_offset_node *node) drm_vma_offset_remove()
264 if (drm_mm_node_allocated(&node->vm_node)) { drm_vma_offset_remove()
265 rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb); drm_vma_offset_remove()
266 drm_mm_remove_node(&node->vm_node); drm_vma_offset_remove()
267 memset(&node->vm_node, 0, sizeof(node->vm_node)); drm_vma_offset_remove()
276 * @node: Node to modify
279 * Add @filp to the list of allowed open-files for this node. If @filp is
283 * drm_vma_offset_remove() calls. You may even call it if the node is currently
287 * before destroying the node. Otherwise, you will leak memory.
294 int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp) drm_vma_node_allow() argument
302 * unlikely that an open-file is added twice to a single node so we drm_vma_node_allow()
307 write_lock(&node->vm_lock); drm_vma_node_allow()
309 iter = &node->vm_files.rb_node; drm_vma_node_allow()
333 rb_insert_color(&new->vm_rb, &node->vm_files); drm_vma_node_allow()
337 write_unlock(&node->vm_lock); drm_vma_node_allow()
345 * @node: Node to modify
348 * Decrement the ref-count of @filp in the list of allowed open-files on @node.
356 void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp) drm_vma_node_revoke() argument
361 write_lock(&node->vm_lock); drm_vma_node_revoke()
363 iter = node->vm_files.rb_node; drm_vma_node_revoke()
368 rb_erase(&entry->vm_rb, &node->vm_files); drm_vma_node_revoke()
379 write_unlock(&node->vm_lock); drm_vma_node_revoke()
385 * @node: Node to check
388 * Search the list in @node whether @filp is currently on the list of allowed
396 bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node, drm_vma_node_is_allowed() argument
402 read_lock(&node->vm_lock); drm_vma_node_is_allowed()
404 iter = node->vm_files.rb_node; drm_vma_node_is_allowed()
415 read_unlock(&node->vm_lock); drm_vma_node_is_allowed()
177 _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr, struct drm_vma_offset_node *node) _drm_vma_offset_add_rb() argument
223 drm_vma_offset_add(struct drm_vma_offset_manager *mgr, struct drm_vma_offset_node *node, unsigned long pages) drm_vma_offset_add() argument
259 drm_vma_offset_remove(struct drm_vma_offset_manager *mgr, struct drm_vma_offset_node *node) drm_vma_offset_remove() argument
drm_mm.c 76 * steep cliff not a real concern. Removing a node again is O(1).
107 struct drm_mm_node *node, drm_mm_insert_helper()
118 BUG_ON(node->allocated); drm_mm_insert_helper()
147 node->start = adj_start; drm_mm_insert_helper()
148 node->size = size; drm_mm_insert_helper()
149 node->mm = mm; drm_mm_insert_helper()
150 node->color = color; drm_mm_insert_helper()
151 node->allocated = 1; drm_mm_insert_helper()
153 INIT_LIST_HEAD(&node->hole_stack); drm_mm_insert_helper()
154 list_add(&node->node_list, &hole_node->node_list); drm_mm_insert_helper()
156 BUG_ON(node->start + node->size > adj_end); drm_mm_insert_helper()
158 node->hole_follows = 0; drm_mm_insert_helper()
159 if (__drm_mm_hole_node_start(node) < hole_end) { drm_mm_insert_helper()
160 list_add(&node->hole_stack, &mm->hole_stack); drm_mm_insert_helper()
161 node->hole_follows = 1; drm_mm_insert_helper()
166 * drm_mm_reserve_node - insert a pre-initialized node
167 * @mm: drm_mm allocator to insert @node into
168 * @node: drm_mm_node to insert
177 * 0 on success, -ENOSPC if there's no hole where @node is.
179 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) drm_mm_reserve_node() argument
182 u64 end = node->start + node->size; drm_mm_reserve_node()
186 BUG_ON(node == NULL); drm_mm_reserve_node()
188 /* Find the relevant hole to add our node to */ drm_mm_for_each_hole()
190 if (hole_start > node->start || hole_end < end) drm_mm_for_each_hole()
193 node->mm = mm; drm_mm_for_each_hole()
194 node->allocated = 1; drm_mm_for_each_hole()
196 INIT_LIST_HEAD(&node->hole_stack); drm_mm_for_each_hole()
197 list_add(&node->node_list, &hole->node_list); drm_mm_for_each_hole()
199 if (node->start == hole_start) { drm_mm_for_each_hole()
204 node->hole_follows = 0; drm_mm_for_each_hole()
206 list_add(&node->hole_stack, &mm->hole_stack); drm_mm_for_each_hole()
207 node->hole_follows = 1; drm_mm_for_each_hole()
218 * drm_mm_insert_node_generic - search for space and insert @node
220 * @node: preallocate node to insert
223 * @color: opaque tag value to use for this node
227 * The preallocated node must be cleared to 0.
232 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, drm_mm_insert_node_generic() argument
245 drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags); drm_mm_insert_node_generic()
251 struct drm_mm_node *node, drm_mm_insert_helper_range()
263 BUG_ON(!hole_node->hole_follows || node->allocated); drm_mm_insert_helper_range()
294 node->start = adj_start; drm_mm_insert_helper_range()
295 node->size = size; drm_mm_insert_helper_range()
296 node->mm = mm; drm_mm_insert_helper_range()
297 node->color = color; drm_mm_insert_helper_range()
298 node->allocated = 1; drm_mm_insert_helper_range()
300 INIT_LIST_HEAD(&node->hole_stack); drm_mm_insert_helper_range()
301 list_add(&node->node_list, &hole_node->node_list); drm_mm_insert_helper_range()
303 BUG_ON(node->start < start); drm_mm_insert_helper_range()
304 BUG_ON(node->start < adj_start); drm_mm_insert_helper_range()
305 BUG_ON(node->start + node->size > adj_end); drm_mm_insert_helper_range()
306 BUG_ON(node->start + node->size > end); drm_mm_insert_helper_range()
308 node->hole_follows = 0; drm_mm_insert_helper_range()
309 if (__drm_mm_hole_node_start(node) < hole_end) { drm_mm_insert_helper_range()
310 list_add(&node->hole_stack, &mm->hole_stack); drm_mm_insert_helper_range()
311 node->hole_follows = 1; drm_mm_insert_helper_range()
316 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
318 * @node: preallocate node to insert
321 * @color: opaque tag value to use for this node
322 * @start: start of the allowed range for this node
323 * @end: end of the allowed range for this node
327 * The preallocated node must be cleared to 0.
332 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, drm_mm_insert_node_in_range_generic() argument
347 drm_mm_insert_helper_range(hole_node, node, drm_mm_insert_node_in_range_generic()
355 * drm_mm_remove_node - Remove a memory node from the allocator.
356 * @node: drm_mm_node to remove
358 * This just removes a node from its drm_mm allocator. The node does not need to
360 * allocator. It is a bug to call this function on an un-allocated node.
362 void drm_mm_remove_node(struct drm_mm_node *node) drm_mm_remove_node() argument
364 struct drm_mm *mm = node->mm; drm_mm_remove_node()
367 if (WARN_ON(!node->allocated)) drm_mm_remove_node()
370 BUG_ON(node->scanned_block || node->scanned_prev_free drm_mm_remove_node()
371 || node->scanned_next_free); drm_mm_remove_node()
374 list_entry(node->node_list.prev, struct drm_mm_node, node_list); drm_mm_remove_node()
376 if (node->hole_follows) { drm_mm_remove_node()
377 BUG_ON(__drm_mm_hole_node_start(node) == drm_mm_remove_node()
378 __drm_mm_hole_node_end(node)); drm_mm_remove_node()
379 list_del(&node->hole_stack); drm_mm_remove_node()
381 BUG_ON(__drm_mm_hole_node_start(node) != drm_mm_remove_node()
382 __drm_mm_hole_node_end(node)); drm_mm_remove_node()
391 list_del(&node->node_list); drm_mm_remove_node()
392 node->allocated = 0; drm_mm_remove_node()
550 * removing an object is O(1), and since freeing a node is also O(1) the overall
565 * change the place a node is allocated from within a suitable hole.
598 * change the place a node is allocated from within a suitable hole.
625 * drm_mm_scan_add_block - add a node to the scan list
626 * @node: drm_mm_node to add
628 * Add a node to the scan list that might be freed to make space for the desired
634 bool drm_mm_scan_add_block(struct drm_mm_node *node) drm_mm_scan_add_block() argument
636 struct drm_mm *mm = node->mm; drm_mm_scan_add_block()
643 BUG_ON(node->scanned_block); drm_mm_scan_add_block()
644 node->scanned_block = 1; drm_mm_scan_add_block()
646 prev_node = list_entry(node->node_list.prev, struct drm_mm_node, drm_mm_scan_add_block()
649 node->scanned_preceeds_hole = prev_node->hole_follows; drm_mm_scan_add_block()
651 list_del(&node->node_list); drm_mm_scan_add_block()
652 node->node_list.prev = &prev_node->node_list; drm_mm_scan_add_block()
653 node->node_list.next = &mm->prev_scanned_node->node_list; drm_mm_scan_add_block()
654 mm->prev_scanned_node = node; drm_mm_scan_add_block()
682 * drm_mm_scan_remove_block - remove a node from the scan list
683 * @node: drm_mm_node to remove
697 bool drm_mm_scan_remove_block(struct drm_mm_node *node) drm_mm_scan_remove_block() argument
699 struct drm_mm *mm = node->mm; drm_mm_scan_remove_block()
704 BUG_ON(!node->scanned_block); drm_mm_scan_remove_block()
705 node->scanned_block = 0; drm_mm_scan_remove_block()
707 prev_node = list_entry(node->node_list.prev, struct drm_mm_node, drm_mm_scan_remove_block()
710 prev_node->hole_follows = node->scanned_preceeds_hole; drm_mm_scan_remove_block()
711 list_add(&node->node_list, &prev_node->node_list); drm_mm_scan_remove_block()
713 return (drm_mm_hole_node_end(node) > mm->scan_hit_start && drm_mm_scan_remove_block()
714 node->start < mm->scan_hit_end); drm_mm_scan_remove_block()
723 * True if the allocator is completely free, false if there's still a node
106 drm_mm_insert_helper(struct drm_mm_node *hole_node, struct drm_mm_node *node, u64 size, unsigned alignment, unsigned long color, enum drm_mm_allocator_flags flags) drm_mm_insert_helper() argument
250 drm_mm_insert_helper_range(struct drm_mm_node *hole_node, struct drm_mm_node *node, u64 size, unsigned alignment, unsigned long color, u64 start, u64 end, enum drm_mm_allocator_flags flags) drm_mm_insert_helper_range() argument
/linux-4.4.14/net/tipc/
name_distr.c 51 u32 node; member in struct:distr_queue_item
91 struct tipc_node *node; named_cluster_distribute() local
95 list_for_each_entry_rcu(node, &tn->node_list, list) { named_cluster_distribute()
96 dnode = node->addr; named_cluster_distribute()
99 if (!tipc_node_is_up(node)) named_cluster_distribute()
113 * tipc_named_publish - tell other nodes about a new publication by this node
139 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
163 * named_distribute - prepare name info for bulk distribution to another node
165 * @dnode: node to be updated
210 * tipc_named_node_up - tell specified node about all publications by this node
232 struct tipc_node *node; tipc_publ_subscribe() local
237 node = tipc_node_find(net, addr); tipc_publ_subscribe()
238 if (!node) { tipc_publ_subscribe()
239 pr_warn("Node subscription rejected, unknown node 0x%x\n", tipc_publ_subscribe()
244 tipc_node_lock(node); tipc_publ_subscribe()
245 list_add_tail(&publ->nodesub_list, &node->publ_list); tipc_publ_subscribe()
246 tipc_node_unlock(node); tipc_publ_subscribe()
247 tipc_node_put(node); tipc_publ_subscribe()
253 struct tipc_node *node; tipc_publ_unsubscribe() local
255 node = tipc_node_find(net, addr); tipc_publ_unsubscribe()
256 if (!node) tipc_publ_unsubscribe()
259 tipc_node_lock(node); tipc_publ_unsubscribe()
261 tipc_node_unlock(node); tipc_publ_unsubscribe()
262 tipc_node_put(node); tipc_publ_unsubscribe()
266 * tipc_publ_purge - remove publication associated with a failed node
268 * Invoked for each publication issued by a newly failed node.
278 publ->node, publ->ref, publ->key); tipc_publ_purge()
284 pr_err("Unable to remove publication from failed node\n" tipc_publ_purge()
285 " (type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n", tipc_publ_purge()
286 publ->type, publ->lower, publ->node, publ->ref, tipc_publ_purge()
309 u32 node, u32 dtype) tipc_update_nametbl()
317 TIPC_CLUSTER_SCOPE, node, tipc_update_nametbl()
320 tipc_publ_subscribe(net, publ, node); tipc_update_nametbl()
326 node, ntohl(i->ref), tipc_update_nametbl()
329 tipc_publ_unsubscribe(net, publ, node); tipc_update_nametbl()
343 static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node) tipc_named_add_backlog() argument
352 e->node = node; tipc_named_add_backlog()
370 if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype)) tipc_named_process_backlog()
373 tipc_addr_string_fill(addr, e->node); tipc_named_process_backlog()
386 * tipc_named_rcv - process name table update messages sent by another node
394 u32 node; tipc_named_rcv() local
404 node = msg_orignode(msg); tipc_named_rcv()
406 if (!tipc_update_nametbl(net, item, node, mtype)) tipc_named_rcv()
407 tipc_named_add_backlog(item, mtype, node); tipc_named_rcv()
420 * All name table entries published by this node are updated to reflect
421 * the node's new network address.
434 publ->node = tn->own_addr; tipc_named_reinit()
308 tipc_update_nametbl(struct net *net, struct distr_item *i, u32 node, u32 dtype) tipc_update_nametbl() argument
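
tipc_named_rcv() tries to apply each received item and, on failure, parks it on a backlog that tipc_named_process_backlog() retries later. A self-contained sketch of that apply-or-defer pattern, with hypothetical names and a plain linked list standing in for the kernel list machinery (the original also ages entries out; that is omitted here):

#include <stdbool.h>
#include <stdlib.h>

struct pending { unsigned int node; struct pending *next; };

static struct pending *backlog;

/* Stub standing in for tipc_update_nametbl(): assume it can fail
 * transiently, e.g. while the originating node is not yet known. */
static bool apply_update(unsigned int node)
{
        return node != 0;
}

static void rcv_update(unsigned int node)
{
        struct pending *e;

        if (apply_update(node))
                return;
        e = malloc(sizeof(*e));         /* defer, as tipc_named_add_backlog() does */
        if (!e)
                return;
        e->node = node;
        e->next = backlog;
        backlog = e;
}

static void process_backlog(void)
{
        struct pending **pp = &backlog;

        while (*pp) {
                struct pending *e = *pp;

                if (apply_update(e->node)) {    /* retry; unlink on success */
                        *pp = e->next;
                        free(e);
                } else {
                        pp = &e->next;          /* keep for a later pass */
                }
        }
}
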
H A Dnode.c2 * net/tipc/node.c: TIPC node management routines
39 #include "node.h"
75 static void tipc_node_delete(struct tipc_node *node);
96 * usually be much smaller (typically only a single node).
105 struct tipc_node *node = container_of(kref, struct tipc_node, kref); tipc_node_kref_release() local
107 tipc_node_delete(node); tipc_node_kref_release()
110 void tipc_node_put(struct tipc_node *node) tipc_node_put() argument
112 kref_put(&node->kref, tipc_node_kref_release); tipc_node_put()
115 static void tipc_node_get(struct tipc_node *node) tipc_node_get() argument
117 kref_get(&node->kref); tipc_node_get()
121 * tipc_node_find - locate specified node object, if it exists
126 struct tipc_node *node; tipc_node_find() local
132 hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)], tipc_node_find()
134 if (node->addr == addr) { tipc_node_find()
135 tipc_node_get(node); tipc_node_find()
137 return node; tipc_node_find()
215 static void tipc_node_delete(struct tipc_node *node) tipc_node_delete() argument
217 list_del_rcu(&node->list); tipc_node_delete()
218 hlist_del_rcu(&node->hash); tipc_node_delete()
219 kfree(node->bc_entry.link); tipc_node_delete()
220 kfree_rcu(node, rcu); tipc_node_delete()
226 struct tipc_node *node, *t_node; tipc_node_stop() local
229 list_for_each_entry_safe(node, t_node, &tn->node_list, list) { tipc_node_stop()
230 if (del_timer(&node->timer)) tipc_node_stop()
231 tipc_node_put(node); tipc_node_stop()
232 tipc_node_put(node); tipc_node_stop()
239 struct tipc_node *node; tipc_node_add_conn() local
246 node = tipc_node_find(net, dnode); tipc_node_add_conn()
247 if (!node) { tipc_node_add_conn()
248 pr_warn("Connecting sock to node 0x%x failed\n", dnode); tipc_node_add_conn()
260 tipc_node_lock(node); tipc_node_add_conn()
261 list_add_tail(&conn->list, &node->conn_sks); tipc_node_add_conn()
262 tipc_node_unlock(node); tipc_node_add_conn()
264 tipc_node_put(node); tipc_node_add_conn()
270 struct tipc_node *node; tipc_node_remove_conn() local
276 node = tipc_node_find(net, dnode); tipc_node_remove_conn()
277 if (!node) tipc_node_remove_conn()
280 tipc_node_lock(node); tipc_node_remove_conn()
281 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) { tipc_node_remove_conn()
287 tipc_node_unlock(node); tipc_node_remove_conn()
288 tipc_node_put(node); tipc_node_remove_conn()
291 /* tipc_node_timeout - handle expiration of node timer
530 /* Prepare to validate requesting node's signature and media address */ tipc_node_check_dest()
549 * chosen the same node address and signature as an tipc_node_check_dest()
563 /* Peer node rebooted. Two possibilities: tipc_node_check_dest()
566 * receiving a discovery message from that node. tipc_node_check_dest()
567 * (The peer happened to receive one from this node first). tipc_node_check_dest()
576 /* The peer node has rebooted. tipc_node_check_dest()
660 /* tipc_node_fsm_evt - node finite state machine
661 * Determines when contact is allowed with peer node
827 pr_err("Unknown node fsm state %x\n", state); tipc_node_fsm_evt()
834 pr_err("Illegal node fsm evt %x in state %x\n", evt, state); tipc_node_fsm_evt()
878 /* Notify publications from this node */ node_lost_contact()
881 /* Notify sockets connected to node */ list_for_each_entry_safe()
898 * @node: peer node address
908 struct tipc_node *node = tipc_node_find(net, addr); tipc_node_get_linkname() local
910 if (!node) tipc_node_get_linkname()
916 tipc_node_lock(node); tipc_node_get_linkname()
917 link = node->links[bearer_id].link; tipc_node_get_linkname()
923 tipc_node_unlock(node); tipc_node_get_linkname()
924 tipc_node_put(node); tipc_node_get_linkname()
928 void tipc_node_unlock(struct tipc_node *node) tipc_node_unlock() argument
930 struct net *net = node->net; tipc_node_unlock()
932 u32 flags = node->action_flags; tipc_node_unlock()
937 spin_unlock_bh(&node->lock); tipc_node_unlock()
941 addr = node->addr; tipc_node_unlock()
942 link_id = node->link_id; tipc_node_unlock()
943 publ_list = &node->publ_list; tipc_node_unlock()
945 node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP | tipc_node_unlock()
948 spin_unlock_bh(&node->lock); tipc_node_unlock()
966 /* Caller should hold node lock for the passed node */ __tipc_nl_add_node()
967 static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node) __tipc_nl_add_node() argument
981 if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr)) __tipc_nl_add_node()
983 if (tipc_node_is_up(node)) __tipc_nl_add_node()
1018 * @dnode: address of destination node
1079 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
1099 /* If NACK for other node, let rcv link for that node peek into it */ tipc_node_bc_rcv()
1140 * tipc_node_check_state - check and if necessary update node state
1177 /* Update node accessibility if applicable */ tipc_node_check_state()
1265 * tipc_rcv - process TIPC packets/messages arriving from off-node
1298 /* Locate neighboring node that sent packet */ tipc_rcv()
1316 /* Check and if necessary update node state */ tipc_rcv()
1351 struct tipc_node *node; tipc_nl_node_dump() local
1363 node = tipc_node_find(net, last_addr); tipc_nl_node_dump()
1364 if (!node) { tipc_nl_node_dump()
1370 * the NLM_F_DUMP_INTR flag set if the node state tipc_nl_node_dump()
1376 tipc_node_put(node); tipc_nl_node_dump()
1379 list_for_each_entry_rcu(node, &tn->node_list, list) { tipc_nl_node_dump()
1381 if (node->addr == last_addr) tipc_nl_node_dump()
1387 tipc_node_lock(node); tipc_nl_node_dump()
1388 err = __tipc_nl_add_node(&msg, node); tipc_nl_node_dump()
1390 last_addr = node->addr; tipc_nl_node_dump()
1391 tipc_node_unlock(node); tipc_nl_node_dump()
1395 tipc_node_unlock(node); tipc_nl_node_dump()
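
tipc_node_get()/tipc_node_put() are a thin wrapper over kref: the final put fires tipc_node_kref_release(), which tears the node down. The same discipline in self-contained form, using C11 atomics instead of the kernel's kref API (names illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
        atomic_int refs;        /* starts at 1 for the creator */
        /* ... payload ... */
};

static void obj_get(struct obj *o)
{
        atomic_fetch_add_explicit(&o->refs, 1, memory_order_relaxed);
}

static void obj_put(struct obj *o)
{
        /* fetch_sub returns the old value: 1 means this was the last
         * reference, so run the release path (free() stands in for
         * tipc_node_kref_release()/tipc_node_delete()). */
        if (atomic_fetch_sub_explicit(&o->refs, 1, memory_order_acq_rel) == 1)
                free(o);
}
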
/linux-4.4.14/fs/ext4/
H A Dblock_validity.c23 struct rb_node node; member in struct:ext4_system_zone
61 struct rb_node **n = &sbi->system_blks.rb_node, *node; add_system_zone() local
66 entry = rb_entry(parent, struct ext4_system_zone, node); add_system_zone()
78 node); add_system_zone()
90 new_node = &new_entry->node; add_system_zone()
97 node = rb_prev(new_node); add_system_zone()
98 if (node) { add_system_zone()
99 entry = rb_entry(node, struct ext4_system_zone, node); add_system_zone()
103 rb_erase(node, &sbi->system_blks); add_system_zone()
109 node = rb_next(new_node); add_system_zone()
110 if (node) { add_system_zone()
111 entry = rb_entry(node, struct ext4_system_zone, node); add_system_zone()
114 rb_erase(node, &sbi->system_blks); add_system_zone()
123 struct rb_node *node; debug_print_tree() local
128 node = rb_first(&sbi->system_blks); debug_print_tree()
129 while (node) { debug_print_tree()
130 entry = rb_entry(node, struct ext4_system_zone, node); debug_print_tree()
134 node = rb_next(node); debug_print_tree()
185 &EXT4_SB(sb)->system_blks, node) ext4_release_system_zone()
209 entry = rb_entry(n, struct ext4_system_zone, node); ext4_data_block_valid()
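
ext4_data_block_valid() descends the system-zone tree comparing the block number against each entry's [start, start+count) range. A stand-alone rendering of that descent over a plain binary search tree, simplified to a single-block query (illustrative types, not the kernel rbtree API):

struct zone {
        unsigned long long start, count;    /* reserved [start, start+count) */
        struct zone *left, *right;
};

static int block_in_system_zone(const struct zone *z, unsigned long long blk)
{
        while (z) {
                if (blk < z->start)
                        z = z->left;
                else if (blk >= z->start + z->count)
                        z = z->right;
                else
                        return 1;           /* inside a reserved zone */
        }
        return 0;                           /* block is a valid data block */
}
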
/linux-4.4.14/arch/c6x/platforms/
H A Demif.c47 struct device_node *node; c6x_emifa_init() local
52 node = of_find_matching_node(NULL, emifa_match); c6x_emifa_init()
53 if (!node) c6x_emifa_init()
56 regs = of_iomap(node, 0); c6x_emifa_init()
61 err = of_property_read_u32_array(node, "ti,dscr-dev-enable", &val, 1); c6x_emifa_init()
66 p = of_get_property(node, "ti,emifa-ce-config", &len); c6x_emifa_init()
75 err = of_property_read_u32_array(node, "ti,emifa-burst-priority", &val, 1); c6x_emifa_init()
79 err = of_property_read_u32_array(node, "ti,emifa-async-wait-control", &val, 1); c6x_emifa_init()
84 of_node_put(node); c6x_emifa_init()
/linux-4.4.14/include/linux/regulator/
H A Dof_regulator.h22 struct device_node *node,
24 extern int of_regulator_match(struct device *dev, struct device_node *node,
30 struct device_node *node, of_get_regulator_init_data()
37 struct device_node *node, of_regulator_match()
29 of_get_regulator_init_data(struct device *dev, struct device_node *node, const struct regulator_desc *desc) of_get_regulator_init_data() argument
36 of_regulator_match(struct device *dev, struct device_node *node, struct of_regulator_match *matches, unsigned int num_matches) of_regulator_match() argument
/linux-4.4.14/arch/powerpc/platforms/pasemi/
H A Dmisc.c34 static int __init find_i2c_driver(struct device_node *node, find_i2c_driver() argument
40 if (!of_device_is_compatible(node, i2c_devices[i].of_device)) find_i2c_driver()
54 struct device_node *node; pasemi_register_i2c_devices() local
63 node = NULL; pasemi_register_i2c_devices()
64 while ((node = of_get_next_child(adap_node, node))) { pasemi_register_i2c_devices()
69 addr = of_get_property(node, "reg", &len); pasemi_register_i2c_devices()
78 info.irq = irq_of_parse_and_map(node, 0); pasemi_register_i2c_devices()
82 if (find_i2c_driver(node, &info) < 0) pasemi_register_i2c_devices()
/linux-4.4.14/arch/s390/include/asm/
H A Dtopology.h67 /* Returns a pointer to the cpumask of CPUs on node 'node'. */
69 static inline const struct cpumask *cpumask_of_node(int node) cpumask_of_node() argument
71 return &node_to_cpumask_map[node]; cpumask_of_node()
75 * Returns the number of the node containing node 'node'. This
78 #define parent_node(node) (node)
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
H A Dgk20a.c137 struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory); gk20a_instobj_cpu_map_dma() local
138 struct device *dev = node->base.imem->base.subdev.device->dev; gk20a_instobj_cpu_map_dma()
145 pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT); gk20a_instobj_cpu_map_dma()
159 struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); gk20a_instobj_cpu_map_iommu() local
162 return vmap(node->pages, npages, VM_MAP, gk20a_instobj_cpu_map_iommu()
194 struct gk20a_instobj *node = gk20a_instobj(memory); gk20a_instobj_acquire() local
195 struct gk20a_instmem *imem = node->imem; gk20a_instobj_acquire()
204 if (node->vaddr) { gk20a_instobj_acquire()
206 list_del(&node->vaddr_node); gk20a_instobj_acquire()
214 node->vaddr = imem->cpu_map(memory); gk20a_instobj_acquire()
216 if (!node->vaddr) { gk20a_instobj_acquire()
229 return node->vaddr; gk20a_instobj_acquire()
235 struct gk20a_instobj *node = gk20a_instobj(memory); gk20a_instobj_release() local
236 struct gk20a_instmem *imem = node->imem; gk20a_instobj_release()
243 list_add_tail(&node->vaddr_node, &imem->vaddr_lru); gk20a_instobj_release()
254 struct gk20a_instobj *node = gk20a_instobj(memory); gk20a_instobj_rd32() local
256 return node->vaddr[offset / 4]; gk20a_instobj_rd32()
262 struct gk20a_instobj *node = gk20a_instobj(memory); gk20a_instobj_wr32() local
264 node->vaddr[offset / 4] = data; gk20a_instobj_wr32()
270 struct gk20a_instobj *node = gk20a_instobj(memory); gk20a_instobj_map() local
272 nvkm_vm_map_at(vma, offset, &node->mem); gk20a_instobj_map()
279 gk20a_instobj_dtor(struct gk20a_instobj *node) gk20a_instobj_dtor() argument
281 struct gk20a_instmem *imem = node->imem; gk20a_instobj_dtor()
287 if (!node->vaddr) gk20a_instobj_dtor()
291 if (obj == node) { gk20a_instobj_dtor()
296 vunmap(node->vaddr); gk20a_instobj_dtor()
297 node->vaddr = NULL; gk20a_instobj_dtor()
298 imem->vaddr_use -= nvkm_memory_size(&node->memory); gk20a_instobj_dtor()
309 struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory); gk20a_instobj_dtor_dma() local
310 struct gk20a_instmem *imem = node->base.imem; gk20a_instobj_dtor_dma()
313 gk20a_instobj_dtor(&node->base); gk20a_instobj_dtor_dma()
315 if (unlikely(!node->cpuaddr)) gk20a_instobj_dtor_dma()
318 dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->cpuaddr, gk20a_instobj_dtor_dma()
319 node->handle, &imem->attrs); gk20a_instobj_dtor_dma()
322 return node; gk20a_instobj_dtor_dma()
328 struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); gk20a_instobj_dtor_iommu() local
329 struct gk20a_instmem *imem = node->base.imem; gk20a_instobj_dtor_iommu()
334 gk20a_instobj_dtor(&node->base); gk20a_instobj_dtor_iommu()
336 if (unlikely(list_empty(&node->base.mem.regions))) gk20a_instobj_dtor_iommu()
339 r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node, gk20a_instobj_dtor_iommu()
346 for (i = 0; i < node->base.mem.size; i++) { gk20a_instobj_dtor_iommu()
349 dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE, gk20a_instobj_dtor_iommu()
351 __free_page(node->pages[i]); gk20a_instobj_dtor_iommu()
360 return node; gk20a_instobj_dtor_iommu()
393 struct gk20a_instobj_dma *node; gk20a_instobj_ctor_dma() local
397 if (!(node = kzalloc(sizeof(*node), GFP_KERNEL))) gk20a_instobj_ctor_dma()
399 *_node = &node->base; gk20a_instobj_ctor_dma()
401 nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory); gk20a_instobj_ctor_dma()
403 node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, gk20a_instobj_ctor_dma()
404 &node->handle, GFP_KERNEL, gk20a_instobj_ctor_dma()
406 if (!node->cpuaddr) { gk20a_instobj_ctor_dma()
412 if (unlikely(node->handle & (align - 1))) gk20a_instobj_ctor_dma()
415 &node->handle, align); gk20a_instobj_ctor_dma()
418 node->r.type = 12; gk20a_instobj_ctor_dma()
419 node->r.offset = node->handle >> 12; gk20a_instobj_ctor_dma()
420 node->r.length = (npages << PAGE_SHIFT) >> 12; gk20a_instobj_ctor_dma()
422 node->base.mem.offset = node->handle; gk20a_instobj_ctor_dma()
424 INIT_LIST_HEAD(&node->base.mem.regions); gk20a_instobj_ctor_dma()
425 list_add_tail(&node->r.rl_entry, &node->base.mem.regions); gk20a_instobj_ctor_dma()
434 struct gk20a_instobj_iommu *node; gk20a_instobj_ctor_iommu() local
445 if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) + gk20a_instobj_ctor_iommu()
446 sizeof(*node->dma_addrs)) * npages), GFP_KERNEL))) gk20a_instobj_ctor_iommu()
448 *_node = &node->base; gk20a_instobj_ctor_iommu()
449 node->dma_addrs = (void *)(node->pages + npages); gk20a_instobj_ctor_iommu()
451 nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory); gk20a_instobj_ctor_iommu()
462 node->pages[i] = p; gk20a_instobj_ctor_iommu()
469 node->dma_addrs[i] = dma_adr; gk20a_instobj_ctor_iommu()
486 ret = iommu_map(imem->domain, offset, node->dma_addrs[i], gk20a_instobj_ctor_iommu()
502 node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift; gk20a_instobj_ctor_iommu()
504 INIT_LIST_HEAD(&node->base.mem.regions); gk20a_instobj_ctor_iommu()
505 list_add_tail(&r->rl_entry, &node->base.mem.regions); gk20a_instobj_ctor_iommu()
515 for (i = 0; i < npages && node->pages[i] != NULL; i++) { gk20a_instobj_ctor_iommu()
516 dma_addr_t dma_addr = node->dma_addrs[i]; gk20a_instobj_ctor_iommu()
520 __free_page(node->pages[i]); gk20a_instobj_ctor_iommu()
532 struct gk20a_instobj *node = NULL; gk20a_instobj_new() local
544 align, &node); gk20a_instobj_new()
547 align, &node); gk20a_instobj_new()
548 *pmemory = node ? &node->memory : NULL; gk20a_instobj_new()
552 node->imem = imem; gk20a_instobj_new()
555 node->mem.size = size >> 12; gk20a_instobj_new()
556 node->mem.memtype = 0; gk20a_instobj_new()
557 node->mem.page_shift = 12; gk20a_instobj_new()
560 size, align, node->mem.offset); gk20a_instobj_new()
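
gk20a_instobj_acquire()/release() above implement a small LRU cache of CPU mappings: an acquired object is pulled off the reclaim list, a released one goes to the LRU tail so idle mappings are vunmap()ed first when the vmap budget runs out. A self-contained sketch of that discipline (plain C, hypothetical names; locking and the eviction pass omitted, and acquire/release are assumed to alternate as the refcounted original guarantees):

#include <stdlib.h>

struct mapping {
        void *vaddr;                        /* NULL until first acquire */
        struct mapping *prev, *next;        /* LRU links; NULL while in use */
};

static struct mapping *lru_head, *lru_tail;

static void lru_del(struct mapping *m)
{
        if (m->prev) m->prev->next = m->next; else lru_head = m->next;
        if (m->next) m->next->prev = m->prev; else lru_tail = m->prev;
        m->prev = m->next = NULL;
}

static void lru_add_tail(struct mapping *m)
{
        m->prev = lru_tail;
        m->next = NULL;
        if (lru_tail)
                lru_tail->next = m;
        else
                lru_head = m;
        lru_tail = m;
}

static void *cpu_map(struct mapping *m)     /* stand-in for imem->cpu_map() */
{
        (void)m;
        return malloc(1);                   /* pretend mapping, sketch only */
}

static void *acquire(struct mapping *m)
{
        if (m->vaddr)
                lru_del(m);                 /* cached: shield from reclaim */
        else
                m->vaddr = cpu_map(m);      /* first use: build the mapping */
        return m->vaddr;
}

static void release(struct mapping *m)
{
        lru_add_tail(m);                    /* still mapped, but reclaimable */
}
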
/linux-4.4.14/arch/mips/netlogic/xlp/
H A Dusb-init-xlp2.c87 #define nlm_xlpii_get_usb_pcibase(node, inst) \
89 XLP9XX_IO_USB_OFFSET(node, inst) : \
90 XLP2XX_IO_USB_OFFSET(node, inst))
91 #define nlm_xlpii_get_usb_regbase(node, inst) \
92 (nlm_xlpii_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
118 int node, irq; xlp9xx_usb_ack() local
120 /* Find the node and irq on the node */ xlp9xx_usb_ack()
122 node = data->irq / NLM_IRQS_PER_NODE; xlp9xx_usb_ack()
126 port_addr = nlm_xlpii_get_usb_regbase(node, 1); xlp9xx_usb_ack()
129 port_addr = nlm_xlpii_get_usb_regbase(node, 2); xlp9xx_usb_ack()
132 port_addr = nlm_xlpii_get_usb_regbase(node, 3); xlp9xx_usb_ack()
135 pr_err("No matching USB irq %d node %d!\n", irq, node); xlp9xx_usb_ack()
141 static void nlm_xlpii_usb_hw_reset(int node, int port) nlm_xlpii_usb_hw_reset() argument
147 port_addr = nlm_xlpii_get_usb_regbase(node, port); nlm_xlpii_usb_hw_reset()
180 pci_base = nlm_xlpii_get_usb_pcibase(node, port); nlm_xlpii_usb_hw_reset()
210 int node; nlm_platform_xlpii_usb_init() local
216 /* XLP 2XX single node */ nlm_platform_xlpii_usb_init()
227 /* XLP 9XX, multi-node */ nlm_platform_xlpii_usb_init()
229 for (node = 0; node < NLM_NR_NODES; node++) { nlm_platform_xlpii_usb_init()
230 if (!nlm_node_present(node)) nlm_platform_xlpii_usb_init()
232 nlm_xlpii_usb_hw_reset(node, 1); nlm_platform_xlpii_usb_init()
233 nlm_xlpii_usb_hw_reset(node, 2); nlm_platform_xlpii_usb_init()
234 nlm_xlpii_usb_hw_reset(node, 3); nlm_platform_xlpii_usb_init()
235 nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_0_IRQ, xlp9xx_usb_ack); nlm_platform_xlpii_usb_init()
236 nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_1_IRQ, xlp9xx_usb_ack); nlm_platform_xlpii_usb_init()
237 nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_2_IRQ, xlp9xx_usb_ack); nlm_platform_xlpii_usb_init()
249 int node; nlm_xlp9xx_usb_fixup_final() local
251 node = xlp_socdev_to_node(dev); nlm_xlp9xx_usb_fixup_final()
256 dev->irq = nlm_irq_to_xirq(node, PIC_9XX_XHCI_0_IRQ); nlm_xlp9xx_usb_fixup_final()
259 dev->irq = nlm_irq_to_xirq(node, PIC_9XX_XHCI_1_IRQ); nlm_xlp9xx_usb_fixup_final()
262 dev->irq = nlm_irq_to_xirq(node, PIC_9XX_XHCI_2_IRQ); nlm_xlp9xx_usb_fixup_final()
/linux-4.4.14/arch/powerpc/kernel/
H A Dpci_of_scan.c67 * of_pci_parse_addrs - Parse PCI addresses assigned in the device tree node
68 * @node: device tree node for the PCI device
72 * device tree node and writes them into the associated pci_dev structure.
74 static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev) of_pci_parse_addrs() argument
84 addrs = of_get_property(node, "assigned-addresses", &proplen); of_pci_parse_addrs()
119 * of_create_pci_dev - Given a device tree node on a pci bus, create a pci_dev
120 * @node: device tree node pointer
124 struct pci_dev *of_create_pci_dev(struct device_node *node, of_create_pci_dev() argument
133 type = of_get_property(node, "device_type", NULL); of_create_pci_dev()
139 dev->dev.of_node = of_node_get(node); of_create_pci_dev()
148 dev->vendor = get_int_prop(node, "vendor-id", 0xffff); of_create_pci_dev()
149 dev->device = get_int_prop(node, "device-id", 0xffff); of_create_pci_dev()
150 dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0); of_create_pci_dev()
151 dev->subsystem_device = get_int_prop(node, "subsystem-id", 0); of_create_pci_dev()
157 dev->class = get_int_prop(node, "class-code", 0); of_create_pci_dev()
158 dev->revision = get_int_prop(node, "revision-id", 0); of_create_pci_dev()
184 of_pci_parse_addrs(node, dev); of_create_pci_dev()
207 struct device_node *node = dev->dev.of_node; of_scan_pci_bridge() local
217 pr_debug("of_scan_pci_bridge(%s)\n", node->full_name); of_scan_pci_bridge()
220 busrange = of_get_property(node, "bus-range", &len); of_scan_pci_bridge()
223 node->full_name); of_scan_pci_bridge()
226 ranges = of_get_property(node, "ranges", &len); of_scan_pci_bridge()
229 node->full_name); of_scan_pci_bridge()
240 node->full_name); of_scan_pci_bridge()
268 " for bridge %s\n", node->full_name); of_scan_pci_bridge()
274 " for bridge %s\n", node->full_name); of_scan_pci_bridge()
297 of_scan_bus(node, bus); of_scan_pci_bridge()
345 * __of_scan_bus - given a PCI bus node, setup bus and scan for child devices
346 * @node: device tree node for the PCI bus
350 static void __of_scan_bus(struct device_node *node, struct pci_bus *bus, __of_scan_bus() argument
357 node->full_name, bus->number); __of_scan_bus()
360 for_each_child_of_node(node, child) { for_each_child_of_node()
383 * of_scan_bus - given a PCI bus node, setup bus and scan for child devices
384 * @node: device tree node for the PCI bus
387 void of_scan_bus(struct device_node *node, struct pci_bus *bus) of_scan_bus() argument
389 __of_scan_bus(node, bus, 0); of_scan_bus()
394 * of_rescan_bus - given a PCI bus node, scan for child devices
395 * @node: device tree node for the PCI bus
401 void of_rescan_bus(struct device_node *node, struct pci_bus *bus) of_rescan_bus() argument
403 __of_scan_bus(node, bus, 1); of_rescan_bus()
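
of_create_pci_dev() fills the PCI IDs through a get_int_prop() helper whose body does not appear in this listing; a plausible sketch of its shape, assuming only of_get_property() and of_read_number() from <linux/of.h>:

#include <linux/of.h>

static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
{
        const __be32 *prop;
        int len;

        prop = of_get_property(np, name, &len);
        if (prop && len >= (int)sizeof(u32))
                return of_read_number(prop, 1);     /* first (only) cell */
        return def;                                 /* property absent */
}
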
/linux-4.4.14/drivers/bus/
H A Dvexpress-config.c53 static void vexpress_config_find_prop(struct device_node *node, vexpress_config_find_prop() argument
59 of_node_get(node); vexpress_config_find_prop()
60 while (node) { vexpress_config_find_prop()
61 if (of_property_read_u32(node, name, val) == 0) { vexpress_config_find_prop()
62 of_node_put(node); vexpress_config_find_prop()
65 node = of_get_next_parent(node); vexpress_config_find_prop()
69 int vexpress_config_get_topo(struct device_node *node, u32 *site, vexpress_config_get_topo() argument
72 vexpress_config_find_prop(node, "arm,vexpress,site", site); vexpress_config_get_topo()
77 vexpress_config_find_prop(node, "arm,vexpress,position", position); vexpress_config_get_topo()
78 vexpress_config_find_prop(node, "arm,vexpress,dcc", dcc); vexpress_config_get_topo()
153 dev_dbg(parent, "Registered bridge '%s', parent node %p\n", vexpress_config_bridge_register()
162 const struct device_node *node = data; vexpress_config_node_match() local
164 dev_dbg(dev, "Parent node %p, looking for %p\n", vexpress_config_node_match()
165 dev->parent->of_node, node); vexpress_config_node_match()
167 return dev->parent->of_node == node; vexpress_config_node_match()
170 static int vexpress_config_populate(struct device_node *node) vexpress_config_populate() argument
175 bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0); vexpress_config_populate()
184 return of_platform_populate(node, NULL, NULL, parent); vexpress_config_populate()
190 struct device_node *node; vexpress_config_init() local
193 for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") { vexpress_config_init()
194 err = vexpress_config_populate(node); vexpress_config_init()
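
vexpress_config_find_prop() climbs from a node toward the root until a property is found. A stand-alone model of that parent walk (illustrative types; the of_node_get()/of_get_next_parent() refcounting of the original is dropped):

struct tnode {
        const struct tnode *parent;     /* NULL at the root */
        int has_prop;                   /* "property present on this node" */
        unsigned int prop_val;
};

static int find_prop_upwards(const struct tnode *n, unsigned int *val)
{
        for (; n; n = n->parent) {
                if (n->has_prop) {
                        *val = n->prop_val;     /* nearest ancestor wins */
                        return 0;
                }
        }
        return -1;                              /* reached the root: no match */
}
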
/linux-4.4.14/kernel/power/
H A Dwakelock.c28 struct rb_node node; member in struct:wakelock
39 struct rb_node *node; pm_show_wakelocks() local
46 for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) { pm_show_wakelocks()
47 wl = rb_entry(node, struct wakelock, node); pm_show_wakelocks()
124 rb_erase(&wl->node, &wakelocks_tree); __wakelocks_gc()
152 struct rb_node **node = &wakelocks_tree.rb_node; wakelock_lookup_add() local
153 struct rb_node *parent = *node; wakelock_lookup_add()
156 while (*node) { wakelock_lookup_add()
159 parent = *node; wakelock_lookup_add()
160 wl = rb_entry(*node, struct wakelock, node); wakelock_lookup_add()
169 node = &(*node)->rb_left; wakelock_lookup_add()
171 node = &(*node)->rb_right; wakelock_lookup_add()
191 rb_link_node(&wl->node, parent, node); wakelock_lookup_add()
192 rb_insert_color(&wl->node, &wakelocks_tree); wakelock_lookup_add()
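
wakelock_lookup_add() is the classic rbtree lookup-or-insert descent: remember the parent and link slot while searching, then rb_link_node() + rb_insert_color() on a miss. A condensed sketch of the same idiom (error paths trimmed, locking omitted; 'struct wl' is an illustrative stand-in):

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/string.h>

struct wl {
        char *name;
        struct rb_node node;
};

static struct wl *lookup_add(struct rb_root *root, const char *name)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;
        struct wl *w;
        int diff;

        while (*p) {
                w = rb_entry(*p, struct wl, node);
                diff = strcmp(name, w->name);
                if (diff == 0)
                        return w;               /* hit: reuse existing entry */
                parent = *p;
                p = diff < 0 ? &(*p)->rb_left : &(*p)->rb_right;
        }

        w = kzalloc(sizeof(*w), GFP_KERNEL);    /* miss: insert at the leaf */
        if (!w)
                return NULL;
        w->name = kstrdup(name, GFP_KERNEL);
        if (!w->name) {
                kfree(w);
                return NULL;
        }
        rb_link_node(&w->node, parent, p);      /* hook in under 'parent' */
        rb_insert_color(&w->node, root);        /* recolour/rebalance */
        return w;
}
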
/linux-4.4.14/security/selinux/ss/
H A Dconditional.c86 * current state of the node, it sets the rules in the true/false
90 int evaluate_cond_node(struct policydb *p, struct cond_node *node) evaluate_cond_node() argument
95 new_state = cond_evaluate_expr(p, node->expr); evaluate_cond_node()
96 if (new_state != node->cur_state) { evaluate_cond_node()
97 node->cur_state = new_state; evaluate_cond_node()
101 for (cur = node->true_list; cur; cur = cur->next) { evaluate_cond_node()
103 cur->node->key.specified &= ~AVTAB_ENABLED; evaluate_cond_node()
105 cur->node->key.specified |= AVTAB_ENABLED; evaluate_cond_node()
108 for (cur = node->false_list; cur; cur = cur->next) { evaluate_cond_node()
111 cur->node->key.specified &= ~AVTAB_ENABLED; evaluate_cond_node()
113 cur->node->key.specified |= AVTAB_ENABLED; evaluate_cond_node()
138 /* the avtab_ptr_t node is destroyed by the avtab */ cond_av_list_destroy()
143 static void cond_node_destroy(struct cond_node *node) cond_node_destroy() argument
147 for (cur_expr = node->expr; cur_expr; cur_expr = next_expr) { cond_node_destroy()
151 cond_av_list_destroy(node->true_list); cond_node_destroy()
152 cond_av_list_destroy(node->false_list); cond_node_destroy()
153 kfree(node); cond_node_destroy()
307 if (cur->node == node_ptr) { cond_insertf()
338 list->node = node_ptr; cond_insertf()
399 static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp) cond_read_node() argument
410 node->cur_state = le32_to_cpu(buf[0]); cond_read_node()
435 node->expr = expr; cond_read_node()
441 rc = cond_read_av_list(p, fp, &node->true_list, NULL); cond_read_node()
444 rc = cond_read_av_list(p, fp, &node->false_list, node->true_list); cond_read_node()
449 cond_node_destroy(node); cond_read_node()
455 struct cond_node *node, *last = NULL; cond_read_list() local
472 node = kzalloc(sizeof(struct cond_node), GFP_KERNEL); cond_read_list()
473 if (!node) cond_read_list()
476 rc = cond_read_node(p, node, fp); cond_read_list()
481 p->cond_list = node; cond_read_list()
483 last->next = node; cond_read_list()
484 last = node; cond_read_list()
546 rc = avtab_write_item(p, cur_list->node, fp); cond_write_av_list()
554 static int cond_write_node(struct policydb *p, struct cond_node *node, cond_write_node() argument
562 buf[0] = cpu_to_le32(node->cur_state); cond_write_node()
567 for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) cond_write_node()
575 for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) { cond_write_node()
583 rc = cond_write_av_list(p, node->true_list, fp); cond_write_node()
586 rc = cond_write_av_list(p, node->false_list, fp); cond_write_node()
620 struct avtab_node *node; cond_compute_xperms() local
625 for (node = avtab_search_node(ctab, key); node; cond_compute_xperms()
626 node = avtab_search_node_next(node, key->specified)) { cond_compute_xperms()
627 if (node->key.specified & AVTAB_ENABLED) cond_compute_xperms()
628 services_compute_xperms_decision(xpermd, node); cond_compute_xperms()
639 struct avtab_node *node; cond_compute_av() local
644 for (node = avtab_search_node(ctab, key); node; cond_compute_av()
645 node = avtab_search_node_next(node, key->specified)) { cond_compute_av()
647 (node->key.specified & (AVTAB_ALLOWED|AVTAB_ENABLED))) cond_compute_av()
648 avd->allowed |= node->datum.u.data; cond_compute_av()
650 (node->key.specified & (AVTAB_AUDITDENY|AVTAB_ENABLED))) cond_compute_av()
656 avd->auditdeny &= node->datum.u.data; cond_compute_av()
658 (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED))) cond_compute_av()
659 avd->auditallow |= node->datum.u.data; cond_compute_av()
660 if (xperms && (node->key.specified & AVTAB_ENABLED) && cond_compute_av()
661 (node->key.specified & AVTAB_XPERMS)) cond_compute_av()
662 services_compute_xperms_drivers(xperms, node); cond_compute_av()
H A Dservices.h15 struct avtab_node *node);
18 struct avtab_node *node);
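
evaluate_cond_node() re-evaluates the boolean expression and, when the state changes, walks the true/false rule lists flipping the AVTAB_ENABLED bit so lookups see only rules of the active branch. A stand-alone model of that toggle (illustrative types and flag value):

#define ENABLED 0x1u

struct rule { unsigned int specified; struct rule *next; };

static void set_enabled(struct rule *r, int on)
{
        for (; r; r = r->next) {
                if (on)
                        r->specified |= ENABLED;     /* rule visible to lookups */
                else
                        r->specified &= ~ENABLED;    /* rule masked out */
        }
}

static void evaluate(int new_state, struct rule *true_list,
                     struct rule *false_list)
{
        /* True-branch rules are live when the expression is 1,
         * false-branch rules when it is 0, as in evaluate_cond_node(). */
        set_enabled(true_list, new_state > 0);
        set_enabled(false_list, new_state == 0);
}
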
/linux-4.4.14/drivers/staging/lustre/lustre/include/
H A Dinterval_tree.h50 in_intree:1, /** set if the node is in tree */
65 static inline int interval_is_intree(struct interval_node *node) interval_is_intree() argument
67 return node->in_intree == 1; interval_is_intree()
70 static inline __u64 interval_high(struct interval_node *node) interval_high() argument
72 return node->in_extent.end; interval_high()
75 static inline void interval_set(struct interval_node *node, interval_set() argument
79 node->in_extent.start = start; interval_set()
80 node->in_extent.end = end; interval_set()
81 node->in_max_high = end; interval_set()
84 struct interval_node *interval_insert(struct interval_node *node,
86 void interval_erase(struct interval_node *node, struct interval_node **root);
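
A minimal usage sketch of the API above, with hypothetical extent values:

/* Hypothetical extent; interval_insert() is the header's own API. */
static void example(struct interval_node **root)
{
        static struct interval_node n;

        interval_set(&n, 0, 4095);     /* extent [0, 4095]; in_max_high = end */
        interval_insert(&n, root);     /* link the node into the tree */
}
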
/linux-4.4.14/drivers/pnp/pnpbios/
H A Dproc.c7 * The .../devices and .../<node> and .../boot/<node> files are
176 struct pnp_bios_node *node; pnp_devices_proc_show() local
179 node = kzalloc(node_info.max_node_size, GFP_KERNEL); pnp_devices_proc_show()
180 if (!node) pnp_devices_proc_show()
186 if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node)) pnp_devices_proc_show()
189 node->handle, node->eisa_id, pnp_devices_proc_show()
190 node->type_code, node->flags); pnp_devices_proc_show()
193 "%s Node number 0x%x is out of sequence following node 0x%x. Aborting.\n", pnp_devices_proc_show()
200 kfree(node); pnp_devices_proc_show()
220 struct pnp_bios_node *node; pnpbios_proc_show() local
225 node = kzalloc(node_info.max_node_size, GFP_KERNEL); pnpbios_proc_show()
226 if (!node) pnpbios_proc_show()
228 if (pnp_bios_get_dev_node(&nodenum, boot, node)) { pnpbios_proc_show()
229 kfree(node); pnpbios_proc_show()
232 len = node->size - sizeof(struct pnp_bios_node); pnpbios_proc_show()
233 seq_write(m, node->data, len); pnpbios_proc_show()
234 kfree(node); pnpbios_proc_show()
247 struct pnp_bios_node *node; pnpbios_proc_write() local
252 node = kzalloc(node_info.max_node_size, GFP_KERNEL); pnpbios_proc_write()
253 if (!node) pnpbios_proc_write()
255 if (pnp_bios_get_dev_node(&nodenum, boot, node)) { pnpbios_proc_write()
259 if (count != node->size - sizeof(struct pnp_bios_node)) { pnpbios_proc_write()
263 if (copy_from_user(node->data, buf, count)) { pnpbios_proc_write()
267 if (pnp_bios_set_dev_node(node->handle, boot, node) != 0) { pnpbios_proc_write()
273 kfree(node); pnpbios_proc_write()
286 int pnpbios_interface_attach_device(struct pnp_bios_node *node) pnpbios_interface_attach_device() argument
290 sprintf(name, "%02x", node->handle); pnpbios_interface_attach_device()
296 (void *)(long)(node->handle)); pnpbios_interface_attach_device()
302 (void *)(long)(node->handle + 0x100))) pnpbios_interface_attach_device()
H A Dcore.c204 struct pnp_bios_node *node; pnpbios_get_resources() local
210 node = kzalloc(node_info.max_node_size, GFP_KERNEL); pnpbios_get_resources()
211 if (!node) pnpbios_get_resources()
213 if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) { pnpbios_get_resources()
214 kfree(node); pnpbios_get_resources()
217 pnpbios_read_resources_from_node(dev, node); pnpbios_get_resources()
219 kfree(node); pnpbios_get_resources()
226 struct pnp_bios_node *node; pnpbios_set_resources() local
233 node = kzalloc(node_info.max_node_size, GFP_KERNEL); pnpbios_set_resources()
234 if (!node) pnpbios_set_resources()
236 if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) { pnpbios_set_resources()
237 kfree(node); pnpbios_set_resources()
240 if (pnpbios_write_resources_to_node(dev, node) < 0) { pnpbios_set_resources()
241 kfree(node); pnpbios_set_resources()
244 ret = pnp_bios_set_dev_node(node->handle, (char)PNPMODE_DYNAMIC, node); pnpbios_set_resources()
245 kfree(node); pnpbios_set_resources()
251 static void pnpbios_zero_data_stream(struct pnp_bios_node *node) pnpbios_zero_data_stream() argument
253 unsigned char *p = (char *)node->data; pnpbios_zero_data_stream()
254 unsigned char *end = (char *)(node->data + node->size); pnpbios_zero_data_stream()
278 struct pnp_bios_node *node; pnpbios_disable_resources() local
285 node = kzalloc(node_info.max_node_size, GFP_KERNEL); pnpbios_disable_resources()
286 if (!node) pnpbios_disable_resources()
289 if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) { pnpbios_disable_resources()
290 kfree(node); pnpbios_disable_resources()
293 pnpbios_zero_data_stream(node); pnpbios_disable_resources()
295 ret = pnp_bios_set_dev_node(dev->number, (char)PNPMODE_DYNAMIC, node); pnpbios_disable_resources()
296 kfree(node); pnpbios_disable_resources()
311 static int __init insert_device(struct pnp_bios_node *node) insert_device() argument
321 if (dev->number == node->handle) insert_device()
325 pnp_eisa_id_to_string(node->eisa_id & PNP_EISA_ID_MASK, id); insert_device()
326 dev = pnp_alloc_dev(&pnpbios_protocol, node->handle, id); insert_device()
330 pnpbios_parse_data_stream(dev, node); insert_device()
332 dev->flags = node->flags; insert_device()
353 pnpbios_interface_attach_device(node); insert_device()
363 struct pnp_bios_node *node; build_devlist() local
365 node = kzalloc(node_info.max_node_size, GFP_KERNEL); build_devlist()
366 if (!node) build_devlist()
376 (&nodenum, (char)PNPMODE_DYNAMIC, node)) build_devlist()
380 (&nodenum, (char)PNPMODE_STATIC, node)) build_devlist()
384 if (insert_device(node) == 0) build_devlist()
388 "PnPBIOS: build_devlist: Node number 0x%x is out of sequence following node 0x%x. Aborting.\n", build_devlist()
394 kfree(node); build_devlist()
397 "PnPBIOS: %i node%s reported by PnP BIOS; %i recorded by driver\n", build_devlist()
543 /* read the node info */ pnpbios_init()
547 "PnPBIOS: Unable to get node info. Aborting.\n"); pnpbios_init()
/linux-4.4.14/drivers/acpi/
H A Dnuma.c39 /* maps to convert between proximity domain and logical node ID */
54 int node_to_pxm(int node) node_to_pxm() argument
56 if (node < 0) node_to_pxm()
58 return node_to_pxm_map[node]; node_to_pxm()
61 static void __acpi_map_pxm_to_node(int pxm, int node) __acpi_map_pxm_to_node() argument
63 if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm]) __acpi_map_pxm_to_node()
64 pxm_to_node_map[pxm] = node; __acpi_map_pxm_to_node()
65 if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node]) __acpi_map_pxm_to_node()
66 node_to_pxm_map[node] = pxm; __acpi_map_pxm_to_node()
71 int node; acpi_map_pxm_to_node() local
76 node = pxm_to_node_map[pxm]; acpi_map_pxm_to_node()
78 if (node == NUMA_NO_NODE) { acpi_map_pxm_to_node()
81 node = first_unset_node(nodes_found_map); acpi_map_pxm_to_node()
82 __acpi_map_pxm_to_node(pxm, node); acpi_map_pxm_to_node()
83 node_set(node, nodes_found_map); acpi_map_pxm_to_node()
86 return node; acpi_map_pxm_to_node()
90 * acpi_map_pxm_to_online_node - Map proximity ID to online node
94 * node. When the mapped node from a given proximity ID is offline, it
95 * looks up the node distance table and returns the nearest online node.
100 * offline nodes. A node may be offline when a device proximity ID is
106 int node, n, dist, min_dist; acpi_map_pxm_to_online_node() local
108 node = acpi_map_pxm_to_node(pxm); acpi_map_pxm_to_online_node()
110 if (node == NUMA_NO_NODE) acpi_map_pxm_to_online_node()
111 node = 0; acpi_map_pxm_to_online_node()
113 if (!node_online(node)) { acpi_map_pxm_to_online_node()
116 dist = node_distance(node, n); for_each_online_node()
119 node = n; for_each_online_node()
124 return node;
199 * up the NUMA heuristics which want the local node to have a smaller
325 * So go over all cpu entries in SRAT to get apicid to node mapping. acpi_numa_init()
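
acpi_map_pxm_to_node() assigns the first unset logical node id to a proximity domain on first sight and records both lookup directions. A self-contained sketch of that two-way map (illustrative bounds; assume an init pass filled both arrays with NO_NODE, as numa.c does for its maps):

#define MAX_PXM  256
#define NO_NODE  (-1)

static int pxm_to_node_map[MAX_PXM];
static int node_to_pxm_map[MAX_PXM];
static int next_node;

static int map_pxm_to_node(int pxm)
{
        if (pxm < 0 || pxm >= MAX_PXM)
                return NO_NODE;
        if (pxm_to_node_map[pxm] == NO_NODE) {
                int node;

                if (next_node >= MAX_PXM)       /* no free logical ids left */
                        return NO_NODE;
                node = next_node++;             /* first unset node, in order */
                pxm_to_node_map[pxm] = node;    /* record both directions */
                node_to_pxm_map[node] = pxm;
        }
        return pxm_to_node_map[pxm];
}
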
H A Dproc.c24 struct list_head *node, *next; acpi_system_wakeup_device_seq_show() local
26 seq_printf(seq, "Device\tS-state\t Status Sysfs node\n"); acpi_system_wakeup_device_seq_show()
29 list_for_each_safe(node, next, &acpi_wakeup_device_list) { acpi_system_wakeup_device_seq_show()
31 container_of(node, struct acpi_device, wakeup_list); acpi_system_wakeup_device_seq_show()
51 node) { acpi_system_wakeup_device_seq_show()
56 if (&entry->node != acpi_system_wakeup_device_seq_show()
84 &adev->physical_node_list, node) physical_device_enable_wakeup()
98 struct list_head *node, *next; acpi_system_write_wakeup_device() local
111 list_for_each_safe(node, next, &acpi_wakeup_device_list) { acpi_system_write_wakeup_device()
113 container_of(node, struct acpi_device, wakeup_list); acpi_system_write_wakeup_device()
H A Dwakeup.c32 struct list_head *node, *next; acpi_enable_wakeup_devices() local
34 list_for_each_safe(node, next, &acpi_wakeup_device_list) { acpi_enable_wakeup_devices()
36 container_of(node, struct acpi_device, wakeup_list); acpi_enable_wakeup_devices()
59 struct list_head *node, *next; acpi_disable_wakeup_devices() local
61 list_for_each_safe(node, next, &acpi_wakeup_device_list) { acpi_disable_wakeup_devices()
63 container_of(node, struct acpi_device, wakeup_list); acpi_disable_wakeup_devices()
81 struct list_head *node, *next; acpi_wakeup_device_init() local
84 list_for_each_safe(node, next, &acpi_wakeup_device_list) { acpi_wakeup_device_init()
85 struct acpi_device *dev = container_of(node, acpi_wakeup_device_init()
/linux-4.4.14/drivers/video/fbdev/omap2/dss/
H A Domapdss-boot-init.c39 struct device_node *node; member in struct:dss_conv_node
55 static void __init omapdss_update_prop(struct device_node *node, char *compat, omapdss_update_prop() argument
68 of_update_property(node, prop); omapdss_update_prop()
91 static void __init omapdss_omapify_node(struct device_node *node) omapdss_omapify_node() argument
98 prop = of_find_property(node, "compatible", NULL); omapdss_omapify_node()
117 omapdss_update_prop(node, new_compat, new_len); omapdss_omapify_node()
120 static void __init omapdss_add_to_list(struct device_node *node, bool root) omapdss_add_to_list() argument
125 n->node = node; omapdss_add_to_list()
131 static bool __init omapdss_list_contains(const struct device_node *node) omapdss_list_contains() argument
136 if (n->node == node) omapdss_list_contains()
143 static void __init omapdss_walk_device(struct device_node *node, bool root) omapdss_walk_device() argument
147 omapdss_add_to_list(node, root); omapdss_walk_device()
151 * port/ports node. To avoid that, check first that the node exists. omapdss_walk_device()
153 n = of_get_child_by_name(node, "ports"); omapdss_walk_device()
155 n = of_get_child_by_name(node, "port"); omapdss_walk_device()
162 while ((n = of_graph_get_next_endpoint(node, n)) != NULL) { omapdss_walk_device()
217 omapdss_omapify_node(n->node);
220 of_node_put(n->node);
/linux-4.4.14/drivers/iommu/
H A Diova.c53 container_of(iovad->cached32_node, struct iova, node); __get_cached_rbnode()
65 iovad->cached32_node = &new->node; __cached_rbnode_insert_update()
77 cached_iova = container_of(curr, struct iova, node); __cached_rbnode_delete_update()
80 struct rb_node *node = rb_next(&free->node); __cached_rbnode_delete_update() local
81 struct iova *iova = container_of(node, struct iova, node); __cached_rbnode_delete_update()
84 if (node && iova->pfn_lo < iovad->dma_32bit_pfn) __cached_rbnode_delete_update()
85 iovad->cached32_node = node; __cached_rbnode_delete_update()
116 struct iova *curr_iova = container_of(curr, struct iova, node); __alloc_and_insert_iova_range()
149 /* Add new node and rebalance tree. */ __alloc_and_insert_iova_range()
160 /* Figure out where to put new node */ __alloc_and_insert_iova_range()
163 struct iova, node); __alloc_and_insert_iova_range()
174 /* Add new node and rebalance tree. */ __alloc_and_insert_iova_range()
175 rb_link_node(&new->node, parent, entry); __alloc_and_insert_iova_range()
176 rb_insert_color(&new->node, &iovad->rbroot); __alloc_and_insert_iova_range()
190 /* Figure out where to put new node */ iova_insert_rbtree()
192 struct iova *this = container_of(*new, struct iova, node); iova_insert_rbtree()
203 /* Add new node and rebalance tree. */ iova_insert_rbtree()
204 rb_link_node(&iova->node, parent, new); iova_insert_rbtree()
205 rb_insert_color(&iova->node, root); iova_insert_rbtree()
304 struct rb_node *node; find_iova() local
308 node = iovad->rbroot.rb_node; find_iova()
309 while (node) { find_iova()
310 struct iova *iova = container_of(node, struct iova, node); find_iova()
325 node = node->rb_left; find_iova()
327 node = node->rb_right; find_iova()
348 rb_erase(&iova->node, &iovad->rbroot); __free_iova()
379 struct rb_node *node; put_iova_domain() local
383 node = rb_first(&iovad->rbroot); put_iova_domain()
384 while (node) { put_iova_domain()
385 struct iova *iova = container_of(node, struct iova, node); put_iova_domain()
387 rb_erase(node, &iovad->rbroot); put_iova_domain()
389 node = rb_first(&iovad->rbroot); put_iova_domain()
396 __is_range_overlap(struct rb_node *node, __is_range_overlap() argument
399 struct iova *iova = container_of(node, struct iova, node); __is_range_overlap()
455 struct rb_node *node; reserve_iova() local
461 for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) { reserve_iova()
462 if (__is_range_overlap(node, pfn_lo, pfn_hi)) { reserve_iova()
463 iova = container_of(node, struct iova, node); reserve_iova()
474 /* We are here either because this is the first reserver node reserve_iova()
496 struct rb_node *node; copy_reserved_iova() local
499 for (node = rb_first(&from->rbroot); node; node = rb_next(node)) { copy_reserved_iova()
500 struct iova *iova = container_of(node, struct iova, node); copy_reserved_iova()
532 rb_erase(&iova->node, &iovad->rbroot); split_and_remove_iova()
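
put_iova_domain() frees the whole tree by repeatedly erasing the leftmost entry; since rb_erase() rebalances the tree, each iteration restarts from rb_first() rather than walking with rb_next(). Condensed into one helper (a sketch; free_iova_mem() is the file's own entry destructor):

#include <linux/rbtree.h>

static void drain_domain(struct iova_domain *iovad)
{
        struct rb_node *n;

        while ((n = rb_first(&iovad->rbroot)) != NULL) {
                struct iova *iova = container_of(n, struct iova, node);

                rb_erase(n, &iovad->rbroot);    /* tree rebalances here */
                free_iova_mem(iova);            /* entry destructor, as above */
        }
}
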
/linux-4.4.14/arch/x86/include/asm/
H A Dnuma.h14 * Too small node sizes may confuse the VM badly. Usually they
24 * node and is used to initialize cpu_to_node mapping.
36 static inline void set_apicid_to_node(int apicid, s16 node) set_apicid_to_node() argument
38 __apicid_to_node[apicid] = node; set_apicid_to_node()
44 static inline void set_apicid_to_node(int apicid, s16 node) set_apicid_to_node() argument
59 extern void numa_set_node(int cpu, int node);
65 static inline void numa_set_node(int cpu, int node) { } numa_clear_node() argument
73 void debug_cpumask_set_cpu(int cpu, int node, bool enable);
H A Dtopology.h50 /* Mappings between logical cpu number and node number */
72 /* Mappings between node number and cpus on that node. */
76 extern const struct cpumask *cpumask_of_node(int node);
78 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */ cpumask_of_node()
79 static inline const struct cpumask *cpumask_of_node(int node) cpumask_of_node() argument
81 return node_to_cpumask_map[node]; cpumask_of_node()
88 * Returns the number of the node containing Node 'node'. This
91 #define parent_node(node) (node)
/linux-4.4.14/include/asm-generic/
H A Dtopology.h38 #define set_numa_node(node)
41 #define set_cpu_numa_node(cpu, node)
48 #define parent_node(node) ((void)(node),0)
51 #define cpumask_of_node(node) ((void)node, cpu_online_mask)
68 #define set_numa_mem(node)
71 #define set_cpu_numa_mem(cpu, node)
/linux-4.4.14/arch/sh/mm/
H A Dnuma.c2 * arch/sh/mm/numa.c - Multiple node support for SH machines
23 * in node 0, and other memory blocks into node 1 and up, ordered by
24 * latency. Each node's pgdat is node-local at the beginning of the node,
25 * immediately followed by the node mem map.
33 /* Don't allow bogus node assignment */ setup_bootmem_node()
/linux-4.4.14/drivers/cpufreq/
H A Dppc_cbe_cpufreq_pmi.c81 u8 node, slow_mode; cbe_cpufreq_handle_pmi() local
85 node = pmi_msg.data1; cbe_cpufreq_handle_pmi()
88 pmi_slow_mode_limit[node] = slow_mode; cbe_cpufreq_handle_pmi()
90 pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode); cbe_cpufreq_handle_pmi()
98 u8 node; pmi_notifier() local
107 node = cbe_cpu_to_node(policy->cpu); pmi_notifier()
109 pr_debug("got notified, event=%lu, node=%u\n", event, node); pmi_notifier()
111 if (pmi_slow_mode_limit[node] != 0) { pmi_notifier()
112 pr_debug("limiting node %d to slow mode %d\n", pmi_notifier()
113 node, pmi_slow_mode_limit[node]); pmi_notifier()
117 cbe_freqs[pmi_slow_mode_limit[node]].frequency); pmi_notifier()
/linux-4.4.14/fs/nilfs2/
H A Dbtree.c69 * B-tree node operations
86 static int nilfs_btree_node_get_flags(const struct nilfs_btree_node *node) nilfs_btree_node_get_flags() argument
88 return node->bn_flags; nilfs_btree_node_get_flags()
92 nilfs_btree_node_set_flags(struct nilfs_btree_node *node, int flags) nilfs_btree_node_set_flags() argument
94 node->bn_flags = flags; nilfs_btree_node_set_flags()
97 static int nilfs_btree_node_root(const struct nilfs_btree_node *node) nilfs_btree_node_root() argument
99 return nilfs_btree_node_get_flags(node) & NILFS_BTREE_NODE_ROOT; nilfs_btree_node_root()
102 static int nilfs_btree_node_get_level(const struct nilfs_btree_node *node) nilfs_btree_node_get_level() argument
104 return node->bn_level; nilfs_btree_node_get_level()
108 nilfs_btree_node_set_level(struct nilfs_btree_node *node, int level) nilfs_btree_node_set_level() argument
110 node->bn_level = level; nilfs_btree_node_set_level()
113 static int nilfs_btree_node_get_nchildren(const struct nilfs_btree_node *node) nilfs_btree_node_get_nchildren() argument
115 return le16_to_cpu(node->bn_nchildren); nilfs_btree_node_get_nchildren()
119 nilfs_btree_node_set_nchildren(struct nilfs_btree_node *node, int nchildren) nilfs_btree_node_set_nchildren() argument
121 node->bn_nchildren = cpu_to_le16(nchildren); nilfs_btree_node_set_nchildren()
135 nilfs_btree_node_dkeys(const struct nilfs_btree_node *node) nilfs_btree_node_dkeys() argument
137 return (__le64 *)((char *)(node + 1) + nilfs_btree_node_dkeys()
138 (nilfs_btree_node_root(node) ? nilfs_btree_node_dkeys()
143 nilfs_btree_node_dptrs(const struct nilfs_btree_node *node, int ncmax) nilfs_btree_node_dptrs() argument
145 return (__le64 *)(nilfs_btree_node_dkeys(node) + ncmax); nilfs_btree_node_dptrs()
149 nilfs_btree_node_get_key(const struct nilfs_btree_node *node, int index) nilfs_btree_node_get_key() argument
151 return le64_to_cpu(*(nilfs_btree_node_dkeys(node) + index)); nilfs_btree_node_get_key()
155 nilfs_btree_node_set_key(struct nilfs_btree_node *node, int index, __u64 key) nilfs_btree_node_set_key() argument
157 *(nilfs_btree_node_dkeys(node) + index) = cpu_to_le64(key); nilfs_btree_node_set_key()
161 nilfs_btree_node_get_ptr(const struct nilfs_btree_node *node, int index, nilfs_btree_node_get_ptr() argument
164 return le64_to_cpu(*(nilfs_btree_node_dptrs(node, ncmax) + index)); nilfs_btree_node_get_ptr()
168 nilfs_btree_node_set_ptr(struct nilfs_btree_node *node, int index, __u64 ptr, nilfs_btree_node_set_ptr() argument
171 *(nilfs_btree_node_dptrs(node, ncmax) + index) = cpu_to_le64(ptr); nilfs_btree_node_set_ptr()
174 static void nilfs_btree_node_init(struct nilfs_btree_node *node, int flags, nilfs_btree_node_init() argument
182 nilfs_btree_node_set_flags(node, flags); nilfs_btree_node_init()
183 nilfs_btree_node_set_level(node, level); nilfs_btree_node_init()
184 nilfs_btree_node_set_nchildren(node, nchildren); nilfs_btree_node_init()
186 dkeys = nilfs_btree_node_dkeys(node); nilfs_btree_node_init()
187 dptrs = nilfs_btree_node_dptrs(node, ncmax); nilfs_btree_node_init()
250 /* Assume that the buffer head corresponding to node is locked. */ nilfs_btree_node_insert()
251 static void nilfs_btree_node_insert(struct nilfs_btree_node *node, int index, nilfs_btree_node_insert() argument
258 dkeys = nilfs_btree_node_dkeys(node); nilfs_btree_node_insert()
259 dptrs = nilfs_btree_node_dptrs(node, ncmax); nilfs_btree_node_insert()
260 nchildren = nilfs_btree_node_get_nchildren(node); nilfs_btree_node_insert()
270 nilfs_btree_node_set_nchildren(node, nchildren); nilfs_btree_node_insert()
273 /* Assume that the buffer head corresponding to node is locked. */ nilfs_btree_node_delete()
274 static void nilfs_btree_node_delete(struct nilfs_btree_node *node, int index, nilfs_btree_node_delete() argument
283 dkeys = nilfs_btree_node_dkeys(node); nilfs_btree_node_delete()
284 dptrs = nilfs_btree_node_dptrs(node, ncmax); nilfs_btree_node_delete()
287 nchildren = nilfs_btree_node_get_nchildren(node); nilfs_btree_node_delete()
300 nilfs_btree_node_set_nchildren(node, nchildren); nilfs_btree_node_delete()
303 static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node, nilfs_btree_node_lookup() argument
311 high = nilfs_btree_node_get_nchildren(node) - 1; nilfs_btree_node_lookup()
316 nkey = nilfs_btree_node_get_key(node, index); nilfs_btree_node_lookup()
330 if (nilfs_btree_node_get_level(node) > NILFS_BTREE_LEVEL_NODE_MIN) { nilfs_btree_node_lookup()
343 * nilfs_btree_node_broken - verify consistency of btree node
344 * @node: btree node block to be examined
345 * @size: node size (in bytes)
348 * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
350 static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, nilfs_btree_node_broken() argument
356 level = nilfs_btree_node_get_level(node); nilfs_btree_node_broken()
357 flags = nilfs_btree_node_get_flags(node); nilfs_btree_node_broken()
358 nchildren = nilfs_btree_node_get_nchildren(node); nilfs_btree_node_broken()
365 printk(KERN_CRIT "NILFS: bad btree node (blocknr=%llu): " nilfs_btree_node_broken()
374 * nilfs_btree_root_broken - verify consistency of btree root node
375 * @node: btree root node to be examined
378 * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
380 static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, nilfs_btree_root_broken() argument
386 level = nilfs_btree_node_get_level(node); nilfs_btree_root_broken()
387 flags = nilfs_btree_node_get_flags(node); nilfs_btree_root_broken()
388 nchildren = nilfs_btree_node_get_nchildren(node); nilfs_btree_root_broken()
443 struct nilfs_btree_node *node; nilfs_btree_get_node() local
446 node = nilfs_btree_get_root(btree); nilfs_btree_get_node()
449 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_get_node()
452 return node; nilfs_btree_get_node()
456 nilfs_btree_bad_node(struct nilfs_btree_node *node, int level) nilfs_btree_bad_node() argument
458 if (unlikely(nilfs_btree_node_get_level(node) != level)) { nilfs_btree_bad_node()
461 nilfs_btree_node_get_level(node), level); nilfs_btree_bad_node()
468 struct nilfs_btree_node *node; /* parent node */ member in struct:nilfs_btree_readahead_info
470 int index; /* current index on the parent node */
471 int ncmax; /* number of children in the parent node */
497 ptr2 = nilfs_btree_node_get_ptr(ra->node, i, ra->ncmax); __nilfs_btree_get_block()
540 struct nilfs_btree_node *node; nilfs_btree_do_lookup() local
545 node = nilfs_btree_get_root(btree); nilfs_btree_do_lookup()
546 level = nilfs_btree_node_get_level(node); nilfs_btree_do_lookup()
547 if (level < minlevel || nilfs_btree_node_get_nchildren(node) <= 0) nilfs_btree_do_lookup()
550 found = nilfs_btree_node_lookup(node, key, &index); nilfs_btree_do_lookup()
551 ptr = nilfs_btree_node_get_ptr(node, index, nilfs_btree_do_lookup()
561 p.node = nilfs_btree_get_node(btree, path, level + 1, nilfs_btree_do_lookup()
572 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_do_lookup()
573 if (nilfs_btree_bad_node(node, level)) nilfs_btree_do_lookup()
576 found = nilfs_btree_node_lookup(node, key, &index); nilfs_btree_do_lookup()
580 ptr = nilfs_btree_node_get_ptr(node, index, ncmax); nilfs_btree_do_lookup()
601 struct nilfs_btree_node *node; nilfs_btree_do_lookup_last() local
605 node = nilfs_btree_get_root(btree); nilfs_btree_do_lookup_last()
606 index = nilfs_btree_node_get_nchildren(node) - 1; nilfs_btree_do_lookup_last()
609 level = nilfs_btree_node_get_level(node); nilfs_btree_do_lookup_last()
610 ptr = nilfs_btree_node_get_ptr(node, index, nilfs_btree_do_lookup_last()
620 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_do_lookup_last()
621 if (nilfs_btree_bad_node(node, level)) nilfs_btree_do_lookup_last()
623 index = nilfs_btree_node_get_nchildren(node) - 1; nilfs_btree_do_lookup_last()
624 ptr = nilfs_btree_node_get_ptr(node, index, ncmax); nilfs_btree_do_lookup_last()
629 *keyp = nilfs_btree_node_get_key(node, index); nilfs_btree_do_lookup_last()
650 struct nilfs_btree_node *node; nilfs_btree_get_next_key() local
658 node = nilfs_btree_get_root(btree); nilfs_btree_get_next_key()
660 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_get_next_key()
663 if (index < nilfs_btree_node_get_nchildren(node)) { nilfs_btree_get_next_key()
664 /* Next key is in this node */ nilfs_btree_get_next_key()
665 *nextkey = nilfs_btree_node_get_key(node, index); nilfs_btree_get_next_key()
695 struct nilfs_btree_node *node; nilfs_btree_lookup_contig() local
723 node = nilfs_btree_get_node(btree, path, level, &ncmax); nilfs_btree_lookup_contig()
726 while (index < nilfs_btree_node_get_nchildren(node)) { nilfs_btree_lookup_contig()
727 if (nilfs_btree_node_get_key(node, index) != nilfs_btree_lookup_contig()
730 ptr2 = nilfs_btree_node_get_ptr(node, index, ncmax); nilfs_btree_lookup_contig()
745 /* look-up right sibling node */ nilfs_btree_lookup_contig()
746 p.node = nilfs_btree_get_node(btree, path, level + 1, &p.ncmax); nilfs_btree_lookup_contig()
749 if (p.index >= nilfs_btree_node_get_nchildren(p.node) || nilfs_btree_lookup_contig()
750 nilfs_btree_node_get_key(p.node, p.index) != key + cnt) nilfs_btree_lookup_contig()
752 ptr2 = nilfs_btree_node_get_ptr(p.node, p.index, p.ncmax); nilfs_btree_lookup_contig()
762 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_lookup_contig()
801 struct nilfs_btree_node *node; nilfs_btree_do_insert() local
805 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_do_insert()
807 nilfs_btree_node_insert(node, path[level].bp_index, nilfs_btree_do_insert()
814 nilfs_btree_node_get_key(node, nilfs_btree_do_insert()
817 node = nilfs_btree_get_root(btree); nilfs_btree_do_insert()
818 nilfs_btree_node_insert(node, path[level].bp_index, nilfs_btree_do_insert()
828 struct nilfs_btree_node *node, *left; nilfs_btree_carry_left() local
831 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_carry_left()
833 nchildren = nilfs_btree_node_get_nchildren(node); nilfs_btree_carry_left()
845 nilfs_btree_node_move_left(left, node, n, ncblk, ncblk); nilfs_btree_carry_left()
853 nilfs_btree_node_get_key(node, 0)); nilfs_btree_carry_left()
874 struct nilfs_btree_node *node, *right; nilfs_btree_carry_right() local
877 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_carry_right()
879 nchildren = nilfs_btree_node_get_nchildren(node); nilfs_btree_carry_right()
891 nilfs_btree_node_move_right(node, right, n, ncblk, ncblk); nilfs_btree_carry_right()
907 path[level].bp_index -= nilfs_btree_node_get_nchildren(node); nilfs_btree_carry_right()
921 struct nilfs_btree_node *node, *right; nilfs_btree_split() local
924 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_split()
926 nchildren = nilfs_btree_node_get_nchildren(node); nilfs_btree_split()
936 nilfs_btree_node_move_right(node, right, n, ncblk, ncblk); nilfs_btree_split()
944 path[level].bp_index -= nilfs_btree_node_get_nchildren(node); nilfs_btree_split()
999 struct nilfs_btree_node *node; nilfs_btree_find_near() local
1008 node = nilfs_btree_get_node(btree, path, level, &ncmax); nilfs_btree_find_near()
1009 return nilfs_btree_node_get_ptr(node, nilfs_btree_find_near()
1017 node = nilfs_btree_get_node(btree, path, level, &ncmax); nilfs_btree_find_near()
1018 return nilfs_btree_node_get_ptr(node, path[level].bp_index, nilfs_btree_find_near()
1051 struct nilfs_btree_node *node, *parent, *sib; nilfs_btree_prepare_insert() local
1075 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_prepare_insert()
1076 if (nilfs_btree_node_get_nchildren(node) < ncblk) { nilfs_btree_prepare_insert()
1143 node = nilfs_btree_get_root(btree); nilfs_btree_prepare_insert()
1144 if (nilfs_btree_node_get_nchildren(node) < nilfs_btree_prepare_insert()
1169 /* a newly-created node block and a data block are added */ nilfs_btree_prepare_insert()
1251 struct nilfs_btree_node *node; nilfs_btree_do_delete() local
1255 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_do_delete()
1257 nilfs_btree_node_delete(node, path[level].bp_index, nilfs_btree_do_delete()
1263 nilfs_btree_node_get_key(node, 0)); nilfs_btree_do_delete()
1265 node = nilfs_btree_get_root(btree); nilfs_btree_do_delete()
1266 nilfs_btree_node_delete(node, path[level].bp_index, nilfs_btree_do_delete()
1276 struct nilfs_btree_node *node, *left; nilfs_btree_borrow_left() local
1281 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_borrow_left()
1283 nchildren = nilfs_btree_node_get_nchildren(node); nilfs_btree_borrow_left()
1289 nilfs_btree_node_move_right(left, node, n, ncblk, ncblk); nilfs_btree_borrow_left()
1297 nilfs_btree_node_get_key(node, 0)); nilfs_btree_borrow_left()
1308 struct nilfs_btree_node *node, *right; nilfs_btree_borrow_right() local
1313 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_borrow_right()
1315 nchildren = nilfs_btree_node_get_nchildren(node); nilfs_btree_borrow_right()
1321 nilfs_btree_node_move_left(node, right, n, ncblk, ncblk); nilfs_btree_borrow_right()
1341 struct nilfs_btree_node *node, *left; nilfs_btree_concat_left() local
1346 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_concat_left()
1350 n = nilfs_btree_node_get_nchildren(node); nilfs_btree_concat_left()
1352 nilfs_btree_node_move_left(left, node, n, ncblk, ncblk); nilfs_btree_concat_left()
1367 struct nilfs_btree_node *node, *right; nilfs_btree_concat_right() local
1372 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_concat_right()
1378 nilfs_btree_node_move_left(node, right, n, ncblk, ncblk); nilfs_btree_concat_right()
1425 struct nilfs_btree_node *node, *parent, *sib; nilfs_btree_prepare_delete() local
1437 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_prepare_delete()
1439 nilfs_btree_node_get_ptr(node, dindex, ncblk); nilfs_btree_prepare_delete()
1445 if (nilfs_btree_node_get_nchildren(node) > ncmin) { nilfs_btree_prepare_delete()
1493 * When merging right sibling node nilfs_btree_prepare_delete()
1494 * into the current node, pointer to nilfs_btree_prepare_delete()
1495 * the right sibling node must be nilfs_btree_prepare_delete()
1504 /* the only child of the root node */ nilfs_btree_prepare_delete()
1506 if (nilfs_btree_node_get_nchildren(node) - 1 <= nilfs_btree_prepare_delete()
1521 /* child of the root node is deleted */ nilfs_btree_prepare_delete()
1526 node = nilfs_btree_get_root(btree); nilfs_btree_prepare_delete()
1528 nilfs_btree_node_get_ptr(node, dindex, nilfs_btree_prepare_delete()
1639 struct nilfs_btree_node *root, *node; nilfs_btree_check_delete() local
1648 node = root; nilfs_btree_check_delete()
1659 node = (struct nilfs_btree_node *)bh->b_data; nilfs_btree_check_delete()
1665 nchildren = nilfs_btree_node_get_nchildren(node); nilfs_btree_check_delete()
1666 maxkey = nilfs_btree_node_get_key(node, nchildren - 1); nilfs_btree_check_delete()
1668 nilfs_btree_node_get_key(node, nchildren - 2) : 0; nilfs_btree_check_delete()
1679 struct nilfs_btree_node *node, *root; nilfs_btree_gather_data() local
1689 node = root; nilfs_btree_gather_data()
1700 node = (struct nilfs_btree_node *)bh->b_data; nilfs_btree_gather_data()
1704 node = NULL; nilfs_btree_gather_data()
1708 nchildren = nilfs_btree_node_get_nchildren(node); nilfs_btree_gather_data()
1711 dkeys = nilfs_btree_node_dkeys(node); nilfs_btree_gather_data()
1712 dptrs = nilfs_btree_node_dptrs(node, ncmax); nilfs_btree_gather_data()
1786 struct nilfs_btree_node *node; nilfs_btree_commit_convert_and_insert() local
1805 /* create child node at level 1 */ nilfs_btree_commit_convert_and_insert()
1806 node = (struct nilfs_btree_node *)bh->b_data; nilfs_btree_commit_convert_and_insert()
1808 nilfs_btree_node_init(node, 0, 1, n, ncblk, keys, ptrs); nilfs_btree_commit_convert_and_insert()
1809 nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk); nilfs_btree_commit_convert_and_insert()
1817 /* create root node at level 2 */ nilfs_btree_commit_convert_and_insert()
1818 node = nilfs_btree_get_root(btree); nilfs_btree_commit_convert_and_insert()
1820 nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 2, 1, nilfs_btree_commit_convert_and_insert()
1826 /* create root node at level 1 */ nilfs_btree_commit_convert_and_insert()
1827 node = nilfs_btree_get_root(btree); nilfs_btree_commit_convert_and_insert()
1828 nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 1, n, nilfs_btree_commit_convert_and_insert()
1831 nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, nilfs_btree_commit_convert_and_insert()
2054 struct nilfs_btree_node *node; nilfs_btree_propagate() local
2065 node = (struct nilfs_btree_node *)bh->b_data; nilfs_btree_propagate()
2066 key = nilfs_btree_node_get_key(node, 0); nilfs_btree_propagate()
2067 level = nilfs_btree_node_get_level(node); nilfs_btree_propagate()
2103 struct nilfs_btree_node *node, *cnode; nilfs_btree_add_dirty_buffer() local
2108 node = (struct nilfs_btree_node *)bh->b_data; nilfs_btree_add_dirty_buffer()
2109 key = nilfs_btree_node_get_key(node, 0); nilfs_btree_add_dirty_buffer()
2110 level = nilfs_btree_node_get_level(node); nilfs_btree_add_dirty_buffer()
2248 struct nilfs_btree_node *node; nilfs_btree_assign() local
2257 node = (struct nilfs_btree_node *)(*bh)->b_data; nilfs_btree_assign()
2258 key = nilfs_btree_node_get_key(node, 0); nilfs_btree_assign()
2259 level = nilfs_btree_node_get_level(node); nilfs_btree_assign()
2286 struct nilfs_btree_node *node; nilfs_btree_assign_gc() local
2296 node = (struct nilfs_btree_node *)(*bh)->b_data; nilfs_btree_assign_gc()
2297 key = nilfs_btree_node_get_key(node, 0); nilfs_btree_assign_gc()
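The borrow and concat helpers above (nilfs_btree_borrow_left/right, nilfs_btree_concat_left/right) all reduce to the same array arithmetic: move entries between two fixed-capacity siblings so that neither underflows. A minimal stand-alone sketch of that rebalancing step, using plain int arrays and a hypothetical borrow_from_left() — this is not nilfs's on-disk node layout:

    #include <string.h>

    /* Borrow entries from a richer left sibling until both arrays
     * hold roughly half of the combined total. */
    static void borrow_from_left(int *left, int *nleft,
                                 int *node, int *nnode)
    {
            int n = (*nleft + *nnode) / 2 - *nnode;   /* entries to move */

            if (n <= 0)
                    return;
            /* make room at the front of node, then copy left's tail in */
            memmove(node + n, node, *nnode * sizeof(int));
            memcpy(node, left + *nleft - n, n * sizeof(int));
            *nleft -= n;
            *nnode += n;
    }

After such a move the parent's separator key must be refreshed, which is why the helpers above re-read nilfs_btree_node_get_key(node, 0) once entries have been shifted.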
/linux-4.4.14/arch/mips/sgi-ip27/
ip27-memory.c 265 static unsigned long __init slot_psize_compute(cnodeid_t node, int slot) slot_psize_compute() argument

272 nasid = COMPACT_TO_NASID_NODEID(node); slot_psize_compute()
273 /* Find the node board */ slot_psize_compute()
331 * Always have node 0 in the region mask, otherwise for_each_online_node()
333 * thinks it is a node 0 address. for_each_online_node()
358 cnodeid_t node; szmem() local
360 for_each_online_node(node) { for_each_online_node()
363 slot_psize = slot_psize_compute(node, slot); for_each_online_node()
377 printk("Ignoring slot %d onwards on node %d\n", for_each_online_node()
378 slot, node); for_each_online_node()
382 memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)), for_each_online_node()
383 PFN_PHYS(slot_psize), node); for_each_online_node() local
388 static void __init node_mem_init(cnodeid_t node) node_mem_init() argument
390 unsigned long slot_firstpfn = slot_getbasepfn(node, 0); node_mem_init()
391 unsigned long slot_freepfn = node_getfirstfree(node); node_mem_init()
395 get_pfn_range_for_nid(node, &start_pfn, &end_pfn); node_mem_init()
398 * Allocate the node data structures on the node first. node_mem_init()
400 __node_data[node] = __va(slot_freepfn << PAGE_SHIFT); node_mem_init()
401 memset(__node_data[node], 0, PAGE_SIZE); node_mem_init()
403 NODE_DATA(node)->bdata = &bootmem_node_data[node]; node_mem_init()
404 NODE_DATA(node)->node_start_pfn = start_pfn; node_mem_init()
405 NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; node_mem_init()
407 cpumask_clear(&hub_data(node)->h_cpus); node_mem_init()
412 bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn, node_mem_init()
414 free_bootmem_with_active_regions(node, end_pfn); node_mem_init()
415 reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT, node_mem_init()
418 sparse_memory_present_with_active_regions(node); node_mem_init()
422 * A node with nothing. We use it to avoid any special casing in
438 cnodeid_t node; prom_meminit() local
443 for (node = 0; node < MAX_COMPACT_NODES; node++) { prom_meminit()
444 if (node_online(node)) { prom_meminit()
445 node_mem_init(node); prom_meminit()
448 __node_data[node] = &null_node; prom_meminit()
462 unsigned node; paging_init() local
466 for_each_online_node(node) { for_each_online_node()
469 get_pfn_range_for_nid(node, &start_pfn, &end_pfn); for_each_online_node()
482 setup_zero_pages(); /* This comes from node 0 */ mem_init()
/linux-4.4.14/drivers/clk/socfpga/
clk-periph.c 60 static __init void __socfpga_periph_init(struct device_node *node, __socfpga_periph_init() argument
66 const char *clk_name = node->name; __socfpga_periph_init()
73 of_property_read_u32(node, "reg", &reg); __socfpga_periph_init()
81 rc = of_property_read_u32_array(node, "div-reg", div_reg, 3); __socfpga_periph_init()
90 rc = of_property_read_u32(node, "fixed-divider", &fixed_div); __socfpga_periph_init()
96 of_property_read_string(node, "clock-output-names", &clk_name); __socfpga_periph_init()
102 init.num_parents = of_clk_parent_fill(node, parent_name, __socfpga_periph_init()
113 rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); __socfpga_periph_init()
116 void __init socfpga_periph_init(struct device_node *node) socfpga_periph_init() argument
118 __socfpga_periph_init(node, &periclk_ops); socfpga_periph_init()
clk-periph-a10.c 70 static __init void __socfpga_periph_init(struct device_node *node, __socfpga_periph_init() argument
76 const char *clk_name = node->name; __socfpga_periph_init()
83 of_property_read_u32(node, "reg", &reg); __socfpga_periph_init()
91 rc = of_property_read_u32_array(node, "div-reg", div_reg, 3); __socfpga_periph_init()
100 rc = of_property_read_u32(node, "fixed-divider", &fixed_div); __socfpga_periph_init()
106 of_property_read_string(node, "clock-output-names", &clk_name); __socfpga_periph_init()
112 parent_name = of_clk_get_parent_name(node, 0); __socfpga_periph_init()
123 rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); __socfpga_periph_init()
125 pr_err("Could not register clock provider for node:%s\n", __socfpga_periph_init()
136 void __init socfpga_a10_periph_init(struct device_node *node) socfpga_a10_periph_init() argument
138 __socfpga_periph_init(node, &periclk_ops); socfpga_a10_periph_init()
clk.h 38 void __init socfpga_pll_init(struct device_node *node);
39 void __init socfpga_periph_init(struct device_node *node);
40 void __init socfpga_gate_init(struct device_node *node);
41 void socfpga_a10_pll_init(struct device_node *node);
42 void socfpga_a10_periph_init(struct device_node *node);
43 void socfpga_a10_gate_init(struct device_node *node);
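The periph init functions above follow the usual of_clk bring-up pattern: parse properties from the device_node, register a clk, then expose it through of_clk_add_provider(). A hedged sketch of that pattern for a hypothetical fixed-rate clock — "vendor,my-fixed-clk" and the property names are illustrative, not socfpga bindings:

    #include <linux/clk-provider.h>
    #include <linux/err.h>
    #include <linux/of.h>

    static void __init my_clk_init(struct device_node *node)
    {
            const char *clk_name = node->name;
            struct clk *clk;
            u32 rate;

            if (of_property_read_u32(node, "clock-frequency", &rate))
                    return;
            /* optional name override, as the socfpga code does above */
            of_property_read_string(node, "clock-output-names", &clk_name);

            clk = clk_register_fixed_rate(NULL, clk_name, NULL,
                                          CLK_IS_ROOT, rate);
            if (IS_ERR(clk))
                    return;
            of_clk_add_provider(node, of_clk_src_simple_get, clk);
    }
    CLK_OF_DECLARE(my_clk, "vendor,my-fixed-clk", my_clk_init);

CLK_OF_DECLARE() makes the init function run for every matching device-tree node during early boot, which is why these functions are __init and return no error code.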
/linux-4.4.14/fs/jffs2/
nodelist.c 36 dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n", jffs2_add_fd_to_list()
93 if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) { jffs2_truncate_fragtree()
96 frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE; jffs2_truncate_fragtree()
104 if (this->node) { jffs2_obsolete_node_frag()
105 this->node->frags--; jffs2_obsolete_node_frag()
106 if (!this->node->frags) { jffs2_obsolete_node_frag()
107 /* The node has no valid frags left. It's totally obsoleted */ jffs2_obsolete_node_frag()
108 dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) obsolete\n", jffs2_obsolete_node_frag()
109 ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size); jffs2_obsolete_node_frag()
110 jffs2_mark_node_obsolete(c, this->node->raw); jffs2_obsolete_node_frag()
111 jffs2_free_full_dnode(this->node); jffs2_obsolete_node_frag()
113 dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n", jffs2_obsolete_node_frag()
114 ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, this->node->frags); jffs2_obsolete_node_frag()
115 mark_ref_normal(this->node->raw); jffs2_obsolete_node_frag()
157 newfrag->node = fn; new_fragment()
173 if (lastend < newfrag->node->ofs) { no_overlapping_node()
177 holefrag= new_fragment(NULL, lastend, newfrag->node->ofs - lastend); no_overlapping_node()
184 /* By definition, the 'this' node has no right-hand child, no_overlapping_node()
200 /* By definition, the 'this' node has no right-hand child, no_overlapping_node()
203 dbg_fragtree2("add the new node at the right\n"); no_overlapping_node()
206 dbg_fragtree2("insert the new node at the root of the tree\n"); no_overlapping_node()
221 this = jffs2_lookup_node_frag(root, newfrag->node->ofs); jffs2_add_frag_to_fragtree()
225 this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this); jffs2_add_frag_to_fragtree()
236 /* Check if 'this' node was on the same page as the new node. jffs2_add_frag_to_fragtree()
237 If so, both 'this' and the new node get marked REF_NORMAL so jffs2_add_frag_to_fragtree()
241 if (this->node) jffs2_add_frag_to_fragtree()
242 mark_ref_normal(this->node->raw); jffs2_add_frag_to_fragtree()
243 mark_ref_normal(newfrag->node->raw); jffs2_add_frag_to_fragtree()
249 if (this->node) jffs2_add_frag_to_fragtree()
252 ref_offset(this->node->raw), ref_flags(this->node->raw)); jffs2_add_frag_to_fragtree()
261 /* This node isn't completely obsoleted. The start of it remains valid */ jffs2_add_frag_to_fragtree()
263 /* Mark the new node and the partially covered node REF_NORMAL -- let jffs2_add_frag_to_fragtree()
265 mark_ref_normal(newfrag->node->raw); jffs2_add_frag_to_fragtree()
266 if (this->node) jffs2_add_frag_to_fragtree()
267 mark_ref_normal(this->node->raw); jffs2_add_frag_to_fragtree()
270 /* The new node splits 'this' frag into two */ jffs2_add_frag_to_fragtree()
273 if (this->node) jffs2_add_frag_to_fragtree()
275 this->ofs, this->ofs+this->size, ref_offset(this->node->raw)); jffs2_add_frag_to_fragtree()
280 /* New second frag pointing to this's node */ jffs2_add_frag_to_fragtree()
281 newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size, jffs2_add_frag_to_fragtree()
285 if (this->node) jffs2_add_frag_to_fragtree()
286 this->node->frags++; jffs2_add_frag_to_fragtree()
291 /* Now, we know there's no node with offset jffs2_add_frag_to_fragtree()
305 /* New node just reduces 'this' frag in size, doesn't split it */ jffs2_add_frag_to_fragtree()
320 dbg_fragtree2("obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size); jffs2_add_frag_to_fragtree()
336 dbg_fragtree2("obsoleting node frag %p (%x-%x) and removing from tree\n", jffs2_add_frag_to_fragtree()
352 if (this->node) jffs2_add_frag_to_fragtree()
353 mark_ref_normal(this->node->raw); jffs2_add_frag_to_fragtree()
354 mark_ref_normal(newfrag->node->raw); jffs2_add_frag_to_fragtree()
360 * Given an inode, probably with existing tree of fragments, add the new node
374 newfrag->node->frags = 1; jffs2_add_full_dnode_to_inode()
376 dbg_fragtree("adding node %#04x-%#04x @0x%08x on flash, newfrag *%p\n", jffs2_add_full_dnode_to_inode()
384 or next node REF_NORMAL, as appropriate. */ jffs2_add_full_dnode_to_inode()
390 if (prev->node) jffs2_add_full_dnode_to_inode()
391 mark_ref_normal(prev->node->raw); jffs2_add_full_dnode_to_inode()
399 if (next->node) jffs2_add_full_dnode_to_inode()
400 mark_ref_normal(next->node->raw); jffs2_add_full_dnode_to_inode()
526 /* The common case in lookup is that there will be a node jffs2_lookup_node_frag()
571 if (frag->node && !(--frag->node->frags)) { rbtree_postorder_for_each_entry_safe()
573 of this node. Free the node */ rbtree_postorder_for_each_entry_safe()
575 jffs2_mark_node_obsolete(c, frag->node->raw); rbtree_postorder_for_each_entry_safe()
577 jffs2_free_full_dnode(frag->node); rbtree_postorder_for_each_entry_safe()
597 dbg_noderef("Last node at %p is (%08x,%p)\n", ref, ref->flash_offset, jffs2_link_node_ref()
705 /* Last node in block. Use free_space */ __ref_totlen()
debug.c 81 struct jffs2_full_dnode *fn = frag->node; __jffs2_dbg_fragtree_paranoia_check_nolock()
88 JFFS2_ERROR("REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2.\n", __jffs2_dbg_fragtree_paranoia_check_nolock()
93 /* A hole node which isn't multi-page should be garbage-collected __jffs2_dbg_fragtree_paranoia_check_nolock()
95 rather than mucking around with actually reading the node __jffs2_dbg_fragtree_paranoia_check_nolock()
97 to tell a hole node. */ __jffs2_dbg_fragtree_paranoia_check_nolock()
99 && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) { __jffs2_dbg_fragtree_paranoia_check_nolock()
100 JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n", __jffs2_dbg_fragtree_paranoia_check_nolock()
106 && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) { __jffs2_dbg_fragtree_paranoia_check_nolock()
107 JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n", __jffs2_dbg_fragtree_paranoia_check_nolock()
150 JFFS2_ERROR("argh, about to write node to %#08x on flash, but there are data already there. The first corrupted byte is at %#08x offset.\n", __jffs2_dbg_prewrite_paranoia_check()
336 JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next at %#08x (mem %p), last_node is at %#08x (mem %p).\n", __jffs2_dbg_acct_paranoia_check_nolock()
711 if (this->node) __jffs2_dbg_dump_fragtree_nolock()
713 this->ofs, this->ofs+this->size, ref_offset(this->node->raw), __jffs2_dbg_dump_fragtree_nolock()
714 ref_flags(this->node->raw), this, frag_left(this), frag_right(this), __jffs2_dbg_dump_fragtree_nolock()
770 * Dump a JFFS2 node.
775 union jffs2_node_union node; __jffs2_dbg_dump_node() local
781 printk(JFFS2_DBG_MSG_PREFIX " dump node at offset %#08x.\n", ofs); __jffs2_dbg_dump_node()
783 ret = jffs2_flash_read(c, ofs, len, &retlen, (unsigned char *)&node); __jffs2_dbg_dump_node()
790 printk(JFFS2_DBG "magic:\t%#04x\n", je16_to_cpu(node.u.magic)); __jffs2_dbg_dump_node()
791 printk(JFFS2_DBG "nodetype:\t%#04x\n", je16_to_cpu(node.u.nodetype)); __jffs2_dbg_dump_node()
792 printk(JFFS2_DBG "totlen:\t%#08x\n", je32_to_cpu(node.u.totlen)); __jffs2_dbg_dump_node()
793 printk(JFFS2_DBG "hdr_crc:\t%#08x\n", je32_to_cpu(node.u.hdr_crc)); __jffs2_dbg_dump_node()
795 crc = crc32(0, &node.u, sizeof(node.u) - 4); __jffs2_dbg_dump_node()
796 if (crc != je32_to_cpu(node.u.hdr_crc)) { __jffs2_dbg_dump_node()
801 if (je16_to_cpu(node.u.magic) != JFFS2_MAGIC_BITMASK && __jffs2_dbg_dump_node()
802 je16_to_cpu(node.u.magic) != JFFS2_OLD_MAGIC_BITMASK) __jffs2_dbg_dump_node()
804 JFFS2_ERROR("wrong node magic: %#04x instead of %#04x.\n", __jffs2_dbg_dump_node()
805 je16_to_cpu(node.u.magic), JFFS2_MAGIC_BITMASK); __jffs2_dbg_dump_node()
809 switch(je16_to_cpu(node.u.nodetype)) { __jffs2_dbg_dump_node()
813 printk(JFFS2_DBG "the node is inode node\n"); __jffs2_dbg_dump_node()
814 printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.i.ino)); __jffs2_dbg_dump_node()
815 printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.i.version)); __jffs2_dbg_dump_node()
816 printk(JFFS2_DBG "mode:\t%#08x\n", node.i.mode.m); __jffs2_dbg_dump_node()
817 printk(JFFS2_DBG "uid:\t%#04x\n", je16_to_cpu(node.i.uid)); __jffs2_dbg_dump_node()
818 printk(JFFS2_DBG "gid:\t%#04x\n", je16_to_cpu(node.i.gid)); __jffs2_dbg_dump_node()
819 printk(JFFS2_DBG "isize:\t%#08x\n", je32_to_cpu(node.i.isize)); __jffs2_dbg_dump_node()
820 printk(JFFS2_DBG "atime:\t%#08x\n", je32_to_cpu(node.i.atime)); __jffs2_dbg_dump_node()
821 printk(JFFS2_DBG "mtime:\t%#08x\n", je32_to_cpu(node.i.mtime)); __jffs2_dbg_dump_node()
822 printk(JFFS2_DBG "ctime:\t%#08x\n", je32_to_cpu(node.i.ctime)); __jffs2_dbg_dump_node()
823 printk(JFFS2_DBG "offset:\t%#08x\n", je32_to_cpu(node.i.offset)); __jffs2_dbg_dump_node()
824 printk(JFFS2_DBG "csize:\t%#08x\n", je32_to_cpu(node.i.csize)); __jffs2_dbg_dump_node()
825 printk(JFFS2_DBG "dsize:\t%#08x\n", je32_to_cpu(node.i.dsize)); __jffs2_dbg_dump_node()
826 printk(JFFS2_DBG "compr:\t%#02x\n", node.i.compr); __jffs2_dbg_dump_node()
827 printk(JFFS2_DBG "usercompr:\t%#02x\n", node.i.usercompr); __jffs2_dbg_dump_node()
828 printk(JFFS2_DBG "flags:\t%#04x\n", je16_to_cpu(node.i.flags)); __jffs2_dbg_dump_node()
829 printk(JFFS2_DBG "data_crc:\t%#08x\n", je32_to_cpu(node.i.data_crc)); __jffs2_dbg_dump_node()
830 printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.i.node_crc)); __jffs2_dbg_dump_node()
832 crc = crc32(0, &node.i, sizeof(node.i) - 8); __jffs2_dbg_dump_node()
833 if (crc != je32_to_cpu(node.i.node_crc)) { __jffs2_dbg_dump_node()
834 JFFS2_ERROR("wrong node header CRC.\n"); __jffs2_dbg_dump_node()
841 printk(JFFS2_DBG "the node is dirent node\n"); __jffs2_dbg_dump_node()
842 printk(JFFS2_DBG "pino:\t%#08x\n", je32_to_cpu(node.d.pino)); __jffs2_dbg_dump_node()
843 printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.d.version)); __jffs2_dbg_dump_node()
844 printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.d.ino)); __jffs2_dbg_dump_node()
845 printk(JFFS2_DBG "mctime:\t%#08x\n", je32_to_cpu(node.d.mctime)); __jffs2_dbg_dump_node()
846 printk(JFFS2_DBG "nsize:\t%#02x\n", node.d.nsize); __jffs2_dbg_dump_node()
847 printk(JFFS2_DBG "type:\t%#02x\n", node.d.type); __jffs2_dbg_dump_node()
848 printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.d.node_crc)); __jffs2_dbg_dump_node()
849 printk(JFFS2_DBG "name_crc:\t%#08x\n", je32_to_cpu(node.d.name_crc)); __jffs2_dbg_dump_node()
851 node.d.name[node.d.nsize] = '\0'; __jffs2_dbg_dump_node()
852 printk(JFFS2_DBG "name:\t\"%s\"\n", node.d.name); __jffs2_dbg_dump_node()
854 crc = crc32(0, &node.d, sizeof(node.d) - 8); __jffs2_dbg_dump_node()
855 if (crc != je32_to_cpu(node.d.node_crc)) { __jffs2_dbg_dump_node()
856 JFFS2_ERROR("wrong node header CRC.\n"); __jffs2_dbg_dump_node()
862 printk(JFFS2_DBG "node type is unknown\n"); __jffs2_dbg_dump_node()
scan.c 175 /* Only a CLEANMARKER node is valid */ jffs2_scan_medium()
337 JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", jffs2_scan_xattr_node()
350 JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n", jffs2_scan_xattr_node()
364 raw->next_in_ino = xd->node->next_in_ino; jffs2_scan_xattr_node()
365 xd->node->next_in_ino = raw; jffs2_scan_xattr_node()
393 JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", jffs2_scan_xref_node()
401 JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%zd\n", jffs2_scan_xref_node()
444 struct jffs2_unknown_node *node; jffs2_scan_eraseblock() local
526 /* Need to read more so that the entire summary node is present */ jffs2_scan_eraseblock()
613 /* Make sure there are node refs available for use */ jffs2_scan_eraseblock()
635 if (jeb->offset + c->sector_size < ofs + sizeof(*node)) { jffs2_scan_eraseblock()
639 sizeof(*node)); jffs2_scan_eraseblock()
645 if (buf_ofs + buf_len < ofs + sizeof(*node)) { jffs2_scan_eraseblock()
647 jffs2_dbg(1, "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n", jffs2_scan_eraseblock()
656 node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs]; jffs2_scan_eraseblock()
718 if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) { jffs2_scan_eraseblock()
726 if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) { jffs2_scan_eraseblock()
733 if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) { jffs2_scan_eraseblock()
741 if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) { jffs2_scan_eraseblock()
746 je16_to_cpu(node->magic)); jffs2_scan_eraseblock()
752 /* We seem to have a node of sorts. Check the CRC */ jffs2_scan_eraseblock()
753 crcnode.magic = node->magic; jffs2_scan_eraseblock()
754 crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE); jffs2_scan_eraseblock()
755 crcnode.totlen = node->totlen; jffs2_scan_eraseblock()
758 if (hdr_crc != je32_to_cpu(node->hdr_crc)) { jffs2_scan_eraseblock()
761 ofs, je16_to_cpu(node->magic), jffs2_scan_eraseblock()
762 je16_to_cpu(node->nodetype), jffs2_scan_eraseblock()
763 je32_to_cpu(node->totlen), jffs2_scan_eraseblock()
764 je32_to_cpu(node->hdr_crc), jffs2_scan_eraseblock()
772 if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) { jffs2_scan_eraseblock()
775 ofs, je32_to_cpu(node->totlen)); jffs2_scan_eraseblock()
783 if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) { jffs2_scan_eraseblock()
784 /* Wheee. This is an obsoleted node */ jffs2_scan_eraseblock()
787 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) jffs2_scan_eraseblock()
789 ofs += PAD(je32_to_cpu(node->totlen)); jffs2_scan_eraseblock()
793 switch(je16_to_cpu(node->nodetype)) { jffs2_scan_eraseblock()
797 jffs2_dbg(1, "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n", jffs2_scan_eraseblock()
804 node = (void *)buf; jffs2_scan_eraseblock()
806 err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s); jffs2_scan_eraseblock()
808 ofs += PAD(je32_to_cpu(node->totlen)); jffs2_scan_eraseblock()
812 if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { jffs2_scan_eraseblock()
814 jffs2_dbg(1, "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n", jffs2_scan_eraseblock()
815 je32_to_cpu(node->totlen), buf_len, jffs2_scan_eraseblock()
821 node = (void *)buf; jffs2_scan_eraseblock()
823 err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s); jffs2_scan_eraseblock()
825 ofs += PAD(je32_to_cpu(node->totlen)); jffs2_scan_eraseblock()
830 if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { jffs2_scan_eraseblock()
832 jffs2_dbg(1, "Fewer than %d bytes (xattr node) left to end of buf. Reading 0x%x at 0x%08x\n", jffs2_scan_eraseblock()
833 je32_to_cpu(node->totlen), buf_len, jffs2_scan_eraseblock()
839 node = (void *)buf; jffs2_scan_eraseblock()
841 err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s); jffs2_scan_eraseblock()
844 ofs += PAD(je32_to_cpu(node->totlen)); jffs2_scan_eraseblock()
847 if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { jffs2_scan_eraseblock()
849 jffs2_dbg(1, "Fewer than %d bytes (xref node) left to end of buf. Reading 0x%x at 0x%08x\n", jffs2_scan_eraseblock()
850 je32_to_cpu(node->totlen), buf_len, jffs2_scan_eraseblock()
856 node = (void *)buf; jffs2_scan_eraseblock()
858 err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s); jffs2_scan_eraseblock()
861 ofs += PAD(je32_to_cpu(node->totlen)); jffs2_scan_eraseblock()
866 jffs2_dbg(1, "CLEANMARKER node found at 0x%08x\n", ofs); jffs2_scan_eraseblock()
867 if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { jffs2_scan_eraseblock()
868 pr_notice("CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", jffs2_scan_eraseblock()
869 ofs, je32_to_cpu(node->totlen), jffs2_scan_eraseblock()
875 pr_notice("CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", jffs2_scan_eraseblock()
889 jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen)); jffs2_scan_eraseblock()
890 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) jffs2_scan_eraseblock()
892 ofs += PAD(je32_to_cpu(node->totlen)); jffs2_scan_eraseblock()
896 switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) { jffs2_scan_eraseblock()
898 pr_notice("Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", jffs2_scan_eraseblock()
899 je16_to_cpu(node->nodetype), ofs); jffs2_scan_eraseblock()
903 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) jffs2_scan_eraseblock()
905 ofs += PAD(je32_to_cpu(node->totlen)); jffs2_scan_eraseblock()
909 pr_notice("Incompatible feature node (0x%04x) found at offset 0x%08x\n", jffs2_scan_eraseblock()
910 je16_to_cpu(node->nodetype), ofs); jffs2_scan_eraseblock()
914 jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", jffs2_scan_eraseblock()
915 je16_to_cpu(node->nodetype), ofs); jffs2_scan_eraseblock()
916 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) jffs2_scan_eraseblock()
918 ofs += PAD(je32_to_cpu(node->totlen)); jffs2_scan_eraseblock()
922 jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", jffs2_scan_eraseblock()
923 je16_to_cpu(node->nodetype), ofs); jffs2_scan_eraseblock()
925 jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL); jffs2_scan_eraseblock()
929 ofs += PAD(je32_to_cpu(node->totlen)); jffs2_scan_eraseblock()
994 this node; we can do all the CRC checking etc. later. There's a tradeoff here -- jffs2_scan_inode_node()
1002 /* Check the node CRC in any case. */ jffs2_scan_inode_node()
1005 pr_notice("%s(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", jffs2_scan_inode_node()
1008 * We believe totlen because the CRC on the node jffs2_scan_inode_node()
1009 * _header_ was OK, just the node itself failed. jffs2_scan_inode_node()
1050 /* We don't get here unless the node is still valid, so we don't have to jffs2_scan_dirent_node()
1055 pr_notice("%s(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", jffs2_scan_dirent_node()
1057 /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */ jffs2_scan_dirent_node()
1080 pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", jffs2_scan_dirent_node()
1086 /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */ jffs2_scan_dirent_node()
/linux-4.4.14/drivers/gpu/drm/radeon/
radeon_mn.c 49 struct hlist_node node; member in struct:radeon_mn
72 struct radeon_mn_node *node, *next_node; radeon_mn_destroy() local
77 hash_del(&rmn->node); radeon_mn_destroy()
78 rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects, radeon_mn_destroy()
81 interval_tree_remove(&node->it, &rmn->objects); radeon_mn_destroy()
82 list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) { radeon_mn_destroy()
86 kfree(node); radeon_mn_destroy()
136 struct radeon_mn_node *node; radeon_mn_invalidate_range_start() local
140 node = container_of(it, struct radeon_mn_node, it); radeon_mn_invalidate_range_start()
143 list_for_each_entry(bo, &node->bos, mn_list) { radeon_mn_invalidate_range_start()
192 hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm) radeon_mn_get()
212 hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm); radeon_mn_get()
242 struct radeon_mn_node *node = NULL; radeon_mn_register() local
255 kfree(node); radeon_mn_register()
256 node = container_of(it, struct radeon_mn_node, it); radeon_mn_register()
257 interval_tree_remove(&node->it, &rmn->objects); radeon_mn_register()
260 list_splice(&node->bos, &bos); radeon_mn_register()
263 if (!node) { radeon_mn_register()
264 node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL); radeon_mn_register()
265 if (!node) { radeon_mn_register()
273 node->it.start = addr; radeon_mn_register()
274 node->it.last = end; radeon_mn_register()
275 INIT_LIST_HEAD(&node->bos); radeon_mn_register()
276 list_splice(&bos, &node->bos); radeon_mn_register()
277 list_add(&bo->mn_list, &node->bos); radeon_mn_register()
279 interval_tree_insert(&node->it, &rmn->objects); radeon_mn_register()
314 struct radeon_mn_node *node; radeon_mn_unregister() local
315 node = container_of(head, struct radeon_mn_node, bos); radeon_mn_unregister()
316 interval_tree_remove(&node->it, &rmn->objects); radeon_mn_unregister()
317 kfree(node); radeon_mn_unregister()
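radeon_mn (and the near-identical amdgpu_mn below) keeps buffer objects in a generic interval tree keyed by userspace address range, which is what the interval_tree_insert()/interval_tree_remove() calls above are doing. A minimal sketch of that <linux/interval_tree.h> API on its own; the address values are illustrative:

    #include <linux/interval_tree.h>

    static void interval_tree_demo(void)
    {
            struct rb_root root = RB_ROOT;
            struct interval_tree_node n = { .start = 0x1000, .last = 0x1fff };
            struct interval_tree_node *it;

            interval_tree_insert(&n, &root);

            /* visit every node overlapping [0x1800, 0x28ff] */
            for (it = interval_tree_iter_first(&root, 0x1800, 0x28ff);
                 it; it = interval_tree_iter_next(it, 0x1800, 0x28ff))
                    ; /* container_of(it, ...) reaches the embedding object */

            interval_tree_remove(&n, &root);
    }

The drivers embed the interval_tree_node inside their own radeon_mn_node/amdgpu_mn_node and recover it with container_of(), exactly as the invalidate_range_start() handlers above do.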
/linux-4.4.14/fs/btrfs/
ulist.c 18 * visiting a node twice. The pseudo-code could look like this: ulist_fini()
27 * do something useful with the node;
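The pseudo-code sketched in this header comment maps onto the concrete ulist calls defined below. A minimal sketch of the add/iterate/free cycle — the value 42 is illustrative; the declarations live in fs/btrfs/ulist.h:

    static void ulist_demo(void)
    {
            struct ulist *ul = ulist_alloc(GFP_KERNEL);
            struct ulist_iterator uiter;
            struct ulist_node *un;

            if (!ul)
                    return;
            ulist_add(ul, 42, 0, GFP_KERNEL);   /* val 42, aux 0 */
            ulist_add(ul, 42, 0, GFP_KERNEL);   /* duplicate: not added again */

            ULIST_ITER_INIT(&uiter);
            while ((un = ulist_next(ul, &uiter)))
                    ; /* do something useful with un->val / un->aux */

            ulist_free(ul);
    }

Because duplicates are rejected, pushing each newly visited node with ulist_add() is what guarantees the "visiting a node twice" problem mentioned above cannot occur.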
63 struct ulist_node *node; ulist_fini() local
66 list_for_each_entry_safe(node, next, &ulist->nodes, list) { ulist_fini()
67 kfree(node); ulist_fini()
135 static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node) ulist_rbtree_erase() argument
137 rb_erase(&node->rb_node, &ulist->root); ulist_rbtree_erase()
138 list_del(&node->list); ulist_rbtree_erase()
139 kfree(node); ulist_rbtree_erase()
195 struct ulist_node *node; ulist_add_merge() local
197 node = ulist_rbtree_search(ulist, val); ulist_add_merge()
198 if (node) { ulist_add_merge()
200 *old_aux = node->aux; ulist_add_merge()
203 node = kmalloc(sizeof(*node), gfp_mask); ulist_add_merge()
204 if (!node) ulist_add_merge()
207 node->val = val; ulist_add_merge()
208 node->aux = aux; ulist_add_merge()
210 ret = ulist_rbtree_insert(ulist, node); ulist_add_merge()
212 list_add_tail(&node->list, &ulist->nodes); ulist_add_merge()
219 * ulist_del - delete one node from ulist
220 * @ulist: ulist to remove node from
230 struct ulist_node *node; ulist_del() local
232 node = ulist_rbtree_search(ulist, val); ulist_del()
234 if (!node) ulist_del()
237 if (node->aux != aux) ulist_del()
241 ulist_rbtree_erase(ulist, node); ulist_del()
263 struct ulist_node *node; ulist_next() local
274 node = list_entry(uiter->cur_list, struct ulist_node, list); ulist_next()
275 return node; ulist_next()
delayed-inode.c 92 struct btrfs_delayed_node *node; btrfs_get_delayed_node() local
94 node = ACCESS_ONCE(btrfs_inode->delayed_node); btrfs_get_delayed_node()
95 if (node) { btrfs_get_delayed_node()
96 atomic_inc(&node->refs); btrfs_get_delayed_node()
97 return node; btrfs_get_delayed_node()
101 node = radix_tree_lookup(&root->delayed_nodes_tree, ino); btrfs_get_delayed_node()
102 if (node) { btrfs_get_delayed_node()
104 atomic_inc(&node->refs); /* can be accessed */ btrfs_get_delayed_node()
105 BUG_ON(btrfs_inode->delayed_node != node); btrfs_get_delayed_node()
107 return node; btrfs_get_delayed_node()
109 btrfs_inode->delayed_node = node; btrfs_get_delayed_node()
111 atomic_add(2, &node->refs); btrfs_get_delayed_node()
113 return node; btrfs_get_delayed_node()
120 /* Will return either the node or PTR_ERR(-ENOMEM) */ btrfs_get_or_create_delayed_node()
124 struct btrfs_delayed_node *node; btrfs_get_or_create_delayed_node() local
131 node = btrfs_get_delayed_node(inode); btrfs_get_or_create_delayed_node()
132 if (node) btrfs_get_or_create_delayed_node()
133 return node; btrfs_get_or_create_delayed_node()
135 node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS); btrfs_get_or_create_delayed_node()
136 if (!node) btrfs_get_or_create_delayed_node()
138 btrfs_init_delayed_node(node, root, ino); btrfs_get_or_create_delayed_node()
141 atomic_add(2, &node->refs); btrfs_get_or_create_delayed_node()
145 kmem_cache_free(delayed_node_cache, node); btrfs_get_or_create_delayed_node()
150 ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node); btrfs_get_or_create_delayed_node()
153 kmem_cache_free(delayed_node_cache, node); btrfs_get_or_create_delayed_node()
157 btrfs_inode->delayed_node = node; btrfs_get_or_create_delayed_node()
161 return node; btrfs_get_or_create_delayed_node()
167 * If mod = 1, add this node into the prepared list.
170 struct btrfs_delayed_node *node, btrfs_queue_delayed_node()
174 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) { btrfs_queue_delayed_node()
175 if (!list_empty(&node->p_list)) btrfs_queue_delayed_node()
176 list_move_tail(&node->p_list, &root->prepare_list); btrfs_queue_delayed_node()
178 list_add_tail(&node->p_list, &root->prepare_list); btrfs_queue_delayed_node()
180 list_add_tail(&node->n_list, &root->node_list); btrfs_queue_delayed_node()
181 list_add_tail(&node->p_list, &root->prepare_list); btrfs_queue_delayed_node()
182 atomic_inc(&node->refs); /* inserted into list */ btrfs_queue_delayed_node()
184 set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags); btrfs_queue_delayed_node()
191 struct btrfs_delayed_node *node) btrfs_dequeue_delayed_node()
194 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) { btrfs_dequeue_delayed_node()
196 atomic_dec(&node->refs); /* not in the list */ btrfs_dequeue_delayed_node()
197 list_del_init(&node->n_list); btrfs_dequeue_delayed_node()
198 if (!list_empty(&node->p_list)) btrfs_dequeue_delayed_node()
199 list_del_init(&node->p_list); btrfs_dequeue_delayed_node()
200 clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags); btrfs_dequeue_delayed_node()
209 struct btrfs_delayed_node *node = NULL; btrfs_first_delayed_node() local
216 node = list_entry(p, struct btrfs_delayed_node, n_list); btrfs_first_delayed_node()
217 atomic_inc(&node->refs); btrfs_first_delayed_node()
221 return node; btrfs_first_delayed_node()
225 struct btrfs_delayed_node *node) btrfs_next_delayed_node()
231 delayed_root = node->root->fs_info->delayed_root; btrfs_next_delayed_node()
233 if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) { btrfs_next_delayed_node()
238 } else if (list_is_last(&node->n_list, &delayed_root->node_list)) btrfs_next_delayed_node()
241 p = node->n_list.next; btrfs_next_delayed_node()
284 static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node) btrfs_release_delayed_node() argument
286 __btrfs_release_delayed_node(node, 0); btrfs_release_delayed_node()
293 struct btrfs_delayed_node *node = NULL; btrfs_first_prepared_delayed_node() local
301 node = list_entry(p, struct btrfs_delayed_node, p_list); btrfs_first_prepared_delayed_node()
302 atomic_inc(&node->refs); btrfs_first_prepared_delayed_node()
306 return node; btrfs_first_prepared_delayed_node()
310 struct btrfs_delayed_node *node) btrfs_release_prepared_delayed_node()
312 __btrfs_release_delayed_node(node, 1); btrfs_release_prepared_delayed_node()
331 * @delayed_node: pointer to the delayed node
345 struct rb_node *node, *prev_node = NULL; __btrfs_lookup_delayed_item() local
349 node = root->rb_node; __btrfs_lookup_delayed_item()
351 while (node) { __btrfs_lookup_delayed_item()
352 delayed_item = rb_entry(node, struct btrfs_delayed_item, __btrfs_lookup_delayed_item()
354 prev_node = node; __btrfs_lookup_delayed_item()
357 node = node->rb_right; __btrfs_lookup_delayed_item()
359 node = node->rb_left; __btrfs_lookup_delayed_item()
369 else if ((node = rb_prev(prev_node)) != NULL) { __btrfs_lookup_delayed_item()
370 *prev = rb_entry(node, struct btrfs_delayed_item, __btrfs_lookup_delayed_item()
381 else if ((node = rb_next(prev_node)) != NULL) { __btrfs_lookup_delayed_item()
382 *next = rb_entry(node, struct btrfs_delayed_item, __btrfs_lookup_delayed_item()
405 struct rb_node **p, *node; __btrfs_add_delayed_item() local
418 node = &ins->rb_node; __btrfs_add_delayed_item()
434 rb_link_node(node, parent_node, p); __btrfs_add_delayed_item()
435 rb_insert_color(node, root); __btrfs_add_delayed_item()
449 static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node, __btrfs_add_delayed_insertion_item() argument
452 return __btrfs_add_delayed_item(node, item, __btrfs_add_delayed_insertion_item()
456 static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node, __btrfs_add_delayed_deletion_item() argument
459 return __btrfs_add_delayed_item(node, item, __btrfs_add_delayed_deletion_item()
593 struct btrfs_delayed_node *node) btrfs_delayed_inode_reserve_metadata()
628 node->bytes_reserved = num_bytes; btrfs_delayed_inode_reserve_metadata()
691 node->bytes_reserved = num_bytes; btrfs_delayed_inode_reserve_metadata()
704 struct btrfs_delayed_node *node) btrfs_delayed_inode_release_metadata()
708 if (!node->bytes_reserved) btrfs_delayed_inode_release_metadata()
713 node->inode_id, node->bytes_reserved, 0); btrfs_delayed_inode_release_metadata()
715 node->bytes_reserved); btrfs_delayed_inode_release_metadata()
716 node->bytes_reserved = 0; btrfs_delayed_inode_release_metadata()
866 struct btrfs_delayed_node *node) btrfs_insert_delayed_items()
872 mutex_lock(&node->mutex); btrfs_insert_delayed_items()
873 curr = __btrfs_first_delayed_insertion_item(node); btrfs_insert_delayed_items()
894 mutex_unlock(&node->mutex); btrfs_insert_delayed_items()
898 mutex_unlock(&node->mutex); btrfs_insert_delayed_items()
968 struct btrfs_delayed_node *node) btrfs_delete_delayed_items()
974 mutex_lock(&node->mutex); btrfs_delete_delayed_items()
975 curr = __btrfs_first_delayed_deletion_item(node); btrfs_delete_delayed_items()
984 * can't find the item which the node points to, so this node btrfs_delete_delayed_items()
993 mutex_unlock(&node->mutex); btrfs_delete_delayed_items()
1001 mutex_unlock(&node->mutex); btrfs_delete_delayed_items()
1006 mutex_unlock(&node->mutex); btrfs_delete_delayed_items()
1040 struct btrfs_delayed_node *node) __btrfs_update_delayed_inode()
1048 key.objectid = node->inode_id; __btrfs_update_delayed_inode()
1052 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags)) __btrfs_update_delayed_inode()
1068 write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item, __btrfs_update_delayed_inode()
1072 if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags)) __btrfs_update_delayed_inode()
1080 if (key.objectid != node->inode_id) __btrfs_update_delayed_inode()
1094 btrfs_release_delayed_iref(node); __btrfs_update_delayed_inode()
1098 btrfs_delayed_inode_release_metadata(root, node); __btrfs_update_delayed_inode()
1099 btrfs_release_delayed_inode(node); __btrfs_update_delayed_inode()
1122 struct btrfs_delayed_node *node) btrfs_update_delayed_inode()
1126 mutex_lock(&node->mutex); btrfs_update_delayed_inode()
1127 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) { btrfs_update_delayed_inode()
1128 mutex_unlock(&node->mutex); btrfs_update_delayed_inode()
1132 ret = __btrfs_update_delayed_inode(trans, root, path, node); btrfs_update_delayed_inode()
1133 mutex_unlock(&node->mutex); btrfs_update_delayed_inode()
1140 struct btrfs_delayed_node *node) __btrfs_commit_inode_delayed_items()
1144 ret = btrfs_insert_delayed_items(trans, path, node->root, node); __btrfs_commit_inode_delayed_items()
1148 ret = btrfs_delete_delayed_items(trans, path, node->root, node); __btrfs_commit_inode_delayed_items()
1152 ret = btrfs_update_delayed_inode(trans, node->root, path, node); __btrfs_commit_inode_delayed_items()
1504 "into the insertion tree of the delayed node" btrfs_insert_delayed_dir_index()
1518 struct btrfs_delayed_node *node, btrfs_delete_delayed_insertion_item()
1523 mutex_lock(&node->mutex); btrfs_delete_delayed_insertion_item()
1524 item = __btrfs_lookup_delayed_insertion_item(node, key); btrfs_delete_delayed_insertion_item()
1526 mutex_unlock(&node->mutex); btrfs_delete_delayed_insertion_item()
1532 mutex_unlock(&node->mutex); btrfs_delete_delayed_insertion_item()
1540 struct btrfs_delayed_node *node; btrfs_delete_delayed_dir_index() local
1545 node = btrfs_get_or_create_delayed_node(dir); btrfs_delete_delayed_dir_index()
1546 if (IS_ERR(node)) btrfs_delete_delayed_dir_index()
1547 return PTR_ERR(node); btrfs_delete_delayed_dir_index()
1553 ret = btrfs_delete_delayed_insertion_item(root, node, &item_key); btrfs_delete_delayed_dir_index()
1572 mutex_lock(&node->mutex); btrfs_delete_delayed_dir_index()
1573 ret = __btrfs_add_delayed_deletion_item(node, item); btrfs_delete_delayed_dir_index()
1576 "into the deletion tree of the delayed node" btrfs_delete_delayed_dir_index()
1578 index, node->root->objectid, node->inode_id, btrfs_delete_delayed_dir_index()
1582 mutex_unlock(&node->mutex); btrfs_delete_delayed_dir_index()
1584 btrfs_release_delayed_node(node); btrfs_delete_delayed_dir_index()
1597 * a new directory index is added into the delayed node and index_cnt btrfs_inode_delayed_dir_index_count()
1598 * is updated now. So we needn't lock the delayed node. btrfs_inode_delayed_dir_index_count()
1636 * This delayed node is still cached in the btrfs inode, so refs btrfs_get_delayed_items()
1642 * requeue or dequeue this delayed node. btrfs_get_delayed_items()
169 btrfs_queue_delayed_node(struct btrfs_delayed_root *root, struct btrfs_delayed_node *node, int mod) btrfs_queue_delayed_node() argument
190 btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root, struct btrfs_delayed_node *node) btrfs_dequeue_delayed_node() argument
224 btrfs_next_delayed_node( struct btrfs_delayed_node *node) btrfs_next_delayed_node() argument
309 btrfs_release_prepared_delayed_node( struct btrfs_delayed_node *node) btrfs_release_prepared_delayed_node() argument
589 btrfs_delayed_inode_reserve_metadata( struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, struct btrfs_delayed_node *node) btrfs_delayed_inode_reserve_metadata() argument
703 btrfs_delayed_inode_release_metadata(struct btrfs_root *root, struct btrfs_delayed_node *node) btrfs_delayed_inode_release_metadata() argument
863 btrfs_insert_delayed_items(struct btrfs_trans_handle *trans, struct btrfs_path *path, struct btrfs_root *root, struct btrfs_delayed_node *node) btrfs_insert_delayed_items() argument
965 btrfs_delete_delayed_items(struct btrfs_trans_handle *trans, struct btrfs_path *path, struct btrfs_root *root, struct btrfs_delayed_node *node) btrfs_delete_delayed_items() argument
1037 __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_delayed_node *node) __btrfs_update_delayed_inode() argument
1119 btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_delayed_node *node) btrfs_update_delayed_inode() argument
1138 __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, struct btrfs_path *path, struct btrfs_delayed_node *node) __btrfs_commit_inode_delayed_items() argument
1517 btrfs_delete_delayed_insertion_item(struct btrfs_root *root, struct btrfs_delayed_node *node, struct btrfs_key *key) btrfs_delete_delayed_insertion_item() argument
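btrfs_get_delayed_node() above is an instance of a common kernel pattern: look an object up in a radix tree under a lock and take a reference before the lock is dropped, so the object cannot vanish between lookup and use. A generic hedged sketch — struct my_obj and its fields are hypothetical, not btrfs types:

    #include <linux/atomic.h>
    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>

    struct my_obj {
            atomic_t refs;
            /* ... payload ... */
    };

    static struct my_obj *lookup_and_ref(struct radix_tree_root *tree,
                                         spinlock_t *lock, unsigned long ino)
    {
            struct my_obj *obj;

            spin_lock(lock);
            obj = radix_tree_lookup(tree, ino);
            if (obj)
                    atomic_inc(&obj->refs);   /* pin before unlocking */
            spin_unlock(lock);
            return obj;
    }

The atomic_add(2, &node->refs) in the btrfs code above takes two references at once — one for the pointer cached in the inode and one for the caller — which is why the release paths drop them separately.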
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
amdgpu_mn.c 49 struct hlist_node node; member in struct:amdgpu_mn
72 struct amdgpu_mn_node *node, *next_node; amdgpu_mn_destroy() local
77 hash_del(&rmn->node); amdgpu_mn_destroy()
78 rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects, amdgpu_mn_destroy()
81 interval_tree_remove(&node->it, &rmn->objects); amdgpu_mn_destroy()
82 list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) { amdgpu_mn_destroy()
86 kfree(node); amdgpu_mn_destroy()
136 struct amdgpu_mn_node *node; amdgpu_mn_invalidate_range_start() local
140 node = container_of(it, struct amdgpu_mn_node, it); amdgpu_mn_invalidate_range_start()
143 list_for_each_entry(bo, &node->bos, mn_list) { amdgpu_mn_invalidate_range_start()
193 hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm) amdgpu_mn_get()
213 hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm); amdgpu_mn_get()
243 struct amdgpu_mn_node *node = NULL; amdgpu_mn_register() local
256 kfree(node); amdgpu_mn_register()
257 node = container_of(it, struct amdgpu_mn_node, it); amdgpu_mn_register()
258 interval_tree_remove(&node->it, &rmn->objects); amdgpu_mn_register()
261 list_splice(&node->bos, &bos); amdgpu_mn_register()
264 if (!node) { amdgpu_mn_register()
265 node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL); amdgpu_mn_register()
266 if (!node) { amdgpu_mn_register()
274 node->it.start = addr; amdgpu_mn_register()
275 node->it.last = end; amdgpu_mn_register()
276 INIT_LIST_HEAD(&node->bos); amdgpu_mn_register()
277 list_splice(&bos, &node->bos); amdgpu_mn_register()
278 list_add(&bo->mn_list, &node->bos); amdgpu_mn_register()
280 interval_tree_insert(&node->it, &rmn->objects); amdgpu_mn_register()
315 struct amdgpu_mn_node *node; amdgpu_mn_unregister() local
316 node = container_of(head, struct amdgpu_mn_node, bos); amdgpu_mn_unregister()
317 interval_tree_remove(&node->it, &rmn->objects); amdgpu_mn_unregister()
318 kfree(node); amdgpu_mn_unregister()
/linux-4.4.14/tools/include/linux/
rbtree_augmented.h 42 void (*propagate)(struct rb_node *node, struct rb_node *stop);
47 extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
53 * leading to the inserted node, then call rb_link_node() as usual and
60 rb_insert_augmented(struct rb_node *node, struct rb_root *root, rb_insert_augmented() argument
63 __rb_insert_augmented(node, root, augment->rotate); rb_insert_augmented()
72 rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
73 rbtype augmented = rbcompute(node); \
74 if (node->rbaugmented == augmented) \
76 node->rbaugmented = augmented; \
77 rb = rb_parent(&node->rbfield); \
140 __rb_erase_augmented(struct rb_node *node, struct rb_root *root, __rb_erase_augmented() argument
143 struct rb_node *child = node->rb_right, *tmp = node->rb_left; __rb_erase_augmented()
149 * Case 1: node to erase has no more than 1 child (easy!) __rb_erase_augmented()
152 * and node must be black due to 4). We adjust colors locally __rb_erase_augmented()
155 pc = node->__rb_parent_color; __rb_erase_augmented()
157 __rb_change_child(node, child, parent, root); __rb_erase_augmented()
165 /* Still case 1, but this time the child is node->rb_left */ __rb_erase_augmented()
166 tmp->__rb_parent_color = pc = node->__rb_parent_color; __rb_erase_augmented()
168 __rb_change_child(node, tmp, parent, root); __rb_erase_augmented()
176 * Case 2: node's successor is its right child __rb_erase_augmented()
186 augment->copy(node, successor); __rb_erase_augmented()
189 * Case 3: node's successor is leftmost under __rb_erase_augmented()
190 * node's right child subtree __rb_erase_augmented()
210 augment->copy(node, successor); __rb_erase_augmented()
214 successor->rb_left = tmp = node->rb_left; __rb_erase_augmented()
217 pc = node->__rb_parent_color; __rb_erase_augmented()
219 __rb_change_child(node, successor, tmp, root); __rb_erase_augmented()
237 rb_erase_augmented(struct rb_node *node, struct rb_root *root, rb_erase_augmented() argument
240 struct rb_node *rebalance = __rb_erase_augmented(node, root, augment); rb_erase_augmented()
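rb_insert_augmented() and rb_erase_augmented() keep per-subtree metadata valid across rotations via the propagate/copy/rotate callbacks declared at the top of this header. Those callbacks are normally generated with the RB_DECLARE_CALLBACKS() macro whose body is shown above; a hedged sketch for a tree that tracks a per-subtree maximum — struct mynode and mynode_compute_max() are hypothetical:

    struct mynode {
            struct rb_node rb;
            u64 value;
            u64 subtree_max;   /* max of value over this subtree */
    };

    static u64 mynode_compute_max(struct mynode *n)
    {
            u64 m = n->value;
            struct mynode *c;

            if (n->rb.rb_left) {
                    c = rb_entry(n->rb.rb_left, struct mynode, rb);
                    if (c->subtree_max > m)
                            m = c->subtree_max;
            }
            if (n->rb.rb_right) {
                    c = rb_entry(n->rb.rb_right, struct mynode, rb);
                    if (c->subtree_max > m)
                            m = c->subtree_max;
            }
            return m;
    }

    RB_DECLARE_CALLBACKS(static, mynode_cb, struct mynode, rb,
                         u64, subtree_max, mynode_compute_max);

    /* after rb_link_node():
     *     rb_insert_augmented(&new->rb, &root, &mynode_cb);
     */

Interval trees, used by the radeon/amdgpu MMU notifiers above, are built on exactly this mechanism.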
rbtree.h 55 #define RB_EMPTY_NODE(node) \
56 ((node)->__rb_parent_color == (unsigned long)(node))
57 #define RB_CLEAR_NODE(node) \
58 ((node)->__rb_parent_color = (unsigned long)(node))
75 /* Fast replacement of a single node without remove/rebalance/add/rebalance */
79 static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, rb_link_node() argument
82 node->__rb_parent_color = (unsigned long)parent; rb_link_node()
83 node->rb_left = node->rb_right = NULL; rb_link_node()
85 *rb_link = node; rb_link_node()
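rb_link_node() is one half of the canonical insertion idiom: walk down to the NULL child slot while remembering the parent, link the new node there, then rebalance with rb_insert_color(). A sketch for a tree keyed by a long — struct mynode is hypothetical:

    struct mynode {
            struct rb_node rb;
            long key;
    };

    static void my_insert(struct rb_root *root, struct mynode *new)
    {
            struct rb_node **link = &root->rb_node, *parent = NULL;

            while (*link) {
                    struct mynode *cur = rb_entry(*link, struct mynode, rb);

                    parent = *link;
                    link = new->key < cur->key ? &(*link)->rb_left
                                               : &(*link)->rb_right;
            }
            rb_link_node(&new->rb, parent, link);
            rb_insert_color(&new->rb, root);  /* restore red-black invariants */
    }

Lookups repeat the same descent without the linking step, which is why search and insert are usually written as near-twins in rbtree users.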
/linux-4.4.14/arch/s390/numa/
toptree.c 19 * toptree_alloc - Allocate and initialize a new tree node.
20 * @level: The node's vertical level; level 0 contains the leaves.
21 * @id: ID number, explicitly not unique beyond scope of node's siblings
23 * Allocate a new tree node and initialize it.
26 * Pointer to the new tree node or NULL on error
44 * toptree_remove - Remove a tree node from a tree
45 * @cand: Pointer to the node to remove
47 * The node is detached from its parent node. The parent node's
61 * toptree_free - discard a tree node
62 * @cand: Pointer to the tree node to discard
64 * Checks if @cand is attached to a parent node. Detaches it
80 * toptree_update_mask - Update node bitmasks
81 * @cand: Pointer to a tree node
83 * The node's cpumask will be updated by combining all children's
103 * toptree_insert - Insert a tree node into tree
104 * @cand: Pointer to the node to insert
105 * @target: Pointer to the node to which @cand will added as a child
107 * Insert a tree node into a tree. Masks will be updated automatically.
110 * 0 on success, -1 if NULL is passed as argument or the node levels
126 * toptree_move_children - Move all child nodes of a node to a new place
127 * @cand: Pointer to the node whose children are to be moved
128 * @target: Pointer to the node to which @cand's children will be attached
142 * @cand: Pointer to node whose direct children should be made unique
144 * When mangling the tree it is possible that a node has two or more children
146 * moves all children of the merged nodes into the unified node.
174 * toptree_move - Move a node to another context
175 * @cand: Pointer to the node to move
176 * @target: Pointer to the node where @cand should go
219 * toptree_get_child - Access a tree node's child by its ID
220 * @cand: Pointer to tree node whose child is to access
241 * @context: Pointer to tree node whose descendants are to be used
267 * @cur: Pointer to a tree node
287 * @context: Pointer to the root node of the tree or subtree to
292 * Pointer to the next node on level @level
293 * or NULL when there is no next node.
328 * @context: Pointer to node whose descendants are to be considered
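Taken together, the kernel-doc above defines a small generic tree API. A hedged sketch of how the pieces compose — the CORE/NODE/TOPOLOGY level constants are those used by mode_emu.c below, the one-level-apart restriction on toptree_insert() is inferred from the (truncated) return-value doc, and toptree_free() is assumed to discard the whole subtree:

    static void toptree_demo(void)
    {
            struct toptree *numa, *node, *cur;

            numa = toptree_alloc(TOPOLOGY, 0);
            node = toptree_alloc(NODE, 0);
            if (!numa || !node)
                    return;

            /* a child is assumed to sit exactly one level below its parent */
            if (toptree_insert(node, numa))
                    return;

            toptree_for_each(cur, numa, NODE)
                    ;  /* visit every NODE-level descendant of numa */

            toptree_free(numa);  /* assumed to free the whole subtree */
    }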
mode_emu.c 12 * Because the current Linux scheduler code requires a stable cpu to node
24 #include <linux/node.h>
61 s32 to_node_id[CONFIG_NR_CPUS]; /* Pinned core to node mapping */
63 int per_node_target; /* Cores per node without extra cores */
64 int per_node[MAX_NUMNODES]; /* Number of cores pinned to node */
68 * Pin a core to a node
82 * Number of pinned cores of a node
84 static int cores_pinned(struct toptree *node) cores_pinned() argument
86 return emu_cores->per_node[node->id]; cores_pinned()
90 * ID of the node where the core is pinned (or NODE_ID_FREE)
113 * Return node of core
150 * Distance of a node to a core
152 static int dist_node_to_core(struct toptree *node, struct toptree *core) dist_node_to_core() argument
157 toptree_for_each(core_node, node, CORE) dist_node_to_core()
175 * Find the best/nearest node for a given core and ensure that no node
181 struct toptree *node, *node_best = NULL; node_for_core() local
187 toptree_for_each(node, numa, NODE) { toptree_for_each()
189 if (core_pinned_to_node_id(core) == node->id) { toptree_for_each()
190 node_best = node; toptree_for_each()
194 if (cores_pinned(node) >= cores_target) toptree_for_each()
196 dist_cur = dist_node_to_core(node, core); toptree_for_each()
199 node_best = node; toptree_for_each()
206 * Find the best node for each core with respect to "extra" core count
211 struct toptree *node, *core, *tmp; toptree_to_numa_single() local
214 node = node_for_core(numa, core, extra); toptree_for_each_safe()
215 if (!node) toptree_for_each_safe()
217 toptree_move(core, node); toptree_for_each_safe()
218 pin_core_to_node(core->id, node->id); toptree_for_each_safe()
223 * Move structures of given level to specified NUMA node
225 static void move_level_to_numa_node(struct toptree *node, struct toptree *phys, move_level_to_numa_node() argument
232 cores_free = cores_target - toptree_count(node, CORE); toptree_for_each_safe()
235 toptree_move(cur, node); toptree_for_each_safe()
238 toptree_move(cur, node); toptree_for_each_safe()
251 struct toptree *node; move_level_to_numa() local
253 toptree_for_each(node, numa, NODE) move_level_to_numa()
254 move_level_to_numa_node(node, phys, level, perfect); move_level_to_numa()
295 * Allocate and initialize core to node mapping
303 panic("Could not allocate cores to node memory"); create_core_to_node_map()
338 struct toptree *phys, *node, *book, *mc, *core; toptree_from_topology() local
346 node = toptree_get_child(phys, 0); for_each_online_cpu()
347 book = toptree_get_child(node, top->book_id); for_each_online_cpu()
384 /* Clear all node masks */ toptree_to_topology()
394 * Show the node to core mapping
402 printk(KERN_DEBUG "NUMA node to core mapping\n"); print_node_to_core_map()
404 printk(KERN_DEBUG " node %3d: ", nid); print_node_to_core_map()
451 * If we have not enough memory for the specified nodes, reduce the node count.
461 pr_warn("Not enough memory for %d nodes, reducing node count\n", nodes); emu_setup_nodes_adjust()
477 * Return node id for given page number
numa_mode.h 16 int (*__pfn_to_nid)(unsigned long pfn); /* PFN to node ID */
17 unsigned long (*align)(void); /* Minimum node alignment */
/linux-4.4.14/fs/befs/
btree.c 58 * it states that the overflow field of node headers is used by internal nodes
59 * to point to another node that "effectively continues this one". Here is what
60 * I believe that means. Each key in internal nodes points to another node that
62 * in the internal node is not the last key in the index. Keys that are
63 * greater than the last key in the internal node go into the overflow node.
66 * Second, it states that the header of a btree node is sufficient to
79 * In memory structure of each btree node
82 befs_host_btree_nodehead head; /* head of node converted to cpu byteorder */
84 befs_btree_nodehead *od_node; /* on disk node */
100 struct befs_btree_node *node,
103 static int befs_leafnode(struct befs_btree_node *node);
105 static fs16 *befs_bt_keylen_index(struct befs_btree_node *node);
107 static fs64 *befs_bt_valarray(struct befs_btree_node *node);
109 static char *befs_bt_keydata(struct befs_btree_node *node);
112 struct befs_btree_node *node,
116 struct befs_btree_node *node,
177 * befs_bt_read_node - read in btree node and convert to cpu byteorder
180 * @node: Buffer in which to place the btree node
181 * @node_off: Starting offset (in bytes) of the node in @ds
183 * Calls befs_read_datastream to read in the indicated btree node and
186 * Note: node->bh must be NULL when this function is called for befs_bt_read_node()
187 * the first time. Don't forget brelse(node->bh) after the last call. befs_bt_read_node()
189 * On success, returns BEFS_OK and *@node contains the btree node that
190 * starts at @node_off, with the node->head fields in cpu byte order.
197 struct befs_btree_node *node, befs_off_t node_off) befs_bt_read_node()
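The contract spelled out in the kernel-doc above — node->bh must start out NULL, and the caller brelse()s only after the last call — lets one befs_btree_node buffer be reused across a whole walk. A hedged sketch of the calling pattern; next_child_offset() is a hypothetical helper standing in for reading the node's value array:

    static befs_off_t walk_to_leaf(struct super_block *sb,
                                   befs_data_stream *ds, befs_off_t off)
    {
            struct befs_btree_node node;

            node.bh = NULL;   /* required before the first read */
            while (befs_bt_read_node(sb, ds, &node, off) == BEFS_OK) {
                    if (befs_leafnode(&node))
                            break;   /* reached a leaf, stop descending */
                    off = next_child_offset(&node);   /* hypothetical */
            }
            brelse(node.bh);  /* only the last buffer is the caller's to drop */
            return off;
    }

befs_bt_read_node() itself brelse()s the previous buffer on each call (the "if (node->bh) brelse(node->bh)" just below), which is why only the final buffer remains the caller's responsibility.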
203 if (node->bh) befs_bt_read_node()
204 brelse(node->bh); befs_bt_read_node()
206 node->bh = befs_read_datastream(sb, ds, node_off, &off); befs_bt_read_node()
207 if (!node->bh) { befs_bt_read_node()
209 "node at %llu", __func__, node_off); befs_bt_read_node()
214 node->od_node = befs_bt_read_node()
215 (befs_btree_nodehead *) ((void *) node->bh->b_data + off); befs_bt_read_node()
217 befs_dump_index_node(sb, node->od_node); befs_bt_read_node()
219 node->head.left = fs64_to_cpu(sb, node->od_node->left); befs_bt_read_node()
220 node->head.right = fs64_to_cpu(sb, node->od_node->right); befs_bt_read_node()
221 node->head.overflow = fs64_to_cpu(sb, node->od_node->overflow); befs_bt_read_node()
222 node->head.all_key_count = befs_bt_read_node()
223 fs16_to_cpu(sb, node->od_node->all_key_count); befs_bt_read_node()
224 node->head.all_key_length = befs_bt_read_node()
225 fs16_to_cpu(sb, node->od_node->all_key_length); befs_bt_read_node()
246 * Once at the correct leaf node, use befs_find_key() again to get the
276 /* read in root node */ befs_btree_find()
280 "node at %llu", node_off); befs_btree_find()
288 /* if no match, go to overflow node */ befs_btree_find()
291 "node at %llu", node_off); befs_btree_find()
296 /* at the correct leaf node now */ befs_btree_find()
321 * befs_find_key - Search for a key within a node
323 * @node: Node to find the key within
328 * If no exact match, finds first key in node that is greater
339 befs_find_key(struct super_block *sb, struct befs_btree_node *node, befs_find_key() argument
355 /* if the node cannot contain the key, just skip this node */ befs_find_key()
356 last = node->head.all_key_count - 1; befs_find_key()
357 thiskey = befs_bt_get_key(sb, node, last, &keylen); befs_find_key()
365 valarray = befs_bt_valarray(node); befs_find_key()
374 thiskey = befs_bt_get_key(sb, node, mid, &keylen); befs_find_key()
415 * node. If not, follow the node->right link to the next leafnode. Repeat
465 /* find the leaf node containing the key_no key */ befs_btree_read()
486 befs_error(sb, "%s failed to read node at %llu", befs_btree_read()
495 /* get pointers to datastructures within the node body */ befs_btree_read()
541 * @node_off: Pointer to offset of current node within datastream. Modified
546 * start of the first leaf node.
561 "node at %llu", __func__, *node_off); befs_btree_seekleaf()
564 befs_debug(sb, "Seekleaf to root node %llu", *node_off); befs_btree_seekleaf()
575 "an empty interior node: %llu. Using Overflow " befs_btree_seekleaf()
576 "node: %llu", __func__, *node_off, befs_btree_seekleaf()
585 "node at %llu", __func__, *node_off); befs_btree_seekleaf()
589 befs_debug(sb, "Seekleaf to child node %llu", *node_off); befs_btree_seekleaf()
591 befs_debug(sb, "Node %llu is a leaf node", *node_off); befs_btree_seekleaf()
601 * befs_leafnode - Determine if the btree node is a leaf node or an
602 * interior node
603 * @node: Pointer to node structure to test
608 befs_leafnode(struct befs_btree_node *node) befs_leafnode() argument
610 /* all interior nodes (and only interior nodes) have an overflow node */ befs_leafnode()
611 if (node->head.overflow == befs_bt_inval) befs_leafnode()
618 * befs_bt_keylen_index - Finds start of keylen index in a node
619 * @node: Pointer to the node structure to find the keylen index within
622 * of the B+tree node *@node
624 * "The length of all the keys in the node is added to the size of the
631 befs_bt_keylen_index(struct befs_btree_node *node) befs_bt_keylen_index() argument
635 (sizeof (befs_btree_nodehead) + node->head.all_key_length); befs_bt_keylen_index()
641 return (fs16 *) ((void *) node->od_node + off); befs_bt_keylen_index()
645 * befs_bt_valarray - Finds the start of value array in a node
646 * @node: Pointer to the node structure to find the value array within
649 * of the node pointed to by the node header
652 befs_bt_valarray(struct befs_btree_node *node) befs_bt_valarray() argument
654 void *keylen_index_start = (void *) befs_bt_keylen_index(node); befs_bt_valarray()
655 size_t keylen_index_size = node->head.all_key_count * sizeof (fs16); befs_bt_valarray()
661 * befs_bt_keydata - Finds start of keydata array in a node
662 * @node: Pointer to the node structure to find the keydata array within
665 * of the node pointed to by the node header
668 befs_bt_keydata(struct befs_btree_node *node) befs_bt_keydata() argument
670 return (char *) ((void *) node->od_node + sizeof (befs_btree_nodehead)); befs_bt_keydata()
676 * @node: node in which to look for the key
680 * Returns a valid pointer into @node on success.
684 befs_bt_get_key(struct super_block *sb, struct befs_btree_node *node, befs_bt_get_key() argument
691 if (index < 0 || index > node->head.all_key_count) { befs_bt_get_key()
696 keystart = befs_bt_keydata(node); befs_bt_get_key()
697 keylen_index = befs_bt_keylen_index(node); befs_bt_get_key()
196 befs_bt_read_node(struct super_block *sb, befs_data_stream * ds, struct befs_btree_node *node, befs_off_t node_off) befs_bt_read_node() argument
/linux-4.4.14/drivers/base/
H A Dnode.c11 #include <linux/node.h>
23 .name = "node",
24 .dev_name = "node",
30 struct node *node_dev = to_node(dev); node_read_cpumap()
190 * buf is currently PAGE_SIZE in length and each node needs 4 chars node_read_distance()
216 * hugetlbfs per node attributes registration interface:
218 * it will register its per node attributes for all online nodes with
220 * register its attribute registration functions with this node driver.
221 * Once these hooks have been initialized, the node driver will call into
227 static inline bool hugetlb_register_node(struct node *node) hugetlb_register_node() argument
230 node_state(node->dev.id, N_MEMORY)) { hugetlb_register_node()
231 __hugetlb_register_node(node); hugetlb_register_node()
237 static inline void hugetlb_unregister_node(struct node *node) hugetlb_unregister_node() argument
240 __hugetlb_unregister_node(node); hugetlb_unregister_node()
250 static inline void hugetlb_register_node(struct node *node) {} hugetlb_register_node() argument
252 static inline void hugetlb_unregister_node(struct node *node) {} hugetlb_unregister_node() argument
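The hand-off described above boils down to a pair of function pointers that hugetlbfs installs at init time. A sketch of the shape of that interface, reusing the hook names shown in this file (the registration function and typedef are stated from memory of this driver; treat the exact signatures as assumptions):

	typedef void (*node_registration_func_t)(struct node *);

	static node_registration_func_t __hugetlb_register_node;
	static node_registration_func_t __hugetlb_unregister_node;

	/* hugetlbfs calls this once to install its per-node attribute hooks */
	void register_hugetlbfs_with_node(node_registration_func_t doregister,
					  node_registration_func_t unregister)
	{
		__hugetlb_register_node   = doregister;
		__hugetlb_unregister_node = unregister;
	}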
257 struct node *node = to_node(dev); node_device_release() local
262 * onlined/offlined on this node. When we come here, node_device_release()
263 * all the memory on this node has been offlined, node_device_release()
266 * The work is using node->node_work, so we should node_device_release()
269 flush_work(&node->node_work); node_device_release()
271 kfree(node); node_device_release()
275 * register_node - Setup a sysfs device for a node.
278 * Initialize and register the node device.
280 static int register_node(struct node *node, int num, struct node *parent) register_node() argument
284 node->dev.id = num; register_node()
285 node->dev.bus = &node_subsys; register_node()
286 node->dev.release = node_device_release; register_node()
287 node->dev.groups = node_dev_groups; register_node()
288 error = device_register(&node->dev); register_node()
291 hugetlb_register_node(node); register_node()
293 compaction_register_node(node); register_node()
299 * unregister_node - unregister a node device
300 * @node: node going away
302 * Unregisters a node device @node. All the devices on the node must be
305 void unregister_node(struct node *node) unregister_node() argument
307 hugetlb_unregister_node(node); /* no-op, if memoryless node */ unregister_node()
309 device_unregister(&node->dev); unregister_node()
312 struct node *node_devices[MAX_NUMNODES];
315 * register cpu under node
378 /* register memory section under specified node if it spans that node */ register_mem_sect_under_node()
420 /* mem section does not span the specified node */ register_mem_sect_under_node()
 499 * Handle per node hstate attribute [un]registration on transitions
504 struct node *node = container_of(work, struct node, node_work); node_hugetlb_work() local
507 * We only get here when a node transitions to/from memoryless state. node_hugetlb_work()
 509 * node has memory now. hugetlb_register_node() already checks this node_hugetlb_work()
511 * node has transitioned to memoryless, try to unregister the node_hugetlb_work()
514 if (!hugetlb_register_node(node)) node_hugetlb_work()
515 hugetlb_unregister_node(node); node_hugetlb_work()
533 * offload per node hstate [un]registration to a work thread node_memory_callback()
575 struct node *parent = NULL; register_one_node()
580 node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL); register_one_node()
586 /* link cpu under this node */ for_each_present_cpu()
592 /* link memory sections under this node */
613 * node states attributes
696 * Note: we're not going to unregister the node class if we fail register_node_type()
697 * to register the node state class attribute files. register_node_type()
/linux-4.4.14/arch/arm/mach-sunxi/
H A Dplatsmp.c44 struct device_node *node; sun6i_smp_prepare_cpus() local
46 node = of_find_compatible_node(NULL, NULL, "allwinner,sun6i-a31-prcm"); sun6i_smp_prepare_cpus()
47 if (!node) { sun6i_smp_prepare_cpus()
48 pr_err("Missing A31 PRCM node in the device tree\n"); sun6i_smp_prepare_cpus()
52 prcm_membase = of_iomap(node, 0); sun6i_smp_prepare_cpus()
58 node = of_find_compatible_node(NULL, NULL, sun6i_smp_prepare_cpus()
60 if (!node) { sun6i_smp_prepare_cpus()
61 pr_err("Missing A31 CPU config node in the device tree\n"); sun6i_smp_prepare_cpus()
65 cpucfg_membase = of_iomap(node, 0); sun6i_smp_prepare_cpus()
127 struct device_node *node; sun8i_smp_prepare_cpus() local
129 node = of_find_compatible_node(NULL, NULL, "allwinner,sun8i-a23-prcm"); sun8i_smp_prepare_cpus()
130 if (!node) { sun8i_smp_prepare_cpus()
131 pr_err("Missing A23 PRCM node in the device tree\n"); sun8i_smp_prepare_cpus()
135 prcm_membase = of_iomap(node, 0); sun8i_smp_prepare_cpus()
141 node = of_find_compatible_node(NULL, NULL, sun8i_smp_prepare_cpus()
143 if (!node) { sun8i_smp_prepare_cpus()
144 pr_err("Missing A23 CPU config node in the device tree\n"); sun8i_smp_prepare_cpus()
148 cpucfg_membase = of_iomap(node, 0); sun8i_smp_prepare_cpus()
/linux-4.4.14/arch/ia64/include/asm/
H A Dtopology.h30 * Returns a bitmask of CPUs on Node 'node'.
32 #define cpumask_of_node(node) ((node) == -1 ? \
34 &node_to_cpu_mask[node])
37 * Returns the number of the node containing Node 'nid'.
44 * Determines the node for a given pci bus
46 #define pcibus_to_node(bus) PCI_CONTROLLER(bus)->node
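Typical use of the two macros above, iterating every CPU the architecture reports on the node that owns a PCI bus (kernel-style sketch):

	int cpu, nid = pcibus_to_node(bus);

	if (nid != -1)
		for_each_cpu(cpu, cpumask_of_node(nid))
			pr_info("node %d owns cpu %d\n", nid, cpu);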
H A Dnodedata.h22 * Node Data. One of these structures is located on each node of a NUMA system.
28 short node; member in struct:ia64_node_data
39 * Given a node id, return a pointer to the pg_data_t for the node.
50 * LOCAL_DATA_ADDR - This is to calculate the address of other node's
53 * just executing cpu. However, when new node is hot-added,
/linux-4.4.14/drivers/memory/
H A Dmvebu-devbus.c99 struct device_node *node, get_timing_param_ps()
106 err = of_property_read_u32(node, name, &time_ps); get_timing_param_ps()
109 name, node->full_name); get_timing_param_ps()
121 struct device_node *node, devbus_get_timing_params()
127 err = of_property_read_u32(node, "devbus,bus-width", &r->bus_width); devbus_get_timing_params()
131 node->full_name); devbus_get_timing_params()
148 err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps", devbus_get_timing_params()
153 err = get_timing_param_ps(devbus, node, "devbus,turn-off-ps", devbus_get_timing_params()
158 err = get_timing_param_ps(devbus, node, "devbus,acc-first-ps", devbus_get_timing_params()
163 err = get_timing_param_ps(devbus, node, "devbus,acc-next-ps", devbus_get_timing_params()
169 err = get_timing_param_ps(devbus, node, "devbus,rd-setup-ps", devbus_get_timing_params()
174 err = get_timing_param_ps(devbus, node, "devbus,rd-hold-ps", devbus_get_timing_params()
179 err = of_property_read_u32(node, "devbus,sync-enable", devbus_get_timing_params()
184 node->full_name); devbus_get_timing_params()
189 err = get_timing_param_ps(devbus, node, "devbus,ale-wr-ps", devbus_get_timing_params()
194 err = get_timing_param_ps(devbus, node, "devbus,wr-low-ps", devbus_get_timing_params()
199 err = get_timing_param_ps(devbus, node, "devbus,wr-high-ps", devbus_get_timing_params()
208 struct device_node *node, devbus_orion_set_timing_params()
241 struct device_node *node, devbus_armada_set_timing_params()
278 struct device_node *node = pdev->dev.of_node; mvebu_devbus_probe() local
313 if (!of_property_read_bool(node, "devbus,keep-config")) { mvebu_devbus_probe()
314 /* Read the Device Tree node */ mvebu_devbus_probe()
315 err = devbus_get_timing_params(devbus, node, &r, &w); mvebu_devbus_probe()
320 if (of_device_is_compatible(node, "marvell,orion-devbus")) mvebu_devbus_probe()
321 devbus_orion_set_timing_params(devbus, node, &r, &w); mvebu_devbus_probe()
323 devbus_armada_set_timing_params(devbus, node, &r, &w); mvebu_devbus_probe()
331 err = of_platform_populate(node, NULL, NULL, dev); mvebu_devbus_probe()
98 get_timing_param_ps(struct devbus *devbus, struct device_node *node, const char *name, u32 *ticks) get_timing_param_ps() argument
120 devbus_get_timing_params(struct devbus *devbus, struct device_node *node, struct devbus_read_params *r, struct devbus_write_params *w) devbus_get_timing_params() argument
207 devbus_orion_set_timing_params(struct devbus *devbus, struct device_node *node, struct devbus_read_params *r, struct devbus_write_params *w) devbus_orion_set_timing_params() argument
240 devbus_armada_set_timing_params(struct devbus *devbus, struct device_node *node, struct devbus_read_params *r, struct devbus_write_params *w) devbus_armada_set_timing_params() argument
/linux-4.4.14/arch/powerpc/platforms/cell/spufs/
H A Dsched.c137 * it again on a different node. But it shouldn't hurt anything __spu_update_sched_info()
140 * runqueue. The context will be rescheduled on the proper node __spu_update_sched_info()
151 int node; spu_update_sched_info() local
154 node = ctx->spu->node; spu_update_sched_info()
159 mutex_lock(&cbe_spu_info[node].list_mutex); spu_update_sched_info()
161 mutex_unlock(&cbe_spu_info[node].list_mutex); spu_update_sched_info()
167 static int __node_allowed(struct spu_context *ctx, int node) __node_allowed() argument
169 if (nr_cpus_node(node)) { __node_allowed()
170 const struct cpumask *mask = cpumask_of_node(node); __node_allowed()
179 static int node_allowed(struct spu_context *ctx, int node) node_allowed() argument
184 rval = __node_allowed(ctx, node); node_allowed()
192 int node; do_notify_spus_active() local
200 for_each_online_node(node) { for_each_online_node()
203 mutex_lock(&cbe_spu_info[node].list_mutex); for_each_online_node()
204 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { for_each_online_node()
213 mutex_unlock(&cbe_spu_info[node].list_mutex); for_each_online_node()
229 atomic_inc(&cbe_spu_info[spu->node].reserved_spus); spu_bind_context()
265 BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex)); sched_spu()
308 int node, n; aff_ref_location() local
314 node = cpu_to_node(raw_smp_processor_id()); aff_ref_location()
315 for (n = 0; n < MAX_NUMNODES; n++, node++) { aff_ref_location()
321 * contexts bigger than the amount of spus in the node, aff_ref_location()
327 node = (node < MAX_NUMNODES) ? node : 0; aff_ref_location()
328 if (!node_allowed(ctx, node)) aff_ref_location()
332 mutex_lock(&cbe_spu_info[node].list_mutex); aff_ref_location()
333 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { aff_ref_location()
340 mutex_unlock(&cbe_spu_info[node].list_mutex); aff_ref_location()
344 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { aff_ref_location()
347 mutex_unlock(&cbe_spu_info[node].list_mutex); aff_ref_location()
351 mutex_unlock(&cbe_spu_info[node].list_mutex); aff_ref_location()
380 static struct spu *ctx_location(struct spu *ref, int offset, int node) ctx_location() argument
387 BUG_ON(spu->node != node); ctx_location()
395 BUG_ON(spu->node != node); ctx_location()
445 atomic_dec(&cbe_spu_info[spu->node].reserved_spus); spu_unbind_context()
574 int node, n; spu_get_idle() local
584 node = aff_ref_spu->node; spu_get_idle()
586 mutex_lock(&cbe_spu_info[node].list_mutex); spu_get_idle()
587 spu = ctx_location(aff_ref_spu, ctx->aff_offset, node); spu_get_idle()
590 mutex_unlock(&cbe_spu_info[node].list_mutex); spu_get_idle()
597 node = cpu_to_node(raw_smp_processor_id()); spu_get_idle()
598 for (n = 0; n < MAX_NUMNODES; n++, node++) { spu_get_idle()
599 node = (node < MAX_NUMNODES) ? node : 0; spu_get_idle()
600 if (!node_allowed(ctx, node)) spu_get_idle()
603 mutex_lock(&cbe_spu_info[node].list_mutex); spu_get_idle()
604 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { spu_get_idle()
608 mutex_unlock(&cbe_spu_info[node].list_mutex); spu_get_idle()
617 mutex_unlock(&cbe_spu_info[node].list_mutex); spu_get_idle()
633 int node, n; find_victim() local
638 * Look for a possible preemption candidate on the local node first. find_victim()
641 * a strong node affinity. We might want to fine-tune this in find_victim()
645 node = cpu_to_node(raw_smp_processor_id()); find_victim()
646 for (n = 0; n < MAX_NUMNODES; n++, node++) { find_victim()
647 node = (node < MAX_NUMNODES) ? node : 0; find_victim()
648 if (!node_allowed(ctx, node)) find_victim()
651 mutex_lock(&cbe_spu_info[node].list_mutex); find_victim()
652 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { find_victim()
663 mutex_unlock(&cbe_spu_info[node].list_mutex); find_victim()
697 mutex_lock(&cbe_spu_info[node].list_mutex); find_victim()
698 cbe_spu_info[node].nr_active--; find_victim()
700 mutex_unlock(&cbe_spu_info[node].list_mutex); find_victim()
719 int node = spu->node; __spu_schedule() local
724 mutex_lock(&cbe_spu_info[node].list_mutex); __spu_schedule()
727 cbe_spu_info[node].nr_active++; __spu_schedule()
731 mutex_unlock(&cbe_spu_info[node].list_mutex); __spu_schedule()
765 int node = spu->node; spu_unschedule() local
767 mutex_lock(&cbe_spu_info[node].list_mutex); spu_unschedule()
768 cbe_spu_info[node].nr_active--; spu_unschedule()
774 mutex_unlock(&cbe_spu_info[node].list_mutex); spu_unschedule()
837 static struct spu_context *grab_runnable_context(int prio, int node) grab_runnable_context() argument
849 if (__node_allowed(ctx, node)) { list_for_each_entry()
868 new = grab_runnable_context(max_prio, spu->node); __spu_deactivate()
941 new = grab_runnable_context(ctx->prio + 1, spu->node); spusched_tick()
969 int nr_active = 0, node; count_active_contexts() local
971 for (node = 0; node < MAX_NUMNODES; node++) count_active_contexts()
972 nr_active += cbe_spu_info[node].nr_active; count_active_contexts()
1009 int node; spusched_thread() local
1014 for (node = 0; node < MAX_NUMNODES; node++) { spusched_thread()
1015 struct mutex *mtx = &cbe_spu_info[node].list_mutex; spusched_thread()
1018 list_for_each_entry(spu, &cbe_spu_info[node].spus, spusched_thread()
1044 int node; spuctx_switch_state() local
1065 node = spu->node; spuctx_switch_state()
1067 atomic_dec(&cbe_spu_info[node].busy_spus); spuctx_switch_state()
1069 atomic_inc(&cbe_spu_info[node].busy_spus); spuctx_switch_state()
1156 int node; spu_sched_exit() local
1164 for (node = 0; node < MAX_NUMNODES; node++) { spu_sched_exit()
1165 mutex_lock(&cbe_spu_info[node].list_mutex); spu_sched_exit()
1166 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) spu_sched_exit()
1169 mutex_unlock(&cbe_spu_info[node].list_mutex); spu_sched_exit()
/linux-4.4.14/drivers/base/regmap/
H A Dregcache-rbtree.c34 /* the actual rbtree node holding this block */
35 struct rb_node node; member in struct:regcache_rbtree_node
70 struct rb_node *node; regcache_rbtree_lookup() local
82 node = rbtree_ctx->root.rb_node; regcache_rbtree_lookup()
83 while (node) { regcache_rbtree_lookup()
84 rbnode = container_of(node, struct regcache_rbtree_node, node); regcache_rbtree_lookup()
91 node = node->rb_right; regcache_rbtree_lookup()
93 node = node->rb_left; regcache_rbtree_lookup()
112 node); regcache_rbtree_insert()
129 /* insert the node into the rbtree */ regcache_rbtree_insert()
130 rb_link_node(&rbnode->node, parent, new); regcache_rbtree_insert()
131 rb_insert_color(&rbnode->node, root); regcache_rbtree_insert()
142 struct rb_node *node; rbtree_show() local
153 for (node = rb_first(&rbtree_ctx->root); node != NULL; rbtree_show()
154 node = rb_next(node)) { rbtree_show()
155 n = container_of(node, struct regcache_rbtree_node, node); rbtree_show()
242 rbtree_node = rb_entry(next, struct regcache_rbtree_node, node); regcache_rbtree_exit()
243 next = rb_next(&rbtree_node->node); regcache_rbtree_exit()
244 rb_erase(&rbtree_node->node, &rbtree_ctx->root); regcache_rbtree_exit()
388 struct rb_node *node; regcache_rbtree_write() local
416 for (node = rb_first(&rbtree_ctx->root); node; regcache_rbtree_write()
417 node = rb_next(node)) { regcache_rbtree_write()
418 rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, regcache_rbtree_write()
419 node); regcache_rbtree_write()
460 struct rb_node *node; regcache_rbtree_sync() local
467 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { regcache_rbtree_sync()
468 rbnode = rb_entry(node, struct regcache_rbtree_node, node); regcache_rbtree_sync()
502 struct rb_node *node; regcache_rbtree_drop() local
507 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { regcache_rbtree_drop()
508 rbnode = rb_entry(node, struct regcache_rbtree_node, node); regcache_rbtree_drop()
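Both regcache_rbtree_lookup() and regcache_rbtree_insert() above follow the standard <linux/rbtree.h> protocol: the caller walks the rb_node pointers itself, then attaches with rb_link_node() and rebalances with rb_insert_color(). The same protocol on a minimal int-keyed node, as a kernel-style sketch:

	#include <linux/errno.h>
	#include <linux/rbtree.h>

	struct myn {
		struct rb_node node;
		int key;
	};

	static int my_insert(struct rb_root *root, struct myn *data)
	{
		struct rb_node **new = &root->rb_node, *parent = NULL;

		while (*new) {
			struct myn *this = rb_entry(*new, struct myn, node);

			parent = *new;
			if (data->key < this->key)
				new = &(*new)->rb_left;
			else if (data->key > this->key)
				new = &(*new)->rb_right;
			else
				return -EEXIST;            /* duplicate key */
		}
		rb_link_node(&data->node, parent, new);    /* attach ... */
		rb_insert_color(&data->node, root);        /* ... then rebalance */
		return 0;
	}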
/linux-4.4.14/fs/f2fs/
H A Dnode.h2 * fs/f2fs/node.h
11 /* start node id of a node block dedicated to the given node id */
14 /* node block offset on the NAT area dedicated to the given start node id */
22 /* maximum readahead size for node during getting data blocks */
39 HAS_LAST_FSYNC, /* has the latest node fsync mark? */
44 * For node information
47 nid_t nid; /* node id */
48 nid_t ino; /* inode number of the node's owner */
49 block_t blk_addr; /* block address of the node */
50 unsigned char version; /* version of the node */
51 unsigned char flag; /* for node information bits */
56 struct node_info ni; /* in-memory node information */
145 struct list_head list; /* for free node id list */
146 nid_t nid; /* node id */
282 * f2fs assigns the following node offsets described as (num).
286 * |- direct node (1)
287 * |- direct node (2)
288 * |- indirect node (3)
289 * | `- direct node (4 => 4 + N - 1)
290 * |- indirect node (4 + N)
291 * | `- direct node (5 + N => 5 + 2N - 1)
292 * `- double indirect node (5 + 2N)
293 * `- indirect node (6 + 2N)
294 * `- direct node
296 * `- indirect node ((6 + 2N) + x(N + 1))
297 * `- direct node
299 * `- indirect node ((6 + 2N) + (N - 1)(N + 1))
300 * `- direct node
345 * - Mark cold node blocks in their node footer
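The numbering in the diagram above can be reproduced with a little arithmetic. A user-space sketch that classifies a node offset, where n is the number of node pointers one indirect block holds (NIDS_PER_BLOCK in f2fs; the helper itself is hypothetical):

	enum node_kind { DIRECT_NODE, INDIRECT_NODE, DOUBLE_INDIRECT_NODE };

	static enum node_kind classify_node_offset(long off, long n)
	{
		if (off == 1 || off == 2)
			return DIRECT_NODE;             /* (1) and (2) */
		if (off == 3 || off == 4 + n)
			return INDIRECT_NODE;           /* (3) and (4 + N) */
		if (off == 5 + 2 * n)
			return DOUBLE_INDIRECT_NODE;    /* (5 + 2N) */
		if (off < 5 + 2 * n)
			return DIRECT_NODE;             /* children of (3) and (4 + N) */
		/* below (5 + 2N): every (N + 1)-th slot starts a new indirect node */
		return ((off - (6 + 2 * n)) % (n + 1)) ? DIRECT_NODE : INDIRECT_NODE;
	}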
H A DMakefile4 f2fs-y += checkpoint.o gc.o data.o node.o segment.o recovery.o
/linux-4.4.14/security/selinux/include/
H A Dinitial_sid_to_string.h16 "node",
/linux-4.4.14/arch/powerpc/include/asm/
H A Di8259.h7 extern void i8259_init(struct device_node *node, unsigned long intack_addr);
/linux-4.4.14/arch/powerpc/platforms/pseries/
H A Dof_helpers.c10 * @path: the full_name of a node to be added to the tree
12 * Returns the node which should be the parent of the node
14 * the node with full_name = "/foo".
/linux-4.4.14/arch/arm/mach-sa1100/include/mach/
H A Dmemory.h21 * node 0: 0xc0000000 - 0xc7ffffff
22 * node 1: 0xc8000000 - 0xcfffffff
23 * node 2: 0xd0000000 - 0xd7ffffff
24 * node 3: 0xd8000000 - 0xdfffffff
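Each bank above is a fixed 128 MiB window starting at 0xc0000000, so a node's base follows directly from its index (hypothetical helper, not part of this header):

	#define SA1100_NODE_SHIFT	27	/* 128 MiB per node */
	#define SA1100_NODE_BASE(n)	(0xc0000000UL + ((unsigned long)(n) << SA1100_NODE_SHIFT))
	/* SA1100_NODE_BASE(2) == 0xd0000000, matching node 2 above */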
/linux-4.4.14/drivers/irqchip/
H A Dirq-digicolor.c71 static int __init digicolor_of_init(struct device_node *node, digicolor_of_init() argument
79 reg_base = of_iomap(node, 0); digicolor_of_init()
81 pr_err("%s: unable to map IC registers\n", node->full_name); digicolor_of_init()
89 ucregs = syscon_regmap_lookup_by_phandle(node, "syscon"); digicolor_of_init()
91 pr_err("%s: unable to map UC registers\n", node->full_name); digicolor_of_init()
98 irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL); digicolor_of_init()
100 pr_err("%s: unable to create IRQ domain\n", node->full_name); digicolor_of_init()
108 pr_err("%s: unable to allocate IRQ gc\n", node->full_name); digicolor_of_init()
H A Dirq-moxart.c60 static int __init moxart_of_intc_init(struct device_node *node, moxart_of_intc_init() argument
67 intc.base = of_iomap(node, 0); moxart_of_intc_init()
70 node->full_name); moxart_of_intc_init()
74 intc.domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops, moxart_of_intc_init()
77 pr_err("%s: unable to create IRQ domain\n", node->full_name); moxart_of_intc_init()
86 node->full_name); moxart_of_intc_init()
91 ret = of_property_read_u32(node, "interrupt-mask", moxart_of_intc_init()
95 node->full_name); moxart_of_intc_init()
/linux-4.4.14/arch/x86/pci/
H A Dbus_numa.h17 int node; member in struct:pci_root_info
23 int node, int link);
/linux-4.4.14/include/linux/clk/
H A Dclk-conf.h15 int of_clk_set_defaults(struct device_node *node, bool clk_supplier);
17 static inline int of_clk_set_defaults(struct device_node *node, of_clk_set_defaults() argument
/linux-4.4.14/arch/metag/mm/
H A Dnuma.c2 * Multiple memory node support for Meta machines
 26 * in node 0, and other memory blocks into node 1 and up, ordered by numa.c
27 * latency. Each node's pgdat is node-local at the beginning of the node,
28 * immediately followed by the node mem map.
36 /* Don't allow bogus node assignment */ setup_bootmem_node()
/linux-4.4.14/arch/mips/ralink/
H A Dof.c31 __iomem void *plat_of_remap_node(const char *node) plat_of_remap_node() argument
36 np = of_find_compatible_node(NULL, NULL, node); plat_of_remap_node()
38 panic("Failed to find %s node", node); plat_of_remap_node()
41 panic("Failed to get resource for %s", node); plat_of_remap_node()
46 panic("Failed to request resources for %s", node); plat_of_remap_node()
58 static int __init early_init_dt_find_memory(unsigned long node, early_init_dt_find_memory() argument
72 * Load the builtin devicetree. This causes the chosen node to be plat_mem_setup()
/linux-4.4.14/drivers/clk/rockchip/
H A Dclk-rockchip.c27 static void __init rk2928_gate_clk_init(struct device_node *node) rk2928_gate_clk_init() argument
40 qty = of_property_count_strings(node, "clock-output-names"); rk2928_gate_clk_init()
51 reg = of_iomap(node, 0); rk2928_gate_clk_init()
66 of_property_read_string_index(node, "clock-output-names", rk2928_gate_clk_init()
73 clk_parent = of_clk_get_parent_name(node, i); rk2928_gate_clk_init()
91 of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); rk2928_gate_clk_init()
/linux-4.4.14/kernel/trace/
H A Dtrace_stat.c26 struct rb_node node; member in struct:stat_node
50 rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) { __reset_stat_session()
87 * Figure out where to put new node insert_stat()
94 this = container_of(*new, struct stat_node, node); insert_stat()
104 rb_link_node(&data->node, parent, new); insert_stat()
105 rb_insert_color(&data->node, root); insert_stat()
111 * This one will force an insertion as right-most node
175 struct rb_node *node; stat_seq_start() local
189 node = rb_first(&session->stat_root); stat_seq_start()
190 for (i = 0; node && i < n; i++) stat_seq_start()
191 node = rb_next(node); stat_seq_start()
193 return node; stat_seq_start()
199 struct rb_node *node = p; stat_seq_next() local
206 return rb_next(node); stat_seq_next()
218 struct stat_node *l = container_of(v, struct stat_node, node); stat_seq_show()
304 struct stat_session *session, *node; register_stat_tracer() local
315 list_for_each_entry(node, &all_stat_sessions, session_list) { register_stat_tracer()
316 if (node->ts == trace) { register_stat_tracer()
348 struct stat_session *node, *tmp; unregister_stat_tracer() local
351 list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) { unregister_stat_tracer()
352 if (node->ts == trace) { unregister_stat_tracer()
353 list_del(&node->session_list); unregister_stat_tracer()
354 destroy_session(node); unregister_stat_tracer()
/linux-4.4.14/arch/arm/mach-rockchip/
H A Dplatsmp.c169 * @node: mmio-sram device node
171 static int __init rockchip_smp_prepare_sram(struct device_node *node) rockchip_smp_prepare_sram() argument
179 ret = of_address_to_resource(node, 0, &res); rockchip_smp_prepare_sram()
181 pr_err("%s: could not get address for node %s\n", rockchip_smp_prepare_sram()
182 __func__, node->full_name); rockchip_smp_prepare_sram()
214 struct device_node *node; rockchip_smp_prepare_pmu() local
219 * That only happens if a "/cpus" device tree node exists rockchip_smp_prepare_pmu()
223 node = of_find_node_by_path("/cpus"); rockchip_smp_prepare_pmu()
225 pmu = syscon_regmap_lookup_by_phandle(node, "rockchip,pmu"); rockchip_smp_prepare_pmu()
226 of_node_put(node); rockchip_smp_prepare_pmu()
236 node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-pmu"); rockchip_smp_prepare_pmu()
237 if (!node) { rockchip_smp_prepare_pmu()
238 pr_err("%s: could not find pmu dt node\n", __func__); rockchip_smp_prepare_pmu()
242 pmu_base = of_iomap(node, 0); rockchip_smp_prepare_pmu()
263 struct device_node *node; rockchip_smp_prepare_cpus() local
266 node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-smp-sram"); rockchip_smp_prepare_cpus()
267 if (!node) { rockchip_smp_prepare_cpus()
268 pr_err("%s: could not find sram dt node\n", __func__); rockchip_smp_prepare_cpus()
272 sram_base_addr = of_iomap(node, 0); rockchip_smp_prepare_cpus()
282 if (rockchip_smp_prepare_sram(node)) rockchip_smp_prepare_cpus()
288 node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu"); rockchip_smp_prepare_cpus()
289 if (!node) { rockchip_smp_prepare_cpus()
294 scu_base_addr = of_iomap(node, 0); rockchip_smp_prepare_cpus()
/linux-4.4.14/arch/m68k/include/asm/
H A Doplib.h28 /* Root node of the prom device tree, this stays constant after
121 /* Acquire the IDPROM of the root node in the prom device tree. This
177 /* Start the CPU with the given device tree node, context table, and context
183 /* Stop the CPU with the passed device tree node. */
186 /* Idle the CPU with the passed device tree node. */
189 /* Re-Start the CPU with the passed device tree node. */
212 /* Get the child node of the given node, or zero if no child exists. */
215 /* Get the next sibling node of the given node, or zero if no further
218 extern int prom_getsibling(int node);
220 /* Get the length, at the passed node, of the given property type.
221 * Returns -1 on error (ie. no such property at this node).
232 extern int prom_getint(int node, char *property);
235 extern int prom_getintdefault(int node, char *property, int defval);
238 extern int prom_getbool(int node, char *prop);
241 extern void prom_getstring(int node, char *prop, char *buf, int bufsize);
243 /* Does the passed node have the given "name"? YES=1 NO=0 */
246 /* Search all siblings starting at the passed node for "name" matching
247 * the given string. Returns the node on success, zero on failure.
251 /* Return the first property type, as a string, for the given node.
254 extern char *prom_firstprop(int node);
257 * node. Returns null string on failure.
259 extern char *prom_nextprop(int node, char *prev_property);
261 /* Returns 1 if the specified node has given property. */
262 extern int prom_node_has_property(int node, char *property);
264 /* Set the indicated property at the given node with the passed value.
267 extern int prom_setprop(int node, char *prop_name, char *prop_value,
286 /* Apply ranges of any prom node (and optionally parent node as well) to registers. */
287 extern void prom_apply_generic_ranges(int node, int parent,
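Taken together, the primitives above support a conventional depth-first walk of the prom device tree. A sketch (prom_getchild is assumed to follow the same int-handle convention as prom_getsibling and to be declared elsewhere in this header; a zero handle terminates each direction):

	static void walk_prom_tree(int node)
	{
		char *prop;

		if (!node)
			return;

		/* enumerate this node's properties */
		for (prop = prom_firstprop(node); prop && *prop;
		     prop = prom_nextprop(node, prop))
			;	/* ... inspect prop, e.g. via prom_getint(node, prop) ... */

		walk_prom_tree(prom_getchild(node));	/* descend */
		walk_prom_tree(prom_getsibling(node));	/* then continue across */
	}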
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
H A Dbase.c30 nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node) nvkm_vm_map_at() argument
35 int big = vma->node->type != mmu->func->spg_shift; nvkm_vm_map_at()
36 u32 offset = vma->node->offset + (delta >> 12); nvkm_vm_map_at()
37 u32 bits = vma->node->type - 12; nvkm_vm_map_at()
44 list_for_each_entry(r, &node->regions, rl_entry) { nvkm_vm_map_at()
56 mmu->func->map(vma, pgt, node, pte, len, phys, delta); nvkm_vm_map_at()
66 delta += (u64)len << vma->node->type; nvkm_vm_map_at()
79 int big = vma->node->type != mmu->func->spg_shift; nvkm_vm_map_sg_table()
80 u32 offset = vma->node->offset + (delta >> 12); nvkm_vm_map_sg_table()
81 u32 bits = vma->node->type - 12; nvkm_vm_map_sg_table()
82 u32 num = length >> vma->node->type; nvkm_vm_map_sg_table()
138 int big = vma->node->type != mmu->func->spg_shift; nvkm_vm_map_sg()
139 u32 offset = vma->node->offset + (delta >> 12); nvkm_vm_map_sg()
140 u32 bits = vma->node->type - 12; nvkm_vm_map_sg()
141 u32 num = length >> vma->node->type; nvkm_vm_map_sg()
170 nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node) nvkm_vm_map() argument
172 if (node->sg) nvkm_vm_map()
173 nvkm_vm_map_sg_table(vma, 0, node->size << 12, node); nvkm_vm_map()
175 if (node->pages) nvkm_vm_map()
176 nvkm_vm_map_sg(vma, 0, node->size << 12, node); nvkm_vm_map()
178 nvkm_vm_map_at(vma, 0, node); nvkm_vm_map()
186 int big = vma->node->type != mmu->func->spg_shift; nvkm_vm_unmap_at()
187 u32 offset = vma->node->offset + (delta >> 12); nvkm_vm_unmap_at()
188 u32 bits = vma->node->type - 12; nvkm_vm_unmap_at()
189 u32 num = length >> vma->node->type; nvkm_vm_unmap_at()
219 nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12); nvkm_vm_unmap()
285 &vma->node); nvkm_vm_get()
291 fpde = (vma->node->offset >> mmu->func->pgt_bits); nvkm_vm_get()
292 lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits; nvkm_vm_get()
296 int big = (vma->node->type != mmu->func->spg_shift); nvkm_vm_get()
303 ret = nvkm_vm_map_pgt(vm, pde, vma->node->type); nvkm_vm_get()
307 nvkm_mm_free(&vm->mm, &vma->node); nvkm_vm_get()
316 vma->offset = (u64)vma->node->offset << 12; nvkm_vm_get()
328 if (unlikely(vma->node == NULL)) nvkm_vm_put()
333 fpde = (vma->node->offset >> mmu->func->pgt_bits); nvkm_vm_put()
334 lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits; nvkm_vm_put()
337 nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde); nvkm_vm_put()
338 nvkm_mm_free(&vm->mm, &vma->node); nvkm_vm_put()
/linux-4.4.14/drivers/android/
H A Dbinder.c257 /* node + proc => ref (transaction) */
259 /* node => refs + procs (proc exit) */
265 struct binder_node *node; member in struct:binder_ref
871 struct binder_node *node; binder_get_node() local
874 node = rb_entry(n, struct binder_node, rb_node); binder_get_node()
876 if (ptr < node->ptr) binder_get_node()
878 else if (ptr > node->ptr) binder_get_node()
881 return node; binder_get_node()
892 struct binder_node *node; binder_new_node() local
896 node = rb_entry(parent, struct binder_node, rb_node); binder_new_node()
898 if (ptr < node->ptr) binder_new_node()
900 else if (ptr > node->ptr) binder_new_node()
906 node = kzalloc(sizeof(*node), GFP_KERNEL); binder_new_node()
907 if (node == NULL) binder_new_node()
910 rb_link_node(&node->rb_node, parent, p); binder_new_node()
911 rb_insert_color(&node->rb_node, &proc->nodes); binder_new_node()
912 node->debug_id = ++binder_last_id; binder_new_node()
913 node->proc = proc; binder_new_node()
914 node->ptr = ptr; binder_new_node()
915 node->cookie = cookie; binder_new_node()
916 node->work.type = BINDER_WORK_NODE; binder_new_node()
917 INIT_LIST_HEAD(&node->work.entry); binder_new_node()
918 INIT_LIST_HEAD(&node->async_todo); binder_new_node()
920 "%d:%d node %d u%016llx c%016llx created\n", binder_new_node()
921 proc->pid, current->pid, node->debug_id, binder_new_node()
922 (u64)node->ptr, (u64)node->cookie); binder_new_node()
923 return node; binder_new_node()
926 static int binder_inc_node(struct binder_node *node, int strong, int internal, binder_inc_node() argument
932 node->internal_strong_refs == 0 && binder_inc_node()
933 !(node == binder_context_mgr_node && binder_inc_node()
934 node->has_strong_ref)) { binder_inc_node()
935 pr_err("invalid inc strong node for %d\n", binder_inc_node()
936 node->debug_id); binder_inc_node()
939 node->internal_strong_refs++; binder_inc_node()
941 node->local_strong_refs++; binder_inc_node()
942 if (!node->has_strong_ref && target_list) { binder_inc_node()
943 list_del_init(&node->work.entry); binder_inc_node()
944 list_add_tail(&node->work.entry, target_list); binder_inc_node()
948 node->local_weak_refs++; binder_inc_node()
949 if (!node->has_weak_ref && list_empty(&node->work.entry)) { binder_inc_node()
951 pr_err("invalid inc weak node for %d\n", binder_inc_node()
952 node->debug_id); binder_inc_node()
955 list_add_tail(&node->work.entry, target_list); binder_inc_node()
961 static int binder_dec_node(struct binder_node *node, int strong, int internal) binder_dec_node() argument
965 node->internal_strong_refs--; binder_dec_node()
967 node->local_strong_refs--; binder_dec_node()
968 if (node->local_strong_refs || node->internal_strong_refs) binder_dec_node()
972 node->local_weak_refs--; binder_dec_node()
973 if (node->local_weak_refs || !hlist_empty(&node->refs)) binder_dec_node()
976 if (node->proc && (node->has_strong_ref || node->has_weak_ref)) { binder_dec_node()
977 if (list_empty(&node->work.entry)) { binder_dec_node()
978 list_add_tail(&node->work.entry, &node->proc->todo); binder_dec_node()
979 wake_up_interruptible(&node->proc->wait); binder_dec_node()
982 if (hlist_empty(&node->refs) && !node->local_strong_refs && binder_dec_node()
983 !node->local_weak_refs) { binder_dec_node()
984 list_del_init(&node->work.entry); binder_dec_node()
985 if (node->proc) { binder_dec_node()
986 rb_erase(&node->rb_node, &node->proc->nodes); binder_dec_node()
988 "refless node %d deleted\n", binder_dec_node()
989 node->debug_id); binder_dec_node()
991 hlist_del(&node->dead_node); binder_dec_node()
993 "dead node %d deleted\n", binder_dec_node()
994 node->debug_id); binder_dec_node()
996 kfree(node); binder_dec_node()
1025 struct binder_node *node) binder_get_ref_for_node()
1036 if (node < ref->node) binder_get_ref_for_node()
1038 else if (node > ref->node) binder_get_ref_for_node()
1049 new_ref->node = node; binder_get_ref_for_node()
1053 new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1; binder_get_ref_for_node()
1075 if (node) { binder_get_ref_for_node()
1076 hlist_add_head(&new_ref->node_entry, &node->refs); binder_get_ref_for_node()
1079 "%d new ref %d desc %d for node %d\n", binder_get_ref_for_node()
1081 node->debug_id); binder_get_ref_for_node()
1084 "%d new ref %d desc %d for dead node\n", binder_get_ref_for_node()
1093 "%d delete ref %d desc %d for node %d\n", binder_delete_ref()
1095 ref->node->debug_id); binder_delete_ref()
1100 binder_dec_node(ref->node, 1, 1); binder_delete_ref()
1102 binder_dec_node(ref->node, 0, 1); binder_delete_ref()
1122 ret = binder_inc_node(ref->node, 1, 1, target_list); binder_inc_ref()
1129 ret = binder_inc_node(ref->node, 0, 1, target_list); binder_inc_ref()
1152 ret = binder_dec_node(ref->node, strong, 1); binder_dec_ref()
1275 struct binder_node *node = binder_get_node(proc, fp->binder); binder_transaction_buffer_release() local
1277 if (node == NULL) { binder_transaction_buffer_release()
1278 pr_err("transaction release %d bad node %016llx\n", binder_transaction_buffer_release()
1283 " node %d u%016llx\n", binder_transaction_buffer_release()
1284 node->debug_id, (u64)node->ptr); binder_transaction_buffer_release()
1285 binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); binder_transaction_buffer_release()
1297 " ref %d desc %d (node %d)\n", binder_transaction_buffer_release()
1298 ref->debug_id, ref->desc, ref->node->debug_id); binder_transaction_buffer_release()
1390 target_node = ref->node; binder_transaction()
1467 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n", binder_transaction()
1541 struct binder_node *node = binder_get_node(proc, fp->binder); binder_transaction() local
1543 if (node == NULL) { binder_transaction()
1544 node = binder_new_node(proc, fp->binder, fp->cookie); binder_transaction()
1545 if (node == NULL) { binder_transaction()
1549 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; binder_transaction()
1550 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); binder_transaction()
1552 if (fp->cookie != node->cookie) { binder_transaction()
1553 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", binder_transaction()
1555 (u64)fp->binder, node->debug_id, binder_transaction()
1556 (u64)fp->cookie, (u64)node->cookie); binder_transaction()
1565 ref = binder_get_ref_for_node(target_proc, node); binder_transaction()
1578 trace_binder_transaction_node_to_ref(t, node, ref); binder_transaction()
1580 " node %d u%016llx -> ref %d desc %d\n", binder_transaction()
1581 node->debug_id, (u64)node->ptr, binder_transaction()
1600 if (ref->node->proc == target_proc) { binder_transaction()
1605 fp->binder = ref->node->ptr; binder_transaction()
1606 fp->cookie = ref->node->cookie; binder_transaction()
1607 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); binder_transaction()
1610 " ref %d desc %d -> node %d u%016llx\n", binder_transaction()
1611 ref->debug_id, ref->desc, ref->node->debug_id, binder_transaction()
1612 (u64)ref->node->ptr); binder_transaction()
1616 new_ref = binder_get_ref_for_node(target_proc, ref->node); binder_transaction()
1626 " ref %d desc %d -> ref %d desc %d (node %d)\n", binder_transaction()
1628 new_ref->desc, ref->node->debug_id); binder_transaction()
1823 "%d:%d %s ref %d desc %d s %d w %d for node %d\n", binder_thread_write()
1825 ref->desc, ref->strong, ref->weak, ref->node->debug_id); binder_thread_write()
1832 struct binder_node *node; binder_thread_write() local
1840 node = binder_get_node(proc, node_ptr); binder_thread_write()
1841 if (node == NULL) { binder_thread_write()
1850 if (cookie != node->cookie) { binder_thread_write()
1851 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", binder_thread_write()
1855 (u64)node_ptr, node->debug_id, binder_thread_write()
1856 (u64)cookie, (u64)node->cookie); binder_thread_write()
1860 if (node->pending_strong_ref == 0) { binder_thread_write()
1861 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", binder_thread_write()
1863 node->debug_id); binder_thread_write()
1866 node->pending_strong_ref = 0; binder_thread_write()
1868 if (node->pending_weak_ref == 0) { binder_thread_write()
1869 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", binder_thread_write()
1871 node->debug_id); binder_thread_write()
1874 node->pending_weak_ref = 0; binder_thread_write()
1876 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); binder_thread_write()
1878 "%d:%d %s node %d ls %d lw %d\n", binder_thread_write()
1881 node->debug_id, node->local_strong_refs, node->local_weak_refs); binder_thread_write()
2005 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", binder_thread_write()
2011 ref->strong, ref->weak, ref->node->debug_id); binder_thread_write()
2031 if (ref->node->proc == NULL) { binder_thread_write()
2266 struct binder_node *node = container_of(w, struct binder_node, work); binder_thread_read() local
2269 int strong = node->internal_strong_refs || node->local_strong_refs; binder_thread_read()
2270 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; binder_thread_read()
2272 if (weak && !node->has_weak_ref) { binder_thread_read()
2275 node->has_weak_ref = 1; binder_thread_read()
2276 node->pending_weak_ref = 1; binder_thread_read()
2277 node->local_weak_refs++; binder_thread_read()
2278 } else if (strong && !node->has_strong_ref) { binder_thread_read()
2281 node->has_strong_ref = 1; binder_thread_read()
2282 node->pending_strong_ref = 1; binder_thread_read()
2283 node->local_strong_refs++; binder_thread_read()
2284 } else if (!strong && node->has_strong_ref) { binder_thread_read()
2287 node->has_strong_ref = 0; binder_thread_read()
2288 } else if (!weak && node->has_weak_ref) { binder_thread_read()
2291 node->has_weak_ref = 0; binder_thread_read()
2297 if (put_user(node->ptr, binder_thread_read()
2301 if (put_user(node->cookie, binder_thread_read()
2310 node->debug_id, binder_thread_read()
2311 (u64)node->ptr, (u64)node->cookie); binder_thread_read()
2316 "%d:%d node %d u%016llx c%016llx deleted\n", binder_thread_read()
2318 node->debug_id, binder_thread_read()
2319 (u64)node->ptr, binder_thread_read()
2320 (u64)node->cookie); binder_thread_read()
2321 rb_erase(&node->rb_node, &proc->nodes); binder_thread_read()
2322 kfree(node); binder_thread_read()
2326 "%d:%d node %d u%016llx c%016llx state unchanged\n", binder_thread_read()
2328 node->debug_id, binder_thread_read()
2329 (u64)node->ptr, binder_thread_read()
2330 (u64)node->cookie); binder_thread_read()
3019 static int binder_node_release(struct binder_node *node, int refs) binder_node_release() argument
3024 list_del_init(&node->work.entry); binder_node_release()
3025 binder_release_work(&node->async_todo); binder_node_release()
3027 if (hlist_empty(&node->refs)) { binder_node_release()
3028 kfree(node); binder_node_release()
3034 node->proc = NULL; binder_node_release()
3035 node->local_strong_refs = 0; binder_node_release()
3036 node->local_weak_refs = 0; binder_node_release()
3037 hlist_add_head(&node->dead_node, &binder_dead_nodes); binder_node_release()
3039 hlist_for_each_entry(ref, &node->refs, node_entry) { binder_node_release()
3057 "node %d now dead, refs %d, death %d\n", binder_node_release()
3058 node->debug_id, refs, death); binder_node_release()
3095 struct binder_node *node; binder_deferred_release() local
3097 node = rb_entry(n, struct binder_node, rb_node); binder_deferred_release()
3099 rb_erase(&node->rb_node, &proc->nodes); binder_deferred_release()
3100 incoming_refs = binder_node_release(node, incoming_refs); binder_deferred_release()
3239 seq_printf(m, " node %d", print_binder_transaction()
3259 struct binder_node *node; print_binder_work() local
3271 node = container_of(w, struct binder_node, work); print_binder_work()
3273 prefix, node->debug_id, print_binder_work()
3274 (u64)node->ptr, (u64)node->cookie); print_binder_work()
3324 static void print_binder_node(struct seq_file *m, struct binder_node *node) print_binder_node() argument
3331 hlist_for_each_entry(ref, &node->refs, node_entry) print_binder_node()
3334 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d", print_binder_node()
3335 node->debug_id, (u64)node->ptr, (u64)node->cookie, print_binder_node()
3336 node->has_strong_ref, node->has_weak_ref, print_binder_node()
3337 node->local_strong_refs, node->local_weak_refs, print_binder_node()
3338 node->internal_strong_refs, count); print_binder_node()
3341 hlist_for_each_entry(ref, &node->refs, node_entry) print_binder_node()
3345 list_for_each_entry(w, &node->async_todo, entry) print_binder_node()
3353 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ", print_binder_ref()
3354 ref->node->debug_id, ref->strong, ref->weak, ref->death); print_binder_ref()
3372 struct binder_node *node = rb_entry(n, struct binder_node, print_binder_proc() local
3374 if (print_all || node->has_async_transaction) print_binder_proc()
3375 print_binder_node(m, node); print_binder_proc()
3441 "node",
3539 struct binder_node *node; binder_state_show() local
3549 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) binder_state_show()
3550 print_binder_node(m, node); binder_state_show()
3612 "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", print_binder_transaction_log_entry()
1024 binder_get_ref_for_node(struct binder_proc *proc, struct binder_node *node) binder_get_ref_for_node() argument
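The lifetime rule that binder_inc_node()/binder_dec_node() enforce above can be stated as a small model: a node survives while any strong count is nonzero, and once the strong side is gone it survives only while weak holders (local weak refs or entries on the refs list) remain. A simplified user-space sketch, ignoring the work-list and proc bookkeeping of the real driver:

	struct toy_node {
		int local_strong_refs;
		int internal_strong_refs;
		int local_weak_refs;
		int remote_refs;	/* stands in for the hlist of struct binder_ref */
	};

	/* returns 1 when the caller should free the node */
	static int toy_dec_node(struct toy_node *n, int strong, int internal)
	{
		if (strong) {
			if (internal)
				n->internal_strong_refs--;
			else
				n->local_strong_refs--;
		} else {
			n->local_weak_refs--;
		}
		/* free only when no strong use, no weak use, and no refs remain */
		return !n->local_strong_refs && !n->internal_strong_refs &&
		       !n->local_weak_refs && !n->remote_refs;
	}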
