/linux-4.1.27/arch/avr32/boot/u-boot/

Makefile
     1  extra-y := head.o

/linux-4.1.27/arch/m68k/68360/

Makefile
     9  extra-y := head.o
    11  $(obj)/head.o: $(obj)/head-$(model-y).o
    12          ln -sf head-$(model-y).o $(obj)/head.o

/linux-4.1.27/arch/m68k/kernel/

Makefile
     5  extra-$(CONFIG_AMIGA)  := head.o
     6  extra-$(CONFIG_ATARI)  := head.o
     7  extra-$(CONFIG_MAC)    := head.o
     8  extra-$(CONFIG_APOLLO) := head.o
     9  extra-$(CONFIG_VME)    := head.o
    10  extra-$(CONFIG_HP300)  := head.o
    11  extra-$(CONFIG_Q40)    := head.o
    12  extra-$(CONFIG_SUN3X)  := head.o
    13  extra-$(CONFIG_SUN3)   := sun3-head.o

/linux-4.1.27/include/linux/

timerqueue.h
    14          struct rb_root head;            /* member of struct timerqueue_head */
    19  extern void timerqueue_add(struct timerqueue_head *head,
    21  extern void timerqueue_del(struct timerqueue_head *head,
    29   * @head: head of timerqueue
    35  struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
    37          return head->next;
    45  static inline void timerqueue_init_head(struct timerqueue_head *head)
    47          head->head = RB_ROOT;
    48          head->next = NULL;

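The entry above shows essentially the whole public surface of timerqueue: an rb-tree ordered by expiry with a cached earliest node (head->next). A minimal sketch of how a driver might use it; struct my_event and the my_* names are hypothetical, not from the kernel:

    #include <linux/kernel.h>
    #include <linux/ktime.h>
    #include <linux/timerqueue.h>

    struct my_event {
            struct timerqueue_node tq;      /* embeds rb_node + expires */
            int payload;
    };

    static struct timerqueue_head my_queue;

    static void my_queue_setup(void)
    {
            timerqueue_init_head(&my_queue);    /* head = RB_ROOT, next = NULL */
    }

    static void my_event_arm(struct my_event *ev, ktime_t deadline)
    {
            timerqueue_init(&ev->tq);
            ev->tq.expires = deadline;
            timerqueue_add(&my_queue, &ev->tq); /* keeps head->next == earliest */
    }

    static struct my_event *my_next_due(void)
    {
            struct timerqueue_node *n = timerqueue_getnext(&my_queue);

            return n ? container_of(n, struct my_event, tq) : NULL;
    }
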
btree-128.h
     5  static inline void btree_init_mempool128(struct btree_head128 *head,
     8          btree_init_mempool(&head->h, mempool);
    11  static inline int btree_init128(struct btree_head128 *head)
    13          return btree_init(&head->h);
    16  static inline void btree_destroy128(struct btree_head128 *head)
    18          btree_destroy(&head->h);
    21  static inline void *btree_lookup128(struct btree_head128 *head, u64 k1, u64 k2)
    24          return btree_lookup(&head->h, &btree_geo128, (unsigned long *)&key);
    27  static inline void *btree_get_prev128(struct btree_head128 *head,
    33          val = btree_get_prev(&head->h, &btree_geo128,
    40  static inline int btree_insert128(struct btree_head128 *head, u64 k1, u64 k2,
    44          return btree_insert(&head->h, &btree_geo128,
    48  static inline int btree_update128(struct btree_head128 *head, u64 k1, u64 k2,
    52          return btree_update(&head->h, &btree_geo128,
    56  static inline void *btree_remove128(struct btree_head128 *head, u64 k1, u64 k2)
    59          return btree_remove(&head->h, &btree_geo128, (unsigned long *)&key);
    62  static inline void *btree_last128(struct btree_head128 *head, u64 *k1, u64 *k2)
    67          val = btree_last(&head->h, &btree_geo128, (unsigned long *)&key[0]);
    89  static inline size_t btree_visitor128(struct btree_head128 *head,
    93          return btree_visitor(&head->h, &btree_geo128, opaque,
    97  static inline size_t btree_grim_visitor128(struct btree_head128 *head,
   101          return btree_grim_visitor(&head->h, &btree_geo128, opaque,
   105  #define btree_for_each_safe128(head, k1, k2, val)       \
   106          for (val = btree_last128(head, &k1, &k2);       \
   108               val = btree_get_prev128(head, &k1, &k2))

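btree-128.h wraps the generic btree with a 128-bit key passed as two u64 halves. A minimal sketch, assuming the stored value outlives the tree; map and map_example are illustrative names:

    #include <linux/btree.h>

    static struct btree_head128 map;

    static int map_example(void)
    {
            void *val;
            int err;

            err = btree_init128(&map);
            if (err)
                    return err;

            err = btree_insert128(&map, 0x1234, 0x5678, (void *)"hello",
                                  GFP_KERNEL);
            if (err)
                    goto out;

            val = btree_lookup128(&map, 0x1234, 0x5678);  /* "hello" or NULL */
            val = btree_remove128(&map, 0x1234, 0x5678);
    out:
            btree_destroy128(&map);
            return err;
    }
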
btree-type.h
    13  static inline void BTREE_FN(init_mempool)(BTREE_TYPE_HEAD *head,
    16          btree_init_mempool(&head->h, mempool);
    19  static inline int BTREE_FN(init)(BTREE_TYPE_HEAD *head)
    21          return btree_init(&head->h);
    24  static inline void BTREE_FN(destroy)(BTREE_TYPE_HEAD *head)
    26          btree_destroy(&head->h);
    37  static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
    40          return btree_lookup(&head->h, BTREE_TYPE_GEO, &_key);
    43  static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
    47          return btree_insert(&head->h, BTREE_TYPE_GEO, &_key, val, gfp);
    50  static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
    54          return btree_update(&head->h, BTREE_TYPE_GEO, &_key, val);
    57  static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
    60          return btree_remove(&head->h, BTREE_TYPE_GEO, &_key);
    63  static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
    66          void *val = btree_last(&head->h, BTREE_TYPE_GEO, &_key);
    72  static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
    75          void *val = btree_get_prev(&head->h, BTREE_TYPE_GEO, &_key);
    81  static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
    83          return btree_lookup(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key);
    86  static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
    89          return btree_insert(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key,
    93  static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
    96          return btree_update(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, val);
    99  static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
   101          return btree_remove(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key);
   104  static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
   106          return btree_last(&head->h, BTREE_TYPE_GEO, (unsigned long *)key);
   109  static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
   111          return btree_get_prev(&head->h, BTREE_TYPE_GEO, (unsigned long *)key);
   121  static inline size_t BTREE_FN(visitor)(BTREE_TYPE_HEAD *head,
   125          return btree_visitor(&head->h, BTREE_TYPE_GEO, opaque,
   129  static inline size_t BTREE_FN(grim_visitor)(BTREE_TYPE_HEAD *head,
   133          return btree_grim_visitor(&head->h, BTREE_TYPE_GEO, opaque,

circ_buf.h
    10          int head;                       /* member of struct circ_buf */
    15  #define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1))
    18     as a completely full buffer has head == tail, which is the same as
    20  #define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size))
    23     accessing head and tail more than once, so they can change
    25  #define CIRC_CNT_TO_END(head,tail,size) \
    27            int n = ((head) + end) & ((size)-1); \
    31  #define CIRC_SPACE_TO_END(head,tail,size) \
    32          ({int end = (size) - 1 - (head); \

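These macros only compute counts; the caller owns the indices. A sketch of the single-producer/single-consumer pattern they are designed for. SIZE, buf, produce() and consume() are illustrative; buf.buf is assumed to point at a SIZE-byte allocation, SIZE must be a power of two for the masking to work, and a real user adds the memory barriers described in Documentation/circular-buffers.txt:

    #include <linux/circ_buf.h>
    #include <linux/errno.h>

    #define SIZE 256                        /* power of two */

    static struct circ_buf buf;             /* buf.buf set up elsewhere */

    static int produce(char c)
    {
            if (CIRC_SPACE(buf.head, buf.tail, SIZE) < 1)
                    return -EAGAIN;                 /* full */
            buf.buf[buf.head] = c;
            buf.head = (buf.head + 1) & (SIZE - 1); /* wrap with mask */
            return 0;
    }

    static int consume(char *c)
    {
            if (CIRC_CNT(buf.head, buf.tail, SIZE) < 1)
                    return -EAGAIN;                 /* empty */
            *c = buf.buf[buf.tail];
            buf.tail = (buf.tail + 1) & (SIZE - 1);
            return 0;
    }
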
plist.h
    93   * @head: struct plist_head variable name
    95  #define PLIST_HEAD_INIT(head)                           \
    97          .node_list = LIST_HEAD_INIT((head).node_list)   \
   102   * @head: name for struct plist_head variable
   104  #define PLIST_HEAD(head) \
   105          struct plist_head head = PLIST_HEAD_INIT(head)
   121   * @head: &struct plist_head pointer
   124  plist_head_init(struct plist_head *head)
   126          INIT_LIST_HEAD(&head->node_list);
   141  extern void plist_add(struct plist_node *node, struct plist_head *head);
   142  extern void plist_del(struct plist_node *node, struct plist_head *head);
   144  extern void plist_requeue(struct plist_node *node, struct plist_head *head);
   149   * @head: the head for your list
   151  #define plist_for_each(pos, head)       \
   152          list_for_each_entry(pos, &(head)->node_list, node_list)
   157   * @head: the head for your list
   161  #define plist_for_each_continue(pos, head)      \
   162          list_for_each_entry_continue(pos, &(head)->node_list, node_list)
   168   * @head: the head for your list
   172  #define plist_for_each_safe(pos, n, head)       \
   173          list_for_each_entry_safe(pos, n, &(head)->node_list, node_list)
   178   * @head: the head for your list
   181  #define plist_for_each_entry(pos, head, mem)    \
   182          list_for_each_entry(pos, &(head)->node_list, mem.node_list)
   187   * @head: the head for your list
   193  #define plist_for_each_entry_continue(pos, head, m)     \
   194          list_for_each_entry_continue(pos, &(head)->node_list, m.node_list)
   200   * @head: the head for your list
   205  #define plist_for_each_entry_safe(pos, n, head, m)      \
   206          list_for_each_entry_safe(pos, n, &(head)->node_list, m.node_list)
   210   * @head: &struct plist_head pointer
   212  static inline int plist_head_empty(const struct plist_head *head)
   214          return list_empty(&head->node_list);
   230   * @head: the &struct plist_head pointer
   235  # define plist_first_entry(head, type, member) \
   237          WARN_ON(plist_head_empty(head)); \
   238          container_of(plist_first(head), type, member); \
   241  # define plist_first_entry(head, type, member) \
   242          container_of(plist_first(head), type, member)
   247   * @head: the &struct plist_head pointer
   252  # define plist_last_entry(head, type, member) \
   254          WARN_ON(plist_head_empty(head)); \
   255          container_of(plist_last(head), type, member); \
   258  # define plist_last_entry(head, type, member) \
   259          container_of(plist_last(head), type, member)
   278   * @head: the &struct plist_head pointer
   282  static inline struct plist_node *plist_first(const struct plist_head *head)
   284          return list_entry(head->node_list.next,
   290   * @head: the &struct plist_head pointer
   294  static inline struct plist_node *plist_last(const struct plist_head *head)
   296          return list_entry(head->node_list.prev,

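plist keeps nodes sorted by ascending prio on insertion, so the first entry is always the best-priority waiter (lower prio value sorts first). A hedged sketch; struct waiter and the lock are illustrative, and plist leaves locking entirely to the caller:

    #include <linux/plist.h>
    #include <linux/spinlock.h>

    static PLIST_HEAD(waiters);             /* static initialisation */
    static DEFINE_SPINLOCK(waiters_lock);   /* caller-provided locking */

    struct waiter {
            struct plist_node node;
            const char *name;
    };

    static void waiter_enqueue(struct waiter *w, int prio)
    {
            plist_node_init(&w->node, prio);
            spin_lock(&waiters_lock);
            plist_add(&w->node, &waiters);  /* insert in priority order */
            spin_unlock(&waiters_lock);
    }

    static struct waiter *waiter_peek(void)
    {
            return plist_head_empty(&waiters) ? NULL :
                   plist_first_entry(&waiters, struct waiter, node);
    }
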
list.h
    Matches span the whole doubly linked list API; grouped by definition:
      56-63     list_add()                  insert a new entry after the specified head
      70-77     list_add_tail()             insert a new entry before the specified head
      150-157   list_move()                 delete from one list, add as another's head
      163-169   list_move_tail()            delete from one list, add as another's tail
      173-180   list_is_last()              tests whether @list is the last entry in @head
      185-189   list_empty()                head->next == head
      194-208   list_empty_careful()        also checks head->prev against racing updates
      213-221   list_rotate_left()          move the first entry to the tail
      227-231   list_is_singular()          non-empty and head->next == head->prev
      235-243   __list_cut_position()
      249-271   list_cut_position()         move the initial part of @head, up to and
                                            including @entry, onto @list
      291-297   list_splice()
      303-309   list_splice_tail()
      315-323   list_splice_init()
      331-340   list_splice_tail_init()
      356, 367, 378                         kernel-doc for the list_*entry() accessors
                                            ("@ptr: the list head to take the element from")
      406-437   list_for_each(), list_for_each_prev(), list_for_each_safe(),
                list_for_each_prev_safe()
      443-459   list_for_each_entry(), list_for_each_entry_reverse()
      465-471   list_prepare_entry()
      476-498   list_for_each_entry_continue(), list_for_each_entry_continue_reverse()
      504-510   list_for_each_entry_from()
      517-570   list_for_each_entry_safe() and its _continue/_from/_reverse variants
      589-590   hlist intro comment: doubly linked lists with a single pointer list
                head, mostly useful for hash tables
      676       hlist move comment (fix up the pprev pointer)
      690-694   hlist_for_each(), hlist_for_each_safe()
      705-709   hlist_for_each_entry()
      736-740   hlist_for_each_entry_safe()

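Every one of these helpers manipulates a struct list_head embedded in the user's own type. The canonical pattern, sketched; struct item and items_demo() are illustrative:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
            int value;
            struct list_head link;          /* embedded node, not a pointer */
    };

    static LIST_HEAD(items);        /* empty: head.next == head.prev == &items */

    static int items_demo(void)
    {
            struct item *it, *tmp;
            int sum = 0;

            it = kmalloc(sizeof(*it), GFP_KERNEL);
            if (!it)
                    return -ENOMEM;
            it->value = 42;
            list_add_tail(&it->link, &items);       /* append */

            list_for_each_entry(it, &items, link)   /* read-only walk */
                    sum += it->value;

            list_for_each_entry_safe(it, tmp, &items, link) {
                    list_del(&it->link);            /* _safe allows deletion */
                    kfree(it);
            }
            return sum;
    }
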
btree.h
    15   * Each B+Tree consists of a head, that contains bookkeeping information and
    28   * struct btree_head - btree head
    60   * @head: the btree head to initialise
    66  void btree_init_mempool(struct btree_head *head, mempool_t *mempool);
    71   * @head: the btree head to initialise
    78  int __must_check btree_init(struct btree_head *head);
    83   * @head: the btree head to destroy
    88  void btree_destroy(struct btree_head *head);
    93   * @head: the btree to look in
    99  void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
   105   * @head: the btree to add to
   114  int __must_check btree_insert(struct btree_head *head, struct btree_geo *geo,
   119   * @head: the btree to update
   127  int btree_update(struct btree_head *head, struct btree_geo *geo,
   132   * @head: the btree to update
   139  void *btree_remove(struct btree_head *head, struct btree_geo *geo,
   163   * @head: btree head
   171  void *btree_last(struct btree_head *head, struct btree_geo *geo,
   177   * @head: btree head
   185  void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
   190  size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
   198  size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo,
   215  #define btree_for_each_safel(head, key, val)    \
   216          for (val = btree_lastl(head, &key);     \
   218               val = btree_get_prevl(head, &key))
   226  #define btree_for_each_safe32(head, key, val)   \
   227          for (val = btree_last32(head, &key);    \
   229               val = btree_get_prev32(head, &key))
   238  #define btree_for_each_safe64(head, key, val)   \
   239          for (val = btree_last64(head, &key);    \
   241               val = btree_get_prev64(head, &key))

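The typed wrappers generated from btree-type.h/btree-128.h come out as btree_*32/64/l/128 plus the btree_for_each_safe* iteration macros above. A sketch using the 64-bit variant; tree, tree_demo() and the stored strings are illustrative:

    #include <linux/btree.h>
    #include <linux/kernel.h>

    static struct btree_head64 tree;

    static void tree_demo(void)
    {
            u64 key;
            void *val;

            if (btree_init64(&tree))
                    return;
            btree_insert64(&tree, 1, (void *)"one", GFP_KERNEL);
            btree_insert64(&tree, 2, (void *)"two", GFP_KERNEL);

            /* walks from the highest key backwards; safe against removal
             * of the current entry */
            btree_for_each_safe64(&tree, key, val)
                    pr_info("key %llu -> %s\n",
                            (unsigned long long)key, (char *)val);

            btree_destroy64(&tree);
    }
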
list_sort.h
     8  void list_sort(void *priv, struct list_head *head,

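list_sort() is a merge sort over any struct list_head list; the caller supplies the comparator, which must return <0, 0 or >0. A sketch sorting a list of the same item type used in the list.h sketch, redeclared here so the fragment stands alone:

    #include <linux/list.h>
    #include <linux/list_sort.h>

    struct item {
            int value;
            struct list_head link;
    };

    static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
    {
            const struct item *ia = list_entry(a, struct item, link);
            const struct item *ib = list_entry(b, struct item, link);

            return ia->value - ib->value;   /* ascending */
    }

    static void sort_items(struct list_head *items)
    {
            list_sort(NULL, items, item_cmp);  /* priv is passed to cmp */
    }
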
list_bl.h
     8   * Special version of lists, where head of the list has a lock in the lowest
   137   * @head: the head for your list.
   141  #define hlist_bl_for_each_entry(tpos, pos, head, member)        \
   142          for (pos = hlist_bl_first(head);                        \
   152   * @head: the head for your list.
   155  #define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member) \
   156          for (pos = hlist_bl_first(head);                          \

resource_ext.h
    40  extern void resource_list_free(struct list_head *head);
    42  static inline void resource_list_add(struct resource_entry *entry,
    43                                       struct list_head *head)
    45          list_add(&entry->node, head);
    48  static inline void resource_list_add_tail(struct resource_entry *entry,
    49                                            struct list_head *head)
    51          list_add_tail(&entry->node, head);

llist.h
    73   * init_llist_head - initialize lock-less list head
    74   * @head: the head for your lock-less list
    97   *   instead of list head.
   115   *   instead of list head.
   137   *   instead of list head.
   152   * @head: the list to test
   158  static inline bool llist_empty(const struct llist_head *head)
   160          return ACCESS_ONCE(head->first) == NULL;
   170                               struct llist_head *head);
   174   * @head: the head for your lock-less list
   178  static inline bool llist_add(struct llist_node *new, struct llist_head *head)
   180          return llist_add_batch(new, new, head);
   185   * @head: the head of lock-less list to delete all entries
   191  static inline struct llist_node *llist_del_all(struct llist_head *head)
   193          return xchg(&head->first, NULL);
   196  extern struct llist_node *llist_del_first(struct llist_head *head);
   198  struct llist_node *llist_reverse_order(struct llist_node *head);

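llist is a singly linked lock-less stack: llist_add() may be called from many contexts concurrently (even NMI), while llist_del_all() atomically takes the whole chain. Entries come back in LIFO order, hence llist_reverse_order(). A sketch; struct job and the job_* names are illustrative:

    #include <linux/kernel.h>
    #include <linux/llist.h>

    struct job {
            struct llist_node node;
            int id;
    };

    static LLIST_HEAD(pending);

    static void job_submit(struct job *j)   /* lock-less, IRQ/NMI-safe */
    {
            llist_add(&j->node, &pending);
    }

    static void jobs_run(void)
    {
            struct llist_node *batch = llist_del_all(&pending);
            struct job *j;

            batch = llist_reverse_order(batch); /* restore submission order */
            llist_for_each_entry(j, batch, node)
                    pr_info("running job %d\n", j->id);
    }
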
rculist.h
    15   * and compares it to the address of the list head, but neither dereferences
    65   * @head: list head to add it after
    67   * Insert a new entry after the specified head.
    78  static inline void list_add_rcu(struct list_head *new, struct list_head *head)
    80          __list_add_rcu(new, head, head->next);
    86   * @head: list head to add it before
    88   * Insert a new entry before the specified head.
    99  static inline void list_add_tail_rcu(struct list_head *new,
   100                                       struct list_head *head)
   102          __list_add_rcu(new, head->prev, head);
   184   * @head: the place in the list to splice the first list into
   187   * @head can be RCU-read traversed concurrently with this function.
   192   * prevent any other updates to @head. In principle, it is possible
   198  static inline void list_splice_init_rcu(struct list_head *list,
   199                                          struct list_head *head,
   204          struct list_head *at = head->next;
   235          rcu_assign_pointer(list_next_rcu(head), first);
   236          first->prev = head;
   279   * @ptr: the list head to take the element from.
   298   * @head: the head for your list.
   305  #define list_for_each_entry_rcu(pos, head, member) \
   306          for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
   307               &pos->member != (head); \
   313   * @head: the head for your list.
   319  #define list_for_each_entry_continue_rcu(pos, head, member) \
   321               &pos->member != (head); \
   372  #define hlist_first_rcu(head)   (*((struct hlist_node __rcu **)(&(head)->first)))
   462  #define __hlist_for_each_rcu(pos, head)         \
   463          for (pos = rcu_dereference(hlist_first_rcu(head)); \
   470   * @head: the head for your list.
   477  #define hlist_for_each_entry_rcu(pos, head, member) \
   478          for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
   487   * @head: the head for your list.
   497  #define hlist_for_each_entry_rcu_notrace(pos, head, member) \
   498          for (pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
   507   * @head: the head for your list.
   514  #define hlist_for_each_entry_rcu_bh(pos, head, member) \
   515          for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\

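The _rcu variants let lockless readers race safely with a single updater. The canonical reader/writer pair, sketched; struct peer is illustrative, and a real writer would also hold a lock to serialise against other writers:

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct peer {
            int id;
            struct list_head link;
    };

    static LIST_HEAD(peers);

    static bool peer_exists(int id)
    {
            struct peer *p;
            bool found = false;

            rcu_read_lock();
            list_for_each_entry_rcu(p, &peers, link) {
                    if (p->id == id) {
                            found = true;
                            break;
                    }
            }
            rcu_read_unlock();
            return found;
    }

    static void peer_remove(struct peer *p)
    {
            list_del_rcu(&p->link);         /* unpublish */
            synchronize_rcu();              /* wait out current readers */
            kfree(p);
    }
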
fd.h
    12          compat_uint_t head;     /* member of struct compat_floppy_struct */

seq_file.h
   191  extern struct list_head *seq_list_start(struct list_head *head,
   193  extern struct list_head *seq_list_start_head(struct list_head *head,
   195  extern struct list_head *seq_list_next(void *v, struct list_head *head,
   202  extern struct hlist_node *seq_hlist_start(struct hlist_head *head,
   204  extern struct hlist_node *seq_hlist_start_head(struct hlist_head *head,
   206  extern struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head,
   209  extern struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head,
   211  extern struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head,
   214                                          struct hlist_head *head,
   218  extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos);
   220  extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos);

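The seq_list_* helpers map a loff_t position onto a list node so a /proc or debugfs file can iterate a kernel list. A sketch wiring them into seq_operations; the peer list and all names are illustrative, and a real implementation takes the list's lock in start/stop:

    #include <linux/list.h>
    #include <linux/seq_file.h>

    struct peer {
            int id;
            struct list_head link;
    };

    static LIST_HEAD(peers);

    static void *peers_seq_start(struct seq_file *m, loff_t *pos)
    {
            return seq_list_start(&peers, *pos);    /* NULL past the end */
    }

    static void *peers_seq_next(struct seq_file *m, void *v, loff_t *pos)
    {
            return seq_list_next(v, &peers, pos);
    }

    static void peers_seq_stop(struct seq_file *m, void *v)
    {
    }

    static int peers_seq_show(struct seq_file *m, void *v)
    {
            struct peer *p = list_entry(v, struct peer, link);

            seq_printf(m, "%d\n", p->id);
            return 0;
    }

    static const struct seq_operations peers_seq_ops = {
            .start = peers_seq_start,
            .next  = peers_seq_next,
            .stop  = peers_seq_stop,
            .show  = peers_seq_show,
    };
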
nfs_page.h
    52          struct nfs_page         *wb_head;   /* head pointer for req list */
   158   * @head: head of list into which to insert the request.
   161  nfs_list_add_request(struct nfs_page *req, struct list_head *head)
   163          list_add_tail(&req->wb_list, head);
   180  nfs_list_entry(struct list_head *head)
   182          return list_entry(head, struct nfs_page, wb_list);

/linux-4.1.27/security/tomoyo/

common.c
    Nearly every match is the "struct tomoyo_io_buffer *head" parameter that
    the policy I/O layer threads through its read/write helpers.  Matches
    cluster in:
      207-242    tomoyo_flush()             drain queued strings into the reader's buffer
      249-260    tomoyo_set_string()        queue a string in head->r.w[], then flush
      265-291    tomoyo_io_printf()         vsnprintf() into head->read_buf, then queue
      297-328    tomoyo_set_space() / tomoyo_set_lf() / tomoyo_set_slash()
      360-372    tomoyo_print_namespace()
      378-473    tomoyo_print_name_union(), tomoyo_print_name_union_quoted(),
                 tomoyo_print_number_union_nospace(), tomoyo_print_number_union()
      656-715    tomoyo_write_profile() / tomoyo_print_config()
      724-813    tomoyo_read_profile()
      828-858    tomoyo_same_manager() / tomoyo_update_manager_entry()
      868-907    tomoyo_write_manager() / tomoyo_read_manager()
      936-937    tomoyo_manager()
      963-1009   tomoyo_select_domain()
      1024-1047  tomoyo_same_task_acl() / tomoyo_write_task()
      1140-1163  tomoyo_write_domain()
      1195-1327  tomoyo_print_condition()
      1336-1349  tomoyo_set_group()
      1355-1543  tomoyo_print_entry()
      1551-1567  tomoyo_read_domain2()
      1574-1618  tomoyo_read_domain()
      1624-1674  tomoyo_write_pid() / tomoyo_read_pid()
      1697-1726  tomoyo_write_exception()
      1735-1784  tomoyo_read_group()
      1791-1844  tomoyo_read_policy()
      1851-1880  tomoyo_read_exception()
      2135-2184  tomoyo_read_query()
      2193-2199  tomoyo_write_answer()
      2229-2237  tomoyo_read_version()
      2280-2328  tomoyo_read_stat() / tomoyo_write_stat()
      2347-2442  tomoyo_open_control()      allocate head, pick read/write/poll handlers
      2457-2459  tomoyo_poll_control()
      2466-2499  tomoyo_set_namespace_cursor() / tomoyo_has_more_namespace()
      2505-2533  tomoyo_read_control()
      2540-2571  tomoyo_parse_policy()
      2577-2674  tomoyo_write_control()
      2681-2692  tomoyo_close_control()
      2745-2781  tomoyo_load_builtin_policy()

H A D | gc.c | 41 struct tomoyo_io_buffer *head; tomoyo_struct_used_by_io_buffer() local 45 list_for_each_entry(head, &tomoyo_io_buffer_list, list) { tomoyo_struct_used_by_io_buffer() 46 head->users++; tomoyo_struct_used_by_io_buffer() 48 mutex_lock(&head->io_sem); tomoyo_struct_used_by_io_buffer() 49 if (head->r.domain == element || head->r.group == element || tomoyo_struct_used_by_io_buffer() 50 head->r.acl == element || &head->w.domain->list == element) tomoyo_struct_used_by_io_buffer() 52 mutex_unlock(&head->io_sem); tomoyo_struct_used_by_io_buffer() 54 head->users--; tomoyo_struct_used_by_io_buffer() 72 struct tomoyo_io_buffer *head; tomoyo_name_used_by_io_buffer() local 77 list_for_each_entry(head, &tomoyo_io_buffer_list, list) { tomoyo_name_used_by_io_buffer() 79 head->users++; tomoyo_name_used_by_io_buffer() 81 mutex_lock(&head->io_sem); tomoyo_name_used_by_io_buffer() 83 const char *w = head->r.w[i]; tomoyo_name_used_by_io_buffer() 89 mutex_unlock(&head->io_sem); tomoyo_name_used_by_io_buffer() 91 head->users--; tomoyo_name_used_by_io_buffer() 109 container_of(element, typeof(*ptr), head.list); tomoyo_del_transition_control() 124 container_of(element, typeof(*ptr), head.list); tomoyo_del_aggregator() 139 container_of(element, typeof(*ptr), head.list); tomoyo_del_manager() 159 = container_of(acl, typeof(*entry), head); tomoyo_del_acl() 166 = container_of(acl, typeof(*entry), head); tomoyo_del_acl() 174 = container_of(acl, typeof(*entry), head); tomoyo_del_acl() 182 = container_of(acl, typeof(*entry), head); tomoyo_del_acl() 192 = container_of(acl, typeof(*entry), head); tomoyo_del_acl() 202 container_of(acl, typeof(*entry), head); tomoyo_del_acl() 210 container_of(acl, typeof(*entry), head); tomoyo_del_acl() 219 container_of(acl, typeof(*entry), head); tomoyo_del_acl() 227 container_of(acl, typeof(*entry), head); tomoyo_del_acl() 271 head.list); tomoyo_del_condition() 322 container_of(element, typeof(*member), head.list); tomoyo_del_path_group() 336 container_of(element, typeof(*group), head.list); tomoyo_del_group() 427 head.list)->entry.name)) tomoyo_try_to_gc() 560 list_for_each_entry_safe(group, tmp, list, head.list) { list_for_each_entry_safe() 563 atomic_read(&group->head.users) > 0) list_for_each_entry_safe() 565 atomic_set(&group->head.users, list_for_each_entry_safe() 568 &group->head.list); list_for_each_entry_safe() 601 struct tomoyo_io_buffer *head; tomoyo_gc_thread() local 605 list_for_each_entry_safe(head, tmp, &tomoyo_io_buffer_list, tomoyo_gc_thread() 607 if (head->users) tomoyo_gc_thread() 609 list_del(&head->list); tomoyo_gc_thread() 610 kfree(head->read_buf); tomoyo_gc_thread() 611 kfree(head->write_buf); tomoyo_gc_thread() 612 kfree(head); tomoyo_gc_thread() 625 * @head: Pointer to "struct tomoyo_io_buffer". 630 void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register) tomoyo_notify_gc() argument 636 head->users = 1; tomoyo_notify_gc() 637 list_add(&head->list, &tomoyo_io_buffer_list); tomoyo_notify_gc() 639 is_write = head->write_buf != NULL; tomoyo_notify_gc() 640 if (!--head->users) { tomoyo_notify_gc() 641 list_del(&head->list); tomoyo_notify_gc() 642 kfree(head->read_buf); tomoyo_notify_gc() 643 kfree(head->write_buf); tomoyo_notify_gc() 644 kfree(head); tomoyo_notify_gc()
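The GC scheme here is reference-counted deferral: readers bump head->users under a lock before touching a buffer, and the GC thread frees only entries whose count has dropped back to zero. A userspace analogue of the reaper loop, with hypothetical names:

#include <stdlib.h>

struct io_buf {
    struct io_buf *next;
    int users;          /* nonzero while some reader holds it */
    char *read_buf;
    char *write_buf;
};

/* Walk the list and free only unreferenced entries, mirroring
 * tomoyo_gc_thread(); callers hold the list lock (omitted here). */
static void reap_unused(struct io_buf **list)
{
    struct io_buf **pp = list;

    while (*pp) {
        struct io_buf *b = *pp;

        if (b->users) {     /* still in use: leave it linked */
            pp = &b->next;
            continue;
        }
        *pp = b->next;      /* unlink, then release its buffers */
        free(b->read_buf);
        free(b->write_buf);
        free(b);
    }
}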
|
H A D | group.c | 21 return container_of(a, struct tomoyo_path_group, head)->member_name == tomoyo_same_path_group() 22 container_of(b, struct tomoyo_path_group, head)->member_name; tomoyo_same_path_group() 36 return !memcmp(&container_of(a, struct tomoyo_number_group, head) tomoyo_same_number_group() 38 &container_of(b, struct tomoyo_number_group, head) tomoyo_same_number_group() 40 sizeof(container_of(a, struct tomoyo_number_group, head) tomoyo_same_number_group() 56 head); tomoyo_same_address_group() 58 head); tomoyo_same_address_group() 85 error = tomoyo_update_policy(&e.head, sizeof(e), param, tomoyo_write_group() 93 error = tomoyo_update_policy(&e.head, sizeof(e), param, tomoyo_write_group() 105 error = tomoyo_update_policy(&e.head, sizeof(e), param, tomoyo_write_group() 129 list_for_each_entry_rcu(member, &group->member_list, head.list) { tomoyo_path_matches_group() 130 if (member->head.is_deleted) tomoyo_path_matches_group() 156 list_for_each_entry_rcu(member, &group->member_list, head.list) { tomoyo_number_matches_group() 157 if (member->head.is_deleted) tomoyo_number_matches_group() 186 list_for_each_entry_rcu(member, &group->member_list, head.list) { tomoyo_address_matches_group() 187 if (member->head.is_deleted) tomoyo_address_matches_group()
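Every matcher in this file shares one lookup shape: walk the RCU-protected member list and skip entries whose head.is_deleted flag is set, since those are logically removed but not yet reclaimed. A kernel-style sketch with illustrative types:

#include <linux/rculist.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_member {
    struct list_head list;
    bool is_deleted;        /* set on delete, freed later by GC */
    const char *name;
};

static bool group_matches(struct list_head *members, const char *name)
{
    struct my_member *m;
    bool hit = false;

    rcu_read_lock();
    list_for_each_entry_rcu(m, members, list) {
        if (m->is_deleted)
            continue;       /* logically removed, awaiting GC */
        if (!strcmp(m->name, name)) {
            hit = true;
            break;
        }
    }
    rcu_read_unlock();
    return hit;
}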
|
H A D | memory.c | 108 list_for_each_entry(group, list, head.list) { list_for_each_entry() 110 atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS) list_for_each_entry() 112 atomic_inc(&group->head.users); list_for_each_entry() 120 atomic_set(&entry->head.users, 1); 121 list_add_tail_rcu(&entry->head.list, list); 152 struct list_head *head; tomoyo_get_name() local 158 head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)]; tomoyo_get_name() 161 list_for_each_entry(ptr, head, head.list) { list_for_each_entry() 163 atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS) list_for_each_entry() 165 atomic_inc(&ptr->head.users); list_for_each_entry() 172 atomic_set(&ptr->head.users, 1); 174 list_add_tail(&ptr->head.list, head);
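tomoyo_get_name() is string interning: hash the name, search the bucket for an existing entry to share (bumping its use count), and allocate only when nothing matches. A compact userspace analogue, with a deliberately tiny hash:

#include <stdlib.h>
#include <string.h>

#define NBUCKETS 64

struct name_entry {
    struct name_entry *next;
    int users;
    char name[];        /* flexible array, like tomoyo's entry->entry.name */
};

static struct name_entry *buckets[NBUCKETS];

static struct name_entry *get_name(const char *s)
{
    unsigned int h = 0;
    const char *p;
    struct name_entry *e;

    for (p = s; *p; p++)
        h = h * 31 + (unsigned char)*p;
    h %= NBUCKETS;

    for (e = buckets[h]; e; e = e->next)
        if (!strcmp(e->name, s)) {
            e->users++;     /* existing entry: share it */
            return e;
        }

    e = malloc(sizeof(*e) + strlen(s) + 1);
    if (!e)
        return NULL;
    e->users = 1;
    strcpy(e->name, s);
    e->next = buckets[h];
    buckets[h] = e;
    return e;
}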
|
H A D | environ.c | 21 container_of(ptr, typeof(*acl), head); tomoyo_check_env_acl() 78 const struct tomoyo_env_acl *p1 = container_of(a, typeof(*p1), head); tomoyo_same_env_acl() 79 const struct tomoyo_env_acl *p2 = container_of(b, typeof(*p2), head); tomoyo_same_env_acl() 95 struct tomoyo_env_acl e = { .head.type = TOMOYO_TYPE_ENV_ACL }; tomoyo_write_env() 104 error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_write_env()
|
H A D | file.c | 254 head); tomoyo_check_path_acl() 276 container_of(ptr, typeof(*acl), head); tomoyo_check_path_number_acl() 296 container_of(ptr, typeof(*acl), head); tomoyo_check_path2_acl() 315 container_of(ptr, typeof(*acl), head); tomoyo_check_mkdev_acl() 338 const struct tomoyo_path_acl *p1 = container_of(a, typeof(*p1), head); tomoyo_same_path_acl() 339 const struct tomoyo_path_acl *p2 = container_of(b, typeof(*p2), head); tomoyo_same_path_acl() 356 u16 * const a_perm = &container_of(a, struct tomoyo_path_acl, head) tomoyo_merge_path_acl() 359 const u16 b_perm = container_of(b, struct tomoyo_path_acl, head)->perm; tomoyo_merge_path_acl() 382 .head.type = TOMOYO_TYPE_PATH_ACL, tomoyo_update_path_acl() 389 error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_update_path_acl() 407 const struct tomoyo_mkdev_acl *p1 = container_of(a, typeof(*p1), head); tomoyo_same_mkdev_acl() 408 const struct tomoyo_mkdev_acl *p2 = container_of(b, typeof(*p2), head); tomoyo_same_mkdev_acl() 429 head)->perm; tomoyo_merge_mkdev_acl() 431 const u8 b_perm = container_of(b, struct tomoyo_mkdev_acl, head) tomoyo_merge_mkdev_acl() 455 .head.type = TOMOYO_TYPE_MKDEV_ACL, tomoyo_update_mkdev_acl() 465 error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_update_mkdev_acl() 486 const struct tomoyo_path2_acl *p1 = container_of(a, typeof(*p1), head); tomoyo_same_path2_acl() 487 const struct tomoyo_path2_acl *p2 = container_of(b, typeof(*p2), head); tomoyo_same_path2_acl() 505 u8 * const a_perm = &container_of(a, struct tomoyo_path2_acl, head) tomoyo_merge_path2_acl() 508 const u8 b_perm = container_of(b, struct tomoyo_path2_acl, head)->perm; tomoyo_merge_path2_acl() 531 .head.type = TOMOYO_TYPE_PATH2_ACL, tomoyo_update_path2_acl() 539 error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_update_path2_acl() 620 head); tomoyo_same_path_number_acl() 622 head); tomoyo_same_path_number_acl() 641 head)->perm; tomoyo_merge_path_number_acl() 643 const u8 b_perm = container_of(b, struct tomoyo_path_number_acl, head) tomoyo_merge_path_number_acl() 665 .head.type = TOMOYO_TYPE_PATH_NUMBER_ACL, tomoyo_update_path_number_acl() 673 error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_update_path_number_acl() 947 const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head); tomoyo_same_mount_acl() 948 const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head); tomoyo_same_mount_acl() 966 struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL }; tomoyo_update_mount_acl() 974 error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_update_mount_acl()
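The merge helpers (tomoyo_merge_path_acl() and friends) apply one rule to a permission bitmask: a delete request clears the requested bits, a normal request ORs them in, and the caller can drop entries whose mask reaches zero. A plain-C sketch of that rule, with illustrative types:

#include <stdbool.h>
#include <stdint.h>

/* Returns true when the existing entry has become empty and may be
 * removed by the caller. */
static bool merge_perm(uint16_t *existing, uint16_t requested, bool is_delete)
{
    if (is_delete)
        *existing &= ~requested;    /* revoke the listed operations */
    else
        *existing |= requested;     /* grant them */
    return *existing == 0;
}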
|
/linux-4.1.27/sound/pci/ctxfi/ |
H A D | ctimap.c | 25 struct list_head *pos, *pre, *head; input_mapper_add() local 28 head = mappers; input_mapper_add() 30 if (list_empty(head)) { input_mapper_add() 33 list_add(&entry->list, head); input_mapper_add() 37 list_for_each(pos, head) { list_for_each() 45 if (pos != head) { 47 if (pre == head) 48 pre = head->prev; 52 pre = head->prev; 53 pos = head->next; 54 list_add_tail(&entry->list, head); 71 struct list_head *next, *pre, *head; input_mapper_delete() local 74 head = mappers; input_mapper_delete() 76 if (list_empty(head)) input_mapper_delete() 79 pre = (entry->list.prev == head) ? head->prev : entry->list.prev; input_mapper_delete() 80 next = (entry->list.next == head) ? head->next : entry->list.next; input_mapper_delete() 100 void free_input_mapper_list(struct list_head *head) free_input_mapper_list() argument 105 while (!list_empty(head)) { free_input_mapper_list() 106 pos = head->next; free_input_mapper_list()
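input_mapper_add() performs an ordered insert: scan for the first entry with a larger key, insert in front of it, and fall back to the tail when the new entry is largest. A sketch of that step using the kernel list API; struct imap and the slot key are illustrative:

#include <linux/list.h>

struct imap {
    struct list_head list;
    unsigned int slot;      /* sort key */
};

static void imap_insert_sorted(struct list_head *head, struct imap *entry)
{
    struct list_head *pos;

    list_for_each(pos, head) {
        struct imap *cur = list_entry(pos, struct imap, list);

        if (cur->slot > entry->slot) {
            list_add_tail(&entry->list, pos);   /* insert before 'pos' */
            return;
        }
    }
    list_add_tail(&entry->list, head);          /* largest key: append */
}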
|
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvkm/subdev/ |
H A D | vga.h | 7 u8 nv_rdport(void *obj, int head, u16 port); 8 void nv_wrport(void *obj, int head, u16 port, u8 value); 11 u8 nv_rdvgas(void *obj, int head, u8 index); 12 void nv_wrvgas(void *obj, int head, u8 index, u8 value); 15 u8 nv_rdvgag(void *obj, int head, u8 index); 16 void nv_wrvgag(void *obj, int head, u8 index, u8 value); 19 u8 nv_rdvgac(void *obj, int head, u8 index); 20 void nv_wrvgac(void *obj, int head, u8 index, u8 value); 23 u8 nv_rdvgai(void *obj, int head, u16 port, u8 index); 24 void nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value);
|
/linux-4.1.27/net/netlabel/ |
H A D | netlabel_addrlist.h | 96 #define netlbl_af4list_foreach(iter, head) \ 97 for (iter = __af4list_valid((head)->next, head); \ 98 &iter->list != (head); \ 99 iter = __af4list_valid(iter->list.next, head)) 101 #define netlbl_af4list_foreach_rcu(iter, head) \ 102 for (iter = __af4list_valid_rcu((head)->next, head); \ 103 &iter->list != (head); \ 104 iter = __af4list_valid_rcu(iter->list.next, head)) 106 #define netlbl_af4list_foreach_safe(iter, tmp, head) \ 107 for (iter = __af4list_valid((head)->next, head), \ 108 tmp = __af4list_valid(iter->list.next, head); \ 109 &iter->list != (head); \ 110 iter = tmp, tmp = __af4list_valid(iter->list.next, head)) 113 struct list_head *head); 115 struct list_head *head); 118 struct list_head *head); 121 struct list_head *head); 163 #define netlbl_af6list_foreach(iter, head) \ 164 for (iter = __af6list_valid((head)->next, head); \ 165 &iter->list != (head); \ 166 iter = __af6list_valid(iter->list.next, head)) 168 #define netlbl_af6list_foreach_rcu(iter, head) \ 169 for (iter = __af6list_valid_rcu((head)->next, head); \ 170 &iter->list != (head); \ 171 iter = __af6list_valid_rcu(iter->list.next, head)) 173 #define netlbl_af6list_foreach_safe(iter, tmp, head) \ 174 for (iter = __af6list_valid((head)->next, head), \ 175 tmp = __af6list_valid(iter->list.next, head); \ 176 &iter->list != (head); \ 177 iter = tmp, tmp = __af6list_valid(iter->list.next, head)) 180 struct list_head *head); 183 struct list_head *head); 186 struct list_head *head); 189 struct list_head *head);
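These iterators fold validity filtering into traversal: __af4list_valid_rcu() advances past entries that are not live, so callers only ever see usable addresses. A hedged usage sketch, assuming this header is in scope; count_valid() is illustrative, not part of NetLabel:

#include <linux/rculist.h>

static int count_valid(struct list_head *head)
{
    struct netlbl_af4list *iter;
    int n = 0;

    rcu_read_lock();
    netlbl_af4list_foreach_rcu(iter, head)
        n++;            /* dead entries were skipped by the iterator */
    rcu_read_unlock();
    return n;
}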
|
H A D | netlabel_addrlist.c | 52 * @head: the list head 55 * Searches the IPv4 address list given by @head. If a matching address entry 61 struct list_head *head) netlbl_af4list_search() 65 list_for_each_entry_rcu(iter, head, list) netlbl_af4list_search() 76 * @head: the list head 79 * Searches the IPv4 address list given by @head. If an exact match is found 86 struct list_head *head) netlbl_af4list_search_exact() 90 list_for_each_entry_rcu(iter, head, list) netlbl_af4list_search_exact() 102 * @head: the list head 105 * Searches the IPv6 address list given by @head. If a matching address entry 111 struct list_head *head) netlbl_af6list_search() 115 list_for_each_entry_rcu(iter, head, list) netlbl_af6list_search() 127 * @head: the list head 130 * Searches the IPv6 address list given by @head. If an exact match is found 137 struct list_head *head) netlbl_af6list_search_exact() 141 list_for_each_entry_rcu(iter, head, list) netlbl_af6list_search_exact() 154 * @head: the list head 157 * Add a new address entry to the list pointed to by @head. On success zero is 162 int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head) netlbl_af4list_add() argument 166 iter = netlbl_af4list_search(entry->addr, head); netlbl_af4list_add() 175 list_for_each_entry_rcu(iter, head, list) list_for_each_entry_rcu() 183 list_add_tail_rcu(&entry->list, head); 191 * @head: the list head 194 * Add a new address entry to the list pointed to by @head. On success zero is 199 int netlbl_af6list_add(struct netlbl_af6list *entry, struct list_head *head) netlbl_af6list_add() argument 203 iter = netlbl_af6list_search(&entry->addr, head); netlbl_af6list_add() 213 list_for_each_entry_rcu(iter, head, list) list_for_each_entry_rcu() 221 list_add_tail_rcu(&entry->list, head); 245 * @head: the list head 248 * Remove an IP address entry from the list pointed to by @head. Returns the 254 struct list_head *head) netlbl_af4list_remove() 258 entry = netlbl_af4list_search_exact(addr, mask, head); netlbl_af4list_remove() 285 * @head: the list head 288 * Remove an IP address entry from the list pointed to by @head. Returns the 295 struct list_head *head) netlbl_af6list_remove() 299 entry = netlbl_af6list_search_exact(addr, mask, head); netlbl_af6list_remove() 60 netlbl_af4list_search(__be32 addr, struct list_head *head) netlbl_af4list_search() argument 84 netlbl_af4list_search_exact(__be32 addr, __be32 mask, struct list_head *head) netlbl_af4list_search_exact() argument 110 netlbl_af6list_search(const struct in6_addr *addr, struct list_head *head) netlbl_af6list_search() argument 135 netlbl_af6list_search_exact(const struct in6_addr *addr, const struct in6_addr *mask, struct list_head *head) netlbl_af6list_search_exact() argument 253 netlbl_af4list_remove(__be32 addr, __be32 mask, struct list_head *head) netlbl_af4list_remove() argument 293 netlbl_af6list_remove(const struct in6_addr *addr, const struct in6_addr *mask, struct list_head *head) netlbl_af6list_remove() argument
|
/linux-4.1.27/drivers/scsi/aic7xxx/ |
H A D | queue.h | 40 * added to the list after an existing element or at the head of the list. 41 * Elements being removed from the head of the list should use the explicit 48 * head of the list and the other to the tail of the list. The elements are 51 * to the list after an existing element, at the head of the list, or at the 52 * end of the list. Elements being removed from the head of the tail queue 62 * or after an existing element or at the head of the list. A list 65 * A tail queue is headed by a pair of pointers, one to the head of the 69 * after an existing element, at the head of the list, or at the end of 72 * A circle queue is headed by a pair of pointers, one to the head of the 76 * an existing element, at the head of the list, or at the end of the list. 112 #define SLIST_HEAD_INITIALIZER(head) \ 123 #define SLIST_EMPTY(head) ((head)->slh_first == NULL) 125 #define SLIST_FIRST(head) ((head)->slh_first) 127 #define SLIST_FOREACH(var, head, field) \ 128 for ((var) = SLIST_FIRST((head)); \ 132 #define SLIST_INIT(head) do { \ 133 SLIST_FIRST((head)) = NULL; \ 141 #define SLIST_INSERT_HEAD(head, elm, field) do { \ 142 SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ 143 SLIST_FIRST((head)) = (elm); \ 148 #define SLIST_REMOVE(head, elm, type, field) do { \ 149 if (SLIST_FIRST((head)) == (elm)) { \ 150 SLIST_REMOVE_HEAD((head), field); \ 153 struct type *curelm = SLIST_FIRST((head)); \ 161 #define SLIST_REMOVE_HEAD(head, field) do { \ 162 SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \ 174 #define STAILQ_HEAD_INITIALIZER(head) \ 175 { NULL, &(head).stqh_first } 185 #define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) 187 #define STAILQ_FIRST(head) ((head)->stqh_first) 189 #define STAILQ_FOREACH(var, head, field) \ 190 for((var) = STAILQ_FIRST((head)); \ 194 #define STAILQ_INIT(head) do { \ 195 STAILQ_FIRST((head)) = NULL; \ 196 (head)->stqh_last = &STAILQ_FIRST((head)); \ 199 #define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ 201 (head)->stqh_last = &STAILQ_NEXT((elm), field); \ 205 #define STAILQ_INSERT_HEAD(head, elm, field) do { \ 206 if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \ 207 (head)->stqh_last = &STAILQ_NEXT((elm), field); \ 208 STAILQ_FIRST((head)) = (elm); \ 211 #define STAILQ_INSERT_TAIL(head, elm, field) do { \ 213 STAILQ_LAST((head)) = (elm); \ 214 (head)->stqh_last = &STAILQ_NEXT((elm), field); \ 217 #define STAILQ_LAST(head) (*(head)->stqh_last) 221 #define STAILQ_REMOVE(head, elm, type, field) do { \ 222 if (STAILQ_FIRST((head)) == (elm)) { \ 223 STAILQ_REMOVE_HEAD(head, field); \ 226 struct type *curelm = STAILQ_FIRST((head)); \ 231 (head)->stqh_last = &STAILQ_NEXT((curelm), field);\ 235 #define STAILQ_REMOVE_HEAD(head, field) do { \ 236 if ((STAILQ_FIRST((head)) = \ 237 STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \ 238 (head)->stqh_last = &STAILQ_FIRST((head)); \ 241 #define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \ 242 if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \ 243 (head)->stqh_last = &STAILQ_FIRST((head)); \ 254 #define LIST_HEAD_INITIALIZER(head) \ 267 #define LIST_EMPTY(head) ((head)->lh_first == NULL) 269 #define LIST_FIRST(head) ((head)->lh_first) 271 #define LIST_FOREACH(var, head, field) \ 272 for ((var) = LIST_FIRST((head)); \ 276 #define LIST_INIT(head) do { \ 277 LIST_FIRST((head)) = NULL; \ 295 #define LIST_INSERT_HEAD(head, elm, field) do { \ 296 if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ 297 LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), 
field);\ 298 LIST_FIRST((head)) = (elm); \ 299 (elm)->field.le_prev = &LIST_FIRST((head)); \ 320 #define TAILQ_HEAD_INITIALIZER(head) \ 321 { NULL, &(head).tqh_first } 332 #define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) 334 #define TAILQ_FIRST(head) ((head)->tqh_first) 336 #define TAILQ_FOREACH(var, head, field) \ 337 for ((var) = TAILQ_FIRST((head)); \ 341 #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ 342 for ((var) = TAILQ_LAST((head), headname); \ 346 #define TAILQ_INIT(head) do { \ 347 TAILQ_FIRST((head)) = NULL; \ 348 (head)->tqh_last = &TAILQ_FIRST((head)); \ 351 #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ 356 (head)->tqh_last = &TAILQ_NEXT((elm), field); \ 368 #define TAILQ_INSERT_HEAD(head, elm, field) do { \ 369 if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ 370 TAILQ_FIRST((head))->field.tqe_prev = \ 373 (head)->tqh_last = &TAILQ_NEXT((elm), field); \ 374 TAILQ_FIRST((head)) = (elm); \ 375 (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ 378 #define TAILQ_INSERT_TAIL(head, elm, field) do { \ 380 (elm)->field.tqe_prev = (head)->tqh_last; \ 381 *(head)->tqh_last = (elm); \ 382 (head)->tqh_last = &TAILQ_NEXT((elm), field); \ 385 #define TAILQ_LAST(head, headname) \ 386 (*(((struct headname *)((head)->tqh_last))->tqh_last)) 393 #define TAILQ_REMOVE(head, elm, field) do { \ 398 (head)->tqh_last = (elm)->field.tqe_prev; \ 411 #define CIRCLEQ_HEAD_INITIALIZER(head) \ 412 { (void *)&(head), (void *)&(head) } 423 #define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head)) 425 #define CIRCLEQ_FIRST(head) ((head)->cqh_first) 427 #define CIRCLEQ_FOREACH(var, head, field) \ 428 for ((var) = CIRCLEQ_FIRST((head)); \ 429 (var) != (void *)(head); \ 432 #define CIRCLEQ_FOREACH_REVERSE(var, head, field) \ 433 for ((var) = CIRCLEQ_LAST((head)); \ 434 (var) != (void *)(head); \ 437 #define CIRCLEQ_INIT(head) do { \ 438 CIRCLEQ_FIRST((head)) = (void *)(head); \ 439 CIRCLEQ_LAST((head)) = (void *)(head); \ 442 #define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ 445 if (CIRCLEQ_NEXT((listelm), field) == (void *)(head)) \ 446 CIRCLEQ_LAST((head)) = (elm); \ 452 #define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ 455 if (CIRCLEQ_PREV((listelm), field) == (void *)(head)) \ 456 CIRCLEQ_FIRST((head)) = (elm); \ 462 #define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ 463 CIRCLEQ_NEXT((elm), field) = CIRCLEQ_FIRST((head)); \ 464 CIRCLEQ_PREV((elm), field) = (void *)(head); \ 465 if (CIRCLEQ_LAST((head)) == (void *)(head)) \ 466 CIRCLEQ_LAST((head)) = (elm); \ 468 CIRCLEQ_PREV(CIRCLEQ_FIRST((head)), field) = (elm); \ 469 CIRCLEQ_FIRST((head)) = (elm); \ 472 #define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ 473 CIRCLEQ_NEXT((elm), field) = (void *)(head); \ 474 CIRCLEQ_PREV((elm), field) = CIRCLEQ_LAST((head)); \ 475 if (CIRCLEQ_FIRST((head)) == (void *)(head)) \ 476 CIRCLEQ_FIRST((head)) = (elm); \ 478 CIRCLEQ_NEXT(CIRCLEQ_LAST((head)), field) = (elm); \ 479 CIRCLEQ_LAST((head)) = (elm); \ 482 #define CIRCLEQ_LAST(head) ((head)->cqh_last) 488 #define CIRCLEQ_REMOVE(head, elm, field) do { \ 489 if (CIRCLEQ_NEXT((elm), field) == (void *)(head)) \ 490 CIRCLEQ_LAST((head)) = CIRCLEQ_PREV((elm), field); \ 494 if (CIRCLEQ_PREV((elm), field) == (void *)(head)) \ 495 CIRCLEQ_FIRST((head)) = CIRCLEQ_NEXT((elm), field); \
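These are the classic 4.4BSD queue macros; essentially the same family ships as <sys/queue.h> in most libcs, so the tail-queue flavor can be exercised directly in userspace:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct job {
    int id;
    TAILQ_ENTRY(job) links;         /* embedded forward/back links */
};

TAILQ_HEAD(job_list, job);

int main(void)
{
    struct job_list jobs;
    struct job *j, *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

    if (!a || !b)
        return 1;
    a->id = 1;
    b->id = 2;
    TAILQ_INIT(&jobs);
    TAILQ_INSERT_TAIL(&jobs, a, links);
    TAILQ_INSERT_HEAD(&jobs, b, links); /* b now precedes a */
    TAILQ_FOREACH(j, &jobs, links)
        printf("job %d\n", j->id);
    free(b);
    free(a);
    return 0;
}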
|
/linux-4.1.27/arch/sparc/include/asm/ |
H A D | intr_queue.h | 6 #define INTRQ_CPU_MONDO_HEAD 0x3c0 /* CPU mondo head */ 8 #define INTRQ_DEVICE_MONDO_HEAD 0x3d0 /* Device mondo head */ 10 #define INTRQ_RESUM_MONDO_HEAD 0x3e0 /* Resumable error mondo head */ 12 #define INTRQ_NONRESUM_MONDO_HEAD 0x3f0 /* Non-resumable error mondo head */ 13 #define INTRQ_NONRESUM_MONDO_TAIL 0x3f8 /* Non-resumable error mondo tail */
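Each queue gets a head/tail register pair because it is a ring: hardware advances the tail as mondos arrive, software consumes at the head, and head == tail means empty. A generic consume-side sketch in plain C; the real code uses privileged ASI accesses, and the layout here is an assumption:

#include <stdint.h>

struct mondo_queue {
    uint64_t head;      /* software-owned consume offset */
    uint64_t tail;      /* hardware-owned produce offset */
    uint64_t mask;      /* size - 1; size is a power of two */
    uint64_t *entries;
};

/* Pop one entry, returning 0 when head has caught up with tail. */
static int queue_pop(struct mondo_queue *q, uint64_t *out)
{
    if (q->head == q->tail)
        return 0;                       /* empty */
    *out = q->entries[q->head & q->mask];
    q->head++;                          /* real code writes this back
                                         * through the head register */
    return 1;
}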
|
/linux-4.1.27/arch/frv/kernel/ |
H A D | Makefile | 5 heads-y := head-uc-fr401.o head-uc-fr451.o head-uc-fr555.o 6 heads-$(CONFIG_MMU) := head-mmu-fr451.o 8 extra-y := head.o vmlinux.lds
|
/linux-4.1.27/drivers/infiniband/hw/amso1100/ |
H A D | c2_alloc.c | 40 struct sp_chunk **head) c2_alloc_mqsp_chunk() 55 new_head->head = 0; c2_alloc_mqsp_chunk() 67 *head = new_head; c2_alloc_mqsp_chunk() 89 __be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head, c2_alloc_mqsp() argument 94 while (head) { c2_alloc_mqsp() 95 mqsp = head->head; c2_alloc_mqsp() 97 head->head = head->shared_ptr[mqsp]; c2_alloc_mqsp() 99 } else if (head->next == NULL) { c2_alloc_mqsp() 100 if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) == c2_alloc_mqsp() 102 head = head->next; c2_alloc_mqsp() 103 mqsp = head->head; c2_alloc_mqsp() 104 head->head = head->shared_ptr[mqsp]; c2_alloc_mqsp() 109 head = head->next; c2_alloc_mqsp() 111 if (head) { c2_alloc_mqsp() 112 *dma_addr = head->dma_addr + c2_alloc_mqsp() 113 ((unsigned long) &(head->shared_ptr[mqsp]) - c2_alloc_mqsp() 114 (unsigned long) head); c2_alloc_mqsp() 116 &(head->shared_ptr[mqsp]), (unsigned long long) *dma_addr); c2_alloc_mqsp() 117 return (__force __be16 *) &(head->shared_ptr[mqsp]); c2_alloc_mqsp() 124 struct sp_chunk *head; c2_free_mqsp() local 128 head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK); c2_free_mqsp() 130 /* Link head to new mqsp */ c2_free_mqsp() 131 *mqsp = (__force __be16) head->head; c2_free_mqsp() 137 /* Point this index at the head */ c2_free_mqsp() 138 head->shared_ptr[idx] = head->head; c2_free_mqsp() 140 /* Point head at this index */ c2_free_mqsp() 141 head->head = idx; c2_free_mqsp() 39 c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask, struct sp_chunk **head) c2_alloc_mqsp_chunk() argument
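c2_alloc_mqsp() keeps an index-linked free list inside each chunk: every free slot of the shared_ptr array stores the index of the next free slot, and head names the first one. A userspace sketch of the same allocator, with illustrative names and a fixed pool instead of chained chunks:

#include <stdint.h>

#define NSLOTS 16

struct sp_pool {
    uint16_t head;              /* first free slot, NSLOTS == exhausted */
    uint16_t slot[NSLOTS];      /* free slots chain to the next free index */
};

static void pool_init(struct sp_pool *p)
{
    int i;

    for (i = 0; i < NSLOTS; i++)
        p->slot[i] = i + 1;     /* slot i -> i+1, last -> NSLOTS */
    p->head = 0;
}

static int pool_alloc(struct sp_pool *p)
{
    int idx;

    if (p->head == NSLOTS)
        return -1;              /* empty (the driver chains a new chunk) */
    idx = p->head;
    p->head = p->slot[idx];     /* unlink, as c2_alloc_mqsp() does */
    return idx;
}

static void pool_free(struct sp_pool *p, int idx)
{
    p->slot[idx] = p->head;     /* point this slot at the old head */
    p->head = idx;              /* make it the new head, as c2_free_mqsp() */
}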
|
/linux-4.1.27/drivers/gpu/drm/nouveau/dispnv04/ |
H A D | hw.c | 39 NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value) NVWriteVgaSeq() argument 41 NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index); NVWriteVgaSeq() 42 NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value); NVWriteVgaSeq() 46 NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index) NVReadVgaSeq() argument 48 NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index); NVReadVgaSeq() 49 return NVReadPRMVIO(dev, head, NV_PRMVIO_SR); NVReadVgaSeq() 53 NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value) NVWriteVgaGr() argument 55 NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index); NVWriteVgaGr() 56 NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value); NVWriteVgaGr() 60 NVReadVgaGr(struct drm_device *dev, int head, uint8_t index) NVReadVgaGr() argument 62 NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index); NVReadVgaGr() 63 return NVReadPRMVIO(dev, head, NV_PRMVIO_GX); NVReadVgaGr() 66 /* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied) 70 * expected and values can be set for the appropriate head by using a 0x2000 73 * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and 74 * cr44 must be set to 0 or 3 for accessing values on the correct head 76 * b) in tied mode (4) head B is programmed to the values set on head A, and 77 * access using the head B addresses can have strange results, ergo we leave 81 * 0 and 1 are treated as head values and so the set value is (owner * 3) 110 NVBlankScreen(struct drm_device *dev, int head, bool blank) NVBlankScreen() argument 115 NVSetOwner(dev, head); NVBlankScreen() 117 seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX); NVBlankScreen() 119 NVVgaSeqReset(dev, head, true); NVBlankScreen() 121 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20); NVBlankScreen() 123 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20); NVBlankScreen() 124 NVVgaSeqReset(dev, head, false); NVBlankScreen() 246 nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head) nouveau_hw_fix_bad_vpll() argument 248 /* the vpll on an unused head can come up with a random value, way nouveau_hw_fix_bad_vpll() 260 enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0; nouveau_hw_fix_bad_vpll() 271 NV_WARN(drm, "VPLL %d outwith limits, attempting to fix\n", head + 1); nouveau_hw_fix_bad_vpll() 373 rd_cio_state(struct drm_device *dev, int head, rd_cio_state() argument 376 crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index); rd_cio_state() 380 wr_cio_state(struct drm_device *dev, int head, wr_cio_state() argument 383 NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]); wr_cio_state() 387 nv_save_state_ramdac(struct drm_device *dev, int head, nv_save_state_ramdac() argument 391 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; nv_save_state_ramdac() 395 regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); nv_save_state_ramdac() 397 nouveau_hw_get_pllvals(dev, head ? 
PLL_VPLL1 : PLL_VPLL0, ®p->pllvals); nv_save_state_ramdac() 402 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11); nv_save_state_ramdac() 404 regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL); nv_save_state_ramdac() 407 regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630); nv_save_state_ramdac() 409 regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634); nv_save_state_ramdac() 411 regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP); nv_save_state_ramdac() 412 regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL); nv_save_state_ramdac() 413 regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW); nv_save_state_ramdac() 414 regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY); nv_save_state_ramdac() 415 regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL); nv_save_state_ramdac() 416 regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW); nv_save_state_ramdac() 417 regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY); nv_save_state_ramdac() 418 regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2); nv_save_state_ramdac() 422 regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg); nv_save_state_ramdac() 423 regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20); nv_save_state_ramdac() 427 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER); nv_save_state_ramdac() 429 regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4); nv_save_state_ramdac() 430 regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4); nv_save_state_ramdac() 434 regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); nv_save_state_ramdac() 435 regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0); nv_save_state_ramdac() 436 if (!nv_gf4_disp_arch(dev) && head == 0) { nv_save_state_ramdac() 438 * the head A FPCLK on (nv11 even locks up) */ nv_save_state_ramdac() 442 regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1); nv_save_state_ramdac() 443 regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2); nv_save_state_ramdac() 445 regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR); nv_save_state_ramdac() 448 regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0); nv_save_state_ramdac() 451 regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20); nv_save_state_ramdac() 452 regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24); nv_save_state_ramdac() 453 regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34); nv_save_state_ramdac() 456 regp->ctv_regs[i] = NVReadRAMDAC(dev, head, nv_save_state_ramdac() 462 nv_load_state_ramdac(struct drm_device *dev, int head, nv_load_state_ramdac() argument 467 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; nv_load_state_ramdac() 468 uint32_t pllreg = head ? 
NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; nv_load_state_ramdac() 472 NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync); nv_load_state_ramdac() 479 NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither); nv_load_state_ramdac() 481 NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl); nv_load_state_ramdac() 484 NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630); nv_load_state_ramdac() 486 NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634); nv_load_state_ramdac() 488 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup); nv_load_state_ramdac() 489 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal); nv_load_state_ramdac() 490 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew); nv_load_state_ramdac() 491 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay); nv_load_state_ramdac() 492 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal); nv_load_state_ramdac() 493 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew); nv_load_state_ramdac() 494 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay); nv_load_state_ramdac() 495 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2); nv_load_state_ramdac() 500 NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]); nv_load_state_ramdac() 501 NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]); nv_load_state_ramdac() 505 NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither); nv_load_state_ramdac() 507 NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]); nv_load_state_ramdac() 508 NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]); nv_load_state_ramdac() 512 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control); nv_load_state_ramdac() 513 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0); nv_load_state_ramdac() 514 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1); nv_load_state_ramdac() 515 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2); nv_load_state_ramdac() 517 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color); nv_load_state_ramdac() 520 NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0); nv_load_state_ramdac() 523 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20); nv_load_state_ramdac() 524 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24); nv_load_state_ramdac() 525 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34); nv_load_state_ramdac() 528 NVWriteRAMDAC(dev, head, nv_load_state_ramdac() 534 nv_save_state_vga(struct drm_device *dev, int head, nv_save_state_vga() argument 537 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; nv_save_state_vga() 540 regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ); nv_save_state_vga() 543 rd_cio_state(dev, head, regp, i); nv_save_state_vga() 545 NVSetEnablePalette(dev, head, true); nv_save_state_vga() 547 regp->Attribute[i] = NVReadVgaAttr(dev, head, i); nv_save_state_vga() 548 NVSetEnablePalette(dev, head, false); nv_save_state_vga() 551 regp->Graphics[i] = NVReadVgaGr(dev, head, i); nv_save_state_vga() 554 regp->Sequencer[i] = NVReadVgaSeq(dev, head, i); nv_save_state_vga() 558 nv_load_state_vga(struct drm_device *dev, int head, nv_load_state_vga() argument 561 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; nv_load_state_vga() 564 NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg); nv_load_state_vga() 
567 NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]); nv_load_state_vga() 569 nv_lock_vga_crtc_base(dev, head, false); nv_load_state_vga() 571 wr_cio_state(dev, head, regp, i); nv_load_state_vga() 572 nv_lock_vga_crtc_base(dev, head, true); nv_load_state_vga() 575 NVWriteVgaGr(dev, head, i, regp->Graphics[i]); nv_load_state_vga() 577 NVSetEnablePalette(dev, head, true); nv_load_state_vga() 579 NVWriteVgaAttr(dev, head, i, regp->Attribute[i]); nv_load_state_vga() 580 NVSetEnablePalette(dev, head, false); nv_load_state_vga() 584 nv_save_state_ext(struct drm_device *dev, int head, nv_save_state_ext() argument 588 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; nv_save_state_ext() 591 rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX); nv_save_state_ext() 592 rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX); nv_save_state_ext() 593 rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX); nv_save_state_ext() 594 rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX); nv_save_state_ext() 595 rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX); nv_save_state_ext() 596 rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX); nv_save_state_ext() 597 rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX); nv_save_state_ext() 599 rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); nv_save_state_ext() 600 rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); nv_save_state_ext() 601 rd_cio_state(dev, head, regp, NV_CIO_CRE_21); nv_save_state_ext() 604 rd_cio_state(dev, head, regp, NV_CIO_CRE_47); nv_save_state_ext() 607 rd_cio_state(dev, head, regp, 0x9f); nv_save_state_ext() 609 rd_cio_state(dev, head, regp, NV_CIO_CRE_49); nv_save_state_ext() 610 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); nv_save_state_ext() 611 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); nv_save_state_ext() 612 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); nv_save_state_ext() 613 rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); nv_save_state_ext() 616 regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830); nv_save_state_ext() 617 regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834); nv_save_state_ext() 620 regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT); nv_save_state_ext() 623 regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850); nv_save_state_ext() 626 regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL); nv_save_state_ext() 627 regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG); nv_save_state_ext() 630 regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG); nv_save_state_ext() 632 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); nv_save_state_ext() 633 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); nv_save_state_ext() 635 rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); nv_save_state_ext() 636 rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB); nv_save_state_ext() 637 rd_cio_state(dev, head, regp, NV_CIO_CRE_4B); nv_save_state_ext() 638 rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY); nv_save_state_ext() 642 rd_cio_state(dev, head, regp, NV_CIO_CRE_42); nv_save_state_ext() 643 rd_cio_state(dev, head, regp, NV_CIO_CRE_53); nv_save_state_ext() 644 rd_cio_state(dev, head, regp, NV_CIO_CRE_54); nv_save_state_ext() 647 regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i); nv_save_state_ext() 648 rd_cio_state(dev, head, regp, NV_CIO_CRE_59); nv_save_state_ext() 649 rd_cio_state(dev, head, regp, NV_CIO_CRE_5B); nv_save_state_ext() 651 rd_cio_state(dev, head, regp, NV_CIO_CRE_85); nv_save_state_ext() 652 rd_cio_state(dev, 
head, regp, NV_CIO_CRE_86); nv_save_state_ext() 655 regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START); nv_save_state_ext() 659 nv_load_state_ext(struct drm_device *dev, int head, nv_load_state_ext() argument 665 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; nv_load_state_ext() 675 NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl); nv_load_state_ext() 687 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); nv_load_state_ext() 688 NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830); nv_load_state_ext() 689 NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834); nv_load_state_ext() 692 NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext); nv_load_state_ext() 695 NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); nv_load_state_ext() 697 reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); nv_load_state_ext() 699 NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000); nv_load_state_ext() 701 NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000); nv_load_state_ext() 705 NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg); nv_load_state_ext() 707 wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX); nv_load_state_ext() 708 wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX); nv_load_state_ext() 709 wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX); nv_load_state_ext() 710 wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX); nv_load_state_ext() 711 wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX); nv_load_state_ext() 712 wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX); nv_load_state_ext() 713 wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX); nv_load_state_ext() 714 wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); nv_load_state_ext() 715 wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); nv_load_state_ext() 718 wr_cio_state(dev, head, regp, NV_CIO_CRE_47); nv_load_state_ext() 721 wr_cio_state(dev, head, regp, 0x9f); nv_load_state_ext() 723 wr_cio_state(dev, head, regp, NV_CIO_CRE_49); nv_load_state_ext() 724 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); nv_load_state_ext() 725 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); nv_load_state_ext() 726 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); nv_load_state_ext() 728 nv_fix_nv40_hw_cursor(dev, head); nv_load_state_ext() 729 wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); nv_load_state_ext() 731 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); nv_load_state_ext() 732 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); nv_load_state_ext() 734 wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); nv_load_state_ext() 735 wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); nv_load_state_ext() 736 wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); nv_load_state_ext() 737 wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY); nv_load_state_ext() 748 wr_cio_state(dev, head, regp, NV_CIO_CRE_42); nv_load_state_ext() 749 wr_cio_state(dev, head, regp, NV_CIO_CRE_53); nv_load_state_ext() 750 wr_cio_state(dev, head, regp, NV_CIO_CRE_54); nv_load_state_ext() 753 NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]); nv_load_state_ext() 754 wr_cio_state(dev, head, regp, NV_CIO_CRE_59); nv_load_state_ext() 755 wr_cio_state(dev, head, regp, NV_CIO_CRE_5B); nv_load_state_ext() 757 wr_cio_state(dev, head, regp, NV_CIO_CRE_85); nv_load_state_ext() 758 wr_cio_state(dev, head, regp, NV_CIO_CRE_86); nv_load_state_ext() 761 NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start); nv_load_state_ext() 765 nv_save_state_palette(struct 
drm_device *dev, int head, nv_save_state_palette() argument 769 int head_offset = head * NV_PRMDIO_SIZE, i; nv_save_state_palette() 776 state->crtc_reg[head].DAC[i] = nvif_rd08(device, nv_save_state_palette() 780 NVSetEnablePalette(dev, head, false); nv_save_state_palette() 784 nouveau_hw_load_state_palette(struct drm_device *dev, int head, nouveau_hw_load_state_palette() argument 788 int head_offset = head * NV_PRMDIO_SIZE, i; nouveau_hw_load_state_palette() 796 state->crtc_reg[head].DAC[i]); nouveau_hw_load_state_palette() 799 NVSetEnablePalette(dev, head, false); nouveau_hw_load_state_palette() 802 void nouveau_hw_save_state(struct drm_device *dev, int head, nouveau_hw_save_state() argument 809 nouveau_hw_fix_bad_vpll(dev, head); nouveau_hw_save_state() 810 nv_save_state_ramdac(dev, head, state); nouveau_hw_save_state() 811 nv_save_state_vga(dev, head, state); nouveau_hw_save_state() 812 nv_save_state_palette(dev, head, state); nouveau_hw_save_state() 813 nv_save_state_ext(dev, head, state); nouveau_hw_save_state() 816 void nouveau_hw_load_state(struct drm_device *dev, int head, nouveau_hw_load_state() argument 819 NVVgaProtect(dev, head, true); nouveau_hw_load_state() 820 nv_load_state_ramdac(dev, head, state); nouveau_hw_load_state() 821 nv_load_state_ext(dev, head, state); nouveau_hw_load_state() 822 nouveau_hw_load_state_palette(dev, head, state); nouveau_hw_load_state() 823 nv_load_state_vga(dev, head, state); nouveau_hw_load_state() 824 NVVgaProtect(dev, head, false); nouveau_hw_load_state()
|
H A D | hw.h | 38 void NVWriteVgaSeq(struct drm_device *, int head, uint8_t index, uint8_t value); 39 uint8_t NVReadVgaSeq(struct drm_device *, int head, uint8_t index); 40 void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value); 41 uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index); 43 void NVBlankScreen(struct drm_device *, int head, bool blank); 49 void nouveau_hw_save_state(struct drm_device *, int head, 51 void nouveau_hw_load_state(struct drm_device *, int head, 53 void nouveau_hw_load_state_palette(struct drm_device *, int head, 61 int head, uint32_t reg) NVReadCRTC() 65 if (head) NVReadCRTC() 72 int head, uint32_t reg, uint32_t val) NVWriteCRTC() 75 if (head) NVWriteCRTC() 81 int head, uint32_t reg) NVReadRAMDAC() 85 if (head) NVReadRAMDAC() 92 int head, uint32_t reg, uint32_t val) NVWriteRAMDAC() 95 if (head) NVWriteRAMDAC() 121 int head, uint8_t index, uint8_t value) NVWriteVgaCrtc() 124 nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); NVWriteVgaCrtc() 125 nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value); NVWriteVgaCrtc() 129 int head, uint8_t index) NVReadVgaCrtc() 133 nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); NVReadVgaCrtc() 134 val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); NVReadVgaCrtc() 140 * per-head variables around 153 NVWriteVgaCrtc5758(struct drm_device *dev, int head, uint8_t index, uint8_t value) NVWriteVgaCrtc5758() argument 155 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index); NVWriteVgaCrtc5758() 156 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_58, value); NVWriteVgaCrtc5758() 159 static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_t index) NVReadVgaCrtc5758() argument 161 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index); NVReadVgaCrtc5758() 162 return NVReadVgaCrtc(dev, head, NV_CIO_CRE_58); NVReadVgaCrtc5758() 166 int head, uint32_t reg) NVReadPRMVIO() 173 * NVSetOwner for the relevant head to be programmed */ NVReadPRMVIO() 174 if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) NVReadPRMVIO() 182 int head, uint32_t reg, uint8_t value) NVWritePRMVIO() 188 * NVSetOwner for the relevant head to be programmed */ NVWritePRMVIO() 189 if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) NVWritePRMVIO() 195 static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) NVSetEnablePalette() argument 198 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); NVSetEnablePalette() 199 nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 
0 : 0x20); NVSetEnablePalette() 202 static inline bool NVGetEnablePalette(struct drm_device *dev, int head) NVGetEnablePalette() argument 205 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); NVGetEnablePalette() 206 return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); NVGetEnablePalette() 210 int head, uint8_t index, uint8_t value) NVWriteVgaAttr() 213 if (NVGetEnablePalette(dev, head)) NVWriteVgaAttr() 218 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); NVWriteVgaAttr() 219 nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); NVWriteVgaAttr() 220 nvif_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value); NVWriteVgaAttr() 224 int head, uint8_t index) NVReadVgaAttr() 228 if (NVGetEnablePalette(dev, head)) NVReadVgaAttr() 233 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); NVReadVgaAttr() 234 nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); NVReadVgaAttr() 235 val = nvif_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE); NVReadVgaAttr() 239 static inline void NVVgaSeqReset(struct drm_device *dev, int head, bool start) NVVgaSeqReset() argument 241 NVWriteVgaSeq(dev, head, NV_VIO_SR_RESET_INDEX, start ? 0x1 : 0x3); NVVgaSeqReset() 244 static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect) NVVgaProtect() argument 246 uint8_t seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX); NVVgaProtect() 249 NVVgaSeqReset(dev, head, true); NVVgaProtect() 250 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20); NVVgaProtect() 253 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20); /* reenable display */ NVVgaProtect() 254 NVVgaSeqReset(dev, head, false); NVVgaProtect() 256 NVSetEnablePalette(dev, head, protect); NVVgaProtect() 271 /* makes cr0-7 on the specified head read-only */ 273 nv_lock_vga_crtc_base(struct drm_device *dev, int head, bool lock) nv_lock_vga_crtc_base() argument 275 uint8_t cr11 = NVReadVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX); nv_lock_vga_crtc_base() 282 NVWriteVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX, cr11); nv_lock_vga_crtc_base() 288 nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock) nv_lock_vga_crtc_shadow() argument 304 cr21 = NVReadVgaCrtc(dev, head, NV_CIO_CRE_21) | 0xfa; nv_lock_vga_crtc_shadow() 306 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_21, cr21); nv_lock_vga_crtc_shadow() 342 nv_fix_nv40_hw_cursor(struct drm_device *dev, int head) nv_fix_nv40_hw_cursor() argument 349 uint32_t curpos = NVReadRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS); nv_fix_nv40_hw_cursor() 350 NVWriteRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS, curpos); nv_fix_nv40_hw_cursor() 354 nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset) nv_set_crtc_base() argument 358 NVWriteCRTC(dev, head, NV_PCRTC_START, offset); nv_set_crtc_base() 365 int cre_heb = NVReadVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX); nv_set_crtc_base() 367 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX, nv_set_crtc_base() 373 nv_show_cursor(struct drm_device *dev, int head, bool show) nv_show_cursor() argument 377 &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX]; nv_show_cursor() 383 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1); nv_show_cursor() 386 nv_fix_nv40_hw_cursor(dev, head); nv_show_cursor() 60 NVReadCRTC(struct drm_device *dev, int head, uint32_t reg) NVReadCRTC() argument 71 NVWriteCRTC(struct drm_device *dev, int head, uint32_t reg, uint32_t val) NVWriteCRTC() argument 80 NVReadRAMDAC(struct 
drm_device *dev, int head, uint32_t reg) NVReadRAMDAC() argument 91 NVWriteRAMDAC(struct drm_device *dev, int head, uint32_t reg, uint32_t val) NVWriteRAMDAC() argument 120 NVWriteVgaCrtc(struct drm_device *dev, int head, uint8_t index, uint8_t value) NVWriteVgaCrtc() argument 128 NVReadVgaCrtc(struct drm_device *dev, int head, uint8_t index) NVReadVgaCrtc() argument 165 NVReadPRMVIO(struct drm_device *dev, int head, uint32_t reg) NVReadPRMVIO() argument 181 NVWritePRMVIO(struct drm_device *dev, int head, uint32_t reg, uint8_t value) NVWritePRMVIO() argument 209 NVWriteVgaAttr(struct drm_device *dev, int head, uint8_t index, uint8_t value) NVWriteVgaAttr() argument 223 NVReadVgaAttr(struct drm_device *dev, int head, uint8_t index) NVReadVgaAttr() argument
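These accessors all hinge on per-head register banks: as the hw.c comment above notes for the PRMVIO range, head B's registers sit at a fixed offset (0x2000 there) above head A's, so each helper simply biases the address when head is nonzero. A minimal sketch, treating the stride as an assumption:

#include <stdint.h>

#define HEAD_STRIDE 0x2000u     /* per-head bank offset; value taken from
                                 * the PRMVIO comment, assumed generic here */

static inline uint32_t head_reg(uint32_t reg, int head)
{
    return reg + (head ? HEAD_STRIDE : 0);
}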
|
H A D | tvnv17.c | 54 int head; nv42_tv_sample_load() local 62 head = (dacclk & 0x100) >> 8; nv42_tv_sample_load() 67 fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL); nv42_tv_sample_load() 68 fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START); nv42_tv_sample_load() 69 fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END); nv42_tv_sample_load() 70 fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); nv42_tv_sample_load() 72 ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c); nv42_tv_sample_load() 73 ctv_14 = NVReadRAMDAC(dev, head, 0x680c14); nv42_tv_sample_load() 74 ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c); nv42_tv_sample_load() 80 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343); nv42_tv_sample_load() 81 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047); nv42_tv_sample_load() 82 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183); nv42_tv_sample_load() 83 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, nv42_tv_sample_load() 98 NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20); nv42_tv_sample_load() 99 NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16); nv42_tv_sample_load() 102 NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff); nv42_tv_sample_load() 108 NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff); nv42_tv_sample_load() 114 NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c); nv42_tv_sample_load() 115 NVWriteRAMDAC(dev, head, 0x680c14, ctv_14); nv42_tv_sample_load() 116 NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c); nv42_tv_sample_load() 119 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control); nv42_tv_sample_load() 120 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end); nv42_tv_sample_load() 121 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start); nv42_tv_sample_load() 122 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal); nv42_tv_sample_load() 410 int head = nouveau_crtc(encoder->crtc)->index; nv17_tv_prepare() local 411 uint8_t *cr_lcd = &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[ nv17_tv_prepare() 419 nv04_dfp_disable(dev, head); nv17_tv_prepare() 421 /* Unbind any FP encoders from this head if we need the FP nv17_tv_prepare() 426 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { nv17_tv_prepare() 432 nv04_dfp_get_bound_head(dev, dcb) == head) { nv17_tv_prepare() 433 nv04_dfp_bind_head(dev, dcb, head ^ 1, nv17_tv_prepare() 441 *cr_lcd |= 0x1 | (head ? 0x0 : 0x8); nv17_tv_prepare() 452 if (head) nv17_tv_prepare() 471 int head = nouveau_crtc(encoder->crtc)->index; nv17_tv_mode_set() local 472 struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head]; nv17_tv_mode_set() 485 if (head) nv17_tv_mode_set() 535 * encoder in its OR enabled and routed to the head it's nv17_tv_mode_set()
|
H A D | disp.c | 94 &dev->mode_config.connector_list, head) { nv04_display_create() 102 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { nv04_display_create() 108 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) nv04_display_create() 111 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { nv04_display_create() 131 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nv04_display_destroy() 140 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { nv04_display_destroy() 146 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) nv04_display_destroy() 171 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { nv04_display_init() 177 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) nv04_display_init()
|
H A D | dfp.c | 69 int head, bool dl) nv04_dfp_bind_head() 74 * head < 0 indicates we wish to force a setting with the overrideval nv04_dfp_bind_head() 81 if (head != ramdac) nv04_dfp_bind_head() 93 void nv04_dfp_disable(struct drm_device *dev, int head) nv04_dfp_disable() argument 97 if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) & nv04_dfp_disable() 103 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, nv04_dfp_disable() 108 crtcstate[head].fp_control = FP_TG_CONTROL_OFF; nv04_dfp_disable() 109 crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] &= nv04_dfp_disable() 135 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nv04_dfp_update_fp_control() 171 list_for_each_entry(slave, &dev->mode_config.encoder_list, head) { get_tmds_slave() 204 struct nouveau_encoder *nv_encoder, int head) nv04_dfp_prepare_sel_clk() 216 if (head) nv04_dfp_prepare_sel_clk() 229 * bits 5&7 head dependent as for bits 4&6, but do not appear with 4&6; nv04_dfp_prepare_sel_clk() 240 state->sel_clk |= (head ? 0x40 : 0x10) << shift; nv04_dfp_prepare_sel_clk() 249 int head = nouveau_crtc(encoder->crtc)->index; nv04_dfp_prepare() local 251 uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX]; nv04_dfp_prepare() 252 uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX]; nv04_dfp_prepare() 256 nv04_dfp_prepare_sel_clk(dev, nv_encoder, head); nv04_dfp_prepare() 262 *cr_lcd |= head ? 0x0 : 0x8; nv04_dfp_prepare() 270 NVWriteVgaCrtc(dev, head ^ 1, nv04_dfp_prepare() 452 int head = nouveau_crtc(encoder->crtc)->index; nv04_dfp_commit() local 456 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); nv04_dfp_commit() 458 call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock); nv04_dfp_commit() 462 nv04_display(dev)->mode_reg.crtc_reg[head].fp_control = nv04_dfp_commit() 463 NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); nv04_dfp_commit() 533 int head = crtc ? nouveau_crtc(crtc)->index : nv04_lvds_dpms() local 537 call_lvds_script(dev, nv_encoder->dcb, head, nv04_lvds_dpms() 543 call_lvds_script(dev, nv_encoder->dcb, head, nv04_lvds_dpms() 581 nv_encoder->restore.head = nv04_dfp_save() 589 int head = nv_encoder->restore.head; nv04_dfp_restore() local 596 call_lvds_script(dev, nv_encoder->dcb, head, nv04_dfp_restore() 602 (&nv04_display(dev)->saved_reg.crtc_reg[head].pllvals); nv04_dfp_restore() 604 run_tmds_table(dev, nv_encoder->dcb, head, clock); nv04_dfp_restore() 68 nv04_dfp_bind_head(struct drm_device *dev, struct dcb_output *dcbent, int head, bool dl) nv04_dfp_bind_head() argument 203 nv04_dfp_prepare_sel_clk(struct drm_device *dev, struct nouveau_encoder *nv_encoder, int head) nv04_dfp_prepare_sel_clk() argument
|
H A D | tvnv04.c | 85 int head = nouveau_crtc(encoder->crtc)->index; nv04_tv_dpms() local 86 crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX); nv04_tv_dpms() 88 state->pllsel |= head ? PLLSEL_TV_CRTC2_MASK : nv04_tv_dpms() 94 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A); nv04_tv_dpms() 102 static void nv04_tv_bind(struct drm_device *dev, int head, bool bind) nv04_tv_bind() argument 104 struct nv04_crtc_reg *state = &nv04_display(dev)->mode_reg.crtc_reg[head]; nv04_tv_bind() 113 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX, nv04_tv_bind() 115 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49, nv04_tv_bind() 117 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, nv04_tv_bind() 124 int head = nouveau_crtc(encoder->crtc)->index; nv04_tv_prepare() local 129 nv04_dfp_disable(dev, head); nv04_tv_prepare() 132 nv04_tv_bind(dev, head ^ 1, false); nv04_tv_prepare() 134 nv04_tv_bind(dev, head, true); nv04_tv_prepare()
|
H A D | dac.c | 146 /* only implemented for head A for now */ nv04_dac_detect() 223 NV_DEBUG(drm, "Load detected on head A\n"); nv04_dac_detect() 240 int head; nv17_dac_sample_load() local 277 head = (saved_routput & 0x100) >> 8; nv17_dac_sample_load() 280 if (!(NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX) & 0xC0)) nv17_dac_sample_load() 281 head ^= 1; nv17_dac_sample_load() 284 routput = (saved_routput & 0xfffffece) | head << 8; nv17_dac_sample_load() 299 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, nv17_dac_sample_load() 301 temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); nv17_dac_sample_load() 302 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, nv17_dac_sample_load() 310 temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); nv17_dac_sample_load() 311 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, nv17_dac_sample_load() 313 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, 0); nv17_dac_sample_load() 363 int head = nouveau_crtc(encoder->crtc)->index; nv04_dac_prepare() local 367 nv04_dfp_disable(dev, head); nv04_dac_prepare() 376 int head = nouveau_crtc(encoder->crtc)->index; nv04_dac_mode_set() local 386 head << 8 | NV_PRAMDAC_DACCLK_SEL_DACCLK); nv04_dac_mode_set() 388 list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) { nv04_dac_mode_set() 396 (otherdac & ~0x0100) | (head ^ 1) << 8); nv04_dac_mode_set()
|
/linux-4.1.27/arch/avr32/oprofile/ |
H A D | backtrace.c | 35 static struct frame_head *dump_user_backtrace(struct frame_head *head) dump_user_backtrace() argument 40 if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) dump_user_backtrace() 42 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) dump_user_backtrace() 49 if (bufhead[0].fp <= head) dump_user_backtrace() 58 struct frame_head *head = (struct frame_head *)(regs->r7); avr32_backtrace() local 67 (unsigned long)head)) { avr32_backtrace() 68 oprofile_add_trace(head->lr); avr32_backtrace() 69 if (head->fp <= head) avr32_backtrace() 71 head = head->fp; avr32_backtrace() 76 while (depth-- && head) avr32_backtrace() 77 head = dump_user_backtrace(head); avr32_backtrace()
|
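The avr32 entry above (and the x86 oprofile entry further down) walks user-space frame pointers: each frame is copied into a local buffer before being dereferenced, and the walk stops unless the saved frame pointer moves strictly up the stack, which bounds the loop even on a corrupt stack. A minimal userspace sketch of that termination check follows; the simulated frames and the walk() harness are illustrative, not kernel API.

    /* Sketch of the frame-pointer walk in the oprofile backtrace code
     * above. The key invariant matches the kernel check
     * (bufhead[0].fp <= head aborts): the saved frame pointer must
     * point strictly above the current frame, so the loop terminates. */
    #include <stdio.h>

    struct frame_head {
        struct frame_head *fp;  /* saved caller frame pointer */
        unsigned long lr;       /* saved return address */
    };

    static void walk(struct frame_head *head, int depth)
    {
        while (depth-- && head) {
            printf("return address: %#lx\n", head->lr);
            if (!head->fp || head->fp <= head)  /* must move up the stack */
                break;
            head = head->fp;
        }
    }

    int main(void)
    {
        /* three fake stacked frames; array elements ascend in address,
         * so the "moves up" check passes until the NULL terminator */
        struct frame_head frames[3] = {
            { &frames[1], 0x1000 },
            { &frames[2], 0x2000 },
            { NULL,       0x3000 },
        };
        walk(&frames[0], 8);
        return 0;
    }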
/linux-4.1.27/drivers/gpu/drm/radeon/ |
H A D | mkregtable.c | 79 * @head: list head to add it after 81 * Insert a new entry after the specified head. 84 static inline void list_add(struct list_head *new, struct list_head *head) list_add() argument 86 __list_add(new, head, head->next); list_add() 92 * @head: list head to add it before 94 * Insert a new entry before the specified head. 97 static inline void list_add_tail(struct list_head *new, struct list_head *head) list_add_tail() argument 99 __list_add(new, head->prev, head); list_add_tail() 165 * list_move - delete from one list and add as another's head 167 * @head: the head that will precede our entry 169 static inline void list_move(struct list_head *list, struct list_head *head) list_move() argument 172 list_add(list, head); list_move() 178 * @head: the head that will follow our entry 181 struct list_head *head) list_move_tail() 184 list_add_tail(list, head); list_move_tail() 188 * list_is_last - tests whether @list is the last entry in list @head 190 * @head: the head of the list 193 const struct list_head *head) list_is_last() 195 return list->next == head; list_is_last() 200 * @head: the list to test. 202 static inline int list_empty(const struct list_head *head) list_empty() argument 204 return head->next == head; list_empty() 209 * @head: the list to test 220 static inline int list_empty_careful(const struct list_head *head) list_empty_careful() argument 222 struct list_head *next = head->next; list_empty_careful() 223 return (next == head) && (next == head->prev); list_empty_careful() 228 * @head: the list to test. 230 static inline int list_is_singular(const struct list_head *head) list_is_singular() argument 232 return !list_empty(head) && (head->next == head->prev); list_is_singular() 236 struct list_head *head, __list_cut_position() 240 list->next = head->next; __list_cut_position() 244 head->next = new_first; __list_cut_position() 245 new_first->prev = head; __list_cut_position() 251 * @head: a list with entries 252 * @entry: an entry within head, could be the head itself 255 * This helper moves the initial part of @head, up to and 256 * including @entry, from @head to @list. You should 257 * pass on @entry an element you know is on @head. @list 263 struct list_head *head, list_cut_position() 266 if (list_empty(head)) list_cut_position() 268 if (list_is_singular(head) && (head->next != entry && head != entry)) list_cut_position() 270 if (entry == head) list_cut_position() 273 __list_cut_position(list, head, entry); list_cut_position() 292 * @head: the place to add it in the first list. 295 struct list_head *head) list_splice() 298 __list_splice(list, head, head->next); list_splice() 304 * @head: the place to add it in the first list. 307 struct list_head *head) list_splice_tail() 310 __list_splice(list, head->prev, head); list_splice_tail() 316 * @head: the place to add it in the first list. 321 struct list_head *head) list_splice_init() 324 __list_splice(list, head, head->next); list_splice_init() 332 * @head: the place to add it in the first list. 338 struct list_head *head) list_splice_tail_init() 341 __list_splice(list, head->prev, head); list_splice_tail_init() 357 * @ptr: the list head to take the element from. 369 * @head: the head for your list. 371 #define list_for_each(pos, head) \ 372 for (pos = (head)->next; prefetch(pos->next), pos != (head); \ 378 * @head: the head for your list. 380 #define list_for_each_prev(pos, head) \ 381 for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ 388 * @head: the head for your list. 
390 #define list_for_each_safe(pos, n, head) \ 391 for (pos = (head)->next, n = pos->next; pos != (head); \ 398 * @head: the head for your list. 400 #define list_for_each_prev_safe(pos, n, head) \ 401 for (pos = (head)->prev, n = pos->prev; \ 402 prefetch(pos->prev), pos != (head); \ 408 * @head: the head for your list. 411 #define list_for_each_entry(pos, head, member) \ 412 for (pos = list_entry((head)->next, typeof(*pos), member); \ 413 &pos->member != (head); \ 419 * @head: the head for your list. 422 #define list_for_each_entry_reverse(pos, head, member) \ 423 for (pos = list_entry((head)->prev, typeof(*pos), member); \ 424 prefetch(pos->member.prev), &pos->member != (head); \ 430 * @head: the head of the list 435 #define list_prepare_entry(pos, head, member) \ 436 ((pos) ? : list_entry(head, typeof(*pos), member)) 441 * @head: the head for your list. 447 #define list_for_each_entry_continue(pos, head, member) \ 449 prefetch(pos->member.next), &pos->member != (head); \ 455 * @head: the head for your list. 461 #define list_for_each_entry_continue_reverse(pos, head, member) \ 463 prefetch(pos->member.prev), &pos->member != (head); \ 469 * @head: the head for your list. 474 #define list_for_each_entry_from(pos, head, member) \ 475 for (; prefetch(pos->member.next), &pos->member != (head); \ 482 * @head: the head for your list. 485 #define list_for_each_entry_safe(pos, n, head, member) \ 486 for (pos = list_entry((head)->next, typeof(*pos), member), \ 488 &pos->member != (head); \ 495 * @head: the head for your list. 501 #define list_for_each_entry_safe_continue(pos, n, head, member) \ 504 &pos->member != (head); \ 511 * @head: the head for your list. 517 #define list_for_each_entry_safe_from(pos, n, head, member) \ 519 &pos->member != (head); \ 526 * @head: the head for your list. 532 #define list_for_each_entry_safe_reverse(pos, n, head, member) \ 533 for (pos = list_entry((head)->prev, typeof(*pos), member), \ 535 &pos->member != (head); \ 180 list_move_tail(struct list_head *list, struct list_head *head) list_move_tail() argument 192 list_is_last(const struct list_head *list, const struct list_head *head) list_is_last() argument 235 __list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) __list_cut_position() argument 262 list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) list_cut_position() argument 294 list_splice(const struct list_head *list, struct list_head *head) list_splice() argument 306 list_splice_tail(struct list_head *list, struct list_head *head) list_splice_tail() argument 320 list_splice_init(struct list_head *list, struct list_head *head) list_splice_init() argument 337 list_splice_tail_init(struct list_head *list, struct list_head *head) list_splice_tail_init() argument
|
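mkregtable.c carries a userspace copy of the kernel's intrusive list API, so the head/entry idiom above can be exercised outside the kernel: a sentinel list_head, nodes embedded in user structs, and container_of() to get back to the enclosing object. A minimal self-contained sketch of that idiom, assuming the usual offsetof-based container_of layout:

    /* Sketch of the intrusive circular list pattern from the
     * mkregtable.c copy above: sentinel head, embedded nodes,
     * iteration via container_of(). Not the kernel headers, just the
     * same shape in plain C. */
    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void list_init(struct list_head *h) { h->next = h->prev = h; }

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
    }

    static int list_empty(const struct list_head *head)
    {
        return head->next == head;  /* same test as in the entry above */
    }

    struct item { int val; struct list_head link; };

    int main(void)
    {
        struct list_head head;
        struct item a = { 1 }, b = { 2 };
        struct list_head *pos;

        list_init(&head);
        list_add_tail(&a.link, &head);
        list_add_tail(&b.link, &head);
        for (pos = head.next; pos != &head; pos = pos->next)
            printf("%d\n", container_of(pos, struct item, link)->val);
        printf("empty=%d\n", list_empty(&head));
        return 0;
    }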
/linux-4.1.27/drivers/scsi/sym53c8xx_2/ |
H A D | sym_misc.h | 55 static inline struct sym_quehead *sym_que_first(struct sym_quehead *head) sym_que_first() argument 57 return (head->flink == head) ? 0 : head->flink; sym_que_first() 60 static inline struct sym_quehead *sym_que_last(struct sym_quehead *head) sym_que_last() argument 62 return (head->blink == head) ? 0 : head->blink; sym_que_last() 82 static inline int sym_que_empty(struct sym_quehead *head) sym_que_empty() argument 84 return head->flink == head; sym_que_empty() 88 struct sym_quehead *head) sym_que_splice() 94 struct sym_quehead *at = head->flink; sym_que_splice() 96 first->blink = head; sym_que_splice() 97 head->flink = first; sym_que_splice() 130 #define sym_insque_head(new, head) __sym_que_add(new, head, (head)->flink) 132 static inline struct sym_quehead *sym_remque_head(struct sym_quehead *head) sym_remque_head() argument 134 struct sym_quehead *elem = head->flink; sym_remque_head() 136 if (elem != head) sym_remque_head() 137 __sym_que_del(head, elem->flink); sym_remque_head() 143 #define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head) 145 static inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head) sym_remque_tail() argument 147 struct sym_quehead *elem = head->blink; sym_remque_tail() 149 if (elem != head) sym_remque_tail() 150 __sym_que_del(elem->blink, head); sym_remque_tail() 159 #define FOR_EACH_QUEUED_ELEMENT(head, qp) \ 160 for (qp = (head)->flink; qp != (head); qp = qp->flink) 87 sym_que_splice(struct sym_quehead *list, struct sym_quehead *head) sym_que_splice() argument
|
/linux-4.1.27/net/atm/ |
H A D | addr.c | 51 struct list_head *head; atm_reset_addr() local 55 head = &dev->lecs; atm_reset_addr() 57 head = &dev->local; list_for_each_entry_safe() 58 list_for_each_entry_safe(this, p, head, entry) { list_for_each_entry_safe() 63 if (head == &dev->local) 72 struct list_head *head; atm_add_addr() local 80 head = &dev->lecs; atm_add_addr() 82 head = &dev->local; list_for_each_entry() 83 list_for_each_entry(this, head, entry) { list_for_each_entry() 95 list_add(&this->entry, head); 97 if (head == &dev->local) 107 struct list_head *head; atm_del_addr() local 115 head = &dev->lecs; atm_del_addr() 117 head = &dev->local; list_for_each_entry() 118 list_for_each_entry(this, head, entry) { list_for_each_entry() 123 if (head == &dev->local) list_for_each_entry() 137 struct list_head *head; atm_get_addr() local 143 head = &dev->lecs; atm_get_addr() 145 head = &dev->local; atm_get_addr() 146 list_for_each_entry(this, head, entry) atm_get_addr() 153 list_for_each_entry(this, head, entry) atm_get_addr()
|
/linux-4.1.27/arch/x86/oprofile/ |
H A D | backtrace.c | 42 dump_user_backtrace_32(struct stack_frame_ia32 *head) dump_user_backtrace_32() argument 49 bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); dump_user_backtrace_32() 59 if (head >= fp) dump_user_backtrace_32() 68 struct stack_frame_ia32 *head; x86_backtrace_32() local 74 head = (struct stack_frame_ia32 *) regs->bp; x86_backtrace_32() 75 while (depth-- && head) x86_backtrace_32() 76 head = dump_user_backtrace_32(head); x86_backtrace_32() 89 static struct stack_frame *dump_user_backtrace(struct stack_frame *head) dump_user_backtrace() argument 95 bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); dump_user_backtrace() 103 if (head >= bufhead[0].next_frame) dump_user_backtrace() 112 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs); x86_backtrace() local 125 while (depth-- && head) x86_backtrace() 126 head = dump_user_backtrace(head); x86_backtrace()
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/engine/disp/ |
H A D | vga.c | 29 nv_rdport(void *obj, int head, u16 port) nv_rdport() argument 39 return nv_rd08(obj, 0x601000 + (head * 0x2000) + port); nv_rdport() 45 head = 0; /* CR44 selects head */ nv_rdport() 46 return nv_rd08(obj, 0x0c0000 + (head * 0x2000) + port); nv_rdport() 54 nv_wrport(void *obj, int head, u16 port, u8 data) nv_wrport() argument 64 nv_wr08(obj, 0x601000 + (head * 0x2000) + port, data); nv_wrport() 70 head = 0; /* CR44 selects head */ nv_wrport() 71 nv_wr08(obj, 0x0c0000 + (head * 0x2000) + port, data); nv_wrport() 77 nv_rdvgas(void *obj, int head, u8 index) nv_rdvgas() argument 79 nv_wrport(obj, head, 0x03c4, index); nv_rdvgas() 80 return nv_rdport(obj, head, 0x03c5); nv_rdvgas() 84 nv_wrvgas(void *obj, int head, u8 index, u8 value) nv_wrvgas() argument 86 nv_wrport(obj, head, 0x03c4, index); nv_wrvgas() 87 nv_wrport(obj, head, 0x03c5, value); nv_wrvgas() 91 nv_rdvgag(void *obj, int head, u8 index) nv_rdvgag() argument 93 nv_wrport(obj, head, 0x03ce, index); nv_rdvgag() 94 return nv_rdport(obj, head, 0x03cf); nv_rdvgag() 98 nv_wrvgag(void *obj, int head, u8 index, u8 value) nv_wrvgag() argument 100 nv_wrport(obj, head, 0x03ce, index); nv_wrvgag() 101 nv_wrport(obj, head, 0x03cf, value); nv_wrvgag() 105 nv_rdvgac(void *obj, int head, u8 index) nv_rdvgac() argument 107 nv_wrport(obj, head, 0x03d4, index); nv_rdvgac() 108 return nv_rdport(obj, head, 0x03d5); nv_rdvgac() 112 nv_wrvgac(void *obj, int head, u8 index, u8 value) nv_wrvgac() argument 114 nv_wrport(obj, head, 0x03d4, index); nv_wrvgac() 115 nv_wrport(obj, head, 0x03d5, value); nv_wrvgac() 119 nv_rdvgai(void *obj, int head, u16 port, u8 index) nv_rdvgai() argument 121 if (port == 0x03c4) return nv_rdvgas(obj, head, index); nv_rdvgai() 122 if (port == 0x03ce) return nv_rdvgag(obj, head, index); nv_rdvgai() 123 if (port == 0x03d4) return nv_rdvgac(obj, head, index); nv_rdvgai() 129 nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value) nv_wrvgai() argument 131 if (port == 0x03c4) nv_wrvgas(obj, head, index, value); nv_wrvgai() 132 else if (port == 0x03ce) nv_wrvgag(obj, head, index, value); nv_wrvgai() 133 else if (port == 0x03d4) nv_wrvgac(obj, head, index, value); nv_wrvgai() 155 /* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied) 159 * expected and values can be set for the appropriate head by using a 0x2000 162 * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and 163 * cr44 must be set to 0 or 3 for accessing values on the correct head 165 * b) in tied mode (4) head B is programmed to the values set on head A, and 166 * access using the head B addresses can have strange results, ergo we leave 170 * 0 and 1 are treated as head values and so the set value is (owner * 3)
|
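The vga.c entry above uses the classic VGA indexed-register protocol: write a register index to the index port (0x3d4 for CRTC registers), then access the data port one byte above, with head B's register window offset 0x2000 bytes from head A's. A sketch of that two-step dispatch follows; the MMIO space is simulated with a plain array, so only the access pattern is meaningful.

    /* Sketch of the index/data pattern behind nv_rdvgac()/nv_wrvgac()
     * above: select a CRTC register via port 0x3d4, access it via
     * 0x3d5, with head B mirrored 0x2000 bytes above head A. MMIO is
     * simulated with a byte array for illustration. */
    #include <stdio.h>
    #include <stdint.h>

    static uint8_t mmio[0x4000];
    static uint8_t crtc_index[2];      /* latched index, per head */

    static void wrport(int head, uint16_t port, uint8_t data)
    {
        if (port == 0x03d4)
            crtc_index[head] = data;   /* index write selects a register */
        else if (port == 0x03d5)
            mmio[head * 0x2000 + crtc_index[head]] = data;
    }

    static uint8_t rdport(int head, uint16_t port)
    {
        return port == 0x03d5 ? mmio[head * 0x2000 + crtc_index[head]] : 0;
    }

    static void wrvgac(int head, uint8_t index, uint8_t value)
    {
        wrport(head, 0x03d4, index);   /* same two-step as nv_wrvgac() */
        wrport(head, 0x03d5, value);
    }

    static uint8_t rdvgac(int head, uint8_t index)
    {
        wrport(head, 0x03d4, index);
        return rdport(head, 0x03d5);
    }

    int main(void)
    {
        wrvgac(0, 0x28, 0xab);         /* head A */
        wrvgac(1, 0x28, 0xcd);         /* head B, independent copy */
        printf("%02x %02x\n", rdvgac(0, 0x28), rdvgac(1, 0x28));
        return 0;
    }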
H A D | gf110.c | 628 const u32 total = nv_rd32(priv, 0x640414 + (head * 0x300)); gf110_disp_main_scanoutpos() 629 const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300)); gf110_disp_main_scanoutpos() 630 const u32 blanks = nv_rd32(priv, 0x640420 + (head * 0x300)); gf110_disp_main_scanoutpos() 647 nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff; gf110_disp_main_scanoutpos() 650 nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff; gf110_disp_main_scanoutpos() 675 for (i = 0; i < priv->head.nr; i++) { gf110_disp_main_init() 720 for (i = 0; i < priv->head.nr; i++) gf110_disp_main_init() 769 gf110_disp_vblank_init(struct nvkm_event *event, int type, int head) gf110_disp_vblank_init() argument 772 nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001); gf110_disp_vblank_init() 776 gf110_disp_vblank_fini(struct nvkm_event *event, int type, int head) gf110_disp_vblank_fini() argument 779 nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000); gf110_disp_vblank_fini() 790 exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl, exec_lookup() argument 818 mask |= 0x0100 << head; exec_lookup() 820 list_for_each_entry(outp, &priv->base.outp, head) { exec_lookup() 836 exec_script(struct nv50_disp_priv *priv, int head, int id) exec_script() argument 845 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) { exec_script() 847 if (ctrl & (1 << head)) exec_script() 854 outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info); exec_script() 861 .crtc = head, exec_script() 872 exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf) exec_clkcmp() argument 882 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) { exec_clkcmp() 884 if (ctrl & (1 << head)) exec_clkcmp() 891 outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1); exec_clkcmp() 922 .crtc = head, exec_clkcmp() 934 gf110_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head) gf110_disp_intr_unk1_0() argument 936 exec_script(priv, head, 1); gf110_disp_intr_unk1_0() 940 gf110_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head) gf110_disp_intr_unk2_0() argument 942 struct nvkm_output *outp = exec_script(priv, head, 2); gf110_disp_intr_unk2_0() 951 .crtc = head, gf110_disp_intr_unk2_0() 962 gf110_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head) gf110_disp_intr_unk2_1() argument 965 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; gf110_disp_intr_unk2_1() 967 devinit->pll_set(devinit, PLL_VPLL0 + head, pclk); gf110_disp_intr_unk2_1() 968 nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000); gf110_disp_intr_unk2_1() 972 gf110_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head, gf110_disp_intr_unk2_2_tu() argument 977 const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300)); gf110_disp_intr_unk2_2_tu() 978 const s32 vactive = nv_rd32(priv, 0x660414 + (head * 0x300)) & 0xffff; gf110_disp_intr_unk2_2_tu() 979 const s32 vblanke = nv_rd32(priv, 0x66041c + (head * 0x300)) & 0xffff; gf110_disp_intr_unk2_2_tu() 980 const s32 vblanks = nv_rd32(priv, 0x660420 + (head * 0x300)) & 0xffff; gf110_disp_intr_unk2_2_tu() 981 const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; gf110_disp_intr_unk2_2_tu() 983 const u32 hoff = (head * 0x800); gf110_disp_intr_unk2_2_tu() 1033 gf110_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head) gf110_disp_intr_unk2_2() argument 1036 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; gf110_disp_intr_unk2_2() 1039 outp = exec_clkcmp(priv, head, 0xff, pclk, &conf); 
gf110_disp_intr_unk2_2() 1045 u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300)); gf110_disp_intr_unk2_2() 1062 exec_clkcmp(priv, head, 0, pclk, &conf); gf110_disp_intr_unk2_2() 1075 gf110_disp_intr_unk2_2_tu(priv, head, &outp->info); gf110_disp_intr_unk2_2() 1086 gf110_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head) gf110_disp_intr_unk4_0() argument 1088 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; gf110_disp_intr_unk4_0() 1091 exec_clkcmp(priv, head, 1, pclk, &conf); gf110_disp_intr_unk4_0() 1101 int head; gf110_disp_intr_supervisor() local 1104 for (head = 0; head < priv->head.nr; head++) { gf110_disp_intr_supervisor() 1105 mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800)); gf110_disp_intr_supervisor() 1106 nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]); gf110_disp_intr_supervisor() 1111 for (head = 0; head < priv->head.nr; head++) { gf110_disp_intr_supervisor() 1112 if (!(mask[head] & 0x00001000)) gf110_disp_intr_supervisor() 1114 nv_debug(priv, "supervisor 1.0 - head %d\n", head); gf110_disp_intr_supervisor() 1115 gf110_disp_intr_unk1_0(priv, head); gf110_disp_intr_supervisor() 1119 for (head = 0; head < priv->head.nr; head++) { gf110_disp_intr_supervisor() 1120 if (!(mask[head] & 0x00001000)) gf110_disp_intr_supervisor() 1122 nv_debug(priv, "supervisor 2.0 - head %d\n", head); gf110_disp_intr_supervisor() 1123 gf110_disp_intr_unk2_0(priv, head); gf110_disp_intr_supervisor() 1125 for (head = 0; head < priv->head.nr; head++) { gf110_disp_intr_supervisor() 1126 if (!(mask[head] & 0x00010000)) gf110_disp_intr_supervisor() 1128 nv_debug(priv, "supervisor 2.1 - head %d\n", head); gf110_disp_intr_supervisor() 1129 gf110_disp_intr_unk2_1(priv, head); gf110_disp_intr_supervisor() 1131 for (head = 0; head < priv->head.nr; head++) { gf110_disp_intr_supervisor() 1132 if (!(mask[head] & 0x00001000)) gf110_disp_intr_supervisor() 1134 nv_debug(priv, "supervisor 2.2 - head %d\n", head); gf110_disp_intr_supervisor() 1135 gf110_disp_intr_unk2_2(priv, head); gf110_disp_intr_supervisor() 1139 for (head = 0; head < priv->head.nr; head++) { gf110_disp_intr_supervisor() 1140 if (!(mask[head] & 0x00001000)) gf110_disp_intr_supervisor() 1142 nv_debug(priv, "supervisor 3.0 - head %d\n", head); gf110_disp_intr_supervisor() 1143 gf110_disp_intr_unk4_0(priv, head); gf110_disp_intr_supervisor() 1147 for (head = 0; head < priv->head.nr; head++) gf110_disp_intr_supervisor() 1148 nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000); gf110_disp_intr_supervisor() 1241 for (i = 0; i < priv->head.nr; i++) { gf110_disp_intr() 1277 priv->head.nr = heads; gf110_disp_ctor() 1309 .head.scanoutpos = gf110_disp_main_scanoutpos,
|
H A D | nv04.c | 38 void *data, u32 size, int head) nv04_disp_scanoutpos() 40 const u32 hoff = head * 0x2000; nv04_disp_scanoutpos() 84 int head, ret; nv04_disp_mthd() local 88 nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n", nv04_disp_mthd() 89 args->v0.version, args->v0.method, args->v0.head); nv04_disp_mthd() 91 head = args->v0.head; nv04_disp_mthd() 95 if (head < 0 || head >= 2) nv04_disp_mthd() 100 return nv04_disp_scanoutpos(object, priv, data, size, head); nv04_disp_mthd() 129 nv04_disp_vblank_init(struct nvkm_event *event, int type, int head) nv04_disp_vblank_init() argument 132 nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000001); nv04_disp_vblank_init() 136 nv04_disp_vblank_fini(struct nvkm_event *event, int type, int head) nv04_disp_vblank_fini() argument 139 nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000000); nv04_disp_vblank_fini() 37 nv04_disp_scanoutpos(struct nvkm_object *object, struct nv04_disp_priv *priv, void *data, u32 size, int head) nv04_disp_scanoutpos() argument
|
H A D | nv50.c | 55 struct nvkm_oclass *oclass, int head, nv50_disp_chan_create_() 61 int chid = impl->chid + head; nv50_disp_chan_create_() 208 struct nvkm_oclass *oclass, u32 pushbuf, int head, nv50_disp_dmac_create_() 214 ret = nv50_disp_chan_create_(parent, engine, oclass, head, nv50_disp_dmac_create_() 344 nv50_disp_mthd_chan(struct nv50_disp_priv *priv, int debug, int head, nv50_disp_mthd_chan() argument 356 u32 base = head * chan->addr; nv50_disp_mthd_chan() 364 chan->name, head); nv50_disp_mthd_chan() 659 "pushbuf %08x head %d\n", nv50_disp_base_ctor() 660 args->v0.version, args->v0.pushbuf, args->v0.head); nv50_disp_base_ctor() 661 if (args->v0.head > priv->head.nr) nv50_disp_base_ctor() 667 args->v0.head, sizeof(*dmac), nv50_disp_base_ctor() 749 "pushbuf %08x head %d\n", nv50_disp_ovly_ctor() 750 args->v0.version, args->v0.pushbuf, args->v0.head); nv50_disp_ovly_ctor() 751 if (args->v0.head > priv->head.nr) nv50_disp_ovly_ctor() 757 args->v0.head, sizeof(*dmac), nv50_disp_ovly_ctor() 788 struct nvkm_oclass *oclass, int head, nv50_disp_pioc_create_() 791 return nv50_disp_chan_create_(parent, engine, oclass, head, nv50_disp_pioc_create_() 868 nv_ioctl(parent, "create disp overlay vers %d head %d\n", nv50_disp_oimm_ctor() 869 args->v0.version, args->v0.head); nv50_disp_oimm_ctor() 870 if (args->v0.head > priv->head.nr) nv50_disp_oimm_ctor() 875 ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head, nv50_disp_oimm_ctor() 916 nv_ioctl(parent, "create disp cursor vers %d head %d\n", nv50_disp_curs_ctor() 917 args->v0.version, args->v0.head); nv50_disp_curs_ctor() 918 if (args->v0.head > priv->head.nr) nv50_disp_curs_ctor() 923 ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head, nv50_disp_curs_ctor() 952 const u32 blanke = nv_rd32(priv, 0x610aec + (head * 0x540)); nv50_disp_main_scanoutpos() 953 const u32 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540)); nv50_disp_main_scanoutpos() 954 const u32 total = nv_rd32(priv, 0x610afc + (head * 0x540)); nv50_disp_main_scanoutpos() 971 nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff; nv50_disp_main_scanoutpos() 974 nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff; nv50_disp_main_scanoutpos() 993 int head, ret; nv50_disp_main_mthd() local 1000 nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n", nv50_disp_main_mthd() 1001 args->v0.version, args->v0.method, args->v0.head); nv50_disp_main_mthd() 1003 head = args->v0.head; nv50_disp_main_mthd() 1013 head = ffs((mask >> 8) & 0x0f) - 1; nv50_disp_main_mthd() 1017 if (head < 0 || head >= priv->head.nr) nv50_disp_main_mthd() 1021 list_for_each_entry(temp, &priv->base.outp, head) { nv50_disp_main_mthd() 1034 return impl->head.scanoutpos(object, priv, data, size, head); nv50_disp_main_mthd() 1041 return priv->dac.power(object, priv, data, size, head, outp); nv50_disp_main_mthd() 1043 return priv->dac.sense(object, priv, data, size, head, outp); nv50_disp_main_mthd() 1045 return priv->sor.power(object, priv, data, size, head, outp); nv50_disp_main_mthd() 1049 return priv->sor.hda_eld(object, priv, data, size, head, outp); nv50_disp_main_mthd() 1053 return priv->sor.hdmi(object, priv, data, size, head, outp); nv50_disp_main_mthd() 1096 return priv->pior.power(object, priv, data, size, head, outp); nv50_disp_main_mthd() 1152 for (i = 0; i < priv->head.nr; i++) { nv50_disp_main_init() 1290 nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head) nv50_disp_vblank_fini() argument 1293 nv_mask(disp, 0x61002c, (4 << head), 0); nv50_disp_vblank_fini() 1297 
nv50_disp_vblank_init(struct nvkm_event *event, int type, int head) nv50_disp_vblank_init() argument 1300 nv_mask(disp, 0x61002c, (4 << head), (4 << head)); nv50_disp_vblank_init() 1385 exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl, exec_lookup() argument 1424 mask |= 0x0100 << head; exec_lookup() 1426 list_for_each_entry(outp, &priv->base.outp, head) { exec_lookup() 1442 exec_script(struct nv50_disp_priv *priv, int head, int id) exec_script() argument 1453 for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++) exec_script() 1457 if (!(ctrl & (1 << head))) { exec_script() 1465 for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++) exec_script() 1471 if (!(ctrl & (1 << head))) { exec_script() 1472 for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++) exec_script() 1477 if (!(ctrl & (1 << head))) exec_script() 1481 outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info); exec_script() 1488 .crtc = head, exec_script() 1499 exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf) exec_clkcmp() argument 1511 for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++) exec_clkcmp() 1515 if (!(ctrl & (1 << head))) { exec_clkcmp() 1523 for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++) exec_clkcmp() 1529 if (!(ctrl & (1 << head))) { exec_clkcmp() 1530 for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++) exec_clkcmp() 1535 if (!(ctrl & (1 << head))) exec_clkcmp() 1539 outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1); exec_clkcmp() 1575 .crtc = head, exec_clkcmp() 1587 nv50_disp_intr_unk10_0(struct nv50_disp_priv *priv, int head) nv50_disp_intr_unk10_0() argument 1589 exec_script(priv, head, 1); nv50_disp_intr_unk10_0() 1593 nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head) nv50_disp_intr_unk20_0() argument 1595 struct nvkm_output *outp = exec_script(priv, head, 2); nv50_disp_intr_unk20_0() 1616 .crtc = head, nv50_disp_intr_unk20_0() 1627 nv50_disp_intr_unk20_1(struct nv50_disp_priv *priv, int head) nv50_disp_intr_unk20_1() argument 1630 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; nv50_disp_intr_unk20_1() 1632 devinit->pll_set(devinit, PLL_VPLL0 + head, pclk); nv50_disp_intr_unk20_1() 1636 nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head, nv50_disp_intr_unk20_2_dp() argument 1645 const s32 vactive = nv_rd32(priv, 0x610af8 + (head * 0x540)) & 0xffff; nv50_disp_intr_unk20_2_dp() 1646 const s32 vblanke = nv_rd32(priv, 0x610ae8 + (head * 0x540)) & 0xffff; nv50_disp_intr_unk20_2_dp() 1647 const s32 vblanks = nv_rd32(priv, 0x610af0 + (head * 0x540)) & 0xffff; nv50_disp_intr_unk20_2_dp() 1755 nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head) nv50_disp_intr_unk20_2() argument 1758 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; nv50_disp_intr_unk20_2() 1759 u32 hval, hreg = 0x614200 + (head * 0x800); nv50_disp_intr_unk20_2() 1763 outp = exec_clkcmp(priv, head, 0xff, pclk, &conf); nv50_disp_intr_unk20_2() 1810 exec_clkcmp(priv, head, 0, pclk, &conf); nv50_disp_intr_unk20_2() 1820 nv50_disp_intr_unk20_2_dp(priv, head, &outp->info, pclk); nv50_disp_intr_unk20_2() 1861 nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head) nv50_disp_intr_unk40_0() argument 1864 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; nv50_disp_intr_unk40_0() 1867 outp = exec_clkcmp(priv, head, 1, pclk, &conf); nv50_disp_intr_unk40_0() 1882 int head; nv50_disp_intr_supervisor() local 1888 for (head = 0; head < 
priv->head.nr; head++) { nv50_disp_intr_supervisor() 1889 if (!(super & (0x00000020 << head))) nv50_disp_intr_supervisor() 1891 if (!(super & (0x00000080 << head))) nv50_disp_intr_supervisor() 1893 nv50_disp_intr_unk10_0(priv, head); nv50_disp_intr_supervisor() 1897 for (head = 0; head < priv->head.nr; head++) { nv50_disp_intr_supervisor() 1898 if (!(super & (0x00000080 << head))) nv50_disp_intr_supervisor() 1900 nv50_disp_intr_unk20_0(priv, head); nv50_disp_intr_supervisor() 1902 for (head = 0; head < priv->head.nr; head++) { nv50_disp_intr_supervisor() 1903 if (!(super & (0x00000200 << head))) nv50_disp_intr_supervisor() 1905 nv50_disp_intr_unk20_1(priv, head); nv50_disp_intr_supervisor() 1907 for (head = 0; head < priv->head.nr; head++) { nv50_disp_intr_supervisor() 1908 if (!(super & (0x00000080 << head))) nv50_disp_intr_supervisor() 1910 nv50_disp_intr_unk20_2(priv, head); nv50_disp_intr_supervisor() 1914 for (head = 0; head < priv->head.nr; head++) { nv50_disp_intr_supervisor() 1915 if (!(super & (0x00000080 << head))) nv50_disp_intr_supervisor() 1917 nv50_disp_intr_unk40_0(priv, head); nv50_disp_intr_supervisor() 1986 priv->head.nr = 2; nv50_disp_ctor() 2018 .head.scanoutpos = nv50_disp_main_scanoutpos, 53 nv50_disp_chan_create_(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, int head, int length, void **pobject) nv50_disp_chan_create_() argument 206 nv50_disp_dmac_create_(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, u32 pushbuf, int head, int length, void **pobject) nv50_disp_dmac_create_() argument 786 nv50_disp_pioc_create_(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, int head, int length, void **pobject) nv50_disp_pioc_create_() argument
|
/linux-4.1.27/lib/ |
H A D | plist.c | 57 static void plist_check_head(struct plist_head *head) plist_check_head() argument 59 if (!plist_head_empty(head)) plist_check_head() 60 plist_check_list(&plist_first(head)->prio_list); plist_check_head() 61 plist_check_list(&head->node_list); plist_check_head() 69 * plist_add - add @node to @head 72 * @head: &struct plist_head pointer 74 void plist_add(struct plist_node *node, struct plist_head *head) plist_add() argument 77 struct list_head *node_next = &head->node_list; plist_add() 79 plist_check_head(head); plist_add() 83 if (plist_head_empty(head)) plist_add() 86 first = iter = plist_first(head); plist_add() 104 plist_check_head(head); plist_add() 111 * @head: &struct plist_head pointer - list head 113 void plist_del(struct plist_node *node, struct plist_head *head) plist_del() argument 115 plist_check_head(head); plist_del() 118 if (node->node_list.next != &head->node_list) { plist_del() 133 plist_check_head(head); plist_del() 144 * @head: &struct plist_head pointer - list head 146 void plist_requeue(struct plist_node *node, struct plist_head *head) plist_requeue() argument 149 struct list_head *node_next = &head->node_list; plist_requeue() 151 plist_check_head(head); plist_requeue() 152 BUG_ON(plist_head_empty(head)); plist_requeue() 155 if (node == plist_last(head)) plist_requeue() 163 plist_del(node, head); plist_requeue() 165 plist_for_each_continue(iter, head) { plist_for_each_continue() 173 plist_check_head(head);
|
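plist_add() above keeps nodes sorted by priority, with a second prio_list that links only the first node of each distinct priority so insertion cost scales with the number of priorities, not nodes. The sketch below keeps just the ordering invariant (non-decreasing priority, FIFO within a priority) on a single list; the two-list optimization is omitted for brevity.

    /* Reduced sketch of the invariant plist_add() maintains above:
     * nodes sit in non-decreasing priority order, and a new node goes
     * after all existing nodes of equal priority. The kernel's extra
     * prio_list that skips same-priority runs is left out. */
    #include <stdio.h>

    struct pnode { int prio; int val; struct pnode *next; };

    static void padd(struct pnode **head, struct pnode *node)
    {
        while (*head && (*head)->prio <= node->prio)
            head = &(*head)->next;     /* strictly after equal prios */
        node->next = *head;
        *head = node;
    }

    int main(void)
    {
        struct pnode n[4] = { {2, 0}, {1, 1}, {2, 2}, {0, 3} };
        struct pnode *head = NULL, *p;

        for (int i = 0; i < 4; i++)
            padd(&head, &n[i]);
        for (p = head; p; p = p->next)   /* 0,3  1,1  2,0  2,2 */
            printf("prio=%d val=%d\n", p->prio, p->val);
        return 0;
    }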
H A D | llist.c | 34 * @head: the head for your lock-less list 39 struct llist_head *head) llist_add_batch() 44 new_last->next = first = ACCESS_ONCE(head->first); llist_add_batch() 45 } while (cmpxchg(&head->first, first, new_first) != first); llist_add_batch() 53 * @head: the head for your lock-less list 61 * llist_add) sequence in another user may change @head->first->next, 62 * but keep @head->first. If multiple consumers are needed, please 65 struct llist_node *llist_del_first(struct llist_head *head) llist_del_first() argument 69 entry = head->first; llist_del_first() 75 entry = cmpxchg(&head->first, old_entry, next); llist_del_first() 86 * @head: first item of the list to be reversed 91 struct llist_node *llist_reverse_order(struct llist_node *head) llist_reverse_order() argument 95 while (head) { llist_reverse_order() 96 struct llist_node *tmp = head; llist_reverse_order() 97 head = head->next; llist_reverse_order() 38 llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, struct llist_head *head) llist_add_batch() argument
|
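llist_add_batch() above publishes a whole chain with one compare-and-swap: aim the batch's last node at the observed head, then CAS the head to the batch's first node, retrying if another producer won the race. A userspace sketch with C11 atomics standing in for the kernel's ACCESS_ONCE/cmpxchg pair:

    /* Sketch of the lock-less push in llist_add_batch() above: splice
     * new_first..new_last onto the head with a single CAS, looping
     * until no other producer raced us. */
    #include <stdio.h>
    #include <stdatomic.h>

    struct llist_node { struct llist_node *next; };
    struct llist_head { _Atomic(struct llist_node *) first; };

    static int llist_add_batch(struct llist_node *new_first,
                               struct llist_node *new_last,
                               struct llist_head *head)
    {
        struct llist_node *first = atomic_load(&head->first);

        do {
            new_last->next = first;    /* re-aimed on every retry */
        } while (!atomic_compare_exchange_weak(&head->first, &first,
                                               new_first));
        return first == NULL;          /* true if list was empty before */
    }

    int main(void)
    {
        struct llist_head head = { NULL };
        struct llist_node a, b;

        a.next = &b;                   /* two-node batch a -> b */
        printf("was empty: %d\n", llist_add_batch(&a, &b, &head));
        printf("head is a: %d\n", atomic_load(&head.first) == &a);
        return 0;
    }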
H A D | timerqueue.c | 33 * @head: head of timerqueue 39 void timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) timerqueue_add() argument 41 struct rb_node **p = &head->head.rb_node; timerqueue_add() 57 rb_insert_color(&node->node, &head->head); timerqueue_add() 59 if (!head->next || node->expires.tv64 < head->next->expires.tv64) timerqueue_add() 60 head->next = node; timerqueue_add() 67 * @head: head of timerqueue 72 void timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) timerqueue_del() argument 77 if (head->next == node) { timerqueue_del() 80 head->next = rbn ? timerqueue_del() 83 rb_erase(&node->node, &head->head); timerqueue_del()
|
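timerqueue_add()/timerqueue_del() above pair an rbtree with a cached head->next pointing at the earliest-expiring node, so peeking the next timer is O(1): inserts update the cache when the new node expires sooner, deletes recompute it from the structure. The sketch below keeps that caching discipline over a sorted singly linked list, assuming the rbtree away for brevity.

    /* Sketch of the cached-next discipline in timerqueue_add()/del()
     * above: whatever the backing structure (rbtree in the kernel, a
     * sorted list here), head->next always names the soonest timer. */
    #include <stdio.h>

    struct tqnode { long long expires; struct tqnode *link; };
    struct tqhead { struct tqnode *list; struct tqnode *next; };

    static void tq_add(struct tqhead *head, struct tqnode *node)
    {
        struct tqnode **p = &head->list;

        while (*p && (*p)->expires < node->expires)
            p = &(*p)->link;
        node->link = *p;
        *p = node;
        /* same cache update as the entry above */
        if (!head->next || node->expires < head->next->expires)
            head->next = node;
    }

    static void tq_del(struct tqhead *head, struct tqnode *node)
    {
        struct tqnode **p = &head->list;

        while (*p && *p != node)
            p = &(*p)->link;
        if (*p)
            *p = node->link;
        if (head->next == node)        /* recompute cache on removal */
            head->next = head->list;
    }

    int main(void)
    {
        struct tqhead head = { NULL, NULL };
        struct tqnode a = { 30 }, b = { 10 }, c = { 20 };

        tq_add(&head, &a); tq_add(&head, &b); tq_add(&head, &c);
        printf("next: %lld\n", head.next->expires);   /* 10 */
        tq_del(&head, &b);
        printf("next: %lld\n", head.next->expires);   /* 20 */
        return 0;
    }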
H A D | btree.c | 93 static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp) btree_node_alloc() argument 97 node = mempool_alloc(head->mempool, gfp); btree_node_alloc() 176 static inline void __btree_init(struct btree_head *head) __btree_init() argument 178 head->node = NULL; __btree_init() 179 head->height = 0; __btree_init() 182 void btree_init_mempool(struct btree_head *head, mempool_t *mempool) btree_init_mempool() argument 184 __btree_init(head); btree_init_mempool() 185 head->mempool = mempool; btree_init_mempool() 189 int btree_init(struct btree_head *head) btree_init() argument 191 __btree_init(head); btree_init() 192 head->mempool = mempool_create(0, btree_alloc, btree_free, NULL); btree_init() 193 if (!head->mempool) btree_init() 199 void btree_destroy(struct btree_head *head) btree_destroy() argument 201 mempool_free(head->node, head->mempool); btree_destroy() 202 mempool_destroy(head->mempool); btree_destroy() 203 head->mempool = NULL; btree_destroy() 207 void *btree_last(struct btree_head *head, struct btree_geo *geo, btree_last() argument 210 int height = head->height; btree_last() 211 unsigned long *node = head->node; btree_last() 241 void *btree_lookup(struct btree_head *head, struct btree_geo *geo, btree_lookup() argument 244 int i, height = head->height; btree_lookup() 245 unsigned long *node = head->node; btree_lookup() 271 int btree_update(struct btree_head *head, struct btree_geo *geo, btree_update() argument 274 int i, height = head->height; btree_update() 275 unsigned long *node = head->node; btree_update() 311 void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, btree_get_prev() argument 321 if (head->height == 0) btree_get_prev() 327 node = head->node; btree_get_prev() 328 for (height = head->height ; height > 1; height--) { btree_get_prev() 388 static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo, find_level() argument 391 unsigned long *node = head->node; find_level() 394 for (height = head->height; height > level; height--) { find_level() 413 static int btree_grow(struct btree_head *head, struct btree_geo *geo, btree_grow() argument 419 node = btree_node_alloc(head, gfp); btree_grow() 422 if (head->node) { btree_grow() 423 fill = getfill(geo, head->node, 0); btree_grow() 424 setkey(geo, node, 0, bkey(geo, head->node, fill - 1)); btree_grow() 425 setval(geo, node, 0, head->node); btree_grow() 427 head->node = node; btree_grow() 428 head->height++; btree_grow() 432 static void btree_shrink(struct btree_head *head, struct btree_geo *geo) btree_shrink() argument 437 if (head->height <= 1) btree_shrink() 440 node = head->node; btree_shrink() 443 head->node = bval(geo, node, 0); btree_shrink() 444 head->height--; btree_shrink() 445 mempool_free(node, head->mempool); btree_shrink() 448 static int btree_insert_level(struct btree_head *head, struct btree_geo *geo, btree_insert_level() argument 456 if (head->height < level) { btree_insert_level() 457 err = btree_grow(head, geo, gfp); btree_insert_level() 463 node = find_level(head, geo, key, level); btree_insert_level() 473 new = btree_node_alloc(head, gfp); btree_insert_level() 476 err = btree_insert_level(head, geo, btree_insert_level() 480 mempool_free(new, head->mempool); btree_insert_level() 510 int btree_insert(struct btree_head *head, struct btree_geo *geo, btree_insert() argument 514 return btree_insert_level(head, geo, key, val, 1, gfp); btree_insert() 518 static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, 520 static void 
merge(struct btree_head *head, struct btree_geo *geo, int level, merge() argument 536 btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1); merge() 537 mempool_free(right, head->mempool); merge() 540 static void rebalance(struct btree_head *head, struct btree_geo *geo, rebalance() argument 551 btree_remove_level(head, geo, key, level + 1); rebalance() 552 mempool_free(child, head->mempool); rebalance() 556 parent = find_level(head, geo, key, level + 1); rebalance() 564 merge(head, geo, level, rebalance() 575 merge(head, geo, level, rebalance() 591 static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, btree_remove_level() argument 598 if (level > head->height) { btree_remove_level() 600 head->height = 0; btree_remove_level() 601 head->node = NULL; btree_remove_level() 605 node = find_level(head, geo, key, level); btree_remove_level() 620 if (level < head->height) btree_remove_level() 621 rebalance(head, geo, key, level, node, fill - 1); btree_remove_level() 623 btree_shrink(head, geo); btree_remove_level() 629 void *btree_remove(struct btree_head *head, struct btree_geo *geo, btree_remove() argument 632 if (head->height == 0) btree_remove() 635 return btree_remove_level(head, geo, key, 1); btree_remove() 676 static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo, __btree_for_each() argument 691 count = __btree_for_each(head, geo, child, opaque, __btree_for_each() 698 mempool_free(node, head->mempool); __btree_for_each() 746 size_t btree_visitor(struct btree_head *head, struct btree_geo *geo, btree_visitor() argument 757 if (head->node) btree_visitor() 758 count = __btree_for_each(head, geo, head->node, opaque, func, btree_visitor() 759 func2, 0, head->height, 0); btree_visitor() 764 size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo, btree_grim_visitor() argument 775 if (head->node) btree_grim_visitor() 776 count = __btree_for_each(head, geo, head->node, opaque, func, btree_grim_visitor() 777 func2, 1, head->height, 0); btree_grim_visitor() 778 __btree_init(head); btree_grim_visitor()
|
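btree_lookup() above descends head->height levels; within a node, keys are stored in descending order, so the branch to take is the first slot whose key is <= the search key (btree_grow() seeding the new root's key from the old root's last, i.e. smallest, key reflects the same convention). The sketch below hand-builds two levels to show the descend loop; the geometry (three slots, exact-match leaves) is illustrative, not the kernel's btree_geo.

    /* Sketch of the descend loop in btree_lookup() above: keys within
     * a node descend, so the first slot with key <= search key is the
     * branch to take. Two hand-built levels stand in for
     * head->node/head->height. */
    #include <stdio.h>

    #define SLOTS 3

    struct bnode {
        unsigned long key[SLOTS];
        void *val[SLOTS];          /* child node, or value at a leaf */
    };

    static void *lookup(struct bnode *node, int height, unsigned long key)
    {
        int i;

        for (; height > 1; height--) {       /* walk internal levels */
            for (i = 0; i < SLOTS; i++)
                if (node->key[i] <= key)
                    break;
            if (i == SLOTS)
                return NULL;
            node = node->val[i];
            if (!node)                        /* empty slot, as in kernel */
                return NULL;
        }
        for (i = 0; i < SLOTS; i++)           /* leaf: exact match */
            if (node->key[i] == key)
                return node->val[i];
        return NULL;
    }

    int main(void)
    {
        char x[] = "x", y[] = "y";
        struct bnode leaf_lo = { { 20, 10, 0 }, { y, x, NULL } };
        struct bnode leaf_hi = { { 40, 30, 0 }, { x, y, NULL } };
        /* internal key = smallest key reachable through that child */
        struct bnode root = { { 30, 10, 0 }, { &leaf_hi, &leaf_lo, NULL } };

        printf("%s\n", (char *)lookup(&root, 2, 30));               /* y */
        printf("%s\n", lookup(&root, 2, 15) ? "found" : "not found");
        return 0;
    }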
H A D | list_sort.c | 17 * sentinel head node, "prev" links not maintained. 24 struct list_head head, *tail = &head; merge() local 38 return head.next; merge() 51 struct list_head *head, merge_and_restore_back_links() 54 struct list_head *tail = head; merge_and_restore_back_links() 86 tail->next = head; merge_and_restore_back_links() 87 head->prev = tail; merge_and_restore_back_links() 93 * @head: the list to sort 104 void list_sort(void *priv, struct list_head *head, list_sort() argument 114 if (list_empty(head)) list_sort() 119 head->prev->next = NULL; list_sort() 120 list = head->next; list_sort() 145 merge_and_restore_back_links(priv, cmp, head, part[max_lev], list); list_sort() 158 #define TEST_LIST_LEN (512+128+2) /* not including head */ 217 LIST_HEAD(head); list_sort_test() 239 list_add_tail(&el->list, &head); list_sort_test() 242 list_sort(NULL, &head, cmp); list_sort_test() 245 for (cur = head.next; cur->next != &head; cur = cur->next) { list_sort_test() 274 if (head.prev != cur) { list_sort_test() 48 merge_and_restore_back_links(void *priv, int (*cmp)(void *priv, struct list_head *a, struct list_head *b), struct list_head *head, struct list_head *a, struct list_head *b) merge_and_restore_back_links() argument
|
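list_sort() above is a bottom-up merge sort: the list is detached from its sentinel head, sorted as singly linked runs held in a part[] array (part[k] holding 2^k nodes), and prev links are only restored by the final merge_and_restore_back_links() pass. The core merge step, sketched on plain singly linked nodes with a local sentinel head as in the kernel's merge():

    /* Sketch of the merge step at the core of list_sort() above: two
     * sorted singly linked runs are stitched by always taking the
     * smaller front element; a local sentinel head avoids
     * special-casing the first node. */
    #include <stdio.h>
    #include <stddef.h>

    struct node { int v; struct node *next; };

    static struct node *merge(struct node *a, struct node *b)
    {
        struct node head, *tail = &head;   /* sentinel, as in list_sort.c */

        while (a && b) {
            if (a->v <= b->v) {            /* <= keeps the sort stable */
                tail->next = a;
                a = a->next;
            } else {
                tail->next = b;
                b = b->next;
            }
            tail = tail->next;
        }
        tail->next = a ? a : b;
        return head.next;
    }

    int main(void)
    {
        struct node a2 = { 9, NULL }, a1 = { 3, &a2 };  /* run 3,9 */
        struct node b2 = { 7, NULL }, b1 = { 1, &b2 };  /* run 1,7 */

        for (struct node *p = merge(&a1, &b1); p; p = p->next)
            printf("%d ", p->v);           /* 1 3 7 9 */
        printf("\n");
        return 0;
    }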
/linux-4.1.27/net/sunrpc/xprtrdma/ |
H A D | svc_rdma_recvfrom.c | 72 /* Set up the XDR head */ rdma_build_arg_xdr() 73 rqstp->rq_arg.head[0].iov_base = page_address(page); rdma_build_arg_xdr() 74 rqstp->rq_arg.head[0].iov_len = rdma_build_arg_xdr() 79 /* Compute bytes past head in the SGL */ rdma_build_arg_xdr() 80 bc = byte_count - rqstp->rq_arg.head[0].iov_len; rdma_build_arg_xdr() 87 rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base; rdma_build_arg_xdr() 130 struct svc_rdma_op_ctxt *head, rdma_read_chunk_lcl() 146 ctxt->read_hdr = head; rdma_read_chunk_lcl() 155 head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no]; rdma_read_chunk_lcl() 156 head->arg.page_len += len; rdma_read_chunk_lcl() 157 head->arg.len += len; rdma_read_chunk_lcl() 159 head->count++; rdma_read_chunk_lcl() 164 head->arg.pages[pg_no], pg_off, rdma_read_chunk_lcl() 224 struct svc_rdma_op_ctxt *head, rdma_read_chunk_frmr() 261 head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no]; rdma_read_chunk_frmr() 262 head->arg.page_len += len; rdma_read_chunk_frmr() 263 head->arg.len += len; rdma_read_chunk_frmr() 265 head->count++; rdma_read_chunk_frmr() 270 head->arg.pages[pg_no], 0, rdma_read_chunk_frmr() 300 ctxt->read_hdr = head; rdma_read_chunk_frmr() 374 rdma_copy_tail(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head, rdma_copy_tail() argument 381 srcp = head->arg.head[0].iov_base + position; rdma_copy_tail() 382 byte_count = head->arg.head[0].iov_len - position; rdma_copy_tail() 412 byte_count = head->arg.head[0].iov_len - position; rdma_copy_tail() 413 head->arg.page_len += byte_count; rdma_copy_tail() 414 head->arg.len += byte_count; rdma_copy_tail() 415 head->arg.buflen += byte_count; rdma_copy_tail() 422 struct svc_rdma_op_ctxt *head) rdma_read_chunks() 440 * head context keeps all the pages that comprise the rdma_read_chunks() 443 head->arg.head[0] = rqstp->rq_arg.head[0]; rdma_read_chunks() 444 head->arg.tail[0] = rqstp->rq_arg.tail[0]; rdma_read_chunks() 445 head->hdr_count = head->count; rdma_read_chunks() 446 head->arg.page_base = 0; rdma_read_chunks() 447 head->arg.page_len = 0; rdma_read_chunks() 448 head->arg.len = rqstp->rq_arg.len; rdma_read_chunks() 449 head->arg.buflen = rqstp->rq_arg.buflen; rdma_read_chunks() 456 head->arg.pages = &head->pages[0]; rdma_read_chunks() 457 page_offset = head->byte_len; rdma_read_chunks() 459 head->arg.pages = &head->pages[head->count]; rdma_read_chunks() 476 ret = xprt->sc_reader(xprt, rqstp, head, rdma_read_chunks() 484 head->arg.buflen += ret; rdma_read_chunks() 492 head->arg.page_len += pad; rdma_read_chunks() 493 head->arg.len += pad; rdma_read_chunks() 494 head->arg.buflen += pad; rdma_read_chunks() 499 if (position && position < head->arg.head[0].iov_len) rdma_read_chunks() 500 ret = rdma_copy_tail(rqstp, head, position, rdma_read_chunks() 502 head->arg.head[0].iov_len = position; rdma_read_chunks() 503 head->position = position; rdma_read_chunks() 515 struct svc_rdma_op_ctxt *head) rdma_read_complete() 521 for (page_no = 0; page_no < head->count; page_no++) { rdma_read_complete() 523 rqstp->rq_pages[page_no] = head->pages[page_no]; rdma_read_complete() 527 if (head->position == 0) { rdma_read_complete() 528 if (head->arg.len <= head->sge[0].length) { rdma_read_complete() 529 head->arg.head[0].iov_len = head->arg.len - rdma_read_complete() 530 head->byte_len; rdma_read_complete() 531 head->arg.page_len = 0; rdma_read_complete() 533 head->arg.head[0].iov_len = head->sge[0].length - rdma_read_complete() 534 head->byte_len; rdma_read_complete() 535 head->arg.page_len = head->arg.len - rdma_read_complete() 536 head->sge[0].length; rdma_read_complete() 541 rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count]; rdma_read_complete() 542 rqstp->rq_arg.page_len = head->arg.page_len; rdma_read_complete() 543 rqstp->rq_arg.page_base = head->arg.page_base; rdma_read_complete() 549 /* Rebuild rq_arg head and tail. */ rdma_read_complete() 550 rqstp->rq_arg.head[0] = head->arg.head[0]; rdma_read_complete() 551 rqstp->rq_arg.tail[0] = head->arg.tail[0]; rdma_read_complete() 552 rqstp->rq_arg.len = head->arg.len; rdma_read_complete() 553 rqstp->rq_arg.buflen = head->arg.buflen; rdma_read_complete() 556 svc_rdma_put_context(head, 0); rdma_read_complete() 562 ret = rqstp->rq_arg.head[0].iov_len rdma_read_complete() 566 "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zu\n", rdma_read_complete() 567 ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base, rdma_read_complete() 568 rqstp->rq_arg.head[0].iov_len); rdma_read_complete() 649 ret = rqstp->rq_arg.head[0].iov_len svc_rdma_recvfrom() 655 "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n", svc_rdma_recvfrom() 657 rqstp->rq_arg.head[0].iov_base, svc_rdma_recvfrom() 658 rqstp->rq_arg.head[0].iov_len); svc_rdma_recvfrom() 128 rdma_read_chunk_lcl(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head, int *page_no, u32 *page_offset, u32 rs_handle, u32 rs_length, u64 rs_offset, bool last) rdma_read_chunk_lcl() argument 222 rdma_read_chunk_frmr(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head, int *page_no, u32 *page_offset, u32 rs_handle, u32 rs_length, u64 rs_offset, bool last) rdma_read_chunk_frmr() argument 419 rdma_read_chunks(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head) rdma_read_chunks() argument 514 rdma_read_complete(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head) rdma_read_complete() argument
|
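The svc_rdma_recvfrom.c entry above juggles the three-part xdr_buf layout: a head iovec, an array of whole pages, and a tail iovec, where "bytes past head" is total length minus head iov_len and every page appended must bump page_len and len together. The sketch below reduces that accounting to an illustrative struct; the field names echo the entry but the type is not the kernel's.

    /* Sketch of the xdr_buf accounting visible in rdma_build_arg_xdr()
     * and rdma_read_chunk_lcl() above: message = head iovec + page
     * array + tail iovec, and appending page bytes must bump page_len
     * and len in step. Illustrative reduction, not the kernel struct. */
    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096

    struct xdr_buf {
        size_t head_len;   /* bytes in the head iovec */
        size_t page_len;   /* bytes carried in whole pages */
        size_t len;        /* total message bytes */
    };

    static void append_page_bytes(struct xdr_buf *arg, size_t bytes)
    {
        arg->page_len += bytes;   /* same paired updates as the entry */
        arg->len += bytes;
    }

    int main(void)
    {
        struct xdr_buf arg = { 0 };
        size_t byte_count = 10000;

        arg.head_len = byte_count < PAGE_SIZE ? byte_count : PAGE_SIZE;
        arg.len = arg.head_len;

        /* bytes past the head land in the page list, a page at a time */
        size_t bc = byte_count - arg.head_len;
        while (bc) {
            size_t chunk = bc < PAGE_SIZE ? bc : PAGE_SIZE;
            append_page_bytes(&arg, chunk);
            bc -= chunk;
        }
        printf("head=%zu pages=%zu total=%zu\n",
               arg.head_len, arg.page_len, arg.len); /* 4096 5904 10000 */
        return 0;
    }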
/linux-4.1.27/net/sched/ |
H A D | cls_cgroup.c | 32 struct cls_cgroup_head *head = rcu_dereference_bh(tp->root); cls_cgroup_classify() local 57 if (!tcf_em_tree_match(skb, &head->ematches, NULL)) cls_cgroup_classify() 62 return tcf_exts_exec(skb, &head->exts, res); cls_cgroup_classify() 81 struct cls_cgroup_head *head = container_of(root, cls_cgroup_destroy_rcu() local 85 tcf_exts_destroy(&head->exts); cls_cgroup_destroy_rcu() 86 tcf_em_tree_destroy(&head->ematches); cls_cgroup_destroy_rcu() 87 kfree(head); cls_cgroup_destroy_rcu() 96 struct cls_cgroup_head *head = rtnl_dereference(tp->root); cls_cgroup_change() local 105 if (!head && !handle) cls_cgroup_change() 108 if (head && handle != head->handle) cls_cgroup_change() 111 new = kzalloc(sizeof(*head), GFP_KERNEL); cls_cgroup_change() 138 if (head) cls_cgroup_change() 139 call_rcu(&head->rcu, cls_cgroup_destroy_rcu); cls_cgroup_change() 148 struct cls_cgroup_head *head = rtnl_dereference(tp->root); cls_cgroup_destroy() local 153 if (head) { cls_cgroup_destroy() 155 call_rcu(&head->rcu, cls_cgroup_destroy_rcu); cls_cgroup_destroy() 167 struct cls_cgroup_head *head = rtnl_dereference(tp->root); cls_cgroup_walk() local 172 if (arg->fn(tp, (unsigned long) head, arg) < 0) { cls_cgroup_walk() 183 struct cls_cgroup_head *head = rtnl_dereference(tp->root); cls_cgroup_dump() local 186 t->tcm_handle = head->handle; cls_cgroup_dump() 192 if (tcf_exts_dump(skb, &head->exts) < 0 || cls_cgroup_dump() 193 tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0) cls_cgroup_dump() 198 if (tcf_exts_dump_stats(skb, &head->exts) < 0) cls_cgroup_dump()
|
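cls_cgroup above, and the cls_fw/cls_basic/cls_route/cls_bpf/cls_flow entries below, share one RCU shape: the classify fast path reads tp->root via rcu_dereference_bh(), while updates build a complete replacement head under RTNL, publish it with rcu_assign_pointer(), and retire the old head through call_rcu(). The sketch below approximates the pattern with C11 acquire/release atomics; a few lines of userspace C cannot reproduce a grace period, so the deferred free is only mimicked (see comments).

    /* Sketch of the read-copy-update shape shared by the cls_* entries:
     * readers load the head with acquire semantics; a writer publishes
     * a fully-built replacement with a release store. The kernel defers
     * freeing via call_rcu(); freeing immediately here is only safe
     * because this demo is single-threaded. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdatomic.h>

    struct cls_head { unsigned int handle; unsigned int mask; };

    static _Atomic(struct cls_head *) root;

    static unsigned int classify(unsigned int id)
    {
        /* fast path: rcu_dereference() analogue */
        struct cls_head *head =
            atomic_load_explicit(&root, memory_order_acquire);
        return head ? (id & head->mask) : 0;
    }

    static void update(unsigned int handle, unsigned int mask)
    {
        struct cls_head *new = malloc(sizeof(*new));
        struct cls_head *old;

        new->handle = handle;
        new->mask = mask;
        /* rcu_assign_pointer() analogue: publish the built head */
        old = atomic_exchange_explicit(&root, new, memory_order_release);
        free(old);  /* kernel: call_rcu() once readers drain */
    }

    int main(void)
    {
        update(1, 0xff);
        printf("%#x\n", classify(0x1234));   /* 0x34 */
        update(2, 0x0f);
        printf("%#x\n", classify(0x1234));   /* 0x4 */
        free(atomic_load(&root));
        return 0;
    }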
H A D | cls_fw.c | 62 struct fw_head *head = rcu_dereference_bh(tp->root); fw_classify() local 67 if (head != NULL) { fw_classify() 68 id &= head->mask; fw_classify() 70 for (f = rcu_dereference_bh(head->ht[fw_hash(id)]); f; fw_classify() 100 struct fw_head *head = rtnl_dereference(tp->root); fw_get() local 103 if (head == NULL) fw_get() 106 f = rtnl_dereference(head->ht[fw_hash(handle)]); fw_get() 122 static void fw_delete_filter(struct rcu_head *head) fw_delete_filter() argument 124 struct fw_filter *f = container_of(head, struct fw_filter, rcu); fw_delete_filter() 132 struct fw_head *head = rtnl_dereference(tp->root); fw_destroy() local 136 if (head == NULL) fw_destroy() 141 if (rcu_access_pointer(head->ht[h])) fw_destroy() 146 while ((f = rtnl_dereference(head->ht[h])) != NULL) { fw_destroy() 147 RCU_INIT_POINTER(head->ht[h], fw_destroy() 154 kfree_rcu(head, rcu); fw_destroy() 160 struct fw_head *head = rtnl_dereference(tp->root); fw_delete() local 165 if (head == NULL || f == NULL) fw_delete() 168 fp = &head->ht[fw_hash(f->id)]; fw_delete() 193 struct fw_head *head = rtnl_dereference(tp->root); fw_change_attrs() local 223 if (mask != head->mask) fw_change_attrs() 225 } else if (head->mask != 0xFFFFFFFF) fw_change_attrs() 242 struct fw_head *head = rtnl_dereference(tp->root); fw_change() local 281 fp = &head->ht[fw_hash(fnew->id)]; fw_change() 299 if (!head) { fw_change() 304 head = kzalloc(sizeof(*head), GFP_KERNEL); fw_change() 305 if (!head) fw_change() 307 head->mask = mask; fw_change() 309 rcu_assign_pointer(tp->root, head); fw_change() 324 RCU_INIT_POINTER(f->next, head->ht[fw_hash(handle)]); fw_change() 325 rcu_assign_pointer(head->ht[fw_hash(handle)], f); fw_change() 337 struct fw_head *head = rtnl_dereference(tp->root); fw_walk() local 340 if (head == NULL) fw_walk() 349 for (f = rtnl_dereference(head->ht[h]); f; fw_walk() 367 struct fw_head *head = rtnl_dereference(tp->root); fw_dump() local 394 if (head->mask != 0xFFFFFFFF && fw_dump() 395 nla_put_u32(skb, TCA_FW_MASK, head->mask)) fw_dump()
|
H A D | cls_basic.c | 44 struct basic_head *head = rcu_dereference_bh(tp->root); basic_classify() local 47 list_for_each_entry_rcu(f, &head->flist, link) { basic_classify() 62 struct basic_head *head = rtnl_dereference(tp->root); basic_get() local 65 if (head == NULL) basic_get() 68 list_for_each_entry(f, &head->flist, link) { basic_get() 80 struct basic_head *head; basic_init() local 82 head = kzalloc(sizeof(*head), GFP_KERNEL); basic_init() 83 if (head == NULL) basic_init() 85 INIT_LIST_HEAD(&head->flist); basic_init() 86 rcu_assign_pointer(tp->root, head); basic_init() 90 static void basic_delete_filter(struct rcu_head *head) basic_delete_filter() argument 92 struct basic_filter *f = container_of(head, struct basic_filter, rcu); basic_delete_filter() 101 struct basic_head *head = rtnl_dereference(tp->root); basic_destroy() local 104 if (!force && !list_empty(&head->flist)) basic_destroy() 107 list_for_each_entry_safe(f, n, &head->flist, link) { basic_destroy() 113 kfree_rcu(head, rcu); basic_destroy() 170 struct basic_head *head = rtnl_dereference(tp->root); basic_change() local 201 if (++head->hgenerator == 0x7FFFFFFF) basic_change() 202 head->hgenerator = 1; basic_change() 203 } while (--i > 0 && basic_get(tp, head->hgenerator)); basic_change() 210 fnew->handle = head->hgenerator; basic_change() 224 list_add_rcu(&fnew->link, &head->flist); basic_change() 235 struct basic_head *head = rtnl_dereference(tp->root); basic_walk() local 238 list_for_each_entry(f, &head->flist, link) { basic_walk()
|
H A D | cls_route.c | 72 route4_reset_fastmap(struct route4_head *head) route4_reset_fastmap() argument 75 memset(head->fastmap, 0, sizeof(head->fastmap)); route4_reset_fastmap() 80 route4_set_fastmap(struct route4_head *head, u32 id, int iif, route4_set_fastmap() argument 87 head->fastmap[h].id = id; route4_set_fastmap() 88 head->fastmap[h].iif = iif; route4_set_fastmap() 89 head->fastmap[h].filter = f; route4_set_fastmap() 124 route4_set_fastmap(head, id, iif, f); \ 131 struct route4_head *head = rcu_dereference_bh(tp->root); route4_classify() local 143 if (head == NULL) route4_classify() 151 if (id == head->fastmap[h].id && route4_classify() 152 iif == head->fastmap[h].iif && route4_classify() 153 (f = head->fastmap[h].filter) != NULL) { route4_classify() 168 b = rcu_dereference_bh(head->table[h]); route4_classify() 194 route4_set_fastmap(head, id, iif, ROUTE4_FAILURE); route4_classify() 232 struct route4_head *head = rtnl_dereference(tp->root); route4_get() local 237 if (!head) route4_get() 248 b = rtnl_dereference(head->table[h1]); route4_get() 261 struct route4_head *head; route4_init() local 263 head = kzalloc(sizeof(struct route4_head), GFP_KERNEL); route4_init() 264 if (head == NULL) route4_init() 267 rcu_assign_pointer(tp->root, head); route4_init() 272 route4_delete_filter(struct rcu_head *head) route4_delete_filter() argument 274 struct route4_filter *f = container_of(head, struct route4_filter, rcu); route4_delete_filter() 282 struct route4_head *head = rtnl_dereference(tp->root); route4_destroy() local 285 if (head == NULL) route4_destroy() 290 if (rcu_access_pointer(head->table[h1])) route4_destroy() 298 b = rtnl_dereference(head->table[h1]); route4_destroy() 312 RCU_INIT_POINTER(head->table[h1], NULL); route4_destroy() 317 kfree_rcu(head, rcu); route4_destroy() 323 struct route4_head *head = rtnl_dereference(tp->root); route4_delete() local 331 if (!head || !f) route4_delete() 348 route4_reset_fastmap(head); route4_delete() 364 RCU_INIT_POINTER(head->table[to_hash(h)], NULL); route4_delete() 382 u32 handle, struct route4_head *head, route4_set_parms() 430 b = rtnl_dereference(head->table[h1]); route4_set_parms() 437 rcu_assign_pointer(head->table[h1], b); route4_set_parms() 480 struct route4_head *head = rtnl_dereference(tp->root); route4_change() local 518 err = route4_set_parms(net, tp, base, f, handle, head, tb, route4_change() 538 b = rtnl_dereference(head->table[th]); route4_change() 551 route4_reset_fastmap(head); route4_change() 566 struct route4_head *head = rtnl_dereference(tp->root); route4_walk() local 569 if (head == NULL) route4_walk() 576 struct route4_bucket *b = rtnl_dereference(head->table[h]); route4_walk() 380 route4_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, struct route4_filter *f, u32 handle, struct route4_head *head, struct nlattr **tb, struct nlattr *est, int new, bool ovr) route4_set_parms() argument
|
H A D | cls_bpf.c | 65 struct cls_bpf_head *head = rcu_dereference_bh(tp->root); cls_bpf_classify() local 74 list_for_each_entry_rcu(prog, &head->plist, link) { cls_bpf_classify() 102 struct cls_bpf_head *head; cls_bpf_init() local 104 head = kzalloc(sizeof(*head), GFP_KERNEL); cls_bpf_init() 105 if (head == NULL) cls_bpf_init() 108 INIT_LIST_HEAD_RCU(&head->plist); cls_bpf_init() 109 rcu_assign_pointer(tp->root, head); cls_bpf_init() 148 struct cls_bpf_head *head = rtnl_dereference(tp->root); cls_bpf_destroy() local 151 if (!force && !list_empty(&head->plist)) cls_bpf_destroy() 154 list_for_each_entry_safe(prog, tmp, &head->plist, link) { cls_bpf_destroy() 161 kfree_rcu(head, rcu); cls_bpf_destroy() 167 struct cls_bpf_head *head = rtnl_dereference(tp->root); cls_bpf_get() local 171 if (head == NULL) cls_bpf_get() 174 list_for_each_entry(prog, &head->plist, link) { cls_bpf_get() 302 struct cls_bpf_head *head) cls_bpf_grab_new_handle() 308 if (++head->hgen == 0x7FFFFFFF) cls_bpf_grab_new_handle() 309 head->hgen = 1; cls_bpf_grab_new_handle() 310 } while (--i > 0 && cls_bpf_get(tp, head->hgen)); cls_bpf_grab_new_handle() 316 handle = head->hgen; cls_bpf_grab_new_handle() 327 struct cls_bpf_head *head = rtnl_dereference(tp->root); cls_bpf_change() local 354 prog->handle = cls_bpf_grab_new_handle(tp, head); cls_bpf_change() 371 list_add_rcu(&prog->link, &head->plist); cls_bpf_change() 456 struct cls_bpf_head *head = rtnl_dereference(tp->root); cls_bpf_walk() local 459 list_for_each_entry(prog, &head->plist, link) { cls_bpf_walk() 301 cls_bpf_grab_new_handle(struct tcf_proto *tp, struct cls_bpf_head *head) cls_bpf_grab_new_handle() argument
|
H A D | sch_choke.c | 71 unsigned int head; member in struct:choke_sched_data 82 return (q->tail - q->head) & q->tab_mask; choke_len() 97 /* Move head pointer forward to skip over holes */ choke_zap_head_holes() 101 q->head = (q->head + 1) & q->tab_mask; choke_zap_head_holes() 102 if (q->head == q->tail) choke_zap_head_holes() 104 } while (q->tab[q->head] == NULL); choke_zap_head_holes() 112 if (q->head == q->tail) choke_zap_tail_holes() 125 if (idx == q->head) choke_drop_by_idx() 231 * times to find a random skb but then just give up and return the head 232 * Will return NULL if queue is empty (q->head == q->tail) 241 *pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask; choke_peek_random() 247 return q->tab[*pidx = q->head]; choke_peek_random() 260 if (q->head == q->tail) choke_match_random() 358 if (q->head == q->tail) { choke_dequeue() 364 skb = q->tab[q->head]; choke_dequeue() 365 q->tab[q->head] = NULL; choke_dequeue() 453 while (q->head != q->tail) { choke_change() 454 struct sk_buff *skb = q->tab[q->head]; choke_change() 456 q->head = (q->head + 1) & q->tab_mask; choke_change() 468 q->head = 0; choke_change() 486 if (q->head == q->tail) choke_change() 612 return (q->head != q->tail) ? q->tab[q->head] : NULL; choke_peek_head()
|
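sch_choke above stores its queue in a power-of-two ring: head and tail are free-running indices masked by tab_mask, the queue length is (tail - head) & tab_mask, and because CHOKe drops packets from the middle, choke_zap_head_holes() walks head forward over NULL slots. A sketch of that bookkeeping with int payloads standing in for skbs:

    /* Sketch of the ring bookkeeping in sch_choke above: indices wrap
     * via a power-of-two mask, length is (tail - head) & mask, and the
     * head is walked forward over NULL holes left by mid-queue drops,
     * mirroring choke_zap_head_holes(). */
    #include <stdio.h>

    #define TAB_MASK 7                 /* ring of 8 slots */

    static int *tab[TAB_MASK + 1];
    static unsigned int head, tail;

    static unsigned int qlen(void) { return (tail - head) & TAB_MASK; }

    static void enqueue(int *p)
    {
        tab[tail] = p;
        tail = (tail + 1) & TAB_MASK;
    }

    static void zap_head_holes(void)
    {
        while (head != tail && tab[head] == NULL)
            head = (head + 1) & TAB_MASK;
    }

    int main(void)
    {
        int a = 1, b = 2, c = 3;

        enqueue(&a); enqueue(&b); enqueue(&c);
        tab[head] = NULL;              /* drop the head slot: a hole */
        zap_head_holes();
        printf("len=%u front=%d\n", qlen(), *tab[head]); /* len=2 front=2 */
        return 0;
    }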
H A D | sch_fq.c | 61 struct sk_buff *head; /* list of skbs for this flow : first skb */ member in struct:fq_flow 156 static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow) fq_flow_add_tail() argument 158 if (head->first) fq_flow_add_tail() 159 head->last->next = flow; fq_flow_add_tail() 161 head->first = flow; fq_flow_add_tail() 162 head->last = flow; fq_flow_add_tail() 295 /* remove one skb from head of flow queue */ fq_dequeue_head() 298 struct sk_buff *skb = flow->head; fq_dequeue_head() 301 flow->head = skb->next; fq_dequeue_head() 323 * head-> [retrans pkt 1] 332 struct sk_buff *prev, *head = flow->head; flow_queue_add() local 335 if (!head) { flow_queue_add() 336 flow->head = skb; flow_queue_add() 350 while (skb_is_retransmit(head)) { flow_queue_add() 351 prev = head; flow_queue_add() 352 head = head->next; flow_queue_add() 353 if (!head) flow_queue_add() 356 if (!prev) { /* no rtx packet in queue, become the new head */ flow_queue_add() 357 skb->next = flow->head; flow_queue_add() 358 flow->head = skb; flow_queue_add() 429 struct fq_flow_head *head; fq_dequeue() local 439 head = &q->new_flows; fq_dequeue() 440 if (!head->first) { fq_dequeue() 441 head = &q->old_flows; fq_dequeue() 442 if (!head->first) { fq_dequeue() 450 f = head->first; fq_dequeue() 454 head->first = f->next; fq_dequeue() 459 skb = f->head; fq_dequeue() 462 head->first = f->next; fq_dequeue() 469 head->first = f->next; fq_dequeue() 471 if ((head == &q->new_flows) && q->old_flows.first) { fq_dequeue()
|
H A D | cls_flow.c | 282 struct flow_head *head = rcu_dereference_bh(tp->root); flow_classify() local 289 list_for_each_entry_rcu(f, &head->filters, list) { flow_classify() 352 static void flow_destroy_filter(struct rcu_head *head) flow_destroy_filter() argument 354 struct flow_filter *f = container_of(head, struct flow_filter, rcu); flow_destroy_filter() 367 struct flow_head *head = rtnl_dereference(tp->root); flow_change() local 522 list_add_tail_rcu(&fnew->list, &head->filters); flow_change() 551 struct flow_head *head; flow_init() local 553 head = kzalloc(sizeof(*head), GFP_KERNEL); flow_init() 554 if (head == NULL) flow_init() 556 INIT_LIST_HEAD(&head->filters); flow_init() 557 rcu_assign_pointer(tp->root, head); flow_init() 563 struct flow_head *head = rtnl_dereference(tp->root); flow_destroy() local 566 if (!force && !list_empty(&head->filters)) flow_destroy() 569 list_for_each_entry_safe(f, next, &head->filters, list) { flow_destroy() 574 kfree_rcu(head, rcu); flow_destroy() 580 struct flow_head *head = rtnl_dereference(tp->root); flow_get() local 583 list_for_each_entry(f, &head->filters, list) flow_get() 652 struct flow_head *head = rtnl_dereference(tp->root); flow_walk() local 655 list_for_each_entry(f, &head->filters, list) { flow_walk()
|
H A D | sch_hhf.c | 121 struct sk_buff *head; member in struct:wdrr_bucket 197 struct list_head *head, seek_list() 203 if (list_empty(head)) seek_list() 206 list_for_each_entry_safe(flow, next, head, flowchain) { list_for_each_entry_safe() 213 if (list_is_last(&flow->flowchain, head)) list_for_each_entry_safe() 228 static struct hh_flow_state *alloc_new_hh(struct list_head *head, alloc_new_hh() argument 234 if (!list_empty(head)) { alloc_new_hh() 236 list_for_each_entry(flow, head, flowchain) { list_for_each_entry() 255 list_add_tail(&flow->flowchain, head); 344 /* Removes one skb from head of bucket. */ dequeue_head() 347 struct sk_buff *skb = bucket->head; dequeue_head() 349 bucket->head = skb->next; dequeue_head() 357 if (bucket->head == NULL) bucket_add() 358 bucket->head = skb; bucket_add() 372 if (!bucket->head) hhf_drop() 375 if (bucket->head) { hhf_drop() 437 struct list_head *head; hhf_dequeue() local 440 head = &q->new_buckets; hhf_dequeue() 441 if (list_empty(head)) { hhf_dequeue() 442 head = &q->old_buckets; hhf_dequeue() 443 if (list_empty(head)) hhf_dequeue() 446 bucket = list_first_entry(head, struct wdrr_bucket, bucketchain); hhf_dequeue() 457 if (bucket->head) { hhf_dequeue() 465 if ((head == &q->new_buckets) && !list_empty(&q->old_buckets)) hhf_dequeue() 512 struct list_head *head = &q->hh_flows[i]; hhf_destroy() local 514 if (list_empty(head)) hhf_destroy() 516 list_for_each_entry_safe(flow, next, head, flowchain) { list_for_each_entry_safe() 196 seek_list(const u32 hash, struct list_head *head, struct hhf_sched_data *q) seek_list() argument
|
/linux-4.1.27/arch/unicore32/kernel/ |
H A D | Makefile | 27 head-y := head.o 30 extra-y := $(head-y) vmlinux.lds
|
/linux-4.1.27/arch/score/kernel/ |
H A D | Makefile | 5 extra-y := head.o vmlinux.lds
|
/linux-4.1.27/arch/openrisc/kernel/ |
H A D | Makefile | 5 extra-y := head.o vmlinux.lds
|
/linux-4.1.27/arch/m68k/include/asm/ |
H A D | atafd.h | 8 int head; /* "" "" */ member in struct:atari_format_descr
|
H A D | machw.h | 17 * head.S maps the videomem to VIDEOMEMBASE
|
/linux-4.1.27/arch/arm/mach-shmobile/include/mach/ |
H A D | zboot.h | 14 #include "mach/head-kzm9g.txt"
|
/linux-4.1.27/tools/perf/util/ |
H A D | parse-events.y | 64 %type <head> event_config 66 %type <head> event_pmu 67 %type <head> event_legacy_symbol 68 %type <head> event_legacy_cache 69 %type <head> event_legacy_mem 70 %type <head> event_legacy_tracepoint 71 %type <head> event_legacy_numeric 72 %type <head> event_legacy_raw 73 %type <head> event_def 74 %type <head> event_mod 75 %type <head> event_name 76 %type <head> event 77 %type <head> events 78 %type <head> group_def 79 %type <head> group 80 %type <head> groups 86 struct list_head *head; 229 struct list_head *head; 233 ALLOC_LIST(head); 236 list_add_tail(&term->list, head); 239 ABORT_ON(parse_events_add_pmu(list, &data->idx, "cpu", head)); 240 parse_events__free_terms(head); 247 struct list_head *head; 253 ALLOC_LIST(head); 256 list_add_tail(&term->list, head); 259 ABORT_ON(parse_events_add_pmu(list, &data->idx, "cpu", head)); 260 parse_events__free_terms(head); 428 struct list_head *head = $1; 431 ABORT_ON(!head); 432 list_add_tail(&term->list, head); 438 struct list_head *head = malloc(sizeof(*head)); 441 ABORT_ON(!head); 442 INIT_LIST_HEAD(head); 443 list_add_tail(&term->list, head); 444 $$ = head;
|
H A D | build-id.c | 133 #define dsos__for_each_with_build_id(pos, head) \ 134 list_for_each_entry(pos, head, node) \ 162 static int __dsos__write_buildid_table(struct list_head *head, __dsos__write_buildid_table() argument 169 dsos__for_each_with_build_id(pos, head) { dsos__for_each_with_build_id() 209 err = __dsos__write_buildid_table(&machine->kernel_dsos.head, machine, machine__write_buildid_table() 212 err = __dsos__write_buildid_table(&machine->user_dsos.head, machine__write_buildid_table() 235 static int __dsos__hit_all(struct list_head *head) __dsos__hit_all() argument 239 list_for_each_entry(pos, head, node) __dsos__hit_all() 249 err = __dsos__hit_all(&machine->kernel_dsos.head); machine__hit_all_dsos() 253 return __dsos__hit_all(&machine->user_dsos.head); machine__hit_all_dsos() 478 static int __dsos__cache_build_ids(struct list_head *head, __dsos__cache_build_ids() argument 484 dsos__for_each_with_build_id(pos, head) __dsos__cache_build_ids() 493 int ret = __dsos__cache_build_ids(&machine->kernel_dsos.head, machine); machine__cache_build_ids() 494 ret |= __dsos__cache_build_ids(&machine->user_dsos.head, machine); machine__cache_build_ids() 522 ret = __dsos__read_build_ids(&machine->kernel_dsos.head, with_hits); machine__read_build_ids() 523 ret |= __dsos__read_build_ids(&machine->user_dsos.head, with_hits); machine__read_build_ids()
|
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvif/ |
H A D | list.h | 45 * We need one list head in bar and a list element in all list_of_foos (both are of 60 * Now we initialize the list head: 102 * to-be-linked struct. struct list_head is required for both the head of the 107 * There are no requirements for a list head, any struct list_head can be a list 108 * head. 144 * Insert a new element after the given list head. The new element does not 147 * head → some element → ... 149 * head → new element → older element → ... 156 * @param head The existing list. 159 list_add(struct list_head *entry, struct list_head *head) list_add() argument 161 __list_add(entry, head, head->next); list_add() 165 * Append a new element to the end of the list given with this list head. 168 * head → some element → ... → lastelement 170 * head → some element → ... → lastelement → new element 177 * @param head The existing list. 180 list_add_tail(struct list_head *entry, struct list_head *head) list_add_tail() argument 182 __list_add(entry, head->prev, head); list_add_tail() 197 * Using list_del on a pure list head (like in the example at the top of 220 struct list_head *head) list_move_tail() 223 list_add_tail(list, head); list_move_tail() 235 list_empty(struct list_head *head) list_empty() argument 237 return head->next == head; list_empty() 251 * @return A pointer to the data struct containing the list head. 271 * @param ptr The list head 286 * @param ptr The list head 298 * Loop through the list given by head and set pos to struct in the list. 310 * @param head List head 314 #define list_for_each_entry(pos, head, member) \ 315 for (pos = __container_of((head)->next, pos, member); \ 316 &pos->member != (head); \ 326 #define list_for_each_entry_safe(pos, tmp, head, member) \ 327 for (pos = __container_of((head)->next, pos, member), \ 329 &pos->member != (head); \ 333 #define list_for_each_entry_reverse(pos, head, member) \ 334 for (pos = __container_of((head)->prev, pos, member); \ 335 &pos->member != (head); \ 338 #define list_for_each_entry_continue(pos, head, member) \ 340 &pos->member != (head); \ 343 #define list_for_each_entry_continue_reverse(pos, head, member) \ 345 &pos->member != (head); \ 348 #define list_for_each_entry_from(pos, head, member) \ 350 &pos->member != (head); \ 219 list_move_tail(struct list_head *list, struct list_head *head) list_move_tail() argument
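The comments in this header spell out the classic circular doubly linked list: any struct list_head can serve as the head, emptiness is head->next == head, and __container_of() recovers the enclosing struct. A freestanding sketch of the same scheme, reusing the bar/foo naming from the documentation above:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void __list_add(struct list_head *e,
                       struct list_head *prev, struct list_head *next)
{
    next->prev = e;
    e->next = next;
    e->prev = prev;
    prev->next = e;
}

static void list_add_tail(struct list_head *e, struct list_head *head)
{
    __list_add(e, head->prev, head);    /* insert just before the head */
}

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct foo { int id; struct list_head entry; };

int main(void)
{
    struct list_head bar;               /* the list head lives in "bar" */
    struct foo a = { .id = 1 }, b = { .id = 2 };
    struct list_head *pos;

    list_init(&bar);
    list_add_tail(&a.entry, &bar);
    list_add_tail(&b.entry, &bar);

    for (pos = bar.next; pos != &bar; pos = pos->next)  /* list_for_each */
        printf("foo %d\n", container_of(pos, struct foo, entry)->id);
    return 0;
}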
|
/linux-4.1.27/arch/x86/include/asm/ |
H A D | spinlock.h | 49 set_bit(0, (volatile unsigned long *)&lock->tickets.head); __ticket_enter_slowpath() 69 __ticket_t head) __ticket_check_and_clear_slowpath() 71 if (head & TICKET_SLOWPATH_FLAG) { __ticket_check_and_clear_slowpath() 74 old.tickets.head = head; __ticket_check_and_clear_slowpath() 75 new.tickets.head = head & ~TICKET_SLOWPATH_FLAG; __ticket_check_and_clear_slowpath() 76 old.tickets.tail = new.tickets.head + TICKET_LOCK_INC; __ticket_check_and_clear_slowpath() 86 return __tickets_equal(lock.tickets.head, lock.tickets.tail); arch_spin_value_unlocked() 90 * Ticket locks are conceptually two parts, one indicating the current head of 93 * ourself to the queue and noting our position), then waiting until the head 97 * also load the position of the head, which takes care of memory ordering 107 if (likely(inc.head == inc.tail)) arch_spin_lock() 114 inc.head = READ_ONCE(lock->tickets.head); arch_spin_lock() 115 if (__tickets_equal(inc.head, inc.tail)) arch_spin_lock() 122 __ticket_check_and_clear_slowpath(lock, inc.head); arch_spin_lock() 132 if (!__tickets_equal(old.tickets.head, old.tickets.tail)) arch_spin_trylock() 146 __ticket_t head; arch_spin_unlock() local 150 head = xadd(&lock->tickets.head, TICKET_LOCK_INC); arch_spin_unlock() 152 if (unlikely(head & TICKET_SLOWPATH_FLAG)) { arch_spin_unlock() 153 head &= ~TICKET_SLOWPATH_FLAG; arch_spin_unlock() 154 __ticket_unlock_kick(lock, (head + TICKET_LOCK_INC)); arch_spin_unlock() 157 __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX); arch_spin_unlock() 164 return !__tickets_equal(tmp.tail, tmp.head); arch_spin_is_locked() 171 tmp.head &= ~TICKET_SLOWPATH_FLAG; arch_spin_is_contended() 172 return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC; arch_spin_is_contended() 184 __ticket_t head = READ_ONCE(lock->tickets.head); arch_spin_unlock_wait() local 189 * We need to check "unlocked" in a loop, tmp.head == head arch_spin_unlock_wait() 192 if (__tickets_equal(tmp.head, tmp.tail) || arch_spin_unlock_wait() 193 !__tickets_equal(tmp.head, head)) arch_spin_unlock_wait() 68 __ticket_check_and_clear_slowpath(arch_spinlock_t *lock, __ticket_t head) __ticket_check_and_clear_slowpath() argument
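The block comment above describes the ticket-lock protocol: atomically take a ticket from tail, spin until head equals that ticket, and unlock by advancing head. A minimal C11-atomics sketch of the protocol; it deliberately leaves out the kernel's paravirt TICKET_SLOWPATH_FLAG handling:

#include <stdatomic.h>
#include <stdint.h>

struct ticket_lock {
    _Atomic uint16_t head;              /* ticket now being served */
    _Atomic uint16_t tail;              /* next ticket to hand out */
};

static void ticket_lock(struct ticket_lock *l)
{
    /* like xadd on tail: take a ticket and advance the queue in one step */
    uint16_t me = atomic_fetch_add_explicit(&l->tail, 1,
                                            memory_order_relaxed);

    /* spin until head reaches our ticket; acquire pairs with the unlock */
    while (atomic_load_explicit(&l->head, memory_order_acquire) != me)
        ;
}

static void ticket_unlock(struct ticket_lock *l)
{
    /* hand the lock to the next waiter in line */
    atomic_fetch_add_explicit(&l->head, 1, memory_order_release);
}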
|
H A D | spinlock_types.h | 30 __ticket_t head, tail; member in struct:arch_spinlock::__anon3085::__raw_tickets
|
/linux-4.1.27/arch/nios2/boot/compressed/ |
H A D | Makefile | 5 targets := vmlinux head.o misc.o piggy.o vmlinux.lds 8 OBJECTS = $(obj)/head.o $(obj)/misc.o
|
/linux-4.1.27/scripts/kconfig/ |
H A D | list.h | 45 * @head: the head for your list. 48 #define list_for_each_entry(pos, head, member) \ 49 for (pos = list_entry((head)->next, typeof(*pos), member); \ 50 &pos->member != (head); \ 57 * @head: the head for your list. 60 #define list_for_each_entry_safe(pos, n, head, member) \ 61 for (pos = list_entry((head)->next, typeof(*pos), member), \ 63 &pos->member != (head); \ 68 * @head: the list to test. 70 static inline int list_empty(const struct list_head *head) list_empty() argument 72 return head->next == head; list_empty() 94 * @head: list head to add it before 96 * Insert a new entry before the specified head. 99 static inline void list_add_tail(struct list_head *_new, struct list_head *head) list_add_tail() argument 101 __list_add(_new, head->prev, head); list_add_tail()
|
/linux-4.1.27/drivers/gpu/drm/vmwgfx/ |
H A D | vmwgfx_marker.c | 32 struct list_head head; member in struct:vmw_marker 39 INIT_LIST_HEAD(&queue->head); vmw_marker_queue_init() 50 list_for_each_entry_safe(marker, next, &queue->head, head) { vmw_marker_queue_takedown() 67 list_add_tail(&marker->head, &queue->head); vmw_marker_push() 83 if (list_empty(&queue->head)) { vmw_marker_pull() 90 list_for_each_entry_safe(marker, next, &queue->head, head) { vmw_marker_pull() 97 list_del(&marker->head); vmw_marker_pull() 137 if (list_empty(&queue->head)) vmw_wait_lag() 140 marker = list_first_entry(&queue->head, vmw_wait_lag() 141 struct vmw_marker, head); vmw_wait_lag()
|
H A D | vmwgfx_cmdbuf_res.c | 43 * @head: List head used either by the staging list or the manager list 51 struct list_head head; member in struct:vmw_cmdbuf_res 113 list_del(&entry->head); vmw_cmdbuf_res_free() 133 list_for_each_entry_safe(entry, next, list, head) { list_for_each_entry_safe() 134 list_del(&entry->head); list_for_each_entry_safe() 138 list_add_tail(&entry->head, &entry->man->list); list_for_each_entry_safe() 168 list_for_each_entry_safe(entry, next, list, head) { list_for_each_entry_safe() 176 list_del(&entry->head); list_for_each_entry_safe() 177 list_add_tail(&entry->head, &entry->man->list); list_for_each_entry_safe() 221 list_add_tail(&cres->head, list); vmw_cmdbuf_res_add() 262 list_del(&entry->head); vmw_cmdbuf_res_remove() 264 list_add_tail(&entry->head, list); vmw_cmdbuf_res_remove() 317 list_for_each_entry_safe(entry, next, &man->list, head) vmw_cmdbuf_res_man_destroy()
|
/linux-4.1.27/kernel/ |
H A D | task_work.c | 27 struct callback_head *head; task_work_add() local 30 head = ACCESS_ONCE(task->task_works); task_work_add() 31 if (unlikely(head == &work_exited)) task_work_add() 33 work->next = head; task_work_add() 34 } while (cmpxchg(&task->task_works, head, work) != head); task_work_add() 88 struct callback_head *work, *head, *next; task_work_run() local 97 head = !work && (task->flags & PF_EXITING) ? task_work_run() 99 } while (cmpxchg(&task->task_works, work, head) != work); task_work_run() 112 head = NULL; task_work_run() 115 work->next = head; task_work_run() 116 head = work; task_work_run() 120 work = head; task_work_run()
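task_work_add() above pushes onto a singly linked stack with cmpxchg(), and task_work_run() later detaches the whole stack and reverses it so the callbacks fire in the order they were queued. A userspace sketch of that push/detach/reverse pattern in C11 atomics, omitting the work_exited sentinel:

#include <stdatomic.h>
#include <stddef.h>

struct callback_head {
    struct callback_head *next;
    void (*func)(struct callback_head *);
};

static _Atomic(struct callback_head *) works;

/* Lock-free push: retry until nobody moved the list head under us. */
static void work_add(struct callback_head *work)
{
    struct callback_head *head = atomic_load(&works);

    do {
        work->next = head;      /* head is refreshed by a failed CAS */
    } while (!atomic_compare_exchange_weak(&works, &head, work));
}

/* Detach everything, then reverse so callbacks run in queueing order. */
static void work_run(void)
{
    struct callback_head *work = atomic_exchange(&works, NULL);
    struct callback_head *head = NULL, *next;

    while (work) {              /* reverse the detached stack */
        next = work->next;
        work->next = head;
        head = work;
        work = next;
    }
    for (work = head; work; work = next) {
        next = work->next;
        work->func(work);
    }
}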
|
H A D | futex_compat.c | 24 compat_uptr_t __user *head, unsigned int *pi) fetch_robust_entry() 26 if (get_user(*uentry, head)) fetch_robust_entry() 52 struct compat_robust_list_head __user *head = curr->compat_robust_list; compat_exit_robust_list() local 64 * Fetch the list head (which was registered earlier, via compat_exit_robust_list() 67 if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) compat_exit_robust_list() 72 if (get_user(futex_offset, &head->futex_offset)) compat_exit_robust_list() 79 &head->list_op_pending, &pip)) compat_exit_robust_list() 83 while (entry != (struct robust_list __user *) &head->list) { compat_exit_robust_list() 121 struct compat_robust_list_head __user *, head, COMPAT_SYSCALL_DEFINE2() 127 if (unlikely(len != sizeof(*head))) COMPAT_SYSCALL_DEFINE2() 130 current->compat_robust_list = head; COMPAT_SYSCALL_DEFINE2() 139 struct compat_robust_list_head __user *head; COMPAT_SYSCALL_DEFINE3() local 161 head = p->compat_robust_list; COMPAT_SYSCALL_DEFINE3() 164 if (put_user(sizeof(*head), len_ptr)) COMPAT_SYSCALL_DEFINE3() 166 return put_user(ptr_to_compat(head), head_ptr); COMPAT_SYSCALL_DEFINE3() 23 fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, compat_uptr_t __user *head, unsigned int *pi) fetch_robust_entry() argument
|
H A D | user-return-notifier.c | 38 struct hlist_head *head; fire_user_return_notifiers() local 40 head = &get_cpu_var(return_notifier_list); fire_user_return_notifiers() 41 hlist_for_each_entry_safe(urn, tmp2, head, link) fire_user_return_notifiers()
|
H A D | notifier.c | 64 * @nl: Pointer to head of the blocking notifier chain 114 * @nh: Pointer to head of the atomic notifier chain 128 ret = notifier_chain_register(&nh->head, n); atomic_notifier_chain_register() 136 * @nh: Pointer to head of the atomic notifier chain 150 ret = notifier_chain_unregister(&nh->head, n); atomic_notifier_chain_unregister() 159 * @nh: Pointer to head of the atomic notifier chain 183 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); __atomic_notifier_call_chain() 205 * @nh: Pointer to head of the blocking notifier chain 224 return notifier_chain_register(&nh->head, n); blocking_notifier_chain_register() 227 ret = notifier_chain_register(&nh->head, n); blocking_notifier_chain_register() 235 * @nh: Pointer to head of the blocking notifier chain 250 ret = notifier_chain_cond_register(&nh->head, n); blocking_notifier_chain_cond_register() 258 * @nh: Pointer to head of the blocking notifier chain 277 return notifier_chain_unregister(&nh->head, n); blocking_notifier_chain_unregister() 280 ret = notifier_chain_unregister(&nh->head, n); blocking_notifier_chain_unregister() 288 * @nh: Pointer to head of the blocking notifier chain 311 * We check the head outside the lock, but if this access is __blocking_notifier_call_chain() 315 if (rcu_access_pointer(nh->head)) { __blocking_notifier_call_chain() 317 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, __blocking_notifier_call_chain() 339 * @nh: Pointer to head of the raw notifier chain 350 return notifier_chain_register(&nh->head, n); raw_notifier_chain_register() 356 * @nh: Pointer to head of the raw notifier chain 367 return notifier_chain_unregister(&nh->head, n); raw_notifier_chain_unregister() 373 * @nh: Pointer to head of the raw notifier chain 394 return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); __raw_notifier_call_chain() 413 * @nh: Pointer to head of the SRCU notifier chain 432 return notifier_chain_register(&nh->head, n); srcu_notifier_chain_register() 435 ret = notifier_chain_register(&nh->head, n); srcu_notifier_chain_register() 443 * @nh: Pointer to head of the SRCU notifier chain 462 return notifier_chain_unregister(&nh->head, n); srcu_notifier_chain_unregister() 465 ret = notifier_chain_unregister(&nh->head, n); srcu_notifier_chain_unregister() 474 * @nh: Pointer to head of the SRCU notifier chain 498 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); __srcu_notifier_call_chain() 512 * srcu_init_notifier_head - Initialize an SRCU notifier head 513 * @nh: Pointer to head of the srcu notifier chain 517 * calling any of the other SRCU notifier routines for this head. 519 * If an SRCU notifier head is deallocated, it must first be cleaned 520 * up by calling srcu_cleanup_notifier_head(). Otherwise the head's 528 nh->head = NULL; srcu_init_notifier_head()
|
/linux-4.1.27/drivers/staging/unisys/uislib/ |
H A D | uisqueue.c | 57 unsigned int head, tail, nof; spar_signal_insert() local 64 /* capture current head and tail */ spar_signal_insert() 65 head = readl(&pqhdr->head); spar_signal_insert() 68 /* queue is full if (head + 1) % n equals tail */ spar_signal_insert() 69 if (((head + 1) % readl(&pqhdr->max_slots)) == tail) { spar_signal_insert() 75 /* increment the head index */ spar_signal_insert() 76 head = (head + 1) % readl(&pqhdr->max_slots); spar_signal_insert() 78 /* copy signal to the head location from the area pointed to spar_signal_insert() 82 (head * readl(&pqhdr->signal_size)); spar_signal_insert() 86 writel(head, &pqhdr->head); spar_signal_insert() 115 unsigned int head, tail; spar_signal_remove() local 120 /* capture current head and tail */ spar_signal_remove() 121 head = readl(&pqhdr->head); spar_signal_remove() 124 /* queue is empty if the head index equals the tail index */ spar_signal_remove() 125 if (head == tail) { spar_signal_remove() 170 unsigned int head, tail, count = 0; spar_signal_remove_all() local 175 /* capture current head and tail */ spar_signal_remove_all() 176 head = pqhdr->head; spar_signal_remove_all() 179 /* queue is empty if the head index equals the tail index */ spar_signal_remove_all() 180 if (head == tail) spar_signal_remove_all() 183 while (head != tail) { spar_signal_remove_all() 223 return readl(&pqhdr->head) == readl(&pqhdr->tail); spar_signalqueue_empty() 309 /* uisqueue_get_cmdrsp gets the cmdrsp entry at the head of the queue
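spar_signal_insert() above treats the channel as a circular buffer that is full when (head + 1) % max_slots equals tail and empty when head equals tail, deliberately wasting one slot so the two states stay distinguishable; note that it advances the index before writing. A plain-memory sketch of that convention (ordinary loads and stores, not the readl()/writel() the driver needs for channel memory):

#include <stdbool.h>

#define MAX_SLOTS 16

struct sigqueue {
    unsigned int head, tail;
    int slots[MAX_SLOTS];
};

static bool sig_insert(struct sigqueue *q, int sig)
{
    /* full if advancing head would land on tail: one slot stays unused */
    if ((q->head + 1) % MAX_SLOTS == q->tail)
        return false;
    q->head = (q->head + 1) % MAX_SLOTS;    /* advance, then write */
    q->slots[q->head] = sig;
    return true;
}

static bool sig_remove(struct sigqueue *q, int *sig)
{
    if (q->head == q->tail)                 /* indices meet: empty */
        return false;
    q->tail = (q->tail + 1) % MAX_SLOTS;    /* advance, then read */
    *sig = q->slots[q->tail];
    return true;
}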
|
/linux-4.1.27/arch/s390/oprofile/ |
H A D | backtrace.c | 58 unsigned long head; s390_backtrace() local 64 head = regs->gprs[15]; s390_backtrace() 65 head_sf = (struct stack_frame*)head; s390_backtrace() 70 head = head_sf->back_chain; s390_backtrace() 72 head = __show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE, s390_backtrace() 75 __show_trace(&depth, head, S390_lowcore.thread_info, s390_backtrace()
|
/linux-4.1.27/fs/proc/ |
H A D | proc_sysctl.c | 28 static bool is_empty_dir(struct ctl_table_header *head) is_empty_dir() argument 30 return head->ctl_table[0].child == sysctl_mount_point; is_empty_dir() 76 static int insert_links(struct ctl_table_header *head); 105 struct ctl_table_header *head; find_entry() local 116 head = ctl_node->header; find_entry() 117 entry = &head->ctl_table[ctl_node - head->node]; find_entry() 126 *phead = head; find_entry() 133 static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry) insert_entry() argument 135 struct rb_node *node = &head->node[entry - head->ctl_table].node; insert_entry() 136 struct rb_node **p = &head->parent->root.rb_node; insert_entry() 161 sysctl_print_dir(head->parent); insert_entry() 168 rb_insert_color(node, &head->parent->root); insert_entry() 172 static void erase_entry(struct ctl_table_header *head, struct ctl_table *entry) erase_entry() argument 174 struct rb_node *node = &head->node[entry - head->ctl_table].node; erase_entry() 176 rb_erase(node, &head->parent->root); erase_entry() 179 static void init_header(struct ctl_table_header *head, init_header() argument 183 head->ctl_table = table; init_header() 184 head->ctl_table_arg = table; init_header() 185 head->used = 0; init_header() 186 head->count = 1; init_header() 187 head->nreg = 1; init_header() 188 head->unregistering = NULL; init_header() 189 head->root = root; init_header() 190 head->set = set; init_header() 191 head->parent = NULL; init_header() 192 head->node = node; init_header() 196 node->header = head; init_header() 200 static void erase_header(struct ctl_table_header *head) erase_header() argument 203 for (entry = head->ctl_table; entry->procname; entry++) erase_header() 204 erase_entry(head, entry); erase_header() 287 static void sysctl_head_get(struct ctl_table_header *head) sysctl_head_get() argument 290 head->count++; sysctl_head_get() 294 void sysctl_head_put(struct ctl_table_header *head) sysctl_head_put() argument 297 if (!--head->count) sysctl_head_put() 298 kfree_rcu(head, rcu); sysctl_head_put() 302 static struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *head) sysctl_head_grab() argument 304 BUG_ON(!head); sysctl_head_grab() 306 if (!use_table(head)) sysctl_head_grab() 307 head = ERR_PTR(-ENOENT); sysctl_head_grab() 309 return head; sysctl_head_grab() 312 static void sysctl_head_finish(struct ctl_table_header *head) sysctl_head_finish() argument 314 if (!head) sysctl_head_finish() 317 unuse_table(head); sysctl_head_finish() 334 struct ctl_table_header *head; lookup_entry() local 338 entry = find_entry(&head, dir, name, namelen); lookup_entry() 339 if (entry && use_table(head)) lookup_entry() 340 *phead = head; lookup_entry() 362 struct ctl_table_header *head = NULL; first_entry() local 370 head = ctl_node->header; first_entry() 371 entry = &head->ctl_table[ctl_node - head->node]; first_entry() 373 *phead = head; first_entry() 379 struct ctl_table_header *head = *phead; next_entry() local 381 struct ctl_node *ctl_node = &head->node[entry - head->ctl_table]; next_entry() 384 unuse_table(head); next_entry() 388 head = NULL; next_entry() 390 head = ctl_node->header; next_entry() 391 entry = &head->ctl_table[ctl_node - head->node]; next_entry() 393 *phead = head; next_entry() 417 static int sysctl_perm(struct ctl_table_header *head, struct ctl_table *table, int op) sysctl_perm() argument 419 struct ctl_table_root *root = head->root; sysctl_perm() 423 mode = root->permissions(head, table); sysctl_perm() 431 struct ctl_table_header *head, struct ctl_table *table) proc_sys_make_inode() 442 sysctl_head_get(head); proc_sys_make_inode() 444 ei->sysctl = head; proc_sys_make_inode() 457 if (is_empty_dir(head)) proc_sys_make_inode() 466 struct ctl_table_header *head = PROC_I(inode)->sysctl; grab_header() local 467 if (!head) grab_header() 468 head = &sysctl_table_root.default_set.dir.header; grab_header() 469 return sysctl_head_grab(head); grab_header() 475 struct ctl_table_header *head = grab_header(dir); proc_sys_lookup() local 484 if (IS_ERR(head)) proc_sys_lookup() 485 return ERR_CAST(head); proc_sys_lookup() 487 ctl_dir = container_of(head, struct ctl_dir, header); proc_sys_lookup() 501 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p); proc_sys_lookup() 512 sysctl_head_finish(head); proc_sys_lookup() 520 struct ctl_table_header *head = grab_header(inode); proc_sys_call_handler() local 525 if (IS_ERR(head)) proc_sys_call_handler() 526 return PTR_ERR(head); proc_sys_call_handler() 533 if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ)) proc_sys_call_handler() 547 sysctl_head_finish(head); proc_sys_call_handler() 566 struct ctl_table_header *head = grab_header(inode); proc_sys_open() local 570 if (IS_ERR(head)) proc_sys_open() 571 return PTR_ERR(head); proc_sys_open() 576 sysctl_head_finish(head); proc_sys_open() 584 struct ctl_table_header *head = grab_header(inode); proc_sys_poll() local 590 if (IS_ERR(head)) proc_sys_poll() 608 sysctl_head_finish(head); proc_sys_poll() 615 struct ctl_table_header *head, proc_sys_fill_cache() 632 inode = proc_sys_make_inode(dir->d_sb, head, table); proc_sys_fill_cache() 653 struct ctl_table_header *head, proc_sys_link_fill_cache() 657 head = sysctl_head_grab(head); proc_sys_link_fill_cache() 661 int err = sysctl_follow_link(&head, &table, current->nsproxy); proc_sys_link_fill_cache() 666 ret = proc_sys_fill_cache(file, ctx, head, table); proc_sys_link_fill_cache() 668 sysctl_head_finish(head); proc_sys_link_fill_cache() 672 static int scan(struct ctl_table_header *head, struct ctl_table *table, scan() argument 682 res = proc_sys_link_fill_cache(file, ctx, head, table); scan() 684 res = proc_sys_fill_cache(file, ctx, head, table); scan() 694 struct ctl_table_header *head = grab_header(file_inode(file)); proc_sys_readdir() local 700 if (IS_ERR(head)) proc_sys_readdir() 701 return PTR_ERR(head); proc_sys_readdir() 703 ctl_dir = container_of(head, struct ctl_dir, header); proc_sys_readdir() 716 sysctl_head_finish(head); proc_sys_readdir() 726 struct ctl_table_header *head; proc_sys_permission() local 734 head = grab_header(inode); proc_sys_permission() 735 if (IS_ERR(head)) proc_sys_permission() 736 return PTR_ERR(head); proc_sys_permission() 742 error = sysctl_perm(head, table, mask & ~MAY_NOT_BLOCK); proc_sys_permission() 744 sysctl_head_finish(head); proc_sys_permission() 768 struct ctl_table_header *head = grab_header(inode); proc_sys_getattr() local 771 if (IS_ERR(head)) proc_sys_getattr() 772 return PTR_ERR(head); proc_sys_getattr() 778 sysctl_head_finish(head); proc_sys_getattr() 839 struct ctl_table_header *head; proc_sys_compare() local 852 head = rcu_dereference(PROC_I(inode)->sysctl); proc_sys_compare() 853 return !head || !sysctl_is_seen(head); proc_sys_compare() 865 struct ctl_table_header *head; find_subdir() local 868 entry = find_entry(&head, dir, name, namelen); find_subdir() 873 return container_of(head, struct ctl_dir, header); find_subdir() 980 struct ctl_table_header *head; sysctl_follow_link() local 996 head = NULL; sysctl_follow_link() 997 entry = find_entry(&head, dir, procname, strlen(procname)); sysctl_follow_link() 999 if (entry && use_table(head)) { sysctl_follow_link() 1001 *phead = head; sysctl_follow_link() 1103 struct ctl_table_header *head; get_links() local 1109 link = find_entry(&head, dir, procname, strlen(procname)); get_links() 1122 link = find_entry(&head, dir, procname, strlen(procname)); get_links() 1123 head->nreg++; get_links() 1128 static int insert_links(struct ctl_table_header *head) insert_links() argument 1135 if (head->set == root_set) insert_links() 1138 core_parent = xlate_dir(root_set, head->parent); insert_links() 1142 if (get_links(core_parent, head->ctl_table, head->root)) insert_links() 1148 links = new_links(core_parent, head->ctl_table, head->root); insert_links() 1156 if (get_links(core_parent, head->ctl_table, head->root)) { insert_links() 430 proc_sys_make_inode(struct super_block *sb, struct ctl_table_header *head, struct ctl_table *table) proc_sys_make_inode() argument 613 proc_sys_fill_cache(struct file *file, struct dir_context *ctx, struct ctl_table_header *head, struct ctl_table *table) proc_sys_fill_cache() argument 651 proc_sys_link_fill_cache(struct file *file, struct dir_context *ctx, struct ctl_table_header *head, struct ctl_table *table) proc_sys_link_fill_cache() argument
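insert_entry() above uses the standard kernel rbtree idiom: walk the rb_node child pointers down to the insertion point while comparing names, reject duplicates, then attach with rb_link_node() and rebalance with rb_insert_color(). A stripped-down, kernel-style sketch of that idiom against <linux/rbtree.h>; named_node is an invented container for illustration, not the sysctl ctl_node:

#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/errno.h>

struct named_node {
    struct rb_node node;
    const char *name;
};

/* Descend to the link point, attach, recolor; -EEXIST on a duplicate. */
static int named_insert(struct rb_root *root, struct named_node *nn)
{
    struct rb_node **p = &root->rb_node, *parent = NULL;

    while (*p) {
        struct named_node *cur = rb_entry(*p, struct named_node, node);
        int cmp = strcmp(nn->name, cur->name);

        parent = *p;
        if (cmp < 0)
            p = &(*p)->rb_left;
        else if (cmp > 0)
            p = &(*p)->rb_right;
        else
            return -EEXIST;
    }
    rb_link_node(&nn->node, parent, p);
    rb_insert_color(&nn->node, root);
    return 0;
}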
|
/linux-4.1.27/drivers/net/wireless/ath/carl9170/ |
H A D | fwdesc.h | 119 struct carl9170fw_desc_head head; member in struct:carl9170fw_otus_desc 140 struct carl9170fw_desc_head head; member in struct:carl9170fw_motd_desc 157 struct carl9170fw_desc_head head; member in struct:carl9170fw_fix_desc 166 struct carl9170fw_desc_head head; member in struct:carl9170fw_dbg_desc 182 struct carl9170fw_desc_head head; member in struct:carl9170fw_chk_desc 192 struct carl9170fw_desc_head head; member in struct:carl9170fw_txsq_desc 202 struct carl9170fw_desc_head head; member in struct:carl9170fw_wol_desc 212 struct carl9170fw_desc_head head; member in struct:carl9170fw_last_desc 220 .head = { \ 227 static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head, carl9170fw_fill_desc() argument 231 head->magic[0] = magic[0]; carl9170fw_fill_desc() 232 head->magic[1] = magic[1]; carl9170fw_fill_desc() 233 head->magic[2] = magic[2]; carl9170fw_fill_desc() 234 head->magic[3] = magic[3]; carl9170fw_fill_desc() 236 head->length = length; carl9170fw_fill_desc() 237 head->min_ver = min_ver; carl9170fw_fill_desc() 238 head->cur_ver = cur_ver; carl9170fw_fill_desc() 248 #define CHECK_HDR_VERSION(head, _min_ver) \ 249 (((head)->cur_ver < _min_ver) || ((head)->min_ver > _min_ver)) \ 256 static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head, carl9170fw_desc_cmp() argument 260 if (descid[0] == head->magic[0] && descid[1] == head->magic[1] && carl9170fw_desc_cmp() 261 descid[2] == head->magic[2] && descid[3] == head->magic[3] && carl9170fw_desc_cmp() 262 !CHECK_HDR_VERSION(head, compatible_revision) && carl9170fw_desc_cmp() 263 (le16_to_cpu(head->length) >= min_len)) carl9170fw_desc_cmp()
|
/linux-4.1.27/drivers/mfd/ |
H A D | pcf50633-adc.c | 74 int head; trigger_next_adc_job_if_any() local 76 head = adc->queue_head; trigger_next_adc_job_if_any() 78 if (!adc->queue[head]) trigger_next_adc_job_if_any() 81 adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg); trigger_next_adc_job_if_any() 88 int head, tail; adc_enqueue_request() local 92 head = adc->queue_head; adc_enqueue_request() 102 if (head == tail) adc_enqueue_request() 177 int head, res; pcf50633_adc_irq() local 180 head = adc->queue_head; pcf50633_adc_irq() 182 req = adc->queue[head]; pcf50633_adc_irq() 188 adc->queue[head] = NULL; pcf50633_adc_irq() 189 adc->queue_head = (head + 1) & pcf50633_adc_irq() 223 int i, head; pcf50633_adc_remove() local 228 head = adc->queue_head; pcf50633_adc_remove() 230 if (WARN_ON(adc->queue[head])) pcf50633_adc_remove()
|
/linux-4.1.27/drivers/gpu/drm/nouveau/ |
H A D | nv10_fence.h | 10 struct nvif_object head[4]; member in struct:nv10_fence_chan
|
H A D | nouveau_display.c | 54 nouveau_display_vblank_enable(struct drm_device *dev, int head) nouveau_display_vblank_enable() argument 57 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_vblank_enable() 59 if (nv_crtc->index == head) { nouveau_display_vblank_enable() 68 nouveau_display_vblank_disable(struct drm_device *dev, int head) nouveau_display_vblank_disable() argument 71 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_vblank_disable() 73 if (nv_crtc->index == head) { nouveau_display_vblank_disable() 103 .base.head = nouveau_crtc(crtc)->index, nouveau_display_scanoutpos_head() 134 nouveau_display_scanoutpos(struct drm_device *dev, int head, unsigned int flags, nouveau_display_scanoutpos() argument 139 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_scanoutpos() 140 if (nouveau_crtc(crtc)->index == head) { nouveau_display_scanoutpos() 150 nouveau_display_vblstamp(struct drm_device *dev, int head, int *max_error, nouveau_display_vblstamp() argument 155 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_vblstamp() 156 if (nouveau_crtc(crtc)->index == head) { nouveau_display_vblstamp() 158 head, max_error, time, flags, crtc, nouveau_display_vblstamp() 173 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_vblank_fini() 186 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_vblank_init() 192 .head = nv_crtc->index, nouveau_display_vblank_init() 372 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { nouveau_display_init() 385 int head; nouveau_display_fini() local 388 for (head = 0; head < dev->mode_config.num_crtc; head++) nouveau_display_fini() 389 drm_vblank_off(dev, head); nouveau_display_fini() 392 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { nouveau_display_fini() 560 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_suspend() 570 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_suspend() 587 int ret, head; nouveau_display_resume() local 590 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_resume() 602 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_resume() 617 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_resume() 624 for (head = 0; head < dev->mode_config.num_crtc; head++) nouveau_display_resume() 625 drm_vblank_on(dev, head); nouveau_display_resume() 636 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_resume() 664 list_add_tail(&s->head, &fctx->flip); nouveau_page_flip_emit() 691 list_del(&s->head); nouveau_page_flip_emit() 762 int head = nouveau_crtc(crtc)->index; nouveau_crtc_page_flip() local 772 OUT_RING (chan, head); nouveau_crtc_page_flip() 779 nouveau_bo_ref(new_bo, &dispnv04->image[head]); nouveau_crtc_page_flip() 828 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); nouveau_finish_page_flip() 840 list_del(&s->head); nouveau_finish_page_flip()
|
H A D | nv50_display.c | 66 nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head, nv50_chan_create() argument 69 const u32 handle = (oclass[0] << 16) | head; nv50_chan_create() 116 nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head, nv50_pioc_create() argument 119 return nv50_chan_create(disp, oclass, head, data, size, &pioc->base); nv50_pioc_create() 131 nv50_curs_create(struct nvif_object *disp, int head, struct nv50_curs *curs) nv50_curs_create() argument 134 .head = head, nv50_curs_create() 145 return nv50_pioc_create(disp, oclass, head, &args, sizeof(args), nv50_curs_create() 158 nv50_oimm_create(struct nvif_object *disp, int head, struct nv50_oimm *oimm) nv50_oimm_create() argument 161 .head = head, nv50_oimm_create() 172 return nv50_pioc_create(disp, oclass, head, &args, sizeof(args), nv50_oimm_create() 209 nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head, nv50_dmac_create() argument 236 ret = nv50_chan_create(disp, oclass, head, data, size, &dmac->base); nv50_dmac_create() 311 nv50_base_create(struct nvif_object *disp, int head, u64 syncbuf, nv50_base_create() argument 315 .pushbuf = 0xb0007c00 | head, nv50_base_create() 316 .head = head, nv50_base_create() 329 return nv50_dmac_create(disp, oclass, head, &args, sizeof(args), nv50_base_create() 342 nv50_ovly_create(struct nvif_object *disp, int head, u64 syncbuf, nv50_ovly_create() argument 346 .pushbuf = 0xb0007e00 | head, nv50_ovly_create() 347 .head = head, nv50_ovly_create() 359 return nv50_dmac_create(disp, oclass, head, &args, sizeof(args), nv50_ovly_create() 381 struct list_head head; member in struct:nv50_fbdma 547 struct nv50_head *head = nv50_head(crtc); nv50_display_flip_next() local 666 nouveau_bo_ref(nv_fb->nvbo, &head->image); nv50_display_flip_next() 1098 struct nv50_head *head = nv50_head(crtc); nv50_crtc_swap_fbs() local 1103 if (head->image) nv50_crtc_swap_fbs() 1104 nouveau_bo_unpin(head->image); nv50_crtc_swap_fbs() 1105 nouveau_bo_ref(nvfb->nvbo, &head->image); nv50_crtc_swap_fbs() 1270 struct nv50_head *head = nv50_head(crtc); nv50_crtc_disable() local 1272 if (head->image) nv50_crtc_disable() 1273 nouveau_bo_unpin(head->image); nv50_crtc_disable() 1274 nouveau_bo_ref(NULL, &head->image); nv50_crtc_disable() 1354 struct nv50_head *head = nv50_head(crtc); nv50_crtc_destroy() local 1357 list_for_each_entry(fbdma, &disp->fbdma, head) { nv50_crtc_destroy() 1361 nv50_dmac_destroy(&head->ovly.base, disp->disp); nv50_crtc_destroy() 1362 nv50_pioc_destroy(&head->oimm.base); nv50_crtc_destroy() 1363 nv50_dmac_destroy(&head->sync.base, disp->disp); nv50_crtc_destroy() 1364 nv50_pioc_destroy(&head->curs.base); nv50_crtc_destroy() 1369 if (head->image) nv50_crtc_destroy() 1370 nouveau_bo_unpin(head->image); nv50_crtc_destroy() 1371 nouveau_bo_ref(NULL, &head->image); nv50_crtc_destroy() 1412 struct nv50_head *head; nv50_crtc_create() local 1416 head = kzalloc(sizeof(*head), GFP_KERNEL); nv50_crtc_create() 1417 if (!head) nv50_crtc_create() 1420 head->base.index = index; nv50_crtc_create() 1421 head->base.set_dither = nv50_crtc_set_dither; nv50_crtc_create() 1422 head->base.set_scale = nv50_crtc_set_scale; nv50_crtc_create() 1423 head->base.set_color_vibrance = nv50_crtc_set_color_vibrance; nv50_crtc_create() 1424 head->base.color_vibrance = 50; nv50_crtc_create() 1425 head->base.vibrant_hue = 0; nv50_crtc_create() 1426 head->base.cursor.set_pos = nv50_crtc_cursor_restore; nv50_crtc_create() 1428 head->base.lut.r[i] = i << 8; nv50_crtc_create() 1429 head->base.lut.g[i] = i << 8; nv50_crtc_create() 1430 head->base.lut.b[i] = i << 8; nv50_crtc_create() 1433 crtc = &head->base.base; nv50_crtc_create() 1439 0, 0x0000, NULL, NULL, &head->base.lut.nvbo); nv50_crtc_create() 1441 ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true); nv50_crtc_create() 1443 ret = nouveau_bo_map(head->base.lut.nvbo); nv50_crtc_create() 1445 nouveau_bo_unpin(head->base.lut.nvbo); nv50_crtc_create() 1448 nouveau_bo_ref(NULL, &head->base.lut.nvbo); nv50_crtc_create() 1455 ret = nv50_curs_create(disp->disp, index, &head->curs); nv50_crtc_create() 1461 &head->sync); nv50_crtc_create() 1465 head->sync.addr = EVO_FLIP_SEM0(index); nv50_crtc_create() 1466 head->sync.data = 0x00000000; nv50_crtc_create() 1469 ret = nv50_oimm_create(disp->disp, index, &head->oimm); nv50_crtc_create() 1474 &head->ovly); nv50_crtc_create() 1845 list_for_each_entry(partner, &dev->mode_config.encoder_list, head) { nv50_sor_dpms() 2283 list_del(&fbdma->head); nv50_fbdma_fini() 2306 list_for_each_entry(fbdma, &disp->fbdma, head) { nv50_fbdma_init() 2314 list_add(&fbdma->head, &disp->fbdma); nv50_fbdma_init() 2339 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nv50_fbdma_init() 2340 struct nv50_head *head = nv50_head(crtc); nv50_fbdma_init() local 2341 int ret = nvif_object_init(&head->sync.base.base.user, NULL, nv50_fbdma_init() 2343 &fbdma->base[head->base.index]); nv50_fbdma_init() 2429 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nv50_display_init() 2448 list_for_each_entry_safe(fbdma, fbtmp, &disp->fbdma, head) { nv50_display_destroy() 2555 list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { nv50_display_create()
|
/linux-4.1.27/arch/blackfin/mach-common/ |
H A D | Makefile | 6 cache.o cache-c.o entry.o head.o \
|
/linux-4.1.27/arch/c6x/kernel/ |
H A D | Makefile | 5 extra-y := head.o vmlinux.lds
|
/linux-4.1.27/arch/cris/arch-v10/kernel/ |
H A D | Makefile | 5 extra-y := head.o
|
/linux-4.1.27/arch/cris/arch-v32/kernel/ |
H A D | Makefile | 5 extra-y := head.o
|
/linux-4.1.27/arch/m32r/kernel/ |
H A D | Makefile | 5 extra-y := head.o vmlinux.lds
|
/linux-4.1.27/arch/m68k/68000/ |
H A D | Makefile | 18 extra-y := head.o
|
/linux-4.1.27/arch/arm/include/asm/ |
H A D | procinfo.h | 27 * arch/arm/mm/proc-*.S and arch/arm/kernel/head.S 32 unsigned long __cpu_mm_mmu_flags; /* used by head.S */ 33 unsigned long __cpu_io_mmu_flags; /* used by head.S */ 34 unsigned long __cpu_flush; /* used by head.S */
|
/linux-4.1.27/net/ipv6/ |
H A D | reassembly.c | 383 struct sk_buff *fp, *head = fq->q.fragments; ip6_frag_reasm() local 395 /* Make the one we just received the head. */ ip6_frag_reasm() 397 head = prev->next; ip6_frag_reasm() 398 fp = skb_clone(head, GFP_ATOMIC); ip6_frag_reasm() 403 fp->next = head->next; ip6_frag_reasm() 408 skb_morph(head, fq->q.fragments); ip6_frag_reasm() 409 head->next = fq->q.fragments->next; ip6_frag_reasm() 412 fq->q.fragments = head; ip6_frag_reasm() 415 WARN_ON(head == NULL); ip6_frag_reasm() 416 WARN_ON(FRAG6_CB(head)->offset != 0); ip6_frag_reasm() 419 payload_len = ((head->data - skb_network_header(head)) - ip6_frag_reasm() 426 if (skb_unclone(head, GFP_ATOMIC)) ip6_frag_reasm() 432 if (skb_has_frag_list(head)) { ip6_frag_reasm() 439 clone->next = head->next; ip6_frag_reasm() 440 head->next = clone; ip6_frag_reasm() 441 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; ip6_frag_reasm() 442 skb_frag_list_init(head); ip6_frag_reasm() 443 for (i = 0; i < skb_shinfo(head)->nr_frags; i++) ip6_frag_reasm() 444 plen += skb_frag_size(&skb_shinfo(head)->frags[i]); ip6_frag_reasm() 445 clone->len = clone->data_len = head->data_len - plen; ip6_frag_reasm() 446 head->data_len -= clone->len; ip6_frag_reasm() 447 head->len -= clone->len; ip6_frag_reasm() 449 clone->ip_summed = head->ip_summed; ip6_frag_reasm() 456 skb_network_header(head)[nhoff] = skb_transport_header(head)[0]; ip6_frag_reasm() 457 memmove(head->head + sizeof(struct frag_hdr), head->head, ip6_frag_reasm() 458 (head->data - head->head) - sizeof(struct frag_hdr)); ip6_frag_reasm() 459 head->mac_header += sizeof(struct frag_hdr); ip6_frag_reasm() 460 head->network_header += sizeof(struct frag_hdr); ip6_frag_reasm() 462 skb_reset_transport_header(head); ip6_frag_reasm() 463 skb_push(head, head->data - skb_network_header(head)); ip6_frag_reasm() 465 sum_truesize = head->truesize; ip6_frag_reasm() 466 for (fp = head->next; fp;) { ip6_frag_reasm() 472 if (head->ip_summed != fp->ip_summed) ip6_frag_reasm() 473 head->ip_summed = CHECKSUM_NONE; ip6_frag_reasm() 474 else if (head->ip_summed == CHECKSUM_COMPLETE) ip6_frag_reasm() 475 head->csum = csum_add(head->csum, fp->csum); ip6_frag_reasm() 477 if (skb_try_coalesce(head, fp, &headstolen, &delta)) { ip6_frag_reasm() 480 if (!skb_shinfo(head)->frag_list) ip6_frag_reasm() 481 skb_shinfo(head)->frag_list = fp; ip6_frag_reasm() 482 head->data_len += fp->len; ip6_frag_reasm() 483 head->len += fp->len; ip6_frag_reasm() 484 head->truesize += fp->truesize; ip6_frag_reasm() 490 head->next = NULL; ip6_frag_reasm() 491 head->dev = dev; ip6_frag_reasm() 492 head->tstamp = fq->q.stamp; ip6_frag_reasm() 493 ipv6_hdr(head)->payload_len = htons(payload_len); ip6_frag_reasm() 494 ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn); ip6_frag_reasm() 495 IP6CB(head)->nhoff = nhoff; ip6_frag_reasm() 496 IP6CB(head)->flags |= IP6SKB_FRAGMENTED; ip6_frag_reasm() 499 if (head->ip_summed == CHECKSUM_COMPLETE) ip6_frag_reasm() 500 head->csum = csum_partial(skb_network_header(head), ip6_frag_reasm() 501 skb_network_header_len(head), ip6_frag_reasm() 502 head->csum); ip6_frag_reasm()
|
/linux-4.1.27/tools/perf/util/include/linux/ |
H A D | list.h | 25 * @head: the head for your list. 27 #define list_for_each_from(pos, head) \ 28 for (; pos != (head); pos = pos->next)
|
/linux-4.1.27/tools/usb/usbip/libsrc/ |
H A D | list.h | 52 * @head: list head to add it after 54 * Insert a new entry after the specified head. 57 static inline void list_add(struct list_head *new, struct list_head *head) list_add() argument 59 __list_add(new, head, head->next); list_add() 108 * @head: the head for your list. 110 #define list_for_each(pos, head) \ 111 for (pos = (head)->next; pos != (head); pos = pos->next) 117 * @head: the head for your list. 119 #define list_for_each_safe(pos, n, head) \ 120 for (pos = (head)->next, n = pos->next; pos != (head); \
|
/linux-4.1.27/arch/x86/platform/uv/ |
H A D | uv_time.c | 170 struct uv_rtc_timer_head *head = blade_info[bid]; for_each_present_cpu() local 172 if (!head) { for_each_present_cpu() 173 head = kmalloc_node(sizeof(struct uv_rtc_timer_head) + for_each_present_cpu() 177 if (!head) { for_each_present_cpu() 181 spin_lock_init(&head->lock); for_each_present_cpu() 182 head->ncpus = uv_blade_nr_possible_cpus(bid); for_each_present_cpu() 183 head->next_cpu = -1; for_each_present_cpu() 184 blade_info[bid] = head; for_each_present_cpu() 187 head->cpu[bcpu].lcpu = cpu; for_each_present_cpu() 188 head->cpu[bcpu].expires = ULLONG_MAX; for_each_present_cpu() 195 static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode) uv_rtc_find_next_timer() argument 200 head->next_cpu = -1; uv_rtc_find_next_timer() 201 for (c = 0; c < head->ncpus; c++) { uv_rtc_find_next_timer() 202 u64 exp = head->cpu[c].expires; uv_rtc_find_next_timer() 209 head->next_cpu = bcpu; uv_rtc_find_next_timer() 210 c = head->cpu[bcpu].lcpu; uv_rtc_find_next_timer() 229 struct uv_rtc_timer_head *head = blade_info[bid]; uv_rtc_set_timer() local 231 u64 *t = &head->cpu[bcpu].expires; uv_rtc_set_timer() 235 spin_lock_irqsave(&head->lock, flags); uv_rtc_set_timer() 237 next_cpu = head->next_cpu; uv_rtc_set_timer() 242 expires < head->cpu[next_cpu].expires) { uv_rtc_set_timer() 243 head->next_cpu = bcpu; uv_rtc_set_timer() 246 uv_rtc_find_next_timer(head, pnode); uv_rtc_set_timer() 247 spin_unlock_irqrestore(&head->lock, flags); uv_rtc_set_timer() 252 spin_unlock_irqrestore(&head->lock, flags); uv_rtc_set_timer() 265 struct uv_rtc_timer_head *head = blade_info[bid]; uv_rtc_unset_timer() local 267 u64 *t = &head->cpu[bcpu].expires; uv_rtc_unset_timer() 271 spin_lock_irqsave(&head->lock, flags); uv_rtc_unset_timer() 273 if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force) uv_rtc_unset_timer() 279 if (head->next_cpu == bcpu) uv_rtc_unset_timer() 280 uv_rtc_find_next_timer(head, pnode); uv_rtc_unset_timer() 283 spin_unlock_irqrestore(&head->lock, flags); uv_rtc_unset_timer()
|
/linux-4.1.27/arch/mn10300/boot/compressed/ |
H A D | Makefile | 5 targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o 9 $(obj)/vmlinux: $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE
|
/linux-4.1.27/net/ipv6/netfilter/ |
H A D | nf_conntrack_reasm.c | 382 struct sk_buff *fp, *op, *head = fq->q.fragments; nf_ct_frag6_reasm() local 388 WARN_ON(head == NULL); nf_ct_frag6_reasm() 389 WARN_ON(NFCT_FRAG6_CB(head)->offset != 0); nf_ct_frag6_reasm() 396 payload_len = ((head->data - skb_network_header(head)) - nf_ct_frag6_reasm() 405 if (skb_unclone(head, GFP_ATOMIC)) { nf_ct_frag6_reasm() 406 pr_debug("skb is cloned but can't expand head"); nf_ct_frag6_reasm() 413 if (skb_has_frag_list(head)) { nf_ct_frag6_reasm() 421 clone->next = head->next; nf_ct_frag6_reasm() 422 head->next = clone; nf_ct_frag6_reasm() 423 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; nf_ct_frag6_reasm() 424 skb_frag_list_init(head); nf_ct_frag6_reasm() 425 for (i = 0; i < skb_shinfo(head)->nr_frags; i++) nf_ct_frag6_reasm() 426 plen += skb_frag_size(&skb_shinfo(head)->frags[i]); nf_ct_frag6_reasm() 427 clone->len = clone->data_len = head->data_len - plen; nf_ct_frag6_reasm() 428 head->data_len -= clone->len; nf_ct_frag6_reasm() 429 head->len -= clone->len; nf_ct_frag6_reasm() 431 clone->ip_summed = head->ip_summed; nf_ct_frag6_reasm() 439 skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0]; nf_ct_frag6_reasm() 440 memmove(head->head + sizeof(struct frag_hdr), head->head, nf_ct_frag6_reasm() 441 (head->data - head->head) - sizeof(struct frag_hdr)); nf_ct_frag6_reasm() 442 head->mac_header += sizeof(struct frag_hdr); nf_ct_frag6_reasm() 443 head->network_header += sizeof(struct frag_hdr); nf_ct_frag6_reasm() 445 skb_shinfo(head)->frag_list = head->next; nf_ct_frag6_reasm() 446 skb_reset_transport_header(head); nf_ct_frag6_reasm() 447 skb_push(head, head->data - skb_network_header(head)); nf_ct_frag6_reasm() 449 for (fp=head->next; fp; fp = fp->next) { nf_ct_frag6_reasm() 450 head->data_len += fp->len; nf_ct_frag6_reasm() 451 head->len += fp->len; nf_ct_frag6_reasm() 452 if (head->ip_summed != fp->ip_summed) nf_ct_frag6_reasm() 453 head->ip_summed = CHECKSUM_NONE; nf_ct_frag6_reasm() 454 else if (head->ip_summed == CHECKSUM_COMPLETE) nf_ct_frag6_reasm() 455 head->csum = csum_add(head->csum, fp->csum); nf_ct_frag6_reasm() 456 head->truesize += fp->truesize; nf_ct_frag6_reasm() 458 sub_frag_mem_limit(&fq->q, head->truesize); nf_ct_frag6_reasm() 460 head->ignore_df = 1; nf_ct_frag6_reasm() 461 head->next = NULL; nf_ct_frag6_reasm() 462 head->dev = dev; nf_ct_frag6_reasm() 463 head->tstamp = fq->q.stamp; nf_ct_frag6_reasm() 464 ipv6_hdr(head)->payload_len = htons(payload_len); nf_ct_frag6_reasm() 465 ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn); nf_ct_frag6_reasm() 466 IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size; nf_ct_frag6_reasm() 469 if (head->ip_summed == CHECKSUM_COMPLETE) nf_ct_frag6_reasm() 470 head->csum = csum_partial(skb_network_header(head), nf_ct_frag6_reasm() 471 skb_network_header_len(head), nf_ct_frag6_reasm() 472 head->csum); nf_ct_frag6_reasm() 477 /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */ nf_ct_frag6_reasm() 478 fp = skb_shinfo(head)->frag_list; nf_ct_frag6_reasm() 480 /* at above code, head skb is divided into two skbs. */ nf_ct_frag6_reasm() 483 op = NFCT_FRAG6_CB(head)->orig; nf_ct_frag6_reasm() 492 return head; nf_ct_frag6_reasm()
|
/linux-4.1.27/kernel/rcu/ |
H A D | rcu.h | 71 static inline int debug_rcu_head_queue(struct rcu_head *head) debug_rcu_head_queue() argument 75 r1 = debug_object_activate(head, &rcuhead_debug_descr); debug_rcu_head_queue() 76 debug_object_active_state(head, &rcuhead_debug_descr, debug_rcu_head_queue() 82 static inline void debug_rcu_head_unqueue(struct rcu_head *head) debug_rcu_head_unqueue() argument 84 debug_object_active_state(head, &rcuhead_debug_descr, debug_rcu_head_unqueue() 87 debug_object_deactivate(head, &rcuhead_debug_descr); debug_rcu_head_unqueue() 90 static inline int debug_rcu_head_queue(struct rcu_head *head) debug_rcu_head_queue() argument 95 static inline void debug_rcu_head_unqueue(struct rcu_head *head) debug_rcu_head_unqueue() argument 106 static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head) __rcu_reclaim() argument 108 unsigned long offset = (unsigned long)head->func; __rcu_reclaim() 112 RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset)); __rcu_reclaim() 113 kfree((void *)head - offset); __rcu_reclaim() 117 RCU_TRACE(trace_rcu_invoke_callback(rn, head)); __rcu_reclaim() 118 head->func(head); __rcu_reclaim()
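__rcu_reclaim() above is where the kfree_rcu() trick pays off: when head->func holds a small value it is not a callback at all but the byte offset of the rcu_head within its container, so the whole object can be freed with kfree((void *)head - offset). A userspace demonstration of that encoding; the 4096 cutoff mirrors the kernel's assumption that no function lives in the first page:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct rcu_head {
    struct rcu_head *next;
    void (*func)(struct rcu_head *);
};

/* Small "function pointer" values are really offsets (the kfree case). */
static void reclaim(struct rcu_head *head)
{
    uintptr_t offset = (uintptr_t)head->func;

    if (offset < 4096)
        free((char *)head - offset);    /* recover the container, free it */
    else
        head->func(head);               /* a real callback: invoke it */
}

struct widget {
    int payload;
    struct rcu_head rcu;
};

static void free_widget_rcu_style(struct widget *w)
{
    /* kfree_rcu() encodes the member offset in place of a callback */
    w->rcu.func = (void (*)(struct rcu_head *))offsetof(struct widget, rcu);
    reclaim(&w->rcu);   /* in the kernel this runs after a grace period */
}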
|
H A D | tiny.c | 46 static void __call_rcu(struct rcu_head *head, 238 static void __call_rcu(struct rcu_head *head, __call_rcu() argument 244 debug_rcu_head_queue(head); __call_rcu() 245 head->func = func; __call_rcu() 246 head->next = NULL; __call_rcu() 249 *rcp->curtail = head; __call_rcu() 250 rcp->curtail = &head->next; __call_rcu() 265 void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) call_rcu_sched() argument 267 __call_rcu(head, func, &rcu_sched_ctrlblk); call_rcu_sched() 275 void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) call_rcu_bh() argument 277 __call_rcu(head, func, &rcu_bh_ctrlblk); call_rcu_bh()
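__call_rcu() above appends callbacks in O(1) by keeping curtail, a pointer to the last ->next slot in the list: store the new element through it, then advance it. A freestanding sketch of that tail-pointer queue:

#include <stddef.h>

struct cb {
    struct cb *next;
    void (*func)(struct cb *);
};

struct cb_list {
    struct cb *head;
    struct cb **curtail;    /* points at the final ->next, or at head */
};

static void cb_list_init(struct cb_list *l)
{
    l->head = NULL;
    l->curtail = &l->head;  /* an empty list appends through head itself */
}

/* O(1) append: write through the tail pointer, then advance it. */
static void cb_enqueue(struct cb_list *l, struct cb *c)
{
    c->next = NULL;
    *l->curtail = c;
    l->curtail = &c->next;
}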
|
/linux-4.1.27/fs/hfs/ |
H A D | btree.c | 21 struct hfs_btree_header_rec *head; hfs_btree_open() local 82 head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc)); hfs_btree_open() 83 tree->root = be32_to_cpu(head->root); hfs_btree_open() 84 tree->leaf_count = be32_to_cpu(head->leaf_count); hfs_btree_open() 85 tree->leaf_head = be32_to_cpu(head->leaf_head); hfs_btree_open() 86 tree->leaf_tail = be32_to_cpu(head->leaf_tail); hfs_btree_open() 87 tree->node_count = be32_to_cpu(head->node_count); hfs_btree_open() 88 tree->free_nodes = be32_to_cpu(head->free_nodes); hfs_btree_open() 89 tree->attributes = be32_to_cpu(head->attributes); hfs_btree_open() 90 tree->node_size = be16_to_cpu(head->node_size); hfs_btree_open() 91 tree->max_key_len = be16_to_cpu(head->max_key_len); hfs_btree_open() 92 tree->depth = be16_to_cpu(head->depth); hfs_btree_open() 161 struct hfs_btree_header_rec *head; hfs_btree_write() local 171 head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc)); hfs_btree_write() 173 head->root = cpu_to_be32(tree->root); hfs_btree_write() 174 head->leaf_count = cpu_to_be32(tree->leaf_count); hfs_btree_write() 175 head->leaf_head = cpu_to_be32(tree->leaf_head); hfs_btree_write() 176 head->leaf_tail = cpu_to_be32(tree->leaf_tail); hfs_btree_write() 177 head->node_count = cpu_to_be32(tree->node_count); hfs_btree_write() 178 head->free_nodes = cpu_to_be32(tree->free_nodes); hfs_btree_write() 179 head->attributes = cpu_to_be32(tree->attributes); hfs_btree_write() 180 head->depth = cpu_to_be16(tree->depth); hfs_btree_write()
|
/linux-4.1.27/drivers/infiniband/hw/ipath/ |
H A D | ipath_cq.c | 52 u32 head; ipath_cq_enter() local 58 * Note that the head pointer might be writable by user processes. ipath_cq_enter() 62 head = wc->head; ipath_cq_enter() 63 if (head >= (unsigned) cq->ibcq.cqe) { ipath_cq_enter() 64 head = cq->ibcq.cqe; ipath_cq_enter() 67 next = head + 1; ipath_cq_enter() 81 wc->uqueue[head].wr_id = entry->wr_id; ipath_cq_enter() 82 wc->uqueue[head].status = entry->status; ipath_cq_enter() 83 wc->uqueue[head].opcode = entry->opcode; ipath_cq_enter() 84 wc->uqueue[head].vendor_err = entry->vendor_err; ipath_cq_enter() 85 wc->uqueue[head].byte_len = entry->byte_len; ipath_cq_enter() 86 wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data; ipath_cq_enter() 87 wc->uqueue[head].qp_num = entry->qp->qp_num; ipath_cq_enter() 88 wc->uqueue[head].src_qp = entry->src_qp; ipath_cq_enter() 89 wc->uqueue[head].wc_flags = entry->wc_flags; ipath_cq_enter() 90 wc->uqueue[head].pkey_index = entry->pkey_index; ipath_cq_enter() 91 wc->uqueue[head].slid = entry->slid; ipath_cq_enter() 92 wc->uqueue[head].sl = entry->sl; ipath_cq_enter() 93 wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits; ipath_cq_enter() 94 wc->uqueue[head].port_num = entry->port_num; ipath_cq_enter() 95 /* Make sure entry is written before the head index. */ ipath_cq_enter() 98 wc->kqueue[head] = *entry; ipath_cq_enter() 99 wc->head = next; ipath_cq_enter() 150 if (tail == wc->head) ipath_poll_cq() 223 * Allocate the completion queue entries and head/tail pointers. ipath_create_cq() 288 wc->head = 0; ipath_create_cq() 357 cq->queue->head != cq->queue->tail) ipath_req_notify_cq() 376 u32 head, tail, n; ipath_resize_cq() local 410 * Make sure head and tail are sane since they ipath_resize_cq() 414 head = old_wc->head; ipath_resize_cq() 415 if (head > (u32) cq->ibcq.cqe) ipath_resize_cq() 416 head = (u32) cq->ibcq.cqe; ipath_resize_cq() 420 if (head < tail) ipath_resize_cq() 421 n = cq->ibcq.cqe + 1 + head - tail; ipath_resize_cq() 423 n = head - tail; ipath_resize_cq() 428 for (n = 0; tail != head; n++) { ipath_resize_cq() 439 wc->head = n; ipath_resize_cq()
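ipath_cq_enter() above fills the completion entry, issues a write barrier, and only then publishes the new head index, so a consumer polling head can never observe a half-written entry. A single-producer/single-consumer sketch of that publication order, using a C11 release store where the kernel uses its explicit barrier:

#include <stdatomic.h>

#define CQ_SLOTS 64

struct cq {
    int entries[CQ_SLOTS];
    _Atomic unsigned int head;      /* next slot the producer fills */
    _Atomic unsigned int tail;      /* next slot the consumer reads */
};

/* Producer: write the slot first, then publish it by moving head. */
static int cq_enter(struct cq *q, int entry)
{
    unsigned int head = atomic_load_explicit(&q->head, memory_order_relaxed);
    unsigned int next = (head + 1) % CQ_SLOTS;

    if (next == atomic_load_explicit(&q->tail, memory_order_acquire))
        return -1;                  /* ring full */
    q->entries[head] = entry;       /* entry written before head moves... */
    atomic_store_explicit(&q->head, next, memory_order_release); /* ...here */
    return 0;
}

/* Consumer: a slot is valid once tail != head. */
static int cq_poll(struct cq *q, int *entry)
{
    unsigned int tail = atomic_load_explicit(&q->tail, memory_order_relaxed);

    if (tail == atomic_load_explicit(&q->head, memory_order_acquire))
        return -1;                  /* ring empty */
    *entry = q->entries[tail];
    atomic_store_explicit(&q->tail, (tail + 1) % CQ_SLOTS,
                          memory_order_release);
    return 0;
}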
|
H A D | ipath_srq.c | 69 next = wq->head + 1; ipath_post_srq_receive() 79 wqe = get_rwqe_ptr(&srq->rq, wq->head); ipath_post_srq_receive() 84 /* Make sure queue entry is written before the head index. */ ipath_post_srq_receive() 86 wq->head = next; ipath_post_srq_receive() 175 srq->rq.wq->head = 0; ipath_create_srq() 226 u32 sz, size, n, head, tail; ipath_modify_srq() local 264 * validate head pointer value and compute ipath_modify_srq() 268 head = owq->head; ipath_modify_srq() 269 if (head >= srq->rq.size) ipath_modify_srq() 270 head = 0; ipath_modify_srq() 274 n = head; ipath_modify_srq() 285 while (tail != head) { ipath_modify_srq() 301 wq->head = n; ipath_modify_srq()
|
/linux-4.1.27/drivers/infiniband/hw/qib/ |
H A D | qib_cq.c | 55 u32 head; qib_cq_enter() local 61 * Note that the head pointer might be writable by user processes. qib_cq_enter() 65 head = wc->head; qib_cq_enter() 66 if (head >= (unsigned) cq->ibcq.cqe) { qib_cq_enter() 67 head = cq->ibcq.cqe; qib_cq_enter() 70 next = head + 1; qib_cq_enter() 84 wc->uqueue[head].wr_id = entry->wr_id; qib_cq_enter() 85 wc->uqueue[head].status = entry->status; qib_cq_enter() 86 wc->uqueue[head].opcode = entry->opcode; qib_cq_enter() 87 wc->uqueue[head].vendor_err = entry->vendor_err; qib_cq_enter() 88 wc->uqueue[head].byte_len = entry->byte_len; qib_cq_enter() 89 wc->uqueue[head].ex.imm_data = qib_cq_enter() 91 wc->uqueue[head].qp_num = entry->qp->qp_num; qib_cq_enter() 92 wc->uqueue[head].src_qp = entry->src_qp; qib_cq_enter() 93 wc->uqueue[head].wc_flags = entry->wc_flags; qib_cq_enter() 94 wc->uqueue[head].pkey_index = entry->pkey_index; qib_cq_enter() 95 wc->uqueue[head].slid = entry->slid; qib_cq_enter() 96 wc->uqueue[head].sl = entry->sl; qib_cq_enter() 97 wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits; qib_cq_enter() 98 wc->uqueue[head].port_num = entry->port_num; qib_cq_enter() 99 /* Make sure entry is written before the head index. */ qib_cq_enter() 102 wc->kqueue[head] = *entry; qib_cq_enter() 103 wc->head = next; qib_cq_enter() 157 if (tail == wc->head) qib_poll_cq() 238 * Allocate the completion queue entries and head/tail pointers. qib_create_cq() 304 wc->head = 0; qib_create_cq() 373 cq->queue->head != cq->queue->tail) qib_req_notify_cq() 392 u32 head, tail, n; qib_resize_cq() local 426 * Make sure head and tail are sane since they qib_resize_cq() 430 head = old_wc->head; qib_resize_cq() 431 if (head > (u32) cq->ibcq.cqe) qib_resize_cq() 432 head = (u32) cq->ibcq.cqe; qib_resize_cq() 436 if (head < tail) qib_resize_cq() 437 n = cq->ibcq.cqe + 1 + head - tail; qib_resize_cq() 439 n = head - tail; qib_resize_cq() 444 for (n = 0; tail != head; n++) { qib_resize_cq() 455 wc->head = n; qib_resize_cq()
|
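Both resize paths above (ipath_resize_cq and qib_resize_cq) recover the number of queued entries from a head/tail pair on a ring of cqe + 1 slots: when head has wrapped behind tail the count is cqe + 1 + head - tail, otherwise simply head - tail. A small standalone check of that arithmetic; entries() and SLOTS are hypothetical names for this sketch:

    #include <assert.h>
    #include <stdint.h>

    #define CQE   15                    /* illustrative; the ring holds CQE + 1 slots */
    #define SLOTS (CQE + 1)

    static uint32_t entries(uint32_t head, uint32_t tail)
    {
        return head < tail ? SLOTS + head - tail : head - tail;
    }

    int main(void)
    {
        assert(entries(5, 5)  == 0);    /* empty: head == tail           */
        assert(entries(9, 5)  == 4);    /* no wrap                       */
        assert(entries(2, 14) == 4);    /* head wrapped past the end     */
        assert(entries(4, 5)  == CQE);  /* full: one slot is kept unused */
        return 0;
    }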
/linux-4.1.27/drivers/dma/ |
H A D | virt-dma.c | 65 LIST_HEAD(head); vchan_complete() 68 list_splice_tail_init(&vc->desc_completed, &head); vchan_complete() 80 while (!list_empty(&head)) { vchan_complete() 81 vd = list_first_entry(&head, struct virt_dma_desc, node); vchan_complete() 94 void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) vchan_dma_desc_free_list() argument 96 while (!list_empty(head)) { vchan_dma_desc_free_list() 97 struct virt_dma_desc *vd = list_first_entry(head, vchan_dma_desc_free_list()
|
H A D | virt-dma.h | 44 void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); 127 * head: list of descriptors found 135 struct list_head *head) vchan_get_all_descriptors() 137 list_splice_tail_init(&vc->desc_submitted, head); vchan_get_all_descriptors() 138 list_splice_tail_init(&vc->desc_issued, head); vchan_get_all_descriptors() 139 list_splice_tail_init(&vc->desc_completed, head); vchan_get_all_descriptors() 145 LIST_HEAD(head); vchan_free_chan_resources() 148 vchan_get_all_descriptors(vc, &head); vchan_free_chan_resources() 151 vchan_dma_desc_free_list(vc, &head); vchan_free_chan_resources() 134 vchan_get_all_descriptors(struct virt_dma_chan *vc, struct list_head *head) vchan_get_all_descriptors() argument
|
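vchan_complete() and vchan_get_all_descriptors() above both drain descriptor lists by splicing them in O(1) onto a private on-stack head while holding the lock, then walking the private list with the lock dropped. A reduced userspace model of the splice itself; struct list and splice_tail_init are illustrative stand-ins for the kernel's struct list_head and list_splice_tail_init():

    #include <assert.h>

    struct list { struct list *prev, *next; };          /* mirrors struct list_head */

    static void list_init(struct list *h)        { h->prev = h->next = h; }
    static int  list_empty(const struct list *h) { return h->next == h; }

    /* Move every node from src onto the tail of dst and leave src empty. */
    static void splice_tail_init(struct list *src, struct list *dst)
    {
        if (list_empty(src))
            return;
        src->next->prev = dst->prev;   /* first src node follows dst's old tail */
        dst->prev->next = src->next;
        src->prev->next = dst;         /* last src node becomes dst's new tail  */
        dst->prev = src->prev;
        list_init(src);                /* src is immediately reusable           */
    }

    int main(void)
    {
        struct list completed, work, a, b;
        list_init(&completed); list_init(&work);

        /* enqueue a and b on 'completed' by hand */
        a.prev = &completed; a.next = &b;
        b.prev = &a;         b.next = &completed;
        completed.next = &a; completed.prev = &b;

        splice_tail_init(&completed, &work);    /* the vchan_complete() step */
        assert(list_empty(&completed) && work.next == &a && work.prev == &b);
        return 0;
    }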
/linux-4.1.27/fs/gfs2/ |
H A D | recovery.c | 57 struct list_head *head = &jd->jd_revoke_list; gfs2_revoke_add() local 61 list_for_each_entry(rr, head, rr_list) { list_for_each_entry() 79 list_add(&rr->rr_list, head); 110 struct list_head *head = &jd->jd_revoke_list; gfs2_revoke_clean() local 113 while (!list_empty(head)) { gfs2_revoke_clean() 114 rr = list_entry(head->next, struct gfs2_revoke_replay, rr_list); gfs2_revoke_clean() 151 struct gfs2_log_header_host *head) get_log_header() 173 *head = lh; get_log_header() 192 struct gfs2_log_header_host *head) find_good_lh() 198 error = get_log_header(jd, *blk, head); find_good_lh() 213 * jhead_scan - make sure we've found the head of the log 215 * @head: this is filled in with the log descriptor of the head 217 * At this point, seg and lh should be either the head of the log or just 218 * before. Scan forward until we find the head. 223 static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) jhead_scan() argument 225 unsigned int blk = head->lh_blkno; jhead_scan() 239 if (lh.lh_sequence == head->lh_sequence) { jhead_scan() 243 if (lh.lh_sequence < head->lh_sequence) jhead_scan() 246 *head = lh; jhead_scan() 253 * gfs2_find_jhead - find the head of a log 255 * @head: the log descriptor for the head of the log is returned here 258 * highest sequence number. (i.e. the log head) 263 int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) gfs2_find_jhead() argument 296 *head = lh_1; gfs2_find_jhead() 376 * @head: the head journal to start from 381 static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) clean_journal() argument 392 lblock = head->lh_blkno; clean_journal() 417 lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1); clean_journal() 456 struct gfs2_log_header_host head; gfs2_recover_func() local 501 error = gfs2_find_jhead(jd, &head); gfs2_recover_func() 505 if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { gfs2_recover_func() 547 lops_before_scan(jd, &head, pass); gfs2_recover_func() 548 error = foreach_descriptor(jd, head.lh_tail, gfs2_recover_func() 549 head.lh_blkno, pass); gfs2_recover_func() 555 error = clean_journal(jd, &head); gfs2_recover_func() 150 get_log_header(struct gfs2_jdesc *jd, unsigned int blk, struct gfs2_log_header_host *head) get_log_header() argument 191 find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk, struct gfs2_log_header_host *head) find_good_lh() argument
|
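gfs2_find_jhead() and jhead_scan() above locate the log head by walking log headers until the sequence number stops increasing: a sequence equal to the current candidate means the scan wrapped, a lower one means the head was passed. A schematic of that scan over an in-memory array; find_head and seq[] are illustrative stand-ins for reading headers off disk:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Return the index of the header with the highest sequence number,
     * assuming sequences increase up to the head and drop after it. */
    static size_t find_head(const uint64_t *seq, size_t nblk)
    {
        size_t head = 0;
        for (size_t blk = 1; blk < nblk; blk++) {
            if (seq[blk] <= seq[head])     /* wrapped or older: past the head */
                break;
            head = blk;                    /* still climbing toward the head  */
        }
        return head;
    }

    int main(void)
    {
        const uint64_t log[] = { 40, 41, 42, 43, 10, 11 };  /* head at blk 3 */
        assert(find_head(log, 6) == 3);
        return 0;
    }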
H A D | meta_io.h | 22 static inline void gfs2_buffer_clear_tail(struct buffer_head *bh, int head) gfs2_buffer_clear_tail() argument 24 BUG_ON(head > bh->b_size); gfs2_buffer_clear_tail() 25 memset(bh->b_data + head, 0, bh->b_size - head); gfs2_buffer_clear_tail()
|
/linux-4.1.27/net/ipv4/ |
H A D | ip_fragment.c | 196 struct sk_buff *head = qp->q.fragments; ip_expire() local 206 head->dev = dev_get_by_index_rcu(net, qp->iif); ip_expire() 207 if (!head->dev) ip_expire() 211 iph = ip_hdr(head); ip_expire() 212 err = ip_route_input_noref(head, iph->daddr, iph->saddr, ip_expire() 213 iph->tos, head->dev); ip_expire() 223 (skb_rtable(head)->rt_type != RTN_LOCAL))) ip_expire() 227 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); ip_expire() 425 /* Eat head of the next overlapped fragment ip_frag_queue() 507 struct sk_buff *fp, *head = qp->q.fragments; ip_frag_reasm() local 521 /* Make the one we just received the head. */ ip_frag_reasm() 523 head = prev->next; ip_frag_reasm() 524 fp = skb_clone(head, GFP_ATOMIC); ip_frag_reasm() 528 fp->next = head->next; ip_frag_reasm() 533 skb_morph(head, qp->q.fragments); ip_frag_reasm() 534 head->next = qp->q.fragments->next; ip_frag_reasm() 537 qp->q.fragments = head; ip_frag_reasm() 540 WARN_ON(!head); ip_frag_reasm() 541 WARN_ON(FRAG_CB(head)->offset != 0); ip_frag_reasm() 544 ihlen = ip_hdrlen(head); ip_frag_reasm() 552 if (skb_unclone(head, GFP_ATOMIC)) ip_frag_reasm() 558 if (skb_has_frag_list(head)) { ip_frag_reasm() 565 clone->next = head->next; ip_frag_reasm() 566 head->next = clone; ip_frag_reasm() 567 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; ip_frag_reasm() 568 skb_frag_list_init(head); ip_frag_reasm() 569 for (i = 0; i < skb_shinfo(head)->nr_frags; i++) ip_frag_reasm() 570 plen += skb_frag_size(&skb_shinfo(head)->frags[i]); ip_frag_reasm() 571 clone->len = clone->data_len = head->data_len - plen; ip_frag_reasm() 572 head->data_len -= clone->len; ip_frag_reasm() 573 head->len -= clone->len; ip_frag_reasm() 575 clone->ip_summed = head->ip_summed; ip_frag_reasm() 579 skb_push(head, head->data - skb_network_header(head)); ip_frag_reasm() 581 sum_truesize = head->truesize; ip_frag_reasm() 582 for (fp = head->next; fp;) { ip_frag_reasm() 588 if (head->ip_summed != fp->ip_summed) ip_frag_reasm() 589 head->ip_summed = CHECKSUM_NONE; ip_frag_reasm() 590 else if (head->ip_summed == CHECKSUM_COMPLETE) ip_frag_reasm() 591 head->csum = csum_add(head->csum, fp->csum); ip_frag_reasm() 593 if (skb_try_coalesce(head, fp, &headstolen, &delta)) { ip_frag_reasm() 596 if (!skb_shinfo(head)->frag_list) ip_frag_reasm() 597 skb_shinfo(head)->frag_list = fp; ip_frag_reasm() 598 head->data_len += fp->len; ip_frag_reasm() 599 head->len += fp->len; ip_frag_reasm() 600 head->truesize += fp->truesize; ip_frag_reasm() 606 head->next = NULL; ip_frag_reasm() 607 head->dev = dev; ip_frag_reasm() 608 head->tstamp = qp->q.stamp; ip_frag_reasm() 609 IPCB(head)->frag_max_size = qp->q.max_size; ip_frag_reasm() 611 iph = ip_hdr(head); ip_frag_reasm()
|
H A D | inet_hashtables.c | 62 struct inet_bind_hashbucket *head, inet_bind_bucket_create() 74 hlist_add_head(&tb->node, &head->chain); inet_bind_bucket_create() 111 struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; __inet_put_port() local 116 spin_lock(&head->lock); __inet_put_port() 123 spin_unlock(&head->lock); __inet_put_port() 140 struct inet_bind_hashbucket *head = &table->bhash[bhash]; __inet_inherit_port() local 143 spin_lock(&head->lock); __inet_inherit_port() 151 inet_bind_bucket_for_each(tb, &head->chain) { __inet_inherit_port() 158 sock_net(sk), head, port); __inet_inherit_port() 160 spin_unlock(&head->lock); __inet_inherit_port() 166 spin_unlock(&head->lock); __inet_inherit_port() 222 sk_nulls_for_each_rcu(sk, node, &ilb->head) { __inet_lookup_listener() 297 struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; __inet_lookup_established() local 301 sk_nulls_for_each_rcu(sk, node, &head->chain) { __inet_lookup_established() 346 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); __inet_check_established() local 355 sk_nulls_for_each(sk2, node, &head->chain) { __inet_check_established() 377 __sk_nulls_add_node_rcu(sk, &head->chain); __inet_check_established() 414 struct inet_ehash_bucket *head; __inet_hash_nolisten() local 421 head = inet_ehash_bucket(hashinfo, sk->sk_hash); __inet_hash_nolisten() 422 list = &head->chain; __inet_hash_nolisten() 449 __sk_nulls_add_node_rcu(sk, &ilb->head); __inet_hash() 495 struct inet_bind_hashbucket *head; __inet_hash_connect() local 515 head = &hinfo->bhash[inet_bhashfn(net, port, __inet_hash_connect() 517 spin_lock(&head->lock); __inet_hash_connect() 523 inet_bind_bucket_for_each(tb, &head->chain) { __inet_hash_connect() 538 net, head, port); __inet_hash_connect() 540 spin_unlock(&head->lock); __inet_hash_connect() 548 spin_unlock(&head->lock); __inet_hash_connect() 565 spin_unlock(&head->lock); __inet_hash_connect() 579 head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)]; __inet_hash_connect() 581 spin_lock_bh(&head->lock); __inet_hash_connect() 584 spin_unlock_bh(&head->lock); __inet_hash_connect() 587 spin_unlock(&head->lock); __inet_hash_connect() 614 INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head, inet_hashinfo_init() 60 inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net, struct inet_bind_hashbucket *head, const unsigned short snum) inet_bind_bucket_create() argument
|
H A D | xfrm4_protocol.c | 44 #define for_each_protocol_rcu(head, handler) \ 45 for (handler = rcu_dereference(head); \ 53 struct xfrm4_protocol __rcu **head = proto_handlers(protocol); xfrm4_rcv_cb() local 55 if (!head) xfrm4_rcv_cb() 58 for_each_protocol_rcu(*head, handler) xfrm4_rcv_cb() 71 struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr); xfrm4_rcv_encap() local 77 if (!head) xfrm4_rcv_encap() 80 for_each_protocol_rcu(*head, handler) xfrm4_rcv_encap()
|
/linux-4.1.27/drivers/gpu/drm/ttm/ |
H A D | ttm_execbuf_util.c | 38 list_for_each_entry_continue_reverse(entry, list, head) { list_for_each_entry_continue_reverse() 49 list_for_each_entry(entry, list, head) { list_for_each_entry() 66 entry = list_first_entry(list, struct ttm_validate_buffer, head); ttm_eu_backoff_reservation() 70 list_for_each_entry(entry, list, head) { list_for_each_entry() 106 entry = list_first_entry(list, struct ttm_validate_buffer, head); ttm_eu_reserve_buffers() 112 list_for_each_entry(entry, list, head) { list_for_each_entry() 124 entry = list_prev_entry(entry, head); list_for_each_entry() 125 list_del(&safe->head); list_for_each_entry() 126 list_add(&safe->head, dups); list_for_each_entry() 169 list_del(&entry->head); list_for_each_entry() 170 list_add(&entry->head, list); list_for_each_entry() 194 bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo; ttm_eu_fence_buffer_objects() 201 list_for_each_entry(entry, list, head) { list_for_each_entry()
|
/linux-4.1.27/fs/nilfs2/ |
H A D | segbuf.h | 60 * @sb_list: List head to chain this structure 98 #define NILFS_LIST_SEGBUF(head) \ 99 list_entry((head), struct nilfs_segment_buffer, sb_list) 102 #define NILFS_LAST_SEGBUF(head) NILFS_LIST_SEGBUF((head)->prev) 103 #define NILFS_FIRST_SEGBUF(head) NILFS_LIST_SEGBUF((head)->next) 104 #define NILFS_SEGBUF_IS_LAST(segbuf, head) ((segbuf)->sb_list.next == (head)) 110 #define NILFS_SEGBUF_FIRST_BH(head) \ 111 (list_entry((head)->next, struct buffer_head, b_assoc_buffers)) 115 #define NILFS_SEGBUF_BH_IS_LAST(bh, head) ((bh)->b_assoc_buffers.next == head)
|
H A D | alloc.h | 56 * @pr_desc_bh: buffer head of the buffer containing block group descriptors 57 * @pr_bitmap_bh: buffer head of the buffer containing a block group bitmap 58 * @pr_entry_bh: buffer head of the buffer containing translation entries 82 * struct nilfs_bh_assoc - block offset and buffer head association 84 * @bh: buffer head
|
/linux-4.1.27/net/ceph/ |
H A D | pagelist.c | 11 struct page *page = list_entry(pl->head.prev, struct page, lru); ceph_pagelist_unmap_tail() 22 while (!list_empty(&pl->head)) { ceph_pagelist_release() 23 struct page *page = list_first_entry(&pl->head, struct page, ceph_pagelist_release() 48 list_add_tail(&page->lru, &pl->head); ceph_pagelist_addpage() 119 c->page_lru = pl->head.prev; ceph_pagelist_set_cursor() 137 while (pl->head.prev != c->page_lru) { ceph_pagelist_truncate() 138 page = list_entry(pl->head.prev, struct page, lru); ceph_pagelist_truncate() 144 if (!list_empty(&pl->head)) { ceph_pagelist_truncate() 145 page = list_entry(pl->head.prev, struct page, lru); ceph_pagelist_truncate()
|
/linux-4.1.27/sound/oss/ |
H A D | msnd.c | 105 f->head = 0; msnd_fifo_alloc() 116 f->len = f->tail = f->head = 0; msnd_fifo_make_empty() 127 if (f->head <= f->tail) { msnd_fifo_write_io() 133 nwritten = f->head - f->tail; msnd_fifo_write_io() 158 if (f->head <= f->tail) { msnd_fifo_write() 164 nwritten = f->head - f->tail; msnd_fifo_write() 189 if (f->tail <= f->head) { msnd_fifo_read_io() 191 if (nread > f->n - f->head) msnd_fifo_read_io() 192 nread = f->n - f->head; msnd_fifo_read_io() 195 nread = f->tail - f->head; msnd_fifo_read_io() 200 memcpy_toio(buf, f->data + f->head, nread); msnd_fifo_read_io() 205 f->head += nread; msnd_fifo_read_io() 206 f->head %= f->n; msnd_fifo_read_io() 220 if (f->tail <= f->head) { msnd_fifo_read() 222 if (nread > f->n - f->head) msnd_fifo_read() 223 nread = f->n - f->head; msnd_fifo_read() 226 nread = f->tail - f->head; msnd_fifo_read() 231 memcpy(buf, f->data + f->head, nread); msnd_fifo_read() 236 f->head += nread; msnd_fifo_read() 237 f->head %= f->n; msnd_fifo_read()
|
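msnd_fifo_read() above handles wraparound by clamping each copy to the distance from head to the end of the backing buffer, then taking head modulo the FIFO size, so a request that straddles the end is served as two memcpy() segments. A compact userspace version of that loop; fifo_read and struct fifo are illustrative names:

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    struct fifo {
        char  *data;
        size_t n;            /* capacity           */
        size_t len;          /* bytes stored       */
        size_t head, tail;   /* read/write indices */
    };

    static size_t fifo_read(struct fifo *f, char *buf, size_t want)
    {
        size_t done = 0;
        while (done < want && f->len) {
            size_t chunk = want - done;
            if (f->tail <= f->head) {               /* stored data wraps  */
                if (chunk > f->n - f->head)
                    chunk = f->n - f->head;         /* stop at buffer end */
            } else if (chunk > f->tail - f->head) {
                chunk = f->tail - f->head;          /* stop at tail       */
            }
            if (chunk > f->len)
                chunk = f->len;
            memcpy(buf + done, f->data + f->head, chunk);
            f->head = (f->head + chunk) % f->n;     /* wrap head, as in msnd */
            f->len -= chunk;
            done += chunk;
        }
        return done;
    }

    int main(void)
    {
        char store[4] = { 'c', 'd', 'a', 'b' };     /* "abcd" stored wrapped */
        struct fifo f = { store, 4, 4, 2, 2 };      /* head = tail = 2, full */
        char out[4];
        assert(fifo_read(&f, out, 4) == 4 && memcmp(out, "abcd", 4) == 0);
        return 0;
    }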
/linux-4.1.27/drivers/net/wireless/ath/ |
H A D | dfs_pri_detector.c | 34 struct list_head head; member in struct:pulse_elem 102 list_for_each_entry_safe(p, p0, &pulse_pool, head) { pool_deregister_ref() 103 list_del(&p->head); pool_deregister_ref() 107 list_for_each_entry_safe(ps, ps0, &pseq_pool, head) { pool_deregister_ref() 108 list_del(&ps->head); pool_deregister_ref() 119 list_add(&pe->head, &pulse_pool); pool_put_pulse_elem() 127 list_add(&pse->head, &pseq_pool); pool_put_pseq_elem() 137 pse = list_first_entry(&pseq_pool, struct pri_sequence, head); pool_get_pseq_elem() 138 list_del(&pse->head); pool_get_pseq_elem() 150 pe = list_first_entry(&pulse_pool, struct pulse_elem, head); pool_get_pulse_elem() 151 list_del(&pe->head); pool_get_pulse_elem() 163 return list_entry(l->prev, struct pulse_elem, head); pulse_queue_get_tail() 170 list_del_init(&p->head); pulse_queue_dequeue() 211 INIT_LIST_HEAD(&p->head); pulse_queue_enqueue() 213 list_add(&p->head, &pde->pulses); pulse_queue_enqueue() 226 list_for_each_entry(p, &pde->pulses, head) { pseq_handler_create_sequences() 254 list_for_each_entry_continue(p2, &pde->pulses, head) { pseq_handler_create_sequences() 293 INIT_LIST_HEAD(&new_ps->head); pseq_handler_create_sequences() 294 list_add(&new_ps->head, &pde->sequences); pseq_handler_create_sequences() 305 list_for_each_entry_safe(ps, ps2, &pde->sequences, head) { pseq_handler_add_to_existing_seqs() 311 list_del_init(&ps->head); pseq_handler_add_to_existing_seqs() 340 list_for_each_entry(ps, &pde->sequences, head) { pseq_handler_check_detection() 359 list_for_each_entry_safe(ps, ps0, &pde->sequences, head) { pri_detector_reset() 360 list_del_init(&ps->head); pri_detector_reset() 363 list_for_each_entry_safe(p, p0, &pde->pulses, head) { pri_detector_reset() 364 list_del_init(&p->head); pri_detector_reset()
|
/linux-4.1.27/drivers/isdn/capi/ |
H A D | capilib.c | 80 void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize) capilib_new_ncci() argument 98 list_add_tail(&np->list, head); capilib_new_ncci() 104 void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci) capilib_free_ncci() argument 109 list_for_each(l, head) { list_for_each() 125 void capilib_release_appl(struct list_head *head, u16 applid) capilib_release_appl() argument 130 list_for_each_safe(l, n, head) { list_for_each_safe() 142 void capilib_release(struct list_head *head) capilib_release() argument 147 list_for_each_safe(l, n, head) { list_for_each_safe() 157 u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid) capilib_data_b3_req() argument 162 list_for_each(l, head) { list_for_each() 180 void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid) capilib_data_b3_conf() argument 185 list_for_each(l, head) { list_for_each()
|
/linux-4.1.27/arch/mips/lasat/image/ |
H A D | Makefile | 24 $(obj)/head.o: $(obj)/head.S $(KERNEL_IMAGE) 27 OBJECTS = head.o kImage.o
|
H A D | head.S | 1 #include <asm/lasat/head.h>
|
/linux-4.1.27/arch/ia64/kernel/ |
H A D | mca_drv.h | 104 #define slidx_foreach_entry(pos, head) \ 105 list_for_each_entry(pos, head, list) 106 #define slidx_first_entry(head) \ 107 (((head)->next != (head)) ? list_entry((head)->next, typeof(slidx_list_t), list) : NULL)
|
/linux-4.1.27/drivers/scsi/arm/ |
H A D | queue.c | 64 INIT_LIST_HEAD(&queue->head); queue_initialise() 69 * host-available list head, and we wouldn't queue_initialise() 92 if (!list_empty(&queue->head)) queue_free() 99 * Function: int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head) 100 * Purpose : Add a new command onto a queue, adding REQUEST_SENSE to head. 103 * head - add command to head of queue 106 int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head) __queue_add() argument 126 if (head) __queue_add() 127 list_add(l, &queue->head); __queue_add() 129 list_add_tail(l, &queue->head); __queue_add() 168 list_for_each(l, &queue->head) { queue_remove_exclude() 193 if (!list_empty(&queue->head)) queue_remove() 194 SCpnt = __queue_remove(queue, queue->head.next); queue_remove() 217 list_for_each(l, &queue->head) { queue_remove_tgtluntag() 243 list_for_each(l, &queue->head) { queue_remove_all_target() 267 list_for_each(l, &queue->head) { queue_probetgtlun() 293 list_for_each(l, &queue->head) { queue_remove_cmd()
|
H A D | queue.h | 14 struct list_head head; member in struct:__anon8658 57 * Function: int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head) 61 * head - add command to head of queue 64 extern int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head);
|
/linux-4.1.27/arch/sh/boot/romimage/ |
H A D | Makefile | 7 targets := vmlinux head.o zeropage.bin piggy.o 18 $(obj)/vmlinux: $(obj)/head.o $(obj-y) $(obj)/piggy.o FORCE
|
/linux-4.1.27/fs/hfsplus/ |
H A D | btree.c | 135 struct hfs_btree_header_rec *head; hfs_btree_open() local 165 head = (struct hfs_btree_header_rec *)(kmap(page) + hfs_btree_open() 167 tree->root = be32_to_cpu(head->root); hfs_btree_open() 168 tree->leaf_count = be32_to_cpu(head->leaf_count); hfs_btree_open() 169 tree->leaf_head = be32_to_cpu(head->leaf_head); hfs_btree_open() 170 tree->leaf_tail = be32_to_cpu(head->leaf_tail); hfs_btree_open() 171 tree->node_count = be32_to_cpu(head->node_count); hfs_btree_open() 172 tree->free_nodes = be32_to_cpu(head->free_nodes); hfs_btree_open() 173 tree->attributes = be32_to_cpu(head->attributes); hfs_btree_open() 174 tree->node_size = be16_to_cpu(head->node_size); hfs_btree_open() 175 tree->max_key_len = be16_to_cpu(head->max_key_len); hfs_btree_open() 176 tree->depth = be16_to_cpu(head->depth); hfs_btree_open() 205 (head->key_type == HFSPLUS_KEY_BINARY)) hfs_btree_open() 283 struct hfs_btree_header_rec *head; hfs_btree_write() local 293 head = (struct hfs_btree_header_rec *)(kmap(page) + hfs_btree_write() 296 head->root = cpu_to_be32(tree->root); hfs_btree_write() 297 head->leaf_count = cpu_to_be32(tree->leaf_count); hfs_btree_write() 298 head->leaf_head = cpu_to_be32(tree->leaf_head); hfs_btree_write() 299 head->leaf_tail = cpu_to_be32(tree->leaf_tail); hfs_btree_write() 300 head->node_count = cpu_to_be32(tree->node_count); hfs_btree_write() 301 head->free_nodes = cpu_to_be32(tree->free_nodes); hfs_btree_write() 302 head->attributes = cpu_to_be32(tree->attributes); hfs_btree_write() 303 head->depth = cpu_to_be16(tree->depth); hfs_btree_write()
|
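hfs_btree_open() above copies each big-endian on-disk header field through be32_to_cpu()/be16_to_cpu() into native in-memory fields, and hfs_btree_write() reverses the conversion. The same decode written portably in userspace; read_be32/read_be16 and the sample offsets are illustrative, the real layout being struct hfs_btree_header_rec:

    #include <assert.h>
    #include <stdint.h>

    /* Assemble a big-endian 32-bit value regardless of host endianness. */
    static uint32_t read_be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    static uint16_t read_be16(const uint8_t *p)
    {
        return (uint16_t)((p[0] << 8) | p[1]);
    }

    int main(void)
    {
        const uint8_t disk[] = { 0x00, 0x00, 0x01, 0x00,   /* root      = 256 */
                                 0x02, 0x00 };             /* node_size = 512 */
        assert(read_be32(disk) == 256);
        assert(read_be16(disk + 4) == 512);
        return 0;
    }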
/linux-4.1.27/net/ipv4/netfilter/ |
H A D | nf_conntrack_l3proto_ipv4_compat.c | 50 struct hlist_nulls_node *head) ct_get_next() 55 head = rcu_dereference(hlist_nulls_next_rcu(head)); ct_get_next() 56 while (is_a_nulls(head)) { ct_get_next() 57 if (likely(get_nulls_value(head) == st->bucket)) { ct_get_next() 61 head = rcu_dereference( ct_get_next() 64 return head; ct_get_next() 69 struct hlist_nulls_node *head = ct_get_first(seq); ct_get_idx() local 71 if (head) ct_get_idx() 72 while (pos && (head = ct_get_next(seq, head))) ct_get_idx() 74 return pos ? NULL : head; ct_get_idx() 237 struct hlist_node *head) ct_expect_get_next() 242 head = rcu_dereference(hlist_next_rcu(head)); ct_expect_get_next() 243 while (head == NULL) { ct_expect_get_next() 246 head = rcu_dereference( ct_expect_get_next() 249 return head; ct_expect_get_next() 254 struct hlist_node *head = ct_expect_get_first(seq); ct_expect_get_idx() local 256 if (head) ct_expect_get_idx() 257 while (pos && (head = ct_expect_get_next(seq, head))) ct_expect_get_idx() 259 return pos ? NULL : head; ct_expect_get_idx() 49 ct_get_next(struct seq_file *seq, struct hlist_nulls_node *head) ct_get_next() argument 236 ct_expect_get_next(struct seq_file *seq, struct hlist_node *head) ct_expect_get_next() argument
|
/linux-4.1.27/net/ieee802154/6lowpan/ |
H A D | reassembly.c | 240 struct sk_buff *fp, *head = fq->q.fragments; lowpan_frag_reasm() local 245 /* Make the one we just received the head. */ lowpan_frag_reasm() 247 head = prev->next; lowpan_frag_reasm() 248 fp = skb_clone(head, GFP_ATOMIC); lowpan_frag_reasm() 253 fp->next = head->next; lowpan_frag_reasm() 258 skb_morph(head, fq->q.fragments); lowpan_frag_reasm() 259 head->next = fq->q.fragments->next; lowpan_frag_reasm() 262 fq->q.fragments = head; lowpan_frag_reasm() 266 if (skb_unclone(head, GFP_ATOMIC)) lowpan_frag_reasm() 273 if (skb_has_frag_list(head)) { lowpan_frag_reasm() 280 clone->next = head->next; lowpan_frag_reasm() 281 head->next = clone; lowpan_frag_reasm() 282 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; lowpan_frag_reasm() 283 skb_frag_list_init(head); lowpan_frag_reasm() 284 for (i = 0; i < skb_shinfo(head)->nr_frags; i++) lowpan_frag_reasm() 285 plen += skb_frag_size(&skb_shinfo(head)->frags[i]); lowpan_frag_reasm() 286 clone->len = head->data_len - plen; lowpan_frag_reasm() 288 head->data_len -= clone->len; lowpan_frag_reasm() 289 head->len -= clone->len; lowpan_frag_reasm() 293 WARN_ON(head == NULL); lowpan_frag_reasm() 295 sum_truesize = head->truesize; lowpan_frag_reasm() 296 for (fp = head->next; fp;) { lowpan_frag_reasm() 302 if (skb_try_coalesce(head, fp, &headstolen, &delta)) { lowpan_frag_reasm() 305 if (!skb_shinfo(head)->frag_list) lowpan_frag_reasm() 306 skb_shinfo(head)->frag_list = fp; lowpan_frag_reasm() 307 head->data_len += fp->len; lowpan_frag_reasm() 308 head->len += fp->len; lowpan_frag_reasm() 309 head->truesize += fp->truesize; lowpan_frag_reasm() 315 head->next = NULL; lowpan_frag_reasm() 316 head->dev = dev; lowpan_frag_reasm() 317 head->tstamp = fq->q.stamp; lowpan_frag_reasm()
|
/linux-4.1.27/drivers/net/wireless/ath/wil6210/ |
H A D | fw.h | 35 __le32 size; /* whole record, bytes after head */ 39 * data_size inferred from the @head.size. For this case, 40 * data_size = @head.size - offsetof(struct wil_fw_record_data, data) 55 * for informational purpose, data_size is @head.size from record header 62 * data_size = @head.size - offsetof(struct wil_fw_record_action, data) 78 * data_size is @head.size where @head is record header 115 * data_size inferred from the @head.size. For this case, 116 * data_size = @head.size - offsetof(struct wil_fw_record_gateway_data, data) 139 * data_size inferred from the @head.size. For this case, 140 * data_size = @head.size - offsetof(struct wil_fw_record_gateway_data4, data)
|
/linux-4.1.27/drivers/gpu/drm/ |
H A D | drm_hashtab.c | 69 hlist_for_each_entry(entry, h_list, head) drm_ht_verbose_list() 82 hlist_for_each_entry(entry, h_list, head) { hlist_for_each_entry() 84 return &entry->head; hlist_for_each_entry() 100 hlist_for_each_entry_rcu(entry, h_list, head) { hlist_for_each_entry_rcu() 102 return &entry->head; hlist_for_each_entry_rcu() 120 hlist_for_each_entry(entry, h_list, head) { hlist_for_each_entry() 125 parent = &entry->head; hlist_for_each_entry() 128 hlist_add_behind_rcu(&item->head, parent); 130 hlist_add_head_rcu(&item->head, h_list); 174 *item = hlist_entry(list, struct drm_hash_item, head); drm_ht_find_item() 193 hlist_del_init_rcu(&item->head); drm_ht_remove_item()
|
H A D | drm_agpsupport.c | 222 list_add(&entry->head, &dev->agp->memory); drm_agp_alloc() 254 list_for_each_entry(entry, &dev->agp->memory, head) { drm_agp_lookup_entry() 369 list_del(&entry->head); drm_agp_free() 401 struct drm_agp_head *head = NULL; drm_agp_init() local 403 if (!(head = kzalloc(sizeof(*head), GFP_KERNEL))) drm_agp_init() 405 head->bridge = agp_find_bridge(dev->pdev); drm_agp_init() 406 if (!head->bridge) { drm_agp_init() 407 if (!(head->bridge = agp_backend_acquire(dev->pdev))) { drm_agp_init() 408 kfree(head); drm_agp_init() 411 agp_copy_info(head->bridge, &head->agp_info); drm_agp_init() 412 agp_backend_release(head->bridge); drm_agp_init() 414 agp_copy_info(head->bridge, &head->agp_info); drm_agp_init() 416 if (head->agp_info.chipset == NOT_SUPPORTED) { drm_agp_init() 417 kfree(head); drm_agp_init() 420 INIT_LIST_HEAD(&head->memory); drm_agp_init() 421 head->cant_use_aperture = head->agp_info.cant_use_aperture; drm_agp_init() 422 head->page_mask = head->agp_info.page_mask; drm_agp_init() 423 head->base = head->agp_info.aper_base; drm_agp_init() 424 return head; drm_agp_init() 431 * Iterate over all AGP resources and remove them. But keep the AGP head 448 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { drm_agp_clear()
|
/linux-4.1.27/arch/arm/mach-netx/ |
H A D | xc.c | 120 struct fw_header *head; xc_request_firmware() local 135 head = (struct fw_header *)fw->data; xc_request_firmware() 136 if (head->magic != 0x4e657458) { xc_request_firmware() 137 if (head->magic == 0x5874654e) { xc_request_firmware() 144 head->magic); xc_request_firmware() 149 x->type = head->type; xc_request_firmware() 150 x->version = head->version; xc_request_firmware() 155 src = fw->data + head->fw_desc[i].ofs; xc_request_firmware() 158 size = head->fw_desc[i].size - sizeof (unsigned int); xc_request_firmware() 165 src = fw->data + head->fw_desc[i].patch_ofs; xc_request_firmware() 166 size = head->fw_desc[i].patch_entries; xc_request_firmware()
|
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/ |
H A D | nrs_fifo.c | 39 * enabled on a given NRS head. 81 struct nrs_fifo_head *head; nrs_fifo_start() local 83 OBD_CPT_ALLOC_PTR(head, nrs_pol2cptab(policy), nrs_pol2cptid(policy)); nrs_fifo_start() 84 if (head == NULL) nrs_fifo_start() 87 INIT_LIST_HEAD(&head->fh_list); nrs_fifo_start() 88 policy->pol_private = head; nrs_fifo_start() 103 struct nrs_fifo_head *head = policy->pol_private; nrs_fifo_stop() local 105 LASSERT(head != NULL); nrs_fifo_stop() 106 LASSERT(list_empty(&head->fh_list)); nrs_fifo_stop() 108 OBD_FREE_PTR(head); nrs_fifo_stop() 163 struct nrs_fifo_head *head = policy->pol_private; nrs_fifo_req_get() local 166 nrq = unlikely(list_empty(&head->fh_list)) ? NULL : nrs_fifo_req_get() 167 list_entry(head->fh_list.next, struct ptlrpc_nrs_request, nrs_fifo_req_get() 197 struct nrs_fifo_head *head; nrs_fifo_req_add() local 199 head = container_of(nrs_request_resource(nrq), struct nrs_fifo_head, nrs_fifo_req_add() 204 nrq->nr_u.fifo.fr_sequence = head->fh_sequence++; nrs_fifo_req_add() 205 list_add_tail(&nrq->nr_u.fifo.fr_list, &head->fh_list); nrs_fifo_req_add()
|
/linux-4.1.27/drivers/s390/cio/ |
H A D | eadm_sch.h | 16 struct list_head head; member in struct:eadm_private
|
/linux-4.1.27/arch/s390/boot/compressed/ |
H A D | Makefile | 9 targets += misc.o piggy.o sizes.h head.o 19 OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o) 20 OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o 36 $(obj)/head.o: $(obj)/sizes.h
|
H A D | vmlinux.lds.S | 14 .head.text : {
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | ps3gpu.h | 46 static inline int lv1_gpu_display_sync(u64 context_handle, u64 head, lv1_gpu_display_sync() argument 51 head, ddr_offset, 0, 0); lv1_gpu_display_sync() 54 static inline int lv1_gpu_display_flip(u64 context_handle, u64 head, lv1_gpu_display_flip() argument 59 head, ddr_offset, 0, 0); lv1_gpu_display_flip()
|
/linux-4.1.27/arch/avr32/kernel/ |
H A D | Makefile | 5 extra-y := head.o vmlinux.lds
|
/linux-4.1.27/arch/cris/include/arch-v32/arch/ |
H A D | pgtable.h | 6 /* See head.S for differences between ARTPEC-3 and ETRAX FS. */
|
/linux-4.1.27/arch/hexagon/kernel/ |
H A D | Makefile | 1 extra-y := head.o vmlinux.lds
|
/linux-4.1.27/drivers/staging/unisys/visorutil/ |
H A D | charqueue.c | 26 #define IS_EMPTY(charqueue) (charqueue->head == charqueue->tail) 32 int head, tail; member in struct:charqueue 46 cq->head = 0; visor_charqueue_create() 58 charqueue->head = (charqueue->head+1) % alloc_slots; visor_charqueue_enqueue() 59 if (charqueue->head == charqueue->tail) visor_charqueue_enqueue() 62 charqueue->buf[charqueue->head] = c; visor_charqueue_enqueue()
|
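visor_charqueue_enqueue() above advances head first and, when head catches up with tail, bumps tail as well, so a full queue silently overwrites its oldest byte instead of rejecting the write. The same policy in a standalone sketch; cq_push/cq_pop and SLOTS are illustrative names:

    #include <assert.h>

    #define SLOTS 4                       /* holds SLOTS - 1 distinct bytes */

    struct cq { int head, tail; unsigned char buf[SLOTS]; };

    static void cq_push(struct cq *q, unsigned char c)
    {
        q->head = (q->head + 1) % SLOTS;
        if (q->head == q->tail)           /* full: drop the oldest byte */
            q->tail = (q->tail + 1) % SLOTS;
        q->buf[q->head] = c;
    }

    static int cq_pop(struct cq *q, unsigned char *c)
    {
        if (q->head == q->tail)           /* head == tail means empty */
            return 0;
        q->tail = (q->tail + 1) % SLOTS;
        *c = q->buf[q->tail];
        return 1;
    }

    int main(void)
    {
        struct cq q = { 0, 0, { 0 } };
        unsigned char c;
        cq_push(&q, 'a'); cq_push(&q, 'b'); cq_push(&q, 'c');
        cq_push(&q, 'd');                 /* overwrites 'a' */
        assert(cq_pop(&q, &c) && c == 'b');
        assert(cq_pop(&q, &c) && c == 'c');
        assert(cq_pop(&q, &c) && c == 'd');
        assert(!cq_pop(&q, &c));
        return 0;
    }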
/linux-4.1.27/arch/x86/kernel/cpu/ |
H A D | perf_event_intel_bts.c | 57 local_t head; member in struct:bts_buffer 146 index = local_read(&buf->head); bts_config_buffer() 170 static void bts_buffer_pad_out(struct bts_phys *phys, unsigned long head) bts_buffer_pad_out() argument 172 unsigned long index = head - phys->offset; bts_buffer_pad_out() 194 unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head; bts_update() local 199 head = index + bts_buffer_offset(buf, buf->cur_buf); bts_update() 200 old = local_xchg(&buf->head, head); bts_update() 203 if (old == head) bts_update() 210 * old and head are always in the same physical buffer, so we bts_update() 213 local_add(head - old, &buf->data_size); bts_update() 215 local_set(&buf->data_size, head); bts_update() 304 unsigned long head, space, next_space, pad, gap, skip, wakeup; bts_buffer_reset() local 312 head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); bts_buffer_reset() 313 if (WARN_ON_ONCE(head != local_read(&buf->head))) bts_buffer_reset() 317 space = phys->offset + phys->displacement + phys->size - head; bts_buffer_reset() 340 bts_buffer_pad_out(phys, head); bts_buffer_reset() 347 head = phys->offset + phys->displacement; bts_buffer_reset() 349 * After this, cur_buf and head won't match ds bts_buffer_reset() 354 local_set(&buf->head, head); bts_buffer_reset() 361 handle->head; bts_buffer_reset() 367 buf->end = head + space; bts_buffer_reset() 399 old_head = local_read(&buf->head); intel_bts_interrupt() 403 if (old_head == local_read(&buf->head)) intel_bts_interrupt() 430 bts->handle.head = bts_event_del()
|
/linux-4.1.27/drivers/tty/ |
H A D | tty_buffer.c | 69 restart = buf->head->commit != buf->head->read; tty_buffer_unlock_exclusive() 121 while ((p = buf->head) != NULL) { tty_buffer_free_all() 122 buf->head = p->next; tty_buffer_free_all() 131 buf->head = &buf->sentinel; tty_buffer_free_all() 222 while ((next = buf->head->next) != NULL) { tty_buffer_flush() 223 tty_buffer_free(port, buf->head); tty_buffer_flush() 224 buf->head = next; tty_buffer_flush() 226 buf->head->read = buf->head->commit; tty_buffer_flush() 269 * latest commit value can be read before the head is __tty_buffer_request_room() 406 receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count) receive_buf() argument 409 unsigned char *p = char_buf_ptr(head, head->read); receive_buf() 412 if (~head->flags & TTYB_NORMAL) receive_buf() 413 f = flag_buf_ptr(head, head->read); receive_buf() 422 head->read += count; receive_buf() 457 struct tty_buffer *head = buf->head; flush_to_ldisc() local 465 next = head->next; flush_to_ldisc() 467 * ensures commit value read is not stale if the head flush_to_ldisc() 471 count = head->commit - head->read; flush_to_ldisc() 475 buf->head = next; flush_to_ldisc() 476 tty_buffer_free(port, head); flush_to_ldisc() 480 count = receive_buf(tty, head, count); flush_to_ldisc() 521 buf->head = &buf->sentinel; tty_buffer_init()
|
/linux-4.1.27/net/batman-adv/ |
H A D | hash.h | 60 struct hlist_head *head; batadv_hash_delete() local 66 head = &hash->table[i]; batadv_hash_delete() 70 hlist_for_each_safe(node, node_tmp, head) { hlist_for_each_safe() 123 struct hlist_head *head; batadv_hash_add() local 131 head = &hash->table[index]; batadv_hash_add() 136 hlist_for_each(node, head) { hlist_for_each() 145 hlist_add_head_rcu(data_node, head); 167 struct hlist_head *head; batadv_hash_remove() local 171 head = &hash->table[index]; batadv_hash_remove() 174 hlist_for_each(node, head) { hlist_for_each()
|
/linux-4.1.27/kernel/bpf/ |
H A D | hashtab.c | 108 static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash, lookup_elem_raw() argument 113 hlist_for_each_entry_rcu(l, head, hash_node) lookup_elem_raw() 124 struct hlist_head *head; htab_map_lookup_elem() local 135 head = select_bucket(htab, hash); htab_map_lookup_elem() 137 l = lookup_elem_raw(head, hash, key, key_size); htab_map_lookup_elem() 149 struct hlist_head *head; htab_map_get_next_key() local 160 head = select_bucket(htab, hash); htab_map_get_next_key() 163 l = lookup_elem_raw(head, hash, key, key_size); htab_map_get_next_key() 187 head = select_bucket(htab, i); htab_map_get_next_key() 190 next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), htab_map_get_next_key() 209 struct hlist_head *head; htab_map_update_elem() local 235 head = select_bucket(htab, l_new->hash); htab_map_update_elem() 237 l_old = lookup_elem_raw(head, l_new->hash, key, key_size); htab_map_update_elem() 259 /* add new element to the head of the list, so that concurrent htab_map_update_elem() 262 hlist_add_head_rcu(&l_new->hash_node, head); htab_map_update_elem() 282 struct hlist_head *head; htab_map_delete_elem() local 296 head = select_bucket(htab, hash); htab_map_delete_elem() 298 l = lookup_elem_raw(head, hash, key, key_size); htab_map_delete_elem() 316 struct hlist_head *head = select_bucket(htab, i); delete_all_elements() local 320 hlist_for_each_entry_safe(l, n, head, hash_node) { hlist_for_each_entry_safe()
|
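lookup_elem_raw() above scans a single bucket's chain, comparing the cached full hash before the more expensive key memcmp. A minimal non-RCU model of that bucket scan; struct elem and lookup are illustrative, where the kernel walks hlist nodes under RCU:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct elem {
        struct elem *next;
        uint32_t     hash;                 /* full hash, cached at insert */
        char         key[8];
        int          value;
    };

    static struct elem *lookup(struct elem *bucket_head, uint32_t hash,
                               const void *key, size_t key_size)
    {
        for (struct elem *l = bucket_head; l; l = l->next)
            if (l->hash == hash && !memcmp(l->key, key, key_size))
                return l;                  /* cheap hash check first, then key */
        return NULL;
    }

    int main(void)
    {
        struct elem b = { NULL, 0xfeed, "beta",  2 };
        struct elem a = { &b,   0xcafe, "alpha", 1 };
        assert(lookup(&a, 0xfeed, "beta", 5)->value == 2);
        assert(lookup(&a, 0xcafe, "nope", 5) == NULL);
        return 0;
    }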
/linux-4.1.27/Documentation/mic/mpssd/ |
H A D | micctrl | 42 if [ "`echo $1 | head -c3`" == "mic" ]; then 63 if [ "`echo $1 | head -c3`" == "mic" ]; then 87 if [ "`echo $1 | head -c3`" == "mic" ]; then 108 if [ "`echo $1 | head -c3`" == "mic" ]; then 133 if [ "`echo $1 | head -c3`" == "mic" ]; then
|
/linux-4.1.27/drivers/firmware/efi/ |
H A D | efi-pstore.c | 108 * @head: list head 112 struct list_head *head) efi_pstore_scan_sysfs_enter() 115 if (&next->list != head) efi_pstore_scan_sysfs_enter() 140 * @head: list head 145 struct list_head *head, bool stop) efi_pstore_scan_sysfs_exit() 149 __efi_pstore_scan_sysfs_exit(next, &next->list != head); efi_pstore_scan_sysfs_exit() 169 struct list_head *head = &efivar_sysfs_list; efi_pstore_sysfs_entry_iter() local 173 list_for_each_entry_safe(entry, n, head, list) { list_for_each_entry_safe() 174 efi_pstore_scan_sysfs_enter(entry, n, head); list_for_each_entry_safe() 177 efi_pstore_scan_sysfs_exit(entry, n, head, size < 0); list_for_each_entry_safe() 185 list_for_each_entry_safe_from((*pos), n, head, list) { list_for_each_entry_safe_from() 186 efi_pstore_scan_sysfs_enter((*pos), n, head); list_for_each_entry_safe_from() 189 efi_pstore_scan_sysfs_exit((*pos), n, head, size < 0); list_for_each_entry_safe_from() 110 efi_pstore_scan_sysfs_enter(struct efivar_entry *pos, struct efivar_entry *next, struct list_head *head) efi_pstore_scan_sysfs_enter() argument 143 efi_pstore_scan_sysfs_exit(struct efivar_entry *pos, struct efivar_entry *next, struct list_head *head, bool stop) efi_pstore_scan_sysfs_exit() argument
|
/linux-4.1.27/drivers/net/ethernet/intel/fm10k/ |
H A D | fm10k_mbx.c | 33 fifo->head = 0; fm10k_fifo_init() 45 return fifo->tail - fifo->head; fm10k_fifo_used() 56 return fifo->size + fifo->head - fifo->tail; fm10k_fifo_unused() 67 return fifo->head == fifo->tail; fm10k_fifo_empty() 71 * fm10k_fifo_head_offset - returns indices of head with given offset 73 * @offset: offset to add to head 75 * This function returns the indices into the fifo based on head + offset 79 return (fifo->head + offset) & (fifo->size - 1); fm10k_fifo_head_offset() 102 u32 *head = fifo->buffer + fm10k_fifo_head_offset(fifo, 0); fm10k_fifo_head_len() local 104 /* verify there is at least 1 DWORD in the fifo so *head is valid */ fm10k_fifo_head_len() 109 return FM10K_TLV_DWORD_LEN(*head); fm10k_fifo_head_len() 122 /* update head so it is at the start of next frame */ fm10k_fifo_head_drop() 123 fifo->head += len; fm10k_fifo_head_drop() 132 * This function resets the head pointer to drop all messages in the FIFO, 137 fifo->head = fifo->tail; fm10k_fifo_drop_all() 141 * fm10k_mbx_index_len - Convert a head/tail index into a length value 143 * @head: head index 144 * @tail: head index 146 * This function takes the head and tail index and determines the length 149 static u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail) fm10k_mbx_index_len() argument 151 u16 len = tail - head; fm10k_mbx_index_len() 163 * @offset: length to add to head offset 179 * @offset: length to add to head offset 193 * fm10k_mbx_head_add - Determine new head value with added offset 195 * @offset: length to add to head offset 197 * This function takes the local head index and recomputes it for 202 u16 head = (mbx->head + offset + 1) & ((mbx->mbmem_len << 1) - 1); fm10k_mbx_head_add() local 205 return (head > mbx->head) ? --head : ++head; fm10k_mbx_head_add() 209 * fm10k_mbx_head_sub - Determine new head value with subtracted offset 211 * @offset: length to add to head offset 213 * This function takes the local head index and recomputes it for 218 u16 head = (mbx->head - offset - 1) & ((mbx->mbmem_len << 1) - 1); fm10k_mbx_head_sub() local 221 return (head < mbx->head) ? ++head : --head; fm10k_mbx_head_sub() 350 u32 *head = fifo->buffer; fm10k_mbx_write_copy() local 365 head += end; fm10k_mbx_write_copy() 371 for (end = fifo->size - end; len; head = fifo->buffer) { fm10k_mbx_write_copy() 379 fm10k_write_reg(hw, mbmem + tail++, *(head++)); fm10k_mbx_write_copy() 385 * fm10k_mbx_pull_head - Pulls data off of head of Tx FIFO 388 * @head: acknowledgement number last received 391 * head index. It will then pull up to mbmem_len DWORDs off of the 392 * head of the FIFO and will place it in the MBMEM registers 396 struct fm10k_mbx_info *mbx, u16 head) fm10k_mbx_pull_head() 398 u16 mbmem_len, len, ack = fm10k_mbx_index_len(mbx, head, mbx->tail); fm10k_mbx_pull_head() 434 * head and len determines the length to copy. 
442 u16 end, len, head; fm10k_mbx_read_copy() local 444 /* determine data length and mbmem head index */ fm10k_mbx_read_copy() 446 head = fm10k_mbx_head_sub(mbx, len); fm10k_mbx_read_copy() 447 if (head >= mbx->mbmem_len) fm10k_mbx_read_copy() 448 head++; fm10k_mbx_read_copy() 457 /* adjust head to match offset for FIFO */ fm10k_mbx_read_copy() 458 head &= mbx->mbmem_len - 1; fm10k_mbx_read_copy() 459 if (!head) fm10k_mbx_read_copy() 460 head++; fm10k_mbx_read_copy() 463 *(tail++) = fm10k_read_reg(hw, mbmem + head++); fm10k_mbx_read_copy() 487 u16 len, seq = fm10k_mbx_index_len(mbx, mbx->head, tail); fm10k_mbx_push_tail() 494 /* update head and record bytes received */ fm10k_mbx_push_tail() 495 mbx->head = fm10k_mbx_head_add(mbx, len); fm10k_mbx_push_tail() 621 * @head: head index provided by remote mailbox 624 * last head update to the current one. It uses the result of the 628 static void fm10k_mbx_update_local_crc(struct fm10k_mbx_info *mbx, u16 head) fm10k_mbx_update_local_crc() argument 630 u16 len = mbx->tail_len - fm10k_mbx_index_len(mbx, head, mbx->tail); fm10k_mbx_update_local_crc() 633 head = fm10k_fifo_head_offset(&mbx->tx, mbx->pulled); fm10k_mbx_update_local_crc() 636 mbx->local = fm10k_fifo_crc(&mbx->tx, head, len, mbx->local); fm10k_mbx_update_local_crc() 706 * fm10k_mbx_deqeueue_rx - Dequeues the message from the head in the Rx FIFO 722 err = fm10k_tlv_msg_parse(hw, fifo->buffer + fifo->head, fm10k_mbx_dequeue_rx() 733 /* shift head and tail based on the memory we moved */ fm10k_mbx_dequeue_rx() 734 fifo->tail -= fifo->head; fm10k_mbx_dequeue_rx() 735 fifo->head = 0; fm10k_mbx_dequeue_rx() 852 FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD) | fm10k_mbx_create_connect_hdr() 866 FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); fm10k_mbx_create_data_hdr() 892 FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); fm10k_mbx_create_disconnect_hdr() 929 FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); fm10k_mbx_create_error_msg() 943 u16 type, rsvd0, head, tail, size; fm10k_mbx_validate_msg_hdr() local 949 head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); fm10k_mbx_validate_msg_hdr() 958 if (tail != mbx->head) fm10k_mbx_validate_msg_hdr() 963 /* validate that head is moving correctly */ fm10k_mbx_validate_msg_hdr() 964 if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) fm10k_mbx_validate_msg_hdr() 966 if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) fm10k_mbx_validate_msg_hdr() 972 if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) fm10k_mbx_validate_msg_hdr() 983 if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) fm10k_mbx_validate_msg_hdr() 998 * fm10k_mbx_create_reply - Generate reply based on state and remote head 1000 * @head: acknowledgement number 1003 * mailbox state and the remote fifo head. It will return the length 1008 struct fm10k_mbx_info *mbx, u16 head) fm10k_mbx_create_reply() 1014 fm10k_mbx_update_local_crc(mbx, head); fm10k_mbx_create_reply() 1017 fm10k_mbx_pull_head(hw, mbx, head); fm10k_mbx_create_reply() 1058 mbx->rx.head = 0; fm10k_mbx_reset_work() 1067 * at the head of the Tx FIFO if they are larger than max_size. 
It does not 1124 u16 size, head; fm10k_mbx_process_connect() local 1128 head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); fm10k_mbx_process_connect() 1151 /* align our tail index to remote head index */ fm10k_mbx_process_connect() 1152 mbx->tail = head; fm10k_mbx_process_connect() 1154 return fm10k_mbx_create_reply(hw, mbx, head); fm10k_mbx_process_connect() 1169 u16 head, tail; fm10k_mbx_process_data() local 1173 head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); fm10k_mbx_process_data() 1178 mbx->tail = head; fm10k_mbx_process_data() 1195 return fm10k_mbx_create_reply(hw, mbx, head); fm10k_mbx_process_data() 1211 u16 head; fm10k_mbx_process_disconnect() local 1215 head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); fm10k_mbx_process_disconnect() 1221 /* we have already verified mbx->head == tail so we know this is 0 */ fm10k_mbx_process_disconnect() 1236 /* verify the head indicates we completed all transmits */ fm10k_mbx_process_disconnect() 1237 if (head != mbx->tail) fm10k_mbx_process_disconnect() 1247 return fm10k_mbx_create_reply(hw, mbx, head); fm10k_mbx_process_disconnect() 1263 u16 head; fm10k_mbx_process_error() local 1266 head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); fm10k_mbx_process_error() 1283 mbx->tail = head; fm10k_mbx_process_error() 1525 * evenly splitting it. In order to allow for easy masking of head/tail 1567 /* initialize tail and head */ fm10k_pfvf_mbx_init() 1569 mbx->head = 1; fm10k_pfvf_mbx_init() 1610 FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD); fm10k_sm_mbx_create_data_hdr() 1627 FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD) | fm10k_sm_mbx_create_connect_hdr() 1646 /* initialize tail and head */ fm10k_sm_mbx_connect_reset() 1648 mbx->head = 1; fm10k_sm_mbx_connect_reset() 1749 u16 tail, head, ver; fm10k_sm_mbx_validate_fifo_hdr() local 1753 head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD); fm10k_sm_mbx_validate_fifo_hdr() 1759 if (!head || head > FM10K_SM_MBX_FIFO_LEN) fm10k_sm_mbx_validate_fifo_hdr() 1763 if (mbx->tail < head) fm10k_sm_mbx_validate_fifo_hdr() 1764 head += mbx->mbmem_len - 1; fm10k_sm_mbx_validate_fifo_hdr() 1765 if (tail < mbx->head) fm10k_sm_mbx_validate_fifo_hdr() 1767 if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) fm10k_sm_mbx_validate_fifo_hdr() 1769 if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) fm10k_sm_mbx_validate_fifo_hdr() 1859 /* push tail in front of head */ fm10k_sm_mbx_receive() 1860 if (tail < mbx->head) fm10k_sm_mbx_receive() 1871 /* guarantee head aligns with the end of the last message */ fm10k_sm_mbx_receive() 1872 mbx->head = fm10k_mbx_head_sub(mbx, mbx->pushed); fm10k_sm_mbx_receive() 1876 if (mbx->head > mbmem_len) fm10k_sm_mbx_receive() 1877 mbx->head -= mbmem_len; fm10k_sm_mbx_receive() 1891 struct fm10k_mbx_info *mbx, u16 head) fm10k_sm_mbx_transmit() 1899 /* push head behind tail */ fm10k_sm_mbx_transmit() 1900 if (mbx->tail < head) fm10k_sm_mbx_transmit() 1901 head += mbmem_len; fm10k_sm_mbx_transmit() 1903 fm10k_mbx_pull_head(hw, mbx, head); fm10k_sm_mbx_transmit() 1924 * fm10k_sm_mbx_create_reply - Generate reply based on state and remote head 1926 * @head: acknowledgement number 1929 * mailbox state and the remote fifo head. 
It will return the length 1934 struct fm10k_mbx_info *mbx, u16 head) fm10k_sm_mbx_create_reply() 1940 fm10k_sm_mbx_transmit(hw, mbx, head); fm10k_sm_mbx_create_reply() 2008 u16 head, tail; fm10k_sm_mbx_process_version_1() local 2013 head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD); fm10k_sm_mbx_process_version_1() 2035 fm10k_sm_mbx_create_reply(hw, mbx, head); fm10k_sm_mbx_process_version_1() 395 fm10k_mbx_pull_head(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, u16 head) fm10k_mbx_pull_head() argument 1007 fm10k_mbx_create_reply(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, u16 head) fm10k_mbx_create_reply() argument 1890 fm10k_sm_mbx_transmit(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, u16 head) fm10k_sm_mbx_transmit() argument 1933 fm10k_sm_mbx_create_reply(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, u16 head) fm10k_sm_mbx_create_reply() argument
|
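fm10k_mbx_index_len() above recovers a transfer length from two wrapping mailbox indices, and fm10k_mbx_head_add()/head_sub() then step the head while skipping the reserved index values. For a plain power-of-two ring the same recovery is a subtract-and-mask. A generic sketch, deliberately simpler than fm10k's scheme, which additionally skips index 0 and the all-ones index; index_len and RING_SIZE are illustrative:

    #include <assert.h>
    #include <stdint.h>

    #define RING_SIZE 256u                 /* must be a power of two */

    /* Distance from head to tail on a ring of RING_SIZE positions. */
    static uint16_t index_len(uint16_t head, uint16_t tail)
    {
        return (uint16_t)((tail - head) & (RING_SIZE - 1));
    }

    int main(void)
    {
        assert(index_len(10, 14) == 4);    /* no wrap                */
        assert(index_len(250, 2) == 8);    /* tail wrapped past zero */
        assert(index_len(7, 7)   == 0);    /* equal indices: empty   */
        return 0;
    }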
/linux-4.1.27/fs/ |
H A D | seq_file.c | 758 struct list_head *seq_list_start(struct list_head *head, loff_t pos) seq_list_start() argument 762 list_for_each(lh, head) seq_list_start() 770 struct list_head *seq_list_start_head(struct list_head *head, loff_t pos) seq_list_start_head() argument 773 return head; seq_list_start_head() 775 return seq_list_start(head, pos - 1); seq_list_start_head() 779 struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos) seq_list_next() argument 785 return lh == head ? NULL : lh; seq_list_next() 791 * @head: the head of the hlist 796 struct hlist_node *seq_hlist_start(struct hlist_head *head, loff_t pos) seq_hlist_start() argument 800 hlist_for_each(node, head) seq_hlist_start() 809 * @head: the head of the hlist 815 struct hlist_node *seq_hlist_start_head(struct hlist_head *head, loff_t pos) seq_hlist_start_head() argument 820 return seq_hlist_start(head, pos - 1); seq_hlist_start_head() 827 * @head: the head of the hlist 832 struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head, seq_hlist_next() argument 839 return head->first; seq_hlist_next() 847 * @head: the head of the hlist 856 struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head, seq_hlist_start_rcu() argument 861 __hlist_for_each_rcu(node, head) seq_hlist_start_rcu() 870 * @head: the head of the hlist 880 struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head, seq_hlist_start_head_rcu() argument 886 return seq_hlist_start_rcu(head, pos - 1); seq_hlist_start_head_rcu() 893 * @head: the head of the hlist 903 struct hlist_head *head, seq_hlist_next_rcu() 910 return rcu_dereference(head->first); seq_hlist_next_rcu() 918 * @head: pointer to percpu array of struct hlist_heads 925 seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos) seq_hlist_start_percpu() argument 930 hlist_for_each(node, per_cpu_ptr(head, *cpu)) { hlist_for_each() 942 * @head: pointer to percpu array of struct hlist_heads 949 seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, seq_hlist_next_percpu() argument 961 struct hlist_head *bucket = per_cpu_ptr(head, *cpu); seq_hlist_next_percpu() 902 seq_hlist_next_rcu(void *v, struct hlist_head *head, loff_t *ppos) seq_hlist_next_rcu() argument
|
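seq_list_start() above positions a /proc sequence iterator by walking pos nodes forward from the list head, and seq_list_next() then advances one node, returning NULL at the head sentinel so the seq_file core knows the sequence ended. A tiny model of the positioning walk, on a singly linked list for brevity; nth_node is an illustrative name:

    #include <assert.h>
    #include <stddef.h>

    struct node { struct node *next; int v; };

    /* Return the node 'pos' hops from head, or NULL past the end:
     * the shape of seq_list_start()'s walk. */
    static struct node *nth_node(struct node *head, long pos)
    {
        struct node *n = head;
        while (n && pos--)
            n = n->next;
        return n;
    }

    int main(void)
    {
        struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
        assert(nth_node(&a, 0)->v == 1);
        assert(nth_node(&a, 2)->v == 3);
        assert(nth_node(&a, 3) == NULL);
        return 0;
    }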
/linux-4.1.27/sound/core/seq/ |
H A D | seq_fifo.c | 55 f->head = NULL; snd_seq_fifo_new() 136 if (f->head == NULL) snd_seq_fifo_event_in() 137 f->head = cell; snd_seq_fifo_event_in() 156 if ((cell = f->head) != NULL) { fifo_cell_out() 157 f->head = cell->next; fifo_cell_out() 215 cell->next = f->head; snd_seq_fifo_cell_putback() 216 f->head = cell; snd_seq_fifo_cell_putback() 253 oldhead = f->head; snd_seq_fifo_resize() 256 f->head = NULL; snd_seq_fifo_resize()
|
H A D | seq_prioq.h | 30 struct snd_seq_event_cell *head; /* pointer to head of prioq */ member in struct:snd_seq_prioq 52 /* peek at cell at the head of the prioq */
|
H A D | seq_prioq.c | 66 f->head = NULL; snd_seq_prioq_new() 180 cur = f->head; /* cursor */ snd_seq_prioq_cell_in() 208 if (f->head == cur) /* this is the first cell, set head to it */ snd_seq_prioq_cell_in() 209 f->head = cell; snd_seq_prioq_cell_in() 229 cell = f->head; snd_seq_prioq_cell_out() 231 f->head = cell->next; snd_seq_prioq_cell_out() 256 /* peek at cell at the head of the prioq */ snd_seq_prioq_cell_peek() 263 return f->head; snd_seq_prioq_cell_peek() 299 cell = f->head; snd_seq_prioq_leave() 304 if (cell == f->head) { snd_seq_prioq_leave() 305 f->head = cell->next; snd_seq_prioq_leave() 409 cell = f->head; snd_seq_prioq_remove_events() 417 if (cell == f->head) { snd_seq_prioq_remove_events() 418 f->head = cell->next; snd_seq_prioq_remove_events()
|
/linux-4.1.27/drivers/input/joystick/iforce/ |
H A D | iforce-packets.c | 55 int head, tail; iforce_send_packet() local 59 * Update head and tail of xmit buffer iforce_send_packet() 63 head = iforce->xmit.head; iforce_send_packet() 67 if (CIRC_SPACE(head, tail, XMIT_SIZE) < n+2) { iforce_send_packet() 74 empty = head == tail; iforce_send_packet() 75 XMIT_INC(iforce->xmit.head, n+2); iforce_send_packet() 80 iforce->xmit.buf[head] = HI(cmd); iforce_send_packet() 81 XMIT_INC(head, 1); iforce_send_packet() 82 iforce->xmit.buf[head] = LO(cmd); iforce_send_packet() 83 XMIT_INC(head, 1); iforce_send_packet() 85 c = CIRC_SPACE_TO_END(head, tail, XMIT_SIZE); iforce_send_packet() 88 memcpy(&iforce->xmit.buf[head], iforce_send_packet() 96 XMIT_INC(head, n); iforce_send_packet()
|
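iforce_send_packet() above sizes its copies with CIRC_SPACE()/CIRC_SPACE_TO_END(), the <linux/circ_buf.h> helpers for power-of-two rings: total free space, and the free space before the buffer edge that forces a second memcpy after the wrap. A userspace restatement of the two computations, following the kernel macros for a power-of-two size; SIZE here is an illustrative stand-in for XMIT_SIZE:

    #include <assert.h>

    #define SIZE 16                        /* power of two, like XMIT_SIZE */

    static int circ_cnt(int head, int tail)   { return (head - tail) & (SIZE - 1); }
    static int circ_space(int head, int tail) { return circ_cnt(tail, head + 1); }

    /* Free space between head and the end of the buffer, without wrapping. */
    static int circ_space_to_end(int head, int tail)
    {
        int end = SIZE - 1 - head;
        int n   = (end + tail) & (SIZE - 1);
        return n <= end ? n : end + 1;
    }

    int main(void)
    {
        assert(circ_space(0, 0) == SIZE - 1);  /* one slot kept unused   */
        assert(circ_space(14, 3) == 4);
        assert(circ_space_to_end(14, 3) == 2); /* 2 now, 2 more after wrap */
        return 0;
    }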
/linux-4.1.27/fs/9p/ |
H A D | vfs_dir.c | 46 * @head: start offset of current dirread buffer 55 int head; member in struct:p9_rdir 133 if (rdir->tail == rdir->head) { v9fs_dir_readdir() 144 rdir->head = 0; v9fs_dir_readdir() 147 while (rdir->head < rdir->tail) { v9fs_dir_readdir() 149 err = p9stat_read(fid->clnt, rdir->buf + rdir->head, v9fs_dir_readdir() 150 rdir->tail - rdir->head, &st); v9fs_dir_readdir() 164 rdir->head += reclen; v9fs_dir_readdir() 194 if (rdir->tail == rdir->head) { v9fs_dir_readdir_dotl() 200 rdir->head = 0; v9fs_dir_readdir_dotl() 204 while (rdir->head < rdir->tail) { v9fs_dir_readdir_dotl() 206 err = p9dirent_read(fid->clnt, rdir->buf + rdir->head, v9fs_dir_readdir_dotl() 207 rdir->tail - rdir->head, v9fs_dir_readdir_dotl() 221 rdir->head += err; v9fs_dir_readdir_dotl()
|
/linux-4.1.27/fs/nfs/ |
H A D | write.c | 101 * nfs_page_find_head_request_locked - find head request associated with @page 105 * returns matching head request with reference held, or NULL if not found. 127 * nfs_page_find_head_request - find head request associated with @page 129 * returns matching head request with reference held, or NULL if not found. 171 * @head - head request of page group 174 * Search page group with head @head to find a request that contains the 183 nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset) nfs_page_group_search_locked() argument 187 WARN_ON_ONCE(head != head->wb_head); nfs_page_group_search_locked() 188 WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_head->wb_flags)); nfs_page_group_search_locked() 190 req = head; nfs_page_group_search_locked() 197 } while (req != head); nfs_page_group_search_locked() 204 * @head - head request of page group 206 * Return true if the page group with head @head covers the whole page, 314 * @head - head request of page group, must be holding head lock 324 nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head, 332 for (tmp = head ; tmp != req; tmp = tmp->wb_this_page) 340 nfs_page_group_unlock(head); variable 344 nfs_release_request(head); variable 359 * @old_head - the old head of the list 386 /* release ref on old head request */ nfs_destroy_unlinked_subrequests() 409 * nfs_lock_and_join_requests - join all subreqs to the head req and return 416 * This function joins all sub requests to the head request by first 418 * and finally updating the head request to cover the whole range covered by 422 * Returns a locked, referenced pointer to the head request - which after 431 struct nfs_page *head, *subreq; nfs_lock_and_join_requests() local 444 * A reference is taken only on the head request which acts as a nfs_lock_and_join_requests() 446 * until the head reference is released. 
nfs_lock_and_join_requests() 448 head = nfs_page_find_head_request_locked(NFS_I(inode), page); nfs_lock_and_join_requests() 450 if (!head) { nfs_lock_and_join_requests() 457 ret = nfs_page_group_lock(head, true); nfs_lock_and_join_requests() 462 nfs_page_group_lock_wait(head); nfs_lock_and_join_requests() 463 nfs_release_request(head); nfs_lock_and_join_requests() 467 nfs_release_request(head); nfs_lock_and_join_requests() 472 subreq = head; nfs_lock_and_join_requests() 478 if (subreq->wb_offset == (head->wb_offset + total_bytes)) { nfs_lock_and_join_requests() 481 } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset || nfs_lock_and_join_requests() 483 (head->wb_offset + total_bytes)))) { nfs_lock_and_join_requests() 484 nfs_page_group_unlock(head); nfs_lock_and_join_requests() 492 ret = nfs_unroll_locks_and_wait(inode, head, nfs_lock_and_join_requests() 502 } while (subreq != head); nfs_lock_and_join_requests() 506 subreq = head; nfs_lock_and_join_requests() 510 } while (subreq != head); nfs_lock_and_join_requests() 512 /* unlink subrequests from head, destroy them later */ nfs_lock_and_join_requests() 513 if (head->wb_this_page != head) { nfs_lock_and_join_requests() 514 /* destroy list will be terminated by head */ nfs_lock_and_join_requests() 515 destroy_list = head->wb_this_page; nfs_lock_and_join_requests() 516 head->wb_this_page = head; nfs_lock_and_join_requests() 518 /* change head request to cover whole range that nfs_lock_and_join_requests() 520 head->wb_bytes = total_bytes; nfs_lock_and_join_requests() 524 * prepare head request to be added to new pgio descriptor nfs_lock_and_join_requests() 526 nfs_page_group_clear_bits(head); nfs_lock_and_join_requests() 531 * grab a reference for the head request, iff it needs one. nfs_lock_and_join_requests() 533 if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags)) nfs_lock_and_join_requests() 534 kref_get(&head->wb_kref); nfs_lock_and_join_requests() 536 nfs_page_group_unlock(head); nfs_lock_and_join_requests() 541 nfs_destroy_unlinked_subrequests(destroy_list, head); nfs_lock_and_join_requests() 543 /* still holds ref on head from nfs_page_find_head_request_locked nfs_lock_and_join_requests() 544 * and still has lock on head from lock loop */ nfs_lock_and_join_requests() 545 return head; nfs_lock_and_join_requests() 691 /* this a head request for a page group - mark it as having an nfs_inode_add_request() 707 struct nfs_page *head; nfs_inode_remove_request() local 710 head = req->wb_head; nfs_inode_remove_request() 713 if (likely(!PageSwapCache(head->wb_page))) { nfs_inode_remove_request() 714 set_page_private(head->wb_page, 0); nfs_inode_remove_request() 715 ClearPagePrivate(head->wb_page); nfs_inode_remove_request() 717 wake_up_page(head->wb_page, PG_private); nfs_inode_remove_request() 718 clear_bit(PG_MAPPED, &head->wb_flags); nfs_inode_remove_request() 741 * Search through commit lists on @inode for the head request for @page. 744 * Returns the head request if found, or NULL if not found. 
773 * @dst: commit list head 1285 /* If a nfs_flush_* function fails, it should remove reqs from @head and 1298 static void nfs_async_write_error(struct list_head *head) nfs_async_write_error() argument 1302 while (!list_empty(head)) { nfs_async_write_error() 1303 req = nfs_list_entry(head->next); nfs_async_write_error() 1580 static loff_t nfs_get_lwb(struct list_head *head) nfs_get_lwb() argument 1585 list_for_each_entry(req, head, wb_list) nfs_get_lwb() 1596 struct list_head *head, nfs_init_commit() 1600 struct nfs_page *first = nfs_list_entry(head->next); nfs_init_commit() 1606 list_splice_init(head, &data->pages); nfs_init_commit() 1651 nfs_commit_list(struct inode *inode, struct list_head *head, int how, nfs_commit_list() argument 1662 nfs_init_commit(data, head, NULL, cinfo); nfs_commit_list() 1667 nfs_retry_commit(head, NULL, cinfo, 0); nfs_commit_list() 1753 int nfs_generic_commit_list(struct inode *inode, struct list_head *head, nfs_generic_commit_list() argument 1758 status = pnfs_commit_list(inode, head, how, cinfo); nfs_generic_commit_list() 1760 status = nfs_commit_list(inode, head, how, cinfo); nfs_generic_commit_list() 1766 LIST_HEAD(head); nfs_commit_inode() 1775 res = nfs_scan_commit(inode, &head, &cinfo); nfs_commit_inode() 1779 error = nfs_generic_commit_list(inode, &head, how, &cinfo); nfs_commit_inode() 1876 /* blocking call to cancel all requests and join to a single (head) nfs_wb_page_cancel() 1884 * nfs_lock_and_join_requests, so just remove the head nfs_wb_page_cancel() 1595 nfs_init_commit(struct nfs_commit_data *data, struct list_head *head, struct pnfs_layout_segment *lseg, struct nfs_commit_info *cinfo) nfs_init_commit() argument
|
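The nfs_lock_and_join_requests() hits above show the page-group walk: subrequests hang off the head in a circular singly linked ring (wb_this_page), and the loop both verifies the ring is contiguous and sums total_bytes before the head is widened to cover the whole range. A minimal userspace sketch of that walk, with illustrative struct and field names rather than the real nfs_page layout:

    /* Sketch of the circular page-group walk in nfs_lock_and_join_requests():
     * subrequests form a singly linked ring threaded through a next pointer,
     * terminated by arriving back at the head. */
    #include <stdio.h>

    struct subreq {
        unsigned int offset;    /* byte offset of this subrequest */
        unsigned int bytes;     /* length of this subrequest */
        struct subreq *next;    /* wb_this_page analogue: circular link */
    };

    /* Returns the joined length, or 0 if the ring is not contiguous. */
    static unsigned int join_group(struct subreq *head)
    {
        struct subreq *sub = head;
        unsigned int total = 0;

        do {
            if (sub->offset != head->offset + total)
                return 0;       /* hole or overlap: cannot join */
            total += sub->bytes;
            sub = sub->next;
        } while (sub != head);

        return total;
    }

    int main(void)
    {
        struct subreq c = { 8, 4, NULL }, b = { 4, 4, &c }, a = { 0, 4, &b };
        c.next = &a;    /* close the ring */
        printf("joined %u bytes\n", join_group(&a));    /* joined 12 bytes */
        return 0;
    }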
/linux-4.1.27/net/sunrpc/ |
H A D | xdr.c | 135 struct kvec *head = xdr->head; xdr_inline_pages() local 137 char *buf = (char *)head->iov_base; xdr_inline_pages() 138 unsigned int buflen = head->iov_len; xdr_inline_pages() 140 head->iov_len = offset; xdr_inline_pages() 309 * @len: bytes to remove from buf->head[0] 311 * Shrinks XDR buffer's header kvec buf->head[0] by 318 struct kvec *head, *tail; xdr_shrink_bufhead() local 323 head = buf->head; xdr_shrink_bufhead() 325 WARN_ON_ONCE(len > head->iov_len); xdr_shrink_bufhead() 326 if (len > head->iov_len) xdr_shrink_bufhead() 327 len = head->iov_len; xdr_shrink_bufhead() 350 /* Do we also need to copy data from the head into the tail ? */ xdr_shrink_bufhead() 356 (char *)head->iov_base + xdr_shrink_bufhead() 357 head->iov_len - offs, xdr_shrink_bufhead() 372 (char *)head->iov_base + head->iov_len - len, xdr_shrink_bufhead() 375 head->iov_len -= len; xdr_shrink_bufhead() 402 tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len; xdr_shrink_pagelen() 462 struct kvec *iov = buf->head; xdr_init_encode() 591 * head, tail, and page lengths are adjusted to correspond. 595 * except in the case of the head buffer when we assume the head 606 struct kvec *head = buf->head; xdr_truncate_encode() local 643 xdr->end = head->iov_base + head->iov_len; xdr_truncate_encode() 647 head->iov_len = len; xdr_truncate_encode() 649 xdr->p = head->iov_base + head->iov_len; xdr_truncate_encode() 650 xdr->iov = buf->head; xdr_truncate_encode() 777 else if (xdr->iov == xdr->buf->head) { xdr_set_next_buffer() 796 if (buf->head[0].iov_len != 0) xdr_init_decode() 797 xdr_set_iov(xdr, buf->head, buf->len); xdr_init_decode() 912 iov = buf->head; xdr_align_pages() 937 * Moves data beyond the current pointer position from the XDR head[] buffer 979 * Moves data beyond the current pointer position from the XDR head[] buffer 1001 buf->head[0] = *iov; xdr_buf_from_iov() 1027 if (base < buf->head[0].iov_len) { xdr_buf_subsegment() 1028 subbuf->head[0].iov_base = buf->head[0].iov_base + base; xdr_buf_subsegment() 1029 subbuf->head[0].iov_len = min_t(unsigned int, len, xdr_buf_subsegment() 1030 buf->head[0].iov_len - base); xdr_buf_subsegment() 1031 len -= subbuf->head[0].iov_len; xdr_buf_subsegment() 1034 base -= buf->head[0].iov_len; xdr_buf_subsegment() 1035 subbuf->head[0].iov_len = 0; xdr_buf_subsegment() 1074 * too small, or if (for instance) it's all in the head and the parser has 1098 if (buf->head[0].iov_len) { xdr_buf_trim() 1099 cur = min_t(size_t, buf->head[0].iov_len, trim); xdr_buf_trim() 1100 buf->head[0].iov_len -= cur; xdr_buf_trim() 1112 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len); __read_bytes_from_xdr_buf() 1113 memcpy(obj, subbuf->head[0].iov_base, this_len); __read_bytes_from_xdr_buf() 1143 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len); __write_bytes_to_xdr_buf() 1144 memcpy(subbuf->head[0].iov_base, obj, this_len); __write_bytes_to_xdr_buf() 1194 * entirely in the head or the tail, set object to point to it; otherwise 1206 /* Is the obj contained entirely in the head? 
*/ xdr_buf_read_netobj() 1207 obj->data = subbuf.head[0].iov_base; xdr_buf_read_netobj() 1208 if (subbuf.head[0].iov_len == obj->len) xdr_buf_read_netobj() 1225 obj->data = buf->head[0].iov_base + buf->head[0].iov_len; xdr_buf_read_netobj() 1258 /* process head */ xdr_xcode_array2() 1259 if (todo && base < buf->head->iov_len) { xdr_xcode_array2() 1260 c = buf->head->iov_base + base; xdr_xcode_array2() 1262 buf->head->iov_len - base); xdr_xcode_array2() 1288 base = buf->head->iov_len; /* align to start of pages */ xdr_xcode_array2() 1292 base -= buf->head->iov_len; xdr_xcode_array2() 1442 buf->head->iov_len + buf->page_len + buf->tail->iov_len) xdr_encode_array2() 1459 if (offset >= buf->head[0].iov_len) { xdr_process_buf() 1460 offset -= buf->head[0].iov_len; xdr_process_buf() 1462 thislen = buf->head[0].iov_len - offset; xdr_process_buf() 1465 sg_set_buf(sg, buf->head[0].iov_base + offset, thislen); xdr_process_buf()
|
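Most of the xdr.c hits above manipulate the three-part struct xdr_buf: an inline head kvec, a run of pages, and a tail kvec, with helpers such as xdr_buf_subsegment() translating a flat (base, len) range onto those parts. A reduced sketch of the head-kvec arithmetic, under the assumption that only the head case matters (pages and tail elided, struct and function names illustrative):

    /* Sketch of the head/pages/tail split: carving a subrange starts in the
     * head kvec and any remainder falls through to the page area, mirroring
     * the base/len arithmetic in xdr_buf_subsegment(). */
    #include <stddef.h>

    struct kvec_s { void *iov_base; size_t iov_len; };

    struct xbuf {
        struct kvec_s head;     /* first chunk, inline data */
        size_t page_len;        /* bytes carried in the page area */
        struct kvec_s tail;     /* trailing chunk */
    };

    /* Fill *sub with the part of [base, base+len) that lives in buf->head. */
    static size_t sub_head(const struct xbuf *buf, size_t base, size_t len,
                           struct kvec_s *sub)
    {
        if (base < buf->head.iov_len) {
            sub->iov_base = (char *)buf->head.iov_base + base;
            sub->iov_len = len < buf->head.iov_len - base ?
                           len : buf->head.iov_len - base;
            return sub->iov_len;    /* remainder continues in pages */
        }
        sub->iov_base = NULL;
        sub->iov_len = 0;
        return 0;
    }

    int main(void)
    {
        char hdr[16] = "rpcheaderbytes!";
        struct xbuf buf = { { hdr, 12 }, 4096, { NULL, 0 } };
        struct kvec_s sub;
        size_t got = sub_head(&buf, 4, 20, &sub);
        /* got == 8: bytes 4..11 live in the head, the rest in pages */
        return got == 8 ? 0 : 1;
    }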
H A D | svcauth.c | 44 flavor = svc_getnl(&rqstp->rq_arg.head[0]); svc_authenticate() 142 struct hlist_head *head; auth_domain_lookup() local 144 head = &auth_domain_table[hash_str(name, DN_HASHBITS)]; auth_domain_lookup() 148 hlist_for_each_entry(hp, head, hash) { hlist_for_each_entry() 156 hlist_add_head(&new->hash, head);
|
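auth_domain_lookup() above is a textbook hashed-bucket lookup: hash the name, scan one bucket for a match, and insert at the bucket head when asked to. A compact sketch with an ordinary singly linked list standing in for the kernel's hlist (the hash function and names here are placeholders, not the kernel's hash_str()):

    #include <string.h>

    #define DN_HASHBITS 6
    #define DN_HASHMAX (1 << DN_HASHBITS)

    struct domain {
        const char *name;
        struct domain *next;
    };

    static struct domain *table[DN_HASHMAX];

    static unsigned int hash_name(const char *s)
    {
        unsigned int h = 0;
        while (*s)
            h = h * 31 + (unsigned char)*s++;
        return h & (DN_HASHMAX - 1);
    }

    /* Return the existing entry, or link in @new at the bucket head. */
    static struct domain *domain_lookup(const char *name, struct domain *new)
    {
        struct domain **head = &table[hash_name(name)];
        struct domain *d;

        for (d = *head; d; d = d->next)
            if (!strcmp(d->name, name))
                return d;
        if (new) {
            new->next = *head;
            *head = new;
        }
        return new;
    }

    int main(void)
    {
        struct domain d1 = { "nfsd", NULL }, d2 = { "lockd", NULL };
        domain_lookup("nfsd", &d1);
        domain_lookup("lockd", &d2);
        return domain_lookup("nfsd", NULL) == &d1 ? 0 : 1;
    }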
/linux-4.1.27/kernel/power/ |
H A D | console.c | 22 struct list_head head; member in struct:pm_vt_switch 50 list_for_each_entry(tmp, &pm_vt_switch_list, head) { pm_vt_switch_required() 65 list_add(&entry->head, &pm_vt_switch_list); pm_vt_switch_required() 82 list_for_each_entry(tmp, &pm_vt_switch_list, head) { pm_vt_switch_unregister() 84 list_del(&tmp->head); pm_vt_switch_unregister() 118 list_for_each_entry(entry, &pm_vt_switch_list, head) { pm_vt_switch()
|
/linux-4.1.27/tools/testing/selftests/timers/ |
H A D | clocksource-switch.c | 56 char *head, *tmp; get_clocksources() local 67 head = buf; get_clocksources() 69 while (head - buf < size) { get_clocksources() 71 for (tmp = head; *tmp != ' '; tmp++) { get_clocksources() 78 strcpy(list[i], head); get_clocksources() 79 head = tmp + 1; get_clocksources()
|
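get_clocksources() above tokenizes the space-separated sysfs clocksource list by running a head pointer through the buffer and NUL-terminating each token in place. A self-contained sketch of the same loop, hardened slightly against a missing trailing delimiter (the function name is invented for the example):

    #include <stdio.h>
    #include <string.h>

    static int split_clocksources(char *buf, char *list[], int max)
    {
        char *head = buf, *tmp;
        int i = 0;
        size_t size = strlen(buf);

        while (head - buf < (long)size && i < max) {
            for (tmp = head; *tmp && *tmp != ' ' && *tmp != '\n'; tmp++)
                ;
            *tmp = '\0';        /* terminate the current token */
            if (*head)
                list[i++] = head;
            head = tmp + 1;     /* step past the delimiter */
        }
        return i;
    }

    int main(void)
    {
        char buf[] = "tsc hpet acpi_pm\n";
        char *list[8];
        int n = split_clocksources(buf, list, 8), j;

        for (j = 0; j < n; j++)
            printf("%s\n", list[j]);
        return 0;
    }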
/linux-4.1.27/include/linux/isdn/ |
H A D | capilli.h | 106 void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize); 107 void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci); 108 void capilib_release_appl(struct list_head *head, u16 applid); 109 void capilib_release(struct list_head *head); 110 void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid); 111 u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid);
|
/linux-4.1.27/drivers/input/serio/ |
H A D | serio_raw.c | 33 unsigned int tail, head; member in struct:serio_raw 149 empty = serio_raw->head == serio_raw->tail; serio_raw_fetch_byte() 173 if (serio_raw->head == serio_raw->tail && serio_raw_read() 191 serio_raw->head != serio_raw->tail || serio_raw_read() 251 if (serio_raw->head != serio_raw->tail) serio_raw_poll() 278 unsigned int head = serio_raw->head; serio_raw_interrupt() local 281 serio_raw->queue[head] = data; serio_raw_interrupt() 282 head = (head + 1) % SERIO_RAW_QUEUE_LEN; serio_raw_interrupt() 283 if (likely(head != serio_raw->tail)) { serio_raw_interrupt() 284 serio_raw->head = head; serio_raw_interrupt()
|
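serio_raw above is a classic single-producer byte queue: the interrupt handler writes at head and only publishes the advanced index when it would not catch up with tail, so a full queue drops data instead of overwriting it. A minimal sketch of that head/tail discipline (sizes and names are illustrative, and the single-reader/single-writer assumption is inherited from the original):

    #define QUEUE_LEN 64

    struct byteq {
        unsigned char buf[QUEUE_LEN];
        unsigned int head, tail;
    };

    static int byteq_put(struct byteq *q, unsigned char data)
    {
        unsigned int head = q->head;

        q->buf[head] = data;
        head = (head + 1) % QUEUE_LEN;
        if (head != q->tail) {  /* room left: publish the new head */
            q->head = head;
            return 1;
        }
        return 0;               /* full: byte dropped, as in serio_raw */
    }

    static int byteq_get(struct byteq *q, unsigned char *data)
    {
        if (q->head == q->tail)
            return 0;           /* empty */
        *data = q->buf[q->tail];
        q->tail = (q->tail + 1) % QUEUE_LEN;
        return 1;
    }

    int main(void)
    {
        struct byteq q = { {0}, 0, 0 };
        unsigned char c;

        byteq_put(&q, 'x');
        return byteq_get(&q, &c) && c == 'x' ? 0 : 1;
    }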
/linux-4.1.27/arch/powerpc/platforms/pseries/ |
H A D | hvcserver.c | 69 * @head: list_head pointer for an allocated list of partner info structs to 75 int hvcs_free_partner_info(struct list_head *head) hvcs_free_partner_info() argument 80 if (!head) hvcs_free_partner_info() 83 while (!list_empty(head)) { hvcs_free_partner_info() 84 element = head->next; hvcs_free_partner_info() 111 * @head: An initialized list_head pointer to an empty list to use to return the 129 * hvcs_free_partner_info() using a pointer to the SAME list head instance 132 int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head, hvcs_get_partner_info() argument 147 if (!head || !pi_buff) hvcs_get_partner_info() 151 INIT_LIST_HEAD(head); hvcs_get_partner_info() 161 if (!list_empty(head)) hvcs_get_partner_info() 182 hvcs_free_partner_info(head); hvcs_get_partner_info() 196 list_add_tail(&(next_partner_info->node), head); hvcs_get_partner_info()
|
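The hvcserver hits above document a strict contract: hvcs_get_partner_info() fills a caller-initialized list, and hvcs_free_partner_info() must later be called with the very same list head to pop and free every element. A small sketch of that build-then-drain ownership pattern, with a plain singly linked list in place of struct list_head:

    #include <stdlib.h>

    struct pinfo {
        int unit_address;
        struct pinfo *next;
    };

    static int get_partner_info(struct pinfo **head, int n)
    {
        while (n--) {
            struct pinfo *p = malloc(sizeof(*p));
            if (!p)
                return -1;      /* caller frees whatever was built */
            p->unit_address = n;
            p->next = *head;    /* head insert; the original list_add_tail()s */
            *head = p;
        }
        return 0;
    }

    static void free_partner_info(struct pinfo **head)
    {
        while (*head) {
            struct pinfo *p = *head;
            *head = p->next;
            free(p);
        }
    }

    int main(void)
    {
        struct pinfo *list = NULL;

        get_partner_info(&list, 3);     /* build */
        free_partner_info(&list);       /* drain with the same head */
        return list == NULL ? 0 : 1;
    }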
/linux-4.1.27/fs/btrfs/ |
H A D | delayed-ref.c | 168 /* insert a new ref to head ref rbtree */ htree_insert() 199 * find a head entry based on bytenr. This returns the delayed ref 200 * head if it was able to find one, or NULL if nothing was in that spot. 238 struct btrfs_delayed_ref_head *head) btrfs_delayed_ref_lock() 244 if (mutex_trylock(&head->mutex)) btrfs_delayed_ref_lock() 247 atomic_inc(&head->node.refs); btrfs_delayed_ref_lock() 250 mutex_lock(&head->mutex); btrfs_delayed_ref_lock() 252 if (!head->node.in_tree) { btrfs_delayed_ref_lock() 253 mutex_unlock(&head->mutex); btrfs_delayed_ref_lock() 254 btrfs_put_delayed_ref(&head->node); btrfs_delayed_ref_lock() 257 btrfs_put_delayed_ref(&head->node); btrfs_delayed_ref_lock() 263 struct btrfs_delayed_ref_head *head, drop_delayed_ref() 267 head = btrfs_delayed_node_to_head(ref); drop_delayed_ref() 268 rb_erase(&head->href_node, &delayed_refs->href_root); drop_delayed_ref() 270 assert_spin_locked(&head->lock); drop_delayed_ref() 271 rb_erase(&ref->rb_node, &head->ref_root); drop_delayed_ref() 282 struct btrfs_delayed_ref_head *head, merge_ref() 314 drop_delayed_ref(trans, delayed_refs, head, next); merge_ref() 317 drop_delayed_ref(trans, delayed_refs, head, ref); merge_ref() 334 struct btrfs_delayed_ref_head *head) btrfs_merge_delayed_refs() 339 assert_spin_locked(&head->lock); btrfs_merge_delayed_refs() 344 if (head->is_data) btrfs_merge_delayed_refs() 357 node = rb_first(&head->ref_root); btrfs_merge_delayed_refs() 366 if (merge_ref(trans, delayed_refs, head, ref, seq)) btrfs_merge_delayed_refs() 367 node = rb_first(&head->ref_root); btrfs_merge_delayed_refs() 401 struct btrfs_delayed_ref_head *head; btrfs_select_ref_head() local 409 head = find_ref_head(&delayed_refs->href_root, start, 1); btrfs_select_ref_head() 410 if (!head && !loop) { btrfs_select_ref_head() 414 head = find_ref_head(&delayed_refs->href_root, start, 1); btrfs_select_ref_head() 415 if (!head) btrfs_select_ref_head() 417 } else if (!head && loop) { btrfs_select_ref_head() 421 while (head->processing) { btrfs_select_ref_head() 424 node = rb_next(&head->href_node); btrfs_select_ref_head() 433 head = rb_entry(node, struct btrfs_delayed_ref_head, btrfs_select_ref_head() 437 head->processing = 1; btrfs_select_ref_head() 440 delayed_refs->run_delayed_start = head->node.bytenr + btrfs_select_ref_head() 441 head->node.num_bytes; btrfs_select_ref_head() 442 return head; btrfs_select_ref_head() 456 struct btrfs_delayed_ref_head *head, update_existing_ref() 469 drop_delayed_ref(trans, delayed_refs, head, existing); update_existing_ref() 488 * helper function to update the accounting in the head ref update_existing_head_ref() 509 * with an existing head ref without update_existing_head_ref() 542 * update the reference mod on the head to reflect this new operation, update_existing_head_ref() 564 * helper function to actually insert a head node into the rbtree. 581 * the head node stores the sum of all the mods, so dropping a ref add_delayed_ref_head() 582 * should drop the sum in the head node by one. add_delayed_ref_head() 814 * insert both the head node and the new ref without dropping btrfs_add_delayed_tree_ref() 863 * insert both the head node and the new ref without dropping btrfs_add_delayed_data_ref() 903 * this does a simple search for the head node for a given extent. 905 * the head node if any were found, or NULL if not. 
237 btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head) btrfs_delayed_ref_lock() argument 261 drop_delayed_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head, struct btrfs_delayed_ref_node *ref) drop_delayed_ref() argument 280 merge_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head, struct btrfs_delayed_ref_node *ref, u64 seq) merge_ref() argument 331 btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head) btrfs_merge_delayed_refs() argument 454 update_existing_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head, struct btrfs_delayed_ref_node *existing, struct btrfs_delayed_ref_node *update) update_existing_ref() argument
|
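btrfs_delayed_ref_lock() above shows a common "lock or pin and recheck" shape: trylock the head's mutex; on contention take a reference so the head cannot be freed, block on the mutex, and then confirm the head is still in the rbtree, because it may have been processed while we slept. A pthread sketch of the control flow (the real code takes the reference under a spinlock and uses kref; both are simplified away here, so the refcount in this sketch is not actually atomic):

    #include <pthread.h>
    #include <stdbool.h>

    struct ref_head {
        pthread_mutex_t mutex;
        int refs;               /* kref stand-in; not atomic in this sketch */
        bool in_tree;
    };

    /* Returns true with the mutex held, false if the head went away. */
    bool ref_head_lock(struct ref_head *head)
    {
        if (pthread_mutex_trylock(&head->mutex) == 0)
            return true;

        head->refs++;                   /* pin across the sleep */
        pthread_mutex_lock(&head->mutex);
        if (!head->in_tree) {           /* raced with processing */
            pthread_mutex_unlock(&head->mutex);
            head->refs--;
            return false;
        }
        head->refs--;                   /* drop the temporary pin */
        return true;
    }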
H A D | delayed-ref.h | 25 #define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */ 44 * head refs, this may be a negative number because it is keeping 71 * the head refs are used to hold a lock on a given extent, which allows us 94 * with this head ref, this is not adjusted as delayed refs are run, 132 /* head ref rbtree */ 143 /* total number of head nodes in tree */ 146 /* total number of head nodes ready for processing */ 225 struct btrfs_delayed_ref_head *head); 230 struct btrfs_delayed_ref_head *head); btrfs_delayed_ref_unlock() 231 static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head) btrfs_delayed_ref_unlock() argument 233 mutex_unlock(&head->mutex); btrfs_delayed_ref_unlock() 245 * a node might live in a head or a regular ref, this lets you
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/core/ |
H A D | handle.c | 45 list_for_each_entry(item, &handle->tree, head) { nvkm_handle_init() 55 list_for_each_entry_continue_reverse(item, &handle->tree, head) { nvkm_handle_init() 71 list_for_each_entry(item, &handle->tree, head) { nvkm_handle_fini() 88 list_for_each_entry_continue_reverse(item, &handle->tree, head) { nvkm_handle_fini() 113 INIT_LIST_HEAD(&handle->head); nvkm_handle_create() 140 list_add(&handle->head, &handle->parent->tree); nvkm_handle_create() 156 list_for_each_entry_safe(item, temp, &handle->tree, head) { nvkm_handle_destroy() 159 list_del(&handle->head); nvkm_handle_destroy()
|
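nvkm_handle_init()/nvkm_handle_fini() above use list_for_each_entry_continue_reverse() to unwind: children are initialized in list order and, on failure, the already-initialized ones are torn down in reverse. The same idiom over an array, which keeps the sketch short (the struct and callback names are made up):

    struct item { int id; };

    int init_all(struct item *items, int n,
                 int (*init)(struct item *), void (*fini)(struct item *))
    {
        int i, ret = 0;

        for (i = 0; i < n; i++) {
            ret = init(&items[i]);
            if (ret)
                break;
        }
        if (ret)
            while (--i >= 0)    /* undo, in reverse order */
                fini(&items[i]);
        return ret;
    }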
/linux-4.1.27/drivers/block/drbd/ |
H A D | drbd_nla.c | 8 struct nlattr *head = nla_data(nla); drbd_nla_check_mandatory() local 20 nla_for_each_attr(nla, head, len, rem) { nla_for_each_attr()
|
/linux-4.1.27/arch/score/ |
H A D | Makefile | 28 head-y := arch/score/kernel/head.o
|
/linux-4.1.27/arch/hexagon/ |
H A D | Makefile | 37 head-y := arch/hexagon/kernel/head.o
|
/linux-4.1.27/arch/m32r/ |
H A D | Makefile | 34 head-y := arch/m32r/kernel/head.o
|
/linux-4.1.27/arch/m32r/boot/compressed/ |
H A D | Makefile | 8 vmlinux.bin.lzma head.o misc.o piggy.o vmlinux.lds 10 OBJECTS = $(obj)/head.o $(obj)/misc.o
|
/linux-4.1.27/arch/arm/boot/compressed/ |
H A D | head-xscale.S | 2 * linux/arch/arm/boot/compressed/head-xscale.S 4 * XScale specific tweaks. This is merged into head.S by the linker.
|
/linux-4.1.27/drivers/staging/sm750fb/ |
H A D | sm750_hw.h | 26 sm750_simul_pri,/* primary => all head */ 28 sm750_simul_sec,/* secondary => all head */ 30 sm750_dual_normal,/* primary => panel head and secondary => crt */ 32 sm750_dual_swap,/* primary => crt head and secondary => panel */
|
/linux-4.1.27/arch/sparc/kernel/ |
H A D | sparc_ksyms_32.c | 13 #include <asm/head.h>
|
/linux-4.1.27/arch/microblaze/kernel/ |
H A D | Makefile | 15 extra-y := head.o vmlinux.lds
|
/linux-4.1.27/arch/mips/include/asm/mach-generic/ |
H A D | kernel-entry-init.h | 12 /* Intentionally empty macro, used in head.S. Override in
|
/linux-4.1.27/drivers/md/ |
H A D | multipath.h | 19 * this is our 'private' 'collective' MULTIPATH buffer head.
|
/linux-4.1.27/arch/avr32/ |
H A D | Makefile | 31 head-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/head.o 32 head-y += arch/avr32/kernel/head.o
|
/linux-4.1.27/arch/m68k/hp300/ |
H A D | reboot.S | 7 * good stuff that head.S did when we started up. The caches and MMU must be
|
/linux-4.1.27/net/netfilter/ |
H A D | nf_internals.h | 16 unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
|
/linux-4.1.27/arch/arm64/kernel/ |
H A D | Makefile | 42 head-y := head.o 43 extra-y := $(head-y) vmlinux.lds
|
/linux-4.1.27/arch/m68k/ |
H A D | Makefile | 90 # Select the assembler head startup code. Order is important. The default 91 # head code is first, processor specific selections can override it after. 93 head-y := arch/m68k/kernel/head.o 94 head-$(CONFIG_SUN3) := arch/m68k/kernel/sun3-head.o 95 head-$(CONFIG_M68360) := arch/m68k/68360/head.o 96 head-$(CONFIG_M68000) := arch/m68k/68000/head.o 97 head-$(CONFIG_COLDFIRE) := arch/m68k/coldfire/head.o
|
/linux-4.1.27/arch/arm/kernel/ |
H A D | Makefile | 83 head-y := head$(MMUEXT).o 93 extra-y := $(head-y) vmlinux.lds
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/engine/pm/ |
H A D | base.c | 68 list_for_each_entry(dom, &ppm->domains, head) { nvkm_perfsig_find() 129 list_for_each_entry(chk, &ppm->domains, head) { nvkm_perfctr_query() 157 dom = list_entry(dom->head.next, typeof(*dom), head); nvkm_perfctr_query() 158 } while (&dom->head != &ppm->domains); nvkm_perfctr_query() 182 list_for_each_entry(dom, &ppm->domains, head) { nvkm_perfctr_sample() 189 typeof(*ctr), head); nvkm_perfctr_sample() 195 list_move_tail(&ctr->head, &dom->list); nvkm_perfctr_sample() 202 list_for_each_entry(ctr, &dom->list, head) { nvkm_perfctr_sample() 260 if (ctr->head.next) nvkm_perfctr_dtor() 261 list_del(&ctr->head); nvkm_perfctr_dtor() 307 list_add_tail(&ctr->head, &dom->list); nvkm_perfctr_ctor() 411 list_add_tail(&dom->head, &ppm->domains); nvkm_perfdom_new() 453 list_for_each_entry_safe(dom, tmp, &ppm->domains, head) { _nvkm_pm_dtor() 454 list_del(&dom->head); _nvkm_pm_dtor()
|
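nvkm_perfctr_sample() above rotates serviced entries to the tail with list_move_tail(), so successive samples start from a different counter and no list position is starved. A sketch of that rotate-to-tail round robin on a tiny kernel-style circular doubly linked list (helper names carry a trailing underscore to avoid implying they are the list.h API):

    #include <stdio.h>

    struct node { struct node *prev, *next; int id; };

    static void list_init(struct node *h) { h->prev = h->next = h; }

    static void list_del_(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    static void list_add_tail_(struct node *n, struct node *h)
    {
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
    }

    static int service_one(struct node *head)
    {
        struct node *first = head->next;

        if (first == head)
            return -1;              /* empty */
        /* ... sample/service `first` here ... */
        list_del_(first);
        list_add_tail_(first, head);    /* rotate: fairness across calls */
        return first->id;
    }

    int main(void)
    {
        struct node h, a = {0, 0, 1}, b = {0, 0, 2}, c = {0, 0, 3};
        int i;

        list_init(&h);
        list_add_tail_(&a, &h);
        list_add_tail_(&b, &h);
        list_add_tail_(&c, &h);
        for (i = 0; i < 5; i++)
            printf("%d ", service_one(&h));    /* 1 2 3 1 2 */
        printf("\n");
        return 0;
    }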
/linux-4.1.27/drivers/target/tcm_fc/ |
H A D | tfc_sess.c | 172 struct hlist_head *head; ft_sess_get() local 180 head = &tport->hash[ft_sess_hash(port_id)]; hlist_for_each_entry_rcu() 181 hlist_for_each_entry_rcu(sess, head, hash) { hlist_for_each_entry_rcu() 203 struct hlist_head *head; ft_sess_create() local 205 head = &tport->hash[ft_sess_hash(port_id)]; ft_sess_create() 206 hlist_for_each_entry_rcu(sess, head, hash) ft_sess_create() 225 hlist_add_head_rcu(&sess->hash, head); ft_sess_create() 256 struct hlist_head *head; ft_sess_delete() local 259 head = &tport->hash[ft_sess_hash(port_id)]; hlist_for_each_entry_rcu() 260 hlist_for_each_entry_rcu(sess, head, hash) { hlist_for_each_entry_rcu() 275 struct hlist_head *head; ft_sess_delete_all() local 278 for (head = tport->hash; ft_sess_delete_all() 279 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) { hlist_for_each_entry_rcu() 280 hlist_for_each_entry_rcu(sess, head, hash) { hlist_for_each_entry_rcu()
|
/linux-4.1.27/net/sunrpc/auth_gss/ |
H A D | gss_krb5_wrap.c | 58 iov = &buf->head[0]; gss_krb5_add_padding() 72 if (len <= buf->head[0].iov_len) { gss_krb5_remove_padding() 73 pad = *(u8 *)(buf->head[0].iov_base + len - 1); gss_krb5_remove_padding() 74 if (pad > buf->head[0].iov_len) gss_krb5_remove_padding() 76 buf->head[0].iov_len -= pad; gss_krb5_remove_padding() 79 len -= buf->head[0].iov_len; gss_krb5_remove_padding() 97 * However adjusting the head length, as we do above, is harmless. gss_krb5_remove_padding() 99 * also uses length and head length together to determine the original gss_krb5_remove_padding() 101 * easier on the server if we adjust head and tail length in tandem. gss_krb5_remove_padding() 149 /* Assumptions: the head and tail of inbuf are ours to play with. 186 ptr = buf->head[0].iov_base + offset; gss_wrap_kerberos_v1() 287 ptr = (u8 *)buf->head[0].iov_base + offset; gss_unwrap_kerberos_v1() 316 (unsigned char *)buf->head[0].iov_base; gss_unwrap_kerberos_v1() 377 orig_start = buf->head[0].iov_base + offset; gss_unwrap_kerberos_v1() 378 data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; gss_unwrap_kerberos_v1() 380 buf->head[0].iov_len -= (data_start - orig_start); gss_unwrap_kerberos_v1() 403 char head[LOCAL_BUF_LEN]; rotate_buf_a_little() local 409 read_bytes_from_xdr_buf(buf, 0, head, shift); rotate_buf_a_little() 415 write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift); rotate_buf_a_little() 461 ptr = plainhdr = buf->head[0].iov_base + offset; gss_wrap_kerberos_v2() 512 ptr = buf->head[0].iov_base + offset; gss_unwrap_kerberos_v2() 571 * Move the head data back to the right position in xdr_buf. gss_unwrap_kerberos_v2() 572 * We ignore any "ec" data since it might be in the head or gss_unwrap_kerberos_v2() 574 * Note that buf->head[0].iov_len may indicate the available gss_unwrap_kerberos_v2() 575 * head buffer space rather than that actually occupied. gss_unwrap_kerberos_v2() 577 movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len); gss_unwrap_kerberos_v2() 580 buf->head[0].iov_len); gss_unwrap_kerberos_v2() 582 buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip; gss_unwrap_kerberos_v2()
|
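gss_krb5_remove_padding() above trusts the last plaintext byte to state the pad length, but only after bounding it by the data actually present, since a forged pad byte must never make the length go negative. A flat-buffer sketch of just that check (the xdr_buf head/pages split handled by the real function is omitted):

    #include <stddef.h>

    /* Returns the unpadded length, or 0 if the pad byte is inconsistent. */
    size_t remove_padding(const unsigned char *buf, size_t len)
    {
        unsigned char pad;

        if (len == 0)
            return 0;
        pad = buf[len - 1];     /* pad length is stored in the last byte */
        if (pad == 0 || pad > len)
            return 0;           /* corrupt or hostile input */
        return len - pad;
    }

    int main(void)
    {
        const unsigned char msg[] = { 'h', 'i', 3, 3, 3 };

        return remove_padding(msg, sizeof(msg)) == 2 ? 0 : 1;
    }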
/linux-4.1.27/drivers/net/wireless/cw1200/ |
H A D | queue.c | 20 struct list_head head; member in struct:cw1200_queue_item 74 list_for_each_entry_safe(item, tmp, gc_list, head) { list_for_each_entry_safe() 75 list_del(&item->head); list_for_each_entry_safe() 89 list_add_tail(&gc_item->head, gc_list); cw1200_queue_register_post_gc() 93 struct list_head *head, __cw1200_queue_gc() 100 list_for_each_entry_safe(item, tmp, &queue->queue, head) { __cw1200_queue_gc() 111 cw1200_queue_register_post_gc(head, item); __cw1200_queue_gc() 113 list_move_tail(&item->head, &queue->free_pool); __cw1200_queue_gc() 198 list_add_tail(&queue->pool[i].head, &queue->free_pool); cw1200_queue_init() 213 list_for_each_entry_safe(item, tmp, &queue->pending, head) { cw1200_queue_clear() 217 list_move_tail(&item->head, &queue->free_pool); cw1200_queue_clear() 295 &queue->free_pool, struct cw1200_queue_item, head); cw1200_queue_put() 298 list_move_tail(&item->head, &queue->queue); cw1200_queue_put() 345 list_for_each_entry(item, &queue->queue, head) { cw1200_queue_get() 357 list_move_tail(&item->head, &queue->pending); cw1200_queue_get() 410 list_move(&item->head, &queue->queue); cw1200_queue_requeue() 422 list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) { cw1200_queue_requeue_all() 436 list_move(&item->head, &queue->queue); cw1200_queue_requeue_all() 478 list_move(&item->head, &queue->free_pool); cw1200_queue_remove() 548 list_for_each_entry(item, &queue->pending, head) { cw1200_queue_get_xmit_timestamp() 92 __cw1200_queue_gc(struct cw1200_queue *queue, struct list_head *head, bool unlock) __cw1200_queue_gc() argument
|
/linux-4.1.27/drivers/gpu/drm/mga/ |
H A D | mga_dma.c | 106 u32 head, tail; mga_do_dma_flush() local 141 head = MGA_READ(MGA_PRIMADDRESS); mga_do_dma_flush() 143 if (head <= tail) mga_do_dma_flush() 146 primary->space = head - tail; mga_do_dma_flush() 148 DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset)); mga_do_dma_flush() 161 u32 head, tail; mga_do_dma_wrap_start() local 179 head = MGA_READ(MGA_PRIMADDRESS); mga_do_dma_wrap_start() 181 if (head == dev_priv->primary->offset) mga_do_dma_wrap_start() 184 primary->space = head - dev_priv->primary->offset; mga_do_dma_wrap_start() 186 DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset)); mga_do_dma_wrap_start() 202 u32 head = dev_priv->primary->offset; mga_do_dma_wrap_end() local 209 MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL); mga_do_dma_wrap_end() 235 for (entry = dev_priv->head->next; entry; entry = entry->next) { mga_freelist_print() 237 entry, entry->buf->idx, entry->age.head, mga_freelist_print() 238 (unsigned long)(entry->age.head - dev_priv->primary->offset)); mga_freelist_print() 253 dev_priv->head = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL); mga_freelist_init() 254 if (dev_priv->head == NULL) mga_freelist_init() 257 SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0); mga_freelist_init() 267 entry->next = dev_priv->head->next; mga_freelist_init() 268 entry->prev = dev_priv->head; mga_freelist_init() 272 if (dev_priv->head->next != NULL) mga_freelist_init() 273 dev_priv->head->next->prev = entry; mga_freelist_init() 281 dev_priv->head->next = entry; mga_freelist_init() 294 entry = dev_priv->head; mga_freelist_cleanup() 301 dev_priv->head = dev_priv->tail = NULL; mga_freelist_cleanup() 328 u32 head, wrap; mga_freelist_get() local 331 head = MGA_READ(MGA_PRIMADDRESS); mga_freelist_get() 335 tail->age.head ? mga_freelist_get() 336 (unsigned long)(tail->age.head - dev_priv->primary->offset) : 0, mga_freelist_get() 338 DRM_DEBUG(" head=0x%06lx %d\n", mga_freelist_get() 339 (unsigned long)(head - dev_priv->primary->offset), wrap); mga_freelist_get() 341 if (TEST_AGE(&tail->age, head, wrap)) { mga_freelist_get() 359 drm_mga_freelist_t *head, *entry, *prev; mga_freelist_put() local 362 (unsigned long)(buf_priv->list_entry->age.head - mga_freelist_put() 367 head = dev_priv->head; mga_freelist_put() 369 if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) { mga_freelist_put() 376 prev = head->next; mga_freelist_put() 377 head->next = entry; mga_freelist_put() 379 entry->prev = head; mga_freelist_put() 551 list_for_each_entry(_entry, &dev->maplist, head) { mga_do_agp_dma_bootstrap() 912 dev_priv->sarea_priv->last_frame.head = 0; mga_do_init_dma() 988 if (dev_priv->head != NULL) mga_do_cleanup_dma()
|
/linux-4.1.27/arch/sparc/mm/ |
H A D | gup.c | 39 struct page *page, *head; gup_pte_range() local 52 head = compound_head(page); gup_pte_range() 53 if (!page_cache_get_speculative(head)) gup_pte_range() 56 put_page(head); gup_pte_range() 59 if (head != page) gup_pte_range() 73 struct page *head, *page, *tail; gup_huge_pmd() local 83 head = pmd_page(pmd); gup_huge_pmd() 84 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); gup_huge_pmd() 87 VM_BUG_ON(compound_head(page) != head); gup_huge_pmd() 94 if (!page_cache_add_speculative(head, refs)) { gup_huge_pmd() 102 put_page(head); gup_huge_pmd()
|
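gup_pte_range() above is the lockless fast path: take a speculative reference on the page found in a racy PTE read, then re-read the PTE and back out if it changed. A sketch of grab-and-revalidate with C11 atomics, assuming for simplicity that the refcount can never already be zero (the kernel's page_cache_get_speculative() handles that extra case):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct page_s {
        atomic_int refcount;
    };

    bool try_grab(struct page_s *volatile *slot)
    {
        struct page_s *page = *slot;    /* racy read of the mapping */

        if (!page)
            return false;
        atomic_fetch_add(&page->refcount, 1);   /* speculative pin */
        if (*slot != page) {                    /* raced: entry changed */
            atomic_fetch_sub(&page->refcount, 1);
            return false;
        }
        return true;
    }

    int main(void)
    {
        struct page_s pg = { 1 };
        struct page_s *volatile slot = &pg;

        return try_grab(&slot) ? 0 : 1;
    }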
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/timer/ |
H A D | nv04.c | 52 list_for_each_entry_safe(alarm, atemp, &priv->alarms, head) { nv04_timer_alarm_trigger() 54 list_move_tail(&alarm->head, &exec); nv04_timer_alarm_trigger() 59 alarm = list_first_entry(&priv->alarms, typeof(*alarm), head); nv04_timer_alarm_trigger() 68 list_for_each_entry_safe(alarm, atemp, &exec, head) { nv04_timer_alarm_trigger() 69 list_del_init(&alarm->head); nv04_timer_alarm_trigger() 86 if (!list_empty(&alarm->head)) nv04_timer_alarm() 87 list_del(&alarm->head); nv04_timer_alarm() 89 list_for_each_entry(list, &priv->alarms, head) { nv04_timer_alarm() 93 list_add_tail(&alarm->head, &list->head); nv04_timer_alarm() 107 list_del_init(&alarm->head); nv04_timer_alarm_cancel()
|
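nv04_timer_alarm() above keeps pending alarms sorted by expiry: it first deletes the alarm if it is already queued, then inserts it ahead of the first later-firing entry. The pointer-to-pointer version of that ordered insert (singly linked for brevity; the kernel list is doubly linked):

    struct alarm {
        unsigned long long timestamp;   /* absolute expiry time */
        struct alarm *next;
    };

    static void alarm_insert(struct alarm **list, struct alarm *a)
    {
        while (*list && (*list)->timestamp <= a->timestamp)
            list = &(*list)->next;      /* advance to the insertion point */
        a->next = *list;
        *list = a;
    }

    int main(void)
    {
        struct alarm a = { 30, 0 }, b = { 10, 0 }, c = { 20, 0 };
        struct alarm *list = 0;

        alarm_insert(&list, &a);
        alarm_insert(&list, &b);
        alarm_insert(&list, &c);
        /* list now fires in order: 10, 20, 30 */
        return list->timestamp == 10 ? 0 : 1;
    }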
/linux-4.1.27/drivers/target/ |
H A D | target_core_ua.c | 123 * the head of the list following sam4r14, core_scsi3_ua_allocate() 205 int head = 1; core_scsi3_ua_for_check_condition() local 221 * The highest priority Unit Attentions are placed at the head of the core_scsi3_ua_for_check_condition() 240 * (head of the list) in the outgoing CHECK_CONDITION + sense. core_scsi3_ua_for_check_condition() 242 if (head) { core_scsi3_ua_for_check_condition() 245 head = 0; core_scsi3_ua_for_check_condition() 273 int head = 1; core_scsi3_ua_clear_for_request_sense() local 289 * The highest priority Unit Attentions are placed at the head of the core_scsi3_ua_clear_for_request_sense() 300 if (head) { core_scsi3_ua_clear_for_request_sense() 303 head = 0; core_scsi3_ua_clear_for_request_sense() 318 return (head) ? -EPERM : 0; core_scsi3_ua_clear_for_request_sense()
|
/linux-4.1.27/drivers/gpu/drm/qxl/ |
H A D | qxl_display.c | 34 static bool qxl_head_enabled(struct qxl_head *head) qxl_head_enabled() argument 36 return head->width && head->height; qxl_head_enabled() 109 struct qxl_head *head; qxl_update_offset_props() local 111 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { qxl_update_offset_props() 114 head = &qdev->client_monitors_config->heads[output->index]; qxl_update_offset_props() 117 dev->mode_config.suggested_x_property, head->x); qxl_update_offset_props() 119 dev->mode_config.suggested_y_property, head->y); qxl_update_offset_props() 151 struct qxl_head *head; qxl_add_monitors_config_modes() local 155 head = &qdev->client_monitors_config->heads[h]; qxl_add_monitors_config_modes() 157 mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false, qxl_add_monitors_config_modes() 160 *pwidth = head->width; qxl_add_monitors_config_modes() 161 *pheight = head->height; qxl_add_monitors_config_modes() 567 struct qxl_head *head = &qdev->monitors_config->heads[i]; qxl_send_monitors_config() local 569 if (head->y > 8192 || head->x > 8192 || qxl_send_monitors_config() 570 head->width > 8192 || head->height > 8192) { qxl_send_monitors_config() 571 DRM_ERROR("head %d wrong: %dx%d+%d+%d\n", qxl_send_monitors_config() 572 i, head->width, head->height, qxl_send_monitors_config() 573 head->x, head->y); qxl_send_monitors_config() 752 struct qxl_head *head; qxl_write_monitors_config_for_encoder() local 761 "head number too large or missing monitors config: %p, %d", qxl_write_monitors_config_for_encoder() 772 DRM_DEBUG("missing for multiple monitors: no head holes\n"); qxl_write_monitors_config_for_encoder() 773 head = &qdev->monitors_config->heads[i]; qxl_write_monitors_config_for_encoder() 774 head->id = i; qxl_write_monitors_config_for_encoder() 777 head->width = mode->hdisplay; qxl_write_monitors_config_for_encoder() 778 head->height = mode->vdisplay; qxl_write_monitors_config_for_encoder() 779 head->x = encoder->crtc->x; qxl_write_monitors_config_for_encoder() 780 head->y = encoder->crtc->y; qxl_write_monitors_config_for_encoder() 784 head->width = 0; qxl_write_monitors_config_for_encoder() 785 head->height = 0; qxl_write_monitors_config_for_encoder() 786 head->x = 0; qxl_write_monitors_config_for_encoder() 787 head->y = 0; qxl_write_monitors_config_for_encoder() 789 DRM_DEBUG_KMS("setting head %d to +%d+%d %dx%d out of %d\n", qxl_write_monitors_config_for_encoder() 790 i, head->x, head->y, head->width, head->height, qdev->monitors_config->count); qxl_write_monitors_config_for_encoder() 791 head->flags = 0; qxl_write_monitors_config_for_encoder()
|
/linux-4.1.27/drivers/xen/events/ |
H A D | events_fifo.c | 61 uint32_t head[EVTCHN_FIFO_MAX_QUEUES]; member in struct:evtchn_fifo_queue 112 q->head[i] = 0; init_control_block() 287 uint32_t head; consume_one_event() local 291 head = q->head[priority]; consume_one_event() 297 if (head == 0) { consume_one_event() 298 rmb(); /* Ensure word is up-to-date before reading head. */ consume_one_event() 299 head = control_block->head[priority]; consume_one_event() 302 port = head; consume_one_event() 304 head = clear_linked(word); consume_one_event() 313 if (head == 0) consume_one_event() 319 q->head[priority] = head; consume_one_event()
|
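consume_one_event() above keeps a per-priority cached head and only re-reads the shared control block's head, after a read barrier, when the cached copy hits the end-of-list marker; consuming an event means following its link word. A sketch of that refill-then-unlink flow, with the barrier reduced to a comment and the atomic link clearing done by clear_linked() omitted (names are illustrative):

    #include <stdint.h>

    #define EOL 0u  /* link value meaning "end of queue" */

    struct shared_q {
        volatile uint32_t head;         /* written by the event producer */
        volatile uint32_t link[1024];   /* next-port links, EOL-terminated */
    };

    uint32_t consume_one(struct shared_q *q, uint32_t *cached_head)
    {
        uint32_t port = *cached_head;

        if (port == EOL) {
            /* rmb() here in the kernel: order the head read vs. the links */
            port = q->head;
            if (port == EOL)
                return EOL;     /* nothing pending */
        }
        *cached_head = q->link[port];   /* unlink the consumed entry */
        return port;
    }

    int main(void)
    {
        static struct shared_q q;       /* zeroed: empty queue */
        uint32_t cached = EOL;

        q.link[3] = EOL;        /* port 3 is queued, nothing after it */
        q.head = 3;
        return consume_one(&q, &cached) == 3 ? 0 : 1;
    }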
/linux-4.1.27/drivers/char/agp/ |
H A D | isoch.c | 22 static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new) agp_3_5_dev_list_insert() argument 27 list_for_each(pos, head) { list_for_each() 39 struct list_head *pos, *tmp, *head = &list->list, *start = head->next; agp_3_5_dev_list_sort() local 42 INIT_LIST_HEAD(head); agp_3_5_dev_list_sort() 44 for (pos=start; pos!=head; ) { agp_3_5_dev_list_sort() 53 agp_3_5_dev_list_insert(head, tmp); agp_3_5_dev_list_sort() 80 struct list_head *head = &dev_list->list, *pos; agp_3_5_isochronous_node_enable() local 135 list_for_each(pos, head) { list_for_each() 290 struct list_head *head = &dev_list->list, *pos; agp_3_5_nonisochronous_node_enable() local 302 for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) { agp_3_5_nonisochronous_node_enable() 325 struct list_head *head, *pos; agp_3_5_enable() local 338 * Allocate a head for our AGP 3.5 device list agp_3_5_enable() 345 head = &dev_list->list; agp_3_5_enable() 346 INIT_LIST_HEAD(head); agp_3_5_enable() 377 list_add(pos, head); for_each_pci_dev() 392 list_for_each(pos, head) { list_for_each() 460 for (pos=head->next; pos!=head; ) {
|
/linux-4.1.27/arch/ia64/hp/sim/ |
H A D | simserial.c | 117 if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) { rs_put_char() 121 info->xmit.buf[info->xmit.head] = ch; rs_put_char() 122 info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1); rs_put_char() 145 if (info->xmit.head == info->xmit.tail || tty->stopped) { transmit_chars() 147 printk("transmit_chars: head=%d, tail=%d, stopped=%d\n", transmit_chars() 148 info->xmit.head, info->xmit.tail, tty->stopped); transmit_chars() 160 count = min(CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE), transmit_chars() 169 count = CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); transmit_chars() 182 if (info->xmit.head == info->xmit.tail || tty->stopped || rs_flush_chars() 201 c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); rs_write() 207 memcpy(info->xmit.buf + info->xmit.head, buf, c); rs_write() 208 info->xmit.head = ((info->xmit.head + c) & rs_write() 218 if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) && rs_write() 229 return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); rs_write_room() 236 return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); rs_chars_in_buffer() 245 info->xmit.head = info->xmit.tail = 0; rs_flush_buffer() 388 state->xmit.head = state->xmit.tail = 0; activate()
|
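simserial above sizes its transmit ring as a power of two so that occupancy and free space reduce to masked subtractions, via the kernel's CIRC_CNT()/CIRC_SPACE() macros. Their definitions are small enough to restate and test in userspace (XMIT_SIZE here is an arbitrary stand-in for SERIAL_XMIT_SIZE):

    #include <stdio.h>

    #define XMIT_SIZE 4096  /* must be a power of two */

    /* As defined in <linux/circ_buf.h>. */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    int main(void)
    {
        unsigned int head = 10, tail = 4090;

        /* 16 bytes queued across the wrap point, 4079 still free */
        printf("cnt=%u space=%u\n",
               CIRC_CNT(head, tail, XMIT_SIZE),
               CIRC_SPACE(head, tail, XMIT_SIZE));
        return 0;
    }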
/linux-4.1.27/drivers/pci/ |
H A D | setup-bus.c | 44 static void free_list(struct list_head *head) free_list() argument 48 list_for_each_entry_safe(dev_res, tmp, head, list) { list_for_each_entry_safe() 56 * @head: Head of the list 63 static int add_to_list(struct list_head *head, add_to_list() argument 83 list_add(&tmp->list, head); add_to_list() 88 static void remove_from_list(struct list_head *head, remove_from_list() argument 93 list_for_each_entry_safe(dev_res, tmp, head, list) { list_for_each_entry_safe() 102 static struct pci_dev_resource *res_to_dev_res(struct list_head *head, res_to_dev_res() argument 107 list_for_each_entry(dev_res, head, list) { list_for_each_entry() 124 static resource_size_t get_res_add_size(struct list_head *head, get_res_add_size() argument 129 dev_res = res_to_dev_res(head, res); get_res_add_size() 133 static resource_size_t get_res_add_align(struct list_head *head, get_res_add_align() argument 138 dev_res = res_to_dev_res(head, res); get_res_add_align() 144 static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head) pdev_sort_resources() argument 176 n = head; list_for_each_entry() 177 list_for_each_entry(dev_res, head, list) { list_for_each_entry() 194 struct list_head *head) __dev_sort_resources() 210 pdev_sort_resources(dev, head); __dev_sort_resources() 223 * @realloc_head : head of the list tracking requests requiring additional 225 * @head : head of the list tracking requests with allocated 230 * is in the head list. 233 struct list_head *head) reassign_resources_sorted() 249 /* skip this resource if not found in head list */ list_for_each_entry() 250 list_for_each_entry(dev_res, head, list) { list_for_each_entry() 286 * @head : head of the list tracking requests for resources 287 * @fail_head : head of the list tracking requests that could 293 static void assign_requested_resources_sorted(struct list_head *head, assign_requested_resources_sorted() argument 300 list_for_each_entry(dev_res, head, list) { list_for_each_entry() 363 static void __assign_resources_sorted(struct list_head *head, __assign_resources_sorted() argument 400 list_for_each_entry(dev_res, head, list) { list_for_each_entry() 407 /* Update res in head list with add_size in realloc_head list */ list_for_each_entry_safe() 408 list_for_each_entry_safe(dev_res, tmp_res, head, list) { list_for_each_entry_safe() 424 * The "head" list is sorted by the alignment to make sure list_for_each_entry_safe() 426 * After we change the alignment of a dev_res in "head" list, list_for_each_entry_safe() 436 list_for_each_entry(dev_res2, head, list) { list_for_each_entry() 449 /* Try updated head list with add_size added */ 450 assign_requested_resources_sorted(head, &local_fail_head); 454 /* Remove head list from realloc_head list */ 455 list_for_each_entry(dev_res, head, list) 458 free_list(head); 464 /* remove assigned resources that need not be released from the head list etc */ list_for_each_entry_safe() 465 list_for_each_entry_safe(dev_res, tmp_res, head, list) list_for_each_entry_safe() 477 list_for_each_entry(dev_res, head, list) list_for_each_entry() 492 assign_requested_resources_sorted(head, fail_head); 497 reassign_resources_sorted(realloc_head, head); 498 free_list(head); 505 LIST_HEAD(head); pdev_assign_resources_sorted() 507 __dev_sort_resources(dev, &head); pdev_assign_resources_sorted() 508 __assign_resources_sorted(&head, add_head, fail_head); pdev_assign_resources_sorted() 517 LIST_HEAD(head); pbus_assign_resources_sorted() 520 __dev_sort_resources(dev, &head); pbus_assign_resources_sorted() 522 
__assign_resources_sorted(&head, realloc_head, fail_head); pbus_assign_resources_sorted() 193 __dev_sort_resources(struct pci_dev *dev, struct list_head *head) __dev_sort_resources() argument 232 reassign_resources_sorted(struct list_head *realloc_head, struct list_head *head) reassign_resources_sorted() argument
|
/linux-4.1.27/net/tipc/ |
H A D | msg.c | 117 * out: head buf after successful complete reassembly, otherwise NULL 122 struct sk_buff *head = *headbuf; tipc_buf_append() local 139 if (unlikely(head)) tipc_buf_append() 143 head = *headbuf = frag; tipc_buf_append() 145 TIPC_SKB_CB(head)->tail = NULL; tipc_buf_append() 146 if (skb_is_nonlinear(head)) { skb_walk_frags() 147 skb_walk_frags(head, tail) { skb_walk_frags() 148 TIPC_SKB_CB(head)->tail = tail; skb_walk_frags() 151 skb_frag_list_init(head); 156 if (!head) 159 if (skb_try_coalesce(head, frag, &headstolen, &delta)) { 162 tail = TIPC_SKB_CB(head)->tail; 163 if (!skb_has_frag_list(head)) 164 skb_shinfo(head)->frag_list = frag; 167 head->truesize += frag->truesize; 168 head->data_len += frag->len; 169 head->len += frag->len; 170 TIPC_SKB_CB(head)->tail = frag; 174 TIPC_SKB_CB(head)->validated = false; 175 if (unlikely(!tipc_msg_validate(head))) 177 *buf = head; 178 TIPC_SKB_CB(head)->tail = NULL; 554 struct sk_buff *head = NULL; tipc_msg_reassemble() local 570 if (tipc_buf_append(&head, &frag)) skb_queue_walk() 572 if (!head) skb_queue_walk() 578 kfree_skb(head);
|
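tipc_buf_append() above grows a head buffer fragment by fragment and only hands the caller a non-NULL result once the final fragment lands; until then, ownership stays with the reassembly state. A flat-memory sketch of that contract (realloc/memcpy replace the sk_buff frag-list splicing, and the first-fragment validation is reduced to "no head yet"):

    #include <stdlib.h>
    #include <string.h>
    #include <stdbool.h>

    struct frag {
        const void *data;
        size_t len;
        bool last;              /* final fragment of the message */
    };

    struct reasm {
        unsigned char *buf;
        size_t len;
    };

    /* Returns the complete message when done, NULL while more is expected. */
    struct reasm *buf_append(struct reasm **headbuf, const struct frag *f)
    {
        struct reasm *head = *headbuf;
        unsigned char *tmp;

        if (!head) {            /* first fragment becomes the head */
            head = *headbuf = calloc(1, sizeof(*head));
            if (!head)
                return NULL;
        }
        tmp = realloc(head->buf, head->len + f->len);
        if (!tmp)
            return NULL;        /* out of memory: reassembly stalls */
        memcpy(tmp + head->len, f->data, f->len);
        head->buf = tmp;
        head->len += f->len;
        if (!f->last)
            return NULL;        /* keep accumulating fragments */
        *headbuf = NULL;        /* complete: ownership passes to the caller */
        return head;
    }

    int main(void)
    {
        struct reasm *done, *state = NULL;
        struct frag f1 = { "hel", 3, false }, f2 = { "lo", 2, true };

        buf_append(&state, &f1);        /* NULL: more expected */
        done = buf_append(&state, &f2);
        if (!done)
            return 1;
        free(done->buf);
        free(done);
        return 0;
    }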