/linux-4.4.14/arch/avr32/boot/u-boot/

Makefile
      1  extra-y := head.o
/linux-4.4.14/arch/m68k/68360/

Makefile
      9  extra-y := head.o
     11  $(obj)/head.o: $(obj)/head-$(model-y).o
     12          ln -sf head-$(model-y).o $(obj)/head.o
/linux-4.4.14/arch/m68k/kernel/

Makefile
      5  extra-$(CONFIG_AMIGA) := head.o
      6  extra-$(CONFIG_ATARI) := head.o
      7  extra-$(CONFIG_MAC) := head.o
      8  extra-$(CONFIG_APOLLO) := head.o
      9  extra-$(CONFIG_VME) := head.o
     10  extra-$(CONFIG_HP300) := head.o
     11  extra-$(CONFIG_Q40) := head.o
     12  extra-$(CONFIG_SUN3X) := head.o
     13  extra-$(CONFIG_SUN3) := sun3-head.o
/linux-4.4.14/include/linux/

timerqueue.h
     14  struct rb_root head;    (member of struct timerqueue_head)
     19  extern bool timerqueue_add(struct timerqueue_head *head,
     21  extern bool timerqueue_del(struct timerqueue_head *head,
     29   * @head: head of timerqueue
     35  struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
     37          return head->next;
     45  static inline void timerqueue_init_head(struct timerqueue_head *head)
     47          head->head = RB_ROOT;
     48          head->next = NULL;
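The matches above show the whole design: timerqueue keeps nodes sorted in an rbtree (head->head) while caching the earliest-expiring node in head->next, so timerqueue_getnext() is O(1). A minimal kernel-style sketch of the API (the demo function and the ktime values are illustrative, not from the tree):

    #include <linux/timerqueue.h>
    #include <linux/ktime.h>

    static struct timerqueue_head q;
    static struct timerqueue_node a, b;

    static void timerqueue_demo(void)
    {
        timerqueue_init_head(&q);       /* head->head = RB_ROOT; head->next = NULL */
        timerqueue_init(&a);
        timerqueue_init(&b);
        a.expires = ns_to_ktime(2000);
        b.expires = ns_to_ktime(1000);
        timerqueue_add(&q, &a);         /* returns true: first node is the earliest */
        timerqueue_add(&q, &b);         /* true again: b now expires before a */
        /* O(1) lookup of the next deadline: just the cached head->next */
        WARN_ON(timerqueue_getnext(&q) != &b);
        timerqueue_del(&q, &b);         /* head->next moves on to &a */
    }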
btree-128.h
      5  static inline void btree_init_mempool128(struct btree_head128 *head,
      8          btree_init_mempool(&head->h, mempool);
     11  static inline int btree_init128(struct btree_head128 *head)
     13          return btree_init(&head->h);
     16  static inline void btree_destroy128(struct btree_head128 *head)
     18          btree_destroy(&head->h);
     21  static inline void *btree_lookup128(struct btree_head128 *head, u64 k1, u64 k2)
     24          return btree_lookup(&head->h, &btree_geo128, (unsigned long *)&key);
     27  static inline void *btree_get_prev128(struct btree_head128 *head,
     33          val = btree_get_prev(&head->h, &btree_geo128,
     40  static inline int btree_insert128(struct btree_head128 *head, u64 k1, u64 k2,
     44          return btree_insert(&head->h, &btree_geo128,
     48  static inline int btree_update128(struct btree_head128 *head, u64 k1, u64 k2,
     52          return btree_update(&head->h, &btree_geo128,
     56  static inline void *btree_remove128(struct btree_head128 *head, u64 k1, u64 k2)
     59          return btree_remove(&head->h, &btree_geo128, (unsigned long *)&key);
     62  static inline void *btree_last128(struct btree_head128 *head, u64 *k1, u64 *k2)
     67          val = btree_last(&head->h, &btree_geo128, (unsigned long *)&key[0]);
     89  static inline size_t btree_visitor128(struct btree_head128 *head,
     93          return btree_visitor(&head->h, &btree_geo128, opaque,
     97  static inline size_t btree_grim_visitor128(struct btree_head128 *head,
    101          return btree_grim_visitor(&head->h, &btree_geo128, opaque,
    105  #define btree_for_each_safe128(head, k1, k2, val)       \
    106          for (val = btree_last128(head, &k1, &k2);       \
    108               val = btree_get_prev128(head, &k1, &k2))
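Each 128-bit key is passed as two u64 halves that these wrappers pack into an unsigned long pair for the generic code via btree_geo128. A usage sketch (the demo function and payload are illustrative):

    #include <linux/btree.h>

    static struct btree_head128 tree128;

    static int btree128_demo(void *payload)
    {
        int err = btree_init128(&tree128);

        if (err)
            return err;
        /* the key is the pair (k1, k2) = (1, 2) */
        err = btree_insert128(&tree128, 1, 2, payload, GFP_KERNEL);
        if (!err)
            WARN_ON(btree_lookup128(&tree128, 1, 2) != payload);
        btree_destroy128(&tree128);
        return err;
    }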
btree-type.h
     13  static inline void BTREE_FN(init_mempool)(BTREE_TYPE_HEAD *head,
     16          btree_init_mempool(&head->h, mempool);
     19  static inline int BTREE_FN(init)(BTREE_TYPE_HEAD *head)
     21          return btree_init(&head->h);
     24  static inline void BTREE_FN(destroy)(BTREE_TYPE_HEAD *head)
     26          btree_destroy(&head->h);
     37  static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
     40          return btree_lookup(&head->h, BTREE_TYPE_GEO, &_key);
     43  static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
     47          return btree_insert(&head->h, BTREE_TYPE_GEO, &_key, val, gfp);
     50  static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
     54          return btree_update(&head->h, BTREE_TYPE_GEO, &_key, val);
     57  static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
     60          return btree_remove(&head->h, BTREE_TYPE_GEO, &_key);
     63  static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
     66          void *val = btree_last(&head->h, BTREE_TYPE_GEO, &_key);
     72  static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
     75          void *val = btree_get_prev(&head->h, BTREE_TYPE_GEO, &_key);
     81  static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
     83          return btree_lookup(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key);
     86  static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
     89          return btree_insert(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key,
     93  static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
     96          return btree_update(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, val);
     99  static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
    101          return btree_remove(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key);
    104  static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
    106          return btree_last(&head->h, BTREE_TYPE_GEO, (unsigned long *)key);
    109  static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
    111          return btree_get_prev(&head->h, BTREE_TYPE_GEO, (unsigned long *)key);
    121  static inline size_t BTREE_FN(visitor)(BTREE_TYPE_HEAD *head,
    125          return btree_visitor(&head->h, BTREE_TYPE_GEO, opaque,
    129  static inline size_t BTREE_FN(grim_visitor)(BTREE_TYPE_HEAD *head,
    133          return btree_grim_visitor(&head->h, BTREE_TYPE_GEO, opaque,
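This header is a template: btree.h includes it several times with BTREE_TYPE_SUFFIX, BTREE_KEYTYPE and friends predefined, stamping out typed wrappers such as btree_init32()/btree_insert32()/btree_lookup32() around the generic unsigned-long-keyed core. A sketch using one instantiation (the demo function and payload are illustrative):

    #include <linux/btree.h>

    static struct btree_head32 tree32;  /* the 32-bit instantiation of this template */

    static int btree32_demo(void *payload)
    {
        int err = btree_init32(&tree32);

        if (err)
            return err;
        err = btree_insert32(&tree32, 7, payload, GFP_KERNEL);
        if (!err)
            WARN_ON(btree_lookup32(&tree32, 7) != payload);
        btree_destroy32(&tree32);
        return err;
    }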
circ_buf.h
     10          int head;    (member of struct circ_buf)
     15  #define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1))
     18     as a completely full buffer has head == tail, which is the same as
     20  #define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size))
     23     accessing head and tail more than once, so they can change
     25  #define CIRC_CNT_TO_END(head,tail,size) \
     27            int n = ((head) + end) & ((size)-1); \
     31  #define CIRC_SPACE_TO_END(head,tail,size) \
     32          ({int end = (size) - 1 - (head); \
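CIRC_SPACE() is defined in terms of CIRC_CNT() with head advanced by one, which is what keeps one slot unused: head == tail always means empty, and a full ring holds size-1 bytes. A single-producer/single-consumer sketch for a power-of-two ring (ring_put/ring_get are illustrative names; real SMP users also need the memory barriers described in Documentation/circular-buffers.txt):

    #include <linux/circ_buf.h>
    #include <linux/types.h>

    #define RING_SIZE 256                  /* must be a power of two for the masking */

    static char ring_storage[RING_SIZE];
    static struct circ_buf ring = { .buf = ring_storage };

    static bool ring_put(char c)           /* producer: false when the ring is full */
    {
        if (!CIRC_SPACE(ring.head, ring.tail, RING_SIZE))
            return false;
        ring.buf[ring.head] = c;
        ring.head = (ring.head + 1) & (RING_SIZE - 1);
        return true;
    }

    static bool ring_get(char *c)          /* consumer: false when the ring is empty */
    {
        if (!CIRC_CNT(ring.head, ring.tail, RING_SIZE))
            return false;
        *c = ring.buf[ring.tail];
        ring.tail = (ring.tail + 1) & (RING_SIZE - 1);
        return true;
    }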
plist.h
     93   * @head: struct plist_head variable name
     95  #define PLIST_HEAD_INIT(head) \
     97          .node_list = LIST_HEAD_INIT((head).node_list) \
    102   * @head: name for struct plist_head variable
    104  #define PLIST_HEAD(head) \
    105          struct plist_head head = PLIST_HEAD_INIT(head)
    121   * @head: &struct plist_head pointer
    124  plist_head_init(struct plist_head *head)
    126          INIT_LIST_HEAD(&head->node_list);
    141  extern void plist_add(struct plist_node *node, struct plist_head *head);
    142  extern void plist_del(struct plist_node *node, struct plist_head *head);
    144  extern void plist_requeue(struct plist_node *node, struct plist_head *head);
    149   * @head: the head for your list
    151  #define plist_for_each(pos, head) \
    152          list_for_each_entry(pos, &(head)->node_list, node_list)
    157   * @head: the head for your list
    161  #define plist_for_each_continue(pos, head) \
    162          list_for_each_entry_continue(pos, &(head)->node_list, node_list)
    168   * @head: the head for your list
    172  #define plist_for_each_safe(pos, n, head) \
    173          list_for_each_entry_safe(pos, n, &(head)->node_list, node_list)
    178   * @head: the head for your list
    181  #define plist_for_each_entry(pos, head, mem) \
    182          list_for_each_entry(pos, &(head)->node_list, mem.node_list)
    187   * @head: the head for your list
    193  #define plist_for_each_entry_continue(pos, head, m) \
    194          list_for_each_entry_continue(pos, &(head)->node_list, m.node_list)
    200   * @head: the head for your list
    205  #define plist_for_each_entry_safe(pos, n, head, m) \
    206          list_for_each_entry_safe(pos, n, &(head)->node_list, m.node_list)
    210   * @head: &struct plist_head pointer
    212  static inline int plist_head_empty(const struct plist_head *head)
    214          return list_empty(&head->node_list);
    230   * @head: the &struct plist_head pointer
    235  # define plist_first_entry(head, type, member) \
    237          WARN_ON(plist_head_empty(head)); \
    238          container_of(plist_first(head), type, member); \
    241  # define plist_first_entry(head, type, member) \
    242          container_of(plist_first(head), type, member)
    247   * @head: the &struct plist_head pointer
    252  # define plist_last_entry(head, type, member) \
    254          WARN_ON(plist_head_empty(head)); \
    255          container_of(plist_last(head), type, member); \
    258  # define plist_last_entry(head, type, member) \
    259          container_of(plist_last(head), type, member)
    278   * @head: the &struct plist_head pointer
    282  static inline struct plist_node *plist_first(const struct plist_head *head)
    284          return list_entry(head->node_list.next,
    290   * @head: the &struct plist_head pointer
    294  static inline struct plist_node *plist_last(const struct plist_head *head)
    296          return list_entry(head->node_list.prev,
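plist keeps nodes sorted by ascending prio, with FIFO order among equal priorities, and plist_for_each_entry() then walks them via the underlying node_list. A sketch (struct job and the demo are illustrative):

    #include <linux/plist.h>

    struct job {
        struct plist_node node;
        const char *name;
    };

    static PLIST_HEAD(jobs);               /* defines and initialises the head */

    static void plist_demo(void)
    {
        struct job hi = { .name = "hi" }, lo = { .name = "lo" };
        struct job *pos;

        plist_node_init(&hi.node, 0);      /* lower prio value = higher priority */
        plist_node_init(&lo.node, 10);
        plist_add(&lo.node, &jobs);
        plist_add(&hi.node, &jobs);        /* sorts ahead of lo despite later add */

        plist_for_each_entry(pos, &jobs, node)
            pr_info("%s\n", pos->name);    /* prints "hi", then "lo" */
    }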
list.h
     56   * @head: list head to add it after
     58   * Insert a new entry after the specified head.
     61  static inline void list_add(struct list_head *new, struct list_head *head)
     63          __list_add(new, head, head->next);
     70   * @head: list head to add it before
     72   * Insert a new entry before the specified head.
     75  static inline void list_add_tail(struct list_head *new, struct list_head *head)
     77          __list_add(new, head->prev, head);
    150   * list_move - delete from one list and add as another's head
    152   * @head: the head that will precede our entry
    154  static inline void list_move(struct list_head *list, struct list_head *head)
    157          list_add(list, head);
    163   * @head: the head that will follow our entry
    166                  struct list_head *head)    (in list_move_tail)
    173   * list_is_last - tests whether @list is the last entry in list @head
    175   * @head: the head of the list
    178                  const struct list_head *head)    (in list_is_last)
    180          return list->next == head;
    185   * @head: the list to test.
    187  static inline int list_empty(const struct list_head *head)
    189          return head->next == head;
    194   * @head: the list to test
    205  static inline int list_empty_careful(const struct list_head *head)
    207          struct list_head *next = head->next;
    208          return (next == head) && (next == head->prev);
    213   * @head: the head of the list
    215  static inline void list_rotate_left(struct list_head *head)
    219          if (!list_empty(head)) {
    220                  first = head->next;
    221                  list_move_tail(first, head);
    227   * @head: the list to test.
    229  static inline int list_is_singular(const struct list_head *head)
    231          return !list_empty(head) && (head->next == head->prev);
    235                  struct list_head *head, struct list_head *entry)    (in __list_cut_position)
    238          list->next = head->next;
    242          head->next = new_first;
    243          new_first->prev = head;
    249   * @head: a list with entries
    250   * @entry: an entry within head, could be the head itself
    253   * This helper moves the initial part of @head, up to and
    254   * including @entry, from @head to @list. You should
    255   * pass on @entry an element you know is on @head. @list
    261                  struct list_head *head, struct list_head *entry)    (in list_cut_position)
    263          if (list_empty(head))
    265          if (list_is_singular(head) &&
    266                  (head->next != entry && head != entry))
    268          if (entry == head)
    271                  __list_cut_position(list, head, entry);
    291   * @head: the place to add it in the first list.
    294                  struct list_head *head)    (in list_splice)
    297          __list_splice(list, head, head->next);
    303   * @head: the place to add it in the first list.
    306                  struct list_head *head)    (in list_splice_tail)
    309          __list_splice(list, head->prev, head);
    315   * @head: the place to add it in the first list.
    320                  struct list_head *head)    (in list_splice_init)
    323          __list_splice(list, head, head->next);
    331   * @head: the place to add it in the first list.
    337                  struct list_head *head)    (in list_splice_tail_init)
    340          __list_splice(list, head->prev, head);
    356   * @ptr: the list head to take the element from.
    367   * @ptr: the list head to take the element from.
    378   * @ptr: the list head to take the element from.
    406   * @head: the head for your list.
    408  #define list_for_each(pos, head) \
    409          for (pos = (head)->next; pos != (head); pos = pos->next)
    414   * @head: the head for your list.
    416  #define list_for_each_prev(pos, head) \
    417          for (pos = (head)->prev; pos != (head); pos = pos->prev)
    423   * @head: the head for your list.
    425  #define list_for_each_safe(pos, n, head) \
    426          for (pos = (head)->next, n = pos->next; pos != (head); \
    433   * @head: the head for your list.
    435  #define list_for_each_prev_safe(pos, n, head) \
    436          for (pos = (head)->prev, n = pos->prev; \
    437               pos != (head); \
    443   * @head: the head for your list.
    446  #define list_for_each_entry(pos, head, member) \
    447          for (pos = list_first_entry(head, typeof(*pos), member); \
    448               &pos->member != (head); \
    454   * @head: the head for your list.
    457  #define list_for_each_entry_reverse(pos, head, member) \
    458          for (pos = list_last_entry(head, typeof(*pos), member); \
    459               &pos->member != (head); \
    465   * @head: the head of the list
    470  #define list_prepare_entry(pos, head, member) \
    471          ((pos) ? : list_entry(head, typeof(*pos), member))
    476   * @head: the head for your list.
    482  #define list_for_each_entry_continue(pos, head, member) \
    484               &pos->member != (head); \
    490   * @head: the head for your list.
    496  #define list_for_each_entry_continue_reverse(pos, head, member) \
    498               &pos->member != (head); \
    504   * @head: the head for your list.
    509  #define list_for_each_entry_from(pos, head, member) \
    510          for (; &pos->member != (head); \
    517   * @head: the head for your list.
    520  #define list_for_each_entry_safe(pos, n, head, member) \
    521          for (pos = list_first_entry(head, typeof(*pos), member), \
    523               &pos->member != (head); \
    530   * @head: the head for your list.
    536  #define list_for_each_entry_safe_continue(pos, n, head, member) \
    539               &pos->member != (head); \
    546   * @head: the head for your list.
    552  #define list_for_each_entry_safe_from(pos, n, head, member) \
    554               &pos->member != (head); \
    561   * @head: the head for your list.
    567  #define list_for_each_entry_safe_reverse(pos, n, head, member) \
    568          for (pos = list_last_entry(head, typeof(*pos), member), \
    570               &pos->member != (head); \
    589   * Double linked lists with a single pointer list head.
    590   * Mostly useful for hash tables where the two pointer list head is
    682   * Move a list from one list head to another. Fixup the pprev
    696  #define hlist_for_each(pos, head) \
    697          for (pos = (head)->first; pos ; pos = pos->next)
    699  #define hlist_for_each_safe(pos, n, head) \
    700          for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
    711   * @head: the head for your list.
    714  #define hlist_for_each_entry(pos, head, member) \
    715          for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
    742   * @head: the head for your list.
    745  #define hlist_for_each_entry_safe(pos, n, head, member) \
    746          for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
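The _safe iterators cache the next pointer up front, which is what makes deletion during traversal legal. A self-contained sketch of the usual build-then-drain pattern (struct item and the demo are illustrative):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
        int value;
        struct list_head link;
    };

    static LIST_HEAD(items);

    static void list_demo(void)
    {
        struct item *it, *tmp;
        int i;

        for (i = 0; i < 3; i++) {
            it = kmalloc(sizeof(*it), GFP_KERNEL);
            if (!it)
                break;
            it->value = i;
            list_add_tail(&it->link, &items);    /* preserves insertion order */
        }

        /* deletion is safe only because tmp already holds the next entry */
        list_for_each_entry_safe(it, tmp, &items, link) {
            list_del(&it->link);
            kfree(it);
        }
    }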
btree.h
     15   * Each B+Tree consists of a head, that contains bookkeeping information and
     28   * struct btree_head - btree head
     60   * @head: the btree head to initialise
     66  void btree_init_mempool(struct btree_head *head, mempool_t *mempool);
     71   * @head: the btree head to initialise
     78  int __must_check btree_init(struct btree_head *head);
     83   * @head: the btree head to destroy
     88  void btree_destroy(struct btree_head *head);
     93   * @head: the btree to look in
     99  void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
    105   * @head: the btree to add to
    114  int __must_check btree_insert(struct btree_head *head, struct btree_geo *geo,
    119   * @head: the btree to update
    127  int btree_update(struct btree_head *head, struct btree_geo *geo,
    132   * @head: the btree to update
    139  void *btree_remove(struct btree_head *head, struct btree_geo *geo,
    163   * @head: btree head
    171  void *btree_last(struct btree_head *head, struct btree_geo *geo,
    177   * @head: btree head
    185  void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
    190  size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
    198  size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo,
    215  #define btree_for_each_safel(head, key, val) \
    216          for (val = btree_lastl(head, &key); \
    218               val = btree_get_prevl(head, &key))
    226  #define btree_for_each_safe32(head, key, val) \
    227          for (val = btree_last32(head, &key); \
    229               val = btree_get_prev32(head, &key))
    238  #define btree_for_each_safe64(head, key, val) \
    239          for (val = btree_last64(head, &key); \
    241               val = btree_get_prev64(head, &key))
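The btree_for_each_safe* macros walk keys in descending order via btree_last*/btree_get_prev*, fetching the predecessor before the loop body runs, so the current entry may be removed. A sketch with the 64-bit variant (drain_map is an illustrative name and assumes the tree was set up with btree_init64()):

    #include <linux/btree.h>

    static struct btree_head64 map;            /* assume btree_init64(&map) ran */

    static void drain_map(void)
    {
        u64 key;
        void *val;

        btree_for_each_safe64(&map, key, val)  /* highest key first */
            btree_remove64(&map, key);         /* safe: next step used the old key */
    }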
list_sort.h
      8  void list_sort(void *priv, struct list_head *head,
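list_sort() is a stable merge sort over list_head links; in this kernel version the comparator takes a priv cookie plus two non-const nodes and must return >0 only when a should sort after b. A sketch (struct item and cmp_items are illustrative):

    #include <linux/list.h>
    #include <linux/list_sort.h>

    struct item {
        int value;
        struct list_head link;
    };

    static int cmp_items(void *priv, struct list_head *a, struct list_head *b)
    {
        struct item *ia = list_entry(a, struct item, link);
        struct item *ib = list_entry(b, struct item, link);

        return ia->value - ib->value;          /* ascending by value */
    }

    static void sort_items(struct list_head *items)
    {
        list_sort(NULL, items, cmp_items);     /* priv is passed through to cmp */
    }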
list_bl.h
      8   * Special version of lists, where head of the list has a lock in the lowest
    138   * @head: the head for your list.
    142  #define hlist_bl_for_each_entry(tpos, pos, head, member) \
    143          for (pos = hlist_bl_first(head); \
    153   * @head: the head for your list.
    156  #define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member) \
    157          for (pos = hlist_bl_first(head); \
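The head's low bit doubles as a bit spinlock, so a hash bucket costs a single pointer yet can still be locked. A sketch of a guarded insert-and-walk (struct entry and bl_demo are illustrative):

    #include <linux/list_bl.h>

    struct entry {
        int key;
        struct hlist_bl_node node;
    };

    static struct hlist_bl_head bucket;        /* bit 0 of ->first is the lock */

    static void bl_demo(struct entry *e)
    {
        struct entry *pos;
        struct hlist_bl_node *n;

        INIT_HLIST_BL_HEAD(&bucket);
        hlist_bl_lock(&bucket);                /* bit_spin_lock on ->first */
        hlist_bl_add_head(&e->node, &bucket);
        hlist_bl_for_each_entry(pos, n, &bucket, node)
            pr_info("key=%d\n", pos->key);
        hlist_bl_unlock(&bucket);
    }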
resource_ext.h
     40  extern void resource_list_free(struct list_head *head);
     43                  struct list_head *head)    (in resource_list_add)
     45          list_add(&entry->node, head);
     49                  struct list_head *head)    (in resource_list_add_tail)
     51          list_add_tail(&entry->node, head);
llist.h
     73   * init_llist_head - initialize lock-less list head
     74   * @head: the head for your lock-less list
     97   * instead of list head.
    115   * instead of list head.
    137   * instead of list head.
    152   * @head: the list to test
    158  static inline bool llist_empty(const struct llist_head *head)
    160          return ACCESS_ONCE(head->first) == NULL;
    170                  struct llist_head *head);
    174   * @head: the head for your lock-less list
    178  static inline bool llist_add(struct llist_node *new, struct llist_head *head)
    180          return llist_add_batch(new, new, head);
    185   * @head: the head of lock-less list to delete all entries
    191  static inline struct llist_node *llist_del_all(struct llist_head *head)
    193          return xchg(&head->first, NULL);
    196  extern struct llist_node *llist_del_first(struct llist_head *head);
    198  extern struct llist_node *llist_reverse_order(struct llist_node *head);
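llist needs no lock because producers push onto head->first atomically and the consumer takes the whole chain with one xchg(); entries therefore come back newest-first, hence llist_reverse_order() when FIFO order matters. A sketch (struct event and both helpers are illustrative):

    #include <linux/llist.h>

    struct event {
        int id;
        struct llist_node node;
    };

    static LLIST_HEAD(pending);

    static void event_post(struct event *e)    /* producer: safe from any context */
    {
        llist_add(&e->node, &pending);
    }

    static void event_drain(void)              /* consumer: one atomic grab */
    {
        struct llist_node *batch = llist_reverse_order(llist_del_all(&pending));
        struct event *e;

        llist_for_each_entry(e, batch, node)
            pr_info("event %d\n", e->id);
    }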
rculist.h
     15   * and compares it to the address of the list head, but neither dereferences
     65   * @head: list head to add it after
     67   * Insert a new entry after the specified head.
     78  static inline void list_add_rcu(struct list_head *new, struct list_head *head)
     80          __list_add_rcu(new, head, head->next);
     86   * @head: list head to add it before
     88   * Insert a new entry before the specified head.
    100                  struct list_head *head)    (in list_add_tail_rcu)
    102          __list_add_rcu(new, head->prev, head);
    184   * @head: the place in the list to splice the first list into
    187   * @head can be RCU-read traversed concurrently with this function.
    192   * prevent any other updates to @head. In principle, it is possible
    199                  struct list_head *head,    (in list_splice_init_rcu)
    204          struct list_head *at = head->next;
    235          rcu_assign_pointer(list_next_rcu(head), first);
    236          first->prev = head;
    276   * @ptr: the list head to take the element from.
    295   * @head: the head for your list.
    302  #define list_for_each_entry_rcu(pos, head, member) \
    303          for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
    304               &pos->member != (head); \
    310   * @head: the head for your list.
    316  #define list_for_each_entry_continue_rcu(pos, head, member) \
    318               &pos->member != (head); \
    369  #define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first)))
    459  #define __hlist_for_each_rcu(pos, head) \
    460          for (pos = rcu_dereference(hlist_first_rcu(head)); \
    467   * @head: the head for your list.
    474  #define hlist_for_each_entry_rcu(pos, head, member) \
    475          for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
    484   * @head: the head for your list.
    494  #define hlist_for_each_entry_rcu_notrace(pos, head, member) \
    495          for (pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
    504   * @head: the head for your list.
    511  #define hlist_for_each_entry_rcu_bh(pos, head, member) \
    512          for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
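Writers still serialize among themselves (here with a spinlock) while readers traverse under rcu_read_lock() with no lock at all; list_add_rcu() publishes the entry with the required barriers. A sketch (struct cfg and both helpers are illustrative; removing entries would additionally need list_del_rcu() plus an RCU grace period before freeing):

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct cfg {
        int value;
        struct list_head link;
    };

    static LIST_HEAD(cfgs);
    static DEFINE_SPINLOCK(cfgs_lock);

    static void cfg_add(struct cfg *c)         /* writer side */
    {
        spin_lock(&cfgs_lock);
        list_add_rcu(&c->link, &cfgs);
        spin_unlock(&cfgs_lock);
    }

    static bool cfg_present(int value)         /* reader side: lock-free */
    {
        struct cfg *c;
        bool found = false;

        rcu_read_lock();
        list_for_each_entry_rcu(c, &cfgs, link) {
            if (c->value == value) {
                found = true;
                break;
            }
        }
        rcu_read_unlock();
        return found;
    }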
fd.h
     12          compat_uint_t head;    (member of struct compat_floppy_struct)
/linux-4.4.14/security/tomoyo/
common.c
    207   * @head: Pointer to "struct tomoyo_io_buffer".
    211  static bool tomoyo_flush(struct tomoyo_io_buffer *head)
    213          while (head->r.w_pos) {
    214                  const char *w = head->r.w[0];
    217                  if (len > head->read_user_buf_avail)
    218                          len = head->read_user_buf_avail;
    221                  if (copy_to_user(head->read_user_buf, w, len))
    223                  head->read_user_buf_avail -= len;
    224                  head->read_user_buf += len;
    227                  head->r.w[0] = w;
    231                  if (head->poll) {
    232                          if (!head->read_user_buf_avail ||
    233                              copy_to_user(head->read_user_buf, "", 1))
    235                          head->read_user_buf_avail--;
    236                          head->read_user_buf++;
    238                  head->r.w_pos--;
    239                  for (len = 0; len < head->r.w_pos; len++)
    240                          head->r.w[len] = head->r.w[len + 1];
    242          head->r.avail = 0;
    249   * @head: Pointer to "struct tomoyo_io_buffer".
    252   * Note that @string has to be kept valid until @head is kfree()d.
    256  static void tomoyo_set_string(struct tomoyo_io_buffer *head, const char *string)
    258          if (head->r.w_pos < TOMOYO_MAX_IO_READ_QUEUE) {
    259                  head->r.w[head->r.w_pos++] = string;
    260                  tomoyo_flush(head);
    265  static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
    271   * @head: Pointer to "struct tomoyo_io_buffer".
    274  static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
    279          size_t pos = head->r.avail;
    280          int size = head->readbuf_size - pos;
    284          len = vsnprintf(head->read_buf + pos, size, fmt, args) + 1;
    286          if (pos + len >= head->readbuf_size) {
    290          head->r.avail += len;
    291          tomoyo_set_string(head, head->read_buf + pos);
    297   * @head: Pointer to "struct tomoyo_io_buffer".
    301  static void tomoyo_set_space(struct tomoyo_io_buffer *head)
    303          tomoyo_set_string(head, " ");
    309   * @head: Pointer to "struct tomoyo_io_buffer".
    313  static bool tomoyo_set_lf(struct tomoyo_io_buffer *head)
    315          tomoyo_set_string(head, "\n");
    316          return !head->r.w_pos;
    322   * @head: Pointer to "struct tomoyo_io_buffer".
    326  static void tomoyo_set_slash(struct tomoyo_io_buffer *head)
    328          tomoyo_set_string(head, "/");
    360   * @head: Pointer to "struct tomoyo_io_buffer".
    364  static void tomoyo_print_namespace(struct tomoyo_io_buffer *head)
    368          tomoyo_set_string(head,
    369                            container_of(head->r.ns,
    372          tomoyo_set_space(head);
    378   * @head: Pointer to "struct tomoyo_io_buffer".
    381  static void tomoyo_print_name_union(struct tomoyo_io_buffer *head,
    384          tomoyo_set_space(head);
    386                  tomoyo_set_string(head, "@");
    387                  tomoyo_set_string(head, ptr->group->group_name->name);
    389                  tomoyo_set_string(head, ptr->filename->name);
    396   * @head: Pointer to "struct tomoyo_io_buffer".
    401  static void tomoyo_print_name_union_quoted(struct tomoyo_io_buffer *head,
    405                  tomoyo_set_string(head, "@");
    406                  tomoyo_set_string(head, ptr->group->group_name->name);
    408                  tomoyo_set_string(head, "\"");
    409                  tomoyo_set_string(head, ptr->filename->name);
    410                  tomoyo_set_string(head, "\"");
    417   * @head: Pointer to "struct tomoyo_io_buffer".
    423  (struct tomoyo_io_buffer *head, const struct tomoyo_number_union *ptr)    (in tomoyo_print_number_union_nospace)
    426                  tomoyo_set_string(head, "@");
    427                  tomoyo_set_string(head, ptr->group->group_name->name);
    457          tomoyo_io_printf(head, "%s", buffer);
    464   * @head: Pointer to "struct tomoyo_io_buffer".
    469  static void tomoyo_print_number_union(struct tomoyo_io_buffer *head,
    472          tomoyo_set_space(head);
    473          tomoyo_print_number_union_nospace(head, ptr);
    656   * @head: Pointer to "struct tomoyo_io_buffer".
    660  static int tomoyo_write_profile(struct tomoyo_io_buffer *head)
    662          char *data = head->write_buf;
    666          if (sscanf(data, "PROFILE_VERSION=%u", &head->w.ns->profile_version)
    673          profile = tomoyo_assign_profile(head->w.ns, i);
    706   * @head: Pointer to "struct tomoyo_io_buffer".
    713  static void tomoyo_print_config(struct tomoyo_io_buffer *head, const u8 config)
    715          tomoyo_io_printf(head, "={ mode=%s grant_log=%s reject_log=%s }\n",
    724   * @head: Pointer to "struct tomoyo_io_buffer".
    728  static void tomoyo_read_profile(struct tomoyo_io_buffer *head)
    732                  container_of(head->r.ns, typeof(*ns), namespace_list);
    734          if (head->r.eof)
    737          index = head->r.index;
    739          switch (head->r.step) {
    741                  tomoyo_print_namespace(head);
    742                  tomoyo_io_printf(head, "PROFILE_VERSION=%u\n",
    744                  head->r.step++;
    747                  for ( ; head->r.index < TOMOYO_MAX_PROFILES;
    748                       head->r.index++)
    749                          if (ns->profile_ptr[head->r.index])
    751                  if (head->r.index == TOMOYO_MAX_PROFILES) {
    752                          head->r.eof = true;
    755                  head->r.step++;
    762                          tomoyo_print_namespace(head);
    763                          tomoyo_io_printf(head, "%u-COMMENT=", index);
    764                          tomoyo_set_string(head, comment ? comment->name : "");
    765                          tomoyo_set_lf(head);
    766                          tomoyo_print_namespace(head);
    767                          tomoyo_io_printf(head, "%u-PREFERENCE={ ", index);
    769                                  tomoyo_io_printf(head, "%s=%u ",
    772                          tomoyo_set_string(head, "}\n");
    773                  head->r.step++;
    778                          tomoyo_print_namespace(head);
    779                          tomoyo_io_printf(head, "%u-%s", index, "CONFIG");
    780                          tomoyo_print_config(head, profile->default_config);
    781                  head->r.bit = 0;
    782                  head->r.step++;
    786                  for ( ; head->r.bit < TOMOYO_MAX_MAC_INDEX
    787                                + TOMOYO_MAX_MAC_CATEGORY_INDEX; head->r.bit++) {
    788                          const u8 i = head->r.bit;
    792                          tomoyo_print_namespace(head);
    794                                  tomoyo_io_printf(head, "%u-CONFIG::%s::%s",
    800                                  tomoyo_io_printf(head, "%u-CONFIG::%s", index,
    802                          tomoyo_print_config(head, config);
    803                          head->r.bit++;
    806                  if (head->r.bit == TOMOYO_MAX_MAC_INDEX
    808                          head->r.index++;
    809                          head->r.step = 1;
    813          if (tomoyo_flush(head))
    828          return container_of(a, struct tomoyo_manager, head)->manager ==    (in tomoyo_same_manager)
    829                  container_of(b, struct tomoyo_manager, head)->manager;
    858          error = tomoyo_update_policy(&e.head, sizeof(e), &param,    (in tomoyo_update_manager_entry)
    868   * @head: Pointer to "struct tomoyo_io_buffer".
    874  static int tomoyo_write_manager(struct tomoyo_io_buffer *head)
    876          char *data = head->write_buf;
    879                  tomoyo_manage_by_non_root = !head->w.is_delete;
    882          return tomoyo_update_manager_entry(data, head->w.is_delete);
    888   * @head: Pointer to "struct tomoyo_io_buffer".
    892  static void tomoyo_read_manager(struct tomoyo_io_buffer *head)
    894          if (head->r.eof)
    896          list_for_each_cookie(head->r.acl, &tomoyo_kernel_namespace.
    899                          list_entry(head->r.acl, typeof(*ptr), head.list);
    900                  if (ptr->head.is_deleted)
    902                  if (!tomoyo_flush(head))
    904                  tomoyo_set_string(head, ptr->manager->name);
    905                  tomoyo_set_lf(head);
    907          head->r.eof = true;
    936                              policy_list[TOMOYO_ID_MANAGER], head.list) {
    937                  if (!ptr->head.is_deleted &&
    963   * @head: Pointer to "struct tomoyo_io_buffer".
    970  static bool tomoyo_select_domain(struct tomoyo_io_buffer *head,
    997          head->w.domain = domain;
    998          /* Accessing read_buf is safe because head->io_sem is held. */
    999          if (!head->read_buf)
   1001          memset(&head->r, 0, sizeof(head->r));
   1002          head->r.print_this_domain_only = true;
   1004                  head->r.domain = &domain->list;
   1006                  head->r.eof = 1;
   1007          tomoyo_io_printf(head, "# select %s\n", data);
   1009                  tomoyo_io_printf(head, "# This is a deleted domain.\n");
   1024          const struct tomoyo_task_acl *p1 = container_of(a, typeof(*p1), head);    (in tomoyo_same_task_acl)
   1025          const struct tomoyo_task_acl *p2 = container_of(b, typeof(*p2), head);
   1043                  .head.type = TOMOYO_TYPE_MANUAL_TASK_ACL,
   1047          error = tomoyo_update_domain(&e.head, sizeof(e), param,
   1140   * @head: Pointer to "struct tomoyo_io_buffer".
   1146  static int tomoyo_write_domain(struct tomoyo_io_buffer *head)
   1148          char *data = head->write_buf;
   1150          struct tomoyo_domain_info *domain = head->w.domain;
   1151          const bool is_delete = head->w.is_delete;
   1163                  head->w.domain = domain;
   1195   * @head: Pointer to "struct tomoyo_io_buffer".
   1200  static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
   1203          switch (head->r.cond_step) {
   1205                  head->r.cond_index = 0;
   1206                  head->r.cond_step++;
   1208                          tomoyo_set_space(head);
   1209                          tomoyo_set_string(head, cond->transit->name);
   1227                  for (skip = 0; skip < head->r.cond_index; skip++) {
   1251                  while (head->r.cond_index < condc) {
   1255                          if (!tomoyo_flush(head))
   1258                          head->r.cond_index++;
   1259                                  tomoyo_set_space(head);
   1262                                  tomoyo_io_printf(head,
   1266                                  tomoyo_set_string(head,
   1268                                  tomoyo_set_string(head, "\"");
   1272                                  tomoyo_set_string(head,
   1274                                  tomoyo_set_string(head,
   1276                                  tomoyo_io_printf(head, "\"]%s=", envp->
   1279                                          tomoyo_set_string(head, "\"");
   1280                                          tomoyo_set_string(head, envp->
   1282                                          tomoyo_set_string(head, "\"");
   1284                                          tomoyo_set_string(head,
   1291                                          (head, numbers_p++);
   1294                                  tomoyo_set_string(head,
   1298                          tomoyo_set_string(head, match ? "=" : "!=");
   1302                                          (head, names_p++);
   1306                                          (head, numbers_p++);
   1309                                  tomoyo_set_string(head,
   1315                  head->r.cond_step++;
   1318                  if (!tomoyo_flush(head))
   1320                  head->r.cond_step++;
   1324                          tomoyo_io_printf(head, " grant_log=%s",
   1327                  tomoyo_set_lf(head);
   1336   * @head: Pointer to "struct tomoyo_io_buffer".
   1341  static void tomoyo_set_group(struct tomoyo_io_buffer *head,
   1344          if (head->type == TOMOYO_EXCEPTIONPOLICY) {
   1345                  tomoyo_print_namespace(head);
   1346                  tomoyo_io_printf(head, "acl_group %u ",
   1347                                   head->r.acl_group_index);
   1349          tomoyo_set_string(head, category);
   1355   * @head: Pointer to "struct tomoyo_io_buffer".
   1360  static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
   1367          if (head->r.print_cond_part)
   1371          if (!tomoyo_flush(head))
   1375                          container_of(acl, typeof(*ptr), head);
   1380                  if (head->r.print_transition_related_only &&
   1384                  tomoyo_set_group(head, "file ");
   1387                                  tomoyo_set_slash(head);
   1389                          tomoyo_set_string(head, tomoyo_path_keyword[bit]);
   1393                  tomoyo_print_name_union(head, &ptr->name);
   1396                          container_of(acl, typeof(*ptr), head);
   1397                  tomoyo_set_group(head, "task ");
   1398                  tomoyo_set_string(head, "manual_domain_transition ");
   1399                  tomoyo_set_string(head, ptr->domainname->name);
   1400          } else if (head->r.print_transition_related_only) {
   1404                          container_of(acl, typeof(*ptr), head);
   1410                  tomoyo_set_group(head, "file ");
   1413                                  tomoyo_set_slash(head);
   1415                          tomoyo_set_string(head, tomoyo_mac_keywords
   1420                  tomoyo_print_name_union(head, &ptr->name1);
   1421                  tomoyo_print_name_union(head, &ptr->name2);
   1424                          container_of(acl, typeof(*ptr), head);
   1430                  tomoyo_set_group(head, "file ");
   1433                                  tomoyo_set_slash(head);
   1435                          tomoyo_set_string(head, tomoyo_mac_keywords
   1440                  tomoyo_print_name_union(head, &ptr->name);
   1441                  tomoyo_print_number_union(head, &ptr->number);
   1444                          container_of(acl, typeof(*ptr), head);
   1450                  tomoyo_set_group(head, "file ");
   1453                                  tomoyo_set_slash(head);
   1455                          tomoyo_set_string(head, tomoyo_mac_keywords
   1460                  tomoyo_print_name_union(head, &ptr->name);
   1461                  tomoyo_print_number_union(head, &ptr->mode);
   1462                  tomoyo_print_number_union(head, &ptr->major);
   1463                  tomoyo_print_number_union(head, &ptr->minor);
   1466                          container_of(acl, typeof(*ptr), head);
   1473                  tomoyo_set_group(head, "network inet ");
   1474                  tomoyo_set_string(head, tomoyo_proto_keyword
   1476                  tomoyo_set_space(head);
   1479                                  tomoyo_set_slash(head);
   1481                          tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
   1485                  tomoyo_set_space(head);
   1487                          tomoyo_set_string(head, "@");
   1488                          tomoyo_set_string(head, ptr->address.group->group_name
   1493                          tomoyo_io_printf(head, "%s", buf);
   1495                  tomoyo_print_number_union(head, &ptr->port);
   1498                          container_of(acl, typeof(*ptr), head);
   1505                  tomoyo_set_group(head, "network unix ");
   1506                  tomoyo_set_string(head, tomoyo_proto_keyword
   1508                  tomoyo_set_space(head);
   1511                                  tomoyo_set_slash(head);
   1513                          tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
   1517                  tomoyo_print_name_union(head, &ptr->name);
   1520                          container_of(acl, typeof(*ptr), head);
   1521                  tomoyo_set_group(head, "file mount");
   1522                  tomoyo_print_name_union(head, &ptr->dev_name);
   1523                  tomoyo_print_name_union(head, &ptr->dir_name);
   1524                  tomoyo_print_name_union(head, &ptr->fs_type);
   1525                  tomoyo_print_number_union(head, &ptr->flags);
   1528                          container_of(acl, typeof(*ptr), head);
   1530                  tomoyo_set_group(head, "misc env ");
   1531                  tomoyo_set_string(head, ptr->env->name);
   1534                  head->r.print_cond_part = true;
   1535                  head->r.cond_step = 0;
   1536                  if (!tomoyo_flush(head))
   1539                  if (!tomoyo_print_condition(head, acl->cond))
   1541                  head->r.print_cond_part = false;
   1543                  tomoyo_set_lf(head);
   1551   * @head: Pointer to "struct tomoyo_io_buffer".
   1558  static bool tomoyo_read_domain2(struct tomoyo_io_buffer *head,
   1561          list_for_each_cookie(head->r.acl, list) {
   1563                          list_entry(head->r.acl, typeof(*ptr), list);
   1564                  if (!tomoyo_print_entry(head, ptr))
   1567          head->r.acl = NULL;
   1574   * @head: Pointer to "struct tomoyo_io_buffer".
   1578  static void tomoyo_read_domain(struct tomoyo_io_buffer *head)
   1580          if (head->r.eof)
   1582          list_for_each_cookie(head->r.domain, &tomoyo_domain_list) {
   1584                          list_entry(head->r.domain, typeof(*domain), list);
   1585                  switch (head->r.step) {
   1589                              !head->r.print_this_domain_only)
   1592                          tomoyo_set_string(head, domain->domainname->name);
   1593                          tomoyo_set_lf(head);
   1594                          tomoyo_io_printf(head, "use_profile %u\n",
   1596                          tomoyo_io_printf(head, "use_group %u\n",
   1600                                          tomoyo_set_string(head, tomoyo_dif[i]);
   1601                          head->r.step++;
   1602                          tomoyo_set_lf(head);
   1605                          if (!tomoyo_read_domain2(head, &domain->acl_info_list))
   1607                          head->r.step++;
   1608                          if (!tomoyo_set_lf(head))
   1612                          head->r.step = 0;
   1613                          if (head->r.print_this_domain_only)
   1618          head->r.eof = true;
   1624   * @head: Pointer to "struct tomoyo_io_buffer".
   1628  static int tomoyo_write_pid(struct tomoyo_io_buffer *head)
   1630          head->r.eof = false;
   1637   * @head: Pointer to "struct tomoyo_io_buffer".
   1644  static void tomoyo_read_pid(struct tomoyo_io_buffer *head)
   1646          char *buf = head->write_buf;
   1652          /* Accessing write_buf is safe because head->io_sem is held. */
   1654                  head->r.eof = true;
   1657          if (head->r.w_pos || head->r.eof)
   1659          head->r.eof = true;
   1673          tomoyo_io_printf(head, "%u %u ", pid, domain->profile);
   1674          tomoyo_set_string(head, domain->domainname->name);
   1697   * @head: Pointer to "struct tomoyo_io_buffer".
   1703  static int tomoyo_write_exception(struct tomoyo_io_buffer *head)
   1705          const bool is_delete = head->w.is_delete;
   1707                  .ns = head->w.ns,
   1709                  .data = head->write_buf,
   1726                          (head->w.ns, &head->w.ns->acl_group[group],
   1735   * @head: Pointer to "struct tomoyo_io_buffer".
   1742  static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx)
   1745                  container_of(head->r.ns, typeof(*ns), namespace_list);
   1747          list_for_each_cookie(head->r.group, list) {
   1749                          list_entry(head->r.group, typeof(*group), head.list);
   1750                  list_for_each_cookie(head->r.acl, &group->member_list) {
   1752                                  list_entry(head->r.acl, typeof(*ptr), list);
   1755                          if (!tomoyo_flush(head))
   1757                          tomoyo_print_namespace(head);
   1758                          tomoyo_set_string(head, tomoyo_group_name[idx]);
   1759                          tomoyo_set_string(head, group->group_name->name);
   1761                                  tomoyo_set_space(head);
   1762                                  tomoyo_set_string(head, container_of
   1764                                                    head)->member_name->name);
   1766                                  tomoyo_print_number_union(head, &container_of
   1769                                                            head)->number);
   1775                                          head);
   1778                                  tomoyo_io_printf(head, " %s", buffer);
   1780                          tomoyo_set_lf(head);
   1782                  head->r.acl = NULL;
   1784          head->r.group = NULL;
   1791   * @head: Pointer to "struct tomoyo_io_buffer".
   1798  static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx)
   1801                  container_of(head->r.ns, typeof(*ns), namespace_list);
   1803          list_for_each_cookie(head->r.acl, list) {
   1805                          container_of(head->r.acl, typeof(*acl), list);
   1808                  if (!tomoyo_flush(head))
   1814                                  container_of(acl, typeof(*ptr), head);
   1815                          tomoyo_print_namespace(head);
   1816                          tomoyo_set_string(head, tomoyo_transition_type
   1818                          tomoyo_set_string(head, ptr->program ?
   1820                          tomoyo_set_string(head, " from ");
   1821                          tomoyo_set_string(head, ptr->domainname ?
   1829                                  container_of(acl, typeof(*ptr), head);
   1830                          tomoyo_print_namespace(head);
   1831                          tomoyo_set_string(head, "aggregator ");
   1832                          tomoyo_set_string(head,
   1834                          tomoyo_set_space(head);
   1835                          tomoyo_set_string(head,
   1842                  tomoyo_set_lf(head);
   1844          head->r.acl = NULL;
   1851   * @head: Pointer to "struct tomoyo_io_buffer".
   1855  static void tomoyo_read_exception(struct tomoyo_io_buffer *head)
   1858                  container_of(head->r.ns, typeof(*ns), namespace_list);
   1859          if (head->r.eof)
   1861          while (head->r.step < TOMOYO_MAX_POLICY &&
   1862                 tomoyo_read_policy(head, head->r.step))
   1863                  head->r.step++;
   1864          if (head->r.step < TOMOYO_MAX_POLICY)
   1866          while (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP &&
   1867                 tomoyo_read_group(head, head->r.step - TOMOYO_MAX_POLICY))
   1868                  head->r.step++;
   1869          if (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP)
   1871          while (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP
   1873                  head->r.acl_group_index = head->r.step - TOMOYO_MAX_POLICY
   1875                  if (!tomoyo_read_domain2(head, &ns->acl_group
   1876                                           [head->r.acl_group_index]))
   1878                  head->r.step++;
   1880          head->r.eof = true;
   2135   * @head: Pointer to "struct tomoyo_io_buffer".
   2137  static void tomoyo_read_query(struct tomoyo_io_buffer *head)
   2143          if (head->r.w_pos)
   2145          if (head->read_buf) {
   2146                  kfree(head->read_buf);
   2147                  head->read_buf = NULL;
   2152                  if (pos++ != head->r.query_index)
   2159                  head->r.query_index = 0;
   2169                  if (pos++ != head->r.query_index)
   2182          head->read_buf = buf;
   2183          head->r.w[head->r.w_pos++] = buf;
   2184          head->r.query_index++;
   2193   * @head: Pointer to "struct tomoyo_io_buffer".
   2197  static int tomoyo_write_answer(struct tomoyo_io_buffer *head)
   2199          char *data = head->write_buf;
   2229   * @head: Pointer to "struct tomoyo_io_buffer".
   2233  static void tomoyo_read_version(struct tomoyo_io_buffer *head)
   2235          if (!head->r.eof) {
   2236                  tomoyo_io_printf(head, "2.5.0");
   2237                  head->r.eof = true;
   2280   * @head: Pointer to "struct tomoyo_io_buffer".
   2284  static void tomoyo_read_stat(struct tomoyo_io_buffer *head)
   2288          if (head->r.eof)
   2291                  tomoyo_io_printf(head, "Policy %-30s %10u",
   2297                          tomoyo_io_printf(head, " (Last: %04u/%02u/%02u "
   2302                  tomoyo_set_lf(head);
   2307                  tomoyo_io_printf(head, "Memory used by %-22s %10u",
   2311                          tomoyo_io_printf(head, " (Quota: %10u)", used);
   2312                  tomoyo_set_lf(head);
   2314          tomoyo_io_printf(head, "Total memory used: %10u\n",
   2316          head->r.eof = true;
   2322   * @head: Pointer to "struct tomoyo_io_buffer".
   2326  static int tomoyo_write_stat(struct tomoyo_io_buffer *head)
   2328          char *data = head->write_buf;
   2347          struct tomoyo_io_buffer *head = kzalloc(sizeof(*head), GFP_NOFS);    (in tomoyo_open_control)
   2349          if (!head)
   2351          mutex_init(&head->io_sem);
   2352          head->type = type;
   2356                  head->write = tomoyo_write_domain;
   2357                  head->read = tomoyo_read_domain;
   2361                  head->write = tomoyo_write_exception;
   2362                  head->read = tomoyo_read_exception;
   2366                  head->poll = tomoyo_poll_log;
   2367                  head->read = tomoyo_read_log;
   2371                  head->write = tomoyo_write_pid;
   2372                  head->read = tomoyo_read_pid;
   2376                  head->read = tomoyo_read_version;
   2377                  head->readbuf_size = 128;
   2381                  head->write = tomoyo_write_stat;
   2382                  head->read = tomoyo_read_stat;
   2383                  head->readbuf_size = 1024;
   2387                  head->write = tomoyo_write_profile;
   2388                  head->read = tomoyo_read_profile;
   2391                  head->poll = tomoyo_poll_query;
   2392                  head->write = tomoyo_write_answer;
   2393                  head->read = tomoyo_read_query;
   2397                  head->write = tomoyo_write_manager;
   2398                  head->read = tomoyo_read_manager;
   2406                  head->read = NULL;
   2407                  head->poll = NULL;
   2408          } else if (!head->poll) {
   2410                  if (!head->readbuf_size)
   2411                          head->readbuf_size = 4096 * 2;
   2412                  head->read_buf = kzalloc(head->readbuf_size, GFP_NOFS);
   2413                  if (!head->read_buf) {
   2414                          kfree(head);
   2423                  head->write = NULL;
   2424          } else if (head->write) {
   2425                  head->writebuf_size = 4096 * 2;
   2426                  head->write_buf = kzalloc(head->writebuf_size, GFP_NOFS);
   2427                  if (!head->write_buf) {
   2428                          kfree(head->read_buf);
   2429                          kfree(head);
   2441          file->private_data = head;
   2442          tomoyo_notify_gc(head, true);
   2457          struct tomoyo_io_buffer *head = file->private_data;    (in tomoyo_poll_control)
   2458          if (head->poll)
   2459                  return head->poll(file, wait) | POLLOUT | POLLWRNORM;
   2466   * @head: Pointer to "struct tomoyo_io_buffer".
   2470  static inline void tomoyo_set_namespace_cursor(struct tomoyo_io_buffer *head)
   2473          if (head->type != TOMOYO_EXCEPTIONPOLICY &&
   2474              head->type != TOMOYO_PROFILE)
   2480          ns = head->r.ns;
   2481          if (!ns || (head->r.eof && ns->next != &tomoyo_namespace_list)) {
   2483                  memset(&head->r, 0, sizeof(head->r));
   2484                  head->r.ns = ns ? ns->next : tomoyo_namespace_list.next;
   2491   * @head: Pointer to "struct tomoyo_io_buffer".
   2495  static inline bool tomoyo_has_more_namespace(struct tomoyo_io_buffer *head)
   2497          return (head->type == TOMOYO_EXCEPTIONPOLICY ||
   2498                  head->type == TOMOYO_PROFILE) && head->r.eof &&
   2499                  head->r.ns->next != &tomoyo_namespace_list;
   2505   * @head: Pointer to "struct tomoyo_io_buffer".
   2511  ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
   2517          if (!head->read)
   2519          if (mutex_lock_interruptible(&head->io_sem))
   2521          head->read_user_buf = buffer;
   2522          head->read_user_buf_avail = buffer_len;
   2524          if (tomoyo_flush(head))
   2527                          tomoyo_set_namespace_cursor(head);
   2528                          head->read(head);
   2529                  } while (tomoyo_flush(head) &&
   2530                           tomoyo_has_more_namespace(head));
   2532          len = head->read_user_buf - buffer;
   2533          mutex_unlock(&head->io_sem);
   2540   * @head: Pointer to "struct tomoyo_io_buffer".
   2547  static int tomoyo_parse_policy(struct tomoyo_io_buffer *head, char *line)
   2550          head->w.is_delete = !strncmp(line, "delete ", 7);
   2551          if (head->w.is_delete)
   2554          if (head->type == TOMOYO_EXCEPTIONPOLICY ||
   2555              head->type == TOMOYO_PROFILE) {
   2560                          head->w.ns = tomoyo_assign_namespace(line);
   2563                          head->w.ns = NULL;
   2565                          head->w.ns = &tomoyo_kernel_namespace;
   2567                  if (!head->w.ns)
   2571          return head->write(head);
   2577   * @head: Pointer to "struct tomoyo_io_buffer".
   2583  ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
   2588          char *cp0 = head->write_buf;
   2590          if (!head->write)
   2594          if (mutex_lock_interruptible(&head->io_sem))
   2596          head->read_user_buf_avail = 0;
   2601                  if (head->w.avail >= head->writebuf_size - 1) {
   2602                          const int len = head->writebuf_size * 2;
   2608                          memmove(cp, cp0, head->w.avail);
   2610                          head->write_buf = cp;
   2612                          head->writebuf_size = len;
   2620                  cp0[head->w.avail++] = c;
   2623                  cp0[head->w.avail - 1] = '\0';
   2624                  head->w.avail = 0;
   2627                          head->w.ns = &tomoyo_kernel_namespace;
   2628                          head->w.domain = NULL;
   2629                          memset(&head->r, 0, sizeof(head->r));
   2633                  switch (head->type) {
   2638                          if (tomoyo_select_domain(head, cp0))
   2643                                  head->r.print_transition_related_only = true;
   2653                  switch (tomoyo_parse_policy(head, cp0)) {
   2658                          switch (head->type) {
   2674          mutex_unlock(&head->io_sem);
   2681   * @head: Pointer to "struct tomoyo_io_buffer".
   2683  void tomoyo_close_control(struct tomoyo_io_buffer *head)
   2689          if (head->type == TOMOYO_QUERY &&
   2692                  tomoyo_notify_gc(head, false);
   2745          struct tomoyo_io_buffer head = { };    (in tomoyo_load_builtin_policy)
   2750                          head.type = TOMOYO_PROFILE;
   2751                          head.write = tomoyo_write_profile;
   2755                          head.type = TOMOYO_EXCEPTIONPOLICY;
   2756                          head.write = tomoyo_write_exception;
   2760                          head.type = TOMOYO_DOMAINPOLICY;
   2761                          head.write = tomoyo_write_domain;
   2765                          head.type = TOMOYO_MANAGER;
   2766                          head.write = tomoyo_write_manager;
   2770                          head.type = TOMOYO_STAT;
   2771                          head.write = tomoyo_write_stat;
   2780                  head.write_buf = start;
   2781                  tomoyo_parse_policy(&head, start);
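Most of the matches above are one mechanism seen from many callers: r.w[] is a short queue of const strings, tomoyo_set_string() enqueues, and tomoyo_flush() drains the queue into the reader's buffer, keeping a partially copied string at slot 0 when the buffer fills. A minimal userspace model of just that queue logic (not the kernel code itself: copy_to_user() is replaced by memcpy(), and the poll/query special case is omitted):

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    #define MAX_QUEUE 64               /* stands in for TOMOYO_MAX_IO_READ_QUEUE */

    struct io_buf {
        const char *w[MAX_QUEUE];      /* models head->r.w[] */
        unsigned int w_pos;            /* models head->r.w_pos */
        char *user_buf;                /* models head->read_user_buf */
        size_t user_avail;             /* models head->read_user_buf_avail */
    };

    /* Model of tomoyo_flush(): returns true once the queue is fully drained. */
    static bool model_flush(struct io_buf *b)
    {
        while (b->w_pos) {
            const char *w = b->w[0];
            size_t len = strlen(w);

            if (len > b->user_avail)
                len = b->user_avail;
            if (len) {
                memcpy(b->user_buf, w, len);
                b->user_avail -= len;
                b->user_buf += len;
                w += len;
            }
            if (*w) {                  /* buffer filled mid-string: remember rest */
                b->w[0] = w;
                return false;
            }
            b->w_pos--;                /* string done: shift the queue down */
            memmove(&b->w[0], &b->w[1], b->w_pos * sizeof(b->w[0]));
        }
        return true;
    }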
gc.c
     41          struct tomoyo_io_buffer *head;    (in tomoyo_struct_used_by_io_buffer)
     45          list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
     46                  head->users++;
     48                  mutex_lock(&head->io_sem);
     49                  if (head->r.domain == element || head->r.group == element ||
     50                      head->r.acl == element || &head->w.domain->list == element)
     52                  mutex_unlock(&head->io_sem);
     54                  head->users--;
     72          struct tomoyo_io_buffer *head;    (in tomoyo_name_used_by_io_buffer)
     77          list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
     79                  head->users++;
     81                  mutex_lock(&head->io_sem);
     83                          const char *w = head->r.w[i];
     89                  mutex_unlock(&head->io_sem);
     91                  head->users--;
    109                  container_of(element, typeof(*ptr), head.list);    (in tomoyo_del_transition_control)
    124                  container_of(element, typeof(*ptr), head.list);    (in tomoyo_del_aggregator)
    139                  container_of(element, typeof(*ptr), head.list);    (in tomoyo_del_manager)
    159                  = container_of(acl, typeof(*entry), head);    (in tomoyo_del_acl)
    166                  = container_of(acl, typeof(*entry), head);
    174                  = container_of(acl, typeof(*entry), head);
    182                  = container_of(acl, typeof(*entry), head);
    192                  = container_of(acl, typeof(*entry), head);
    202                  container_of(acl, typeof(*entry), head);
    210                  container_of(acl, typeof(*entry), head);
    219                  container_of(acl, typeof(*entry), head);
    227                  container_of(acl, typeof(*entry), head);
    271                  head.list);    (in tomoyo_del_condition)
    322                  container_of(element, typeof(*member), head.list);    (in tomoyo_del_path_group)
    336                  container_of(element, typeof(*group), head.list);    (in tomoyo_del_group)
    427                  head.list)->entry.name))    (in tomoyo_try_to_gc)
    560          list_for_each_entry_safe(group, tmp, list, head.list) {
    563                      atomic_read(&group->head.users) > 0)
    565                  atomic_set(&group->head.users,
    568                            &group->head.list);
    601          struct tomoyo_io_buffer *head;    (in tomoyo_gc_thread)
    605          list_for_each_entry_safe(head, tmp, &tomoyo_io_buffer_list,
    607                  if (head->users)
    609                  list_del(&head->list);
    610                  kfree(head->read_buf);
    611                  kfree(head->write_buf);
    612                  kfree(head);
    625   * @head: Pointer to "struct tomoyo_io_buffer".
    630  void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register)
    636                  head->users = 1;
    637                  list_add(&head->list, &tomoyo_io_buffer_list);
    639          is_write = head->write_buf != NULL;
    640          if (!--head->users) {
    641                  list_del(&head->list);
    642                  kfree(head->read_buf);
    643                  kfree(head->write_buf);
    644                  kfree(head);
|
H A D | group.c | 21 return container_of(a, struct tomoyo_path_group, head)->member_name == tomoyo_same_path_group() 22 container_of(b, struct tomoyo_path_group, head)->member_name; tomoyo_same_path_group() 36 return !memcmp(&container_of(a, struct tomoyo_number_group, head) tomoyo_same_number_group() 38 &container_of(b, struct tomoyo_number_group, head) tomoyo_same_number_group() 40 sizeof(container_of(a, struct tomoyo_number_group, head) tomoyo_same_number_group() 56 head); tomoyo_same_address_group() 58 head); tomoyo_same_address_group() 85 error = tomoyo_update_policy(&e.head, sizeof(e), param, tomoyo_write_group() 93 error = tomoyo_update_policy(&e.head, sizeof(e), param, tomoyo_write_group() 105 error = tomoyo_update_policy(&e.head, sizeof(e), param, tomoyo_write_group() 129 list_for_each_entry_rcu(member, &group->member_list, head.list) { tomoyo_path_matches_group() 130 if (member->head.is_deleted) tomoyo_path_matches_group() 156 list_for_each_entry_rcu(member, &group->member_list, head.list) { tomoyo_number_matches_group() 157 if (member->head.is_deleted) tomoyo_number_matches_group() 186 list_for_each_entry_rcu(member, &group->member_list, head.list) { tomoyo_address_matches_group() 187 if (member->head.is_deleted) tomoyo_address_matches_group()
|
H A D | memory.c | 108 list_for_each_entry(group, list, head.list) { list_for_each_entry() 110 atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS) list_for_each_entry() 112 atomic_inc(&group->head.users); list_for_each_entry() 120 atomic_set(&entry->head.users, 1); 121 list_add_tail_rcu(&entry->head.list, list); 152 struct list_head *head; tomoyo_get_name() local 158 head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)]; tomoyo_get_name() 161 list_for_each_entry(ptr, head, head.list) { list_for_each_entry() 163 atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS) list_for_each_entry() 165 atomic_inc(&ptr->head.users); list_for_each_entry() 172 atomic_set(&ptr->head.users, 1); 174 list_add_tail(&ptr->head.list, head);
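tomoyo_get_name() above is a classic lookup-or-intern: hash the string, reuse a live bucket entry by bumping its refcount, otherwise allocate and append. A reduced sketch of the same idea (bucket count, hash, and names are illustrative; the real code also holds a lock and skips entries marked TOMOYO_GC_IN_PROGRESS):

#include <stdlib.h>
#include <string.h>

struct name_entry {
	struct name_entry *next;
	int users;
	char *name;
};

#define NBUCKETS 64
static struct name_entry *bucket[NBUCKETS];

static unsigned int hash_str(const char *s)
{
	unsigned int h = 5381;

	while (*s)
		h = h * 33 + (unsigned char)*s++;
	return h % NBUCKETS;
}

static struct name_entry *get_name(const char *name)
{
	unsigned int h = hash_str(name);
	struct name_entry *e;

	for (e = bucket[h]; e; e = e->next) {
		if (!strcmp(e->name, name)) {
			e->users++;	/* share the interned copy */
			return e;
		}
	}
	e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;
	e->name = strdup(name);
	if (!e->name) {
		free(e);
		return NULL;
	}
	e->users = 1;
	e->next = bucket[h];	/* prepend to the hash bucket */
	bucket[h] = e;
	return e;
}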
|
H A D | environ.c | 21 container_of(ptr, typeof(*acl), head); tomoyo_check_env_acl() 78 const struct tomoyo_env_acl *p1 = container_of(a, typeof(*p1), head); tomoyo_same_env_acl() 79 const struct tomoyo_env_acl *p2 = container_of(b, typeof(*p2), head); tomoyo_same_env_acl() 95 struct tomoyo_env_acl e = { .head.type = TOMOYO_TYPE_ENV_ACL }; tomoyo_write_env() 104 error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_write_env()
|
H A D | file.c | 254 head); tomoyo_check_path_acl() 276 container_of(ptr, typeof(*acl), head); tomoyo_check_path_number_acl() 296 container_of(ptr, typeof(*acl), head); tomoyo_check_path2_acl() 315 container_of(ptr, typeof(*acl), head); tomoyo_check_mkdev_acl() 338 const struct tomoyo_path_acl *p1 = container_of(a, typeof(*p1), head); tomoyo_same_path_acl() 339 const struct tomoyo_path_acl *p2 = container_of(b, typeof(*p2), head); tomoyo_same_path_acl() 356 u16 * const a_perm = &container_of(a, struct tomoyo_path_acl, head) tomoyo_merge_path_acl() 359 const u16 b_perm = container_of(b, struct tomoyo_path_acl, head)->perm; tomoyo_merge_path_acl() 382 .head.type = TOMOYO_TYPE_PATH_ACL, tomoyo_update_path_acl() 389 error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_update_path_acl() 407 const struct tomoyo_mkdev_acl *p1 = container_of(a, typeof(*p1), head); tomoyo_same_mkdev_acl() 408 const struct tomoyo_mkdev_acl *p2 = container_of(b, typeof(*p2), head); tomoyo_same_mkdev_acl() 429 head)->perm; tomoyo_merge_mkdev_acl() 431 const u8 b_perm = container_of(b, struct tomoyo_mkdev_acl, head) tomoyo_merge_mkdev_acl() 455 .head.type = TOMOYO_TYPE_MKDEV_ACL, tomoyo_update_mkdev_acl() 465 error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_update_mkdev_acl() 486 const struct tomoyo_path2_acl *p1 = container_of(a, typeof(*p1), head); tomoyo_same_path2_acl() 487 const struct tomoyo_path2_acl *p2 = container_of(b, typeof(*p2), head); tomoyo_same_path2_acl() 505 u8 * const a_perm = &container_of(a, struct tomoyo_path2_acl, head) tomoyo_merge_path2_acl() 508 const u8 b_perm = container_of(b, struct tomoyo_path2_acl, head)->perm; tomoyo_merge_path2_acl() 531 .head.type = TOMOYO_TYPE_PATH2_ACL, tomoyo_update_path2_acl() 539 error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_update_path2_acl() 620 head); tomoyo_same_path_number_acl() 622 head); tomoyo_same_path_number_acl() 641 head)->perm; tomoyo_merge_path_number_acl() 643 const u8 b_perm = container_of(b, struct tomoyo_path_number_acl, head) tomoyo_merge_path_number_acl() 665 .head.type = TOMOYO_TYPE_PATH_NUMBER_ACL, tomoyo_update_path_number_acl() 673 error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_update_path_number_acl() 947 const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head); tomoyo_same_mount_acl() 948 const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head); tomoyo_same_mount_acl() 966 struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL }; tomoyo_update_mount_acl() 974 error = tomoyo_update_domain(&e.head, sizeof(e), param, tomoyo_update_mount_acl()
|
/linux-4.4.14/sound/pci/ctxfi/ |
H A D | ctimap.c | 25 struct list_head *pos, *pre, *head; input_mapper_add() local 28 head = mappers; input_mapper_add() 30 if (list_empty(head)) { input_mapper_add() 33 list_add(&entry->list, head); input_mapper_add() 37 list_for_each(pos, head) { list_for_each() 45 if (pos != head) { 47 if (pre == head) 48 pre = head->prev; 52 pre = head->prev; 53 pos = head->next; 54 list_add_tail(&entry->list, head); 71 struct list_head *next, *pre, *head; input_mapper_delete() local 74 head = mappers; input_mapper_delete() 76 if (list_empty(head)) input_mapper_delete() 79 pre = (entry->list.prev == head) ? head->prev : entry->list.prev; input_mapper_delete() 80 next = (entry->list.next == head) ? head->next : entry->list.next; input_mapper_delete() 100 void free_input_mapper_list(struct list_head *head) free_input_mapper_list() argument 105 while (!list_empty(head)) { free_input_mapper_list() 106 pos = head->next; free_input_mapper_list()
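input_mapper_add() above keeps the mapper list ordered (and circular) so a single walk finds the insertion point. A reduced, non-circular version of that sorted insert, with illustrative names (struct mapper, slot as the sort key):

struct mapper {
	struct mapper *next;
	int slot;
};

static void mapper_insert(struct mapper **headp, struct mapper *entry)
{
	struct mapper **pp = headp;

	while (*pp && (*pp)->slot < entry->slot)
		pp = &(*pp)->next;	/* stop before the first larger key */
	entry->next = *pp;
	*pp = entry;
}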
|
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/subdev/ |
H A D | vga.h | 6 u8 nvkm_rdport(struct nvkm_device *, int head, u16 port); 7 void nvkm_wrport(struct nvkm_device *, int head, u16 port, u8 value); 10 u8 nvkm_rdvgas(struct nvkm_device *, int head, u8 index); 11 void nvkm_wrvgas(struct nvkm_device *, int head, u8 index, u8 value); 14 u8 nvkm_rdvgag(struct nvkm_device *, int head, u8 index); 15 void nvkm_wrvgag(struct nvkm_device *, int head, u8 index, u8 value); 18 u8 nvkm_rdvgac(struct nvkm_device *, int head, u8 index); 19 void nvkm_wrvgac(struct nvkm_device *, int head, u8 index, u8 value); 22 u8 nvkm_rdvgai(struct nvkm_device *, int head, u16 port, u8 index); 23 void nvkm_wrvgai(struct nvkm_device *, int head, u16 port, u8 index, u8 value);
|
/linux-4.4.14/net/netlabel/ |
H A D | netlabel_addrlist.h | 96 #define netlbl_af4list_foreach(iter, head) \ 97 for (iter = __af4list_valid((head)->next, head); \ 98 &iter->list != (head); \ 99 iter = __af4list_valid(iter->list.next, head)) 101 #define netlbl_af4list_foreach_rcu(iter, head) \ 102 for (iter = __af4list_valid_rcu((head)->next, head); \ 103 &iter->list != (head); \ 104 iter = __af4list_valid_rcu(iter->list.next, head)) 106 #define netlbl_af4list_foreach_safe(iter, tmp, head) \ 107 for (iter = __af4list_valid((head)->next, head), \ 108 tmp = __af4list_valid(iter->list.next, head); \ 109 &iter->list != (head); \ 110 iter = tmp, tmp = __af4list_valid(iter->list.next, head)) 113 struct list_head *head); 115 struct list_head *head); 118 struct list_head *head); 121 struct list_head *head); 163 #define netlbl_af6list_foreach(iter, head) \ 164 for (iter = __af6list_valid((head)->next, head); \ 165 &iter->list != (head); \ 166 iter = __af6list_valid(iter->list.next, head)) 168 #define netlbl_af6list_foreach_rcu(iter, head) \ 169 for (iter = __af6list_valid_rcu((head)->next, head); \ 170 &iter->list != (head); \ 171 iter = __af6list_valid_rcu(iter->list.next, head)) 173 #define netlbl_af6list_foreach_safe(iter, tmp, head) \ 174 for (iter = __af6list_valid((head)->next, head), \ 175 tmp = __af6list_valid(iter->list.next, head); \ 176 &iter->list != (head); \ 177 iter = tmp, tmp = __af6list_valid(iter->list.next, head)) 180 struct list_head *head); 183 struct list_head *head); 186 struct list_head *head); 189 struct list_head *head);
|
H A D | netlabel_addrlist.c | 52 * @head: the list head 55 * Searches the IPv4 address list given by @head. If a matching address entry 61 struct list_head *head) netlbl_af4list_search() 65 list_for_each_entry_rcu(iter, head, list) netlbl_af4list_search() 76 * @head: the list head 79 * Searches the IPv4 address list given by @head. If an exact match is found 86 struct list_head *head) netlbl_af4list_search_exact() 90 list_for_each_entry_rcu(iter, head, list) netlbl_af4list_search_exact() 102 * @head: the list head 105 * Searches the IPv6 address list given by @head. If a matching address entry 111 struct list_head *head) netlbl_af6list_search() 115 list_for_each_entry_rcu(iter, head, list) netlbl_af6list_search() 127 * @head: the list head 130 * Searches the IPv6 address list given by @head. If an exact match is found 137 struct list_head *head) netlbl_af6list_search_exact() 141 list_for_each_entry_rcu(iter, head, list) netlbl_af6list_search_exact() 154 * @head: the list head 157 * Add a new address entry to the list pointed to by @head. On success zero is 162 int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head) netlbl_af4list_add() argument 166 iter = netlbl_af4list_search(entry->addr, head); netlbl_af4list_add() 175 list_for_each_entry_rcu(iter, head, list) list_for_each_entry_rcu() 183 list_add_tail_rcu(&entry->list, head); 191 * @head: the list head 194 * Add a new address entry to the list pointed to by @head. On success zero is 199 int netlbl_af6list_add(struct netlbl_af6list *entry, struct list_head *head) netlbl_af6list_add() argument 203 iter = netlbl_af6list_search(&entry->addr, head); netlbl_af6list_add() 213 list_for_each_entry_rcu(iter, head, list) list_for_each_entry_rcu() 221 list_add_tail_rcu(&entry->list, head); 245 * @head: the list head 248 * Remove an IP address entry from the list pointed to by @head. Returns the 254 struct list_head *head) netlbl_af4list_remove() 258 entry = netlbl_af4list_search_exact(addr, mask, head); netlbl_af4list_remove() 285 * @head: the list head 288 * Remove an IP address entry from the list pointed to by @head. Returns the 295 struct list_head *head) netlbl_af6list_remove() 299 entry = netlbl_af6list_search_exact(addr, mask, head); netlbl_af6list_remove() 60 netlbl_af4list_search(__be32 addr, struct list_head *head) netlbl_af4list_search() argument 84 netlbl_af4list_search_exact(__be32 addr, __be32 mask, struct list_head *head) netlbl_af4list_search_exact() argument 110 netlbl_af6list_search(const struct in6_addr *addr, struct list_head *head) netlbl_af6list_search() argument 135 netlbl_af6list_search_exact(const struct in6_addr *addr, const struct in6_addr *mask, struct list_head *head) netlbl_af6list_search_exact() argument 253 netlbl_af4list_remove(__be32 addr, __be32 mask, struct list_head *head) netlbl_af4list_remove() argument 293 netlbl_af6list_remove(const struct in6_addr *addr, const struct in6_addr *mask, struct list_head *head) netlbl_af6list_remove() argument
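A hedged usage sketch of this API, assuming struct netlbl_af4list carries the addr, mask, and list members the quoted code dereferences (plus the validity flag implied by __af4list_valid()), and that the caller holds the lock the real call sites take around add/remove; error codes are conventional, not quoted:

static LIST_HEAD(demo_af4_list);

static int demo_af4(struct netlbl_af4list *entry, __be32 addr)
{
	struct netlbl_af4list *iter;
	int ret;

	ret = netlbl_af4list_add(entry, &demo_af4_list);
	if (ret != 0)
		return ret;	/* e.g. duplicate addr/mask pair */

	/* best-match lookup, the same helper the add path uses */
	iter = netlbl_af4list_search(addr, &demo_af4_list);
	if (iter == NULL)
		return -ENOENT;

	/* walk every valid entry; an _rcu variant exists for readers */
	netlbl_af4list_foreach(iter, &demo_af4_list) {
		/* inspect iter->addr and iter->mask here */
	}
	return 0;
}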
|
/linux-4.4.14/drivers/scsi/aic7xxx/ |
H A D | queue.h | 40 * added to the list after an existing element or at the head of the list. 41 * Elements being removed from the head of the list should use the explicit 48 * head of the list and the other to the tail of the list. The elements are 51 * to the list after an existing element, at the head of the list, or at the 52 * end of the list. Elements being removed from the head of the tail queue 62 * or after an existing element or at the head of the list. A list 65 * A tail queue is headed by a pair of pointers, one to the head of the 69 * after an existing element, at the head of the list, or at the end of 72 * A circle queue is headed by a pair of pointers, one to the head of the 76 * an existing element, at the head of the list, or at the end of the list. 112 #define SLIST_HEAD_INITIALIZER(head) \ 123 #define SLIST_EMPTY(head) ((head)->slh_first == NULL) 125 #define SLIST_FIRST(head) ((head)->slh_first) 127 #define SLIST_FOREACH(var, head, field) \ 128 for ((var) = SLIST_FIRST((head)); \ 132 #define SLIST_INIT(head) do { \ 133 SLIST_FIRST((head)) = NULL; \ 141 #define SLIST_INSERT_HEAD(head, elm, field) do { \ 142 SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ 143 SLIST_FIRST((head)) = (elm); \ 148 #define SLIST_REMOVE(head, elm, type, field) do { \ 149 if (SLIST_FIRST((head)) == (elm)) { \ 150 SLIST_REMOVE_HEAD((head), field); \ 153 struct type *curelm = SLIST_FIRST((head)); \ 161 #define SLIST_REMOVE_HEAD(head, field) do { \ 162 SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \ 174 #define STAILQ_HEAD_INITIALIZER(head) \ 175 { NULL, &(head).stqh_first } 185 #define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) 187 #define STAILQ_FIRST(head) ((head)->stqh_first) 189 #define STAILQ_FOREACH(var, head, field) \ 190 for((var) = STAILQ_FIRST((head)); \ 194 #define STAILQ_INIT(head) do { \ 195 STAILQ_FIRST((head)) = NULL; \ 196 (head)->stqh_last = &STAILQ_FIRST((head)); \ 199 #define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ 201 (head)->stqh_last = &STAILQ_NEXT((elm), field); \ 205 #define STAILQ_INSERT_HEAD(head, elm, field) do { \ 206 if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \ 207 (head)->stqh_last = &STAILQ_NEXT((elm), field); \ 208 STAILQ_FIRST((head)) = (elm); \ 211 #define STAILQ_INSERT_TAIL(head, elm, field) do { \ 213 STAILQ_LAST((head)) = (elm); \ 214 (head)->stqh_last = &STAILQ_NEXT((elm), field); \ 217 #define STAILQ_LAST(head) (*(head)->stqh_last) 221 #define STAILQ_REMOVE(head, elm, type, field) do { \ 222 if (STAILQ_FIRST((head)) == (elm)) { \ 223 STAILQ_REMOVE_HEAD(head, field); \ 226 struct type *curelm = STAILQ_FIRST((head)); \ 231 (head)->stqh_last = &STAILQ_NEXT((curelm), field);\ 235 #define STAILQ_REMOVE_HEAD(head, field) do { \ 236 if ((STAILQ_FIRST((head)) = \ 237 STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \ 238 (head)->stqh_last = &STAILQ_FIRST((head)); \ 241 #define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \ 242 if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \ 243 (head)->stqh_last = &STAILQ_FIRST((head)); \ 254 #define LIST_HEAD_INITIALIZER(head) \ 267 #define LIST_EMPTY(head) ((head)->lh_first == NULL) 269 #define LIST_FIRST(head) ((head)->lh_first) 271 #define LIST_FOREACH(var, head, field) \ 272 for ((var) = LIST_FIRST((head)); \ 276 #define LIST_INIT(head) do { \ 277 LIST_FIRST((head)) = NULL; \ 295 #define LIST_INSERT_HEAD(head, elm, field) do { \ 296 if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ 297 LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), 
field);\ 298 LIST_FIRST((head)) = (elm); \ 299 (elm)->field.le_prev = &LIST_FIRST((head)); \ 320 #define TAILQ_HEAD_INITIALIZER(head) \ 321 { NULL, &(head).tqh_first } 332 #define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) 334 #define TAILQ_FIRST(head) ((head)->tqh_first) 336 #define TAILQ_FOREACH(var, head, field) \ 337 for ((var) = TAILQ_FIRST((head)); \ 341 #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ 342 for ((var) = TAILQ_LAST((head), headname); \ 346 #define TAILQ_INIT(head) do { \ 347 TAILQ_FIRST((head)) = NULL; \ 348 (head)->tqh_last = &TAILQ_FIRST((head)); \ 351 #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ 356 (head)->tqh_last = &TAILQ_NEXT((elm), field); \ 368 #define TAILQ_INSERT_HEAD(head, elm, field) do { \ 369 if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ 370 TAILQ_FIRST((head))->field.tqe_prev = \ 373 (head)->tqh_last = &TAILQ_NEXT((elm), field); \ 374 TAILQ_FIRST((head)) = (elm); \ 375 (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ 378 #define TAILQ_INSERT_TAIL(head, elm, field) do { \ 380 (elm)->field.tqe_prev = (head)->tqh_last; \ 381 *(head)->tqh_last = (elm); \ 382 (head)->tqh_last = &TAILQ_NEXT((elm), field); \ 385 #define TAILQ_LAST(head, headname) \ 386 (*(((struct headname *)((head)->tqh_last))->tqh_last)) 393 #define TAILQ_REMOVE(head, elm, field) do { \ 398 (head)->tqh_last = (elm)->field.tqe_prev; \ 411 #define CIRCLEQ_HEAD_INITIALIZER(head) \ 412 { (void *)&(head), (void *)&(head) } 423 #define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head)) 425 #define CIRCLEQ_FIRST(head) ((head)->cqh_first) 427 #define CIRCLEQ_FOREACH(var, head, field) \ 428 for ((var) = CIRCLEQ_FIRST((head)); \ 429 (var) != (void *)(head); \ 432 #define CIRCLEQ_FOREACH_REVERSE(var, head, field) \ 433 for ((var) = CIRCLEQ_LAST((head)); \ 434 (var) != (void *)(head); \ 437 #define CIRCLEQ_INIT(head) do { \ 438 CIRCLEQ_FIRST((head)) = (void *)(head); \ 439 CIRCLEQ_LAST((head)) = (void *)(head); \ 442 #define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ 445 if (CIRCLEQ_NEXT((listelm), field) == (void *)(head)) \ 446 CIRCLEQ_LAST((head)) = (elm); \ 452 #define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ 455 if (CIRCLEQ_PREV((listelm), field) == (void *)(head)) \ 456 CIRCLEQ_FIRST((head)) = (elm); \ 462 #define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ 463 CIRCLEQ_NEXT((elm), field) = CIRCLEQ_FIRST((head)); \ 464 CIRCLEQ_PREV((elm), field) = (void *)(head); \ 465 if (CIRCLEQ_LAST((head)) == (void *)(head)) \ 466 CIRCLEQ_LAST((head)) = (elm); \ 468 CIRCLEQ_PREV(CIRCLEQ_FIRST((head)), field) = (elm); \ 469 CIRCLEQ_FIRST((head)) = (elm); \ 472 #define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ 473 CIRCLEQ_NEXT((elm), field) = (void *)(head); \ 474 CIRCLEQ_PREV((elm), field) = CIRCLEQ_LAST((head)); \ 475 if (CIRCLEQ_FIRST((head)) == (void *)(head)) \ 476 CIRCLEQ_FIRST((head)) = (elm); \ 478 CIRCLEQ_NEXT(CIRCLEQ_LAST((head)), field) = (elm); \ 479 CIRCLEQ_LAST((head)) = (elm); \ 482 #define CIRCLEQ_LAST(head) ((head)->cqh_last) 488 #define CIRCLEQ_REMOVE(head, elm, field) do { \ 489 if (CIRCLEQ_NEXT((elm), field) == (void *)(head)) \ 490 CIRCLEQ_LAST((head)) = CIRCLEQ_PREV((elm), field); \ 494 if (CIRCLEQ_PREV((elm), field) == (void *)(head)) \ 495 CIRCLEQ_FIRST((head)) = CIRCLEQ_NEXT((elm), field); \
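A minimal usage sketch for this macro family, assuming the usual SLIST_HEAD()/SLIST_ENTRY() declaration macros that accompany these initializers in the same header (only the accessors are quoted above); the include path is illustrative:

#include <stdio.h>
#include "queue.h"

struct node {
	int value;
	SLIST_ENTRY(node) link;		/* embedded singly-linked entry */
};

SLIST_HEAD(node_list, node);		/* declares struct node_list */

int main(void)
{
	struct node_list list = SLIST_HEAD_INITIALIZER(list);
	struct node a = { .value = 1 }, b = { .value = 2 };
	struct node *np;

	SLIST_INIT(&list);
	SLIST_INSERT_HEAD(&list, &a, link);
	SLIST_INSERT_HEAD(&list, &b, link);	/* list is now b, a */

	SLIST_FOREACH(np, &list, link)
		printf("%d\n", np->value);

	SLIST_REMOVE_HEAD(&list, link);		/* unlinks b in O(1) */
	return SLIST_EMPTY(&list);		/* 0: 'a' is still queued */
}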
|
/linux-4.4.14/arch/sparc/include/asm/ |
H A D | intr_queue.h | 6 #define INTRQ_CPU_MONDO_HEAD 0x3c0 /* CPU mondo head */ 8 #define INTRQ_DEVICE_MONDO_HEAD 0x3d0 /* Device mondo head */ 10 #define INTRQ_RESUM_MONDO_HEAD 0x3e0 /* Resumable error mondo head */ 12 #define INTRQ_NONRESUM_MONDO_HEAD 0x3f0 /* Non-resumable error mondo head */ 13 #define INTRQ_NONRESUM_MONDO_TAIL 0x3f8 /* Non-resumable error mondo tail */
|
/linux-4.4.14/arch/frv/kernel/ |
H A D | Makefile | 5 heads-y := head-uc-fr401.o head-uc-fr451.o head-uc-fr555.o 6 heads-$(CONFIG_MMU) := head-mmu-fr451.o 8 extra-y:= head.o vmlinux.lds
|
/linux-4.4.14/drivers/staging/rdma/amso1100/ |
H A D | c2_alloc.c | 40 struct sp_chunk **head) c2_alloc_mqsp_chunk() 55 new_head->head = 0; c2_alloc_mqsp_chunk() 67 *head = new_head; c2_alloc_mqsp_chunk() 89 __be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head, c2_alloc_mqsp() argument 94 while (head) { c2_alloc_mqsp() 95 mqsp = head->head; c2_alloc_mqsp() 97 head->head = head->shared_ptr[mqsp]; c2_alloc_mqsp() 99 } else if (head->next == NULL) { c2_alloc_mqsp() 100 if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) == c2_alloc_mqsp() 102 head = head->next; c2_alloc_mqsp() 103 mqsp = head->head; c2_alloc_mqsp() 104 head->head = head->shared_ptr[mqsp]; c2_alloc_mqsp() 109 head = head->next; c2_alloc_mqsp() 111 if (head) { c2_alloc_mqsp() 112 *dma_addr = head->dma_addr + c2_alloc_mqsp() 113 ((unsigned long) &(head->shared_ptr[mqsp]) - c2_alloc_mqsp() 114 (unsigned long) head); c2_alloc_mqsp() 116 &(head->shared_ptr[mqsp]), (unsigned long long) *dma_addr); c2_alloc_mqsp() 117 return (__force __be16 *) &(head->shared_ptr[mqsp]); c2_alloc_mqsp() 124 struct sp_chunk *head; c2_free_mqsp() local 128 head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK); c2_free_mqsp() 130 /* Link head to new mqsp */ c2_free_mqsp() 131 *mqsp = (__force __be16) head->head; c2_free_mqsp() 137 /* Point this index at the head */ c2_free_mqsp() 138 head->shared_ptr[idx] = head->head; c2_free_mqsp() 140 /* Point head at this index */ c2_free_mqsp() 141 head->head = idx; c2_free_mqsp() 39 c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask, struct sp_chunk **head) c2_alloc_mqsp_chunk() argument
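The chunk allocator above threads its free slots through the shared_ptr[] array itself: each free slot stores the index of the next free slot, and head names the first, making alloc and free O(1) pops and pushes. A standalone sketch of that index-linked free list (sizes and names are illustrative, not the driver's):

#include <stdint.h>

#define NSLOTS 16

struct chunk {
	uint16_t head;			/* index of the first free slot */
	uint16_t slot[NSLOTS];		/* free slots chain to the next one */
};

static void chunk_init(struct chunk *c)
{
	int i;

	for (i = 0; i < NSLOTS - 1; i++)
		c->slot[i] = i + 1;	/* slot i -> slot i + 1 */
	c->slot[NSLOTS - 1] = NSLOTS;	/* NSLOTS doubles as "no free slot" */
	c->head = 0;
}

static int chunk_alloc(struct chunk *c)
{
	uint16_t idx = c->head;

	if (idx >= NSLOTS)
		return -1;		/* chunk exhausted */
	c->head = c->slot[idx];		/* pop the head of the free chain */
	return idx;
}

static void chunk_free(struct chunk *c, uint16_t idx)
{
	c->slot[idx] = c->head;		/* point this index at the old head */
	c->head = idx;			/* point head at this index */
}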
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/disp/ |
H A D | gf119.c | 34 gf119_disp_vblank_init(struct nv50_disp *disp, int head) gf119_disp_vblank_init() argument 37 nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001); gf119_disp_vblank_init() 41 gf119_disp_vblank_fini(struct nv50_disp *disp, int head) gf119_disp_vblank_fini() argument 44 nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000); gf119_disp_vblank_fini() 48 exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl, exec_lookup() argument 77 mask |= 0x0100 << head; exec_lookup() 79 list_for_each_entry(outp, &disp->base.outp, head) { exec_lookup() 95 exec_script(struct nv50_disp *disp, int head, int id) exec_script() argument 106 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) { exec_script() 108 if (ctrl & (1 << head)) exec_script() 115 outp = exec_lookup(disp, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info); exec_script() 122 .crtc = head, exec_script() 133 exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) exec_clkcmp() argument 145 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) { exec_clkcmp() 147 if (ctrl & (1 << head)) exec_clkcmp() 154 outp = exec_lookup(disp, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1); exec_clkcmp() 185 .crtc = head, exec_clkcmp() 197 gf119_disp_intr_unk1_0(struct nv50_disp *disp, int head) gf119_disp_intr_unk1_0() argument 199 exec_script(disp, head, 1); gf119_disp_intr_unk1_0() 203 gf119_disp_intr_unk2_0(struct nv50_disp *disp, int head) gf119_disp_intr_unk2_0() argument 206 struct nvkm_output *outp = exec_script(disp, head, 2); gf119_disp_intr_unk2_0() 215 .crtc = head, gf119_disp_intr_unk2_0() 226 gf119_disp_intr_unk2_1(struct nv50_disp *disp, int head) gf119_disp_intr_unk2_1() argument 230 u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000; gf119_disp_intr_unk2_1() 232 nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head, pclk); gf119_disp_intr_unk2_1() 233 nvkm_wr32(device, 0x612200 + (head * 0x800), 0x00000000); gf119_disp_intr_unk2_1() 237 gf119_disp_intr_unk2_2_tu(struct nv50_disp *disp, int head, gf119_disp_intr_unk2_2_tu() argument 243 const u32 conf = nvkm_rd32(device, 0x660404 + (head * 0x300)); gf119_disp_intr_unk2_2_tu() 244 const s32 vactive = nvkm_rd32(device, 0x660414 + (head * 0x300)) & 0xffff; gf119_disp_intr_unk2_2_tu() 245 const s32 vblanke = nvkm_rd32(device, 0x66041c + (head * 0x300)) & 0xffff; gf119_disp_intr_unk2_2_tu() 246 const s32 vblanks = nvkm_rd32(device, 0x660420 + (head * 0x300)) & 0xffff; gf119_disp_intr_unk2_2_tu() 247 const u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000; gf119_disp_intr_unk2_2_tu() 249 const u32 hoff = (head * 0x800); gf119_disp_intr_unk2_2_tu() 299 gf119_disp_intr_unk2_2(struct nv50_disp *disp, int head) gf119_disp_intr_unk2_2() argument 303 u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000; gf119_disp_intr_unk2_2() 306 outp = exec_clkcmp(disp, head, 0xff, pclk, &conf); gf119_disp_intr_unk2_2() 312 u32 sync = nvkm_rd32(device, 0x660404 + (head * 0x300)); gf119_disp_intr_unk2_2() 329 exec_clkcmp(disp, head, 0, pclk, &conf); gf119_disp_intr_unk2_2() 342 gf119_disp_intr_unk2_2_tu(disp, head, &outp->info); gf119_disp_intr_unk2_2() 353 gf119_disp_intr_unk4_0(struct nv50_disp *disp, int head) gf119_disp_intr_unk4_0() argument 356 u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000; gf119_disp_intr_unk4_0() 359 exec_clkcmp(disp, head, 1, pclk, &conf); gf119_disp_intr_unk4_0() 370 int head; gf119_disp_intr_supervisor() local 373 for (head = 0; head < disp->base.head.nr; 
head++) { gf119_disp_intr_supervisor() 374 mask[head] = nvkm_rd32(device, 0x6101d4 + (head * 0x800)); gf119_disp_intr_supervisor() 375 nvkm_debug(subdev, "head %d: %08x\n", head, mask[head]); gf119_disp_intr_supervisor() 380 for (head = 0; head < disp->base.head.nr; head++) { gf119_disp_intr_supervisor() 381 if (!(mask[head] & 0x00001000)) gf119_disp_intr_supervisor() 383 nvkm_debug(subdev, "supervisor 1.0 - head %d\n", head); gf119_disp_intr_supervisor() 384 gf119_disp_intr_unk1_0(disp, head); gf119_disp_intr_supervisor() 388 for (head = 0; head < disp->base.head.nr; head++) { gf119_disp_intr_supervisor() 389 if (!(mask[head] & 0x00001000)) gf119_disp_intr_supervisor() 391 nvkm_debug(subdev, "supervisor 2.0 - head %d\n", head); gf119_disp_intr_supervisor() 392 gf119_disp_intr_unk2_0(disp, head); gf119_disp_intr_supervisor() 394 for (head = 0; head < disp->base.head.nr; head++) { gf119_disp_intr_supervisor() 395 if (!(mask[head] & 0x00010000)) gf119_disp_intr_supervisor() 397 nvkm_debug(subdev, "supervisor 2.1 - head %d\n", head); gf119_disp_intr_supervisor() 398 gf119_disp_intr_unk2_1(disp, head); gf119_disp_intr_supervisor() 400 for (head = 0; head < disp->base.head.nr; head++) { gf119_disp_intr_supervisor() 401 if (!(mask[head] & 0x00001000)) gf119_disp_intr_supervisor() 403 nvkm_debug(subdev, "supervisor 2.2 - head %d\n", head); gf119_disp_intr_supervisor() 404 gf119_disp_intr_unk2_2(disp, head); gf119_disp_intr_supervisor() 408 for (head = 0; head < disp->base.head.nr; head++) { gf119_disp_intr_supervisor() 409 if (!(mask[head] & 0x00001000)) gf119_disp_intr_supervisor() 411 nvkm_debug(subdev, "supervisor 3.0 - head %d\n", head); gf119_disp_intr_supervisor() 412 gf119_disp_intr_unk4_0(disp, head); gf119_disp_intr_supervisor() 416 for (head = 0; head < disp->base.head.nr; head++) gf119_disp_intr_supervisor() 417 nvkm_wr32(device, 0x6101d4 + (head * 0x800), 0x00000000); gf119_disp_intr_supervisor() 490 for (i = 0; i < disp->base.head.nr; i++) { gf119_disp_intr() 516 .head.vblank_init = gf119_disp_vblank_init, 517 .head.vblank_fini = gf119_disp_vblank_fini, 518 .head.scanoutpos = gf119_disp_root_scanoutpos,
|
H A D | vga.c | 27 nvkm_rdport(struct nvkm_device *device, int head, u16 port) nvkm_rdport() argument 35 return nvkm_rd08(device, 0x601000 + (head * 0x2000) + port); nvkm_rdport() 41 head = 0; /* CR44 selects head */ nvkm_rdport() 42 return nvkm_rd08(device, 0x0c0000 + (head * 0x2000) + port); nvkm_rdport() 49 nvkm_wrport(struct nvkm_device *device, int head, u16 port, u8 data) nvkm_wrport() argument 57 nvkm_wr08(device, 0x601000 + (head * 0x2000) + port, data); nvkm_wrport() 63 head = 0; /* CR44 selects head */ nvkm_wrport() 64 nvkm_wr08(device, 0x0c0000 + (head * 0x2000) + port, data); nvkm_wrport() 69 nvkm_rdvgas(struct nvkm_device *device, int head, u8 index) nvkm_rdvgas() argument 71 nvkm_wrport(device, head, 0x03c4, index); nvkm_rdvgas() 72 return nvkm_rdport(device, head, 0x03c5); nvkm_rdvgas() 76 nvkm_wrvgas(struct nvkm_device *device, int head, u8 index, u8 value) nvkm_wrvgas() argument 78 nvkm_wrport(device, head, 0x03c4, index); nvkm_wrvgas() 79 nvkm_wrport(device, head, 0x03c5, value); nvkm_wrvgas() 83 nvkm_rdvgag(struct nvkm_device *device, int head, u8 index) nvkm_rdvgag() argument 85 nvkm_wrport(device, head, 0x03ce, index); nvkm_rdvgag() 86 return nvkm_rdport(device, head, 0x03cf); nvkm_rdvgag() 90 nvkm_wrvgag(struct nvkm_device *device, int head, u8 index, u8 value) nvkm_wrvgag() argument 92 nvkm_wrport(device, head, 0x03ce, index); nvkm_wrvgag() 93 nvkm_wrport(device, head, 0x03cf, value); nvkm_wrvgag() 97 nvkm_rdvgac(struct nvkm_device *device, int head, u8 index) nvkm_rdvgac() argument 99 nvkm_wrport(device, head, 0x03d4, index); nvkm_rdvgac() 100 return nvkm_rdport(device, head, 0x03d5); nvkm_rdvgac() 104 nvkm_wrvgac(struct nvkm_device *device, int head, u8 index, u8 value) nvkm_wrvgac() argument 106 nvkm_wrport(device, head, 0x03d4, index); nvkm_wrvgac() 107 nvkm_wrport(device, head, 0x03d5, value); nvkm_wrvgac() 111 nvkm_rdvgai(struct nvkm_device *device, int head, u16 port, u8 index) nvkm_rdvgai() argument 113 if (port == 0x03c4) return nvkm_rdvgas(device, head, index); nvkm_rdvgai() 114 if (port == 0x03ce) return nvkm_rdvgag(device, head, index); nvkm_rdvgai() 115 if (port == 0x03d4) return nvkm_rdvgac(device, head, index); nvkm_rdvgai() 120 nvkm_wrvgai(struct nvkm_device *device, int head, u16 port, u8 index, u8 value) nvkm_wrvgai() argument 122 if (port == 0x03c4) nvkm_wrvgas(device, head, index, value); nvkm_wrvgai() 123 else if (port == 0x03ce) nvkm_wrvgag(device, head, index, value); nvkm_wrvgai() 124 else if (port == 0x03d4) nvkm_wrvgac(device, head, index, value); nvkm_wrvgai() 143 /* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied) 147 * expected and values can be set for the appropriate head by using a 0x2000 150 * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and 151 * cr44 must be set to 0 or 3 for accessing values on the correct head 153 * b) in tied mode (4) head B is programmed to the values set on head A, and 154 * access using the head B addresses can have strange results, ergo we leave 158 * 0 and 1 are treated as head values and so the set value is (owner * 3)
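Illustrative only: per the comment above, CR44 is an extended CRTC register reached through the 0x03d4/0x03d5 index/data pair that nvkm_rdvgac()/nvkm_wrvgac() wrap, taking 0 (head A), 3 (head B), or 4 (heads tied); whether tying heads is appropriate depends on hardware and driver state, so this is a sketch, not a recipe:

static void demo_tie_heads(struct nvkm_device *device)
{
	u8 owner = nvkm_rdvgac(device, 0, 0x44);	/* CR44 lives on head A */

	if (owner != 4)					/* 4 == heads tied */
		nvkm_wrvgac(device, 0, 0x44, 4);
}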
|
H A D | cursnv50.c | 44 int head, ret; nv50_disp_curs_new() local 48 nvif_ioctl(parent, "create disp cursor vers %d head %d\n", nv50_disp_curs_new() 49 args->v0.version, args->v0.head); nv50_disp_curs_new() 50 if (args->v0.head > disp->base.head.nr) nv50_disp_curs_new() 52 head = args->v0.head; nv50_disp_curs_new() 56 return nv50_disp_chan_new_(func, mthd, root, chid + head, nv50_disp_curs_new() 57 head, oclass, pobject); nv50_disp_curs_new()
|
H A D | oimmnv50.c | 44 int head, ret; nv50_disp_oimm_new() local 48 nvif_ioctl(parent, "create disp overlay vers %d head %d\n", nv50_disp_oimm_new() 49 args->v0.version, args->v0.head); nv50_disp_oimm_new() 50 if (args->v0.head > disp->base.head.nr) nv50_disp_oimm_new() 52 head = args->v0.head; nv50_disp_oimm_new() 56 return nv50_disp_chan_new_(func, mthd, root, chid + head, nv50_disp_oimm_new() 57 head, oclass, pobject); nv50_disp_oimm_new()
|
H A D | nv50.c | 100 nv50_disp_vblank_fini_(struct nvkm_disp *base, int head) nv50_disp_vblank_fini_() argument 103 disp->func->head.vblank_fini(disp, head); nv50_disp_vblank_fini_() 107 nv50_disp_vblank_init_(struct nvkm_disp *base, int head) nv50_disp_vblank_init_() argument 110 disp->func->head.vblank_init(disp, head); nv50_disp_vblank_init_() 139 .head.vblank_init = nv50_disp_vblank_init_, 140 .head.vblank_fini = nv50_disp_vblank_fini_, 164 nv50_disp_vblank_fini(struct nv50_disp *disp, int head) nv50_disp_vblank_fini() argument 167 nvkm_mask(device, 0x61002c, (4 << head), 0); nv50_disp_vblank_fini() 171 nv50_disp_vblank_init(struct nv50_disp *disp, int head) nv50_disp_vblank_init() argument 174 nvkm_mask(device, 0x61002c, (4 << head), (4 << head)); nv50_disp_vblank_init() 227 exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl, exec_lookup() argument 267 mask |= 0x0100 << head; exec_lookup() 269 list_for_each_entry(outp, &disp->base.outp, head) { exec_lookup() 285 exec_script(struct nv50_disp *disp, int head, int id) exec_script() argument 298 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++) exec_script() 302 if (!(ctrl & (1 << head))) { exec_script() 310 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++) exec_script() 316 if (!(ctrl & (1 << head))) { exec_script() 317 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++) exec_script() 322 if (!(ctrl & (1 << head))) exec_script() 326 outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info); exec_script() 333 .crtc = head, exec_script() 344 exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) exec_clkcmp() argument 358 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++) exec_clkcmp() 362 if (!(ctrl & (1 << head))) { exec_clkcmp() 370 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++) exec_clkcmp() 376 if (!(ctrl & (1 << head))) { exec_clkcmp() 377 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++) exec_clkcmp() 382 if (!(ctrl & (1 << head))) exec_clkcmp() 386 outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1); exec_clkcmp() 422 .crtc = head, exec_clkcmp() 434 nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head) nv50_disp_intr_unk10_0() argument 436 exec_script(disp, head, 1); nv50_disp_intr_unk10_0() 440 nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head) nv50_disp_intr_unk20_0() argument 443 struct nvkm_output *outp = exec_script(disp, head, 2); nv50_disp_intr_unk20_0() 464 .crtc = head, nv50_disp_intr_unk20_0() 475 nv50_disp_intr_unk20_1(struct nv50_disp *disp, int head) nv50_disp_intr_unk20_1() argument 479 u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff; nv50_disp_intr_unk20_1() 481 nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head, pclk); nv50_disp_intr_unk20_1() 485 nv50_disp_intr_unk20_2_dp(struct nv50_disp *disp, int head, nv50_disp_intr_unk20_2_dp() argument 496 const s32 vactive = nvkm_rd32(device, 0x610af8 + (head * 0x540)) & 0xffff; nv50_disp_intr_unk20_2_dp() 497 const s32 vblanke = nvkm_rd32(device, 0x610ae8 + (head * 0x540)) & 0xffff; nv50_disp_intr_unk20_2_dp() 498 const s32 vblanks = nvkm_rd32(device, 0x610af0 + (head * 0x540)) & 0xffff; nv50_disp_intr_unk20_2_dp() 606 nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head) nv50_disp_intr_unk20_2() argument 610 u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff; nv50_disp_intr_unk20_2() 611 u32 hval, hreg = 0x614200 + (head * 0x800); nv50_disp_intr_unk20_2() 
615 outp = exec_clkcmp(disp, head, 0xff, pclk, &conf); nv50_disp_intr_unk20_2() 662 exec_clkcmp(disp, head, 0, pclk, &conf); nv50_disp_intr_unk20_2() 672 nv50_disp_intr_unk20_2_dp(disp, head, &outp->info, pclk); nv50_disp_intr_unk20_2() 714 nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head) nv50_disp_intr_unk40_0() argument 718 u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff; nv50_disp_intr_unk40_0() 721 outp = exec_clkcmp(disp, head, 1, pclk, &conf); nv50_disp_intr_unk40_0() 737 int head; nv50_disp_intr_supervisor() local 743 for (head = 0; head < disp->base.head.nr; head++) { nv50_disp_intr_supervisor() 744 if (!(super & (0x00000020 << head))) nv50_disp_intr_supervisor() 746 if (!(super & (0x00000080 << head))) nv50_disp_intr_supervisor() 748 nv50_disp_intr_unk10_0(disp, head); nv50_disp_intr_supervisor() 752 for (head = 0; head < disp->base.head.nr; head++) { nv50_disp_intr_supervisor() 753 if (!(super & (0x00000080 << head))) nv50_disp_intr_supervisor() 755 nv50_disp_intr_unk20_0(disp, head); nv50_disp_intr_supervisor() 757 for (head = 0; head < disp->base.head.nr; head++) { nv50_disp_intr_supervisor() 758 if (!(super & (0x00000200 << head))) nv50_disp_intr_supervisor() 760 nv50_disp_intr_unk20_1(disp, head); nv50_disp_intr_supervisor() 762 for (head = 0; head < disp->base.head.nr; head++) { nv50_disp_intr_supervisor() 763 if (!(super & (0x00000080 << head))) nv50_disp_intr_supervisor() 765 nv50_disp_intr_unk20_2(disp, head); nv50_disp_intr_supervisor() 769 for (head = 0; head < disp->base.head.nr; head++) { nv50_disp_intr_supervisor() 770 if (!(super & (0x00000080 << head))) nv50_disp_intr_supervisor() 772 nv50_disp_intr_unk40_0(disp, head); nv50_disp_intr_supervisor() 821 .head.vblank_init = nv50_disp_vblank_init, 822 .head.vblank_fini = nv50_disp_vblank_fini, 823 .head.scanoutpos = nv50_disp_root_scanoutpos,
|
H A D | basenv50.c | 44 int head, ret; nv50_disp_base_new() local 50 "pushbuf %016llx head %d\n", nv50_disp_base_new() 51 args->v0.version, args->v0.pushbuf, args->v0.head); nv50_disp_base_new() 52 if (args->v0.head > disp->base.head.nr) nv50_disp_base_new() 55 head = args->v0.head; nv50_disp_base_new() 59 return nv50_disp_dmac_new_(func, mthd, root, chid + head, nv50_disp_base_new() 60 head, push, oclass, pobject); nv50_disp_base_new()
|
H A D | ovlynv50.c | 44 int head, ret; nv50_disp_ovly_new() local 50 "pushbuf %016llx head %d\n", nv50_disp_ovly_new() 51 args->v0.version, args->v0.pushbuf, args->v0.head); nv50_disp_ovly_new() 52 if (args->v0.head > disp->base.head.nr) nv50_disp_ovly_new() 55 head = args->v0.head; nv50_disp_ovly_new() 59 return nv50_disp_dmac_new_(func, mthd, root, chid + head, nv50_disp_ovly_new() 60 head, push, oclass, pobject); nv50_disp_ovly_new()
|
H A D | base.c | 39 nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int head) nvkm_disp_vblank_fini() argument 42 disp->func->head.vblank_fini(disp, head); nvkm_disp_vblank_fini() 46 nvkm_disp_vblank_init(struct nvkm_event *event, int type, int head) nvkm_disp_vblank_init() argument 49 disp->func->head.vblank_init(disp, head); nvkm_disp_vblank_init() 65 if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) { nvkm_disp_vblank_ctor() 67 notify->index = req->v0.head; nvkm_disp_vblank_ctor() 83 nvkm_disp_vblank(struct nvkm_disp *disp, int head) nvkm_disp_vblank() argument 86 nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep)); nvkm_disp_vblank() 103 list_for_each_entry(outp, &disp->outp, head) { nvkm_disp_hpd_ctor() 215 list_for_each_entry(outp, &disp->outp, head) { nvkm_disp_fini() 219 list_for_each_entry(conn, &disp->conn, head) { nvkm_disp_fini() 233 list_for_each_entry(conn, &disp->conn, head) { nvkm_disp_init() 237 list_for_each_entry(outp, &disp->outp, head) { nvkm_disp_init() 259 outp = list_first_entry(&disp->outp, typeof(*outp), head); nvkm_disp_dtor() 260 list_del(&outp->head); nvkm_disp_dtor() 265 conn = list_first_entry(&disp->conn, typeof(*conn), head); nvkm_disp_dtor() 266 list_del(&conn->head); nvkm_disp_dtor() 298 disp->head.nr = heads; nvkm_disp_ctor() 357 list_add_tail(&outp->head, &disp->outp); nvkm_disp_ctor() 362 list_for_each_entry_safe(outp, outt, &disp->outp, head) { nvkm_disp_ctor() 376 list_for_each_entry(pair, &disp->outp, head) { nvkm_disp_ctor() 396 list_for_each_entry(conn, &disp->conn, head) { nvkm_disp_ctor() 413 list_del(&outp->head); nvkm_disp_ctor() 418 list_add_tail(&outp->conn->head, &disp->conn); nvkm_disp_ctor()
|
H A D | nv04.c | 33 nv04_disp_vblank_init(struct nvkm_disp *disp, int head) nv04_disp_vblank_init() argument 36 nvkm_wr32(device, 0x600140 + (head * 0x2000) , 0x00000001); nv04_disp_vblank_init() 40 nv04_disp_vblank_fini(struct nvkm_disp *disp, int head) nv04_disp_vblank_fini() argument 43 nvkm_wr32(device, 0x600140 + (head * 0x2000) , 0x00000000); nv04_disp_vblank_fini() 77 .head.vblank_init = nv04_disp_vblank_init, 78 .head.vblank_fini = nv04_disp_vblank_fini,
|
H A D | rootnv50.c | 38 const u32 blanke = nvkm_rd32(device, 0x610aec + (head * 0x540)); nv50_disp_root_scanoutpos() 39 const u32 blanks = nvkm_rd32(device, 0x610af4 + (head * 0x540)); nv50_disp_root_scanoutpos() 40 const u32 total = nvkm_rd32(device, 0x610afc + (head * 0x540)); nv50_disp_root_scanoutpos() 58 nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff; nv50_disp_root_scanoutpos() 61 nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff; nv50_disp_root_scanoutpos() 81 int head, ret; nv50_disp_root_mthd_() local 88 nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n", nv50_disp_root_mthd_() 89 args->v0.version, args->v0.method, args->v0.head); nv50_disp_root_mthd_() 91 head = args->v0.head; nv50_disp_root_mthd_() 101 head = ffs((mask >> 8) & 0x0f) - 1; nv50_disp_root_mthd_() 105 if (head < 0 || head >= disp->base.head.nr) nv50_disp_root_mthd_() 109 list_for_each_entry(temp, &disp->base.outp, head) { nv50_disp_root_mthd_() 122 return func->head.scanoutpos(object, disp, data, size, head); nv50_disp_root_mthd_() 129 return func->dac.power(object, disp, data, size, head, outp); nv50_disp_root_mthd_() 131 return func->dac.sense(object, disp, data, size, head, outp); nv50_disp_root_mthd_() 133 return func->sor.power(object, disp, data, size, head, outp); nv50_disp_root_mthd_() 137 return func->sor.hda_eld(object, disp, data, size, head, outp); nv50_disp_root_mthd_() 141 return func->sor.hdmi(object, disp, data, size, head, outp); nv50_disp_root_mthd_() 183 return func->pior.power(object, disp, data, size, head, outp); nv50_disp_root_mthd_() 321 for (i = 0; i < disp->base.head.nr; i++) { nv50_disp_root_init()
|
H A D | priv.h | 11 void nvkm_disp_vblank(struct nvkm_disp *, int head); 33 void (*vblank_init)(struct nvkm_disp *, int head); 34 void (*vblank_fini)(struct nvkm_disp *, int head); 35 } head; member in struct:nvkm_disp_func
|
H A D | rootnv04.c | 39 void *data, u32 size, int head) nv04_disp_scanoutpos() 43 const u32 hoff = head * 0x2000; nv04_disp_scanoutpos() 88 int head, ret; nv04_disp_mthd() local 92 nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n", nv04_disp_mthd() 93 args->v0.version, args->v0.method, args->v0.head); nv04_disp_mthd() 95 head = args->v0.head; nv04_disp_mthd() 99 if (head < 0 || head >= 2) nv04_disp_mthd() 104 return nv04_disp_scanoutpos(root, data, size, head); nv04_disp_mthd() 38 nv04_disp_scanoutpos(struct nv04_disp_root *root, void *data, u32 size, int head) nv04_disp_scanoutpos() argument
|
H A D | nv50.h | 10 #define NV50_DISP_MTHD_V0 NV50_DISP_MTHD_, int head 11 #define NV50_DISP_MTHD_V1 NV50_DISP_MTHD_, int head, struct nvkm_output *outp 78 void (*vblank_init)(struct nv50_disp *, int head); 79 void (*vblank_fini)(struct nv50_disp *, int head); 81 } head; member in struct:nv50_disp_func
|
H A D | g84.c | 33 .head.vblank_init = nv50_disp_vblank_init, 34 .head.vblank_fini = nv50_disp_vblank_fini, 35 .head.scanoutpos = nv50_disp_root_scanoutpos,
|
H A D | g94.c | 33 .head.vblank_init = nv50_disp_vblank_init, 34 .head.vblank_fini = nv50_disp_vblank_fini, 35 .head.scanoutpos = nv50_disp_root_scanoutpos,
|
H A D | gk104.c | 33 .head.vblank_init = gf119_disp_vblank_init, 34 .head.vblank_fini = gf119_disp_vblank_fini, 35 .head.scanoutpos = gf119_disp_root_scanoutpos,
|
H A D | gk110.c | 33 .head.vblank_init = gf119_disp_vblank_init, 34 .head.vblank_fini = gf119_disp_vblank_fini, 35 .head.scanoutpos = gf119_disp_root_scanoutpos,
|
H A D | gm107.c | 33 .head.vblank_init = gf119_disp_vblank_init, 34 .head.vblank_fini = gf119_disp_vblank_fini, 35 .head.scanoutpos = gf119_disp_root_scanoutpos,
|
H A D | gm204.c | 33 .head.vblank_init = gf119_disp_vblank_init, 34 .head.vblank_fini = gf119_disp_vblank_fini, 35 .head.scanoutpos = gf119_disp_root_scanoutpos,
|
H A D | gt200.c | 33 .head.vblank_init = nv50_disp_vblank_init, 34 .head.vblank_fini = nv50_disp_vblank_fini, 35 .head.scanoutpos = nv50_disp_root_scanoutpos,
|
/linux-4.4.14/drivers/gpu/drm/nouveau/dispnv04/ |
H A D | hw.c | 39 NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value) NVWriteVgaSeq() argument 41 NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index); NVWriteVgaSeq() 42 NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value); NVWriteVgaSeq() 46 NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index) NVReadVgaSeq() argument 48 NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index); NVReadVgaSeq() 49 return NVReadPRMVIO(dev, head, NV_PRMVIO_SR); NVReadVgaSeq() 53 NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value) NVWriteVgaGr() argument 55 NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index); NVWriteVgaGr() 56 NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value); NVWriteVgaGr() 60 NVReadVgaGr(struct drm_device *dev, int head, uint8_t index) NVReadVgaGr() argument 62 NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index); NVReadVgaGr() 63 return NVReadPRMVIO(dev, head, NV_PRMVIO_GX); NVReadVgaGr() 66 /* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied) 70 * expected and values can be set for the appropriate head by using a 0x2000 73 * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and 74 * cr44 must be set to 0 or 3 for accessing values on the correct head 76 * b) in tied mode (4) head B is programmed to the values set on head A, and 77 * access using the head B addresses can have strange results, ergo we leave 81 * 0 and 1 are treated as head values and so the set value is (owner * 3) 110 NVBlankScreen(struct drm_device *dev, int head, bool blank) NVBlankScreen() argument 115 NVSetOwner(dev, head); NVBlankScreen() 117 seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX); NVBlankScreen() 119 NVVgaSeqReset(dev, head, true); NVBlankScreen() 121 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20); NVBlankScreen() 123 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20); NVBlankScreen() 124 NVVgaSeqReset(dev, head, false); NVBlankScreen() 246 nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head) nouveau_hw_fix_bad_vpll() argument 248 /* the vpll on an unused head can come up with a random value, way nouveau_hw_fix_bad_vpll() 260 enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0; nouveau_hw_fix_bad_vpll() 271 NV_WARN(drm, "VPLL %d outwith limits, attempting to fix\n", head + 1); nouveau_hw_fix_bad_vpll() 373 rd_cio_state(struct drm_device *dev, int head, rd_cio_state() argument 376 crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index); rd_cio_state() 380 wr_cio_state(struct drm_device *dev, int head, wr_cio_state() argument 383 NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]); wr_cio_state() 387 nv_save_state_ramdac(struct drm_device *dev, int head, nv_save_state_ramdac() argument 391 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; nv_save_state_ramdac() 395 regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); nv_save_state_ramdac() 397 nouveau_hw_get_pllvals(dev, head ? 
PLL_VPLL1 : PLL_VPLL0, ®p->pllvals); nv_save_state_ramdac() 402 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11); nv_save_state_ramdac() 404 regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL); nv_save_state_ramdac() 407 regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630); nv_save_state_ramdac() 409 regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634); nv_save_state_ramdac() 411 regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP); nv_save_state_ramdac() 412 regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL); nv_save_state_ramdac() 413 regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW); nv_save_state_ramdac() 414 regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY); nv_save_state_ramdac() 415 regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL); nv_save_state_ramdac() 416 regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW); nv_save_state_ramdac() 417 regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY); nv_save_state_ramdac() 418 regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2); nv_save_state_ramdac() 422 regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg); nv_save_state_ramdac() 423 regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20); nv_save_state_ramdac() 427 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER); nv_save_state_ramdac() 429 regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4); nv_save_state_ramdac() 430 regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4); nv_save_state_ramdac() 434 regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); nv_save_state_ramdac() 435 regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0); nv_save_state_ramdac() 436 if (!nv_gf4_disp_arch(dev) && head == 0) { nv_save_state_ramdac() 438 * the head A FPCLK on (nv11 even locks up) */ nv_save_state_ramdac() 442 regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1); nv_save_state_ramdac() 443 regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2); nv_save_state_ramdac() 445 regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR); nv_save_state_ramdac() 448 regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0); nv_save_state_ramdac() 451 regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20); nv_save_state_ramdac() 452 regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24); nv_save_state_ramdac() 453 regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34); nv_save_state_ramdac() 456 regp->ctv_regs[i] = NVReadRAMDAC(dev, head, nv_save_state_ramdac() 462 nv_load_state_ramdac(struct drm_device *dev, int head, nv_load_state_ramdac() argument 467 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; nv_load_state_ramdac() 468 uint32_t pllreg = head ? 
NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; nv_load_state_ramdac() 472 NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync); nv_load_state_ramdac() 479 NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither); nv_load_state_ramdac() 481 NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl); nv_load_state_ramdac() 484 NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630); nv_load_state_ramdac() 486 NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634); nv_load_state_ramdac() 488 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup); nv_load_state_ramdac() 489 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal); nv_load_state_ramdac() 490 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew); nv_load_state_ramdac() 491 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay); nv_load_state_ramdac() 492 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal); nv_load_state_ramdac() 493 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew); nv_load_state_ramdac() 494 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay); nv_load_state_ramdac() 495 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2); nv_load_state_ramdac() 500 NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]); nv_load_state_ramdac() 501 NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]); nv_load_state_ramdac() 505 NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither); nv_load_state_ramdac() 507 NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]); nv_load_state_ramdac() 508 NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]); nv_load_state_ramdac() 512 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control); nv_load_state_ramdac() 513 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0); nv_load_state_ramdac() 514 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1); nv_load_state_ramdac() 515 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2); nv_load_state_ramdac() 517 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color); nv_load_state_ramdac() 520 NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0); nv_load_state_ramdac() 523 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20); nv_load_state_ramdac() 524 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24); nv_load_state_ramdac() 525 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34); nv_load_state_ramdac() 528 NVWriteRAMDAC(dev, head, nv_load_state_ramdac() 534 nv_save_state_vga(struct drm_device *dev, int head, nv_save_state_vga() argument 537 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; nv_save_state_vga() 540 regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ); nv_save_state_vga() 543 rd_cio_state(dev, head, regp, i); nv_save_state_vga() 545 NVSetEnablePalette(dev, head, true); nv_save_state_vga() 547 regp->Attribute[i] = NVReadVgaAttr(dev, head, i); nv_save_state_vga() 548 NVSetEnablePalette(dev, head, false); nv_save_state_vga() 551 regp->Graphics[i] = NVReadVgaGr(dev, head, i); nv_save_state_vga() 554 regp->Sequencer[i] = NVReadVgaSeq(dev, head, i); nv_save_state_vga() 558 nv_load_state_vga(struct drm_device *dev, int head, nv_load_state_vga() argument 561 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; nv_load_state_vga() 564 NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg); nv_load_state_vga() 
567 NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]); nv_load_state_vga() 569 nv_lock_vga_crtc_base(dev, head, false); nv_load_state_vga() 571 wr_cio_state(dev, head, regp, i); nv_load_state_vga() 572 nv_lock_vga_crtc_base(dev, head, true); nv_load_state_vga() 575 NVWriteVgaGr(dev, head, i, regp->Graphics[i]); nv_load_state_vga() 577 NVSetEnablePalette(dev, head, true); nv_load_state_vga() 579 NVWriteVgaAttr(dev, head, i, regp->Attribute[i]); nv_load_state_vga() 580 NVSetEnablePalette(dev, head, false); nv_load_state_vga() 584 nv_save_state_ext(struct drm_device *dev, int head, nv_save_state_ext() argument 588 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; nv_save_state_ext() 591 rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX); nv_save_state_ext() 592 rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX); nv_save_state_ext() 593 rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX); nv_save_state_ext() 594 rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX); nv_save_state_ext() 595 rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX); nv_save_state_ext() 596 rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX); nv_save_state_ext() 597 rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX); nv_save_state_ext() 599 rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); nv_save_state_ext() 600 rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); nv_save_state_ext() 601 rd_cio_state(dev, head, regp, NV_CIO_CRE_21); nv_save_state_ext() 604 rd_cio_state(dev, head, regp, NV_CIO_CRE_47); nv_save_state_ext() 607 rd_cio_state(dev, head, regp, 0x9f); nv_save_state_ext() 609 rd_cio_state(dev, head, regp, NV_CIO_CRE_49); nv_save_state_ext() 610 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); nv_save_state_ext() 611 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); nv_save_state_ext() 612 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); nv_save_state_ext() 613 rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); nv_save_state_ext() 616 regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830); nv_save_state_ext() 617 regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834); nv_save_state_ext() 620 regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT); nv_save_state_ext() 623 regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850); nv_save_state_ext() 626 regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL); nv_save_state_ext() 627 regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG); nv_save_state_ext() 630 regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG); nv_save_state_ext() 632 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); nv_save_state_ext() 633 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); nv_save_state_ext() 635 rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); nv_save_state_ext() 636 rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB); nv_save_state_ext() 637 rd_cio_state(dev, head, regp, NV_CIO_CRE_4B); nv_save_state_ext() 638 rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY); nv_save_state_ext() 642 rd_cio_state(dev, head, regp, NV_CIO_CRE_42); nv_save_state_ext() 643 rd_cio_state(dev, head, regp, NV_CIO_CRE_53); nv_save_state_ext() 644 rd_cio_state(dev, head, regp, NV_CIO_CRE_54); nv_save_state_ext() 647 regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i); nv_save_state_ext() 648 rd_cio_state(dev, head, regp, NV_CIO_CRE_59); nv_save_state_ext() 649 rd_cio_state(dev, head, regp, NV_CIO_CRE_5B); nv_save_state_ext() 651 rd_cio_state(dev, head, regp, NV_CIO_CRE_85); nv_save_state_ext() 652 rd_cio_state(dev, 
head, regp, NV_CIO_CRE_86); nv_save_state_ext() 655 regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START); nv_save_state_ext() 659 nv_load_state_ext(struct drm_device *dev, int head, nv_load_state_ext() argument 664 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; nv_load_state_ext() 674 NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl); nv_load_state_ext() 686 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); nv_load_state_ext() 687 NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830); nv_load_state_ext() 688 NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834); nv_load_state_ext() 691 NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext); nv_load_state_ext() 694 NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); nv_load_state_ext() 696 reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); nv_load_state_ext() 698 NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000); nv_load_state_ext() 700 NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000); nv_load_state_ext() 704 NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg); nv_load_state_ext() 706 wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX); nv_load_state_ext() 707 wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX); nv_load_state_ext() 708 wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX); nv_load_state_ext() 709 wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX); nv_load_state_ext() 710 wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX); nv_load_state_ext() 711 wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX); nv_load_state_ext() 712 wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX); nv_load_state_ext() 713 wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); nv_load_state_ext() 714 wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); nv_load_state_ext() 717 wr_cio_state(dev, head, regp, NV_CIO_CRE_47); nv_load_state_ext() 720 wr_cio_state(dev, head, regp, 0x9f); nv_load_state_ext() 722 wr_cio_state(dev, head, regp, NV_CIO_CRE_49); nv_load_state_ext() 723 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); nv_load_state_ext() 724 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); nv_load_state_ext() 725 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); nv_load_state_ext() 727 nv_fix_nv40_hw_cursor(dev, head); nv_load_state_ext() 728 wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); nv_load_state_ext() 730 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); nv_load_state_ext() 731 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); nv_load_state_ext() 733 wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); nv_load_state_ext() 734 wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); nv_load_state_ext() 735 wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); nv_load_state_ext() 736 wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY); nv_load_state_ext() 753 wr_cio_state(dev, head, regp, NV_CIO_CRE_42); nv_load_state_ext() 754 wr_cio_state(dev, head, regp, NV_CIO_CRE_53); nv_load_state_ext() 755 wr_cio_state(dev, head, regp, NV_CIO_CRE_54); nv_load_state_ext() 758 NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]); nv_load_state_ext() 759 wr_cio_state(dev, head, regp, NV_CIO_CRE_59); nv_load_state_ext() 760 wr_cio_state(dev, head, regp, NV_CIO_CRE_5B); nv_load_state_ext() 762 wr_cio_state(dev, head, regp, NV_CIO_CRE_85); nv_load_state_ext() 763 wr_cio_state(dev, head, regp, NV_CIO_CRE_86); nv_load_state_ext() 766 NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start); nv_load_state_ext() 770 nv_save_state_palette(struct 
drm_device *dev, int head, nv_save_state_palette() argument 774 int head_offset = head * NV_PRMDIO_SIZE, i; nv_save_state_palette() 781 state->crtc_reg[head].DAC[i] = nvif_rd08(device, nv_save_state_palette() 785 NVSetEnablePalette(dev, head, false); nv_save_state_palette() 789 nouveau_hw_load_state_palette(struct drm_device *dev, int head, nouveau_hw_load_state_palette() argument 793 int head_offset = head * NV_PRMDIO_SIZE, i; nouveau_hw_load_state_palette() 801 state->crtc_reg[head].DAC[i]); nouveau_hw_load_state_palette() 804 NVSetEnablePalette(dev, head, false); nouveau_hw_load_state_palette() 807 void nouveau_hw_save_state(struct drm_device *dev, int head, nouveau_hw_save_state() argument 814 nouveau_hw_fix_bad_vpll(dev, head); nouveau_hw_save_state() 815 nv_save_state_ramdac(dev, head, state); nouveau_hw_save_state() 816 nv_save_state_vga(dev, head, state); nouveau_hw_save_state() 817 nv_save_state_palette(dev, head, state); nouveau_hw_save_state() 818 nv_save_state_ext(dev, head, state); nouveau_hw_save_state() 821 void nouveau_hw_load_state(struct drm_device *dev, int head, nouveau_hw_load_state() argument 824 NVVgaProtect(dev, head, true); nouveau_hw_load_state() 825 nv_load_state_ramdac(dev, head, state); nouveau_hw_load_state() 826 nv_load_state_ext(dev, head, state); nouveau_hw_load_state() 827 nouveau_hw_load_state_palette(dev, head, state); nouveau_hw_load_state() 828 nv_load_state_vga(dev, head, state); nouveau_hw_load_state() 829 NVVgaProtect(dev, head, false); nouveau_hw_load_state()
|
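The two paths above are deliberately asymmetric: state is captured as ramdac, vga, palette, ext, but reloaded under NVVgaProtect() in the order ramdac, ext, palette, vga, so the extended CRTC state is in place before the VGA core is touched. A minimal sketch of a save/restore cycle around a mode change, assuming the nv04_mode_state container declared in hw.h and a two-head card; error handling and locking are omitted:

    /* Hedged sketch only: mirrors nouveau_hw_save_state() /
     * nouveau_hw_load_state() above. */
    static void example_save_restore(struct drm_device *dev,
                                     struct nv04_mode_state *saved)
    {
        int head;

        for (head = 0; head < 2; head++)
            nouveau_hw_save_state(dev, head, saved);

        /* ... reprogram clocks/mode here ... */

        for (head = 0; head < 2; head++)
            nouveau_hw_load_state(dev, head, saved);  /* wrapped in NVVgaProtect() */
    }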
H A D | hw.h | 38 void NVWriteVgaSeq(struct drm_device *, int head, uint8_t index, uint8_t value); 39 uint8_t NVReadVgaSeq(struct drm_device *, int head, uint8_t index); 40 void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value); 41 uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index); 43 void NVBlankScreen(struct drm_device *, int head, bool blank); 49 void nouveau_hw_save_state(struct drm_device *, int head, 51 void nouveau_hw_load_state(struct drm_device *, int head, 53 void nouveau_hw_load_state_palette(struct drm_device *, int head, 61 int head, uint32_t reg) NVReadCRTC() 65 if (head) NVReadCRTC() 72 int head, uint32_t reg, uint32_t val) NVWriteCRTC() 75 if (head) NVWriteCRTC() 81 int head, uint32_t reg) NVReadRAMDAC() 85 if (head) NVReadRAMDAC() 92 int head, uint32_t reg, uint32_t val) NVWriteRAMDAC() 95 if (head) NVWriteRAMDAC() 121 int head, uint8_t index, uint8_t value) NVWriteVgaCrtc() 124 nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); NVWriteVgaCrtc() 125 nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value); NVWriteVgaCrtc() 129 int head, uint8_t index) NVReadVgaCrtc() 133 nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); NVReadVgaCrtc() 134 val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); NVReadVgaCrtc() 140 * per-head variables around 153 NVWriteVgaCrtc5758(struct drm_device *dev, int head, uint8_t index, uint8_t value) NVWriteVgaCrtc5758() argument 155 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index); NVWriteVgaCrtc5758() 156 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_58, value); NVWriteVgaCrtc5758() 159 static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_t index) NVReadVgaCrtc5758() argument 161 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index); NVReadVgaCrtc5758() 162 return NVReadVgaCrtc(dev, head, NV_CIO_CRE_58); NVReadVgaCrtc5758() 166 int head, uint32_t reg) NVReadPRMVIO() 173 * NVSetOwner for the relevant head to be programmed */ NVReadPRMVIO() 174 if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) NVReadPRMVIO() 182 int head, uint32_t reg, uint8_t value) NVWritePRMVIO() 188 * NVSetOwner for the relevant head to be programmed */ NVWritePRMVIO() 189 if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) NVWritePRMVIO() 195 static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) NVSetEnablePalette() argument 198 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); NVSetEnablePalette() 199 nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 
0 : 0x20); NVSetEnablePalette() 202 static inline bool NVGetEnablePalette(struct drm_device *dev, int head) NVGetEnablePalette() argument 205 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); NVGetEnablePalette() 206 return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); NVGetEnablePalette() 210 int head, uint8_t index, uint8_t value) NVWriteVgaAttr() 213 if (NVGetEnablePalette(dev, head)) NVWriteVgaAttr() 218 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); NVWriteVgaAttr() 219 nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); NVWriteVgaAttr() 220 nvif_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value); NVWriteVgaAttr() 224 int head, uint8_t index) NVReadVgaAttr() 228 if (NVGetEnablePalette(dev, head)) NVReadVgaAttr() 233 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); NVReadVgaAttr() 234 nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); NVReadVgaAttr() 235 val = nvif_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE); NVReadVgaAttr() 239 static inline void NVVgaSeqReset(struct drm_device *dev, int head, bool start) NVVgaSeqReset() argument 241 NVWriteVgaSeq(dev, head, NV_VIO_SR_RESET_INDEX, start ? 0x1 : 0x3); NVVgaSeqReset() 244 static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect) NVVgaProtect() argument 246 uint8_t seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX); NVVgaProtect() 249 NVVgaSeqReset(dev, head, true); NVVgaProtect() 250 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20); NVVgaProtect() 253 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20); /* reenable display */ NVVgaProtect() 254 NVVgaSeqReset(dev, head, false); NVVgaProtect() 256 NVSetEnablePalette(dev, head, protect); NVVgaProtect() 271 /* makes cr0-7 on the specified head read-only */ 273 nv_lock_vga_crtc_base(struct drm_device *dev, int head, bool lock) nv_lock_vga_crtc_base() argument 275 uint8_t cr11 = NVReadVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX); nv_lock_vga_crtc_base() 282 NVWriteVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX, cr11); nv_lock_vga_crtc_base() 288 nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock) nv_lock_vga_crtc_shadow() argument 304 cr21 = NVReadVgaCrtc(dev, head, NV_CIO_CRE_21) | 0xfa; nv_lock_vga_crtc_shadow() 306 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_21, cr21); nv_lock_vga_crtc_shadow() 342 nv_fix_nv40_hw_cursor(struct drm_device *dev, int head) nv_fix_nv40_hw_cursor() argument 349 uint32_t curpos = NVReadRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS); nv_fix_nv40_hw_cursor() 350 NVWriteRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS, curpos); nv_fix_nv40_hw_cursor() 354 nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset) nv_set_crtc_base() argument 358 NVWriteCRTC(dev, head, NV_PCRTC_START, offset); nv_set_crtc_base() 365 int cre_heb = NVReadVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX); nv_set_crtc_base() 367 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX, nv_set_crtc_base() 373 nv_show_cursor(struct drm_device *dev, int head, bool show) nv_show_cursor() argument 377 &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX]; nv_show_cursor() 383 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1); nv_show_cursor() 386 nv_fix_nv40_hw_cursor(dev, head); nv_show_cursor() 60 NVReadCRTC(struct drm_device *dev, int head, uint32_t reg) NVReadCRTC() argument 71 NVWriteCRTC(struct drm_device *dev, int head, uint32_t reg, uint32_t val) NVWriteCRTC() argument 80 NVReadRAMDAC(struct 
drm_device *dev, int head, uint32_t reg) NVReadRAMDAC() argument 91 NVWriteRAMDAC(struct drm_device *dev, int head, uint32_t reg, uint32_t val) NVWriteRAMDAC() argument 120 NVWriteVgaCrtc(struct drm_device *dev, int head, uint8_t index, uint8_t value) NVWriteVgaCrtc() argument 128 NVReadVgaCrtc(struct drm_device *dev, int head, uint8_t index) NVReadVgaCrtc() argument 165 NVReadPRMVIO(struct drm_device *dev, int head, uint32_t reg) NVReadPRMVIO() argument 181 NVWritePRMVIO(struct drm_device *dev, int head, uint32_t reg, uint8_t value) NVWritePRMVIO() argument 209 NVWriteVgaAttr(struct drm_device *dev, int head, uint8_t index, uint8_t value) NVWriteVgaAttr() argument 223 NVReadVgaAttr(struct drm_device *dev, int head, uint8_t index) NVReadVgaAttr() argument
|
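NVWriteVgaCrtc()/NVReadVgaCrtc() above follow the classic VGA indexed-register protocol: write the register index to the CRX port, then access the data (CR) port, with a per-head offset selecting which CRTC's I/O window is used. The same two-step pattern reduced to a self-contained sketch; mmio_wr8()/mmio_rd8() are hypothetical byte accessors standing in for nvif_wr08()/nvif_rd08():

    #include <stdint.h>

    #define CRX_PORT   0x3d4      /* index port (color) */
    #define CR_PORT    0x3d5      /* data port */
    #define HEAD_SIZE  0x2000     /* per-head window stride, as with NV_PRMCIO_SIZE */

    /* Hypothetical MMIO accessors for the sketch. */
    void mmio_wr8(uint32_t reg, uint8_t v);
    uint8_t mmio_rd8(uint32_t reg);

    static void vga_crtc_write(int head, uint8_t index, uint8_t value)
    {
        mmio_wr8(CRX_PORT + head * HEAD_SIZE, index);  /* select register */
        mmio_wr8(CR_PORT  + head * HEAD_SIZE, value);  /* write its data */
    }

    static uint8_t vga_crtc_read(int head, uint8_t index)
    {
        mmio_wr8(CRX_PORT + head * HEAD_SIZE, index);
        return mmio_rd8(CR_PORT + head * HEAD_SIZE);
    }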
H A D | tvnv17.c | 54 int head; nv42_tv_sample_load() local 62 head = (dacclk & 0x100) >> 8; nv42_tv_sample_load() 67 fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL); nv42_tv_sample_load() 68 fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START); nv42_tv_sample_load() 69 fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END); nv42_tv_sample_load() 70 fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); nv42_tv_sample_load() 72 ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c); nv42_tv_sample_load() 73 ctv_14 = NVReadRAMDAC(dev, head, 0x680c14); nv42_tv_sample_load() 74 ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c); nv42_tv_sample_load() 80 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343); nv42_tv_sample_load() 81 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047); nv42_tv_sample_load() 82 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183); nv42_tv_sample_load() 83 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, nv42_tv_sample_load() 98 NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20); nv42_tv_sample_load() 99 NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16); nv42_tv_sample_load() 102 NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff); nv42_tv_sample_load() 108 NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff); nv42_tv_sample_load() 114 NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c); nv42_tv_sample_load() 115 NVWriteRAMDAC(dev, head, 0x680c14, ctv_14); nv42_tv_sample_load() 116 NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c); nv42_tv_sample_load() 119 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control); nv42_tv_sample_load() 120 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end); nv42_tv_sample_load() 121 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start); nv42_tv_sample_load() 122 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal); nv42_tv_sample_load() 402 int head = nouveau_crtc(encoder->crtc)->index; nv17_tv_prepare() local 403 uint8_t *cr_lcd = &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[ nv17_tv_prepare() 411 nv04_dfp_disable(dev, head); nv17_tv_prepare() 413 /* Unbind any FP encoders from this head if we need the FP nv17_tv_prepare() 418 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { nv17_tv_prepare() 424 nv04_dfp_get_bound_head(dev, dcb) == head) { nv17_tv_prepare() 425 nv04_dfp_bind_head(dev, dcb, head ^ 1, nv17_tv_prepare() 433 *cr_lcd |= 0x1 | (head ? 0x0 : 0x8); nv17_tv_prepare() 444 if (head) nv17_tv_prepare() 463 int head = nouveau_crtc(encoder->crtc)->index; nv17_tv_mode_set() local 464 struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head]; nv17_tv_mode_set() 477 if (head) nv17_tv_mode_set() 527 * encoder in its OR enabled and routed to the head it's nv17_tv_mode_set()
|
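nv42_tv_sample_load() above is a classic load-detection dance: save every register it will touch, program test timings and a test level into the DAC, sample the result, then restore the saved values. A compact sketch of that save/program/sample/restore shape; program_test_timings() and sample_dac() are hypothetical stand-ins for the register writes and the sense step:

    static const uint32_t detect_regs[] = {
        NV_PRAMDAC_FP_HTOTAL, NV_PRAMDAC_FP_HSYNC_START,
        NV_PRAMDAC_FP_HSYNC_END, NV_PRAMDAC_FP_TG_CONTROL,
    };
    #define NDETECT (sizeof(detect_regs) / sizeof(detect_regs[0]))

    static int tv_load_detect(struct drm_device *dev, int head, uint32_t testval)
    {
        uint32_t saved[NDETECT];
        int i, present;

        for (i = 0; i < (int)NDETECT; i++)            /* 1. save */
            saved[i] = NVReadRAMDAC(dev, head, detect_regs[i]);

        program_test_timings(dev, head, testval);     /* 2. drive test level */
        present = sample_dac(dev, head);              /* 3. sense the load */

        for (i = (int)NDETECT - 1; i >= 0; i--)       /* 4. restore, reversed */
            NVWriteRAMDAC(dev, head, detect_regs[i], saved[i]);

        return present;
    }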
H A D | disp.c | 94 &dev->mode_config.connector_list, head) { nv04_display_create() 102 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { nv04_display_create() 110 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) nv04_display_create() 113 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { nv04_display_create() 133 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nv04_display_destroy() 142 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { nv04_display_destroy() 148 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) nv04_display_destroy() 173 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { nv04_display_init() 179 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) nv04_display_init()
|
H A D | dfp.c | 69 int head, bool dl) nv04_dfp_bind_head() 74 * head < 0 indicates we wish to force a setting with the overrideval nv04_dfp_bind_head() 81 if (head != ramdac) nv04_dfp_bind_head() 93 void nv04_dfp_disable(struct drm_device *dev, int head) nv04_dfp_disable() argument 97 if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) & nv04_dfp_disable() 103 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, nv04_dfp_disable() 108 crtcstate[head].fp_control = FP_TG_CONTROL_OFF; nv04_dfp_disable() 109 crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] &= nv04_dfp_disable() 135 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nv04_dfp_update_fp_control() 171 list_for_each_entry(slave, &dev->mode_config.encoder_list, head) { get_tmds_slave() 204 struct nouveau_encoder *nv_encoder, int head) nv04_dfp_prepare_sel_clk() 216 if (head) nv04_dfp_prepare_sel_clk() 229 * bits 5&7 head dependent as for bits 4&6, but do not appear with 4&6; nv04_dfp_prepare_sel_clk() 240 state->sel_clk |= (head ? 0x40 : 0x10) << shift; nv04_dfp_prepare_sel_clk() 249 int head = nouveau_crtc(encoder->crtc)->index; nv04_dfp_prepare() local 251 uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX]; nv04_dfp_prepare() 252 uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX]; nv04_dfp_prepare() 256 nv04_dfp_prepare_sel_clk(dev, nv_encoder, head); nv04_dfp_prepare() 262 *cr_lcd |= head ? 0x0 : 0x8; nv04_dfp_prepare() 270 NVWriteVgaCrtc(dev, head ^ 1, nv04_dfp_prepare() 452 int head = nouveau_crtc(encoder->crtc)->index; nv04_dfp_commit() local 456 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); nv04_dfp_commit() 458 call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock); nv04_dfp_commit() 462 nv04_display(dev)->mode_reg.crtc_reg[head].fp_control = nv04_dfp_commit() 463 NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); nv04_dfp_commit() 533 int head = crtc ? nouveau_crtc(crtc)->index : nv04_lvds_dpms() local 537 call_lvds_script(dev, nv_encoder->dcb, head, nv04_lvds_dpms() 543 call_lvds_script(dev, nv_encoder->dcb, head, nv04_lvds_dpms() 581 nv_encoder->restore.head = nv04_dfp_save() 589 int head = nv_encoder->restore.head; nv04_dfp_restore() local 596 call_lvds_script(dev, nv_encoder->dcb, head, nv04_dfp_restore() 602 (&nv04_display(dev)->saved_reg.crtc_reg[head].pllvals); nv04_dfp_restore() 604 run_tmds_table(dev, nv_encoder->dcb, head, clock); nv04_dfp_restore() 68 nv04_dfp_bind_head(struct drm_device *dev, struct dcb_output *dcbent, int head, bool dl) nv04_dfp_bind_head() argument 203 nv04_dfp_prepare_sel_clk(struct drm_device *dev, struct nouveau_encoder *nv_encoder, int head) nv04_dfp_prepare_sel_clk() argument
|
H A D | tvnv04.c | 89 int head = nouveau_crtc(encoder->crtc)->index; nv04_tv_dpms() local 90 crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX); nv04_tv_dpms() 92 state->pllsel |= head ? PLLSEL_TV_CRTC2_MASK : nv04_tv_dpms() 98 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A); nv04_tv_dpms() 106 static void nv04_tv_bind(struct drm_device *dev, int head, bool bind) nv04_tv_bind() argument 108 struct nv04_crtc_reg *state = &nv04_display(dev)->mode_reg.crtc_reg[head]; nv04_tv_bind() 117 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX, nv04_tv_bind() 119 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49, nv04_tv_bind() 121 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, nv04_tv_bind() 128 int head = nouveau_crtc(encoder->crtc)->index; nv04_tv_prepare() local 133 nv04_dfp_disable(dev, head); nv04_tv_prepare() 136 nv04_tv_bind(dev, head ^ 1, false); nv04_tv_prepare() 138 nv04_tv_bind(dev, head, true); nv04_tv_prepare()
|
/linux-4.4.14/arch/avr32/oprofile/ |
H A D | backtrace.c | 35 static struct frame_head *dump_user_backtrace(struct frame_head *head) dump_user_backtrace() argument 40 if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) dump_user_backtrace() 42 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) dump_user_backtrace() 49 if (bufhead[0].fp <= head) dump_user_backtrace() 58 struct frame_head *head = (struct frame_head *)(regs->r7); avr32_backtrace() local 67 (unsigned long)head)) { avr32_backtrace() 68 oprofile_add_trace(head->lr); avr32_backtrace() 69 if (head->fp <= head) avr32_backtrace() 71 head = head->fp; avr32_backtrace() 76 while (depth-- && head) avr32_backtrace() 77 head = dump_user_backtrace(head); avr32_backtrace()
|
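Both walkers in this file follow the same safe user-space frame-pointer walk: copy the frame out of user memory with a fault-tolerant copy, record the link register, and stop unless the next frame pointer moves strictly upward (the bufhead[0].fp <= head test), which also breaks cycles. A self-contained analogue, with a hypothetical safe_copy() standing in for __copy_from_user_inatomic():

    #include <stddef.h>

    struct frame_head {
        struct frame_head *fp;   /* caller's frame pointer */
        unsigned long lr;        /* return address */
    };

    /* Hypothetical fault-tolerant copy; returns 0 on success. */
    int safe_copy(void *dst, const void *src, size_t n);

    static void walk_user_stack(struct frame_head *head, unsigned int depth,
                                void (*record)(unsigned long pc))
    {
        struct frame_head buf;

        while (depth-- && head) {
            if (safe_copy(&buf, head, sizeof(buf)))
                break;                 /* unreadable frame: stop */
            record(buf.lr);
            if (buf.fp <= head)
                break;                 /* not strictly ascending: loop guard */
            head = buf.fp;
        }
    }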
/linux-4.4.14/drivers/gpu/drm/radeon/ |
H A D | mkregtable.c | 79 * @head: list head to add it after 81 * Insert a new entry after the specified head. 84 static inline void list_add(struct list_head *new, struct list_head *head) list_add() argument 86 __list_add(new, head, head->next); list_add() 92 * @head: list head to add it before 94 * Insert a new entry before the specified head. 97 static inline void list_add_tail(struct list_head *new, struct list_head *head) list_add_tail() argument 99 __list_add(new, head->prev, head); list_add_tail() 165 * list_move - delete from one list and add as another's head 167 * @head: the head that will precede our entry 169 static inline void list_move(struct list_head *list, struct list_head *head) list_move() argument 172 list_add(list, head); list_move() 178 * @head: the head that will follow our entry 181 struct list_head *head) list_move_tail() 184 list_add_tail(list, head); list_move_tail() 188 * list_is_last - tests whether @list is the last entry in list @head 190 * @head: the head of the list 193 const struct list_head *head) list_is_last() 195 return list->next == head; list_is_last() 200 * @head: the list to test. 202 static inline int list_empty(const struct list_head *head) list_empty() argument 204 return head->next == head; list_empty() 209 * @head: the list to test 220 static inline int list_empty_careful(const struct list_head *head) list_empty_careful() argument 222 struct list_head *next = head->next; list_empty_careful() 223 return (next == head) && (next == head->prev); list_empty_careful() 228 * @head: the list to test. 230 static inline int list_is_singular(const struct list_head *head) list_is_singular() argument 232 return !list_empty(head) && (head->next == head->prev); list_is_singular() 236 struct list_head *head, __list_cut_position() 240 list->next = head->next; __list_cut_position() 244 head->next = new_first; __list_cut_position() 245 new_first->prev = head; __list_cut_position() 251 * @head: a list with entries 252 * @entry: an entry within head, could be the head itself 255 * This helper moves the initial part of @head, up to and 256 * including @entry, from @head to @list. You should 257 * pass on @entry an element you know is on @head. @list 263 struct list_head *head, list_cut_position() 266 if (list_empty(head)) list_cut_position() 268 if (list_is_singular(head) && (head->next != entry && head != entry)) list_cut_position() 270 if (entry == head) list_cut_position() 273 __list_cut_position(list, head, entry); list_cut_position() 292 * @head: the place to add it in the first list. 295 struct list_head *head) list_splice() 298 __list_splice(list, head, head->next); list_splice() 304 * @head: the place to add it in the first list. 307 struct list_head *head) list_splice_tail() 310 __list_splice(list, head->prev, head); list_splice_tail() 316 * @head: the place to add it in the first list. 321 struct list_head *head) list_splice_init() 324 __list_splice(list, head, head->next); list_splice_init() 332 * @head: the place to add it in the first list. 338 struct list_head *head) list_splice_tail_init() 341 __list_splice(list, head->prev, head); list_splice_tail_init() 357 * @ptr: the list head to take the element from. 369 * @head: the head for your list. 371 #define list_for_each(pos, head) \ 372 for (pos = (head)->next; prefetch(pos->next), pos != (head); \ 378 * @head: the head for your list. 380 #define list_for_each_prev(pos, head) \ 381 for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ 388 * @head: the head for your list. 
390 #define list_for_each_safe(pos, n, head) \ 391 for (pos = (head)->next, n = pos->next; pos != (head); \ 398 * @head: the head for your list. 400 #define list_for_each_prev_safe(pos, n, head) \ 401 for (pos = (head)->prev, n = pos->prev; \ 402 prefetch(pos->prev), pos != (head); \ 408 * @head: the head for your list. 411 #define list_for_each_entry(pos, head, member) \ 412 for (pos = list_entry((head)->next, typeof(*pos), member); \ 413 &pos->member != (head); \ 419 * @head: the head for your list. 422 #define list_for_each_entry_reverse(pos, head, member) \ 423 for (pos = list_entry((head)->prev, typeof(*pos), member); \ 424 prefetch(pos->member.prev), &pos->member != (head); \ 430 * @head: the head of the list 435 #define list_prepare_entry(pos, head, member) \ 436 ((pos) ? : list_entry(head, typeof(*pos), member)) 441 * @head: the head for your list. 447 #define list_for_each_entry_continue(pos, head, member) \ 449 prefetch(pos->member.next), &pos->member != (head); \ 455 * @head: the head for your list. 461 #define list_for_each_entry_continue_reverse(pos, head, member) \ 463 prefetch(pos->member.prev), &pos->member != (head); \ 469 * @head: the head for your list. 474 #define list_for_each_entry_from(pos, head, member) \ 475 for (; prefetch(pos->member.next), &pos->member != (head); \ 482 * @head: the head for your list. 485 #define list_for_each_entry_safe(pos, n, head, member) \ 486 for (pos = list_entry((head)->next, typeof(*pos), member), \ 488 &pos->member != (head); \ 495 * @head: the head for your list. 501 #define list_for_each_entry_safe_continue(pos, n, head, member) \ 504 &pos->member != (head); \ 511 * @head: the head for your list. 517 #define list_for_each_entry_safe_from(pos, n, head, member) \ 519 &pos->member != (head); \ 526 * @head: the head for your list. 532 #define list_for_each_entry_safe_reverse(pos, n, head, member) \ 533 for (pos = list_entry((head)->prev, typeof(*pos), member), \ 535 &pos->member != (head); \ 180 list_move_tail(struct list_head *list, struct list_head *head) list_move_tail() argument 192 list_is_last(const struct list_head *list, const struct list_head *head) list_is_last() argument 235 __list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) __list_cut_position() argument 262 list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) list_cut_position() argument 294 list_splice(const struct list_head *list, struct list_head *head) list_splice() argument 306 list_splice_tail(struct list_head *list, struct list_head *head) list_splice_tail() argument 320 list_splice_init(struct list_head *list, struct list_head *head) list_splice_init() argument 337 list_splice_tail_init(struct list_head *list, struct list_head *head) list_splice_tail_init() argument
|
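mkregtable.c carries a user-space copy of the kernel's circular doubly-linked list so these helpers can run in a host tool. A minimal host-side sketch using the macros shown above; INIT_LIST_HEAD() is assumed from the same header, and struct reg is hypothetical:

    #include <stdio.h>

    struct reg {
        unsigned int offset;
        struct list_head list;            /* linkage into the table */
    };

    static void build_and_walk(void)
    {
        struct list_head table;
        struct reg a = { .offset = 0x1000 }, b = { .offset = 0x1004 };
        struct reg *r;

        INIT_LIST_HEAD(&table);           /* empty: table points at itself */
        list_add_tail(&a.list, &table);   /* table <-> a */
        list_add_tail(&b.list, &table);   /* table <-> a <-> b */

        list_for_each_entry(r, &table, list)
            printf("reg %#x\n", r->offset);
    }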
/linux-4.4.14/drivers/scsi/sym53c8xx_2/ |
H A D | sym_misc.h | 55 static inline struct sym_quehead *sym_que_first(struct sym_quehead *head) sym_que_first() argument 57 return (head->flink == head) ? 0 : head->flink; sym_que_first() 60 static inline struct sym_quehead *sym_que_last(struct sym_quehead *head) sym_que_last() argument 62 return (head->blink == head) ? 0 : head->blink; sym_que_last() 82 static inline int sym_que_empty(struct sym_quehead *head) sym_que_empty() argument 84 return head->flink == head; sym_que_empty() 88 struct sym_quehead *head) sym_que_splice() 94 struct sym_quehead *at = head->flink; sym_que_splice() 96 first->blink = head; sym_que_splice() 97 head->flink = first; sym_que_splice() 130 #define sym_insque_head(new, head) __sym_que_add(new, head, (head)->flink) 132 static inline struct sym_quehead *sym_remque_head(struct sym_quehead *head) sym_remque_head() argument 134 struct sym_quehead *elem = head->flink; sym_remque_head() 136 if (elem != head) sym_remque_head() 137 __sym_que_del(head, elem->flink); sym_remque_head() 143 #define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head) 145 static inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head) sym_remque_tail() argument 147 struct sym_quehead *elem = head->blink; sym_remque_tail() 149 if (elem != head) sym_remque_tail() 150 __sym_que_del(elem->blink, head); sym_remque_tail() 159 #define FOR_EACH_QUEUED_ELEMENT(head, qp) \ 160 for (qp = (head)->flink; qp != (head); qp = qp->flink) 87 sym_que_splice(struct sym_quehead *list, struct sym_quehead *head) sym_que_splice() argument
|
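sym_quehead is the same circular doubly-linked queue idea with flink/blink naming: an empty queue points at itself, sym_insque_head()/sym_insque_tail() splice at either end, and sym_que_first() returns 0 when empty. A hedged usage sketch; embedding the quehead as the first member lets a plain cast recover the element (the real driver uses an offset-based entry macro instead):

    struct item {
        struct sym_quehead link;   /* queue linkage, first member */
        int payload;
    };

    static void fifo_demo(void)
    {
        struct sym_quehead q = { &q, &q };   /* empty: flink == blink == &q */
        struct item a = { .payload = 1 }, b = { .payload = 2 };
        struct sym_quehead *qp;

        sym_insque_tail(&a.link, &q);        /* enqueue at the back */
        sym_insque_tail(&b.link, &q);

        FOR_EACH_QUEUED_ELEMENT(&q, qp) {
            struct item *it = (struct item *)qp;
            (void)it->payload;               /* consume in FIFO order */
        }
    }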
/linux-4.4.14/net/atm/ |
H A D | addr.c | 51 struct list_head *head; atm_reset_addr() local 55 head = &dev->lecs; atm_reset_addr() 57 head = &dev->local; list_for_each_entry_safe() 58 list_for_each_entry_safe(this, p, head, entry) { list_for_each_entry_safe() 63 if (head == &dev->local) 72 struct list_head *head; atm_add_addr() local 80 head = &dev->lecs; atm_add_addr() 82 head = &dev->local; list_for_each_entry() 83 list_for_each_entry(this, head, entry) { list_for_each_entry() 95 list_add(&this->entry, head); 97 if (head == &dev->local) 107 struct list_head *head; atm_del_addr() local 115 head = &dev->lecs; atm_del_addr() 117 head = &dev->local; list_for_each_entry() 118 list_for_each_entry(this, head, entry) { list_for_each_entry() 123 if (head == &dev->local) list_for_each_entry() 137 struct list_head *head; atm_get_addr() local 143 head = &dev->lecs; atm_get_addr() 145 head = &dev->local; atm_get_addr() 146 list_for_each_entry(this, head, entry) atm_get_addr() 153 list_for_each_entry(this, head, entry) atm_get_addr()
|
/linux-4.4.14/arch/x86/oprofile/ |
H A D | backtrace.c | 42 dump_user_backtrace_32(struct stack_frame_ia32 *head) dump_user_backtrace_32() argument 49 bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); dump_user_backtrace_32() 59 if (head >= fp) dump_user_backtrace_32() 68 struct stack_frame_ia32 *head; x86_backtrace_32() local 74 head = (struct stack_frame_ia32 *) regs->bp; x86_backtrace_32() 75 while (depth-- && head) x86_backtrace_32() 76 head = dump_user_backtrace_32(head); x86_backtrace_32() 89 static struct stack_frame *dump_user_backtrace(struct stack_frame *head) dump_user_backtrace() argument 95 bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); dump_user_backtrace() 103 if (head >= bufhead[0].next_frame) dump_user_backtrace() 112 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs); x86_backtrace() local 125 while (depth-- && head) x86_backtrace() 126 head = dump_user_backtrace(head); x86_backtrace()
|
/linux-4.4.14/lib/ |
H A D | plist.c | 57 static void plist_check_head(struct plist_head *head) plist_check_head() argument 59 if (!plist_head_empty(head)) plist_check_head() 60 plist_check_list(&plist_first(head)->prio_list); plist_check_head() 61 plist_check_list(&head->node_list); plist_check_head() 69 * plist_add - add @node to @head 72 * @head: &struct plist_head pointer 74 void plist_add(struct plist_node *node, struct plist_head *head) plist_add() argument 77 struct list_head *node_next = &head->node_list; plist_add() 79 plist_check_head(head); plist_add() 83 if (plist_head_empty(head)) plist_add() 86 first = iter = plist_first(head); plist_add() 104 plist_check_head(head); plist_add() 111 * @head: &struct plist_head pointer - list head 113 void plist_del(struct plist_node *node, struct plist_head *head) plist_del() argument 115 plist_check_head(head); plist_del() 118 if (node->node_list.next != &head->node_list) { plist_del() 133 plist_check_head(head); plist_del() 144 * @head: &struct plist_head pointer - list head 146 void plist_requeue(struct plist_node *node, struct plist_head *head) plist_requeue() argument 149 struct list_head *node_next = &head->node_list; plist_requeue() 151 plist_check_head(head); plist_requeue() 152 BUG_ON(plist_head_empty(head)); plist_requeue() 155 if (node == plist_last(head)) plist_requeue() 163 plist_del(node, head); plist_requeue() 165 plist_for_each_continue(iter, head) { plist_for_each_continue() 173 plist_check_head(head);
|
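plist keeps one node per distinct priority on prio_list and every node on node_list, so plist_add() above walks at most one entry per priority level and plist_first() stays O(1). A hedged usage sketch; plist_node_init() and plist_first() are assumed from the matching plist.h, and a smaller prio value means higher priority:

    struct waiter {
        struct plist_node node;
        const char *name;
    };

    static const char *first_waiter(struct plist_head *head,
                                    struct waiter *a, struct waiter *b)
    {
        plist_node_init(&a->node, 10);
        plist_node_init(&b->node, 3);

        plist_add(&a->node, head);    /* node_list kept sorted by prio */
        plist_add(&b->node, head);

        /* b (prio 3) sorts ahead of a (prio 10) */
        return container_of(plist_first(head), struct waiter, node)->name;
    }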
H A D | llist.c | 34 * @head: the head for your lock-less list 39 struct llist_head *head) llist_add_batch() 44 new_last->next = first = ACCESS_ONCE(head->first); llist_add_batch() 45 } while (cmpxchg(&head->first, first, new_first) != first); llist_add_batch() 53 * @head: the head for your lock-less list 61 * llist_add) sequence in another user may change @head->first->next, 62 * but keep @head->first. If multiple consumers are needed, please 65 struct llist_node *llist_del_first(struct llist_head *head) llist_del_first() argument 69 entry = smp_load_acquire(&head->first); llist_del_first() 75 entry = cmpxchg(&head->first, old_entry, next); llist_del_first() 86 * @head: first item of the list to be reversed 91 struct llist_node *llist_reverse_order(struct llist_node *head) llist_reverse_order() argument 95 while (head) { llist_reverse_order() 96 struct llist_node *tmp = head; llist_reverse_order() 97 head = head->next; llist_reverse_order() 38 llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, struct llist_head *head) llist_add_batch() argument
|
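The lock-less list is a Treiber-style stack: producers push with the cmpxchg loop in llist_add_batch() above, so a consumer that drains the whole list sees the newest element first and typically calls llist_reverse_order() to recover FIFO order. A hedged usage sketch; llist_add(), llist_del_all() and llist_for_each_entry_safe() are the llist.h companions of the functions shown, and process() is hypothetical:

    struct job {
        struct llist_node node;
        int id;
    };

    void process(struct job *j);

    static void producer(struct llist_head *q, struct job *j)
    {
        llist_add(&j->node, q);                   /* cmpxchg push: newest first */
    }

    static void consumer(struct llist_head *q)
    {
        struct llist_node *n = llist_del_all(q);  /* detach the whole list */
        struct job *j, *tmp;

        n = llist_reverse_order(n);               /* LIFO -> FIFO */
        llist_for_each_entry_safe(j, tmp, n, node)
            process(j);
    }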
H A D | timerqueue.c | 33 * @head: head of timerqueue 39 bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) timerqueue_add() argument 41 struct rb_node **p = &head->head.rb_node; timerqueue_add() 57 rb_insert_color(&node->node, &head->head); timerqueue_add() 59 if (!head->next || node->expires.tv64 < head->next->expires.tv64) { timerqueue_add() 60 head->next = node; timerqueue_add() 70 * @head: head of timerqueue 75 bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) timerqueue_del() argument 80 if (head->next == node) { timerqueue_del() 83 head->next = rbn ? timerqueue_del() 86 rb_erase(&node->node, &head->head); timerqueue_del() 88 return head->next != NULL; timerqueue_del()
|
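timerqueue_add() keeps the rbtree sorted by expiry and caches the earliest node in head->next, so "what fires next" is an O(1) question and the bool returns above tell the caller whether the earliest deadline changed. A minimal usage sketch, assuming timerqueue_init()/timerqueue_init_head() and the ktime helpers from the matching headers:

    static void arm_two(struct timerqueue_head *q,
                        struct timerqueue_node *a, struct timerqueue_node *b)
    {
        timerqueue_init_head(q);
        timerqueue_init(a);
        timerqueue_init(b);

        a->expires = ns_to_ktime(2000);
        b->expires = ns_to_ktime(1000);

        timerqueue_add(q, a);    /* true: a is the new earliest */
        timerqueue_add(q, b);    /* true again: b preempts a */

        /* timerqueue_getnext(q) now returns b without an rbtree walk */
    }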
H A D | btree.c | 93 static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp) btree_node_alloc() argument 97 node = mempool_alloc(head->mempool, gfp); btree_node_alloc() 176 static inline void __btree_init(struct btree_head *head) __btree_init() argument 178 head->node = NULL; __btree_init() 179 head->height = 0; __btree_init() 182 void btree_init_mempool(struct btree_head *head, mempool_t *mempool) btree_init_mempool() argument 184 __btree_init(head); btree_init_mempool() 185 head->mempool = mempool; btree_init_mempool() 189 int btree_init(struct btree_head *head) btree_init() argument 191 __btree_init(head); btree_init() 192 head->mempool = mempool_create(0, btree_alloc, btree_free, NULL); btree_init() 193 if (!head->mempool) btree_init() 199 void btree_destroy(struct btree_head *head) btree_destroy() argument 201 mempool_free(head->node, head->mempool); btree_destroy() 202 mempool_destroy(head->mempool); btree_destroy() 203 head->mempool = NULL; btree_destroy() 207 void *btree_last(struct btree_head *head, struct btree_geo *geo, btree_last() argument 210 int height = head->height; btree_last() 211 unsigned long *node = head->node; btree_last() 241 void *btree_lookup(struct btree_head *head, struct btree_geo *geo, btree_lookup() argument 244 int i, height = head->height; btree_lookup() 245 unsigned long *node = head->node; btree_lookup() 271 int btree_update(struct btree_head *head, struct btree_geo *geo, btree_update() argument 274 int i, height = head->height; btree_update() 275 unsigned long *node = head->node; btree_update() 311 void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, btree_get_prev() argument 321 if (head->height == 0) btree_get_prev() 327 node = head->node; btree_get_prev() 328 for (height = head->height ; height > 1; height--) { btree_get_prev() 388 static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo, find_level() argument 391 unsigned long *node = head->node; find_level() 394 for (height = head->height; height > level; height--) { find_level() 413 static int btree_grow(struct btree_head *head, struct btree_geo *geo, btree_grow() argument 419 node = btree_node_alloc(head, gfp); btree_grow() 422 if (head->node) { btree_grow() 423 fill = getfill(geo, head->node, 0); btree_grow() 424 setkey(geo, node, 0, bkey(geo, head->node, fill - 1)); btree_grow() 425 setval(geo, node, 0, head->node); btree_grow() 427 head->node = node; btree_grow() 428 head->height++; btree_grow() 432 static void btree_shrink(struct btree_head *head, struct btree_geo *geo) btree_shrink() argument 437 if (head->height <= 1) btree_shrink() 440 node = head->node; btree_shrink() 443 head->node = bval(geo, node, 0); btree_shrink() 444 head->height--; btree_shrink() 445 mempool_free(node, head->mempool); btree_shrink() 448 static int btree_insert_level(struct btree_head *head, struct btree_geo *geo, btree_insert_level() argument 456 if (head->height < level) { btree_insert_level() 457 err = btree_grow(head, geo, gfp); btree_insert_level() 463 node = find_level(head, geo, key, level); btree_insert_level() 473 new = btree_node_alloc(head, gfp); btree_insert_level() 476 err = btree_insert_level(head, geo, btree_insert_level() 480 mempool_free(new, head->mempool); btree_insert_level() 510 int btree_insert(struct btree_head *head, struct btree_geo *geo, btree_insert() argument 514 return btree_insert_level(head, geo, key, val, 1, gfp); btree_insert() 518 static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, 520 static void 
merge(struct btree_head *head, struct btree_geo *geo, int level, merge() argument 536 btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1); merge() 537 mempool_free(right, head->mempool); merge() 540 static void rebalance(struct btree_head *head, struct btree_geo *geo, rebalance() argument 551 btree_remove_level(head, geo, key, level + 1); rebalance() 552 mempool_free(child, head->mempool); rebalance() 556 parent = find_level(head, geo, key, level + 1); rebalance() 564 merge(head, geo, level, rebalance() 575 merge(head, geo, level, rebalance() 591 static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, btree_remove_level() argument 598 if (level > head->height) { btree_remove_level() 600 head->height = 0; btree_remove_level() 601 head->node = NULL; btree_remove_level() 605 node = find_level(head, geo, key, level); btree_remove_level() 620 if (level < head->height) btree_remove_level() 621 rebalance(head, geo, key, level, node, fill - 1); btree_remove_level() 623 btree_shrink(head, geo); btree_remove_level() 629 void *btree_remove(struct btree_head *head, struct btree_geo *geo, btree_remove() argument 632 if (head->height == 0) btree_remove() 635 return btree_remove_level(head, geo, key, 1); btree_remove() 676 static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo, __btree_for_each() argument 691 count = __btree_for_each(head, geo, child, opaque, __btree_for_each() 698 mempool_free(node, head->mempool); __btree_for_each() 746 size_t btree_visitor(struct btree_head *head, struct btree_geo *geo, btree_visitor() argument 757 if (head->node) btree_visitor() 758 count = __btree_for_each(head, geo, head->node, opaque, func, btree_visitor() 759 func2, 0, head->height, 0); btree_visitor() 764 size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo, btree_grim_visitor() argument 775 if (head->node) btree_grim_visitor() 776 count = __btree_for_each(head, geo, head->node, opaque, func, btree_grim_visitor() 777 func2, 1, head->height, 0); btree_grim_visitor() 778 __btree_init(head); btree_grim_visitor()
|
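The b+tree core stores keys as raw arrays of unsigned long and is parameterized by a geometry; the typed wrappers seen elsewhere just funnel into these functions. A hedged usage sketch of the core API, assuming btree_geo64 is one of the geometries this file exports:

    static int btree_demo(void)
    {
        struct btree_head head;
        static int payload = 42;
        u64 key = 7;
        int err;

        err = btree_init(&head);            /* sets up the node mempool */
        if (err)
            return err;

        err = btree_insert(&head, &btree_geo64, (unsigned long *)&key,
                           &payload, GFP_KERNEL);
        if (!err) {
            int *v = btree_lookup(&head, &btree_geo64, (unsigned long *)&key);
            (void)v;                        /* v == &payload */
            btree_remove(&head, &btree_geo64, (unsigned long *)&key);
        }

        btree_destroy(&head);
        return err;
    }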
H A D | list_sort.c | 17 * sentinel head node, "prev" links not maintained. 24 struct list_head head, *tail = &head; merge() local 38 return head.next; merge() 51 struct list_head *head, merge_and_restore_back_links() 54 struct list_head *tail = head; merge_and_restore_back_links() 86 tail->next = head; merge_and_restore_back_links() 87 head->prev = tail; merge_and_restore_back_links() 93 * @head: the list to sort 104 void list_sort(void *priv, struct list_head *head, list_sort() argument 114 if (list_empty(head)) list_sort() 119 head->prev->next = NULL; list_sort() 120 list = head->next; list_sort() 145 merge_and_restore_back_links(priv, cmp, head, part[max_lev], list); list_sort() 158 #define TEST_LIST_LEN (512+128+2) /* not including head */ 217 LIST_HEAD(head); list_sort_test() 239 list_add_tail(&el->list, &head); list_sort_test() 242 list_sort(NULL, &head, cmp); list_sort_test() 245 for (cur = head.next; cur->next != &head; cur = cur->next) { list_sort_test() 274 if (head.prev != cur) { list_sort_test() 48 merge_and_restore_back_links(void *priv, int (*cmp)(void *priv, struct list_head *a, struct list_head *b), struct list_head *head, struct list_head *a, struct list_head *b) merge_and_restore_back_links() argument
|
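list_sort() is a bottom-up merge sort over list_head chains: it drops the "prev" links while merging and rebuilds them in one final pass (merge_and_restore_back_links() above). Callers drive it exactly as list_sort_test() does; a minimal sketch, with debug_el reduced to the two fields the comparison needs:

    struct debug_el {
        struct list_head list;
        int value;
    };

    static int cmp(void *priv, struct list_head *a, struct list_head *b)
    {
        int va = container_of(a, struct debug_el, list)->value;
        int vb = container_of(b, struct debug_el, list)->value;

        return va - vb;              /* ascending; the sort is stable */
    }

    static void sort_list(struct list_head *head)
    {
        list_sort(NULL, head, cmp);  /* priv is just passed through to cmp */
    }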
/linux-4.4.14/net/sunrpc/xprtrdma/ |
H A D | svc_rdma_recvfrom.c | 72 /* Set up the XDR head */ rdma_build_arg_xdr() 73 rqstp->rq_arg.head[0].iov_base = page_address(page); rdma_build_arg_xdr() 74 rqstp->rq_arg.head[0].iov_len = rdma_build_arg_xdr() 79 /* Compute bytes past head in the SGL */ rdma_build_arg_xdr() 80 bc = byte_count - rqstp->rq_arg.head[0].iov_len; rdma_build_arg_xdr() 87 rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base; rdma_build_arg_xdr() 121 struct svc_rdma_op_ctxt *head, rdma_read_chunk_lcl() 137 ctxt->read_hdr = head; rdma_read_chunk_lcl() 145 head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no]; rdma_read_chunk_lcl() 146 head->arg.page_len += len; rdma_read_chunk_lcl() 147 head->arg.len += len; rdma_read_chunk_lcl() 149 head->count++; rdma_read_chunk_lcl() 154 head->arg.pages[pg_no], pg_off, rdma_read_chunk_lcl() 214 struct svc_rdma_op_ctxt *head, rdma_read_chunk_frmr() 248 head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no]; rdma_read_chunk_frmr() 249 head->arg.page_len += len; rdma_read_chunk_frmr() 250 head->arg.len += len; rdma_read_chunk_frmr() 252 head->count++; rdma_read_chunk_frmr() 299 ctxt->read_hdr = head; rdma_read_chunk_frmr() 371 rdma_copy_tail(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head, rdma_copy_tail() argument 378 srcp = head->arg.head[0].iov_base + position; rdma_copy_tail() 379 byte_count = head->arg.head[0].iov_len - position; rdma_copy_tail() 409 byte_count = head->arg.head[0].iov_len - position; rdma_copy_tail() 410 head->arg.page_len += byte_count; rdma_copy_tail() 411 head->arg.len += byte_count; rdma_copy_tail() 412 head->arg.buflen += byte_count; rdma_copy_tail() 419 struct svc_rdma_op_ctxt *head) rdma_read_chunks() 437 * head context keeps all the pages that comprise the rdma_read_chunks() 440 head->arg.head[0] = rqstp->rq_arg.head[0]; rdma_read_chunks() 441 head->arg.tail[0] = rqstp->rq_arg.tail[0]; rdma_read_chunks() 442 head->hdr_count = head->count; rdma_read_chunks() 443 head->arg.page_base = 0; rdma_read_chunks() 444 head->arg.page_len = 0; rdma_read_chunks() 445 head->arg.len = rqstp->rq_arg.len; rdma_read_chunks() 446 head->arg.buflen = rqstp->rq_arg.buflen; rdma_read_chunks() 453 head->arg.pages = &head->pages[0]; rdma_read_chunks() 454 page_offset = head->byte_len; rdma_read_chunks() 456 head->arg.pages = &head->pages[head->count]; rdma_read_chunks() 473 ret = xprt->sc_reader(xprt, rqstp, head, rdma_read_chunks() 481 head->arg.buflen += ret; rdma_read_chunks() 489 head->arg.page_len += pad; rdma_read_chunks() 490 head->arg.len += pad; rdma_read_chunks() 491 head->arg.buflen += pad; rdma_read_chunks() 496 if (position && position < head->arg.head[0].iov_len) rdma_read_chunks() 497 ret = rdma_copy_tail(rqstp, head, position, rdma_read_chunks() 499 head->arg.head[0].iov_len = position; rdma_read_chunks() 500 head->position = position; rdma_read_chunks() 512 struct svc_rdma_op_ctxt *head) rdma_read_complete() 518 for (page_no = 0; page_no < head->count; page_no++) { rdma_read_complete() 520 rqstp->rq_pages[page_no] = head->pages[page_no]; rdma_read_complete() 524 if (head->position == 0) { rdma_read_complete() 525 if (head->arg.len <= head->sge[0].length) { rdma_read_complete() 526 head->arg.head[0].iov_len = head->arg.len - rdma_read_complete() 527 head->byte_len; rdma_read_complete() 528 head->arg.page_len = 0; rdma_read_complete() 530 head->arg.head[0].iov_len = head->sge[0].length - rdma_read_complete() 531 head->byte_len; rdma_read_complete() 532 head->arg.page_len = head->arg.len - rdma_read_complete() 533 head->sge[0].length; 
rdma_read_complete() 538 rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count]; rdma_read_complete() 539 rqstp->rq_arg.page_len = head->arg.page_len; rdma_read_complete() 540 rqstp->rq_arg.page_base = head->arg.page_base; rdma_read_complete() 546 /* Rebuild rq_arg head and tail. */ rdma_read_complete() 547 rqstp->rq_arg.head[0] = head->arg.head[0]; rdma_read_complete() 548 rqstp->rq_arg.tail[0] = head->arg.tail[0]; rdma_read_complete() 549 rqstp->rq_arg.len = head->arg.len; rdma_read_complete() 550 rqstp->rq_arg.buflen = head->arg.buflen; rdma_read_complete() 553 svc_rdma_put_context(head, 0); rdma_read_complete() 559 ret = rqstp->rq_arg.head[0].iov_len rdma_read_complete() 563 "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zu\n", rdma_read_complete() 564 ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base, rdma_read_complete() 565 rqstp->rq_arg.head[0].iov_len); rdma_read_complete() 646 ret = rqstp->rq_arg.head[0].iov_len svc_rdma_recvfrom() 652 "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n", svc_rdma_recvfrom() 654 rqstp->rq_arg.head[0].iov_base, svc_rdma_recvfrom() 655 rqstp->rq_arg.head[0].iov_len); svc_rdma_recvfrom() 119 rdma_read_chunk_lcl(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head, int *page_no, u32 *page_offset, u32 rs_handle, u32 rs_length, u64 rs_offset, bool last) rdma_read_chunk_lcl() argument 212 rdma_read_chunk_frmr(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head, int *page_no, u32 *page_offset, u32 rs_handle, u32 rs_length, u64 rs_offset, bool last) rdma_read_chunk_frmr() argument 416 rdma_read_chunks(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head) rdma_read_chunks() argument 511 rdma_read_complete(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head) rdma_read_complete() argument
|
/linux-4.4.14/net/sched/ |
H A D | cls_cgroup.c | 32 struct cls_cgroup_head *head = rcu_dereference_bh(tp->root); cls_cgroup_classify() local 37 if (!tcf_em_tree_match(skb, &head->ematches, NULL)) cls_cgroup_classify() 43 return tcf_exts_exec(skb, &head->exts, res); cls_cgroup_classify() 62 struct cls_cgroup_head *head = container_of(root, cls_cgroup_destroy_rcu() local 66 tcf_exts_destroy(&head->exts); cls_cgroup_destroy_rcu() 67 tcf_em_tree_destroy(&head->ematches); cls_cgroup_destroy_rcu() 68 kfree(head); cls_cgroup_destroy_rcu() 77 struct cls_cgroup_head *head = rtnl_dereference(tp->root); cls_cgroup_change() local 86 if (!head && !handle) cls_cgroup_change() 89 if (head && handle != head->handle) cls_cgroup_change() 92 new = kzalloc(sizeof(*head), GFP_KERNEL); cls_cgroup_change() 119 if (head) cls_cgroup_change() 120 call_rcu(&head->rcu, cls_cgroup_destroy_rcu); cls_cgroup_change() 129 struct cls_cgroup_head *head = rtnl_dereference(tp->root); cls_cgroup_destroy() local 134 if (head) { cls_cgroup_destroy() 136 call_rcu(&head->rcu, cls_cgroup_destroy_rcu); cls_cgroup_destroy() 148 struct cls_cgroup_head *head = rtnl_dereference(tp->root); cls_cgroup_walk() local 153 if (arg->fn(tp, (unsigned long) head, arg) < 0) { cls_cgroup_walk() 164 struct cls_cgroup_head *head = rtnl_dereference(tp->root); cls_cgroup_dump() local 167 t->tcm_handle = head->handle; cls_cgroup_dump() 173 if (tcf_exts_dump(skb, &head->exts) < 0 || cls_cgroup_dump() 174 tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0) cls_cgroup_dump() 179 if (tcf_exts_dump_stats(skb, &head->exts) < 0) cls_cgroup_dump()
|
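cls_cgroup keeps its whole configuration in one RCU-managed head: the classify fast path reads it with rcu_dereference_bh(), while writers build a complete replacement under RTNL, publish it with rcu_assign_pointer(), and retire the old head through call_rcu(). The update pattern in isolation, hedged; struct cfg and cfg_free_rcu() are stand-ins for the head structure and its RCU callback:

    struct cfg {
        int setting;
        struct rcu_head rcu;
    };

    static struct cfg __rcu *active;

    static void cfg_free_rcu(struct rcu_head *rcu)
    {
        kfree(container_of(rcu, struct cfg, rcu));   /* after readers drain */
    }

    static int cfg_update(int setting)
    {
        struct cfg *old = rtnl_dereference(active);  /* writer side, RTNL held */
        struct cfg *new = kzalloc(sizeof(*new), GFP_KERNEL);

        if (!new)
            return -ENOMEM;
        new->setting = setting;

        rcu_assign_pointer(active, new);             /* publish complete object */
        if (old)
            call_rcu(&old->rcu, cfg_free_rcu);       /* deferred free */
        return 0;
    }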
H A D | cls_fw.c | 62 struct fw_head *head = rcu_dereference_bh(tp->root); fw_classify() local 67 if (head != NULL) { fw_classify() 68 id &= head->mask; fw_classify() 70 for (f = rcu_dereference_bh(head->ht[fw_hash(id)]); f; fw_classify() 100 struct fw_head *head = rtnl_dereference(tp->root); fw_get() local 103 if (head == NULL) fw_get() 106 f = rtnl_dereference(head->ht[fw_hash(handle)]); fw_get() 122 static void fw_delete_filter(struct rcu_head *head) fw_delete_filter() argument 124 struct fw_filter *f = container_of(head, struct fw_filter, rcu); fw_delete_filter() 132 struct fw_head *head = rtnl_dereference(tp->root); fw_destroy() local 136 if (head == NULL) fw_destroy() 141 if (rcu_access_pointer(head->ht[h])) fw_destroy() 146 while ((f = rtnl_dereference(head->ht[h])) != NULL) { fw_destroy() 147 RCU_INIT_POINTER(head->ht[h], fw_destroy() 154 kfree_rcu(head, rcu); fw_destroy() 160 struct fw_head *head = rtnl_dereference(tp->root); fw_delete() local 165 if (head == NULL || f == NULL) fw_delete() 168 fp = &head->ht[fw_hash(f->id)]; fw_delete() 193 struct fw_head *head = rtnl_dereference(tp->root); fw_change_attrs() local 223 if (mask != head->mask) fw_change_attrs() 225 } else if (head->mask != 0xFFFFFFFF) fw_change_attrs() 242 struct fw_head *head = rtnl_dereference(tp->root); fw_change() local 281 fp = &head->ht[fw_hash(fnew->id)]; fw_change() 299 if (!head) { fw_change() 304 head = kzalloc(sizeof(*head), GFP_KERNEL); fw_change() 305 if (!head) fw_change() 307 head->mask = mask; fw_change() 309 rcu_assign_pointer(tp->root, head); fw_change() 324 RCU_INIT_POINTER(f->next, head->ht[fw_hash(handle)]); fw_change() 325 rcu_assign_pointer(head->ht[fw_hash(handle)], f); fw_change() 337 struct fw_head *head = rtnl_dereference(tp->root); fw_walk() local 340 if (head == NULL) fw_walk() 349 for (f = rtnl_dereference(head->ht[h]); f; fw_walk() 367 struct fw_head *head = rtnl_dereference(tp->root); fw_dump() local 394 if (head->mask != 0xFFFFFFFF && fw_dump() 395 nla_put_u32(skb, TCA_FW_MASK, head->mask)) fw_dump()
|
H A D | cls_basic.c | 44 struct basic_head *head = rcu_dereference_bh(tp->root); basic_classify() local 47 list_for_each_entry_rcu(f, &head->flist, link) { basic_classify() 62 struct basic_head *head = rtnl_dereference(tp->root); basic_get() local 65 if (head == NULL) basic_get() 68 list_for_each_entry(f, &head->flist, link) { basic_get() 80 struct basic_head *head; basic_init() local 82 head = kzalloc(sizeof(*head), GFP_KERNEL); basic_init() 83 if (head == NULL) basic_init() 85 INIT_LIST_HEAD(&head->flist); basic_init() 86 rcu_assign_pointer(tp->root, head); basic_init() 90 static void basic_delete_filter(struct rcu_head *head) basic_delete_filter() argument 92 struct basic_filter *f = container_of(head, struct basic_filter, rcu); basic_delete_filter() 101 struct basic_head *head = rtnl_dereference(tp->root); basic_destroy() local 104 if (!force && !list_empty(&head->flist)) basic_destroy() 107 list_for_each_entry_safe(f, n, &head->flist, link) { basic_destroy() 113 kfree_rcu(head, rcu); basic_destroy() 170 struct basic_head *head = rtnl_dereference(tp->root); basic_change() local 201 if (++head->hgenerator == 0x7FFFFFFF) basic_change() 202 head->hgenerator = 1; basic_change() 203 } while (--i > 0 && basic_get(tp, head->hgenerator)); basic_change() 210 fnew->handle = head->hgenerator; basic_change() 224 list_add_rcu(&fnew->link, &head->flist); basic_change() 235 struct basic_head *head = rtnl_dereference(tp->root); basic_walk() local 238 list_for_each_entry(f, &head->flist, link) { basic_walk()
|
H A D | cls_route.c | 72 route4_reset_fastmap(struct route4_head *head) route4_reset_fastmap() argument 75 memset(head->fastmap, 0, sizeof(head->fastmap)); route4_reset_fastmap() 80 route4_set_fastmap(struct route4_head *head, u32 id, int iif, route4_set_fastmap() argument 87 head->fastmap[h].id = id; route4_set_fastmap() 88 head->fastmap[h].iif = iif; route4_set_fastmap() 89 head->fastmap[h].filter = f; route4_set_fastmap() 124 route4_set_fastmap(head, id, iif, f); \ 131 struct route4_head *head = rcu_dereference_bh(tp->root); route4_classify() local 143 if (head == NULL) route4_classify() 151 if (id == head->fastmap[h].id && route4_classify() 152 iif == head->fastmap[h].iif && route4_classify() 153 (f = head->fastmap[h].filter) != NULL) { route4_classify() 168 b = rcu_dereference_bh(head->table[h]); route4_classify() 194 route4_set_fastmap(head, id, iif, ROUTE4_FAILURE); route4_classify() 232 struct route4_head *head = rtnl_dereference(tp->root); route4_get() local 237 if (!head) route4_get() 248 b = rtnl_dereference(head->table[h1]); route4_get() 261 struct route4_head *head; route4_init() local 263 head = kzalloc(sizeof(struct route4_head), GFP_KERNEL); route4_init() 264 if (head == NULL) route4_init() 267 rcu_assign_pointer(tp->root, head); route4_init() 272 route4_delete_filter(struct rcu_head *head) route4_delete_filter() argument 274 struct route4_filter *f = container_of(head, struct route4_filter, rcu); route4_delete_filter() 282 struct route4_head *head = rtnl_dereference(tp->root); route4_destroy() local 285 if (head == NULL) route4_destroy() 290 if (rcu_access_pointer(head->table[h1])) route4_destroy() 298 b = rtnl_dereference(head->table[h1]); route4_destroy() 312 RCU_INIT_POINTER(head->table[h1], NULL); route4_destroy() 317 kfree_rcu(head, rcu); route4_destroy() 323 struct route4_head *head = rtnl_dereference(tp->root); route4_delete() local 331 if (!head || !f) route4_delete() 348 route4_reset_fastmap(head); route4_delete() 364 RCU_INIT_POINTER(head->table[to_hash(h)], NULL); route4_delete() 382 u32 handle, struct route4_head *head, route4_set_parms() 430 b = rtnl_dereference(head->table[h1]); route4_set_parms() 437 rcu_assign_pointer(head->table[h1], b); route4_set_parms() 480 struct route4_head *head = rtnl_dereference(tp->root); route4_change() local 518 err = route4_set_parms(net, tp, base, f, handle, head, tb, route4_change() 538 b = rtnl_dereference(head->table[th]); route4_change() 551 route4_reset_fastmap(head); route4_change() 566 struct route4_head *head = rtnl_dereference(tp->root); route4_walk() local 569 if (head == NULL) route4_walk() 576 struct route4_bucket *b = rtnl_dereference(head->table[h]); route4_walk() 380 route4_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, struct route4_filter *f, u32 handle, struct route4_head *head, struct nlattr **tb, struct nlattr *est, int new, bool ovr) route4_set_parms() argument
|
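cls_route backs its bucket walk with a "fastmap": a small direct-mapped cache from (route id, iif) to the last matching filter, invalidated wholesale with memset() whenever the configuration changes (route4_reset_fastmap() above). The caching idea as a sketch; the slot count and hash here are arbitrary, not the driver's actual values:

    #define FASTMAP_SLOTS 256

    struct fast_ent { u32 id; int iif; void *filter; };
    static struct fast_ent fastmap[FASTMAP_SLOTS];

    static unsigned int fast_hash(u32 id)
    {
        return id & (FASTMAP_SLOTS - 1);       /* direct-mapped slot */
    }

    static void *fast_lookup(u32 id, int iif)
    {
        struct fast_ent *e = &fastmap[fast_hash(id)];

        return (e->id == id && e->iif == iif) ? e->filter : NULL;
    }

    static void fast_reset(void)
    {
        memset(fastmap, 0, sizeof(fastmap));   /* invalidate on any change */
    }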
H A D | cls_flower.c | 121 struct cls_fl_head *head = rcu_dereference_bh(tp->root); fl_classify() local 126 fl_clear_masked_range(&skb_key, &head->mask); fl_classify() 132 skb_flow_dissect(skb, &head->dissector, &skb_key, 0); fl_classify() 134 fl_set_masked_key(&skb_mkey, &skb_key, &head->mask); fl_classify() 136 f = rhashtable_lookup_fast(&head->ht, fl_classify() 137 fl_key_get_start(&skb_mkey, &head->mask), fl_classify() 138 head->ht_params); fl_classify() 148 struct cls_fl_head *head; fl_init() local 150 head = kzalloc(sizeof(*head), GFP_KERNEL); fl_init() 151 if (!head) fl_init() 154 INIT_LIST_HEAD_RCU(&head->filters); fl_init() 155 rcu_assign_pointer(tp->root, head); fl_init() 160 static void fl_destroy_filter(struct rcu_head *head) fl_destroy_filter() argument 162 struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu); fl_destroy_filter() 170 struct cls_fl_head *head = rtnl_dereference(tp->root); fl_destroy() local 173 if (!force && !list_empty(&head->filters)) fl_destroy() 176 list_for_each_entry_safe(f, next, &head->filters, list) { fl_destroy() 181 if (head->mask_assigned) fl_destroy() 182 rhashtable_destroy(&head->ht); fl_destroy() 183 kfree_rcu(head, rcu); fl_destroy() 189 struct cls_fl_head *head = rtnl_dereference(tp->root); fl_get() local 192 list_for_each_entry(f, &head->filters, list) fl_get() 320 static int fl_init_hashtable(struct cls_fl_head *head, fl_init_hashtable() argument 323 head->ht_params = fl_ht_params; fl_init_hashtable() 324 head->ht_params.key_len = fl_mask_range(mask); fl_init_hashtable() 325 head->ht_params.key_offset += mask->range.start; fl_init_hashtable() 327 return rhashtable_init(&head->ht, &head->ht_params); fl_init_hashtable() 352 static void fl_init_dissector(struct cls_fl_head *head, fl_init_dissector() argument 369 skb_flow_dissector_init(&head->dissector, keys, cnt); fl_init_dissector() 372 static int fl_check_assign_mask(struct cls_fl_head *head, fl_check_assign_mask() argument 377 if (head->mask_assigned) { fl_check_assign_mask() 378 if (!fl_mask_eq(&head->mask, mask)) fl_check_assign_mask() 387 err = fl_init_hashtable(head, mask); fl_check_assign_mask() 390 memcpy(&head->mask, mask, sizeof(head->mask)); fl_check_assign_mask() 391 head->mask_assigned = true; fl_check_assign_mask() 393 fl_init_dissector(head, mask); fl_check_assign_mask() 432 struct cls_fl_head *head) fl_grab_new_handle() 438 if (++head->hgen == 0x7FFFFFFF) fl_grab_new_handle() 439 head->hgen = 1; fl_grab_new_handle() 440 } while (--i > 0 && fl_get(tp, head->hgen)); fl_grab_new_handle() 446 handle = head->hgen; fl_grab_new_handle() 457 struct cls_fl_head *head = rtnl_dereference(tp->root); fl_change() local 481 handle = fl_grab_new_handle(tp, head); fl_change() 493 err = fl_check_assign_mask(head, &mask); fl_change() 497 err = rhashtable_insert_fast(&head->ht, &fnew->ht_node, fl_change() 498 head->ht_params); fl_change() 502 rhashtable_remove_fast(&head->ht, &fold->ht_node, fl_change() 503 head->ht_params); fl_change() 512 list_add_tail_rcu(&fnew->list, &head->filters); fl_change() 524 struct cls_fl_head *head = rtnl_dereference(tp->root); fl_delete() local 527 rhashtable_remove_fast(&head->ht, &f->ht_node, fl_delete() 528 head->ht_params); fl_delete() 537 struct cls_fl_head *head = rtnl_dereference(tp->root); fl_walk() local 540 list_for_each_entry_rcu(f, &head->filters, list) { fl_walk() 574 struct cls_fl_head *head = rtnl_dereference(tp->root); fl_dump() local 593 mask = &head->mask.key; fl_dump() 431 fl_grab_new_handle(struct tcf_proto *tp, struct 
cls_fl_head *head) fl_grab_new_handle() argument
|
H A D | cls_bpf.c | 81 struct cls_bpf_head *head = rcu_dereference_bh(tp->root); cls_bpf_classify() local 95 list_for_each_entry_rcu(prog, &head->plist, link) { cls_bpf_classify() 144 struct cls_bpf_head *head; cls_bpf_init() local 146 head = kzalloc(sizeof(*head), GFP_KERNEL); cls_bpf_init() 147 if (head == NULL) cls_bpf_init() 150 INIT_LIST_HEAD_RCU(&head->plist); cls_bpf_init() 151 rcu_assign_pointer(tp->root, head); cls_bpf_init() 190 struct cls_bpf_head *head = rtnl_dereference(tp->root); cls_bpf_destroy() local 193 if (!force && !list_empty(&head->plist)) cls_bpf_destroy() 196 list_for_each_entry_safe(prog, tmp, &head->plist, link) { cls_bpf_destroy() 203 kfree_rcu(head, rcu); cls_bpf_destroy() 209 struct cls_bpf_head *head = rtnl_dereference(tp->root); cls_bpf_get() local 213 if (head == NULL) cls_bpf_get() 216 list_for_each_entry(prog, &head->plist, link) { cls_bpf_get() 353 struct cls_bpf_head *head) cls_bpf_grab_new_handle() 359 if (++head->hgen == 0x7FFFFFFF) cls_bpf_grab_new_handle() 360 head->hgen = 1; cls_bpf_grab_new_handle() 361 } while (--i > 0 && cls_bpf_get(tp, head->hgen)); cls_bpf_grab_new_handle() 367 handle = head->hgen; cls_bpf_grab_new_handle() 378 struct cls_bpf_head *head = rtnl_dereference(tp->root); cls_bpf_change() local 405 prog->handle = cls_bpf_grab_new_handle(tp, head); cls_bpf_change() 422 list_add_rcu(&prog->link, &head->plist); cls_bpf_change() 514 struct cls_bpf_head *head = rtnl_dereference(tp->root); cls_bpf_walk() local 517 list_for_each_entry(prog, &head->plist, link) { cls_bpf_walk() 352 cls_bpf_grab_new_handle(struct tcf_proto *tp, struct cls_bpf_head *head) cls_bpf_grab_new_handle() argument
|
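cls_bpf, cls_basic and cls_flower all allocate filter handles the same way: bump a per-head generator, wrap it so values stay in 1..0x7FFFFFFE, and probe with the classifier's ->get() lookup until an unused handle turns up, giving up after a bounded number of tries. The loop in isolation; lookup_handle() is a stand-in for cls_bpf_get()/basic_get()/fl_get():

    /* Returns a free handle, or 0 if the identifier space is exhausted. */
    static u32 grab_new_handle(u32 *hgen, bool (*lookup_handle)(u32 handle))
    {
        unsigned int i = 0x80000000;   /* bound the search */

        do {
            if (++(*hgen) == 0x7FFFFFFF)
                *hgen = 1;             /* wrap: 0 and 0x7FFFFFFF never used */
        } while (--i > 0 && lookup_handle(*hgen));

        return i ? *hgen : 0;
    }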
H A D | sch_choke.c | 71 unsigned int head; member in struct:choke_sched_data 82 return (q->tail - q->head) & q->tab_mask; choke_len() 97 /* Move head pointer forward to skip over holes */ choke_zap_head_holes() 101 q->head = (q->head + 1) & q->tab_mask; choke_zap_head_holes() 102 if (q->head == q->tail) choke_zap_head_holes() 104 } while (q->tab[q->head] == NULL); choke_zap_head_holes() 112 if (q->head == q->tail) choke_zap_tail_holes() 125 if (idx == q->head) choke_drop_by_idx() 225 * times to find a random skb but then just give up and return the head 226 * Will return NULL if queue is empty (q->head == q->tail) 235 *pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask; choke_peek_random() 241 return q->tab[*pidx = q->head]; choke_peek_random() 254 if (q->head == q->tail) choke_match_random() 352 if (q->head == q->tail) { choke_dequeue() 358 skb = q->tab[q->head]; choke_dequeue() 359 q->tab[q->head] = NULL; choke_dequeue() 388 while (q->head != q->tail) { choke_reset() 389 struct sk_buff *skb = q->tab[q->head]; choke_reset() 391 q->head = (q->head + 1) & q->tab_mask; choke_reset() 400 q->head = q->tail = 0; choke_reset() 461 while (q->head != q->tail) { choke_change() 462 struct sk_buff *skb = q->tab[q->head]; choke_change() 464 q->head = (q->head + 1) & q->tab_mask; choke_change() 477 q->head = 0; choke_change() 495 if (q->head == q->tail) choke_change() 562 return (q->head != q->tail) ? q->tab[q->head] : NULL; choke_peek_head()
|
H A D | sch_fq.c | 61 struct sk_buff *head; /* list of skbs for this flow : first skb */ member in struct:fq_flow 156 static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow) fq_flow_add_tail() argument 158 if (head->first) fq_flow_add_tail() 159 head->last->next = flow; fq_flow_add_tail() 161 head->first = flow; fq_flow_add_tail() 162 head->last = flow; fq_flow_add_tail() 298 /* remove one skb from head of flow queue */ fq_dequeue_head() 301 struct sk_buff *skb = flow->head; fq_dequeue_head() 304 flow->head = skb->next; fq_dequeue_head() 326 * head-> [retrans pkt 1] 335 struct sk_buff *prev, *head = flow->head; flow_queue_add() local 338 if (!head) { flow_queue_add() 339 flow->head = skb; flow_queue_add() 353 while (skb_is_retransmit(head)) { flow_queue_add() 354 prev = head; flow_queue_add() 355 head = head->next; flow_queue_add() 356 if (!head) flow_queue_add() 359 if (!prev) { /* no rtx packet in queue, become the new head */ flow_queue_add() 360 skb->next = flow->head; flow_queue_add() 361 flow->head = skb; flow_queue_add() 432 struct fq_flow_head *head; fq_dequeue() local 442 head = &q->new_flows; fq_dequeue() 443 if (!head->first) { fq_dequeue() 444 head = &q->old_flows; fq_dequeue() 445 if (!head->first) { fq_dequeue() 453 f = head->first; fq_dequeue() 457 head->first = f->next; fq_dequeue() 462 skb = f->head; fq_dequeue() 465 head->first = f->next; fq_dequeue() 472 head->first = f->next; fq_dequeue() 474 if ((head == &q->new_flows) && q->old_flows.first) { fq_dequeue()
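Each fq_flow chains its skbs through their next pointers behind a head/tail pair, which makes tail enqueue and head dequeue O(1); fq_flow_add_tail() applies the same pattern to the flow lists themselves. A minimal sketch of that queue shape (struct pkt is a stand-in for sk_buff, and the retransmit reordering shown above is omitted):

    #include <stdio.h>
    #include <stdlib.h>

    struct pkt { int id; struct pkt *next; };
    struct flow { struct pkt *head, *tail; };

    /* Same shape as fq_flow_add_tail(): empty list sets head, else the
     * old tail gets linked to the newcomer. */
    static void flow_add_tail(struct flow *f, struct pkt *p)
    {
        p->next = NULL;
        if (f->head)
            f->tail->next = p;
        else
            f->head = p;
        f->tail = p;
    }

    /* Same shape as fq_dequeue_head(): pop one packet from the head. */
    static struct pkt *flow_dequeue_head(struct flow *f)
    {
        struct pkt *p = f->head;

        if (p)
            f->head = p->next;
        return p;
    }

    int main(void)
    {
        struct flow f = { NULL, NULL };
        struct pkt a = { 1, NULL }, b = { 2, NULL };

        flow_add_tail(&f, &a);
        flow_add_tail(&f, &b);
        printf("%d %d\n", flow_dequeue_head(&f)->id,
               flow_dequeue_head(&f)->id);  /* 1 2 */
        return 0;
    }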
|
/linux-4.4.14/tools/perf/util/ |
H A D | parse-events.y | 66 %type <head> event_config 68 %type <head> event_pmu 69 %type <head> event_legacy_symbol 70 %type <head> event_legacy_cache 71 %type <head> event_legacy_mem 72 %type <head> event_legacy_tracepoint 74 %type <head> event_legacy_numeric 75 %type <head> event_legacy_raw 76 %type <head> event_bpf_file 77 %type <head> event_def 78 %type <head> event_mod 79 %type <head> event_name 80 %type <head> event 81 %type <head> events 82 %type <head> group_def 83 %type <head> group 84 %type <head> groups 90 struct list_head *head; 238 struct list_head *head; 242 ALLOC_LIST(head); 245 list_add_tail(&term->list, head); 248 ABORT_ON(parse_events_add_pmu(data, list, "cpu", head)); 249 parse_events__free_terms(head); 256 struct list_head *head; 262 ALLOC_LIST(head); 265 list_add_tail(&term->list, head); 268 ABORT_ON(parse_events_add_pmu(data, list, "cpu", head)); 269 parse_events__free_terms(head); 488 struct list_head *head = $1; 491 ABORT_ON(!head); 492 list_add_tail(&term->list, head); 498 struct list_head *head = malloc(sizeof(*head)); 501 ABORT_ON(!head); 502 INIT_LIST_HEAD(head); 503 list_add_tail(&term->list, head); 504 $$ = head;
|
H A D | auxtrace.c | 150 INIT_LIST_HEAD(&queue_array[i].head); auxtrace_alloc_queue_array() 187 list_splice_tail(&queues->queue_array[i].head, auxtrace_queues__grow() 188 &queue_array[i].head); auxtrace_queues__grow() 247 list_add_tail(&buffer->list, &queue->head); auxtrace_queues__add_buffer() 382 while (!list_empty(&queues->queue_array[i].head)) { auxtrace_queues__free() 385 buffer = list_entry(queues->queue_array[i].head.next, auxtrace_queues__free() 526 unsigned char *data, u64 *head, u64 *old) auxtrace_record__find_snapshot() 529 return itr->find_snapshot(itr, idx, mm, data, head, old); auxtrace_record__find_snapshot() 569 static int auxtrace_index__alloc(struct list_head *head) auxtrace_index__alloc() argument 580 list_add_tail(&auxtrace_index->list, head); auxtrace_index__alloc() 585 void auxtrace_index__free(struct list_head *head) auxtrace_index__free() argument 589 list_for_each_entry_safe(auxtrace_index, n, head, list) { list_for_each_entry_safe() 595 static struct auxtrace_index *auxtrace_index__last(struct list_head *head) auxtrace_index__last() argument 600 if (list_empty(head)) { auxtrace_index__last() 601 err = auxtrace_index__alloc(head); auxtrace_index__last() 606 auxtrace_index = list_entry(head->prev, struct auxtrace_index, list); auxtrace_index__last() 609 err = auxtrace_index__alloc(head); auxtrace_index__last() 612 auxtrace_index = list_entry(head->prev, struct auxtrace_index, auxtrace_index__last() 619 int auxtrace_index__auxtrace_event(struct list_head *head, auxtrace_index__auxtrace_event() argument 625 auxtrace_index = auxtrace_index__last(head); auxtrace_index__auxtrace_event() 652 int auxtrace_index__write(int fd, struct list_head *head) auxtrace_index__write() argument 658 list_for_each_entry(auxtrace_index, head, list) auxtrace_index__write() 664 list_for_each_entry(auxtrace_index, head, list) { list_for_each_entry() 673 static int auxtrace_index__process_entry(int fd, struct list_head *head, auxtrace_index__process_entry() argument 683 auxtrace_index = auxtrace_index__last(head); auxtrace_index__process_entry() 705 struct list_head *head = &session->auxtrace_index; auxtrace_index__process() local 720 err = auxtrace_index__process_entry(fd, head, needs_swap); auxtrace_index__process() 761 if (list_is_last(&buffer->list, &queue->head)) auxtrace_buffer__next() 766 if (list_empty(&queue->head)) auxtrace_buffer__next() 768 return list_entry(queue->head.next, struct auxtrace_buffer, auxtrace_buffer__next() 1154 u64 head, old = mm->prev, offset, ref; __auxtrace_mmap__read() local 1161 head = auxtrace_mmap__read_snapshot_head(mm); __auxtrace_mmap__read() 1163 &head, &old)) __auxtrace_mmap__read() 1166 head = auxtrace_mmap__read_head(mm); __auxtrace_mmap__read() 1169 if (old == head) __auxtrace_mmap__read() 1172 pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n", __auxtrace_mmap__read() 1173 mm->idx, old, head, head - old); __auxtrace_mmap__read() 1176 head_off = head & mm->mask; __auxtrace_mmap__read() 1179 head_off = head % mm->len; __auxtrace_mmap__read() 1193 if (head > old || size <= head || mm->mask) { __auxtrace_mmap__read() 1194 offset = head - size; __auxtrace_mmap__read() 1197 * When the buffer size is not a power of 2, 'head' wraps at the __auxtrace_mmap__read() 1203 offset = head - size - rem; __auxtrace_mmap__read() 1243 mm->prev = head; __auxtrace_mmap__read() 1246 auxtrace_mmap__write_tail(mm, head); __auxtrace_mmap__read() 524 auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx, struct auxtrace_mmap *mm, unsigned char *data, u64 *head, u64 *old) auxtrace_record__find_snapshot() argument
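__auxtrace_mmap__read() converts the free-running head counter into an offset inside the ring buffer with a single AND when the buffer length is a power of two (mm->mask is nonzero) and falls back to a modulo otherwise. The same math in isolation:

    #include <inttypes.h>
    #include <stdio.h>

    /* Map a monotonically increasing head counter to a buffer offset.
     * 'mask' is len - 1 for power-of-two buffers and 0 otherwise, matching
     * the mm->mask convention in __auxtrace_mmap__read(). */
    static uint64_t head_offset(uint64_t head, uint64_t len, uint64_t mask)
    {
        return mask ? (head & mask) : (head % len);
    }

    int main(void)
    {
        /* Power-of-two buffer: the offset is one AND. */
        printf("%" PRIu64 "\n", head_offset(300, 256, 255)); /* 44 */
        /* Odd-sized snapshot buffer: an explicit modulo. */
        printf("%" PRIu64 "\n", head_offset(300, 100, 0));   /* 0 */
        return 0;
    }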
|
/linux-4.4.14/arch/unicore32/kernel/ |
H A D | Makefile | 27 head-y := head.o 30 extra-y := $(head-y) vmlinux.lds
|
/linux-4.4.14/arch/score/kernel/ |
H A D | Makefile | 5 extra-y := head.o vmlinux.lds
|
/linux-4.4.14/arch/m68k/include/asm/ |
H A D | atafd.h | 8 int head; /* "" "" */ member in struct:atari_format_descr
|
H A D | machw.h | 17 * head.S maps the videomem to VIDEOMEMBASE
|
/linux-4.4.14/arch/openrisc/kernel/ |
H A D | Makefile | 5 extra-y := head.o vmlinux.lds
|
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvif/ |
H A D | list.h | 45 * We need one list head in bar and a list element in all list_of_foos (both are of 60 * Now we initialize the list head: 102 * to-be-linked struct. struct list_head is required for both the head of the 107 * There are no requirements for a list head, any struct list_head can be a list 108 * head. 144 * Insert a new element after the given list head. The new element does not 147 * head → some element → ... 149 * head → new element → older element → ... 156 * @param head The existing list. 159 list_add(struct list_head *entry, struct list_head *head) list_add() argument 161 __list_add(entry, head, head->next); list_add() 165 * Append a new element to the end of the list given with this list head. 168 * head → some element → ... → lastelement 170 * head → some element → ... → lastelement → new element 177 * @param head The existing list. 180 list_add_tail(struct list_head *entry, struct list_head *head) list_add_tail() argument 182 __list_add(entry, head->prev, head); list_add_tail() 197 * Using list_del on a pure list head (like in the example at the top of 220 struct list_head *head) list_move_tail() 223 list_add_tail(list, head); list_move_tail() 235 list_empty(struct list_head *head) list_empty() argument 237 return head->next == head; list_empty() 251 * @return A pointer to the data struct containing the list head. 271 * @param ptr The list head 286 * @param ptr The list head 298 * Loop through the list given by head and set pos to struct in the list. 310 * @param head List head 314 #define list_for_each_entry(pos, head, member) \ 315 for (pos = __container_of((head)->next, pos, member); \ 316 &pos->member != (head); \ 326 #define list_for_each_entry_safe(pos, tmp, head, member) \ 327 for (pos = __container_of((head)->next, pos, member), \ 329 &pos->member != (head); \ 333 #define list_for_each_entry_reverse(pos, head, member) \ 334 for (pos = __container_of((head)->prev, pos, member); \ 335 &pos->member != (head); \ 338 #define list_for_each_entry_continue(pos, head, member) \ 340 &pos->member != (head); \ 343 #define list_for_each_entry_continue_reverse(pos, head, member) \ 345 &pos->member != (head); \ 348 #define list_for_each_entry_from(pos, head, member) \ 350 &pos->member != (head); \ 219 list_move_tail(struct list_head *list, struct list_head *head) list_move_tail() argument
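The comments above describe the classic intrusive list convention: the container ("bar") embeds one struct list_head as the list head, every element embeds another as its node, and __container_of() recovers the element from its node. A self-contained userspace rendering of exactly that pattern, reduced to init, list_add_tail, and a hand-rolled list_for_each_entry loop:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void list_init(struct list_head *h) { h->next = h->prev = h; }

    /* Insert before the head, i.e. at the tail, like list_add_tail(). */
    static void list_add_tail(struct list_head *entry, struct list_head *head)
    {
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
    }

    struct foo { int val; struct list_head entry; };

    int main(void)
    {
        struct list_head list_of_foos;
        struct foo a = { .val = 1 }, b = { .val = 2 };
        struct list_head *pos;

        list_init(&list_of_foos);
        list_add_tail(&a.entry, &list_of_foos);
        list_add_tail(&b.entry, &list_of_foos);

        /* Expanded list_for_each_entry(): walk nodes, recover containers. */
        for (pos = list_of_foos.next; pos != &list_of_foos; pos = pos->next)
            printf("%d\n", container_of(pos, struct foo, entry)->val);
        return 0;
    }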
|
/linux-4.4.14/arch/x86/include/asm/ |
H A D | spinlock.h | 53 set_bit(0, (volatile unsigned long *)&lock->tickets.head); __ticket_enter_slowpath() 73 __ticket_t head) __ticket_check_and_clear_slowpath() 75 if (head & TICKET_SLOWPATH_FLAG) { __ticket_check_and_clear_slowpath() 78 old.tickets.head = head; __ticket_check_and_clear_slowpath() 79 new.tickets.head = head & ~TICKET_SLOWPATH_FLAG; __ticket_check_and_clear_slowpath() 80 old.tickets.tail = new.tickets.head + TICKET_LOCK_INC; __ticket_check_and_clear_slowpath() 90 return __tickets_equal(lock.tickets.head, lock.tickets.tail); arch_spin_value_unlocked() 94 * Ticket locks are conceptually two parts, one indicating the current head of 97 * ourself to the queue and noting our position), then waiting until the head 101 * also load the position of the head, which takes care of memory ordering 111 if (likely(inc.head == inc.tail)) arch_spin_lock() 118 inc.head = READ_ONCE(lock->tickets.head); arch_spin_lock() 119 if (__tickets_equal(inc.head, inc.tail)) arch_spin_lock() 126 __ticket_check_and_clear_slowpath(lock, inc.head); arch_spin_lock() 136 if (!__tickets_equal(old.tickets.head, old.tickets.tail)) arch_spin_trylock() 150 __ticket_t head; arch_spin_unlock() local 154 head = xadd(&lock->tickets.head, TICKET_LOCK_INC); arch_spin_unlock() 156 if (unlikely(head & TICKET_SLOWPATH_FLAG)) { arch_spin_unlock() 157 head &= ~TICKET_SLOWPATH_FLAG; arch_spin_unlock() 158 __ticket_unlock_kick(lock, (head + TICKET_LOCK_INC)); arch_spin_unlock() 161 __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX); arch_spin_unlock() 168 return !__tickets_equal(tmp.tail, tmp.head); arch_spin_is_locked() 175 tmp.head &= ~TICKET_SLOWPATH_FLAG; arch_spin_is_contended() 176 return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC; arch_spin_is_contended() 188 __ticket_t head = READ_ONCE(lock->tickets.head); arch_spin_unlock_wait() local 193 * We need to check "unlocked" in a loop, tmp.head == head arch_spin_unlock_wait() 196 if (__tickets_equal(tmp.head, tmp.tail) || arch_spin_unlock_wait() 197 !__tickets_equal(tmp.head, head)) arch_spin_unlock_wait() 72 __ticket_check_and_clear_slowpath(arch_spinlock_t *lock, __ticket_t head) __ticket_check_and_clear_slowpath() argument
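Per the comment block above, a ticket lock is just a head/tail pair: a locker atomically takes a ticket from the tail and spins until the head reaches it; unlock advances the head. A userspace C11 sketch of that core, deliberately leaving out the kernel's TICKET_SLOWPATH_FLAG/paravirt machinery:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <pthread.h>

    struct ticket_lock {
        atomic_uint head;   /* ticket currently being served */
        atomic_uint tail;   /* next ticket to hand out */
    };

    static void lock(struct ticket_lock *l)
    {
        unsigned int me = atomic_fetch_add(&l->tail, 1); /* take a ticket */
        while (atomic_load(&l->head) != me)
            ;                                            /* spin until served */
    }

    static void unlock(struct ticket_lock *l)
    {
        atomic_fetch_add(&l->head, 1);                   /* serve the next one */
    }

    static struct ticket_lock tl;
    static int counter;

    static void *worker(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 100000; i++) {
            lock(&tl);
            counter++;          /* protected by the ticket lock */
            unlock(&tl);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t[4];

        for (int i = 0; i < 4; i++) pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < 4; i++) pthread_join(t[i], NULL);
        printf("%d\n", counter); /* 400000: FIFO fairness, no lost updates */
        return 0;
    }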
|
H A D | spinlock_types.h | 33 __ticket_t head, tail; member in struct:arch_spinlock::__anon3102::__raw_tickets
|
/linux-4.4.14/arch/x86/platform/uv/ |
H A D | uv_time.c | 169 struct uv_rtc_timer_head *head = blade_info[bid]; for_each_present_cpu() local 171 if (!head) { for_each_present_cpu() 172 head = kmalloc_node(sizeof(struct uv_rtc_timer_head) + for_each_present_cpu() 176 if (!head) { for_each_present_cpu() 180 spin_lock_init(&head->lock); for_each_present_cpu() 181 head->ncpus = uv_blade_nr_possible_cpus(bid); for_each_present_cpu() 182 head->next_cpu = -1; for_each_present_cpu() 183 blade_info[bid] = head; for_each_present_cpu() 186 head->cpu[bcpu].lcpu = cpu; for_each_present_cpu() 187 head->cpu[bcpu].expires = ULLONG_MAX; for_each_present_cpu() 194 static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode) uv_rtc_find_next_timer() argument 199 head->next_cpu = -1; uv_rtc_find_next_timer() 200 for (c = 0; c < head->ncpus; c++) { uv_rtc_find_next_timer() 201 u64 exp = head->cpu[c].expires; uv_rtc_find_next_timer() 208 head->next_cpu = bcpu; uv_rtc_find_next_timer() 209 c = head->cpu[bcpu].lcpu; uv_rtc_find_next_timer() 228 struct uv_rtc_timer_head *head = blade_info[bid]; uv_rtc_set_timer() local 230 u64 *t = &head->cpu[bcpu].expires; uv_rtc_set_timer() 234 spin_lock_irqsave(&head->lock, flags); uv_rtc_set_timer() 236 next_cpu = head->next_cpu; uv_rtc_set_timer() 241 expires < head->cpu[next_cpu].expires) { uv_rtc_set_timer() 242 head->next_cpu = bcpu; uv_rtc_set_timer() 245 uv_rtc_find_next_timer(head, pnode); uv_rtc_set_timer() 246 spin_unlock_irqrestore(&head->lock, flags); uv_rtc_set_timer() 251 spin_unlock_irqrestore(&head->lock, flags); uv_rtc_set_timer() 264 struct uv_rtc_timer_head *head = blade_info[bid]; uv_rtc_unset_timer() local 266 u64 *t = &head->cpu[bcpu].expires; uv_rtc_unset_timer() 270 spin_lock_irqsave(&head->lock, flags); uv_rtc_unset_timer() 272 if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force) uv_rtc_unset_timer() 278 if (head->next_cpu == bcpu) uv_rtc_unset_timer() 279 uv_rtc_find_next_timer(head, pnode); uv_rtc_unset_timer() 282 spin_unlock_irqrestore(&head->lock, flags); uv_rtc_unset_timer()
|
/linux-4.4.14/scripts/gdb/linux/ |
H A D | lists.py | 21 def list_check(head): 23 if (head.type == list_head.get_type().pointer()): 24 head = head.dereference() 25 elif (head.type != list_head.get_type()): 27 c = head 31 gdb.write('head is not accessible\n') 74 if c == head:
|
/linux-4.4.14/arch/nios2/boot/compressed/ |
H A D | Makefile | 5 targets := vmlinux head.o misc.o piggy.o vmlinux.lds 8 OBJECTS = $(obj)/head.o $(obj)/misc.o
|
/linux-4.4.14/scripts/kconfig/ |
H A D | list.h | 45 * @head: the head for your list. 48 #define list_for_each_entry(pos, head, member) \ 49 for (pos = list_entry((head)->next, typeof(*pos), member); \ 50 &pos->member != (head); \ 57 * @head: the head for your list. 60 #define list_for_each_entry_safe(pos, n, head, member) \ 61 for (pos = list_entry((head)->next, typeof(*pos), member), \ 63 &pos->member != (head); \ 68 * @head: the list to test. 70 static inline int list_empty(const struct list_head *head) list_empty() argument 72 return head->next == head; list_empty() 94 * @head: list head to add it before 96 * Insert a new entry before the specified head. 99 static inline void list_add_tail(struct list_head *_new, struct list_head *head) list_add_tail() argument 101 __list_add(_new, head->prev, head); list_add_tail()
|
/linux-4.4.14/drivers/gpu/drm/vmwgfx/ |
H A D | vmwgfx_marker.c | 32 struct list_head head; member in struct:vmw_marker 39 INIT_LIST_HEAD(&queue->head); vmw_marker_queue_init() 50 list_for_each_entry_safe(marker, next, &queue->head, head) { vmw_marker_queue_takedown() 67 list_add_tail(&marker->head, &queue->head); vmw_marker_push() 83 if (list_empty(&queue->head)) { vmw_marker_pull() 90 list_for_each_entry_safe(marker, next, &queue->head, head) { vmw_marker_pull() 97 list_del(&marker->head); vmw_marker_pull() 137 if (list_empty(&queue->head)) vmw_wait_lag() 140 marker = list_first_entry(&queue->head, vmw_wait_lag() 141 struct vmw_marker, head); vmw_wait_lag()
|
/linux-4.4.14/arch/s390/oprofile/ |
H A D | backtrace.c | 58 unsigned long head; s390_backtrace() local 64 head = regs->gprs[15]; s390_backtrace() 65 head_sf = (struct stack_frame*)head; s390_backtrace() 70 head = head_sf->back_chain; s390_backtrace() 72 head = __show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE, s390_backtrace() 75 __show_trace(&depth, head, S390_lowcore.thread_info, s390_backtrace()
|
/linux-4.4.14/fs/proc/ |
H A D | proc_sysctl.c | 28 static bool is_empty_dir(struct ctl_table_header *head) is_empty_dir() argument 30 return head->ctl_table[0].child == sysctl_mount_point; is_empty_dir() 76 static int insert_links(struct ctl_table_header *head); 105 struct ctl_table_header *head; find_entry() local 116 head = ctl_node->header; find_entry() 117 entry = &head->ctl_table[ctl_node - head->node]; find_entry() 126 *phead = head; find_entry() 133 static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry) insert_entry() argument 135 struct rb_node *node = &head->node[entry - head->ctl_table].node; insert_entry() 136 struct rb_node **p = &head->parent->root.rb_node; insert_entry() 161 sysctl_print_dir(head->parent); insert_entry() 168 rb_insert_color(node, &head->parent->root); insert_entry() 172 static void erase_entry(struct ctl_table_header *head, struct ctl_table *entry) erase_entry() argument 174 struct rb_node *node = &head->node[entry - head->ctl_table].node; erase_entry() 176 rb_erase(node, &head->parent->root); erase_entry() 179 static void init_header(struct ctl_table_header *head, init_header() argument 183 head->ctl_table = table; init_header() 184 head->ctl_table_arg = table; init_header() 185 head->used = 0; init_header() 186 head->count = 1; init_header() 187 head->nreg = 1; init_header() 188 head->unregistering = NULL; init_header() 189 head->root = root; init_header() 190 head->set = set; init_header() 191 head->parent = NULL; init_header() 192 head->node = node; init_header() 196 node->header = head; init_header() 200 static void erase_header(struct ctl_table_header *head) erase_header() argument 203 for (entry = head->ctl_table; entry->procname; entry++) erase_header() 204 erase_entry(head, entry); erase_header() 287 static void sysctl_head_get(struct ctl_table_header *head) sysctl_head_get() argument 290 head->count++; sysctl_head_get() 294 void sysctl_head_put(struct ctl_table_header *head) sysctl_head_put() argument 297 if (!--head->count) sysctl_head_put() 298 kfree_rcu(head, rcu); sysctl_head_put() 302 static struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *head) sysctl_head_grab() argument 304 BUG_ON(!head); sysctl_head_grab() 306 if (!use_table(head)) sysctl_head_grab() 307 head = ERR_PTR(-ENOENT); sysctl_head_grab() 309 return head; sysctl_head_grab() 312 static void sysctl_head_finish(struct ctl_table_header *head) sysctl_head_finish() argument 314 if (!head) sysctl_head_finish() 317 unuse_table(head); sysctl_head_finish() 334 struct ctl_table_header *head; lookup_entry() local 338 entry = find_entry(&head, dir, name, namelen); lookup_entry() 339 if (entry && use_table(head)) lookup_entry() 340 *phead = head; lookup_entry() 362 struct ctl_table_header *head = NULL; first_entry() local 370 head = ctl_node->header; first_entry() 371 entry = &head->ctl_table[ctl_node - head->node]; first_entry() 373 *phead = head; first_entry() 379 struct ctl_table_header *head = *phead; next_entry() local 381 struct ctl_node *ctl_node = &head->node[entry - head->ctl_table]; next_entry() 384 unuse_table(head); next_entry() 388 head = NULL; next_entry() 390 head = ctl_node->header; next_entry() 391 entry = &head->ctl_table[ctl_node - head->node]; next_entry() 393 *phead = head; next_entry() 417 static int sysctl_perm(struct ctl_table_header *head, struct ctl_table *table, int op) sysctl_perm() argument 419 struct ctl_table_root *root = head->root; sysctl_perm() 423 mode = root->permissions(head, table); sysctl_perm() 431 struct ctl_table_header *head, struct ctl_table *table) proc_sys_make_inode() 442 sysctl_head_get(head); proc_sys_make_inode() 444 ei->sysctl = head; proc_sys_make_inode() 457 if (is_empty_dir(head)) proc_sys_make_inode() 466 struct ctl_table_header *head = PROC_I(inode)->sysctl; grab_header() local 467 if (!head) grab_header() 468 head = &sysctl_table_root.default_set.dir.header; grab_header() 469 return sysctl_head_grab(head); grab_header() 475 struct ctl_table_header *head = grab_header(dir); proc_sys_lookup() local 484 if (IS_ERR(head)) proc_sys_lookup() 485 return ERR_CAST(head); proc_sys_lookup() 487 ctl_dir = container_of(head, struct ctl_dir, header); proc_sys_lookup() 501 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p); proc_sys_lookup() 512 sysctl_head_finish(head); proc_sys_lookup() 520 struct ctl_table_header *head = grab_header(inode); proc_sys_call_handler() local 525 if (IS_ERR(head)) proc_sys_call_handler() 526 return PTR_ERR(head); proc_sys_call_handler() 533 if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ)) proc_sys_call_handler() 547 sysctl_head_finish(head); proc_sys_call_handler() 566 struct ctl_table_header *head = grab_header(inode); proc_sys_open() local 570 if (IS_ERR(head)) proc_sys_open() 571 return PTR_ERR(head); proc_sys_open() 576 sysctl_head_finish(head); proc_sys_open() 584 struct ctl_table_header *head = grab_header(inode); proc_sys_poll() local 590 if (IS_ERR(head)) proc_sys_poll() 608 sysctl_head_finish(head); proc_sys_poll() 615 struct ctl_table_header *head, proc_sys_fill_cache() 632 inode = proc_sys_make_inode(dir->d_sb, head, table); proc_sys_fill_cache() 653 struct ctl_table_header *head, proc_sys_link_fill_cache() 657 head = sysctl_head_grab(head); proc_sys_link_fill_cache() 661 int err = sysctl_follow_link(&head, &table, current->nsproxy); proc_sys_link_fill_cache() 666 ret = proc_sys_fill_cache(file, ctx, head, table); proc_sys_link_fill_cache() 668 sysctl_head_finish(head); proc_sys_link_fill_cache() 672 static int scan(struct ctl_table_header *head, struct ctl_table *table, scan() argument 682 res = proc_sys_link_fill_cache(file, ctx, head, table); scan() 684 res = proc_sys_fill_cache(file, ctx, head, table); scan() 694 struct ctl_table_header *head = grab_header(file_inode(file)); proc_sys_readdir() local 700 if (IS_ERR(head)) proc_sys_readdir() 701 return PTR_ERR(head); proc_sys_readdir() 703 ctl_dir = container_of(head, struct ctl_dir, header); proc_sys_readdir() 716 sysctl_head_finish(head); proc_sys_readdir() 726 struct ctl_table_header *head; proc_sys_permission() local 734 head = grab_header(inode); proc_sys_permission() 735 if (IS_ERR(head)) proc_sys_permission() 736 return PTR_ERR(head); proc_sys_permission() 742 error = sysctl_perm(head, table, mask & ~MAY_NOT_BLOCK); proc_sys_permission() 744 sysctl_head_finish(head); proc_sys_permission() 768 struct ctl_table_header *head = grab_header(inode); proc_sys_getattr() local 771 if (IS_ERR(head)) proc_sys_getattr() 772 return PTR_ERR(head); proc_sys_getattr() 778 sysctl_head_finish(head); proc_sys_getattr() 839 struct ctl_table_header *head; proc_sys_compare() local 852 head = rcu_dereference(PROC_I(inode)->sysctl); proc_sys_compare() 853 return !head || !sysctl_is_seen(head); proc_sys_compare() 865 struct ctl_table_header *head; find_subdir() local 868 entry = find_entry(&head, dir, name, namelen); find_subdir() 873 return container_of(head, struct ctl_dir, header); find_subdir() 980 struct ctl_table_header *head; sysctl_follow_link() local 996 head = NULL; sysctl_follow_link() 997 entry = find_entry(&head, dir, procname, strlen(procname)); sysctl_follow_link() 999 if (entry && use_table(head)) { sysctl_follow_link() 1001 *phead = head; sysctl_follow_link() 1103 struct ctl_table_header *head; get_links() local 1109 link = find_entry(&head, dir, procname, strlen(procname)); get_links() 1122 link = find_entry(&head, dir, procname, strlen(procname)); get_links() 1123 head->nreg++; get_links() 1128 static int insert_links(struct ctl_table_header *head) insert_links() argument 1135 if (head->set == root_set) insert_links() 1138 core_parent = xlate_dir(root_set, head->parent); insert_links() 1142 if (get_links(core_parent, head->ctl_table, head->root)) insert_links() 1148 links = new_links(core_parent, head->ctl_table, head->root); insert_links() 1156 if (get_links(core_parent, head->ctl_table, head->root)) { insert_links() 430 proc_sys_make_inode(struct super_block *sb, struct ctl_table_header *head, struct ctl_table *table) proc_sys_make_inode() argument 613 proc_sys_fill_cache(struct file *file, struct dir_context *ctx, struct ctl_table_header *head, struct ctl_table *table) proc_sys_fill_cache() argument 651 proc_sys_link_fill_cache(struct file *file, struct dir_context *ctx, struct ctl_table_header *head, struct ctl_table *table) proc_sys_link_fill_cache() argument
|
/linux-4.4.14/drivers/net/wireless/ath/carl9170/ |
H A D | fwdesc.h | 119 struct carl9170fw_desc_head head; member in struct:carl9170fw_otus_desc 140 struct carl9170fw_desc_head head; member in struct:carl9170fw_motd_desc 157 struct carl9170fw_desc_head head; member in struct:carl9170fw_fix_desc 166 struct carl9170fw_desc_head head; member in struct:carl9170fw_dbg_desc 182 struct carl9170fw_desc_head head; member in struct:carl9170fw_chk_desc 192 struct carl9170fw_desc_head head; member in struct:carl9170fw_txsq_desc 202 struct carl9170fw_desc_head head; member in struct:carl9170fw_wol_desc 212 struct carl9170fw_desc_head head; member in struct:carl9170fw_last_desc 220 .head = { \ 227 static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head, carl9170fw_fill_desc() argument 231 head->magic[0] = magic[0]; carl9170fw_fill_desc() 232 head->magic[1] = magic[1]; carl9170fw_fill_desc() 233 head->magic[2] = magic[2]; carl9170fw_fill_desc() 234 head->magic[3] = magic[3]; carl9170fw_fill_desc() 236 head->length = length; carl9170fw_fill_desc() 237 head->min_ver = min_ver; carl9170fw_fill_desc() 238 head->cur_ver = cur_ver; carl9170fw_fill_desc() 248 #define CHECK_HDR_VERSION(head, _min_ver) \ 249 (((head)->cur_ver < _min_ver) || ((head)->min_ver > _min_ver)) \ 256 static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head, carl9170fw_desc_cmp() argument 260 if (descid[0] == head->magic[0] && descid[1] == head->magic[1] && carl9170fw_desc_cmp() 261 descid[2] == head->magic[2] && descid[3] == head->magic[3] && carl9170fw_desc_cmp() 262 !CHECK_HDR_VERSION(head, compatible_revision) && carl9170fw_desc_cmp() 263 (le16_to_cpu(head->length) >= min_len)) carl9170fw_desc_cmp()
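carl9170fw_desc_cmp() accepts a firmware descriptor only when all four magic bytes match, the requested revision lies within [min_ver, cur_ver] (the inverse of CHECK_HDR_VERSION), and the advertised length covers the structure being parsed. A host-side sketch with a simplified header layout (the real struct uses little-endian on-wire types and le16_to_cpu):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for struct carl9170fw_desc_head. */
    struct desc_head {
        uint8_t  magic[4];
        uint16_t length;
        uint8_t  min_ver, cur_ver;
    };

    static bool desc_cmp(const struct desc_head *head, const char magic[4],
                         uint16_t min_len, uint8_t rev)
    {
        return memcmp(head->magic, magic, 4) == 0 &&
               head->min_ver <= rev && rev <= head->cur_ver &&
               head->length >= min_len;
    }

    int main(void)
    {
        struct desc_head h = { { 'O', 'T', 'U', 'S' }, 24, 1, 3 };

        printf("%d %d\n",
               desc_cmp(&h, "OTUS", 16, 2),   /* 1: rev 2 inside [1,3] */
               desc_cmp(&h, "OTUS", 16, 4));  /* 0: rev 4 is too new */
        return 0;
    }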
|
/linux-4.4.14/drivers/mfd/ |
H A D | pcf50633-adc.c | 74 int head; trigger_next_adc_job_if_any() local 76 head = adc->queue_head; trigger_next_adc_job_if_any() 78 if (!adc->queue[head]) trigger_next_adc_job_if_any() 81 adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg); trigger_next_adc_job_if_any() 88 int head, tail; adc_enqueue_request() local 92 head = adc->queue_head; adc_enqueue_request() 102 if (head == tail) adc_enqueue_request() 177 int head, res; pcf50633_adc_irq() local 180 head = adc->queue_head; pcf50633_adc_irq() 182 req = adc->queue[head]; pcf50633_adc_irq() 188 adc->queue[head] = NULL; pcf50633_adc_irq() 189 adc->queue_head = (head + 1) & pcf50633_adc_irq() 223 int i, head; pcf50633_adc_remove() local 228 head = adc->queue_head; pcf50633_adc_remove() 230 if (WARN_ON(adc->queue[head])) pcf50633_adc_remove()
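The ADC driver serializes conversions through a small ring of request pointers indexed by queue_head/queue_tail under the usual power-of-two mask. A sketch of that ring discipline; the full/empty tests below use the classic one-slot-open convention, a simplification of the driver's exact checks:

    #include <stdio.h>

    #define QLEN 8   /* power of two, like the driver's FIFO depth */
    static int *queue[QLEN];
    static unsigned int head, tail;

    static int enqueue(int *req)
    {
        unsigned int next = (tail + 1) & (QLEN - 1);

        if (next == head)
            return -1;          /* full: advancing tail would hit head */
        queue[tail] = req;
        tail = next;
        return 0;
    }

    /* Like the IRQ path: consume queue[head], clear the slot, advance. */
    static int *dequeue(void)
    {
        int *req;

        if (head == tail)
            return NULL;        /* empty */
        req = queue[head];
        queue[head] = NULL;
        head = (head + 1) & (QLEN - 1);
        return req;
    }

    int main(void)
    {
        int a = 42;

        enqueue(&a);
        printf("%d\n", *dequeue()); /* 42 */
        return 0;
    }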
|
/linux-4.4.14/drivers/gpu/drm/nouveau/ |
H A D | nv10_fence.h | 10 struct nvif_object head[4]; member in struct:nv10_fence_chan
|
H A D | nouveau_display.c | 57 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_vblank_enable() 71 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_vblank_disable() 103 .base.head = nouveau_crtc(crtc)->index, nouveau_display_scanoutpos_head() 142 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_scanoutpos() 158 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_vblstamp() 176 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_vblank_fini() 189 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_vblank_init() 195 .head = nv_crtc->index, nouveau_display_vblank_init() 376 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { nouveau_display_init() 392 int head; nouveau_display_fini() local 395 for (head = 0; head < dev->mode_config.num_crtc; head++) nouveau_display_fini() 396 drm_vblank_off(dev, head); nouveau_display_fini() 402 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { nouveau_display_fini() 574 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_suspend() 584 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_suspend() 601 int ret, head; nouveau_display_resume() local 604 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_resume() 616 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_resume() 631 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_resume() 647 for (head = 0; head < dev->mode_config.num_crtc; head++) nouveau_display_resume() 648 drm_vblank_on(dev, head); nouveau_display_resume() 650 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { nouveau_display_resume() 678 list_add_tail(&s->head, &fctx->flip); nouveau_page_flip_emit() 705 list_del(&s->head); nouveau_page_flip_emit() 776 int head = nouveau_crtc(crtc)->index; nouveau_crtc_page_flip() local 786 OUT_RING (chan, head); nouveau_crtc_page_flip() 793 nouveau_bo_ref(new_bo, &dispnv04->image[head]); nouveau_crtc_page_flip() 841 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); nouveau_finish_page_flip() 857 list_del(&s->head); nouveau_finish_page_flip()
|
/linux-4.4.14/arch/blackfin/mach-common/ |
H A D | Makefile | 6 cache.o cache-c.o entry.o head.o \
|
/linux-4.4.14/arch/c6x/kernel/ |
H A D | Makefile | 5 extra-y := head.o vmlinux.lds
|
/linux-4.4.14/arch/cris/arch-v10/kernel/ |
H A D | Makefile | 5 extra-y := head.o
|
/linux-4.4.14/arch/cris/arch-v32/kernel/ |
H A D | Makefile | 5 extra-y := head.o
|
/linux-4.4.14/arch/m32r/kernel/ |
H A D | Makefile | 5 extra-y := head.o vmlinux.lds
|
/linux-4.4.14/arch/m68k/68000/ |
H A D | Makefile | 18 extra-y := head.o
|
/linux-4.4.14/arch/arm/include/asm/ |
H A D | procinfo.h | 27 * arch/arm/mm/proc-*.S and arch/arm/kernel/head.S 32 unsigned long __cpu_mm_mmu_flags; /* used by head.S */ 33 unsigned long __cpu_io_mmu_flags; /* used by head.S */ 34 unsigned long __cpu_flush; /* used by head.S */
|
/linux-4.4.14/tools/include/linux/ |
H A D | list.h | 26 * @head: the head for your list. 28 #define list_for_each_from(pos, head) \ 29 for (; pos != (head); pos = pos->next)
|
/linux-4.4.14/tools/usb/usbip/libsrc/ |
H A D | list.h | 52 * @head: list head to add it after 54 * Insert a new entry after the specified head. 57 static inline void list_add(struct list_head *new, struct list_head *head) list_add() argument 59 __list_add(new, head, head->next); list_add() 108 * @head: the head for your list. 110 #define list_for_each(pos, head) \ 111 for (pos = (head)->next; pos != (head); pos = pos->next) 117 * @head: the head for your list. 119 #define list_for_each_safe(pos, n, head) \ 120 for (pos = (head)->next, n = pos->next; pos != (head); \
|
/linux-4.4.14/arch/mn10300/boot/compressed/ |
H A D | Makefile | 5 targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o 9 $(obj)/vmlinux: $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE
|
/linux-4.4.14/net/ipv6/netfilter/ |
H A D | nf_conntrack_reasm.c | 382 struct sk_buff *fp, *op, *head = fq->q.fragments; nf_ct_frag6_reasm() local 388 WARN_ON(head == NULL); nf_ct_frag6_reasm() 389 WARN_ON(NFCT_FRAG6_CB(head)->offset != 0); nf_ct_frag6_reasm() 396 payload_len = ((head->data - skb_network_header(head)) - nf_ct_frag6_reasm() 405 if (skb_unclone(head, GFP_ATOMIC)) { nf_ct_frag6_reasm() 406 pr_debug("skb is cloned but can't expand head"); nf_ct_frag6_reasm() 413 if (skb_has_frag_list(head)) { nf_ct_frag6_reasm() 421 clone->next = head->next; nf_ct_frag6_reasm() 422 head->next = clone; nf_ct_frag6_reasm() 423 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; nf_ct_frag6_reasm() 424 skb_frag_list_init(head); nf_ct_frag6_reasm() 425 for (i = 0; i < skb_shinfo(head)->nr_frags; i++) nf_ct_frag6_reasm() 426 plen += skb_frag_size(&skb_shinfo(head)->frags[i]); nf_ct_frag6_reasm() 427 clone->len = clone->data_len = head->data_len - plen; nf_ct_frag6_reasm() 428 head->data_len -= clone->len; nf_ct_frag6_reasm() 429 head->len -= clone->len; nf_ct_frag6_reasm() 431 clone->ip_summed = head->ip_summed; nf_ct_frag6_reasm() 439 skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0]; nf_ct_frag6_reasm() 440 memmove(head->head + sizeof(struct frag_hdr), head->head, nf_ct_frag6_reasm() 441 (head->data - head->head) - sizeof(struct frag_hdr)); nf_ct_frag6_reasm() 442 head->mac_header += sizeof(struct frag_hdr); nf_ct_frag6_reasm() 443 head->network_header += sizeof(struct frag_hdr); nf_ct_frag6_reasm() 445 skb_shinfo(head)->frag_list = head->next; nf_ct_frag6_reasm() 446 skb_reset_transport_header(head); nf_ct_frag6_reasm() 447 skb_push(head, head->data - skb_network_header(head)); nf_ct_frag6_reasm() 449 for (fp = head->next; fp; fp = fp->next) { nf_ct_frag6_reasm() 450 head->data_len += fp->len; nf_ct_frag6_reasm() 451 head->len += fp->len; nf_ct_frag6_reasm() 452 if (head->ip_summed != fp->ip_summed) nf_ct_frag6_reasm() 453 head->ip_summed = CHECKSUM_NONE; nf_ct_frag6_reasm() 454 else if (head->ip_summed == CHECKSUM_COMPLETE) nf_ct_frag6_reasm() 455 head->csum = csum_add(head->csum, fp->csum); nf_ct_frag6_reasm() 456 head->truesize += fp->truesize; nf_ct_frag6_reasm() 458 sub_frag_mem_limit(fq->q.net, head->truesize); nf_ct_frag6_reasm() 460 head->ignore_df = 1; nf_ct_frag6_reasm() 461 head->next = NULL; nf_ct_frag6_reasm() 462 head->dev = dev; nf_ct_frag6_reasm() 463 head->tstamp = fq->q.stamp; nf_ct_frag6_reasm() 464 ipv6_hdr(head)->payload_len = htons(payload_len); nf_ct_frag6_reasm() 465 ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn); nf_ct_frag6_reasm() 466 IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size; nf_ct_frag6_reasm() 469 if (head->ip_summed == CHECKSUM_COMPLETE) nf_ct_frag6_reasm() 470 head->csum = csum_partial(skb_network_header(head), nf_ct_frag6_reasm() 471 skb_network_header_len(head), nf_ct_frag6_reasm() 472 head->csum); nf_ct_frag6_reasm() 477 /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */ nf_ct_frag6_reasm() 478 fp = skb_shinfo(head)->frag_list; nf_ct_frag6_reasm() 480 /* at above code, head skb is divided into two skbs. */ nf_ct_frag6_reasm() 483 op = NFCT_FRAG6_CB(head)->orig; nf_ct_frag6_reasm() 492 return head; nf_ct_frag6_reasm()
|
/linux-4.4.14/net/ipv6/ |
H A D | reassembly.c | 383 struct sk_buff *fp, *head = fq->q.fragments; ip6_frag_reasm() local 395 /* Make the one we just received the head. */ ip6_frag_reasm() 397 head = prev->next; ip6_frag_reasm() 398 fp = skb_clone(head, GFP_ATOMIC); ip6_frag_reasm() 403 fp->next = head->next; ip6_frag_reasm() 408 skb_morph(head, fq->q.fragments); ip6_frag_reasm() 409 head->next = fq->q.fragments->next; ip6_frag_reasm() 412 fq->q.fragments = head; ip6_frag_reasm() 415 WARN_ON(head == NULL); ip6_frag_reasm() 416 WARN_ON(FRAG6_CB(head)->offset != 0); ip6_frag_reasm() 419 payload_len = ((head->data - skb_network_header(head)) - ip6_frag_reasm() 426 if (skb_unclone(head, GFP_ATOMIC)) ip6_frag_reasm() 432 if (skb_has_frag_list(head)) { ip6_frag_reasm() 439 clone->next = head->next; ip6_frag_reasm() 440 head->next = clone; ip6_frag_reasm() 441 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; ip6_frag_reasm() 442 skb_frag_list_init(head); ip6_frag_reasm() 443 for (i = 0; i < skb_shinfo(head)->nr_frags; i++) ip6_frag_reasm() 444 plen += skb_frag_size(&skb_shinfo(head)->frags[i]); ip6_frag_reasm() 445 clone->len = clone->data_len = head->data_len - plen; ip6_frag_reasm() 446 head->data_len -= clone->len; ip6_frag_reasm() 447 head->len -= clone->len; ip6_frag_reasm() 449 clone->ip_summed = head->ip_summed; ip6_frag_reasm() 456 skb_network_header(head)[nhoff] = skb_transport_header(head)[0]; ip6_frag_reasm() 457 memmove(head->head + sizeof(struct frag_hdr), head->head, ip6_frag_reasm() 458 (head->data - head->head) - sizeof(struct frag_hdr)); ip6_frag_reasm() 459 head->mac_header += sizeof(struct frag_hdr); ip6_frag_reasm() 460 head->network_header += sizeof(struct frag_hdr); ip6_frag_reasm() 462 skb_reset_transport_header(head); ip6_frag_reasm() 463 skb_push(head, head->data - skb_network_header(head)); ip6_frag_reasm() 465 sum_truesize = head->truesize; ip6_frag_reasm() 466 for (fp = head->next; fp;) { ip6_frag_reasm() 472 if (head->ip_summed != fp->ip_summed) ip6_frag_reasm() 473 head->ip_summed = CHECKSUM_NONE; ip6_frag_reasm() 474 else if (head->ip_summed == CHECKSUM_COMPLETE) ip6_frag_reasm() 475 head->csum = csum_add(head->csum, fp->csum); ip6_frag_reasm() 477 if (skb_try_coalesce(head, fp, &headstolen, &delta)) { ip6_frag_reasm() 480 if (!skb_shinfo(head)->frag_list) ip6_frag_reasm() 481 skb_shinfo(head)->frag_list = fp; ip6_frag_reasm() 482 head->data_len += fp->len; ip6_frag_reasm() 483 head->len += fp->len; ip6_frag_reasm() 484 head->truesize += fp->truesize; ip6_frag_reasm() 490 head->next = NULL; ip6_frag_reasm() 491 head->dev = dev; ip6_frag_reasm() 492 head->tstamp = fq->q.stamp; ip6_frag_reasm() 493 ipv6_hdr(head)->payload_len = htons(payload_len); ip6_frag_reasm() 494 ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn); ip6_frag_reasm() 495 IP6CB(head)->nhoff = nhoff; ip6_frag_reasm() 496 IP6CB(head)->flags |= IP6SKB_FRAGMENTED; ip6_frag_reasm() 499 skb_postpush_rcsum(head, skb_network_header(head), ip6_frag_reasm() 500 skb_network_header_len(head)); ip6_frag_reasm()
|
/linux-4.4.14/arch/x86/kernel/cpu/mcheck/ |
H A D | mce-genpool.c | 31 struct llist_node *head; mce_gen_pool_process() local 35 head = llist_del_all(&mce_event_llist); mce_gen_pool_process() 36 if (!head) mce_gen_pool_process() 39 head = llist_reverse_order(head); llist_for_each_entry_safe() 40 llist_for_each_entry_safe(node, tmp, head, llnode) { llist_for_each_entry_safe()
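mce_gen_pool_process() drains a lock-free llist by exchanging the head with NULL (llist_del_all) and then reversing the detached LIFO chain so records come out in arrival order. A C11 sketch of the push / del_all / reverse trio:

    #include <stdatomic.h>
    #include <stdio.h>

    struct node { int val; struct node *next; };
    static _Atomic(struct node *) list_head;

    /* llist_add(): lock-free push onto the head. */
    static void push(struct node *n)
    {
        struct node *old = atomic_load(&list_head);

        do {
            n->next = old;
        } while (!atomic_compare_exchange_weak(&list_head, &old, n));
    }

    /* llist_del_all(): detach the whole list in one atomic exchange. */
    static struct node *del_all(void)
    {
        return atomic_exchange(&list_head, NULL);
    }

    /* llist_reverse_order(): pushes build a LIFO chain; flip it to FIFO. */
    static struct node *reverse(struct node *n)
    {
        struct node *prev = NULL;

        while (n) {
            struct node *next = n->next;
            n->next = prev;
            prev = n;
            n = next;
        }
        return prev;
    }

    int main(void)
    {
        struct node a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };

        push(&a); push(&b); push(&c);
        for (struct node *n = reverse(del_all()); n; n = n->next)
            printf("%d\n", n->val);   /* 1 2 3: arrival order */
        return 0;
    }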
|
/linux-4.4.14/kernel/ |
H A D | futex_compat.c | 24 compat_uptr_t __user *head, unsigned int *pi) fetch_robust_entry() 26 if (get_user(*uentry, head)) fetch_robust_entry() 52 struct compat_robust_list_head __user *head = curr->compat_robust_list; compat_exit_robust_list() local 64 * Fetch the list head (which was registered earlier, via compat_exit_robust_list() 67 if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) compat_exit_robust_list() 72 if (get_user(futex_offset, &head->futex_offset)) compat_exit_robust_list() 79 &head->list_op_pending, &pip)) compat_exit_robust_list() 83 while (entry != (struct robust_list __user *) &head->list) { compat_exit_robust_list() 121 struct compat_robust_list_head __user *, head, COMPAT_SYSCALL_DEFINE2() 127 if (unlikely(len != sizeof(*head))) COMPAT_SYSCALL_DEFINE2() 130 current->compat_robust_list = head; COMPAT_SYSCALL_DEFINE2() 139 struct compat_robust_list_head __user *head; COMPAT_SYSCALL_DEFINE3() local 161 head = p->compat_robust_list; COMPAT_SYSCALL_DEFINE3() 164 if (put_user(sizeof(*head), len_ptr)) COMPAT_SYSCALL_DEFINE3() 166 return put_user(ptr_to_compat(head), head_ptr); COMPAT_SYSCALL_DEFINE3() 23 fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, compat_uptr_t __user *head, unsigned int *pi) fetch_robust_entry() argument
|
H A D | task_work.c | 29 struct callback_head *head; task_work_add() local 32 head = ACCESS_ONCE(task->task_works); task_work_add() 33 if (unlikely(head == &work_exited)) task_work_add() 35 work->next = head; task_work_add() 36 } while (cmpxchg(&task->task_works, head, work) != head); task_work_add() 90 struct callback_head *work, *head, *next; task_work_run() local 99 head = !work && (task->flags & PF_EXITING) ? task_work_run() 101 } while (cmpxchg(&task->task_works, work, head) != work); task_work_run()
|
H A D | user-return-notifier.c | 38 struct hlist_head *head; fire_user_return_notifiers() local 40 head = &get_cpu_var(return_notifier_list); fire_user_return_notifiers() 41 hlist_for_each_entry_safe(urn, tmp2, head, link) fire_user_return_notifiers()
|
H A D | notifier.c | 64 * @nl: Pointer to head of the blocking notifier chain 114 * @nh: Pointer to head of the atomic notifier chain 128 ret = notifier_chain_register(&nh->head, n); atomic_notifier_chain_register() 136 * @nh: Pointer to head of the atomic notifier chain 150 ret = notifier_chain_unregister(&nh->head, n); atomic_notifier_chain_unregister() 159 * @nh: Pointer to head of the atomic notifier chain 183 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); __atomic_notifier_call_chain() 205 * @nh: Pointer to head of the blocking notifier chain 224 return notifier_chain_register(&nh->head, n); blocking_notifier_chain_register() 227 ret = notifier_chain_register(&nh->head, n); blocking_notifier_chain_register() 235 * @nh: Pointer to head of the blocking notifier chain 250 ret = notifier_chain_cond_register(&nh->head, n); blocking_notifier_chain_cond_register() 258 * @nh: Pointer to head of the blocking notifier chain 277 return notifier_chain_unregister(&nh->head, n); blocking_notifier_chain_unregister() 280 ret = notifier_chain_unregister(&nh->head, n); blocking_notifier_chain_unregister() 288 * @nh: Pointer to head of the blocking notifier chain 311 * We check the head outside the lock, but if this access is __blocking_notifier_call_chain() 315 if (rcu_access_pointer(nh->head)) { __blocking_notifier_call_chain() 317 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, __blocking_notifier_call_chain() 339 * @nh: Pointer to head of the raw notifier chain 350 return notifier_chain_register(&nh->head, n); raw_notifier_chain_register() 356 * @nh: Pointer to head of the raw notifier chain 367 return notifier_chain_unregister(&nh->head, n); raw_notifier_chain_unregister() 373 * @nh: Pointer to head of the raw notifier chain 394 return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); __raw_notifier_call_chain() 413 * @nh: Pointer to head of the SRCU notifier chain 432 return notifier_chain_register(&nh->head, n); srcu_notifier_chain_register() 435 ret = notifier_chain_register(&nh->head, n); srcu_notifier_chain_register() 443 * @nh: Pointer to head of the SRCU notifier chain 462 return notifier_chain_unregister(&nh->head, n); srcu_notifier_chain_unregister() 465 ret = notifier_chain_unregister(&nh->head, n); srcu_notifier_chain_unregister() 474 * @nh: Pointer to head of the SRCU notifier chain 498 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); __srcu_notifier_call_chain() 512 * srcu_init_notifier_head - Initialize an SRCU notifier head 513 * @nh: Pointer to head of the srcu notifier chain 517 * calling any of the other SRCU notifier routines for this head. 519 * If an SRCU notifier head is deallocated, it must first be cleaned 520 * up by calling srcu_cleanup_notifier_head(). Otherwise the head's 528 nh->head = NULL; srcu_init_notifier_head()
|
/linux-4.4.14/kernel/rcu/ |
H A D | rcu.h | 71 static inline int debug_rcu_head_queue(struct rcu_head *head) debug_rcu_head_queue() argument 75 r1 = debug_object_activate(head, &rcuhead_debug_descr); debug_rcu_head_queue() 76 debug_object_active_state(head, &rcuhead_debug_descr, debug_rcu_head_queue() 82 static inline void debug_rcu_head_unqueue(struct rcu_head *head) debug_rcu_head_unqueue() argument 84 debug_object_active_state(head, &rcuhead_debug_descr, debug_rcu_head_unqueue() 87 debug_object_deactivate(head, &rcuhead_debug_descr); debug_rcu_head_unqueue() 90 static inline int debug_rcu_head_queue(struct rcu_head *head) debug_rcu_head_queue() argument 95 static inline void debug_rcu_head_unqueue(struct rcu_head *head) debug_rcu_head_unqueue() argument 106 static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head) __rcu_reclaim() argument 108 unsigned long offset = (unsigned long)head->func; __rcu_reclaim() 112 RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset)); __rcu_reclaim() 113 kfree((void *)head - offset); __rcu_reclaim() 117 RCU_TRACE(trace_rcu_invoke_callback(rn, head)); __rcu_reclaim() 118 head->func(head); __rcu_reclaim()
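__rcu_reclaim() exploits the fact that no callback function lives in the first page: when the stored "function pointer" is below 4096 it is really the byte offset of the rcu_head inside its container, and the whole object is kfree()d at head minus offset; that is the encoding kfree_rcu() produces. A userspace sketch (the integer/function-pointer casts are the usual conditionally-supported trick):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct rcu_head { void (*func)(struct rcu_head *); };

    struct widget {
        int payload;
        struct rcu_head rcu;   /* embedded at a small nonzero offset */
    };

    /* kfree_rcu() stores the member offset in place of a real callback. */
    static void queue_kfree_rcu(struct rcu_head *head, size_t offset)
    {
        head->func = (void (*)(struct rcu_head *))(uintptr_t)offset;
    }

    /* __rcu_reclaim(): small values are offsets, large ones are callbacks. */
    static void reclaim(struct rcu_head *head)
    {
        uintptr_t offset = (uintptr_t)head->func;

        if (offset < 4096)
            free((char *)head - offset);   /* free the whole container */
        else
            head->func(head);              /* invoke a real callback */
    }

    int main(void)
    {
        struct widget *w = malloc(sizeof(*w));

        queue_kfree_rcu(&w->rcu, offsetof(struct widget, rcu));
        reclaim(&w->rcu);                  /* frees w */
        puts("widget freed via its rcu_head offset");
        return 0;
    }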
|
/linux-4.4.14/fs/hfs/ |
H A D | btree.c | 21 struct hfs_btree_header_rec *head; hfs_btree_open() local 82 head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc)); hfs_btree_open() 83 tree->root = be32_to_cpu(head->root); hfs_btree_open() 84 tree->leaf_count = be32_to_cpu(head->leaf_count); hfs_btree_open() 85 tree->leaf_head = be32_to_cpu(head->leaf_head); hfs_btree_open() 86 tree->leaf_tail = be32_to_cpu(head->leaf_tail); hfs_btree_open() 87 tree->node_count = be32_to_cpu(head->node_count); hfs_btree_open() 88 tree->free_nodes = be32_to_cpu(head->free_nodes); hfs_btree_open() 89 tree->attributes = be32_to_cpu(head->attributes); hfs_btree_open() 90 tree->node_size = be16_to_cpu(head->node_size); hfs_btree_open() 91 tree->max_key_len = be16_to_cpu(head->max_key_len); hfs_btree_open() 92 tree->depth = be16_to_cpu(head->depth); hfs_btree_open() 161 struct hfs_btree_header_rec *head; hfs_btree_write() local 171 head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc)); hfs_btree_write() 173 head->root = cpu_to_be32(tree->root); hfs_btree_write() 174 head->leaf_count = cpu_to_be32(tree->leaf_count); hfs_btree_write() 175 head->leaf_head = cpu_to_be32(tree->leaf_head); hfs_btree_write() 176 head->leaf_tail = cpu_to_be32(tree->leaf_tail); hfs_btree_write() 177 head->node_count = cpu_to_be32(tree->node_count); hfs_btree_write() 178 head->free_nodes = cpu_to_be32(tree->free_nodes); hfs_btree_write() 179 head->attributes = cpu_to_be32(tree->attributes); hfs_btree_write() 180 head->depth = cpu_to_be16(tree->depth); hfs_btree_write()
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
H A D | ipath_cq.c | 52 u32 head; ipath_cq_enter() local 58 * Note that the head pointer might be writable by user processes. ipath_cq_enter() 62 head = wc->head; ipath_cq_enter() 63 if (head >= (unsigned) cq->ibcq.cqe) { ipath_cq_enter() 64 head = cq->ibcq.cqe; ipath_cq_enter() 67 next = head + 1; ipath_cq_enter() 81 wc->uqueue[head].wr_id = entry->wr_id; ipath_cq_enter() 82 wc->uqueue[head].status = entry->status; ipath_cq_enter() 83 wc->uqueue[head].opcode = entry->opcode; ipath_cq_enter() 84 wc->uqueue[head].vendor_err = entry->vendor_err; ipath_cq_enter() 85 wc->uqueue[head].byte_len = entry->byte_len; ipath_cq_enter() 86 wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data; ipath_cq_enter() 87 wc->uqueue[head].qp_num = entry->qp->qp_num; ipath_cq_enter() 88 wc->uqueue[head].src_qp = entry->src_qp; ipath_cq_enter() 89 wc->uqueue[head].wc_flags = entry->wc_flags; ipath_cq_enter() 90 wc->uqueue[head].pkey_index = entry->pkey_index; ipath_cq_enter() 91 wc->uqueue[head].slid = entry->slid; ipath_cq_enter() 92 wc->uqueue[head].sl = entry->sl; ipath_cq_enter() 93 wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits; ipath_cq_enter() 94 wc->uqueue[head].port_num = entry->port_num; ipath_cq_enter() 95 /* Make sure entry is written before the head index. */ ipath_cq_enter() 98 wc->kqueue[head] = *entry; ipath_cq_enter() 99 wc->head = next; ipath_cq_enter() 150 if (tail == wc->head) ipath_poll_cq() 228 * Allocate the completion queue entries and head/tail pointers. ipath_create_cq() 293 wc->head = 0; ipath_create_cq() 362 cq->queue->head != cq->queue->tail) ipath_req_notify_cq() 381 u32 head, tail, n; ipath_resize_cq() local 415 * Make sure head and tail are sane since they ipath_resize_cq() 419 head = old_wc->head; ipath_resize_cq() 420 if (head > (u32) cq->ibcq.cqe) ipath_resize_cq() 421 head = (u32) cq->ibcq.cqe; ipath_resize_cq() 425 if (head < tail) ipath_resize_cq() 426 n = cq->ibcq.cqe + 1 + head - tail; ipath_resize_cq() 428 n = head - tail; ipath_resize_cq() 433 for (n = 0; tail != head; n++) { ipath_resize_cq() 444 wc->head = n; ipath_resize_cq()
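ipath_cq_enter() above, and its hfi1/qib twins below, follow one ring protocol: wrap head at cqe, refuse the entry if advancing would collide with the consumer's tail, write the entry, then barrier before publishing the new head so a poller never observes the index ahead of the data. A C11 sketch using release/acquire ordering where the kernel uses an explicit wmb():

    #include <stdatomic.h>
    #include <stdio.h>

    #define CQE 8                 /* ring holds CQE + 1 slots, like ibcq.cqe */

    struct cq {
        int entries[CQE + 1];
        atomic_uint head;         /* written by the producer */
        atomic_uint tail;         /* written by the consumer */
    };

    /* Mirrors ipath_cq_enter(): wrap, overflow check, write, publish. */
    static int cq_enter(struct cq *cq, int entry)
    {
        unsigned int head = atomic_load(&cq->head);
        unsigned int next = (head >= CQE) ? 0 : head + 1;

        if (next == atomic_load(&cq->tail))
            return -1;                        /* overflow: ring is full */
        cq->entries[head] = entry;
        /* Release ordering makes the entry visible before the new head. */
        atomic_store_explicit(&cq->head, next, memory_order_release);
        return 0;
    }

    static int cq_poll(struct cq *cq, int *entry)
    {
        unsigned int tail = atomic_load(&cq->tail);

        if (tail == atomic_load_explicit(&cq->head, memory_order_acquire))
            return 0;                         /* empty */
        *entry = cq->entries[tail];
        atomic_store(&cq->tail, (tail >= CQE) ? 0 : tail + 1);
        return 1;
    }

    int main(void)
    {
        static struct cq cq;
        int v;

        cq_enter(&cq, 7);
        if (cq_poll(&cq, &v))
            printf("%d\n", v);   /* 7 */
        return 0;
    }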
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
H A D | cq.c | 71 u32 head; hfi1_cq_enter() local 77 * Note that the head pointer might be writable by user processes. hfi1_cq_enter() 81 head = wc->head; hfi1_cq_enter() 82 if (head >= (unsigned) cq->ibcq.cqe) { hfi1_cq_enter() 83 head = cq->ibcq.cqe; hfi1_cq_enter() 86 next = head + 1; hfi1_cq_enter() 100 wc->uqueue[head].wr_id = entry->wr_id; hfi1_cq_enter() 101 wc->uqueue[head].status = entry->status; hfi1_cq_enter() 102 wc->uqueue[head].opcode = entry->opcode; hfi1_cq_enter() 103 wc->uqueue[head].vendor_err = entry->vendor_err; hfi1_cq_enter() 104 wc->uqueue[head].byte_len = entry->byte_len; hfi1_cq_enter() 105 wc->uqueue[head].ex.imm_data = hfi1_cq_enter() 107 wc->uqueue[head].qp_num = entry->qp->qp_num; hfi1_cq_enter() 108 wc->uqueue[head].src_qp = entry->src_qp; hfi1_cq_enter() 109 wc->uqueue[head].wc_flags = entry->wc_flags; hfi1_cq_enter() 110 wc->uqueue[head].pkey_index = entry->pkey_index; hfi1_cq_enter() 111 wc->uqueue[head].slid = entry->slid; hfi1_cq_enter() 112 wc->uqueue[head].sl = entry->sl; hfi1_cq_enter() 113 wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits; hfi1_cq_enter() 114 wc->uqueue[head].port_num = entry->port_num; hfi1_cq_enter() 115 /* Make sure entry is written before the head index. */ hfi1_cq_enter() 118 wc->kqueue[head] = *entry; hfi1_cq_enter() 119 wc->head = next; hfi1_cq_enter() 173 if (tail == wc->head) hfi1_poll_cq() 256 * Allocate the completion queue entries and head/tail pointers. hfi1_create_cq() 322 wc->head = 0; hfi1_create_cq() 391 cq->queue->head != cq->queue->tail) hfi1_req_notify_cq() 410 u32 head, tail, n; hfi1_resize_cq() local 444 * Make sure head and tail are sane since they hfi1_resize_cq() 448 head = old_wc->head; hfi1_resize_cq() 449 if (head > (u32) cq->ibcq.cqe) hfi1_resize_cq() 450 head = (u32) cq->ibcq.cqe; hfi1_resize_cq() 454 if (head < tail) hfi1_resize_cq() 455 n = cq->ibcq.cqe + 1 + head - tail; hfi1_resize_cq() 457 n = head - tail; hfi1_resize_cq() 462 for (n = 0; tail != head; n++) { hfi1_resize_cq() 473 wc->head = n; hfi1_resize_cq()
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
H A D | qib_cq.c | 55 u32 head; qib_cq_enter() local 61 * Note that the head pointer might be writable by user processes. qib_cq_enter() 65 head = wc->head; qib_cq_enter() 66 if (head >= (unsigned) cq->ibcq.cqe) { qib_cq_enter() 67 head = cq->ibcq.cqe; qib_cq_enter() 70 next = head + 1; qib_cq_enter() 84 wc->uqueue[head].wr_id = entry->wr_id; qib_cq_enter() 85 wc->uqueue[head].status = entry->status; qib_cq_enter() 86 wc->uqueue[head].opcode = entry->opcode; qib_cq_enter() 87 wc->uqueue[head].vendor_err = entry->vendor_err; qib_cq_enter() 88 wc->uqueue[head].byte_len = entry->byte_len; qib_cq_enter() 89 wc->uqueue[head].ex.imm_data = qib_cq_enter() 91 wc->uqueue[head].qp_num = entry->qp->qp_num; qib_cq_enter() 92 wc->uqueue[head].src_qp = entry->src_qp; qib_cq_enter() 93 wc->uqueue[head].wc_flags = entry->wc_flags; qib_cq_enter() 94 wc->uqueue[head].pkey_index = entry->pkey_index; qib_cq_enter() 95 wc->uqueue[head].slid = entry->slid; qib_cq_enter() 96 wc->uqueue[head].sl = entry->sl; qib_cq_enter() 97 wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits; qib_cq_enter() 98 wc->uqueue[head].port_num = entry->port_num; qib_cq_enter() 99 /* Make sure entry is written before the head index. */ qib_cq_enter() 102 wc->kqueue[head] = *entry; qib_cq_enter() 103 wc->head = next; qib_cq_enter() 157 if (tail == wc->head) qib_poll_cq() 243 * Allocate the completion queue entries and head/tail pointers. qib_create_cq() 309 wc->head = 0; qib_create_cq() 378 cq->queue->head != cq->queue->tail) qib_req_notify_cq() 397 u32 head, tail, n; qib_resize_cq() local 431 * Make sure head and tail are sane since they qib_resize_cq() 435 head = old_wc->head; qib_resize_cq() 436 if (head > (u32) cq->ibcq.cqe) qib_resize_cq() 437 head = (u32) cq->ibcq.cqe; qib_resize_cq() 441 if (head < tail) qib_resize_cq() 442 n = cq->ibcq.cqe + 1 + head - tail; qib_resize_cq() 444 n = head - tail; qib_resize_cq() 449 for (n = 0; tail != head; n++) { qib_resize_cq() 460 wc->head = n; qib_resize_cq()
|
/linux-4.4.14/drivers/dma/ |
H A D | virt-dma.c | 65 LIST_HEAD(head); vchan_complete() 68 list_splice_tail_init(&vc->desc_completed, &head); vchan_complete() 80 while (!list_empty(&head)) { vchan_complete() 81 vd = list_first_entry(&head, struct virt_dma_desc, node); vchan_complete() 94 void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) vchan_dma_desc_free_list() argument 96 while (!list_empty(head)) { vchan_dma_desc_free_list() 97 struct virt_dma_desc *vd = list_first_entry(head, vchan_dma_desc_free_list()
|
H A D | virt-dma.h | 44 void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); 127 * @head: list of descriptors found 135 struct list_head *head) vchan_get_all_descriptors() 137 list_splice_tail_init(&vc->desc_submitted, head); vchan_get_all_descriptors() 138 list_splice_tail_init(&vc->desc_issued, head); vchan_get_all_descriptors() 139 list_splice_tail_init(&vc->desc_completed, head); vchan_get_all_descriptors() 145 LIST_HEAD(head); vchan_free_chan_resources() 148 vchan_get_all_descriptors(vc, &head); vchan_free_chan_resources() 151 vchan_dma_desc_free_list(vc, &head); vchan_free_chan_resources() 134 vchan_get_all_descriptors(struct virt_dma_chan *vc, struct list_head *head) vchan_get_all_descriptors() argument
|
/linux-4.4.14/fs/gfs2/ |
H A D | recovery.c | 57 struct list_head *head = &jd->jd_revoke_list; gfs2_revoke_add() local 61 list_for_each_entry(rr, head, rr_list) { list_for_each_entry() 79 list_add(&rr->rr_list, head); 110 struct list_head *head = &jd->jd_revoke_list; gfs2_revoke_clean() local 113 while (!list_empty(head)) { gfs2_revoke_clean() 114 rr = list_entry(head->next, struct gfs2_revoke_replay, rr_list); gfs2_revoke_clean() 151 struct gfs2_log_header_host *head) get_log_header() 173 *head = lh; get_log_header() 192 struct gfs2_log_header_host *head) find_good_lh() 198 error = get_log_header(jd, *blk, head); find_good_lh() 213 * jhead_scan - make sure we've found the head of the log 215 * @head: this is filled in with the log descriptor of the head 217 * At this point, seg and lh should be either the head of the log or just 218 * before. Scan forward until we find the head. 223 static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) jhead_scan() argument 225 unsigned int blk = head->lh_blkno; jhead_scan() 239 if (lh.lh_sequence == head->lh_sequence) { jhead_scan() 243 if (lh.lh_sequence < head->lh_sequence) jhead_scan() 246 *head = lh; jhead_scan() 253 * gfs2_find_jhead - find the head of a log 255 * @head: the log descriptor for the head of the log is returned here 258 * highest sequence number. (i.e. the log head) 263 int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) gfs2_find_jhead() argument 296 *head = lh_1; gfs2_find_jhead() 376 * @head: the head journal to start from 381 static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) clean_journal() argument 392 lblock = head->lh_blkno; clean_journal() 417 lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1); clean_journal() 456 struct gfs2_log_header_host head; gfs2_recover_func() local 501 error = gfs2_find_jhead(jd, &head); gfs2_recover_func() 505 if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { gfs2_recover_func() 547 lops_before_scan(jd, &head, pass); gfs2_recover_func() 548 error = foreach_descriptor(jd, head.lh_tail, gfs2_recover_func() 549 head.lh_blkno, pass); gfs2_recover_func() 555 error = clean_journal(jd, &head); gfs2_recover_func() 150 get_log_header(struct gfs2_jdesc *jd, unsigned int blk, struct gfs2_log_header_host *head) get_log_header() argument 191 find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk, struct gfs2_log_header_host *head) find_good_lh() argument
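gfs2_find_jhead() must locate the log header carrying the highest sequence number in a circular journal; the kernel bisects between two candidate headers and then lets jhead_scan() walk forward, but the invariant is easiest to see with a plain linear scan:

    #include <stdint.h>
    #include <stdio.h>

    struct log_header { uint64_t sequence; unsigned int blkno; };

    /* Linear stand-in for gfs2_find_jhead(): the head is the header with
     * the highest sequence; in a healthy journal the sequence rises until
     * it drops back at the wrap point. */
    static const struct log_header *find_jhead(const struct log_header *lh,
                                               unsigned int nblocks)
    {
        const struct log_header *head = &lh[0];

        for (unsigned int i = 1; i < nblocks; i++)
            if (lh[i].sequence > head->sequence)
                head = &lh[i];
        return head;
    }

    int main(void)
    {
        /* Sequences wrap mid-journal: 8 9 10 | 5 6 7 — the head is seq 10. */
        struct log_header j[6] = {
            { 8, 0 }, { 9, 1 }, { 10, 2 }, { 5, 3 }, { 6, 4 }, { 7, 5 },
        };

        printf("head at blk %u\n", find_jhead(j, 6)->blkno); /* blk 2 */
        return 0;
    }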
|
H A D | meta_io.h | 22 static inline void gfs2_buffer_clear_tail(struct buffer_head *bh, int head) gfs2_buffer_clear_tail() argument 24 BUG_ON(head > bh->b_size); gfs2_buffer_clear_tail() 25 memset(bh->b_data + head, 0, bh->b_size - head); gfs2_buffer_clear_tail()
|
/linux-4.4.14/drivers/misc/mic/scif/ |
H A D | scif_rma_list.c | 28 void scif_insert_tcw(struct scif_window *window, struct list_head *head) scif_insert_tcw() argument 31 struct scif_window *prev = list_entry(head, struct scif_window, list); scif_insert_tcw() 36 if (!list_empty(head)) { scif_insert_tcw() 37 curr = list_entry(head->prev, struct scif_window, list); scif_insert_tcw() 39 list_add_tail(&window->list, head); scif_insert_tcw() 43 list_for_each(item, head) { list_for_each() 58 void scif_insert_window(struct scif_window *window, struct list_head *head) scif_insert_window() argument 64 list_for_each(item, head) { list_for_each() 71 list_add(&window->list, head); 87 struct list_head *item, *temp, *head = req->head; scif_query_tcw() local 98 if (!list_empty(head)) { scif_query_tcw() 99 window = list_last_entry(head, struct scif_window, list); scif_query_tcw() 105 list_for_each_safe(item, temp, head) { list_for_each_safe() 154 list_for_each(item, req->head) { scif_query_window() 207 struct list_head *head = &ep->rma_info.reg_list; scif_rma_list_unregister() local 213 list_for_each_entry_safe_from(window, _window, head, list) { list_for_each_entry_safe_from() 239 struct list_head *head = &ep->rma_info.reg_list; scif_unmap_all_windows() local 242 list_for_each_safe(item, tmp, head) { list_for_each_safe() 261 struct list_head *head = &ep->rma_info.reg_list; scif_unregister_all_windows() local 268 list_for_each_safe(item, tmp, head) { list_for_each_safe()
|
H A D | scif_rma_list.h | 29 * @head: Head of list on which to search 41 struct list_head *head; member in struct:scif_rma_req 45 void scif_insert_window(struct scif_window *window, struct list_head *head); 47 struct list_head *head);
|
/linux-4.4.14/net/ipv4/ |
H A D | ip_fragment.c | 212 struct sk_buff *head = qp->q.fragments; ip_expire() local 222 head->dev = dev_get_by_index_rcu(net, qp->iif); ip_expire() 223 if (!head->dev) ip_expire() 227 iph = ip_hdr(head); ip_expire() 228 err = ip_route_input_noref(head, iph->daddr, iph->saddr, ip_expire() 229 iph->tos, head->dev); ip_expire() 237 (skb_rtable(head)->rt_type != RTN_LOCAL)) ip_expire() 241 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); ip_expire() 442 /* Eat head of the next overlapped fragment ip_frag_queue() 529 struct sk_buff *fp, *head = qp->q.fragments; ip_frag_reasm() local 542 /* Make the one we just received the head. */ ip_frag_reasm() 544 head = prev->next; ip_frag_reasm() 545 fp = skb_clone(head, GFP_ATOMIC); ip_frag_reasm() 549 fp->next = head->next; ip_frag_reasm() 554 skb_morph(head, qp->q.fragments); ip_frag_reasm() 555 head->next = qp->q.fragments->next; ip_frag_reasm() 558 qp->q.fragments = head; ip_frag_reasm() 561 WARN_ON(!head); ip_frag_reasm() 562 WARN_ON(FRAG_CB(head)->offset != 0); ip_frag_reasm() 565 ihlen = ip_hdrlen(head); ip_frag_reasm() 573 if (skb_unclone(head, GFP_ATOMIC)) ip_frag_reasm() 579 if (skb_has_frag_list(head)) { ip_frag_reasm() 586 clone->next = head->next; ip_frag_reasm() 587 head->next = clone; ip_frag_reasm() 588 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; ip_frag_reasm() 589 skb_frag_list_init(head); ip_frag_reasm() 590 for (i = 0; i < skb_shinfo(head)->nr_frags; i++) ip_frag_reasm() 591 plen += skb_frag_size(&skb_shinfo(head)->frags[i]); ip_frag_reasm() 592 clone->len = clone->data_len = head->data_len - plen; ip_frag_reasm() 593 head->data_len -= clone->len; ip_frag_reasm() 594 head->len -= clone->len; ip_frag_reasm() 596 clone->ip_summed = head->ip_summed; ip_frag_reasm() 600 skb_shinfo(head)->frag_list = head->next; ip_frag_reasm() 601 skb_push(head, head->data - skb_network_header(head)); ip_frag_reasm() 603 for (fp=head->next; fp; fp = fp->next) { ip_frag_reasm() 604 head->data_len += fp->len; ip_frag_reasm() 605 head->len += fp->len; ip_frag_reasm() 606 if (head->ip_summed != fp->ip_summed) ip_frag_reasm() 607 head->ip_summed = CHECKSUM_NONE; ip_frag_reasm() 608 else if (head->ip_summed == CHECKSUM_COMPLETE) ip_frag_reasm() 609 head->csum = csum_add(head->csum, fp->csum); ip_frag_reasm() 610 head->truesize += fp->truesize; ip_frag_reasm() 612 sub_frag_mem_limit(qp->q.net, head->truesize); ip_frag_reasm() 614 head->next = NULL; ip_frag_reasm() 615 head->dev = dev; ip_frag_reasm() 616 head->tstamp = qp->q.stamp; ip_frag_reasm() 617 IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size); ip_frag_reasm() 619 iph = ip_hdr(head); ip_frag_reasm() 632 IPCB(head)->flags |= IPSKB_FRAG_PMTU; ip_frag_reasm()
|
H A D | inet_hashtables.c | 63 struct inet_bind_hashbucket *head, inet_bind_bucket_create() 75 hlist_add_head(&tb->node, &head->chain); inet_bind_bucket_create() 108 struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; __inet_put_port() local 111 spin_lock(&head->lock); __inet_put_port() 118 spin_unlock(&head->lock); __inet_put_port() 135 struct inet_bind_hashbucket *head = &table->bhash[bhash]; __inet_inherit_port() local 138 spin_lock(&head->lock); __inet_inherit_port() 141 spin_unlock(&head->lock); __inet_inherit_port() 150 inet_bind_bucket_for_each(tb, &head->chain) { __inet_inherit_port() 157 sock_net(sk), head, port); __inet_inherit_port() 159 spin_unlock(&head->lock); __inet_inherit_port() 165 spin_unlock(&head->lock); __inet_inherit_port() 223 sk_nulls_for_each_rcu(sk, node, &ilb->head) { __inet_lookup_listener() 298 struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; __inet_lookup_established() local 302 sk_nulls_for_each_rcu(sk, node, &head->chain) { __inet_lookup_established() 347 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); __inet_check_established() local 355 sk_nulls_for_each(sk2, node, &head->chain) { __inet_check_established() 377 __sk_nulls_add_node_rcu(sk, &head->chain); __inet_check_established() 414 struct inet_ehash_bucket *head; inet_ehash_insert() local 421 head = inet_ehash_bucket(hashinfo, sk->sk_hash); inet_ehash_insert() 422 list = &head->chain; inet_ehash_insert() 465 __sk_nulls_add_node_rcu(sk, &ilb->head); __inet_hash() 510 struct inet_bind_hashbucket *head; __inet_hash_connect() local 535 head = &hinfo->bhash[inet_bhashfn(net, port, __inet_hash_connect() 537 spin_lock(&head->lock); __inet_hash_connect() 543 inet_bind_bucket_for_each(tb, &head->chain) { __inet_hash_connect() 558 net, head, port); __inet_hash_connect() 560 spin_unlock(&head->lock); __inet_hash_connect() 568 spin_unlock(&head->lock); __inet_hash_connect() 585 spin_unlock(&head->lock); __inet_hash_connect() 594 head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)]; __inet_hash_connect() 596 spin_lock_bh(&head->lock); __inet_hash_connect() 599 spin_unlock_bh(&head->lock); __inet_hash_connect() 602 spin_unlock(&head->lock); __inet_hash_connect() 632 INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head, inet_hashinfo_init() 61 inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net, struct inet_bind_hashbucket *head, const unsigned short snum) inet_bind_bucket_create() argument
|
H A D | xfrm4_protocol.c | 44 #define for_each_protocol_rcu(head, handler) \ 45 for (handler = rcu_dereference(head); \ 53 struct xfrm4_protocol __rcu **head = proto_handlers(protocol); xfrm4_rcv_cb() local 55 if (!head) xfrm4_rcv_cb() 58 for_each_protocol_rcu(*head, handler) xfrm4_rcv_cb() 71 struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr); xfrm4_rcv_encap() local 77 if (!head) xfrm4_rcv_encap() 80 for_each_protocol_rcu(*head, handler) xfrm4_rcv_encap()
|
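for_each_protocol_rcu() in the xfrm4 excerpt above walks a chain of protocol handlers until one claims the packet. A plain C sketch of that dispatch pattern, with the RCU dereferences replaced by ordinary pointer loads and an invented handler struct:

#include <stdio.h>

struct proto_handler {
    int (*handler)(int pkt);        /* returns 0 when it consumes pkt */
    struct proto_handler *next;     /* priority-ordered chain */
};

/* Call each handler in turn until one returns 0 (consumed). */
static int dispatch(struct proto_handler *head, int pkt)
{
    for (struct proto_handler *h = head; h; h = h->next)
        if (h->handler(pkt) == 0)
            return 0;
    return -1;  /* no handler claimed the packet */
}

static int h_even(int pkt) { return pkt % 2 ? -1 : 0; }
static int h_any(int pkt)  { (void)pkt; return 0; }

int main(void)
{
    struct proto_handler any       = { h_any, NULL };
    struct proto_handler head      = { h_even, &any };
    struct proto_handler even_only = { h_even, NULL };

    printf("%d %d %d\n",
           dispatch(&head, 4),        /*  0: h_even consumes it      */
           dispatch(&head, 3),        /*  0: falls through to h_any  */
           dispatch(&even_only, 3));  /* -1: no handler claimed it   */
    return 0;
}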
/linux-4.4.14/drivers/gpu/drm/ttm/ |
H A D | ttm_execbuf_util.c | 38 list_for_each_entry_continue_reverse(entry, list, head) { list_for_each_entry_continue_reverse() 49 list_for_each_entry(entry, list, head) { list_for_each_entry() 66 entry = list_first_entry(list, struct ttm_validate_buffer, head); ttm_eu_backoff_reservation() 70 list_for_each_entry(entry, list, head) { list_for_each_entry() 106 entry = list_first_entry(list, struct ttm_validate_buffer, head); ttm_eu_reserve_buffers() 112 list_for_each_entry(entry, list, head) { list_for_each_entry() 124 entry = list_prev_entry(entry, head); list_for_each_entry() 125 list_del(&safe->head); list_for_each_entry() 126 list_add(&safe->head, dups); list_for_each_entry() 169 list_del(&entry->head); list_for_each_entry() 170 list_add(&entry->head, list); list_for_each_entry() 194 bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo; ttm_eu_fence_buffer_objects() 201 list_for_each_entry(entry, list, head) { list_for_each_entry()
|
/linux-4.4.14/fs/nilfs2/ |
H A D | segbuf.h | 60 * @sb_list: List head to chain this structure 98 #define NILFS_LIST_SEGBUF(head) \ 99 list_entry((head), struct nilfs_segment_buffer, sb_list) 102 #define NILFS_LAST_SEGBUF(head) NILFS_LIST_SEGBUF((head)->prev) 103 #define NILFS_FIRST_SEGBUF(head) NILFS_LIST_SEGBUF((head)->next) 104 #define NILFS_SEGBUF_IS_LAST(segbuf, head) ((segbuf)->sb_list.next == (head)) 110 #define NILFS_SEGBUF_FIRST_BH(head) \ 111 (list_entry((head)->next, struct buffer_head, b_assoc_buffers)) 115 #define NILFS_SEGBUF_BH_IS_LAST(bh, head) ((bh)->b_assoc_buffers.next == head)
|
/linux-4.4.14/net/ceph/ |
H A D | pagelist.c | 11 struct page *page = list_entry(pl->head.prev, struct page, lru); ceph_pagelist_unmap_tail() 22 while (!list_empty(&pl->head)) { ceph_pagelist_release() 23 struct page *page = list_first_entry(&pl->head, struct page, ceph_pagelist_release() 48 list_add_tail(&page->lru, &pl->head); ceph_pagelist_addpage() 119 c->page_lru = pl->head.prev; ceph_pagelist_set_cursor() 137 while (pl->head.prev != c->page_lru) { ceph_pagelist_truncate() 138 page = list_entry(pl->head.prev, struct page, lru); ceph_pagelist_truncate() 144 if (!list_empty(&pl->head)) { ceph_pagelist_truncate() 145 page = list_entry(pl->head.prev, struct page, lru); ceph_pagelist_truncate()
|
/linux-4.4.14/sound/oss/ |
H A D | msnd.c | 105 f->head = 0; msnd_fifo_alloc() 116 f->len = f->tail = f->head = 0; msnd_fifo_make_empty() 127 if (f->head <= f->tail) { msnd_fifo_write_io() 133 nwritten = f->head - f->tail; msnd_fifo_write_io() 158 if (f->head <= f->tail) { msnd_fifo_write() 164 nwritten = f->head - f->tail; msnd_fifo_write() 189 if (f->tail <= f->head) { msnd_fifo_read_io() 191 if (nread > f->n - f->head) msnd_fifo_read_io() 192 nread = f->n - f->head; msnd_fifo_read_io() 195 nread = f->tail - f->head; msnd_fifo_read_io() 200 memcpy_toio(buf, f->data + f->head, nread); msnd_fifo_read_io() 205 f->head += nread; msnd_fifo_read_io() 206 f->head %= f->n; msnd_fifo_read_io() 220 if (f->tail <= f->head) { msnd_fifo_read() 222 if (nread > f->n - f->head) msnd_fifo_read() 223 nread = f->n - f->head; msnd_fifo_read() 226 nread = f->tail - f->head; msnd_fifo_read() 231 memcpy(buf, f->data + f->head, nread); msnd_fifo_read() 236 f->head += nread; msnd_fifo_read() 237 f->head %= f->n; msnd_fifo_read()
|
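The msnd FIFO above distinguishes the wrapped case (f->tail <= f->head) from the contiguous one and copies in at most two runs per read. A self-contained sketch of that read path, without the driver's locking and memcpy_toio variants:

#include <stdio.h>
#include <string.h>

struct fifo { char data[8]; int n, len, head, tail; };

/* Pop up to want bytes; returns the number actually copied. */
static int fifo_read(struct fifo *f, char *buf, int want)
{
    int done = 0;
    while (done < want && f->len > 0) {
        int nread = want - done;
        if (f->tail <= f->head) {            /* used region wraps */
            if (nread > f->n - f->head)
                nread = f->n - f->head;      /* first run: head..end */
        } else if (nread > f->tail - f->head) {
            nread = f->tail - f->head;       /* contiguous region */
        }
        memcpy(buf + done, f->data + f->head, nread);
        f->head = (f->head + nread) % f->n;  /* advance and wrap */
        f->len -= nread;
        done += nread;
    }
    return done;
}

int main(void)
{
    struct fifo f = { "ABCDEFGH", 8, 5, 6, 3 };  /* used: "GH" + "ABC" */
    char out[6] = { 0 };
    printf("read %d bytes: %s\n", fifo_read(&f, out, 5), out);
    return 0;
}

Splitting the copy at the buffer end is what lets each run be a plain memcpy.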
/linux-4.4.14/drivers/net/wireless/ath/ |
H A D | dfs_pri_detector.c | 37 struct list_head head; member in struct:pulse_elem 105 list_for_each_entry_safe(p, p0, &pulse_pool, head) { pool_deregister_ref() 106 list_del(&p->head); pool_deregister_ref() 110 list_for_each_entry_safe(ps, ps0, &pseq_pool, head) { pool_deregister_ref() 111 list_del(&ps->head); pool_deregister_ref() 122 list_add(&pe->head, &pulse_pool); pool_put_pulse_elem() 130 list_add(&pse->head, &pseq_pool); pool_put_pseq_elem() 140 pse = list_first_entry(&pseq_pool, struct pri_sequence, head); pool_get_pseq_elem() 141 list_del(&pse->head); pool_get_pseq_elem() 153 pe = list_first_entry(&pulse_pool, struct pulse_elem, head); pool_get_pulse_elem() 154 list_del(&pe->head); pool_get_pulse_elem() 166 return list_entry(l->prev, struct pulse_elem, head); pulse_queue_get_tail() 173 list_del_init(&p->head); pulse_queue_dequeue() 214 INIT_LIST_HEAD(&p->head); pulse_queue_enqueue() 216 list_add(&p->head, &pde->pulses); pulse_queue_enqueue() 229 list_for_each_entry(p, &pde->pulses, head) { pseq_handler_create_sequences() 258 list_for_each_entry_continue(p2, &pde->pulses, head) { pseq_handler_create_sequences() 297 INIT_LIST_HEAD(&new_ps->head); pseq_handler_create_sequences() 298 list_add(&new_ps->head, &pde->sequences); pseq_handler_create_sequences() 309 list_for_each_entry_safe(ps, ps2, &pde->sequences, head) { pseq_handler_add_to_existing_seqs() 315 list_del_init(&ps->head); pseq_handler_add_to_existing_seqs() 344 list_for_each_entry(ps, &pde->sequences, head) { pseq_handler_check_detection() 363 list_for_each_entry_safe(ps, ps0, &pde->sequences, head) { pri_detector_reset() 364 list_del_init(&ps->head); pri_detector_reset() 367 list_for_each_entry_safe(p, p0, &pde->pulses, head) { pri_detector_reset() 368 list_del_init(&p->head); pri_detector_reset()
|
/linux-4.4.14/drivers/isdn/capi/ |
H A D | capilib.c | 80 void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize) capilib_new_ncci() argument 98 list_add_tail(&np->list, head); capilib_new_ncci() 104 void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci) capilib_free_ncci() argument 109 list_for_each(l, head) { list_for_each() 125 void capilib_release_appl(struct list_head *head, u16 applid) capilib_release_appl() argument 130 list_for_each_safe(l, n, head) { list_for_each_safe() 142 void capilib_release(struct list_head *head) capilib_release() argument 147 list_for_each_safe(l, n, head) { list_for_each_safe() 157 u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid) capilib_data_b3_req() argument 162 list_for_each(l, head) { list_for_each() 180 void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid) capilib_data_b3_conf() argument 185 list_for_each(l, head) { list_for_each()
|
/linux-4.4.14/arch/mips/lasat/image/ |
H A D | Makefile | 24 $(obj)/head.o: $(obj)/head.S $(KERNEL_IMAGE) 27 OBJECTS = head.o kImage.o
|
H A D | head.S | 1 #include <asm/lasat/head.h>
|
/linux-4.4.14/arch/ia64/kernel/ |
H A D | mca_drv.h | 104 #define slidx_foreach_entry(pos, head) \ 105 list_for_each_entry(pos, head, list) 106 #define slidx_first_entry(head) \ 107 (((head)->next != (head)) ? list_entry((head)->next, typeof(slidx_list_t), list) : NULL)
|
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/ |
H A D | nrs_fifo.c | 39 * enabled on a given NRS head. 81 struct nrs_fifo_head *head; nrs_fifo_start() local 83 head = kzalloc_node(sizeof(*head), GFP_NOFS, nrs_fifo_start() 86 if (head == NULL) nrs_fifo_start() 89 INIT_LIST_HEAD(&head->fh_list); nrs_fifo_start() 90 policy->pol_private = head; nrs_fifo_start() 105 struct nrs_fifo_head *head = policy->pol_private; nrs_fifo_stop() local 107 LASSERT(head != NULL); nrs_fifo_stop() 108 LASSERT(list_empty(&head->fh_list)); nrs_fifo_stop() 110 kfree(head); nrs_fifo_stop() 165 struct nrs_fifo_head *head = policy->pol_private; nrs_fifo_req_get() local 168 nrq = unlikely(list_empty(&head->fh_list)) ? NULL : nrs_fifo_req_get() 169 list_entry(head->fh_list.next, struct ptlrpc_nrs_request, nrs_fifo_req_get() 199 struct nrs_fifo_head *head; nrs_fifo_req_add() local 201 head = container_of(nrs_request_resource(nrq), struct nrs_fifo_head, nrs_fifo_req_add() 206 nrq->nr_u.fifo.fr_sequence = head->fh_sequence++; nrs_fifo_req_add() 207 list_add_tail(&nrq->nr_u.fifo.fr_list, &head->fh_list); nrs_fifo_req_add()
|
/linux-4.4.14/drivers/scsi/arm/ |
H A D | queue.c | 64 INIT_LIST_HEAD(&queue->head); queue_initialise() 69 * host-available list head, and we wouldn't queue_initialise() 92 if (!list_empty(&queue->head)) queue_free() 99 * Function: int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head) 100 * Purpose : Add a new command onto a queue, adding REQUEST_SENSE to head. 103 * head - add command to head of queue 106 int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head) __queue_add() argument 126 if (head) __queue_add() 127 list_add(l, &queue->head); __queue_add() 129 list_add_tail(l, &queue->head); __queue_add() 168 list_for_each(l, &queue->head) { queue_remove_exclude() 193 if (!list_empty(&queue->head)) queue_remove() 194 SCpnt = __queue_remove(queue, queue->head.next); queue_remove() 217 list_for_each(l, &queue->head) { queue_remove_tgtluntag() 243 list_for_each(l, &queue->head) { queue_remove_all_target() 267 list_for_each(l, &queue->head) { queue_probetgtlun() 293 list_for_each(l, &queue->head) { queue_remove_cmd()
|
/linux-4.4.14/arch/sh/boot/romimage/ |
H A D | Makefile | 7 targets := vmlinux head.o zeropage.bin piggy.o 18 $(obj)/vmlinux: $(obj)/head.o $(obj-y) $(obj)/piggy.o FORCE
|
/linux-4.4.14/fs/hfsplus/ |
H A D | btree.c | 135 struct hfs_btree_header_rec *head; hfs_btree_open() local 165 head = (struct hfs_btree_header_rec *)(kmap(page) + hfs_btree_open() 167 tree->root = be32_to_cpu(head->root); hfs_btree_open() 168 tree->leaf_count = be32_to_cpu(head->leaf_count); hfs_btree_open() 169 tree->leaf_head = be32_to_cpu(head->leaf_head); hfs_btree_open() 170 tree->leaf_tail = be32_to_cpu(head->leaf_tail); hfs_btree_open() 171 tree->node_count = be32_to_cpu(head->node_count); hfs_btree_open() 172 tree->free_nodes = be32_to_cpu(head->free_nodes); hfs_btree_open() 173 tree->attributes = be32_to_cpu(head->attributes); hfs_btree_open() 174 tree->node_size = be16_to_cpu(head->node_size); hfs_btree_open() 175 tree->max_key_len = be16_to_cpu(head->max_key_len); hfs_btree_open() 176 tree->depth = be16_to_cpu(head->depth); hfs_btree_open() 205 (head->key_type == HFSPLUS_KEY_BINARY)) hfs_btree_open() 283 struct hfs_btree_header_rec *head; hfs_btree_write() local 293 head = (struct hfs_btree_header_rec *)(kmap(page) + hfs_btree_write() 296 head->root = cpu_to_be32(tree->root); hfs_btree_write() 297 head->leaf_count = cpu_to_be32(tree->leaf_count); hfs_btree_write() 298 head->leaf_head = cpu_to_be32(tree->leaf_head); hfs_btree_write() 299 head->leaf_tail = cpu_to_be32(tree->leaf_tail); hfs_btree_write() 300 head->node_count = cpu_to_be32(tree->node_count); hfs_btree_write() 301 head->free_nodes = cpu_to_be32(tree->free_nodes); hfs_btree_write() 302 head->attributes = cpu_to_be32(tree->attributes); hfs_btree_write() 303 head->depth = cpu_to_be16(tree->depth); hfs_btree_write()
|
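hfs_btree_open() above converts each big-endian on-disk field with be32_to_cpu()/be16_to_cpu(). A userspace equivalent that decodes big-endian fields byte by byte; the three-field header here is illustrative, not the real HFS+ header record:

#include <stdio.h>
#include <stdint.h>

static uint32_t be32(const uint8_t *p)
{
    return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
           (uint32_t)p[2] << 8  | p[3];
}

static uint16_t be16(const uint8_t *p)
{
    return (uint16_t)(p[0] << 8 | p[1]);
}

struct tree { uint32_t root, leaf_count; uint16_t node_size; };

int main(void)
{
    /* root=2, leaf_count=17, node_size=4096, big-endian on disk */
    const uint8_t head[] = { 0,0,0,2, 0,0,0,17, 0x10,0x00 };
    struct tree t = {
        .root       = be32(head + 0),
        .leaf_count = be32(head + 4),
        .node_size  = be16(head + 8),
    };
    printf("root=%u leaves=%u node_size=%u\n",
           t.root, t.leaf_count, t.node_size);
    return 0;
}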
/linux-4.4.14/net/ipv4/netfilter/ |
H A D | nf_conntrack_l3proto_ipv4_compat.c | 50 struct hlist_nulls_node *head) ct_get_next() 55 head = rcu_dereference(hlist_nulls_next_rcu(head)); ct_get_next() 56 while (is_a_nulls(head)) { ct_get_next() 57 if (likely(get_nulls_value(head) == st->bucket)) { ct_get_next() 61 head = rcu_dereference( ct_get_next() 64 return head; ct_get_next() 69 struct hlist_nulls_node *head = ct_get_first(seq); ct_get_idx() local 71 if (head) ct_get_idx() 72 while (pos && (head = ct_get_next(seq, head))) ct_get_idx() 74 return pos ? NULL : head; ct_get_idx() 237 struct hlist_node *head) ct_expect_get_next() 242 head = rcu_dereference(hlist_next_rcu(head)); ct_expect_get_next() 243 while (head == NULL) { ct_expect_get_next() 246 head = rcu_dereference( ct_expect_get_next() 249 return head; ct_expect_get_next() 254 struct hlist_node *head = ct_expect_get_first(seq); ct_expect_get_idx() local 256 if (head) ct_expect_get_idx() 257 while (pos && (head = ct_expect_get_next(seq, head))) ct_expect_get_idx() 259 return pos ? NULL : head; ct_expect_get_idx() 49 ct_get_next(struct seq_file *seq, struct hlist_nulls_node *head) ct_get_next() argument 236 ct_expect_get_next(struct seq_file *seq, struct hlist_node *head) ct_expect_get_next() argument
|
/linux-4.4.14/net/ieee802154/6lowpan/ |
H A D | reassembly.c | 229 struct sk_buff *fp, *head = fq->q.fragments; lowpan_frag_reasm() local 234 /* Make the one we just received the head. */ lowpan_frag_reasm() 236 head = prev->next; lowpan_frag_reasm() 237 fp = skb_clone(head, GFP_ATOMIC); lowpan_frag_reasm() 242 fp->next = head->next; lowpan_frag_reasm() 247 skb_morph(head, fq->q.fragments); lowpan_frag_reasm() 248 head->next = fq->q.fragments->next; lowpan_frag_reasm() 251 fq->q.fragments = head; lowpan_frag_reasm() 255 if (skb_unclone(head, GFP_ATOMIC)) lowpan_frag_reasm() 262 if (skb_has_frag_list(head)) { lowpan_frag_reasm() 269 clone->next = head->next; lowpan_frag_reasm() 270 head->next = clone; lowpan_frag_reasm() 271 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; lowpan_frag_reasm() 272 skb_frag_list_init(head); lowpan_frag_reasm() 273 for (i = 0; i < skb_shinfo(head)->nr_frags; i++) lowpan_frag_reasm() 274 plen += skb_frag_size(&skb_shinfo(head)->frags[i]); lowpan_frag_reasm() 275 clone->len = head->data_len - plen; lowpan_frag_reasm() 277 head->data_len -= clone->len; lowpan_frag_reasm() 278 head->len -= clone->len; lowpan_frag_reasm() 282 WARN_ON(head == NULL); lowpan_frag_reasm() 284 sum_truesize = head->truesize; lowpan_frag_reasm() 285 for (fp = head->next; fp;) { lowpan_frag_reasm() 291 if (skb_try_coalesce(head, fp, &headstolen, &delta)) { lowpan_frag_reasm() 294 if (!skb_shinfo(head)->frag_list) lowpan_frag_reasm() 295 skb_shinfo(head)->frag_list = fp; lowpan_frag_reasm() 296 head->data_len += fp->len; lowpan_frag_reasm() 297 head->len += fp->len; lowpan_frag_reasm() 298 head->truesize += fp->truesize; lowpan_frag_reasm() 304 head->next = NULL; lowpan_frag_reasm() 305 head->dev = ldev; lowpan_frag_reasm() 306 head->tstamp = fq->q.stamp; lowpan_frag_reasm()
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/timer/ |
H A D | base.c | 41 list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) { nvkm_timer_alarm_trigger() 43 list_move_tail(&alarm->head, &exec); nvkm_timer_alarm_trigger() 48 alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head); nvkm_timer_alarm_trigger() 56 list_for_each_entry_safe(alarm, atemp, &exec, head) { nvkm_timer_alarm_trigger() 57 list_del_init(&alarm->head); nvkm_timer_alarm_trigger() 73 if (!list_empty(&alarm->head)) nvkm_timer_alarm() 74 list_del(&alarm->head); nvkm_timer_alarm() 76 list_for_each_entry(list, &tmr->alarms, head) { nvkm_timer_alarm() 80 list_add_tail(&alarm->head, &list->head); nvkm_timer_alarm() 93 list_del_init(&alarm->head); nvkm_timer_alarm_cancel()
|
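nvkm_timer_alarm() above inserts each alarm in timestamp order so the soonest alarm always sits at the head of the list. A compact sketch of that sorted insertion, using a plain singly linked list as a stand-in for the kernel's doubly linked list_head:

#include <stdio.h>

struct alarm { unsigned long timestamp; struct alarm *next; };

/* Insert so the list stays ordered by ascending timestamp;
 * the head of the list is always the next alarm to fire. */
static void alarm_insert(struct alarm **list, struct alarm *a)
{
    while (*list && (*list)->timestamp <= a->timestamp)
        list = &(*list)->next;
    a->next = *list;
    *list = a;
}

int main(void)
{
    struct alarm a = { 30 }, b = { 10 }, c = { 20 }, *list = NULL;
    alarm_insert(&list, &a);
    alarm_insert(&list, &b);
    alarm_insert(&list, &c);
    for (struct alarm *p = list; p; p = p->next)
        printf("%lu ", p->timestamp);
    printf("\n");   /* 10 20 30 */
    return 0;
}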
/linux-4.4.14/drivers/net/wireless/ath/wil6210/ |
H A D | fw.h | 35 __le32 size; /* whole record, bytes after head */ 39 * data_size inferred from the @head.size. For this case, 40 * data_size = @head.size - offsetof(struct wil_fw_record_data, data) 55 * for informational purpose, data_size is @head.size from record header 62 * data_size = @head.size - offsetof(struct wil_fw_record_action, data) 78 * data_size is @head.size where @head is record header 115 * data_size inferred from the @head.size. For this case, 116 * data_size = @head.size - offsetof(struct wil_fw_record_gateway_data, data) 139 * data_size inferred from the @head.size. For this case, 140 * data_size = @head.size - offsetof(struct wil_fw_record_gateway_data4, data)
|
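The wil6210 records above carry their payload size in the record head (data_size = @head.size minus the header's own overhead). A sketch of walking a blob of such length-prefixed records; the record layout below is invented for illustration:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct record {            /* hypothetical TLV-style record head */
    uint32_t size;         /* bytes that follow, including 'type' */
    uint32_t type;
    /* payload of (size - sizeof(uint32_t)) bytes follows */
};

static void walk(const uint8_t *blob, size_t len)
{
    size_t off = 0;
    while (off + sizeof(struct record) <= len) {
        struct record r;
        memcpy(&r, blob + off, sizeof(r));
        size_t data_size = r.size - sizeof(r.type);
        printf("type=%u data_size=%zu\n", r.type, data_size);
        off += sizeof(r.size) + r.size;   /* step to the next head */
    }
}

int main(void)
{
    uint8_t blob[24] = { 0 };
    struct record r1 = { .size = 8, .type = 1 };   /* 4 payload bytes */
    struct record r2 = { .size = 4, .type = 2 };   /* 0 payload bytes */
    memcpy(blob, &r1, sizeof(r1));
    memcpy(blob + 12, &r2, sizeof(r2));
    walk(blob, sizeof(blob));
    return 0;
}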
/linux-4.4.14/drivers/gpu/drm/ |
H A D | drm_hashtab.c | 69 hlist_for_each_entry(entry, h_list, head) drm_ht_verbose_list() 82 hlist_for_each_entry(entry, h_list, head) { hlist_for_each_entry() 84 return &entry->head; hlist_for_each_entry() 100 hlist_for_each_entry_rcu(entry, h_list, head) { hlist_for_each_entry_rcu() 102 return &entry->head; hlist_for_each_entry_rcu() 120 hlist_for_each_entry(entry, h_list, head) { hlist_for_each_entry() 125 parent = &entry->head; hlist_for_each_entry() 128 hlist_add_behind_rcu(&item->head, parent); 130 hlist_add_head_rcu(&item->head, h_list); 174 *item = hlist_entry(list, struct drm_hash_item, head); drm_ht_find_item() 193 hlist_del_init_rcu(&item->head); drm_ht_remove_item()
|
H A D | drm_agpsupport.c | 220 list_add(&entry->head, &dev->agp->memory); drm_agp_alloc() 252 list_for_each_entry(entry, &dev->agp->memory, head) { drm_agp_lookup_entry() 367 list_del(&entry->head); drm_agp_free() 399 struct drm_agp_head *head = NULL; drm_agp_init() local 401 if (!(head = kzalloc(sizeof(*head), GFP_KERNEL))) drm_agp_init() 403 head->bridge = agp_find_bridge(dev->pdev); drm_agp_init() 404 if (!head->bridge) { drm_agp_init() 405 if (!(head->bridge = agp_backend_acquire(dev->pdev))) { drm_agp_init() 406 kfree(head); drm_agp_init() 409 agp_copy_info(head->bridge, &head->agp_info); drm_agp_init() 410 agp_backend_release(head->bridge); drm_agp_init() 412 agp_copy_info(head->bridge, &head->agp_info); drm_agp_init() 414 if (head->agp_info.chipset == NOT_SUPPORTED) { drm_agp_init() 415 kfree(head); drm_agp_init() 418 INIT_LIST_HEAD(&head->memory); drm_agp_init() 419 head->cant_use_aperture = head->agp_info.cant_use_aperture; drm_agp_init() 420 head->page_mask = head->agp_info.page_mask; drm_agp_init() 421 head->base = head->agp_info.aper_base; drm_agp_init() 422 return head; drm_agp_init() 429 * Iterate over all AGP resources and remove them. But keep the AGP head 446 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { drm_agp_clear()
|
/linux-4.4.14/arch/arm/mach-netx/ |
H A D | xc.c | 120 struct fw_header *head; xc_request_firmware() local 135 head = (struct fw_header *)fw->data; xc_request_firmware() 136 if (head->magic != 0x4e657458) { xc_request_firmware() 137 if (head->magic == 0x5874654e) { xc_request_firmware() 144 head->magic); xc_request_firmware() 149 x->type = head->type; xc_request_firmware() 150 x->version = head->version; xc_request_firmware() 155 src = fw->data + head->fw_desc[i].ofs; xc_request_firmware() 158 size = head->fw_desc[i].size - sizeof (unsigned int); xc_request_firmware() 165 src = fw->data + head->fw_desc[i].patch_ofs; xc_request_firmware() 166 size = head->fw_desc[i].patch_entries; xc_request_firmware()
|
/linux-4.4.14/drivers/s390/cio/ |
H A D | eadm_sch.h | 16 struct list_head head; member in struct:eadm_private
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ |
H A D | changf100.h | 11 struct list_head head; member in struct:gf100_fifo_chan
|
H A D | changk104.h | 12 struct list_head head; member in struct:gk104_fifo_chan
|
/linux-4.4.14/include/trace/ |
H A D | perf.h | 43 struct hlist_head *head; \ 50 head = this_cpu_ptr(event_call->perf_events); \ 52 hlist_empty(head)) \ 71 __count, __regs, head, __task); \
|
/linux-4.4.14/arch/powerpc/include/asm/ |
H A D | ps3gpu.h | 46 static inline int lv1_gpu_display_sync(u64 context_handle, u64 head, lv1_gpu_display_sync() argument 51 head, ddr_offset, 0, 0); lv1_gpu_display_sync() 54 static inline int lv1_gpu_display_flip(u64 context_handle, u64 head, lv1_gpu_display_flip() argument 59 head, ddr_offset, 0, 0); lv1_gpu_display_flip()
|
/linux-4.4.14/arch/s390/boot/compressed/ |
H A D | Makefile | 9 targets += misc.o piggy.o sizes.h head.o 19 OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o) 20 OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o 36 $(obj)/head.o: $(obj)/sizes.h
|
H A D | vmlinux.lds.S | 14 .head.text : {
|
/linux-4.4.14/arch/cris/include/arch-v32/arch/ |
H A D | pgtable.h | 6 /* See head.S for differences between ARTPEC-3 and ETRAX FS. */
|
/linux-4.4.14/arch/hexagon/kernel/ |
H A D | Makefile | 1 extra-y := head.o vmlinux.lds
|
/linux-4.4.14/arch/avr32/kernel/ |
H A D | Makefile | 5 extra-y := head.o vmlinux.lds
|
/linux-4.4.14/arch/x86/kernel/cpu/ |
H A D | perf_event_intel_bts.c | 57 local_t head; member in struct:bts_buffer 143 index = local_read(&buf->head); bts_config_buffer() 167 static void bts_buffer_pad_out(struct bts_phys *phys, unsigned long head) bts_buffer_pad_out() argument 169 unsigned long index = head - phys->offset; bts_buffer_pad_out() 191 unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head; bts_update() local 196 head = index + bts_buffer_offset(buf, buf->cur_buf); bts_update() 197 old = local_xchg(&buf->head, head); bts_update() 200 if (old == head) bts_update() 207 * old and head are always in the same physical buffer, so we bts_update() 210 local_add(head - old, &buf->data_size); bts_update() 212 local_set(&buf->data_size, head); bts_update() 302 unsigned long head, space, next_space, pad, gap, skip, wakeup; bts_buffer_reset() local 310 head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); bts_buffer_reset() 311 if (WARN_ON_ONCE(head != local_read(&buf->head))) bts_buffer_reset() 315 space = phys->offset + phys->displacement + phys->size - head; bts_buffer_reset() 338 bts_buffer_pad_out(phys, head); bts_buffer_reset() 345 head = phys->offset + phys->displacement; bts_buffer_reset() 347 * After this, cur_buf and head won't match ds bts_buffer_reset() 352 local_set(&buf->head, head); bts_buffer_reset() 359 handle->head; bts_buffer_reset() 365 buf->end = head + space; bts_buffer_reset() 397 old_head = local_read(&buf->head); intel_bts_interrupt() 401 if (old_head == local_read(&buf->head)) intel_bts_interrupt() 428 bts->handle.head = bts_event_del()
|
/linux-4.4.14/net/batman-adv/ |
H A D | hash.h | 69 struct hlist_head *head; batadv_hash_delete() local 75 head = &hash->table[i]; batadv_hash_delete() 79 hlist_for_each_safe(node, node_tmp, head) { hlist_for_each_safe() 110 struct hlist_head *head; batadv_hash_add() local 118 head = &hash->table[index]; batadv_hash_add() 123 hlist_for_each(node, head) { hlist_for_each() 132 hlist_add_head_rcu(data_node, head); 154 struct hlist_head *head; batadv_hash_remove() local 158 head = &hash->table[index]; batadv_hash_remove() 161 hlist_for_each(node, head) { hlist_for_each()
|
/linux-4.4.14/drivers/net/ethernet/intel/fm10k/ |
H A D | fm10k_mbx.c | 33 fifo->head = 0; fm10k_fifo_init() 45 return fifo->tail - fifo->head; fm10k_fifo_used() 56 return fifo->size + fifo->head - fifo->tail; fm10k_fifo_unused() 67 return fifo->head == fifo->tail; fm10k_fifo_empty() 71 * fm10k_fifo_head_offset - returns indices of head with given offset 73 * @offset: offset to add to head 75 * This function returns the indices into the fifo based on head + offset 79 return (fifo->head + offset) & (fifo->size - 1); fm10k_fifo_head_offset() 102 u32 *head = fifo->buffer + fm10k_fifo_head_offset(fifo, 0); fm10k_fifo_head_len() local 104 /* verify there is at least 1 DWORD in the fifo so *head is valid */ fm10k_fifo_head_len() 109 return FM10K_TLV_DWORD_LEN(*head); fm10k_fifo_head_len() 122 /* update head so it is at the start of next frame */ fm10k_fifo_head_drop() 123 fifo->head += len; fm10k_fifo_head_drop() 132 * This function resets the head pointer to drop all messages in the FIFO and 137 fifo->head = fifo->tail; fm10k_fifo_drop_all() 141 * fm10k_mbx_index_len - Convert a head/tail index into a length value 143 * @head: head index 144 * @tail: head index 146 * This function takes the head and tail index and determines the length 149 static u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail) fm10k_mbx_index_len() argument 151 u16 len = tail - head; fm10k_mbx_index_len() 163 * @offset: length to add to head offset 179 * @offset: length to add to head offset 193 * fm10k_mbx_head_add - Determine new head value with added offset 195 * @offset: length to add to head offset 197 * This function takes the local head index and recomputes it for 202 u16 head = (mbx->head + offset + 1) & ((mbx->mbmem_len << 1) - 1); fm10k_mbx_head_add() local 205 return (head > mbx->head) ? --head : ++head; fm10k_mbx_head_add() 209 * fm10k_mbx_head_sub - Determine new head value with subtracted offset 211 * @offset: length to add to head offset 213 * This function takes the local head index and recomputes it for 218 u16 head = (mbx->head - offset - 1) & ((mbx->mbmem_len << 1) - 1); fm10k_mbx_head_sub() local 221 return (head < mbx->head) ? ++head : --head; fm10k_mbx_head_sub() 350 u32 *head = fifo->buffer; fm10k_mbx_write_copy() local 365 head += end; fm10k_mbx_write_copy() 371 for (end = fifo->size - end; len; head = fifo->buffer) { fm10k_mbx_write_copy() 379 fm10k_write_reg(hw, mbmem + tail++, *(head++)); fm10k_mbx_write_copy() 385 * fm10k_mbx_pull_head - Pulls data off of head of Tx FIFO 388 * @head: acknowledgement number last received 391 * head index. It will then pull up to mbmem_len DWORDs off of the 392 * head of the FIFO and will place it in the MBMEM registers 396 struct fm10k_mbx_info *mbx, u16 head) fm10k_mbx_pull_head() 398 u16 mbmem_len, len, ack = fm10k_mbx_index_len(mbx, head, mbx->tail); fm10k_mbx_pull_head() 434 * head and len determines the length to copy. 
442 u16 end, len, head; fm10k_mbx_read_copy() local 444 /* determine data length and mbmem head index */ fm10k_mbx_read_copy() 446 head = fm10k_mbx_head_sub(mbx, len); fm10k_mbx_read_copy() 447 if (head >= mbx->mbmem_len) fm10k_mbx_read_copy() 448 head++; fm10k_mbx_read_copy() 457 /* adjust head to match offset for FIFO */ fm10k_mbx_read_copy() 458 head &= mbx->mbmem_len - 1; fm10k_mbx_read_copy() 459 if (!head) fm10k_mbx_read_copy() 460 head++; fm10k_mbx_read_copy() 463 *(tail++) = fm10k_read_reg(hw, mbmem + head++); fm10k_mbx_read_copy() 487 u16 len, seq = fm10k_mbx_index_len(mbx, mbx->head, tail); fm10k_mbx_push_tail() 494 /* update head and record bytes received */ fm10k_mbx_push_tail() 495 mbx->head = fm10k_mbx_head_add(mbx, len); fm10k_mbx_push_tail() 621 * @head: head index provided by remote mailbox 624 * last head update to the current one. It uses the result of the 628 static void fm10k_mbx_update_local_crc(struct fm10k_mbx_info *mbx, u16 head) fm10k_mbx_update_local_crc() argument 630 u16 len = mbx->tail_len - fm10k_mbx_index_len(mbx, head, mbx->tail); fm10k_mbx_update_local_crc() 633 head = fm10k_fifo_head_offset(&mbx->tx, mbx->pulled); fm10k_mbx_update_local_crc() 636 mbx->local = fm10k_fifo_crc(&mbx->tx, head, len, mbx->local); fm10k_mbx_update_local_crc() 706 * fm10k_mbx_deqeueue_rx - Dequeues the message from the head in the Rx FIFO 722 err = fm10k_tlv_msg_parse(hw, fifo->buffer + fifo->head, fm10k_mbx_dequeue_rx() 733 /* shift head and tail based on the memory we moved */ fm10k_mbx_dequeue_rx() 734 fifo->tail -= fifo->head; fm10k_mbx_dequeue_rx() 735 fifo->head = 0; fm10k_mbx_dequeue_rx() 852 FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD) | fm10k_mbx_create_connect_hdr() 866 FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); fm10k_mbx_create_data_hdr() 892 FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); fm10k_mbx_create_disconnect_hdr() 912 FM10K_MSG_HDR_FIELD_SET(mbx->head, TAIL) | fm10k_mbx_create_fake_disconnect_hdr() 950 FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); fm10k_mbx_create_error_msg() 964 u16 type, rsvd0, head, tail, size; fm10k_mbx_validate_msg_hdr() local 970 head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); fm10k_mbx_validate_msg_hdr() 979 if (tail != mbx->head) fm10k_mbx_validate_msg_hdr() 984 /* validate that head is moving correctly */ fm10k_mbx_validate_msg_hdr() 985 if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) fm10k_mbx_validate_msg_hdr() 987 if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) fm10k_mbx_validate_msg_hdr() 993 if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) fm10k_mbx_validate_msg_hdr() 1004 if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) fm10k_mbx_validate_msg_hdr() 1019 * fm10k_mbx_create_reply - Generate reply based on state and remote head 1021 * @head: acknowledgement number 1024 * mailbox state and the remote fifo head. It will return the length 1029 struct fm10k_mbx_info *mbx, u16 head) fm10k_mbx_create_reply() 1035 fm10k_mbx_update_local_crc(mbx, head); fm10k_mbx_create_reply() 1038 fm10k_mbx_pull_head(hw, mbx, head); fm10k_mbx_create_reply() 1070 u16 len, head, ack; fm10k_mbx_reset_work() local 1076 head = FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, HEAD); fm10k_mbx_reset_work() 1077 ack = fm10k_mbx_index_len(mbx, head, mbx->tail); fm10k_mbx_reset_work() 1096 mbx->rx.head = 0; fm10k_mbx_reset_work() 1105 * at the head of the Tx FIFO if they are larger than max_size. It does not 1162 u16 size, head; fm10k_mbx_process_connect() local 1166 head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); fm10k_mbx_process_connect() 1189 /* align our tail index to remote head index */ fm10k_mbx_process_connect() 1190 mbx->tail = head; fm10k_mbx_process_connect() 1192 return fm10k_mbx_create_reply(hw, mbx, head); fm10k_mbx_process_connect() 1207 u16 head, tail; fm10k_mbx_process_data() local 1211 head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); fm10k_mbx_process_data() 1216 mbx->tail = head; fm10k_mbx_process_data() 1233 return fm10k_mbx_create_reply(hw, mbx, head); fm10k_mbx_process_data() 1249 u16 head; fm10k_mbx_process_disconnect() local 1253 head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); fm10k_mbx_process_disconnect() 1259 /* we have already verified mbx->head == tail so we know this is 0 */ fm10k_mbx_process_disconnect() 1274 /* verify the head indicates we completed all transmits */ fm10k_mbx_process_disconnect() 1275 if (head != mbx->tail) fm10k_mbx_process_disconnect() 1285 return fm10k_mbx_create_reply(hw, mbx, head); fm10k_mbx_process_disconnect() 1300 u16 head; fm10k_mbx_process_error() local 1303 head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); fm10k_mbx_process_error() 1316 mbx->tail = head; fm10k_mbx_process_error() 1560 * evenly splitting it. In order to allow for easy masking of head/tail 1602 /* initialize tail and head */ fm10k_pfvf_mbx_init() 1604 mbx->head = 1; fm10k_pfvf_mbx_init() 1645 FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD); fm10k_sm_mbx_create_data_hdr() 1662 FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD) | fm10k_sm_mbx_create_connect_hdr() 1681 /* initialize tail and head */ fm10k_sm_mbx_connect_reset() 1683 mbx->head = 1; fm10k_sm_mbx_connect_reset() 1784 u16 tail, head, ver; fm10k_sm_mbx_validate_fifo_hdr() local 1788 head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD); fm10k_sm_mbx_validate_fifo_hdr() 1794 if (!head || head > FM10K_SM_MBX_FIFO_LEN) fm10k_sm_mbx_validate_fifo_hdr() 1798 if (mbx->tail < head) fm10k_sm_mbx_validate_fifo_hdr() 1799 head += mbx->mbmem_len - 1; fm10k_sm_mbx_validate_fifo_hdr() 1800 if (tail < mbx->head) fm10k_sm_mbx_validate_fifo_hdr() 1802 if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) fm10k_sm_mbx_validate_fifo_hdr() 1804 if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) fm10k_sm_mbx_validate_fifo_hdr() 1894 /* push tail in front of head */ fm10k_sm_mbx_receive() 1895 if (tail < mbx->head) fm10k_sm_mbx_receive() 1906 /* guarantee head aligns with the end of the last message */ fm10k_sm_mbx_receive() 1907 mbx->head = fm10k_mbx_head_sub(mbx, mbx->pushed); fm10k_sm_mbx_receive() 1911 if (mbx->head > mbmem_len) fm10k_sm_mbx_receive() 1912 mbx->head -= mbmem_len; fm10k_sm_mbx_receive() 1926 struct fm10k_mbx_info *mbx, u16 head) fm10k_sm_mbx_transmit() 1934 /* push head behind tail */ fm10k_sm_mbx_transmit() 1935 if (mbx->tail < head) fm10k_sm_mbx_transmit() 1936 head += mbmem_len; fm10k_sm_mbx_transmit() 1938 fm10k_mbx_pull_head(hw, mbx, head); fm10k_sm_mbx_transmit() 1959 * fm10k_sm_mbx_create_reply - Generate reply based on state and remote head 1961 * @head: acknowledgement number 1964 * mailbox state and the remote fifo head. It will return the length 1969 struct fm10k_mbx_info *mbx, u16 head) fm10k_sm_mbx_create_reply() 1975 fm10k_sm_mbx_transmit(hw, mbx, head); fm10k_sm_mbx_create_reply() 2043 u16 head, tail; fm10k_sm_mbx_process_version_1() local 2048 head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD); fm10k_sm_mbx_process_version_1() 2070 fm10k_sm_mbx_create_reply(hw, mbx, head); fm10k_sm_mbx_process_version_1() 395 fm10k_mbx_pull_head(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, u16 head) fm10k_mbx_pull_head() argument 1028 fm10k_mbx_create_reply(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, u16 head) fm10k_mbx_create_reply() argument 1925 fm10k_sm_mbx_transmit(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, u16 head) fm10k_sm_mbx_transmit() argument 1968 fm10k_sm_mbx_create_reply(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, u16 head) fm10k_sm_mbx_create_reply() argument
|
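The fm10k FIFOs above use free-running head/tail counters: used space is tail - head, free space is size + head - tail, and indices are masked with size - 1 only on access (fm10k_fifo_head_offset). A minimal sketch of that scheme for a power-of-two ring; the drop-oldest policy in main() is mine, not the driver's:

#include <stdio.h>
#include <stdint.h>

#define FIFO_SIZE 8            /* must be a power of two */

struct fifo { uint32_t buf[FIFO_SIZE]; uint16_t head, tail; };

/* head and tail increase without bound; unsigned wraparound keeps
 * the differences correct even after the counters overflow. */
static uint16_t fifo_used(const struct fifo *f)   { return f->tail - f->head; }
static uint16_t fifo_unused(const struct fifo *f) { return FIFO_SIZE - fifo_used(f); }

static int fifo_push(struct fifo *f, uint32_t v)
{
    if (!fifo_unused(f))
        return -1;
    f->buf[f->tail++ & (FIFO_SIZE - 1)] = v;  /* mask only on access */
    return 0;
}

static int fifo_pop(struct fifo *f, uint32_t *v)
{
    if (!fifo_used(f))
        return -1;
    *v = f->buf[f->head++ & (FIFO_SIZE - 1)];
    return 0;
}

int main(void)
{
    struct fifo f = { .head = 0, .tail = 0 };
    uint32_t v;

    for (uint32_t i = 0; i < 10; i++)
        if (fifo_push(&f, i)) {      /* full: drop the oldest entry */
            fifo_pop(&f, &v);
            fifo_push(&f, i);
        }
    while (!fifo_pop(&f, &v))
        printf("%u ", v);
    printf("\n");                    /* prints 2 3 4 5 6 7 8 9 */
    return 0;
}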
/linux-4.4.14/kernel/bpf/ |
H A D | hashtab.c | 128 static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash, lookup_elem_raw() argument 133 hlist_for_each_entry_rcu(l, head, hash_node) lookup_elem_raw() 144 struct hlist_head *head; htab_map_lookup_elem() local 155 head = select_bucket(htab, hash); htab_map_lookup_elem() 157 l = lookup_elem_raw(head, hash, key, key_size); htab_map_lookup_elem() 169 struct hlist_head *head; htab_map_get_next_key() local 180 head = select_bucket(htab, hash); htab_map_get_next_key() 183 l = lookup_elem_raw(head, hash, key, key_size); htab_map_get_next_key() 207 head = select_bucket(htab, i); htab_map_get_next_key() 210 next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), htab_map_get_next_key() 229 struct hlist_head *head; htab_map_update_elem() local 255 head = select_bucket(htab, l_new->hash); htab_map_update_elem() 257 l_old = lookup_elem_raw(head, l_new->hash, key, key_size); htab_map_update_elem() 279 /* add new element to the head of the list, so that concurrent htab_map_update_elem() 282 hlist_add_head_rcu(&l_new->hash_node, head); htab_map_update_elem() 302 struct hlist_head *head; htab_map_delete_elem() local 316 head = select_bucket(htab, hash); htab_map_delete_elem() 318 l = lookup_elem_raw(head, hash, key, key_size); htab_map_delete_elem() 336 struct hlist_head *head = select_bucket(htab, i); delete_all_elements() local 340 hlist_for_each_entry_safe(l, n, head, hash_node) { hlist_for_each_entry_safe()
|
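lookup_elem_raw() above compares the cached full hash before the expensive key memcmp. A small userspace sketch of a bucketed hash table using the same trick (no RCU; the element layout is invented):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NBUCKETS 4   /* power of two for cheap masking */

struct elem { uint32_t hash; char key[8]; int value; struct elem *next; };

static uint32_t hash_key(const char *k)
{
    uint32_t h = 2166136261u;              /* FNV-1a */
    while (*k) { h ^= (uint8_t)*k++; h *= 16777619u; }
    return h;
}

/* Compare the stored hash first; strcmp only on a hash match. */
static struct elem *lookup(struct elem **tab, const char *key)
{
    uint32_t h = hash_key(key);
    for (struct elem *e = tab[h & (NBUCKETS - 1)]; e; e = e->next)
        if (e->hash == h && !strcmp(e->key, key))
            return e;
    return NULL;
}

static void insert(struct elem **tab, struct elem *e)
{
    e->hash = hash_key(e->key);
    uint32_t b = e->hash & (NBUCKETS - 1);
    e->next = tab[b];        /* add at the bucket head */
    tab[b] = e;
}

int main(void)
{
    struct elem *tab[NBUCKETS] = { 0 };
    struct elem a = { .key = "alpha", .value = 1 };
    struct elem b = { .key = "beta",  .value = 2 };
    insert(tab, &a);
    insert(tab, &b);
    struct elem *e = lookup(tab, "beta");
    printf("beta -> %d\n", e ? e->value : -1);
    return 0;
}

Storing the full hash per element makes most mismatches fail on one integer compare instead of a key comparison.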
/linux-4.4.14/Documentation/mic/mpssd/ |
H A D | micctrl | 42 if [ "`echo $1 | head -c3`" == "mic" ]; then 63 if [ "`echo $1 | head -c3`" == "mic" ]; then 87 if [ "`echo $1 | head -c3`" == "mic" ]; then 108 if [ "`echo $1 | head -c3`" == "mic" ]; then 133 if [ "`echo $1 | head -c3`" == "mic" ]; then
|
/linux-4.4.14/drivers/firmware/efi/ |
H A D | efi-pstore.c | 108 * @head: list head 112 struct list_head *head) efi_pstore_scan_sysfs_enter() 115 if (&next->list != head) efi_pstore_scan_sysfs_enter() 140 * @head: list head 145 struct list_head *head, bool stop) efi_pstore_scan_sysfs_exit() 149 __efi_pstore_scan_sysfs_exit(next, &next->list != head); efi_pstore_scan_sysfs_exit() 169 struct list_head *head = &efivar_sysfs_list; efi_pstore_sysfs_entry_iter() local 173 list_for_each_entry_safe(entry, n, head, list) { list_for_each_entry_safe() 174 efi_pstore_scan_sysfs_enter(entry, n, head); list_for_each_entry_safe() 177 efi_pstore_scan_sysfs_exit(entry, n, head, size < 0); list_for_each_entry_safe() 185 list_for_each_entry_safe_from((*pos), n, head, list) { list_for_each_entry_safe_from() 186 efi_pstore_scan_sysfs_enter((*pos), n, head); list_for_each_entry_safe_from() 189 efi_pstore_scan_sysfs_exit((*pos), n, head, size < 0); list_for_each_entry_safe_from() 110 efi_pstore_scan_sysfs_enter(struct efivar_entry *pos, struct efivar_entry *next, struct list_head *head) efi_pstore_scan_sysfs_enter() argument 143 efi_pstore_scan_sysfs_exit(struct efivar_entry *pos, struct efivar_entry *next, struct list_head *head, bool stop) efi_pstore_scan_sysfs_exit() argument
|
/linux-4.4.14/fs/nfs/ |
H A D | write.c | 101 * nfs_page_find_head_request_locked - find head request associated with @page 105 * returns matching head request with reference held, or NULL if not found. 127 * nfs_page_find_head_request - find head request associated with @page 129 * returns matching head request with reference held, or NULL if not found. 171 * @head - head request of page group 174 * Search page group with head @head to find a request that contains the 183 nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset) nfs_page_group_search_locked() argument 187 WARN_ON_ONCE(head != head->wb_head); nfs_page_group_search_locked() 188 WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_head->wb_flags)); nfs_page_group_search_locked() 190 req = head; nfs_page_group_search_locked() 197 } while (req != head); nfs_page_group_search_locked() 204 * @head - head request of page group 206 * Return true if the page group with head @head covers the whole page, 314 * @head - head request of page group, must be holding head lock 324 nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head, 332 for (tmp = head ; tmp != req; tmp = tmp->wb_this_page) 340 nfs_page_group_unlock(head); variable 344 nfs_release_request(head); variable 359 * @old_head - the old head of the list 386 /* release ref on old head request */ nfs_destroy_unlinked_subrequests() 409 * nfs_lock_and_join_requests - join all subreqs to the head req and return 416 * This function joins all sub requests to the head request by first 418 * and finally updating the head request to cover the whole range covered by 422 * Returns a locked, referenced pointer to the head request - which after 431 struct nfs_page *head, *subreq; nfs_lock_and_join_requests() local 444 * A reference is taken only on the head request which acts as a nfs_lock_and_join_requests() 446 * until the head reference is released. nfs_lock_and_join_requests() 448 head = nfs_page_find_head_request_locked(NFS_I(inode), page); nfs_lock_and_join_requests() 450 if (!head) { nfs_lock_and_join_requests() 457 ret = nfs_page_group_lock(head, true); nfs_lock_and_join_requests() 462 nfs_page_group_lock_wait(head); nfs_lock_and_join_requests() 463 nfs_release_request(head); nfs_lock_and_join_requests() 467 nfs_release_request(head); nfs_lock_and_join_requests() 472 subreq = head; nfs_lock_and_join_requests() 478 if (subreq->wb_offset == (head->wb_offset + total_bytes)) { nfs_lock_and_join_requests() 481 } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset || nfs_lock_and_join_requests() 483 (head->wb_offset + total_bytes)))) { nfs_lock_and_join_requests() 484 nfs_page_group_unlock(head); nfs_lock_and_join_requests() 492 ret = nfs_unroll_locks_and_wait(inode, head, nfs_lock_and_join_requests() 502 } while (subreq != head); nfs_lock_and_join_requests() 506 subreq = head; nfs_lock_and_join_requests() 510 } while (subreq != head); nfs_lock_and_join_requests() 512 /* unlink subrequests from head, destroy them later */ nfs_lock_and_join_requests() 513 if (head->wb_this_page != head) { nfs_lock_and_join_requests() 514 /* destroy list will be terminated by head */ nfs_lock_and_join_requests() 515 destroy_list = head->wb_this_page; nfs_lock_and_join_requests() 516 head->wb_this_page = head; nfs_lock_and_join_requests() 518 /* change head request to cover whole range that nfs_lock_and_join_requests() 520 head->wb_bytes = total_bytes; nfs_lock_and_join_requests() 524 * prepare head request to be added to new pgio descriptor nfs_lock_and_join_requests() 526 nfs_page_group_clear_bits(head); nfs_lock_and_join_requests() 531 * grab a reference for the head request, iff it needs one. nfs_lock_and_join_requests() 533 if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags)) nfs_lock_and_join_requests() 534 kref_get(&head->wb_kref); nfs_lock_and_join_requests() 536 nfs_page_group_unlock(head); nfs_lock_and_join_requests() 541 nfs_destroy_unlinked_subrequests(destroy_list, head); nfs_lock_and_join_requests() 543 /* still holds ref on head from nfs_page_find_head_request_locked nfs_lock_and_join_requests() 544 * and still has lock on head from lock loop */ nfs_lock_and_join_requests() 545 return head; nfs_lock_and_join_requests() 691 /* this a head request for a page group - mark it as having an nfs_inode_add_request() 707 struct nfs_page *head; nfs_inode_remove_request() local 710 head = req->wb_head; nfs_inode_remove_request() 713 if (likely(!PageSwapCache(head->wb_page))) { nfs_inode_remove_request() 714 set_page_private(head->wb_page, 0); nfs_inode_remove_request() 715 ClearPagePrivate(head->wb_page); nfs_inode_remove_request() 717 wake_up_page(head->wb_page, PG_private); nfs_inode_remove_request() 718 clear_bit(PG_MAPPED, &head->wb_flags); nfs_inode_remove_request() 741 * Search through commit lists on @inode for the head request for @page. 744 * Returns the head request if found, or NULL if not found.
773 * @dst: commit list head 795 * @dst: commit list head 1305 /* If a nfs_flush_* function fails, it should remove reqs from @head and 1318 static void nfs_async_write_error(struct list_head *head) nfs_async_write_error() argument 1322 while (!list_empty(head)) { nfs_async_write_error() 1323 req = nfs_list_entry(head->next); nfs_async_write_error() 1602 static loff_t nfs_get_lwb(struct list_head *head) nfs_get_lwb() argument 1607 list_for_each_entry(req, head, wb_list) nfs_get_lwb() 1618 struct list_head *head, nfs_init_commit() 1622 struct nfs_page *first = nfs_list_entry(head->next); nfs_init_commit() 1628 list_splice_init(head, &data->pages); nfs_init_commit() 1673 nfs_commit_list(struct inode *inode, struct list_head *head, int how, nfs_commit_list() argument 1684 nfs_init_commit(data, head, NULL, cinfo); nfs_commit_list() 1689 nfs_retry_commit(head, NULL, cinfo, 0); nfs_commit_list() 1775 int nfs_generic_commit_list(struct inode *inode, struct list_head *head, nfs_generic_commit_list() argument 1780 status = pnfs_commit_list(inode, head, how, cinfo); nfs_generic_commit_list() 1782 status = nfs_commit_list(inode, head, how, cinfo); nfs_generic_commit_list() 1788 LIST_HEAD(head); nfs_commit_inode() 1797 res = nfs_scan_commit(inode, &head, &cinfo); nfs_commit_inode() 1801 error = nfs_generic_commit_list(inode, &head, how, &cinfo); nfs_commit_inode() 1893 /* blocking call to cancel all requests and join to a single (head) nfs_wb_page_cancel() 1901 * nfs_lock_and_join_requests, so just remove the head nfs_wb_page_cancel() 1617 nfs_init_commit(struct nfs_commit_data *data, struct list_head *head, struct pnfs_layout_segment *lseg, struct nfs_commit_info *cinfo) nfs_init_commit() argument
|
/linux-4.4.14/sound/core/seq/ |
H A D | seq_fifo.c | 55 f->head = NULL; snd_seq_fifo_new() 136 if (f->head == NULL) snd_seq_fifo_event_in() 137 f->head = cell; snd_seq_fifo_event_in() 156 if ((cell = f->head) != NULL) { fifo_cell_out() 157 f->head = cell->next; fifo_cell_out() 215 cell->next = f->head; snd_seq_fifo_cell_putback() 216 f->head = cell; snd_seq_fifo_cell_putback() 253 oldhead = f->head; snd_seq_fifo_resize() 256 f->head = NULL; snd_seq_fifo_resize()
|
H A D | seq_prioq.h | 30 struct snd_seq_event_cell *head; /* pointer to head of prioq */ member in struct:snd_seq_prioq 52 /* peek at cell at the head of the prioq */
|
H A D | seq_prioq.c | 66 f->head = NULL; snd_seq_prioq_new() 180 cur = f->head; /* cursor */ snd_seq_prioq_cell_in() 208 if (f->head == cur) /* this is the first cell, set head to it */ snd_seq_prioq_cell_in() 209 f->head = cell; snd_seq_prioq_cell_in() 229 cell = f->head; snd_seq_prioq_cell_out() 231 f->head = cell->next; snd_seq_prioq_cell_out() 256 /* peek at cell at the head of the prioq */ snd_seq_prioq_cell_peek() 263 return f->head; snd_seq_prioq_cell_peek() 299 cell = f->head; snd_seq_prioq_leave() 304 if (cell == f->head) { snd_seq_prioq_leave() 305 f->head = cell->next; snd_seq_prioq_leave() 409 cell = f->head; snd_seq_prioq_remove_events() 417 if (cell == f->head) { snd_seq_prioq_remove_events() 418 f->head = cell->next; snd_seq_prioq_remove_events()
|
/linux-4.4.14/drivers/input/joystick/iforce/ |
H A D | iforce-packets.c | 55 int head, tail; iforce_send_packet() local 59 * Update head and tail of xmit buffer iforce_send_packet() 63 head = iforce->xmit.head; iforce_send_packet() 67 if (CIRC_SPACE(head, tail, XMIT_SIZE) < n+2) { iforce_send_packet() 74 empty = head == tail; iforce_send_packet() 75 XMIT_INC(iforce->xmit.head, n+2); iforce_send_packet() 80 iforce->xmit.buf[head] = HI(cmd); iforce_send_packet() 81 XMIT_INC(head, 1); iforce_send_packet() 82 iforce->xmit.buf[head] = LO(cmd); iforce_send_packet() 83 XMIT_INC(head, 1); iforce_send_packet() 85 c = CIRC_SPACE_TO_END(head, tail, XMIT_SIZE); iforce_send_packet() 88 memcpy(&iforce->xmit.buf[head], iforce_send_packet() 96 XMIT_INC(head, n); iforce_send_packet()
|
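iforce_send_packet() above sizes its copies with CIRC_SPACE() and CIRC_SPACE_TO_END(). The arithmetic below is re-derived for a power-of-two ring rather than copied from linux/circ_buf.h, so treat the exact kernel definitions as an assumption:

#include <stdio.h>

#define SIZE 16   /* ring size, power of two */

/* Bytes available to read: distance from tail to head. */
static int circ_cnt(int head, int tail)   { return (head - tail) & (SIZE - 1); }

/* Bytes available to write; one slot is kept empty so that
 * head == tail unambiguously means "empty". */
static int circ_space(int head, int tail) { return circ_cnt(tail, head + 1); }

/* Writable bytes before the buffer end (one contiguous run). */
static int circ_space_to_end(int head, int tail)
{
    int space  = circ_space(head, tail);
    int to_end = SIZE - head;
    return space < to_end ? space : to_end;
}

int main(void)
{
    int head = 14, tail = 3;
    printf("cnt=%d space=%d contiguous=%d\n",
           circ_cnt(head, tail),          /* 11 */
           circ_space(head, tail),        /*  4 */
           circ_space_to_end(head, tail)); /* 2 */
    return 0;
}

Reserving one empty slot means at most SIZE - 1 bytes are ever in flight, which is the price paid for not keeping a separate count.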
/linux-4.4.14/fs/9p/ |
H A D | vfs_dir.c | 46 * @head: start offset of current dirread buffer 55 int head; member in struct:p9_rdir 133 if (rdir->tail == rdir->head) { v9fs_dir_readdir() 144 rdir->head = 0; v9fs_dir_readdir() 147 while (rdir->head < rdir->tail) { v9fs_dir_readdir() 149 err = p9stat_read(fid->clnt, rdir->buf + rdir->head, v9fs_dir_readdir() 150 rdir->tail - rdir->head, &st); v9fs_dir_readdir() 164 rdir->head += reclen; v9fs_dir_readdir() 194 if (rdir->tail == rdir->head) { v9fs_dir_readdir_dotl() 200 rdir->head = 0; v9fs_dir_readdir_dotl() 204 while (rdir->head < rdir->tail) { v9fs_dir_readdir_dotl() 206 err = p9dirent_read(fid->clnt, rdir->buf + rdir->head, v9fs_dir_readdir_dotl() 207 rdir->tail - rdir->head, v9fs_dir_readdir_dotl() 221 rdir->head += err; v9fs_dir_readdir_dotl()
|
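v9fs_dir_readdir() above refills its buffer when head reaches tail, then advances head by one record length per directory entry. A generic sketch of that consume-and-refill loop, with a made-up length-prefixed record format standing in for 9p stat records:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct rdir { uint8_t buf[32]; int head, tail; };

/* Pretend "server" that hands out batches of length-prefixed names. */
static int fill(struct rdir *r)
{
    static int calls;
    const char *batch[] = { "\x05" "alpha" "\x04" "beta", "\x05" "gamma" };
    if (calls >= 2)
        return 0;                        /* EOF */
    int n = (int)strlen(batch[calls]);
    memcpy(r->buf, batch[calls], n);
    calls++;
    r->head = 0;
    r->tail = n;
    return n;
}

int main(void)
{
    struct rdir r = { .head = 0, .tail = 0 };
    for (;;) {
        if (r.head == r.tail && fill(&r) == 0)
            break;                       /* buffer drained and EOF */
        int reclen = r.buf[r.head] + 1;  /* length byte + payload */
        printf("%.*s\n", r.buf[r.head], (const char *)&r.buf[r.head + 1]);
        r.head += reclen;                /* step to the next record */
    }
    return 0;
}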
/linux-4.4.14/net/sunrpc/ |
H A D | xdr.c | 135 struct kvec *head = xdr->head; xdr_inline_pages() local 137 char *buf = (char *)head->iov_base; xdr_inline_pages() 138 unsigned int buflen = head->iov_len; xdr_inline_pages() 140 head->iov_len = offset; xdr_inline_pages() 309 * @len: bytes to remove from buf->head[0] 311 * Shrinks XDR buffer's header kvec buf->head[0] by 318 struct kvec *head, *tail; xdr_shrink_bufhead() local 323 head = buf->head; xdr_shrink_bufhead() 325 WARN_ON_ONCE(len > head->iov_len); xdr_shrink_bufhead() 326 if (len > head->iov_len) xdr_shrink_bufhead() 327 len = head->iov_len; xdr_shrink_bufhead() 350 /* Do we also need to copy data from the head into the tail ? */ xdr_shrink_bufhead() 356 (char *)head->iov_base + xdr_shrink_bufhead() 357 head->iov_len - offs, xdr_shrink_bufhead() 372 (char *)head->iov_base + head->iov_len - len, xdr_shrink_bufhead() 375 head->iov_len -= len; xdr_shrink_bufhead() 402 tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len; xdr_shrink_pagelen() 462 struct kvec *iov = buf->head; xdr_init_encode() 591 * head, tail, and page lengths are adjusted to correspond. 595 * except in the case of the head buffer when we assume the head 606 struct kvec *head = buf->head; xdr_truncate_encode() local 643 xdr->end = head->iov_base + head->iov_len; xdr_truncate_encode() 647 head->iov_len = len; xdr_truncate_encode() 649 xdr->p = head->iov_base + head->iov_len; xdr_truncate_encode() 650 xdr->iov = buf->head; xdr_truncate_encode() 777 else if (xdr->iov == xdr->buf->head) { xdr_set_next_buffer() 796 if (buf->head[0].iov_len != 0) xdr_init_decode() 797 xdr_set_iov(xdr, buf->head, buf->len); xdr_init_decode() 912 iov = buf->head; xdr_align_pages() 937 * Moves data beyond the current pointer position from the XDR head[] buffer 979 * Moves data beyond the current pointer position from the XDR head[] buffer 1001 buf->head[0] = *iov; xdr_buf_from_iov() 1027 if (base < buf->head[0].iov_len) { xdr_buf_subsegment() 1028 subbuf->head[0].iov_base = buf->head[0].iov_base + base; xdr_buf_subsegment() 1029 subbuf->head[0].iov_len = min_t(unsigned int, len, xdr_buf_subsegment() 1030 buf->head[0].iov_len - base); xdr_buf_subsegment() 1031 len -= subbuf->head[0].iov_len; xdr_buf_subsegment() 1034 base -= buf->head[0].iov_len; xdr_buf_subsegment() 1035 subbuf->head[0].iov_len = 0; xdr_buf_subsegment() 1074 * too small, or if (for instance) it's all in the head and the parser has 1098 if (buf->head[0].iov_len) { xdr_buf_trim() 1099 cur = min_t(size_t, buf->head[0].iov_len, trim); xdr_buf_trim() 1100 buf->head[0].iov_len -= cur; xdr_buf_trim() 1112 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len); __read_bytes_from_xdr_buf() 1113 memcpy(obj, subbuf->head[0].iov_base, this_len); __read_bytes_from_xdr_buf() 1143 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len); __write_bytes_to_xdr_buf() 1144 memcpy(subbuf->head[0].iov_base, obj, this_len); __write_bytes_to_xdr_buf() 1194 * entirely in the head or the tail, set object to point to it; otherwise 1206 /* Is the obj contained entirely in the head? */ xdr_buf_read_netobj() 1207 obj->data = subbuf.head[0].iov_base; xdr_buf_read_netobj() 1208 if (subbuf.head[0].iov_len == obj->len) xdr_buf_read_netobj() 1225 obj->data = buf->head[0].iov_base + buf->head[0].iov_len; xdr_buf_read_netobj() 1258 /* process head */ xdr_xcode_array2() 1259 if (todo && base < buf->head->iov_len) { xdr_xcode_array2() 1260 c = buf->head->iov_base + base; xdr_xcode_array2() 1262 buf->head->iov_len - base); xdr_xcode_array2() 1288 base = buf->head->iov_len; /* align to start of pages */ xdr_xcode_array2() 1292 base -= buf->head->iov_len; xdr_xcode_array2() 1442 buf->head->iov_len + buf->page_len + buf->tail->iov_len) xdr_encode_array2() 1459 if (offset >= buf->head[0].iov_len) { xdr_process_buf() 1460 offset -= buf->head[0].iov_len; xdr_process_buf() 1462 thislen = buf->head[0].iov_len - offset; xdr_process_buf() 1465 sg_set_buf(sg, buf->head[0].iov_base + offset, thislen); xdr_process_buf()
|
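An xdr_buf spreads one logical byte stream over the head kvec, the page array, and the tail kvec; xdr_buf_subsegment() above skips base bytes region by region and takes what remains. A simplified sketch of that skip-then-take mapping, with flat regions standing in for kvecs and pages:

#include <stdio.h>

struct region { const char *name; int len; };

/* Walk regions in order, reporting which bytes of the request land
 * in each one -- the same skip-then-take logic xdr_buf_subsegment()
 * applies to head, pages, and tail. */
static void map_subsegment(struct region *r, int nregions, int base, int len)
{
    for (int i = 0; i < nregions && len > 0; i++) {
        if (base >= r[i].len) {   /* request starts past this region */
            base -= r[i].len;
            continue;
        }
        int take = r[i].len - base;
        if (take > len)
            take = len;
        printf("%s: offset %d, %d bytes\n", r[i].name, base, take);
        len -= take;
        base = 0;                 /* later regions start at offset 0 */
    }
    if (len > 0)
        printf("request overruns the buffer by %d bytes\n", len);
}

int main(void)
{
    struct region buf[] = { { "head", 100 }, { "pages", 4096 }, { "tail", 50 } };
    map_subsegment(buf, 3, 80, 200);  /* spans head into pages */
    return 0;
}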
/linux-4.4.14/kernel/power/ |
H A D | console.c | 22 struct list_head head; member in struct:pm_vt_switch 50 list_for_each_entry(tmp, &pm_vt_switch_list, head) { pm_vt_switch_required() 65 list_add(&entry->head, &pm_vt_switch_list); pm_vt_switch_required() 82 list_for_each_entry(tmp, &pm_vt_switch_list, head) { pm_vt_switch_unregister() 84 list_del(&tmp->head); pm_vt_switch_unregister() 118 list_for_each_entry(entry, &pm_vt_switch_list, head) { pm_vt_switch()
|
/linux-4.4.14/include/linux/isdn/ |
H A D | capilli.h | 106 void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize); 107 void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci); 108 void capilib_release_appl(struct list_head *head, u16 applid); 109 void capilib_release(struct list_head *head); 110 void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid); 111 u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid);
|
/linux-4.4.14/tools/testing/selftests/timers/ |
H A D | clocksource-switch.c | 56 char *head, *tmp; get_clocksources() local 67 head = buf; get_clocksources() 69 while (head - buf < size) { get_clocksources() 71 for (tmp = head; *tmp != ' '; tmp++) { get_clocksources() 78 strcpy(list[i], head); get_clocksources() 79 head = tmp + 1; get_clocksources()
|
/linux-4.4.14/drivers/input/serio/ |
H A D | serio_raw.c | 33 unsigned int tail, head; member in struct:serio_raw 149 empty = serio_raw->head == serio_raw->tail; serio_raw_fetch_byte() 173 if (serio_raw->head == serio_raw->tail && serio_raw_read() 191 serio_raw->head != serio_raw->tail || serio_raw_read() 251 if (serio_raw->head != serio_raw->tail) serio_raw_poll() 278 unsigned int head = serio_raw->head; serio_raw_interrupt() local 281 serio_raw->queue[head] = data; serio_raw_interrupt() 282 head = (head + 1) % SERIO_RAW_QUEUE_LEN; serio_raw_interrupt() 283 if (likely(head != serio_raw->tail)) { serio_raw_interrupt() 284 serio_raw->head = head; serio_raw_interrupt()
|
/linux-4.4.14/fs/btrfs/ |
H A D | delayed-ref.c | 88 /* insert a new ref to head ref rbtree */ htree_insert() 119 * find an head entry based on bytenr. This returns the delayed ref 120 * head if it was able to find one, or NULL if nothing was in that spot. 158 struct btrfs_delayed_ref_head *head) btrfs_delayed_ref_lock() 164 if (mutex_trylock(&head->mutex)) btrfs_delayed_ref_lock() 167 atomic_inc(&head->node.refs); btrfs_delayed_ref_lock() 170 mutex_lock(&head->mutex); btrfs_delayed_ref_lock() 172 if (!head->node.in_tree) { btrfs_delayed_ref_lock() 173 mutex_unlock(&head->mutex); btrfs_delayed_ref_lock() 174 btrfs_put_delayed_ref(&head->node); btrfs_delayed_ref_lock() 177 btrfs_put_delayed_ref(&head->node); btrfs_delayed_ref_lock() 183 struct btrfs_delayed_ref_head *head, drop_delayed_ref() 187 head = btrfs_delayed_node_to_head(ref); drop_delayed_ref() 188 rb_erase(&head->href_node, &delayed_refs->href_root); drop_delayed_ref() 190 assert_spin_locked(&head->lock); drop_delayed_ref() 202 struct btrfs_delayed_ref_head *head, merge_ref() 209 next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node, merge_ref() 211 while (!done && &next->list != &head->ref_list) { merge_ref() 248 drop_delayed_ref(trans, delayed_refs, head, next); merge_ref() 251 drop_delayed_ref(trans, delayed_refs, head, ref); merge_ref() 270 struct btrfs_delayed_ref_head *head) btrfs_merge_delayed_refs() 275 assert_spin_locked(&head->lock); btrfs_merge_delayed_refs() 277 if (list_empty(&head->ref_list)) btrfs_merge_delayed_refs() 281 if (head->is_data) btrfs_merge_delayed_refs() 294 ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node, btrfs_merge_delayed_refs() 296 while (&ref->list != &head->ref_list) { btrfs_merge_delayed_refs() 300 if (merge_ref(trans, delayed_refs, head, ref, seq)) { btrfs_merge_delayed_refs() 301 if (list_empty(&head->ref_list)) btrfs_merge_delayed_refs() 303 ref = list_first_entry(&head->ref_list, btrfs_merge_delayed_refs() 341 struct btrfs_delayed_ref_head *head; btrfs_select_ref_head() local 349 head = find_ref_head(&delayed_refs->href_root, start, 1); btrfs_select_ref_head() 350 if (!head && !loop) { btrfs_select_ref_head() 354 head = find_ref_head(&delayed_refs->href_root, start, 1); btrfs_select_ref_head() 355 if (!head) btrfs_select_ref_head() 357 } else if (!head && loop) { btrfs_select_ref_head() 361 while (head->processing) { btrfs_select_ref_head() 364 node = rb_next(&head->href_node); btrfs_select_ref_head() 373 head = rb_entry(node, struct btrfs_delayed_ref_head, btrfs_select_ref_head() 377 head->processing = 1; btrfs_select_ref_head() 380 delayed_refs->run_delayed_start = head->node.bytenr + btrfs_select_ref_head() 381 head->node.num_bytes; btrfs_select_ref_head() 382 return head; btrfs_select_ref_head() 453 * helper function to update the accounting in the head ref 474 * with an existing head ref without update_existing_head_ref() 507 * update the reference mod on the head to reflect this new operation, update_existing_head_ref() 529 * helper function to actually insert a head node into the rbtree. 552 * the head node stores the sum of all the mods, so dropping a ref add_delayed_ref_head() 553 * should drop the sum in the head node by one. add_delayed_ref_head() 787 * insert both the head node and the new ref without dropping btrfs_add_delayed_tree_ref() 849 * insert both the head node and the new ref without dropping btrfs_add_delayed_data_ref() 917 * this does a simple search for the head node for a given extent. 919 * the head node if any where found, or NULL if not. 
157 btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head) btrfs_delayed_ref_lock() argument
181 drop_delayed_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head, struct btrfs_delayed_ref_node *ref) drop_delayed_ref() argument
200 merge_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head, struct btrfs_delayed_ref_node *ref, u64 seq) merge_ref() argument
267 btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head) btrfs_merge_delayed_refs() argument
|
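The btrfs_delayed_ref_lock() hits above trace out a common kernel locking pattern: try the mutex without blocking while the tree spinlock is held, and on contention pin the object with a reference count, drop the spinlock, sleep on the mutex, then re-check that the object is still in the tree. A minimal sketch of that pattern, assuming hypothetical names (struct my_head, my_head_lock); the real btrfs code frees the head when the last reference drops, which is elided here:

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/errno.h>

struct my_head {
	struct mutex mutex;
	atomic_t refs;
	bool in_tree;
};

/* Returns 0 with head->mutex held, or -EAGAIN if the head was removed
 * while we slept. Called (and returns) with *tree_lock held. */
static int my_head_lock(spinlock_t *tree_lock, struct my_head *head)
{
	if (mutex_trylock(&head->mutex))
		return 0;			/* fast path: uncontended */

	atomic_inc(&head->refs);		/* pin so it cannot be freed */
	spin_unlock(tree_lock);

	mutex_lock(&head->mutex);		/* may sleep */
	spin_lock(tree_lock);
	if (!head->in_tree) {			/* lost a race with removal */
		mutex_unlock(&head->mutex);
		atomic_dec(&head->refs);	/* real code frees on zero */
		return -EAGAIN;
	}
	atomic_dec(&head->refs);
	return 0;
}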
/linux-4.4.14/arch/powerpc/platforms/pseries/ |
H A D | hvcserver.c | 69 * @head: list_head pointer for an allocated list of partner info structs to
75 int hvcs_free_partner_info(struct list_head *head) hvcs_free_partner_info() argument
80 if (!head) hvcs_free_partner_info()
83 while (!list_empty(head)) { hvcs_free_partner_info()
84 element = head->next; hvcs_free_partner_info()
111 * @head: An initialized list_head pointer to an empty list to use to return the
129 * hvcs_free_partner_info() using a pointer to the SAME list head instance
132 int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head, hvcs_get_partner_info() argument
146 if (!head || !pi_buff) hvcs_get_partner_info()
151 INIT_LIST_HEAD(head); hvcs_get_partner_info()
161 if (!list_empty(head)) hvcs_get_partner_info()
182 hvcs_free_partner_info(head); hvcs_get_partner_info()
196 list_add_tail(&(next_partner_info->node), head); hvcs_get_partner_info()
|
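hvcs_get_partner_info() above initializes the caller's list_head, appends entries with list_add_tail(), and on failure hands the partially built list straight back to hvcs_free_partner_info(), which pops head->next until list_empty() holds. A small sketch of that build/teardown pairing, with a hypothetical partner_info struct standing in for the hvcs one:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct partner_info {
	struct list_head node;
	int unit_address;
};

/* Pop-and-free until empty, mirroring hvcs_free_partner_info(). */
static int free_partner_list(struct list_head *head)
{
	struct partner_info *pi;

	if (!head)
		return -EFAULT;
	while (!list_empty(head)) {
		pi = list_entry(head->next, struct partner_info, node);
		list_del(&pi->node);
		kfree(pi);
	}
	return 0;
}

/* Build a list on the caller's (possibly uninitialized) list_head. */
static int fill_partner_list(struct list_head *head, int count)
{
	struct partner_info *pi;
	int i;

	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++) {
		pi = kzalloc(sizeof(*pi), GFP_KERNEL);
		if (!pi) {
			/* free via the SAME head, as the hvcs docs insist */
			free_partner_list(head);
			return -ENOMEM;
		}
		pi->unit_address = i;
		list_add_tail(&pi->node, head);
	}
	return 0;
}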
/linux-4.4.14/drivers/block/drbd/ |
H A D | drbd_nla.c | 8 struct nlattr *head = nla_data(nla); drbd_nla_check_mandatory() local
20 nla_for_each_attr(nla, head, len, rem) { nla_for_each_attr()
|
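drbd_nla_check_mandatory() treats a nested attribute as a raw stream: nla_data()/nla_len() expose the payload and nla_for_each_attr() walks it attribute by attribute. A sketch of that iteration shape; the mandatory bit here (MY_MANDATORY_FLAG) is a made-up stand-in for DRBD's own flag definition, not a generic netlink one:

#include <net/netlink.h>
#include <linux/errno.h>

#define MY_MANDATORY_FLAG (1 << 14)	/* hypothetical private flag bit */

static int check_attrs(struct nlattr *nested)
{
	struct nlattr *head = nla_data(nested);
	int len = nla_len(nested);
	struct nlattr *nla;
	int rem;

	nla_for_each_attr(nla, head, len, rem) {
		if (nla->nla_type & MY_MANDATORY_FLAG)
			return -EOPNOTSUPP;	/* unknown but mandatory */
	}
	return 0;
}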
/linux-4.4.14/arch/arm/boot/compressed/ |
H A D | head-xscale.S | 2 * linux/arch/arm/boot/compressed/head-xscale.S
4 * XScale specific tweaks. This is merged into head.S by the linker.
|
/linux-4.4.14/arch/score/ |
H A D | Makefile | 28 head-y := arch/score/kernel/head.o
|
/linux-4.4.14/arch/h8300/boot/compressed/ |
H A D | Makefile | 7 targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o
9 OBJECTS = $(obj)/head.o $(obj)/misc.o
|
/linux-4.4.14/arch/hexagon/ |
H A D | Makefile | 37 head-y := arch/hexagon/kernel/head.o
|
/linux-4.4.14/arch/m32r/ |
H A D | Makefile | 34 head-y := arch/m32r/kernel/head.o
|
/linux-4.4.14/arch/m32r/boot/compressed/ |
H A D | Makefile | 8 vmlinux.bin.lzma head.o misc.o piggy.o vmlinux.lds
10 OBJECTS = $(obj)/head.o $(obj)/misc.o
|
/linux-4.4.14/drivers/md/ |
H A D | multipath.h | 19 * this is our 'private' 'collective' MULTIPATH buffer head.
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/sw/ |
H A D | chan.h | 12 struct list_head head; member in struct:nvkm_sw_chan
|
/linux-4.4.14/fs/notify/ |
H A D | fsnotify.h | 18 extern u32 fsnotify_recalc_mask(struct hlist_head *head);
27 extern int fsnotify_add_mark_list(struct hlist_head *head,
44 extern struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head,
47 extern void fsnotify_destroy_marks(struct hlist_head *head, spinlock_t *lock);
|
/linux-4.4.14/include/crypto/ |
H A D | scatterwalk.h | 28 static inline void scatterwalk_crypto_chain(struct scatterlist *head, scatterwalk_crypto_chain() argument
33 head->length += sg->length; scatterwalk_crypto_chain()
38 sg_chain(head, num, sg); scatterwalk_crypto_chain()
40 sg_mark_end(head); scatterwalk_crypto_chain()
|
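scatterwalk_crypto_chain() above either splices a follow-on scatterlist into the last slot of the head table or marks the head as the end of the walk. A minimal usage sketch of the underlying sg_chain() mechanics with hypothetical buffers; note the last slot of the first table becomes a link entry, not data:

#include <linux/scatterlist.h>

static void chain_two_tables(struct scatterlist *first,	/* 2 slots */
			     struct scatterlist *second,	/* payload */
			     void *buf, unsigned int len)
{
	sg_init_table(first, 2);
	sg_set_buf(&first[0], buf, len);
	/* slot 1 of 'first' now points at 'second' instead of data */
	sg_chain(first, 2, second);
}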
/linux-4.4.14/arch/mips/include/asm/mach-generic/ |
H A D | kernel-entry-init.h | 12 /* Intentionally empty macro, used in head.S. Override in
|
/linux-4.4.14/arch/sparc/kernel/ |
H A D | sparc_ksyms_32.c | 13 #include <asm/head.h>
|
/linux-4.4.14/arch/microblaze/kernel/ |
H A D | Makefile | 15 extra-y := head.o vmlinux.lds
|
/linux-4.4.14/arch/m68k/hp300/ |
H A D | reboot.S | 7 * good stuff that head.S did when we started up. The caches and MMU must be
|
/linux-4.4.14/arch/avr32/ |
H A D | Makefile | 31 head-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/head.o
32 head-y += arch/avr32/kernel/head.o
|
/linux-4.4.14/net/netfilter/ |
H A D | nf_internals.h | 16 unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/pm/ |
H A D | priv.h | 14 struct list_head head; member in struct:nvkm_perfctr
37 struct list_head head; member in struct:nvkm_perfsrc
74 struct list_head head; member in struct:nvkm_perfdom
|
/linux-4.4.14/arch/m68k/ |
H A D | Makefile | 90 # Select the assembler head startup code. Order is important. The default
91 # head code is first, processor specific selections can override it after.
93 head-y := arch/m68k/kernel/head.o
94 head-$(CONFIG_SUN3) := arch/m68k/kernel/sun3-head.o
95 head-$(CONFIG_M68360) := arch/m68k/68360/head.o
96 head-$(CONFIG_M68000) := arch/m68k/68000/head.o
97 head-$(CONFIG_COLDFIRE) := arch/m68k/coldfire/head.o
|
/linux-4.4.14/arch/arm64/kernel/ |
H A D | Makefile | 47 head-y := head.o
48 extra-y += $(head-y) vmlinux.lds
|
/linux-4.4.14/tools/perf/scripts/python/ |
H A D | compaction-times.py | 121 head = cls.heads[pid]
122 filtered = head.is_filtered()
126 head = cls.heads[pid] = chead(comm, pid, filtered)
129 head.mark_pending(start_secs, start_nsecs)
133 head = cls.heads[pid]
134 if not head.is_filtered():
135 if head.is_pending():
136 head.do_increment(migrated, fscan, mscan)
142 head = cls.heads[pid]
143 if not head.is_filtered():
144 if head.is_pending():
145 head.make_complete(secs, nsecs)
|
/linux-4.4.14/tools/perf/arch/x86/util/ |
H A D | intel-pt.c | 875 void *data, size_t head) intel_pt_compare_ref()
880 if (head > ref_offset || head < ref_end - buf_size) intel_pt_compare_ref()
882 } else if (head > ref_offset && head < ref_end) { intel_pt_compare_ref()
891 void *data, size_t head) intel_pt_copy_ref()
893 if (head >= ref_size) { intel_pt_copy_ref()
894 memcpy(ref_buf, data + head - ref_size, ref_size); intel_pt_copy_ref()
896 memcpy(ref_buf, data, head); intel_pt_copy_ref()
897 ref_size -= head; intel_pt_copy_ref()
898 memcpy(ref_buf + head, data + buf_size - ref_size, ref_size); intel_pt_copy_ref()
904 u64 head) intel_pt_wrapped()
911 data, head); intel_pt_wrapped()
914 data, head); intel_pt_wrapped()
938 u64 *head, u64 *old) intel_pt_find_snapshot()
945 pr_debug3("%s: mmap index %d old head %zu new head %zu\n", intel_pt_find_snapshot()
946 __func__, idx, (size_t)*old, (size_t)*head); intel_pt_find_snapshot()
964 wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head); intel_pt_find_snapshot()
974 * In full trace mode 'head' continually increases. However in snapshot intel_pt_find_snapshot()
975 * mode 'head' is an offset within the buffer. Here 'old' and 'head' intel_pt_find_snapshot()
977 * always less than 'head'. intel_pt_find_snapshot()
980 *old = *head; intel_pt_find_snapshot()
981 *head += mm->len; intel_pt_find_snapshot()
987 if (*old > *head) intel_pt_find_snapshot()
988 *head += mm->len; intel_pt_find_snapshot()
991 pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n", intel_pt_find_snapshot()
992 __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head); intel_pt_find_snapshot()
873 intel_pt_compare_ref(void *ref_buf, size_t ref_offset, size_t ref_size, size_t buf_size, void *data, size_t head) intel_pt_compare_ref() argument
890 intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size, void *data, size_t head) intel_pt_copy_ref() argument
902 intel_pt_wrapped(struct intel_pt_recording *ptr, int idx, struct auxtrace_mmap *mm, unsigned char *data, u64 head) intel_pt_wrapped() argument
936 intel_pt_find_snapshot(struct auxtrace_record *itr, int idx, struct auxtrace_mmap *mm, unsigned char *data, u64 *head, u64 *old) intel_pt_find_snapshot() argument
|
H A D | intel-bts.c | 367 u64 *head, u64 *old) intel_bts_find_snapshot()
374 pr_debug3("%s: mmap index %d old head %zu new head %zu\n", intel_bts_find_snapshot()
375 __func__, idx, (size_t)*old, (size_t)*head); intel_bts_find_snapshot()
390 * In full trace mode 'head' continually increases. However in snapshot intel_bts_find_snapshot()
391 * mode 'head' is an offset within the buffer. Here 'old' and 'head' intel_bts_find_snapshot()
393 * always less than 'head'. intel_bts_find_snapshot()
396 *old = *head; intel_bts_find_snapshot()
397 *head += mm->len; intel_bts_find_snapshot()
403 if (*old > *head) intel_bts_find_snapshot()
404 *head += mm->len; intel_bts_find_snapshot()
407 pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n", intel_bts_find_snapshot()
408 __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head); intel_bts_find_snapshot()
365 intel_bts_find_snapshot(struct auxtrace_record *itr, int idx, struct auxtrace_mmap *mm, unsigned char *data, u64 *head, u64 *old) intel_bts_find_snapshot() argument
|
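Both snapshot hooks above normalize ring-buffer offsets into the monotonically increasing positions the rest of perf expects: after a wrap-around, everything in the buffer is new data, so 'old' jumps to the previous 'head' and 'head' advances by one buffer length; without a wrap, 'head' only needs the extra lap when it has passed 'old' inside the buffer. A standalone sketch of just that adjustment (hypothetical helper name):

#include <stdint.h>
#include <stdbool.h>

static void adjust_snapshot_offsets(bool wrapped, uint64_t buf_len,
				    uint64_t *head, uint64_t *old)
{
	if (wrapped) {
		*old = *head;		/* all prior data was overwritten */
		*head += buf_len;	/* one full buffer of new data */
	} else if (*old > *head) {
		*head += buf_len;	/* head lapped old within the buffer */
	}
}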
/linux-4.4.14/drivers/target/tcm_fc/ |
H A D | tfc_sess.c | 166 struct hlist_head *head; ft_sess_get() local
174 head = &tport->hash[ft_sess_hash(port_id)]; hlist_for_each_entry_rcu()
175 hlist_for_each_entry_rcu(sess, head, hash) { hlist_for_each_entry_rcu()
197 struct hlist_head *head; ft_sess_create() local
199 head = &tport->hash[ft_sess_hash(port_id)]; ft_sess_create()
200 hlist_for_each_entry_rcu(sess, head, hash) ft_sess_create()
219 hlist_add_head_rcu(&sess->hash, head); ft_sess_create()
250 struct hlist_head *head; ft_sess_delete() local
253 head = &tport->hash[ft_sess_hash(port_id)]; hlist_for_each_entry_rcu()
254 hlist_for_each_entry_rcu(sess, head, hash) { hlist_for_each_entry_rcu()
269 struct hlist_head *head; ft_sess_delete_all() local
272 for (head = tport->hash; ft_sess_delete_all()
273 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) { hlist_for_each_entry_rcu()
274 hlist_for_each_entry_rcu(sess, head, hash) { hlist_for_each_entry_rcu()
|
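The tfc_sess.c hits show the standard RCU hash-table pattern: hash the port_id to pick a bucket, walk the bucket with hlist_for_each_entry_rcu() inside rcu_read_lock(), and take a reference before leaving the read-side critical section. A condensed sketch under those assumptions (hypothetical struct sess and bucket count; the real code also scopes the lookup to a tport):

#include <linux/rculist.h>
#include <linux/kref.h>
#include <linux/types.h>

#define SESS_HASH_SIZE 32		/* hypothetical bucket count */

struct sess {
	struct hlist_node hash;
	u32 port_id;
	struct kref kref;
};

static struct hlist_head sess_hash[SESS_HASH_SIZE];

static struct hlist_head *sess_bucket(u32 port_id)
{
	return &sess_hash[port_id % SESS_HASH_SIZE];
}

/* Lock-free lookup, shaped like ft_sess_get() above. */
static struct sess *sess_get(u32 port_id)
{
	struct sess *s, *found = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(s, sess_bucket(port_id), hash) {
		if (s->port_id == port_id) {
			kref_get(&s->kref);	/* pin before rcu unlock */
			found = s;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}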