
Searched refs:head (Results 1 – 200 of 1974) sorted by relevance


/linux-4.4.14/drivers/scsi/aic7xxx/
queue.h
112 #define SLIST_HEAD_INITIALIZER(head) \ argument
123 #define SLIST_EMPTY(head) ((head)->slh_first == NULL) argument
125 #define SLIST_FIRST(head) ((head)->slh_first) argument
127 #define SLIST_FOREACH(var, head, field) \ argument
128 for ((var) = SLIST_FIRST((head)); \
132 #define SLIST_INIT(head) do { \ argument
133 SLIST_FIRST((head)) = NULL; \
141 #define SLIST_INSERT_HEAD(head, elm, field) do { \ argument
142 SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
143 SLIST_FIRST((head)) = (elm); \
[all …]
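
The queue.h hits above are the aic7xxx driver's private copy of the BSD singly-linked-list (SLIST) macros. As a rough illustration only, here is a minimal userspace sketch of the same API, assuming a BSD-style <sys/queue.h> (glibc and the BSDs ship one); the item type and values are invented for the example:

    /* Sketch: push three items and walk the list with the SLIST macros. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct item {
        int value;
        SLIST_ENTRY(item) link;               /* embedded "next" pointer */
    };

    SLIST_HEAD(item_list, item);              /* declares struct item_list */

    int main(void)
    {
        struct item_list list = SLIST_HEAD_INITIALIZER(list);
        struct item *it;

        SLIST_INIT(&list);
        for (int i = 0; i < 3; i++) {
            it = malloc(sizeof(*it));
            it->value = i;
            SLIST_INSERT_HEAD(&list, it, link);   /* O(1) push at the head */
        }
        SLIST_FOREACH(it, &list, link)            /* walks from slh_first */
            printf("%d\n", it->value);
        return 0;
    }
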
/linux-4.4.14/security/tomoyo/
common.c
211 static bool tomoyo_flush(struct tomoyo_io_buffer *head) in tomoyo_flush() argument
213 while (head->r.w_pos) { in tomoyo_flush()
214 const char *w = head->r.w[0]; in tomoyo_flush()
217 if (len > head->read_user_buf_avail) in tomoyo_flush()
218 len = head->read_user_buf_avail; in tomoyo_flush()
221 if (copy_to_user(head->read_user_buf, w, len)) in tomoyo_flush()
223 head->read_user_buf_avail -= len; in tomoyo_flush()
224 head->read_user_buf += len; in tomoyo_flush()
227 head->r.w[0] = w; in tomoyo_flush()
231 if (head->poll) { in tomoyo_flush()
[all …]
gc.c
41 struct tomoyo_io_buffer *head; in tomoyo_struct_used_by_io_buffer() local
45 list_for_each_entry(head, &tomoyo_io_buffer_list, list) { in tomoyo_struct_used_by_io_buffer()
46 head->users++; in tomoyo_struct_used_by_io_buffer()
48 mutex_lock(&head->io_sem); in tomoyo_struct_used_by_io_buffer()
49 if (head->r.domain == element || head->r.group == element || in tomoyo_struct_used_by_io_buffer()
50 head->r.acl == element || &head->w.domain->list == element) in tomoyo_struct_used_by_io_buffer()
52 mutex_unlock(&head->io_sem); in tomoyo_struct_used_by_io_buffer()
54 head->users--; in tomoyo_struct_used_by_io_buffer()
72 struct tomoyo_io_buffer *head; in tomoyo_name_used_by_io_buffer() local
77 list_for_each_entry(head, &tomoyo_io_buffer_list, list) { in tomoyo_name_used_by_io_buffer()
[all …]
group.c
21 return container_of(a, struct tomoyo_path_group, head)->member_name == in tomoyo_same_path_group()
22 container_of(b, struct tomoyo_path_group, head)->member_name; in tomoyo_same_path_group()
36 return !memcmp(&container_of(a, struct tomoyo_number_group, head) in tomoyo_same_number_group()
38 &container_of(b, struct tomoyo_number_group, head) in tomoyo_same_number_group()
40 sizeof(container_of(a, struct tomoyo_number_group, head) in tomoyo_same_number_group()
56 head); in tomoyo_same_address_group()
58 head); in tomoyo_same_address_group()
85 error = tomoyo_update_policy(&e.head, sizeof(e), param, in tomoyo_write_group()
93 error = tomoyo_update_policy(&e.head, sizeof(e), param, in tomoyo_write_group()
105 error = tomoyo_update_policy(&e.head, sizeof(e), param, in tomoyo_write_group()
[all …]
memory.c
108 list_for_each_entry(group, list, head.list) { in tomoyo_get_group()
110 atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS) in tomoyo_get_group()
112 atomic_inc(&group->head.users); in tomoyo_get_group()
120 atomic_set(&entry->head.users, 1); in tomoyo_get_group()
121 list_add_tail_rcu(&entry->head.list, list); in tomoyo_get_group()
152 struct list_head *head; in tomoyo_get_name() local
158 head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)]; in tomoyo_get_name()
161 list_for_each_entry(ptr, head, head.list) { in tomoyo_get_name()
163 atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS) in tomoyo_get_name()
165 atomic_inc(&ptr->head.users); in tomoyo_get_name()
[all …]
file.c
254 head); in tomoyo_check_path_acl()
276 container_of(ptr, typeof(*acl), head); in tomoyo_check_path_number_acl()
296 container_of(ptr, typeof(*acl), head); in tomoyo_check_path2_acl()
315 container_of(ptr, typeof(*acl), head); in tomoyo_check_mkdev_acl()
338 const struct tomoyo_path_acl *p1 = container_of(a, typeof(*p1), head); in tomoyo_same_path_acl()
339 const struct tomoyo_path_acl *p2 = container_of(b, typeof(*p2), head); in tomoyo_same_path_acl()
356 u16 * const a_perm = &container_of(a, struct tomoyo_path_acl, head) in tomoyo_merge_path_acl()
359 const u16 b_perm = container_of(b, struct tomoyo_path_acl, head)->perm; in tomoyo_merge_path_acl()
382 .head.type = TOMOYO_TYPE_PATH_ACL, in tomoyo_update_path_acl()
389 error = tomoyo_update_domain(&e.head, sizeof(e), param, in tomoyo_update_path_acl()
[all …]
common.h
510 struct tomoyo_shared_acl_head head; member
538 struct tomoyo_shared_acl_head head; member
545 struct tomoyo_acl_head head; member
551 struct tomoyo_acl_head head; member
557 struct tomoyo_acl_head head; member
649 struct tomoyo_shared_acl_head head; member
694 struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MANUAL_TASK_ACL */ member
705 struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_ACL */ member
715 struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_NUMBER_ACL */ member
724 struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MKDEV_ACL */ member
[all …]
environ.c
21 container_of(ptr, typeof(*acl), head); in tomoyo_check_env_acl()
78 const struct tomoyo_env_acl *p1 = container_of(a, typeof(*p1), head); in tomoyo_same_env_acl()
79 const struct tomoyo_env_acl *p2 = container_of(b, typeof(*p2), head); in tomoyo_same_env_acl()
95 struct tomoyo_env_acl e = { .head.type = TOMOYO_TYPE_ENV_ACL }; in tomoyo_write_env()
104 error = tomoyo_update_domain(&e.head, sizeof(e), param, in tomoyo_write_env()
/linux-4.4.14/drivers/gpu/drm/nouveau/dispnv04/
hw.c
39 NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value) in NVWriteVgaSeq() argument
41 NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index); in NVWriteVgaSeq()
42 NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value); in NVWriteVgaSeq()
46 NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index) in NVReadVgaSeq() argument
48 NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index); in NVReadVgaSeq()
49 return NVReadPRMVIO(dev, head, NV_PRMVIO_SR); in NVReadVgaSeq()
53 NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value) in NVWriteVgaGr() argument
55 NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index); in NVWriteVgaGr()
56 NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value); in NVWriteVgaGr()
60 NVReadVgaGr(struct drm_device *dev, int head, uint8_t index) in NVReadVgaGr() argument
[all …]
hw.h
38 void NVWriteVgaSeq(struct drm_device *, int head, uint8_t index, uint8_t value);
39 uint8_t NVReadVgaSeq(struct drm_device *, int head, uint8_t index);
40 void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value);
41 uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index);
43 void NVBlankScreen(struct drm_device *, int head, bool blank);
49 void nouveau_hw_save_state(struct drm_device *, int head,
51 void nouveau_hw_load_state(struct drm_device *, int head,
53 void nouveau_hw_load_state_palette(struct drm_device *, int head,
61 int head, uint32_t reg) in NVReadCRTC() argument
65 if (head) in NVReadCRTC()
[all …]
tvnv04.c
89 int head = nouveau_crtc(encoder->crtc)->index; in nv04_tv_dpms() local
90 crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX); in nv04_tv_dpms()
92 state->pllsel |= head ? PLLSEL_TV_CRTC2_MASK : in nv04_tv_dpms()
98 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A); in nv04_tv_dpms()
106 static void nv04_tv_bind(struct drm_device *dev, int head, bool bind) in nv04_tv_bind() argument
108 struct nv04_crtc_reg *state = &nv04_display(dev)->mode_reg.crtc_reg[head]; in nv04_tv_bind()
117 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX, in nv04_tv_bind()
119 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49, in nv04_tv_bind()
121 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, in nv04_tv_bind()
128 int head = nouveau_crtc(encoder->crtc)->index; in nv04_tv_prepare() local
[all …]
tvnv17.c
54 int head; in nv42_tv_sample_load() local
62 head = (dacclk & 0x100) >> 8; in nv42_tv_sample_load()
67 fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL); in nv42_tv_sample_load()
68 fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START); in nv42_tv_sample_load()
69 fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END); in nv42_tv_sample_load()
70 fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); in nv42_tv_sample_load()
72 ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c); in nv42_tv_sample_load()
73 ctv_14 = NVReadRAMDAC(dev, head, 0x680c14); in nv42_tv_sample_load()
74 ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c); in nv42_tv_sample_load()
80 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343); in nv42_tv_sample_load()
[all …]
dfp.c
69 int head, bool dl) in nv04_dfp_bind_head() argument
81 if (head != ramdac) in nv04_dfp_bind_head()
93 void nv04_dfp_disable(struct drm_device *dev, int head) in nv04_dfp_disable() argument
97 if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) & in nv04_dfp_disable()
103 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, in nv04_dfp_disable()
108 crtcstate[head].fp_control = FP_TG_CONTROL_OFF; in nv04_dfp_disable()
109 crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] &= in nv04_dfp_disable()
135 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { in nv04_dfp_update_fp_control()
171 list_for_each_entry(slave, &dev->mode_config.encoder_list, head) { in get_tmds_slave()
204 struct nouveau_encoder *nv_encoder, int head) in nv04_dfp_prepare_sel_clk() argument
[all …]
disp.c
94 &dev->mode_config.connector_list, head) { in nv04_display_create()
102 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { in nv04_display_create()
110 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) in nv04_display_create()
113 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { in nv04_display_create()
133 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { in nv04_display_destroy()
142 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { in nv04_display_destroy()
148 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) in nv04_display_destroy()
173 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { in nv04_display_init()
179 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) in nv04_display_init()
dac.c
245 int head; in nv17_dac_sample_load() local
282 head = (saved_routput & 0x100) >> 8; in nv17_dac_sample_load()
285 if (!(NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX) & 0xC0)) in nv17_dac_sample_load()
286 head ^= 1; in nv17_dac_sample_load()
289 routput = (saved_routput & 0xfffffece) | head << 8; in nv17_dac_sample_load()
304 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, in nv17_dac_sample_load()
306 temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); in nv17_dac_sample_load()
307 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, in nv17_dac_sample_load()
315 temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); in nv17_dac_sample_load()
316 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, in nv17_dac_sample_load()
[all …]
/linux-4.4.14/drivers/staging/rdma/amso1100/
c2_alloc.c
40 struct sp_chunk **head) in c2_alloc_mqsp_chunk() argument
55 new_head->head = 0; in c2_alloc_mqsp_chunk()
67 *head = new_head; in c2_alloc_mqsp_chunk()
89 __be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head, in c2_alloc_mqsp() argument
94 while (head) { in c2_alloc_mqsp()
95 mqsp = head->head; in c2_alloc_mqsp()
97 head->head = head->shared_ptr[mqsp]; in c2_alloc_mqsp()
99 } else if (head->next == NULL) { in c2_alloc_mqsp()
100 if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) == in c2_alloc_mqsp()
102 head = head->next; in c2_alloc_mqsp()
[all …]
/linux-4.4.14/include/linux/
plist.h
95 #define PLIST_HEAD_INIT(head) \ argument
97 .node_list = LIST_HEAD_INIT((head).node_list) \
104 #define PLIST_HEAD(head) \ argument
105 struct plist_head head = PLIST_HEAD_INIT(head)
124 plist_head_init(struct plist_head *head) in plist_head_init() argument
126 INIT_LIST_HEAD(&head->node_list); in plist_head_init()
141 extern void plist_add(struct plist_node *node, struct plist_head *head);
142 extern void plist_del(struct plist_node *node, struct plist_head *head);
144 extern void plist_requeue(struct plist_node *node, struct plist_head *head);
151 #define plist_for_each(pos, head) \ argument
[all …]
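
The plist.h hits above are the priority-sorted list used by, for example, the futex and pm-qos code. A hedged sketch of typical usage, assuming kernel context and <linux/plist.h>; the waiter type and priorities are invented:

    #include <linux/plist.h>
    #include <linux/slab.h>

    struct waiter {
        struct plist_node node;
        int id;
    };

    static PLIST_HEAD(wait_list);             /* the PLIST_HEAD() macro above */

    static void add_waiter(struct waiter *w, int prio)
    {
        plist_node_init(&w->node, prio);
        plist_add(&w->node, &wait_list);      /* kept sorted by prio */
    }

    static struct waiter *pop_waiter(void)
    {
        struct waiter *w;

        if (plist_head_empty(&wait_list))
            return NULL;
        w = plist_first_entry(&wait_list, struct waiter, node);
        plist_del(&w->node, &wait_list);      /* lowest prio value comes out first */
        return w;
    }
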
list.h
61 static inline void list_add(struct list_head *new, struct list_head *head) in list_add() argument
63 __list_add(new, head, head->next); in list_add()
75 static inline void list_add_tail(struct list_head *new, struct list_head *head) in list_add_tail() argument
77 __list_add(new, head->prev, head); in list_add_tail()
154 static inline void list_move(struct list_head *list, struct list_head *head) in list_move() argument
157 list_add(list, head); in list_move()
166 struct list_head *head) in list_move_tail() argument
169 list_add_tail(list, head); in list_move_tail()
178 const struct list_head *head) in list_is_last() argument
180 return list->next == head; in list_is_last()
[all …]
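
These list.h hits are the core doubly-linked list that many of the "head" members in these results embed. A hedged sketch of the usual embed-and-iterate pattern, assuming kernel context; struct job is invented for the example:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct job {
        int id;
        struct list_head link;                /* embedded list node */
    };

    static LIST_HEAD(job_queue);              /* the list head itself */

    static void queue_job(struct job *j)
    {
        list_add_tail(&j->link, &job_queue);  /* FIFO: insert just before head */
    }

    static void drain_jobs(void)
    {
        struct job *j, *tmp;

        list_for_each_entry_safe(j, tmp, &job_queue, link) {
            list_del(&j->link);
            kfree(j);
        }
    }
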
btree-type.h
13 static inline void BTREE_FN(init_mempool)(BTREE_TYPE_HEAD *head, in BTREE_FN()
16 btree_init_mempool(&head->h, mempool); in BTREE_FN()
19 static inline int BTREE_FN(init)(BTREE_TYPE_HEAD *head) in BTREE_FN()
21 return btree_init(&head->h); in BTREE_FN()
24 static inline void BTREE_FN(destroy)(BTREE_TYPE_HEAD *head) in BTREE_FN()
26 btree_destroy(&head->h); in BTREE_FN()
37 static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) in BTREE_FN()
40 return btree_lookup(&head->h, BTREE_TYPE_GEO, &_key); in BTREE_FN()
43 static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, in BTREE_FN()
47 return btree_insert(&head->h, BTREE_TYPE_GEO, &_key, val, gfp); in BTREE_FN()
[all …]
btree-128.h
5 static inline void btree_init_mempool128(struct btree_head128 *head, in btree_init_mempool128() argument
8 btree_init_mempool(&head->h, mempool); in btree_init_mempool128()
11 static inline int btree_init128(struct btree_head128 *head) in btree_init128() argument
13 return btree_init(&head->h); in btree_init128()
16 static inline void btree_destroy128(struct btree_head128 *head) in btree_destroy128() argument
18 btree_destroy(&head->h); in btree_destroy128()
21 static inline void *btree_lookup128(struct btree_head128 *head, u64 k1, u64 k2) in btree_lookup128() argument
24 return btree_lookup(&head->h, &btree_geo128, (unsigned long *)&key); in btree_lookup128()
27 static inline void *btree_get_prev128(struct btree_head128 *head, in btree_get_prev128() argument
33 val = btree_get_prev(&head->h, &btree_geo128, in btree_get_prev128()
[all …]
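
The btree-128.h hits wrap the generic lib/btree code with a pair of u64s packed into one 128-bit key. A hedged sketch, assuming kernel context and <linux/btree.h>; the keys and stored value are invented:

    #include <linux/btree.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    static struct btree_head128 map;
    static int payload = 42;

    static int btree128_demo(void)
    {
        int err = btree_init128(&map);

        if (err)
            return err;
        err = btree_insert128(&map, 0x1234ULL, 0x5678ULL, &payload, GFP_KERNEL);
        if (err)
            goto out;

        /* Lookup returns the stored pointer, or NULL if the (k1, k2) pair is absent. */
        if (btree_lookup128(&map, 0x1234ULL, 0x5678ULL) != &payload)
            err = -EINVAL;
    out:
        btree_destroy128(&map);
        return err;
    }
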
btree.h
66 void btree_init_mempool(struct btree_head *head, mempool_t *mempool);
78 int __must_check btree_init(struct btree_head *head);
88 void btree_destroy(struct btree_head *head);
99 void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
114 int __must_check btree_insert(struct btree_head *head, struct btree_geo *geo,
127 int btree_update(struct btree_head *head, struct btree_geo *geo,
139 void *btree_remove(struct btree_head *head, struct btree_geo *geo,
171 void *btree_last(struct btree_head *head, struct btree_geo *geo,
185 void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
190 size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
[all …]
rculist.h
78 static inline void list_add_rcu(struct list_head *new, struct list_head *head) in list_add_rcu() argument
80 __list_add_rcu(new, head, head->next); in list_add_rcu()
100 struct list_head *head) in list_add_tail_rcu() argument
102 __list_add_rcu(new, head->prev, head); in list_add_tail_rcu()
199 struct list_head *head, in list_splice_init_rcu() argument
204 struct list_head *at = head->next; in list_splice_init_rcu()
235 rcu_assign_pointer(list_next_rcu(head), first); in list_splice_init_rcu()
236 first->prev = head; in list_splice_init_rcu()
302 #define list_for_each_entry_rcu(pos, head, member) \ argument
303 for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
[all …]
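
The rculist.h hits are the RCU-aware variants of the list primitives above. A hedged sketch of the usual pattern, writers add under a lock while readers traverse under rcu_read_lock(), assuming kernel context; struct entry is invented:

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct entry {
        int key;
        struct list_head node;
    };

    static LIST_HEAD(entries);
    static DEFINE_SPINLOCK(entries_lock);

    static void add_entry(struct entry *e)
    {
        spin_lock(&entries_lock);
        list_add_rcu(&e->node, &entries);     /* publishes e to readers */
        spin_unlock(&entries_lock);
    }

    static bool find_entry(int key)
    {
        struct entry *e;
        bool found = false;

        rcu_read_lock();
        list_for_each_entry_rcu(e, &entries, node) {
            if (e->key == key) {
                found = true;
                break;
            }
        }
        rcu_read_unlock();
        return found;
    }
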
timerqueue.h
14 struct rb_root head; member
19 extern bool timerqueue_add(struct timerqueue_head *head,
21 extern bool timerqueue_del(struct timerqueue_head *head,
35 struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head) in timerqueue_getnext() argument
37 return head->next; in timerqueue_getnext()
45 static inline void timerqueue_init_head(struct timerqueue_head *head) in timerqueue_init_head() argument
47 head->head = RB_ROOT; in timerqueue_init_head()
48 head->next = NULL; in timerqueue_init_head()
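
timerqueue is the rb-tree-backed queue behind hrtimers; head->next caches the node that expires soonest. A hedged sketch, assuming kernel context and <linux/timerqueue.h>; the expiry values are invented:

    #include <linux/timerqueue.h>
    #include <linux/ktime.h>

    static struct timerqueue_head tq;
    static struct timerqueue_node a, b;

    static void timerqueue_demo(void)
    {
        struct timerqueue_node *next;

        timerqueue_init_head(&tq);
        timerqueue_init(&a);
        timerqueue_init(&b);
        a.expires = ns_to_ktime(2000);
        b.expires = ns_to_ktime(1000);

        timerqueue_add(&tq, &a);
        timerqueue_add(&tq, &b);

        next = timerqueue_getnext(&tq);       /* == &b, the earlier expiry */
        timerqueue_del(&tq, next);            /* now &a is next to expire */
    }
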
circ_buf.h
10 int head; member
15 #define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1)) argument
20 #define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size)) argument
25 #define CIRC_CNT_TO_END(head,tail,size) \ argument
27 int n = ((head) + end) & ((size)-1); \
31 #define CIRC_SPACE_TO_END(head,tail,size) \ argument
32 ({int end = (size) - 1 - (head); \
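
circ_buf.h is just the index arithmetic for a power-of-two ring buffer: head is where the producer writes, tail where the consumer reads. A minimal userspace sketch that copies the two macros shown above (illustrative only):

    #include <stdio.h>

    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    int main(void)
    {
        char buf[8];                          /* capacity must be a power of two */
        int head = 0, tail = 0;

        /* Producer: write while space remains (one slot always stays free). */
        for (char c = 'a'; c <= 'e' && CIRC_SPACE(head, tail, sizeof(buf)); c++) {
            buf[head] = c;
            head = (head + 1) & (sizeof(buf) - 1);
        }

        /* Consumer: drain whatever is pending. */
        while (CIRC_CNT(head, tail, sizeof(buf))) {
            putchar(buf[tail]);
            tail = (tail + 1) & (sizeof(buf) - 1);
        }
        putchar('\n');                        /* prints "abcde" */
        return 0;
    }
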
llist.h
158 static inline bool llist_empty(const struct llist_head *head) in llist_empty() argument
160 return ACCESS_ONCE(head->first) == NULL; in llist_empty()
170 struct llist_head *head);
178 static inline bool llist_add(struct llist_node *new, struct llist_head *head) in llist_add() argument
180 return llist_add_batch(new, new, head); in llist_add()
191 static inline struct llist_node *llist_del_all(struct llist_head *head) in llist_del_all() argument
193 return xchg(&head->first, NULL); in llist_del_all()
196 extern struct llist_node *llist_del_first(struct llist_head *head);
198 struct llist_node *llist_reverse_order(struct llist_node *head);
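
llist.h is the lock-less singly-linked list: any context may push with llist_add(), and one consumer grabs the whole chain with llist_del_all(). A hedged sketch, assuming kernel context; struct work_item is invented:

    #include <linux/llist.h>
    #include <linux/slab.h>

    struct work_item {
        int id;
        struct llist_node lnode;
    };

    static LLIST_HEAD(pending);

    static void producer(struct work_item *w)
    {
        llist_add(&w->lnode, &pending);       /* cmpxchg push, no locking */
    }

    static void consumer(void)
    {
        struct llist_node *batch = llist_del_all(&pending);
        struct work_item *w, *tmp;

        /* Entries come back newest-first; reverse for submission order. */
        batch = llist_reverse_order(batch);
        llist_for_each_entry_safe(w, tmp, batch, lnode)
            kfree(w);
    }
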
notifier.h
61 struct notifier_block __rcu *head; member
66 struct notifier_block __rcu *head; member
70 struct notifier_block __rcu *head; member
76 struct notifier_block __rcu *head; member
81 (name)->head = NULL; \
85 (name)->head = NULL; \
88 (name)->head = NULL; \
98 .head = NULL }
101 .head = NULL }
103 .head = NULL }
seq_file.h
197 extern struct list_head *seq_list_start(struct list_head *head,
199 extern struct list_head *seq_list_start_head(struct list_head *head,
201 extern struct list_head *seq_list_next(void *v, struct list_head *head,
208 extern struct hlist_node *seq_hlist_start(struct hlist_head *head,
210 extern struct hlist_node *seq_hlist_start_head(struct hlist_head *head,
212 extern struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head,
215 extern struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head,
217 extern struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head,
220 struct hlist_head *head,
224 extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t…
[all …]
rcupdate.h
162 void call_rcu(struct rcu_head *head,
193 void call_rcu_bh(struct rcu_head *head,
215 void call_rcu_sched(struct rcu_head *head,
224 struct rcu_head head; member
227 void wakeme_after_rcu(struct rcu_head *head);
277 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
442 void init_rcu_head(struct rcu_head *head);
443 void destroy_rcu_head(struct rcu_head *head);
444 void init_rcu_head_on_stack(struct rcu_head *head);
445 void destroy_rcu_head_on_stack(struct rcu_head *head);
[all …]
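
rcupdate.h declares the call_rcu() family that most of the rcu_head members in these results feed into. A hedged sketch of the standard deferred-free pattern, assuming kernel context; struct record is invented:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/kernel.h>

    struct record {
        int data;
        struct rcu_head rcu;                  /* embedded callback head */
    };

    static void record_free_rcu(struct rcu_head *head)
    {
        /* Runs only after every pre-existing RCU reader has finished. */
        struct record *r = container_of(head, struct record, rcu);

        kfree(r);
    }

    static void record_release(struct record *r)
    {
        /* Caller has already unlinked r from all RCU-visible structures. */
        call_rcu(&r->rcu, record_free_rcu);
    }
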
bio.h
597 struct bio *head; member
603 return bl->head == NULL; in bio_list_empty()
608 bl->head = bl->tail = NULL; in bio_list_init()
614 for (bio = (bl)->head; bio; bio = bio->bi_next)
634 bl->head = bio; in bio_list_add()
641 bio->bi_next = bl->head; in bio_list_add_head()
643 bl->head = bio; in bio_list_add_head()
651 if (!bl2->head) in bio_list_merge()
655 bl->tail->bi_next = bl2->head; in bio_list_merge()
657 bl->head = bl2->head; in bio_list_merge()
[all …]
resource_ext.h
40 extern void resource_list_free(struct list_head *head);
43 struct list_head *head) in resource_list_add() argument
45 list_add(&entry->node, head); in resource_list_add()
49 struct list_head *head) in resource_list_add_tail() argument
51 list_add_tail(&entry->node, head); in resource_list_add_tail()
rculist_nulls.h
40 #define hlist_nulls_first_rcu(head) \ argument
41 (*((struct hlist_nulls_node __rcu __force **)&(head)->first))
113 #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ argument
115 pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/disp/
gf119.c
34 gf119_disp_vblank_init(struct nv50_disp *disp, int head) in gf119_disp_vblank_init() argument
37 nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001); in gf119_disp_vblank_init()
41 gf119_disp_vblank_fini(struct nv50_disp *disp, int head) in gf119_disp_vblank_fini() argument
44 nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000); in gf119_disp_vblank_fini()
48 exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl, in exec_lookup() argument
77 mask |= 0x0100 << head; in exec_lookup()
79 list_for_each_entry(outp, &disp->base.outp, head) { in exec_lookup()
95 exec_script(struct nv50_disp *disp, int head, int id) in exec_script() argument
106 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) { in exec_script()
108 if (ctrl & (1 << head)) in exec_script()
[all …]
vga.c
27 nvkm_rdport(struct nvkm_device *device, int head, u16 port) in nvkm_rdport() argument
35 return nvkm_rd08(device, 0x601000 + (head * 0x2000) + port); in nvkm_rdport()
41 head = 0; /* CR44 selects head */ in nvkm_rdport()
42 return nvkm_rd08(device, 0x0c0000 + (head * 0x2000) + port); in nvkm_rdport()
49 nvkm_wrport(struct nvkm_device *device, int head, u16 port, u8 data) in nvkm_wrport() argument
57 nvkm_wr08(device, 0x601000 + (head * 0x2000) + port, data); in nvkm_wrport()
63 head = 0; /* CR44 selects head */ in nvkm_wrport()
64 nvkm_wr08(device, 0x0c0000 + (head * 0x2000) + port, data); in nvkm_wrport()
69 nvkm_rdvgas(struct nvkm_device *device, int head, u8 index) in nvkm_rdvgas() argument
71 nvkm_wrport(device, head, 0x03c4, index); in nvkm_rdvgas()
[all …]
nv50.c
100 nv50_disp_vblank_fini_(struct nvkm_disp *base, int head) in nv50_disp_vblank_fini_() argument
103 disp->func->head.vblank_fini(disp, head); in nv50_disp_vblank_fini_()
107 nv50_disp_vblank_init_(struct nvkm_disp *base, int head) in nv50_disp_vblank_init_() argument
110 disp->func->head.vblank_init(disp, head); in nv50_disp_vblank_init_()
139 .head.vblank_init = nv50_disp_vblank_init_,
140 .head.vblank_fini = nv50_disp_vblank_fini_,
164 nv50_disp_vblank_fini(struct nv50_disp *disp, int head) in nv50_disp_vblank_fini() argument
167 nvkm_mask(device, 0x61002c, (4 << head), 0); in nv50_disp_vblank_fini()
171 nv50_disp_vblank_init(struct nv50_disp *disp, int head) in nv50_disp_vblank_init() argument
174 nvkm_mask(device, 0x61002c, (4 << head), (4 << head)); in nv50_disp_vblank_init()
[all …]
oimmnv50.c
44 int head, ret; in nv50_disp_oimm_new() local
49 args->v0.version, args->v0.head); in nv50_disp_oimm_new()
50 if (args->v0.head > disp->base.head.nr) in nv50_disp_oimm_new()
52 head = args->v0.head; in nv50_disp_oimm_new()
56 return nv50_disp_chan_new_(func, mthd, root, chid + head, in nv50_disp_oimm_new()
57 head, oclass, pobject); in nv50_disp_oimm_new()
cursnv50.c
44 int head, ret; in nv50_disp_curs_new() local
49 args->v0.version, args->v0.head); in nv50_disp_curs_new()
50 if (args->v0.head > disp->base.head.nr) in nv50_disp_curs_new()
52 head = args->v0.head; in nv50_disp_curs_new()
56 return nv50_disp_chan_new_(func, mthd, root, chid + head, in nv50_disp_curs_new()
57 head, oclass, pobject); in nv50_disp_curs_new()
base.c
39 nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int head) in nvkm_disp_vblank_fini() argument
42 disp->func->head.vblank_fini(disp, head); in nvkm_disp_vblank_fini()
46 nvkm_disp_vblank_init(struct nvkm_event *event, int type, int head) in nvkm_disp_vblank_init() argument
49 disp->func->head.vblank_init(disp, head); in nvkm_disp_vblank_init()
65 if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) { in nvkm_disp_vblank_ctor()
67 notify->index = req->v0.head; in nvkm_disp_vblank_ctor()
83 nvkm_disp_vblank(struct nvkm_disp *disp, int head) in nvkm_disp_vblank() argument
86 nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep)); in nvkm_disp_vblank()
103 list_for_each_entry(outp, &disp->outp, head) { in nvkm_disp_hpd_ctor()
215 list_for_each_entry(outp, &disp->outp, head) { in nvkm_disp_fini()
[all …]
ovlynv50.c
44 int head, ret; in nv50_disp_ovly_new() local
51 args->v0.version, args->v0.pushbuf, args->v0.head); in nv50_disp_ovly_new()
52 if (args->v0.head > disp->base.head.nr) in nv50_disp_ovly_new()
55 head = args->v0.head; in nv50_disp_ovly_new()
59 return nv50_disp_dmac_new_(func, mthd, root, chid + head, in nv50_disp_ovly_new()
60 head, push, oclass, pobject); in nv50_disp_ovly_new()
basenv50.c
44 int head, ret; in nv50_disp_base_new() local
51 args->v0.version, args->v0.pushbuf, args->v0.head); in nv50_disp_base_new()
52 if (args->v0.head > disp->base.head.nr) in nv50_disp_base_new()
55 head = args->v0.head; in nv50_disp_base_new()
59 return nv50_disp_dmac_new_(func, mthd, root, chid + head, in nv50_disp_base_new()
60 head, push, oclass, pobject); in nv50_disp_base_new()
rootnv50.c
38 const u32 blanke = nvkm_rd32(device, 0x610aec + (head * 0x540)); in nv50_disp_root_scanoutpos()
39 const u32 blanks = nvkm_rd32(device, 0x610af4 + (head * 0x540)); in nv50_disp_root_scanoutpos()
40 const u32 total = nvkm_rd32(device, 0x610afc + (head * 0x540)); in nv50_disp_root_scanoutpos()
58 nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff; in nv50_disp_root_scanoutpos()
61 nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff; in nv50_disp_root_scanoutpos()
81 int head, ret; in nv50_disp_root_mthd_() local
89 args->v0.version, args->v0.method, args->v0.head); in nv50_disp_root_mthd_()
91 head = args->v0.head; in nv50_disp_root_mthd_()
101 head = ffs((mask >> 8) & 0x0f) - 1; in nv50_disp_root_mthd_()
105 if (head < 0 || head >= disp->base.head.nr) in nv50_disp_root_mthd_()
[all …]
rootnv04.c
39 void *data, u32 size, int head) in nv04_disp_scanoutpos() argument
43 const u32 hoff = head * 0x2000; in nv04_disp_scanoutpos()
88 int head, ret; in nv04_disp_mthd() local
93 args->v0.version, args->v0.method, args->v0.head); in nv04_disp_mthd()
95 head = args->v0.head; in nv04_disp_mthd()
99 if (head < 0 || head >= 2) in nv04_disp_mthd()
104 return nv04_disp_scanoutpos(root, data, size, head); in nv04_disp_mthd()
nv04.c
33 nv04_disp_vblank_init(struct nvkm_disp *disp, int head) in nv04_disp_vblank_init() argument
36 nvkm_wr32(device, 0x600140 + (head * 0x2000) , 0x00000001); in nv04_disp_vblank_init()
40 nv04_disp_vblank_fini(struct nvkm_disp *disp, int head) in nv04_disp_vblank_fini() argument
43 nvkm_wr32(device, 0x600140 + (head * 0x2000) , 0x00000000); in nv04_disp_vblank_fini()
77 .head.vblank_init = nv04_disp_vblank_init,
78 .head.vblank_fini = nv04_disp_vblank_fini,
rootgf119.c
38 const u32 total = nvkm_rd32(device, 0x640414 + (head * 0x300)); in gf119_disp_root_scanoutpos()
39 const u32 blanke = nvkm_rd32(device, 0x64041c + (head * 0x300)); in gf119_disp_root_scanoutpos()
40 const u32 blanks = nvkm_rd32(device, 0x640420 + (head * 0x300)); in gf119_disp_root_scanoutpos()
58 nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff; in gf119_disp_root_scanoutpos()
61 nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff; in gf119_disp_root_scanoutpos()
90 for (i = 0; i < disp->base.head.nr; i++) { in gf119_disp_root_init()
136 for (i = 0; i < disp->base.head.nr; i++) in gf119_disp_root_init()
priv.h
11 void nvkm_disp_vblank(struct nvkm_disp *, int head);
33 void (*vblank_init)(struct nvkm_disp *, int head);
34 void (*vblank_fini)(struct nvkm_disp *, int head);
35 } head; member
nv50.h
10 #define NV50_DISP_MTHD_V0 NV50_DISP_MTHD_, int head
11 #define NV50_DISP_MTHD_V1 NV50_DISP_MTHD_, int head, struct nvkm_output *outp
78 void (*vblank_init)(struct nv50_disp *, int head);
79 void (*vblank_fini)(struct nv50_disp *, int head);
81 } head; member
/linux-4.4.14/net/netlabel/
netlabel_addrlist.h
96 #define netlbl_af4list_foreach(iter, head) \ argument
97 for (iter = __af4list_valid((head)->next, head); \
98 &iter->list != (head); \
99 iter = __af4list_valid(iter->list.next, head))
101 #define netlbl_af4list_foreach_rcu(iter, head) \ argument
102 for (iter = __af4list_valid_rcu((head)->next, head); \
103 &iter->list != (head); \
104 iter = __af4list_valid_rcu(iter->list.next, head))
106 #define netlbl_af4list_foreach_safe(iter, tmp, head) \ argument
107 for (iter = __af4list_valid((head)->next, head), \
[all …]
netlabel_addrlist.c
61 struct list_head *head) in netlbl_af4list_search() argument
65 list_for_each_entry_rcu(iter, head, list) in netlbl_af4list_search()
86 struct list_head *head) in netlbl_af4list_search_exact() argument
90 list_for_each_entry_rcu(iter, head, list) in netlbl_af4list_search_exact()
111 struct list_head *head) in netlbl_af6list_search() argument
115 list_for_each_entry_rcu(iter, head, list) in netlbl_af6list_search()
137 struct list_head *head) in netlbl_af6list_search_exact() argument
141 list_for_each_entry_rcu(iter, head, list) in netlbl_af6list_search_exact()
162 int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head) in netlbl_af4list_add() argument
166 iter = netlbl_af4list_search(entry->addr, head); in netlbl_af4list_add()
[all …]
/linux-4.4.14/drivers/gpu/drm/radeon/
mkregtable.c
84 static inline void list_add(struct list_head *new, struct list_head *head) in list_add() argument
86 __list_add(new, head, head->next); in list_add()
97 static inline void list_add_tail(struct list_head *new, struct list_head *head) in list_add_tail() argument
99 __list_add(new, head->prev, head); in list_add_tail()
169 static inline void list_move(struct list_head *list, struct list_head *head) in list_move() argument
172 list_add(list, head); in list_move()
181 struct list_head *head) in list_move_tail() argument
184 list_add_tail(list, head); in list_move_tail()
193 const struct list_head *head) in list_is_last() argument
195 return list->next == head; in list_is_last()
[all …]
/linux-4.4.14/drivers/scsi/sym53c8xx_2/
sym_misc.h
55 static inline struct sym_quehead *sym_que_first(struct sym_quehead *head) in sym_que_first() argument
57 return (head->flink == head) ? 0 : head->flink; in sym_que_first()
60 static inline struct sym_quehead *sym_que_last(struct sym_quehead *head) in sym_que_last() argument
62 return (head->blink == head) ? 0 : head->blink; in sym_que_last()
82 static inline int sym_que_empty(struct sym_quehead *head) in sym_que_empty() argument
84 return head->flink == head; in sym_que_empty()
88 struct sym_quehead *head) in sym_que_splice() argument
94 struct sym_quehead *at = head->flink; in sym_que_splice()
96 first->blink = head; in sym_que_splice()
97 head->flink = first; in sym_que_splice()
[all …]
sym_fw2.h
318 offsetof (struct sym_ccb, phys.head.status),
468 offsetof (struct sym_ccb, phys.head.lastp),
527 offsetof (struct sym_ccb, phys.head.lastp),
559 offsetof (struct sym_ccb, phys.head.lastp),
575 offsetof (struct sym_ccb, phys.head.lastp),
667 offsetof (struct sym_ccb, phys.head.status),
677 offsetof (struct sym_ccb, phys.head.status),
753 offsetof (struct sym_ccb, phys.head.lastp),
755 offsetof (struct sym_ccb, phys.head.savep),
775 offsetof (struct sym_ccb, phys.head.savep),
[all …]
/linux-4.4.14/arch/x86/include/asm/
spinlock.h
53 set_bit(0, (volatile unsigned long *)&lock->tickets.head); in __ticket_enter_slowpath()
73 __ticket_t head) in __ticket_check_and_clear_slowpath() argument
75 if (head & TICKET_SLOWPATH_FLAG) { in __ticket_check_and_clear_slowpath()
78 old.tickets.head = head; in __ticket_check_and_clear_slowpath()
79 new.tickets.head = head & ~TICKET_SLOWPATH_FLAG; in __ticket_check_and_clear_slowpath()
80 old.tickets.tail = new.tickets.head + TICKET_LOCK_INC; in __ticket_check_and_clear_slowpath()
90 return __tickets_equal(lock.tickets.head, lock.tickets.tail); in arch_spin_value_unlocked()
111 if (likely(inc.head == inc.tail)) in arch_spin_lock()
118 inc.head = READ_ONCE(lock->tickets.head); in arch_spin_lock()
119 if (__tickets_equal(inc.head, inc.tail)) in arch_spin_lock()
[all …]
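
The spinlock.h hits show the x86 ticket-lock fields (tickets.head is the ticket now being served, tickets.tail the next one to hand out) plus the paravirt slowpath flag. As a rough userspace illustration of just the head/tail idea, not the kernel's implementation, here is a C11 sketch; fields must start out equal (e.g. zero-initialised):

    #include <stdatomic.h>

    struct ticket_lock {
        atomic_uint head;                     /* ticket currently served */
        atomic_uint tail;                     /* next ticket to hand out */
    };

    static void ticket_lock(struct ticket_lock *l)
    {
        unsigned int me = atomic_fetch_add(&l->tail, 1);

        while (atomic_load(&l->head) != me)
            ;                                 /* spin until our turn */
    }

    static void ticket_unlock(struct ticket_lock *l)
    {
        atomic_fetch_add(&l->head, 1);        /* serve the next waiter */
    }
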
/linux-4.4.14/net/sunrpc/xprtrdma/
svc_rdma_recvfrom.c
73 rqstp->rq_arg.head[0].iov_base = page_address(page); in rdma_build_arg_xdr()
74 rqstp->rq_arg.head[0].iov_len = in rdma_build_arg_xdr()
80 bc = byte_count - rqstp->rq_arg.head[0].iov_len; in rdma_build_arg_xdr()
87 rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base; in rdma_build_arg_xdr()
121 struct svc_rdma_op_ctxt *head, in rdma_read_chunk_lcl() argument
137 ctxt->read_hdr = head; in rdma_read_chunk_lcl()
145 head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no]; in rdma_read_chunk_lcl()
146 head->arg.page_len += len; in rdma_read_chunk_lcl()
147 head->arg.len += len; in rdma_read_chunk_lcl()
149 head->count++; in rdma_read_chunk_lcl()
[all …]
/linux-4.4.14/drivers/net/wireless/ath/carl9170/
fwdesc.h
119 struct carl9170fw_desc_head head; member
140 struct carl9170fw_desc_head head; member
157 struct carl9170fw_desc_head head; member
166 struct carl9170fw_desc_head head; member
182 struct carl9170fw_desc_head head; member
192 struct carl9170fw_desc_head head; member
202 struct carl9170fw_desc_head head; member
212 struct carl9170fw_desc_head head; member
220 .head = { \
227 static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head, in carl9170fw_fill_desc() argument
[all …]
/linux-4.4.14/lib/
btree.c
93 static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp) in btree_node_alloc() argument
97 node = mempool_alloc(head->mempool, gfp); in btree_node_alloc()
176 static inline void __btree_init(struct btree_head *head) in __btree_init() argument
178 head->node = NULL; in __btree_init()
179 head->height = 0; in __btree_init()
182 void btree_init_mempool(struct btree_head *head, mempool_t *mempool) in btree_init_mempool() argument
184 __btree_init(head); in btree_init_mempool()
185 head->mempool = mempool; in btree_init_mempool()
189 int btree_init(struct btree_head *head) in btree_init() argument
191 __btree_init(head); in btree_init()
[all …]
timerqueue.c
39 bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) in timerqueue_add() argument
41 struct rb_node **p = &head->head.rb_node; in timerqueue_add()
57 rb_insert_color(&node->node, &head->head); in timerqueue_add()
59 if (!head->next || node->expires.tv64 < head->next->expires.tv64) { in timerqueue_add()
60 head->next = node; in timerqueue_add()
75 bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) in timerqueue_del() argument
80 if (head->next == node) { in timerqueue_del()
83 head->next = rbn ? in timerqueue_del()
86 rb_erase(&node->node, &head->head); in timerqueue_del()
88 return head->next != NULL; in timerqueue_del()
plist.c
57 static void plist_check_head(struct plist_head *head) in plist_check_head() argument
59 if (!plist_head_empty(head)) in plist_check_head()
60 plist_check_list(&plist_first(head)->prio_list); in plist_check_head()
61 plist_check_list(&head->node_list); in plist_check_head()
74 void plist_add(struct plist_node *node, struct plist_head *head) in plist_add() argument
77 struct list_head *node_next = &head->node_list; in plist_add()
79 plist_check_head(head); in plist_add()
83 if (plist_head_empty(head)) in plist_add()
86 first = iter = plist_first(head); in plist_add()
104 plist_check_head(head); in plist_add()
[all …]
llist.c
39 struct llist_head *head) in llist_add_batch() argument
44 new_last->next = first = ACCESS_ONCE(head->first); in llist_add_batch()
45 } while (cmpxchg(&head->first, first, new_first) != first); in llist_add_batch()
65 struct llist_node *llist_del_first(struct llist_head *head) in llist_del_first() argument
69 entry = smp_load_acquire(&head->first); in llist_del_first()
75 entry = cmpxchg(&head->first, old_entry, next); in llist_del_first()
91 struct llist_node *llist_reverse_order(struct llist_node *head) in llist_reverse_order() argument
95 while (head) { in llist_reverse_order()
96 struct llist_node *tmp = head; in llist_reverse_order()
97 head = head->next; in llist_reverse_order()
list_sort.c
24 struct list_head head, *tail = &head; in merge() local
38 return head.next; in merge()
51 struct list_head *head, in merge_and_restore_back_links() argument
54 struct list_head *tail = head; in merge_and_restore_back_links()
86 tail->next = head; in merge_and_restore_back_links()
87 head->prev = tail; in merge_and_restore_back_links()
104 void list_sort(void *priv, struct list_head *head, in list_sort() argument
114 if (list_empty(head)) in list_sort()
119 head->prev->next = NULL; in list_sort()
120 list = head->next; in list_sort()
[all …]
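
list_sort.c is the merge sort for struct list_head chains. A hedged sketch of calling it (the 4.4-era prototype takes a priv pointer and a cmp callback), assuming kernel context; struct rec is invented:

    #include <linux/list.h>
    #include <linux/list_sort.h>

    struct rec {
        int key;
        struct list_head node;
    };

    static int rec_cmp(void *priv, struct list_head *a, struct list_head *b)
    {
        const struct rec *ra = list_entry(a, struct rec, node);
        const struct rec *rb = list_entry(b, struct rec, node);

        return ra->key - rb->key;             /* <= 0 keeps a before b */
    }

    static void sort_records(struct list_head *records)
    {
        list_sort(NULL, records, rec_cmp);    /* stable merge sort */
    }
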
/linux-4.4.14/sound/pci/ctxfi/
ctimap.c
25 struct list_head *pos, *pre, *head; in input_mapper_add() local
28 head = mappers; in input_mapper_add()
30 if (list_empty(head)) { in input_mapper_add()
33 list_add(&entry->list, head); in input_mapper_add()
37 list_for_each(pos, head) { in input_mapper_add()
45 if (pos != head) { in input_mapper_add()
47 if (pre == head) in input_mapper_add()
48 pre = head->prev; in input_mapper_add()
52 pre = head->prev; in input_mapper_add()
53 pos = head->next; in input_mapper_add()
[all …]
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvif/
list.h
159 list_add(struct list_head *entry, struct list_head *head) in list_add() argument
161 __list_add(entry, head, head->next); in list_add()
180 list_add_tail(struct list_head *entry, struct list_head *head) in list_add_tail() argument
182 __list_add(entry, head->prev, head); in list_add_tail()
220 struct list_head *head) in list_move_tail() argument
223 list_add_tail(list, head); in list_move_tail()
235 list_empty(struct list_head *head) in list_empty() argument
237 return head->next == head; in list_empty()
314 #define list_for_each_entry(pos, head, member) \ argument
315 for (pos = __container_of((head)->next, pos, member); \
[all …]
/linux-4.4.14/arch/avr32/oprofile/
backtrace.c
35 static struct frame_head *dump_user_backtrace(struct frame_head *head) in dump_user_backtrace() argument
40 if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) in dump_user_backtrace()
42 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) in dump_user_backtrace()
49 if (bufhead[0].fp <= head) in dump_user_backtrace()
58 struct frame_head *head = (struct frame_head *)(regs->r7); in avr32_backtrace() local
67 (unsigned long)head)) { in avr32_backtrace()
68 oprofile_add_trace(head->lr); in avr32_backtrace()
69 if (head->fp <= head) in avr32_backtrace()
71 head = head->fp; in avr32_backtrace()
76 while (depth-- && head) in avr32_backtrace()
[all …]
/linux-4.4.14/fs/proc/
proc_sysctl.c
28 static bool is_empty_dir(struct ctl_table_header *head) in is_empty_dir() argument
30 return head->ctl_table[0].child == sysctl_mount_point; in is_empty_dir()
76 static int insert_links(struct ctl_table_header *head);
105 struct ctl_table_header *head; in find_entry() local
116 head = ctl_node->header; in find_entry()
117 entry = &head->ctl_table[ctl_node - head->node]; in find_entry()
126 *phead = head; in find_entry()
133 static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry) in insert_entry() argument
135 struct rb_node *node = &head->node[entry - head->ctl_table].node; in insert_entry()
136 struct rb_node **p = &head->parent->root.rb_node; in insert_entry()
[all …]
kcore.c
100 static void free_kclist_ents(struct list_head *head) in free_kclist_ents() argument
104 list_for_each_entry_safe(pos, tmp, head, list) { in free_kclist_ents()
145 LIST_HEAD(head); in kcore_update_ram()
155 list_add(&ent->list, &head); in kcore_update_ram()
156 __kcore_update_ram(&head); in kcore_update_ram()
165 get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) in get_sparsemem_vmemmap_info() argument
177 list_for_each_entry(tmp, head, list) { in get_sparsemem_vmemmap_info()
191 list_add_tail(&vmm->list, head); in get_sparsemem_vmemmap_info()
198 get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) in get_sparsemem_vmemmap_info() argument
208 struct list_head *head = (struct list_head *)arg; in kclist_add_private() local
[all …]
/linux-4.4.14/kernel/rcu/
rcu.h
71 static inline int debug_rcu_head_queue(struct rcu_head *head) in debug_rcu_head_queue() argument
75 r1 = debug_object_activate(head, &rcuhead_debug_descr); in debug_rcu_head_queue()
76 debug_object_active_state(head, &rcuhead_debug_descr, in debug_rcu_head_queue()
82 static inline void debug_rcu_head_unqueue(struct rcu_head *head) in debug_rcu_head_unqueue() argument
84 debug_object_active_state(head, &rcuhead_debug_descr, in debug_rcu_head_unqueue()
87 debug_object_deactivate(head, &rcuhead_debug_descr); in debug_rcu_head_unqueue()
90 static inline int debug_rcu_head_queue(struct rcu_head *head) in debug_rcu_head_queue() argument
95 static inline void debug_rcu_head_unqueue(struct rcu_head *head) in debug_rcu_head_unqueue() argument
106 static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head) in __rcu_reclaim() argument
108 unsigned long offset = (unsigned long)head->func; in __rcu_reclaim()
[all …]
srcu.c
46 b->head = NULL; in rcu_batch_init()
47 b->tail = &b->head; in rcu_batch_init()
53 static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head) in rcu_batch_queue() argument
55 *b->tail = head; in rcu_batch_queue()
56 b->tail = &head->next; in rcu_batch_queue()
64 return b->tail == &b->head; in rcu_batch_empty()
73 struct rcu_head *head; in rcu_batch_dequeue() local
78 head = b->head; in rcu_batch_dequeue()
79 b->head = head->next; in rcu_batch_dequeue()
80 if (b->tail == &head->next) in rcu_batch_dequeue()
[all …]
update.c
314 void wakeme_after_rcu(struct rcu_head *head) in wakeme_after_rcu() argument
318 rcu = container_of(head, struct rcu_synchronize, head); in wakeme_after_rcu()
336 init_rcu_head_on_stack(&rs_array[i].head); in __wait_rcu_gp()
338 (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu); in __wait_rcu_gp()
348 destroy_rcu_head_on_stack(&rs_array[i].head); in __wait_rcu_gp()
354 void init_rcu_head(struct rcu_head *head) in init_rcu_head() argument
356 debug_object_init(head, &rcuhead_debug_descr); in init_rcu_head()
359 void destroy_rcu_head(struct rcu_head *head) in destroy_rcu_head() argument
361 debug_object_free(head, &rcuhead_debug_descr); in destroy_rcu_head()
372 struct rcu_head *head = addr; in rcuhead_fixup_activate() local
[all …]
tiny.c
46 static void __call_rcu(struct rcu_head *head,
205 static void __call_rcu(struct rcu_head *head, in __call_rcu() argument
211 debug_rcu_head_queue(head); in __call_rcu()
212 head->func = func; in __call_rcu()
213 head->next = NULL; in __call_rcu()
216 *rcp->curtail = head; in __call_rcu()
217 rcp->curtail = &head->next; in __call_rcu()
232 void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) in call_rcu_sched() argument
234 __call_rcu(head, func, &rcu_sched_ctrlblk); in call_rcu_sched()
242 void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) in call_rcu_bh() argument
[all …]
/linux-4.4.14/arch/x86/platform/uv/
uv_time.c
169 struct uv_rtc_timer_head *head = blade_info[bid]; in uv_rtc_allocate_timers() local
171 if (!head) { in uv_rtc_allocate_timers()
172 head = kmalloc_node(sizeof(struct uv_rtc_timer_head) + in uv_rtc_allocate_timers()
176 if (!head) { in uv_rtc_allocate_timers()
180 spin_lock_init(&head->lock); in uv_rtc_allocate_timers()
181 head->ncpus = uv_blade_nr_possible_cpus(bid); in uv_rtc_allocate_timers()
182 head->next_cpu = -1; in uv_rtc_allocate_timers()
183 blade_info[bid] = head; in uv_rtc_allocate_timers()
186 head->cpu[bcpu].lcpu = cpu; in uv_rtc_allocate_timers()
187 head->cpu[bcpu].expires = ULLONG_MAX; in uv_rtc_allocate_timers()
[all …]
/linux-4.4.14/net/ipv6/netfilter/
nf_conntrack_reasm.c
382 struct sk_buff *fp, *op, *head = fq->q.fragments; in nf_ct_frag6_reasm() local
388 WARN_ON(head == NULL); in nf_ct_frag6_reasm()
389 WARN_ON(NFCT_FRAG6_CB(head)->offset != 0); in nf_ct_frag6_reasm()
396 payload_len = ((head->data - skb_network_header(head)) - in nf_ct_frag6_reasm()
405 if (skb_unclone(head, GFP_ATOMIC)) { in nf_ct_frag6_reasm()
413 if (skb_has_frag_list(head)) { in nf_ct_frag6_reasm()
421 clone->next = head->next; in nf_ct_frag6_reasm()
422 head->next = clone; in nf_ct_frag6_reasm()
423 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; in nf_ct_frag6_reasm()
424 skb_frag_list_init(head); in nf_ct_frag6_reasm()
[all …]
/linux-4.4.14/arch/x86/oprofile/
backtrace.c
42 dump_user_backtrace_32(struct stack_frame_ia32 *head) in dump_user_backtrace_32() argument
49 bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); in dump_user_backtrace_32()
59 if (head >= fp) in dump_user_backtrace_32()
68 struct stack_frame_ia32 *head; in x86_backtrace_32() local
74 head = (struct stack_frame_ia32 *) regs->bp; in x86_backtrace_32()
75 while (depth-- && head) in x86_backtrace_32()
76 head = dump_user_backtrace_32(head); in x86_backtrace_32()
89 static struct stack_frame *dump_user_backtrace(struct stack_frame *head) in dump_user_backtrace() argument
95 bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); in dump_user_backtrace()
103 if (head >= bufhead[0].next_frame) in dump_user_backtrace()
[all …]
/linux-4.4.14/net/sched/
cls_cgroup.c
32 struct cls_cgroup_head *head = rcu_dereference_bh(tp->root); in cls_cgroup_classify() local
37 if (!tcf_em_tree_match(skb, &head->ematches, NULL)) in cls_cgroup_classify()
43 return tcf_exts_exec(skb, &head->exts, res); in cls_cgroup_classify()
62 struct cls_cgroup_head *head = container_of(root, in cls_cgroup_destroy_rcu() local
66 tcf_exts_destroy(&head->exts); in cls_cgroup_destroy_rcu()
67 tcf_em_tree_destroy(&head->ematches); in cls_cgroup_destroy_rcu()
68 kfree(head); in cls_cgroup_destroy_rcu()
77 struct cls_cgroup_head *head = rtnl_dereference(tp->root); in cls_cgroup_change() local
86 if (!head && !handle) in cls_cgroup_change()
89 if (head && handle != head->handle) in cls_cgroup_change()
[all …]
cls_fw.c
62 struct fw_head *head = rcu_dereference_bh(tp->root); in fw_classify() local
67 if (head != NULL) { in fw_classify()
68 id &= head->mask; in fw_classify()
70 for (f = rcu_dereference_bh(head->ht[fw_hash(id)]); f; in fw_classify()
100 struct fw_head *head = rtnl_dereference(tp->root); in fw_get() local
103 if (head == NULL) in fw_get()
106 f = rtnl_dereference(head->ht[fw_hash(handle)]); in fw_get()
122 static void fw_delete_filter(struct rcu_head *head) in fw_delete_filter() argument
124 struct fw_filter *f = container_of(head, struct fw_filter, rcu); in fw_delete_filter()
132 struct fw_head *head = rtnl_dereference(tp->root); in fw_destroy() local
[all …]
cls_flower.c
121 struct cls_fl_head *head = rcu_dereference_bh(tp->root); in fl_classify() local
126 fl_clear_masked_range(&skb_key, &head->mask); in fl_classify()
132 skb_flow_dissect(skb, &head->dissector, &skb_key, 0); in fl_classify()
134 fl_set_masked_key(&skb_mkey, &skb_key, &head->mask); in fl_classify()
136 f = rhashtable_lookup_fast(&head->ht, in fl_classify()
137 fl_key_get_start(&skb_mkey, &head->mask), in fl_classify()
138 head->ht_params); in fl_classify()
148 struct cls_fl_head *head; in fl_init() local
150 head = kzalloc(sizeof(*head), GFP_KERNEL); in fl_init()
151 if (!head) in fl_init()
[all …]
cls_basic.c
44 struct basic_head *head = rcu_dereference_bh(tp->root); in basic_classify() local
47 list_for_each_entry_rcu(f, &head->flist, link) { in basic_classify()
62 struct basic_head *head = rtnl_dereference(tp->root); in basic_get() local
65 if (head == NULL) in basic_get()
68 list_for_each_entry(f, &head->flist, link) { in basic_get()
80 struct basic_head *head; in basic_init() local
82 head = kzalloc(sizeof(*head), GFP_KERNEL); in basic_init()
83 if (head == NULL) in basic_init()
85 INIT_LIST_HEAD(&head->flist); in basic_init()
86 rcu_assign_pointer(tp->root, head); in basic_init()
[all …]
cls_route.c
72 route4_reset_fastmap(struct route4_head *head) in route4_reset_fastmap() argument
75 memset(head->fastmap, 0, sizeof(head->fastmap)); in route4_reset_fastmap()
80 route4_set_fastmap(struct route4_head *head, u32 id, int iif, in route4_set_fastmap() argument
87 head->fastmap[h].id = id; in route4_set_fastmap()
88 head->fastmap[h].iif = iif; in route4_set_fastmap()
89 head->fastmap[h].filter = f; in route4_set_fastmap()
124 route4_set_fastmap(head, id, iif, f); \
131 struct route4_head *head = rcu_dereference_bh(tp->root); in route4_classify() local
143 if (head == NULL) in route4_classify()
151 if (id == head->fastmap[h].id && in route4_classify()
[all …]
sch_choke.c
71 unsigned int head; member
82 return (q->tail - q->head) & q->tab_mask; in choke_len()
101 q->head = (q->head + 1) & q->tab_mask; in choke_zap_head_holes()
102 if (q->head == q->tail) in choke_zap_head_holes()
104 } while (q->tab[q->head] == NULL); in choke_zap_head_holes()
112 if (q->head == q->tail) in choke_zap_tail_holes()
125 if (idx == q->head) in choke_drop_by_idx()
235 *pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask; in choke_peek_random()
241 return q->tab[*pidx = q->head]; in choke_peek_random()
254 if (q->head == q->tail) in choke_match_random()
[all …]
cls_bpf.c
81 struct cls_bpf_head *head = rcu_dereference_bh(tp->root); in cls_bpf_classify() local
95 list_for_each_entry_rcu(prog, &head->plist, link) { in cls_bpf_classify()
144 struct cls_bpf_head *head; in cls_bpf_init() local
146 head = kzalloc(sizeof(*head), GFP_KERNEL); in cls_bpf_init()
147 if (head == NULL) in cls_bpf_init()
150 INIT_LIST_HEAD_RCU(&head->plist); in cls_bpf_init()
151 rcu_assign_pointer(tp->root, head); in cls_bpf_init()
190 struct cls_bpf_head *head = rtnl_dereference(tp->root); in cls_bpf_destroy() local
193 if (!force && !list_empty(&head->plist)) in cls_bpf_destroy()
196 list_for_each_entry_safe(prog, tmp, &head->plist, link) { in cls_bpf_destroy()
[all …]
sch_hhf.c
120 struct sk_buff *head; member
180 struct list_head *head, in seek_list() argument
186 if (list_empty(head)) in seek_list()
189 list_for_each_entry_safe(flow, next, head, flowchain) { in seek_list()
196 if (list_is_last(&flow->flowchain, head)) in seek_list()
211 static struct hh_flow_state *alloc_new_hh(struct list_head *head, in alloc_new_hh() argument
217 if (!list_empty(head)) { in alloc_new_hh()
219 list_for_each_entry(flow, head, flowchain) { in alloc_new_hh()
238 list_add_tail(&flow->flowchain, head); in alloc_new_hh()
330 struct sk_buff *skb = bucket->head; in dequeue_head()
[all …]
sch_fq.c
61 struct sk_buff *head; /* list of skbs for this flow : first skb */ member
156 static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow) in fq_flow_add_tail() argument
158 if (head->first) in fq_flow_add_tail()
159 head->last->next = flow; in fq_flow_add_tail()
161 head->first = flow; in fq_flow_add_tail()
162 head->last = flow; in fq_flow_add_tail()
301 struct sk_buff *skb = flow->head; in fq_dequeue_head()
304 flow->head = skb->next; in fq_dequeue_head()
335 struct sk_buff *prev, *head = flow->head; in flow_queue_add() local
338 if (!head) { in flow_queue_add()
[all …]
cls_flow.c
295 struct flow_head *head = rcu_dereference_bh(tp->root); in flow_classify() local
302 list_for_each_entry_rcu(f, &head->filters, list) { in flow_classify()
365 static void flow_destroy_filter(struct rcu_head *head) in flow_destroy_filter() argument
367 struct flow_filter *f = container_of(head, struct flow_filter, rcu); in flow_destroy_filter()
380 struct flow_head *head = rtnl_dereference(tp->root); in flow_change() local
535 list_add_tail_rcu(&fnew->list, &head->filters); in flow_change()
564 struct flow_head *head; in flow_init() local
566 head = kzalloc(sizeof(*head), GFP_KERNEL); in flow_init()
567 if (head == NULL) in flow_init()
569 INIT_LIST_HEAD(&head->filters); in flow_init()
[all …]
/linux-4.4.14/drivers/net/ethernet/intel/fm10k/
Dfm10k_mbx.c33 fifo->head = 0; in fm10k_fifo_init()
45 return fifo->tail - fifo->head; in fm10k_fifo_used()
56 return fifo->size + fifo->head - fifo->tail; in fm10k_fifo_unused()
67 return fifo->head == fifo->tail; in fm10k_fifo_empty()
79 return (fifo->head + offset) & (fifo->size - 1); in fm10k_fifo_head_offset()
102 u32 *head = fifo->buffer + fm10k_fifo_head_offset(fifo, 0); in fm10k_fifo_head_len() local
109 return FM10K_TLV_DWORD_LEN(*head); in fm10k_fifo_head_len()
123 fifo->head += len; in fm10k_fifo_head_drop()
137 fifo->head = fifo->tail; in fm10k_fifo_drop_all()
149 static u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail) in fm10k_mbx_index_len() argument
[all …]
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/
Dnrs_fifo.c81 struct nrs_fifo_head *head; in nrs_fifo_start() local
83 head = kzalloc_node(sizeof(*head), GFP_NOFS, in nrs_fifo_start()
86 if (head == NULL) in nrs_fifo_start()
89 INIT_LIST_HEAD(&head->fh_list); in nrs_fifo_start()
90 policy->pol_private = head; in nrs_fifo_start()
105 struct nrs_fifo_head *head = policy->pol_private; in nrs_fifo_stop() local
107 LASSERT(head != NULL); in nrs_fifo_stop()
108 LASSERT(list_empty(&head->fh_list)); in nrs_fifo_stop()
110 kfree(head); in nrs_fifo_stop()
165 struct nrs_fifo_head *head = policy->pol_private; in nrs_fifo_req_get() local
[all …]
/linux-4.4.14/net/atm/
Daddr.c51 struct list_head *head; in atm_reset_addr() local
55 head = &dev->lecs; in atm_reset_addr()
57 head = &dev->local; in atm_reset_addr()
58 list_for_each_entry_safe(this, p, head, entry) { in atm_reset_addr()
63 if (head == &dev->local) in atm_reset_addr()
72 struct list_head *head; in atm_add_addr() local
80 head = &dev->lecs; in atm_add_addr()
82 head = &dev->local; in atm_add_addr()
83 list_for_each_entry(this, head, entry) { in atm_add_addr()
95 list_add(&this->entry, head); in atm_add_addr()
[all …]
/linux-4.4.14/net/ipv6/
Dreassembly.c383 struct sk_buff *fp, *head = fq->q.fragments; in ip6_frag_reasm() local
397 head = prev->next; in ip6_frag_reasm()
398 fp = skb_clone(head, GFP_ATOMIC); in ip6_frag_reasm()
403 fp->next = head->next; in ip6_frag_reasm()
408 skb_morph(head, fq->q.fragments); in ip6_frag_reasm()
409 head->next = fq->q.fragments->next; in ip6_frag_reasm()
412 fq->q.fragments = head; in ip6_frag_reasm()
415 WARN_ON(head == NULL); in ip6_frag_reasm()
416 WARN_ON(FRAG6_CB(head)->offset != 0); in ip6_frag_reasm()
419 payload_len = ((head->data - skb_network_header(head)) - in ip6_frag_reasm()
[all …]
/linux-4.4.14/drivers/staging/rdma/ipath/
Dipath_cq.c52 u32 head; in ipath_cq_enter() local
62 head = wc->head; in ipath_cq_enter()
63 if (head >= (unsigned) cq->ibcq.cqe) { in ipath_cq_enter()
64 head = cq->ibcq.cqe; in ipath_cq_enter()
67 next = head + 1; in ipath_cq_enter()
81 wc->uqueue[head].wr_id = entry->wr_id; in ipath_cq_enter()
82 wc->uqueue[head].status = entry->status; in ipath_cq_enter()
83 wc->uqueue[head].opcode = entry->opcode; in ipath_cq_enter()
84 wc->uqueue[head].vendor_err = entry->vendor_err; in ipath_cq_enter()
85 wc->uqueue[head].byte_len = entry->byte_len; in ipath_cq_enter()
[all …]
ipath_srq.c
69 next = wq->head + 1; in ipath_post_srq_receive()
79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in ipath_post_srq_receive()
86 wq->head = next; in ipath_post_srq_receive()
175 srq->rq.wq->head = 0; in ipath_create_srq()
226 u32 sz, size, n, head, tail; in ipath_modify_srq() local
268 head = owq->head; in ipath_modify_srq()
269 if (head >= srq->rq.size) in ipath_modify_srq()
270 head = 0; in ipath_modify_srq()
274 n = head; in ipath_modify_srq()
285 while (tail != head) { in ipath_modify_srq()
[all …]
/linux-4.4.14/tools/perf/util/
Dparse-events.y66 %type <head> event_config
68 %type <head> event_pmu
69 %type <head> event_legacy_symbol
70 %type <head> event_legacy_cache
71 %type <head> event_legacy_mem
72 %type <head> event_legacy_tracepoint
74 %type <head> event_legacy_numeric
75 %type <head> event_legacy_raw
76 %type <head> event_bpf_file
77 %type <head> event_def
[all …]
auxtrace.c
150 INIT_LIST_HEAD(&queue_array[i].head); in auxtrace_alloc_queue_array()
187 list_splice_tail(&queues->queue_array[i].head, in auxtrace_queues__grow()
188 &queue_array[i].head); in auxtrace_queues__grow()
247 list_add_tail(&buffer->list, &queue->head); in auxtrace_queues__add_buffer()
382 while (!list_empty(&queues->queue_array[i].head)) { in auxtrace_queues__free()
385 buffer = list_entry(queues->queue_array[i].head.next, in auxtrace_queues__free()
526 unsigned char *data, u64 *head, u64 *old) in auxtrace_record__find_snapshot() argument
529 return itr->find_snapshot(itr, idx, mm, data, head, old); in auxtrace_record__find_snapshot()
569 static int auxtrace_index__alloc(struct list_head *head) in auxtrace_index__alloc() argument
580 list_add_tail(&auxtrace_index->list, head); in auxtrace_index__alloc()
[all …]
auxtrace.h
189 struct list_head head; member
306 u64 *head, u64 *old);
326 u64 head = ACCESS_ONCE(pc->aux_head); in auxtrace_mmap__read_snapshot_head() local
330 return head; in auxtrace_mmap__read_snapshot_head()
337 u64 head = ACCESS_ONCE(pc->aux_head); in auxtrace_mmap__read_head() local
339 u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0); in auxtrace_mmap__read_head() local
344 return head; in auxtrace_mmap__read_head()
442 unsigned char *data, u64 *head, u64 *old);
445 int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
447 int auxtrace_index__write(int fd, struct list_head *head);
[all …]
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/subdev/
Dvga.h6 u8 nvkm_rdport(struct nvkm_device *, int head, u16 port);
7 void nvkm_wrport(struct nvkm_device *, int head, u16 port, u8 value);
10 u8 nvkm_rdvgas(struct nvkm_device *, int head, u8 index);
11 void nvkm_wrvgas(struct nvkm_device *, int head, u8 index, u8 value);
14 u8 nvkm_rdvgag(struct nvkm_device *, int head, u8 index);
15 void nvkm_wrvgag(struct nvkm_device *, int head, u8 index, u8 value);
18 u8 nvkm_rdvgac(struct nvkm_device *, int head, u8 index);
19 void nvkm_wrvgac(struct nvkm_device *, int head, u8 index, u8 value);
22 u8 nvkm_rdvgai(struct nvkm_device *, int head, u16 port, u8 index);
23 void nvkm_wrvgai(struct nvkm_device *, int head, u16 port, u8 index, u8 value);
/linux-4.4.14/drivers/gpu/drm/
Ddrm_agpsupport.c220 list_add(&entry->head, &dev->agp->memory); in drm_agp_alloc()
252 list_for_each_entry(entry, &dev->agp->memory, head) { in drm_agp_lookup_entry()
367 list_del(&entry->head); in drm_agp_free()
399 struct drm_agp_head *head = NULL; in drm_agp_init() local
401 if (!(head = kzalloc(sizeof(*head), GFP_KERNEL))) in drm_agp_init()
403 head->bridge = agp_find_bridge(dev->pdev); in drm_agp_init()
404 if (!head->bridge) { in drm_agp_init()
405 if (!(head->bridge = agp_backend_acquire(dev->pdev))) { in drm_agp_init()
406 kfree(head); in drm_agp_init()
409 agp_copy_info(head->bridge, &head->agp_info); in drm_agp_init()
[all …]
Ddrm_hashtab.c69 hlist_for_each_entry(entry, h_list, head) in drm_ht_verbose_list()
82 hlist_for_each_entry(entry, h_list, head) { in drm_ht_find_key()
84 return &entry->head; in drm_ht_find_key()
100 hlist_for_each_entry_rcu(entry, h_list, head) { in drm_ht_find_key_rcu()
102 return &entry->head; in drm_ht_find_key_rcu()
120 hlist_for_each_entry(entry, h_list, head) { in drm_ht_insert_item()
125 parent = &entry->head; in drm_ht_insert_item()
128 hlist_add_behind_rcu(&item->head, parent); in drm_ht_insert_item()
130 hlist_add_head_rcu(&item->head, h_list); in drm_ht_insert_item()
174 *item = hlist_entry(list, struct drm_hash_item, head); in drm_ht_find_item()
[all …]
/linux-4.4.14/drivers/infiniband/hw/qib/
Dqib_cq.c55 u32 head; in qib_cq_enter() local
65 head = wc->head; in qib_cq_enter()
66 if (head >= (unsigned) cq->ibcq.cqe) { in qib_cq_enter()
67 head = cq->ibcq.cqe; in qib_cq_enter()
70 next = head + 1; in qib_cq_enter()
84 wc->uqueue[head].wr_id = entry->wr_id; in qib_cq_enter()
85 wc->uqueue[head].status = entry->status; in qib_cq_enter()
86 wc->uqueue[head].opcode = entry->opcode; in qib_cq_enter()
87 wc->uqueue[head].vendor_err = entry->vendor_err; in qib_cq_enter()
88 wc->uqueue[head].byte_len = entry->byte_len; in qib_cq_enter()
[all …]
Dqib_srq.c69 next = wq->head + 1; in qib_post_srq_receive()
79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in qib_post_srq_receive()
86 wq->head = next; in qib_post_srq_receive()
171 srq->rq.wq->head = 0; in qib_create_srq()
222 u32 sz, size, n, head, tail; in qib_modify_srq() local
264 head = owq->head; in qib_modify_srq()
266 if (head >= srq->rq.size || tail >= srq->rq.size) { in qib_modify_srq()
270 n = head; in qib_modify_srq()
281 while (tail != head) { in qib_modify_srq()
297 wq->head = n; in qib_modify_srq()
/linux-4.4.14/drivers/staging/rdma/hfi1/
Dcq.c71 u32 head; in hfi1_cq_enter() local
81 head = wc->head; in hfi1_cq_enter()
82 if (head >= (unsigned) cq->ibcq.cqe) { in hfi1_cq_enter()
83 head = cq->ibcq.cqe; in hfi1_cq_enter()
86 next = head + 1; in hfi1_cq_enter()
100 wc->uqueue[head].wr_id = entry->wr_id; in hfi1_cq_enter()
101 wc->uqueue[head].status = entry->status; in hfi1_cq_enter()
102 wc->uqueue[head].opcode = entry->opcode; in hfi1_cq_enter()
103 wc->uqueue[head].vendor_err = entry->vendor_err; in hfi1_cq_enter()
104 wc->uqueue[head].byte_len = entry->byte_len; in hfi1_cq_enter()
[all …]
Dsrq.c86 next = wq->head + 1; in hfi1_post_srq_receive()
96 wqe = get_rwqe_ptr(&srq->rq, wq->head); in hfi1_post_srq_receive()
103 wq->head = next; in hfi1_post_srq_receive()
188 srq->rq.wq->head = 0; in hfi1_create_srq()
239 u32 sz, size, n, head, tail; in hfi1_modify_srq() local
281 head = owq->head; in hfi1_modify_srq()
283 if (head >= srq->rq.size || tail >= srq->rq.size) { in hfi1_modify_srq()
287 n = head; in hfi1_modify_srq()
298 while (tail != head) { in hfi1_modify_srq()
314 wq->head = n; in hfi1_modify_srq()
/linux-4.4.14/scripts/kconfig/
Dlist.h48 #define list_for_each_entry(pos, head, member) \ argument
49 for (pos = list_entry((head)->next, typeof(*pos), member); \
50 &pos->member != (head); \
60 #define list_for_each_entry_safe(pos, n, head, member) \ argument
61 for (pos = list_entry((head)->next, typeof(*pos), member), \
63 &pos->member != (head); \
70 static inline int list_empty(const struct list_head *head) in list_empty() argument
72 return head->next == head; in list_empty()
99 static inline void list_add_tail(struct list_head *_new, struct list_head *head) in list_add_tail() argument
101 __list_add(_new, head->prev, head); in list_add_tail()
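
The scripts/kconfig/list.h hits above are a private copy of the kernel's intrusive list macros (list_entry, list_for_each_entry, list_add_tail, list_empty). As a rough, self-contained illustration of how such a list is driven from a containing structure: the struct item payload, the LIST_HEAD_INIT/container_of helpers, and the main() driver below are invented for this sketch, while the macro shapes follow the snippets shown in the hits.

/* Minimal userspace sketch of the intrusive list pattern (GNU C, uses typeof). */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_entry(ptr, type, member) container_of(ptr, type, member)

#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

static inline void __list_add(struct list_head *_new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = _new;
	_new->next = next;
	_new->prev = prev;
	prev->next = _new;
}

/* Append at the tail, the same shape as the list_add_tail() hit above. */
static inline void list_add_tail(struct list_head *_new, struct list_head *head)
{
	__list_add(_new, head->prev, head);
}

struct item {
	int value;
	struct list_head node;	/* list linkage embedded in the payload */
};

int main(void)
{
	struct list_head head = LIST_HEAD_INIT(head);
	struct item a = { .value = 1 }, b = { .value = 2 };
	struct item *pos;

	list_add_tail(&a.node, &head);
	list_add_tail(&b.node, &head);

	list_for_each_entry(pos, &head, node)
		printf("%d\n", pos->value);

	return 0;
}
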
/linux-4.4.14/drivers/misc/mic/scif/
Dscif_rma_list.c28 void scif_insert_tcw(struct scif_window *window, struct list_head *head) in scif_insert_tcw() argument
31 struct scif_window *prev = list_entry(head, struct scif_window, list); in scif_insert_tcw()
36 if (!list_empty(head)) { in scif_insert_tcw()
37 curr = list_entry(head->prev, struct scif_window, list); in scif_insert_tcw()
39 list_add_tail(&window->list, head); in scif_insert_tcw()
43 list_for_each(item, head) { in scif_insert_tcw()
58 void scif_insert_window(struct scif_window *window, struct list_head *head) in scif_insert_window() argument
64 list_for_each(item, head) { in scif_insert_window()
71 list_add(&window->list, head); in scif_insert_window()
87 struct list_head *item, *temp, *head = req->head; in scif_query_tcw() local
[all …]
/linux-4.4.14/drivers/gpu/drm/vmwgfx/
Dvmwgfx_marker.c32 struct list_head head; member
39 INIT_LIST_HEAD(&queue->head); in vmw_marker_queue_init()
50 list_for_each_entry_safe(marker, next, &queue->head, head) { in vmw_marker_queue_takedown()
67 list_add_tail(&marker->head, &queue->head); in vmw_marker_push()
83 if (list_empty(&queue->head)) { in vmw_marker_pull()
90 list_for_each_entry_safe(marker, next, &queue->head, head) { in vmw_marker_pull()
97 list_del(&marker->head); in vmw_marker_pull()
137 if (list_empty(&queue->head)) in vmw_wait_lag()
140 marker = list_first_entry(&queue->head, in vmw_wait_lag()
141 struct vmw_marker, head); in vmw_wait_lag()
Dvmwgfx_cmdbuf_res.c46 struct list_head head; member
108 list_del(&entry->head); in vmw_cmdbuf_res_free()
128 list_for_each_entry_safe(entry, next, list, head) { in vmw_cmdbuf_res_commit()
129 list_del(&entry->head); in vmw_cmdbuf_res_commit()
136 list_add_tail(&entry->head, &entry->man->list); in vmw_cmdbuf_res_commit()
166 list_for_each_entry_safe(entry, next, list, head) { in vmw_cmdbuf_res_revert()
174 list_del(&entry->head); in vmw_cmdbuf_res_revert()
175 list_add_tail(&entry->head, &entry->man->list); in vmw_cmdbuf_res_revert()
219 list_add_tail(&cres->head, list); in vmw_cmdbuf_res_add()
265 list_del(&entry->head); in vmw_cmdbuf_res_remove()
[all …]
/linux-4.4.14/kernel/
Dfutex_compat.c24 compat_uptr_t __user *head, unsigned int *pi) in fetch_robust_entry() argument
26 if (get_user(*uentry, head)) in fetch_robust_entry()
52 struct compat_robust_list_head __user *head = curr->compat_robust_list; in compat_exit_robust_list() local
67 if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) in compat_exit_robust_list()
72 if (get_user(futex_offset, &head->futex_offset)) in compat_exit_robust_list()
79 &head->list_op_pending, &pip)) in compat_exit_robust_list()
83 while (entry != (struct robust_list __user *) &head->list) { in compat_exit_robust_list()
121 struct compat_robust_list_head __user *, head, in COMPAT_SYSCALL_DEFINE2() argument
127 if (unlikely(len != sizeof(*head))) in COMPAT_SYSCALL_DEFINE2()
130 current->compat_robust_list = head; in COMPAT_SYSCALL_DEFINE2()
[all …]
Dtask_work.c29 struct callback_head *head; in task_work_add() local
32 head = ACCESS_ONCE(task->task_works); in task_work_add()
33 if (unlikely(head == &work_exited)) in task_work_add()
35 work->next = head; in task_work_add()
36 } while (cmpxchg(&task->task_works, head, work) != head); in task_work_add()
90 struct callback_head *work, *head, *next; in task_work_run() local
99 head = !work && (task->flags & PF_EXITING) ? in task_work_run()
101 } while (cmpxchg(&task->task_works, work, head) != work); in task_work_run()
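
The kernel/task_work.c hits above show the usual compare-and-swap loop for pushing a callback onto a lock-free, singly linked list. A minimal standalone sketch of the same push-then-take-all pattern, using C11 atomics in place of the kernel's cmpxchg() and with invented names (work_stack, work_push, work_pop_all):

#include <stdatomic.h>
#include <stdio.h>

struct callback_head {
	struct callback_head *next;
};

/* Shared head of the lock-free stack (name invented for this sketch). */
static _Atomic(struct callback_head *) work_stack;

/* Push one node: point it at the current head, then publish it only if
 * the head has not changed in the meantime; otherwise retry. */
static void work_push(struct callback_head *work)
{
	struct callback_head *head = atomic_load(&work_stack);

	do {
		work->next = head;
	} while (!atomic_compare_exchange_weak(&work_stack, &head, work));
}

/* Detach the whole list at once (a simplified stand-in for what
 * task_work_run() does with its cmpxchg() loop before walking the list). */
static struct callback_head *work_pop_all(void)
{
	return atomic_exchange(&work_stack, NULL);
}

int main(void)
{
	struct callback_head a, b;
	struct callback_head *p;

	work_push(&a);
	work_push(&b);

	for (p = work_pop_all(); p; p = p->next)	/* LIFO: b, then a */
		printf("popped %p\n", (void *)p);

	return 0;
}
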
Dnotifier.c128 ret = notifier_chain_register(&nh->head, n); in atomic_notifier_chain_register()
150 ret = notifier_chain_unregister(&nh->head, n); in atomic_notifier_chain_unregister()
183 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); in __atomic_notifier_call_chain()
224 return notifier_chain_register(&nh->head, n); in blocking_notifier_chain_register()
227 ret = notifier_chain_register(&nh->head, n); in blocking_notifier_chain_register()
250 ret = notifier_chain_cond_register(&nh->head, n); in blocking_notifier_chain_cond_register()
277 return notifier_chain_unregister(&nh->head, n); in blocking_notifier_chain_unregister()
280 ret = notifier_chain_unregister(&nh->head, n); in blocking_notifier_chain_unregister()
315 if (rcu_access_pointer(nh->head)) { in __blocking_notifier_call_chain()
317 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, in __blocking_notifier_call_chain()
[all …]
Dsoftirq.c442 struct tasklet_struct *head; member
479 t->next = __this_cpu_read(tasklet_hi_vec.head); in __tasklet_hi_schedule_first()
480 __this_cpu_write(tasklet_hi_vec.head, t); in __tasklet_hi_schedule_first()
490 list = __this_cpu_read(tasklet_vec.head); in tasklet_action()
491 __this_cpu_write(tasklet_vec.head, NULL); in tasklet_action()
492 __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head)); in tasklet_action()
526 list = __this_cpu_read(tasklet_hi_vec.head); in tasklet_hi_action()
527 __this_cpu_write(tasklet_hi_vec.head, NULL); in tasklet_hi_action()
528 __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head)); in tasklet_hi_action()
640 &per_cpu(tasklet_vec, cpu).head; in softirq_init()
[all …]
/linux-4.4.14/arch/m68k/kernel/
DMakefile5 extra-$(CONFIG_AMIGA) := head.o
6 extra-$(CONFIG_ATARI) := head.o
7 extra-$(CONFIG_MAC) := head.o
8 extra-$(CONFIG_APOLLO) := head.o
9 extra-$(CONFIG_VME) := head.o
10 extra-$(CONFIG_HP300) := head.o
11 extra-$(CONFIG_Q40) := head.o
12 extra-$(CONFIG_SUN3X) := head.o
13 extra-$(CONFIG_SUN3) := sun3-head.o
/linux-4.4.14/drivers/net/wireless/ath/
Ddfs_pri_detector.c37 struct list_head head; member
105 list_for_each_entry_safe(p, p0, &pulse_pool, head) { in pool_deregister_ref()
106 list_del(&p->head); in pool_deregister_ref()
110 list_for_each_entry_safe(ps, ps0, &pseq_pool, head) { in pool_deregister_ref()
111 list_del(&ps->head); in pool_deregister_ref()
122 list_add(&pe->head, &pulse_pool); in pool_put_pulse_elem()
130 list_add(&pse->head, &pseq_pool); in pool_put_pseq_elem()
140 pse = list_first_entry(&pseq_pool, struct pri_sequence, head); in pool_get_pseq_elem()
141 list_del(&pse->head); in pool_get_pseq_elem()
153 pe = list_first_entry(&pulse_pool, struct pulse_elem, head); in pool_get_pulse_elem()
[all …]
/linux-4.4.14/drivers/mfd/
Dpcf50633-adc.c74 int head; in trigger_next_adc_job_if_any() local
76 head = adc->queue_head; in trigger_next_adc_job_if_any()
78 if (!adc->queue[head]) in trigger_next_adc_job_if_any()
81 adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg); in trigger_next_adc_job_if_any()
88 int head, tail; in adc_enqueue_request() local
92 head = adc->queue_head; in adc_enqueue_request()
102 if (head == tail) in adc_enqueue_request()
177 int head, res; in pcf50633_adc_irq() local
180 head = adc->queue_head; in pcf50633_adc_irq()
182 req = adc->queue[head]; in pcf50633_adc_irq()
[all …]
/linux-4.4.14/net/ipv4/
Dip_fragment.c212 struct sk_buff *head = qp->q.fragments; in ip_expire() local
222 head->dev = dev_get_by_index_rcu(net, qp->iif); in ip_expire()
223 if (!head->dev) in ip_expire()
227 iph = ip_hdr(head); in ip_expire()
228 err = ip_route_input_noref(head, iph->daddr, iph->saddr, in ip_expire()
229 iph->tos, head->dev); in ip_expire()
237 (skb_rtable(head)->rt_type != RTN_LOCAL)) in ip_expire()
241 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); in ip_expire()
529 struct sk_buff *fp, *head = qp->q.fragments; in ip_frag_reasm() local
544 head = prev->next; in ip_frag_reasm()
[all …]
Dinet_hashtables.c63 struct inet_bind_hashbucket *head, in inet_bind_bucket_create() argument
75 hlist_add_head(&tb->node, &head->chain); in inet_bind_bucket_create()
108 struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; in __inet_put_port() local
111 spin_lock(&head->lock); in __inet_put_port()
118 spin_unlock(&head->lock); in __inet_put_port()
135 struct inet_bind_hashbucket *head = &table->bhash[bhash]; in __inet_inherit_port() local
138 spin_lock(&head->lock); in __inet_inherit_port()
141 spin_unlock(&head->lock); in __inet_inherit_port()
150 inet_bind_bucket_for_each(tb, &head->chain) { in __inet_inherit_port()
157 sock_net(sk), head, port); in __inet_inherit_port()
[all …]
Dudp_offload.c115 skb->csum_start = skb_transport_header(skb) - skb->head; in __skb_udp_tunnel_segment()
262 static void udp_offload_free_routine(struct rcu_head *head) in udp_offload_free_routine() argument
264 struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu); in udp_offload_free_routine()
270 struct udp_offload_priv __rcu **head = &udp_offload_base; in udp_del_offload() local
275 uo_priv = udp_deref_protected(*head); in udp_del_offload()
277 uo_priv = udp_deref_protected(*head)) { in udp_del_offload()
279 rcu_assign_pointer(*head, in udp_del_offload()
283 head = &uo_priv->next; in udp_del_offload()
293 struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, in udp_gro_receive() argument
323 for (p = *head; p; p = p->next) { in udp_gro_receive()
[all …]
/linux-4.4.14/drivers/gpu/drm/ttm/
Dttm_execbuf_util.c38 list_for_each_entry_continue_reverse(entry, list, head) { in ttm_eu_backoff_reservation_reverse()
49 list_for_each_entry(entry, list, head) { in ttm_eu_del_from_lru_locked()
66 entry = list_first_entry(list, struct ttm_validate_buffer, head); in ttm_eu_backoff_reservation()
70 list_for_each_entry(entry, list, head) { in ttm_eu_backoff_reservation()
106 entry = list_first_entry(list, struct ttm_validate_buffer, head); in ttm_eu_reserve_buffers()
112 list_for_each_entry(entry, list, head) { in ttm_eu_reserve_buffers()
124 entry = list_prev_entry(entry, head); in ttm_eu_reserve_buffers()
125 list_del(&safe->head); in ttm_eu_reserve_buffers()
126 list_add(&safe->head, dups); in ttm_eu_reserve_buffers()
169 list_del(&entry->head); in ttm_eu_reserve_buffers()
[all …]
/linux-4.4.14/drivers/tty/
Dtty_buffer.c69 restart = buf->head->commit != buf->head->read; in tty_buffer_unlock_exclusive()
121 while ((p = buf->head) != NULL) { in tty_buffer_free_all()
122 buf->head = p->next; in tty_buffer_free_all()
131 buf->head = &buf->sentinel; in tty_buffer_free_all()
225 while ((next = smp_load_acquire(&buf->head->next)) != NULL) { in tty_buffer_flush()
226 tty_buffer_free(port, buf->head); in tty_buffer_flush()
227 buf->head = next; in tty_buffer_flush()
229 buf->head->read = buf->head->commit; in tty_buffer_flush()
415 receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count) in receive_buf() argument
418 unsigned char *p = char_buf_ptr(head, head->read); in receive_buf()
[all …]
/linux-4.4.14/fs/nilfs2/
Dsegbuf.h98 #define NILFS_LIST_SEGBUF(head) \ argument
99 list_entry((head), struct nilfs_segment_buffer, sb_list)
102 #define NILFS_LAST_SEGBUF(head) NILFS_LIST_SEGBUF((head)->prev) argument
103 #define NILFS_FIRST_SEGBUF(head) NILFS_LIST_SEGBUF((head)->next) argument
104 #define NILFS_SEGBUF_IS_LAST(segbuf, head) ((segbuf)->sb_list.next == (head)) argument
110 #define NILFS_SEGBUF_FIRST_BH(head) \ argument
111 (list_entry((head)->next, struct buffer_head, b_assoc_buffers))
115 #define NILFS_SEGBUF_BH_IS_LAST(bh, head) ((bh)->b_assoc_buffers.next == head) argument
Dpage.c157 struct buffer_head *bh, *head; in nilfs_page_buffers_clean() local
159 bh = head = page_buffers(page); in nilfs_page_buffers_clean()
164 } while (bh != head); in nilfs_page_buffers_clean()
187 struct buffer_head *bh, *head; in nilfs_page_bug() local
190 bh = head = page_buffers(page); in nilfs_page_bug()
197 } while (bh != head); in nilfs_page_bug()
421 struct buffer_head *bh, *head; in nilfs_clear_dirty_page() local
427 bh = head = page_buffers(page); in nilfs_clear_dirty_page()
437 } while (bh = bh->b_this_page, bh != head); in nilfs_clear_dirty_page()
447 struct buffer_head *bh, *head; in nilfs_page_count_clean_buffers() local
[all …]
/linux-4.4.14/net/batman-adv/
Dhash.h69 struct hlist_head *head; in batadv_hash_delete() local
75 head = &hash->table[i]; in batadv_hash_delete()
79 hlist_for_each_safe(node, node_tmp, head) { in batadv_hash_delete()
110 struct hlist_head *head; in batadv_hash_add() local
118 head = &hash->table[index]; in batadv_hash_add()
123 hlist_for_each(node, head) { in batadv_hash_add()
132 hlist_add_head_rcu(data_node, head); in batadv_hash_add()
154 struct hlist_head *head; in batadv_hash_remove() local
158 head = &hash->table[index]; in batadv_hash_remove()
161 hlist_for_each(node, head) { in batadv_hash_remove()
Dfragmentation.c49 static void batadv_frag_clear_chain(struct hlist_head *head) in batadv_frag_clear_chain() argument
54 hlist_for_each_entry_safe(entry, node, head, list) { in batadv_frag_clear_chain()
77 batadv_frag_clear_chain(&orig_node->fragments[i].head); in batadv_frag_purge_orig()
121 if (!hlist_empty(&chain->head)) in batadv_frag_init_chain()
122 batadv_frag_clear_chain(&chain->head); in batadv_frag_init_chain()
180 hlist_add_head(&frag_entry_new->list, &chain->head); in batadv_frag_insert_packet()
189 hlist_for_each_entry(frag_entry_curr, &chain->head, list) { in batadv_frag_insert_packet()
224 batadv_frag_clear_chain(&chain->head); in batadv_frag_insert_packet()
228 hlist_move_list(&chain->head, chain_out); in batadv_frag_insert_packet()
315 struct hlist_head head = HLIST_HEAD_INIT; in batadv_frag_skb_buffer() local
[all …]
Dbridge_loop_avoidance.c156 struct hlist_head *head; in batadv_claim_hash_find() local
165 head = &hash->table[index]; in batadv_claim_hash_find()
168 hlist_for_each_entry_rcu(claim, head, hash_entry) { in batadv_claim_hash_find()
196 struct hlist_head *head; in batadv_backbone_hash_find() local
208 head = &hash->table[index]; in batadv_backbone_hash_find()
211 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { in batadv_backbone_hash_find()
233 struct hlist_head *head; in batadv_bla_del_backbone_claims() local
243 head = &hash->table[i]; in batadv_bla_del_backbone_claims()
248 head, hash_entry) { in batadv_bla_del_backbone_claims()
478 struct hlist_head *head; in batadv_bla_answer_request() local
[all …]
/linux-4.4.14/arch/powerpc/platforms/pseries/
Dhvcserver.c75 int hvcs_free_partner_info(struct list_head *head) in hvcs_free_partner_info() argument
80 if (!head) in hvcs_free_partner_info()
83 while (!list_empty(head)) { in hvcs_free_partner_info()
84 element = head->next; in hvcs_free_partner_info()
132 int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head, in hvcs_get_partner_info() argument
146 if (!head || !pi_buff) in hvcs_get_partner_info()
151 INIT_LIST_HEAD(head); in hvcs_get_partner_info()
161 if (!list_empty(head)) in hvcs_get_partner_info()
182 hvcs_free_partner_info(head); in hvcs_get_partner_info()
196 list_add_tail(&(next_partner_info->node), head); in hvcs_get_partner_info()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/timer/
Dbase.c41 list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) { in nvkm_timer_alarm_trigger()
43 list_move_tail(&alarm->head, &exec); in nvkm_timer_alarm_trigger()
48 alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head); in nvkm_timer_alarm_trigger()
56 list_for_each_entry_safe(alarm, atemp, &exec, head) { in nvkm_timer_alarm_trigger()
57 list_del_init(&alarm->head); in nvkm_timer_alarm_trigger()
73 if (!list_empty(&alarm->head)) in nvkm_timer_alarm()
74 list_del(&alarm->head); in nvkm_timer_alarm()
76 list_for_each_entry(list, &tmr->alarms, head) { in nvkm_timer_alarm()
80 list_add_tail(&alarm->head, &list->head); in nvkm_timer_alarm()
93 list_del_init(&alarm->head); in nvkm_timer_alarm_cancel()
/linux-4.4.14/tools/usb/usbip/libsrc/
Dlist.h57 static inline void list_add(struct list_head *new, struct list_head *head) in list_add() argument
59 __list_add(new, head, head->next); in list_add()
110 #define list_for_each(pos, head) \ argument
111 for (pos = (head)->next; pos != (head); pos = pos->next)
119 #define list_for_each_safe(pos, n, head) \ argument
120 for (pos = (head)->next, n = pos->next; pos != (head); \
/linux-4.4.14/kernel/bpf/
Dhashtab.c128 static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash, in lookup_elem_raw() argument
133 hlist_for_each_entry_rcu(l, head, hash_node) in lookup_elem_raw()
144 struct hlist_head *head; in htab_map_lookup_elem() local
155 head = select_bucket(htab, hash); in htab_map_lookup_elem()
157 l = lookup_elem_raw(head, hash, key, key_size); in htab_map_lookup_elem()
169 struct hlist_head *head; in htab_map_get_next_key() local
180 head = select_bucket(htab, hash); in htab_map_get_next_key()
183 l = lookup_elem_raw(head, hash, key, key_size); in htab_map_get_next_key()
207 head = select_bucket(htab, i); in htab_map_get_next_key()
210 next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), in htab_map_get_next_key()
[all …]
/linux-4.4.14/fs/gfs2/
Drecovery.c57 struct list_head *head = &jd->jd_revoke_list; in gfs2_revoke_add() local
61 list_for_each_entry(rr, head, rr_list) { in gfs2_revoke_add()
79 list_add(&rr->rr_list, head); in gfs2_revoke_add()
110 struct list_head *head = &jd->jd_revoke_list; in gfs2_revoke_clean() local
113 while (!list_empty(head)) { in gfs2_revoke_clean()
114 rr = list_entry(head->next, struct gfs2_revoke_replay, rr_list); in gfs2_revoke_clean()
151 struct gfs2_log_header_host *head) in get_log_header() argument
173 *head = lh; in get_log_header()
192 struct gfs2_log_header_host *head) in find_good_lh() argument
198 error = get_log_header(jd, *blk, head); in find_good_lh()
[all …]
/linux-4.4.14/fs/9p/
Dvfs_dir.c55 int head; member
133 if (rdir->tail == rdir->head) { in v9fs_dir_readdir()
144 rdir->head = 0; in v9fs_dir_readdir()
147 while (rdir->head < rdir->tail) { in v9fs_dir_readdir()
149 err = p9stat_read(fid->clnt, rdir->buf + rdir->head, in v9fs_dir_readdir()
150 rdir->tail - rdir->head, &st); in v9fs_dir_readdir()
164 rdir->head += reclen; in v9fs_dir_readdir()
194 if (rdir->tail == rdir->head) { in v9fs_dir_readdir_dotl()
200 rdir->head = 0; in v9fs_dir_readdir_dotl()
204 while (rdir->head < rdir->tail) { in v9fs_dir_readdir_dotl()
[all …]
/linux-4.4.14/sound/oss/
Dmsnd.c105 f->head = 0; in msnd_fifo_alloc()
116 f->len = f->tail = f->head = 0; in msnd_fifo_make_empty()
127 if (f->head <= f->tail) { in msnd_fifo_write_io()
133 nwritten = f->head - f->tail; in msnd_fifo_write_io()
158 if (f->head <= f->tail) { in msnd_fifo_write()
164 nwritten = f->head - f->tail; in msnd_fifo_write()
189 if (f->tail <= f->head) { in msnd_fifo_read_io()
191 if (nread > f->n - f->head) in msnd_fifo_read_io()
192 nread = f->n - f->head; in msnd_fifo_read_io()
195 nread = f->tail - f->head; in msnd_fifo_read_io()
[all …]
/linux-4.4.14/net/ipv4/netfilter/
Dnf_conntrack_l3proto_ipv4_compat.c50 struct hlist_nulls_node *head) in ct_get_next() argument
55 head = rcu_dereference(hlist_nulls_next_rcu(head)); in ct_get_next()
56 while (is_a_nulls(head)) { in ct_get_next()
57 if (likely(get_nulls_value(head) == st->bucket)) { in ct_get_next()
61 head = rcu_dereference( in ct_get_next()
64 return head; in ct_get_next()
69 struct hlist_nulls_node *head = ct_get_first(seq); in ct_get_idx() local
71 if (head) in ct_get_idx()
72 while (pos && (head = ct_get_next(seq, head))) in ct_get_idx()
74 return pos ? NULL : head; in ct_get_idx()
[all …]
/linux-4.4.14/fs/hfsplus/
Dbtree.c135 struct hfs_btree_header_rec *head; in hfs_btree_open() local
165 head = (struct hfs_btree_header_rec *)(kmap(page) + in hfs_btree_open()
167 tree->root = be32_to_cpu(head->root); in hfs_btree_open()
168 tree->leaf_count = be32_to_cpu(head->leaf_count); in hfs_btree_open()
169 tree->leaf_head = be32_to_cpu(head->leaf_head); in hfs_btree_open()
170 tree->leaf_tail = be32_to_cpu(head->leaf_tail); in hfs_btree_open()
171 tree->node_count = be32_to_cpu(head->node_count); in hfs_btree_open()
172 tree->free_nodes = be32_to_cpu(head->free_nodes); in hfs_btree_open()
173 tree->attributes = be32_to_cpu(head->attributes); in hfs_btree_open()
174 tree->node_size = be16_to_cpu(head->node_size); in hfs_btree_open()
[all …]
/linux-4.4.14/arch/x86/kernel/cpu/
Dperf_event_intel_bts.c57 local_t head; member
143 index = local_read(&buf->head); in bts_config_buffer()
167 static void bts_buffer_pad_out(struct bts_phys *phys, unsigned long head) in bts_buffer_pad_out() argument
169 unsigned long index = head - phys->offset; in bts_buffer_pad_out()
191 unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head; in bts_update() local
196 head = index + bts_buffer_offset(buf, buf->cur_buf); in bts_update()
197 old = local_xchg(&buf->head, head); in bts_update()
200 if (old == head) in bts_update()
210 local_add(head - old, &buf->data_size); in bts_update()
212 local_set(&buf->data_size, head); in bts_update()
[all …]
/linux-4.4.14/net/ieee802154/6lowpan/
Dreassembly.c229 struct sk_buff *fp, *head = fq->q.fragments; in lowpan_frag_reasm() local
236 head = prev->next; in lowpan_frag_reasm()
237 fp = skb_clone(head, GFP_ATOMIC); in lowpan_frag_reasm()
242 fp->next = head->next; in lowpan_frag_reasm()
247 skb_morph(head, fq->q.fragments); in lowpan_frag_reasm()
248 head->next = fq->q.fragments->next; in lowpan_frag_reasm()
251 fq->q.fragments = head; in lowpan_frag_reasm()
255 if (skb_unclone(head, GFP_ATOMIC)) in lowpan_frag_reasm()
262 if (skb_has_frag_list(head)) { in lowpan_frag_reasm()
269 clone->next = head->next; in lowpan_frag_reasm()
[all …]
/linux-4.4.14/fs/btrfs/
Ddelayed-ref.c158 struct btrfs_delayed_ref_head *head) in btrfs_delayed_ref_lock() argument
164 if (mutex_trylock(&head->mutex)) in btrfs_delayed_ref_lock()
167 atomic_inc(&head->node.refs); in btrfs_delayed_ref_lock()
170 mutex_lock(&head->mutex); in btrfs_delayed_ref_lock()
172 if (!head->node.in_tree) { in btrfs_delayed_ref_lock()
173 mutex_unlock(&head->mutex); in btrfs_delayed_ref_lock()
174 btrfs_put_delayed_ref(&head->node); in btrfs_delayed_ref_lock()
177 btrfs_put_delayed_ref(&head->node); in btrfs_delayed_ref_lock()
183 struct btrfs_delayed_ref_head *head, in drop_delayed_ref() argument
187 head = btrfs_delayed_node_to_head(ref); in drop_delayed_ref()
[all …]
/linux-4.4.14/arch/x86/kernel/
Dnmi.c38 struct list_head head; member
45 .head = LIST_HEAD_INIT(nmi_desc[0].head),
49 .head = LIST_HEAD_INIT(nmi_desc[1].head),
53 .head = LIST_HEAD_INIT(nmi_desc[2].head),
57 .head = LIST_HEAD_INIT(nmi_desc[3].head),
127 list_for_each_entry_rcu(a, &desc->head, list) { in nmi_handle()
168 WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head)); in __register_nmi_handler()
169 WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head)); in __register_nmi_handler()
170 WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head)); in __register_nmi_handler()
177 list_add_rcu(&action->list, &desc->head); in __register_nmi_handler()
[all …]
/linux-4.4.14/fs/hfs/
Dbtree.c21 struct hfs_btree_header_rec *head; in hfs_btree_open() local
82 head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc)); in hfs_btree_open()
83 tree->root = be32_to_cpu(head->root); in hfs_btree_open()
84 tree->leaf_count = be32_to_cpu(head->leaf_count); in hfs_btree_open()
85 tree->leaf_head = be32_to_cpu(head->leaf_head); in hfs_btree_open()
86 tree->leaf_tail = be32_to_cpu(head->leaf_tail); in hfs_btree_open()
87 tree->node_count = be32_to_cpu(head->node_count); in hfs_btree_open()
88 tree->free_nodes = be32_to_cpu(head->free_nodes); in hfs_btree_open()
89 tree->attributes = be32_to_cpu(head->attributes); in hfs_btree_open()
90 tree->node_size = be16_to_cpu(head->node_size); in hfs_btree_open()
[all …]
/linux-4.4.14/arch/s390/oprofile/
Dbacktrace.c58 unsigned long head; in s390_backtrace() local
64 head = regs->gprs[15]; in s390_backtrace()
65 head_sf = (struct stack_frame*)head; in s390_backtrace()
70 head = head_sf->back_chain; in s390_backtrace()
72 head = __show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE, in s390_backtrace()
75 __show_trace(&depth, head, S390_lowcore.thread_info, in s390_backtrace()
/linux-4.4.14/net/tipc/
Dmsg.c122 struct sk_buff *head = *headbuf; in tipc_buf_append() local
139 if (unlikely(head)) in tipc_buf_append()
143 head = *headbuf = frag; in tipc_buf_append()
145 TIPC_SKB_CB(head)->tail = NULL; in tipc_buf_append()
146 if (skb_is_nonlinear(head)) { in tipc_buf_append()
147 skb_walk_frags(head, tail) { in tipc_buf_append()
148 TIPC_SKB_CB(head)->tail = tail; in tipc_buf_append()
151 skb_frag_list_init(head); in tipc_buf_append()
156 if (!head) in tipc_buf_append()
159 if (skb_try_coalesce(head, frag, &headstolen, &delta)) { in tipc_buf_append()
[all …]
/linux-4.4.14/drivers/gpu/drm/mga/
Dmga_dma.c106 u32 head, tail; in mga_do_dma_flush() local
141 head = MGA_READ(MGA_PRIMADDRESS); in mga_do_dma_flush()
143 if (head <= tail) in mga_do_dma_flush()
146 primary->space = head - tail; in mga_do_dma_flush()
148 DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset)); in mga_do_dma_flush()
161 u32 head, tail; in mga_do_dma_wrap_start() local
179 head = MGA_READ(MGA_PRIMADDRESS); in mga_do_dma_wrap_start()
181 if (head == dev_priv->primary->offset) in mga_do_dma_wrap_start()
184 primary->space = head - dev_priv->primary->offset; in mga_do_dma_wrap_start()
186 DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset)); in mga_do_dma_wrap_start()
[all …]
/linux-4.4.14/drivers/isdn/capi/
Dcapilib.c80 void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize) in capilib_new_ncci() argument
98 list_add_tail(&np->list, head); in capilib_new_ncci()
104 void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci) in capilib_free_ncci() argument
109 list_for_each(l, head) { in capilib_free_ncci()
125 void capilib_release_appl(struct list_head *head, u16 applid) in capilib_release_appl() argument
130 list_for_each_safe(l, n, head) { in capilib_release_appl()
142 void capilib_release(struct list_head *head) in capilib_release() argument
147 list_for_each_safe(l, n, head) { in capilib_release()
157 u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid) in capilib_data_b3_req() argument
162 list_for_each(l, head) { in capilib_data_b3_req()
[all …]
/linux-4.4.14/drivers/dma/
Dvirt-dma.h44 void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
135 struct list_head *head) in vchan_get_all_descriptors() argument
137 list_splice_tail_init(&vc->desc_submitted, head); in vchan_get_all_descriptors()
138 list_splice_tail_init(&vc->desc_issued, head); in vchan_get_all_descriptors()
139 list_splice_tail_init(&vc->desc_completed, head); in vchan_get_all_descriptors()
145 LIST_HEAD(head); in vchan_free_chan_resources()
148 vchan_get_all_descriptors(vc, &head); in vchan_free_chan_resources()
151 vchan_dma_desc_free_list(vc, &head); in vchan_free_chan_resources()
Dvirt-dma.c65 LIST_HEAD(head); in vchan_complete()
68 list_splice_tail_init(&vc->desc_completed, &head); in vchan_complete()
80 while (!list_empty(&head)) { in vchan_complete()
81 vd = list_first_entry(&head, struct virt_dma_desc, node); in vchan_complete()
94 void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) in vchan_dma_desc_free_list() argument
96 while (!list_empty(head)) { in vchan_dma_desc_free_list()
97 struct virt_dma_desc *vd = list_first_entry(head, in vchan_dma_desc_free_list()
/linux-4.4.14/arch/m68k/68360/
DMakefile9 extra-y := head.o
11 $(obj)/head.o: $(obj)/head-$(model-y).o
12 ln -sf head-$(model-y).o $(obj)/head.o
/linux-4.4.14/drivers/isdn/gigaset/
Dasyncdata.c51 unsigned char *src = inbuf->data + inbuf->head; in cmd_loop()
123 unsigned char *src = inbuf->data + inbuf->head; in lock_loop()
145 unsigned char *src = inbuf->data + inbuf->head; in hdlc_loop()
291 unsigned char *src = inbuf->data + inbuf->head; in iraw_loop()
347 if (inbuf->data[inbuf->head] == DLE_FLAG && in handle_dle()
350 inbuf->head++; in handle_dle()
351 if (inbuf->head == inbuf->tail || in handle_dle()
352 inbuf->head == RBUFSIZE) { in handle_dle()
366 switch (inbuf->data[inbuf->head]) { in handle_dle()
372 inbuf->head++; /* byte consumed */ in handle_dle()
[all …]
/linux-4.4.14/net/sunrpc/
Dxdr.c135 struct kvec *head = xdr->head; in xdr_inline_pages() local
137 char *buf = (char *)head->iov_base; in xdr_inline_pages()
138 unsigned int buflen = head->iov_len; in xdr_inline_pages()
140 head->iov_len = offset; in xdr_inline_pages()
318 struct kvec *head, *tail; in xdr_shrink_bufhead() local
323 head = buf->head; in xdr_shrink_bufhead()
325 WARN_ON_ONCE(len > head->iov_len); in xdr_shrink_bufhead()
326 if (len > head->iov_len) in xdr_shrink_bufhead()
327 len = head->iov_len; in xdr_shrink_bufhead()
356 (char *)head->iov_base + in xdr_shrink_bufhead()
[all …]
/linux-4.4.14/include/linux/wimax/
Ddebug.h172 void __d_head(char *head, size_t head_size, in __d_head() argument
176 head[0] = 0; in __d_head()
181 snprintf(head, head_size, "%s %s: ", in __d_head()
201 char head[64]; \
204 __d_head(head, sizeof(head), dev); \
205 printk(KERN_ERR "%s%s%s: " f, head, __func__, tag, ##a); \
416 char head[64]; \
419 __d_head(head, sizeof(head), dev); \
420 print_hex_dump(KERN_ERR, head, 0, 16, 1, \
/linux-4.4.14/fs/
Dbuffer.c90 struct buffer_head *head, *bh; in buffer_check_dirty_writeback() local
102 head = page_buffers(page); in buffer_check_dirty_writeback()
103 bh = head; in buffer_check_dirty_writeback()
112 } while (bh != head); in buffer_check_dirty_writeback()
209 struct buffer_head *head; in __find_get_block_slow() local
221 head = page_buffers(page); in __find_get_block_slow()
222 bh = head; in __find_get_block_slow()
232 } while (bh != head); in __find_get_block_slow()
684 struct buffer_head *head = page_buffers(page); in __set_page_dirty_buffers() local
685 struct buffer_head *bh = head; in __set_page_dirty_buffers()
[all …]
Dseq_file.c801 struct list_head *seq_list_start(struct list_head *head, loff_t pos) in seq_list_start() argument
805 list_for_each(lh, head) in seq_list_start()
813 struct list_head *seq_list_start_head(struct list_head *head, loff_t pos) in seq_list_start_head() argument
816 return head; in seq_list_start_head()
818 return seq_list_start(head, pos - 1); in seq_list_start_head()
822 struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos) in seq_list_next() argument
828 return lh == head ? NULL : lh; in seq_list_next()
839 struct hlist_node *seq_hlist_start(struct hlist_head *head, loff_t pos) in seq_hlist_start() argument
843 hlist_for_each(node, head) in seq_hlist_start()
858 struct hlist_node *seq_hlist_start_head(struct hlist_head *head, loff_t pos) in seq_hlist_start_head() argument
[all …]
/linux-4.4.14/scripts/
Dkernel-doc-xml-ref116 my $head = $arg;
119 $head = $1;
122 return "<link linkend=\"$key\">$head</link>$tail";
138 my ($head, $tail) = split_pointer($arg);
139 return "<link linkend=\"$key\">$head</link>$tail";
161 my ($head, $tail) = split_pointer($type);
162 return "<link linkend=\"$keyname\">$head</link>$tail";
/linux-4.4.14/scripts/gdb/linux/
Dlists.py21 def list_check(head): argument
23 if (head.type == list_head.get_type().pointer()):
24 head = head.dereference()
25 elif (head.type != list_head.get_type()):
27 c = head
74 if c == head:
/linux-4.4.14/drivers/scsi/arm/
Dqueue.c64 INIT_LIST_HEAD(&queue->head); in queue_initialise()
92 if (!list_empty(&queue->head)) in queue_free()
106 int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head) in __queue_add() argument
126 if (head) in __queue_add()
127 list_add(l, &queue->head); in __queue_add()
129 list_add_tail(l, &queue->head); in __queue_add()
168 list_for_each(l, &queue->head) { in queue_remove_exclude()
193 if (!list_empty(&queue->head)) in queue_remove()
194 SCpnt = __queue_remove(queue, queue->head.next); in queue_remove()
217 list_for_each(l, &queue->head) { in queue_remove_tgtluntag()
[all …]
/linux-4.4.14/arch/x86/kernel/cpu/mcheck/
Dmce-genpool.c31 struct llist_node *head; in mce_gen_pool_process() local
35 head = llist_del_all(&mce_event_llist); in mce_gen_pool_process()
36 if (!head) in mce_gen_pool_process()
39 head = llist_reverse_order(head); in mce_gen_pool_process()
40 llist_for_each_entry_safe(node, tmp, head, llnode) { in mce_gen_pool_process()
/linux-4.4.14/drivers/char/agp/
Disoch.c22 static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new) in agp_3_5_dev_list_insert() argument
27 list_for_each(pos, head) { in agp_3_5_dev_list_insert()
39 struct list_head *pos, *tmp, *head = &list->list, *start = head->next; in agp_3_5_dev_list_sort() local
42 INIT_LIST_HEAD(head); in agp_3_5_dev_list_sort()
44 for (pos=start; pos!=head; ) { in agp_3_5_dev_list_sort()
53 agp_3_5_dev_list_insert(head, tmp); in agp_3_5_dev_list_sort()
80 struct list_head *head = &dev_list->list, *pos; in agp_3_5_isochronous_node_enable() local
135 list_for_each(pos, head) { in agp_3_5_isochronous_node_enable()
290 struct list_head *head = &dev_list->list, *pos; in agp_3_5_nonisochronous_node_enable() local
302 for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) { in agp_3_5_nonisochronous_node_enable()
[all …]
/linux-4.4.14/fs/nfs/
Dwrite.c183 nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset) in nfs_page_group_search_locked() argument
187 WARN_ON_ONCE(head != head->wb_head); in nfs_page_group_search_locked()
188 WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_head->wb_flags)); in nfs_page_group_search_locked()
190 req = head; in nfs_page_group_search_locked()
197 } while (req != head); in nfs_page_group_search_locked()
324 nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head, in nfs_unroll_locks_and_wait() argument
332 for (tmp = head ; tmp != req; tmp = tmp->wb_this_page) in nfs_unroll_locks_and_wait()
340 nfs_page_group_unlock(head); in nfs_unroll_locks_and_wait()
344 nfs_release_request(head); in nfs_unroll_locks_and_wait()
431 struct nfs_page *head, *subreq; in nfs_lock_and_join_requests() local
[all …]
/linux-4.4.14/drivers/gpu/drm/nouveau/
Dnouveau_display.c57 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { in nouveau_display_vblank_enable()
71 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { in nouveau_display_vblank_disable()
103 .base.head = nouveau_crtc(crtc)->index, in nouveau_display_scanoutpos_head()
142 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { in nouveau_display_scanoutpos()
158 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { in nouveau_display_vblstamp()
176 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { in nouveau_display_vblank_fini()
189 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { in nouveau_display_vblank_init()
195 .head = nv_crtc->index, in nouveau_display_vblank_init()
376 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { in nouveau_display_init()
392 int head; in nouveau_display_fini() local
[all …]
Dnv50_display.c68 const s32 *oclass, u8 head, void *data, u32 size, in nv50_chan_create() argument
120 const s32 *oclass, u8 head, void *data, u32 size, in nv50_pioc_create() argument
123 return nv50_chan_create(device, disp, oclass, head, data, size, in nv50_pioc_create()
137 int head, struct nv50_curs *curs) in nv50_curs_create() argument
140 .head = head, in nv50_curs_create()
151 return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args), in nv50_curs_create()
165 int head, struct nv50_oimm *oimm) in nv50_oimm_create() argument
168 .head = head, in nv50_oimm_create()
179 return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args), in nv50_oimm_create()
219 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf, in nv50_dmac_create() argument
[all …]
/linux-4.4.14/net/ceph/
Dpagelist.c11 struct page *page = list_entry(pl->head.prev, struct page, lru); in ceph_pagelist_unmap_tail()
22 while (!list_empty(&pl->head)) { in ceph_pagelist_release()
23 struct page *page = list_first_entry(&pl->head, struct page, in ceph_pagelist_release()
48 list_add_tail(&page->lru, &pl->head); in ceph_pagelist_addpage()
119 c->page_lru = pl->head.prev; in ceph_pagelist_set_cursor()
137 while (pl->head.prev != c->page_lru) { in ceph_pagelist_truncate()
138 page = list_entry(pl->head.prev, struct page, lru); in ceph_pagelist_truncate()
144 if (!list_empty(&pl->head)) { in ceph_pagelist_truncate()
145 page = list_entry(pl->head.prev, struct page, lru); in ceph_pagelist_truncate()
/linux-4.4.14/arch/ia64/hp/sim/
Dsimserial.c117 if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) { in rs_put_char()
121 info->xmit.buf[info->xmit.head] = ch; in rs_put_char()
122 info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1); in rs_put_char()
145 if (info->xmit.head == info->xmit.tail || tty->stopped) { in transmit_chars()
148 info->xmit.head, info->xmit.tail, tty->stopped); in transmit_chars()
160 count = min(CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE), in transmit_chars()
169 count = CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); in transmit_chars()
182 if (info->xmit.head == info->xmit.tail || tty->stopped || in rs_flush_chars()
201 c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); in rs_write()
207 memcpy(info->xmit.buf + info->xmit.head, buf, c); in rs_write()
[all …]
/linux-4.4.14/drivers/input/joystick/iforce/
Diforce-packets.c55 int head, tail; in iforce_send_packet() local
63 head = iforce->xmit.head; in iforce_send_packet()
67 if (CIRC_SPACE(head, tail, XMIT_SIZE) < n+2) { in iforce_send_packet()
74 empty = head == tail; in iforce_send_packet()
75 XMIT_INC(iforce->xmit.head, n+2); in iforce_send_packet()
80 iforce->xmit.buf[head] = HI(cmd); in iforce_send_packet()
81 XMIT_INC(head, 1); in iforce_send_packet()
82 iforce->xmit.buf[head] = LO(cmd); in iforce_send_packet()
83 XMIT_INC(head, 1); in iforce_send_packet()
85 c = CIRC_SPACE_TO_END(head, tail, XMIT_SIZE); in iforce_send_packet()
[all …]
/linux-4.4.14/kernel/power/
Dconsole.c22 struct list_head head; member
50 list_for_each_entry(tmp, &pm_vt_switch_list, head) { in pm_vt_switch_required()
65 list_add(&entry->head, &pm_vt_switch_list); in pm_vt_switch_required()
82 list_for_each_entry(tmp, &pm_vt_switch_list, head) { in pm_vt_switch_unregister()
84 list_del(&tmp->head); in pm_vt_switch_unregister()
118 list_for_each_entry(entry, &pm_vt_switch_list, head) { in pm_vt_switch()
/linux-4.4.14/net/sctp/
Dinput.c713 struct sctp_hashbucket *head; in __sctp_hash_endpoint() local
718 head = &sctp_ep_hashtable[epb->hashent]; in __sctp_hash_endpoint()
720 write_lock(&head->lock); in __sctp_hash_endpoint()
721 hlist_add_head(&epb->node, &head->chain); in __sctp_hash_endpoint()
722 write_unlock(&head->lock); in __sctp_hash_endpoint()
737 struct sctp_hashbucket *head; in __sctp_unhash_endpoint() local
744 head = &sctp_ep_hashtable[epb->hashent]; in __sctp_unhash_endpoint()
746 write_lock(&head->lock); in __sctp_unhash_endpoint()
748 write_unlock(&head->lock); in __sctp_unhash_endpoint()
763 struct sctp_hashbucket *head; in __sctp_rcv_lookup_endpoint() local
[all …]
Dproc.c211 struct sctp_hashbucket *head; in sctp_eps_seq_show() local
220 head = &sctp_ep_hashtable[hash]; in sctp_eps_seq_show()
222 read_lock(&head->lock); in sctp_eps_seq_show()
223 sctp_for_each_hentry(epb, &head->chain) { in sctp_eps_seq_show()
237 read_unlock(&head->lock); in sctp_eps_seq_show()
319 struct sctp_hashbucket *head; in sctp_assocs_seq_show() local
328 head = &sctp_assoc_hashtable[hash]; in sctp_assocs_seq_show()
330 read_lock(&head->lock); in sctp_assocs_seq_show()
331 sctp_for_each_hentry(epb, &head->chain) { in sctp_assocs_seq_show()
364 read_unlock(&head->lock); in sctp_assocs_seq_show()
[all …]
/linux-4.4.14/arch/sparc/mm/
Dgup.c39 struct page *page, *head; in gup_pte_range() local
52 head = compound_head(page); in gup_pte_range()
53 if (!page_cache_get_speculative(head)) in gup_pte_range()
56 put_page(head); in gup_pte_range()
59 if (head != page) in gup_pte_range()
73 struct page *head, *page, *tail; in gup_huge_pmd() local
83 head = pmd_page(pmd); in gup_huge_pmd()
84 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); in gup_huge_pmd()
87 VM_BUG_ON(compound_head(page) != head); in gup_huge_pmd()
94 if (!page_cache_add_speculative(head, refs)) { in gup_huge_pmd()
[all …]
/linux-4.4.14/arch/arm/mach-netx/
Dxc.c120 struct fw_header *head; in xc_request_firmware() local
135 head = (struct fw_header *)fw->data; in xc_request_firmware()
136 if (head->magic != 0x4e657458) { in xc_request_firmware()
137 if (head->magic == 0x5874654e) { in xc_request_firmware()
144 head->magic); in xc_request_firmware()
149 x->type = head->type; in xc_request_firmware()
150 x->version = head->version; in xc_request_firmware()
155 src = fw->data + head->fw_desc[i].ofs; in xc_request_firmware()
158 size = head->fw_desc[i].size - sizeof (unsigned int); in xc_request_firmware()
165 src = fw->data + head->fw_desc[i].patch_ofs; in xc_request_firmware()
[all …]
/linux-4.4.14/drivers/target/tcm_fc/
Dtfc_sess.c166 struct hlist_head *head; in ft_sess_get() local
174 head = &tport->hash[ft_sess_hash(port_id)]; in ft_sess_get()
175 hlist_for_each_entry_rcu(sess, head, hash) { in ft_sess_get()
197 struct hlist_head *head; in ft_sess_create() local
199 head = &tport->hash[ft_sess_hash(port_id)]; in ft_sess_create()
200 hlist_for_each_entry_rcu(sess, head, hash) in ft_sess_create()
219 hlist_add_head_rcu(&sess->hash, head); in ft_sess_create()
250 struct hlist_head *head; in ft_sess_delete() local
253 head = &tport->hash[ft_sess_hash(port_id)]; in ft_sess_delete()
254 hlist_for_each_entry_rcu(sess, head, hash) { in ft_sess_delete()
[all …]
/linux-4.4.14/fs/befs/
Dbtree.c82 befs_host_btree_nodehead head; /* head of node converted to cpu byteorder */ member
219 node->head.left = fs64_to_cpu(sb, node->od_node->left); in befs_bt_read_node()
220 node->head.right = fs64_to_cpu(sb, node->od_node->right); in befs_bt_read_node()
221 node->head.overflow = fs64_to_cpu(sb, node->od_node->overflow); in befs_bt_read_node()
222 node->head.all_key_count = in befs_bt_read_node()
224 node->head.all_key_length = in befs_bt_read_node()
287 node_off = this_node->head.overflow; in befs_btree_find()
356 last = node->head.all_key_count - 1; in befs_find_key()
467 while (key_sum + this_node->head.all_key_count <= key_no) { in befs_btree_read()
470 if (this_node->head.right == befs_bt_inval) { in befs_btree_read()
[all …]
/linux-4.4.14/net/sunrpc/auth_gss/
Dgss_krb5_wrap.c58 iov = &buf->head[0]; in gss_krb5_add_padding()
72 if (len <= buf->head[0].iov_len) { in gss_krb5_remove_padding()
73 pad = *(u8 *)(buf->head[0].iov_base + len - 1); in gss_krb5_remove_padding()
74 if (pad > buf->head[0].iov_len) in gss_krb5_remove_padding()
76 buf->head[0].iov_len -= pad; in gss_krb5_remove_padding()
79 len -= buf->head[0].iov_len; in gss_krb5_remove_padding()
186 ptr = buf->head[0].iov_base + offset; in gss_wrap_kerberos_v1()
287 ptr = (u8 *)buf->head[0].iov_base + offset; in gss_unwrap_kerberos_v1()
316 (unsigned char *)buf->head[0].iov_base; in gss_unwrap_kerberos_v1()
377 orig_start = buf->head[0].iov_base + offset; in gss_unwrap_kerberos_v1()
[all …]
/linux-4.4.14/drivers/xen/events/
Devents_fifo.c61 uint32_t head[EVTCHN_FIFO_MAX_QUEUES]; member
112 q->head[i] = 0; in init_control_block()
288 uint32_t head; in consume_one_event() local
292 head = q->head[priority]; in consume_one_event()
298 if (head == 0) { in consume_one_event()
300 head = control_block->head[priority]; in consume_one_event()
303 port = head; in consume_one_event()
305 head = clear_linked(word); in consume_one_event()
314 if (head == 0) in consume_one_event()
324 q->head[priority] = head; in consume_one_event()
/linux-4.4.14/Documentation/trace/
Dring-buffer-design.txt19 head - where new reads happen in the ring buffer.
100 the head page, and its previous pointer pointing to a page before
101 the head page.
109 new head page.
115 show the head page in the buffer, it is for demonstrating a swap
202 head page - the next page in the ring buffer that will be swapped
297 head page
303 head page commit page |
311 There is a special case that the head page is after either the commit page
313 swapped with the reader page. This is because the head page is always
[all …]
/linux-4.4.14/drivers/gpu/drm/qxl/
Dqxl_display.c34 static bool qxl_head_enabled(struct qxl_head *head) in qxl_head_enabled() argument
36 return head->width && head->height; in qxl_head_enabled()
109 struct qxl_head *head; in qxl_update_offset_props() local
111 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { in qxl_update_offset_props()
114 head = &qdev->client_monitors_config->heads[output->index]; in qxl_update_offset_props()
117 dev->mode_config.suggested_x_property, head->x); in qxl_update_offset_props()
119 dev->mode_config.suggested_y_property, head->y); in qxl_update_offset_props()
151 struct qxl_head *head; in qxl_add_monitors_config_modes() local
155 head = &qdev->client_monitors_config->heads[h]; in qxl_add_monitors_config_modes()
157 mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false, in qxl_add_monitors_config_modes()
[all …]
/linux-4.4.14/drivers/net/ethernet/amd/
Dsun3lance.c105 #define PKTBUF_ADDR(head) (void *)((unsigned long)(MEM) | (head)->base) argument
518 struct lance_tx_head *head; in lance_start_xmit() local
612 head = &(MEM->tx_head[entry]); in lance_start_xmit()
623 head->length = (-len) | 0xf000; in lance_start_xmit()
624 head->misc = 0; in lance_start_xmit()
626 skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len); in lance_start_xmit()
628 memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len); in lance_start_xmit()
630 head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; in lance_start_xmit()
700 struct lance_tx_head *head = &(MEM->tx_head[old_tx]); in lance_interrupt() local
704 if (head->flag & TMD1_OWN_CHIP) in lance_interrupt()
[all …]
/linux-4.4.14/net/dccp/ccids/lib/
Dpacket_history.h56 tfrc_tx_hist_find_entry(struct tfrc_tx_hist_entry *head, u64 seqno) in tfrc_tx_hist_find_entry() argument
58 while (head != NULL && head->seqno != seqno) in tfrc_tx_hist_find_entry()
59 head = head->next; in tfrc_tx_hist_find_entry()
60 return head; in tfrc_tx_hist_find_entry()
/linux-4.4.14/sound/core/seq/
Dseq_prioq.c66 f->head = NULL; in snd_seq_prioq_new()
180 cur = f->head; /* cursor */ in snd_seq_prioq_cell_in()
208 if (f->head == cur) /* this is the first cell, set head to it */ in snd_seq_prioq_cell_in()
209 f->head = cell; in snd_seq_prioq_cell_in()
229 cell = f->head; in snd_seq_prioq_cell_out()
231 f->head = cell->next; in snd_seq_prioq_cell_out()
263 return f->head; in snd_seq_prioq_cell_peek()
299 cell = f->head; in snd_seq_prioq_leave()
304 if (cell == f->head) { in snd_seq_prioq_leave()
305 f->head = cell->next; in snd_seq_prioq_leave()
[all …]
Dseq_fifo.c55 f->head = NULL; in snd_seq_fifo_new()
136 if (f->head == NULL) in snd_seq_fifo_event_in()
137 f->head = cell; in snd_seq_fifo_event_in()
156 if ((cell = f->head) != NULL) { in fifo_cell_out()
157 f->head = cell->next; in fifo_cell_out()
215 cell->next = f->head; in snd_seq_fifo_cell_putback()
216 f->head = cell; in snd_seq_fifo_cell_putback()
253 oldhead = f->head; in snd_seq_fifo_resize()
256 f->head = NULL; in snd_seq_fifo_resize()
/linux-4.4.14/drivers/pci/hotplug/
Dcpqphp_ctrl.c312 static int sort_by_size(struct pci_resource **head) in sort_by_size() argument
318 if (!(*head)) in sort_by_size()
321 if (!((*head)->next)) in sort_by_size()
328 if (((*head)->next) && in sort_by_size()
329 ((*head)->length > (*head)->next->length)) { in sort_by_size()
331 current_res = *head; in sort_by_size()
332 *head = (*head)->next; in sort_by_size()
333 current_res->next = (*head)->next; in sort_by_size()
334 (*head)->next = current_res; in sort_by_size()
337 current_res = *head; in sort_by_size()
[all …]
/linux-4.4.14/drivers/staging/lustre/lustre/fld/
Dfld_cache.c142 struct list_head *head = &cache->fci_entries_head; in fld_fix_new_list() local
146 list_for_each_entry_safe(f_curr, f_next, head, fce_list) { in fld_fix_new_list()
151 if (&f_next->fce_list == head) in fld_fix_new_list()
385 struct list_head *head; in fld_cache_insert_nolock() local
400 head = &cache->fci_entries_head; in fld_cache_insert_nolock()
402 list_for_each_entry_safe(f_curr, n, head, fce_list) { in fld_cache_insert_nolock()
419 prev = head; in fld_cache_insert_nolock()
452 struct list_head *head; in fld_cache_delete_nolock() local
454 head = &cache->fci_entries_head; in fld_cache_delete_nolock()
455 list_for_each_entry_safe(flde, tmp, head, fce_list) { in fld_cache_delete_nolock()
[all …]
/linux-4.4.14/fs/ext4/
Dpage-io.c72 struct buffer_head *bh, *head; in ext4_finish_bio() local
94 bh = head = page_buffers(page); in ext4_finish_bio()
100 bit_spin_lock(BH_Uptodate_Lock, &head->b_state); in ext4_finish_bio()
111 } while ((bh = bh->b_this_page) != head); in ext4_finish_bio()
112 bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); in ext4_finish_bio()
187 static void dump_completed_IO(struct inode *inode, struct list_head *head) in dump_completed_IO() argument
193 if (list_empty(head)) in dump_completed_IO()
197 list_for_each_entry(io, head, list) { in dump_completed_IO()
230 struct list_head *head) in ext4_do_flush_completed_IO() argument
239 dump_completed_IO(inode, head); in ext4_do_flush_completed_IO()
[all …]
/linux-4.4.14/tools/testing/selftests/timers/
Dclocksource-switch.c56 char *head, *tmp; in get_clocksources() local
67 head = buf; in get_clocksources()
69 while (head - buf < size) { in get_clocksources()
71 for (tmp = head; *tmp != ' '; tmp++) { in get_clocksources()
78 strcpy(list[i], head); in get_clocksources()
79 head = tmp + 1; in get_clocksources()
/linux-4.4.14/include/linux/isdn/
Dcapilli.h106 void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize);
107 void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci);
108 void capilib_release_appl(struct list_head *head, u16 applid);
109 void capilib_release(struct list_head *head);
110 void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid);
111 u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid);
/linux-4.4.14/arch/frv/kernel/
DMakefile5 heads-y := head-uc-fr401.o head-uc-fr451.o head-uc-fr555.o
6 heads-$(CONFIG_MMU) := head-mmu-fr451.o
8 extra-y:= head.o vmlinux.lds
/linux-4.4.14/drivers/block/
Dhd.c122 unsigned int head, sect, cyl, wpcom, lzone, ctl; member
182 if (hd_info[0].head != 0) in hd_setup()
184 hd_info[hdind].head = ints[2]; in hd_setup()
290 static int controller_ready(unsigned int drive, unsigned int head) in controller_ready() argument
297 outb_p(0xA0 | (drive<<4) | head, HD_CURRENT); in controller_ready()
307 unsigned int head, in hd_out() argument
320 if (!controller_ready(disk->unit, head)) { in hd_out()
332 outb_p(0xA0 | (disk->unit << 4) | head, ++port); in hd_out()
383 hd_out(disk, disk->sect, disk->sect, disk->head-1, in reset_hd()
561 if (disk->head > 16) { in do_special_op()
[all …]
/linux-4.4.14/drivers/input/serio/
Dserio_raw.c33 unsigned int tail, head; member
149 empty = serio_raw->head == serio_raw->tail; in serio_raw_fetch_byte()
173 if (serio_raw->head == serio_raw->tail && in serio_raw_read()
191 serio_raw->head != serio_raw->tail || in serio_raw_read()
251 if (serio_raw->head != serio_raw->tail) in serio_raw_poll()
278 unsigned int head = serio_raw->head; in serio_raw_interrupt() local
281 serio_raw->queue[head] = data; in serio_raw_interrupt()
282 head = (head + 1) % SERIO_RAW_QUEUE_LEN; in serio_raw_interrupt()
283 if (likely(head != serio_raw->tail)) { in serio_raw_interrupt()
284 serio_raw->head = head; in serio_raw_interrupt()
Duserio.c41 u8 head; member
62 userio->buf[userio->head] = val; in userio_device_write()
63 userio->head = (userio->head + 1) % USERIO_BUFSIZE; in userio_device_write()
65 if (userio->head == userio->tail) in userio_device_write()
140 nonwrap_len = CIRC_CNT_TO_END(userio->head, in userio_char_read()
167 userio->head != userio->tail); in userio_char_read()
257 if (userio->head != userio->tail) in userio_char_poll()
Dsa1111ps2.c52 unsigned int head; member
98 if (ps2if->head == ps2if->tail) { in ps2_txint()
118 unsigned int head; in ps2_write() local
128 if (ps2if->head == ps2if->tail) in ps2_write()
130 head = (ps2if->head + 1) & (sizeof(ps2if->buf) - 1); in ps2_write()
131 if (head != ps2if->tail) { in ps2_write()
132 ps2if->buf[ps2if->head] = val; in ps2_write()
133 ps2if->head = head; in ps2_write()
/linux-4.4.14/drivers/acpi/
Dacpi_ipmi.c41 struct list_head head; member
73 struct list_head head; member
135 INIT_LIST_HEAD(&ipmi_device->head); in ipmi_dev_alloc()
171 list_del(&ipmi_device->head); in __ipmi_dev_kill()
218 INIT_LIST_HEAD(&ipmi_msg->head); in ipmi_msg_alloc()
354 head); in ipmi_flush_tx_msg()
355 list_del(&tx_msg->head); in ipmi_flush_tx_msg()
374 list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) { in ipmi_cancel_tx_msg()
377 list_del(&tx_msg->head); in ipmi_cancel_tx_msg()
403 list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) { in ipmi_msg_handler()
[all …]
/linux-4.4.14/drivers/net/wireless/cw1200/
Dqueue.c20 struct list_head head; member
74 list_for_each_entry_safe(item, tmp, gc_list, head) { in cw1200_queue_post_gc()
75 list_del(&item->head); in cw1200_queue_post_gc()
89 list_add_tail(&gc_item->head, gc_list); in cw1200_queue_register_post_gc()
93 struct list_head *head, in __cw1200_queue_gc() argument
100 list_for_each_entry_safe(item, tmp, &queue->queue, head) { in __cw1200_queue_gc()
111 cw1200_queue_register_post_gc(head, item); in __cw1200_queue_gc()
113 list_move_tail(&item->head, &queue->free_pool); in __cw1200_queue_gc()
198 list_add_tail(&queue->pool[i].head, &queue->free_pool); in cw1200_queue_init()
213 list_for_each_entry_safe(item, tmp, &queue->pending, head) { in cw1200_queue_clear()
[all …]
/linux-4.4.14/arch/mips/mm/
Dgup.c75 struct page *head, *page; in gup_huge_pmd() local
85 head = pte_page(pte); in gup_huge_pmd()
86 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); in gup_huge_pmd()
88 VM_BUG_ON(compound_head(page) != head); in gup_huge_pmd()
97 get_head_page_multiple(head, refs); in gup_huge_pmd()
141 struct page *head, *page; in gup_huge_pud() local
151 head = pte_page(pte); in gup_huge_pud()
152 page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT); in gup_huge_pud()
154 VM_BUG_ON(compound_head(page) != head); in gup_huge_pud()
163 get_head_page_multiple(head, refs); in gup_huge_pud()
/linux-4.4.14/drivers/mmc/host/
Dvub300.c167 struct sd_command_header head; member
1071 vub300->cmnd.head.header_size = 20; in send_command()
1072 vub300->cmnd.head.header_type = 0x00; in send_command()
1073 vub300->cmnd.head.port_number = 0; /* "0" means port 1 */ in send_command()
1074 vub300->cmnd.head.command_type = 0x00; /* standard read command */ in send_command()
1075 vub300->cmnd.head.response_type = response_type; in send_command()
1076 vub300->cmnd.head.command_index = cmd->opcode; in send_command()
1077 vub300->cmnd.head.arguments[0] = cmd->arg >> 24; in send_command()
1078 vub300->cmnd.head.arguments[1] = cmd->arg >> 16; in send_command()
1079 vub300->cmnd.head.arguments[2] = cmd->arg >> 8; in send_command()
[all …]
/linux-4.4.14/net/openvswitch/
Dflow_table.c257 struct hlist_head *head = flex_array_get(ti->buckets, i); in table_instance_destroy() local
262 hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) { in table_instance_destroy()
295 struct hlist_head *head; in ovs_flow_tbl_dump_next() local
302 head = flex_array_get(ti->buckets, *bucket); in ovs_flow_tbl_dump_next()
303 hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) { in ovs_flow_tbl_dump_next()
328 struct hlist_head *head; in table_instance_insert() local
330 head = find_bucket(ti, flow->flow_table.hash); in table_instance_insert()
331 hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head); in table_instance_insert()
337 struct hlist_head *head; in ufid_table_instance_insert() local
339 head = find_bucket(ti, flow->ufid_table.hash); in ufid_table_instance_insert()
[all …]
/linux-4.4.14/fs/notify/
Dmark.c114 u32 fsnotify_recalc_mask(struct hlist_head *head) in fsnotify_recalc_mask() argument
119 hlist_for_each_entry(mark, head, obj_list) in fsnotify_recalc_mask()
212 void fsnotify_destroy_marks(struct hlist_head *head, spinlock_t *lock) in fsnotify_destroy_marks() argument
225 if (hlist_empty(head)) { in fsnotify_destroy_marks()
229 mark = hlist_entry(head->first, struct fsnotify_mark, obj_list); in fsnotify_destroy_marks()
297 int fsnotify_add_mark_list(struct hlist_head *head, struct fsnotify_mark *mark, in fsnotify_add_mark_list() argument
304 if (hlist_empty(head)) { in fsnotify_add_mark_list()
305 hlist_add_head_rcu(&mark->obj_list, head); in fsnotify_add_mark_list()
310 hlist_for_each_entry(lmark, head, obj_list) { in fsnotify_add_mark_list()
410 struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head, in fsnotify_find_mark() argument
[all …]
Dfsnotify.h18 extern u32 fsnotify_recalc_mask(struct hlist_head *head);
27 extern int fsnotify_add_mark_list(struct hlist_head *head,
44 extern struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head,
47 extern void fsnotify_destroy_marks(struct hlist_head *head, spinlock_t *lock);
/linux-4.4.14/drivers/firmware/efi/
Defi-pstore.c112 struct list_head *head) in efi_pstore_scan_sysfs_enter() argument
115 if (&next->list != head) in efi_pstore_scan_sysfs_enter()
145 struct list_head *head, bool stop) in efi_pstore_scan_sysfs_exit() argument
149 __efi_pstore_scan_sysfs_exit(next, &next->list != head); in efi_pstore_scan_sysfs_exit()
169 struct list_head *head = &efivar_sysfs_list; in efi_pstore_sysfs_entry_iter() local
173 list_for_each_entry_safe(entry, n, head, list) { in efi_pstore_sysfs_entry_iter()
174 efi_pstore_scan_sysfs_enter(entry, n, head); in efi_pstore_sysfs_entry_iter()
177 efi_pstore_scan_sysfs_exit(entry, n, head, size < 0); in efi_pstore_sysfs_entry_iter()
185 list_for_each_entry_safe_from((*pos), n, head, list) { in efi_pstore_sysfs_entry_iter()
186 efi_pstore_scan_sysfs_enter((*pos), n, head); in efi_pstore_sysfs_entry_iter()
[all …]
/linux-4.4.14/net/bridge/
Dbr_fdb.c31 static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
82 static void fdb_rcu_free(struct rcu_head *head) in fdb_rcu_free() argument
85 = container_of(head, struct net_bridge_fdb_entry, rcu); in fdb_rcu_free()
200 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; in br_fdb_find_delete_local() local
204 f = fdb_find(head, addr, vid); in br_fdb_find_delete_local()
469 static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head, in fdb_find() argument
475 hlist_for_each_entry(fdb, head, hlist) { in fdb_find()
483 static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head, in fdb_find_rcu() argument
489 hlist_for_each_entry_rcu(fdb, head, hlist) { in fdb_find_rcu()
497 static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head, in fdb_create() argument
[all …]
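
fdb_find() above hashes a MAC address and VLAN id to a bucket head, then scans that bucket's hlist chain for an exact match. The self-contained sketch below models the same bucket-then-chain lookup with plain pointers; the toy hash and struct fdb_entry layout are simplifications, not the bridge's br_mac_hash() or its real entry.

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define NBUCKETS 256

    struct fdb_entry {
        uint8_t mac[6];
        uint16_t vid;
        struct fdb_entry *next;          /* chain within one bucket */
    };

    static struct fdb_entry *buckets[NBUCKETS];

    static unsigned int mac_hash(const uint8_t *mac, uint16_t vid)
    {
        unsigned int h = vid;
        int i;

        for (i = 0; i < 6; i++)          /* toy hash, not the kernel's */
            h = h * 31 + mac[i];
        return h % NBUCKETS;
    }

    static struct fdb_entry *fdb_find(const uint8_t *mac, uint16_t vid)
    {
        struct fdb_entry *e;

        for (e = buckets[mac_hash(mac, vid)]; e; e = e->next)
            if (e->vid == vid && !memcmp(e->mac, mac, 6))
                return e;
        return NULL;
    }

    static void fdb_insert(struct fdb_entry *e)
    {
        unsigned int h = mac_hash(e->mac, e->vid);

        e->next = buckets[h];            /* add at the bucket head */
        buckets[h] = e;
    }

    int main(void)
    {
        struct fdb_entry e = { { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 }, 10, NULL };

        fdb_insert(&e);
        printf("found: %s\n", fdb_find(e.mac, 10) ? "yes" : "no");
        return 0;
    }

Inserting at the front of the chain mirrors the hlist_add_head_rcu() call seen in fdb_create() above.
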
/linux-4.4.14/arch/x86/mm/
Dgup.c121 struct page *head, *page; in gup_huge_pmd() local
134 head = pmd_page(pmd); in gup_huge_pmd()
135 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); in gup_huge_pmd()
137 VM_BUG_ON_PAGE(compound_head(page) != head, page); in gup_huge_pmd()
145 get_head_page_multiple(head, refs); in gup_huge_pmd()
197 struct page *head, *page; in gup_huge_pud() local
210 head = pud_page(pud); in gup_huge_pud()
211 page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT); in gup_huge_pud()
213 VM_BUG_ON_PAGE(compound_head(page) != head, page); in gup_huge_pud()
221 get_head_page_multiple(head, refs); in gup_huge_pud()
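
gup_huge_pmd() and gup_huge_pud() above (like their mips/mm counterparts earlier in this list) locate the exact base page inside a huge mapping with head + ((addr & ~PMD_MASK) >> PAGE_SHIFT). A quick userspace check of that index arithmetic, assuming the usual x86_64 values PAGE_SHIFT = 12 and PMD_SHIFT = 21 (2 MiB huge pages):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                         /* assumed: 4 KiB base pages */
    #define PMD_SHIFT  21                         /* assumed: 2 MiB huge pages */
    #define PMD_MASK   (~((1UL << PMD_SHIFT) - 1))

    int main(void)
    {
        uint64_t addr = 0x7f3a00234567ULL;

        /* offset of the 4 KiB subpage within its 2 MiB huge page */
        uint64_t idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;

        printf("addr %#llx -> subpage index %llu of %lu\n",
               (unsigned long long)addr, (unsigned long long)idx,
               1UL << (PMD_SHIFT - PAGE_SHIFT));
        return 0;
    }
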
/linux-4.4.14/arch/powerpc/include/asm/
Dps3gpu.h46 static inline int lv1_gpu_display_sync(u64 context_handle, u64 head, in lv1_gpu_display_sync() argument
51 head, ddr_offset, 0, 0); in lv1_gpu_display_sync()
54 static inline int lv1_gpu_display_flip(u64 context_handle, u64 head, in lv1_gpu_display_flip() argument
59 head, ddr_offset, 0, 0); in lv1_gpu_display_flip()
/linux-4.4.14/net/rds/
Dconnection.c74 struct hlist_head *head, in rds_conn_lookup() argument
80 hlist_for_each_entry_rcu(conn, head, c_hash_node) { in rds_conn_lookup()
127 struct hlist_head *head = rds_conn_bucket(laddr, faddr); in __rds_conn_create() local
133 conn = rds_conn_lookup(net, head, laddr, faddr, trans); in __rds_conn_create()
237 found = rds_conn_lookup(net, head, laddr, faddr, trans); in __rds_conn_create()
243 hlist_add_head_rcu(&conn->c_hash_node, head); in __rds_conn_create()
394 struct hlist_head *head; in rds_conn_message_info() local
406 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); in rds_conn_message_info()
407 i++, head++) { in rds_conn_message_info()
408 hlist_for_each_entry_rcu(conn, head, c_hash_node) { in rds_conn_message_info()
[all …]
/linux-4.4.14/drivers/crypto/caam/
Djr.c165 int hw_idx, sw_idx, i, head, tail; in caam_jr_dequeue() local
174 head = ACCESS_ONCE(jrp->head); in caam_jr_dequeue()
181 for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { in caam_jr_dequeue()
189 BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); in caam_jr_dequeue()
226 } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 && in caam_jr_dequeue()
330 int head, tail, desc_size; in caam_jr_enqueue() local
342 head = jrp->head; in caam_jr_enqueue()
346 CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { in caam_jr_enqueue()
352 head_entry = &jrp->entinfo[head]; in caam_jr_enqueue()
370 jrp->head = (head + 1) & (JOBR_DEPTH - 1); in caam_jr_enqueue()
[all …]
/linux-4.4.14/drivers/pci/
Dsetup-bus.c44 static void free_list(struct list_head *head) in free_list() argument
48 list_for_each_entry_safe(dev_res, tmp, head, list) { in free_list()
63 static int add_to_list(struct list_head *head, in add_to_list() argument
83 list_add(&tmp->list, head); in add_to_list()
88 static void remove_from_list(struct list_head *head, in remove_from_list() argument
93 list_for_each_entry_safe(dev_res, tmp, head, list) { in remove_from_list()
102 static struct pci_dev_resource *res_to_dev_res(struct list_head *head, in res_to_dev_res() argument
107 list_for_each_entry(dev_res, head, list) { in res_to_dev_res()
124 static resource_size_t get_res_add_size(struct list_head *head, in get_res_add_size() argument
129 dev_res = res_to_dev_res(head, res); in get_res_add_size()
[all …]
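
free_list() and remove_from_list() above rely on list_for_each_entry_safe() so that the current node can be unlinked and freed while the walk is still in progress. A minimal kernel-style sketch of that delete-while-iterating pattern; struct tmp_res is invented for illustration, only the list API calls are the real ones.

    #include <linux/list.h>
    #include <linux/slab.h>

    struct tmp_res {
        struct list_head list;
        int value;
    };

    static void free_all(struct list_head *head)
    {
        struct tmp_res *res, *tmp;

        /* the "_safe" variant keeps a lookahead, so list_del()+kfree() is allowed */
        list_for_each_entry_safe(res, tmp, head, list) {
            list_del(&res->list);
            kfree(res);
        }
    }
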
/linux-4.4.14/drivers/char/tpm/
Dtpm.h419 struct tpm_input_header *head; in tpm_buf_init() local
428 head = (struct tpm_input_header *) buf->data; in tpm_buf_init()
430 head->tag = cpu_to_be16(tag); in tpm_buf_init()
431 head->length = cpu_to_be32(sizeof(*head)); in tpm_buf_init()
432 head->ordinal = cpu_to_be32(ordinal); in tpm_buf_init()
445 struct tpm_input_header *head = (struct tpm_input_header *) buf->data; in tpm_buf_length() local
447 return be32_to_cpu(head->length); in tpm_buf_length()
452 struct tpm_input_header *head = (struct tpm_input_header *) buf->data; in tpm_buf_tag() local
454 return be16_to_cpu(head->tag); in tpm_buf_tag()
461 struct tpm_input_header *head = (struct tpm_input_header *) buf->data; in tpm_buf_append() local
[all …]
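
tpm_buf_init() above lays down a big-endian tag/length/ordinal header, and tpm_buf_append() grows the stored length as parameters are added. The standalone sketch below mimics that layout with htons()/htonl() standing in for the kernel's cpu_to_be16()/cpu_to_be32(); the struct name, tag and ordinal values are illustrative only, not TPM specification constants.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>      /* htons/htonl as stand-ins for cpu_to_be*() */

    struct cmd_header {
        uint16_t tag;
        uint32_t length;
        uint32_t ordinal;
    } __attribute__((packed));

    int main(void)
    {
        uint8_t buf[64];
        struct cmd_header *head = (struct cmd_header *)buf;
        uint32_t param = htonl(20);                 /* one example parameter */

        head->tag     = htons(0x8001);              /* illustrative tag */
        head->length  = htonl(sizeof(*head));
        head->ordinal = htonl(0x0000017b);          /* illustrative ordinal */

        /* append a parameter and grow the length field, like tpm_buf_append() */
        memcpy(buf + ntohl(head->length), &param, sizeof(param));
        head->length = htonl(ntohl(head->length) + sizeof(param));

        printf("command is %u bytes\n", ntohl(head->length));
        return 0;
    }
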
/linux-4.4.14/tools/perf/scripts/python/
Dcompaction-times.py121 head = cls.heads[pid]
122 filtered = head.is_filtered()
126 head = cls.heads[pid] = chead(comm, pid, filtered)
129 head.mark_pending(start_secs, start_nsecs)
133 head = cls.heads[pid]
134 if not head.is_filtered():
135 if head.is_pending():
136 head.do_increment(migrated, fscan, mscan)
142 head = cls.heads[pid]
143 if not head.is_filtered():
[all …]
/linux-4.4.14/arch/tile/include/gxio/
Dmpipe.h1134 unsigned int head; member
1167 int head = iqueue->head + count; in gxio_mpipe_iqueue_advance() local
1168 iqueue->head = in gxio_mpipe_iqueue_advance()
1169 (head & iqueue->mask_num_entries) + in gxio_mpipe_iqueue_advance()
1170 (head >> iqueue->log2_num_entries); in gxio_mpipe_iqueue_advance()
1255 uint64_t head = iqueue->head; in gxio_mpipe_iqueue_try_peek() local
1260 (tail >= head) ? (tail - head) : (iqueue->num_entries - head); in gxio_mpipe_iqueue_try_peek()
1267 next = &iqueue->idescs[head]; in gxio_mpipe_iqueue_try_peek()
/linux-4.4.14/Documentation/
Dcircular-buffers.txt41 (1) A 'head' index - the point at which the producer inserts items into the
47 Typically when the tail pointer is equal to the head pointer, the buffer is
48 empty; and the buffer is full when the head pointer is one less than the tail
51 The head index is incremented when items are added, and the tail index when
52 items are removed. The tail index should never jump the head index, and both
116 they will return a lower bound as the producer controls the head index,
126 head index.
162 unsigned long head = buffer->head;
166 if (CIRC_SPACE(head, tail, buffer->size) >= 1) {
168 struct item *item = buffer[head];
[all …]
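
The circular-buffers.txt excerpts above describe the convention most of the other hits in this list follow: the producer advances the head index, the consumer advances the tail, and CIRC_SPACE()/CIRC_CNT() give each side a safe lower bound. Here is a small single-producer/single-consumer sketch with the two macros written out for a power-of-two buffer; the locking and memory barriers discussed in the full document are deliberately omitted.

    #include <stdio.h>

    #define BUF_SIZE 16      /* must be a power of two for the mask trick */

    /* same definitions as include/linux/circ_buf.h */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    static char buf[BUF_SIZE];
    static unsigned int head, tail;

    static int produce(char c)
    {
        if (CIRC_SPACE(head, tail, BUF_SIZE) < 1)
            return -1;                        /* full: head would catch the tail */
        buf[head] = c;
        head = (head + 1) & (BUF_SIZE - 1);   /* wrap with a mask, not a modulo */
        return 0;
    }

    static int consume(char *c)
    {
        if (CIRC_CNT(head, tail, BUF_SIZE) < 1)
            return -1;                        /* empty: tail has caught the head */
        *c = buf[tail];
        tail = (tail + 1) & (BUF_SIZE - 1);
        return 0;
    }

    int main(void)
    {
        char c;

        produce('a');
        produce('b');
        while (consume(&c) == 0)
            printf("%c\n", c);
        return 0;
    }

The mask wrap only works because the size is a power of two; code with arbitrary queue lengths falls back to a modulo, as in serio_raw_interrupt() earlier in this list.
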
/linux-4.4.14/Documentation/ABI/testing/
Dsysfs-fs-xfs6 The log sequence number (LSN) of the current head of the
23 The current state of the log reserve grant head. It
25 outstanding transactions. The grant head is exported in
34 The current state of the log write grant head. It
37 rolling transactions. The grant head is exported in
/linux-4.4.14/kernel/events/
Dring_buffer.c48 unsigned long head; in perf_output_put_handle() local
51 head = local_read(&rb->head); in perf_output_put_handle()
87 rb->user_page->data_head = head; in perf_output_put_handle()
93 if (unlikely(head != local_read(&rb->head))) { in perf_output_put_handle()
109 unsigned long tail, offset, head; in perf_output_begin() local
145 offset = head = local_read(&rb->head); in perf_output_begin()
147 unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size)) in perf_output_begin()
162 head += size; in perf_output_begin()
163 } while (local_cmpxchg(&rb->head, offset, head) != offset); in perf_output_begin()
170 if (unlikely(head - local_read(&rb->wakeup) > rb->watermark)) in perf_output_begin()
[all …]
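
perf_output_begin() above reserves record space by re-reading the head, checking CIRC_SPACE() against the stale tail and publishing the new head with local_cmpxchg(), retrying if another writer got there first. Below is a rough userspace analogue of that reserve loop using a C11 compare-exchange; the buffer bookkeeping is simplified and the kernel's wakeup, watermark and lost-event handling are dropped.

    #include <stdatomic.h>
    #include <stdio.h>

    #define DATA_SIZE 4096   /* power of two, like perf_data_size(rb) */

    #define CIRC_SPACE(head, tail, size) \
            (((tail) - ((head) + 1)) & ((size) - 1))

    static _Atomic unsigned long rb_head;
    static unsigned long rb_tail;     /* advanced by the reader, not shown */

    /* returns the reserved offset, or -1 if the record would not fit */
    static long reserve(unsigned long size)
    {
        unsigned long offset, new_head;

        do {
            offset = atomic_load(&rb_head);
            if (CIRC_SPACE(offset, rb_tail, DATA_SIZE) < size)
                return -1;                       /* no room: drop the record */
            new_head = offset + size;
            /* on failure another writer moved the head; retry with a fresh load */
        } while (!atomic_compare_exchange_weak(&rb_head, &offset, new_head));

        return (long)(offset & (DATA_SIZE - 1)); /* where this record goes */
    }

    int main(void)
    {
        printf("reserved at %ld\n", reserve(64));
        printf("reserved at %ld\n", reserve(64));
        return 0;
    }
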
/linux-4.4.14/drivers/video/fbdev/core/
Dmodedb.c988 struct list_head *head) in fb_find_best_mode() argument
995 list_for_each(pos, head) { in fb_find_best_mode()
1026 struct list_head *head) in fb_find_nearest_mode() argument
1033 list_for_each(pos, head) { in fb_find_nearest_mode()
1066 struct list_head *head) in fb_match_mode() argument
1073 list_for_each(pos, head) { in fb_match_mode()
1090 int fb_add_videomode(const struct fb_videomode *mode, struct list_head *head) in fb_add_videomode() argument
1097 list_for_each(pos, head) { in fb_add_videomode()
1112 list_add(&modelist->list, head); in fb_add_videomode()
1126 struct list_head *head) in fb_delete_videomode() argument
[all …]
