Searched refs:entry (Results 1 - 200 of 5512) sorted by relevance


/linux-4.4.14/net/atm/
mpoa_caches.c
37 in_cache_entry *entry; in_cache_get() local
40 entry = client->in_cache; in_cache_get()
41 while (entry != NULL) { in_cache_get()
42 if (entry->ctrl_info.in_dst_ip == dst_ip) { in_cache_get()
43 atomic_inc(&entry->use); in_cache_get()
45 return entry; in_cache_get()
47 entry = entry->next; in_cache_get()
58 in_cache_entry *entry; in_cache_get_with_mask() local
61 entry = client->in_cache; in_cache_get_with_mask()
62 while (entry != NULL) { in_cache_get_with_mask()
63 if ((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask)) { in_cache_get_with_mask()
64 atomic_inc(&entry->use); in_cache_get_with_mask()
66 return entry; in_cache_get_with_mask()
68 entry = entry->next; in_cache_get_with_mask()
79 in_cache_entry *entry; in_cache_get_by_vcc() local
82 entry = client->in_cache; in_cache_get_by_vcc()
83 while (entry != NULL) { in_cache_get_by_vcc()
84 if (entry->shortcut == vcc) { in_cache_get_by_vcc()
85 atomic_inc(&entry->use); in_cache_get_by_vcc()
87 return entry; in_cache_get_by_vcc()
89 entry = entry->next; in_cache_get_by_vcc()
99 in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL); in_cache_add_entry() local
101 if (entry == NULL) { in_cache_add_entry()
106 dprintk("adding an ingress entry, ip = %pI4\n", &dst_ip); in_cache_add_entry()
108 atomic_set(&entry->use, 1); in_cache_add_entry()
111 entry->next = client->in_cache; in_cache_add_entry()
112 entry->prev = NULL; in_cache_add_entry()
114 client->in_cache->prev = entry; in_cache_add_entry()
115 client->in_cache = entry; in_cache_add_entry()
117 memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN); in_cache_add_entry()
118 entry->ctrl_info.in_dst_ip = dst_ip; in_cache_add_entry()
119 do_gettimeofday(&(entry->tv)); in_cache_add_entry()
120 entry->retry_time = client->parameters.mpc_p4; in_cache_add_entry()
121 entry->count = 1; in_cache_add_entry()
122 entry->entry_state = INGRESS_INVALID; in_cache_add_entry()
123 entry->ctrl_info.holding_time = HOLDING_TIME_DEFAULT; in_cache_add_entry()
124 atomic_inc(&entry->use); in_cache_add_entry()
129 return entry; in_cache_add_entry()
132 static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc) cache_hit() argument
137 entry->count++; cache_hit()
138 if (entry->entry_state == INGRESS_RESOLVED && entry->shortcut != NULL) cache_hit()
141 if (entry->entry_state == INGRESS_REFRESHING) { cache_hit()
142 if (entry->count > mpc->parameters.mpc_p1) { cache_hit()
144 msg.content.in_info = entry->ctrl_info; cache_hit()
146 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); cache_hit()
150 do_gettimeofday(&(entry->reply_wait)); cache_hit()
151 entry->entry_state = INGRESS_RESOLVING; cache_hit()
153 if (entry->shortcut != NULL) cache_hit()
158 if (entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL) cache_hit()
161 if (entry->count > mpc->parameters.mpc_p1 && cache_hit()
162 entry->entry_state == INGRESS_INVALID) { cache_hit()
164 mpc->dev->name, &entry->ctrl_info.in_dst_ip); cache_hit()
165 entry->entry_state = INGRESS_RESOLVING; cache_hit()
168 msg.content.in_info = entry->ctrl_info; cache_hit()
169 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); cache_hit()
173 do_gettimeofday(&(entry->reply_wait)); cache_hit()
179 static void in_cache_put(in_cache_entry *entry) in_cache_put() argument
181 if (atomic_dec_and_test(&entry->use)) { in_cache_put()
182 memset(entry, 0, sizeof(in_cache_entry)); in_cache_put()
183 kfree(entry); in_cache_put()
190 static void in_cache_remove_entry(in_cache_entry *entry, in_cache_remove_entry() argument
196 vcc = entry->shortcut; in_cache_remove_entry()
197 dprintk("removing an ingress entry, ip = %pI4\n", in_cache_remove_entry()
198 &entry->ctrl_info.in_dst_ip); in_cache_remove_entry()
200 if (entry->prev != NULL) in_cache_remove_entry()
201 entry->prev->next = entry->next; in_cache_remove_entry()
203 client->in_cache = entry->next; in_cache_remove_entry()
204 if (entry->next != NULL) in_cache_remove_entry()
205 entry->next->prev = entry->prev; in_cache_remove_entry()
206 client->in_ops->put(entry); in_cache_remove_entry()
228 in_cache_entry *entry, *next_entry; clear_count_and_expired() local
234 entry = client->in_cache; clear_count_and_expired()
235 while (entry != NULL) { clear_count_and_expired()
236 entry->count = 0; clear_count_and_expired()
237 next_entry = entry->next; clear_count_and_expired()
238 if ((now.tv_sec - entry->tv.tv_sec) clear_count_and_expired()
239 > entry->ctrl_info.holding_time) { clear_count_and_expired()
241 &entry->ctrl_info.in_dst_ip); clear_count_and_expired()
242 client->in_ops->remove_entry(entry, client); clear_count_and_expired()
244 entry = next_entry; clear_count_and_expired()
254 in_cache_entry *entry; check_resolving_entries() local
261 entry = client->in_cache; check_resolving_entries()
262 while (entry != NULL) { check_resolving_entries()
263 if (entry->entry_state == INGRESS_RESOLVING) { check_resolving_entries()
264 if ((now.tv_sec - entry->hold_down.tv_sec) < check_resolving_entries()
266 entry = entry->next; /* Entry in hold down */ check_resolving_entries()
269 if ((now.tv_sec - entry->reply_wait.tv_sec) > check_resolving_entries()
270 entry->retry_time) { check_resolving_entries()
271 entry->retry_time = MPC_C1 * (entry->retry_time); check_resolving_entries()
274 * put entry in hold down. check_resolving_entries()
276 if (entry->retry_time > client->parameters.mpc_p5) { check_resolving_entries()
277 do_gettimeofday(&(entry->hold_down)); check_resolving_entries()
278 entry->retry_time = client->parameters.mpc_p4; check_resolving_entries()
279 entry = entry->next; check_resolving_entries()
283 memset(&(entry->hold_down), 0, sizeof(struct timeval)); check_resolving_entries()
286 msg.content.in_info = entry->ctrl_info; check_resolving_entries()
287 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); check_resolving_entries()
291 do_gettimeofday(&(entry->reply_wait)); check_resolving_entries()
294 entry = entry->next; check_resolving_entries()
303 struct in_cache_entry *entry = client->in_cache; refresh_entries() local
309 while (entry != NULL) { refresh_entries()
310 if (entry->entry_state == INGRESS_RESOLVED) { refresh_entries()
311 if (!(entry->refresh_time)) refresh_entries()
312 entry->refresh_time = (2 * (entry->ctrl_info.holding_time))/3; refresh_entries()
313 if ((now.tv_sec - entry->reply_wait.tv_sec) > refresh_entries()
314 entry->refresh_time) { refresh_entries()
315 dprintk("refreshing an entry.\n"); refresh_entries()
316 entry->entry_state = INGRESS_REFRESHING; refresh_entries()
320 entry = entry->next; refresh_entries()
336 eg_cache_entry *entry; eg_cache_get_by_cache_id() local
339 entry = mpc->eg_cache; eg_cache_get_by_cache_id()
340 while (entry != NULL) { eg_cache_get_by_cache_id()
341 if (entry->ctrl_info.cache_id == cache_id) { eg_cache_get_by_cache_id()
342 atomic_inc(&entry->use); eg_cache_get_by_cache_id()
344 return entry; eg_cache_get_by_cache_id()
346 entry = entry->next; eg_cache_get_by_cache_id()
357 eg_cache_entry *entry; eg_cache_get_by_tag() local
360 entry = mpc->eg_cache; eg_cache_get_by_tag()
361 while (entry != NULL) { eg_cache_get_by_tag()
362 if (entry->ctrl_info.tag == tag) { eg_cache_get_by_tag()
363 atomic_inc(&entry->use); eg_cache_get_by_tag()
365 return entry; eg_cache_get_by_tag()
367 entry = entry->next; eg_cache_get_by_tag()
379 eg_cache_entry *entry; eg_cache_get_by_vcc() local
382 entry = mpc->eg_cache; eg_cache_get_by_vcc()
383 while (entry != NULL) { eg_cache_get_by_vcc()
384 if (entry->shortcut == vcc) { eg_cache_get_by_vcc()
385 atomic_inc(&entry->use); eg_cache_get_by_vcc()
387 return entry; eg_cache_get_by_vcc()
389 entry = entry->next; eg_cache_get_by_vcc()
399 eg_cache_entry *entry; eg_cache_get_by_src_ip() local
402 entry = mpc->eg_cache; eg_cache_get_by_src_ip()
403 while (entry != NULL) { eg_cache_get_by_src_ip()
404 if (entry->latest_ip_addr == ipaddr) { eg_cache_get_by_src_ip()
405 atomic_inc(&entry->use); eg_cache_get_by_src_ip()
407 return entry; eg_cache_get_by_src_ip()
409 entry = entry->next; eg_cache_get_by_src_ip()
416 static void eg_cache_put(eg_cache_entry *entry) eg_cache_put() argument
418 if (atomic_dec_and_test(&entry->use)) { eg_cache_put()
419 memset(entry, 0, sizeof(eg_cache_entry)); eg_cache_put()
420 kfree(entry); eg_cache_put()
427 static void eg_cache_remove_entry(eg_cache_entry *entry, eg_cache_remove_entry() argument
433 vcc = entry->shortcut; eg_cache_remove_entry()
434 dprintk("removing an egress entry.\n"); eg_cache_remove_entry()
435 if (entry->prev != NULL) eg_cache_remove_entry()
436 entry->prev->next = entry->next; eg_cache_remove_entry()
438 client->eg_cache = entry->next; eg_cache_remove_entry()
439 if (entry->next != NULL) eg_cache_remove_entry()
440 entry->next->prev = entry->prev; eg_cache_remove_entry()
441 client->eg_ops->put(entry); eg_cache_remove_entry()
461 eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL); eg_cache_add_entry() local
463 if (entry == NULL) { eg_cache_add_entry()
468 dprintk("adding an egress entry, ip = %pI4, this should be our IP\n", eg_cache_add_entry()
471 atomic_set(&entry->use, 1); eg_cache_add_entry()
474 entry->next = client->eg_cache; eg_cache_add_entry()
475 entry->prev = NULL; eg_cache_add_entry()
477 client->eg_cache->prev = entry; eg_cache_add_entry()
478 client->eg_cache = entry; eg_cache_add_entry()
480 memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN); eg_cache_add_entry()
481 entry->ctrl_info = msg->content.eg_info; eg_cache_add_entry()
482 do_gettimeofday(&(entry->tv)); eg_cache_add_entry()
483 entry->entry_state = EGRESS_RESOLVED; eg_cache_add_entry()
485 ntohl(entry->ctrl_info.cache_id)); eg_cache_add_entry()
486 dprintk("mps_ip = %pI4\n", &entry->ctrl_info.mps_ip); eg_cache_add_entry()
487 atomic_inc(&entry->use); eg_cache_add_entry()
492 return entry; eg_cache_add_entry()
495 static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time) update_eg_cache_entry() argument
497 do_gettimeofday(&(entry->tv)); update_eg_cache_entry()
498 entry->entry_state = EGRESS_RESOLVED; update_eg_cache_entry()
499 entry->ctrl_info.holding_time = holding_time; update_eg_cache_entry()
504 eg_cache_entry *entry, *next_entry; clear_expired() local
511 entry = client->eg_cache; clear_expired()
512 while (entry != NULL) { clear_expired()
513 next_entry = entry->next; clear_expired()
514 if ((now.tv_sec - entry->tv.tv_sec) clear_expired()
515 > entry->ctrl_info.holding_time) { clear_expired()
517 msg.content.eg_info = entry->ctrl_info; clear_expired()
519 ntohl(entry->ctrl_info.cache_id)); clear_expired()
521 client->eg_ops->remove_entry(entry, client); clear_expired()
523 entry = next_entry; clear_expired()
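
The pattern these mpoa_caches.c hits keep repeating: every lookup (in_cache_get(), in_cache_get_with_mask(), in_cache_get_by_vcc()) takes a reference with atomic_inc(&entry->use) before returning the entry, and in_cache_put() frees it only when atomic_dec_and_test() drops the count to zero. A minimal userspace C sketch of that get/put discipline — the names and the stdatomic calls are stand-ins for illustration, not the kernel API:

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct entry {
    atomic_int use;      /* reference count, 1 while on the list */
    uint32_t dst_ip;     /* lookup key */
    struct entry *next;  /* singly linked cache */
};

/* Lookup: take a reference before handing the entry out, as the
 * kernel's in_cache_get() does with atomic_inc(&entry->use). */
static struct entry *cache_get(struct entry *head, uint32_t dst_ip)
{
    for (struct entry *e = head; e; e = e->next) {
        if (e->dst_ip == dst_ip) {
            atomic_fetch_add(&e->use, 1);
            return e;
        }
    }
    return NULL;
}

/* Release: free only when the last reference drops, mirroring
 * atomic_dec_and_test() in in_cache_put(). */
static void cache_put(struct entry *e)
{
    if (atomic_fetch_sub(&e->use, 1) == 1) {
        memset(e, 0, sizeof(*e));
        free(e);
    }
}

The kernel versions also hold the client's cache lock across the list walk; the refcount only keeps the returned entry alive for its caller, it does not protect the traversal itself.
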
lec.c
102 static inline void lec_arp_hold(struct lec_arp_table *entry) lec_arp_hold() argument
104 atomic_inc(&entry->usage); lec_arp_hold()
107 static inline void lec_arp_put(struct lec_arp_table *entry) lec_arp_put() argument
109 if (atomic_dec_and_test(&entry->usage)) lec_arp_put()
110 kfree(entry); lec_arp_put()
208 struct lec_arp_table *entry; lec_start_xmit() local
279 entry = NULL; lec_start_xmit()
280 vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry); lec_start_xmit()
281 pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n", lec_start_xmit()
282 dev->name, vcc, vcc ? vcc->flags : 0, entry); lec_start_xmit()
284 if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) { lec_start_xmit()
287 skb_queue_tail(&entry->tx_wait, skb); lec_start_xmit()
289 pr_debug("%s:tx queue full or no arp entry, dropping, MAC address: %pM\n", lec_start_xmit()
301 while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) { lec_start_xmit()
325 if (entry) lec_start_xmit()
326 lec_arp_put(entry); lec_start_xmit()
344 struct lec_arp_table *entry; lec_atm_send() local
374 entry = lec_arp_find(priv, mesg->content.normal.mac_addr); lec_atm_send()
375 lec_arp_remove(priv, entry); lec_atm_send()
442 pr_debug("%s: entry found, responding to zeppelin\n", lec_atm_send()
623 struct lec_arp_table *entry; lec_push() local
641 * the LE_ARP cache entry, delete the LE_ARP cache entry. lec_push()
646 entry = lec_arp_find(priv, src); lec_push()
647 if (entry && entry->vcc != vcc) { lec_push()
648 lec_arp_remove(priv, entry); lec_push()
649 lec_arp_put(entry); lec_push()
802 static void lec_info(struct seq_file *seq, struct lec_arp_table *entry) lec_info() argument
807 seq_printf(seq, "%2.2x", entry->mac_addr[i] & 0xff); lec_info()
810 seq_printf(seq, "%2.2x", entry->atm_addr[i] & 0xff); lec_info()
811 seq_printf(seq, " %s %4.4x", lec_arp_get_status_string(entry->status), lec_info()
812 entry->flags & 0xffff); lec_info()
813 if (entry->vcc) lec_info()
814 seq_printf(seq, "%3d %3d ", entry->vcc->vpi, entry->vcc->vci); lec_info()
817 if (entry->recv_vcc) { lec_info()
818 seq_printf(seq, " %3d %3d", entry->recv_vcc->vpi, lec_info()
819 entry->recv_vcc->vci); lec_info()
981 struct lec_arp_table *entry = hlist_entry(state->node, lec_seq_show() local
986 lec_info(seq, entry); lec_seq_show()
1187 * TLVs get freed when entry is killed lane2_associate_ind()
1189 struct lec_arp_table *entry = lec_arp_find(priv, mac_addr); lane2_associate_ind()
1191 if (entry == NULL) lane2_associate_ind()
1194 kfree(entry->tlvs); lane2_associate_ind()
1196 entry->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL); lane2_associate_ind()
1197 if (entry->tlvs == NULL) lane2_associate_ind()
1199 entry->sizeoftlvs = sizeoftlvs; lane2_associate_ind()
1267 static void lec_arp_clear_vccs(struct lec_arp_table *entry) lec_arp_clear_vccs() argument
1269 if (entry->vcc) { lec_arp_clear_vccs()
1270 struct atm_vcc *vcc = entry->vcc; lec_arp_clear_vccs()
1279 vcc->push = entry->old_push; lec_arp_clear_vccs()
1281 entry->vcc = NULL; lec_arp_clear_vccs()
1283 if (entry->recv_vcc) { lec_arp_clear_vccs()
1284 entry->recv_vcc->push = entry->old_recv_push; lec_arp_clear_vccs()
1285 vcc_release_async(entry->recv_vcc, -EPIPE); lec_arp_clear_vccs()
1286 entry->recv_vcc = NULL; lec_arp_clear_vccs()
1291 * Insert entry to lec_arp_table
1295 lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry) lec_arp_add() argument
1299 tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])]; lec_arp_add()
1300 hlist_add_head(&entry->next, tmp); lec_arp_add()
1302 pr_debug("Added entry:%pM\n", entry->mac_addr); lec_arp_add()
1306 * Remove entry from lec_arp_table
1311 struct lec_arp_table *entry; lec_arp_remove() local
1329 hlist_for_each_entry(entry, lec_arp_remove()
1332 entry->atm_addr, ATM_ESA_LEN) == 0) { lec_arp_remove()
1343 pr_debug("Removed entry:%pM\n", to_remove->mac_addr); lec_arp_remove()
1490 struct lec_arp_table *entry; lec_arp_destroy() local
1501 hlist_for_each_entry_safe(entry, next, lec_arp_destroy()
1503 lec_arp_remove(priv, entry); lec_arp_destroy()
1504 lec_arp_put(entry); lec_arp_destroy()
1509 hlist_for_each_entry_safe(entry, next, lec_arp_destroy()
1511 del_timer_sync(&entry->timer); lec_arp_destroy()
1512 lec_arp_clear_vccs(entry); lec_arp_destroy()
1513 hlist_del(&entry->next); lec_arp_destroy()
1514 lec_arp_put(entry); lec_arp_destroy()
1518 hlist_for_each_entry_safe(entry, next, lec_arp_destroy()
1520 del_timer_sync(&entry->timer); lec_arp_destroy()
1521 lec_arp_clear_vccs(entry); lec_arp_destroy()
1522 hlist_del(&entry->next); lec_arp_destroy()
1523 lec_arp_put(entry); lec_arp_destroy()
1527 hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) { lec_arp_destroy()
1529 lec_arp_clear_vccs(entry); lec_arp_destroy()
1530 hlist_del(&entry->next); lec_arp_destroy()
1531 lec_arp_put(entry); lec_arp_destroy()
1539 * Find entry by mac_address
1545 struct lec_arp_table *entry; lec_arp_find() local
1550 hlist_for_each_entry(entry, head, next) { hlist_for_each_entry()
1551 if (ether_addr_equal(mac_addr, entry->mac_addr)) hlist_for_each_entry()
1552 return entry; hlist_for_each_entry()
1564 pr_info("LEC: Arp entry kmalloc failed\n"); make_entry()
1581 struct lec_arp_table *entry; lec_arp_expire_arp() local
1583 entry = (struct lec_arp_table *)data; lec_arp_expire_arp()
1586 if (entry->status == ESI_ARP_PENDING) { lec_arp_expire_arp()
1587 if (entry->no_tries <= entry->priv->max_retry_count) { lec_arp_expire_arp()
1588 if (entry->is_rdesc) lec_arp_expire_arp()
1589 send_to_lecd(entry->priv, l_rdesc_arp_xmt, lec_arp_expire_arp()
1590 entry->mac_addr, NULL, NULL); lec_arp_expire_arp()
1592 send_to_lecd(entry->priv, l_arp_xmt, lec_arp_expire_arp()
1593 entry->mac_addr, NULL, NULL); lec_arp_expire_arp()
1594 entry->no_tries++; lec_arp_expire_arp()
1596 mod_timer(&entry->timer, jiffies + (1 * HZ)); lec_arp_expire_arp()
1600 /* Unknown/unused vcc expire, remove associated entry */ lec_arp_expire_vcc()
1622 static bool __lec_arp_check_expire(struct lec_arp_table *entry, __lec_arp_check_expire() argument
1628 if ((entry->flags) & LEC_REMOTE_FLAG && priv->topology_change) __lec_arp_check_expire()
1634 now, entry->last_used, time_to_check); __lec_arp_check_expire()
1635 if (time_after(now, entry->last_used + time_to_check) && __lec_arp_check_expire()
1636 !(entry->flags & LEC_PERMANENT_FLAG) && __lec_arp_check_expire()
1637 !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */ __lec_arp_check_expire()
1638 /* Remove entry */ __lec_arp_check_expire()
1640 lec_arp_remove(priv, entry); __lec_arp_check_expire()
1641 lec_arp_put(entry); __lec_arp_check_expire()
1644 if ((entry->status == ESI_VC_PENDING || __lec_arp_check_expire()
1645 entry->status == ESI_ARP_PENDING) && __lec_arp_check_expire()
1646 time_after_eq(now, entry->timestamp + __lec_arp_check_expire()
1648 entry->timestamp = jiffies; __lec_arp_check_expire()
1649 entry->packets_flooded = 0; __lec_arp_check_expire()
1650 if (entry->status == ESI_VC_PENDING) __lec_arp_check_expire()
1652 entry->mac_addr, __lec_arp_check_expire()
1653 entry->atm_addr, __lec_arp_check_expire()
1656 if (entry->status == ESI_FLUSH_PENDING && __lec_arp_check_expire()
1657 time_after_eq(now, entry->timestamp + __lec_arp_check_expire()
1659 lec_arp_hold(entry); __lec_arp_check_expire()
1669 * 2. For each entry, delete entries that have aged past the age limit.
1670 * 3. For each entry, depending on the status of the entry, perform
1688 struct lec_arp_table *entry; lec_arp_check_expire() local
1697 hlist_for_each_entry_safe(entry, next, lec_arp_check_expire()
1699 if (__lec_arp_check_expire(entry, now, priv)) { lec_arp_check_expire()
1701 struct atm_vcc *vcc = entry->vcc; lec_arp_check_expire()
1705 while ((skb = skb_dequeue(&entry->tx_wait))) lec_arp_check_expire()
1707 entry->last_used = jiffies; lec_arp_check_expire()
1708 entry->status = ESI_FORWARD_DIRECT; lec_arp_check_expire()
1709 lec_arp_put(entry); lec_arp_check_expire()
1730 struct lec_arp_table *entry; lec_arp_resolve() local
1747 entry = lec_arp_find(priv, mac_to_find); lec_arp_resolve()
1749 if (entry) { lec_arp_resolve()
1750 if (entry->status == ESI_FORWARD_DIRECT) { lec_arp_resolve()
1752 entry->last_used = jiffies; lec_arp_resolve()
1753 lec_arp_hold(entry); lec_arp_resolve()
1754 *ret_entry = entry; lec_arp_resolve()
1755 found = entry->vcc; lec_arp_resolve()
1759 * If the LE_ARP cache entry is still pending, reset count to 0 lec_arp_resolve()
1762 if (entry->status == ESI_ARP_PENDING) lec_arp_resolve()
1763 entry->no_tries = 0; lec_arp_resolve()
1770 if (entry->status != ESI_FLUSH_PENDING && lec_arp_resolve()
1771 entry->packets_flooded < lec_arp_resolve()
1773 entry->packets_flooded++; lec_arp_resolve()
1779 * We got here because entry->status == ESI_FLUSH_PENDING lec_arp_resolve()
1780 * or BUS flood limit was reached for an entry which is lec_arp_resolve()
1783 lec_arp_hold(entry); lec_arp_resolve()
1784 *ret_entry = entry; lec_arp_resolve()
1785 pr_debug("entry->status %d entry->vcc %p\n", entry->status, lec_arp_resolve()
1786 entry->vcc); lec_arp_resolve()
1789 /* No matching entry was found */ lec_arp_resolve()
1790 entry = make_entry(priv, mac_to_find); lec_arp_resolve()
1791 pr_debug("Making entry\n"); lec_arp_resolve()
1792 if (!entry) { lec_arp_resolve()
1796 lec_arp_add(priv, entry); lec_arp_resolve()
1798 entry->packets_flooded = 1; lec_arp_resolve()
1799 entry->status = ESI_ARP_PENDING; lec_arp_resolve()
1800 entry->no_tries = 1; lec_arp_resolve()
1801 entry->last_used = entry->timestamp = jiffies; lec_arp_resolve()
1802 entry->is_rdesc = is_rdesc; lec_arp_resolve()
1803 if (entry->is_rdesc) lec_arp_resolve()
1808 entry->timer.expires = jiffies + (1 * HZ); lec_arp_resolve()
1809 entry->timer.function = lec_arp_expire_arp; lec_arp_resolve()
1810 add_timer(&entry->timer); lec_arp_resolve()
1825 struct lec_arp_table *entry; lec_addr_delete() local
1831 hlist_for_each_entry_safe(entry, next, lec_addr_delete()
1833 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) && lec_addr_delete()
1835 !(entry->flags & LEC_PERMANENT_FLAG))) { lec_addr_delete()
1836 lec_arp_remove(priv, entry); lec_addr_delete()
1837 lec_arp_put(entry); lec_addr_delete()
1857 struct lec_arp_table *entry, *tmp; lec_arp_update() local
1864 entry = lec_arp_find(priv, mac_addr); lec_arp_update()
1865 if (entry == NULL && targetless_le_arp) lec_arp_update()
1868 * we have no entry in the cache. 7.1.30 lec_arp_update()
1871 hlist_for_each_entry_safe(entry, next, lec_arp_update()
1873 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) { lec_arp_update()
1874 hlist_del(&entry->next); lec_arp_update()
1875 del_timer(&entry->timer); lec_arp_update()
1881 tmp->vcc = entry->vcc; lec_arp_update()
1882 tmp->old_push = entry->old_push; lec_arp_update()
1884 del_timer(&entry->timer); lec_arp_update()
1885 lec_arp_put(entry); lec_arp_update()
1886 entry = tmp; lec_arp_update()
1888 entry->status = ESI_FORWARD_DIRECT; lec_arp_update()
1889 ether_addr_copy(entry->mac_addr, lec_arp_update()
1891 entry->last_used = jiffies; lec_arp_update()
1892 lec_arp_add(priv, entry); lec_arp_update()
1895 entry->flags |= LEC_REMOTE_FLAG; lec_arp_update()
1897 entry->flags &= ~LEC_REMOTE_FLAG; lec_arp_update()
1905 entry = lec_arp_find(priv, mac_addr); lec_arp_update()
1906 if (!entry) { lec_arp_update()
1907 entry = make_entry(priv, mac_addr); lec_arp_update()
1908 if (!entry) lec_arp_update()
1910 entry->status = ESI_UNKNOWN; lec_arp_update()
1911 lec_arp_add(priv, entry); lec_arp_update()
1914 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN); lec_arp_update()
1915 del_timer(&entry->timer); lec_arp_update()
1919 if (entry != tmp && lec_arp_update()
1927 entry->vcc = tmp->vcc; lec_arp_update()
1928 entry->old_push = tmp->old_push; lec_arp_update()
1930 entry->status = tmp->status; lec_arp_update()
1936 entry->flags |= LEC_REMOTE_FLAG; lec_arp_update()
1938 entry->flags &= ~LEC_REMOTE_FLAG; lec_arp_update()
1939 if (entry->status == ESI_ARP_PENDING || entry->status == ESI_UNKNOWN) { lec_arp_update()
1940 entry->status = ESI_VC_PENDING; lec_arp_update()
1941 send_to_lecd(priv, l_svc_setup, entry->mac_addr, atm_addr, NULL); lec_arp_update()
1958 struct lec_arp_table *entry; lec_vcc_added() local
1966 entry = lec_arp_find(priv, bus_mac); lec_vcc_added()
1967 if (!entry) { lec_vcc_added()
1968 pr_info("LEC_ARP: Multicast entry not found!\n"); lec_vcc_added()
1971 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); lec_vcc_added()
1972 entry->recv_vcc = vcc; lec_vcc_added()
1973 entry->old_recv_push = old_push; lec_vcc_added()
1975 entry = make_entry(priv, bus_mac); lec_vcc_added()
1976 if (entry == NULL) lec_vcc_added()
1978 del_timer(&entry->timer); lec_vcc_added()
1979 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); lec_vcc_added()
1980 entry->recv_vcc = vcc; lec_vcc_added()
1981 entry->old_recv_push = old_push; lec_vcc_added()
1982 hlist_add_head(&entry->next, &priv->mcast_fwds); lec_vcc_added()
2000 entry = make_entry(priv, bus_mac); lec_vcc_added()
2001 if (entry == NULL) lec_vcc_added()
2003 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); lec_vcc_added()
2004 eth_zero_addr(entry->mac_addr); lec_vcc_added()
2005 entry->recv_vcc = vcc; lec_vcc_added()
2006 entry->old_recv_push = old_push; lec_vcc_added()
2007 entry->status = ESI_UNKNOWN; lec_vcc_added()
2008 entry->timer.expires = jiffies + priv->vcc_timeout_period; lec_vcc_added()
2009 entry->timer.function = lec_arp_expire_vcc; lec_vcc_added()
2010 hlist_add_head(&entry->next, &priv->lec_no_forward); lec_vcc_added()
2011 add_timer(&entry->timer); lec_vcc_added()
2027 hlist_for_each_entry(entry, lec_vcc_added()
2030 (ioc_data->atm_addr, entry->atm_addr, lec_vcc_added()
2034 entry->vcc ? entry->vcc->vci : 0, lec_vcc_added()
2035 entry->recv_vcc ? entry->recv_vcc-> lec_vcc_added()
2038 del_timer(&entry->timer); lec_vcc_added()
2039 entry->vcc = vcc; lec_vcc_added()
2040 entry->old_push = old_push; lec_vcc_added()
2041 if (entry->status == ESI_VC_PENDING) { lec_vcc_added()
2044 entry->status = lec_vcc_added()
2047 entry->timestamp = jiffies; lec_vcc_added()
2048 entry->status = lec_vcc_added()
2053 entry->atm_addr, lec_vcc_added()
2083 entry = make_entry(priv, bus_mac); lec_vcc_added()
2084 if (!entry) lec_vcc_added()
2086 entry->vcc = vcc; lec_vcc_added()
2087 entry->old_push = old_push; lec_vcc_added()
2088 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); lec_vcc_added()
2089 eth_zero_addr(entry->mac_addr); lec_vcc_added()
2090 entry->status = ESI_UNKNOWN; lec_vcc_added()
2091 hlist_add_head(&entry->next, &priv->lec_arp_empty_ones); lec_vcc_added()
2092 entry->timer.expires = jiffies + priv->vcc_timeout_period; lec_vcc_added()
2093 entry->timer.function = lec_arp_expire_vcc; lec_vcc_added()
2094 add_timer(&entry->timer); lec_vcc_added()
2104 struct lec_arp_table *entry; lec_flush_complete() local
2111 hlist_for_each_entry(entry, lec_flush_complete()
2113 if (entry->flush_tran_id == tran_id && lec_flush_complete()
2114 entry->status == ESI_FLUSH_PENDING) { lec_flush_complete()
2116 struct atm_vcc *vcc = entry->vcc; lec_flush_complete()
2118 lec_arp_hold(entry); lec_flush_complete()
2121 while ((skb = skb_dequeue(&entry->tx_wait))) lec_flush_complete()
2123 entry->last_used = jiffies; lec_flush_complete()
2124 entry->status = ESI_FORWARD_DIRECT; lec_flush_complete()
2125 lec_arp_put(entry); lec_flush_complete()
2140 struct lec_arp_table *entry; lec_set_flush_tran_id() local
2145 hlist_for_each_entry(entry, lec_set_flush_tran_id()
2147 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) { lec_set_flush_tran_id()
2148 entry->flush_tran_id = tran_id; lec_set_flush_tran_id()
2150 tran_id, entry); lec_set_flush_tran_id()
2198 struct lec_arp_table *entry; lec_vcc_close() local
2207 hlist_for_each_entry_safe(entry, next, lec_vcc_close()
2209 if (vcc == entry->vcc) { lec_vcc_close()
2210 lec_arp_remove(priv, entry); lec_vcc_close()
2211 lec_arp_put(entry); lec_vcc_close()
2218 hlist_for_each_entry_safe(entry, next, lec_vcc_close()
2220 if (entry->vcc == vcc) { lec_vcc_close()
2221 lec_arp_clear_vccs(entry); lec_vcc_close()
2222 del_timer(&entry->timer); lec_vcc_close()
2223 hlist_del(&entry->next); lec_vcc_close()
2224 lec_arp_put(entry); lec_vcc_close()
2228 hlist_for_each_entry_safe(entry, next, lec_vcc_close()
2230 if (entry->recv_vcc == vcc) { lec_vcc_close()
2231 lec_arp_clear_vccs(entry); lec_vcc_close()
2232 del_timer(&entry->timer); lec_vcc_close()
2233 hlist_del(&entry->next); lec_vcc_close()
2234 lec_arp_put(entry); lec_vcc_close()
2238 hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) { lec_vcc_close()
2239 if (entry->recv_vcc == vcc) { lec_vcc_close()
2240 lec_arp_clear_vccs(entry); lec_vcc_close()
2242 hlist_del(&entry->next); lec_vcc_close()
2243 lec_arp_put(entry); lec_vcc_close()
2257 struct lec_arp_table *entry, *tmp; lec_arp_check_empties() local
2262 hlist_for_each_entry_safe(entry, next, lec_arp_check_empties()
2264 if (vcc == entry->vcc) { lec_arp_check_empties()
2265 del_timer(&entry->timer); lec_arp_check_empties()
2266 ether_addr_copy(entry->mac_addr, src); lec_arp_check_empties()
2267 entry->status = ESI_FORWARD_DIRECT; lec_arp_check_empties()
2268 entry->last_used = jiffies; lec_arp_check_empties()
2269 /* We might have got an entry */ lec_arp_check_empties()
2275 hlist_del(&entry->next); lec_arp_check_empties()
2276 lec_arp_add(priv, entry); lec_arp_check_empties()
2280 pr_debug("LEC_ARP: Arp_check_empties: entry not found!\n"); lec_arp_check_empties()
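
lec.c applies the same hold/put refcounting (lec_arp_hold()/lec_arp_put() at the top of this block) but spreads its LE_ARP entries over hash buckets keyed on the last byte of the MAC address, as in HASH(entry->mac_addr[ETH_ALEN - 1]) inside lec_arp_add(). A compact sketch of the bucket lookup; the bucket count here is an assumption for illustration:

#include <string.h>

#define ETH_ALEN 6
#define ARP_TABLE_SIZE 16   /* bucket count assumed for illustration */

struct arp_entry {
    unsigned char mac_addr[ETH_ALEN];
    struct arp_entry *next;
};

/* Same idea as lec.c's HASH(): bucket by the last MAC byte. */
static unsigned int mac_hash(const unsigned char *mac)
{
    return mac[ETH_ALEN - 1] & (ARP_TABLE_SIZE - 1);
}

/* lec_arp_find() equivalent: only one bucket is ever scanned. */
static struct arp_entry *arp_find(struct arp_entry *table[],
                                  const unsigned char *mac)
{
    for (struct arp_entry *e = table[mac_hash(mac)]; e; e = e->next)
        if (memcmp(e->mac_addr, mac, ETH_ALEN) == 0)
            return e;
    return NULL;
}
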
clip.c
75 static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry) link_vcc() argument
77 pr_debug("%p to entry %p (neigh %p)\n", clip_vcc, entry, entry->neigh); link_vcc()
78 clip_vcc->entry = entry; link_vcc()
80 clip_vcc->next = entry->vccs; link_vcc()
81 entry->vccs = clip_vcc; link_vcc()
82 entry->neigh->used = jiffies; link_vcc()
87 struct atmarp_entry *entry = clip_vcc->entry; unlink_clip_vcc() local
90 if (!entry) { unlink_clip_vcc()
91 pr_crit("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc); unlink_clip_vcc()
94 netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */ unlink_clip_vcc()
95 entry->neigh->used = jiffies; unlink_clip_vcc()
96 for (walk = &entry->vccs; *walk; walk = &(*walk)->next) unlink_clip_vcc()
101 clip_vcc->entry = NULL; unlink_clip_vcc()
103 netif_wake_queue(entry->neigh->dev); unlink_clip_vcc()
104 if (entry->vccs) unlink_clip_vcc()
106 entry->expires = jiffies - 1; unlink_clip_vcc()
108 error = neigh_update(entry->neigh, NULL, NUD_NONE, unlink_clip_vcc()
114 pr_crit("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc); unlink_clip_vcc()
116 netif_tx_unlock_bh(entry->neigh->dev); unlink_clip_vcc()
119 /* The neighbour entry n->lock is held. */ neigh_check_cb()
122 struct atmarp_entry *entry = neighbour_priv(n); neigh_check_cb() local
127 for (cv = entry->vccs; cv; cv = cv->next) { neigh_check_cb()
131 pr_debug("releasing vcc %p->%p of entry %p\n", neigh_check_cb()
132 cv, cv->vcc, entry); neigh_check_cb()
137 if (entry->vccs || time_before(jiffies, entry->expires)) neigh_check_cb()
203 if (clip_vcc->entry) clip_push()
210 skb->dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : clip_devs; clip_push()
211 /* clip_vcc->entry == NULL if we don't have an IP address yet */ clip_push()
291 struct atmarp_entry *entry = neighbour_priv(neigh); clip_constructor() local
302 entry->neigh = neigh; clip_constructor()
303 entry->vccs = NULL; clip_constructor()
304 entry->expires = jiffies - 1; clip_constructor()
313 * to allocate the neighbour entry but not to ask atmarpd for resolution. Also,
332 struct atmarp_entry *entry; clip_start_xmit() local
359 entry = neighbour_priv(n); clip_start_xmit()
360 if (!entry->vccs) { clip_start_xmit()
361 if (time_after(jiffies, entry->expires)) { clip_start_xmit()
363 entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ; clip_start_xmit()
366 if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS) clip_start_xmit()
367 skb_queue_tail(&entry->neigh->arp_queue, skb); clip_start_xmit()
374 pr_debug("neigh %p, vccs %p\n", entry, entry->vccs); clip_start_xmit()
375 ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc; clip_start_xmit()
377 if (entry->vccs->encap) { clip_start_xmit()
386 entry->vccs->last_use = jiffies; clip_start_xmit()
388 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */ clip_start_xmit()
397 entry->vccs->xoff = 0; clip_start_xmit()
403 if (!entry->vccs->xoff) clip_start_xmit()
428 clip_vcc->entry = NULL; clip_mkip()
447 struct atmarp_entry *entry; clip_setentry() local
458 if (!clip_vcc->entry) { clip_setentry()
459 pr_err("hiding hidden ATMARP entry\n"); clip_setentry()
473 entry = neighbour_priv(neigh); clip_setentry()
474 if (entry != clip_vcc->entry) { clip_setentry()
475 if (!clip_vcc->entry) clip_setentry()
481 link_vcc(clip_vcc, entry); clip_setentry()
733 /* This means the neighbour entry has no attached VCC objects. */
737 struct atmarp_entry *entry, struct clip_vcc *clip_vcc) atmarp_info()
750 exp = entry->neigh->used; atmarp_info()
766 if (time_before(jiffies, entry->expires)) atmarp_info()
770 atomic_read(&entry->neigh->refcnt)); atmarp_info()
736 atmarp_info(struct seq_file *seq, struct neighbour *n, struct atmarp_entry *entry, struct clip_vcc *clip_vcc) atmarp_info() argument
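
Worth noting in unlink_clip_vcc() above: the walk over entry->vccs uses a pointer-to-pointer ("for (walk = &entry->vccs; *walk; walk = &(*walk)->next)"), which removes head and interior nodes with the same splice and needs no prev link. A self-contained sketch of that idiom:

#include <stddef.h>

struct clip_vcc {
    struct clip_vcc *next;
};

/* Remove @victim from a singly linked list headed at *head without a
 * prev pointer: advance a pointer to the link itself, so the head
 * case needs no special handling. */
static int unlink_vcc(struct clip_vcc **head, struct clip_vcc *victim)
{
    for (struct clip_vcc **walk = head; *walk; walk = &(*walk)->next) {
        if (*walk == victim) {
            *walk = victim->next;   /* splice out */
            victim->next = NULL;
            return 0;
        }
    }
    return -1;                      /* not found */
}
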
mpc.c
86 static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry);
174 * Overwrites the old entry or makes a new one.
178 struct atm_mpoa_qos *entry; atm_mpoa_add_qos() local
180 entry = atm_mpoa_search_qos(dst_ip); atm_mpoa_add_qos()
181 if (entry != NULL) { atm_mpoa_add_qos()
182 entry->qos = *qos; atm_mpoa_add_qos()
183 return entry; atm_mpoa_add_qos()
186 entry = kmalloc(sizeof(struct atm_mpoa_qos), GFP_KERNEL); atm_mpoa_add_qos()
187 if (entry == NULL) { atm_mpoa_add_qos()
189 return entry; atm_mpoa_add_qos()
192 entry->ipaddr = dst_ip; atm_mpoa_add_qos()
193 entry->qos = *qos; atm_mpoa_add_qos()
195 entry->next = qos_head; atm_mpoa_add_qos()
196 qos_head = entry; atm_mpoa_add_qos()
198 return entry; atm_mpoa_add_qos()
218 int atm_mpoa_delete_qos(struct atm_mpoa_qos *entry) atm_mpoa_delete_qos() argument
222 if (entry == NULL) atm_mpoa_delete_qos()
224 if (entry == qos_head) { atm_mpoa_delete_qos()
226 kfree(entry); atm_mpoa_delete_qos()
232 if (curr->next == entry) { atm_mpoa_delete_qos()
233 curr->next = entry->next; atm_mpoa_delete_qos()
234 kfree(entry); atm_mpoa_delete_qos()
493 in_cache_entry *entry; send_via_shortcut() local
513 entry = mpc->in_ops->get(ipaddr, mpc); send_via_shortcut()
514 if (entry == NULL) { send_via_shortcut()
515 entry = mpc->in_ops->add_entry(ipaddr, mpc); send_via_shortcut()
516 if (entry != NULL) send_via_shortcut()
517 mpc->in_ops->put(entry); send_via_shortcut()
521 if (mpc->in_ops->cache_hit(entry, mpc) != OPEN) { send_via_shortcut()
524 mpc->in_ops->put(entry); send_via_shortcut()
534 mpc->in_ops->put(entry); send_via_shortcut()
541 if (entry->ctrl_info.tag != 0) { send_via_shortcut()
543 mpc->dev->name, entry->ctrl_info.tag); send_via_shortcut()
544 tagged_llc_snap_hdr.tag = entry->ctrl_info.tag; send_via_shortcut()
558 atomic_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc); send_via_shortcut()
559 ATM_SKB(skb)->atm_options = entry->shortcut->atm_options; send_via_shortcut()
560 entry->shortcut->send(entry->shortcut, skb); send_via_shortcut()
561 entry->packets_fwded++; send_via_shortcut()
562 mpc->in_ops->put(entry); send_via_shortcut()
631 pr_info("(%s) did not find RESOLVED entry from ingress cache\n", atm_mpoa_vcc_attach()
637 pr_info("(%s) attaching ingress SVC, entry = %pI4\n", atm_mpoa_vcc_attach()
743 pr_info("mpoa: (%s) Didn't find egress cache entry, tag = %u\n", mpc_push()
1084 in_cache_entry *entry; MPOA_trigger_rcvd() local
1086 entry = mpc->in_ops->get(dst_ip, mpc); MPOA_trigger_rcvd()
1087 if (entry == NULL) { MPOA_trigger_rcvd()
1088 entry = mpc->in_ops->add_entry(dst_ip, mpc); MPOA_trigger_rcvd()
1089 entry->entry_state = INGRESS_RESOLVING; MPOA_trigger_rcvd()
1091 msg->content.in_info = entry->ctrl_info; MPOA_trigger_rcvd()
1093 do_gettimeofday(&(entry->reply_wait)); MPOA_trigger_rcvd()
1094 mpc->in_ops->put(entry); MPOA_trigger_rcvd()
1098 if (entry->entry_state == INGRESS_INVALID) { MPOA_trigger_rcvd()
1099 entry->entry_state = INGRESS_RESOLVING; MPOA_trigger_rcvd()
1101 msg->content.in_info = entry->ctrl_info; MPOA_trigger_rcvd()
1103 do_gettimeofday(&(entry->reply_wait)); MPOA_trigger_rcvd()
1104 mpc->in_ops->put(entry); MPOA_trigger_rcvd()
1108 pr_info("(%s) entry already in resolving state\n", MPOA_trigger_rcvd()
1110 mpc->in_ops->put(entry); MPOA_trigger_rcvd()
1119 in_cache_entry *entry) check_qos_and_open_shortcut()
1130 entry->shortcut = eg_entry->shortcut; check_qos_and_open_shortcut()
1132 entry->shortcut = eg_entry->shortcut; check_qos_and_open_shortcut()
1134 if (entry->shortcut) { check_qos_and_open_shortcut()
1159 in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc); MPOA_res_reply_rcvd() local
1163 ddprintk("(%s) entry = %p", MPOA_res_reply_rcvd()
1164 mpc->dev->name, entry); MPOA_res_reply_rcvd()
1165 if (entry == NULL) { MPOA_res_reply_rcvd()
1166 pr_info("(%s) ARGH, received res. reply for an entry that doesn't exist.\n", MPOA_res_reply_rcvd()
1170 ddprintk_cont(" entry_state = %d ", entry->entry_state); MPOA_res_reply_rcvd()
1172 if (entry->entry_state == INGRESS_RESOLVED) { MPOA_res_reply_rcvd()
1173 pr_info("(%s) RESOLVED entry!\n", mpc->dev->name); MPOA_res_reply_rcvd()
1174 mpc->in_ops->put(entry); MPOA_res_reply_rcvd()
1178 entry->ctrl_info = msg->content.in_info; MPOA_res_reply_rcvd()
1179 do_gettimeofday(&(entry->tv)); MPOA_res_reply_rcvd()
1180 do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */ MPOA_res_reply_rcvd()
1181 entry->refresh_time = 0; MPOA_res_reply_rcvd()
1182 ddprintk_cont("entry->shortcut = %p\n", entry->shortcut); MPOA_res_reply_rcvd()
1184 if (entry->entry_state == INGRESS_RESOLVING && MPOA_res_reply_rcvd()
1185 entry->shortcut != NULL) { MPOA_res_reply_rcvd()
1186 entry->entry_state = INGRESS_RESOLVED; MPOA_res_reply_rcvd()
1187 mpc->in_ops->put(entry); MPOA_res_reply_rcvd()
1191 if (entry->shortcut != NULL) { MPOA_res_reply_rcvd()
1192 pr_info("(%s) entry->shortcut != NULL, impossible!\n", MPOA_res_reply_rcvd()
1194 mpc->in_ops->put(entry); MPOA_res_reply_rcvd()
1198 check_qos_and_open_shortcut(msg, mpc, entry); MPOA_res_reply_rcvd()
1199 entry->entry_state = INGRESS_RESOLVED; MPOA_res_reply_rcvd()
1200 mpc->in_ops->put(entry); MPOA_res_reply_rcvd()
1210 in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask); ingress_purge_rcvd() local
1212 if (entry == NULL) { ingress_purge_rcvd()
1213 pr_info("(%s) purge for a non-existing entry, ip = %pI4\n", ingress_purge_rcvd()
1219 dprintk("(%s) removing an ingress entry, ip = %pI4\n", ingress_purge_rcvd()
1222 mpc->in_ops->remove_entry(entry, mpc); ingress_purge_rcvd()
1224 mpc->in_ops->put(entry); ingress_purge_rcvd()
1225 entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask); ingress_purge_rcvd()
1226 } while (entry != NULL); ingress_purge_rcvd()
1232 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc); egress_purge_rcvd() local
1234 if (entry == NULL) { egress_purge_rcvd()
1235 dprintk("(%s) purge for a non-existing entry\n", egress_purge_rcvd()
1241 mpc->eg_ops->remove_entry(entry, mpc); egress_purge_rcvd()
1244 mpc->eg_ops->put(entry); egress_purge_rcvd()
1247 static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry) purge_egress_shortcut() argument
1269 if (entry != NULL) purge_egress_shortcut()
1270 purge_msg->content.eg_info = entry->ctrl_info; purge_egress_shortcut()
1286 eg_cache_entry *entry; mps_death() local
1297 entry = mpc->eg_cache; mps_death()
1298 while (entry != NULL) { mps_death()
1299 purge_egress_shortcut(entry->shortcut, entry); mps_death()
1300 entry = entry->next; mps_death()
1312 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(msg->content.eg_info.cache_id, mpc); MPOA_cache_impos_rcvd() local
1315 dprintk("(%s) entry = %p, holding_time = %u\n", MPOA_cache_impos_rcvd()
1316 mpc->dev->name, entry, holding_time); MPOA_cache_impos_rcvd()
1317 if (entry == NULL && holding_time) { MPOA_cache_impos_rcvd()
1318 entry = mpc->eg_ops->add_entry(msg, mpc); MPOA_cache_impos_rcvd()
1319 mpc->eg_ops->put(entry); MPOA_cache_impos_rcvd()
1323 mpc->eg_ops->update(entry, holding_time); MPOA_cache_impos_rcvd()
1328 mpc->eg_ops->remove_entry(entry, mpc); MPOA_cache_impos_rcvd()
1331 mpc->eg_ops->put(entry); MPOA_cache_impos_rcvd()
1392 eg_cache_entry *entry; clean_up() local
1398 entry = mpc->eg_cache; clean_up()
1399 while (entry != NULL) { clean_up()
1400 msg->content.eg_info = entry->ctrl_info; clean_up()
1401 dprintk("cache_id %u\n", entry->ctrl_info.cache_id); clean_up()
1403 entry = entry->next; clean_up()
1525 dprintk("freeing qos entry %p\n", qos); atm_mpoa_cleanup()
1117 check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_client *client, in_cache_entry *entry) check_qos_and_open_shortcut() argument
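
atm_mpoa_add_qos() above "overwrites the old entry or makes a new one": it reuses atm_mpoa_search_qos() for the hit case and prepends to qos_head otherwise. A compact sketch of that update-or-prepend list, with struct atm_mpoa_qos reduced to an int payload for illustration:

#include <stdint.h>
#include <stdlib.h>

struct qos_entry {
    uint32_t ipaddr;
    int qos;                  /* stand-in for struct atm_qos */
    struct qos_entry *next;
};

static struct qos_entry *qos_head;

/* Search first; update in place on a hit, otherwise prepend, as in
 * atm_mpoa_add_qos(). */
static struct qos_entry *add_qos(uint32_t ip, int qos)
{
    struct qos_entry *e;

    for (e = qos_head; e; e = e->next)
        if (e->ipaddr == ip) {
            e->qos = qos;     /* overwrite the old entry */
            return e;
        }
    e = malloc(sizeof(*e));
    if (!e)
        return NULL;
    e->ipaddr = ip;
    e->qos = qos;
    e->next = qos_head;       /* or make a new one at the head */
    qos_head = e;
    return e;
}
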
mpoa_caches.h
40 void (*put)(in_cache_entry *entry);
43 int (*cache_hit)(in_cache_entry *entry,
70 void (*put)(eg_cache_entry *entry);
71 void (*remove_entry)(eg_cache_entry *entry, struct mpoa_client *client);
72 void (*update)(eg_cache_entry *entry, uint16_t holding_time);
78 /* Ingress cache entry states */
90 /* Egress cache entry states */
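
These header hits show why the mpc.c callers above go through mpc->in_ops->get(...) and mpc->eg_ops->put(...): mpoa_caches.h publishes each cache as a table of function pointers. A trimmed sketch of the shape — the field set is cut down and the types simplified for illustration:

#include <stdint.h>

typedef struct in_cache_entry in_cache_entry;   /* opaque here */
struct mpoa_client;

struct in_cache_ops {
    in_cache_entry *(*add_entry)(uint32_t dst_ip, struct mpoa_client *client);
    in_cache_entry *(*get)(uint32_t dst_ip, struct mpoa_client *client);
    void (*put)(in_cache_entry *entry);
    void (*remove_entry)(in_cache_entry *entry, struct mpoa_client *client);
    int (*cache_hit)(in_cache_entry *entry, struct mpoa_client *client);
};
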
/linux-4.4.14/arch/arm/kernel/
jump_label.c
8 static void __arch_jump_label_transform(struct jump_entry *entry, __arch_jump_label_transform() argument
12 void *addr = (void *)entry->code; __arch_jump_label_transform()
16 insn = arm_gen_branch(entry->code, entry->target); __arch_jump_label_transform()
26 void arch_jump_label_transform(struct jump_entry *entry, arch_jump_label_transform() argument
29 __arch_jump_label_transform(entry, type, false); arch_jump_label_transform()
32 void arch_jump_label_transform_static(struct jump_entry *entry, arch_jump_label_transform_static() argument
35 __arch_jump_label_transform(entry, type, true); arch_jump_label_transform_static()
perf_callchain.c
34 struct perf_callchain_entry *entry) user_backtrace()
49 perf_callchain_store(entry, buftail.lr); user_backtrace()
62 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) perf_callchain_user() argument
71 perf_callchain_store(entry, regs->ARM_pc); perf_callchain_user()
78 while ((entry->nr < PERF_MAX_STACK_DEPTH) && perf_callchain_user()
80 tail = user_backtrace(tail, entry); perf_callchain_user()
92 struct perf_callchain_entry *entry = data; callchain_trace() local
93 perf_callchain_store(entry, fr->pc); callchain_trace()
98 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) perf_callchain_kernel() argument
108 walk_stackframe(&fr, callchain_trace, entry); perf_callchain_kernel()
33 user_backtrace(struct frame_tail __user *tail, struct perf_callchain_entry *entry) user_backtrace() argument
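
Both perf_callchain_user() and perf_callchain_kernel() funnel program counters into perf_callchain_store(), and the user-side loop guards on entry->nr < PERF_MAX_STACK_DEPTH. A sketch of that bounded-append core, with the struct layout simplified:

#include <stdint.h>

#define PERF_MAX_STACK_DEPTH 127

struct callchain {
    uint64_t nr;
    uint64_t ip[PERF_MAX_STACK_DEPTH];
};

/* perf_callchain_store() equivalent: append one pc, silently
 * dropping frames once the fixed buffer is full — which is why the
 * walker above re-checks entry->nr before each step. */
static void callchain_store(struct callchain *entry, uint64_t pc)
{
    if (entry->nr < PERF_MAX_STACK_DEPTH)
        entry->ip[entry->nr++] = pc;
}
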
/linux-4.4.14/sound/pci/ctxfi/
ctimap.c
22 int input_mapper_add(struct list_head *mappers, struct imapper *entry, input_mapper_add() argument
31 entry->next = entry->addr; input_mapper_add()
32 map_op(data, entry); input_mapper_add()
33 list_add(&entry->list, head); input_mapper_add()
39 if (pos_ent->slot > entry->slot) { list_for_each()
50 __list_add(&entry->list, pos->prev, pos);
54 list_add_tail(&entry->list, head);
60 entry->next = pos_ent->addr;
61 map_op(data, entry);
62 pre_ent->next = entry->addr;
68 int input_mapper_delete(struct list_head *mappers, struct imapper *entry, input_mapper_delete() argument
79 pre = (entry->list.prev == head) ? head->prev : entry->list.prev; input_mapper_delete()
80 next = (entry->list.next == head) ? head->next : entry->list.next; input_mapper_delete()
82 if (pre == &entry->list) { input_mapper_delete()
83 /* entry is the only one node in mappers list */ input_mapper_delete()
84 entry->next = entry->addr = entry->user = entry->slot = 0; input_mapper_delete()
85 map_op(data, entry); input_mapper_delete()
86 list_del(&entry->list); input_mapper_delete()
95 list_del(&entry->list); input_mapper_delete()
102 struct imapper *entry; free_input_mapper_list() local
108 entry = list_entry(pos, struct imapper, list); free_input_mapper_list()
109 kfree(entry); free_input_mapper_list()
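
input_mapper_add() keeps the mappers list ordered by slot: it walks until the first element with a larger slot and splices in front of it (the rethreading of the hardware next/addr chain via map_op() is omitted here). A sketch of the ordering part on a plain doubly linked list instead of the kernel's list_head:

#include <stdlib.h>

struct imapper {
    unsigned int slot;
    struct imapper *prev, *next;
};

/* Sorted insert: advance past every node whose slot is <= ours, then
 * splice in; works for empty list, head, middle and tail alike. */
static void mapper_add_sorted(struct imapper **head, struct imapper *e)
{
    struct imapper **link = head, *prev = NULL;

    while (*link && (*link)->slot <= e->slot) {
        prev = *link;
        link = &(*link)->next;
    }
    e->next = *link;
    e->prev = prev;
    if (*link)
        (*link)->prev = e;
    *link = e;
}
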
ctvmem.c
35 struct ct_vm_block *block = NULL, *entry; get_vm_block() local
47 entry = list_entry(pos, struct ct_vm_block, list); get_vm_block()
48 if (entry->size >= size) get_vm_block()
54 if (entry->size == size) { get_vm_block()
56 list_move(&entry->list, &vm->used); get_vm_block()
58 block = entry; get_vm_block()
66 block->addr = entry->addr; get_vm_block()
69 entry->addr += size; get_vm_block()
70 entry->size -= size; get_vm_block()
80 struct ct_vm_block *entry, *pre_ent; put_vm_block() local
90 entry = list_entry(pos, struct ct_vm_block, list); put_vm_block()
91 if (entry->addr >= (block->addr + block->size)) put_vm_block()
96 entry = block; put_vm_block()
98 if ((block->addr + block->size) == entry->addr) { put_vm_block()
99 entry->addr = block->addr; put_vm_block()
100 entry->size += block->size; put_vm_block()
104 entry = block; put_vm_block()
108 pos = &entry->list; put_vm_block()
111 entry = list_entry(pos, struct ct_vm_block, list); put_vm_block()
113 if ((pre_ent->addr + pre_ent->size) > entry->addr) put_vm_block()
116 pre_ent->size += entry->size; put_vm_block()
118 kfree(entry); put_vm_block()
226 struct ct_vm_block *entry; ct_vm_destroy() local
232 entry = list_entry(pos, struct ct_vm_block, list); ct_vm_destroy()
233 kfree(entry); ct_vm_destroy()
238 entry = list_entry(pos, struct ct_vm_block, list); ct_vm_destroy()
239 kfree(entry); ct_vm_destroy()
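
get_vm_block() above is a first-fit allocator over an address-ordered free list: an exact fit moves the node whole to the used list, anything larger is split by carving the allocation off the block's front. A userspace sketch of that decision, with the used-list bookkeeping and locking dropped:

#include <stdlib.h>

struct vm_block {
    unsigned int addr, size;
    struct vm_block *next;      /* free list, sorted by addr */
};

/* First fit: exact fit unlinks the node; a larger block is split,
 * shrinking the free entry in place as get_vm_block() does. */
static struct vm_block *vm_get(struct vm_block **free_list, unsigned int size)
{
    for (struct vm_block **link = free_list; *link; link = &(*link)->next) {
        struct vm_block *e = *link;

        if (e->size < size)
            continue;
        if (e->size == size) {      /* exact fit: take the node */
            *link = e->next;
            e->next = NULL;
            return e;
        }
        struct vm_block *blk = malloc(sizeof(*blk));
        if (!blk)
            return NULL;
        blk->addr = e->addr;        /* carve from the front */
        blk->size = size;
        blk->next = NULL;
        e->addr += size;
        e->size -= size;
        return blk;
    }
    return NULL;                    /* nothing large enough */
}

put_vm_block() runs the inverse: reinsert in address order, then merge with whichever neighbours turn out to be contiguous — that is what the "pre_ent->size += entry->size; kfree(entry);" lines above are doing.
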
ctdaio.c
162 struct imapper *entry; dao_set_left_input() local
166 entry = kzalloc((sizeof(*entry) * daio->rscl.msr), GFP_KERNEL); dao_set_left_input()
167 if (!entry) dao_set_left_input()
174 for (i = 0; i < daio->rscl.msr; i++, entry++) { dao_set_left_input()
175 entry->slot = input->ops->output_slot(input); dao_set_left_input()
176 entry->user = entry->addr = daio->rscl.ops->index(&daio->rscl); dao_set_left_input()
177 dao->mgr->imap_add(dao->mgr, entry); dao_set_left_input()
178 dao->imappers[i] = entry; dao_set_left_input()
191 struct imapper *entry; dao_set_right_input() local
195 entry = kzalloc((sizeof(*entry) * daio->rscr.msr), GFP_KERNEL); dao_set_right_input()
196 if (!entry) dao_set_right_input()
203 for (i = 0; i < daio->rscr.msr; i++, entry++) { dao_set_right_input()
204 entry->slot = input->ops->output_slot(input); dao_set_right_input()
205 entry->user = entry->addr = daio->rscr.ops->index(&daio->rscr); dao_set_right_input()
206 dao->mgr->imap_add(dao->mgr, entry); dao_set_right_input()
207 dao->imappers[daio->rscl.msr + i] = entry; dao_set_right_input()
220 struct imapper *entry; dao_clear_left_input() local
227 entry = dao->imappers[0]; dao_clear_left_input()
228 dao->mgr->imap_delete(dao->mgr, entry); dao_clear_left_input()
231 entry = dao->imappers[i]; dao_clear_left_input()
232 dao->mgr->imap_delete(dao->mgr, entry); dao_clear_left_input()
244 struct imapper *entry; dao_clear_right_input() local
251 entry = dao->imappers[daio->rscl.msr]; dao_clear_right_input()
252 dao->mgr->imap_delete(dao->mgr, entry); dao_clear_right_input()
255 entry = dao->imappers[daio->rscl.msr + i]; dao_clear_right_input()
256 dao->mgr->imap_delete(dao->mgr, entry); dao_clear_right_input()
635 static int daio_map_op(void *data, struct imapper *entry) daio_map_op() argument
640 hw->daio_mgr_set_imaparc(mgr->ctrl_blk, entry->slot); daio_map_op()
641 hw->daio_mgr_set_imapnxt(mgr->ctrl_blk, entry->next); daio_map_op()
642 hw->daio_mgr_set_imapaddr(mgr->ctrl_blk, entry->addr); daio_map_op()
648 static int daio_imap_add(struct daio_mgr *mgr, struct imapper *entry) daio_imap_add() argument
654 if (!entry->addr && mgr->init_imap_added) { daio_imap_add()
659 err = input_mapper_add(&mgr->imappers, entry, daio_map_op, mgr); daio_imap_add()
665 static int daio_imap_delete(struct daio_mgr *mgr, struct imapper *entry) daio_imap_delete() argument
671 err = input_mapper_delete(&mgr->imappers, entry, daio_map_op, mgr); daio_imap_delete()
694 struct imapper *entry; daio_mgr_create() local
708 entry = kzalloc(sizeof(*entry), GFP_KERNEL); daio_mgr_create()
709 if (!entry) { daio_mgr_create()
713 entry->slot = entry->addr = entry->next = entry->user = 0; daio_mgr_create()
714 list_add(&entry->list, &daio_mgr->imappers); daio_mgr_create()
715 daio_mgr->init_imap = entry; daio_mgr_create()
/linux-4.4.14/arch/s390/include/asm/
pci_dma.h
97 static inline void set_pt_pfaa(unsigned long *entry, void *pfaa) set_pt_pfaa() argument
99 *entry &= ZPCI_PTE_FLAG_MASK; set_pt_pfaa()
100 *entry |= ((unsigned long) pfaa & ZPCI_PTE_ADDR_MASK); set_pt_pfaa()
103 static inline void set_rt_sto(unsigned long *entry, void *sto) set_rt_sto() argument
105 *entry &= ZPCI_RTE_FLAG_MASK; set_rt_sto()
106 *entry |= ((unsigned long) sto & ZPCI_RTE_ADDR_MASK); set_rt_sto()
107 *entry |= ZPCI_TABLE_TYPE_RTX; set_rt_sto()
110 static inline void set_st_pto(unsigned long *entry, void *pto) set_st_pto() argument
112 *entry &= ZPCI_STE_FLAG_MASK; set_st_pto()
113 *entry |= ((unsigned long) pto & ZPCI_STE_ADDR_MASK); set_st_pto()
114 *entry |= ZPCI_TABLE_TYPE_SX; set_st_pto()
117 static inline void validate_rt_entry(unsigned long *entry) validate_rt_entry() argument
119 *entry &= ~ZPCI_TABLE_VALID_MASK; validate_rt_entry()
120 *entry &= ~ZPCI_TABLE_OFFSET_MASK; validate_rt_entry()
121 *entry |= ZPCI_TABLE_VALID; validate_rt_entry()
122 *entry |= ZPCI_TABLE_LEN_RTX; validate_rt_entry()
125 static inline void validate_st_entry(unsigned long *entry) validate_st_entry() argument
127 *entry &= ~ZPCI_TABLE_VALID_MASK; validate_st_entry()
128 *entry |= ZPCI_TABLE_VALID; validate_st_entry()
131 static inline void invalidate_table_entry(unsigned long *entry) invalidate_table_entry() argument
133 *entry &= ~ZPCI_TABLE_VALID_MASK; invalidate_table_entry()
134 *entry |= ZPCI_TABLE_INVALID; invalidate_table_entry()
137 static inline void invalidate_pt_entry(unsigned long *entry) invalidate_pt_entry() argument
139 WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID); invalidate_pt_entry()
140 *entry &= ~ZPCI_PTE_VALID_MASK; invalidate_pt_entry()
141 *entry |= ZPCI_PTE_INVALID; invalidate_pt_entry()
144 static inline void validate_pt_entry(unsigned long *entry) validate_pt_entry() argument
146 WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID); validate_pt_entry()
147 *entry &= ~ZPCI_PTE_VALID_MASK; validate_pt_entry()
148 *entry |= ZPCI_PTE_VALID; validate_pt_entry()
151 static inline void entry_set_protected(unsigned long *entry) entry_set_protected() argument
153 *entry &= ~ZPCI_TABLE_PROT_MASK; entry_set_protected()
154 *entry |= ZPCI_TABLE_PROTECTED; entry_set_protected()
157 static inline void entry_clr_protected(unsigned long *entry) entry_clr_protected() argument
159 *entry &= ~ZPCI_TABLE_PROT_MASK; entry_clr_protected()
160 *entry |= ZPCI_TABLE_UNPROTECTED; entry_clr_protected()
163 static inline int reg_entry_isvalid(unsigned long entry) reg_entry_isvalid() argument
165 return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID; reg_entry_isvalid()
168 static inline int pt_entry_isvalid(unsigned long entry) pt_entry_isvalid() argument
170 return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID; pt_entry_isvalid()
173 static inline int entry_isprotected(unsigned long entry) entry_isprotected() argument
175 return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED; entry_isprotected()
178 static inline unsigned long *get_rt_sto(unsigned long entry) get_rt_sto() argument
180 return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX) get_rt_sto()
181 ? (unsigned long *) (entry & ZPCI_RTE_ADDR_MASK) get_rt_sto()
185 static inline unsigned long *get_st_pto(unsigned long entry) get_st_pto() argument
187 return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX) get_st_pto()
188 ? (unsigned long *) (entry & ZPCI_STE_ADDR_MASK) get_st_pto()
199 void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags);
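
Every helper in this pci_dma.h block edits a table entry the same way: mask the field out, then OR the new state in, so stale bits can never survive a transition. A sketch of the idiom — the mask and state values below are placeholders for illustration, not the real ZPCI_* constants:

#include <stdbool.h>

/* Placeholder layout: one bit flags the entry invalid. */
#define TABLE_VALID_MASK 0x20UL
#define TABLE_VALID      0x00UL
#define TABLE_INVALID    0x20UL

static inline void validate_entry(unsigned long *entry)
{
    *entry &= ~TABLE_VALID_MASK;    /* clear the field first... */
    *entry |= TABLE_VALID;          /* ...then set the new state */
}

static inline void invalidate_entry(unsigned long *entry)
{
    *entry &= ~TABLE_VALID_MASK;
    *entry |= TABLE_INVALID;
}

static inline bool entry_isvalid(unsigned long entry)
{
    return (entry & TABLE_VALID_MASK) == TABLE_VALID;
}
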
pci_io.h
35 struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
36 u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
50 struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
51 u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
138 struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)]; zpci_memcpy_fromio() local
145 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size); zpci_memcpy_fromio()
159 struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)]; zpci_memcpy_toio() local
169 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size); zpci_memcpy_toio()
/linux-4.4.14/arch/cris/kernel/
asm-offsets.c
17 #define ENTRY(entry) DEFINE(PT_ ## entry, offsetof(struct pt_regs, entry)) main()
37 #define ENTRY(entry) DEFINE(TI_ ## entry, offsetof(struct thread_info, entry)) main()
43 #define ENTRY(entry) DEFINE(THREAD_ ## entry, offsetof(struct thread_struct, entry)) main()
53 #define ENTRY(entry) DEFINE(TASK_ ## entry, offsetof(struct task_struct, entry)) main()
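
asm-offsets.c redefines ENTRY() before each block so one macro shape emits PT_*, TI_*, THREAD_* and TASK_* offset constants for different structures. A runnable sketch of the offsetof() trick with a made-up struct; the real kernel emits these through an asm marker that the build post-processes, plain printf stands in here:

#include <stddef.h>
#include <stdio.h>

struct pt_regs_demo { long r0, r1, sp; };   /* made-up register frame */

#define DEFINE(sym, val) printf("#define %s %zu\n", #sym, (size_t)(val))
#define ENTRY(entry) DEFINE(PT_ ## entry, offsetof(struct pt_regs_demo, entry))

int main(void)
{
    ENTRY(r0);   /* prints: #define PT_r0 0                  */
    ENTRY(sp);   /* prints: #define PT_sp 16 on LP64 targets */
    return 0;
}
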
/linux-4.4.14/drivers/acpi/
nvs.c
96 struct nvs_page *entry, *next; suspend_nvs_register() local
104 entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL); suspend_nvs_register()
105 if (!entry) suspend_nvs_register()
108 list_add_tail(&entry->node, &nvs_list); suspend_nvs_register()
109 entry->phys_start = start; suspend_nvs_register()
111 entry->size = (size < nr_bytes) ? size : nr_bytes; suspend_nvs_register()
113 start += entry->size; suspend_nvs_register()
114 size -= entry->size; suspend_nvs_register()
119 list_for_each_entry_safe(entry, next, &nvs_list, node) { suspend_nvs_register()
120 list_del(&entry->node); suspend_nvs_register()
121 kfree(entry); suspend_nvs_register()
131 struct nvs_page *entry; suspend_nvs_free() local
133 list_for_each_entry(entry, &nvs_list, node) suspend_nvs_free()
134 if (entry->data) { suspend_nvs_free()
135 free_page((unsigned long)entry->data); suspend_nvs_free()
136 entry->data = NULL; suspend_nvs_free()
137 if (entry->kaddr) { suspend_nvs_free()
138 if (entry->unmap) { suspend_nvs_free()
139 iounmap(entry->kaddr); suspend_nvs_free()
140 entry->unmap = false; suspend_nvs_free()
142 acpi_os_unmap_iomem(entry->kaddr, suspend_nvs_free()
143 entry->size); suspend_nvs_free()
145 entry->kaddr = NULL; suspend_nvs_free()
155 struct nvs_page *entry; suspend_nvs_alloc() local
157 list_for_each_entry(entry, &nvs_list, node) { suspend_nvs_alloc()
158 entry->data = (void *)__get_free_page(GFP_KERNEL); suspend_nvs_alloc()
159 if (!entry->data) { suspend_nvs_alloc()
172 struct nvs_page *entry; suspend_nvs_save() local
176 list_for_each_entry(entry, &nvs_list, node) suspend_nvs_save()
177 if (entry->data) { suspend_nvs_save()
178 unsigned long phys = entry->phys_start; suspend_nvs_save()
179 unsigned int size = entry->size; suspend_nvs_save()
181 entry->kaddr = acpi_os_get_iomem(phys, size); suspend_nvs_save()
182 if (!entry->kaddr) { suspend_nvs_save()
183 entry->kaddr = acpi_os_ioremap(phys, size); suspend_nvs_save()
184 entry->unmap = !!entry->kaddr; suspend_nvs_save()
186 if (!entry->kaddr) { suspend_nvs_save()
190 memcpy(entry->data, entry->kaddr, entry->size); suspend_nvs_save()
204 struct nvs_page *entry; suspend_nvs_restore() local
208 list_for_each_entry(entry, &nvs_list, node) suspend_nvs_restore()
209 if (entry->data) suspend_nvs_restore()
210 memcpy(entry->kaddr, entry->data, entry->size); suspend_nvs_restore()
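
suspend_nvs_register() chops an arbitrary physical region into page-bounded nvs_page chunks so suspend_nvs_save() can later copy each chunk into one free page. A sketch of the chopping loop; error unwinding is reduced to an early return, where the kernel instead frees the whole list:

#include <stdlib.h>

#define PAGE_SIZE 4096UL

struct nvs_page {
    unsigned long phys_start;
    unsigned int size;
    void *data;                 /* RAM copy made at suspend time */
    struct nvs_page *next;
};

/* Split [start, start + size) so no chunk crosses a page boundary:
 * the first chunk runs only to the end of its page, the rest are
 * whole pages (plus a short tail). */
static struct nvs_page *nvs_register(unsigned long start, unsigned long size)
{
    struct nvs_page *head = NULL, **tail = &head;

    while (size > 0) {
        unsigned long nr_bytes = PAGE_SIZE - (start & (PAGE_SIZE - 1));
        struct nvs_page *e = calloc(1, sizeof(*e));

        if (!e)
            return head;
        e->phys_start = start;
        e->size = (size < nr_bytes) ? size : nr_bytes;
        *tail = e;
        tail = &e->next;
        start += e->size;
        size -= e->size;
    }
    return head;
}
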
pci_irq.c
124 static void do_prt_fixups(struct acpi_prt_entry *entry, do_prt_fixups() argument
138 entry->id.segment == quirk->segment && do_prt_fixups()
139 entry->id.bus == quirk->bus && do_prt_fixups()
140 entry->id.device == quirk->device && do_prt_fixups()
141 entry->pin == quirk->pin && do_prt_fixups()
147 entry->id.segment, entry->id.bus, do_prt_fixups()
148 entry->id.device, pin_name(entry->pin), do_prt_fixups()
162 struct acpi_prt_entry *entry; acpi_pci_irq_check_entry() local
168 entry = kzalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL); acpi_pci_irq_check_entry()
169 if (!entry) acpi_pci_irq_check_entry()
177 entry->id.segment = segment; acpi_pci_irq_check_entry()
178 entry->id.bus = bus; acpi_pci_irq_check_entry()
179 entry->id.device = (prt->address >> 16) & 0xFFFF; acpi_pci_irq_check_entry()
180 entry->pin = prt->pin + 1; acpi_pci_irq_check_entry()
182 do_prt_fixups(entry, prt); acpi_pci_irq_check_entry()
184 entry->index = prt->source_index; acpi_pci_irq_check_entry()
196 * (e.g. exists somewhere 'below' this _PRT entry in the ACPI acpi_pci_irq_check_entry()
200 acpi_get_handle(handle, prt->source, &entry->link); acpi_pci_irq_check_entry()
212 entry->id.segment, entry->id.bus, acpi_pci_irq_check_entry()
213 entry->id.device, pin_name(entry->pin), acpi_pci_irq_check_entry()
214 prt->source, entry->index)); acpi_pci_irq_check_entry()
216 *entry_ptr = entry; acpi_pci_irq_check_entry()
226 struct acpi_pci_routing_table *entry; acpi_pci_irq_find_prt_entry() local
242 entry = buffer.pointer; acpi_pci_irq_find_prt_entry()
243 while (entry && (entry->length > 0)) { acpi_pci_irq_find_prt_entry()
245 entry, entry_ptr)) acpi_pci_irq_find_prt_entry()
247 entry = (struct acpi_pci_routing_table *) acpi_pci_irq_find_prt_entry()
248 ((unsigned long)entry + entry->length); acpi_pci_irq_find_prt_entry()
277 * entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel does
283 struct acpi_prt_entry *entry) acpi_reroute_boot_interrupt()
300 "IRQ %d\n", entry->index, acpi_reroute_boot_interrupt()
301 (entry->index % 4) + 16); acpi_reroute_boot_interrupt()
302 entry->index = (entry->index % 4) + 16; acpi_reroute_boot_interrupt()
306 "IRQ: unknown mapping\n", entry->index); acpi_reroute_boot_interrupt()
315 struct acpi_prt_entry *entry = NULL; acpi_pci_irq_lookup() local
320 ret = acpi_pci_irq_find_prt_entry(dev, pin, &entry); acpi_pci_irq_lookup()
321 if (!ret && entry) { acpi_pci_irq_lookup()
323 acpi_reroute_boot_interrupt(dev, entry); acpi_pci_irq_lookup()
325 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n", acpi_pci_irq_lookup()
327 return entry; acpi_pci_irq_lookup()
332 * PCI interrupt routing entry (eg. yenta bridge and add-in card bridge). acpi_pci_irq_lookup()
350 ret = acpi_pci_irq_find_prt_entry(bridge, pin, &entry); acpi_pci_irq_lookup()
351 if (!ret && entry) { acpi_pci_irq_lookup()
356 return entry; acpi_pci_irq_lookup()
395 struct acpi_prt_entry *entry; acpi_pci_irq_enable() local
415 entry = acpi_pci_irq_lookup(dev, pin); acpi_pci_irq_enable()
416 if (!entry) { acpi_pci_irq_enable()
426 if (entry) { acpi_pci_irq_enable()
427 if (entry->link) acpi_pci_irq_enable()
428 gsi = acpi_pci_link_allocate_irq(entry->link, acpi_pci_irq_enable()
429 entry->index, acpi_pci_irq_enable()
433 gsi = entry->index; acpi_pci_irq_enable()
446 kfree(entry); acpi_pci_irq_enable()
454 kfree(entry); acpi_pci_irq_enable()
470 kfree(entry); acpi_pci_irq_enable()
476 struct acpi_prt_entry *entry; acpi_pci_irq_disable() local
492 entry = acpi_pci_irq_lookup(dev, pin); acpi_pci_irq_disable()
493 if (!entry) acpi_pci_irq_disable()
496 if (entry->link) acpi_pci_irq_disable()
497 gsi = acpi_pci_link_free_irq(entry->link); acpi_pci_irq_disable()
499 gsi = entry->index; acpi_pci_irq_disable()
501 kfree(entry); acpi_pci_irq_disable()
282 acpi_reroute_boot_interrupt(struct pci_dev *dev, struct acpi_prt_entry *entry) acpi_reroute_boot_interrupt() argument
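
acpi_pci_irq_enable() and acpi_pci_irq_disable() above share one shape: look the _PRT entry up per (device, pin), take the GSI either from a PCI interrupt link device or from the hard-wired entry->index, then kfree() the lookup result. A condensed sketch of that flow with error paths trimmed; my_prt_entry_to_gsi() is hypothetical:

    static int my_prt_entry_to_gsi(struct pci_dev *dev, int pin)
    {
            struct acpi_prt_entry *entry;
            int triggering, polarity, gsi;

            entry = acpi_pci_irq_lookup(dev, pin);  /* kzalloc'd by the lookup */
            if (!entry)
                    return -ENOENT;

            if (entry->link)                        /* routed via a link device */
                    gsi = acpi_pci_link_allocate_irq(entry->link, entry->index,
                                                     &triggering, &polarity, NULL);
            else                                    /* hard-wired GSI */
                    gsi = entry->index;

            kfree(entry);                           /* caller owns the entry */
            return gsi;
    }
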
H A Dproc.c32 struct acpi_device_physical_node *entry; acpi_system_wakeup_device_seq_show() local
50 list_for_each_entry(entry, &dev->physical_node_list, acpi_system_wakeup_device_seq_show()
52 ldev = get_device(entry->dev); acpi_system_wakeup_device_seq_show()
56 if (&entry->node != acpi_system_wakeup_device_seq_show()
79 struct acpi_device_physical_node *entry; physical_device_enable_wakeup() local
83 list_for_each_entry(entry, physical_device_enable_wakeup()
85 if (entry->dev && device_can_wakeup(entry->dev)) { physical_device_enable_wakeup()
86 bool enable = !device_may_wakeup(entry->dev); physical_device_enable_wakeup()
87 device_set_wakeup_enable(entry->dev, enable); physical_device_enable_wakeup()
H A Dprocessor_core.c34 static int map_lapic_id(struct acpi_subtable_header *entry, map_lapic_id() argument
38 container_of(entry, struct acpi_madt_local_apic, header); map_lapic_id()
50 static int map_x2apic_id(struct acpi_subtable_header *entry, map_x2apic_id() argument
54 container_of(entry, struct acpi_madt_local_x2apic, header); map_x2apic_id()
67 static int map_lsapic_id(struct acpi_subtable_header *entry, map_lsapic_id() argument
71 container_of(entry, struct acpi_madt_local_sapic, header); map_lsapic_id()
77 if ((entry->length < 16) || (lsapic->uid != acpi_id)) map_lsapic_id()
89 static int map_gicc_mpidr(struct acpi_subtable_header *entry, map_gicc_mpidr() argument
93 container_of(entry, struct acpi_madt_generic_interrupt, header); map_gicc_mpidr()
113 unsigned long madt_end, entry; map_madt_entry() local
121 entry = (unsigned long)madt; map_madt_entry()
122 madt_end = entry + madt->header.length; map_madt_entry()
126 entry += sizeof(struct acpi_table_madt); map_madt_entry()
127 while (entry + sizeof(struct acpi_subtable_header) < madt_end) { map_madt_entry()
129 (struct acpi_subtable_header *)entry; map_madt_entry()
143 entry += header->length; map_madt_entry()
250 static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base, get_ioapic_id() argument
253 struct acpi_madt_io_apic *ioapic = (struct acpi_madt_io_apic *)entry; get_ioapic_id()
266 unsigned long madt_end, entry; parse_madt_ioapic_entry() local
274 entry = (unsigned long)madt; parse_madt_ioapic_entry()
275 madt_end = entry + madt->header.length; parse_madt_ioapic_entry()
278 entry += sizeof(struct acpi_table_madt); parse_madt_ioapic_entry()
279 while (entry + sizeof(struct acpi_subtable_header) < madt_end) { parse_madt_ioapic_entry()
280 hdr = (struct acpi_subtable_header *)entry; parse_madt_ioapic_entry()
285 entry += hdr->length; parse_madt_ioapic_entry()
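
map_madt_entry() and parse_madt_ioapic_entry() above open-code the same walk: advance a byte cursor across the MADT's variable-length subtables, bounded by madt->header.length. A generic sketch of that loop (my_walk_madt() is hypothetical):

    static void my_walk_madt(struct acpi_table_madt *madt,
                             int (*handler)(struct acpi_subtable_header *))
    {
            unsigned long entry = (unsigned long)madt;
            unsigned long madt_end = entry + madt->header.length;

            entry += sizeof(struct acpi_table_madt);        /* skip fixed header */
            while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
                    struct acpi_subtable_header *header =
                            (struct acpi_subtable_header *)entry;

                    if (handler(header))
                            break;                  /* handler found its entry */
                    entry += header->length;        /* next variable-length record */
            }
    }
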
/linux-4.4.14/drivers/gpu/drm/
H A Ddrm_scatter.c50 static void drm_sg_cleanup(struct drm_sg_mem * entry) drm_sg_cleanup() argument
55 for (i = 0; i < entry->pages; i++) { drm_sg_cleanup()
56 page = entry->pagelist[i]; drm_sg_cleanup()
61 vfree(entry->virtual); drm_sg_cleanup()
63 kfree(entry->busaddr); drm_sg_cleanup()
64 kfree(entry->pagelist); drm_sg_cleanup()
65 kfree(entry); drm_sg_cleanup()
86 struct drm_sg_mem *entry; drm_legacy_sg_alloc() local
100 entry = kzalloc(sizeof(*entry), GFP_KERNEL); drm_legacy_sg_alloc()
101 if (!entry) drm_legacy_sg_alloc()
107 entry->pages = pages; drm_legacy_sg_alloc()
108 entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL); drm_legacy_sg_alloc()
109 if (!entry->pagelist) { drm_legacy_sg_alloc()
110 kfree(entry); drm_legacy_sg_alloc()
114 entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL); drm_legacy_sg_alloc()
115 if (!entry->busaddr) { drm_legacy_sg_alloc()
116 kfree(entry->pagelist); drm_legacy_sg_alloc()
117 kfree(entry); drm_legacy_sg_alloc()
121 entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT); drm_legacy_sg_alloc()
122 if (!entry->virtual) { drm_legacy_sg_alloc()
123 kfree(entry->busaddr); drm_legacy_sg_alloc()
124 kfree(entry->pagelist); drm_legacy_sg_alloc()
125 kfree(entry); drm_legacy_sg_alloc()
132 memset(entry->virtual, 0, pages << PAGE_SHIFT); drm_legacy_sg_alloc()
134 entry->handle = ScatterHandle((unsigned long)entry->virtual); drm_legacy_sg_alloc()
136 DRM_DEBUG("handle = %08lx\n", entry->handle); drm_legacy_sg_alloc()
137 DRM_DEBUG("virtual = %p\n", entry->virtual); drm_legacy_sg_alloc()
139 for (i = (unsigned long)entry->virtual, j = 0; j < pages; drm_legacy_sg_alloc()
141 entry->pagelist[j] = vmalloc_to_page((void *)i); drm_legacy_sg_alloc()
142 if (!entry->pagelist[j]) drm_legacy_sg_alloc()
144 SetPageReserved(entry->pagelist[j]); drm_legacy_sg_alloc()
147 request->handle = entry->handle; drm_legacy_sg_alloc()
149 dev->sg = entry; drm_legacy_sg_alloc()
161 tmp = page_address(entry->pagelist[i]); drm_legacy_sg_alloc()
167 tmp = (unsigned long *)((u8 *) entry->virtual + drm_legacy_sg_alloc()
179 tmp = page_address(entry->pagelist[i]); drm_legacy_sg_alloc()
194 drm_sg_cleanup(entry); drm_legacy_sg_alloc()
202 struct drm_sg_mem *entry; drm_legacy_sg_free() local
210 entry = dev->sg; drm_legacy_sg_free()
213 if (!entry || entry->handle != request->handle) drm_legacy_sg_free()
216 DRM_DEBUG("virtual = %p\n", entry->virtual); drm_legacy_sg_free()
218 drm_sg_cleanup(entry); drm_legacy_sg_free()
H A Ddrm_hashtab.c61 struct drm_hash_item *entry; drm_ht_verbose_list() local
69 hlist_for_each_entry(entry, h_list, head) drm_ht_verbose_list()
70 DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); drm_ht_verbose_list()
76 struct drm_hash_item *entry; drm_ht_find_key() local
82 hlist_for_each_entry(entry, h_list, head) { hlist_for_each_entry()
83 if (entry->key == key) hlist_for_each_entry()
84 return &entry->head; hlist_for_each_entry()
85 if (entry->key > key) hlist_for_each_entry()
94 struct drm_hash_item *entry; drm_ht_find_key_rcu() local
100 hlist_for_each_entry_rcu(entry, h_list, head) { hlist_for_each_entry_rcu()
101 if (entry->key == key) hlist_for_each_entry_rcu()
102 return &entry->head; hlist_for_each_entry_rcu()
103 if (entry->key > key) hlist_for_each_entry_rcu()
111 struct drm_hash_item *entry; drm_ht_insert_item() local
120 hlist_for_each_entry(entry, h_list, head) { hlist_for_each_entry()
121 if (entry->key == key) hlist_for_each_entry()
123 if (entry->key > key) hlist_for_each_entry()
125 parent = &entry->head; hlist_for_each_entry()
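
The drm_hashtab helpers above keep every hash chain sorted by key, which is why both find and insert may stop as soon as entry->key > key. A self-contained sketch of the same trick on a plain hlist (struct my_hash_item is a hypothetical mirror of drm_hash_item, non-RCU for brevity):

    #include <linux/list.h>

    struct my_hash_item {
            struct hlist_node head;
            unsigned long key;
    };

    static struct my_hash_item *my_find(struct hlist_head *h_list,
                                        unsigned long key)
    {
            struct my_hash_item *entry;

            hlist_for_each_entry(entry, h_list, head) {
                    if (entry->key == key)
                            return entry;
                    if (entry->key > key)
                            break;          /* sorted chain: key cannot follow */
            }
            return NULL;
    }

    static int my_insert(struct hlist_head *h_list, struct my_hash_item *item)
    {
            struct my_hash_item *entry;
            struct hlist_node *parent = NULL;

            hlist_for_each_entry(entry, h_list, head) {
                    if (entry->key == item->key)
                            return -EINVAL; /* duplicate key */
                    if (entry->key > item->key)
                            break;
                    parent = &entry->head;
            }
            if (parent)
                    hlist_add_behind(&item->head, parent);
            else
                    hlist_add_head(&item->head, h_list);
            return 0;
    }
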
H A Ddrm_agpsupport.c195 * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
199 struct drm_agp_mem *entry; drm_agp_alloc() local
206 if (!(entry = kzalloc(sizeof(*entry), GFP_KERNEL))) drm_agp_alloc()
212 kfree(entry); drm_agp_alloc()
216 entry->handle = (unsigned long)memory->key + 1; drm_agp_alloc()
217 entry->memory = memory; drm_agp_alloc()
218 entry->bound = 0; drm_agp_alloc()
219 entry->pages = pages; drm_agp_alloc()
220 list_add(&entry->head, &dev->agp->memory); drm_agp_alloc()
222 request->handle = entry->handle; drm_agp_alloc()
239 * Search for the AGP memory entry associated with a handle.
250 struct drm_agp_mem *entry; drm_agp_lookup_entry() local
252 list_for_each_entry(entry, &dev->agp->memory, head) { drm_agp_lookup_entry()
253 if (entry->handle == handle) drm_agp_lookup_entry()
254 return entry; drm_agp_lookup_entry()
269 * entry and passes it to the unbind_agp() function.
273 struct drm_agp_mem *entry; drm_agp_unbind() local
278 if (!(entry = drm_agp_lookup_entry(dev, request->handle))) drm_agp_unbind()
280 if (!entry->bound) drm_agp_unbind()
282 ret = drm_unbind_agp(entry->memory); drm_agp_unbind()
284 entry->bound = 0; drm_agp_unbind()
308 * is currently bound into the GATT. Looks up the AGP memory entry and passes drm_agp_bind()
313 struct drm_agp_mem *entry; drm_agp_bind() local
319 if (!(entry = drm_agp_lookup_entry(dev, request->handle))) drm_agp_bind()
321 if (entry->bound) drm_agp_bind()
324 if ((retcode = drm_bind_agp(entry->memory, page))) drm_agp_bind()
326 entry->bound = dev->agp->base + (page << PAGE_SHIFT); drm_agp_bind()
327 DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n", drm_agp_bind()
328 dev->agp->base, entry->bound); drm_agp_bind()
352 * AGP memory entry. If the memory is currently bound, unbind it via drm_agp_free()
353 * unbind_agp(). Frees it via free_agp() as well as the entry itself
358 struct drm_agp_mem *entry; drm_agp_free() local
362 if (!(entry = drm_agp_lookup_entry(dev, request->handle))) drm_agp_free()
364 if (entry->bound) drm_agp_free()
365 drm_unbind_agp(entry->memory); drm_agp_free()
367 list_del(&entry->head); drm_agp_free()
369 drm_free_agp(entry->memory, entry->pages); drm_agp_free()
370 kfree(entry); drm_agp_free()
439 struct drm_agp_mem *entry, *tempe; drm_agp_clear() local
446 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { drm_agp_clear()
447 if (entry->bound) drm_agp_clear()
448 drm_unbind_agp(entry->memory); drm_agp_clear()
449 drm_free_agp(entry->memory, entry->pages); drm_agp_clear()
450 kfree(entry); drm_agp_clear()
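
drm_agp_unbind(), drm_agp_bind() and drm_agp_free() above all start from the same handle lookup on dev->agp->memory. A condensed sketch of the free path, assuming it lives in drm_agpsupport.c next to the static drm_agp_lookup_entry():

    static int my_agp_free(struct drm_device *dev, unsigned long handle)
    {
            struct drm_agp_mem *entry = drm_agp_lookup_entry(dev, handle);

            if (!entry)
                    return -EINVAL;
            if (entry->bound)                       /* still in the GATT? */
                    drm_unbind_agp(entry->memory);
            list_del(&entry->head);                 /* unlink bookkeeping */
            drm_free_agp(entry->memory, entry->pages);
            kfree(entry);
            return 0;
    }
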
H A Ddrm_bufs.c42 struct drm_map_list *entry; drm_find_matching_map() local
43 list_for_each_entry(entry, &dev->maplist, head) { drm_find_matching_map()
52 if (!entry->map || drm_find_matching_map()
53 map->type != entry->map->type || drm_find_matching_map()
54 entry->master != dev->primary->master) drm_find_matching_map()
60 return entry; drm_find_matching_map()
63 if ((entry->map->offset & 0xffffffff) == drm_find_matching_map()
65 return entry; drm_find_matching_map()
69 if (entry->map->offset == map->offset) drm_find_matching_map()
70 return entry; drm_find_matching_map()
257 struct drm_agp_mem *entry; drm_addmap_core() local
286 list_for_each_entry(entry, &dev->agp->memory, head) { drm_addmap_core()
287 if ((map->offset >= entry->bound) && drm_addmap_core()
288 (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) { drm_addmap_core()
436 /* Find the list entry for the map and remove it */ drm_legacy_rmmap_locked()
555 * \param entry buffer entry where the error occurred.
557 * Frees any pages and buffers associated with the given entry.
560 struct drm_buf_entry * entry) drm_cleanup_buf_error()
564 if (entry->seg_count) { drm_cleanup_buf_error()
565 for (i = 0; i < entry->seg_count; i++) { drm_cleanup_buf_error()
566 if (entry->seglist[i]) { drm_cleanup_buf_error()
567 drm_pci_free(dev, entry->seglist[i]); drm_cleanup_buf_error()
570 kfree(entry->seglist); drm_cleanup_buf_error()
572 entry->seg_count = 0; drm_cleanup_buf_error()
575 if (entry->buf_count) { drm_cleanup_buf_error()
576 for (i = 0; i < entry->buf_count; i++) { drm_cleanup_buf_error()
577 kfree(entry->buflist[i].dev_private); drm_cleanup_buf_error()
579 kfree(entry->buflist); drm_cleanup_buf_error()
581 entry->buf_count = 0; drm_cleanup_buf_error()
601 struct drm_buf_entry *entry; drm_legacy_addbufs_agp() local
664 entry = &dma->bufs[order]; drm_legacy_addbufs_agp()
665 if (entry->buf_count) { drm_legacy_addbufs_agp()
677 entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL); drm_legacy_addbufs_agp()
678 if (!entry->buflist) { drm_legacy_addbufs_agp()
684 entry->buf_size = size; drm_legacy_addbufs_agp()
685 entry->page_order = page_order; drm_legacy_addbufs_agp()
689 while (entry->buf_count < count) { drm_legacy_addbufs_agp()
690 buf = &entry->buflist[entry->buf_count]; drm_legacy_addbufs_agp()
691 buf->idx = dma->buf_count + entry->buf_count; drm_legacy_addbufs_agp()
708 entry->buf_count = count; drm_legacy_addbufs_agp()
709 drm_cleanup_buf_error(dev, entry); drm_legacy_addbufs_agp()
715 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); drm_legacy_addbufs_agp()
718 entry->buf_count++; drm_legacy_addbufs_agp()
725 (dma->buf_count + entry->buf_count) * drm_legacy_addbufs_agp()
728 /* Free the entry because it isn't valid */ drm_legacy_addbufs_agp()
729 drm_cleanup_buf_error(dev, entry); drm_legacy_addbufs_agp()
736 for (i = 0; i < entry->buf_count; i++) { drm_legacy_addbufs_agp()
737 dma->buflist[i + dma->buf_count] = &entry->buflist[i]; drm_legacy_addbufs_agp()
740 dma->buf_count += entry->buf_count; drm_legacy_addbufs_agp()
741 dma->seg_count += entry->seg_count; drm_legacy_addbufs_agp()
746 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); drm_legacy_addbufs_agp()
750 request->count = entry->buf_count; drm_legacy_addbufs_agp()
770 struct drm_buf_entry *entry; drm_legacy_addbufs_pci() local
814 entry = &dma->bufs[order]; drm_legacy_addbufs_pci()
815 if (entry->buf_count) { drm_legacy_addbufs_pci()
827 entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL); drm_legacy_addbufs_pci()
828 if (!entry->buflist) { drm_legacy_addbufs_pci()
834 entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL); drm_legacy_addbufs_pci()
835 if (!entry->seglist) { drm_legacy_addbufs_pci()
836 kfree(entry->buflist); drm_legacy_addbufs_pci()
848 kfree(entry->buflist); drm_legacy_addbufs_pci()
849 kfree(entry->seglist); drm_legacy_addbufs_pci()
859 entry->buf_size = size; drm_legacy_addbufs_pci()
860 entry->page_order = page_order; drm_legacy_addbufs_pci()
864 while (entry->buf_count < count) { drm_legacy_addbufs_pci()
870 entry->buf_count = count; drm_legacy_addbufs_pci()
871 entry->seg_count = count; drm_legacy_addbufs_pci()
872 drm_cleanup_buf_error(dev, entry); drm_legacy_addbufs_pci()
878 entry->seglist[entry->seg_count++] = dmah; drm_legacy_addbufs_pci()
887 offset + size <= total && entry->buf_count < count; drm_legacy_addbufs_pci()
888 offset += alignment, ++entry->buf_count) { drm_legacy_addbufs_pci()
889 buf = &entry->buflist[entry->buf_count]; drm_legacy_addbufs_pci()
890 buf->idx = dma->buf_count + entry->buf_count; drm_legacy_addbufs_pci()
907 entry->buf_count = count; drm_legacy_addbufs_pci()
908 entry->seg_count = count; drm_legacy_addbufs_pci()
909 drm_cleanup_buf_error(dev, entry); drm_legacy_addbufs_pci()
917 entry->buf_count, buf->address); drm_legacy_addbufs_pci()
923 (dma->buf_count + entry->buf_count) * drm_legacy_addbufs_pci()
926 /* Free the entry because it isn't valid */ drm_legacy_addbufs_pci()
927 drm_cleanup_buf_error(dev, entry); drm_legacy_addbufs_pci()
935 for (i = 0; i < entry->buf_count; i++) { drm_legacy_addbufs_pci()
936 dma->buflist[i + dma->buf_count] = &entry->buflist[i]; drm_legacy_addbufs_pci()
947 dma->buf_count += entry->buf_count; drm_legacy_addbufs_pci()
948 dma->seg_count += entry->seg_count; drm_legacy_addbufs_pci()
949 dma->page_count += entry->seg_count << page_order; drm_legacy_addbufs_pci()
950 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); drm_legacy_addbufs_pci()
954 request->count = entry->buf_count; drm_legacy_addbufs_pci()
970 struct drm_buf_entry *entry; drm_legacy_addbufs_sg() local
1025 entry = &dma->bufs[order]; drm_legacy_addbufs_sg()
1026 if (entry->buf_count) { drm_legacy_addbufs_sg()
1038 entry->buflist = kzalloc(count * sizeof(*entry->buflist), drm_legacy_addbufs_sg()
1040 if (!entry->buflist) { drm_legacy_addbufs_sg()
1046 entry->buf_size = size; drm_legacy_addbufs_sg()
1047 entry->page_order = page_order; drm_legacy_addbufs_sg()
1051 while (entry->buf_count < count) { drm_legacy_addbufs_sg()
1052 buf = &entry->buflist[entry->buf_count]; drm_legacy_addbufs_sg()
1053 buf->idx = dma->buf_count + entry->buf_count; drm_legacy_addbufs_sg()
1071 entry->buf_count = count; drm_legacy_addbufs_sg()
1072 drm_cleanup_buf_error(dev, entry); drm_legacy_addbufs_sg()
1078 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); drm_legacy_addbufs_sg()
1081 entry->buf_count++; drm_legacy_addbufs_sg()
1088 (dma->buf_count + entry->buf_count) * drm_legacy_addbufs_sg()
1091 /* Free the entry because it isn't valid */ drm_legacy_addbufs_sg()
1092 drm_cleanup_buf_error(dev, entry); drm_legacy_addbufs_sg()
1099 for (i = 0; i < entry->buf_count; i++) { drm_legacy_addbufs_sg()
1100 dma->buflist[i + dma->buf_count] = &entry->buflist[i]; drm_legacy_addbufs_sg()
1103 dma->buf_count += entry->buf_count; drm_legacy_addbufs_sg()
1104 dma->seg_count += entry->seg_count; drm_legacy_addbufs_sg()
1109 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); drm_legacy_addbufs_sg()
1113 request->count = entry->buf_count; drm_legacy_addbufs_sg()
1257 * updates the respective drm_device_dma::bufs entry low and high water mark.
1267 struct drm_buf_entry *entry; drm_legacy_markbufs() local
1283 entry = &dma->bufs[order]; drm_legacy_markbufs()
1285 if (request->low_mark < 0 || request->low_mark > entry->buf_count) drm_legacy_markbufs()
1287 if (request->high_mark < 0 || request->high_mark > entry->buf_count) drm_legacy_markbufs()
1290 entry->low_mark = request->low_mark; drm_legacy_markbufs()
1291 entry->high_mark = request->high_mark; drm_legacy_markbufs()
1463 struct drm_map_list *entry; drm_legacy_getsarea() local
1465 list_for_each_entry(entry, &dev->maplist, head) { drm_legacy_getsarea()
1466 if (entry->map && entry->map->type == _DRM_SHM && drm_legacy_getsarea()
1467 (entry->map->flags & _DRM_CONTAINS_LOCK)) { drm_legacy_getsarea()
1468 return entry->map; drm_legacy_getsarea()
559 drm_cleanup_buf_error(struct drm_device * dev, struct drm_buf_entry * entry) drm_cleanup_buf_error() argument
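
All three addbufs variants above use the same rollback protocol: entry->buf_count is bumped only after a buffer is fully built, and on failure the count is forced to the attempted total before drm_cleanup_buf_error() runs; because the buflist was kzalloc'd, kfree(NULL) on the untouched tail is harmless. A stripped-down sketch (my_grow_bufs() is hypothetical, assumed to sit in drm_bufs.c):

    static int my_grow_bufs(struct drm_device *dev, struct drm_buf_entry *entry,
                            int count, size_t priv_size)
    {
            entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
            if (!entry->buflist)
                    return -ENOMEM;

            while (entry->buf_count < count) {
                    struct drm_buf *buf = &entry->buflist[entry->buf_count];

                    buf->dev_private = kzalloc(priv_size, GFP_KERNEL);
                    if (!buf->dev_private) {
                            /* claim the whole range; zeroed tail frees safely */
                            entry->buf_count = count;
                            drm_cleanup_buf_error(dev, entry);
                            return -ENOMEM;
                    }
                    entry->buf_count++;     /* commit only complete buffers */
            }
            return 0;
    }
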
H A Dati_pcigart.c61 struct drm_sg_mem *entry = dev->sg; drm_ati_pcigart_cleanup() local
67 if (!entry) { drm_ati_pcigart_cleanup()
75 pages = (entry->pages <= max_pages) drm_ati_pcigart_cleanup()
76 ? entry->pages : max_pages; drm_ati_pcigart_cleanup()
79 if (!entry->busaddr[i]) drm_ati_pcigart_cleanup()
81 pci_unmap_page(dev->pdev, entry->busaddr[i], drm_ati_pcigart_cleanup()
101 struct drm_sg_mem *entry = dev->sg; drm_ati_pcigart_init() local
109 if (!entry) { drm_ati_pcigart_init()
144 pages = (entry->pages <= max_real_pages) drm_ati_pcigart_init()
145 ? entry->pages : max_real_pages; drm_ati_pcigart_init()
156 entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i], drm_ati_pcigart_init()
158 if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) { drm_ati_pcigart_init()
165 page_base = (u32) entry->busaddr[i]; drm_ati_pcigart_init()
/linux-4.4.14/arch/s390/kernel/
H A Djump_label.c21 struct jump_entry *entry; member in struct:insn_args
25 static void jump_label_make_nop(struct jump_entry *entry, struct insn *insn) jump_label_make_nop() argument
32 static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn) jump_label_make_branch() argument
36 insn->offset = (entry->target - entry->code) >> 1; jump_label_make_branch()
39 static void jump_label_bug(struct jump_entry *entry, struct insn *expected, jump_label_bug() argument
42 unsigned char *ipc = (unsigned char *)entry->code; jump_label_bug()
58 static void __jump_label_transform(struct jump_entry *entry, __jump_label_transform() argument
65 jump_label_make_nop(entry, &old); __jump_label_transform()
66 jump_label_make_branch(entry, &new); __jump_label_transform()
68 jump_label_make_branch(entry, &old); __jump_label_transform()
69 jump_label_make_nop(entry, &new); __jump_label_transform()
72 if (memcmp((void *)entry->code, &orignop, sizeof(orignop))) __jump_label_transform()
73 jump_label_bug(entry, &orignop, &new); __jump_label_transform()
75 if (memcmp((void *)entry->code, &old, sizeof(old))) __jump_label_transform()
76 jump_label_bug(entry, &old, &new); __jump_label_transform()
78 s390_kernel_write((void *)entry->code, &new, sizeof(new)); __jump_label_transform()
85 __jump_label_transform(args->entry, args->type, 0); __sm_arch_jump_label_transform()
89 void arch_jump_label_transform(struct jump_entry *entry, arch_jump_label_transform() argument
94 args.entry = entry; arch_jump_label_transform()
100 void arch_jump_label_transform_static(struct jump_entry *entry, arch_jump_label_transform_static() argument
103 __jump_label_transform(entry, type, 1); arch_jump_label_transform_static()
H A Dos_info.c43 * Add OS info entry and update checksum
47 os_info.entry[nr].addr = (u64)(unsigned long)ptr; os_info_entry_add()
48 os_info.entry[nr].size = size; os_info_entry_add()
49 os_info.entry[nr].csum = csum_partial(ptr, size, 0); os_info_entry_add()
72 * Allocate and copy OS info entry from oldmem
80 addr = os_info_old->entry[nr].addr; os_info_old_alloc()
85 size = os_info_old->entry[nr].size; os_info_old_alloc()
97 if (csum != os_info_old->entry[nr].csum) { os_info_old_alloc()
101 os_info_old->entry[nr].addr = (u64)(unsigned long)buf_align; os_info_old_alloc()
107 os_info_old->entry[nr].addr = 0; os_info_old_alloc()
109 pr_info("entry %i: %s (addr=0x%lx size=%lu)\n", os_info_old_alloc()
155 * Return pointer to OS info entry and its size os_info_old_entry()
163 if (!os_info_old->entry[nr].addr) os_info_old_entry()
165 *size = (unsigned long) os_info_old->entry[nr].size; os_info_old_entry()
166 return (void *)(unsigned long)os_info_old->entry[nr].addr; os_info_old_entry()
/linux-4.4.14/drivers/firmware/efi/
H A Druntime-map.c23 struct kobject kobj; /* kobject for each entry */
30 ssize_t (*show)(struct efi_runtime_map_entry *entry, char *buf);
38 static ssize_t type_show(struct efi_runtime_map_entry *entry, char *buf) type_show() argument
40 return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type); type_show()
43 #define EFI_RUNTIME_FIELD(var) entry->md.var
46 static ssize_t name##_show(struct efi_runtime_map_entry *entry, char *buf) \
64 struct efi_runtime_map_entry *entry = to_map_entry(kobj); map_attr_show() local
67 return map_attr->show(entry, buf); map_attr_show()
77 * These are default attributes that are added for every memmap entry.
94 struct efi_runtime_map_entry *entry; map_release() local
96 entry = to_map_entry(kobj); map_release()
97 kfree(entry); map_release()
112 struct efi_runtime_map_entry *entry; add_sysfs_runtime_map_entry() local
120 entry = kzalloc(sizeof(*entry), GFP_KERNEL); add_sysfs_runtime_map_entry()
121 if (!entry) { add_sysfs_runtime_map_entry()
127 memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size, add_sysfs_runtime_map_entry()
130 kobject_init(&entry->kobj, &map_ktype); add_sysfs_runtime_map_entry()
131 entry->kobj.kset = map_kset; add_sysfs_runtime_map_entry()
132 ret = kobject_add(&entry->kobj, NULL, "%d", nr); add_sysfs_runtime_map_entry()
134 kobject_put(&entry->kobj); add_sysfs_runtime_map_entry()
140 return entry; add_sysfs_runtime_map_entry()
174 struct efi_runtime_map_entry *entry; efi_runtime_map_init() local
179 map_entries = kzalloc(nr_efi_runtime_map * sizeof(entry), GFP_KERNEL); efi_runtime_map_init()
186 entry = add_sysfs_runtime_map_entry(efi_kobj, i); efi_runtime_map_init()
187 if (IS_ERR(entry)) { efi_runtime_map_init()
188 ret = PTR_ERR(entry); efi_runtime_map_init()
191 *(map_entries + i) = entry; efi_runtime_map_init()
197 entry = *(map_entries + j); efi_runtime_map_init()
198 kobject_put(&entry->kobj); efi_runtime_map_init()
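
add_sysfs_runtime_map_entry() above follows the standard kobject lifetime rule: once kobject_init() has run, kobject_put() is the only correct error path, since the ktype's release callback (map_release() here) performs the kfree(). A minimal sketch of that shape (my_add_entry() is hypothetical):

    static struct efi_runtime_map_entry *my_add_entry(struct kset *kset, int nr)
    {
            struct efi_runtime_map_entry *entry;

            entry = kzalloc(sizeof(*entry), GFP_KERNEL);
            if (!entry)
                    return ERR_PTR(-ENOMEM);

            kobject_init(&entry->kobj, &map_ktype); /* hooks up map_release() */
            entry->kobj.kset = kset;
            if (kobject_add(&entry->kobj, NULL, "%d", nr)) {
                    kobject_put(&entry->kobj);      /* frees entry via release */
                    return ERR_PTR(-EINVAL);
            }
            return entry;
    }
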
H A Defi-pstore.c46 static int efi_pstore_read_func(struct efivar_entry *entry, void *data) efi_pstore_read_func() argument
56 if (efi_guidcmp(entry->var.VendorGuid, vendor)) efi_pstore_read_func()
60 name[i] = entry->var.VariableName[i]; efi_pstore_read_func()
94 entry->var.DataSize = 1024; efi_pstore_read_func()
95 __efivar_entry_get(entry, &entry->var.Attributes, efi_pstore_read_func()
96 &entry->var.DataSize, entry->var.Data); efi_pstore_read_func()
97 size = entry->var.DataSize; efi_pstore_read_func()
98 memcpy(*cb_data->buf, entry->var.Data, efi_pstore_read_func()
106 * @pos: scanning entry
107 * @next: next entry
121 * @entry: deleting entry
124 static inline void __efi_pstore_scan_sysfs_exit(struct efivar_entry *entry, __efi_pstore_scan_sysfs_exit() argument
127 if (entry->deleting) { __efi_pstore_scan_sysfs_exit()
128 list_del(&entry->list); __efi_pstore_scan_sysfs_exit()
130 efivar_unregister(entry); __efi_pstore_scan_sysfs_exit()
133 entry->scanning = false; __efi_pstore_scan_sysfs_exit()
138 * @pos: scanning entry
139 * @next: next entry
156 * @pos: entry to begin iterating from
161 * It is possible to begin iteration from an arbitrary entry within
163 * the next entry of the last one passed to efi_pstore_read_func().
168 struct efivar_entry *entry, *n; efi_pstore_sysfs_entry_iter() local
173 list_for_each_entry_safe(entry, n, head, list) { list_for_each_entry_safe()
174 efi_pstore_scan_sysfs_enter(entry, n, head); list_for_each_entry_safe()
176 size = efi_pstore_read_func(entry, data); list_for_each_entry_safe()
177 efi_pstore_scan_sysfs_exit(entry, n, head, size < 0); list_for_each_entry_safe()
200 * This function returns the size of an NVRAM entry logged via efi_pstore_write().
203 * size > 0: Got data of an entry logged via efi_pstore_write() successfully,
207 * size < 0: Failed to get data of an entry logged via efi_pstore_write(),
208 * and pstore will stop reading entries.
274 * Clean up an entry with the same name
276 static int efi_pstore_erase_func(struct efivar_entry *entry, void *data) efi_pstore_erase_func() argument
286 if (efi_guidcmp(entry->var.VendorGuid, vendor)) efi_pstore_erase_func()
289 if (ucs2_strncmp(entry->var.VariableName, efi_pstore_erase_func()
301 if (ucs2_strncmp(entry->var.VariableName, efi_name_old, efi_pstore_erase_func()
306 if (entry->scanning) { efi_pstore_erase_func()
308 * Skip deletion because this entry will be deleted efi_pstore_erase_func()
311 entry->deleting = true; efi_pstore_erase_func()
313 list_del(&entry->list); efi_pstore_erase_func()
316 __efivar_entry_delete(entry); efi_pstore_erase_func()
325 struct efivar_entry *entry = NULL; efi_pstore_erase() local
345 found = __efivar_entry_iter(efi_pstore_erase_func, &efivar_sysfs_list, &edata, &entry); efi_pstore_erase()
347 if (found && !entry->scanning) { efi_pstore_erase()
349 efivar_unregister(entry); efi_pstore_erase()
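
The scanning/deleting pair of flags above is a deferred-delete handshake: if the sysfs iterator currently holds the entry, erase only marks it and lets __efi_pstore_scan_sysfs_exit() unlink and unregister it later. A minimal sketch of the erase-side decision (locking elided; my_erase_entry() is hypothetical):

    static void my_erase_entry(struct efivar_entry *entry)
    {
            if (entry->scanning)
                    entry->deleting = true; /* iterator will unlink it */
            else
                    list_del(&entry->list); /* safe: nobody is walking it */

            __efivar_entry_delete(entry);   /* firmware-side delete either way */
    }
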
H A Dvars.c108 /* A valid entry must be at least 8 bytes */ validate_load_option()
120 /* Each boot entry must have a descriptor */ validate_load_option()
357 struct efivar_entry *entry, *n; variable_is_present() local
362 list_for_each_entry_safe(entry, n, head, list) { list_for_each_entry_safe()
363 strsize2 = ucs2_strsize(entry->var.VariableName, 1024); list_for_each_entry_safe()
365 !memcmp(variable_name, &(entry->var.VariableName), list_for_each_entry_safe()
367 !efi_guidcmp(entry->var.VendorGuid, list_for_each_entry_safe()
528 * efivar_entry_add - add entry to variable list
529 * @entry: entry to add to list
532 void efivar_entry_add(struct efivar_entry *entry, struct list_head *head) efivar_entry_add() argument
535 list_add(&entry->list, head); efivar_entry_add()
541 * efivar_entry_remove - remove entry from variable list
542 * @entry: entry to remove from list
544 void efivar_entry_remove(struct efivar_entry *entry) efivar_entry_remove() argument
547 list_del(&entry->list); efivar_entry_remove()
553 * efivar_entry_list_del_unlock - remove entry from variable list
554 * @entry: entry to remove
556 * Remove @entry from the variable list and release the list lock.
563 static void efivar_entry_list_del_unlock(struct efivar_entry *entry) efivar_entry_list_del_unlock() argument
567 list_del(&entry->list); efivar_entry_list_del_unlock()
573 * @entry: entry containing EFI variable to delete
575 * Delete the variable from the firmware but leave @entry on the
579 * not remove @entry from the variable list. Also, it is safe to be
586 int __efivar_entry_delete(struct efivar_entry *entry) __efivar_entry_delete() argument
593 status = ops->set_variable(entry->var.VariableName, __efivar_entry_delete()
594 &entry->var.VendorGuid, __efivar_entry_delete()
602 * efivar_entry_delete - delete variable and remove entry from list
603 * @entry: entry containing variable to delete
605 * Delete the variable from the firmware and remove @entry from the
606 * variable list. It is the caller's responsibility to free @entry
612 int efivar_entry_delete(struct efivar_entry *entry) efivar_entry_delete() argument
618 status = ops->set_variable(entry->var.VariableName, efivar_entry_delete()
619 &entry->var.VendorGuid, efivar_entry_delete()
626 efivar_entry_list_del_unlock(entry); efivar_entry_delete()
633 * @entry: entry containing the EFI variable to write
646 * the entry is already on the list.
648 * Returns 0 on success, -EEXIST if a lookup is performed and the entry
652 int efivar_entry_set(struct efivar_entry *entry, u32 attributes, efivar_entry_set() argument
657 efi_char16_t *name = entry->var.VariableName; efivar_entry_set()
658 efi_guid_t vendor = entry->var.VendorGuid; efivar_entry_set()
775 * efivar_entry_find - search for an entry
779 * @remove: should we remove the entry from the list?
781 * Search for an entry on the variable list that has the EFI variable
782 * name @name and vendor guid @guid. If an entry is found on the list
783 * and @remove is true, the entry is removed from the list.
789 * Returns the entry if found on the list, %NULL otherwise.
794 struct efivar_entry *entry, *n; efivar_entry_find() local
800 list_for_each_entry_safe(entry, n, head, list) { list_for_each_entry_safe()
802 strsize2 = ucs2_strsize(entry->var.VariableName, 1024); list_for_each_entry_safe()
804 !memcmp(name, &(entry->var.VariableName), strsize1) && list_for_each_entry_safe()
805 !efi_guidcmp(guid, entry->var.VendorGuid)) { list_for_each_entry_safe()
815 if (entry->scanning) {
817 * The entry will be deleted
820 entry->deleting = true;
822 list_del(&entry->list);
825 return entry;
831 * @entry: entry for this variable
834 int efivar_entry_size(struct efivar_entry *entry, unsigned long *size) efivar_entry_size() argument
842 status = ops->get_variable(entry->var.VariableName, efivar_entry_size()
843 &entry->var.VendorGuid, NULL, size, NULL); efivar_entry_size()
855 * @entry: read data for this variable
864 int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes, __efivar_entry_get() argument
872 status = ops->get_variable(entry->var.VariableName, __efivar_entry_get()
873 &entry->var.VendorGuid, __efivar_entry_get()
882 * @entry: read data for this variable
887 int efivar_entry_get(struct efivar_entry *entry, u32 *attributes, efivar_entry_get() argument
894 status = ops->get_variable(entry->var.VariableName, efivar_entry_get()
895 &entry->var.VendorGuid, efivar_entry_get()
905 * @entry: entry containing variable to set and get
913 * Atomically call set_variable() for @entry and if the call is
923 * (EFI_NOT_FOUND), @entry is removed from the variable list.
925 int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, efivar_entry_set_get_size() argument
929 efi_char16_t *name = entry->var.VariableName; efivar_entry_set_get_size()
930 efi_guid_t *vendor = &entry->var.VendorGuid; efivar_entry_set_get_size()
977 status = ops->get_variable(entry->var.VariableName, efivar_entry_set_get_size()
978 &entry->var.VendorGuid, efivar_entry_set_get_size()
982 efivar_entry_list_del_unlock(entry); efivar_entry_set_get_size()
1001 * Lock the variable list to prevent entry insertion and removal until
1027 * @prev: entry to begin iterating from
1030 * entry on the list. It is safe for @func to remove entries in the
1036 * It is possible to begin iteration from an arbitrary entry within
1038 * the last entry passed to @func. To begin iterating from the
1048 struct efivar_entry *entry, *n; __efivar_entry_iter() local
1052 list_for_each_entry_safe(entry, n, head, list) { list_for_each_entry_safe()
1053 err = func(entry, data); list_for_each_entry_safe()
1059 *prev = entry;
1082 * entry on the list. It is safe for @func to remove entries in the
1153 * The caller must have already removed every entry from the list,
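
The comments above pin down the ownership rules of this API: efivar_entry_find() with remove=true returns an unlinked entry, and __efivar_entry_delete() touches only the firmware, so the caller frees the entry itself. A usage sketch under those rules (lock acquisition and the scanning special case elided; my_delete_by_name() is hypothetical):

    static int my_delete_by_name(efi_char16_t *name, efi_guid_t guid,
                                 struct list_head *head)
    {
            struct efivar_entry *entry;

            entry = efivar_entry_find(name, guid, head, true /* remove */);
            if (!entry)
                    return -ENOENT;

            if (__efivar_entry_delete(entry))       /* firmware-side delete */
                    return -EIO;

            kfree(entry);                           /* list-side cleanup is ours */
            return 0;
    }
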
H A Defivars.c102 ssize_t (*show) (struct efivar_entry *entry, char *buf);
103 ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
123 efivar_guid_read(struct efivar_entry *entry, char *buf) efivar_guid_read() argument
125 struct efi_variable *var = &entry->var; efivar_guid_read()
128 if (!entry || !buf) efivar_guid_read()
139 efivar_attr_read(struct efivar_entry *entry, char *buf) efivar_attr_read() argument
141 struct efi_variable *var = &entry->var; efivar_attr_read()
144 if (!entry || !buf) efivar_attr_read()
148 if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) efivar_attr_read()
172 efivar_size_read(struct efivar_entry *entry, char *buf) efivar_size_read() argument
174 struct efi_variable *var = &entry->var; efivar_size_read()
177 if (!entry || !buf) efivar_size_read()
181 if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) efivar_size_read()
189 efivar_data_read(struct efivar_entry *entry, char *buf) efivar_data_read() argument
191 struct efi_variable *var = &entry->var; efivar_data_read()
193 if (!entry || !buf) efivar_data_read()
197 if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) efivar_data_read()
256 efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) efivar_store_raw() argument
258 struct efi_variable *new_var, *var = &entry->var; efivar_store_raw()
283 copy_out_compat(&entry->var, compat); efivar_store_raw()
300 memcpy(&entry->var, new_var, count); efivar_store_raw()
303 err = efivar_entry_set(entry, attributes, size, data, NULL); efivar_store_raw()
313 efivar_show_raw(struct efivar_entry *entry, char *buf) efivar_show_raw() argument
315 struct efi_variable *var = &entry->var; efivar_show_raw()
319 if (!entry || !buf) efivar_show_raw()
323 if (efivar_entry_get(entry, &entry->var.Attributes, efivar_show_raw()
324 &entry->var.DataSize, entry->var.Data)) efivar_show_raw()
474 printk(KERN_WARNING "efivars: failed to create sysfs entry.\n"); efivar_create()
490 struct efivar_entry *entry; efivar_delete() local
514 entry = efivar_entry_find(name, vendor, &efivar_sysfs_list, true); efivar_delete()
515 if (!entry) efivar_delete()
517 else if (__efivar_entry_delete(entry)) efivar_delete()
525 if (!entry->scanning) { efivar_delete()
527 efivar_unregister(entry); efivar_delete()
536 * efivar_create_sysfs_entry - create a new entry in sysfs
537 * @new_var: efivar entry to create
641 struct efivar_entry *entry = data; efivar_update_sysfs_entry() local
646 memcpy(entry->var.VariableName, name, name_size); efivar_update_sysfs_entry()
647 memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); efivar_update_sysfs_entry()
654 struct efivar_entry *entry; efivar_update_sysfs_entries() local
659 entry = kzalloc(sizeof(*entry), GFP_KERNEL); efivar_update_sysfs_entries()
660 if (!entry) efivar_update_sysfs_entries()
663 err = efivar_init(efivar_update_sysfs_entry, entry, efivar_update_sysfs_entries()
668 efivar_create_sysfs_entry(entry); efivar_update_sysfs_entries()
671 kfree(entry); efivar_update_sysfs_entries()
677 struct efivar_entry *entry; efivars_sysfs_callback() local
679 entry = kzalloc(sizeof(*entry), GFP_KERNEL); efivars_sysfs_callback()
680 if (!entry) efivars_sysfs_callback()
683 memcpy(entry->var.VariableName, name, name_size); efivars_sysfs_callback()
684 memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); efivars_sysfs_callback()
686 efivar_create_sysfs_entry(entry); efivars_sysfs_callback()
691 static int efivar_sysfs_destroy(struct efivar_entry *entry, void *data) efivar_sysfs_destroy() argument
693 efivar_entry_remove(entry); efivar_sysfs_destroy()
694 efivar_unregister(entry); efivar_sysfs_destroy()
H A Desrt.c72 /* entry attribute */
75 ssize_t (*show)(struct esre_entry *entry, char *buf);
76 ssize_t (*store)(struct esre_entry *entry,
93 struct esre_entry *entry = to_entry(kobj); esre_attr_show() local
100 return attr->show(entry, buf); esre_attr_show()
108 static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf) esre_fw_class_show() argument
112 efi_guid_to_str(&entry->esre.esre1->fw_class, str); esre_fw_class_show()
123 static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \
126 le##size##_to_cpu(entry->esre.esre1->name)); \
151 struct esre_entry *entry = to_entry(kobj); esre_release() local
153 list_del(&entry->list); esre_release()
154 kfree(entry); esre_release()
169 struct esre_entry *entry; esre_create_sysfs_entry() local
172 entry = kzalloc(sizeof(*entry), GFP_KERNEL); esre_create_sysfs_entry()
173 if (!entry) esre_create_sysfs_entry()
176 sprintf(name, "entry%d", entry_num); esre_create_sysfs_entry()
178 entry->kobj.kset = esrt_kset; esre_create_sysfs_entry()
183 entry->esre.esre1 = esre; esre_create_sysfs_entry()
184 rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL, esre_create_sysfs_entry()
187 kfree(entry); esre_create_sysfs_entry()
192 list_add_tail(&entry->list, &entry_list); esre_create_sysfs_entry()
274 pr_err("ESRT header doen't fit on single memory map entry. (size: %zu max: %zu)\n", efi_esrt_init()
297 pr_err("ESRT memory map entry can only hold the header. (max: %zu size: %zu)\n", efi_esrt_init()
321 pr_err("ESRT does not fit on single memory map entry (size: %zu max: %zu)\n", efi_esrt_init()
368 pr_err("ESRT entry creation failed with error %d.\n", register_entries()
378 struct esre_entry *entry, *next; cleanup_entry_list() local
380 list_for_each_entry_safe(entry, next, &entry_list, list) { cleanup_entry_list()
381 kobject_put(&entry->kobj); cleanup_entry_list()
/linux-4.4.14/sound/core/
H A Dinfo.c70 struct snd_info_entry *entry; member in struct:snd_info_private_data
75 static void snd_info_disconnect(struct snd_info_entry *entry);
89 static int alloc_info_private(struct snd_info_entry *entry, alloc_info_private() argument
94 if (!entry || !entry->p) alloc_info_private()
96 if (!try_module_get(entry->module)) alloc_info_private()
100 module_put(entry->module); alloc_info_private()
103 data->entry = entry; alloc_info_private()
123 struct snd_info_entry *entry; snd_info_entry_llseek() local
127 entry = data->entry; snd_info_entry_llseek()
128 mutex_lock(&entry->access); snd_info_entry_llseek()
129 if (entry->c.ops->llseek) { snd_info_entry_llseek()
130 offset = entry->c.ops->llseek(entry, snd_info_entry_llseek()
136 size = entry->size; snd_info_entry_llseek()
158 mutex_unlock(&entry->access); snd_info_entry_llseek()
166 struct snd_info_entry *entry = data->entry; snd_info_entry_read() local
173 if (pos >= entry->size) snd_info_entry_read()
175 size = entry->size - pos; snd_info_entry_read()
177 size = entry->c.ops->read(entry, data->file_private_data, snd_info_entry_read()
188 struct snd_info_entry *entry = data->entry; snd_info_entry_write() local
196 size_t maxsize = entry->size - pos; snd_info_entry_write()
198 size = entry->c.ops->write(entry, data->file_private_data, snd_info_entry_write()
209 struct snd_info_entry *entry = data->entry; snd_info_entry_poll() local
212 if (entry->c.ops->poll) snd_info_entry_poll()
213 return entry->c.ops->poll(entry, snd_info_entry_poll()
216 if (entry->c.ops->read) snd_info_entry_poll()
218 if (entry->c.ops->write) snd_info_entry_poll()
227 struct snd_info_entry *entry = data->entry; snd_info_entry_ioctl() local
229 if (!entry->c.ops->ioctl) snd_info_entry_ioctl()
231 return entry->c.ops->ioctl(entry, data->file_private_data, snd_info_entry_ioctl()
239 struct snd_info_entry *entry; snd_info_entry_mmap() local
244 entry = data->entry; snd_info_entry_mmap()
245 if (!entry->c.ops->mmap) snd_info_entry_mmap()
247 return entry->c.ops->mmap(entry, data->file_private_data, snd_info_entry_mmap()
253 struct snd_info_entry *entry = PDE_DATA(inode); snd_info_entry_open() local
258 err = alloc_info_private(entry, &data); snd_info_entry_open()
263 if (((mode == O_RDONLY || mode == O_RDWR) && !entry->c.ops->read) || snd_info_entry_open()
264 ((mode == O_WRONLY || mode == O_RDWR) && !entry->c.ops->write)) { snd_info_entry_open()
269 if (entry->c.ops->open) { snd_info_entry_open()
270 err = entry->c.ops->open(entry, mode, &data->file_private_data); snd_info_entry_open()
281 module_put(entry->module); snd_info_entry_open()
290 struct snd_info_entry *entry = data->entry; snd_info_entry_release() local
292 if (entry->c.ops->release) snd_info_entry_release()
293 entry->c.ops->release(entry, file->f_flags & O_ACCMODE, snd_info_entry_release()
295 module_put(entry->module); snd_info_entry_release()
322 struct snd_info_entry *entry = data->entry; snd_info_text_entry_write() local
332 mutex_lock(&entry->access); snd_info_text_entry_write()
357 mutex_unlock(&entry->access); snd_info_text_entry_write()
367 struct snd_info_entry *entry = data->entry; snd_info_seq_show() local
369 if (entry->c.text.read) { snd_info_seq_show()
371 entry->c.text.read(entry, data->rbuffer); snd_info_seq_show()
378 struct snd_info_entry *entry = PDE_DATA(inode); snd_info_text_entry_open() local
383 err = alloc_info_private(entry, &data); snd_info_text_entry_open()
392 if (entry->size) snd_info_text_entry_open()
394 entry->size); snd_info_text_entry_open()
405 module_put(entry->module); snd_info_text_entry_open()
415 struct snd_info_entry *entry = data->entry; snd_info_text_entry_release() local
417 if (data->wbuffer && entry->c.text.write) snd_info_text_entry_release()
418 entry->c.text.write(entry, data->wbuffer); snd_info_text_entry_release()
427 module_put(entry->module); snd_info_text_entry_release()
445 struct snd_info_entry *entry; create_subdir() local
447 entry = snd_info_create_module_entry(mod, name, NULL); create_subdir()
448 if (!entry) create_subdir()
450 entry->mode = S_IFDIR | S_IRUGO | S_IXUGO; create_subdir()
451 if (snd_info_register(entry) < 0) { create_subdir()
452 snd_info_free_entry(entry); create_subdir()
455 return entry; create_subdir()
506 struct snd_info_entry *entry; snd_info_card_create() local
512 entry = create_subdir(card->module, str); snd_info_card_create()
513 if (!entry) snd_info_card_create()
515 card->proc_root = entry; snd_info_card_create()
520 static int snd_info_register_recursive(struct snd_info_entry *entry) snd_info_register_recursive() argument
525 if (!entry->p) { snd_info_register_recursive()
526 err = snd_info_register(entry); snd_info_register_recursive()
531 list_for_each_entry(p, &entry->children, list) { snd_info_register_recursive()
690 * snd_info_create_entry - create an info entry
694 * Creates an info entry with the given file name and initializes as
705 struct snd_info_entry *entry; snd_info_create_entry() local
706 entry = kzalloc(sizeof(*entry), GFP_KERNEL); snd_info_create_entry()
707 if (entry == NULL) snd_info_create_entry()
709 entry->name = kstrdup(name, GFP_KERNEL); snd_info_create_entry()
710 if (entry->name == NULL) { snd_info_create_entry()
711 kfree(entry); snd_info_create_entry()
714 entry->mode = S_IFREG | S_IRUGO; snd_info_create_entry()
715 entry->content = SNDRV_INFO_CONTENT_TEXT; snd_info_create_entry()
716 mutex_init(&entry->access); snd_info_create_entry()
717 INIT_LIST_HEAD(&entry->children); snd_info_create_entry()
718 INIT_LIST_HEAD(&entry->list); snd_info_create_entry()
719 entry->parent = parent; snd_info_create_entry()
721 list_add_tail(&entry->list, &parent->children); snd_info_create_entry()
722 return entry; snd_info_create_entry()
726 * snd_info_create_module_entry - create an info entry for the given module
731 * Creates a new info entry and assigns it to the given module.
739 struct snd_info_entry *entry = snd_info_create_entry(name, parent); snd_info_create_module_entry() local
740 if (entry) snd_info_create_module_entry()
741 entry->module = module; snd_info_create_module_entry()
742 return entry; snd_info_create_module_entry()
748 * snd_info_create_card_entry - create an info entry for the given card
753 * Creates a new info entry and assigns it to the given card.
761 struct snd_info_entry *entry = snd_info_create_entry(name, parent); snd_info_create_card_entry() local
762 if (entry) { snd_info_create_card_entry()
763 entry->module = card->module; snd_info_create_card_entry()
764 entry->card = card; snd_info_create_card_entry()
766 return entry; snd_info_create_card_entry()
771 static void snd_info_disconnect(struct snd_info_entry *entry) snd_info_disconnect() argument
775 if (!entry->p) snd_info_disconnect()
777 list_for_each_entry(p, &entry->children, list) snd_info_disconnect()
779 proc_remove(entry->p); snd_info_disconnect()
780 entry->p = NULL; snd_info_disconnect()
784 * snd_info_free_entry - release the info entry
785 * @entry: the info entry
787 * Releases the info entry.
789 void snd_info_free_entry(struct snd_info_entry * entry) snd_info_free_entry() argument
793 if (!entry) snd_info_free_entry()
795 if (entry->p) { snd_info_free_entry()
797 snd_info_disconnect(entry); snd_info_free_entry()
802 list_for_each_entry_safe(p, n, &entry->children, list) snd_info_free_entry()
805 list_del(&entry->list); snd_info_free_entry()
806 kfree(entry->name); snd_info_free_entry()
807 if (entry->private_free) snd_info_free_entry()
808 entry->private_free(entry); snd_info_free_entry()
809 kfree(entry); snd_info_free_entry()
815 * snd_info_register - register the info entry
816 * @entry: the info entry
818 * Registers the proc info entry.
822 int snd_info_register(struct snd_info_entry * entry) snd_info_register() argument
826 if (snd_BUG_ON(!entry)) snd_info_register()
828 root = entry->parent == NULL ? snd_proc_root->p : entry->parent->p; snd_info_register()
830 if (S_ISDIR(entry->mode)) { snd_info_register()
831 p = proc_mkdir_mode(entry->name, entry->mode, root); snd_info_register()
838 if (entry->content == SNDRV_INFO_CONTENT_DATA) snd_info_register()
842 p = proc_create_data(entry->name, entry->mode, root, snd_info_register()
843 ops, entry); snd_info_register()
848 proc_set_size(p, entry->size); snd_info_register()
850 entry->p = p; snd_info_register()
861 static void snd_info_version_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) snd_info_version_read() argument
870 struct snd_info_entry *entry; snd_info_version_init() local
872 entry = snd_info_create_module_entry(THIS_MODULE, "version", NULL); snd_info_version_init()
873 if (entry == NULL) snd_info_version_init()
875 entry->c.text.read = snd_info_version_read; snd_info_version_init()
876 return snd_info_register(entry); /* freed in error path */ snd_info_version_init()
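
The protocol these helpers establish, and which pcm.c below repeats for every proc file: create the entry, set its text callbacks, and pair every failed snd_info_register() with snd_info_free_entry(). A small wrapper sketch (my_text_entry() is hypothetical):

    static struct snd_info_entry *
    my_text_entry(struct snd_card *card, const char *name,
                  void (*read)(struct snd_info_entry *, struct snd_info_buffer *))
    {
            struct snd_info_entry *entry;

            entry = snd_info_create_card_entry(card, name, card->proc_root);
            if (!entry)
                    return NULL;

            entry->c.text.read = read;      /* text-mode read callback */
            if (snd_info_register(entry) < 0) {
                    snd_info_free_entry(entry); /* also unlinks from parent */
                    return NULL;
            }
            return entry;
    }
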
H A Dpcm.c369 static void snd_pcm_stream_proc_info_read(struct snd_info_entry *entry, snd_pcm_stream_proc_info_read() argument
372 snd_pcm_proc_info_read(((struct snd_pcm_str *)entry->private_data)->substream, snd_pcm_stream_proc_info_read()
376 static void snd_pcm_substream_proc_info_read(struct snd_info_entry *entry, snd_pcm_substream_proc_info_read() argument
379 snd_pcm_proc_info_read(entry->private_data, buffer); snd_pcm_substream_proc_info_read()
382 static void snd_pcm_substream_proc_hw_params_read(struct snd_info_entry *entry, snd_pcm_substream_proc_hw_params_read() argument
385 struct snd_pcm_substream *substream = entry->private_data; snd_pcm_substream_proc_hw_params_read()
419 static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry, snd_pcm_substream_proc_sw_params_read() argument
422 struct snd_pcm_substream *substream = entry->private_data; snd_pcm_substream_proc_sw_params_read()
447 static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry, snd_pcm_substream_proc_status_read() argument
450 struct snd_pcm_substream *substream = entry->private_data; snd_pcm_substream_proc_status_read()
484 static void snd_pcm_xrun_injection_write(struct snd_info_entry *entry, snd_pcm_xrun_injection_write() argument
487 struct snd_pcm_substream *substream = entry->private_data; snd_pcm_xrun_injection_write()
497 static void snd_pcm_xrun_debug_read(struct snd_info_entry *entry, snd_pcm_xrun_debug_read() argument
500 struct snd_pcm_str *pstr = entry->private_data; snd_pcm_xrun_debug_read()
504 static void snd_pcm_xrun_debug_write(struct snd_info_entry *entry, snd_pcm_xrun_debug_write() argument
507 struct snd_pcm_str *pstr = entry->private_data; snd_pcm_xrun_debug_write()
517 struct snd_info_entry *entry; snd_pcm_stream_proc_init() local
522 if ((entry = snd_info_create_card_entry(pcm->card, name, pcm->card->proc_root)) == NULL) snd_pcm_stream_proc_init()
524 entry->mode = S_IFDIR | S_IRUGO | S_IXUGO; snd_pcm_stream_proc_init()
525 if (snd_info_register(entry) < 0) { snd_pcm_stream_proc_init()
526 snd_info_free_entry(entry); snd_pcm_stream_proc_init()
529 pstr->proc_root = entry; snd_pcm_stream_proc_init()
531 if ((entry = snd_info_create_card_entry(pcm->card, "info", pstr->proc_root)) != NULL) { snd_pcm_stream_proc_init()
532 snd_info_set_text_ops(entry, pstr, snd_pcm_stream_proc_info_read); snd_pcm_stream_proc_init()
533 if (snd_info_register(entry) < 0) { snd_pcm_stream_proc_init()
534 snd_info_free_entry(entry); snd_pcm_stream_proc_init()
535 entry = NULL; snd_pcm_stream_proc_init()
538 pstr->proc_info_entry = entry; snd_pcm_stream_proc_init()
541 if ((entry = snd_info_create_card_entry(pcm->card, "xrun_debug", snd_pcm_stream_proc_init()
543 entry->c.text.read = snd_pcm_xrun_debug_read; snd_pcm_stream_proc_init()
544 entry->c.text.write = snd_pcm_xrun_debug_write; snd_pcm_stream_proc_init()
545 entry->mode |= S_IWUSR; snd_pcm_stream_proc_init()
546 entry->private_data = pstr; snd_pcm_stream_proc_init()
547 if (snd_info_register(entry) < 0) { snd_pcm_stream_proc_init()
548 snd_info_free_entry(entry); snd_pcm_stream_proc_init()
549 entry = NULL; snd_pcm_stream_proc_init()
552 pstr->proc_xrun_debug_entry = entry; snd_pcm_stream_proc_init()
572 struct snd_info_entry *entry; snd_pcm_substream_proc_init() local
579 if ((entry = snd_info_create_card_entry(card, name, substream->pstr->proc_root)) == NULL) snd_pcm_substream_proc_init()
581 entry->mode = S_IFDIR | S_IRUGO | S_IXUGO; snd_pcm_substream_proc_init()
582 if (snd_info_register(entry) < 0) { snd_pcm_substream_proc_init()
583 snd_info_free_entry(entry); snd_pcm_substream_proc_init()
586 substream->proc_root = entry; snd_pcm_substream_proc_init()
588 if ((entry = snd_info_create_card_entry(card, "info", substream->proc_root)) != NULL) { snd_pcm_substream_proc_init()
589 snd_info_set_text_ops(entry, substream, snd_pcm_substream_proc_init()
591 if (snd_info_register(entry) < 0) { snd_pcm_substream_proc_init()
592 snd_info_free_entry(entry); snd_pcm_substream_proc_init()
593 entry = NULL; snd_pcm_substream_proc_init()
596 substream->proc_info_entry = entry; snd_pcm_substream_proc_init()
598 if ((entry = snd_info_create_card_entry(card, "hw_params", substream->proc_root)) != NULL) { snd_pcm_substream_proc_init()
599 snd_info_set_text_ops(entry, substream, snd_pcm_substream_proc_init()
601 if (snd_info_register(entry) < 0) { snd_pcm_substream_proc_init()
602 snd_info_free_entry(entry); snd_pcm_substream_proc_init()
603 entry = NULL; snd_pcm_substream_proc_init()
606 substream->proc_hw_params_entry = entry; snd_pcm_substream_proc_init()
608 if ((entry = snd_info_create_card_entry(card, "sw_params", substream->proc_root)) != NULL) { snd_pcm_substream_proc_init()
609 snd_info_set_text_ops(entry, substream, snd_pcm_substream_proc_init()
611 if (snd_info_register(entry) < 0) { snd_pcm_substream_proc_init()
612 snd_info_free_entry(entry); snd_pcm_substream_proc_init()
613 entry = NULL; snd_pcm_substream_proc_init()
616 substream->proc_sw_params_entry = entry; snd_pcm_substream_proc_init()
618 if ((entry = snd_info_create_card_entry(card, "status", substream->proc_root)) != NULL) { snd_pcm_substream_proc_init()
619 snd_info_set_text_ops(entry, substream, snd_pcm_substream_proc_init()
621 if (snd_info_register(entry) < 0) { snd_pcm_substream_proc_init()
622 snd_info_free_entry(entry); snd_pcm_substream_proc_init()
623 entry = NULL; snd_pcm_substream_proc_init()
626 substream->proc_status_entry = entry; snd_pcm_substream_proc_init()
629 entry = snd_info_create_card_entry(card, "xrun_injection", snd_pcm_substream_proc_init()
631 if (entry) { snd_pcm_substream_proc_init()
632 entry->private_data = substream; snd_pcm_substream_proc_init()
633 entry->c.text.read = NULL; snd_pcm_substream_proc_init()
634 entry->c.text.write = snd_pcm_xrun_injection_write; snd_pcm_substream_proc_init()
635 entry->mode = S_IFREG | S_IWUSR; snd_pcm_substream_proc_init()
636 if (snd_info_register(entry) < 0) { snd_pcm_substream_proc_init()
637 snd_info_free_entry(entry); snd_pcm_substream_proc_init()
638 entry = NULL; snd_pcm_substream_proc_init()
641 substream->proc_xrun_injection_entry = entry; snd_pcm_substream_proc_init()
1186 static void snd_pcm_proc_read(struct snd_info_entry *entry, snd_pcm_proc_read() argument
1210 struct snd_info_entry *entry; snd_pcm_proc_init() local
1212 if ((entry = snd_info_create_module_entry(THIS_MODULE, "pcm", NULL)) != NULL) { snd_pcm_proc_init()
1213 snd_info_set_text_ops(entry, NULL, snd_pcm_proc_read); snd_pcm_proc_init()
1214 if (snd_info_register(entry) < 0) { snd_pcm_proc_init()
1215 snd_info_free_entry(entry); snd_pcm_proc_init()
1216 entry = NULL; snd_pcm_proc_init()
1219 snd_pcm_proc_entry = entry; snd_pcm_proc_init()
/linux-4.4.14/drivers/oprofile/
H A Dcpu_buffer.h73 * entry->event != NULL, otherwise entry->size or entry->event will be
78 *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
79 int op_cpu_buffer_write_commit(struct op_entry *entry);
80 struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
83 /* returns the remaining free size of data in the entry */
85 int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val) op_cpu_buffer_add_data() argument
87 if (!entry->size) op_cpu_buffer_add_data()
89 *entry->data = val; op_cpu_buffer_add_data()
90 entry->size--; op_cpu_buffer_add_data()
91 entry->data++; op_cpu_buffer_add_data()
92 return entry->size; op_cpu_buffer_add_data()
95 /* returns the size of data in the entry */
97 int op_cpu_buffer_get_size(struct op_entry *entry) op_cpu_buffer_get_size() argument
99 return entry->size; op_cpu_buffer_get_size()
104 int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val) op_cpu_buffer_get_data() argument
106 int size = entry->size; op_cpu_buffer_get_data()
109 *val = *entry->data; op_cpu_buffer_get_data()
110 entry->size--; op_cpu_buffer_get_data()
111 entry->data++; op_cpu_buffer_get_data()
H A Dcpu_buffer.c134 * buffer. Struct entry can be uninitialized. The function reserves a
142 *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size) op_cpu_buffer_write_reserve() argument
144 entry->event = ring_buffer_lock_reserve op_cpu_buffer_write_reserve()
146 size * sizeof(entry->sample->data[0])); op_cpu_buffer_write_reserve()
147 if (!entry->event) op_cpu_buffer_write_reserve()
149 entry->sample = ring_buffer_event_data(entry->event); op_cpu_buffer_write_reserve()
150 entry->size = size; op_cpu_buffer_write_reserve()
151 entry->data = entry->sample->data; op_cpu_buffer_write_reserve()
153 return entry->sample; op_cpu_buffer_write_reserve()
156 int op_cpu_buffer_write_commit(struct op_entry *entry) op_cpu_buffer_write_commit() argument
158 return ring_buffer_unlock_commit(op_ring_buffer, entry->event); op_cpu_buffer_write_commit()
161 struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu) op_cpu_buffer_read_entry() argument
168 entry->event = e; op_cpu_buffer_read_entry()
169 entry->sample = ring_buffer_event_data(e); op_cpu_buffer_read_entry()
170 entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample)) op_cpu_buffer_read_entry()
171 / sizeof(entry->sample->data[0]); op_cpu_buffer_read_entry()
172 entry->data = entry->sample->data; op_cpu_buffer_read_entry()
173 return entry->sample; op_cpu_buffer_read_entry()
185 struct op_entry entry; op_add_code() local
219 sample = op_cpu_buffer_write_reserve(&entry, size); op_add_code()
227 op_cpu_buffer_add_data(&entry, (unsigned long)task); op_add_code()
229 op_cpu_buffer_write_commit(&entry); op_add_code()
238 struct op_entry entry; op_add_sample() local
241 sample = op_cpu_buffer_write_reserve(&entry, 0); op_add_sample()
248 return op_cpu_buffer_write_commit(&entry); op_add_sample()
351 * Use oprofile_add_data(&entry, val) to add data and
352 * oprofile_write_commit(&entry) to commit the sample.
355 oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs, oprofile_write_reserve() argument
368 sample = op_cpu_buffer_write_reserve(entry, size + 2); oprofile_write_reserve()
374 op_cpu_buffer_add_data(entry, code); oprofile_write_reserve()
375 op_cpu_buffer_add_data(entry, pc); oprofile_write_reserve()
380 entry->event = NULL; oprofile_write_reserve()
384 int oprofile_add_data(struct op_entry *entry, unsigned long val) oprofile_add_data() argument
386 if (!entry->event) oprofile_add_data()
388 return op_cpu_buffer_add_data(entry, val); oprofile_add_data()
391 int oprofile_add_data64(struct op_entry *entry, u64 val) oprofile_add_data64() argument
393 if (!entry->event) oprofile_add_data64()
395 if (op_cpu_buffer_get_size(entry) < 2) oprofile_add_data64()
401 if (!op_cpu_buffer_add_data(entry, (u32)val)) oprofile_add_data64()
403 return op_cpu_buffer_add_data(entry, (u32)(val >> 32)); oprofile_add_data64()
406 int oprofile_write_commit(struct op_entry *entry) oprofile_write_commit() argument
408 if (!entry->event) oprofile_write_commit()
410 return op_cpu_buffer_write_commit(entry); oprofile_write_commit()
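
oprofile_add_data64() above refuses to start unless two slots remain, then stores the low 32 bits followed by the high 32 bits. A stand-alone illustration of that split and its reassembly (demo value only, not tied to any real sample stream):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0x1122334455667788ULL;
	/* low word goes into the buffer first, then the high word */
	uint32_t lo = (uint32_t)val;
	uint32_t hi = (uint32_t)(val >> 32);
	uint64_t back = (uint64_t)lo | ((uint64_t)hi << 32);

	printf("lo=%08x hi=%08x roundtrip ok=%d\n", lo, hi, back == val);
	return 0;
}
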
/linux-4.4.14/tools/lib/api/
H A Dcpu.c8 char entry[PATH_MAX]; cpu__get_max_freq() local
14 snprintf(entry, sizeof(entry), cpu__get_max_freq()
17 return sysfs__read_ull(entry, freq); cpu__get_max_freq()
/linux-4.4.14/drivers/sh/intc/
H A Dvirq.c27 #define for_each_virq(entry, head) \
28 for (entry = head; entry; entry = entry->next)
86 struct intc_virq_list *entry; add_virq_to_pirq() local
90 for_each_virq(entry, irq_get_handler_data(irq)) { for_each_virq()
91 if (entry->irq == virq) for_each_virq()
93 last = &entry->next; for_each_virq()
96 entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
97 if (!entry) {
102 entry->irq = virq;
105 *last = entry;
107 irq_set_handler_data(irq, entry);
117 struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data); intc_virq_handler() local
122 for_each_virq(entry, vlist) { for_each_virq()
124 struct irq_desc *vdesc = irq_to_desc(entry->irq); for_each_virq()
167 struct intc_subgroup_entry *entry; intc_subgroup_init_one() local
173 entry = kmalloc(sizeof(*entry), GFP_NOWAIT); intc_subgroup_init_one()
174 if (!entry) intc_subgroup_init_one()
177 entry->pirq = pirq; intc_subgroup_init_one()
178 entry->enum_id = subgroup->enum_ids[i]; intc_subgroup_init_one()
179 entry->handle = intc_subgroup_data(subgroup, d, i); intc_subgroup_init_one()
181 err = radix_tree_insert(&d->tree, entry->enum_id, entry); intc_subgroup_init_one()
185 radix_tree_tag_set(&d->tree, entry->enum_id, intc_subgroup_init_one()
218 struct intc_subgroup_entry *entry; intc_subgroup_map() local
221 entry = radix_tree_deref_slot((void **)entries[i]); intc_subgroup_map()
222 if (unlikely(!entry)) intc_subgroup_map()
224 if (radix_tree_deref_retry(entry)) intc_subgroup_map()
236 irq, entry->pirq); intc_subgroup_map()
238 intc_irq_xlate_set(irq, entry->enum_id, d); intc_subgroup_map()
240 irq_set_chip_and_handler_name(irq, irq_get_chip(entry->pirq), intc_subgroup_map()
242 irq_set_chip_data(irq, irq_get_chip_data(entry->pirq)); intc_subgroup_map()
244 irq_set_handler_data(irq, (void *)entry->handle); intc_subgroup_map()
252 add_virq_to_pirq(entry->pirq, irq); intc_subgroup_map()
253 irq_set_chained_handler(entry->pirq, intc_virq_handler); intc_subgroup_map()
255 radix_tree_tag_clear(&d->tree, entry->enum_id, intc_subgroup_map()
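
add_virq_to_pirq() above uses a classic singly-linked append idiom: walk the list while remembering the address of the last ->next pointer, then hook the new node in with one store, which works for empty and non-empty lists alike. A simplified user-space sketch of the same idiom (types and error handling reduced for illustration):

#include <stdio.h>
#include <stdlib.h>

struct vlist {
	int irq;
	struct vlist *next;
};

#define for_each_virq(entry, head) \
	for (entry = head; entry; entry = entry->next)

static int add_virq(struct vlist **head, int virq)
{
	struct vlist **last = head, *entry;

	for_each_virq(entry, *head) {
		if (entry->irq == virq)
			return 0;		/* already present */
		last = &entry->next;
	}

	entry = calloc(1, sizeof(*entry));
	if (!entry)
		return -1;
	entry->irq = virq;
	*last = entry;				/* single store appends the node */
	return 0;
}

int main(void)
{
	struct vlist *head = NULL, *entry;

	add_virq(&head, 3);
	add_virq(&head, 5);
	add_virq(&head, 3);			/* duplicate, ignored */
	for_each_virq(entry, head)
		printf("virq %d\n", entry->irq);
	while (head) {				/* tear down the demo list */
		entry = head;
		head = head->next;
		free(entry);
	}
	return 0;
}
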
H A Dvirq-debugfs.c26 struct intc_map_entry *entry = intc_irq_xlate_get(i); intc_irq_xlate_debug() local
27 struct intc_desc_int *desc = entry->desc; intc_irq_xlate_debug()
33 seq_printf(m, "0x%05x ", entry->enum_id); intc_irq_xlate_debug()
/linux-4.4.14/include/linux/
H A Dresource_ext.h42 static inline void resource_list_add(struct resource_entry *entry, resource_list_add() argument
45 list_add(&entry->node, head); resource_list_add()
48 static inline void resource_list_add_tail(struct resource_entry *entry, resource_list_add_tail() argument
51 list_add_tail(&entry->node, head); resource_list_add_tail()
54 static inline void resource_list_del(struct resource_entry *entry) resource_list_del() argument
56 list_del(&entry->node); resource_list_del()
59 static inline void resource_list_free_entry(struct resource_entry *entry) resource_list_free_entry() argument
61 kfree(entry); resource_list_free_entry()
65 resource_list_destroy_entry(struct resource_entry *entry) resource_list_destroy_entry() argument
67 resource_list_del(entry); resource_list_destroy_entry()
68 resource_list_free_entry(entry); resource_list_destroy_entry()
71 #define resource_list_for_each_entry(entry, list) \
72 list_for_each_entry((entry), (list), node)
74 #define resource_list_for_each_entry_safe(entry, tmp, list) \
75 list_for_each_entry_safe((entry), (tmp), (list), node)
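
The resource_list_* helpers above are thin wrappers around the kernel's intrusive list: each resource_entry embeds a node, and add/del just splice that node. A minimal user-space re-implementation of the pattern (hand-rolled list and container_of, not the kernel's list.h):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *prev, *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

struct resource_entry {
	struct list_head node;
	unsigned long start, end;
};

int main(void)
{
	struct list_head head, *pos, *tmp;
	struct resource_entry *entry;
	int i;

	list_init(&head);
	for (i = 0; i < 3; i++) {
		entry = malloc(sizeof(*entry));
		entry->start = i * 0x1000;
		entry->end = entry->start + 0xfff;
		list_add_tail(&entry->node, &head);
	}

	/* the _safe walk caches ->next so the current entry may be freed,
	 * mirroring resource_list_for_each_entry_safe() */
	for (pos = head.next; pos != &head; pos = tmp) {
		tmp = pos->next;
		entry = container_of(pos, struct resource_entry, node);
		printf("[%#lx-%#lx]\n", entry->start, entry->end);
		list_del(pos);
		free(entry);
	}
	return 0;
}
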
H A Dswapops.h39 static inline unsigned swp_type(swp_entry_t entry) swp_type() argument
41 return (entry.val >> SWP_TYPE_SHIFT(entry)); swp_type()
48 static inline pgoff_t swp_offset(swp_entry_t entry) swp_offset() argument
50 return entry.val & SWP_OFFSET_MASK(entry); swp_offset()
54 /* check whether a pte points to a swap entry */ is_swap_pte()
79 static inline pte_t swp_entry_to_pte(swp_entry_t entry) swp_entry_to_pte() argument
83 arch_entry = __swp_entry(swp_type(entry), swp_offset(entry)); swp_entry_to_pte()
89 swp_entry_t entry; radix_to_swp_entry() local
91 entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT; radix_to_swp_entry()
92 return entry; radix_to_swp_entry()
95 static inline void *swp_to_radix_entry(swp_entry_t entry) swp_to_radix_entry() argument
99 value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT; swp_to_radix_entry()
111 static inline int is_migration_entry(swp_entry_t entry) is_migration_entry() argument
113 return unlikely(swp_type(entry) == SWP_MIGRATION_READ || is_migration_entry()
114 swp_type(entry) == SWP_MIGRATION_WRITE); is_migration_entry()
117 static inline int is_write_migration_entry(swp_entry_t entry) is_write_migration_entry() argument
119 return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE); is_write_migration_entry()
122 static inline struct page *migration_entry_to_page(swp_entry_t entry) migration_entry_to_page() argument
124 struct page *p = pfn_to_page(swp_offset(entry)); migration_entry_to_page()
133 static inline void make_migration_entry_read(swp_entry_t *entry) make_migration_entry_read() argument
135 *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry)); make_migration_entry_read()
159 static inline int is_write_migration_entry(swp_entry_t entry) is_write_migration_entry() argument
179 static inline int is_hwpoison_entry(swp_entry_t entry) is_hwpoison_entry() argument
181 return swp_type(entry) == SWP_HWPOISON; is_hwpoison_entry()
231 static inline int non_swap_entry(swp_entry_t entry) non_swap_entry() argument
233 return swp_type(entry) >= MAX_SWAPFILES; non_swap_entry()
236 static inline int non_swap_entry(swp_entry_t entry) non_swap_entry() argument
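
swp_type() and swp_offset() above unpack one unsigned long: the swap type lives in the high bits, the offset in the low bits. A user-space sketch of that packing; the 5-bit type width here is a demo assumption, not the kernel's actual SWP_TYPE_SHIFT:

#include <stdio.h>

#define DEMO_TYPE_BITS	 5
#define DEMO_OFFSET_BITS (sizeof(unsigned long) * 8 - DEMO_TYPE_BITS)
#define DEMO_OFFSET_MASK ((1UL << DEMO_OFFSET_BITS) - 1)

typedef struct { unsigned long val; } swp_entry_t;

static swp_entry_t swp_entry(unsigned type, unsigned long offset)
{
	swp_entry_t e = { ((unsigned long)type << DEMO_OFFSET_BITS) |
			  (offset & DEMO_OFFSET_MASK) };
	return e;
}

static unsigned swp_type(swp_entry_t e)        { return e.val >> DEMO_OFFSET_BITS; }
static unsigned long swp_offset(swp_entry_t e) { return e.val & DEMO_OFFSET_MASK; }

int main(void)
{
	swp_entry_t e = swp_entry(3, 0x12345);

	printf("type=%u offset=%#lx\n", swp_type(e), swp_offset(e));
	return 0;
}
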
H A Ddqblk_qtree.h21 void (*mem2disk_dqblk)(void *disk, struct dquot *dquot); /* Convert given entry from in memory format to disk one */
22 void (*disk2mem_dqblk)(struct dquot *dquot, void *disk); /* Convert given entry from disk format to in memory one */
32 unsigned int dqi_free_entry; /* First block with free entry */
34 unsigned int dqi_entry_size; /* Size of quota entry in quota file */
37 struct qtree_fmt_operations *dqi_ops; /* Operations for entry manipulation */
/linux-4.4.14/sound/pci/emu10k1/
H A Demuproc.c80 static void snd_emu10k1_proc_read(struct snd_info_entry *entry, snd_emu10k1_proc_read() argument
186 struct snd_emu10k1 *emu = entry->private_data; snd_emu10k1_proc_read()
237 static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry, snd_emu10k1_proc_spdif_read() argument
240 struct snd_emu10k1 *emu = entry->private_data; snd_emu10k1_proc_spdif_read()
276 static void snd_emu10k1_proc_rates_read(struct snd_info_entry *entry, snd_emu10k1_proc_rates_read() argument
280 struct snd_emu10k1 *emu = entry->private_data; snd_emu10k1_proc_rates_read()
291 static void snd_emu10k1_proc_acode_read(struct snd_info_entry *entry, snd_emu10k1_proc_acode_read() argument
295 struct snd_emu10k1 *emu = entry->private_data; snd_emu10k1_proc_acode_read()
334 static ssize_t snd_emu10k1_fx8010_read(struct snd_info_entry *entry, snd_emu10k1_fx8010_read() argument
339 struct snd_emu10k1 *emu = entry->private_data; snd_emu10k1_fx8010_read()
346 if (!strcmp(entry->name, "fx8010_tram_addr")) { snd_emu10k1_fx8010_read()
349 } else if (!strcmp(entry->name, "fx8010_tram_data")) { snd_emu10k1_fx8010_read()
351 } else if (!strcmp(entry->name, "fx8010_code")) { snd_emu10k1_fx8010_read()
377 static void snd_emu10k1_proc_voices_read(struct snd_info_entry *entry, snd_emu10k1_proc_voices_read() argument
380 struct snd_emu10k1 *emu = entry->private_data; snd_emu10k1_proc_voices_read()
398 static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry, snd_emu_proc_emu1010_reg_read() argument
401 struct snd_emu10k1 *emu = entry->private_data; snd_emu_proc_emu1010_reg_read()
412 static void snd_emu_proc_io_reg_read(struct snd_info_entry *entry, snd_emu_proc_io_reg_read() argument
415 struct snd_emu10k1 *emu = entry->private_data; snd_emu_proc_io_reg_read()
428 static void snd_emu_proc_io_reg_write(struct snd_info_entry *entry, snd_emu_proc_io_reg_write() argument
431 struct snd_emu10k1 *emu = entry->private_data; snd_emu_proc_io_reg_write()
481 static void snd_emu_proc_ptr_reg_read(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_read() argument
484 struct snd_emu10k1 *emu = entry->private_data; snd_emu_proc_ptr_reg_read()
505 static void snd_emu_proc_ptr_reg_write(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_write() argument
508 struct snd_emu10k1 *emu = entry->private_data; snd_emu_proc_ptr_reg_write()
519 static void snd_emu_proc_ptr_reg_write00(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_write00() argument
522 snd_emu_proc_ptr_reg_write(entry, buffer, 0); snd_emu_proc_ptr_reg_write00()
525 static void snd_emu_proc_ptr_reg_write20(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_write20() argument
528 snd_emu_proc_ptr_reg_write(entry, buffer, 0x20); snd_emu_proc_ptr_reg_write20()
532 static void snd_emu_proc_ptr_reg_read00a(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_read00a() argument
535 snd_emu_proc_ptr_reg_read(entry, buffer, 0, 0, 0x40, 64); snd_emu_proc_ptr_reg_read00a()
538 static void snd_emu_proc_ptr_reg_read00b(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_read00b() argument
541 snd_emu_proc_ptr_reg_read(entry, buffer, 0, 0x40, 0x40, 64); snd_emu_proc_ptr_reg_read00b()
544 static void snd_emu_proc_ptr_reg_read20a(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_read20a() argument
547 snd_emu_proc_ptr_reg_read(entry, buffer, 0x20, 0, 0x40, 4); snd_emu_proc_ptr_reg_read20a()
550 static void snd_emu_proc_ptr_reg_read20b(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_read20b() argument
553 snd_emu_proc_ptr_reg_read(entry, buffer, 0x20, 0x40, 0x40, 4); snd_emu_proc_ptr_reg_read20b()
556 static void snd_emu_proc_ptr_reg_read20c(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_read20c() argument
559 snd_emu_proc_ptr_reg_read(entry, buffer, 0x20, 0x80, 0x20, 4); snd_emu_proc_ptr_reg_read20c()
569 struct snd_info_entry *entry; snd_emu10k1_proc_init() local
572 if (! snd_card_proc_new(emu->card, "emu1010_regs", &entry)) snd_emu10k1_proc_init()
573 snd_info_set_text_ops(entry, emu, snd_emu_proc_emu1010_reg_read); snd_emu10k1_proc_init()
575 if (! snd_card_proc_new(emu->card, "io_regs", &entry)) { snd_emu10k1_proc_init()
576 snd_info_set_text_ops(entry, emu, snd_emu_proc_io_reg_read); snd_emu10k1_proc_init()
577 entry->c.text.write = snd_emu_proc_io_reg_write; snd_emu10k1_proc_init()
578 entry->mode |= S_IWUSR; snd_emu10k1_proc_init()
580 if (! snd_card_proc_new(emu->card, "ptr_regs00a", &entry)) { snd_emu10k1_proc_init()
581 snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read00a); snd_emu10k1_proc_init()
582 entry->c.text.write = snd_emu_proc_ptr_reg_write00; snd_emu10k1_proc_init()
583 entry->mode |= S_IWUSR; snd_emu10k1_proc_init()
585 if (! snd_card_proc_new(emu->card, "ptr_regs00b", &entry)) { snd_emu10k1_proc_init()
586 snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read00b); snd_emu10k1_proc_init()
587 entry->c.text.write = snd_emu_proc_ptr_reg_write00; snd_emu10k1_proc_init()
588 entry->mode |= S_IWUSR; snd_emu10k1_proc_init()
590 if (! snd_card_proc_new(emu->card, "ptr_regs20a", &entry)) { snd_emu10k1_proc_init()
591 snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read20a); snd_emu10k1_proc_init()
592 entry->c.text.write = snd_emu_proc_ptr_reg_write20; snd_emu10k1_proc_init()
593 entry->mode |= S_IWUSR; snd_emu10k1_proc_init()
595 if (! snd_card_proc_new(emu->card, "ptr_regs20b", &entry)) { snd_emu10k1_proc_init()
596 snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read20b); snd_emu10k1_proc_init()
597 entry->c.text.write = snd_emu_proc_ptr_reg_write20; snd_emu10k1_proc_init()
598 entry->mode |= S_IWUSR; snd_emu10k1_proc_init()
600 if (! snd_card_proc_new(emu->card, "ptr_regs20c", &entry)) { snd_emu10k1_proc_init()
601 snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read20c); snd_emu10k1_proc_init()
602 entry->c.text.write = snd_emu_proc_ptr_reg_write20; snd_emu10k1_proc_init()
603 entry->mode |= S_IWUSR; snd_emu10k1_proc_init()
607 if (! snd_card_proc_new(emu->card, "emu10k1", &entry)) snd_emu10k1_proc_init()
608 snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_read); snd_emu10k1_proc_init()
611 if (! snd_card_proc_new(emu->card, "spdif-in", &entry)) snd_emu10k1_proc_init()
612 snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_spdif_read); snd_emu10k1_proc_init()
615 if (! snd_card_proc_new(emu->card, "capture-rates", &entry)) snd_emu10k1_proc_init()
616 snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_rates_read); snd_emu10k1_proc_init()
619 if (! snd_card_proc_new(emu->card, "voices", &entry)) snd_emu10k1_proc_init()
620 snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_voices_read); snd_emu10k1_proc_init()
622 if (! snd_card_proc_new(emu->card, "fx8010_gpr", &entry)) { snd_emu10k1_proc_init()
623 entry->content = SNDRV_INFO_CONTENT_DATA; snd_emu10k1_proc_init()
624 entry->private_data = emu; snd_emu10k1_proc_init()
625 entry->mode = S_IFREG | S_IRUGO /*| S_IWUSR*/; snd_emu10k1_proc_init()
626 entry->size = emu->audigy ? A_TOTAL_SIZE_GPR : TOTAL_SIZE_GPR; snd_emu10k1_proc_init()
627 entry->c.ops = &snd_emu10k1_proc_ops_fx8010; snd_emu10k1_proc_init()
629 if (! snd_card_proc_new(emu->card, "fx8010_tram_data", &entry)) { snd_emu10k1_proc_init()
630 entry->content = SNDRV_INFO_CONTENT_DATA; snd_emu10k1_proc_init()
631 entry->private_data = emu; snd_emu10k1_proc_init()
632 entry->mode = S_IFREG | S_IRUGO /*| S_IWUSR*/; snd_emu10k1_proc_init()
633 entry->size = emu->audigy ? A_TOTAL_SIZE_TANKMEM_DATA : TOTAL_SIZE_TANKMEM_DATA ; snd_emu10k1_proc_init()
634 entry->c.ops = &snd_emu10k1_proc_ops_fx8010; snd_emu10k1_proc_init()
636 if (! snd_card_proc_new(emu->card, "fx8010_tram_addr", &entry)) { snd_emu10k1_proc_init()
637 entry->content = SNDRV_INFO_CONTENT_DATA; snd_emu10k1_proc_init()
638 entry->private_data = emu; snd_emu10k1_proc_init()
639 entry->mode = S_IFREG | S_IRUGO /*| S_IWUSR*/; snd_emu10k1_proc_init()
640 entry->size = emu->audigy ? A_TOTAL_SIZE_TANKMEM_ADDR : TOTAL_SIZE_TANKMEM_ADDR ; snd_emu10k1_proc_init()
641 entry->c.ops = &snd_emu10k1_proc_ops_fx8010; snd_emu10k1_proc_init()
643 if (! snd_card_proc_new(emu->card, "fx8010_code", &entry)) { snd_emu10k1_proc_init()
644 entry->content = SNDRV_INFO_CONTENT_DATA; snd_emu10k1_proc_init()
645 entry->private_data = emu; snd_emu10k1_proc_init()
646 entry->mode = S_IFREG | S_IRUGO /*| S_IWUSR*/; snd_emu10k1_proc_init()
647 entry->size = emu->audigy ? A_TOTAL_SIZE_CODE : TOTAL_SIZE_CODE; snd_emu10k1_proc_init()
648 entry->c.ops = &snd_emu10k1_proc_ops_fx8010; snd_emu10k1_proc_init()
650 if (! snd_card_proc_new(emu->card, "fx8010_acode", &entry)) { snd_emu10k1_proc_init()
651 entry->content = SNDRV_INFO_CONTENT_TEXT; snd_emu10k1_proc_init()
652 entry->private_data = emu; snd_emu10k1_proc_init()
653 entry->mode = S_IFREG | S_IRUGO /*| S_IWUSR*/; snd_emu10k1_proc_init()
654 entry->c.text.read = snd_emu10k1_proc_acode_read; snd_emu10k1_proc_init()
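
snd_emu10k1_proc_init() above repeats one registration pattern per proc file. Purely as an illustrative refactoring sketch, the same shape can be table-driven; proc_new() below is a stand-in stub, not the real snd_card_proc_new():

#include <stdio.h>

typedef void (*read_fn)(void);

static void read_io(void)  { }
static void read_ptr(void) { }

static int proc_new(const char *name, read_fn fn)
{
	printf("registered %s\n", name);	/* stand-in for the ALSA call */
	(void)fn;
	return 0;
}

int main(void)
{
	static const struct { const char *name; read_fn fn; } tbl[] = {
		{ "io_regs",     read_io  },
		{ "ptr_regs00a", read_ptr },
		{ "ptr_regs00b", read_ptr },
	};
	unsigned i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (proc_new(tbl[i].name, tbl[i].fn))
			return 1;
	return 0;
}
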
/linux-4.4.14/net/netfilter/
H A Dnf_queue.c49 void nf_queue_entry_release_refs(struct nf_queue_entry *entry) nf_queue_entry_release_refs() argument
51 struct nf_hook_state *state = &entry->state; nf_queue_entry_release_refs()
61 if (entry->skb->nf_bridge) { nf_queue_entry_release_refs()
64 physdev = nf_bridge_get_physindev(entry->skb); nf_queue_entry_release_refs()
67 physdev = nf_bridge_get_physoutdev(entry->skb); nf_queue_entry_release_refs()
76 void nf_queue_entry_get_refs(struct nf_queue_entry *entry) nf_queue_entry_get_refs() argument
78 struct nf_hook_state *state = &entry->state; nf_queue_entry_get_refs()
87 if (entry->skb->nf_bridge) { nf_queue_entry_get_refs()
90 physdev = nf_bridge_get_physindev(entry->skb); nf_queue_entry_get_refs()
93 physdev = nf_bridge_get_physoutdev(entry->skb); nf_queue_entry_get_refs()
122 struct nf_queue_entry *entry = NULL; nf_queue() local
137 entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC); nf_queue()
138 if (!entry) { nf_queue()
143 *entry = (struct nf_queue_entry) { nf_queue()
147 .size = sizeof(*entry) + afinfo->route_key_size, nf_queue()
150 nf_queue_entry_get_refs(entry); nf_queue()
152 afinfo->saveroute(skb, entry); nf_queue()
153 status = qh->outfn(entry, queuenum); nf_queue()
156 nf_queue_entry_release_refs(entry); nf_queue()
163 kfree(entry); nf_queue()
167 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) nf_reinject() argument
169 struct sk_buff *skb = entry->skb; nf_reinject()
170 struct nf_hook_ops *elem = entry->elem; nf_reinject()
174 nf_queue_entry_release_refs(entry); nf_reinject()
178 verdict = elem->hook(elem->priv, skb, &entry->state); nf_reinject()
181 afinfo = nf_get_afinfo(entry->state.pf); nf_reinject()
182 if (!afinfo || afinfo->reroute(entry->state.net, skb, entry) < 0) nf_reinject()
186 entry->state.thresh = INT_MIN; nf_reinject()
190 verdict = nf_iterate(entry->state.hook_list, nf_reinject()
191 skb, &entry->state, &elem); nf_reinject()
198 entry->state.okfn(entry->state.net, entry->state.sk, skb); nf_reinject()
202 err = nf_queue(skb, elem, &entry->state, nf_reinject()
217 kfree(entry); nf_reinject()
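
nf_queue() above allocates the entry and its per-family route key in one kmalloc(sizeof(*entry) + afinfo->route_key_size), and records that total in entry->size so nf_queue_entry_dup() can clone both with a single kmemdup(). The same trailing-storage idiom in user space, with a flexible array member standing in for the route data:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct queue_entry {
	size_t size;			/* total allocation, like entry->size */
	unsigned char route_key[];	/* per-family data lives right behind */
};

int main(void)
{
	size_t route_key_size = 16;
	struct queue_entry *entry, *dup;

	entry = malloc(sizeof(*entry) + route_key_size);
	if (!entry)
		return 1;
	entry->size = sizeof(*entry) + route_key_size;
	memset(entry->route_key, 0, route_key_size);

	/* copying entry->size bytes duplicates the entry and its key
	 * together, which is what the kmemdup() in the kernel relies on */
	dup = malloc(entry->size);
	if (dup)
		memcpy(dup, entry, entry->size);

	printf("entry %zu bytes total\n", entry->size);
	free(dup);
	free(entry);
	return 0;
}
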
H A Dnfnetlink_queue.c188 __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) __enqueue_entry() argument
190 list_add_tail(&entry->list, &queue->queue_list); __enqueue_entry()
195 __dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) __dequeue_entry() argument
197 list_del(&entry->list); __dequeue_entry()
204 struct nf_queue_entry *entry = NULL, *i; find_dequeue_entry() local
210 entry = i; find_dequeue_entry()
215 if (entry) find_dequeue_entry()
216 __dequeue_entry(queue, entry); find_dequeue_entry()
220 return entry; find_dequeue_entry()
226 struct nf_queue_entry *entry, *next; nfqnl_flush() local
229 list_for_each_entry_safe(entry, next, &queue->queue_list, list) { nfqnl_flush()
230 if (!cmpfn || cmpfn(entry, data)) { nfqnl_flush()
231 list_del(&entry->list); nfqnl_flush()
233 nf_reinject(entry, NF_DROP); nfqnl_flush()
300 struct nf_queue_entry *entry, nfqnl_build_packet_message()
311 struct sk_buff *entskb = entry->skb; nfqnl_build_packet_message()
337 if (entry->state.hook <= NF_INET_FORWARD || nfqnl_build_packet_message()
338 (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL)) nfqnl_build_packet_message()
343 outdev = entry->state.out; nfqnl_build_packet_message()
405 nfmsg->nfgen_family = entry->state.pf; nfqnl_build_packet_message()
412 pmsg->hook = entry->state.hook; nfqnl_build_packet_message()
415 indev = entry->state.in; nfqnl_build_packet_message()
421 if (entry->state.pf == PF_BRIDGE) { nfqnl_build_packet_message()
455 if (entry->state.pf == PF_BRIDGE) { nfqnl_build_packet_message()
556 struct nf_queue_entry *entry) __nfqnl_enqueue_packet()
563 nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr); __nfqnl_enqueue_packet()
581 entry->id = ++queue->id_sequence; __nfqnl_enqueue_packet()
582 *packet_id_ptr = htonl(entry->id); __nfqnl_enqueue_packet()
591 __enqueue_entry(queue, entry); __nfqnl_enqueue_packet()
601 nf_reinject(entry, NF_ACCEPT); __nfqnl_enqueue_packet()
609 struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC); nf_queue_entry_dup() local
610 if (entry) nf_queue_entry_dup()
611 nf_queue_entry_get_refs(entry); nf_queue_entry_dup()
612 return entry; nf_queue_entry_dup()
636 static void free_entry(struct nf_queue_entry *entry) free_entry() argument
638 nf_queue_entry_release_refs(entry); free_entry()
639 kfree(entry); free_entry()
644 struct sk_buff *skb, struct nf_queue_entry *entry) __nfqnl_enqueue_packet_gso()
651 if (skb->next == NULL) { /* last packet, no need to copy entry */ __nfqnl_enqueue_packet_gso()
652 struct sk_buff *gso_skb = entry->skb; __nfqnl_enqueue_packet_gso()
653 entry->skb = skb; __nfqnl_enqueue_packet_gso()
654 ret = __nfqnl_enqueue_packet(net, queue, entry); __nfqnl_enqueue_packet_gso()
656 entry->skb = gso_skb; __nfqnl_enqueue_packet_gso()
662 entry_seg = nf_queue_entry_dup(entry); __nfqnl_enqueue_packet_gso()
673 nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) nfqnl_enqueue_packet() argument
679 struct net *net = entry->state.net; nfqnl_enqueue_packet()
690 skb = entry->skb; nfqnl_enqueue_packet()
692 switch (entry->state.pf) { nfqnl_enqueue_packet()
702 return __nfqnl_enqueue_packet(net, queue, entry); nfqnl_enqueue_packet()
718 segs, entry); nfqnl_enqueue_packet()
728 free_entry(entry); nfqnl_enqueue_packet()
800 dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) dev_cmp() argument
802 if (entry->state.in) dev_cmp()
803 if (entry->state.in->ifindex == ifindex) dev_cmp()
805 if (entry->state.out) dev_cmp()
806 if (entry->state.out->ifindex == ifindex) dev_cmp()
809 if (entry->skb->nf_bridge) { dev_cmp()
812 physinif = nf_bridge_get_physinif(entry->skb); dev_cmp()
813 physoutif = nf_bridge_get_physoutif(entry->skb); dev_cmp()
859 static int nf_hook_cmp(struct nf_queue_entry *entry, unsigned long ops_ptr) nf_hook_cmp() argument
861 return entry->elem == (struct nf_hook_ops *)ops_ptr; nf_hook_cmp()
966 struct nf_queue_entry *entry, *tmp; nfqnl_recv_verdict_batch() local
990 list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) { nfqnl_recv_verdict_batch()
991 if (nfq_id_after(entry->id, maxid)) nfqnl_recv_verdict_batch()
993 __dequeue_entry(queue, entry); nfqnl_recv_verdict_batch()
994 list_add_tail(&entry->list, &batch_list); nfqnl_recv_verdict_batch()
1002 list_for_each_entry_safe(entry, tmp, &batch_list, list) { nfqnl_recv_verdict_batch()
1004 entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); nfqnl_recv_verdict_batch()
1005 nf_reinject(entry, verdict); nfqnl_recv_verdict_batch()
1013 struct nf_queue_entry *entry, nfqnl_ct_parse()
1018 ct = nfnl_ct->get_ct(entry->skb, ctinfo); nfqnl_ct_parse()
1027 NETLINK_CB(entry->skb).portid, nfqnl_ct_parse()
1043 struct nf_queue_entry *entry; nfqnl_recv_verdict() local
1064 entry = find_dequeue_entry(queue, ntohl(vhdr->id)); nfqnl_recv_verdict()
1065 if (entry == NULL) nfqnl_recv_verdict()
1073 ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo); nfqnl_recv_verdict()
1078 int diff = payload_len - entry->skb->len; nfqnl_recv_verdict()
1081 payload_len, entry, diff) < 0) nfqnl_recv_verdict()
1085 nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff); nfqnl_recv_verdict()
1089 entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); nfqnl_recv_verdict()
1091 nf_reinject(entry, verdict); nfqnl_recv_verdict()
299 nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, struct nf_queue_entry *entry, __be32 **packet_id_ptr) nfqnl_build_packet_message() argument
555 __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue, struct nf_queue_entry *entry) __nfqnl_enqueue_packet() argument
643 __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue, struct sk_buff *skb, struct nf_queue_entry *entry) __nfqnl_enqueue_packet_gso() argument
1010 nfqnl_ct_parse(struct nfnl_ct_hook *nfnl_ct, const struct nlmsghdr *nlh, const struct nlattr * const nfqa[], struct nf_queue_entry *entry, enum ip_conntrack_info *ctinfo) nfqnl_ct_parse() argument
/linux-4.4.14/lib/
H A Dlist_debug.c16 * Insert a new entry between two known consecutive entries.
44 void __list_del_entry(struct list_head *entry) __list_del_entry() argument
48 prev = entry->prev; __list_del_entry()
49 next = entry->next; __list_del_entry()
53 entry, LIST_POISON1) || __list_del_entry()
56 entry, LIST_POISON2) || __list_del_entry()
57 WARN(prev->next != entry, __list_del_entry()
59 "but was %p\n", entry, prev->next) || __list_del_entry()
60 WARN(next->prev != entry, __list_del_entry()
62 "but was %p\n", entry, next->prev)) __list_del_entry()
70 * list_del - deletes entry from list.
71 * @entry: the element to delete from the list.
72 * Note: list_empty on entry does not return true after this, the entry is
75 void list_del(struct list_head *entry) list_del() argument
77 __list_del_entry(entry); list_del()
78 entry->next = LIST_POISON1; list_del()
79 entry->prev = LIST_POISON2; list_del()
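
list_del() above poisons next/prev after unlinking so a later use of the stale node trips the WARN checks in __list_del_entry(). A compact user-space sketch of that mechanism; the poison constants here are arbitrary demo values, not the kernel's LIST_POISON addresses:

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

#define LIST_POISON1 ((struct list_head *)0x100)
#define LIST_POISON2 ((struct list_head *)0x200)

static int list_del_checked(struct list_head *entry)
{
	if (entry->next == LIST_POISON1 || entry->prev == LIST_POISON2) {
		fprintf(stderr, "list_del corruption: double delete of %p\n",
			(void *)entry);
		return -1;
	}
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = LIST_POISON1;	/* stale uses now hit the sentinels */
	entry->prev = LIST_POISON2;
	return 0;
}

int main(void)
{
	struct list_head head, a;

	head.prev = head.next = &a;
	a.prev = a.next = &head;

	list_del_checked(&a);	/* fine */
	list_del_checked(&a);	/* caught by the poison check */
	return 0;
}
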
H A Ddma-debug.c172 static inline void dump_entry_trace(struct dma_debug_entry *entry) dump_entry_trace() argument
175 if (entry) { dump_entry_trace()
177 print_stack_trace(&entry->stacktrace, 0); dump_entry_trace()
223 #define err_printk(dev, entry, format, arg...) do { \
230 dump_entry_trace(entry); \
242 static int hash_fn(struct dma_debug_entry *entry) hash_fn() argument
248 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK; hash_fn()
254 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, get_hash_bucket() argument
257 int idx = hash_fn(entry); get_hash_bucket()
296 * Search a given entry in the hash bucket list
302 struct dma_debug_entry *entry, *ret = NULL; __hash_bucket_find() local
305 list_for_each_entry(entry, &bucket->list, list) { __hash_bucket_find()
306 if (!match(ref, entry)) __hash_bucket_find()
315 * best-fit algorithm here which returns the entry from __hash_bucket_find()
321 entry->size == ref->size ? ++match_lvl : 0; __hash_bucket_find()
322 entry->type == ref->type ? ++match_lvl : 0; __hash_bucket_find()
323 entry->direction == ref->direction ? ++match_lvl : 0; __hash_bucket_find()
324 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0; __hash_bucket_find()
328 return entry; __hash_bucket_find()
331 * We found an entry that fits better than the __hash_bucket_find()
335 ret = entry; __hash_bucket_find()
360 struct dma_debug_entry *entry, index = *ref; bucket_find_contain() local
364 entry = __hash_bucket_find(*bucket, ref, containing_match); bucket_find_contain()
366 if (entry) bucket_find_contain()
367 return entry; bucket_find_contain()
382 * Add an entry to a hash bucket
385 struct dma_debug_entry *entry) hash_bucket_add()
387 list_add_tail(&entry->list, &bucket->list); hash_bucket_add()
391 * Remove entry from a hash bucket list
393 static void hash_bucket_del(struct dma_debug_entry *entry) hash_bucket_del() argument
395 list_del(&entry->list); hash_bucket_del()
398 static unsigned long long phys_addr(struct dma_debug_entry *entry) phys_addr() argument
400 return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; phys_addr()
412 struct dma_debug_entry *entry; debug_dma_dump_mappings() local
417 list_for_each_entry(entry, &bucket->list, list) { debug_dma_dump_mappings()
418 if (!dev || dev == entry->dev) { debug_dma_dump_mappings()
419 dev_info(entry->dev, debug_dma_dump_mappings()
421 type2name[entry->type], idx, debug_dma_dump_mappings()
422 phys_addr(entry), entry->pfn, debug_dma_dump_mappings()
423 entry->dev_addr, entry->size, debug_dma_dump_mappings()
424 dir2name[entry->direction], debug_dma_dump_mappings()
425 maperr2str[entry->map_err_type]); debug_dma_dump_mappings()
439 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
440 * the entry already exists at insertion time add a tag as a reference
450 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
463 static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry) to_cacheline_number() argument
465 return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) + to_cacheline_number()
466 (entry->offset >> L1_CACHE_SHIFT); to_cacheline_number()
519 static int active_cacheline_insert(struct dma_debug_entry *entry) active_cacheline_insert() argument
521 phys_addr_t cln = to_cacheline_number(entry); active_cacheline_insert()
529 if (entry->direction == DMA_TO_DEVICE) active_cacheline_insert()
533 rc = radix_tree_insert(&dma_active_cacheline, cln, entry); active_cacheline_insert()
541 static void active_cacheline_remove(struct dma_debug_entry *entry) active_cacheline_remove() argument
543 phys_addr_t cln = to_cacheline_number(entry); active_cacheline_remove()
547 if (entry->direction == DMA_TO_DEVICE) active_cacheline_remove()
571 struct dma_debug_entry *entry = NULL; debug_dma_assert_idle() local
591 entry = ents[i]; debug_dma_assert_idle()
598 if (!entry) debug_dma_assert_idle()
601 cln = to_cacheline_number(entry); debug_dma_assert_idle()
602 err_printk(entry->dev, entry, debug_dma_assert_idle()
608 * Wrapper function for adding an entry to the hash.
611 static void add_dma_entry(struct dma_debug_entry *entry) add_dma_entry() argument
617 bucket = get_hash_bucket(entry, &flags); add_dma_entry()
618 hash_bucket_add(bucket, entry); add_dma_entry()
621 rc = active_cacheline_insert(entry); add_dma_entry()
634 struct dma_debug_entry *entry; __dma_entry_alloc() local
636 entry = list_entry(free_entries.next, struct dma_debug_entry, list); __dma_entry_alloc()
637 list_del(&entry->list); __dma_entry_alloc()
638 memset(entry, 0, sizeof(*entry)); __dma_entry_alloc()
644 return entry; __dma_entry_alloc()
654 struct dma_debug_entry *entry; dma_entry_alloc() local
666 entry = __dma_entry_alloc(); dma_entry_alloc()
671 entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; dma_entry_alloc()
672 entry->stacktrace.entries = entry->st_entries; dma_entry_alloc()
673 entry->stacktrace.skip = 2; dma_entry_alloc()
674 save_stack_trace(&entry->stacktrace); dma_entry_alloc()
677 return entry; dma_entry_alloc()
680 static void dma_entry_free(struct dma_debug_entry *entry) dma_entry_free() argument
684 active_cacheline_remove(entry); dma_entry_free()
691 list_add(&entry->list, &free_entries); dma_entry_free()
700 struct dma_debug_entry *entry; dma_debug_resize_entries() local
711 entry = kzalloc(sizeof(*entry), GFP_KERNEL); dma_debug_resize_entries()
712 if (!entry) dma_debug_resize_entries()
715 list_add_tail(&entry->list, &tmp); dma_debug_resize_entries()
727 entry = __dma_entry_alloc(); dma_debug_resize_entries()
728 kfree(entry); dma_debug_resize_entries()
753 struct dma_debug_entry *entry, *next_entry; prealloc_memory() local
757 entry = kzalloc(sizeof(*entry), GFP_KERNEL); prealloc_memory()
758 if (!entry) prealloc_memory()
761 list_add_tail(&entry->list, &free_entries); prealloc_memory()
773 list_for_each_entry_safe(entry, next_entry, &free_entries, list) { prealloc_memory()
774 list_del(&entry->list); prealloc_memory()
775 kfree(entry); prealloc_memory()
931 struct dma_debug_entry *entry; device_dma_allocations() local
939 list_for_each_entry(entry, &dma_entry_hash[i].list, list) { device_dma_allocations()
940 if (entry->dev == dev) { device_dma_allocations()
942 *out_entry = entry; device_dma_allocations()
956 struct dma_debug_entry *uninitialized_var(entry); dma_debug_device_change()
964 count = device_dma_allocations(dev, &entry); dma_debug_device_change()
967 err_printk(dev, entry, "DMA-API: device driver has pending " dma_debug_device_change()
973 count, entry->dev_addr, entry->size, dma_debug_device_change()
974 dir2name[entry->direction], type2name[entry->type]); dma_debug_device_change()
1076 struct dma_debug_entry *entry; check_unmap() local
1081 entry = bucket_find_exact(bucket, ref); check_unmap()
1083 if (!entry) { check_unmap()
1101 if (ref->size != entry->size) { check_unmap()
1102 err_printk(ref->dev, entry, "DMA-API: device driver frees " check_unmap()
1106 ref->dev_addr, entry->size, ref->size); check_unmap()
1109 if (ref->type != entry->type) { check_unmap()
1110 err_printk(ref->dev, entry, "DMA-API: device driver frees " check_unmap()
1115 type2name[entry->type], type2name[ref->type]); check_unmap()
1116 } else if ((entry->type == dma_debug_coherent) && check_unmap()
1117 (phys_addr(ref) != phys_addr(entry))) { check_unmap()
1118 err_printk(ref->dev, entry, "DMA-API: device driver frees " check_unmap()
1124 phys_addr(entry), check_unmap()
1129 ref->sg_call_ents != entry->sg_call_ents) { check_unmap()
1130 err_printk(ref->dev, entry, "DMA-API: device driver frees " check_unmap()
1131 "DMA sg list with different entry count " check_unmap()
1133 entry->sg_call_ents, ref->sg_call_ents); check_unmap()
1140 if (ref->direction != entry->direction) { check_unmap()
1141 err_printk(ref->dev, entry, "DMA-API: device driver frees " check_unmap()
1146 dir2name[entry->direction], check_unmap()
1150 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { check_unmap()
1151 err_printk(ref->dev, entry, check_unmap()
1156 type2name[entry->type]); check_unmap()
1159 hash_bucket_del(entry); check_unmap()
1160 dma_entry_free(entry); check_unmap()
1193 struct dma_debug_entry *entry; check_sync() local
1199 entry = bucket_find_contain(&bucket, ref, &flags); check_sync()
1201 if (!entry) { check_sync()
1209 if (ref->size > entry->size) { check_sync()
1210 err_printk(dev, entry, "DMA-API: device driver syncs" check_sync()
1215 entry->dev_addr, entry->size, check_sync()
1219 if (entry->direction == DMA_BIDIRECTIONAL) check_sync()
1222 if (ref->direction != entry->direction) { check_sync()
1223 err_printk(dev, entry, "DMA-API: device driver syncs " check_sync()
1227 (unsigned long long)ref->dev_addr, entry->size, check_sync()
1228 dir2name[entry->direction], check_sync()
1232 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && check_sync()
1234 err_printk(dev, entry, "DMA-API: device driver syncs " check_sync()
1238 (unsigned long long)ref->dev_addr, entry->size, check_sync()
1239 dir2name[entry->direction], check_sync()
1242 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && check_sync()
1244 err_printk(dev, entry, "DMA-API: device driver syncs " check_sync()
1248 (unsigned long long)ref->dev_addr, entry->size, check_sync()
1249 dir2name[entry->direction], check_sync()
1253 ref->sg_call_ents != entry->sg_call_ents) { check_sync()
1254 err_printk(ref->dev, entry, "DMA-API: device driver syncs " check_sync()
1255 "DMA sg list with different entry count " check_sync()
1257 entry->sg_call_ents, ref->sg_call_ents); check_sync()
1268 struct dma_debug_entry *entry; debug_dma_map_page() local
1276 entry = dma_entry_alloc(); debug_dma_map_page()
1277 if (!entry) debug_dma_map_page()
1280 entry->dev = dev; debug_dma_map_page()
1281 entry->type = dma_debug_page; debug_dma_map_page()
1282 entry->pfn = page_to_pfn(page); debug_dma_map_page()
1283 entry->offset = offset, debug_dma_map_page()
1284 entry->dev_addr = dma_addr; debug_dma_map_page()
1285 entry->size = size; debug_dma_map_page()
1286 entry->direction = direction; debug_dma_map_page()
1287 entry->map_err_type = MAP_ERR_NOT_CHECKED; debug_dma_map_page()
1290 entry->type = dma_debug_single; debug_dma_map_page()
1299 add_dma_entry(entry); debug_dma_map_page()
1306 struct dma_debug_entry *entry; debug_dma_mapping_error() local
1317 list_for_each_entry(entry, &bucket->list, list) { debug_dma_mapping_error()
1318 if (!exact_match(&ref, entry)) debug_dma_mapping_error()
1327 * best-fit algorithm here which updates the first entry debug_dma_mapping_error()
1331 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { debug_dma_mapping_error()
1332 entry->map_err_type = MAP_ERR_CHECKED; debug_dma_mapping_error()
1365 struct dma_debug_entry *entry; debug_dma_map_sg() local
1373 entry = dma_entry_alloc(); for_each_sg()
1374 if (!entry) for_each_sg()
1377 entry->type = dma_debug_sg; for_each_sg()
1378 entry->dev = dev; for_each_sg()
1379 entry->pfn = page_to_pfn(sg_page(s)); for_each_sg()
1380 entry->offset = s->offset, for_each_sg()
1381 entry->size = sg_dma_len(s); for_each_sg()
1382 entry->dev_addr = sg_dma_address(s); for_each_sg()
1383 entry->direction = direction; for_each_sg()
1384 entry->sg_call_ents = nents; for_each_sg()
1385 entry->sg_mapped_ents = mapped_ents; for_each_sg()
1392 add_dma_entry(entry); for_each_sg()
1400 struct dma_debug_entry *entry; get_nr_mapped_entries() local
1406 entry = bucket_find_exact(bucket, ref); get_nr_mapped_entries()
1409 if (entry) get_nr_mapped_entries()
1410 mapped_ents = entry->sg_mapped_ents; get_nr_mapped_entries()
1452 struct dma_debug_entry *entry; debug_dma_alloc_coherent() local
1460 entry = dma_entry_alloc(); debug_dma_alloc_coherent()
1461 if (!entry) debug_dma_alloc_coherent()
1464 entry->type = dma_debug_coherent; debug_dma_alloc_coherent()
1465 entry->dev = dev; debug_dma_alloc_coherent()
1466 entry->pfn = page_to_pfn(virt_to_page(virt)); debug_dma_alloc_coherent()
1467 entry->offset = (size_t) virt & ~PAGE_MASK; debug_dma_alloc_coherent()
1468 entry->size = size; debug_dma_alloc_coherent()
1469 entry->dev_addr = dma_addr; debug_dma_alloc_coherent()
1470 entry->direction = DMA_BIDIRECTIONAL; debug_dma_alloc_coherent()
1472 add_dma_entry(entry); debug_dma_alloc_coherent()
384 hash_bucket_add(struct hash_bucket *bucket, struct dma_debug_entry *entry) hash_bucket_add() argument
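
__hash_bucket_find() above scores candidates instead of requiring exact equality: each matching field bumps a level, a perfect score returns immediately, and otherwise the best fit seen so far wins. The same scoring loop extracted into a stand-alone demo (field set reduced to the four the kernel compares):

#include <stdio.h>

struct ref {
	unsigned long size;
	int type, direction, sg_call_ents;
};

static int match_lvl(const struct ref *entry, const struct ref *ref)
{
	int lvl = 0;

	/* the ternary-statement style below mirrors the kernel source */
	entry->size == ref->size ? ++lvl : 0;
	entry->type == ref->type ? ++lvl : 0;
	entry->direction == ref->direction ? ++lvl : 0;
	entry->sg_call_ents == ref->sg_call_ents ? ++lvl : 0;
	return lvl;		/* 4 == perfect match */
}

int main(void)
{
	struct ref want = { 4096, 1, 0, 8 };
	struct ref candidates[] = {
		{ 4096, 1, 0, 8 },	/* exact */
		{ 4096, 2, 0, 8 },	/* size/dir/ents match */
		{ 1024, 2, 1, 4 },	/* poor fit */
	};
	int i, best = -1, best_lvl = -1;

	for (i = 0; i < 3; i++) {
		int lvl = match_lvl(&candidates[i], &want);

		if (lvl == 4) {		/* perfect match: stop searching */
			best = i;
			break;
		}
		if (lvl > best_lvl) {	/* otherwise remember the best fit */
			best_lvl = lvl;
			best = i;
		}
	}
	printf("best candidate: %d\n", best);
	return 0;
}
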
H A Dllist.c32 * @new_first: first entry in batch to be added
33 * @new_last: last entry in batch to be added
52 * llist_del_first - delete the first entry of lock-less list
55 * If list is empty, return NULL, otherwise, return the first entry
67 struct llist_node *entry, *old_entry, *next; llist_del_first() local
69 entry = smp_load_acquire(&head->first); llist_del_first()
71 if (entry == NULL) llist_del_first()
73 old_entry = entry; llist_del_first()
74 next = READ_ONCE(entry->next); llist_del_first()
75 entry = cmpxchg(&head->first, old_entry, next); llist_del_first()
76 if (entry == old_entry) llist_del_first()
80 return entry; llist_del_first()
89 * new first entry.
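
llist_del_first() above retries a cmpxchg until the head is swung from the observed first node to its successor. A C11 rendering of the same retry shape (single-threaded demo, so the concurrency is only illustrated, not exercised):

#include <stdatomic.h>
#include <stdio.h>

struct llist_node { struct llist_node *next; };

static struct llist_node *del_first(struct llist_node *_Atomic *first)
{
	struct llist_node *entry = atomic_load(first);

	while (entry) {
		struct llist_node *next = entry->next;

		/* on failure, entry is reloaded with the current head
		 * and the loop tries again, as in the kernel version */
		if (atomic_compare_exchange_weak(first, &entry, next))
			break;
	}
	return entry;
}

int main(void)
{
	struct llist_node b = { NULL }, a = { &b };
	struct llist_node *_Atomic head = &a;

	printf("popped %p then %p\n", (void *)del_first(&head),
	       (void *)del_first(&head));
	return 0;
}
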
/linux-4.4.14/drivers/firmware/
H A Dmemmap.c31 * Firmware map entry. Because firmware memory maps are flat and not
43 struct list_head list; /* entry for the linked list */
44 struct kobject kobj; /* kobject for each entry */
52 static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
53 static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
54 static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
65 ssize_t (*show)(struct firmware_map_entry *entry, char *buf);
73 * These are default attributes that are added for every memmap entry.
93 * map entry is allocated by bootmem, we need to remember the storage and
108 struct firmware_map_entry *entry = to_memmap_entry(kobj); release_firmware_map_entry() local
110 if (PageReserved(virt_to_page(entry))) { release_firmware_map_entry()
113 * the memory is hot-added again. The entry will be added to release_firmware_map_entry()
118 list_add(&entry->list, &map_entries_bootmem); release_firmware_map_entry()
124 kfree(entry); release_firmware_map_entry()
138 * firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
142 * @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised
143 * entry.
152 struct firmware_map_entry *entry) firmware_map_add_entry()
156 entry->start = start; firmware_map_add_entry()
157 entry->end = end - 1; firmware_map_add_entry()
158 entry->type = type; firmware_map_add_entry()
159 INIT_LIST_HEAD(&entry->list); firmware_map_add_entry()
160 kobject_init(&entry->kobj, &memmap_ktype); firmware_map_add_entry()
163 list_add_tail(&entry->list, &map_entries); firmware_map_add_entry()
171 * memmap entry.
172 * @entry: removed entry.
176 static inline void firmware_map_remove_entry(struct firmware_map_entry *entry) firmware_map_remove_entry() argument
178 list_del(&entry->list); firmware_map_remove_entry()
182 * Add memmap entry on sysfs
184 static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry) add_sysfs_fw_map_entry() argument
189 if (entry->kobj.state_in_sysfs) add_sysfs_fw_map_entry()
198 entry->kobj.kset = mmap_kset; add_sysfs_fw_map_entry()
199 if (kobject_add(&entry->kobj, NULL, "%d", map_entries_nr++)) add_sysfs_fw_map_entry()
200 kobject_put(&entry->kobj); add_sysfs_fw_map_entry()
206 * Remove memmap entry on sysfs
208 static inline void remove_sysfs_fw_map_entry(struct firmware_map_entry *entry) remove_sysfs_fw_map_entry() argument
210 kobject_put(&entry->kobj); remove_sysfs_fw_map_entry()
214 * firmware_map_find_entry_in_list() - Search memmap entry in a given list.
218 * @list: In which to find the entry.
222 * the lock until the processing of the returned entry has completed.
224 * Return: Pointer to the entry to be found on success, or NULL on failure.
230 struct firmware_map_entry *entry; firmware_map_find_entry_in_list() local
232 list_for_each_entry(entry, list, list) list_for_each_entry()
233 if ((entry->start == start) && (entry->end == end) && list_for_each_entry()
234 (!strcmp(entry->type, type))) { list_for_each_entry()
235 return entry; list_for_each_entry()
242 * firmware_map_find_entry() - Search memmap entry in map_entries.
249 * until the processing of the returned entry has completed.
251 * Return: Pointer to the entry to be found on success, or NULL on failure.
260 * firmware_map_find_entry_bootmem() - Search memmap entry in map_entries_bootmem.
266 * given entry in map_entries_bootmem.
268 * Return: Pointer to the entry to be found on success, or NULL on failure.
278 * firmware_map_add_hotplug() - Adds a firmware mapping entry when we do
284 * Adds a firmware mapping entry. This function is for memory hotplug, it is
286 * it will create the sysfs entry dynamically.
292 struct firmware_map_entry *entry; firmware_map_add_hotplug() local
294 entry = firmware_map_find_entry(start, end - 1, type); firmware_map_add_hotplug()
295 if (entry) firmware_map_add_hotplug()
298 entry = firmware_map_find_entry_bootmem(start, end - 1, type); firmware_map_add_hotplug()
299 if (!entry) { firmware_map_add_hotplug()
300 entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC); firmware_map_add_hotplug()
301 if (!entry) firmware_map_add_hotplug()
306 list_del(&entry->list); firmware_map_add_hotplug()
309 memset(entry, 0, sizeof(*entry)); firmware_map_add_hotplug()
312 firmware_map_add_entry(start, end, type, entry); firmware_map_add_hotplug()
313 /* create the memmap entry */ firmware_map_add_hotplug()
314 add_sysfs_fw_map_entry(entry); firmware_map_add_hotplug()
320 * firmware_map_add_early() - Adds a firmware mapping entry.
325 * Adds a firmware mapping entry. This function uses the bootmem allocator
334 struct firmware_map_entry *entry; firmware_map_add_early() local
336 entry = memblock_virt_alloc(sizeof(struct firmware_map_entry), 0); firmware_map_add_early()
337 if (WARN_ON(!entry)) firmware_map_add_early()
340 return firmware_map_add_entry(start, end, type, entry); firmware_map_add_early()
344 * firmware_map_remove() - remove a firmware mapping entry
349 * removes a firmware mapping entry.
351 * Return: 0 on success, or -EINVAL if no entry.
355 struct firmware_map_entry *entry; firmware_map_remove() local
358 entry = firmware_map_find_entry(start, end - 1, type); firmware_map_remove()
359 if (!entry) { firmware_map_remove()
364 firmware_map_remove_entry(entry); firmware_map_remove()
367 /* remove the memmap entry */ firmware_map_remove()
368 remove_sysfs_fw_map_entry(entry); firmware_map_remove()
377 static ssize_t start_show(struct firmware_map_entry *entry, char *buf) start_show() argument
380 (unsigned long long)entry->start); start_show()
383 static ssize_t end_show(struct firmware_map_entry *entry, char *buf) end_show() argument
386 (unsigned long long)entry->end); end_show()
389 static ssize_t type_show(struct firmware_map_entry *entry, char *buf) type_show() argument
391 return snprintf(buf, PAGE_SIZE, "%s\n", entry->type); type_show()
402 struct firmware_map_entry *entry = to_memmap_entry(kobj); memmap_attr_show() local
405 return memmap_attr->show(entry, buf); memmap_attr_show()
418 struct firmware_map_entry *entry; firmware_memmap_init() local
420 list_for_each_entry(entry, &map_entries, list) firmware_memmap_init()
421 add_sysfs_fw_map_entry(entry); firmware_memmap_init()
150 firmware_map_add_entry(u64 start, u64 end, const char *type, struct firmware_map_entry *entry) firmware_map_add_entry() argument
H A Ddmi-sysfs.c13 * entry.
30 the top entry type is only 8 bits */
52 ssize_t (*show)(struct dmi_sysfs_entry *entry, char *buf);
62 * dmi_sysfs_mapped_attribute - Attribute where we require the entry be
67 ssize_t (*show)(struct dmi_sysfs_entry *entry,
79 * Generic DMI entry support.
99 struct dmi_sysfs_entry *entry = to_entry(kobj); dmi_sysfs_attr_show() local
106 return attr->show(entry, buf); dmi_sysfs_attr_show()
117 struct dmi_sysfs_entry *entry; member in struct:find_dmi_data
128 struct dmi_sysfs_entry *entry = data->entry; find_dmi_entry_helper() local
130 /* Is this the entry we want? */ find_dmi_entry_helper()
131 if (dh->type != entry->dh.type) find_dmi_entry_helper()
146 /* Found the entry */ find_dmi_entry_helper()
147 data->ret = data->callback(entry, dh, data->private); find_dmi_entry_helper()
157 static ssize_t find_dmi_entry(struct dmi_sysfs_entry *entry, find_dmi_entry() argument
161 .entry = entry, find_dmi_entry()
164 .instance_countdown = entry->instance, find_dmi_entry()
165 .ret = -EIO, /* To signal the entry disappeared */ find_dmi_entry()
177 * Calculate and return the byte length of the dmi entry identified by
194 * Support bits for specialized DMI entry support
201 static ssize_t dmi_entry_attr_show_helper(struct dmi_sysfs_entry *entry, dmi_entry_attr_show_helper() argument
210 return attr->show(entry, dh, data->buf); dmi_entry_attr_show_helper()
221 /* Find the entry according to our parent and call the dmi_entry_attr_show()
232 * Specialized DMI entry support.
267 static ssize_t dmi_sysfs_sel_##_field(struct dmi_sysfs_entry *entry, \
359 static ssize_t dmi_sel_raw_read_io(struct dmi_sysfs_entry *entry, dmi_sel_raw_read_io() argument
376 static ssize_t dmi_sel_raw_read_phys32(struct dmi_sysfs_entry *entry, dmi_sel_raw_read_phys32() argument
397 static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry, dmi_sel_raw_read_helper() argument
413 return dmi_sel_raw_read_io(entry, &sel, state->buf, dmi_sel_raw_read_helper()
416 return dmi_sel_raw_read_phys32(entry, &sel, state->buf, dmi_sel_raw_read_helper()
432 struct dmi_sysfs_entry *entry = to_entry(kobj->parent); dmi_sel_raw_read() local
439 return find_dmi_entry(entry, dmi_sel_raw_read_helper, &state); dmi_sel_raw_read()
447 static int dmi_system_event_log(struct dmi_sysfs_entry *entry) dmi_system_event_log() argument
451 entry->child = kzalloc(sizeof(*entry->child), GFP_KERNEL); dmi_system_event_log()
452 if (!entry->child) dmi_system_event_log()
454 ret = kobject_init_and_add(entry->child, dmi_system_event_log()
456 &entry->kobj, dmi_system_event_log()
461 ret = sysfs_create_bin_file(entry->child, &dmi_sel_raw_attr); dmi_system_event_log()
468 kobject_del(entry->child); dmi_system_event_log()
470 kfree(entry->child); dmi_system_event_log()
475 * Generic DMI entry support.
478 static ssize_t dmi_sysfs_entry_length(struct dmi_sysfs_entry *entry, char *buf) dmi_sysfs_entry_length() argument
480 return sprintf(buf, "%d\n", entry->dh.length); dmi_sysfs_entry_length()
483 static ssize_t dmi_sysfs_entry_handle(struct dmi_sysfs_entry *entry, char *buf) dmi_sysfs_entry_handle() argument
485 return sprintf(buf, "%d\n", entry->dh.handle); dmi_sysfs_entry_handle()
488 static ssize_t dmi_sysfs_entry_type(struct dmi_sysfs_entry *entry, char *buf) dmi_sysfs_entry_type() argument
490 return sprintf(buf, "%d\n", entry->dh.type); dmi_sysfs_entry_type()
493 static ssize_t dmi_sysfs_entry_instance(struct dmi_sysfs_entry *entry, dmi_sysfs_entry_instance() argument
496 return sprintf(buf, "%d\n", entry->instance); dmi_sysfs_entry_instance()
499 static ssize_t dmi_sysfs_entry_position(struct dmi_sysfs_entry *entry, dmi_sysfs_entry_position() argument
502 return sprintf(buf, "%d\n", entry->position); dmi_sysfs_entry_position()
505 static DMI_SYSFS_ATTR(entry, length);
506 static DMI_SYSFS_ATTR(entry, handle);
507 static DMI_SYSFS_ATTR(entry, type);
508 static DMI_SYSFS_ATTR(entry, instance);
509 static DMI_SYSFS_ATTR(entry, position);
520 static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry, dmi_entry_raw_read_helper() argument
538 struct dmi_sysfs_entry *entry = to_entry(kobj); dmi_entry_raw_read() local
545 return find_dmi_entry(entry, dmi_entry_raw_read_helper, &state); dmi_entry_raw_read()
555 struct dmi_sysfs_entry *entry = to_entry(kobj); dmi_sysfs_entry_release() local
558 list_del(&entry->list); dmi_sysfs_entry_release()
560 kfree(entry); dmi_sysfs_entry_release()
580 struct dmi_sysfs_entry *entry; dmi_sysfs_register_handle() local
583 /* If a previous entry saw an error, short circuit */ dmi_sysfs_register_handle()
587 /* Allocate and register a new entry into the entries set */ dmi_sysfs_register_handle()
588 entry = kzalloc(sizeof(*entry), GFP_KERNEL); dmi_sysfs_register_handle()
589 if (!entry) { dmi_sysfs_register_handle()
595 memcpy(&entry->dh, dh, sizeof(*dh)); dmi_sysfs_register_handle()
596 entry->instance = instance_counts[dh->type]++; dmi_sysfs_register_handle()
597 entry->position = position_count++; dmi_sysfs_register_handle()
599 entry->kobj.kset = dmi_kset; dmi_sysfs_register_handle()
600 *ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL, dmi_sysfs_register_handle()
601 "%d-%d", dh->type, entry->instance); dmi_sysfs_register_handle()
604 kfree(entry); dmi_sysfs_register_handle()
610 list_add_tail(&entry->list, &entry_list); dmi_sysfs_register_handle()
616 *ret = dmi_system_event_log(entry); dmi_sysfs_register_handle()
625 /* Create the raw binary file to access the entry */ dmi_sysfs_register_handle()
626 *ret = sysfs_create_bin_file(&entry->kobj, &dmi_entry_raw_attr); dmi_sysfs_register_handle()
632 kobject_put(entry->child); dmi_sysfs_register_handle()
633 kobject_put(&entry->kobj); dmi_sysfs_register_handle()
639 struct dmi_sysfs_entry *entry, *next; cleanup_entry_list() local
642 list_for_each_entry_safe(entry, next, &entry_list, list) { cleanup_entry_list()
643 kobject_put(entry->child); cleanup_entry_list()
644 kobject_put(&entry->kobj); cleanup_entry_list()
654 pr_err("dmi-sysfs: dmi entry is absent.\n"); dmi_sysfs_init()
H A Dqcom_scm-64.c19 * @entry: Entry point function for the cpus
20 * @cpus: The cpumask of cpus that will use the entry point
25 int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) __qcom_scm_set_cold_boot_addr() argument
32 * @entry: Entry point function for the cpus
33 * @cpus: The cpumask of cpus that will use the entry point
35 * Set the Linux entry point for the SCM to transfer control to when coming
38 int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) __qcom_scm_set_warm_boot_addr() argument
49 * warm boot entry point set for this cpu upon reset.
H A Dqcom_scm.c28 * @entry: Entry point function for the cpus
29 * @cpus: The cpumask of cpus that will use the entry point
34 int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) qcom_scm_set_cold_boot_addr() argument
36 return __qcom_scm_set_cold_boot_addr(entry, cpus); qcom_scm_set_cold_boot_addr()
42 * @entry: Entry point function for the cpus
43 * @cpus: The cpumask of cpus that will use the entry point
45 * Set the Linux entry point for the SCM to transfer control to when coming
48 int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) qcom_scm_set_warm_boot_addr() argument
50 return __qcom_scm_set_warm_boot_addr(entry, cpus); qcom_scm_set_warm_boot_addr()
60 * warm boot entry point set for this cpu upon reset.
/linux-4.4.14/sound/drivers/opl4/
H A Dopl4_proc.c25 static int snd_opl4_mem_proc_open(struct snd_info_entry *entry, snd_opl4_mem_proc_open() argument
28 struct snd_opl4 *opl4 = entry->private_data; snd_opl4_mem_proc_open()
40 static int snd_opl4_mem_proc_release(struct snd_info_entry *entry, snd_opl4_mem_proc_release() argument
43 struct snd_opl4 *opl4 = entry->private_data; snd_opl4_mem_proc_release()
51 static ssize_t snd_opl4_mem_proc_read(struct snd_info_entry *entry, snd_opl4_mem_proc_read() argument
56 struct snd_opl4 *opl4 = entry->private_data; snd_opl4_mem_proc_read()
71 static ssize_t snd_opl4_mem_proc_write(struct snd_info_entry *entry, snd_opl4_mem_proc_write() argument
77 struct snd_opl4 *opl4 = entry->private_data; snd_opl4_mem_proc_write()
101 struct snd_info_entry *entry; snd_opl4_create_proc() local
103 entry = snd_info_create_card_entry(opl4->card, "opl4-mem", opl4->card->proc_root); snd_opl4_create_proc()
104 if (entry) { snd_opl4_create_proc()
107 entry->mode |= S_IWUSR; snd_opl4_create_proc()
108 entry->size = 4 * 1024 * 1024; snd_opl4_create_proc()
111 entry->size = 1 * 1024 * 1024; snd_opl4_create_proc()
113 entry->content = SNDRV_INFO_CONTENT_DATA; snd_opl4_create_proc()
114 entry->c.ops = &snd_opl4_mem_proc_ops; snd_opl4_create_proc()
115 entry->module = THIS_MODULE; snd_opl4_create_proc()
116 entry->private_data = opl4; snd_opl4_create_proc()
117 if (snd_info_register(entry) < 0) { snd_opl4_create_proc()
118 snd_info_free_entry(entry); snd_opl4_create_proc()
119 entry = NULL; snd_opl4_create_proc()
122 opl4->proc_entry = entry; snd_opl4_create_proc()
/linux-4.4.14/arch/sh/kernel/cpu/sh2/
H A DMakefile5 obj-y := ex.o probe.o entry.o
/linux-4.4.14/arch/sparc/kernel/
H A Djump_label.c13 void arch_jump_label_transform(struct jump_entry *entry, arch_jump_label_transform() argument
17 u32 *insn = (u32 *) (unsigned long) entry->code; arch_jump_label_transform()
20 s32 off = (s32)entry->target - (s32)entry->code; arch_jump_label_transform()
H A Dpci_sun4v.c41 unsigned long entry; /* Index into IOTSB. */ member in struct:iommu_batch
50 static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry) iommu_batch_start() argument
56 p->entry = entry; iommu_batch_start()
66 unsigned long entry = p->entry; iommu_batch_flush() local
73 num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), iommu_batch_flush()
80 devhandle, HV_PCI_TSBID(0, entry), iommu_batch_flush()
85 entry += num; iommu_batch_flush()
90 p->entry = entry; iommu_batch_flush()
96 static inline void iommu_batch_new_entry(unsigned long entry) iommu_batch_new_entry() argument
100 if (p->entry + p->npages == entry) iommu_batch_new_entry()
102 if (p->entry != ~0UL) iommu_batch_new_entry()
104 p->entry = entry; iommu_batch_new_entry()
139 long entry; dma_4v_alloc_coherent() local
159 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, dma_4v_alloc_coherent()
162 if (unlikely(entry == IOMMU_ERROR_CODE)) dma_4v_alloc_coherent()
165 *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); dma_4v_alloc_coherent()
174 entry); dma_4v_alloc_coherent()
197 static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry, dma_4v_iommu_demap() argument
206 HV_PCI_TSBID(0, entry), dma_4v_iommu_demap()
209 entry += num; dma_4v_iommu_demap()
220 unsigned long order, npages, entry; dma_4v_free_coherent() local
227 entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); dma_4v_free_coherent()
228 dma_4v_iommu_demap(&devhandle, entry, npages); dma_4v_free_coherent()
245 long entry; dma_4v_map_page() local
256 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, dma_4v_map_page()
259 if (unlikely(entry == IOMMU_ERROR_CODE)) dma_4v_map_page()
262 bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); dma_4v_map_page()
271 iommu_batch_start(dev, prot, entry); dma_4v_map_page()
302 long entry; dma_4v_unmap_page() local
318 entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT; dma_4v_unmap_page()
319 dma_4v_iommu_demap(&devhandle, entry, npages); dma_4v_unmap_page()
364 unsigned long paddr, npages, entry, out_entry = 0, slen; for_each_sg() local
375 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, for_each_sg()
379 if (unlikely(entry == IOMMU_ERROR_CODE)) { for_each_sg()
386 iommu_batch_new_entry(entry); for_each_sg()
388 /* Convert entry to a dma_addr_t */ for_each_sg()
389 dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT); for_each_sg()
423 out_entry = entry; for_each_sg()
474 unsigned long flags, entry; dma_4v_unmap_sg() local
496 entry = ((dma_handle - tbl->table_map_base) >> shift); dma_4v_unmap_sg()
497 dma_4v_iommu_demap(&devhandle, entry, npages); dma_4v_unmap_sg()
698 /* Clear the entry. */ pci_sun4v_dequeue_msi()
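
iommu_batch_new_entry() above starts a new batch only when the next mapping is not contiguous with the pending one; contiguous entries keep accumulating and are submitted in one hypervisor call. A stand-alone version of that decision, with flush() as a printing stub for pci_sun4v_iommu_map():

#include <stdio.h>

struct batch {
	unsigned long entry;	/* first IOTSB index in the batch */
	unsigned long npages;	/* pages accumulated so far */
};

static void flush(struct batch *p)
{
	if (p->npages)
		printf("flush: entries %lu..%lu\n",
		       p->entry, p->entry + p->npages - 1);
	p->entry += p->npages;	/* mirror iommu_batch_flush() advancing entry */
	p->npages = 0;
}

static void batch_new_entry(struct batch *p, unsigned long entry)
{
	if (p->entry + p->npages == entry)
		return;			/* contiguous: keep batching */
	if (p->entry != ~0UL)
		flush(p);		/* submit what we have */
	p->entry = entry;
}

int main(void)
{
	struct batch p = { ~0UL, 0 };

	batch_new_entry(&p, 100); p.npages += 4;	/* 100..103 */
	batch_new_entry(&p, 104); p.npages += 2;	/* contiguous, same batch */
	batch_new_entry(&p, 300); p.npages += 1;	/* gap: 100..105 flushed */
	flush(&p);
	return 0;
}
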
/linux-4.4.14/fs/ext4/
H A Dblock_validity.c60 struct ext4_system_zone *new_entry = NULL, *entry; add_system_zone() local
66 entry = rb_entry(parent, struct ext4_system_zone, node); add_system_zone()
67 if (start_blk < entry->start_blk) add_system_zone()
69 else if (start_blk >= (entry->start_blk + entry->count)) add_system_zone()
72 if (start_blk + count > (entry->start_blk + add_system_zone()
73 entry->count)) add_system_zone()
74 entry->count = (start_blk + count - add_system_zone()
75 entry->start_blk); add_system_zone()
99 entry = rb_entry(node, struct ext4_system_zone, node); add_system_zone()
100 if (can_merge(entry, new_entry)) { add_system_zone()
101 new_entry->start_blk = entry->start_blk; add_system_zone()
102 new_entry->count += entry->count; add_system_zone()
104 kmem_cache_free(ext4_system_zone_cachep, entry); add_system_zone()
111 entry = rb_entry(node, struct ext4_system_zone, node); add_system_zone()
112 if (can_merge(new_entry, entry)) { add_system_zone()
113 new_entry->count += entry->count; add_system_zone()
115 kmem_cache_free(ext4_system_zone_cachep, entry); add_system_zone()
124 struct ext4_system_zone *entry; debug_print_tree() local
130 entry = rb_entry(node, struct ext4_system_zone, node); debug_print_tree()
132 entry->start_blk, entry->start_blk + entry->count - 1); debug_print_tree()
182 struct ext4_system_zone *entry, *n; ext4_release_system_zone() local
184 rbtree_postorder_for_each_entry_safe(entry, n, ext4_release_system_zone()
186 kmem_cache_free(ext4_system_zone_cachep, entry); ext4_release_system_zone()
199 struct ext4_system_zone *entry; ext4_data_block_valid() local
209 entry = rb_entry(n, struct ext4_system_zone, node); ext4_data_block_valid()
210 if (start_blk + count - 1 < entry->start_blk) ext4_data_block_valid()
212 else if (start_blk >= (entry->start_blk + entry->count)) ext4_data_block_valid()
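Editor's note: add_system_zone() keeps the reserved-block ranges in an rbtree and merges neighbours after an insert. The merge test itself is simple; a sketch under the obvious definition, with field names following the hits above:

    struct zone {
        unsigned long long start_blk;
        unsigned int count;
    };

    /* two zones merge when the first ends exactly where the second begins */
    static int can_merge(const struct zone *a, const struct zone *b)
    {
        return a->start_blk + a->count == b->start_blk;
    }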
/linux-4.4.14/drivers/isdn/mISDN/
H A Ddsp_pipeline.c82 struct dsp_element_entry *entry = mISDN_dsp_dev_release() local
84 list_del(&entry->list); mISDN_dsp_dev_release()
85 kfree(entry); mISDN_dsp_dev_release()
90 struct dsp_element_entry *entry; mISDN_dsp_element_register() local
96 entry = kzalloc(sizeof(struct dsp_element_entry), GFP_ATOMIC); mISDN_dsp_element_register()
97 if (!entry) mISDN_dsp_element_register()
100 entry->elem = elem; mISDN_dsp_element_register()
102 entry->dev.class = elements_class; mISDN_dsp_element_register()
103 entry->dev.release = mISDN_dsp_dev_release; mISDN_dsp_element_register()
104 dev_set_drvdata(&entry->dev, elem); mISDN_dsp_element_register()
105 dev_set_name(&entry->dev, "%s", elem->name); mISDN_dsp_element_register()
106 ret = device_register(&entry->dev); mISDN_dsp_element_register()
112 list_add_tail(&entry->list, &dsp_elements); mISDN_dsp_element_register()
115 ret = device_create_file(&entry->dev, mISDN_dsp_element_register()
131 device_unregister(&entry->dev); mISDN_dsp_element_register()
134 kfree(entry); mISDN_dsp_element_register()
141 struct dsp_element_entry *entry, *n; mISDN_dsp_element_unregister() local
146 list_for_each_entry_safe(entry, n, &dsp_elements, list) mISDN_dsp_element_unregister()
147 if (entry->elem == elem) { mISDN_dsp_element_unregister()
148 device_unregister(&entry->dev); mISDN_dsp_element_unregister()
176 struct dsp_element_entry *entry, *n; dsp_pipeline_module_exit() local
182 list_for_each_entry_safe(entry, n, &dsp_elements, list) { dsp_pipeline_module_exit()
183 list_del(&entry->list); dsp_pipeline_module_exit()
185 __func__, entry->elem->name); dsp_pipeline_module_exit()
186 kfree(entry); dsp_pipeline_module_exit()
210 struct dsp_pipeline_entry *entry, *n; _dsp_pipeline_destroy() local
212 list_for_each_entry_safe(entry, n, &pipeline->list, list) { _dsp_pipeline_destroy()
213 list_del(&entry->list); _dsp_pipeline_destroy()
214 if (entry->elem == dsp_hwec) _dsp_pipeline_destroy()
218 entry->elem->free(entry->p); _dsp_pipeline_destroy()
219 kfree(entry); _dsp_pipeline_destroy()
240 struct dsp_element_entry *entry, *n; dsp_pipeline_build() local
261 list_for_each_entry_safe(entry, n, &dsp_elements, list) dsp_pipeline_build()
262 if (!strcmp(entry->elem->name, name)) { dsp_pipeline_build()
263 elem = entry->elem; dsp_pipeline_build()
269 "entry to pipeline: %s (out of " dsp_pipeline_build()
297 "to add entry to pipeline: " dsp_pipeline_build()
333 struct dsp_pipeline_entry *entry; dsp_pipeline_process_tx() local
338 list_for_each_entry(entry, &pipeline->list, list) dsp_pipeline_process_tx()
339 if (entry->elem->process_tx) dsp_pipeline_process_tx()
340 entry->elem->process_tx(entry->p, data, len); dsp_pipeline_process_tx()
346 struct dsp_pipeline_entry *entry; dsp_pipeline_process_rx() local
351 list_for_each_entry_reverse(entry, &pipeline->list, list) dsp_pipeline_process_rx()
352 if (entry->elem->process_rx) dsp_pipeline_process_rx()
353 entry->elem->process_rx(entry->p, data, len, txlen); dsp_pipeline_process_rx()
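Editor's note: note the symmetry in the last two hits — transmit walks the pipeline head-to-tail, receive walks it tail-to-head, so each element sees rx data at the mirror position of its tx stage. Schematically, using the element list and callbacks shown above:

    /* tx: the first element touches the data first */
    list_for_each_entry(entry, &pipeline->list, list)
        if (entry->elem->process_tx)
            entry->elem->process_tx(entry->p, data, len);

    /* rx: same chain, inverse order */
    list_for_each_entry_reverse(entry, &pipeline->list, list)
        if (entry->elem->process_rx)
            entry->elem->process_rx(entry->p, data, len, txlen);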
/linux-4.4.14/arch/sh/mm/
H A Dtlbex_32.c29 pte_t entry; handle_tlbmiss() local
52 entry = *pte; handle_tlbmiss()
53 if (unlikely(pte_none(entry) || pte_not_present(entry))) handle_tlbmiss()
55 if (unlikely(error_code && !pte_write(entry))) handle_tlbmiss()
59 entry = pte_mkdirty(entry); handle_tlbmiss()
60 entry = pte_mkyoung(entry); handle_tlbmiss()
62 set_pte(pte, entry); handle_tlbmiss()
66 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in handle_tlbmiss()
68 * flush it in order to avoid potential TLB entry duplication. handle_tlbmiss()
H A Dtlb-urb.c4 * TLB entry wiring helpers for URB-equipped parts.
18 * Load the entry for 'addr' into the TLB and wire the entry.
32 * Make sure we're not trying to wire the last TLB entry slot. tlb_wire_entry()
39 * Insert this entry into the highest non-wired TLB slot (via tlb_wire_entry()
46 /* Load the entry into the TLB */ tlb_wire_entry()
62 * Unwire the last wired TLB entry.
65 * TLB entries in an arbitrary order. If you wire TLB entry N, followed
66 * by entry N+1, you must unwire entry N+1 first, then entry N. In this
81 * Make sure we're not trying to unwire a TLB entry when none tlb_unwire_entry()
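Editor's note: the tlb-urb.c comments above spell out the constraint — wired entries must be unwired in the reverse order they were wired. In other words the wired slots form a stack, which is how tlb-sh5.c below implements it. A toy model, with the array size invented for illustration:

    static unsigned long long wired[8];     /* size is illustrative */
    static int top;

    static void wire_push(unsigned long long slot)
    {
        wired[top++] = slot;            /* wire entry N, then N+1 ... */
    }

    static unsigned long long unwire_pop(void)
    {
        return wired[--top];            /* ... unwire N+1 before N */
    }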
H A Dtlb-sh5.c47 * sh64_next_free_dtlb_entry - Find the next available DTLB entry
55 * sh64_get_wired_dtlb_entry - Allocate a wired (locked-in) entry in the DTLB
59 unsigned long long entry = sh64_next_free_dtlb_entry(); sh64_get_wired_dtlb_entry() local
64 return entry; sh64_get_wired_dtlb_entry()
68 * sh64_put_wired_dtlb_entry - Free a wired (locked-in) entry in the DTLB.
70 * @entry: Address of TLB slot.
74 int sh64_put_wired_dtlb_entry(unsigned long long entry) sh64_put_wired_dtlb_entry() argument
76 __flush_tlb_slot(entry); sh64_put_wired_dtlb_entry()
85 * contents of a TLB entry) .. though I have a feeling that this is sh64_put_wired_dtlb_entry()
92 if (entry <= DTLB_FIXED) sh64_put_wired_dtlb_entry()
97 * entry beneath the first 'free' entry! sh64_put_wired_dtlb_entry()
99 if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step)) sh64_put_wired_dtlb_entry()
102 /* If we are, then bring this entry back into the list */ sh64_put_wired_dtlb_entry()
104 cpu_data->dtlb.next = entry; sh64_put_wired_dtlb_entry()
152 unsigned long long entry; tlb_wire_entry() local
159 entry = sh64_get_wired_dtlb_entry(); tlb_wire_entry()
160 dtlb_entries[dtlb_entry++] = entry; tlb_wire_entry()
165 sh64_setup_tlb_slot(entry, addr, get_asid(), paddr); tlb_wire_entry()
172 unsigned long long entry; tlb_unwire_entry() local
178 entry = dtlb_entries[dtlb_entry--]; tlb_unwire_entry()
180 sh64_teardown_tlb_slot(entry); tlb_unwire_entry()
181 sh64_put_wired_dtlb_entry(entry); tlb_unwire_entry()
H A Dtlb-debugfs.c45 unsigned int nentries, entry; tlb_seq_show() local
73 /* Make the "entry >= urb" test fail. */ tlb_seq_show()
91 seq_printf(file, "entry: vpn ppn asid size valid wired\n"); tlb_seq_show()
93 for (entry = 0; entry < nentries; entry++) { tlb_seq_show()
100 val = __raw_readl(addr1 | (entry << MMU_TLB_ENTRY_SHIFT)); tlb_seq_show()
105 val = __raw_readl(addr2 | (entry << MMU_TLB_ENTRY_SHIFT)); tlb_seq_show()
109 val = __raw_readl(data1 | (entry << MMU_TLB_ENTRY_SHIFT)); tlb_seq_show()
113 val = __raw_readl(data2 | (entry << MMU_TLB_ENTRY_SHIFT)); tlb_seq_show()
126 entry, vpn, ppn, asid, tlb_seq_show()
128 (urb <= entry) ? "W" : "-"); tlb_seq_show()
/linux-4.4.14/fs/squashfs/
H A Dcache.c69 struct squashfs_cache_entry *entry; squashfs_cache_get() local
75 if (cache->entry[i].block == block) { squashfs_cache_get()
97 * At least one unused cache entry. A simple squashfs_cache_get()
98 * round-robin strategy is used to choose the entry to squashfs_cache_get()
103 if (cache->entry[i].refcount == 0) squashfs_cache_get()
109 entry = &cache->entry[i]; squashfs_cache_get()
112 * Initialise chosen cache entry, and fill it in from squashfs_cache_get()
116 entry->block = block; squashfs_cache_get()
117 entry->refcount = 1; squashfs_cache_get()
118 entry->pending = 1; squashfs_cache_get()
119 entry->num_waiters = 0; squashfs_cache_get()
120 entry->error = 0; squashfs_cache_get()
123 entry->length = squashfs_read_data(sb, block, length, squashfs_cache_get()
124 &entry->next_index, entry->actor); squashfs_cache_get()
128 if (entry->length < 0) squashfs_cache_get()
129 entry->error = entry->length; squashfs_cache_get()
131 entry->pending = 0; squashfs_cache_get()
134 * While filling this entry one or more other processes squashfs_cache_get()
138 if (entry->num_waiters) { squashfs_cache_get()
140 wake_up_all(&entry->wait_queue); squashfs_cache_get()
150 * previously unused there's one less cache entry available squashfs_cache_get()
153 entry = &cache->entry[i]; squashfs_cache_get()
154 if (entry->refcount == 0) squashfs_cache_get()
156 entry->refcount++; squashfs_cache_get()
159 * If the entry is currently being filled in by another process squashfs_cache_get()
162 if (entry->pending) { squashfs_cache_get()
163 entry->num_waiters++; squashfs_cache_get()
165 wait_event(entry->wait_queue, !entry->pending); squashfs_cache_get()
174 cache->name, i, entry->block, entry->refcount, entry->error); squashfs_cache_get()
176 if (entry->error) squashfs_cache_get()
177 ERROR("Unable to read %s cache entry [%llx]\n", cache->name, squashfs_cache_get()
179 return entry; squashfs_cache_get()
184 * Release cache entry, once usage count is zero it can be reused.
186 void squashfs_cache_put(struct squashfs_cache_entry *entry) squashfs_cache_put() argument
188 struct squashfs_cache *cache = entry->cache; squashfs_cache_put()
191 entry->refcount--; squashfs_cache_put()
192 if (entry->refcount == 0) { squashfs_cache_put()
218 if (cache->entry[i].data) { squashfs_cache_delete()
220 kfree(cache->entry[i].data[j]); squashfs_cache_delete()
221 kfree(cache->entry[i].data); squashfs_cache_delete()
223 kfree(cache->entry[i].actor); squashfs_cache_delete()
226 kfree(cache->entry); squashfs_cache_delete()
233 * size block_size. To avoid vmalloc fragmentation issues each entry
247 cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL); squashfs_cache_init()
248 if (cache->entry == NULL) { squashfs_cache_init()
266 struct squashfs_cache_entry *entry = &cache->entry[i]; squashfs_cache_init() local
268 init_waitqueue_head(&cache->entry[i].wait_queue); squashfs_cache_init()
269 entry->cache = cache; squashfs_cache_init()
270 entry->block = SQUASHFS_INVALID_BLK; squashfs_cache_init()
271 entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL); squashfs_cache_init()
272 if (entry->data == NULL) { squashfs_cache_init()
273 ERROR("Failed to allocate %s cache entry\n", name); squashfs_cache_init()
278 entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); squashfs_cache_init()
279 if (entry->data[j] == NULL) { squashfs_cache_init()
285 entry->actor = squashfs_page_actor_init(entry->data, squashfs_cache_init()
287 if (entry->actor == NULL) { squashfs_cache_init()
288 ERROR("Failed to allocate %s cache entry\n", name); squashfs_cache_init()
302 * Copy up to length bytes from cache entry to buffer starting at offset bytes
303 * into the cache entry. If there's not length bytes then copy the number of
306 int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry, squashfs_copy_data() argument
314 return min(length, entry->length - offset); squashfs_copy_data()
316 while (offset < entry->length) { squashfs_copy_data()
317 void *buff = entry->data[offset / PAGE_CACHE_SIZE] squashfs_copy_data()
319 int bytes = min_t(int, entry->length - offset, squashfs_copy_data()
349 struct squashfs_cache_entry *entry; squashfs_read_metadata() local
354 entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0); squashfs_read_metadata()
355 if (entry->error) { squashfs_read_metadata()
356 res = entry->error; squashfs_read_metadata()
358 } else if (*offset >= entry->length) { squashfs_read_metadata()
363 bytes = squashfs_copy_data(buffer, entry, *offset, length); squashfs_read_metadata()
369 if (*offset == entry->length) { squashfs_read_metadata()
370 *block = entry->next_index; squashfs_read_metadata()
374 squashfs_cache_put(entry); squashfs_read_metadata()
380 squashfs_cache_put(entry); squashfs_read_metadata()
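Editor's note: squashfs_cache_get() first looks for the block already in the cache and only then claims a slot, choosing among unused (refcount == 0) entries round-robin, per the comment above. A condensed model of the selection; the entries and next_blk fields are assumed from context:

    /* returns a slot index, or -1 when every entry is in use */
    static int pick_slot(struct squashfs_cache *cache, u64 block)
    {
        int i, n;

        for (i = 0; i < cache->entries; i++)
            if (cache->entry[i].block == block)
                return i;                       /* cache hit */

        for (i = 0; i < cache->entries; i++) {
            n = (cache->next_blk + i) % cache->entries;
            if (cache->entry[n].refcount == 0)
                return n;                       /* reusable slot */
        }
        return -1;                              /* caller must wait */
    }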
/linux-4.4.14/arch/x86/kvm/
H A Dcpuid.c131 struct kvm_cpuid_entry2 *e, *entry; cpuid_fix_nx_cap() local
133 entry = NULL; cpuid_fix_nx_cap()
137 entry = e; cpuid_fix_nx_cap()
141 if (entry && (entry->edx & F(NX)) && !is_efer_nx()) { cpuid_fix_nx_cap()
142 entry->edx &= ~F(NX); cpuid_fix_nx_cap()
251 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, do_cpuid_1_ent() argument
254 entry->function = function; do_cpuid_1_ent()
255 entry->index = index; do_cpuid_1_ent()
256 cpuid_count(entry->function, entry->index, do_cpuid_1_ent()
257 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx); do_cpuid_1_ent()
258 entry->flags = 0; do_cpuid_1_ent()
261 static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry, __do_cpuid_ent_emulated() argument
266 entry->eax = 1; /* only one leaf currently */ __do_cpuid_ent_emulated()
270 entry->ecx = F(MOVBE); __do_cpuid_ent_emulated()
277 entry->function = func; __do_cpuid_ent_emulated()
278 entry->index = index; __do_cpuid_ent_emulated()
283 static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, __do_cpuid_ent() argument
365 do_cpuid_1_ent(entry, function, index); __do_cpuid_ent()
370 entry->eax = min(entry->eax, (u32)0xd); __do_cpuid_ent()
373 entry->edx &= kvm_supported_word0_x86_features; __do_cpuid_ent()
374 cpuid_mask(&entry->edx, 0); __do_cpuid_ent()
375 entry->ecx &= kvm_supported_word4_x86_features; __do_cpuid_ent()
376 cpuid_mask(&entry->ecx, 4); __do_cpuid_ent()
379 entry->ecx |= F(X2APIC); __do_cpuid_ent()
386 int t, times = entry->eax & 0xff; __do_cpuid_ent()
388 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; __do_cpuid_ent()
389 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; __do_cpuid_ent()
394 do_cpuid_1_ent(&entry[t], function, 0); __do_cpuid_ent()
395 entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; __do_cpuid_ent()
404 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; __do_cpuid_ent()
410 cache_type = entry[i - 1].eax & 0x1f; __do_cpuid_ent()
413 do_cpuid_1_ent(&entry[i], function, i); __do_cpuid_ent()
414 entry[i].flags |= __do_cpuid_ent()
421 entry->eax = 0x4; /* allow ARAT */ __do_cpuid_ent()
422 entry->ebx = 0; __do_cpuid_ent()
423 entry->ecx = 0; __do_cpuid_ent()
424 entry->edx = 0; __do_cpuid_ent()
427 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; __do_cpuid_ent()
430 entry->ebx &= kvm_supported_word9_x86_features; __do_cpuid_ent()
431 cpuid_mask(&entry->ebx, 9); __do_cpuid_ent()
433 entry->ebx |= F(TSC_ADJUST); __do_cpuid_ent()
435 entry->ebx = 0; __do_cpuid_ent()
436 entry->eax = 0; __do_cpuid_ent()
437 entry->ecx = 0; __do_cpuid_ent()
438 entry->edx = 0; __do_cpuid_ent()
466 entry->eax = eax.full; __do_cpuid_ent()
467 entry->ebx = cap.events_mask; __do_cpuid_ent()
468 entry->ecx = 0; __do_cpuid_ent()
469 entry->edx = edx.full; __do_cpuid_ent()
476 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; __do_cpuid_ent()
482 level_type = entry[i - 1].ecx & 0xff00; __do_cpuid_ent()
485 do_cpuid_1_ent(&entry[i], function, i); __do_cpuid_ent()
486 entry[i].flags |= __do_cpuid_ent()
496 entry->eax &= supported; __do_cpuid_ent()
497 entry->ebx = xstate_required_size(supported, false); __do_cpuid_ent()
498 entry->ecx = entry->ebx; __do_cpuid_ent()
499 entry->edx &= supported >> 32; __do_cpuid_ent()
500 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; __do_cpuid_ent()
509 do_cpuid_1_ent(&entry[i], function, idx); __do_cpuid_ent()
511 entry[i].eax &= kvm_supported_word10_x86_features; __do_cpuid_ent()
512 cpuid_mask(&entry[i].eax, 10); __do_cpuid_ent()
513 entry[i].ebx = 0; __do_cpuid_ent()
514 if (entry[i].eax & (F(XSAVES)|F(XSAVEC))) __do_cpuid_ent()
515 entry[i].ebx = __do_cpuid_ent()
519 if (entry[i].eax == 0 || !(supported & mask)) __do_cpuid_ent()
521 if (WARN_ON_ONCE(entry[i].ecx & 1)) __do_cpuid_ent()
524 entry[i].ecx = 0; __do_cpuid_ent()
525 entry[i].edx = 0; __do_cpuid_ent()
526 entry[i].flags |= __do_cpuid_ent()
536 entry->eax = KVM_CPUID_FEATURES; __do_cpuid_ent()
537 entry->ebx = sigptr[0]; __do_cpuid_ent()
538 entry->ecx = sigptr[1]; __do_cpuid_ent()
539 entry->edx = sigptr[2]; __do_cpuid_ent()
543 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) | __do_cpuid_ent()
552 entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); __do_cpuid_ent()
554 entry->ebx = 0; __do_cpuid_ent()
555 entry->ecx = 0; __do_cpuid_ent()
556 entry->edx = 0; __do_cpuid_ent()
559 entry->eax = min(entry->eax, 0x8000001a); __do_cpuid_ent()
562 entry->edx &= kvm_supported_word1_x86_features; __do_cpuid_ent()
563 cpuid_mask(&entry->edx, 1); __do_cpuid_ent()
564 entry->ecx &= kvm_supported_word6_x86_features; __do_cpuid_ent()
565 cpuid_mask(&entry->ecx, 6); __do_cpuid_ent()
569 entry->edx &= (1 << 8); __do_cpuid_ent()
571 entry->edx &= boot_cpu_data.x86_power; __do_cpuid_ent()
572 entry->eax = entry->ebx = entry->ecx = 0; __do_cpuid_ent()
575 unsigned g_phys_as = (entry->eax >> 16) & 0xff; __do_cpuid_ent()
576 unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U); __do_cpuid_ent()
577 unsigned phys_as = entry->eax & 0xff; __do_cpuid_ent()
581 entry->eax = g_phys_as | (virt_as << 8); __do_cpuid_ent()
582 entry->ebx = entry->edx = 0; __do_cpuid_ent()
586 entry->ecx = entry->edx = 0; __do_cpuid_ent()
595 entry->eax = min(entry->eax, 0xC0000004); __do_cpuid_ent()
598 entry->edx &= kvm_supported_word5_x86_features; __do_cpuid_ent()
599 cpuid_mask(&entry->edx, 5); __do_cpuid_ent()
607 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; __do_cpuid_ent()
611 kvm_x86_ops->set_supported_cpuid(function, entry); __do_cpuid_ent()
621 static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func, do_cpuid_ent() argument
625 return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent); do_cpuid_ent()
627 return __do_cpuid_ent(entry, func, idx, nent, maxnent); do_cpuid_ent()
743 /* when no next entry is found, the current entry[i] is reselected */ move_to_next_stateful_cpuid_entry()
754 /* find an entry with matching function, matching index (if needed), and that
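Editor's note: cpuid_fix_nx_cap() at the top of this section masks a single guest-visible feature when the host lacks EFER.NX; the bit lives in CPUID leaf 0x80000001 EDX. A self-contained rendering of the fixup (the constant and function names here are illustrative):

    /* drop NX (CPUID.80000001H:EDX, bit 20) when the host cannot back it */
    #define NX_BIT 20

    static void fix_nx_cap(u32 *edx, int host_has_nx)
    {
        if (!host_has_nx)
            *edx &= ~(1u << NX_BIT);
    }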
/linux-4.4.14/net/netlabel/
H A Dnetlabel_addrlist.c50 * netlbl_af4list_search - Search for a matching IPv4 address entry
55 * Searches the IPv4 address list given by @head. If a matching address entry
73 * netlbl_af4list_search_exact - Search for an exact IPv4 address entry
100 * netlbl_af6list_search - Search for a matching IPv6 address entry
105 * Searches the IPv6 address list given by @head. If a matching address entry
124 * netlbl_af6list_search_exact - Search for an exact IPv6 address entry
152 * netlbl_af4list_add - Add a new IPv4 address entry to a list
153 * @entry: address entry
157 * Add a new address entry to the list pointed to by @head. On success zero is
162 int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head) netlbl_af4list_add() argument
166 iter = netlbl_af4list_search(entry->addr, head); netlbl_af4list_add()
168 iter->addr == entry->addr && iter->mask == entry->mask) netlbl_af4list_add()
173 * address mask such that the entry with the widest mask (smallest netlbl_af4list_add()
177 ntohl(entry->mask) > ntohl(iter->mask)) { list_for_each_entry_rcu()
178 __list_add_rcu(&entry->list, list_for_each_entry_rcu()
183 list_add_tail_rcu(&entry->list, head);
189 * netlbl_af6list_add - Add a new IPv6 address entry to a list
190 * @entry: address entry
194 * Add a new address entry to the list pointed to by @head. On success zero is
199 int netlbl_af6list_add(struct netlbl_af6list *entry, struct list_head *head) netlbl_af6list_add() argument
203 iter = netlbl_af6list_search(&entry->addr, head); netlbl_af6list_add()
205 ipv6_addr_equal(&iter->addr, &entry->addr) && netlbl_af6list_add()
206 ipv6_addr_equal(&iter->mask, &entry->mask)) netlbl_af6list_add()
211 * address mask such that the entry with the widest mask (smallest netlbl_af6list_add()
215 ipv6_addr_cmp(&entry->mask, &iter->mask) > 0) { list_for_each_entry_rcu()
216 __list_add_rcu(&entry->list, list_for_each_entry_rcu()
221 list_add_tail_rcu(&entry->list, head);
227 * netlbl_af4list_remove_entry - Remove an IPv4 address entry
228 * @entry: address entry
231 * Remove the specified IP address entry. The caller is responsible for
235 void netlbl_af4list_remove_entry(struct netlbl_af4list *entry) netlbl_af4list_remove_entry() argument
237 entry->valid = 0; netlbl_af4list_remove_entry()
238 list_del_rcu(&entry->list); netlbl_af4list_remove_entry()
242 * netlbl_af4list_remove - Remove an IPv4 address entry
248 * Remove an IP address entry from the list pointed to by @head. Returns the
249 * entry on success, NULL on failure. The caller is responsible for calling
256 struct netlbl_af4list *entry; netlbl_af4list_remove() local
258 entry = netlbl_af4list_search_exact(addr, mask, head); netlbl_af4list_remove()
259 if (entry == NULL) netlbl_af4list_remove()
261 netlbl_af4list_remove_entry(entry); netlbl_af4list_remove()
262 return entry; netlbl_af4list_remove()
267 * netlbl_af6list_remove_entry - Remove an IPv6 address entry
268 * @entry: address entry
271 * Remove the specified IP address entry. The caller is responsible for
275 void netlbl_af6list_remove_entry(struct netlbl_af6list *entry) netlbl_af6list_remove_entry() argument
277 entry->valid = 0; netlbl_af6list_remove_entry()
278 list_del_rcu(&entry->list); netlbl_af6list_remove_entry()
282 * netlbl_af6list_remove - Remove an IPv6 address entry
288 * Remove an IP address entry from the list pointed to by @head. Returns the
289 * entry on success, NULL on failure. The caller is responsible for calling
297 struct netlbl_af6list *entry; netlbl_af6list_remove() local
299 entry = netlbl_af6list_search_exact(addr, mask, head); netlbl_af6list_remove()
300 if (entry == NULL) netlbl_af6list_remove()
302 netlbl_af6list_remove_entry(entry); netlbl_af6list_remove()
303 return entry; netlbl_af6list_remove()
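Editor's note: both add routines above keep their lists sorted from widest mask (most specific) to narrowest, so the linear search in netlbl_af4list_search() naturally returns the longest matching prefix first. A sketch of the IPv4 ordering rule; the struct fields follow the hits and the list helpers are the usual kernel ones:

    struct af4_node {
        struct list_head list;
        int valid;
        __be32 addr;
        __be32 mask;
    };

    static void af4_insert_ordered(struct af4_node *nw, struct list_head *head)
    {
        struct af4_node *iter;

        list_for_each_entry(iter, head, list)
            if (iter->valid && ntohl(nw->mask) > ntohl(iter->mask)) {
                /* wider mask sorts earlier: insert before iter */
                list_add_tail(&nw->list, &iter->list);
                return;
            }
        list_add_tail(&nw->list, head);     /* narrowest mask goes last */
    }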
H A Dnetlabel_domainhash.c66 * netlbl_domhsh_free_entry - Frees a domain hash table entry
67 * @entry: the entry's RCU field
71 * function so that the memory allocated to a hash table entry can be released
75 static void netlbl_domhsh_free_entry(struct rcu_head *entry) netlbl_domhsh_free_entry() argument
85 ptr = container_of(entry, struct netlbl_dom_map, rcu); netlbl_domhsh_free_entry()
130 * netlbl_domhsh_search - Search for a domain entry
135 * entry if found, otherwise NULL is returned. The caller is responsible for
158 * netlbl_domhsh_search_def - Search for a domain entry
164 * entry if an exact match is found; if an exact match is not present in the
165 * hash table then the default entry is returned if valid otherwise NULL is
172 struct netlbl_dom_map *entry; netlbl_domhsh_search_def() local
174 entry = netlbl_domhsh_search(domain); netlbl_domhsh_search_def()
175 if (entry == NULL) { netlbl_domhsh_search_def()
176 entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def); netlbl_domhsh_search_def()
177 if (entry != NULL && !entry->valid) netlbl_domhsh_search_def()
178 entry = NULL; netlbl_domhsh_search_def()
181 return entry; netlbl_domhsh_search_def()
185 * netlbl_domhsh_audit_add - Generate an audit entry for an add event
186 * @entry: the entry being added
193 * Generate an audit record for adding a new NetLabel/LSM mapping entry with
198 static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry, netlbl_domhsh_audit_add() argument
211 entry->domain ? entry->domain : "(default)"); netlbl_domhsh_audit_add()
228 type = entry->def.type; netlbl_domhsh_audit_add()
229 cipsov4 = entry->def.cipso; netlbl_domhsh_audit_add()
248 * netlbl_domhsh_validate - Validate a new domain mapping entry
249 * @entry: the entry to validate
251 * This function validates the new domain mapping entry to ensure that it is
252 * a valid entry. Returns zero on success, negative values on failure.
255 static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry) netlbl_domhsh_validate() argument
264 if (entry == NULL) netlbl_domhsh_validate()
267 switch (entry->def.type) { netlbl_domhsh_validate()
269 if (entry->def.cipso != NULL || entry->def.addrsel != NULL) netlbl_domhsh_validate()
273 if (entry->def.cipso == NULL) netlbl_domhsh_validate()
277 netlbl_af4list_foreach(iter4, &entry->def.addrsel->list4) { netlbl_domhsh_validate()
293 netlbl_af6list_foreach(iter6, &entry->def.addrsel->list6) { netlbl_domhsh_validate()
355 * netlbl_domhsh_add - Adds a entry to the domain hash table
356 * @entry: the entry to add
360 * Adds a new entry to the domain hash table and handles any updates to the
365 int netlbl_domhsh_add(struct netlbl_dom_map *entry, netlbl_domhsh_add() argument
377 ret_val = netlbl_domhsh_validate(entry); netlbl_domhsh_add()
387 if (entry->domain != NULL) netlbl_domhsh_add()
388 entry_old = netlbl_domhsh_search(entry->domain); netlbl_domhsh_add()
390 entry_old = netlbl_domhsh_search_def(entry->domain); netlbl_domhsh_add()
392 entry->valid = 1; netlbl_domhsh_add()
394 if (entry->domain != NULL) { netlbl_domhsh_add()
395 u32 bkt = netlbl_domhsh_hash(entry->domain); netlbl_domhsh_add()
396 list_add_tail_rcu(&entry->list, netlbl_domhsh_add()
399 INIT_LIST_HEAD(&entry->list); netlbl_domhsh_add()
400 rcu_assign_pointer(netlbl_domhsh_def, entry); netlbl_domhsh_add()
403 if (entry->def.type == NETLBL_NLTYPE_ADDRSELECT) { netlbl_domhsh_add()
405 &entry->def.addrsel->list4) netlbl_domhsh_add()
406 netlbl_domhsh_audit_add(entry, iter4, NULL, netlbl_domhsh_add()
410 &entry->def.addrsel->list6) netlbl_domhsh_add()
411 netlbl_domhsh_audit_add(entry, NULL, iter6, netlbl_domhsh_add()
415 netlbl_domhsh_audit_add(entry, NULL, NULL, netlbl_domhsh_add()
418 entry->def.type == NETLBL_NLTYPE_ADDRSELECT) { netlbl_domhsh_add()
427 netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) netlbl_domhsh_add()
435 netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) netlbl_domhsh_add()
445 &entry->def.addrsel->list4) { netlbl_domhsh_add()
456 &entry->def.addrsel->list6) { netlbl_domhsh_add()
476 * netlbl_domhsh_add_default - Adds the default entry to the domain hash table
477 * @entry: the entry to add
481 * Adds a new default entry to the domain hash table and handles any updates
486 int netlbl_domhsh_add_default(struct netlbl_dom_map *entry, netlbl_domhsh_add_default() argument
489 return netlbl_domhsh_add(entry, audit_info); netlbl_domhsh_add_default()
493 * netlbl_domhsh_remove_entry - Removes a given entry from the domain table
494 * @entry: the entry to remove
498 * Removes an entry from the domain hash table and handles any updates to the
504 int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, netlbl_domhsh_remove_entry() argument
510 if (entry == NULL) netlbl_domhsh_remove_entry()
514 if (entry->valid) { netlbl_domhsh_remove_entry()
515 entry->valid = 0; netlbl_domhsh_remove_entry()
516 if (entry != rcu_dereference(netlbl_domhsh_def)) netlbl_domhsh_remove_entry()
517 list_del_rcu(&entry->list); netlbl_domhsh_remove_entry()
528 entry->domain ? entry->domain : "(default)", netlbl_domhsh_remove_entry()
537 switch (entry->def.type) { netlbl_domhsh_remove_entry()
540 &entry->def.addrsel->list4) { netlbl_domhsh_remove_entry()
548 cipso_v4_doi_putdef(entry->def.cipso); netlbl_domhsh_remove_entry()
551 call_rcu(&entry->rcu, netlbl_domhsh_free_entry); netlbl_domhsh_remove_entry()
558 * netlbl_domhsh_remove_af4 - Removes an address selector entry
581 struct netlbl_domaddr4_map *entry; netlbl_domhsh_remove_af4() local
615 entry = netlbl_domhsh_addr4_entry(entry_addr); netlbl_domhsh_remove_af4()
616 cipso_v4_doi_putdef(entry->def.cipso); netlbl_domhsh_remove_af4()
617 kfree(entry); netlbl_domhsh_remove_af4()
626 * netlbl_domhsh_remove - Removes an entry from the domain hash table
631 * Removes an entry from the domain hash table and handles any updates to the
639 struct netlbl_dom_map *entry; netlbl_domhsh_remove() local
643 entry = netlbl_domhsh_search(domain); netlbl_domhsh_remove()
645 entry = netlbl_domhsh_search_def(domain); netlbl_domhsh_remove()
646 ret_val = netlbl_domhsh_remove_entry(entry, audit_info); netlbl_domhsh_remove()
653 * netlbl_domhsh_remove_default - Removes the default entry from the table
657 * Removes/resets the default entry for the domain hash table and handles any
668 * netlbl_domhsh_getentry - Get an entry from the domain hash table
672 * Look through the domain hash table searching for an entry to match @domain,
673 * return a pointer to a copy of the entry or NULL. The caller is responsible
683 * netlbl_domhsh_getentry_af4 - Get an entry from the domain hash table
688 * Look through the domain hash table searching for an entry to match @domain
689 * and @addr, return a pointer to a copy of the entry or NULL. The caller is
713 * netlbl_domhsh_getentry_af6 - Get an entry from the domain hash table
718 * Look through the domain hash table searching for an entry to match @domain
719 * and @addr, return a pointer to a copy of the entry or NULL. The caller is
746 * @callback: callback for each entry
751 * buckets and @skip_chain entries. For each entry in the table call
757 netlbl_domhsh_walk(u32 *skip_bkt, u32 *skip_chain, int (*callback) (struct netlbl_dom_map *entry, void *arg), void *cb_arg) netlbl_domhsh_walk() argument
H A Dnetlabel_unlabeled.c68 * LSM. The hash table is used to lookup the network interface entry
69 * (struct netlbl_unlhsh_iface) and then the interface entry is used to
71 * match cannot be found in the hash table then the default entry
72 * (netlbl_unlhsh_def) is used. The IP address entry list
155 * netlbl_unlhsh_free_iface - Frees an interface entry from the hash table
156 * @entry: the entry's RCU field
160 * function so that memory allocated to a hash table interface entry can be
162 * the IPv4 and IPv6 address lists contained as part of an interface entry. It
163 * is up to the rest of the code to make sure an interface entry is only freed
167 static void netlbl_unlhsh_free_iface(struct rcu_head *entry) netlbl_unlhsh_free_iface() argument
177 iface = container_of(entry, struct netlbl_unlhsh_iface, rcu); netlbl_unlhsh_free_iface()
212 * netlbl_unlhsh_search_iface - Search for a matching interface entry
217 * interface entry which matches @ifindex, otherwise NULL is returned. The
238 * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table
239 * @iface: the associated interface entry
242 * @secid: LSM secid value for entry
245 * Add a new address entry into the unlabeled connection hash table using the
246 * interface entry specified by @iface. On success zero is returned, otherwise
256 struct netlbl_unlhsh_addr4 *entry; netlbl_unlhsh_add_addr4() local
258 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); netlbl_unlhsh_add_addr4()
259 if (entry == NULL) netlbl_unlhsh_add_addr4()
262 entry->list.addr = addr->s_addr & mask->s_addr; netlbl_unlhsh_add_addr4()
263 entry->list.mask = mask->s_addr; netlbl_unlhsh_add_addr4()
264 entry->list.valid = 1; netlbl_unlhsh_add_addr4()
265 entry->secid = secid; netlbl_unlhsh_add_addr4()
268 ret_val = netlbl_af4list_add(&entry->list, &iface->addr4_list); netlbl_unlhsh_add_addr4()
272 kfree(entry); netlbl_unlhsh_add_addr4()
278 * netlbl_unlhsh_add_addr6 - Add a new IPv6 address entry to the hash table
279 * @iface: the associated interface entry
282 * @secid: LSM secid value for entry
285 * Add a new address entry into the unlabeled connection hash table using the
286 * interface entry specified by @iface. On success zero is returned, otherwise
296 struct netlbl_unlhsh_addr6 *entry; netlbl_unlhsh_add_addr6() local
298 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); netlbl_unlhsh_add_addr6()
299 if (entry == NULL) netlbl_unlhsh_add_addr6()
302 entry->list.addr = *addr; netlbl_unlhsh_add_addr6()
303 entry->list.addr.s6_addr32[0] &= mask->s6_addr32[0]; netlbl_unlhsh_add_addr6()
304 entry->list.addr.s6_addr32[1] &= mask->s6_addr32[1]; netlbl_unlhsh_add_addr6()
305 entry->list.addr.s6_addr32[2] &= mask->s6_addr32[2]; netlbl_unlhsh_add_addr6()
306 entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; netlbl_unlhsh_add_addr6()
307 entry->list.mask = *mask; netlbl_unlhsh_add_addr6()
308 entry->list.valid = 1; netlbl_unlhsh_add_addr6()
309 entry->secid = secid; netlbl_unlhsh_add_addr6()
312 ret_val = netlbl_af6list_add(&entry->list, &iface->addr6_list); netlbl_unlhsh_add_addr6()
316 kfree(entry); netlbl_unlhsh_add_addr6()
322 * netlbl_unlhsh_add_iface - Adds a new interface entry to the hash table
326 * Add a new, empty, interface entry into the unlabeled connection hash table.
327 * On success a pointer to the new interface entry is returned, on failure NULL
369 * netlbl_unlhsh_add - Adds a new entry to the unlabeled connection hash table
375 * @secid: LSM secid value for the entry
379 * Adds a new entry to the unlabeled connection hash table. Returns zero on
473 * netlbl_unlhsh_remove_addr4 - Remove an IPv4 address entry
475 * @iface: interface entry
481 * Remove an IP address entry from the unlabeled connection hash table.
492 struct netlbl_unlhsh_addr4 *entry; netlbl_unlhsh_remove_addr4() local
503 entry = netlbl_unlhsh_addr4_entry(list_entry); netlbl_unlhsh_remove_addr4()
505 entry = NULL; netlbl_unlhsh_remove_addr4()
516 if (entry != NULL && netlbl_unlhsh_remove_addr4()
517 security_secid_to_secctx(entry->secid, netlbl_unlhsh_remove_addr4()
522 audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0); netlbl_unlhsh_remove_addr4()
526 if (entry == NULL) netlbl_unlhsh_remove_addr4()
529 kfree_rcu(entry, rcu); netlbl_unlhsh_remove_addr4()
535 * netlbl_unlhsh_remove_addr6 - Remove an IPv6 address entry
537 * @iface: interface entry
543 * Remove an IP address entry from the unlabeled connection hash table.
554 struct netlbl_unlhsh_addr6 *entry; netlbl_unlhsh_remove_addr6() local
564 entry = netlbl_unlhsh_addr6_entry(list_entry); netlbl_unlhsh_remove_addr6()
566 entry = NULL; netlbl_unlhsh_remove_addr6()
577 if (entry != NULL && netlbl_unlhsh_remove_addr6()
578 security_secid_to_secctx(entry->secid, netlbl_unlhsh_remove_addr6()
583 audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0); netlbl_unlhsh_remove_addr6()
587 if (entry == NULL) netlbl_unlhsh_remove_addr6()
590 kfree_rcu(entry, rcu); netlbl_unlhsh_remove_addr6()
596 * netlbl_unlhsh_condremove_iface - Remove an interface entry
597 * @iface: the interface entry
600 * Remove an interface entry from the unlabeled connection hash table if it is
601 * empty. An interface entry is considered to be empty if there are no
634 * netlbl_unlhsh_remove - Remove an entry from the unlabeled hash table
643 * Removes an existing entry from the unlabeled connection hash table.
891 * connection entry to the hash table. Returns zero on success, negative
907 * single entry. However, allow users to create two entries, one each netlbl_unlabel_staticadd()
943 * unlabeled connection entry. Returns zero on success, negative values on
958 * single entry. However, allow users to create two entries, one each netlbl_unlabel_staticadddef()
992 * unlabeled connection entry. Returns zero on success, negative values on
1007 * IPv4 and IPv6 in the same entry. */ netlbl_unlabel_staticremove()
1034 * unlabeled connection entry. Returns zero on success, negative values on
1048 * IPv4 and IPv6 in the same entry. */ netlbl_unlabel_staticremovedef()
1070 * @iface: the interface entry
1071 * @addr4: the IPv4 address entry
1072 * @addr6: the IPv6 address entry
1078 * can be specified, not both, the other unspecified entry should be set to
1260 * unlabeled connection entry in a form suitable for use in a kernel generated
1527 struct netlbl_dom_map *entry; netlbl_unlabel_defconf() local
1537 entry = kzalloc(sizeof(*entry), GFP_KERNEL); netlbl_unlabel_defconf()
1538 if (entry == NULL) netlbl_unlabel_defconf()
1540 entry->def.type = NETLBL_NLTYPE_UNLABELED; netlbl_unlabel_defconf()
1541 ret_val = netlbl_domhsh_add_default(entry, &audit_info); netlbl_unlabel_defconf()
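Editor's note: netlbl_unlhsh_add_addr6() above stores the address pre-masked, one 32-bit word at a time, so later lookups can compare words directly. The unrolled form is what the source does; rolled up, as a small helper:

    static void mask_in6(struct in6_addr *addr, const struct in6_addr *mask)
    {
        int i;

        for (i = 0; i < 4; i++)
            addr->s6_addr32[i] &= mask->s6_addr32[i];
    }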
/linux-4.4.14/arch/x86/kernel/
H A Djump_label.c39 static void __jump_label_transform(struct jump_entry *entry, __jump_label_transform() argument
54 if (unlikely(memcmp((void *)entry->code, default_nop, 5) __jump_label_transform()
56 bug_at((void *)entry->code, __LINE__); __jump_label_transform()
62 if (unlikely(memcmp((void *)entry->code, ideal_nop, 5) __jump_label_transform()
64 bug_at((void *)entry->code, __LINE__); __jump_label_transform()
68 code.offset = entry->target - __jump_label_transform()
69 (entry->code + JUMP_LABEL_NOP_SIZE); __jump_label_transform()
78 if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0)) __jump_label_transform()
79 bug_at((void *)entry->code, __LINE__); __jump_label_transform()
82 code.offset = entry->target - __jump_label_transform()
83 (entry->code + JUMP_LABEL_NOP_SIZE); __jump_label_transform()
84 if (unlikely(memcmp((void *)entry->code, &code, 5) != 0)) __jump_label_transform()
85 bug_at((void *)entry->code, __LINE__); __jump_label_transform()
99 (*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE); __jump_label_transform()
101 text_poke_bp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE, __jump_label_transform()
102 (void *)entry->code + JUMP_LABEL_NOP_SIZE); __jump_label_transform()
105 void arch_jump_label_transform(struct jump_entry *entry, arch_jump_label_transform() argument
110 __jump_label_transform(entry, type, NULL, 0); arch_jump_label_transform()
121 __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, arch_jump_label_transform_static() argument
141 __jump_label_transform(entry, type, text_poke_early, 1); arch_jump_label_transform_static()
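Editor's note: the offset arithmetic in __jump_label_transform() is the core of the patch — an x86 near jmp is the byte 0xE9 followed by a rel32 counted from the end of the 5-byte instruction, hence target - (code + JUMP_LABEL_NOP_SIZE). A sketch of building that instruction; the union layout is what the patch needs but the names are invented:

    union jump_code {
        struct {
            unsigned char opcode;   /* 0xE9: near jmp rel32 */
            int offset;             /* relative to end of the insn */
        } __attribute__((packed));
        unsigned char bytes[5];
    };

    static void make_jump(union jump_code *c, unsigned long code,
                          unsigned long target)
    {
        c->opcode = 0xe9;
        c->offset = target - (code + 5);    /* 5 == JUMP_LABEL_NOP_SIZE */
    }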
H A Dresource.c28 struct e820entry *entry; remove_e820_regions() local
31 entry = &e820.map[i]; remove_e820_regions()
33 resource_clip(avail, entry->addr, remove_e820_regions()
34 entry->addr + entry->size - 1); remove_e820_regions()
H A Dasm-offsets_64.c32 #define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry) main()
52 #define ENTRY(entry) OFFSET(saved_context_ ## entry, saved_context, entry) main()
/linux-4.4.14/arch/sh/kernel/
H A Dperf_callchain.c24 struct perf_callchain_entry *entry = data; callchain_address() local
27 perf_callchain_store(entry, addr); callchain_address()
36 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) perf_callchain_kernel() argument
38 perf_callchain_store(entry, regs->pc); perf_callchain_kernel()
40 unwind_stack(NULL, regs, NULL, &callchain_ops, entry); perf_callchain_kernel()
/linux-4.4.14/arch/mips/mm/
H A Dpgtable-64.c19 unsigned long entry; pgd_init() local
22 entry = (unsigned long)invalid_pte_table; pgd_init()
24 entry = (unsigned long)invalid_pmd_table; pgd_init()
31 p[0] = entry; pgd_init()
32 p[1] = entry; pgd_init()
33 p[2] = entry; pgd_init()
34 p[3] = entry; pgd_init()
35 p[4] = entry; pgd_init()
37 p[-3] = entry; pgd_init()
38 p[-2] = entry; pgd_init()
39 p[-1] = entry; pgd_init()
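Editor's note: pgd_init() above is just a manually unrolled fill — every slot of a fresh page-table page points at the shared invalid table so unmapped lookups land somewhere harmless. The rolled-up equivalent, with the slot count as a parameter:

    static void pgd_fill(unsigned long *p, unsigned long entry, int nslots)
    {
        int i;

        for (i = 0; i < nslots; i++)
            p[i] = entry;   /* e.g. (unsigned long)invalid_pte_table */
    }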
/linux-4.4.14/drivers/gpu/drm/ttm/
H A Dttm_execbuf_util.c35 ttm_eu_backoff_reservation_reverse(struct list_head *list, struct ttm_validate_buffer *entry) ttm_eu_backoff_reservation_reverse() argument
38 list_for_each_entry_continue_reverse(entry, list, head) { list_for_each_entry_continue_reverse()
39 struct ttm_buffer_object *bo = entry->bo; list_for_each_entry_continue_reverse()
47 struct ttm_validate_buffer *entry; ttm_eu_del_from_lru_locked() local
49 list_for_each_entry(entry, list, head) { list_for_each_entry()
50 struct ttm_buffer_object *bo = entry->bo; list_for_each_entry()
60 struct ttm_validate_buffer *entry; ttm_eu_backoff_reservation() local
66 entry = list_first_entry(list, struct ttm_validate_buffer, head); ttm_eu_backoff_reservation()
67 glob = entry->bo->glob; ttm_eu_backoff_reservation()
70 list_for_each_entry(entry, list, head) { list_for_each_entry()
71 struct ttm_buffer_object *bo = entry->bo; list_for_each_entry()
100 struct ttm_validate_buffer *entry; ttm_eu_reserve_buffers() local
106 entry = list_first_entry(list, struct ttm_validate_buffer, head); ttm_eu_reserve_buffers()
107 glob = entry->bo->glob; ttm_eu_reserve_buffers()
112 list_for_each_entry(entry, list, head) { list_for_each_entry()
113 struct ttm_buffer_object *bo = entry->bo; list_for_each_entry()
123 struct ttm_validate_buffer *safe = entry; list_for_each_entry()
124 entry = list_prev_entry(entry, head); list_for_each_entry()
131 if (!entry->shared) list_for_each_entry()
143 ttm_eu_backoff_reservation_reverse(list, entry); list_for_each_entry()
153 if (!ret && entry->shared) list_for_each_entry()
169 list_del(&entry->head); list_for_each_entry()
170 list_add(&entry->head, list); list_for_each_entry()
185 struct ttm_validate_buffer *entry; ttm_eu_fence_buffer_objects() local
201 list_for_each_entry(entry, list, head) { list_for_each_entry()
202 bo = entry->bo; list_for_each_entry()
203 if (entry->shared) list_for_each_entry()
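Editor's note: ttm_eu_reserve_buffers() takes the validate list in order and, when it hits a contended buffer, unwinds every reservation taken so far in reverse before sleeping on the contended one — the standard ordered-locking recipe for deadlock avoidance between tasks. A sketch of the unwind; the unreserve helper name is assumed, since the hits elide the loop body:

    /* release, in reverse, every buffer reserved before 'entry' */
    list_for_each_entry_continue_reverse(entry, list, head)
        __ttm_bo_unreserve(entry->bo);      /* assumed helper name */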
/linux-4.4.14/arch/cris/arch-v32/mm/
H A Dintmem.c22 struct list_head entry; member in struct:intmem_allocation
45 list_add_tail(&alloc->entry, &intmem_allocations); crisv32_intmem_init()
58 list_for_each_entry_safe(allocation, tmp, &intmem_allocations, entry) { crisv32_intmem_alloc()
72 list_add(&alloc->entry, &allocation->entry); crisv32_intmem_alloc()
81 list_add_tail(&tmp->entry, crisv32_intmem_alloc()
82 &allocation->entry); crisv32_intmem_alloc()
105 list_for_each_entry_safe(allocation, tmp, &intmem_allocations, entry) { crisv32_intmem_free()
108 list_entry(allocation->entry.prev, crisv32_intmem_free()
109 struct intmem_allocation, entry); crisv32_intmem_free()
111 list_entry(allocation->entry.next, crisv32_intmem_free()
112 struct intmem_allocation, entry); crisv32_intmem_free()
119 list_del(&allocation->entry); crisv32_intmem_free()
126 list_del(&next->entry); crisv32_intmem_free()
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
H A Dresource.c84 u32 entry; c4iw_get_resource() local
85 entry = c4iw_id_alloc(id_table); c4iw_get_resource()
86 if (entry == (u32)(-1)) c4iw_get_resource()
88 return entry; c4iw_get_resource()
91 void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry) c4iw_put_resource() argument
93 PDBG("%s entry 0x%x\n", __func__, entry); c4iw_put_resource()
94 c4iw_id_free(id_table, entry); c4iw_put_resource()
99 struct c4iw_qid_list *entry; c4iw_get_cqid() local
105 entry = list_entry(uctx->cqids.next, struct c4iw_qid_list, c4iw_get_cqid()
106 entry); c4iw_get_cqid()
107 list_del(&entry->entry); c4iw_get_cqid()
108 qid = entry->qid; c4iw_get_cqid()
109 kfree(entry); c4iw_get_cqid()
118 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_get_cqid()
119 if (!entry) c4iw_get_cqid()
121 entry->qid = i; c4iw_get_cqid()
122 list_add_tail(&entry->entry, &uctx->cqids); c4iw_get_cqid()
129 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_get_cqid()
130 if (!entry) c4iw_get_cqid()
132 entry->qid = qid; c4iw_get_cqid()
133 list_add_tail(&entry->entry, &uctx->qpids); c4iw_get_cqid()
135 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_get_cqid()
136 if (!entry) c4iw_get_cqid()
138 entry->qid = i; c4iw_get_cqid()
139 list_add_tail(&entry->entry, &uctx->qpids); c4iw_get_cqid()
155 struct c4iw_qid_list *entry; c4iw_put_cqid() local
157 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_put_cqid()
158 if (!entry) c4iw_put_cqid()
161 entry->qid = qid; c4iw_put_cqid()
163 list_add_tail(&entry->entry, &uctx->cqids); c4iw_put_cqid()
169 struct c4iw_qid_list *entry; c4iw_get_qpid() local
175 entry = list_entry(uctx->qpids.next, struct c4iw_qid_list, c4iw_get_qpid()
176 entry); c4iw_get_qpid()
177 list_del(&entry->entry); c4iw_get_qpid()
178 qid = entry->qid; c4iw_get_qpid()
179 kfree(entry); c4iw_get_qpid()
192 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_get_qpid()
193 if (!entry) c4iw_get_qpid()
195 entry->qid = i; c4iw_get_qpid()
196 list_add_tail(&entry->entry, &uctx->qpids); c4iw_get_qpid()
203 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_get_qpid()
204 if (!entry) c4iw_get_qpid()
206 entry->qid = qid; c4iw_get_qpid()
207 list_add_tail(&entry->entry, &uctx->cqids); c4iw_get_qpid()
209 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_get_qpid()
210 if (!entry) c4iw_get_qpid()
212 entry->qid = i; c4iw_get_qpid()
213 list_add_tail(&entry->entry, &uctx->cqids); c4iw_get_qpid()
229 struct c4iw_qid_list *entry; c4iw_put_qpid() local
231 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_put_qpid()
232 if (!entry) c4iw_put_qpid()
235 entry->qid = qid; c4iw_put_qpid()
237 list_add_tail(&entry->entry, &uctx->qpids); c4iw_put_qpid()
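Editor's note: the c4iw_get_cqid()/c4iw_get_qpid() hits implement a per-context free list — pop a cached id when one is queued, otherwise carve a fresh aligned range from the global allocator and queue the spare ids for later reuse. The pop half, schematically, with types following the hits:

    /* returns 0 when the cache is empty and the caller must allocate */
    static u32 qid_pop(struct list_head *cache)
    {
        struct c4iw_qid_list *e;
        u32 qid;

        if (list_empty(cache))
            return 0;
        e = list_first_entry(cache, struct c4iw_qid_list, entry);
        list_del(&e->entry);
        qid = e->qid;
        kfree(e);
        return qid;
    }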
/linux-4.4.14/security/integrity/ima/
H A Dima_policy.c183 struct ima_rule_entry *entry, *tmp; ima_lsm_update_rules() local
188 list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) { ima_lsm_update_rules()
190 if (!entry->lsm[i].rule) ima_lsm_update_rules()
192 result = security_filter_rule_init(entry->lsm[i].type, ima_lsm_update_rules()
194 entry->lsm[i].args_p, ima_lsm_update_rules()
195 &entry->lsm[i].rule); ima_lsm_update_rules()
196 BUG_ON(!entry->lsm[i].rule); ima_lsm_update_rules()
329 struct ima_rule_entry *entry; ima_match_policy() local
332 list_for_each_entry(entry, ima_rules, list) { list_for_each_entry()
334 if (!(entry->action & actmask)) list_for_each_entry()
337 if (!ima_match_rules(entry, inode, func, mask)) list_for_each_entry()
340 action |= entry->flags & IMA_ACTION_FLAGS; list_for_each_entry()
342 action |= entry->action & IMA_DO_MASK; list_for_each_entry()
343 if (entry->action & IMA_APPRAISE) list_for_each_entry()
344 action |= get_subaction(entry, func); list_for_each_entry()
346 if (entry->action & IMA_DO_MASK) list_for_each_entry()
347 actmask &= ~(entry->action | entry->action << 1); list_for_each_entry()
349 actmask &= ~(entry->action | entry->action >> 1); list_for_each_entry()
366 struct ima_rule_entry *entry; ima_update_policy_flag() local
369 list_for_each_entry(entry, ima_rules, list) { list_for_each_entry()
370 if (entry->action & IMA_DO_MASK) list_for_each_entry()
371 ima_policy_flag |= entry->action; list_for_each_entry()
467 static int ima_lsm_rule_init(struct ima_rule_entry *entry, ima_lsm_rule_init() argument
472 if (entry->lsm[lsm_rule].rule) ima_lsm_rule_init()
475 entry->lsm[lsm_rule].args_p = match_strdup(args); ima_lsm_rule_init()
476 if (!entry->lsm[lsm_rule].args_p) ima_lsm_rule_init()
479 entry->lsm[lsm_rule].type = audit_type; ima_lsm_rule_init()
480 result = security_filter_rule_init(entry->lsm[lsm_rule].type, ima_lsm_rule_init()
482 entry->lsm[lsm_rule].args_p, ima_lsm_rule_init()
483 &entry->lsm[lsm_rule].rule); ima_lsm_rule_init()
484 if (!entry->lsm[lsm_rule].rule) { ima_lsm_rule_init()
485 kfree(entry->lsm[lsm_rule].args_p); ima_lsm_rule_init()
499 static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) ima_parse_rule() argument
508 entry->uid = INVALID_UID; ima_parse_rule()
509 entry->fowner = INVALID_UID; ima_parse_rule()
510 entry->action = UNKNOWN; ima_parse_rule()
525 if (entry->action != UNKNOWN) ima_parse_rule()
528 entry->action = MEASURE; ima_parse_rule()
533 if (entry->action != UNKNOWN) ima_parse_rule()
536 entry->action = DONT_MEASURE; ima_parse_rule()
541 if (entry->action != UNKNOWN) ima_parse_rule()
544 entry->action = APPRAISE; ima_parse_rule()
549 if (entry->action != UNKNOWN) ima_parse_rule()
552 entry->action = DONT_APPRAISE; ima_parse_rule()
557 if (entry->action != UNKNOWN) ima_parse_rule()
560 entry->action = AUDIT; ima_parse_rule()
565 if (entry->func) ima_parse_rule()
569 entry->func = FILE_CHECK; ima_parse_rule()
572 entry->func = FILE_CHECK; ima_parse_rule()
574 entry->func = MODULE_CHECK; ima_parse_rule()
576 entry->func = FIRMWARE_CHECK; ima_parse_rule()
579 entry->func = MMAP_CHECK; ima_parse_rule()
581 entry->func = BPRM_CHECK; ima_parse_rule()
585 entry->flags |= IMA_FUNC; ima_parse_rule()
590 if (entry->mask) ima_parse_rule()
598 entry->mask = MAY_EXEC; ima_parse_rule()
600 entry->mask = MAY_WRITE; ima_parse_rule()
602 entry->mask = MAY_READ; ima_parse_rule()
604 entry->mask = MAY_APPEND; ima_parse_rule()
608 entry->flags |= (*args[0].from == '^') ima_parse_rule()
614 if (entry->fsmagic) { ima_parse_rule()
619 result = kstrtoul(args[0].from, 16, &entry->fsmagic); ima_parse_rule()
621 entry->flags |= IMA_FSMAGIC; ima_parse_rule()
626 if (memchr_inv(entry->fsuuid, 0x00, ima_parse_rule()
627 sizeof(entry->fsuuid))) { ima_parse_rule()
633 entry->fsuuid); ima_parse_rule()
635 entry->flags |= IMA_FSUUID; ima_parse_rule()
643 if (uid_valid(entry->uid)) { ima_parse_rule()
650 entry->uid = make_kuid(current_user_ns(), ima_parse_rule()
652 if (!uid_valid(entry->uid) || ima_parse_rule()
656 entry->flags |= (token == Opt_uid) ima_parse_rule()
663 if (uid_valid(entry->fowner)) { ima_parse_rule()
670 entry->fowner = make_kuid(current_user_ns(), (uid_t)lnum); ima_parse_rule()
671 if (!uid_valid(entry->fowner) || (((uid_t)lnum) != lnum)) ima_parse_rule()
674 entry->flags |= IMA_FOWNER; ima_parse_rule()
679 result = ima_lsm_rule_init(entry, args, ima_parse_rule()
685 result = ima_lsm_rule_init(entry, args, ima_parse_rule()
691 result = ima_lsm_rule_init(entry, args, ima_parse_rule()
697 result = ima_lsm_rule_init(entry, args, ima_parse_rule()
703 result = ima_lsm_rule_init(entry, args, ima_parse_rule()
709 result = ima_lsm_rule_init(entry, args, ima_parse_rule()
714 if (entry->action != APPRAISE) { ima_parse_rule()
721 entry->flags |= IMA_DIGSIG_REQUIRED; ima_parse_rule()
726 entry->flags |= IMA_PERMIT_DIRECTIO; ima_parse_rule()
734 if (!result && (entry->action == UNKNOWN)) ima_parse_rule()
736 else if (entry->func == MODULE_CHECK) ima_parse_rule()
738 else if (entry->func == FIRMWARE_CHECK) ima_parse_rule()
756 struct ima_rule_entry *entry; ima_parse_add_rule() local
767 entry = kzalloc(sizeof(*entry), GFP_KERNEL); ima_parse_add_rule()
768 if (!entry) { ima_parse_add_rule()
774 INIT_LIST_HEAD(&entry->list); ima_parse_add_rule()
776 result = ima_parse_rule(p, entry); ima_parse_add_rule()
778 kfree(entry); ima_parse_add_rule()
786 list_add_tail(&entry->list, &ima_policy_rules); ima_parse_add_rule()
795 struct ima_rule_entry *entry, *tmp; ima_delete_rules() local
799 list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) { ima_delete_rules()
801 kfree(entry->lsm[i].args_p); ima_delete_rules()
803 list_del(&entry->list); ima_delete_rules()
804 kfree(entry); ima_delete_rules()
H A Dima_api.c25 * ima_free_template_entry - free an existing template entry
27 void ima_free_template_entry(struct ima_template_entry *entry) ima_free_template_entry() argument
31 for (i = 0; i < entry->template_desc->num_fields; i++) ima_free_template_entry()
32 kfree(entry->template_data[i].data); ima_free_template_entry()
34 kfree(entry); ima_free_template_entry()
38 * ima_alloc_init_template - create and initialize a new template entry
40 ima_alloc_init_template(struct ima_event_data *event_data, struct ima_template_entry **entry) ima_alloc_init_template() argument
46 *entry = kzalloc(sizeof(**entry) + template_desc->num_fields * ima_alloc_init_template()
48 if (!*entry) ima_alloc_init_template()
51 (*entry)->template_desc = template_desc; ima_alloc_init_template()
57 &((*entry)->template_data[i])); ima_alloc_init_template()
61 len = (*entry)->template_data[i].len; ima_alloc_init_template()
62 (*entry)->template_data_len += sizeof(len); ima_alloc_init_template()
63 (*entry)->template_data_len += len; ima_alloc_init_template()
67 ima_free_template_entry(*entry); ima_alloc_init_template()
68 *entry = NULL; ima_alloc_init_template()
75 * Calculate the hash of a template entry, add the template entry
88 int ima_store_template(struct ima_template_entry *entry, ima_store_template() argument
94 char *template_name = entry->template_desc->name; ima_store_template()
102 int num_fields = entry->template_desc->num_fields; ima_store_template()
106 result = ima_calc_field_array_hash(&entry->template_data[0], ima_store_template()
107 entry->template_desc, ima_store_template()
115 memcpy(entry->digest, hash.hdr.digest, hash.hdr.length); ima_store_template()
117 result = ima_add_template_entry(entry, violation, op, inode, filename); ima_store_template()
132 struct ima_template_entry *entry; ima_add_violation() local
142 result = ima_alloc_init_template(&event_data, &entry); ima_add_violation()
147 result = ima_store_template(entry, violation, inode, filename); ima_add_violation()
149 ima_free_template_entry(entry); ima_add_violation()
268 struct ima_template_entry *entry; ima_store_measurement() local
276 result = ima_alloc_init_template(&event_data, &entry); ima_store_measurement()
283 result = ima_store_template(entry, violation, inode, filename); ima_store_measurement()
287 ima_free_template_entry(entry); ima_store_measurement()
H A Dima_queue.c18 * The measurement list is append-only. No entry is
46 /* lookup up the digest value in the hash table, and return the entry */ ima_lookup_digest_entry()
56 rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE); ima_lookup_digest_entry()
67 * - Add template entry to measurement list and hash table.
71 static int ima_add_digest_entry(struct ima_template_entry *entry) ima_add_digest_entry() argument
78 pr_err("OUT OF MEMORY ERROR creating queue entry\n"); ima_add_digest_entry()
81 qe->entry = entry; ima_add_digest_entry()
87 key = ima_hash_key(entry->digest); ima_add_digest_entry()
105 /* Add template entry to the measurement list and hash table,
108 int ima_add_template_entry(struct ima_template_entry *entry, int violation, ima_add_template_entry() argument
120 memcpy(digest, entry->digest, sizeof(digest)); ima_add_template_entry()
128 result = ima_add_digest_entry(entry); ima_add_template_entry()
/linux-4.4.14/drivers/gpu/drm/vmwgfx/
H A Dvmwgfx_cmdbuf_res.c34 * struct vmw_cmdbuf_res - Command buffer managed resource entry.
37 * @hash: Hash entry for the manager hash table.
40 * @state: Staging state of this resource entry.
41 * @man: Pointer to a resource manager for this entry.
100 * @entry: Pointer to a struct vmw_cmdbuf_res.
102 * Frees a struct vmw_cmdbuf_res entry and drops its reference to the
105 vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man, struct vmw_cmdbuf_res *entry) vmw_cmdbuf_res_free() argument
108 list_del(&entry->head); vmw_cmdbuf_res_free()
109 WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash)); vmw_cmdbuf_res_free()
110 vmw_resource_unreference(&entry->res); vmw_cmdbuf_res_free()
111 kfree(entry); vmw_cmdbuf_res_free()
126 struct vmw_cmdbuf_res *entry, *next; vmw_cmdbuf_res_commit() local
128 list_for_each_entry_safe(entry, next, list, head) { list_for_each_entry_safe()
129 list_del(&entry->head); list_for_each_entry_safe()
130 if (entry->res->func->commit_notify) list_for_each_entry_safe()
131 entry->res->func->commit_notify(entry->res, list_for_each_entry_safe()
132 entry->state); list_for_each_entry_safe()
133 switch (entry->state) { list_for_each_entry_safe()
135 entry->state = VMW_CMDBUF_RES_COMMITTED; list_for_each_entry_safe()
136 list_add_tail(&entry->head, &entry->man->list); list_for_each_entry_safe()
139 vmw_resource_unreference(&entry->res); list_for_each_entry_safe()
140 kfree(entry); list_for_each_entry_safe()
163 struct vmw_cmdbuf_res *entry, *next; vmw_cmdbuf_res_revert() local
166 list_for_each_entry_safe(entry, next, list, head) { list_for_each_entry_safe()
167 switch (entry->state) { list_for_each_entry_safe()
169 vmw_cmdbuf_res_free(entry->man, entry); list_for_each_entry_safe()
172 ret = drm_ht_insert_item(&entry->man->resources, list_for_each_entry_safe()
173 &entry->hash); list_for_each_entry_safe()
174 list_del(&entry->head); list_for_each_entry_safe()
175 list_add_tail(&entry->head, &entry->man->list); list_for_each_entry_safe()
176 entry->state = VMW_CMDBUF_RES_COMMITTED; list_for_each_entry_safe()
194 * This function allocates a struct vmw_cmdbuf_res entry and adds the
196 * entry is then put on the staging list identified by @list.
236 * This function looks up the struct vmw_cmdbuf_res entry from the manager
238 * state it then either removes the entry from the staging list or adds it
247 struct vmw_cmdbuf_res *entry; vmw_cmdbuf_res_remove() local
256 entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash); vmw_cmdbuf_res_remove()
258 switch (entry->state) { vmw_cmdbuf_res_remove()
260 vmw_cmdbuf_res_free(man, entry); vmw_cmdbuf_res_remove()
264 (void) drm_ht_remove_item(&man->resources, &entry->hash); vmw_cmdbuf_res_remove()
265 list_del(&entry->head); vmw_cmdbuf_res_remove()
266 entry->state = VMW_CMDBUF_RES_DEL; vmw_cmdbuf_res_remove()
267 list_add_tail(&entry->head, list); vmw_cmdbuf_res_remove()
268 *res_p = entry->res; vmw_cmdbuf_res_remove()
319 struct vmw_cmdbuf_res *entry, *next; vmw_cmdbuf_res_man_destroy() local
321 list_for_each_entry_safe(entry, next, &man->list, head) vmw_cmdbuf_res_man_destroy()
322 vmw_cmdbuf_res_free(man, entry); vmw_cmdbuf_res_man_destroy()
105 vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man, struct vmw_cmdbuf_res *entry) vmw_cmdbuf_res_free() argument
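vmw_cmdbuf_res_commit() and vmw_cmdbuf_res_revert() above implement a two-phase protocol over a staging list: entries staged as ADD are published on commit and freed on revert, while entries staged as DEL are freed on commit and re-published on revert. A minimal sketch of the commit half, using hypothetical demo types rather than the vmwgfx structures:

	#include <stdlib.h>

	enum res_state { RES_ADD, RES_DEL, RES_COMMITTED };

	struct res {
		enum res_state state;
		struct res *next;
	};

	/* publish every staged entry: ADDs become visible, DELs become final */
	static void commit(struct res **staging, struct res **published)
	{
		struct res *r, *next;

		for (r = *staging; r; r = next) {
			next = r->next;
			switch (r->state) {
			case RES_ADD:		/* newly created: move to the live list */
				r->state = RES_COMMITTED;
				r->next = *published;
				*published = r;
				break;
			case RES_DEL:		/* staged removal: free for real */
				free(r);
				break;
			default:
				break;
			}
		}
		*staging = NULL;
	}

A revert() would be the mirror image, freeing staged ADDs and restoring staged DELs to the committed list, which is what the vmw_cmdbuf_res_revert() lines above do.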
/linux-4.4.14/sound/isa/gus/
gus_mem_proc.c 34 static ssize_t snd_gf1_mem_proc_dump(struct snd_info_entry *entry, snd_gf1_mem_proc_dump() argument
39 struct gus_proc_private *priv = entry->private_data; snd_gf1_mem_proc_dump()
49 static void snd_gf1_mem_proc_free(struct snd_info_entry *entry) snd_gf1_mem_proc_free() argument
51 struct gus_proc_private *priv = entry->private_data; snd_gf1_mem_proc_free()
64 struct snd_info_entry *entry; snd_gf1_mem_proc_init() local
73 if (! snd_card_proc_new(gus->card, name, &entry)) { snd_gf1_mem_proc_init()
74 entry->content = SNDRV_INFO_CONTENT_DATA; snd_gf1_mem_proc_init()
75 entry->private_data = priv; snd_gf1_mem_proc_init()
76 entry->private_free = snd_gf1_mem_proc_free; snd_gf1_mem_proc_init()
77 entry->c.ops = &snd_gf1_mem_proc_ops; snd_gf1_mem_proc_init()
79 priv->size = entry->size = gus->gf1.mem_alloc.banks_8[idx].size; snd_gf1_mem_proc_init()
91 if (! snd_card_proc_new(gus->card, name, &entry)) { snd_gf1_mem_proc_init()
92 entry->content = SNDRV_INFO_CONTENT_DATA; snd_gf1_mem_proc_init()
93 entry->private_data = priv; snd_gf1_mem_proc_init()
94 entry->private_free = snd_gf1_mem_proc_free; snd_gf1_mem_proc_init()
95 entry->c.ops = &snd_gf1_mem_proc_ops; snd_gf1_mem_proc_init()
97 priv->size = entry->size = gus->gf1.rom_memory; snd_gf1_mem_proc_init()
/linux-4.4.14/fs/afs/
vlclient.c 64 struct afs_cache_vlocation *entry; afs_deliver_vl_get_entry_by_xxx() local
79 entry = call->reply; afs_deliver_vl_get_entry_by_xxx()
83 entry->name[loop] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
84 entry->name[loop] = 0; afs_deliver_vl_get_entry_by_xxx()
88 entry->nservers = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
91 entry->servers[loop].s_addr = *bp++; afs_deliver_vl_get_entry_by_xxx()
97 entry->srvtmask[loop] = 0; afs_deliver_vl_get_entry_by_xxx()
99 entry->srvtmask[loop] |= AFS_VOL_VTM_RW; afs_deliver_vl_get_entry_by_xxx()
101 entry->srvtmask[loop] |= AFS_VOL_VTM_RO; afs_deliver_vl_get_entry_by_xxx()
103 entry->srvtmask[loop] |= AFS_VOL_VTM_BAK; afs_deliver_vl_get_entry_by_xxx()
106 entry->vid[0] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
107 entry->vid[1] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
108 entry->vid[2] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
113 entry->vidmask = 0; afs_deliver_vl_get_entry_by_xxx()
115 entry->vidmask |= AFS_VOL_VTM_RW; afs_deliver_vl_get_entry_by_xxx()
117 entry->vidmask |= AFS_VOL_VTM_RO; afs_deliver_vl_get_entry_by_xxx()
119 entry->vidmask |= AFS_VOL_VTM_BAK; afs_deliver_vl_get_entry_by_xxx()
120 if (!entry->vidmask) afs_deliver_vl_get_entry_by_xxx()
148 * dispatch a get volume entry by name operation
153 struct afs_cache_vlocation *entry, afs_vl_get_entry_by_name()
171 call->reply = entry; afs_vl_get_entry_by_name()
188 * dispatch a get volume entry by ID operation
194 struct afs_cache_vlocation *entry, afs_vl_get_entry_by_id()
207 call->reply = entry; afs_vl_get_entry_by_id()
150 afs_vl_get_entry_by_name(struct in_addr *addr, struct key *key, const char *volname, struct afs_cache_vlocation *entry, const struct afs_wait_mode *wait_mode) afs_vl_get_entry_by_name() argument
190 afs_vl_get_entry_by_id(struct in_addr *addr, struct key *key, afs_volid_t volid, afs_voltype_t voltype, struct afs_cache_vlocation *entry, const struct afs_wait_mode *wait_mode) afs_vl_get_entry_by_id() argument
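afs_deliver_vl_get_entry_by_xxx() above unmarshals an XDR-style reply in which every field, including each character of the volume name, arrives as its own big-endian 32-bit word. A sketch of that decode pattern, with an illustrative record type rather than the afs one:

	#include <arpa/inet.h>
	#include <stdint.h>

	struct vl_record {		/* illustrative record, not the afs struct */
		char name[65];
		uint32_t nservers;
	};

	/* caller guarantees namelen < sizeof(r->name) */
	static const uint32_t *decode(const uint32_t *bp, struct vl_record *r,
				      int namelen)
	{
		int i;

		for (i = 0; i < namelen; i++)
			r->name[i] = (char)ntohl(*bp++);	/* one char per word */
		r->name[namelen] = '\0';
		r->nservers = ntohl(*bp++);
		return bp;
	}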
/linux-4.4.14/drivers/staging/lustre/lustre/llite/
statahead.c 54 SA_ENTRY_INIT = 0, /** init entry */
56 SA_ENTRY_INVA = 2, /** invalid entry */
57 SA_ENTRY_DEST = 3, /** entry to be destroyed */
67 /* entry reference count */
69 /* entry index in the sai */
73 /* entry status */
75 /* entry size, contains name */
83 /* entry name */
90 static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry) ll_sa_entry_unhashed() argument
92 return list_empty(&entry->se_hash); ll_sa_entry_unhashed()
96 * The entry can only be released by the caller; it is necessary to hold the lock.
98 static inline int ll_sa_entry_stated(struct ll_sa_entry *entry) ll_sa_entry_stated() argument
101 return (entry->se_stat != SA_ENTRY_INIT); ll_sa_entry_stated()
110 * Insert an entry into the SA hash table.
113 ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry) ll_sa_entry_enhash() argument
115 int i = ll_sa_entry_hash(entry->se_qstr.hash); ll_sa_entry_enhash()
118 list_add_tail(&entry->se_hash, &sai->sai_cache[i]); ll_sa_entry_enhash()
123 * Remove entry from SA table.
126 ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry) ll_sa_entry_unhash() argument
128 int i = ll_sa_entry_hash(entry->se_qstr.hash); ll_sa_entry_unhash()
131 list_del_init(&entry->se_hash); ll_sa_entry_unhash()
200 struct ll_sa_entry *entry; ll_sa_entry_alloc() local
205 entry = kzalloc(entry_size, GFP_NOFS); ll_sa_entry_alloc()
206 if (unlikely(!entry)) ll_sa_entry_alloc()
209 CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n", ll_sa_entry_alloc()
210 len, name, entry, index); ll_sa_entry_alloc()
212 entry->se_index = index; ll_sa_entry_alloc()
215 * Statahead entry reference rules: ll_sa_entry_alloc()
217 * 1) When a statahead entry is initialized, its reference count is set to 2. ll_sa_entry_alloc()
220 * lockless hash lookup (only the scanner can remove entry from hash ll_sa_entry_alloc()
222 * entry reference. So the performance is improved. After using the ll_sa_entry_alloc()
223 * statahead entry, the scanner will call "atomic_dec()" to drop the ll_sa_entry_alloc()
225 * the statahead entry will be freed. ll_sa_entry_alloc()
228 * when they process the statahead entry, the reference for target ll_sa_entry_alloc()
229 * should be held to guarantee the entry will not be released by the ll_sa_entry_alloc()
230 * directory scanner. After processing the entry, these threads will ll_sa_entry_alloc()
231 * drop the entry reference. If it is the last reference, the entry ll_sa_entry_alloc()
234 * The second reference taken when initializing the statahead entry is used ll_sa_entry_alloc()
237 atomic_set(&entry->se_refcount, 2); ll_sa_entry_alloc()
238 entry->se_stat = SA_ENTRY_INIT; ll_sa_entry_alloc()
239 entry->se_size = entry_size; ll_sa_entry_alloc()
240 dname = (char *)entry + sizeof(struct ll_sa_entry); ll_sa_entry_alloc()
243 entry->se_qstr.hash = full_name_hash(name, len); ll_sa_entry_alloc()
244 entry->se_qstr.len = len; ll_sa_entry_alloc()
245 entry->se_qstr.name = dname; ll_sa_entry_alloc()
249 list_add_tail(&entry->se_link, &sai->sai_entries); ll_sa_entry_alloc()
250 INIT_LIST_HEAD(&entry->se_list); ll_sa_entry_alloc()
251 ll_sa_entry_enhash(sai, entry); ll_sa_entry_alloc()
256 return entry; ll_sa_entry_alloc()
260 * Used by the directory scanner to search for an entry by name.
262 * Only the caller can remove the entry from the hash, so it is unnecessary to hold
263 * the hash lock. It is the caller's duty to release the init refcount on the entry, so
264 * it is also unnecessary to increase the refcount on the entry.
269 struct ll_sa_entry *entry; ll_sa_entry_get_byname() local
272 list_for_each_entry(entry, &sai->sai_cache[i], se_hash) { ll_sa_entry_get_byname()
273 if (entry->se_qstr.hash == qstr->hash && ll_sa_entry_get_byname()
274 entry->se_qstr.len == qstr->len && ll_sa_entry_get_byname()
275 memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0) ll_sa_entry_get_byname()
276 return entry; ll_sa_entry_get_byname()
282 * Used by the async getattr request callback to find an entry by index.
285 * It needs to increase the entry refcount before returning to guarantee that the
286 * entry cannot be freed by others.
291 struct ll_sa_entry *entry; ll_sa_entry_get_byindex() local
293 list_for_each_entry(entry, &sai->sai_entries, se_link) { ll_sa_entry_get_byindex()
294 if (entry->se_index == index) { ll_sa_entry_get_byindex()
295 LASSERT(atomic_read(&entry->se_refcount) > 0); ll_sa_entry_get_byindex()
296 atomic_inc(&entry->se_refcount); ll_sa_entry_get_byindex()
297 return entry; ll_sa_entry_get_byindex()
299 if (entry->se_index > index) ll_sa_entry_get_byindex()
306 struct ll_sa_entry *entry) ll_sa_entry_cleanup()
308 struct md_enqueue_info *minfo = entry->se_minfo; ll_sa_entry_cleanup()
309 struct ptlrpc_request *req = entry->se_req; ll_sa_entry_cleanup()
312 entry->se_minfo = NULL; ll_sa_entry_cleanup()
319 entry->se_req = NULL; ll_sa_entry_cleanup()
325 struct ll_sa_entry *entry) ll_sa_entry_put()
327 if (atomic_dec_and_test(&entry->se_refcount)) { ll_sa_entry_put()
328 CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n", ll_sa_entry_put()
329 entry->se_qstr.len, entry->se_qstr.name, entry, ll_sa_entry_put()
330 entry->se_index); ll_sa_entry_put()
332 LASSERT(list_empty(&entry->se_link)); ll_sa_entry_put()
333 LASSERT(list_empty(&entry->se_list)); ll_sa_entry_put()
334 LASSERT(ll_sa_entry_unhashed(entry)); ll_sa_entry_put()
336 ll_sa_entry_cleanup(sai, entry); ll_sa_entry_put()
337 iput(entry->se_inode); ll_sa_entry_put()
339 kfree(entry); ll_sa_entry_put()
345 do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry) do_sa_entry_fini() argument
349 LASSERT(!ll_sa_entry_unhashed(entry)); do_sa_entry_fini()
350 LASSERT(!list_empty(&entry->se_link)); do_sa_entry_fini()
352 ll_sa_entry_unhash(sai, entry); do_sa_entry_fini()
355 entry->se_stat = SA_ENTRY_DEST; do_sa_entry_fini()
356 list_del_init(&entry->se_link); do_sa_entry_fini()
357 if (likely(!list_empty(&entry->se_list))) do_sa_entry_fini()
358 list_del_init(&entry->se_list); do_sa_entry_fini()
361 ll_sa_entry_put(sai, entry); do_sa_entry_fini()
368 ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry) ll_sa_entry_fini() argument
372 if (entry) ll_sa_entry_fini()
373 do_sa_entry_fini(sai, entry); ll_sa_entry_fini()
375 /* drop the old entry; only the 'scanner' process does this, so no need to lock */ ll_sa_entry_fini()
388 struct ll_sa_entry *entry, se_stat_t stat) do_sa_entry_to_stated()
393 if (!list_empty(&entry->se_list)) do_sa_entry_to_stated()
394 list_del_init(&entry->se_list); do_sa_entry_to_stated()
397 if (se->se_index < entry->se_index) { do_sa_entry_to_stated()
403 list_add(&entry->se_list, pos); do_sa_entry_to_stated()
404 entry->se_stat = stat; do_sa_entry_to_stated()
408 * Move entry to sai_entries_stated and sort with the index.
409 * \retval 1 -- entry to be destroyed.
410 * \retval 0 -- entry is inserted into stated list.
414 struct ll_sa_entry *entry, se_stat_t stat) ll_sa_entry_to_stated()
419 ll_sa_entry_cleanup(sai, entry); ll_sa_entry_to_stated()
422 if (likely(entry->se_stat != SA_ENTRY_DEST)) { ll_sa_entry_to_stated()
423 do_sa_entry_to_stated(sai, entry, stat); ll_sa_entry_to_stated()
512 struct ll_sa_entry *entry, *next; ll_sai_put() local
535 list_for_each_entry_safe(entry, next, ll_sai_put()
537 do_sa_entry_fini(sai, entry); ll_sai_put()
560 /* AGL may fall behind statahead by one entry */ ll_agl_trigger()
616 struct ll_sa_entry *entry; ll_post_statahead() local
628 entry = sa_first_received_entry(sai); ll_post_statahead()
629 atomic_inc(&entry->se_refcount); ll_post_statahead()
630 list_del_init(&entry->se_list); ll_post_statahead()
633 LASSERT(entry->se_handle != 0); ll_post_statahead()
635 minfo = entry->se_minfo; ll_post_statahead()
637 req = entry->se_req; ll_post_statahead()
644 child = entry->se_inode; ll_post_statahead()
663 entry->se_inode = NULL; ll_post_statahead()
669 it->d.lustre.it_lock_handle = entry->se_handle; ll_post_statahead()
684 entry->se_inode = child; ll_post_statahead()
687 ll_agl_add(sai, child, entry->se_index); ll_post_statahead()
694 rc = ll_sa_entry_to_stated(sai, entry, ll_post_statahead()
696 if (rc == 0 && entry->se_index == sai->sai_index_wait) ll_post_statahead()
698 ll_sa_entry_put(sai, entry); ll_post_statahead()
708 struct ll_sa_entry *entry; ll_statahead_interpret() local
725 /* stale entry */ ll_statahead_interpret()
740 entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata); ll_statahead_interpret()
741 if (entry == NULL) { ll_statahead_interpret()
749 do_sa_entry_to_stated(sai, entry, SA_ENTRY_INVA); ll_statahead_interpret()
750 wakeup = (entry->se_index == sai->sai_index_wait); ll_statahead_interpret()
752 entry->se_minfo = minfo; ll_statahead_interpret()
753 entry->se_req = ptlrpc_request_addref(req); ll_statahead_interpret()
758 entry->se_handle = handle; ll_statahead_interpret()
760 list_add_tail(&entry->se_list, ll_statahead_interpret()
766 ll_sa_entry_put(sai, entry); ll_statahead_interpret()
795 struct ll_sa_entry *entry, struct md_enqueue_info **pmi, sa_args_init()
798 struct qstr *qstr = &entry->se_qstr; sa_args_init()
826 minfo->mi_cbdata = entry->se_index; sa_args_init()
841 static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry) do_sa_lookup() argument
847 rc = sa_args_init(dir, NULL, entry, &minfo, &einfo); do_sa_lookup()
864 static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry, do_sa_revalidate() argument
880 entry->se_inode = igrab(inode); do_sa_revalidate()
884 entry->se_handle = it.d.lustre.it_lock_handle; do_sa_revalidate()
889 rc = sa_args_init(dir, inode, entry, &minfo, &einfo); do_sa_revalidate()
891 entry->se_inode = NULL; do_sa_revalidate()
898 entry->se_inode = NULL; do_sa_revalidate()
913 struct ll_sa_entry *entry; ll_statahead_one() local
917 entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name, ll_statahead_one()
919 if (IS_ERR(entry)) ll_statahead_one()
922 dentry = d_lookup(parent, &entry->se_qstr); ll_statahead_one()
924 rc = do_sa_lookup(dir, entry); ll_statahead_one()
926 rc = do_sa_revalidate(dir, entry, dentry); ll_statahead_one()
928 ll_agl_add(sai, d_inode(dentry), entry->se_index); ll_statahead_one()
935 rc1 = ll_sa_entry_to_stated(sai, entry, ll_statahead_one()
937 if (rc1 == 0 && entry->se_index == sai->sai_index_wait) ll_statahead_one()
944 /* drop one refcount on entry by ll_sa_entry_alloc */ ll_statahead_one()
945 ll_sa_entry_put(sai, entry); ll_statahead_one()
1131 * don't stat-ahead first entry. ll_statahead_thread()
1459 ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry) ll_sai_unplug() argument
1465 if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC) ll_sai_unplug()
1470 ll_sa_entry_fini(sai, entry); ll_sai_unplug()
1498 * Start statahead thread if this is the first dir entry.
1500 * \retval 1 -- find entry with lock in cache, the caller needs to do
1502 * \retval 0 -- find entry in cache, but without lock, the caller needs
1512 struct ll_sa_entry *entry; do_statahead_enter() local
1556 entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name); do_statahead_enter()
1557 if (entry == NULL || only_unplug) { do_statahead_enter()
1558 ll_sai_unplug(sai, entry); do_statahead_enter()
1559 return entry ? 1 : -EAGAIN; do_statahead_enter()
1562 if (!ll_sa_entry_stated(entry)) { do_statahead_enter()
1563 sai->sai_index_wait = entry->se_index; do_statahead_enter()
1567 ll_sa_entry_stated(entry) || do_statahead_enter()
1571 ll_sai_unplug(sai, entry); do_statahead_enter()
1576 if (entry->se_stat == SA_ENTRY_SUCC && do_statahead_enter()
1577 entry->se_inode != NULL) { do_statahead_enter()
1578 struct inode *inode = entry->se_inode; do_statahead_enter()
1581 entry->se_handle }; do_statahead_enter()
1593 ll_sai_unplug(sai, entry); do_statahead_enter()
1606 ll_sai_unplug(sai, entry); do_statahead_enter()
1611 entry->se_inode = NULL; do_statahead_enter()
1620 ll_sai_unplug(sai, entry); do_statahead_enter()
305 ll_sa_entry_cleanup(struct ll_statahead_info *sai, struct ll_sa_entry *entry) ll_sa_entry_cleanup() argument
324 ll_sa_entry_put(struct ll_statahead_info *sai, struct ll_sa_entry *entry) ll_sa_entry_put() argument
387 do_sa_entry_to_stated(struct ll_statahead_info *sai, struct ll_sa_entry *entry, se_stat_t stat) do_sa_entry_to_stated() argument
413 ll_sa_entry_to_stated(struct ll_statahead_info *sai, struct ll_sa_entry *entry, se_stat_t stat) ll_sa_entry_to_stated() argument
794 sa_args_init(struct inode *dir, struct inode *child, struct ll_sa_entry *entry, struct md_enqueue_info **pmi, struct ldlm_enqueue_info **pei) sa_args_init() argument
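The comment block in ll_sa_entry_alloc() above describes the refcount protocol: an entry starts at 2 (one reference for the directory scanner, one for membership in the sai lists), users take a reference around each access, and whoever drops the last reference frees the entry. The same protocol in standalone form, using C11 atomics rather than the kernel's atomic_t:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct sa_entry {
		atomic_int refcount;
	};

	static struct sa_entry *sa_alloc(void)
	{
		struct sa_entry *e = malloc(sizeof(*e));

		if (e)
			atomic_init(&e->refcount, 2);	/* scanner ref + list ref */
		return e;
	}

	static void sa_get(struct sa_entry *e)
	{
		atomic_fetch_add(&e->refcount, 1);	/* take before touching the entry */
	}

	static void sa_put(struct sa_entry *e)
	{
		if (atomic_fetch_sub(&e->refcount, 1) == 1)
			free(e);			/* last reference dropped */
	}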
/linux-4.4.14/arch/sparc/mm/
extable.c 41 /* A range entry, skip both parts. */ search_extable()
46 /* A deleted entry; see trim_init_extable */ search_extable()
92 const struct exception_table_entry *entry; search_extables_range() local
94 entry = search_exception_tables(addr); search_extables_range()
95 if (!entry) search_extables_range()
99 if (!entry->fixup) { search_extables_range()
100 *g2 = (addr - entry->insn) / 4; search_extables_range()
101 return (entry + 1)->fixup; search_extables_range()
104 return entry->fixup; search_extables_range()
/linux-4.4.14/arch/x86/entry/
Makefile 2 # Makefile for the x86 low level entry code
/linux-4.4.14/net/bridge/
br_mdb.c 167 struct br_mdb_entry *entry, u32 pid, nlmsg_populate_mdb_fill()
189 if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry)) nlmsg_populate_mdb_fill()
210 static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry, __br_mdb_notify() argument
221 err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF); __br_mdb_notify()
236 struct br_mdb_entry entry; br_mdb_notify() local
238 memset(&entry, 0, sizeof(entry)); br_mdb_notify()
239 entry.ifindex = port->dev->ifindex; br_mdb_notify()
240 entry.addr.proto = group->proto; br_mdb_notify()
241 entry.addr.u.ip4 = group->u.ip4; br_mdb_notify()
243 entry.addr.u.ip6 = group->u.ip6; br_mdb_notify()
245 entry.state = state; br_mdb_notify()
246 entry.vid = group->vid; br_mdb_notify()
247 __br_mdb_notify(dev, &entry, type); br_mdb_notify()
317 static bool is_valid_mdb_entry(struct br_mdb_entry *entry) is_valid_mdb_entry() argument
319 if (entry->ifindex == 0) is_valid_mdb_entry()
322 if (entry->addr.proto == htons(ETH_P_IP)) { is_valid_mdb_entry()
323 if (!ipv4_is_multicast(entry->addr.u.ip4)) is_valid_mdb_entry()
325 if (ipv4_is_local_multicast(entry->addr.u.ip4)) is_valid_mdb_entry()
328 } else if (entry->addr.proto == htons(ETH_P_IPV6)) { is_valid_mdb_entry()
329 if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) is_valid_mdb_entry()
334 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) is_valid_mdb_entry()
336 if (entry->vid >= VLAN_VID_MASK) is_valid_mdb_entry()
346 struct br_mdb_entry *entry; br_mdb_parse() local
381 entry = nla_data(tb[MDBA_SET_ENTRY]); br_mdb_parse()
382 if (!is_valid_mdb_entry(entry)) { br_mdb_parse()
383 pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n"); br_mdb_parse()
387 *pentry = entry; br_mdb_parse()
430 struct br_mdb_entry *entry) __br_mdb_add()
440 dev = __dev_get_by_index(net, entry->ifindex); __br_mdb_add()
449 ip.vid = entry->vid; __br_mdb_add()
450 ip.proto = entry->addr.proto; __br_mdb_add()
452 ip.u.ip4 = entry->addr.u.ip4; __br_mdb_add()
455 ip.u.ip6 = entry->addr.u.ip6; __br_mdb_add()
459 ret = br_mdb_add_group(br, p, &ip, entry->state); __br_mdb_add()
469 struct br_mdb_entry *entry; br_mdb_add() local
475 err = br_mdb_parse(skb, nlh, &dev, &entry); br_mdb_add()
482 * install mdb entry on all vlans configured on the port. br_mdb_add()
484 pdev = __dev_get_by_index(net, entry->ifindex); br_mdb_add()
493 if (br_vlan_enabled(br) && vg && entry->vid == 0) { br_mdb_add()
495 entry->vid = v->vid; br_mdb_add()
496 err = __br_mdb_add(net, br, entry); br_mdb_add()
499 __br_mdb_notify(dev, entry, RTM_NEWMDB); br_mdb_add()
502 err = __br_mdb_add(net, br, entry); br_mdb_add()
504 __br_mdb_notify(dev, entry, RTM_NEWMDB); br_mdb_add()
510 static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) __br_mdb_del() argument
523 ip.vid = entry->vid; __br_mdb_del()
524 ip.proto = entry->addr.proto; __br_mdb_del()
526 ip.u.ip4 = entry->addr.u.ip4; __br_mdb_del()
529 ip.u.ip6 = entry->addr.u.ip6; __br_mdb_del()
542 if (!p->port || p->port->dev->ifindex != entry->ifindex) __br_mdb_del()
548 entry->state = p->state; __br_mdb_del()
571 struct br_mdb_entry *entry; br_mdb_del() local
577 err = br_mdb_parse(skb, nlh, &dev, &entry); br_mdb_del()
584 * delete mdb entry on all vlans configured on the port. br_mdb_del()
586 pdev = __dev_get_by_index(net, entry->ifindex); br_mdb_del()
595 if (br_vlan_enabled(br) && vg && entry->vid == 0) { br_mdb_del()
597 entry->vid = v->vid; br_mdb_del()
598 err = __br_mdb_del(br, entry); br_mdb_del()
600 __br_mdb_notify(dev, entry, RTM_DELMDB); br_mdb_del()
603 err = __br_mdb_del(br, entry); br_mdb_del()
605 __br_mdb_notify(dev, entry, RTM_DELMDB); br_mdb_del()
165 nlmsg_populate_mdb_fill(struct sk_buff *skb, struct net_device *dev, struct br_mdb_entry *entry, u32 pid, u32 seq, int type, unsigned int flags) nlmsg_populate_mdb_fill() argument
429 __br_mdb_add(struct net *net, struct net_bridge *br, struct br_mdb_entry *entry) __br_mdb_add() argument
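br_mdb_add() and br_mdb_del() above treat a zero VLAN id as a wildcard: when VLAN filtering is enabled, the entry is re-applied once per VLAN configured on the port. The expansion pattern in isolation (demo types; printf stands in for the real per-VLAN operation):

	#include <stdio.h>

	struct mdb_entry {
		unsigned short vid;
	};

	static int apply_one(const struct mdb_entry *e)
	{
		printf("apply vid %u\n", e->vid);	/* stands in for __br_mdb_add() */
		return 0;
	}

	/* vid 0 expands to every configured VLAN; a nonzero vid applies once */
	static int apply(struct mdb_entry *e, const unsigned short *vids, int nvids)
	{
		int i, err = 0;

		if (e->vid == 0 && nvids > 0) {
			for (i = 0; i < nvids && !err; i++) {
				e->vid = vids[i];
				err = apply_one(e);
			}
		} else {
			err = apply_one(e);
		}
		return err;
	}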
/linux-4.4.14/arch/score/kernel/
Makefile 7 obj-y += entry.o irq.o process.o ptrace.o \
/linux-4.4.14/arch/sparc/include/asm/
sections.h 7 /* sparc entry point */
spitfire.h 101 static inline unsigned long spitfire_get_dtlb_data(int entry) spitfire_get_dtlb_data() argument
107 : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS)); spitfire_get_dtlb_data()
115 static inline unsigned long spitfire_get_dtlb_tag(int entry) spitfire_get_dtlb_tag() argument
121 : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ)); spitfire_get_dtlb_tag()
125 static inline void spitfire_put_dtlb_data(int entry, unsigned long data) spitfire_put_dtlb_data() argument
130 : "r" (data), "r" (entry << 3), spitfire_put_dtlb_data()
134 static inline unsigned long spitfire_get_itlb_data(int entry) spitfire_get_itlb_data() argument
140 : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS)); spitfire_get_itlb_data()
148 static inline unsigned long spitfire_get_itlb_tag(int entry) spitfire_get_itlb_tag() argument
154 : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ)); spitfire_get_itlb_tag()
158 static inline void spitfire_put_itlb_data(int entry, unsigned long data) spitfire_put_itlb_data() argument
163 : "r" (data), "r" (entry << 3), spitfire_put_itlb_data()
214 static inline unsigned long cheetah_get_ldtlb_data(int entry) cheetah_get_ldtlb_data() argument
221 : "r" ((0 << 16) | (entry << 3)), cheetah_get_ldtlb_data()
227 static inline unsigned long cheetah_get_litlb_data(int entry) cheetah_get_litlb_data() argument
234 : "r" ((0 << 16) | (entry << 3)), cheetah_get_litlb_data()
240 static inline unsigned long cheetah_get_ldtlb_tag(int entry) cheetah_get_ldtlb_tag() argument
246 : "r" ((0 << 16) | (entry << 3)), cheetah_get_ldtlb_tag()
252 static inline unsigned long cheetah_get_litlb_tag(int entry) cheetah_get_litlb_tag() argument
258 : "r" ((0 << 16) | (entry << 3)), cheetah_get_litlb_tag()
264 static inline void cheetah_put_ldtlb_data(int entry, unsigned long data) cheetah_put_ldtlb_data() argument
270 "r" ((0 << 16) | (entry << 3)), cheetah_put_ldtlb_data()
274 static inline void cheetah_put_litlb_data(int entry, unsigned long data) cheetah_put_litlb_data() argument
280 "r" ((0 << 16) | (entry << 3)), cheetah_put_litlb_data()
284 static inline unsigned long cheetah_get_dtlb_data(int entry, int tlb) cheetah_get_dtlb_data() argument
291 : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS)); cheetah_get_dtlb_data()
296 static inline unsigned long cheetah_get_dtlb_tag(int entry, int tlb) cheetah_get_dtlb_tag() argument
302 : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ)); cheetah_get_dtlb_tag()
306 static inline void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb) cheetah_put_dtlb_data() argument
312 "r" ((tlb << 16) | (entry << 3)), cheetah_put_dtlb_data()
316 static inline unsigned long cheetah_get_itlb_data(int entry) cheetah_get_itlb_data() argument
323 : "r" ((2 << 16) | (entry << 3)), cheetah_get_itlb_data()
329 static inline unsigned long cheetah_get_itlb_tag(int entry) cheetah_get_itlb_tag() argument
335 : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ)); cheetah_get_itlb_tag()
339 static inline void cheetah_put_itlb_data(int entry, unsigned long data) cheetah_put_itlb_data() argument
344 : "r" (data), "r" ((2 << 16) | (entry << 3)), cheetah_put_itlb_data()
/linux-4.4.14/arch/metag/kernel/
perf_callchain.c 32 struct perf_callchain_entry *entry) user_backtrace()
50 perf_callchain_store(entry, calladdr); user_backtrace()
59 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) perf_callchain_user() argument
68 while ((entry->nr < PERF_MAX_STACK_DEPTH) && frame) perf_callchain_user()
69 frame = user_backtrace(frame, entry); perf_callchain_user()
81 struct perf_callchain_entry *entry = data; callchain_trace() local
82 perf_callchain_store(entry, fr->pc); callchain_trace()
87 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) perf_callchain_kernel() argument
95 walk_stackframe(&fr, callchain_trace, entry); perf_callchain_kernel()
31 user_backtrace(struct metag_frame __user *user_frame, struct perf_callchain_entry *entry) user_backtrace() argument
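user_backtrace() above walks saved frame records, storing one return address per frame until the depth cap is reached. The bounded walk, stripped to its shape (hypothetical frame layout; a real unwinder must validate every pointer before dereferencing it):

	#include <stddef.h>

	#define MAX_DEPTH 127		/* stand-in for PERF_MAX_STACK_DEPTH */

	struct frame {			/* hypothetical frame record layout */
		struct frame *fp;	/* caller's frame */
		unsigned long lr;	/* return address */
	};

	static size_t backtrace(const struct frame *f, unsigned long *out)
	{
		size_t n = 0;

		while (f && n < MAX_DEPTH) {
			out[n++] = f->lr;	/* one entry per frame */
			f = f->fp;
		}
		return n;
	}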
/linux-4.4.14/arch/openrisc/kernel/
Makefile 8 traps.o time.o irq.o entry.o ptrace.o signal.o \
/linux-4.4.14/arch/m68k/68360/
Makefile 7 obj-y := config.o commproc.o entry.o ints.o
/linux-4.4.14/sound/pci/ice1712/
psc724.h 10 /* entry struct */
se.h 12 /* entry struct */
/linux-4.4.14/drivers/hid/
hid-lg4ff.c 310 struct lg4ff_device_entry *entry = drv_data->device_props; lg4ff_adjust_input_event() local
313 if (!entry) { lg4ff_adjust_input_event()
318 switch (entry->wdata.product_id) { lg4ff_adjust_input_event()
322 new_value = lg4ff_adjust_dfp_x_axis(value, entry->wdata.range); lg4ff_adjust_input_event()
364 struct lg4ff_device_entry *entry; lg4ff_play() local
376 entry = drv_data->device_props; lg4ff_play()
377 if (!entry) { lg4ff_play()
381 value = entry->report->field[0]->value; lg4ff_play()
390 spin_lock_irqsave(&entry->report_lock, flags); lg4ff_play()
401 hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); lg4ff_play()
402 spin_unlock_irqrestore(&entry->report_lock, flags); lg4ff_play()
414 hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); lg4ff_play()
415 spin_unlock_irqrestore(&entry->report_lock, flags); lg4ff_play()
430 struct lg4ff_device_entry *entry; lg4ff_set_autocenter_default() local
440 entry = drv_data->device_props; lg4ff_set_autocenter_default()
441 if (!entry) { lg4ff_set_autocenter_default()
445 value = entry->report->field[0]->value; lg4ff_set_autocenter_default()
448 spin_lock_irqsave(&entry->report_lock, flags); lg4ff_set_autocenter_default()
458 hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); lg4ff_set_autocenter_default()
459 spin_unlock_irqrestore(&entry->report_lock, flags); lg4ff_set_autocenter_default()
472 switch (entry->wdata.product_id) { lg4ff_set_autocenter_default()
489 hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); lg4ff_set_autocenter_default()
500 hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); lg4ff_set_autocenter_default()
501 spin_unlock_irqrestore(&entry->report_lock, flags); lg4ff_set_autocenter_default()
508 struct lg4ff_device_entry *entry; lg4ff_set_autocenter_ffex() local
520 entry = drv_data->device_props; lg4ff_set_autocenter_ffex()
521 if (!entry) { lg4ff_set_autocenter_ffex()
525 value = entry->report->field[0]->value; lg4ff_set_autocenter_ffex()
527 spin_lock_irqsave(&entry->report_lock, flags); lg4ff_set_autocenter_ffex()
536 hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); lg4ff_set_autocenter_ffex()
537 spin_unlock_irqrestore(&entry->report_lock, flags); lg4ff_set_autocenter_ffex()
543 struct lg4ff_device_entry *entry; lg4ff_set_range_g25() local
554 entry = drv_data->device_props; lg4ff_set_range_g25()
555 if (!entry) { lg4ff_set_range_g25()
559 value = entry->report->field[0]->value; lg4ff_set_range_g25()
562 spin_lock_irqsave(&entry->report_lock, flags); lg4ff_set_range_g25()
571 hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); lg4ff_set_range_g25()
572 spin_unlock_irqrestore(&entry->report_lock, flags); lg4ff_set_range_g25()
578 struct lg4ff_device_entry *entry; lg4ff_set_range_dfp() local
590 entry = drv_data->device_props; lg4ff_set_range_dfp()
591 if (!entry) { lg4ff_set_range_dfp()
595 value = entry->report->field[0]->value; lg4ff_set_range_dfp()
599 spin_lock_irqsave(&entry->report_lock, flags); lg4ff_set_range_dfp()
615 hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); lg4ff_set_range_dfp()
627 hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); lg4ff_set_range_dfp()
628 spin_unlock_irqrestore(&entry->report_lock, flags); lg4ff_set_range_dfp()
642 hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); lg4ff_set_range_dfp()
643 spin_unlock_irqrestore(&entry->report_lock, flags); lg4ff_set_range_dfp()
722 struct lg4ff_device_entry *entry; lg4ff_switch_compatibility_mode() local
734 entry = drv_data->device_props; lg4ff_switch_compatibility_mode()
735 if (!entry) { lg4ff_switch_compatibility_mode()
739 value = entry->report->field[0]->value; lg4ff_switch_compatibility_mode()
741 spin_lock_irqsave(&entry->report_lock, flags); lg4ff_switch_compatibility_mode()
748 hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); lg4ff_switch_compatibility_mode()
750 spin_unlock_irqrestore(&entry->report_lock, flags); lg4ff_switch_compatibility_mode()
758 struct lg4ff_device_entry *entry; lg4ff_alternate_modes_show() local
769 entry = drv_data->device_props; lg4ff_alternate_modes_show()
770 if (!entry) { lg4ff_alternate_modes_show()
775 if (!entry->wdata.real_name) { lg4ff_alternate_modes_show()
781 if (entry->wdata.alternate_modes & BIT(i)) { lg4ff_alternate_modes_show()
785 !lg4ff_alternate_modes[i].product_id ? entry->wdata.real_name : lg4ff_alternate_modes[i].name); lg4ff_alternate_modes_show()
790 if (lg4ff_alternate_modes[i].product_id == entry->wdata.product_id || lg4ff_alternate_modes_show()
791 (lg4ff_alternate_modes[i].product_id == 0 && entry->wdata.product_id == entry->wdata.real_product_id)) lg4ff_alternate_modes_show()
807 struct lg4ff_device_entry *entry; lg4ff_alternate_modes_store() local
820 entry = drv_data->device_props; lg4ff_alternate_modes_store()
821 if (!entry) { lg4ff_alternate_modes_store()
844 if (entry->wdata.alternate_modes & BIT(i)) { lg4ff_alternate_modes_store()
847 target_product_id = entry->wdata.real_product_id; lg4ff_alternate_modes_store()
862 if (target_product_id == entry->wdata.product_id) /* Nothing to do */ lg4ff_alternate_modes_store()
868 entry->wdata.real_name); lg4ff_alternate_modes_store()
873 if ((entry->wdata.real_product_id == USB_DEVICE_ID_LOGITECH_DFP_WHEEL || entry->wdata.real_product_id == USB_DEVICE_ID_LOGITECH_G25_WHEEL) && lg4ff_alternate_modes_store()
874 entry->wdata.product_id > target_product_id) { lg4ff_alternate_modes_store()
875 hid_info(hid, "\"%s\" cannot be switched back into \"%s\" mode\n", entry->wdata.real_name, lg4ff_alternate_modes[i].name); lg4ff_alternate_modes_store()
879 s = lg4ff_get_mode_switch_command(entry->wdata.real_product_id, target_product_id); lg4ff_alternate_modes_store()
895 struct lg4ff_device_entry *entry; lg4ff_range_show() local
905 entry = drv_data->device_props; lg4ff_range_show()
906 if (!entry) { lg4ff_range_show()
911 count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->wdata.range); lg4ff_range_show()
921 struct lg4ff_device_entry *entry; lg4ff_range_store() local
931 entry = drv_data->device_props; lg4ff_range_store()
932 if (!entry) { lg4ff_range_store()
938 range = entry->wdata.max_range; lg4ff_range_store()
942 if (entry->wdata.set_range && range >= entry->wdata.min_range && range <= entry->wdata.max_range) { lg4ff_range_store()
943 entry->wdata.set_range(hid, range); lg4ff_range_store()
944 entry->wdata.range = range; lg4ff_range_store()
954 struct lg4ff_device_entry *entry; lg4ff_real_id_show() local
964 entry = drv_data->device_props; lg4ff_real_id_show()
965 if (!entry) { lg4ff_real_id_show()
970 if (!entry->wdata.real_tag || !entry->wdata.real_name) { lg4ff_real_id_show()
975 count = scnprintf(buf, PAGE_SIZE, "%s: %s\n", entry->wdata.real_tag, entry->wdata.real_name); lg4ff_real_id_show()
990 struct lg4ff_device_entry *entry; lg4ff_set_leds() local
1000 entry = drv_data->device_props; lg4ff_set_leds()
1001 if (!entry) { lg4ff_set_leds()
1005 value = entry->report->field[0]->value; lg4ff_set_leds()
1007 spin_lock_irqsave(&entry->report_lock, flags); lg4ff_set_leds()
1015 hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); lg4ff_set_leds()
1016 spin_unlock_irqrestore(&entry->report_lock, flags); lg4ff_set_leds()
1025 struct lg4ff_device_entry *entry; lg4ff_led_set_brightness() local
1033 entry = drv_data->device_props; lg4ff_led_set_brightness()
1035 if (!entry) { lg4ff_led_set_brightness()
1041 if (led_cdev != entry->wdata.led[i]) lg4ff_led_set_brightness()
1043 state = (entry->wdata.led_state >> i) & 1; lg4ff_led_set_brightness()
1045 entry->wdata.led_state &= ~(1 << i); lg4ff_led_set_brightness()
1046 lg4ff_set_leds(hid, entry->wdata.led_state); lg4ff_led_set_brightness()
1048 entry->wdata.led_state |= 1 << i; lg4ff_led_set_brightness()
1049 lg4ff_set_leds(hid, entry->wdata.led_state); lg4ff_led_set_brightness()
1060 struct lg4ff_device_entry *entry; lg4ff_led_get_brightness() local
1068 entry = drv_data->device_props; lg4ff_led_get_brightness()
1070 if (!entry) { lg4ff_led_get_brightness()
1076 if (led_cdev == entry->wdata.led[i]) { lg4ff_led_get_brightness()
1077 value = (entry->wdata.led_state >> i) & 1; lg4ff_led_get_brightness()
1168 struct lg4ff_device_entry *entry; lg4ff_init() local
1183 entry = kzalloc(sizeof(*entry), GFP_KERNEL); lg4ff_init()
1184 if (!entry) lg4ff_init()
1186 spin_lock_init(&entry->report_lock); lg4ff_init()
1187 entry->report = report; lg4ff_init()
1188 drv_data->device_props = entry; lg4ff_init()
1248 lg4ff_init_wheel_data(&entry->wdata, &lg4ff_devices[i], mmode_wheel, real_product_id); lg4ff_init()
1278 entry->wdata.range = entry->wdata.max_range; lg4ff_init()
1279 if (entry->wdata.set_range) lg4ff_init()
1280 entry->wdata.set_range(hid, entry->wdata.range); lg4ff_init()
1284 entry->wdata.led_state = 0; lg4ff_init()
1286 entry->wdata.led[j] = NULL; lg4ff_init()
1313 entry->wdata.led[j] = led; lg4ff_init()
1321 led = entry->wdata.led[j]; lg4ff_init()
1322 entry->wdata.led[j] = NULL; lg4ff_init()
1339 kfree(entry); lg4ff_init()
1345 struct lg4ff_device_entry *entry; lg4ff_deinit() local
1353 entry = drv_data->device_props; lg4ff_deinit()
1354 if (!entry) lg4ff_deinit()
1358 if (entry->wdata.alternate_modes) { lg4ff_deinit()
1372 led = entry->wdata.led[j]; lg4ff_deinit()
1373 entry->wdata.led[j] = NULL; lg4ff_deinit()
1384 kfree(entry); lg4ff_deinit()
/linux-4.4.14/kernel/time/
timer_stats.c 53 struct entry { struct
57 struct entry *next;
107 * tstat entry structs only get allocated while collection is
117 static struct entry entries[MAX_ENTRIES];
128 #define __tstat_hashfn(entry) \
129 (((unsigned long)(entry)->timer ^ \
130 (unsigned long)(entry)->start_func ^ \
131 (unsigned long)(entry)->expire_func ^ \
132 (unsigned long)(entry)->pid ) & TSTAT_HASH_MASK)
134 #define tstat_hashentry(entry) (tstat_hash_table + __tstat_hashfn(entry))
136 static struct entry *tstat_hash_table[TSTAT_HASH_SIZE] __read_mostly;
146 static struct entry *alloc_entry(void) alloc_entry()
154 static int match_entries(struct entry *entry1, struct entry *entry2) match_entries()
163 * Look up whether an entry matching this item is present
167 static struct entry *tstat_lookup(struct entry *entry, char *comm) tstat_lookup() argument
169 struct entry **head, *curr, *prev; tstat_lookup()
171 head = tstat_hashentry(entry); tstat_lookup()
175 * The fastpath is when the entry is already hashed, tstat_lookup()
180 if (match_entries(curr, entry)) tstat_lookup()
186 * Slowpath: allocate, set up and link a new hash entry: tstat_lookup()
196 if (match_entries(curr, entry)) tstat_lookup()
205 *curr = *entry; tstat_lookup()
242 struct entry *entry, input; timer_stats_update_stats() local
260 entry = tstat_lookup(&input, comm); timer_stats_update_stats()
261 if (likely(entry)) timer_stats_update_stats()
262 entry->count++; timer_stats_update_stats()
283 struct entry *entry; tstats_show() local
308 entry = entries + i; tstats_show()
309 if (entry->flags & TIMER_DEFERRABLE) { tstats_show()
311 entry->count, entry->pid, entry->comm); tstats_show()
314 entry->count, entry->pid, entry->comm); tstats_show()
317 print_name_offset(m, (unsigned long)entry->start_func); tstats_show()
319 print_name_offset(m, (unsigned long)entry->expire_func); tstats_show()
322 events += entry->count; tstats_show()
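The __tstat_hashfn macro above XOR-folds the fields that identify a timer (the timer address, its start and expire functions, and the owning pid) and masks the result to the table size. The same hash as a standalone function (demo types; the table size just needs to be a power of two):

	#include <stdint.h>

	#define TSTAT_HASH_SIZE 512	/* illustrative; must be a power of two */
	#define TSTAT_HASH_MASK (TSTAT_HASH_SIZE - 1)

	struct tstat_entry {		/* just the fields the hash consumes */
		void *timer;
		void *start_func;
		void *expire_func;
		uint32_t pid;
	};

	static unsigned long tstat_hashfn(const struct tstat_entry *e)
	{
		return ((uintptr_t)e->timer ^
			(uintptr_t)e->start_func ^
			(uintptr_t)e->expire_func ^
			e->pid) & TSTAT_HASH_MASK;
	}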
/linux-4.4.14/Documentation/mic/mpssd/
sysfs.c 26 readsysfs(char *dir, char *entry) readsysfs() argument
35 snprintf(filename, PATH_MAX, "%s/%s", MICSYSFSDIR, entry); readsysfs()
38 "%s/%s/%s", MICSYSFSDIR, dir, entry); readsysfs()
42 mpsslog("Failed to open sysfs entry '%s': %s\n", readsysfs()
49 mpsslog("Failed to read sysfs entry '%s': %s\n", readsysfs()
68 setsysfs(char *dir, char *entry, char *value) setsysfs() argument
75 snprintf(filename, PATH_MAX, "%s/%s", MICSYSFSDIR, entry); setsysfs()
78 MICSYSFSDIR, dir, entry); setsysfs()
80 oldvalue = readsysfs(dir, entry); setsysfs()
85 mpsslog("Failed to open sysfs entry '%s': %s\n", setsysfs()
93 mpsslog("Failed to write new sysfs entry '%s': %s\n", setsysfs()
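readsysfs() and setsysfs() above build a path of the form <base>[/<dir>]/<entry> under MICSYSFSDIR and then do a plain open/read/write on it. A trimmed sketch of the read side, with a generic base directory and minimal error handling:

	#include <fcntl.h>
	#include <limits.h>
	#include <stdio.h>
	#include <unistd.h>

	/* compose "<base>[/<dir>]/<entry>" and read one value; len must be > 0 */
	static ssize_t read_sysfs(const char *base, const char *dir,
				  const char *entry, char *buf, size_t len)
	{
		char path[PATH_MAX];
		ssize_t n;
		int fd;

		if (dir)
			snprintf(path, sizeof(path), "%s/%s/%s", base, dir, entry);
		else
			snprintf(path, sizeof(path), "%s/%s", base, entry);

		fd = open(path, O_RDONLY);
		if (fd < 0)
			return -1;
		n = read(fd, buf, len - 1);
		if (n >= 0)
			buf[n] = '\0';
		close(fd);
		return n;
	}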
/linux-4.4.14/fs/btrfs/
ordered-data.c 31 static u64 entry_end(struct btrfs_ordered_extent *entry) entry_end() argument
33 if (entry->file_offset + entry->len < entry->file_offset) entry_end()
35 return entry->file_offset + entry->len; entry_end()
46 struct btrfs_ordered_extent *entry; tree_insert() local
50 entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node); tree_insert()
52 if (file_offset < entry->file_offset) tree_insert()
54 else if (file_offset >= entry_end(entry)) tree_insert()
83 struct btrfs_ordered_extent *entry; __tree_search() local
87 entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); __tree_search()
89 prev_entry = entry; __tree_search()
91 if (file_offset < entry->file_offset) __tree_search()
93 else if (file_offset >= entry_end(entry)) __tree_search()
128 * helper to check if a given offset is inside a given entry
130 static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset) offset_in_entry() argument
132 if (file_offset < entry->file_offset || offset_in_entry()
133 entry->file_offset + entry->len <= file_offset) offset_in_entry()
138 static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset, range_overlaps() argument
141 if (file_offset + len <= entry->file_offset || range_overlaps()
142 entry->file_offset + entry->len <= file_offset) range_overlaps()
157 struct btrfs_ordered_extent *entry; tree_search() local
160 entry = rb_entry(tree->last, struct btrfs_ordered_extent, tree_search()
162 if (offset_in_entry(entry, file_offset)) tree_search()
191 struct btrfs_ordered_extent *entry; __btrfs_add_ordered_extent() local
194 entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS); __btrfs_add_ordered_extent()
195 if (!entry) __btrfs_add_ordered_extent()
198 entry->file_offset = file_offset; __btrfs_add_ordered_extent()
199 entry->start = start; __btrfs_add_ordered_extent()
200 entry->len = len; __btrfs_add_ordered_extent()
201 entry->disk_len = disk_len; __btrfs_add_ordered_extent()
202 entry->bytes_left = len; __btrfs_add_ordered_extent()
203 entry->inode = igrab(inode); __btrfs_add_ordered_extent()
204 entry->compress_type = compress_type; __btrfs_add_ordered_extent()
205 entry->truncated_len = (u64)-1; __btrfs_add_ordered_extent()
207 set_bit(type, &entry->flags); __btrfs_add_ordered_extent()
210 set_bit(BTRFS_ORDERED_DIRECT, &entry->flags); __btrfs_add_ordered_extent()
213 atomic_set(&entry->refs, 1); __btrfs_add_ordered_extent()
214 init_waitqueue_head(&entry->wait); __btrfs_add_ordered_extent()
215 INIT_LIST_HEAD(&entry->list); __btrfs_add_ordered_extent()
216 INIT_LIST_HEAD(&entry->root_extent_list); __btrfs_add_ordered_extent()
217 INIT_LIST_HEAD(&entry->work_list); __btrfs_add_ordered_extent()
218 init_completion(&entry->completion); __btrfs_add_ordered_extent()
219 INIT_LIST_HEAD(&entry->log_list); __btrfs_add_ordered_extent()
220 INIT_LIST_HEAD(&entry->trans_list); __btrfs_add_ordered_extent()
222 trace_btrfs_ordered_extent_add(inode, entry); __btrfs_add_ordered_extent()
226 &entry->rb_node); __btrfs_add_ordered_extent()
232 list_add_tail(&entry->root_extent_list, __btrfs_add_ordered_extent()
278 struct btrfs_ordered_extent *entry, btrfs_add_ordered_sum()
285 list_add_tail(&sum->list, &entry->list); btrfs_add_ordered_sum()
307 struct btrfs_ordered_extent *entry = NULL; btrfs_dec_test_first_ordered_pending() local
322 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); btrfs_dec_test_first_ordered_pending()
323 if (!offset_in_entry(entry, *file_offset)) { btrfs_dec_test_first_ordered_pending()
328 dec_start = max(*file_offset, entry->file_offset); btrfs_dec_test_first_ordered_pending()
329 dec_end = min(*file_offset + io_size, entry->file_offset + btrfs_dec_test_first_ordered_pending()
330 entry->len); btrfs_dec_test_first_ordered_pending()
337 if (to_dec > entry->bytes_left) { btrfs_dec_test_first_ordered_pending()
340 entry->bytes_left, to_dec); btrfs_dec_test_first_ordered_pending()
342 entry->bytes_left -= to_dec; btrfs_dec_test_first_ordered_pending()
344 set_bit(BTRFS_ORDERED_IOERR, &entry->flags); btrfs_dec_test_first_ordered_pending()
346 if (entry->bytes_left == 0) { btrfs_dec_test_first_ordered_pending()
347 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); btrfs_dec_test_first_ordered_pending()
351 if (waitqueue_active(&entry->wait)) btrfs_dec_test_first_ordered_pending()
352 wake_up(&entry->wait); btrfs_dec_test_first_ordered_pending()
357 if (!ret && cached && entry) { btrfs_dec_test_first_ordered_pending()
358 *cached = entry; btrfs_dec_test_first_ordered_pending()
359 atomic_inc(&entry->refs); btrfs_dec_test_first_ordered_pending()
380 struct btrfs_ordered_extent *entry = NULL; btrfs_dec_test_ordered_pending() local
387 entry = *cached; btrfs_dec_test_ordered_pending()
397 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); btrfs_dec_test_ordered_pending()
399 if (!offset_in_entry(entry, file_offset)) { btrfs_dec_test_ordered_pending()
404 if (io_size > entry->bytes_left) { btrfs_dec_test_ordered_pending()
407 entry->bytes_left, io_size); btrfs_dec_test_ordered_pending()
409 entry->bytes_left -= io_size; btrfs_dec_test_ordered_pending()
411 set_bit(BTRFS_ORDERED_IOERR, &entry->flags); btrfs_dec_test_ordered_pending()
413 if (entry->bytes_left == 0) { btrfs_dec_test_ordered_pending()
414 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); btrfs_dec_test_ordered_pending()
418 if (waitqueue_active(&entry->wait)) btrfs_dec_test_ordered_pending()
419 wake_up(&entry->wait); btrfs_dec_test_ordered_pending()
424 if (!ret && cached && entry) { btrfs_dec_test_ordered_pending()
425 *cached = entry; btrfs_dec_test_ordered_pending()
426 atomic_inc(&entry->refs); btrfs_dec_test_ordered_pending()
559 void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) btrfs_put_ordered_extent() argument
564 trace_btrfs_ordered_extent_put(entry->inode, entry); btrfs_put_ordered_extent()
566 if (atomic_dec_and_test(&entry->refs)) { btrfs_put_ordered_extent()
567 ASSERT(list_empty(&entry->log_list)); btrfs_put_ordered_extent()
568 ASSERT(list_empty(&entry->trans_list)); btrfs_put_ordered_extent()
569 ASSERT(list_empty(&entry->root_extent_list)); btrfs_put_ordered_extent()
570 ASSERT(RB_EMPTY_NODE(&entry->rb_node)); btrfs_put_ordered_extent()
571 if (entry->inode) btrfs_put_ordered_extent()
572 btrfs_add_delayed_iput(entry->inode); btrfs_put_ordered_extent()
573 while (!list_empty(&entry->list)) { btrfs_put_ordered_extent()
574 cur = entry->list.next; btrfs_put_ordered_extent()
579 kmem_cache_free(btrfs_ordered_extent_cache, entry); btrfs_put_ordered_extent()
588 struct btrfs_ordered_extent *entry) btrfs_remove_ordered_extent()
597 node = &entry->rb_node; btrfs_remove_ordered_extent()
602 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); btrfs_remove_ordered_extent()
603 if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags)) btrfs_remove_ordered_extent()
635 list_del_init(&entry->root_extent_list); btrfs_remove_ordered_extent()
638 trace_btrfs_ordered_extent_remove(inode, entry); btrfs_remove_ordered_extent()
647 wake_up(&entry->wait); btrfs_remove_ordered_extent()
752 struct btrfs_ordered_extent *entry, btrfs_start_ordered_extent()
755 u64 start = entry->file_offset; btrfs_start_ordered_extent()
756 u64 end = start + entry->len - 1; btrfs_start_ordered_extent()
758 trace_btrfs_ordered_extent_start(inode, entry); btrfs_start_ordered_extent()
765 if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags)) btrfs_start_ordered_extent()
768 wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, btrfs_start_ordered_extent()
769 &entry->flags)); btrfs_start_ordered_extent()
842 struct btrfs_ordered_extent *entry = NULL; btrfs_lookup_ordered_extent() local
850 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); btrfs_lookup_ordered_extent()
851 if (!offset_in_entry(entry, file_offset)) btrfs_lookup_ordered_extent()
852 entry = NULL; btrfs_lookup_ordered_extent()
853 if (entry) btrfs_lookup_ordered_extent()
854 atomic_inc(&entry->refs); btrfs_lookup_ordered_extent()
857 return entry; btrfs_lookup_ordered_extent()
869 struct btrfs_ordered_extent *entry = NULL; btrfs_lookup_ordered_range() local
881 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); btrfs_lookup_ordered_range()
882 if (range_overlaps(entry, file_offset, len)) btrfs_lookup_ordered_range()
885 if (entry->file_offset >= file_offset + len) { btrfs_lookup_ordered_range()
886 entry = NULL; btrfs_lookup_ordered_range()
889 entry = NULL; btrfs_lookup_ordered_range()
895 if (entry) btrfs_lookup_ordered_range()
896 atomic_inc(&entry->refs); btrfs_lookup_ordered_range()
898 return entry; btrfs_lookup_ordered_range()
924 struct btrfs_ordered_extent *entry = NULL; btrfs_lookup_first_ordered_extent() local
932 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); btrfs_lookup_first_ordered_extent()
933 atomic_inc(&entry->refs); btrfs_lookup_first_ordered_extent()
936 return entry; btrfs_lookup_first_ordered_extent()
1012 /* We treat this entry as if it doesn't exist */ btrfs_ordered_update_i_size()
277 btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_extent *entry, struct btrfs_ordered_sum *sum) btrfs_add_ordered_sum() argument
587 btrfs_remove_ordered_extent(struct inode *inode, struct btrfs_ordered_extent *entry) btrfs_remove_ordered_extent() argument
751 btrfs_start_ordered_extent(struct inode *inode, struct btrfs_ordered_extent *entry, int wait) btrfs_start_ordered_extent() argument
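offset_in_entry() and range_overlaps() above are ordinary half-open interval tests over [file_offset, file_offset + len). In isolation, with a demo struct:

	#include <stdbool.h>
	#include <stdint.h>

	struct extent {			/* illustrative stand-in */
		uint64_t file_offset;
		uint64_t len;
	};

	/* point containment: file_offset <= off < file_offset + len */
	static bool offset_in_extent(const struct extent *e, uint64_t off)
	{
		return off >= e->file_offset && off < e->file_offset + e->len;
	}

	/* overlap: neither range ends at or before the other begins */
	static bool ranges_overlap(const struct extent *e, uint64_t off, uint64_t len)
	{
		return off + len > e->file_offset &&
		       e->file_offset + e->len > off;
	}

The kernel versions additionally guard file_offset + len against wraparound, via entry_end() at the top of the file.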
free-space-cache.c 521 struct btrfs_free_space_entry *entry; io_ctl_add_entry() local
526 entry = io_ctl->cur; io_ctl_add_entry()
527 entry->offset = cpu_to_le64(offset); io_ctl_add_entry()
528 entry->bytes = cpu_to_le64(bytes); io_ctl_add_entry()
529 entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP : io_ctl_add_entry()
589 struct btrfs_free_space *entry, u8 *type) io_ctl_read_entry()
601 entry->offset = le64_to_cpu(e->offset); io_ctl_read_entry()
602 entry->bytes = le64_to_cpu(e->bytes); io_ctl_read_entry()
616 struct btrfs_free_space *entry) io_ctl_read_bitmap()
624 memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); io_ctl_read_bitmap()
1094 struct btrfs_free_space *entry = list_for_each_safe() local
1097 ret = io_ctl_add_bitmap(io_ctl, entry->bitmap); list_for_each_safe()
1100 list_del_init(&entry->list); list_for_each_safe()
1125 struct btrfs_free_space *entry = list_for_each_safe() local
1127 list_del_init(&entry->list); list_for_each_safe()
1452 * we could have a bitmap entry and an extent entry tree_insert_offset()
1454 * the extent entry to always be found first if we do a tree_insert_offset()
1458 * if we're inserting a bitmap and we find an entry at tree_insert_offset()
1459 * this offset, we want to go right, or after this entry tree_insert_offset()
1498 struct btrfs_free_space *entry, *prev = NULL; tree_search_offset() local
1500 /* find entry that is closest to the 'offset' */ tree_search_offset()
1503 entry = NULL; tree_search_offset()
1507 entry = rb_entry(n, struct btrfs_free_space, offset_index); tree_search_offset()
1508 prev = entry; tree_search_offset()
1510 if (offset < entry->offset) tree_search_offset()
1512 else if (offset > entry->offset) tree_search_offset()
1519 if (!entry) tree_search_offset()
1521 if (entry->bitmap) tree_search_offset()
1522 return entry; tree_search_offset()
1525 * a bitmap entry and an extent entry may share the same offset; tree_search_offset()
1526 * in that case, the bitmap entry comes after the extent entry. tree_search_offset()
1531 entry = rb_entry(n, struct btrfs_free_space, offset_index); tree_search_offset()
1532 if (entry->offset != offset) tree_search_offset()
1535 WARN_ON(!entry->bitmap); tree_search_offset()
1536 return entry; tree_search_offset()
1537 } else if (entry) { tree_search_offset()
1538 if (entry->bitmap) { tree_search_offset()
1540 * if previous extent entry covers the offset, tree_search_offset()
1541 * we should return it instead of the bitmap entry tree_search_offset()
1543 n = rb_prev(&entry->offset_index); tree_search_offset()
1549 entry = prev; tree_search_offset()
1552 return entry; tree_search_offset()
1558 /* find last entry before the 'offset' */ tree_search_offset()
1559 entry = prev; tree_search_offset()
1560 if (entry->offset > offset) { tree_search_offset()
1561 n = rb_prev(&entry->offset_index); tree_search_offset()
1563 entry = rb_entry(n, struct btrfs_free_space, tree_search_offset()
1565 ASSERT(entry->offset <= offset); tree_search_offset()
1568 return entry; tree_search_offset()
1574 if (entry->bitmap) { tree_search_offset()
1575 n = rb_prev(&entry->offset_index); tree_search_offset()
1583 if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) tree_search_offset()
1584 return entry; tree_search_offset()
1585 } else if (entry->offset + entry->bytes > offset) tree_search_offset()
1586 return entry; tree_search_offset()
1592 if (entry->bitmap) { tree_search_offset()
1593 if (entry->offset + BITS_PER_BITMAP * tree_search_offset()
1597 if (entry->offset + entry->bytes > offset) tree_search_offset()
1601 n = rb_next(&entry->offset_index); tree_search_offset()
1604 entry = rb_entry(n, struct btrfs_free_space, offset_index); tree_search_offset()
1606 return entry; tree_search_offset()
1678 * we want the extent entry threshold to always be at most 1/2 the max recalculate_thresholds()
1789 struct btrfs_free_space *entry; find_free_space() local
1798 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1); find_free_space()
1799 if (!entry) find_free_space()
1802 for (node = &entry->offset_index; node; node = rb_next(node)) { find_free_space()
1803 entry = rb_entry(node, struct btrfs_free_space, offset_index); find_free_space()
1804 if (entry->bytes < *bytes) { find_free_space()
1805 if (entry->bytes > *max_extent_size) find_free_space()
1806 *max_extent_size = entry->bytes; find_free_space()
1814 tmp = entry->offset - ctl->start + align - 1; find_free_space()
1817 align_off = tmp - entry->offset; find_free_space()
1820 tmp = entry->offset; find_free_space()
1823 if (entry->bytes < *bytes + align_off) { find_free_space()
1824 if (entry->bytes > *max_extent_size) find_free_space()
1825 *max_extent_size = entry->bytes; find_free_space()
1829 if (entry->bitmap) { find_free_space()
1832 ret = search_bitmap(ctl, entry, &tmp, &size, true); find_free_space()
1836 return entry; find_free_space()
1844 *bytes = entry->bytes - align_off; find_free_space()
1845 return entry; find_free_space()
1914 * no entry after this bitmap, but we still have bytes to remove_from_bitmap()
1924 * if the next entry isn't a bitmap we need to return to let the remove_from_bitmap()
2011 * entry. use_bitmap()
2050 struct btrfs_free_space *entry; insert_into_bitmap() local
2062 entry = rb_entry(node, struct btrfs_free_space, offset_index); insert_into_bitmap()
2063 if (!entry->bitmap) { insert_into_bitmap()
2068 if (entry->offset == offset_to_bitmap(ctl, offset)) { insert_into_bitmap()
2069 bytes_added = add_bytes_to_bitmap(ctl, entry, insert_into_bitmap()
2275 * entry, try to see if there's adjacent free space in bitmap entries, and if
2278 * because we attempt to satisfy them based on a single cache entry, and never
2280 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
2344 * attempt to steal space from bitmaps if we're adding an extent entry. __btrfs_add_free_space()
2423 /* Not enough bytes in this entry to satisfy us */ btrfs_remove_free_space()
2465 "entry offset %llu, bytes %llu, bitmap %s", btrfs_dump_free_space()
2508 struct btrfs_free_space *entry; __btrfs_return_cluster_to_free_space() local
2523 entry = rb_entry(node, struct btrfs_free_space, offset_index); __btrfs_return_cluster_to_free_space()
2524 node = rb_next(&entry->offset_index); __btrfs_return_cluster_to_free_space()
2525 rb_erase(&entry->offset_index, &cluster->root); __btrfs_return_cluster_to_free_space()
2526 RB_CLEAR_NODE(&entry->offset_index); __btrfs_return_cluster_to_free_space()
2528 bitmap = (entry->bitmap != NULL); __btrfs_return_cluster_to_free_space()
2530 try_merge_free_space(ctl, entry, false); __btrfs_return_cluster_to_free_space()
2531 steal_from_bitmap(ctl, entry, false); __btrfs_return_cluster_to_free_space()
2534 entry->offset, &entry->offset_index, bitmap); __btrfs_return_cluster_to_free_space()
2597 struct btrfs_free_space *entry = NULL; btrfs_find_space_for_alloc() local
2604 entry = find_free_space(ctl, &offset, &bytes_search, btrfs_find_space_for_alloc()
2606 if (!entry) btrfs_find_space_for_alloc()
2610 if (entry->bitmap) { btrfs_find_space_for_alloc()
2611 bitmap_clear_bits(ctl, entry, offset, bytes); btrfs_find_space_for_alloc()
2612 if (!entry->bytes) btrfs_find_space_for_alloc()
2613 free_bitmap(ctl, entry); btrfs_find_space_for_alloc()
2615 unlink_free_space(ctl, entry); btrfs_find_space_for_alloc()
2616 align_gap_len = offset - entry->offset; btrfs_find_space_for_alloc()
2617 align_gap = entry->offset; btrfs_find_space_for_alloc()
2619 entry->offset = offset + bytes; btrfs_find_space_for_alloc()
2620 WARN_ON(entry->bytes < bytes + align_gap_len); btrfs_find_space_for_alloc()
2622 entry->bytes -= bytes + align_gap_len; btrfs_find_space_for_alloc()
2623 if (!entry->bytes) btrfs_find_space_for_alloc()
2624 kmem_cache_free(btrfs_free_space_cachep, entry); btrfs_find_space_for_alloc()
2626 link_free_space(ctl, entry); btrfs_find_space_for_alloc()
2681 struct btrfs_free_space *entry, btrfs_alloc_from_bitmap()
2694 err = search_bitmap(ctl, entry, &search_start, &search_bytes, true); btrfs_alloc_from_bitmap()
2702 __bitmap_clear_bits(ctl, entry, ret, bytes); btrfs_alloc_from_bitmap()
2717 struct btrfs_free_space *entry = NULL; btrfs_alloc_from_cluster() local
2732 entry = rb_entry(node, struct btrfs_free_space, offset_index); btrfs_alloc_from_cluster()
2734 if (entry->bytes < bytes && entry->bytes > *max_extent_size) btrfs_alloc_from_cluster()
2735 *max_extent_size = entry->bytes; btrfs_alloc_from_cluster()
2737 if (entry->bytes < bytes || btrfs_alloc_from_cluster()
2738 (!entry->bitmap && entry->offset < min_start)) { btrfs_alloc_from_cluster()
2739 node = rb_next(&entry->offset_index); btrfs_alloc_from_cluster()
2742 entry = rb_entry(node, struct btrfs_free_space, btrfs_alloc_from_cluster()
2747 if (entry->bitmap) { btrfs_alloc_from_cluster()
2749 cluster, entry, bytes, btrfs_alloc_from_cluster()
2753 node = rb_next(&entry->offset_index); btrfs_alloc_from_cluster()
2756 entry = rb_entry(node, struct btrfs_free_space, btrfs_alloc_from_cluster()
2762 ret = entry->offset; btrfs_alloc_from_cluster()
2764 entry->offset += bytes; btrfs_alloc_from_cluster()
2765 entry->bytes -= bytes; btrfs_alloc_from_cluster()
2768 if (entry->bytes == 0) btrfs_alloc_from_cluster()
2769 rb_erase(&entry->offset_index, &cluster->root); btrfs_alloc_from_cluster()
2781 if (entry->bytes == 0) { btrfs_alloc_from_cluster()
2783 if (entry->bitmap) { btrfs_alloc_from_cluster()
2784 kfree(entry->bitmap); btrfs_alloc_from_cluster()
2788 kmem_cache_free(btrfs_free_space_cachep, entry); btrfs_alloc_from_cluster()
2797 struct btrfs_free_space *entry, btrfs_bitmap_cluster()
2813 i = offset_to_bit(entry->offset, ctl->unit, btrfs_bitmap_cluster()
2814 max_t(u64, offset, entry->offset)); btrfs_bitmap_cluster()
2822 if (entry->max_extent_size && btrfs_bitmap_cluster()
2823 entry->max_extent_size < cont1_bytes) btrfs_bitmap_cluster()
2827 for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) { btrfs_bitmap_cluster()
2828 next_zero = find_next_zero_bit(entry->bitmap, btrfs_bitmap_cluster()
2842 entry->max_extent_size = (u64)max_bits * ctl->unit; btrfs_bitmap_cluster()
2861 cluster->window_start = start * ctl->unit + entry->offset; btrfs_bitmap_cluster()
2862 rb_erase(&entry->offset_index, &ctl->free_space_offset); btrfs_bitmap_cluster()
2863 ret = tree_insert_offset(&cluster->root, entry->offset, btrfs_bitmap_cluster()
2864 &entry->offset_index, 1); btrfs_bitmap_cluster()
2885 struct btrfs_free_space *entry = NULL; setup_cluster_no_bitmap() local
2892 entry = tree_search_offset(ctl, offset, 0, 1); setup_cluster_no_bitmap()
2893 if (!entry) setup_cluster_no_bitmap()
2898 * extent entry. setup_cluster_no_bitmap()
2900 while (entry->bitmap || entry->bytes < min_bytes) { setup_cluster_no_bitmap()
2901 if (entry->bitmap && list_empty(&entry->list)) setup_cluster_no_bitmap()
2902 list_add_tail(&entry->list, bitmaps); setup_cluster_no_bitmap()
2903 node = rb_next(&entry->offset_index); setup_cluster_no_bitmap()
2906 entry = rb_entry(node, struct btrfs_free_space, offset_index); setup_cluster_no_bitmap()
2909 window_free = entry->bytes; setup_cluster_no_bitmap()
2910 max_extent = entry->bytes; setup_cluster_no_bitmap()
2911 first = entry; setup_cluster_no_bitmap()
2912 last = entry; setup_cluster_no_bitmap()
2914 for (node = rb_next(&entry->offset_index); node; setup_cluster_no_bitmap()
2915 node = rb_next(&entry->offset_index)) { setup_cluster_no_bitmap()
2916 entry = rb_entry(node, struct btrfs_free_space, offset_index); setup_cluster_no_bitmap()
2918 if (entry->bitmap) { setup_cluster_no_bitmap()
2919 if (list_empty(&entry->list)) setup_cluster_no_bitmap()
2920 list_add_tail(&entry->list, bitmaps); setup_cluster_no_bitmap()
2924 if (entry->bytes < min_bytes) setup_cluster_no_bitmap()
2927 last = entry; setup_cluster_no_bitmap()
2928 window_free += entry->bytes; setup_cluster_no_bitmap()
2929 if (entry->bytes > max_extent) setup_cluster_no_bitmap()
2930 max_extent = entry->bytes; setup_cluster_no_bitmap()
2947 entry = rb_entry(node, struct btrfs_free_space, offset_index); setup_cluster_no_bitmap()
2948 node = rb_next(&entry->offset_index); setup_cluster_no_bitmap()
2949 if (entry->bitmap || entry->bytes < min_bytes) setup_cluster_no_bitmap()
2952 rb_erase(&entry->offset_index, &ctl->free_space_offset); setup_cluster_no_bitmap()
2953 ret = tree_insert_offset(&cluster->root, entry->offset, setup_cluster_no_bitmap()
2954 &entry->offset_index, 0); setup_cluster_no_bitmap()
2955 total_size += entry->bytes; setup_cluster_no_bitmap()
2957 } while (node && entry != last); setup_cluster_no_bitmap()
2975 struct btrfs_free_space *entry = NULL; setup_cluster_bitmap() local
2987 entry = list_first_entry(bitmaps, struct btrfs_free_space, list); setup_cluster_bitmap()
2989 if (!entry || entry->offset != bitmap_offset) { setup_cluster_bitmap()
2990 entry = tree_search_offset(ctl, bitmap_offset, 1, 0); setup_cluster_bitmap()
2991 if (entry && list_empty(&entry->list)) setup_cluster_bitmap()
2992 list_add(&entry->list, bitmaps); setup_cluster_bitmap()
2995 list_for_each_entry(entry, bitmaps, list) { list_for_each_entry()
2996 if (entry->bytes < bytes) list_for_each_entry()
2998 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, list_for_each_entry()
3025 struct btrfs_free_space *entry, *tmp; btrfs_find_space_cluster() local
3078 list_for_each_entry_safe(entry, tmp, &bitmaps, list) btrfs_find_space_cluster()
3079 list_del_init(&entry->list); btrfs_find_space_cluster()
3160 struct btrfs_free_space *entry; trim_no_bitmap() local
3179 entry = tree_search_offset(ctl, start, 0, 1); trim_no_bitmap()
3180 if (!entry) { trim_no_bitmap()
3187 while (entry->bitmap) { trim_no_bitmap()
3188 node = rb_next(&entry->offset_index); trim_no_bitmap()
3194 entry = rb_entry(node, struct btrfs_free_space, trim_no_bitmap()
3198 if (entry->offset >= end) { trim_no_bitmap()
3204 extent_start = entry->offset; trim_no_bitmap()
3205 extent_bytes = entry->bytes; trim_no_bitmap()
3214 unlink_free_space(ctl, entry); trim_no_bitmap()
3215 kmem_cache_free(btrfs_free_space_cachep, entry); trim_no_bitmap()
3245 struct btrfs_free_space *entry; trim_bitmaps() local
3264 entry = tree_search_offset(ctl, offset, 1, 0); trim_bitmaps()
3265 if (!entry) { trim_bitmaps()
3273 ret2 = search_bitmap(ctl, entry, &start, &bytes, false); trim_bitmaps()
3288 bitmap_clear_bits(ctl, entry, start, bytes); trim_bitmaps()
3289 if (entry->bytes == 0) trim_bitmaps()
3290 free_bitmap(ctl, entry); trim_bitmaps()
3358 * We've left one free space entry and other tasks trimming btrfs_put_block_group_trimming()
3359 * this block group have left one entry each. Free them. btrfs_put_block_group_trimming()
3400 struct btrfs_free_space *entry = NULL; btrfs_find_ino_for_alloc() local
3408 entry = rb_entry(rb_first(&ctl->free_space_offset), btrfs_find_ino_for_alloc()
3411 if (!entry->bitmap) { btrfs_find_ino_for_alloc()
3412 ino = entry->offset; btrfs_find_ino_for_alloc()
3414 unlink_free_space(ctl, entry); btrfs_find_ino_for_alloc()
3415 entry->offset++; btrfs_find_ino_for_alloc()
3416 entry->bytes--; btrfs_find_ino_for_alloc()
3417 if (!entry->bytes) btrfs_find_ino_for_alloc()
3418 kmem_cache_free(btrfs_free_space_cachep, entry); btrfs_find_ino_for_alloc()
3420 link_free_space(ctl, entry); btrfs_find_ino_for_alloc()
3426 ret = search_bitmap(ctl, entry, &offset, &count, true); btrfs_find_ino_for_alloc()
3431 bitmap_clear_bits(ctl, entry, offset, 1); btrfs_find_ino_for_alloc()
3432 if (entry->bytes == 0) btrfs_find_ino_for_alloc()
3433 free_bitmap(ctl, entry); btrfs_find_ino_for_alloc()
3557 * Use this if you need to make a bitmap or extent entry specifically, it
588 io_ctl_read_entry(struct btrfs_io_ctl *io_ctl, struct btrfs_free_space *entry, u8 *type) io_ctl_read_entry() argument
615 io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl, struct btrfs_free_space *entry) io_ctl_read_bitmap() argument
2679 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, struct btrfs_free_space *entry, u64 bytes, u64 min_start, u64 *max_extent_size) btrfs_alloc_from_bitmap() argument
2796 btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *entry, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 cont1_bytes, u64 min_bytes) btrfs_bitmap_cluster() argument
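The btrfs excerpts above juggle two kinds of free-space entries: bitmaps, where allocation clears bits, and plain extents, which btrfs_find_space_for_alloc() trims at the front while remembering the alignment gap. A minimal userspace sketch of that trim arithmetic (fake_entry is a stand-in, not the kernel's btrfs_free_space):

    /* Minimal userspace sketch of the extent-trim arithmetic in
     * btrfs_find_space_for_alloc() above; illustrative, not kernel code. */
    #include <assert.h>
    #include <stdint.h>

    struct fake_entry { uint64_t offset; uint64_t bytes; };

    /* Carve [alloc_off, alloc_off + alloc_bytes) out of the front of an
     * extent entry; returns the alignment gap (entry start .. alloc_off)
     * that the caller must re-link as its own free-space entry. */
    static uint64_t carve_front(struct fake_entry *e,
                                uint64_t alloc_off, uint64_t alloc_bytes)
    {
        uint64_t align_gap_len = alloc_off - e->offset;

        assert(e->bytes >= alloc_bytes + align_gap_len);
        e->offset = alloc_off + alloc_bytes;
        e->bytes -= alloc_bytes + align_gap_len;
        return align_gap_len;
    }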
/linux-4.4.14/drivers/misc/vmw_vmci/
H A Dvmci_doorbell.c82 /* This is a one-entry cache used by the index allocation. */
99 struct dbell_entry *entry; vmci_dbell_get_priv_flags() local
107 entry = container_of(resource, struct dbell_entry, resource); vmci_dbell_get_priv_flags()
108 *priv_flags = entry->priv_flags; vmci_dbell_get_priv_flags()
124 * Find doorbell entry by bitmap index.
141 * Add the given entry to the index table. This will take a reference to the
142 * entry's resource so that the entry is not deleted before it is removed from
145 static void dbell_index_table_add(struct dbell_entry *entry) dbell_index_table_add() argument
150 vmci_resource_get(&entry->resource); dbell_index_table_add()
194 entry->idx = new_notify_idx; dbell_index_table_add()
195 bucket = VMCI_DOORBELL_HASH(entry->idx); dbell_index_table_add()
196 hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]); dbell_index_table_add()
202 * Remove the given entry from the index table. This will release() the
203 * entry's resource.
205 static void dbell_index_table_remove(struct dbell_entry *entry) dbell_index_table_remove() argument
209 hlist_del_init(&entry->node); dbell_index_table_remove()
212 if (entry->idx == max_notify_idx - 1) { dbell_index_table_remove()
214 * If we delete an entry with the maximum known dbell_index_table_remove()
225 last_notify_idx_released = entry->idx; dbell_index_table_remove()
229 vmci_resource_put(&entry->resource); dbell_index_table_remove()
290 struct dbell_entry *entry = container_of(work, dbell_delayed_dispatch() local
293 entry->notify_cb(entry->client_data); dbell_delayed_dispatch()
294 vmci_resource_put(&entry->resource); dbell_delayed_dispatch()
302 struct dbell_entry *entry; vmci_dbell_host_context_notify() local
319 entry = container_of(resource, struct dbell_entry, resource); vmci_dbell_host_context_notify()
320 if (entry->run_delayed) { vmci_dbell_host_context_notify()
321 schedule_work(&entry->work); vmci_dbell_host_context_notify()
323 entry->notify_cb(entry->client_data); vmci_dbell_host_context_notify()
417 struct dbell_entry *entry; vmci_doorbell_create() local
425 entry = kmalloc(sizeof(*entry), GFP_KERNEL); vmci_doorbell_create()
426 if (entry == NULL) { vmci_doorbell_create()
427 pr_warn("Failed allocating memory for datagram entry\n"); vmci_doorbell_create()
462 entry->idx = 0; vmci_doorbell_create()
463 INIT_HLIST_NODE(&entry->node); vmci_doorbell_create()
464 entry->priv_flags = priv_flags; vmci_doorbell_create()
465 INIT_WORK(&entry->work, dbell_delayed_dispatch); vmci_doorbell_create()
466 entry->run_delayed = flags & VMCI_FLAG_DELAYED_CB; vmci_doorbell_create()
467 entry->notify_cb = notify_cb; vmci_doorbell_create()
468 entry->client_data = client_data; vmci_doorbell_create()
469 atomic_set(&entry->active, 0); vmci_doorbell_create()
471 result = vmci_resource_add(&entry->resource, vmci_doorbell_create()
480 new_handle = vmci_resource_handle(&entry->resource); vmci_doorbell_create()
482 dbell_index_table_add(entry); vmci_doorbell_create()
483 result = dbell_link(new_handle, entry->idx); vmci_doorbell_create()
487 atomic_set(&entry->active, 1); vmci_doorbell_create()
495 dbell_index_table_remove(entry); vmci_doorbell_create()
496 vmci_resource_remove(&entry->resource); vmci_doorbell_create()
498 kfree(entry); vmci_doorbell_create()
512 struct dbell_entry *entry; vmci_doorbell_destroy() local
526 entry = container_of(resource, struct dbell_entry, resource); vmci_doorbell_destroy()
531 dbell_index_table_remove(entry); vmci_doorbell_destroy()
558 vmci_resource_put(&entry->resource); vmci_doorbell_destroy()
559 vmci_resource_remove(&entry->resource); vmci_doorbell_destroy()
561 kfree(entry); vmci_doorbell_destroy()
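vmci_doorbell_create() above builds the entry in stages (resource add, index-table add, hypervisor link) and, on any failure, unwinds only the stages that completed, in reverse order. A self-contained skeleton of that shape with hypothetical step names:

    /* Create-with-unwind skeleton mirroring vmci_doorbell_create() above;
     * the step_*()/undo_*() helpers are hypothetical stand-ins. */
    int step_add_resource(void);
    int step_index_table_add(void);
    int step_link(void);
    void undo_index_table_remove(void);
    void undo_resource_remove(void);

    int create_with_unwind(void)
    {
        int result;

        result = step_add_resource();
        if (result != 0)
            return result;

        result = step_index_table_add();
        if (result != 0)
            goto undo_resource;

        result = step_link();
        if (result != 0)
            goto undo_index;

        return 0;                  /* fully constructed: mark active */

    undo_index:
        undo_index_table_remove();
    undo_resource:
        undo_resource_remove();    /* also frees the entry */
        return result;
    }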
H A Dvmci_queue_pair.c888 * Finds the entry in the list corresponding to a given handle. Assumes
894 struct qp_entry *entry; qp_list_find() local
899 list_for_each_entry(entry, &qp_list->head, list_item) { qp_list_find()
900 if (vmci_handle_is_equal(entry->handle, handle)) qp_list_find()
901 return entry; qp_list_find()
908 * Finds the entry in the list corresponding to a given handle.
913 struct qp_guest_endpoint *entry; qp_guest_handle_to_entry() local
916 entry = qp ? container_of( qp_guest_handle_to_entry()
918 return entry; qp_guest_handle_to_entry()
922 * Finds the entry in the list corresponding to a given handle.
927 struct qp_broker_entry *entry; qp_broker_handle_to_entry() local
930 entry = qp ? container_of( qp_broker_handle_to_entry()
932 return entry; qp_broker_handle_to_entry()
958 * Allocates a queue_pair rid (and handle) iff the given entry has
973 struct qp_guest_endpoint *entry; qp_guest_endpoint_create() local
984 entry = kzalloc(sizeof(*entry), GFP_KERNEL); qp_guest_endpoint_create()
985 if (entry) { qp_guest_endpoint_create()
986 entry->qp.peer = peer; qp_guest_endpoint_create()
987 entry->qp.flags = flags; qp_guest_endpoint_create()
988 entry->qp.produce_size = produce_size; qp_guest_endpoint_create()
989 entry->qp.consume_size = consume_size; qp_guest_endpoint_create()
990 entry->qp.ref_count = 0; qp_guest_endpoint_create()
991 entry->num_ppns = num_ppns; qp_guest_endpoint_create()
992 entry->produce_q = produce_q; qp_guest_endpoint_create()
993 entry->consume_q = consume_q; qp_guest_endpoint_create()
994 INIT_LIST_HEAD(&entry->qp.list_item); qp_guest_endpoint_create()
997 result = vmci_resource_add(&entry->resource, qp_guest_endpoint_create()
1000 entry->qp.handle = vmci_resource_handle(&entry->resource); qp_guest_endpoint_create()
1002 qp_list_find(&qp_guest_endpoints, entry->qp.handle)) { qp_guest_endpoint_create()
1005 kfree(entry); qp_guest_endpoint_create()
1006 entry = NULL; qp_guest_endpoint_create()
1009 return entry; qp_guest_endpoint_create()
1015 static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry) qp_guest_endpoint_destroy() argument
1017 qp_free_ppn_set(&entry->ppn_set); qp_guest_endpoint_destroy()
1018 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); qp_guest_endpoint_destroy()
1019 qp_free_queue(entry->produce_q, entry->qp.produce_size); qp_guest_endpoint_destroy()
1020 qp_free_queue(entry->consume_q, entry->qp.consume_size); qp_guest_endpoint_destroy()
1022 vmci_resource_remove(&entry->resource); qp_guest_endpoint_destroy()
1024 kfree(entry); qp_guest_endpoint_destroy()
1031 static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry) qp_alloc_hypercall() argument
1037 if (!entry || entry->num_ppns <= 2) qp_alloc_hypercall()
1041 (size_t) entry->num_ppns * sizeof(u32); qp_alloc_hypercall()
1050 alloc_msg->handle = entry->qp.handle; qp_alloc_hypercall()
1051 alloc_msg->peer = entry->qp.peer; qp_alloc_hypercall()
1052 alloc_msg->flags = entry->qp.flags; qp_alloc_hypercall()
1053 alloc_msg->produce_size = entry->qp.produce_size; qp_alloc_hypercall()
1054 alloc_msg->consume_size = entry->qp.consume_size; qp_alloc_hypercall()
1055 alloc_msg->num_ppns = entry->num_ppns; qp_alloc_hypercall()
1058 &entry->ppn_set); qp_alloc_hypercall()
1085 * Adds the given entry to the list. Assumes that the list is locked.
1087 static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry) qp_list_add_entry() argument
1089 if (entry) qp_list_add_entry()
1090 list_add(&entry->list_item, &qp_list->head); qp_list_add_entry()
1094 * Removes the given entry from the list. Assumes that the list is locked.
1097 struct qp_entry *entry) qp_list_remove_entry()
1099 if (entry) qp_list_remove_entry()
1100 list_del(&entry->list_item); qp_list_remove_entry()
1110 struct qp_guest_endpoint *entry; qp_detatch_guest_work() local
1115 entry = qp_guest_handle_to_entry(handle); qp_detatch_guest_work()
1116 if (!entry) { qp_detatch_guest_work()
1121 if (entry->qp.flags & VMCI_QPFLAG_LOCAL) { qp_detatch_guest_work()
1124 if (entry->qp.ref_count > 1) { qp_detatch_guest_work()
1129 * to release the entry if that happens, so qp_detatch_guest_work()
1140 * release the entry yet. It will get cleaned qp_detatch_guest_work()
1153 * we succeeded in all cases. Release the entry if required. qp_detatch_guest_work()
1156 entry->qp.ref_count--; qp_detatch_guest_work()
1157 if (entry->qp.ref_count == 0) qp_detatch_guest_work()
1158 qp_list_remove_entry(&qp_guest_endpoints, &entry->qp); qp_detatch_guest_work()
1160 /* If we didn't remove the entry, this could change once we unlock. */ qp_detatch_guest_work()
1161 if (entry) qp_detatch_guest_work()
1162 ref_count = entry->qp.ref_count; qp_detatch_guest_work()
1167 qp_guest_endpoint_destroy(entry); qp_detatch_guest_work()
1343 /* This path should only be used when an existing entry was found. */ qp_alloc_guest_work()
1377 struct qp_broker_entry *entry = NULL; qp_broker_create() local
1405 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); qp_broker_create()
1406 if (!entry) qp_broker_create()
1411 * The queue pair broker entry stores values from the guest qp_broker_create()
1425 entry->qp.handle = handle; qp_broker_create()
1426 entry->qp.peer = peer; qp_broker_create()
1427 entry->qp.flags = flags; qp_broker_create()
1428 entry->qp.produce_size = guest_produce_size; qp_broker_create()
1429 entry->qp.consume_size = guest_consume_size; qp_broker_create()
1430 entry->qp.ref_count = 1; qp_broker_create()
1431 entry->create_id = context_id; qp_broker_create()
1432 entry->attach_id = VMCI_INVALID_ID; qp_broker_create()
1433 entry->state = VMCIQPB_NEW; qp_broker_create()
1434 entry->require_trusted_attach = qp_broker_create()
1436 entry->created_by_trusted = qp_broker_create()
1438 entry->vmci_page_files = false; qp_broker_create()
1439 entry->wakeup_cb = wakeup_cb; qp_broker_create()
1440 entry->client_data = client_data; qp_broker_create()
1441 entry->produce_q = qp_host_alloc_queue(guest_produce_size); qp_broker_create()
1442 if (entry->produce_q == NULL) { qp_broker_create()
1446 entry->consume_q = qp_host_alloc_queue(guest_consume_size); qp_broker_create()
1447 if (entry->consume_q == NULL) { qp_broker_create()
1452 qp_init_queue_mutex(entry->produce_q, entry->consume_q); qp_broker_create()
1454 INIT_LIST_HEAD(&entry->qp.list_item); qp_broker_create()
1459 entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp), qp_broker_create()
1461 if (entry->local_mem == NULL) { qp_broker_create()
1465 entry->state = VMCIQPB_CREATED_MEM; qp_broker_create()
1466 entry->produce_q->q_header = entry->local_mem; qp_broker_create()
1467 tmp = (u8 *)entry->local_mem + PAGE_SIZE * qp_broker_create()
1468 (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1); qp_broker_create()
1469 entry->consume_q->q_header = (struct vmci_queue_header *)tmp; qp_broker_create()
1476 entry->produce_q, qp_broker_create()
1477 entry->consume_q); qp_broker_create()
1481 entry->state = VMCIQPB_CREATED_MEM; qp_broker_create()
1490 entry->state = VMCIQPB_CREATED_NO_MEM; qp_broker_create()
1493 qp_list_add_entry(&qp_broker_list, &entry->qp); qp_broker_create()
1495 *ent = entry; qp_broker_create()
1498 result = vmci_resource_add(&entry->resource, qp_broker_create()
1507 entry->qp.handle = vmci_resource_handle(&entry->resource); qp_broker_create()
1509 vmci_q_header_init(entry->produce_q->q_header, qp_broker_create()
1510 entry->qp.handle); qp_broker_create()
1511 vmci_q_header_init(entry->consume_q->q_header, qp_broker_create()
1512 entry->qp.handle); qp_broker_create()
1515 vmci_ctx_qp_create(context, entry->qp.handle); qp_broker_create()
1520 if (entry != NULL) { qp_broker_create()
1521 qp_host_free_queue(entry->produce_q, guest_produce_size); qp_broker_create()
1522 qp_host_free_queue(entry->consume_q, guest_consume_size); qp_broker_create()
1523 kfree(entry); qp_broker_create()
1594 static int qp_broker_attach(struct qp_broker_entry *entry, qp_broker_attach() argument
1610 if (entry->state != VMCIQPB_CREATED_NO_MEM && qp_broker_attach()
1611 entry->state != VMCIQPB_CREATED_MEM) qp_broker_attach()
1615 if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) || qp_broker_attach()
1616 context_id != entry->create_id) { qp_broker_attach()
1619 } else if (context_id == entry->create_id || qp_broker_attach()
1620 context_id == entry->attach_id) { qp_broker_attach()
1625 VMCI_CONTEXT_IS_VM(entry->create_id)) qp_broker_attach()
1633 !entry->created_by_trusted) qp_broker_attach()
1640 if (entry->require_trusted_attach && qp_broker_attach()
1648 if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id) qp_broker_attach()
1651 if (entry->create_id == VMCI_HOST_CONTEXT_ID) { qp_broker_attach()
1669 create_context = vmci_ctx_get(entry->create_id); qp_broker_attach()
1677 if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER)) qp_broker_attach()
1682 * The queue pair broker entry stores values from the guest qp_broker_attach()
1684 * stored in the entry. qp_broker_attach()
1687 if (entry->qp.produce_size != produce_size || qp_broker_attach()
1688 entry->qp.consume_size != consume_size) { qp_broker_attach()
1691 } else if (entry->qp.produce_size != consume_size || qp_broker_attach()
1692 entry->qp.consume_size != produce_size) { qp_broker_attach()
1710 if (entry->state != VMCIQPB_CREATED_NO_MEM) qp_broker_attach()
1722 entry->produce_q, qp_broker_attach()
1723 entry->consume_q); qp_broker_attach()
1727 entry->state = VMCIQPB_ATTACHED_MEM; qp_broker_attach()
1729 entry->state = VMCIQPB_ATTACHED_NO_MEM; qp_broker_attach()
1731 } else if (entry->state == VMCIQPB_CREATED_NO_MEM) { qp_broker_attach()
1742 entry->state = VMCIQPB_ATTACHED_MEM; qp_broker_attach()
1745 if (entry->state == VMCIQPB_ATTACHED_MEM) { qp_broker_attach()
1747 qp_notify_peer(true, entry->qp.handle, context_id, qp_broker_attach()
1748 entry->create_id); qp_broker_attach()
1751 entry->create_id, entry->qp.handle.context, qp_broker_attach()
1752 entry->qp.handle.resource); qp_broker_attach()
1755 entry->attach_id = context_id; qp_broker_attach()
1756 entry->qp.ref_count++; qp_broker_attach()
1758 entry->wakeup_cb = wakeup_cb; qp_broker_attach()
1759 entry->client_data = client_data; qp_broker_attach()
1764 * an entry tracking the queue pair, so don't add another one. qp_broker_attach()
1767 vmci_ctx_qp_create(context, entry->qp.handle); qp_broker_attach()
1770 *ent = entry; qp_broker_attach()
1794 struct qp_broker_entry *entry = NULL; qp_broker_alloc() local
1824 entry = qp_broker_handle_to_entry(handle); qp_broker_alloc()
1826 if (!entry) { qp_broker_alloc()
1835 qp_broker_attach(entry, peer, flags, priv_flags, qp_broker_alloc()
1866 struct qp_broker_entry *entry; qp_alloc_host_work() local
1877 entry = NULL; qp_alloc_host_work()
1881 wakeup_cb, client_data, &entry, &swap); qp_alloc_host_work()
1890 *produce_q = entry->consume_q; qp_alloc_host_work()
1891 *consume_q = entry->produce_q; qp_alloc_host_work()
1893 *produce_q = entry->produce_q; qp_alloc_host_work()
1894 *consume_q = entry->consume_q; qp_alloc_host_work()
1897 *handle = vmci_resource_handle(&entry->resource); qp_alloc_host_work()
1974 * Returns the entry from the head of the list. Assumes that the list is
1980 struct qp_entry *entry = qp_list_get_head() local
1983 return entry; qp_list_get_head()
1991 struct qp_entry *entry; vmci_qp_broker_exit() local
1996 while ((entry = qp_list_get_head(&qp_broker_list))) { vmci_qp_broker_exit()
1997 be = (struct qp_broker_entry *)entry; vmci_qp_broker_exit()
1999 qp_list_remove_entry(&qp_broker_list, entry); vmci_qp_broker_exit()
2008 * pair broker. Allocates a queue pair entry if one does not
2048 struct qp_broker_entry *entry; vmci_qp_broker_set_page_store() local
2073 entry = qp_broker_handle_to_entry(handle); vmci_qp_broker_set_page_store()
2074 if (!entry) { vmci_qp_broker_set_page_store()
2085 if (entry->create_id != context_id && vmci_qp_broker_set_page_store()
2086 (entry->create_id != VMCI_HOST_CONTEXT_ID || vmci_qp_broker_set_page_store()
2087 entry->attach_id != context_id)) { vmci_qp_broker_set_page_store()
2092 if (entry->state != VMCIQPB_CREATED_NO_MEM && vmci_qp_broker_set_page_store()
2093 entry->state != VMCIQPB_ATTACHED_NO_MEM) { vmci_qp_broker_set_page_store()
2099 entry->produce_q, entry->consume_q); vmci_qp_broker_set_page_store()
2103 result = qp_host_map_queues(entry->produce_q, entry->consume_q); vmci_qp_broker_set_page_store()
2105 qp_host_unregister_user_memory(entry->produce_q, vmci_qp_broker_set_page_store()
2106 entry->consume_q); vmci_qp_broker_set_page_store()
2110 if (entry->state == VMCIQPB_CREATED_NO_MEM) vmci_qp_broker_set_page_store()
2111 entry->state = VMCIQPB_CREATED_MEM; vmci_qp_broker_set_page_store()
2113 entry->state = VMCIQPB_ATTACHED_MEM; vmci_qp_broker_set_page_store()
2115 entry->vmci_page_files = true; vmci_qp_broker_set_page_store()
2117 if (entry->state == VMCIQPB_ATTACHED_MEM) { vmci_qp_broker_set_page_store()
2119 qp_notify_peer(true, handle, context_id, entry->create_id); vmci_qp_broker_set_page_store()
2122 entry->create_id, entry->qp.handle.context, vmci_qp_broker_set_page_store()
2123 entry->qp.handle.resource); vmci_qp_broker_set_page_store()
2135 * entry. Should be used when guest memory becomes available
2138 static void qp_reset_saved_headers(struct qp_broker_entry *entry) qp_reset_saved_headers() argument
2140 entry->produce_q->saved_header = NULL; qp_reset_saved_headers()
2141 entry->consume_q->saved_header = NULL; qp_reset_saved_headers()
2145 * The main entry point for detaching from a queue pair registered with the
2164 struct qp_broker_entry *entry; vmci_qp_broker_detach() local
2184 entry = qp_broker_handle_to_entry(handle); vmci_qp_broker_detach()
2185 if (!entry) { vmci_qp_broker_detach()
2192 if (context_id != entry->create_id && context_id != entry->attach_id) { vmci_qp_broker_detach()
2197 if (context_id == entry->create_id) { vmci_qp_broker_detach()
2198 peer_id = entry->attach_id; vmci_qp_broker_detach()
2199 entry->create_id = VMCI_INVALID_ID; vmci_qp_broker_detach()
2201 peer_id = entry->create_id; vmci_qp_broker_detach()
2202 entry->attach_id = VMCI_INVALID_ID; vmci_qp_broker_detach()
2204 entry->qp.ref_count--; vmci_qp_broker_detach()
2206 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; vmci_qp_broker_detach()
2219 qp_acquire_queue_mutex(entry->produce_q); vmci_qp_broker_detach()
2220 headers_mapped = entry->produce_q->q_header || vmci_qp_broker_detach()
2221 entry->consume_q->q_header; vmci_qp_broker_detach()
2222 if (QPBROKERSTATE_HAS_MEM(entry)) { vmci_qp_broker_detach()
2225 entry->produce_q, vmci_qp_broker_detach()
2226 entry->consume_q); vmci_qp_broker_detach()
2232 if (entry->vmci_page_files) vmci_qp_broker_detach()
2233 qp_host_unregister_user_memory(entry->produce_q, vmci_qp_broker_detach()
2234 entry-> vmci_qp_broker_detach()
2237 qp_host_unregister_user_memory(entry->produce_q, vmci_qp_broker_detach()
2238 entry-> vmci_qp_broker_detach()
2244 qp_reset_saved_headers(entry); vmci_qp_broker_detach()
2246 qp_release_queue_mutex(entry->produce_q); vmci_qp_broker_detach()
2248 if (!headers_mapped && entry->wakeup_cb) vmci_qp_broker_detach()
2249 entry->wakeup_cb(entry->client_data); vmci_qp_broker_detach()
2252 if (entry->wakeup_cb) { vmci_qp_broker_detach()
2253 entry->wakeup_cb = NULL; vmci_qp_broker_detach()
2254 entry->client_data = NULL; vmci_qp_broker_detach()
2258 if (entry->qp.ref_count == 0) { vmci_qp_broker_detach()
2259 qp_list_remove_entry(&qp_broker_list, &entry->qp); vmci_qp_broker_detach()
2262 kfree(entry->local_mem); vmci_qp_broker_detach()
2264 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); vmci_qp_broker_detach()
2265 qp_host_free_queue(entry->produce_q, entry->qp.produce_size); vmci_qp_broker_detach()
2266 qp_host_free_queue(entry->consume_q, entry->qp.consume_size); vmci_qp_broker_detach()
2268 vmci_resource_remove(&entry->resource); vmci_qp_broker_detach()
2270 kfree(entry); vmci_qp_broker_detach()
2276 QPBROKERSTATE_HAS_MEM(entry)) { vmci_qp_broker_detach()
2277 entry->state = VMCIQPB_SHUTDOWN_MEM; vmci_qp_broker_detach()
2279 entry->state = VMCIQPB_SHUTDOWN_NO_MEM; vmci_qp_broker_detach()
2302 struct qp_broker_entry *entry; vmci_qp_broker_map() local
2320 entry = qp_broker_handle_to_entry(handle); vmci_qp_broker_map()
2321 if (!entry) { vmci_qp_broker_map()
2328 if (context_id != entry->create_id && context_id != entry->attach_id) { vmci_qp_broker_map()
2333 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; vmci_qp_broker_map()
2340 page_store.len = QPE_NUM_PAGES(entry->qp); vmci_qp_broker_map()
2342 qp_acquire_queue_mutex(entry->produce_q); vmci_qp_broker_map()
2343 qp_reset_saved_headers(entry); vmci_qp_broker_map()
2346 entry->produce_q, vmci_qp_broker_map()
2347 entry->consume_q); vmci_qp_broker_map()
2348 qp_release_queue_mutex(entry->produce_q); vmci_qp_broker_map()
2352 entry->state++; vmci_qp_broker_map()
2354 if (entry->wakeup_cb) vmci_qp_broker_map()
2355 entry->wakeup_cb(entry->client_data); vmci_qp_broker_map()
2366 * entry. Should be used when guest memory is unmapped.
2371 static int qp_save_headers(struct qp_broker_entry *entry) qp_save_headers() argument
2375 if (entry->produce_q->saved_header != NULL && qp_save_headers()
2376 entry->consume_q->saved_header != NULL) { qp_save_headers()
2386 if (NULL == entry->produce_q->q_header || qp_save_headers()
2387 NULL == entry->consume_q->q_header) { qp_save_headers()
2388 result = qp_host_map_queues(entry->produce_q, entry->consume_q); qp_save_headers()
2393 memcpy(&entry->saved_produce_q, entry->produce_q->q_header, qp_save_headers()
2394 sizeof(entry->saved_produce_q)); qp_save_headers()
2395 entry->produce_q->saved_header = &entry->saved_produce_q; qp_save_headers()
2396 memcpy(&entry->saved_consume_q, entry->consume_q->q_header, qp_save_headers()
2397 sizeof(entry->saved_consume_q)); qp_save_headers()
2398 entry->consume_q->saved_header = &entry->saved_consume_q; qp_save_headers()
2413 struct qp_broker_entry *entry; vmci_qp_broker_unmap() local
2431 entry = qp_broker_handle_to_entry(handle); vmci_qp_broker_unmap()
2432 if (!entry) { vmci_qp_broker_unmap()
2439 if (context_id != entry->create_id && context_id != entry->attach_id) { vmci_qp_broker_unmap()
2444 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; vmci_qp_broker_unmap()
2447 qp_acquire_queue_mutex(entry->produce_q); vmci_qp_broker_unmap()
2448 result = qp_save_headers(entry); vmci_qp_broker_unmap()
2453 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); vmci_qp_broker_unmap()
2462 qp_host_unregister_user_memory(entry->produce_q, vmci_qp_broker_unmap()
2463 entry->consume_q); vmci_qp_broker_unmap()
2468 entry->state--; vmci_qp_broker_unmap()
2470 qp_release_queue_mutex(entry->produce_q); vmci_qp_broker_unmap()
2488 struct qp_entry *entry; vmci_qp_guest_endpoints_exit() local
2493 while ((entry = qp_list_get_head(&qp_guest_endpoints))) { vmci_qp_guest_endpoints_exit()
2494 ep = (struct qp_guest_endpoint *)entry; vmci_qp_guest_endpoints_exit()
2497 if (!(entry->flags & VMCI_QPFLAG_LOCAL)) vmci_qp_guest_endpoints_exit()
2498 qp_detatch_hypercall(entry->handle); vmci_qp_guest_endpoints_exit()
2501 entry->ref_count = 0; vmci_qp_guest_endpoints_exit()
2502 qp_list_remove_entry(&qp_guest_endpoints, entry); vmci_qp_guest_endpoints_exit()
2875 * if it does so, it will cleanup the entry (if there is one). vmci_qpair_detach()
2876 * The host can fail too, but it won't cleanup the entry vmci_qpair_detach()
1096 qp_list_remove_entry(struct qp_list *qp_list, struct qp_entry *entry) qp_list_remove_entry() argument
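A subtle point in the map/unmap excerpts above: entry->state++ and entry->state-- toggle between the *_NO_MEM and *_MEM flavors of a state, which only works if each NO_MEM value is defined immediately before its MEM counterpart. A sketch of that ordering assumption (enum values illustrative, not copied from the VMCI headers):

    /* State stepping as in vmci_qp_broker_map()/unmap() above; relies on
     * each *_NO_MEM value sitting directly before its *_MEM twin. */
    enum qpb_state {
        QPB_CREATED_NO_MEM,
        QPB_CREATED_MEM,     /* == QPB_CREATED_NO_MEM + 1 */
        QPB_ATTACHED_NO_MEM,
        QPB_ATTACHED_MEM,    /* == QPB_ATTACHED_NO_MEM + 1 */
    };

    static void qpb_map_mem(enum qpb_state *s)   { (*s)++; } /* gains memory */
    static void qpb_unmap_mem(enum qpb_state *s) { (*s)--; } /* loses memory */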
/linux-4.4.14/arch/x86/xen/
H A Dsmp.h12 extern void xen_pvh_early_cpu_init(int cpu, bool entry);
14 static inline void xen_pvh_early_cpu_init(int cpu, bool entry) xen_pvh_early_cpu_init() argument
/linux-4.4.14/sound/core/seq/
H A Dseq_info.c39 struct snd_info_entry *entry; create_info_entry() local
41 entry = snd_info_create_module_entry(THIS_MODULE, name, snd_seq_root); create_info_entry()
42 if (entry == NULL) create_info_entry()
44 entry->content = SNDRV_INFO_CONTENT_TEXT; create_info_entry()
45 entry->c.text.read = read; create_info_entry()
46 if (snd_info_register(entry) < 0) { create_info_entry()
47 snd_info_free_entry(entry); create_info_entry()
50 return entry; create_info_entry()
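create_info_entry() above is one instance of a pattern repeated in the digi00x, oxfw, tascam and ca0106 sections below: create a proc entry, attach a text reader, register, and free the entry if registration fails. A hedged wrapper expressing that shape (assumes the ALSA calls exactly as quoted; the wrapper itself is illustrative):

    /* Illustrative wrapper over the ALSA info-entry calls quoted in these
     * excerpts; assumes <sound/core.h> and <sound/info.h>. */
    static struct snd_info_entry *
    register_text_node(struct snd_card *card, const char *name,
                       struct snd_info_entry *root, void *private_data,
                       void (*read)(struct snd_info_entry *,
                                    struct snd_info_buffer *))
    {
        struct snd_info_entry *entry;

        entry = snd_info_create_card_entry(card, name, root);
        if (entry == NULL)
            return NULL;
        snd_info_set_text_ops(entry, private_data, read);
        if (snd_info_register(entry) < 0) {
            snd_info_free_entry(entry);    /* never leak on failure */
            return NULL;
        }
        return entry;
    }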
/linux-4.4.14/arch/mips/kernel/
H A Dperf_event.c28 static void save_raw_perf_callchain(struct perf_callchain_entry *entry, save_raw_perf_callchain() argument
37 perf_callchain_store(entry, addr); save_raw_perf_callchain()
38 if (entry->nr >= PERF_MAX_STACK_DEPTH) save_raw_perf_callchain()
44 void perf_callchain_kernel(struct perf_callchain_entry *entry, perf_callchain_kernel() argument
57 save_raw_perf_callchain(entry, sp); perf_callchain_kernel()
61 perf_callchain_store(entry, pc); perf_callchain_kernel()
62 if (entry->nr >= PERF_MAX_STACK_DEPTH) perf_callchain_kernel()
67 save_raw_perf_callchain(entry, sp); perf_callchain_kernel()
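Both callchain walkers above store frames until entry->nr hits PERF_MAX_STACK_DEPTH. The same bounded-store discipline as a self-contained sketch (names and the cap are stand-ins):

    /* Bounded callchain store, mirroring the depth check above. */
    #define MAX_STACK_DEPTH 127

    struct callchain {
        unsigned int nr;
        unsigned long ip[MAX_STACK_DEPTH];
    };

    static int chain_store(struct callchain *c, unsigned long addr)
    {
        if (c->nr >= MAX_STACK_DEPTH)
            return -1;          /* full: caller stops unwinding */
        c->ip[c->nr++] = addr;
        return 0;
    }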
/linux-4.4.14/arch/cris/include/arch-v32/arch/
H A Dtlb.h5 * The TLB is a 64-entry cache. Each entry has an 8-bit page_id that is used
/linux-4.4.14/sound/firewire/digi00x/
H A Ddigi00x-proc.c26 static void proc_read_clock(struct snd_info_entry *entry, proc_read_clock() argument
39 struct snd_dg00x *dg00x = entry->private_data; proc_read_clock()
71 struct snd_info_entry *root, *entry; snd_dg00x_proc_init() local
88 entry = snd_info_create_card_entry(dg00x->card, "clock", root); snd_dg00x_proc_init()
89 if (entry == NULL) { snd_dg00x_proc_init()
94 snd_info_set_text_ops(entry, dg00x, proc_read_clock); snd_dg00x_proc_init()
95 if (snd_info_register(entry) < 0) { snd_dg00x_proc_init()
96 snd_info_free_entry(entry); snd_dg00x_proc_init()
/linux-4.4.14/sound/firewire/oxfw/
H A Doxfw-proc.c11 static void proc_read_formation(struct snd_info_entry *entry, proc_read_formation() argument
14 struct snd_oxfw *oxfw = entry->private_data; proc_read_formation()
83 struct snd_info_entry *entry; add_node() local
85 entry = snd_info_create_card_entry(oxfw->card, name, root); add_node()
86 if (entry == NULL) add_node()
89 snd_info_set_text_ops(entry, oxfw, op); add_node()
90 if (snd_info_register(entry) < 0) add_node()
91 snd_info_free_entry(entry); add_node()
/linux-4.4.14/sound/firewire/tascam/
H A Dtascam-proc.c11 static void proc_read_firmware(struct snd_info_entry *entry, proc_read_firmware() argument
14 struct snd_tscm *tscm = entry->private_data; proc_read_firmware()
58 struct snd_info_entry *entry; add_node() local
60 entry = snd_info_create_card_entry(tscm->card, name, root); add_node()
61 if (entry == NULL) add_node()
64 snd_info_set_text_ops(entry, tscm, op); add_node()
65 if (snd_info_register(entry) < 0) add_node()
66 snd_info_free_entry(entry); add_node()
/linux-4.4.14/drivers/pci/
H A Dmsi.c110 struct msi_desc *entry; arch_setup_msi_irqs() local
122 for_each_pci_msi_entry(entry, dev) { for_each_pci_msi_entry()
123 ret = arch_setup_msi_irq(dev, entry); for_each_pci_msi_entry()
140 struct msi_desc *entry; default_teardown_msi_irqs() local
142 for_each_pci_msi_entry(entry, dev) default_teardown_msi_irqs()
143 if (entry->irq) default_teardown_msi_irqs()
144 for (i = 0; i < entry->nvec_used; i++) default_teardown_msi_irqs()
145 arch_teardown_msi_irq(entry->irq + i); default_teardown_msi_irqs()
155 struct msi_desc *entry; default_restore_msi_irq() local
157 entry = NULL; default_restore_msi_irq()
159 for_each_pci_msi_entry(entry, dev) { for_each_pci_msi_entry()
160 if (irq == entry->irq) for_each_pci_msi_entry()
164 entry = irq_get_msi_desc(irq);
167 if (entry)
168 __pci_write_msi_msg(entry, &entry->msg);
272 struct msi_desc *entry; default_restore_msi_irqs() local
274 for_each_pci_msi_entry(entry, dev) default_restore_msi_irqs()
275 default_restore_msi_irq(dev, entry->irq); default_restore_msi_irqs()
278 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) __pci_read_msi_msg() argument
280 struct pci_dev *dev = msi_desc_to_pci_dev(entry); __pci_read_msi_msg()
284 if (entry->msi_attrib.is_msix) { __pci_read_msi_msg()
285 void __iomem *base = entry->mask_base + __pci_read_msi_msg()
286 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; __pci_read_msi_msg()
297 if (entry->msi_attrib.is_64) { __pci_read_msi_msg()
309 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) __pci_write_msi_msg() argument
311 struct pci_dev *dev = msi_desc_to_pci_dev(entry); __pci_write_msi_msg()
315 } else if (entry->msi_attrib.is_msix) { __pci_write_msi_msg()
317 base = entry->mask_base + __pci_write_msi_msg()
318 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; __pci_write_msi_msg()
329 msgctl |= entry->msi_attrib.multiple << 4; __pci_write_msi_msg()
334 if (entry->msi_attrib.is_64) { __pci_write_msi_msg()
344 entry->msg = *msg; __pci_write_msi_msg()
349 struct msi_desc *entry = irq_get_msi_desc(irq); pci_write_msi_msg() local
351 __pci_write_msi_msg(entry, msg); pci_write_msi_msg()
358 struct msi_desc *entry, *tmp; free_msi_irqs() local
363 for_each_pci_msi_entry(entry, dev) free_msi_irqs()
364 if (entry->irq) free_msi_irqs()
365 for (i = 0; i < entry->nvec_used; i++) free_msi_irqs()
366 BUG_ON(irq_has_action(entry->irq + i)); free_msi_irqs()
370 list_for_each_entry_safe(entry, tmp, msi_list, list) { free_msi_irqs()
371 if (entry->msi_attrib.is_msix) { free_msi_irqs()
372 if (list_is_last(&entry->list, msi_list)) free_msi_irqs()
373 iounmap(entry->mask_base); free_msi_irqs()
376 list_del(&entry->list); free_msi_irqs()
377 kfree(entry); free_msi_irqs()
406 struct msi_desc *entry; __pci_restore_msi_state() local
411 entry = irq_get_msi_desc(dev->irq); __pci_restore_msi_state()
418 msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap), __pci_restore_msi_state()
419 entry->masked); __pci_restore_msi_state()
421 control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE; __pci_restore_msi_state()
427 struct msi_desc *entry; __pci_restore_msix_state() local
439 for_each_pci_msi_entry(entry, dev) __pci_restore_msix_state()
440 msix_mask_irq(entry, entry->masked); __pci_restore_msix_state()
455 struct msi_desc *entry; msi_mode_show() local
463 entry = irq_get_msi_desc(irq); msi_mode_show()
464 if (entry) msi_mode_show()
466 entry->msi_attrib.is_msix ? "msix" : "msi"); msi_mode_show()
478 struct msi_desc *entry; populate_msi_sysfs() local
485 for_each_pci_msi_entry(entry, pdev) populate_msi_sysfs()
486 num_msi += entry->nvec_used; populate_msi_sysfs()
494 for_each_pci_msi_entry(entry, pdev) { for_each_pci_msi_entry()
495 for (i = 0; i < entry->nvec_used; i++) { for_each_pci_msi_entry()
503 entry->irq + i); for_each_pci_msi_entry()
551 struct msi_desc *entry; msi_setup_entry() local
554 entry = alloc_msi_entry(&dev->dev); msi_setup_entry()
555 if (!entry) msi_setup_entry()
560 entry->msi_attrib.is_msix = 0; msi_setup_entry()
561 entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT); msi_setup_entry()
562 entry->msi_attrib.entry_nr = 0; msi_setup_entry()
563 entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); msi_setup_entry()
564 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ msi_setup_entry()
565 entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; msi_setup_entry()
566 entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); msi_setup_entry()
567 entry->nvec_used = nvec; msi_setup_entry()
570 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; msi_setup_entry()
572 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32; msi_setup_entry()
575 if (entry->msi_attrib.maskbit) msi_setup_entry()
576 pci_read_config_dword(dev, entry->mask_pos, &entry->masked); msi_setup_entry()
578 return entry; msi_setup_entry()
583 struct msi_desc *entry; msi_verify_entries() local
585 for_each_pci_msi_entry(entry, dev) { for_each_pci_msi_entry()
586 if (!dev->no_64bit_msi || !entry->msg.address_hi) for_each_pci_msi_entry()
602 * setup of an entry with the new MSI irq. A negative return value indicates
608 struct msi_desc *entry; msi_capability_init() local
614 entry = msi_setup_entry(dev, nvec); msi_capability_init()
615 if (!entry) msi_capability_init()
619 mask = msi_mask(entry->msi_attrib.multi_cap); msi_capability_init()
620 msi_mask_irq(entry, mask, mask); msi_capability_init()
622 list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); msi_capability_init()
627 msi_mask_irq(entry, mask, ~mask); msi_capability_init()
634 msi_mask_irq(entry, mask, ~mask); msi_capability_init()
641 msi_mask_irq(entry, mask, ~mask); msi_capability_init()
652 dev->irq = entry->irq; msi_capability_init()
679 struct msi_desc *entry; msix_setup_entries() local
683 entry = alloc_msi_entry(&dev->dev); msix_setup_entries()
684 if (!entry) { msix_setup_entries()
693 entry->msi_attrib.is_msix = 1; msix_setup_entries()
694 entry->msi_attrib.is_64 = 1; msix_setup_entries()
695 entry->msi_attrib.entry_nr = entries[i].entry; msix_setup_entries()
696 entry->msi_attrib.default_irq = dev->irq; msix_setup_entries()
697 entry->mask_base = base; msix_setup_entries()
698 entry->nvec_used = 1; msix_setup_entries()
700 list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); msix_setup_entries()
709 struct msi_desc *entry; msix_program_entries() local
712 for_each_pci_msi_entry(entry, dev) { for_each_pci_msi_entry()
713 int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE + for_each_pci_msi_entry()
716 entries[i].vector = entry->irq; for_each_pci_msi_entry()
717 entry->masked = readl(entry->mask_base + offset); for_each_pci_msi_entry()
718 msix_mask_irq(entry, 1); for_each_pci_msi_entry()
790 struct msi_desc *entry; msix_capability_init() local
793 for_each_pci_msi_entry(entry, dev) { for_each_pci_msi_entry()
794 if (entry->irq != 0) for_each_pci_msi_entry()
963 if (entries[i].entry >= nr_entries) pci_enable_msix()
964 return -EINVAL; /* invalid entry */ pci_enable_msix()
966 if (entries[i].entry == entries[j].entry) pci_enable_msix()
967 return -EINVAL; /* duplicate entry */ pci_enable_msix()
983 struct msi_desc *entry; pci_msix_shutdown() local
989 for_each_pci_msi_entry(entry, dev) { for_each_pci_msi_entry()
991 __pci_msix_desc_mask_irq(entry, 1); for_each_pci_msi_entry()
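Throughout the MSI-X paths above, a descriptor finds its message slot by indexing the mapped table at mask_base with msi_attrib.entry_nr times a fixed slot size. A small sketch of that addressing (the 16-byte size restates PCI_MSIX_ENTRY_SIZE, i.e. addr_lo, addr_hi, data, vector control; treat it as an assumption here):

    /* MSI-X slot addressing as used by __pci_read_msi_msg() and
     * __pci_write_msi_msg() above. */
    #include <stddef.h>
    #include <stdint.h>

    #define MSIX_ENTRY_SIZE 16

    static inline uint8_t *msix_slot(uint8_t *mask_base, unsigned int entry_nr)
    {
        return mask_base + (size_t)entry_nr * MSIX_ENTRY_SIZE;
    }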
/linux-4.4.14/arch/arm/xen/
H A Dp2m.c35 struct xen_p2m_entry *entry; xen_add_phys_to_mach_entry() local
40 entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys); xen_add_phys_to_mach_entry()
42 if (new->pfn == entry->pfn) xen_add_phys_to_mach_entry()
45 if (new->pfn < entry->pfn) xen_add_phys_to_mach_entry()
57 __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn); xen_add_phys_to_mach_entry()
65 struct xen_p2m_entry *entry; __pfn_to_mfn() local
70 entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); __pfn_to_mfn()
71 if (entry->pfn <= pfn && __pfn_to_mfn()
72 entry->pfn + entry->nr_pages > pfn) { __pfn_to_mfn()
74 return entry->mfn + (pfn - entry->pfn); __pfn_to_mfn()
76 if (pfn < entry->pfn) __pfn_to_mfn()
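__pfn_to_mfn() above searches a tree of [pfn, pfn + nr_pages) runs and returns the run's mfn plus the offset within it. The same range lookup over a plain binary search tree, as a self-contained sketch:

    /* Range lookup equivalent to __pfn_to_mfn() above, on a plain BST
     * instead of the kernel rbtree; INVALID is a stand-in sentinel. */
    #include <stdint.h>

    #define INVALID (~0ULL)

    struct p2m_range {
        uint64_t pfn, mfn, nr_pages;
        struct p2m_range *left, *right;
    };

    static uint64_t range_lookup(const struct p2m_range *n, uint64_t pfn)
    {
        while (n != NULL) {
            if (n->pfn <= pfn && pfn < n->pfn + n->nr_pages)
                return n->mfn + (pfn - n->pfn);
            n = (pfn < n->pfn) ? n->left : n->right;
        }
        return INVALID;
    }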
/linux-4.4.14/mm/
H A Dzswap.c70 /* Store failed because the entry metadata could not be allocated (rare) */
131 * rbnode - links the entry into red-black tree for the appropriate swap type
132 * offset - the swap offset for the entry. Index into the red-black tree.
133 * refcount - the number of outstanding references to the entry. This is needed
134 * to protect against premature freeing of the entry by code
136 * for the zswap_tree structure that contains the entry must
141 * pool - the zswap_pool the entry's data is in
160 * - the refcount field of each entry in the tree
217 * zswap entry functions
234 struct zswap_entry *entry; zswap_entry_cache_alloc() local
235 entry = kmem_cache_alloc(zswap_entry_cache, gfp); zswap_entry_cache_alloc()
236 if (!entry) zswap_entry_cache_alloc()
238 entry->refcount = 1; zswap_entry_cache_alloc()
239 RB_CLEAR_NODE(&entry->rbnode); zswap_entry_cache_alloc()
240 return entry; zswap_entry_cache_alloc()
243 static void zswap_entry_cache_free(struct zswap_entry *entry) zswap_entry_cache_free() argument
245 kmem_cache_free(zswap_entry_cache, entry); zswap_entry_cache_free()
254 struct zswap_entry *entry; zswap_rb_search() local
257 entry = rb_entry(node, struct zswap_entry, rbnode); zswap_rb_search()
258 if (entry->offset > offset) zswap_rb_search()
260 else if (entry->offset < offset) zswap_rb_search()
263 return entry; zswap_rb_search()
269 * In the case that an entry with the same offset is found, a pointer to
270 * the existing entry is stored in dupentry and the function returns -EEXIST
272 static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry, zswap_rb_insert() argument
281 if (myentry->offset > entry->offset) zswap_rb_insert()
283 else if (myentry->offset < entry->offset) zswap_rb_insert()
290 rb_link_node(&entry->rbnode, parent, link); zswap_rb_insert()
291 rb_insert_color(&entry->rbnode, root); zswap_rb_insert()
295 static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry) zswap_rb_erase() argument
297 if (!RB_EMPTY_NODE(&entry->rbnode)) { zswap_rb_erase()
298 rb_erase(&entry->rbnode, root); zswap_rb_erase()
299 RB_CLEAR_NODE(&entry->rbnode); zswap_rb_erase()
304 * Carries out the common pattern of freeing an entry's zpool allocation,
305 * freeing the entry itself, and decrementing the number of stored pages.
307 static void zswap_free_entry(struct zswap_entry *entry) zswap_free_entry() argument
309 zpool_free(entry->pool->zpool, entry->handle); zswap_free_entry()
310 zswap_pool_put(entry->pool); zswap_free_entry()
311 zswap_entry_cache_free(entry); zswap_free_entry()
317 static void zswap_entry_get(struct zswap_entry *entry) zswap_entry_get() argument
319 entry->refcount++; zswap_entry_get()
323 * remove from the tree and free it, if nobody references the entry
326 struct zswap_entry *entry) zswap_entry_put()
328 int refcount = --entry->refcount; zswap_entry_put()
332 zswap_rb_erase(&tree->rbroot, entry); zswap_entry_put()
333 zswap_free_entry(entry); zswap_entry_put()
341 struct zswap_entry *entry; zswap_entry_find_get() local
343 entry = zswap_rb_search(root, offset); zswap_entry_find_get()
344 if (entry) zswap_entry_find_get()
345 zswap_entry_get(entry); zswap_entry_find_get()
347 return entry; zswap_entry_find_get()
799 * This function tries to find a page with the given swap entry
810 static int zswap_get_swap_cache_page(swp_entry_t entry, zswap_get_swap_cache_page() argument
815 *retpage = __read_swap_cache_async(entry, GFP_KERNEL, zswap_get_swap_cache_page()
825 * Attempts to free an entry by adding a page to the swap cache,
826 * decompressing the entry data into the page, and issuing a
842 struct zswap_entry *entry; zswap_writeback_entry() local
859 /* find and ref zswap entry */ zswap_writeback_entry()
861 entry = zswap_entry_find_get(&tree->rbroot, offset); zswap_writeback_entry()
862 if (!entry) { zswap_writeback_entry()
863 /* entry was invalidated */ zswap_writeback_entry()
868 BUG_ON(offset != entry->offset); zswap_writeback_entry()
885 src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle, zswap_writeback_entry()
888 tfm = *get_cpu_ptr(entry->pool->tfm); zswap_writeback_entry()
889 ret = crypto_comp_decompress(tfm, src, entry->length, zswap_writeback_entry()
891 put_cpu_ptr(entry->pool->tfm); zswap_writeback_entry()
893 zpool_unmap_handle(entry->pool->zpool, entry->handle); zswap_writeback_entry()
911 zswap_entry_put(tree, entry); zswap_writeback_entry()
914 * There are two possible situations for entry here: zswap_writeback_entry()
915 * (1) refcount is 1 (normal case), entry is valid and on the tree zswap_writeback_entry()
916 * (2) refcount is 0, entry is freed and not on the tree zswap_writeback_entry()
918 * search the tree and free the entry if we find it zswap_writeback_entry()
920 if (entry == zswap_rb_search(&tree->rbroot, offset)) zswap_writeback_entry()
921 zswap_entry_put(tree, entry); zswap_writeback_entry()
929 * it is safe and okay to not free the entry zswap_writeback_entry()
930 * if we free the entry in the following put zswap_writeback_entry()
935 zswap_entry_put(tree, entry); zswap_writeback_entry()
966 struct zswap_entry *entry, *dupentry; zswap_frontswap_store() local
990 /* allocate entry */ zswap_frontswap_store()
991 entry = zswap_entry_cache_alloc(GFP_KERNEL); zswap_frontswap_store()
992 if (!entry) { zswap_frontswap_store()
998 /* if entry is successfully added, it keeps the reference */ zswap_frontswap_store()
999 entry->pool = zswap_pool_current_get(); zswap_frontswap_store()
1000 if (!entry->pool) { zswap_frontswap_store()
1007 tfm = *get_cpu_ptr(entry->pool->tfm); zswap_frontswap_store()
1011 put_cpu_ptr(entry->pool->tfm); zswap_frontswap_store()
1019 ret = zpool_malloc(entry->pool->zpool, len, zswap_frontswap_store()
1030 zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW); zswap_frontswap_store()
1034 zpool_unmap_handle(entry->pool->zpool, handle); zswap_frontswap_store()
1037 /* populate entry */ zswap_frontswap_store()
1038 entry->offset = offset; zswap_frontswap_store()
1039 entry->handle = handle; zswap_frontswap_store()
1040 entry->length = dlen; zswap_frontswap_store()
1045 ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry); zswap_frontswap_store()
1063 zswap_pool_put(entry->pool); zswap_frontswap_store()
1065 zswap_entry_cache_free(entry); zswap_frontswap_store()
1072 * return -1 on entry not found or error
1078 struct zswap_entry *entry; zswap_frontswap_load() local
1086 entry = zswap_entry_find_get(&tree->rbroot, offset); zswap_frontswap_load()
1087 if (!entry) { zswap_frontswap_load()
1088 /* entry was written back */ zswap_frontswap_load()
1096 src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle, zswap_frontswap_load()
1099 tfm = *get_cpu_ptr(entry->pool->tfm); zswap_frontswap_load()
1100 ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen); zswap_frontswap_load()
1101 put_cpu_ptr(entry->pool->tfm); zswap_frontswap_load()
1103 zpool_unmap_handle(entry->pool->zpool, entry->handle); zswap_frontswap_load()
1107 zswap_entry_put(tree, entry); zswap_frontswap_load()
1113 /* frees an entry in zswap */ zswap_frontswap_invalidate_page()
1117 struct zswap_entry *entry; zswap_frontswap_invalidate_page() local
1121 entry = zswap_rb_search(&tree->rbroot, offset); zswap_frontswap_invalidate_page()
1122 if (!entry) { zswap_frontswap_invalidate_page()
1123 /* entry was written back */ zswap_frontswap_invalidate_page()
1129 zswap_rb_erase(&tree->rbroot, entry); zswap_frontswap_invalidate_page()
1131 /* drop the initial reference from entry creation */ zswap_frontswap_invalidate_page()
1132 zswap_entry_put(tree, entry); zswap_frontswap_invalidate_page()
1141 struct zswap_entry *entry, *n; zswap_frontswap_invalidate_area() local
1148 rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) zswap_frontswap_invalidate_area()
1149 zswap_free_entry(entry); zswap_frontswap_invalidate_area()
1241 pr_err("entry cache creation failed\n"); init_zswap()
325 zswap_entry_put(struct zswap_tree *tree, struct zswap_entry *entry) zswap_entry_put() argument
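The zswap excerpts pair each zswap_entry_get() with a zswap_entry_put() that erases and frees the entry once the count drops to zero, all under the tree lock. A stripped-down sketch of that discipline (stand-in types; the caller is assumed to hold the lock):

    /* Refcounted entry, freed on last put, as in zswap above. */
    #include <stdlib.h>

    struct zentry {
        int refcount;       /* starts at 1 on creation */
        int on_tree;        /* stands in for !RB_EMPTY_NODE() */
    };

    static void zentry_get(struct zentry *e)
    {
        e->refcount++;
    }

    static void zentry_put(struct zentry *e)   /* tree lock held */
    {
        if (--e->refcount > 0)
            return;
        e->on_tree = 0;     /* stands in for zswap_rb_erase() */
        free(e);            /* stands in for zswap_free_entry() */
    }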
H A Dswap_state.c79 int __add_to_swap_cache(struct page *page, swp_entry_t entry) __add_to_swap_cache() argument
90 set_page_private(page, entry.val); __add_to_swap_cache()
92 address_space = swap_address_space(entry); __add_to_swap_cache()
95 entry.val, page); __add_to_swap_cache()
119 int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) add_to_swap_cache() argument
125 error = __add_to_swap_cache(page, entry); add_to_swap_cache()
137 swp_entry_t entry; __delete_from_swap_cache() local
144 entry.val = page_private(page); __delete_from_swap_cache()
145 address_space = swap_address_space(entry); __delete_from_swap_cache()
163 swp_entry_t entry; add_to_swap() local
169 entry = get_swap_page(); add_to_swap()
170 if (!entry.val) add_to_swap()
175 swapcache_free(entry); add_to_swap()
190 err = add_to_swap_cache(page, entry, add_to_swap()
201 swapcache_free(entry); add_to_swap()
214 swp_entry_t entry; delete_from_swap_cache() local
217 entry.val = page_private(page); delete_from_swap_cache()
219 address_space = swap_address_space(entry); delete_from_swap_cache()
224 swapcache_free(entry); delete_from_swap_cache()
270 * Lookup a swap entry in the swap cache. A found page will be returned
275 struct page * lookup_swap_cache(swp_entry_t entry) lookup_swap_cache() argument
279 page = find_get_page(swap_address_space(entry), entry.val); lookup_swap_cache()
291 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, __read_swap_cache_async() argument
296 struct address_space *swapper_space = swap_address_space(entry); __read_swap_cache_async()
306 found_page = find_get_page(swapper_space, entry.val); __read_swap_cache_async()
327 * Swap entry may have been freed since our caller observed it. __read_swap_cache_async()
329 err = swapcache_prepare(entry); __read_swap_cache_async()
334 * across a SWAP_HAS_CACHE swap_map entry whose page __read_swap_cache_async()
350 if (err) { /* swp entry is obsolete ? */ __read_swap_cache_async()
358 err = __add_to_swap_cache(new_page, entry); __read_swap_cache_async()
375 swapcache_free(entry); __read_swap_cache_async()
387 * the swap entry is no longer in use.
389 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, read_swap_cache_async() argument
393 struct page *retpage = __read_swap_cache_async(entry, gfp_mask, read_swap_cache_async()
448 * @entry: swap entry of this memory
453 * Returns the struct page for entry and addr, after queueing swapin.
465 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, swapin_readahead() argument
469 unsigned long entry_offset = swp_offset(entry); swapin_readahead()
488 page = read_swap_cache_async(swp_entry(swp_type(entry), offset), swapin_readahead()
500 return read_swap_cache_async(entry, gfp_mask, vma, addr); swapin_readahead()
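add_to_swap() above reserves a swap slot before touching the cache and releases it on every failure path, so a slot can never leak. A hedged sketch of that reserve-then-undo flow with hypothetical helpers:

    /* Reserve-then-undo flow of add_to_swap() above; reserve_slot(),
     * cache_insert() and release_slot() are hypothetical stand-ins for
     * get_swap_page(), add_to_swap_cache() and swapcache_free(). */
    typedef unsigned long slot_t;

    slot_t reserve_slot(void);              /* 0 means no slot available */
    int cache_insert(void *page, slot_t slot);
    void release_slot(slot_t slot);

    int add_to_swap_sketch(void *page)
    {
        slot_t slot = reserve_slot();

        if (slot == 0)
            return 0;
        if (cache_insert(page, slot) != 0) {
            release_slot(slot);             /* undo the reservation */
            return 0;
        }
        return 1;
    }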
/linux-4.4.14/include/scsi/
H A Dfc_encode.h222 struct fc_fdmi_attr_entry *entry; fc_ct_ms_fill() local
257 entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr; fc_ct_ms_fill()
262 &entry->type); fc_ct_ms_fill()
263 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
265 (__be64 *)&entry->value[0]); fc_ct_ms_fill()
268 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
273 &entry->type); fc_ct_ms_fill()
274 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
275 strncpy((char *)&entry->value, fc_ct_ms_fill()
280 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
285 &entry->type); fc_ct_ms_fill()
286 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
287 strncpy((char *)&entry->value, fc_ct_ms_fill()
292 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
297 &entry->type); fc_ct_ms_fill()
298 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
299 strncpy((char *)&entry->value, fc_ct_ms_fill()
304 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
309 &entry->type); fc_ct_ms_fill()
310 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
311 strncpy((char *)&entry->value, fc_ct_ms_fill()
316 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
321 &entry->type); fc_ct_ms_fill()
322 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
323 strncpy((char *)&entry->value, fc_ct_ms_fill()
328 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
333 &entry->type); fc_ct_ms_fill()
334 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
335 strncpy((char *)&entry->value, fc_ct_ms_fill()
340 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
345 &entry->type); fc_ct_ms_fill()
346 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
347 strncpy((char *)&entry->value, fc_ct_ms_fill()
352 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
357 &entry->type); fc_ct_ms_fill()
358 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
359 strncpy((char *)&entry->value, fc_ct_ms_fill()
364 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
369 &entry->type); fc_ct_ms_fill()
370 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
371 snprintf((char *)&entry->value, fc_ct_ms_fill()
400 entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr; fc_ct_ms_fill()
406 &entry->type); fc_ct_ms_fill()
407 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
408 memcpy(&entry->value, fc_host_supported_fc4s(lport->host), fc_ct_ms_fill()
412 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
417 &entry->type); fc_ct_ms_fill()
418 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
421 &entry->value); fc_ct_ms_fill()
424 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
429 &entry->type); fc_ct_ms_fill()
430 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
432 &entry->value); fc_ct_ms_fill()
435 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
440 &entry->type); fc_ct_ms_fill()
441 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
443 &entry->value); fc_ct_ms_fill()
446 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
451 &entry->type); fc_ct_ms_fill()
452 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
454 strncpy((char *)&entry->value, fc_ct_ms_fill()
460 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
465 &entry->type); fc_ct_ms_fill()
466 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
468 strncpy((char *)&entry->value, fc_ct_ms_fill()
473 strncpy((char *)&entry->value, fc_ct_ms_fill()
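Every attribute in fc_ct_ms_fill() repeats the same four steps: store the type, store the length, copy the value, advance the entry pointer past the value. A hedged helper factoring that out (the struct and byte-order calls here are illustrative stand-ins, not the libfc API):

    /* One append = type + len + value + advance, as repeated above.
     * fdmi_attr stands in for struct fc_fdmi_attr_entry. */
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <string.h>

    struct fdmi_attr {
        uint16_t type;      /* big-endian on the wire */
        uint16_t len;       /* covers header + value */
        char value[];
    };

    static struct fdmi_attr *fdmi_append(struct fdmi_attr *attr,
                                         uint16_t type,
                                         const void *val, uint16_t val_len)
    {
        uint16_t len = (uint16_t)(sizeof(*attr) + val_len);

        attr->type = htons(type);   /* stand-in for put_unaligned_be16() */
        attr->len = htons(len);
        memcpy(attr->value, val, val_len);
        /* next attribute starts right after this one's value */
        return (struct fdmi_attr *)((char *)attr->value + val_len);
    }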
/linux-4.4.14/tools/usb/usbip/libsrc/
H A Dlist.h15 * using the generic single-entry routines.
34 * Insert a new entry between two known consecutive entries.
50 * list_add - add a new entry
51 * @new: new entry to be added
54 * Insert a new entry after the specified head.
63 * Delete a list entry by making the prev/next entries
80 * list_del - deletes entry from list.
81 * @entry: the element to delete from the list.
82 * Note: list_empty() on entry does not return true after this, the entry is
85 static inline void __list_del_entry(struct list_head *entry) __list_del_entry() argument
87 __list_del(entry->prev, entry->next); __list_del_entry()
90 static inline void list_del(struct list_head *entry) list_del() argument
92 __list_del(entry->prev, entry->next); list_del()
93 entry->next = LIST_POISON1; list_del()
94 entry->prev = LIST_POISON2; list_del()
98 * list_entry - get the struct for this entry
114 * list_for_each_safe - iterate over a list safe against removal of list entry
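The list.h excerpts describe the usual intrusive-list contract: a struct embeds a list_head, list_add()/list_del() splice the node, and list_entry() recovers the container. A short usage sketch against that API:

    /* Usage sketch for the list API quoted above; INIT_LIST_HEAD is
     * assumed to come from this same header. The struct is illustrative. */
    struct item {
        int value;
        struct list_head node;  /* embedded link, not a pointer */
    };

    static void drop_negatives(struct item *a, struct item *b)
    {
        struct list_head items, *pos, *tmp;

        INIT_LIST_HEAD(&items);
        list_add(&a->node, &items);     /* insert after the head */
        list_add(&b->node, &items);

        /* _safe form: the current node may be unlinked in the body */
        list_for_each_safe(pos, tmp, &items) {
            struct item *it = list_entry(pos, struct item, node);

            if (it->value < 0)
                list_del(&it->node);
        }
    }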
/linux-4.4.14/scripts/gdb/linux/
H A Dmodules.py26 entry = modules['next']
29 while entry != end_of_list:
30 yield utils.container_of(entry, module_ptr_type, "list")
31 entry = entry['next']
84 entry = source_list['next']
86 while entry != source_list.address:
87 use = utils.container_of(entry, t, "source_list")
92 entry = entry['next']
/linux-4.4.14/sound/pci/ca0106/
H A Dca0106_proc.c272 static void snd_ca0106_proc_iec958(struct snd_info_entry *entry, snd_ca0106_proc_iec958() argument
275 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_iec958()
294 static void snd_ca0106_proc_reg_write32(struct snd_info_entry *entry, snd_ca0106_proc_reg_write32() argument
297 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_reg_write32()
312 static void snd_ca0106_proc_reg_read32(struct snd_info_entry *entry, snd_ca0106_proc_reg_read32() argument
315 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_reg_read32()
328 static void snd_ca0106_proc_reg_read16(struct snd_info_entry *entry, snd_ca0106_proc_reg_read16() argument
331 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_reg_read16()
344 static void snd_ca0106_proc_reg_read8(struct snd_info_entry *entry, snd_ca0106_proc_reg_read8() argument
347 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_reg_read8()
360 static void snd_ca0106_proc_reg_read1(struct snd_info_entry *entry, snd_ca0106_proc_reg_read1() argument
363 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_reg_read1()
378 static void snd_ca0106_proc_reg_read2(struct snd_info_entry *entry, snd_ca0106_proc_reg_read2() argument
381 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_reg_read2()
396 static void snd_ca0106_proc_reg_write(struct snd_info_entry *entry, snd_ca0106_proc_reg_write() argument
399 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_reg_write()
410 static void snd_ca0106_proc_i2c_write(struct snd_info_entry *entry, snd_ca0106_proc_i2c_write() argument
413 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_i2c_write()
427 struct snd_info_entry *entry; snd_ca0106_proc_init() local
429 if(! snd_card_proc_new(emu->card, "iec958", &entry)) snd_ca0106_proc_init()
430 snd_info_set_text_ops(entry, emu, snd_ca0106_proc_iec958); snd_ca0106_proc_init()
431 if(! snd_card_proc_new(emu->card, "ca0106_reg32", &entry)) { snd_ca0106_proc_init()
432 snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read32); snd_ca0106_proc_init()
433 entry->c.text.write = snd_ca0106_proc_reg_write32; snd_ca0106_proc_init()
434 entry->mode |= S_IWUSR; snd_ca0106_proc_init()
436 if(! snd_card_proc_new(emu->card, "ca0106_reg16", &entry)) snd_ca0106_proc_init()
437 snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read16); snd_ca0106_proc_init()
438 if(! snd_card_proc_new(emu->card, "ca0106_reg8", &entry)) snd_ca0106_proc_init()
439 snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read8); snd_ca0106_proc_init()
440 if(! snd_card_proc_new(emu->card, "ca0106_regs1", &entry)) { snd_ca0106_proc_init()
441 snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read1); snd_ca0106_proc_init()
442 entry->c.text.write = snd_ca0106_proc_reg_write; snd_ca0106_proc_init()
443 entry->mode |= S_IWUSR; snd_ca0106_proc_init()
445 if(! snd_card_proc_new(emu->card, "ca0106_i2c", &entry)) { snd_ca0106_proc_init()
446 entry->c.text.write = snd_ca0106_proc_i2c_write; snd_ca0106_proc_init()
447 entry->private_data = emu; snd_ca0106_proc_init()
448 entry->mode |= S_IWUSR; snd_ca0106_proc_init()
450 if(! snd_card_proc_new(emu->card, "ca0106_regs2", &entry)) snd_ca0106_proc_init()
451 snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read2); snd_ca0106_proc_init()
/linux-4.4.14/fs/f2fs/
H A Dxattr.h63 #define ENTRY_SIZE(entry) (XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + \
64 entry->e_name_len + le16_to_cpu(entry->e_value_size)))
66 #define XATTR_NEXT_ENTRY(entry) ((struct f2fs_xattr_entry *)((char *)(entry) +\
67 ENTRY_SIZE(entry)))
69 #define IS_XATTR_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
71 #define list_for_each_xattr(entry, addr) \
72 for (entry = XATTR_FIRST_ENTRY(addr);\
73 !IS_XATTR_LAST_ENTRY(entry);\
74 entry = XATTR_NEXT_ENTRY(entry))
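
The xattr.h macros above walk packed, variable-length entries terminated by a zero word. A self-contained model of that walk follows, with an invented entry layout and 4-byte alignment; it is not the on-disk f2fs format.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct xent { uint8_t name_len; uint16_t value_size; char data[]; };

#define XALIGN(x)      (((x) + 3) & ~3u)   /* 4-byte alignment */
#define ENT_SIZE(e)    XALIGN(sizeof(struct xent) + (e)->name_len + \
                              (e)->value_size)
#define NEXT_ENT(e)    ((struct xent *)((char *)(e) + ENT_SIZE(e)))
#define IS_LAST_ENT(e) (*(uint32_t *)(e) == 0)

int main(void)
{
    static uint32_t buf[32];               /* zeroed, 4-byte aligned */
    struct xent *e = (struct xent *)buf;

    /* Two packed entries; the zeroed word after them terminates. */
    e->name_len = 4; e->value_size = 2; memcpy(e->data, "userAB", 6);
    e = NEXT_ENT(e);
    e->name_len = 3; e->value_size = 1; memcpy(e->data, "aclX", 4);

    for (e = (struct xent *)buf; !IS_LAST_ENT(e); e = NEXT_ENT(e))
        printf("name_len=%u value_size=%u\n",
               (unsigned)e->name_len, (unsigned)e->value_size);
    return 0;
}
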
H A Dacl.c53 struct f2fs_acl_entry *entry = (struct f2fs_acl_entry *)(hdr + 1); f2fs_acl_from_disk() local
71 if ((char *)entry > end) f2fs_acl_from_disk()
74 acl->a_entries[i].e_tag = le16_to_cpu(entry->e_tag); f2fs_acl_from_disk()
75 acl->a_entries[i].e_perm = le16_to_cpu(entry->e_perm); f2fs_acl_from_disk()
82 entry = (struct f2fs_acl_entry *)((char *)entry + f2fs_acl_from_disk()
89 le32_to_cpu(entry->e_id)); f2fs_acl_from_disk()
90 entry = (struct f2fs_acl_entry *)((char *)entry + f2fs_acl_from_disk()
96 le32_to_cpu(entry->e_id)); f2fs_acl_from_disk()
97 entry = (struct f2fs_acl_entry *)((char *)entry + f2fs_acl_from_disk()
104 if ((char *)entry != end) f2fs_acl_from_disk()
115 struct f2fs_acl_entry *entry; f2fs_acl_to_disk() local
124 entry = (struct f2fs_acl_entry *)(f2fs_acl + 1); f2fs_acl_to_disk()
128 entry->e_tag = cpu_to_le16(acl->a_entries[i].e_tag); f2fs_acl_to_disk()
129 entry->e_perm = cpu_to_le16(acl->a_entries[i].e_perm); f2fs_acl_to_disk()
133 entry->e_id = cpu_to_le32( f2fs_acl_to_disk()
136 entry = (struct f2fs_acl_entry *)((char *)entry + f2fs_acl_to_disk()
140 entry->e_id = cpu_to_le32( f2fs_acl_to_disk()
143 entry = (struct f2fs_acl_entry *)((char *)entry + f2fs_acl_to_disk()
150 entry = (struct f2fs_acl_entry *)((char *)entry + f2fs_acl_to_disk()
H A Drecovery.c61 struct fsync_inode_entry *entry; get_fsync_inode() local
63 list_for_each_entry(entry, head, list) get_fsync_inode()
64 if (entry->inode->i_ino == ino) get_fsync_inode()
65 return entry; get_fsync_inode()
186 struct fsync_inode_entry *entry; find_fsync_dnodes() local
199 entry = get_fsync_inode(head, ino_of_node(page)); find_fsync_dnodes()
200 if (!entry) { find_fsync_dnodes()
208 entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO); find_fsync_dnodes()
209 if (!entry) { find_fsync_dnodes()
217 entry->inode = f2fs_iget(sbi->sb, ino_of_node(page)); find_fsync_dnodes()
218 if (IS_ERR(entry->inode)) { find_fsync_dnodes()
219 err = PTR_ERR(entry->inode); find_fsync_dnodes()
220 kmem_cache_free(fsync_entry_slab, entry); find_fsync_dnodes()
227 list_add_tail(&entry->list, head); find_fsync_dnodes()
229 entry->blkaddr = blkaddr; find_fsync_dnodes()
232 entry->last_inode = blkaddr; find_fsync_dnodes()
234 entry->last_dentry = blkaddr; find_fsync_dnodes()
249 struct fsync_inode_entry *entry, *tmp; destroy_fsync_dnodes() local
251 list_for_each_entry_safe(entry, tmp, head, list) { list_for_each_entry_safe()
252 iput(entry->inode); list_for_each_entry_safe()
253 list_del(&entry->list); list_for_each_entry_safe()
254 kmem_cache_free(fsync_entry_slab, entry); list_for_each_entry_safe()
476 struct fsync_inode_entry *entry; recover_data() local
490 entry = get_fsync_inode(head, ino_of_node(page)); recover_data()
491 if (!entry) recover_data()
498 if (entry->last_inode == blkaddr) recover_data()
499 recover_inode(entry->inode, page); recover_data()
500 if (entry->last_dentry == blkaddr) { recover_data()
501 err = recover_dentry(entry->inode, page); recover_data()
507 err = do_recover_data(sbi, entry->inode, page, blkaddr); recover_data()
513 if (entry->blkaddr == blkaddr) { recover_data()
514 iput(entry->inode); recover_data()
515 list_del(&entry->list); recover_data()
516 kmem_cache_free(fsync_entry_slab, entry); recover_data()
/linux-4.4.14/fs/proc/
H A Dnamespaces.c109 const struct proc_ns_operations **entry, **last; proc_ns_dir_readdir() local
118 entry = ns_entries + (ctx->pos - 2); proc_ns_dir_readdir()
120 while (entry <= last) { proc_ns_dir_readdir()
121 const struct proc_ns_operations *ops = *entry; proc_ns_dir_readdir()
126 entry++; proc_ns_dir_readdir()
143 const struct proc_ns_operations **entry, **last; proc_ns_dir_lookup() local
152 for (entry = ns_entries; entry < last; entry++) { proc_ns_dir_lookup()
153 if (strlen((*entry)->name) != len) proc_ns_dir_lookup()
155 if (!memcmp(dentry->d_name.name, (*entry)->name, len)) proc_ns_dir_lookup()
158 if (entry == last) proc_ns_dir_lookup()
161 error = proc_ns_instantiate(dir, dentry, task, *entry); proc_ns_dir_lookup()
H A Dproc_sysctl.c106 struct ctl_table *entry; find_entry() local
117 entry = &head->ctl_table[ctl_node - head->node]; find_entry()
118 procname = entry->procname; find_entry()
127 return entry; find_entry()
133 static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry) insert_entry() argument
135 struct rb_node *node = &head->node[entry - head->ctl_table].node; insert_entry()
138 const char *name = entry->procname; insert_entry()
160 pr_err("sysctl duplicate entry: "); insert_entry()
162 pr_cont("/%s\n", entry->procname); insert_entry()
172 static void erase_entry(struct ctl_table_header *head, struct ctl_table *entry) erase_entry() argument
174 struct rb_node *node = &head->node[entry - head->ctl_table].node; erase_entry()
194 struct ctl_table *entry; init_header() local
195 for (entry = table; entry->procname; entry++, node++) init_header()
202 struct ctl_table *entry; erase_header() local
203 for (entry = head->ctl_table; entry->procname; entry++) erase_header()
204 erase_entry(head, entry); erase_header()
209 struct ctl_table *entry; insert_header() local
228 for (entry = header->ctl_table; entry->procname; entry++) { insert_header()
229 err = insert_entry(header, entry); insert_header()
266 * if p->used is 0, nobody will ever touch that entry again; start_unregistering()
335 struct ctl_table *entry; lookup_entry() local
338 entry = find_entry(&head, dir, name, namelen); lookup_entry()
339 if (entry && use_table(head)) lookup_entry()
342 entry = NULL; lookup_entry()
344 return entry; lookup_entry()
363 struct ctl_table *entry = NULL; first_entry() local
371 entry = &head->ctl_table[ctl_node - head->node]; first_entry()
374 *pentry = entry; first_entry()
380 struct ctl_table *entry = *pentry; next_entry() local
381 struct ctl_node *ctl_node = &head->node[entry - head->ctl_table]; next_entry()
391 entry = &head->ctl_table[ctl_node - head->node]; next_entry()
394 *pentry = entry; next_entry()
696 struct ctl_table *entry; proc_sys_readdir() local
710 for (first_entry(ctl_dir, &h, &entry); h; next_entry(&h, &entry)) { proc_sys_readdir()
711 if (!scan(h, entry, &pos, file, ctx)) { proc_sys_readdir()
741 else /* Use the permissions on the sysctl table entry */ proc_sys_permission()
866 struct ctl_table *entry; find_subdir() local
868 entry = find_entry(&head, dir, name, namelen); find_subdir()
869 if (!entry) find_subdir()
871 if (!S_ISDIR(entry->mode)) find_subdir()
942 /* Nope. Use our freshly made directory entry. */ get_subdir()
983 struct ctl_table *entry; sysctl_follow_link() local
997 entry = find_entry(&head, dir, procname, strlen(procname)); sysctl_follow_link()
999 if (entry && use_table(head)) { sysctl_follow_link()
1002 *pentry = entry; sysctl_follow_link()
1060 struct ctl_table *link_table, *entry, *link; new_links() local
1068 for (entry = table; entry->procname; entry++) { new_links()
1070 name_bytes += strlen(entry->procname) + 1; new_links()
1086 for (link = link_table, entry = table; entry->procname; link++, entry++) { new_links()
1087 int len = strlen(entry->procname) + 1; new_links()
1088 memcpy(link_name, entry->procname, len); new_links()
1104 struct ctl_table *entry, *link; get_links() local
1106 /* Are there links available for every entry in table? */ get_links()
1107 for (entry = table; entry->procname; entry++) { get_links()
1108 const char *procname = entry->procname; get_links()
1112 if (S_ISDIR(link->mode) && S_ISDIR(entry->mode)) get_links()
1120 for (entry = table; entry->procname; entry++) { get_links()
1121 const char *procname = entry->procname; get_links()
1176 * array. A completely 0 filled entry terminates the table.
1219 struct ctl_table *entry; __register_sysctl_table() local
1223 for (entry = table; entry->procname; entry++) __register_sysctl_table()
1284 * array. A completely 0 filled entry terminates the table.
1312 struct ctl_table *entry; count_subheaders() local
1318 for (entry = table; entry->procname; entry++) { count_subheaders()
1319 if (entry->child) count_subheaders()
1320 nr_subheaders += count_subheaders(entry->child); count_subheaders()
1332 struct ctl_table *entry, *files; register_leaf_sysctl_tables() local
1337 for (entry = table; entry->procname; entry++) { register_leaf_sysctl_tables()
1338 if (entry->child) register_leaf_sysctl_tables()
1354 for (new = files, entry = table; entry->procname; entry++) { register_leaf_sysctl_tables()
1355 if (entry->child) register_leaf_sysctl_tables()
1357 *new = *entry; register_leaf_sysctl_tables()
1378 for (entry = table; entry->procname; entry++) { register_leaf_sysctl_tables()
1381 if (!entry->child) register_leaf_sysctl_tables()
1385 child_pos = append_path(path, pos, entry->procname); register_leaf_sysctl_tables()
1390 set, entry->child); register_leaf_sysctl_tables()
1408 * array. A completely 0 filled entry terminates the table.
1479 * array. A completely 0 filled entry terminates the table.
1496 * array. A completely 0 filled entry terminates the table.
1514 struct ctl_table *entry; put_links() local
1523 for (entry = header->ctl_table; entry->procname; entry++) { put_links()
1526 const char *name = entry->procname; put_links()
1530 ((S_ISDIR(link->mode) && S_ISDIR(entry->mode)) || put_links()
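
Several of the proc_sysctl excerpts rely on the convention called out in the comments above: a completely 0-filled entry terminates the table, so every walker loops while ->procname is non-NULL. A minimal stand-in (not the kernel's ctl_table):

#include <stdio.h>

struct tbl { const char *procname; int mode; };

static void walk(const struct tbl *table)
{
    /* Loop until the zero-filled sentinel entry. */
    for (const struct tbl *entry = table; entry->procname; entry++)
        printf("%s (mode %o)\n", entry->procname, entry->mode);
}

int main(void)
{
    static const struct tbl table[] = {
        { "max_threads", 0644 },
        { "hostname",    0644 },
        { 0 }                    /* completely 0-filled terminator */
    };
    walk(table);
    return 0;
}
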
/linux-4.4.14/drivers/staging/gdm72xx/
H A Dgdm_qos.c43 struct qos_entry_s *entry; alloc_qos_entry() local
48 entry = list_entry(qos_free_list.head.prev, struct qos_entry_s, alloc_qos_entry()
50 list_del(&entry->list); alloc_qos_entry()
53 return entry; alloc_qos_entry()
57 return kmalloc(sizeof(*entry), GFP_ATOMIC); alloc_qos_entry()
60 static void free_qos_entry(void *entry) free_qos_entry() argument
62 struct qos_entry_s *qentry = entry; free_qos_entry()
74 kfree(entry); free_qos_entry()
79 struct qos_entry_s *entry, *n; free_qos_entry_list() local
82 list_for_each_entry_safe(entry, n, free_list, list) { list_for_each_entry_safe()
83 list_del(&entry->list); list_for_each_entry_safe()
84 kfree(entry); list_for_each_entry_safe()
117 struct qos_entry_s *entry, *n; gdm_qos_release_list() local
134 list_for_each_entry_safe(entry, n, &qcb->qos_list[i], list) { gdm_qos_release_list()
135 list_move_tail(&entry->list, &free_list); gdm_qos_release_list()
216 struct qos_entry_s *entry; extract_qos_list() local
229 entry = list_entry(qcb->qos_list[i].prev, struct qos_entry_s, extract_qos_list()
232 list_move_tail(&entry->list, head); extract_qos_list()
242 struct qos_entry_s *entry, *n; send_qos_list() local
244 list_for_each_entry_safe(entry, n, head, list) { list_for_each_entry_safe()
245 list_del(&entry->list); list_for_each_entry_safe()
246 gdm_wimax_send_tx(entry->skb, entry->dev); list_for_each_entry_safe()
247 free_qos_entry(entry); list_for_each_entry_safe()
260 struct qos_entry_s *entry = NULL; gdm_qos_send_hci_pkt() local
268 entry = alloc_qos_entry(); gdm_qos_send_hci_pkt()
269 entry->skb = skb; gdm_qos_send_hci_pkt()
270 entry->dev = dev; gdm_qos_send_hci_pkt()
281 if (!entry) { gdm_qos_send_hci_pkt()
282 entry = alloc_qos_entry(); gdm_qos_send_hci_pkt()
283 entry->skb = skb; gdm_qos_send_hci_pkt()
284 entry->dev = dev; gdm_qos_send_hci_pkt()
287 list_add_tail(&entry->list, &qcb->qos_list[index]); gdm_qos_send_hci_pkt()
294 if (entry) gdm_qos_send_hci_pkt()
295 free_qos_entry(entry); gdm_qos_send_hci_pkt()
335 struct qos_entry_s *entry, *n; gdm_recv_qos_hci_packet() local
431 list_for_each_entry_safe(entry, n, &qcb->qos_list[index], gdm_recv_qos_hci_packet()
433 list_move_tail(&entry->list, &free_list); gdm_recv_qos_hci_packet()
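
alloc_qos_entry()/free_qos_entry() above recycle entries through a private free list before falling back to the general allocator. A single-threaded model of that cache (the driver additionally takes a spinlock); names are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct qentry { struct qentry *next; int data; };

static struct qentry *free_list;

static struct qentry *qalloc(void)
{
    if (free_list) {
        struct qentry *e = free_list;
        free_list = e->next;
        return e;                    /* reuse a recycled entry */
    }
    return malloc(sizeof(struct qentry));
}

static void qfree(struct qentry *e)
{
    e->next = free_list;             /* push back for reuse */
    free_list = e;
}

int main(void)
{
    struct qentry *a = qalloc();
    qfree(a);
    struct qentry *b = qalloc();     /* same storage as a */
    printf("recycled: %s\n", a == b ? "yes" : "no");
    return 0;
}
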
/linux-4.4.14/kernel/
H A Dauditfilter.c114 /* Initialize an audit filterlist entry. */ audit_init_entry()
117 struct audit_entry *entry; audit_init_entry() local
120 entry = kzalloc(sizeof(*entry), GFP_KERNEL); audit_init_entry()
121 if (unlikely(!entry)) audit_init_entry()
126 kfree(entry); audit_init_entry()
129 entry->rule.fields = fields; audit_init_entry()
131 return entry; audit_init_entry()
219 static int audit_match_signal(struct audit_entry *entry) audit_match_signal() argument
221 struct audit_field *arch = entry->rule.arch_f; audit_match_signal()
227 entry->rule.mask) && audit_match_signal()
229 entry->rule.mask)); audit_match_signal()
235 entry->rule.mask)); audit_match_signal()
238 entry->rule.mask)); audit_match_signal()
249 struct audit_entry *entry; audit_to_entry_common() local
278 entry = audit_init_entry(rule->field_count); audit_to_entry_common()
279 if (!entry) audit_to_entry_common()
282 entry->rule.flags = rule->flags & AUDIT_FILTER_PREPEND; audit_to_entry_common()
283 entry->rule.listnr = listnr; audit_to_entry_common()
284 entry->rule.action = rule->action; audit_to_entry_common()
285 entry->rule.field_count = rule->field_count; audit_to_entry_common()
288 entry->rule.mask[i] = rule->mask[i]; audit_to_entry_common()
292 __u32 *p = &entry->rule.mask[AUDIT_WORD(bit)]; audit_to_entry_common()
302 entry->rule.mask[j] |= class[j]; audit_to_entry_common()
306 return entry; audit_to_entry_common()
333 static int audit_field_valid(struct audit_entry *entry, struct audit_field *f) audit_field_valid() argument
337 if (entry->rule.listnr != AUDIT_FILTER_TYPE && audit_field_valid()
338 entry->rule.listnr != AUDIT_FILTER_USER) audit_field_valid()
411 if (entry->rule.listnr != AUDIT_FILTER_EXIT) audit_field_valid()
423 struct audit_entry *entry; audit_data_to_entry() local
430 entry = audit_to_entry_common(data); audit_data_to_entry()
431 if (IS_ERR(entry)) audit_data_to_entry()
436 struct audit_field *f = &entry->rule.fields[i]; audit_data_to_entry()
451 entry->rule.pflags |= AUDIT_LOGINUID_LEGACY; audit_data_to_entry()
454 err = audit_field_valid(entry, f); audit_data_to_entry()
480 entry->rule.arch_f = f; audit_data_to_entry()
495 entry->rule.buflen += f->val; audit_data_to_entry()
516 entry->rule.buflen += f->val; audit_data_to_entry()
518 err = audit_to_watch(&entry->rule, str, f->val, f->op); audit_data_to_entry()
528 entry->rule.buflen += f->val; audit_data_to_entry()
530 err = audit_make_tree(&entry->rule, str, f->op); audit_data_to_entry()
536 err = audit_to_inode(&entry->rule, f); audit_data_to_entry()
541 if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN) audit_data_to_entry()
546 entry->rule.buflen += f->val; audit_data_to_entry()
547 entry->rule.filterkey = str; audit_data_to_entry()
550 if (entry->rule.exe || f->val > PATH_MAX) audit_data_to_entry()
557 entry->rule.buflen += f->val; audit_data_to_entry()
559 audit_mark = audit_alloc_mark(&entry->rule, str, f->val); audit_data_to_entry()
565 entry->rule.exe = audit_mark; audit_data_to_entry()
570 if (entry->rule.inode_f && entry->rule.inode_f->op == Audit_not_equal) audit_data_to_entry()
571 entry->rule.inode_f = NULL; audit_data_to_entry()
574 return entry; audit_data_to_entry()
577 if (entry->rule.tree) audit_data_to_entry()
578 audit_put_tree(entry->rule.tree); /* that's the temporary one */ audit_data_to_entry()
579 if (entry->rule.exe) audit_data_to_entry()
580 audit_remove_mark(entry->rule.exe); /* that's the template one */ audit_data_to_entry()
581 audit_free_rule(entry); audit_data_to_entry()
785 struct audit_entry *entry; audit_dupe_rule() local
790 entry = audit_init_entry(fcount); audit_dupe_rule()
791 if (unlikely(!entry)) audit_dupe_rule()
794 new = &entry->rule; audit_dupe_rule()
847 audit_free_rule(entry); audit_dupe_rule()
857 return entry; audit_dupe_rule()
862 static struct audit_entry *audit_find_rule(struct audit_entry *entry, audit_find_rule() argument
869 if (entry->rule.inode_f) { audit_find_rule()
870 h = audit_hash_ino(entry->rule.inode_f->val); audit_find_rule()
872 } else if (entry->rule.watch) { audit_find_rule()
877 if (!audit_compare_rule(&entry->rule, &e->rule)) { list_for_each_entry()
884 *p = list = &audit_filter_list[entry->rule.listnr];
888 if (!audit_compare_rule(&entry->rule, &e->rule)) { list_for_each_entry()
901 static inline int audit_add_rule(struct audit_entry *entry) audit_add_rule() argument
904 struct audit_watch *watch = entry->rule.watch; audit_add_rule()
905 struct audit_tree *tree = entry->rule.tree; audit_add_rule()
912 if (entry->rule.listnr == AUDIT_FILTER_USER || audit_add_rule()
913 entry->rule.listnr == AUDIT_FILTER_TYPE) audit_add_rule()
918 e = audit_find_rule(entry, &list); audit_add_rule()
930 err = audit_add_watch(&entry->rule, &list); audit_add_rule()
943 err = audit_add_tree_rule(&entry->rule); audit_add_rule()
950 entry->rule.prio = ~0ULL; audit_add_rule()
951 if (entry->rule.listnr == AUDIT_FILTER_EXIT) { audit_add_rule()
952 if (entry->rule.flags & AUDIT_FILTER_PREPEND) audit_add_rule()
953 entry->rule.prio = ++prio_high; audit_add_rule()
955 entry->rule.prio = --prio_low; audit_add_rule()
958 if (entry->rule.flags & AUDIT_FILTER_PREPEND) { audit_add_rule()
959 list_add(&entry->rule.list, audit_add_rule()
960 &audit_rules_list[entry->rule.listnr]); audit_add_rule()
961 list_add_rcu(&entry->list, list); audit_add_rule()
962 entry->rule.flags &= ~AUDIT_FILTER_PREPEND; audit_add_rule()
964 list_add_tail(&entry->rule.list, audit_add_rule()
965 &audit_rules_list[entry->rule.listnr]); audit_add_rule()
966 list_add_tail_rcu(&entry->list, list); audit_add_rule()
972 if (!audit_match_signal(entry)) audit_add_rule()
981 int audit_del_rule(struct audit_entry *entry) audit_del_rule() argument
984 struct audit_tree *tree = entry->rule.tree; audit_del_rule()
991 if (entry->rule.listnr == AUDIT_FILTER_USER || audit_del_rule()
992 entry->rule.listnr == AUDIT_FILTER_TYPE) audit_del_rule()
997 e = audit_find_rule(entry, &list); audit_del_rule()
1016 if (!audit_match_signal(entry)) audit_del_rule()
1096 struct audit_entry *entry; audit_rule_change() local
1098 entry = audit_data_to_entry(data, datasz); audit_rule_change()
1099 if (IS_ERR(entry)) audit_rule_change()
1100 return PTR_ERR(entry); audit_rule_change()
1104 err = audit_add_rule(entry); audit_rule_change()
1105 audit_log_rule_change("add_rule", &entry->rule, !err); audit_rule_change()
1108 err = audit_del_rule(entry); audit_rule_change()
1109 audit_log_rule_change("remove_rule", &entry->rule, !err); audit_rule_change()
1117 if (entry->rule.exe) audit_rule_change()
1118 audit_remove_mark(entry->rule.exe); audit_rule_change()
1119 audit_free_rule(entry); audit_rule_change()
1404 struct audit_entry *entry = container_of(r, struct audit_entry, rule); update_lsm_rule() local
1412 if (entry->rule.exe) update_lsm_rule()
1413 audit_remove_mark(entry->rule.exe); update_lsm_rule()
1421 list_del_rcu(&entry->list); update_lsm_rule()
1426 list_replace_rcu(&entry->list, &nentry->list); update_lsm_rule()
1429 call_rcu(&entry->rcu, audit_free_rule_rcu); update_lsm_rule()
H A Djump_label.c120 static int addr_conflict(struct jump_entry *entry, void *start, void *end) addr_conflict() argument
122 if (entry->code <= (unsigned long)end && addr_conflict()
123 entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start) addr_conflict()
150 void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry, arch_jump_label_transform_static() argument
153 arch_jump_label_transform(entry, type); arch_jump_label_transform_static()
166 static inline struct static_key *jump_entry_key(struct jump_entry *entry) jump_entry_key() argument
168 return (struct static_key *)((unsigned long)entry->key & ~1UL); jump_entry_key()
171 static bool jump_entry_branch(struct jump_entry *entry) jump_entry_branch() argument
173 return (unsigned long)entry->key & 1UL; jump_entry_branch()
176 static enum jump_label_type jump_label_type(struct jump_entry *entry) jump_label_type() argument
178 struct static_key *key = jump_entry_key(entry); jump_label_type()
180 bool branch = jump_entry_branch(entry); jump_label_type()
187 struct jump_entry *entry, __jump_label_update()
190 for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { __jump_label_update()
192 * entry->code set to 0 invalidates module init text sections __jump_label_update()
196 if (entry->code && kernel_text_address(entry->code)) __jump_label_update()
197 arch_jump_label_transform(entry, jump_label_type(entry)); __jump_label_update()
237 static enum jump_label_type jump_label_init_type(struct jump_entry *entry) jump_label_init_type() argument
239 struct static_key *key = jump_entry_key(entry); jump_label_init_type()
241 bool branch = jump_entry_branch(entry); jump_label_init_type()
468 struct jump_entry *entry = static_key_entries(key); jump_label_update() local
480 /* if there are no users, entry can be NULL */ jump_label_update()
481 if (entry) jump_label_update()
482 __jump_label_update(key, entry, stop); jump_label_update()
186 __jump_label_update(struct static_key *key, struct jump_entry *entry, struct jump_entry *stop) __jump_label_update() argument
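
jump_entry_key() and jump_entry_branch() above pack a one-bit branch flag into the low bit of an aligned key pointer. A minimal userspace model of that tagging trick; in the kernel the tagged values come from a linker-built jump table.

#include <stdio.h>
#include <stdint.h>

struct static_key { int enabled; };

static uintptr_t pack(struct static_key *key, int branch)
{
    /* Keys are at least 2-byte aligned, so bit 0 is free. */
    return (uintptr_t)key | (branch & 1);
}

static struct static_key *unpack_key(uintptr_t v)
{
    return (struct static_key *)(v & ~(uintptr_t)1);
}

static int unpack_branch(uintptr_t v)
{
    return v & 1;
}

int main(void)
{
    static struct static_key key = { .enabled = 1 };
    uintptr_t v = pack(&key, 1);

    printf("key=%p branch=%d enabled=%d\n",
           (void *)unpack_key(v), unpack_branch(v), unpack_key(v)->enabled);
    return 0;
}
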
H A Dasync.c107 * pick the first pending entry and run it
111 struct async_entry *entry = async_run_entry_fn() local
119 (long long)entry->cookie, async_run_entry_fn()
120 entry->func, task_pid_nr(current)); async_run_entry_fn()
123 entry->func(entry->data, entry->cookie); async_run_entry_fn()
128 (long long)entry->cookie, async_run_entry_fn()
129 entry->func, async_run_entry_fn()
135 list_del_init(&entry->domain_list); async_run_entry_fn()
136 list_del_init(&entry->global_list); async_run_entry_fn()
138 /* 3) free the entry */ async_run_entry_fn()
139 kfree(entry); async_run_entry_fn()
150 struct async_entry *entry; __async_schedule() local
155 entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC); __async_schedule()
161 if (!entry || atomic_read(&entry_count) > MAX_WORK) { __async_schedule()
162 kfree(entry); __async_schedule()
171 INIT_LIST_HEAD(&entry->domain_list); __async_schedule()
172 INIT_LIST_HEAD(&entry->global_list); __async_schedule()
173 INIT_WORK(&entry->work, async_run_entry_fn); __async_schedule()
174 entry->func = func; __async_schedule()
175 entry->data = data; __async_schedule()
176 entry->domain = domain; __async_schedule()
181 newcookie = entry->cookie = next_cookie++; __async_schedule()
183 list_add_tail(&entry->domain_list, &domain->pending); __async_schedule()
185 list_add_tail(&entry->global_list, &async_global_pending); __async_schedule()
194 queue_work(system_unbound_wq, &entry->work); __async_schedule()
H A Dfutex_compat.c23 fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, fetch_robust_entry() argument
29 *entry = compat_ptr((*uentry) & ~1); fetch_robust_entry()
35 static void __user *futex_uaddr(struct robust_list __user *entry, futex_uaddr() argument
38 compat_uptr_t base = ptr_to_compat(entry); futex_uaddr()
53 struct robust_list __user *entry, *next_entry, *pending; compat_exit_robust_list() local
67 if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) compat_exit_robust_list()
83 while (entry != (struct robust_list __user *) &head->list) { compat_exit_robust_list()
85 * Fetch the next entry in the list before calling compat_exit_robust_list()
89 (compat_uptr_t __user *)&entry->next, &next_pi); compat_exit_robust_list()
94 if (entry != pending) { compat_exit_robust_list()
95 void __user *uaddr = futex_uaddr(entry, futex_offset); compat_exit_robust_list()
103 entry = next_entry; compat_exit_robust_list()
/linux-4.4.14/fs/ntfs/
H A Dindex.h36 * @idx_ni: index inode containing the @entry described by this context
37 * @entry: index entry (points into @ir or @ia)
38 * @data: index entry data (points into @entry)
40 * @is_in_root: 'true' if @entry is in @ir and 'false' if it is in @ia
49 * @entry is the index entry described by this context. @data and @data_len
50 * are the index entry data and its length in bytes, respectively. @data
51 * simply points into @entry. This is probably what the user is interested in.
53 * If @is_in_root is 'true', @entry is in the index root attribute @ir described
57 * If @is_in_root is 'false', @entry is in the index allocation attribute and @ia
64 * @entry and its @data without having to allocate a buffer and copy the @entry
67 * When finished with the @entry and its @data, call ntfs_index_ctx_put() to
70 * If the index entry was modified, call flush_dcache_index_entry_page()
77 INDEX_ENTRY *entry; member in struct:__anon11736
98 * @ictx: ntfs index context describing the index entry
100 * Call flush_dcache_page() for the page in which an index entry resides.
102 * This must be called every time an index entry is modified, just after the
105 * If the index entry is in the index root attribute, simply flush the page
108 * If the index entry is in an index block belonging to the index allocation
120 * ntfs_index_entry_mark_dirty - mark an index entry dirty
121 * @ictx: ntfs index context describing the index entry
123 * Mark the index entry described by the index entry context @ictx dirty.
125 * If the index entry is in the index root attribute, simply mark the mft
130 * If the index entry is in an index block belonging to the index allocation
133 * VFS inode of the ntfs index inode to which the index entry belongs dirty,
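
The index.h comments above describe a lookup context whose @entry and @data point into whichever buffer the entry was found in, with @is_in_root deciding how it is released. A skeletal model of that ownership pattern follows; every type and function here is a stand-in, not the ntfs implementation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct index_ctx {
    int   is_in_root;   /* entry found in the inline root? */
    void *block;        /* separately allocated block, if not in root */
    char *entry;        /* points into root or block; never freed itself */
};

static char root[] = "root-entry";

static int ctx_lookup(struct index_ctx *ctx, int want_root)
{
    memset(ctx, 0, sizeof(*ctx));
    if (want_root) {
        ctx->is_in_root = 1;
        ctx->entry = root;               /* no copy: point into the root */
    } else {
        ctx->block = strdup("block-entry");
        ctx->entry = ctx->block;         /* point into the external block */
    }
    return 0;
}

/* Counterpart of ntfs_index_ctx_put(): release whatever backs @entry. */
static void ctx_put(struct index_ctx *ctx)
{
    if (!ctx->is_in_root)
        free(ctx->block);
    ctx->entry = NULL;
}

int main(void)
{
    struct index_ctx ctx;

    ctx_lookup(&ctx, 0);
    printf("%s (in_root=%d)\n", ctx.entry, ctx.is_in_root);
    ctx_put(&ctx);
    return 0;
}
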
/linux-4.4.14/drivers/net/wireless/ath/
H A Dkey.c42 bool ath_hw_keyreset(struct ath_common *common, u16 entry) ath_hw_keyreset() argument
47 if (entry >= common->keymax) { ath_hw_keyreset()
48 ath_err(common, "keyreset: keycache entry %u out of range\n", ath_hw_keyreset()
49 entry); ath_hw_keyreset()
53 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry)); ath_hw_keyreset()
57 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0); ath_hw_keyreset()
58 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0); ath_hw_keyreset()
59 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0); ath_hw_keyreset()
60 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0); ath_hw_keyreset()
61 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0); ath_hw_keyreset()
62 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR); ath_hw_keyreset()
63 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0); ath_hw_keyreset()
64 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0); ath_hw_keyreset()
67 u16 micentry = entry + 64; ath_hw_keyreset()
88 u16 entry, const u8 *mac) ath_hw_keysetmac()
94 if (entry >= common->keymax) { ath_hw_keysetmac()
95 ath_err(common, "keysetmac: keycache entry %u out of range\n", ath_hw_keysetmac()
96 entry); ath_hw_keysetmac()
121 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo); ath_hw_keysetmac()
122 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag); ath_hw_keysetmac()
129 static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry, ath_hw_set_keycache_entry() argument
137 if (entry >= common->keymax) { ath_hw_set_keycache_entry()
138 ath_err(common, "set-entry: keycache entry %u out of range\n", ath_hw_set_keycache_entry()
139 entry); ath_hw_set_keycache_entry()
157 if (entry + 64 >= common->keymax) { ath_hw_set_keycache_entry()
159 "entry %u inappropriate for TKIP\n", entry); ath_hw_set_keycache_entry()
200 u16 micentry = entry + 64; ath_hw_set_keycache_entry()
208 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0); ath_hw_set_keycache_entry()
209 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1); ath_hw_set_keycache_entry()
212 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2); ath_hw_set_keycache_entry()
213 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3); ath_hw_set_keycache_entry()
216 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4); ath_hw_set_keycache_entry()
217 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType); ath_hw_set_keycache_entry()
219 /* Write MAC address for the entry */ ath_hw_set_keycache_entry()
220 (void) ath_hw_keysetmac(common, entry, mac); ath_hw_set_keycache_entry()
225 * Michael MIC TX/RX keys in the same key cache entry ath_hw_set_keycache_entry()
302 /* MAC address registers are reserved for the MIC entry */ ath_hw_set_keycache_entry()
311 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0); ath_hw_set_keycache_entry()
312 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1); ath_hw_set_keycache_entry()
319 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0); ath_hw_set_keycache_entry()
320 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1); ath_hw_set_keycache_entry()
323 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2); ath_hw_set_keycache_entry()
324 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3); ath_hw_set_keycache_entry()
327 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4); ath_hw_set_keycache_entry()
328 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType); ath_hw_set_keycache_entry()
332 /* Write MAC address for the entry */ ath_hw_set_keycache_entry()
333 (void) ath_hw_keysetmac(common, entry, mac); ath_hw_set_keycache_entry()
365 /* TX and RX keys share the same key cache entry. */ ath_setkey_tkip()
376 /* TX MIC entry failed. No need to proceed further */ ath_setkey_tkip()
87 ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac) ath_hw_keysetmac() argument
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/bios/
H A Dbit.c32 u32 entry = bios->bit_offset + 12; bit_entry() local
34 if (nvbios_rd08(bios, entry + 0) == id) { bit_entry()
35 bit->id = nvbios_rd08(bios, entry + 0); bit_entry()
36 bit->version = nvbios_rd08(bios, entry + 1); bit_entry()
37 bit->length = nvbios_rd16(bios, entry + 2); bit_entry()
38 bit->offset = nvbios_rd16(bios, entry + 4); bit_entry()
42 entry += nvbios_rd08(bios, bios->bit_offset + 9); bit_entry()
H A Dextdev.c61 struct nvbios_extdev_func *entry) extdev_parse_entry()
63 entry->type = nvbios_rd08(bios, offset + 0); extdev_parse_entry()
64 entry->addr = nvbios_rd08(bios, offset + 1); extdev_parse_entry()
65 entry->bus = (nvbios_rd08(bios, offset + 2) >> 4) & 1; extdev_parse_entry()
73 u16 entry; nvbios_extdev_parse() local
75 if (!(entry = nvbios_extdev_entry(bios, idx, &ver, &len))) nvbios_extdev_parse()
78 extdev_parse_entry(bios, entry, func); nvbios_extdev_parse()
87 u16 entry; nvbios_extdev_find() local
90 while ((entry = nvbios_extdev_entry(bios, i++, &ver, &len))) { nvbios_extdev_find()
91 extdev_parse_entry(bios, entry, func); nvbios_extdev_find()
60 extdev_parse_entry(struct nvkm_bios *bios, u16 offset, struct nvbios_extdev_func *entry) extdev_parse_entry() argument
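
bit_entry() and extdev_parse_entry() above pull fixed-offset fields out of a raw BIOS image through 8- and 16-bit accessors. The same pattern in a self-contained form; the 6-byte record layout below is invented for illustration.

#include <stdio.h>
#include <stdint.h>

static uint8_t rd08(const uint8_t *img, unsigned off) { return img[off]; }

static uint16_t rd16(const uint8_t *img, unsigned off)
{
    return img[off] | (img[off + 1] << 8);      /* little-endian */
}

struct bit_entry { uint8_t id, version; uint16_t length, offset; };

/* Scan 6-byte records starting at `base` for a matching id. */
static int bit_find(const uint8_t *img, unsigned base, unsigned count,
                    uint8_t id, struct bit_entry *bit)
{
    for (unsigned i = 0; i < count; i++, base += 6) {
        if (rd08(img, base) != id)
            continue;
        bit->id      = rd08(img, base + 0);
        bit->version = rd08(img, base + 1);
        bit->length  = rd16(img, base + 2);
        bit->offset  = rd16(img, base + 4);
        return 0;
    }
    return -1;
}

int main(void)
{
    static const uint8_t img[] = {
        'P', 2, 0x10, 0x00, 0x34, 0x12,         /* id 'P', v2, len 0x10 */
        'i', 1, 0x08, 0x00, 0x00, 0x20,         /* id 'i', v1, len 0x08 */
    };
    struct bit_entry bit;

    if (!bit_find(img, 0, 2, 'i', &bit))
        printf("id=%c ver=%u len=%u off=0x%x\n",
               bit.id, bit.version, bit.length, bit.offset);
    return 0;
}
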
/linux-4.4.14/drivers/thunderbolt/
H A Dnhi_regs.h28 * struct ring_desc - TX/RX ring entry
45 * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
54 * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
64 * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
72 * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
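
The nhi_regs.h comments above describe register blocks laid out as one fixed-size record per hop. The address arithmetic that layout implies, with placeholder base and size values rather than the real NHI constants:

#include <stdio.h>
#include <stdint.h>

#define REG_TX_RING_BASE 0x00000u   /* hypothetical base */
#define TX_RING_ENTRY_SZ 16u        /* 16 bytes per entry, one per hop */

static uint32_t tx_ring_reg(unsigned hop)
{
    return REG_TX_RING_BASE + hop * TX_RING_ENTRY_SZ;
}

int main(void)
{
    for (unsigned hop = 0; hop < 4; hop++)
        printf("hop %u -> 0x%05x\n", hop, tx_ring_reg(hop));
    return 0;
}
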
/linux-4.4.14/net/x25/
H A Dx25_forward.c27 struct list_head *entry; x25_forward_call() local
54 list_for_each(entry, &x25_forward_list) { x25_forward_call()
55 x25_frwd = list_entry(entry, struct x25_forward, node); x25_forward_call()
100 struct list_head *entry; x25_forward_data() local
107 list_for_each(entry, &x25_forward_list) { x25_forward_data()
108 frwd = list_entry(entry, struct x25_forward, node); x25_forward_data()
140 struct list_head *entry, *tmp; x25_clear_forward_by_lci() local
144 list_for_each_safe(entry, tmp, &x25_forward_list) { x25_clear_forward_by_lci()
145 fwd = list_entry(entry, struct x25_forward, node); x25_clear_forward_by_lci()
158 struct list_head *entry, *tmp; x25_clear_forward_by_dev() local
162 list_for_each_safe(entry, tmp, &x25_forward_list) { x25_clear_forward_by_dev()
163 fwd = list_entry(entry, struct x25_forward, node); x25_clear_forward_by_dev()
H A Dx25_route.c35 struct list_head *entry; x25_add_route() local
40 list_for_each(entry, &x25_route_list) { x25_add_route()
41 rt = list_entry(entry, struct x25_route, node); x25_add_route()
86 struct list_head *entry; x25_del_route() local
91 list_for_each(entry, &x25_route_list) { x25_del_route()
92 rt = list_entry(entry, struct x25_route, node); x25_del_route()
112 struct list_head *entry, *tmp; x25_route_device_down() local
116 list_for_each_safe(entry, tmp, &x25_route_list) { x25_route_device_down()
117 rt = list_entry(entry, struct x25_route, node); x25_route_device_down()
157 struct list_head *entry; x25_get_route() local
161 list_for_each(entry, &x25_route_list) { x25_get_route()
162 rt = list_entry(entry, struct x25_route, node); x25_get_route()
218 struct list_head *entry, *tmp; x25_route_free() local
221 list_for_each_safe(entry, tmp, &x25_route_list) { x25_route_free()
222 rt = list_entry(entry, struct x25_route, node); x25_route_free()
/linux-4.4.14/arch/powerpc/kernel/
H A Djump_label.c15 void arch_jump_label_transform(struct jump_entry *entry, arch_jump_label_transform() argument
18 u32 *addr = (u32 *)(unsigned long)entry->code; arch_jump_label_transform()
21 patch_branch(addr, entry->target, 0); arch_jump_label_transform()
/linux-4.4.14/tools/perf/util/
H A Dstrlist.c15 struct rb_node *strlist__node_new(struct rblist *rblist, const void *entry) strlist__node_new() argument
17 const char *s = entry; strlist__node_new()
55 static int strlist__node_cmp(struct rb_node *rb_node, const void *entry) strlist__node_cmp() argument
57 const char *str = entry; strlist__node_cmp()
70 char entry[1024]; strlist__load() local
77 while (fgets(entry, sizeof(entry), fp) != NULL) { strlist__load()
78 const size_t len = strlen(entry); strlist__load()
82 entry[len - 1] = '\0'; strlist__load()
84 err = strlist__add(slist, entry); strlist__load()
100 struct str_node *strlist__find(struct strlist *slist, const char *entry) strlist__find() argument
103 struct rb_node *rb_node = rblist__find(&slist->rblist, entry); strlist__find()
H A Drblist.c52 const void *entry, __rblist__findnew()
63 rc = rblist->node_cmp(parent, entry); __rblist__findnew()
73 new_node = rblist->node_new(rblist, entry); __rblist__findnew()
84 struct rb_node *rblist__find(struct rblist *rblist, const void *entry) rblist__find() argument
86 return __rblist__findnew(rblist, entry, false); rblist__find()
89 struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry) rblist__findnew() argument
91 return __rblist__findnew(rblist, entry, true); rblist__findnew()
51 __rblist__findnew(struct rblist *rblist, const void *entry, bool create) __rblist__findnew() argument
H A Drblist.h25 int (*node_cmp)(struct rb_node *rbn, const void *entry);
34 struct rb_node *rblist__find(struct rblist *rblist, const void *entry);
35 struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry);
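
rblist's __rblist__findnew() walks a tree with caller-supplied node_cmp()/node_new() callbacks and creates the node on a miss. The same find-or-create shape, modelled with a plain unbalanced binary search tree instead of the kernel rbtree; all names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node { struct node *left, *right; char *str; };

struct slist {
    struct node *root;
    int  (*node_cmp)(struct node *n, const void *entry);
    struct node *(*node_new)(const void *entry);
};

static struct node *findnew(struct slist *l, const void *entry, int create)
{
    struct node **p = &l->root;

    while (*p) {
        int rc = l->node_cmp(*p, entry);
        if (rc == 0)
            return *p;                   /* found existing node */
        p = rc < 0 ? &(*p)->left : &(*p)->right;
    }
    if (!create)
        return NULL;
    return *p = l->node_new(entry);      /* miss: build via callback */
}

static int str_cmp(struct node *n, const void *entry)
{
    return strcmp(entry, n->str);
}

static struct node *str_new(const void *entry)
{
    struct node *n = calloc(1, sizeof(*n));
    n->str = strdup(entry);
    return n;
}

int main(void)
{
    struct slist l = { NULL, str_cmp, str_new };

    findnew(&l, "probe", 1);             /* findnew: create on miss */
    printf("%s\n", findnew(&l, "probe", 0) ? "found" : "missing");
    return 0;
}
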
/linux-4.4.14/drivers/net/wireless/rt2x00/
H A Drt2x00usb.c218 static void rt2x00usb_work_txdone_entry(struct queue_entry *entry) rt2x00usb_work_txdone_entry() argument
228 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) rt2x00usb_work_txdone_entry()
229 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); rt2x00usb_work_txdone_entry()
231 rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN); rt2x00usb_work_txdone_entry()
239 struct queue_entry *entry; rt2x00usb_work_txdone() local
243 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); tx_queue_for_each()
245 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || tx_queue_for_each()
246 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) tx_queue_for_each()
249 rt2x00usb_work_txdone_entry(entry); tx_queue_for_each()
256 struct queue_entry *entry = (struct queue_entry *)urb->context; rt2x00usb_interrupt_txdone() local
257 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00usb_interrupt_txdone()
259 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) rt2x00usb_interrupt_txdone()
265 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00usb_interrupt_txdone()
269 rt2x00lib_dmadone(entry); rt2x00usb_interrupt_txdone()
272 rt2x00dev->ops->lib->tx_dma_done(entry); rt2x00usb_interrupt_txdone()
282 static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data) rt2x00usb_kick_tx_entry() argument
284 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00usb_kick_tx_entry()
286 struct queue_entry_priv_usb *entry_priv = entry->priv_data; rt2x00usb_kick_tx_entry()
290 if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) || rt2x00usb_kick_tx_entry()
291 test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) rt2x00usb_kick_tx_entry()
296 * and urb. Those paddings are not included in skbs. Pass entry rt2x00usb_kick_tx_entry()
299 length = rt2x00dev->ops->lib->get_tx_data_len(entry); rt2x00usb_kick_tx_entry()
301 status = skb_padto(entry->skb, length); rt2x00usb_kick_tx_entry()
305 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00usb_kick_tx_entry()
306 rt2x00lib_dmadone(entry); rt2x00usb_kick_tx_entry()
312 usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint), rt2x00usb_kick_tx_entry()
313 entry->skb->data, length, rt2x00usb_kick_tx_entry()
314 rt2x00usb_interrupt_txdone, entry); rt2x00usb_kick_tx_entry()
320 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00usb_kick_tx_entry()
321 rt2x00lib_dmadone(entry); rt2x00usb_kick_tx_entry()
334 struct queue_entry *entry; rt2x00usb_work_rxdone() local
339 entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE); rt2x00usb_work_rxdone()
341 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || rt2x00usb_work_rxdone()
342 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) rt2x00usb_work_rxdone()
348 skbdesc = get_skb_frame_desc(entry->skb); rt2x00usb_work_rxdone()
350 skbdesc->desc_len = entry->queue->desc_size; rt2x00usb_work_rxdone()
355 rt2x00lib_rxdone(entry, GFP_KERNEL); rt2x00usb_work_rxdone()
361 struct queue_entry *entry = (struct queue_entry *)urb->context; rt2x00usb_interrupt_rxdone() local
362 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00usb_interrupt_rxdone()
364 if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) rt2x00usb_interrupt_rxdone()
370 rt2x00lib_dmadone(entry); rt2x00usb_interrupt_rxdone()
377 if (urb->actual_length < entry->queue->desc_size || urb->status) rt2x00usb_interrupt_rxdone()
378 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00usb_interrupt_rxdone()
387 static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data) rt2x00usb_kick_rx_entry() argument
389 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00usb_kick_rx_entry()
391 struct queue_entry_priv_usb *entry_priv = entry->priv_data; rt2x00usb_kick_rx_entry()
394 if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || rt2x00usb_kick_rx_entry()
395 test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) rt2x00usb_kick_rx_entry()
398 rt2x00lib_dmastart(entry); rt2x00usb_kick_rx_entry()
401 usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint), rt2x00usb_kick_rx_entry()
402 entry->skb->data, entry->skb->len, rt2x00usb_kick_rx_entry()
403 rt2x00usb_interrupt_rxdone, entry); rt2x00usb_kick_rx_entry()
409 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00usb_kick_rx_entry()
410 rt2x00lib_dmadone(entry); rt2x00usb_kick_rx_entry()
444 static bool rt2x00usb_flush_entry(struct queue_entry *entry, void *data) rt2x00usb_flush_entry() argument
446 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00usb_flush_entry()
447 struct queue_entry_priv_usb *entry_priv = entry->priv_data; rt2x00usb_flush_entry()
448 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; rt2x00usb_flush_entry()
450 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) rt2x00usb_flush_entry()
458 if ((entry->queue->qid == QID_BEACON) && rt2x00usb_flush_entry()
527 struct queue_entry *entry; rt2x00usb_dma_timeout() local
529 entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE); rt2x00usb_dma_timeout()
530 return rt2x00queue_dma_timeout(entry); rt2x00usb_dma_timeout()
559 void rt2x00usb_clear_entry(struct queue_entry *entry) rt2x00usb_clear_entry() argument
561 entry->flags = 0; rt2x00usb_clear_entry()
563 if (entry->queue->qid == QID_RX) rt2x00usb_clear_entry()
564 rt2x00usb_kick_rx_entry(entry, NULL); rt2x00usb_clear_entry()
H A Drt2x00dev.c261 void rt2x00lib_dmastart(struct queue_entry *entry) rt2x00lib_dmastart() argument
263 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); rt2x00lib_dmastart()
264 rt2x00queue_index_inc(entry, Q_INDEX); rt2x00lib_dmastart()
268 void rt2x00lib_dmadone(struct queue_entry *entry) rt2x00lib_dmadone() argument
270 set_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags); rt2x00lib_dmadone()
271 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); rt2x00lib_dmadone()
272 rt2x00queue_index_inc(entry, Q_INDEX_DMA_DONE); rt2x00lib_dmadone()
276 static inline int rt2x00lib_txdone_bar_status(struct queue_entry *entry) rt2x00lib_txdone_bar_status() argument
278 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00lib_txdone_bar_status()
279 struct ieee80211_bar *bar = (void *) entry->skb->data; rt2x00lib_txdone_bar_status()
302 if (bar_entry->entry != entry) rt2x00lib_txdone_bar_status()
320 void rt2x00lib_txdone(struct queue_entry *entry, rt2x00lib_txdone() argument
323 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00lib_txdone()
324 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); rt2x00lib_txdone()
325 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); rt2x00lib_txdone()
334 rt2x00queue_unmap_skb(entry); rt2x00lib_txdone()
339 skb_pull(entry->skb, rt2x00dev->extra_tx_headroom); rt2x00lib_txdone()
349 header_length = ieee80211_get_hdrlen_from_skb(entry->skb); rt2x00lib_txdone()
355 rt2x00queue_remove_l2pad(entry->skb, header_length); rt2x00lib_txdone()
364 rt2x00crypto_tx_insert_iv(entry->skb, header_length); rt2x00lib_txdone()
370 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb); rt2x00lib_txdone()
378 rt2x00lib_txdone_bar_status(entry) || rt2x00lib_txdone()
464 ieee80211_tx_status(rt2x00dev->hw, entry->skb); rt2x00lib_txdone()
466 ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb); rt2x00lib_txdone()
468 dev_kfree_skb_any(entry->skb); rt2x00lib_txdone()
471 * Make this entry available for reuse. rt2x00lib_txdone()
473 entry->skb = NULL; rt2x00lib_txdone()
474 entry->flags = 0; rt2x00lib_txdone()
476 rt2x00dev->ops->lib->clear_entry(entry); rt2x00lib_txdone()
478 rt2x00queue_index_inc(entry, Q_INDEX_DONE); rt2x00lib_txdone()
487 spin_lock_bh(&entry->queue->tx_lock); rt2x00lib_txdone()
488 if (!rt2x00queue_threshold(entry->queue)) rt2x00lib_txdone()
489 rt2x00queue_unpause_queue(entry->queue); rt2x00lib_txdone()
490 spin_unlock_bh(&entry->queue->tx_lock); rt2x00lib_txdone()
494 void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status) rt2x00lib_txdone_noinfo() argument
502 rt2x00lib_txdone(entry, &txdesc); rt2x00lib_txdone_noinfo()
547 struct rt2x00_bar_list_entry *entry; rt2x00lib_rxdone_check_ba() local
557 list_for_each_entry_rcu(entry, &rt2x00dev->bar_list, list) { rt2x00lib_rxdone_check_ba()
559 if (ba->start_seq_num != entry->start_seq_num) rt2x00lib_rxdone_check_ba()
566 if (!TID_CHECK(ba->control, entry->control)) rt2x00lib_rxdone_check_ba()
571 if (!ether_addr_equal_64bits(ba->ra, entry->ta)) rt2x00lib_rxdone_check_ba()
574 if (!ether_addr_equal_64bits(ba->ta, entry->ra)) rt2x00lib_rxdone_check_ba()
579 entry->block_acked = 1; rt2x00lib_rxdone_check_ba()
683 void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp) rt2x00lib_rxdone() argument
685 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00lib_rxdone()
696 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) rt2x00lib_rxdone()
703 skb = rt2x00queue_alloc_rxskb(entry, gfp); rt2x00lib_rxdone()
710 rt2x00queue_unmap_skb(entry); rt2x00lib_rxdone()
716 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc); rt2x00lib_rxdone()
723 rxdesc.size > entry->queue->data_size)) { rt2x00lib_rxdone()
725 rxdesc.size, entry->queue->data_size); rt2x00lib_rxdone()
726 dev_kfree_skb(entry->skb); rt2x00lib_rxdone()
734 header_length = ieee80211_get_hdrlen_from_skb(entry->skb); rt2x00lib_rxdone()
744 rt2x00crypto_rx_insert_iv(entry->skb, header_length, rt2x00lib_rxdone()
749 rt2x00queue_remove_l2pad(entry->skb, header_length); rt2x00lib_rxdone()
752 skb_trim(entry->skb, rxdesc.size); rt2x00lib_rxdone()
766 rt2x00lib_rxdone_check_ps(rt2x00dev, entry->skb, &rxdesc); rt2x00lib_rxdone()
772 rt2x00lib_rxdone_check_ba(rt2x00dev, entry->skb, &rxdesc); rt2x00lib_rxdone()
777 rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc); rt2x00lib_rxdone()
779 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb); rt2x00lib_rxdone()
785 rx_status = IEEE80211_SKB_RXCB(entry->skb); rt2x00lib_rxdone()
802 ieee80211_rx_ni(rt2x00dev->hw, entry->skb); rt2x00lib_rxdone()
808 entry->skb = skb; rt2x00lib_rxdone()
811 entry->flags = 0; rt2x00lib_rxdone()
812 rt2x00queue_index_inc(entry, Q_INDEX_DONE); rt2x00lib_rxdone()
815 rt2x00dev->ops->lib->clear_entry(entry); rt2x00lib_rxdone()
909 static void rt2x00lib_channel(struct ieee80211_channel *entry, rt2x00lib_channel() argument
914 entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; rt2x00lib_channel()
915 entry->center_freq = ieee80211_channel_to_frequency(channel, rt2x00lib_channel()
916 entry->band); rt2x00lib_channel()
917 entry->hw_value = value; rt2x00lib_channel()
918 entry->max_power = tx_power; rt2x00lib_channel()
919 entry->max_antenna_gain = 0xff; rt2x00lib_channel()
922 static void rt2x00lib_rate(struct ieee80211_rate *entry, rt2x00lib_rate() argument
925 entry->flags = 0; rt2x00lib_rate()
926 entry->bitrate = rate->bitrate; rt2x00lib_rate()
927 entry->hw_value = index; rt2x00lib_rate()
928 entry->hw_value_short = index; rt2x00lib_rate()
931 entry->flags |= IEEE80211_RATE_SHORT_PREAMBLE; rt2x00lib_rate()
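
Throughout the rt2x00 excerpts, an entry's life cycle is serialized by atomic flag bits (ENTRY_OWNER_DEVICE_DATA, ENTRY_DATA_STATUS_PENDING, ENTRY_DATA_IO_FAILED). A userspace model of that test-and-set protocol using C11 atomics; the bit names below are invented stand-ins.

#include <stdatomic.h>
#include <stdio.h>

enum { ENTRY_OWNER_DEVICE = 0, ENTRY_IO_FAILED = 1 };

static int test_and_set_bit(int nr, atomic_ulong *flags)
{
    unsigned long mask = 1UL << nr;
    return (atomic_fetch_or(flags, mask) & mask) != 0;  /* old value */
}

static void clear_bit(int nr, atomic_ulong *flags)
{
    atomic_fetch_and(flags, ~(1UL << nr));
}

static int test_bit(int nr, atomic_ulong *flags)
{
    return (atomic_load(flags) & (1UL << nr)) != 0;
}

int main(void)
{
    atomic_ulong flags = 0;

    if (!test_and_set_bit(ENTRY_OWNER_DEVICE, &flags))
        printf("entry handed to device\n");   /* first claimant wins */
    if (test_and_set_bit(ENTRY_OWNER_DEVICE, &flags))
        printf("already owned, back off\n");  /* second claimant skips */

    clear_bit(ENTRY_OWNER_DEVICE, &flags);
    printf("owned now? %d\n", test_bit(ENTRY_OWNER_DEVICE, &flags));
    return 0;
}
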
/linux-4.4.14/drivers/md/
H A Ddm-cache-policy-smq.c37 struct entry { struct
59 struct entry *begin;
60 struct entry *end;
70 es->begin = vzalloc(sizeof(struct entry) * nr_entries); space_init()
83 static struct entry *__get_entry(struct entry_space *es, unsigned block) __get_entry()
85 struct entry *e; __get_entry()
93 static unsigned to_index(struct entry_space *es, struct entry *e) to_index()
99 static struct entry *to_entry(struct entry_space *es, unsigned block) to_entry()
120 static struct entry *l_head(struct entry_space *es, struct ilist *l) l_head()
125 static struct entry *l_tail(struct entry_space *es, struct ilist *l) l_tail()
130 static struct entry *l_next(struct entry_space *es, struct entry *e) l_next()
135 static struct entry *l_prev(struct entry_space *es, struct entry *e) l_prev()
145 static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e) l_add_head()
147 struct entry *head = l_head(es, l); l_add_head()
161 static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e) l_add_tail()
163 struct entry *tail = l_tail(es, l); l_add_tail()
178 struct entry *old, struct entry *e) l_add_before()
180 struct entry *prev = l_prev(es, old); l_add_before()
195 static void l_del(struct entry_space *es, struct ilist *l, struct entry *e) l_del()
197 struct entry *prev = l_prev(es, e); l_del()
198 struct entry *next = l_next(es, e); l_del()
214 static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l) l_pop_tail()
216 struct entry *e; l_pop_tail()
278 * Insert an entry to the back of the given level.
280 static void q_push(struct queue *q, struct entry *e) q_push()
288 static void q_push_before(struct queue *q, struct entry *old, struct entry *e) q_push_before()
296 static void q_del(struct queue *q, struct entry *e) q_del()
304 * Return the oldest entry of the lowest populated level.
306 static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel) q_peek()
309 struct entry *e; q_peek()
328 static struct entry *q_pop(struct queue *q) q_pop()
330 struct entry *e = q_peek(q, q->nr_levels, true); q_pop()
339 * Pops an entry from a level that is not past a sentinel.
341 static struct entry *q_pop_old(struct queue *q, unsigned max_level) q_pop_old()
343 struct entry *e = q_peek(q, max_level, false); q_pop_old()
352 * This function assumes there is a non-sentinel entry to pop. It's only
356 static struct entry *__redist_pop_from(struct queue *q, unsigned level) __redist_pop_from()
358 struct entry *e; __redist_pop_from()
415 struct entry *e; q_redistribute()
454 static void q_requeue_before(struct queue *q, struct entry *dest, struct entry *e, unsigned extra_levels) q_requeue_before()
456 struct entry *de; q_requeue_before()
483 static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels) q_requeue()
561 * use indexing again, and only store indexes to the next entry.
586 static struct entry *h_head(struct hash_table *ht, unsigned bucket) h_head()
591 static struct entry *h_next(struct hash_table *ht, struct entry *e) h_next()
596 static void __h_insert(struct hash_table *ht, unsigned bucket, struct entry *e) __h_insert()
602 static void h_insert(struct hash_table *ht, struct entry *e) h_insert()
608 static struct entry *__h_lookup(struct hash_table *ht, unsigned h, dm_oblock_t oblock, __h_lookup()
609 struct entry **prev) __h_lookup()
611 struct entry *e; __h_lookup()
625 struct entry *e, struct entry *prev) __h_unlink()
634 * Also moves each entry to the front of the bucket.
636 static struct entry *h_lookup(struct hash_table *ht, dm_oblock_t oblock) h_lookup()
638 struct entry *e, *prev; h_lookup()
644 * Move to the front because this entry is likely h_lookup()
654 static void h_remove(struct hash_table *ht, struct entry *e) h_remove()
657 struct entry *prev; h_remove()
692 static void init_entry(struct entry *e) init_entry()
705 static struct entry *alloc_entry(struct entry_alloc *ea) alloc_entry()
707 struct entry *e; alloc_entry()
722 static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i) alloc_particular_entry()
724 struct entry *e = __get_entry(ea->es, ea->begin + i); alloc_particular_entry()
735 static void free_entry(struct entry_alloc *ea, struct entry *e) free_entry()
750 static unsigned get_index(struct entry_alloc *ea, struct entry *e) get_index()
755 static struct entry *get_entry(struct entry_alloc *ea, unsigned index) get_entry()
814 * The hash table allows us to quickly find an entry by origin
835 static struct entry *get_sentinel(struct entry_alloc *ea, unsigned level, bool which) get_sentinel()
840 static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level) writeback_sentinel()
845 static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level) demote_sentinel()
854 struct entry *sentinel; __update_writeback_sentinels()
867 struct entry *sentinel; __update_demote_sentinels()
894 struct entry *sentinel; __sentinels_init()
926 static void push_new(struct smq_policy *mq, struct entry *e) push_new()
933 static void push(struct smq_policy *mq, struct entry *e) push()
935 struct entry *sentinel; push()
953 * Removes an entry from cache. Removes from the hash table.
955 static void __del(struct smq_policy *mq, struct queue *q, struct entry *e) __del()
961 static void del(struct smq_policy *mq, struct entry *e) del()
966 static struct entry *pop_old(struct smq_policy *mq, struct queue *q, unsigned max_level) pop_old()
968 struct entry *e = q_pop_old(q, max_level); pop_old()
974 static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e) infer_cblock()
979 static void requeue(struct smq_policy *mq, struct entry *e) requeue()
981 struct entry *sentinel; requeue()
1102 struct entry *demoted = q_peek(&mq->clean, mq->clean.nr_levels, false); demote_cblock()
1140 static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e, struct bio *bio, should_promote()
1158 struct entry *e; insert_in_cache()
1190 static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b, struct bio *bio) update_hotspot_queue()
1194 struct entry *e = h_lookup(&mq->hotspot_table, hb); update_hotspot_queue()
1236 struct entry *e, *hs_e; map()
1316 struct entry *e; smq_lookup()
1332 struct entry *e; __smq_set_clear_dirty()
1367 struct entry *e; smq_load_mapping()
1383 struct entry *e; smq_save_hints()
1417 struct entry *e; __remove_mapping()
1438 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); __remove_cblock()
1483 struct entry *e = NULL; __smq_writeback_work()
1523 struct entry *e = h_lookup(&mq->table, current_oblock); __force_mapping()
1631 DMERR("couldn't initialize entry space"); smq_create()
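
smq's ilist above links entries by array index rather than by pointer, which keeps links small and valid inside one big vzalloc'd entry space. A compact model of an index-linked list, with an invented null index:

#include <stdio.h>

#define NR   8
#define NIL  0xffffu                 /* stand-in for the null index */

struct entry { unsigned short next, prev; int value; };

struct ilist { unsigned short head, tail; };

static struct entry pool[NR];        /* all entries in one allocation */

static void l_add_tail(struct ilist *l, unsigned short i)
{
    pool[i].next = NIL;
    pool[i].prev = l->tail;
    if (l->tail != NIL)
        pool[l->tail].next = i;
    else
        l->head = i;
    l->tail = i;
}

int main(void)
{
    struct ilist l = { NIL, NIL };

    for (unsigned short i = 0; i < 3; i++) {
        pool[i].value = 10 * i;
        l_add_tail(&l, i);
    }
    /* Walk by index, not pointer. */
    for (unsigned short i = l.head; i != NIL; i = pool[i].next)
        printf("entry %u -> %d\n", i, pool[i].value);
    return 0;
}
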
H A Ddm-cache-policy-mq.c166 * Insert an entry to the back of the given level.
186 * Gives us the oldest entry of the lowest populated level. If the first
215 * Pops an entry from a level that is not past a sentinel.
304 * Describes a cache entry. Used in both the cache and the pre_cache.
306 struct entry { struct
319 * Rather than storing the cblock in an entry, we allocate all entries in
320 * an array, and infer the cblock from the entry position.
325 struct entry *entries, *entries_end;
334 ep->entries = vzalloc(sizeof(struct entry) * nr_entries); epool_init()
354 static struct entry *alloc_entry(struct entry_pool *ep) alloc_entry()
356 struct entry *e; alloc_entry()
361 e = list_entry(list_pop(&ep->free), struct entry, list); alloc_entry()
372 static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock) alloc_particular_entry()
374 struct entry *e = ep->entries + from_cblock(cblock); alloc_particular_entry()
383 static void free_entry(struct entry_pool *ep, struct entry *e) free_entry()
392 * Returns NULL if the entry is free.
394 static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock) epool_find()
396 struct entry *e = ep->entries + from_cblock(cblock); epool_find()
405 static bool in_pool(struct entry_pool *ep, struct entry *e) in_pool()
410 static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e) infer_cblock()
457 * and found an entry in the pre_cache or cache. Currently used to
476 * The hash table allows us to quickly find an entry by origin
495 static void hash_insert(struct mq_policy *mq, struct entry *e) hash_insert()
502 static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock) hash_lookup()
506 struct entry *e; hash_lookup()
518 static void hash_remove(struct entry *e) hash_remove()
546 static unsigned queue_level(struct entry *e) queue_level()
551 static bool in_cache(struct mq_policy *mq, struct entry *e) in_cache()
557 * Inserts the entry into the pre_cache or the cache. Ensures the cache
559 * Sets the tick which records when the entry was last moved about.
561 static void push(struct mq_policy *mq, struct entry *e) push()
573 * Removes an entry from pre_cache or cache. Removes from the hash table.
575 static void del(struct mq_policy *mq, struct entry *e) del()
586 * Like del, except it removes the first entry in the queue (ie. the least
589 static struct entry *pop(struct mq_policy *mq, struct queue *q) pop()
591 struct entry *e; pop()
597 e = container_of(h, struct entry, list); pop()
603 static struct entry *pop_old(struct mq_policy *mq, struct queue *q) pop_old()
605 struct entry *e; pop_old()
611 e = container_of(h, struct entry, list); pop_old()
617 static struct entry *peek(struct queue *q) peek()
620 return h ? container_of(h, struct entry, list) : NULL; peek()
642 struct entry *e; check_generation()
671 * Whenever we use an entry we bump up its hit counter, and push it to the
674 static void requeue(struct mq_policy *mq, struct entry *e) requeue()
682 * Demote the least recently used entry from the cache to the pre_cache.
683 * Returns the new cache entry to use, and the old origin block it was
686 * We drop the hit count on the demoted entry back to 1 to stop it bouncing
690 * - just forget about the demoted entry completely (ie. don't insert it
699 struct entry *demoted = peek(&mq->cache_clean); demote_cblock()
737 struct entry *e; promote_threshold()
780 static bool should_promote(struct mq_policy *mq, struct entry *e, should_promote()
788 struct entry *e, cache_entry_found()
802 * Moves an entry from the pre_cache to the cache. The main work is
805 static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e, pre_cache_to_cache()
810 struct entry *new_e; pre_cache_to_cache()
840 static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e, pre_cache_entry_found()
865 struct entry *e = alloc_entry(&mq->pre_cache_pool); insert_in_pre_cache()
869 * There's no spare entry structure, so we grab the least insert_in_pre_cache()
890 struct entry *e; insert_in_cache()
948 struct entry *e = hash_lookup(mq, oblock); map()
995 struct entry *e = container_of(h, struct entry, list); update_pre_cache_hits()
1002 struct entry *e = container_of(h, struct entry, list); update_cache_hits()
1057 struct entry *e; mq_lookup()
1076 struct entry *e; __mq_set_clear_dirty()
1109 struct entry *e; mq_load_mapping()
1126 struct entry *e; mq_save_hints()
1133 e = container_of(h, struct entry, list); mq_save_hints()
1162 struct entry *e; __remove_mapping()
1182 struct entry *e = epool_find(&mq->cache_pool, cblock); __remove_cblock()
1222 struct entry *e = pop_old(mq, &mq->cache_dirty); __mq_writeback_work()
1254 struct entry *e = hash_lookup(mq, current_oblock); __force_mapping()
1445 sizeof(struct entry), mq_init()
1446 __alignof__(struct entry), mq_init()
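
A note on the entry-pool fragments above: rather than storing a cblock in each entry, all entries live in one preallocated array and the cblock is recovered by pointer arithmetic (epool_find maps index to entry, infer_cblock maps entry back to index). A minimal userspace sketch of that round trip; the kernel's vzalloc/dm_cblock_t details are dropped and plain size_t indices stand in for cblocks:

#include <assert.h>
#include <stdlib.h>

struct entry { int hit_count; };

struct entry_pool {
	struct entry *entries, *entries_end;
};

/* index -> entry: plain array addressing, as epool_find does */
static struct entry *entry_at(struct entry_pool *ep, size_t cblock)
{
	return ep->entries + cblock;
}

/* entry -> index: pointer subtraction, the inverse mapping */
static size_t infer_cblock(struct entry_pool *ep, struct entry *e)
{
	return (size_t)(e - ep->entries);
}

int main(void)
{
	struct entry_pool ep;

	ep.entries = calloc(16, sizeof(struct entry));
	assert(ep.entries);
	ep.entries_end = ep.entries + 16;

	/* the cblock never needs to be stored in the entry itself */
	assert(infer_cblock(&ep, entry_at(&ep, 5)) == 5);

	free(ep.entries);
	return 0;
}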
/linux-4.4.14/drivers/net/ethernet/dec/tulip/
H A Dinterrupt.c62 int entry; tulip_refill_rx() local
67 entry = tp->dirty_rx % RX_RING_SIZE; tulip_refill_rx()
68 if (tp->rx_buffers[entry].skb == NULL) { tulip_refill_rx()
72 skb = tp->rx_buffers[entry].skb = tulip_refill_rx()
81 tp->rx_buffers[entry].skb = NULL; tulip_refill_rx()
85 tp->rx_buffers[entry].mapping = mapping; tulip_refill_rx()
87 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping); tulip_refill_rx()
90 tp->rx_ring[entry].status = cpu_to_le32(DescOwned); tulip_refill_rx()
116 int entry = tp->cur_rx % RX_RING_SIZE; tulip_poll() local
131 netdev_dbg(dev, " In tulip_rx(), entry %d %08x\n", tulip_poll()
132 entry, tp->rx_ring[entry].status); tulip_poll()
143 /* If we own the next entry, it is a new packet. Send it up. */ tulip_poll()
144 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { tulip_poll()
145 s32 status = le32_to_cpu(tp->rx_ring[entry].status); tulip_poll()
152 netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n", tulip_poll()
153 entry, status); tulip_poll()
214 tp->rx_buffers[entry].mapping, tulip_poll()
217 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, tulip_poll()
222 tp->rx_buffers[entry].skb->data, tulip_poll()
226 tp->rx_buffers[entry].mapping, tulip_poll()
229 char *temp = skb_put(skb = tp->rx_buffers[entry].skb, tulip_poll()
233 if (tp->rx_buffers[entry].mapping != tulip_poll()
234 le32_to_cpu(tp->rx_ring[entry].buffer1)) { tulip_poll()
237 le32_to_cpu(tp->rx_ring[entry].buffer1), tulip_poll()
238 (unsigned long long)tp->rx_buffers[entry].mapping, tulip_poll()
243 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, tulip_poll()
246 tp->rx_buffers[entry].skb = NULL; tulip_poll()
247 tp->rx_buffers[entry].mapping = 0; tulip_poll()
260 entry = (++tp->cur_rx) % RX_RING_SIZE; tulip_poll()
368 int entry = tp->cur_rx % RX_RING_SIZE; tulip_rx() local
373 netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n", tulip_rx()
374 entry, tp->rx_ring[entry].status); tulip_rx()
375 /* If we own the next entry, it is a new packet. Send it up. */ tulip_rx()
376 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { tulip_rx()
377 s32 status = le32_to_cpu(tp->rx_ring[entry].status); tulip_rx()
381 netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n", tulip_rx()
382 entry, status); tulip_rx()
440 tp->rx_buffers[entry].mapping, tulip_rx()
443 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, tulip_rx()
448 tp->rx_buffers[entry].skb->data, tulip_rx()
452 tp->rx_buffers[entry].mapping, tulip_rx()
455 char *temp = skb_put(skb = tp->rx_buffers[entry].skb, tulip_rx()
459 if (tp->rx_buffers[entry].mapping != tulip_rx()
460 le32_to_cpu(tp->rx_ring[entry].buffer1)) { tulip_rx()
463 le32_to_cpu(tp->rx_ring[entry].buffer1), tulip_rx()
464 (long long)tp->rx_buffers[entry].mapping, tulip_rx()
469 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, tulip_rx()
472 tp->rx_buffers[entry].skb = NULL; tulip_rx()
473 tp->rx_buffers[entry].mapping = 0; tulip_rx()
483 entry = (++tp->cur_rx) % RX_RING_SIZE; tulip_rx()
531 int entry; tulip_interrupt() local
590 int entry = dirty_tx % TX_RING_SIZE; tulip_interrupt() local
591 int status = le32_to_cpu(tp->tx_ring[entry].status); tulip_interrupt()
597 if (tp->tx_buffers[entry].skb == NULL) { tulip_interrupt()
599 if (tp->tx_buffers[entry].mapping) tulip_interrupt()
601 tp->tx_buffers[entry].mapping, tulip_interrupt()
627 tp->tx_buffers[entry].skb->len; tulip_interrupt()
632 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, tulip_interrupt()
633 tp->tx_buffers[entry].skb->len, tulip_interrupt()
637 dev_kfree_skb_irq(tp->tx_buffers[entry].skb); tulip_interrupt()
638 tp->tx_buffers[entry].skb = NULL; tulip_interrupt()
639 tp->tx_buffers[entry].mapping = 0; tulip_interrupt()
780 entry = tp->dirty_rx % RX_RING_SIZE;
781 if (tp->rx_buffers[entry].skb == NULL) {
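
The tulip fragments above share one ring idiom: cur_rx and dirty_rx are free-running counters reduced modulo RX_RING_SIZE, and the DescOwned status bit says whether the device or the driver currently owns a descriptor; the driver consumes entries only while it owns the next one. A toy model of that handshake, with ring size and status values that are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define RX_RING_SIZE 4
#define DescOwned    0x80000000u	/* set while the device owns the descriptor */

struct rx_desc { uint32_t status; };

int main(void)
{
	struct rx_desc ring[RX_RING_SIZE];
	unsigned cur_rx = 0;

	for (int i = 0; i < RX_RING_SIZE; i++)
		ring[i].status = DescOwned;
	/* device "completes" two packets by clearing DescOwned on them */
	ring[0].status = 100;	/* completed; status carries length/flags */
	ring[1].status = 200;

	/* driver side: consume while we own the next entry, as in tulip_poll() */
	unsigned entry = cur_rx % RX_RING_SIZE;
	while (!(ring[entry].status & DescOwned)) {
		printf("entry %u completed, status %u\n", entry, ring[entry].status);
		ring[entry].status = DescOwned;	/* hand it back, as the refill path does */
		entry = (++cur_rx) % RX_RING_SIZE;
	}
	return 0;
}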
/linux-4.4.14/drivers/net/wireless/brcm80211/brcmfmac/
H A Dfwsignal.c370 * @mac_handle: handle for mac entry determined by firmware.
373 * @suppressed: mac entry is suppressed.
420 * struct brcmf_fws_hanger_item - single entry for tx pending packet.
422 * @state: entry is either free or occupied.
437 * @failed_slotfind: packets for which failed to find an entry.
438 * @slot_pos: last returned item index for a free entry.
646 brcmf_err("entry not in use\n"); brcmf_fws_hanger_poppkt()
667 brcmf_err("entry not in use\n"); brcmf_fws_hanger_mark_suppressed()
743 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_macdesc_lookup() local
749 entry = &fws->desc.nodes[0]; brcmf_fws_macdesc_lookup()
751 if (entry->occupied && !memcmp(entry->ea, ea, ETH_ALEN)) brcmf_fws_macdesc_lookup()
752 return entry; brcmf_fws_macdesc_lookup()
753 entry++; brcmf_fws_macdesc_lookup()
762 struct brcmf_fws_mac_descriptor *entry = &fws->desc.other; brcmf_fws_macdesc_find() local
767 /* Multicast destination, STA and P2P clients get the interface entry. brcmf_fws_macdesc_find()
769 * have their own entry. brcmf_fws_macdesc_find()
772 entry = ifp->fws_desc; brcmf_fws_macdesc_find()
776 entry = brcmf_fws_macdesc_lookup(fws, da); brcmf_fws_macdesc_find()
777 if (IS_ERR(entry)) brcmf_fws_macdesc_find()
778 entry = ifp->fws_desc; brcmf_fws_macdesc_find()
781 return entry; brcmf_fws_macdesc_find()
785 struct brcmf_fws_mac_descriptor *entry, brcmf_fws_macdesc_closed()
794 if (entry->mac_handle) { brcmf_fws_macdesc_closed()
795 if_entry = &fws->desc.iface[entry->interface_id]; brcmf_fws_macdesc_closed()
799 /* an entry is closed when the state is closed and brcmf_fws_macdesc_closed()
802 closed = entry->state == BRCMF_FWS_STATE_CLOSE && brcmf_fws_macdesc_closed()
803 !entry->requested_credit && !entry->requested_packet; brcmf_fws_macdesc_closed()
806 return closed || !(entry->ac_bitmap & BIT(fifo)); brcmf_fws_macdesc_closed()
810 struct brcmf_fws_mac_descriptor *entry, brcmf_fws_macdesc_cleanup()
813 if (entry->occupied && (ifidx == -1 || ifidx == entry->interface_id)) { brcmf_fws_macdesc_cleanup()
814 brcmf_fws_psq_flush(fws, &entry->psq, ifidx); brcmf_fws_macdesc_cleanup()
815 entry->occupied = !!(entry->psq.len); brcmf_fws_macdesc_cleanup()
872 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; brcmf_fws_hdrpush() local
880 entry->name, brcmf_skb_if_flags_get_field(skb, INDEX), brcmf_fws_hdrpush()
883 if (entry->send_tim_signal) brcmf_fws_hdrpush()
905 if (entry->send_tim_signal) { brcmf_fws_hdrpush()
906 entry->send_tim_signal = 0; brcmf_fws_hdrpush()
909 wlh[2] = entry->mac_handle; brcmf_fws_hdrpush()
910 wlh[3] = entry->traffic_pending_bmp; brcmf_fws_hdrpush()
912 entry->mac_handle, entry->traffic_pending_bmp); brcmf_fws_hdrpush()
914 entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; brcmf_fws_hdrpush()
923 struct brcmf_fws_mac_descriptor *entry, brcmf_fws_tim_update()
934 if (brcmu_pktq_mlen(&entry->psq, 3 << (fifo * 2)) == 0) brcmf_fws_tim_update()
935 entry->traffic_pending_bmp &= ~NBITVAL(fifo); brcmf_fws_tim_update()
937 entry->traffic_pending_bmp |= NBITVAL(fifo); brcmf_fws_tim_update()
939 entry->send_tim_signal = false; brcmf_fws_tim_update()
940 if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) brcmf_fws_tim_update()
941 entry->send_tim_signal = true; brcmf_fws_tim_update()
942 if (send_immediately && entry->send_tim_signal && brcmf_fws_tim_update()
943 entry->state == BRCMF_FWS_STATE_CLOSE) { brcmf_fws_tim_update()
955 skcb->mac = entry; brcmf_fws_tim_update()
1001 struct brcmf_fws_mac_descriptor *entry, *existing; brcmf_fws_macdesc_indicate() local
1010 entry = &fws->desc.nodes[mac_handle & 0x1F]; brcmf_fws_macdesc_indicate()
1012 if (entry->occupied) { brcmf_fws_macdesc_indicate()
1014 entry->name, addr); brcmf_fws_macdesc_indicate()
1016 brcmf_fws_macdesc_cleanup(fws, entry, -1); brcmf_fws_macdesc_indicate()
1017 brcmf_fws_macdesc_deinit(entry); brcmf_fws_macdesc_indicate()
1026 if (!entry->occupied) { brcmf_fws_macdesc_indicate()
1028 entry->mac_handle = mac_handle; brcmf_fws_macdesc_indicate()
1029 brcmf_fws_macdesc_init(entry, addr, ifidx); brcmf_fws_macdesc_indicate()
1030 brcmf_fws_macdesc_set_name(fws, entry); brcmf_fws_macdesc_indicate()
1031 brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT, brcmf_fws_macdesc_indicate()
1034 brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr); brcmf_fws_macdesc_indicate()
1039 if (entry != existing) { brcmf_fws_macdesc_indicate()
1042 memcpy(entry, existing, brcmf_fws_macdesc_indicate()
1044 entry->mac_handle = mac_handle; brcmf_fws_macdesc_indicate()
1046 brcmf_fws_macdesc_set_name(fws, entry); brcmf_fws_macdesc_indicate()
1048 brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name, brcmf_fws_macdesc_indicate()
1052 WARN_ON(entry->mac_handle != mac_handle); brcmf_fws_macdesc_indicate()
1062 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_macdesc_state_indicate() local
1067 entry = &fws->desc.nodes[mac_handle & 0x1F]; brcmf_fws_macdesc_state_indicate()
1068 if (!entry->occupied) { brcmf_fws_macdesc_state_indicate()
1074 entry->requested_credit = 0; brcmf_fws_macdesc_state_indicate()
1075 entry->requested_packet = 0; brcmf_fws_macdesc_state_indicate()
1077 entry->state = BRCMF_FWS_STATE_OPEN; brcmf_fws_macdesc_state_indicate()
1080 entry->state = BRCMF_FWS_STATE_CLOSE; brcmf_fws_macdesc_state_indicate()
1081 brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BK, false); brcmf_fws_macdesc_state_indicate()
1082 brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BE, false); brcmf_fws_macdesc_state_indicate()
1083 brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VI, false); brcmf_fws_macdesc_state_indicate()
1084 brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true); brcmf_fws_macdesc_state_indicate()
1094 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_interface_state_indicate() local
1105 entry = &fws->desc.iface[ifidx]; brcmf_fws_interface_state_indicate()
1106 if (!entry->occupied) { brcmf_fws_interface_state_indicate()
1112 entry->name); brcmf_fws_interface_state_indicate()
1116 entry->state = BRCMF_FWS_STATE_OPEN; brcmf_fws_interface_state_indicate()
1120 entry->state = BRCMF_FWS_STATE_CLOSE; brcmf_fws_interface_state_indicate()
1139 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_request_indicate() local
1141 entry = &fws->desc.nodes[data[1] & 0x1F]; brcmf_fws_request_indicate()
1142 if (!entry->occupied) { brcmf_fws_request_indicate()
1151 brcmf_fws_get_tlv_name(type), type, entry->name, brcmf_fws_request_indicate()
1155 entry->requested_credit = data[0]; brcmf_fws_request_indicate()
1157 entry->requested_packet = data[0]; brcmf_fws_request_indicate()
1159 entry->ac_bitmap = data[2]; brcmf_fws_request_indicate()
1165 brcmf_fws_macdesc_use_req_credit(struct brcmf_fws_mac_descriptor *entry, brcmf_fws_macdesc_use_req_credit() argument
1168 if (entry->requested_credit > 0) { brcmf_fws_macdesc_use_req_credit()
1169 entry->requested_credit--; brcmf_fws_macdesc_use_req_credit()
1172 if (entry->state != BRCMF_FWS_STATE_CLOSE) brcmf_fws_macdesc_use_req_credit()
1174 } else if (entry->requested_packet > 0) { brcmf_fws_macdesc_use_req_credit()
1175 entry->requested_packet--; brcmf_fws_macdesc_use_req_credit()
1178 if (entry->state != BRCMF_FWS_STATE_CLOSE) brcmf_fws_macdesc_use_req_credit()
1188 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; brcmf_fws_macdesc_return_req_credit() local
1191 (entry->state == BRCMF_FWS_STATE_CLOSE)) brcmf_fws_macdesc_return_req_credit()
1192 entry->requested_credit++; brcmf_fws_macdesc_return_req_credit()
1245 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_enq() local
1253 entry = brcmf_skbcb(p)->mac; brcmf_fws_enq()
1254 if (entry == NULL) { brcmf_fws_enq()
1266 pq = &entry->psq; brcmf_fws_enq()
1311 } else if (brcmu_pktq_penq(&entry->psq, prec, p) == NULL) { brcmf_fws_enq()
1327 brcmf_fws_tim_update(fws, entry, fifo, true); brcmf_fws_enq()
1328 brcmf_fws_flow_control_check(fws, &entry->psq, brcmf_fws_enq()
1336 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_deq() local
1349 entry = &table[(node_pos + i) % num_nodes]; brcmf_fws_deq()
1350 if (!entry->occupied || brcmf_fws_deq()
1351 brcmf_fws_macdesc_closed(fws, entry, fifo)) brcmf_fws_deq()
1354 if (entry->suppressed) brcmf_fws_deq()
1358 p = brcmu_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out); brcmf_fws_deq()
1360 if (entry->suppressed) { brcmf_fws_deq()
1361 if (entry->suppr_transit_count) brcmf_fws_deq()
1363 entry->suppressed = false; brcmf_fws_deq()
1364 p = brcmu_pktq_mdeq(&entry->psq, brcmf_fws_deq()
1371 brcmf_fws_macdesc_use_req_credit(entry, p); brcmf_fws_deq()
1375 brcmf_fws_flow_control_check(fws, &entry->psq, brcmf_fws_deq()
1383 brcmf_fws_tim_update(fws, entry, fifo, false); brcmf_fws_deq()
1404 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; brcmf_fws_txstatus_suppressed() local
1411 if (!entry->suppressed) { brcmf_fws_txstatus_suppressed()
1412 entry->suppressed = true; brcmf_fws_txstatus_suppressed()
1413 entry->suppr_transit_count = entry->transit_count; brcmf_fws_txstatus_suppressed()
1415 entry->name, entry->transit_count); brcmf_fws_txstatus_suppressed()
1418 entry->generation = genbit; brcmf_fws_txstatus_suppressed()
1450 struct brcmf_fws_mac_descriptor *entry = NULL; brcmf_fws_txs_process() local
1478 entry = skcb->mac; brcmf_fws_txs_process()
1479 if (WARN_ON(!entry)) { brcmf_fws_txs_process()
1483 entry->transit_count--; brcmf_fws_txs_process()
1484 if (entry->suppressed && entry->suppr_transit_count) brcmf_fws_txs_process()
1485 entry->suppr_transit_count--; brcmf_fws_txs_process()
1487 brcmf_dbg(DATA, "%s flags %d htod %X seq %X\n", entry->name, flags, brcmf_fws_txs_process()
1745 struct brcmf_fws_mac_descriptor *entry = skcb->mac; brcmf_fws_precommit_skb() local
1749 brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation); brcmf_fws_precommit_skb()
1765 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_rollback_toq() local
1770 entry = brcmf_skbcb(skb)->mac; brcmf_fws_rollback_toq()
1771 if (entry->occupied) { brcmf_fws_rollback_toq()
1776 pktout = brcmu_pktq_penq_head(&entry->psq, qidx, skb); brcmf_fws_rollback_toq()
1778 brcmf_err("%s queue %d full\n", entry->name, qidx); brcmf_fws_rollback_toq()
1782 brcmf_err("%s entry removed\n", entry->name); brcmf_fws_rollback_toq()
1826 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_commit_skb() local
1831 entry = skcb->mac; brcmf_fws_commit_skb()
1832 if (IS_ERR(entry)) brcmf_fws_commit_skb()
1833 return PTR_ERR(entry); brcmf_fws_commit_skb()
1836 entry->transit_count++; brcmf_fws_commit_skb()
1837 if (entry->suppressed) brcmf_fws_commit_skb()
1838 entry->suppr_transit_count++; brcmf_fws_commit_skb()
1843 brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name, brcmf_fws_commit_skb()
1846 entry->transit_count--; brcmf_fws_commit_skb()
1847 if (entry->suppressed) brcmf_fws_commit_skb()
1848 entry->suppr_transit_count--; brcmf_fws_commit_skb()
1939 struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc; brcmf_fws_reset_interface() local
1942 if (!entry) brcmf_fws_reset_interface()
1945 brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx); brcmf_fws_reset_interface()
1951 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_add_interface() local
1956 entry = &fws->desc.iface[ifp->ifidx]; brcmf_fws_add_interface()
1957 ifp->fws_desc = entry; brcmf_fws_add_interface()
1958 brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx); brcmf_fws_add_interface()
1959 brcmf_fws_macdesc_set_name(fws, entry); brcmf_fws_add_interface()
1960 brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT, brcmf_fws_add_interface()
1962 brcmf_dbg(TRACE, "added %s\n", entry->name); brcmf_fws_add_interface()
1967 struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc; brcmf_fws_del_interface() local
1969 if (!entry) brcmf_fws_del_interface()
1974 brcmf_dbg(TRACE, "deleting %s\n", entry->name); brcmf_fws_del_interface()
1975 brcmf_fws_macdesc_deinit(entry); brcmf_fws_del_interface()
784 brcmf_fws_macdesc_closed(struct brcmf_fws_info *fws, struct brcmf_fws_mac_descriptor *entry, int fifo) brcmf_fws_macdesc_closed() argument
809 brcmf_fws_macdesc_cleanup(struct brcmf_fws_info *fws, struct brcmf_fws_mac_descriptor *entry, int ifidx) brcmf_fws_macdesc_cleanup() argument
922 brcmf_fws_tim_update(struct brcmf_fws_info *fws, struct brcmf_fws_mac_descriptor *entry, int fifo, bool send_immediately) brcmf_fws_tim_update() argument
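
The fwsignal fragments resolve a mac descriptor two ways: brcmf_fws_macdesc_lookup scans occupied slots for a matching MAC address, while the indicate/state paths index desc.nodes directly with the low five bits of the firmware handle (mac_handle & 0x1F). A sketch under those assumptions; the 32-slot table size is inferred from the mask, and the struct is pared down to what the lookups need:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NODES 32	/* inferred from the (mac_handle & 0x1F) mask above */

struct mac_descriptor {
	bool occupied;
	unsigned char ea[6];	/* MAC address */
};

static struct mac_descriptor nodes[NODES];

/* O(1): the firmware handle picks the slot directly */
static struct mac_descriptor *lookup_by_handle(unsigned mac_handle)
{
	return &nodes[mac_handle & 0x1F];
}

/* O(n): scan for an occupied slot with a matching address */
static struct mac_descriptor *lookup_by_ea(const unsigned char *ea)
{
	for (int i = 0; i < NODES; i++)
		if (nodes[i].occupied && !memcmp(nodes[i].ea, ea, 6))
			return &nodes[i];
	return NULL;
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct mac_descriptor *d = lookup_by_handle(7);

	d->occupied = true;
	memcpy(d->ea, mac, 6);
	printf("found: %d\n", lookup_by_ea(mac) == d);	/* prints 1 */
	return 0;
}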
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvif/
H A Dlist.h56 * struct list_head entry;
70 * list_add(&foo->entry, &bar.list_of_foos);
74 * list_del(&foo->entry);
84 * list_for_each_entry(iterator, &bar.list_of_foos, entry) {
93 * list_for_each_entry_safe(iterator, next, &bar.list_of_foos, entry) {
95 * list_del(&iterator->entry);
134 __list_add(struct list_head *entry, __list_add() argument
137 next->prev = entry; __list_add()
138 entry->next = next; __list_add()
139 entry->prev = prev; __list_add()
140 prev->next = entry; __list_add()
153 * list_add(&newfoo->entry, &bar->list_of_foos);
155 * @param entry The new element to prepend to the list.
159 list_add(struct list_head *entry, struct list_head *head) list_add() argument
161 __list_add(entry, head, head->next); list_add()
174 * list_add_tail(&newfoo->entry, &bar->list_of_foos);
176 * @param entry The new element to append to the list.
180 list_add_tail(struct list_head *entry, struct list_head *head) list_add_tail() argument
182 __list_add(entry, head->prev, head); list_add_tail()
202 * list_del(&foo->entry);
204 * @param entry The element to remove.
207 list_del(struct list_head *entry) list_del() argument
209 __list_del(entry->prev, entry->next); list_del()
213 list_del_init(struct list_head *entry) list_del_init() argument
215 __list_del(entry->prev, entry->next); list_del_init()
216 INIT_LIST_HEAD(entry); list_del_init()
245 * f = container_of(&foo->entry, struct foo, entry);
265 * Retrieve the first list entry for the given list pointer.
280 * Retrieve the last list entry for the given list pointer.
302 * list_for_each_entry(iterator, &bar->list_of_foos, entry) {
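
The list.h excerpt above documents its own intrusive-list pattern: the list_head node is embedded in the element, and the owning struct is recovered by offset (container_of). A self-contained illustration with minimal stand-ins for the header's helpers; this sketch is a model of the pattern, not the header itself:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *head)
{
	head->next = head->prev = head;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

/* container_of: step back from the embedded node to the owning struct */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct foo {
	int value;
	struct list_head entry;	/* the node lives inside the element */
};

int main(void)
{
	struct list_head bar;
	struct foo a = { .value = 1 }, b = { .value = 2 };

	list_init(&bar);
	list_add_tail(&a.entry, &bar);
	list_add_tail(&b.entry, &bar);

	for (struct list_head *p = bar.next; p != &bar; p = p->next)
		printf("%d\n", container_of(p, struct foo, entry)->value);
	return 0;	/* prints 1 then 2 */
}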
/linux-4.4.14/drivers/parisc/
H A Dpdc_stable.c98 /* This struct defines what we need to deal with a parisc pdc path entry */
100 rwlock_t rw_lock; /* to protect path entry access */
101 short ready; /* entry record is valid if != 0 */
102 unsigned long addr; /* entry address in stable storage */
103 char *name; /* entry name */
111 ssize_t (*show)(struct pdcspath_entry *entry, char *buf);
112 ssize_t (*store)(struct pdcspath_entry *entry, const char *buf, size_t count);
140 * pdcspath_fetch - This function populates the path entry structs.
141 * @entry: A pointer to an allocated pdcspath_entry.
149 * This function expects to be called with @entry->rw_lock write-hold.
152 pdcspath_fetch(struct pdcspath_entry *entry) pdcspath_fetch() argument
156 if (!entry) pdcspath_fetch()
159 devpath = &entry->devpath; pdcspath_fetch()
162 entry, devpath, entry->addr); pdcspath_fetch()
165 if (pdc_stable_read(entry->addr, devpath, sizeof(*devpath)) != PDC_OK) pdcspath_fetch()
171 entry->dev = hwpath_to_device((struct hardware_path *)devpath); pdcspath_fetch()
173 entry->ready = 1; pdcspath_fetch()
175 DPRINTK("%s: device: 0x%p\n", __func__, entry->dev); pdcspath_fetch()
182 * @entry: A pointer to an allocated pdcspath_entry.
190 * This function expects to be called with @entry->rw_lock write-hold.
193 pdcspath_store(struct pdcspath_entry *entry) pdcspath_store() argument
197 BUG_ON(!entry); pdcspath_store()
199 devpath = &entry->devpath; pdcspath_store()
204 if (!entry->ready) { pdcspath_store()
206 BUG_ON(!entry->dev); pdcspath_store()
207 device_to_hwpath(entry->dev, (struct hardware_path *)devpath); pdcspath_store()
212 entry, devpath, entry->addr); pdcspath_store()
215 if (pdc_stable_write(entry->addr, devpath, sizeof(*devpath)) != PDC_OK) pdcspath_store()
221 entry->ready = 2; pdcspath_store()
223 DPRINTK("%s: device: 0x%p\n", __func__, entry->dev); pdcspath_store()
228 * @entry: An allocated and populated pdcspath_entry struct.
234 pdcspath_hwpath_read(struct pdcspath_entry *entry, char *buf) pdcspath_hwpath_read() argument
240 if (!entry || !buf) pdcspath_hwpath_read()
243 read_lock(&entry->rw_lock); pdcspath_hwpath_read()
244 devpath = &entry->devpath; pdcspath_hwpath_read()
245 i = entry->ready; pdcspath_hwpath_read()
246 read_unlock(&entry->rw_lock); pdcspath_hwpath_read()
248 if (!i) /* entry is not ready */ pdcspath_hwpath_read()
263 * @entry: An allocated and populated pdcspath_entry struct.
277 pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t count) pdcspath_hwpath_write() argument
285 if (!entry || !buf || !count) pdcspath_hwpath_write()
322 "hardware path: %s\n", __func__, entry->name, buf); pdcspath_hwpath_write()
327 write_lock(&entry->rw_lock); pdcspath_hwpath_write()
328 entry->ready = 0; pdcspath_hwpath_write()
329 entry->dev = dev; pdcspath_hwpath_write()
332 pdcspath_store(entry); pdcspath_hwpath_write()
335 sysfs_remove_link(&entry->kobj, "device"); pdcspath_hwpath_write()
336 ret = sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device"); pdcspath_hwpath_write()
339 write_unlock(&entry->rw_lock); pdcspath_hwpath_write()
342 entry->name, buf); pdcspath_hwpath_write()
349 * @entry: An allocated and populated pdcspath_entry struct.
355 pdcspath_layer_read(struct pdcspath_entry *entry, char *buf) pdcspath_layer_read() argument
361 if (!entry || !buf) pdcspath_layer_read()
364 read_lock(&entry->rw_lock); pdcspath_layer_read()
365 devpath = &entry->devpath; pdcspath_layer_read()
366 i = entry->ready; pdcspath_layer_read()
367 read_unlock(&entry->rw_lock); pdcspath_layer_read()
369 if (!i) /* entry is not ready */ pdcspath_layer_read()
382 * @entry: An allocated and populated pdcspath_entry struct.
393 pdcspath_layer_write(struct pdcspath_entry *entry, const char *buf, size_t count) pdcspath_layer_write() argument
399 if (!entry || !buf || !count) pdcspath_layer_write()
425 write_lock(&entry->rw_lock); pdcspath_layer_write()
429 memcpy(&entry->devpath.layers, &layers, sizeof(layers)); pdcspath_layer_write()
432 pdcspath_store(entry); pdcspath_layer_write()
433 write_unlock(&entry->rw_lock); pdcspath_layer_write()
436 entry->name, buf); pdcspath_layer_write()
450 struct pdcspath_entry *entry = to_pdcspath_entry(kobj); pdcspath_attr_show() local
455 ret = pdcs_attr->show(entry, buf); pdcspath_attr_show()
471 struct pdcspath_entry *entry = to_pdcspath_entry(kobj); pdcspath_attr_store() local
479 ret = pdcs_attr->store(entry, buf, count); pdcspath_attr_store()
558 /* Current flags are stored in primary boot path entry */ pdcs_auto_read()
604 /* Current flags are stored in primary boot path entry */ pdcs_timer_read()
774 /* Current flags are stored in primary boot path entry */ pdcs_auto_write()
799 /* Change the path entry flags first */ pdcs_auto_write()
967 * It creates kobjects corresponding to each path entry with nice sysfs
977 struct pdcspath_entry *entry; pdcs_register_pathentries() local
981 for (i = 0; (entry = pdcspath_entries[i]); i++) pdcs_register_pathentries()
982 rwlock_init(&entry->rw_lock); pdcs_register_pathentries()
984 for (i = 0; (entry = pdcspath_entries[i]); i++) { pdcs_register_pathentries()
985 write_lock(&entry->rw_lock); pdcs_register_pathentries()
986 err = pdcspath_fetch(entry); pdcs_register_pathentries()
987 write_unlock(&entry->rw_lock); pdcs_register_pathentries()
992 entry->kobj.kset = paths_kset; pdcs_register_pathentries()
993 err = kobject_init_and_add(&entry->kobj, &ktype_pdcspath, NULL, pdcs_register_pathentries()
994 "%s", entry->name); pdcs_register_pathentries()
999 write_lock(&entry->rw_lock); pdcs_register_pathentries()
1000 entry->ready = 2; pdcs_register_pathentries()
1003 if (entry->dev) { pdcs_register_pathentries()
1004 err = sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device"); pdcs_register_pathentries()
1008 write_unlock(&entry->rw_lock); pdcs_register_pathentries()
1009 kobject_uevent(&entry->kobj, KOBJ_ADD); pdcs_register_pathentries()
1022 struct pdcspath_entry *entry; pdcs_unregister_pathentries() local
1024 for (i = 0; (entry = pdcspath_entries[i]); i++) { pdcs_unregister_pathentries()
1025 read_lock(&entry->rw_lock); pdcs_unregister_pathentries()
1026 if (entry->ready >= 2) pdcs_unregister_pathentries()
1027 kobject_put(&entry->kobj); pdcs_unregister_pathentries()
1028 read_unlock(&entry->rw_lock); pdcs_unregister_pathentries()
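
The pdc_stable read paths above follow one locking discipline: take entry->rw_lock just long enough to snapshot ready and the device path, drop the lock, then format the snapshot outside it (writers hold the lock across pdcspath_store). A userspace sketch of that snapshot-under-rwlock pattern using pthreads; names and field layout are illustrative, not the driver's:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct path_entry {
	pthread_rwlock_t rw_lock;
	int ready;
	char name[16];
};

static int entry_read(struct path_entry *e, char *buf, size_t len)
{
	int ready;
	char name[16];

	/* hold the read lock only while copying the fields */
	pthread_rwlock_rdlock(&e->rw_lock);
	ready = e->ready;
	memcpy(name, e->name, sizeof(name));
	pthread_rwlock_unlock(&e->rw_lock);

	if (!ready)	/* entry is not ready */
		return -1;
	return snprintf(buf, len, "%s\n", name);	/* format from the snapshot */
}

int main(void)
{
	struct path_entry e = { .ready = 1, .name = "primary" };
	char buf[32];

	pthread_rwlock_init(&e.rw_lock, NULL);
	if (entry_read(&e, buf, sizeof(buf)) > 0)
		fputs(buf, stdout);
	pthread_rwlock_destroy(&e.rw_lock);
	return 0;
}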
/linux-4.4.14/drivers/s390/block/
H A Ddcssblk.c89 struct segment_info *entry, *temp; dcssblk_release_segment() local
92 list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) { dcssblk_release_segment()
93 list_del(&entry->lh); dcssblk_release_segment()
94 kfree(entry); dcssblk_release_segment()
110 struct dcssblk_dev_info *entry; dcssblk_assign_free_minor() local
117 list_for_each_entry(entry, &dcssblk_devices, lh) dcssblk_assign_free_minor()
118 if (minor == entry->gd->first_minor) dcssblk_assign_free_minor()
136 struct dcssblk_dev_info *entry; dcssblk_get_device_by_name() local
138 list_for_each_entry(entry, &dcssblk_devices, lh) { dcssblk_get_device_by_name()
139 if (!strcmp(name, entry->segment_name)) { dcssblk_get_device_by_name()
140 return entry; dcssblk_get_device_by_name()
155 struct segment_info *entry; dcssblk_get_segment_by_name() local
158 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_get_segment_by_name()
159 if (!strcmp(name, entry->segment_name)) dcssblk_get_segment_by_name()
160 return entry; dcssblk_get_segment_by_name()
173 struct segment_info *entry; dcssblk_find_highest_addr() local
176 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_find_highest_addr()
177 if (highest_addr < entry->end) dcssblk_find_highest_addr()
178 highest_addr = entry->end; dcssblk_find_highest_addr()
191 struct segment_info *entry; dcssblk_find_lowest_addr() local
195 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_find_lowest_addr()
197 lowest_addr = entry->start; dcssblk_find_lowest_addr()
200 if (lowest_addr > entry->start) dcssblk_find_lowest_addr()
201 lowest_addr = entry->start; dcssblk_find_lowest_addr()
214 struct segment_info *sort_list, *entry, temp; dcssblk_is_continuous() local
225 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_is_continuous()
226 memcpy(&sort_list[i], entry, sizeof(struct segment_info)); dcssblk_is_continuous()
325 struct segment_info *entry, *temp; dcssblk_shared_store() local
338 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_shared_store()
339 rc = segment_modify_shared(entry->segment_name, dcssblk_shared_store()
363 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_shared_store()
364 rc = segment_modify_shared(entry->segment_name, dcssblk_shared_store()
384 temp = entry; dcssblk_shared_store()
385 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_shared_store()
386 if (entry != temp) dcssblk_shared_store()
387 segment_unload(entry->segment_name); dcssblk_shared_store()
413 * undone by storing a non-true value to this entry.
429 struct segment_info *entry; dcssblk_save_store() local
441 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_save_store()
442 if (entry->segment_type == SEG_TYPE_EN || dcssblk_save_store()
443 entry->segment_type == SEG_TYPE_SN) dcssblk_save_store()
446 entry->segment_name); dcssblk_save_store()
448 segment_save(entry->segment_name); dcssblk_save_store()
487 struct segment_info *entry; dcssblk_seglist_show() local
493 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_seglist_show()
494 strcpy(&buf[i], entry->segment_name); dcssblk_seglist_show()
495 i += strlen(entry->segment_name); dcssblk_seglist_show()
713 struct segment_info *entry; dcssblk_remove_store() local
761 list_for_each_entry(entry, &dev_info->seg_list, lh) dcssblk_remove_store()
762 segment_unload(entry->segment_name); dcssblk_remove_store()
795 struct segment_info *entry; dcssblk_release() local
806 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_release()
807 if (entry->segment_type == SEG_TYPE_EN || dcssblk_release()
808 entry->segment_type == SEG_TYPE_SN) dcssblk_release()
810 " be saved\n", entry->segment_name); dcssblk_release()
812 segment_save(entry->segment_name); dcssblk_release()
979 struct segment_info *entry; dcssblk_restore() local
984 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_restore()
985 segment_unload(entry->segment_name); dcssblk_restore()
986 rc = segment_load(entry->segment_name, SEGMENT_SHARED, dcssblk_restore()
990 segment_warning(rc, entry->segment_name); dcssblk_restore()
993 if (start != entry->start || end != entry->end) { dcssblk_restore()
996 entry->segment_name); dcssblk_restore()
/linux-4.4.14/arch/blackfin/mach-common/
H A DMakefile6 cache.o cache-c.o entry.o head.o \
/linux-4.4.14/arch/c6x/kernel/
H A DMakefile9 obj-y += switch_to.o entry.o vectors.o c6x_ksyms.o
/linux-4.4.14/arch/cris/arch-v10/kernel/
H A DMakefile8 obj-y := entry.o traps.o shadows.o debugport.o irq.o \
/linux-4.4.14/arch/cris/arch-v32/kernel/
H A DMakefile8 obj-y := entry.o traps.o irq.o debugport.o \
/linux-4.4.14/scripts/kconfig/
H A Dlist.h34 * list_entry - get the struct for this entry
54 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
76 * Insert a new entry between two known consecutive entries.
92 * list_add_tail - add a new entry
93 * @new: new entry to be added
96 * Insert a new entry before the specified head.
105 * Delete a list entry by making the prev/next entries
120 * list_del - deletes entry from list.
121 * @entry: the element to delete from the list.
122 * Note: list_empty() on entry does not return true after this, the entry is
125 static inline void list_del(struct list_head *entry) list_del() argument
127 __list_del(entry->prev, entry->next); list_del()
128 entry->next = (struct list_head*)LIST_POISON1; list_del()
129 entry->prev = (struct list_head*)LIST_POISON2; list_del()
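
The kconfig list_del above unlinks the node and then writes LIST_POISON1/LIST_POISON2 into its pointers, so a traversal through a stale node faults predictably instead of silently wandering through freed memory. A sketch of the same idea; the poison addresses here are placeholders, the real constants are defined elsewhere in the tree:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* placeholder poison values; the real LIST_POISON1/2 live elsewhere */
#define LIST_POISON1 ((struct list_head *)0x100)
#define LIST_POISON2 ((struct list_head *)0x200)

static void list_del(struct list_head *entry)
{
	/* unlink ... */
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	/* ... then poison, so stale use crashes deterministically */
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}

int main(void)
{
	struct list_head head, node;

	/* link head <-> node by hand */
	node.next = node.prev = &head;
	head.next = head.prev = &node;

	list_del(&node);
	printf("poisoned: %d\n", node.next == LIST_POISON1);	/* prints 1 */
	return 0;
}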
/linux-4.4.14/include/linux/soc/brcmstb/
H A Dbrcmstb.h6 * before SMP is brought up, called by machine entry point.
/linux-4.4.14/arch/mips/include/asm/
H A Dtlbmisc.h5 * - add_wired_entry() adds a fixed TLB entry, and moves the wired register
/linux-4.4.14/arch/sh/kernel/cpu/sh5/
H A DMakefile4 obj-y := entry.o probe.o switchto.o
/linux-4.4.14/arch/m32r/kernel/
H A DMakefile7 obj-y := process.o entry.o traps.o align.o irq.o setup.o time.o \
/linux-4.4.14/arch/m68k/68000/
H A DMakefile12 obj-y += entry.o ints.o timers.o
/linux-4.4.14/arch/arm/vfp/
H A DMakefile15 vfp-$(CONFIG_VFP) += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
/linux-4.4.14/arch/arm/mach-rpc/
H A Dfiq.S4 #include <mach/entry-macro.S>
/linux-4.4.14/fs/nilfs2/
H A Ddat.c91 struct nilfs_dat_entry *entry; nilfs_dat_commit_alloc() local
95 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_commit_alloc()
97 entry->de_start = cpu_to_le64(NILFS_CNO_MIN); nilfs_dat_commit_alloc()
98 entry->de_end = cpu_to_le64(NILFS_CNO_MAX); nilfs_dat_commit_alloc()
99 entry->de_blocknr = cpu_to_le64(0); nilfs_dat_commit_alloc()
115 struct nilfs_dat_entry *entry; nilfs_dat_commit_free() local
119 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_commit_free()
121 entry->de_start = cpu_to_le64(NILFS_CNO_MIN); nilfs_dat_commit_free()
122 entry->de_end = cpu_to_le64(NILFS_CNO_MIN); nilfs_dat_commit_free()
123 entry->de_blocknr = cpu_to_le64(0); nilfs_dat_commit_free()
142 struct nilfs_dat_entry *entry; nilfs_dat_commit_start() local
146 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_commit_start()
148 entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat)); nilfs_dat_commit_start()
149 entry->de_blocknr = cpu_to_le64(blocknr); nilfs_dat_commit_start()
157 struct nilfs_dat_entry *entry; nilfs_dat_prepare_end() local
169 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_prepare_end()
171 blocknr = le64_to_cpu(entry->de_blocknr); nilfs_dat_prepare_end()
188 struct nilfs_dat_entry *entry; nilfs_dat_commit_end() local
194 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_commit_end()
196 end = start = le64_to_cpu(entry->de_start); nilfs_dat_commit_end()
201 entry->de_end = cpu_to_le64(end); nilfs_dat_commit_end()
202 blocknr = le64_to_cpu(entry->de_blocknr); nilfs_dat_commit_end()
213 struct nilfs_dat_entry *entry; nilfs_dat_abort_end() local
219 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_abort_end()
221 start = le64_to_cpu(entry->de_start); nilfs_dat_abort_end()
222 blocknr = le64_to_cpu(entry->de_blocknr); nilfs_dat_abort_end()
329 struct nilfs_dat_entry *entry; nilfs_dat_move() local
342 * uncommitted block number, this makes a copy of the entry nilfs_dat_move()
354 entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); nilfs_dat_move()
355 if (unlikely(entry->de_blocknr == cpu_to_le64(0))) { nilfs_dat_move()
358 (unsigned long long)le64_to_cpu(entry->de_start), nilfs_dat_move()
359 (unsigned long long)le64_to_cpu(entry->de_end)); nilfs_dat_move()
365 entry->de_blocknr = cpu_to_le64(blocknr); nilfs_dat_move()
398 struct nilfs_dat_entry *entry; nilfs_dat_translate() local
417 entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); nilfs_dat_translate()
418 blocknr = le64_to_cpu(entry->de_blocknr); nilfs_dat_translate()
435 struct nilfs_dat_entry *entry; nilfs_dat_get_vinfo() local
457 entry = nilfs_palloc_block_get_entry( nilfs_dat_get_vinfo()
459 vinfo->vi_start = le64_to_cpu(entry->de_start); nilfs_dat_get_vinfo()
460 vinfo->vi_end = le64_to_cpu(entry->de_end); nilfs_dat_get_vinfo()
461 vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr); nilfs_dat_get_vinfo()
473 * @entry_size: size of a dat entry
487 "NILFS: too large DAT entry size: %zu bytes.\n", nilfs_dat_read()
492 "NILFS: too small DAT entry size: %zu bytes.\n", nilfs_dat_read()
/linux-4.4.14/kernel/events/
H A Dcallchain.c27 __weak void perf_callchain_kernel(struct perf_callchain_entry *entry, perf_callchain_kernel() argument
32 __weak void perf_callchain_user(struct perf_callchain_entry *entry, perf_callchain_user() argument
163 struct perf_callchain_entry *entry; perf_callchain() local
171 entry = get_callchain_entry(&rctx); perf_callchain()
175 if (!entry) perf_callchain()
178 entry->nr = 0; perf_callchain()
181 perf_callchain_store(entry, PERF_CONTEXT_KERNEL); perf_callchain()
182 perf_callchain_kernel(entry, regs); perf_callchain()
200 perf_callchain_store(entry, PERF_CONTEXT_USER); perf_callchain()
201 perf_callchain_user(entry, regs); perf_callchain()
208 return entry; perf_callchain()
/linux-4.4.14/arch/m68k/kernel/
H A Drelocate_kernel.S80 movel %a0@+,%d0 /* d0 = entry = *ptr */
83 btst #2,%d0 /* entry & IND_DONE? */
86 btst #1,%d0 /* entry & IND_INDIRECTION? */
89 movel %d0,%a0 /* ptr = entry & PAGE_MASK */
93 btst #0,%d0 /* entry & IND_DESTINATION? */
96 movel %d0,%a2 /* a2 = dst = entry & PAGE_MASK */
100 btst #3,%d0 /* entry & IND_SOURCE? */
104 movel %d0,%a3 /* a3 = src = entry & PAGE_MASK */
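
The assembly above walks kexec's indirection list: each 32-bit entry is a page address with flag bits in the low nibble, and the btst operands give the bit assignments (bit 0 destination, bit 1 indirection, bit 2 done, bit 3 source). An editor's C rendering of that walk with a toy page size; this models the control flow, it is not the kernel routine:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* flag bits as tested by the assembly above (btst #0/#1/#2/#3) */
#define IND_DESTINATION (1u << 0)
#define IND_INDIRECTION (1u << 1)
#define IND_DONE        (1u << 2)
#define IND_SOURCE      (1u << 3)

#define PAGE_SIZE 16u	/* toy page size for the demo */
#define PAGE_MASK (~(uintptr_t)(PAGE_SIZE - 1))

static void relocate(uintptr_t *ptr)
{
	char *dst = NULL;

	for (;;) {
		uintptr_t entry = *ptr++;	/* d0 = *ptr in the asm */

		if (entry & IND_DONE)
			return;
		if (entry & IND_INDIRECTION)
			ptr = (uintptr_t *)(entry & PAGE_MASK);	/* chase next table */
		else if (entry & IND_DESTINATION)
			dst = (char *)(entry & PAGE_MASK);	/* set copy target */
		else if (entry & IND_SOURCE) {
			memcpy(dst, (void *)(entry & PAGE_MASK), PAGE_SIZE);
			dst += PAGE_SIZE;
		}
	}
}

int main(void)
{
	_Alignas(16) char src[PAGE_SIZE] = "hello, kexec";
	_Alignas(16) char dst[PAGE_SIZE];
	uintptr_t list[3] = {
		(uintptr_t)dst | IND_DESTINATION,
		(uintptr_t)src | IND_SOURCE,
		IND_DONE,
	};

	relocate(list);
	printf("%s\n", dst);	/* prints "hello, kexec" */
	return 0;
}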
/linux-4.4.14/arch/frv/kernel/
H A DMakefile10 obj-y := $(heads-y) entry.o entry-table.o break.o switch_to.o \
/linux-4.4.14/drivers/net/wireless/p54/
H A Deeprom.c238 struct p54_channel_entry *entry = NULL; p54_update_channel_param() local
242 * so it's very likely that the entry we are looking for p54_update_channel_param()
247 entry = &list->channels[i]; p54_update_channel_param()
253 /* entry does not exist yet. Initialize a new one. */ p54_update_channel_param()
264 entry = &list->channels[i]; p54_update_channel_param()
265 entry->freq = freq; p54_update_channel_param()
266 entry->band = band; p54_update_channel_param()
267 entry->index = ieee80211_frequency_to_channel(freq); p54_update_channel_param()
268 entry->max_power = 0; p54_update_channel_param()
269 entry->data = 0; p54_update_channel_param()
273 if (entry) p54_update_channel_param()
274 entry->data |= data; p54_update_channel_param()
276 return entry; p54_update_channel_param()
522 struct p54_rssi_db_entry *entry; p54_parse_rssical() local
551 db_len = sizeof(*entry) * entries; p54_parse_rssical()
558 priv->rssi_db->entry_size = sizeof(*entry); p54_parse_rssical()
561 entry = (void *)((unsigned long)priv->rssi_db->data + priv->rssi_db->offset); p54_parse_rssical()
566 entry[i].freq = le16_to_cpu(cal[i].freq); p54_parse_rssical()
567 entry[i].mul = (s16) le16_to_cpu(cal[i].mul); p54_parse_rssical()
568 entry[i].add = (s16) le16_to_cpu(cal[i].add); p54_parse_rssical()
584 entry[i].freq = freq; p54_parse_rssical()
585 entry[i].mul = (s16) le16_to_cpu(cal[i].mul); p54_parse_rssical()
586 entry[i].add = (s16) le16_to_cpu(cal[i].add); p54_parse_rssical()
591 sort(entry, entries, sizeof(*entry), p54_compare_rssichan, NULL); p54_parse_rssical()
607 struct p54_rssi_db_entry *entry; p54_rssi_find() local
613 entry = (void *)(priv->rssi_db->data + priv->rssi_db->offset); p54_rssi_find()
615 if (!same_band(freq, entry[i].freq)) p54_rssi_find()
624 if (abs(freq - entry[i].freq) < p54_rssi_find()
625 abs(freq - entry[found].freq)) { p54_rssi_find()
633 return found < 0 ? &p54_rssi_default : &entry[found]; p54_rssi_find()
643 "found possible invalid default country eeprom entry. (entry size: %d)\n", p54_parse_default_country()
734 struct pda_entry *entry; p54_parse_eeprom() local
743 entry = (void *)wrap->data + le16_to_cpu(wrap->len); p54_parse_eeprom()
745 /* verify that at least the entry length/code fits */ p54_parse_eeprom()
746 while ((u8 *)entry <= end - sizeof(*entry)) { p54_parse_eeprom()
747 entry_len = le16_to_cpu(entry->len); p54_parse_eeprom()
750 /* abort if entry exceeds whole structure */ p54_parse_eeprom()
751 if ((u8 *)entry + sizeof(*entry) + data_len > end) p54_parse_eeprom()
754 switch (le16_to_cpu(entry->code)) { p54_parse_eeprom()
758 SET_IEEE80211_PERM_ADDR(dev, entry->data); p54_parse_eeprom()
763 err = p54_convert_output_limits(dev, entry->data, p54_parse_eeprom()
770 (struct pda_pa_curve_data *)entry->data; p54_parse_eeprom()
795 priv->iq_autocal = kmemdup(entry->data, data_len, p54_parse_eeprom()
805 p54_parse_default_country(dev, entry->data, data_len); p54_parse_eeprom()
808 tmp = entry->data; p54_parse_eeprom()
809 while ((u8 *)tmp < entry->data + data_len) { p54_parse_eeprom()
819 priv->version = *(u8 *)(entry->data + 1); p54_parse_eeprom()
824 err = p54_parse_rssical(dev, entry->data, data_len, p54_parse_eeprom()
825 le16_to_cpu(entry->code)); p54_parse_eeprom()
830 struct pda_custom_wrapper *pda = (void *) entry->data; p54_parse_eeprom()
851 struct pda_custom_wrapper *pda = (void *) entry->data; p54_parse_eeprom()
858 struct pda_custom_wrapper *pda = (void *) entry->data; p54_parse_eeprom()
865 crc16 = ~crc_ccitt(crc16, (u8 *) entry, sizeof(*entry)); p54_parse_eeprom()
866 if (crc16 != le16_to_cpup((__le16 *)entry->data)) { p54_parse_eeprom()
879 crc16 = crc_ccitt(crc16, (u8 *)entry, (entry_len + 1) * 2); p54_parse_eeprom()
880 entry = (void *)entry + (entry_len + 1) * 2; p54_parse_eeprom()
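
The p54_parse_eeprom fragments walk a sequence of length-prefixed records: verify the len/code header fits, verify the record stays inside the buffer, dispatch on code, then advance by (entry_len + 1) * 2 bytes. A hedged model of that bounds-checked walk; reading len as a count of 16-bit words after the len field is an assumption inferred from the advance arithmetic, not stated in the fragments:

#include <stdint.h>
#include <stdio.h>

/* illustrative TLV record; len semantics are an assumption (see above) */
struct pda_entry {
	uint16_t len;
	uint16_t code;
	uint8_t data[];
};

static void walk(const uint8_t *buf, size_t size)
{
	const uint8_t *end = buf + size;
	const struct pda_entry *e = (const void *)buf;

	/* verify at least the len/code header fits, as the driver does */
	while ((const uint8_t *)e <= end - sizeof(*e)) {
		size_t entry_len = e->len;
		size_t data_len = (entry_len - 1) * 2;

		/* abort if the record would run past the buffer */
		if ((const uint8_t *)e + sizeof(*e) + data_len > end)
			break;
		printf("code 0x%04x, %zu data bytes\n", e->code, data_len);
		e = (const void *)((const uint8_t *)e + (entry_len + 1) * 2);
	}
}

int main(void)
{
	/* two records: code 0x0101 with one data word, code 0x0202 with none */
	uint16_t words[5] = { 2, 0x0101, 0xbeef, 1, 0x0202 };

	walk((const uint8_t *)words, sizeof(words));
	return 0;
}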
/linux-4.4.14/fs/jfs/
H A Djfs_dtree.h38 * entry segment/slot
40 * an entry consists of type dependent head/only segment/slot and
42 * N.B. last/only segment of entry is terminated by next = -1;
63 * internal node entry head/only segment
76 /* compute number of slots for entry */
81 * leaf node entry head/only segment
107 * Maximum entry in inline directory table
114 u8 slot; /* 1: slot within leaf page of entry */
117 index of next entry when this entry was deleted */
135 /* compute number of slots for entry */
150 u8 nextindex; /* 1: next free entry in stbl */
156 s8 stbl[8]; /* 8: sorted entry index table */
173 * entry slot array of 32 byte slot
175 * sorted entry slot index table (stbl):
177 * 1-byte per entry
178 * 512 byte block: 16 entry tbl (1 slot)
179 * 1024 byte block: 32 entry tbl (1 slot)
180 * 2048 byte block: 64 entry tbl (2 slot)
181 * 4096 byte block: 128 entry tbl (4 slot)
190 * except nextindex which refers to entry index in stbl;
191 * end of entry slot list or freelist is marked with -1.
199 u8 nextindex; /* 1: next entry index in stbl */
233 /* get sorted entry table of the page */
/linux-4.4.14/fs/lockd/
H A Dprocfs.c73 struct proc_dir_entry *entry; lockd_create_procfs() local
75 entry = proc_mkdir("fs/lockd", NULL); lockd_create_procfs()
76 if (!entry) lockd_create_procfs()
78 entry = proc_create("nlm_end_grace", S_IRUGO|S_IWUSR, entry, lockd_create_procfs()
80 if (!entry) { lockd_create_procfs()
/linux-4.4.14/include/trace/
H A Dperf.h7 #define __entry entry
39 struct trace_event_raw_##call *entry; \
55 __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
59 entry = perf_trace_buf_prepare(__entry_size, \
61 if (!entry) \
70 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
/linux-4.4.14/drivers/staging/lustre/lustre/obdclass/
H A Dlustre_peer.c99 struct uuid_nid_data *data, *entry; class_add_uuid() local
116 list_for_each_entry(entry, &g_uuid_list, un_list) { class_add_uuid()
117 if (obd_uuid_equals(&entry->un_uuid, &data->un_uuid)) { class_add_uuid()
121 for (i = 0; i < entry->un_nid_count; i++) class_add_uuid()
122 if (nid == entry->un_nids[i]) class_add_uuid()
125 if (i == entry->un_nid_count) { class_add_uuid()
126 LASSERT(entry->un_nid_count < NIDS_MAX); class_add_uuid()
127 entry->un_nids[entry->un_nid_count++] = nid; class_add_uuid()
138 libcfs_nid2str(nid), entry->un_nid_count); class_add_uuid()
192 struct uuid_nid_data *entry; class_check_uuid() local
199 list_for_each_entry(entry, &g_uuid_list, un_list) { class_check_uuid()
202 if (!obd_uuid_equals(&entry->un_uuid, uuid)) class_check_uuid()
206 for (i = 0; i < entry->un_nid_count; i++) { class_check_uuid()
207 if (entry->un_nids[i] == nid) { class_check_uuid()
/linux-4.4.14/fs/fat/
H A Dfatent.c21 static void fat12_ent_blocknr(struct super_block *sb, int entry, fat12_ent_blocknr() argument
25 int bytes = entry + (entry >> 1); fat12_ent_blocknr()
26 WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); fat12_ent_blocknr()
31 static void fat_ent_blocknr(struct super_block *sb, int entry, fat_ent_blocknr() argument
35 int bytes = (entry << sbi->fatent_shift); fat_ent_blocknr()
36 WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); fat_ent_blocknr()
82 /* This entry is block boundary, it needs the next block */ fat12_ent_bread()
123 if (fatent->entry & 1) fat12_ent_get()
161 if (fatent->entry & 1) { fat12_ent_put()
196 u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1); fat12_ent_next()
198 fatent->entry++; fat12_ent_next()
228 fatent->entry++; fat16_ent_next()
240 fatent->entry++; fat32_ent_next()
326 /* Do this fatent's blocks include this entry? */ fat_ent_update_ptr()
331 /* This entry is on bhs[0]. */ fat_ent_update_ptr()
337 /* This entry needs the next block. */ fat_ent_update_ptr()
348 int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry) fat_ent_read() argument
356 if (entry < FAT_START_ENT || sbi->max_cluster <= entry) { fat_ent_read()
358 fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry); fat_ent_read()
362 fatent_set_entry(fatent, entry); fat_ent_read()
363 ops->ent_blocknr(sb, entry, &offset, &blocknr); fat_ent_read()
426 if (fatent->entry < sbi->max_cluster) fat_ent_next()
440 ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr); fat_ent_read_block()
486 if (fatent.entry >= sbi->max_cluster) fat_alloc_clusters()
487 fatent.entry = FAT_START_ENT; fat_alloc_clusters()
488 fatent_set_entry(&fatent, fatent.entry); fat_alloc_clusters()
496 int entry = fatent.entry; fat_alloc_clusters() local
501 ops->ent_put(&prev_ent, entry); fat_alloc_clusters()
505 sbi->prev_free = entry; fat_alloc_clusters()
509 cluster[idx_clus] = entry; fat_alloc_clusters()
569 fat_fs_error(sb, "%s: deleting FAT entry beyond EOF", fat_free_clusters()
581 if (cluster != fatent.entry + 1) { fat_free_clusters()
582 int nr_clus = fatent.entry - first_cl + 1; fat_free_clusters()
643 ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr); fat_ent_reada()
668 while (fatent.entry < sbi->max_cluster) { fat_count_free_clusters()
/linux-4.4.14/kernel/trace/
H A Dtrace_syscalls.c24 struct syscall_metadata *entry = call->data; syscall_get_enter_fields() local
26 return &entry->enter_fields; syscall_get_enter_fields()
117 struct syscall_metadata *entry; print_syscall_enter() local
122 entry = syscall_nr_to_meta(syscall); print_syscall_enter()
124 if (!entry) print_syscall_enter()
127 if (entry->enter_event->event.type != ent->type) { print_syscall_enter()
132 trace_seq_printf(s, "%s(", entry->name); print_syscall_enter()
134 for (i = 0; i < entry->nb_args; i++) { print_syscall_enter()
141 trace_seq_printf(s, "%s ", entry->types[i]); print_syscall_enter()
144 trace_seq_printf(s, "%s: %lx%s", entry->args[i], print_syscall_enter()
146 i == entry->nb_args - 1 ? "" : ", "); print_syscall_enter()
164 struct syscall_metadata *entry; print_syscall_exit() local
168 entry = syscall_nr_to_meta(syscall); print_syscall_exit()
170 if (!entry) { print_syscall_exit()
175 if (entry->exit_event->event.type != ent->type) { print_syscall_exit()
180 trace_seq_printf(s, "%s -> 0x%lx\n", entry->name, print_syscall_exit()
196 __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len) __set_enter_print_fmt() argument
205 for (i = 0; i < entry->nb_args; i++) { __set_enter_print_fmt()
207 entry->args[i], sizeof(unsigned long), __set_enter_print_fmt()
208 i == entry->nb_args - 1 ? "" : ", "); __set_enter_print_fmt()
212 for (i = 0; i < entry->nb_args; i++) { __set_enter_print_fmt()
214 ", ((unsigned long)(REC->%s))", entry->args[i]); __set_enter_print_fmt()
227 struct syscall_metadata *entry = call->data; set_syscall_print_fmt() local
229 if (entry->enter_event != call) { set_syscall_print_fmt()
235 len = __set_enter_print_fmt(entry, NULL, 0); set_syscall_print_fmt()
242 __set_enter_print_fmt(entry, print_fmt, len + 1); set_syscall_print_fmt()
250 struct syscall_metadata *entry = call->data; free_syscall_print_fmt() local
252 if (entry->enter_event == call) free_syscall_print_fmt()
298 struct syscall_trace_enter *entry; ftrace_syscall_enter() local
323 size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; ftrace_syscall_enter()
334 entry = ring_buffer_event_data(event); ftrace_syscall_enter()
335 entry->nr = syscall_nr; ftrace_syscall_enter()
336 syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); ftrace_syscall_enter()
338 event_trigger_unlock_commit(trace_file, buffer, event, entry, ftrace_syscall_enter()
346 struct syscall_trace_exit *entry; ftrace_syscall_exit() local
375 sys_data->exit_event->event.type, sizeof(*entry), ftrace_syscall_exit()
380 entry = ring_buffer_event_data(event); ftrace_syscall_exit()
381 entry->nr = syscall_nr; ftrace_syscall_exit()
382 entry->ret = syscall_get_return_value(current, regs); ftrace_syscall_exit()
384 event_trigger_unlock_commit(trace_file, buffer, event, entry, ftrace_syscall_exit()
600 "syscall entry trace point"); perf_sysenter_enable()
/linux-4.4.14/drivers/ntb/
H A Dntb_transport.c99 struct list_head entry; member in struct:ntb_queue_entry
115 unsigned int entry; member in struct:ntb_rx_info
195 struct list_head entry; member in struct:ntb_transport_client_dev
201 struct list_head entry; member in struct:ntb_transport_ctx
301 list_add_tail(&nt->entry, &ntb_transport_list); ntb_bus_init()
309 list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) { ntb_bus_remove()
312 list_del(&client_dev->entry); ntb_bus_remove()
316 list_del(&nt->entry); ntb_bus_remove()
338 list_for_each_entry(nt, &ntb_transport_list, entry) ntb_transport_unregister_client_dev()
339 list_for_each_entry_safe(client, cd, &nt->client_devs, entry) ntb_transport_unregister_client_dev()
342 list_del(&client->entry); ntb_transport_unregister_client_dev()
364 list_for_each_entry(nt, &ntb_transport_list, entry) { ntb_transport_register_client_dev()
390 list_add_tail(&client_dev->entry, &nt->client_devs); ntb_transport_register_client_dev()
498 qp->remote_rx_info->entry); debugfs_read()
533 static void ntb_list_add(spinlock_t *lock, struct list_head *entry, ntb_list_add() argument
539 list_add_tail(entry, list); ntb_list_add()
546 struct ntb_queue_entry *entry; ntb_list_rm() local
551 entry = NULL; ntb_list_rm()
554 entry = list_first_entry(list, struct ntb_queue_entry, entry); ntb_list_rm()
555 list_del(&entry->entry); ntb_list_rm()
560 return entry; ntb_list_rm()
567 struct ntb_queue_entry *entry; ntb_list_mv() local
573 entry = NULL; ntb_list_mv()
575 entry = list_first_entry(list, struct ntb_queue_entry, entry); ntb_list_mv()
576 list_move_tail(&entry->entry, to_list); ntb_list_mv()
581 return entry; ntb_list_mv()
618 qp->remote_rx_info->entry = qp->rx_max_entry - 1; ntb_transport_setup_qp_mw()
1167 struct ntb_queue_entry *entry; ntb_complete_rxc() local
1175 entry = list_first_entry(&qp->rx_post_q, ntb_complete_rxc()
1176 struct ntb_queue_entry, entry); ntb_complete_rxc()
1177 if (!(entry->flags & DESC_DONE_FLAG)) ntb_complete_rxc()
1180 entry->rx_hdr->flags = 0; ntb_complete_rxc()
1181 iowrite32(entry->index, &qp->rx_info->entry); ntb_complete_rxc()
1183 cb_data = entry->cb_data; ntb_complete_rxc()
1184 len = entry->len; ntb_complete_rxc()
1186 list_move_tail(&entry->entry, &qp->rx_free_q); ntb_complete_rxc()
1201 struct ntb_queue_entry *entry = data; ntb_rx_copy_callback() local
1203 entry->flags |= DESC_DONE_FLAG; ntb_rx_copy_callback()
1205 ntb_complete_rxc(entry->qp); ntb_rx_copy_callback()
1208 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) ntb_memcpy_rx() argument
1210 void *buf = entry->buf; ntb_memcpy_rx()
1211 size_t len = entry->len; ntb_memcpy_rx()
1218 ntb_rx_copy_callback(entry); ntb_memcpy_rx()
1221 static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset) ntb_async_rx() argument
1224 struct ntb_transport_qp *qp = entry->qp; ntb_async_rx()
1230 void *buf = entry->buf; ntb_async_rx()
1232 len = entry->len; ntb_async_rx()
1273 txd->callback_param = entry; ntb_async_rx()
1293 ntb_memcpy_rx(entry, offset); ntb_async_rx()
1300 struct ntb_queue_entry *entry; ntb_process_rxc() local
1330 entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q); ntb_process_rxc()
1331 if (!entry) { ntb_process_rxc()
1337 entry->rx_hdr = hdr; ntb_process_rxc()
1338 entry->index = qp->rx_index; ntb_process_rxc()
1340 if (hdr->len > entry->len) { ntb_process_rxc()
1343 hdr->len, entry->len); ntb_process_rxc()
1346 entry->len = -EIO; ntb_process_rxc()
1347 entry->flags |= DESC_DONE_FLAG; ntb_process_rxc()
1353 qp->rx_index, hdr->ver, hdr->len, entry->len); ntb_process_rxc()
1358 entry->len = hdr->len; ntb_process_rxc()
1360 ntb_async_rx(entry, offset); ntb_process_rxc()
1408 struct ntb_queue_entry *entry = data; ntb_tx_copy_callback() local
1409 struct ntb_transport_qp *qp = entry->qp; ntb_tx_copy_callback()
1410 struct ntb_payload_header __iomem *hdr = entry->tx_hdr; ntb_tx_copy_callback()
1412 iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags); ntb_tx_copy_callback()
1416 /* The entry length can only be zero if the packet is intended to be a ntb_tx_copy_callback()
1420 if (entry->len > 0) { ntb_tx_copy_callback()
1421 qp->tx_bytes += entry->len; ntb_tx_copy_callback()
1424 qp->tx_handler(qp, qp->cb_data, entry->cb_data, ntb_tx_copy_callback()
1425 entry->len); ntb_tx_copy_callback()
1428 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q); ntb_tx_copy_callback()
1431 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset) ntb_memcpy_tx() argument
1438 __copy_from_user_inatomic_nocache(offset, entry->buf, entry->len); ntb_memcpy_tx()
1440 memcpy_toio(offset, entry->buf, entry->len); ntb_memcpy_tx()
1446 ntb_tx_copy_callback(entry); ntb_memcpy_tx()
1450 struct ntb_queue_entry *entry) ntb_async_tx()
1461 size_t len = entry->len; ntb_async_tx()
1462 void *buf = entry->buf; ntb_async_tx()
1466 entry->tx_hdr = hdr; ntb_async_tx()
1468 iowrite32(entry->len, &hdr->len); ntb_async_tx()
1503 txd->callback_param = entry; ntb_async_tx()
1521 ntb_memcpy_tx(entry, offset); ntb_async_tx()
1526 struct ntb_queue_entry *entry) ntb_process_tx()
1528 if (qp->tx_index == qp->remote_rx_info->entry) { ntb_process_tx()
1533 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { ntb_process_tx()
1537 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, ntb_process_tx()
1542 ntb_async_tx(qp, entry); ntb_process_tx()
1555 struct ntb_queue_entry *entry; ntb_send_link_down() local
1564 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); ntb_send_link_down()
1565 if (entry) ntb_send_link_down()
1570 if (!entry) ntb_send_link_down()
1573 entry->cb_data = NULL; ntb_send_link_down()
1574 entry->buf = NULL; ntb_send_link_down()
1575 entry->len = 0; ntb_send_link_down()
1576 entry->flags = LINK_DOWN_FLAG; ntb_send_link_down()
1578 rc = ntb_process_tx(qp, entry); ntb_send_link_down()
1612 struct ntb_queue_entry *entry; ntb_transport_create_queue() local
1670 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node); ntb_transport_create_queue()
1671 if (!entry) ntb_transport_create_queue()
1674 entry->qp = qp; ntb_transport_create_queue()
1675 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, ntb_transport_create_queue()
1680 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node); ntb_transport_create_queue()
1681 if (!entry) ntb_transport_create_queue()
1684 entry->qp = qp; ntb_transport_create_queue()
1685 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, ntb_transport_create_queue()
1697 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) ntb_transport_create_queue()
1698 kfree(entry); ntb_transport_create_queue()
1700 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) ntb_transport_create_queue()
1701 kfree(entry); ntb_transport_create_queue()
1721 struct ntb_queue_entry *entry; ntb_transport_free_queue() local
1771 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) ntb_transport_free_queue()
1772 kfree(entry); ntb_transport_free_queue()
1774 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) { ntb_transport_free_queue()
1776 kfree(entry); ntb_transport_free_queue()
1779 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) { ntb_transport_free_queue()
1781 kfree(entry); ntb_transport_free_queue()
1784 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) ntb_transport_free_queue()
1785 kfree(entry); ntb_transport_free_queue()
1805 struct ntb_queue_entry *entry; ntb_transport_rx_remove() local
1811 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q); ntb_transport_rx_remove()
1812 if (!entry) ntb_transport_rx_remove()
1815 buf = entry->cb_data; ntb_transport_rx_remove()
1816 *len = entry->len; ntb_transport_rx_remove()
1818 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q); ntb_transport_rx_remove()
1825 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
1826 * @qp: NTB transport layer queue the entry is to be enqueued on
1839 struct ntb_queue_entry *entry; ntb_transport_rx_enqueue() local
1844 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q); ntb_transport_rx_enqueue()
1845 if (!entry) ntb_transport_rx_enqueue()
1848 entry->cb_data = cb; ntb_transport_rx_enqueue()
1849 entry->buf = data; ntb_transport_rx_enqueue()
1850 entry->len = len; ntb_transport_rx_enqueue()
1851 entry->flags = 0; ntb_transport_rx_enqueue()
1853 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q); ntb_transport_rx_enqueue()
1862 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
1863 * @qp: NTB transport layer queue the entry is to be enqueued on
1877 struct ntb_queue_entry *entry; ntb_transport_tx_enqueue() local
1883 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); ntb_transport_tx_enqueue()
1884 if (!entry) { ntb_transport_tx_enqueue()
1889 entry->cb_data = cb; ntb_transport_tx_enqueue()
1890 entry->buf = data; ntb_transport_tx_enqueue()
1891 entry->len = len; ntb_transport_tx_enqueue()
1892 entry->flags = 0; ntb_transport_tx_enqueue()
1894 rc = ntb_process_tx(qp, entry); ntb_transport_tx_enqueue()
1896 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, ntb_transport_tx_enqueue()
2018 unsigned int tail = qp->remote_rx_info->entry; ntb_transport_tx_free_entry()
1449 ntb_async_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) ntb_async_tx() argument
1525 ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) ntb_process_tx() argument
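The NTB transport lines above all revolve around moving ntb_queue_entry descriptors between per-queue lists (free, pending, posted) under a spinlock; ntb_list_mv() pops the head of one list and appends it to another in a single critical section. Below is a minimal userspace sketch of that locked move, with a pthread mutex standing in for the kernel spinlock; qentry, qlist and list_mv are hypothetical names, not kernel API.

    #include <pthread.h>
    #include <stddef.h>

    struct qentry { struct qentry *prev, *next; };
    struct qlist  { struct qentry head; };      /* circular list, head is a sentinel */

    static void qlist_init(struct qlist *l)
    {
        l->head.prev = l->head.next = &l->head;
    }

    static struct qentry *list_mv(pthread_mutex_t *lock,
                                  struct qlist *from, struct qlist *to)
    {
        struct qentry *e = NULL;

        pthread_mutex_lock(lock);
        if (from->head.next != &from->head) {   /* source list not empty */
            e = from->head.next;
            e->prev->next = e->next;            /* unlink from source head */
            e->next->prev = e->prev;
            e->prev = to->head.prev;            /* append at destination tail */
            e->next = &to->head;
            to->head.prev->next = e;
            to->head.prev = e;
        }
        pthread_mutex_unlock(lock);
        return e;                               /* NULL if source was empty */
    }

Returning NULL on an empty source list is what lets ntb_process_rxc() above detect that no receive buffer was posted for the incoming packet.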
/linux-4.4.14/kernel/power/
H A Dconsole.c47 struct pm_vt_switch *entry, *tmp; pm_vt_switch_required() local
58 entry = kmalloc(sizeof(*entry), GFP_KERNEL); pm_vt_switch_required()
59 if (!entry) pm_vt_switch_required()
62 entry->required = required; pm_vt_switch_required()
63 entry->dev = dev; pm_vt_switch_required()
65 list_add(&entry->head, &pm_vt_switch_list); pm_vt_switch_required()
108 struct pm_vt_switch *entry; pm_vt_switch() local
118 list_for_each_entry(entry, &pm_vt_switch_list, head) { pm_vt_switch()
119 if (entry->required) pm_vt_switch()
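pm_vt_switch_required() above allocates one record per device and pushes it on a global list; pm_vt_switch() then walks that list and decides as soon as one registered device requires a VT switch. A hedged sketch of that register-then-scan pattern, with hypothetical names and the kernel's mutex elided:

    #include <stdbool.h>
    #include <stdlib.h>

    struct vt_switch_rec {
        struct vt_switch_rec *next;
        const void *dev;
        bool required;
    };

    static struct vt_switch_rec *vt_switch_list;

    static int vt_switch_register(const void *dev, bool required)
    {
        struct vt_switch_rec *rec = malloc(sizeof(*rec));

        if (!rec)
            return -1;
        rec->dev = dev;
        rec->required = required;
        rec->next = vt_switch_list;        /* push front, like list_add() */
        vt_switch_list = rec;
        return 0;
    }

    static bool vt_switch_needed(void)
    {
        const struct vt_switch_rec *rec;

        for (rec = vt_switch_list; rec; rec = rec->next)
            if (rec->required)             /* one registration decides it */
                return true;
        return false;
    }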
/linux-4.4.14/fs/omfs/
H A Dfile.c35 struct omfs_extent_entry *entry; omfs_shrink_inode() local
42 /* traverse extent table, freeing each entry that is greater omfs_shrink_inode()
71 entry = &oe->e_entry; omfs_shrink_inode()
73 /* ignore last entry as it is the terminator */ omfs_shrink_inode()
76 start = be64_to_cpu(entry->e_cluster); omfs_shrink_inode()
77 count = be64_to_cpu(entry->e_blocks); omfs_shrink_inode()
80 entry++; omfs_shrink_inode()
120 struct omfs_extent_entry *entry = &oe->e_entry; omfs_grow_extent() local
142 terminator = entry + extent_count - 1; omfs_grow_extent()
144 entry = terminator-1; omfs_grow_extent()
145 new_block = be64_to_cpu(entry->e_cluster) + omfs_grow_extent()
146 be64_to_cpu(entry->e_blocks); omfs_grow_extent()
149 be64_add_cpu(&entry->e_blocks, 1); omfs_grow_extent()
167 /* copy terminator down an entry */ omfs_grow_extent()
168 entry = terminator; omfs_grow_extent()
170 memcpy(terminator, entry, sizeof(struct omfs_extent_entry)); omfs_grow_extent()
172 entry->e_cluster = cpu_to_be64(new_block); omfs_grow_extent()
173 entry->e_blocks = cpu_to_be64((u64) new_count); omfs_grow_extent()
178 /* write in new entry */ omfs_grow_extent()
227 struct omfs_extent_entry *entry; omfs_get_block() local
248 entry = &oe->e_entry; omfs_get_block()
253 offset = find_block(inode, entry, block, extent_count, &remain); omfs_get_block()
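omfs_grow_extent() above extends the file by one block: if the new block is contiguous with the last extent it simply bumps e_blocks, otherwise it copies the terminator down one slot and writes a fresh (cluster, count) pair into the freed slot. A sketch of that extend-or-append decision; the on-disk big-endian conversions (be64_*) are elided and the names are hypothetical.

    #include <stdint.h>
    #include <string.h>

    struct extent { uint64_t cluster, blocks; };

    /* entries[0..n-1] are live extents, entries[n] is the terminator */
    static int extent_add(struct extent *entries, int *n, int max,
                          uint64_t new_block)
    {
        if (*n > 0) {
            struct extent *last = &entries[*n - 1];

            if (last->cluster + last->blocks == new_block) {
                last->blocks++;            /* contiguous: just widen the extent */
                return 0;
            }
        }
        if (*n + 1 >= max)
            return -1;                     /* no room for entry + terminator */
        /* copy the terminator down one slot, then fill the freed slot */
        memmove(&entries[*n + 1], &entries[*n], sizeof(entries[0]));
        entries[*n].cluster = new_block;
        entries[*n].blocks = 1;
        (*n)++;
        return 0;
    }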
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
H A Damdgpu_bo_list.c100 struct amdgpu_bo_list_entry *entry = &array[i]; amdgpu_bo_list_set() local
107 entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); amdgpu_bo_list_set()
109 entry->priority = info[i].bo_priority; amdgpu_bo_list_set()
110 entry->prefered_domains = entry->robj->initial_domain; amdgpu_bo_list_set()
111 entry->allowed_domains = entry->prefered_domains; amdgpu_bo_list_set()
112 if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) amdgpu_bo_list_set()
113 entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; amdgpu_bo_list_set()
114 if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) { amdgpu_bo_list_set()
116 entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT; amdgpu_bo_list_set()
117 entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; amdgpu_bo_list_set()
119 entry->tv.bo = &entry->robj->tbo; amdgpu_bo_list_set()
120 entry->tv.shared = true; amdgpu_bo_list_set()
122 if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) amdgpu_bo_list_set()
123 gds_obj = entry->robj; amdgpu_bo_list_set()
124 if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS) amdgpu_bo_list_set()
125 gws_obj = entry->robj; amdgpu_bo_list_set()
126 if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA) amdgpu_bo_list_set()
127 oa_obj = entry->robj; amdgpu_bo_list_set()
129 trace_amdgpu_bo_list_set(list, entry->robj); amdgpu_bo_list_set()
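amdgpu_bo_list_set() above seeds each entry's allowed domains from its preferred placement, widens VRAM-only objects so they may fall back to GTT, and forces userptr objects (backed by CPU pages) into GTT. A sketch of that decision; the flag values here are hypothetical stand-ins for the AMDGPU_GEM_DOMAIN_* constants.

    #include <stdbool.h>
    #include <stdint.h>

    #define DOM_VRAM 0x1u                  /* hypothetical flag values */
    #define DOM_GTT  0x2u

    struct bo_entry { uint32_t preferred, allowed; };

    static void set_domains(struct bo_entry *e, uint32_t initial, bool userptr)
    {
        e->preferred = initial;
        e->allowed = e->preferred;
        if (e->allowed == DOM_VRAM)
            e->allowed |= DOM_GTT;         /* permit eviction/fallback to GTT */
        if (userptr) {
            e->preferred = DOM_GTT;        /* CPU pages can only live in GTT */
            e->allowed = DOM_GTT;
        }
    }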
/linux-4.4.14/net/irda/
H A Dirqueue.c261 * Remove first entry in queue
295 * Return the removed entry (or NULL if queue was empty). dequeue_first()
338 * Return the removed entry (or NULL if queue was empty). dequeue_general()
442 * Function hashbin_insert (hashbin, entry, name)
444 * Insert an entry into the hashbin
447 void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv, hashbin_insert() argument
471 entry->q_hash = hashv; hashbin_insert()
473 strlcpy( entry->q_name, name, sizeof(entry->q_name)); hashbin_insert()
476 * Insert new entry first hashbin_insert()
479 entry); hashbin_insert()
492 * Remove first entry of the hashbin
501 irda_queue_t *entry = NULL; hashbin_remove_first() local
508 entry = hashbin_get_first( hashbin); hashbin_remove_first()
509 if ( entry != NULL) { hashbin_remove_first()
515 hashv = entry->q_hash; hashbin_remove_first()
519 * Dequeue the entry... hashbin_remove_first()
522 entry); hashbin_remove_first()
524 entry->q_next = NULL; hashbin_remove_first()
525 entry->q_prev = NULL; hashbin_remove_first()
531 if ( entry == hashbin->hb_current) hashbin_remove_first()
540 return entry; hashbin_remove_first()
547 * Remove entry with the given name
552 * leading to removing the WRONG entry.
562 irda_queue_t* entry; hashbin_remove() local
580 * Search for entry hashbin_remove()
582 entry = hashbin->hb_queue[ bin ]; hashbin_remove()
583 if ( entry ) { hashbin_remove()
588 if ( entry->q_hash == hashv ) { hashbin_remove()
593 if ( strcmp( entry->q_name, name) == 0) hashbin_remove()
603 entry = entry->q_next; hashbin_remove()
604 } while ( entry != hashbin->hb_queue[ bin ] ); hashbin_remove()
608 * If entry was found, dequeue it hashbin_remove()
612 entry); hashbin_remove()
619 if ( entry == hashbin->hb_current) hashbin_remove()
631 return entry; hashbin_remove()
639 * Function hashbin_remove_this (hashbin, entry)
641 * Remove the given entry from the hashbin hashbin_remove_this()
649 void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry) hashbin_remove_this() argument
657 IRDA_ASSERT( entry != NULL, return NULL;); hashbin_remove_this()
665 if((entry->q_next == NULL) || (entry->q_prev == NULL)) { hashbin_remove_this()
666 entry = NULL; hashbin_remove_this()
673 hashv = entry->q_hash; hashbin_remove_this()
677 * Dequeue the entry... hashbin_remove_this()
680 entry); hashbin_remove_this()
682 entry->q_next = NULL; hashbin_remove_this()
683 entry->q_prev = NULL; hashbin_remove_this()
689 if ( entry == hashbin->hb_current) hashbin_remove_this()
697 return entry; hashbin_remove_this()
712 irda_queue_t* entry; hashbin_find() local
727 * Search for entry hashbin_find()
729 entry = hashbin->hb_queue[ bin]; hashbin_find()
730 if ( entry ) { hashbin_find()
735 if ( entry->q_hash == hashv ) { hashbin_find()
740 if ( strcmp( entry->q_name, name ) == 0 ) { hashbin_find()
741 return entry; hashbin_find()
744 return entry; hashbin_find()
747 entry = entry->q_next; hashbin_find()
748 } while ( entry != hashbin->hb_queue[ bin ] ); hashbin_find()
767 irda_queue_t* entry; hashbin_lock_find() local
773 * Search for entry hashbin_lock_find()
775 entry = hashbin_find(hashbin, hashv, name); hashbin_lock_find()
780 return entry; hashbin_lock_find()
792 * NULL if the entry is removed. - Jean II
798 irda_queue_t* entry; hashbin_find_next() local
804 * Search for current entry hashbin_find_next()
808 entry = hashbin_find(hashbin, hashv, name); hashbin_find_next()
813 if(entry) { hashbin_find_next()
814 hashbin->hb_current = entry; hashbin_find_next()
822 return entry; hashbin_find_next()
834 irda_queue_t *entry; hashbin_get_first() local
844 entry = hashbin->hb_queue[ i]; hashbin_get_first()
845 if ( entry) { hashbin_get_first()
846 hashbin->hb_current = entry; hashbin_get_first()
847 return entry; hashbin_get_first()
869 irda_queue_t* entry; hashbin_get_next() local
880 entry = hashbin->hb_current->q_next; hashbin_get_next()
881 bin = GET_HASHBIN( entry->q_hash); hashbin_get_next()
887 if ( entry != hashbin->hb_queue[ bin ]) { hashbin_get_next()
888 hashbin->hb_current = entry; hashbin_get_next()
890 return entry; hashbin_get_next()
904 entry = hashbin->hb_queue[ i]; hashbin_get_next()
905 if ( entry) { hashbin_get_next()
906 hashbin->hb_current = entry; hashbin_get_next()
908 return entry; hashbin_get_next()
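The hashbin routines above hash a name to a bucket and walk that bucket's circular queue, comparing the cached q_hash before paying for strcmp(); the do/while stops once it has gone full circle. A sketch of that lookup with hypothetical names and a toy hash function:

    #include <stddef.h>
    #include <string.h>

    #define NBUCKETS 8

    struct qnode {
        struct qnode *next;                /* circular within a bucket */
        unsigned long hash;                /* cached, like q_hash */
        char name[16];
    };

    static unsigned long name_hash(const char *s)
    {
        unsigned long h = 0;

        while (*s)
            h = h * 31 + (unsigned char)*s++;
        return h;
    }

    static struct qnode *bucket_find(struct qnode *buckets[NBUCKETS],
                                     const char *name)
    {
        unsigned long h = name_hash(name);
        struct qnode *first = buckets[h % NBUCKETS], *n = first;

        if (!n)
            return NULL;
        do {
            if (n->hash == h && strcmp(n->name, name) == 0)
                return n;                  /* cheap compare first, then strcmp */
            n = n->next;
        } while (n != first);              /* full circle: not found */
        return NULL;
    }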
/linux-4.4.14/drivers/gpu/drm/radeon/
H A Dmkregtable.c38 * using the generic single-entry routines.
57 * Insert a new entry between two known consecutive entries.
77 * list_add - add a new entry
78 * @new: new entry to be added
81 * Insert a new entry after the specified head.
90 * list_add_tail - add a new entry
91 * @new: new entry to be added
94 * Insert a new entry before the specified head.
103 * Delete a list entry by making the prev/next entries
116 * list_del - deletes entry from list.
117 * @entry: the element to delete from the list.
118 * Note: list_empty() on entry does not return true after this, the entry is
122 static inline void list_del(struct list_head *entry) list_del() argument
124 __list_del(entry->prev, entry->next); list_del()
125 entry->next = (void *)0xDEADBEEF; list_del()
126 entry->prev = (void *)0xBEEFDEAD; list_del()
129 extern void list_del(struct list_head *entry);
133 * list_replace - replace old entry by new one
155 * list_del_init - deletes entry from list and reinitialize it.
156 * @entry: the element to delete from the list.
158 static inline void list_del_init(struct list_head *entry) list_del_init() argument
160 __list_del(entry->prev, entry->next); list_del_init()
161 INIT_LIST_HEAD(entry); list_del_init()
166 * @list: the entry to move
167 * @head: the head that will precede our entry
177 * @list: the entry to move
178 * @head: the head that will follow our entry
188 * list_is_last - tests whether @list is the last entry in list @head
189 * @list: the entry to test
217 * to the list entry is list_del_init(). Eg. it cannot be used
227 * list_is_singular - tests whether a list has just one entry.
237 struct list_head *entry) __list_cut_position()
239 struct list_head *new_first = entry->next; __list_cut_position()
242 list->prev = entry; __list_cut_position()
243 entry->next = list; __list_cut_position()
252 * @entry: an entry within head, could be the head itself
256 * including @entry, from @head to @list. You should
257 * pass on @entry an element you know is on @head. @list
264 struct list_head *entry) list_cut_position()
268 if (list_is_singular(head) && (head->next != entry && head != entry)) list_cut_position()
270 if (entry == head) list_cut_position()
273 __list_cut_position(list, head, entry); list_cut_position()
347 * list_entry - get the struct for this entry
385 * list_for_each_safe - iterate over a list safe against removal of list entry
395 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
428 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
433 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
479 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
499 * safe against removal of list entry.
515 * removal of list entry.
530 * of list entry.
235 __list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) __list_cut_position() argument
262 list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) list_cut_position() argument
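mkregtable.c above carries a private copy of the kernel's list.h; note how its list_del() poisons the unlinked node with 0xDEADBEEF/0xBEEFDEAD so a stale use faults loudly, while list_del_init() re-initializes the node so it can be reused. A self-contained sketch contrasting the two:

    #include <stdint.h>

    struct list_head { struct list_head *next, *prev; };

    static void __list_del(struct list_head *prev, struct list_head *next)
    {
        next->prev = prev;
        prev->next = next;
    }

    static void list_del_poison(struct list_head *entry)
    {
        __list_del(entry->prev, entry->next);
        entry->next = (void *)(uintptr_t)0xDEADBEEF;   /* stale use faults */
        entry->prev = (void *)(uintptr_t)0xBEEFDEAD;
    }

    static void list_del_reinit(struct list_head *entry)
    {
        __list_del(entry->prev, entry->next);
        entry->next = entry;                           /* empty and reusable */
        entry->prev = entry;
    }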
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4/
H A Dl2t.h45 L2T_STATE_VALID, /* entry is up to date */
46 L2T_STATE_STALE, /* entry may be used but needs revalidation */
47 L2T_STATE_RESOLVING, /* entry needs address resolution */
48 L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
51 /* when state is one of the below the entry is not hashed */
52 L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
53 L2T_STATE_UNUSED /* entry not in use */
64 * Each L2T entry plays multiple roles. First of all, it keeps state for the
65 * corresponding entry of the HW L2 table and maintains a queue of offload
72 u16 state; /* entry state */
73 u16 idx; /* entry index within in-memory table */
82 atomic_t refcnt; /* entry reference count */
83 u16 hash; /* hash bucket the entry is on */
85 u8 v6; /* whether entry is for IPv6 */
/linux-4.4.14/arch/ia64/sn/kernel/sn2/
H A Dprominfo_proc.c25 /* Standard Intel FIT entry types */
26 #define FIT_ENTRY_FIT_HEADER 0x00 /* FIT header entry */
27 #define FIT_ENTRY_PAL_B 0x01 /* PAL_B entry */
29 #define FIT_ENTRY_PAL_A_PROC 0x0E /* Processor-specific PAL_A entry */
30 #define FIT_ENTRY_PAL_A 0x0F /* PAL_A entry, same as... */
31 #define FIT_ENTRY_PAL_A_GEN 0x0F /* ...Generic PAL_A entry */
34 #define FIT_ENTRY_SAL_A 0x10 /* SAL_A entry */
35 #define FIT_ENTRY_SAL_B 0x11 /* SAL_B entry */
36 #define FIT_ENTRY_SALRUNTIME 0x12 /* SAL runtime entry */
37 #define FIT_ENTRY_EFI 0x1F /* EFI entry */
38 #define FIT_ENTRY_FPSWA 0x20 /* embedded fpswa entry */
39 #define FIT_ENTRY_VMLINUX 0x21 /* embedded vmlinux entry */
123 * pages -- each entry is about 60 chars wide when printed.) I read
189 /* module entry points */
/linux-4.4.14/arch/arm64/kernel/
H A Dperf_callchain.c34 struct perf_callchain_entry *entry) user_backtrace()
50 perf_callchain_store(entry, buftail.lr); user_backtrace()
79 struct perf_callchain_entry *entry) compat_user_backtrace()
95 perf_callchain_store(entry, buftail.lr); compat_user_backtrace()
109 void perf_callchain_user(struct perf_callchain_entry *entry, perf_callchain_user() argument
117 perf_callchain_store(entry, regs->pc); perf_callchain_user()
125 while (entry->nr < PERF_MAX_STACK_DEPTH && perf_callchain_user()
127 tail = user_backtrace(tail, entry); perf_callchain_user()
135 while ((entry->nr < PERF_MAX_STACK_DEPTH) && perf_callchain_user()
137 tail = compat_user_backtrace(tail, entry); perf_callchain_user()
149 struct perf_callchain_entry *entry = data; callchain_trace() local
150 perf_callchain_store(entry, frame->pc); callchain_trace()
154 void perf_callchain_kernel(struct perf_callchain_entry *entry, perf_callchain_kernel() argument
168 walk_stackframe(&frame, callchain_trace, entry); perf_callchain_kernel()
33 user_backtrace(struct frame_tail __user *tail, struct perf_callchain_entry *entry) user_backtrace() argument
78 compat_user_backtrace(struct compat_frame_tail __user *tail, struct perf_callchain_entry *entry) compat_user_backtrace() argument
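user_backtrace() above reads one (fp, lr) record per frame, stores the return address, and follows fp, bounded both by PERF_MAX_STACK_DEPTH and by the requirement that the frame pointer keep moving up the stack. A sketch of that bounded walk; the copy_from_user() step and the compat path are elided, and the types are hypothetical.

    #include <stddef.h>
    #include <stdint.h>

    #define MAX_DEPTH 127                  /* stands in for PERF_MAX_STACK_DEPTH */

    struct frame_tail { const struct frame_tail *fp; uint64_t lr; };

    static size_t backtrace_fp(const struct frame_tail *tail,
                               uint64_t *pcs, size_t max)
    {
        size_t n = 0;

        while (tail && n < max && n < MAX_DEPTH) {
            pcs[n++] = tail->lr;           /* record the return address */
            if (tail->fp <= tail)          /* frame must move up the stack */
                break;
            tail = tail->fp;
        }
        return n;
    }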
H A DMakefile14 arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
15 entry-fpsimd.o process.o ptrace.o setup.o signal.o \
22 extra-$(CONFIG_EFI) := efi-entry.o
31 arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
40 arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o
H A Djump_label.c25 void arch_jump_label_transform(struct jump_entry *entry, arch_jump_label_transform() argument
28 void *addr = (void *)entry->code; arch_jump_label_transform()
32 insn = aarch64_insn_gen_branch_imm(entry->code, arch_jump_label_transform()
33 entry->target, arch_jump_label_transform()
42 void arch_jump_label_transform_static(struct jump_entry *entry, arch_jump_label_transform_static() argument
/linux-4.4.14/sound/firewire/bebob/
H A Dbebob_proc.c36 proc_read_hw_info(struct snd_info_entry *entry, proc_read_hw_info() argument
39 struct snd_bebob *bebob = entry->private_data; proc_read_hw_info()
72 proc_read_meters(struct snd_info_entry *entry, proc_read_meters() argument
75 struct snd_bebob *bebob = entry->private_data; proc_read_meters()
105 proc_read_formation(struct snd_info_entry *entry, proc_read_formation() argument
108 struct snd_bebob *bebob = entry->private_data; proc_read_formation()
132 proc_read_clock(struct snd_info_entry *entry, proc_read_clock() argument
140 struct snd_bebob *bebob = entry->private_data; proc_read_clock()
163 struct snd_info_entry *entry; add_node() local
165 entry = snd_info_create_card_entry(bebob->card, name, root); add_node()
166 if (entry == NULL) add_node()
169 snd_info_set_text_ops(entry, bebob, op); add_node()
170 if (snd_info_register(entry) < 0) add_node()
171 snd_info_free_entry(entry); add_node()
/linux-4.4.14/sound/firewire/fireworks/
H A Dfireworks_proc.c29 proc_read_hwinfo(struct snd_info_entry *entry, struct snd_info_buffer *buffer) proc_read_hwinfo() argument
31 struct snd_efw *efw = entry->private_data; proc_read_hwinfo()
106 proc_read_clock(struct snd_info_entry *entry, struct snd_info_buffer *buffer) proc_read_clock() argument
108 struct snd_efw *efw = entry->private_data; proc_read_clock()
128 proc_read_phys_meters(struct snd_info_entry *entry, proc_read_phys_meters() argument
131 struct snd_efw *efw = entry->private_data; proc_read_phys_meters()
179 proc_read_queues_state(struct snd_info_entry *entry, proc_read_queues_state() argument
182 struct snd_efw *efw = entry->private_data; proc_read_queues_state()
199 struct snd_info_entry *entry; add_node() local
201 entry = snd_info_create_card_entry(efw->card, name, root); add_node()
202 if (entry == NULL) add_node()
205 snd_info_set_text_ops(entry, efw, op); add_node()
206 if (snd_info_register(entry) < 0) add_node()
207 snd_info_free_entry(entry); add_node()
/linux-4.4.14/sound/usb/
H A Dproc.c46 static void proc_audio_usbbus_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) proc_audio_usbbus_read() argument
48 struct snd_usb_audio *chip = entry->private_data; proc_audio_usbbus_read()
53 static void proc_audio_usbid_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) proc_audio_usbid_read() argument
55 struct snd_usb_audio *chip = entry->private_data; proc_audio_usbid_read()
64 struct snd_info_entry *entry; snd_usb_audio_create_proc() local
65 if (!snd_card_proc_new(chip->card, "usbbus", &entry)) snd_usb_audio_create_proc()
66 snd_info_set_text_ops(entry, chip, proc_audio_usbbus_read); snd_usb_audio_create_proc()
67 if (!snd_card_proc_new(chip->card, "usbid", &entry)) snd_usb_audio_create_proc()
68 snd_info_set_text_ops(entry, chip, proc_audio_usbid_read); snd_usb_audio_create_proc()
150 static void proc_pcm_format_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) proc_pcm_format_read() argument
152 struct snd_usb_stream *stream = entry->private_data; proc_pcm_format_read()
170 struct snd_info_entry *entry; snd_usb_proc_pcm_format_add() local
175 if (!snd_card_proc_new(card, name, &entry)) snd_usb_proc_pcm_format_add()
176 snd_info_set_text_ops(entry, stream, proc_pcm_format_read); snd_usb_proc_pcm_format_add()
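The three sound drivers above all wire up read-only proc nodes the same way: snd_card_proc_new() creates the entry and snd_info_set_text_ops() attaches a callback that snd_iprintf()s into the buffer. A minimal in-kernel sketch of that wiring, modeled on the quoted handlers; my_chip and my_proc_read are hypothetical.

    #include <sound/core.h>
    #include <sound/info.h>

    struct my_chip {
        struct snd_card *card;
        int state;
    };

    static void my_proc_read(struct snd_info_entry *entry,
                             struct snd_info_buffer *buffer)
    {
        struct my_chip *chip = entry->private_data;

        snd_iprintf(buffer, "state: %d\n", chip->state);
    }

    static void my_create_proc(struct my_chip *chip)
    {
        /* snd_card_proc_new() returns 0 on success, as in the drivers above */
        struct snd_info_entry *entry;

        if (!snd_card_proc_new(chip->card, "state", &entry))
            snd_info_set_text_ops(entry, chip, my_proc_read);
    }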
/linux-4.4.14/tools/testing/selftests/vm/
H A Dcompaction_test.c158 struct map_list *list, *entry; main() local
196 entry = malloc(sizeof(struct map_list)); main()
197 if (!entry) { main()
201 entry->map = map; main()
202 entry->next = list; main()
203 list = entry; main()
214 for (entry = list; entry != NULL; entry = entry->next) { main()
215 munmap(entry->map, MAP_SIZE); main()
216 if (!entry->next) main()
218 entry = entry->next; main()
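The teardown loop above (test lines 214-218) appears to advance entry twice per iteration, once in the for clause and once in the body, so alternate mappings are never passed to munmap(). A sketch of a traversal that visits and releases every node exactly once; MAP_SIZE here is a hypothetical stand-in for the test's own define.

    #include <stdlib.h>
    #include <sys/mman.h>

    #define MAP_SIZE (100 * 1024 * 1024)   /* hypothetical; mirrors the test */

    struct map_list { void *map; struct map_list *next; };

    static void free_maps(struct map_list *list)
    {
        while (list) {
            struct map_list *next = list->next;

            munmap(list->map, MAP_SIZE);
            free(list);                    /* release the node as well */
            list = next;
        }
    }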
/linux-4.4.14/security/tomoyo/
H A Ddomain.c17 * tomoyo_update_policy - Update an entry for exception policy.
22 * @check_duplicate: Callback function to find duplicated entry.
36 struct tomoyo_acl_head *entry; tomoyo_update_policy() local
41 list_for_each_entry_rcu(entry, list, list) { list_for_each_entry_rcu()
42 if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) list_for_each_entry_rcu()
44 if (!check_duplicate(entry, new_entry)) list_for_each_entry_rcu()
46 entry->is_deleted = param->is_delete; list_for_each_entry_rcu()
51 entry = tomoyo_commit_ok(new_entry, size);
52 if (entry) {
53 list_add_tail_rcu(&entry->list, list);
62 * tomoyo_same_acl_head - Check for duplicated "struct tomoyo_acl_info" entry.
76 * tomoyo_update_domain - Update an entry for domain policy.
81 * @check_duplicate: Callback function to find duplicated entry.
82 * @merge_duplicate: Callback function to merge duplicated entry.
100 struct tomoyo_acl_info *entry; tomoyo_update_domain() local
119 list_for_each_entry_rcu(entry, list, list) { list_for_each_entry_rcu()
120 if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) list_for_each_entry_rcu()
122 if (!tomoyo_same_acl_head(entry, new_entry) || list_for_each_entry_rcu()
123 !check_duplicate(entry, new_entry)) list_for_each_entry_rcu()
126 entry->is_deleted = merge_duplicate(entry, new_entry, list_for_each_entry_rcu()
129 entry->is_deleted = is_delete; list_for_each_entry_rcu()
134 entry = tomoyo_commit_ok(new_entry, size);
135 if (entry) {
136 list_add_tail_rcu(&entry->list, list);
204 * tomoyo_same_transition_control - Check for duplicated "struct tomoyo_transition_control" entry.
229 * @type: Type of this entry.
361 * tomoyo_same_aggregator - Check for duplicated "struct tomoyo_aggregator" entry.
448 struct tomoyo_policy_namespace *entry; tomoyo_assign_namespace() local
458 entry = kzalloc(sizeof(*entry) + len + 1, GFP_NOFS); tomoyo_assign_namespace()
459 if (!entry) tomoyo_assign_namespace()
464 if (!ptr && tomoyo_memory_ok(entry)) { tomoyo_assign_namespace()
465 char *name = (char *) (entry + 1); tomoyo_assign_namespace()
466 ptr = entry; tomoyo_assign_namespace()
469 entry->name = name; tomoyo_assign_namespace()
470 tomoyo_init_policy_namespace(entry); tomoyo_assign_namespace()
471 entry = NULL; tomoyo_assign_namespace()
475 kfree(entry); tomoyo_assign_namespace()
508 struct tomoyo_domain_info *entry = tomoyo_find_domain(domainname); tomoyo_assign_domain() local
510 if (entry) { tomoyo_assign_domain()
519 !entry->ns->profile_ptr[entry->profile]) tomoyo_assign_domain()
522 return entry; tomoyo_assign_domain()
554 entry = tomoyo_find_domain(domainname); tomoyo_assign_domain()
555 if (!entry) { tomoyo_assign_domain()
556 entry = tomoyo_commit_ok(&e, sizeof(e)); tomoyo_assign_domain()
557 if (entry) { tomoyo_assign_domain()
558 INIT_LIST_HEAD(&entry->acl_info_list); tomoyo_assign_domain()
559 list_add_tail_rcu(&entry->list, &tomoyo_domain_list); tomoyo_assign_domain()
566 if (entry && transit) { tomoyo_assign_domain()
569 tomoyo_init_request_info(&r, entry, tomoyo_assign_domain()
573 entry->profile); tomoyo_assign_domain()
574 tomoyo_write_log(&r, "use_group %u\n", entry->group); tomoyo_assign_domain()
578 return entry; tomoyo_assign_domain()
H A Dgc.c158 struct tomoyo_path_acl *entry tomoyo_del_acl() local
159 = container_of(acl, typeof(*entry), head); tomoyo_del_acl()
160 tomoyo_put_name_union(&entry->name); tomoyo_del_acl()
165 struct tomoyo_path2_acl *entry tomoyo_del_acl() local
166 = container_of(acl, typeof(*entry), head); tomoyo_del_acl()
167 tomoyo_put_name_union(&entry->name1); tomoyo_del_acl()
168 tomoyo_put_name_union(&entry->name2); tomoyo_del_acl()
173 struct tomoyo_path_number_acl *entry tomoyo_del_acl() local
174 = container_of(acl, typeof(*entry), head); tomoyo_del_acl()
175 tomoyo_put_name_union(&entry->name); tomoyo_del_acl()
176 tomoyo_put_number_union(&entry->number); tomoyo_del_acl()
181 struct tomoyo_mkdev_acl *entry tomoyo_del_acl() local
182 = container_of(acl, typeof(*entry), head); tomoyo_del_acl()
183 tomoyo_put_name_union(&entry->name); tomoyo_del_acl()
184 tomoyo_put_number_union(&entry->mode); tomoyo_del_acl()
185 tomoyo_put_number_union(&entry->major); tomoyo_del_acl()
186 tomoyo_put_number_union(&entry->minor); tomoyo_del_acl()
191 struct tomoyo_mount_acl *entry tomoyo_del_acl() local
192 = container_of(acl, typeof(*entry), head); tomoyo_del_acl()
193 tomoyo_put_name_union(&entry->dev_name); tomoyo_del_acl()
194 tomoyo_put_name_union(&entry->dir_name); tomoyo_del_acl()
195 tomoyo_put_name_union(&entry->fs_type); tomoyo_del_acl()
196 tomoyo_put_number_union(&entry->flags); tomoyo_del_acl()
201 struct tomoyo_env_acl *entry = tomoyo_del_acl() local
202 container_of(acl, typeof(*entry), head); tomoyo_del_acl()
204 tomoyo_put_name(entry->env); tomoyo_del_acl()
209 struct tomoyo_inet_acl *entry = tomoyo_del_acl() local
210 container_of(acl, typeof(*entry), head); tomoyo_del_acl()
212 tomoyo_put_group(entry->address.group); tomoyo_del_acl()
213 tomoyo_put_number_union(&entry->port); tomoyo_del_acl()
218 struct tomoyo_unix_acl *entry = tomoyo_del_acl() local
219 container_of(acl, typeof(*entry), head); tomoyo_del_acl()
221 tomoyo_put_name_union(&entry->name); tomoyo_del_acl()
226 struct tomoyo_task_acl *entry = tomoyo_del_acl() local
227 container_of(acl, typeof(*entry), head); tomoyo_del_acl()
228 tomoyo_put_name(entry->domainname); tomoyo_del_acl()
365 * tomoyo_try_to_gc - Try to kfree() an entry.
427 head.list)->entry.name)) tomoyo_try_to_gc()
H A Dmemory.c117 struct tomoyo_group *entry = tomoyo_commit_ok(&e, sizeof(e)); local
118 if (entry) {
119 INIT_LIST_HEAD(&entry->member_list);
120 atomic_set(&entry->head.users, 1);
121 list_add_tail_rcu(&entry->head.list, list);
122 group = entry;
162 if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name) || list_for_each_entry()
170 ptr->entry.name = ((char *) ptr) + sizeof(*ptr);
171 memmove((char *) ptr->entry.name, name, len);
173 tomoyo_fill_path_info(&ptr->entry);
181 return ptr ? &ptr->entry : NULL;
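tomoyo_update_policy() above scans an RCU list for a duplicate and, if one exists, merely flips its is_deleted flag; only when no duplicate is found does it commit and append a new entry. A sketch of that find-or-soft-delete-or-append pattern with the RCU protection and locking elided; all names are hypothetical.

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    struct acl {
        struct acl *next;
        bool is_deleted;
        int key;                           /* stands in for the real ACL head */
    };

    static int update_policy(struct acl **list, const struct acl *new_entry,
                             bool is_delete)
    {
        struct acl *p;

        for (p = *list; p; p = p->next) {
            if (p->key != new_entry->key)  /* check_duplicate() callback */
                continue;
            p->is_deleted = is_delete;     /* soft delete / undelete in place */
            return 0;
        }
        if (is_delete)
            return -1;                     /* nothing to delete */
        p = malloc(sizeof(*p));            /* tomoyo_commit_ok() analogue */
        if (!p)
            return -1;
        memcpy(p, new_entry, sizeof(*p));
        p->is_deleted = false;
        p->next = *list;                   /* push front; kernel appends at tail */
        *list = p;
        return 0;
    }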
/linux-4.4.14/net/mpls/
H A Dinternal.h42 struct mpls_nh { /* next hop label forwarding entry */
71 struct mpls_route { /* next hop label forwarding entry */
113 unsigned entry = be32_to_cpu(hdr->label_stack_entry); mpls_entry_decode() local
115 result.label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT; mpls_entry_decode()
116 result.ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; mpls_entry_decode()
117 result.tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT; mpls_entry_decode()
118 result.bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT; mpls_entry_decode()
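mpls_entry_decode() above splits the 32-bit label stack entry into label, traffic class, bottom-of-stack and TTL with masks and shifts. A sketch using the RFC 3032 field layout, which is what the MPLS_LS_* constants encode; conversion from network byte order is assumed to have happened already.

    #include <stdint.h>

    struct mpls_fields { uint32_t label; uint8_t tc, bos, ttl; };

    static struct mpls_fields mpls_decode(uint32_t entry)  /* host byte order */
    {
        struct mpls_fields f;

        f.label = (entry & 0xFFFFF000u) >> 12;  /* 20-bit label */
        f.tc    = (entry & 0x00000E00u) >> 9;   /* 3-bit traffic class */
        f.bos   = (entry & 0x00000100u) >> 8;   /* bottom-of-stack flag */
        f.ttl   =  entry & 0x000000FFu;         /* 8-bit TTL */
        return f;
    }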
/linux-4.4.14/drivers/net/ethernet/rocker/
H A Drocker.c117 struct hlist_node entry; member in struct:rocker_flow_tbl_entry
126 struct hlist_node entry; member in struct:rocker_group_tbl_entry
152 struct hlist_node entry; member in struct:rocker_fdb_tbl_entry
164 struct hlist_node entry; member in struct:rocker_internal_vlan_tbl_entry
171 struct hlist_node entry; member in struct:rocker_neigh_tbl_entry
1918 const struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_ig_port()
1921 entry->key.ig_port.in_pport)) rocker_cmd_flow_tbl_add_ig_port()
1924 entry->key.ig_port.in_pport_mask)) rocker_cmd_flow_tbl_add_ig_port()
1927 entry->key.ig_port.goto_tbl)) rocker_cmd_flow_tbl_add_ig_port()
1935 const struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_vlan()
1938 entry->key.vlan.in_pport)) rocker_cmd_flow_tbl_add_vlan()
1941 entry->key.vlan.vlan_id)) rocker_cmd_flow_tbl_add_vlan()
1944 entry->key.vlan.vlan_id_mask)) rocker_cmd_flow_tbl_add_vlan()
1947 entry->key.vlan.goto_tbl)) rocker_cmd_flow_tbl_add_vlan()
1949 if (entry->key.vlan.untagged && rocker_cmd_flow_tbl_add_vlan()
1951 entry->key.vlan.new_vlan_id)) rocker_cmd_flow_tbl_add_vlan()
1959 const struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_term_mac()
1962 entry->key.term_mac.in_pport)) rocker_cmd_flow_tbl_add_term_mac()
1965 entry->key.term_mac.in_pport_mask)) rocker_cmd_flow_tbl_add_term_mac()
1968 entry->key.term_mac.eth_type)) rocker_cmd_flow_tbl_add_term_mac()
1971 ETH_ALEN, entry->key.term_mac.eth_dst)) rocker_cmd_flow_tbl_add_term_mac()
1974 ETH_ALEN, entry->key.term_mac.eth_dst_mask)) rocker_cmd_flow_tbl_add_term_mac()
1977 entry->key.term_mac.vlan_id)) rocker_cmd_flow_tbl_add_term_mac()
1980 entry->key.term_mac.vlan_id_mask)) rocker_cmd_flow_tbl_add_term_mac()
1983 entry->key.term_mac.goto_tbl)) rocker_cmd_flow_tbl_add_term_mac()
1985 if (entry->key.term_mac.copy_to_cpu && rocker_cmd_flow_tbl_add_term_mac()
1987 entry->key.term_mac.copy_to_cpu)) rocker_cmd_flow_tbl_add_term_mac()
1995 const struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_ucast_routing()
1998 entry->key.ucast_routing.eth_type)) rocker_cmd_flow_tbl_add_ucast_routing()
2001 entry->key.ucast_routing.dst4)) rocker_cmd_flow_tbl_add_ucast_routing()
2004 entry->key.ucast_routing.dst4_mask)) rocker_cmd_flow_tbl_add_ucast_routing()
2007 entry->key.ucast_routing.goto_tbl)) rocker_cmd_flow_tbl_add_ucast_routing()
2010 entry->key.ucast_routing.group_id)) rocker_cmd_flow_tbl_add_ucast_routing()
2018 const struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_bridge()
2020 if (entry->key.bridge.has_eth_dst && rocker_cmd_flow_tbl_add_bridge()
2022 ETH_ALEN, entry->key.bridge.eth_dst)) rocker_cmd_flow_tbl_add_bridge()
2024 if (entry->key.bridge.has_eth_dst_mask && rocker_cmd_flow_tbl_add_bridge()
2026 ETH_ALEN, entry->key.bridge.eth_dst_mask)) rocker_cmd_flow_tbl_add_bridge()
2028 if (entry->key.bridge.vlan_id && rocker_cmd_flow_tbl_add_bridge()
2030 entry->key.bridge.vlan_id)) rocker_cmd_flow_tbl_add_bridge()
2032 if (entry->key.bridge.tunnel_id && rocker_cmd_flow_tbl_add_bridge()
2034 entry->key.bridge.tunnel_id)) rocker_cmd_flow_tbl_add_bridge()
2037 entry->key.bridge.goto_tbl)) rocker_cmd_flow_tbl_add_bridge()
2040 entry->key.bridge.group_id)) rocker_cmd_flow_tbl_add_bridge()
2042 if (entry->key.bridge.copy_to_cpu && rocker_cmd_flow_tbl_add_bridge()
2044 entry->key.bridge.copy_to_cpu)) rocker_cmd_flow_tbl_add_bridge()
2052 const struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_acl()
2055 entry->key.acl.in_pport)) rocker_cmd_flow_tbl_add_acl()
2058 entry->key.acl.in_pport_mask)) rocker_cmd_flow_tbl_add_acl()
2061 ETH_ALEN, entry->key.acl.eth_src)) rocker_cmd_flow_tbl_add_acl()
2064 ETH_ALEN, entry->key.acl.eth_src_mask)) rocker_cmd_flow_tbl_add_acl()
2067 ETH_ALEN, entry->key.acl.eth_dst)) rocker_cmd_flow_tbl_add_acl()
2070 ETH_ALEN, entry->key.acl.eth_dst_mask)) rocker_cmd_flow_tbl_add_acl()
2073 entry->key.acl.eth_type)) rocker_cmd_flow_tbl_add_acl()
2076 entry->key.acl.vlan_id)) rocker_cmd_flow_tbl_add_acl()
2079 entry->key.acl.vlan_id_mask)) rocker_cmd_flow_tbl_add_acl()
2082 switch (ntohs(entry->key.acl.eth_type)) { rocker_cmd_flow_tbl_add_acl()
2086 entry->key.acl.ip_proto)) rocker_cmd_flow_tbl_add_acl()
2090 entry->key.acl.ip_proto_mask)) rocker_cmd_flow_tbl_add_acl()
2093 entry->key.acl.ip_tos & 0x3f)) rocker_cmd_flow_tbl_add_acl()
2097 entry->key.acl.ip_tos_mask & 0x3f)) rocker_cmd_flow_tbl_add_acl()
2100 (entry->key.acl.ip_tos & 0xc0) >> 6)) rocker_cmd_flow_tbl_add_acl()
2104 (entry->key.acl.ip_tos_mask & 0xc0) >> 6)) rocker_cmd_flow_tbl_add_acl()
2109 if (entry->key.acl.group_id != ROCKER_GROUP_NONE && rocker_cmd_flow_tbl_add_acl()
2111 entry->key.acl.group_id)) rocker_cmd_flow_tbl_add_acl()
2121 const struct rocker_flow_tbl_entry *entry = priv; rocker_cmd_flow_tbl_add() local
2125 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) rocker_cmd_flow_tbl_add()
2131 entry->key.tbl_id)) rocker_cmd_flow_tbl_add()
2134 entry->key.priority)) rocker_cmd_flow_tbl_add()
2139 entry->cookie)) rocker_cmd_flow_tbl_add()
2142 switch (entry->key.tbl_id) { rocker_cmd_flow_tbl_add()
2144 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry); rocker_cmd_flow_tbl_add()
2147 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry); rocker_cmd_flow_tbl_add()
2150 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry); rocker_cmd_flow_tbl_add()
2153 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry); rocker_cmd_flow_tbl_add()
2156 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry); rocker_cmd_flow_tbl_add()
2159 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry); rocker_cmd_flow_tbl_add()
2178 const struct rocker_flow_tbl_entry *entry = priv; rocker_cmd_flow_tbl_del() local
2181 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) rocker_cmd_flow_tbl_del()
2187 entry->cookie)) rocker_cmd_flow_tbl_del()
2196 struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_l2_interface()
2199 ROCKER_GROUP_PORT_GET(entry->group_id))) rocker_cmd_group_tbl_add_l2_interface()
2202 entry->l2_interface.pop_vlan)) rocker_cmd_group_tbl_add_l2_interface()
2210 const struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_l2_rewrite()
2213 entry->l2_rewrite.group_id)) rocker_cmd_group_tbl_add_l2_rewrite()
2215 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) && rocker_cmd_group_tbl_add_l2_rewrite()
2217 ETH_ALEN, entry->l2_rewrite.eth_src)) rocker_cmd_group_tbl_add_l2_rewrite()
2219 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) && rocker_cmd_group_tbl_add_l2_rewrite()
2221 ETH_ALEN, entry->l2_rewrite.eth_dst)) rocker_cmd_group_tbl_add_l2_rewrite()
2223 if (entry->l2_rewrite.vlan_id && rocker_cmd_group_tbl_add_l2_rewrite()
2225 entry->l2_rewrite.vlan_id)) rocker_cmd_group_tbl_add_l2_rewrite()
2233 const struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_group_ids()
2239 entry->group_count)) rocker_cmd_group_tbl_add_group_ids()
2247 for (i = 0; i < entry->group_count; i++) rocker_cmd_group_tbl_add_group_ids()
2249 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i])) rocker_cmd_group_tbl_add_group_ids()
2259 const struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_l3_unicast()
2261 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) && rocker_cmd_group_tbl_add_l3_unicast()
2263 ETH_ALEN, entry->l3_unicast.eth_src)) rocker_cmd_group_tbl_add_l3_unicast()
2265 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) && rocker_cmd_group_tbl_add_l3_unicast()
2267 ETH_ALEN, entry->l3_unicast.eth_dst)) rocker_cmd_group_tbl_add_l3_unicast()
2269 if (entry->l3_unicast.vlan_id && rocker_cmd_group_tbl_add_l3_unicast()
2271 entry->l3_unicast.vlan_id)) rocker_cmd_group_tbl_add_l3_unicast()
2274 entry->l3_unicast.ttl_check)) rocker_cmd_group_tbl_add_l3_unicast()
2277 entry->l3_unicast.group_id)) rocker_cmd_group_tbl_add_l3_unicast()
2287 struct rocker_group_tbl_entry *entry = priv; rocker_cmd_group_tbl_add() local
2291 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) rocker_cmd_group_tbl_add()
2298 entry->group_id)) rocker_cmd_group_tbl_add()
2301 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { rocker_cmd_group_tbl_add()
2303 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry); rocker_cmd_group_tbl_add()
2306 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry); rocker_cmd_group_tbl_add()
2310 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry); rocker_cmd_group_tbl_add()
2313 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry); rocker_cmd_group_tbl_add()
2332 const struct rocker_group_tbl_entry *entry = priv; rocker_cmd_group_tbl_del() local
2335 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) rocker_cmd_group_tbl_del()
2341 entry->group_id)) rocker_cmd_group_tbl_del()
2384 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry) rocker_free_tbls()
2385 hash_del(&flow_entry->entry); rocker_free_tbls()
2389 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry) rocker_free_tbls()
2390 hash_del(&group_entry->entry); rocker_free_tbls()
2394 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry) rocker_free_tbls()
2395 hash_del(&fdb_entry->entry); rocker_free_tbls()
2400 tmp, internal_vlan_entry, entry) rocker_free_tbls()
2401 hash_del(&internal_vlan_entry->entry); rocker_free_tbls()
2405 hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry) rocker_free_tbls()
2406 hash_del(&neigh_entry->entry); rocker_free_tbls()
2418 entry, match->key_crc32) { rocker_flow_tbl_find()
2444 hash_del(&found->entry); rocker_flow_tbl_add()
2455 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32); rocker_flow_tbl_add()
2481 hash_del(&found->entry); rocker_flow_tbl_del()
2501 struct rocker_flow_tbl_entry *entry) rocker_flow_tbl_do()
2504 return rocker_flow_tbl_del(rocker_port, trans, flags, entry); rocker_flow_tbl_do()
2506 return rocker_flow_tbl_add(rocker_port, trans, flags, entry); rocker_flow_tbl_do()
2514 struct rocker_flow_tbl_entry *entry; rocker_flow_tbl_ig_port() local
2516 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); rocker_flow_tbl_ig_port()
2517 if (!entry) rocker_flow_tbl_ig_port()
2520 entry->key.priority = ROCKER_PRIORITY_IG_PORT; rocker_flow_tbl_ig_port()
2521 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; rocker_flow_tbl_ig_port()
2522 entry->key.ig_port.in_pport = in_pport; rocker_flow_tbl_ig_port()
2523 entry->key.ig_port.in_pport_mask = in_pport_mask; rocker_flow_tbl_ig_port()
2524 entry->key.ig_port.goto_tbl = goto_tbl; rocker_flow_tbl_ig_port()
2526 return rocker_flow_tbl_do(rocker_port, trans, flags, entry); rocker_flow_tbl_ig_port()
2536 struct rocker_flow_tbl_entry *entry; rocker_flow_tbl_vlan() local
2538 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); rocker_flow_tbl_vlan()
2539 if (!entry) rocker_flow_tbl_vlan()
2542 entry->key.priority = ROCKER_PRIORITY_VLAN; rocker_flow_tbl_vlan()
2543 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; rocker_flow_tbl_vlan()
2544 entry->key.vlan.in_pport = in_pport; rocker_flow_tbl_vlan()
2545 entry->key.vlan.vlan_id = vlan_id; rocker_flow_tbl_vlan()
2546 entry->key.vlan.vlan_id_mask = vlan_id_mask; rocker_flow_tbl_vlan()
2547 entry->key.vlan.goto_tbl = goto_tbl; rocker_flow_tbl_vlan()
2549 entry->key.vlan.untagged = untagged; rocker_flow_tbl_vlan()
2550 entry->key.vlan.new_vlan_id = new_vlan_id; rocker_flow_tbl_vlan()
2552 return rocker_flow_tbl_do(rocker_port, trans, flags, entry); rocker_flow_tbl_vlan()
2563 struct rocker_flow_tbl_entry *entry; rocker_flow_tbl_term_mac() local
2565 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); rocker_flow_tbl_term_mac()
2566 if (!entry) rocker_flow_tbl_term_mac()
2570 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST; rocker_flow_tbl_term_mac()
2571 entry->key.term_mac.goto_tbl = rocker_flow_tbl_term_mac()
2574 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST; rocker_flow_tbl_term_mac()
2575 entry->key.term_mac.goto_tbl = rocker_flow_tbl_term_mac()
2579 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; rocker_flow_tbl_term_mac()
2580 entry->key.term_mac.in_pport = in_pport; rocker_flow_tbl_term_mac()
2581 entry->key.term_mac.in_pport_mask = in_pport_mask; rocker_flow_tbl_term_mac()
2582 entry->key.term_mac.eth_type = eth_type; rocker_flow_tbl_term_mac()
2583 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); rocker_flow_tbl_term_mac()
2584 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); rocker_flow_tbl_term_mac()
2585 entry->key.term_mac.vlan_id = vlan_id; rocker_flow_tbl_term_mac()
2586 entry->key.term_mac.vlan_id_mask = vlan_id_mask; rocker_flow_tbl_term_mac()
2587 entry->key.term_mac.copy_to_cpu = copy_to_cpu; rocker_flow_tbl_term_mac()
2589 return rocker_flow_tbl_do(rocker_port, trans, flags, entry); rocker_flow_tbl_term_mac()
2599 struct rocker_flow_tbl_entry *entry; rocker_flow_tbl_bridge() local
2605 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); rocker_flow_tbl_bridge()
2606 if (!entry) rocker_flow_tbl_bridge()
2609 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; rocker_flow_tbl_bridge()
2612 entry->key.bridge.has_eth_dst = 1; rocker_flow_tbl_bridge()
2613 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst); rocker_flow_tbl_bridge()
2616 entry->key.bridge.has_eth_dst_mask = 1; rocker_flow_tbl_bridge()
2617 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask); rocker_flow_tbl_bridge()
2636 entry->key.priority = priority; rocker_flow_tbl_bridge()
2637 entry->key.bridge.vlan_id = vlan_id; rocker_flow_tbl_bridge()
2638 entry->key.bridge.tunnel_id = tunnel_id; rocker_flow_tbl_bridge()
2639 entry->key.bridge.goto_tbl = goto_tbl; rocker_flow_tbl_bridge()
2640 entry->key.bridge.group_id = group_id; rocker_flow_tbl_bridge()
2641 entry->key.bridge.copy_to_cpu = copy_to_cpu; rocker_flow_tbl_bridge()
2643 return rocker_flow_tbl_do(rocker_port, trans, flags, entry); rocker_flow_tbl_bridge()
2653 struct rocker_flow_tbl_entry *entry; rocker_flow_tbl_ucast4_routing() local
2655 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); rocker_flow_tbl_ucast4_routing()
2656 if (!entry) rocker_flow_tbl_ucast4_routing()
2659 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; rocker_flow_tbl_ucast4_routing()
2660 entry->key.priority = priority; rocker_flow_tbl_ucast4_routing()
2661 entry->key.ucast_routing.eth_type = eth_type; rocker_flow_tbl_ucast4_routing()
2662 entry->key.ucast_routing.dst4 = dst; rocker_flow_tbl_ucast4_routing()
2663 entry->key.ucast_routing.dst4_mask = dst_mask; rocker_flow_tbl_ucast4_routing()
2664 entry->key.ucast_routing.goto_tbl = goto_tbl; rocker_flow_tbl_ucast4_routing()
2665 entry->key.ucast_routing.group_id = group_id; rocker_flow_tbl_ucast4_routing()
2666 entry->key_len = offsetof(struct rocker_flow_tbl_key, rocker_flow_tbl_ucast4_routing()
2669 return rocker_flow_tbl_do(rocker_port, trans, flags, entry); rocker_flow_tbl_ucast4_routing()
2683 struct rocker_flow_tbl_entry *entry; rocker_flow_tbl_acl() local
2685 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); rocker_flow_tbl_acl()
2686 if (!entry) rocker_flow_tbl_acl()
2697 entry->key.priority = priority; rocker_flow_tbl_acl()
2698 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; rocker_flow_tbl_acl()
2699 entry->key.acl.in_pport = in_pport; rocker_flow_tbl_acl()
2700 entry->key.acl.in_pport_mask = in_pport_mask; rocker_flow_tbl_acl()
2703 ether_addr_copy(entry->key.acl.eth_src, eth_src); rocker_flow_tbl_acl()
2705 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask); rocker_flow_tbl_acl()
2707 ether_addr_copy(entry->key.acl.eth_dst, eth_dst); rocker_flow_tbl_acl()
2709 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask); rocker_flow_tbl_acl()
2711 entry->key.acl.eth_type = eth_type; rocker_flow_tbl_acl()
2712 entry->key.acl.vlan_id = vlan_id; rocker_flow_tbl_acl()
2713 entry->key.acl.vlan_id_mask = vlan_id_mask; rocker_flow_tbl_acl()
2714 entry->key.acl.ip_proto = ip_proto; rocker_flow_tbl_acl()
2715 entry->key.acl.ip_proto_mask = ip_proto_mask; rocker_flow_tbl_acl()
2716 entry->key.acl.ip_tos = ip_tos; rocker_flow_tbl_acl()
2717 entry->key.acl.ip_tos_mask = ip_tos_mask; rocker_flow_tbl_acl()
2718 entry->key.acl.group_id = group_id; rocker_flow_tbl_acl()
2720 return rocker_flow_tbl_do(rocker_port, trans, flags, entry); rocker_flow_tbl_acl()
2730 entry, match->group_id) { rocker_group_tbl_find()
2739 struct rocker_group_tbl_entry *entry) rocker_group_tbl_entry_free()
2741 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { rocker_group_tbl_entry_free()
2744 rocker_port_kfree(trans, entry->group_ids); rocker_group_tbl_entry_free()
2749 rocker_port_kfree(trans, entry); rocker_group_tbl_entry_free()
2766 hash_del(&found->entry); rocker_group_tbl_add()
2776 hash_add(rocker->group_tbl, &found->entry, found->group_id); rocker_group_tbl_add()
2799 hash_del(&found->entry); rocker_group_tbl_del()
2819 struct rocker_group_tbl_entry *entry) rocker_group_tbl_do()
2822 return rocker_group_tbl_del(rocker_port, trans, flags, entry); rocker_group_tbl_do()
2824 return rocker_group_tbl_add(rocker_port, trans, flags, entry); rocker_group_tbl_do()
2832 struct rocker_group_tbl_entry *entry; rocker_group_l2_interface() local
2834 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); rocker_group_l2_interface()
2835 if (!entry) rocker_group_l2_interface()
2838 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); rocker_group_l2_interface()
2839 entry->l2_interface.pop_vlan = pop_vlan; rocker_group_l2_interface()
2841 return rocker_group_tbl_do(rocker_port, trans, flags, entry); rocker_group_l2_interface()
2849 struct rocker_group_tbl_entry *entry; rocker_group_l2_fan_out() local
2851 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); rocker_group_l2_fan_out()
2852 if (!entry) rocker_group_l2_fan_out()
2855 entry->group_id = group_id; rocker_group_l2_fan_out()
2856 entry->group_count = group_count; rocker_group_l2_fan_out()
2858 entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags, rocker_group_l2_fan_out()
2860 if (!entry->group_ids) { rocker_group_l2_fan_out()
2861 rocker_port_kfree(trans, entry); rocker_group_l2_fan_out()
2864 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32)); rocker_group_l2_fan_out()
2866 return rocker_group_tbl_do(rocker_port, trans, flags, entry); rocker_group_l2_fan_out()
2884 struct rocker_group_tbl_entry *entry; rocker_group_l3_unicast() local
2886 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); rocker_group_l3_unicast()
2887 if (!entry) rocker_group_l3_unicast()
2890 entry->group_id = ROCKER_GROUP_L3_UNICAST(index); rocker_group_l3_unicast()
2892 ether_addr_copy(entry->l3_unicast.eth_src, src_mac); rocker_group_l3_unicast()
2894 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac); rocker_group_l3_unicast()
2895 entry->l3_unicast.vlan_id = vlan_id; rocker_group_l3_unicast()
2896 entry->l3_unicast.ttl_check = ttl_check; rocker_group_l3_unicast()
2897 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport); rocker_group_l3_unicast()
2899 return rocker_group_tbl_do(rocker_port, trans, flags, entry); rocker_group_l3_unicast()
2908 entry, be32_to_cpu(ip_addr)) rocker_neigh_tbl_find()
2917 struct rocker_neigh_tbl_entry *entry) _rocker_neigh_add()
2920 entry->index = rocker->neigh_tbl_next_index++; _rocker_neigh_add()
2923 entry->ref_count++; _rocker_neigh_add()
2924 hash_add(rocker->neigh_tbl, &entry->entry, _rocker_neigh_add()
2925 be32_to_cpu(entry->ip_addr)); _rocker_neigh_add()
2929 struct rocker_neigh_tbl_entry *entry) _rocker_neigh_del()
2933 if (--entry->ref_count == 0) { _rocker_neigh_del()
2934 hash_del(&entry->entry); _rocker_neigh_del()
2935 rocker_port_kfree(trans, entry); _rocker_neigh_del()
2939 static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry, _rocker_neigh_update() argument
2944 ether_addr_copy(entry->eth_dst, eth_dst); _rocker_neigh_update()
2945 entry->ttl_check = ttl_check; _rocker_neigh_update()
2947 entry->ref_count++; _rocker_neigh_update()
2956 struct rocker_neigh_tbl_entry *entry; rocker_port_ipv4_neigh() local
2969 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); rocker_port_ipv4_neigh()
2970 if (!entry) rocker_port_ipv4_neigh()
2982 entry->ip_addr = ip_addr; rocker_port_ipv4_neigh()
2983 entry->dev = rocker_port->dev; rocker_port_ipv4_neigh()
2984 ether_addr_copy(entry->eth_dst, eth_dst); rocker_port_ipv4_neigh()
2985 entry->ttl_check = true; rocker_port_ipv4_neigh()
2986 _rocker_neigh_add(rocker, trans, entry); rocker_port_ipv4_neigh()
2988 memcpy(entry, found, sizeof(*entry)); rocker_port_ipv4_neigh()
2992 memcpy(entry, found, sizeof(*entry)); rocker_port_ipv4_neigh()
3009 entry->index, rocker_port_ipv4_neigh()
3011 entry->eth_dst, rocker_port_ipv4_neigh()
3013 entry->ttl_check, rocker_port_ipv4_neigh()
3018 err, entry->index); rocker_port_ipv4_neigh()
3023 group_id = ROCKER_GROUP_L3_UNICAST(entry->index); rocker_port_ipv4_neigh()
3033 err, &entry->ip_addr, group_id); rocker_port_ipv4_neigh()
3038 rocker_port_kfree(trans, entry); rocker_port_ipv4_neigh()
3058 * install the entry, otherwise start the ARP process to rocker_port_ipv4_resolve()
3077 struct rocker_neigh_tbl_entry *entry; rocker_port_ipv4_nh() local
3086 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); rocker_port_ipv4_nh()
3087 if (!entry) rocker_port_ipv4_nh()
3101 entry->ip_addr = ip_addr; rocker_port_ipv4_nh()
3102 entry->dev = rocker_port->dev; rocker_port_ipv4_nh()
3103 _rocker_neigh_add(rocker, trans, entry); rocker_port_ipv4_nh()
3104 *index = entry->index; rocker_port_ipv4_nh()
3118 rocker_port_kfree(trans, entry); rocker_port_ipv4_nh()
3510 "Error (%d) ingress port table entry\n", err); rocker_port_ig_tbl()
3604 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32) rocker_fdb_tbl_find()
3642 hash_del(&found->entry); rocker_port_fdb()
3646 hash_add(rocker->fdb_tbl, &fdb->entry, rocker_port_fdb()
3682 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { rocker_port_fdb_flush()
3693 hash_del(&found->entry); rocker_port_fdb_flush()
3706 struct rocker_fdb_tbl_entry *entry; rocker_fdb_cleanup() local
3717 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) { rocker_fdb_cleanup()
3718 if (!entry->learned) rocker_fdb_cleanup()
3720 rocker_port = entry->key.rocker_port; rocker_fdb_cleanup()
3721 expires = entry->touched + rocker_port->ageing_time; rocker_fdb_cleanup()
3724 flags, entry->key.addr, rocker_fdb_cleanup()
3725 entry->key.vlan_id); rocker_fdb_cleanup()
3726 hash_del(&entry->entry); rocker_fdb_cleanup()
3909 entry, ifindex) { rocker_internal_vlan_tbl_find()
3921 struct rocker_internal_vlan_tbl_entry *entry; rocker_port_internal_vlan_id_get() local
3926 entry = kzalloc(sizeof(*entry), GFP_KERNEL); rocker_port_internal_vlan_id_get()
3927 if (!entry) rocker_port_internal_vlan_id_get()
3930 entry->ifindex = ifindex; rocker_port_internal_vlan_id_get()
3936 kfree(entry); rocker_port_internal_vlan_id_get()
3940 found = entry; rocker_port_internal_vlan_id_get()
3941 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex); rocker_port_internal_vlan_id_get()
3981 hash_del(&found->entry); rocker_port_internal_vlan_id_put()
4577 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { rocker_port_fdb_dump()
5097 rocker->msix_entries[i].entry = i; rocker_msix_init()
1917 rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, const struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_ig_port() argument
1934 rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, const struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_vlan() argument
1958 rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, const struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_term_mac() argument
1994 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info, const struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_ucast_routing() argument
2017 rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, const struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_bridge() argument
2051 rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, const struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_acl() argument
2195 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_l2_interface() argument
2209 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info, const struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_l2_rewrite() argument
2232 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info, const struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_group_ids() argument
2258 rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, const struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_l3_unicast() argument
2499 rocker_flow_tbl_do(struct rocker_port *rocker_port, struct switchdev_trans *trans, int flags, struct rocker_flow_tbl_entry *entry) rocker_flow_tbl_do() argument
2738 rocker_group_tbl_entry_free(struct switchdev_trans *trans, struct rocker_group_tbl_entry *entry) rocker_group_tbl_entry_free() argument
2817 rocker_group_tbl_do(struct rocker_port *rocker_port, struct switchdev_trans *trans, int flags, struct rocker_group_tbl_entry *entry) rocker_group_tbl_do() argument
2915 _rocker_neigh_add(struct rocker *rocker, struct switchdev_trans *trans, struct rocker_neigh_tbl_entry *entry) _rocker_neigh_add() argument
2928 _rocker_neigh_del(struct switchdev_trans *trans, struct rocker_neigh_tbl_entry *entry) _rocker_neigh_del() argument
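rocker above keys its flow, group and FDB tables on a CRC32 of the lookup key and chains entries in a hashtable; a hit must match both the cached CRC and the full key, and an add replaces a colliding entry in place (hash_del() then hash_add()). A sketch of the lookup half with hypothetical structures; the kernel iterates with hash_for_each_possible().

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define TBL_BITS 8
    #define TBL_SIZE (1u << TBL_BITS)

    struct flow {
        struct flow *next;                 /* bucket chain */
        uint32_t key_crc32;                /* cached CRC of key[] */
        uint8_t key[32];
    };

    static struct flow *tbl[TBL_SIZE];

    static struct flow *flow_find(const uint8_t *key, uint32_t crc)
    {
        struct flow *f;

        for (f = tbl[crc & (TBL_SIZE - 1)]; f; f = f->next)
            if (f->key_crc32 == crc &&
                memcmp(f->key, key, sizeof(f->key)) == 0)
                return f;                  /* CRC and full key both match */
        return NULL;
    }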
/linux-4.4.14/fs/nfs_common/
H A Dnfsacl.c11 * - Minimal ACLs always have an ACL_MASK entry, so they have
13 * - The ACL_MASK entry in such minimal ACLs always has the same
14 * permissions as the ACL_GROUP_OBJ entry. (In extended ACLs
54 struct posix_acl_entry *entry = xdr_nfsace_encode() local
57 *p++ = htonl(entry->e_tag | nfsacl_desc->typeflag); xdr_nfsace_encode()
58 switch(entry->e_tag) { xdr_nfsace_encode()
66 *p++ = htonl(from_kuid(&init_user_ns, entry->e_uid)); xdr_nfsace_encode()
69 *p++ = htonl(from_kgid(&init_user_ns, entry->e_gid)); xdr_nfsace_encode()
75 *p++ = htonl(entry->e_perm & S_IRWXO); xdr_nfsace_encode()
150 struct posix_acl_entry *entry; xdr_nfsace_decode() local
162 entry = &nfsacl_desc->acl->a_entries[nfsacl_desc->count++]; xdr_nfsace_decode()
163 entry->e_tag = ntohl(*p++) & ~NFS_ACL_DEFAULT; xdr_nfsace_decode()
165 entry->e_perm = ntohl(*p++); xdr_nfsace_decode()
167 switch(entry->e_tag) { xdr_nfsace_decode()
169 entry->e_uid = make_kuid(&init_user_ns, id); xdr_nfsace_decode()
170 if (!uid_valid(entry->e_uid)) xdr_nfsace_decode()
174 entry->e_gid = make_kgid(&init_user_ns, id); xdr_nfsace_decode()
175 if (!gid_valid(entry->e_gid)) xdr_nfsace_decode()
181 if (entry->e_perm & ~S_IRWXO) xdr_nfsace_decode()
186 entry->e_perm &= S_IRWXO; xdr_nfsace_decode()
246 /* remove bogus ACL_MASK entry */
/linux-4.4.14/include/sound/
H A Dinfo.h45 void (*read)(struct snd_info_entry *entry,
47 void (*write)(struct snd_info_entry *entry,
52 int (*open)(struct snd_info_entry *entry,
54 int (*release)(struct snd_info_entry *entry,
56 ssize_t (*read)(struct snd_info_entry *entry, void *file_private_data,
59 ssize_t (*write)(struct snd_info_entry *entry, void *file_private_data,
62 loff_t (*llseek)(struct snd_info_entry *entry,
65 unsigned int (*poll)(struct snd_info_entry *entry,
68 int (*ioctl)(struct snd_info_entry *entry, void *file_private_data,
70 int (*mmap)(struct snd_info_entry *entry, void *file_private_data,
88 void (*private_free)(struct snd_info_entry *entry);
136 void snd_info_free_entry(struct snd_info_entry *entry); snd_card_info_read_oss()
137 int snd_info_store_text(struct snd_info_entry *entry); snd_card_info_read_oss()
138 int snd_info_restore_text(struct snd_info_entry *entry); snd_card_info_read_oss()
145 int snd_info_register(struct snd_info_entry *entry); snd_card_info_read_oss()
155 static inline void snd_info_set_text_ops(struct snd_info_entry *entry, snd_info_set_text_ops() argument
159 entry->private_data = private_data; snd_info_set_text_ops()
160 entry->c.text.read = read; snd_info_set_text_ops()
178 static inline void snd_info_free_entry(struct snd_info_entry *entry) { ; } snd_info_free_entry() argument
185 static inline int snd_info_register(struct snd_info_entry *entry) { return 0; } snd_info_register() argument
189 static inline void snd_info_set_text_ops(struct snd_info_entry *entry __attribute__((unused)), snd_info_set_text_ops()
/linux-4.4.14/arch/s390/pci/
H A Dpci_dma.c29 unsigned long *table, *entry; dma_alloc_cpu_table() local
35 for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++) dma_alloc_cpu_table()
36 *entry = ZPCI_TABLE_INVALID; dma_alloc_cpu_table()
47 unsigned long *table, *entry; dma_alloc_page_table() local
53 for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++) dma_alloc_page_table()
54 *entry = ZPCI_PTE_INVALID; dma_alloc_page_table()
63 static unsigned long *dma_get_seg_table_origin(unsigned long *entry) dma_get_seg_table_origin() argument
67 if (reg_entry_isvalid(*entry)) dma_get_seg_table_origin()
68 sto = get_rt_sto(*entry); dma_get_seg_table_origin()
74 set_rt_sto(entry, sto); dma_get_seg_table_origin()
75 validate_rt_entry(entry); dma_get_seg_table_origin()
76 entry_clr_protected(entry); dma_get_seg_table_origin()
81 static unsigned long *dma_get_page_table_origin(unsigned long *entry) dma_get_page_table_origin() argument
85 if (reg_entry_isvalid(*entry)) dma_get_page_table_origin()
86 pto = get_st_pto(*entry); dma_get_page_table_origin()
91 set_st_pto(entry, pto); dma_get_page_table_origin()
92 validate_st_entry(entry); dma_get_page_table_origin()
93 entry_clr_protected(entry); dma_get_page_table_origin()
117 void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags) dma_update_cpu_trans() argument
120 invalidate_pt_entry(entry); dma_update_cpu_trans()
122 set_pt_pfaa(entry, page_addr); dma_update_cpu_trans()
123 validate_pt_entry(entry); dma_update_cpu_trans()
127 entry_set_protected(entry); dma_update_cpu_trans()
129 entry_clr_protected(entry); dma_update_cpu_trans()
139 unsigned long *entry; dma_update_trans() local
152 entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); dma_update_trans()
153 if (!entry) { dma_update_trans()
157 dma_update_cpu_trans(entry, page_addr, flags); dma_update_trans()
182 entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); dma_update_trans()
183 if (!entry) dma_update_trans()
185 dma_update_cpu_trans(entry, page_addr, flags); dma_update_trans()
194 void dma_free_seg_table(unsigned long entry) dma_free_seg_table() argument
196 unsigned long *sto = get_rt_sto(entry); dma_free_seg_table()
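dma_get_seg_table_origin() and dma_get_page_table_origin() above share one pattern: if the entry is already valid, reuse the next-level table it points at; otherwise allocate one, install it, and validate the entry. A minimal sketch with invented helpers (alloc_table() stands in for dma_alloc_cpu_table()/dma_alloc_page_table(), and a zero word stands in for the INVALID markers):

    #define INVALID_ENTRY 0UL

    extern unsigned long *alloc_table(void);   /* hypothetical allocator */

    static unsigned long *get_or_alloc_next(unsigned long *entry)
    {
        unsigned long *next;

        if (*entry != INVALID_ENTRY)            /* reg_entry_isvalid() */
            return (unsigned long *)*entry;     /* get_rt_sto()/get_st_pto() */

        next = alloc_table();
        if (next)
            *entry = (unsigned long)next;       /* set + validate in one go */
        return next;                            /* NULL if allocation failed */
    }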
/linux-4.4.14/arch/powerpc/oprofile/cell/
H A D pr_util.h
52 * The guard pointer is an entry in the _ovly_buf_table,
55 * entry in the _ovly_buf_table, the computation subtracts 1
57 * The guard value is stored in the _ovly_buf_table entry and
58 * is an index (starting at 1) back to the _ovly_table entry
59 * that is pointing at this _ovly_buf_table entry. So, for
62 * - Section 1 points to the first entry of the
64 * of '1', referencing the first (index=0) entry of
66 * - Section 2 points to the second entry of the
68 * of '2', referencing the second (index=1) entry of
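The comment above describes two tables that reference each other with 1-based indices: each _ovly_table entry names an _ovly_buf_table slot, and the guard value stored in that slot points back at the referencing section. A minimal consistency check under those assumptions (the array names are invented):

    /* Hypothetical stand-ins: buf ids from _ovly_table and the guard
     * words from _ovly_buf_table, both 1-based as described above. */
    static unsigned int ovly_buf_of[] = { 1, 2 };
    static unsigned int guard_of[]    = { 1, 2 };

    static int tables_consistent(unsigned int section)
    {
        unsigned int buf = ovly_buf_of[section] - 1;  /* 1-based -> 0-based */

        /* The guard must point back (again 1-based) at the section
         * whose _ovly_table entry references this buffer slot. */
        return guard_of[buf] == section + 1;
    }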
/linux-4.4.14/include/linux/netfilter_arp/
H A D arp_tables.h
17 /* Standard entry. */
19 struct arpt_entry entry; member in struct:arpt_standard
24 struct arpt_entry entry; member in struct:arpt_error
36 .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_standard)), \
44 .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_error)), \
/linux-4.4.14/include/linux/netfilter_ipv4/
H A D ip_tables.h
32 /* Standard entry. */
34 struct ipt_entry entry; member in struct:ipt_standard
39 struct ipt_entry entry; member in struct:ipt_error
51 .entry = IPT_ENTRY_INIT(sizeof(struct ipt_standard)), \
59 .entry = IPT_ENTRY_INIT(sizeof(struct ipt_error)), \
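Both ARPT_ENTRY_INIT and IPT_ENTRY_INIT above encode the same layout trick: target_offset marks where the built-in target begins inside the entry blob, and next_offset marks where the following entry starts. A simplified stand-in (not the uapi types) showing what such an initializer expands to:

    struct fake_entry {                /* simplified stand-in */
        unsigned short target_offset;  /* start of the target in the blob */
        unsigned short next_offset;    /* start of the following entry */
    };

    #define FAKE_ENTRY_INIT(size)                          \
    {                                                      \
        .target_offset = sizeof(struct fake_entry),        \
        .next_offset   = (size),                           \
    }

    /* Usage: an entry whose 8-byte target immediately follows it. */
    static struct fake_entry e =
        FAKE_ENTRY_INIT(sizeof(struct fake_entry) + 8);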
/linux-4.4.14/drivers/net/ethernet/natsemi/
H A D sonic.c
206 int entry = lp->next_tx; sonic_send_packet() local
229 sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ sonic_send_packet()
230 sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */ sonic_send_packet()
231 sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */ sonic_send_packet()
232 sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff); sonic_send_packet()
233 sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16); sonic_send_packet()
234 sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length); sonic_send_packet()
235 sonic_tda_put(dev, entry, SONIC_TD_LINK, sonic_send_packet()
236 sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL); sonic_send_packet()
239 * Must set tx_skb[entry] only after clearing status, and sonic_send_packet()
243 lp->tx_len[entry] = length; sonic_send_packet()
244 lp->tx_laddr[entry] = laddr; sonic_send_packet()
245 lp->tx_skb[entry] = skb; sonic_send_packet()
250 lp->eol_tx = entry; sonic_send_packet()
252 lp->next_tx = (entry + 1) & SONIC_TDS_MASK; sonic_send_packet()
291 int entry = lp->cur_tx; sonic_interrupt() local
296 * unallocated/freed (status set & tx_skb[entry] clear) sonic_interrupt()
297 * allocated and sent (status set & tx_skb[entry] set ) sonic_interrupt()
298 * allocated and not yet sent (status clear & tx_skb[entry] set ) sonic_interrupt()
299 * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear) sonic_interrupt()
305 while (lp->tx_skb[entry] != NULL) { sonic_interrupt()
306 if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0) sonic_interrupt()
311 lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE); sonic_interrupt()
325 dev_kfree_skb_irq(lp->tx_skb[entry]); sonic_interrupt()
326 lp->tx_skb[entry] = NULL; sonic_interrupt()
328 dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE); sonic_interrupt()
329 lp->tx_laddr[entry] = (dma_addr_t)0; sonic_interrupt()
332 if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) { sonic_interrupt()
333 entry = (entry + 1) & SONIC_TDS_MASK; sonic_interrupt()
336 entry = (entry + 1) & SONIC_TDS_MASK; sonic_interrupt()
339 if (freed_some || lp->tx_skb[entry] == NULL) sonic_interrupt()
341 lp->cur_tx = entry; sonic_interrupt()
412 int entry = lp->cur_rx; sonic_rx() local
414 while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) { sonic_rx()
422 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); sonic_rx()
444 dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE); sonic_rx()
445 used_skb = lp->rx_skb[entry]; sonic_rx()
446 pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN); sonic_rx()
454 lp->rx_laddr[entry] = new_laddr; sonic_rx()
455 lp->rx_skb[entry] = new_skb; sonic_rx()
459 sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l); sonic_rx()
460 sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h); sonic_rx()
488 sonic_rda_put(dev, entry, SONIC_RD_LINK, sonic_rx()
489 sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL); sonic_rx()
490 sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1); sonic_rx()
493 lp->eol_rx = entry; sonic_rx()
494 lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK; sonic_rx()
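The sonic_interrupt() comments above enumerate the four descriptor states from the (status, tx_skb[entry]) pair, and both rings advance with (entry + 1) & MASK, which only works because the ring sizes are powers of two. A compact sketch of that reclaim rule with hypothetical ring arrays:

    #define NUM_TDS  16
    #define TDS_MASK (NUM_TDS - 1)      /* valid: NUM_TDS is a power of 2 */

    static void *tx_skb[NUM_TDS];       /* hypothetical ring state */
    static int   td_status[NUM_TDS];

    /* Walk forward from cur, releasing every slot that is both
     * allocated (skb recorded) and sent (status written by the chip). */
    static int reclaim_tx(int cur)
    {
        while (tx_skb[cur] != NULL && td_status[cur] != 0) {
            tx_skb[cur]    = NULL;      /* free the slot */
            td_status[cur] = 0;
            cur = (cur + 1) & TDS_MASK; /* wrap without a divide */
        }
        return cur;                     /* first unfinished slot */
    }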
/linux-4.4.14/net/dccp/
H A D feat.c
282 static void dccp_feat_print_entry(struct dccp_feat_entry const *entry) dccp_feat_print_entry() argument
284 dccp_debug(" * %s %s = ", entry->is_local ? "local" : "remote", dccp_feat_print_entry()
285 dccp_feat_fname(entry->feat_num)); dccp_feat_print_entry()
286 dccp_feat_printval(entry->feat_num, &entry->val); dccp_feat_print_entry()
287 dccp_pr_debug_cat(", state=%s %s\n", dccp_feat_sname[entry->state], dccp_feat_print_entry()
288 entry->needs_confirm ? "(Confirm pending)" : ""); dccp_feat_print_entry()
414 static void dccp_feat_entry_destructor(struct dccp_feat_entry *entry) dccp_feat_entry_destructor() argument
416 if (entry != NULL) { dccp_feat_entry_destructor()
417 dccp_feat_val_destructor(entry->feat_num, &entry->val); dccp_feat_entry_destructor()
418 kfree(entry); dccp_feat_entry_destructor()
434 struct dccp_feat_entry *entry; dccp_feat_list_lookup() local
436 list_for_each_entry(entry, fn_list, node) { list_for_each_entry()
437 if (entry->feat_num == feat_num && entry->is_local == is_local) list_for_each_entry()
438 return entry; list_for_each_entry()
439 else if (entry->feat_num > feat_num) list_for_each_entry()
456 struct dccp_feat_entry *entry; dccp_feat_entry_new() local
458 list_for_each_entry(entry, head, node) list_for_each_entry()
459 if (entry->feat_num == feat && entry->is_local == local) { list_for_each_entry()
460 dccp_feat_val_destructor(entry->feat_num, &entry->val); list_for_each_entry()
461 return entry; list_for_each_entry()
462 } else if (entry->feat_num > feat) {
463 head = &entry->node;
467 entry = kmalloc(sizeof(*entry), gfp_any());
468 if (entry != NULL) {
469 entry->feat_num = feat;
470 entry->is_local = local;
471 list_add_tail(&entry->node, head);
473 return entry;
504 * dccp_feat_push_confirm - Add a Confirm entry to the FN list
538 static inline void dccp_feat_list_pop(struct dccp_feat_entry *entry) dccp_feat_list_pop() argument
540 list_del(&entry->node); dccp_feat_list_pop()
541 dccp_feat_entry_destructor(entry); dccp_feat_list_pop()
546 struct dccp_feat_entry *entry, *next; dccp_feat_list_purge() local
548 list_for_each_entry_safe(entry, next, fn_list, node) dccp_feat_list_purge()
549 dccp_feat_entry_destructor(entry); dccp_feat_list_purge()
557 struct dccp_feat_entry *entry, *new; dccp_feat_clone_list() local
560 list_for_each_entry(entry, from, node) { list_for_each_entry()
561 new = dccp_feat_clone_entry(entry); list_for_each_entry()
775 struct dccp_feat_entry *entry; dccp_feat_nn_get() local
777 entry = dccp_feat_list_lookup(&dp->dccps_featneg, feat, 1); dccp_feat_nn_get()
778 if (entry != NULL) dccp_feat_nn_get()
779 return entry->val.nn; dccp_feat_nn_get()
805 struct dccp_feat_entry *entry; dccp_feat_signal_nn_change() local
817 entry = dccp_feat_list_lookup(fn, feat, 1); dccp_feat_signal_nn_change()
818 if (entry != NULL) { dccp_feat_signal_nn_change()
819 dccp_pr_debug("Clobbering existing NN entry %llu -> %llu\n", dccp_feat_signal_nn_change()
820 (unsigned long long)entry->val.nn, dccp_feat_signal_nn_change()
822 dccp_feat_list_pop(entry); dccp_feat_signal_nn_change()
971 struct dccp_feat_entry *entry; dccp_feat_finalise_settings() local
983 list_for_each_entry(entry, fn, node) dccp_feat_finalise_settings()
984 if (entry->feat_num == DCCPF_CCID && entry->val.sp.len == 1) dccp_feat_finalise_settings()
985 ccids[entry->is_local] = entry->val.sp.vec[0]; dccp_feat_finalise_settings()
1001 struct dccp_feat_entry *entry; dccp_feat_server_ccid_dependencies() local
1005 entry = dccp_feat_list_lookup(fn, DCCPF_CCID, is_local); dccp_feat_server_ccid_dependencies()
1007 if (entry != NULL && !entry->empty_confirm) dccp_feat_server_ccid_dependencies()
1008 ccid = entry->val.sp.vec[0]; dccp_feat_server_ccid_dependencies()
1018 /* Select the first entry in @servlist that also occurs in @clilist (6.3.1) */ dccp_feat_preflist_match()
1031 * dccp_feat_prefer - Move preferred entry to the start of array
1059 * A value of 0 means that negotiation failed (no shared entry).
1102 struct dccp_feat_entry *entry; dccp_feat_change_recv() local
1129 entry = dccp_feat_list_lookup(fn, feat, local); dccp_feat_change_recv()
1130 if (entry == NULL) { dccp_feat_change_recv()
1161 } else if (entry->state == FEAT_UNSTABLE) { /* 6.6.2 */ dccp_feat_change_recv()
1165 if (dccp_feat_reconcile(&entry->val, val, len, server, true)) { dccp_feat_change_recv()
1166 entry->empty_confirm = false; dccp_feat_change_recv()
1169 } else if (entry->state == FEAT_INITIALISING) { dccp_feat_change_recv()
1172 * the connection by checking whether entry contains the default dccp_feat_change_recv()
1180 if (!dccp_feat_reconcile(&entry->val, &defval, 1, server, true)) dccp_feat_change_recv()
1182 entry->empty_confirm = true; dccp_feat_change_recv()
1184 entry->needs_confirm = true; dccp_feat_change_recv()
1185 entry->needs_mandatory = false; dccp_feat_change_recv()
1186 entry->state = FEAT_STABLE; dccp_feat_change_recv()
1213 struct dccp_feat_entry *entry = dccp_feat_list_lookup(fn, feat, local); dccp_feat_confirm_recv() local
1217 if (entry == NULL) { /* nothing queued: ignore or handle error */ dccp_feat_confirm_recv()
1226 if (entry->state != FEAT_CHANGING) /* 6.6.2 */ dccp_feat_confirm_recv()
1237 * entry from the list. dccp_feat_confirm_recv()
1239 dccp_feat_list_pop(entry); dccp_feat_confirm_recv()
1244 if (len > sizeof(entry->val.nn)) dccp_feat_confirm_recv()
1247 if (entry->val.nn == dccp_decode_value_var(val, len)) dccp_feat_confirm_recv()
1271 if (dccp_feat_reconcile(&entry->val, plist, plen, server, 0) != *val) { dccp_feat_confirm_recv()
1275 entry->val.sp.vec[0] = *val; dccp_feat_confirm_recv()
1278 entry->state = FEAT_STABLE; dccp_feat_confirm_recv()
1310 struct dccp_feat_entry *entry; dccp_feat_handle_nn_established() local
1347 entry = dccp_feat_list_lookup(fn, feat, local); dccp_feat_handle_nn_established()
1348 if (entry == NULL || entry->state != FEAT_CHANGING) dccp_feat_handle_nn_established()
1358 if (fval.nn != entry->val.nn) dccp_feat_handle_nn_established()
1364 /* It has been confirmed - so remove the entry */ dccp_feat_handle_nn_established()
1365 dccp_feat_list_pop(entry); dccp_feat_handle_nn_established()
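dccp_feat_preflist_match(), referenced above, implements the RFC 4340, 6.3.1 rule: pick the first value on the server's preference list that the client also lists. A user-space sketch of that selection, returning -1 where the kernel signals "no shared entry":

    #include <stdint.h>

    /* First server-preferred value that the client also offers;
     * -1 means negotiation failed (no shared entry). */
    static int preflist_match(const uint8_t *serv, int slen,
                              const uint8_t *cli, int clen)
    {
        int s, c;

        for (s = 0; s < slen; s++)
            for (c = 0; c < clen; c++)
                if (serv[s] == cli[c])
                    return serv[s];     /* server's order wins */
        return -1;
    }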
/linux-4.4.14/arch/x86/platform/uv/
H A D uv_irq.c
30 struct uv_IO_APIC_route_entry *entry; uv_program_mmr() local
36 entry = (struct uv_IO_APIC_route_entry *)&mmr_value; uv_program_mmr()
37 entry->vector = cfg->vector; uv_program_mmr()
38 entry->delivery_mode = apic->irq_delivery_mode; uv_program_mmr()
39 entry->dest_mode = apic->irq_dest_mode; uv_program_mmr()
40 entry->polarity = 0; uv_program_mmr()
41 entry->trigger = 0; uv_program_mmr()
42 entry->mask = 0; uv_program_mmr()
43 entry->dest = cfg->dest_apicid; uv_program_mmr()
144 struct uv_IO_APIC_route_entry *entry; uv_domain_deactivate() local
147 entry = (struct uv_IO_APIC_route_entry *)&mmr_value; uv_domain_deactivate()
148 entry->mask = 1; uv_domain_deactivate()
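uv_program_mmr() above builds the 64-bit MMR word by pointing a struct uv_IO_APIC_route_entry at a local u64 and filling in bitfields. A sketch of that idiom with a hypothetical layout (the real field widths live in the UV headers):

    #include <stdint.h>

    struct route {                /* hypothetical bitfield layout */
        uint64_t vector   : 8;
        uint64_t mask     : 1;
        uint64_t reserved : 23;
        uint64_t dest     : 32;
    };

    static uint64_t build_mmr(uint8_t vector, uint32_t apicid)
    {
        uint64_t mmr_value = 0;
        struct route *e = (struct route *)&mmr_value;

        e->vector = vector;
        e->mask   = 0;            /* unmasked: interrupt can fire */
        e->dest   = apicid;
        return mmr_value;         /* written to the MMR in one store */
    }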
/linux-4.4.14/include/uapi/linux/
H A D auxvec.h
9 #define AT_IGNORE 1 /* entry should be ignored */
12 #define AT_PHENT 4 /* size of program header entry */
17 #define AT_ENTRY 9 /* entry point of program */
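These AT_* tags are the kernel side of the ELF auxiliary vector; user space can query the same tags through glibc's getauxval(3). For example:

    #include <stdio.h>
    #include <sys/auxv.h>    /* getauxval(), AT_* via <elf.h> */

    int main(void)
    {
        printf("AT_ENTRY (program entry point): %#lx\n",
               getauxval(AT_ENTRY));
        printf("AT_PHENT (program header entry size): %lu\n",
               getauxval(AT_PHENT));
        return 0;
    }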
/linux-4.4.14/arch/microblaze/include/asm/
H A D entry.h
21 * These are per-cpu variables required in entry.S, among other
30 DECLARE_PER_CPU(unsigned int, ENTRY_SP); /* Saved SP on kernel entry */
31 DECLARE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */
/linux-4.4.14/drivers/net/wireless/iwlwifi/
H A D iwl-phy-db.c
186 struct iwl_phy_db_entry *entry = iwl_phy_db_free_section() local
188 if (!entry) iwl_phy_db_free_section()
191 kfree(entry->data); iwl_phy_db_free_section()
192 entry->data = NULL; iwl_phy_db_free_section()
193 entry->size = 0; iwl_phy_db_free_section()
221 struct iwl_phy_db_entry *entry; iwl_phy_db_set_section() local
231 entry = iwl_phy_db_get_section(phy_db, type, chg_id); iwl_phy_db_set_section()
232 if (!entry) iwl_phy_db_set_section()
235 kfree(entry->data); iwl_phy_db_set_section()
236 entry->data = kmemdup(phy_db_notif->data, size, alloc_ctx); iwl_phy_db_set_section()
237 if (!entry->data) { iwl_phy_db_set_section()
238 entry->size = 0; iwl_phy_db_set_section()
242 entry->size = size; iwl_phy_db_set_section()
316 struct iwl_phy_db_entry *entry; iwl_phy_db_get_section_data() local
328 entry = iwl_phy_db_get_section(phy_db, type, ch_group_id); iwl_phy_db_get_section_data()
329 if (!entry) iwl_phy_db_get_section_data()
332 *data = entry->data; iwl_phy_db_get_section_data()
333 *size = entry->size; iwl_phy_db_get_section_data()
375 struct iwl_phy_db_entry *entry; iwl_phy_db_send_all_channel_groups() local
379 entry = iwl_phy_db_get_section(phy_db, iwl_phy_db_send_all_channel_groups()
382 if (!entry) iwl_phy_db_send_all_channel_groups()
385 if (!entry->size) iwl_phy_db_send_all_channel_groups()
391 entry->size, iwl_phy_db_send_all_channel_groups()
392 entry->data); iwl_phy_db_send_all_channel_groups()
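iwl_phy_db_set_section() above follows a free-then-kmemdup caching pattern, zeroing entry->size when the copy fails so the (data, size) pair never describes freed memory. The same pattern in isolation, with a hypothetical blob type standing in for iwl_phy_db_entry:

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct blob {            /* hypothetical stand-in */
        void   *data;
        size_t  size;
    };

    static int blob_set(struct blob *b, const void *src, size_t size,
                        gfp_t gfp)
    {
        kfree(b->data);                 /* drop the stale copy first */
        b->data = kmemdup(src, size, gfp);
        if (!b->data) {
            b->size = 0;                /* keep (data, size) coherent */
            return -ENOMEM;
        }
        b->size = size;
        return 0;
    }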
/linux-4.4.14/arch/x86/entry/vdso/vdso32/
H A D system_call.S
2 * AT_SYSINFO entry point
10 * First get the common code for the sigreturn entry points.
22 * Reshuffle regs so that all of any of the entry instructions
25 * A really nice entry sequence would be:
31 * 2015 actually hardcode the traditional Linux SYSENTER entry
42 * entry. That is the ONLY correct way to make a fast 32-bit system
/linux-4.4.14/arch/powerpc/include/asm/
H A D pgtable-ppc64-4k.h
5 * for each page table entry. The PMD and PGD level use a 32b record for
6 * each entry by assuming that each entry is page aligned.
25 /* PMD_SHIFT determines what a second-level page table entry can map */
33 /* PUD_SHIFT determines what a third-level page table entry can map */
38 /* PGDIR_SHIFT determines what a fourth-level page table entry can map */
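The three shifts above stack up from PAGE_SHIFT, so each level's entry maps the full range covered by the level below it. A worked example; the 9/7/9 index sizes are assumptions taken from the rest of this header:

    #include <stdio.h>

    #define PAGE_SHIFT     12
    #define PTE_INDEX_SIZE  9   /* assumed, per the full header */
    #define PMD_INDEX_SIZE  7
    #define PUD_INDEX_SIZE  9

    #define PMD_SHIFT   (PAGE_SHIFT + PTE_INDEX_SIZE)  /* 21: 2 MB   */
    #define PUD_SHIFT   (PMD_SHIFT + PMD_INDEX_SIZE)   /* 28: 256 MB */
    #define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE)   /* 37: 128 GB */

    int main(void)
    {
        printf("PMD entry maps %lu MB\n", (1UL << PMD_SHIFT) >> 20);
        printf("PUD entry maps %lu MB\n", (1UL << PUD_SHIFT) >> 20);
        printf("PGD entry maps %lu GB\n", (1UL << PGDIR_SHIFT) >> 30);
        return 0;
    }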
/linux-4.4.14/drivers/acpi/apei/
H A D apei-base.c
65 int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val) __apei_exec_read_register() argument
69 rc = apei_read(val, &entry->register_region); __apei_exec_read_register()
72 *val >>= entry->register_region.bit_offset; __apei_exec_read_register()
73 *val &= entry->mask; __apei_exec_read_register()
79 struct acpi_whea_header *entry) apei_exec_read_register()
84 rc = __apei_exec_read_register(entry, &val); apei_exec_read_register()
94 struct acpi_whea_header *entry) apei_exec_read_register_value()
98 rc = apei_exec_read_register(ctx, entry); apei_exec_read_register_value()
101 ctx->value = (ctx->value == entry->value); apei_exec_read_register_value()
107 int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val) __apei_exec_write_register() argument
111 val &= entry->mask; __apei_exec_write_register()
112 val <<= entry->register_region.bit_offset; __apei_exec_write_register()
113 if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) { __apei_exec_write_register()
115 rc = apei_read(&valr, &entry->register_region); __apei_exec_write_register()
118 valr &= ~(entry->mask << entry->register_region.bit_offset); __apei_exec_write_register()
121 rc = apei_write(val, &entry->register_region); __apei_exec_write_register()
127 struct acpi_whea_header *entry) apei_exec_write_register()
129 return __apei_exec_write_register(entry, ctx->value); apei_exec_write_register()
134 struct acpi_whea_header *entry) apei_exec_write_register_value()
138 ctx->value = entry->value; apei_exec_write_register_value()
139 rc = apei_exec_write_register(ctx, entry); apei_exec_write_register_value()
146 struct acpi_whea_header *entry) apei_exec_noop()
161 struct acpi_whea_header *entry; __apei_exec_run() local
175 entry = &ctx->action_table[i]; __apei_exec_run()
176 if (entry->action != action) __apei_exec_run()
179 if (entry->instruction >= ctx->instructions || __apei_exec_run()
180 !ctx->ins_table[entry->instruction].run) { __apei_exec_run()
183 entry->instruction); __apei_exec_run()
186 run = ctx->ins_table[entry->instruction].run; __apei_exec_run()
187 rc = run(ctx, entry); __apei_exec_run()
203 struct acpi_whea_header *entry,
213 struct acpi_whea_header *entry; apei_exec_for_each_entry() local
217 entry = ctx->action_table + i; apei_exec_for_each_entry()
218 ins = entry->instruction; apei_exec_for_each_entry()
227 rc = func(ctx, entry, data); apei_exec_for_each_entry()
236 struct acpi_whea_header *entry, pre_map_gar_callback()
239 u8 ins = entry->instruction; pre_map_gar_callback()
242 return apei_map_generic_address(&entry->register_region); pre_map_gar_callback()
269 struct acpi_whea_header *entry, post_unmap_gar_callback()
272 u8 ins = entry->instruction; post_unmap_gar_callback()
275 apei_unmap_generic_address(&entry->register_region); post_unmap_gar_callback()
432 * EINJ has two groups of GARs (EINJ table entry and trigger table
433 * entry), so common resources are subtracted from the trigger table
712 struct acpi_whea_header *entry, collect_res_callback()
716 struct acpi_generic_address *reg = &entry->register_region; collect_res_callback()
717 u8 ins = entry->instruction; collect_res_callback()
78 apei_exec_read_register(struct apei_exec_context *ctx, struct acpi_whea_header *entry) apei_exec_read_register() argument
93 apei_exec_read_register_value(struct apei_exec_context *ctx, struct acpi_whea_header *entry) apei_exec_read_register_value() argument
126 apei_exec_write_register(struct apei_exec_context *ctx, struct acpi_whea_header *entry) apei_exec_write_register() argument
133 apei_exec_write_register_value(struct apei_exec_context *ctx, struct acpi_whea_header *entry) apei_exec_write_register_value() argument
145 apei_exec_noop(struct apei_exec_context *ctx, struct acpi_whea_header *entry) apei_exec_noop() argument
235 pre_map_gar_callback(struct apei_exec_context *ctx, struct acpi_whea_header *entry, void *data) pre_map_gar_callback() argument
268 post_unmap_gar_callback(struct apei_exec_context *ctx, struct acpi_whea_header *entry, void *data) post_unmap_gar_callback() argument
711 collect_res_callback(struct apei_exec_context *ctx, struct acpi_whea_header *entry, void *data) collect_res_callback() argument
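__apei_exec_read_register() and __apei_exec_write_register() above apply one discipline: a field is read as (reg >> bit_offset) & mask and written back shifted into place, with APEI_EXEC_PRESERVE_REGISTER deciding whether the other bits survive. A minimal sketch over a plain u64 instead of a mapped GAR region:

    #include <stdint.h>

    static uint64_t read_field(uint64_t reg, unsigned bit_offset,
                               uint64_t mask)
    {
        return (reg >> bit_offset) & mask;  /* as in the read path above */
    }

    static uint64_t write_field(uint64_t reg, unsigned bit_offset,
                                uint64_t mask, uint64_t val, int preserve)
    {
        val = (val & mask) << bit_offset;
        if (preserve)                       /* APEI_EXEC_PRESERVE_REGISTER */
            return (reg & ~(mask << bit_offset)) | val;
        return val;                         /* other bits are clobbered */
    }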
