Searched refs:entry (Results 1 - 200 of 5284) sorted by relevance


/linux-4.1.27/net/atm/
mpoa_caches.c
37 in_cache_entry *entry; in_cache_get() local
40 entry = client->in_cache; in_cache_get()
41 while (entry != NULL) { in_cache_get()
42 if (entry->ctrl_info.in_dst_ip == dst_ip) { in_cache_get()
43 atomic_inc(&entry->use); in_cache_get()
45 return entry; in_cache_get()
47 entry = entry->next; in_cache_get()
58 in_cache_entry *entry; in_cache_get_with_mask() local
61 entry = client->in_cache; in_cache_get_with_mask()
62 while (entry != NULL) { in_cache_get_with_mask()
63 if ((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask)) { in_cache_get_with_mask()
64 atomic_inc(&entry->use); in_cache_get_with_mask()
66 return entry; in_cache_get_with_mask()
68 entry = entry->next; in_cache_get_with_mask()
79 in_cache_entry *entry; in_cache_get_by_vcc() local
82 entry = client->in_cache; in_cache_get_by_vcc()
83 while (entry != NULL) { in_cache_get_by_vcc()
84 if (entry->shortcut == vcc) { in_cache_get_by_vcc()
85 atomic_inc(&entry->use); in_cache_get_by_vcc()
87 return entry; in_cache_get_by_vcc()
89 entry = entry->next; in_cache_get_by_vcc()
99 in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL); in_cache_add_entry() local
101 if (entry == NULL) { in_cache_add_entry()
106 dprintk("adding an ingress entry, ip = %pI4\n", &dst_ip); in_cache_add_entry()
108 atomic_set(&entry->use, 1); in_cache_add_entry()
111 entry->next = client->in_cache; in_cache_add_entry()
112 entry->prev = NULL; in_cache_add_entry()
114 client->in_cache->prev = entry; in_cache_add_entry()
115 client->in_cache = entry; in_cache_add_entry()
117 memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN); in_cache_add_entry()
118 entry->ctrl_info.in_dst_ip = dst_ip; in_cache_add_entry()
119 do_gettimeofday(&(entry->tv)); in_cache_add_entry()
120 entry->retry_time = client->parameters.mpc_p4; in_cache_add_entry()
121 entry->count = 1; in_cache_add_entry()
122 entry->entry_state = INGRESS_INVALID; in_cache_add_entry()
123 entry->ctrl_info.holding_time = HOLDING_TIME_DEFAULT; in_cache_add_entry()
124 atomic_inc(&entry->use); in_cache_add_entry()
129 return entry; in_cache_add_entry()
132 static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc) cache_hit() argument
137 entry->count++; cache_hit()
138 if (entry->entry_state == INGRESS_RESOLVED && entry->shortcut != NULL) cache_hit()
141 if (entry->entry_state == INGRESS_REFRESHING) { cache_hit()
142 if (entry->count > mpc->parameters.mpc_p1) { cache_hit()
144 msg.content.in_info = entry->ctrl_info; cache_hit()
146 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); cache_hit()
150 do_gettimeofday(&(entry->reply_wait)); cache_hit()
151 entry->entry_state = INGRESS_RESOLVING; cache_hit()
153 if (entry->shortcut != NULL) cache_hit()
158 if (entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL) cache_hit()
161 if (entry->count > mpc->parameters.mpc_p1 && cache_hit()
162 entry->entry_state == INGRESS_INVALID) { cache_hit()
164 mpc->dev->name, &entry->ctrl_info.in_dst_ip); cache_hit()
165 entry->entry_state = INGRESS_RESOLVING; cache_hit()
168 msg.content.in_info = entry->ctrl_info; cache_hit()
169 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); cache_hit()
173 do_gettimeofday(&(entry->reply_wait)); cache_hit()
179 static void in_cache_put(in_cache_entry *entry) in_cache_put() argument
181 if (atomic_dec_and_test(&entry->use)) { in_cache_put()
182 memset(entry, 0, sizeof(in_cache_entry)); in_cache_put()
183 kfree(entry); in_cache_put()
190 static void in_cache_remove_entry(in_cache_entry *entry, in_cache_remove_entry() argument
196 vcc = entry->shortcut; in_cache_remove_entry()
197 dprintk("removing an ingress entry, ip = %pI4\n", in_cache_remove_entry()
198 &entry->ctrl_info.in_dst_ip); in_cache_remove_entry()
200 if (entry->prev != NULL) in_cache_remove_entry()
201 entry->prev->next = entry->next; in_cache_remove_entry()
203 client->in_cache = entry->next; in_cache_remove_entry()
204 if (entry->next != NULL) in_cache_remove_entry()
205 entry->next->prev = entry->prev; in_cache_remove_entry()
206 client->in_ops->put(entry); in_cache_remove_entry()
228 in_cache_entry *entry, *next_entry; clear_count_and_expired() local
234 entry = client->in_cache; clear_count_and_expired()
235 while (entry != NULL) { clear_count_and_expired()
236 entry->count = 0; clear_count_and_expired()
237 next_entry = entry->next; clear_count_and_expired()
238 if ((now.tv_sec - entry->tv.tv_sec) clear_count_and_expired()
239 > entry->ctrl_info.holding_time) { clear_count_and_expired()
241 &entry->ctrl_info.in_dst_ip); clear_count_and_expired()
242 client->in_ops->remove_entry(entry, client); clear_count_and_expired()
244 entry = next_entry; clear_count_and_expired()
254 in_cache_entry *entry; check_resolving_entries() local
261 entry = client->in_cache; check_resolving_entries()
262 while (entry != NULL) { check_resolving_entries()
263 if (entry->entry_state == INGRESS_RESOLVING) { check_resolving_entries()
264 if ((now.tv_sec - entry->hold_down.tv_sec) < check_resolving_entries()
266 entry = entry->next; /* Entry in hold down */ check_resolving_entries()
269 if ((now.tv_sec - entry->reply_wait.tv_sec) > check_resolving_entries()
270 entry->retry_time) { check_resolving_entries()
271 entry->retry_time = MPC_C1 * (entry->retry_time); check_resolving_entries()
274 * put entry in hold down. check_resolving_entries()
276 if (entry->retry_time > client->parameters.mpc_p5) { check_resolving_entries()
277 do_gettimeofday(&(entry->hold_down)); check_resolving_entries()
278 entry->retry_time = client->parameters.mpc_p4; check_resolving_entries()
279 entry = entry->next; check_resolving_entries()
283 memset(&(entry->hold_down), 0, sizeof(struct timeval)); check_resolving_entries()
286 msg.content.in_info = entry->ctrl_info; check_resolving_entries()
287 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); check_resolving_entries()
291 do_gettimeofday(&(entry->reply_wait)); check_resolving_entries()
294 entry = entry->next; check_resolving_entries()
303 struct in_cache_entry *entry = client->in_cache; refresh_entries() local
309 while (entry != NULL) { refresh_entries()
310 if (entry->entry_state == INGRESS_RESOLVED) { refresh_entries()
311 if (!(entry->refresh_time)) refresh_entries()
312 entry->refresh_time = (2 * (entry->ctrl_info.holding_time))/3; refresh_entries()
313 if ((now.tv_sec - entry->reply_wait.tv_sec) > refresh_entries()
314 entry->refresh_time) { refresh_entries()
315 dprintk("refreshing an entry.\n"); refresh_entries()
316 entry->entry_state = INGRESS_REFRESHING; refresh_entries()
320 entry = entry->next; refresh_entries()
336 eg_cache_entry *entry; eg_cache_get_by_cache_id() local
339 entry = mpc->eg_cache; eg_cache_get_by_cache_id()
340 while (entry != NULL) { eg_cache_get_by_cache_id()
341 if (entry->ctrl_info.cache_id == cache_id) { eg_cache_get_by_cache_id()
342 atomic_inc(&entry->use); eg_cache_get_by_cache_id()
344 return entry; eg_cache_get_by_cache_id()
346 entry = entry->next; eg_cache_get_by_cache_id()
357 eg_cache_entry *entry; eg_cache_get_by_tag() local
360 entry = mpc->eg_cache; eg_cache_get_by_tag()
361 while (entry != NULL) { eg_cache_get_by_tag()
362 if (entry->ctrl_info.tag == tag) { eg_cache_get_by_tag()
363 atomic_inc(&entry->use); eg_cache_get_by_tag()
365 return entry; eg_cache_get_by_tag()
367 entry = entry->next; eg_cache_get_by_tag()
379 eg_cache_entry *entry; eg_cache_get_by_vcc() local
382 entry = mpc->eg_cache; eg_cache_get_by_vcc()
383 while (entry != NULL) { eg_cache_get_by_vcc()
384 if (entry->shortcut == vcc) { eg_cache_get_by_vcc()
385 atomic_inc(&entry->use); eg_cache_get_by_vcc()
387 return entry; eg_cache_get_by_vcc()
389 entry = entry->next; eg_cache_get_by_vcc()
399 eg_cache_entry *entry; eg_cache_get_by_src_ip() local
402 entry = mpc->eg_cache; eg_cache_get_by_src_ip()
403 while (entry != NULL) { eg_cache_get_by_src_ip()
404 if (entry->latest_ip_addr == ipaddr) { eg_cache_get_by_src_ip()
405 atomic_inc(&entry->use); eg_cache_get_by_src_ip()
407 return entry; eg_cache_get_by_src_ip()
409 entry = entry->next; eg_cache_get_by_src_ip()
416 static void eg_cache_put(eg_cache_entry *entry) eg_cache_put() argument
418 if (atomic_dec_and_test(&entry->use)) { eg_cache_put()
419 memset(entry, 0, sizeof(eg_cache_entry)); eg_cache_put()
420 kfree(entry); eg_cache_put()
427 static void eg_cache_remove_entry(eg_cache_entry *entry, eg_cache_remove_entry() argument
433 vcc = entry->shortcut; eg_cache_remove_entry()
434 dprintk("removing an egress entry.\n"); eg_cache_remove_entry()
435 if (entry->prev != NULL) eg_cache_remove_entry()
436 entry->prev->next = entry->next; eg_cache_remove_entry()
438 client->eg_cache = entry->next; eg_cache_remove_entry()
439 if (entry->next != NULL) eg_cache_remove_entry()
440 entry->next->prev = entry->prev; eg_cache_remove_entry()
441 client->eg_ops->put(entry); eg_cache_remove_entry()
461 eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL); eg_cache_add_entry() local
463 if (entry == NULL) { eg_cache_add_entry()
468 dprintk("adding an egress entry, ip = %pI4, this should be our IP\n", eg_cache_add_entry()
471 atomic_set(&entry->use, 1); eg_cache_add_entry()
474 entry->next = client->eg_cache; eg_cache_add_entry()
475 entry->prev = NULL; eg_cache_add_entry()
477 client->eg_cache->prev = entry; eg_cache_add_entry()
478 client->eg_cache = entry; eg_cache_add_entry()
480 memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN); eg_cache_add_entry()
481 entry->ctrl_info = msg->content.eg_info; eg_cache_add_entry()
482 do_gettimeofday(&(entry->tv)); eg_cache_add_entry()
483 entry->entry_state = EGRESS_RESOLVED; eg_cache_add_entry()
485 ntohl(entry->ctrl_info.cache_id)); eg_cache_add_entry()
486 dprintk("mps_ip = %pI4\n", &entry->ctrl_info.mps_ip); eg_cache_add_entry()
487 atomic_inc(&entry->use); eg_cache_add_entry()
492 return entry; eg_cache_add_entry()
495 static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time) update_eg_cache_entry() argument
497 do_gettimeofday(&(entry->tv)); update_eg_cache_entry()
498 entry->entry_state = EGRESS_RESOLVED; update_eg_cache_entry()
499 entry->ctrl_info.holding_time = holding_time; update_eg_cache_entry()
504 eg_cache_entry *entry, *next_entry; clear_expired() local
511 entry = client->eg_cache; clear_expired()
512 while (entry != NULL) { clear_expired()
513 next_entry = entry->next; clear_expired()
514 if ((now.tv_sec - entry->tv.tv_sec) clear_expired()
515 > entry->ctrl_info.holding_time) { clear_expired()
517 msg.content.eg_info = entry->ctrl_info; clear_expired()
519 ntohl(entry->ctrl_info.cache_id)); clear_expired()
521 client->eg_ops->remove_entry(entry, client); clear_expired()
523 entry = next_entry; clear_expired()
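
The in_cache_get()/in_cache_put() pairs above follow the kernel's standard get/put reference-counting idiom: every successful lookup bumps an atomic use count under the cache lock, and the entry is scrubbed and freed only when the last holder drops its reference. A minimal user-space sketch of the same pattern, using C11 atomics in place of the kernel's atomic_t (the names and single-key lookup are illustrative, not from the source):

```c
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct cache_entry {
	struct cache_entry *next;
	unsigned int key;
	atomic_int use;			/* reference count */
};

/* Look up an entry and take a reference; the caller must put() it. */
static struct cache_entry *cache_get(struct cache_entry *head, unsigned int key)
{
	for (struct cache_entry *e = head; e; e = e->next) {
		if (e->key == key) {
			atomic_fetch_add(&e->use, 1);
			return e;
		}
	}
	return NULL;
}

/* Drop a reference; free the entry when the last one goes away. */
static void cache_put(struct cache_entry *e)
{
	if (atomic_fetch_sub(&e->use, 1) == 1) {	/* count hit zero */
		memset(e, 0, sizeof(*e));	/* scrub, as in_cache_put() does */
		free(e);
	}
}

int main(void)
{
	struct cache_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	e->key = 42;
	atomic_init(&e->use, 1);	/* the list's own reference */
	cache_put(cache_get(e, 42));	/* lookup + release */
	cache_put(e);			/* last reference: frees */
	return 0;
}
```

Note that in_cache_remove_entry() above only unlinks the entry and drops the list's reference via put(); any concurrent holder keeps the memory alive until its own put().
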
lec.c
102 static inline void lec_arp_hold(struct lec_arp_table *entry) lec_arp_hold() argument
104 atomic_inc(&entry->usage); lec_arp_hold()
107 static inline void lec_arp_put(struct lec_arp_table *entry) lec_arp_put() argument
109 if (atomic_dec_and_test(&entry->usage)) lec_arp_put()
110 kfree(entry); lec_arp_put()
208 struct lec_arp_table *entry; lec_start_xmit() local
279 entry = NULL; lec_start_xmit()
280 vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry); lec_start_xmit()
281 pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n", lec_start_xmit()
282 dev->name, vcc, vcc ? vcc->flags : 0, entry); lec_start_xmit()
284 if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) { lec_start_xmit()
287 skb_queue_tail(&entry->tx_wait, skb); lec_start_xmit()
289 pr_debug("%s:tx queue full or no arp entry, dropping, MAC address: %pM\n", lec_start_xmit()
301 while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) { lec_start_xmit()
325 if (entry) lec_start_xmit()
326 lec_arp_put(entry); lec_start_xmit()
344 struct lec_arp_table *entry; lec_atm_send() local
374 entry = lec_arp_find(priv, mesg->content.normal.mac_addr); lec_atm_send()
375 lec_arp_remove(priv, entry); lec_atm_send()
442 pr_debug("%s: entry found, responding to zeppelin\n", lec_atm_send()
623 struct lec_arp_table *entry; lec_push() local
641 * the LE_ARP cache entry, delete the LE_ARP cache entry. lec_push()
646 entry = lec_arp_find(priv, src); lec_push()
647 if (entry && entry->vcc != vcc) { lec_push()
648 lec_arp_remove(priv, entry); lec_push()
649 lec_arp_put(entry); lec_push()
802 static void lec_info(struct seq_file *seq, struct lec_arp_table *entry) lec_info() argument
807 seq_printf(seq, "%2.2x", entry->mac_addr[i] & 0xff); lec_info()
810 seq_printf(seq, "%2.2x", entry->atm_addr[i] & 0xff); lec_info()
811 seq_printf(seq, " %s %4.4x", lec_arp_get_status_string(entry->status), lec_info()
812 entry->flags & 0xffff); lec_info()
813 if (entry->vcc) lec_info()
814 seq_printf(seq, "%3d %3d ", entry->vcc->vpi, entry->vcc->vci); lec_info()
817 if (entry->recv_vcc) { lec_info()
818 seq_printf(seq, " %3d %3d", entry->recv_vcc->vpi, lec_info()
819 entry->recv_vcc->vci); lec_info()
981 struct lec_arp_table *entry = hlist_entry(state->node, lec_seq_show() local
986 lec_info(seq, entry); lec_seq_show()
1187 * TLVs get freed when entry is killed lane2_associate_ind()
1189 struct lec_arp_table *entry = lec_arp_find(priv, mac_addr); lane2_associate_ind()
1191 if (entry == NULL) lane2_associate_ind()
1194 kfree(entry->tlvs); lane2_associate_ind()
1196 entry->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL); lane2_associate_ind()
1197 if (entry->tlvs == NULL) lane2_associate_ind()
1199 entry->sizeoftlvs = sizeoftlvs; lane2_associate_ind()
1267 static void lec_arp_clear_vccs(struct lec_arp_table *entry) lec_arp_clear_vccs() argument
1269 if (entry->vcc) { lec_arp_clear_vccs()
1270 struct atm_vcc *vcc = entry->vcc; lec_arp_clear_vccs()
1279 vcc->push = entry->old_push; lec_arp_clear_vccs()
1281 entry->vcc = NULL; lec_arp_clear_vccs()
1283 if (entry->recv_vcc) { lec_arp_clear_vccs()
1284 entry->recv_vcc->push = entry->old_recv_push; lec_arp_clear_vccs()
1285 vcc_release_async(entry->recv_vcc, -EPIPE); lec_arp_clear_vccs()
1286 entry->recv_vcc = NULL; lec_arp_clear_vccs()
1291 * Insert entry to lec_arp_table
1295 lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry) lec_arp_add() argument
1299 tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])]; lec_arp_add()
1300 hlist_add_head(&entry->next, tmp); lec_arp_add()
1302 pr_debug("Added entry:%pM\n", entry->mac_addr); lec_arp_add()
1306 * Remove entry from lec_arp_table
1311 struct lec_arp_table *entry; lec_arp_remove() local
1329 hlist_for_each_entry(entry, lec_arp_remove()
1332 entry->atm_addr, ATM_ESA_LEN) == 0) { lec_arp_remove()
1343 pr_debug("Removed entry:%pM\n", to_remove->mac_addr); lec_arp_remove()
1490 struct lec_arp_table *entry; lec_arp_destroy() local
1501 hlist_for_each_entry_safe(entry, next, lec_arp_destroy()
1503 lec_arp_remove(priv, entry); lec_arp_destroy()
1504 lec_arp_put(entry); lec_arp_destroy()
1509 hlist_for_each_entry_safe(entry, next, lec_arp_destroy()
1511 del_timer_sync(&entry->timer); lec_arp_destroy()
1512 lec_arp_clear_vccs(entry); lec_arp_destroy()
1513 hlist_del(&entry->next); lec_arp_destroy()
1514 lec_arp_put(entry); lec_arp_destroy()
1518 hlist_for_each_entry_safe(entry, next, lec_arp_destroy()
1520 del_timer_sync(&entry->timer); lec_arp_destroy()
1521 lec_arp_clear_vccs(entry); lec_arp_destroy()
1522 hlist_del(&entry->next); lec_arp_destroy()
1523 lec_arp_put(entry); lec_arp_destroy()
1527 hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) { lec_arp_destroy()
1529 lec_arp_clear_vccs(entry); lec_arp_destroy()
1530 hlist_del(&entry->next); lec_arp_destroy()
1531 lec_arp_put(entry); lec_arp_destroy()
1539 * Find entry by mac_address
1545 struct lec_arp_table *entry; lec_arp_find() local
1550 hlist_for_each_entry(entry, head, next) { hlist_for_each_entry()
1551 if (ether_addr_equal(mac_addr, entry->mac_addr)) hlist_for_each_entry()
1552 return entry; hlist_for_each_entry()
1564 pr_info("LEC: Arp entry kmalloc failed\n"); make_entry()
1581 struct lec_arp_table *entry; lec_arp_expire_arp() local
1583 entry = (struct lec_arp_table *)data; lec_arp_expire_arp()
1586 if (entry->status == ESI_ARP_PENDING) { lec_arp_expire_arp()
1587 if (entry->no_tries <= entry->priv->max_retry_count) { lec_arp_expire_arp()
1588 if (entry->is_rdesc) lec_arp_expire_arp()
1589 send_to_lecd(entry->priv, l_rdesc_arp_xmt, lec_arp_expire_arp()
1590 entry->mac_addr, NULL, NULL); lec_arp_expire_arp()
1592 send_to_lecd(entry->priv, l_arp_xmt, lec_arp_expire_arp()
1593 entry->mac_addr, NULL, NULL); lec_arp_expire_arp()
1594 entry->no_tries++; lec_arp_expire_arp()
1596 mod_timer(&entry->timer, jiffies + (1 * HZ)); lec_arp_expire_arp()
1600 /* Unknown/unused vcc expire, remove associated entry */ lec_arp_expire_vcc()
1622 static bool __lec_arp_check_expire(struct lec_arp_table *entry, __lec_arp_check_expire() argument
1628 if ((entry->flags) & LEC_REMOTE_FLAG && priv->topology_change) __lec_arp_check_expire()
1634 now, entry->last_used, time_to_check); __lec_arp_check_expire()
1635 if (time_after(now, entry->last_used + time_to_check) && __lec_arp_check_expire()
1636 !(entry->flags & LEC_PERMANENT_FLAG) && __lec_arp_check_expire()
1637 !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */ __lec_arp_check_expire()
1638 /* Remove entry */ __lec_arp_check_expire()
1640 lec_arp_remove(priv, entry); __lec_arp_check_expire()
1641 lec_arp_put(entry); __lec_arp_check_expire()
1644 if ((entry->status == ESI_VC_PENDING || __lec_arp_check_expire()
1645 entry->status == ESI_ARP_PENDING) && __lec_arp_check_expire()
1646 time_after_eq(now, entry->timestamp + __lec_arp_check_expire()
1648 entry->timestamp = jiffies; __lec_arp_check_expire()
1649 entry->packets_flooded = 0; __lec_arp_check_expire()
1650 if (entry->status == ESI_VC_PENDING) __lec_arp_check_expire()
1652 entry->mac_addr, __lec_arp_check_expire()
1653 entry->atm_addr, __lec_arp_check_expire()
1656 if (entry->status == ESI_FLUSH_PENDING && __lec_arp_check_expire()
1657 time_after_eq(now, entry->timestamp + __lec_arp_check_expire()
1659 lec_arp_hold(entry); __lec_arp_check_expire()
1669 * 2. For each entry, delete entries that have aged past the age limit.
1670 * 3. For each entry, depending on the status of the entry, perform
1688 struct lec_arp_table *entry; lec_arp_check_expire() local
1697 hlist_for_each_entry_safe(entry, next, lec_arp_check_expire()
1699 if (__lec_arp_check_expire(entry, now, priv)) { lec_arp_check_expire()
1701 struct atm_vcc *vcc = entry->vcc; lec_arp_check_expire()
1705 while ((skb = skb_dequeue(&entry->tx_wait))) lec_arp_check_expire()
1707 entry->last_used = jiffies; lec_arp_check_expire()
1708 entry->status = ESI_FORWARD_DIRECT; lec_arp_check_expire()
1709 lec_arp_put(entry); lec_arp_check_expire()
1730 struct lec_arp_table *entry; lec_arp_resolve() local
1747 entry = lec_arp_find(priv, mac_to_find); lec_arp_resolve()
1749 if (entry) { lec_arp_resolve()
1750 if (entry->status == ESI_FORWARD_DIRECT) { lec_arp_resolve()
1752 entry->last_used = jiffies; lec_arp_resolve()
1753 lec_arp_hold(entry); lec_arp_resolve()
1754 *ret_entry = entry; lec_arp_resolve()
1755 found = entry->vcc; lec_arp_resolve()
1759 * If the LE_ARP cache entry is still pending, reset count to 0 lec_arp_resolve()
1762 if (entry->status == ESI_ARP_PENDING) lec_arp_resolve()
1763 entry->no_tries = 0; lec_arp_resolve()
1770 if (entry->status != ESI_FLUSH_PENDING && lec_arp_resolve()
1771 entry->packets_flooded < lec_arp_resolve()
1773 entry->packets_flooded++; lec_arp_resolve()
1779 * We got here because entry->status == ESI_FLUSH_PENDING lec_arp_resolve()
1780 * or BUS flood limit was reached for an entry which is lec_arp_resolve()
1783 lec_arp_hold(entry); lec_arp_resolve()
1784 *ret_entry = entry; lec_arp_resolve()
1785 pr_debug("entry->status %d entry->vcc %p\n", entry->status, lec_arp_resolve()
1786 entry->vcc); lec_arp_resolve()
1789 /* No matching entry was found */ lec_arp_resolve()
1790 entry = make_entry(priv, mac_to_find); lec_arp_resolve()
1791 pr_debug("Making entry\n"); lec_arp_resolve()
1792 if (!entry) { lec_arp_resolve()
1796 lec_arp_add(priv, entry); lec_arp_resolve()
1798 entry->packets_flooded = 1; lec_arp_resolve()
1799 entry->status = ESI_ARP_PENDING; lec_arp_resolve()
1800 entry->no_tries = 1; lec_arp_resolve()
1801 entry->last_used = entry->timestamp = jiffies; lec_arp_resolve()
1802 entry->is_rdesc = is_rdesc; lec_arp_resolve()
1803 if (entry->is_rdesc) lec_arp_resolve()
1808 entry->timer.expires = jiffies + (1 * HZ); lec_arp_resolve()
1809 entry->timer.function = lec_arp_expire_arp; lec_arp_resolve()
1810 add_timer(&entry->timer); lec_arp_resolve()
1825 struct lec_arp_table *entry; lec_addr_delete() local
1831 hlist_for_each_entry_safe(entry, next, lec_addr_delete()
1833 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) && lec_addr_delete()
1835 !(entry->flags & LEC_PERMANENT_FLAG))) { lec_addr_delete()
1836 lec_arp_remove(priv, entry); lec_addr_delete()
1837 lec_arp_put(entry); lec_addr_delete()
1857 struct lec_arp_table *entry, *tmp; lec_arp_update() local
1864 entry = lec_arp_find(priv, mac_addr); lec_arp_update()
1865 if (entry == NULL && targetless_le_arp) lec_arp_update()
1868 * we have no entry in the cache. 7.1.30 lec_arp_update()
1871 hlist_for_each_entry_safe(entry, next, lec_arp_update()
1873 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) { lec_arp_update()
1874 hlist_del(&entry->next); lec_arp_update()
1875 del_timer(&entry->timer); lec_arp_update()
1881 tmp->vcc = entry->vcc; lec_arp_update()
1882 tmp->old_push = entry->old_push; lec_arp_update()
1884 del_timer(&entry->timer); lec_arp_update()
1885 lec_arp_put(entry); lec_arp_update()
1886 entry = tmp; lec_arp_update()
1888 entry->status = ESI_FORWARD_DIRECT; lec_arp_update()
1889 ether_addr_copy(entry->mac_addr, lec_arp_update()
1891 entry->last_used = jiffies; lec_arp_update()
1892 lec_arp_add(priv, entry); lec_arp_update()
1895 entry->flags |= LEC_REMOTE_FLAG; lec_arp_update()
1897 entry->flags &= ~LEC_REMOTE_FLAG; lec_arp_update()
1905 entry = lec_arp_find(priv, mac_addr); lec_arp_update()
1906 if (!entry) { lec_arp_update()
1907 entry = make_entry(priv, mac_addr); lec_arp_update()
1908 if (!entry) lec_arp_update()
1910 entry->status = ESI_UNKNOWN; lec_arp_update()
1911 lec_arp_add(priv, entry); lec_arp_update()
1914 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN); lec_arp_update()
1915 del_timer(&entry->timer); lec_arp_update()
1919 if (entry != tmp && lec_arp_update()
1927 entry->vcc = tmp->vcc; lec_arp_update()
1928 entry->old_push = tmp->old_push; lec_arp_update()
1930 entry->status = tmp->status; lec_arp_update()
1936 entry->flags |= LEC_REMOTE_FLAG; lec_arp_update()
1938 entry->flags &= ~LEC_REMOTE_FLAG; lec_arp_update()
1939 if (entry->status == ESI_ARP_PENDING || entry->status == ESI_UNKNOWN) { lec_arp_update()
1940 entry->status = ESI_VC_PENDING; lec_arp_update()
1941 send_to_lecd(priv, l_svc_setup, entry->mac_addr, atm_addr, NULL); lec_arp_update()
1958 struct lec_arp_table *entry; lec_vcc_added() local
1966 entry = lec_arp_find(priv, bus_mac); lec_vcc_added()
1967 if (!entry) { lec_vcc_added()
1968 pr_info("LEC_ARP: Multicast entry not found!\n"); lec_vcc_added()
1971 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); lec_vcc_added()
1972 entry->recv_vcc = vcc; lec_vcc_added()
1973 entry->old_recv_push = old_push; lec_vcc_added()
1975 entry = make_entry(priv, bus_mac); lec_vcc_added()
1976 if (entry == NULL) lec_vcc_added()
1978 del_timer(&entry->timer); lec_vcc_added()
1979 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); lec_vcc_added()
1980 entry->recv_vcc = vcc; lec_vcc_added()
1981 entry->old_recv_push = old_push; lec_vcc_added()
1982 hlist_add_head(&entry->next, &priv->mcast_fwds); lec_vcc_added()
2000 entry = make_entry(priv, bus_mac); lec_vcc_added()
2001 if (entry == NULL) lec_vcc_added()
2003 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); lec_vcc_added()
2004 eth_zero_addr(entry->mac_addr); lec_vcc_added()
2005 entry->recv_vcc = vcc; lec_vcc_added()
2006 entry->old_recv_push = old_push; lec_vcc_added()
2007 entry->status = ESI_UNKNOWN; lec_vcc_added()
2008 entry->timer.expires = jiffies + priv->vcc_timeout_period; lec_vcc_added()
2009 entry->timer.function = lec_arp_expire_vcc; lec_vcc_added()
2010 hlist_add_head(&entry->next, &priv->lec_no_forward); lec_vcc_added()
2011 add_timer(&entry->timer); lec_vcc_added()
2027 hlist_for_each_entry(entry, lec_vcc_added()
2030 (ioc_data->atm_addr, entry->atm_addr, lec_vcc_added()
2034 entry->vcc ? entry->vcc->vci : 0, lec_vcc_added()
2035 entry->recv_vcc ? entry->recv_vcc-> lec_vcc_added()
2038 del_timer(&entry->timer); lec_vcc_added()
2039 entry->vcc = vcc; lec_vcc_added()
2040 entry->old_push = old_push; lec_vcc_added()
2041 if (entry->status == ESI_VC_PENDING) { lec_vcc_added()
2044 entry->status = lec_vcc_added()
2047 entry->timestamp = jiffies; lec_vcc_added()
2048 entry->status = lec_vcc_added()
2053 entry->atm_addr, lec_vcc_added()
2083 entry = make_entry(priv, bus_mac); lec_vcc_added()
2084 if (!entry) lec_vcc_added()
2086 entry->vcc = vcc; lec_vcc_added()
2087 entry->old_push = old_push; lec_vcc_added()
2088 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); lec_vcc_added()
2089 eth_zero_addr(entry->mac_addr); lec_vcc_added()
2090 entry->status = ESI_UNKNOWN; lec_vcc_added()
2091 hlist_add_head(&entry->next, &priv->lec_arp_empty_ones); lec_vcc_added()
2092 entry->timer.expires = jiffies + priv->vcc_timeout_period; lec_vcc_added()
2093 entry->timer.function = lec_arp_expire_vcc; lec_vcc_added()
2094 add_timer(&entry->timer); lec_vcc_added()
2104 struct lec_arp_table *entry; lec_flush_complete() local
2111 hlist_for_each_entry(entry, lec_flush_complete()
2113 if (entry->flush_tran_id == tran_id && lec_flush_complete()
2114 entry->status == ESI_FLUSH_PENDING) { lec_flush_complete()
2116 struct atm_vcc *vcc = entry->vcc; lec_flush_complete()
2118 lec_arp_hold(entry); lec_flush_complete()
2121 while ((skb = skb_dequeue(&entry->tx_wait))) lec_flush_complete()
2123 entry->last_used = jiffies; lec_flush_complete()
2124 entry->status = ESI_FORWARD_DIRECT; lec_flush_complete()
2125 lec_arp_put(entry); lec_flush_complete()
2140 struct lec_arp_table *entry; lec_set_flush_tran_id() local
2145 hlist_for_each_entry(entry, lec_set_flush_tran_id()
2147 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) { lec_set_flush_tran_id()
2148 entry->flush_tran_id = tran_id; lec_set_flush_tran_id()
2150 tran_id, entry); lec_set_flush_tran_id()
2198 struct lec_arp_table *entry; lec_vcc_close() local
2207 hlist_for_each_entry_safe(entry, next, lec_vcc_close()
2209 if (vcc == entry->vcc) { lec_vcc_close()
2210 lec_arp_remove(priv, entry); lec_vcc_close()
2211 lec_arp_put(entry); lec_vcc_close()
2218 hlist_for_each_entry_safe(entry, next, lec_vcc_close()
2220 if (entry->vcc == vcc) { lec_vcc_close()
2221 lec_arp_clear_vccs(entry); lec_vcc_close()
2222 del_timer(&entry->timer); lec_vcc_close()
2223 hlist_del(&entry->next); lec_vcc_close()
2224 lec_arp_put(entry); lec_vcc_close()
2228 hlist_for_each_entry_safe(entry, next, lec_vcc_close()
2230 if (entry->recv_vcc == vcc) { lec_vcc_close()
2231 lec_arp_clear_vccs(entry); lec_vcc_close()
2232 del_timer(&entry->timer); lec_vcc_close()
2233 hlist_del(&entry->next); lec_vcc_close()
2234 lec_arp_put(entry); lec_vcc_close()
2238 hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) { lec_vcc_close()
2239 if (entry->recv_vcc == vcc) { lec_vcc_close()
2240 lec_arp_clear_vccs(entry); lec_vcc_close()
2242 hlist_del(&entry->next); lec_vcc_close()
2243 lec_arp_put(entry); lec_vcc_close()
2257 struct lec_arp_table *entry, *tmp; lec_arp_check_empties() local
2262 hlist_for_each_entry_safe(entry, next, lec_arp_check_empties()
2264 if (vcc == entry->vcc) { lec_arp_check_empties()
2265 del_timer(&entry->timer); lec_arp_check_empties()
2266 ether_addr_copy(entry->mac_addr, src); lec_arp_check_empties()
2267 entry->status = ESI_FORWARD_DIRECT; lec_arp_check_empties()
2268 entry->last_used = jiffies; lec_arp_check_empties()
2269 /* We might have got an entry */ lec_arp_check_empties()
2275 hlist_del(&entry->next); lec_arp_check_empties()
2276 lec_arp_add(priv, entry); lec_arp_check_empties()
2280 pr_debug("LEC_ARP: Arp_check_empties: entry not found!\n"); lec_arp_check_empties()
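
The lec_arp_add()/lec_arp_find() pair above implements a small hash table: entries are bucketed on the last byte of the MAC address and each bucket holds a linked list. A standalone sketch of that scheme, with a plain singly linked list standing in for the kernel's hlist (the table size and HASH macro mirror lec.c, but treat the details as illustrative):

```c
#include <string.h>

#define ETH_ALEN		6
#define LEC_ARP_TABLE_SIZE	16
#define HASH(ch)		((ch) & (LEC_ARP_TABLE_SIZE - 1))

struct arp_entry {
	struct arp_entry *next;
	unsigned char mac_addr[ETH_ALEN];
};

static struct arp_entry *arp_tables[LEC_ARP_TABLE_SIZE];

/* Insert at the head of the bucket picked by the MAC's last byte. */
static void arp_add(struct arp_entry *e)
{
	struct arp_entry **bucket =
		&arp_tables[HASH(e->mac_addr[ETH_ALEN - 1])];

	e->next = *bucket;
	*bucket = e;
}

/* Hash to the right bucket, then linear-scan it for a full match. */
static struct arp_entry *arp_find(const unsigned char *mac)
{
	struct arp_entry *e = arp_tables[HASH(mac[ETH_ALEN - 1])];

	for (; e; e = e->next)
		if (memcmp(mac, e->mac_addr, ETH_ALEN) == 0)
			return e;
	return NULL;
}

int main(void)
{
	static struct arp_entry e = { .mac_addr = { 0, 1, 2, 3, 4, 5 } };

	arp_add(&e);
	return arp_find(e.mac_addr) == &e ? 0 : 1;
}
```
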
clip.c
75 static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry) link_vcc() argument
77 pr_debug("%p to entry %p (neigh %p)\n", clip_vcc, entry, entry->neigh); link_vcc()
78 clip_vcc->entry = entry; link_vcc()
80 clip_vcc->next = entry->vccs; link_vcc()
81 entry->vccs = clip_vcc; link_vcc()
82 entry->neigh->used = jiffies; link_vcc()
87 struct atmarp_entry *entry = clip_vcc->entry; unlink_clip_vcc() local
90 if (!entry) { unlink_clip_vcc()
91 pr_crit("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc); unlink_clip_vcc()
94 netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */ unlink_clip_vcc()
95 entry->neigh->used = jiffies; unlink_clip_vcc()
96 for (walk = &entry->vccs; *walk; walk = &(*walk)->next) unlink_clip_vcc()
101 clip_vcc->entry = NULL; unlink_clip_vcc()
103 netif_wake_queue(entry->neigh->dev); unlink_clip_vcc()
104 if (entry->vccs) unlink_clip_vcc()
106 entry->expires = jiffies - 1; unlink_clip_vcc()
108 error = neigh_update(entry->neigh, NULL, NUD_NONE, unlink_clip_vcc()
114 pr_crit("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc); unlink_clip_vcc()
116 netif_tx_unlock_bh(entry->neigh->dev); unlink_clip_vcc()
119 /* The neighbour entry n->lock is held. */ neigh_check_cb()
122 struct atmarp_entry *entry = neighbour_priv(n); neigh_check_cb() local
127 for (cv = entry->vccs; cv; cv = cv->next) { neigh_check_cb()
131 pr_debug("releasing vcc %p->%p of entry %p\n", neigh_check_cb()
132 cv, cv->vcc, entry); neigh_check_cb()
137 if (entry->vccs || time_before(jiffies, entry->expires)) neigh_check_cb()
203 if (clip_vcc->entry) clip_push()
210 skb->dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : clip_devs; clip_push()
211 /* clip_vcc->entry == NULL if we don't have an IP address yet */ clip_push()
291 struct atmarp_entry *entry = neighbour_priv(neigh); clip_constructor() local
302 entry->neigh = neigh; clip_constructor()
303 entry->vccs = NULL; clip_constructor()
304 entry->expires = jiffies - 1; clip_constructor()
313 * to allocate the neighbour entry but not to ask atmarpd for resolution. Also,
329 struct atmarp_entry *entry; clip_start_xmit() local
356 entry = neighbour_priv(n); clip_start_xmit()
357 if (!entry->vccs) { clip_start_xmit()
358 if (time_after(jiffies, entry->expires)) { clip_start_xmit()
360 entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ; clip_start_xmit()
363 if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS) clip_start_xmit()
364 skb_queue_tail(&entry->neigh->arp_queue, skb); clip_start_xmit()
371 pr_debug("neigh %p, vccs %p\n", entry, entry->vccs); clip_start_xmit()
372 ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc; clip_start_xmit()
374 if (entry->vccs->encap) { clip_start_xmit()
383 entry->vccs->last_use = jiffies; clip_start_xmit()
385 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */ clip_start_xmit()
394 entry->vccs->xoff = 0; clip_start_xmit()
400 if (!entry->vccs->xoff) clip_start_xmit()
425 clip_vcc->entry = NULL; clip_mkip()
444 struct atmarp_entry *entry; clip_setentry() local
455 if (!clip_vcc->entry) { clip_setentry()
456 pr_err("hiding hidden ATMARP entry\n"); clip_setentry()
470 entry = neighbour_priv(neigh); clip_setentry()
471 if (entry != clip_vcc->entry) { clip_setentry()
472 if (!clip_vcc->entry) clip_setentry()
478 link_vcc(clip_vcc, entry); clip_setentry()
730 /* This means the neighbour entry has no attached VCC objects. */
734 struct atmarp_entry *entry, struct clip_vcc *clip_vcc) atmarp_info()
747 exp = entry->neigh->used; atmarp_info()
763 if (time_before(jiffies, entry->expires)) atmarp_info()
767 atomic_read(&entry->neigh->refcnt)); atmarp_info()
733 atmarp_info(struct seq_file *seq, struct neighbour *n, struct atmarp_entry *entry, struct clip_vcc *clip_vcc) atmarp_info() argument
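
unlink_clip_vcc() above removes a VCC from the entry's chain with a pointer-to-pointer walk (`for (walk = &entry->vccs; *walk; walk = &(*walk)->next)`), which splices out any node, head included, without a special case. A self-contained sketch of that idiom (the node type is invented for illustration):

```c
#include <stddef.h>

struct node {
	struct node *next;
	int id;
};

/* Remove the node with the given id, wherever it sits in the list. */
static void list_remove(struct node **head, int id)
{
	struct node **walk;

	for (walk = head; *walk; walk = &(*walk)->next) {
		if ((*walk)->id == id) {
			*walk = (*walk)->next;	/* splice the node out */
			return;
		}
	}
}

int main(void)
{
	struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
	struct node *head = &a;

	list_remove(&head, 2);		/* list is now 1 -> 3 */
	return head->next->id == 3 ? 0 : 1;
}
```
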
mpc.c
86 static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry);
174 * Overwrites the old entry or makes a new one.
178 struct atm_mpoa_qos *entry; atm_mpoa_add_qos() local
180 entry = atm_mpoa_search_qos(dst_ip); atm_mpoa_add_qos()
181 if (entry != NULL) { atm_mpoa_add_qos()
182 entry->qos = *qos; atm_mpoa_add_qos()
183 return entry; atm_mpoa_add_qos()
186 entry = kmalloc(sizeof(struct atm_mpoa_qos), GFP_KERNEL); atm_mpoa_add_qos()
187 if (entry == NULL) { atm_mpoa_add_qos()
189 return entry; atm_mpoa_add_qos()
192 entry->ipaddr = dst_ip; atm_mpoa_add_qos()
193 entry->qos = *qos; atm_mpoa_add_qos()
195 entry->next = qos_head; atm_mpoa_add_qos()
196 qos_head = entry; atm_mpoa_add_qos()
198 return entry; atm_mpoa_add_qos()
218 int atm_mpoa_delete_qos(struct atm_mpoa_qos *entry) atm_mpoa_delete_qos() argument
222 if (entry == NULL) atm_mpoa_delete_qos()
224 if (entry == qos_head) { atm_mpoa_delete_qos()
226 kfree(entry); atm_mpoa_delete_qos()
232 if (curr->next == entry) { atm_mpoa_delete_qos()
233 curr->next = entry->next; atm_mpoa_delete_qos()
234 kfree(entry); atm_mpoa_delete_qos()
493 in_cache_entry *entry; send_via_shortcut() local
513 entry = mpc->in_ops->get(ipaddr, mpc); send_via_shortcut()
514 if (entry == NULL) { send_via_shortcut()
515 entry = mpc->in_ops->add_entry(ipaddr, mpc); send_via_shortcut()
516 if (entry != NULL) send_via_shortcut()
517 mpc->in_ops->put(entry); send_via_shortcut()
521 if (mpc->in_ops->cache_hit(entry, mpc) != OPEN) { send_via_shortcut()
524 mpc->in_ops->put(entry); send_via_shortcut()
534 mpc->in_ops->put(entry); send_via_shortcut()
541 if (entry->ctrl_info.tag != 0) { send_via_shortcut()
543 mpc->dev->name, entry->ctrl_info.tag); send_via_shortcut()
544 tagged_llc_snap_hdr.tag = entry->ctrl_info.tag; send_via_shortcut()
558 atomic_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc); send_via_shortcut()
559 ATM_SKB(skb)->atm_options = entry->shortcut->atm_options; send_via_shortcut()
560 entry->shortcut->send(entry->shortcut, skb); send_via_shortcut()
561 entry->packets_fwded++; send_via_shortcut()
562 mpc->in_ops->put(entry); send_via_shortcut()
631 pr_info("(%s) did not find RESOLVED entry from ingress cache\n", atm_mpoa_vcc_attach()
637 pr_info("(%s) attaching ingress SVC, entry = %pI4\n", atm_mpoa_vcc_attach()
743 pr_info("mpoa: (%s) Didn't find egress cache entry, tag = %u\n", mpc_push()
1084 in_cache_entry *entry; MPOA_trigger_rcvd() local
1086 entry = mpc->in_ops->get(dst_ip, mpc); MPOA_trigger_rcvd()
1087 if (entry == NULL) { MPOA_trigger_rcvd()
1088 entry = mpc->in_ops->add_entry(dst_ip, mpc); MPOA_trigger_rcvd()
1089 entry->entry_state = INGRESS_RESOLVING; MPOA_trigger_rcvd()
1091 msg->content.in_info = entry->ctrl_info; MPOA_trigger_rcvd()
1093 do_gettimeofday(&(entry->reply_wait)); MPOA_trigger_rcvd()
1094 mpc->in_ops->put(entry); MPOA_trigger_rcvd()
1098 if (entry->entry_state == INGRESS_INVALID) { MPOA_trigger_rcvd()
1099 entry->entry_state = INGRESS_RESOLVING; MPOA_trigger_rcvd()
1101 msg->content.in_info = entry->ctrl_info; MPOA_trigger_rcvd()
1103 do_gettimeofday(&(entry->reply_wait)); MPOA_trigger_rcvd()
1104 mpc->in_ops->put(entry); MPOA_trigger_rcvd()
1108 pr_info("(%s) entry already in resolving state\n", MPOA_trigger_rcvd()
1110 mpc->in_ops->put(entry); MPOA_trigger_rcvd()
1119 in_cache_entry *entry) check_qos_and_open_shortcut()
1130 entry->shortcut = eg_entry->shortcut; check_qos_and_open_shortcut()
1132 entry->shortcut = eg_entry->shortcut; check_qos_and_open_shortcut()
1134 if (entry->shortcut) { check_qos_and_open_shortcut()
1159 in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc); MPOA_res_reply_rcvd() local
1163 ddprintk("(%s) entry = %p", MPOA_res_reply_rcvd()
1164 mpc->dev->name, entry); MPOA_res_reply_rcvd()
1165 if (entry == NULL) { MPOA_res_reply_rcvd()
1166 pr_info("(%s) ARGH, received res. reply for an entry that doesn't exist.\n", MPOA_res_reply_rcvd()
1170 ddprintk_cont(" entry_state = %d ", entry->entry_state); MPOA_res_reply_rcvd()
1172 if (entry->entry_state == INGRESS_RESOLVED) { MPOA_res_reply_rcvd()
1173 pr_info("(%s) RESOLVED entry!\n", mpc->dev->name); MPOA_res_reply_rcvd()
1174 mpc->in_ops->put(entry); MPOA_res_reply_rcvd()
1178 entry->ctrl_info = msg->content.in_info; MPOA_res_reply_rcvd()
1179 do_gettimeofday(&(entry->tv)); MPOA_res_reply_rcvd()
1180 do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */ MPOA_res_reply_rcvd()
1181 entry->refresh_time = 0; MPOA_res_reply_rcvd()
1182 ddprintk_cont("entry->shortcut = %p\n", entry->shortcut); MPOA_res_reply_rcvd()
1184 if (entry->entry_state == INGRESS_RESOLVING && MPOA_res_reply_rcvd()
1185 entry->shortcut != NULL) { MPOA_res_reply_rcvd()
1186 entry->entry_state = INGRESS_RESOLVED; MPOA_res_reply_rcvd()
1187 mpc->in_ops->put(entry); MPOA_res_reply_rcvd()
1191 if (entry->shortcut != NULL) { MPOA_res_reply_rcvd()
1192 pr_info("(%s) entry->shortcut != NULL, impossible!\n", MPOA_res_reply_rcvd()
1194 mpc->in_ops->put(entry); MPOA_res_reply_rcvd()
1198 check_qos_and_open_shortcut(msg, mpc, entry); MPOA_res_reply_rcvd()
1199 entry->entry_state = INGRESS_RESOLVED; MPOA_res_reply_rcvd()
1200 mpc->in_ops->put(entry); MPOA_res_reply_rcvd()
1210 in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask); ingress_purge_rcvd() local
1212 if (entry == NULL) { ingress_purge_rcvd()
1213 pr_info("(%s) purge for a non-existing entry, ip = %pI4\n", ingress_purge_rcvd()
1219 dprintk("(%s) removing an ingress entry, ip = %pI4\n", ingress_purge_rcvd()
1222 mpc->in_ops->remove_entry(entry, mpc); ingress_purge_rcvd()
1224 mpc->in_ops->put(entry); ingress_purge_rcvd()
1225 entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask); ingress_purge_rcvd()
1226 } while (entry != NULL); ingress_purge_rcvd()
1232 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc); egress_purge_rcvd() local
1234 if (entry == NULL) { egress_purge_rcvd()
1235 dprintk("(%s) purge for a non-existing entry\n", egress_purge_rcvd()
1241 mpc->eg_ops->remove_entry(entry, mpc); egress_purge_rcvd()
1244 mpc->eg_ops->put(entry); egress_purge_rcvd()
1247 static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry) purge_egress_shortcut() argument
1269 if (entry != NULL) purge_egress_shortcut()
1270 purge_msg->content.eg_info = entry->ctrl_info; purge_egress_shortcut()
1286 eg_cache_entry *entry; mps_death() local
1297 entry = mpc->eg_cache; mps_death()
1298 while (entry != NULL) { mps_death()
1299 purge_egress_shortcut(entry->shortcut, entry); mps_death()
1300 entry = entry->next; mps_death()
1312 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(msg->content.eg_info.cache_id, mpc); MPOA_cache_impos_rcvd() local
1315 dprintk("(%s) entry = %p, holding_time = %u\n", MPOA_cache_impos_rcvd()
1316 mpc->dev->name, entry, holding_time); MPOA_cache_impos_rcvd()
1317 if (entry == NULL && holding_time) { MPOA_cache_impos_rcvd()
1318 entry = mpc->eg_ops->add_entry(msg, mpc); MPOA_cache_impos_rcvd()
1319 mpc->eg_ops->put(entry); MPOA_cache_impos_rcvd()
1323 mpc->eg_ops->update(entry, holding_time); MPOA_cache_impos_rcvd()
1328 mpc->eg_ops->remove_entry(entry, mpc); MPOA_cache_impos_rcvd()
1331 mpc->eg_ops->put(entry); MPOA_cache_impos_rcvd()
1392 eg_cache_entry *entry; clean_up() local
1398 entry = mpc->eg_cache; clean_up()
1399 while (entry != NULL) { clean_up()
1400 msg->content.eg_info = entry->ctrl_info; clean_up()
1401 dprintk("cache_id %u\n", entry->ctrl_info.cache_id); clean_up()
1403 entry = entry->next; clean_up()
1525 dprintk("freeing qos entry %p\n", qos); atm_mpoa_cleanup()
1117 check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_client *client, in_cache_entry *entry) check_qos_and_open_shortcut() argument
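
atm_mpoa_add_qos() above does what its comment promises, "overwrites the old entry or makes a new one": it first searches the QoS list and updates in place on a hit, otherwise it pushes a fresh entry onto the head. A reduced sketch of that add-or-update pattern (the qos payload is collapsed to an int):

```c
#include <stdlib.h>

struct qos_entry {
	struct qos_entry *next;
	unsigned int ipaddr;
	int qos;
};

static struct qos_entry *qos_head;

struct qos_entry *qos_add(unsigned int ipaddr, int qos)
{
	struct qos_entry *e;

	for (e = qos_head; e; e = e->next)	/* hit: update in place */
		if (e->ipaddr == ipaddr) {
			e->qos = qos;
			return e;
		}

	e = malloc(sizeof(*e));			/* miss: prepend a new entry */
	if (!e)
		return NULL;
	e->ipaddr = ipaddr;
	e->qos = qos;
	e->next = qos_head;
	qos_head = e;
	return e;
}
```

Searching before inserting keeps the list duplicate-free, and pushing at the head makes the insert itself O(1).
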
mpoa_caches.h
40 void (*put)(in_cache_entry *entry);
43 int (*cache_hit)(in_cache_entry *entry,
70 void (*put)(eg_cache_entry *entry);
71 void (*remove_entry)(eg_cache_entry *entry, struct mpoa_client *client);
72 void (*update)(eg_cache_entry *entry, uint16_t holding_time);
78 /* Ingress cache entry states */
90 /* Egress cache entry states */
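
mpoa_caches.h above exposes each cache as a table of function pointers, which is why mpc.c always goes through mpc->in_ops->get(), ->put() and friends instead of touching the lists directly. A trimmed sketch of that ops-table pattern with stub implementations (struct fields are simplified for illustration):

```c
struct in_cache_entry { int refs; };
struct mpoa_client { struct in_cache_entry *in_cache; };

/* The ops table decouples callers from the cache implementation. */
struct in_cache_ops {
	struct in_cache_entry *(*get)(unsigned int dst_ip,
				      struct mpoa_client *mpc);
	void (*put)(struct in_cache_entry *entry);
};

static struct in_cache_entry *in_cache_get(unsigned int dst_ip,
					   struct mpoa_client *mpc)
{
	(void)dst_ip;
	return mpc->in_cache;		/* stub lookup */
}

static void in_cache_put(struct in_cache_entry *entry)
{
	entry->refs--;			/* stub release */
}

static const struct in_cache_ops ingress_ops = {
	.get = in_cache_get,
	.put = in_cache_put,
};

int main(void)
{
	struct in_cache_entry e = { .refs = 1 };
	struct mpoa_client mpc = { .in_cache = &e };
	const struct in_cache_ops *ops = &ingress_ops;

	ops->put(ops->get(0, &mpc));	/* callers only see the table */
	return e.refs;			/* 0 after the put */
}
```

Swapping in a different table (as the kernel does for the ingress and egress caches) changes the behaviour without touching any call site.
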
addr.c
58 list_for_each_entry_safe(this, p, head, entry) { list_for_each_entry_safe()
59 list_del(&this->entry); list_for_each_entry_safe()
83 list_for_each_entry(this, head, entry) { list_for_each_entry()
95 list_add(&this->entry, head);
118 list_for_each_entry(this, head, entry) { list_for_each_entry()
120 list_del(&this->entry); list_for_each_entry()
146 list_for_each_entry(this, head, entry) atm_get_addr()
153 list_for_each_entry(this, head, entry) atm_get_addr()
/linux-4.1.27/arch/s390/include/asm/
pci_dma.h
97 static inline void set_pt_pfaa(unsigned long *entry, void *pfaa) set_pt_pfaa() argument
99 *entry &= ZPCI_PTE_FLAG_MASK; set_pt_pfaa()
100 *entry |= ((unsigned long) pfaa & ZPCI_PTE_ADDR_MASK); set_pt_pfaa()
103 static inline void set_rt_sto(unsigned long *entry, void *sto) set_rt_sto() argument
105 *entry &= ZPCI_RTE_FLAG_MASK; set_rt_sto()
106 *entry |= ((unsigned long) sto & ZPCI_RTE_ADDR_MASK); set_rt_sto()
107 *entry |= ZPCI_TABLE_TYPE_RTX; set_rt_sto()
110 static inline void set_st_pto(unsigned long *entry, void *pto) set_st_pto() argument
112 *entry &= ZPCI_STE_FLAG_MASK; set_st_pto()
113 *entry |= ((unsigned long) pto & ZPCI_STE_ADDR_MASK); set_st_pto()
114 *entry |= ZPCI_TABLE_TYPE_SX; set_st_pto()
117 static inline void validate_rt_entry(unsigned long *entry) validate_rt_entry() argument
119 *entry &= ~ZPCI_TABLE_VALID_MASK; validate_rt_entry()
120 *entry &= ~ZPCI_TABLE_OFFSET_MASK; validate_rt_entry()
121 *entry |= ZPCI_TABLE_VALID; validate_rt_entry()
122 *entry |= ZPCI_TABLE_LEN_RTX; validate_rt_entry()
125 static inline void validate_st_entry(unsigned long *entry) validate_st_entry() argument
127 *entry &= ~ZPCI_TABLE_VALID_MASK; validate_st_entry()
128 *entry |= ZPCI_TABLE_VALID; validate_st_entry()
131 static inline void invalidate_table_entry(unsigned long *entry) invalidate_table_entry() argument
133 *entry &= ~ZPCI_TABLE_VALID_MASK; invalidate_table_entry()
134 *entry |= ZPCI_TABLE_INVALID; invalidate_table_entry()
137 static inline void invalidate_pt_entry(unsigned long *entry) invalidate_pt_entry() argument
139 WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID); invalidate_pt_entry()
140 *entry &= ~ZPCI_PTE_VALID_MASK; invalidate_pt_entry()
141 *entry |= ZPCI_PTE_INVALID; invalidate_pt_entry()
144 static inline void validate_pt_entry(unsigned long *entry) validate_pt_entry() argument
146 WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID); validate_pt_entry()
147 *entry &= ~ZPCI_PTE_VALID_MASK; validate_pt_entry()
148 *entry |= ZPCI_PTE_VALID; validate_pt_entry()
151 static inline void entry_set_protected(unsigned long *entry) entry_set_protected() argument
153 *entry &= ~ZPCI_TABLE_PROT_MASK; entry_set_protected()
154 *entry |= ZPCI_TABLE_PROTECTED; entry_set_protected()
157 static inline void entry_clr_protected(unsigned long *entry) entry_clr_protected() argument
159 *entry &= ~ZPCI_TABLE_PROT_MASK; entry_clr_protected()
160 *entry |= ZPCI_TABLE_UNPROTECTED; entry_clr_protected()
163 static inline int reg_entry_isvalid(unsigned long entry) reg_entry_isvalid() argument
165 return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID; reg_entry_isvalid()
168 static inline int pt_entry_isvalid(unsigned long entry) pt_entry_isvalid() argument
170 return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID; pt_entry_isvalid()
173 static inline int entry_isprotected(unsigned long entry) entry_isprotected() argument
175 return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED; entry_isprotected()
178 static inline unsigned long *get_rt_sto(unsigned long entry) get_rt_sto() argument
180 return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX) get_rt_sto()
181 ? (unsigned long *) (entry & ZPCI_RTE_ADDR_MASK) get_rt_sto()
185 static inline unsigned long *get_st_pto(unsigned long entry) get_st_pto() argument
187 return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX) get_st_pto()
188 ? (unsigned long *) (entry & ZPCI_STE_ADDR_MASK) get_st_pto()
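
Every helper above follows one read-modify-write recipe on the raw table entry: clear the relevant field with a mask, OR in the new value, and test state by comparing the masked field. A compact, runnable sketch of that recipe with made-up mask values (the real ZPCI_* constants differ):

```c
#include <stdio.h>

#define TABLE_VALID_MASK	0x3UL
#define TABLE_VALID		0x0UL
#define TABLE_INVALID		0x1UL

static inline void validate_entry(unsigned long *entry)
{
	*entry &= ~TABLE_VALID_MASK;	/* clear the valid field */
	*entry |= TABLE_VALID;		/* mark the entry valid */
}

static inline void invalidate_entry(unsigned long *entry)
{
	*entry &= ~TABLE_VALID_MASK;
	*entry |= TABLE_INVALID;
}

static inline int entry_isvalid(unsigned long entry)
{
	return (entry & TABLE_VALID_MASK) == TABLE_VALID;
}

int main(void)
{
	unsigned long e = 0;

	validate_entry(&e);
	printf("valid=%d\n", entry_isvalid(e));		/* valid=1 */
	invalidate_entry(&e);
	printf("valid=%d\n", entry_isvalid(e));		/* valid=0 */
	return 0;
}
```
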
pci_io.h
35 struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
36 u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
50 struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
51 u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
138 struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)]; zpci_memcpy_fromio() local
145 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size); zpci_memcpy_fromio()
159 struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)]; zpci_memcpy_toio() local
169 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size); zpci_memcpy_toio()
/linux-4.1.27/arch/arm/kernel/
jump_label.c
8 static void __arch_jump_label_transform(struct jump_entry *entry, __arch_jump_label_transform() argument
12 void *addr = (void *)entry->code; __arch_jump_label_transform()
16 insn = arm_gen_branch(entry->code, entry->target); __arch_jump_label_transform()
26 void arch_jump_label_transform(struct jump_entry *entry, arch_jump_label_transform() argument
29 __arch_jump_label_transform(entry, type, false); arch_jump_label_transform()
32 void arch_jump_label_transform_static(struct jump_entry *entry, arch_jump_label_transform_static() argument
35 __arch_jump_label_transform(entry, type, true); arch_jump_label_transform_static()
perf_callchain.c
34 struct perf_callchain_entry *entry) user_backtrace()
49 perf_callchain_store(entry, buftail.lr); user_backtrace()
62 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) perf_callchain_user() argument
71 perf_callchain_store(entry, regs->ARM_pc); perf_callchain_user()
78 while ((entry->nr < PERF_MAX_STACK_DEPTH) && perf_callchain_user()
80 tail = user_backtrace(tail, entry); perf_callchain_user()
92 struct perf_callchain_entry *entry = data; callchain_trace() local
93 perf_callchain_store(entry, fr->pc); callchain_trace()
98 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) perf_callchain_kernel() argument
108 walk_stackframe(&fr, callchain_trace, entry); perf_callchain_kernel()
33 user_backtrace(struct frame_tail __user *tail, struct perf_callchain_entry *entry) user_backtrace() argument
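
user_backtrace() above chases the saved frame records on the user stack, storing one return address per frame until the chain ends or PERF_MAX_STACK_DEPTH is reached, and giving up unless each next frame sits strictly higher on the stack. A user-space sketch of that walk over a synthetic frame chain (the frame layout is simplified for illustration):

```c
#include <stdio.h>
#include <stddef.h>

#define MAX_STACK_DEPTH 8

struct frame_tail {
	struct frame_tail *fp;	/* older frame's record */
	unsigned long lr;	/* return address of this frame */
};

static void backtrace(struct frame_tail *tail)
{
	int depth = 0;

	while (tail && depth < MAX_STACK_DEPTH) {
		printf("frame %d: lr=%#lx\n", depth++, tail->lr);
		if (tail->fp <= tail)	/* frames must move up the stack */
			break;
		tail = tail->fp;
	}
}

int main(void)
{
	static struct frame_tail frames[3];

	frames[0] = (struct frame_tail){ &frames[1], 0x1000 };
	frames[1] = (struct frame_tail){ &frames[2], 0x2000 };
	frames[2] = (struct frame_tail){ NULL,       0x3000 };
	backtrace(&frames[0]);	/* prints three frames */
	return 0;
}
```
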
/linux-4.1.27/sound/pci/ctxfi/
ctimap.c
22 int input_mapper_add(struct list_head *mappers, struct imapper *entry, input_mapper_add() argument
31 entry->next = entry->addr; input_mapper_add()
32 map_op(data, entry); input_mapper_add()
33 list_add(&entry->list, head); input_mapper_add()
39 if (pos_ent->slot > entry->slot) { list_for_each()
50 __list_add(&entry->list, pos->prev, pos);
54 list_add_tail(&entry->list, head);
60 entry->next = pos_ent->addr;
61 map_op(data, entry);
62 pre_ent->next = entry->addr;
68 int input_mapper_delete(struct list_head *mappers, struct imapper *entry, input_mapper_delete() argument
79 pre = (entry->list.prev == head) ? head->prev : entry->list.prev; input_mapper_delete()
80 next = (entry->list.next == head) ? head->next : entry->list.next; input_mapper_delete()
82 if (pre == &entry->list) { input_mapper_delete()
83 /* entry is the only one node in mappers list */ input_mapper_delete()
84 entry->next = entry->addr = entry->user = entry->slot = 0; input_mapper_delete()
85 map_op(data, entry); input_mapper_delete()
86 list_del(&entry->list); input_mapper_delete()
95 list_del(&entry->list); input_mapper_delete()
102 struct imapper *entry; free_input_mapper_list() local
108 entry = list_entry(pos, struct imapper, list); free_input_mapper_list()
109 kfree(entry); free_input_mapper_list()
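
input_mapper_add() above keeps the mapper list ordered by slot, scanning for the first entry with a larger slot and linking the newcomer in front of it (the hardware-facing next-address chaining is handled separately). A sketch of just that sorted-insert step, again using a pointer-to-pointer walk (types reduced for illustration):

```c
struct imapper {
	struct imapper *next;
	unsigned short slot;
};

/* Insert so the list stays sorted by ascending slot number. */
void mapper_insert(struct imapper **head, struct imapper *entry)
{
	struct imapper **walk = head;

	while (*walk && (*walk)->slot <= entry->slot)
		walk = &(*walk)->next;	/* find the first larger slot */
	entry->next = *walk;		/* link in before it */
	*walk = entry;
}
```
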
ctvmem.c
35 struct ct_vm_block *block = NULL, *entry; get_vm_block() local
47 entry = list_entry(pos, struct ct_vm_block, list); get_vm_block()
48 if (entry->size >= size) get_vm_block()
54 if (entry->size == size) { get_vm_block()
56 list_move(&entry->list, &vm->used); get_vm_block()
58 block = entry; get_vm_block()
66 block->addr = entry->addr; get_vm_block()
69 entry->addr += size; get_vm_block()
70 entry->size -= size; get_vm_block()
80 struct ct_vm_block *entry, *pre_ent; put_vm_block() local
90 entry = list_entry(pos, struct ct_vm_block, list); put_vm_block()
91 if (entry->addr >= (block->addr + block->size)) put_vm_block()
96 entry = block; put_vm_block()
98 if ((block->addr + block->size) == entry->addr) { put_vm_block()
99 entry->addr = block->addr; put_vm_block()
100 entry->size += block->size; put_vm_block()
104 entry = block; put_vm_block()
108 pos = &entry->list; put_vm_block()
111 entry = list_entry(pos, struct ct_vm_block, list); put_vm_block()
113 if ((pre_ent->addr + pre_ent->size) > entry->addr) put_vm_block()
116 pre_ent->size += entry->size; put_vm_block()
118 kfree(entry); put_vm_block()
226 struct ct_vm_block *entry; ct_vm_destroy() local
232 entry = list_entry(pos, struct ct_vm_block, list); ct_vm_destroy()
233 kfree(entry); ct_vm_destroy()
238 entry = list_entry(pos, struct ct_vm_block, list); ct_vm_destroy()
239 kfree(entry); ct_vm_destroy()
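
get_vm_block() above is a first-fit allocator over a free list: the first block large enough wins, an exact match is unlinked whole, and a larger block is split with the remainder left on the free list; put_vm_block() then reinserts freed blocks in address order and merges adjacent neighbours. A compact sketch of the allocation half (list handling reduced to a singly linked free list):

```c
#include <stdlib.h>

struct vm_block {
	struct vm_block *next;
	unsigned long addr;
	unsigned long size;
};

struct vm_block *alloc_block(struct vm_block **free_list, unsigned long size)
{
	struct vm_block **walk, *entry, *block;

	for (walk = free_list; (entry = *walk); walk = &entry->next) {
		if (entry->size < size)
			continue;
		if (entry->size == size) {	/* exact fit: unlink whole */
			*walk = entry->next;
			return entry;
		}
		block = malloc(sizeof(*block));	/* split: carve off the front */
		if (!block)
			return NULL;
		block->addr = entry->addr;
		block->size = size;
		block->next = NULL;
		entry->addr += size;		/* shrink the free remainder */
		entry->size -= size;
		return block;
	}
	return NULL;				/* nothing big enough */
}
```
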
ctdaio.c
162 struct imapper *entry; dao_set_left_input() local
166 entry = kzalloc((sizeof(*entry) * daio->rscl.msr), GFP_KERNEL); dao_set_left_input()
167 if (!entry) dao_set_left_input()
174 for (i = 0; i < daio->rscl.msr; i++, entry++) { dao_set_left_input()
175 entry->slot = input->ops->output_slot(input); dao_set_left_input()
176 entry->user = entry->addr = daio->rscl.ops->index(&daio->rscl); dao_set_left_input()
177 dao->mgr->imap_add(dao->mgr, entry); dao_set_left_input()
178 dao->imappers[i] = entry; dao_set_left_input()
191 struct imapper *entry; dao_set_right_input() local
195 entry = kzalloc((sizeof(*entry) * daio->rscr.msr), GFP_KERNEL); dao_set_right_input()
196 if (!entry) dao_set_right_input()
203 for (i = 0; i < daio->rscr.msr; i++, entry++) { dao_set_right_input()
204 entry->slot = input->ops->output_slot(input); dao_set_right_input()
205 entry->user = entry->addr = daio->rscr.ops->index(&daio->rscr); dao_set_right_input()
206 dao->mgr->imap_add(dao->mgr, entry); dao_set_right_input()
207 dao->imappers[daio->rscl.msr + i] = entry; dao_set_right_input()
220 struct imapper *entry; dao_clear_left_input() local
227 entry = dao->imappers[0]; dao_clear_left_input()
228 dao->mgr->imap_delete(dao->mgr, entry); dao_clear_left_input()
231 entry = dao->imappers[i]; dao_clear_left_input()
232 dao->mgr->imap_delete(dao->mgr, entry); dao_clear_left_input()
244 struct imapper *entry; dao_clear_right_input() local
251 entry = dao->imappers[daio->rscl.msr]; dao_clear_right_input()
252 dao->mgr->imap_delete(dao->mgr, entry); dao_clear_right_input()
255 entry = dao->imappers[daio->rscl.msr + i]; dao_clear_right_input()
256 dao->mgr->imap_delete(dao->mgr, entry); dao_clear_right_input()
635 static int daio_map_op(void *data, struct imapper *entry) daio_map_op() argument
640 hw->daio_mgr_set_imaparc(mgr->ctrl_blk, entry->slot); daio_map_op()
641 hw->daio_mgr_set_imapnxt(mgr->ctrl_blk, entry->next); daio_map_op()
642 hw->daio_mgr_set_imapaddr(mgr->ctrl_blk, entry->addr); daio_map_op()
648 static int daio_imap_add(struct daio_mgr *mgr, struct imapper *entry) daio_imap_add() argument
654 if (!entry->addr && mgr->init_imap_added) { daio_imap_add()
659 err = input_mapper_add(&mgr->imappers, entry, daio_map_op, mgr); daio_imap_add()
665 static int daio_imap_delete(struct daio_mgr *mgr, struct imapper *entry) daio_imap_delete() argument
671 err = input_mapper_delete(&mgr->imappers, entry, daio_map_op, mgr); daio_imap_delete()
694 struct imapper *entry; daio_mgr_create() local
708 entry = kzalloc(sizeof(*entry), GFP_KERNEL); daio_mgr_create()
709 if (!entry) { daio_mgr_create()
713 entry->slot = entry->addr = entry->next = entry->user = 0; daio_mgr_create()
714 list_add(&entry->list, &daio_mgr->imappers); daio_mgr_create()
715 daio_mgr->init_imap = entry; daio_mgr_create()
/linux-4.1.27/sound/core/
info.c
76 struct snd_info_entry *entry; member in struct:snd_info_private_data
82 static void snd_info_disconnect(struct snd_info_entry *entry);
159 struct snd_info_entry *entry; snd_info_entry_llseek() local
163 entry = data->entry; snd_info_entry_llseek()
164 mutex_lock(&entry->access); snd_info_entry_llseek()
165 if (entry->content == SNDRV_INFO_CONTENT_DATA && snd_info_entry_llseek()
166 entry->c.ops->llseek) { snd_info_entry_llseek()
167 offset = entry->c.ops->llseek(entry, snd_info_entry_llseek()
172 if (entry->content == SNDRV_INFO_CONTENT_DATA) snd_info_entry_llseek()
173 size = entry->size; snd_info_entry_llseek()
197 mutex_unlock(&entry->access); snd_info_entry_llseek()
205 struct snd_info_entry *entry; snd_info_entry_read() local
218 entry = data->entry; snd_info_entry_read()
219 switch (entry->content) { snd_info_entry_read()
232 if (pos >= entry->size) snd_info_entry_read()
234 if (entry->c.ops->read) { snd_info_entry_read()
235 size = entry->size - pos; snd_info_entry_read()
237 size = entry->c.ops->read(entry, snd_info_entry_read()
252 struct snd_info_entry *entry; snd_info_entry_write() local
260 entry = data->entry; snd_info_entry_write()
266 switch (entry->content) { snd_info_entry_write()
271 mutex_lock(&entry->access); snd_info_entry_write()
274 mutex_unlock(&entry->access); snd_info_entry_write()
279 mutex_unlock(&entry->access); snd_info_entry_write()
283 mutex_unlock(&entry->access); snd_info_entry_write()
287 if (entry->c.ops->write && count > 0) { snd_info_entry_write()
288 size_t maxsize = entry->size - pos; snd_info_entry_write()
290 size = entry->c.ops->write(entry, snd_info_entry_write()
303 struct snd_info_entry *entry; snd_info_entry_open() local
309 entry = PDE_DATA(inode); snd_info_entry_open()
310 if (entry == NULL || ! entry->p) { snd_info_entry_open()
314 if (!try_module_get(entry->module)) { snd_info_entry_open()
320 if ((entry->content == SNDRV_INFO_CONTENT_DATA && snd_info_entry_open()
321 entry->c.ops->read == NULL)) { snd_info_entry_open()
327 if ((entry->content == SNDRV_INFO_CONTENT_DATA && snd_info_entry_open()
328 entry->c.ops->write == NULL)) { snd_info_entry_open()
338 data->entry = entry; snd_info_entry_open()
339 switch (entry->content) { snd_info_entry_open()
363 if (entry->c.ops->open) { snd_info_entry_open()
364 if ((err = entry->c.ops->open(entry, mode, snd_info_entry_open()
374 if (entry->content == SNDRV_INFO_CONTENT_TEXT && snd_info_entry_open()
376 if (entry->c.text.read) { snd_info_entry_open()
377 mutex_lock(&entry->access); snd_info_entry_open()
378 entry->c.text.read(entry, data->rbuffer); snd_info_entry_open()
379 mutex_unlock(&entry->access); snd_info_entry_open()
396 module_put(entry->module); snd_info_entry_open()
404 struct snd_info_entry *entry; snd_info_entry_release() local
410 entry = data->entry; snd_info_entry_release()
411 switch (entry->content) { snd_info_entry_release()
418 if (entry->c.text.write) { snd_info_entry_release()
419 entry->c.text.write(entry, data->wbuffer); snd_info_entry_release()
421 if (entry->card) snd_info_entry_release()
422 dev_warn(entry->card->dev, "info: data write error to %s (%i)\n", snd_info_entry_release()
423 entry->name, snd_info_entry_release()
427 entry->name, snd_info_entry_release()
436 if (entry->c.ops->release) snd_info_entry_release()
437 entry->c.ops->release(entry, mode, snd_info_entry_release()
441 module_put(entry->module); snd_info_entry_release()
449 struct snd_info_entry *entry; snd_info_entry_poll() local
455 entry = data->entry; snd_info_entry_poll()
457 switch (entry->content) { snd_info_entry_poll()
459 if (entry->c.ops->poll) snd_info_entry_poll()
460 return entry->c.ops->poll(entry, snd_info_entry_poll()
463 if (entry->c.ops->read) snd_info_entry_poll()
465 if (entry->c.ops->write) snd_info_entry_poll()
476 struct snd_info_entry *entry; snd_info_entry_ioctl() local
481 entry = data->entry; snd_info_entry_ioctl()
482 switch (entry->content) { snd_info_entry_ioctl()
484 if (entry->c.ops->ioctl) snd_info_entry_ioctl()
485 return entry->c.ops->ioctl(entry, snd_info_entry_ioctl()
497 struct snd_info_entry *entry; snd_info_entry_mmap() local
502 entry = data->entry; snd_info_entry_mmap()
503 switch (entry->content) { snd_info_entry_mmap()
505 if (entry->c.ops->mmap) snd_info_entry_mmap()
506 return entry->c.ops->mmap(entry, snd_info_entry_mmap()
537 struct snd_info_entry *entry; snd_info_init() local
538 if ((entry = snd_info_create_module_entry(THIS_MODULE, "oss", NULL)) == NULL) snd_info_init()
540 entry->mode = S_IFDIR | S_IRUGO | S_IXUGO; snd_info_init()
541 if (snd_info_register(entry) < 0) { snd_info_init()
542 snd_info_free_entry(entry); snd_info_init()
545 snd_oss_root = entry; snd_info_init()
550 struct snd_info_entry *entry; snd_info_init() local
551 if ((entry = snd_info_create_module_entry(THIS_MODULE, "seq", NULL)) == NULL) snd_info_init()
553 entry->mode = S_IFDIR | S_IRUGO | S_IXUGO; snd_info_init()
554 if (snd_info_register(entry) < 0) { snd_info_init()
555 snd_info_free_entry(entry); snd_info_init()
558 snd_seq_root = entry; snd_info_init()
598 struct snd_info_entry *entry; snd_info_card_create() local
604 if ((entry = snd_info_create_module_entry(card->module, str, NULL)) == NULL) snd_info_card_create()
606 entry->mode = S_IFDIR | S_IRUGO | S_IXUGO; snd_info_card_create()
607 if (snd_info_register(entry) < 0) { snd_info_card_create()
608 snd_info_free_entry(entry); snd_info_card_create()
611 card->proc_root = entry; snd_info_card_create()
757 * snd_info_create_entry - create an info entry
760 * Creates an info entry with the given file name and initializes as
770 struct snd_info_entry *entry; snd_info_create_entry() local
771 entry = kzalloc(sizeof(*entry), GFP_KERNEL); snd_info_create_entry()
772 if (entry == NULL) snd_info_create_entry()
774 entry->name = kstrdup(name, GFP_KERNEL); snd_info_create_entry()
775 if (entry->name == NULL) { snd_info_create_entry()
776 kfree(entry); snd_info_create_entry()
779 entry->mode = S_IFREG | S_IRUGO; snd_info_create_entry()
780 entry->content = SNDRV_INFO_CONTENT_TEXT; snd_info_create_entry()
781 mutex_init(&entry->access); snd_info_create_entry()
782 INIT_LIST_HEAD(&entry->children); snd_info_create_entry()
783 INIT_LIST_HEAD(&entry->list); snd_info_create_entry()
784 return entry; snd_info_create_entry()
788 * snd_info_create_module_entry - create an info entry for the given module
793 * Creates a new info entry and assigns it to the given module.
801 struct snd_info_entry *entry = snd_info_create_entry(name); snd_info_create_module_entry() local
802 if (entry) { snd_info_create_module_entry()
803 entry->module = module; snd_info_create_module_entry()
804 entry->parent = parent; snd_info_create_module_entry()
806 return entry; snd_info_create_module_entry()
812 * snd_info_create_card_entry - create an info entry for the given card
817 * Creates a new info entry and assigns it to the given card.
825 struct snd_info_entry *entry = snd_info_create_entry(name); snd_info_create_card_entry() local
826 if (entry) { snd_info_create_card_entry()
827 entry->module = card->module; snd_info_create_card_entry()
828 entry->card = card; snd_info_create_card_entry()
829 entry->parent = parent; snd_info_create_card_entry()
831 return entry; snd_info_create_card_entry()
836 static void snd_info_disconnect(struct snd_info_entry *entry) snd_info_disconnect() argument
841 list_for_each_safe(p, n, &entry->children) { snd_info_disconnect()
845 if (! entry->p) snd_info_disconnect()
847 list_del_init(&entry->list); snd_info_disconnect()
848 root = entry->parent == NULL ? snd_proc_root : entry->parent->p; snd_info_disconnect()
850 proc_remove(entry->p); snd_info_disconnect()
851 entry->p = NULL; snd_info_disconnect()
856 struct snd_info_entry *entry = device->device_data; snd_info_dev_free_entry() local
857 snd_info_free_entry(entry); snd_info_dev_free_entry()
863 struct snd_info_entry *entry = device->device_data; snd_info_dev_register_entry() local
864 return snd_info_register(entry); snd_info_dev_register_entry()
868 * snd_card_proc_new - create an info entry for the given card
871 * @entryp: the pointer to store the new info entry
873 * Creates a new info entry and assigns it to the given card.
875 * info entry as an ALSA device component, so that it can be
877 * Also, you don't have to register this entry via snd_info_register(),
882 * For releasing this entry, use snd_device_free() instead of
895 struct snd_info_entry *entry; snd_card_proc_new() local
898 entry = snd_info_create_card_entry(card, name, card->proc_root); snd_card_proc_new()
899 if (! entry) snd_card_proc_new()
901 if ((err = snd_device_new(card, SNDRV_DEV_INFO, entry, &ops)) < 0) { snd_card_proc_new()
902 snd_info_free_entry(entry); snd_card_proc_new()
906 *entryp = entry; snd_card_proc_new()
913 * snd_info_free_entry - release the info entry
914 * @entry: the info entry
916 * Releases the info entry. Don't call this after registered.
918 void snd_info_free_entry(struct snd_info_entry * entry) snd_info_free_entry() argument
920 if (entry == NULL) snd_info_free_entry()
922 if (entry->p) { snd_info_free_entry()
924 snd_info_disconnect(entry); snd_info_free_entry()
927 kfree(entry->name); snd_info_free_entry()
928 if (entry->private_free) snd_info_free_entry()
929 entry->private_free(entry); snd_info_free_entry()
930 kfree(entry); snd_info_free_entry()
936 * snd_info_register - register the info entry
937 * @entry: the info entry
939 * Registers the proc info entry.
943 int snd_info_register(struct snd_info_entry * entry) snd_info_register() argument
947 if (snd_BUG_ON(!entry)) snd_info_register()
949 root = entry->parent == NULL ? snd_proc_root : entry->parent->p; snd_info_register()
951 if (S_ISDIR(entry->mode)) { snd_info_register()
952 p = proc_mkdir_mode(entry->name, entry->mode, root); snd_info_register()
958 p = proc_create_data(entry->name, entry->mode, root, snd_info_register()
959 &snd_info_entry_operations, entry); snd_info_register()
964 proc_set_size(p, entry->size); snd_info_register()
966 entry->p = p; snd_info_register()
967 if (entry->parent) snd_info_register()
968 list_add_tail(&entry->list, &entry->parent->children); snd_info_register()
981 static void snd_info_version_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) snd_info_version_read() argument
990 struct snd_info_entry *entry; snd_info_version_init() local
992 entry = snd_info_create_module_entry(THIS_MODULE, "version", NULL); snd_info_version_init()
993 if (entry == NULL) snd_info_version_init()
995 entry->c.text.read = snd_info_version_read; snd_info_version_init()
996 if (snd_info_register(entry) < 0) { snd_info_version_init()
997 snd_info_free_entry(entry); snd_info_version_init()
1000 snd_info_version_entry = entry; snd_info_version_init()
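The snd_info_* hits above all follow the same create / configure / register / free-on-error protocol, shown in full by snd_info_version_init(). A minimal sketch of a module-level text entry under that protocol (the entry name and read hook below are hypothetical, not part of the kernel):

        #include <linux/init.h>
        #include <linux/module.h>
        #include <sound/info.h>

        static void example_read(struct snd_info_entry *entry,
                                 struct snd_info_buffer *buffer)
        {
                /* text entries print into the buffer prepared at open time */
                snd_iprintf(buffer, "hello from %s\n", entry->name);
        }

        static int __init example_proc_init(void)
        {
                struct snd_info_entry *entry;

                entry = snd_info_create_module_entry(THIS_MODULE, "example", NULL);
                if (!entry)
                        return -ENOMEM;
                entry->c.text.read = example_read; /* content defaults to SNDRV_INFO_CONTENT_TEXT */
                if (snd_info_register(entry) < 0) {     /* creates the procfs node */
                        snd_info_free_entry(entry);     /* release the never-registered entry */
                        return -ENOMEM;
                }
                return 0;
        }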
pcm.c 369 static void snd_pcm_stream_proc_info_read(struct snd_info_entry *entry, snd_pcm_stream_proc_info_read() argument
372 snd_pcm_proc_info_read(((struct snd_pcm_str *)entry->private_data)->substream, snd_pcm_stream_proc_info_read()
376 static void snd_pcm_substream_proc_info_read(struct snd_info_entry *entry, snd_pcm_substream_proc_info_read() argument
379 snd_pcm_proc_info_read(entry->private_data, buffer); snd_pcm_substream_proc_info_read()
382 static void snd_pcm_substream_proc_hw_params_read(struct snd_info_entry *entry, snd_pcm_substream_proc_hw_params_read() argument
385 struct snd_pcm_substream *substream = entry->private_data; snd_pcm_substream_proc_hw_params_read()
419 static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry, snd_pcm_substream_proc_sw_params_read() argument
422 struct snd_pcm_substream *substream = entry->private_data; snd_pcm_substream_proc_sw_params_read()
447 static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry, snd_pcm_substream_proc_status_read() argument
450 struct snd_pcm_substream *substream = entry->private_data; snd_pcm_substream_proc_status_read()
484 static void snd_pcm_xrun_injection_write(struct snd_info_entry *entry, snd_pcm_xrun_injection_write() argument
487 struct snd_pcm_substream *substream = entry->private_data; snd_pcm_xrun_injection_write()
497 static void snd_pcm_xrun_debug_read(struct snd_info_entry *entry, snd_pcm_xrun_debug_read() argument
500 struct snd_pcm_str *pstr = entry->private_data; snd_pcm_xrun_debug_read()
504 static void snd_pcm_xrun_debug_write(struct snd_info_entry *entry, snd_pcm_xrun_debug_write() argument
507 struct snd_pcm_str *pstr = entry->private_data; snd_pcm_xrun_debug_write()
517 struct snd_info_entry *entry; snd_pcm_stream_proc_init() local
522 if ((entry = snd_info_create_card_entry(pcm->card, name, pcm->card->proc_root)) == NULL) snd_pcm_stream_proc_init()
524 entry->mode = S_IFDIR | S_IRUGO | S_IXUGO; snd_pcm_stream_proc_init()
525 if (snd_info_register(entry) < 0) { snd_pcm_stream_proc_init()
526 snd_info_free_entry(entry); snd_pcm_stream_proc_init()
529 pstr->proc_root = entry; snd_pcm_stream_proc_init()
531 if ((entry = snd_info_create_card_entry(pcm->card, "info", pstr->proc_root)) != NULL) { snd_pcm_stream_proc_init()
532 snd_info_set_text_ops(entry, pstr, snd_pcm_stream_proc_info_read); snd_pcm_stream_proc_init()
533 if (snd_info_register(entry) < 0) { snd_pcm_stream_proc_init()
534 snd_info_free_entry(entry); snd_pcm_stream_proc_init()
535 entry = NULL; snd_pcm_stream_proc_init()
538 pstr->proc_info_entry = entry; snd_pcm_stream_proc_init()
541 if ((entry = snd_info_create_card_entry(pcm->card, "xrun_debug", snd_pcm_stream_proc_init()
543 entry->c.text.read = snd_pcm_xrun_debug_read; snd_pcm_stream_proc_init()
544 entry->c.text.write = snd_pcm_xrun_debug_write; snd_pcm_stream_proc_init()
545 entry->mode |= S_IWUSR; snd_pcm_stream_proc_init()
546 entry->private_data = pstr; snd_pcm_stream_proc_init()
547 if (snd_info_register(entry) < 0) { snd_pcm_stream_proc_init()
548 snd_info_free_entry(entry); snd_pcm_stream_proc_init()
549 entry = NULL; snd_pcm_stream_proc_init()
552 pstr->proc_xrun_debug_entry = entry; snd_pcm_stream_proc_init()
572 struct snd_info_entry *entry; snd_pcm_substream_proc_init() local
579 if ((entry = snd_info_create_card_entry(card, name, substream->pstr->proc_root)) == NULL) snd_pcm_substream_proc_init()
581 entry->mode = S_IFDIR | S_IRUGO | S_IXUGO; snd_pcm_substream_proc_init()
582 if (snd_info_register(entry) < 0) { snd_pcm_substream_proc_init()
583 snd_info_free_entry(entry); snd_pcm_substream_proc_init()
586 substream->proc_root = entry; snd_pcm_substream_proc_init()
588 if ((entry = snd_info_create_card_entry(card, "info", substream->proc_root)) != NULL) { snd_pcm_substream_proc_init()
589 snd_info_set_text_ops(entry, substream, snd_pcm_substream_proc_init()
591 if (snd_info_register(entry) < 0) { snd_pcm_substream_proc_init()
592 snd_info_free_entry(entry); snd_pcm_substream_proc_init()
593 entry = NULL; snd_pcm_substream_proc_init()
596 substream->proc_info_entry = entry; snd_pcm_substream_proc_init()
598 if ((entry = snd_info_create_card_entry(card, "hw_params", substream->proc_root)) != NULL) { snd_pcm_substream_proc_init()
599 snd_info_set_text_ops(entry, substream, snd_pcm_substream_proc_init()
601 if (snd_info_register(entry) < 0) { snd_pcm_substream_proc_init()
602 snd_info_free_entry(entry); snd_pcm_substream_proc_init()
603 entry = NULL; snd_pcm_substream_proc_init()
606 substream->proc_hw_params_entry = entry; snd_pcm_substream_proc_init()
608 if ((entry = snd_info_create_card_entry(card, "sw_params", substream->proc_root)) != NULL) { snd_pcm_substream_proc_init()
609 snd_info_set_text_ops(entry, substream, snd_pcm_substream_proc_init()
611 if (snd_info_register(entry) < 0) { snd_pcm_substream_proc_init()
612 snd_info_free_entry(entry); snd_pcm_substream_proc_init()
613 entry = NULL; snd_pcm_substream_proc_init()
616 substream->proc_sw_params_entry = entry; snd_pcm_substream_proc_init()
618 if ((entry = snd_info_create_card_entry(card, "status", substream->proc_root)) != NULL) { snd_pcm_substream_proc_init()
619 snd_info_set_text_ops(entry, substream, snd_pcm_substream_proc_init()
621 if (snd_info_register(entry) < 0) { snd_pcm_substream_proc_init()
622 snd_info_free_entry(entry); snd_pcm_substream_proc_init()
623 entry = NULL; snd_pcm_substream_proc_init()
626 substream->proc_status_entry = entry; snd_pcm_substream_proc_init()
629 entry = snd_info_create_card_entry(card, "xrun_injection", snd_pcm_substream_proc_init()
631 if (entry) { snd_pcm_substream_proc_init()
632 entry->private_data = substream; snd_pcm_substream_proc_init()
633 entry->c.text.read = NULL; snd_pcm_substream_proc_init()
634 entry->c.text.write = snd_pcm_xrun_injection_write; snd_pcm_substream_proc_init()
635 entry->mode = S_IFREG | S_IWUSR; snd_pcm_substream_proc_init()
636 if (snd_info_register(entry) < 0) { snd_pcm_substream_proc_init()
637 snd_info_free_entry(entry); snd_pcm_substream_proc_init()
638 entry = NULL; snd_pcm_substream_proc_init()
641 substream->proc_xrun_injection_entry = entry; snd_pcm_substream_proc_init()
1189 static void snd_pcm_proc_read(struct snd_info_entry *entry, snd_pcm_proc_read() argument
1213 struct snd_info_entry *entry; snd_pcm_proc_init() local
1215 if ((entry = snd_info_create_module_entry(THIS_MODULE, "pcm", NULL)) != NULL) { snd_pcm_proc_init()
1216 snd_info_set_text_ops(entry, NULL, snd_pcm_proc_read); snd_pcm_proc_init()
1217 if (snd_info_register(entry) < 0) { snd_pcm_proc_init()
1218 snd_info_free_entry(entry); snd_pcm_proc_init()
1219 entry = NULL; snd_pcm_proc_init()
1222 snd_pcm_proc_entry = entry; snd_pcm_proc_init()
info_oss.c 93 static void snd_sndstat_proc_read(struct snd_info_entry *entry, snd_sndstat_proc_read() argument
117 struct snd_info_entry *entry; snd_info_minor_register() local
120 if ((entry = snd_info_create_module_entry(THIS_MODULE, "sndstat", snd_oss_root)) != NULL) { snd_info_minor_register()
121 entry->c.text.read = snd_sndstat_proc_read; snd_info_minor_register()
122 if (snd_info_register(entry) < 0) { snd_info_minor_register()
123 snd_info_free_entry(entry); snd_info_minor_register()
124 entry = NULL; snd_info_minor_register()
127 snd_sndstat_proc_entry = entry; snd_info_minor_register()
/linux-4.1.27/arch/cris/kernel/
asm-offsets.c 17 #define ENTRY(entry) DEFINE(PT_ ## entry, offsetof(struct pt_regs, entry)) main()
37 #define ENTRY(entry) DEFINE(TI_ ## entry, offsetof(struct thread_info, entry)) main()
43 #define ENTRY(entry) DEFINE(THREAD_ ## entry, offsetof(struct thread_struct, entry)) main()
53 #define ENTRY(entry) DEFINE(TASK_ ## entry, offsetof(struct task_struct, entry)) main()
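Each ENTRY() wrapper above expands to DEFINE(), the standard asm-offsets trick: the compile-time offsetof() constant is emitted into the compiler's assembly output as a marker line, and a Kbuild sed script turns each marker into a #define in the generated asm-offsets.h. A reduced sketch of the mechanism, assuming the 4.1-era DEFINE() from include/linux/kbuild.h:

        #include <linux/stddef.h>       /* offsetof() */

        /* Emits "->PT_r0 <const>" into the .s file; Kbuild rewrites each
         * such marker line into "#define PT_r0 <const>". */
        #define DEFINE(sym, val) \
                asm volatile("\n->" #sym " %0 " #val : : "i" (val))

        #define ENTRY(entry) DEFINE(PT_ ## entry, offsetof(struct pt_regs, entry))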
/linux-4.1.27/drivers/gpu/drm/
drm_scatter.c 50 static void drm_sg_cleanup(struct drm_sg_mem * entry) drm_sg_cleanup() argument
55 for (i = 0; i < entry->pages; i++) { drm_sg_cleanup()
56 page = entry->pagelist[i]; drm_sg_cleanup()
61 vfree(entry->virtual); drm_sg_cleanup()
63 kfree(entry->busaddr); drm_sg_cleanup()
64 kfree(entry->pagelist); drm_sg_cleanup()
65 kfree(entry); drm_sg_cleanup()
86 struct drm_sg_mem *entry; drm_legacy_sg_alloc() local
100 entry = kzalloc(sizeof(*entry), GFP_KERNEL); drm_legacy_sg_alloc()
101 if (!entry) drm_legacy_sg_alloc()
107 entry->pages = pages; drm_legacy_sg_alloc()
108 entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL); drm_legacy_sg_alloc()
109 if (!entry->pagelist) { drm_legacy_sg_alloc()
110 kfree(entry); drm_legacy_sg_alloc()
114 entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL); drm_legacy_sg_alloc()
115 if (!entry->busaddr) { drm_legacy_sg_alloc()
116 kfree(entry->pagelist); drm_legacy_sg_alloc()
117 kfree(entry); drm_legacy_sg_alloc()
121 entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT); drm_legacy_sg_alloc()
122 if (!entry->virtual) { drm_legacy_sg_alloc()
123 kfree(entry->busaddr); drm_legacy_sg_alloc()
124 kfree(entry->pagelist); drm_legacy_sg_alloc()
125 kfree(entry); drm_legacy_sg_alloc()
132 memset(entry->virtual, 0, pages << PAGE_SHIFT); drm_legacy_sg_alloc()
134 entry->handle = ScatterHandle((unsigned long)entry->virtual); drm_legacy_sg_alloc()
136 DRM_DEBUG("handle = %08lx\n", entry->handle); drm_legacy_sg_alloc()
137 DRM_DEBUG("virtual = %p\n", entry->virtual); drm_legacy_sg_alloc()
139 for (i = (unsigned long)entry->virtual, j = 0; j < pages; drm_legacy_sg_alloc()
141 entry->pagelist[j] = vmalloc_to_page((void *)i); drm_legacy_sg_alloc()
142 if (!entry->pagelist[j]) drm_legacy_sg_alloc()
144 SetPageReserved(entry->pagelist[j]); drm_legacy_sg_alloc()
147 request->handle = entry->handle; drm_legacy_sg_alloc()
149 dev->sg = entry; drm_legacy_sg_alloc()
161 tmp = page_address(entry->pagelist[i]); drm_legacy_sg_alloc()
167 tmp = (unsigned long *)((u8 *) entry->virtual + drm_legacy_sg_alloc()
179 tmp = page_address(entry->pagelist[i]); drm_legacy_sg_alloc()
194 drm_sg_cleanup(entry); drm_legacy_sg_alloc()
202 struct drm_sg_mem *entry; drm_legacy_sg_free() local
210 entry = dev->sg; drm_legacy_sg_free()
213 if (!entry || entry->handle != request->handle) drm_legacy_sg_free()
216 DRM_DEBUG("virtual = %p\n", entry->virtual); drm_legacy_sg_free()
218 drm_sg_cleanup(entry); drm_legacy_sg_free()
drm_hashtab.c 61 struct drm_hash_item *entry; drm_ht_verbose_list() local
69 hlist_for_each_entry(entry, h_list, head) drm_ht_verbose_list()
70 DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); drm_ht_verbose_list()
76 struct drm_hash_item *entry; drm_ht_find_key() local
82 hlist_for_each_entry(entry, h_list, head) { hlist_for_each_entry()
83 if (entry->key == key) hlist_for_each_entry()
84 return &entry->head; hlist_for_each_entry()
85 if (entry->key > key) hlist_for_each_entry()
94 struct drm_hash_item *entry; drm_ht_find_key_rcu() local
100 hlist_for_each_entry_rcu(entry, h_list, head) { hlist_for_each_entry_rcu()
101 if (entry->key == key) hlist_for_each_entry_rcu()
102 return &entry->head; hlist_for_each_entry_rcu()
103 if (entry->key > key) hlist_for_each_entry_rcu()
111 struct drm_hash_item *entry; drm_ht_insert_item() local
120 hlist_for_each_entry(entry, h_list, head) { hlist_for_each_entry()
121 if (entry->key == key) hlist_for_each_entry()
123 if (entry->key > key) hlist_for_each_entry()
125 parent = &entry->head; hlist_for_each_entry()
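All three walkers above rely on the same invariant: each hash bucket is kept sorted by key, so a probe can stop at the first entry whose key exceeds the target. A condensed sketch of that ordered-bucket lookup (the helper name is hypothetical):

        static struct hlist_node *find_key(struct hlist_head *h_list,
                                           unsigned long key)
        {
                struct drm_hash_item *entry;

                hlist_for_each_entry(entry, h_list, head) {
                        if (entry->key == key)
                                return &entry->head;    /* exact match */
                        if (entry->key > key)
                                break;  /* bucket is sorted: no later match possible */
                }
                return NULL;
        }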
drm_agpsupport.c 197 * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
201 struct drm_agp_mem *entry; drm_agp_alloc() local
208 if (!(entry = kzalloc(sizeof(*entry), GFP_KERNEL))) drm_agp_alloc()
214 kfree(entry); drm_agp_alloc()
218 entry->handle = (unsigned long)memory->key + 1; drm_agp_alloc()
219 entry->memory = memory; drm_agp_alloc()
220 entry->bound = 0; drm_agp_alloc()
221 entry->pages = pages; drm_agp_alloc()
222 list_add(&entry->head, &dev->agp->memory); drm_agp_alloc()
224 request->handle = entry->handle; drm_agp_alloc()
241 * Search for the AGP memory entry associated with a handle.
252 struct drm_agp_mem *entry; drm_agp_lookup_entry() local
254 list_for_each_entry(entry, &dev->agp->memory, head) { drm_agp_lookup_entry()
255 if (entry->handle == handle) drm_agp_lookup_entry()
256 return entry; drm_agp_lookup_entry()
271 * entry and passes it to the unbind_agp() function.
275 struct drm_agp_mem *entry; drm_agp_unbind() local
280 if (!(entry = drm_agp_lookup_entry(dev, request->handle))) drm_agp_unbind()
282 if (!entry->bound) drm_agp_unbind()
284 ret = drm_unbind_agp(entry->memory); drm_agp_unbind()
286 entry->bound = 0; drm_agp_unbind()
310 * is currently bound into the GATT. Looks up the AGP memory entry and passes
315 struct drm_agp_mem *entry; drm_agp_bind() local
321 if (!(entry = drm_agp_lookup_entry(dev, request->handle))) drm_agp_bind()
323 if (entry->bound) drm_agp_bind()
326 if ((retcode = drm_bind_agp(entry->memory, page))) drm_agp_bind()
328 entry->bound = dev->agp->base + (page << PAGE_SHIFT); drm_agp_bind()
329 DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n", drm_agp_bind()
330 dev->agp->base, entry->bound); drm_agp_bind()
354 * AGP memory entry. If the memory is currently bound, unbind it via
355 * unbind_agp(). Frees it via free_agp() as well as the entry itself
360 struct drm_agp_mem *entry; drm_agp_free() local
364 if (!(entry = drm_agp_lookup_entry(dev, request->handle))) drm_agp_free()
366 if (entry->bound) drm_agp_free()
367 drm_unbind_agp(entry->memory); drm_agp_free()
369 list_del(&entry->head); drm_agp_free()
371 drm_free_agp(entry->memory, entry->pages); drm_agp_free()
372 kfree(entry); drm_agp_free()
441 struct drm_agp_mem *entry, *tempe; drm_agp_clear() local
448 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { drm_agp_clear()
449 if (entry->bound) drm_agp_clear()
450 drm_unbind_agp(entry->memory); drm_agp_clear()
451 drm_free_agp(entry->memory, entry->pages); drm_agp_clear()
452 kfree(entry); drm_agp_clear()
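Taken together, the drm_agp_* helpers implement a handle-keyed lifecycle: drm_agp_alloc() creates the entry and hands entry->handle back to the caller, drm_agp_bind()/drm_agp_unbind() resolve that handle through drm_agp_lookup_entry(), and drm_agp_free() releases everything. A hedged sketch of the sequence (error handling elided; a real caller checks every return value):

        struct drm_agp_buffer req = { .size = 4 * PAGE_SIZE, .type = 0 };
        struct drm_agp_binding bind_req;

        drm_agp_alloc(dev, &req);               /* fills in req.handle */

        bind_req.handle = req.handle;
        bind_req.offset = 0;                    /* byte offset into the AGP aperture */
        drm_agp_bind(dev, &bind_req);
        /* ... use the bound memory ... */
        drm_agp_unbind(dev, &bind_req);
        drm_agp_free(dev, &req);                /* unbinds first if still bound */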
drm_bufs.c 42 struct drm_map_list *entry; drm_find_matching_map() local
43 list_for_each_entry(entry, &dev->maplist, head) { drm_find_matching_map()
52 if (!entry->map || drm_find_matching_map()
53 map->type != entry->map->type || drm_find_matching_map()
54 entry->master != dev->primary->master) drm_find_matching_map()
60 return entry; drm_find_matching_map()
63 if ((entry->map->offset & 0xffffffff) == drm_find_matching_map()
65 return entry; drm_find_matching_map()
69 if (entry->map->offset == map->offset) drm_find_matching_map()
70 return entry; drm_find_matching_map()
257 struct drm_agp_mem *entry; drm_addmap_core() local
286 list_for_each_entry(entry, &dev->agp->memory, head) { drm_addmap_core()
287 if ((map->offset >= entry->bound) && drm_addmap_core()
288 (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) { drm_addmap_core()
436 /* Find the list entry for the map and remove it */ drm_legacy_rmmap_locked()
555 * \param entry buffer entry where the error occurred.
557 * Frees any pages and buffers associated with the given entry.
560 struct drm_buf_entry * entry) drm_cleanup_buf_error()
564 if (entry->seg_count) { drm_cleanup_buf_error()
565 for (i = 0; i < entry->seg_count; i++) { drm_cleanup_buf_error()
566 if (entry->seglist[i]) { drm_cleanup_buf_error()
567 drm_pci_free(dev, entry->seglist[i]); drm_cleanup_buf_error()
570 kfree(entry->seglist); drm_cleanup_buf_error()
572 entry->seg_count = 0; drm_cleanup_buf_error()
575 if (entry->buf_count) { drm_cleanup_buf_error()
576 for (i = 0; i < entry->buf_count; i++) { drm_cleanup_buf_error()
577 kfree(entry->buflist[i].dev_private); drm_cleanup_buf_error()
579 kfree(entry->buflist); drm_cleanup_buf_error()
581 entry->buf_count = 0; drm_cleanup_buf_error()
601 struct drm_buf_entry *entry; drm_legacy_addbufs_agp() local
664 entry = &dma->bufs[order]; drm_legacy_addbufs_agp()
665 if (entry->buf_count) { drm_legacy_addbufs_agp()
677 entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL); drm_legacy_addbufs_agp()
678 if (!entry->buflist) { drm_legacy_addbufs_agp()
684 entry->buf_size = size; drm_legacy_addbufs_agp()
685 entry->page_order = page_order; drm_legacy_addbufs_agp()
689 while (entry->buf_count < count) { drm_legacy_addbufs_agp()
690 buf = &entry->buflist[entry->buf_count]; drm_legacy_addbufs_agp()
691 buf->idx = dma->buf_count + entry->buf_count; drm_legacy_addbufs_agp()
708 entry->buf_count = count; drm_legacy_addbufs_agp()
709 drm_cleanup_buf_error(dev, entry); drm_legacy_addbufs_agp()
715 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); drm_legacy_addbufs_agp()
718 entry->buf_count++; drm_legacy_addbufs_agp()
725 (dma->buf_count + entry->buf_count) * drm_legacy_addbufs_agp()
728 /* Free the entry because it isn't valid */ drm_legacy_addbufs_agp()
729 drm_cleanup_buf_error(dev, entry); drm_legacy_addbufs_agp()
736 for (i = 0; i < entry->buf_count; i++) { drm_legacy_addbufs_agp()
737 dma->buflist[i + dma->buf_count] = &entry->buflist[i]; drm_legacy_addbufs_agp()
740 dma->buf_count += entry->buf_count; drm_legacy_addbufs_agp()
741 dma->seg_count += entry->seg_count; drm_legacy_addbufs_agp()
746 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); drm_legacy_addbufs_agp()
750 request->count = entry->buf_count; drm_legacy_addbufs_agp()
770 struct drm_buf_entry *entry; drm_legacy_addbufs_pci() local
814 entry = &dma->bufs[order]; drm_legacy_addbufs_pci()
815 if (entry->buf_count) { drm_legacy_addbufs_pci()
827 entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL); drm_legacy_addbufs_pci()
828 if (!entry->buflist) { drm_legacy_addbufs_pci()
834 entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL); drm_legacy_addbufs_pci()
835 if (!entry->seglist) { drm_legacy_addbufs_pci()
836 kfree(entry->buflist); drm_legacy_addbufs_pci()
848 kfree(entry->buflist); drm_legacy_addbufs_pci()
849 kfree(entry->seglist); drm_legacy_addbufs_pci()
859 entry->buf_size = size; drm_legacy_addbufs_pci()
860 entry->page_order = page_order; drm_legacy_addbufs_pci()
864 while (entry->buf_count < count) { drm_legacy_addbufs_pci()
870 entry->buf_count = count; drm_legacy_addbufs_pci()
871 entry->seg_count = count; drm_legacy_addbufs_pci()
872 drm_cleanup_buf_error(dev, entry); drm_legacy_addbufs_pci()
878 entry->seglist[entry->seg_count++] = dmah; drm_legacy_addbufs_pci()
887 offset + size <= total && entry->buf_count < count; drm_legacy_addbufs_pci()
888 offset += alignment, ++entry->buf_count) { drm_legacy_addbufs_pci()
889 buf = &entry->buflist[entry->buf_count]; drm_legacy_addbufs_pci()
890 buf->idx = dma->buf_count + entry->buf_count; drm_legacy_addbufs_pci()
907 entry->buf_count = count; drm_legacy_addbufs_pci()
908 entry->seg_count = count; drm_legacy_addbufs_pci()
909 drm_cleanup_buf_error(dev, entry); drm_legacy_addbufs_pci()
917 entry->buf_count, buf->address); drm_legacy_addbufs_pci()
923 (dma->buf_count + entry->buf_count) * drm_legacy_addbufs_pci()
926 /* Free the entry because it isn't valid */ drm_legacy_addbufs_pci()
927 drm_cleanup_buf_error(dev, entry); drm_legacy_addbufs_pci()
935 for (i = 0; i < entry->buf_count; i++) { drm_legacy_addbufs_pci()
936 dma->buflist[i + dma->buf_count] = &entry->buflist[i]; drm_legacy_addbufs_pci()
947 dma->buf_count += entry->buf_count; drm_legacy_addbufs_pci()
948 dma->seg_count += entry->seg_count; drm_legacy_addbufs_pci()
949 dma->page_count += entry->seg_count << page_order; drm_legacy_addbufs_pci()
950 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); drm_legacy_addbufs_pci()
954 request->count = entry->buf_count; drm_legacy_addbufs_pci()
970 struct drm_buf_entry *entry; drm_legacy_addbufs_sg() local
1025 entry = &dma->bufs[order]; drm_legacy_addbufs_sg()
1026 if (entry->buf_count) { drm_legacy_addbufs_sg()
1038 entry->buflist = kzalloc(count * sizeof(*entry->buflist), drm_legacy_addbufs_sg()
1040 if (!entry->buflist) { drm_legacy_addbufs_sg()
1046 entry->buf_size = size; drm_legacy_addbufs_sg()
1047 entry->page_order = page_order; drm_legacy_addbufs_sg()
1051 while (entry->buf_count < count) { drm_legacy_addbufs_sg()
1052 buf = &entry->buflist[entry->buf_count]; drm_legacy_addbufs_sg()
1053 buf->idx = dma->buf_count + entry->buf_count; drm_legacy_addbufs_sg()
1071 entry->buf_count = count; drm_legacy_addbufs_sg()
1072 drm_cleanup_buf_error(dev, entry); drm_legacy_addbufs_sg()
1078 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); drm_legacy_addbufs_sg()
1081 entry->buf_count++; drm_legacy_addbufs_sg()
1088 (dma->buf_count + entry->buf_count) * drm_legacy_addbufs_sg()
1091 /* Free the entry because it isn't valid */ drm_legacy_addbufs_sg()
1092 drm_cleanup_buf_error(dev, entry); drm_legacy_addbufs_sg()
1099 for (i = 0; i < entry->buf_count; i++) { drm_legacy_addbufs_sg()
1100 dma->buflist[i + dma->buf_count] = &entry->buflist[i]; drm_legacy_addbufs_sg()
1103 dma->buf_count += entry->buf_count; drm_legacy_addbufs_sg()
1104 dma->seg_count += entry->seg_count; drm_legacy_addbufs_sg()
1109 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); drm_legacy_addbufs_sg()
1113 request->count = entry->buf_count; drm_legacy_addbufs_sg()
1257 * updates the respective drm_device_dma::bufs entry's low and high water marks.
1267 struct drm_buf_entry *entry; drm_legacy_markbufs() local
1283 entry = &dma->bufs[order]; drm_legacy_markbufs()
1285 if (request->low_mark < 0 || request->low_mark > entry->buf_count) drm_legacy_markbufs()
1287 if (request->high_mark < 0 || request->high_mark > entry->buf_count) drm_legacy_markbufs()
1290 entry->low_mark = request->low_mark; drm_legacy_markbufs()
1291 entry->high_mark = request->high_mark; drm_legacy_markbufs()
1463 struct drm_map_list *entry; drm_legacy_getsarea() local
1465 list_for_each_entry(entry, &dev->maplist, head) { drm_legacy_getsarea()
1466 if (entry->map && entry->map->type == _DRM_SHM && drm_legacy_getsarea()
1467 (entry->map->flags & _DRM_CONTAINS_LOCK)) { drm_legacy_getsarea()
1468 return entry->map; drm_legacy_getsarea()
559 drm_cleanup_buf_error(struct drm_device * dev, struct drm_buf_entry * entry) drm_cleanup_buf_error() argument
ati_pcigart.c 61 struct drm_sg_mem *entry = dev->sg; drm_ati_pcigart_cleanup() local
67 if (!entry) { drm_ati_pcigart_cleanup()
75 pages = (entry->pages <= max_pages) drm_ati_pcigart_cleanup()
76 ? entry->pages : max_pages; drm_ati_pcigart_cleanup()
79 if (!entry->busaddr[i]) drm_ati_pcigart_cleanup()
81 pci_unmap_page(dev->pdev, entry->busaddr[i], drm_ati_pcigart_cleanup()
101 struct drm_sg_mem *entry = dev->sg; drm_ati_pcigart_init() local
109 if (!entry) { drm_ati_pcigart_init()
144 pages = (entry->pages <= max_real_pages) drm_ati_pcigart_init()
145 ? entry->pages : max_real_pages; drm_ati_pcigart_init()
156 entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i], drm_ati_pcigart_init()
158 if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) { drm_ati_pcigart_init()
165 page_base = (u32) entry->busaddr[i]; drm_ati_pcigart_init()
/linux-4.1.27/drivers/acpi/
nvs.c 96 struct nvs_page *entry, *next; suspend_nvs_register() local
104 entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL); suspend_nvs_register()
105 if (!entry) suspend_nvs_register()
108 list_add_tail(&entry->node, &nvs_list); suspend_nvs_register()
109 entry->phys_start = start; suspend_nvs_register()
111 entry->size = (size < nr_bytes) ? size : nr_bytes; suspend_nvs_register()
113 start += entry->size; suspend_nvs_register()
114 size -= entry->size; suspend_nvs_register()
119 list_for_each_entry_safe(entry, next, &nvs_list, node) { suspend_nvs_register()
120 list_del(&entry->node); suspend_nvs_register()
121 kfree(entry); suspend_nvs_register()
131 struct nvs_page *entry; suspend_nvs_free() local
133 list_for_each_entry(entry, &nvs_list, node) suspend_nvs_free()
134 if (entry->data) { suspend_nvs_free()
135 free_page((unsigned long)entry->data); suspend_nvs_free()
136 entry->data = NULL; suspend_nvs_free()
137 if (entry->kaddr) { suspend_nvs_free()
138 if (entry->unmap) { suspend_nvs_free()
139 iounmap(entry->kaddr); suspend_nvs_free()
140 entry->unmap = false; suspend_nvs_free()
142 acpi_os_unmap_iomem(entry->kaddr, suspend_nvs_free()
143 entry->size); suspend_nvs_free()
145 entry->kaddr = NULL; suspend_nvs_free()
155 struct nvs_page *entry; suspend_nvs_alloc() local
157 list_for_each_entry(entry, &nvs_list, node) { suspend_nvs_alloc()
158 entry->data = (void *)__get_free_page(GFP_KERNEL); suspend_nvs_alloc()
159 if (!entry->data) { suspend_nvs_alloc()
172 struct nvs_page *entry; suspend_nvs_save() local
176 list_for_each_entry(entry, &nvs_list, node) suspend_nvs_save()
177 if (entry->data) { suspend_nvs_save()
178 unsigned long phys = entry->phys_start; suspend_nvs_save()
179 unsigned int size = entry->size; suspend_nvs_save()
181 entry->kaddr = acpi_os_get_iomem(phys, size); suspend_nvs_save()
182 if (!entry->kaddr) { suspend_nvs_save()
183 entry->kaddr = acpi_os_ioremap(phys, size); suspend_nvs_save()
184 entry->unmap = !!entry->kaddr; suspend_nvs_save()
186 if (!entry->kaddr) { suspend_nvs_save()
190 memcpy(entry->data, entry->kaddr, entry->size); suspend_nvs_save()
204 struct nvs_page *entry; suspend_nvs_restore() local
208 list_for_each_entry(entry, &nvs_list, node) suspend_nvs_restore()
209 if (entry->data) suspend_nvs_restore()
210 memcpy(entry->kaddr, entry->data, entry->size); suspend_nvs_restore()
pci_irq.c 129 static void do_prt_fixups(struct acpi_prt_entry *entry, do_prt_fixups() argument
143 entry->id.segment == quirk->segment && do_prt_fixups()
144 entry->id.bus == quirk->bus && do_prt_fixups()
145 entry->id.device == quirk->device && do_prt_fixups()
146 entry->pin == quirk->pin && do_prt_fixups()
152 entry->id.segment, entry->id.bus, do_prt_fixups()
153 entry->id.device, pin_name(entry->pin), do_prt_fixups()
167 struct acpi_prt_entry *entry; acpi_pci_irq_check_entry() local
173 entry = kzalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL); acpi_pci_irq_check_entry()
174 if (!entry) acpi_pci_irq_check_entry()
182 entry->id.segment = segment; acpi_pci_irq_check_entry()
183 entry->id.bus = bus; acpi_pci_irq_check_entry()
184 entry->id.device = (prt->address >> 16) & 0xFFFF; acpi_pci_irq_check_entry()
185 entry->pin = prt->pin + 1; acpi_pci_irq_check_entry()
187 do_prt_fixups(entry, prt); acpi_pci_irq_check_entry()
189 entry->index = prt->source_index; acpi_pci_irq_check_entry()
201 * (e.g. exists somewhere 'below' this _PRT entry in the ACPI acpi_pci_irq_check_entry()
205 acpi_get_handle(handle, prt->source, &entry->link); acpi_pci_irq_check_entry()
217 entry->id.segment, entry->id.bus, acpi_pci_irq_check_entry()
218 entry->id.device, pin_name(entry->pin), acpi_pci_irq_check_entry()
219 prt->source, entry->index)); acpi_pci_irq_check_entry()
221 *entry_ptr = entry; acpi_pci_irq_check_entry()
231 struct acpi_pci_routing_table *entry; acpi_pci_irq_find_prt_entry() local
247 entry = buffer.pointer; acpi_pci_irq_find_prt_entry()
248 while (entry && (entry->length > 0)) { acpi_pci_irq_find_prt_entry()
250 entry, entry_ptr)) acpi_pci_irq_find_prt_entry()
252 entry = (struct acpi_pci_routing_table *) acpi_pci_irq_find_prt_entry()
253 ((unsigned long)entry + entry->length); acpi_pci_irq_find_prt_entry()
282 * entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel does
288 struct acpi_prt_entry *entry) acpi_reroute_boot_interrupt()
305 "IRQ %d\n", entry->index, acpi_reroute_boot_interrupt()
306 (entry->index % 4) + 16); acpi_reroute_boot_interrupt()
307 entry->index = (entry->index % 4) + 16; acpi_reroute_boot_interrupt()
311 "IRQ: unknown mapping\n", entry->index); acpi_reroute_boot_interrupt()
320 struct acpi_prt_entry *entry = NULL; acpi_pci_irq_lookup() local
325 ret = acpi_pci_irq_find_prt_entry(dev, pin, &entry); acpi_pci_irq_lookup()
326 if (!ret && entry) { acpi_pci_irq_lookup()
328 acpi_reroute_boot_interrupt(dev, entry); acpi_pci_irq_lookup()
330 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n", acpi_pci_irq_lookup()
332 return entry; acpi_pci_irq_lookup()
337 * PCI interrupt routing entry (e.g. yenta bridge and add-in card bridge). acpi_pci_irq_lookup()
355 ret = acpi_pci_irq_find_prt_entry(bridge, pin, &entry); acpi_pci_irq_lookup()
356 if (!ret && entry) { acpi_pci_irq_lookup()
361 return entry; acpi_pci_irq_lookup()
399 struct acpi_prt_entry *entry; acpi_pci_irq_enable() local
419 entry = acpi_pci_irq_lookup(dev, pin); acpi_pci_irq_enable()
420 if (!entry) { acpi_pci_irq_enable()
430 if (entry) { acpi_pci_irq_enable()
431 if (entry->link) acpi_pci_irq_enable()
432 gsi = acpi_pci_link_allocate_irq(entry->link, acpi_pci_irq_enable()
433 entry->index, acpi_pci_irq_enable()
437 gsi = entry->index; acpi_pci_irq_enable()
450 kfree(entry); acpi_pci_irq_enable()
458 kfree(entry); acpi_pci_irq_enable()
474 kfree(entry); acpi_pci_irq_enable()
480 struct acpi_prt_entry *entry; acpi_pci_irq_disable() local
496 entry = acpi_pci_irq_lookup(dev, pin); acpi_pci_irq_disable()
497 if (!entry) acpi_pci_irq_disable()
500 if (entry->link) acpi_pci_irq_disable()
501 gsi = acpi_pci_link_free_irq(entry->link); acpi_pci_irq_disable()
503 gsi = entry->index; acpi_pci_irq_disable()
505 kfree(entry); acpi_pci_irq_disable()
287 acpi_reroute_boot_interrupt(struct pci_dev *dev, struct acpi_prt_entry *entry) acpi_reroute_boot_interrupt() argument
proc.c 32 struct acpi_device_physical_node *entry; acpi_system_wakeup_device_seq_show() local
50 list_for_each_entry(entry, &dev->physical_node_list, acpi_system_wakeup_device_seq_show()
52 ldev = get_device(entry->dev); acpi_system_wakeup_device_seq_show()
56 if (&entry->node != acpi_system_wakeup_device_seq_show()
79 struct acpi_device_physical_node *entry; physical_device_enable_wakeup() local
83 list_for_each_entry(entry, physical_device_enable_wakeup()
85 if (entry->dev && device_can_wakeup(entry->dev)) { physical_device_enable_wakeup()
86 bool enable = !device_may_wakeup(entry->dev); physical_device_enable_wakeup()
87 device_set_wakeup_enable(entry->dev, enable); physical_device_enable_wakeup()
processor_core.c 34 static int map_lapic_id(struct acpi_subtable_header *entry, map_lapic_id() argument
38 container_of(entry, struct acpi_madt_local_apic, header); map_lapic_id()
50 static int map_x2apic_id(struct acpi_subtable_header *entry, map_x2apic_id() argument
54 container_of(entry, struct acpi_madt_local_x2apic, header); map_x2apic_id()
67 static int map_lsapic_id(struct acpi_subtable_header *entry, map_lsapic_id() argument
71 container_of(entry, struct acpi_madt_local_sapic, header); map_lsapic_id()
77 if ((entry->length < 16) || (lsapic->uid != acpi_id)) map_lsapic_id()
89 static int map_gicc_mpidr(struct acpi_subtable_header *entry, map_gicc_mpidr() argument
93 container_of(entry, struct acpi_madt_generic_interrupt, header); map_gicc_mpidr()
113 unsigned long madt_end, entry; map_madt_entry() local
121 entry = (unsigned long)madt; map_madt_entry()
122 madt_end = entry + madt->header.length; map_madt_entry()
126 entry += sizeof(struct acpi_table_madt); map_madt_entry()
127 while (entry + sizeof(struct acpi_subtable_header) < madt_end) { map_madt_entry()
129 (struct acpi_subtable_header *)entry; map_madt_entry()
143 entry += header->length; map_madt_entry()
250 static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base, get_ioapic_id() argument
253 struct acpi_madt_io_apic *ioapic = (struct acpi_madt_io_apic *)entry; get_ioapic_id()
266 unsigned long madt_end, entry; parse_madt_ioapic_entry() local
274 entry = (unsigned long)madt; parse_madt_ioapic_entry()
275 madt_end = entry + madt->header.length; parse_madt_ioapic_entry()
278 entry += sizeof(struct acpi_table_madt); parse_madt_ioapic_entry()
279 while (entry + sizeof(struct acpi_subtable_header) < madt_end) { parse_madt_ioapic_entry()
280 hdr = (struct acpi_subtable_header *)entry; parse_madt_ioapic_entry()
285 entry += hdr->length; parse_madt_ioapic_entry()
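map_madt_entry() and parse_madt_ioapic_entry() share one walking pattern: MADT subtables are variable-length records laid end to end, so the cursor advances by header->length until another header would no longer fit before the table end. A minimal sketch of that walk:

        unsigned long entry = (unsigned long)madt + sizeof(struct acpi_table_madt);
        unsigned long madt_end = (unsigned long)madt + madt->header.length;

        while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
                struct acpi_subtable_header *hdr =
                        (struct acpi_subtable_header *)entry;

                /* dispatch on hdr->type, e.g. ACPI_MADT_TYPE_LOCAL_APIC */
                entry += hdr->length;   /* step over this variable-length record */
        }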
/linux-4.1.27/arch/s390/kernel/
jump_label.c 21 struct jump_entry *entry; member in struct:insn_args
25 static void jump_label_make_nop(struct jump_entry *entry, struct insn *insn) jump_label_make_nop() argument
32 static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn) jump_label_make_branch() argument
36 insn->offset = (entry->target - entry->code) >> 1; jump_label_make_branch()
39 static void jump_label_bug(struct jump_entry *entry, struct insn *expected, jump_label_bug() argument
42 unsigned char *ipc = (unsigned char *)entry->code; jump_label_bug()
61 static void __jump_label_transform(struct jump_entry *entry, __jump_label_transform() argument
68 jump_label_make_nop(entry, &old); __jump_label_transform()
69 jump_label_make_branch(entry, &new); __jump_label_transform()
71 jump_label_make_branch(entry, &old); __jump_label_transform()
72 jump_label_make_nop(entry, &new); __jump_label_transform()
75 if (memcmp((void *)entry->code, &orignop, sizeof(orignop))) __jump_label_transform()
76 jump_label_bug(entry, &orignop, &new); __jump_label_transform()
78 if (memcmp((void *)entry->code, &old, sizeof(old))) __jump_label_transform()
79 jump_label_bug(entry, &old, &new); __jump_label_transform()
81 s390_kernel_write((void *)entry->code, &new, sizeof(new)); __jump_label_transform()
88 __jump_label_transform(args->entry, args->type, 0); __sm_arch_jump_label_transform()
92 void arch_jump_label_transform(struct jump_entry *entry, arch_jump_label_transform() argument
97 args.entry = entry; arch_jump_label_transform()
103 void arch_jump_label_transform_static(struct jump_entry *entry, arch_jump_label_transform_static() argument
106 __jump_label_transform(entry, type, 1); arch_jump_label_transform_static()
os_info.c 43 * Add OS info entry and update checksum
47 os_info.entry[nr].addr = (u64)(unsigned long)ptr; os_info_entry_add()
48 os_info.entry[nr].size = size; os_info_entry_add()
49 os_info.entry[nr].csum = csum_partial(ptr, size, 0); os_info_entry_add()
72 * Allocate and copy OS info entry from oldmem
80 addr = os_info_old->entry[nr].addr; os_info_old_alloc()
85 size = os_info_old->entry[nr].size; os_info_old_alloc()
97 if (csum != os_info_old->entry[nr].csum) { os_info_old_alloc()
101 os_info_old->entry[nr].addr = (u64)(unsigned long)buf_align; os_info_old_alloc()
107 os_info_old->entry[nr].addr = 0; os_info_old_alloc()
109 pr_info("entry %i: %s (addr=0x%lx size=%lu)\n", os_info_old_alloc()
155 * Return pointer to os info entry and its size
163 if (!os_info_old->entry[nr].addr) os_info_old_entry()
165 *size = (unsigned long) os_info_old->entry[nr].size; os_info_old_entry()
166 return (void *)(unsigned long)os_info_old->entry[nr].addr; os_info_old_entry()
/linux-4.1.27/drivers/firmware/efi/
runtime-map.c 23 struct kobject kobj; /* kobject for each entry */
30 ssize_t (*show)(struct efi_runtime_map_entry *entry, char *buf);
38 static ssize_t type_show(struct efi_runtime_map_entry *entry, char *buf) type_show() argument
40 return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type); type_show()
43 #define EFI_RUNTIME_FIELD(var) entry->md.var
46 static ssize_t name##_show(struct efi_runtime_map_entry *entry, char *buf) \
64 struct efi_runtime_map_entry *entry = to_map_entry(kobj); map_attr_show() local
67 return map_attr->show(entry, buf); map_attr_show()
77 * These are default attributes that are added for every memmap entry.
94 struct efi_runtime_map_entry *entry; map_release() local
96 entry = to_map_entry(kobj); map_release()
97 kfree(entry); map_release()
112 struct efi_runtime_map_entry *entry; add_sysfs_runtime_map_entry() local
120 entry = kzalloc(sizeof(*entry), GFP_KERNEL); add_sysfs_runtime_map_entry()
121 if (!entry) { add_sysfs_runtime_map_entry()
127 memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size, add_sysfs_runtime_map_entry()
130 kobject_init(&entry->kobj, &map_ktype); add_sysfs_runtime_map_entry()
131 entry->kobj.kset = map_kset; add_sysfs_runtime_map_entry()
132 ret = kobject_add(&entry->kobj, NULL, "%d", nr); add_sysfs_runtime_map_entry()
134 kobject_put(&entry->kobj); add_sysfs_runtime_map_entry()
140 return entry; add_sysfs_runtime_map_entry()
174 struct efi_runtime_map_entry *entry; efi_runtime_map_init() local
179 map_entries = kzalloc(nr_efi_runtime_map * sizeof(entry), GFP_KERNEL); efi_runtime_map_init()
186 entry = add_sysfs_runtime_map_entry(efi_kobj, i); efi_runtime_map_init()
187 if (IS_ERR(entry)) { efi_runtime_map_init()
188 ret = PTR_ERR(entry); efi_runtime_map_init()
191 *(map_entries + i) = entry; efi_runtime_map_init()
197 entry = *(map_entries + j); efi_runtime_map_init()
198 kobject_put(&entry->kobj); efi_runtime_map_init()
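The runtime-map sysfs code is the usual embedded-kobject attribute pattern: each entry embeds a kobject, to_map_entry() recovers the entry with container_of(), and a sysfs_ops dispatcher forwards to the attribute's own show() hook such as type_show(). A condensed sketch of that dispatch, matching the fragments above:

        #define to_map_entry(kobj) \
                container_of(kobj, struct efi_runtime_map_entry, kobj)

        static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
                                     char *buf)
        {
                struct efi_runtime_map_entry *entry = to_map_entry(kobj);
                struct map_attribute *map_attr =
                        container_of(attr, struct map_attribute, attr);

                return map_attr->show(entry, buf);      /* e.g. type_show() */
        }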
efi-pstore.c 46 static int efi_pstore_read_func(struct efivar_entry *entry, void *data) efi_pstore_read_func() argument
56 if (efi_guidcmp(entry->var.VendorGuid, vendor)) efi_pstore_read_func()
60 name[i] = entry->var.VariableName[i]; efi_pstore_read_func()
94 entry->var.DataSize = 1024; efi_pstore_read_func()
95 __efivar_entry_get(entry, &entry->var.Attributes, efi_pstore_read_func()
96 &entry->var.DataSize, entry->var.Data); efi_pstore_read_func()
97 size = entry->var.DataSize; efi_pstore_read_func()
98 memcpy(*cb_data->buf, entry->var.Data, efi_pstore_read_func()
106 * @entry: scanning entry
107 * @next: next entry
121 * @entry: deleting entry
124 static inline void __efi_pstore_scan_sysfs_exit(struct efivar_entry *entry, __efi_pstore_scan_sysfs_exit() argument
127 if (entry->deleting) { __efi_pstore_scan_sysfs_exit()
128 list_del(&entry->list); __efi_pstore_scan_sysfs_exit()
130 efivar_unregister(entry); __efi_pstore_scan_sysfs_exit()
133 entry->scanning = false; __efi_pstore_scan_sysfs_exit()
138 * @pos: scanning entry
139 * @next: next entry
156 * @pos: entry to begin iterating from
161 * It is possible to begin iteration from an arbitrary entry within
163 * the next entry of the last one passed to efi_pstore_read_func().
168 struct efivar_entry *entry, *n; efi_pstore_sysfs_entry_iter() local
173 list_for_each_entry_safe(entry, n, head, list) { list_for_each_entry_safe()
174 efi_pstore_scan_sysfs_enter(entry, n, head); list_for_each_entry_safe()
176 size = efi_pstore_read_func(entry, data); list_for_each_entry_safe()
177 efi_pstore_scan_sysfs_exit(entry, n, head, size < 0); list_for_each_entry_safe()
200 * This function returns the size of an NVRAM entry logged via efi_pstore_write().
203 * size > 0: Got data of an entry logged via efi_pstore_write() successfully,
207 * size < 0: Failed to get the data of an entry logged via efi_pstore_write(),
208 * and pstore will stop reading entries.
274 * Clean up an entry with the same name
276 static int efi_pstore_erase_func(struct efivar_entry *entry, void *data) efi_pstore_erase_func() argument
286 if (efi_guidcmp(entry->var.VendorGuid, vendor)) efi_pstore_erase_func()
289 if (ucs2_strncmp(entry->var.VariableName, efi_pstore_erase_func()
301 if (ucs2_strncmp(entry->var.VariableName, efi_name_old, efi_pstore_erase_func()
306 if (entry->scanning) { efi_pstore_erase_func()
308 * Skip deletion because this entry will be deleted efi_pstore_erase_func()
311 entry->deleting = true; efi_pstore_erase_func()
313 list_del(&entry->list); efi_pstore_erase_func()
316 __efivar_entry_delete(entry); efi_pstore_erase_func()
325 struct efivar_entry *entry = NULL; efi_pstore_erase() local
345 found = __efivar_entry_iter(efi_pstore_erase_func, &efivar_sysfs_list, &edata, &entry); efi_pstore_erase()
347 if (found && !entry->scanning) { efi_pstore_erase()
349 efivar_unregister(entry); efi_pstore_erase()
vars.c 108 /* A valid entry must be at least 8 bytes */ validate_load_option()
120 /* Each boot entry must have a descriptor */ validate_load_option()
357 struct efivar_entry *entry, *n; variable_is_present() local
362 list_for_each_entry_safe(entry, n, head, list) { list_for_each_entry_safe()
363 strsize2 = ucs2_strsize(entry->var.VariableName, 1024); list_for_each_entry_safe()
365 !memcmp(variable_name, &(entry->var.VariableName), list_for_each_entry_safe()
367 !efi_guidcmp(entry->var.VendorGuid, list_for_each_entry_safe()
528 * efivar_entry_add - add entry to variable list
529 * @entry: entry to add to list
532 void efivar_entry_add(struct efivar_entry *entry, struct list_head *head) efivar_entry_add() argument
535 list_add(&entry->list, head); efivar_entry_add()
541 * efivar_entry_remove - remove entry from variable list
542 * @entry: entry to remove from list
544 void efivar_entry_remove(struct efivar_entry *entry) efivar_entry_remove() argument
547 list_del(&entry->list); efivar_entry_remove()
553 * efivar_entry_list_del_unlock - remove entry from variable list
554 * @entry: entry to remove
556 * Remove @entry from the variable list and release the list lock.
563 static void efivar_entry_list_del_unlock(struct efivar_entry *entry) efivar_entry_list_del_unlock() argument
567 list_del(&entry->list); efivar_entry_list_del_unlock()
573 * @entry: entry containing EFI variable to delete
575 * Delete the variable from the firmware but leave @entry on the
579 * not remove @entry from the variable list. Also, it is safe to be
586 int __efivar_entry_delete(struct efivar_entry *entry) __efivar_entry_delete() argument
593 status = ops->set_variable(entry->var.VariableName, __efivar_entry_delete()
594 &entry->var.VendorGuid, __efivar_entry_delete()
602 * efivar_entry_delete - delete variable and remove entry from list
603 * @entry: entry containing variable to delete
605 * Delete the variable from the firmware and remove @entry from the
606 * variable list. It is the caller's responsibility to free @entry
612 int efivar_entry_delete(struct efivar_entry *entry) efivar_entry_delete() argument
618 status = ops->set_variable(entry->var.VariableName, efivar_entry_delete()
619 &entry->var.VendorGuid, efivar_entry_delete()
626 efivar_entry_list_del_unlock(entry); efivar_entry_delete()
633 * @entry: entry containing the EFI variable to write
646 * the entry is already on the list.
648 * Returns 0 on success, -EEXIST if a lookup is performed and the entry
652 int efivar_entry_set(struct efivar_entry *entry, u32 attributes, efivar_entry_set() argument
657 efi_char16_t *name = entry->var.VariableName; efivar_entry_set()
658 efi_guid_t vendor = entry->var.VendorGuid; efivar_entry_set()
775 * efivar_entry_find - search for an entry
779 * @remove: should we remove the entry from the list?
781 * Search for an entry on the variable list that has the EFI variable
782 * name @name and vendor guid @guid. If an entry is found on the list
783 * and @remove is true, the entry is removed from the list.
789 * Returns the entry if found on the list, %NULL otherwise.
794 struct efivar_entry *entry, *n; efivar_entry_find() local
800 list_for_each_entry_safe(entry, n, head, list) { list_for_each_entry_safe()
802 strsize2 = ucs2_strsize(entry->var.VariableName, 1024); list_for_each_entry_safe()
804 !memcmp(name, &(entry->var.VariableName), strsize1) && list_for_each_entry_safe()
805 !efi_guidcmp(guid, entry->var.VendorGuid)) { list_for_each_entry_safe()
815 if (entry->scanning) {
817 * The entry will be deleted
820 entry->deleting = true;
822 list_del(&entry->list);
825 return entry;
831 * @entry: entry for this variable
834 int efivar_entry_size(struct efivar_entry *entry, unsigned long *size) efivar_entry_size() argument
842 status = ops->get_variable(entry->var.VariableName, efivar_entry_size()
843 &entry->var.VendorGuid, NULL, size, NULL); efivar_entry_size()
855 * @entry: read data for this variable
864 int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes, __efivar_entry_get() argument
872 status = ops->get_variable(entry->var.VariableName, __efivar_entry_get()
873 &entry->var.VendorGuid, __efivar_entry_get()
882 * @entry: read data for this variable
887 int efivar_entry_get(struct efivar_entry *entry, u32 *attributes, efivar_entry_get() argument
894 status = ops->get_variable(entry->var.VariableName, efivar_entry_get()
895 &entry->var.VendorGuid, efivar_entry_get()
905 * @entry: entry containing variable to set and get
913 * Atomically call set_variable() for @entry and if the call is
923 * (EFI_NOT_FOUND), @entry is removed from the variable list.
925 int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, efivar_entry_set_get_size() argument
929 efi_char16_t *name = entry->var.VariableName; efivar_entry_set_get_size()
930 efi_guid_t *vendor = &entry->var.VendorGuid; efivar_entry_set_get_size()
977 status = ops->get_variable(entry->var.VariableName, efivar_entry_set_get_size()
978 &entry->var.VendorGuid, efivar_entry_set_get_size()
982 efivar_entry_list_del_unlock(entry); efivar_entry_set_get_size()
1001 * Lock the variable list to prevent entry insertion and removal until
1027 * @prev: entry to begin iterating from
1030 * entry on the list. It is safe for @func to remove entries in the
1036 * It is possible to begin iteration from an arbitrary entry within
1038 * the last entry passed to @func. To begin iterating from the
1048 struct efivar_entry *entry, *n; __efivar_entry_iter() local
1052 list_for_each_entry_safe(entry, n, head, list) { list_for_each_entry_safe()
1053 err = func(entry, data); list_for_each_entry_safe()
1059 *prev = entry;
1082 * entry on the list. It is safe for @func to remove entries in the
1153 * The caller must have already removed every entry from the list,
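The efivar_entry_* kerneldoc above describes a list-plus-lock protocol: entries live on a caller-supplied list guarded by the internal variable lock, a lookup may detach an entry, and __efivar_entry_delete() then removes the variable from the firmware. A hedged usage sketch combining find and delete, simplified from efivar_delete() (name and vendor are assumed to be in scope; the sysfs code actually defers freeing to its kobject release):

        struct efivar_entry *entry;

        efivar_entry_iter_begin();      /* take the variable list lock */
        entry = efivar_entry_find(name, vendor, &efivar_sysfs_list, true);
        if (entry)
                __efivar_entry_delete(entry);   /* firmware delete; already off the list */
        efivar_entry_iter_end();
        kfree(entry);   /* detached entry belongs to the caller; kfree(NULL) is a no-op */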
efivars.c 102 ssize_t (*show) (struct efivar_entry *entry, char *buf);
103 ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
123 efivar_guid_read(struct efivar_entry *entry, char *buf) efivar_guid_read() argument
125 struct efi_variable *var = &entry->var; efivar_guid_read()
128 if (!entry || !buf) efivar_guid_read()
139 efivar_attr_read(struct efivar_entry *entry, char *buf) efivar_attr_read() argument
141 struct efi_variable *var = &entry->var; efivar_attr_read()
144 if (!entry || !buf) efivar_attr_read()
148 if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) efivar_attr_read()
172 efivar_size_read(struct efivar_entry *entry, char *buf) efivar_size_read() argument
174 struct efi_variable *var = &entry->var; efivar_size_read()
177 if (!entry || !buf) efivar_size_read()
181 if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) efivar_size_read()
189 efivar_data_read(struct efivar_entry *entry, char *buf) efivar_data_read() argument
191 struct efi_variable *var = &entry->var; efivar_data_read()
193 if (!entry || !buf) efivar_data_read()
197 if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) efivar_data_read()
256 efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) efivar_store_raw() argument
258 struct efi_variable *new_var, *var = &entry->var; efivar_store_raw()
283 copy_out_compat(&entry->var, compat); efivar_store_raw()
300 memcpy(&entry->var, new_var, count); efivar_store_raw()
303 err = efivar_entry_set(entry, attributes, size, data, NULL); efivar_store_raw()
313 efivar_show_raw(struct efivar_entry *entry, char *buf) efivar_show_raw() argument
315 struct efi_variable *var = &entry->var; efivar_show_raw()
319 if (!entry || !buf) efivar_show_raw()
323 if (efivar_entry_get(entry, &entry->var.Attributes, efivar_show_raw()
324 &entry->var.DataSize, entry->var.Data)) efivar_show_raw()
474 printk(KERN_WARNING "efivars: failed to create sysfs entry.\n"); efivar_create()
490 struct efivar_entry *entry; efivar_delete() local
514 entry = efivar_entry_find(name, vendor, &efivar_sysfs_list, true); efivar_delete()
515 if (!entry) efivar_delete()
517 else if (__efivar_entry_delete(entry)) efivar_delete()
525 if (!entry->scanning) { efivar_delete()
527 efivar_unregister(entry); efivar_delete()
536 * efivar_create_sysfs_entry - create a new entry in sysfs
537 * @new_var: efivar entry to create
641 struct efivar_entry *entry = data; efivar_update_sysfs_entry() local
646 memcpy(entry->var.VariableName, name, name_size); efivar_update_sysfs_entry()
647 memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); efivar_update_sysfs_entry()
654 struct efivar_entry *entry; efivar_update_sysfs_entries() local
659 entry = kzalloc(sizeof(*entry), GFP_KERNEL); efivar_update_sysfs_entries()
660 if (!entry) efivar_update_sysfs_entries()
663 err = efivar_init(efivar_update_sysfs_entry, entry, efivar_update_sysfs_entries()
668 efivar_create_sysfs_entry(entry); efivar_update_sysfs_entries()
671 kfree(entry); efivar_update_sysfs_entries()
677 struct efivar_entry *entry; efivars_sysfs_callback() local
679 entry = kzalloc(sizeof(*entry), GFP_KERNEL); efivars_sysfs_callback()
680 if (!entry) efivars_sysfs_callback()
683 memcpy(entry->var.VariableName, name, name_size); efivars_sysfs_callback()
684 memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); efivars_sysfs_callback()
686 efivar_create_sysfs_entry(entry); efivars_sysfs_callback()
691 static int efivar_sysfs_destroy(struct efivar_entry *entry, void *data) efivar_sysfs_destroy() argument
693 efivar_entry_remove(entry); efivar_sysfs_destroy()
694 efivar_unregister(entry); efivar_sysfs_destroy()
/linux-4.1.27/drivers/oprofile/
cpu_buffer.h 73 * entry->event != NULL, otherwise entry->size or entry->event will be
78 *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
79 int op_cpu_buffer_write_commit(struct op_entry *entry);
80 struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
83 /* returns the remaining free size of data in the entry */
85 int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val) op_cpu_buffer_add_data() argument
87 if (!entry->size) op_cpu_buffer_add_data()
89 *entry->data = val; op_cpu_buffer_add_data()
90 entry->size--; op_cpu_buffer_add_data()
91 entry->data++; op_cpu_buffer_add_data()
92 return entry->size; op_cpu_buffer_add_data()
95 /* returns the size of data in the entry */
97 int op_cpu_buffer_get_size(struct op_entry *entry) op_cpu_buffer_get_size() argument
99 return entry->size; op_cpu_buffer_get_size()
104 int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val) op_cpu_buffer_get_data() argument
106 int size = entry->size; op_cpu_buffer_get_data()
109 *val = *entry->data; op_cpu_buffer_get_data()
110 entry->size--; op_cpu_buffer_get_data()
111 entry->data++; op_cpu_buffer_get_data()
cpu_buffer.c 134 * buffer. Struct entry can be uninitialized. The function reserves a
142 *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size) op_cpu_buffer_write_reserve() argument
144 entry->event = ring_buffer_lock_reserve op_cpu_buffer_write_reserve()
146 size * sizeof(entry->sample->data[0])); op_cpu_buffer_write_reserve()
147 if (!entry->event) op_cpu_buffer_write_reserve()
149 entry->sample = ring_buffer_event_data(entry->event); op_cpu_buffer_write_reserve()
150 entry->size = size; op_cpu_buffer_write_reserve()
151 entry->data = entry->sample->data; op_cpu_buffer_write_reserve()
153 return entry->sample; op_cpu_buffer_write_reserve()
156 int op_cpu_buffer_write_commit(struct op_entry *entry) op_cpu_buffer_write_commit() argument
158 return ring_buffer_unlock_commit(op_ring_buffer, entry->event); op_cpu_buffer_write_commit()
161 struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu) op_cpu_buffer_read_entry() argument
168 entry->event = e; op_cpu_buffer_read_entry()
169 entry->sample = ring_buffer_event_data(e); op_cpu_buffer_read_entry()
170 entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample)) op_cpu_buffer_read_entry()
171 / sizeof(entry->sample->data[0]); op_cpu_buffer_read_entry()
172 entry->data = entry->sample->data; op_cpu_buffer_read_entry()
173 return entry->sample; op_cpu_buffer_read_entry()
185 struct op_entry entry; op_add_code() local
219 sample = op_cpu_buffer_write_reserve(&entry, size); op_add_code()
227 op_cpu_buffer_add_data(&entry, (unsigned long)task); op_add_code()
229 op_cpu_buffer_write_commit(&entry); op_add_code()
238 struct op_entry entry; op_add_sample() local
241 sample = op_cpu_buffer_write_reserve(&entry, 0); op_add_sample()
248 return op_cpu_buffer_write_commit(&entry); op_add_sample()
351 * Use oprofile_add_data(&entry, val) to add data and
352 * oprofile_write_commit(&entry) to commit the sample.
355 oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs, oprofile_write_reserve() argument
368 sample = op_cpu_buffer_write_reserve(entry, size + 2); oprofile_write_reserve()
374 op_cpu_buffer_add_data(entry, code); oprofile_write_reserve()
375 op_cpu_buffer_add_data(entry, pc); oprofile_write_reserve()
380 entry->event = NULL; oprofile_write_reserve()
384 int oprofile_add_data(struct op_entry *entry, unsigned long val) oprofile_add_data() argument
386 if (!entry->event) oprofile_add_data()
388 return op_cpu_buffer_add_data(entry, val); oprofile_add_data()
391 int oprofile_add_data64(struct op_entry *entry, u64 val) oprofile_add_data64() argument
393 if (!entry->event) oprofile_add_data64()
395 if (op_cpu_buffer_get_size(entry) < 2) oprofile_add_data64()
401 if (!op_cpu_buffer_add_data(entry, (u32)val)) oprofile_add_data64()
403 return op_cpu_buffer_add_data(entry, (u32)(val >> 32)); oprofile_add_data64()
406 int oprofile_write_commit(struct op_entry *entry) oprofile_write_commit() argument
408 if (!entry->event) oprofile_write_commit()
410 return op_cpu_buffer_write_commit(entry); oprofile_write_commit()
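The usage comment above oprofile_write_reserve() spells out the intended call sequence; a minimal sketch of that sequence follows (the function name and the pc/code/size values are illustrative). oprofile_add_data() and oprofile_write_commit() bail out internally when the reserve failed, i.e. when entry->event == NULL, so no explicit check is needed between the calls:

	static void example_log_pair(struct pt_regs * const regs,
				     unsigned long a, unsigned long b)
	{
		struct op_entry entry;

		/* Reserve room for two data words plus the code/pc header. */
		oprofile_write_reserve(&entry, regs, 0 /* pc */, 0 /* code */, 2);
		oprofile_add_data(&entry, a);
		oprofile_add_data(&entry, b);
		oprofile_write_commit(&entry);
	}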
/linux-4.1.27/drivers/sh/intc/
H A Dvirq.c27 #define for_each_virq(entry, head) \
28 for (entry = head; entry; entry = entry->next)
86 struct intc_virq_list **last, *entry; add_virq_to_pirq() local
91 for_each_virq(entry, data->handler_data) { add_virq_to_pirq()
92 if (entry->irq == virq) add_virq_to_pirq()
94 last = &entry->next; add_virq_to_pirq()
97 entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC); add_virq_to_pirq()
98 if (!entry) { add_virq_to_pirq()
103 entry->irq = virq; add_virq_to_pirq()
105 *last = entry; add_virq_to_pirq()
114 struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data); intc_virq_handler() local
119 for_each_virq(entry, vlist) { for_each_virq()
122 handle = (unsigned long)irq_get_handler_data(entry->irq); for_each_virq()
126 generic_handle_irq(entry->irq); for_each_virq()
162 struct intc_subgroup_entry *entry; intc_subgroup_init_one() local
168 entry = kmalloc(sizeof(*entry), GFP_NOWAIT); intc_subgroup_init_one()
169 if (!entry) intc_subgroup_init_one()
172 entry->pirq = pirq; intc_subgroup_init_one()
173 entry->enum_id = subgroup->enum_ids[i]; intc_subgroup_init_one()
174 entry->handle = intc_subgroup_data(subgroup, d, i); intc_subgroup_init_one()
176 err = radix_tree_insert(&d->tree, entry->enum_id, entry); intc_subgroup_init_one()
180 radix_tree_tag_set(&d->tree, entry->enum_id, intc_subgroup_init_one()
213 struct intc_subgroup_entry *entry; intc_subgroup_map() local
216 entry = radix_tree_deref_slot((void **)entries[i]); intc_subgroup_map()
217 if (unlikely(!entry)) intc_subgroup_map()
219 if (radix_tree_deref_retry(entry)) intc_subgroup_map()
231 irq, entry->pirq); intc_subgroup_map()
233 intc_irq_xlate_set(irq, entry->enum_id, d); intc_subgroup_map()
235 irq_set_chip_and_handler_name(irq, irq_get_chip(entry->pirq), intc_subgroup_map()
237 irq_set_chip_data(irq, irq_get_chip_data(entry->pirq)); intc_subgroup_map()
239 irq_set_handler_data(irq, (void *)entry->handle); intc_subgroup_map()
246 irq_set_chained_handler(entry->pirq, intc_virq_handler); intc_subgroup_map()
247 add_virq_to_pirq(entry->pirq, irq); intc_subgroup_map()
249 radix_tree_tag_clear(&d->tree, entry->enum_id, intc_subgroup_map()
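The insert-then-tag idiom from intc_subgroup_init_one() above, reduced to a generic sketch (the tree, id, item and tag index are illustrative): the tag is what lets intc_subgroup_map() later find the pending entries with a gang lookup.

	static int example_radix_insert(struct radix_tree_root *tree,
					unsigned long id, void *item)
	{
		int err = radix_tree_insert(tree, id, item);

		if (!err)
			radix_tree_tag_set(tree, id, 0);	/* tag 0 is illustrative */
		return err;
	}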
H A Dvirq-debugfs.c26 struct intc_map_entry *entry = intc_irq_xlate_get(i); intc_irq_xlate_debug() local
27 struct intc_desc_int *desc = entry->desc; intc_irq_xlate_debug()
33 seq_printf(m, "0x%05x ", entry->enum_id); intc_irq_xlate_debug()
/linux-4.1.27/include/linux/
H A Dresource_ext.h42 static inline void resource_list_add(struct resource_entry *entry, resource_list_add() argument
45 list_add(&entry->node, head); resource_list_add()
48 static inline void resource_list_add_tail(struct resource_entry *entry, resource_list_add_tail() argument
51 list_add_tail(&entry->node, head); resource_list_add_tail()
54 static inline void resource_list_del(struct resource_entry *entry) resource_list_del() argument
56 list_del(&entry->node); resource_list_del()
59 static inline void resource_list_free_entry(struct resource_entry *entry) resource_list_free_entry() argument
61 kfree(entry); resource_list_free_entry()
65 resource_list_destroy_entry(struct resource_entry *entry) resource_list_destroy_entry() argument
67 resource_list_del(entry); resource_list_destroy_entry()
68 resource_list_free_entry(entry); resource_list_destroy_entry()
71 #define resource_list_for_each_entry(entry, list) \
72 list_for_each_entry((entry), (list), node)
74 #define resource_list_for_each_entry_safe(entry, tmp, list) \
75 list_for_each_entry_safe((entry), (tmp), (list), node)
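Taken together, the helpers above support call sites of this shape (a sketch; the walk/teardown function is illustrative):

	static void example_resource_walk(struct list_head *head)
	{
		struct resource_entry *entry, *tmp;

		/* Read-only walk with the plain iterator. */
		resource_list_for_each_entry(entry, head)
			pr_info("res %pR\n", entry->res);

		/* The _safe variant permits unlinking during iteration. */
		resource_list_for_each_entry_safe(entry, tmp, head)
			resource_list_destroy_entry(entry);	/* del + kfree */
	}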
H A Dswapops.h39 static inline unsigned swp_type(swp_entry_t entry) swp_type() argument
41 return (entry.val >> SWP_TYPE_SHIFT(entry)); swp_type()
48 static inline pgoff_t swp_offset(swp_entry_t entry) swp_offset() argument
50 return entry.val & SWP_OFFSET_MASK(entry); swp_offset()
54 /* check whether a pte points to a swap entry */ is_swap_pte()
79 static inline pte_t swp_entry_to_pte(swp_entry_t entry) swp_entry_to_pte() argument
83 arch_entry = __swp_entry(swp_type(entry), swp_offset(entry)); swp_entry_to_pte()
89 swp_entry_t entry; radix_to_swp_entry() local
91 entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT; radix_to_swp_entry()
92 return entry; radix_to_swp_entry()
95 static inline void *swp_to_radix_entry(swp_entry_t entry) swp_to_radix_entry() argument
99 value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT; swp_to_radix_entry()
111 static inline int is_migration_entry(swp_entry_t entry) is_migration_entry() argument
113 return unlikely(swp_type(entry) == SWP_MIGRATION_READ || is_migration_entry()
114 swp_type(entry) == SWP_MIGRATION_WRITE); is_migration_entry()
117 static inline int is_write_migration_entry(swp_entry_t entry) is_write_migration_entry() argument
119 return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE); is_write_migration_entry()
122 static inline struct page *migration_entry_to_page(swp_entry_t entry) migration_entry_to_page() argument
124 struct page *p = pfn_to_page(swp_offset(entry)); migration_entry_to_page()
133 static inline void make_migration_entry_read(swp_entry_t *entry) make_migration_entry_read() argument
135 *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry)); make_migration_entry_read()
159 static inline int is_write_migration_entry(swp_entry_t entry) is_write_migration_entry() argument
176 static inline int is_hwpoison_entry(swp_entry_t entry) is_hwpoison_entry() argument
178 return swp_type(entry) == SWP_HWPOISON; is_hwpoison_entry()
194 static inline int non_swap_entry(swp_entry_t entry) non_swap_entry() argument
196 return swp_type(entry) >= MAX_SWAPFILES; non_swap_entry()
199 static inline int non_swap_entry(swp_entry_t entry) non_swap_entry() argument
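A worked round-trip of the encoding these accessors unpack, assuming the swp_entry() constructor from the same header (the type and offset values are made up):

	static void example_swp_roundtrip(void)
	{
		swp_entry_t entry = swp_entry(2, 0x1234);	/* type 2, offset 0x1234 */

		BUG_ON(swp_type(entry) != 2);
		BUG_ON(swp_offset(entry) != 0x1234);
		BUG_ON(non_swap_entry(entry));	/* holds while 2 < MAX_SWAPFILES */
	}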
H A Ddqblk_qtree.h21 void (*mem2disk_dqblk)(void *disk, struct dquot *dquot); /* Convert given entry from in-memory format to on-disk one */
22 void (*disk2mem_dqblk)(struct dquot *dquot, void *disk); /* Convert given entry from on-disk format to in-memory one */
32 unsigned int dqi_free_entry; /* First block with free entry */
34 unsigned int dqi_entry_size; /* Size of quota entry in quota file */
37 struct qtree_fmt_operations *dqi_ops; /* Operations for entry manipulation */
/linux-4.1.27/sound/pci/emu10k1/
H A Demuproc.c81 static void snd_emu10k1_proc_read(struct snd_info_entry *entry, snd_emu10k1_proc_read() argument
187 struct snd_emu10k1 *emu = entry->private_data; snd_emu10k1_proc_read()
238 static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry, snd_emu10k1_proc_spdif_read() argument
241 struct snd_emu10k1 *emu = entry->private_data; snd_emu10k1_proc_spdif_read()
277 static void snd_emu10k1_proc_rates_read(struct snd_info_entry *entry, snd_emu10k1_proc_rates_read() argument
281 struct snd_emu10k1 *emu = entry->private_data; snd_emu10k1_proc_rates_read()
292 static void snd_emu10k1_proc_acode_read(struct snd_info_entry *entry, snd_emu10k1_proc_acode_read() argument
296 struct snd_emu10k1 *emu = entry->private_data; snd_emu10k1_proc_acode_read()
335 static ssize_t snd_emu10k1_fx8010_read(struct snd_info_entry *entry, snd_emu10k1_fx8010_read() argument
340 struct snd_emu10k1 *emu = entry->private_data; snd_emu10k1_fx8010_read()
347 if (!strcmp(entry->name, "fx8010_tram_addr")) { snd_emu10k1_fx8010_read()
350 } else if (!strcmp(entry->name, "fx8010_tram_data")) { snd_emu10k1_fx8010_read()
352 } else if (!strcmp(entry->name, "fx8010_code")) { snd_emu10k1_fx8010_read()
378 static void snd_emu10k1_proc_voices_read(struct snd_info_entry *entry, snd_emu10k1_proc_voices_read() argument
381 struct snd_emu10k1 *emu = entry->private_data; snd_emu10k1_proc_voices_read()
399 static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry, snd_emu_proc_emu1010_reg_read() argument
402 struct snd_emu10k1 *emu = entry->private_data; snd_emu_proc_emu1010_reg_read()
413 static void snd_emu_proc_io_reg_read(struct snd_info_entry *entry, snd_emu_proc_io_reg_read() argument
416 struct snd_emu10k1 *emu = entry->private_data; snd_emu_proc_io_reg_read()
429 static void snd_emu_proc_io_reg_write(struct snd_info_entry *entry, snd_emu_proc_io_reg_write() argument
432 struct snd_emu10k1 *emu = entry->private_data; snd_emu_proc_io_reg_write()
482 static void snd_emu_proc_ptr_reg_read(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_read() argument
485 struct snd_emu10k1 *emu = entry->private_data; snd_emu_proc_ptr_reg_read()
506 static void snd_emu_proc_ptr_reg_write(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_write() argument
509 struct snd_emu10k1 *emu = entry->private_data; snd_emu_proc_ptr_reg_write()
520 static void snd_emu_proc_ptr_reg_write00(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_write00() argument
523 snd_emu_proc_ptr_reg_write(entry, buffer, 0); snd_emu_proc_ptr_reg_write00()
526 static void snd_emu_proc_ptr_reg_write20(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_write20() argument
529 snd_emu_proc_ptr_reg_write(entry, buffer, 0x20); snd_emu_proc_ptr_reg_write20()
533 static void snd_emu_proc_ptr_reg_read00a(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_read00a() argument
536 snd_emu_proc_ptr_reg_read(entry, buffer, 0, 0, 0x40, 64); snd_emu_proc_ptr_reg_read00a()
539 static void snd_emu_proc_ptr_reg_read00b(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_read00b() argument
542 snd_emu_proc_ptr_reg_read(entry, buffer, 0, 0x40, 0x40, 64); snd_emu_proc_ptr_reg_read00b()
545 static void snd_emu_proc_ptr_reg_read20a(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_read20a() argument
548 snd_emu_proc_ptr_reg_read(entry, buffer, 0x20, 0, 0x40, 4); snd_emu_proc_ptr_reg_read20a()
551 static void snd_emu_proc_ptr_reg_read20b(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_read20b() argument
554 snd_emu_proc_ptr_reg_read(entry, buffer, 0x20, 0x40, 0x40, 4); snd_emu_proc_ptr_reg_read20b()
557 static void snd_emu_proc_ptr_reg_read20c(struct snd_info_entry *entry, snd_emu_proc_ptr_reg_read20c() argument
560 snd_emu_proc_ptr_reg_read(entry, buffer, 0x20, 0x80, 0x20, 4); snd_emu_proc_ptr_reg_read20c()
570 struct snd_info_entry *entry; snd_emu10k1_proc_init() local
573 if (! snd_card_proc_new(emu->card, "emu1010_regs", &entry)) snd_emu10k1_proc_init()
574 snd_info_set_text_ops(entry, emu, snd_emu_proc_emu1010_reg_read); snd_emu10k1_proc_init()
576 if (! snd_card_proc_new(emu->card, "io_regs", &entry)) { snd_emu10k1_proc_init()
577 snd_info_set_text_ops(entry, emu, snd_emu_proc_io_reg_read); snd_emu10k1_proc_init()
578 entry->c.text.write = snd_emu_proc_io_reg_write; snd_emu10k1_proc_init()
579 entry->mode |= S_IWUSR; snd_emu10k1_proc_init()
581 if (! snd_card_proc_new(emu->card, "ptr_regs00a", &entry)) { snd_emu10k1_proc_init()
582 snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read00a); snd_emu10k1_proc_init()
583 entry->c.text.write = snd_emu_proc_ptr_reg_write00; snd_emu10k1_proc_init()
584 entry->mode |= S_IWUSR; snd_emu10k1_proc_init()
586 if (! snd_card_proc_new(emu->card, "ptr_regs00b", &entry)) { snd_emu10k1_proc_init()
587 snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read00b); snd_emu10k1_proc_init()
588 entry->c.text.write = snd_emu_proc_ptr_reg_write00; snd_emu10k1_proc_init()
589 entry->mode |= S_IWUSR; snd_emu10k1_proc_init()
591 if (! snd_card_proc_new(emu->card, "ptr_regs20a", &entry)) { snd_emu10k1_proc_init()
592 snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read20a); snd_emu10k1_proc_init()
593 entry->c.text.write = snd_emu_proc_ptr_reg_write20; snd_emu10k1_proc_init()
594 entry->mode |= S_IWUSR; snd_emu10k1_proc_init()
596 if (! snd_card_proc_new(emu->card, "ptr_regs20b", &entry)) { snd_emu10k1_proc_init()
597 snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read20b); snd_emu10k1_proc_init()
598 entry->c.text.write = snd_emu_proc_ptr_reg_write20; snd_emu10k1_proc_init()
599 entry->mode |= S_IWUSR; snd_emu10k1_proc_init()
601 if (! snd_card_proc_new(emu->card, "ptr_regs20c", &entry)) { snd_emu10k1_proc_init()
602 snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read20c); snd_emu10k1_proc_init()
603 entry->c.text.write = snd_emu_proc_ptr_reg_write20; snd_emu10k1_proc_init()
604 entry->mode |= S_IWUSR; snd_emu10k1_proc_init()
608 if (! snd_card_proc_new(emu->card, "emu10k1", &entry)) snd_emu10k1_proc_init()
609 snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_read); snd_emu10k1_proc_init()
612 if (! snd_card_proc_new(emu->card, "spdif-in", &entry)) snd_emu10k1_proc_init()
613 snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_spdif_read); snd_emu10k1_proc_init()
616 if (! snd_card_proc_new(emu->card, "capture-rates", &entry)) snd_emu10k1_proc_init()
617 snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_rates_read); snd_emu10k1_proc_init()
620 if (! snd_card_proc_new(emu->card, "voices", &entry)) snd_emu10k1_proc_init()
621 snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_voices_read); snd_emu10k1_proc_init()
623 if (! snd_card_proc_new(emu->card, "fx8010_gpr", &entry)) { snd_emu10k1_proc_init()
624 entry->content = SNDRV_INFO_CONTENT_DATA; snd_emu10k1_proc_init()
625 entry->private_data = emu; snd_emu10k1_proc_init()
626 entry->mode = S_IFREG | S_IRUGO /*| S_IWUSR*/; snd_emu10k1_proc_init()
627 entry->size = emu->audigy ? A_TOTAL_SIZE_GPR : TOTAL_SIZE_GPR; snd_emu10k1_proc_init()
628 entry->c.ops = &snd_emu10k1_proc_ops_fx8010; snd_emu10k1_proc_init()
630 if (! snd_card_proc_new(emu->card, "fx8010_tram_data", &entry)) { snd_emu10k1_proc_init()
631 entry->content = SNDRV_INFO_CONTENT_DATA; snd_emu10k1_proc_init()
632 entry->private_data = emu; snd_emu10k1_proc_init()
633 entry->mode = S_IFREG | S_IRUGO /*| S_IWUSR*/; snd_emu10k1_proc_init()
634 entry->size = emu->audigy ? A_TOTAL_SIZE_TANKMEM_DATA : TOTAL_SIZE_TANKMEM_DATA; snd_emu10k1_proc_init()
635 entry->c.ops = &snd_emu10k1_proc_ops_fx8010; snd_emu10k1_proc_init()
637 if (! snd_card_proc_new(emu->card, "fx8010_tram_addr", &entry)) { snd_emu10k1_proc_init()
638 entry->content = SNDRV_INFO_CONTENT_DATA; snd_emu10k1_proc_init()
639 entry->private_data = emu; snd_emu10k1_proc_init()
640 entry->mode = S_IFREG | S_IRUGO /*| S_IWUSR*/; snd_emu10k1_proc_init()
641 entry->size = emu->audigy ? A_TOTAL_SIZE_TANKMEM_ADDR : TOTAL_SIZE_TANKMEM_ADDR; snd_emu10k1_proc_init()
642 entry->c.ops = &snd_emu10k1_proc_ops_fx8010; snd_emu10k1_proc_init()
644 if (! snd_card_proc_new(emu->card, "fx8010_code", &entry)) { snd_emu10k1_proc_init()
645 entry->content = SNDRV_INFO_CONTENT_DATA; snd_emu10k1_proc_init()
646 entry->private_data = emu; snd_emu10k1_proc_init()
647 entry->mode = S_IFREG | S_IRUGO /*| S_IWUSR*/; snd_emu10k1_proc_init()
648 entry->size = emu->audigy ? A_TOTAL_SIZE_CODE : TOTAL_SIZE_CODE; snd_emu10k1_proc_init()
649 entry->c.ops = &snd_emu10k1_proc_ops_fx8010; snd_emu10k1_proc_init()
651 if (! snd_card_proc_new(emu->card, "fx8010_acode", &entry)) { snd_emu10k1_proc_init()
652 entry->content = SNDRV_INFO_CONTENT_TEXT; snd_emu10k1_proc_init()
653 entry->private_data = emu; snd_emu10k1_proc_init()
654 entry->mode = S_IFREG | S_IRUGO /*| S_IWUSR*/; snd_emu10k1_proc_init()
655 entry->c.text.read = snd_emu10k1_proc_acode_read; snd_emu10k1_proc_init()
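The registrations above all follow one pattern; a generic sketch of it (the card/chip arguments and callback names are placeholders, not from the source):

	static void example_proc_read(struct snd_info_entry *entry,
				      struct snd_info_buffer *buffer)
	{
		snd_iprintf(buffer, "chip at %p\n", entry->private_data);
	}

	static void example_proc_init(struct snd_card *card, void *chip)
	{
		struct snd_info_entry *entry;

		/* Read-only text entry; snd_card_proc_new() returns 0 on success. */
		if (!snd_card_proc_new(card, "example", &entry))
			snd_info_set_text_ops(entry, chip, example_proc_read);
	}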
/linux-4.1.27/net/netfilter/
H A Dnf_queue.c49 void nf_queue_entry_release_refs(struct nf_queue_entry *entry) nf_queue_entry_release_refs() argument
51 struct nf_hook_state *state = &entry->state; nf_queue_entry_release_refs()
61 if (entry->skb->nf_bridge) { nf_queue_entry_release_refs()
64 physdev = nf_bridge_get_physindev(entry->skb); nf_queue_entry_release_refs()
67 physdev = nf_bridge_get_physoutdev(entry->skb); nf_queue_entry_release_refs()
73 module_put(entry->elem->owner); nf_queue_entry_release_refs()
78 bool nf_queue_entry_get_refs(struct nf_queue_entry *entry) nf_queue_entry_get_refs() argument
80 struct nf_hook_state *state = &entry->state; nf_queue_entry_get_refs()
82 if (!try_module_get(entry->elem->owner)) nf_queue_entry_get_refs()
92 if (entry->skb->nf_bridge) { nf_queue_entry_get_refs()
95 physdev = nf_bridge_get_physindev(entry->skb); nf_queue_entry_get_refs()
98 physdev = nf_bridge_get_physoutdev(entry->skb); nf_queue_entry_get_refs()
135 struct nf_queue_entry *entry = NULL; nf_queue() local
152 entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC); nf_queue()
153 if (!entry) { nf_queue()
158 *entry = (struct nf_queue_entry) { nf_queue()
162 .size = sizeof(*entry) + afinfo->route_key_size, nf_queue()
165 if (!nf_queue_entry_get_refs(entry)) { nf_queue()
170 afinfo->saveroute(skb, entry); nf_queue()
171 status = qh->outfn(entry, queuenum); nf_queue()
176 nf_queue_entry_release_refs(entry); nf_queue()
185 kfree(entry); nf_queue()
189 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) nf_reinject() argument
191 struct sk_buff *skb = entry->skb; nf_reinject()
192 struct nf_hook_ops *elem = entry->elem; nf_reinject()
198 nf_queue_entry_release_refs(entry); nf_reinject()
207 afinfo = nf_get_afinfo(entry->state.pf); nf_reinject()
208 if (!afinfo || afinfo->reroute(skb, entry) < 0) nf_reinject()
212 entry->state.thresh = INT_MIN; nf_reinject()
216 verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook], nf_reinject()
217 skb, &entry->state, &elem); nf_reinject()
224 entry->state.okfn(entry->state.sk, skb); nf_reinject()
228 err = nf_queue(skb, elem, &entry->state, nf_reinject()
245 kfree(entry); nf_reinject()
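The ->outfn() hook invoked by nf_queue() above has this shape (a sketch, not the nfnetlink_queue implementation; this toy handler accepts every packet immediately by reinjecting it):

	static int example_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
	{
		/* The handler owns the entry on success; nf_reinject() releases it. */
		nf_reinject(entry, NF_ACCEPT);
		return 0;
	}

	static const struct nf_queue_handler example_qh = {
		.outfn	= example_outfn,
	};
	/* Registered elsewhere with nf_register_queue_handler(&example_qh). */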
H A Dnfnetlink_queue_core.c188 __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) __enqueue_entry() argument
190 list_add_tail(&entry->list, &queue->queue_list); __enqueue_entry()
195 __dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) __dequeue_entry() argument
197 list_del(&entry->list); __dequeue_entry()
204 struct nf_queue_entry *entry = NULL, *i; find_dequeue_entry() local
210 entry = i; find_dequeue_entry()
215 if (entry) find_dequeue_entry()
216 __dequeue_entry(queue, entry); find_dequeue_entry()
220 return entry; find_dequeue_entry()
226 struct nf_queue_entry *entry, *next; nfqnl_flush() local
229 list_for_each_entry_safe(entry, next, &queue->queue_list, list) { nfqnl_flush()
230 if (!cmpfn || cmpfn(entry, data)) { nfqnl_flush()
231 list_del(&entry->list); nfqnl_flush()
233 nf_reinject(entry, NF_DROP); nfqnl_flush()
283 struct nf_queue_entry *entry, nfqnl_build_packet_message()
294 struct sk_buff *entskb = entry->skb; nfqnl_build_packet_message()
317 if (entry->state.hook <= NF_INET_FORWARD || nfqnl_build_packet_message()
318 (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL)) nfqnl_build_packet_message()
323 outdev = entry->state.out; nfqnl_build_packet_message()
371 nfmsg->nfgen_family = entry->state.pf; nfqnl_build_packet_message()
378 pmsg->hook = entry->state.hook; nfqnl_build_packet_message()
381 indev = entry->state.in; nfqnl_build_packet_message()
387 if (entry->state.pf == PF_BRIDGE) { nfqnl_build_packet_message()
421 if (entry->state.pf == PF_BRIDGE) { nfqnl_build_packet_message()
518 struct nf_queue_entry *entry) __nfqnl_enqueue_packet()
525 nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr); __nfqnl_enqueue_packet()
543 entry->id = ++queue->id_sequence; __nfqnl_enqueue_packet()
544 *packet_id_ptr = htonl(entry->id); __nfqnl_enqueue_packet()
553 __enqueue_entry(queue, entry); __nfqnl_enqueue_packet()
563 nf_reinject(entry, NF_ACCEPT); __nfqnl_enqueue_packet()
571 struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC); nf_queue_entry_dup() local
572 if (entry) { nf_queue_entry_dup()
573 if (nf_queue_entry_get_refs(entry)) nf_queue_entry_dup()
574 return entry; nf_queue_entry_dup()
575 kfree(entry); nf_queue_entry_dup()
601 static void free_entry(struct nf_queue_entry *entry) free_entry() argument
603 nf_queue_entry_release_refs(entry); free_entry()
604 kfree(entry); free_entry()
609 struct sk_buff *skb, struct nf_queue_entry *entry) __nfqnl_enqueue_packet_gso()
616 if (skb->next == NULL) { /* last packet, no need to copy entry */ __nfqnl_enqueue_packet_gso()
617 struct sk_buff *gso_skb = entry->skb; __nfqnl_enqueue_packet_gso()
618 entry->skb = skb; __nfqnl_enqueue_packet_gso()
619 ret = __nfqnl_enqueue_packet(net, queue, entry); __nfqnl_enqueue_packet_gso()
621 entry->skb = gso_skb; __nfqnl_enqueue_packet_gso()
627 entry_seg = nf_queue_entry_dup(entry); __nfqnl_enqueue_packet_gso()
638 nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) nfqnl_enqueue_packet() argument
644 struct net *net = dev_net(entry->state.in ? nfqnl_enqueue_packet()
645 entry->state.in : entry->state.out); nfqnl_enqueue_packet()
656 skb = entry->skb; nfqnl_enqueue_packet()
658 switch (entry->state.pf) { nfqnl_enqueue_packet()
668 return __nfqnl_enqueue_packet(net, queue, entry); nfqnl_enqueue_packet()
684 segs, entry); nfqnl_enqueue_packet()
694 free_entry(entry); nfqnl_enqueue_packet()
766 dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) dev_cmp() argument
768 if (entry->state.in) dev_cmp()
769 if (entry->state.in->ifindex == ifindex) dev_cmp()
771 if (entry->state.out) dev_cmp()
772 if (entry->state.out->ifindex == ifindex) dev_cmp()
775 if (entry->skb->nf_bridge) { dev_cmp()
778 physinif = nf_bridge_get_physinif(entry->skb); dev_cmp()
779 physoutif = nf_bridge_get_physoutif(entry->skb); dev_cmp()
827 static int nf_hook_cmp(struct nf_queue_entry *entry, unsigned long ops_ptr) nf_hook_cmp() argument
829 return entry->elem == (struct nf_hook_ops *)ops_ptr; nf_hook_cmp()
934 struct nf_queue_entry *entry, *tmp; nfqnl_recv_verdict_batch() local
958 list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) { nfqnl_recv_verdict_batch()
959 if (nfq_id_after(entry->id, maxid)) nfqnl_recv_verdict_batch()
961 __dequeue_entry(queue, entry); nfqnl_recv_verdict_batch()
962 list_add_tail(&entry->list, &batch_list); nfqnl_recv_verdict_batch()
970 list_for_each_entry_safe(entry, tmp, &batch_list, list) { nfqnl_recv_verdict_batch()
972 entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); nfqnl_recv_verdict_batch()
973 nf_reinject(entry, verdict); nfqnl_recv_verdict_batch()
989 struct nf_queue_entry *entry; nfqnl_recv_verdict() local
1009 entry = find_dequeue_entry(queue, ntohl(vhdr->id)); nfqnl_recv_verdict()
1010 if (entry == NULL) nfqnl_recv_verdict()
1014 ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo); nfqnl_recv_verdict()
1024 int diff = payload_len - entry->skb->len; nfqnl_recv_verdict()
1027 payload_len, entry, diff) < 0) nfqnl_recv_verdict()
1031 nfqnl_ct_seq_adjust(entry->skb, ct, ctinfo, diff); nfqnl_recv_verdict()
1035 entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); nfqnl_recv_verdict()
1037 nf_reinject(entry, verdict); nfqnl_recv_verdict()
282 nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, struct nf_queue_entry *entry, __be32 **packet_id_ptr) nfqnl_build_packet_message() argument
517 __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue, struct nf_queue_entry *entry) __nfqnl_enqueue_packet() argument
608 __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue, struct sk_buff *skb, struct nf_queue_entry *entry) __nfqnl_enqueue_packet_gso() argument
/linux-4.1.27/lib/
H A Dlist_debug.c16 * Insert a new entry between two known consecutive entries.
44 void __list_del_entry(struct list_head *entry) __list_del_entry() argument
48 prev = entry->prev; __list_del_entry()
49 next = entry->next; __list_del_entry()
53 entry, LIST_POISON1) || __list_del_entry()
56 entry, LIST_POISON2) || __list_del_entry()
57 WARN(prev->next != entry, __list_del_entry()
59 "but was %p\n", entry, prev->next) || __list_del_entry()
60 WARN(next->prev != entry, __list_del_entry()
62 "but was %p\n", entry, next->prev)) __list_del_entry()
70 * list_del - deletes entry from list.
71 * @entry: the element to delete from the list.
72 * Note: list_empty on entry does not return true after this, the entry is
75 void list_del(struct list_head *entry) list_del() argument
77 __list_del_entry(entry); list_del()
78 entry->next = LIST_POISON1; list_del()
79 entry->prev = LIST_POISON2; list_del()
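As the list_del() kernel-doc above warns, a deleted node is poisoned rather than self-linked; a sketch of the re-initialization required before reuse (the function name is illustrative):

	static void example_del_and_reuse(struct list_head *node,
					  struct list_head *head)
	{
		list_del(node);		/* next/prev now LIST_POISON1/LIST_POISON2 */
		INIT_LIST_HEAD(node);	/* make list_empty(node) true again */
		list_add(node, head);	/* now safe to link back in */
	}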
H A Ddma-debug.c172 static inline void dump_entry_trace(struct dma_debug_entry *entry) dump_entry_trace() argument
175 if (entry) { dump_entry_trace()
177 print_stack_trace(&entry->stacktrace, 0); dump_entry_trace()
223 #define err_printk(dev, entry, format, arg...) do { \
230 dump_entry_trace(entry); \
242 static int hash_fn(struct dma_debug_entry *entry) hash_fn() argument
248 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK; hash_fn()
254 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, get_hash_bucket() argument
257 int idx = hash_fn(entry); get_hash_bucket()
296 * Search a given entry in the hash bucket list
302 struct dma_debug_entry *entry, *ret = NULL; __hash_bucket_find() local
305 list_for_each_entry(entry, &bucket->list, list) { __hash_bucket_find()
306 if (!match(ref, entry)) __hash_bucket_find()
315 * best-fit algorithm here which returns the entry from __hash_bucket_find()
321 entry->size == ref->size ? ++match_lvl : 0; __hash_bucket_find()
322 entry->type == ref->type ? ++match_lvl : 0; __hash_bucket_find()
323 entry->direction == ref->direction ? ++match_lvl : 0; __hash_bucket_find()
324 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0; __hash_bucket_find()
328 return entry; __hash_bucket_find()
331 * We found an entry that fits better than the __hash_bucket_find()
335 ret = entry; __hash_bucket_find()
360 struct dma_debug_entry *entry, index = *ref; bucket_find_contain() local
364 entry = __hash_bucket_find(*bucket, ref, containing_match); bucket_find_contain()
366 if (entry) bucket_find_contain()
367 return entry; bucket_find_contain()
382 * Add an entry to a hash bucket
385 struct dma_debug_entry *entry) hash_bucket_add()
387 list_add_tail(&entry->list, &bucket->list); hash_bucket_add()
391 * Remove entry from a hash bucket list
393 static void hash_bucket_del(struct dma_debug_entry *entry) hash_bucket_del() argument
395 list_del(&entry->list); hash_bucket_del()
398 static unsigned long long phys_addr(struct dma_debug_entry *entry) phys_addr() argument
400 return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; phys_addr()
412 struct dma_debug_entry *entry; debug_dma_dump_mappings() local
417 list_for_each_entry(entry, &bucket->list, list) { debug_dma_dump_mappings()
418 if (!dev || dev == entry->dev) { debug_dma_dump_mappings()
419 dev_info(entry->dev, debug_dma_dump_mappings()
421 type2name[entry->type], idx, debug_dma_dump_mappings()
422 phys_addr(entry), entry->pfn, debug_dma_dump_mappings()
423 entry->dev_addr, entry->size, debug_dma_dump_mappings()
424 dir2name[entry->direction], debug_dma_dump_mappings()
425 maperr2str[entry->map_err_type]); debug_dma_dump_mappings()
439 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
440 * the entry already exists at insertion time add a tag as a reference
450 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
463 static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry) to_cacheline_number() argument
465 return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) + to_cacheline_number()
466 (entry->offset >> L1_CACHE_SHIFT); to_cacheline_number()
519 static int active_cacheline_insert(struct dma_debug_entry *entry) active_cacheline_insert() argument
521 phys_addr_t cln = to_cacheline_number(entry); active_cacheline_insert()
529 if (entry->direction == DMA_TO_DEVICE) active_cacheline_insert()
533 rc = radix_tree_insert(&dma_active_cacheline, cln, entry); active_cacheline_insert()
541 static void active_cacheline_remove(struct dma_debug_entry *entry) active_cacheline_remove() argument
543 phys_addr_t cln = to_cacheline_number(entry); active_cacheline_remove()
547 if (entry->direction == DMA_TO_DEVICE) active_cacheline_remove()
571 struct dma_debug_entry *entry = NULL; debug_dma_assert_idle() local
591 entry = ents[i]; debug_dma_assert_idle()
598 if (!entry) debug_dma_assert_idle()
601 cln = to_cacheline_number(entry); debug_dma_assert_idle()
602 err_printk(entry->dev, entry, debug_dma_assert_idle()
608 * Wrapper function for adding an entry to the hash.
611 static void add_dma_entry(struct dma_debug_entry *entry) add_dma_entry() argument
617 bucket = get_hash_bucket(entry, &flags); add_dma_entry()
618 hash_bucket_add(bucket, entry); add_dma_entry()
621 rc = active_cacheline_insert(entry); add_dma_entry()
634 struct dma_debug_entry *entry; __dma_entry_alloc() local
636 entry = list_entry(free_entries.next, struct dma_debug_entry, list); __dma_entry_alloc()
637 list_del(&entry->list); __dma_entry_alloc()
638 memset(entry, 0, sizeof(*entry)); __dma_entry_alloc()
644 return entry; __dma_entry_alloc()
654 struct dma_debug_entry *entry; dma_entry_alloc() local
666 entry = __dma_entry_alloc(); dma_entry_alloc()
671 entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; dma_entry_alloc()
672 entry->stacktrace.entries = entry->st_entries; dma_entry_alloc()
673 entry->stacktrace.skip = 2; dma_entry_alloc()
674 save_stack_trace(&entry->stacktrace); dma_entry_alloc()
677 return entry; dma_entry_alloc()
680 static void dma_entry_free(struct dma_debug_entry *entry) dma_entry_free() argument
684 active_cacheline_remove(entry); dma_entry_free()
691 list_add(&entry->list, &free_entries); dma_entry_free()
700 struct dma_debug_entry *entry; dma_debug_resize_entries() local
711 entry = kzalloc(sizeof(*entry), GFP_KERNEL); dma_debug_resize_entries()
712 if (!entry) dma_debug_resize_entries()
715 list_add_tail(&entry->list, &tmp); dma_debug_resize_entries()
727 entry = __dma_entry_alloc(); dma_debug_resize_entries()
728 kfree(entry); dma_debug_resize_entries()
753 struct dma_debug_entry *entry, *next_entry; prealloc_memory() local
757 entry = kzalloc(sizeof(*entry), GFP_KERNEL); prealloc_memory()
758 if (!entry) prealloc_memory()
761 list_add_tail(&entry->list, &free_entries); prealloc_memory()
773 list_for_each_entry_safe(entry, next_entry, &free_entries, list) { prealloc_memory()
774 list_del(&entry->list); prealloc_memory()
775 kfree(entry); prealloc_memory()
931 struct dma_debug_entry *entry; device_dma_allocations() local
939 list_for_each_entry(entry, &dma_entry_hash[i].list, list) { device_dma_allocations()
940 if (entry->dev == dev) { device_dma_allocations()
942 *out_entry = entry; device_dma_allocations()
956 struct dma_debug_entry *uninitialized_var(entry); dma_debug_device_change()
964 count = device_dma_allocations(dev, &entry); dma_debug_device_change()
967 err_printk(dev, entry, "DMA-API: device driver has pending " dma_debug_device_change()
973 count, entry->dev_addr, entry->size, dma_debug_device_change()
974 dir2name[entry->direction], type2name[entry->type]); dma_debug_device_change()
1076 struct dma_debug_entry *entry; check_unmap() local
1081 entry = bucket_find_exact(bucket, ref); check_unmap()
1083 if (!entry) { check_unmap()
1101 if (ref->size != entry->size) { check_unmap()
1102 err_printk(ref->dev, entry, "DMA-API: device driver frees " check_unmap()
1106 ref->dev_addr, entry->size, ref->size); check_unmap()
1109 if (ref->type != entry->type) { check_unmap()
1110 err_printk(ref->dev, entry, "DMA-API: device driver frees " check_unmap()
1115 type2name[entry->type], type2name[ref->type]); check_unmap()
1116 } else if ((entry->type == dma_debug_coherent) && check_unmap()
1117 (phys_addr(ref) != phys_addr(entry))) { check_unmap()
1118 err_printk(ref->dev, entry, "DMA-API: device driver frees " check_unmap()
1124 phys_addr(entry), check_unmap()
1129 ref->sg_call_ents != entry->sg_call_ents) { check_unmap()
1130 err_printk(ref->dev, entry, "DMA-API: device driver frees " check_unmap()
1131 "DMA sg list with different entry count " check_unmap()
1133 entry->sg_call_ents, ref->sg_call_ents); check_unmap()
1140 if (ref->direction != entry->direction) { check_unmap()
1141 err_printk(ref->dev, entry, "DMA-API: device driver frees " check_unmap()
1146 dir2name[entry->direction], check_unmap()
1150 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { check_unmap()
1151 err_printk(ref->dev, entry, check_unmap()
1156 type2name[entry->type]); check_unmap()
1159 hash_bucket_del(entry); check_unmap()
1160 dma_entry_free(entry); check_unmap()
1193 struct dma_debug_entry *entry; check_sync() local
1199 entry = bucket_find_contain(&bucket, ref, &flags); check_sync()
1201 if (!entry) { check_sync()
1209 if (ref->size > entry->size) { check_sync()
1210 err_printk(dev, entry, "DMA-API: device driver syncs" check_sync()
1215 entry->dev_addr, entry->size, check_sync()
1219 if (entry->direction == DMA_BIDIRECTIONAL) check_sync()
1222 if (ref->direction != entry->direction) { check_sync()
1223 err_printk(dev, entry, "DMA-API: device driver syncs " check_sync()
1227 (unsigned long long)ref->dev_addr, entry->size, check_sync()
1228 dir2name[entry->direction], check_sync()
1232 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && check_sync()
1234 err_printk(dev, entry, "DMA-API: device driver syncs " check_sync()
1238 (unsigned long long)ref->dev_addr, entry->size, check_sync()
1239 dir2name[entry->direction], check_sync()
1242 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && check_sync()
1244 err_printk(dev, entry, "DMA-API: device driver syncs " check_sync()
1248 (unsigned long long)ref->dev_addr, entry->size, check_sync()
1249 dir2name[entry->direction], check_sync()
1260 struct dma_debug_entry *entry; debug_dma_map_page() local
1268 entry = dma_entry_alloc(); debug_dma_map_page()
1269 if (!entry) debug_dma_map_page()
1272 entry->dev = dev; debug_dma_map_page()
1273 entry->type = dma_debug_page; debug_dma_map_page()
1274 entry->pfn = page_to_pfn(page); debug_dma_map_page()
1275 entry->offset = offset, debug_dma_map_page()
1276 entry->dev_addr = dma_addr; debug_dma_map_page()
1277 entry->size = size; debug_dma_map_page()
1278 entry->direction = direction; debug_dma_map_page()
1279 entry->map_err_type = MAP_ERR_NOT_CHECKED; debug_dma_map_page()
1282 entry->type = dma_debug_single; debug_dma_map_page()
1291 add_dma_entry(entry); debug_dma_map_page()
1298 struct dma_debug_entry *entry; debug_dma_mapping_error() local
1309 list_for_each_entry(entry, &bucket->list, list) { debug_dma_mapping_error()
1310 if (!exact_match(&ref, entry)) debug_dma_mapping_error()
1319 * best-fit algorithm here which updates the first entry debug_dma_mapping_error()
1323 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { debug_dma_mapping_error()
1324 entry->map_err_type = MAP_ERR_CHECKED; debug_dma_mapping_error()
1357 struct dma_debug_entry *entry; debug_dma_map_sg() local
1365 entry = dma_entry_alloc(); for_each_sg()
1366 if (!entry) for_each_sg()
1369 entry->type = dma_debug_sg; for_each_sg()
1370 entry->dev = dev; for_each_sg()
1371 entry->pfn = page_to_pfn(sg_page(s)); for_each_sg()
1372 entry->offset = s->offset, for_each_sg()
1373 entry->size = sg_dma_len(s); for_each_sg()
1374 entry->dev_addr = sg_dma_address(s); for_each_sg()
1375 entry->direction = direction; for_each_sg()
1376 entry->sg_call_ents = nents; for_each_sg()
1377 entry->sg_mapped_ents = mapped_ents; for_each_sg()
1384 add_dma_entry(entry); for_each_sg()
1392 struct dma_debug_entry *entry; get_nr_mapped_entries() local
1398 entry = bucket_find_exact(bucket, ref); get_nr_mapped_entries()
1401 if (entry) get_nr_mapped_entries()
1402 mapped_ents = entry->sg_mapped_ents; get_nr_mapped_entries()
1444 struct dma_debug_entry *entry; debug_dma_alloc_coherent() local
1452 entry = dma_entry_alloc(); debug_dma_alloc_coherent()
1453 if (!entry) debug_dma_alloc_coherent()
1456 entry->type = dma_debug_coherent; debug_dma_alloc_coherent()
1457 entry->dev = dev; debug_dma_alloc_coherent()
1458 entry->pfn = page_to_pfn(virt_to_page(virt)); debug_dma_alloc_coherent()
1459 entry->offset = (size_t) virt & PAGE_MASK; debug_dma_alloc_coherent()
1460 entry->size = size; debug_dma_alloc_coherent()
1461 entry->dev_addr = dma_addr; debug_dma_alloc_coherent()
1462 entry->direction = DMA_BIDIRECTIONAL; debug_dma_alloc_coherent()
1464 add_dma_entry(entry); debug_dma_alloc_coherent()
384 hash_bucket_add(struct hash_bucket *bucket, struct dma_debug_entry *entry) hash_bucket_add() argument
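A worked instance of the to_cacheline_number() arithmetic above, assuming 4 KiB pages and 64-byte cachelines, so that L1_CACHE_SHIFT is 6 and CACHELINE_PER_PAGE_SHIFT is 12 - 6 = 6:

	/* pfn 3, offset 0x80 maps to cacheline (3 << 6) + (0x80 >> 6) */
	phys_addr_t cln = (3UL << 6) + (0x80 >> 6);	/* 192 + 2 == 194 */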
H A Dllist.c32 * @new_first: first entry in batch to be added
33 * @new_last: last entry in batch to be added
52 * llist_del_first - delete the first entry of lock-less list
55 * If list is empty, return NULL, otherwise, return the first entry
67 struct llist_node *entry, *old_entry, *next; llist_del_first() local
69 entry = head->first; llist_del_first()
71 if (entry == NULL) llist_del_first()
73 old_entry = entry; llist_del_first()
74 next = entry->next; llist_del_first()
75 entry = cmpxchg(&head->first, old_entry, next); llist_del_first()
76 if (entry == old_entry) llist_del_first()
80 return entry; llist_del_first()
89 * new first entry.
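A minimal producer/consumer sketch for this API (type and function names are illustrative); llist_add() is safe from any context, while, as the llist documentation requires, only one consumer may call llist_del_first() at a time without extra locking:

	struct example_item {
		struct llist_node node;
		int payload;
	};

	static LLIST_HEAD(example_list);

	static void example_produce(struct example_item *item)
	{
		llist_add(&item->node, &example_list);
	}

	static struct example_item *example_consume(void)
	{
		struct llist_node *n = llist_del_first(&example_list);

		return n ? llist_entry(n, struct example_item, node) : NULL;
	}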
/linux-4.1.27/drivers/firmware/
H A Dmemmap.c31 * Firmware map entry. Because firmware memory maps are flat and not
43 struct list_head list; /* entry for the linked list */
44 struct kobject kobj; /* kobject for each entry */
52 static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
53 static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
54 static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
65 ssize_t (*show)(struct firmware_map_entry *entry, char *buf);
73 * These are default attributes that are added for every memmap entry.
93 * map entry is allocated by bootmem, we need to remember the storage and
108 struct firmware_map_entry *entry = to_memmap_entry(kobj); release_firmware_map_entry() local
110 if (PageReserved(virt_to_page(entry))) { release_firmware_map_entry()
113 * the memory is hot-added again. The entry will be added to release_firmware_map_entry()
118 list_add(&entry->list, &map_entries_bootmem); release_firmware_map_entry()
124 kfree(entry); release_firmware_map_entry()
138 * firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
142 * @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised
143 * entry.
150 struct firmware_map_entry *entry) firmware_map_add_entry()
154 entry->start = start; firmware_map_add_entry()
155 entry->end = end - 1; firmware_map_add_entry()
156 entry->type = type; firmware_map_add_entry()
157 INIT_LIST_HEAD(&entry->list); firmware_map_add_entry()
158 kobject_init(&entry->kobj, &memmap_ktype); firmware_map_add_entry()
161 list_add_tail(&entry->list, &map_entries); firmware_map_add_entry()
169 * memmap entry.
170 * @entry: removed entry.
174 static inline void firmware_map_remove_entry(struct firmware_map_entry *entry) firmware_map_remove_entry() argument
176 list_del(&entry->list); firmware_map_remove_entry()
180 * Add memmap entry on sysfs
182 static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry) add_sysfs_fw_map_entry() argument
187 if (entry->kobj.state_in_sysfs) add_sysfs_fw_map_entry()
196 entry->kobj.kset = mmap_kset; add_sysfs_fw_map_entry()
197 if (kobject_add(&entry->kobj, NULL, "%d", map_entries_nr++)) add_sysfs_fw_map_entry()
198 kobject_put(&entry->kobj); add_sysfs_fw_map_entry()
204 * Remove memmap entry on sysfs
206 static inline void remove_sysfs_fw_map_entry(struct firmware_map_entry *entry) remove_sysfs_fw_map_entry() argument
208 kobject_put(&entry->kobj); remove_sysfs_fw_map_entry()
212 * firmware_map_find_entry_in_list() - Search memmap entry in a given list.
216 * @list: In which to find the entry.
220 * the lock until the processing of the returned entry has completed.
222 * Return: Pointer to the entry to be found on success, or NULL on failure.
228 struct firmware_map_entry *entry; firmware_map_find_entry_in_list() local
230 list_for_each_entry(entry, list, list) list_for_each_entry()
231 if ((entry->start == start) && (entry->end == end) && list_for_each_entry()
232 (!strcmp(entry->type, type))) { list_for_each_entry()
233 return entry; list_for_each_entry()
240 * firmware_map_find_entry() - Search memmap entry in map_entries.
247 * until the processing of the returned entry has completed.
249 * Return: Pointer to the entry to be found on success, or NULL on failure.
258 * firmware_map_find_entry_bootmem() - Search memmap entry in map_entries_bootmem.
264 * given entry in map_entries_bootmem.
266 * Return: Pointer to the entry to be found on success, or NULL on failure.
276 * firmware_map_add_hotplug() - Adds a firmware mapping entry when we do
282 * Adds a firmware mapping entry. This function is for memory hotplug, it is
284 * it will create the sysfs entry dynamically. firmware_map_add_hotplug()
290 struct firmware_map_entry *entry; firmware_map_add_hotplug() local
292 entry = firmware_map_find_entry(start, end - 1, type); firmware_map_add_hotplug()
293 if (entry) firmware_map_add_hotplug()
296 entry = firmware_map_find_entry_bootmem(start, end - 1, type); firmware_map_add_hotplug()
297 if (!entry) { firmware_map_add_hotplug()
298 entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC); firmware_map_add_hotplug()
299 if (!entry) firmware_map_add_hotplug()
304 list_del(&entry->list); firmware_map_add_hotplug()
307 memset(entry, 0, sizeof(*entry)); firmware_map_add_hotplug()
310 firmware_map_add_entry(start, end, type, entry); firmware_map_add_hotplug()
311 /* create the memmap entry */ firmware_map_add_hotplug()
312 add_sysfs_fw_map_entry(entry); firmware_map_add_hotplug()
318 * firmware_map_add_early() - Adds a firmware mapping entry.
323 * Adds a firmware mapping entry. This function uses the bootmem allocator
332 struct firmware_map_entry *entry; firmware_map_add_early() local
334 entry = memblock_virt_alloc(sizeof(struct firmware_map_entry), 0); firmware_map_add_early()
335 if (WARN_ON(!entry)) firmware_map_add_early()
338 return firmware_map_add_entry(start, end, type, entry); firmware_map_add_early()
342 * firmware_map_remove() - remove a firmware mapping entry
347 * removes a firmware mapping entry.
349 * Returns 0 on success, or -EINVAL if no entry.
353 struct firmware_map_entry *entry; firmware_map_remove() local
356 entry = firmware_map_find_entry(start, end - 1, type); firmware_map_remove()
357 if (!entry) { firmware_map_remove()
362 firmware_map_remove_entry(entry); firmware_map_remove()
365 /* remove the memmap entry */ firmware_map_remove()
366 remove_sysfs_fw_map_entry(entry); firmware_map_remove()
375 static ssize_t start_show(struct firmware_map_entry *entry, char *buf) start_show() argument
378 (unsigned long long)entry->start); start_show()
381 static ssize_t end_show(struct firmware_map_entry *entry, char *buf) end_show() argument
384 (unsigned long long)entry->end); end_show()
387 static ssize_t type_show(struct firmware_map_entry *entry, char *buf) type_show() argument
389 return snprintf(buf, PAGE_SIZE, "%s\n", entry->type); type_show()
400 struct firmware_map_entry *entry = to_memmap_entry(kobj); memmap_attr_show() local
403 return memmap_attr->show(entry, buf); memmap_attr_show()
416 struct firmware_map_entry *entry; firmware_memmap_init() local
418 list_for_each_entry(entry, &map_entries, list) firmware_memmap_init()
419 add_sysfs_fw_map_entry(entry); firmware_memmap_init()
148 firmware_map_add_entry(u64 start, u64 end, const char *type, struct firmware_map_entry *entry) firmware_map_add_entry() argument
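A call-site sketch for the hotplug path above (the values and wrapper name are illustrative; the [start, end) convention follows the kernel-doc of these functions):

	static int example_hotadd_notify(u64 start, u64 size)
	{
		/* Keep /sys/firmware/memmap in sync with a hot-added range. */
		return firmware_map_add_hotplug(start, start + size, "System RAM");
	}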
H A Ddmi-sysfs.c13 * entry.
30 the top entry type is only 8 bits */
52 ssize_t (*show)(struct dmi_sysfs_entry *entry, char *buf);
62 * dmi_sysfs_mapped_attribute - Attribute where we require the entry be
67 ssize_t (*show)(struct dmi_sysfs_entry *entry,
79 * Generic DMI entry support.
99 struct dmi_sysfs_entry *entry = to_entry(kobj); dmi_sysfs_attr_show() local
106 return attr->show(entry, buf); dmi_sysfs_attr_show()
117 struct dmi_sysfs_entry *entry; member in struct:find_dmi_data
128 struct dmi_sysfs_entry *entry = data->entry; find_dmi_entry_helper() local
130 /* Is this the entry we want? */ find_dmi_entry_helper()
131 if (dh->type != entry->dh.type) find_dmi_entry_helper()
146 /* Found the entry */ find_dmi_entry_helper()
147 data->ret = data->callback(entry, dh, data->private); find_dmi_entry_helper()
157 static ssize_t find_dmi_entry(struct dmi_sysfs_entry *entry, find_dmi_entry() argument
161 .entry = entry, find_dmi_entry()
164 .instance_countdown = entry->instance, find_dmi_entry()
165 .ret = -EIO, /* To signal the entry disappeared */ find_dmi_entry()
177 * Calculate and return the byte length of the dmi entry identified by
194 * Support bits for specialized DMI entry support
201 static ssize_t dmi_entry_attr_show_helper(struct dmi_sysfs_entry *entry, dmi_entry_attr_show_helper() argument
210 return attr->show(entry, dh, data->buf); dmi_entry_attr_show_helper()
221 /* Find the entry according to our parent and call the dmi_entry_attr_show()
232 * Specialized DMI entry support.
267 static ssize_t dmi_sysfs_sel_##_field(struct dmi_sysfs_entry *entry, \
359 static ssize_t dmi_sel_raw_read_io(struct dmi_sysfs_entry *entry, dmi_sel_raw_read_io() argument
376 static ssize_t dmi_sel_raw_read_phys32(struct dmi_sysfs_entry *entry, dmi_sel_raw_read_phys32() argument
397 static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry, dmi_sel_raw_read_helper() argument
413 return dmi_sel_raw_read_io(entry, &sel, state->buf, dmi_sel_raw_read_helper()
416 return dmi_sel_raw_read_phys32(entry, &sel, state->buf, dmi_sel_raw_read_helper()
432 struct dmi_sysfs_entry *entry = to_entry(kobj->parent); dmi_sel_raw_read() local
439 return find_dmi_entry(entry, dmi_sel_raw_read_helper, &state); dmi_sel_raw_read()
447 static int dmi_system_event_log(struct dmi_sysfs_entry *entry) dmi_system_event_log() argument
451 entry->child = kzalloc(sizeof(*entry->child), GFP_KERNEL); dmi_system_event_log()
452 if (!entry->child) dmi_system_event_log()
454 ret = kobject_init_and_add(entry->child, dmi_system_event_log()
456 &entry->kobj, dmi_system_event_log()
461 ret = sysfs_create_bin_file(entry->child, &dmi_sel_raw_attr); dmi_system_event_log()
468 kobject_del(entry->child); dmi_system_event_log()
470 kfree(entry->child); dmi_system_event_log()
475 * Generic DMI entry support.
478 static ssize_t dmi_sysfs_entry_length(struct dmi_sysfs_entry *entry, char *buf) dmi_sysfs_entry_length() argument
480 return sprintf(buf, "%d\n", entry->dh.length); dmi_sysfs_entry_length()
483 static ssize_t dmi_sysfs_entry_handle(struct dmi_sysfs_entry *entry, char *buf) dmi_sysfs_entry_handle() argument
485 return sprintf(buf, "%d\n", entry->dh.handle); dmi_sysfs_entry_handle()
488 static ssize_t dmi_sysfs_entry_type(struct dmi_sysfs_entry *entry, char *buf) dmi_sysfs_entry_type() argument
490 return sprintf(buf, "%d\n", entry->dh.type); dmi_sysfs_entry_type()
493 static ssize_t dmi_sysfs_entry_instance(struct dmi_sysfs_entry *entry, dmi_sysfs_entry_instance() argument
496 return sprintf(buf, "%d\n", entry->instance); dmi_sysfs_entry_instance()
499 static ssize_t dmi_sysfs_entry_position(struct dmi_sysfs_entry *entry, dmi_sysfs_entry_position() argument
502 return sprintf(buf, "%d\n", entry->position); dmi_sysfs_entry_position()
505 static DMI_SYSFS_ATTR(entry, length);
506 static DMI_SYSFS_ATTR(entry, handle);
507 static DMI_SYSFS_ATTR(entry, type);
508 static DMI_SYSFS_ATTR(entry, instance);
509 static DMI_SYSFS_ATTR(entry, position);
520 static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry, dmi_entry_raw_read_helper() argument
538 struct dmi_sysfs_entry *entry = to_entry(kobj); dmi_entry_raw_read() local
545 return find_dmi_entry(entry, dmi_entry_raw_read_helper, &state); dmi_entry_raw_read()
555 struct dmi_sysfs_entry *entry = to_entry(kobj); dmi_sysfs_entry_release() local
558 list_del(&entry->list); dmi_sysfs_entry_release()
560 kfree(entry); dmi_sysfs_entry_release()
581 struct dmi_sysfs_entry *entry; dmi_sysfs_register_handle() local
584 /* If a previous entry saw an error, short circuit */ dmi_sysfs_register_handle()
588 /* Allocate and register a new entry into the entries set */ dmi_sysfs_register_handle()
589 entry = kzalloc(sizeof(*entry), GFP_KERNEL); dmi_sysfs_register_handle()
590 if (!entry) { dmi_sysfs_register_handle()
596 memcpy(&entry->dh, dh, sizeof(*dh)); dmi_sysfs_register_handle()
597 entry->instance = instance_counts[dh->type]++; dmi_sysfs_register_handle()
598 entry->position = position_count++; dmi_sysfs_register_handle()
600 entry->kobj.kset = dmi_kset; dmi_sysfs_register_handle()
601 *ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL, dmi_sysfs_register_handle()
602 "%d-%d", dh->type, entry->instance); dmi_sysfs_register_handle()
605 kfree(entry); dmi_sysfs_register_handle()
611 list_add_tail(&entry->list, &entry_list); dmi_sysfs_register_handle()
617 *ret = dmi_system_event_log(entry); dmi_sysfs_register_handle()
626 /* Create the raw binary file to access the entry */ dmi_sysfs_register_handle()
627 *ret = sysfs_create_bin_file(&entry->kobj, &dmi_entry_raw_attr); dmi_sysfs_register_handle()
633 kobject_put(entry->child); dmi_sysfs_register_handle()
634 kobject_put(&entry->kobj); dmi_sysfs_register_handle()
640 struct dmi_sysfs_entry *entry, *next; cleanup_entry_list() local
643 list_for_each_entry_safe(entry, next, &entry_list, list) { cleanup_entry_list()
644 kobject_put(entry->child); cleanup_entry_list()
645 kobject_put(&entry->kobj); cleanup_entry_list()
/linux-4.1.27/sound/drivers/opl4/
H A Dopl4_proc.c27 static int snd_opl4_mem_proc_open(struct snd_info_entry *entry, snd_opl4_mem_proc_open() argument
30 struct snd_opl4 *opl4 = entry->private_data; snd_opl4_mem_proc_open()
42 static int snd_opl4_mem_proc_release(struct snd_info_entry *entry, snd_opl4_mem_proc_release() argument
45 struct snd_opl4 *opl4 = entry->private_data; snd_opl4_mem_proc_release()
53 static ssize_t snd_opl4_mem_proc_read(struct snd_info_entry *entry, snd_opl4_mem_proc_read() argument
58 struct snd_opl4 *opl4 = entry->private_data; snd_opl4_mem_proc_read()
73 static ssize_t snd_opl4_mem_proc_write(struct snd_info_entry *entry, snd_opl4_mem_proc_write() argument
79 struct snd_opl4 *opl4 = entry->private_data; snd_opl4_mem_proc_write()
103 struct snd_info_entry *entry; snd_opl4_create_proc() local
105 entry = snd_info_create_card_entry(opl4->card, "opl4-mem", opl4->card->proc_root); snd_opl4_create_proc()
106 if (entry) { snd_opl4_create_proc()
109 entry->mode |= S_IWUSR; snd_opl4_create_proc()
110 entry->size = 4 * 1024 * 1024; snd_opl4_create_proc()
113 entry->size = 1 * 1024 * 1024; snd_opl4_create_proc()
115 entry->content = SNDRV_INFO_CONTENT_DATA; snd_opl4_create_proc()
116 entry->c.ops = &snd_opl4_mem_proc_ops; snd_opl4_create_proc()
117 entry->module = THIS_MODULE; snd_opl4_create_proc()
118 entry->private_data = opl4; snd_opl4_create_proc()
119 if (snd_info_register(entry) < 0) { snd_opl4_create_proc()
120 snd_info_free_entry(entry); snd_opl4_create_proc()
121 entry = NULL; snd_opl4_create_proc()
124 opl4->proc_entry = entry; snd_opl4_create_proc()
/linux-4.1.27/arch/sparc/kernel/
H A Djump_label.c13 void arch_jump_label_transform(struct jump_entry *entry, arch_jump_label_transform() argument
17 u32 *insn = (u32 *) (unsigned long) entry->code; arch_jump_label_transform()
20 s32 off = (s32)entry->target - (s32)entry->code; arch_jump_label_transform()
H A Dpci_sun4v.c41 unsigned long entry; /* Index into IOTSB. */ member in struct:iommu_batch
50 static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry) iommu_batch_start() argument
56 p->entry = entry; iommu_batch_start()
66 unsigned long entry = p->entry; iommu_batch_flush() local
73 num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), iommu_batch_flush()
80 devhandle, HV_PCI_TSBID(0, entry), iommu_batch_flush()
85 entry += num; iommu_batch_flush()
90 p->entry = entry; iommu_batch_flush()
96 static inline void iommu_batch_new_entry(unsigned long entry) iommu_batch_new_entry() argument
100 if (p->entry + p->npages == entry) iommu_batch_new_entry()
102 if (p->entry != ~0UL) iommu_batch_new_entry()
104 p->entry = entry; iommu_batch_new_entry()
139 long entry; dma_4v_alloc_coherent() local
159 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, dma_4v_alloc_coherent()
162 if (unlikely(entry == DMA_ERROR_CODE)) dma_4v_alloc_coherent()
165 *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); dma_4v_alloc_coherent()
174 entry); dma_4v_alloc_coherent()
197 static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry, dma_4v_iommu_demap() argument
206 HV_PCI_TSBID(0, entry), dma_4v_iommu_demap()
209 entry += num; dma_4v_iommu_demap()
220 unsigned long order, npages, entry; dma_4v_free_coherent() local
227 entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); dma_4v_free_coherent()
228 dma_4v_iommu_demap(&devhandle, entry, npages); dma_4v_free_coherent()
245 long entry; dma_4v_map_page() local
256 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, dma_4v_map_page()
259 if (unlikely(entry == DMA_ERROR_CODE)) dma_4v_map_page()
262 bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); dma_4v_map_page()
271 iommu_batch_start(dev, prot, entry); dma_4v_map_page()
302 long entry; dma_4v_unmap_page() local
318 entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT; dma_4v_unmap_page()
319 dma_4v_iommu_demap(&devhandle, entry, npages); dma_4v_unmap_page()
364 unsigned long paddr, npages, entry, out_entry = 0, slen; for_each_sg() local
375 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, for_each_sg()
379 if (unlikely(entry == DMA_ERROR_CODE)) { for_each_sg()
386 iommu_batch_new_entry(entry); for_each_sg()
388 /* Convert entry to a dma_addr_t */ for_each_sg()
389 dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT); for_each_sg()
423 out_entry = entry; for_each_sg()
474 unsigned long flags, entry; dma_4v_unmap_sg() local
496 entry = ((dma_handle - tbl->table_map_base) >> shift); dma_4v_unmap_sg()
497 dma_4v_iommu_demap(&devhandle, entry, npages); dma_4v_unmap_sg()
698 /* Clear the entry. */ pci_sun4v_dequeue_msi()
/linux-4.1.27/arch/sh/kernel/cpu/sh2/
H A DMakefile5 obj-y := ex.o probe.o entry.o
/linux-4.1.27/fs/ext4/
H A Dblock_validity.c60 struct ext4_system_zone *new_entry = NULL, *entry; add_system_zone() local
66 entry = rb_entry(parent, struct ext4_system_zone, node); add_system_zone()
67 if (start_blk < entry->start_blk) add_system_zone()
69 else if (start_blk >= (entry->start_blk + entry->count)) add_system_zone()
72 if (start_blk + count > (entry->start_blk + add_system_zone()
73 entry->count)) add_system_zone()
74 entry->count = (start_blk + count - add_system_zone()
75 entry->start_blk); add_system_zone()
99 entry = rb_entry(node, struct ext4_system_zone, node); add_system_zone()
100 if (can_merge(entry, new_entry)) { add_system_zone()
101 new_entry->start_blk = entry->start_blk; add_system_zone()
102 new_entry->count += entry->count; add_system_zone()
104 kmem_cache_free(ext4_system_zone_cachep, entry); add_system_zone()
111 entry = rb_entry(node, struct ext4_system_zone, node); add_system_zone()
112 if (can_merge(new_entry, entry)) { add_system_zone()
113 new_entry->count += entry->count; add_system_zone()
115 kmem_cache_free(ext4_system_zone_cachep, entry); add_system_zone()
124 struct ext4_system_zone *entry; debug_print_tree() local
130 entry = rb_entry(node, struct ext4_system_zone, node); debug_print_tree()
132 entry->start_blk, entry->start_blk + entry->count - 1); debug_print_tree()
182 struct ext4_system_zone *entry, *n; ext4_release_system_zone() local
184 rbtree_postorder_for_each_entry_safe(entry, n, ext4_release_system_zone()
186 kmem_cache_free(ext4_system_zone_cachep, entry); ext4_release_system_zone()
199 struct ext4_system_zone *entry; ext4_data_block_valid() local
209 entry = rb_entry(n, struct ext4_system_zone, node); ext4_data_block_valid()
210 if (start_blk + count - 1 < entry->start_blk) ext4_data_block_valid()
212 else if (start_blk >= (entry->start_blk + entry->count)) ext4_data_block_valid()
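
add_system_zone() above keeps reserved block ranges in an rb-tree, widens an overlapping node in place, then merges neighbours that touch. A sketch of the same insert-and-merge rule over a sorted singly linked list (the kernel uses an rb-tree; the list just keeps the sketch short):

#include <stdio.h>
#include <stdlib.h>

/* One reserved range of blocks, kept in a list sorted by start. */
struct zone {
    unsigned long start, count;
    struct zone *next;
};

/* Insert [start, start+count) and merge with any zone it overlaps
 * or touches, mirroring the add_system_zone() merge rule. */
static void add_zone(struct zone **head, unsigned long start,
                     unsigned long count)
{
    struct zone **pp = head, *z;

    while (*pp && (*pp)->start + (*pp)->count < start)
        pp = &(*pp)->next;

    if (*pp && (*pp)->start <= start + count) {
        /* Overlapping/adjacent zone found: extend it in place. */
        z = *pp;
        if (start < z->start) {
            z->count += z->start - start;
            z->start = start;
        }
        if (start + count > z->start + z->count)
            z->count = start + count - z->start;
    } else {
        z = malloc(sizeof(*z));
        z->start = start;
        z->count = count;
        z->next = *pp;
        *pp = z;
    }

    /* The extended zone may now reach into its successors: absorb them. */
    while (z->next && z->next->start <= z->start + z->count) {
        struct zone *n = z->next;
        unsigned long nend = n->start + n->count;

        if (nend > z->start + z->count)
            z->count = nend - z->start;
        z->next = n->next;
        free(n);
    }
}

int main(void)
{
    struct zone *head = NULL;

    add_zone(&head, 10, 5);
    add_zone(&head, 20, 5);
    add_zone(&head, 15, 5);   /* bridges the gap: one zone [10, 25) remains */
    for (struct zone *z = head; z; z = z->next)
        printf("[%lu, %lu)\n", z->start, z->start + z->count);
    return 0;
}
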
/linux-4.1.27/drivers/isdn/mISDN/
H A Ddsp_pipeline.c82 struct dsp_element_entry *entry = mISDN_dsp_dev_release() local
84 list_del(&entry->list); mISDN_dsp_dev_release()
85 kfree(entry); mISDN_dsp_dev_release()
90 struct dsp_element_entry *entry; mISDN_dsp_element_register() local
96 entry = kzalloc(sizeof(struct dsp_element_entry), GFP_ATOMIC); mISDN_dsp_element_register()
97 if (!entry) mISDN_dsp_element_register()
100 entry->elem = elem; mISDN_dsp_element_register()
102 entry->dev.class = elements_class; mISDN_dsp_element_register()
103 entry->dev.release = mISDN_dsp_dev_release; mISDN_dsp_element_register()
104 dev_set_drvdata(&entry->dev, elem); mISDN_dsp_element_register()
105 dev_set_name(&entry->dev, "%s", elem->name); mISDN_dsp_element_register()
106 ret = device_register(&entry->dev); mISDN_dsp_element_register()
112 list_add_tail(&entry->list, &dsp_elements); mISDN_dsp_element_register()
115 ret = device_create_file(&entry->dev, mISDN_dsp_element_register()
131 device_unregister(&entry->dev); mISDN_dsp_element_register()
134 kfree(entry); mISDN_dsp_element_register()
141 struct dsp_element_entry *entry, *n; mISDN_dsp_element_unregister() local
146 list_for_each_entry_safe(entry, n, &dsp_elements, list) mISDN_dsp_element_unregister()
147 if (entry->elem == elem) { mISDN_dsp_element_unregister()
148 device_unregister(&entry->dev); mISDN_dsp_element_unregister()
176 struct dsp_element_entry *entry, *n; dsp_pipeline_module_exit() local
182 list_for_each_entry_safe(entry, n, &dsp_elements, list) { dsp_pipeline_module_exit()
183 list_del(&entry->list); dsp_pipeline_module_exit()
185 __func__, entry->elem->name); dsp_pipeline_module_exit()
186 kfree(entry); dsp_pipeline_module_exit()
210 struct dsp_pipeline_entry *entry, *n; _dsp_pipeline_destroy() local
212 list_for_each_entry_safe(entry, n, &pipeline->list, list) { _dsp_pipeline_destroy()
213 list_del(&entry->list); _dsp_pipeline_destroy()
214 if (entry->elem == dsp_hwec) _dsp_pipeline_destroy()
218 entry->elem->free(entry->p); _dsp_pipeline_destroy()
219 kfree(entry); _dsp_pipeline_destroy()
240 struct dsp_element_entry *entry, *n; dsp_pipeline_build() local
269 list_for_each_entry_safe(entry, n, &dsp_elements, list) dsp_pipeline_build()
270 if (!strcmp(entry->elem->name, name)) { dsp_pipeline_build()
271 elem = entry->elem; dsp_pipeline_build()
277 "entry to pipeline: %s (out of " dsp_pipeline_build()
305 "to add entry to pipeline: " dsp_pipeline_build()
341 struct dsp_pipeline_entry *entry; dsp_pipeline_process_tx() local
346 list_for_each_entry(entry, &pipeline->list, list) dsp_pipeline_process_tx()
347 if (entry->elem->process_tx) dsp_pipeline_process_tx()
348 entry->elem->process_tx(entry->p, data, len); dsp_pipeline_process_tx()
354 struct dsp_pipeline_entry *entry; dsp_pipeline_process_rx() local
359 list_for_each_entry_reverse(entry, &pipeline->list, list) dsp_pipeline_process_rx()
360 if (entry->elem->process_rx) dsp_pipeline_process_rx()
361 entry->elem->process_rx(entry->p, data, len, txlen); dsp_pipeline_process_rx()
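
dsp_pipeline.c keeps a registry of named elements and builds per-channel pipelines from a config string; process_tx walks the pipeline forward while process_rx walks it in reverse. A userspace sketch of that shape, with invented element names ("gain", "ec") and strtok_r() in place of the kernel's parser:

#include <stdio.h>
#include <string.h>

/* A processing element, as registered on the global element list. */
struct element {
    const char *name;
    void (*process_tx)(unsigned char *data, int len);
    void (*process_rx)(unsigned char *data, int len);
};

static void gain_tx(unsigned char *d, int l) { (void)d; (void)l; printf("gain tx\n"); }
static void gain_rx(unsigned char *d, int l) { (void)d; (void)l; printf("gain rx\n"); }
static void ec_tx(unsigned char *d, int l)   { (void)d; (void)l; printf("ec tx\n"); }
static void ec_rx(unsigned char *d, int l)   { (void)d; (void)l; printf("ec rx\n"); }

static struct element elements[] = {
    { "gain", gain_tx, gain_rx },   /* hypothetical element names */
    { "ec",   ec_tx,   ec_rx   },
};

#define NELEM (sizeof(elements) / sizeof(elements[0]))
#define MAX_PIPE 8

struct pipeline {
    struct element *entry[MAX_PIPE];
    int n;
};

/* Build a pipeline from a space-separated config string, by name. */
static void pipeline_build(struct pipeline *p, const char *cfg)
{
    char buf[128], *name, *save;

    p->n = 0;
    strncpy(buf, cfg, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';
    for (name = strtok_r(buf, " ", &save); name && p->n < MAX_PIPE;
         name = strtok_r(NULL, " ", &save))
        for (size_t i = 0; i < NELEM; i++)
            if (!strcmp(elements[i].name, name))
                p->entry[p->n++] = &elements[i];
}

static void pipeline_tx(struct pipeline *p, unsigned char *d, int l)
{
    for (int i = 0; i < p->n; i++)        /* forward on transmit */
        p->entry[i]->process_tx(d, l);
}

static void pipeline_rx(struct pipeline *p, unsigned char *d, int l)
{
    for (int i = p->n - 1; i >= 0; i--)   /* reverse on receive */
        p->entry[i]->process_rx(d, l);
}

int main(void)
{
    struct pipeline p;
    unsigned char data[8] = { 0 };

    pipeline_build(&p, "gain ec");
    pipeline_tx(&p, data, sizeof(data));  /* gain tx, ec tx */
    pipeline_rx(&p, data, sizeof(data));  /* ec rx, gain rx */
    return 0;
}
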
/linux-4.1.27/arch/sh/mm/
H A Dtlbex_32.c29 pte_t entry; handle_tlbmiss() local
52 entry = *pte; handle_tlbmiss()
53 if (unlikely(pte_none(entry) || pte_not_present(entry))) handle_tlbmiss()
55 if (unlikely(error_code && !pte_write(entry))) handle_tlbmiss()
59 entry = pte_mkdirty(entry); handle_tlbmiss()
60 entry = pte_mkyoung(entry); handle_tlbmiss()
62 set_pte(pte, entry); handle_tlbmiss()
66 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in handle_tlbmiss()
68 * flush it in order to avoid potential TLB entry duplication. handle_tlbmiss()
H A Dtlb-urb.c4 * TLB entry wiring helpers for URB-equipped parts.
18 * Load the entry for 'addr' into the TLB and wire the entry.
32 * Make sure we're not trying to wire the last TLB entry slot. tlb_wire_entry()
39 * Insert this entry into the highest non-wired TLB slot (via tlb_wire_entry()
46 /* Load the entry into the TLB */ tlb_wire_entry()
62 * Unwire the last wired TLB entry.
65 * TLB entries in an arbitrary order. If you wire TLB entry N, followed
66 * by entry N+1, you must unwire entry N+1 first, then entry N. In this
81 * Make sure we're not trying to unwire a TLB entry when none tlb_unwire_entry()
H A Dtlb-sh5.c47 * sh64_next_free_dtlb_entry - Find the next available DTLB entry
55 * sh64_get_wired_dtlb_entry - Allocate a wired (locked-in) entry in the DTLB
59 unsigned long long entry = sh64_next_free_dtlb_entry(); sh64_get_wired_dtlb_entry() local
64 return entry; sh64_get_wired_dtlb_entry()
68 * sh64_put_wired_dtlb_entry - Free a wired (locked-in) entry in the DTLB.
70 * @entry: Address of TLB slot.
74 int sh64_put_wired_dtlb_entry(unsigned long long entry) sh64_put_wired_dtlb_entry() argument
76 __flush_tlb_slot(entry); sh64_put_wired_dtlb_entry()
85 * contents of a TLB entry) .. though I have a feeling that this is sh64_put_wired_dtlb_entry()
92 if (entry <= DTLB_FIXED) sh64_put_wired_dtlb_entry()
97 * entry beneath the first 'free' entry! sh64_put_wired_dtlb_entry()
99 if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step)) sh64_put_wired_dtlb_entry()
102 /* If we are, then bring this entry back into the list */ sh64_put_wired_dtlb_entry()
104 cpu_data->dtlb.next = entry; sh64_put_wired_dtlb_entry()
152 unsigned long long entry; tlb_wire_entry() local
159 entry = sh64_get_wired_dtlb_entry(); tlb_wire_entry()
160 dtlb_entries[dtlb_entry++] = entry; tlb_wire_entry()
165 sh64_setup_tlb_slot(entry, addr, get_asid(), paddr); tlb_wire_entry()
172 unsigned long long entry; tlb_unwire_entry() local
178 entry = dtlb_entries[dtlb_entry--]; tlb_unwire_entry()
180 sh64_teardown_tlb_slot(entry); tlb_unwire_entry()
181 sh64_put_wired_dtlb_entry(entry); tlb_unwire_entry()
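
The tlb-urb.c comment above insists on strict LIFO unwiring, and tlb-sh5.c implements exactly that with a small array used as a stack. A sketch of the invariant:

#include <assert.h>
#include <stdio.h>

#define MAX_WIRED 8

static unsigned long wired[MAX_WIRED]; /* slot numbers, LIFO order */
static int nwired;

/* Wire a TLB slot: push it on the stack. */
static void wire_entry(unsigned long slot)
{
    assert(nwired < MAX_WIRED);   /* never wire the last free slot away */
    wired[nwired++] = slot;
    printf("wired slot %lu\n", slot);
}

/* Unwire the most recently wired slot; LIFO order is mandatory,
 * as the tlb-urb.c comment spells out (unwire N+1 before N). */
static unsigned long unwire_entry(void)
{
    assert(nwired > 0);           /* nothing to unwire otherwise */
    unsigned long slot = wired[--nwired];
    printf("unwired slot %lu\n", slot);
    return slot;
}

int main(void)
{
    wire_entry(4);
    wire_entry(5);
    unwire_entry();  /* returns 5 first ... */
    unwire_entry();  /* ... then 4 */
    return 0;
}
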
H A Dtlb-debugfs.c45 unsigned int nentries, entry; tlb_seq_show() local
73 /* Make the "entry >= urb" test fail. */ tlb_seq_show()
91 seq_printf(file, "entry: vpn ppn asid size valid wired\n"); tlb_seq_show()
93 for (entry = 0; entry < nentries; entry++) { tlb_seq_show()
100 val = __raw_readl(addr1 | (entry << MMU_TLB_ENTRY_SHIFT)); tlb_seq_show()
105 val = __raw_readl(addr2 | (entry << MMU_TLB_ENTRY_SHIFT)); tlb_seq_show()
109 val = __raw_readl(data1 | (entry << MMU_TLB_ENTRY_SHIFT)); tlb_seq_show()
113 val = __raw_readl(data2 | (entry << MMU_TLB_ENTRY_SHIFT)); tlb_seq_show()
126 entry, vpn, ppn, asid, tlb_seq_show()
128 (urb <= entry) ? "W" : "-"); tlb_seq_show()
/linux-4.1.27/fs/squashfs/
H A Dcache.c69 struct squashfs_cache_entry *entry; squashfs_cache_get() local
75 if (cache->entry[i].block == block) { squashfs_cache_get()
97 * At least one unused cache entry. A simple squashfs_cache_get()
98 * round-robin strategy is used to choose the entry to squashfs_cache_get()
103 if (cache->entry[i].refcount == 0) squashfs_cache_get()
109 entry = &cache->entry[i]; squashfs_cache_get()
112 * Initialise chosen cache entry, and fill it in from squashfs_cache_get()
116 entry->block = block; squashfs_cache_get()
117 entry->refcount = 1; squashfs_cache_get()
118 entry->pending = 1; squashfs_cache_get()
119 entry->num_waiters = 0; squashfs_cache_get()
120 entry->error = 0; squashfs_cache_get()
123 entry->length = squashfs_read_data(sb, block, length, squashfs_cache_get()
124 &entry->next_index, entry->actor); squashfs_cache_get()
128 if (entry->length < 0) squashfs_cache_get()
129 entry->error = entry->length; squashfs_cache_get()
131 entry->pending = 0; squashfs_cache_get()
134 * While filling this entry one or more other processes squashfs_cache_get()
138 if (entry->num_waiters) { squashfs_cache_get()
140 wake_up_all(&entry->wait_queue); squashfs_cache_get()
150 * previously unused there's one less cache entry available squashfs_cache_get()
153 entry = &cache->entry[i]; squashfs_cache_get()
154 if (entry->refcount == 0) squashfs_cache_get()
156 entry->refcount++; squashfs_cache_get()
159 * If the entry is currently being filled in by another process squashfs_cache_get()
162 if (entry->pending) { squashfs_cache_get()
163 entry->num_waiters++; squashfs_cache_get()
165 wait_event(entry->wait_queue, !entry->pending); squashfs_cache_get()
174 cache->name, i, entry->block, entry->refcount, entry->error); squashfs_cache_get()
176 if (entry->error) squashfs_cache_get()
177 ERROR("Unable to read %s cache entry [%llx]\n", cache->name, squashfs_cache_get()
179 return entry; squashfs_cache_get()
184 * Release cache entry, once usage count is zero it can be reused.
186 void squashfs_cache_put(struct squashfs_cache_entry *entry) squashfs_cache_put() argument
188 struct squashfs_cache *cache = entry->cache; squashfs_cache_put()
191 entry->refcount--; squashfs_cache_put()
192 if (entry->refcount == 0) { squashfs_cache_put()
218 if (cache->entry[i].data) { squashfs_cache_delete()
220 kfree(cache->entry[i].data[j]); squashfs_cache_delete()
221 kfree(cache->entry[i].data); squashfs_cache_delete()
223 kfree(cache->entry[i].actor); squashfs_cache_delete()
226 kfree(cache->entry); squashfs_cache_delete()
233 * size block_size. To avoid vmalloc fragmentation issues each entry
247 cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL); squashfs_cache_init()
248 if (cache->entry == NULL) { squashfs_cache_init()
266 struct squashfs_cache_entry *entry = &cache->entry[i]; squashfs_cache_init() local
268 init_waitqueue_head(&cache->entry[i].wait_queue); squashfs_cache_init()
269 entry->cache = cache; squashfs_cache_init()
270 entry->block = SQUASHFS_INVALID_BLK; squashfs_cache_init()
271 entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL); squashfs_cache_init()
272 if (entry->data == NULL) { squashfs_cache_init()
273 ERROR("Failed to allocate %s cache entry\n", name); squashfs_cache_init()
278 entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); squashfs_cache_init()
279 if (entry->data[j] == NULL) { squashfs_cache_init()
285 entry->actor = squashfs_page_actor_init(entry->data, squashfs_cache_init()
287 if (entry->actor == NULL) { squashfs_cache_init()
288 ERROR("Failed to allocate %s cache entry\n", name); squashfs_cache_init()
302 * Copy up to length bytes from cache entry to buffer starting at offset bytes
303 * into the cache entry. If there's not length bytes then copy the number of
306 int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry, squashfs_copy_data() argument
314 return min(length, entry->length - offset); squashfs_copy_data()
316 while (offset < entry->length) { squashfs_copy_data()
317 void *buff = entry->data[offset / PAGE_CACHE_SIZE] squashfs_copy_data()
319 int bytes = min_t(int, entry->length - offset, squashfs_copy_data()
349 struct squashfs_cache_entry *entry; squashfs_read_metadata() local
354 entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0); squashfs_read_metadata()
355 if (entry->error) { squashfs_read_metadata()
356 res = entry->error; squashfs_read_metadata()
358 } else if (*offset >= entry->length) { squashfs_read_metadata()
363 bytes = squashfs_copy_data(buffer, entry, *offset, length); squashfs_read_metadata()
369 if (*offset == entry->length) { squashfs_read_metadata()
370 *block = entry->next_index; squashfs_read_metadata()
374 squashfs_cache_put(entry); squashfs_read_metadata()
380 squashfs_cache_put(entry); squashfs_read_metadata()
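
squashfs_cache_get() above serves a hit by bumping the refcount and a miss by refilling any entry whose refcount is zero, chosen round-robin; squashfs_cache_put() makes an entry reusable once its count drops back to zero. A single-threaded sketch of that policy (the kernel adds a lock, a "pending" flag and wait queues for concurrent fills, all omitted here):

#include <stdio.h>

#define NENTRIES 4
#define INVALID_BLK (~0ULL)

struct centry {
    unsigned long long block;
    int refcount;
};

static struct centry cache[NENTRIES];
static int next_victim;   /* round-robin starting point for reuse */

static struct centry *cache_get(unsigned long long block)
{
    int i, n;

    /* Hit: bump the refcount and hand the entry out. */
    for (i = 0; i < NENTRIES; i++)
        if (cache[i].block == block) {
            cache[i].refcount++;
            return &cache[i];
        }

    /* Miss: reuse an unreferenced entry, round-robin. */
    for (n = 0; n < NENTRIES; n++) {
        i = (next_victim + n) % NENTRIES;
        if (cache[i].refcount == 0) {
            next_victim = (i + 1) % NENTRIES;
            cache[i].block = block;   /* the refill would happen here */
            cache[i].refcount = 1;
            return &cache[i];
        }
    }
    return NULL;  /* all entries busy (the kernel waits instead) */
}

static void cache_put(struct centry *e)
{
    e->refcount--;  /* at zero the entry becomes reusable */
}

int main(void)
{
    for (int i = 0; i < NENTRIES; i++)
        cache[i].block = INVALID_BLK;

    struct centry *a = cache_get(100);
    struct centry *b = cache_get(100);   /* same entry, refcount 2 */
    printf("same entry: %d, refcount %d\n", a == b, a->refcount);
    cache_put(b);
    cache_put(a);
    struct centry *c = cache_get(200);   /* reuses a free slot */
    printf("block %llu cached\n", c->block);
    return 0;
}
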
/linux-4.1.27/net/netlabel/
H A Dnetlabel_addrlist.c50 * netlbl_af4list_search - Search for a matching IPv4 address entry
55 * Searches the IPv4 address list given by @head. If a matching address entry
73 * netlbl_af4list_search_exact - Search for an exact IPv4 address entry
100 * netlbl_af6list_search - Search for a matching IPv6 address entry
105 * Searches the IPv6 address list given by @head. If a matching address entry
124 * netlbl_af6list_search_exact - Search for an exact IPv6 address entry
152 * netlbl_af4list_add - Add a new IPv4 address entry to a list
153 * @entry: address entry
157 * Add a new address entry to the list pointed to by @head. On success zero is
162 int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head) netlbl_af4list_add() argument
166 iter = netlbl_af4list_search(entry->addr, head); netlbl_af4list_add()
168 iter->addr == entry->addr && iter->mask == entry->mask) netlbl_af4list_add()
173 * address mask such that the entry with the widest mask (smallest netlbl_af4list_add()
177 ntohl(entry->mask) > ntohl(iter->mask)) { list_for_each_entry_rcu()
178 __list_add_rcu(&entry->list, list_for_each_entry_rcu()
183 list_add_tail_rcu(&entry->list, head);
189 * netlbl_af6list_add - Add a new IPv6 address entry to a list
190 * @entry: address entry
194 * Add a new address entry to the list pointed to by @head. On success zero is
199 int netlbl_af6list_add(struct netlbl_af6list *entry, struct list_head *head) netlbl_af6list_add() argument
203 iter = netlbl_af6list_search(&entry->addr, head); netlbl_af6list_add()
205 ipv6_addr_equal(&iter->addr, &entry->addr) && netlbl_af6list_add()
206 ipv6_addr_equal(&iter->mask, &entry->mask)) netlbl_af6list_add()
211 * address mask such that the entry with the widest mask (smallest netlbl_af6list_add()
215 ipv6_addr_cmp(&entry->mask, &iter->mask) > 0) { list_for_each_entry_rcu()
216 __list_add_rcu(&entry->list, list_for_each_entry_rcu()
221 list_add_tail_rcu(&entry->list, head);
227 * netlbl_af4list_remove_entry - Remove an IPv4 address entry
228 * @entry: address entry
231 * Remove the specified IP address entry. The caller is responsible for
235 void netlbl_af4list_remove_entry(struct netlbl_af4list *entry) netlbl_af4list_remove_entry() argument
237 entry->valid = 0; netlbl_af4list_remove_entry()
238 list_del_rcu(&entry->list); netlbl_af4list_remove_entry()
242 * netlbl_af4list_remove - Remove an IPv4 address entry
248 * Remove an IP address entry from the list pointed to by @head. Returns the
249 * entry on success, NULL on failure. The caller is responsible for calling
256 struct netlbl_af4list *entry; netlbl_af4list_remove() local
258 entry = netlbl_af4list_search_exact(addr, mask, head); netlbl_af4list_remove()
259 if (entry == NULL) netlbl_af4list_remove()
261 netlbl_af4list_remove_entry(entry); netlbl_af4list_remove()
262 return entry; netlbl_af4list_remove()
267 * netlbl_af6list_remove_entry - Remove an IPv6 address entry
268 * @entry: address entry
271 * Remove the specified IP address entry. The caller is responsible for
275 void netlbl_af6list_remove_entry(struct netlbl_af6list *entry) netlbl_af6list_remove_entry() argument
277 entry->valid = 0; netlbl_af6list_remove_entry()
278 list_del_rcu(&entry->list); netlbl_af6list_remove_entry()
282 * netlbl_af6list_remove - Remove an IPv6 address entry
288 * Remove an IP address entry from the list pointed to by @head. Returns the
289 * entry on success, NULL on failure. The caller is responsible for calling
297 struct netlbl_af6list *entry; netlbl_af6list_remove() local
299 entry = netlbl_af6list_search_exact(addr, mask, head); netlbl_af6list_remove()
300 if (entry == NULL) netlbl_af6list_remove()
302 netlbl_af6list_remove_entry(entry); netlbl_af6list_remove()
303 return entry; netlbl_af6list_remove()
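
netlbl_af4list_add() above inserts entries ordered by address mask so that a plain linear search returns the most specific match first. A sketch, keeping addresses in host byte order for brevity (the kernel compares ntohl() values):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct af4 {
    uint32_t addr, mask;   /* host byte order in this sketch */
    struct af4 *next;
};

static struct af4 *head;

/* Insert ordered by mask, most specific (numerically largest) first. */
static void af4_add(uint32_t addr, uint32_t mask)
{
    struct af4 **pp = &head;
    struct af4 *e = malloc(sizeof(*e));

    e->addr = addr & mask;
    e->mask = mask;
    while (*pp && (*pp)->mask >= mask)
        pp = &(*pp)->next;
    e->next = *pp;
    *pp = e;
}

/* Thanks to the insert order, the first hit is the longest-prefix match. */
static struct af4 *af4_search(uint32_t addr)
{
    for (struct af4 *e = head; e; e = e->next)
        if ((addr & e->mask) == e->addr)
            return e;
    return NULL;
}

int main(void)
{
    af4_add(0x0a000000, 0xff000000);         /* 10.0.0.0/8  */
    af4_add(0x0a010000, 0xffff0000);         /* 10.1.0.0/16 */
    struct af4 *e = af4_search(0x0a010203);  /* 10.1.2.3    */
    printf("matched mask 0x%08x\n", (unsigned)(e ? e->mask : 0)); /* /16 wins */
    return 0;
}
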
H A Dnetlabel_domainhash.c66 * netlbl_domhsh_free_entry - Frees a domain hash table entry
67 * @entry: the entry's RCU field
71 * function so that the memory allocated to a hash table entry can be released
75 static void netlbl_domhsh_free_entry(struct rcu_head *entry) netlbl_domhsh_free_entry() argument
85 ptr = container_of(entry, struct netlbl_dom_map, rcu); netlbl_domhsh_free_entry()
130 * netlbl_domhsh_search - Search for a domain entry
135 * entry if found, otherwise NULL is returned. The caller is responsible for
158 * netlbl_domhsh_search_def - Search for a domain entry
164 * entry if an exact match is found, if an exact match is not present in the
165 * hash table then the default entry is returned if valid otherwise NULL is
172 struct netlbl_dom_map *entry; netlbl_domhsh_search_def() local
174 entry = netlbl_domhsh_search(domain); netlbl_domhsh_search_def()
175 if (entry == NULL) { netlbl_domhsh_search_def()
176 entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def); netlbl_domhsh_search_def()
177 if (entry != NULL && !entry->valid) netlbl_domhsh_search_def()
178 entry = NULL; netlbl_domhsh_search_def()
181 return entry; netlbl_domhsh_search_def()
185 * netlbl_domhsh_audit_add - Generate an audit entry for an add event
186 * @entry: the entry being added
193 * Generate an audit record for adding a new NetLabel/LSM mapping entry with
198 static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry, netlbl_domhsh_audit_add() argument
211 entry->domain ? entry->domain : "(default)"); netlbl_domhsh_audit_add()
228 type = entry->def.type; netlbl_domhsh_audit_add()
229 cipsov4 = entry->def.cipso; netlbl_domhsh_audit_add()
248 * netlbl_domhsh_validate - Validate a new domain mapping entry
249 * @entry: the entry to validate
251 * This function validates the new domain mapping entry to ensure that it is
252 * a valid entry. Returns zero on success, negative values on failure.
255 static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry) netlbl_domhsh_validate() argument
264 if (entry == NULL) netlbl_domhsh_validate()
267 switch (entry->def.type) { netlbl_domhsh_validate()
269 if (entry->def.cipso != NULL || entry->def.addrsel != NULL) netlbl_domhsh_validate()
273 if (entry->def.cipso == NULL) netlbl_domhsh_validate()
277 netlbl_af4list_foreach(iter4, &entry->def.addrsel->list4) { netlbl_domhsh_validate()
293 netlbl_af6list_foreach(iter6, &entry->def.addrsel->list6) { netlbl_domhsh_validate()
355 * netlbl_domhsh_add - Adds a entry to the domain hash table
356 * @entry: the entry to add
360 * Adds a new entry to the domain hash table and handles any updates to the
365 int netlbl_domhsh_add(struct netlbl_dom_map *entry, netlbl_domhsh_add() argument
377 ret_val = netlbl_domhsh_validate(entry); netlbl_domhsh_add()
387 if (entry->domain != NULL) netlbl_domhsh_add()
388 entry_old = netlbl_domhsh_search(entry->domain); netlbl_domhsh_add()
390 entry_old = netlbl_domhsh_search_def(entry->domain); netlbl_domhsh_add()
392 entry->valid = 1; netlbl_domhsh_add()
394 if (entry->domain != NULL) { netlbl_domhsh_add()
395 u32 bkt = netlbl_domhsh_hash(entry->domain); netlbl_domhsh_add()
396 list_add_tail_rcu(&entry->list, netlbl_domhsh_add()
399 INIT_LIST_HEAD(&entry->list); netlbl_domhsh_add()
400 rcu_assign_pointer(netlbl_domhsh_def, entry); netlbl_domhsh_add()
403 if (entry->def.type == NETLBL_NLTYPE_ADDRSELECT) { netlbl_domhsh_add()
405 &entry->def.addrsel->list4) netlbl_domhsh_add()
406 netlbl_domhsh_audit_add(entry, iter4, NULL, netlbl_domhsh_add()
410 &entry->def.addrsel->list6) netlbl_domhsh_add()
411 netlbl_domhsh_audit_add(entry, NULL, iter6, netlbl_domhsh_add()
415 netlbl_domhsh_audit_add(entry, NULL, NULL, netlbl_domhsh_add()
418 entry->def.type == NETLBL_NLTYPE_ADDRSELECT) { netlbl_domhsh_add()
427 netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) netlbl_domhsh_add()
435 netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) netlbl_domhsh_add()
445 &entry->def.addrsel->list4) { netlbl_domhsh_add()
456 &entry->def.addrsel->list6) { netlbl_domhsh_add()
476 * netlbl_domhsh_add_default - Adds the default entry to the domain hash table
477 * @entry: the entry to add
481 * Adds a new default entry to the domain hash table and handles any updates
486 int netlbl_domhsh_add_default(struct netlbl_dom_map *entry, netlbl_domhsh_add_default() argument
489 return netlbl_domhsh_add(entry, audit_info); netlbl_domhsh_add_default()
493 * netlbl_domhsh_remove_entry - Removes a given entry from the domain table
494 * @entry: the entry to remove
498 * Removes an entry from the domain hash table and handles any updates to the
504 int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, netlbl_domhsh_remove_entry() argument
510 if (entry == NULL) netlbl_domhsh_remove_entry()
514 if (entry->valid) { netlbl_domhsh_remove_entry()
515 entry->valid = 0; netlbl_domhsh_remove_entry()
516 if (entry != rcu_dereference(netlbl_domhsh_def)) netlbl_domhsh_remove_entry()
517 list_del_rcu(&entry->list); netlbl_domhsh_remove_entry()
528 entry->domain ? entry->domain : "(default)", netlbl_domhsh_remove_entry()
537 switch (entry->def.type) { netlbl_domhsh_remove_entry()
540 &entry->def.addrsel->list4) { netlbl_domhsh_remove_entry()
548 cipso_v4_doi_putdef(entry->def.cipso); netlbl_domhsh_remove_entry()
551 call_rcu(&entry->rcu, netlbl_domhsh_free_entry); netlbl_domhsh_remove_entry()
558 * netlbl_domhsh_remove_af4 - Removes an address selector entry
581 struct netlbl_domaddr4_map *entry; netlbl_domhsh_remove_af4() local
615 entry = netlbl_domhsh_addr4_entry(entry_addr); netlbl_domhsh_remove_af4()
616 cipso_v4_doi_putdef(entry->def.cipso); netlbl_domhsh_remove_af4()
617 kfree(entry); netlbl_domhsh_remove_af4()
626 * netlbl_domhsh_remove - Removes an entry from the domain hash table
631 * Removes an entry from the domain hash table and handles any updates to the
639 struct netlbl_dom_map *entry; netlbl_domhsh_remove() local
643 entry = netlbl_domhsh_search(domain); netlbl_domhsh_remove()
645 entry = netlbl_domhsh_search_def(domain); netlbl_domhsh_remove()
646 ret_val = netlbl_domhsh_remove_entry(entry, audit_info); netlbl_domhsh_remove()
653 * netlbl_domhsh_remove_default - Removes the default entry from the table
657 * Removes/resets the default entry for the domain hash table and handles any
668 * netlbl_domhsh_getentry - Get an entry from the domain hash table
672 * Look through the domain hash table searching for an entry to match @domain,
673 * return a pointer to a copy of the entry or NULL. The caller is responsible
683 * netlbl_domhsh_getentry_af4 - Get an entry from the domain hash table
688 * Look through the domain hash table searching for an entry to match @domain
689 * and @addr, return a pointer to a copy of the entry or NULL. The caller is
713 * netlbl_domhsh_getentry_af6 - Get an entry from the domain hash table
718 * Look through the domain hash table searching for an entry to match @domain
719 * and @addr, return a pointer to a copy of the entry or NULL. The caller is
746 * @callback: callback for each entry
751 * buckets and @skip_chain entries. For each entry in the table call
759 int (*callback) (struct netlbl_dom_map *entry, void *arg), netlbl_domhsh_walk()
757 netlbl_domhsh_walk(u32 *skip_bkt, u32 *skip_chain, int (*callback) (struct netlbl_dom_map *entry, void *arg), void *cb_arg) netlbl_domhsh_walk() argument
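
netlbl_domhsh_search_def() above tries an exact domain match and falls back to a single default entry if that entry is valid. A sketch of the lookup shape, with an arbitrary hash and no RCU (which the kernel relies on throughout):

#include <stdio.h>
#include <string.h>

#define NBUCKETS 8

struct dom_map {
    const char *domain;      /* NULL for the default entry */
    int valid;
    struct dom_map *next;
};

static struct dom_map *buckets[NBUCKETS];
static struct dom_map *dom_def;   /* the default entry */

static unsigned int dom_hash(const char *s)
{
    unsigned int h = 0;
    while (*s)
        h = h * 31 + (unsigned char)*s++;
    return h % NBUCKETS;
}

static void dom_add(struct dom_map *e)
{
    e->valid = 1;
    if (e->domain) {
        unsigned int b = dom_hash(e->domain);
        e->next = buckets[b];
        buckets[b] = e;
    } else {
        dom_def = e;   /* a NULL domain installs the default entry */
    }
}

/* Exact match first, then the default entry if it is valid. */
static struct dom_map *dom_search_def(const char *domain)
{
    for (struct dom_map *e = buckets[dom_hash(domain)]; e; e = e->next)
        if (e->valid && !strcmp(e->domain, domain))
            return e;
    if (dom_def && dom_def->valid)
        return dom_def;
    return NULL;
}

int main(void)
{
    static struct dom_map web = { .domain = "httpd" };
    static struct dom_map def = { .domain = NULL };

    dom_add(&web);
    dom_add(&def);
    printf("%s\n", dom_search_def("httpd")->domain);          /* httpd */
    printf("%s\n", dom_search_def("sshd") == &def ? "default"
                                                  : "exact"); /* default */
    return 0;
}
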
H A Dnetlabel_unlabeled.c68 * LSM. The hash table is used to lookup the network interface entry
69 * (struct netlbl_unlhsh_iface) and then the interface entry is used to
71 * match cannot be found in the hash table then the default entry
72 * (netlbl_unlhsh_def) is used. The IP address entry list
155 * netlbl_unlhsh_free_iface - Frees an interface entry from the hash table
156 * @entry: the entry's RCU field
160 * function so that memory allocated to a hash table interface entry can be
162 * the IPv4 and IPv6 address lists contained as part of an interface entry. It
163 * is up to the rest of the code to make sure an interface entry is only freed
167 static void netlbl_unlhsh_free_iface(struct rcu_head *entry) netlbl_unlhsh_free_iface() argument
177 iface = container_of(entry, struct netlbl_unlhsh_iface, rcu); netlbl_unlhsh_free_iface()
212 * netlbl_unlhsh_search_iface - Search for a matching interface entry
217 * interface entry which matches @ifindex, otherwise NULL is returned. The
238 * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table
239 * @iface: the associated interface entry
242 * @secid: LSM secid value for entry
245 * Add a new address entry into the unlabeled connection hash table using the
246 * interface entry specified by @iface. On success zero is returned, otherwise
256 struct netlbl_unlhsh_addr4 *entry; netlbl_unlhsh_add_addr4() local
258 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); netlbl_unlhsh_add_addr4()
259 if (entry == NULL) netlbl_unlhsh_add_addr4()
262 entry->list.addr = addr->s_addr & mask->s_addr; netlbl_unlhsh_add_addr4()
263 entry->list.mask = mask->s_addr; netlbl_unlhsh_add_addr4()
264 entry->list.valid = 1; netlbl_unlhsh_add_addr4()
265 entry->secid = secid; netlbl_unlhsh_add_addr4()
268 ret_val = netlbl_af4list_add(&entry->list, &iface->addr4_list); netlbl_unlhsh_add_addr4()
272 kfree(entry); netlbl_unlhsh_add_addr4()
278 * netlbl_unlhsh_add_addr6 - Add a new IPv6 address entry to the hash table
279 * @iface: the associated interface entry
282 * @secid: LSM secid value for entry
285 * Add a new address entry into the unlabeled connection hash table using the
286 * interface entry specified by @iface. On success zero is returned, otherwise
296 struct netlbl_unlhsh_addr6 *entry; netlbl_unlhsh_add_addr6() local
298 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); netlbl_unlhsh_add_addr6()
299 if (entry == NULL) netlbl_unlhsh_add_addr6()
302 entry->list.addr = *addr; netlbl_unlhsh_add_addr6()
303 entry->list.addr.s6_addr32[0] &= mask->s6_addr32[0]; netlbl_unlhsh_add_addr6()
304 entry->list.addr.s6_addr32[1] &= mask->s6_addr32[1]; netlbl_unlhsh_add_addr6()
305 entry->list.addr.s6_addr32[2] &= mask->s6_addr32[2]; netlbl_unlhsh_add_addr6()
306 entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; netlbl_unlhsh_add_addr6()
307 entry->list.mask = *mask; netlbl_unlhsh_add_addr6()
308 entry->list.valid = 1; netlbl_unlhsh_add_addr6()
309 entry->secid = secid; netlbl_unlhsh_add_addr6()
312 ret_val = netlbl_af6list_add(&entry->list, &iface->addr6_list); netlbl_unlhsh_add_addr6()
316 kfree(entry); netlbl_unlhsh_add_addr6()
322 * netlbl_unlhsh_add_iface - Adds a new interface entry to the hash table
326 * Add a new, empty, interface entry into the unlabeled connection hash table.
327 * On success a pointer to the new interface entry is returned, on failure NULL
369 * netlbl_unlhsh_add - Adds a new entry to the unlabeled connection hash table
375 * @secid: LSM secid value for the entry
379 * Adds a new entry to the unlabeled connection hash table. Returns zero on
473 * netlbl_unlhsh_remove_addr4 - Remove an IPv4 address entry
475 * @iface: interface entry
481 * Remove an IP address entry from the unlabeled connection hash table.
492 struct netlbl_unlhsh_addr4 *entry; netlbl_unlhsh_remove_addr4() local
503 entry = netlbl_unlhsh_addr4_entry(list_entry); netlbl_unlhsh_remove_addr4()
505 entry = NULL; netlbl_unlhsh_remove_addr4()
516 if (entry != NULL && netlbl_unlhsh_remove_addr4()
517 security_secid_to_secctx(entry->secid, netlbl_unlhsh_remove_addr4()
522 audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0); netlbl_unlhsh_remove_addr4()
526 if (entry == NULL) netlbl_unlhsh_remove_addr4()
529 kfree_rcu(entry, rcu); netlbl_unlhsh_remove_addr4()
535 * netlbl_unlhsh_remove_addr6 - Remove an IPv6 address entry
537 * @iface: interface entry
543 * Remove an IP address entry from the unlabeled connection hash table.
554 struct netlbl_unlhsh_addr6 *entry; netlbl_unlhsh_remove_addr6() local
564 entry = netlbl_unlhsh_addr6_entry(list_entry); netlbl_unlhsh_remove_addr6()
566 entry = NULL; netlbl_unlhsh_remove_addr6()
577 if (entry != NULL && netlbl_unlhsh_remove_addr6()
578 security_secid_to_secctx(entry->secid, netlbl_unlhsh_remove_addr6()
583 audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0); netlbl_unlhsh_remove_addr6()
587 if (entry == NULL) netlbl_unlhsh_remove_addr6()
590 kfree_rcu(entry, rcu); netlbl_unlhsh_remove_addr6()
596 * netlbl_unlhsh_condremove_iface - Remove an interface entry
597 * @iface: the interface entry
600 * Remove an interface entry from the unlabeled connection hash table if it is
601 * empty. An interface entry is considered to be empty if there are no
634 * netlbl_unlhsh_remove - Remove an entry from the unlabeled hash table
643 * Removes an existing entry from the unlabeled connection hash table.
891 * connection entry to the hash table. Returns zero on success, negative
907 * single entry. However, allow users to create two entries, one each netlbl_unlabel_staticadd()
943 * unlabeled connection entry. Returns zero on success, negative values on
958 * single entry. However, allow users to create two entries, one each netlbl_unlabel_staticadddef()
992 * unlabeled connection entry. Returns zero on success, negative values on
1007 * IPv4 and IPv6 in the same entry. */ netlbl_unlabel_staticremove()
1034 * unlabeled connection entry. Returns zero on success, negative values on
1048 * IPv4 and IPv6 in the same entry. */ netlbl_unlabel_staticremovedef()
1070 * @iface: the interface entry
1071 * @addr4: the IPv4 address entry
1072 * @addr6: the IPv6 address entry
1078 * can be specified, not both, the other unspecified entry should be set to
1260 * unlabeled connection entry in a form suitable for use in a kernel generated
1527 struct netlbl_dom_map *entry; netlbl_unlabel_defconf() local
1537 entry = kzalloc(sizeof(*entry), GFP_KERNEL); netlbl_unlabel_defconf()
1538 if (entry == NULL) netlbl_unlabel_defconf()
1540 entry->def.type = NETLBL_NLTYPE_UNLABELED; netlbl_unlabel_defconf()
1541 ret_val = netlbl_domhsh_add_default(entry, &audit_info); netlbl_unlabel_defconf()
/linux-4.1.27/arch/x86/kernel/
H A Djump_label.c39 static void __jump_label_transform(struct jump_entry *entry, __jump_label_transform() argument
54 if (unlikely(memcmp((void *)entry->code, default_nop, 5) __jump_label_transform()
56 bug_at((void *)entry->code, __LINE__); __jump_label_transform()
62 if (unlikely(memcmp((void *)entry->code, ideal_nop, 5) __jump_label_transform()
64 bug_at((void *)entry->code, __LINE__); __jump_label_transform()
68 code.offset = entry->target - __jump_label_transform()
69 (entry->code + JUMP_LABEL_NOP_SIZE); __jump_label_transform()
78 if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0)) __jump_label_transform()
79 bug_at((void *)entry->code, __LINE__); __jump_label_transform()
82 code.offset = entry->target - __jump_label_transform()
83 (entry->code + JUMP_LABEL_NOP_SIZE); __jump_label_transform()
84 if (unlikely(memcmp((void *)entry->code, &code, 5) != 0)) __jump_label_transform()
85 bug_at((void *)entry->code, __LINE__); __jump_label_transform()
99 (*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE); __jump_label_transform()
101 text_poke_bp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE, __jump_label_transform()
102 (void *)entry->code + JUMP_LABEL_NOP_SIZE); __jump_label_transform()
105 void arch_jump_label_transform(struct jump_entry *entry, arch_jump_label_transform() argument
110 __jump_label_transform(entry, type, NULL, 0); arch_jump_label_transform()
121 __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, arch_jump_label_transform_static() argument
141 __jump_label_transform(entry, type, text_poke_early, 1); arch_jump_label_transform_static()
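
__jump_label_transform() above toggles a 5-byte site between a NOP and a near JMP whose rel32 displacement is target - (code + JUMP_LABEL_NOP_SIZE). A sketch that only computes the replacement bytes, no text patching; the union mirrors the kernel's layout and uses a GCC-style packed attribute:

#include <stdint.h>
#include <stdio.h>

#define JUMP_LABEL_NOP_SIZE 5

union jump_code {
    unsigned char bytes[JUMP_LABEL_NOP_SIZE];
    struct {
        unsigned char jump;   /* 0xe9: near JMP rel32 */
        int32_t offset;
    } __attribute__((packed));
};

/* Build the 5 bytes that replace the NOP at 'code' to reach 'target'. */
static union jump_code make_jump(uint64_t code, uint64_t target)
{
    union jump_code c;

    c.jump = 0xe9;
    /* rel32 is relative to the end of the 5-byte instruction,
     * exactly as in __jump_label_transform() above. */
    c.offset = (int32_t)(target - (code + JUMP_LABEL_NOP_SIZE));
    return c;
}

int main(void)
{
    union jump_code c = make_jump(0x401000, 0x401020);

    printf("opcode %02x offset %d\n", (unsigned)c.jump, (int)c.offset);
    /* prints: opcode e9 offset 27 */
    return 0;
}

The kernel additionally memcmp()-verifies that the site still holds the expected NOP or JMP before poking, and bug_at()s otherwise; that check is dropped here.
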
H A Dasm-offsets_64.c36 #define ENTRY(entry) OFFSET(IA32_SIGCONTEXT_ ## entry, sigcontext_ia32, entry) main()
53 #define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry) main()
73 #define ENTRY(entry) OFFSET(saved_context_ ## entry, saved_context, entry) main()
H A Dresource.c28 struct e820entry *entry; remove_e820_regions() local
31 entry = &e820.map[i]; remove_e820_regions()
33 resource_clip(avail, entry->addr, remove_e820_regions()
34 entry->addr + entry->size - 1); remove_e820_regions()
/linux-4.1.27/arch/sh/kernel/
H A Dperf_callchain.c24 struct perf_callchain_entry *entry = data; callchain_address() local
27 perf_callchain_store(entry, addr); callchain_address()
36 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) perf_callchain_kernel() argument
38 perf_callchain_store(entry, regs->pc); perf_callchain_kernel()
40 unwind_stack(NULL, regs, NULL, &callchain_ops, entry); perf_callchain_kernel()
/linux-4.1.27/arch/x86/kvm/
H A Dcpuid.c129 struct kvm_cpuid_entry2 *e, *entry; cpuid_fix_nx_cap() local
131 entry = NULL; cpuid_fix_nx_cap()
135 entry = e; cpuid_fix_nx_cap()
139 if (entry && (entry->edx & F(NX)) && !is_efer_nx()) { cpuid_fix_nx_cap()
140 entry->edx &= ~F(NX); cpuid_fix_nx_cap()
249 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, do_cpuid_1_ent() argument
252 entry->function = function; do_cpuid_1_ent()
253 entry->index = index; do_cpuid_1_ent()
254 cpuid_count(entry->function, entry->index, do_cpuid_1_ent()
255 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx); do_cpuid_1_ent()
256 entry->flags = 0; do_cpuid_1_ent()
259 static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry, __do_cpuid_ent_emulated() argument
264 entry->eax = 1; /* only one leaf currently */ __do_cpuid_ent_emulated()
268 entry->ecx = F(MOVBE); __do_cpuid_ent_emulated()
275 entry->function = func; __do_cpuid_ent_emulated()
276 entry->index = index; __do_cpuid_ent_emulated()
281 static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, __do_cpuid_ent() argument
363 do_cpuid_1_ent(entry, function, index); __do_cpuid_ent()
368 entry->eax = min(entry->eax, (u32)0xd); __do_cpuid_ent()
371 entry->edx &= kvm_supported_word0_x86_features; __do_cpuid_ent()
372 cpuid_mask(&entry->edx, 0); __do_cpuid_ent()
373 entry->ecx &= kvm_supported_word4_x86_features; __do_cpuid_ent()
374 cpuid_mask(&entry->ecx, 4); __do_cpuid_ent()
377 entry->ecx |= F(X2APIC); __do_cpuid_ent()
384 int t, times = entry->eax & 0xff; __do_cpuid_ent()
386 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; __do_cpuid_ent()
387 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; __do_cpuid_ent()
392 do_cpuid_1_ent(&entry[t], function, 0); __do_cpuid_ent()
393 entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; __do_cpuid_ent()
402 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; __do_cpuid_ent()
408 cache_type = entry[i - 1].eax & 0x1f; __do_cpuid_ent()
411 do_cpuid_1_ent(&entry[i], function, i); __do_cpuid_ent()
412 entry[i].flags |= __do_cpuid_ent()
419 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; __do_cpuid_ent()
422 entry->ebx &= kvm_supported_word9_x86_features; __do_cpuid_ent()
423 cpuid_mask(&entry->ebx, 9); __do_cpuid_ent()
425 entry->ebx |= F(TSC_ADJUST); __do_cpuid_ent()
427 entry->ebx = 0; __do_cpuid_ent()
428 entry->eax = 0; __do_cpuid_ent()
429 entry->ecx = 0; __do_cpuid_ent()
430 entry->edx = 0; __do_cpuid_ent()
458 entry->eax = eax.full; __do_cpuid_ent()
459 entry->ebx = cap.events_mask; __do_cpuid_ent()
460 entry->ecx = 0; __do_cpuid_ent()
461 entry->edx = edx.full; __do_cpuid_ent()
468 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; __do_cpuid_ent()
474 level_type = entry[i - 1].ecx & 0xff00; __do_cpuid_ent()
477 do_cpuid_1_ent(&entry[i], function, i); __do_cpuid_ent()
478 entry[i].flags |= __do_cpuid_ent()
488 entry->eax &= supported; __do_cpuid_ent()
489 entry->ebx = xstate_required_size(supported, false); __do_cpuid_ent()
490 entry->ecx = entry->ebx; __do_cpuid_ent()
491 entry->edx &= supported >> 32; __do_cpuid_ent()
492 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; __do_cpuid_ent()
501 do_cpuid_1_ent(&entry[i], function, idx); __do_cpuid_ent()
503 entry[i].eax &= kvm_supported_word10_x86_features; __do_cpuid_ent()
504 entry[i].ebx = 0; __do_cpuid_ent()
505 if (entry[i].eax & (F(XSAVES)|F(XSAVEC))) __do_cpuid_ent()
506 entry[i].ebx = __do_cpuid_ent()
510 if (entry[i].eax == 0 || !(supported & mask)) __do_cpuid_ent()
512 if (WARN_ON_ONCE(entry[i].ecx & 1)) __do_cpuid_ent()
515 entry[i].ecx = 0; __do_cpuid_ent()
516 entry[i].edx = 0; __do_cpuid_ent()
517 entry[i].flags |= __do_cpuid_ent()
527 entry->eax = KVM_CPUID_FEATURES; __do_cpuid_ent()
528 entry->ebx = sigptr[0]; __do_cpuid_ent()
529 entry->ecx = sigptr[1]; __do_cpuid_ent()
530 entry->edx = sigptr[2]; __do_cpuid_ent()
534 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) | __do_cpuid_ent()
543 entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); __do_cpuid_ent()
545 entry->ebx = 0; __do_cpuid_ent()
546 entry->ecx = 0; __do_cpuid_ent()
547 entry->edx = 0; __do_cpuid_ent()
550 entry->eax = min(entry->eax, 0x8000001a); __do_cpuid_ent()
553 entry->edx &= kvm_supported_word1_x86_features; __do_cpuid_ent()
554 cpuid_mask(&entry->edx, 1); __do_cpuid_ent()
555 entry->ecx &= kvm_supported_word6_x86_features; __do_cpuid_ent()
556 cpuid_mask(&entry->ecx, 6); __do_cpuid_ent()
560 entry->edx &= (1 << 8); __do_cpuid_ent()
562 entry->edx &= boot_cpu_data.x86_power; __do_cpuid_ent()
563 entry->eax = entry->ebx = entry->ecx = 0; __do_cpuid_ent()
566 unsigned g_phys_as = (entry->eax >> 16) & 0xff; __do_cpuid_ent()
567 unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U); __do_cpuid_ent()
568 unsigned phys_as = entry->eax & 0xff; __do_cpuid_ent()
572 entry->eax = g_phys_as | (virt_as << 8); __do_cpuid_ent()
573 entry->ebx = entry->edx = 0; __do_cpuid_ent()
577 entry->ecx = entry->edx = 0; __do_cpuid_ent()
586 entry->eax = min(entry->eax, 0xC0000004); __do_cpuid_ent()
589 entry->edx &= kvm_supported_word5_x86_features; __do_cpuid_ent()
590 cpuid_mask(&entry->edx, 5); __do_cpuid_ent()
599 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; __do_cpuid_ent()
603 kvm_x86_ops->set_supported_cpuid(function, entry); __do_cpuid_ent()
613 static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func, do_cpuid_ent() argument
617 return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent); do_cpuid_ent()
619 return __do_cpuid_ent(entry, func, idx, nent, maxnent); do_cpuid_ent()
735 /* when no next entry is found, the current entry[i] is reselected */ move_to_next_stateful_cpuid_entry()
746 /* find an entry with matching function, matching index (if needed), and that
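
__do_cpuid_ent() above enumerates leaf-4 subleaves by re-reading CPUID with an increasing index until the cache-type field (eax bits 4:0) reads zero. A sketch with cpuid_count() mocked; a real build would use inline asm or __get_cpuid_count():

#include <stdio.h>

struct cpuid_ent {
    unsigned int function, index;
    unsigned int eax, ebx, ecx, edx;
};

/* Mock of the CPUID instruction: pretend leaf 4 reports three caches. */
static void cpuid_count(unsigned int fn, unsigned int idx,
                        unsigned int *a, unsigned int *b,
                        unsigned int *c, unsigned int *d)
{
    static const unsigned int cache_type[] = { 1, 2, 3, 0 };

    *a = (fn == 4 && idx < 4) ? cache_type[idx] : 0;
    *b = *c = *d = 0;
}

static void do_cpuid_1_ent(struct cpuid_ent *e, unsigned int fn,
                           unsigned int idx)
{
    e->function = fn;
    e->index = idx;
    cpuid_count(fn, idx, &e->eax, &e->ebx, &e->ecx, &e->edx);
}

int main(void)
{
    struct cpuid_ent ent[8];
    int i = 0;

    /* Leaf 4: enumerate subleaves until the cache type reads zero. */
    do_cpuid_1_ent(&ent[i], 4, 0);
    while ((ent[i].eax & 0x1f) != 0 && i + 1 < 8) {
        i++;
        do_cpuid_1_ent(&ent[i], 4, i);
    }
    printf("%d cache subleaves enumerated\n", i);  /* prints 3 */
    return 0;
}
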
/linux-4.1.27/arch/mips/mm/
H A Dpgtable-64.c19 unsigned long entry; pgd_init() local
22 entry = (unsigned long)invalid_pte_table; pgd_init()
24 entry = (unsigned long)invalid_pmd_table; pgd_init()
31 p[0] = entry; pgd_init()
32 p[1] = entry; pgd_init()
33 p[2] = entry; pgd_init()
34 p[3] = entry; pgd_init()
35 p[4] = entry; pgd_init()
37 p[-3] = entry; pgd_init()
38 p[-2] = entry; pgd_init()
39 p[-1] = entry; pgd_init()
/linux-4.1.27/drivers/gpu/drm/ttm/
H A Dttm_execbuf_util.c36 struct ttm_validate_buffer *entry) ttm_eu_backoff_reservation_reverse()
38 list_for_each_entry_continue_reverse(entry, list, head) { list_for_each_entry_continue_reverse()
39 struct ttm_buffer_object *bo = entry->bo; list_for_each_entry_continue_reverse()
47 struct ttm_validate_buffer *entry; ttm_eu_del_from_lru_locked() local
49 list_for_each_entry(entry, list, head) { list_for_each_entry()
50 struct ttm_buffer_object *bo = entry->bo; list_for_each_entry()
60 struct ttm_validate_buffer *entry; ttm_eu_backoff_reservation() local
66 entry = list_first_entry(list, struct ttm_validate_buffer, head); ttm_eu_backoff_reservation()
67 glob = entry->bo->glob; ttm_eu_backoff_reservation()
70 list_for_each_entry(entry, list, head) { list_for_each_entry()
71 struct ttm_buffer_object *bo = entry->bo; list_for_each_entry()
100 struct ttm_validate_buffer *entry; ttm_eu_reserve_buffers() local
106 entry = list_first_entry(list, struct ttm_validate_buffer, head); ttm_eu_reserve_buffers()
107 glob = entry->bo->glob; ttm_eu_reserve_buffers()
112 list_for_each_entry(entry, list, head) { list_for_each_entry()
113 struct ttm_buffer_object *bo = entry->bo; list_for_each_entry()
123 struct ttm_validate_buffer *safe = entry; list_for_each_entry()
124 entry = list_prev_entry(entry, head); list_for_each_entry()
131 if (!entry->shared) list_for_each_entry()
143 ttm_eu_backoff_reservation_reverse(list, entry); list_for_each_entry()
153 if (!ret && entry->shared) list_for_each_entry()
169 list_del(&entry->head); list_for_each_entry()
170 list_add(&entry->head, list); list_for_each_entry()
185 struct ttm_validate_buffer *entry; ttm_eu_fence_buffer_objects() local
201 list_for_each_entry(entry, list, head) { list_for_each_entry()
202 bo = entry->bo; list_for_each_entry()
203 if (entry->shared) list_for_each_entry()
35 ttm_eu_backoff_reservation_reverse(struct list_head *list, struct ttm_validate_buffer *entry) ttm_eu_backoff_reservation_reverse() argument
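
ttm_eu_reserve_buffers() above try-locks every buffer on a list and, on contention, ttm_eu_backoff_reservation_reverse() walks back from the failing element releasing what was already taken. A sketch of reserve-with-reverse-backoff on plain pthread try-locks; the kernel's ww_mutex wait/wound arbitration is deliberately left out:

#include <pthread.h>
#include <stdio.h>

#define NBUF 4

static pthread_mutex_t buf[NBUF] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Try to reserve all buffers; on failure release in reverse order. */
static int reserve_all(void)
{
    int i;

    for (i = 0; i < NBUF; i++) {
        if (pthread_mutex_trylock(&buf[i]) != 0) {
            /* Back off: unlock everything taken so far, last first. */
            while (--i >= 0)
                pthread_mutex_unlock(&buf[i]);
            return -1;
        }
    }
    return 0;
}

static void unreserve_all(void)
{
    for (int i = NBUF - 1; i >= 0; i--)
        pthread_mutex_unlock(&buf[i]);
}

int main(void)
{
    pthread_mutex_lock(&buf[2]);            /* someone else holds one */
    printf("reserve: %d\n", reserve_all()); /* fails, rolls back 0 and 1 */
    pthread_mutex_unlock(&buf[2]);
    printf("reserve: %d\n", reserve_all()); /* succeeds */
    unreserve_all();
    return 0;
}
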
/linux-4.1.27/mm/
H A Dzswap.c70 /* Store failed because the entry metadata could not be allocated (rare) */
167 * rbnode - links the entry into red-black tree for the appropriate swap type
168 * refcount - the number of outstanding reference to the entry. This is needed
169 * to protect against premature freeing of the entry by code
171 * for the zswap_tree structure that contains the entry must
174 * offset - the swap offset for the entry. Index into the red-black tree.
194 * - the refcount field of each entry in the tree
204 * zswap entry functions
221 struct zswap_entry *entry; zswap_entry_cache_alloc() local
222 entry = kmem_cache_alloc(zswap_entry_cache, gfp); zswap_entry_cache_alloc()
223 if (!entry) zswap_entry_cache_alloc()
225 entry->refcount = 1; zswap_entry_cache_alloc()
226 RB_CLEAR_NODE(&entry->rbnode); zswap_entry_cache_alloc()
227 return entry; zswap_entry_cache_alloc()
230 static void zswap_entry_cache_free(struct zswap_entry *entry) zswap_entry_cache_free() argument
232 kmem_cache_free(zswap_entry_cache, entry); zswap_entry_cache_free()
241 struct zswap_entry *entry; zswap_rb_search() local
244 entry = rb_entry(node, struct zswap_entry, rbnode); zswap_rb_search()
245 if (entry->offset > offset) zswap_rb_search()
247 else if (entry->offset < offset) zswap_rb_search()
250 return entry; zswap_rb_search()
256 * In the case that an entry with the same offset is found, a pointer to
257 * the existing entry is stored in dupentry and the function returns -EEXIST
259 static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry, zswap_rb_insert() argument
268 if (myentry->offset > entry->offset) zswap_rb_insert()
270 else if (myentry->offset < entry->offset) zswap_rb_insert()
277 rb_link_node(&entry->rbnode, parent, link); zswap_rb_insert()
278 rb_insert_color(&entry->rbnode, root); zswap_rb_insert()
282 static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry) zswap_rb_erase() argument
284 if (!RB_EMPTY_NODE(&entry->rbnode)) { zswap_rb_erase()
285 rb_erase(&entry->rbnode, root); zswap_rb_erase()
286 RB_CLEAR_NODE(&entry->rbnode); zswap_rb_erase()
291 * Carries out the common pattern of freeing and entry's zpool allocation,
292 * freeing the entry itself, and decrementing the number of stored pages.
294 static void zswap_free_entry(struct zswap_entry *entry) zswap_free_entry() argument
296 zpool_free(zswap_pool, entry->handle); zswap_free_entry()
297 zswap_entry_cache_free(entry); zswap_free_entry()
303 static void zswap_entry_get(struct zswap_entry *entry) zswap_entry_get() argument
305 entry->refcount++; zswap_entry_get()
309 * remove from the tree and free it, if nobody references the entry
312 struct zswap_entry *entry) zswap_entry_put()
314 int refcount = --entry->refcount; zswap_entry_put()
318 zswap_rb_erase(&tree->rbroot, entry); zswap_entry_put()
319 zswap_free_entry(entry); zswap_entry_put()
327 struct zswap_entry *entry = NULL; zswap_entry_find_get() local
329 entry = zswap_rb_search(root, offset); zswap_entry_find_get()
330 if (entry) zswap_entry_find_get()
331 zswap_entry_get(entry); zswap_entry_find_get()
333 return entry; zswap_entry_find_get()
434 * This function tries to find a page with the given swap entry
445 static int zswap_get_swap_cache_page(swp_entry_t entry, zswap_get_swap_cache_page() argument
449 struct address_space *swapper_space = swap_address_space(entry); zswap_get_swap_cache_page()
459 found_page = find_get_page(swapper_space, entry.val); zswap_get_swap_cache_page()
480 * Swap entry may have been freed since our caller observed it. zswap_get_swap_cache_page()
482 err = swapcache_prepare(entry); zswap_get_swap_cache_page()
487 if (err) { /* swp entry is obsolete ? */ zswap_get_swap_cache_page()
495 err = __add_to_swap_cache(new_page, entry); zswap_get_swap_cache_page()
509 swapcache_free(entry); zswap_get_swap_cache_page()
521 * Attempts to free an entry by adding a page to the swap cache,
522 * decompressing the entry data into the page, and issuing a
538 struct zswap_entry *entry; zswap_writeback_entry() local
554 /* find and ref zswap entry */ zswap_writeback_entry()
556 entry = zswap_entry_find_get(&tree->rbroot, offset); zswap_writeback_entry()
557 if (!entry) { zswap_writeback_entry()
558 /* entry was invalidated */ zswap_writeback_entry()
563 BUG_ON(offset != entry->offset); zswap_writeback_entry()
580 src = (u8 *)zpool_map_handle(zswap_pool, entry->handle, zswap_writeback_entry()
584 entry->length, dst, &dlen); zswap_writeback_entry()
586 zpool_unmap_handle(zswap_pool, entry->handle); zswap_writeback_entry()
604 zswap_entry_put(tree, entry); zswap_writeback_entry()
607 * There are two possible situations for entry here: zswap_writeback_entry()
608 * (1) refcount is 1 (normal case), entry is valid and on the tree zswap_writeback_entry()
609 * (2) refcount is 0, entry is freed and not on the tree zswap_writeback_entry()
611 * search the tree and free the entry if find entry zswap_writeback_entry()
613 if (entry == zswap_rb_search(&tree->rbroot, offset)) zswap_writeback_entry()
614 zswap_entry_put(tree, entry); zswap_writeback_entry()
622 * it is safe and okay to not free the entry zswap_writeback_entry()
623 * if we free the entry in the following put zswap_writeback_entry()
628 zswap_entry_put(tree, entry); zswap_writeback_entry()
643 struct zswap_entry *entry, *dupentry; zswap_frontswap_store() local
666 /* allocate entry */ zswap_frontswap_store()
667 entry = zswap_entry_cache_alloc(GFP_KERNEL); zswap_frontswap_store()
668 if (!entry) { zswap_frontswap_store()
703 /* populate entry */ zswap_frontswap_store()
704 entry->offset = offset; zswap_frontswap_store()
705 entry->handle = handle; zswap_frontswap_store()
706 entry->length = dlen; zswap_frontswap_store()
711 ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry); zswap_frontswap_store()
729 zswap_entry_cache_free(entry); zswap_frontswap_store()
736 * return -1 on entry not found or error
742 struct zswap_entry *entry; zswap_frontswap_load() local
749 entry = zswap_entry_find_get(&tree->rbroot, offset); zswap_frontswap_load()
750 if (!entry) { zswap_frontswap_load()
751 /* entry was written back */ zswap_frontswap_load()
759 src = (u8 *)zpool_map_handle(zswap_pool, entry->handle, zswap_frontswap_load()
762 ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length, zswap_frontswap_load()
765 zpool_unmap_handle(zswap_pool, entry->handle); zswap_frontswap_load()
769 zswap_entry_put(tree, entry); zswap_frontswap_load()
775 /* frees an entry in zswap */ zswap_frontswap_invalidate_page()
779 struct zswap_entry *entry; zswap_frontswap_invalidate_page() local
783 entry = zswap_rb_search(&tree->rbroot, offset); zswap_frontswap_invalidate_page()
784 if (!entry) { zswap_frontswap_invalidate_page()
785 /* entry was written back */ zswap_frontswap_invalidate_page()
791 zswap_rb_erase(&tree->rbroot, entry); zswap_frontswap_invalidate_page()
793 /* drop the initial reference from entry creation */ zswap_frontswap_invalidate_page()
794 zswap_entry_put(tree, entry); zswap_frontswap_invalidate_page()
803 struct zswap_entry *entry, *n; zswap_frontswap_invalidate_area() local
810 rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) zswap_frontswap_invalidate_area()
811 zswap_free_entry(entry); zswap_frontswap_invalidate_area()
925 pr_err("entry cache creation failed\n"); init_zswap()
311 zswap_entry_put(struct zswap_tree *tree, struct zswap_entry *entry) zswap_entry_put() argument
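
The zswap field comments above spell out the lifetime rule: the tree holds one reference, lookups take their own, and only the final zswap_entry_put() frees the entry. A sketch of that protocol with the rb-tree reduced to a flag:

#include <stdio.h>
#include <stdlib.h>

struct zentry {
    unsigned long offset;
    int refcount;       /* one reference is held by the tree itself */
    int on_tree;
};

static void zentry_get(struct zentry *e)
{
    e->refcount++;
}

/* Drop a reference; the last put frees the entry. */
static void zentry_put(struct zentry *e)
{
    if (--e->refcount == 0) {
        printf("freeing entry at offset %lu\n", e->offset);
        free(e);
    }
}

/* Invalidate: remove the tree's reference. Readers holding their
 * own reference keep the entry alive until their final put. */
static void zentry_invalidate(struct zentry *e)
{
    e->on_tree = 0;
    zentry_put(e);
}

int main(void)
{
    struct zentry *e = malloc(sizeof(*e));

    e->offset = 42;
    e->refcount = 1;    /* the tree's reference */
    e->on_tree = 1;

    zentry_get(e);          /* a load path takes a reference */
    zentry_invalidate(e);   /* concurrent invalidate: not freed yet */
    printf("still usable, refcount %d\n", e->refcount);
    zentry_put(e);          /* the reader's put frees it */
    return 0;
}
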
H A Dswap_state.c79 int __add_to_swap_cache(struct page *page, swp_entry_t entry) __add_to_swap_cache() argument
90 set_page_private(page, entry.val); __add_to_swap_cache()
92 address_space = swap_address_space(entry); __add_to_swap_cache()
95 entry.val, page); __add_to_swap_cache()
119 int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) add_to_swap_cache() argument
125 error = __add_to_swap_cache(page, entry); add_to_swap_cache()
137 swp_entry_t entry; __delete_from_swap_cache() local
144 entry.val = page_private(page); __delete_from_swap_cache()
145 address_space = swap_address_space(entry); __delete_from_swap_cache()
163 swp_entry_t entry; add_to_swap() local
169 entry = get_swap_page(); add_to_swap()
170 if (!entry.val) add_to_swap()
175 swapcache_free(entry); add_to_swap()
190 err = add_to_swap_cache(page, entry, add_to_swap()
201 swapcache_free(entry); add_to_swap()
214 swp_entry_t entry; delete_from_swap_cache() local
217 entry.val = page_private(page); delete_from_swap_cache()
219 address_space = swap_address_space(entry); delete_from_swap_cache()
224 swapcache_free(entry); delete_from_swap_cache()
270 * Lookup a swap entry in the swap cache. A found page will be returned
275 struct page * lookup_swap_cache(swp_entry_t entry) lookup_swap_cache() argument
279 page = find_get_page(swap_address_space(entry), entry.val); lookup_swap_cache()
295 * the swap entry is no longer in use.
297 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, read_swap_cache_async() argument
309 found_page = find_get_page(swap_address_space(entry), read_swap_cache_async()
310 entry.val); read_swap_cache_async()
331 * Swap entry may have been freed since our caller observed it. read_swap_cache_async()
333 err = swapcache_prepare(entry); read_swap_cache_async()
338 * across a SWAP_HAS_CACHE swap_map entry whose page read_swap_cache_async()
354 if (err) { /* swp entry is obsolete ? */ read_swap_cache_async()
362 err = __add_to_swap_cache(new_page, entry); read_swap_cache_async()
379 swapcache_free(entry); read_swap_cache_async()
433 * @entry: swap entry of this memory
438 * Returns the struct page for entry and addr, after queueing swapin.
450 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, swapin_readahead() argument
454 unsigned long entry_offset = swp_offset(entry); swapin_readahead()
473 page = read_swap_cache_async(swp_entry(swp_type(entry), offset), swapin_readahead()
485 return read_swap_cache_async(entry, gfp_mask, vma, addr); swapin_readahead()
/linux-4.1.27/arch/cris/arch-v32/mm/
H A Dintmem.c22 struct list_head entry; member in struct:intmem_allocation
45 list_add_tail(&alloc->entry, &intmem_allocations); crisv32_intmem_init()
58 list_for_each_entry_safe(allocation, tmp, &intmem_allocations, entry) { crisv32_intmem_alloc()
72 list_add(&alloc->entry, &allocation->entry); crisv32_intmem_alloc()
81 list_add_tail(&tmp->entry, crisv32_intmem_alloc()
82 &allocation->entry); crisv32_intmem_alloc()
105 list_for_each_entry_safe(allocation, tmp, &intmem_allocations, entry) { crisv32_intmem_free()
108 list_entry(allocation->entry.prev, crisv32_intmem_free()
109 struct intmem_allocation, entry); crisv32_intmem_free()
111 list_entry(allocation->entry.next, crisv32_intmem_free()
112 struct intmem_allocation, entry); crisv32_intmem_free()
119 list_del(&allocation->entry); crisv32_intmem_free()
126 list_del(&next->entry); crisv32_intmem_free()
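
crisv32_intmem_alloc() above is a first-fit allocator over a region list: allocation splits a free region in two, and freeing coalesces with free neighbours. A sketch using plain offsets instead of device memory:

#include <stdio.h>
#include <stdlib.h>

struct region {
    unsigned long offset, size;
    int free;
    struct region *prev, *next;
};

static struct region pool = { 0, 1024, 1, NULL, NULL };
static struct region *head = &pool;

/* First fit: split a free region and return its offset. */
static unsigned long imalloc(unsigned long size)
{
    for (struct region *r = head; r; r = r->next) {
        if (!r->free || r->size < size)
            continue;
        if (r->size > size) {           /* split off the tail */
            struct region *n = malloc(sizeof(*n));

            n->offset = r->offset + size;
            n->size = r->size - size;
            n->free = 1;
            n->prev = r;
            n->next = r->next;
            if (r->next)
                r->next->prev = n;
            r->next = n;
            r->size = size;
        }
        r->free = 0;
        return r->offset;
    }
    return ~0UL;  /* no fit */
}

static void ifree(unsigned long offset)
{
    for (struct region *r = head; r; r = r->next) {
        if (r->offset != offset)
            continue;
        r->free = 1;
        /* Coalesce with a free successor, then a free predecessor. */
        if (r->next && r->next->free) {
            struct region *n = r->next;

            r->size += n->size;
            r->next = n->next;
            if (n->next)
                n->next->prev = r;
            free(n);
        }
        if (r->prev && r->prev->free) {
            struct region *p = r->prev;

            p->size += r->size;
            p->next = r->next;
            if (r->next)
                r->next->prev = p;
            free(r);
        }
        return;
    }
}

int main(void)
{
    unsigned long a = imalloc(100), b = imalloc(200);

    printf("a=%lu b=%lu\n", a, b);   /* a=0 b=100 */
    ifree(a);
    ifree(b);                        /* coalesces back into one region */
    printf("head size %lu free %d\n", head->size, head->free);
    return 0;
}
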
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
H A Dresource.c84 u32 entry; c4iw_get_resource() local
85 entry = c4iw_id_alloc(id_table); c4iw_get_resource()
86 if (entry == (u32)(-1)) c4iw_get_resource()
88 return entry; c4iw_get_resource()
91 void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry) c4iw_put_resource() argument
93 PDBG("%s entry 0x%x\n", __func__, entry); c4iw_put_resource()
94 c4iw_id_free(id_table, entry); c4iw_put_resource()
99 struct c4iw_qid_list *entry; c4iw_get_cqid() local
105 entry = list_entry(uctx->cqids.next, struct c4iw_qid_list, c4iw_get_cqid()
106 entry); c4iw_get_cqid()
107 list_del(&entry->entry); c4iw_get_cqid()
108 qid = entry->qid; c4iw_get_cqid()
109 kfree(entry); c4iw_get_cqid()
118 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_get_cqid()
119 if (!entry) c4iw_get_cqid()
121 entry->qid = i; c4iw_get_cqid()
122 list_add_tail(&entry->entry, &uctx->cqids); c4iw_get_cqid()
129 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_get_cqid()
130 if (!entry) c4iw_get_cqid()
132 entry->qid = qid; c4iw_get_cqid()
133 list_add_tail(&entry->entry, &uctx->qpids); c4iw_get_cqid()
135 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_get_cqid()
136 if (!entry) c4iw_get_cqid()
138 entry->qid = i; c4iw_get_cqid()
139 list_add_tail(&entry->entry, &uctx->qpids); c4iw_get_cqid()
155 struct c4iw_qid_list *entry; c4iw_put_cqid() local
157 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_put_cqid()
158 if (!entry) c4iw_put_cqid()
161 entry->qid = qid; c4iw_put_cqid()
163 list_add_tail(&entry->entry, &uctx->cqids); c4iw_put_cqid()
169 struct c4iw_qid_list *entry; c4iw_get_qpid() local
175 entry = list_entry(uctx->qpids.next, struct c4iw_qid_list, c4iw_get_qpid()
176 entry); c4iw_get_qpid()
177 list_del(&entry->entry); c4iw_get_qpid()
178 qid = entry->qid; c4iw_get_qpid()
179 kfree(entry); c4iw_get_qpid()
192 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_get_qpid()
193 if (!entry) c4iw_get_qpid()
195 entry->qid = i; c4iw_get_qpid()
196 list_add_tail(&entry->entry, &uctx->qpids); c4iw_get_qpid()
203 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_get_qpid()
204 if (!entry) c4iw_get_qpid()
206 entry->qid = qid; c4iw_get_qpid()
207 list_add_tail(&entry->entry, &uctx->cqids); c4iw_get_qpid()
209 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_get_qpid()
210 if (!entry) c4iw_get_qpid()
212 entry->qid = i; c4iw_get_qpid()
213 list_add_tail(&entry->entry, &uctx->cqids); c4iw_get_qpid()
229 struct c4iw_qid_list *entry; c4iw_put_qpid() local
231 entry = kmalloc(sizeof *entry, GFP_KERNEL); c4iw_put_qpid()
232 if (!entry) c4iw_put_qpid()
235 entry->qid = qid; c4iw_put_qpid()
237 list_add_tail(&entry->entry, &uctx->qpids); c4iw_put_qpid()
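
c4iw_get_cqid() above pops a queue ID from a per-context freelist and, when the list runs dry, carves a block out of the global allocator, returns one ID and pushes the remainder onto the freelist. A sketch with a bump counter standing in for the global allocator:

#include <stdio.h>
#include <stdlib.h>

#define BLOCK 4   /* IDs handed out by the global allocator per call */

struct qid_entry {
    unsigned int qid;
    struct qid_entry *next;
};

static struct qid_entry *freelist;
static unsigned int next_block;   /* stand-in for the global allocator */

static void freelist_push(unsigned int qid)
{
    struct qid_entry *e = malloc(sizeof(*e));

    e->qid = qid;
    e->next = freelist;
    freelist = e;
}

static unsigned int get_qid(void)
{
    struct qid_entry *e = freelist;
    unsigned int qid, i;

    if (e) {                       /* fast path: pop the freelist */
        freelist = e->next;
        qid = e->qid;
        free(e);
        return qid;
    }
    /* Slow path: grab a whole block, keep one ID, stash the rest. */
    qid = next_block;
    next_block += BLOCK;
    for (i = qid + 1; i < qid + BLOCK; i++)
        freelist_push(i);
    return qid;
}

static void put_qid(unsigned int qid)
{
    freelist_push(qid);            /* released IDs go back on the list */
}

int main(void)
{
    unsigned int a = get_qid();    /* allocates block 0..3, returns 0 */
    unsigned int b = get_qid();    /* pops from the freelist */

    printf("a=%u b=%u\n", a, b);
    put_qid(a);
    printf("reused=%u\n", get_qid());  /* gets a back */
    return 0;
}
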
/linux-4.1.27/security/integrity/ima/
H A Dima_policy.c183 struct ima_rule_entry *entry, *tmp; ima_lsm_update_rules() local
188 list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) { ima_lsm_update_rules()
190 if (!entry->lsm[i].rule) ima_lsm_update_rules()
192 result = security_filter_rule_init(entry->lsm[i].type, ima_lsm_update_rules()
194 entry->lsm[i].args_p, ima_lsm_update_rules()
195 &entry->lsm[i].rule); ima_lsm_update_rules()
196 BUG_ON(!entry->lsm[i].rule); ima_lsm_update_rules()
329 struct ima_rule_entry *entry; ima_match_policy() local
332 list_for_each_entry(entry, ima_rules, list) { list_for_each_entry()
334 if (!(entry->action & actmask)) list_for_each_entry()
337 if (!ima_match_rules(entry, inode, func, mask)) list_for_each_entry()
340 action |= entry->flags & IMA_ACTION_FLAGS; list_for_each_entry()
342 action |= entry->action & IMA_DO_MASK; list_for_each_entry()
343 if (entry->action & IMA_APPRAISE) list_for_each_entry()
344 action |= get_subaction(entry, func); list_for_each_entry()
346 if (entry->action & IMA_DO_MASK) list_for_each_entry()
347 actmask &= ~(entry->action | entry->action << 1); list_for_each_entry()
349 actmask &= ~(entry->action | entry->action >> 1); list_for_each_entry()
366 struct ima_rule_entry *entry; ima_update_policy_flag() local
369 list_for_each_entry(entry, ima_rules, list) { list_for_each_entry()
370 if (entry->action & IMA_DO_MASK) list_for_each_entry()
371 ima_policy_flag |= entry->action; list_for_each_entry()
467 static int ima_lsm_rule_init(struct ima_rule_entry *entry, ima_lsm_rule_init() argument
472 if (entry->lsm[lsm_rule].rule) ima_lsm_rule_init()
475 entry->lsm[lsm_rule].args_p = match_strdup(args); ima_lsm_rule_init()
476 if (!entry->lsm[lsm_rule].args_p) ima_lsm_rule_init()
479 entry->lsm[lsm_rule].type = audit_type; ima_lsm_rule_init()
480 result = security_filter_rule_init(entry->lsm[lsm_rule].type, ima_lsm_rule_init()
482 entry->lsm[lsm_rule].args_p, ima_lsm_rule_init()
483 &entry->lsm[lsm_rule].rule); ima_lsm_rule_init()
484 if (!entry->lsm[lsm_rule].rule) { ima_lsm_rule_init()
485 kfree(entry->lsm[lsm_rule].args_p); ima_lsm_rule_init()
499 static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) ima_parse_rule() argument
508 entry->uid = INVALID_UID; ima_parse_rule()
509 entry->fowner = INVALID_UID; ima_parse_rule()
510 entry->action = UNKNOWN; ima_parse_rule()
525 if (entry->action != UNKNOWN) ima_parse_rule()
528 entry->action = MEASURE; ima_parse_rule()
533 if (entry->action != UNKNOWN) ima_parse_rule()
536 entry->action = DONT_MEASURE; ima_parse_rule()
541 if (entry->action != UNKNOWN) ima_parse_rule()
544 entry->action = APPRAISE; ima_parse_rule()
549 if (entry->action != UNKNOWN) ima_parse_rule()
552 entry->action = DONT_APPRAISE; ima_parse_rule()
557 if (entry->action != UNKNOWN) ima_parse_rule()
560 entry->action = AUDIT; ima_parse_rule()
565 if (entry->func) ima_parse_rule()
569 entry->func = FILE_CHECK; ima_parse_rule()
572 entry->func = FILE_CHECK; ima_parse_rule()
574 entry->func = MODULE_CHECK; ima_parse_rule()
576 entry->func = FIRMWARE_CHECK; ima_parse_rule()
579 entry->func = MMAP_CHECK; ima_parse_rule()
581 entry->func = BPRM_CHECK; ima_parse_rule()
585 entry->flags |= IMA_FUNC; ima_parse_rule()
590 if (entry->mask) ima_parse_rule()
598 entry->mask = MAY_EXEC; ima_parse_rule()
600 entry->mask = MAY_WRITE; ima_parse_rule()
602 entry->mask = MAY_READ; ima_parse_rule()
604 entry->mask = MAY_APPEND; ima_parse_rule()
608 entry->flags |= (*args[0].from == '^') ima_parse_rule()
614 if (entry->fsmagic) { ima_parse_rule()
619 result = kstrtoul(args[0].from, 16, &entry->fsmagic); ima_parse_rule()
621 entry->flags |= IMA_FSMAGIC; ima_parse_rule()
626 if (memchr_inv(entry->fsuuid, 0x00, ima_parse_rule()
627 sizeof(entry->fsuuid))) { ima_parse_rule()
633 entry->fsuuid); ima_parse_rule()
635 entry->flags |= IMA_FSUUID; ima_parse_rule()
643 if (uid_valid(entry->uid)) { ima_parse_rule()
650 entry->uid = make_kuid(current_user_ns(), ima_parse_rule()
652 if (!uid_valid(entry->uid) || ima_parse_rule()
656 entry->flags |= (token == Opt_uid) ima_parse_rule()
663 if (uid_valid(entry->fowner)) { ima_parse_rule()
670 entry->fowner = make_kuid(current_user_ns(), (uid_t)lnum); ima_parse_rule()
671 if (!uid_valid(entry->fowner) || (((uid_t)lnum) != lnum)) ima_parse_rule()
674 entry->flags |= IMA_FOWNER; ima_parse_rule()
679 result = ima_lsm_rule_init(entry, args, ima_parse_rule()
685 result = ima_lsm_rule_init(entry, args, ima_parse_rule()
691 result = ima_lsm_rule_init(entry, args, ima_parse_rule()
697 result = ima_lsm_rule_init(entry, args, ima_parse_rule()
703 result = ima_lsm_rule_init(entry, args, ima_parse_rule()
709 result = ima_lsm_rule_init(entry, args, ima_parse_rule()
714 if (entry->action != APPRAISE) { ima_parse_rule()
721 entry->flags |= IMA_DIGSIG_REQUIRED; ima_parse_rule()
726 entry->flags |= IMA_PERMIT_DIRECTIO; ima_parse_rule()
734 if (!result && (entry->action == UNKNOWN)) ima_parse_rule()
736 else if (entry->func == MODULE_CHECK) ima_parse_rule()
738 else if (entry->func == FIRMWARE_CHECK) ima_parse_rule()
756 struct ima_rule_entry *entry; ima_parse_add_rule() local
767 entry = kzalloc(sizeof(*entry), GFP_KERNEL); ima_parse_add_rule()
768 if (!entry) { ima_parse_add_rule()
774 INIT_LIST_HEAD(&entry->list); ima_parse_add_rule()
776 result = ima_parse_rule(p, entry); ima_parse_add_rule()
778 kfree(entry); ima_parse_add_rule()
786 list_add_tail(&entry->list, &ima_policy_rules); ima_parse_add_rule()
795 struct ima_rule_entry *entry, *tmp; ima_delete_rules() local
799 list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) { ima_delete_rules()
801 kfree(entry->lsm[i].args_p); ima_delete_rules()
803 list_del(&entry->list); ima_delete_rules()
804 kfree(entry); ima_delete_rules()
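
The ima_policy.c hits above trace a rule entry's full life cycle: kzalloc() in ima_parse_add_rule(), field-by-field population in ima_parse_rule() with the entry freed on a parse error, list_add_tail() onto ima_policy_rules on success, and a list_for_each_entry_safe() walk in ima_delete_rules() that frees per-entry allocations before the entry itself. Below is a minimal userspace sketch of that parse-then-link-or-free pattern, with plain next pointers standing in for <linux/list.h>; all names are illustrative, not the kernel API.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct rule_entry {
        char action[16];
        struct rule_entry *next;
    };

    /* Parse one rule string into a freshly allocated entry; on failure
     * the caller frees the entry, mirroring ima_parse_add_rule(). */
    static int parse_rule(const char *rule, struct rule_entry *entry)
    {
        if (strncmp(rule, "measure", 7) && strncmp(rule, "appraise", 8))
            return -1;                      /* action stays unknown: reject */
        snprintf(entry->action, sizeof(entry->action), "%s", rule);
        return 0;
    }

    static int add_rule(struct rule_entry **rules, const char *rule)
    {
        struct rule_entry *entry = calloc(1, sizeof(*entry));

        if (!entry)
            return -1;
        if (parse_rule(rule, entry)) {      /* reject: free, never linked */
            free(entry);
            return -1;
        }
        entry->next = *rules;               /* commit: link into the policy */
        *rules = entry;
        return 0;
    }

    /* "Safe" teardown: remember ->next before freeing the current
     * entry, the moral equivalent of list_for_each_entry_safe(). */
    static void delete_rules(struct rule_entry **rules)
    {
        struct rule_entry *entry = *rules, *tmp;

        while (entry) {
            tmp = entry->next;
            free(entry);
            entry = tmp;
        }
        *rules = NULL;
    }

    int main(void)
    {
        struct rule_entry *rules = NULL;

        add_rule(&rules, "measure");
        add_rule(&rules, "bogus");          /* rejected and freed */
        delete_rules(&rules);
        return 0;
    }
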
ima_api.c:25 * ima_free_template_entry - free an existing template entry
27 void ima_free_template_entry(struct ima_template_entry *entry) ima_free_template_entry() argument
31 for (i = 0; i < entry->template_desc->num_fields; i++) ima_free_template_entry()
32 kfree(entry->template_data[i].data); ima_free_template_entry()
34 kfree(entry); ima_free_template_entry()
38 * ima_alloc_init_template - create and initialize a new template entry
43 int xattr_len, struct ima_template_entry **entry) ima_alloc_init_template()
48 *entry = kzalloc(sizeof(**entry) + template_desc->num_fields * ima_alloc_init_template()
50 if (!*entry) ima_alloc_init_template()
53 (*entry)->template_desc = template_desc; ima_alloc_init_template()
60 &((*entry)->template_data[i])); ima_alloc_init_template()
64 len = (*entry)->template_data[i].len; ima_alloc_init_template()
65 (*entry)->template_data_len += sizeof(len); ima_alloc_init_template()
66 (*entry)->template_data_len += len; ima_alloc_init_template()
70 ima_free_template_entry(*entry); ima_alloc_init_template()
71 *entry = NULL; ima_alloc_init_template()
78 * Calculate the hash of a template entry, add the template entry
91 int ima_store_template(struct ima_template_entry *entry, ima_store_template() argument
97 char *template_name = entry->template_desc->name; ima_store_template()
105 int num_fields = entry->template_desc->num_fields; ima_store_template()
109 result = ima_calc_field_array_hash(&entry->template_data[0], ima_store_template()
110 entry->template_desc, ima_store_template()
118 memcpy(entry->digest, hash.hdr.digest, hash.hdr.length); ima_store_template()
120 result = ima_add_template_entry(entry, violation, op, inode, filename); ima_store_template()
134 struct ima_template_entry *entry; ima_add_violation() local
143 NULL, 0, &entry); ima_add_violation()
148 result = ima_store_template(entry, violation, inode, filename); ima_add_violation()
150 ima_free_template_entry(entry); ima_add_violation()
269 struct ima_template_entry *entry; ima_store_measurement() local
276 xattr_value, xattr_len, &entry); ima_store_measurement()
283 result = ima_store_template(entry, violation, inode, filename); ima_store_measurement()
287 ima_free_template_entry(entry); ima_store_measurement()
40 ima_alloc_init_template(struct integrity_iint_cache *iint, struct file *file, const unsigned char *filename, struct evm_ima_xattr_data *xattr_value, int xattr_len, struct ima_template_entry **entry) ima_alloc_init_template() argument
ima_queue.c:18 * The measurement list is append-only. No entry is
46 /* look up the digest value in the hash table, and return the entry */ ima_lookup_digest_entry()
56 rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE); ima_lookup_digest_entry()
67 * - Add template entry to measurement list and hash table.
71 static int ima_add_digest_entry(struct ima_template_entry *entry) ima_add_digest_entry() argument
78 pr_err("OUT OF MEMORY ERROR creating queue entry\n"); ima_add_digest_entry()
81 qe->entry = entry; ima_add_digest_entry()
87 key = ima_hash_key(entry->digest); ima_add_digest_entry()
105 /* Add template entry to the measurement list and hash table,
108 int ima_add_template_entry(struct ima_template_entry *entry, int violation, ima_add_template_entry() argument
120 memcpy(digest, entry->digest, sizeof(digest)); ima_add_template_entry()
128 result = ima_add_digest_entry(entry); ima_add_template_entry()
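
ima_queue.c keeps each template entry in two structures at once: the append-only measurement list and a hash table keyed off the entry's digest, so ima_lookup_digest_entry() can memcmp() its way down a single bucket instead of scanning the whole list. A compact userspace sketch of that lookup-before-append scheme; the bucket function is a stand-in for ima_hash_key() (which likewise keys off the digest), and all names are illustrative.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define DIGEST_SIZE  20
    #define HASH_BUCKETS 128

    struct q_entry {
        unsigned char digest[DIGEST_SIZE];
        struct q_entry *hnext;              /* hash-bucket chain */
    };

    static struct q_entry *table[HASH_BUCKETS];

    static unsigned int hash_key(const unsigned char *digest)
    {
        return digest[0] % HASH_BUCKETS;    /* stand-in for ima_hash_key() */
    }

    static struct q_entry *lookup_digest(const unsigned char *digest)
    {
        struct q_entry *qe;

        for (qe = table[hash_key(digest)]; qe; qe = qe->hnext)
            if (memcmp(qe->digest, digest, DIGEST_SIZE) == 0)
                return qe;
        return NULL;
    }

    static int add_digest(const unsigned char *digest)
    {
        struct q_entry *qe;

        if (lookup_digest(digest))
            return 0;                       /* already measured: skip */
        qe = malloc(sizeof(*qe));
        if (!qe)
            return -1;
        memcpy(qe->digest, digest, DIGEST_SIZE);
        qe->hnext = table[hash_key(digest)];
        table[hash_key(digest)] = qe;
        return 1;                           /* newly appended */
    }

    int main(void)
    {
        unsigned char d[DIGEST_SIZE] = { 0xab };

        printf("%d %d\n", add_digest(d), add_digest(d));    /* prints: 1 0 */
        return 0;
    }
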
/linux-4.1.27/sound/isa/gus/
gus_mem_proc.c:34 static ssize_t snd_gf1_mem_proc_dump(struct snd_info_entry *entry, snd_gf1_mem_proc_dump() argument
39 struct gus_proc_private *priv = entry->private_data; snd_gf1_mem_proc_dump()
49 static void snd_gf1_mem_proc_free(struct snd_info_entry *entry) snd_gf1_mem_proc_free() argument
51 struct gus_proc_private *priv = entry->private_data; snd_gf1_mem_proc_free()
64 struct snd_info_entry *entry; snd_gf1_mem_proc_init() local
73 if (! snd_card_proc_new(gus->card, name, &entry)) { snd_gf1_mem_proc_init()
74 entry->content = SNDRV_INFO_CONTENT_DATA; snd_gf1_mem_proc_init()
75 entry->private_data = priv; snd_gf1_mem_proc_init()
76 entry->private_free = snd_gf1_mem_proc_free; snd_gf1_mem_proc_init()
77 entry->c.ops = &snd_gf1_mem_proc_ops; snd_gf1_mem_proc_init()
79 priv->size = entry->size = gus->gf1.mem_alloc.banks_8[idx].size; snd_gf1_mem_proc_init()
91 if (! snd_card_proc_new(gus->card, name, &entry)) { snd_gf1_mem_proc_init()
92 entry->content = SNDRV_INFO_CONTENT_DATA; snd_gf1_mem_proc_init()
93 entry->private_data = priv; snd_gf1_mem_proc_init()
94 entry->private_free = snd_gf1_mem_proc_free; snd_gf1_mem_proc_init()
95 entry->c.ops = &snd_gf1_mem_proc_ops; snd_gf1_mem_proc_init()
97 priv->size = entry->size = gus->gf1.rom_memory; snd_gf1_mem_proc_init()
/linux-4.1.27/fs/afs/
vlclient.c:64 struct afs_cache_vlocation *entry; afs_deliver_vl_get_entry_by_xxx() local
79 entry = call->reply; afs_deliver_vl_get_entry_by_xxx()
83 entry->name[loop] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
84 entry->name[loop] = 0; afs_deliver_vl_get_entry_by_xxx()
88 entry->nservers = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
91 entry->servers[loop].s_addr = *bp++; afs_deliver_vl_get_entry_by_xxx()
97 entry->srvtmask[loop] = 0; afs_deliver_vl_get_entry_by_xxx()
99 entry->srvtmask[loop] |= AFS_VOL_VTM_RW; afs_deliver_vl_get_entry_by_xxx()
101 entry->srvtmask[loop] |= AFS_VOL_VTM_RO; afs_deliver_vl_get_entry_by_xxx()
103 entry->srvtmask[loop] |= AFS_VOL_VTM_BAK; afs_deliver_vl_get_entry_by_xxx()
106 entry->vid[0] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
107 entry->vid[1] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
108 entry->vid[2] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
113 entry->vidmask = 0; afs_deliver_vl_get_entry_by_xxx()
115 entry->vidmask |= AFS_VOL_VTM_RW; afs_deliver_vl_get_entry_by_xxx()
117 entry->vidmask |= AFS_VOL_VTM_RO; afs_deliver_vl_get_entry_by_xxx()
119 entry->vidmask |= AFS_VOL_VTM_BAK; afs_deliver_vl_get_entry_by_xxx()
120 if (!entry->vidmask) afs_deliver_vl_get_entry_by_xxx()
148 * dispatch a get volume entry by name operation
153 struct afs_cache_vlocation *entry, afs_vl_get_entry_by_name()
171 call->reply = entry; afs_vl_get_entry_by_name()
188 * dispatch a get volume entry by ID operation
194 struct afs_cache_vlocation *entry, afs_vl_get_entry_by_id()
207 call->reply = entry; afs_vl_get_entry_by_id()
150 afs_vl_get_entry_by_name(struct in_addr *addr, struct key *key, const char *volname, struct afs_cache_vlocation *entry, const struct afs_wait_mode *wait_mode) afs_vl_get_entry_by_name() argument
190 afs_vl_get_entry_by_id(struct in_addr *addr, struct key *key, afs_volid_t volid, afs_voltype_t voltype, struct afs_cache_vlocation *entry, const struct afs_wait_mode *wait_mode) afs_vl_get_entry_by_id() argument
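
afs_deliver_vl_get_entry_by_xxx() above decodes its reply buffer one big-endian word at a time: every field is one ntohl(*bp++) away, per-volume presence words are folded into vidmask flag bits, and an entry whose mask ends up empty is rejected. A self-contained sketch of that style of wire decoding; the struct and flag names are invented for the example.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VTM_RW 0x1
    #define VTM_RO 0x2

    struct vl_entry {
        uint32_t vid[2];
        unsigned int vidmask;
    };

    /* Consume big-endian 32-bit words in order, converting each with
     * ntohl() and turning presence words into mask bits. */
    static int unmarshal(const uint32_t *bp, struct vl_entry *entry)
    {
        entry->vid[0] = ntohl(*bp++);
        entry->vid[1] = ntohl(*bp++);
        entry->vidmask = 0;
        if (ntohl(*bp++))
            entry->vidmask |= VTM_RW;
        if (ntohl(*bp++))
            entry->vidmask |= VTM_RO;
        return entry->vidmask ? 0 : -1;     /* no volumes: bad entry */
    }

    int main(void)
    {
        uint32_t wire[4] = { htonl(7), htonl(8), htonl(1), htonl(0) };
        struct vl_entry e;

        if (unmarshal(wire, &e) == 0)
            printf("vid %u/%u mask %#x\n",
                   (unsigned)e.vid[0], (unsigned)e.vid[1], e.vidmask);
        return 0;
    }
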
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
statahead.c:54 SA_ENTRY_INIT = 0, /** init entry */
56 SA_ENTRY_INVA = 2, /** invalid entry */
57 SA_ENTRY_DEST = 3, /** entry to be destroyed */
67 /* entry reference count */
69 /* entry index in the sai */
73 /* entry status */
75 /* entry size, contains name */
83 /* entry name */
90 static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry) ll_sa_entry_unhashed() argument
92 return list_empty(&entry->se_hash); ll_sa_entry_unhashed()
96 * The entry only can be released by the caller, it is necessary to hold lock.
98 static inline int ll_sa_entry_stated(struct ll_sa_entry *entry) ll_sa_entry_stated() argument
101 return (entry->se_stat != SA_ENTRY_INIT); ll_sa_entry_stated()
110 * Insert entry to hash SA table.
113 ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry) ll_sa_entry_enhash() argument
115 int i = ll_sa_entry_hash(entry->se_qstr.hash); ll_sa_entry_enhash()
118 list_add_tail(&entry->se_hash, &sai->sai_cache[i]); ll_sa_entry_enhash()
123 * Remove entry from SA table.
126 ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry) ll_sa_entry_unhash() argument
128 int i = ll_sa_entry_hash(entry->se_qstr.hash); ll_sa_entry_unhash()
131 list_del_init(&entry->se_hash); ll_sa_entry_unhash()
200 struct ll_sa_entry *entry; ll_sa_entry_alloc() local
205 entry = kzalloc(entry_size, GFP_NOFS); ll_sa_entry_alloc()
206 if (unlikely(!entry)) ll_sa_entry_alloc()
209 CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n", ll_sa_entry_alloc()
210 len, name, entry, index); ll_sa_entry_alloc()
212 entry->se_index = index; ll_sa_entry_alloc()
215 * Statahead entry reference rules: ll_sa_entry_alloc()
217 * 1) When statahead entry is initialized, its reference is set as 2. ll_sa_entry_alloc()
220 * lockless hash lookup (only the scanner can remove entry from hash ll_sa_entry_alloc()
222 * entry reference. So the performance is improved. After using the ll_sa_entry_alloc()
223 * statahead entry, the scanner will call "atomic_dec()" to drop the ll_sa_entry_alloc()
225 * the statahead entry will be freed. ll_sa_entry_alloc()
228 * when they process the statahead entry, the reference for target ll_sa_entry_alloc()
229 * should be held to guarantee the entry will not be released by the ll_sa_entry_alloc()
230 * directory scanner. After processing the entry, these threads will ll_sa_entry_alloc()
231 * drop the entry reference. If it is the last reference, the entry ll_sa_entry_alloc()
234 * The second reference when initializes the statahead entry is used ll_sa_entry_alloc()
237 atomic_set(&entry->se_refcount, 2); ll_sa_entry_alloc()
238 entry->se_stat = SA_ENTRY_INIT; ll_sa_entry_alloc()
239 entry->se_size = entry_size; ll_sa_entry_alloc()
240 dname = (char *)entry + sizeof(struct ll_sa_entry); ll_sa_entry_alloc()
243 entry->se_qstr.hash = full_name_hash(name, len); ll_sa_entry_alloc()
244 entry->se_qstr.len = len; ll_sa_entry_alloc()
245 entry->se_qstr.name = dname; ll_sa_entry_alloc()
249 list_add_tail(&entry->se_link, &sai->sai_entries); ll_sa_entry_alloc()
250 INIT_LIST_HEAD(&entry->se_list); ll_sa_entry_alloc()
251 ll_sa_entry_enhash(sai, entry); ll_sa_entry_alloc()
256 return entry; ll_sa_entry_alloc()
260 * Used by the directory scanner to search entry with name.
262 * Only the caller can remove the entry from hash, so it is unnecessary to hold
263 * hash lock. It is caller's duty to release the init refcount on the entry, so
264 * it is also unnecessary to increase refcount on the entry.
269 struct ll_sa_entry *entry; ll_sa_entry_get_byname() local
272 list_for_each_entry(entry, &sai->sai_cache[i], se_hash) { ll_sa_entry_get_byname()
273 if (entry->se_qstr.hash == qstr->hash && ll_sa_entry_get_byname()
274 entry->se_qstr.len == qstr->len && ll_sa_entry_get_byname()
275 memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0) ll_sa_entry_get_byname()
276 return entry; ll_sa_entry_get_byname()
282 * Used by the async getattr request callback to find entry with index.
285 * It needs to increase entry refcount before returning to guarantee that the
286 * entry cannot be freed by others.
291 struct ll_sa_entry *entry; ll_sa_entry_get_byindex() local
293 list_for_each_entry(entry, &sai->sai_entries, se_link) { ll_sa_entry_get_byindex()
294 if (entry->se_index == index) { ll_sa_entry_get_byindex()
295 LASSERT(atomic_read(&entry->se_refcount) > 0); ll_sa_entry_get_byindex()
296 atomic_inc(&entry->se_refcount); ll_sa_entry_get_byindex()
297 return entry; ll_sa_entry_get_byindex()
299 if (entry->se_index > index) ll_sa_entry_get_byindex()
306 struct ll_sa_entry *entry) ll_sa_entry_cleanup()
308 struct md_enqueue_info *minfo = entry->se_minfo; ll_sa_entry_cleanup()
309 struct ptlrpc_request *req = entry->se_req; ll_sa_entry_cleanup()
312 entry->se_minfo = NULL; ll_sa_entry_cleanup()
319 entry->se_req = NULL; ll_sa_entry_cleanup()
325 struct ll_sa_entry *entry) ll_sa_entry_put()
327 if (atomic_dec_and_test(&entry->se_refcount)) { ll_sa_entry_put()
328 CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n", ll_sa_entry_put()
329 entry->se_qstr.len, entry->se_qstr.name, entry, ll_sa_entry_put()
330 entry->se_index); ll_sa_entry_put()
332 LASSERT(list_empty(&entry->se_link)); ll_sa_entry_put()
333 LASSERT(list_empty(&entry->se_list)); ll_sa_entry_put()
334 LASSERT(ll_sa_entry_unhashed(entry)); ll_sa_entry_put()
336 ll_sa_entry_cleanup(sai, entry); ll_sa_entry_put()
337 iput(entry->se_inode); ll_sa_entry_put()
339 OBD_FREE(entry, entry->se_size); ll_sa_entry_put()
345 do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry) do_sa_entry_fini() argument
349 LASSERT(!ll_sa_entry_unhashed(entry)); do_sa_entry_fini()
350 LASSERT(!list_empty(&entry->se_link)); do_sa_entry_fini()
352 ll_sa_entry_unhash(sai, entry); do_sa_entry_fini()
355 entry->se_stat = SA_ENTRY_DEST; do_sa_entry_fini()
356 list_del_init(&entry->se_link); do_sa_entry_fini()
357 if (likely(!list_empty(&entry->se_list))) do_sa_entry_fini()
358 list_del_init(&entry->se_list); do_sa_entry_fini()
361 ll_sa_entry_put(sai, entry); do_sa_entry_fini()
368 ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry) ll_sa_entry_fini() argument
372 if (entry) ll_sa_entry_fini()
373 do_sa_entry_fini(sai, entry); ll_sa_entry_fini()
375 /* drop old entry, only 'scanner' process does this, no need to lock */ ll_sa_entry_fini()
388 struct ll_sa_entry *entry, se_stat_t stat) do_sa_entry_to_stated()
393 if (!list_empty(&entry->se_list)) do_sa_entry_to_stated()
394 list_del_init(&entry->se_list); do_sa_entry_to_stated()
397 if (se->se_index < entry->se_index) { do_sa_entry_to_stated()
403 list_add(&entry->se_list, pos); do_sa_entry_to_stated()
404 entry->se_stat = stat; do_sa_entry_to_stated()
408 * Move entry to sai_entries_stated and sort with the index.
409 * \retval 1 -- entry to be destroyed.
410 * \retval 0 -- entry is inserted into stated list.
414 struct ll_sa_entry *entry, se_stat_t stat) ll_sa_entry_to_stated()
419 ll_sa_entry_cleanup(sai, entry); ll_sa_entry_to_stated()
422 if (likely(entry->se_stat != SA_ENTRY_DEST)) { ll_sa_entry_to_stated()
423 do_sa_entry_to_stated(sai, entry, stat); ll_sa_entry_to_stated()
512 struct ll_sa_entry *entry, *next; ll_sai_put() local
535 list_for_each_entry_safe(entry, next, ll_sai_put()
537 do_sa_entry_fini(sai, entry); ll_sai_put()
560 /* AGL may fall behind statahead by one entry */ ll_agl_trigger()
616 struct ll_sa_entry *entry; ll_post_statahead() local
628 entry = sa_first_received_entry(sai); ll_post_statahead()
629 atomic_inc(&entry->se_refcount); ll_post_statahead()
630 list_del_init(&entry->se_list); ll_post_statahead()
633 LASSERT(entry->se_handle != 0); ll_post_statahead()
635 minfo = entry->se_minfo; ll_post_statahead()
637 req = entry->se_req; ll_post_statahead()
644 child = entry->se_inode; ll_post_statahead()
663 entry->se_inode = NULL; ll_post_statahead()
669 it->d.lustre.it_lock_handle = entry->se_handle; ll_post_statahead()
684 entry->se_inode = child; ll_post_statahead()
687 ll_agl_add(sai, child, entry->se_index); ll_post_statahead()
694 rc = ll_sa_entry_to_stated(sai, entry, ll_post_statahead()
696 if (rc == 0 && entry->se_index == sai->sai_index_wait) ll_post_statahead()
698 ll_sa_entry_put(sai, entry); ll_post_statahead()
708 struct ll_sa_entry *entry; ll_statahead_interpret() local
725 /* stale entry */ ll_statahead_interpret()
740 entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata); ll_statahead_interpret()
741 if (entry == NULL) { ll_statahead_interpret()
749 do_sa_entry_to_stated(sai, entry, SA_ENTRY_INVA); ll_statahead_interpret()
750 wakeup = (entry->se_index == sai->sai_index_wait); ll_statahead_interpret()
752 entry->se_minfo = minfo; ll_statahead_interpret()
753 entry->se_req = ptlrpc_request_addref(req); ll_statahead_interpret()
758 entry->se_handle = handle; ll_statahead_interpret()
760 list_add_tail(&entry->se_list, ll_statahead_interpret()
766 ll_sa_entry_put(sai, entry); ll_statahead_interpret()
803 struct ll_sa_entry *entry, struct md_enqueue_info **pmi, sa_args_init()
807 struct qstr *qstr = &entry->se_qstr; sa_args_init()
835 minfo->mi_cbdata = entry->se_index; sa_args_init()
852 static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry) do_sa_lookup() argument
859 rc = sa_args_init(dir, NULL, entry, &minfo, &einfo, capas); do_sa_lookup()
880 static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry, do_sa_revalidate() argument
897 entry->se_inode = igrab(inode); do_sa_revalidate()
901 entry->se_handle = it.d.lustre.it_lock_handle; do_sa_revalidate()
906 rc = sa_args_init(dir, inode, entry, &minfo, &einfo, capas); do_sa_revalidate()
908 entry->se_inode = NULL; do_sa_revalidate()
918 entry->se_inode = NULL; do_sa_revalidate()
933 struct ll_sa_entry *entry; ll_statahead_one() local
937 entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name, ll_statahead_one()
939 if (IS_ERR(entry)) ll_statahead_one()
942 dentry = d_lookup(parent, &entry->se_qstr); ll_statahead_one()
944 rc = do_sa_lookup(dir, entry); ll_statahead_one()
946 rc = do_sa_revalidate(dir, entry, dentry); ll_statahead_one()
948 ll_agl_add(sai, d_inode(dentry), entry->se_index); ll_statahead_one()
955 rc1 = ll_sa_entry_to_stated(sai, entry, ll_statahead_one()
957 if (rc1 == 0 && entry->se_index == sai->sai_index_wait) ll_statahead_one()
964 /* drop one refcount on entry by ll_sa_entry_alloc */ ll_statahead_one()
965 ll_sa_entry_put(sai, entry); ll_statahead_one()
1151 * don't stat-ahead first entry. ll_statahead_thread()
1479 ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry) ll_sai_unplug() argument
1485 if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC) ll_sai_unplug()
1490 ll_sa_entry_fini(sai, entry); ll_sai_unplug()
1518 * Start statahead thread if this is the first dir entry.
1520 * \retval 1 -- find entry with lock in cache, the caller needs to do
1522 * \retval 0 -- find entry in cache, but without lock, the caller needs
1532 struct ll_sa_entry *entry; do_statahead_enter() local
1576 entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name); do_statahead_enter()
1577 if (entry == NULL || only_unplug) { do_statahead_enter()
1578 ll_sai_unplug(sai, entry); do_statahead_enter()
1579 return entry ? 1 : -EAGAIN; do_statahead_enter()
1582 if (!ll_sa_entry_stated(entry)) { do_statahead_enter()
1583 sai->sai_index_wait = entry->se_index; do_statahead_enter()
1587 ll_sa_entry_stated(entry) || do_statahead_enter()
1591 ll_sai_unplug(sai, entry); do_statahead_enter()
1596 if (entry->se_stat == SA_ENTRY_SUCC && do_statahead_enter()
1597 entry->se_inode != NULL) { do_statahead_enter()
1598 struct inode *inode = entry->se_inode; do_statahead_enter()
1601 entry->se_handle }; do_statahead_enter()
1613 ll_sai_unplug(sai, entry); do_statahead_enter()
1626 ll_sai_unplug(sai, entry); do_statahead_enter()
1631 entry->se_inode = NULL; do_statahead_enter()
1640 ll_sai_unplug(sai, entry); do_statahead_enter()
305 ll_sa_entry_cleanup(struct ll_statahead_info *sai, struct ll_sa_entry *entry) ll_sa_entry_cleanup() argument
324 ll_sa_entry_put(struct ll_statahead_info *sai, struct ll_sa_entry *entry) ll_sa_entry_put() argument
387 do_sa_entry_to_stated(struct ll_statahead_info *sai, struct ll_sa_entry *entry, se_stat_t stat) do_sa_entry_to_stated() argument
413 ll_sa_entry_to_stated(struct ll_statahead_info *sai, struct ll_sa_entry *entry, se_stat_t stat) ll_sa_entry_to_stated() argument
802 sa_args_init(struct inode *dir, struct inode *child, struct ll_sa_entry *entry, struct md_enqueue_info **pmi, struct ldlm_enqueue_info **pei, struct obd_capa **pcapa) sa_args_init() argument
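
The long comment in ll_sa_entry_alloc() (statahead.c lines 215-237 above) deserves restating: an entry is born with a reference count of 2, one reference owned by the directory scanner (which alone may unhash, so its hash lookups can be lockless) and one covering the entry's presence on the sai lists; async getattr threads take and drop their own reference around each use, and whoever drops the count to zero frees the entry. A userspace sketch of that discipline, with C11 atomics standing in for the kernel's atomic_t (illustrative, not the Lustre code):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct sa_entry {
        atomic_int refcount;
        int index;
    };

    /* Born with refcount 2: one reference for the scanner, one for
     * the entry's membership on the statahead lists. */
    static struct sa_entry *entry_alloc(int index)
    {
        struct sa_entry *entry = calloc(1, sizeof(*entry));

        if (!entry)
            return NULL;
        atomic_init(&entry->refcount, 2);
        entry->index = index;
        return entry;
    }

    static void entry_get(struct sa_entry *entry)
    {
        atomic_fetch_add(&entry->refcount, 1);
    }

    static void entry_put(struct sa_entry *entry)
    {
        /* atomic_fetch_sub() returns the old value, so seeing 1 here
         * means we dropped the last reference and must free. */
        if (atomic_fetch_sub(&entry->refcount, 1) == 1) {
            printf("free entry %d\n", entry->index);
            free(entry);
        }
    }

    int main(void)
    {
        struct sa_entry *e = entry_alloc(42);

        entry_get(e);   /* async getattr callback pins the entry */
        entry_put(e);   /* callback finished */
        entry_put(e);   /* entry unlinked from the lists */
        entry_put(e);   /* scanner's final put frees it */
        return 0;
    }
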
/linux-4.1.27/arch/sparc/include/asm/
sections.h:7 /* sparc entry point */
spitfire.h:101 static inline unsigned long spitfire_get_dtlb_data(int entry) spitfire_get_dtlb_data() argument
107 : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS)); spitfire_get_dtlb_data()
115 static inline unsigned long spitfire_get_dtlb_tag(int entry) spitfire_get_dtlb_tag() argument
121 : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ)); spitfire_get_dtlb_tag()
125 static inline void spitfire_put_dtlb_data(int entry, unsigned long data) spitfire_put_dtlb_data() argument
130 : "r" (data), "r" (entry << 3), spitfire_put_dtlb_data()
134 static inline unsigned long spitfire_get_itlb_data(int entry) spitfire_get_itlb_data() argument
140 : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS)); spitfire_get_itlb_data()
148 static inline unsigned long spitfire_get_itlb_tag(int entry) spitfire_get_itlb_tag() argument
154 : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ)); spitfire_get_itlb_tag()
158 static inline void spitfire_put_itlb_data(int entry, unsigned long data) spitfire_put_itlb_data() argument
163 : "r" (data), "r" (entry << 3), spitfire_put_itlb_data()
214 static inline unsigned long cheetah_get_ldtlb_data(int entry) cheetah_get_ldtlb_data() argument
221 : "r" ((0 << 16) | (entry << 3)), cheetah_get_ldtlb_data()
227 static inline unsigned long cheetah_get_litlb_data(int entry) cheetah_get_litlb_data() argument
234 : "r" ((0 << 16) | (entry << 3)), cheetah_get_litlb_data()
240 static inline unsigned long cheetah_get_ldtlb_tag(int entry) cheetah_get_ldtlb_tag() argument
246 : "r" ((0 << 16) | (entry << 3)), cheetah_get_ldtlb_tag()
252 static inline unsigned long cheetah_get_litlb_tag(int entry) cheetah_get_litlb_tag() argument
258 : "r" ((0 << 16) | (entry << 3)), cheetah_get_litlb_tag()
264 static inline void cheetah_put_ldtlb_data(int entry, unsigned long data) cheetah_put_ldtlb_data() argument
270 "r" ((0 << 16) | (entry << 3)), cheetah_put_ldtlb_data()
274 static inline void cheetah_put_litlb_data(int entry, unsigned long data) cheetah_put_litlb_data() argument
280 "r" ((0 << 16) | (entry << 3)), cheetah_put_litlb_data()
284 static inline unsigned long cheetah_get_dtlb_data(int entry, int tlb) cheetah_get_dtlb_data() argument
291 : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS)); cheetah_get_dtlb_data()
296 static inline unsigned long cheetah_get_dtlb_tag(int entry, int tlb) cheetah_get_dtlb_tag() argument
302 : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ)); cheetah_get_dtlb_tag()
306 static inline void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb) cheetah_put_dtlb_data() argument
312 "r" ((tlb << 16) | (entry << 3)), cheetah_put_dtlb_data()
316 static inline unsigned long cheetah_get_itlb_data(int entry) cheetah_get_itlb_data() argument
323 : "r" ((2 << 16) | (entry << 3)), cheetah_get_itlb_data()
329 static inline unsigned long cheetah_get_itlb_tag(int entry) cheetah_get_itlb_tag() argument
335 : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ)); cheetah_get_itlb_tag()
339 static inline void cheetah_put_itlb_data(int entry, unsigned long data) cheetah_put_itlb_data() argument
344 : "r" (data), "r" ((2 << 16) | (entry << 3)), cheetah_put_itlb_data()
/linux-4.1.27/arch/sparc/mm/
extable.c:41 /* A range entry, skip both parts. */ search_extable()
46 /* A deleted entry; see trim_init_extable */ search_extable()
92 const struct exception_table_entry *entry; search_extables_range() local
94 entry = search_exception_tables(addr); search_extables_range()
95 if (!entry) search_extables_range()
99 if (!entry->fixup) { search_extables_range()
100 *g2 = (addr - entry->insn) / 4; search_extables_range()
101 return (entry + 1)->fixup; search_extables_range()
104 return entry->fixup; search_extables_range()
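
The sparc search_extables_range() above encodes a range as two consecutive exception-table slots: the first carries the start insn with fixup == 0, the second holds the range end plus the shared fixup, and %g2 receives the index of the faulting 4-byte instruction within the range. A small sketch of that lookup convention, simplified to a single pre-located entry (not the kernel's search code):

    #include <stdio.h>

    struct exception_table_entry {
        unsigned long insn;
        unsigned long fixup;    /* 0 in the first half of a range entry */
    };

    /* Range entries store the real fixup in entry[1]; g2 gets the
     * instruction index, since sparc instructions are 4 bytes. */
    static unsigned long search_range(const struct exception_table_entry *entry,
                                      unsigned long addr, unsigned long *g2)
    {
        if (!entry->fixup) {
            *g2 = (addr - entry->insn) / 4;
            return entry[1].fixup;
        }
        return entry->fixup;
    }

    int main(void)
    {
        struct exception_table_entry range[2] = {
            { 0x1000, 0 },          /* range start, no inline fixup */
            { 0x1040, 0x2000 },     /* range end + shared fixup */
        };
        unsigned long g2;

        printf("fixup %#lx, g2 %lu\n", search_range(range, 0x1008, &g2), g2);
        return 0;
    }
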
/linux-4.1.27/arch/metag/kernel/
perf_callchain.c:32 struct perf_callchain_entry *entry) user_backtrace()
50 perf_callchain_store(entry, calladdr); user_backtrace()
59 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) perf_callchain_user() argument
68 while ((entry->nr < PERF_MAX_STACK_DEPTH) && frame) perf_callchain_user()
69 frame = user_backtrace(frame, entry); perf_callchain_user()
81 struct perf_callchain_entry *entry = data; callchain_trace() local
82 perf_callchain_store(entry, fr->pc); callchain_trace()
87 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) perf_callchain_kernel() argument
95 walk_stackframe(&fr, callchain_trace, entry); perf_callchain_kernel()
31 user_backtrace(struct metag_frame __user *user_frame, struct perf_callchain_entry *entry) user_backtrace() argument
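
perf_callchain_user() above is a classic frame-pointer walk: follow the saved frame pointer from frame to frame, record one PC per step, and stop at a NULL frame or once PERF_MAX_STACK_DEPTH entries are stored. A userspace sketch of the same bounded walk; a real kernel version must additionally validate every user pointer before dereferencing it, as user_backtrace() does.

    #include <stdio.h>
    #include <stddef.h>

    #define MAX_STACK_DEPTH 127

    struct frame {
        struct frame *fp;       /* caller's frame */
        unsigned long pc;       /* return address */
    };

    /* Walk at most 'max' frames, stopping early at a NULL frame. */
    static size_t backtrace_frames(const struct frame *frame,
                                   unsigned long *pcs, size_t max)
    {
        size_t nr = 0;

        while (frame && nr < max) {
            pcs[nr++] = frame->pc;
            frame = frame->fp;
        }
        return nr;
    }

    int main(void)
    {
        struct frame f2 = { NULL, 0x2000 }, f1 = { &f2, 0x1000 };
        unsigned long pcs[MAX_STACK_DEPTH];

        printf("depth %zu\n", backtrace_frames(&f1, pcs, MAX_STACK_DEPTH));
        return 0;
    }
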
/linux-4.1.27/arch/score/kernel/
Makefile:7 obj-y += entry.o irq.o process.o ptrace.o \
/linux-4.1.27/arch/openrisc/kernel/
Makefile:8 traps.o time.o irq.o entry.o ptrace.o signal.o \
/linux-4.1.27/arch/m68k/68360/
Makefile:7 obj-y := config.o commproc.o entry.o ints.o
/linux-4.1.27/sound/pci/ice1712/
psc724.h:10 /* entry struct */
se.h:12 /* entry struct */
/linux-4.1.27/drivers/gpu/drm/vmwgfx/
vmwgfx_cmdbuf_res.c:39 * struct vmw_cmdbuf_res - Command buffer managed resource entry.
42 * @hash: Hash entry for the manager hash table.
45 * @state: Staging state of this resource entry.
46 * @man: Pointer to a resource manager for this entry.
105 * @entry: Pointer to a struct vmw_cmdbuf_res.
107 * Frees a struct vmw_cmdbuf_res entry and drops its reference to the
111 struct vmw_cmdbuf_res *entry) vmw_cmdbuf_res_free()
113 list_del(&entry->head); vmw_cmdbuf_res_free()
114 WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash)); vmw_cmdbuf_res_free()
115 vmw_resource_unreference(&entry->res); vmw_cmdbuf_res_free()
116 kfree(entry); vmw_cmdbuf_res_free()
131 struct vmw_cmdbuf_res *entry, *next; vmw_cmdbuf_res_commit() local
133 list_for_each_entry_safe(entry, next, list, head) { list_for_each_entry_safe()
134 list_del(&entry->head); list_for_each_entry_safe()
135 switch (entry->state) { list_for_each_entry_safe()
137 entry->state = VMW_CMDBUF_RES_COMMITED; list_for_each_entry_safe()
138 list_add_tail(&entry->head, &entry->man->list); list_for_each_entry_safe()
141 vmw_resource_unreference(&entry->res); list_for_each_entry_safe()
142 kfree(entry); list_for_each_entry_safe()
165 struct vmw_cmdbuf_res *entry, *next; vmw_cmdbuf_res_revert() local
168 list_for_each_entry_safe(entry, next, list, head) { list_for_each_entry_safe()
169 switch (entry->state) { list_for_each_entry_safe()
171 vmw_cmdbuf_res_free(entry->man, entry); list_for_each_entry_safe()
174 ret = drm_ht_insert_item(&entry->man->resources, list_for_each_entry_safe()
175 &entry->hash); list_for_each_entry_safe()
176 list_del(&entry->head); list_for_each_entry_safe()
177 list_add_tail(&entry->head, &entry->man->list); list_for_each_entry_safe()
178 entry->state = VMW_CMDBUF_RES_COMMITED; list_for_each_entry_safe()
196 * This function allocates a struct vmw_cmdbuf_res entry and adds the
198 * entry is then put on the staging list identified by @list.
235 * This function looks up the struct vmw_cmdbuf_res entry from the manager
237 * state it then either removes the entry from the staging list or adds it
245 struct vmw_cmdbuf_res *entry; vmw_cmdbuf_res_remove() local
254 entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash); vmw_cmdbuf_res_remove()
256 switch (entry->state) { vmw_cmdbuf_res_remove()
258 vmw_cmdbuf_res_free(man, entry); vmw_cmdbuf_res_remove()
261 (void) drm_ht_remove_item(&man->resources, &entry->hash); vmw_cmdbuf_res_remove()
262 list_del(&entry->head); vmw_cmdbuf_res_remove()
263 entry->state = VMW_CMDBUF_RES_DEL; vmw_cmdbuf_res_remove()
264 list_add_tail(&entry->head, list); vmw_cmdbuf_res_remove()
315 struct vmw_cmdbuf_res *entry, *next; vmw_cmdbuf_res_man_destroy() local
317 list_for_each_entry_safe(entry, next, &man->list, head) vmw_cmdbuf_res_man_destroy()
318 vmw_cmdbuf_res_free(man, entry); vmw_cmdbuf_res_man_destroy()
110 vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man, struct vmw_cmdbuf_res *entry) vmw_cmdbuf_res_free() argument
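
vmw_cmdbuf_res_commit() and vmw_cmdbuf_res_revert() above implement a two-phase update: each change is first staged on a caller-private list in VMW_CMDBUF_RES_ADD or VMW_CMDBUF_RES_DEL state; on commit, additions are folded into the manager's list and staged deletions are freed, while revert undoes both. A reduced sketch of the commit half, with a plain singly linked list replacing list_head and the hash table omitted (illustrative only):

    #include <stdio.h>
    #include <stdlib.h>

    enum res_state { RES_ADD, RES_COMMITTED, RES_DEL };

    struct res_entry {
        enum res_state state;
        struct res_entry *next;
    };

    /* Fold the staging list into the manager's list: staged additions
     * become visible, staged deletions are finalized by freeing. */
    static void commit(struct res_entry **staging, struct res_entry **man)
    {
        struct res_entry *entry, *next;

        for (entry = *staging; entry; entry = next) {
            next = entry->next;
            if (entry->state == RES_ADD) {
                entry->state = RES_COMMITTED;
                entry->next = *man;
                *man = entry;
            } else {
                free(entry);
            }
        }
        *staging = NULL;
    }

    int main(void)
    {
        struct res_entry *staging = NULL, *man = NULL;
        struct res_entry *e = calloc(1, sizeof(*e));

        e->state = RES_ADD;
        e->next = staging;
        staging = e;
        commit(&staging, &man);
        printf("committed: %d\n", man && man->state == RES_COMMITTED);
        free(man);
        return 0;
    }
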
/linux-4.1.27/net/bridge/
br_mdb.c:166 struct br_mdb_entry *entry, u32 pid, nlmsg_populate_mdb_fill()
188 if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry)) nlmsg_populate_mdb_fill()
209 static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry, __br_mdb_notify() argument
220 err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF); __br_mdb_notify()
235 struct br_mdb_entry entry; br_mdb_notify() local
237 memset(&entry, 0, sizeof(entry)); br_mdb_notify()
238 entry.ifindex = port->dev->ifindex; br_mdb_notify()
239 entry.addr.proto = group->proto; br_mdb_notify()
240 entry.addr.u.ip4 = group->u.ip4; br_mdb_notify()
242 entry.addr.u.ip6 = group->u.ip6; br_mdb_notify()
244 __br_mdb_notify(dev, &entry, type); br_mdb_notify()
247 static bool is_valid_mdb_entry(struct br_mdb_entry *entry) is_valid_mdb_entry() argument
249 if (entry->ifindex == 0) is_valid_mdb_entry()
252 if (entry->addr.proto == htons(ETH_P_IP)) { is_valid_mdb_entry()
253 if (!ipv4_is_multicast(entry->addr.u.ip4)) is_valid_mdb_entry()
255 if (ipv4_is_local_multicast(entry->addr.u.ip4)) is_valid_mdb_entry()
258 } else if (entry->addr.proto == htons(ETH_P_IPV6)) { is_valid_mdb_entry()
259 if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) is_valid_mdb_entry()
264 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) is_valid_mdb_entry()
274 struct br_mdb_entry *entry; br_mdb_parse() local
309 entry = nla_data(tb[MDBA_SET_ENTRY]); br_mdb_parse()
310 if (!is_valid_mdb_entry(entry)) { br_mdb_parse()
311 pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n"); br_mdb_parse()
315 *pentry = entry; br_mdb_parse()
355 struct br_mdb_entry *entry) __br_mdb_add()
365 dev = __dev_get_by_index(net, entry->ifindex); __br_mdb_add()
374 ip.proto = entry->addr.proto; __br_mdb_add()
376 ip.u.ip4 = entry->addr.u.ip4; __br_mdb_add()
379 ip.u.ip6 = entry->addr.u.ip6; __br_mdb_add()
383 ret = br_mdb_add_group(br, p, &ip, entry->state); __br_mdb_add()
391 struct br_mdb_entry *entry; br_mdb_add() local
396 err = br_mdb_parse(skb, nlh, &dev, &entry); br_mdb_add()
402 err = __br_mdb_add(net, br, entry); br_mdb_add()
404 __br_mdb_notify(dev, entry, RTM_NEWMDB); br_mdb_add()
408 static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) __br_mdb_del() argument
421 ip.proto = entry->addr.proto; __br_mdb_del()
426 ip.u.ip4 = entry->addr.u.ip4; __br_mdb_del()
432 ip.u.ip6 = entry->addr.u.ip6; __br_mdb_del()
446 if (!p->port || p->port->dev->ifindex != entry->ifindex) __br_mdb_del()
472 struct br_mdb_entry *entry; br_mdb_del() local
476 err = br_mdb_parse(skb, nlh, &dev, &entry); br_mdb_del()
482 err = __br_mdb_del(br, entry); br_mdb_del()
484 __br_mdb_notify(dev, entry, RTM_DELMDB); br_mdb_del()
164 nlmsg_populate_mdb_fill(struct sk_buff *skb, struct net_device *dev, struct br_mdb_entry *entry, u32 pid, u32 seq, int type, unsigned int flags) nlmsg_populate_mdb_fill() argument
354 __br_mdb_add(struct net *net, struct net_bridge *br, struct br_mdb_entry *entry) __br_mdb_add() argument
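
is_valid_mdb_entry() above vets netlink input before any bridge state is touched: the entry must name a port (nonzero ifindex), carry a routable multicast group (so not IPv4 link-local 224.0.0.0/24, and not the IPv6 all-nodes address), and use a known state value. A standalone sketch of the IPv4 half of those checks; the constants and struct layout are invented for the example.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { MDB_TEMPORARY, MDB_PERMANENT };

    struct mdb_entry {
        int ifindex;
        uint32_t ip4;           /* network byte order */
        int state;
    };

    static int entry_is_valid(const struct mdb_entry *entry)
    {
        uint32_t addr = ntohl(entry->ip4);

        if (entry->ifindex == 0)
            return 0;
        if ((addr & 0xf0000000) != 0xe0000000)  /* not multicast (224/4) */
            return 0;
        if ((addr & 0xffffff00) == 0xe0000000)  /* link-local 224.0.0.x */
            return 0;
        return entry->state == MDB_PERMANENT ||
               entry->state == MDB_TEMPORARY;
    }

    int main(void)
    {
        struct mdb_entry e = { 2, 0, MDB_PERMANENT };

        inet_pton(AF_INET, "239.1.1.1", &e.ip4);
        printf("valid: %d\n", entry_is_valid(&e));
        return 0;
    }
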
/linux-4.1.27/Documentation/mic/mpssd/
sysfs.c:26 readsysfs(char *dir, char *entry) readsysfs() argument
35 snprintf(filename, PATH_MAX, "%s/%s", MICSYSFSDIR, entry); readsysfs()
38 "%s/%s/%s", MICSYSFSDIR, dir, entry); readsysfs()
42 mpsslog("Failed to open sysfs entry '%s': %s\n", readsysfs()
49 mpsslog("Failed to read sysfs entry '%s': %s\n", readsysfs()
68 setsysfs(char *dir, char *entry, char *value) setsysfs() argument
75 snprintf(filename, PATH_MAX, "%s/%s", MICSYSFSDIR, entry); setsysfs()
78 MICSYSFSDIR, dir, entry); setsysfs()
80 oldvalue = readsysfs(dir, entry); setsysfs()
85 mpsslog("Failed to open sysfs entry '%s': %s\n", setsysfs()
93 mpsslog("Failed to write new sysfs entry '%s': %s\n", setsysfs()
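
setsysfs() above first reads the current value through readsysfs() and skips the write when nothing would change, avoiding a redundant sysfs store. A self-contained sketch of that read-compare-write pattern against an ordinary file; the path used in main() is a scratch file, not the real MIC sysfs layout.

    #include <stdio.h>
    #include <string.h>

    /* Return 1 if written, 0 if the value was already current, -1 on error. */
    static int write_if_changed(const char *filename, const char *value)
    {
        char oldvalue[64] = "";
        FILE *f = fopen(filename, "r");

        if (f) {
            if (fgets(oldvalue, sizeof(oldvalue), f))
                oldvalue[strcspn(oldvalue, "\n")] = '\0';
            fclose(f);
        }
        if (strcmp(oldvalue, value) == 0)
            return 0;                   /* unchanged: skip the write */
        f = fopen(filename, "w");
        if (!f)
            return -1;
        fprintf(f, "%s\n", value);
        return fclose(f) ? -1 : 1;
    }

    int main(void)
    {
        printf("%d\n", write_if_changed("/tmp/demo_state", "boot"));
        return 0;
    }
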
/linux-4.1.27/kernel/time/
timer_stats.c:53 struct entry { struct
57 struct entry *next;
107 * tstat entry structs only get allocated while collection is
117 static struct entry entries[MAX_ENTRIES];
128 #define __tstat_hashfn(entry) \
129 (((unsigned long)(entry)->timer ^ \
130 (unsigned long)(entry)->start_func ^ \
131 (unsigned long)(entry)->expire_func ^ \
132 (unsigned long)(entry)->pid ) & TSTAT_HASH_MASK)
134 #define tstat_hashentry(entry) (tstat_hash_table + __tstat_hashfn(entry))
136 static struct entry *tstat_hash_table[TSTAT_HASH_SIZE] __read_mostly;
146 static struct entry *alloc_entry(void) alloc_entry()
154 static int match_entries(struct entry *entry1, struct entry *entry2) match_entries()
163 * Look up whether an entry matching this item is present
167 static struct entry *tstat_lookup(struct entry *entry, char *comm) tstat_lookup() argument
169 struct entry **head, *curr, *prev; tstat_lookup()
171 head = tstat_hashentry(entry); tstat_lookup()
175 * The fastpath is when the entry is already hashed, tstat_lookup()
180 if (match_entries(curr, entry)) tstat_lookup()
186 * Slowpath: allocate, set up and link a new hash entry: tstat_lookup()
196 if (match_entries(curr, entry)) tstat_lookup()
205 *curr = *entry; tstat_lookup()
242 struct entry *entry, input; timer_stats_update_stats() local
260 entry = tstat_lookup(&input, comm); timer_stats_update_stats()
261 if (likely(entry)) timer_stats_update_stats()
262 entry->count++; timer_stats_update_stats()
283 struct entry *entry; tstats_show() local
308 entry = entries + i; tstats_show()
309 if (entry->timer_flag & TIMER_STATS_FLAG_DEFERRABLE) { tstats_show()
311 entry->count, entry->pid, entry->comm); tstats_show()
314 entry->count, entry->pid, entry->comm); tstats_show()
317 print_name_offset(m, (unsigned long)entry->start_func); tstats_show()
319 print_name_offset(m, (unsigned long)entry->expire_func); tstats_show()
322 events += entry->count; tstats_show()
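
timer_stats' __tstat_hashfn() above simply XORs an entry's identifying fields (timer, start_func, expire_func, pid) and masks the result into a bucket index, and tstat_lookup() tries the already-hashed fastpath before falling back to allocate-and-link from a fixed pool. A compact sketch of that lookup-or-insert scheme (pid dropped for brevity; names illustrative):

    #include <stdio.h>

    #define HASH_SIZE   512
    #define HASH_MASK   (HASH_SIZE - 1)
    #define MAX_ENTRIES 1024

    struct entry {
        void *timer, *start_func, *expire_func;
        unsigned long count;
        struct entry *next;
    };

    static struct entry pool[MAX_ENTRIES];      /* fixed pool, like tstat's */
    static unsigned int pool_used;
    static struct entry *hash_table[HASH_SIZE];

    /* XOR the identifying fields, as __tstat_hashfn() does. */
    static unsigned int hashfn(const struct entry *e)
    {
        return ((unsigned long)e->timer ^
                (unsigned long)e->start_func ^
                (unsigned long)e->expire_func) & HASH_MASK;
    }

    static int match(const struct entry *a, const struct entry *b)
    {
        return a->timer == b->timer &&
               a->start_func == b->start_func &&
               a->expire_func == b->expire_func;
    }

    /* Fastpath: entry already hashed. Slowpath: allocate from the
     * pool and link a new bucket entry. */
    static struct entry *lookup(const struct entry *input)
    {
        struct entry **head = &hash_table[hashfn(input)], *curr;

        for (curr = *head; curr; curr = curr->next)
            if (match(curr, input))
                return curr;
        if (pool_used >= MAX_ENTRIES)
            return NULL;                        /* collection overflow */
        curr = &pool[pool_used++];
        *curr = *input;
        curr->next = *head;
        *head = curr;
        return curr;
    }

    int main(void)
    {
        struct entry input = { (void *)0x10, (void *)0x20, (void *)0x30 };

        lookup(&input)->count++;
        lookup(&input)->count++;
        printf("count %lu\n", lookup(&input)->count);   /* prints: count 2 */
        return 0;
    }
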
/linux-4.1.27/fs/btrfs/
ordered-data.c:31 static u64 entry_end(struct btrfs_ordered_extent *entry) entry_end() argument
33 if (entry->file_offset + entry->len < entry->file_offset) entry_end()
35 return entry->file_offset + entry->len; entry_end()
46 struct btrfs_ordered_extent *entry; tree_insert() local
50 entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node); tree_insert()
52 if (file_offset < entry->file_offset) tree_insert()
54 else if (file_offset >= entry_end(entry)) tree_insert()
83 struct btrfs_ordered_extent *entry; __tree_search() local
87 entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); __tree_search()
89 prev_entry = entry; __tree_search()
91 if (file_offset < entry->file_offset) __tree_search()
93 else if (file_offset >= entry_end(entry)) __tree_search()
128 * helper to check if a given offset is inside a given entry
130 static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset) offset_in_entry() argument
132 if (file_offset < entry->file_offset || offset_in_entry()
133 entry->file_offset + entry->len <= file_offset) offset_in_entry()
138 static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset, range_overlaps() argument
141 if (file_offset + len <= entry->file_offset || range_overlaps()
142 entry->file_offset + entry->len <= file_offset) range_overlaps()
157 struct btrfs_ordered_extent *entry; tree_search() local
160 entry = rb_entry(tree->last, struct btrfs_ordered_extent, tree_search()
162 if (offset_in_entry(entry, file_offset)) tree_search()
191 struct btrfs_ordered_extent *entry; __btrfs_add_ordered_extent() local
194 entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS); __btrfs_add_ordered_extent()
195 if (!entry) __btrfs_add_ordered_extent()
198 entry->file_offset = file_offset; __btrfs_add_ordered_extent()
199 entry->start = start; __btrfs_add_ordered_extent()
200 entry->len = len; __btrfs_add_ordered_extent()
203 entry->csum_bytes_left = disk_len; __btrfs_add_ordered_extent()
204 entry->disk_len = disk_len; __btrfs_add_ordered_extent()
205 entry->bytes_left = len; __btrfs_add_ordered_extent()
206 entry->inode = igrab(inode); __btrfs_add_ordered_extent()
207 entry->compress_type = compress_type; __btrfs_add_ordered_extent()
208 entry->truncated_len = (u64)-1; __btrfs_add_ordered_extent()
210 set_bit(type, &entry->flags); __btrfs_add_ordered_extent()
213 set_bit(BTRFS_ORDERED_DIRECT, &entry->flags); __btrfs_add_ordered_extent()
216 atomic_set(&entry->refs, 1); __btrfs_add_ordered_extent()
217 init_waitqueue_head(&entry->wait); __btrfs_add_ordered_extent()
218 INIT_LIST_HEAD(&entry->list); __btrfs_add_ordered_extent()
219 INIT_LIST_HEAD(&entry->root_extent_list); __btrfs_add_ordered_extent()
220 INIT_LIST_HEAD(&entry->work_list); __btrfs_add_ordered_extent()
221 init_completion(&entry->completion); __btrfs_add_ordered_extent()
222 INIT_LIST_HEAD(&entry->log_list); __btrfs_add_ordered_extent()
223 INIT_LIST_HEAD(&entry->trans_list); __btrfs_add_ordered_extent()
225 trace_btrfs_ordered_extent_add(inode, entry); __btrfs_add_ordered_extent()
229 &entry->rb_node); __btrfs_add_ordered_extent()
235 list_add_tail(&entry->root_extent_list, __btrfs_add_ordered_extent()
281 struct btrfs_ordered_extent *entry, btrfs_add_ordered_sum()
288 list_add_tail(&sum->list, &entry->list); btrfs_add_ordered_sum()
289 WARN_ON(entry->csum_bytes_left < sum->len); btrfs_add_ordered_sum()
290 entry->csum_bytes_left -= sum->len; btrfs_add_ordered_sum()
291 if (entry->csum_bytes_left == 0) btrfs_add_ordered_sum()
292 wake_up(&entry->wait); btrfs_add_ordered_sum()
314 struct btrfs_ordered_extent *entry = NULL; btrfs_dec_test_first_ordered_pending() local
329 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); btrfs_dec_test_first_ordered_pending()
330 if (!offset_in_entry(entry, *file_offset)) { btrfs_dec_test_first_ordered_pending()
335 dec_start = max(*file_offset, entry->file_offset); btrfs_dec_test_first_ordered_pending()
336 dec_end = min(*file_offset + io_size, entry->file_offset + btrfs_dec_test_first_ordered_pending()
337 entry->len); btrfs_dec_test_first_ordered_pending()
344 if (to_dec > entry->bytes_left) { btrfs_dec_test_first_ordered_pending()
347 entry->bytes_left, to_dec); btrfs_dec_test_first_ordered_pending()
349 entry->bytes_left -= to_dec; btrfs_dec_test_first_ordered_pending()
351 set_bit(BTRFS_ORDERED_IOERR, &entry->flags); btrfs_dec_test_first_ordered_pending()
353 if (entry->bytes_left == 0) { btrfs_dec_test_first_ordered_pending()
354 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); btrfs_dec_test_first_ordered_pending()
355 if (waitqueue_active(&entry->wait)) btrfs_dec_test_first_ordered_pending()
356 wake_up(&entry->wait); btrfs_dec_test_first_ordered_pending()
361 if (!ret && cached && entry) { btrfs_dec_test_first_ordered_pending()
362 *cached = entry; btrfs_dec_test_first_ordered_pending()
363 atomic_inc(&entry->refs); btrfs_dec_test_first_ordered_pending()
384 struct btrfs_ordered_extent *entry = NULL; btrfs_dec_test_ordered_pending() local
391 entry = *cached; btrfs_dec_test_ordered_pending()
401 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); btrfs_dec_test_ordered_pending()
403 if (!offset_in_entry(entry, file_offset)) { btrfs_dec_test_ordered_pending()
408 if (io_size > entry->bytes_left) { btrfs_dec_test_ordered_pending()
411 entry->bytes_left, io_size); btrfs_dec_test_ordered_pending()
413 entry->bytes_left -= io_size; btrfs_dec_test_ordered_pending()
415 set_bit(BTRFS_ORDERED_IOERR, &entry->flags); btrfs_dec_test_ordered_pending()
417 if (entry->bytes_left == 0) { btrfs_dec_test_ordered_pending()
418 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); btrfs_dec_test_ordered_pending()
419 if (waitqueue_active(&entry->wait)) btrfs_dec_test_ordered_pending()
420 wake_up(&entry->wait); btrfs_dec_test_ordered_pending()
425 if (!ret && cached && entry) { btrfs_dec_test_ordered_pending()
426 *cached = entry; btrfs_dec_test_ordered_pending()
427 atomic_inc(&entry->refs); btrfs_dec_test_ordered_pending()
540 void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) btrfs_put_ordered_extent() argument
545 trace_btrfs_ordered_extent_put(entry->inode, entry); btrfs_put_ordered_extent()
547 if (atomic_dec_and_test(&entry->refs)) { btrfs_put_ordered_extent()
548 if (entry->inode) btrfs_put_ordered_extent()
549 btrfs_add_delayed_iput(entry->inode); btrfs_put_ordered_extent()
550 while (!list_empty(&entry->list)) { btrfs_put_ordered_extent()
551 cur = entry->list.next; btrfs_put_ordered_extent()
556 kmem_cache_free(btrfs_ordered_extent_cache, entry); btrfs_put_ordered_extent()
565 struct btrfs_ordered_extent *entry) btrfs_remove_ordered_extent()
573 node = &entry->rb_node; btrfs_remove_ordered_extent()
577 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); btrfs_remove_ordered_extent()
581 list_del_init(&entry->root_extent_list); btrfs_remove_ordered_extent()
584 trace_btrfs_ordered_extent_remove(inode, entry); btrfs_remove_ordered_extent()
593 wake_up(&entry->wait); btrfs_remove_ordered_extent()
698 struct btrfs_ordered_extent *entry, btrfs_start_ordered_extent()
701 u64 start = entry->file_offset; btrfs_start_ordered_extent()
702 u64 end = start + entry->len - 1; btrfs_start_ordered_extent()
704 trace_btrfs_ordered_extent_start(inode, entry); btrfs_start_ordered_extent()
711 if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags)) btrfs_start_ordered_extent()
714 wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, btrfs_start_ordered_extent()
715 &entry->flags)); btrfs_start_ordered_extent()
788 struct btrfs_ordered_extent *entry = NULL; btrfs_lookup_ordered_extent() local
796 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); btrfs_lookup_ordered_extent()
797 if (!offset_in_entry(entry, file_offset)) btrfs_lookup_ordered_extent()
798 entry = NULL; btrfs_lookup_ordered_extent()
799 if (entry) btrfs_lookup_ordered_extent()
800 atomic_inc(&entry->refs); btrfs_lookup_ordered_extent()
803 return entry; btrfs_lookup_ordered_extent()
815 struct btrfs_ordered_extent *entry = NULL; btrfs_lookup_ordered_range() local
827 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); btrfs_lookup_ordered_range()
828 if (range_overlaps(entry, file_offset, len)) btrfs_lookup_ordered_range()
831 if (entry->file_offset >= file_offset + len) { btrfs_lookup_ordered_range()
832 entry = NULL; btrfs_lookup_ordered_range()
835 entry = NULL; btrfs_lookup_ordered_range()
841 if (entry) btrfs_lookup_ordered_range()
842 atomic_inc(&entry->refs); btrfs_lookup_ordered_range()
844 return entry; btrfs_lookup_ordered_range()
856 struct btrfs_ordered_extent *entry = NULL; btrfs_lookup_first_ordered_extent() local
864 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); btrfs_lookup_first_ordered_extent()
865 atomic_inc(&entry->refs); btrfs_lookup_first_ordered_extent()
868 return entry; btrfs_lookup_first_ordered_extent()
944 /* We treat this entry as if it doesn't exist */ btrfs_ordered_update_i_size()
280 btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_extent *entry, struct btrfs_ordered_sum *sum) btrfs_add_ordered_sum() argument
564 btrfs_remove_ordered_extent(struct inode *inode, struct btrfs_ordered_extent *entry) btrfs_remove_ordered_extent() argument
697 btrfs_start_ordered_extent(struct inode *inode, struct btrfs_ordered_extent *entry, int wait) btrfs_start_ordered_extent() argument
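
ordered-data.c rests on three small helpers: entry_end() saturates rather than wraps when file_offset + len overflows, offset_in_entry() asks whether a single offset falls inside an entry, and range_overlaps() asks whether [file_offset, file_offset + len) intersects one. A self-contained restatement of those helpers with the overflow guard made explicit (a sketch, not the btrfs source):

    #include <stdint.h>
    #include <stdio.h>

    struct ordered_extent {
        uint64_t file_offset;
        uint64_t len;
    };

    /* Saturating end: if offset + len wraps, clamp to the maximum. */
    static uint64_t entry_end(const struct ordered_extent *entry)
    {
        if (entry->file_offset + entry->len < entry->file_offset)
            return UINT64_MAX;
        return entry->file_offset + entry->len;
    }

    static int offset_in_entry(const struct ordered_extent *entry,
                               uint64_t file_offset)
    {
        return file_offset >= entry->file_offset &&
               file_offset < entry_end(entry);
    }

    static int range_overlaps(const struct ordered_extent *entry,
                              uint64_t file_offset, uint64_t len)
    {
        return file_offset + len > entry->file_offset &&
               entry_end(entry) > file_offset;
    }

    int main(void)
    {
        struct ordered_extent e = { 4096, 8192 };

        printf("%d %d %d\n",
               offset_in_entry(&e, 4096),       /* 1: first byte */
               offset_in_entry(&e, 12288),      /* 0: one past the end */
               range_overlaps(&e, 0, 4097));    /* 1: touches the first byte */
        return 0;
    }
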
free-space-cache.c:523 struct btrfs_free_space_entry *entry; io_ctl_add_entry() local
528 entry = io_ctl->cur; io_ctl_add_entry()
529 entry->offset = cpu_to_le64(offset); io_ctl_add_entry()
530 entry->bytes = cpu_to_le64(bytes); io_ctl_add_entry()
531 entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP : io_ctl_add_entry()
591 struct btrfs_free_space *entry, u8 *type) io_ctl_read_entry()
603 entry->offset = le64_to_cpu(e->offset); io_ctl_read_entry()
604 entry->bytes = le64_to_cpu(e->bytes); io_ctl_read_entry()
618 struct btrfs_free_space *entry) io_ctl_read_bitmap()
626 memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); io_ctl_read_bitmap()
1096 struct btrfs_free_space *entry = list_for_each_safe() local
1099 ret = io_ctl_add_bitmap(io_ctl, entry->bitmap); list_for_each_safe()
1102 list_del_init(&entry->list); list_for_each_safe()
1127 struct btrfs_free_space *entry = list_for_each_safe() local
1129 list_del_init(&entry->list); list_for_each_safe()
1454 * we could have a bitmap entry and an extent entry tree_insert_offset()
1456 * the extent entry to always be found first if we do a tree_insert_offset()
1460 * if we're inserting a bitmap and we find an entry at tree_insert_offset()
1461 * this offset, we want to go right, or after this entry tree_insert_offset()
1500 struct btrfs_free_space *entry, *prev = NULL; tree_search_offset() local
1502 /* find entry that is closest to the 'offset' */ tree_search_offset()
1505 entry = NULL; tree_search_offset()
1509 entry = rb_entry(n, struct btrfs_free_space, offset_index); tree_search_offset()
1510 prev = entry; tree_search_offset()
1512 if (offset < entry->offset) tree_search_offset()
1514 else if (offset > entry->offset) tree_search_offset()
1521 if (!entry) tree_search_offset()
1523 if (entry->bitmap) tree_search_offset()
1524 return entry; tree_search_offset()
1527 * a bitmap entry and an extent entry may share the same offset; tree_search_offset()
1528 * in that case, the bitmap entry comes after the extent entry. tree_search_offset()
1533 entry = rb_entry(n, struct btrfs_free_space, offset_index); tree_search_offset()
1534 if (entry->offset != offset) tree_search_offset()
1537 WARN_ON(!entry->bitmap); tree_search_offset()
1538 return entry; tree_search_offset()
1539 } else if (entry) { tree_search_offset()
1540 if (entry->bitmap) { tree_search_offset()
1542 * if previous extent entry covers the offset, tree_search_offset()
1543 * we should return it instead of the bitmap entry tree_search_offset()
1545 n = rb_prev(&entry->offset_index); tree_search_offset()
1551 entry = prev; tree_search_offset()
1554 return entry; tree_search_offset()
1560 /* find last entry before the 'offset' */ tree_search_offset()
1561 entry = prev; tree_search_offset()
1562 if (entry->offset > offset) { tree_search_offset()
1563 n = rb_prev(&entry->offset_index); tree_search_offset()
1565 entry = rb_entry(n, struct btrfs_free_space, tree_search_offset()
1567 ASSERT(entry->offset <= offset); tree_search_offset()
1570 return entry; tree_search_offset()
1576 if (entry->bitmap) { tree_search_offset()
1577 n = rb_prev(&entry->offset_index); tree_search_offset()
1585 if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) tree_search_offset()
1586 return entry; tree_search_offset()
1587 } else if (entry->offset + entry->bytes > offset) tree_search_offset()
1588 return entry; tree_search_offset()
1594 if (entry->bitmap) { tree_search_offset()
1595 if (entry->offset + BITS_PER_BITMAP * tree_search_offset()
1599 if (entry->offset + entry->bytes > offset) tree_search_offset()
1603 n = rb_next(&entry->offset_index); tree_search_offset()
1606 entry = rb_entry(n, struct btrfs_free_space, offset_index); tree_search_offset()
1608 return entry; tree_search_offset()
1680 * we want the extent entry threshold to always be at most 1/2 the max recalculate_thresholds()
1775 struct btrfs_free_space *entry; find_free_space() local
1784 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1); find_free_space()
1785 if (!entry) find_free_space()
1788 for (node = &entry->offset_index; node; node = rb_next(node)) { find_free_space()
1789 entry = rb_entry(node, struct btrfs_free_space, offset_index); find_free_space()
1790 if (entry->bytes < *bytes) { find_free_space()
1791 if (entry->bytes > *max_extent_size) find_free_space()
1792 *max_extent_size = entry->bytes; find_free_space()
1800 tmp = entry->offset - ctl->start + align - 1; find_free_space()
1803 align_off = tmp - entry->offset; find_free_space()
1806 tmp = entry->offset; find_free_space()
1809 if (entry->bytes < *bytes + align_off) { find_free_space()
1810 if (entry->bytes > *max_extent_size) find_free_space()
1811 *max_extent_size = entry->bytes; find_free_space()
1815 if (entry->bitmap) { find_free_space()
1818 ret = search_bitmap(ctl, entry, &tmp, &size); find_free_space()
1822 return entry; find_free_space()
1830 *bytes = entry->bytes - align_off; find_free_space()
1831 return entry; find_free_space()
1899 * no entry after this bitmap, but we still have bytes to remove_from_bitmap()
1909 * if the next entry isn't a bitmap we need to return to let the remove_from_bitmap()
1983 * entry. use_bitmap()
2022 struct btrfs_free_space *entry; insert_into_bitmap() local
2034 entry = rb_entry(node, struct btrfs_free_space, offset_index); insert_into_bitmap()
2035 if (!entry->bitmap) { insert_into_bitmap()
2040 if (entry->offset == offset_to_bitmap(ctl, offset)) { insert_into_bitmap()
2041 bytes_added = add_bytes_to_bitmap(ctl, entry, insert_into_bitmap()
2247 * entry, try to see if there's adjacent free space in bitmap entries, and if
2250 * because we attempt to satisfy them based on a single cache entry, and never
2252 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
2316 * attempt to steal space from bitmaps if we're adding an extent entry. __btrfs_add_free_space()
2395 /* Not enough bytes in this entry to satisfy us */ btrfs_remove_free_space()
2437 "entry offset %llu, bytes %llu, bitmap %s", btrfs_dump_free_space()
2480 struct btrfs_free_space *entry; __btrfs_return_cluster_to_free_space() local
2495 entry = rb_entry(node, struct btrfs_free_space, offset_index); __btrfs_return_cluster_to_free_space()
2496 node = rb_next(&entry->offset_index); __btrfs_return_cluster_to_free_space()
2497 rb_erase(&entry->offset_index, &cluster->root); __btrfs_return_cluster_to_free_space()
2498 RB_CLEAR_NODE(&entry->offset_index); __btrfs_return_cluster_to_free_space()
2500 bitmap = (entry->bitmap != NULL); __btrfs_return_cluster_to_free_space()
2502 try_merge_free_space(ctl, entry, false); __btrfs_return_cluster_to_free_space()
2503 steal_from_bitmap(ctl, entry, false); __btrfs_return_cluster_to_free_space()
2506 entry->offset, &entry->offset_index, bitmap); __btrfs_return_cluster_to_free_space()
2569 struct btrfs_free_space *entry = NULL; btrfs_find_space_for_alloc() local
2576 entry = find_free_space(ctl, &offset, &bytes_search, btrfs_find_space_for_alloc()
2578 if (!entry) btrfs_find_space_for_alloc()
2582 if (entry->bitmap) { btrfs_find_space_for_alloc()
2583 bitmap_clear_bits(ctl, entry, offset, bytes); btrfs_find_space_for_alloc()
2584 if (!entry->bytes) btrfs_find_space_for_alloc()
2585 free_bitmap(ctl, entry); btrfs_find_space_for_alloc()
2587 unlink_free_space(ctl, entry); btrfs_find_space_for_alloc()
2588 align_gap_len = offset - entry->offset; btrfs_find_space_for_alloc()
2589 align_gap = entry->offset; btrfs_find_space_for_alloc()
2591 entry->offset = offset + bytes; btrfs_find_space_for_alloc()
2592 WARN_ON(entry->bytes < bytes + align_gap_len); btrfs_find_space_for_alloc()
2594 entry->bytes -= bytes + align_gap_len; btrfs_find_space_for_alloc()
2595 if (!entry->bytes) btrfs_find_space_for_alloc()
2596 kmem_cache_free(btrfs_free_space_cachep, entry); btrfs_find_space_for_alloc()
2598 link_free_space(ctl, entry); btrfs_find_space_for_alloc()
2653 struct btrfs_free_space *entry, btrfs_alloc_from_bitmap()
2666 err = search_bitmap(ctl, entry, &search_start, &search_bytes); btrfs_alloc_from_bitmap()
2674 __bitmap_clear_bits(ctl, entry, ret, bytes); btrfs_alloc_from_bitmap()
2689 struct btrfs_free_space *entry = NULL; btrfs_alloc_from_cluster() local
2704 entry = rb_entry(node, struct btrfs_free_space, offset_index); btrfs_alloc_from_cluster()
2706 if (entry->bytes < bytes && entry->bytes > *max_extent_size) btrfs_alloc_from_cluster()
2707 *max_extent_size = entry->bytes; btrfs_alloc_from_cluster()
2709 if (entry->bytes < bytes || btrfs_alloc_from_cluster()
2710 (!entry->bitmap && entry->offset < min_start)) { btrfs_alloc_from_cluster()
2711 node = rb_next(&entry->offset_index); btrfs_alloc_from_cluster()
2714 entry = rb_entry(node, struct btrfs_free_space, btrfs_alloc_from_cluster()
2719 if (entry->bitmap) { btrfs_alloc_from_cluster()
2721 cluster, entry, bytes, btrfs_alloc_from_cluster()
2725 node = rb_next(&entry->offset_index); btrfs_alloc_from_cluster()
2728 entry = rb_entry(node, struct btrfs_free_space, btrfs_alloc_from_cluster()
2734 ret = entry->offset; btrfs_alloc_from_cluster()
2736 entry->offset += bytes; btrfs_alloc_from_cluster()
2737 entry->bytes -= bytes; btrfs_alloc_from_cluster()
2740 if (entry->bytes == 0) btrfs_alloc_from_cluster()
2741 rb_erase(&entry->offset_index, &cluster->root); btrfs_alloc_from_cluster()
2753 if (entry->bytes == 0) { btrfs_alloc_from_cluster()
2755 if (entry->bitmap) { btrfs_alloc_from_cluster()
2756 kfree(entry->bitmap); btrfs_alloc_from_cluster()
2760 kmem_cache_free(btrfs_free_space_cachep, entry); btrfs_alloc_from_cluster()
2769 struct btrfs_free_space *entry, btrfs_bitmap_cluster()
2784 i = offset_to_bit(entry->offset, ctl->unit, btrfs_bitmap_cluster()
2785 max_t(u64, offset, entry->offset)); btrfs_bitmap_cluster()
2791 for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) { btrfs_bitmap_cluster()
2792 next_zero = find_next_zero_bit(entry->bitmap, btrfs_bitmap_cluster()
2819 cluster->window_start = start * ctl->unit + entry->offset; btrfs_bitmap_cluster()
2820 rb_erase(&entry->offset_index, &ctl->free_space_offset); btrfs_bitmap_cluster()
2821 ret = tree_insert_offset(&cluster->root, entry->offset, btrfs_bitmap_cluster()
2822 &entry->offset_index, 1); btrfs_bitmap_cluster()
2843 struct btrfs_free_space *entry = NULL; setup_cluster_no_bitmap() local
2850 entry = tree_search_offset(ctl, offset, 0, 1); setup_cluster_no_bitmap()
2851 if (!entry) setup_cluster_no_bitmap()
2856 * extent entry. setup_cluster_no_bitmap()
2858 while (entry->bitmap || entry->bytes < min_bytes) { setup_cluster_no_bitmap()
2859 if (entry->bitmap && list_empty(&entry->list)) setup_cluster_no_bitmap()
2860 list_add_tail(&entry->list, bitmaps); setup_cluster_no_bitmap()
2861 node = rb_next(&entry->offset_index); setup_cluster_no_bitmap()
2864 entry = rb_entry(node, struct btrfs_free_space, offset_index); setup_cluster_no_bitmap()
2867 window_free = entry->bytes; setup_cluster_no_bitmap()
2868 max_extent = entry->bytes; setup_cluster_no_bitmap()
2869 first = entry; setup_cluster_no_bitmap()
2870 last = entry; setup_cluster_no_bitmap()
2872 for (node = rb_next(&entry->offset_index); node; setup_cluster_no_bitmap()
2873 node = rb_next(&entry->offset_index)) { setup_cluster_no_bitmap()
2874 entry = rb_entry(node, struct btrfs_free_space, offset_index); setup_cluster_no_bitmap()
2876 if (entry->bitmap) { setup_cluster_no_bitmap()
2877 if (list_empty(&entry->list)) setup_cluster_no_bitmap()
2878 list_add_tail(&entry->list, bitmaps); setup_cluster_no_bitmap()
2882 if (entry->bytes < min_bytes) setup_cluster_no_bitmap()
2885 last = entry; setup_cluster_no_bitmap()
2886 window_free += entry->bytes; setup_cluster_no_bitmap()
2887 if (entry->bytes > max_extent) setup_cluster_no_bitmap()
2888 max_extent = entry->bytes; setup_cluster_no_bitmap()
2905 entry = rb_entry(node, struct btrfs_free_space, offset_index); setup_cluster_no_bitmap()
2906 node = rb_next(&entry->offset_index); setup_cluster_no_bitmap()
2907 if (entry->bitmap || entry->bytes < min_bytes) setup_cluster_no_bitmap()
2910 rb_erase(&entry->offset_index, &ctl->free_space_offset); setup_cluster_no_bitmap()
2911 ret = tree_insert_offset(&cluster->root, entry->offset, setup_cluster_no_bitmap()
2912 &entry->offset_index, 0); setup_cluster_no_bitmap()
2913 total_size += entry->bytes; setup_cluster_no_bitmap()
2915 } while (node && entry != last); setup_cluster_no_bitmap()
2933 struct btrfs_free_space *entry; setup_cluster_bitmap() local
2944 entry = list_first_entry(bitmaps, struct btrfs_free_space, list); setup_cluster_bitmap()
2945 if (entry->offset != bitmap_offset) { setup_cluster_bitmap()
2946 entry = tree_search_offset(ctl, bitmap_offset, 1, 0); setup_cluster_bitmap()
2947 if (entry && list_empty(&entry->list)) setup_cluster_bitmap()
2948 list_add(&entry->list, bitmaps); setup_cluster_bitmap()
2951 list_for_each_entry(entry, bitmaps, list) { list_for_each_entry()
2952 if (entry->bytes < bytes) list_for_each_entry()
2954 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, list_for_each_entry()
2981 struct btrfs_free_space *entry, *tmp; btrfs_find_space_cluster() local
3034 list_for_each_entry_safe(entry, tmp, &bitmaps, list) btrfs_find_space_cluster()
3035 list_del_init(&entry->list); btrfs_find_space_cluster()
3115 struct btrfs_free_space *entry; trim_no_bitmap() local
3134 entry = tree_search_offset(ctl, start, 0, 1); trim_no_bitmap()
3135 if (!entry) { trim_no_bitmap()
3142 while (entry->bitmap) { trim_no_bitmap()
3143 node = rb_next(&entry->offset_index); trim_no_bitmap()
3149 entry = rb_entry(node, struct btrfs_free_space, trim_no_bitmap()
3153 if (entry->offset >= end) { trim_no_bitmap()
3159 extent_start = entry->offset; trim_no_bitmap()
3160 extent_bytes = entry->bytes; trim_no_bitmap()
3169 unlink_free_space(ctl, entry); trim_no_bitmap()
3170 kmem_cache_free(btrfs_free_space_cachep, entry); trim_no_bitmap()
3200 struct btrfs_free_space *entry; trim_bitmaps() local
3219 entry = tree_search_offset(ctl, offset, 1, 0); trim_bitmaps()
3220 if (!entry) { trim_bitmaps()
3228 ret2 = search_bitmap(ctl, entry, &start, &bytes); trim_bitmaps()
3243 bitmap_clear_bits(ctl, entry, start, bytes); trim_bitmaps()
3244 if (entry->bytes == 0) trim_bitmaps()
3245 free_bitmap(ctl, entry); trim_bitmaps()
3325 * We've left one free space entry, and other tasks trimming btrfs_trim_block_group()
3326 * this block group have each left one entry. Free them. btrfs_trim_block_group()
3346 struct btrfs_free_space *entry = NULL; btrfs_find_ino_for_alloc() local
3354 entry = rb_entry(rb_first(&ctl->free_space_offset), btrfs_find_ino_for_alloc()
3357 if (!entry->bitmap) { btrfs_find_ino_for_alloc()
3358 ino = entry->offset; btrfs_find_ino_for_alloc()
3360 unlink_free_space(ctl, entry); btrfs_find_ino_for_alloc()
3361 entry->offset++; btrfs_find_ino_for_alloc()
3362 entry->bytes--; btrfs_find_ino_for_alloc()
3363 if (!entry->bytes) btrfs_find_ino_for_alloc()
3364 kmem_cache_free(btrfs_free_space_cachep, entry); btrfs_find_ino_for_alloc()
3366 link_free_space(ctl, entry); btrfs_find_ino_for_alloc()
3372 ret = search_bitmap(ctl, entry, &offset, &count); btrfs_find_ino_for_alloc()
3377 bitmap_clear_bits(ctl, entry, offset, 1); btrfs_find_ino_for_alloc()
3378 if (entry->bytes == 0) btrfs_find_ino_for_alloc()
3379 free_bitmap(ctl, entry); btrfs_find_ino_for_alloc()
3503 * Use this if you need to make a bitmap or extent entry specifically, it
590 io_ctl_read_entry(struct btrfs_io_ctl *io_ctl, struct btrfs_free_space *entry, u8 *type) io_ctl_read_entry() argument
617 io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl, struct btrfs_free_space *entry) io_ctl_read_bitmap() argument
2651 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, struct btrfs_free_space *entry, u64 bytes, u64 min_start, u64 *max_extent_size) btrfs_alloc_from_bitmap() argument
2768 btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *entry, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 cont1_bytes, u64 min_bytes) btrfs_bitmap_cluster() argument
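The btrfs_alloc_from_cluster() excerpt above (source lines 2734-2741) carves an allocation from the front of a free-space extent: the returned address is the entry's old offset, the entry is advanced and shrunk by the allocated bytes, and an exhausted entry is unlinked and freed. A minimal user-space sketch of that carving pattern, assuming hypothetical names and a plain singly-linked list in place of the rbtree:

#include <stdio.h>
#include <stdlib.h>

struct free_extent {
        unsigned long long offset;
        unsigned long long bytes;
        struct free_extent *next;
};

/* Return the start of the carved region, or -1ULL when nothing fits. */
static unsigned long long alloc_from_extents(struct free_extent **head,
                                             unsigned long long bytes)
{
        struct free_extent **pp;

        for (pp = head; *pp; pp = &(*pp)->next) {
                struct free_extent *e = *pp;
                unsigned long long ret;

                if (e->bytes < bytes)
                        continue;
                ret = e->offset;
                e->offset += bytes;        /* carve from the front */
                e->bytes -= bytes;
                if (e->bytes == 0) {       /* drop the exhausted entry */
                        *pp = e->next;
                        free(e);
                }
                return ret;
        }
        return -1ULL;
}

int main(void)
{
        struct free_extent *head = malloc(sizeof(*head));

        if (!head)
                return 1;
        head->offset = 4096;
        head->bytes = 8192;
        head->next = NULL;
        printf("%llu\n", alloc_from_extents(&head, 4096));  /* 4096 */
        printf("%llu\n", alloc_from_extents(&head, 4096));  /* 8192 */
        return 0;
}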
/linux-4.1.27/drivers/misc/vmw_vmci/
H A Dvmci_doorbell.c82 /* This is a one-entry cache used by the index allocation. */
99 struct dbell_entry *entry; vmci_dbell_get_priv_flags() local
107 entry = container_of(resource, struct dbell_entry, resource); vmci_dbell_get_priv_flags()
108 *priv_flags = entry->priv_flags; vmci_dbell_get_priv_flags()
124 * Find doorbell entry by bitmap index.
141 * Add the given entry to the index table. This will take a reference to the
142 * entry's resource so that the entry is not deleted before it is removed from
145 static void dbell_index_table_add(struct dbell_entry *entry) dbell_index_table_add() argument
150 vmci_resource_get(&entry->resource); dbell_index_table_add()
194 entry->idx = new_notify_idx; dbell_index_table_add()
195 bucket = VMCI_DOORBELL_HASH(entry->idx); dbell_index_table_add()
196 hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]); dbell_index_table_add()
202 * Remove the given entry from the index table. This will release() the
203 * entry's resource.
205 static void dbell_index_table_remove(struct dbell_entry *entry) dbell_index_table_remove() argument
209 hlist_del_init(&entry->node); dbell_index_table_remove()
212 if (entry->idx == max_notify_idx - 1) { dbell_index_table_remove()
214 * If we delete an entry with the maximum known dbell_index_table_remove()
225 last_notify_idx_released = entry->idx; dbell_index_table_remove()
229 vmci_resource_put(&entry->resource); dbell_index_table_remove()
290 struct dbell_entry *entry = container_of(work, dbell_delayed_dispatch() local
293 entry->notify_cb(entry->client_data); dbell_delayed_dispatch()
294 vmci_resource_put(&entry->resource); dbell_delayed_dispatch()
302 struct dbell_entry *entry; vmci_dbell_host_context_notify() local
319 entry = container_of(resource, struct dbell_entry, resource); vmci_dbell_host_context_notify()
320 if (entry->run_delayed) { vmci_dbell_host_context_notify()
321 schedule_work(&entry->work); vmci_dbell_host_context_notify()
323 entry->notify_cb(entry->client_data); vmci_dbell_host_context_notify()
417 struct dbell_entry *entry; vmci_doorbell_create() local
425 entry = kmalloc(sizeof(*entry), GFP_KERNEL); vmci_doorbell_create()
426 if (entry == NULL) { vmci_doorbell_create()
427 pr_warn("Failed allocating memory for doorbell entry\n"); vmci_doorbell_create()
462 entry->idx = 0; vmci_doorbell_create()
463 INIT_HLIST_NODE(&entry->node); vmci_doorbell_create()
464 entry->priv_flags = priv_flags; vmci_doorbell_create()
465 INIT_WORK(&entry->work, dbell_delayed_dispatch); vmci_doorbell_create()
466 entry->run_delayed = flags & VMCI_FLAG_DELAYED_CB; vmci_doorbell_create()
467 entry->notify_cb = notify_cb; vmci_doorbell_create()
468 entry->client_data = client_data; vmci_doorbell_create()
469 atomic_set(&entry->active, 0); vmci_doorbell_create()
471 result = vmci_resource_add(&entry->resource, vmci_doorbell_create()
480 new_handle = vmci_resource_handle(&entry->resource); vmci_doorbell_create()
482 dbell_index_table_add(entry); vmci_doorbell_create()
483 result = dbell_link(new_handle, entry->idx); vmci_doorbell_create()
487 atomic_set(&entry->active, 1); vmci_doorbell_create()
495 dbell_index_table_remove(entry); vmci_doorbell_create()
496 vmci_resource_remove(&entry->resource); vmci_doorbell_create()
498 kfree(entry); vmci_doorbell_create()
512 struct dbell_entry *entry; vmci_doorbell_destroy() local
526 entry = container_of(resource, struct dbell_entry, resource); vmci_doorbell_destroy()
531 dbell_index_table_remove(entry); vmci_doorbell_destroy()
558 vmci_resource_put(&entry->resource); vmci_doorbell_destroy()
559 vmci_resource_remove(&entry->resource); vmci_doorbell_destroy()
561 kfree(entry); vmci_doorbell_destroy()
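dbell_delayed_dispatch() above maps the work item back to its enclosing dbell_entry with container_of() before running the notify callback. A stand-alone sketch of that embed-and-recover pattern; the struct layout and names here are hypothetical, not the VMCI driver's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct dbell {
        int idx;
        void (*notify_cb)(void *data);
        void *client_data;
        struct work work;               /* embedded, like entry->work */
};

static void delayed_dispatch(struct work *w)
{
        /* Recover the entry that embeds this work item. */
        struct dbell *entry = container_of(w, struct dbell, work);

        entry->notify_cb(entry->client_data);
}

static void ring(void *data)
{
        printf("doorbell: %s\n", (const char *)data);
}

int main(void)
{
        struct dbell d = { .idx = 1, .notify_cb = ring,
                           .client_data = "rang" };

        delayed_dispatch(&d.work);      /* prints "doorbell: rang" */
        return 0;
}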
H A Dvmci_queue_pair.c888 * Finds the entry in the list corresponding to a given handle. Assumes
894 struct qp_entry *entry; qp_list_find() local
899 list_for_each_entry(entry, &qp_list->head, list_item) { qp_list_find()
900 if (vmci_handle_is_equal(entry->handle, handle)) qp_list_find()
901 return entry; qp_list_find()
908 * Finds the entry in the list corresponding to a given handle.
913 struct qp_guest_endpoint *entry; qp_guest_handle_to_entry() local
916 entry = qp ? container_of( qp_guest_handle_to_entry()
918 return entry; qp_guest_handle_to_entry()
922 * Finds the entry in the list corresponding to a given handle.
927 struct qp_broker_entry *entry; qp_broker_handle_to_entry() local
930 entry = qp ? container_of( qp_broker_handle_to_entry()
932 return entry; qp_broker_handle_to_entry()
958 * Allocates a queue_pair rid (and handle) iff the given entry has
973 struct qp_guest_endpoint *entry; qp_guest_endpoint_create() local
984 entry = kzalloc(sizeof(*entry), GFP_KERNEL); qp_guest_endpoint_create()
985 if (entry) { qp_guest_endpoint_create()
986 entry->qp.peer = peer; qp_guest_endpoint_create()
987 entry->qp.flags = flags; qp_guest_endpoint_create()
988 entry->qp.produce_size = produce_size; qp_guest_endpoint_create()
989 entry->qp.consume_size = consume_size; qp_guest_endpoint_create()
990 entry->qp.ref_count = 0; qp_guest_endpoint_create()
991 entry->num_ppns = num_ppns; qp_guest_endpoint_create()
992 entry->produce_q = produce_q; qp_guest_endpoint_create()
993 entry->consume_q = consume_q; qp_guest_endpoint_create()
994 INIT_LIST_HEAD(&entry->qp.list_item); qp_guest_endpoint_create()
997 result = vmci_resource_add(&entry->resource, qp_guest_endpoint_create()
1000 entry->qp.handle = vmci_resource_handle(&entry->resource); qp_guest_endpoint_create()
1002 qp_list_find(&qp_guest_endpoints, entry->qp.handle)) { qp_guest_endpoint_create()
1005 kfree(entry); qp_guest_endpoint_create()
1006 entry = NULL; qp_guest_endpoint_create()
1009 return entry; qp_guest_endpoint_create()
1015 static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry) qp_guest_endpoint_destroy() argument
1017 qp_free_ppn_set(&entry->ppn_set); qp_guest_endpoint_destroy()
1018 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); qp_guest_endpoint_destroy()
1019 qp_free_queue(entry->produce_q, entry->qp.produce_size); qp_guest_endpoint_destroy()
1020 qp_free_queue(entry->consume_q, entry->qp.consume_size); qp_guest_endpoint_destroy()
1022 vmci_resource_remove(&entry->resource); qp_guest_endpoint_destroy()
1024 kfree(entry); qp_guest_endpoint_destroy()
1031 static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry) qp_alloc_hypercall() argument
1037 if (!entry || entry->num_ppns <= 2) qp_alloc_hypercall()
1041 (size_t) entry->num_ppns * sizeof(u32); qp_alloc_hypercall()
1050 alloc_msg->handle = entry->qp.handle; qp_alloc_hypercall()
1051 alloc_msg->peer = entry->qp.peer; qp_alloc_hypercall()
1052 alloc_msg->flags = entry->qp.flags; qp_alloc_hypercall()
1053 alloc_msg->produce_size = entry->qp.produce_size; qp_alloc_hypercall()
1054 alloc_msg->consume_size = entry->qp.consume_size; qp_alloc_hypercall()
1055 alloc_msg->num_ppns = entry->num_ppns; qp_alloc_hypercall()
1058 &entry->ppn_set); qp_alloc_hypercall()
1085 * Adds the given entry to the list. Assumes that the list is locked.
1087 static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry) qp_list_add_entry() argument
1089 if (entry) qp_list_add_entry()
1090 list_add(&entry->list_item, &qp_list->head); qp_list_add_entry()
1094 * Removes the given entry from the list. Assumes that the list is locked.
1097 struct qp_entry *entry) qp_list_remove_entry()
1099 if (entry) qp_list_remove_entry()
1100 list_del(&entry->list_item); qp_list_remove_entry()
1110 struct qp_guest_endpoint *entry; qp_detatch_guest_work() local
1115 entry = qp_guest_handle_to_entry(handle); qp_detatch_guest_work()
1116 if (!entry) { qp_detatch_guest_work()
1121 if (entry->qp.flags & VMCI_QPFLAG_LOCAL) { qp_detatch_guest_work()
1124 if (entry->qp.ref_count > 1) { qp_detatch_guest_work()
1129 * to release the entry if that happens, so qp_detatch_guest_work()
1140 * release the entry yet. It will get cleaned qp_detatch_guest_work()
1153 * we succeeded in all cases. Release the entry if required. qp_detatch_guest_work()
1156 entry->qp.ref_count--; qp_detatch_guest_work()
1157 if (entry->qp.ref_count == 0) qp_detatch_guest_work()
1158 qp_list_remove_entry(&qp_guest_endpoints, &entry->qp); qp_detatch_guest_work()
1160 /* If we didn't remove the entry, this could change once we unlock. */ qp_detatch_guest_work()
1161 if (entry) qp_detatch_guest_work()
1162 ref_count = entry->qp.ref_count; qp_detatch_guest_work()
1167 qp_guest_endpoint_destroy(entry); qp_detatch_guest_work()
1343 /* This path should only be used when an existing entry was found. */ qp_alloc_guest_work()
1377 struct qp_broker_entry *entry = NULL; qp_broker_create() local
1405 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); qp_broker_create()
1406 if (!entry) qp_broker_create()
1411 * The queue pair broker entry stores values from the guest qp_broker_create()
1425 entry->qp.handle = handle; qp_broker_create()
1426 entry->qp.peer = peer; qp_broker_create()
1427 entry->qp.flags = flags; qp_broker_create()
1428 entry->qp.produce_size = guest_produce_size; qp_broker_create()
1429 entry->qp.consume_size = guest_consume_size; qp_broker_create()
1430 entry->qp.ref_count = 1; qp_broker_create()
1431 entry->create_id = context_id; qp_broker_create()
1432 entry->attach_id = VMCI_INVALID_ID; qp_broker_create()
1433 entry->state = VMCIQPB_NEW; qp_broker_create()
1434 entry->require_trusted_attach = qp_broker_create()
1436 entry->created_by_trusted = qp_broker_create()
1438 entry->vmci_page_files = false; qp_broker_create()
1439 entry->wakeup_cb = wakeup_cb; qp_broker_create()
1440 entry->client_data = client_data; qp_broker_create()
1441 entry->produce_q = qp_host_alloc_queue(guest_produce_size); qp_broker_create()
1442 if (entry->produce_q == NULL) { qp_broker_create()
1446 entry->consume_q = qp_host_alloc_queue(guest_consume_size); qp_broker_create()
1447 if (entry->consume_q == NULL) { qp_broker_create()
1452 qp_init_queue_mutex(entry->produce_q, entry->consume_q); qp_broker_create()
1454 INIT_LIST_HEAD(&entry->qp.list_item); qp_broker_create()
1459 entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp), qp_broker_create()
1461 if (entry->local_mem == NULL) { qp_broker_create()
1465 entry->state = VMCIQPB_CREATED_MEM; qp_broker_create()
1466 entry->produce_q->q_header = entry->local_mem; qp_broker_create()
1467 tmp = (u8 *)entry->local_mem + PAGE_SIZE * qp_broker_create()
1468 (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1); qp_broker_create()
1469 entry->consume_q->q_header = (struct vmci_queue_header *)tmp; qp_broker_create()
1476 entry->produce_q, qp_broker_create()
1477 entry->consume_q); qp_broker_create()
1481 entry->state = VMCIQPB_CREATED_MEM; qp_broker_create()
1490 entry->state = VMCIQPB_CREATED_NO_MEM; qp_broker_create()
1493 qp_list_add_entry(&qp_broker_list, &entry->qp); qp_broker_create()
1495 *ent = entry; qp_broker_create()
1498 result = vmci_resource_add(&entry->resource, qp_broker_create()
1507 entry->qp.handle = vmci_resource_handle(&entry->resource); qp_broker_create()
1509 vmci_q_header_init(entry->produce_q->q_header, qp_broker_create()
1510 entry->qp.handle); qp_broker_create()
1511 vmci_q_header_init(entry->consume_q->q_header, qp_broker_create()
1512 entry->qp.handle); qp_broker_create()
1515 vmci_ctx_qp_create(context, entry->qp.handle); qp_broker_create()
1520 if (entry != NULL) { qp_broker_create()
1521 qp_host_free_queue(entry->produce_q, guest_produce_size); qp_broker_create()
1522 qp_host_free_queue(entry->consume_q, guest_consume_size); qp_broker_create()
1523 kfree(entry); qp_broker_create()
1594 static int qp_broker_attach(struct qp_broker_entry *entry, qp_broker_attach() argument
1610 if (entry->state != VMCIQPB_CREATED_NO_MEM && qp_broker_attach()
1611 entry->state != VMCIQPB_CREATED_MEM) qp_broker_attach()
1615 if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) || qp_broker_attach()
1616 context_id != entry->create_id) { qp_broker_attach()
1619 } else if (context_id == entry->create_id || qp_broker_attach()
1620 context_id == entry->attach_id) { qp_broker_attach()
1625 VMCI_CONTEXT_IS_VM(entry->create_id)) qp_broker_attach()
1633 !entry->created_by_trusted) qp_broker_attach()
1640 if (entry->require_trusted_attach && qp_broker_attach()
1648 if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id) qp_broker_attach()
1651 if (entry->create_id == VMCI_HOST_CONTEXT_ID) { qp_broker_attach()
1669 create_context = vmci_ctx_get(entry->create_id); qp_broker_attach()
1677 if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER)) qp_broker_attach()
1682 * The queue pair broker entry stores values from the guest qp_broker_attach()
1684 * stored in the entry. qp_broker_attach()
1687 if (entry->qp.produce_size != produce_size || qp_broker_attach()
1688 entry->qp.consume_size != consume_size) { qp_broker_attach()
1691 } else if (entry->qp.produce_size != consume_size || qp_broker_attach()
1692 entry->qp.consume_size != produce_size) { qp_broker_attach()
1710 if (entry->state != VMCIQPB_CREATED_NO_MEM) qp_broker_attach()
1722 entry->produce_q, qp_broker_attach()
1723 entry->consume_q); qp_broker_attach()
1727 entry->state = VMCIQPB_ATTACHED_MEM; qp_broker_attach()
1729 entry->state = VMCIQPB_ATTACHED_NO_MEM; qp_broker_attach()
1731 } else if (entry->state == VMCIQPB_CREATED_NO_MEM) { qp_broker_attach()
1742 entry->state = VMCIQPB_ATTACHED_MEM; qp_broker_attach()
1745 if (entry->state == VMCIQPB_ATTACHED_MEM) { qp_broker_attach()
1747 qp_notify_peer(true, entry->qp.handle, context_id, qp_broker_attach()
1748 entry->create_id); qp_broker_attach()
1751 entry->create_id, entry->qp.handle.context, qp_broker_attach()
1752 entry->qp.handle.resource); qp_broker_attach()
1755 entry->attach_id = context_id; qp_broker_attach()
1756 entry->qp.ref_count++; qp_broker_attach()
1758 entry->wakeup_cb = wakeup_cb; qp_broker_attach()
1759 entry->client_data = client_data; qp_broker_attach()
1764 * an entry tracking the queue pair, so don't add another one. qp_broker_attach()
1767 vmci_ctx_qp_create(context, entry->qp.handle); qp_broker_attach()
1770 *ent = entry; qp_broker_attach()
1794 struct qp_broker_entry *entry = NULL; qp_broker_alloc() local
1824 entry = qp_broker_handle_to_entry(handle); qp_broker_alloc()
1826 if (!entry) { qp_broker_alloc()
1835 qp_broker_attach(entry, peer, flags, priv_flags, qp_broker_alloc()
1866 struct qp_broker_entry *entry; qp_alloc_host_work() local
1877 entry = NULL; qp_alloc_host_work()
1881 wakeup_cb, client_data, &entry, &swap); qp_alloc_host_work()
1890 *produce_q = entry->consume_q; qp_alloc_host_work()
1891 *consume_q = entry->produce_q; qp_alloc_host_work()
1893 *produce_q = entry->produce_q; qp_alloc_host_work()
1894 *consume_q = entry->consume_q; qp_alloc_host_work()
1897 *handle = vmci_resource_handle(&entry->resource); qp_alloc_host_work()
1974 * Returns the entry from the head of the list. Assumes that the list is
1980 struct qp_entry *entry = qp_list_get_head() local
1983 return entry; qp_list_get_head()
1991 struct qp_entry *entry; vmci_qp_broker_exit() local
1996 while ((entry = qp_list_get_head(&qp_broker_list))) { vmci_qp_broker_exit()
1997 be = (struct qp_broker_entry *)entry; vmci_qp_broker_exit()
1999 qp_list_remove_entry(&qp_broker_list, entry); vmci_qp_broker_exit()
2008 * pair broker. Allocates a queue pair entry if one does not
2048 struct qp_broker_entry *entry; vmci_qp_broker_set_page_store() local
2073 entry = qp_broker_handle_to_entry(handle); vmci_qp_broker_set_page_store()
2074 if (!entry) { vmci_qp_broker_set_page_store()
2085 if (entry->create_id != context_id && vmci_qp_broker_set_page_store()
2086 (entry->create_id != VMCI_HOST_CONTEXT_ID || vmci_qp_broker_set_page_store()
2087 entry->attach_id != context_id)) { vmci_qp_broker_set_page_store()
2092 if (entry->state != VMCIQPB_CREATED_NO_MEM && vmci_qp_broker_set_page_store()
2093 entry->state != VMCIQPB_ATTACHED_NO_MEM) { vmci_qp_broker_set_page_store()
2099 entry->produce_q, entry->consume_q); vmci_qp_broker_set_page_store()
2103 result = qp_host_map_queues(entry->produce_q, entry->consume_q); vmci_qp_broker_set_page_store()
2105 qp_host_unregister_user_memory(entry->produce_q, vmci_qp_broker_set_page_store()
2106 entry->consume_q); vmci_qp_broker_set_page_store()
2110 if (entry->state == VMCIQPB_CREATED_NO_MEM) vmci_qp_broker_set_page_store()
2111 entry->state = VMCIQPB_CREATED_MEM; vmci_qp_broker_set_page_store()
2113 entry->state = VMCIQPB_ATTACHED_MEM; vmci_qp_broker_set_page_store()
2115 entry->vmci_page_files = true; vmci_qp_broker_set_page_store()
2117 if (entry->state == VMCIQPB_ATTACHED_MEM) { vmci_qp_broker_set_page_store()
2119 qp_notify_peer(true, handle, context_id, entry->create_id); vmci_qp_broker_set_page_store()
2122 entry->create_id, entry->qp.handle.context, vmci_qp_broker_set_page_store()
2123 entry->qp.handle.resource); vmci_qp_broker_set_page_store()
2135 * entry. Should be used when guest memory becomes available
2138 static void qp_reset_saved_headers(struct qp_broker_entry *entry) qp_reset_saved_headers() argument
2140 entry->produce_q->saved_header = NULL; qp_reset_saved_headers()
2141 entry->consume_q->saved_header = NULL; qp_reset_saved_headers()
2145 * The main entry point for detaching from a queue pair registered with the
2164 struct qp_broker_entry *entry; vmci_qp_broker_detach() local
2184 entry = qp_broker_handle_to_entry(handle); vmci_qp_broker_detach()
2185 if (!entry) { vmci_qp_broker_detach()
2192 if (context_id != entry->create_id && context_id != entry->attach_id) { vmci_qp_broker_detach()
2197 if (context_id == entry->create_id) { vmci_qp_broker_detach()
2198 peer_id = entry->attach_id; vmci_qp_broker_detach()
2199 entry->create_id = VMCI_INVALID_ID; vmci_qp_broker_detach()
2201 peer_id = entry->create_id; vmci_qp_broker_detach()
2202 entry->attach_id = VMCI_INVALID_ID; vmci_qp_broker_detach()
2204 entry->qp.ref_count--; vmci_qp_broker_detach()
2206 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; vmci_qp_broker_detach()
2219 qp_acquire_queue_mutex(entry->produce_q); vmci_qp_broker_detach()
2220 headers_mapped = entry->produce_q->q_header || vmci_qp_broker_detach()
2221 entry->consume_q->q_header; vmci_qp_broker_detach()
2222 if (QPBROKERSTATE_HAS_MEM(entry)) { vmci_qp_broker_detach()
2225 entry->produce_q, vmci_qp_broker_detach()
2226 entry->consume_q); vmci_qp_broker_detach()
2232 if (entry->vmci_page_files) vmci_qp_broker_detach()
2233 qp_host_unregister_user_memory(entry->produce_q, vmci_qp_broker_detach()
2234 entry-> vmci_qp_broker_detach()
2237 qp_host_unregister_user_memory(entry->produce_q, vmci_qp_broker_detach()
2238 entry-> vmci_qp_broker_detach()
2244 qp_reset_saved_headers(entry); vmci_qp_broker_detach()
2246 qp_release_queue_mutex(entry->produce_q); vmci_qp_broker_detach()
2248 if (!headers_mapped && entry->wakeup_cb) vmci_qp_broker_detach()
2249 entry->wakeup_cb(entry->client_data); vmci_qp_broker_detach()
2252 if (entry->wakeup_cb) { vmci_qp_broker_detach()
2253 entry->wakeup_cb = NULL; vmci_qp_broker_detach()
2254 entry->client_data = NULL; vmci_qp_broker_detach()
2258 if (entry->qp.ref_count == 0) { vmci_qp_broker_detach()
2259 qp_list_remove_entry(&qp_broker_list, &entry->qp); vmci_qp_broker_detach()
2262 kfree(entry->local_mem); vmci_qp_broker_detach()
2264 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); vmci_qp_broker_detach()
2265 qp_host_free_queue(entry->produce_q, entry->qp.produce_size); vmci_qp_broker_detach()
2266 qp_host_free_queue(entry->consume_q, entry->qp.consume_size); vmci_qp_broker_detach()
2268 vmci_resource_remove(&entry->resource); vmci_qp_broker_detach()
2270 kfree(entry); vmci_qp_broker_detach()
2276 QPBROKERSTATE_HAS_MEM(entry)) { vmci_qp_broker_detach()
2277 entry->state = VMCIQPB_SHUTDOWN_MEM; vmci_qp_broker_detach()
2279 entry->state = VMCIQPB_SHUTDOWN_NO_MEM; vmci_qp_broker_detach()
2302 struct qp_broker_entry *entry; vmci_qp_broker_map() local
2320 entry = qp_broker_handle_to_entry(handle); vmci_qp_broker_map()
2321 if (!entry) { vmci_qp_broker_map()
2328 if (context_id != entry->create_id && context_id != entry->attach_id) { vmci_qp_broker_map()
2333 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; vmci_qp_broker_map()
2340 page_store.len = QPE_NUM_PAGES(entry->qp); vmci_qp_broker_map()
2342 qp_acquire_queue_mutex(entry->produce_q); vmci_qp_broker_map()
2343 qp_reset_saved_headers(entry); vmci_qp_broker_map()
2346 entry->produce_q, vmci_qp_broker_map()
2347 entry->consume_q); vmci_qp_broker_map()
2348 qp_release_queue_mutex(entry->produce_q); vmci_qp_broker_map()
2352 entry->state++; vmci_qp_broker_map()
2354 if (entry->wakeup_cb) vmci_qp_broker_map()
2355 entry->wakeup_cb(entry->client_data); vmci_qp_broker_map()
2366 * entry. Should be used when guest memory is unmapped.
2371 static int qp_save_headers(struct qp_broker_entry *entry) qp_save_headers() argument
2375 if (entry->produce_q->saved_header != NULL && qp_save_headers()
2376 entry->consume_q->saved_header != NULL) { qp_save_headers()
2386 if (NULL == entry->produce_q->q_header || qp_save_headers()
2387 NULL == entry->consume_q->q_header) { qp_save_headers()
2388 result = qp_host_map_queues(entry->produce_q, entry->consume_q); qp_save_headers()
2393 memcpy(&entry->saved_produce_q, entry->produce_q->q_header, qp_save_headers()
2394 sizeof(entry->saved_produce_q)); qp_save_headers()
2395 entry->produce_q->saved_header = &entry->saved_produce_q; qp_save_headers()
2396 memcpy(&entry->saved_consume_q, entry->consume_q->q_header, qp_save_headers()
2397 sizeof(entry->saved_consume_q)); qp_save_headers()
2398 entry->consume_q->saved_header = &entry->saved_consume_q; qp_save_headers()
2413 struct qp_broker_entry *entry; vmci_qp_broker_unmap() local
2431 entry = qp_broker_handle_to_entry(handle); vmci_qp_broker_unmap()
2432 if (!entry) { vmci_qp_broker_unmap()
2439 if (context_id != entry->create_id && context_id != entry->attach_id) { vmci_qp_broker_unmap()
2444 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; vmci_qp_broker_unmap()
2447 qp_acquire_queue_mutex(entry->produce_q); vmci_qp_broker_unmap()
2448 result = qp_save_headers(entry); vmci_qp_broker_unmap()
2453 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); vmci_qp_broker_unmap()
2462 qp_host_unregister_user_memory(entry->produce_q, vmci_qp_broker_unmap()
2463 entry->consume_q); vmci_qp_broker_unmap()
2468 entry->state--; vmci_qp_broker_unmap()
2470 qp_release_queue_mutex(entry->produce_q); vmci_qp_broker_unmap()
2488 struct qp_entry *entry; vmci_qp_guest_endpoints_exit() local
2493 while ((entry = qp_list_get_head(&qp_guest_endpoints))) { vmci_qp_guest_endpoints_exit()
2494 ep = (struct qp_guest_endpoint *)entry; vmci_qp_guest_endpoints_exit()
2497 if (!(entry->flags & VMCI_QPFLAG_LOCAL)) vmci_qp_guest_endpoints_exit()
2498 qp_detatch_hypercall(entry->handle); vmci_qp_guest_endpoints_exit()
2501 entry->ref_count = 0; vmci_qp_guest_endpoints_exit()
2502 qp_list_remove_entry(&qp_guest_endpoints, entry); vmci_qp_guest_endpoints_exit()
2875 * if it does so, it will cleanup the entry (if there is one). vmci_qpair_detach()
2876 * The host can fail too, but it won't cleanup the entry vmci_qpair_detach()
1096 qp_list_remove_entry(struct qp_list *qp_list, struct qp_entry *entry) qp_list_remove_entry() argument
/linux-4.1.27/arch/x86/xen/
H A Dsmp.h12 extern void xen_pvh_early_cpu_init(int cpu, bool entry);
14 static inline void xen_pvh_early_cpu_init(int cpu, bool entry) xen_pvh_early_cpu_init() argument
/linux-4.1.27/arch/mips/kernel/
H A Dperf_event.c28 static void save_raw_perf_callchain(struct perf_callchain_entry *entry, save_raw_perf_callchain() argument
37 perf_callchain_store(entry, addr); save_raw_perf_callchain()
38 if (entry->nr >= PERF_MAX_STACK_DEPTH) save_raw_perf_callchain()
44 void perf_callchain_kernel(struct perf_callchain_entry *entry, perf_callchain_kernel() argument
57 save_raw_perf_callchain(entry, sp); perf_callchain_kernel()
61 perf_callchain_store(entry, pc); perf_callchain_kernel()
62 if (entry->nr >= PERF_MAX_STACK_DEPTH) perf_callchain_kernel()
67 save_raw_perf_callchain(entry, sp); perf_callchain_kernel()
/linux-4.1.27/arch/cris/include/arch-v32/arch/
H A Dtlb.h5 * The TLB is a 64-entry cache. Each entry has an 8-bit page_id that is used
/linux-4.1.27/sound/core/seq/
H A Dseq_info.c40 struct snd_info_entry *entry; create_info_entry() local
42 entry = snd_info_create_module_entry(THIS_MODULE, name, snd_seq_root); create_info_entry()
43 if (entry == NULL) create_info_entry()
45 entry->content = SNDRV_INFO_CONTENT_TEXT; create_info_entry()
46 entry->c.text.read = read; create_info_entry()
47 if (snd_info_register(entry) < 0) { create_info_entry()
48 snd_info_free_entry(entry); create_info_entry()
51 return entry; create_info_entry()
/linux-4.1.27/sound/firewire/oxfw/
H A Doxfw-proc.c11 static void proc_read_formation(struct snd_info_entry *entry, proc_read_formation() argument
14 struct snd_oxfw *oxfw = entry->private_data; proc_read_formation()
83 struct snd_info_entry *entry; add_node() local
85 entry = snd_info_create_card_entry(oxfw->card, name, root); add_node()
86 if (entry == NULL) add_node()
89 snd_info_set_text_ops(entry, oxfw, op); add_node()
90 if (snd_info_register(entry) < 0) add_node()
91 snd_info_free_entry(entry); add_node()
/linux-4.1.27/drivers/pci/
H A Dmsi.c124 struct msi_desc *entry; arch_setup_msi_irqs() local
134 list_for_each_entry(entry, &dev->msi_list, list) { arch_setup_msi_irqs()
135 ret = arch_setup_msi_irq(dev, entry); arch_setup_msi_irqs()
152 struct msi_desc *entry; default_teardown_msi_irqs() local
154 list_for_each_entry(entry, &dev->msi_list, list) default_teardown_msi_irqs()
155 if (entry->irq) default_teardown_msi_irqs()
156 for (i = 0; i < entry->nvec_used; i++) default_teardown_msi_irqs()
157 arch_teardown_msi_irq(entry->irq + i); default_teardown_msi_irqs()
167 struct msi_desc *entry; default_restore_msi_irq() local
169 entry = NULL; default_restore_msi_irq()
171 list_for_each_entry(entry, &dev->msi_list, list) { default_restore_msi_irq()
172 if (irq == entry->irq) default_restore_msi_irq()
176 entry = irq_get_msi_desc(irq); default_restore_msi_irq()
179 if (entry) default_restore_msi_irq()
180 __pci_write_msi_msg(entry, &entry->msg); default_restore_msi_irq()
304 struct msi_desc *entry; default_restore_msi_irqs() local
306 list_for_each_entry(entry, &dev->msi_list, list) default_restore_msi_irqs()
307 default_restore_msi_irq(dev, entry->irq); default_restore_msi_irqs()
310 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) __pci_read_msi_msg() argument
312 BUG_ON(entry->dev->current_state != PCI_D0); __pci_read_msi_msg()
314 if (entry->msi_attrib.is_msix) { __pci_read_msi_msg()
315 void __iomem *base = entry->mask_base + __pci_read_msi_msg()
316 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; __pci_read_msi_msg()
322 struct pci_dev *dev = entry->dev; __pci_read_msi_msg()
328 if (entry->msi_attrib.is_64) { __pci_read_msi_msg()
340 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) __pci_write_msi_msg() argument
342 if (entry->dev->current_state != PCI_D0) { __pci_write_msi_msg()
344 } else if (entry->msi_attrib.is_msix) { __pci_write_msi_msg()
346 base = entry->mask_base + __pci_write_msi_msg()
347 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; __pci_write_msi_msg()
353 struct pci_dev *dev = entry->dev; __pci_write_msi_msg()
359 msgctl |= entry->msi_attrib.multiple << 4; __pci_write_msi_msg()
364 if (entry->msi_attrib.is_64) { __pci_write_msi_msg()
374 entry->msg = *msg; __pci_write_msi_msg()
379 struct msi_desc *entry = irq_get_msi_desc(irq); pci_write_msi_msg() local
381 __pci_write_msi_msg(entry, msg); pci_write_msi_msg()
387 struct msi_desc *entry, *tmp; free_msi_irqs() local
392 list_for_each_entry(entry, &dev->msi_list, list) free_msi_irqs()
393 if (entry->irq) free_msi_irqs()
394 for (i = 0; i < entry->nvec_used; i++) free_msi_irqs()
395 BUG_ON(irq_has_action(entry->irq + i)); free_msi_irqs()
399 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { free_msi_irqs()
400 if (entry->msi_attrib.is_msix) { free_msi_irqs()
401 if (list_is_last(&entry->list, &dev->msi_list)) free_msi_irqs()
402 iounmap(entry->mask_base); free_msi_irqs()
405 list_del(&entry->list); free_msi_irqs()
406 kfree(entry); free_msi_irqs()
447 struct msi_desc *entry; __pci_restore_msi_state() local
452 entry = irq_get_msi_desc(dev->irq); __pci_restore_msi_state()
459 msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap), __pci_restore_msi_state()
460 entry->masked); __pci_restore_msi_state()
462 control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE; __pci_restore_msi_state()
468 struct msi_desc *entry; __pci_restore_msix_state() local
480 list_for_each_entry(entry, &dev->msi_list, list) __pci_restore_msix_state()
481 msix_mask_irq(entry, entry->masked); __pci_restore_msix_state()
496 struct msi_desc *entry; msi_mode_show() local
504 entry = irq_get_msi_desc(irq); msi_mode_show()
505 if (entry) msi_mode_show()
507 entry->msi_attrib.is_msix ? "msix" : "msi"); msi_mode_show()
519 struct msi_desc *entry; populate_msi_sysfs() local
525 list_for_each_entry(entry, &pdev->msi_list, list) populate_msi_sysfs()
534 list_for_each_entry(entry, &pdev->msi_list, list) { populate_msi_sysfs()
542 entry->irq); populate_msi_sysfs()
589 struct msi_desc *entry; msi_setup_entry() local
592 entry = alloc_msi_entry(dev); msi_setup_entry()
593 if (!entry) msi_setup_entry()
598 entry->msi_attrib.is_msix = 0; msi_setup_entry()
599 entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT); msi_setup_entry()
600 entry->msi_attrib.entry_nr = 0; msi_setup_entry()
601 entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); msi_setup_entry()
602 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ msi_setup_entry()
603 entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; msi_setup_entry()
604 entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); msi_setup_entry()
605 entry->nvec_used = nvec; msi_setup_entry()
608 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; msi_setup_entry()
610 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32; msi_setup_entry()
613 if (entry->msi_attrib.maskbit) msi_setup_entry()
614 pci_read_config_dword(dev, entry->mask_pos, &entry->masked); msi_setup_entry()
616 return entry; msi_setup_entry()
621 struct msi_desc *entry; msi_verify_entries() local
623 list_for_each_entry(entry, &dev->msi_list, list) { msi_verify_entries()
624 if (!dev->no_64bit_msi || !entry->msg.address_hi) msi_verify_entries()
640 * setup of an entry with the new MSI irq. A negative return value indicates
646 struct msi_desc *entry; msi_capability_init() local
652 entry = msi_setup_entry(dev, nvec); msi_capability_init()
653 if (!entry) msi_capability_init()
657 mask = msi_mask(entry->msi_attrib.multi_cap); msi_capability_init()
658 msi_mask_irq(entry, mask, mask); msi_capability_init()
660 list_add_tail(&entry->list, &dev->msi_list); msi_capability_init()
665 msi_mask_irq(entry, mask, ~mask); msi_capability_init()
672 msi_mask_irq(entry, mask, ~mask); msi_capability_init()
679 msi_mask_irq(entry, mask, ~mask); msi_capability_init()
689 dev->irq = entry->irq; msi_capability_init()
716 struct msi_desc *entry; msix_setup_entries() local
720 entry = alloc_msi_entry(dev); msix_setup_entries()
721 if (!entry) { msix_setup_entries()
730 entry->msi_attrib.is_msix = 1; msix_setup_entries()
731 entry->msi_attrib.is_64 = 1; msix_setup_entries()
732 entry->msi_attrib.entry_nr = entries[i].entry; msix_setup_entries()
733 entry->msi_attrib.default_irq = dev->irq; msix_setup_entries()
734 entry->mask_base = base; msix_setup_entries()
735 entry->nvec_used = 1; msix_setup_entries()
737 list_add_tail(&entry->list, &dev->msi_list); msix_setup_entries()
746 struct msi_desc *entry; msix_program_entries() local
749 list_for_each_entry(entry, &dev->msi_list, list) { msix_program_entries()
750 int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE + msix_program_entries()
753 entries[i].vector = entry->irq; msix_program_entries()
754 entry->masked = readl(entry->mask_base + offset); msix_program_entries()
755 msix_mask_irq(entry, 1); msix_program_entries()
827 struct msi_desc *entry; msix_capability_init() local
830 list_for_each_entry(entry, &dev->msi_list, list) { msix_capability_init()
831 if (entry->irq != 0) msix_capability_init()
999 if (entries[i].entry >= nr_entries) pci_enable_msix()
1000 return -EINVAL; /* invalid entry */ pci_enable_msix()
1002 if (entries[i].entry == entries[j].entry) pci_enable_msix()
1003 return -EINVAL; /* duplicate entry */ pci_enable_msix()
1019 struct msi_desc *entry; pci_msix_shutdown() local
1025 list_for_each_entry(entry, &dev->msi_list, list) { pci_msix_shutdown()
1027 __pci_msix_desc_mask_irq(entry, 1); pci_msix_shutdown()
/linux-4.1.27/arch/arm/xen/
H A Dp2m.c35 struct xen_p2m_entry *entry; xen_add_phys_to_mach_entry() local
40 entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys); xen_add_phys_to_mach_entry()
42 if (new->pfn == entry->pfn) xen_add_phys_to_mach_entry()
45 if (new->pfn < entry->pfn) xen_add_phys_to_mach_entry()
57 __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn); xen_add_phys_to_mach_entry()
65 struct xen_p2m_entry *entry; __pfn_to_mfn() local
70 entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); __pfn_to_mfn()
71 if (entry->pfn <= pfn && __pfn_to_mfn()
72 entry->pfn + entry->nr_pages > pfn) { __pfn_to_mfn()
74 return entry->mfn + (pfn - entry->pfn); __pfn_to_mfn()
76 if (pfn < entry->pfn) __pfn_to_mfn()
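__pfn_to_mfn() above checks whether a pfn falls inside an entry's [pfn, pfn + nr_pages) range and, on a hit, returns entry->mfn plus the offset into the range. A runnable sketch of the same range lookup, with a binary search over a sorted array standing in for the rbtree walk; all names are hypothetical:

#include <stdio.h>

struct p2m_range {
        unsigned long pfn;        /* first guest frame of the range */
        unsigned long nr_pages;   /* number of frames in the range */
        unsigned long mfn;        /* first machine frame it maps to */
};

static unsigned long pfn_to_mfn(const struct p2m_range *tbl, int n,
                                unsigned long pfn)
{
        int lo = 0, hi = n - 1;

        while (lo <= hi) {
                int mid = lo + (hi - lo) / 2;
                const struct p2m_range *e = &tbl[mid];

                if (e->pfn <= pfn && pfn < e->pfn + e->nr_pages)
                        return e->mfn + (pfn - e->pfn);
                if (pfn < e->pfn)
                        hi = mid - 1;
                else
                        lo = mid + 1;
        }
        return ~0UL;              /* not mapped */
}

int main(void)
{
        const struct p2m_range tbl[] = {
                { .pfn = 0x100, .nr_pages = 16, .mfn = 0x900 },
                { .pfn = 0x200, .nr_pages = 8,  .mfn = 0xa00 },
        };

        printf("%lx\n", pfn_to_mfn(tbl, 2, 0x105));   /* 905 */
        printf("%lx\n", pfn_to_mfn(tbl, 2, 0x204));   /* a04 */
        return 0;
}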
/linux-4.1.27/include/scsi/
H A Dfc_encode.h222 struct fc_fdmi_attr_entry *entry; fc_ct_ms_fill() local
257 entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr; fc_ct_ms_fill()
262 &entry->type); fc_ct_ms_fill()
263 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
265 (__be64 *)&entry->value[0]); fc_ct_ms_fill()
268 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
273 &entry->type); fc_ct_ms_fill()
274 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
275 strncpy((char *)&entry->value, fc_ct_ms_fill()
280 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
285 &entry->type); fc_ct_ms_fill()
286 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
287 strncpy((char *)&entry->value, fc_ct_ms_fill()
292 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
297 &entry->type); fc_ct_ms_fill()
298 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
299 strncpy((char *)&entry->value, fc_ct_ms_fill()
304 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
309 &entry->type); fc_ct_ms_fill()
310 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
311 strncpy((char *)&entry->value, fc_ct_ms_fill()
316 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
321 &entry->type); fc_ct_ms_fill()
322 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
323 strncpy((char *)&entry->value, fc_ct_ms_fill()
328 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
333 &entry->type); fc_ct_ms_fill()
334 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
335 strncpy((char *)&entry->value, fc_ct_ms_fill()
340 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
345 &entry->type); fc_ct_ms_fill()
346 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
347 strncpy((char *)&entry->value, fc_ct_ms_fill()
352 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
357 &entry->type); fc_ct_ms_fill()
358 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
359 strncpy((char *)&entry->value, fc_ct_ms_fill()
364 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
369 &entry->type); fc_ct_ms_fill()
370 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
371 snprintf((char *)&entry->value, fc_ct_ms_fill()
400 entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr; fc_ct_ms_fill()
406 &entry->type); fc_ct_ms_fill()
407 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
408 memcpy(&entry->value, fc_host_supported_fc4s(lport->host), fc_ct_ms_fill()
412 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
417 &entry->type); fc_ct_ms_fill()
418 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
421 &entry->value); fc_ct_ms_fill()
424 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
429 &entry->type); fc_ct_ms_fill()
430 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
432 &entry->value); fc_ct_ms_fill()
435 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
440 &entry->type); fc_ct_ms_fill()
441 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
443 &entry->value); fc_ct_ms_fill()
446 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
451 &entry->type); fc_ct_ms_fill()
452 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
454 strncpy((char *)&entry->value, fc_ct_ms_fill()
460 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value + fc_ct_ms_fill()
465 &entry->type); fc_ct_ms_fill()
466 put_unaligned_be16(len, &entry->len); fc_ct_ms_fill()
468 strncpy((char *)&entry->value, fc_ct_ms_fill()
473 strncpy((char *)&entry->value, fc_ct_ms_fill()
/linux-4.1.27/net/mpls/
H A Dinternal.h43 unsigned entry = be32_to_cpu(hdr->label_stack_entry); mpls_entry_decode() local
45 result.label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT; mpls_entry_decode()
46 result.ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; mpls_entry_decode()
47 result.tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT; mpls_entry_decode()
48 result.bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT; mpls_entry_decode()
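mpls_entry_decode() above unpacks the 32-bit label stack entry into its four fields: a 20-bit label, 3-bit traffic class, 1-bit bottom-of-stack flag, and 8-bit TTL (RFC 3032). A self-contained decode of the same layout; the mask and shift constants below are assumed to mirror include/uapi/linux/mpls.h:

#include <stdint.h>
#include <stdio.h>

#define MPLS_LS_LABEL_MASK   0xFFFFF000u
#define MPLS_LS_LABEL_SHIFT  12
#define MPLS_LS_TC_MASK      0x00000E00u
#define MPLS_LS_TC_SHIFT     9
#define MPLS_LS_S_MASK       0x00000100u
#define MPLS_LS_S_SHIFT      8
#define MPLS_LS_TTL_MASK     0x000000FFu
#define MPLS_LS_TTL_SHIFT    0

int main(void)
{
        /* label 16, TC 0, bottom of stack set, TTL 64 */
        uint32_t entry = (16u << MPLS_LS_LABEL_SHIFT) |
                         (1u << MPLS_LS_S_SHIFT) | 64u;

        printf("label=%u tc=%u bos=%u ttl=%u\n",
               (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT,
               (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT,
               (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT,
               (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT);
        return 0;
}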
/linux-4.1.27/tools/usb/usbip/libsrc/
H A Dlist.h15 * using the generic single-entry routines.
34 * Insert a new entry between two known consecutive entries.
50 * list_add - add a new entry
51 * @new: new entry to be added
54 * Insert a new entry after the specified head.
63 * Delete a list entry by making the prev/next entries
80 * list_del - deletes entry from list.
81 * @entry: the element to delete from the list.
82 * Note: list_empty() on entry does not return true after this, the entry is
85 static inline void __list_del_entry(struct list_head *entry) __list_del_entry() argument
87 __list_del(entry->prev, entry->next); __list_del_entry()
90 static inline void list_del(struct list_head *entry) list_del() argument
92 __list_del(entry->prev, entry->next); list_del()
93 entry->next = LIST_POISON1; list_del()
94 entry->prev = LIST_POISON2; list_del()
98 * list_entry - get the struct for this entry
114 * list_for_each_safe - iterate over a list safe against removal of list entry
/linux-4.1.27/scripts/gdb/linux/
H A Dmodules.py26 entry = modules['next']
29 while entry != end_of_list:
30 yield utils.container_of(entry, module_ptr_type, "list")
31 entry = entry['next']
84 entry = source_list['next']
86 while entry != source_list.address:
87 use = utils.container_of(entry, t, "source_list")
92 entry = entry['next']
/linux-4.1.27/sound/pci/ca0106/
H A Dca0106_proc.c274 static void snd_ca0106_proc_iec958(struct snd_info_entry *entry, snd_ca0106_proc_iec958() argument
277 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_iec958()
296 static void snd_ca0106_proc_reg_write32(struct snd_info_entry *entry, snd_ca0106_proc_reg_write32() argument
299 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_reg_write32()
314 static void snd_ca0106_proc_reg_read32(struct snd_info_entry *entry, snd_ca0106_proc_reg_read32() argument
317 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_reg_read32()
330 static void snd_ca0106_proc_reg_read16(struct snd_info_entry *entry, snd_ca0106_proc_reg_read16() argument
333 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_reg_read16()
346 static void snd_ca0106_proc_reg_read8(struct snd_info_entry *entry, snd_ca0106_proc_reg_read8() argument
349 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_reg_read8()
362 static void snd_ca0106_proc_reg_read1(struct snd_info_entry *entry, snd_ca0106_proc_reg_read1() argument
365 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_reg_read1()
380 static void snd_ca0106_proc_reg_read2(struct snd_info_entry *entry, snd_ca0106_proc_reg_read2() argument
383 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_reg_read2()
398 static void snd_ca0106_proc_reg_write(struct snd_info_entry *entry, snd_ca0106_proc_reg_write() argument
401 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_reg_write()
412 static void snd_ca0106_proc_i2c_write(struct snd_info_entry *entry, snd_ca0106_proc_i2c_write() argument
415 struct snd_ca0106 *emu = entry->private_data; snd_ca0106_proc_i2c_write()
429 struct snd_info_entry *entry; snd_ca0106_proc_init() local
431 if(! snd_card_proc_new(emu->card, "iec958", &entry)) snd_ca0106_proc_init()
432 snd_info_set_text_ops(entry, emu, snd_ca0106_proc_iec958); snd_ca0106_proc_init()
433 if(! snd_card_proc_new(emu->card, "ca0106_reg32", &entry)) { snd_ca0106_proc_init()
434 snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read32); snd_ca0106_proc_init()
435 entry->c.text.write = snd_ca0106_proc_reg_write32; snd_ca0106_proc_init()
436 entry->mode |= S_IWUSR; snd_ca0106_proc_init()
438 if(! snd_card_proc_new(emu->card, "ca0106_reg16", &entry)) snd_ca0106_proc_init()
439 snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read16); snd_ca0106_proc_init()
440 if(! snd_card_proc_new(emu->card, "ca0106_reg8", &entry)) snd_ca0106_proc_init()
441 snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read8); snd_ca0106_proc_init()
442 if(! snd_card_proc_new(emu->card, "ca0106_regs1", &entry)) { snd_ca0106_proc_init()
443 snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read1); snd_ca0106_proc_init()
444 entry->c.text.write = snd_ca0106_proc_reg_write; snd_ca0106_proc_init()
445 entry->mode |= S_IWUSR; snd_ca0106_proc_init()
447 if(! snd_card_proc_new(emu->card, "ca0106_i2c", &entry)) { snd_ca0106_proc_init()
448 entry->c.text.write = snd_ca0106_proc_i2c_write; snd_ca0106_proc_init()
449 entry->private_data = emu; snd_ca0106_proc_init()
450 entry->mode |= S_IWUSR; snd_ca0106_proc_init()
452 if(! snd_card_proc_new(emu->card, "ca0106_regs2", &entry)) snd_ca0106_proc_init()
453 snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read2); snd_ca0106_proc_init()
/linux-4.1.27/fs/proc/
H A Dnamespaces.c109 const struct proc_ns_operations **entry, **last; proc_ns_dir_readdir() local
118 entry = ns_entries + (ctx->pos - 2); proc_ns_dir_readdir()
120 while (entry <= last) { proc_ns_dir_readdir()
121 const struct proc_ns_operations *ops = *entry; proc_ns_dir_readdir()
126 entry++; proc_ns_dir_readdir()
143 const struct proc_ns_operations **entry, **last; proc_ns_dir_lookup() local
152 for (entry = ns_entries; entry < last; entry++) { proc_ns_dir_lookup()
153 if (strlen((*entry)->name) != len) proc_ns_dir_lookup()
155 if (!memcmp(dentry->d_name.name, (*entry)->name, len)) proc_ns_dir_lookup()
158 if (entry == last) proc_ns_dir_lookup()
161 error = proc_ns_instantiate(dir, dentry, task, *entry); proc_ns_dir_lookup()
H A Dproc_sysctl.c106 struct ctl_table *entry; find_entry() local
117 entry = &head->ctl_table[ctl_node - head->node]; find_entry()
118 procname = entry->procname; find_entry()
127 return entry; find_entry()
133 static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry) insert_entry() argument
135 struct rb_node *node = &head->node[entry - head->ctl_table].node; insert_entry()
138 const char *name = entry->procname; insert_entry()
160 pr_err("sysctl duplicate entry: "); insert_entry()
162 pr_cont("/%s\n", entry->procname); insert_entry()
172 static void erase_entry(struct ctl_table_header *head, struct ctl_table *entry) erase_entry() argument
174 struct rb_node *node = &head->node[entry - head->ctl_table].node; erase_entry()
194 struct ctl_table *entry; init_header() local
195 for (entry = table; entry->procname; entry++, node++) init_header()
202 struct ctl_table *entry; erase_header() local
203 for (entry = head->ctl_table; entry->procname; entry++) erase_header()
204 erase_entry(head, entry); erase_header()
209 struct ctl_table *entry; insert_header() local
228 for (entry = header->ctl_table; entry->procname; entry++) { insert_header()
229 err = insert_entry(header, entry); insert_header()
266 * if p->used is 0, nobody will ever touch that entry again; start_unregistering()
335 struct ctl_table *entry; lookup_entry() local
338 entry = find_entry(&head, dir, name, namelen); lookup_entry()
339 if (entry && use_table(head)) lookup_entry()
342 entry = NULL; lookup_entry()
344 return entry; lookup_entry()
363 struct ctl_table *entry = NULL; first_entry() local
371 entry = &head->ctl_table[ctl_node - head->node]; first_entry()
374 *pentry = entry; first_entry()
380 struct ctl_table *entry = *pentry; next_entry() local
381 struct ctl_node *ctl_node = &head->node[entry - head->ctl_table]; next_entry()
391 entry = &head->ctl_table[ctl_node - head->node]; next_entry()
394 *pentry = entry; next_entry()
696 struct ctl_table *entry; proc_sys_readdir() local
710 for (first_entry(ctl_dir, &h, &entry); h; next_entry(&h, &entry)) { proc_sys_readdir()
711 if (!scan(h, entry, &pos, file, ctx)) { proc_sys_readdir()
741 else /* Use the permissions on the sysctl table entry */ proc_sys_permission()
866 struct ctl_table *entry; find_subdir() local
868 entry = find_entry(&head, dir, name, namelen); find_subdir()
869 if (!entry) find_subdir()
871 if (!S_ISDIR(entry->mode)) find_subdir()
942 /* Nope. Use our freshly made directory entry. */ get_subdir()
983 struct ctl_table *entry; sysctl_follow_link() local
997 entry = find_entry(&head, dir, procname, strlen(procname)); sysctl_follow_link()
999 if (entry && use_table(head)) { sysctl_follow_link()
1002 *pentry = entry; sysctl_follow_link()
1060 struct ctl_table *link_table, *entry, *link; new_links() local
1068 for (entry = table; entry->procname; entry++) { new_links()
1070 name_bytes += strlen(entry->procname) + 1; new_links()
1086 for (link = link_table, entry = table; entry->procname; link++, entry++) { new_links()
1087 int len = strlen(entry->procname) + 1; new_links()
1088 memcpy(link_name, entry->procname, len); new_links()
1104 struct ctl_table *entry, *link; get_links() local
1106 /* Are there links available for every entry in the table? */ get_links()
1107 for (entry = table; entry->procname; entry++) { get_links()
1108 const char *procname = entry->procname; get_links()
1112 if (S_ISDIR(link->mode) && S_ISDIR(entry->mode)) get_links()
1120 for (entry = table; entry->procname; entry++) { get_links()
1121 const char *procname = entry->procname; get_links()
1176 * array. A completely zero-filled entry terminates the table.
1219 struct ctl_table *entry; __register_sysctl_table() local
1223 for (entry = table; entry->procname; entry++) __register_sysctl_table()
1284 * array. A completely zero-filled entry terminates the table.
1312 struct ctl_table *entry; count_subheaders() local
1318 for (entry = table; entry->procname; entry++) { count_subheaders()
1319 if (entry->child) count_subheaders()
1320 nr_subheaders += count_subheaders(entry->child); count_subheaders()
1332 struct ctl_table *entry, *files; register_leaf_sysctl_tables() local
1337 for (entry = table; entry->procname; entry++) { register_leaf_sysctl_tables()
1338 if (entry->child) register_leaf_sysctl_tables()
1354 for (new = files, entry = table; entry->procname; entry++) { register_leaf_sysctl_tables()
1355 if (entry->child) register_leaf_sysctl_tables()
1357 *new = *entry; register_leaf_sysctl_tables()
1378 for (entry = table; entry->procname; entry++) { register_leaf_sysctl_tables()
1381 if (!entry->child) register_leaf_sysctl_tables()
1385 child_pos = append_path(path, pos, entry->procname); register_leaf_sysctl_tables()
1390 set, entry->child); register_leaf_sysctl_tables()
1408 * array. A completely zero-filled entry terminates the table.
1479 * array. A completely zero-filled entry terminates the table.
1496 * array. A completely zero-filled entry terminates the table.
1514 struct ctl_table *entry; put_links() local
1523 for (entry = header->ctl_table; entry->procname; entry++) { put_links()
1526 const char *name = entry->procname; put_links()
1530 ((S_ISDIR(link->mode) && S_ISDIR(entry->mode)) || put_links()
/linux-4.1.27/fs/f2fs/
H A Dxattr.h59 #define ENTRY_SIZE(entry) (XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + \
60 entry->e_name_len + le16_to_cpu(entry->e_value_size)))
62 #define XATTR_NEXT_ENTRY(entry) ((struct f2fs_xattr_entry *)((char *)(entry) +\
63 ENTRY_SIZE(entry)))
65 #define IS_XATTR_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
67 #define list_for_each_xattr(entry, addr) \
68 for (entry = XATTR_FIRST_ENTRY(addr);\
69 !IS_XATTR_LAST_ENTRY(entry);\
70 entry = XATTR_NEXT_ENTRY(entry))
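The f2fs macros above walk variable-length xattr entries packed back-to-back in a buffer, advancing by each entry's aligned size and stopping at a zeroed 32-bit word. A simplified stand-alone version of that layout and walk; the struct, sizes, and alignment here are illustrative, not f2fs's on-disk format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct xent {
        uint8_t  name_len;
        uint16_t value_size;
        char     data[];        /* name bytes, then value bytes */
};

#define ENT_SIZE(e)  ((sizeof(struct xent) + (e)->name_len + \
                       (e)->value_size + 3u) & ~3u)  /* 4-byte align */
#define NEXT_ENT(e)  ((struct xent *)((char *)(e) + ENT_SIZE(e)))
#define LAST_ENT(e)  (*(const uint32_t *)(e) == 0)

int main(void)
{
        unsigned char buf[64];
        struct xent *e = (struct xent *)buf;

        memset(buf, 0, sizeof(buf));    /* the zero word ends the walk */
        e->name_len = 4;
        e->value_size = 2;
        memcpy(e->data, "userhi", 6);   /* name "user", value "hi" */

        for (e = (struct xent *)buf; !LAST_ENT(e); e = NEXT_ENT(e))
                printf("name_len=%d value_size=%d\n",
                       e->name_len, e->value_size);
        return 0;
}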
H A Dacl.c53 struct f2fs_acl_entry *entry = (struct f2fs_acl_entry *)(hdr + 1); f2fs_acl_from_disk() local
71 if ((char *)entry > end) f2fs_acl_from_disk()
74 acl->a_entries[i].e_tag = le16_to_cpu(entry->e_tag); f2fs_acl_from_disk()
75 acl->a_entries[i].e_perm = le16_to_cpu(entry->e_perm); f2fs_acl_from_disk()
82 entry = (struct f2fs_acl_entry *)((char *)entry + f2fs_acl_from_disk()
89 le32_to_cpu(entry->e_id)); f2fs_acl_from_disk()
90 entry = (struct f2fs_acl_entry *)((char *)entry + f2fs_acl_from_disk()
96 le32_to_cpu(entry->e_id)); f2fs_acl_from_disk()
97 entry = (struct f2fs_acl_entry *)((char *)entry + f2fs_acl_from_disk()
104 if ((char *)entry != end) f2fs_acl_from_disk()
115 struct f2fs_acl_entry *entry; f2fs_acl_to_disk() local
124 entry = (struct f2fs_acl_entry *)(f2fs_acl + 1); f2fs_acl_to_disk()
128 entry->e_tag = cpu_to_le16(acl->a_entries[i].e_tag); f2fs_acl_to_disk()
129 entry->e_perm = cpu_to_le16(acl->a_entries[i].e_perm); f2fs_acl_to_disk()
133 entry->e_id = cpu_to_le32( f2fs_acl_to_disk()
136 entry = (struct f2fs_acl_entry *)((char *)entry + f2fs_acl_to_disk()
140 entry->e_id = cpu_to_le32( f2fs_acl_to_disk()
143 entry = (struct f2fs_acl_entry *)((char *)entry + f2fs_acl_to_disk()
150 entry = (struct f2fs_acl_entry *)((char *)entry + f2fs_acl_to_disk()
H A Drecovery.c61 struct fsync_inode_entry *entry; get_fsync_inode() local
63 list_for_each_entry(entry, head, list) get_fsync_inode()
64 if (entry->inode->i_ino == ino) get_fsync_inode()
65 return entry; get_fsync_inode()
175 struct fsync_inode_entry *entry; find_fsync_dnodes() local
188 entry = get_fsync_inode(head, ino_of_node(page)); find_fsync_dnodes()
189 if (!entry) { find_fsync_dnodes()
197 entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO); find_fsync_dnodes()
198 if (!entry) { find_fsync_dnodes()
206 entry->inode = f2fs_iget(sbi->sb, ino_of_node(page)); find_fsync_dnodes()
207 if (IS_ERR(entry->inode)) { find_fsync_dnodes()
208 err = PTR_ERR(entry->inode); find_fsync_dnodes()
209 kmem_cache_free(fsync_entry_slab, entry); find_fsync_dnodes()
216 list_add_tail(&entry->list, head); find_fsync_dnodes()
218 entry->blkaddr = blkaddr; find_fsync_dnodes()
221 entry->last_inode = blkaddr; find_fsync_dnodes()
223 entry->last_dentry = blkaddr; find_fsync_dnodes()
238 struct fsync_inode_entry *entry, *tmp; destroy_fsync_dnodes() local
240 list_for_each_entry_safe(entry, tmp, head, list) { list_for_each_entry_safe()
241 iput(entry->inode); list_for_each_entry_safe()
242 list_del(&entry->list); list_for_each_entry_safe()
243 kmem_cache_free(fsync_entry_slab, entry); list_for_each_entry_safe()
455 struct fsync_inode_entry *entry; recover_data() local
469 entry = get_fsync_inode(head, ino_of_node(page)); recover_data()
470 if (!entry) recover_data()
477 if (entry->last_inode == blkaddr) recover_data()
478 recover_inode(entry->inode, page); recover_data()
479 if (entry->last_dentry == blkaddr) { recover_data()
480 err = recover_dentry(entry->inode, page); recover_data()
486 err = do_recover_data(sbi, entry->inode, page, blkaddr); recover_data()
492 if (entry->blkaddr == blkaddr) { recover_data()
493 iput(entry->inode); recover_data()
494 list_del(&entry->list); recover_data()
495 kmem_cache_free(fsync_entry_slab, entry); recover_data()
/linux-4.1.27/drivers/staging/gdm72xx/
H A Dgdm_qos.c43 struct qos_entry_s *entry; alloc_qos_entry() local
48 entry = list_entry(qos_free_list.head.prev, struct qos_entry_s, alloc_qos_entry()
50 list_del(&entry->list); alloc_qos_entry()
53 return entry; alloc_qos_entry()
57 return kmalloc(sizeof(*entry), GFP_ATOMIC); alloc_qos_entry()
60 static void free_qos_entry(void *entry) free_qos_entry() argument
62 struct qos_entry_s *qentry = (struct qos_entry_s *)entry; free_qos_entry()
74 kfree(entry); free_qos_entry()
79 struct qos_entry_s *entry, *n; free_qos_entry_list() local
82 list_for_each_entry_safe(entry, n, free_list, list) { list_for_each_entry_safe()
83 list_del(&entry->list); list_for_each_entry_safe()
84 kfree(entry); list_for_each_entry_safe()
117 struct qos_entry_s *entry, *n; gdm_qos_release_list() local
134 list_for_each_entry_safe(entry, n, &qcb->qos_list[i], list) { gdm_qos_release_list()
135 list_move_tail(&entry->list, &free_list); gdm_qos_release_list()
216 struct qos_entry_s *entry; extract_qos_list() local
229 entry = list_entry(qcb->qos_list[i].prev, struct qos_entry_s, extract_qos_list()
232 list_move_tail(&entry->list, head); extract_qos_list()
242 struct qos_entry_s *entry, *n; send_qos_list() local
244 list_for_each_entry_safe(entry, n, head, list) { list_for_each_entry_safe()
245 list_del(&entry->list); list_for_each_entry_safe()
246 gdm_wimax_send_tx(entry->skb, entry->dev); list_for_each_entry_safe()
247 free_qos_entry(entry); list_for_each_entry_safe()
260 struct qos_entry_s *entry = NULL; gdm_qos_send_hci_pkt() local
268 entry = alloc_qos_entry(); gdm_qos_send_hci_pkt()
269 entry->skb = skb; gdm_qos_send_hci_pkt()
270 entry->dev = dev; gdm_qos_send_hci_pkt()
281 if (!entry) { gdm_qos_send_hci_pkt()
282 entry = alloc_qos_entry(); gdm_qos_send_hci_pkt()
283 entry->skb = skb; gdm_qos_send_hci_pkt()
284 entry->dev = dev; gdm_qos_send_hci_pkt()
287 list_add_tail(&entry->list, &qcb->qos_list[index]); gdm_qos_send_hci_pkt()
294 if (entry) gdm_qos_send_hci_pkt()
295 free_qos_entry(entry); gdm_qos_send_hci_pkt()
335 struct qos_entry_s *entry, *n; gdm_recv_qos_hci_packet() local
431 list_for_each_entry_safe(entry, n, &qcb->qos_list[index], gdm_recv_qos_hci_packet()
433 list_move_tail(&entry->list, &free_list); gdm_recv_qos_hci_packet()
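alloc_qos_entry()/free_qos_entry() above implement a small recycling pool: pop a previously used entry from a locked free list when one is available, otherwise fall back to a fresh atomic allocation. A hedged userspace model, with a pthread mutex standing in for the spinlock; unlike the driver, this sketch recycles unconditionally rather than capping the pool:

    #include <stdlib.h>
    #include <pthread.h>

    struct qent { struct qent *next; };

    static struct qent *free_list;
    static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct qent *qent_alloc(void)
    {
        struct qent *e = NULL;

        pthread_mutex_lock(&free_lock);
        if (free_list) {                /* reuse a recycled entry */
            e = free_list;
            free_list = e->next;
        }
        pthread_mutex_unlock(&free_lock);
        return e ? e : malloc(sizeof(*e));
    }

    static void qent_free(struct qent *e)
    {
        pthread_mutex_lock(&free_lock);
        e->next = free_list;            /* push back for reuse */
        free_list = e;
        pthread_mutex_unlock(&free_lock);
    }

    int main(void)
    {
        struct qent *e = qent_alloc();
        qent_free(e);
        return 0;
    }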
/linux-4.1.27/fs/ntfs/
H A Dindex.h36 * @idx_ni: index inode containing the @entry described by this context
37 * @entry: index entry (points into @ir or @ia)
38 * @data: index entry data (points into @entry)
40 * @is_in_root: 'true' if @entry is in @ir and 'false' if it is in @ia
49 * @entry is the index entry described by this context. @data and @data_len
50 * are the index entry data and its length in bytes, respectively. @data
51 * simply points into @entry. This is probably what the user is interested in.
53 * If @is_in_root is 'true', @entry is in the index root attribute @ir described
57 * If @is_in_root is 'false', @entry is in the index allocation attribute and @ia
64 * @entry and its @data without having to allocate a buffer and copy the @entry
67 * When finished with the @entry and its @data, call ntfs_index_ctx_put() to
70 * If the index entry was modified, call flush_dcache_index_entry_page()
77 INDEX_ENTRY *entry; member in struct:__anon11134
98 * @ictx: ntfs index context describing the index entry
100 * Call flush_dcache_page() for the page in which an index entry resides.
102 * This must be called every time an index entry is modified, just after the
105 * If the index entry is in the index root attribute, simply flush the page
108 * If the index entry is in an index block belonging to the index allocation
120 * ntfs_index_entry_mark_dirty - mark an index entry dirty
121 * @ictx: ntfs index context describing the index entry
123 * Mark the index entry described by the index entry context @ictx dirty.
125 * If the index entry is in the index root attribute, simply mark the mft
130 * If the index entry is in an index block belonging to the index allocation
133 * VFS inode of the ntfs index inode to which the index entry belongs dirty,
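The comment block above prescribes a strict lifecycle for an index context: obtain it, look an entry up, use @entry and @data in place, mark the entry dirty only if it was modified, then put the context to drop locks and references. A sketch of that call sequence, assuming the ntfs_index_ctx_get(), ntfs_index_lookup() and ntfs_index_ctx_put() helpers this header declares; error handling is trimmed and this is a sketch, not runnable outside the kernel tree:

    /* Sketch only: look up a key in an ntfs index and touch its entry. */
    static int example_index_lookup(ntfs_inode *idx_ni, const void *key, int key_len)
    {
        ntfs_index_context *ictx = ntfs_index_ctx_get(idx_ni);
        int err;

        if (!ictx)
            return -ENOMEM;
        err = ntfs_index_lookup(key, key_len, ictx);
        if (!err) {
            /* ictx->entry, ictx->data and ictx->data_len are valid here. */
            ntfs_index_entry_mark_dirty(ictx);  /* only if the entry was modified */
        }
        ntfs_index_ctx_put(ictx);               /* releases locks and references */
        return err;
    }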
/linux-4.1.27/drivers/ntb/
H A Dntb_transport.c77 struct list_head entry; member in struct:ntb_queue_entry
93 unsigned int entry; member in struct:ntb_rx_info
162 struct list_head entry; member in struct:ntb_transport_client_dev
167 struct list_head entry; member in struct:ntb_transport
259 list_add(&nt->entry, &ntb_transport_list); ntb_bus_init()
268 list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) { ntb_bus_remove()
271 list_del(&client_dev->entry); ntb_bus_remove()
275 list_del(&nt->entry); ntb_bus_remove()
300 list_for_each_entry(nt, &ntb_transport_list, entry) ntb_unregister_client_dev()
301 list_for_each_entry_safe(client, cd, &nt->client_devs, entry) ntb_unregister_client_dev()
304 list_del(&client->entry); ntb_unregister_client_dev()
325 list_for_each_entry(nt, &ntb_transport_list, entry) { ntb_register_client_dev()
349 list_add_tail(&client_dev->entry, &nt->client_devs); ntb_register_client_dev()
471 static void ntb_list_add(spinlock_t *lock, struct list_head *entry, ntb_list_add() argument
477 list_add_tail(entry, list); ntb_list_add()
484 struct ntb_queue_entry *entry; ntb_list_rm() local
489 entry = NULL; ntb_list_rm()
492 entry = list_first_entry(list, struct ntb_queue_entry, entry); ntb_list_rm()
493 list_del(&entry->entry); ntb_list_rm()
497 return entry; ntb_list_rm()
529 qp->remote_rx_info->entry = qp->rx_max_entry - 1; ntb_transport_setup_qp_mw()
1025 struct ntb_queue_entry *entry = data; ntb_rx_copy_callback() local
1026 struct ntb_transport_qp *qp = entry->qp; ntb_rx_copy_callback()
1027 void *cb_data = entry->cb_data; ntb_rx_copy_callback()
1028 unsigned int len = entry->len; ntb_rx_copy_callback()
1029 struct ntb_payload_header *hdr = entry->rx_hdr; ntb_rx_copy_callback()
1035 iowrite32(entry->index, &qp->rx_info->entry); ntb_rx_copy_callback()
1037 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); ntb_rx_copy_callback()
1043 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) ntb_memcpy_rx() argument
1045 void *buf = entry->buf; ntb_memcpy_rx()
1046 size_t len = entry->len; ntb_memcpy_rx()
1050 ntb_rx_copy_callback(entry); ntb_memcpy_rx()
1053 static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, ntb_async_rx() argument
1057 struct ntb_transport_qp *qp = entry->qp; ntb_async_rx()
1063 void *buf = entry->buf; ntb_async_rx()
1065 entry->len = len; ntb_async_rx()
1106 txd->callback_param = entry; ntb_async_rx()
1132 ntb_memcpy_rx(entry, offset); ntb_async_rx()
1139 struct ntb_queue_entry *entry; ntb_process_rxc() local
1145 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); ntb_process_rxc()
1146 if (!entry) { ntb_process_rxc()
1155 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, ntb_process_rxc()
1165 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, ntb_process_rxc()
1179 qp->rx_index, hdr->ver, hdr->len, entry->len); ntb_process_rxc()
1184 if (hdr->len > entry->len) { ntb_process_rxc()
1188 hdr->len, entry->len); ntb_process_rxc()
1193 entry->index = qp->rx_index; ntb_process_rxc()
1194 entry->rx_hdr = hdr; ntb_process_rxc()
1196 ntb_async_rx(entry, offset, hdr->len); ntb_process_rxc()
1205 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q); ntb_process_rxc()
1209 iowrite32(qp->rx_index, &qp->rx_info->entry); ntb_process_rxc()
1239 struct ntb_queue_entry *entry = data; ntb_tx_copy_callback() local
1240 struct ntb_transport_qp *qp = entry->qp; ntb_tx_copy_callback()
1241 struct ntb_payload_header __iomem *hdr = entry->tx_hdr; ntb_tx_copy_callback()
1245 iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags); ntb_tx_copy_callback()
1249 /* The entry length can only be zero if the packet is intended to be a ntb_tx_copy_callback()
1253 if (entry->len > 0) { ntb_tx_copy_callback()
1254 qp->tx_bytes += entry->len; ntb_tx_copy_callback()
1257 qp->tx_handler(qp, qp->cb_data, entry->cb_data, ntb_tx_copy_callback()
1258 entry->len); ntb_tx_copy_callback()
1261 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q); ntb_tx_copy_callback()
1264 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset) ntb_memcpy_tx() argument
1266 memcpy_toio(offset, entry->buf, entry->len); ntb_memcpy_tx()
1268 ntb_tx_copy_callback(entry); ntb_memcpy_tx()
1272 struct ntb_queue_entry *entry) ntb_async_tx()
1283 size_t len = entry->len; ntb_async_tx()
1284 void *buf = entry->buf; ntb_async_tx()
1288 entry->tx_hdr = hdr; ntb_async_tx()
1290 iowrite32(entry->len, &hdr->len); ntb_async_tx()
1325 txd->callback_param = entry; ntb_async_tx()
1343 ntb_memcpy_tx(entry, offset); ntb_async_tx()
1348 struct ntb_queue_entry *entry) ntb_process_tx()
1350 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n", ntb_process_tx()
1351 qp->tx_pkts, qp->tx_index, entry->len, entry->flags, ntb_process_tx()
1352 entry->buf); ntb_process_tx()
1353 if (qp->tx_index == qp->remote_rx_info->entry) { ntb_process_tx()
1358 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { ntb_process_tx()
1362 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, ntb_process_tx()
1367 ntb_async_tx(qp, entry); ntb_process_tx()
1380 struct ntb_queue_entry *entry; ntb_send_link_down() local
1390 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); ntb_send_link_down()
1391 if (entry) ntb_send_link_down()
1396 if (!entry) ntb_send_link_down()
1399 entry->cb_data = NULL; ntb_send_link_down()
1400 entry->buf = NULL; ntb_send_link_down()
1401 entry->len = 0; ntb_send_link_down()
1402 entry->flags = LINK_DOWN_FLAG; ntb_send_link_down()
1404 rc = ntb_process_tx(qp, entry); ntb_send_link_down()
1428 struct ntb_queue_entry *entry; ntb_transport_create_queue() local
1461 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC); ntb_transport_create_queue()
1462 if (!entry) ntb_transport_create_queue()
1465 entry->qp = qp; ntb_transport_create_queue()
1466 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, ntb_transport_create_queue()
1471 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC); ntb_transport_create_queue()
1472 if (!entry) ntb_transport_create_queue()
1475 entry->qp = qp; ntb_transport_create_queue()
1476 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, ntb_transport_create_queue()
1490 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) ntb_transport_create_queue()
1491 kfree(entry); ntb_transport_create_queue()
1493 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) ntb_transport_create_queue()
1494 kfree(entry); ntb_transport_create_queue()
1512 struct ntb_queue_entry *entry; ntb_transport_free_queue() local
1538 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) ntb_transport_free_queue()
1539 kfree(entry); ntb_transport_free_queue()
1541 while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) { ntb_transport_free_queue()
1543 kfree(entry); ntb_transport_free_queue()
1546 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) ntb_transport_free_queue()
1547 kfree(entry); ntb_transport_free_queue()
1567 struct ntb_queue_entry *entry; ntb_transport_rx_remove() local
1573 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); ntb_transport_rx_remove()
1574 if (!entry) ntb_transport_rx_remove()
1577 buf = entry->cb_data; ntb_transport_rx_remove()
1578 *len = entry->len; ntb_transport_rx_remove()
1580 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); ntb_transport_rx_remove()
1587 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
1588 * @qp: NTB transport layer queue the entry is to be enqueued on
1601 struct ntb_queue_entry *entry; ntb_transport_rx_enqueue() local
1606 entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q); ntb_transport_rx_enqueue()
1607 if (!entry) ntb_transport_rx_enqueue()
1610 entry->cb_data = cb; ntb_transport_rx_enqueue()
1611 entry->buf = data; ntb_transport_rx_enqueue()
1612 entry->len = len; ntb_transport_rx_enqueue()
1614 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q); ntb_transport_rx_enqueue()
1621 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
1622 * @qp: NTB transport layer queue the entry is to be enqueued on
1636 struct ntb_queue_entry *entry; ntb_transport_tx_enqueue() local
1642 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); ntb_transport_tx_enqueue()
1643 if (!entry) { ntb_transport_tx_enqueue()
1648 entry->cb_data = cb; ntb_transport_tx_enqueue()
1649 entry->buf = data; ntb_transport_tx_enqueue()
1650 entry->len = len; ntb_transport_tx_enqueue()
1651 entry->flags = 0; ntb_transport_tx_enqueue()
1653 rc = ntb_process_tx(qp, entry); ntb_transport_tx_enqueue()
1655 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, ntb_transport_tx_enqueue()
1271 ntb_async_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) ntb_async_tx() argument
1347 ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) ntb_process_tx() argument
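ntb_list_add() and ntb_list_rm() above are thin locked wrappers around list_add_tail() and list_first_entry(); the one subtlety is taking the lock before the emptiness test, so the test and the unlink are atomic with respect to producers. A self-contained model with a pthread mutex in place of the spinlock:

    #include <stdio.h>
    #include <pthread.h>

    struct node { struct node *prev, *next; int val; };

    static void list_init(struct node *h) { h->prev = h->next = h; }

    static void list_add_tail_node(struct node *h, struct node *n)
    {
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
    }

    /* Locked pop-first, returning NULL when the list is empty. */
    static struct node *list_pop_first(pthread_mutex_t *lock, struct node *h)
    {
        struct node *n = NULL;

        pthread_mutex_lock(lock);
        if (h->next != h) {             /* non-empty */
            n = h->next;
            n->prev->next = n->next;
            n->next->prev = n->prev;
        }
        pthread_mutex_unlock(lock);
        return n;
    }

    int main(void)
    {
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        struct node head, a = { .val = 1 }, b = { .val = 2 };

        list_init(&head);
        list_add_tail_node(&head, &a);
        list_add_tail_node(&head, &b);
        printf("%d\n", list_pop_first(&lock, &head)->val);  /* 1, FIFO order */
        return 0;
    }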
/linux-4.1.27/drivers/net/wireless/ath/
H A Dkey.c42 bool ath_hw_keyreset(struct ath_common *common, u16 entry) ath_hw_keyreset() argument
47 if (entry >= common->keymax) { ath_hw_keyreset()
48 ath_err(common, "keyreset: keycache entry %u out of range\n", ath_hw_keyreset()
49 entry); ath_hw_keyreset()
53 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry)); ath_hw_keyreset()
57 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0); ath_hw_keyreset()
58 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0); ath_hw_keyreset()
59 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0); ath_hw_keyreset()
60 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0); ath_hw_keyreset()
61 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0); ath_hw_keyreset()
62 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR); ath_hw_keyreset()
63 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0); ath_hw_keyreset()
64 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0); ath_hw_keyreset()
67 u16 micentry = entry + 64; ath_hw_keyreset()
88 u16 entry, const u8 *mac) ath_hw_keysetmac()
94 if (entry >= common->keymax) { ath_hw_keysetmac()
95 ath_err(common, "keysetmac: keycache entry %u out of range\n", ath_hw_keysetmac()
96 entry); ath_hw_keysetmac()
121 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo); ath_hw_keysetmac()
122 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag); ath_hw_keysetmac()
129 static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry, ath_hw_set_keycache_entry() argument
137 if (entry >= common->keymax) { ath_hw_set_keycache_entry()
138 ath_err(common, "set-entry: keycache entry %u out of range\n", ath_hw_set_keycache_entry()
139 entry); ath_hw_set_keycache_entry()
157 if (entry + 64 >= common->keymax) { ath_hw_set_keycache_entry()
159 "entry %u inappropriate for TKIP\n", entry); ath_hw_set_keycache_entry()
200 u16 micentry = entry + 64; ath_hw_set_keycache_entry()
208 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0); ath_hw_set_keycache_entry()
209 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1); ath_hw_set_keycache_entry()
212 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2); ath_hw_set_keycache_entry()
213 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3); ath_hw_set_keycache_entry()
216 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4); ath_hw_set_keycache_entry()
217 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType); ath_hw_set_keycache_entry()
219 /* Write MAC address for the entry */ ath_hw_set_keycache_entry()
220 (void) ath_hw_keysetmac(common, entry, mac); ath_hw_set_keycache_entry()
225 * Michael MIC TX/RX keys in the same key cache entry ath_hw_set_keycache_entry()
302 /* MAC address registers are reserved for the MIC entry */ ath_hw_set_keycache_entry()
311 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0); ath_hw_set_keycache_entry()
312 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1); ath_hw_set_keycache_entry()
319 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0); ath_hw_set_keycache_entry()
320 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1); ath_hw_set_keycache_entry()
323 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2); ath_hw_set_keycache_entry()
324 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3); ath_hw_set_keycache_entry()
327 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4); ath_hw_set_keycache_entry()
328 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType); ath_hw_set_keycache_entry()
332 /* Write MAC address for the entry */ ath_hw_set_keycache_entry()
333 (void) ath_hw_keysetmac(common, entry, mac); ath_hw_set_keycache_entry()
365 /* TX and RX keys share the same key cache entry. */ ath_setkey_tkip()
376 /* TX MIC entry failed. No need to proceed further */ ath_setkey_tkip()
87 ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac) ath_hw_keysetmac() argument
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/bios/
H A Dbit.c32 u32 entry = bios->bit_offset + 12; bit_entry() local
34 if (nv_ro08(bios, entry + 0) == id) { bit_entry()
35 bit->id = nv_ro08(bios, entry + 0); bit_entry()
36 bit->version = nv_ro08(bios, entry + 1); bit_entry()
37 bit->length = nv_ro16(bios, entry + 2); bit_entry()
38 bit->offset = nv_ro16(bios, entry + 4); bit_entry()
42 entry += nv_ro08(bios, bios->bit_offset + 9); bit_entry()
H A Dextdev.c61 struct nvbios_extdev_func *entry) extdev_parse_entry()
63 entry->type = nv_ro08(bios, offset + 0); extdev_parse_entry()
64 entry->addr = nv_ro08(bios, offset + 1); extdev_parse_entry()
65 entry->bus = (nv_ro08(bios, offset + 2) >> 4) & 1; extdev_parse_entry()
73 u16 entry; nvbios_extdev_parse() local
75 if (!(entry = nvbios_extdev_entry(bios, idx, &ver, &len))) nvbios_extdev_parse()
78 extdev_parse_entry(bios, entry, func); nvbios_extdev_parse()
87 u16 entry; nvbios_extdev_find() local
90 while ((entry = nvbios_extdev_entry(bios, i++, &ver, &len))) { nvbios_extdev_find()
91 extdev_parse_entry(bios, entry, func); nvbios_extdev_find()
60 extdev_parse_entry(struct nvkm_bios *bios, u16 offset, struct nvbios_extdev_func *entry) extdev_parse_entry() argument
/linux-4.1.27/drivers/thunderbolt/
H A Dnhi_regs.h28 * struct ring_desc - TX/RX ring entry
45 * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
54 * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
64 * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
72 * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
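The comments above describe fixed-stride per-hop register blocks: a descriptor-table entry every 16 bytes, a hop-config entry every 32. The implied address math is just base + hop * stride; a hypothetical helper, not code from this driver:

    #include <stdint.h>

    /* Hypothetical: byte offset of hop N's entry in a 16-byte-stride table. */
    static inline uint32_t ring_entry_offset(uint32_t base, unsigned int hop)
    {
        return base + hop * 16;         /* "16 bytes per entry, one per hop" */
    }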
/linux-4.1.27/arch/powerpc/kernel/
H A Djump_label.c15 void arch_jump_label_transform(struct jump_entry *entry, arch_jump_label_transform() argument
18 u32 *addr = (u32 *)(unsigned long)entry->code; arch_jump_label_transform()
21 patch_branch(addr, entry->target, 0); arch_jump_label_transform()
H A Dmodule_32.c173 static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) entry_matches() argument
175 if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16) entry_matches()
176 && entry->jump[1] == 0x398c0000 + (val & 0xffff)) entry_matches()
187 struct ppc_plt_entry *entry; do_plt_call() local
193 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; do_plt_call()
195 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; do_plt_call()
197 /* Find this entry, or if that fails, the next avail. entry */ do_plt_call()
198 while (entry->jump[0]) { do_plt_call()
199 if (entry_matches(entry, val)) return (uint32_t)entry; do_plt_call()
200 entry++; do_plt_call()
203 entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */ do_plt_call()
204 entry->jump[1] = 0x398c0000 + (val&0xffff); /* addi r12,r12,sym@l*/ do_plt_call()
205 entry->jump[2] = 0x7d8903a6; /* mtctr r12 */ do_plt_call()
206 entry->jump[3] = 0x4e800420; /* bctr */ do_plt_call()
208 pr_debug("Initialized plt for 0x%x at %p\n", val, entry); do_plt_call()
209 return (uint32_t)entry; do_plt_call()
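The instruction pair built in do_plt_call() above encodes a 32-bit target as a high-adjusted split: lis installs (val + 0x8000) >> 16 and addi adds val & 0xffff, which the CPU sign-extends; adding 0x8000 before the shift pre-compensates for that sign extension. A worked check that the two halves always recombine exactly:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* A target whose low half looks "negative" as a signed 16-bit value. */
        uint32_t val = 0xC000FFF0;

        uint32_t hi = (val + 0x8000) >> 16;   /* lis  r12, hi      */
        uint32_t lo = val & 0xffff;           /* addi r12, r12, lo */

        /* addi sign-extends its immediate, so model that explicitly. */
        uint32_t got = (hi << 16) + (uint32_t)(int32_t)(int16_t)lo;

        assert(got == val);
        return 0;
    }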
/linux-4.1.27/net/x25/
H A Dx25_forward.c27 struct list_head *entry; x25_forward_call() local
54 list_for_each(entry, &x25_forward_list) { x25_forward_call()
55 x25_frwd = list_entry(entry, struct x25_forward, node); x25_forward_call()
100 struct list_head *entry; x25_forward_data() local
107 list_for_each(entry, &x25_forward_list) { x25_forward_data()
108 frwd = list_entry(entry, struct x25_forward, node); x25_forward_data()
140 struct list_head *entry, *tmp; x25_clear_forward_by_lci() local
144 list_for_each_safe(entry, tmp, &x25_forward_list) { x25_clear_forward_by_lci()
145 fwd = list_entry(entry, struct x25_forward, node); x25_clear_forward_by_lci()
158 struct list_head *entry, *tmp; x25_clear_forward_by_dev() local
162 list_for_each_safe(entry, tmp, &x25_forward_list) { x25_clear_forward_by_dev()
163 fwd = list_entry(entry, struct x25_forward, node); x25_clear_forward_by_dev()
H A Dx25_route.c35 struct list_head *entry; x25_add_route() local
40 list_for_each(entry, &x25_route_list) { x25_add_route()
41 rt = list_entry(entry, struct x25_route, node); x25_add_route()
86 struct list_head *entry; x25_del_route() local
91 list_for_each(entry, &x25_route_list) { x25_del_route()
92 rt = list_entry(entry, struct x25_route, node); x25_del_route()
112 struct list_head *entry, *tmp; x25_route_device_down() local
116 list_for_each_safe(entry, tmp, &x25_route_list) { x25_route_device_down()
117 rt = list_entry(entry, struct x25_route, node); x25_route_device_down()
157 struct list_head *entry; x25_get_route() local
161 list_for_each(entry, &x25_route_list) { x25_get_route()
162 rt = list_entry(entry, struct x25_route, node); x25_get_route()
218 struct list_head *entry, *tmp; x25_route_free() local
221 list_for_each_safe(entry, tmp, &x25_route_list) { x25_route_free()
222 rt = list_entry(entry, struct x25_route, node); x25_route_free()
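Each loop above pairs list_for_each() with list_entry(), which is container_of(): subtract the embedded member's offset from the node pointer to recover the containing structure. A self-contained demonstration; the route struct here is illustrative, with only the embedded node member mirroring struct x25_route:

    #include <stdio.h>
    #include <stddef.h>

    #define my_list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct list_head { struct list_head *prev, *next; };

    struct route {
        int id;
        struct list_head node;          /* embedded list node */
    };

    int main(void)
    {
        struct route r = { .id = 42 };
        struct list_head *entry = &r.node;

        /* Recover the struct route from its embedded node. */
        printf("%d\n", my_list_entry(entry, struct route, node)->id);
        return 0;
    }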
/linux-4.1.27/tools/perf/util/
H A Dstrlist.c15 struct rb_node *strlist__node_new(struct rblist *rblist, const void *entry) strlist__node_new() argument
17 const char *s = entry; strlist__node_new()
55 static int strlist__node_cmp(struct rb_node *rb_node, const void *entry) strlist__node_cmp() argument
57 const char *str = entry; strlist__node_cmp()
70 char entry[1024]; strlist__load() local
77 while (fgets(entry, sizeof(entry), fp) != NULL) { strlist__load()
78 const size_t len = strlen(entry); strlist__load()
82 entry[len - 1] = '\0'; strlist__load()
84 err = strlist__add(slist, entry); strlist__load()
100 struct str_node *strlist__find(struct strlist *slist, const char *entry) strlist__find() argument
103 struct rb_node *rb_node = rblist__find(&slist->rblist, entry); strlist__find()
H A Drblist.c52 const void *entry, __rblist__findnew()
63 rc = rblist->node_cmp(parent, entry); __rblist__findnew()
73 new_node = rblist->node_new(rblist, entry); __rblist__findnew()
84 struct rb_node *rblist__find(struct rblist *rblist, const void *entry) rblist__find() argument
86 return __rblist__findnew(rblist, entry, false); rblist__find()
89 struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry) rblist__findnew() argument
91 return __rblist__findnew(rblist, entry, true); rblist__findnew()
51 __rblist__findnew(struct rblist *rblist, const void *entry, bool create) __rblist__findnew() argument
H A Drblist.h25 int (*node_cmp)(struct rb_node *rbn, const void *entry);
34 struct rb_node *rblist__find(struct rblist *rblist, const void *entry);
35 struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry);
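rblist is a small template: a client fills in node_cmp() and node_new() and gets rblist__find()/rblist__findnew() for free, exactly as strlist does above for string keys. A sketch of an integer-keyed client in the same style, assuming perf's util/rblist.h; the int_node type and names are illustrative (perf's real intlist is similar but not identical):

    #include <stdlib.h>
    #include "rblist.h"

    struct int_node {
        struct rb_node rb_node;         /* must be first for the cast below */
        int i;
    };

    static int int_node_cmp(struct rb_node *rbn, const void *entry)
    {
        const struct int_node *node = (const struct int_node *)rbn;

        return node->i - *(const int *)entry;
    }

    static struct rb_node *int_node_new(struct rblist *rblist, const void *entry)
    {
        struct int_node *node = malloc(sizeof(*node));

        (void)rblist;
        if (!node)
            return NULL;
        node->i = *(const int *)entry;
        return &node->rb_node;
    }

After rblist__init(), assigning these two callbacks is all that is needed before the first rblist__findnew() call.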
/linux-4.1.27/kernel/
H A Dauditfilter.c114 /* Initialize an audit filterlist entry. */ audit_init_entry()
117 struct audit_entry *entry; audit_init_entry() local
120 entry = kzalloc(sizeof(*entry), GFP_KERNEL); audit_init_entry()
121 if (unlikely(!entry)) audit_init_entry()
126 kfree(entry); audit_init_entry()
129 entry->rule.fields = fields; audit_init_entry()
131 return entry; audit_init_entry()
219 static int audit_match_signal(struct audit_entry *entry) audit_match_signal() argument
221 struct audit_field *arch = entry->rule.arch_f; audit_match_signal()
227 entry->rule.mask) && audit_match_signal()
229 entry->rule.mask)); audit_match_signal()
235 entry->rule.mask)); audit_match_signal()
238 entry->rule.mask)); audit_match_signal()
249 struct audit_entry *entry; audit_to_entry_common() local
278 entry = audit_init_entry(rule->field_count); audit_to_entry_common()
279 if (!entry) audit_to_entry_common()
282 entry->rule.flags = rule->flags & AUDIT_FILTER_PREPEND; audit_to_entry_common()
283 entry->rule.listnr = listnr; audit_to_entry_common()
284 entry->rule.action = rule->action; audit_to_entry_common()
285 entry->rule.field_count = rule->field_count; audit_to_entry_common()
288 entry->rule.mask[i] = rule->mask[i]; audit_to_entry_common()
292 __u32 *p = &entry->rule.mask[AUDIT_WORD(bit)]; audit_to_entry_common()
302 entry->rule.mask[j] |= class[j]; audit_to_entry_common()
306 return entry; audit_to_entry_common()
333 static int audit_field_valid(struct audit_entry *entry, struct audit_field *f) audit_field_valid() argument
337 if (entry->rule.listnr != AUDIT_FILTER_TYPE && audit_field_valid()
338 entry->rule.listnr != AUDIT_FILTER_USER) audit_field_valid()
417 struct audit_entry *entry; audit_data_to_entry() local
423 entry = audit_to_entry_common(data); audit_data_to_entry()
424 if (IS_ERR(entry)) audit_data_to_entry()
429 struct audit_field *f = &entry->rule.fields[i]; audit_data_to_entry()
444 entry->rule.pflags |= AUDIT_LOGINUID_LEGACY; audit_data_to_entry()
447 err = audit_field_valid(entry, f); audit_data_to_entry()
473 entry->rule.arch_f = f; audit_data_to_entry()
488 entry->rule.buflen += f->val; audit_data_to_entry()
509 entry->rule.buflen += f->val; audit_data_to_entry()
511 err = audit_to_watch(&entry->rule, str, f->val, f->op); audit_data_to_entry()
521 entry->rule.buflen += f->val; audit_data_to_entry()
523 err = audit_make_tree(&entry->rule, str, f->op); audit_data_to_entry()
529 err = audit_to_inode(&entry->rule, f); audit_data_to_entry()
534 if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN) audit_data_to_entry()
539 entry->rule.buflen += f->val; audit_data_to_entry()
540 entry->rule.filterkey = str; audit_data_to_entry()
545 if (entry->rule.inode_f && entry->rule.inode_f->op == Audit_not_equal) audit_data_to_entry()
546 entry->rule.inode_f = NULL; audit_data_to_entry()
549 return entry; audit_data_to_entry()
552 if (entry->rule.watch) audit_data_to_entry()
553 audit_put_watch(entry->rule.watch); /* matches initial get */ audit_data_to_entry()
554 if (entry->rule.tree) audit_data_to_entry()
555 audit_put_tree(entry->rule.tree); /* that's the temporary one */ audit_data_to_entry()
556 audit_free_rule(entry); audit_data_to_entry()
750 struct audit_entry *entry; audit_dupe_rule() local
755 entry = audit_init_entry(fcount); audit_dupe_rule()
756 if (unlikely(!entry)) audit_dupe_rule()
759 new = &entry->rule; audit_dupe_rule()
806 audit_free_rule(entry); audit_dupe_rule()
816 return entry; audit_dupe_rule()
821 static struct audit_entry *audit_find_rule(struct audit_entry *entry, audit_find_rule() argument
828 if (entry->rule.inode_f) { audit_find_rule()
829 h = audit_hash_ino(entry->rule.inode_f->val); audit_find_rule()
831 } else if (entry->rule.watch) { audit_find_rule()
836 if (!audit_compare_rule(&entry->rule, &e->rule)) { list_for_each_entry()
843 *p = list = &audit_filter_list[entry->rule.listnr];
847 if (!audit_compare_rule(&entry->rule, &e->rule)) { list_for_each_entry()
860 static inline int audit_add_rule(struct audit_entry *entry) audit_add_rule() argument
863 struct audit_watch *watch = entry->rule.watch; audit_add_rule()
864 struct audit_tree *tree = entry->rule.tree; audit_add_rule()
871 if (entry->rule.listnr == AUDIT_FILTER_USER || audit_add_rule()
872 entry->rule.listnr == AUDIT_FILTER_TYPE) audit_add_rule()
877 e = audit_find_rule(entry, &list); audit_add_rule()
889 err = audit_add_watch(&entry->rule, &list); audit_add_rule()
902 err = audit_add_tree_rule(&entry->rule); audit_add_rule()
909 entry->rule.prio = ~0ULL; audit_add_rule()
910 if (entry->rule.listnr == AUDIT_FILTER_EXIT) { audit_add_rule()
911 if (entry->rule.flags & AUDIT_FILTER_PREPEND) audit_add_rule()
912 entry->rule.prio = ++prio_high; audit_add_rule()
914 entry->rule.prio = --prio_low; audit_add_rule()
917 if (entry->rule.flags & AUDIT_FILTER_PREPEND) { audit_add_rule()
918 list_add(&entry->rule.list, audit_add_rule()
919 &audit_rules_list[entry->rule.listnr]); audit_add_rule()
920 list_add_rcu(&entry->list, list); audit_add_rule()
921 entry->rule.flags &= ~AUDIT_FILTER_PREPEND; audit_add_rule()
923 list_add_tail(&entry->rule.list, audit_add_rule()
924 &audit_rules_list[entry->rule.listnr]); audit_add_rule()
925 list_add_tail_rcu(&entry->list, list); audit_add_rule()
931 if (!audit_match_signal(entry)) audit_add_rule()
945 static inline int audit_del_rule(struct audit_entry *entry) audit_del_rule() argument
948 struct audit_watch *watch = entry->rule.watch; audit_del_rule()
949 struct audit_tree *tree = entry->rule.tree; audit_del_rule()
956 if (entry->rule.listnr == AUDIT_FILTER_USER || audit_del_rule()
957 entry->rule.listnr == AUDIT_FILTER_TYPE) audit_del_rule()
962 e = audit_find_rule(entry, &list); audit_del_rule()
983 if (!audit_match_signal(entry)) audit_del_rule()
1060 struct audit_entry *entry; audit_rule_change() local
1062 entry = audit_data_to_entry(data, datasz); audit_rule_change()
1063 if (IS_ERR(entry)) audit_rule_change()
1064 return PTR_ERR(entry); audit_rule_change()
1068 err = audit_add_rule(entry); audit_rule_change()
1069 audit_log_rule_change("add_rule", &entry->rule, !err); audit_rule_change()
1072 err = audit_del_rule(entry); audit_rule_change()
1073 audit_log_rule_change("remove_rule", &entry->rule, !err); audit_rule_change()
1081 audit_free_rule(entry); audit_rule_change()
1365 struct audit_entry *entry = container_of(r, struct audit_entry, rule); update_lsm_rule() local
1380 list_del_rcu(&entry->list); update_lsm_rule()
1385 list_replace_rcu(&entry->list, &nentry->list); update_lsm_rule()
1388 call_rcu(&entry->rcu, audit_free_rule_rcu); update_lsm_rule()
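The entry->rule.mask[] updates above treat the mask as a bitmap over syscall numbers: AUDIT_WORD() selects the 32-bit word and AUDIT_BIT() the bit within it, i.e. the standard nr / 32 and 1 << (nr % 32) split. A worked check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t mask[8] = { 0 };       /* room for 256 syscall numbers */
        unsigned int nr = 37;           /* arbitrary syscall number */

        mask[nr / 32] |= 1u << (nr % 32);

        assert(mask[1] == (1u << 5));   /* 37 = word 1, bit 5 */
        return 0;
    }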
H A Dasync.c107 * pick the first pending entry and run it
111 struct async_entry *entry = async_run_entry_fn() local
119 (long long)entry->cookie, async_run_entry_fn()
120 entry->func, task_pid_nr(current)); async_run_entry_fn()
123 entry->func(entry->data, entry->cookie); async_run_entry_fn()
128 (long long)entry->cookie, async_run_entry_fn()
129 entry->func, async_run_entry_fn()
135 list_del_init(&entry->domain_list); async_run_entry_fn()
136 list_del_init(&entry->global_list); async_run_entry_fn()
138 /* 3) free the entry */ async_run_entry_fn()
139 kfree(entry); async_run_entry_fn()
150 struct async_entry *entry; __async_schedule() local
155 entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC); __async_schedule()
161 if (!entry || atomic_read(&entry_count) > MAX_WORK) { __async_schedule()
162 kfree(entry); __async_schedule()
171 INIT_LIST_HEAD(&entry->domain_list); __async_schedule()
172 INIT_LIST_HEAD(&entry->global_list); __async_schedule()
173 INIT_WORK(&entry->work, async_run_entry_fn); __async_schedule()
174 entry->func = func; __async_schedule()
175 entry->data = data; __async_schedule()
176 entry->domain = domain; __async_schedule()
181 newcookie = entry->cookie = next_cookie++; __async_schedule()
183 list_add_tail(&entry->domain_list, &domain->pending); __async_schedule()
185 list_add_tail(&entry->global_list, &async_global_pending); __async_schedule()
194 queue_work(system_unbound_wq, &entry->work); __async_schedule()
H A Dfutex_compat.c23 fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, fetch_robust_entry() argument
29 *entry = compat_ptr((*uentry) & ~1); fetch_robust_entry()
35 static void __user *futex_uaddr(struct robust_list __user *entry, futex_uaddr() argument
38 compat_uptr_t base = ptr_to_compat(entry); futex_uaddr()
53 struct robust_list __user *entry, *next_entry, *pending; compat_exit_robust_list() local
67 if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) compat_exit_robust_list()
83 while (entry != (struct robust_list __user *) &head->list) { compat_exit_robust_list()
85 * Fetch the next entry in the list before calling compat_exit_robust_list()
89 (compat_uptr_t __user *)&entry->next, &next_pi); compat_exit_robust_list()
94 if (entry != pending) { compat_exit_robust_list()
95 void __user *uaddr = futex_uaddr(entry, futex_offset); compat_exit_robust_list()
103 entry = next_entry; compat_exit_robust_list()
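fetch_robust_entry() above relies on pointer tagging: robust-list entries are word-aligned, so bit 0 of each user pointer is free to carry the PI flag, and the real pointer is recovered with & ~1 before it is dereferenced. A minimal model of the encoding:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t addr   = 0x1000;       /* aligned user address */
        uint32_t tagged = addr | 1;     /* bit 0 set: this entry is PI */

        uint32_t entry = tagged & ~1u;  /* the pointer, as in fetch_robust_entry() */
        unsigned int pi = tagged & 1;   /* the flag */

        assert(entry == addr && pi == 1);
        return 0;
    }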
/linux-4.1.27/drivers/net/wireless/rt2x00/
H A Drt2x00usb.c218 static void rt2x00usb_work_txdone_entry(struct queue_entry *entry) rt2x00usb_work_txdone_entry() argument
228 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) rt2x00usb_work_txdone_entry()
229 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); rt2x00usb_work_txdone_entry()
231 rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN); rt2x00usb_work_txdone_entry()
239 struct queue_entry *entry; rt2x00usb_work_txdone() local
243 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); tx_queue_for_each()
245 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || tx_queue_for_each()
246 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) tx_queue_for_each()
249 rt2x00usb_work_txdone_entry(entry); tx_queue_for_each()
256 struct queue_entry *entry = (struct queue_entry *)urb->context; rt2x00usb_interrupt_txdone() local
257 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00usb_interrupt_txdone()
259 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) rt2x00usb_interrupt_txdone()
265 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00usb_interrupt_txdone()
269 rt2x00lib_dmadone(entry); rt2x00usb_interrupt_txdone()
272 rt2x00dev->ops->lib->tx_dma_done(entry); rt2x00usb_interrupt_txdone()
282 static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data) rt2x00usb_kick_tx_entry() argument
284 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00usb_kick_tx_entry()
286 struct queue_entry_priv_usb *entry_priv = entry->priv_data; rt2x00usb_kick_tx_entry()
290 if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) || rt2x00usb_kick_tx_entry()
291 test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) rt2x00usb_kick_tx_entry()
296 * and urb. Those paddings are not included in skbs. Pass entry rt2x00usb_kick_tx_entry()
299 length = rt2x00dev->ops->lib->get_tx_data_len(entry); rt2x00usb_kick_tx_entry()
301 status = skb_padto(entry->skb, length); rt2x00usb_kick_tx_entry()
305 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00usb_kick_tx_entry()
306 rt2x00lib_dmadone(entry); rt2x00usb_kick_tx_entry()
312 usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint), rt2x00usb_kick_tx_entry()
313 entry->skb->data, length, rt2x00usb_kick_tx_entry()
314 rt2x00usb_interrupt_txdone, entry); rt2x00usb_kick_tx_entry()
320 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00usb_kick_tx_entry()
321 rt2x00lib_dmadone(entry); rt2x00usb_kick_tx_entry()
334 struct queue_entry *entry; rt2x00usb_work_rxdone() local
339 entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE); rt2x00usb_work_rxdone()
341 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || rt2x00usb_work_rxdone()
342 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) rt2x00usb_work_rxdone()
348 skbdesc = get_skb_frame_desc(entry->skb); rt2x00usb_work_rxdone()
350 skbdesc->desc_len = entry->queue->desc_size; rt2x00usb_work_rxdone()
355 rt2x00lib_rxdone(entry, GFP_KERNEL); rt2x00usb_work_rxdone()
361 struct queue_entry *entry = (struct queue_entry *)urb->context; rt2x00usb_interrupt_rxdone() local
362 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00usb_interrupt_rxdone()
364 if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) rt2x00usb_interrupt_rxdone()
370 rt2x00lib_dmadone(entry); rt2x00usb_interrupt_rxdone()
377 if (urb->actual_length < entry->queue->desc_size || urb->status) rt2x00usb_interrupt_rxdone()
378 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00usb_interrupt_rxdone()
387 static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data) rt2x00usb_kick_rx_entry() argument
389 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00usb_kick_rx_entry()
391 struct queue_entry_priv_usb *entry_priv = entry->priv_data; rt2x00usb_kick_rx_entry()
394 if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || rt2x00usb_kick_rx_entry()
395 test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) rt2x00usb_kick_rx_entry()
398 rt2x00lib_dmastart(entry); rt2x00usb_kick_rx_entry()
401 usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint), rt2x00usb_kick_rx_entry()
402 entry->skb->data, entry->skb->len, rt2x00usb_kick_rx_entry()
403 rt2x00usb_interrupt_rxdone, entry); rt2x00usb_kick_rx_entry()
409 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00usb_kick_rx_entry()
410 rt2x00lib_dmadone(entry); rt2x00usb_kick_rx_entry()
444 static bool rt2x00usb_flush_entry(struct queue_entry *entry, void *data) rt2x00usb_flush_entry() argument
446 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00usb_flush_entry()
447 struct queue_entry_priv_usb *entry_priv = entry->priv_data; rt2x00usb_flush_entry()
448 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; rt2x00usb_flush_entry()
450 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) rt2x00usb_flush_entry()
458 if ((entry->queue->qid == QID_BEACON) && rt2x00usb_flush_entry()
527 struct queue_entry *entry; rt2x00usb_dma_timeout() local
529 entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE); rt2x00usb_dma_timeout()
530 return rt2x00queue_dma_timeout(entry); rt2x00usb_dma_timeout()
559 void rt2x00usb_clear_entry(struct queue_entry *entry) rt2x00usb_clear_entry() argument
561 entry->flags = 0; rt2x00usb_clear_entry()
563 if (entry->queue->qid == QID_RX) rt2x00usb_clear_entry()
564 rt2x00usb_kick_rx_entry(entry, NULL); rt2x00usb_clear_entry()
H A Drt2x00dev.c261 void rt2x00lib_dmastart(struct queue_entry *entry) rt2x00lib_dmastart() argument
263 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); rt2x00lib_dmastart()
264 rt2x00queue_index_inc(entry, Q_INDEX); rt2x00lib_dmastart()
268 void rt2x00lib_dmadone(struct queue_entry *entry) rt2x00lib_dmadone() argument
270 set_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags); rt2x00lib_dmadone()
271 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); rt2x00lib_dmadone()
272 rt2x00queue_index_inc(entry, Q_INDEX_DMA_DONE); rt2x00lib_dmadone()
276 static inline int rt2x00lib_txdone_bar_status(struct queue_entry *entry) rt2x00lib_txdone_bar_status() argument
278 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00lib_txdone_bar_status()
279 struct ieee80211_bar *bar = (void *) entry->skb->data; rt2x00lib_txdone_bar_status()
302 if (bar_entry->entry != entry) rt2x00lib_txdone_bar_status()
320 void rt2x00lib_txdone(struct queue_entry *entry, rt2x00lib_txdone() argument
323 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00lib_txdone()
324 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); rt2x00lib_txdone()
325 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); rt2x00lib_txdone()
334 rt2x00queue_unmap_skb(entry); rt2x00lib_txdone()
339 skb_pull(entry->skb, rt2x00dev->extra_tx_headroom); rt2x00lib_txdone()
349 header_length = ieee80211_get_hdrlen_from_skb(entry->skb); rt2x00lib_txdone()
355 rt2x00queue_remove_l2pad(entry->skb, header_length); rt2x00lib_txdone()
364 rt2x00crypto_tx_insert_iv(entry->skb, header_length); rt2x00lib_txdone()
370 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb); rt2x00lib_txdone()
378 rt2x00lib_txdone_bar_status(entry) || rt2x00lib_txdone()
464 ieee80211_tx_status(rt2x00dev->hw, entry->skb); rt2x00lib_txdone()
466 ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb); rt2x00lib_txdone()
468 dev_kfree_skb_any(entry->skb); rt2x00lib_txdone()
471 * Make this entry available for reuse. rt2x00lib_txdone()
473 entry->skb = NULL; rt2x00lib_txdone()
474 entry->flags = 0; rt2x00lib_txdone()
476 rt2x00dev->ops->lib->clear_entry(entry); rt2x00lib_txdone()
478 rt2x00queue_index_inc(entry, Q_INDEX_DONE); rt2x00lib_txdone()
487 spin_lock_bh(&entry->queue->tx_lock); rt2x00lib_txdone()
488 if (!rt2x00queue_threshold(entry->queue)) rt2x00lib_txdone()
489 rt2x00queue_unpause_queue(entry->queue); rt2x00lib_txdone()
490 spin_unlock_bh(&entry->queue->tx_lock); rt2x00lib_txdone()
494 void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status) rt2x00lib_txdone_noinfo() argument
502 rt2x00lib_txdone(entry, &txdesc); rt2x00lib_txdone_noinfo()
547 struct rt2x00_bar_list_entry *entry; rt2x00lib_rxdone_check_ba() local
557 list_for_each_entry_rcu(entry, &rt2x00dev->bar_list, list) { rt2x00lib_rxdone_check_ba()
559 if (ba->start_seq_num != entry->start_seq_num) rt2x00lib_rxdone_check_ba()
566 if (!TID_CHECK(ba->control, entry->control)) rt2x00lib_rxdone_check_ba()
571 if (!ether_addr_equal_64bits(ba->ra, entry->ta)) rt2x00lib_rxdone_check_ba()
574 if (!ether_addr_equal_64bits(ba->ta, entry->ra)) rt2x00lib_rxdone_check_ba()
579 entry->block_acked = 1; rt2x00lib_rxdone_check_ba()
683 void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp) rt2x00lib_rxdone() argument
685 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00lib_rxdone()
696 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) rt2x00lib_rxdone()
703 skb = rt2x00queue_alloc_rxskb(entry, gfp); rt2x00lib_rxdone()
710 rt2x00queue_unmap_skb(entry); rt2x00lib_rxdone()
716 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc); rt2x00lib_rxdone()
723 rxdesc.size > entry->queue->data_size)) { rt2x00lib_rxdone()
725 rxdesc.size, entry->queue->data_size); rt2x00lib_rxdone()
726 dev_kfree_skb(entry->skb); rt2x00lib_rxdone()
734 header_length = ieee80211_get_hdrlen_from_skb(entry->skb); rt2x00lib_rxdone()
744 rt2x00crypto_rx_insert_iv(entry->skb, header_length, rt2x00lib_rxdone()
749 rt2x00queue_remove_l2pad(entry->skb, header_length); rt2x00lib_rxdone()
752 skb_trim(entry->skb, rxdesc.size); rt2x00lib_rxdone()
766 rt2x00lib_rxdone_check_ps(rt2x00dev, entry->skb, &rxdesc); rt2x00lib_rxdone()
772 rt2x00lib_rxdone_check_ba(rt2x00dev, entry->skb, &rxdesc); rt2x00lib_rxdone()
777 rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc); rt2x00lib_rxdone()
779 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb); rt2x00lib_rxdone()
785 rx_status = IEEE80211_SKB_RXCB(entry->skb); rt2x00lib_rxdone()
802 ieee80211_rx_ni(rt2x00dev->hw, entry->skb); rt2x00lib_rxdone()
808 entry->skb = skb; rt2x00lib_rxdone()
811 entry->flags = 0; rt2x00lib_rxdone()
812 rt2x00queue_index_inc(entry, Q_INDEX_DONE); rt2x00lib_rxdone()
815 rt2x00dev->ops->lib->clear_entry(entry); rt2x00lib_rxdone()
909 static void rt2x00lib_channel(struct ieee80211_channel *entry, rt2x00lib_channel() argument
914 entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; rt2x00lib_channel()
915 entry->center_freq = ieee80211_channel_to_frequency(channel, rt2x00lib_channel()
916 entry->band); rt2x00lib_channel()
917 entry->hw_value = value; rt2x00lib_channel()
918 entry->max_power = tx_power; rt2x00lib_channel()
919 entry->max_antenna_gain = 0xff; rt2x00lib_channel()
922 static void rt2x00lib_rate(struct ieee80211_rate *entry, rt2x00lib_rate() argument
925 entry->flags = 0; rt2x00lib_rate()
926 entry->bitrate = rate->bitrate; rt2x00lib_rate()
927 entry->hw_value = index; rt2x00lib_rate()
928 entry->hw_value_short = index; rt2x00lib_rate()
931 entry->flags |= IEEE80211_RATE_SHORT_PREAMBLE; rt2x00lib_rate()
/linux-4.1.27/drivers/net/ethernet/dec/tulip/
H A Dinterrupt.c62 int entry; tulip_refill_rx() local
67 entry = tp->dirty_rx % RX_RING_SIZE; tulip_refill_rx()
68 if (tp->rx_buffers[entry].skb == NULL) { tulip_refill_rx()
72 skb = tp->rx_buffers[entry].skb = tulip_refill_rx()
81 tp->rx_buffers[entry].skb = NULL; tulip_refill_rx()
85 tp->rx_buffers[entry].mapping = mapping; tulip_refill_rx()
87 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping); tulip_refill_rx()
90 tp->rx_ring[entry].status = cpu_to_le32(DescOwned); tulip_refill_rx()
116 int entry = tp->cur_rx % RX_RING_SIZE; tulip_poll() local
131 netdev_dbg(dev, " In tulip_rx(), entry %d %08x\n", tulip_poll()
132 entry, tp->rx_ring[entry].status); tulip_poll()
143 /* If we own the next entry, it is a new packet. Send it up. */ tulip_poll()
144 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { tulip_poll()
145 s32 status = le32_to_cpu(tp->rx_ring[entry].status); tulip_poll()
152 netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n", tulip_poll()
153 entry, status); tulip_poll()
214 tp->rx_buffers[entry].mapping, tulip_poll()
217 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, tulip_poll()
222 tp->rx_buffers[entry].skb->data, tulip_poll()
226 tp->rx_buffers[entry].mapping, tulip_poll()
229 char *temp = skb_put(skb = tp->rx_buffers[entry].skb, tulip_poll()
233 if (tp->rx_buffers[entry].mapping != tulip_poll()
234 le32_to_cpu(tp->rx_ring[entry].buffer1)) { tulip_poll()
237 le32_to_cpu(tp->rx_ring[entry].buffer1), tulip_poll()
238 (unsigned long long)tp->rx_buffers[entry].mapping, tulip_poll()
243 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, tulip_poll()
246 tp->rx_buffers[entry].skb = NULL; tulip_poll()
247 tp->rx_buffers[entry].mapping = 0; tulip_poll()
260 entry = (++tp->cur_rx) % RX_RING_SIZE; tulip_poll()
368 int entry = tp->cur_rx % RX_RING_SIZE; tulip_rx() local
373 netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n", tulip_rx()
374 entry, tp->rx_ring[entry].status); tulip_rx()
375 /* If we own the next entry, it is a new packet. Send it up. */ tulip_rx()
376 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { tulip_rx()
377 s32 status = le32_to_cpu(tp->rx_ring[entry].status); tulip_rx()
381 netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n", tulip_rx()
382 entry, status); tulip_rx()
440 tp->rx_buffers[entry].mapping, tulip_rx()
443 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, tulip_rx()
448 tp->rx_buffers[entry].skb->data, tulip_rx()
452 tp->rx_buffers[entry].mapping, tulip_rx()
455 char *temp = skb_put(skb = tp->rx_buffers[entry].skb, tulip_rx()
459 if (tp->rx_buffers[entry].mapping != tulip_rx()
460 le32_to_cpu(tp->rx_ring[entry].buffer1)) { tulip_rx()
463 le32_to_cpu(tp->rx_ring[entry].buffer1), tulip_rx()
464 (long long)tp->rx_buffers[entry].mapping, tulip_rx()
469 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, tulip_rx()
472 tp->rx_buffers[entry].skb = NULL; tulip_rx()
473 tp->rx_buffers[entry].mapping = 0; tulip_rx()
483 entry = (++tp->cur_rx) % RX_RING_SIZE; tulip_rx()
531 int entry; tulip_interrupt() local
590 int entry = dirty_tx % TX_RING_SIZE; tulip_interrupt() local
591 int status = le32_to_cpu(tp->tx_ring[entry].status); tulip_interrupt()
597 if (tp->tx_buffers[entry].skb == NULL) { tulip_interrupt()
599 if (tp->tx_buffers[entry].mapping) tulip_interrupt()
601 tp->tx_buffers[entry].mapping, tulip_interrupt()
627 tp->tx_buffers[entry].skb->len; tulip_interrupt()
632 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, tulip_interrupt()
633 tp->tx_buffers[entry].skb->len, tulip_interrupt()
637 dev_kfree_skb_irq(tp->tx_buffers[entry].skb); tulip_interrupt()
638 tp->tx_buffers[entry].skb = NULL; tulip_interrupt()
639 tp->tx_buffers[entry].mapping = 0; tulip_interrupt()
780 entry = tp->dirty_rx % RX_RING_SIZE;
781 if (tp->rx_buffers[entry].skb == NULL) {
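The tulip RX path above keeps two free-running counters, cur_rx (next descriptor to inspect) and dirty_rx (next slot to refill), each mapped into the ring with % RX_RING_SIZE, while the DescOwned bit arbitrates each slot between driver and NIC. A toy model of the consume-then-refill loop:

    #include <stdio.h>

    #define RING_SIZE 8

    int main(void)
    {
        unsigned int cur = 0, dirty = 0;
        int nic_owned[RING_SIZE] = { 0 };       /* 1 = DescOwned set */

        /* Consume one completed descriptor, if the driver owns it. */
        unsigned int entry = cur % RING_SIZE;
        if (!nic_owned[entry])
            cur++;                              /* frame processed */

        /* Refill every slot consumed so far, handing it back to the NIC. */
        while (dirty != cur) {
            entry = dirty % RING_SIZE;
            nic_owned[entry] = 1;
            dirty++;
        }

        printf("cur=%u dirty=%u\n", cur, dirty);
        return 0;
    }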
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmfmac/
H A Dfwsignal.c370 * @mac_handle: handle for mac entry determined by firmware.
373 * @suppressed: mac entry is suppressed.
420 * struct brcmf_fws_hanger_item - single entry for tx pending packet.
422 * @state: entry is either free or occupied.
437 * @failed_slotfind: packets for which no free entry could be found.
438 * @slot_pos: last returned item index for a free entry.
646 brcmf_err("entry not in use\n"); brcmf_fws_hanger_poppkt()
667 brcmf_err("entry not in use\n"); brcmf_fws_hanger_mark_suppressed()
743 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_macdesc_lookup() local
749 entry = &fws->desc.nodes[0]; brcmf_fws_macdesc_lookup()
751 if (entry->occupied && !memcmp(entry->ea, ea, ETH_ALEN)) brcmf_fws_macdesc_lookup()
752 return entry; brcmf_fws_macdesc_lookup()
753 entry++; brcmf_fws_macdesc_lookup()
762 struct brcmf_fws_mac_descriptor *entry = &fws->desc.other; brcmf_fws_macdesc_find() local
767 /* Multicast destination, STA and P2P clients get the interface entry. brcmf_fws_macdesc_find()
769 * have their own entry. brcmf_fws_macdesc_find()
772 entry = ifp->fws_desc; brcmf_fws_macdesc_find()
776 entry = brcmf_fws_macdesc_lookup(fws, da); brcmf_fws_macdesc_find()
777 if (IS_ERR(entry)) brcmf_fws_macdesc_find()
778 entry = ifp->fws_desc; brcmf_fws_macdesc_find()
781 return entry; brcmf_fws_macdesc_find()
785 struct brcmf_fws_mac_descriptor *entry, brcmf_fws_macdesc_closed()
794 if (entry->mac_handle) { brcmf_fws_macdesc_closed()
795 if_entry = &fws->desc.iface[entry->interface_id]; brcmf_fws_macdesc_closed()
799 /* an entry is closed when the state is closed and brcmf_fws_macdesc_closed()
802 closed = entry->state == BRCMF_FWS_STATE_CLOSE && brcmf_fws_macdesc_closed()
803 !entry->requested_credit && !entry->requested_packet; brcmf_fws_macdesc_closed()
806 return closed || !(entry->ac_bitmap & BIT(fifo)); brcmf_fws_macdesc_closed()
810 struct brcmf_fws_mac_descriptor *entry, brcmf_fws_macdesc_cleanup()
813 if (entry->occupied && (ifidx == -1 || ifidx == entry->interface_id)) { brcmf_fws_macdesc_cleanup()
814 brcmf_fws_psq_flush(fws, &entry->psq, ifidx); brcmf_fws_macdesc_cleanup()
815 entry->occupied = !!(entry->psq.len); brcmf_fws_macdesc_cleanup()
872 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; brcmf_fws_hdrpush() local
880 entry->name, brcmf_skb_if_flags_get_field(skb, INDEX), brcmf_fws_hdrpush()
883 if (entry->send_tim_signal) brcmf_fws_hdrpush()
905 if (entry->send_tim_signal) { brcmf_fws_hdrpush()
906 entry->send_tim_signal = 0; brcmf_fws_hdrpush()
909 wlh[2] = entry->mac_handle; brcmf_fws_hdrpush()
910 wlh[3] = entry->traffic_pending_bmp; brcmf_fws_hdrpush()
912 entry->mac_handle, entry->traffic_pending_bmp); brcmf_fws_hdrpush()
914 entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; brcmf_fws_hdrpush()
923 struct brcmf_fws_mac_descriptor *entry, brcmf_fws_tim_update()
934 if (brcmu_pktq_mlen(&entry->psq, 3 << (fifo * 2)) == 0) brcmf_fws_tim_update()
935 entry->traffic_pending_bmp &= ~NBITVAL(fifo); brcmf_fws_tim_update()
937 entry->traffic_pending_bmp |= NBITVAL(fifo); brcmf_fws_tim_update()
939 entry->send_tim_signal = false; brcmf_fws_tim_update()
940 if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) brcmf_fws_tim_update()
941 entry->send_tim_signal = true; brcmf_fws_tim_update()
942 if (send_immediately && entry->send_tim_signal && brcmf_fws_tim_update()
943 entry->state == BRCMF_FWS_STATE_CLOSE) { brcmf_fws_tim_update()
955 skcb->mac = entry; brcmf_fws_tim_update()
1001 struct brcmf_fws_mac_descriptor *entry, *existing; brcmf_fws_macdesc_indicate() local
1010 entry = &fws->desc.nodes[mac_handle & 0x1F]; brcmf_fws_macdesc_indicate()
1012 if (entry->occupied) { brcmf_fws_macdesc_indicate()
1014 entry->name, addr); brcmf_fws_macdesc_indicate()
1016 brcmf_fws_macdesc_cleanup(fws, entry, -1); brcmf_fws_macdesc_indicate()
1017 brcmf_fws_macdesc_deinit(entry); brcmf_fws_macdesc_indicate()
1026 if (!entry->occupied) { brcmf_fws_macdesc_indicate()
1028 entry->mac_handle = mac_handle; brcmf_fws_macdesc_indicate()
1029 brcmf_fws_macdesc_init(entry, addr, ifidx); brcmf_fws_macdesc_indicate()
1030 brcmf_fws_macdesc_set_name(fws, entry); brcmf_fws_macdesc_indicate()
1031 brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT, brcmf_fws_macdesc_indicate()
1034 brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr); brcmf_fws_macdesc_indicate()
1039 if (entry != existing) { brcmf_fws_macdesc_indicate()
1042 memcpy(entry, existing, brcmf_fws_macdesc_indicate()
1044 entry->mac_handle = mac_handle; brcmf_fws_macdesc_indicate()
1046 brcmf_fws_macdesc_set_name(fws, entry); brcmf_fws_macdesc_indicate()
1048 brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name, brcmf_fws_macdesc_indicate()
1052 WARN_ON(entry->mac_handle != mac_handle); brcmf_fws_macdesc_indicate()
1062 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_macdesc_state_indicate() local
1067 entry = &fws->desc.nodes[mac_handle & 0x1F]; brcmf_fws_macdesc_state_indicate()
1068 if (!entry->occupied) { brcmf_fws_macdesc_state_indicate()
1074 entry->requested_credit = 0; brcmf_fws_macdesc_state_indicate()
1075 entry->requested_packet = 0; brcmf_fws_macdesc_state_indicate()
1077 entry->state = BRCMF_FWS_STATE_OPEN; brcmf_fws_macdesc_state_indicate()
1080 entry->state = BRCMF_FWS_STATE_CLOSE; brcmf_fws_macdesc_state_indicate()
1081 brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BK, false); brcmf_fws_macdesc_state_indicate()
1082 brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BE, false); brcmf_fws_macdesc_state_indicate()
1083 brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VI, false); brcmf_fws_macdesc_state_indicate()
1084 brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true); brcmf_fws_macdesc_state_indicate()
1094 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_interface_state_indicate() local
1105 entry = &fws->desc.iface[ifidx]; brcmf_fws_interface_state_indicate()
1106 if (!entry->occupied) { brcmf_fws_interface_state_indicate()
1112 entry->name); brcmf_fws_interface_state_indicate()
1116 entry->state = BRCMF_FWS_STATE_OPEN; brcmf_fws_interface_state_indicate()
1120 entry->state = BRCMF_FWS_STATE_CLOSE; brcmf_fws_interface_state_indicate()
1139 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_request_indicate() local
1141 entry = &fws->desc.nodes[data[1] & 0x1F]; brcmf_fws_request_indicate()
1142 if (!entry->occupied) { brcmf_fws_request_indicate()
1151 brcmf_fws_get_tlv_name(type), type, entry->name, brcmf_fws_request_indicate()
1155 entry->requested_credit = data[0]; brcmf_fws_request_indicate()
1157 entry->requested_packet = data[0]; brcmf_fws_request_indicate()
1159 entry->ac_bitmap = data[2]; brcmf_fws_request_indicate()
1165 brcmf_fws_macdesc_use_req_credit(struct brcmf_fws_mac_descriptor *entry, brcmf_fws_macdesc_use_req_credit() argument
1168 if (entry->requested_credit > 0) { brcmf_fws_macdesc_use_req_credit()
1169 entry->requested_credit--; brcmf_fws_macdesc_use_req_credit()
1172 if (entry->state != BRCMF_FWS_STATE_CLOSE) brcmf_fws_macdesc_use_req_credit()
1174 } else if (entry->requested_packet > 0) { brcmf_fws_macdesc_use_req_credit()
1175 entry->requested_packet--; brcmf_fws_macdesc_use_req_credit()
1178 if (entry->state != BRCMF_FWS_STATE_CLOSE) brcmf_fws_macdesc_use_req_credit()
1188 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; brcmf_fws_macdesc_return_req_credit() local
1191 (entry->state == BRCMF_FWS_STATE_CLOSE)) brcmf_fws_macdesc_return_req_credit()
1192 entry->requested_credit++; brcmf_fws_macdesc_return_req_credit()
1245 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_enq() local
1253 entry = brcmf_skbcb(p)->mac; brcmf_fws_enq()
1254 if (entry == NULL) { brcmf_fws_enq()
1266 pq = &entry->psq; brcmf_fws_enq()
1311 } else if (brcmu_pktq_penq(&entry->psq, prec, p) == NULL) { brcmf_fws_enq()
1327 brcmf_fws_tim_update(fws, entry, fifo, true); brcmf_fws_enq()
1328 brcmf_fws_flow_control_check(fws, &entry->psq, brcmf_fws_enq()
1336 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_deq() local
1349 entry = &table[(node_pos + i) % num_nodes]; brcmf_fws_deq()
1350 if (!entry->occupied || brcmf_fws_deq()
1351 brcmf_fws_macdesc_closed(fws, entry, fifo)) brcmf_fws_deq()
1354 if (entry->suppressed) brcmf_fws_deq()
1358 p = brcmu_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out); brcmf_fws_deq()
1360 if (entry->suppressed) { brcmf_fws_deq()
1361 if (entry->suppr_transit_count) brcmf_fws_deq()
1363 entry->suppressed = false; brcmf_fws_deq()
1364 p = brcmu_pktq_mdeq(&entry->psq, brcmf_fws_deq()
1371 brcmf_fws_macdesc_use_req_credit(entry, p); brcmf_fws_deq()
1375 brcmf_fws_flow_control_check(fws, &entry->psq, brcmf_fws_deq()
1383 brcmf_fws_tim_update(fws, entry, fifo, false); brcmf_fws_deq()
1404 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; brcmf_fws_txstatus_suppressed() local
1411 if (!entry->suppressed) { brcmf_fws_txstatus_suppressed()
1412 entry->suppressed = true; brcmf_fws_txstatus_suppressed()
1413 entry->suppr_transit_count = entry->transit_count; brcmf_fws_txstatus_suppressed()
1415 entry->name, entry->transit_count); brcmf_fws_txstatus_suppressed()
1418 entry->generation = genbit; brcmf_fws_txstatus_suppressed()
1450 struct brcmf_fws_mac_descriptor *entry = NULL; brcmf_fws_txs_process() local
1478 entry = skcb->mac; brcmf_fws_txs_process()
1479 if (WARN_ON(!entry)) { brcmf_fws_txs_process()
1483 entry->transit_count--; brcmf_fws_txs_process()
1484 if (entry->suppressed && entry->suppr_transit_count) brcmf_fws_txs_process()
1485 entry->suppr_transit_count--; brcmf_fws_txs_process()
1487 brcmf_dbg(DATA, "%s flags %d htod %X seq %X\n", entry->name, flags, brcmf_fws_txs_process()
1747 struct brcmf_fws_mac_descriptor *entry = skcb->mac; brcmf_fws_precommit_skb() local
1751 brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation); brcmf_fws_precommit_skb()
1767 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_rollback_toq() local
1772 entry = brcmf_skbcb(skb)->mac; brcmf_fws_rollback_toq()
1773 if (entry->occupied) { brcmf_fws_rollback_toq()
1778 pktout = brcmu_pktq_penq_head(&entry->psq, qidx, skb); brcmf_fws_rollback_toq()
1780 brcmf_err("%s queue %d full\n", entry->name, qidx); brcmf_fws_rollback_toq()
1784 brcmf_err("%s entry removed\n", entry->name); brcmf_fws_rollback_toq()
1828 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_commit_skb() local
1833 entry = skcb->mac; brcmf_fws_commit_skb()
1834 if (IS_ERR(entry)) brcmf_fws_commit_skb()
1835 return PTR_ERR(entry); brcmf_fws_commit_skb()
1838 entry->transit_count++; brcmf_fws_commit_skb()
1839 if (entry->suppressed) brcmf_fws_commit_skb()
1840 entry->suppr_transit_count++; brcmf_fws_commit_skb()
1845 brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name, brcmf_fws_commit_skb()
1848 entry->transit_count--; brcmf_fws_commit_skb()
1849 if (entry->suppressed) brcmf_fws_commit_skb()
1850 entry->suppr_transit_count--; brcmf_fws_commit_skb()
1941 struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc; brcmf_fws_reset_interface() local
1944 if (!entry) brcmf_fws_reset_interface()
1947 brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx); brcmf_fws_reset_interface()
1953 struct brcmf_fws_mac_descriptor *entry; brcmf_fws_add_interface() local
1958 entry = &fws->desc.iface[ifp->ifidx]; brcmf_fws_add_interface()
1959 ifp->fws_desc = entry; brcmf_fws_add_interface()
1960 brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx); brcmf_fws_add_interface()
1961 brcmf_fws_macdesc_set_name(fws, entry); brcmf_fws_add_interface()
1962 brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT, brcmf_fws_add_interface()
1964 brcmf_dbg(TRACE, "added %s\n", entry->name); brcmf_fws_add_interface()
1969 struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc; brcmf_fws_del_interface() local
1971 if (!entry) brcmf_fws_del_interface()
1976 brcmf_dbg(TRACE, "deleting %s\n", entry->name); brcmf_fws_del_interface()
1977 brcmf_fws_macdesc_deinit(entry); brcmf_fws_del_interface()
784 brcmf_fws_macdesc_closed(struct brcmf_fws_info *fws, struct brcmf_fws_mac_descriptor *entry, int fifo) brcmf_fws_macdesc_closed() argument
809 brcmf_fws_macdesc_cleanup(struct brcmf_fws_info *fws, struct brcmf_fws_mac_descriptor *entry, int ifidx) brcmf_fws_macdesc_cleanup() argument
922 brcmf_fws_tim_update(struct brcmf_fws_info *fws, struct brcmf_fws_mac_descriptor *entry, int fifo, bool send_immediately) brcmf_fws_tim_update() argument
H A Ddebug.c84 struct brcmf_debugfs_entry *entry = inode->i_private; brcmf_debugfs_entry_open() local
86 return single_open(f, entry->read, entry->drvr->bus_if->dev); brcmf_debugfs_entry_open()
101 struct brcmf_debugfs_entry *entry; brcmf_debugfs_add_entry() local
106 entry = devm_kzalloc(drvr->bus_if->dev, sizeof(*entry), GFP_KERNEL); brcmf_debugfs_add_entry()
107 if (!entry) brcmf_debugfs_add_entry()
110 entry->read = read_fn; brcmf_debugfs_add_entry()
111 entry->drvr = drvr; brcmf_debugfs_add_entry()
113 dentry = debugfs_create_file(fn, S_IRUGO, dentry, entry, brcmf_debugfs_add_entry()
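These debug.c hits show the standard debugfs idiom: stash a read callback in a per-file struct, pass that struct as the inode's private data to debugfs_create_file(), and have the open handler forward it to single_open(). A minimal hedged sketch of the same shape (module, file, and show function names here are illustrative, not brcmfmac's):

    #include <linux/module.h>
    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    static struct dentry *demo_dir;

    /* the "read_fn" equivalent: emits the file contents via seq_file */
    static int demo_show(struct seq_file *s, void *unused)
    {
            seq_printf(s, "hello from debugfs\n");
            return 0;
    }

    /* like brcmf_debugfs_entry_open(): recover private data, defer to single_open() */
    static int demo_open(struct inode *inode, struct file *f)
    {
            return single_open(f, demo_show, inode->i_private);
    }

    static const struct file_operations demo_fops = {
            .owner   = THIS_MODULE,
            .open    = demo_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    static int __init demo_init(void)
    {
            demo_dir = debugfs_create_dir("demo", NULL);
            debugfs_create_file("state", S_IRUGO, demo_dir, NULL, &demo_fops);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            debugfs_remove_recursive(demo_dir);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");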
/linux-4.1.27/fs/nilfs2/
H A Ddat.c91 struct nilfs_dat_entry *entry; nilfs_dat_commit_alloc() local
95 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_commit_alloc()
97 entry->de_start = cpu_to_le64(NILFS_CNO_MIN); nilfs_dat_commit_alloc()
98 entry->de_end = cpu_to_le64(NILFS_CNO_MAX); nilfs_dat_commit_alloc()
99 entry->de_blocknr = cpu_to_le64(0); nilfs_dat_commit_alloc()
115 struct nilfs_dat_entry *entry; nilfs_dat_commit_free() local
119 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_commit_free()
121 entry->de_start = cpu_to_le64(NILFS_CNO_MIN); nilfs_dat_commit_free()
122 entry->de_end = cpu_to_le64(NILFS_CNO_MIN); nilfs_dat_commit_free()
123 entry->de_blocknr = cpu_to_le64(0); nilfs_dat_commit_free()
142 struct nilfs_dat_entry *entry; nilfs_dat_commit_start() local
146 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_commit_start()
148 entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat)); nilfs_dat_commit_start()
149 entry->de_blocknr = cpu_to_le64(blocknr); nilfs_dat_commit_start()
157 struct nilfs_dat_entry *entry; nilfs_dat_prepare_end() local
170 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_prepare_end()
172 start = le64_to_cpu(entry->de_start); nilfs_dat_prepare_end()
173 blocknr = le64_to_cpu(entry->de_blocknr); nilfs_dat_prepare_end()
190 struct nilfs_dat_entry *entry; nilfs_dat_commit_end() local
196 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_commit_end()
198 end = start = le64_to_cpu(entry->de_start); nilfs_dat_commit_end()
203 entry->de_end = cpu_to_le64(end); nilfs_dat_commit_end()
204 blocknr = le64_to_cpu(entry->de_blocknr); nilfs_dat_commit_end()
215 struct nilfs_dat_entry *entry; nilfs_dat_abort_end() local
221 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_abort_end()
223 start = le64_to_cpu(entry->de_start); nilfs_dat_abort_end()
224 blocknr = le64_to_cpu(entry->de_blocknr); nilfs_dat_abort_end()
331 struct nilfs_dat_entry *entry; nilfs_dat_move() local
344 * uncommitted block number, this makes a copy of the entry nilfs_dat_move()
356 entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); nilfs_dat_move()
357 if (unlikely(entry->de_blocknr == cpu_to_le64(0))) { nilfs_dat_move()
360 (unsigned long long)le64_to_cpu(entry->de_start), nilfs_dat_move()
361 (unsigned long long)le64_to_cpu(entry->de_end)); nilfs_dat_move()
367 entry->de_blocknr = cpu_to_le64(blocknr); nilfs_dat_move()
400 struct nilfs_dat_entry *entry; nilfs_dat_translate() local
419 entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); nilfs_dat_translate()
420 blocknr = le64_to_cpu(entry->de_blocknr); nilfs_dat_translate()
437 struct nilfs_dat_entry *entry; nilfs_dat_get_vinfo() local
459 entry = nilfs_palloc_block_get_entry( nilfs_dat_get_vinfo()
461 vinfo->vi_start = le64_to_cpu(entry->de_start); nilfs_dat_get_vinfo()
462 vinfo->vi_end = le64_to_cpu(entry->de_end); nilfs_dat_get_vinfo()
463 vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr); nilfs_dat_get_vinfo()
475 * @entry_size: size of a dat entry
489 "NILFS: too large DAT entry size: %zu bytes.\n", nilfs_dat_read()
494 "NILFS: too small DAT entry size: %zu bytes.\n", nilfs_dat_read()
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvif/
H A Dlist.h56 * struct list_head entry;
70 * list_add(&foo->entry, &bar.list_of_foos);
74 * list_del(&foo->entry);
84 * list_for_each_entry(iterator, &bar.list_of_foos, entry) {
93 * list_for_each_entry_safe(iterator, next, &bar.list_of_foos, entry) {
95 * list_del(&iterator->entry);
134 __list_add(struct list_head *entry, __list_add() argument
137 next->prev = entry; __list_add()
138 entry->next = next; __list_add()
139 entry->prev = prev; __list_add()
140 prev->next = entry; __list_add()
153 * list_add(&newfoo->entry, &bar->list_of_foos);
155 * @param entry The new element to prepend to the list.
159 list_add(struct list_head *entry, struct list_head *head) list_add() argument
161 __list_add(entry, head, head->next); list_add()
174 * list_add_tail(&newfoo->entry, &bar->list_of_foos);
176 * @param entry The new element to append to the list.
180 list_add_tail(struct list_head *entry, struct list_head *head) list_add_tail() argument
182 __list_add(entry, head->prev, head); list_add_tail()
202 * list_del(&foo->entry);
204 * @param entry The element to remove.
207 list_del(struct list_head *entry) list_del() argument
209 __list_del(entry->prev, entry->next); list_del()
213 list_del_init(struct list_head *entry) list_del_init() argument
215 __list_del(entry->prev, entry->next); list_del_init()
216 INIT_LIST_HEAD(entry); list_del_init()
245 * f = container_of(&foo->entry, struct foo, entry);
265 * Retrieve the first list entry for the given list pointer.
280 * Retrieve the last list entry for the given list pointer.
302 * list_for_each_entry(iterator, &bar->list_of_foos, entry) {
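The list.h hits above document the intrusive-list pattern end to end: embed a struct list_head in the element type, link with list_add()/list_add_tail(), and iterate with list_for_each_entry(). A short usage sketch against this header's API (struct foo and its fields are ours, echoing the header's own examples):

    #include <stdio.h>
    #include <stdlib.h>
    #include "list.h"                 /* the header above */

    struct foo {
            int value;
            struct list_head entry;   /* link lives inside the element */
    };

    int main(void)
    {
            struct list_head bar;     /* list head, not an element */
            struct foo *f, *tmp;

            INIT_LIST_HEAD(&bar);

            f = malloc(sizeof(*f));
            f->value = 42;
            list_add_tail(&f->entry, &bar);       /* append to the tail */

            list_for_each_entry(f, &bar, entry)   /* walk elements, not links */
                    printf("%d\n", f->value);

            /* _safe variant allows deleting the current entry while walking */
            list_for_each_entry_safe(f, tmp, &bar, entry) {
                    list_del(&f->entry);
                    free(f);
            }
            return 0;
    }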
/linux-4.1.27/drivers/parisc/
H A Dpdc_stable.c98 /* This struct defines what we need to deal with a parisc pdc path entry */
100 rwlock_t rw_lock; /* to protect path entry access */
101 short ready; /* entry record is valid if != 0 */
102 unsigned long addr; /* entry address in stable storage */
103 char *name; /* entry name */
111 ssize_t (*show)(struct pdcspath_entry *entry, char *buf);
112 ssize_t (*store)(struct pdcspath_entry *entry, const char *buf, size_t count);
140 * pdcspath_fetch - This function populates the path entry structs.
141 * @entry: A pointer to an allocated pdcspath_entry.
149 * This function expects to be called with @entry->rw_lock write-hold.
152 pdcspath_fetch(struct pdcspath_entry *entry) pdcspath_fetch() argument
156 if (!entry) pdcspath_fetch()
159 devpath = &entry->devpath; pdcspath_fetch()
162 entry, devpath, entry->addr); pdcspath_fetch()
165 if (pdc_stable_read(entry->addr, devpath, sizeof(*devpath)) != PDC_OK) pdcspath_fetch()
171 entry->dev = hwpath_to_device((struct hardware_path *)devpath); pdcspath_fetch()
173 entry->ready = 1; pdcspath_fetch()
175 DPRINTK("%s: device: 0x%p\n", __func__, entry->dev); pdcspath_fetch()
182 * @entry: A pointer to an allocated pdcspath_entry.
190 * This function expects to be called with @entry->rw_lock write-hold.
193 pdcspath_store(struct pdcspath_entry *entry) pdcspath_store() argument
197 BUG_ON(!entry); pdcspath_store()
199 devpath = &entry->devpath; pdcspath_store()
204 if (!entry->ready) { pdcspath_store()
206 BUG_ON(!entry->dev); pdcspath_store()
207 device_to_hwpath(entry->dev, (struct hardware_path *)devpath); pdcspath_store()
212 entry, devpath, entry->addr); pdcspath_store()
215 if (pdc_stable_write(entry->addr, devpath, sizeof(*devpath)) != PDC_OK) pdcspath_store()
221 entry->ready = 2; pdcspath_store()
223 DPRINTK("%s: device: 0x%p\n", __func__, entry->dev); pdcspath_store()
228 * @entry: An allocated and populated pdcspath_entry struct.
234 pdcspath_hwpath_read(struct pdcspath_entry *entry, char *buf) pdcspath_hwpath_read() argument
240 if (!entry || !buf) pdcspath_hwpath_read()
243 read_lock(&entry->rw_lock); pdcspath_hwpath_read()
244 devpath = &entry->devpath; pdcspath_hwpath_read()
245 i = entry->ready; pdcspath_hwpath_read()
246 read_unlock(&entry->rw_lock); pdcspath_hwpath_read()
248 if (!i) /* entry is not ready */ pdcspath_hwpath_read()
263 * @entry: An allocated and populated pdcspath_entry struct.
277 pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t count) pdcspath_hwpath_write() argument
285 if (!entry || !buf || !count) pdcspath_hwpath_write()
322 "hardware path: %s\n", __func__, entry->name, buf); pdcspath_hwpath_write()
327 write_lock(&entry->rw_lock); pdcspath_hwpath_write()
328 entry->ready = 0; pdcspath_hwpath_write()
329 entry->dev = dev; pdcspath_hwpath_write()
332 pdcspath_store(entry); pdcspath_hwpath_write()
335 sysfs_remove_link(&entry->kobj, "device"); pdcspath_hwpath_write()
336 ret = sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device"); pdcspath_hwpath_write()
339 write_unlock(&entry->rw_lock); pdcspath_hwpath_write()
342 entry->name, buf); pdcspath_hwpath_write()
349 * @entry: An allocated and populated pdcspath_entry struct.
355 pdcspath_layer_read(struct pdcspath_entry *entry, char *buf) pdcspath_layer_read() argument
361 if (!entry || !buf) pdcspath_layer_read()
364 read_lock(&entry->rw_lock); pdcspath_layer_read()
365 devpath = &entry->devpath; pdcspath_layer_read()
366 i = entry->ready; pdcspath_layer_read()
367 read_unlock(&entry->rw_lock); pdcspath_layer_read()
369 if (!i) /* entry is not ready */ pdcspath_layer_read()
382 * @entry: An allocated and populated pdcspath_entry struct.
393 pdcspath_layer_write(struct pdcspath_entry *entry, const char *buf, size_t count) pdcspath_layer_write() argument
399 if (!entry || !buf || !count) pdcspath_layer_write()
425 write_lock(&entry->rw_lock); pdcspath_layer_write()
429 memcpy(&entry->devpath.layers, &layers, sizeof(layers)); pdcspath_layer_write()
432 pdcspath_store(entry); pdcspath_layer_write()
433 write_unlock(&entry->rw_lock); pdcspath_layer_write()
436 entry->name, buf); pdcspath_layer_write()
450 struct pdcspath_entry *entry = to_pdcspath_entry(kobj); pdcspath_attr_show() local
455 ret = pdcs_attr->show(entry, buf); pdcspath_attr_show()
471 struct pdcspath_entry *entry = to_pdcspath_entry(kobj); pdcspath_attr_store() local
479 ret = pdcs_attr->store(entry, buf, count); pdcspath_attr_store()
558 /* Current flags are stored in primary boot path entry */ pdcs_auto_read()
604 /* Current flags are stored in primary boot path entry */ pdcs_timer_read()
774 /* Current flags are stored in primary boot path entry */ pdcs_auto_write()
799 /* Change the path entry flags first */ pdcs_auto_write()
967 * It creates kobjects corresponding to each path entry with nice sysfs
977 struct pdcspath_entry *entry; pdcs_register_pathentries() local
981 for (i = 0; (entry = pdcspath_entries[i]); i++) pdcs_register_pathentries()
982 rwlock_init(&entry->rw_lock); pdcs_register_pathentries()
984 for (i = 0; (entry = pdcspath_entries[i]); i++) { pdcs_register_pathentries()
985 write_lock(&entry->rw_lock); pdcs_register_pathentries()
986 err = pdcspath_fetch(entry); pdcs_register_pathentries()
987 write_unlock(&entry->rw_lock); pdcs_register_pathentries()
992 entry->kobj.kset = paths_kset; pdcs_register_pathentries()
993 err = kobject_init_and_add(&entry->kobj, &ktype_pdcspath, NULL, pdcs_register_pathentries()
994 "%s", entry->name); pdcs_register_pathentries()
999 write_lock(&entry->rw_lock); pdcs_register_pathentries()
1000 entry->ready = 2; pdcs_register_pathentries()
1003 if (entry->dev) { pdcs_register_pathentries()
1004 err = sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device"); pdcs_register_pathentries()
1008 write_unlock(&entry->rw_lock); pdcs_register_pathentries()
1009 kobject_uevent(&entry->kobj, KOBJ_ADD); pdcs_register_pathentries()
1022 struct pdcspath_entry *entry; pdcs_unregister_pathentries() local
1024 for (i = 0; (entry = pdcspath_entries[i]); i++) { pdcs_unregister_pathentries()
1025 read_lock(&entry->rw_lock); pdcs_unregister_pathentries()
1026 if (entry->ready >= 2) pdcs_unregister_pathentries()
1027 kobject_put(&entry->kobj); pdcs_unregister_pathentries()
1028 read_unlock(&entry->rw_lock); pdcs_unregister_pathentries()
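Every pdc_stable accessor above follows one locking discipline: show() paths snapshot fields under read_lock() and format outside the lock; store() paths invalidate, mutate, and re-validate under write_lock(). A condensed sketch of that discipline (struct and fields abridged from the entries above):

    #include <linux/spinlock.h>   /* rwlock_t */
    #include <linux/errno.h>
    #include <linux/kernel.h>

    struct demo_entry {
            rwlock_t rw_lock;     /* protects everything below */
            int ready;            /* 0 = record invalid */
            unsigned long value;
    };

    /* reader: copy under the lock, format after dropping it */
    static ssize_t demo_read(struct demo_entry *e, char *buf)
    {
            unsigned long v;
            int ready;

            read_lock(&e->rw_lock);
            ready = e->ready;
            v = e->value;
            read_unlock(&e->rw_lock);

            if (!ready)           /* entry is not ready */
                    return -ENODATA;
            return sprintf(buf, "%lu\n", v);
    }

    /* writer: mark not-ready, update, mark ready, all in one critical section */
    static void demo_write(struct demo_entry *e, unsigned long v)
    {
            write_lock(&e->rw_lock);
            e->ready = 0;
            e->value = v;
            e->ready = 1;
            write_unlock(&e->rw_lock);
    }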
/linux-4.1.27/drivers/s390/block/
H A Ddcssblk.c88 struct segment_info *entry, *temp; dcssblk_release_segment() local
91 list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) { dcssblk_release_segment()
92 list_del(&entry->lh); dcssblk_release_segment()
93 kfree(entry); dcssblk_release_segment()
109 struct dcssblk_dev_info *entry; dcssblk_assign_free_minor() local
116 list_for_each_entry(entry, &dcssblk_devices, lh) dcssblk_assign_free_minor()
117 if (minor == entry->gd->first_minor) dcssblk_assign_free_minor()
135 struct dcssblk_dev_info *entry; dcssblk_get_device_by_name() local
137 list_for_each_entry(entry, &dcssblk_devices, lh) { dcssblk_get_device_by_name()
138 if (!strcmp(name, entry->segment_name)) { dcssblk_get_device_by_name()
139 return entry; dcssblk_get_device_by_name()
154 struct segment_info *entry; dcssblk_get_segment_by_name() local
157 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_get_segment_by_name()
158 if (!strcmp(name, entry->segment_name)) dcssblk_get_segment_by_name()
159 return entry; dcssblk_get_segment_by_name()
172 struct segment_info *entry; dcssblk_find_highest_addr() local
175 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_find_highest_addr()
176 if (highest_addr < entry->end) dcssblk_find_highest_addr()
177 highest_addr = entry->end; dcssblk_find_highest_addr()
190 struct segment_info *entry; dcssblk_find_lowest_addr() local
194 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_find_lowest_addr()
196 lowest_addr = entry->start; dcssblk_find_lowest_addr()
199 if (lowest_addr > entry->start) dcssblk_find_lowest_addr()
200 lowest_addr = entry->start; dcssblk_find_lowest_addr()
213 struct segment_info *sort_list, *entry, temp; dcssblk_is_continuous() local
224 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_is_continuous()
225 memcpy(&sort_list[i], entry, sizeof(struct segment_info)); dcssblk_is_continuous()
324 struct segment_info *entry, *temp; dcssblk_shared_store() local
337 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_shared_store()
338 rc = segment_modify_shared(entry->segment_name, dcssblk_shared_store()
362 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_shared_store()
363 rc = segment_modify_shared(entry->segment_name, dcssblk_shared_store()
383 temp = entry; dcssblk_shared_store()
384 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_shared_store()
385 if (entry != temp) dcssblk_shared_store()
386 segment_unload(entry->segment_name); dcssblk_shared_store()
412 * undone by storing a non-true value to this entry.
428 struct segment_info *entry; dcssblk_save_store() local
440 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_save_store()
441 if (entry->segment_type == SEG_TYPE_EN || dcssblk_save_store()
442 entry->segment_type == SEG_TYPE_SN) dcssblk_save_store()
445 entry->segment_name); dcssblk_save_store()
447 segment_save(entry->segment_name); dcssblk_save_store()
486 struct segment_info *entry; dcssblk_seglist_show() local
492 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_seglist_show()
493 strcpy(&buf[i], entry->segment_name); dcssblk_seglist_show()
494 i += strlen(entry->segment_name); dcssblk_seglist_show()
712 struct segment_info *entry; dcssblk_remove_store() local
760 list_for_each_entry(entry, &dev_info->seg_list, lh) dcssblk_remove_store()
761 segment_unload(entry->segment_name); dcssblk_remove_store()
794 struct segment_info *entry; dcssblk_release() local
805 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_release()
806 if (entry->segment_type == SEG_TYPE_EN || dcssblk_release()
807 entry->segment_type == SEG_TYPE_SN) dcssblk_release()
809 " be saved\n", entry->segment_name); dcssblk_release()
811 segment_save(entry->segment_name); dcssblk_release()
973 struct segment_info *entry; dcssblk_restore() local
978 list_for_each_entry(entry, &dev_info->seg_list, lh) { dcssblk_restore()
979 segment_unload(entry->segment_name); dcssblk_restore()
980 rc = segment_load(entry->segment_name, SEGMENT_SHARED, dcssblk_restore()
984 segment_warning(rc, entry->segment_name); dcssblk_restore()
987 if (start != entry->start || end != entry->end) { dcssblk_restore()
990 entry->segment_name); dcssblk_restore()
/linux-4.1.27/arch/sh/kernel/cpu/sh5/
H A DMakefile4 obj-y := entry.o probe.o switchto.o
/linux-4.1.27/arch/mips/include/asm/
H A Dtlbmisc.h5 * - add_wired_entry() add a fixed TLB entry, and move wired register
H A Deva.h14 #include <kernel-entry-init.h>
24 * their kernel-entry-init.h header. This macro usually does the
/linux-4.1.27/arch/arm/vfp/
H A DMakefile15 vfp-$(CONFIG_VFP) += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
/linux-4.1.27/arch/blackfin/mach-common/
H A DMakefile6 cache.o cache-c.o entry.o head.o \
/linux-4.1.27/arch/c6x/kernel/
H A DMakefile9 obj-y += switch_to.o entry.o vectors.o c6x_ksyms.o
/linux-4.1.27/arch/cris/arch-v10/kernel/
H A DMakefile8 obj-y := entry.o traps.o shadows.o debugport.o irq.o \
/linux-4.1.27/arch/cris/arch-v32/kernel/
H A DMakefile8 obj-y := entry.o traps.o irq.o debugport.o \
/linux-4.1.27/arch/m32r/kernel/
H A DMakefile7 obj-y := process.o entry.o traps.o align.o irq.o setup.o time.o \
/linux-4.1.27/arch/m68k/68000/
H A DMakefile12 obj-y += entry.o ints.o timers.o
/linux-4.1.27/arch/arm/mach-rpc/
H A Dfiq.S4 #include <mach/entry-macro.S>
/linux-4.1.27/scripts/kconfig/
H A Dlist.h34 * list_entry - get the struct for this entry
54 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
76 * Insert a new entry between two known consecutive entries.
92 * list_add_tail - add a new entry
93 * @new: new entry to be added
96 * Insert a new entry before the specified head.
105 * Delete a list entry by making the prev/next entries
120 * list_del - deletes entry from list.
121 * @entry: the element to delete from the list.
122 * Note: list_empty() on entry does not return true after this, the entry is
125 static inline void list_del(struct list_head *entry) list_del() argument
127 __list_del(entry->prev, entry->next); list_del()
128 entry->next = (struct list_head*)LIST_POISON1; list_del()
129 entry->prev = (struct list_head*)LIST_POISON2; list_del()
/linux-4.1.27/kernel/events/
H A Dcallchain.c27 __weak void perf_callchain_kernel(struct perf_callchain_entry *entry, perf_callchain_kernel() argument
32 __weak void perf_callchain_user(struct perf_callchain_entry *entry, perf_callchain_user() argument
163 struct perf_callchain_entry *entry; perf_callchain() local
171 entry = get_callchain_entry(&rctx); perf_callchain()
175 if (!entry) perf_callchain()
178 entry->nr = 0; perf_callchain()
181 perf_callchain_store(entry, PERF_CONTEXT_KERNEL); perf_callchain()
182 perf_callchain_kernel(entry, regs); perf_callchain()
200 perf_callchain_store(entry, PERF_CONTEXT_USER); perf_callchain()
201 perf_callchain_user(entry, regs); perf_callchain()
208 return entry; perf_callchain()
/linux-4.1.27/arch/s390/pci/
H A Dpci_dma.c29 unsigned long *table, *entry; dma_alloc_cpu_table() local
35 for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++) dma_alloc_cpu_table()
36 *entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED; dma_alloc_cpu_table()
47 unsigned long *table, *entry; dma_alloc_page_table() local
53 for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++) dma_alloc_page_table()
54 *entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED; dma_alloc_page_table()
63 static unsigned long *dma_get_seg_table_origin(unsigned long *entry) dma_get_seg_table_origin() argument
67 if (reg_entry_isvalid(*entry)) dma_get_seg_table_origin()
68 sto = get_rt_sto(*entry); dma_get_seg_table_origin()
74 set_rt_sto(entry, sto); dma_get_seg_table_origin()
75 validate_rt_entry(entry); dma_get_seg_table_origin()
76 entry_clr_protected(entry); dma_get_seg_table_origin()
81 static unsigned long *dma_get_page_table_origin(unsigned long *entry) dma_get_page_table_origin() argument
85 if (reg_entry_isvalid(*entry)) dma_get_page_table_origin()
86 pto = get_st_pto(*entry); dma_get_page_table_origin()
91 set_st_pto(entry, pto); dma_get_page_table_origin()
92 validate_st_entry(entry); dma_get_page_table_origin()
93 entry_clr_protected(entry); dma_get_page_table_origin()
120 unsigned long *entry; dma_update_cpu_trans() local
122 entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); dma_update_cpu_trans()
123 if (!entry) { dma_update_cpu_trans()
129 invalidate_pt_entry(entry); dma_update_cpu_trans()
132 set_pt_pfaa(entry, page_addr); dma_update_cpu_trans()
133 validate_pt_entry(entry); dma_update_cpu_trans()
137 entry_set_protected(entry); dma_update_cpu_trans()
139 entry_clr_protected(entry); dma_update_cpu_trans()
184 static void dma_free_seg_table(unsigned long entry) dma_free_seg_table() argument
186 unsigned long *sto = get_rt_sto(entry); dma_free_seg_table()
/linux-4.1.27/arch/frv/kernel/
H A DMakefile10 obj-y := $(heads-y) entry.o entry-table.o break.o switch_to.o \
/linux-4.1.27/arch/m68k/kernel/
H A Drelocate_kernel.S80 movel %a0@+,%d0 /* d0 = entry = *ptr */
83 btst #2,%d0 /* entry & IND_DONE? */
86 btst #1,%d0 /* entry & IND_INDIRECTION? */
89 movel %d0,%a0 /* ptr = entry & PAGE_MASK */
93 btst #0,%d0 /* entry & IND_DESTINATION? */
96 movel %d0,%a2 /* a2 = dst = entry & PAGE_MASK */
100 btst #3,%d0 /* entry & IND_SOURCE? */
104 movel %d0,%a3 /* a3 = src = entry & PAGE_MASK */
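The relocate_kernel.S fragment walks kexec's indirection list: each word is a page address with flag bits in its low bits, and the btst operands above correspond to IND_DESTINATION (bit 0), IND_INDIRECTION (bit 1), IND_DONE (bit 2), and IND_SOURCE (bit 3). The same loop in C, as a sketch (the real kernel drives this through kimage helpers; PAGE_SIZE and PAGE_MASK come from the arch headers):

    #include <string.h>

    #define IND_DESTINATION 0x1   /* entry & PAGE_MASK = next destination page */
    #define IND_INDIRECTION 0x2   /* entry & PAGE_MASK = next page of entries */
    #define IND_DONE        0x4   /* end of the list */
    #define IND_SOURCE      0x8   /* entry & PAGE_MASK = page to copy out */

    static void relocate(unsigned long *ptr)
    {
            unsigned long *dst = NULL;

            for (;;) {
                    unsigned long entry = *ptr++;   /* d0 = entry = *ptr */

                    if (entry & IND_DONE)
                            break;
                    else if (entry & IND_INDIRECTION)
                            ptr = (unsigned long *)(entry & PAGE_MASK);
                    else if (entry & IND_DESTINATION)
                            dst = (unsigned long *)(entry & PAGE_MASK);
                    else if (entry & IND_SOURCE) {
                            memcpy(dst, (void *)(entry & PAGE_MASK), PAGE_SIZE);
                            dst += PAGE_SIZE / sizeof(*dst);  /* next dst page */
                    }
            }
    }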
/linux-4.1.27/drivers/net/wireless/p54/
H A Deeprom.c238 struct p54_channel_entry *entry = NULL; p54_update_channel_param() local
242 * so it's very likely that the entry we are looking for p54_update_channel_param()
247 entry = &list->channels[i]; p54_update_channel_param()
253 /* entry does not exist yet. Initialize a new one. */ p54_update_channel_param()
264 entry = &list->channels[i]; p54_update_channel_param()
265 entry->freq = freq; p54_update_channel_param()
266 entry->band = band; p54_update_channel_param()
267 entry->index = ieee80211_frequency_to_channel(freq); p54_update_channel_param()
268 entry->max_power = 0; p54_update_channel_param()
269 entry->data = 0; p54_update_channel_param()
273 if (entry) p54_update_channel_param()
274 entry->data |= data; p54_update_channel_param()
276 return entry; p54_update_channel_param()
522 struct p54_rssi_db_entry *entry; p54_parse_rssical() local
551 db_len = sizeof(*entry) * entries; p54_parse_rssical()
558 priv->rssi_db->entry_size = sizeof(*entry); p54_parse_rssical()
561 entry = (void *)((unsigned long)priv->rssi_db->data + priv->rssi_db->offset); p54_parse_rssical()
566 entry[i].freq = le16_to_cpu(cal[i].freq); p54_parse_rssical()
567 entry[i].mul = (s16) le16_to_cpu(cal[i].mul); p54_parse_rssical()
568 entry[i].add = (s16) le16_to_cpu(cal[i].add); p54_parse_rssical()
584 entry[i].freq = freq; p54_parse_rssical()
585 entry[i].mul = (s16) le16_to_cpu(cal[i].mul); p54_parse_rssical()
586 entry[i].add = (s16) le16_to_cpu(cal[i].add); p54_parse_rssical()
591 sort(entry, entries, sizeof(*entry), p54_compare_rssichan, NULL); p54_parse_rssical()
607 struct p54_rssi_db_entry *entry; p54_rssi_find() local
613 entry = (void *)(priv->rssi_db->data + priv->rssi_db->offset); p54_rssi_find()
615 if (!same_band(freq, entry[i].freq)) p54_rssi_find()
624 if (abs(freq - entry[i].freq) < p54_rssi_find()
625 abs(freq - entry[found].freq)) { p54_rssi_find()
633 return found < 0 ? &p54_rssi_default : &entry[found]; p54_rssi_find()
643 "found possible invalid default country eeprom entry. (entry size: %d)\n", p54_parse_default_country()
734 struct pda_entry *entry; p54_parse_eeprom() local
743 entry = (void *)wrap->data + le16_to_cpu(wrap->len); p54_parse_eeprom()
745 /* verify that at least the entry length/code fits */ p54_parse_eeprom()
746 while ((u8 *)entry <= end - sizeof(*entry)) { p54_parse_eeprom()
747 entry_len = le16_to_cpu(entry->len); p54_parse_eeprom()
750 /* abort if entry exceeds whole structure */ p54_parse_eeprom()
751 if ((u8 *)entry + sizeof(*entry) + data_len > end) p54_parse_eeprom()
754 switch (le16_to_cpu(entry->code)) { p54_parse_eeprom()
758 SET_IEEE80211_PERM_ADDR(dev, entry->data); p54_parse_eeprom()
763 err = p54_convert_output_limits(dev, entry->data, p54_parse_eeprom()
770 (struct pda_pa_curve_data *)entry->data; p54_parse_eeprom()
795 priv->iq_autocal = kmemdup(entry->data, data_len, p54_parse_eeprom()
805 p54_parse_default_country(dev, entry->data, data_len); p54_parse_eeprom()
808 tmp = entry->data; p54_parse_eeprom()
809 while ((u8 *)tmp < entry->data + data_len) { p54_parse_eeprom()
819 priv->version = *(u8 *)(entry->data + 1); p54_parse_eeprom()
824 err = p54_parse_rssical(dev, entry->data, data_len, p54_parse_eeprom()
825 le16_to_cpu(entry->code)); p54_parse_eeprom()
830 struct pda_custom_wrapper *pda = (void *) entry->data; p54_parse_eeprom()
851 struct pda_custom_wrapper *pda = (void *) entry->data; p54_parse_eeprom()
858 struct pda_custom_wrapper *pda = (void *) entry->data; p54_parse_eeprom()
865 crc16 = ~crc_ccitt(crc16, (u8 *) entry, sizeof(*entry)); p54_parse_eeprom()
866 if (crc16 != le16_to_cpup((__le16 *)entry->data)) { p54_parse_eeprom()
879 crc16 = crc_ccitt(crc16, (u8 *)entry, (entry_len + 1) * 2); p54_parse_eeprom()
880 entry = (void *)entry + (entry_len + 1) * 2; p54_parse_eeprom()
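p54_parse_eeprom() is a textbook length-prefixed TLV walk: confirm that at least a header fits, confirm the declared payload stays inside the buffer, dispatch on the code, then advance by the entry size. A generic sketch of that shape (layout simplified: p54 counts entry lengths in 16-bit words and steps by (entry_len + 1) * 2, here lengths are plain bytes and alignment is assumed):

    #include <stdint.h>
    #include <stddef.h>

    struct tlv {
            uint16_t len;    /* payload length in bytes (p54: 16-bit words) */
            uint16_t code;
            uint8_t data[];
    };

    /* walk entries in [buf, buf + size); 0 = clean end, -1 = truncated entry */
    static int walk_tlvs(const uint8_t *buf, size_t size,
                         void (*cb)(uint16_t code, const uint8_t *data, size_t len))
    {
            const uint8_t *end = buf + size;

            /* verify that at least the entry length/code header fits */
            while (buf + sizeof(struct tlv) <= end) {
                    const struct tlv *e = (const void *)buf;

                    /* abort if the entry would exceed the whole structure */
                    if (e->len > (size_t)(end - buf) - sizeof(*e))
                            return -1;

                    cb(e->code, e->data, e->len);
                    buf += sizeof(*e) + e->len;
            }
            return 0;
    }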
/linux-4.1.27/fs/jfs/
H A Djfs_dtree.h38 * entry segment/slot
40 * an entry consists of type dependent head/only segment/slot and
42 * N.B. last/only segment of entry is terminated by next = -1;
63 * internal node entry head/only segment
76 /* compute number of slots for entry */
81 * leaf node entry head/only segment
107 * Maximum number of entries in the inline directory table
114 u8 slot; /* 1: slot within leaf page of entry */
117 index of next entry when this entry was deleted */
135 /* compute number of slots for entry */
150 u8 nextindex; /* 1: next free entry in stbl */
156 s8 stbl[8]; /* 8: sorted entry index table */
173 * entry slot array of 32 byte slot
175 * sorted entry slot index table (stbl):
177 * 1-byte per entry
178 * 512 byte block: 16 entry tbl (1 slot)
179 * 1024 byte block: 32 entry tbl (1 slot)
180 * 2048 byte block: 64 entry tbl (2 slot)
181 * 4096 byte block: 128 entry tbl (4 slot)
190 * except nextindex which refers to entry index in stbl;
191 * end of entry slot list or freelist is marked with -1.
199 u8 nextindex; /* 1: next entry index in stbl */
233 /* get sorted entry table of the page */
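The sizing comments above are internally consistent: the sorted entry table costs one byte per entry and slots are 32 bytes, so a 4096-byte block's 128-entry table needs 128/32 = 4 slots. A quick userspace check of the ratios implied by that table:

    /* sanity-check the stbl sizing from the comments above:
     * one byte per entry, packed into 32-byte slots */
    #include <stdio.h>

    int main(void)
    {
            for (int block = 512; block <= 4096; block *= 2) {
                    int entries = block / 32;           /* 16, 32, 64, 128 */
                    int slots = (entries + 31) / 32;    /* 1, 1, 2, 4 */
                    printf("%4d-byte block: %3d-entry tbl (%d slot%s)\n",
                           block, entries, slots, slots > 1 ? "s" : "");
            }
            return 0;
    }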
/linux-4.1.27/fs/lockd/
H A Dprocfs.c73 struct proc_dir_entry *entry; lockd_create_procfs() local
75 entry = proc_mkdir("fs/lockd", NULL); lockd_create_procfs()
76 if (!entry) lockd_create_procfs()
78 entry = proc_create("nlm_end_grace", S_IRUGO|S_IWUSR, entry, lockd_create_procfs()
80 if (!entry) { lockd_create_procfs()
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/
H A Dlustre_peer.c99 struct uuid_nid_data *data, *entry; class_add_uuid() local
116 list_for_each_entry(entry, &g_uuid_list, un_list) { class_add_uuid()
117 if (obd_uuid_equals(&entry->un_uuid, &data->un_uuid)) { class_add_uuid()
121 for (i = 0; i < entry->un_nid_count; i++) class_add_uuid()
122 if (nid == entry->un_nids[i]) class_add_uuid()
125 if (i == entry->un_nid_count) { class_add_uuid()
126 LASSERT(entry->un_nid_count < NIDS_MAX); class_add_uuid()
127 entry->un_nids[entry->un_nid_count++] = nid; class_add_uuid()
138 libcfs_nid2str(nid), entry->un_nid_count); class_add_uuid()
192 struct uuid_nid_data *entry; class_check_uuid() local
199 list_for_each_entry(entry, &g_uuid_list, un_list) { class_check_uuid()
202 if (!obd_uuid_equals(&entry->un_uuid, uuid)) class_check_uuid()
206 for (i = 0; i < entry->un_nid_count; i++) { class_check_uuid()
207 if (entry->un_nids[i] == nid) { class_check_uuid()
/linux-4.1.27/fs/fat/
H A Dfatent.c21 static void fat12_ent_blocknr(struct super_block *sb, int entry, fat12_ent_blocknr() argument
25 int bytes = entry + (entry >> 1); fat12_ent_blocknr()
26 WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); fat12_ent_blocknr()
31 static void fat_ent_blocknr(struct super_block *sb, int entry, fat_ent_blocknr() argument
35 int bytes = (entry << sbi->fatent_shift); fat_ent_blocknr()
36 WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); fat_ent_blocknr()
82 /* This entry is on a block boundary, so it needs the next block */ fat12_ent_bread()
123 if (fatent->entry & 1) fat12_ent_get()
161 if (fatent->entry & 1) { fat12_ent_put()
196 u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1); fat12_ent_next()
198 fatent->entry++; fat12_ent_next()
228 fatent->entry++; fat16_ent_next()
240 fatent->entry++; fat32_ent_next()
326 /* Do this fatent's cached blocks include this entry? */ fat_ent_update_ptr()
331 /* This entry is on bhs[0]. */ fat_ent_update_ptr()
337 /* This entry needs the next block. */ fat_ent_update_ptr()
348 int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry) fat_ent_read() argument
356 if (entry < FAT_START_ENT || sbi->max_cluster <= entry) { fat_ent_read()
358 fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry); fat_ent_read()
362 fatent_set_entry(fatent, entry); fat_ent_read()
363 ops->ent_blocknr(sb, entry, &offset, &blocknr); fat_ent_read()
426 if (fatent->entry < sbi->max_cluster) fat_ent_next()
440 ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr); fat_ent_read_block()
486 if (fatent.entry >= sbi->max_cluster) fat_alloc_clusters()
487 fatent.entry = FAT_START_ENT; fat_alloc_clusters()
488 fatent_set_entry(&fatent, fatent.entry); fat_alloc_clusters()
496 int entry = fatent.entry; fat_alloc_clusters() local
501 ops->ent_put(&prev_ent, entry); fat_alloc_clusters()
505 sbi->prev_free = entry; fat_alloc_clusters()
509 cluster[idx_clus] = entry; fat_alloc_clusters()
569 fat_fs_error(sb, "%s: deleting FAT entry beyond EOF", fat_free_clusters()
581 if (cluster != fatent.entry + 1) { fat_free_clusters()
582 int nr_clus = fatent.entry - first_cl + 1; fat_free_clusters()
643 ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr); fat_ent_reada()
668 while (fatent.entry < sbi->max_cluster) { fat_count_free_clusters()
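fat12_ent_blocknr() encodes the FAT12 packing: entries are 12 bits, so entry N starts at byte N + N/2 (1.5 bytes each), and odd entries begin mid-byte, which is why fat12_ent_get()/fat12_ent_put() test fatent->entry & 1 and why an entry can straddle a block boundary. A userspace check of the offset math:

    #include <stdio.h>

    int main(void)
    {
            /* byte offset of FAT12 entry n: n + n/2 == n * 3 / 2 */
            for (int entry = 2; entry < 8; entry++) {
                    int bytes = entry + (entry >> 1);
                    printf("entry %d: byte offset %d, %s-aligned\n",
                           entry, bytes,
                           (entry & 1) ? "nibble" : "byte");
            }
            return 0;
    }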
/linux-4.1.27/kernel/trace/
H A Dtrace_syscalls.c24 struct syscall_metadata *entry = call->data; syscall_get_enter_fields() local
26 return &entry->enter_fields; syscall_get_enter_fields()
116 struct syscall_metadata *entry; print_syscall_enter() local
121 entry = syscall_nr_to_meta(syscall); print_syscall_enter()
123 if (!entry) print_syscall_enter()
126 if (entry->enter_event->event.type != ent->type) { print_syscall_enter()
131 trace_seq_printf(s, "%s(", entry->name); print_syscall_enter()
133 for (i = 0; i < entry->nb_args; i++) { print_syscall_enter()
140 trace_seq_printf(s, "%s ", entry->types[i]); print_syscall_enter()
143 trace_seq_printf(s, "%s: %lx%s", entry->args[i], print_syscall_enter()
145 i == entry->nb_args - 1 ? "" : ", "); print_syscall_enter()
163 struct syscall_metadata *entry; print_syscall_exit() local
167 entry = syscall_nr_to_meta(syscall); print_syscall_exit()
169 if (!entry) { print_syscall_exit()
174 if (entry->exit_event->event.type != ent->type) { print_syscall_exit()
179 trace_seq_printf(s, "%s -> 0x%lx\n", entry->name, print_syscall_exit()
195 __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len) __set_enter_print_fmt() argument
204 for (i = 0; i < entry->nb_args; i++) { __set_enter_print_fmt()
206 entry->args[i], sizeof(unsigned long), __set_enter_print_fmt()
207 i == entry->nb_args - 1 ? "" : ", "); __set_enter_print_fmt()
211 for (i = 0; i < entry->nb_args; i++) { __set_enter_print_fmt()
213 ", ((unsigned long)(REC->%s))", entry->args[i]); __set_enter_print_fmt()
226 struct syscall_metadata *entry = call->data; set_syscall_print_fmt() local
228 if (entry->enter_event != call) { set_syscall_print_fmt()
234 len = __set_enter_print_fmt(entry, NULL, 0); set_syscall_print_fmt()
241 __set_enter_print_fmt(entry, print_fmt, len + 1); set_syscall_print_fmt()
249 struct syscall_metadata *entry = call->data; free_syscall_print_fmt() local
251 if (entry->enter_event == call) free_syscall_print_fmt()
297 struct syscall_trace_enter *entry; ftrace_syscall_enter() local
322 size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; ftrace_syscall_enter()
333 entry = ring_buffer_event_data(event); ftrace_syscall_enter()
334 entry->nr = syscall_nr; ftrace_syscall_enter()
335 syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); ftrace_syscall_enter()
337 event_trigger_unlock_commit(ftrace_file, buffer, event, entry, ftrace_syscall_enter()
345 struct syscall_trace_exit *entry; ftrace_syscall_exit() local
374 sys_data->exit_event->event.type, sizeof(*entry), ftrace_syscall_exit()
379 entry = ring_buffer_event_data(event); ftrace_syscall_exit()
380 entry->nr = syscall_nr; ftrace_syscall_exit()
381 entry->ret = syscall_get_return_value(current, regs); ftrace_syscall_exit()
383 event_trigger_unlock_commit(ftrace_file, buffer, event, entry, ftrace_syscall_exit()
599 "syscall entry trace point"); perf_sysenter_enable()
/linux-4.1.27/drivers/hid/
H A Dhid-lg4ff.c274 struct lg4ff_device_entry *entry = drv_data->device_props; lg4ff_adjust_input_event() local
277 if (!entry) { lg4ff_adjust_input_event()
282 switch (entry->product_id) { lg4ff_adjust_input_event()
286 new_value = lg4ff_adjust_dfp_x_axis(value, entry->range); lg4ff_adjust_input_event()
349 struct lg4ff_device_entry *entry; hid_lg4ff_set_autocenter_default() local
358 entry = drv_data->device_props; hid_lg4ff_set_autocenter_default()
359 if (!entry) { hid_lg4ff_set_autocenter_default()
387 switch (entry->product_id) { hid_lg4ff_set_autocenter_default()
592 struct lg4ff_device_entry *entry; lg4ff_alternate_modes_show() local
603 entry = drv_data->device_props; lg4ff_alternate_modes_show()
604 if (!entry) { lg4ff_alternate_modes_show()
609 if (!entry->real_name) { lg4ff_alternate_modes_show()
615 if (entry->alternate_modes & BIT(i)) { lg4ff_alternate_modes_show()
619 !lg4ff_alternate_modes[i].product_id ? entry->real_name : lg4ff_alternate_modes[i].name); lg4ff_alternate_modes_show()
624 if (lg4ff_alternate_modes[i].product_id == entry->product_id || lg4ff_alternate_modes_show()
625 (lg4ff_alternate_modes[i].product_id == 0 && entry->product_id == entry->real_product_id)) lg4ff_alternate_modes_show()
641 struct lg4ff_device_entry *entry; lg4ff_alternate_modes_store() local
654 entry = drv_data->device_props; lg4ff_alternate_modes_store()
655 if (!entry) { lg4ff_alternate_modes_store()
678 if (entry->alternate_modes & BIT(i)) { lg4ff_alternate_modes_store()
681 target_product_id = entry->real_product_id; lg4ff_alternate_modes_store()
696 if (target_product_id == entry->product_id) /* Nothing to do */ lg4ff_alternate_modes_store()
702 entry->real_name); lg4ff_alternate_modes_store()
707 if ((entry->real_product_id == USB_DEVICE_ID_LOGITECH_DFP_WHEEL || entry->real_product_id == USB_DEVICE_ID_LOGITECH_G25_WHEEL) && lg4ff_alternate_modes_store()
708 entry->product_id > target_product_id) { lg4ff_alternate_modes_store()
709 hid_info(hid, "\"%s\" cannot be switched back into \"%s\" mode\n", entry->real_name, lg4ff_alternate_modes[i].name); lg4ff_alternate_modes_store()
713 s = lg4ff_get_mode_switch_command(entry->real_product_id, target_product_id); lg4ff_alternate_modes_store()
729 struct lg4ff_device_entry *entry; range_show() local
739 entry = drv_data->device_props; range_show()
740 if (!entry) { range_show()
745 count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->range); range_show()
755 struct lg4ff_device_entry *entry; range_store() local
765 entry = drv_data->device_props; range_store()
766 if (!entry) { range_store()
772 range = entry->max_range; range_store()
776 if (entry->set_range != NULL && range >= entry->min_range && range <= entry->max_range) { range_store()
777 entry->set_range(hid, range); range_store()
778 entry->range = range; range_store()
788 struct lg4ff_device_entry *entry; lg4ff_real_id_show() local
798 entry = drv_data->device_props; lg4ff_real_id_show()
799 if (!entry) { lg4ff_real_id_show()
804 if (!entry->real_tag || !entry->real_name) { lg4ff_real_id_show()
809 count = scnprintf(buf, PAGE_SIZE, "%s: %s\n", entry->real_tag, entry->real_name); lg4ff_real_id_show()
843 struct lg4ff_device_entry *entry; lg4ff_led_set_brightness() local
851 entry = (struct lg4ff_device_entry *)drv_data->device_props; lg4ff_led_set_brightness()
853 if (!entry) { lg4ff_led_set_brightness()
859 if (led_cdev != entry->led[i]) lg4ff_led_set_brightness()
861 state = (entry->led_state >> i) & 1; lg4ff_led_set_brightness()
863 entry->led_state &= ~(1 << i); lg4ff_led_set_brightness()
864 lg4ff_set_leds(hid, entry->led_state); lg4ff_led_set_brightness()
866 entry->led_state |= 1 << i; lg4ff_led_set_brightness()
867 lg4ff_set_leds(hid, entry->led_state); lg4ff_led_set_brightness()
878 struct lg4ff_device_entry *entry; lg4ff_led_get_brightness() local
886 entry = (struct lg4ff_device_entry *)drv_data->device_props; lg4ff_led_get_brightness()
888 if (!entry) { lg4ff_led_get_brightness()
894 if (led_cdev == entry->led[i]) { lg4ff_led_get_brightness()
895 value = (entry->led_state >> i) & 1; lg4ff_led_get_brightness()
996 struct lg4ff_device_entry *entry; lg4ff_init() local
1059 entry = kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL); lg4ff_init()
1060 if (!entry) { lg4ff_init()
1064 drv_data->device_props = entry; lg4ff_init()
1066 entry->product_id = lg4ff_devices[i].product_id; lg4ff_init()
1067 entry->real_product_id = real_product_id; lg4ff_init()
1068 entry->min_range = lg4ff_devices[i].min_range; lg4ff_init()
1069 entry->max_range = lg4ff_devices[i].max_range; lg4ff_init()
1070 entry->set_range = lg4ff_devices[i].set_range; lg4ff_init()
1073 entry->alternate_modes = lg4ff_multimode_wheels[mmode_idx].alternate_modes; lg4ff_init()
1074 entry->real_tag = lg4ff_multimode_wheels[mmode_idx].real_tag; lg4ff_init()
1075 entry->real_name = lg4ff_multimode_wheels[mmode_idx].real_name; lg4ff_init()
1106 entry->range = entry->max_range; lg4ff_init()
1107 if (entry->set_range != NULL) lg4ff_init()
1108 entry->set_range(hid, entry->range); lg4ff_init()
1112 entry->led_state = 0; lg4ff_init()
1114 entry->led[j] = NULL; lg4ff_init()
1140 entry->led[j] = led; lg4ff_init()
1148 led = entry->led[j]; lg4ff_init()
1149 entry->led[j] = NULL; lg4ff_init()
1167 struct lg4ff_device_entry *entry; lg4ff_deinit() local
1175 entry = drv_data->device_props; lg4ff_deinit()
1176 if (!entry) lg4ff_deinit()
1182 if (entry->alternate_modes) { lg4ff_deinit()
1195 led = entry->led[j]; lg4ff_deinit()
1196 entry->led[j] = NULL; lg4ff_deinit()
1206 kfree(entry); lg4ff_deinit()
/linux-4.1.27/kernel/power/
H A Dconsole.c47 struct pm_vt_switch *entry, *tmp; pm_vt_switch_required() local
58 entry = kmalloc(sizeof(*entry), GFP_KERNEL); pm_vt_switch_required()
59 if (!entry) pm_vt_switch_required()
62 entry->required = required; pm_vt_switch_required()
63 entry->dev = dev; pm_vt_switch_required()
65 list_add(&entry->head, &pm_vt_switch_list); pm_vt_switch_required()
108 struct pm_vt_switch *entry; pm_vt_switch() local
118 list_for_each_entry(entry, &pm_vt_switch_list, head) { pm_vt_switch()
119 if (entry->required) pm_vt_switch()
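The console.c hits implement an opt-in registry: drivers record whether they need a VT switch around suspend, and pm_vt_switch() only switches if some registered entry requires it. A hedged sketch of the driver side (the probe/remove bodies are illustrative; pm_vt_switch_required() and pm_vt_switch_unrequired() are the real entry points):

    #include <linux/pm.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            /* this device can repaint its own framebuffer on resume,
             * so no VT switch is needed on its behalf */
            pm_vt_switch_required(&pdev->dev, false);
            return 0;
    }

    static int demo_remove(struct platform_device *pdev)
    {
            pm_vt_switch_unrequired(&pdev->dev);  /* drop our registry entry */
            return 0;
    }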
/linux-4.1.27/arch/x86/platform/uv/
H A Duv_irq.c75 /* irq entry exists */ uv_set_irq_2_mmr_info()
136 struct uv_IO_APIC_route_entry *entry; arch_enable_uv_irq() local
160 entry = (struct uv_IO_APIC_route_entry *)&mmr_value; arch_enable_uv_irq()
161 entry->vector = cfg->vector; arch_enable_uv_irq()
162 entry->delivery_mode = apic->irq_delivery_mode; arch_enable_uv_irq()
163 entry->dest_mode = apic->irq_dest_mode; arch_enable_uv_irq()
164 entry->polarity = 0; arch_enable_uv_irq()
165 entry->trigger = 0; arch_enable_uv_irq()
166 entry->mask = 0; arch_enable_uv_irq()
167 entry->dest = dest; arch_enable_uv_irq()
185 struct uv_IO_APIC_route_entry *entry; arch_disable_uv_irq() local
191 entry = (struct uv_IO_APIC_route_entry *)&mmr_value; arch_disable_uv_irq()
192 entry->mask = 1; arch_disable_uv_irq()
204 struct uv_IO_APIC_route_entry *entry; uv_set_irq_affinity() local
211 entry = (struct uv_IO_APIC_route_entry *)&mmr_value; uv_set_irq_affinity()
213 entry->vector = cfg->vector; uv_set_irq_affinity()
214 entry->delivery_mode = apic->irq_delivery_mode; uv_set_irq_affinity()
215 entry->dest_mode = apic->irq_dest_mode; uv_set_irq_affinity()
216 entry->polarity = 0; uv_set_irq_affinity()
217 entry->trigger = 0; uv_set_irq_affinity()
218 entry->mask = 0; uv_set_irq_affinity()
219 entry->dest = dest; uv_set_irq_affinity()
/linux-4.1.27/fs/omfs/
H A Dfile.c35 struct omfs_extent_entry *entry; omfs_shrink_inode() local
42 /* traverse extent table, freeing each entry that is greater omfs_shrink_inode()
71 entry = &oe->e_entry; omfs_shrink_inode()
73 /* ignore last entry as it is the terminator */ omfs_shrink_inode()
76 start = be64_to_cpu(entry->e_cluster); omfs_shrink_inode()
77 count = be64_to_cpu(entry->e_blocks); omfs_shrink_inode()
80 entry++; omfs_shrink_inode()
120 struct omfs_extent_entry *entry = &oe->e_entry; omfs_grow_extent() local
142 terminator = entry + extent_count - 1; omfs_grow_extent()
144 entry = terminator-1; omfs_grow_extent()
145 new_block = be64_to_cpu(entry->e_cluster) + omfs_grow_extent()
146 be64_to_cpu(entry->e_blocks); omfs_grow_extent()
149 be64_add_cpu(&entry->e_blocks, 1); omfs_grow_extent()
167 /* copy terminator down an entry */ omfs_grow_extent()
168 entry = terminator; omfs_grow_extent()
170 memcpy(terminator, entry, sizeof(struct omfs_extent_entry)); omfs_grow_extent()
172 entry->e_cluster = cpu_to_be64(new_block); omfs_grow_extent()
173 entry->e_blocks = cpu_to_be64((u64) new_count); omfs_grow_extent()
178 /* write in new entry */ omfs_grow_extent()
227 struct omfs_extent_entry *entry; omfs_get_block() local
248 entry = &oe->e_entry; omfs_get_block()
253 offset = find_block(inode, entry, block, extent_count, &remain); omfs_get_block()
/linux-4.1.27/net/irda/
H A Dirqueue.c261 * Remove first entry in queue
295 * Return the removed entry (or NULL if queue was empty). dequeue_first()
338 * Return the removed entry (or NULL if queue was empty). dequeue_general()
442 * Function hashbin_insert (hashbin, entry, hashv, name)
444 * Insert an entry into the hashbin
447 void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv, hashbin_insert() argument
471 entry->q_hash = hashv; hashbin_insert()
473 strlcpy( entry->q_name, name, sizeof(entry->q_name)); hashbin_insert()
476 * Insert new entry first hashbin_insert()
479 entry); hashbin_insert()
492 * Remove first entry of the hashbin
501 irda_queue_t *entry = NULL; hashbin_remove_first() local
508 entry = hashbin_get_first( hashbin); hashbin_remove_first()
509 if ( entry != NULL) { hashbin_remove_first()
515 hashv = entry->q_hash; hashbin_remove_first()
519 * Dequeue the entry... hashbin_remove_first()
522 entry); hashbin_remove_first()
524 entry->q_next = NULL; hashbin_remove_first()
525 entry->q_prev = NULL; hashbin_remove_first()
531 if ( entry == hashbin->hb_current) hashbin_remove_first()
540 return entry; hashbin_remove_first()
547 * Remove entry with the given name
552 * leading to removing the WRONG entry.
562 irda_queue_t* entry; hashbin_remove() local
580 * Search for entry hashbin_remove()
582 entry = hashbin->hb_queue[ bin ]; hashbin_remove()
583 if ( entry ) { hashbin_remove()
588 if ( entry->q_hash == hashv ) { hashbin_remove()
593 if ( strcmp( entry->q_name, name) == 0) hashbin_remove()
603 entry = entry->q_next; hashbin_remove()
604 } while ( entry != hashbin->hb_queue[ bin ] ); hashbin_remove()
608 * If entry was found, dequeue it hashbin_remove()
612 entry); hashbin_remove()
619 if ( entry == hashbin->hb_current) hashbin_remove()
631 return entry; hashbin_remove()
639 * Function hashbin_remove_this (hashbin, entry)
641 * Remove the given entry from the hashbin
649 void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry) hashbin_remove_this() argument
657 IRDA_ASSERT( entry != NULL, return NULL;); hashbin_remove_this()
665 if((entry->q_next == NULL) || (entry->q_prev == NULL)) { hashbin_remove_this()
666 entry = NULL; hashbin_remove_this()
673 hashv = entry->q_hash; hashbin_remove_this()
677 * Dequeue the entry... hashbin_remove_this()
680 entry); hashbin_remove_this()
682 entry->q_next = NULL; hashbin_remove_this()
683 entry->q_prev = NULL; hashbin_remove_this()
689 if ( entry == hashbin->hb_current) hashbin_remove_this()
697 return entry; hashbin_remove_this()
712 irda_queue_t* entry; hashbin_find() local
727 * Search for entry hashbin_find()
729 entry = hashbin->hb_queue[ bin]; hashbin_find()
730 if ( entry ) { hashbin_find()
735 if ( entry->q_hash == hashv ) { hashbin_find()
740 if ( strcmp( entry->q_name, name ) == 0 ) { hashbin_find()
741 return entry; hashbin_find()
744 return entry; hashbin_find()
747 entry = entry->q_next; hashbin_find()
748 } while ( entry != hashbin->hb_queue[ bin ] ); hashbin_find()
767 irda_queue_t* entry; hashbin_lock_find() local
773 * Search for entry hashbin_lock_find()
775 entry = hashbin_find(hashbin, hashv, name); hashbin_lock_find()
780 return entry; hashbin_lock_find()
792 * NULL if the entry is removed. - Jean II
798 irda_queue_t* entry; hashbin_find_next() local
804 * Search for current entry hashbin_find_next()
808 entry = hashbin_find(hashbin, hashv, name); hashbin_find_next()
813 if(entry) { hashbin_find_next()
814 hashbin->hb_current = entry; hashbin_find_next()
822 return entry; hashbin_find_next()
834 irda_queue_t *entry; hashbin_get_first() local
844 entry = hashbin->hb_queue[ i]; hashbin_get_first()
845 if ( entry) { hashbin_get_first()
846 hashbin->hb_current = entry; hashbin_get_first()
847 return entry; hashbin_get_first()
869 irda_queue_t* entry; hashbin_get_next() local
880 entry = hashbin->hb_current->q_next; hashbin_get_next()
881 bin = GET_HASHBIN( entry->q_hash); hashbin_get_next()
887 if ( entry != hashbin->hb_queue[ bin ]) { hashbin_get_next()
888 hashbin->hb_current = entry; hashbin_get_next()
890 return entry; hashbin_get_next()
904 entry = hashbin->hb_queue[ i]; hashbin_get_next()
905 if ( entry) { hashbin_get_next()
906 hashbin->hb_current = entry; hashbin_get_next()
908 return entry; hashbin_get_next()
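The irqueue.c hits together describe IrDA's hashbin container: an element starts with an irda_queue_t, is inserted under a hash value plus an optional name, and is found or removed through the same key. Rough usage, as a sketch (the struct, key, and call sites are ours; with HB_NOLOCK the caller is responsible for locking):

    #include <linux/slab.h>
    #include <net/irda/irqueue.h>

    struct demo_item {
            irda_queue_t q;        /* must be first: hashbin links through it */
            int payload;
    };

    static void demo(void)
    {
            hashbin_t *hb = hashbin_new(HB_NOLOCK);
            struct demo_item *item, *found;

            item = kzalloc(sizeof(*item), GFP_KERNEL);
            if (!hb || !item)
                    return;                        /* sketch: no cleanup */
            item->payload = 7;

            /* key by a 32-bit hash; the name is optional and may be NULL */
            hashbin_insert(hb, &item->q, 0x1234, NULL);

            found = hashbin_find(hb, 0x1234, NULL);
            if (found)
                    pr_info("payload=%d\n", found->payload);

            hashbin_remove_this(hb, &item->q);     /* remove by pointer */
            kfree(item);
            hashbin_delete(hb, NULL);
    }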
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Dmkregtable.c38 * using the generic single-entry routines.
57 * Insert a new entry between two known consecutive entries.
77 * list_add - add a new entry
78 * @new: new entry to be added
81 * Insert a new entry after the specified head.
90 * list_add_tail - add a new entry
91 * @new: new entry to be added
94 * Insert a new entry before the specified head.
103 * Delete a list entry by making the prev/next entries
116 * list_del - deletes entry from list.
117 * @entry: the element to delete from the list.
118 * Note: list_empty() on entry does not return true after this, the entry is
122 static inline void list_del(struct list_head *entry) list_del() argument
124 __list_del(entry->prev, entry->next); list_del()
125 entry->next = (void *)0xDEADBEEF; list_del()
126 entry->prev = (void *)0xBEEFDEAD; list_del()
129 extern void list_del(struct list_head *entry);
133 * list_replace - replace old entry by new one
155 * list_del_init - deletes entry from list and reinitialize it.
156 * @entry: the element to delete from the list.
158 static inline void list_del_init(struct list_head *entry) list_del_init() argument
160 __list_del(entry->prev, entry->next); list_del_init()
161 INIT_LIST_HEAD(entry); list_del_init()
166 * @list: the entry to move
167 * @head: the head that will precede our entry
177 * @list: the entry to move
178 * @head: the head that will follow our entry
188 * list_is_last - tests whether @list is the last entry in list @head
189 * @list: the entry to test
217 * to the list entry is list_del_init(). Eg. it cannot be used
227 * list_is_singular - tests whether a list has just one entry.
237 struct list_head *entry) __list_cut_position()
239 struct list_head *new_first = entry->next; __list_cut_position()
242 list->prev = entry; __list_cut_position()
243 entry->next = list; __list_cut_position()
252 * @entry: an entry within head, could be the head itself
256 * including @entry, from @head to @list. You should
257 * pass on @entry an element you know is on @head. @list
264 struct list_head *entry) list_cut_position()
268 if (list_is_singular(head) && (head->next != entry && head != entry)) list_cut_position()
270 if (entry == head) list_cut_position()
273 __list_cut_position(list, head, entry); list_cut_position()
347 * list_entry - get the struct for this entry
385 * list_for_each_safe - iterate over a list safe against removal of list entry
395 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
428 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
433 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
479 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
499 * safe against removal of list entry.
515 * removal of list entry.
530 * of list entry.
235 __list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) __list_cut_position() argument
262 list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) list_cut_position() argument
/linux-4.1.27/arch/ia64/sn/kernel/sn2/
H A Dprominfo_proc.c25 /* Standard Intel FIT entry types */
26 #define FIT_ENTRY_FIT_HEADER 0x00 /* FIT header entry */
27 #define FIT_ENTRY_PAL_B 0x01 /* PAL_B entry */
29 #define FIT_ENTRY_PAL_A_PROC 0x0E /* Processor-specific PAL_A entry */
30 #define FIT_ENTRY_PAL_A 0x0F /* PAL_A entry, same as... */
31 #define FIT_ENTRY_PAL_A_GEN 0x0F /* ...Generic PAL_A entry */
34 #define FIT_ENTRY_SAL_A 0x10 /* SAL_A entry */
35 #define FIT_ENTRY_SAL_B 0x11 /* SAL_B entry */
36 #define FIT_ENTRY_SALRUNTIME 0x12 /* SAL runtime entry */
37 #define FIT_ENTRY_EFI 0x1F /* EFI entry */
38 #define FIT_ENTRY_FPSWA 0x20 /* embedded fpswa entry */
39 #define FIT_ENTRY_VMLINUX 0x21 /* embedded vmlinux entry */
123 * pages -- each entry is about 60 chars wide when printed.) I read
189 /* module entry points */
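When formatting these FIT records for /proc, the type codes above map naturally onto a name table; a hedged sketch (names abridged, covering only the codes listed above):

    /* map the FIT entry type codes listed above to printable names */
    static const char *fit_type_name(unsigned char type)
    {
            switch (type) {
            case 0x00: return "FIT header";
            case 0x01: return "PAL_B";
            case 0x0E: return "processor-specific PAL_A";
            case 0x0F: return "PAL_A";
            case 0x10: return "SAL_A";
            case 0x11: return "SAL_B";
            case 0x12: return "SAL runtime";
            case 0x1F: return "EFI";
            case 0x20: return "embedded FPSWA";
            case 0x21: return "embedded vmlinux";
            default:   return "unknown";
            }
    }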
/linux-4.1.27/sound/firewire/bebob/
H A Dbebob_proc.c36 proc_read_hw_info(struct snd_info_entry *entry, proc_read_hw_info() argument
39 struct snd_bebob *bebob = entry->private_data; proc_read_hw_info()
72 proc_read_meters(struct snd_info_entry *entry, proc_read_meters() argument
75 struct snd_bebob *bebob = entry->private_data; proc_read_meters()
105 proc_read_formation(struct snd_info_entry *entry, proc_read_formation() argument
108 struct snd_bebob *bebob = entry->private_data; proc_read_formation()
132 proc_read_clock(struct snd_info_entry *entry, proc_read_clock() argument
135 struct snd_bebob *bebob = entry->private_data; proc_read_clock()
161 struct snd_info_entry *entry; add_node() local
163 entry = snd_info_create_card_entry(bebob->card, name, root); add_node()
164 if (entry == NULL) add_node()
167 snd_info_set_text_ops(entry, bebob, op); add_node()
168 if (snd_info_register(entry) < 0) add_node()
169 snd_info_free_entry(entry); add_node()
/linux-4.1.27/sound/firewire/fireworks/
H A Dfireworks_proc.c29 proc_read_hwinfo(struct snd_info_entry *entry, struct snd_info_buffer *buffer) proc_read_hwinfo() argument
31 struct snd_efw *efw = entry->private_data; proc_read_hwinfo()
106 proc_read_clock(struct snd_info_entry *entry, struct snd_info_buffer *buffer) proc_read_clock() argument
108 struct snd_efw *efw = entry->private_data; proc_read_clock()
128 proc_read_phys_meters(struct snd_info_entry *entry, proc_read_phys_meters() argument
131 struct snd_efw *efw = entry->private_data; proc_read_phys_meters()
179 proc_read_queues_state(struct snd_info_entry *entry, proc_read_queues_state() argument
182 struct snd_efw *efw = entry->private_data; proc_read_queues_state()
199 struct snd_info_entry *entry; add_node() local
201 entry = snd_info_create_card_entry(efw->card, name, root); add_node()
202 if (entry == NULL) add_node()
205 snd_info_set_text_ops(entry, efw, op); add_node()
206 if (snd_info_register(entry) < 0) add_node()
207 snd_info_free_entry(entry); add_node()
/linux-4.1.27/sound/usb/
H A Dproc.c46 static void proc_audio_usbbus_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) proc_audio_usbbus_read() argument
48 struct snd_usb_audio *chip = entry->private_data; proc_audio_usbbus_read()
53 static void proc_audio_usbid_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) proc_audio_usbid_read() argument
55 struct snd_usb_audio *chip = entry->private_data; proc_audio_usbid_read()
64 struct snd_info_entry *entry; snd_usb_audio_create_proc() local
65 if (!snd_card_proc_new(chip->card, "usbbus", &entry)) snd_usb_audio_create_proc()
66 snd_info_set_text_ops(entry, chip, proc_audio_usbbus_read); snd_usb_audio_create_proc()
67 if (!snd_card_proc_new(chip->card, "usbid", &entry)) snd_usb_audio_create_proc()
68 snd_info_set_text_ops(entry, chip, proc_audio_usbid_read); snd_usb_audio_create_proc()
150 static void proc_pcm_format_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) proc_pcm_format_read() argument
152 struct snd_usb_stream *stream = entry->private_data; proc_pcm_format_read()
170 struct snd_info_entry *entry; snd_usb_proc_pcm_format_add() local
175 if (!snd_card_proc_new(card, name, &entry)) snd_usb_proc_pcm_format_add()
176 snd_info_set_text_ops(entry, stream, proc_pcm_format_read); snd_usb_proc_pcm_format_add()
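All three ALSA blocks above (bebob, fireworks, usb-audio) register card proc files the same way: snd_card_proc_new() creates the entry under the card, snd_info_set_text_ops() attaches the driver struct and a text read callback, and registration is handled with the card. A minimal sketch of the idiom (struct demo_chip and its field are assumed, not from these drivers):

    /* register a read-only text proc node under an ALSA card */
    static void demo_proc_read(struct snd_info_entry *entry,
                               struct snd_info_buffer *buffer)
    {
            struct demo_chip *chip = entry->private_data;  /* our driver struct */

            snd_iprintf(buffer, "state: %d\n", chip->state);
    }

    static void demo_proc_init(struct snd_card *card, struct demo_chip *chip)
    {
            struct snd_info_entry *entry;

            if (!snd_card_proc_new(card, "demo", &entry))
                    snd_info_set_text_ops(entry, chip, demo_proc_read);
    }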
/linux-4.1.27/security/tomoyo/
H A Ddomain.c17 * tomoyo_update_policy - Update an entry for exception policy.
22 * @check_duplicate: Callback function to find duplicated entry.
36 struct tomoyo_acl_head *entry; tomoyo_update_policy() local
41 list_for_each_entry_rcu(entry, list, list) { list_for_each_entry_rcu()
42 if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) list_for_each_entry_rcu()
44 if (!check_duplicate(entry, new_entry)) list_for_each_entry_rcu()
46 entry->is_deleted = param->is_delete; list_for_each_entry_rcu()
51 entry = tomoyo_commit_ok(new_entry, size);
52 if (entry) {
53 list_add_tail_rcu(&entry->list, list);
62 * tomoyo_same_acl_head - Check for duplicated "struct tomoyo_acl_info" entry.
76 * tomoyo_update_domain - Update an entry for domain policy.
81 * @check_duplicate: Callback function to find duplicated entry.
82 * @merge_duplicate: Callback function to merge duplicated entry.
100 struct tomoyo_acl_info *entry; tomoyo_update_domain() local
119 list_for_each_entry_rcu(entry, list, list) { list_for_each_entry_rcu()
120 if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) list_for_each_entry_rcu()
122 if (!tomoyo_same_acl_head(entry, new_entry) || list_for_each_entry_rcu()
123 !check_duplicate(entry, new_entry)) list_for_each_entry_rcu()
126 entry->is_deleted = merge_duplicate(entry, new_entry, list_for_each_entry_rcu()
129 entry->is_deleted = is_delete; list_for_each_entry_rcu()
134 entry = tomoyo_commit_ok(new_entry, size);
135 if (entry) {
136 list_add_tail_rcu(&entry->list, list);
204 * tomoyo_same_transition_control - Check for duplicated "struct tomoyo_transition_control" entry.
229 * @type: Type of this entry.
361 * tomoyo_same_aggregator - Check for duplicated "struct tomoyo_aggregator" entry.
448 struct tomoyo_policy_namespace *entry; tomoyo_assign_namespace() local
458 entry = kzalloc(sizeof(*entry) + len + 1, GFP_NOFS); tomoyo_assign_namespace()
459 if (!entry) tomoyo_assign_namespace()
464 if (!ptr && tomoyo_memory_ok(entry)) { tomoyo_assign_namespace()
465 char *name = (char *) (entry + 1); tomoyo_assign_namespace()
466 ptr = entry; tomoyo_assign_namespace()
469 entry->name = name; tomoyo_assign_namespace()
470 tomoyo_init_policy_namespace(entry); tomoyo_assign_namespace()
471 entry = NULL; tomoyo_assign_namespace()
475 kfree(entry); tomoyo_assign_namespace()
508 struct tomoyo_domain_info *entry = tomoyo_find_domain(domainname); tomoyo_assign_domain() local
510 if (entry) { tomoyo_assign_domain()
519 !entry->ns->profile_ptr[entry->profile]) tomoyo_assign_domain()
522 return entry; tomoyo_assign_domain()
554 entry = tomoyo_find_domain(domainname); tomoyo_assign_domain()
555 if (!entry) { tomoyo_assign_domain()
556 entry = tomoyo_commit_ok(&e, sizeof(e)); tomoyo_assign_domain()
557 if (entry) { tomoyo_assign_domain()
558 INIT_LIST_HEAD(&entry->acl_info_list); tomoyo_assign_domain()
559 list_add_tail_rcu(&entry->list, &tomoyo_domain_list); tomoyo_assign_domain()
566 if (entry && transit) { tomoyo_assign_domain()
569 tomoyo_init_request_info(&r, entry, tomoyo_assign_domain()
573 entry->profile); tomoyo_assign_domain()
574 tomoyo_write_log(&r, "use_group %u\n", entry->group); tomoyo_assign_domain()
578 return entry; tomoyo_assign_domain()
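tomoyo_update_policy() above implements a recurring pattern: scan the policy list under RCU for a duplicate, flip its is_deleted flag in place, and only append a committed copy when no duplicate exists. A hedged sketch of that flow, leaning on the types shown above (locking and the tomoyo_commit_ok() internals are omitted; update_acl is an illustrative name):

	static int update_acl(struct list_head *list,
			      struct tomoyo_acl_head *new_entry, const int size,
			      bool is_delete,
			      bool (*check_duplicate)(const struct tomoyo_acl_head *,
						      const struct tomoyo_acl_head *))
	{
		struct tomoyo_acl_head *entry;
		int error = is_delete ? -ENOENT : -ENOMEM;

		list_for_each_entry_rcu(entry, list, list) {
			if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
				continue;		/* being reclaimed, skip */
			if (!check_duplicate(entry, new_entry))
				continue;		/* not the same entry */
			entry->is_deleted = is_delete;	/* toggle existing entry */
			return 0;
		}
		if (is_delete)				/* nothing to delete */
			return error;
		entry = tomoyo_commit_ok(new_entry, size); /* dup + memory check */
		if (entry) {
			list_add_tail_rcu(&entry->list, list);
			error = 0;
		}
		return error;
	}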
H A Dgc.c158 struct tomoyo_path_acl *entry tomoyo_del_acl() local
159 = container_of(acl, typeof(*entry), head); tomoyo_del_acl()
160 tomoyo_put_name_union(&entry->name); tomoyo_del_acl()
165 struct tomoyo_path2_acl *entry tomoyo_del_acl() local
166 = container_of(acl, typeof(*entry), head); tomoyo_del_acl()
167 tomoyo_put_name_union(&entry->name1); tomoyo_del_acl()
168 tomoyo_put_name_union(&entry->name2); tomoyo_del_acl()
173 struct tomoyo_path_number_acl *entry tomoyo_del_acl() local
174 = container_of(acl, typeof(*entry), head); tomoyo_del_acl()
175 tomoyo_put_name_union(&entry->name); tomoyo_del_acl()
176 tomoyo_put_number_union(&entry->number); tomoyo_del_acl()
181 struct tomoyo_mkdev_acl *entry tomoyo_del_acl() local
182 = container_of(acl, typeof(*entry), head); tomoyo_del_acl()
183 tomoyo_put_name_union(&entry->name); tomoyo_del_acl()
184 tomoyo_put_number_union(&entry->mode); tomoyo_del_acl()
185 tomoyo_put_number_union(&entry->major); tomoyo_del_acl()
186 tomoyo_put_number_union(&entry->minor); tomoyo_del_acl()
191 struct tomoyo_mount_acl *entry tomoyo_del_acl() local
192 = container_of(acl, typeof(*entry), head); tomoyo_del_acl()
193 tomoyo_put_name_union(&entry->dev_name); tomoyo_del_acl()
194 tomoyo_put_name_union(&entry->dir_name); tomoyo_del_acl()
195 tomoyo_put_name_union(&entry->fs_type); tomoyo_del_acl()
196 tomoyo_put_number_union(&entry->flags); tomoyo_del_acl()
201 struct tomoyo_env_acl *entry = tomoyo_del_acl() local
202 container_of(acl, typeof(*entry), head); tomoyo_del_acl()
204 tomoyo_put_name(entry->env); tomoyo_del_acl()
209 struct tomoyo_inet_acl *entry = tomoyo_del_acl() local
210 container_of(acl, typeof(*entry), head); tomoyo_del_acl()
212 tomoyo_put_group(entry->address.group); tomoyo_del_acl()
213 tomoyo_put_number_union(&entry->port); tomoyo_del_acl()
218 struct tomoyo_unix_acl *entry = tomoyo_del_acl() local
219 container_of(acl, typeof(*entry), head); tomoyo_del_acl()
221 tomoyo_put_name_union(&entry->name); tomoyo_del_acl()
226 struct tomoyo_task_acl *entry = tomoyo_del_acl() local
227 container_of(acl, typeof(*entry), head); tomoyo_del_acl()
228 tomoyo_put_name(entry->domainname); tomoyo_del_acl()
365 * tomoyo_try_to_gc - Try to kfree() an entry.
427 head.list)->entry.name)) tomoyo_try_to_gc()
H A Dmemory.c117 struct tomoyo_group *entry = tomoyo_commit_ok(&e, sizeof(e)); local
118 if (entry) {
119 INIT_LIST_HEAD(&entry->member_list);
120 atomic_set(&entry->head.users, 1);
121 list_add_tail_rcu(&entry->head.list, list);
122 group = entry;
162 if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name) || list_for_each_entry()
170 ptr->entry.name = ((char *) ptr) + sizeof(*ptr);
171 memmove((char *) ptr->entry.name, name, len);
173 tomoyo_fill_path_info(&ptr->entry);
181 return ptr ? &ptr->entry : NULL;
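memory.c and tomoyo_assign_namespace() above both co-locate a struct and its name string in a single allocation: the string is stored in the bytes immediately after the struct, i.e. (char *)(entry + 1). A small userspace sketch of the same layout:

	/* Userspace sketch of the single-allocation string pattern used in
	 * tomoyo; one calloc()/free() covers both the struct and the name.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct ns { const char *name; };

	int main(void)
	{
		const char *src = "<kernel>";
		size_t len = strlen(src);

		/* kzalloc(sizeof(*entry) + len + 1, ...) in the kernel */
		struct ns *entry = calloc(1, sizeof(*entry) + len + 1);
		if (!entry)
			return 1;

		char *name = (char *)(entry + 1);	/* bytes after the struct */
		memcpy(name, src, len);
		entry->name = name;

		printf("name = %s\n", entry->name);
		free(entry);
		return 0;
	}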
/linux-4.1.27/fs/nfs_common/
H A Dnfsacl.c11 * - Minimal ACLs always have an ACL_MASK entry, so they have
13 * - The ACL_MASK entry in such minimal ACLs always has the same
14 * permissions as the ACL_GROUP_OBJ entry. (In extended ACLs
54 struct posix_acl_entry *entry = xdr_nfsace_encode() local
57 *p++ = htonl(entry->e_tag | nfsacl_desc->typeflag); xdr_nfsace_encode()
58 switch(entry->e_tag) { xdr_nfsace_encode()
66 *p++ = htonl(from_kuid(&init_user_ns, entry->e_uid)); xdr_nfsace_encode()
69 *p++ = htonl(from_kgid(&init_user_ns, entry->e_gid)); xdr_nfsace_encode()
75 *p++ = htonl(entry->e_perm & S_IRWXO); xdr_nfsace_encode()
150 struct posix_acl_entry *entry; xdr_nfsace_decode() local
162 entry = &nfsacl_desc->acl->a_entries[nfsacl_desc->count++]; xdr_nfsace_decode()
163 entry->e_tag = ntohl(*p++) & ~NFS_ACL_DEFAULT; xdr_nfsace_decode()
165 entry->e_perm = ntohl(*p++); xdr_nfsace_decode()
167 switch(entry->e_tag) { xdr_nfsace_decode()
169 entry->e_uid = make_kuid(&init_user_ns, id); xdr_nfsace_decode()
170 if (!uid_valid(entry->e_uid)) xdr_nfsace_decode()
174 entry->e_gid = make_kgid(&init_user_ns, id); xdr_nfsace_decode()
175 if (!gid_valid(entry->e_gid)) xdr_nfsace_decode()
181 if (entry->e_perm & ~S_IRWXO) xdr_nfsace_decode()
186 entry->e_perm &= S_IRWXO; xdr_nfsace_decode()
246 /* remove bogus ACL_MASK entry */
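xdr_nfsace_encode() above emits each ACL entry as consecutive big-endian 32-bit words: tag, id, then the permission bits masked to S_IRWXO. A userspace sketch of that packing for one ACL_USER-style entry (the tag and mask constants here are illustrative stand-ins, not the kernel's definitions):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ACL_USER_TAG 0x02	/* illustrative tag value */
	#define PERM_MASK    0007	/* rwx for "other", like S_IRWXO */

	int main(void)
	{
		uint32_t xdr[3], *p = xdr;
		uint32_t uid = 1000, perm = 0755;

		*p++ = htonl(ACL_USER_TAG);	/* e_tag (| typeflag in the kernel) */
		*p++ = htonl(uid);		/* e_uid for ACL_USER entries */
		*p++ = htonl(perm & PERM_MASK);	/* permissions masked to rwx */

		printf("packed %zu words\n", (size_t)(p - xdr));
		return 0;
	}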
/linux-4.1.27/include/sound/
H A Dinfo.h43 void (*read)(struct snd_info_entry *entry,
45 void (*write)(struct snd_info_entry *entry,
50 int (*open)(struct snd_info_entry *entry,
52 int (*release)(struct snd_info_entry *entry,
54 ssize_t (*read)(struct snd_info_entry *entry, void *file_private_data,
57 ssize_t (*write)(struct snd_info_entry *entry, void *file_private_data,
60 loff_t (*llseek)(struct snd_info_entry *entry,
63 unsigned int (*poll)(struct snd_info_entry *entry,
66 int (*ioctl)(struct snd_info_entry *entry, void *file_private_data,
68 int (*mmap)(struct snd_info_entry *entry, void *file_private_data,
86 void (*private_free)(struct snd_info_entry *entry);
126 void snd_info_free_entry(struct snd_info_entry *entry); snd_card_info_read_oss()
127 int snd_info_store_text(struct snd_info_entry *entry); snd_card_info_read_oss()
128 int snd_info_restore_text(struct snd_info_entry *entry); snd_card_info_read_oss()
135 int snd_info_register(struct snd_info_entry *entry); snd_card_info_read_oss()
141 static inline void snd_info_set_text_ops(struct snd_info_entry *entry, snd_info_set_text_ops() argument
145 entry->private_data = private_data; snd_info_set_text_ops()
146 entry->c.text.read = read; snd_info_set_text_ops()
164 static inline void snd_info_free_entry(struct snd_info_entry *entry) { ; } snd_info_free_entry() argument
171 static inline int snd_info_register(struct snd_info_entry *entry) { return 0; } snd_info_register() argument
175 static inline void snd_info_set_text_ops(struct snd_info_entry *entry __attribute__((unused)), snd_info_set_text_ops()
/linux-4.1.27/drivers/net/ethernet/rocker/
H A Drocker.c117 struct hlist_node entry; member in struct:rocker_flow_tbl_entry
126 struct hlist_node entry; member in struct:rocker_group_tbl_entry
152 struct hlist_node entry; member in struct:rocker_fdb_tbl_entry
163 struct hlist_node entry; member in struct:rocker_internal_vlan_tbl_entry
170 struct hlist_node entry; member in struct:rocker_neigh_tbl_entry
1803 struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_ig_port()
1806 entry->key.ig_port.in_pport)) rocker_cmd_flow_tbl_add_ig_port()
1809 entry->key.ig_port.in_pport_mask)) rocker_cmd_flow_tbl_add_ig_port()
1812 entry->key.ig_port.goto_tbl)) rocker_cmd_flow_tbl_add_ig_port()
1819 struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_vlan()
1822 entry->key.vlan.in_pport)) rocker_cmd_flow_tbl_add_vlan()
1825 entry->key.vlan.vlan_id)) rocker_cmd_flow_tbl_add_vlan()
1828 entry->key.vlan.vlan_id_mask)) rocker_cmd_flow_tbl_add_vlan()
1831 entry->key.vlan.goto_tbl)) rocker_cmd_flow_tbl_add_vlan()
1833 if (entry->key.vlan.untagged && rocker_cmd_flow_tbl_add_vlan()
1835 entry->key.vlan.new_vlan_id)) rocker_cmd_flow_tbl_add_vlan()
1842 struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_term_mac()
1845 entry->key.term_mac.in_pport)) rocker_cmd_flow_tbl_add_term_mac()
1848 entry->key.term_mac.in_pport_mask)) rocker_cmd_flow_tbl_add_term_mac()
1851 entry->key.term_mac.eth_type)) rocker_cmd_flow_tbl_add_term_mac()
1854 ETH_ALEN, entry->key.term_mac.eth_dst)) rocker_cmd_flow_tbl_add_term_mac()
1857 ETH_ALEN, entry->key.term_mac.eth_dst_mask)) rocker_cmd_flow_tbl_add_term_mac()
1860 entry->key.term_mac.vlan_id)) rocker_cmd_flow_tbl_add_term_mac()
1863 entry->key.term_mac.vlan_id_mask)) rocker_cmd_flow_tbl_add_term_mac()
1866 entry->key.term_mac.goto_tbl)) rocker_cmd_flow_tbl_add_term_mac()
1868 if (entry->key.term_mac.copy_to_cpu && rocker_cmd_flow_tbl_add_term_mac()
1870 entry->key.term_mac.copy_to_cpu)) rocker_cmd_flow_tbl_add_term_mac()
1878 struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_ucast_routing()
1881 entry->key.ucast_routing.eth_type)) rocker_cmd_flow_tbl_add_ucast_routing()
1884 entry->key.ucast_routing.dst4)) rocker_cmd_flow_tbl_add_ucast_routing()
1887 entry->key.ucast_routing.dst4_mask)) rocker_cmd_flow_tbl_add_ucast_routing()
1890 entry->key.ucast_routing.goto_tbl)) rocker_cmd_flow_tbl_add_ucast_routing()
1893 entry->key.ucast_routing.group_id)) rocker_cmd_flow_tbl_add_ucast_routing()
1900 struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_bridge()
1902 if (entry->key.bridge.has_eth_dst && rocker_cmd_flow_tbl_add_bridge()
1904 ETH_ALEN, entry->key.bridge.eth_dst)) rocker_cmd_flow_tbl_add_bridge()
1906 if (entry->key.bridge.has_eth_dst_mask && rocker_cmd_flow_tbl_add_bridge()
1908 ETH_ALEN, entry->key.bridge.eth_dst_mask)) rocker_cmd_flow_tbl_add_bridge()
1910 if (entry->key.bridge.vlan_id && rocker_cmd_flow_tbl_add_bridge()
1912 entry->key.bridge.vlan_id)) rocker_cmd_flow_tbl_add_bridge()
1914 if (entry->key.bridge.tunnel_id && rocker_cmd_flow_tbl_add_bridge()
1916 entry->key.bridge.tunnel_id)) rocker_cmd_flow_tbl_add_bridge()
1919 entry->key.bridge.goto_tbl)) rocker_cmd_flow_tbl_add_bridge()
1922 entry->key.bridge.group_id)) rocker_cmd_flow_tbl_add_bridge()
1924 if (entry->key.bridge.copy_to_cpu && rocker_cmd_flow_tbl_add_bridge()
1926 entry->key.bridge.copy_to_cpu)) rocker_cmd_flow_tbl_add_bridge()
1933 struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_acl()
1936 entry->key.acl.in_pport)) rocker_cmd_flow_tbl_add_acl()
1939 entry->key.acl.in_pport_mask)) rocker_cmd_flow_tbl_add_acl()
1942 ETH_ALEN, entry->key.acl.eth_src)) rocker_cmd_flow_tbl_add_acl()
1945 ETH_ALEN, entry->key.acl.eth_src_mask)) rocker_cmd_flow_tbl_add_acl()
1948 ETH_ALEN, entry->key.acl.eth_dst)) rocker_cmd_flow_tbl_add_acl()
1951 ETH_ALEN, entry->key.acl.eth_dst_mask)) rocker_cmd_flow_tbl_add_acl()
1954 entry->key.acl.eth_type)) rocker_cmd_flow_tbl_add_acl()
1957 entry->key.acl.vlan_id)) rocker_cmd_flow_tbl_add_acl()
1960 entry->key.acl.vlan_id_mask)) rocker_cmd_flow_tbl_add_acl()
1963 switch (ntohs(entry->key.acl.eth_type)) { rocker_cmd_flow_tbl_add_acl()
1967 entry->key.acl.ip_proto)) rocker_cmd_flow_tbl_add_acl()
1971 entry->key.acl.ip_proto_mask)) rocker_cmd_flow_tbl_add_acl()
1974 entry->key.acl.ip_tos & 0x3f)) rocker_cmd_flow_tbl_add_acl()
1978 entry->key.acl.ip_tos_mask & 0x3f)) rocker_cmd_flow_tbl_add_acl()
1981 (entry->key.acl.ip_tos & 0xc0) >> 6)) rocker_cmd_flow_tbl_add_acl()
1985 (entry->key.acl.ip_tos_mask & 0xc0) >> 6)) rocker_cmd_flow_tbl_add_acl()
1990 if (entry->key.acl.group_id != ROCKER_GROUP_NONE && rocker_cmd_flow_tbl_add_acl()
1992 entry->key.acl.group_id)) rocker_cmd_flow_tbl_add_acl()
2003 struct rocker_flow_tbl_entry *entry = priv; rocker_cmd_flow_tbl_add() local
2007 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) rocker_cmd_flow_tbl_add()
2013 entry->key.tbl_id)) rocker_cmd_flow_tbl_add()
2016 entry->key.priority)) rocker_cmd_flow_tbl_add()
2021 entry->cookie)) rocker_cmd_flow_tbl_add()
2024 switch (entry->key.tbl_id) { rocker_cmd_flow_tbl_add()
2026 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry); rocker_cmd_flow_tbl_add()
2029 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry); rocker_cmd_flow_tbl_add()
2032 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry); rocker_cmd_flow_tbl_add()
2035 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry); rocker_cmd_flow_tbl_add()
2038 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry); rocker_cmd_flow_tbl_add()
2041 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry); rocker_cmd_flow_tbl_add()
2061 const struct rocker_flow_tbl_entry *entry = priv; rocker_cmd_flow_tbl_del() local
2064 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) rocker_cmd_flow_tbl_del()
2070 entry->cookie)) rocker_cmd_flow_tbl_del()
2079 struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_l2_interface()
2082 ROCKER_GROUP_PORT_GET(entry->group_id))) rocker_cmd_group_tbl_add_l2_interface()
2085 entry->l2_interface.pop_vlan)) rocker_cmd_group_tbl_add_l2_interface()
2093 struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_l2_rewrite()
2096 entry->l2_rewrite.group_id)) rocker_cmd_group_tbl_add_l2_rewrite()
2098 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) && rocker_cmd_group_tbl_add_l2_rewrite()
2100 ETH_ALEN, entry->l2_rewrite.eth_src)) rocker_cmd_group_tbl_add_l2_rewrite()
2102 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) && rocker_cmd_group_tbl_add_l2_rewrite()
2104 ETH_ALEN, entry->l2_rewrite.eth_dst)) rocker_cmd_group_tbl_add_l2_rewrite()
2106 if (entry->l2_rewrite.vlan_id && rocker_cmd_group_tbl_add_l2_rewrite()
2108 entry->l2_rewrite.vlan_id)) rocker_cmd_group_tbl_add_l2_rewrite()
2116 struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_group_ids()
2122 entry->group_count)) rocker_cmd_group_tbl_add_group_ids()
2130 for (i = 0; i < entry->group_count; i++) rocker_cmd_group_tbl_add_group_ids()
2132 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i])) rocker_cmd_group_tbl_add_group_ids()
2142 struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_l3_unicast()
2144 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) && rocker_cmd_group_tbl_add_l3_unicast()
2146 ETH_ALEN, entry->l3_unicast.eth_src)) rocker_cmd_group_tbl_add_l3_unicast()
2148 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) && rocker_cmd_group_tbl_add_l3_unicast()
2150 ETH_ALEN, entry->l3_unicast.eth_dst)) rocker_cmd_group_tbl_add_l3_unicast()
2152 if (entry->l3_unicast.vlan_id && rocker_cmd_group_tbl_add_l3_unicast()
2154 entry->l3_unicast.vlan_id)) rocker_cmd_group_tbl_add_l3_unicast()
2157 entry->l3_unicast.ttl_check)) rocker_cmd_group_tbl_add_l3_unicast()
2160 entry->l3_unicast.group_id)) rocker_cmd_group_tbl_add_l3_unicast()
2171 struct rocker_group_tbl_entry *entry = priv; rocker_cmd_group_tbl_add() local
2175 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) rocker_cmd_group_tbl_add()
2182 entry->group_id)) rocker_cmd_group_tbl_add()
2185 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { rocker_cmd_group_tbl_add()
2187 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry); rocker_cmd_group_tbl_add()
2190 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry); rocker_cmd_group_tbl_add()
2194 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry); rocker_cmd_group_tbl_add()
2197 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry); rocker_cmd_group_tbl_add()
2217 const struct rocker_group_tbl_entry *entry = priv; rocker_cmd_group_tbl_del() local
2220 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) rocker_cmd_group_tbl_del()
2226 entry->group_id)) rocker_cmd_group_tbl_del()
2269 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry) rocker_free_tbls()
2270 hash_del(&flow_entry->entry); rocker_free_tbls()
2274 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry) rocker_free_tbls()
2275 hash_del(&group_entry->entry); rocker_free_tbls()
2279 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry) rocker_free_tbls()
2280 hash_del(&fdb_entry->entry); rocker_free_tbls()
2285 tmp, internal_vlan_entry, entry) rocker_free_tbls()
2286 hash_del(&internal_vlan_entry->entry); rocker_free_tbls()
2290 hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry) rocker_free_tbls()
2291 hash_del(&neigh_entry->entry); rocker_free_tbls()
2302 entry, match->key_crc32) { rocker_flow_tbl_find()
2327 hash_del(&found->entry); rocker_flow_tbl_add()
2337 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32); rocker_flow_tbl_add()
2363 hash_del(&found->entry); rocker_flow_tbl_del()
2387 int flags, struct rocker_flow_tbl_entry *entry) rocker_flow_tbl_do()
2392 return rocker_flow_tbl_del(rocker_port, entry, nowait); rocker_flow_tbl_do()
2394 return rocker_flow_tbl_add(rocker_port, entry, nowait); rocker_flow_tbl_do()
2401 struct rocker_flow_tbl_entry *entry; rocker_flow_tbl_ig_port() local
2403 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); rocker_flow_tbl_ig_port()
2404 if (!entry) rocker_flow_tbl_ig_port()
2407 entry->key.priority = ROCKER_PRIORITY_IG_PORT; rocker_flow_tbl_ig_port()
2408 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; rocker_flow_tbl_ig_port()
2409 entry->key.ig_port.in_pport = in_pport; rocker_flow_tbl_ig_port()
2410 entry->key.ig_port.in_pport_mask = in_pport_mask; rocker_flow_tbl_ig_port()
2411 entry->key.ig_port.goto_tbl = goto_tbl; rocker_flow_tbl_ig_port()
2413 return rocker_flow_tbl_do(rocker_port, flags, entry); rocker_flow_tbl_ig_port()
2422 struct rocker_flow_tbl_entry *entry; rocker_flow_tbl_vlan() local
2424 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); rocker_flow_tbl_vlan()
2425 if (!entry) rocker_flow_tbl_vlan()
2428 entry->key.priority = ROCKER_PRIORITY_VLAN; rocker_flow_tbl_vlan()
2429 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; rocker_flow_tbl_vlan()
2430 entry->key.vlan.in_pport = in_pport; rocker_flow_tbl_vlan()
2431 entry->key.vlan.vlan_id = vlan_id; rocker_flow_tbl_vlan()
2432 entry->key.vlan.vlan_id_mask = vlan_id_mask; rocker_flow_tbl_vlan()
2433 entry->key.vlan.goto_tbl = goto_tbl; rocker_flow_tbl_vlan()
2435 entry->key.vlan.untagged = untagged; rocker_flow_tbl_vlan()
2436 entry->key.vlan.new_vlan_id = new_vlan_id; rocker_flow_tbl_vlan()
2438 return rocker_flow_tbl_do(rocker_port, flags, entry); rocker_flow_tbl_vlan()
2448 struct rocker_flow_tbl_entry *entry; rocker_flow_tbl_term_mac() local
2450 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); rocker_flow_tbl_term_mac()
2451 if (!entry) rocker_flow_tbl_term_mac()
2455 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST; rocker_flow_tbl_term_mac()
2456 entry->key.term_mac.goto_tbl = rocker_flow_tbl_term_mac()
2459 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST; rocker_flow_tbl_term_mac()
2460 entry->key.term_mac.goto_tbl = rocker_flow_tbl_term_mac()
2464 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; rocker_flow_tbl_term_mac()
2465 entry->key.term_mac.in_pport = in_pport; rocker_flow_tbl_term_mac()
2466 entry->key.term_mac.in_pport_mask = in_pport_mask; rocker_flow_tbl_term_mac()
2467 entry->key.term_mac.eth_type = eth_type; rocker_flow_tbl_term_mac()
2468 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); rocker_flow_tbl_term_mac()
2469 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); rocker_flow_tbl_term_mac()
2470 entry->key.term_mac.vlan_id = vlan_id; rocker_flow_tbl_term_mac()
2471 entry->key.term_mac.vlan_id_mask = vlan_id_mask; rocker_flow_tbl_term_mac()
2472 entry->key.term_mac.copy_to_cpu = copy_to_cpu; rocker_flow_tbl_term_mac()
2474 return rocker_flow_tbl_do(rocker_port, flags, entry); rocker_flow_tbl_term_mac()
2484 struct rocker_flow_tbl_entry *entry; rocker_flow_tbl_bridge() local
2490 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); rocker_flow_tbl_bridge()
2491 if (!entry) rocker_flow_tbl_bridge()
2494 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; rocker_flow_tbl_bridge()
2497 entry->key.bridge.has_eth_dst = 1; rocker_flow_tbl_bridge()
2498 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst); rocker_flow_tbl_bridge()
2501 entry->key.bridge.has_eth_dst_mask = 1; rocker_flow_tbl_bridge()
2502 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask); rocker_flow_tbl_bridge()
2521 entry->key.priority = priority; rocker_flow_tbl_bridge()
2522 entry->key.bridge.vlan_id = vlan_id; rocker_flow_tbl_bridge()
2523 entry->key.bridge.tunnel_id = tunnel_id; rocker_flow_tbl_bridge()
2524 entry->key.bridge.goto_tbl = goto_tbl; rocker_flow_tbl_bridge()
2525 entry->key.bridge.group_id = group_id; rocker_flow_tbl_bridge()
2526 entry->key.bridge.copy_to_cpu = copy_to_cpu; rocker_flow_tbl_bridge()
2528 return rocker_flow_tbl_do(rocker_port, flags, entry); rocker_flow_tbl_bridge()
2537 struct rocker_flow_tbl_entry *entry; rocker_flow_tbl_ucast4_routing() local
2539 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); rocker_flow_tbl_ucast4_routing()
2540 if (!entry) rocker_flow_tbl_ucast4_routing()
2543 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; rocker_flow_tbl_ucast4_routing()
2544 entry->key.priority = priority; rocker_flow_tbl_ucast4_routing()
2545 entry->key.ucast_routing.eth_type = eth_type; rocker_flow_tbl_ucast4_routing()
2546 entry->key.ucast_routing.dst4 = dst; rocker_flow_tbl_ucast4_routing()
2547 entry->key.ucast_routing.dst4_mask = dst_mask; rocker_flow_tbl_ucast4_routing()
2548 entry->key.ucast_routing.goto_tbl = goto_tbl; rocker_flow_tbl_ucast4_routing()
2549 entry->key.ucast_routing.group_id = group_id; rocker_flow_tbl_ucast4_routing()
2550 entry->key_len = offsetof(struct rocker_flow_tbl_key, rocker_flow_tbl_ucast4_routing()
2553 return rocker_flow_tbl_do(rocker_port, flags, entry); rocker_flow_tbl_ucast4_routing()
2568 struct rocker_flow_tbl_entry *entry; rocker_flow_tbl_acl() local
2570 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); rocker_flow_tbl_acl()
2571 if (!entry) rocker_flow_tbl_acl()
2582 entry->key.priority = priority; rocker_flow_tbl_acl()
2583 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; rocker_flow_tbl_acl()
2584 entry->key.acl.in_pport = in_pport; rocker_flow_tbl_acl()
2585 entry->key.acl.in_pport_mask = in_pport_mask; rocker_flow_tbl_acl()
2588 ether_addr_copy(entry->key.acl.eth_src, eth_src); rocker_flow_tbl_acl()
2590 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask); rocker_flow_tbl_acl()
2592 ether_addr_copy(entry->key.acl.eth_dst, eth_dst); rocker_flow_tbl_acl()
2594 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask); rocker_flow_tbl_acl()
2596 entry->key.acl.eth_type = eth_type; rocker_flow_tbl_acl()
2597 entry->key.acl.vlan_id = vlan_id; rocker_flow_tbl_acl()
2598 entry->key.acl.vlan_id_mask = vlan_id_mask; rocker_flow_tbl_acl()
2599 entry->key.acl.ip_proto = ip_proto; rocker_flow_tbl_acl()
2600 entry->key.acl.ip_proto_mask = ip_proto_mask; rocker_flow_tbl_acl()
2601 entry->key.acl.ip_tos = ip_tos; rocker_flow_tbl_acl()
2602 entry->key.acl.ip_tos_mask = ip_tos_mask; rocker_flow_tbl_acl()
2603 entry->key.acl.group_id = group_id; rocker_flow_tbl_acl()
2605 return rocker_flow_tbl_do(rocker_port, flags, entry); rocker_flow_tbl_acl()
2615 entry, match->group_id) { rocker_group_tbl_find()
2623 static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry) rocker_group_tbl_entry_free() argument
2625 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { rocker_group_tbl_entry_free()
2628 kfree(entry->group_ids); rocker_group_tbl_entry_free()
2633 kfree(entry); rocker_group_tbl_entry_free()
2649 hash_del(&found->entry); rocker_group_tbl_add()
2658 hash_add(rocker->group_tbl, &found->entry, found->group_id); rocker_group_tbl_add()
2681 hash_del(&found->entry); rocker_group_tbl_del()
2700 int flags, struct rocker_group_tbl_entry *entry) rocker_group_tbl_do()
2705 return rocker_group_tbl_del(rocker_port, entry, nowait); rocker_group_tbl_do()
2707 return rocker_group_tbl_add(rocker_port, entry, nowait); rocker_group_tbl_do()
2714 struct rocker_group_tbl_entry *entry; rocker_group_l2_interface() local
2716 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); rocker_group_l2_interface()
2717 if (!entry) rocker_group_l2_interface()
2720 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); rocker_group_l2_interface()
2721 entry->l2_interface.pop_vlan = pop_vlan; rocker_group_l2_interface()
2723 return rocker_group_tbl_do(rocker_port, flags, entry); rocker_group_l2_interface()
2730 struct rocker_group_tbl_entry *entry; rocker_group_l2_fan_out() local
2732 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); rocker_group_l2_fan_out()
2733 if (!entry) rocker_group_l2_fan_out()
2736 entry->group_id = group_id; rocker_group_l2_fan_out()
2737 entry->group_count = group_count; rocker_group_l2_fan_out()
2739 entry->group_ids = kcalloc(group_count, sizeof(u32), rocker_group_l2_fan_out()
2741 if (!entry->group_ids) { rocker_group_l2_fan_out()
2742 kfree(entry); rocker_group_l2_fan_out()
2745 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32)); rocker_group_l2_fan_out()
2747 return rocker_group_tbl_do(rocker_port, flags, entry); rocker_group_l2_fan_out()
2765 struct rocker_group_tbl_entry *entry; rocker_group_l3_unicast() local
2767 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); rocker_group_l3_unicast()
2768 if (!entry) rocker_group_l3_unicast()
2771 entry->group_id = ROCKER_GROUP_L3_UNICAST(index); rocker_group_l3_unicast()
2773 ether_addr_copy(entry->l3_unicast.eth_src, src_mac); rocker_group_l3_unicast()
2775 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac); rocker_group_l3_unicast()
2776 entry->l3_unicast.vlan_id = vlan_id; rocker_group_l3_unicast()
2777 entry->l3_unicast.ttl_check = ttl_check; rocker_group_l3_unicast()
2778 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport); rocker_group_l3_unicast()
2780 return rocker_group_tbl_do(rocker_port, flags, entry); rocker_group_l3_unicast()
2789 entry, be32_to_cpu(ip_addr)) rocker_neigh_tbl_find()
2797 struct rocker_neigh_tbl_entry *entry) _rocker_neigh_add()
2799 entry->index = rocker->neigh_tbl_next_index++; _rocker_neigh_add()
2800 entry->ref_count++; _rocker_neigh_add()
2801 hash_add(rocker->neigh_tbl, &entry->entry, _rocker_neigh_add()
2802 be32_to_cpu(entry->ip_addr)); _rocker_neigh_add()
2806 struct rocker_neigh_tbl_entry *entry) _rocker_neigh_del()
2808 if (--entry->ref_count == 0) { _rocker_neigh_del()
2809 hash_del(&entry->entry); _rocker_neigh_del()
2810 kfree(entry); _rocker_neigh_del()
2815 struct rocker_neigh_tbl_entry *entry, _rocker_neigh_update()
2819 ether_addr_copy(entry->eth_dst, eth_dst); _rocker_neigh_update()
2820 entry->ttl_check = ttl_check; _rocker_neigh_update()
2822 entry->ref_count++; _rocker_neigh_update()
2830 struct rocker_neigh_tbl_entry *entry; rocker_port_ipv4_neigh() local
2843 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); rocker_port_ipv4_neigh()
2844 if (!entry) rocker_port_ipv4_neigh()
2856 entry->ip_addr = ip_addr; rocker_port_ipv4_neigh()
2857 entry->dev = rocker_port->dev; rocker_port_ipv4_neigh()
2858 ether_addr_copy(entry->eth_dst, eth_dst); rocker_port_ipv4_neigh()
2859 entry->ttl_check = true; rocker_port_ipv4_neigh()
2860 _rocker_neigh_add(rocker, entry); rocker_port_ipv4_neigh()
2862 memcpy(entry, found, sizeof(*entry)); rocker_port_ipv4_neigh()
2866 memcpy(entry, found, sizeof(*entry)); rocker_port_ipv4_neigh()
2883 entry->index, rocker_port_ipv4_neigh()
2885 entry->eth_dst, rocker_port_ipv4_neigh()
2887 entry->ttl_check, rocker_port_ipv4_neigh()
2892 err, entry->index); rocker_port_ipv4_neigh()
2897 group_id = ROCKER_GROUP_L3_UNICAST(entry->index); rocker_port_ipv4_neigh()
2907 err, &entry->ip_addr, group_id); rocker_port_ipv4_neigh()
2912 kfree(entry); rocker_port_ipv4_neigh()
2931 * install the entry, otherwise start the ARP process to rocker_port_ipv4_resolve()
2948 struct rocker_neigh_tbl_entry *entry; rocker_port_ipv4_nh() local
2957 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); rocker_port_ipv4_nh()
2958 if (!entry) rocker_port_ipv4_nh()
2972 entry->ip_addr = ip_addr; rocker_port_ipv4_nh()
2973 entry->dev = rocker_port->dev; rocker_port_ipv4_nh()
2974 _rocker_neigh_add(rocker, entry); rocker_port_ipv4_nh()
2975 *index = entry->index; rocker_port_ipv4_nh()
2989 kfree(entry); rocker_port_ipv4_nh()
3363 "Error (%d) ingress port table entry\n", err); rocker_port_ig_tbl()
3450 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32) rocker_fdb_tbl_find()
3483 hash_del(&found->entry); rocker_port_fdb()
3485 hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32); rocker_port_fdb()
3518 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { rocker_port_fdb_flush()
3528 hash_del(&found->entry); rocker_port_fdb_flush()
3686 entry, ifindex) { rocker_internal_vlan_tbl_find()
3698 struct rocker_internal_vlan_tbl_entry *entry; rocker_port_internal_vlan_id_get() local
3703 entry = kzalloc(sizeof(*entry), GFP_KERNEL); rocker_port_internal_vlan_id_get()
3704 if (!entry) rocker_port_internal_vlan_id_get()
3707 entry->ifindex = ifindex; rocker_port_internal_vlan_id_get()
3713 kfree(entry); rocker_port_internal_vlan_id_get()
3717 found = entry; rocker_port_internal_vlan_id_get()
3718 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex); rocker_port_internal_vlan_id_get()
3757 hash_del(&found->entry); rocker_port_internal_vlan_id_put()
4122 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { rocker_port_fdb_dump()
4711 rocker->msix_entries[i].entry = i; rocker_msix_init()
1802 rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_ig_port() argument
1818 rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_vlan() argument
1841 rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_term_mac() argument
1877 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_ucast_routing() argument
1899 rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_bridge() argument
1932 rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) rocker_cmd_flow_tbl_add_acl() argument
2078 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_l2_interface() argument
2092 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info, struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_l2_rewrite() argument
2115 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info, struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_group_ids() argument
2141 rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, struct rocker_group_tbl_entry *entry) rocker_cmd_group_tbl_add_l3_unicast() argument
2386 rocker_flow_tbl_do(struct rocker_port *rocker_port, int flags, struct rocker_flow_tbl_entry *entry) rocker_flow_tbl_do() argument
2699 rocker_group_tbl_do(struct rocker_port *rocker_port, int flags, struct rocker_group_tbl_entry *entry) rocker_group_tbl_do() argument
2796 _rocker_neigh_add(struct rocker *rocker, struct rocker_neigh_tbl_entry *entry) _rocker_neigh_add() argument
2805 _rocker_neigh_del(struct rocker *rocker, struct rocker_neigh_tbl_entry *entry) _rocker_neigh_del() argument
2814 _rocker_neigh_update(struct rocker *rocker, struct rocker_neigh_tbl_entry *entry, u8 *eth_dst, bool ttl_check) _rocker_neigh_update() argument
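Every rocker table entry above embeds a struct hlist_node named "entry" and is keyed by a 32-bit value (key_crc32, group_id, ifindex, or an IP address), so lookup, insert, and teardown all use the generic kernel hashtable helpers. A hedged kernel-style sketch of that pattern (my_entry/my_tbl are illustrative names):

	#include <linux/hashtable.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct my_entry {
		struct hlist_node entry;	/* hashtable linkage, as in rocker */
		u32 key;
	};

	static DEFINE_HASHTABLE(my_tbl, 8);	/* 2^8 buckets */

	static struct my_entry *my_find(u32 key)
	{
		struct my_entry *found;

		/* walk only the bucket that "key" hashes to */
		hash_for_each_possible(my_tbl, found, entry, key)
			if (found->key == key)
				return found;
		return NULL;
	}

	static void my_del(struct my_entry *e)
	{
		hash_del(&e->entry);	/* rocker_free_tbls() does this per bucket */
		kfree(e);
	}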
/linux-4.1.27/arch/powerpc/oprofile/cell/
H A Dpr_util.h52 * The guard pointer is an entry in the _ovly_buf_table,
55 * entry in the _ovly_buf_table, the computation subtracts 1
57 * The guard value is stored in the _ovly_buf_table entry and
58 * is an index (starting at 1) back to the _ovly_table entry
59 * that is pointing at this _ovly_buf_table entry. So, for
62 * - Section 1 points to the first entry of the
64 * of '1', referencing the first (index=0) entry of
66 * - Section 2 points to the second entry of the
68 * of '2', referencing the second (index=1) entry of
/linux-4.1.27/include/linux/netfilter_arp/
H A Darp_tables.h17 /* Standard entry. */
19 struct arpt_entry entry; member in struct:arpt_standard
24 struct arpt_entry entry; member in struct:arpt_error
36 .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_standard)), \
44 .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_error)), \
/linux-4.1.27/include/linux/netfilter_ipv4/
H A Dip_tables.h32 /* Standard entry. */
34 struct ipt_entry entry; member in struct:ipt_standard
39 struct ipt_entry entry; member in struct:ipt_error
51 .entry = IPT_ENTRY_INIT(sizeof(struct ipt_standard)), \
59 .entry = IPT_ENTRY_INIT(sizeof(struct ipt_error)), \
/linux-4.1.27/arch/arm64/kernel/
H A Djump_label.c25 void arch_jump_label_transform(struct jump_entry *entry, arch_jump_label_transform() argument
28 void *addr = (void *)entry->code; arch_jump_label_transform()
32 insn = aarch64_insn_gen_branch_imm(entry->code, arch_jump_label_transform()
33 entry->target, arch_jump_label_transform()
42 void arch_jump_label_transform_static(struct jump_entry *entry, arch_jump_label_transform_static() argument
/linux-4.1.27/drivers/net/ethernet/natsemi/
H A Dsonic.c206 int entry = lp->next_tx; sonic_send_packet() local
229 sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ sonic_send_packet()
230 sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */ sonic_send_packet()
231 sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */ sonic_send_packet()
232 sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff); sonic_send_packet()
233 sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16); sonic_send_packet()
234 sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length); sonic_send_packet()
235 sonic_tda_put(dev, entry, SONIC_TD_LINK, sonic_send_packet()
236 sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL); sonic_send_packet()
239 * Must set tx_skb[entry] only after clearing status, and sonic_send_packet()
243 lp->tx_len[entry] = length; sonic_send_packet()
244 lp->tx_laddr[entry] = laddr; sonic_send_packet()
245 lp->tx_skb[entry] = skb; sonic_send_packet()
250 lp->eol_tx = entry; sonic_send_packet()
252 lp->next_tx = (entry + 1) & SONIC_TDS_MASK; sonic_send_packet()
291 int entry = lp->cur_tx; sonic_interrupt() local
296 * unallocated/freed (status set & tx_skb[entry] clear) sonic_interrupt()
297 * allocated and sent (status set & tx_skb[entry] set ) sonic_interrupt()
298 * allocated and not yet sent (status clear & tx_skb[entry] set ) sonic_interrupt()
299 * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear) sonic_interrupt()
305 while (lp->tx_skb[entry] != NULL) { sonic_interrupt()
306 if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0) sonic_interrupt()
311 lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE); sonic_interrupt()
325 dev_kfree_skb_irq(lp->tx_skb[entry]); sonic_interrupt()
326 lp->tx_skb[entry] = NULL; sonic_interrupt()
328 dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE); sonic_interrupt()
329 lp->tx_laddr[entry] = (dma_addr_t)0; sonic_interrupt()
332 if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) { sonic_interrupt()
333 entry = (entry + 1) & SONIC_TDS_MASK; sonic_interrupt()
336 entry = (entry + 1) & SONIC_TDS_MASK; sonic_interrupt()
339 if (freed_some || lp->tx_skb[entry] == NULL) sonic_interrupt()
341 lp->cur_tx = entry; sonic_interrupt()
412 int entry = lp->cur_rx; sonic_rx() local
414 while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) { sonic_rx()
422 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); sonic_rx()
444 dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE); sonic_rx()
445 used_skb = lp->rx_skb[entry]; sonic_rx()
446 pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN); sonic_rx()
454 lp->rx_laddr[entry] = new_laddr; sonic_rx()
455 lp->rx_skb[entry] = new_skb; sonic_rx()
459 sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l); sonic_rx()
460 sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h); sonic_rx()
488 sonic_rda_put(dev, entry, SONIC_RD_LINK, sonic_rx()
489 sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL); sonic_rx()
490 sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1); sonic_rx()
493 lp->eol_rx = entry; sonic_rx()
494 lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK; sonic_rx()
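sonic.c advances its TX and RX descriptor indices with "(entry + 1) & MASK", which wraps around a power-of-two ring without a modulo. A runnable userspace sketch of that arithmetic (the real SONIC_TDS_MASK value comes from the driver; 16 descriptors is illustrative):

	#include <stdio.h>

	#define NUM_TDS  16			/* must be a power of two */
	#define TDS_MASK (NUM_TDS - 1)

	int main(void)
	{
		int entry = NUM_TDS - 1;	/* last descriptor in the ring */

		entry = (entry + 1) & TDS_MASK;	/* wraps back to 0 */
		printf("next entry = %d\n", entry);
		return 0;
	}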
/linux-4.1.27/drivers/md/
H A Ddm-cache-policy-mq.c166 * Insert an entry at the back of the given level.
186 * Gives us the oldest entry of the lowest populated level. If the first
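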
215 * Pops an entry from a level that is not past a sentinel.
304 * Describes a cache entry. Used in both the cache and the pre_cache.
306 struct entry { struct
319 * Rather than storing the cblock in an entry, we allocate all entries in
320 * an array, and infer the cblock from the entry position.
325 struct entry *entries, *entries_end;
334 ep->entries = vzalloc(sizeof(struct entry) * nr_entries); epool_init()
354 static struct entry *alloc_entry(struct entry_pool *ep) alloc_entry()
356 struct entry *e; alloc_entry()
361 e = list_entry(list_pop(&ep->free), struct entry, list); alloc_entry()
372 static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock) alloc_particular_entry()
374 struct entry *e = ep->entries + from_cblock(cblock); alloc_particular_entry()
383 static void free_entry(struct entry_pool *ep, struct entry *e) free_entry()
392 * Returns NULL if the entry is free.
394 static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock) epool_find()
396 struct entry *e = ep->entries + from_cblock(cblock); epool_find()
405 static bool in_pool(struct entry_pool *ep, struct entry *e) in_pool()
410 static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e) infer_cblock()
457 * and found an entry in the pre_cache or cache. Currently used to
476 * The hash table allows us to quickly find an entry by origin
495 static void hash_insert(struct mq_policy *mq, struct entry *e) hash_insert()
502 static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock) hash_lookup()
506 struct entry *e; hash_lookup()
518 static void hash_remove(struct entry *e) hash_remove()
546 static unsigned queue_level(struct entry *e) queue_level()
551 static bool in_cache(struct mq_policy *mq, struct entry *e) in_cache()
557 * Inserts the entry into the pre_cache or the cache. Ensures the cache
559 * Sets the tick which records when the entry was last moved about.
561 static void push(struct mq_policy *mq, struct entry *e) push()
573 * Removes an entry from pre_cache or cache. Removes from the hash table.
575 static void del(struct mq_policy *mq, struct entry *e) del()
586 * Like del, except it removes the first entry in the queue (ie. the least
589 static struct entry *pop(struct mq_policy *mq, struct queue *q) pop()
591 struct entry *e; pop()
597 e = container_of(h, struct entry, list); pop()
603 static struct entry *pop_old(struct mq_policy *mq, struct queue *q) pop_old()
605 struct entry *e; pop_old()
611 e = container_of(h, struct entry, list); pop_old()
617 static struct entry *peek(struct queue *q) peek()
620 return h ? container_of(h, struct entry, list) : NULL; peek()
642 struct entry *e; check_generation()
671 * Whenever we use an entry we bump up its hit counter, and push it to the
674 static void requeue(struct mq_policy *mq, struct entry *e) requeue()
682 * Demote the least recently used entry from the cache to the pre_cache.
683 * Returns the new cache entry to use, and the old origin block it was
686 * We drop the hit count on the demoted entry back to 1 to stop it bouncing
690 * - just forget about the demoted entry completely (ie. don't insert it
699 struct entry *demoted = peek(&mq->cache_clean); demote_cblock()
737 struct entry *e; promote_threshold()
780 static bool should_promote(struct mq_policy *mq, struct entry *e, should_promote()
788 struct entry *e, cache_entry_found()
802 * Moves an entry from the pre_cache to the cache. The main work is
805 static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e, pre_cache_to_cache()
810 struct entry *new_e; pre_cache_to_cache()
840 static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e, pre_cache_entry_found()
865 struct entry *e = alloc_entry(&mq->pre_cache_pool); insert_in_pre_cache()
869 * There's no spare entry structure, so we grab the least insert_in_pre_cache()
890 struct entry *e; insert_in_cache()
948 struct entry *e = hash_lookup(mq, oblock); map()
995 struct entry *e = container_of(h, struct entry, list); update_pre_cache_hits()
1002 struct entry *e = container_of(h, struct entry, list); update_cache_hits()
1057 struct entry *e; mq_lookup()
1076 struct entry *e; __mq_set_clear_dirty()
1109 struct entry *e; mq_load_mapping()
1126 struct entry *e; mq_save_hints()
1133 e = container_of(h, struct entry, list); mq_save_hints()
1162 struct entry *e; __remove_mapping()
1182 struct entry *e = epool_find(&mq->cache_pool, cblock); __remove_cblock()
1222 struct entry *e = pop_old(mq, &mq->cache_dirty); __mq_writeback_work()
1254 struct entry *e = hash_lookup(mq, current_oblock); __force_mapping()
1446 sizeof(struct entry), mq_init()
1447 __alignof__(struct entry), mq_init()
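The comment block above explains the mq policy's entry-pool trick: all entries live in one array, so the cblock is never stored and is instead inferred from the entry's position. A runnable userspace sketch of that pointer arithmetic (names mirror the policy loosely; this is not the kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	struct entry { int hit_count; };

	struct entry_pool { struct entry *entries; size_t nr; };

	/* infer_cblock(): pointer arithmetic replaces a stored index */
	static size_t infer_cblock(struct entry_pool *ep, struct entry *e)
	{
		return (size_t)(e - ep->entries);
	}

	int main(void)
	{
		struct entry_pool ep;

		ep.nr = 64;
		ep.entries = calloc(ep.nr, sizeof(struct entry));
		if (!ep.entries)
			return 1;

		/* alloc_particular_entry() style: cblock -> entry is indexing */
		struct entry *e = ep.entries + 42;
		printf("cblock = %zu\n", infer_cblock(&ep, e));	/* prints 42 */

		free(ep.entries);
		return 0;
	}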
/linux-4.1.27/net/dccp/
H A Dfeat.c282 static void dccp_feat_print_entry(struct dccp_feat_entry const *entry) dccp_feat_print_entry() argument
284 dccp_debug(" * %s %s = ", entry->is_local ? "local" : "remote", dccp_feat_print_entry()
285 dccp_feat_fname(entry->feat_num)); dccp_feat_print_entry()
286 dccp_feat_printval(entry->feat_num, &entry->val); dccp_feat_print_entry()
287 dccp_pr_debug_cat(", state=%s %s\n", dccp_feat_sname[entry->state], dccp_feat_print_entry()
288 entry->needs_confirm ? "(Confirm pending)" : ""); dccp_feat_print_entry()
414 static void dccp_feat_entry_destructor(struct dccp_feat_entry *entry) dccp_feat_entry_destructor() argument
416 if (entry != NULL) { dccp_feat_entry_destructor()
417 dccp_feat_val_destructor(entry->feat_num, &entry->val); dccp_feat_entry_destructor()
418 kfree(entry); dccp_feat_entry_destructor()
434 struct dccp_feat_entry *entry; dccp_feat_list_lookup() local
436 list_for_each_entry(entry, fn_list, node) { list_for_each_entry()
437 if (entry->feat_num == feat_num && entry->is_local == is_local) list_for_each_entry()
438 return entry; list_for_each_entry()
439 else if (entry->feat_num > feat_num) list_for_each_entry()
456 struct dccp_feat_entry *entry; dccp_feat_entry_new() local
458 list_for_each_entry(entry, head, node) list_for_each_entry()
459 if (entry->feat_num == feat && entry->is_local == local) { list_for_each_entry()
460 dccp_feat_val_destructor(entry->feat_num, &entry->val); list_for_each_entry()
461 return entry; list_for_each_entry()
462 } else if (entry->feat_num > feat) {
463 head = &entry->node;
467 entry = kmalloc(sizeof(*entry), gfp_any());
468 if (entry != NULL) {
469 entry->feat_num = feat;
470 entry->is_local = local;
471 list_add_tail(&entry->node, head);
473 return entry;
504 * dccp_feat_push_confirm - Add a Confirm entry to the FN list
538 static inline void dccp_feat_list_pop(struct dccp_feat_entry *entry) dccp_feat_list_pop() argument
540 list_del(&entry->node); dccp_feat_list_pop()
541 dccp_feat_entry_destructor(entry); dccp_feat_list_pop()
546 struct dccp_feat_entry *entry, *next; dccp_feat_list_purge() local
548 list_for_each_entry_safe(entry, next, fn_list, node) dccp_feat_list_purge()
549 dccp_feat_entry_destructor(entry); dccp_feat_list_purge()
557 struct dccp_feat_entry *entry, *new; dccp_feat_clone_list() local
560 list_for_each_entry(entry, from, node) { list_for_each_entry()
561 new = dccp_feat_clone_entry(entry); list_for_each_entry()
775 struct dccp_feat_entry *entry; dccp_feat_nn_get() local
777 entry = dccp_feat_list_lookup(&dp->dccps_featneg, feat, 1); dccp_feat_nn_get()
778 if (entry != NULL) dccp_feat_nn_get()
779 return entry->val.nn; dccp_feat_nn_get()
805 struct dccp_feat_entry *entry; dccp_feat_signal_nn_change() local
817 entry = dccp_feat_list_lookup(fn, feat, 1); dccp_feat_signal_nn_change()
818 if (entry != NULL) { dccp_feat_signal_nn_change()
819 dccp_pr_debug("Clobbering existing NN entry %llu -> %llu\n", dccp_feat_signal_nn_change()
820 (unsigned long long)entry->val.nn, dccp_feat_signal_nn_change()
822 dccp_feat_list_pop(entry); dccp_feat_signal_nn_change()
971 struct dccp_feat_entry *entry; dccp_feat_finalise_settings() local
983 list_for_each_entry(entry, fn, node) dccp_feat_finalise_settings()
984 if (entry->feat_num == DCCPF_CCID && entry->val.sp.len == 1) dccp_feat_finalise_settings()
985 ccids[entry->is_local] = entry->val.sp.vec[0]; dccp_feat_finalise_settings()
1001 struct dccp_feat_entry *entry; dccp_feat_server_ccid_dependencies() local
1005 entry = dccp_feat_list_lookup(fn, DCCPF_CCID, is_local); dccp_feat_server_ccid_dependencies()
1007 if (entry != NULL && !entry->empty_confirm) dccp_feat_server_ccid_dependencies()
1008 ccid = entry->val.sp.vec[0]; dccp_feat_server_ccid_dependencies()
1018 /* Select the first entry in @servlist that also occurs in @clilist (6.3.1) */ dccp_feat_preflist_match()
1031 * dccp_feat_prefer - Move preferred entry to the start of array
1059 * A value of 0 means that negotiation failed (no shared entry).
1102 struct dccp_feat_entry *entry; dccp_feat_change_recv() local
1129 entry = dccp_feat_list_lookup(fn, feat, local); dccp_feat_change_recv()
1130 if (entry == NULL) { dccp_feat_change_recv()
1161 } else if (entry->state == FEAT_UNSTABLE) { /* 6.6.2 */ dccp_feat_change_recv()
1165 if (dccp_feat_reconcile(&entry->val, val, len, server, true)) { dccp_feat_change_recv()
1166 entry->empty_confirm = false; dccp_feat_change_recv()
1169 } else if (entry->state == FEAT_INITIALISING) { dccp_feat_change_recv()
1172 * the connection by checking whether entry contains the default dccp_feat_change_recv()
1180 if (!dccp_feat_reconcile(&entry->val, &defval, 1, server, true)) dccp_feat_change_recv()
1182 entry->empty_confirm = true; dccp_feat_change_recv()
1184 entry->needs_confirm = true; dccp_feat_change_recv()
1185 entry->needs_mandatory = false; dccp_feat_change_recv()
1186 entry->state = FEAT_STABLE; dccp_feat_change_recv()
1213 struct dccp_feat_entry *entry = dccp_feat_list_lookup(fn, feat, local); dccp_feat_confirm_recv() local
1217 if (entry == NULL) { /* nothing queued: ignore or handle error */ dccp_feat_confirm_recv()
1226 if (entry->state != FEAT_CHANGING) /* 6.6.2 */ dccp_feat_confirm_recv()
1237 * entry from the list. dccp_feat_confirm_recv()
1239 dccp_feat_list_pop(entry); dccp_feat_confirm_recv()
1244 if (len > sizeof(entry->val.nn)) dccp_feat_confirm_recv()
1247 if (entry->val.nn == dccp_decode_value_var(val, len)) dccp_feat_confirm_recv()
1271 if (dccp_feat_reconcile(&entry->val, plist, plen, server, 0) != *val) { dccp_feat_confirm_recv()
1275 entry->val.sp.vec[0] = *val; dccp_feat_confirm_recv()
1278 entry->state = FEAT_STABLE; dccp_feat_confirm_recv()
1310 struct dccp_feat_entry *entry; dccp_feat_handle_nn_established() local
1347 entry = dccp_feat_list_lookup(fn, feat, local); dccp_feat_handle_nn_established()
1348 if (entry == NULL || entry->state != FEAT_CHANGING) dccp_feat_handle_nn_established()
1358 if (fval.nn != entry->val.nn) dccp_feat_handle_nn_established()
1364 /* It has been confirmed - so remove the entry */ dccp_feat_handle_nn_established()
1365 dccp_feat_list_pop(entry); dccp_feat_handle_nn_established()
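dccp_feat_entry_new() above keeps the feature-negotiation list sorted by feat_num: the walk either finds an existing (feat, is_local) slot to reuse or the node to insert before. A hedged sketch of that ordered-insert idiom, leaning on the fields shown above (the value bookkeeping done by dccp_feat_val_destructor() on reuse is omitted):

	static struct dccp_feat_entry *feat_entry_new(struct list_head *fn_list,
						      u8 feat, bool local)
	{
		struct dccp_feat_entry *entry;
		struct list_head *head = fn_list;

		list_for_each_entry(entry, fn_list, node) {
			if (entry->feat_num == feat && entry->is_local == local)
				return entry;		/* reuse slot in place */
			if (entry->feat_num > feat) {
				head = &entry->node;	/* insert before this one */
				break;
			}
		}
		entry = kmalloc(sizeof(*entry), gfp_any());
		if (entry) {
			entry->feat_num = feat;
			entry->is_local = local;
			/* tail of 'head' == immediately before it, keeping order */
			list_add_tail(&entry->node, head);
		}
		return entry;
	}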
/linux-4.1.27/arch/microblaze/include/asm/
H A Dentry.h21 * These are per-cpu variables required in entry.S, among other
30 DECLARE_PER_CPU(unsigned int, ENTRY_SP); /* Saved SP on kernel entry */
31 DECLARE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */
/linux-4.1.27/include/uapi/linux/
H A Dauxvec.h9 #define AT_IGNORE 1 /* entry should be ignored */
12 #define AT_PHENT 4 /* size of program header entry */
17 #define AT_ENTRY 9 /* entry point of program */
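The AT_* constants above describe auxiliary-vector entries the kernel passes to every program; in userspace, glibc exposes them through getauxval(3). A small runnable sketch:

	#include <stdio.h>
	#include <sys/auxv.h>

	int main(void)
	{
		/* AT_ENTRY: program entry point; AT_PHENT: program header size */
		printf("entry point: %#lx\n", getauxval(AT_ENTRY));
		printf("phdr entry size: %lu\n", getauxval(AT_PHENT));
		return 0;
	}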
/linux-4.1.27/drivers/net/wireless/iwlwifi/
H A Diwl-phy-db.c186 struct iwl_phy_db_entry *entry = iwl_phy_db_free_section() local
188 if (!entry) iwl_phy_db_free_section()
191 kfree(entry->data); iwl_phy_db_free_section()
192 entry->data = NULL; iwl_phy_db_free_section()
193 entry->size = 0; iwl_phy_db_free_section()
221 struct iwl_phy_db_entry *entry; iwl_phy_db_set_section() local
231 entry = iwl_phy_db_get_section(phy_db, type, chg_id); iwl_phy_db_set_section()
232 if (!entry) iwl_phy_db_set_section()
235 kfree(entry->data); iwl_phy_db_set_section()
236 entry->data = kmemdup(phy_db_notif->data, size, alloc_ctx); iwl_phy_db_set_section()
237 if (!entry->data) { iwl_phy_db_set_section()
238 entry->size = 0; iwl_phy_db_set_section()
242 entry->size = size; iwl_phy_db_set_section()
316 struct iwl_phy_db_entry *entry; iwl_phy_db_get_section_data() local
328 entry = iwl_phy_db_get_section(phy_db, type, ch_group_id); iwl_phy_db_get_section_data()
329 if (!entry) iwl_phy_db_get_section_data()
332 *data = entry->data; iwl_phy_db_get_section_data()
333 *size = entry->size; iwl_phy_db_get_section_data()
375 struct iwl_phy_db_entry *entry; iwl_phy_db_send_all_channel_groups() local
379 entry = iwl_phy_db_get_section(phy_db, iwl_phy_db_send_all_channel_groups()
382 if (!entry) iwl_phy_db_send_all_channel_groups()
385 if (!entry->size) iwl_phy_db_send_all_channel_groups()
391 entry->size, iwl_phy_db_send_all_channel_groups()
392 entry->data); iwl_phy_db_send_all_channel_groups()
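iwl_phy_db_set_section() above replaces a section's payload with the free-old/kmemdup-new pattern, taking care that an allocation failure leaves a consistent (NULL, 0) pair. A hedged kernel-style sketch (my_section is an illustrative type; the real code picks its GFP context dynamically):

	struct my_section { u8 *data; u16 size; };

	static int section_set(struct my_section *entry, const u8 *data, u16 size)
	{
		kfree(entry->data);			/* drop the old copy */
		entry->data = kmemdup(data, size, GFP_KERNEL);
		if (!entry->data) {
			entry->size = 0;		/* never leave a stale size */
			return -ENOMEM;
		}
		entry->size = size;
		return 0;
	}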
/linux-4.1.27/arch/powerpc/include/asm/
H A Dpgtable-ppc64-4k.h5 * for each page table entry. The PMD and PGD level use a 32b record for
6 * each entry by assuming that each entry is page aligned.
25 /* PMD_SHIFT determines what a second-level page table entry can map */
33 /* PUD_SHIFT determines what a third-level page table entry can map */
38 /* PGDIR_SHIFT determines what a fourth-level page table entry can map */
/linux-4.1.27/drivers/acpi/apei/
H A Dapei-base.c69 int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val) __apei_exec_read_register() argument
73 rc = apei_read(val, &entry->register_region); __apei_exec_read_register()
76 *val >>= entry->register_region.bit_offset; __apei_exec_read_register()
77 *val &= entry->mask; __apei_exec_read_register()
83 struct acpi_whea_header *entry) apei_exec_read_register()
88 rc = __apei_exec_read_register(entry, &val); apei_exec_read_register()
98 struct acpi_whea_header *entry) apei_exec_read_register_value()
102 rc = apei_exec_read_register(ctx, entry); apei_exec_read_register_value()
105 ctx->value = (ctx->value == entry->value); apei_exec_read_register_value()
111 int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val) __apei_exec_write_register() argument
115 val &= entry->mask; __apei_exec_write_register()
116 val <<= entry->register_region.bit_offset; __apei_exec_write_register()
117 if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) { __apei_exec_write_register()
119 rc = apei_read(&valr, &entry->register_region); __apei_exec_write_register()
122 valr &= ~(entry->mask << entry->register_region.bit_offset); __apei_exec_write_register()
125 rc = apei_write(val, &entry->register_region); __apei_exec_write_register()
131 struct acpi_whea_header *entry) apei_exec_write_register()
133 return __apei_exec_write_register(entry, ctx->value); apei_exec_write_register()
138 struct acpi_whea_header *entry) apei_exec_write_register_value()
142 ctx->value = entry->value; apei_exec_write_register_value()
143 rc = apei_exec_write_register(ctx, entry); apei_exec_write_register_value()
150 struct acpi_whea_header *entry) apei_exec_noop()
165 struct acpi_whea_header *entry; __apei_exec_run() local
179 entry = &ctx->action_table[i]; __apei_exec_run()
180 if (entry->action != action) __apei_exec_run()
183 if (entry->instruction >= ctx->instructions || __apei_exec_run()
184 !ctx->ins_table[entry->instruction].run) { __apei_exec_run()
187 entry->instruction); __apei_exec_run()
190 run = ctx->ins_table[entry->instruction].run; __apei_exec_run()
191 rc = run(ctx, entry); __apei_exec_run()
207 struct acpi_whea_header *entry,
217 struct acpi_whea_header *entry; apei_exec_for_each_entry() local
221 entry = ctx->action_table + i; apei_exec_for_each_entry()
222 ins = entry->instruction; apei_exec_for_each_entry()
231 rc = func(ctx, entry, data); apei_exec_for_each_entry()
240 struct acpi_whea_header *entry, pre_map_gar_callback()
243 u8 ins = entry->instruction; pre_map_gar_callback()
246 return apei_map_generic_address(&entry->register_region); pre_map_gar_callback()
273 struct acpi_whea_header *entry, post_unmap_gar_callback()
276 u8 ins = entry->instruction; post_unmap_gar_callback()
279 apei_unmap_generic_address(&entry->register_region); post_unmap_gar_callback()
436 * EINJ has two groups of GARs (EINJ table entry and trigger table
437 * entry), so common resources are subtracted from the trigger table
716 struct acpi_whea_header *entry, collect_res_callback()
720 struct acpi_generic_address *reg = &entry->register_region; collect_res_callback()
721 u8 ins = entry->instruction; collect_res_callback()
82 apei_exec_read_register(struct apei_exec_context *ctx, struct acpi_whea_header *entry) apei_exec_read_register() argument
97 apei_exec_read_register_value(struct apei_exec_context *ctx, struct acpi_whea_header *entry) apei_exec_read_register_value() argument
130 apei_exec_write_register(struct apei_exec_context *ctx, struct acpi_whea_header *entry) apei_exec_write_register() argument
137 apei_exec_write_register_value(struct apei_exec_context *ctx, struct acpi_whea_header *entry) apei_exec_write_register_value() argument
149 apei_exec_noop(struct apei_exec_context *ctx, struct acpi_whea_header *entry) apei_exec_noop() argument
239 pre_map_gar_callback(struct apei_exec_context *ctx, struct acpi_whea_header *entry, void *data) pre_map_gar_callback() argument
272 post_unmap_gar_callback(struct apei_exec_context *ctx, struct acpi_whea_header *entry, void *data) post_unmap_gar_callback() argument
715 collect_res_callback(struct apei_exec_context *ctx, struct acpi_whea_header *entry, void *data) collect_res_callback() argument
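__apei_exec_read_register() and __apei_exec_write_register() above extract and insert a register field by shifting by the region's bit_offset and applying the entry's mask, preserving the other bits on write when APEI_EXEC_PRESERVE_REGISTER is set. A runnable userspace sketch of that bit arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t field_read(uint64_t raw, unsigned bit_offset, uint64_t mask)
	{
		return (raw >> bit_offset) & mask;
	}

	static uint64_t field_write(uint64_t raw, unsigned bit_offset,
				    uint64_t mask, uint64_t val)
	{
		raw &= ~(mask << bit_offset);	/* preserve the other bits */
		return raw | ((val & mask) << bit_offset);
	}

	int main(void)
	{
		uint64_t reg = 0xffff0000;

		printf("read:  %#llx\n",
		       (unsigned long long)field_read(reg, 16, 0xff));
		printf("write: %#llx\n",
		       (unsigned long long)field_write(reg, 0, 0xff, 0x5a));
		return 0;
	}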
H A Dapei-internal.h15 struct acpi_whea_header *entry);
82 int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
83 int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
85 struct acpi_whea_header *entry);
87 struct acpi_whea_header *entry);
89 struct acpi_whea_header *entry);
91 struct acpi_whea_header *entry);
93 struct acpi_whea_header *entry);
/linux-4.1.27/drivers/staging/vt6656/
H A Dkey.c57 u32 entry = 0; vnt_set_keymode() local
73 entry = key->hw_key_idx; vnt_set_keymode()
82 /* default key last entry */ vnt_set_keymode()
83 entry = MAX_KEY_TABLE - 1; vnt_set_keymode()
84 key->hw_key_idx = entry; vnt_set_keymode()
98 /* Don't save entry for pairwise key for station mode */ vnt_set_keymode()
100 clear_bit(entry, &priv->key_entry_inuse); vnt_set_keymode()
116 vnt_mac_set_keyentry(priv, key_mode, entry, key_inx, bssid, key->key); vnt_set_keymode()
/linux-4.1.27/fs/quota/
H A Dquota_tree.h16 __le32 dqdh_next_free; /* Number of next block with free entry */
17 __le32 dqdh_prev_free; /* Number of previous block with free entry */
/linux-4.1.27/fs/freevxfs/
H A Dvxfs_dir.h43 * This entry is the head of every filesystem block in a directory.
58 * of a VxFS directory entry.
63 * VxFS directory entry.
69 u_int16_t d_hashnext; /* next hash entry */
74 * VXFS_DIRPAD defines the directory entry boundaries, it _must_ be
76 * VXFS_NAMEMIN is the length of a directory entry with a NULL d_name.
79 * VXFS_DIRLEN calculates the directory entry size for an entry with
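The comment above describes the VXFS_DIRLEN() arithmetic: entry size is the fixed header length (VXFS_NAMEMIN) plus the name length, rounded up to the VXFS_DIRPAD boundary. A runnable userspace sketch of that rounding (the header size and macro names here are illustrative; the real VXFS_NAMEMIN is derived from the on-disk struct):

	#include <stdio.h>

	#define DIRPAD		4	/* must be a power of two */
	#define NAMEMIN		16	/* illustrative fixed-header size */
	#define DIRROUND(len)	((DIRPAD + (len) - 1) & ~(DIRPAD - 1))
	#define DIRLEN(nlen)	DIRROUND(NAMEMIN + (nlen))

	int main(void)
	{
		/* a 5-byte name: 16 + 5 = 21, rounded up to 24 */
		printf("entry size = %d\n", DIRLEN(5));
		return 0;
	}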
/linux-4.1.27/drivers/remoteproc/
H A Dste_modem_rproc.c35 /* STE-Modem firmware entry */
49 * such as the remoteproc resource table. Each entry is identified by a unique
66 /* Find the entry for resource table in the Table of Content */ sproc_find_rsc_entry()
91 const struct ste_toc_entry *entry; sproc_find_rsc_table() local
96 entry = sproc_find_rsc_entry(fw->data); sproc_find_rsc_table()
97 if (!entry) { sproc_find_rsc_table()
102 table = (void *)(fw->data + entry->start); sproc_find_rsc_table()
105 if (entry->start > SPROC_FW_SIZE || sproc_find_rsc_table()
106 entry->size > SPROC_FW_SIZE || sproc_find_rsc_table()
108 entry->start + entry->size > fw->size || sproc_find_rsc_table()
109 sizeof(struct resource_table) > entry->size) { sproc_find_rsc_table()
129 sizeof(struct resource_table) > entry->size) { sproc_find_rsc_table()
142 *tablesz = entry->size; sproc_find_rsc_table()
152 const struct ste_toc_entry *entry; sproc_find_loaded_rsc_table() local
157 entry = sproc_find_rsc_entry(sproc->fw_addr); sproc_find_loaded_rsc_table()
158 if (!entry) { sproc_find_loaded_rsc_table()
163 return sproc->fw_addr + entry->start; sproc_find_loaded_rsc_table()
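
sproc_find_rsc_table() above refuses to use a TOC entry unless its start and size fit inside the firmware image and can hold at least a struct resource_table. A compact sketch of those sanity checks with simplified types; rsc_entry_sane() is a hypothetical helper:

    #include <stdbool.h>
    #include <stddef.h>

    struct toc_entry { unsigned int start; unsigned int size; };

    static bool rsc_entry_sane(const struct toc_entry *entry,
                               size_t fw_size, size_t table_min)
    {
        if (entry->start > fw_size || entry->size > fw_size)
            return false;            /* offset or size out of range */
        if (entry->start + entry->size > fw_size)
            return false;            /* entry would overrun the image
                                      * (the range checks above keep the
                                      * sum from wrapping for sane sizes) */
        if (table_min > entry->size)
            return false;            /* too small for a resource_table */
        return true;
    }
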
/linux-4.1.27/fs/xfs/libxfs/
H A Dxfs_dir2_block.c193 * The biggest entry large enough to avoid compaction. xfs_dir2_block_need_space()
202 * Tag just before the first leaf entry. xfs_dir2_block_need_space()
207 /* Data object just before the first leaf entry. */ xfs_dir2_block_need_space()
227 * Tag just before the first leaf entry. xfs_dir2_block_need_space()
231 /* Data object just before the first leaf entry. */ xfs_dir2_block_need_space()
236 * the space before the first leaf entry needs to be free so it xfs_dir2_block_need_space()
237 * can be expanded to hold the pointer to the new entry. xfs_dir2_block_need_space()
247 * Not the same free entry, just check its length. xfs_dir2_block_need_space()
259 * Yes, use the second-largest entry instead if it works. xfs_dir2_block_need_space()
276 * Leave the highest-numbered stale entry stale.
328 * Add an entry to a block directory.
339 xfs_dir2_data_entry_t *dep; /* block data entry */ xfs_dir2_block_addname()
341 xfs_dir2_data_unused_t *dup; /* block unused entry */ xfs_dir2_block_addname()
344 xfs_dahash_t hash; /* hash value of found entry */ xfs_dir2_block_addname()
349 int len; /* length of the new entry */ xfs_dir2_block_addname()
379 * space for entry and new leaf. xfs_dir2_block_addname()
395 * If we don't have space for the new entry & leaf ... xfs_dir2_block_addname()
403 * Then add the new entry in that format. xfs_dir2_block_addname()
450 * Mark the space needed for the new leaf entry, now in use. xfs_dir2_block_addname()
459 * Update the tail (entry count). xfs_dir2_block_addname()
471 * Adjust pointer to the first leaf entry, we're about to move xfs_dir2_block_addname()
472 * the table up one to open up space for the new leaf entry. xfs_dir2_block_addname()
483 * Use a stale leaf for our new entry. xfs_dir2_block_addname()
500 * Move entries toward the low-numbered stale entry. xfs_dir2_block_addname()
512 * Move entries toward the high-numbered stale entry. xfs_dir2_block_addname()
526 * Point to the new data entry. xfs_dir2_block_addname()
530 * Fill in the leaf entry. xfs_dir2_block_addname()
537 * Mark space for the data entry used. xfs_dir2_block_addname()
543 * Create the new data entry. xfs_dir2_block_addname()
552 * Clean up the bestfree array and log the header, tail, and entry. xfs_dir2_block_addname()
601 * Look up an entry in the block. This is the external routine,
612 xfs_dir2_data_entry_t *dep; /* block data entry */ xfs_dir2_block_lookup()
614 int ent; /* entry index */ xfs_dir2_block_lookup()
620 * Get the buffer, look up the entry. xfs_dir2_block_lookup()
631 * Get the offset from the leaf entry, to point to the data. xfs_dir2_block_lookup()
653 int *entno) /* returned entry number */ xfs_dir2_block_lookup_int()
655 xfs_dir2_dataptr_t addr; /* data entry address */ xfs_dir2_block_lookup_int()
660 xfs_dir2_data_entry_t *dep; /* block data entry */ xfs_dir2_block_lookup_int()
685 * Find our entry, ENOENT if it's not there. xfs_dir2_block_lookup_int()
716 * Get pointer to the entry from the leaf. xfs_dir2_block_lookup_int()
751 * Remove an entry from a block format directory.
762 xfs_dir2_data_entry_t *dep; /* block data entry */ xfs_dir2_block_removename()
764 int ent; /* block leaf entry index */ xfs_dir2_block_removename()
775 * Look up the entry in the block. Gets the buffer and entry index. xfs_dir2_block_removename()
787 * Point to the data entry using the leaf entry. xfs_dir2_block_removename()
793 * Mark the data entry's space free. xfs_dir2_block_removename()
805 * Remove the leaf entry by marking it stale. xfs_dir2_block_removename()
831 * Replace an entry in a V2 block directory.
842 xfs_dir2_data_entry_t *dep; /* block data entry */ xfs_dir2_block_replace()
844 int ent; /* leaf entry index */ xfs_dir2_block_replace()
850 * Lookup the entry in the directory. Get buffer and entry index. xfs_dir2_block_replace()
861 * Point to the data entry we need to change. xfs_dir2_block_replace()
882 const void *a, /* first leaf entry */ xfs_dir2_block_sort()
883 const void *b) /* second leaf entry */ xfs_dir2_block_sort()
885 const xfs_dir2_leaf_entry_t *la; /* first leaf entry */ xfs_dir2_block_sort()
886 const xfs_dir2_leaf_entry_t *lb; /* second leaf entry */ xfs_dir2_block_sort()
907 xfs_dir2_data_unused_t *dup; /* unused data entry */ xfs_dir2_leaf_to_block()
911 xfs_dir2_leaf_entry_t *lep; /* leaf entry */ xfs_dir2_leaf_to_block()
918 __be16 *tagp; /* end of entry (tag) */ xfs_dir2_leaf_to_block()
974 * Look at the last data entry. xfs_dir2_leaf_to_block()
1051 xfs_dir2_data_entry_t *dep; /* data entry pointer */ xfs_dir2_sf_to_block()
1054 xfs_dir2_data_unused_t *dup; /* unused entry pointer */ xfs_dir2_sf_to_block()
1061 int newoffset; /* offset from current entry */ xfs_dir2_sf_to_block()
1063 xfs_dir2_sf_entry_t *sfep; /* sf entry pointer */ xfs_dir2_sf_to_block()
1066 __be16 *tagp; /* end of data entry */ xfs_dir2_sf_to_block()
1152 * Create entry for . xfs_dir2_sf_to_block()
1166 * Create entry for .. xfs_dir2_sf_to_block()
1217 * Copy a real entry. xfs_dir2_sf_to_block()
1246 * Log the leaf entry area and tail. xfs_dir2_sf_to_block()
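
xfs_dir2_block_sort() above compares two leaf entries; leaf entries are kept sorted by ascending hash value so lookups can search them efficiently. A sketch of that comparator, using host-order fields for brevity (the on-disk XFS fields are big-endian and go through be32_to_cpu()):

    #include <stdint.h>

    struct leaf_entry { uint32_t hashval; uint32_t address; };

    static int leaf_entry_cmp(const void *a, const void *b)
    {
        const struct leaf_entry *la = a;   /* first leaf entry */
        const struct leaf_entry *lb = b;   /* second leaf entry */

        if (la->hashval < lb->hashval)
            return -1;
        return la->hashval > lb->hashval;  /* 1 if greater, 0 if equal */
    }
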
/linux-4.1.27/fs/efivarfs/
H A Dsuper.c116 struct efivar_entry *entry; efivarfs_callback() local
125 entry = kzalloc(sizeof(*entry), GFP_KERNEL); efivarfs_callback()
126 if (!entry) efivarfs_callback()
129 memcpy(entry->var.VariableName, name16, name_size); efivarfs_callback()
130 memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); efivarfs_callback()
132 len = ucs2_utf8size(entry->var.VariableName); efivarfs_callback()
139 ucs2_as_utf8(name, entry->var.VariableName, len); efivarfs_callback()
141 if (efivar_variable_is_removable(entry->var.VendorGuid, name, len)) efivarfs_callback()
146 efi_guid_to_str(&entry->var.VendorGuid, name + len + 1); efivarfs_callback()
164 efivar_entry_size(entry, &size); efivarfs_callback()
165 efivar_entry_add(entry, &efivarfs_list); efivarfs_callback()
168 inode->i_private = entry; efivarfs_callback()
169 i_size_write(inode, size + sizeof(entry->var.Attributes)); efivarfs_callback()
180 kfree(entry); efivarfs_callback()
184 static int efivarfs_destroy(struct efivar_entry *entry, void *data) efivarfs_destroy() argument
186 efivar_entry_remove(entry); efivarfs_destroy()
187 kfree(entry); efivarfs_destroy()
/linux-4.1.27/drivers/staging/rtl8188eu/os_dep/
H A Dos_intfs.c183 struct proc_dir_entry *entry = NULL;
196 entry = create_proc_read_entry("ver_info", S_IFREG | S_IRUGO, rtw_proc, proc_get_drv_version, dev);
197 if (!entry) {
225 entry = create_proc_read_entry("write_reg", S_IFREG | S_IRUGO,
227 if (!entry) {
231 entry->write_proc = proc_set_write_reg;
233 entry = create_proc_read_entry("read_reg", S_IFREG | S_IRUGO,
235 if (!entry) {
239 entry->write_proc = proc_set_read_reg;
242 entry = create_proc_read_entry("fwstate", S_IFREG | S_IRUGO,
244 if (!entry) {
249 entry = create_proc_read_entry("sec_info", S_IFREG | S_IRUGO,
251 if (!entry) {
256 entry = create_proc_read_entry("mlmext_state", S_IFREG | S_IRUGO,
258 if (!entry) {
263 entry = create_proc_read_entry("qos_option", S_IFREG | S_IRUGO,
265 if (!entry) {
270 entry = create_proc_read_entry("ht_option", S_IFREG | S_IRUGO,
272 if (!entry) {
277 entry = create_proc_read_entry("rf_info", S_IFREG | S_IRUGO,
279 if (!entry) {
284 entry = create_proc_read_entry("ap_info", S_IFREG | S_IRUGO,
286 if (!entry) {
291 entry = create_proc_read_entry("adapter_state", S_IFREG | S_IRUGO,
293 if (!entry) {
298 entry = create_proc_read_entry("trx_info", S_IFREG | S_IRUGO,
300 if (!entry) {
305 entry = create_proc_read_entry("mac_reg_dump1", S_IFREG | S_IRUGO,
307 if (!entry) {
312 entry = create_proc_read_entry("mac_reg_dump2", S_IFREG | S_IRUGO,
314 if (!entry) {
319 entry = create_proc_read_entry("mac_reg_dump3", S_IFREG | S_IRUGO,
321 if (!entry) {
326 entry = create_proc_read_entry("bb_reg_dump1", S_IFREG | S_IRUGO,
328 if (!entry) {
333 entry = create_proc_read_entry("bb_reg_dump2", S_IFREG | S_IRUGO,
335 if (!entry) {
340 entry = create_proc_read_entry("bb_reg_dump3", S_IFREG | S_IRUGO,
342 if (!entry) {
347 entry = create_proc_read_entry("rf_reg_dump1", S_IFREG | S_IRUGO,
349 if (!entry) {
354 entry = create_proc_read_entry("rf_reg_dump2", S_IFREG | S_IRUGO,
356 if (!entry) {
363 entry = create_proc_read_entry("rf_reg_dump3", S_IFREG | S_IRUGO,
365 if (!entry) {
370 entry = create_proc_read_entry("rf_reg_dump4", S_IFREG | S_IRUGO,
372 if (!entry) {
380 entry = create_proc_read_entry("all_sta_info", S_IFREG | S_IRUGO,
382 if (!entry) {
388 entry = create_proc_read_entry("best_channel", S_IFREG | S_IRUGO,
390 if (!entry) {
395 entry = create_proc_read_entry("rx_signal", S_IFREG | S_IRUGO,
397 if (!entry) {
401 entry->write_proc = proc_set_rx_signal;
402 entry = create_proc_read_entry("ht_enable", S_IFREG | S_IRUGO,
404 if (!entry) {
408 entry->write_proc = proc_set_ht_enable;
410 entry = create_proc_read_entry("cbw40_enable", S_IFREG | S_IRUGO,
412 if (!entry) {
416 entry->write_proc = proc_set_cbw40_enable;
418 entry = create_proc_read_entry("ampdu_enable", S_IFREG | S_IRUGO,
420 if (!entry) {
424 entry->write_proc = proc_set_ampdu_enable;
426 entry = create_proc_read_entry("rx_stbc", S_IFREG | S_IRUGO,
428 if (!entry) {
432 entry->write_proc = proc_set_rx_stbc;
434 entry = create_proc_read_entry("path_rssi", S_IFREG | S_IRUGO,
436 if (!entry) {
440 entry = create_proc_read_entry("rssi_disp", S_IFREG | S_IRUGO,
442 if (!entry) {
446 entry->write_proc = proc_set_rssi_disp;
/linux-4.1.27/fs/
H A Dmbcache.c13 * There can only be one cache entry in a cache per device and block number.
18 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
19 * in the cache. A valid entry is in the main hash tables of the cache,
20 * and may also be in the lru list. An invalid entry is not in any hashes
23 * A valid cache entry is only in the lru list if no handles refer to it.
25 * entry is released. Entries that cannot be freed immediately are put
54 * prevent either any simultaneous release or free on the entry and also
55 * to serialize accesses to either the e_used or e_queued member of the entry.
178 * It assumes both the block and index hash chains are locked upon entry.
203 /* First lock the entry to serialize access to its local data. */ __mb_cache_entry_release()
205 /* Wake up all processes queuing for this cache entry. */ __mb_cache_entry_release()
221 * Need access to lru list, first drop entry lock, __mb_cache_entry_release()
251 struct mb_cache_entry *entry, *tmp; mb_cache_shrink_scan() local
266 /* Prevent any find or get operation on the entry */ mb_cache_shrink_scan()
283 list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) { mb_cache_shrink_scan()
284 __mb_cache_entry_forget(entry, gfp_mask); mb_cache_shrink_scan()
414 * Prevent any find or get operation on the entry. mb_cache_shrink()
468 * Prevent any find or get operation on the entry. mb_cache_destroy()
496 * Allocates a new cache entry. The new entry will not be valid initially,
522 * entry. mb_cache_entry_alloc()
567 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
568 * the cache. After this, the cache entry can be looked up, but is not yet
570 * success, or -EBUSY if a cache entry for that device + block exists
572 * has inserted the same cache entry in the meantime).
574 * @bdev: device the cache entry belongs to
622 * Release a handle to a cache entry. When the last handle to a cache entry
655 * Get a cache entry by device / block number. (There can only be one entry
656 * in the cache per device and block.) Returns NULL if no such cache entry
657 * exists. The returned cache entry is locked for exclusive access ("single
678 * Prevent a free from removing the entry. hlist_bl_for_each_entry()
731 * Prevent a free from removing the entry. __mb_cache_entry_find()
776 * Find the first cache entry on a given device with a certain key in
779 * returned cache entry is locked for shared access ("multiple readers").
782 * @bdev: the device the cache entry should belong to
808 * Find the next cache entry on a given device with a certain key in an
810 * entry is automatically released, so that mb_cache_entry_find_next() can
813 * entry = mb_cache_entry_find_first();
814 * while (entry) {
816 * entry = mb_cache_entry_find_next(entry, ...);
820 * @bdev: the device the cache entry should belong to
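
The mb_cache_entry_find_first()/mb_cache_entry_find_next() comments above spell out the iteration contract: _find_next() automatically releases the previous entry. Fleshing their sketch out into a complete loop, assuming the 4.1-era signatures shown in fs/mbcache.c (scan_key() is a hypothetical caller; an early exit from the loop would need an explicit mb_cache_entry_release()):

    #include <linux/mbcache.h>

    static void scan_key(struct mb_cache *cache, struct block_device *bdev,
                         unsigned int key)
    {
        struct mb_cache_entry *entry;

        entry = mb_cache_entry_find_first(cache, bdev, key);
        while (entry) {
            /* ... inspect entry (held for shared access) ... */
            entry = mb_cache_entry_find_next(entry, bdev, key);
        }
    }
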
/linux-4.1.27/tools/usb/
H A Dtestusb.c250 struct testdev *entry; find_testdev() local
268 entry = calloc(1, sizeof *entry); find_testdev()
269 if (!entry) find_testdev()
272 entry->name = strdup(name); find_testdev()
273 if (!entry->name) { find_testdev()
274 free(entry); find_testdev()
280 entry->ifnum = ifnum; find_testdev()
285 speed(entry->speed), entry->name, entry->ifnum); find_testdev()
287 entry->next = testdevs; find_testdev()
288 testdevs = entry; find_testdev()
380 struct testdev *entry; main() local
496 for (entry = testdevs; entry; entry = entry->next) { main()
499 entry->param = param; main()
500 entry->forever = forever; main()
501 entry->test = test; main()
504 if (strcmp (entry->name, device)) main()
506 return handle_testdev (entry) != entry; main()
508 status = pthread_create (&entry->thread, 0, handle_testdev, entry); main()
528 for (entry = testdevs; entry; entry = entry->next) { main()
531 if (pthread_join (entry->thread, &retval)) main()
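
testusb's main() above fans out one pthread per testdev list entry and then joins them all. The same fan-out/join skeleton reduced to its essentials; run_one() and run_all() are illustrative names, not functions from testusb.c:

    #include <pthread.h>

    struct testdev {
        struct testdev *next;
        pthread_t thread;
        /* ... name, ifnum, test parameters ... */
    };

    static void *run_one(void *arg)
    {
        /* per-device test loop would go here */
        return arg;
    }

    static void run_all(struct testdev *testdevs)
    {
        struct testdev *entry;

        for (entry = testdevs; entry; entry = entry->next)
            pthread_create(&entry->thread, 0, run_one, entry);  /* fan out */

        for (entry = testdevs; entry; entry = entry->next) {
            void *retval;
            pthread_join(entry->thread, &retval);               /* join all */
        }
    }
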
/linux-4.1.27/drivers/gpu/drm/nouveau/
H A Dnouveau_bios.c517 * entry and setting the dual link flag for EDID equipped panels, nouveau_bios_parse_lvds_table()
737 /* First entry is normal dac, 2nd tv-out perhaps? */ parse_bit_A_tbl_entry()
746 * Parses the flat panel table segment that the bit entry points to. parse_bit_display_tbl_entry()
768 * Parses the init table segment that the bit entry points to. parse_bit_init_tbl_entry()
826 * comparison value, the second entry is a comparison value for parse_bit_i_tbl_entry()
848 * Parses the LVDS table segment that the bit entry points to. parse_bit_lvds_tbl_entry()
975 u8 entries, *entry; bit_table() local
981 entry = &bios->data[bios->offset + 12]; bit_table()
983 if (entry[0] == id) { bit_table()
984 bit->id = entry[0]; bit_table()
985 bit->version = entry[1]; bit_table()
986 bit->length = ROM16(entry[2]); bit_table()
987 bit->offset = ROM16(entry[4]); bit_table()
988 bit->data = ROMPTR(dev, entry[4]); bit_table()
992 entry += bios->data[bios->offset + 9]; bit_table()
1279 * always has the same single (crt) entry, even when tv-out olddcb_table()
1374 struct dcb_output *entry = &dcb->entry[dcb->entries]; new_dcb_entry() local
1376 memset(entry, 0, sizeof(struct dcb_output)); new_dcb_entry()
1377 entry->index = dcb->entries++; new_dcb_entry()
1379 return entry; new_dcb_entry()
1385 struct dcb_output *entry = new_dcb_entry(dcb); fabricate_dcb_output() local
1387 entry->type = type; fabricate_dcb_output()
1388 entry->i2c_index = i2c; fabricate_dcb_output()
1389 entry->heads = heads; fabricate_dcb_output()
1391 entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */ fabricate_dcb_output()
1392 entry->or = or; fabricate_dcb_output()
1397 uint32_t conn, uint32_t conf, struct dcb_output *entry) parse_dcb20_entry()
1402 entry->type = conn & 0xf; parse_dcb20_entry()
1403 entry->i2c_index = (conn >> 4) & 0xf; parse_dcb20_entry()
1404 entry->heads = (conn >> 8) & 0xf; parse_dcb20_entry()
1405 entry->connector = (conn >> 12) & 0xf; parse_dcb20_entry()
1406 entry->bus = (conn >> 16) & 0xf; parse_dcb20_entry()
1407 entry->location = (conn >> 20) & 0x3; parse_dcb20_entry()
1408 entry->or = (conn >> 24) & 0xf; parse_dcb20_entry()
1410 switch (entry->type) { parse_dcb20_entry()
1416 entry->crtconf.maxfreq = (dcb->version < 0x30) ? parse_dcb20_entry()
1424 entry->lvdsconf.use_straps_for_mode = true; parse_dcb20_entry()
1432 entry->lvdsconf.use_straps_for_mode = true; parse_dcb20_entry()
1438 entry->lvdsconf.use_power_scripts = true; parse_dcb20_entry()
1442 entry->lvdsconf.use_acpi_for_edid = true; parse_dcb20_entry()
1444 entry->lvdsconf.use_power_scripts = true; parse_dcb20_entry()
1445 entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4; parse_dcb20_entry()
1446 link = entry->lvdsconf.sor.link; parse_dcb20_entry()
1464 entry->tvconf.has_component_output = conf & (0x8 << 4); parse_dcb20_entry()
1466 entry->tvconf.has_component_output = false; parse_dcb20_entry()
1471 entry->dpconf.sor.link = (conf & 0x00000030) >> 4; parse_dcb20_entry()
1472 entry->extdev = (conf & 0x0000ff00) >> 8; parse_dcb20_entry()
1475 entry->dpconf.link_bw = 162000; parse_dcb20_entry()
1478 entry->dpconf.link_bw = 270000; parse_dcb20_entry()
1481 entry->dpconf.link_bw = 540000; parse_dcb20_entry()
1484 entry->dpconf.link_nr = (conf & 0x0f000000) >> 24; parse_dcb20_entry()
1486 switch (entry->dpconf.link_nr) { parse_dcb20_entry()
1488 entry->dpconf.link_nr = 4; parse_dcb20_entry()
1491 entry->dpconf.link_nr = 2; parse_dcb20_entry()
1494 entry->dpconf.link_nr = 1; parse_dcb20_entry()
1498 link = entry->dpconf.sor.link; parse_dcb20_entry()
1499 entry->i2c_index += NV_I2C_AUX(0); parse_dcb20_entry()
1503 entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4; parse_dcb20_entry()
1504 entry->extdev = (conf & 0x0000ff00) >> 8; parse_dcb20_entry()
1505 link = entry->tmdsconf.sor.link; parse_dcb20_entry()
1508 entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8; parse_dcb20_entry()
1510 entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4; parse_dcb20_entry()
1524 entry->duallink_possible = parse_dcb20_entry()
1525 ((1 << (ffs(entry->or) - 1)) * 3 == entry->or); parse_dcb20_entry()
1527 entry->duallink_possible = (entry->sorconf.link == 3); parse_dcb20_entry()
1532 entry->i2c_upper_default = true; parse_dcb20_entry()
1534 entry->hasht = (entry->location << 4) | entry->type; parse_dcb20_entry()
1535 entry->hashm = (entry->heads << 8) | (link << 6) | entry->or; parse_dcb20_entry()
1541 uint32_t conn, uint32_t conf, struct dcb_output *entry) parse_dcb15_entry()
1547 entry->type = DCB_OUTPUT_ANALOG; parse_dcb15_entry()
1550 entry->type = DCB_OUTPUT_TV; parse_dcb15_entry()
1555 entry->type = DCB_OUTPUT_LVDS; parse_dcb15_entry()
1557 entry->type = DCB_OUTPUT_TMDS; parse_dcb15_entry()
1560 entry->type = DCB_OUTPUT_LVDS; parse_dcb15_entry()
1567 entry->i2c_index = (conn & 0x0003c000) >> 14; parse_dcb15_entry()
1568 entry->heads = ((conn & 0x001c0000) >> 18) + 1; parse_dcb15_entry()
1569 entry->or = entry->heads; /* same as heads, hopefully safe enough */ parse_dcb15_entry()
1570 entry->location = (conn & 0x01e00000) >> 21; parse_dcb15_entry()
1571 entry->bus = (conn & 0x0e000000) >> 25; parse_dcb15_entry()
1572 entry->duallink_possible = false; parse_dcb15_entry()
1574 switch (entry->type) { parse_dcb15_entry()
1576 entry->crtconf.maxfreq = (conf & 0xffff) * 10; parse_dcb15_entry()
1579 entry->tvconf.has_component_output = false; parse_dcb15_entry()
1583 entry->lvdsconf.use_straps_for_mode = true; parse_dcb15_entry()
1584 entry->lvdsconf.use_power_scripts = true; parse_dcb15_entry()
1606 struct dcb_output *ient = &dcb->entry[i]; merge_like_dcb_entries()
1610 struct dcb_output *jent = &dcb->entry[j]; merge_like_dcb_entries()
1612 if (jent->type == 100) /* already merged entry */ merge_like_dcb_entries()
1630 if (dcb->entry[i].type == 100) merge_like_dcb_entries()
1634 dcb->entry[newentries] = dcb->entry[i]; merge_like_dcb_entries()
1635 dcb->entry[newentries].index = newentries; merge_like_dcb_entries()
1650 * DCB entry 2: 02025312 00000010 apply_dcb_encoder_quirks()
1651 * DCB entry 3: 02026312 00000020 apply_dcb_encoder_quirks()
1669 * DCB entry 1: f2005014 ffffffff apply_dcb_encoder_quirks()
1711 * an invalid conntab entry 2 so we figure it out ourself. apply_dcb_encoder_quirks()
1791 struct dcb_output *entry = new_dcb_entry(dcb); parse_dcb_entry() local
1796 ret = parse_dcb20_entry(dev, dcb, conn, conf, entry); parse_dcb_entry()
1798 ret = parse_dcb15_entry(dev, dcb, conn, conf, entry); parse_dcb_entry()
1806 if (entry->type == DCB_OUTPUT_TV && parse_dcb_entry()
1807 entry->location == DCB_LOC_ON_CHIP) parse_dcb_entry()
1808 entry->i2c_index = 0x0f; parse_dcb_entry()
1828 if (dcbt->entry[i].connector) dcb_fake_connectors()
1840 u8 i2c = dcbt->entry[i].i2c_index; dcb_fake_connectors()
1842 dcbt->entry[i].connector = idx++; dcb_fake_connectors()
1846 dcbt->entry[i].connector = map[i2c] - 1; dcb_fake_connectors()
1886 * guarantees dcbent->index is the index of the entry in the rom image parse_dcb_table()
1906 static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry) load_nv17_hwsq_ucode_entry() argument
1910 * and the entry size load_nv17_hwsq_ucode_entry()
1912 * An entry consists of a dword to write to the sequencer control reg load_nv17_hwsq_ucode_entry()
1923 if (bios->data[hwsq_offset] <= entry) { load_nv17_hwsq_ucode_entry()
1925 "requested entry\n"); load_nv17_hwsq_ucode_entry()
1932 NV_ERROR(drm, "Unknown HW sequencer entry size\n"); load_nv17_hwsq_ucode_entry()
1938 hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write; load_nv17_hwsq_ucode_entry()
1974 /* always use entry 0? */ load_nv17_hw_sequencer_ucode()
1396 parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, uint32_t conn, uint32_t conf, struct dcb_output *entry) parse_dcb20_entry() argument
1540 parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb, uint32_t conn, uint32_t conf, struct dcb_output *entry) parse_dcb15_entry() argument
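
parse_dcb20_entry() above unpacks a DCB 2.0 entry from its first dword using the shifts shown. A condensed sketch of just that decode, with a simplified output struct (the field comments are informal glosses, not VBIOS documentation):

    #include <stdint.h>

    struct dcb_out {
        uint8_t type, i2c_index, heads, connector, bus, location, or;
    };

    static void decode_conn(uint32_t conn, struct dcb_out *e)
    {
        e->type      = conn & 0xf;          /* output type (analog/TV/TMDS/...) */
        e->i2c_index = (conn >> 4) & 0xf;   /* DDC/I2C port */
        e->heads     = (conn >> 8) & 0xf;   /* usable CRTC mask */
        e->connector = (conn >> 12) & 0xf;  /* connector-table index */
        e->bus       = (conn >> 16) & 0xf;
        e->location  = (conn >> 20) & 0x3;  /* on-chip vs off-chip */
        e->or        = (conn >> 24) & 0xf;  /* output-resource mask */
    }
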
/linux-4.1.27/drivers/infiniband/hw/mthca/
H A Dmthca_cq.c160 int entry) get_cqe_from_buf()
163 return buf->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE); get_cqe_from_buf()
165 return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf get_cqe_from_buf()
166 + (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE; get_cqe_from_buf()
169 static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry) get_cqe() argument
171 return get_cqe_from_buf(&cq->buf, entry); get_cqe()
380 struct ib_wc *entry, int *free_cqe) handle_error_cqe()
399 entry->status = IB_WC_LOC_LEN_ERR; handle_error_cqe()
402 entry->status = IB_WC_LOC_QP_OP_ERR; handle_error_cqe()
405 entry->status = IB_WC_LOC_EEC_OP_ERR; handle_error_cqe()
408 entry->status = IB_WC_LOC_PROT_ERR; handle_error_cqe()
411 entry->status = IB_WC_WR_FLUSH_ERR; handle_error_cqe()
414 entry->status = IB_WC_MW_BIND_ERR; handle_error_cqe()
417 entry->status = IB_WC_BAD_RESP_ERR; handle_error_cqe()
420 entry->status = IB_WC_LOC_ACCESS_ERR; handle_error_cqe()
423 entry->status = IB_WC_REM_INV_REQ_ERR; handle_error_cqe()
426 entry->status = IB_WC_REM_ACCESS_ERR; handle_error_cqe()
429 entry->status = IB_WC_REM_OP_ERR; handle_error_cqe()
432 entry->status = IB_WC_RETRY_EXC_ERR; handle_error_cqe()
435 entry->status = IB_WC_RNR_RETRY_EXC_ERR; handle_error_cqe()
438 entry->status = IB_WC_LOC_RDD_VIOL_ERR; handle_error_cqe()
441 entry->status = IB_WC_REM_INV_RD_REQ_ERR; handle_error_cqe()
444 entry->status = IB_WC_REM_ABORT_ERR; handle_error_cqe()
447 entry->status = IB_WC_INV_EECN_ERR; handle_error_cqe()
450 entry->status = IB_WC_INV_EEC_STATE_ERR; handle_error_cqe()
453 entry->status = IB_WC_GENERAL_ERR; handle_error_cqe()
457 entry->vendor_err = cqe->vendor_err; handle_error_cqe()
487 struct ib_wc *entry) mthca_poll_one()
503 * Make sure we read CQ entry contents after we've checked the mthca_poll_one()
529 mthca_warn(dev, "CQ entry for unknown QP %06x\n", mthca_poll_one()
536 entry->qp = &(*cur_qp)->ibqp; mthca_poll_one()
542 entry->wr_id = (*cur_qp)->wrid[wqe_index + mthca_poll_one()
549 entry->wr_id = srq->wrid[wqe_index]; mthca_poll_one()
563 entry->wr_id = (*cur_qp)->wrid[wqe_index]; mthca_poll_one()
578 entry, &free_cqe); mthca_poll_one()
583 entry->wc_flags = 0; mthca_poll_one()
586 entry->opcode = IB_WC_RDMA_WRITE; mthca_poll_one()
589 entry->opcode = IB_WC_RDMA_WRITE; mthca_poll_one()
590 entry->wc_flags |= IB_WC_WITH_IMM; mthca_poll_one()
593 entry->opcode = IB_WC_SEND; mthca_poll_one()
596 entry->opcode = IB_WC_SEND; mthca_poll_one()
597 entry->wc_flags |= IB_WC_WITH_IMM; mthca_poll_one()
600 entry->opcode = IB_WC_RDMA_READ; mthca_poll_one()
601 entry->byte_len = be32_to_cpu(cqe->byte_cnt); mthca_poll_one()
604 entry->opcode = IB_WC_COMP_SWAP; mthca_poll_one()
605 entry->byte_len = MTHCA_ATOMIC_BYTE_LEN; mthca_poll_one()
608 entry->opcode = IB_WC_FETCH_ADD; mthca_poll_one()
609 entry->byte_len = MTHCA_ATOMIC_BYTE_LEN; mthca_poll_one()
612 entry->opcode = IB_WC_BIND_MW; mthca_poll_one()
615 entry->opcode = MTHCA_OPCODE_INVALID; mthca_poll_one()
619 entry->byte_len = be32_to_cpu(cqe->byte_cnt); mthca_poll_one()
623 entry->wc_flags = IB_WC_WITH_IMM; mthca_poll_one()
624 entry->ex.imm_data = cqe->imm_etype_pkey_eec; mthca_poll_one()
625 entry->opcode = IB_WC_RECV; mthca_poll_one()
629 entry->wc_flags = IB_WC_WITH_IMM; mthca_poll_one()
630 entry->ex.imm_data = cqe->imm_etype_pkey_eec; mthca_poll_one()
631 entry->opcode = IB_WC_RECV_RDMA_WITH_IMM; mthca_poll_one()
634 entry->wc_flags = 0; mthca_poll_one()
635 entry->opcode = IB_WC_RECV; mthca_poll_one()
638 entry->slid = be16_to_cpu(cqe->rlid); mthca_poll_one()
639 entry->sl = cqe->sl_ipok >> 4; mthca_poll_one()
640 entry->src_qp = be32_to_cpu(cqe->rqpn) & 0xffffff; mthca_poll_one()
641 entry->dlid_path_bits = cqe->g_mlpath & 0x7f; mthca_poll_one()
642 entry->pkey_index = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16; mthca_poll_one()
643 entry->wc_flags |= cqe->g_mlpath & 0x80 ? IB_WC_GRH : 0; mthca_poll_one()
646 entry->wc_flags |= (cqe->sl_ipok & 1 && checksum == 0xffff) ? mthca_poll_one()
650 entry->status = IB_WC_SUCCESS; mthca_poll_one()
663 struct ib_wc *entry) mthca_poll_cq()
679 &freed, entry + npolled); mthca_poll_cq()
159 get_cqe_from_buf(struct mthca_cq_buf *buf, int entry) get_cqe_from_buf() argument
377 handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp *qp, int wqe_index, int is_send, struct mthca_err_cqe *cqe, struct ib_wc *entry, int *free_cqe) handle_error_cqe() argument
483 mthca_poll_one(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp **cur_qp, int *freed, struct ib_wc *entry) mthca_poll_one() argument
662 mthca_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) mthca_poll_cq() argument
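
get_cqe_from_buf() above addresses completion-queue entries two ways: a direct buffer indexes linearly, while a large CQ goes through a page list (page = byte offset / PAGE_SIZE, then the remainder within the page). The same arithmetic as a standalone sketch; cqe_addr() and the constants are illustrative, with MTHCA_CQ_ENTRY_SIZE taken to be 32 bytes by assumption:

    #define CQ_ENTRY_SIZE 32          /* stand-in for MTHCA_CQ_ENTRY_SIZE */
    #define PAGE_SZ       4096        /* stand-in for PAGE_SIZE */

    static void *cqe_addr(void *direct_buf, void **page_list,
                          int is_direct, int entry)
    {
        if (is_direct)
            return (char *)direct_buf + entry * CQ_ENTRY_SIZE;

        /* pick the page holding this entry, then the offset within it */
        return (char *)page_list[entry * CQ_ENTRY_SIZE / PAGE_SZ]
               + (entry * CQ_ENTRY_SIZE) % PAGE_SZ;
    }
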

Completed in 4086 milliseconds
