References to the L2T entry pointer `e` in the Chelsio cxgb4 driver's Layer 2 table code (l2t.c), grouped by function. Entries are shown as code fragments; lines elided by the listing are marked with /* ... */ comments.

static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> 13;
}

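The shift by 13 works because of the 802.1Q TCI layout: the 3-bit priority (PCP) occupies bits 15-13, above the DEI bit and the 12-bit VLAN ID. A standalone sketch of both extractions (tci_prio/tci_vid are illustrative names, not driver functions):

#include <stdint.h>
#include <stdio.h>

/* 802.1Q TCI layout: PCP (3 bits) | DEI (1 bit) | VID (12 bits) */
static unsigned int tci_prio(uint16_t tci) { return tci >> 13; }
static unsigned int tci_vid(uint16_t tci)  { return tci & 0x0fff; }

int main(void)
{
    uint16_t tci = (5u << 13) | 100;   /* priority 5, VLAN 100 */
    printf("prio=%u vid=%u\n", tci_prio(tci), tci_vid(tci));
    return 0;
}
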
static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
	if (atomic_add_return(1, &e->refcnt) == 1)	/* 0 -> 1 transition */
		atomic_dec(&d->nfree);
}

/* Returns zero iff the given address equals the entry's; nonzero otherwise. */
static int addreq(const struct l2t_entry *e, const u32 *addr)
{
	if (e->v6)
		return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
		       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
	return e->addr[0] ^ addr[0];
}

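addreq() folds the per-word XORs together with OR, so the result is zero exactly when every word matches; callers only ever test it against zero. A minimal userspace sketch of the same trick (addr_diff4 is a hypothetical name):

#include <stdint.h>
#include <stdio.h>

/* Zero iff all four 32-bit words are equal; any mismatched bit survives the OR. */
static uint32_t addr_diff4(const uint32_t *a, const uint32_t *b)
{
    return (a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2]) | (a[3] ^ b[3]);
}

int main(void)
{
    uint32_t x[4] = { 1, 2, 3, 4 };
    uint32_t y[4] = { 1, 2, 3, 5 };
    printf("x==x: %d\n", addr_diff4(x, x) == 0);   /* 1 */
    printf("x==y: %d\n", addr_diff4(x, y) == 0);   /* 0 */
    return 0;
}
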
static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}

/* Write an L2T entry to the hardware; with sync != 0 the firmware sends a reply. */
static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
{
	/* ... allocate the request skb and begin the CPL_L2T_WRITE_REQ; its TID
	 * encodes e->idx | (sync ? F_SYNC_WR : 0) plus the reply-queue id ...
	 */
	req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
	req->l2t_idx = htons(e->idx);
	req->vlan = htons(e->vlan);
	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
	/* ... queue the request skb for transmission ... */
	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;
	return 0;
}

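The 16-bit request fields above go through htons() because the hardware expects network (big-endian) byte order, while the MAC is a byte array and is copied as-is. A small userspace sketch of that packing, assuming a made-up wire layout (struct wire_req is illustrative, not the real CPL message):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct wire_req {                      /* hypothetical on-the-wire layout */
    uint16_t params;
    uint16_t l2t_idx;
    uint16_t vlan;
    uint8_t  dst_mac[6];
};

int main(void)
{
    struct wire_req req;
    uint8_t mac[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };

    req.params  = htons(0x0102);       /* 16-bit fields are stored big-endian */
    req.l2t_idx = htons(7);
    req.vlan    = htons(100);
    memcpy(req.dst_mac, mac, sizeof(req.dst_mac));   /* byte arrays need no swap */

    printf("first byte on the wire: 0x%02x\n", ((uint8_t *)&req)[0]);   /* 0x01 */
    return 0;
}
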
/* Send the packets waiting in an entry's ARP queue. */
static void send_pending(struct adapter *adap, struct l2t_entry *e)
{
	while (e->arpq_head) {
		struct sk_buff *skb = e->arpq_head;

		e->arpq_head = skb->next;
		skb->next = NULL;
		t4_ofld_send(adap, skb);
	}
	e->arpq_tail = NULL;
}

In do_l2t_write_rpl(), the reply to a synchronous write releases the queued packets:

	struct l2t_entry *e = &adap->l2t->l2tab[idx];

	spin_lock(&e->lock);
	if (e->state != L2T_STATE_SWITCHING) {
		send_pending(adap, e);
		e->state = (e->neigh->nud_state & NUD_STALE) ?
			   L2T_STATE_STALE : L2T_STATE_VALID;
	}
	spin_unlock(&e->lock);

/* Append a packet to an entry's queue of packets awaiting address resolution. */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
	skb->next = NULL;
	if (e->arpq_head)
		e->arpq_tail->next = skb;
	else
		e->arpq_head = skb;
	e->arpq_tail = skb;
}

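arpq_enqueue() and send_pending() are the two halves of a classic head/tail singly linked queue: O(1) append through the tail pointer, and a drain loop that detaches from the head and clears the tail last. A self-contained userspace sketch (struct pkt and the print stand in for sk_buff and the transmit call):

#include <stdio.h>
#include <stdlib.h>

struct pkt { int id; struct pkt *next; };

struct queue { struct pkt *head, *tail; };

static void enqueue(struct queue *q, struct pkt *p)
{
    p->next = NULL;
    if (q->head)
        q->tail->next = p;   /* non-empty: append after the current tail */
    else
        q->head = p;         /* empty: p becomes the head */
    q->tail = p;
}

static void drain(struct queue *q)
{
    while (q->head) {
        struct pkt *p = q->head;

        q->head = p->next;   /* detach before "sending" */
        printf("send pkt %d\n", p->id);
        free(p);
    }
    q->tail = NULL;          /* queue is empty again */
}

int main(void)
{
    struct queue q = { NULL, NULL };
    for (int i = 0; i < 3; i++) {
        struct pkt *p = malloc(sizeof(*p));
        p->id = i;
        enqueue(&q, p);
    }
    drain(&q);
    return 0;
}
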
int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
		   struct l2t_entry *e)
{
	struct adapter *adap = netdev2adap(dev);

again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		/* fall through to the VALID fast path */
	case L2T_STATE_VALID:
		return t4_ofld_send(adap, skb);
	case L2T_STATE_RESOLVING:
	case L2T_STATE_SYNC_WRITE:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_SYNC_WRITE &&
		    e->state != L2T_STATE_RESOLVING) {
			/* state changed under us; retry from the top */
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

		if (e->state == L2T_STATE_RESOLVING &&
		    !neigh_event_send(e->neigh, NULL)) {
			spin_lock_bh(&e->lock);
			if (e->state == L2T_STATE_RESOLVING && e->arpq_head)
				write_l2e(adap, e, 1);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}

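The switch above is a small per-entry state machine: VALID transmits immediately, STALE kicks a neighbour revalidation and then transmits, and RESOLVING/SYNC_WRITE park the packet until the write reply or ARP answer arrives. The decision reduced to a table, with illustrative names (not the driver's constants):

#include <stdio.h>

enum l2t_state { STATE_VALID, STATE_STALE, STATE_RESOLVING, STATE_SYNC_WRITE };
enum action    { ACT_SEND_NOW, ACT_REVALIDATE_AND_SEND, ACT_QUEUE };

/* What cxgb4_l2t_send() does for each state, reduced to a decision table. */
static enum action decide(enum l2t_state s)
{
    switch (s) {
    case STATE_VALID:      return ACT_SEND_NOW;
    case STATE_STALE:      return ACT_REVALIDATE_AND_SEND;
    case STATE_RESOLVING:
    case STATE_SYNC_WRITE: return ACT_QUEUE;
    }
    return ACT_QUEUE;
}

int main(void)
{
    const char *names[] = { "send now", "revalidate then send",
                            "queue until resolution/write completes" };
    printf("STALE -> %s\n", names[decide(STATE_STALE)]);
    return 0;
}
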
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	/* ... return NULL early if d->nfree is zero ... */

	/* there's definitely a free entry: scan from the rover, then wrap */
	for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

	for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
		;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/* the entry may still sit in its hash chain; unlink it */
	if (e->state < L2T_STATE_SWITCHING)
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}

	e->state = L2T_STATE_UNUSED;
	return e;
}

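alloc_l2e() uses a "rover" so the scan for a free entry resumes where the previous allocation stopped instead of always rescanning from index 0; the kernel version can run the wrap-around loop without a bound because the nfree count already guarantees a free entry exists. A bounded userspace sketch of the same idea (table size and names are illustrative):

#include <stdio.h>

#define TAB_SIZE 8

static int refcnt[TAB_SIZE];
static int rover;                 /* index after the last entry handed out */

/* Returns a free slot index, or -1 if every slot is in use. */
static int alloc_slot(void)
{
    int scanned = 0;

    for (int i = rover; scanned < TAB_SIZE; i = (i + 1) % TAB_SIZE, scanned++) {
        if (refcnt[i] == 0) {
            rover = (i + 1) % TAB_SIZE;   /* resume after this slot next time */
            refcnt[i] = 1;
            return i;
        }
    }
    return -1;
}

int main(void)
{
    printf("slot %d\n", alloc_slot());   /* 0 */
    printf("slot %d\n", alloc_slot());   /* 1: scan resumed at the rover */
    return 0;
}
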
/* Called when an L2T entry has no more users; returns it to the free pool. */
static void t4_l2e_free(struct l2t_entry *e)
{
	struct l2t_data *d;

	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {	/* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
		while (e->arpq_head) {		/* drop any still-queued packets */
			struct sk_buff *skb = e->arpq_head;

			e->arpq_head = skb->next;
			kfree_skb(skb);
		}
		e->arpq_tail = NULL;
	}
	spin_unlock_bh(&e->lock);

	d = container_of(e, struct l2t_data, l2tab[e->idx]);
	atomic_inc(&d->nfree);
}

void cxgb4_l2t_release(struct l2t_entry *e)
{
	if (atomic_dec_and_test(&e->refcnt))
		t4_l2e_free(e);
}

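Together, l2t_hold() and cxgb4_l2t_release() implement the usual free-on-last-put refcount idiom, with the twist that 0 -> 1 and 1 -> 0 transitions also maintain the free-entry count. A userspace sketch with C11 atomics (entry_hold/entry_put are hypothetical; note atomic_fetch_add returns the old value, where the kernel's atomic_add_return returns the new one):

#include <stdatomic.h>
#include <stdio.h>

struct entry { atomic_int refcnt; };
static atomic_int nfree = 1;      /* how many table entries are unreferenced */

static void entry_hold(struct entry *e)
{
    /* old value 0 means the entry just left the free pool */
    if (atomic_fetch_add(&e->refcnt, 1) == 0)
        atomic_fetch_sub(&nfree, 1);
}

static void entry_put(struct entry *e)
{
    /* free (here: recycle) only when the last reference is dropped */
    if (atomic_fetch_sub(&e->refcnt, 1) == 1) {
        atomic_fetch_add(&nfree, 1);
        printf("entry recycled\n");
    }
}

int main(void)
{
    struct entry e = { 0 };
    entry_hold(&e);
    entry_hold(&e);
    entry_put(&e);
    entry_put(&e);                /* prints: entry recycled */
    printf("nfree=%d\n", atomic_load(&nfree));
    return 0;
}
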
/* Reuse an idle entry that is still in the hash table for a new neighbour. */
static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);			/* avoid race with t4_l2t_free */
	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}

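reuse_entry() re-derives the entry state from the neighbour: a changed MAC or a not-yet-valid NUD state forces re-resolution, a connected neighbour is immediately usable, and anything else is stale. The same decision as a small pure function (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum st { ST_RESOLVING, ST_VALID, ST_STALE };

static enum st pick_state(bool dmac_changed, bool nud_valid, bool nud_connected)
{
    if (dmac_changed || !nud_valid)
        return ST_RESOLVING;   /* must re-resolve before using the entry */
    if (nud_connected)
        return ST_VALID;       /* neighbour confirmed reachable */
    return ST_STALE;           /* usable, but revalidate on first send */
}

int main(void)
{
    printf("%d\n", pick_state(false, true, false));   /* 2 = ST_STALE */
    return 0;
}
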
In cxgb4_l2t_get(), lookup comes first and allocation is the fallback:

	struct l2t_entry *e;

	/* ... derive addr/addr_len, ifidx, hash, lport, and vlan from the
	 * neighbour and physdev; take d->lock for writing ...
	 */
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx &&
		    e->vlan == vlan && e->lport == lport) {
			l2t_hold(d, e);
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done;
		}

	/* no match: allocate and initialize a fresh entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);		/* avoid race with t4_l2t_free */
		e->state = L2T_STATE_RESOLVING;
		if (neigh->dev->flags & IFF_LOOPBACK)
			memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
		memcpy(e->addr, addr, addr_len);
		e->ifindex = ifidx;
		e->hash = hash;
		e->lport = lport;
		e->v6 = addr_len == 16;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		e->vlan = vlan;
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		spin_unlock(&e->lock);
	}
done:
	/* ... drop d->lock ... */
	return e;

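cxgb4_l2t_get() is lookup-or-insert over chained hash buckets: walk the chain comparing the full key (address, ifindex, VLAN, port) and, on a miss, push a freshly initialized entry at the head of the chain. A minimal userspace sketch of that bucket discipline (struct ent and the integer key are stand-ins):

#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 4

struct ent { int key; struct ent *next; };
static struct ent *bucket[NBUCKETS];

static struct ent *get_ent(int key)
{
    int h = (unsigned)key % NBUCKETS;

    for (struct ent *e = bucket[h]; e; e = e->next)
        if (e->key == key)
            return e;                 /* hit: reuse the existing entry */

    struct ent *e = calloc(1, sizeof(*e));
    e->key = key;
    e->next = bucket[h];              /* miss: insert at the chain head */
    bucket[h] = e;
    return e;
}

int main(void)
{
    struct ent *a = get_ent(42);
    struct ent *b = get_ent(42);
    printf("same entry: %d\n", a == b);   /* 1 */
    return 0;
}
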
In t4_l2t_update(), a neighbour-table event updates any in-use entry that matches:

	struct l2t_entry *e;
	struct sk_buff *arpq = NULL;

	/* ... derive addr, ifidx, and hash from the neighbour; take d->lock ... */
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			if (atomic_read(&e->refcnt))
				goto found;
			spin_unlock(&e->lock);
			break;
		}
	/* ... no referenced entry matched: drop d->lock and return ... */

found:
	/* ... drop d->lock, keeping e->lock ... */
	if (neigh != e->neigh)
		neigh_replace(e, neigh);

	if (e->state == L2T_STATE_RESOLVING) {
		if (neigh->nud_state & NUD_FAILED) {
			arpq = e->arpq_head;	/* resolution failed: strip the queue */
			e->arpq_head = e->arpq_tail = NULL;
		} else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
			   e->arpq_head) {
			write_l2e(adap, e, 1);	/* sync write; reply flushes the queue */
		}
	} else {
		e->state = neigh->nud_state & NUD_CONNECTED ?
			   L2T_STATE_VALID : L2T_STATE_STALE;
		if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
			write_l2e(adap, e, 0);
	}

	spin_unlock_bh(&e->lock);
	/* ... free any packets detached into arpq ... */

/* Allocate an L2T entry for use by a switching rule. */
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
{
	struct l2t_entry *e;

	write_lock_bh(&d->lock);
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);		/* avoid race with t4_l2t_free */
		e->state = L2T_STATE_SWITCHING;
		atomic_set(&e->refcnt, 1);
		spin_unlock(&e->lock);
	}
	write_unlock_bh(&d->lock);
	return e;
}

/* Set up an existing switching entry and write it to the hardware. */
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
			 u8 port, u8 *eth_addr)
{
	e->vlan = vlan;
	e->lport = port;
	memcpy(e->dmac, eth_addr, ETH_ALEN);
	return write_l2e(adap, e, 0);
}

static char l2e_state(const struct l2t_entry *e)
{
	switch (e->state) {
	/* ... one single-character case per state ... */
	case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R';
	/* ... */
	}
}

In l2t_seq_show(), each table row is printed under the entry's lock:

	char ip[60];
	struct l2t_entry *e = v;

	spin_lock_bh(&e->lock);
	if (e->state == L2T_STATE_SWITCHING)
		ip[0] = '\0';			/* switching entries have no IP */
	else
		sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
	seq_printf(seq, /* ... row format string ... */,
		   e->idx, ip, e->dmac,
		   e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
		   l2e_state(e), atomic_read(&e->refcnt),
		   e->neigh ? e->neigh->dev->name : "");
	spin_unlock_bh(&e->lock);
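
%pI4 and %pI6c are kernel-only printf extensions for rendering binary addresses; userspace gets the equivalent from inet_ntop(3). A sketch that picks the address family from a v6 flag the same way l2t_seq_show() picks the specifier:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
    unsigned char addr[16] = { 127, 0, 0, 1 };   /* first 4 bytes hold an IPv4 address */
    int v6 = 0;
    char ip[INET6_ADDRSTRLEN];

    /* choose the family exactly where the kernel code chooses the specifier */
    inet_ntop(v6 ? AF_INET6 : AF_INET, addr, ip, sizeof(ip));
    printf("%s\n", ip);   /* 127.0.0.1 */
    return 0;
}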