/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/neighbour.h>
#include "common.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

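/*
 * VID 0xfff is reserved by 802.1Q, so it is used here to mark an entry
 * that carries no VLAN tag.
 */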
#define VLAN_NONE 0xfff

/*
 * Module locking notes:  There is a RW lock protecting the L2 table as a
 * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock, individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  Dropping an L2T entry only requires decrementing its reference
 * count, so it can proceed in parallel with entry allocation, but no entry
 * can change state or increment its ref count during allocation as both of
 * these perform lookups.
 */

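/*
 * The 802.1p priority occupies the top 3 bits of the 16-bit VLAN TCI
 * stored in e->vlan, hence the shift by 13.
 */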
static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> 13;
}

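/*
 * Hash an (IP address, ifindex) pair into an L2 table bucket.  Relies on
 * the table size being a power of 2 so the jhash result can be masked.
 */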
static inline unsigned int arp_hash(u32 key, int ifindex,
				    const struct l2t_data *d)
{
	return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
}

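/*
 * Replace an entry's neighbour, taking a reference on the new neighbour
 * before dropping the old one so the entry never holds a stale pointer.
 */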
static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}

/*
 * Set up an L2T entry and send any packets waiting in the arp queue.  The
 * supplied skb is used for the CPL_L2T_WRITE_REQ.  Must be called with the
 * entry locked.
 */
static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
				  struct l2t_entry *e)
{
	struct cpl_l2t_write_req *req;
	struct sk_buff *tmp;

	if (!skb) {
		skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;
	}

	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
	req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
			    V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
			    V_L2T_W_PRIO(vlan_prio(e)));
	memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
	skb->priority = CPL_PRIORITY_CONTROL;
	cxgb3_ofld_send(dev, skb);

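	/*
	 * The HW L2 table entry is now being written; flush any packets
	 * that were queued while the entry was resolving.  Note that skb
	 * is reused here as the iteration cursor.
	 */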
	skb_queue_walk_safe(&e->arpq, skb, tmp) {
		__skb_unlink(skb, &e->arpq);
		cxgb3_ofld_send(dev, skb);
	}
	e->state = L2T_STATE_VALID;

	return 0;
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
	__skb_queue_tail(&e->arpq, skb);
}

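/*
 * Slow-path transmit: depending on the entry's state, either send the skb
 * on, queue it behind a pending ARP resolution, or revalidate a stale
 * entry before sending.
 */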
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
		     struct l2t_entry *e)
{
again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
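		/* fall through */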
	case L2T_STATE_VALID:	/* fast-path, send the packet on */
		return cxgb3_ofld_send(dev, skb);
	case L2T_STATE_RESOLVING:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_RESOLVING) {
			/* ARP already completed */
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

		/*
		 * Only the first packet added to the arpq should kick off
		 * resolution.  However, because the alloc_skb below can fail,
		 * we allow each packet added to the arpq to retry resolution
		 * as a way of recovering from transient memory exhaustion.
		 * A better way would be to use a work request to retry L2T
		 * entries when there's no memory.
		 */
		if (!neigh_event_send(e->neigh, NULL)) {
			skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
					GFP_ATOMIC);
			if (!skb)
				break;

			spin_lock_bh(&e->lock);
			if (!skb_queue_empty(&e->arpq))
				setup_l2e_send_pending(dev, skb, e);
			else	/* we lost the race */
				__kfree_skb(skb);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}

EXPORT_SYMBOL(t3_l2t_send_slow);

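/*
 * Like t3_l2t_send_slow() but with no packet to transmit: just nudge the
 * entry's state machine, revalidating stale entries and kicking off ARP
 * for entries still resolving.
 */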
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
{
again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		return;
	case L2T_STATE_VALID:	/* fast-path, nothing to do */
		return;
	case L2T_STATE_RESOLVING:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_RESOLVING) {
			/* ARP already completed */
			spin_unlock_bh(&e->lock);
			goto again;
		}
		spin_unlock_bh(&e->lock);

		/*
		 * Kick off resolution.  Unlike t3_l2t_send_slow() there is
		 * no skb to allocate here, so issuing a duplicate request
		 * is harmless: the neighbour layer handles repeated events.
		 */
		neigh_event_send(e->neigh, NULL);
	}
}

EXPORT_SYMBOL(t3_l2t_send_event);

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

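	/* wrap around, skipping entry 0 which is never allocated */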
	for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e)
		;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state != L2T_STATE_UNUSED) {
		int hash = arp_hash(e->addr, e->ifindex, d);

		for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				break;
			}
		e->state = L2T_STATE_UNUSED;
	}
	return e;
}

/*
 * Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {	/* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
	}
	spin_unlock_bh(&e->lock);
	atomic_inc(&d->nfree);
}

EXPORT_SYMBOL(t3_l2e_free);

/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);	/* avoid race with t3_l2e_free */

	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}

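/*
 * Look up the L2T entry for the given next hop, allocating and
 * initializing a new one if no match is found.  Returns the entry with
 * its reference count incremented, or NULL on failure.
 */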
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
			     struct net_device *dev, const void *daddr)
{
	struct l2t_entry *e = NULL;
	struct neighbour *neigh;
	struct port_info *p;
	struct l2t_data *d;
	int hash;
	u32 addr;
	int ifidx;
	int smt_idx;

	rcu_read_lock();
	neigh = dst_neigh_lookup(dst, daddr);
	if (!neigh)
		goto done_rcu;

	addr = *(u32 *) neigh->primary_key;
	ifidx = neigh->dev->ifindex;

	if (!dev)
		dev = neigh->dev;
	p = netdev_priv(dev);
	smt_idx = p->port_id;

	d = L2DATA(cdev);
	if (!d)
		goto done_rcu;

	hash = arp_hash(addr, ifidx, d);

	write_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx &&
		    e->smt_idx == smt_idx) {
			l2t_hold(d, e);
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done_unlock;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);	/* avoid race with t3_l2e_free */
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		e->state = L2T_STATE_RESOLVING;
		e->addr = addr;
		e->ifindex = ifidx;
		e->smt_idx = smt_idx;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
			e->vlan = vlan_dev_vlan_id(neigh->dev);
		else
			e->vlan = VLAN_NONE;
		spin_unlock(&e->lock);
	}
done_unlock:
	write_unlock_bh(&d->lock);
done_rcu:
	if (neigh)
		neigh_release(neigh);
	rcu_read_unlock();
	return e;
}

EXPORT_SYMBOL(t3_l2t_get);

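/*
 * Typical offload-path usage of the routines above (a sketch, with error
 * handling elided; callers normally drop their reference through the
 * l2t_release() helper in l2t.h rather than calling t3_l2e_free()
 * directly):
 *
 *	struct l2t_entry *e = t3_l2t_get(cdev, dst, dev, daddr);
 *
 *	if (e)
 *		err = t3_l2t_send_slow(cdev, skb, e);
 *	...
 *	l2t_release(...);	 // when the connection goes away
 */
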
/*
 * Called when address resolution fails for an L2T entry to handle packets
 * on the arpq head.  If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the offload device.
 *
 * XXX: maybe we should abandon the latter behavior and just require a failure
 * handler.
 */
static void handle_failed_resolution(struct t3cdev *dev,
				     struct sk_buff_head *arpq)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(arpq, skb, tmp) {
		struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

		__skb_unlink(skb, arpq);
		if (cb->arp_failure_handler)
			cb->arp_failure_handler(dev, skb);
		else
			cxgb3_ofld_send(dev, skb);
	}
}

/*
 * Called when the host's ARP layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
{
	struct sk_buff_head arpq;
	struct l2t_entry *e;
	struct l2t_data *d = L2DATA(dev);
	u32 addr = *(u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = arp_hash(addr, ifidx, d);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			goto found;
		}
	read_unlock_bh(&d->lock);
	return;

found:
	__skb_queue_head_init(&arpq);

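	/*
	 * Drop the table lock without re-enabling softirqs: e->lock was
	 * taken with softirqs already disabled, and the matching
	 * spin_unlock_bh() below re-enables them.
	 */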
	read_unlock(&d->lock);
	if (atomic_read(&e->refcnt)) {
		if (neigh != e->neigh)
			neigh_replace(e, neigh);

		if (e->state == L2T_STATE_RESOLVING) {
			if (neigh->nud_state & NUD_FAILED)
				skb_queue_splice_init(&e->arpq, &arpq);
			else if (neigh->nud_state & (NUD_CONNECTED | NUD_STALE))
				setup_l2e_send_pending(dev, NULL, e);
		} else {
			e->state = neigh->nud_state & NUD_CONNECTED ?
			    L2T_STATE_VALID : L2T_STATE_STALE;
			if (!ether_addr_equal(e->dmac, neigh->ha))
				setup_l2e_send_pending(dev, NULL, e);
		}
	}
	spin_unlock_bh(&e->lock);

	if (!skb_queue_empty(&arpq))
		handle_failed_resolution(dev, &arpq);
}

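/*
 * Allocate and initialize an L2 table with l2t_capacity entries.  Entry 0
 * is reserved and never handed out, so one slot is deducted from nfree.
 * The capacity must be a power of 2 since arp_hash() masks with
 * nentries - 1.
 */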
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
{
	struct l2t_data *d;
	int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);

	d = cxgb_alloc_mem(size);
	if (!d)
		return NULL;

	d->nentries = l2t_capacity;
	d->rover = &d->l2tab[1];	/* entry 0 is not used */
	atomic_set(&d->nfree, l2t_capacity - 1);
	rwlock_init(&d->lock);

	for (i = 0; i < l2t_capacity; ++i) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		__skb_queue_head_init(&d->l2tab[i].arpq);
		spin_lock_init(&d->l2tab[i].lock);
		atomic_set(&d->l2tab[i].refcnt, 0);
	}
	return d;
}

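/*
 * Release the memory backing an L2 table.  The caller is expected to
 * ensure no entries are still referenced.
 */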
void t3_free_l2t(struct l2t_data *d)
{
	cxgb_free_mem(d);
}